/*
 * raid5.c : Multiple Devices driver for Linux
 *         Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman
 *         Copyright (C) 1999, 2000 Ingo Molnar
 *         Copyright (C) 2002, 2003 H. Peter Anvin
 *
 * RAID-4/5/6 management functions.
 * Thanks to Penguin Computing for making the RAID-6 development possible
 * by donating a test server!
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * BITMAP UNPLUGGING:
 *
 * The sequencing for updating the bitmap reliably is a little
 * subtle (and I got it wrong the first time) so it deserves some
 * explanation.
 *
 * We group bitmap updates into batches.  Each batch has a number.
 * We may write out several batches at once, but that isn't very important.
 * conf->seq_write is the number of the last batch successfully written.
 * conf->seq_flush is the number of the last batch that was closed to
 *    new additions.
 * When we discover that we will need to write to any block in a stripe
 * (in add_stripe_bio) we update the in-memory bitmap and record in sh->bm_seq
 * the number of the batch it will be in. This is seq_flush+1.
 * When we are ready to do a write, if that batch hasn't been written yet,
 *   we plug the array and queue the stripe for later.
 * When an unplug happens, we increment seq_flush, thus closing the current
 *   batch.
 * When we notice that seq_flush > seq_write, we write out all pending updates
 * to the bitmap, and advance seq_write to where seq_flush was.
 * This may occasionally write a bit out twice, but is sure never to
 * miss any bits.
 */
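
/*
 * Illustrative sketch (not part of the driver): the batch-number test
 * implied above.  A stripe recorded in batch sh->bm_seq may only be
 * handled once that batch has been written out, i.e. once
 * conf->seq_write has caught up with it.  Signed subtraction keeps the
 * comparison correct if the counters ever wrap.
 */
static inline int example_batch_still_pending(int bm_seq, int seq_write)
{
	return bm_seq - seq_write > 0;	/* batch not yet written out */
}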

#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/async_tx.h>
#include <linux/async.h>
#include <linux/seq_file.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include "md.h"
#include "raid5.h"
#include "raid0.h"
#include "bitmap.h"

/*
 * Stripe cache
 */

#define NR_STRIPES              256
#define STRIPE_SIZE             PAGE_SIZE
#define STRIPE_SHIFT            (PAGE_SHIFT - 9)
#define STRIPE_SECTORS          (STRIPE_SIZE>>9)
#define IO_THRESHOLD            1
#define BYPASS_THRESHOLD        1
#define NR_HASH                 (PAGE_SIZE / sizeof(struct hlist_head))
#define HASH_MASK               (NR_HASH - 1)

#define stripe_hash(conf, sect) (&((conf)->stripe_hashtbl[((sect) >> STRIPE_SHIFT) & HASH_MASK]))

/* bio's attached to a stripe+device for I/O are linked together in bi_sector
 * order without overlap.  There may be several bio's per stripe+device, and
 * a bio could span several devices.
 * When walking this list for a particular stripe+device, we must never proceed
 * beyond a bio that extends past this device, as the next bio might no longer
 * be valid.
 * This macro is used to determine the 'next' bio in the list, given the sector
 * of the current stripe+device
 */
#define r5_next_bio(bio, sect) ( ( (bio)->bi_sector + ((bio)->bi_size>>9) < sect + STRIPE_SECTORS) ? (bio)->bi_next : NULL)
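
/*
 * Illustrative sketch (not part of the driver): the canonical walk over
 * a stripe+device bio list using r5_next_bio().  'dev->sector' is the
 * first sector of this stripe on this device; the walk stops once a bio
 * no longer starts inside the stripe, exactly as the loops in
 * ops_run_biofill() and ops_run_biodrain() below do.
 */
static inline void example_walk_dev_bios(struct r5dev *dev)
{
	struct bio *bi = dev->toread;

	while (bi && bi->bi_sector < dev->sector + STRIPE_SECTORS)
		bi = r5_next_bio(bi, dev->sector);
}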
/*
 * The following can be used to debug the driver
 */
#define RAID5_PARANOIA  1
#if RAID5_PARANOIA && defined(CONFIG_SMP)
# define CHECK_DEVLOCK() assert_spin_locked(&conf->device_lock)
#else
# define CHECK_DEVLOCK()
#endif

#ifdef DEBUG
#define inline
#define __inline__
#endif

/*
 * We maintain a biased count of active stripes in the bottom 16 bits of
 * bi_phys_segments, and a count of processed stripes in the upper 16 bits
 */
static inline int raid5_bi_phys_segments(struct bio *bio)
{
        return bio->bi_phys_segments & 0xffff;
}

static inline int raid5_bi_hw_segments(struct bio *bio)
{
        return (bio->bi_phys_segments >> 16) & 0xffff;
}

static inline int raid5_dec_bi_phys_segments(struct bio *bio)
{
        --bio->bi_phys_segments;
        return raid5_bi_phys_segments(bio);
}

static inline int raid5_dec_bi_hw_segments(struct bio *bio)
{
        unsigned short val = raid5_bi_hw_segments(bio);

        --val;
        bio->bi_phys_segments = (val << 16) | raid5_bi_phys_segments(bio);
        return val;
}

static inline void raid5_set_bi_hw_segments(struct bio *bio, unsigned int cnt)
{
        bio->bi_phys_segments = raid5_bi_phys_segments(bio) | (cnt << 16);
}
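
/*
 * Illustrative sketch (not part of the driver): the 16/16 packing the
 * helpers above rely on.  For bi_phys_segments == 0x00020003 the active
 * (phys) count is 3 and the processed (hw) count is 2; packing the two
 * counts back together is the inverse of the two accessors above.
 */
static inline unsigned int example_pack_segments(unsigned int phys,
						 unsigned int hw)
{
	return (hw << 16) | (phys & 0xffff);
}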

/* Find first data disk in a raid6 stripe */
static inline int raid6_d0(struct stripe_head *sh)
{
        if (sh->ddf_layout)
                /* ddf always starts from the first device */
                return 0;
        /* md starts just after Q block */
        if (sh->qd_idx == sh->disks - 1)
                return 0;
        else
                return sh->qd_idx + 1;
}
static inline int raid6_next_disk(int disk, int raid_disks)
{
        disk++;
        return (disk < raid_disks) ? disk : 0;
}

/* When walking through the disks in a raid6, starting at raid6_d0,
 * we need to map each disk to a 'slot', where the data disks are slot
 * 0 .. raid_disks-3, the parity disk is raid_disks-2 and the Q disk
 * is raid_disks-1.  This helper does that mapping.
 */
static int raid6_idx_to_slot(int idx, struct stripe_head *sh,
                             int *count, int syndrome_disks)
{
        int slot = *count;

        if (sh->ddf_layout)
                (*count)++;
        if (idx == sh->pd_idx)
                return syndrome_disks;
        if (idx == sh->qd_idx)
                return syndrome_disks + 1;
        if (!sh->ddf_layout)
                (*count)++;
        return slot;
}
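
/*
 * Illustrative sketch (not part of the driver): for a 6-device md-layout
 * raid6 stripe with pd_idx == 4 and qd_idx == 5, raid6_d0() returns 0,
 * and walking the disks with raid6_next_disk() while mapping through
 * raid6_idx_to_slot() yields data slots 0..3, P in slot 4
 * (syndrome_disks) and Q in slot 5 (syndrome_disks + 1).  'slot_of' is
 * a hypothetical scratch array indexed by disk number.
 */
static inline void example_map_slots(struct stripe_head *sh, int *slot_of)
{
	int disks = sh->disks;
	int syndrome_disks = sh->ddf_layout ? disks : disks - 2;
	int d0 = raid6_d0(sh);
	int count = 0;
	int i = d0;

	do {
		slot_of[i] = raid6_idx_to_slot(i, sh, &count, syndrome_disks);
		i = raid6_next_disk(i, disks);
	} while (i != d0);
}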

static void return_io(struct bio *return_bi)
{
        struct bio *bi = return_bi;
        while (bi) {

                return_bi = bi->bi_next;
                bi->bi_next = NULL;
                bi->bi_size = 0;
                bio_endio(bi, 0);
                bi = return_bi;
        }
}

static void print_raid5_conf (raid5_conf_t *conf);

static int stripe_operations_active(struct stripe_head *sh)
{
        return sh->check_state || sh->reconstruct_state ||
               test_bit(STRIPE_BIOFILL_RUN, &sh->state) ||
               test_bit(STRIPE_COMPUTE_RUN, &sh->state);
}

static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
{
        if (atomic_dec_and_test(&sh->count)) {
                BUG_ON(!list_empty(&sh->lru));
                BUG_ON(atomic_read(&conf->active_stripes)==0);
                if (test_bit(STRIPE_HANDLE, &sh->state)) {
                        if (test_bit(STRIPE_DELAYED, &sh->state))
                                list_add_tail(&sh->lru, &conf->delayed_list);
                        else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
                                   sh->bm_seq - conf->seq_write > 0)
                                list_add_tail(&sh->lru, &conf->bitmap_list);
                        else {
                                clear_bit(STRIPE_BIT_DELAY, &sh->state);
                                list_add_tail(&sh->lru, &conf->handle_list);
                        }
                        md_wakeup_thread(conf->mddev->thread);
                } else {
                        BUG_ON(stripe_operations_active(sh));
                        if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
                                atomic_dec(&conf->preread_active_stripes);
                                if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD)
                                        md_wakeup_thread(conf->mddev->thread);
                        }
                        atomic_dec(&conf->active_stripes);
                        if (!test_bit(STRIPE_EXPANDING, &sh->state)) {
                                list_add_tail(&sh->lru, &conf->inactive_list);
                                wake_up(&conf->wait_for_stripe);
                                if (conf->retry_read_aligned)
                                        md_wakeup_thread(conf->mddev->thread);
                        }
                }
        }
}

static void release_stripe(struct stripe_head *sh)
{
        raid5_conf_t *conf = sh->raid_conf;
        unsigned long flags;

        spin_lock_irqsave(&conf->device_lock, flags);
        __release_stripe(conf, sh);
        spin_unlock_irqrestore(&conf->device_lock, flags);
}

static inline void remove_hash(struct stripe_head *sh)
{
        pr_debug("remove_hash(), stripe %llu\n",
                (unsigned long long)sh->sector);

        hlist_del_init(&sh->hash);
}

static inline void insert_hash(raid5_conf_t *conf, struct stripe_head *sh)
{
        struct hlist_head *hp = stripe_hash(conf, sh->sector);

        pr_debug("insert_hash(), stripe %llu\n",
                (unsigned long long)sh->sector);

        CHECK_DEVLOCK();
        hlist_add_head(&sh->hash, hp);
}


/* find an idle stripe, make sure it is unhashed, and return it. */
static struct stripe_head *get_free_stripe(raid5_conf_t *conf)
{
        struct stripe_head *sh = NULL;
        struct list_head *first;

        CHECK_DEVLOCK();
        if (list_empty(&conf->inactive_list))
                goto out;
        first = conf->inactive_list.next;
        sh = list_entry(first, struct stripe_head, lru);
        list_del_init(first);
        remove_hash(sh);
        atomic_inc(&conf->active_stripes);
out:
        return sh;
}

static void shrink_buffers(struct stripe_head *sh)
{
        struct page *p;
        int i;
        int num = sh->raid_conf->pool_size;

        for (i = 0; i < num ; i++) {
                p = sh->dev[i].page;
                if (!p)
                        continue;
                sh->dev[i].page = NULL;
                put_page(p);
        }
}

static int grow_buffers(struct stripe_head *sh)
{
        int i;
        int num = sh->raid_conf->pool_size;

        for (i = 0; i < num; i++) {
                struct page *page;

                if (!(page = alloc_page(GFP_KERNEL))) {
                        return 1;
                }
                sh->dev[i].page = page;
        }
        return 0;
}

static void raid5_build_block(struct stripe_head *sh, int i, int previous);
static void stripe_set_idx(sector_t stripe, raid5_conf_t *conf, int previous,
                            struct stripe_head *sh);

static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)
{
        raid5_conf_t *conf = sh->raid_conf;
        int i;

        BUG_ON(atomic_read(&sh->count) != 0);
        BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
        BUG_ON(stripe_operations_active(sh));

        CHECK_DEVLOCK();
        pr_debug("init_stripe called, stripe %llu\n",
                (unsigned long long)sh->sector);

        remove_hash(sh);

        sh->generation = conf->generation - previous;
        sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks;
        sh->sector = sector;
        stripe_set_idx(sector, conf, previous, sh);
        sh->state = 0;


        for (i = sh->disks; i--; ) {
                struct r5dev *dev = &sh->dev[i];

                if (dev->toread || dev->read || dev->towrite || dev->written ||
                    test_bit(R5_LOCKED, &dev->flags)) {
                        printk(KERN_ERR "sector=%llx i=%d %p %p %p %p %d\n",
                               (unsigned long long)sh->sector, i, dev->toread,
                               dev->read, dev->towrite, dev->written,
                               test_bit(R5_LOCKED, &dev->flags));
                        WARN_ON(1);
                }
                dev->flags = 0;
                raid5_build_block(sh, i, previous);
        }
        insert_hash(conf, sh);
}

static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector,
                                         short generation)
{
        struct stripe_head *sh;
        struct hlist_node *hn;

        CHECK_DEVLOCK();
        pr_debug("__find_stripe, sector %llu\n", (unsigned long long)sector);
        hlist_for_each_entry(sh, hn, stripe_hash(conf, sector), hash)
                if (sh->sector == sector && sh->generation == generation)
                        return sh;
        pr_debug("__stripe %llu not in cache\n", (unsigned long long)sector);
        return NULL;
}

/*
 * Need to check if array has failed when deciding whether to:
 *  - start an array
 *  - remove non-faulty devices
 *  - add a spare
 *  - allow a reshape
 * This determination is simple when no reshape is happening.
 * However if there is a reshape, we need to carefully check
 * both the before and after sections.
 * This is because some failed devices may only affect one
 * of the two sections, and some non-in_sync devices may
 * be in_sync in the section most affected by failed devices.
 */
static int has_failed(raid5_conf_t *conf)
{
        int degraded;
        int i;
        if (conf->mddev->reshape_position == MaxSector)
                return conf->mddev->degraded > conf->max_degraded;

        rcu_read_lock();
        degraded = 0;
        for (i = 0; i < conf->previous_raid_disks; i++) {
                mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
                if (!rdev || test_bit(Faulty, &rdev->flags))
                        degraded++;
                else if (test_bit(In_sync, &rdev->flags))
                        ;
                else
                        /* not in-sync or faulty.
                         * If the reshape increases the number of devices,
                         * this is being recovered by the reshape, so
                         * this 'previous' section is not in_sync.
                         * If the number of devices is being reduced however,
                         * the device can only be part of the array if
                         * we are reverting a reshape, so this section will
                         * be in-sync.
                         */
                        if (conf->raid_disks >= conf->previous_raid_disks)
                                degraded++;
        }
        rcu_read_unlock();
        if (degraded > conf->max_degraded)
                return 1;
        rcu_read_lock();
        degraded = 0;
        for (i = 0; i < conf->raid_disks; i++) {
                mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
                if (!rdev || test_bit(Faulty, &rdev->flags))
                        degraded++;
                else if (test_bit(In_sync, &rdev->flags))
                        ;
                else
                        /* not in-sync or faulty.
                         * If reshape increases the number of devices, this
                         * section has already been recovered, else it
                         * almost certainly hasn't.
                         */
                        if (conf->raid_disks <= conf->previous_raid_disks)
                                degraded++;
        }
        rcu_read_unlock();
        if (degraded > conf->max_degraded)
                return 1;
        return 0;
}

static struct stripe_head *
get_active_stripe(raid5_conf_t *conf, sector_t sector,
                  int previous, int noblock, int noquiesce)
{
        struct stripe_head *sh;

        pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector);

        spin_lock_irq(&conf->device_lock);

        do {
                wait_event_lock_irq(conf->wait_for_stripe,
                                    conf->quiesce == 0 || noquiesce,
                                    conf->device_lock, /* nothing */);
                sh = __find_stripe(conf, sector, conf->generation - previous);
                if (!sh) {
                        if (!conf->inactive_blocked)
                                sh = get_free_stripe(conf);
                        if (noblock && sh == NULL)
                                break;
                        if (!sh) {
                                conf->inactive_blocked = 1;
                                wait_event_lock_irq(conf->wait_for_stripe,
                                                    !list_empty(&conf->inactive_list) &&
                                                    (atomic_read(&conf->active_stripes)
                                                     < (conf->max_nr_stripes *3/4)
                                                     || !conf->inactive_blocked),
                                                    conf->device_lock,
                                                    );
                                conf->inactive_blocked = 0;
                        } else
                                init_stripe(sh, sector, previous);
                } else {
                        if (atomic_read(&sh->count)) {
                                BUG_ON(!list_empty(&sh->lru)
                                    && !test_bit(STRIPE_EXPANDING, &sh->state));
                        } else {
                                if (!test_bit(STRIPE_HANDLE, &sh->state))
                                        atomic_inc(&conf->active_stripes);
                                if (list_empty(&sh->lru) &&
                                    !test_bit(STRIPE_EXPANDING, &sh->state))
                                        BUG();
                                list_del_init(&sh->lru);
                        }
                }
        } while (sh == NULL);

        if (sh)
                atomic_inc(&sh->count);

        spin_unlock_irq(&conf->device_lock);
        return sh;
}

static void
raid5_end_read_request(struct bio *bi, int error);
static void
raid5_end_write_request(struct bio *bi, int error);

static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
{
        raid5_conf_t *conf = sh->raid_conf;
        int i, disks = sh->disks;

        might_sleep();

        for (i = disks; i--; ) {
                int rw;
                struct bio *bi;
                mdk_rdev_t *rdev;
                if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) {
                        if (test_and_clear_bit(R5_WantFUA, &sh->dev[i].flags))
                                rw = WRITE_FUA;
                        else
                                rw = WRITE;
                } else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
                        rw = READ;
                else
                        continue;

                bi = &sh->dev[i].req;

                bi->bi_rw = rw;
                if (rw & WRITE)
                        bi->bi_end_io = raid5_end_write_request;
                else
                        bi->bi_end_io = raid5_end_read_request;

                rcu_read_lock();
                rdev = rcu_dereference(conf->disks[i].rdev);
                if (rdev && test_bit(Faulty, &rdev->flags))
                        rdev = NULL;
                if (rdev)
                        atomic_inc(&rdev->nr_pending);
                rcu_read_unlock();

                if (rdev) {
                        if (s->syncing || s->expanding || s->expanded)
                                md_sync_acct(rdev->bdev, STRIPE_SECTORS);

                        set_bit(STRIPE_IO_STARTED, &sh->state);

                        bi->bi_bdev = rdev->bdev;
                        pr_debug("%s: for %llu schedule op %ld on disc %d\n",
                                __func__, (unsigned long long)sh->sector,
                                bi->bi_rw, i);
                        atomic_inc(&sh->count);
                        bi->bi_sector = sh->sector + rdev->data_offset;
                        bi->bi_flags = 1 << BIO_UPTODATE;
                        bi->bi_vcnt = 1;
                        bi->bi_max_vecs = 1;
                        bi->bi_idx = 0;
                        bi->bi_io_vec = &sh->dev[i].vec;
                        bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
                        bi->bi_io_vec[0].bv_offset = 0;
                        bi->bi_size = STRIPE_SIZE;
                        bi->bi_next = NULL;
                        generic_make_request(bi);
                } else {
                        if (rw & WRITE)
                                set_bit(STRIPE_DEGRADED, &sh->state);
                        pr_debug("skip op %ld on disc %d for sector %llu\n",
                                bi->bi_rw, i, (unsigned long long)sh->sector);
                        clear_bit(R5_LOCKED, &sh->dev[i].flags);
                        set_bit(STRIPE_HANDLE, &sh->state);
                }
        }
}

static struct dma_async_tx_descriptor *
async_copy_data(int frombio, struct bio *bio, struct page *page,
        sector_t sector, struct dma_async_tx_descriptor *tx)
{
        struct bio_vec *bvl;
        struct page *bio_page;
        int i;
        int page_offset;
        struct async_submit_ctl submit;
        enum async_tx_flags flags = 0;

        if (bio->bi_sector >= sector)
                page_offset = (signed)(bio->bi_sector - sector) * 512;
        else
                page_offset = (signed)(sector - bio->bi_sector) * -512;

        if (frombio)
                flags |= ASYNC_TX_FENCE;
        init_async_submit(&submit, flags, tx, NULL, NULL, NULL);

        bio_for_each_segment(bvl, bio, i) {
                int len = bvl->bv_len;
                int clen;
                int b_offset = 0;

                if (page_offset < 0) {
                        b_offset = -page_offset;
                        page_offset += b_offset;
                        len -= b_offset;
                }

                if (len > 0 && page_offset + len > STRIPE_SIZE)
                        clen = STRIPE_SIZE - page_offset;
                else
                        clen = len;

                if (clen > 0) {
                        b_offset += bvl->bv_offset;
                        bio_page = bvl->bv_page;
                        if (frombio)
                                tx = async_memcpy(page, bio_page, page_offset,
                                                  b_offset, clen, &submit);
                        else
                                tx = async_memcpy(bio_page, page, b_offset,
                                                  page_offset, clen, &submit);
                }
                /* chain the operations */
                submit.depend_tx = tx;

                if (clen < len) /* hit end of page */
                        break;
                page_offset +=  len;
        }

        return tx;
}

static void ops_complete_biofill(void *stripe_head_ref)
{
        struct stripe_head *sh = stripe_head_ref;
        struct bio *return_bi = NULL;
        raid5_conf_t *conf = sh->raid_conf;
        int i;

        pr_debug("%s: stripe %llu\n", __func__,
                (unsigned long long)sh->sector);

        /* clear completed biofills */
        spin_lock_irq(&conf->device_lock);
        for (i = sh->disks; i--; ) {
                struct r5dev *dev = &sh->dev[i];

                /* acknowledge completion of a biofill operation */
                /* and check if we need to reply to a read request,
                 * new R5_Wantfill requests are held off until
                 * !STRIPE_BIOFILL_RUN
                 */
                if (test_and_clear_bit(R5_Wantfill, &dev->flags)) {
                        struct bio *rbi, *rbi2;

                        BUG_ON(!dev->read);
                        rbi = dev->read;
                        dev->read = NULL;
                        while (rbi && rbi->bi_sector <
                                dev->sector + STRIPE_SECTORS) {
                                rbi2 = r5_next_bio(rbi, dev->sector);
                                if (!raid5_dec_bi_phys_segments(rbi)) {
                                        rbi->bi_next = return_bi;
                                        return_bi = rbi;
                                }
                                rbi = rbi2;
                        }
                }
        }
        spin_unlock_irq(&conf->device_lock);
        clear_bit(STRIPE_BIOFILL_RUN, &sh->state);

        return_io(return_bi);

        set_bit(STRIPE_HANDLE, &sh->state);
        release_stripe(sh);
}

static void ops_run_biofill(struct stripe_head *sh)
{
        struct dma_async_tx_descriptor *tx = NULL;
        raid5_conf_t *conf = sh->raid_conf;
        struct async_submit_ctl submit;
        int i;

        pr_debug("%s: stripe %llu\n", __func__,
                (unsigned long long)sh->sector);

        for (i = sh->disks; i--; ) {
                struct r5dev *dev = &sh->dev[i];
                if (test_bit(R5_Wantfill, &dev->flags)) {
                        struct bio *rbi;
                        spin_lock_irq(&conf->device_lock);
                        dev->read = rbi = dev->toread;
                        dev->toread = NULL;
                        spin_unlock_irq(&conf->device_lock);
                        while (rbi && rbi->bi_sector <
                                dev->sector + STRIPE_SECTORS) {
                                tx = async_copy_data(0, rbi, dev->page,
                                        dev->sector, tx);
                                rbi = r5_next_bio(rbi, dev->sector);
                        }
                }
        }

        atomic_inc(&sh->count);
        init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_biofill, sh, NULL);
        async_trigger_callback(&submit);
}

static void mark_target_uptodate(struct stripe_head *sh, int target)
{
        struct r5dev *tgt;

        if (target < 0)
                return;

        tgt = &sh->dev[target];
        set_bit(R5_UPTODATE, &tgt->flags);
        BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
        clear_bit(R5_Wantcompute, &tgt->flags);
}

static void ops_complete_compute(void *stripe_head_ref)
{
        struct stripe_head *sh = stripe_head_ref;

        pr_debug("%s: stripe %llu\n", __func__,
                (unsigned long long)sh->sector);

        /* mark the computed target(s) as uptodate */
        mark_target_uptodate(sh, sh->ops.target);
        mark_target_uptodate(sh, sh->ops.target2);

        clear_bit(STRIPE_COMPUTE_RUN, &sh->state);
        if (sh->check_state == check_state_compute_run)
                sh->check_state = check_state_compute_result;
        set_bit(STRIPE_HANDLE, &sh->state);
        release_stripe(sh);
}

/* return a pointer to the address conversion region of the scribble buffer */
static addr_conv_t *to_addr_conv(struct stripe_head *sh,
                                 struct raid5_percpu *percpu)
{
        return percpu->scribble + sizeof(struct page *) * (sh->disks + 2);
}

static struct dma_async_tx_descriptor *
ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu)
{
        int disks = sh->disks;
        struct page **xor_srcs = percpu->scribble;
        int target = sh->ops.target;
        struct r5dev *tgt = &sh->dev[target];
        struct page *xor_dest = tgt->page;
        int count = 0;
        struct dma_async_tx_descriptor *tx;
        struct async_submit_ctl submit;
        int i;

        pr_debug("%s: stripe %llu block: %d\n",
                __func__, (unsigned long long)sh->sector, target);
        BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));

        for (i = disks; i--; )
                if (i != target)
                        xor_srcs[count++] = sh->dev[i].page;

        atomic_inc(&sh->count);

        init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, NULL,
                          ops_complete_compute, sh, to_addr_conv(sh, percpu));
        if (unlikely(count == 1))
                tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit);
        else
                tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);

        return tx;
}

/* set_syndrome_sources - populate source buffers for gen_syndrome
 * @srcs - (struct page *) array of size sh->disks
 * @sh - stripe_head to parse
 *
 * Populates srcs in proper layout order for the stripe and returns the
 * 'count' of sources to be used in a call to async_gen_syndrome.  The P
 * destination buffer is recorded in srcs[count] and the Q destination
 * is recorded in srcs[count+1].
 */
static int set_syndrome_sources(struct page **srcs, struct stripe_head *sh)
{
        int disks = sh->disks;
        int syndrome_disks = sh->ddf_layout ? disks : (disks - 2);
        int d0_idx = raid6_d0(sh);
        int count;
        int i;

        for (i = 0; i < disks; i++)
                srcs[i] = NULL;

        count = 0;
        i = d0_idx;
        do {
                int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);

                srcs[slot] = sh->dev[i].page;
                i = raid6_next_disk(i, disks);
        } while (i != d0_idx);

        return syndrome_disks;
}
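
/*
 * Illustrative usage sketch (not part of the driver): with the layout
 * produced above, a P+Q generation passes count+2 blocks to
 * async_gen_syndrome() -- 'count' data sources plus the P and Q
 * destinations recorded in srcs[count] and srcs[count+1] -- just as
 * ops_run_reconstruct6() below does.
 */
static inline struct dma_async_tx_descriptor *
example_gen_pq(struct page **srcs, struct stripe_head *sh,
	       struct async_submit_ctl *submit)
{
	int count = set_syndrome_sources(srcs, sh);

	return async_gen_syndrome(srcs, 0, count + 2, STRIPE_SIZE, submit);
}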

static struct dma_async_tx_descriptor *
ops_run_compute6_1(struct stripe_head *sh, struct raid5_percpu *percpu)
{
        int disks = sh->disks;
        struct page **blocks = percpu->scribble;
        int target;
        int qd_idx = sh->qd_idx;
        struct dma_async_tx_descriptor *tx;
        struct async_submit_ctl submit;
        struct r5dev *tgt;
        struct page *dest;
        int i;
        int count;

        if (sh->ops.target < 0)
                target = sh->ops.target2;
        else if (sh->ops.target2 < 0)
                target = sh->ops.target;
        else
                /* we should only have one valid target */
                BUG();
        BUG_ON(target < 0);
        pr_debug("%s: stripe %llu block: %d\n",
                __func__, (unsigned long long)sh->sector, target);

        tgt = &sh->dev[target];
        BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
        dest = tgt->page;

        atomic_inc(&sh->count);

        if (target == qd_idx) {
                count = set_syndrome_sources(blocks, sh);
                blocks[count] = NULL; /* regenerating p is not necessary */
                BUG_ON(blocks[count+1] != dest); /* q should already be set */
                init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
                                  ops_complete_compute, sh,
                                  to_addr_conv(sh, percpu));
                tx = async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit);
        } else {
                /* Compute any data- or p-drive using XOR */
                count = 0;
                for (i = disks; i-- ; ) {
                        if (i == target || i == qd_idx)
                                continue;
                        blocks[count++] = sh->dev[i].page;
                }

                init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST,
                                  NULL, ops_complete_compute, sh,
                                  to_addr_conv(sh, percpu));
                tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE, &submit);
        }

        return tx;
}

static struct dma_async_tx_descriptor *
ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu)
{
        int i, count, disks = sh->disks;
        int syndrome_disks = sh->ddf_layout ? disks : disks-2;
        int d0_idx = raid6_d0(sh);
        int faila = -1, failb = -1;
        int target = sh->ops.target;
        int target2 = sh->ops.target2;
        struct r5dev *tgt = &sh->dev[target];
        struct r5dev *tgt2 = &sh->dev[target2];
        struct dma_async_tx_descriptor *tx;
        struct page **blocks = percpu->scribble;
        struct async_submit_ctl submit;

        pr_debug("%s: stripe %llu block1: %d block2: %d\n",
                 __func__, (unsigned long long)sh->sector, target, target2);
        BUG_ON(target < 0 || target2 < 0);
        BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
        BUG_ON(!test_bit(R5_Wantcompute, &tgt2->flags));

        /* we need to open-code set_syndrome_sources to handle the
         * slot number conversion for 'faila' and 'failb'
         */
        for (i = 0; i < disks ; i++)
                blocks[i] = NULL;
        count = 0;
        i = d0_idx;
        do {
                int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);

                blocks[slot] = sh->dev[i].page;

                if (i == target)
                        faila = slot;
                if (i == target2)
                        failb = slot;
                i = raid6_next_disk(i, disks);
        } while (i != d0_idx);

        BUG_ON(faila == failb);
        if (failb < faila)
                swap(faila, failb);
        pr_debug("%s: stripe: %llu faila: %d failb: %d\n",
                 __func__, (unsigned long long)sh->sector, faila, failb);

        atomic_inc(&sh->count);

        if (failb == syndrome_disks+1) {
                /* Q disk is one of the missing disks */
                if (faila == syndrome_disks) {
                        /* Missing P+Q, just recompute */
                        init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
                                          ops_complete_compute, sh,
                                          to_addr_conv(sh, percpu));
                        return async_gen_syndrome(blocks, 0, syndrome_disks+2,
                                                  STRIPE_SIZE, &submit);
                } else {
                        struct page *dest;
                        int data_target;
                        int qd_idx = sh->qd_idx;

                        /* Missing D+Q: recompute D from P, then recompute Q */
                        if (target == qd_idx)
                                data_target = target2;
                        else
                                data_target = target;

                        count = 0;
                        for (i = disks; i-- ; ) {
                                if (i == data_target || i == qd_idx)
                                        continue;
                                blocks[count++] = sh->dev[i].page;
                        }
                        dest = sh->dev[data_target].page;
                        init_async_submit(&submit,
                                          ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST,
                                          NULL, NULL, NULL,
                                          to_addr_conv(sh, percpu));
                        tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE,
                                       &submit);

                        count = set_syndrome_sources(blocks, sh);
                        init_async_submit(&submit, ASYNC_TX_FENCE, tx,
                                          ops_complete_compute, sh,
                                          to_addr_conv(sh, percpu));
                        return async_gen_syndrome(blocks, 0, count+2,
                                                  STRIPE_SIZE, &submit);
                }
        } else {
                init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
                                  ops_complete_compute, sh,
                                  to_addr_conv(sh, percpu));
                if (failb == syndrome_disks) {
                        /* We're missing D+P. */
                        return async_raid6_datap_recov(syndrome_disks+2,
                                                       STRIPE_SIZE, faila,
                                                       blocks, &submit);
                } else {
                        /* We're missing D+D. */
                        return async_raid6_2data_recov(syndrome_disks+2,
                                                       STRIPE_SIZE, faila, failb,
                                                       blocks, &submit);
                }
        }
}


static void ops_complete_prexor(void *stripe_head_ref)
{
        struct stripe_head *sh = stripe_head_ref;

        pr_debug("%s: stripe %llu\n", __func__,
                (unsigned long long)sh->sector);
}

static struct dma_async_tx_descriptor *
ops_run_prexor(struct stripe_head *sh, struct raid5_percpu *percpu,
               struct dma_async_tx_descriptor *tx)
{
        int disks = sh->disks;
        struct page **xor_srcs = percpu->scribble;
        int count = 0, pd_idx = sh->pd_idx, i;
        struct async_submit_ctl submit;

        /* existing parity data subtracted */
        struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;

        pr_debug("%s: stripe %llu\n", __func__,
                (unsigned long long)sh->sector);

        for (i = disks; i--; ) {
                struct r5dev *dev = &sh->dev[i];
                /* Only process blocks that are known to be uptodate */
                if (test_bit(R5_Wantdrain, &dev->flags))
                        xor_srcs[count++] = dev->page;
        }

        init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
                          ops_complete_prexor, sh, to_addr_conv(sh, percpu));
        tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);

        return tx;
}

static struct dma_async_tx_descriptor *
ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
{
        int disks = sh->disks;
        int i;

        pr_debug("%s: stripe %llu\n", __func__,
                (unsigned long long)sh->sector);

        for (i = disks; i--; ) {
                struct r5dev *dev = &sh->dev[i];
                struct bio *chosen;

                if (test_and_clear_bit(R5_Wantdrain, &dev->flags)) {
                        struct bio *wbi;

                        spin_lock_irq(&sh->raid_conf->device_lock);
                        chosen = dev->towrite;
                        dev->towrite = NULL;
                        BUG_ON(dev->written);
                        wbi = dev->written = chosen;
                        spin_unlock_irq(&sh->raid_conf->device_lock);

                        while (wbi && wbi->bi_sector <
                                dev->sector + STRIPE_SECTORS) {
                                if (wbi->bi_rw & REQ_FUA)
                                        set_bit(R5_WantFUA, &dev->flags);
                                tx = async_copy_data(1, wbi, dev->page,
                                        dev->sector, tx);
                                wbi = r5_next_bio(wbi, dev->sector);
                        }
                }
        }

        return tx;
}

static void ops_complete_reconstruct(void *stripe_head_ref)
{
        struct stripe_head *sh = stripe_head_ref;
        int disks = sh->disks;
        int pd_idx = sh->pd_idx;
        int qd_idx = sh->qd_idx;
        int i;
        bool fua = false;

        pr_debug("%s: stripe %llu\n", __func__,
                (unsigned long long)sh->sector);

        for (i = disks; i--; )
                fua |= test_bit(R5_WantFUA, &sh->dev[i].flags);

        for (i = disks; i--; ) {
                struct r5dev *dev = &sh->dev[i];

                if (dev->written || i == pd_idx || i == qd_idx) {
                        set_bit(R5_UPTODATE, &dev->flags);
                        if (fua)
                                set_bit(R5_WantFUA, &dev->flags);
                }
        }

        if (sh->reconstruct_state == reconstruct_state_drain_run)
                sh->reconstruct_state = reconstruct_state_drain_result;
        else if (sh->reconstruct_state == reconstruct_state_prexor_drain_run)
                sh->reconstruct_state = reconstruct_state_prexor_drain_result;
        else {
                BUG_ON(sh->reconstruct_state != reconstruct_state_run);
                sh->reconstruct_state = reconstruct_state_result;
        }

        set_bit(STRIPE_HANDLE, &sh->state);
        release_stripe(sh);
}

static void
ops_run_reconstruct5(struct stripe_head *sh, struct raid5_percpu *percpu,
                     struct dma_async_tx_descriptor *tx)
{
        int disks = sh->disks;
        struct page **xor_srcs = percpu->scribble;
        struct async_submit_ctl submit;
        int count = 0, pd_idx = sh->pd_idx, i;
        struct page *xor_dest;
        int prexor = 0;
        unsigned long flags;

        pr_debug("%s: stripe %llu\n", __func__,
                (unsigned long long)sh->sector);

        /* check if prexor is active which means only process blocks
         * that are part of a read-modify-write (written)
         */
        if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) {
                prexor = 1;
                xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
                for (i = disks; i--; ) {
                        struct r5dev *dev = &sh->dev[i];
                        if (dev->written)
                                xor_srcs[count++] = dev->page;
                }
        } else {
                xor_dest = sh->dev[pd_idx].page;
                for (i = disks; i--; ) {
                        struct r5dev *dev = &sh->dev[i];
                        if (i != pd_idx)
                                xor_srcs[count++] = dev->page;
                }
        }

        /* 1/ if we prexor'd then the dest is reused as a source
         * 2/ if we did not prexor then we are redoing the parity
         * set ASYNC_TX_XOR_DROP_DST and ASYNC_TX_XOR_ZERO_DST
         * for the synchronous xor case
         */
        flags = ASYNC_TX_ACK |
                (prexor ? ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST);

        atomic_inc(&sh->count);

        init_async_submit(&submit, flags, tx, ops_complete_reconstruct, sh,
                          to_addr_conv(sh, percpu));
        if (unlikely(count == 1))
                tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit);
        else
                tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);
}

static void
ops_run_reconstruct6(struct stripe_head *sh, struct raid5_percpu *percpu,
                     struct dma_async_tx_descriptor *tx)
{
        struct async_submit_ctl submit;
        struct page **blocks = percpu->scribble;
        int count;

        pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector);

        count = set_syndrome_sources(blocks, sh);

        atomic_inc(&sh->count);

        init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_reconstruct,
                          sh, to_addr_conv(sh, percpu));
        async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE,  &submit);
}

static void ops_complete_check(void *stripe_head_ref)
{
        struct stripe_head *sh = stripe_head_ref;

        pr_debug("%s: stripe %llu\n", __func__,
                (unsigned long long)sh->sector);

        sh->check_state = check_state_check_result;
        set_bit(STRIPE_HANDLE, &sh->state);
        release_stripe(sh);
}

static void ops_run_check_p(struct stripe_head *sh, struct raid5_percpu *percpu)
{
        int disks = sh->disks;
        int pd_idx = sh->pd_idx;
        int qd_idx = sh->qd_idx;
        struct page *xor_dest;
        struct page **xor_srcs = percpu->scribble;
        struct dma_async_tx_descriptor *tx;
        struct async_submit_ctl submit;
        int count;
        int i;

        pr_debug("%s: stripe %llu\n", __func__,
                (unsigned long long)sh->sector);

        count = 0;
        xor_dest = sh->dev[pd_idx].page;
        xor_srcs[count++] = xor_dest;
        for (i = disks; i--; ) {
                if (i == pd_idx || i == qd_idx)
                        continue;
                xor_srcs[count++] = sh->dev[i].page;
        }

        init_async_submit(&submit, 0, NULL, NULL, NULL,
                          to_addr_conv(sh, percpu));
        tx = async_xor_val(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
                           &sh->ops.zero_sum_result, &submit);

        atomic_inc(&sh->count);
        init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_check, sh, NULL);
        tx = async_trigger_callback(&submit);
}

static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu, int checkp)
{
        struct page **srcs = percpu->scribble;
        struct async_submit_ctl submit;
        int count;

        pr_debug("%s: stripe %llu checkp: %d\n", __func__,
                (unsigned long long)sh->sector, checkp);

        count = set_syndrome_sources(srcs, sh);
        if (!checkp)
                srcs[count] = NULL;

        atomic_inc(&sh->count);
        init_async_submit(&submit, ASYNC_TX_ACK, NULL, ops_complete_check,
                          sh, to_addr_conv(sh, percpu));
        async_syndrome_val(srcs, 0, count+2, STRIPE_SIZE,
                           &sh->ops.zero_sum_result, percpu->spare_page, &submit);
}

static void __raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
{
        int overlap_clear = 0, i, disks = sh->disks;
        struct dma_async_tx_descriptor *tx = NULL;
        raid5_conf_t *conf = sh->raid_conf;
        int level = conf->level;
        struct raid5_percpu *percpu;
        unsigned long cpu;

        cpu = get_cpu();
        percpu = per_cpu_ptr(conf->percpu, cpu);
        if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) {
                ops_run_biofill(sh);
                overlap_clear++;
        }

        if (test_bit(STRIPE_OP_COMPUTE_BLK, &ops_request)) {
                if (level < 6)
                        tx = ops_run_compute5(sh, percpu);
                else {
                        if (sh->ops.target2 < 0 || sh->ops.target < 0)
                                tx = ops_run_compute6_1(sh, percpu);
                        else
                                tx = ops_run_compute6_2(sh, percpu);
                }
                /* terminate the chain if reconstruct is not set to be run */
                if (tx && !test_bit(STRIPE_OP_RECONSTRUCT, &ops_request))
                        async_tx_ack(tx);
        }

        if (test_bit(STRIPE_OP_PREXOR, &ops_request))
                tx = ops_run_prexor(sh, percpu, tx);

        if (test_bit(STRIPE_OP_BIODRAIN, &ops_request)) {
                tx = ops_run_biodrain(sh, tx);
                overlap_clear++;
        }

        if (test_bit(STRIPE_OP_RECONSTRUCT, &ops_request)) {
                if (level < 6)
                        ops_run_reconstruct5(sh, percpu, tx);
                else
                        ops_run_reconstruct6(sh, percpu, tx);
        }

        if (test_bit(STRIPE_OP_CHECK, &ops_request)) {
                if (sh->check_state == check_state_run)
                        ops_run_check_p(sh, percpu);
                else if (sh->check_state == check_state_run_q)
                        ops_run_check_pq(sh, percpu, 0);
                else if (sh->check_state == check_state_run_pq)
                        ops_run_check_pq(sh, percpu, 1);
                else
                        BUG();
        }

        if (overlap_clear)
                for (i = disks; i--; ) {
                        struct r5dev *dev = &sh->dev[i];
                        if (test_and_clear_bit(R5_Overlap, &dev->flags))
                                wake_up(&sh->raid_conf->wait_for_overlap);
                }
        put_cpu();
}

#ifdef CONFIG_MULTICORE_RAID456
static void async_run_ops(void *param, async_cookie_t cookie)
{
        struct stripe_head *sh = param;
        unsigned long ops_request = sh->ops.request;

        clear_bit_unlock(STRIPE_OPS_REQ_PENDING, &sh->state);
        wake_up(&sh->ops.wait_for_ops);

        __raid_run_ops(sh, ops_request);
        release_stripe(sh);
}

static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
{
        /* since handle_stripe can be called outside of raid5d context
         * we need to ensure sh->ops.request is de-staged before another
         * request arrives
         */
        wait_event(sh->ops.wait_for_ops,
                   !test_and_set_bit_lock(STRIPE_OPS_REQ_PENDING, &sh->state));
        sh->ops.request = ops_request;

        atomic_inc(&sh->count);
        async_schedule(async_run_ops, sh);
}
#else
#define raid_run_ops __raid_run_ops
#endif

static int grow_one_stripe(raid5_conf_t *conf)
{
        struct stripe_head *sh;
        sh = kmem_cache_zalloc(conf->slab_cache, GFP_KERNEL);
        if (!sh)
                return 0;

        sh->raid_conf = conf;
        #ifdef CONFIG_MULTICORE_RAID456
        init_waitqueue_head(&sh->ops.wait_for_ops);
        #endif

        if (grow_buffers(sh)) {
                shrink_buffers(sh);
                kmem_cache_free(conf->slab_cache, sh);
                return 0;
        }
        /* we just created an active stripe so... */
        atomic_set(&sh->count, 1);
        atomic_inc(&conf->active_stripes);
        INIT_LIST_HEAD(&sh->lru);
        release_stripe(sh);
        return 1;
}

static int grow_stripes(raid5_conf_t *conf, int num)
{
        struct kmem_cache *sc;
        int devs = max(conf->raid_disks, conf->previous_raid_disks);

        if (conf->mddev->gendisk)
                sprintf(conf->cache_name[0],
                        "raid%d-%s", conf->level, mdname(conf->mddev));
        else
                sprintf(conf->cache_name[0],
                        "raid%d-%p", conf->level, conf->mddev);
        sprintf(conf->cache_name[1], "%s-alt", conf->cache_name[0]);

        conf->active_name = 0;
        sc = kmem_cache_create(conf->cache_name[conf->active_name],
                               sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev),
                               0, 0, NULL);
        if (!sc)
                return 1;
        conf->slab_cache = sc;
        conf->pool_size = devs;
        while (num--)
                if (!grow_one_stripe(conf))
                        return 1;
        return 0;
}

/**
 * scribble_len - return the required size of the scribble region
 * @num - total number of disks in the array
 *
 * The size must be enough to contain:
 * 1/ a struct page pointer for each device in the array +2
 * 2/ room to convert each entry in (1) to its corresponding dma
 *    (dma_map_page()) or page (page_address()) address.
 *
 * Note: the +2 is for the destination buffers of the ddf/raid6 case where we
 * calculate over all devices (not just the data blocks), using zeros in place
 * of the P and Q blocks.
 */
static size_t scribble_len(int num)
{
        size_t len;

        len = sizeof(struct page *) * (num+2) + sizeof(addr_conv_t) * (num+2);

        return len;
}
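
/*
 * Illustrative sketch (not part of the driver): how the scribble region
 * sized above is carved up.  The first (num + 2) slots hold the
 * struct page pointers handed to the async_tx routines; to_addr_conv()
 * above points just past them, at the (num + 2) addr_conv_t entries
 * used for address conversion.  For a 6-device array on a 64-bit build
 * that is 8 * 8 = 64 bytes of page pointers followed by the conversion
 * area.
 */
static inline addr_conv_t *example_scribble_addr_conv(void *scribble,
						      int num)
{
	return (addr_conv_t *)((struct page **)scribble + (num + 2));
}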
1383
1384 static int resize_stripes(raid5_conf_t *conf, int newsize)
1385 {
1386         /* Make all the stripes able to hold 'newsize' devices.
1387          * New slots in each stripe get 'page' set to a new page.
1388          *
1389          * This happens in stages:
1390          * 1/ create a new kmem_cache and allocate the required number of
1391          *    stripe_heads.
1392          * 2/ gather all the old stripe_heads and tranfer the pages across
1393          *    to the new stripe_heads.  This will have the side effect of
1394          *    freezing the array as once all stripe_heads have been collected,
1395          *    no IO will be possible.  Old stripe heads are freed once their
1396          *    pages have been transferred over, and the old kmem_cache is
1397          *    freed when all stripes are done.
1398          * 3/ reallocate conf->disks to be suitably bigger.  If this fails,
1399          *    we simply return a failure status - no need to clean anything up.
1400          * 4/ allocate new pages for the new slots in the new stripe_heads.
1401          *    If this fails, we don't bother trying to shrink the
1402          *    stripe_heads down again, we just leave them as they are.
1403          *    As each stripe_head is processed the new one is released into
1404          *    active service.
1405          *
1406          * Once step 2 is started, we cannot afford to wait for a write,
1407          * so we use GFP_NOIO allocations.
1408          */
1409         struct stripe_head *osh, *nsh;
1410         LIST_HEAD(newstripes);
1411         struct disk_info *ndisks;
1412         unsigned long cpu;
1413         int err;
1414         struct kmem_cache *sc;
1415         int i;
1416
1417         if (newsize <= conf->pool_size)
1418                 return 0; /* never bother to shrink */
1419
1420         err = md_allow_write(conf->mddev);
1421         if (err)
1422                 return err;
1423
1424         /* Step 1 */
1425         sc = kmem_cache_create(conf->cache_name[1-conf->active_name],
1426                                sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev),
1427                                0, 0, NULL);
1428         if (!sc)
1429                 return -ENOMEM;
1430
1431         for (i = conf->max_nr_stripes; i; i--) {
1432                 nsh = kmem_cache_zalloc(sc, GFP_KERNEL);
1433                 if (!nsh)
1434                         break;
1435
1436                 nsh->raid_conf = conf;
1437                 #ifdef CONFIG_MULTICORE_RAID456
1438                 init_waitqueue_head(&nsh->ops.wait_for_ops);
1439                 #endif
1440
1441                 list_add(&nsh->lru, &newstripes);
1442         }
1443         if (i) {
1444                 /* didn't get enough, give up */
1445                 while (!list_empty(&newstripes)) {
1446                         nsh = list_entry(newstripes.next, struct stripe_head, lru);
1447                         list_del(&nsh->lru);
1448                         kmem_cache_free(sc, nsh);
1449                 }
1450                 kmem_cache_destroy(sc);
1451                 return -ENOMEM;
1452         }
1453         /* Step 2 - Must use GFP_NOIO now.
1454          * OK, we have enough stripes, start collecting inactive
1455          * stripes and copying them over
1456          */
1457         list_for_each_entry(nsh, &newstripes, lru) {
1458                 spin_lock_irq(&conf->device_lock);
1459                 wait_event_lock_irq(conf->wait_for_stripe,
1460                                     !list_empty(&conf->inactive_list),
1461                                     conf->device_lock,
1462                                     );
1463                 osh = get_free_stripe(conf);
1464                 spin_unlock_irq(&conf->device_lock);
1465                 atomic_set(&nsh->count, 1);
1466                 for (i = 0; i < conf->pool_size; i++)
1467                         nsh->dev[i].page = osh->dev[i].page;
1468                 for ( ; i < newsize; i++)
1469                         nsh->dev[i].page = NULL;
1470                 kmem_cache_free(conf->slab_cache, osh);
1471         }
1472         kmem_cache_destroy(conf->slab_cache);
1473
1474         /* Step 3.
1475          * At this point, we are holding all the stripes so the array
1476          * is completely stalled, so now is a good time to resize
1477          * conf->disks and the scribble region
1478          */
1479         ndisks = kzalloc(newsize * sizeof(struct disk_info), GFP_NOIO);
1480         if (ndisks) {
1481                 for (i=0; i<conf->raid_disks; i++)
1482                         ndisks[i] = conf->disks[i];
1483                 kfree(conf->disks);
1484                 conf->disks = ndisks;
1485         } else
1486                 err = -ENOMEM;
1487
1488         get_online_cpus();
1489         conf->scribble_len = scribble_len(newsize);
1490         for_each_present_cpu(cpu) {
1491                 struct raid5_percpu *percpu;
1492                 void *scribble;
1493
1494                 percpu = per_cpu_ptr(conf->percpu, cpu);
1495                 scribble = kmalloc(conf->scribble_len, GFP_NOIO);
1496
1497                 if (scribble) {
1498                         kfree(percpu->scribble);
1499                         percpu->scribble = scribble;
1500                 } else {
1501                         err = -ENOMEM;
1502                         break;
1503                 }
1504         }
1505         put_online_cpus();
1506
1507         /* Step 4, return new stripes to service */
1508         while (!list_empty(&newstripes)) {
1509                 nsh = list_entry(newstripes.next, struct stripe_head, lru);
1510                 list_del_init(&nsh->lru);
1511
1512                 for (i = conf->raid_disks; i < newsize; i++)
1513                         if (nsh->dev[i].page == NULL) {
1514                                 struct page *p = alloc_page(GFP_NOIO);
1515                                 nsh->dev[i].page = p;
1516                                 if (!p)
1517                                         err = -ENOMEM;
1518                         }
1519                 release_stripe(nsh);
1520         }
1521         /* critical section passed, GFP_NOIO no longer needed */
1522
1523         conf->slab_cache = sc;
1524         conf->active_name = 1-conf->active_name;
1525         conf->pool_size = newsize;
1526         return err;
1527 }
1528
1529 static int drop_one_stripe(raid5_conf_t *conf)
1530 {
1531         struct stripe_head *sh;
1532
1533         spin_lock_irq(&conf->device_lock);
1534         sh = get_free_stripe(conf);
1535         spin_unlock_irq(&conf->device_lock);
1536         if (!sh)
1537                 return 0;
1538         BUG_ON(atomic_read(&sh->count));
1539         shrink_buffers(sh);
1540         kmem_cache_free(conf->slab_cache, sh);
1541         atomic_dec(&conf->active_stripes);
1542         return 1;
1543 }
1544
1545 static void shrink_stripes(raid5_conf_t *conf)
1546 {
1547         while (drop_one_stripe(conf))
1548                 ;
1549
1550         if (conf->slab_cache)
1551                 kmem_cache_destroy(conf->slab_cache);
1552         conf->slab_cache = NULL;
1553 }
1554
1555 static void raid5_end_read_request(struct bio * bi, int error)
1556 {
1557         struct stripe_head *sh = bi->bi_private;
1558         raid5_conf_t *conf = sh->raid_conf;
1559         int disks = sh->disks, i;
1560         int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
1561         char b[BDEVNAME_SIZE];
1562         mdk_rdev_t *rdev;
1563
1564
1565         for (i = 0; i < disks; i++)
1566                 if (bi == &sh->dev[i].req)
1567                         break;
1568
1569         pr_debug("end_read_request %llu/%d, count: %d, uptodate %d.\n",
1570                 (unsigned long long)sh->sector, i, atomic_read(&sh->count),
1571                 uptodate);
1572         if (i == disks) {
1573                 BUG();
1574                 return;
1575         }
1576
1577         if (uptodate) {
1578                 set_bit(R5_UPTODATE, &sh->dev[i].flags);
1579                 if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
1580                         rdev = conf->disks[i].rdev;
1581                         printk_ratelimited(
1582                                 KERN_INFO
1583                                 "md/raid:%s: read error corrected"
1584                                 " (%lu sectors at %llu on %s)\n",
1585                                 mdname(conf->mddev), STRIPE_SECTORS,
1586                                 (unsigned long long)(sh->sector
1587                                                      + rdev->data_offset),
1588                                 bdevname(rdev->bdev, b));
1589                         atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
1590                         clear_bit(R5_ReadError, &sh->dev[i].flags);
1591                         clear_bit(R5_ReWrite, &sh->dev[i].flags);
1592                 }
1593                 if (atomic_read(&conf->disks[i].rdev->read_errors))
1594                         atomic_set(&conf->disks[i].rdev->read_errors, 0);
1595         } else {
1596                 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
1597                 int retry = 0;
1598                 rdev = conf->disks[i].rdev;
1599
1600                 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
1601                 atomic_inc(&rdev->read_errors);
1602                 if (conf->mddev->degraded >= conf->max_degraded)
1603                         printk_ratelimited(
1604                                 KERN_WARNING
1605                                 "md/raid:%s: read error not correctable "
1606                                 "(sector %llu on %s).\n",
1607                                 mdname(conf->mddev),
1608                                 (unsigned long long)(sh->sector
1609                                                      + rdev->data_offset),
1610                                 bdn);
1611                 else if (test_bit(R5_ReWrite, &sh->dev[i].flags))
1612                         /* Oh, no!!! */
1613                         printk_ratelimited(
1614                                 KERN_WARNING
1615                                 "md/raid:%s: read error NOT corrected!! "
1616                                 "(sector %llu on %s).\n",
1617                                 mdname(conf->mddev),
1618                                 (unsigned long long)(sh->sector
1619                                                      + rdev->data_offset),
1620                                 bdn);
1621                 else if (atomic_read(&rdev->read_errors)
1622                          > conf->max_nr_stripes)
1623                         printk(KERN_WARNING
1624                                "md/raid:%s: Too many read errors, failing device %s.\n",
1625                                mdname(conf->mddev), bdn);
1626                 else
1627                         retry = 1;
1628                 if (retry)
1629                         set_bit(R5_ReadError, &sh->dev[i].flags);
1630                 else {
1631                         clear_bit(R5_ReadError, &sh->dev[i].flags);
1632                         clear_bit(R5_ReWrite, &sh->dev[i].flags);
1633                         md_error(conf->mddev, rdev);
1634                 }
1635         }
1636         rdev_dec_pending(conf->disks[i].rdev, conf->mddev);
1637         clear_bit(R5_LOCKED, &sh->dev[i].flags);
1638         set_bit(STRIPE_HANDLE, &sh->state);
1639         release_stripe(sh);
1640 }
1641
1642 static void raid5_end_write_request(struct bio *bi, int error)
1643 {
1644         struct stripe_head *sh = bi->bi_private;
1645         raid5_conf_t *conf = sh->raid_conf;
1646         int disks = sh->disks, i;
1647         int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
1648
1649         for (i = 0; i < disks; i++)
1650                 if (bi == &sh->dev[i].req)
1651                         break;
1652
1653         pr_debug("end_write_request %llu/%d, count %d, uptodate: %d.\n",
1654                 (unsigned long long)sh->sector, i, atomic_read(&sh->count),
1655                 uptodate);
1656         if (i == disks) {
1657                 BUG();
1658                 return;
1659         }
1660
1661         if (!uptodate)
1662                 md_error(conf->mddev, conf->disks[i].rdev);
1663
1664         rdev_dec_pending(conf->disks[i].rdev, conf->mddev);
1665
1666         clear_bit(R5_LOCKED, &sh->dev[i].flags);
1667         set_bit(STRIPE_HANDLE, &sh->state);
1668         release_stripe(sh);
1669 }
1670
1671
1672 static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous);
1673
1674 static void raid5_build_block(struct stripe_head *sh, int i, int previous)
1675 {
1676         struct r5dev *dev = &sh->dev[i];
1677
1678         bio_init(&dev->req);
1679         dev->req.bi_io_vec = &dev->vec;
1680         dev->req.bi_vcnt++;
1681         dev->req.bi_max_vecs++;
1682         dev->vec.bv_page = dev->page;
1683         dev->vec.bv_len = STRIPE_SIZE;
1684         dev->vec.bv_offset = 0;
1685
1686         dev->req.bi_sector = sh->sector;
1687         dev->req.bi_private = sh;
1688
1689         dev->flags = 0;
1690         dev->sector = compute_blocknr(sh, i, previous);
1691 }
1692
1693 static void error(mddev_t *mddev, mdk_rdev_t *rdev)
1694 {
1695         char b[BDEVNAME_SIZE];
1696         raid5_conf_t *conf = mddev->private;
1697         pr_debug("raid456: error called\n");
1698
1699         if (test_and_clear_bit(In_sync, &rdev->flags)) {
1700                 unsigned long flags;
1701                 spin_lock_irqsave(&conf->device_lock, flags);
1702                 mddev->degraded++;
1703                 spin_unlock_irqrestore(&conf->device_lock, flags);
1704                 /*
1705                  * if recovery was running, make sure it aborts.
1706                  */
1707                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1708         }
1709         set_bit(Blocked, &rdev->flags);
1710         set_bit(Faulty, &rdev->flags);
1711         set_bit(MD_CHANGE_DEVS, &mddev->flags);
1712         printk(KERN_ALERT
1713                "md/raid:%s: Disk failure on %s, disabling device.\n"
1714                "md/raid:%s: Operation continuing on %d devices.\n",
1715                mdname(mddev),
1716                bdevname(rdev->bdev, b),
1717                mdname(mddev),
1718                conf->raid_disks - mddev->degraded);
1719 }
1720
1721 /*
1722  * Input: a 'big' sector number,
1723  * Output: index of the data and parity disk, and the sector # in them.
1724  */
1725 static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
1726                                      int previous, int *dd_idx,
1727                                      struct stripe_head *sh)
1728 {
1729         sector_t stripe, stripe2;
1730         sector_t chunk_number;
1731         unsigned int chunk_offset;
1732         int pd_idx, qd_idx;
1733         int ddf_layout = 0;
1734         sector_t new_sector;
1735         int algorithm = previous ? conf->prev_algo
1736                                  : conf->algorithm;
1737         int sectors_per_chunk = previous ? conf->prev_chunk_sectors
1738                                          : conf->chunk_sectors;
1739         int raid_disks = previous ? conf->previous_raid_disks
1740                                   : conf->raid_disks;
1741         int data_disks = raid_disks - conf->max_degraded;
1742
1743         /* First compute the information on this sector */
1744
1745         /*
1746          * Compute the chunk number and the sector offset inside the chunk
1747          */
1748         chunk_offset = sector_div(r_sector, sectors_per_chunk);
1749         chunk_number = r_sector;
1750
1751         /*
1752          * Compute the stripe number
1753          */
1754         stripe = chunk_number;
1755         *dd_idx = sector_div(stripe, data_disks);
1756         stripe2 = stripe;
1757         /*
1758          * Select the parity disk based on the user-selected algorithm.
1759          */
1760         pd_idx = qd_idx = -1;
1761         switch(conf->level) {
1762         case 4:
1763                 pd_idx = data_disks;
1764                 break;
1765         case 5:
1766                 switch (algorithm) {
1767                 case ALGORITHM_LEFT_ASYMMETRIC:
1768                         pd_idx = data_disks - sector_div(stripe2, raid_disks);
1769                         if (*dd_idx >= pd_idx)
1770                                 (*dd_idx)++;
1771                         break;
1772                 case ALGORITHM_RIGHT_ASYMMETRIC:
1773                         pd_idx = sector_div(stripe2, raid_disks);
1774                         if (*dd_idx >= pd_idx)
1775                                 (*dd_idx)++;
1776                         break;
1777                 case ALGORITHM_LEFT_SYMMETRIC:
1778                         pd_idx = data_disks - sector_div(stripe2, raid_disks);
1779                         *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
1780                         break;
1781                 case ALGORITHM_RIGHT_SYMMETRIC:
1782                         pd_idx = sector_div(stripe2, raid_disks);
1783                         *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
1784                         break;
1785                 case ALGORITHM_PARITY_0:
1786                         pd_idx = 0;
1787                         (*dd_idx)++;
1788                         break;
1789                 case ALGORITHM_PARITY_N:
1790                         pd_idx = data_disks;
1791                         break;
1792                 default:
1793                         BUG();
1794                 }
1795                 break;
1796         case 6:
1797
1798                 switch (algorithm) {
1799                 case ALGORITHM_LEFT_ASYMMETRIC:
1800                         pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
1801                         qd_idx = pd_idx + 1;
1802                         if (pd_idx == raid_disks-1) {
1803                                 (*dd_idx)++;    /* Q D D D P */
1804                                 qd_idx = 0;
1805                         } else if (*dd_idx >= pd_idx)
1806                                 (*dd_idx) += 2; /* D D P Q D */
1807                         break;
1808                 case ALGORITHM_RIGHT_ASYMMETRIC:
1809                         pd_idx = sector_div(stripe2, raid_disks);
1810                         qd_idx = pd_idx + 1;
1811                         if (pd_idx == raid_disks-1) {
1812                                 (*dd_idx)++;    /* Q D D D P */
1813                                 qd_idx = 0;
1814                         } else if (*dd_idx >= pd_idx)
1815                                 (*dd_idx) += 2; /* D D P Q D */
1816                         break;
1817                 case ALGORITHM_LEFT_SYMMETRIC:
1818                         pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
1819                         qd_idx = (pd_idx + 1) % raid_disks;
1820                         *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
1821                         break;
1822                 case ALGORITHM_RIGHT_SYMMETRIC:
1823                         pd_idx = sector_div(stripe2, raid_disks);
1824                         qd_idx = (pd_idx + 1) % raid_disks;
1825                         *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
1826                         break;
1827
1828                 case ALGORITHM_PARITY_0:
1829                         pd_idx = 0;
1830                         qd_idx = 1;
1831                         (*dd_idx) += 2;
1832                         break;
1833                 case ALGORITHM_PARITY_N:
1834                         pd_idx = data_disks;
1835                         qd_idx = data_disks + 1;
1836                         break;
1837
1838                 case ALGORITHM_ROTATING_ZERO_RESTART:
1839                         /* Exactly the same as RIGHT_ASYMMETRIC, but the
1840                          * order of blocks for computing Q is different.
1841                          */
1842                         pd_idx = sector_div(stripe2, raid_disks);
1843                         qd_idx = pd_idx + 1;
1844                         if (pd_idx == raid_disks-1) {
1845                                 (*dd_idx)++;    /* Q D D D P */
1846                                 qd_idx = 0;
1847                         } else if (*dd_idx >= pd_idx)
1848                                 (*dd_idx) += 2; /* D D P Q D */
1849                         ddf_layout = 1;
1850                         break;
1851
1852                 case ALGORITHM_ROTATING_N_RESTART:
1853                         /* Same as left_asymmetric, but the first stripe is
1854                          * D D D P Q  rather than
1855                          * Q D D D P
1856                          */
1857                         stripe2 += 1;
1858                         pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
1859                         qd_idx = pd_idx + 1;
1860                         if (pd_idx == raid_disks-1) {
1861                                 (*dd_idx)++;    /* Q D D D P */
1862                                 qd_idx = 0;
1863                         } else if (*dd_idx >= pd_idx)
1864                                 (*dd_idx) += 2; /* D D P Q D */
1865                         ddf_layout = 1;
1866                         break;
1867
1868                 case ALGORITHM_ROTATING_N_CONTINUE:
1869                         /* Same as left_symmetric but Q is before P */
1870                         pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
1871                         qd_idx = (pd_idx + raid_disks - 1) % raid_disks;
1872                         *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
1873                         ddf_layout = 1;
1874                         break;
1875
1876                 case ALGORITHM_LEFT_ASYMMETRIC_6:
1877                         /* RAID5 left_asymmetric, with Q on last device */
1878                         pd_idx = data_disks - sector_div(stripe2, raid_disks-1);
1879                         if (*dd_idx >= pd_idx)
1880                                 (*dd_idx)++;
1881                         qd_idx = raid_disks - 1;
1882                         break;
1883
1884                 case ALGORITHM_RIGHT_ASYMMETRIC_6:
1885                         pd_idx = sector_div(stripe2, raid_disks-1);
1886                         if (*dd_idx >= pd_idx)
1887                                 (*dd_idx)++;
1888                         qd_idx = raid_disks - 1;
1889                         break;
1890
1891                 case ALGORITHM_LEFT_SYMMETRIC_6:
1892                         pd_idx = data_disks - sector_div(stripe2, raid_disks-1);
1893                         *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
1894                         qd_idx = raid_disks - 1;
1895                         break;
1896
1897                 case ALGORITHM_RIGHT_SYMMETRIC_6:
1898                         pd_idx = sector_div(stripe2, raid_disks-1);
1899                         *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
1900                         qd_idx = raid_disks - 1;
1901                         break;
1902
1903                 case ALGORITHM_PARITY_0_6:
1904                         pd_idx = 0;
1905                         (*dd_idx)++;
1906                         qd_idx = raid_disks - 1;
1907                         break;
1908
1909                 default:
1910                         BUG();
1911                 }
1912                 break;
1913         }
1914
1915         if (sh) {
1916                 sh->pd_idx = pd_idx;
1917                 sh->qd_idx = qd_idx;
1918                 sh->ddf_layout = ddf_layout;
1919         }
1920         /*
1921          * Finally, compute the new sector number
1922          */
1923         new_sector = (sector_t)stripe * sectors_per_chunk + chunk_offset;
1924         return new_sector;
1925 }
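
/* A worked mapping, for illustration only: a hypothetical 4-disk RAID5
 * using ALGORITHM_LEFT_SYMMETRIC with 128-sector chunks (data_disks = 3),
 * mapping r_sector = 1000:
 *
 *   chunk_offset = 1000 % 128 = 104      chunk_number = 1000 / 128 = 7
 *   stripe       = 7 / 3       = 2       *dd_idx      = 7 % 3      = 1
 *   pd_idx  = data_disks - (stripe % raid_disks)  = 3 - 2           = 1
 *   *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks = (1 + 1 + 1) % 4 = 3
 *   new_sector = 2 * 128 + 104 = 360
 *
 * so array sector 1000 lives at sector 360 of device 3, with the parity
 * for that stripe on device 1.
 */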
1926
1927
1928 static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
1929 {
1930         raid5_conf_t *conf = sh->raid_conf;
1931         int raid_disks = sh->disks;
1932         int data_disks = raid_disks - conf->max_degraded;
1933         sector_t new_sector = sh->sector, check;
1934         int sectors_per_chunk = previous ? conf->prev_chunk_sectors
1935                                          : conf->chunk_sectors;
1936         int algorithm = previous ? conf->prev_algo
1937                                  : conf->algorithm;
1938         sector_t stripe;
1939         int chunk_offset;
1940         sector_t chunk_number;
1941         int dummy1, dd_idx = i;
1942         sector_t r_sector;
1943         struct stripe_head sh2;
1944
1945
1946         chunk_offset = sector_div(new_sector, sectors_per_chunk);
1947         stripe = new_sector;
1948
1949         if (i == sh->pd_idx)
1950                 return 0;
1951         switch(conf->level) {
1952         case 4: break;
1953         case 5:
1954                 switch (algorithm) {
1955                 case ALGORITHM_LEFT_ASYMMETRIC:
1956                 case ALGORITHM_RIGHT_ASYMMETRIC:
1957                         if (i > sh->pd_idx)
1958                                 i--;
1959                         break;
1960                 case ALGORITHM_LEFT_SYMMETRIC:
1961                 case ALGORITHM_RIGHT_SYMMETRIC:
1962                         if (i < sh->pd_idx)
1963                                 i += raid_disks;
1964                         i -= (sh->pd_idx + 1);
1965                         break;
1966                 case ALGORITHM_PARITY_0:
1967                         i -= 1;
1968                         break;
1969                 case ALGORITHM_PARITY_N:
1970                         break;
1971                 default:
1972                         BUG();
1973                 }
1974                 break;
1975         case 6:
1976                 if (i == sh->qd_idx)
1977                         return 0; /* It is the Q disk */
1978                 switch (algorithm) {
1979                 case ALGORITHM_LEFT_ASYMMETRIC:
1980                 case ALGORITHM_RIGHT_ASYMMETRIC:
1981                 case ALGORITHM_ROTATING_ZERO_RESTART:
1982                 case ALGORITHM_ROTATING_N_RESTART:
1983                         if (sh->pd_idx == raid_disks-1)
1984                                 i--;    /* Q D D D P */
1985                         else if (i > sh->pd_idx)
1986                                 i -= 2; /* D D P Q D */
1987                         break;
1988                 case ALGORITHM_LEFT_SYMMETRIC:
1989                 case ALGORITHM_RIGHT_SYMMETRIC:
1990                         if (sh->pd_idx == raid_disks-1)
1991                                 i--; /* Q D D D P */
1992                         else {
1993                                 /* D D P Q D */
1994                                 if (i < sh->pd_idx)
1995                                         i += raid_disks;
1996                                 i -= (sh->pd_idx + 2);
1997                         }
1998                         break;
1999                 case ALGORITHM_PARITY_0:
2000                         i -= 2;
2001                         break;
2002                 case ALGORITHM_PARITY_N:
2003                         break;
2004                 case ALGORITHM_ROTATING_N_CONTINUE:
2005                         /* Like left_symmetric, but P is before Q */
2006                         if (sh->pd_idx == 0)
2007                                 i--;    /* P D D D Q */
2008                         else {
2009                                 /* D D Q P D */
2010                                 if (i < sh->pd_idx)
2011                                         i += raid_disks;
2012                                 i -= (sh->pd_idx + 1);
2013                         }
2014                         break;
2015                 case ALGORITHM_LEFT_ASYMMETRIC_6:
2016                 case ALGORITHM_RIGHT_ASYMMETRIC_6:
2017                         if (i > sh->pd_idx)
2018                                 i--;
2019                         break;
2020                 case ALGORITHM_LEFT_SYMMETRIC_6:
2021                 case ALGORITHM_RIGHT_SYMMETRIC_6:
2022                         if (i < sh->pd_idx)
2023                                 i += data_disks + 1;
2024                         i -= (sh->pd_idx + 1);
2025                         break;
2026                 case ALGORITHM_PARITY_0_6:
2027                         i -= 1;
2028                         break;
2029                 default:
2030                         BUG();
2031                 }
2032                 break;
2033         }
2034
2035         chunk_number = stripe * data_disks + i;
2036         r_sector = chunk_number * sectors_per_chunk + chunk_offset;
2037
2038         check = raid5_compute_sector(conf, r_sector,
2039                                      previous, &dummy1, &sh2);
2040         if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx
2041                 || sh2.qd_idx != sh->qd_idx) {
2042                 printk(KERN_ERR "md/raid:%s: compute_blocknr: map not correct\n",
2043                        mdname(conf->mddev));
2044                 return 0;
2045         }
2046         return r_sector;
2047 }
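
/* Continuing the hypothetical 4-disk left-symmetric example from
 * raid5_compute_sector(): inverting device i = 3 on the stripe at
 * sector 360 (pd_idx = 1),
 *
 *   chunk_offset = 360 % 128 = 104       stripe = 360 / 128 = 2
 *   i > pd_idx (3 > 1), so i -= (pd_idx + 1)  =>  i = 1
 *   chunk_number = 2 * 3 + 1 = 7
 *   r_sector = 7 * 128 + 104 = 1000
 *
 * which is the array sector we started from; the raid5_compute_sector()
 * call at the end of this function re-checks exactly this inverse.
 */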
2048
2049
2050 static void
2051 schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
2052                          int rcw, int expand)
2053 {
2054         int i, pd_idx = sh->pd_idx, disks = sh->disks;
2055         raid5_conf_t *conf = sh->raid_conf;
2056         int level = conf->level;
2057
2058         if (rcw) {
2059                 /* if we are not expanding this is a proper write request, and
2060                  * there will be bios with new data to be drained into the
2061                  * stripe cache
2062                  */
2063                 if (!expand) {
2064                         sh->reconstruct_state = reconstruct_state_drain_run;
2065                         set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
2066                 } else
2067                         sh->reconstruct_state = reconstruct_state_run;
2068
2069                 set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);
2070
2071                 for (i = disks; i--; ) {
2072                         struct r5dev *dev = &sh->dev[i];
2073
2074                         if (dev->towrite) {
2075                                 set_bit(R5_LOCKED, &dev->flags);
2076                                 set_bit(R5_Wantdrain, &dev->flags);
2077                                 if (!expand)
2078                                         clear_bit(R5_UPTODATE, &dev->flags);
2079                                 s->locked++;
2080                         }
2081                 }
2082                 if (s->locked + conf->max_degraded == disks)
2083                         if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state))
2084                                 atomic_inc(&conf->pending_full_writes);
2085         } else {
2086                 BUG_ON(level == 6);
2087                 BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) ||
2088                         test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags)));
2089
2090                 sh->reconstruct_state = reconstruct_state_prexor_drain_run;
2091                 set_bit(STRIPE_OP_PREXOR, &s->ops_request);
2092                 set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
2093                 set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);
2094
2095                 for (i = disks; i--; ) {
2096                         struct r5dev *dev = &sh->dev[i];
2097                         if (i == pd_idx)
2098                                 continue;
2099
2100                         if (dev->towrite &&
2101                             (test_bit(R5_UPTODATE, &dev->flags) ||
2102                              test_bit(R5_Wantcompute, &dev->flags))) {
2103                                 set_bit(R5_Wantdrain, &dev->flags);
2104                                 set_bit(R5_LOCKED, &dev->flags);
2105                                 clear_bit(R5_UPTODATE, &dev->flags);
2106                                 s->locked++;
2107                         }
2108                 }
2109         }
2110
2111         /* keep the parity disk(s) locked while asynchronous operations
2112          * are in flight
2113          */
2114         set_bit(R5_LOCKED, &sh->dev[pd_idx].flags);
2115         clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
2116         s->locked++;
2117
2118         if (level == 6) {
2119                 int qd_idx = sh->qd_idx;
2120                 struct r5dev *dev = &sh->dev[qd_idx];
2121
2122                 set_bit(R5_LOCKED, &dev->flags);
2123                 clear_bit(R5_UPTODATE, &dev->flags);
2124                 s->locked++;
2125         }
2126
2127         pr_debug("%s: stripe %llu locked: %d ops_request: %lx\n",
2128                 __func__, (unsigned long long)sh->sector,
2129                 s->locked, s->ops_request);
2130 }
2131
2132 /*
2133  * Each stripe/dev can have one or more bios attached.
2134  * toread/towrite point to the first in a chain.
2135  * The bi_next chain must be in order.
2136  */
2137 static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, int forwrite)
2138 {
2139         struct bio **bip;
2140         raid5_conf_t *conf = sh->raid_conf;
2141         int firstwrite=0;
2142
2143         pr_debug("adding bi b#%llu to stripe s#%llu\n",
2144                 (unsigned long long)bi->bi_sector,
2145                 (unsigned long long)sh->sector);
2146
2147
2148         spin_lock_irq(&conf->device_lock);
2149         if (forwrite) {
2150                 bip = &sh->dev[dd_idx].towrite;
2151                 if (*bip == NULL && sh->dev[dd_idx].written == NULL)
2152                         firstwrite = 1;
2153         } else
2154                 bip = &sh->dev[dd_idx].toread;
2155         while (*bip && (*bip)->bi_sector < bi->bi_sector) {
2156                 if ((*bip)->bi_sector + ((*bip)->bi_size >> 9) > bi->bi_sector)
2157                         goto overlap;
2158                 bip = & (*bip)->bi_next;
2159         }
2160         if (*bip && (*bip)->bi_sector < bi->bi_sector + ((bi->bi_size)>>9))
2161                 goto overlap;
2162
2163         BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next);
2164         if (*bip)
2165                 bi->bi_next = *bip;
2166         *bip = bi;
2167         bi->bi_phys_segments++;
2168
2169         if (forwrite) {
2170                 /* check if page is covered */
2171                 sector_t sector = sh->dev[dd_idx].sector;
2172                 for (bi=sh->dev[dd_idx].towrite;
2173                      sector < sh->dev[dd_idx].sector + STRIPE_SECTORS &&
2174                              bi && bi->bi_sector <= sector;
2175                      bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) {
2176                         if (bi->bi_sector + (bi->bi_size>>9) >= sector)
2177                                 sector = bi->bi_sector + (bi->bi_size>>9);
2178                 }
2179                 if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS)
2180                         set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags);
2181         }
2182         spin_unlock_irq(&conf->device_lock);
2183
2184         pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n",
2185                 (unsigned long long)(*bip)->bi_sector,
2186                 (unsigned long long)sh->sector, dd_idx);
2187
2188         if (conf->mddev->bitmap && firstwrite) {
2189                 bitmap_startwrite(conf->mddev->bitmap, sh->sector,
2190                                   STRIPE_SECTORS, 0);
2191                 sh->bm_seq = conf->seq_flush+1;
2192                 set_bit(STRIPE_BIT_DELAY, &sh->state);
2193         }
2194         return 1;
2195
2196  overlap:
2197         set_bit(R5_Overlap, &sh->dev[dd_idx].flags);
2198         spin_unlock_irq(&conf->device_lock);
2199         return 0;
2200 }
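
/* Overlap example: if dev[dd_idx].towrite already holds a bio covering
 * sectors [s, s+8) and a new bio arrives for [s+4, s+12), the list walk
 * above sees (*bip)->bi_sector + ((*bip)->bi_size >> 9) > bi->bi_sector
 * and jumps to 'overlap': R5_Overlap is set and 0 is returned, so the
 * submitter is expected to wait on conf->wait_for_overlap and retry once
 * the conflicting bio has completed.
 */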
2201
2202 static void end_reshape(raid5_conf_t *conf);
2203
2204 static void stripe_set_idx(sector_t stripe, raid5_conf_t *conf, int previous,
2205                             struct stripe_head *sh)
2206 {
2207         int sectors_per_chunk =
2208                 previous ? conf->prev_chunk_sectors : conf->chunk_sectors;
2209         int dd_idx;
2210         int chunk_offset = sector_div(stripe, sectors_per_chunk);
2211         int disks = previous ? conf->previous_raid_disks : conf->raid_disks;
2212
2213         raid5_compute_sector(conf,
2214                              stripe * (disks - conf->max_degraded)
2215                              *sectors_per_chunk + chunk_offset,
2216                              previous,
2217                              &dd_idx, sh);
2218 }
2219
2220 static void
2221 handle_failed_stripe(raid5_conf_t *conf, struct stripe_head *sh,
2222                                 struct stripe_head_state *s, int disks,
2223                                 struct bio **return_bi)
2224 {
2225         int i;
2226         for (i = disks; i--; ) {
2227                 struct bio *bi;
2228                 int bitmap_end = 0;
2229
2230                 if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
2231                         mdk_rdev_t *rdev;
2232                         rcu_read_lock();
2233                         rdev = rcu_dereference(conf->disks[i].rdev);
2234                         if (rdev && test_bit(In_sync, &rdev->flags))
2235                                 /* multiple read failures in one stripe */
2236                                 md_error(conf->mddev, rdev);
2237                         rcu_read_unlock();
2238                 }
2239                 spin_lock_irq(&conf->device_lock);
2240                 /* fail all writes first */
2241                 bi = sh->dev[i].towrite;
2242                 sh->dev[i].towrite = NULL;
2243                 if (bi) {
2244                         s->to_write--;
2245                         bitmap_end = 1;
2246                 }
2247
2248                 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
2249                         wake_up(&conf->wait_for_overlap);
2250
2251                 while (bi && bi->bi_sector <
2252                         sh->dev[i].sector + STRIPE_SECTORS) {
2253                         struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
2254                         clear_bit(BIO_UPTODATE, &bi->bi_flags);
2255                         if (!raid5_dec_bi_phys_segments(bi)) {
2256                                 md_write_end(conf->mddev);
2257                                 bi->bi_next = *return_bi;
2258                                 *return_bi = bi;
2259                         }
2260                         bi = nextbi;
2261                 }
2262                 /* and fail all 'written' */
2263                 bi = sh->dev[i].written;
2264                 sh->dev[i].written = NULL;
2265                 if (bi) bitmap_end = 1;
2266                 while (bi && bi->bi_sector <
2267                        sh->dev[i].sector + STRIPE_SECTORS) {
2268                         struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
2269                         clear_bit(BIO_UPTODATE, &bi->bi_flags);
2270                         if (!raid5_dec_bi_phys_segments(bi)) {
2271                                 md_write_end(conf->mddev);
2272                                 bi->bi_next = *return_bi;
2273                                 *return_bi = bi;
2274                         }
2275                         bi = bi2;
2276                 }
2277
2278                 /* fail any reads if this device is non-operational and
2279                  * the data has not reached the cache yet.
2280                  */
2281                 if (!test_bit(R5_Wantfill, &sh->dev[i].flags) &&
2282                     (!test_bit(R5_Insync, &sh->dev[i].flags) ||
2283                       test_bit(R5_ReadError, &sh->dev[i].flags))) {
2284                         bi = sh->dev[i].toread;
2285                         sh->dev[i].toread = NULL;
2286                         if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
2287                                 wake_up(&conf->wait_for_overlap);
2288                         if (bi) s->to_read--;
2289                         while (bi && bi->bi_sector <
2290                                sh->dev[i].sector + STRIPE_SECTORS) {
2291                                 struct bio *nextbi =
2292                                         r5_next_bio(bi, sh->dev[i].sector);
2293                                 clear_bit(BIO_UPTODATE, &bi->bi_flags);
2294                                 if (!raid5_dec_bi_phys_segments(bi)) {
2295                                         bi->bi_next = *return_bi;
2296                                         *return_bi = bi;
2297                                 }
2298                                 bi = nextbi;
2299                         }
2300                 }
2301                 spin_unlock_irq(&conf->device_lock);
2302                 if (bitmap_end)
2303                         bitmap_endwrite(conf->mddev->bitmap, sh->sector,
2304                                         STRIPE_SECTORS, 0, 0);
2305                 /* If we were in the middle of a write the parity block might
2306                  * still be locked - so just clear all R5_LOCKED flags
2307                  */
2308                 clear_bit(R5_LOCKED, &sh->dev[i].flags);
2309         }
2310
2311         if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
2312                 if (atomic_dec_and_test(&conf->pending_full_writes))
2313                         md_wakeup_thread(conf->mddev->thread);
2314 }
2315
2316 /* fetch_block - checks the given member device to see if its data needs
2317  * to be read or computed to satisfy a request.
2318  *
2319  * Returns 1 when no more member devices need to be checked, otherwise returns
2320  * 0 to tell the loop in handle_stripe_fill to continue
2321  */
2322 static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s,
2323                        int disk_idx, int disks)
2324 {
2325         struct r5dev *dev = &sh->dev[disk_idx];
2326         struct r5dev *fdev[2] = { &sh->dev[s->failed_num[0]],
2327                                   &sh->dev[s->failed_num[1]] };
2328
2329         /* is the data in this block needed, and can we get it? */
2330         if (!test_bit(R5_LOCKED, &dev->flags) &&
2331             !test_bit(R5_UPTODATE, &dev->flags) &&
2332             (dev->toread ||
2333              (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
2334              s->syncing || s->expanding ||
2335              (s->failed >= 1 && fdev[0]->toread) ||
2336              (s->failed >= 2 && fdev[1]->toread) ||
2337              (sh->raid_conf->level <= 5 && s->failed && fdev[0]->towrite &&
2338               !test_bit(R5_OVERWRITE, &fdev[0]->flags)) ||
2339              (sh->raid_conf->level == 6 && s->failed && s->to_write))) {
2340                 /* we would like to get this block, possibly by computing it,
2341                  * otherwise read it if the backing disk is insync
2342                  */
2343                 BUG_ON(test_bit(R5_Wantcompute, &dev->flags));
2344                 BUG_ON(test_bit(R5_Wantread, &dev->flags));
2345                 if ((s->uptodate == disks - 1) &&
2346                     (s->failed && (disk_idx == s->failed_num[0] ||
2347                                    disk_idx == s->failed_num[1]))) {
2348                         /* a disk has failed and we've been asked to fetch
2349                          * its block, so compute it
2350                          */
2351                         pr_debug("Computing stripe %llu block %d\n",
2352                                (unsigned long long)sh->sector, disk_idx);
2353                         set_bit(STRIPE_COMPUTE_RUN, &sh->state);
2354                         set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
2355                         set_bit(R5_Wantcompute, &dev->flags);
2356                         sh->ops.target = disk_idx;
2357                         sh->ops.target2 = -1; /* no 2nd target */
2358                         s->req_compute = 1;
2359                         /* Careful: from this point on 'uptodate' is in the eye
2360                          * of raid_run_ops which services 'compute' operations
2361                          * before writes. R5_Wantcompute flags a block that will
2362                          * be R5_UPTODATE by the time it is needed for a
2363                          * subsequent operation.
2364                          */
2365                         s->uptodate++;
2366                         return 1;
2367                 } else if (s->uptodate == disks-2 && s->failed >= 2) {
2368                         /* Computing 2-failure is *very* expensive; only
2369                          * do it if failed >= 2
2370                          */
2371                         int other;
2372                         for (other = disks; other--; ) {
2373                                 if (other == disk_idx)
2374                                         continue;
2375                                 if (!test_bit(R5_UPTODATE,
2376                                       &sh->dev[other].flags))
2377                                         break;
2378                         }
2379                         BUG_ON(other < 0);
2380                         pr_debug("Computing stripe %llu blocks %d,%d\n",
2381                                (unsigned long long)sh->sector,
2382                                disk_idx, other);
2383                         set_bit(STRIPE_COMPUTE_RUN, &sh->state);
2384                         set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
2385                         set_bit(R5_Wantcompute, &sh->dev[disk_idx].flags);
2386                         set_bit(R5_Wantcompute, &sh->dev[other].flags);
2387                         sh->ops.target = disk_idx;
2388                         sh->ops.target2 = other;
2389                         s->uptodate += 2;
2390                         s->req_compute = 1;
2391                         return 1;
2392                 } else if (test_bit(R5_Insync, &dev->flags)) {
2393                         set_bit(R5_LOCKED, &dev->flags);
2394                         set_bit(R5_Wantread, &dev->flags);
2395                         s->locked++;
2396                         pr_debug("Reading block %d (sync=%d)\n",
2397                                 disk_idx, s->syncing);
2398                 }
2399         }
2400
2401         return 0;
2402 }
2403
2404 /**
2405  * handle_stripe_fill - read or compute data to satisfy pending requests.
2406  */
2407 static void handle_stripe_fill(struct stripe_head *sh,
2408                                struct stripe_head_state *s,
2409                                int disks)
2410 {
2411         int i;
2412
2413         /* look for blocks to read/compute, skip this if a compute
2414          * is already in flight, or if the stripe contents are in the
2415          * midst of changing due to a write
2416          */
2417         if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state &&
2418             !sh->reconstruct_state)
2419                 for (i = disks; i--; )
2420                         if (fetch_block(sh, s, i, disks))
2421                                 break;
2422         set_bit(STRIPE_HANDLE, &sh->state);
2423 }
2424
2425
2426 /* handle_stripe_clean_event
2427  * any written block on an uptodate or failed drive can be returned.
2428  * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but
2429  * never LOCKED, so we don't need to test 'failed' directly.
2430  */
2431 static void handle_stripe_clean_event(raid5_conf_t *conf,
2432         struct stripe_head *sh, int disks, struct bio **return_bi)
2433 {
2434         int i;
2435         struct r5dev *dev;
2436
2437         for (i = disks; i--; )
2438                 if (sh->dev[i].written) {
2439                         dev = &sh->dev[i];
2440                         if (!test_bit(R5_LOCKED, &dev->flags) &&
2441                                 test_bit(R5_UPTODATE, &dev->flags)) {
2442                                 /* We can return any write requests */
2443                                 struct bio *wbi, *wbi2;
2444                                 int bitmap_end = 0;
2445                                 pr_debug("Return write for disc %d\n", i);
2446                                 spin_lock_irq(&conf->device_lock);
2447                                 wbi = dev->written;
2448                                 dev->written = NULL;
2449                                 while (wbi && wbi->bi_sector <
2450                                         dev->sector + STRIPE_SECTORS) {
2451                                         wbi2 = r5_next_bio(wbi, dev->sector);
2452                                         if (!raid5_dec_bi_phys_segments(wbi)) {
2453                                                 md_write_end(conf->mddev);
2454                                                 wbi->bi_next = *return_bi;
2455                                                 *return_bi = wbi;
2456                                         }
2457                                         wbi = wbi2;
2458                                 }
2459                                 if (dev->towrite == NULL)
2460                                         bitmap_end = 1;
2461                                 spin_unlock_irq(&conf->device_lock);
2462                                 if (bitmap_end)
2463                                         bitmap_endwrite(conf->mddev->bitmap,
2464                                                         sh->sector,
2465                                                         STRIPE_SECTORS,
2466                                          !test_bit(STRIPE_DEGRADED, &sh->state),
2467                                                         0);
2468                         }
2469                 }
2470
2471         if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
2472                 if (atomic_dec_and_test(&conf->pending_full_writes))
2473                         md_wakeup_thread(conf->mddev->thread);
2474 }
2475
2476 static void handle_stripe_dirtying(raid5_conf_t *conf,
2477                                    struct stripe_head *sh,
2478                                    struct stripe_head_state *s,
2479                                    int disks)
2480 {
2481         int rmw = 0, rcw = 0, i;
2482         if (conf->max_degraded == 2) {
2483                 /* RAID6 requires 'rcw' in current implementation
2484                  * Calculate the real rcw later - for now make it
2485                  * look like rcw is cheaper
2486                  */
2487                 rcw = 1; rmw = 2;
2488         } else for (i = disks; i--; ) {
2489                 /* would I have to read this buffer for read_modify_write */
2490                 struct r5dev *dev = &sh->dev[i];
2491                 if ((dev->towrite || i == sh->pd_idx) &&
2492                     !test_bit(R5_LOCKED, &dev->flags) &&
2493                     !(test_bit(R5_UPTODATE, &dev->flags) ||
2494                       test_bit(R5_Wantcompute, &dev->flags))) {
2495                         if (test_bit(R5_Insync, &dev->flags))
2496                                 rmw++;
2497                         else
2498                                 rmw += 2*disks;  /* cannot read it */
2499                 }
2500                 /* Would I have to read this buffer for reconstruct_write */
2501                 if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx &&
2502                     !test_bit(R5_LOCKED, &dev->flags) &&
2503                     !(test_bit(R5_UPTODATE, &dev->flags) ||
2504                     test_bit(R5_Wantcompute, &dev->flags))) {
2505                         if (test_bit(R5_Insync, &dev->flags)) rcw++;
2506                         else
2507                                 rcw += 2*disks;
2508                 }
2509         }
2510         pr_debug("for sector %llu, rmw=%d rcw=%d\n",
2511                 (unsigned long long)sh->sector, rmw, rcw);
2512         set_bit(STRIPE_HANDLE, &sh->state);
2513         if (rmw < rcw && rmw > 0)
2514                 /* prefer read-modify-write, but need to get some data */
2515                 for (i = disks; i--; ) {
2516                         struct r5dev *dev = &sh->dev[i];
2517                         if ((dev->towrite || i == sh->pd_idx) &&
2518                             !test_bit(R5_LOCKED, &dev->flags) &&
2519                             !(test_bit(R5_UPTODATE, &dev->flags) ||
2520                             test_bit(R5_Wantcompute, &dev->flags)) &&
2521                             test_bit(R5_Insync, &dev->flags)) {
2522                                 if (test_bit(STRIPE_PREREAD_ACTIVE,
2523                                              &sh->state)) {
2524                                         pr_debug("Read_old block "
2525                                                 "%d for r-m-w\n", i);
2526                                         set_bit(R5_LOCKED, &dev->flags);
2527                                         set_bit(R5_Wantread, &dev->flags);
2528                                         s->locked++;
2529                                 } else {
2530                                         set_bit(STRIPE_DELAYED, &sh->state);
2531                                         set_bit(STRIPE_HANDLE, &sh->state);
2532                                 }
2533                         }
2534                 }
2535         if (rcw <= rmw && rcw > 0) {
2536                 /* want reconstruct write, but need to get some data */
2537                 rcw = 0;
2538                 for (i = disks; i--; ) {
2539                         struct r5dev *dev = &sh->dev[i];
2540                         if (!test_bit(R5_OVERWRITE, &dev->flags) &&
2541                             i != sh->pd_idx && i != sh->qd_idx &&
2542                             !test_bit(R5_LOCKED, &dev->flags) &&
2543                             !(test_bit(R5_UPTODATE, &dev->flags) ||
2544                               test_bit(R5_Wantcompute, &dev->flags))) {
2545                                 rcw++;
2546                                 if (!test_bit(R5_Insync, &dev->flags))
2547                                         continue; /* it's a failed drive */
2548                                 if (test_bit(STRIPE_PREREAD_ACTIVE,
2549                                              &sh->state)) {
2550                                         pr_debug("Read_old block "
2551                                                 "%d for Reconstruct\n", i);
2552                                         set_bit(R5_LOCKED, &dev->flags);
2553                                         set_bit(R5_Wantread, &dev->flags);
2554                                         s->locked++;
2555                                 } else {
2556                                         set_bit(STRIPE_DELAYED, &sh->state);
2557                                         set_bit(STRIPE_HANDLE, &sh->state);
2558                                 }
2559                         }
2560                 }
2561         }
2562         /* now if nothing is locked, and if we have enough data,
2563          * we can start a write request
2564          */
2565         /* since handle_stripe can be called at any time we need to handle the
2566          * case where a compute block operation has been submitted and then a
2567          * subsequent call wants to start a write request.  raid_run_ops only
2568          * handles the case where compute block and reconstruct are requested
2569          * simultaneously.  If this is not the case then new writes need to be
2570          * held off until the compute completes.
2571          */
2572         if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) &&
2573             (s->locked == 0 && (rcw == 0 || rmw == 0) &&
2574             !test_bit(STRIPE_BIT_DELAY, &sh->state)))
2575                 schedule_reconstruction(sh, s, rcw == 0, 0);
2576 }
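
/* A worked cost comparison for the rmw/rcw counts above (hypothetical
 * 5-disk RAID5, one data block fully overwritten, all drives in-sync and
 * nothing cached):
 *
 *   rmw: read old data + old parity            -> rmw = 2
 *   rcw: read the three untouched data blocks  -> rcw = 3
 *
 * rmw < rcw, so read-modify-write pre-reads the two old blocks; once two
 * or more blocks of this stripe are being written (rmw = 3, rcw = 2) the
 * balance tips and reconstruct-write is chosen instead.
 */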
2577
2578 static void handle_parity_checks5(raid5_conf_t *conf, struct stripe_head *sh,
2579                                 struct stripe_head_state *s, int disks)
2580 {
2581         struct r5dev *dev = NULL;
2582
2583         set_bit(STRIPE_HANDLE, &sh->state);
2584
2585         switch (sh->check_state) {
2586         case check_state_idle:
2587                 /* start a new check operation if there are no failures */
2588                 if (s->failed == 0) {
2589                         BUG_ON(s->uptodate != disks);
2590                         sh->check_state = check_state_run;
2591                         set_bit(STRIPE_OP_CHECK, &s->ops_request);
2592                         clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags);
2593                         s->uptodate--;
2594                         break;
2595                 }
2596                 dev = &sh->dev[s->failed_num[0]];
2597                 /* fall through */
2598         case check_state_compute_result:
2599                 sh->check_state = check_state_idle;
2600                 if (!dev)
2601                         dev = &sh->dev[sh->pd_idx];
2602
2603                 /* check that a write has not made the stripe insync */
2604                 if (test_bit(STRIPE_INSYNC, &sh->state))
2605                         break;
2606
2607                 /* either failed parity check, or recovery is happening */
2608                 BUG_ON(!test_bit(R5_UPTODATE, &dev->flags));
2609                 BUG_ON(s->uptodate != disks);
2610
2611                 set_bit(R5_LOCKED, &dev->flags);
2612                 s->locked++;
2613                 set_bit(R5_Wantwrite, &dev->flags);
2614
2615                 clear_bit(STRIPE_DEGRADED, &sh->state);
2616                 set_bit(STRIPE_INSYNC, &sh->state);
2617                 break;
2618         case check_state_run:
2619                 break; /* we will be called again upon completion */
2620         case check_state_check_result:
2621                 sh->check_state = check_state_idle;
2622
2623                 /* if a failure occurred during the check operation, leave
2624                  * STRIPE_INSYNC not set and let the stripe be handled again
2625                  */
2626                 if (s->failed)
2627                         break;
2628
2629                 /* handle a successful check operation, if parity is correct
2630                  * we are done.  Otherwise update the mismatch count and repair
2631                  * parity if !MD_RECOVERY_CHECK
2632                  */
2633                 if ((sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) == 0)
2634                         /* parity is correct (on disc,
2635                          * not in buffer any more)
2636                          */
2637                         set_bit(STRIPE_INSYNC, &sh->state);
2638                 else {
2639                         conf->mddev->resync_mismatches += STRIPE_SECTORS;
2640                         if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
2641                                 /* don't try to repair!! */
2642                                 set_bit(STRIPE_INSYNC, &sh->state);
2643                         else {
2644                                 sh->check_state = check_state_compute_run;
2645                                 set_bit(STRIPE_COMPUTE_RUN, &sh->state);
2646                                 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
2647                                 set_bit(R5_Wantcompute,
2648                                         &sh->dev[sh->pd_idx].flags);
2649                                 sh->ops.target = sh->pd_idx;
2650                                 sh->ops.target2 = -1;
2651                                 s->uptodate++;
2652                         }
2653                 }
2654                 break;
2655         case check_state_compute_run:
2656                 break;
2657         default:
2658                 printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n",
2659                        __func__, sh->check_state,
2660                        (unsigned long long) sh->sector);
2661                 BUG();
2662         }
2663 }
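/*
 * Editorial sketch of the check_state machine above (derived from the
 * code, not part of the original source):
 *
 *   idle --(failed == 0)--> run --> check_result
 *   check_result --(P ok)-----------------> STRIPE_INSYNC
 *   check_result --(mismatch, repairing)--> compute_run
 *   compute_run --> compute_result --> idle (repaired parity written)
 *
 * With one failed device, idle instead falls straight through to
 * compute_result and the previously computed block is written back.
 */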
2664
2665
2666 static void handle_parity_checks6(raid5_conf_t *conf, struct stripe_head *sh,
2667                                   struct stripe_head_state *s,
2668                                   int disks)
2669 {
2670         int pd_idx = sh->pd_idx;
2671         int qd_idx = sh->qd_idx;
2672         struct r5dev *dev;
2673
2674         set_bit(STRIPE_HANDLE, &sh->state);
2675
2676         BUG_ON(s->failed > 2);
2677
2678         /* Want to check and possibly repair P and Q.
2679          * However there could be one 'failed' device, in which
2680          * case we can only check one of them, possibly using the
2681          * other to generate missing data
2682          */
2683
2684         switch (sh->check_state) {
2685         case check_state_idle:
2686                 /* start a new check operation if there are < 2 failures */
2687                 if (s->failed == s->q_failed) {
2688                         /* The only possible failed device holds Q, so it
2689                          * makes sense to check P (if anything else had failed,
2690                          * we would have used P to recreate it).
2691                          */
2692                         sh->check_state = check_state_run;
2693                 }
2694                 if (!s->q_failed && s->failed < 2) {
2695                         /* Q is not failed, and we didn't use it to generate
2696                          * anything, so it makes sense to check it
2697                          */
2698                         if (sh->check_state == check_state_run)
2699                                 sh->check_state = check_state_run_pq;
2700                         else
2701                                 sh->check_state = check_state_run_q;
2702                 }
2703
2704                 /* discard potentially stale zero_sum_result */
2705                 sh->ops.zero_sum_result = 0;
2706
2707                 if (sh->check_state == check_state_run) {
2708                         /* async_xor_zero_sum destroys the contents of P */
2709                         clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
2710                         s->uptodate--;
2711                 }
2712                 if (sh->check_state >= check_state_run &&
2713                     sh->check_state <= check_state_run_pq) {
2714                         /* async_syndrome_zero_sum preserves P and Q, so
2715                          * no need to mark them !uptodate here
2716                          */
2717                         set_bit(STRIPE_OP_CHECK, &s->ops_request);
2718                         break;
2719                 }
2720
2721                 /* we have a 2-disk failure */
2722                 BUG_ON(s->failed != 2);
2723                 /* fall through */
2724         case check_state_compute_result:
2725                 sh->check_state = check_state_idle;
2726
2727                 /* check that a write has not made the stripe insync */
2728                 if (test_bit(STRIPE_INSYNC, &sh->state))
2729                         break;
2730
2731                 /* now write out any block on a failed drive,
2732                  * or P or Q if they were recomputed
2733                  */
2734                 BUG_ON(s->uptodate < disks - 1); /* We don't need Q to recover */
2735                 if (s->failed == 2) {
2736                         dev = &sh->dev[s->failed_num[1]];
2737                         s->locked++;
2738                         set_bit(R5_LOCKED, &dev->flags);
2739                         set_bit(R5_Wantwrite, &dev->flags);
2740                 }
2741                 if (s->failed >= 1) {
2742                         dev = &sh->dev[s->failed_num[0]];
2743                         s->locked++;
2744                         set_bit(R5_LOCKED, &dev->flags);
2745                         set_bit(R5_Wantwrite, &dev->flags);
2746                 }
2747                 if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) {
2748                         dev = &sh->dev[pd_idx];
2749                         s->locked++;
2750                         set_bit(R5_LOCKED, &dev->flags);
2751                         set_bit(R5_Wantwrite, &dev->flags);
2752                 }
2753                 if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) {
2754                         dev = &sh->dev[qd_idx];
2755                         s->locked++;
2756                         set_bit(R5_LOCKED, &dev->flags);
2757                         set_bit(R5_Wantwrite, &dev->flags);
2758                 }
2759                 clear_bit(STRIPE_DEGRADED, &sh->state);
2760
2761                 set_bit(STRIPE_INSYNC, &sh->state);
2762                 break;
2763         case check_state_run:
2764         case check_state_run_q:
2765         case check_state_run_pq:
2766                 break; /* we will be called again upon completion */
2767         case check_state_check_result:
2768                 sh->check_state = check_state_idle;
2769
2770                 /* handle a successful check operation, if parity is correct
2771                  * we are done.  Otherwise update the mismatch count and repair
2772                  * parity if !MD_RECOVERY_CHECK
2773                  */
2774                 if (sh->ops.zero_sum_result == 0) {
2775                         /* both parities are correct */
2776                         if (!s->failed)
2777                                 set_bit(STRIPE_INSYNC, &sh->state);
2778                         else {
2779                                 /* in contrast to the raid5 case we can validate
2780                                  * parity, but still have a failure to write
2781                                  * back
2782                                  */
2783                                 sh->check_state = check_state_compute_result;
2784                                 /* Returning at this point means that we may go
2785                                  * off and bring p and/or q uptodate again, so
2786                                  * we make sure to check zero_sum_result again
2787                                  * to verify if p or q need writeback
2788                                  */
2789                         }
2790                 } else {
2791                         conf->mddev->resync_mismatches += STRIPE_SECTORS;
2792                         if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
2793                                 /* don't try to repair!! */
2794                                 set_bit(STRIPE_INSYNC, &sh->state);
2795                         else {
2796                                 int *target = &sh->ops.target;
2797
2798                                 sh->ops.target = -1;
2799                                 sh->ops.target2 = -1;
2800                                 sh->check_state = check_state_compute_run;
2801                                 set_bit(STRIPE_COMPUTE_RUN, &sh->state);
2802                                 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
2803                                 if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) {
2804                                         set_bit(R5_Wantcompute,
2805                                                 &sh->dev[pd_idx].flags);
2806                                         *target = pd_idx;
2807                                         target = &sh->ops.target2;
2808                                         s->uptodate++;
2809                                 }
2810                                 if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) {
2811                                         set_bit(R5_Wantcompute,
2812                                                 &sh->dev[qd_idx].flags);
2813                                         *target = qd_idx;
2814                                         s->uptodate++;
2815                                 }
2816                         }
2817                 }
2818                 break;
2819         case check_state_compute_run:
2820                 break;
2821         default:
2822                 printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n",
2823                        __func__, sh->check_state,
2824                        (unsigned long long) sh->sector);
2825                 BUG();
2826         }
2827 }
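/*
 * Editorial note: which zero-sum check runs above depends on the
 * failure pattern found during analysis:
 *
 *   no failures              -> check_state_run_pq (check P and Q)
 *   only Q's device failed   -> check_state_run    (check P alone)
 *   one non-Q device failed  -> check_state_run_q  (Q was not used for
 *                               recovery, so it is still checkable)
 *   two devices failed       -> no check possible; fall through and
 *                               write out the recovered blocks
 */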
2828
2829 static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh)
2830 {
2831         int i;
2832
2833         /* We have read all the blocks in this stripe and now we need to
2834          * copy some of them into a target stripe for expand.
2835          */
2836         struct dma_async_tx_descriptor *tx = NULL;
2837         clear_bit(STRIPE_EXPAND_SOURCE, &sh->state);
2838         for (i = 0; i < sh->disks; i++)
2839                 if (i != sh->pd_idx && i != sh->qd_idx) {
2840                         int dd_idx, j;
2841                         struct stripe_head *sh2;
2842                         struct async_submit_ctl submit;
2843
2844                         sector_t bn = compute_blocknr(sh, i, 1);
2845                         sector_t s = raid5_compute_sector(conf, bn, 0,
2846                                                           &dd_idx, NULL);
2847                         sh2 = get_active_stripe(conf, s, 0, 1, 1);
2848                         if (sh2 == NULL)
2849                                 /* so far only the early blocks of this stripe
2850                                  * have been requested.  When later blocks
2851                                  * get requested, we will try again
2852                                  */
2853                                 continue;
2854                         if (!test_bit(STRIPE_EXPANDING, &sh2->state) ||
2855                            test_bit(R5_Expanded, &sh2->dev[dd_idx].flags)) {
2856                                 /* must have already done this block */
2857                                 release_stripe(sh2);
2858                                 continue;
2859                         }
2860
2861                         /* place all the copies on one channel */
2862                         init_async_submit(&submit, 0, tx, NULL, NULL, NULL);
2863                         tx = async_memcpy(sh2->dev[dd_idx].page,
2864                                           sh->dev[i].page, 0, 0, STRIPE_SIZE,
2865                                           &submit);
2866
2867                         set_bit(R5_Expanded, &sh2->dev[dd_idx].flags);
2868                         set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags);
2869                         for (j = 0; j < conf->raid_disks; j++)
2870                                 if (j != sh2->pd_idx &&
2871                                     j != sh2->qd_idx &&
2872                                     !test_bit(R5_Expanded, &sh2->dev[j].flags))
2873                                         break;
2874                         if (j == conf->raid_disks) {
2875                                 set_bit(STRIPE_EXPAND_READY, &sh2->state);
2876                                 set_bit(STRIPE_HANDLE, &sh2->state);
2877                         }
2878                         release_stripe(sh2);
2879
2880                 }
2881         /* done submitting copies, wait for them to complete */
2882         if (tx) {
2883                 async_tx_ack(tx);
2884                 dma_wait_for_async_tx(tx);
2885         }
2886 }
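/*
 * Editorial note: compute_blocknr() maps a source stripe slot back to
 * an array-logical block, and raid5_compute_sector() re-maps that block
 * under the new geometry, yielding the destination stripe (sh2) and
 * slot (dd_idx) that receive the copy.  Chaining every async_memcpy()
 * through 'tx' keeps all copies on one channel, so the final
 * dma_wait_for_async_tx() covers them all.
 */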
2887
2888
2889 /*
2890  * handle_stripe - do things to a stripe.
2891  *
2892  * We lock the stripe and then examine the state of various bits
2893  * to see what needs to be done.
2894  * Possible results:
2895  *    return some read requests which now have data
2896  *    return some write requests which are safely on disc
2897  *    schedule a read on some buffers
2898  *    schedule a write of some buffers
2899  *    return confirmation of parity correctness
2900  *
2901  * buffers are taken off read_list or write_list, and bh_cache buffers
2902  * get BH_Lock set before the stripe lock is released.
2903  *
2904  */
2905
2906 static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
2907 {
2908         raid5_conf_t *conf = sh->raid_conf;
2909         int disks = sh->disks;
2910         struct r5dev *dev;
2911         int i;
2912
2913         memset(s, 0, sizeof(*s));
2914
2915         s->syncing = test_bit(STRIPE_SYNCING, &sh->state);
2916         s->expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state);
2917         s->expanded = test_bit(STRIPE_EXPAND_READY, &sh->state);
2918         s->failed_num[0] = -1;
2919         s->failed_num[1] = -1;
2920
2921         /* Now to look around and see what can be done */
2922         rcu_read_lock();
2923         spin_lock_irq(&conf->device_lock);
2924         for (i=disks; i--; ) {
2925                 mdk_rdev_t *rdev;
2926
2927                 dev = &sh->dev[i];
2928
2929                 pr_debug("check %d: state 0x%lx read %p write %p written %p\n",
2930                         i, dev->flags, dev->toread, dev->towrite, dev->written);
2931                 /* maybe we can reply to a read
2932                  *
2933                  * new wantfill requests are only permitted while
2934                  * ops_complete_biofill is guaranteed to be inactive
2935                  */
2936                 if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread &&
2937                     !test_bit(STRIPE_BIOFILL_RUN, &sh->state))
2938                         set_bit(R5_Wantfill, &dev->flags);
2939
2940                 /* now count some things */
2941                 if (test_bit(R5_LOCKED, &dev->flags))
2942                         s->locked++;
2943                 if (test_bit(R5_UPTODATE, &dev->flags))
2944                         s->uptodate++;
2945                 if (test_bit(R5_Wantcompute, &dev->flags)) {
2946                         s->compute++;
2947                         BUG_ON(s->compute > 2);
2948                 }
2949
2950                 if (test_bit(R5_Wantfill, &dev->flags))
2951                         s->to_fill++;
2952                 else if (dev->toread)
2953                         s->to_read++;
2954                 if (dev->towrite) {
2955                         s->to_write++;
2956                         if (!test_bit(R5_OVERWRITE, &dev->flags))
2957                                 s->non_overwrite++;
2958                 }
2959                 if (dev->written)
2960                         s->written++;
2961                 rdev = rcu_dereference(conf->disks[i].rdev);
2962                 if (s->blocked_rdev == NULL &&
2963                     rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
2964                         s->blocked_rdev = rdev;
2965                         atomic_inc(&rdev->nr_pending);
2966                 }
2967                 clear_bit(R5_Insync, &dev->flags);
2968                 if (!rdev)
2969                         /* Not in-sync */;
2970                 else if (test_bit(In_sync, &rdev->flags))
2971                         set_bit(R5_Insync, &dev->flags);
2972                 else {
2973                         /* in sync if before recovery_offset */
2974                         if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset)
2975                                 set_bit(R5_Insync, &dev->flags);
2976                 }
2977                 if (!test_bit(R5_Insync, &dev->flags)) {
2978                         /* The ReadError flag will just be confusing now */
2979                         clear_bit(R5_ReadError, &dev->flags);
2980                         clear_bit(R5_ReWrite, &dev->flags);
2981                 }
2982                 if (test_bit(R5_ReadError, &dev->flags))
2983                         clear_bit(R5_Insync, &dev->flags);
2984                 if (!test_bit(R5_Insync, &dev->flags)) {
2985                         if (s->failed < 2)
2986                                 s->failed_num[s->failed] = i;
2987                         s->failed++;
2988                 }
2989         }
2990         spin_unlock_irq(&conf->device_lock);
2991         rcu_read_unlock();
2992 }
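/*
 * Editorial example (assuming 4KiB pages, so STRIPE_SECTORS == 8): a
 * device rebuilt up to recovery_offset = 1000 counts as R5_Insync for
 * the stripe at sector 992 (992 + 8 <= 1000) but not for the stripe at
 * sector 1000, whose data must still come from reconstruction.
 */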
2993
2994 static void handle_stripe(struct stripe_head *sh)
2995 {
2996         struct stripe_head_state s;
2997         raid5_conf_t *conf = sh->raid_conf;
2998         int i;
2999         int prexor;
3000         int disks = sh->disks;
3001         struct r5dev *pdev, *qdev;
3002
3003         clear_bit(STRIPE_HANDLE, &sh->state);
3004         if (test_and_set_bit(STRIPE_ACTIVE, &sh->state)) {
3005                 /* already being handled, ensure it gets handled
3006                  * again when current action finishes */
3007                 set_bit(STRIPE_HANDLE, &sh->state);
3008                 return;
3009         }
3010
3011         if (test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) {
3012                 set_bit(STRIPE_SYNCING, &sh->state);
3013                 clear_bit(STRIPE_INSYNC, &sh->state);
3014         }
3015         clear_bit(STRIPE_DELAYED, &sh->state);
3016
3017         pr_debug("handling stripe %llu, state=%#lx cnt=%d, "
3018                 "pd_idx=%d, qd_idx=%d, check:%d, reconstruct:%d\n",
3019                (unsigned long long)sh->sector, sh->state,
3020                atomic_read(&sh->count), sh->pd_idx, sh->qd_idx,
3021                sh->check_state, sh->reconstruct_state);
3022
3023         analyse_stripe(sh, &s);
3024
3025         if (unlikely(s.blocked_rdev)) {
3026                 if (s.syncing || s.expanding || s.expanded ||
3027                     s.to_write || s.written) {
3028                         set_bit(STRIPE_HANDLE, &sh->state);
3029                         goto finish;
3030                 }
3031                 /* There is nothing for the blocked_rdev to block */
3032                 rdev_dec_pending(s.blocked_rdev, conf->mddev);
3033                 s.blocked_rdev = NULL;
3034         }
3035
3036         if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) {
3037                 set_bit(STRIPE_OP_BIOFILL, &s.ops_request);
3038                 set_bit(STRIPE_BIOFILL_RUN, &sh->state);
3039         }
3040
3041         pr_debug("locked=%d uptodate=%d to_read=%d"
3042                " to_write=%d failed=%d failed_num=%d,%d\n",
3043                s.locked, s.uptodate, s.to_read, s.to_write, s.failed,
3044                s.failed_num[0], s.failed_num[1]);
3045         /* check if the array has lost more than max_degraded devices and,
3046          * if so, some requests might need to be failed.
3047          */
3048         if (s.failed > conf->max_degraded && s.to_read+s.to_write+s.written)
3049                 handle_failed_stripe(conf, sh, &s, disks, &s.return_bi);
3050         if (s.failed > conf->max_degraded && s.syncing) {
3051                 md_done_sync(conf->mddev, STRIPE_SECTORS, 0);
3052                 clear_bit(STRIPE_SYNCING, &sh->state);
3053                 s.syncing = 0;
3054         }
3055
3056         /*
3057          * might be able to return some write requests if the parity blocks
3058          * are safe, or on a failed drive
3059          */
3060         pdev = &sh->dev[sh->pd_idx];
3061         s.p_failed = (s.failed >= 1 && s.failed_num[0] == sh->pd_idx)
3062                 || (s.failed >= 2 && s.failed_num[1] == sh->pd_idx);
3063         qdev = &sh->dev[sh->qd_idx];
3064         s.q_failed = (s.failed >= 1 && s.failed_num[0] == sh->qd_idx)
3065                 || (s.failed >= 2 && s.failed_num[1] == sh->qd_idx)
3066                 || conf->level < 6;
3067
3068         if (s.written &&
3069             (s.p_failed || ((test_bit(R5_Insync, &pdev->flags)
3070                              && !test_bit(R5_LOCKED, &pdev->flags)
3071                              && test_bit(R5_UPTODATE, &pdev->flags)))) &&
3072             (s.q_failed || ((test_bit(R5_Insync, &qdev->flags)
3073                              && !test_bit(R5_LOCKED, &qdev->flags)
3074                              && test_bit(R5_UPTODATE, &qdev->flags)))))
3075                 handle_stripe_clean_event(conf, sh, disks, &s.return_bi);
3076
3077         /* Now we might consider reading some blocks, either to check/generate
3078          * parity, or to satisfy requests
3079          * or to load a block that is being partially written.
3080          */
3081         if (s.to_read || s.non_overwrite
3082             || (conf->level == 6 && s.to_write && s.failed)
3083             || (s.syncing && (s.uptodate + s.compute < disks)) || s.expanding)
3084                 handle_stripe_fill(sh, &s, disks);
3085
3086         /* Now we check to see if any write operations have recently
3087          * completed
3088          */
3089         prexor = 0;
3090         if (sh->reconstruct_state == reconstruct_state_prexor_drain_result)
3091                 prexor = 1;
3092         if (sh->reconstruct_state == reconstruct_state_drain_result ||
3093             sh->reconstruct_state == reconstruct_state_prexor_drain_result) {
3094                 sh->reconstruct_state = reconstruct_state_idle;
3095
3096                 /* All the 'written' buffers and the parity block are ready to
3097                  * be written back to disk
3098                  */
3099                 BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags));
3100                 BUG_ON(sh->qd_idx >= 0 &&
3101                        !test_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags));
3102                 for (i = disks; i--; ) {
3103                         struct r5dev *dev = &sh->dev[i];
3104                         if (test_bit(R5_LOCKED, &dev->flags) &&
3105                                 (i == sh->pd_idx || i == sh->qd_idx ||
3106                                  dev->written)) {
3107                                 pr_debug("Writing block %d\n", i);
3108                                 set_bit(R5_Wantwrite, &dev->flags);
3109                                 if (prexor)
3110                                         continue;
3111                                 if (!test_bit(R5_Insync, &dev->flags) ||
3112                                     ((i == sh->pd_idx || i == sh->qd_idx)  &&
3113                                      s.failed == 0))
3114                                         set_bit(STRIPE_INSYNC, &sh->state);
3115                         }
3116                 }
3117                 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
3118                         s.dec_preread_active = 1;
3119         }
3120
3121         /* Now to consider new write requests and what else, if anything
3122          * should be read.  We do not handle new writes when:
3123          * 1/ A 'write' operation (copy+xor) is already in flight.
3124          * 2/ A 'check' operation is in flight, as it may clobber the parity
3125          *    block.
3126          */
3127         if (s.to_write && !sh->reconstruct_state && !sh->check_state)
3128                 handle_stripe_dirtying(conf, sh, &s, disks);
3129
3130         /* maybe we need to check and possibly fix the parity for this stripe
3131          * Any reads will already have been scheduled, so we just see if enough
3132          * data is available.  The parity check is held off while parity
3133          * dependent operations are in flight.
3134          */
3135         if (sh->check_state ||
3136             (s.syncing && s.locked == 0 &&
3137              !test_bit(STRIPE_COMPUTE_RUN, &sh->state) &&
3138              !test_bit(STRIPE_INSYNC, &sh->state))) {
3139                 if (conf->level == 6)
3140                         handle_parity_checks6(conf, sh, &s, disks);
3141                 else
3142                         handle_parity_checks5(conf, sh, &s, disks);
3143         }
3144
3145         if (s.syncing && s.locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) {
3146                 md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
3147                 clear_bit(STRIPE_SYNCING, &sh->state);
3148         }
3149
3150         /* If the failed drives only have a ReadError, then we might need
3151          * to progress the repair/check process
3152          */
3153         if (s.failed <= conf->max_degraded && !conf->mddev->ro)
3154                 for (i = 0; i < s.failed; i++) {
3155                         struct r5dev *dev = &sh->dev[s.failed_num[i]];
3156                         if (test_bit(R5_ReadError, &dev->flags)
3157                             && !test_bit(R5_LOCKED, &dev->flags)
3158                             && test_bit(R5_UPTODATE, &dev->flags)
3159                                 ) {
3160                                 if (!test_bit(R5_ReWrite, &dev->flags)) {
3161                                         set_bit(R5_Wantwrite, &dev->flags);
3162                                         set_bit(R5_ReWrite, &dev->flags);
3163                                         set_bit(R5_LOCKED, &dev->flags);
3164                                         s.locked++;
3165                                 } else {
3166                                         /* let's read it back */
3167                                         set_bit(R5_Wantread, &dev->flags);
3168                                         set_bit(R5_LOCKED, &dev->flags);
3169                                         s.locked++;
3170                                 }
3171                         }
3172                 }
3173
3174
3175         /* Finish reconstruct operations initiated by the expansion process */
3176         if (sh->reconstruct_state == reconstruct_state_result) {
3177                 struct stripe_head *sh_src
3178                         = get_active_stripe(conf, sh->sector, 1, 1, 1);
3179                 if (sh_src && test_bit(STRIPE_EXPAND_SOURCE, &sh_src->state)) {
3180                         /* sh cannot be written until sh_src has been read.
3181                          * so arrange for sh to be delayed a little
3182                          */
3183                         set_bit(STRIPE_DELAYED, &sh->state);
3184                         set_bit(STRIPE_HANDLE, &sh->state);
3185                         if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE,
3186                                               &sh_src->state))
3187                                 atomic_inc(&conf->preread_active_stripes);
3188                         release_stripe(sh_src);
3189                         goto finish;
3190                 }
3191                 if (sh_src)
3192                         release_stripe(sh_src);
3193
3194                 sh->reconstruct_state = reconstruct_state_idle;
3195                 clear_bit(STRIPE_EXPANDING, &sh->state);
3196                 for (i = conf->raid_disks; i--; ) {
3197                         set_bit(R5_Wantwrite, &sh->dev[i].flags);
3198                         set_bit(R5_LOCKED, &sh->dev[i].flags);
3199                         s.locked++;
3200                 }
3201         }
3202
3203         if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) &&
3204             !sh->reconstruct_state) {
3205                 /* Need to write out all blocks after computing parity */
3206                 sh->disks = conf->raid_disks;
3207                 stripe_set_idx(sh->sector, conf, 0, sh);
3208                 schedule_reconstruction(sh, &s, 1, 1);
3209         } else if (s.expanded && !sh->reconstruct_state && s.locked == 0) {
3210                 clear_bit(STRIPE_EXPAND_READY, &sh->state);
3211                 atomic_dec(&conf->reshape_stripes);
3212                 wake_up(&conf->wait_for_overlap);
3213                 md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
3214         }
3215
3216         if (s.expanding && s.locked == 0 &&
3217             !test_bit(STRIPE_COMPUTE_RUN, &sh->state))
3218                 handle_stripe_expansion(conf, sh);
3219
3220 finish:
3221         /* wait for this device to become unblocked */
3222         if (unlikely(s.blocked_rdev))
3223                 md_wait_for_blocked_rdev(s.blocked_rdev, conf->mddev);
3224
3225         if (s.ops_request)
3226                 raid_run_ops(sh, s.ops_request);
3227
3228         ops_run_io(sh, &s);
3229
3230
3231         if (s.dec_preread_active) {
3232                 /* We delay this until after ops_run_io so that if make_request
3233                  * is waiting on a flush, it won't continue until the writes
3234                  * have actually been submitted.
3235                  */
3236                 atomic_dec(&conf->preread_active_stripes);
3237                 if (atomic_read(&conf->preread_active_stripes) <
3238                     IO_THRESHOLD)
3239                         md_wakeup_thread(conf->mddev->thread);
3240         }
3241
3242         return_io(s.return_bi);
3243
3244         clear_bit(STRIPE_ACTIVE, &sh->state);
3245 }
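/*
 * Editorial summary (derived from the code above): one pass of
 * handle_stripe() proceeds roughly as
 *
 *   analyse_stripe()        - snapshot device state under the lock
 *   fail bios               - if more than max_degraded devices are lost
 *   clean event / fill      - complete safe writes, schedule reads
 *   handle_stripe_dirtying  - choose rmw vs rcw for new writes
 *   parity checks           - handle_parity_checks5()/6() when syncing
 *   expansion bookkeeping   - reshape source/destination handling
 *   raid_run_ops/ops_run_io - kick off the scheduled compute and I/O
 */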
3246
3247 static void raid5_activate_delayed(raid5_conf_t *conf)
3248 {
3249         if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) {
3250                 while (!list_empty(&conf->delayed_list)) {
3251                         struct list_head *l = conf->delayed_list.next;
3252                         struct stripe_head *sh;
3253                         sh = list_entry(l, struct stripe_head, lru);
3254                         list_del_init(l);
3255                         clear_bit(STRIPE_DELAYED, &sh->state);
3256                         if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
3257                                 atomic_inc(&conf->preread_active_stripes);
3258                         list_add_tail(&sh->lru, &conf->hold_list);
3259                 }
3260         }
3261 }
3262
3263 static void activate_bit_delay(raid5_conf_t *conf)
3264 {
3265         /* device_lock is held */
3266         struct list_head head;
3267         list_add(&head, &conf->bitmap_list);
3268         list_del_init(&conf->bitmap_list);
3269         while (!list_empty(&head)) {
3270                 struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru);
3271                 list_del_init(&sh->lru);
3272                 atomic_inc(&sh->count);
3273                 __release_stripe(conf, sh);
3274         }
3275 }
3276
3277 int md_raid5_congested(mddev_t *mddev, int bits)
3278 {
3279         raid5_conf_t *conf = mddev->private;
3280
3281         /* No difference between reads and writes.  Just check
3282          * how busy the stripe_cache is
3283          */
3284
3285         if (conf->inactive_blocked)
3286                 return 1;
3287         if (conf->quiesce)
3288                 return 1;
3289         if (list_empty_careful(&conf->inactive_list))
3290                 return 1;
3291
3292         return 0;
3293 }
3294 EXPORT_SYMBOL_GPL(md_raid5_congested);
3295
3296 static int raid5_congested(void *data, int bits)
3297 {
3298         mddev_t *mddev = data;
3299
3300         return mddev_congested(mddev, bits) ||
3301                 md_raid5_congested(mddev, bits);
3302 }
3303
3304 /* We want read requests to align with chunks where possible,
3305  * but write requests don't need to.
3306  */
3307 static int raid5_mergeable_bvec(struct request_queue *q,
3308                                 struct bvec_merge_data *bvm,
3309                                 struct bio_vec *biovec)
3310 {
3311         mddev_t *mddev = q->queuedata;
3312         sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
3313         int max;
3314         unsigned int chunk_sectors = mddev->chunk_sectors;
3315         unsigned int bio_sectors = bvm->bi_size >> 9;
3316
3317         if ((bvm->bi_rw & 1) == WRITE)
3318                 return biovec->bv_len; /* always allow writes to be mergeable */
3319
3320         if (mddev->new_chunk_sectors < mddev->chunk_sectors)
3321                 chunk_sectors = mddev->new_chunk_sectors;
3322         max =  (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
3323         if (max < 0) max = 0;
3324         if (max <= biovec->bv_len && bio_sectors == 0)
3325                 return biovec->bv_len;
3326         else
3327                 return max;
3328 }
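/*
 * Editorial worked example: with chunk_sectors = 128 (64KiB chunks), a
 * read whose first sector sits at offset 120 within its chunk and which
 * already holds bio_sectors = 4 leaves
 * max = (128 - (120 + 4)) << 9 = 2048 bytes, so at most 2KiB of the
 * candidate biovec may be merged before the read would cross the chunk
 * boundary.  An empty bio (bio_sectors == 0) is always allowed its
 * first page, however badly aligned.
 */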
3329
3330
3331 static int in_chunk_boundary(mddev_t *mddev, struct bio *bio)
3332 {
3333         sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
3334         unsigned int chunk_sectors = mddev->chunk_sectors;
3335         unsigned int bio_sectors = bio->bi_size >> 9;
3336
3337         if (mddev->new_chunk_sectors < mddev->chunk_sectors)
3338                 chunk_sectors = mddev->new_chunk_sectors;
3339         return  chunk_sectors >=
3340                 ((sector & (chunk_sectors - 1)) + bio_sectors);
3341 }
3342
3343 /*
3344  *  add bio to the retry LIFO (O(1), as we may be in interrupt context);
3345  *  it is later sampled by raid5d.
3346  */
3347 static void add_bio_to_retry(struct bio *bi,raid5_conf_t *conf)
3348 {
3349         unsigned long flags;
3350
3351         spin_lock_irqsave(&conf->device_lock, flags);
3352
3353         bi->bi_next = conf->retry_read_aligned_list;
3354         conf->retry_read_aligned_list = bi;
3355
3356         spin_unlock_irqrestore(&conf->device_lock, flags);
3357         md_wakeup_thread(conf->mddev->thread);
3358 }
3359
3360
3361 static struct bio *remove_bio_from_retry(raid5_conf_t *conf)
3362 {
3363         struct bio *bi;
3364
3365         bi = conf->retry_read_aligned;
3366         if (bi) {
3367                 conf->retry_read_aligned = NULL;
3368                 return bi;
3369         }
3370         bi = conf->retry_read_aligned_list;
3371         if (bi) {
3372                 conf->retry_read_aligned_list = bi->bi_next;
3373                 bi->bi_next = NULL;
3374                 /*
3375                  * this sets the active stripe count to 1 and the processed
3376                  * stripe count to zero (upper 8 bits)
3377                  */
3378                 bi->bi_phys_segments = 1; /* biased count of active stripes */
3379         }
3380
3381         return bi;
3382 }
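/*
 * Editorial note: the count is "biased" in that it starts at 1 rather
 * than 0, so the bio cannot appear complete while stripes are still
 * being attached to it; make_request() below drops the bias with
 * raid5_dec_bi_phys_segments() once submission is finished and only
 * ends the bio when the count reaches zero.
 */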
3383
3384
3385 /*
3386  *  raid5_align_endio checks whether the read succeeded and, if it did,
3387  *  calls bio_endio on the original bio (having bio_put the new bio first).
3388  *  If the read failed, the original bio is queued for retry through the
3389  *  stripe cache (see add_bio_to_retry).
3390  */
3391 static void raid5_align_endio(struct bio *bi, int error)
3392 {
3393         struct bio* raid_bi  = bi->bi_private;
3394         mddev_t *mddev;
3395         raid5_conf_t *conf;
3396         int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
3397         mdk_rdev_t *rdev;
3398
3399         bio_put(bi);
3400
3401         rdev = (void*)raid_bi->bi_next;
3402         raid_bi->bi_next = NULL;
3403         mddev = rdev->mddev;
3404         conf = mddev->private;
3405
3406         rdev_dec_pending(rdev, conf->mddev);
3407
3408         if (!error && uptodate) {
3409                 bio_endio(raid_bi, 0);
3410                 if (atomic_dec_and_test(&conf->active_aligned_reads))
3411                         wake_up(&conf->wait_for_stripe);
3412                 return;
3413         }
3414
3415
3416         pr_debug("raid5_align_endio : io error...handing IO for a retry\n");
3417
3418         add_bio_to_retry(raid_bi, conf);
3419 }
3420
3421 static int bio_fits_rdev(struct bio *bi)
3422 {
3423         struct request_queue *q = bdev_get_queue(bi->bi_bdev);
3424
3425         if ((bi->bi_size>>9) > queue_max_sectors(q))
3426                 return 0;
3427         blk_recount_segments(q, bi);
3428         if (bi->bi_phys_segments > queue_max_segments(q))
3429                 return 0;
3430
3431         if (q->merge_bvec_fn)
3432                 /* it's too hard to apply the merge_bvec_fn at this stage,
3433                  * just give up
3434                  */
3435                 return 0;
3436
3437         return 1;
3438 }
3439
3440
3441 static int chunk_aligned_read(mddev_t *mddev, struct bio * raid_bio)
3442 {
3443         raid5_conf_t *conf = mddev->private;
3444         int dd_idx;
3445         struct bio* align_bi;
3446         mdk_rdev_t *rdev;
3447
3448         if (!in_chunk_boundary(mddev, raid_bio)) {
3449                 pr_debug("chunk_aligned_read : non aligned\n");
3450                 return 0;
3451         }
3452         /*
3453          * use bio_clone_mddev to make a copy of the bio
3454          */
3455         align_bi = bio_clone_mddev(raid_bio, GFP_NOIO, mddev);
3456         if (!align_bi)
3457                 return 0;
3458         /*
3459          *   set bi_end_io to a new function, and set bi_private to the
3460          *     original bio.
3461          */
3462         align_bi->bi_end_io  = raid5_align_endio;
3463         align_bi->bi_private = raid_bio;
3464         /*
3465          *      compute position
3466          */
3467         align_bi->bi_sector =  raid5_compute_sector(conf, raid_bio->bi_sector,
3468                                                     0,
3469                                                     &dd_idx, NULL);
3470
3471         rcu_read_lock();
3472         rdev = rcu_dereference(conf->disks[dd_idx].rdev);
3473         if (rdev && test_bit(In_sync, &rdev->flags)) {
3474                 atomic_inc(&rdev->nr_pending);
3475                 rcu_read_unlock();
3476                 raid_bio->bi_next = (void*)rdev;
3477                 align_bi->bi_bdev =  rdev->bdev;
3478                 align_bi->bi_flags &= ~(1 << BIO_SEG_VALID);
3479                 align_bi->bi_sector += rdev->data_offset;
3480
3481                 if (!bio_fits_rdev(align_bi)) {
3482                         /* too big in some way */
3483                         bio_put(align_bi);
3484                         rdev_dec_pending(rdev, mddev);
3485                         return 0;
3486                 }
3487
3488                 spin_lock_irq(&conf->device_lock);
3489                 wait_event_lock_irq(conf->wait_for_stripe,
3490                                     conf->quiesce == 0,
3491                                     conf->device_lock, /* nothing */);
3492                 atomic_inc(&conf->active_aligned_reads);
3493                 spin_unlock_irq(&conf->device_lock);
3494
3495                 generic_make_request(align_bi);
3496                 return 1;
3497         } else {
3498                 rcu_read_unlock();
3499                 bio_put(align_bi);
3500                 return 0;
3501         }
3502 }
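/*
 * Editorial note: the cloned bio bypasses the stripe cache entirely; it
 * is retargeted at the single member disk that holds the whole chunk
 * and submitted directly, and raid5_align_endio() falls back to the
 * normal stripe path (via the retry list) only if that read fails.
 */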
3503
3504 /* __get_priority_stripe - get the next stripe to process
3505  *
3506  * Full stripe writes are allowed to pass preread active stripes up until
3507  * the bypass_threshold is exceeded.  In general the bypass_count
3508  * increments when the handle_list is handled before the hold_list; however, it
3509  * will not be incremented when STRIPE_IO_STARTED is sampled as set, signifying a
3510  * stripe with in-flight i/o.  The bypass_count will be reset when the
3511  * head of the hold_list has changed, i.e. the head was promoted to the
3512  * handle_list.
3513  */
3514 static struct stripe_head *__get_priority_stripe(raid5_conf_t *conf)
3515 {
3516         struct stripe_head *sh;
3517
3518         pr_debug("%s: handle: %s hold: %s full_writes: %d bypass_count: %d\n",
3519                   __func__,
3520                   list_empty(&conf->handle_list) ? "empty" : "busy",
3521                   list_empty(&conf->hold_list) ? "empty" : "busy",
3522                   atomic_read(&conf->pending_full_writes), conf->bypass_count);
3523
3524         if (!list_empty(&conf->handle_list)) {
3525                 sh = list_entry(conf->handle_list.next, typeof(*sh), lru);
3526
3527                 if (list_empty(&conf->hold_list))
3528                         conf->bypass_count = 0;
3529                 else if (!test_bit(STRIPE_IO_STARTED, &sh->state)) {
3530                         if (conf->hold_list.next == conf->last_hold)
3531                                 conf->bypass_count++;
3532                         else {
3533                                 conf->last_hold = conf->hold_list.next;
3534                                 conf->bypass_count -= conf->bypass_threshold;
3535                                 if (conf->bypass_count < 0)
3536                                         conf->bypass_count = 0;
3537                         }
3538                 }
3539         } else if (!list_empty(&conf->hold_list) &&
3540                    ((conf->bypass_threshold &&
3541                      conf->bypass_count > conf->bypass_threshold) ||
3542                     atomic_read(&conf->pending_full_writes) == 0)) {
3543                 sh = list_entry(conf->hold_list.next,
3544                                 typeof(*sh), lru);
3545                 conf->bypass_count -= conf->bypass_threshold;
3546                 if (conf->bypass_count < 0)
3547                         conf->bypass_count = 0;
3548         } else
3549                 return NULL;
3550
3551         list_del_init(&sh->lru);
3552         atomic_inc(&sh->count);
3553         BUG_ON(atomic_read(&sh->count) != 1);
3554         return sh;
3555 }
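/*
 * Editorial example: with the default bypass_threshold of 1, a stripe
 * parked on hold_list is promoted once handle_list is empty and either
 * no full-stripe writes are pending or bypass_count has climbed above
 * 1, i.e. handle_list was serviced ahead of an unchanged hold_list head
 * more than once since the last promotion.
 */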
3556
3557 static int make_request(mddev_t *mddev, struct bio * bi)
3558 {
3559         raid5_conf_t *conf = mddev->private;
3560         int dd_idx;
3561         sector_t new_sector;
3562         sector_t logical_sector, last_sector;
3563         struct stripe_head *sh;
3564         const int rw = bio_data_dir(bi);
3565         int remaining;
3566         int plugged;
3567
3568         if (unlikely(bi->bi_rw & REQ_FLUSH)) {
3569                 md_flush_request(mddev, bi);
3570                 return 0;
3571         }
3572
3573         md_write_start(mddev, bi);
3574
3575         if (rw == READ &&
3576              mddev->reshape_position == MaxSector &&
3577              chunk_aligned_read(mddev,bi))
3578                 return 0;
3579
3580         logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
3581         last_sector = bi->bi_sector + (bi->bi_size>>9);
3582         bi->bi_next = NULL;
3583         bi->bi_phys_segments = 1;       /* over-loaded to count active stripes */
3584
3585         plugged = mddev_check_plugged(mddev);
3586         for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
3587                 DEFINE_WAIT(w);
3588                 int disks, data_disks;
3589                 int previous;
3590
3591         retry:
3592                 previous = 0;
3593                 disks = conf->raid_disks;
3594                 prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
3595                 if (unlikely(conf->reshape_progress != MaxSector)) {
3596                         /* spinlock is needed as reshape_progress may be
3597                          * 64bit on a 32bit platform, and so it might be
3598                          * possible to see a half-updated value.
3599                          * Of course reshape_progress could change after
3600                          * the lock is dropped, so once we get a reference
3601                          * to the stripe that we think it is, we will have
3602                          * to check again.
3603                          */
3604                         spin_lock_irq(&conf->device_lock);
3605                         if (mddev->delta_disks < 0
3606                             ? logical_sector < conf->reshape_progress
3607                             : logical_sector >= conf->reshape_progress) {
3608                                 disks = conf->previous_raid_disks;
3609                                 previous = 1;
3610                         } else {
3611                                 if (mddev->delta_disks < 0
3612                                     ? logical_sector < conf->reshape_safe
3613                                     : logical_sector >= conf->reshape_safe) {
3614                                         spin_unlock_irq(&conf->device_lock);
3615                                         schedule();
3616                                         goto retry;
3617                                 }
3618                         }
3619                         spin_unlock_irq(&conf->device_lock);
3620                 }
3621                 data_disks = disks - conf->max_degraded;
3622
3623                 new_sector = raid5_compute_sector(conf, logical_sector,
3624                                                   previous,
3625                                                   &dd_idx, NULL);
3626                 pr_debug("raid456: make_request, sector %llu logical %llu\n",
3627                         (unsigned long long)new_sector,
3628                         (unsigned long long)logical_sector);
3629
3630                 sh = get_active_stripe(conf, new_sector, previous,
3631                                        (bi->bi_rw&RWA_MASK), 0);
3632                 if (sh) {
3633                         if (unlikely(previous)) {
3634                                 /* expansion might have moved on while waiting for a
3635                                  * stripe, so we must do the range check again.
3636                                  * Expansion could still move past after this
3637                                  * test, but as we are holding a reference to
3638                                  * 'sh', we know that if that happens,
3639                                  *  STRIPE_EXPANDING will get set and the expansion
3640                                  * won't proceed until we finish with the stripe.
3641                                  */
3642                                 int must_retry = 0;
3643                                 spin_lock_irq(&conf->device_lock);
3644                                 if (mddev->delta_disks < 0
3645                                     ? logical_sector >= conf->reshape_progress
3646                                     : logical_sector < conf->reshape_progress)
3647                                         /* mismatch, need to try again */
3648                                         must_retry = 1;
3649                                 spin_unlock_irq(&conf->device_lock);
3650                                 if (must_retry) {
3651                                         release_stripe(sh);
3652                                         schedule();
3653                                         goto retry;
3654                                 }
3655                         }
3656
3657                         if (rw == WRITE &&
3658                             logical_sector >= mddev->suspend_lo &&
3659                             logical_sector < mddev->suspend_hi) {
3660                                 release_stripe(sh);
3661                                 /* As the suspend_* range is controlled by
3662                                  * userspace, we want an interruptible
3663                                  * wait.
3664                                  */
3665                                 flush_signals(current);
3666                                 prepare_to_wait(&conf->wait_for_overlap,
3667                                                 &w, TASK_INTERRUPTIBLE);
3668                                 if (logical_sector >= mddev->suspend_lo &&
3669                                     logical_sector < mddev->suspend_hi)
3670                                         schedule();
3671                                 goto retry;
3672                         }
3673
3674                         if (test_bit(STRIPE_EXPANDING, &sh->state) ||
3675                             !add_stripe_bio(sh, bi, dd_idx, rw)) {
3676                                 /* Stripe is busy expanding or
3677                                  * add failed due to overlap.  Flush everything
3678                                  * and wait a while
3679                                  */
3680                                 md_wakeup_thread(mddev->thread);
3681                                 release_stripe(sh);
3682                                 schedule();
3683                                 goto retry;
3684                         }
3685                         finish_wait(&conf->wait_for_overlap, &w);
3686                         set_bit(STRIPE_HANDLE, &sh->state);
3687                         clear_bit(STRIPE_DELAYED, &sh->state);
3688                         if ((bi->bi_rw & REQ_SYNC) &&
3689                             !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
3690                                 atomic_inc(&conf->preread_active_stripes);
3691                         release_stripe(sh);
3692                 } else {
3693                         /* cannot get stripe for read-ahead, just give up */
3694                         clear_bit(BIO_UPTODATE, &bi->bi_flags);
3695                         finish_wait(&conf->wait_for_overlap, &w);
3696                         break;
3697                 }
3698
3699         }
3700         if (!plugged)
3701                 md_wakeup_thread(mddev->thread);
3702
3703         spin_lock_irq(&conf->device_lock);
3704         remaining = raid5_dec_bi_phys_segments(bi);
3705         spin_unlock_irq(&conf->device_lock);
3706         if (remaining == 0) {
3707
3708                 if ( rw == WRITE )
3709                         md_write_end(mddev);
3710
3711                 bio_endio(bi, 0);
3712         }
3713
3714         return 0;
3715 }
3716
3717 static sector_t raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks);
3718
3719 static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped)
3720 {
3721         /* reshaping is quite different to recovery/resync so it is
3722          * handled quite separately ... here.
3723          *
3724          * On each call to sync_request, we gather one chunk worth of
3725          * destination stripes and flag them as expanding.
3726          * Then we find all the source stripes and request reads.
3727          * As the reads complete, handle_stripe will copy the data
3728          * into the destination stripe and release that stripe.
3729          */
3730         raid5_conf_t *conf = mddev->private;
3731         struct stripe_head *sh;
3732         sector_t first_sector, last_sector;
3733         int raid_disks = conf->previous_raid_disks;
3734         int data_disks = raid_disks - conf->max_degraded;
3735         int new_data_disks = conf->raid_disks - conf->max_degraded;
3736         int i;
3737         int dd_idx;
3738         sector_t writepos, readpos, safepos;
3739         sector_t stripe_addr;
3740         int reshape_sectors;
3741         struct list_head stripes;
3742
3743         if (sector_nr == 0) {
3744                 /* If restarting in the middle, skip the initial sectors */
3745                 if (mddev->delta_disks < 0 &&
3746                     conf->reshape_progress < raid5_size(mddev, 0, 0)) {
3747                         sector_nr = raid5_size(mddev, 0, 0)
3748                                 - conf->reshape_progress;
3749                 } else if (mddev->delta_disks >= 0 &&
3750                            conf->reshape_progress > 0)
3751                         sector_nr = conf->reshape_progress;
3752                 sector_div(sector_nr, new_data_disks);
3753                 if (sector_nr) {
3754                         mddev->curr_resync_completed = sector_nr;
3755                         sysfs_notify(&mddev->kobj, NULL, "sync_completed");
3756                         *skipped = 1;
3757                         return sector_nr;
3758                 }
3759         }
3760
3761         /* We need to process a full chunk at a time.
3762          * If old and new chunk sizes differ, we need to process the
3763          * largest of these
3764          */
3765         if (mddev->new_chunk_sectors > mddev->chunk_sectors)
3766                 reshape_sectors = mddev->new_chunk_sectors;
3767         else
3768                 reshape_sectors = mddev->chunk_sectors;
3769
3770         /* we update the metadata when there is more than 3Meg
3771          * in the block range (that is rather arbitrary, should
3772          * probably be time based) or when the data about to be
3773          * copied would over-write the source of the data at
3774          * the front of the range.
3775          * i.e. one new_stripe along from reshape_progress new_maps
3776          * to after where reshape_safe old_maps to
3777          */
3778         writepos = conf->reshape_progress;
3779         sector_div(writepos, new_data_disks);
3780         readpos = conf->reshape_progress;
3781         sector_div(readpos, data_disks);
3782         safepos = conf->reshape_safe;
3783         sector_div(safepos, data_disks);
3784         if (mddev->delta_disks < 0) {
3785                 writepos -= min_t(sector_t, reshape_sectors, writepos);
3786                 readpos += reshape_sectors;
3787                 safepos += reshape_sectors;
3788         } else {
3789                 writepos += reshape_sectors;
3790                 readpos -= min_t(sector_t, reshape_sectors, readpos);
3791                 safepos -= min_t(sector_t, reshape_sectors, safepos);
3792         }
3793
3794         /* 'writepos' is the most advanced device address we might write.
3795          * 'readpos' is the least advanced device address we might read.
3796          * 'safepos' is the least address recorded in the metadata as having
3797          *     been reshaped.
3798          * If 'readpos' is behind 'writepos', then there is no way that we can
3799          * ensure safety in the face of a crash - that must be done by userspace
3800          * making a backup of the data.  So in that case there is no particular
3801          * rush to update metadata.
3802          * Otherwise if 'safepos' is behind 'writepos', then we really need to
3803          * update the metadata to advance 'safepos' to match 'readpos' so that
3804          * we can be safe in the event of a crash.
3805          * So we insist on updating metadata if safepos is behind writepos and
3806          * readpos is beyond writepos.
3807          * In any case, update the metadata every 10 seconds.
3808          * Maybe that number should be configurable, but I'm not sure it is
3809          * worth it.... maybe it could be a multiple of safemode_delay???
3810          */
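        /*
         * Illustration of the arithmetic above (values arbitrary): a
         * RAID5 growing from 5 to 6 devices (data_disks == 4,
         * new_data_disks == 5) with reshape_sectors == 128 and
         * reshape_progress == 10240 gives
         *   writepos == 10240/5 + 128 == 2176
         *   readpos  == 10240/4 - 128 == 2432
         * so readpos stays ahead of writepos, and a metadata update is
         * forced below only if safepos has fallen behind writepos (or
         * the 10 second timer has expired).
         */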
3811         if ((mddev->delta_disks < 0
3812              ? (safepos > writepos && readpos < writepos)
3813              : (safepos < writepos && readpos > writepos)) ||
3814             time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) {
3815                 /* Cannot proceed until we've updated the superblock... */
3816                 wait_event(conf->wait_for_overlap,
3817                            atomic_read(&conf->reshape_stripes)==0);
3818                 mddev->reshape_position = conf->reshape_progress;
3819                 mddev->curr_resync_completed = sector_nr;
3820                 conf->reshape_checkpoint = jiffies;
3821                 set_bit(MD_CHANGE_DEVS, &mddev->flags);
3822                 md_wakeup_thread(mddev->thread);
3823                 wait_event(mddev->sb_wait, mddev->flags == 0 ||
3824                            kthread_should_stop());
3825                 spin_lock_irq(&conf->device_lock);
3826                 conf->reshape_safe = mddev->reshape_position;
3827                 spin_unlock_irq(&conf->device_lock);
3828                 wake_up(&conf->wait_for_overlap);
3829                 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
3830         }
3831
3832         if (mddev->delta_disks < 0) {
3833                 BUG_ON(conf->reshape_progress == 0);
3834                 stripe_addr = writepos;
3835                 BUG_ON((mddev->dev_sectors &
3836                         ~((sector_t)reshape_sectors - 1))
3837                        - reshape_sectors - stripe_addr
3838                        != sector_nr);
3839         } else {
3840                 BUG_ON(writepos != sector_nr + reshape_sectors);
3841                 stripe_addr = sector_nr;
3842         }
3843         INIT_LIST_HEAD(&stripes);
3844         for (i = 0; i < reshape_sectors; i += STRIPE_SECTORS) {
3845                 int j;
3846                 int skipped_disk = 0;
3847                 sh = get_active_stripe(conf, stripe_addr+i, 0, 0, 1);
3848                 set_bit(STRIPE_EXPANDING, &sh->state);
3849                 atomic_inc(&conf->reshape_stripes);
3850                 /* If any of this stripe is beyond the end of the old
3851                  * array, then we need to zero those blocks
3852                  */
3853                 for (j=sh->disks; j--;) {
3854                         sector_t s;
3855                         if (j == sh->pd_idx)
3856                                 continue;
3857                         if (conf->level == 6 &&
3858                             j == sh->qd_idx)
3859                                 continue;
3860                         s = compute_blocknr(sh, j, 0);
3861                         if (s < raid5_size(mddev, 0, 0)) {
3862                                 skipped_disk = 1;
3863                                 continue;
3864                         }
3865                         memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE);
3866                         set_bit(R5_Expanded, &sh->dev[j].flags);
3867                         set_bit(R5_UPTODATE, &sh->dev[j].flags);
3868                 }
3869                 if (!skipped_disk) {
3870                         set_bit(STRIPE_EXPAND_READY, &sh->state);
3871                         set_bit(STRIPE_HANDLE, &sh->state);
3872                 }
3873                 list_add(&sh->lru, &stripes);
3874         }
3875         spin_lock_irq(&conf->device_lock);
3876         if (mddev->delta_disks < 0)
3877                 conf->reshape_progress -= reshape_sectors * new_data_disks;
3878         else
3879                 conf->reshape_progress += reshape_sectors * new_data_disks;
3880         spin_unlock_irq(&conf->device_lock);
3881         /* Ok, those stripes are ready. We can start scheduling
3882          * reads on the source stripes.
3883          * The source stripes are determined by mapping the first and last
3884          * block on the destination stripes.
3885          */
3886         first_sector =
3887                 raid5_compute_sector(conf, stripe_addr*(new_data_disks),
3888                                      1, &dd_idx, NULL);
3889         last_sector =
3890                 raid5_compute_sector(conf, ((stripe_addr+reshape_sectors)
3891                                             * new_data_disks - 1),
3892                                      1, &dd_idx, NULL);
3893         if (last_sector >= mddev->dev_sectors)
3894                 last_sector = mddev->dev_sectors - 1;
3895         while (first_sector <= last_sector) {
3896                 sh = get_active_stripe(conf, first_sector, 1, 0, 1);
3897                 set_bit(STRIPE_EXPAND_SOURCE, &sh->state);
3898                 set_bit(STRIPE_HANDLE, &sh->state);
3899                 release_stripe(sh);
3900                 first_sector += STRIPE_SECTORS;
3901         }
3902         /* Now that the sources are clearly marked, we can release
3903          * the destination stripes
3904          */
3905         while (!list_empty(&stripes)) {
3906                 sh = list_entry(stripes.next, struct stripe_head, lru);
3907                 list_del_init(&sh->lru);
3908                 release_stripe(sh);
3909         }
3910         /* If this takes us to the resync_max point where we have to pause,
3911          * then we need to write out the superblock.
3912          */
3913         sector_nr += reshape_sectors;
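        /* i.e. have we covered at least half of the remaining distance
         * from the last checkpoint to resync_max?
         */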
3914         if ((sector_nr - mddev->curr_resync_completed) * 2
3915             >= mddev->resync_max - mddev->curr_resync_completed) {
3916                 /* Cannot proceed until we've updated the superblock... */
3917                 wait_event(conf->wait_for_overlap,
3918                            atomic_read(&conf->reshape_stripes) == 0);
3919                 mddev->reshape_position = conf->reshape_progress;
3920                 mddev->curr_resync_completed = sector_nr;
3921                 conf->reshape_checkpoint = jiffies;
3922                 set_bit(MD_CHANGE_DEVS, &mddev->flags);
3923                 md_wakeup_thread(mddev->thread);
3924                 wait_event(mddev->sb_wait,
3925                            !test_bit(MD_CHANGE_DEVS, &mddev->flags)
3926                            || kthread_should_stop());
3927                 spin_lock_irq(&conf->device_lock);
3928                 conf->reshape_safe = mddev->reshape_position;
3929                 spin_unlock_irq(&conf->device_lock);
3930                 wake_up(&conf->wait_for_overlap);
3931                 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
3932         }
3933         return reshape_sectors;
3934 }
3935
3936 /* FIXME go_faster isn't used */
3937 static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
3938 {
3939         raid5_conf_t *conf = mddev->private;
3940         struct stripe_head *sh;
3941         sector_t max_sector = mddev->dev_sectors;
3942         sector_t sync_blocks;
3943         int still_degraded = 0;
3944         int i;
3945
3946         if (sector_nr >= max_sector) {
3947                 /* just being told to finish up .. nothing much to do */
3948
3949                 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
3950                         end_reshape(conf);
3951                         return 0;
3952                 }
3953
3954                 if (mddev->curr_resync < max_sector) /* aborted */
3955                         bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
3956                                         &sync_blocks, 1);
3957                 else /* completed sync */
3958                         conf->fullsync = 0;
3959                 bitmap_close_sync(mddev->bitmap);
3960
3961                 return 0;
3962         }
3963
3964         /* Allow raid5_quiesce to complete */
3965         wait_event(conf->wait_for_overlap, conf->quiesce != 2);
3966
3967         if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
3968                 return reshape_request(mddev, sector_nr, skipped);
3969
3970         /* No need to check resync_max as we never do more than one
3971          * stripe, and as resync_max will always be on a chunk boundary,
3972          * if the check in md_do_sync didn't fire, there is no chance
3973          * of overstepping resync_max here
3974          */
3975
3976         /* if there are too many failed drives and we are trying
3977          * to resync, then assert that we are finished, because there is
3978          * nothing we can do.
3979          */
3980         if (mddev->degraded >= conf->max_degraded &&
3981             test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
3982                 sector_t rv = mddev->dev_sectors - sector_nr;
3983                 *skipped = 1;
3984                 return rv;
3985         }
3986         if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
3987             !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
3988             !conf->fullsync && sync_blocks >= STRIPE_SECTORS) {
3989                 /* we can skip this block, and probably more */
3990                 sync_blocks /= STRIPE_SECTORS;
3991                 *skipped = 1;
3992                 return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */
3993         }
3994
3995
3996         bitmap_cond_end_sync(mddev->bitmap, sector_nr);
3997
3998         sh = get_active_stripe(conf, sector_nr, 0, 1, 0);
3999         if (sh == NULL) {
4000                 sh = get_active_stripe(conf, sector_nr, 0, 0, 0);
4001                 /* make sure we don't swamp the stripe cache if someone else
4002                  * is trying to get access
4003                  */
4004                 schedule_timeout_uninterruptible(1);
4005         }
4006         /* Need to check if the array will still be degraded after recovery/resync.
4007          * We don't need to check the 'failed' flag as when that gets set,
4008          * recovery aborts.
4009          */
4010         for (i = 0; i < conf->raid_disks; i++)
4011                 if (conf->disks[i].rdev == NULL)
4012                         still_degraded = 1;
4013
4014         bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded);
4015
4016         set_bit(STRIPE_SYNC_REQUESTED, &sh->state);
4017
4018         handle_stripe(sh);
4019         release_stripe(sh);
4020
4021         return STRIPE_SECTORS;
4022 }
4023
4024 static int  retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
4025 {
4026         /* We may not be able to submit a whole bio at once as there
4027          * may not be enough stripe_heads available.
4028          * We cannot pre-allocate enough stripe_heads as we may need
4029          * more than exist in the cache (if we allow ever larger chunks).
4030          * So we do one stripe head at a time and record in
4031          * ->bi_hw_segments how many have been done.
4032          *
4033          * We *know* that this entire raid_bio is in one chunk, so
4034          * there is only one 'dd_idx', so we need only one call to raid5_compute_sector.
4035          */
4036         struct stripe_head *sh;
4037         int dd_idx;
4038         sector_t sector, logical_sector, last_sector;
4039         int scnt = 0;
4040         int remaining;
4041         int handled = 0;
4042
4043         logical_sector = raid_bio->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
4044         sector = raid5_compute_sector(conf, logical_sector,
4045                                       0, &dd_idx, NULL);
4046         last_sector = raid_bio->bi_sector + (raid_bio->bi_size>>9);
4047
4048         for (; logical_sector < last_sector;
4049              logical_sector += STRIPE_SECTORS,
4050                      sector += STRIPE_SECTORS,
4051                      scnt++) {
4052
4053                 if (scnt < raid5_bi_hw_segments(raid_bio))
4054                         /* already done this stripe */
4055                         continue;
4056
4057                 sh = get_active_stripe(conf, sector, 0, 1, 0);
4058
4059                 if (!sh) {
4060                         /* failed to get a stripe - must wait */
4061                         raid5_set_bi_hw_segments(raid_bio, scnt);
4062                         conf->retry_read_aligned = raid_bio;
4063                         return handled;
4064                 }
4065
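                /* The chunk-aligned read of this block has already
                 * failed once; flagging a read error here makes the
                 * handle_stripe() call below re-read it through the
                 * stripe cache, reconstructing from parity if needed.
                 */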
4066                 set_bit(R5_ReadError, &sh->dev[dd_idx].flags);
4067                 if (!add_stripe_bio(sh, raid_bio, dd_idx, 0)) {
4068                         release_stripe(sh);
4069                         raid5_set_bi_hw_segments(raid_bio, scnt);
4070                         conf->retry_read_aligned = raid_bio;
4071                         return handled;
4072                 }
4073
4074                 handle_stripe(sh);
4075                 release_stripe(sh);
4076                 handled++;
4077         }
4078         spin_lock_irq(&conf->device_lock);
4079         remaining = raid5_dec_bi_phys_segments(raid_bio);
4080         spin_unlock_irq(&conf->device_lock);
4081         if (remaining == 0)
4082                 bio_endio(raid_bio, 0);
4083         if (atomic_dec_and_test(&conf->active_aligned_reads))
4084                 wake_up(&conf->wait_for_stripe);
4085         return handled;
4086 }
4087
4088
4089 /*
4090  * This is our raid5 kernel thread.
4091  *
4092  * We scan the hash table for stripes which can be handled now.
4093  * During the scan, completed stripes are saved for us by the interrupt
4094  * handler, so that they will not have to wait for our next wakeup.
4095  */
4096 static void raid5d(mddev_t *mddev)
4097 {
4098         struct stripe_head *sh;
4099         raid5_conf_t *conf = mddev->private;
4100         int handled;
4101         struct blk_plug plug;
4102
4103         pr_debug("+++ raid5d active\n");
4104
4105         md_check_recovery(mddev);
4106
4107         blk_start_plug(&plug);
4108         handled = 0;
4109         spin_lock_irq(&conf->device_lock);
4110         while (1) {
4111                 struct bio *bio;
4112
4113                 if (atomic_read(&mddev->plug_cnt) == 0 &&
4114                     !list_empty(&conf->bitmap_list)) {
4115                         /* Now is a good time to flush some bitmap updates */
4116                         conf->seq_flush++;
4117                         spin_unlock_irq(&conf->device_lock);
4118                         bitmap_unplug(mddev->bitmap);
4119                         spin_lock_irq(&conf->device_lock);
4120                         conf->seq_write = conf->seq_flush;
4121                         activate_bit_delay(conf);
4122                 }
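                /* plug_cnt == 0 means nobody currently holds a plug on
                 * this array, so stripes whose handling was being
                 * delayed may now be started.
                 */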
4123                 if (atomic_read(&mddev->plug_cnt) == 0)
4124                         raid5_activate_delayed(conf);
4125
4126                 while ((bio = remove_bio_from_retry(conf))) {
4127                         int ok;
4128                         spin_unlock_irq(&conf->device_lock);
4129                         ok = retry_aligned_read(conf, bio);
4130                         spin_lock_irq(&conf->device_lock);
4131                         if (!ok)
4132                                 break;
4133                         handled++;
4134                 }
4135
4136                 sh = __get_priority_stripe(conf);
4137
4138                 if (!sh)
4139                         break;
4140                 spin_unlock_irq(&conf->device_lock);
4141
4142                 handled++;
4143                 handle_stripe(sh);
4144                 release_stripe(sh);
4145                 cond_resched();
4146
4147                 if (mddev->flags & ~(1<<MD_CHANGE_PENDING))
4148                         md_check_recovery(mddev);
4149
4150                 spin_lock_irq(&conf->device_lock);
4151         }
4152         pr_debug("%d stripes handled\n", handled);
4153
4154         spin_unlock_irq(&conf->device_lock);
4155
4156         async_tx_issue_pending_all();
4157         blk_finish_plug(&plug);
4158
4159         pr_debug("--- raid5d inactive\n");
4160 }
4161
4162 static ssize_t
4163 raid5_show_stripe_cache_size(mddev_t *mddev, char *page)
4164 {
4165         raid5_conf_t *conf = mddev->private;
4166         if (conf)
4167                 return sprintf(page, "%d\n", conf->max_nr_stripes);
4168         else
4169                 return 0;
4170 }
4171
4172 int
4173 raid5_set_cache_size(mddev_t *mddev, int size)
4174 {
4175         raid5_conf_t *conf = mddev->private;
4176         int err;
4177
4178         if (size <= 16 || size > 32768)
4179                 return -EINVAL;
4180         while (size < conf->max_nr_stripes) {
4181                 if (drop_one_stripe(conf))
4182                         conf->max_nr_stripes--;
4183                 else
4184                         break;
4185         }
4186         err = md_allow_write(mddev);
4187         if (err)
4188                 return err;
4189         while (size > conf->max_nr_stripes) {
4190                 if (grow_one_stripe(conf))
4191                         conf->max_nr_stripes++;
4192                 else break;
4193         }
4194         return 0;
4195 }
4196 EXPORT_SYMBOL(raid5_set_cache_size);
4197
4198 static ssize_t
4199 raid5_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len)
4200 {
4201         raid5_conf_t *conf = mddev->private;
4202         unsigned long new;
4203         int err;
4204
4205         if (len >= PAGE_SIZE)
4206                 return -EINVAL;
4207         if (!conf)
4208                 return -ENODEV;
4209
4210         if (strict_strtoul(page, 10, &new))
4211                 return -EINVAL;
4212         err = raid5_set_cache_size(mddev, new);
4213         if (err)
4214                 return err;
4215         return len;
4216 }
4217
4218 static struct md_sysfs_entry
4219 raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR,
4220                                 raid5_show_stripe_cache_size,
4221                                 raid5_store_stripe_cache_size);
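/*
 * For illustration only: this attribute appears as
 * /sys/block/<md-device>/md/stripe_cache_size, so the cache can be
 * resized at runtime from a shell, e.g.:
 *
 *   echo 4096 > /sys/block/md0/md/stripe_cache_size
 *
 * Each stripe_head holds one PAGE_SIZE page per device, so memory use
 * scales with both this value and the number of disks.
 */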
4222
4223 static ssize_t
4224 raid5_show_preread_threshold(mddev_t *mddev, char *page)
4225 {
4226         raid5_conf_t *conf = mddev->private;
4227         if (conf)
4228                 return sprintf(page, "%d\n", conf->bypass_threshold);
4229         else
4230                 return 0;
4231 }
4232
4233 static ssize_t
4234 raid5_store_preread_threshold(mddev_t *mddev, const char *page, size_t len)
4235 {
4236         raid5_conf_t *conf = mddev->private;
4237         unsigned long new;
4238         if (len >= PAGE_SIZE)
4239                 return -EINVAL;
4240         if (!conf)
4241                 return -ENODEV;
4242
4243         if (strict_strtoul(page, 10, &new))
4244                 return -EINVAL;
4245         if (new > conf->max_nr_stripes)
4246                 return -EINVAL;
4247         conf->bypass_threshold = new;
4248         return len;
4249 }
4250
4251 static struct md_sysfs_entry
4252 raid5_preread_bypass_threshold = __ATTR(preread_bypass_threshold,
4253                                         S_IRUGO | S_IWUSR,
4254                                         raid5_show_preread_threshold,
4255                                         raid5_store_preread_threshold);
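/*
 * For illustration: preread_bypass_threshold is the number of times a
 * stripe requiring pre-reading may be bypassed in favour of one that
 * does not (see BYPASS_THRESHOLD and __get_priority_stripe()).  It is
 * likewise writable through sysfs, e.g.:
 *
 *   echo 1 > /sys/block/md0/md/preread_bypass_threshold
 */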
4256
4257 static ssize_t
4258 stripe_cache_active_show(mddev_t *mddev, char *page)
4259 {
4260         raid5_conf_t *conf = mddev->private;
4261         if (conf)
4262                 return sprintf(page, "%d\n", atomic_read(&conf->active_stripes));
4263         else
4264                 return 0;
4265 }
4266
4267 static struct md_sysfs_entry
4268 raid5_stripecache_active = __ATTR_RO(stripe_cache_active);
4269
4270 static struct attribute *raid5_attrs[] =  {
4271         &raid5_stripecache_size.attr,
4272         &raid5_stripecache_active.attr,
4273         &raid5_preread_bypass_threshold.attr,
4274         NULL,
4275 };
4276 static struct attribute_group raid5_attrs_group = {
4277         .name = NULL,
4278         .attrs = raid5_attrs,
4279 };
4280
4281 static sector_t
4282 raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks)
4283 {
4284         raid5_conf_t *conf = mddev->private;
4285
4286         if (!sectors)
4287                 sectors = mddev->dev_sectors;
4288         if (!raid_disks)
4289                 /* size is defined by the smallest of previous and new size */
4290                 raid_disks = min(conf->raid_disks, conf->previous_raid_disks);
4291
4292         sectors &= ~((sector_t)mddev->chunk_sectors - 1);
4293         sectors &= ~((sector_t)mddev->new_chunk_sectors - 1);
4294         return sectors * (raid_disks - conf->max_degraded);
4295 }
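/*
 * Worked example (arbitrary numbers): a 4-device RAID5 (max_degraded
 * == 1) with 64k chunks (128 sectors) and dev_sectors == 1000000
 * rounds each device down to 999936 sectors and reports
 * 999936 * (4 - 1) == 2999808 sectors of array capacity.
 */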
4296
4297 static void raid5_free_percpu(raid5_conf_t *conf)
4298 {
4299         struct raid5_percpu *percpu;
4300         unsigned long cpu;
4301
4302         if (!conf->percpu)
4303                 return;
4304
4305         get_online_cpus();
4306         for_each_possible_cpu(cpu) {
4307                 percpu = per_cpu_ptr(conf->percpu, cpu);
4308                 safe_put_page(percpu->spare_page);
4309                 kfree(percpu->scribble);
4310         }
4311 #ifdef CONFIG_HOTPLUG_CPU
4312         unregister_cpu_notifier(&conf->cpu_notify);
4313 #endif
4314         put_online_cpus();
4315
4316         free_percpu(conf->percpu);
4317 }
4318
4319 static void free_conf(raid5_conf_t *conf)
4320 {
4321         shrink_stripes(conf);
4322         raid5_free_percpu(conf);
4323         kfree(conf->disks);
4324         kfree(conf->stripe_hashtbl);
4325         kfree(conf);
4326 }
4327
4328 #ifdef CONFIG_HOTPLUG_CPU
4329 static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
4330                               void *hcpu)
4331 {
4332         raid5_conf_t *conf = container_of(nfb, raid5_conf_t, cpu_notify);
4333         long cpu = (long)hcpu;
4334         struct raid5_percpu *percpu = per_cpu_ptr(conf->percpu, cpu);
4335
4336         switch (action) {
4337         case CPU_UP_PREPARE:
4338         case CPU_UP_PREPARE_FROZEN:
4339                 if (conf->level == 6 && !percpu->spare_page)
4340                         percpu->spare_page = alloc_page(GFP_KERNEL);
4341                 if (!percpu->scribble)
4342                         percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
4343
4344                 if (!percpu->scribble ||
4345                     (conf->level == 6 && !percpu->spare_page)) {
4346                         safe_put_page(percpu->spare_page);
4347                         kfree(percpu->scribble);
4348                         pr_err("%s: failed memory allocation for cpu%ld\n",
4349                                __func__, cpu);
4350                         return notifier_from_errno(-ENOMEM);
4351                 }
4352                 break;
4353         case CPU_DEAD:
4354         case CPU_DEAD_FROZEN:
4355                 safe_put_page(percpu->spare_page);
4356                 kfree(percpu->scribble);
4357                 percpu->spare_page = NULL;
4358                 percpu->scribble = NULL;
4359                 break;
4360         default:
4361                 break;
4362         }
4363         return NOTIFY_OK;
4364 }
4365 #endif
4366
4367 static int raid5_alloc_percpu(raid5_conf_t *conf)
4368 {
4369         unsigned long cpu;
4370         struct page *spare_page;
4371         struct raid5_percpu __percpu *allcpus;
4372         void *scribble;
4373         int err;
4374
4375         allcpus = alloc_percpu(struct raid5_percpu);
4376         if (!allcpus)
4377                 return -ENOMEM;
4378         conf->percpu = allcpus;
4379
4380         get_online_cpus();
4381         err = 0;
4382         for_each_present_cpu(cpu) {
4383                 if (conf->level == 6) {
4384                         spare_page = alloc_page(GFP_KERNEL);
4385                         if (!spare_page) {
4386                                 err = -ENOMEM;
4387                                 break;
4388                         }
4389                         per_cpu_ptr(conf->percpu, cpu)->spare_page = spare_page;
4390                 }
4391                 scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
4392                 if (!scribble) {
4393                         err = -ENOMEM;
4394                         break;
4395                 }
4396                 per_cpu_ptr(conf->percpu, cpu)->scribble = scribble;
4397         }
4398 #ifdef CONFIG_HOTPLUG_CPU
4399         conf->cpu_notify.notifier_call = raid456_cpu_notify;
4400         conf->cpu_notify.priority = 0;
4401         if (err == 0)
4402                 err = register_cpu_notifier(&conf->cpu_notify);
4403 #endif
4404         put_online_cpus();
4405
4406         return err;
4407 }
4408
4409 static raid5_conf_t *setup_conf(mddev_t *mddev)
4410 {
4411         raid5_conf_t *conf;
4412         int raid_disk, memory, max_disks;
4413         mdk_rdev_t *rdev;
4414         struct disk_info *disk;
4415
4416         if (mddev->new_level != 5
4417             && mddev->new_level != 4
4418             && mddev->new_level != 6) {
4419                 printk(KERN_ERR "md/raid:%s: raid level not set to 4/5/6 (%d)\n",
4420                        mdname(mddev), mddev->new_level);
4421                 return ERR_PTR(-EIO);
4422         }
4423         if ((mddev->new_level == 5
4424              && !algorithm_valid_raid5(mddev->new_layout)) ||
4425             (mddev->new_level == 6
4426              && !algorithm_valid_raid6(mddev->new_layout))) {
4427                 printk(KERN_ERR "md/raid:%s: layout %d not supported\n",
4428                        mdname(mddev), mddev->new_layout);
4429                 return ERR_PTR(-EIO);
4430         }
4431         if (mddev->new_level == 6 && mddev->raid_disks < 4) {
4432                 printk(KERN_ERR "md/raid:%s: not enough configured devices (%d, minimum 4)\n",
4433                        mdname(mddev), mddev->raid_disks);
4434                 return ERR_PTR(-EINVAL);
4435         }
4436
4437         if (!mddev->new_chunk_sectors ||
4438             (mddev->new_chunk_sectors << 9) % PAGE_SIZE ||
4439             !is_power_of_2(mddev->new_chunk_sectors)) {
4440                 printk(KERN_ERR "md/raid:%s: invalid chunk size %d\n",
4441                        mdname(mddev), mddev->new_chunk_sectors << 9);
4442                 return ERR_PTR(-EINVAL);
4443         }
4444
4445         conf = kzalloc(sizeof(raid5_conf_t), GFP_KERNEL);
4446         if (conf == NULL)
4447                 goto abort;
4448         spin_lock_init(&conf->device_lock);
4449         init_waitqueue_head(&conf->wait_for_stripe);
4450         init_waitqueue_head(&conf->wait_for_overlap);
4451         INIT_LIST_HEAD(&conf->handle_list);
4452         INIT_LIST_HEAD(&conf->hold_list);
4453         INIT_LIST_HEAD(&conf->delayed_list);
4454         INIT_LIST_HEAD(&conf->bitmap_list);
4455         INIT_LIST_HEAD(&conf->inactive_list);
4456         atomic_set(&conf->active_stripes, 0);
4457         atomic_set(&conf->preread_active_stripes, 0);
4458         atomic_set(&conf->active_aligned_reads, 0);
4459         conf->bypass_threshold = BYPASS_THRESHOLD;
4460
4461         conf->raid_disks = mddev->raid_disks;
4462         if (mddev->reshape_position == MaxSector)
4463                 conf->previous_raid_disks = mddev->raid_disks;
4464         else
4465                 conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks;
4466         max_disks = max(conf->raid_disks, conf->previous_raid_disks);
4467         conf->scribble_len = scribble_len(max_disks);
4468
4469         conf->disks = kzalloc(max_disks * sizeof(struct disk_info),
4470                               GFP_KERNEL);
4471         if (!conf->disks)
4472                 goto abort;
4473
4474         conf->mddev = mddev;
4475
4476         if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL)
4477                 goto abort;
4478
4479         conf->level = mddev->new_level;
4480         if (raid5_alloc_percpu(conf) != 0)
4481                 goto abort;
4482
4483         pr_debug("raid456: run(%s) called.\n", mdname(mddev));
4484
4485         list_for_each_entry(rdev, &mddev->disks, same_set) {
4486                 raid_disk = rdev->raid_disk;
4487                 if (raid_disk >= max_disks
4488                     || raid_disk < 0)
4489                         continue;
4490                 disk = conf->disks + raid_disk;
4491
4492                 disk->rdev = rdev;
4493
4494                 if (test_bit(In_sync, &rdev->flags)) {
4495                         char b[BDEVNAME_SIZE];
4496                         printk(KERN_INFO "md/raid:%s: device %s operational as raid"
4497                                " disk %d\n",
4498                                mdname(mddev), bdevname(rdev->bdev, b), raid_disk);
4499                 } else if (rdev->saved_raid_disk != raid_disk)
4500                         /* Cannot rely on bitmap to complete recovery */
4501                         conf->fullsync = 1;
4502         }
4503
4504         conf->chunk_sectors = mddev->new_chunk_sectors;
4505         conf->level = mddev->new_level;
4506         if (conf->level == 6)
4507                 conf->max_degraded = 2;
4508         else
4509                 conf->max_degraded = 1;
4510         conf->algorithm = mddev->new_layout;
4511         conf->max_nr_stripes = NR_STRIPES;
4512         conf->reshape_progress = mddev->reshape_position;
4513         if (conf->reshape_progress != MaxSector) {
4514                 conf->prev_chunk_sectors = mddev->chunk_sectors;
4515                 conf->prev_algo = mddev->layout;
4516         }
4517
4518         memory = conf->max_nr_stripes * (sizeof(struct stripe_head) +
4519                  max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
4520         if (grow_stripes(conf, conf->max_nr_stripes)) {
4521                 printk(KERN_ERR
4522                        "md/raid:%s: couldn't allocate %dkB for buffers\n",
4523                        mdname(mddev), memory);
4524                 goto abort;
4525         } else
4526                 printk(KERN_INFO "md/raid:%s: allocated %dkB\n",
4527                        mdname(mddev), memory);
4528
4529         conf->thread = md_register_thread(raid5d, mddev, NULL);
4530         if (!conf->thread) {
4531                 printk(KERN_ERR
4532                        "md/raid:%s: couldn't allocate thread.\n",
4533                        mdname(mddev));
4534                 goto abort;
4535         }
4536
4537         return conf;
4538
4539  abort:
4540         if (conf) {
4541                 free_conf(conf);
4542                 return ERR_PTR(-EIO);
4543         } else
4544                 return ERR_PTR(-ENOMEM);
4545 }
4546
4547
4548 static int only_parity(int raid_disk, int algo, int raid_disks, int max_degraded)
4549 {
4550         switch (algo) {
4551         case ALGORITHM_PARITY_0:
4552                 if (raid_disk < max_degraded)
4553                         return 1;
4554                 break;
4555         case ALGORITHM_PARITY_N:
4556                 if (raid_disk >= raid_disks - max_degraded)
4557                         return 1;
4558                 break;
4559         case ALGORITHM_PARITY_0_6:
4560                 if (raid_disk == 0 ||
4561                     raid_disk == raid_disks - 1)
4562                         return 1;
4563                 break;
4564         case ALGORITHM_LEFT_ASYMMETRIC_6:
4565         case ALGORITHM_RIGHT_ASYMMETRIC_6:
4566         case ALGORITHM_LEFT_SYMMETRIC_6:
4567         case ALGORITHM_RIGHT_SYMMETRIC_6:
4568                 if (raid_disk == raid_disks - 1)
4569                         return 1;
4570         }
4571         return 0;
4572 }
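/*
 * Illustration: with ALGORITHM_PARITY_0 and max_degraded == 1, device
 * 0 holds nothing but parity, so a not-in-sync device 0 does not make
 * the data suspect; run() below uses this to count dirty parity-only
 * devices separately from genuinely missing data.
 */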
4573
4574 static int run(mddev_t *mddev)
4575 {
4576         raid5_conf_t *conf;
4577         int working_disks = 0;
4578         int dirty_parity_disks = 0;
4579         mdk_rdev_t *rdev;
4580         sector_t reshape_offset = 0;
4581
4582         if (mddev->recovery_cp != MaxSector)
4583                 printk(KERN_NOTICE "md/raid:%s: not clean"
4584                        " -- starting background reconstruction\n",
4585                        mdname(mddev));
4586         if (mddev->reshape_position != MaxSector) {
4587                 /* Check that we can continue the reshape.
4588                  * Currently only a change in the number of devices
4589                  * is supported, and we must be past the point where
4590                  * a stripe over-writes itself
4591                  */
4592                 sector_t here_new, here_old;
4593                 int old_disks;
4594                 int max_degraded = (mddev->level == 6 ? 2 : 1);
4595
4596                 if (mddev->new_level != mddev->level) {
4597                         printk(KERN_ERR "md/raid:%s: unsupported reshape "
4598                                "required - aborting.\n",
4599                                mdname(mddev));
4600                         return -EINVAL;
4601                 }
4602                 old_disks = mddev->raid_disks - mddev->delta_disks;
4603                 /* reshape_position must be on a new-stripe boundary, and the
4604                  * stripe one step further along in the new geometry must map
4605                  * to after this point in the old geometry.
4606                  */
4607                 here_new = mddev->reshape_position;
4608                 if (sector_div(here_new, mddev->new_chunk_sectors *
4609                                (mddev->raid_disks - max_degraded))) {
4610                         printk(KERN_ERR "md/raid:%s: reshape_position not "
4611                                "on a stripe boundary\n", mdname(mddev));
4612                         return -EINVAL;
4613                 }
4614                 reshape_offset = here_new * mddev->new_chunk_sectors;
4615                 /* here_new is the stripe we will write to */
4616                 here_old = mddev->reshape_position;
4617                 sector_div(here_old, mddev->chunk_sectors *
4618                            (old_disks-max_degraded));
4619                 /* here_old is the first stripe that we might need to read
4620                  * from */
4621                 if (mddev->delta_disks == 0) {
4622                         /* We cannot be sure it is safe to start an in-place
4623                  * reshape.  It is only safe if user-space is monitoring
4624                          * and taking constant backups.
4625                          * mdadm always starts a situation like this in
4626                          * readonly mode so it can take control before
4627                          * allowing any writes.  So just check for that.
4628                          */
4629                         if ((here_new * mddev->new_chunk_sectors !=
4630                              here_old * mddev->chunk_sectors) ||
4631                             mddev->ro == 0) {
4632                                 printk(KERN_ERR "md/raid:%s: in-place reshape must be started"
4633                                        " in read-only mode - aborting\n",
4634                                        mdname(mddev));
4635                                 return -EINVAL;
4636                         }
4637                 } else if (mddev->delta_disks < 0
4638                     ? (here_new * mddev->new_chunk_sectors <=
4639                        here_old * mddev->chunk_sectors)
4640                     : (here_new * mddev->new_chunk_sectors >=
4641                        here_old * mddev->chunk_sectors)) {
4642                         /* Reading from the same stripe as writing to - bad */
4643                         printk(KERN_ERR "md/raid:%s: reshape_position too early for "
4644                                "auto-recovery - aborting.\n",
4645                                mdname(mddev));
4646                         return -EINVAL;
4647                 }
4648                 printk(KERN_INFO "md/raid:%s: reshape will continue\n",
4649                        mdname(mddev));
4650                 /* OK, we should be able to continue; */
4651         } else {
4652                 BUG_ON(mddev->level != mddev->new_level);
4653                 BUG_ON(mddev->layout != mddev->new_layout);
4654                 BUG_ON(mddev->chunk_sectors != mddev->new_chunk_sectors);
4655                 BUG_ON(mddev->delta_disks != 0);
4656         }
4657
4658         if (mddev->private == NULL)
4659                 conf = setup_conf(mddev);
4660         else
4661                 conf = mddev->private;
4662
4663         if (IS_ERR(conf))
4664                 return PTR_ERR(conf);
4665
4666         mddev->thread = conf->thread;
4667         conf->thread = NULL;
4668         mddev->private = conf;
4669
4670         /*
4671          * 0 for a fully functional array, 1 or 2 for a degraded array.
4672          */
4673         list_for_each_entry(rdev, &mddev->disks, same_set) {
4674                 if (rdev->badblocks.count) {
4675                         printk(KERN_ERR "md/raid5: cannot handle bad blocks yet\n");
4676                         goto abort;
4677                 }
4678                 if (rdev->raid_disk < 0)
4679                         continue;
4680                 if (test_bit(In_sync, &rdev->flags)) {
4681                         working_disks++;
4682                         continue;
4683                 }
4684                 /* This disk is not fully in-sync.  However if it
4685                  * just stores parity (beyond the recovery_offset),
4686                  * then we don't need to be concerned about the
4687                  * array being dirty.
4688                  * When reshape goes 'backwards', we never have
4689                  * partially completed devices, so we only need
4690                  * to worry about reshape going forwards.
4691                  */
4692                 /* Hack because v0.91 doesn't store recovery_offset properly. */
4693                 if (mddev->major_version == 0 &&
4694                     mddev->minor_version > 90)
4695                         rdev->recovery_offset = reshape_offset;
4696
4697                 if (rdev->recovery_offset < reshape_offset) {
4698                         /* We need to check old and new layout */
4699                         if (!only_parity(rdev->raid_disk,
4700                                          conf->algorithm,
4701                                          conf->raid_disks,
4702                                          conf->max_degraded))
4703                                 continue;
4704                 }
4705                 if (!only_parity(rdev->raid_disk,
4706                                  conf->prev_algo,
4707                                  conf->previous_raid_disks,
4708                                  conf->max_degraded))
4709                         continue;
4710                 dirty_parity_disks++;
4711         }
4712
4713         mddev->degraded = (max(conf->raid_disks, conf->previous_raid_disks)
4714                            - working_disks);
4715
4716         if (has_failed(conf)) {
4717                 printk(KERN_ERR "md/raid:%s: not enough operational devices"
4718                         " (%d/%d failed)\n",
4719                         mdname(mddev), mddev->degraded, conf->raid_disks);
4720                 goto abort;
4721         }
4722
4723         /* device size must be a multiple of chunk size */
4724         mddev->dev_sectors &= ~(mddev->chunk_sectors - 1);
4725         mddev->resync_max_sectors = mddev->dev_sectors;
4726
4727         if (mddev->degraded > dirty_parity_disks &&
4728             mddev->recovery_cp != MaxSector) {
4729                 if (mddev->ok_start_degraded)
4730                         printk(KERN_WARNING
4731                                "md/raid:%s: starting dirty degraded array"
4732                                " - data corruption possible.\n",
4733                                mdname(mddev));
4734                 else {
4735                         printk(KERN_ERR
4736                                "md/raid:%s: cannot start dirty degraded array.\n",
4737                                mdname(mddev));
4738                         goto abort;
4739                 }
4740         }
4741
4742         if (mddev->degraded == 0)
4743                 printk(KERN_INFO "md/raid:%s: raid level %d active with %d out of %d"
4744                        " devices, algorithm %d\n", mdname(mddev), conf->level,
4745                        mddev->raid_disks-mddev->degraded, mddev->raid_disks,
4746                        mddev->new_layout);
4747         else
4748                 printk(KERN_ALERT "md/raid:%s: raid level %d active with %d"
4749                        " out of %d devices, algorithm %d\n",
4750                        mdname(mddev), conf->level,
4751                        mddev->raid_disks - mddev->degraded,
4752                        mddev->raid_disks, mddev->new_layout);
4753
4754         print_raid5_conf(conf);
4755
4756         if (conf->reshape_progress != MaxSector) {
4757                 conf->reshape_safe = conf->reshape_progress;
4758                 atomic_set(&conf->reshape_stripes, 0);
4759                 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
4760                 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
4761                 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
4762                 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
4763                 mddev->sync_thread = md_register_thread(md_do_sync, mddev,
4764                                                         "reshape");
4765         }
4766
4767
4768         /* Ok, everything is just fine now */
4769         if (mddev->to_remove == &raid5_attrs_group)
4770                 mddev->to_remove = NULL;
4771         else if (mddev->kobj.sd &&
4772             sysfs_create_group(&mddev->kobj, &raid5_attrs_group))
4773                 printk(KERN_WARNING
4774                        "raid5: failed to create sysfs attributes for %s\n",
4775                        mdname(mddev));
4776         md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
4777
4778         if (mddev->queue) {
4779                 int chunk_size;
4780                 /* read-ahead size must cover two whole stripes, which
4781                  * is 2 * (datadisks) * chunksize, where datadisks is
4782                  * the number of raid devices minus max_degraded
4783                  */
4784                 int data_disks = conf->previous_raid_disks - conf->max_degraded;
4785                 int stripe = data_disks *
4786                         ((mddev->chunk_sectors << 9) / PAGE_SIZE);
4787                 if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
4788                         mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
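                /* e.g. 3 data disks with 64k chunks: stripe is
                 * 3 * 16 == 48 pages, so read-ahead is raised to at
                 * least 96 pages (384k).  (Illustrative numbers only.)
                 */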
4789
4790                 blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec);
4791
4792                 mddev->queue->backing_dev_info.congested_data = mddev;
4793                 mddev->queue->backing_dev_info.congested_fn = raid5_congested;
4794
4795                 chunk_size = mddev->chunk_sectors << 9;
4796                 blk_queue_io_min(mddev->queue, chunk_size);
4797                 blk_queue_io_opt(mddev->queue, chunk_size *
4798                                  (conf->raid_disks - conf->max_degraded));
4799
4800                 list_for_each_entry(rdev, &mddev->disks, same_set)
4801                         disk_stack_limits(mddev->gendisk, rdev->bdev,
4802                                           rdev->data_offset << 9);
4803         }
4804
4805         return 0;
4806 abort:
4807         md_unregister_thread(mddev->thread);
4808         mddev->thread = NULL;
4809         if (conf) {
4810                 print_raid5_conf(conf);
4811                 free_conf(conf);
4812         }
4813         mddev->private = NULL;
4814         printk(KERN_ALERT "md/raid:%s: failed to run raid set.\n", mdname(mddev));
4815         return -EIO;
4816 }
4817
4818 static int stop(mddev_t *mddev)
4819 {
4820         raid5_conf_t *conf = mddev->private;
4821
4822         md_unregister_thread(mddev->thread);
4823         mddev->thread = NULL;
4824         if (mddev->queue)
4825                 mddev->queue->backing_dev_info.congested_fn = NULL;
4826         free_conf(conf);
4827         mddev->private = NULL;
4828         mddev->to_remove = &raid5_attrs_group;
4829         return 0;
4830 }
4831
4832 #ifdef DEBUG
4833 static void print_sh(struct seq_file *seq, struct stripe_head *sh)
4834 {
4835         int i;
4836
4837         seq_printf(seq, "sh %llu, pd_idx %d, state %ld.\n",
4838                    (unsigned long long)sh->sector, sh->pd_idx, sh->state);
4839         seq_printf(seq, "sh %llu,  count %d.\n",
4840                    (unsigned long long)sh->sector, atomic_read(&sh->count));
4841         seq_printf(seq, "sh %llu, ", (unsigned long long)sh->sector);
4842         for (i = 0; i < sh->disks; i++) {
4843                 seq_printf(seq, "(cache%d: %p %ld) ",
4844                            i, sh->dev[i].page, sh->dev[i].flags);
4845         }
4846         seq_printf(seq, "\n");
4847 }
4848
4849 static void printall(struct seq_file *seq, raid5_conf_t *conf)
4850 {
4851         struct stripe_head *sh;
4852         struct hlist_node *hn;
4853         int i;
4854
4855         spin_lock_irq(&conf->device_lock);
4856         for (i = 0; i < NR_HASH; i++) {
4857                 hlist_for_each_entry(sh, hn, &conf->stripe_hashtbl[i], hash) {
4858                         if (sh->raid_conf != conf)
4859                                 continue;
4860                         print_sh(seq, sh);
4861                 }
4862         }
4863         spin_unlock_irq(&conf->device_lock);
4864 }
4865 #endif
4866
4867 static void status(struct seq_file *seq, mddev_t *mddev)
4868 {
4869         raid5_conf_t *conf = mddev->private;
4870         int i;
4871
4872         seq_printf(seq, " level %d, %dk chunk, algorithm %d", mddev->level,
4873                 mddev->chunk_sectors / 2, mddev->layout);
4874         seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded);
4875         for (i = 0; i < conf->raid_disks; i++)
4876                 seq_printf (seq, "%s",
4877                                conf->disks[i].rdev &&
4878                                test_bit(In_sync, &conf->disks[i].rdev->flags) ? "U" : "_");
4879         seq_printf (seq, "]");
4880 #ifdef DEBUG
4881         seq_printf (seq, "\n");
4882         printall(seq, conf);
4883 #endif
4884 }
4885
4886 static void print_raid5_conf (raid5_conf_t *conf)
4887 {
4888         int i;
4889         struct disk_info *tmp;
4890
4891         printk(KERN_DEBUG "RAID conf printout:\n");
4892         if (!conf) {
4893                 printk("(conf==NULL)\n");
4894                 return;
4895         }
4896         printk(KERN_DEBUG " --- level:%d rd:%d wd:%d\n", conf->level,
4897                conf->raid_disks,
4898                conf->raid_disks - conf->mddev->degraded);
4899
4900         for (i = 0; i < conf->raid_disks; i++) {
4901                 char b[BDEVNAME_SIZE];
4902                 tmp = conf->disks + i;
4903                 if (tmp->rdev)
4904                         printk(KERN_DEBUG " disk %d, o:%d, dev:%s\n",
4905                                i, !test_bit(Faulty, &tmp->rdev->flags),
4906                                bdevname(tmp->rdev->bdev, b));
4907         }
4908 }
4909
4910 static int raid5_spare_active(mddev_t *mddev)
4911 {
4912         int i;
4913         raid5_conf_t *conf = mddev->private;
4914         struct disk_info *tmp;
4915         int count = 0;
4916         unsigned long flags;
4917
4918         for (i = 0; i < conf->raid_disks; i++) {
4919                 tmp = conf->disks + i;
4920                 if (tmp->rdev
4921                     && tmp->rdev->recovery_offset == MaxSector
4922                     && !test_bit(Faulty, &tmp->rdev->flags)
4923                     && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
4924                         count++;
4925                         sysfs_notify_dirent_safe(tmp->rdev->sysfs_state);
4926                 }
4927         }
4928         spin_lock_irqsave(&conf->device_lock, flags);
4929         mddev->degraded -= count;
4930         spin_unlock_irqrestore(&conf->device_lock, flags);
4931         print_raid5_conf(conf);
4932         return count;
4933 }
4934
4935 static int raid5_remove_disk(mddev_t *mddev, int number)
4936 {
4937         raid5_conf_t *conf = mddev->private;
4938         int err = 0;
4939         mdk_rdev_t *rdev;
4940         struct disk_info *p = conf->disks + number;
4941
4942         print_raid5_conf(conf);
4943         rdev = p->rdev;
4944         if (rdev) {
4945                 if (number >= conf->raid_disks &&
4946                     conf->reshape_progress == MaxSector)
4947                         clear_bit(In_sync, &rdev->flags);
4948
4949                 if (test_bit(In_sync, &rdev->flags) ||
4950                     atomic_read(&rdev->nr_pending)) {
4951                         err = -EBUSY;
4952                         goto abort;
4953                 }
4954                 /* Only remove non-faulty devices if recovery
4955                  * isn't possible.
4956                  */
4957                 if (!test_bit(Faulty, &rdev->flags) &&
4958                     !has_failed(conf) &&
4959                     number < conf->raid_disks) {
4960                         err = -EBUSY;
4961                         goto abort;
4962                 }
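                /* Clearing p->rdev publishes the removal: readers
                 * access it under rcu_read_lock(), so once
                 * synchronize_rcu() returns no new reference can be
                 * taken; a non-zero nr_pending after that means we
                 * raced with in-flight I/O and must back out.
                 */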
4963                 p->rdev = NULL;
4964                 synchronize_rcu();
4965                 if (atomic_read(&rdev->nr_pending)) {
4966                         /* lost the race, try later */
4967                         err = -EBUSY;
4968                         p->rdev = rdev;
4969                 }
4970         }
4971 abort:
4972
4973         print_raid5_conf(conf);
4974         return err;
4975 }
4976
4977 static int raid5_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
4978 {
4979         raid5_conf_t *conf = mddev->private;
4980         int err = -EEXIST;
4981         int disk;
4982         struct disk_info *p;
4983         int first = 0;
4984         int last = conf->raid_disks - 1;
4985
4986         if (rdev->badblocks.count)
4987                 return -EINVAL;
4988
4989         if (has_failed(conf))
4990                 /* no point adding a device */
4991                 return -EINVAL;
4992
4993         if (rdev->raid_disk >= 0)
4994                 first = last = rdev->raid_disk;
4995
4996         /*
4997          * find the disk ... but prefer rdev->saved_raid_disk
4998          * if possible.
4999          */
5000         if (rdev->saved_raid_disk >= 0 &&
5001             rdev->saved_raid_disk >= first &&
5002             conf->disks[rdev->saved_raid_disk].rdev == NULL)
5003                 disk = rdev->saved_raid_disk;
5004         else
5005                 disk = first;
5006         for ( ; disk <= last ; disk++)
5007                 if ((p=conf->disks + disk)->rdev == NULL) {
5008                         clear_bit(In_sync, &rdev->flags);
5009                         rdev->raid_disk = disk;
5010                         err = 0;
5011                         if (rdev->saved_raid_disk != disk)
5012                                 conf->fullsync = 1;
5013                         rcu_assign_pointer(p->rdev, rdev);
5014                         break;
5015                 }
5016         print_raid5_conf(conf);
5017         return err;
5018 }
5019
5020 static int raid5_resize(mddev_t *mddev, sector_t sectors)
5021 {
5022         /* no resync is happening, and there is enough space
5023          * on all devices, so we can resize.
5024          * We need to make sure resync covers any new space.
5025          * If the array is shrinking we should possibly wait until
5026          * any io in the removed space completes, but it hardly seems
5027          * worth it.
5028          */
5029         sectors &= ~((sector_t)mddev->chunk_sectors - 1);
5030         md_set_array_sectors(mddev, raid5_size(mddev, sectors,
5031                                                mddev->raid_disks));
5032         if (mddev->array_sectors >
5033             raid5_size(mddev, sectors, mddev->raid_disks))
5034                 return -EINVAL;
5035         set_capacity(mddev->gendisk, mddev->array_sectors);
5036         revalidate_disk(mddev->gendisk);
5037         if (sectors > mddev->dev_sectors &&
5038             mddev->recovery_cp > mddev->dev_sectors) {
5039                 mddev->recovery_cp = mddev->dev_sectors;
5040                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5041         }
5042         mddev->dev_sectors = sectors;
5043         mddev->resync_max_sectors = sectors;
5044         return 0;
5045 }
5046
5047 static int check_stripe_cache(mddev_t *mddev)
5048 {
5049         /* Can only proceed if there are plenty of stripe_heads.
5050          * We need a minimum of one full stripe, and for sensible progress
5051          * it is best to have about 4 times that.
5052          * If we require 4 times, then the default 256 4K stripe_heads will
5053          * allow for chunk sizes up to 256K, which is probably OK.
5054          * If the chunk size is greater, user-space should request more
5055          * stripe_heads first.
5056          */
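        /*
         * Worked example: with 512k chunks (1024 sectors) and 4k
         * STRIPE_SIZE, one chunk spans 128 stripe_heads, so a reshape
         * wants 4 * 128 == 512 of them, twice the default cache of
         * 256; stripe_cache_size must be raised first.
         */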
5057         raid5_conf_t *conf = mddev->private;
5058         if (((mddev->chunk_sectors << 9) / STRIPE_SIZE) * 4
5059             > conf->max_nr_stripes ||
5060             ((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4
5061             > conf->max_nr_stripes) {
5062                 printk(KERN_WARNING "md/raid:%s: reshape: not enough stripes.  Needed %lu\n",
5063                        mdname(mddev),
5064                        ((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9)
5065                         / STRIPE_SIZE)*4);
5066                 return 0;
5067         }
5068         return 1;
5069 }
5070
5071 static int check_reshape(mddev_t *mddev)
5072 {
5073         raid5_conf_t *conf = mddev->private;
5074
5075         if (mddev->delta_disks == 0 &&
5076             mddev->new_layout == mddev->layout &&
5077             mddev->new_chunk_sectors == mddev->chunk_sectors)
5078                 return 0; /* nothing to do */
5079         if (mddev->bitmap)
5080                 /* Cannot grow a bitmap yet */
5081                 return -EBUSY;
5082         if (has_failed(conf))
5083                 return -EINVAL;
5084         if (mddev->delta_disks < 0) {
5085                 /* We might be able to shrink, but the devices must
5086                  * be made bigger first.
5087                  * For raid6, 4 is the minimum size.
5088                  * For raid6, 4 is the minimum number of devices;
5089                  * otherwise 2 is the minimum.
5090                 int min = 2;
5091                 if (mddev->level == 6)
5092                         min = 4;
5093                 if (mddev->raid_disks + mddev->delta_disks < min)
5094                         return -EINVAL;
5095         }
5096
5097         if (!check_stripe_cache(mddev))
5098                 return -ENOSPC;
5099
5100         return resize_stripes(conf, conf->raid_disks + mddev->delta_disks);
5101 }
5102
5103 static int raid5_start_reshape(mddev_t *mddev)
5104 {
5105         raid5_conf_t *conf = mddev->private;
5106         mdk_rdev_t *rdev;
5107         int spares = 0;
5108         unsigned long flags;
5109
5110         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
5111                 return -EBUSY;
5112
5113         if (!check_stripe_cache(mddev))
5114                 return -ENOSPC;
5115
5116         list_for_each_entry(rdev, &mddev->disks, same_set)
5117                 if (!test_bit(In_sync, &rdev->flags)
5118                     && !test_bit(Faulty, &rdev->flags))
5119                         spares++;
5120
5121         if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded)
5122                 /* Not enough devices even to make a degraded array
5123                  * of that size
5124                  */
5125                 return -EINVAL;
5126
5127         /* Refuse to reduce size of the array.  Any reductions in
5128          * array size must be through explicit setting of array_size
5129          * attribute.
5130          */
5131         if (raid5_size(mddev, 0, conf->raid_disks + mddev->delta_disks)
5132             < mddev->array_sectors) {
5133                 printk(KERN_ERR "md/raid:%s: array size must be reduced "
5134                        "before number of disks\n", mdname(mddev));
5135                 return -EINVAL;
5136         }
5137
5138         atomic_set(&conf->reshape_stripes, 0);
5139         spin_lock_irq(&conf->device_lock);
5140         conf->previous_raid_disks = conf->raid_disks;
5141         conf->raid_disks += mddev->delta_disks;
5142         conf->prev_chunk_sectors = conf->chunk_sectors;
5143         conf->chunk_sectors = mddev->new_chunk_sectors;
5144         conf->prev_algo = conf->algorithm;
5145         conf->algorithm = mddev->new_layout;
5146         if (mddev->delta_disks < 0)
5147                 conf->reshape_progress = raid5_size(mddev, 0, 0);
5148         else
5149                 conf->reshape_progress = 0;
5150         conf->reshape_safe = conf->reshape_progress;
5151         conf->generation++;
5152         spin_unlock_irq(&conf->device_lock);
5153
5154         /* Add some new drives, as many as will fit.
5155          * We know there are enough to make the newly sized array work.
5156          * Don't add devices if we are reducing the number of
5157          * devices in the array.  This is because it is not possible
5158          * to correctly record the "partially reconstructed" state of
5159          * such devices during the reshape and confusion could result.
5160          */
5161         if (mddev->delta_disks >= 0) {
5162                 int added_devices = 0;
5163                 list_for_each_entry(rdev, &mddev->disks, same_set)
5164                         if (rdev->raid_disk < 0 &&
5165                             !test_bit(Faulty, &rdev->flags)) {
5166                                 if (raid5_add_disk(mddev, rdev) == 0) {
5167                                         if (rdev->raid_disk
5168                                             >= conf->previous_raid_disks) {
5169                                                 set_bit(In_sync, &rdev->flags);
5170                                                 added_devices++;
5171                                         } else
5172                                                 rdev->recovery_offset = 0;
5173
5174                                         if (sysfs_link_rdev(mddev, rdev))
5175                                                 /* Failure here is OK */;
5176                                 }
5177                         } else if (rdev->raid_disk >= conf->previous_raid_disks
5178                                    && !test_bit(Faulty, &rdev->flags)) {
5179                                 /* This is a spare that was manually added */
5180                                 set_bit(In_sync, &rdev->flags);
5181                                 added_devices++;
5182                         }
5183
5184                 /* When a reshape changes the number of devices,
5185                  * ->degraded is measured against the larger of the
5186                  * pre and post number of devices.
5187                  */
5188                 spin_lock_irqsave(&conf->device_lock, flags);
5189                 mddev->degraded += (conf->raid_disks - conf->previous_raid_disks)
5190                         - added_devices;
5191                 spin_unlock_irqrestore(&conf->device_lock, flags);
5192         }
5193         mddev->raid_disks = conf->raid_disks;
5194         mddev->reshape_position = conf->reshape_progress;
5195         set_bit(MD_CHANGE_DEVS, &mddev->flags);
5196
5197         clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
5198         clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
5199         set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
5200         set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
5201         mddev->sync_thread = md_register_thread(md_do_sync, mddev,
5202                                                 "reshape");
5203         if (!mddev->sync_thread) {
5204                 mddev->recovery = 0;
5205                 spin_lock_irq(&conf->device_lock);
5206                 mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks;
5207                 conf->reshape_progress = MaxSector;
5208                 spin_unlock_irq(&conf->device_lock);
5209                 return -EAGAIN;
5210         }
5211         conf->reshape_checkpoint = jiffies;
5212         md_wakeup_thread(mddev->sync_thread);
5213         md_new_event(mddev);
5214         return 0;
5215 }
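/*
 * Editorial sketch (not driver code): the spare check near the top of
 * raid5_start_reshape() allows starting the reshape degraded by up to
 * max_degraded devices.  With hypothetical numbers, growing a clean
 * raid5 (max_degraded == 1) by two disks needs at least one spare:
 */
#if 0
static int reshape_has_disks(int spares, int degraded,
                             int delta_disks, int max_degraded)
{
        return spares - degraded >= delta_disks - max_degraded;
}
/*
 * reshape_has_disks(0, 0, 2, 1) == 0  => -EINVAL, no spares at all
 * reshape_has_disks(1, 0, 2, 1) == 1  => proceed, one-disk-degraded target
 * reshape_has_disks(2, 0, 2, 1) == 1  => proceed, fully populated target
 */
#endif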
5216
5217 /* This is called from the reshape thread and should make any
5218  * changes needed in 'conf'
5219  */
5220 static void end_reshape(raid5_conf_t *conf)
5221 {
5222
5223         if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
5224
5225                 spin_lock_irq(&conf->device_lock);
5226                 conf->previous_raid_disks = conf->raid_disks;
5227                 conf->reshape_progress = MaxSector;
5228                 spin_unlock_irq(&conf->device_lock);
5229                 wake_up(&conf->wait_for_overlap);
5230
5231                 /* read-ahead size must cover two whole stripes, which is
5232                  * 2 * (data disks) * chunksize, where data disks =
5233                  * raid_disks - max_degraded; see the sketch below */
5234                 if (conf->mddev->queue) {
5235                         int data_disks = conf->raid_disks - conf->max_degraded;
5236                         int stripe = data_disks * ((conf->chunk_sectors << 9)
5237                                                    / PAGE_SIZE);
5238                         if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
5239                                 conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
5240                 }
5241         }
5242 }
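/*
 * Editorial sketch (not driver code): the read-ahead update above, with
 * hypothetical numbers.  A 6-device raid5 (5 data disks) using 512K
 * chunks has a 2.5M stripe, so ra_pages is raised to cover 5M:
 */
#if 0
static unsigned long example_ra_pages(void)
{
        unsigned long chunk_sectors = 1024;     /* hypothetical 512K chunk */
        int data_disks = 6 - 1;                 /* raid_disks - max_degraded */
        int stripe = data_disks * ((chunk_sectors << 9) / 4096);

        return 2 * stripe;      /* 2 * 5 * 128 = 1280 pages == 5M read-ahead */
}
#endif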
5243
5244 /* This is called from the raid5d thread with mddev_lock held.
5245  * It makes config changes to the device.
5246  */
5247 static void raid5_finish_reshape(mddev_t *mddev)
5248 {
5249         raid5_conf_t *conf = mddev->private;
5250
5251         if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
5252
5253                 if (mddev->delta_disks > 0) {
5254                         md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
5255                         set_capacity(mddev->gendisk, mddev->array_sectors);
5256                         revalidate_disk(mddev->gendisk);
5257                 } else {
5258                         int d;
5259                         mddev->degraded = conf->raid_disks;
5260                         for (d = 0; d < conf->raid_disks ; d++)
5261                                 if (conf->disks[d].rdev &&
5262                                     test_bit(In_sync,
5263                                              &conf->disks[d].rdev->flags))
5264                                         mddev->degraded--;
5265                         for (d = conf->raid_disks;
5266                              d < conf->raid_disks - mddev->delta_disks;
5267                              d++) {
5268                                 mdk_rdev_t *rdev = conf->disks[d].rdev;
5269                                 if (rdev && raid5_remove_disk(mddev, d) == 0) {
5270                                         sysfs_unlink_rdev(mddev, rdev);
5271                                         rdev->raid_disk = -1;
5272                                 }
5273                         }
5274                 }
5275                 mddev->layout = conf->algorithm;
5276                 mddev->chunk_sectors = conf->chunk_sectors;
5277                 mddev->reshape_position = MaxSector;
5278                 mddev->delta_disks = 0;
5279         }
5280 }
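/*
 * Editorial sketch (not driver code): after a shrink, ->degraded above is
 * simply recounted - start from the new raid_disks and subtract one per
 * device that is still In_sync:
 */
#if 0
static int recount_degraded(int raid_disks, int in_sync_disks)
{
        return raid_disks - in_sync_disks;      /* 4 disks, 4 in sync => 0 */
}
#endif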
5281
5282 static void raid5_quiesce(mddev_t *mddev, int state)
5283 {
5284         raid5_conf_t *conf = mddev->private;
5285
5286         switch(state) {
5287         case 2: /* resume for a suspend */
5288                 wake_up(&conf->wait_for_overlap);
5289                 break;
5290
5291         case 1: /* stop all writes */
5292                 spin_lock_irq(&conf->device_lock);
5293                 /* '2' tells resync/reshape to pause so that all
5294                  * active stripes can drain
5295                  */
5296                 conf->quiesce = 2;
5297                 wait_event_lock_irq(conf->wait_for_stripe,
5298                                     atomic_read(&conf->active_stripes) == 0 &&
5299                                     atomic_read(&conf->active_aligned_reads) == 0,
5300                                     conf->device_lock, /* nothing */);
5301                 conf->quiesce = 1;
5302                 spin_unlock_irq(&conf->device_lock);
5303                 /* allow reshape to continue */
5304                 wake_up(&conf->wait_for_overlap);
5305                 break;
5306
5307         case 0: /* re-enable writes */
5308                 spin_lock_irq(&conf->device_lock);
5309                 conf->quiesce = 0;
5310                 wake_up(&conf->wait_for_stripe);
5311                 wake_up(&conf->wait_for_overlap);
5312                 spin_unlock_irq(&conf->device_lock);
5313                 break;
5314         }
5315 }
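/*
 * Editorial sketch (not driver code): the quiesce states above are driven
 * by md core.  A hypothetical suspend/resume sequence looks like:
 */
#if 0
raid5_quiesce(mddev, 1);        /* block writes, drain all active stripes */
/* ... array is idle here: safe window for the caller's critical work ... */
raid5_quiesce(mddev, 0);        /* re-enable writes and wake all waiters */
/* state 2 is narrower: it only wakes a resync/reshape paused by state 1 */
#endif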
5316
5317
5318 static void *raid45_takeover_raid0(mddev_t *mddev, int level)
5319 {
5320         struct raid0_private_data *raid0_priv = mddev->private;
5321         sector_t sectors;
5322
5323         /* for raid0 takeover only one zone is supported */
5324         if (raid0_priv->nr_strip_zones > 1) {
5325                 printk(KERN_ERR "md/raid:%s: cannot takeover raid0 with more than one zone.\n",
5326                        mdname(mddev));
5327                 return ERR_PTR(-EINVAL);
5328         }
5329
5330         sectors = raid0_priv->strip_zone[0].zone_end;
5331         sector_div(sectors, raid0_priv->strip_zone[0].nb_dev);
5332         mddev->dev_sectors = sectors;
5333         mddev->new_level = level;
5334         mddev->new_layout = ALGORITHM_PARITY_N;
5335         mddev->new_chunk_sectors = mddev->chunk_sectors;
5336         mddev->raid_disks += 1;
5337         mddev->delta_disks = 1;
5338         /* make sure it will not be marked as dirty */
5339         mddev->recovery_cp = MaxSector;
5340
5341         return setup_conf(mddev);
5342 }
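/*
 * Editorial sketch (not driver code): the per-device size computed above,
 * with hypothetical numbers.  A 3-disk single-zone raid0 whose only zone
 * ends at 3000000 sectors yields 1000000 sectors per device; the takeover
 * then plans for one extra (parity) disk via delta_disks = 1:
 */
#if 0
static unsigned long long raid0_dev_sectors(void)
{
        unsigned long long zone_end = 3000000;  /* hypothetical zone_end */
        unsigned int nb_dev = 3;                /* disks in the single zone */

        return zone_end / nb_dev;               /* 1000000 sectors per device */
}
#endif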
5343
5344
5345 static void *raid5_takeover_raid1(mddev_t *mddev)
5346 {
5347         int chunksect;
5348
5349         if (mddev->raid_disks != 2 ||
5350             mddev->degraded > 1)
5351                 return ERR_PTR(-EINVAL);
5352
5353         /* Should we check for write-behind devices? */
5354
5355         chunksect = 64*2; /* 64K by default */
5356
5357         /* The array must be an exact multiple of chunksize */
5358         while (chunksect && (mddev->array_sectors & (chunksect-1)))
5359                 chunksect >>= 1;
5360
5361         if ((chunksect<<9) < STRIPE_SIZE)
5362                 /* array size does not allow a suitable chunk size */
5363                 return ERR_PTR(-EINVAL);
5364
5365         mddev->new_level = 5;
5366         mddev->new_layout = ALGORITHM_LEFT_SYMMETRIC;
5367         mddev->new_chunk_sectors = chunksect;
5368
5369         return setup_conf(mddev);
5370 }
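/*
 * Editorial sketch (not driver code): the chunk-size search above halves
 * the candidate until it divides the array.  With a hypothetical raid1 of
 * 1000000 sectors, 128 sectors (64K) does not divide it but 64 does, so
 * the takeover settles on a 32K chunk:
 */
#if 0
static int pick_chunksect(unsigned long long array_sectors)
{
        int chunksect = 64 * 2;         /* start at 64K, as above */

        while (chunksect && (array_sectors & (chunksect - 1)))
                chunksect >>= 1;
        return chunksect;       /* pick_chunksect(1000000) == 64, i.e. 32K */
}
#endif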
5371
5372 static void *raid5_takeover_raid6(mddev_t *mddev)
5373 {
5374         int new_layout;
5375
5376         switch (mddev->layout) {
5377         case ALGORITHM_LEFT_ASYMMETRIC_6:
5378                 new_layout = ALGORITHM_LEFT_ASYMMETRIC;
5379                 break;
5380         case ALGORITHM_RIGHT_ASYMMETRIC_6:
5381                 new_layout = ALGORITHM_RIGHT_ASYMMETRIC;
5382                 break;
5383         case ALGORITHM_LEFT_SYMMETRIC_6:
5384                 new_layout = ALGORITHM_LEFT_SYMMETRIC;
5385                 break;
5386         case ALGORITHM_RIGHT_SYMMETRIC_6:
5387                 new_layout = ALGORITHM_RIGHT_SYMMETRIC;
5388                 break;
5389         case ALGORITHM_PARITY_0_6:
5390                 new_layout = ALGORITHM_PARITY_0;
5391                 break;
5392         case ALGORITHM_PARITY_N:
5393                 new_layout = ALGORITHM_PARITY_N;
5394                 break;
5395         default:
5396                 return ERR_PTR(-EINVAL);
5397         }
5398         mddev->new_level = 5;
5399         mddev->new_layout = new_layout;
5400         mddev->delta_disks = -1;
5401         mddev->raid_disks -= 1;
5402         return setup_conf(mddev);
5403 }
5404
5405
5406 static int raid5_check_reshape(mddev_t *mddev)
5407 {
5408         /* For a 2-drive array, the layout and chunk size can be changed
5409          * immediately, as no restriping is needed.
5410          * For larger arrays we record the new value - after validation -
5411          * to be used by a later reshape pass.
5412          */
5413         raid5_conf_t *conf = mddev->private;
5414         int new_chunk = mddev->new_chunk_sectors;
5415
5416         if (mddev->new_layout >= 0 && !algorithm_valid_raid5(mddev->new_layout))
5417                 return -EINVAL;
5418         if (new_chunk > 0) {
5419                 if (!is_power_of_2(new_chunk))
5420                         return -EINVAL;
5421                 if (new_chunk < (PAGE_SIZE>>9))
5422                         return -EINVAL;
5423                 if (mddev->array_sectors & (new_chunk-1))
5424                         /* not factor of array size */
5425                         return -EINVAL;
5426         }
5427
5428         /* They look valid */
5429
5430         if (mddev->raid_disks == 2) {
5431                 /* can make the change immediately */
5432                 if (mddev->new_layout >= 0) {
5433                         conf->algorithm = mddev->new_layout;
5434                         mddev->layout = mddev->new_layout;
5435                 }
5436                 if (new_chunk > 0) {
5437                         conf->chunk_sectors = new_chunk;
5438                         mddev->chunk_sectors = new_chunk;
5439                 }
5440                 set_bit(MD_CHANGE_DEVS, &mddev->flags);
5441                 md_wakeup_thread(mddev->thread);
5442         }
5443         return check_reshape(mddev);
5444 }
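/*
 * Editorial sketch (not driver code): the three chunk-size tests above in
 * one helper, with the 4K-page value hard-coded for illustration:
 */
#if 0
static int new_chunk_valid(unsigned long long array_sectors, int new_chunk)
{
        if (new_chunk & (new_chunk - 1))
                return 0;               /* not a power of 2 */
        if (new_chunk < (4096 >> 9))
                return 0;               /* smaller than one page */
        if (array_sectors & (new_chunk - 1))
                return 0;               /* not a factor of array size */
        return 1;
}
/* e.g. new_chunk_valid(1000000, 64) == 1, new_chunk_valid(1000000, 128) == 0 */
#endif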
5445
5446 static int raid6_check_reshape(mddev_t *mddev)
5447 {
5448         int new_chunk = mddev->new_chunk_sectors;
5449
5450         if (mddev->new_layout >= 0 && !algorithm_valid_raid6(mddev->new_layout))
5451                 return -EINVAL;
5452         if (new_chunk > 0) {
5453                 if (!is_power_of_2(new_chunk))
5454                         return -EINVAL;
5455                 if (new_chunk < (PAGE_SIZE >> 9))
5456                         return -EINVAL;
5457                 if (mddev->array_sectors & (new_chunk-1))
5458                         /* not factor of array size */
5459                         return -EINVAL;
5460         }
5461
5462         /* They look valid */
5463         return check_reshape(mddev);
5464 }
5465
5466 static void *raid5_takeover(mddev_t *mddev)
5467 {
5468         /* raid5 can take over:
5469          *  raid0 - if there is only one strip zone - make it a raid4 layout
5470          *  raid1 - if there are two drives.  We need to know the chunk size
5471          *  raid4 - trivial - just use a raid4 layout.
5472          *  raid6 - provided it is a *_6 layout
5473          */
5474         if (mddev->level == 0)
5475                 return raid45_takeover_raid0(mddev, 5);
5476         if (mddev->level == 1)
5477                 return raid5_takeover_raid1(mddev);
5478         if (mddev->level == 4) {
5479                 mddev->new_layout = ALGORITHM_PARITY_N;
5480                 mddev->new_level = 5;
5481                 return setup_conf(mddev);
5482         }
5483         if (mddev->level == 6)
5484                 return raid5_takeover_raid6(mddev);
5485
5486         return ERR_PTR(-EINVAL);
5487 }
5488
5489 static void *raid4_takeover(mddev_t *mddev)
5490 {
5491         /* raid4 can take over:
5492          *  raid0 - if there is only one strip zone
5493          *  raid5 - if layout is right
5494          */
5495         if (mddev->level == 0)
5496                 return raid45_takeover_raid0(mddev, 4);
5497         if (mddev->level == 5 &&
5498             mddev->layout == ALGORITHM_PARITY_N) {
5499                 mddev->new_layout = 0;
5500                 mddev->new_level = 4;
5501                 return setup_conf(mddev);
5502         }
5503         return ERR_PTR(-EINVAL);
5504 }
5505
5506 static struct mdk_personality raid5_personality;
5507
5508 static void *raid6_takeover(mddev_t *mddev)
5509 {
5510         /* Currently can only take over a raid5.  We map the
5511          * personality to an equivalent raid6 personality
5512          * with the Q block at the end.
5513          */
5514         int new_layout;
5515
5516         if (mddev->pers != &raid5_personality)
5517                 return ERR_PTR(-EINVAL);
5518         if (mddev->degraded > 1)
5519                 return ERR_PTR(-EINVAL);
5520         if (mddev->raid_disks > 253)
5521                 return ERR_PTR(-EINVAL);
5522         if (mddev->raid_disks < 3)
5523                 return ERR_PTR(-EINVAL);
5524
5525         switch (mddev->layout) {
5526         case ALGORITHM_LEFT_ASYMMETRIC:
5527                 new_layout = ALGORITHM_LEFT_ASYMMETRIC_6;
5528                 break;
5529         case ALGORITHM_RIGHT_ASYMMETRIC:
5530                 new_layout = ALGORITHM_RIGHT_ASYMMETRIC_6;
5531                 break;
5532         case ALGORITHM_LEFT_SYMMETRIC:
5533                 new_layout = ALGORITHM_LEFT_SYMMETRIC_6;
5534                 break;
5535         case ALGORITHM_RIGHT_SYMMETRIC:
5536                 new_layout = ALGORITHM_RIGHT_SYMMETRIC_6;
5537                 break;
5538         case ALGORITHM_PARITY_0:
5539                 new_layout = ALGORITHM_PARITY_0_6;
5540                 break;
5541         case ALGORITHM_PARITY_N:
5542                 new_layout = ALGORITHM_PARITY_N;
5543                 break;
5544         default:
5545                 return ERR_PTR(-EINVAL);
5546         }
5547         mddev->new_level = 6;
5548         mddev->new_layout = new_layout;
5549         mddev->delta_disks = 1;
5550         mddev->raid_disks += 1;
5551         return setup_conf(mddev);
5552 }
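/*
 * Editorial sketch (not driver code): a takeover like the one above is
 * normally requested from userspace; with mdadm, converting a raid5 to
 * raid6 would typically be driven along these lines (device name and
 * disk count hypothetical):
 *
 *   mdadm --grow /dev/md0 --level=6 --raid-devices=4
 *
 * md then switches to the *_6 layout chosen above (Q block at the end),
 * and a later reshape can migrate to a standard raid6 layout.
 */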
5553
5554
5555 static struct mdk_personality raid6_personality =
5556 {
5557         .name           = "raid6",
5558         .level          = 6,
5559         .owner          = THIS_MODULE,
5560         .make_request   = make_request,
5561         .run            = run,
5562         .stop           = stop,
5563         .status         = status,
5564         .error_handler  = error,
5565         .hot_add_disk   = raid5_add_disk,
5566         .hot_remove_disk = raid5_remove_disk,
5567         .spare_active   = raid5_spare_active,
5568         .sync_request   = sync_request,
5569         .resize         = raid5_resize,
5570         .size           = raid5_size,
5571         .check_reshape  = raid6_check_reshape,
5572         .start_reshape  = raid5_start_reshape,
5573         .finish_reshape = raid5_finish_reshape,
5574         .quiesce        = raid5_quiesce,
5575         .takeover       = raid6_takeover,
5576 };
5577 static struct mdk_personality raid5_personality =
5578 {
5579         .name           = "raid5",
5580         .level          = 5,
5581         .owner          = THIS_MODULE,
5582         .make_request   = make_request,
5583         .run            = run,
5584         .stop           = stop,
5585         .status         = status,
5586         .error_handler  = error,
5587         .hot_add_disk   = raid5_add_disk,
5588         .hot_remove_disk = raid5_remove_disk,
5589         .spare_active   = raid5_spare_active,
5590         .sync_request   = sync_request,
5591         .resize         = raid5_resize,
5592         .size           = raid5_size,
5593         .check_reshape  = raid5_check_reshape,
5594         .start_reshape  = raid5_start_reshape,
5595         .finish_reshape = raid5_finish_reshape,
5596         .quiesce        = raid5_quiesce,
5597         .takeover       = raid5_takeover,
5598 };
5599
5600 static struct mdk_personality raid4_personality =
5601 {
5602         .name           = "raid4",
5603         .level          = 4,
5604         .owner          = THIS_MODULE,
5605         .make_request   = make_request,
5606         .run            = run,
5607         .stop           = stop,
5608         .status         = status,
5609         .error_handler  = error,
5610         .hot_add_disk   = raid5_add_disk,
5611         .hot_remove_disk = raid5_remove_disk,
5612         .spare_active   = raid5_spare_active,
5613         .sync_request   = sync_request,
5614         .resize         = raid5_resize,
5615         .size           = raid5_size,
5616         .check_reshape  = raid5_check_reshape,
5617         .start_reshape  = raid5_start_reshape,
5618         .finish_reshape = raid5_finish_reshape,
5619         .quiesce        = raid5_quiesce,
5620         .takeover       = raid4_takeover,
5621 };
5622
5623 static int __init raid5_init(void)
5624 {
5625         register_md_personality(&raid6_personality);
5626         register_md_personality(&raid5_personality);
5627         register_md_personality(&raid4_personality);
5628         return 0;
5629 }
5630
5631 static void raid5_exit(void)
5632 {
5633         unregister_md_personality(&raid6_personality);
5634         unregister_md_personality(&raid5_personality);
5635         unregister_md_personality(&raid4_personality);
5636 }
5637
5638 module_init(raid5_init);
5639 module_exit(raid5_exit);
5640 MODULE_LICENSE("GPL");
5641 MODULE_DESCRIPTION("RAID4/5/6 (striping with parity) personality for MD");
5642 MODULE_ALIAS("md-personality-4"); /* RAID5 */
5643 MODULE_ALIAS("md-raid5");
5644 MODULE_ALIAS("md-raid4");
5645 MODULE_ALIAS("md-level-5");
5646 MODULE_ALIAS("md-level-4");
5647 MODULE_ALIAS("md-personality-8"); /* RAID6 */
5648 MODULE_ALIAS("md-raid6");
5649 MODULE_ALIAS("md-level-6");
5650
5651 /* This used to be two separate modules, named: */
5652 MODULE_ALIAS("raid5");
5653 MODULE_ALIAS("raid6");