2 * raid5.c : Multiple Devices driver for Linux
3 * Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman
4 * Copyright (C) 1999, 2000 Ingo Molnar
5 * Copyright (C) 2002, 2003 H. Peter Anvin
7 * RAID-4/5/6 management functions.
8 * Thanks to Penguin Computing for making the RAID-6 development possible
9 * by donating a test server!
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option) any later version.
16 * You should have received a copy of the GNU General Public License
17 * (for example /usr/src/linux/COPYING); if not, write to the Free
18 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24 * The sequencing for updating the bitmap reliably is a little
25 * subtle (and I got it wrong the first time) so it deserves some explanation.
28 * We group bitmap updates into batches. Each batch has a number.
29 * We may write out several batches at once, but that isn't very important.
30 * conf->bm_write is the number of the last batch successfully written.
31 * conf->bm_flush is the number of the last batch that was closed to new additions.
33 * When we discover that we will need to write to any block in a stripe
34 * (in add_stripe_bio) we update the in-memory bitmap and record in sh->bm_seq
35 * the number of the batch it will be in. This is bm_flush+1.
36 * When we are ready to do a write, if that batch hasn't been written yet,
37 * we plug the array and queue the stripe for later.
38 * When an unplug happens, we increment bm_flush, thus closing the current batch.
40 * When we notice that bm_flush > bm_write, we write out all pending updates
41 * to the bitmap, and advance bm_write to where bm_flush was.
42 * This may occasionally write a bit out twice, but is sure never to lose updates.
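 *
 * For example (an illustrative walk-through of the scheme above): suppose
 * bm_write == bm_flush == 7.  A new write that dirties bitmap bits gets
 * sh->bm_seq == 8 and, because batch 8 has not been written yet, its stripe
 * is queued on bitmap_list and the array is plugged.  On unplug, bm_flush
 * becomes 8; we then notice bm_flush > bm_write, write out the pending
 * bitmap updates, set bm_write = 8, and only then let the bm_seq == 8
 * stripes proceed with their writes.
 */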
46 #include <linux/blkdev.h>
47 #include <linux/kthread.h>
48 #include <linux/raid/pq.h>
49 #include <linux/async_tx.h>
50 #include <linux/async.h>
51 #include <linux/seq_file.h>
52 #include <linux/cpu.h>
61 #define NR_STRIPES 256
62 #define STRIPE_SIZE PAGE_SIZE
63 #define STRIPE_SHIFT (PAGE_SHIFT - 9)
64 #define STRIPE_SECTORS (STRIPE_SIZE>>9)
65 #define IO_THRESHOLD 1
66 #define BYPASS_THRESHOLD 1
67 #define NR_HASH (PAGE_SIZE / sizeof(struct hlist_head))
68 #define HASH_MASK (NR_HASH - 1)
70 #define stripe_hash(conf, sect) (&((conf)->stripe_hashtbl[((sect) >> STRIPE_SHIFT) & HASH_MASK]))
72 /* bio's attached to a stripe+device for I/O are linked together in bi_sector
73 * order without overlap. There may be several bio's per stripe+device, and
74 * a bio could span several devices.
75 * When walking this list for a particular stripe+device, we must never proceed
76 * beyond a bio that extends past this device, as the next bio might no longer be valid.
78 * This macro is used to determine the 'next' bio in the list, given the sector
79 * of the current stripe+device
81 #define r5_next_bio(bio, sect) ( ( (bio)->bi_sector + ((bio)->bi_size>>9) < sect + STRIPE_SECTORS) ? (bio)->bi_next : NULL)
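/* A typical walk over such a chain (a sketch based on the loops used later
 * in this file; 'dev' is the r5dev being serviced):
 *
 *	for (bi = dev->toread;
 *	     bi && bi->bi_sector < dev->sector + STRIPE_SECTORS;
 *	     bi = r5_next_bio(bi, dev->sector))
 *		handle one bio ... ;
 */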
83 * The following can be used to debug the driver
85 #define RAID5_PARANOIA 1
86 #if RAID5_PARANOIA && defined(CONFIG_SMP)
87 # define CHECK_DEVLOCK() assert_spin_locked(&conf->device_lock)
89 # define CHECK_DEVLOCK()
97 #define printk_rl(args...) ((void) (printk_ratelimit() && printk(args)))
100 * We maintain a biased count of active stripes in the bottom 16 bits of
101 * bi_phys_segments, and a count of processed stripes in the upper 16 bits
103 static inline int raid5_bi_phys_segments(struct bio *bio)
105 return bio->bi_phys_segments & 0xffff;
108 static inline int raid5_bi_hw_segments(struct bio *bio)
110 return (bio->bi_phys_segments >> 16) & 0xffff;
113 static inline int raid5_dec_bi_phys_segments(struct bio *bio)
115 --bio->bi_phys_segments;
116 return raid5_bi_phys_segments(bio);
119 static inline int raid5_dec_bi_hw_segments(struct bio *bio)
121 unsigned short val = raid5_bi_hw_segments(bio);
124 bio->bi_phys_segments = (val << 16) | raid5_bi_phys_segments(bio);
128 static inline void raid5_set_bi_hw_segments(struct bio *bio, unsigned int cnt)
130 bio->bi_phys_segments = raid5_bi_phys_segments(bio) | (cnt << 16);
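/* Packing illustration (not part of the original code): a bi_phys_segments
 * value of 0x00030002 means the "hw" half (processed stripes) is 3 and the
 * "phys" half (biased count of active stripe references) is 2; the bio can
 * only be completed once raid5_dec_bi_phys_segments() brings the active
 * half down to zero.
 */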
133 /* Find first data disk in a raid6 stripe */
134 static inline int raid6_d0(struct stripe_head *sh)
137 /* ddf always starts from the first device */
139 /* md starts just after Q block */
140 if (sh->qd_idx == sh->disks - 1)
143 return sh->qd_idx + 1;
145 static inline int raid6_next_disk(int disk, int raid_disks)
148 return (disk < raid_disks) ? disk : 0;
151 /* When walking through the disks of a raid6 stripe, starting at raid6_d0,
152 * we need to map each disk to a 'slot', where the data disks are slot
153 * 0 .. raid_disks-3, the parity disk is raid_disks-2 and the Q disk
154 * is raid_disks-1. This helper does that mapping.
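 *
 * For example (md layout, 5 devices, pd_idx == 3, qd_idx == 4, so
 * syndrome_disks == 3): walking from raid6_d0(), data disks 0, 1 and 2 map
 * to slots 0, 1 and 2, the P disk maps to slot 3 (== syndrome_disks) and
 * the Q disk to slot 4 (== syndrome_disks + 1).
 */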
156 static int raid6_idx_to_slot(int idx, struct stripe_head *sh,
157 int *count, int syndrome_disks)
161 if (idx == sh->pd_idx)
162 return syndrome_disks;
163 if (idx == sh->qd_idx)
164 return syndrome_disks + 1;
169 static void return_io(struct bio *return_bi)
171 struct bio *bi = return_bi;
174 return_bi = bi->bi_next;
182 static void print_raid5_conf (raid5_conf_t *conf);
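/* Returns non-zero while any asynchronous stripe operation (check,
 * reconstruct, biofill or compute) is still in flight for this stripe_head.
 */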
184 static int stripe_operations_active(struct stripe_head *sh)
186 return sh->check_state || sh->reconstruct_state ||
187 test_bit(STRIPE_BIOFILL_RUN, &sh->state) ||
188 test_bit(STRIPE_COMPUTE_RUN, &sh->state);
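/* Drop one reference on a stripe and, if it was the last, put the stripe
 * back on the appropriate list.  The caller must hold conf->device_lock;
 * release_stripe() below is the locking wrapper.
 */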
191 static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
193 if (atomic_dec_and_test(&sh->count)) {
194 BUG_ON(!list_empty(&sh->lru));
195 BUG_ON(atomic_read(&conf->active_stripes)==0);
196 if (test_bit(STRIPE_HANDLE, &sh->state)) {
197 if (test_bit(STRIPE_DELAYED, &sh->state)) {
198 list_add_tail(&sh->lru, &conf->delayed_list);
199 blk_plug_device(conf->mddev->queue);
200 } else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
201 sh->bm_seq - conf->seq_write > 0) {
202 list_add_tail(&sh->lru, &conf->bitmap_list);
203 blk_plug_device(conf->mddev->queue);
205 clear_bit(STRIPE_BIT_DELAY, &sh->state);
206 list_add_tail(&sh->lru, &conf->handle_list);
208 md_wakeup_thread(conf->mddev->thread);
210 BUG_ON(stripe_operations_active(sh));
211 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
212 atomic_dec(&conf->preread_active_stripes);
213 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD)
214 md_wakeup_thread(conf->mddev->thread);
216 atomic_dec(&conf->active_stripes);
217 if (!test_bit(STRIPE_EXPANDING, &sh->state)) {
218 list_add_tail(&sh->lru, &conf->inactive_list);
219 wake_up(&conf->wait_for_stripe);
220 if (conf->retry_read_aligned)
221 md_wakeup_thread(conf->mddev->thread);
227 static void release_stripe(struct stripe_head *sh)
229 raid5_conf_t *conf = sh->raid_conf;
232 spin_lock_irqsave(&conf->device_lock, flags);
233 __release_stripe(conf, sh);
234 spin_unlock_irqrestore(&conf->device_lock, flags);
237 static inline void remove_hash(struct stripe_head *sh)
239 pr_debug("remove_hash(), stripe %llu\n",
240 (unsigned long long)sh->sector);
242 hlist_del_init(&sh->hash);
245 static inline void insert_hash(raid5_conf_t *conf, struct stripe_head *sh)
247 struct hlist_head *hp = stripe_hash(conf, sh->sector);
249 pr_debug("insert_hash(), stripe %llu\n",
250 (unsigned long long)sh->sector);
253 hlist_add_head(&sh->hash, hp);
257 /* find an idle stripe, make sure it is unhashed, and return it. */
258 static struct stripe_head *get_free_stripe(raid5_conf_t *conf)
260 struct stripe_head *sh = NULL;
261 struct list_head *first;
264 if (list_empty(&conf->inactive_list))
266 first = conf->inactive_list.next;
267 sh = list_entry(first, struct stripe_head, lru);
268 list_del_init(first);
270 atomic_inc(&conf->active_stripes);
275 static void shrink_buffers(struct stripe_head *sh, int num)
280 for (i=0; i<num ; i++) {
284 sh->dev[i].page = NULL;
289 static int grow_buffers(struct stripe_head *sh, int num)
293 for (i=0; i<num; i++) {
296 if (!(page = alloc_page(GFP_KERNEL))) {
299 sh->dev[i].page = page;
304 static void raid5_build_block(struct stripe_head *sh, int i, int previous);
305 static void stripe_set_idx(sector_t stripe, raid5_conf_t *conf, int previous,
306 struct stripe_head *sh);
308 static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)
310 raid5_conf_t *conf = sh->raid_conf;
313 BUG_ON(atomic_read(&sh->count) != 0);
314 BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
315 BUG_ON(stripe_operations_active(sh));
318 pr_debug("init_stripe called, stripe %llu\n",
319 (unsigned long long)sh->sector);
323 sh->generation = conf->generation - previous;
324 sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks;
326 stripe_set_idx(sector, conf, previous, sh);
330 for (i = sh->disks; i--; ) {
331 struct r5dev *dev = &sh->dev[i];
333 if (dev->toread || dev->read || dev->towrite || dev->written ||
334 test_bit(R5_LOCKED, &dev->flags)) {
335 printk(KERN_ERR "sector=%llx i=%d %p %p %p %p %d\n",
336 (unsigned long long)sh->sector, i, dev->toread,
337 dev->read, dev->towrite, dev->written,
338 test_bit(R5_LOCKED, &dev->flags));
342 raid5_build_block(sh, i, previous);
344 insert_hash(conf, sh);
347 static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector,
350 struct stripe_head *sh;
351 struct hlist_node *hn;
354 pr_debug("__find_stripe, sector %llu\n", (unsigned long long)sector);
355 hlist_for_each_entry(sh, hn, stripe_hash(conf, sector), hash)
356 if (sh->sector == sector && sh->generation == generation)
358 pr_debug("__stripe %llu not in cache\n", (unsigned long long)sector);
362 static void unplug_slaves(mddev_t *mddev);
363 static void raid5_unplug_device(struct request_queue *q);
365 static struct stripe_head *
366 get_active_stripe(raid5_conf_t *conf, sector_t sector,
367 int previous, int noblock, int noquiesce)
369 struct stripe_head *sh;
371 pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector);
373 spin_lock_irq(&conf->device_lock);
376 wait_event_lock_irq(conf->wait_for_stripe,
377 conf->quiesce == 0 || noquiesce,
378 conf->device_lock, /* nothing */);
379 sh = __find_stripe(conf, sector, conf->generation - previous);
381 if (!conf->inactive_blocked)
382 sh = get_free_stripe(conf);
383 if (noblock && sh == NULL)
386 conf->inactive_blocked = 1;
387 wait_event_lock_irq(conf->wait_for_stripe,
388 !list_empty(&conf->inactive_list) &&
389 (atomic_read(&conf->active_stripes)
390 < (conf->max_nr_stripes *3/4)
391 || !conf->inactive_blocked),
393 raid5_unplug_device(conf->mddev->queue)
395 conf->inactive_blocked = 0;
397 init_stripe(sh, sector, previous);
399 if (atomic_read(&sh->count)) {
400 BUG_ON(!list_empty(&sh->lru)
401 && !test_bit(STRIPE_EXPANDING, &sh->state));
403 if (!test_bit(STRIPE_HANDLE, &sh->state))
404 atomic_inc(&conf->active_stripes);
405 if (list_empty(&sh->lru) &&
406 !test_bit(STRIPE_EXPANDING, &sh->state))
408 list_del_init(&sh->lru);
411 } while (sh == NULL);
414 atomic_inc(&sh->count);
416 spin_unlock_irq(&conf->device_lock);
421 raid5_end_read_request(struct bio *bi, int error);
423 raid5_end_write_request(struct bio *bi, int error);
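/* Submit the low-level I/O that the stripe state machine has scheduled:
 * every device flagged R5_Wantwrite or R5_Wantread gets its embedded bio
 * issued against the backing rdev via generic_make_request().
 */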
425 static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
427 raid5_conf_t *conf = sh->raid_conf;
428 int i, disks = sh->disks;
432 for (i = disks; i--; ) {
436 if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags))
438 else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
443 bi = &sh->dev[i].req;
447 bi->bi_end_io = raid5_end_write_request;
449 bi->bi_end_io = raid5_end_read_request;
452 rdev = rcu_dereference(conf->disks[i].rdev);
453 if (rdev && test_bit(Faulty, &rdev->flags))
456 atomic_inc(&rdev->nr_pending);
460 if (s->syncing || s->expanding || s->expanded)
461 md_sync_acct(rdev->bdev, STRIPE_SECTORS);
463 set_bit(STRIPE_IO_STARTED, &sh->state);
465 bi->bi_bdev = rdev->bdev;
466 pr_debug("%s: for %llu schedule op %ld on disc %d\n",
467 __func__, (unsigned long long)sh->sector,
469 atomic_inc(&sh->count);
470 bi->bi_sector = sh->sector + rdev->data_offset;
471 bi->bi_flags = 1 << BIO_UPTODATE;
475 bi->bi_io_vec = &sh->dev[i].vec;
476 bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
477 bi->bi_io_vec[0].bv_offset = 0;
478 bi->bi_size = STRIPE_SIZE;
481 test_bit(R5_ReWrite, &sh->dev[i].flags))
482 atomic_add(STRIPE_SECTORS,
483 &rdev->corrected_errors);
484 generic_make_request(bi);
487 set_bit(STRIPE_DEGRADED, &sh->state);
488 pr_debug("skip op %ld on disc %d for sector %llu\n",
489 bi->bi_rw, i, (unsigned long long)sh->sector);
490 clear_bit(R5_LOCKED, &sh->dev[i].flags);
491 set_bit(STRIPE_HANDLE, &sh->state);
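/* Copy data between a bio and a stripe cache page with the async_tx API;
 * 'frombio' selects the direction (bio -> page when draining writes,
 * page -> bio when filling reads).
 */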
496 static struct dma_async_tx_descriptor *
497 async_copy_data(int frombio, struct bio *bio, struct page *page,
498 sector_t sector, struct dma_async_tx_descriptor *tx)
501 struct page *bio_page;
504 struct async_submit_ctl submit;
505 enum async_tx_flags flags = 0;
507 if (bio->bi_sector >= sector)
508 page_offset = (signed)(bio->bi_sector - sector) * 512;
510 page_offset = (signed)(sector - bio->bi_sector) * -512;
513 flags |= ASYNC_TX_FENCE;
514 init_async_submit(&submit, flags, tx, NULL, NULL, NULL);
516 bio_for_each_segment(bvl, bio, i) {
517 int len = bio_iovec_idx(bio, i)->bv_len;
521 if (page_offset < 0) {
522 b_offset = -page_offset;
523 page_offset += b_offset;
527 if (len > 0 && page_offset + len > STRIPE_SIZE)
528 clen = STRIPE_SIZE - page_offset;
533 b_offset += bio_iovec_idx(bio, i)->bv_offset;
534 bio_page = bio_iovec_idx(bio, i)->bv_page;
536 tx = async_memcpy(page, bio_page, page_offset,
537 b_offset, clen, &submit);
539 tx = async_memcpy(bio_page, page, b_offset,
540 page_offset, clen, &submit);
542 /* chain the operations */
543 submit.depend_tx = tx;
545 if (clen < len) /* hit end of page */
553 static void ops_complete_biofill(void *stripe_head_ref)
555 struct stripe_head *sh = stripe_head_ref;
556 struct bio *return_bi = NULL;
557 raid5_conf_t *conf = sh->raid_conf;
560 pr_debug("%s: stripe %llu\n", __func__,
561 (unsigned long long)sh->sector);
563 /* clear completed biofills */
564 spin_lock_irq(&conf->device_lock);
565 for (i = sh->disks; i--; ) {
566 struct r5dev *dev = &sh->dev[i];
568 /* acknowledge completion of a biofill operation */
569 /* and check if we need to reply to a read request,
570 * new R5_Wantfill requests are held off until
571 * !STRIPE_BIOFILL_RUN
573 if (test_and_clear_bit(R5_Wantfill, &dev->flags)) {
574 struct bio *rbi, *rbi2;
579 while (rbi && rbi->bi_sector <
580 dev->sector + STRIPE_SECTORS) {
581 rbi2 = r5_next_bio(rbi, dev->sector);
582 if (!raid5_dec_bi_phys_segments(rbi)) {
583 rbi->bi_next = return_bi;
590 spin_unlock_irq(&conf->device_lock);
591 clear_bit(STRIPE_BIOFILL_RUN, &sh->state);
593 return_io(return_bi);
595 set_bit(STRIPE_HANDLE, &sh->state);
599 static void ops_run_biofill(struct stripe_head *sh)
601 struct dma_async_tx_descriptor *tx = NULL;
602 raid5_conf_t *conf = sh->raid_conf;
603 struct async_submit_ctl submit;
606 pr_debug("%s: stripe %llu\n", __func__,
607 (unsigned long long)sh->sector);
609 for (i = sh->disks; i--; ) {
610 struct r5dev *dev = &sh->dev[i];
611 if (test_bit(R5_Wantfill, &dev->flags)) {
613 spin_lock_irq(&conf->device_lock);
614 dev->read = rbi = dev->toread;
616 spin_unlock_irq(&conf->device_lock);
617 while (rbi && rbi->bi_sector <
618 dev->sector + STRIPE_SECTORS) {
619 tx = async_copy_data(0, rbi, dev->page,
621 rbi = r5_next_bio(rbi, dev->sector);
626 atomic_inc(&sh->count);
627 init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_biofill, sh, NULL);
628 async_trigger_callback(&submit);
631 static void mark_target_uptodate(struct stripe_head *sh, int target)
638 tgt = &sh->dev[target];
639 set_bit(R5_UPTODATE, &tgt->flags);
640 BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
641 clear_bit(R5_Wantcompute, &tgt->flags);
644 static void ops_complete_compute(void *stripe_head_ref)
646 struct stripe_head *sh = stripe_head_ref;
648 pr_debug("%s: stripe %llu\n", __func__,
649 (unsigned long long)sh->sector);
651 /* mark the computed target(s) as uptodate */
652 mark_target_uptodate(sh, sh->ops.target);
653 mark_target_uptodate(sh, sh->ops.target2);
655 clear_bit(STRIPE_COMPUTE_RUN, &sh->state);
656 if (sh->check_state == check_state_compute_run)
657 sh->check_state = check_state_compute_result;
658 set_bit(STRIPE_HANDLE, &sh->state);
662 /* return a pointer to the address conversion region of the scribble buffer */
663 static addr_conv_t *to_addr_conv(struct stripe_head *sh,
664 struct raid5_percpu *percpu)
666 return percpu->scribble + sizeof(struct page *) * (sh->disks + 2);
669 static struct dma_async_tx_descriptor *
670 ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu)
672 int disks = sh->disks;
673 struct page **xor_srcs = percpu->scribble;
674 int target = sh->ops.target;
675 struct r5dev *tgt = &sh->dev[target];
676 struct page *xor_dest = tgt->page;
678 struct dma_async_tx_descriptor *tx;
679 struct async_submit_ctl submit;
682 pr_debug("%s: stripe %llu block: %d\n",
683 __func__, (unsigned long long)sh->sector, target);
684 BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
686 for (i = disks; i--; )
688 xor_srcs[count++] = sh->dev[i].page;
690 atomic_inc(&sh->count);
692 init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, NULL,
693 ops_complete_compute, sh, to_addr_conv(sh, percpu));
694 if (unlikely(count == 1))
695 tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit);
697 tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);
702 /* set_syndrome_sources - populate source buffers for gen_syndrome
703 * @srcs - (struct page *) array of size sh->disks
704 * @sh - stripe_head to parse
706 * Populates srcs in proper layout order for the stripe and returns the
707 * 'count' of sources to be used in a call to async_gen_syndrome. The P
708 * destination buffer is recorded in srcs[count] and the Q destination
709 * is recorded in srcs[count+1].
711 static int set_syndrome_sources(struct page **srcs, struct stripe_head *sh)
713 int disks = sh->disks;
714 int syndrome_disks = sh->ddf_layout ? disks : (disks - 2);
715 int d0_idx = raid6_d0(sh);
719 for (i = 0; i < disks; i++)
720 srcs[i] = (void *)raid6_empty_zero_page;
725 int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);
727 srcs[slot] = sh->dev[i].page;
728 i = raid6_next_disk(i, disks);
729 } while (i != d0_idx);
730 BUG_ON(count != syndrome_disks);
735 static struct dma_async_tx_descriptor *
736 ops_run_compute6_1(struct stripe_head *sh, struct raid5_percpu *percpu)
738 int disks = sh->disks;
739 struct page **blocks = percpu->scribble;
741 int qd_idx = sh->qd_idx;
742 struct dma_async_tx_descriptor *tx;
743 struct async_submit_ctl submit;
749 if (sh->ops.target < 0)
750 target = sh->ops.target2;
751 else if (sh->ops.target2 < 0)
752 target = sh->ops.target;
754 /* we should only have one valid target */
757 pr_debug("%s: stripe %llu block: %d\n",
758 __func__, (unsigned long long)sh->sector, target);
760 tgt = &sh->dev[target];
761 BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
764 atomic_inc(&sh->count);
766 if (target == qd_idx) {
767 count = set_syndrome_sources(blocks, sh);
768 blocks[count] = NULL; /* regenerating p is not necessary */
769 BUG_ON(blocks[count+1] != dest); /* q should already be set */
770 init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
771 ops_complete_compute, sh,
772 to_addr_conv(sh, percpu));
773 tx = async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit);
775 /* Compute any data- or p-drive using XOR */
777 for (i = disks; i-- ; ) {
778 if (i == target || i == qd_idx)
780 blocks[count++] = sh->dev[i].page;
783 init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST,
784 NULL, ops_complete_compute, sh,
785 to_addr_conv(sh, percpu));
786 tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE, &submit);
792 static struct dma_async_tx_descriptor *
793 ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu)
795 int i, count, disks = sh->disks;
796 int syndrome_disks = sh->ddf_layout ? disks : disks-2;
797 int d0_idx = raid6_d0(sh);
798 int faila = -1, failb = -1;
799 int target = sh->ops.target;
800 int target2 = sh->ops.target2;
801 struct r5dev *tgt = &sh->dev[target];
802 struct r5dev *tgt2 = &sh->dev[target2];
803 struct dma_async_tx_descriptor *tx;
804 struct page **blocks = percpu->scribble;
805 struct async_submit_ctl submit;
807 pr_debug("%s: stripe %llu block1: %d block2: %d\n",
808 __func__, (unsigned long long)sh->sector, target, target2);
809 BUG_ON(target < 0 || target2 < 0);
810 BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
811 BUG_ON(!test_bit(R5_Wantcompute, &tgt2->flags));
813 /* we need to open-code set_syndrome_sources to handle the
814 * slot number conversion for 'faila' and 'failb'
816 for (i = 0; i < disks ; i++)
817 blocks[i] = (void *)raid6_empty_zero_page;
821 int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);
823 blocks[slot] = sh->dev[i].page;
829 i = raid6_next_disk(i, disks);
830 } while (i != d0_idx);
831 BUG_ON(count != syndrome_disks);
833 BUG_ON(faila == failb);
836 pr_debug("%s: stripe: %llu faila: %d failb: %d\n",
837 __func__, (unsigned long long)sh->sector, faila, failb);
839 atomic_inc(&sh->count);
841 if (failb == syndrome_disks+1) {
842 /* Q disk is one of the missing disks */
843 if (faila == syndrome_disks) {
844 /* Missing P+Q, just recompute */
845 init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
846 ops_complete_compute, sh,
847 to_addr_conv(sh, percpu));
848 return async_gen_syndrome(blocks, 0, count+2,
849 STRIPE_SIZE, &submit);
853 int qd_idx = sh->qd_idx;
855 /* Missing D+Q: recompute D from P, then recompute Q */
856 if (target == qd_idx)
857 data_target = target2;
859 data_target = target;
862 for (i = disks; i-- ; ) {
863 if (i == data_target || i == qd_idx)
865 blocks[count++] = sh->dev[i].page;
867 dest = sh->dev[data_target].page;
868 init_async_submit(&submit,
869 ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST,
871 to_addr_conv(sh, percpu));
872 tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE,
875 count = set_syndrome_sources(blocks, sh);
876 init_async_submit(&submit, ASYNC_TX_FENCE, tx,
877 ops_complete_compute, sh,
878 to_addr_conv(sh, percpu));
879 return async_gen_syndrome(blocks, 0, count+2,
880 STRIPE_SIZE, &submit);
884 init_async_submit(&submit, ASYNC_TX_FENCE, NULL, ops_complete_compute,
885 sh, to_addr_conv(sh, percpu));
886 if (failb == syndrome_disks) {
887 /* We're missing D+P. */
888 return async_raid6_datap_recov(syndrome_disks+2, STRIPE_SIZE,
889 faila, blocks, &submit);
891 /* We're missing D+D. */
892 return async_raid6_2data_recov(syndrome_disks+2, STRIPE_SIZE,
893 faila, failb, blocks, &submit);
898 static void ops_complete_prexor(void *stripe_head_ref)
900 struct stripe_head *sh = stripe_head_ref;
902 pr_debug("%s: stripe %llu\n", __func__,
903 (unsigned long long)sh->sector);
906 static struct dma_async_tx_descriptor *
907 ops_run_prexor(struct stripe_head *sh, struct raid5_percpu *percpu,
908 struct dma_async_tx_descriptor *tx)
910 int disks = sh->disks;
911 struct page **xor_srcs = percpu->scribble;
912 int count = 0, pd_idx = sh->pd_idx, i;
913 struct async_submit_ctl submit;
915 /* existing parity data subtracted */
916 struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
918 pr_debug("%s: stripe %llu\n", __func__,
919 (unsigned long long)sh->sector);
921 for (i = disks; i--; ) {
922 struct r5dev *dev = &sh->dev[i];
923 /* Only process blocks that are known to be uptodate */
924 if (test_bit(R5_Wantdrain, &dev->flags))
925 xor_srcs[count++] = dev->page;
928 init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
929 ops_complete_prexor, sh, to_addr_conv(sh, percpu));
930 tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);
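/* Drain queued write data into the stripe cache: for every device flagged
 * R5_Wantdrain, move the bios from ->towrite to ->written and copy their
 * payload into the corresponding stripe page.
 */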
935 static struct dma_async_tx_descriptor *
936 ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
938 int disks = sh->disks;
941 pr_debug("%s: stripe %llu\n", __func__,
942 (unsigned long long)sh->sector);
944 for (i = disks; i--; ) {
945 struct r5dev *dev = &sh->dev[i];
948 if (test_and_clear_bit(R5_Wantdrain, &dev->flags)) {
951 spin_lock(&sh->lock);
952 chosen = dev->towrite;
954 BUG_ON(dev->written);
955 wbi = dev->written = chosen;
956 spin_unlock(&sh->lock);
958 while (wbi && wbi->bi_sector <
959 dev->sector + STRIPE_SECTORS) {
960 tx = async_copy_data(1, wbi, dev->page,
962 wbi = r5_next_bio(wbi, dev->sector);
970 static void ops_complete_reconstruct(void *stripe_head_ref)
972 struct stripe_head *sh = stripe_head_ref;
973 int disks = sh->disks;
974 int pd_idx = sh->pd_idx;
975 int qd_idx = sh->qd_idx;
978 pr_debug("%s: stripe %llu\n", __func__,
979 (unsigned long long)sh->sector);
981 for (i = disks; i--; ) {
982 struct r5dev *dev = &sh->dev[i];
984 if (dev->written || i == pd_idx || i == qd_idx)
985 set_bit(R5_UPTODATE, &dev->flags);
988 if (sh->reconstruct_state == reconstruct_state_drain_run)
989 sh->reconstruct_state = reconstruct_state_drain_result;
990 else if (sh->reconstruct_state == reconstruct_state_prexor_drain_run)
991 sh->reconstruct_state = reconstruct_state_prexor_drain_result;
993 BUG_ON(sh->reconstruct_state != reconstruct_state_run);
994 sh->reconstruct_state = reconstruct_state_result;
997 set_bit(STRIPE_HANDLE, &sh->state);
1002 ops_run_reconstruct5(struct stripe_head *sh, struct raid5_percpu *percpu,
1003 struct dma_async_tx_descriptor *tx)
1005 int disks = sh->disks;
1006 struct page **xor_srcs = percpu->scribble;
1007 struct async_submit_ctl submit;
1008 int count = 0, pd_idx = sh->pd_idx, i;
1009 struct page *xor_dest;
1011 unsigned long flags;
1013 pr_debug("%s: stripe %llu\n", __func__,
1014 (unsigned long long)sh->sector);
1016 /* check if prexor is active which means only process blocks
1017 * that are part of a read-modify-write (written)
1019 if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) {
1021 xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
1022 for (i = disks; i--; ) {
1023 struct r5dev *dev = &sh->dev[i];
1025 xor_srcs[count++] = dev->page;
1028 xor_dest = sh->dev[pd_idx].page;
1029 for (i = disks; i--; ) {
1030 struct r5dev *dev = &sh->dev[i];
1032 xor_srcs[count++] = dev->page;
1036 /* 1/ if we prexor'd then the dest is reused as a source
1037 * 2/ if we did not prexor then we are redoing the parity
1038 * set ASYNC_TX_XOR_DROP_DST and ASYNC_TX_XOR_ZERO_DST
1039 * for the synchronous xor case
1041 flags = ASYNC_TX_ACK |
1042 (prexor ? ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST);
1044 atomic_inc(&sh->count);
1046 init_async_submit(&submit, flags, tx, ops_complete_reconstruct, sh,
1047 to_addr_conv(sh, percpu));
1048 if (unlikely(count == 1))
1049 tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit);
1051 tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);
1055 ops_run_reconstruct6(struct stripe_head *sh, struct raid5_percpu *percpu,
1056 struct dma_async_tx_descriptor *tx)
1058 struct async_submit_ctl submit;
1059 struct page **blocks = percpu->scribble;
1062 pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector);
1064 count = set_syndrome_sources(blocks, sh);
1066 atomic_inc(&sh->count);
1068 init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_reconstruct,
1069 sh, to_addr_conv(sh, percpu));
1070 async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit);
1073 static void ops_complete_check(void *stripe_head_ref)
1075 struct stripe_head *sh = stripe_head_ref;
1077 pr_debug("%s: stripe %llu\n", __func__,
1078 (unsigned long long)sh->sector);
1080 sh->check_state = check_state_check_result;
1081 set_bit(STRIPE_HANDLE, &sh->state);
1085 static void ops_run_check_p(struct stripe_head *sh, struct raid5_percpu *percpu)
1087 int disks = sh->disks;
1088 int pd_idx = sh->pd_idx;
1089 int qd_idx = sh->qd_idx;
1090 struct page *xor_dest;
1091 struct page **xor_srcs = percpu->scribble;
1092 struct dma_async_tx_descriptor *tx;
1093 struct async_submit_ctl submit;
1097 pr_debug("%s: stripe %llu\n", __func__,
1098 (unsigned long long)sh->sector);
1101 xor_dest = sh->dev[pd_idx].page;
1102 xor_srcs[count++] = xor_dest;
1103 for (i = disks; i--; ) {
1104 if (i == pd_idx || i == qd_idx)
1106 xor_srcs[count++] = sh->dev[i].page;
1109 init_async_submit(&submit, 0, NULL, NULL, NULL,
1110 to_addr_conv(sh, percpu));
1111 tx = async_xor_val(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
1112 &sh->ops.zero_sum_result, &submit);
1114 atomic_inc(&sh->count);
1115 init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_check, sh, NULL);
1116 tx = async_trigger_callback(&submit);
1119 static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu, int checkp)
1121 struct page **srcs = percpu->scribble;
1122 struct async_submit_ctl submit;
1125 pr_debug("%s: stripe %llu checkp: %d\n", __func__,
1126 (unsigned long long)sh->sector, checkp);
1128 count = set_syndrome_sources(srcs, sh);
1132 atomic_inc(&sh->count);
1133 init_async_submit(&submit, ASYNC_TX_ACK, NULL, ops_complete_check,
1134 sh, to_addr_conv(sh, percpu));
1135 async_syndrome_val(srcs, 0, count+2, STRIPE_SIZE,
1136 &sh->ops.zero_sum_result, percpu->spare_page, &submit);
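/* Dispatch, in order, every asynchronous operation requested for this
 * stripe: biofill, compute, prexor, biodrain, reconstruct and check, with
 * each step chained to the previous dma descriptor.
 */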
1139 static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
1141 int overlap_clear = 0, i, disks = sh->disks;
1142 struct dma_async_tx_descriptor *tx = NULL;
1143 raid5_conf_t *conf = sh->raid_conf;
1144 int level = conf->level;
1145 struct raid5_percpu *percpu;
1149 percpu = per_cpu_ptr(conf->percpu, cpu);
1150 if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) {
1151 ops_run_biofill(sh);
1155 if (test_bit(STRIPE_OP_COMPUTE_BLK, &ops_request)) {
1157 tx = ops_run_compute5(sh, percpu);
1159 if (sh->ops.target2 < 0 || sh->ops.target < 0)
1160 tx = ops_run_compute6_1(sh, percpu);
1162 tx = ops_run_compute6_2(sh, percpu);
1164 /* terminate the chain if reconstruct is not set to be run */
1165 if (tx && !test_bit(STRIPE_OP_RECONSTRUCT, &ops_request))
1169 if (test_bit(STRIPE_OP_PREXOR, &ops_request))
1170 tx = ops_run_prexor(sh, percpu, tx);
1172 if (test_bit(STRIPE_OP_BIODRAIN, &ops_request)) {
1173 tx = ops_run_biodrain(sh, tx);
1177 if (test_bit(STRIPE_OP_RECONSTRUCT, &ops_request)) {
1179 ops_run_reconstruct5(sh, percpu, tx);
1181 ops_run_reconstruct6(sh, percpu, tx);
1184 if (test_bit(STRIPE_OP_CHECK, &ops_request)) {
1185 if (sh->check_state == check_state_run)
1186 ops_run_check_p(sh, percpu);
1187 else if (sh->check_state == check_state_run_q)
1188 ops_run_check_pq(sh, percpu, 0);
1189 else if (sh->check_state == check_state_run_pq)
1190 ops_run_check_pq(sh, percpu, 1);
1196 for (i = disks; i--; ) {
1197 struct r5dev *dev = &sh->dev[i];
1198 if (test_and_clear_bit(R5_Overlap, &dev->flags))
1199 wake_up(&sh->raid_conf->wait_for_overlap);
1204 static int grow_one_stripe(raid5_conf_t *conf)
1206 struct stripe_head *sh;
1207 sh = kmem_cache_alloc(conf->slab_cache, GFP_KERNEL);
1210 memset(sh, 0, sizeof(*sh) + (conf->raid_disks-1)*sizeof(struct r5dev));
1211 sh->raid_conf = conf;
1212 spin_lock_init(&sh->lock);
1214 if (grow_buffers(sh, conf->raid_disks)) {
1215 shrink_buffers(sh, conf->raid_disks);
1216 kmem_cache_free(conf->slab_cache, sh);
1219 sh->disks = conf->raid_disks;
1220 /* we just created an active stripe so... */
1221 atomic_set(&sh->count, 1);
1222 atomic_inc(&conf->active_stripes);
1223 INIT_LIST_HEAD(&sh->lru);
1228 static int grow_stripes(raid5_conf_t *conf, int num)
1230 struct kmem_cache *sc;
1231 int devs = conf->raid_disks;
1233 sprintf(conf->cache_name[0],
1234 "raid%d-%s", conf->level, mdname(conf->mddev));
1235 sprintf(conf->cache_name[1],
1236 "raid%d-%s-alt", conf->level, mdname(conf->mddev));
1237 conf->active_name = 0;
1238 sc = kmem_cache_create(conf->cache_name[conf->active_name],
1239 sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev),
1243 conf->slab_cache = sc;
1244 conf->pool_size = devs;
1246 if (!grow_one_stripe(conf))
1252 * scribble_len - return the required size of the scribble region
1253 * @num - total number of disks in the array
1255 * The size must be enough to contain:
1256 * 1/ a struct page pointer for each device in the array +2
1257 * 2/ room to convert each entry in (1) to its corresponding dma
1258 * (dma_map_page()) or page (page_address()) address.
1260 * Note: the +2 is for the destination buffers of the ddf/raid6 case where we
1261 * calculate over all devices (not just the data blocks), using zeros in place
1262 * of the P and Q blocks.
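 *
 * For example (illustrative only): a 6-device array needs room for
 * (6 + 2) == 8 struct page pointers plus 8 addr_conv_t entries.
 */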
1264 static size_t scribble_len(int num)
1268 len = sizeof(struct page *) * (num+2) + sizeof(addr_conv_t) * (num+2);
1273 static int resize_stripes(raid5_conf_t *conf, int newsize)
1275 /* Make all the stripes able to hold 'newsize' devices.
1276 * New slots in each stripe get 'page' set to a new page.
1278 * This happens in stages:
1279 * 1/ create a new kmem_cache and allocate the required number of stripe_heads.
1281 * 2/ gather all the old stripe_heads and transfer the pages across
1282 * to the new stripe_heads. This will have the side effect of
1283 * freezing the array as once all stripe_heads have been collected,
1284 * no IO will be possible. Old stripe heads are freed once their
1285 * pages have been transferred over, and the old kmem_cache is
1286 * freed when all stripes are done.
1287 * 3/ reallocate conf->disks to be suitably bigger. If this fails,
1288 * we simply return a failure status - no need to clean anything up.
1289 * 4/ allocate new pages for the new slots in the new stripe_heads.
1290 * If this fails, we don't bother trying to shrink the
1291 * stripe_heads down again, we just leave them as they are.
1292 * As each stripe_head is processed, the new one is released into service.
1295 * Once step2 is started, we cannot afford to wait for a write,
1296 * so we use GFP_NOIO allocations.
1298 struct stripe_head *osh, *nsh;
1299 LIST_HEAD(newstripes);
1300 struct disk_info *ndisks;
1303 struct kmem_cache *sc;
1306 if (newsize <= conf->pool_size)
1307 return 0; /* never bother to shrink */
1309 err = md_allow_write(conf->mddev);
1314 sc = kmem_cache_create(conf->cache_name[1-conf->active_name],
1315 sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev),
1320 for (i = conf->max_nr_stripes; i; i--) {
1321 nsh = kmem_cache_alloc(sc, GFP_KERNEL);
1325 memset(nsh, 0, sizeof(*nsh) + (newsize-1)*sizeof(struct r5dev));
1327 nsh->raid_conf = conf;
1328 spin_lock_init(&nsh->lock);
1330 list_add(&nsh->lru, &newstripes);
1333 /* didn't get enough, give up */
1334 while (!list_empty(&newstripes)) {
1335 nsh = list_entry(newstripes.next, struct stripe_head, lru);
1336 list_del(&nsh->lru);
1337 kmem_cache_free(sc, nsh);
1339 kmem_cache_destroy(sc);
1342 /* Step 2 - Must use GFP_NOIO now.
1343 * OK, we have enough stripes, start collecting inactive
1344 * stripes and copying them over
1346 list_for_each_entry(nsh, &newstripes, lru) {
1347 spin_lock_irq(&conf->device_lock);
1348 wait_event_lock_irq(conf->wait_for_stripe,
1349 !list_empty(&conf->inactive_list),
1351 unplug_slaves(conf->mddev)
1353 osh = get_free_stripe(conf);
1354 spin_unlock_irq(&conf->device_lock);
1355 atomic_set(&nsh->count, 1);
1356 for(i=0; i<conf->pool_size; i++)
1357 nsh->dev[i].page = osh->dev[i].page;
1358 for( ; i<newsize; i++)
1359 nsh->dev[i].page = NULL;
1360 kmem_cache_free(conf->slab_cache, osh);
1362 kmem_cache_destroy(conf->slab_cache);
1365 * At this point, we are holding all the stripes so the array
1366 * is completely stalled, so now is a good time to resize
1367 * conf->disks and the scribble region
1369 ndisks = kzalloc(newsize * sizeof(struct disk_info), GFP_NOIO);
1371 for (i=0; i<conf->raid_disks; i++)
1372 ndisks[i] = conf->disks[i];
1374 conf->disks = ndisks;
1379 conf->scribble_len = scribble_len(newsize);
1380 for_each_present_cpu(cpu) {
1381 struct raid5_percpu *percpu;
1384 percpu = per_cpu_ptr(conf->percpu, cpu);
1385 scribble = kmalloc(conf->scribble_len, GFP_NOIO);
1388 kfree(percpu->scribble);
1389 percpu->scribble = scribble;
1397 /* Step 4, return new stripes to service */
1398 while(!list_empty(&newstripes)) {
1399 nsh = list_entry(newstripes.next, struct stripe_head, lru);
1400 list_del_init(&nsh->lru);
1402 for (i=conf->raid_disks; i < newsize; i++)
1403 if (nsh->dev[i].page == NULL) {
1404 struct page *p = alloc_page(GFP_NOIO);
1405 nsh->dev[i].page = p;
1409 release_stripe(nsh);
1411 /* critical section passed, GFP_NOIO no longer needed */
1413 conf->slab_cache = sc;
1414 conf->active_name = 1-conf->active_name;
1415 conf->pool_size = newsize;
1419 static int drop_one_stripe(raid5_conf_t *conf)
1421 struct stripe_head *sh;
1423 spin_lock_irq(&conf->device_lock);
1424 sh = get_free_stripe(conf);
1425 spin_unlock_irq(&conf->device_lock);
1428 BUG_ON(atomic_read(&sh->count));
1429 shrink_buffers(sh, conf->pool_size);
1430 kmem_cache_free(conf->slab_cache, sh);
1431 atomic_dec(&conf->active_stripes);
1435 static void shrink_stripes(raid5_conf_t *conf)
1437 while (drop_one_stripe(conf))
1440 if (conf->slab_cache)
1441 kmem_cache_destroy(conf->slab_cache);
1442 conf->slab_cache = NULL;
1445 static void raid5_end_read_request(struct bio * bi, int error)
1447 struct stripe_head *sh = bi->bi_private;
1448 raid5_conf_t *conf = sh->raid_conf;
1449 int disks = sh->disks, i;
1450 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
1451 char b[BDEVNAME_SIZE];
1455 for (i=0 ; i<disks; i++)
1456 if (bi == &sh->dev[i].req)
1459 pr_debug("end_read_request %llu/%d, count: %d, uptodate %d.\n",
1460 (unsigned long long)sh->sector, i, atomic_read(&sh->count),
1468 set_bit(R5_UPTODATE, &sh->dev[i].flags);
1469 if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
1470 rdev = conf->disks[i].rdev;
1471 printk_rl(KERN_INFO "raid5:%s: read error corrected"
1472 " (%lu sectors at %llu on %s)\n",
1473 mdname(conf->mddev), STRIPE_SECTORS,
1474 (unsigned long long)(sh->sector
1475 + rdev->data_offset),
1476 bdevname(rdev->bdev, b));
1477 clear_bit(R5_ReadError, &sh->dev[i].flags);
1478 clear_bit(R5_ReWrite, &sh->dev[i].flags);
1480 if (atomic_read(&conf->disks[i].rdev->read_errors))
1481 atomic_set(&conf->disks[i].rdev->read_errors, 0);
1483 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
1485 rdev = conf->disks[i].rdev;
1487 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
1488 atomic_inc(&rdev->read_errors);
1489 if (conf->mddev->degraded)
1490 printk_rl(KERN_WARNING
1491 "raid5:%s: read error not correctable "
1492 "(sector %llu on %s).\n",
1493 mdname(conf->mddev),
1494 (unsigned long long)(sh->sector
1495 + rdev->data_offset),
1497 else if (test_bit(R5_ReWrite, &sh->dev[i].flags))
1499 printk_rl(KERN_WARNING
1500 "raid5:%s: read error NOT corrected!! "
1501 "(sector %llu on %s).\n",
1502 mdname(conf->mddev),
1503 (unsigned long long)(sh->sector
1504 + rdev->data_offset),
1506 else if (atomic_read(&rdev->read_errors)
1507 > conf->max_nr_stripes)
1509 "raid5:%s: Too many read errors, failing device %s.\n",
1510 mdname(conf->mddev), bdn);
1514 set_bit(R5_ReadError, &sh->dev[i].flags);
1516 clear_bit(R5_ReadError, &sh->dev[i].flags);
1517 clear_bit(R5_ReWrite, &sh->dev[i].flags);
1518 md_error(conf->mddev, rdev);
1521 rdev_dec_pending(conf->disks[i].rdev, conf->mddev);
1522 clear_bit(R5_LOCKED, &sh->dev[i].flags);
1523 set_bit(STRIPE_HANDLE, &sh->state);
1527 static void raid5_end_write_request(struct bio *bi, int error)
1529 struct stripe_head *sh = bi->bi_private;
1530 raid5_conf_t *conf = sh->raid_conf;
1531 int disks = sh->disks, i;
1532 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
1534 for (i=0 ; i<disks; i++)
1535 if (bi == &sh->dev[i].req)
1538 pr_debug("end_write_request %llu/%d, count %d, uptodate: %d.\n",
1539 (unsigned long long)sh->sector, i, atomic_read(&sh->count),
1547 md_error(conf->mddev, conf->disks[i].rdev);
1549 rdev_dec_pending(conf->disks[i].rdev, conf->mddev);
1551 clear_bit(R5_LOCKED, &sh->dev[i].flags);
1552 set_bit(STRIPE_HANDLE, &sh->state);
1557 static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous);
1559 static void raid5_build_block(struct stripe_head *sh, int i, int previous)
1561 struct r5dev *dev = &sh->dev[i];
1563 bio_init(&dev->req);
1564 dev->req.bi_io_vec = &dev->vec;
1566 dev->req.bi_max_vecs++;
1567 dev->vec.bv_page = dev->page;
1568 dev->vec.bv_len = STRIPE_SIZE;
1569 dev->vec.bv_offset = 0;
1571 dev->req.bi_sector = sh->sector;
1572 dev->req.bi_private = sh;
1575 dev->sector = compute_blocknr(sh, i, previous);
1578 static void error(mddev_t *mddev, mdk_rdev_t *rdev)
1580 char b[BDEVNAME_SIZE];
1581 raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
1582 pr_debug("raid5: error called\n");
1584 if (!test_bit(Faulty, &rdev->flags)) {
1585 set_bit(MD_CHANGE_DEVS, &mddev->flags);
1586 if (test_and_clear_bit(In_sync, &rdev->flags)) {
1587 unsigned long flags;
1588 spin_lock_irqsave(&conf->device_lock, flags);
1590 spin_unlock_irqrestore(&conf->device_lock, flags);
1592 * if recovery was running, make sure it aborts.
1594 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1596 set_bit(Faulty, &rdev->flags);
1598 "raid5: Disk failure on %s, disabling device.\n"
1599 "raid5: Operation continuing on %d devices.\n",
1600 bdevname(rdev->bdev,b), conf->raid_disks - mddev->degraded);
1605 * Input: a 'big' sector number,
1606 * Output: index of the data and parity disk, and the sector # in them.
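 *
 * Worked example (illustrative, not from the original source): a 4-drive
 * raid5 with 64KiB chunks (sectors_per_chunk == 128) and
 * ALGORITHM_LEFT_SYMMETRIC.  For r_sector == 300: chunk_number == 2,
 * chunk_offset == 44, stripe == 0, so pd_idx == 3, the data lands on
 * dd_idx == 2 and the returned new_sector is 0 * 128 + 44 == 44.
 */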
1608 static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
1609 int previous, int *dd_idx,
1610 struct stripe_head *sh)
1613 unsigned long chunk_number;
1614 unsigned int chunk_offset;
1617 sector_t new_sector;
1618 int algorithm = previous ? conf->prev_algo
1620 int sectors_per_chunk = previous ? conf->prev_chunk_sectors
1621 : conf->chunk_sectors;
1622 int raid_disks = previous ? conf->previous_raid_disks
1624 int data_disks = raid_disks - conf->max_degraded;
1626 /* First compute the information on this sector */
1629 * Compute the chunk number and the sector offset inside the chunk
1631 chunk_offset = sector_div(r_sector, sectors_per_chunk);
1632 chunk_number = r_sector;
1633 BUG_ON(r_sector != chunk_number);
1636 * Compute the stripe number
1638 stripe = chunk_number / data_disks;
1641 * Compute the data disk and parity disk indexes inside the stripe
1643 *dd_idx = chunk_number % data_disks;
1646 * Select the parity disk based on the user selected algorithm.
1648 pd_idx = qd_idx = ~0;
1649 switch(conf->level) {
1651 pd_idx = data_disks;
1654 switch (algorithm) {
1655 case ALGORITHM_LEFT_ASYMMETRIC:
1656 pd_idx = data_disks - stripe % raid_disks;
1657 if (*dd_idx >= pd_idx)
1660 case ALGORITHM_RIGHT_ASYMMETRIC:
1661 pd_idx = stripe % raid_disks;
1662 if (*dd_idx >= pd_idx)
1665 case ALGORITHM_LEFT_SYMMETRIC:
1666 pd_idx = data_disks - stripe % raid_disks;
1667 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
1669 case ALGORITHM_RIGHT_SYMMETRIC:
1670 pd_idx = stripe % raid_disks;
1671 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
1673 case ALGORITHM_PARITY_0:
1677 case ALGORITHM_PARITY_N:
1678 pd_idx = data_disks;
1681 printk(KERN_ERR "raid5: unsupported algorithm %d\n",
1688 switch (algorithm) {
1689 case ALGORITHM_LEFT_ASYMMETRIC:
1690 pd_idx = raid_disks - 1 - (stripe % raid_disks);
1691 qd_idx = pd_idx + 1;
1692 if (pd_idx == raid_disks-1) {
1693 (*dd_idx)++; /* Q D D D P */
1695 } else if (*dd_idx >= pd_idx)
1696 (*dd_idx) += 2; /* D D P Q D */
1698 case ALGORITHM_RIGHT_ASYMMETRIC:
1699 pd_idx = stripe % raid_disks;
1700 qd_idx = pd_idx + 1;
1701 if (pd_idx == raid_disks-1) {
1702 (*dd_idx)++; /* Q D D D P */
1704 } else if (*dd_idx >= pd_idx)
1705 (*dd_idx) += 2; /* D D P Q D */
1707 case ALGORITHM_LEFT_SYMMETRIC:
1708 pd_idx = raid_disks - 1 - (stripe % raid_disks);
1709 qd_idx = (pd_idx + 1) % raid_disks;
1710 *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
1712 case ALGORITHM_RIGHT_SYMMETRIC:
1713 pd_idx = stripe % raid_disks;
1714 qd_idx = (pd_idx + 1) % raid_disks;
1715 *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
1718 case ALGORITHM_PARITY_0:
1723 case ALGORITHM_PARITY_N:
1724 pd_idx = data_disks;
1725 qd_idx = data_disks + 1;
1728 case ALGORITHM_ROTATING_ZERO_RESTART:
1729 /* Exactly the same as RIGHT_ASYMMETRIC, but the order
1730 * of blocks for computing Q is different.
1732 pd_idx = stripe % raid_disks;
1733 qd_idx = pd_idx + 1;
1734 if (pd_idx == raid_disks-1) {
1735 (*dd_idx)++; /* Q D D D P */
1737 } else if (*dd_idx >= pd_idx)
1738 (*dd_idx) += 2; /* D D P Q D */
1742 case ALGORITHM_ROTATING_N_RESTART:
1743 /* Same as left_asymmetric, but the first stripe is
1744 * D D D P Q rather than Q D D D P
1747 pd_idx = raid_disks - 1 - ((stripe + 1) % raid_disks);
1748 qd_idx = pd_idx + 1;
1749 if (pd_idx == raid_disks-1) {
1750 (*dd_idx)++; /* Q D D D P */
1752 } else if (*dd_idx >= pd_idx)
1753 (*dd_idx) += 2; /* D D P Q D */
1757 case ALGORITHM_ROTATING_N_CONTINUE:
1758 /* Same as left_symmetric but Q is before P */
1759 pd_idx = raid_disks - 1 - (stripe % raid_disks);
1760 qd_idx = (pd_idx + raid_disks - 1) % raid_disks;
1761 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
1765 case ALGORITHM_LEFT_ASYMMETRIC_6:
1766 /* RAID5 left_asymmetric, with Q on last device */
1767 pd_idx = data_disks - stripe % (raid_disks-1);
1768 if (*dd_idx >= pd_idx)
1770 qd_idx = raid_disks - 1;
1773 case ALGORITHM_RIGHT_ASYMMETRIC_6:
1774 pd_idx = stripe % (raid_disks-1);
1775 if (*dd_idx >= pd_idx)
1777 qd_idx = raid_disks - 1;
1780 case ALGORITHM_LEFT_SYMMETRIC_6:
1781 pd_idx = data_disks - stripe % (raid_disks-1);
1782 *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
1783 qd_idx = raid_disks - 1;
1786 case ALGORITHM_RIGHT_SYMMETRIC_6:
1787 pd_idx = stripe % (raid_disks-1);
1788 *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
1789 qd_idx = raid_disks - 1;
1792 case ALGORITHM_PARITY_0_6:
1795 qd_idx = raid_disks - 1;
1800 printk(KERN_CRIT "raid6: unsupported algorithm %d\n",
1808 sh->pd_idx = pd_idx;
1809 sh->qd_idx = qd_idx;
1810 sh->ddf_layout = ddf_layout;
1813 * Finally, compute the new sector number
1815 new_sector = (sector_t)stripe * sectors_per_chunk + chunk_offset;
1820 static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
1822 raid5_conf_t *conf = sh->raid_conf;
1823 int raid_disks = sh->disks;
1824 int data_disks = raid_disks - conf->max_degraded;
1825 sector_t new_sector = sh->sector, check;
1826 int sectors_per_chunk = previous ? conf->prev_chunk_sectors
1827 : conf->chunk_sectors;
1828 int algorithm = previous ? conf->prev_algo
1832 int chunk_number, dummy1, dd_idx = i;
1834 struct stripe_head sh2;
1837 chunk_offset = sector_div(new_sector, sectors_per_chunk);
1838 stripe = new_sector;
1839 BUG_ON(new_sector != stripe);
1841 if (i == sh->pd_idx)
1843 switch(conf->level) {
1846 switch (algorithm) {
1847 case ALGORITHM_LEFT_ASYMMETRIC:
1848 case ALGORITHM_RIGHT_ASYMMETRIC:
1852 case ALGORITHM_LEFT_SYMMETRIC:
1853 case ALGORITHM_RIGHT_SYMMETRIC:
1856 i -= (sh->pd_idx + 1);
1858 case ALGORITHM_PARITY_0:
1861 case ALGORITHM_PARITY_N:
1864 printk(KERN_ERR "raid5: unsupported algorithm %d\n",
1870 if (i == sh->qd_idx)
1871 return 0; /* It is the Q disk */
1872 switch (algorithm) {
1873 case ALGORITHM_LEFT_ASYMMETRIC:
1874 case ALGORITHM_RIGHT_ASYMMETRIC:
1875 case ALGORITHM_ROTATING_ZERO_RESTART:
1876 case ALGORITHM_ROTATING_N_RESTART:
1877 if (sh->pd_idx == raid_disks-1)
1878 i--; /* Q D D D P */
1879 else if (i > sh->pd_idx)
1880 i -= 2; /* D D P Q D */
1882 case ALGORITHM_LEFT_SYMMETRIC:
1883 case ALGORITHM_RIGHT_SYMMETRIC:
1884 if (sh->pd_idx == raid_disks-1)
1885 i--; /* Q D D D P */
1890 i -= (sh->pd_idx + 2);
1893 case ALGORITHM_PARITY_0:
1896 case ALGORITHM_PARITY_N:
1898 case ALGORITHM_ROTATING_N_CONTINUE:
1899 if (sh->pd_idx == 0)
1900 i--; /* P D D D Q */
1901 else if (i > sh->pd_idx)
1902 i -= 2; /* D D Q P D */
1904 case ALGORITHM_LEFT_ASYMMETRIC_6:
1905 case ALGORITHM_RIGHT_ASYMMETRIC_6:
1909 case ALGORITHM_LEFT_SYMMETRIC_6:
1910 case ALGORITHM_RIGHT_SYMMETRIC_6:
1912 i += data_disks + 1;
1913 i -= (sh->pd_idx + 1);
1915 case ALGORITHM_PARITY_0_6:
1919 printk(KERN_CRIT "raid6: unsupported algorithm %d\n",
1926 chunk_number = stripe * data_disks + i;
1927 r_sector = (sector_t)chunk_number * sectors_per_chunk + chunk_offset;
1929 check = raid5_compute_sector(conf, r_sector,
1930 previous, &dummy1, &sh2);
1931 if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx
1932 || sh2.qd_idx != sh->qd_idx) {
1933 printk(KERN_ERR "compute_blocknr: map not correct\n");
1941 schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
1942 int rcw, int expand)
1944 int i, pd_idx = sh->pd_idx, disks = sh->disks;
1945 raid5_conf_t *conf = sh->raid_conf;
1946 int level = conf->level;
1949 /* if we are not expanding this is a proper write request, and
1950 * there will be bios with new data to be drained into the stripe cache.
1954 sh->reconstruct_state = reconstruct_state_drain_run;
1955 set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
1957 sh->reconstruct_state = reconstruct_state_run;
1959 set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);
1961 for (i = disks; i--; ) {
1962 struct r5dev *dev = &sh->dev[i];
1965 set_bit(R5_LOCKED, &dev->flags);
1966 set_bit(R5_Wantdrain, &dev->flags);
1968 clear_bit(R5_UPTODATE, &dev->flags);
1972 if (s->locked + conf->max_degraded == disks)
1973 if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state))
1974 atomic_inc(&conf->pending_full_writes);
1977 BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) ||
1978 test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags)));
1980 sh->reconstruct_state = reconstruct_state_prexor_drain_run;
1981 set_bit(STRIPE_OP_PREXOR, &s->ops_request);
1982 set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
1983 set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);
1985 for (i = disks; i--; ) {
1986 struct r5dev *dev = &sh->dev[i];
1991 (test_bit(R5_UPTODATE, &dev->flags) ||
1992 test_bit(R5_Wantcompute, &dev->flags))) {
1993 set_bit(R5_Wantdrain, &dev->flags);
1994 set_bit(R5_LOCKED, &dev->flags);
1995 clear_bit(R5_UPTODATE, &dev->flags);
2001 /* keep the parity disk(s) locked while asynchronous operations are in flight
2004 set_bit(R5_LOCKED, &sh->dev[pd_idx].flags);
2005 clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
2009 int qd_idx = sh->qd_idx;
2010 struct r5dev *dev = &sh->dev[qd_idx];
2012 set_bit(R5_LOCKED, &dev->flags);
2013 clear_bit(R5_UPTODATE, &dev->flags);
2017 pr_debug("%s: stripe %llu locked: %d ops_request: %lx\n",
2018 __func__, (unsigned long long)sh->sector,
2019 s->locked, s->ops_request);
2023 * Each stripe/dev can have one or more bios attached.
2024 * toread/towrite point to the first in a chain.
2025 * The bi_next chain must be in order.
2027 static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, int forwrite)
2030 raid5_conf_t *conf = sh->raid_conf;
2033 pr_debug("adding bh b#%llu to stripe s#%llu\n",
2034 (unsigned long long)bi->bi_sector,
2035 (unsigned long long)sh->sector);
2038 spin_lock(&sh->lock);
2039 spin_lock_irq(&conf->device_lock);
2041 bip = &sh->dev[dd_idx].towrite;
2042 if (*bip == NULL && sh->dev[dd_idx].written == NULL)
2045 bip = &sh->dev[dd_idx].toread;
2046 while (*bip && (*bip)->bi_sector < bi->bi_sector) {
2047 if ((*bip)->bi_sector + ((*bip)->bi_size >> 9) > bi->bi_sector)
2049 bip = & (*bip)->bi_next;
2051 if (*bip && (*bip)->bi_sector < bi->bi_sector + ((bi->bi_size)>>9))
2054 BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next);
2058 bi->bi_phys_segments++;
2059 spin_unlock_irq(&conf->device_lock);
2060 spin_unlock(&sh->lock);
2062 pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n",
2063 (unsigned long long)bi->bi_sector,
2064 (unsigned long long)sh->sector, dd_idx);
2066 if (conf->mddev->bitmap && firstwrite) {
2067 bitmap_startwrite(conf->mddev->bitmap, sh->sector,
2069 sh->bm_seq = conf->seq_flush+1;
2070 set_bit(STRIPE_BIT_DELAY, &sh->state);
2074 /* check if page is covered */
2075 sector_t sector = sh->dev[dd_idx].sector;
2076 for (bi=sh->dev[dd_idx].towrite;
2077 sector < sh->dev[dd_idx].sector + STRIPE_SECTORS &&
2078 bi && bi->bi_sector <= sector;
2079 bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) {
2080 if (bi->bi_sector + (bi->bi_size>>9) >= sector)
2081 sector = bi->bi_sector + (bi->bi_size>>9);
2083 if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS)
2084 set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags);
2089 set_bit(R5_Overlap, &sh->dev[dd_idx].flags);
2090 spin_unlock_irq(&conf->device_lock);
2091 spin_unlock(&sh->lock);
2095 static void end_reshape(raid5_conf_t *conf);
2097 static void stripe_set_idx(sector_t stripe, raid5_conf_t *conf, int previous,
2098 struct stripe_head *sh)
2100 int sectors_per_chunk =
2101 previous ? conf->prev_chunk_sectors : conf->chunk_sectors;
2103 int chunk_offset = sector_div(stripe, sectors_per_chunk);
2104 int disks = previous ? conf->previous_raid_disks : conf->raid_disks;
2106 raid5_compute_sector(conf,
2107 stripe * (disks - conf->max_degraded)
2108 *sectors_per_chunk + chunk_offset,
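/* handle_failed_stripe: too many devices have failed for this stripe to be
 * recovered, so fail and return every bio still attached to it (pending
 * writes, 'written' data, and reads on devices that are dead or errored).
 */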
2114 handle_failed_stripe(raid5_conf_t *conf, struct stripe_head *sh,
2115 struct stripe_head_state *s, int disks,
2116 struct bio **return_bi)
2119 for (i = disks; i--; ) {
2123 if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
2126 rdev = rcu_dereference(conf->disks[i].rdev);
2127 if (rdev && test_bit(In_sync, &rdev->flags))
2128 /* multiple read failures in one stripe */
2129 md_error(conf->mddev, rdev);
2132 spin_lock_irq(&conf->device_lock);
2133 /* fail all writes first */
2134 bi = sh->dev[i].towrite;
2135 sh->dev[i].towrite = NULL;
2141 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
2142 wake_up(&conf->wait_for_overlap);
2144 while (bi && bi->bi_sector <
2145 sh->dev[i].sector + STRIPE_SECTORS) {
2146 struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
2147 clear_bit(BIO_UPTODATE, &bi->bi_flags);
2148 if (!raid5_dec_bi_phys_segments(bi)) {
2149 md_write_end(conf->mddev);
2150 bi->bi_next = *return_bi;
2155 /* and fail all 'written' */
2156 bi = sh->dev[i].written;
2157 sh->dev[i].written = NULL;
2158 if (bi) bitmap_end = 1;
2159 while (bi && bi->bi_sector <
2160 sh->dev[i].sector + STRIPE_SECTORS) {
2161 struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
2162 clear_bit(BIO_UPTODATE, &bi->bi_flags);
2163 if (!raid5_dec_bi_phys_segments(bi)) {
2164 md_write_end(conf->mddev);
2165 bi->bi_next = *return_bi;
2171 /* fail any reads if this device is non-operational and
2172 * the data has not reached the cache yet.
2174 if (!test_bit(R5_Wantfill, &sh->dev[i].flags) &&
2175 (!test_bit(R5_Insync, &sh->dev[i].flags) ||
2176 test_bit(R5_ReadError, &sh->dev[i].flags))) {
2177 bi = sh->dev[i].toread;
2178 sh->dev[i].toread = NULL;
2179 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
2180 wake_up(&conf->wait_for_overlap);
2181 if (bi) s->to_read--;
2182 while (bi && bi->bi_sector <
2183 sh->dev[i].sector + STRIPE_SECTORS) {
2184 struct bio *nextbi =
2185 r5_next_bio(bi, sh->dev[i].sector);
2186 clear_bit(BIO_UPTODATE, &bi->bi_flags);
2187 if (!raid5_dec_bi_phys_segments(bi)) {
2188 bi->bi_next = *return_bi;
2194 spin_unlock_irq(&conf->device_lock);
2196 bitmap_endwrite(conf->mddev->bitmap, sh->sector,
2197 STRIPE_SECTORS, 0, 0);
2200 if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
2201 if (atomic_dec_and_test(&conf->pending_full_writes))
2202 md_wakeup_thread(conf->mddev->thread);
2205 /* fetch_block5 - checks the given member device to see if its data needs
2206 * to be read or computed to satisfy a request.
2208 * Returns 1 when no more member devices need to be checked, otherwise returns
2209 * 0 to tell the loop in handle_stripe_fill5 to continue
2211 static int fetch_block5(struct stripe_head *sh, struct stripe_head_state *s,
2212 int disk_idx, int disks)
2214 struct r5dev *dev = &sh->dev[disk_idx];
2215 struct r5dev *failed_dev = &sh->dev[s->failed_num];
2217 /* is the data in this block needed, and can we get it? */
2218 if (!test_bit(R5_LOCKED, &dev->flags) &&
2219 !test_bit(R5_UPTODATE, &dev->flags) &&
2221 (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
2222 s->syncing || s->expanding ||
2224 (failed_dev->toread ||
2225 (failed_dev->towrite &&
2226 !test_bit(R5_OVERWRITE, &failed_dev->flags)))))) {
2227 /* We would like to get this block, possibly by computing it,
2228 * otherwise read it if the backing disk is insync
2230 if ((s->uptodate == disks - 1) &&
2231 (s->failed && disk_idx == s->failed_num)) {
2232 set_bit(STRIPE_COMPUTE_RUN, &sh->state);
2233 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
2234 set_bit(R5_Wantcompute, &dev->flags);
2235 sh->ops.target = disk_idx;
2236 sh->ops.target2 = -1;
2238 /* Careful: from this point on 'uptodate' is in the eye
2239 * of raid_run_ops which services 'compute' operations
2240 * before writes. R5_Wantcompute flags a block that will
2241 * be R5_UPTODATE by the time it is needed for a
2242 * subsequent operation.
2245 return 1; /* uptodate + compute == disks */
2246 } else if (test_bit(R5_Insync, &dev->flags)) {
2247 set_bit(R5_LOCKED, &dev->flags);
2248 set_bit(R5_Wantread, &dev->flags);
2250 pr_debug("Reading block %d (sync=%d)\n", disk_idx,
2259 * handle_stripe_fill5 - read or compute data to satisfy pending requests.
2261 static void handle_stripe_fill5(struct stripe_head *sh,
2262 struct stripe_head_state *s, int disks)
2266 /* look for blocks to read/compute, skip this if a compute
2267 * is already in flight, or if the stripe contents are in the
2268 * midst of changing due to a write
2270 if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state &&
2271 !sh->reconstruct_state)
2272 for (i = disks; i--; )
2273 if (fetch_block5(sh, s, i, disks))
2275 set_bit(STRIPE_HANDLE, &sh->state);
2278 /* fetch_block6 - checks the given member device to see if its data needs
2279 * to be read or computed to satisfy a request.
2281 * Returns 1 when no more member devices need to be checked, otherwise returns
2282 * 0 to tell the loop in handle_stripe_fill6 to continue
2284 static int fetch_block6(struct stripe_head *sh, struct stripe_head_state *s,
2285 struct r6_state *r6s, int disk_idx, int disks)
2287 struct r5dev *dev = &sh->dev[disk_idx];
2288 struct r5dev *fdev[2] = { &sh->dev[r6s->failed_num[0]],
2289 &sh->dev[r6s->failed_num[1]] };
2291 if (!test_bit(R5_LOCKED, &dev->flags) &&
2292 !test_bit(R5_UPTODATE, &dev->flags) &&
2294 (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
2295 s->syncing || s->expanding ||
2297 (fdev[0]->toread || s->to_write)) ||
2299 (fdev[1]->toread || s->to_write)))) {
2300 /* we would like to get this block, possibly by computing it,
2301 * otherwise read it if the backing disk is insync
2303 BUG_ON(test_bit(R5_Wantcompute, &dev->flags));
2304 BUG_ON(test_bit(R5_Wantread, &dev->flags));
2305 if ((s->uptodate == disks - 1) &&
2306 (s->failed && (disk_idx == r6s->failed_num[0] ||
2307 disk_idx == r6s->failed_num[1]))) {
2308 /* a disk has failed and we've been asked to fetch its block;
2311 pr_debug("Computing stripe %llu block %d\n",
2312 (unsigned long long)sh->sector, disk_idx);
2313 set_bit(STRIPE_COMPUTE_RUN, &sh->state);
2314 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
2315 set_bit(R5_Wantcompute, &dev->flags);
2316 sh->ops.target = disk_idx;
2317 sh->ops.target2 = -1; /* no 2nd target */
2321 } else if (s->uptodate == disks-2 && s->failed >= 2) {
2322 /* Computing 2-failure is *very* expensive; only
2323 * do it if failed >= 2
2326 for (other = disks; other--; ) {
2327 if (other == disk_idx)
2329 if (!test_bit(R5_UPTODATE,
2330 &sh->dev[other].flags))
2334 pr_debug("Computing stripe %llu blocks %d,%d\n",
2335 (unsigned long long)sh->sector,
2337 set_bit(STRIPE_COMPUTE_RUN, &sh->state);
2338 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
2339 set_bit(R5_Wantcompute, &sh->dev[disk_idx].flags);
2340 set_bit(R5_Wantcompute, &sh->dev[other].flags);
2341 sh->ops.target = disk_idx;
2342 sh->ops.target2 = other;
2346 } else if (test_bit(R5_Insync, &dev->flags)) {
2347 set_bit(R5_LOCKED, &dev->flags);
2348 set_bit(R5_Wantread, &dev->flags);
2350 pr_debug("Reading block %d (sync=%d)\n",
2351 disk_idx, s->syncing);
2359 * handle_stripe_fill6 - read or compute data to satisfy pending requests.
2361 static void handle_stripe_fill6(struct stripe_head *sh,
2362 struct stripe_head_state *s, struct r6_state *r6s,
2367 /* look for blocks to read/compute, skip this if a compute
2368 * is already in flight, or if the stripe contents are in the
2369 * midst of changing due to a write
2371 if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state &&
2372 !sh->reconstruct_state)
2373 for (i = disks; i--; )
2374 if (fetch_block6(sh, s, r6s, i, disks))
2376 set_bit(STRIPE_HANDLE, &sh->state);
2380 /* handle_stripe_clean_event
2381 * any written block on an uptodate or failed drive can be returned.
2382 * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but
2383 * never LOCKED, so we don't need to test 'failed' directly.
2385 static void handle_stripe_clean_event(raid5_conf_t *conf,
2386 struct stripe_head *sh, int disks, struct bio **return_bi)
2391 for (i = disks; i--; )
2392 if (sh->dev[i].written) {
2394 if (!test_bit(R5_LOCKED, &dev->flags) &&
2395 test_bit(R5_UPTODATE, &dev->flags)) {
2396 /* We can return any write requests */
2397 struct bio *wbi, *wbi2;
2399 pr_debug("Return write for disc %d\n", i);
2400 spin_lock_irq(&conf->device_lock);
2402 dev->written = NULL;
2403 while (wbi && wbi->bi_sector <
2404 dev->sector + STRIPE_SECTORS) {
2405 wbi2 = r5_next_bio(wbi, dev->sector);
2406 if (!raid5_dec_bi_phys_segments(wbi)) {
2407 md_write_end(conf->mddev);
2408 wbi->bi_next = *return_bi;
2413 if (dev->towrite == NULL)
2415 spin_unlock_irq(&conf->device_lock);
2417 bitmap_endwrite(conf->mddev->bitmap,
2420 !test_bit(STRIPE_DEGRADED, &sh->state),
2425 if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
2426 if (atomic_dec_and_test(&conf->pending_full_writes))
2427 md_wakeup_thread(conf->mddev->thread);
2430 static void handle_stripe_dirtying5(raid5_conf_t *conf,
2431 struct stripe_head *sh, struct stripe_head_state *s, int disks)
2433 int rmw = 0, rcw = 0, i;
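/*
 * rmw counts the blocks that would have to be read for a
 * read-modify-write (the blocks being written plus the parity block);
 * rcw counts the blocks that would have to be read for a
 * reconstruct-write (every data block not being fully overwritten).
 * For example, on a 6-drive array a write to a single block needs
 * rmw == 2 (old data + old parity) but rcw == 4 (the other four data
 * blocks), so read-modify-write wins; a nearly full stripe write
 * goes the other way.
 */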
2434 for (i = disks; i--; ) {
2435 /* would I have to read this buffer for read_modify_write */
2436 struct r5dev *dev = &sh->dev[i];
2437 if ((dev->towrite || i == sh->pd_idx) &&
2438 !test_bit(R5_LOCKED, &dev->flags) &&
2439 !(test_bit(R5_UPTODATE, &dev->flags) ||
2440 test_bit(R5_Wantcompute, &dev->flags))) {
2441 if (test_bit(R5_Insync, &dev->flags))
2444 rmw += 2*disks; /* cannot read it */
2446 /* Would I have to read this buffer for reconstruct_write */
2447 if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx &&
2448 !test_bit(R5_LOCKED, &dev->flags) &&
2449 !(test_bit(R5_UPTODATE, &dev->flags) ||
2450 test_bit(R5_Wantcompute, &dev->flags))) {
2451 if (test_bit(R5_Insync, &dev->flags)) rcw++;
2456 pr_debug("for sector %llu, rmw=%d rcw=%d\n",
2457 (unsigned long long)sh->sector, rmw, rcw);
2458 set_bit(STRIPE_HANDLE, &sh->state);
2459 if (rmw < rcw && rmw > 0)
2460 /* prefer read-modify-write, but need to get some data */
2461 for (i = disks; i--; ) {
2462 struct r5dev *dev = &sh->dev[i];
2463 if ((dev->towrite || i == sh->pd_idx) &&
2464 !test_bit(R5_LOCKED, &dev->flags) &&
2465 !(test_bit(R5_UPTODATE, &dev->flags) ||
2466 test_bit(R5_Wantcompute, &dev->flags)) &&
2467 test_bit(R5_Insync, &dev->flags)) {
2469 test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
2470 pr_debug("Read_old block "
2471 "%d for r-m-w\n", i);
2472 set_bit(R5_LOCKED, &dev->flags);
2473 set_bit(R5_Wantread, &dev->flags);
2476 set_bit(STRIPE_DELAYED, &sh->state);
2477 set_bit(STRIPE_HANDLE, &sh->state);
2481 if (rcw <= rmw && rcw > 0)
2482 /* want reconstruct write, but need to get some data */
2483 for (i = disks; i--; ) {
2484 struct r5dev *dev = &sh->dev[i];
2485 if (!test_bit(R5_OVERWRITE, &dev->flags) &&
2487 !test_bit(R5_LOCKED, &dev->flags) &&
2488 !(test_bit(R5_UPTODATE, &dev->flags) ||
2489 test_bit(R5_Wantcompute, &dev->flags)) &&
2490 test_bit(R5_Insync, &dev->flags)) {
2492 test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
2493 pr_debug("Read_old block "
2494 "%d for Reconstruct\n", i);
2495 set_bit(R5_LOCKED, &dev->flags);
2496 set_bit(R5_Wantread, &dev->flags);
2499 set_bit(STRIPE_DELAYED, &sh->state);
2500 set_bit(STRIPE_HANDLE, &sh->state);
2504 /* now if nothing is locked, and if we have enough data,
2505 * we can start a write request
2507 /* since handle_stripe can be called at any time we need to handle the
2508 * case where a compute block operation has been submitted and then a
2509 * subsequent call wants to start a write request. raid_run_ops only
2510 * handles the case where compute block and reconstruct are requested
2511 * simultaneously. If this is not the case then new writes need to be
2512 * held off until the compute completes.
2514 if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) &&
2515 (s->locked == 0 && (rcw == 0 || rmw == 0) &&
2516 !test_bit(STRIPE_BIT_DELAY, &sh->state)))
2517 schedule_reconstruction(sh, s, rcw == 0, 0);
2520 static void handle_stripe_dirtying6(raid5_conf_t *conf,
2521 struct stripe_head *sh, struct stripe_head_state *s,
2522 struct r6_state *r6s, int disks)
2524 int rcw = 0, pd_idx = sh->pd_idx, i;
2525 int qd_idx = sh->qd_idx;
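/*
 * Unlike the RAID5 path there is no read-modify-write option here:
 * this code always performs a reconstruct-write, reading in any data
 * blocks that are not fully overwritten and then regenerating both
 * P and Q from the complete stripe.
 */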
2527 set_bit(STRIPE_HANDLE, &sh->state);
2528 for (i = disks; i--; ) {
2529 struct r5dev *dev = &sh->dev[i];
2530 /* check whether this block must be read for reconstruct-write */
2531 if (!test_bit(R5_OVERWRITE, &dev->flags) &&
2532 i != pd_idx && i != qd_idx &&
2533 !test_bit(R5_LOCKED, &dev->flags) &&
2534 !(test_bit(R5_UPTODATE, &dev->flags) ||
2535 test_bit(R5_Wantcompute, &dev->flags))) {
2537 if (!test_bit(R5_Insync, &dev->flags))
2538 continue; /* it's a failed drive */
2541 test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
2542 pr_debug("Read_old stripe %llu "
2543 "block %d for Reconstruct\n",
2544 (unsigned long long)sh->sector, i);
2545 set_bit(R5_LOCKED, &dev->flags);
2546 set_bit(R5_Wantread, &dev->flags);
2549 pr_debug("Request delayed stripe %llu "
2550 "block %d for Reconstruct\n",
2551 (unsigned long long)sh->sector, i);
2552 set_bit(STRIPE_DELAYED, &sh->state);
2553 set_bit(STRIPE_HANDLE, &sh->state);
2557 /* now if nothing is locked, and if we have enough data, we can start a write request */
2560 if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) &&
2561 s->locked == 0 && rcw == 0 &&
2562 !test_bit(STRIPE_BIT_DELAY, &sh->state)) {
2563 schedule_reconstruction(sh, s, 1, 0);
2567 static void handle_parity_checks5(raid5_conf_t *conf, struct stripe_head *sh,
2568 struct stripe_head_state *s, int disks)
2570 struct r5dev *dev = NULL;
2572 set_bit(STRIPE_HANDLE, &sh->state);
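/*
 * check_state walks a small state machine:
 *   idle -> run (parity check in flight) -> check_result;
 * if the parity was wrong and repair is allowed we go through
 * compute_run/compute_result to regenerate it, and finally back
 * to idle once the stripe is marked in-sync.
 */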
2574 switch (sh->check_state) {
2575 case check_state_idle:
2576 /* start a new check operation if there are no failures */
2577 if (s->failed == 0) {
2578 BUG_ON(s->uptodate != disks);
2579 sh->check_state = check_state_run;
2580 set_bit(STRIPE_OP_CHECK, &s->ops_request);
2581 clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags);
2585 dev = &sh->dev[s->failed_num];
2587 case check_state_compute_result:
2588 sh->check_state = check_state_idle;
2590 dev = &sh->dev[sh->pd_idx];
2592 /* check that a write has not made the stripe insync */
2593 if (test_bit(STRIPE_INSYNC, &sh->state))
2596 /* either failed parity check, or recovery is happening */
2597 BUG_ON(!test_bit(R5_UPTODATE, &dev->flags));
2598 BUG_ON(s->uptodate != disks);
2600 set_bit(R5_LOCKED, &dev->flags);
2602 set_bit(R5_Wantwrite, &dev->flags);
2604 clear_bit(STRIPE_DEGRADED, &sh->state);
2605 set_bit(STRIPE_INSYNC, &sh->state);
2607 case check_state_run:
2608 break; /* we will be called again upon completion */
2609 case check_state_check_result:
2610 sh->check_state = check_state_idle;
2612 /* if a failure occurred during the check operation, leave
2613 * STRIPE_INSYNC not set and let the stripe be handled again
2618 /* handle a successful check operation, if parity is correct
2619 * we are done. Otherwise update the mismatch count and repair
2620 * parity if !MD_RECOVERY_CHECK
2622 if ((sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) == 0)
2623 /* parity is correct (on disc,
2624 * not in buffer any more)
2626 set_bit(STRIPE_INSYNC, &sh->state);
2628 conf->mddev->resync_mismatches += STRIPE_SECTORS;
2629 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
2630 /* don't try to repair!! */
2631 set_bit(STRIPE_INSYNC, &sh->state);
2633 sh->check_state = check_state_compute_run;
2634 set_bit(STRIPE_COMPUTE_RUN, &sh->state);
2635 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
2636 set_bit(R5_Wantcompute,
2637 &sh->dev[sh->pd_idx].flags);
2638 sh->ops.target = sh->pd_idx;
2639 sh->ops.target2 = -1;
2644 case check_state_compute_run:
2647 printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n",
2648 __func__, sh->check_state,
2649 (unsigned long long) sh->sector);
2655 static void handle_parity_checks6(raid5_conf_t *conf, struct stripe_head *sh,
2656 struct stripe_head_state *s,
2657 struct r6_state *r6s, int disks)
2659 int pd_idx = sh->pd_idx;
2660 int qd_idx = sh->qd_idx;
2663 set_bit(STRIPE_HANDLE, &sh->state);
2665 BUG_ON(s->failed > 2);
2667 /* Want to check and possibly repair P and Q.
2668 * However there could be one 'failed' device, in which
2669 * case we can only check one of them, possibly using the
2670 * other to generate missing data
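/*
 * The run states below select which syndrome check is performed:
 * check_state_run checks P (chosen when there are no failures, or the
 * only failed device holds Q), check_state_run_q checks only Q (P was
 * needed to rebuild data and cannot be verified), and
 * check_state_run_pq checks both P and Q.
 */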
2673 switch (sh->check_state) {
2674 case check_state_idle:
2675 /* start a new check operation if there are < 2 failures */
2676 if (s->failed == r6s->q_failed) {
2677 /* The only possible failed device holds Q, so it
2678 * makes sense to check P (If anything else were failed,
2679 * we would have used P to recreate it).
2681 sh->check_state = check_state_run;
2683 if (!r6s->q_failed && s->failed < 2) {
2684 /* Q is not failed, and we didn't use it to generate
2685 * anything, so it makes sense to check it
2687 if (sh->check_state == check_state_run)
2688 sh->check_state = check_state_run_pq;
2690 sh->check_state = check_state_run_q;
2693 /* discard potentially stale zero_sum_result */
2694 sh->ops.zero_sum_result = 0;
2696 if (sh->check_state == check_state_run) {
2697 /* async_xor_zero_sum destroys the contents of P */
2698 clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
2701 if (sh->check_state >= check_state_run &&
2702 sh->check_state <= check_state_run_pq) {
2703 /* async_syndrome_zero_sum preserves P and Q, so
2704 * no need to mark them !uptodate here
2706 set_bit(STRIPE_OP_CHECK, &s->ops_request);
2710 /* we have 2-disk failure */
2711 BUG_ON(s->failed != 2);
2713 case check_state_compute_result:
2714 sh->check_state = check_state_idle;
2716 /* check that a write has not made the stripe insync */
2717 if (test_bit(STRIPE_INSYNC, &sh->state))
2720 /* now write out any block on a failed drive,
2721 * or P or Q if they were recomputed
2723 BUG_ON(s->uptodate < disks - 1); /* We don't need Q to recover */
2724 if (s->failed == 2) {
2725 dev = &sh->dev[r6s->failed_num[1]];
2727 set_bit(R5_LOCKED, &dev->flags);
2728 set_bit(R5_Wantwrite, &dev->flags);
2730 if (s->failed >= 1) {
2731 dev = &sh->dev[r6s->failed_num[0]];
2733 set_bit(R5_LOCKED, &dev->flags);
2734 set_bit(R5_Wantwrite, &dev->flags);
2736 if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) {
2737 dev = &sh->dev[pd_idx];
2739 set_bit(R5_LOCKED, &dev->flags);
2740 set_bit(R5_Wantwrite, &dev->flags);
2742 if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) {
2743 dev = &sh->dev[qd_idx];
2745 set_bit(R5_LOCKED, &dev->flags);
2746 set_bit(R5_Wantwrite, &dev->flags);
2748 clear_bit(STRIPE_DEGRADED, &sh->state);
2750 set_bit(STRIPE_INSYNC, &sh->state);
2752 case check_state_run:
2753 case check_state_run_q:
2754 case check_state_run_pq:
2755 break; /* we will be called again upon completion */
2756 case check_state_check_result:
2757 sh->check_state = check_state_idle;
2759 /* handle a successful check operation, if parity is correct
2760 * we are done. Otherwise update the mismatch count and repair
2761 * parity if !MD_RECOVERY_CHECK
2763 if (sh->ops.zero_sum_result == 0) {
2764 /* both parities are correct */
2766 set_bit(STRIPE_INSYNC, &sh->state);
2768 /* in contrast to the raid5 case we can validate
2769 * parity, but still have a failure to write
2772 sh->check_state = check_state_compute_result;
2773 /* Returning at this point means that we may go
2774 * off and bring p and/or q uptodate again so
2775 * we make sure to check zero_sum_result again
2776 * to verify if p or q need writeback
2780 conf->mddev->resync_mismatches += STRIPE_SECTORS;
2781 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
2782 /* don't try to repair!! */
2783 set_bit(STRIPE_INSYNC, &sh->state);
2785 int *target = &sh->ops.target;
2787 sh->ops.target = -1;
2788 sh->ops.target2 = -1;
2789 sh->check_state = check_state_compute_run;
2790 set_bit(STRIPE_COMPUTE_RUN, &sh->state);
2791 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
2792 if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) {
2793 set_bit(R5_Wantcompute,
2794 &sh->dev[pd_idx].flags);
2796 target = &sh->ops.target2;
2799 if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) {
2800 set_bit(R5_Wantcompute,
2801 &sh->dev[qd_idx].flags);
2808 case check_state_compute_run:
2811 printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n",
2812 __func__, sh->check_state,
2813 (unsigned long long) sh->sector);
2818 static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh,
2819 struct r6_state *r6s)
2823 /* We have read all the blocks in this stripe and now we need to
2824 * copy some of them into a target stripe for expand.
2826 struct dma_async_tx_descriptor *tx = NULL;
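/*
 * For each data block of this (old-geometry) stripe, work out which
 * stripe it belongs to in the new geometry and async_memcpy it into
 * that destination stripe: compute_blocknr() gives the block's
 * logical address in the array, and raid5_compute_sector() maps that
 * address onto the new layout.
 */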
2827 clear_bit(STRIPE_EXPAND_SOURCE, &sh->state);
2828 for (i = 0; i < sh->disks; i++)
2829 if (i != sh->pd_idx && i != sh->qd_idx) {
2831 struct stripe_head *sh2;
2832 struct async_submit_ctl submit;
2834 sector_t bn = compute_blocknr(sh, i, 1);
2835 sector_t s = raid5_compute_sector(conf, bn, 0,
2837 sh2 = get_active_stripe(conf, s, 0, 1, 1);
2839 /* so far only the early blocks of this stripe
2840 * have been requested. When later blocks
2841 * get requested, we will try again
2844 if (!test_bit(STRIPE_EXPANDING, &sh2->state) ||
2845 test_bit(R5_Expanded, &sh2->dev[dd_idx].flags)) {
2846 /* must have already done this block */
2847 release_stripe(sh2);
2851 /* place all the copies on one channel */
2852 init_async_submit(&submit, 0, tx, NULL, NULL, NULL);
2853 tx = async_memcpy(sh2->dev[dd_idx].page,
2854 sh->dev[i].page, 0, 0, STRIPE_SIZE,
2857 set_bit(R5_Expanded, &sh2->dev[dd_idx].flags);
2858 set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags);
2859 for (j = 0; j < conf->raid_disks; j++)
2860 if (j != sh2->pd_idx &&
2861 (!r6s || j != sh2->qd_idx) &&
2862 !test_bit(R5_Expanded, &sh2->dev[j].flags))
2864 if (j == conf->raid_disks) {
2865 set_bit(STRIPE_EXPAND_READY, &sh2->state);
2866 set_bit(STRIPE_HANDLE, &sh2->state);
2868 release_stripe(sh2);
2871 /* done submitting copies, wait for them to complete */
2874 dma_wait_for_async_tx(tx);
2880 * handle_stripe - do things to a stripe.
2882 * We lock the stripe and then examine the state of various bits
2883 * to see what needs to be done.
2885 * return some read request which now have data
2886 * return some write requests which are safely on disc
2887 * schedule a read on some buffers
2888 * schedule a write of some buffers
2889 * return confirmation of parity correctness
2891 * buffers are taken off read_list or write_list, and bh_cache buffers
2892 * get BH_Lock set before the stripe lock is released.
2896 static bool handle_stripe5(struct stripe_head *sh)
2898 raid5_conf_t *conf = sh->raid_conf;
2899 int disks = sh->disks, i;
2900 struct bio *return_bi = NULL;
2901 struct stripe_head_state s;
2903 mdk_rdev_t *blocked_rdev = NULL;
2906 memset(&s, 0, sizeof(s));
2907 pr_debug("handling stripe %llu, state=%#lx cnt=%d, pd_idx=%d check:%d "
2908 "reconstruct:%d\n", (unsigned long long)sh->sector, sh->state,
2909 atomic_read(&sh->count), sh->pd_idx, sh->check_state,
2910 sh->reconstruct_state);
2912 spin_lock(&sh->lock);
2913 clear_bit(STRIPE_HANDLE, &sh->state);
2914 clear_bit(STRIPE_DELAYED, &sh->state);
2916 s.syncing = test_bit(STRIPE_SYNCING, &sh->state);
2917 s.expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state);
2918 s.expanded = test_bit(STRIPE_EXPAND_READY, &sh->state);
2920 /* Now to look around and see what can be done */
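/*
 * Scan every device in the stripe once, accumulating counts of
 * locked, uptodate, to-be-read and to-be-written blocks (and which
 * devices have failed) into 's'; everything after the loop decides
 * what to do based on those counters.
 */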
2922 for (i=disks; i--; ) {
2924 struct r5dev *dev = &sh->dev[i];
2925 clear_bit(R5_Insync, &dev->flags);
2927 pr_debug("check %d: state 0x%lx toread %p read %p write %p "
2928 "written %p\n", i, dev->flags, dev->toread, dev->read,
2929 dev->towrite, dev->written);
2931 /* maybe we can request a biofill operation
2933 * new wantfill requests are only permitted while
2934 * ops_complete_biofill is guaranteed to be inactive
2936 if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread &&
2937 !test_bit(STRIPE_BIOFILL_RUN, &sh->state))
2938 set_bit(R5_Wantfill, &dev->flags);
2940 /* now count some things */
2941 if (test_bit(R5_LOCKED, &dev->flags)) s.locked++;
2942 if (test_bit(R5_UPTODATE, &dev->flags)) s.uptodate++;
2943 if (test_bit(R5_Wantcompute, &dev->flags)) s.compute++;
2945 if (test_bit(R5_Wantfill, &dev->flags))
2947 else if (dev->toread)
2951 if (!test_bit(R5_OVERWRITE, &dev->flags))
2956 rdev = rcu_dereference(conf->disks[i].rdev);
2957 if (blocked_rdev == NULL &&
2958 rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
2959 blocked_rdev = rdev;
2960 atomic_inc(&rdev->nr_pending);
2962 if (!rdev || !test_bit(In_sync, &rdev->flags)) {
2963 /* The ReadError flag will just be confusing now */
2964 clear_bit(R5_ReadError, &dev->flags);
2965 clear_bit(R5_ReWrite, &dev->flags);
2967 if (!rdev || !test_bit(In_sync, &rdev->flags)
2968 || test_bit(R5_ReadError, &dev->flags)) {
2972 set_bit(R5_Insync, &dev->flags);
2976 if (unlikely(blocked_rdev)) {
2977 if (s.syncing || s.expanding || s.expanded ||
2978 s.to_write || s.written) {
2979 set_bit(STRIPE_HANDLE, &sh->state);
2982 /* There is nothing for the blocked_rdev to block */
2983 rdev_dec_pending(blocked_rdev, conf->mddev);
2984 blocked_rdev = NULL;
2987 if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) {
2988 set_bit(STRIPE_OP_BIOFILL, &s.ops_request);
2989 set_bit(STRIPE_BIOFILL_RUN, &sh->state);
2992 pr_debug("locked=%d uptodate=%d to_read=%d"
2993 " to_write=%d failed=%d failed_num=%d\n",
2994 s.locked, s.uptodate, s.to_read, s.to_write,
2995 s.failed, s.failed_num);
2996 /* check if the array has lost two devices and, if so, some requests might need to be failed */
2999 if (s.failed > 1 && s.to_read+s.to_write+s.written)
3000 handle_failed_stripe(conf, sh, &s, disks, &return_bi);
3001 if (s.failed > 1 && s.syncing) {
3002 md_done_sync(conf->mddev, STRIPE_SECTORS,0);
3003 clear_bit(STRIPE_SYNCING, &sh->state);
3007 /* might be able to return some write requests if the parity block
3008 * is safe, or on a failed drive
3010 dev = &sh->dev[sh->pd_idx];
3012 ((test_bit(R5_Insync, &dev->flags) &&
3013 !test_bit(R5_LOCKED, &dev->flags) &&
3014 test_bit(R5_UPTODATE, &dev->flags)) ||
3015 (s.failed == 1 && s.failed_num == sh->pd_idx)))
3016 handle_stripe_clean_event(conf, sh, disks, &return_bi);
3018 /* Now we might consider reading some blocks, either to check/generate
3019 * parity, or to satisfy requests
3020 * or to load a block that is being partially written.
3022 if (s.to_read || s.non_overwrite ||
3023 (s.syncing && (s.uptodate + s.compute < disks)) || s.expanding)
3024 handle_stripe_fill5(sh, &s, disks);
3026 /* Now we check to see if any write operations have recently
3030 if (sh->reconstruct_state == reconstruct_state_prexor_drain_result)
3032 if (sh->reconstruct_state == reconstruct_state_drain_result ||
3033 sh->reconstruct_state == reconstruct_state_prexor_drain_result) {
3034 sh->reconstruct_state = reconstruct_state_idle;
3036 /* All the 'written' buffers and the parity block are ready to
3037 * be written back to disk
3039 BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags));
3040 for (i = disks; i--; ) {
3042 if (test_bit(R5_LOCKED, &dev->flags) &&
3043 (i == sh->pd_idx || dev->written)) {
3044 pr_debug("Writing block %d\n", i);
3045 set_bit(R5_Wantwrite, &dev->flags);
3048 if (!test_bit(R5_Insync, &dev->flags) ||
3049 (i == sh->pd_idx && s.failed == 0))
3050 set_bit(STRIPE_INSYNC, &sh->state);
3053 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
3054 atomic_dec(&conf->preread_active_stripes);
3055 if (atomic_read(&conf->preread_active_stripes) <
3057 md_wakeup_thread(conf->mddev->thread);
3061 /* Now to consider new write requests and what else, if anything
3062 * should be read. We do not handle new writes when:
3063 * 1/ A 'write' operation (copy+xor) is already in flight.
3064 * 2/ A 'check' operation is in flight, as it may clobber the parity
3067 if (s.to_write && !sh->reconstruct_state && !sh->check_state)
3068 handle_stripe_dirtying5(conf, sh, &s, disks);
3070 /* maybe we need to check and possibly fix the parity for this stripe
3071 * Any reads will already have been scheduled, so we just see if enough
3072 * data is available. The parity check is held off while parity
3073 * dependent operations are in flight.
3075 if (sh->check_state ||
3076 (s.syncing && s.locked == 0 &&
3077 !test_bit(STRIPE_COMPUTE_RUN, &sh->state) &&
3078 !test_bit(STRIPE_INSYNC, &sh->state)))
3079 handle_parity_checks5(conf, sh, &s, disks);
3081 if (s.syncing && s.locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) {
3082 md_done_sync(conf->mddev, STRIPE_SECTORS,1);
3083 clear_bit(STRIPE_SYNCING, &sh->state);
3086 /* If the failed drive is just a ReadError, then we might need to progress
3087 * the repair/check process
3089 if (s.failed == 1 && !conf->mddev->ro &&
3090 test_bit(R5_ReadError, &sh->dev[s.failed_num].flags)
3091 && !test_bit(R5_LOCKED, &sh->dev[s.failed_num].flags)
3092 && test_bit(R5_UPTODATE, &sh->dev[s.failed_num].flags)
3094 dev = &sh->dev[s.failed_num];
3095 if (!test_bit(R5_ReWrite, &dev->flags)) {
3096 set_bit(R5_Wantwrite, &dev->flags);
3097 set_bit(R5_ReWrite, &dev->flags);
3098 set_bit(R5_LOCKED, &dev->flags);
3101 /* let's read it back */
3102 set_bit(R5_Wantread, &dev->flags);
3103 set_bit(R5_LOCKED, &dev->flags);
3108 /* Finish reconstruct operations initiated by the expansion process */
3109 if (sh->reconstruct_state == reconstruct_state_result) {
3110 struct stripe_head *sh2
3111 = get_active_stripe(conf, sh->sector, 1, 1, 1);
3112 if (sh2 && test_bit(STRIPE_EXPAND_SOURCE, &sh2->state)) {
3113 /* sh cannot be written until sh2 has been read.
3114 * so arrange for sh to be delayed a little
3116 set_bit(STRIPE_DELAYED, &sh->state);
3117 set_bit(STRIPE_HANDLE, &sh->state);
3118 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE,
3120 atomic_inc(&conf->preread_active_stripes);
3121 release_stripe(sh2);
3125 release_stripe(sh2);
3127 sh->reconstruct_state = reconstruct_state_idle;
3128 clear_bit(STRIPE_EXPANDING, &sh->state);
3129 for (i = conf->raid_disks; i--; ) {
3130 set_bit(R5_Wantwrite, &sh->dev[i].flags);
3131 set_bit(R5_LOCKED, &sh->dev[i].flags);
3136 if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) &&
3137 !sh->reconstruct_state) {
3138 /* Need to write out all blocks after computing parity */
3139 sh->disks = conf->raid_disks;
3140 stripe_set_idx(sh->sector, conf, 0, sh);
3141 schedule_reconstruction(sh, &s, 1, 1);
3142 } else if (s.expanded && !sh->reconstruct_state && s.locked == 0) {
3143 clear_bit(STRIPE_EXPAND_READY, &sh->state);
3144 atomic_dec(&conf->reshape_stripes);
3145 wake_up(&conf->wait_for_overlap);
3146 md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
3149 if (s.expanding && s.locked == 0 &&
3150 !test_bit(STRIPE_COMPUTE_RUN, &sh->state))
3151 handle_stripe_expansion(conf, sh, NULL);
3154 spin_unlock(&sh->lock);
3156 /* wait for this device to become unblocked */
3157 if (unlikely(blocked_rdev))
3158 md_wait_for_blocked_rdev(blocked_rdev, conf->mddev);
3161 raid_run_ops(sh, s.ops_request);
3165 return_io(return_bi);
3167 return blocked_rdev == NULL;
3170 static bool handle_stripe6(struct stripe_head *sh)
3172 raid5_conf_t *conf = sh->raid_conf;
3173 int disks = sh->disks;
3174 struct bio *return_bi = NULL;
3175 int i, pd_idx = sh->pd_idx, qd_idx = sh->qd_idx;
3176 struct stripe_head_state s;
3177 struct r6_state r6s;
3178 struct r5dev *dev, *pdev, *qdev;
3179 mdk_rdev_t *blocked_rdev = NULL;
3181 pr_debug("handling stripe %llu, state=%#lx cnt=%d, "
3182 "pd_idx=%d, qd_idx=%d\n, check:%d, reconstruct:%d\n",
3183 (unsigned long long)sh->sector, sh->state,
3184 atomic_read(&sh->count), pd_idx, qd_idx,
3185 sh->check_state, sh->reconstruct_state);
3186 memset(&s, 0, sizeof(s));
3188 spin_lock(&sh->lock);
3189 clear_bit(STRIPE_HANDLE, &sh->state);
3190 clear_bit(STRIPE_DELAYED, &sh->state);
3192 s.syncing = test_bit(STRIPE_SYNCING, &sh->state);
3193 s.expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state);
3194 s.expanded = test_bit(STRIPE_EXPAND_READY, &sh->state);
3195 /* Now to look around and see what can be done */
3198 for (i=disks; i--; ) {
3201 clear_bit(R5_Insync, &dev->flags);
3203 pr_debug("check %d: state 0x%lx read %p write %p written %p\n",
3204 i, dev->flags, dev->toread, dev->towrite, dev->written);
3205 /* maybe we can reply to a read
3207 * new wantfill requests are only permitted while
3208 * ops_complete_biofill is guaranteed to be inactive
3210 if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread &&
3211 !test_bit(STRIPE_BIOFILL_RUN, &sh->state))
3212 set_bit(R5_Wantfill, &dev->flags);
3214 /* now count some things */
3215 if (test_bit(R5_LOCKED, &dev->flags)) s.locked++;
3216 if (test_bit(R5_UPTODATE, &dev->flags)) s.uptodate++;
3217 if (test_bit(R5_Wantcompute, &dev->flags))
3218 BUG_ON(++s.compute > 2);
3220 if (test_bit(R5_Wantfill, &dev->flags)) {
3222 } else if (dev->toread)
3226 if (!test_bit(R5_OVERWRITE, &dev->flags))
3231 rdev = rcu_dereference(conf->disks[i].rdev);
3232 if (blocked_rdev == NULL &&
3233 rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
3234 blocked_rdev = rdev;
3235 atomic_inc(&rdev->nr_pending);
3237 if (!rdev || !test_bit(In_sync, &rdev->flags)) {
3238 /* The ReadError flag will just be confusing now */
3239 clear_bit(R5_ReadError, &dev->flags);
3240 clear_bit(R5_ReWrite, &dev->flags);
3242 if (!rdev || !test_bit(In_sync, &rdev->flags)
3243 || test_bit(R5_ReadError, &dev->flags)) {
3245 r6s.failed_num[s.failed] = i;
3248 set_bit(R5_Insync, &dev->flags);
3252 if (unlikely(blocked_rdev)) {
3253 if (s.syncing || s.expanding || s.expanded ||
3254 s.to_write || s.written) {
3255 set_bit(STRIPE_HANDLE, &sh->state);
3258 /* There is nothing for the blocked_rdev to block */
3259 rdev_dec_pending(blocked_rdev, conf->mddev);
3260 blocked_rdev = NULL;
3263 if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) {
3264 set_bit(STRIPE_OP_BIOFILL, &s.ops_request);
3265 set_bit(STRIPE_BIOFILL_RUN, &sh->state);
3268 pr_debug("locked=%d uptodate=%d to_read=%d"
3269 " to_write=%d failed=%d failed_num=%d,%d\n",
3270 s.locked, s.uptodate, s.to_read, s.to_write, s.failed,
3271 r6s.failed_num[0], r6s.failed_num[1]);
3272 /* check if the array has lost >2 devices and, if so, some requests
3273 * might need to be failed
3275 if (s.failed > 2 && s.to_read+s.to_write+s.written)
3276 handle_failed_stripe(conf, sh, &s, disks, &return_bi);
3277 if (s.failed > 2 && s.syncing) {
3278 md_done_sync(conf->mddev, STRIPE_SECTORS,0);
3279 clear_bit(STRIPE_SYNCING, &sh->state);
3284 * might be able to return some write requests if the parity blocks
3285 * are safe, or on a failed drive
3287 pdev = &sh->dev[pd_idx];
3288 r6s.p_failed = (s.failed >= 1 && r6s.failed_num[0] == pd_idx)
3289 || (s.failed >= 2 && r6s.failed_num[1] == pd_idx);
3290 qdev = &sh->dev[qd_idx];
3291 r6s.q_failed = (s.failed >= 1 && r6s.failed_num[0] == qd_idx)
3292 || (s.failed >= 2 && r6s.failed_num[1] == qd_idx);
3295 ( r6s.p_failed || ((test_bit(R5_Insync, &pdev->flags)
3296 && !test_bit(R5_LOCKED, &pdev->flags)
3297 && test_bit(R5_UPTODATE, &pdev->flags)))) &&
3298 ( r6s.q_failed || ((test_bit(R5_Insync, &qdev->flags)
3299 && !test_bit(R5_LOCKED, &qdev->flags)
3300 && test_bit(R5_UPTODATE, &qdev->flags)))))
3301 handle_stripe_clean_event(conf, sh, disks, &return_bi);
3303 /* Now we might consider reading some blocks, either to check/generate
3304 * parity, or to satisfy requests
3305 * or to load a block that is being partially written.
3307 if (s.to_read || s.non_overwrite || (s.to_write && s.failed) ||
3308 (s.syncing && (s.uptodate + s.compute < disks)) || s.expanding)
3309 handle_stripe_fill6(sh, &s, &r6s, disks);
3311 /* Now we check to see if any write operations have recently
3314 if (sh->reconstruct_state == reconstruct_state_drain_result) {
3315 int qd_idx = sh->qd_idx;
3317 sh->reconstruct_state = reconstruct_state_idle;
3318 /* All the 'written' buffers and the parity blocks are ready to
3319 * be written back to disk
3321 BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags));
3322 BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[qd_idx].flags));
3323 for (i = disks; i--; ) {
3325 if (test_bit(R5_LOCKED, &dev->flags) &&
3326 (i == sh->pd_idx || i == qd_idx ||
3328 pr_debug("Writing block %d\n", i);
3329 BUG_ON(!test_bit(R5_UPTODATE, &dev->flags));
3330 set_bit(R5_Wantwrite, &dev->flags);
3331 if (!test_bit(R5_Insync, &dev->flags) ||
3332 ((i == sh->pd_idx || i == qd_idx) &&
3334 set_bit(STRIPE_INSYNC, &sh->state);
3337 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
3338 atomic_dec(&conf->preread_active_stripes);
3339 if (atomic_read(&conf->preread_active_stripes) <
3341 md_wakeup_thread(conf->mddev->thread);
3345 /* Now to consider new write requests and what else, if anything
3346 * should be read. We do not handle new writes when:
3347 * 1/ A 'write' operation (copy+gen_syndrome) is already in flight.
3348 * 2/ A 'check' operation is in flight, as it may clobber the parity
3351 if (s.to_write && !sh->reconstruct_state && !sh->check_state)
3352 handle_stripe_dirtying6(conf, sh, &s, &r6s, disks);
3354 /* maybe we need to check and possibly fix the parity for this stripe
3355 * Any reads will already have been scheduled, so we just see if enough
3356 * data is available. The parity check is held off while parity
3357 * dependent operations are in flight.
3359 if (sh->check_state ||
3360 (s.syncing && s.locked == 0 &&
3361 !test_bit(STRIPE_COMPUTE_RUN, &sh->state) &&
3362 !test_bit(STRIPE_INSYNC, &sh->state)))
3363 handle_parity_checks6(conf, sh, &s, &r6s, disks);
3365 if (s.syncing && s.locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) {
3366 md_done_sync(conf->mddev, STRIPE_SECTORS,1);
3367 clear_bit(STRIPE_SYNCING, &sh->state);
3370 /* If the failed drives are just a ReadError, then we might need
3371 * to progress the repair/check process
3373 if (s.failed <= 2 && !conf->mddev->ro)
3374 for (i = 0; i < s.failed; i++) {
3375 dev = &sh->dev[r6s.failed_num[i]];
3376 if (test_bit(R5_ReadError, &dev->flags)
3377 && !test_bit(R5_LOCKED, &dev->flags)
3378 && test_bit(R5_UPTODATE, &dev->flags)
3380 if (!test_bit(R5_ReWrite, &dev->flags)) {
3381 set_bit(R5_Wantwrite, &dev->flags);
3382 set_bit(R5_ReWrite, &dev->flags);
3383 set_bit(R5_LOCKED, &dev->flags);
3386 /* let's read it back */
3387 set_bit(R5_Wantread, &dev->flags);
3388 set_bit(R5_LOCKED, &dev->flags);
3394 /* Finish reconstruct operations initiated by the expansion process */
3395 if (sh->reconstruct_state == reconstruct_state_result) {
3396 sh->reconstruct_state = reconstruct_state_idle;
3397 clear_bit(STRIPE_EXPANDING, &sh->state);
3398 for (i = conf->raid_disks; i--; ) {
3399 set_bit(R5_Wantwrite, &sh->dev[i].flags);
3400 set_bit(R5_LOCKED, &sh->dev[i].flags);
3405 if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) &&
3406 !sh->reconstruct_state) {
3407 struct stripe_head *sh2
3408 = get_active_stripe(conf, sh->sector, 1, 1, 1);
3409 if (sh2 && test_bit(STRIPE_EXPAND_SOURCE, &sh2->state)) {
3410 /* sh cannot be written until sh2 has been read.
3411 * so arrange for sh to be delayed a little
3413 set_bit(STRIPE_DELAYED, &sh->state);
3414 set_bit(STRIPE_HANDLE, &sh->state);
3415 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE,
3417 atomic_inc(&conf->preread_active_stripes);
3418 release_stripe(sh2);
3422 release_stripe(sh2);
3424 /* Need to write out all blocks after computing P&Q */
3425 sh->disks = conf->raid_disks;
3426 stripe_set_idx(sh->sector, conf, 0, sh);
3427 schedule_reconstruction(sh, &s, 1, 1);
3428 } else if (s.expanded && !sh->reconstruct_state && s.locked == 0) {
3429 clear_bit(STRIPE_EXPAND_READY, &sh->state);
3430 atomic_dec(&conf->reshape_stripes);
3431 wake_up(&conf->wait_for_overlap);
3432 md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
3435 if (s.expanding && s.locked == 0 &&
3436 !test_bit(STRIPE_COMPUTE_RUN, &sh->state))
3437 handle_stripe_expansion(conf, sh, &r6s);
3440 spin_unlock(&sh->lock);
3442 /* wait for this device to become unblocked */
3443 if (unlikely(blocked_rdev))
3444 md_wait_for_blocked_rdev(blocked_rdev, conf->mddev);
3447 raid_run_ops(sh, s.ops_request);
3451 return_io(return_bi);
3453 return blocked_rdev == NULL;
3456 /* returns true if the stripe was handled */
3457 static bool handle_stripe(struct stripe_head *sh)
3459 if (sh->raid_conf->level == 6)
3460 return handle_stripe6(sh);
3462 return handle_stripe5(sh);
3465 static void raid5_activate_delayed(raid5_conf_t *conf)
3467 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) {
3468 while (!list_empty(&conf->delayed_list)) {
3469 struct list_head *l = conf->delayed_list.next;
3470 struct stripe_head *sh;
3471 sh = list_entry(l, struct stripe_head, lru);
3473 clear_bit(STRIPE_DELAYED, &sh->state);
3474 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
3475 atomic_inc(&conf->preread_active_stripes);
3476 list_add_tail(&sh->lru, &conf->hold_list);
3479 blk_plug_device(conf->mddev->queue);
3482 static void activate_bit_delay(raid5_conf_t *conf)
3484 /* device_lock is held */
3485 struct list_head head;
3486 list_add(&head, &conf->bitmap_list);
3487 list_del_init(&conf->bitmap_list);
3488 while (!list_empty(&head)) {
3489 struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru);
3490 list_del_init(&sh->lru);
3491 atomic_inc(&sh->count);
3492 __release_stripe(conf, sh);
3496 static void unplug_slaves(mddev_t *mddev)
3498 raid5_conf_t *conf = mddev->private;
3502 for (i = 0; i < conf->raid_disks; i++) {
3503 mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
3504 if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
3505 struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
3507 atomic_inc(&rdev->nr_pending);
3510 blk_unplug(r_queue);
3512 rdev_dec_pending(rdev, mddev);
3519 static void raid5_unplug_device(struct request_queue *q)
3521 mddev_t *mddev = q->queuedata;
3522 raid5_conf_t *conf = mddev->private;
3523 unsigned long flags;
3525 spin_lock_irqsave(&conf->device_lock, flags);
3527 if (blk_remove_plug(q)) {
3529 raid5_activate_delayed(conf);
3531 md_wakeup_thread(mddev->thread);
3533 spin_unlock_irqrestore(&conf->device_lock, flags);
3535 unplug_slaves(mddev);
3538 static int raid5_congested(void *data, int bits)
3540 mddev_t *mddev = data;
3541 raid5_conf_t *conf = mddev->private;
3543 /* No difference between reads and writes. Just check
3544 * how busy the stripe_cache is
3546 if (conf->inactive_blocked)
3550 if (list_empty_careful(&conf->inactive_list))
3556 /* We want read requests to align with chunks where possible,
3557 * but write requests don't need to.
3559 static int raid5_mergeable_bvec(struct request_queue *q,
3560 struct bvec_merge_data *bvm,
3561 struct bio_vec *biovec)
3563 mddev_t *mddev = q->queuedata;
3564 sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
3566 unsigned int chunk_sectors = mddev->chunk_sectors;
3567 unsigned int bio_sectors = bvm->bi_size >> 9;
3569 if ((bvm->bi_rw & 1) == WRITE)
3570 return biovec->bv_len; /* always allow writes to be mergeable */
3572 if (mddev->new_chunk_sectors < mddev->chunk_sectors)
3573 chunk_sectors = mddev->new_chunk_sectors;
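/*
 * 'max' is how many bytes of this chunk remain after the sectors the
 * bio already covers; a read is only allowed to grow while it stays
 * within one chunk, so it can later be serviced by a single drive.
 */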
3574 max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
3575 if (max < 0) max = 0;
3576 if (max <= biovec->bv_len && bio_sectors == 0)
3577 return biovec->bv_len;
3583 static int in_chunk_boundary(mddev_t *mddev, struct bio *bio)
3585 sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
3586 unsigned int chunk_sectors = mddev->chunk_sectors;
3587 unsigned int bio_sectors = bio->bi_size >> 9;
3589 if (mddev->new_chunk_sectors < mddev->chunk_sectors)
3590 chunk_sectors = mddev->new_chunk_sectors;
3591 return chunk_sectors >=
3592 ((sector & (chunk_sectors - 1)) + bio_sectors);
3596 * add bio to the retry LIFO ( in O(1) ... we are in interrupt )
3597 * later sampled by raid5d.
3599 static void add_bio_to_retry(struct bio *bi,raid5_conf_t *conf)
3601 unsigned long flags;
3603 spin_lock_irqsave(&conf->device_lock, flags);
3605 bi->bi_next = conf->retry_read_aligned_list;
3606 conf->retry_read_aligned_list = bi;
3608 spin_unlock_irqrestore(&conf->device_lock, flags);
3609 md_wakeup_thread(conf->mddev->thread);
3613 static struct bio *remove_bio_from_retry(raid5_conf_t *conf)
3617 bi = conf->retry_read_aligned;
3619 conf->retry_read_aligned = NULL;
3622 bi = conf->retry_read_aligned_list;
3624 conf->retry_read_aligned_list = bi->bi_next;
3627 * this sets the active stripe count to 1 and the processed
3628 * stripe count to zero (upper 16 bits)
3630 bi->bi_phys_segments = 1; /* biased count of active stripes */
3638 * The "raid5_align_endio" should check if the read succeeded and if it
3639 * did, call bio_endio on the original bio (having bio_put the new bio first).
3641 * If the read failed, hand the original bio back to be retried via the stripe cache.
3643 static void raid5_align_endio(struct bio *bi, int error)
3645 struct bio* raid_bi = bi->bi_private;
3648 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
3653 mddev = raid_bi->bi_bdev->bd_disk->queue->queuedata;
3654 conf = mddev->private;
3655 rdev = (void*)raid_bi->bi_next;
3656 raid_bi->bi_next = NULL;
3658 rdev_dec_pending(rdev, conf->mddev);
3660 if (!error && uptodate) {
3661 bio_endio(raid_bi, 0);
3662 if (atomic_dec_and_test(&conf->active_aligned_reads))
3663 wake_up(&conf->wait_for_stripe);
3668 pr_debug("raid5_align_endio : io error...handing IO for a retry\n");
3670 add_bio_to_retry(raid_bi, conf);
3673 static int bio_fits_rdev(struct bio *bi)
3675 struct request_queue *q = bdev_get_queue(bi->bi_bdev);
3677 if ((bi->bi_size>>9) > queue_max_sectors(q))
3679 blk_recount_segments(q, bi);
3680 if (bi->bi_phys_segments > queue_max_phys_segments(q))
3683 if (q->merge_bvec_fn)
3684 /* it's too hard to apply the merge_bvec_fn at this stage, so just give up */
3693 static int chunk_aligned_read(struct request_queue *q, struct bio * raid_bio)
3695 mddev_t *mddev = q->queuedata;
3696 raid5_conf_t *conf = mddev->private;
3697 unsigned int dd_idx;
3698 struct bio* align_bi;
3701 if (!in_chunk_boundary(mddev, raid_bio)) {
3702 pr_debug("chunk_aligned_read : non aligned\n");
3706 * use bio_clone to make a copy of the bio
3708 align_bi = bio_clone(raid_bio, GFP_NOIO);
3712 * set bi_end_io to a new function, and set bi_private to the
3715 align_bi->bi_end_io = raid5_align_endio;
3716 align_bi->bi_private = raid_bio;
3720 align_bi->bi_sector = raid5_compute_sector(conf, raid_bio->bi_sector,
3725 rdev = rcu_dereference(conf->disks[dd_idx].rdev);
3726 if (rdev && test_bit(In_sync, &rdev->flags)) {
3727 atomic_inc(&rdev->nr_pending);
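/*
 * Stash the rdev pointer in bi_next of the original bio (unused while
 * the cloned bio is in flight) so raid5_align_endio can retrieve it
 * and drop the nr_pending reference when the read completes.
 */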
3729 raid_bio->bi_next = (void*)rdev;
3730 align_bi->bi_bdev = rdev->bdev;
3731 align_bi->bi_flags &= ~(1 << BIO_SEG_VALID);
3732 align_bi->bi_sector += rdev->data_offset;
3734 if (!bio_fits_rdev(align_bi)) {
3735 /* too big in some way */
3737 rdev_dec_pending(rdev, mddev);
3741 spin_lock_irq(&conf->device_lock);
3742 wait_event_lock_irq(conf->wait_for_stripe,
3744 conf->device_lock, /* nothing */);
3745 atomic_inc(&conf->active_aligned_reads);
3746 spin_unlock_irq(&conf->device_lock);
3748 generic_make_request(align_bi);
3757 /* __get_priority_stripe - get the next stripe to process
3759 * Full stripe writes are allowed to pass preread active stripes up until
3760 * the bypass_threshold is exceeded. In general the bypass_count
3761 * increments when the handle_list is handled before the hold_list; however, it
3762 * will not be incremented when STRIPE_IO_STARTED is sampled as set, signifying a
3763 * stripe with in-flight i/o. The bypass_count will be reset when the
3764 * head of the hold_list has changed, i.e. the head was promoted to the handle_list.
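/*
 * In short: stripes on the handle_list are always preferred; a stripe
 * is taken from the hold_list (preread) only when bypass_count has
 * exceeded bypass_threshold or there are no pending full-stripe
 * writes left to give priority to.
 */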
3767 static struct stripe_head *__get_priority_stripe(raid5_conf_t *conf)
3769 struct stripe_head *sh;
3771 pr_debug("%s: handle: %s hold: %s full_writes: %d bypass_count: %d\n",
3773 list_empty(&conf->handle_list) ? "empty" : "busy",
3774 list_empty(&conf->hold_list) ? "empty" : "busy",
3775 atomic_read(&conf->pending_full_writes), conf->bypass_count);
3777 if (!list_empty(&conf->handle_list)) {
3778 sh = list_entry(conf->handle_list.next, typeof(*sh), lru);
3780 if (list_empty(&conf->hold_list))
3781 conf->bypass_count = 0;
3782 else if (!test_bit(STRIPE_IO_STARTED, &sh->state)) {
3783 if (conf->hold_list.next == conf->last_hold)
3784 conf->bypass_count++;
3786 conf->last_hold = conf->hold_list.next;
3787 conf->bypass_count -= conf->bypass_threshold;
3788 if (conf->bypass_count < 0)
3789 conf->bypass_count = 0;
3792 } else if (!list_empty(&conf->hold_list) &&
3793 ((conf->bypass_threshold &&
3794 conf->bypass_count > conf->bypass_threshold) ||
3795 atomic_read(&conf->pending_full_writes) == 0)) {
3796 sh = list_entry(conf->hold_list.next,
3798 conf->bypass_count -= conf->bypass_threshold;
3799 if (conf->bypass_count < 0)
3800 conf->bypass_count = 0;
3804 list_del_init(&sh->lru);
3805 atomic_inc(&sh->count);
3806 BUG_ON(atomic_read(&sh->count) != 1);
3810 static int make_request(struct request_queue *q, struct bio * bi)
3812 mddev_t *mddev = q->queuedata;
3813 raid5_conf_t *conf = mddev->private;
3815 sector_t new_sector;
3816 sector_t logical_sector, last_sector;
3817 struct stripe_head *sh;
3818 const int rw = bio_data_dir(bi);
3821 if (unlikely(bio_barrier(bi))) {
3822 bio_endio(bi, -EOPNOTSUPP);
3826 md_write_start(mddev, bi);
3828 cpu = part_stat_lock();
3829 part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
3830 part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
3835 mddev->reshape_position == MaxSector &&
3836 chunk_aligned_read(q,bi))
3839 logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
3840 last_sector = bi->bi_sector + (bi->bi_size>>9);
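/*
 * logical_sector is rounded down to a STRIPE_SECTORS boundary so that
 * the loop below visits every stripe this bio touches, even when the
 * bio starts or ends part-way through a stripe.
 */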
3842 bi->bi_phys_segments = 1; /* over-loaded to count active stripes */
3844 for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
3846 int disks, data_disks;
3851 disks = conf->raid_disks;
3852 prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
3853 if (unlikely(conf->reshape_progress != MaxSector)) {
3854 /* spinlock is needed as reshape_progress may be
3855 * 64bit on a 32bit platform, and so it might be
3856 * possible to see a half-updated value
3857 * Of course reshape_progress could change after
3858 * the lock is dropped, so once we get a reference
3859 * to the stripe that we think it is, we will have
3862 spin_lock_irq(&conf->device_lock);
3863 if (mddev->delta_disks < 0
3864 ? logical_sector < conf->reshape_progress
3865 : logical_sector >= conf->reshape_progress) {
3866 disks = conf->previous_raid_disks;
3869 if (mddev->delta_disks < 0
3870 ? logical_sector < conf->reshape_safe
3871 : logical_sector >= conf->reshape_safe) {
3872 spin_unlock_irq(&conf->device_lock);
3877 spin_unlock_irq(&conf->device_lock);
3879 data_disks = disks - conf->max_degraded;
3881 new_sector = raid5_compute_sector(conf, logical_sector,
3884 pr_debug("raid5: make_request, sector %llu logical %llu\n",
3885 (unsigned long long)new_sector,
3886 (unsigned long long)logical_sector);
3888 sh = get_active_stripe(conf, new_sector, previous,
3889 (bi->bi_rw&RWA_MASK), 0);
3891 if (unlikely(previous)) {
3892 /* expansion might have moved on while waiting for a
3893 * stripe, so we must do the range check again.
3894 * Expansion could still move past after this
3895 * test, but as we are holding a reference to
3896 * 'sh', we know that if that happens,
3897 * STRIPE_EXPANDING will get set and the expansion
3898 * won't proceed until we finish with the stripe.
3901 spin_lock_irq(&conf->device_lock);
3902 if (mddev->delta_disks < 0
3903 ? logical_sector >= conf->reshape_progress
3904 : logical_sector < conf->reshape_progress)
3905 /* mismatch, need to try again */
3907 spin_unlock_irq(&conf->device_lock);
3914 /* FIXME what if we get a false positive because these
3915 * are being updated.
3917 if (logical_sector >= mddev->suspend_lo &&
3918 logical_sector < mddev->suspend_hi) {
3924 if (test_bit(STRIPE_EXPANDING, &sh->state) ||
3925 !add_stripe_bio(sh, bi, dd_idx, (bi->bi_rw&RW_MASK))) {
3926 /* Stripe is busy expanding or
3927 * add failed due to overlap. Flush everything
3930 raid5_unplug_device(mddev->queue);
3935 finish_wait(&conf->wait_for_overlap, &w);
3936 set_bit(STRIPE_HANDLE, &sh->state);
3937 clear_bit(STRIPE_DELAYED, &sh->state);
3940 /* cannot get stripe for read-ahead, just give-up */
3941 clear_bit(BIO_UPTODATE, &bi->bi_flags);
3942 finish_wait(&conf->wait_for_overlap, &w);
3947 spin_lock_irq(&conf->device_lock);
3948 remaining = raid5_dec_bi_phys_segments(bi);
3949 spin_unlock_irq(&conf->device_lock);
3950 if (remaining == 0) {
3953 md_write_end(mddev);
3960 static sector_t raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks);
3962 static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped)
3964 /* reshaping is quite different to recovery/resync so it is
3965 * handled quite separately ... here.
3967 * On each call to sync_request, we gather one chunk worth of
3968 * destination stripes and flag them as expanding.
3969 * Then we find all the source stripes and request reads.
3970 * As the reads complete, handle_stripe will copy the data
3971 * into the destination stripe and release that stripe.
3973 raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
3974 struct stripe_head *sh;
3975 sector_t first_sector, last_sector;
3976 int raid_disks = conf->previous_raid_disks;
3977 int data_disks = raid_disks - conf->max_degraded;
3978 int new_data_disks = conf->raid_disks - conf->max_degraded;
3981 sector_t writepos, readpos, safepos;
3982 sector_t stripe_addr;
3983 int reshape_sectors;
3984 struct list_head stripes;
3986 if (sector_nr == 0) {
3987 /* If restarting in the middle, skip the initial sectors */
3988 if (mddev->delta_disks < 0 &&
3989 conf->reshape_progress < raid5_size(mddev, 0, 0)) {
3990 sector_nr = raid5_size(mddev, 0, 0)
3991 - conf->reshape_progress;
3992 } else if (mddev->delta_disks > 0 &&
3993 conf->reshape_progress > 0)
3994 sector_nr = conf->reshape_progress;
3995 sector_div(sector_nr, new_data_disks);
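/*
 * conf->reshape_progress is kept in array (logical) sectors, while
 * sync_request works in per-device sectors, so divide by the number
 * of data disks in the new layout to convert between the two.
 */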
4002 /* We need to process a full chunk at a time.
4003 * If old and new chunk sizes differ, we need to process the
4006 if (mddev->new_chunk_sectors > mddev->chunk_sectors)
4007 reshape_sectors = mddev->new_chunk_sectors;
4009 reshape_sectors = mddev->chunk_sectors;
4011 /* we update the metadata when there is more than 3Meg
4012 * in the block range (that is rather arbitrary, should
4013 * probably be time based) or when the data about to be
4014 * copied would over-write the source of the data at
4015 * the front of the range.
4016 * i.e. one new_stripe along from reshape_progress new_maps
4017 * to after where reshape_safe old_maps to
4019 writepos = conf->reshape_progress;
4020 sector_div(writepos, new_data_disks);
4021 readpos = conf->reshape_progress;
4022 sector_div(readpos, data_disks);
4023 safepos = conf->reshape_safe;
4024 sector_div(safepos, data_disks);
4025 if (mddev->delta_disks < 0) {
4026 writepos -= min_t(sector_t, reshape_sectors, writepos);
4027 readpos += reshape_sectors;
4028 safepos += reshape_sectors;
4030 writepos += reshape_sectors;
4031 readpos -= min_t(sector_t, reshape_sectors, readpos);
4032 safepos -= min_t(sector_t, reshape_sectors, safepos);
4035 /* 'writepos' is the most advanced device address we might write.
4036 * 'readpos' is the least advanced device address we might read.
4037 * 'safepos' is the least address recorded in the metadata as having
4039 * If 'readpos' is behind 'writepos', then there is no way that we can
4040 * ensure safety in the face of a crash - that must be done by userspace
4041 * making a backup of the data. So in that case there is no particular
4042 * rush to update metadata.
4043 * Otherwise if 'safepos' is behind 'writepos', then we really need to
4044 * update the metadata to advance 'safepos' to match 'readpos' so that
4045 * we can be safe in the event of a crash.
4046 * So we insist on updating metadata if safepos is behind writepos and
4047 * readpos is beyond writepos.
4048 * In any case, update the metadata every 10 seconds.
4049 * Maybe that number should be configurable, but I'm not sure it is
4050 * worth it.... maybe it could be a multiple of safemode_delay???
4052 if ((mddev->delta_disks < 0
4053 ? (safepos > writepos && readpos < writepos)
4054 : (safepos < writepos && readpos > writepos)) ||
4055 time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) {
4056 /* Cannot proceed until we've updated the superblock... */
4057 wait_event(conf->wait_for_overlap,
4058 atomic_read(&conf->reshape_stripes)==0);
4059 mddev->reshape_position = conf->reshape_progress;
4060 mddev->curr_resync_completed = mddev->curr_resync;
4061 conf->reshape_checkpoint = jiffies;
4062 set_bit(MD_CHANGE_DEVS, &mddev->flags);
4063 md_wakeup_thread(mddev->thread);
4064 wait_event(mddev->sb_wait, mddev->flags == 0 ||
4065 kthread_should_stop());
4066 spin_lock_irq(&conf->device_lock);
4067 conf->reshape_safe = mddev->reshape_position;
4068 spin_unlock_irq(&conf->device_lock);
4069 wake_up(&conf->wait_for_overlap);
4070 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
4073 if (mddev->delta_disks < 0) {
4074 BUG_ON(conf->reshape_progress == 0);
4075 stripe_addr = writepos;
4076 BUG_ON((mddev->dev_sectors &
4077 ~((sector_t)reshape_sectors - 1))
4078 - reshape_sectors - stripe_addr
4081 BUG_ON(writepos != sector_nr + reshape_sectors);
4082 stripe_addr = sector_nr;
4084 INIT_LIST_HEAD(&stripes);
4085 for (i = 0; i < reshape_sectors; i += STRIPE_SECTORS) {
4088 sh = get_active_stripe(conf, stripe_addr+i, 0, 0, 1);
4089 set_bit(STRIPE_EXPANDING, &sh->state);
4090 atomic_inc(&conf->reshape_stripes);
4091 /* If any of this stripe is beyond the end of the old
4092 * array, then we need to zero those blocks
4094 for (j=sh->disks; j--;) {
4096 if (j == sh->pd_idx)
4098 if (conf->level == 6 &&
4101 s = compute_blocknr(sh, j, 0);
4102 if (s < raid5_size(mddev, 0, 0)) {
4106 memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE);
4107 set_bit(R5_Expanded, &sh->dev[j].flags);
4108 set_bit(R5_UPTODATE, &sh->dev[j].flags);
4111 set_bit(STRIPE_EXPAND_READY, &sh->state);
4112 set_bit(STRIPE_HANDLE, &sh->state);
4114 list_add(&sh->lru, &stripes);
4116 spin_lock_irq(&conf->device_lock);
4117 if (mddev->delta_disks < 0)
4118 conf->reshape_progress -= reshape_sectors * new_data_disks;
4120 conf->reshape_progress += reshape_sectors * new_data_disks;
4121 spin_unlock_irq(&conf->device_lock);
4122 /* Ok, those stripe are ready. We can start scheduling
4123 * reads on the source stripes.
4124 * The source stripes are determined by mapping the first and last
4125 * block on the destination stripes.
4128 raid5_compute_sector(conf, stripe_addr*(new_data_disks),
4131 raid5_compute_sector(conf, ((stripe_addr+reshape_sectors)
4132 * new_data_disks - 1),
4134 if (last_sector >= mddev->dev_sectors)
4135 last_sector = mddev->dev_sectors - 1;
4136 while (first_sector <= last_sector) {
4137 sh = get_active_stripe(conf, first_sector, 1, 0, 1);
4138 set_bit(STRIPE_EXPAND_SOURCE, &sh->state);
4139 set_bit(STRIPE_HANDLE, &sh->state);
4141 first_sector += STRIPE_SECTORS;
4143 /* Now that the sources are clearly marked, we can release
4144 * the destination stripes
4146 while (!list_empty(&stripes)) {
4147 sh = list_entry(stripes.next, struct stripe_head, lru);
4148 list_del_init(&sh->lru);
4151 /* If this takes us to the resync_max point where we have to pause,
4152 * then we need to write out the superblock.
4154 sector_nr += reshape_sectors;
4155 if ((sector_nr - mddev->curr_resync_completed) * 2
4156 >= mddev->resync_max - mddev->curr_resync_completed) {
4157 /* Cannot proceed until we've updated the superblock... */
4158 wait_event(conf->wait_for_overlap,
4159 atomic_read(&conf->reshape_stripes) == 0);
4160 mddev->reshape_position = conf->reshape_progress;
4161 mddev->curr_resync_completed = mddev->curr_resync + reshape_sectors;
4162 conf->reshape_checkpoint = jiffies;
4163 set_bit(MD_CHANGE_DEVS, &mddev->flags);
4164 md_wakeup_thread(mddev->thread);
4165 wait_event(mddev->sb_wait,
4166 !test_bit(MD_CHANGE_DEVS, &mddev->flags)
4167 || kthread_should_stop());
4168 spin_lock_irq(&conf->device_lock);
4169 conf->reshape_safe = mddev->reshape_position;
4170 spin_unlock_irq(&conf->device_lock);
4171 wake_up(&conf->wait_for_overlap);
4172 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
4174 return reshape_sectors;
4177 /* FIXME go_faster isn't used */
4178 static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
4180 raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
4181 struct stripe_head *sh;
4182 sector_t max_sector = mddev->dev_sectors;
4184 int still_degraded = 0;
4187 if (sector_nr >= max_sector) {
4188 /* just being told to finish up .. nothing much to do */
4189 unplug_slaves(mddev);
4191 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
4196 if (mddev->curr_resync < max_sector) /* aborted */
4197 bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
4199 else /* completed sync */
4201 bitmap_close_sync(mddev->bitmap);
4206 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
4207 return reshape_request(mddev, sector_nr, skipped);
4209 /* No need to check resync_max as we never do more than one
4210 * stripe, and as resync_max will always be on a chunk boundary,
4211 * if the check in md_do_sync didn't fire, there is no chance
4212 * of overstepping resync_max here
4215 /* if there are too many failed drives and we are trying
4216 * to resync, then assert that we are finished, because there is
4217 * nothing we can do.
4219 if (mddev->degraded >= conf->max_degraded &&
4220 test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
4221 sector_t rv = mddev->dev_sectors - sector_nr;
4225 if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
4226 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
4227 !conf->fullsync && sync_blocks >= STRIPE_SECTORS) {
4228 /* we can skip this block, and probably more */
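/* e.g. with 4KiB pages STRIPE_SECTORS is 8, so a sync_blocks of, say,
 * 1003 is rounded down to 125 whole stripes (1000 sectors) before
 * being reported as skipped (illustrative numbers). */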
4229 sync_blocks /= STRIPE_SECTORS;
4231 return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */
4235 bitmap_cond_end_sync(mddev->bitmap, sector_nr);
4237 sh = get_active_stripe(conf, sector_nr, 0, 1, 0);
4239 sh = get_active_stripe(conf, sector_nr, 0, 0, 0);
4240 /* make sure we don't swamp the stripe cache if someone else
4241 * is trying to get access
4243 schedule_timeout_uninterruptible(1);
4245 /* Need to check if array will still be degraded after recovery/resync
4246 * We don't need to check the 'failed' flag as when that gets set,
4249 for (i = 0; i < conf->raid_disks; i++)
4250 if (conf->disks[i].rdev == NULL)
4253 bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded);
4255 spin_lock(&sh->lock);
4256 set_bit(STRIPE_SYNCING, &sh->state);
4257 clear_bit(STRIPE_INSYNC, &sh->state);
4258 spin_unlock(&sh->lock);
4260 /* wait for any blocked device to be handled */
4261 while (unlikely(!handle_stripe(sh)))
4265 return STRIPE_SECTORS;
4268 static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
4270 /* We may not be able to submit a whole bio at once as there
4271 * may not be enough stripe_heads available.
4272 * We cannot pre-allocate enough stripe_heads as we may need
4273 * more than exist in the cache (if we ever allow large chunks).
4274 * So we do one stripe head at a time and record in
4275 * ->bi_hw_segments how many have been done.
4277 * We *know* that this entire raid_bio is in one chunk, so
4278 * it will be only one 'dd_idx' and only need one call to raid5_compute_sector.
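 * The loop below therefore walks the bio STRIPE_SECTORS at a time;
 * 'scnt' counts stripes already completed on an earlier attempt (as
 * recorded in ->bi_hw_segments), so a retried bio simply skips its
 * first 'scnt' stripes and resumes where it previously stopped.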
4280 struct stripe_head *sh;
4282 sector_t sector, logical_sector, last_sector;
4287 logical_sector = raid_bio->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
4288 sector = raid5_compute_sector(conf, logical_sector,
4290 last_sector = raid_bio->bi_sector + (raid_bio->bi_size>>9);
4292 for (; logical_sector < last_sector;
4293 logical_sector += STRIPE_SECTORS,
4294 sector += STRIPE_SECTORS,
4297 if (scnt < raid5_bi_hw_segments(raid_bio))
4298 /* already done this stripe */
4301 sh = get_active_stripe(conf, sector, 0, 1, 0);
4304 /* failed to get a stripe - must wait */
4305 raid5_set_bi_hw_segments(raid_bio, scnt);
4306 conf->retry_read_aligned = raid_bio;
4310 set_bit(R5_ReadError, &sh->dev[dd_idx].flags);
4311 if (!add_stripe_bio(sh, raid_bio, dd_idx, 0)) {
4313 raid5_set_bi_hw_segments(raid_bio, scnt);
4314 conf->retry_read_aligned = raid_bio;
4322 spin_lock_irq(&conf->device_lock);
4323 remaining = raid5_dec_bi_phys_segments(raid_bio);
4324 spin_unlock_irq(&conf->device_lock);
4326 bio_endio(raid_bio, 0);
4327 if (atomic_dec_and_test(&conf->active_aligned_reads))
4328 wake_up(&conf->wait_for_stripe);
4332 #ifdef CONFIG_MULTICORE_RAID456
4333 static void __process_stripe(void *param, async_cookie_t cookie)
4335 struct stripe_head *sh = param;
4341 static void process_stripe(struct stripe_head *sh, struct list_head *domain)
4343 async_schedule_domain(__process_stripe, sh, domain);
4346 static void synchronize_stripe_processing(struct list_head *domain)
4348 async_synchronize_full_domain(domain);
4351 static void process_stripe(struct stripe_head *sh, struct list_head *domain)
4358 static void synchronize_stripe_processing(struct list_head *domain)
4365 * This is our raid5 kernel thread.
4367 * We scan the hash table for stripes which can be handled now.
4368 * During the scan, completed stripes are saved for us by the interrupt
4369 * handler, so that they will not have to wait for our next wakeup.
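 * Roughly, the loop below runs under conf->device_lock (dropping it
 * around the actual work): it first flushes any pending bitmap batch,
 * then retries deferred aligned reads, and finally pulls stripes off
 * the priority lists with __get_priority_stripe() until none remain.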
4371 static void raid5d(mddev_t *mddev)
4373 struct stripe_head *sh;
4374 raid5_conf_t *conf = mddev->private;
4376 LIST_HEAD(raid_domain);
4378 pr_debug("+++ raid5d active\n");
4380 md_check_recovery(mddev);
4383 spin_lock_irq(&conf->device_lock);
4387 if (conf->seq_flush != conf->seq_write) {
4388 int seq = conf->seq_flush;
4389 spin_unlock_irq(&conf->device_lock);
4390 bitmap_unplug(mddev->bitmap);
4391 spin_lock_irq(&conf->device_lock);
4392 conf->seq_write = seq;
4393 activate_bit_delay(conf);
4396 while ((bio = remove_bio_from_retry(conf))) {
4398 spin_unlock_irq(&conf->device_lock);
4399 ok = retry_aligned_read(conf, bio);
4400 spin_lock_irq(&conf->device_lock);
4406 sh = __get_priority_stripe(conf);
4410 spin_unlock_irq(&conf->device_lock);
4413 process_stripe(sh, &raid_domain);
4415 spin_lock_irq(&conf->device_lock);
4417 pr_debug("%d stripes handled\n", handled);
4419 spin_unlock_irq(&conf->device_lock);
4421 synchronize_stripe_processing(&raid_domain);
4422 async_tx_issue_pending_all();
4423 unplug_slaves(mddev);
4425 pr_debug("--- raid5d inactive\n");
4429 raid5_show_stripe_cache_size(mddev_t *mddev, char *page)
4431 raid5_conf_t *conf = mddev->private;
4433 return sprintf(page, "%d\n", conf->max_nr_stripes);
4439 raid5_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len)
4441 raid5_conf_t *conf = mddev->private;
4445 if (len >= PAGE_SIZE)
4450 if (strict_strtoul(page, 10, &new))
4452 if (new <= 16 || new > 32768)
4454 while (new < conf->max_nr_stripes) {
4455 if (drop_one_stripe(conf))
4456 conf->max_nr_stripes--;
4460 err = md_allow_write(mddev);
4463 while (new > conf->max_nr_stripes) {
4464 if (grow_one_stripe(conf))
4465 conf->max_nr_stripes++;
4471 static struct md_sysfs_entry
4472 raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR,
4473 raid5_show_stripe_cache_size,
4474 raid5_store_stripe_cache_size);
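/*
 * This attribute is exposed as md/stripe_cache_size under the array's
 * sysfs directory (typically /sys/block/mdX/md/); a written value must
 * lie in 17..32768 and the cache is then grown or shrunk one
 * stripe_head at a time via grow_one_stripe()/drop_one_stripe().
 */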
4477 raid5_show_preread_threshold(mddev_t *mddev, char *page)
4479 raid5_conf_t *conf = mddev->private;
4481 return sprintf(page, "%d\n", conf->bypass_threshold);
4487 raid5_store_preread_threshold(mddev_t *mddev, const char *page, size_t len)
4489 raid5_conf_t *conf = mddev->private;
4491 if (len >= PAGE_SIZE)
4496 if (strict_strtoul(page, 10, &new))
4498 if (new > conf->max_nr_stripes)
4500 conf->bypass_threshold = new;
4504 static struct md_sysfs_entry
4505 raid5_preread_bypass_threshold = __ATTR(preread_bypass_threshold,
4507 raid5_show_preread_threshold,
4508 raid5_store_preread_threshold);
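/*
 * Exposed as md/preread_bypass_threshold; values larger than the
 * current stripe cache size are refused by the max_nr_stripes check
 * above.
 */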
4511 stripe_cache_active_show(mddev_t *mddev, char *page)
4513 raid5_conf_t *conf = mddev->private;
4515 return sprintf(page, "%d\n", atomic_read(&conf->active_stripes));
4520 static struct md_sysfs_entry
4521 raid5_stripecache_active = __ATTR_RO(stripe_cache_active);
4523 static struct attribute *raid5_attrs[] = {
4524 &raid5_stripecache_size.attr,
4525 &raid5_stripecache_active.attr,
4526 &raid5_preread_bypass_threshold.attr,
4529 static struct attribute_group raid5_attrs_group = {
4531 .attrs = raid5_attrs,
4535 raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks)
4537 raid5_conf_t *conf = mddev->private;
4540 sectors = mddev->dev_sectors;
4542 /* size is defined by the smaller of the previous and new size */
4543 if (conf->raid_disks < conf->previous_raid_disks)
4544 raid_disks = conf->raid_disks;
4546 raid_disks = conf->previous_raid_disks;
4549 sectors &= ~((sector_t)mddev->chunk_sectors - 1);
4550 sectors &= ~((sector_t)mddev->new_chunk_sectors - 1);
4551 return sectors * (raid_disks - conf->max_degraded);
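/*
 * Illustrative example: five 1000000-sector members in a raid6
 * (max_degraded == 2) with 128-sector old and new chunks round down to
 * 999936 usable sectors per device and export 999936 * 3 sectors.
 */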
4554 static void raid5_free_percpu(raid5_conf_t *conf)
4556 struct raid5_percpu *percpu;
4563 for_each_possible_cpu(cpu) {
4564 percpu = per_cpu_ptr(conf->percpu, cpu);
4565 safe_put_page(percpu->spare_page);
4566 kfree(percpu->scribble);
4568 #ifdef CONFIG_HOTPLUG_CPU
4569 unregister_cpu_notifier(&conf->cpu_notify);
4573 free_percpu(conf->percpu);
4576 static void free_conf(raid5_conf_t *conf)
4578 shrink_stripes(conf);
4579 raid5_free_percpu(conf);
4581 kfree(conf->stripe_hashtbl);
4585 #ifdef CONFIG_HOTPLUG_CPU
4586 static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
4589 raid5_conf_t *conf = container_of(nfb, raid5_conf_t, cpu_notify);
4590 long cpu = (long)hcpu;
4591 struct raid5_percpu *percpu = per_cpu_ptr(conf->percpu, cpu);
4594 case CPU_UP_PREPARE:
4595 case CPU_UP_PREPARE_FROZEN:
4596 if (conf->level == 6 && !percpu->spare_page)
4597 percpu->spare_page = alloc_page(GFP_KERNEL);
4598 if (!percpu->scribble)
4599 percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
4601 if (!percpu->scribble ||
4602 (conf->level == 6 && !percpu->spare_page)) {
4603 safe_put_page(percpu->spare_page);
4604 kfree(percpu->scribble);
4605 pr_err("%s: failed memory allocation for cpu%ld\n",
4611 case CPU_DEAD_FROZEN:
4612 safe_put_page(percpu->spare_page);
4613 kfree(percpu->scribble);
4614 percpu->spare_page = NULL;
4615 percpu->scribble = NULL;
4624 static int raid5_alloc_percpu(raid5_conf_t *conf)
4627 struct page *spare_page;
4628 struct raid5_percpu *allcpus;
4632 allcpus = alloc_percpu(struct raid5_percpu);
4635 conf->percpu = allcpus;
4639 for_each_present_cpu(cpu) {
4640 if (conf->level == 6) {
4641 spare_page = alloc_page(GFP_KERNEL);
4646 per_cpu_ptr(conf->percpu, cpu)->spare_page = spare_page;
4648 scribble = kmalloc(scribble_len(conf->raid_disks), GFP_KERNEL);
4653 per_cpu_ptr(conf->percpu, cpu)->scribble = scribble;
4655 #ifdef CONFIG_HOTPLUG_CPU
4656 conf->cpu_notify.notifier_call = raid456_cpu_notify;
4657 conf->cpu_notify.priority = 0;
4659 err = register_cpu_notifier(&conf->cpu_notify);
4666 static raid5_conf_t *setup_conf(mddev_t *mddev)
4669 int raid_disk, memory;
4671 struct disk_info *disk;
4673 if (mddev->new_level != 5
4674 && mddev->new_level != 4
4675 && mddev->new_level != 6) {
4676 printk(KERN_ERR "raid5: %s: raid level not set to 4/5/6 (%d)\n",
4677 mdname(mddev), mddev->new_level);
4678 return ERR_PTR(-EIO);
4680 if ((mddev->new_level == 5
4681 && !algorithm_valid_raid5(mddev->new_layout)) ||
4682 (mddev->new_level == 6
4683 && !algorithm_valid_raid6(mddev->new_layout))) {
4684 printk(KERN_ERR "raid5: %s: layout %d not supported\n",
4685 mdname(mddev), mddev->new_layout);
4686 return ERR_PTR(-EIO);
4688 if (mddev->new_level == 6 && mddev->raid_disks < 4) {
4689 printk(KERN_ERR "raid6: not enough configured devices for %s (%d, minimum 4)\n",
4690 mdname(mddev), mddev->raid_disks);
4691 return ERR_PTR(-EINVAL);
4694 if (!mddev->new_chunk_sectors ||
4695 (mddev->new_chunk_sectors << 9) % PAGE_SIZE ||
4696 !is_power_of_2(mddev->new_chunk_sectors)) {
4697 printk(KERN_ERR "raid5: invalid chunk size %d for %s\n",
4698 mddev->new_chunk_sectors << 9, mdname(mddev));
4699 return ERR_PTR(-EINVAL);
4702 conf = kzalloc(sizeof(raid5_conf_t), GFP_KERNEL);
4706 conf->raid_disks = mddev->raid_disks;
4707 conf->scribble_len = scribble_len(conf->raid_disks);
4708 if (mddev->reshape_position == MaxSector)
4709 conf->previous_raid_disks = mddev->raid_disks;
4711 conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks;
4713 conf->disks = kzalloc(conf->raid_disks * sizeof(struct disk_info),
4718 conf->mddev = mddev;
4720 if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL)
4723 conf->level = mddev->new_level;
4724 if (raid5_alloc_percpu(conf) != 0)
4727 spin_lock_init(&conf->device_lock);
4728 init_waitqueue_head(&conf->wait_for_stripe);
4729 init_waitqueue_head(&conf->wait_for_overlap);
4730 INIT_LIST_HEAD(&conf->handle_list);
4731 INIT_LIST_HEAD(&conf->hold_list);
4732 INIT_LIST_HEAD(&conf->delayed_list);
4733 INIT_LIST_HEAD(&conf->bitmap_list);
4734 INIT_LIST_HEAD(&conf->inactive_list);
4735 atomic_set(&conf->active_stripes, 0);
4736 atomic_set(&conf->preread_active_stripes, 0);
4737 atomic_set(&conf->active_aligned_reads, 0);
4738 conf->bypass_threshold = BYPASS_THRESHOLD;
4740 pr_debug("raid5: run(%s) called.\n", mdname(mddev));
4742 list_for_each_entry(rdev, &mddev->disks, same_set) {
4743 raid_disk = rdev->raid_disk;
4744 if (raid_disk >= conf->raid_disks
4747 disk = conf->disks + raid_disk;
4751 if (test_bit(In_sync, &rdev->flags)) {
4752 char b[BDEVNAME_SIZE];
4753 printk(KERN_INFO "raid5: device %s operational as raid"
4754 " disk %d\n", bdevname(rdev->bdev,b),
4757 /* Cannot rely on bitmap to complete recovery */
4761 conf->chunk_sectors = mddev->new_chunk_sectors;
4762 conf->level = mddev->new_level;
4763 if (conf->level == 6)
4764 conf->max_degraded = 2;
4766 conf->max_degraded = 1;
4767 conf->algorithm = mddev->new_layout;
4768 conf->max_nr_stripes = NR_STRIPES;
4769 conf->reshape_progress = mddev->reshape_position;
4770 if (conf->reshape_progress != MaxSector) {
4771 conf->prev_chunk_sectors = mddev->chunk_sectors;
4772 conf->prev_algo = mddev->layout;
4775 memory = conf->max_nr_stripes * (sizeof(struct stripe_head) +
4776 conf->raid_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
4777 if (grow_stripes(conf, conf->max_nr_stripes)) {
4779 "raid5: couldn't allocate %dkB for buffers\n", memory);
4782 printk(KERN_INFO "raid5: allocated %dkB for %s\n",
4783 memory, mdname(mddev));
4785 conf->thread = md_register_thread(raid5d, mddev, "%s_raid5");
4786 if (!conf->thread) {
4788 "raid5: couldn't allocate thread for %s\n",
4798 return ERR_PTR(-EIO);
4800 return ERR_PTR(-ENOMEM);
4803 static int run(mddev_t *mddev)
4806 int working_disks = 0;
4809 if (mddev->recovery_cp != MaxSector)
4810 printk(KERN_NOTICE "raid5: %s is not clean"
4811 " -- starting background reconstruction\n",
4813 if (mddev->reshape_position != MaxSector) {
4814 /* Check that we can continue the reshape.
4815 * Currently only the number of disks can change; it must
4816 * increase, and we must be past the point where
4817 * a stripe over-writes itself
4819 sector_t here_new, here_old;
4821 int max_degraded = (mddev->level == 6 ? 2 : 1);
4823 if (mddev->new_level != mddev->level) {
4824 printk(KERN_ERR "raid5: %s: unsupported reshape "
4825 "required - aborting.\n",
4829 old_disks = mddev->raid_disks - mddev->delta_disks;
4830 /* reshape_position must be on a new-stripe boundary, and one
4831 * further up in new geometry must map after here in old
4834 here_new = mddev->reshape_position;
4835 if (sector_div(here_new, mddev->new_chunk_sectors *
4836 (mddev->raid_disks - max_degraded))) {
4837 printk(KERN_ERR "raid5: reshape_position not "
4838 "on a stripe boundary\n");
4841 /* here_new is the stripe we will write to */
4842 here_old = mddev->reshape_position;
4843 sector_div(here_old, mddev->chunk_sectors *
4844 (old_disks-max_degraded));
4845 /* here_old is the first stripe that we might need to read
4847 if (here_new >= here_old) {
4848 /* Reading from the same stripe as writing to - bad */
4849 printk(KERN_ERR "raid5: reshape_position too early for "
4850 "auto-recovery - aborting.\n");
4853 printk(KERN_INFO "raid5: reshape will continue\n");
4854 /* OK, we should be able to continue; */
4856 BUG_ON(mddev->level != mddev->new_level);
4857 BUG_ON(mddev->layout != mddev->new_layout);
4858 BUG_ON(mddev->chunk_sectors != mddev->new_chunk_sectors);
4859 BUG_ON(mddev->delta_disks != 0);
4862 if (mddev->private == NULL)
4863 conf = setup_conf(mddev);
4865 conf = mddev->private;
4868 return PTR_ERR(conf);
4870 mddev->thread = conf->thread;
4871 conf->thread = NULL;
4872 mddev->private = conf;
4875 * 0 for a fully functional array, 1 or 2 for a degraded array.
4877 list_for_each_entry(rdev, &mddev->disks, same_set)
4878 if (rdev->raid_disk >= 0 &&
4879 test_bit(In_sync, &rdev->flags))
4882 mddev->degraded = conf->raid_disks - working_disks;
4884 if (mddev->degraded > conf->max_degraded) {
4885 printk(KERN_ERR "raid5: not enough operational devices for %s"
4886 " (%d/%d failed)\n",
4887 mdname(mddev), mddev->degraded, conf->raid_disks);
4891 /* device size must be a multiple of chunk size */
4892 mddev->dev_sectors &= ~(mddev->chunk_sectors - 1);
4893 mddev->resync_max_sectors = mddev->dev_sectors;
4895 if (mddev->degraded > 0 &&
4896 mddev->recovery_cp != MaxSector) {
4897 if (mddev->ok_start_degraded)
4899 "raid5: starting dirty degraded array: %s"
4900 "- data corruption possible.\n",
4904 "raid5: cannot start dirty degraded array for %s\n",
4910 if (mddev->degraded == 0)
4911 printk("raid5: raid level %d set %s active with %d out of %d"
4912 " devices, algorithm %d\n", conf->level, mdname(mddev),
4913 mddev->raid_disks-mddev->degraded, mddev->raid_disks,
4916 printk(KERN_ALERT "raid5: raid level %d set %s active with %d"
4917 " out of %d devices, algorithm %d\n", conf->level,
4918 mdname(mddev), mddev->raid_disks - mddev->degraded,
4919 mddev->raid_disks, mddev->new_layout);
4921 print_raid5_conf(conf);
4923 if (conf->reshape_progress != MaxSector) {
4924 printk("...ok start reshape thread\n");
4925 conf->reshape_safe = conf->reshape_progress;
4926 atomic_set(&conf->reshape_stripes, 0);
4927 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
4928 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
4929 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
4930 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
4931 mddev->sync_thread = md_register_thread(md_do_sync, mddev,
4935 /* read-ahead size must cover two whole stripes, which is
4936 * 2 * (datadisks) * chunksize, where 'datadisks' is the number of data disks.
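 * e.g. with 4KiB pages, a 64KiB chunk and 4 data disks one stripe
 * spans 4 * 16 = 64 pages, so ra_pages is raised to at least 128 pages
 * (512KiB) below (illustrative figures).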
4939 int data_disks = conf->previous_raid_disks - conf->max_degraded;
4940 int stripe = data_disks *
4941 ((mddev->chunk_sectors << 9) / PAGE_SIZE);
4942 if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
4943 mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
4946 /* Ok, everything is just fine now */
4947 if (sysfs_create_group(&mddev->kobj, &raid5_attrs_group))
4949 "raid5: failed to create sysfs attributes for %s\n",
4952 mddev->queue->queue_lock = &conf->device_lock;
4954 mddev->queue->unplug_fn = raid5_unplug_device;
4955 mddev->queue->backing_dev_info.congested_data = mddev;
4956 mddev->queue->backing_dev_info.congested_fn = raid5_congested;
4958 md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
4960 blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec);
4964 md_unregister_thread(mddev->thread);
4965 mddev->thread = NULL;
4967 print_raid5_conf(conf);
4970 mddev->private = NULL;
4971 printk(KERN_ALERT "raid5: failed to run raid set %s\n", mdname(mddev));
4977 static int stop(mddev_t *mddev)
4979 raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
4981 md_unregister_thread(mddev->thread);
4982 mddev->thread = NULL;
4983 mddev->queue->backing_dev_info.congested_fn = NULL;
4984 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
4985 sysfs_remove_group(&mddev->kobj, &raid5_attrs_group);
4987 mddev->private = NULL;
4992 static void print_sh(struct seq_file *seq, struct stripe_head *sh)
4996 seq_printf(seq, "sh %llu, pd_idx %d, state %ld.\n",
4997 (unsigned long long)sh->sector, sh->pd_idx, sh->state);
4998 seq_printf(seq, "sh %llu, count %d.\n",
4999 (unsigned long long)sh->sector, atomic_read(&sh->count));
5000 seq_printf(seq, "sh %llu, ", (unsigned long long)sh->sector);
5001 for (i = 0; i < sh->disks; i++) {
5002 seq_printf(seq, "(cache%d: %p %ld) ",
5003 i, sh->dev[i].page, sh->dev[i].flags);
5005 seq_printf(seq, "\n");
5008 static void printall(struct seq_file *seq, raid5_conf_t *conf)
5010 struct stripe_head *sh;
5011 struct hlist_node *hn;
5014 spin_lock_irq(&conf->device_lock);
5015 for (i = 0; i < NR_HASH; i++) {
5016 hlist_for_each_entry(sh, hn, &conf->stripe_hashtbl[i], hash) {
5017 if (sh->raid_conf != conf)
5022 spin_unlock_irq(&conf->device_lock);
5026 static void status(struct seq_file *seq, mddev_t *mddev)
5028 raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
5031 seq_printf(seq, " level %d, %dk chunk, algorithm %d", mddev->level,
5032 mddev->chunk_sectors / 2, mddev->layout);
5033 seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded);
5034 for (i = 0; i < conf->raid_disks; i++)
5035 seq_printf (seq, "%s",
5036 conf->disks[i].rdev &&
5037 test_bit(In_sync, &conf->disks[i].rdev->flags) ? "U" : "_");
5038 seq_printf (seq, "]");
5040 seq_printf (seq, "\n");
5041 printall(seq, conf);
5045 static void print_raid5_conf (raid5_conf_t *conf)
5048 struct disk_info *tmp;
5050 printk("RAID5 conf printout:\n");
5052 printk("(conf==NULL)\n");
5055 printk(" --- rd:%d wd:%d\n", conf->raid_disks,
5056 conf->raid_disks - conf->mddev->degraded);
5058 for (i = 0; i < conf->raid_disks; i++) {
5059 char b[BDEVNAME_SIZE];
5060 tmp = conf->disks + i;
5062 printk(" disk %d, o:%d, dev:%s\n",
5063 i, !test_bit(Faulty, &tmp->rdev->flags),
5064 bdevname(tmp->rdev->bdev,b));
5068 static int raid5_spare_active(mddev_t *mddev)
5071 raid5_conf_t *conf = mddev->private;
5072 struct disk_info *tmp;
5074 for (i = 0; i < conf->raid_disks; i++) {
5075 tmp = conf->disks + i;
5077 && !test_bit(Faulty, &tmp->rdev->flags)
5078 && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
5079 unsigned long flags;
5080 spin_lock_irqsave(&conf->device_lock, flags);
5082 spin_unlock_irqrestore(&conf->device_lock, flags);
5085 print_raid5_conf(conf);
5089 static int raid5_remove_disk(mddev_t *mddev, int number)
5091 raid5_conf_t *conf = mddev->private;
5094 struct disk_info *p = conf->disks + number;
5096 print_raid5_conf(conf);
5099 if (number >= conf->raid_disks &&
5100 conf->reshape_progress == MaxSector)
5101 clear_bit(In_sync, &rdev->flags);
5103 if (test_bit(In_sync, &rdev->flags) ||
5104 atomic_read(&rdev->nr_pending)) {
5108 /* Only remove non-faulty devices if recovery
5111 if (!test_bit(Faulty, &rdev->flags) &&
5112 mddev->degraded <= conf->max_degraded &&
5113 number < conf->raid_disks) {
5119 if (atomic_read(&rdev->nr_pending)) {
5120 /* lost the race, try later */
5127 print_raid5_conf(conf);
5131 static int raid5_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
5133 raid5_conf_t *conf = mddev->private;
5136 struct disk_info *p;
5138 int last = conf->raid_disks - 1;
5140 if (mddev->degraded > conf->max_degraded)
5141 /* no point adding a device */
5144 if (rdev->raid_disk >= 0)
5145 first = last = rdev->raid_disk;
5148 * find the disk ... but prefer rdev->saved_raid_disk
5151 if (rdev->saved_raid_disk >= 0 &&
5152 rdev->saved_raid_disk >= first &&
5153 conf->disks[rdev->saved_raid_disk].rdev == NULL)
5154 disk = rdev->saved_raid_disk;
5157 for ( ; disk <= last ; disk++)
5158 if ((p=conf->disks + disk)->rdev == NULL) {
5159 clear_bit(In_sync, &rdev->flags);
5160 rdev->raid_disk = disk;
5162 if (rdev->saved_raid_disk != disk)
5164 rcu_assign_pointer(p->rdev, rdev);
5167 print_raid5_conf(conf);
5171 static int raid5_resize(mddev_t *mddev, sector_t sectors)
5173 /* no resync is happening, and there is enough space
5174 * on all devices, so we can resize.
5175 * We need to make sure resync covers any new space.
5176 * If the array is shrinking we should possibly wait until
5177 * any io in the removed space completes, but it hardly seems
5180 sectors &= ~((sector_t)mddev->chunk_sectors - 1);
5181 md_set_array_sectors(mddev, raid5_size(mddev, sectors,
5182 mddev->raid_disks));
5183 if (mddev->array_sectors >
5184 raid5_size(mddev, sectors, mddev->raid_disks))
5186 set_capacity(mddev->gendisk, mddev->array_sectors);
5188 if (sectors > mddev->dev_sectors && mddev->recovery_cp == MaxSector) {
5189 mddev->recovery_cp = mddev->dev_sectors;
5190 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5192 mddev->dev_sectors = sectors;
5193 mddev->resync_max_sectors = sectors;
5197 static int check_stripe_cache(mddev_t *mddev)
5199 /* Can only proceed if there are plenty of stripe_heads.
5200 * We need a minimum of one full stripe, and for sensible progress
5201 * it is best to have about 4 times that.
5202 * If we require 4 times, then the default 256 4K stripe_heads will
5203 * allow for chunk sizes up to 256K, which is probably OK.
5204 * If the chunk size is greater, user-space should request more
5205 * stripe_heads first.
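 * Worked example: with 4KiB STRIPE_SIZE the default 256 stripe_heads
 * satisfy (256KiB / 4KiB) * 4 == 256 for a 256KiB chunk, but a 512KiB
 * chunk would need 512, so stripe_cache_size has to be raised before
 * such a reshape is allowed.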
5207 raid5_conf_t *conf = mddev->private;
5208 if (((mddev->chunk_sectors << 9) / STRIPE_SIZE) * 4
5209 > conf->max_nr_stripes ||
5210 ((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4
5211 > conf->max_nr_stripes) {
5212 printk(KERN_WARNING "raid5: reshape: not enough stripes. Needed %lu\n",
5213 ((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9)
5220 static int check_reshape(mddev_t *mddev)
5222 raid5_conf_t *conf = mddev->private;
5224 if (mddev->delta_disks == 0 &&
5225 mddev->new_layout == mddev->layout &&
5226 mddev->new_chunk_sectors == mddev->chunk_sectors)
5227 return 0; /* nothing to do */
5229 /* Cannot grow a bitmap yet */
5231 if (mddev->degraded > conf->max_degraded)
5233 if (mddev->delta_disks < 0) {
5234 /* We might be able to shrink, but the devices must
5235 * be made bigger first.
5236 * For raid6, 4 is the minimum size.
5237 * Otherwise 2 is the minimum
5240 if (mddev->level == 6)
5242 if (mddev->raid_disks + mddev->delta_disks < min)
5246 if (!check_stripe_cache(mddev))
5249 return resize_stripes(conf, conf->raid_disks + mddev->delta_disks);
5252 static int raid5_start_reshape(mddev_t *mddev)
5254 raid5_conf_t *conf = mddev->private;
5257 int added_devices = 0;
5258 unsigned long flags;
5260 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
5263 if (!check_stripe_cache(mddev))
5266 list_for_each_entry(rdev, &mddev->disks, same_set)
5267 if (rdev->raid_disk < 0 &&
5268 !test_bit(Faulty, &rdev->flags))
5271 if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded)
5272 /* Not enough devices even to make a degraded array
5277 /* Refuse to reduce size of the array. Any reductions in
5278 * array size must be through explicit setting of array_size
5281 if (raid5_size(mddev, 0, conf->raid_disks + mddev->delta_disks)
5282 < mddev->array_sectors) {
5283 printk(KERN_ERR "md: %s: array size must be reduced "
5284 "before number of disks\n", mdname(mddev));
5288 atomic_set(&conf->reshape_stripes, 0);
5289 spin_lock_irq(&conf->device_lock);
5290 conf->previous_raid_disks = conf->raid_disks;
5291 conf->raid_disks += mddev->delta_disks;
5292 conf->prev_chunk_sectors = conf->chunk_sectors;
5293 conf->chunk_sectors = mddev->new_chunk_sectors;
5294 conf->prev_algo = conf->algorithm;
5295 conf->algorithm = mddev->new_layout;
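/* When shrinking, the reshape runs from the end of the array towards
 * the start, so reshape_progress begins at the current array size and
 * counts down; when growing it starts at 0 and counts up. */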
5296 if (mddev->delta_disks < 0)
5297 conf->reshape_progress = raid5_size(mddev, 0, 0);
5299 conf->reshape_progress = 0;
5300 conf->reshape_safe = conf->reshape_progress;
5302 spin_unlock_irq(&conf->device_lock);
5304 /* Add some new drives, as many as will fit.
5305 * We know there are enough to make the newly sized array work.
5307 list_for_each_entry(rdev, &mddev->disks, same_set)
5308 if (rdev->raid_disk < 0 &&
5309 !test_bit(Faulty, &rdev->flags)) {
5310 if (raid5_add_disk(mddev, rdev) == 0) {
5312 set_bit(In_sync, &rdev->flags);
5314 rdev->recovery_offset = 0;
5315 sprintf(nm, "rd%d", rdev->raid_disk);
5316 if (sysfs_create_link(&mddev->kobj,
5319 "raid5: failed to create "
5320 " link %s for %s\n",
5326 if (mddev->delta_disks > 0) {
5327 spin_lock_irqsave(&conf->device_lock, flags);
5328 mddev->degraded = (conf->raid_disks - conf->previous_raid_disks)
5330 spin_unlock_irqrestore(&conf->device_lock, flags);
5332 mddev->raid_disks = conf->raid_disks;
5333 mddev->reshape_position = 0;
5334 set_bit(MD_CHANGE_DEVS, &mddev->flags);
5336 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
5337 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
5338 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
5339 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
5340 mddev->sync_thread = md_register_thread(md_do_sync, mddev,
5342 if (!mddev->sync_thread) {
5343 mddev->recovery = 0;
5344 spin_lock_irq(&conf->device_lock);
5345 mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks;
5346 conf->reshape_progress = MaxSector;
5347 spin_unlock_irq(&conf->device_lock);
5350 conf->reshape_checkpoint = jiffies;
5351 md_wakeup_thread(mddev->sync_thread);
5352 md_new_event(mddev);
5356 /* This is called from the reshape thread and should make any
5357 * changes needed in 'conf'
5359 static void end_reshape(raid5_conf_t *conf)
5362 if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
5364 spin_lock_irq(&conf->device_lock);
5365 conf->previous_raid_disks = conf->raid_disks;
5366 conf->reshape_progress = MaxSector;
5367 spin_unlock_irq(&conf->device_lock);
5368 wake_up(&conf->wait_for_overlap);
5370 /* read-ahead size must cover two whole stripes, which is
5371 * 2 * (datadisks) * chunksize, where 'datadisks' is the number of data disks.
5374 int data_disks = conf->raid_disks - conf->max_degraded;
5375 int stripe = data_disks * ((conf->chunk_sectors << 9)
5377 if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
5378 conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
5383 /* This is called from the raid5d thread with mddev_lock held.
5384 * It makes config changes to the device.
5386 static void raid5_finish_reshape(mddev_t *mddev)
5388 struct block_device *bdev;
5389 raid5_conf_t *conf = mddev->private;
5391 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
5393 if (mddev->delta_disks > 0) {
5394 md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
5395 set_capacity(mddev->gendisk, mddev->array_sectors);
5398 bdev = bdget_disk(mddev->gendisk, 0);
5400 mutex_lock(&bdev->bd_inode->i_mutex);
5401 i_size_write(bdev->bd_inode,
5402 (loff_t)mddev->array_sectors << 9);
5403 mutex_unlock(&bdev->bd_inode->i_mutex);
5408 mddev->degraded = conf->raid_disks;
5409 for (d = 0; d < conf->raid_disks ; d++)
5410 if (conf->disks[d].rdev &&
5412 &conf->disks[d].rdev->flags))
5414 for (d = conf->raid_disks ;
5415 d < conf->raid_disks - mddev->delta_disks;
5417 raid5_remove_disk(mddev, d);
5419 mddev->layout = conf->algorithm;
5420 mddev->chunk_sectors = conf->chunk_sectors;
5421 mddev->reshape_position = MaxSector;
5422 mddev->delta_disks = 0;
5426 static void raid5_quiesce(mddev_t *mddev, int state)
5428 raid5_conf_t *conf = mddev->private;
5431 case 2: /* resume for a suspend */
5432 wake_up(&conf->wait_for_overlap);
5435 case 1: /* stop all writes */
5436 spin_lock_irq(&conf->device_lock);
5438 wait_event_lock_irq(conf->wait_for_stripe,
5439 atomic_read(&conf->active_stripes) == 0 &&
5440 atomic_read(&conf->active_aligned_reads) == 0,
5441 conf->device_lock, /* nothing */);
5442 spin_unlock_irq(&conf->device_lock);
5445 case 0: /* re-enable writes */
5446 spin_lock_irq(&conf->device_lock);
5448 wake_up(&conf->wait_for_stripe);
5449 wake_up(&conf->wait_for_overlap);
5450 spin_unlock_irq(&conf->device_lock);
5456 static void *raid5_takeover_raid1(mddev_t *mddev)
5460 if (mddev->raid_disks != 2 ||
5461 mddev->degraded > 1)
5462 return ERR_PTR(-EINVAL);
5464 /* Should check if there are write-behind devices? */
5466 chunksect = 64*2; /* 64K by default */
5468 /* The array must be an exact multiple of chunksize */
5469 while (chunksect && (mddev->array_sectors & (chunksect-1)))
5472 if ((chunksect<<9) < STRIPE_SIZE)
5473 /* array size does not allow a suitable chunk size */
5474 return ERR_PTR(-EINVAL);
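/*
 * Illustrative example: a 10240-sector array keeps the default
 * 128-sector (64KiB) chunk; a 10000-sector array drops to 16 sectors
 * (8KiB); an odd-sized array would end up below STRIPE_SIZE and be
 * rejected above.
 */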
5476 mddev->new_level = 5;
5477 mddev->new_layout = ALGORITHM_LEFT_SYMMETRIC;
5478 mddev->new_chunk_sectors = chunksect;
5480 return setup_conf(mddev);
5483 static void *raid5_takeover_raid6(mddev_t *mddev)
5487 switch (mddev->layout) {
5488 case ALGORITHM_LEFT_ASYMMETRIC_6:
5489 new_layout = ALGORITHM_LEFT_ASYMMETRIC;
5491 case ALGORITHM_RIGHT_ASYMMETRIC_6:
5492 new_layout = ALGORITHM_RIGHT_ASYMMETRIC;
5494 case ALGORITHM_LEFT_SYMMETRIC_6:
5495 new_layout = ALGORITHM_LEFT_SYMMETRIC;
5497 case ALGORITHM_RIGHT_SYMMETRIC_6:
5498 new_layout = ALGORITHM_RIGHT_SYMMETRIC;
5500 case ALGORITHM_PARITY_0_6:
5501 new_layout = ALGORITHM_PARITY_0;
5503 case ALGORITHM_PARITY_N:
5504 new_layout = ALGORITHM_PARITY_N;
5507 return ERR_PTR(-EINVAL);
5509 mddev->new_level = 5;
5510 mddev->new_layout = new_layout;
5511 mddev->delta_disks = -1;
5512 mddev->raid_disks -= 1;
5513 return setup_conf(mddev);
5517 static int raid5_check_reshape(mddev_t *mddev)
5519 /* For a 2-drive array, the layout and chunk size can be changed
5520 * immediately, as no restriping is needed.
5521 * For larger arrays we record the new value - after validation
5522 * to be used by a reshape pass.
5524 raid5_conf_t *conf = mddev->private;
5525 int new_chunk = mddev->new_chunk_sectors;
5527 if (mddev->new_layout >= 0 && !algorithm_valid_raid5(mddev->new_layout))
5529 if (new_chunk > 0) {
5530 if (!is_power_of_2(new_chunk))
5532 if (new_chunk < (PAGE_SIZE>>9))
5534 if (mddev->array_sectors & (new_chunk-1))
5535 /* not factor of array size */
5539 /* They look valid */
5541 if (mddev->raid_disks == 2) {
5542 /* can make the change immediately */
5543 if (mddev->new_layout >= 0) {
5544 conf->algorithm = mddev->new_layout;
5545 mddev->layout = mddev->new_layout;
5547 if (new_chunk > 0) {
5548 conf->chunk_sectors = new_chunk;
5549 mddev->chunk_sectors = new_chunk;
5551 set_bit(MD_CHANGE_DEVS, &mddev->flags);
5552 md_wakeup_thread(mddev->thread);
5554 return check_reshape(mddev);
5557 static int raid6_check_reshape(mddev_t *mddev)
5559 int new_chunk = mddev->new_chunk_sectors;
5561 if (mddev->new_layout >= 0 && !algorithm_valid_raid6(mddev->new_layout))
5563 if (new_chunk > 0) {
5564 if (!is_power_of_2(new_chunk))
5566 if (new_chunk < (PAGE_SIZE >> 9))
5568 if (mddev->array_sectors & (new_chunk-1))
5569 /* not factor of array size */
5573 /* They look valid */
5574 return check_reshape(mddev);
5577 static void *raid5_takeover(mddev_t *mddev)
5579 /* raid5 can take over:
5580 * raid0 - if all devices are the same - make it a raid4 layout
5581 * raid1 - if there are two drives. We need to know the chunk size
5582 * raid4 - trivial - just use a raid4 layout.
5583 * raid6 - Providing it is a *_6 layout
5586 if (mddev->level == 1)
5587 return raid5_takeover_raid1(mddev);
5588 if (mddev->level == 4) {
5589 mddev->new_layout = ALGORITHM_PARITY_N;
5590 mddev->new_level = 5;
5591 return setup_conf(mddev);
5593 if (mddev->level == 6)
5594 return raid5_takeover_raid6(mddev);
5596 return ERR_PTR(-EINVAL);
5600 static struct mdk_personality raid5_personality;
5602 static void *raid6_takeover(mddev_t *mddev)
5604 /* Currently can only take over a raid5. We map the
5605 * personality to an equivalent raid6 personality
5606 * with the Q block at the end.
5610 if (mddev->pers != &raid5_personality)
5611 return ERR_PTR(-EINVAL);
5612 if (mddev->degraded > 1)
5613 return ERR_PTR(-EINVAL);
5614 if (mddev->raid_disks > 253)
5615 return ERR_PTR(-EINVAL);
5616 if (mddev->raid_disks < 3)
5617 return ERR_PTR(-EINVAL);
5619 switch (mddev->layout) {
5620 case ALGORITHM_LEFT_ASYMMETRIC:
5621 new_layout = ALGORITHM_LEFT_ASYMMETRIC_6;
5623 case ALGORITHM_RIGHT_ASYMMETRIC:
5624 new_layout = ALGORITHM_RIGHT_ASYMMETRIC_6;
5626 case ALGORITHM_LEFT_SYMMETRIC:
5627 new_layout = ALGORITHM_LEFT_SYMMETRIC_6;
5629 case ALGORITHM_RIGHT_SYMMETRIC:
5630 new_layout = ALGORITHM_RIGHT_SYMMETRIC_6;
5632 case ALGORITHM_PARITY_0:
5633 new_layout = ALGORITHM_PARITY_0_6;
5635 case ALGORITHM_PARITY_N:
5636 new_layout = ALGORITHM_PARITY_N;
5639 return ERR_PTR(-EINVAL);
5641 mddev->new_level = 6;
5642 mddev->new_layout = new_layout;
5643 mddev->delta_disks = 1;
5644 mddev->raid_disks += 1;
5645 return setup_conf(mddev);
5649 static struct mdk_personality raid6_personality =
5653 .owner = THIS_MODULE,
5654 .make_request = make_request,
5658 .error_handler = error,
5659 .hot_add_disk = raid5_add_disk,
5660 .hot_remove_disk= raid5_remove_disk,
5661 .spare_active = raid5_spare_active,
5662 .sync_request = sync_request,
5663 .resize = raid5_resize,
5665 .check_reshape = raid6_check_reshape,
5666 .start_reshape = raid5_start_reshape,
5667 .finish_reshape = raid5_finish_reshape,
5668 .quiesce = raid5_quiesce,
5669 .takeover = raid6_takeover,
5671 static struct mdk_personality raid5_personality =
5675 .owner = THIS_MODULE,
5676 .make_request = make_request,
5680 .error_handler = error,
5681 .hot_add_disk = raid5_add_disk,
5682 .hot_remove_disk= raid5_remove_disk,
5683 .spare_active = raid5_spare_active,
5684 .sync_request = sync_request,
5685 .resize = raid5_resize,
5687 .check_reshape = raid5_check_reshape,
5688 .start_reshape = raid5_start_reshape,
5689 .finish_reshape = raid5_finish_reshape,
5690 .quiesce = raid5_quiesce,
5691 .takeover = raid5_takeover,
5694 static struct mdk_personality raid4_personality =
5698 .owner = THIS_MODULE,
5699 .make_request = make_request,
5703 .error_handler = error,
5704 .hot_add_disk = raid5_add_disk,
5705 .hot_remove_disk= raid5_remove_disk,
5706 .spare_active = raid5_spare_active,
5707 .sync_request = sync_request,
5708 .resize = raid5_resize,
5710 .check_reshape = raid5_check_reshape,
5711 .start_reshape = raid5_start_reshape,
5712 .finish_reshape = raid5_finish_reshape,
5713 .quiesce = raid5_quiesce,
5716 static int __init raid5_init(void)
5718 register_md_personality(&raid6_personality);
5719 register_md_personality(&raid5_personality);
5720 register_md_personality(&raid4_personality);
5724 static void raid5_exit(void)
5726 unregister_md_personality(&raid6_personality);
5727 unregister_md_personality(&raid5_personality);
5728 unregister_md_personality(&raid4_personality);
5731 module_init(raid5_init);
5732 module_exit(raid5_exit);
5733 MODULE_LICENSE("GPL");
5734 MODULE_ALIAS("md-personality-4"); /* RAID5 */
5735 MODULE_ALIAS("md-raid5");
5736 MODULE_ALIAS("md-raid4");
5737 MODULE_ALIAS("md-level-5");
5738 MODULE_ALIAS("md-level-4");
5739 MODULE_ALIAS("md-personality-8"); /* RAID6 */
5740 MODULE_ALIAS("md-raid6");
5741 MODULE_ALIAS("md-level-6");
5743 /* This used to be two separate modules; they were: */
5744 MODULE_ALIAS("raid5");
5745 MODULE_ALIAS("raid6");