2 md.c : Multiple Devices driver for Linux
3 Copyright (C) 1998, 1999, 2000 Ingo Molnar
5 completely rewritten, based on the MD driver code from Marc Zyngier
9 - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
10 - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
11 - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
12 - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
13 - kmod support by: Cyrus Durgin
14 - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
15 - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>
17 - lots of fixes and improvements to the RAID1/RAID5 and generic
18 RAID code (such as request based resynchronization):
20 Neil Brown <neilb@cse.unsw.edu.au>.
22 - persistent bitmap code
23 Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.
25 This program is free software; you can redistribute it and/or modify
26 it under the terms of the GNU General Public License as published by
27 the Free Software Foundation; either version 2, or (at your option) any later version.
30 You should have received a copy of the GNU General Public License
31 (for example /usr/src/linux/COPYING); if not, write to the Free
32 Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
35 #include <linux/kthread.h>
36 #include <linux/blkdev.h>
37 #include <linux/sysctl.h>
38 #include <linux/seq_file.h>
40 #include <linux/poll.h>
41 #include <linux/ctype.h>
42 #include <linux/string.h>
43 #include <linux/hdreg.h>
44 #include <linux/proc_fs.h>
45 #include <linux/random.h>
46 #include <linux/module.h>
47 #include <linux/reboot.h>
48 #include <linux/file.h>
49 #include <linux/compat.h>
50 #include <linux/delay.h>
51 #include <linux/raid/md_p.h>
52 #include <linux/raid/md_u.h>
53 #include <linux/slab.h>
56 #include "md-cluster.h"
59 static void autostart_arrays(int part);
62 /* pers_list is a list of registered personalities protected by pers_lock.
64 * pers_lock does extra service to protect accesses to
65 * mddev->thread when the mutex cannot be held.
67 static LIST_HEAD(pers_list);
68 static DEFINE_SPINLOCK(pers_lock);
70 struct md_cluster_operations *md_cluster_ops;
71 EXPORT_SYMBOL(md_cluster_ops);
72 struct module *md_cluster_mod;
73 EXPORT_SYMBOL(md_cluster_mod);
75 static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
76 static struct workqueue_struct *md_wq;
77 static struct workqueue_struct *md_misc_wq;
79 static int remove_and_add_spares(struct mddev *mddev,
80 struct md_rdev *this);
81 static void mddev_detach(struct mddev *mddev);
84 * Default number of read corrections we'll attempt on an rdev
85 * before ejecting it from the array. We divide the read error
86 * count by 2 for every hour elapsed between read errors.
88 #define MD_DEFAULT_MAX_CORRECTED_READ_ERRORS 20
90 * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
91 * is 1000 KB/sec, so the extra system load does not show up that much.
92 * Increase it if you want to have more _guaranteed_ speed. Note that
93 * the RAID driver will use the maximum available bandwidth if the IO
94 * subsystem is idle. There is also an 'absolute maximum' reconstruction
95 * speed limit - in case reconstruction slows down your system despite irq load.
98 * You can change it via /proc/sys/dev/raid/speed_limit_min and _max,
99 * or /sys/block/mdX/md/sync_speed_{min,max}.
102 static int sysctl_speed_limit_min = 1000;
103 static int sysctl_speed_limit_max = 200000;
104 static inline int speed_min(struct mddev *mddev)
106 return mddev->sync_speed_min ?
107 mddev->sync_speed_min : sysctl_speed_limit_min;
110 static inline int speed_max(struct mddev *mddev)
112 return mddev->sync_speed_max ?
113 mddev->sync_speed_max : sysctl_speed_limit_max;
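/*
 * Illustrative sketch (not part of the driver's control flow): a resync loop
 * can throttle itself against these limits roughly as below.  The helper and
 * its currspeed_kbps parameter are hypothetical; the real throttling is done
 * in md_do_sync().
 */
static inline void example_resync_throttle(struct mddev *mddev, int currspeed_kbps)
{
	/* Faster than both the guaranteed minimum and the absolute maximum:
	 * back off and let normal IO through before resuming the resync.
	 */
	if (currspeed_kbps > speed_min(mddev) &&
	    currspeed_kbps > speed_max(mddev))
		msleep(500);
}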
116 static struct ctl_table_header *raid_table_header;
118 static struct ctl_table raid_table[] = {
120 .procname = "speed_limit_min",
121 .data = &sysctl_speed_limit_min,
122 .maxlen = sizeof(int),
123 .mode = S_IRUGO|S_IWUSR,
124 .proc_handler = proc_dointvec,
127 .procname = "speed_limit_max",
128 .data = &sysctl_speed_limit_max,
129 .maxlen = sizeof(int),
130 .mode = S_IRUGO|S_IWUSR,
131 .proc_handler = proc_dointvec,
136 static struct ctl_table raid_dir_table[] = {
140 .mode = S_IRUGO|S_IXUGO,
146 static struct ctl_table raid_root_table[] = {
151 .child = raid_dir_table,
156 static const struct block_device_operations md_fops;
158 static int start_readonly;
161 * like bio_clone, but with a local bio set
164 struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
169 if (!mddev || !mddev->bio_set)
170 return bio_alloc(gfp_mask, nr_iovecs);
172 b = bio_alloc_bioset(gfp_mask, nr_iovecs, mddev->bio_set);
177 EXPORT_SYMBOL_GPL(bio_alloc_mddev);
179 struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask,
182 if (!mddev || !mddev->bio_set)
183 return bio_clone(bio, gfp_mask);
185 return bio_clone_bioset(bio, gfp_mask, mddev->bio_set);
187 EXPORT_SYMBOL_GPL(bio_clone_mddev);
190 * We have a system wide 'event count' that is incremented
191 * on any 'interesting' event, and readers of /proc/mdstat
192 * can use 'poll' or 'select' to find out when the event count increases. Events are:
196 * start array, stop array, error, add device, remove device,
197 * start build, activate spare
199 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
200 static atomic_t md_event_count;
201 void md_new_event(struct mddev *mddev)
203 atomic_inc(&md_event_count);
204 wake_up(&md_event_waiters);
206 EXPORT_SYMBOL_GPL(md_new_event);
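/*
 * Userspace consumption sketch (illustrative, not kernel code): a monitor can
 * block until the next event by polling /proc/mdstat for an exceptional
 * condition, roughly:
 *
 *	int fd = open("/proc/mdstat", O_RDONLY);
 *	struct pollfd pfd = { .fd = fd, .events = POLLPRI };
 *	char buf[4096];
 *
 *	read(fd, buf, sizeof(buf));
 *	poll(&pfd, 1, -1);	returns once md_new_event() has fired;
 *				re-read before polling again to rearm.
 */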
208 /* Alternate version that can be called from interrupts
209 * when calling sysfs_notify isn't needed.
211 static void md_new_event_inintr(struct mddev *mddev)
213 atomic_inc(&md_event_count);
214 wake_up(&md_event_waiters);
218 * Enables iterating over all existing md arrays;
219 * all_mddevs_lock protects this list.
221 static LIST_HEAD(all_mddevs);
222 static DEFINE_SPINLOCK(all_mddevs_lock);
225 * iterates through all used mddevs in the system.
226 * We take care to grab the all_mddevs_lock whenever navigating
227 * the list, and to always hold a refcount when unlocked.
228 * Any code which breaks out of this loop while owning
229 * a reference to the current mddev must mddev_put it.
231 #define for_each_mddev(_mddev,_tmp) \
233 for (({ spin_lock(&all_mddevs_lock); \
234 _tmp = all_mddevs.next; \
236 ({ if (_tmp != &all_mddevs) \
237 mddev_get(list_entry(_tmp, struct mddev, all_mddevs));\
238 spin_unlock(&all_mddevs_lock); \
239 if (_mddev) mddev_put(_mddev); \
240 _mddev = list_entry(_tmp, struct mddev, all_mddevs); \
241 _tmp != &all_mddevs;}); \
242 ({ spin_lock(&all_mddevs_lock); \
243 _tmp = _tmp->next;}) \
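/*
 * Usage sketch: the iterator does its own locking and reference counting, so
 * a caller can simply walk every array (hypothetical loop body):
 *
 *	struct mddev *mddev;
 *	struct list_head *tmp;
 *
 *	for_each_mddev(mddev, tmp)
 *		pr_info("%s\n", mdname(mddev));
 */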
246 /* Rather than calling directly into the personality make_request function,
247 * IO requests come here first so that we can check if the device is
248 * being suspended pending a reconfiguration.
249 * We hold a refcount over the call to ->make_request. By the time that
250 * call has finished, the bio has been linked into some internal structure
251 * and so is visible to ->quiesce(), so we don't need the refcount any more.
253 static void md_make_request(struct request_queue *q, struct bio *bio)
255 const int rw = bio_data_dir(bio);
256 struct mddev *mddev = q->queuedata;
257 unsigned int sectors;
260 if (mddev == NULL || mddev->pers == NULL
265 if (mddev->ro == 1 && unlikely(rw == WRITE)) {
266 bio_endio(bio, bio_sectors(bio) == 0 ? 0 : -EROFS);
269 smp_rmb(); /* Ensure implications of 'active' are visible */
271 if (mddev->suspended) {
274 prepare_to_wait(&mddev->sb_wait, &__wait,
275 TASK_UNINTERRUPTIBLE);
276 if (!mddev->suspended)
282 finish_wait(&mddev->sb_wait, &__wait);
284 atomic_inc(&mddev->active_io);
288 * save the sectors now since our bio can
289 * go away inside make_request
291 sectors = bio_sectors(bio);
292 mddev->pers->make_request(mddev, bio);
294 cpu = part_stat_lock();
295 part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
296 part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors);
299 if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
300 wake_up(&mddev->sb_wait);
303 /* mddev_suspend makes sure no new requests are submitted
304 * to the device, and that any requests that have been submitted
305 * are completely handled.
306 * Once mddev_detach() is called and completes, the module will be completely unused.
309 void mddev_suspend(struct mddev *mddev)
311 BUG_ON(mddev->suspended);
312 mddev->suspended = 1;
314 wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
315 mddev->pers->quiesce(mddev, 1);
317 del_timer_sync(&mddev->safemode_timer);
319 EXPORT_SYMBOL_GPL(mddev_suspend);
321 void mddev_resume(struct mddev *mddev)
323 mddev->suspended = 0;
324 wake_up(&mddev->sb_wait);
325 mddev->pers->quiesce(mddev, 0);
327 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
328 md_wakeup_thread(mddev->thread);
329 md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
331 EXPORT_SYMBOL_GPL(mddev_resume);
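/*
 * Typical pairing (sketch): callers that need to reconfigure internal state
 * quiesce the array around the change, e.g.
 *
 *	mddev_suspend(mddev);
 *	... swap in the new configuration ...
 *	mddev_resume(mddev);
 */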
333 int mddev_congested(struct mddev *mddev, int bits)
335 struct md_personality *pers = mddev->pers;
339 if (mddev->suspended)
341 else if (pers && pers->congested)
342 ret = pers->congested(mddev, bits);
346 EXPORT_SYMBOL_GPL(mddev_congested);
347 static int md_congested(void *data, int bits)
349 struct mddev *mddev = data;
350 return mddev_congested(mddev, bits);
353 static int md_mergeable_bvec(struct request_queue *q,
354 struct bvec_merge_data *bvm,
355 struct bio_vec *biovec)
357 struct mddev *mddev = q->queuedata;
360 if (mddev->suspended) {
361 /* Must always allow one vec */
362 if (bvm->bi_size == 0)
363 ret = biovec->bv_len;
367 struct md_personality *pers = mddev->pers;
368 if (pers && pers->mergeable_bvec)
369 ret = pers->mergeable_bvec(mddev, bvm, biovec);
371 ret = biovec->bv_len;
377 * Generic flush handling for md
380 static void md_end_flush(struct bio *bio, int err)
382 struct md_rdev *rdev = bio->bi_private;
383 struct mddev *mddev = rdev->mddev;
385 rdev_dec_pending(rdev, mddev);
387 if (atomic_dec_and_test(&mddev->flush_pending)) {
388 /* The pre-request flush has finished */
389 queue_work(md_wq, &mddev->flush_work);
394 static void md_submit_flush_data(struct work_struct *ws);
396 static void submit_flushes(struct work_struct *ws)
398 struct mddev *mddev = container_of(ws, struct mddev, flush_work);
399 struct md_rdev *rdev;
401 INIT_WORK(&mddev->flush_work, md_submit_flush_data);
402 atomic_set(&mddev->flush_pending, 1);
404 rdev_for_each_rcu(rdev, mddev)
405 if (rdev->raid_disk >= 0 &&
406 !test_bit(Faulty, &rdev->flags)) {
407 /* Take two references, one is dropped
408 * when request finishes, one after
409 * we reclaim rcu_read_lock
412 atomic_inc(&rdev->nr_pending);
413 atomic_inc(&rdev->nr_pending);
415 bi = bio_alloc_mddev(GFP_NOIO, 0, mddev);
416 bi->bi_end_io = md_end_flush;
417 bi->bi_private = rdev;
418 bi->bi_bdev = rdev->bdev;
419 atomic_inc(&mddev->flush_pending);
420 submit_bio(WRITE_FLUSH, bi);
422 rdev_dec_pending(rdev, mddev);
425 if (atomic_dec_and_test(&mddev->flush_pending))
426 queue_work(md_wq, &mddev->flush_work);
429 static void md_submit_flush_data(struct work_struct *ws)
431 struct mddev *mddev = container_of(ws, struct mddev, flush_work);
432 struct bio *bio = mddev->flush_bio;
434 if (bio->bi_iter.bi_size == 0)
435 /* an empty barrier - all done */
438 bio->bi_rw &= ~REQ_FLUSH;
439 mddev->pers->make_request(mddev, bio);
442 mddev->flush_bio = NULL;
443 wake_up(&mddev->sb_wait);
446 void md_flush_request(struct mddev *mddev, struct bio *bio)
448 spin_lock_irq(&mddev->lock);
449 wait_event_lock_irq(mddev->sb_wait,
452 mddev->flush_bio = bio;
453 spin_unlock_irq(&mddev->lock);
455 INIT_WORK(&mddev->flush_work, submit_flushes);
456 queue_work(md_wq, &mddev->flush_work);
458 EXPORT_SYMBOL(md_flush_request);
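/*
 * Flush sequence summary: md_flush_request() parks the incoming REQ_FLUSH bio
 * in mddev->flush_bio and queues submit_flushes(), which sends an empty flush
 * to every active, non-faulty component device.  md_end_flush() counts the
 * completions; once the last one returns, md_submit_flush_data() resubmits
 * the original bio (with REQ_FLUSH cleared) to the personality, or completes
 * it directly if it carried no data.
 */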
460 void md_unplug(struct blk_plug_cb *cb, bool from_schedule)
462 struct mddev *mddev = cb->data;
463 md_wakeup_thread(mddev->thread);
466 EXPORT_SYMBOL(md_unplug);
468 static inline struct mddev *mddev_get(struct mddev *mddev)
470 atomic_inc(&mddev->active);
474 static void mddev_delayed_delete(struct work_struct *ws);
476 static void mddev_put(struct mddev *mddev)
478 struct bio_set *bs = NULL;
480 if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
482 if (!mddev->raid_disks && list_empty(&mddev->disks) &&
483 mddev->ctime == 0 && !mddev->hold_active) {
484 /* Array is not configured at all, and not held active, so destroy it */
486 list_del_init(&mddev->all_mddevs);
488 mddev->bio_set = NULL;
489 if (mddev->gendisk) {
490 /* We did a probe so need to clean up. Call
491 * queue_work inside the spinlock so that
492 * flush_workqueue() after mddev_find will
493 * succeed in waiting for the work to be done.
495 INIT_WORK(&mddev->del_work, mddev_delayed_delete);
496 queue_work(md_misc_wq, &mddev->del_work);
500 spin_unlock(&all_mddevs_lock);
505 static void md_safemode_timeout(unsigned long data);
507 void mddev_init(struct mddev *mddev)
509 mutex_init(&mddev->open_mutex);
510 mutex_init(&mddev->reconfig_mutex);
511 mutex_init(&mddev->bitmap_info.mutex);
512 INIT_LIST_HEAD(&mddev->disks);
513 INIT_LIST_HEAD(&mddev->all_mddevs);
514 setup_timer(&mddev->safemode_timer, md_safemode_timeout,
515 (unsigned long) mddev);
516 atomic_set(&mddev->active, 1);
517 atomic_set(&mddev->openers, 0);
518 atomic_set(&mddev->active_io, 0);
519 spin_lock_init(&mddev->lock);
520 atomic_set(&mddev->flush_pending, 0);
521 init_waitqueue_head(&mddev->sb_wait);
522 init_waitqueue_head(&mddev->recovery_wait);
523 mddev->reshape_position = MaxSector;
524 mddev->reshape_backwards = 0;
525 mddev->last_sync_action = "none";
526 mddev->resync_min = 0;
527 mddev->resync_max = MaxSector;
528 mddev->level = LEVEL_NONE;
530 EXPORT_SYMBOL_GPL(mddev_init);
532 static struct mddev *mddev_find(dev_t unit)
534 struct mddev *mddev, *new = NULL;
536 if (unit && MAJOR(unit) != MD_MAJOR)
537 unit &= ~((1<<MdpMinorShift)-1);
540 spin_lock(&all_mddevs_lock);
543 list_for_each_entry(mddev, &all_mddevs, all_mddevs)
544 if (mddev->unit == unit) {
546 spin_unlock(&all_mddevs_lock);
552 list_add(&new->all_mddevs, &all_mddevs);
553 spin_unlock(&all_mddevs_lock);
554 new->hold_active = UNTIL_IOCTL;
558 /* find an unused unit number */
559 static int next_minor = 512;
560 int start = next_minor;
564 dev = MKDEV(MD_MAJOR, next_minor);
566 if (next_minor > MINORMASK)
568 if (next_minor == start) {
569 /* Oh dear, all in use. */
570 spin_unlock(&all_mddevs_lock);
576 list_for_each_entry(mddev, &all_mddevs, all_mddevs)
577 if (mddev->unit == dev) {
583 new->md_minor = MINOR(dev);
584 new->hold_active = UNTIL_STOP;
585 list_add(&new->all_mddevs, &all_mddevs);
586 spin_unlock(&all_mddevs_lock);
589 spin_unlock(&all_mddevs_lock);
591 new = kzalloc(sizeof(*new), GFP_KERNEL);
596 if (MAJOR(unit) == MD_MAJOR)
597 new->md_minor = MINOR(unit);
599 new->md_minor = MINOR(unit) >> MdpMinorShift;
606 static struct attribute_group md_redundancy_group;
608 void mddev_unlock(struct mddev *mddev)
610 if (mddev->to_remove) {
611 /* These cannot be removed under reconfig_mutex as
612 * an access to the files will try to take reconfig_mutex
613 * while holding the file unremovable, which leads to
615 * So keep sysfs_active set while the removal is happening,
616 * and anything else which might set ->to_remove or may
617 * otherwise change the sysfs namespace will fail with
618 * -EBUSY if sysfs_active is still set.
619 * We set sysfs_active under reconfig_mutex and elsewhere
620 * test it under the same mutex to ensure its correct value
623 struct attribute_group *to_remove = mddev->to_remove;
624 mddev->to_remove = NULL;
625 mddev->sysfs_active = 1;
626 mutex_unlock(&mddev->reconfig_mutex);
628 if (mddev->kobj.sd) {
629 if (to_remove != &md_redundancy_group)
630 sysfs_remove_group(&mddev->kobj, to_remove);
631 if (mddev->pers == NULL ||
632 mddev->pers->sync_request == NULL) {
633 sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
634 if (mddev->sysfs_action)
635 sysfs_put(mddev->sysfs_action);
636 mddev->sysfs_action = NULL;
639 mddev->sysfs_active = 0;
641 mutex_unlock(&mddev->reconfig_mutex);
643 /* As we've dropped the mutex we need a spinlock to
644 * make sure the thread doesn't disappear
646 spin_lock(&pers_lock);
647 md_wakeup_thread(mddev->thread);
648 spin_unlock(&pers_lock);
650 EXPORT_SYMBOL_GPL(mddev_unlock);
652 struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr)
654 struct md_rdev *rdev;
656 rdev_for_each_rcu(rdev, mddev)
657 if (rdev->desc_nr == nr)
662 EXPORT_SYMBOL_GPL(md_find_rdev_nr_rcu);
664 static struct md_rdev *find_rdev(struct mddev *mddev, dev_t dev)
666 struct md_rdev *rdev;
668 rdev_for_each(rdev, mddev)
669 if (rdev->bdev->bd_dev == dev)
675 static struct md_rdev *find_rdev_rcu(struct mddev *mddev, dev_t dev)
677 struct md_rdev *rdev;
679 rdev_for_each_rcu(rdev, mddev)
680 if (rdev->bdev->bd_dev == dev)
686 static struct md_personality *find_pers(int level, char *clevel)
688 struct md_personality *pers;
689 list_for_each_entry(pers, &pers_list, list) {
690 if (level != LEVEL_NONE && pers->level == level)
692 if (strcmp(pers->name, clevel)==0)
698 /* return the offset of the super block in 512byte sectors */
699 static inline sector_t calc_dev_sboffset(struct md_rdev *rdev)
701 sector_t num_sectors = i_size_read(rdev->bdev->bd_inode) / 512;
702 return MD_NEW_SIZE_SECTORS(num_sectors);
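/*
 * Worked example (assuming MD_RESERVED_SECTORS == 128, i.e. 64K, from
 * md_p.h): the 0.90 superblock sits in the last 64K-aligned 64K block of the
 * device, so a 1000000-sector device gives (1000000 & ~127) - 128 = 999808.
 */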
705 static int alloc_disk_sb(struct md_rdev *rdev)
707 rdev->sb_page = alloc_page(GFP_KERNEL);
708 if (!rdev->sb_page) {
709 printk(KERN_ALERT "md: out of memory.\n");
716 void md_rdev_clear(struct md_rdev *rdev)
719 put_page(rdev->sb_page);
721 rdev->sb_page = NULL;
726 put_page(rdev->bb_page);
727 rdev->bb_page = NULL;
729 kfree(rdev->badblocks.page);
730 rdev->badblocks.page = NULL;
732 EXPORT_SYMBOL_GPL(md_rdev_clear);
734 static void super_written(struct bio *bio, int error)
736 struct md_rdev *rdev = bio->bi_private;
737 struct mddev *mddev = rdev->mddev;
739 if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
740 printk("md: super_written gets error=%d, uptodate=%d\n",
741 error, test_bit(BIO_UPTODATE, &bio->bi_flags));
742 WARN_ON(test_bit(BIO_UPTODATE, &bio->bi_flags));
743 md_error(mddev, rdev);
746 if (atomic_dec_and_test(&mddev->pending_writes))
747 wake_up(&mddev->sb_wait);
751 void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
752 sector_t sector, int size, struct page *page)
754 /* write first size bytes of page to sector of rdev
755 * Increment mddev->pending_writes before returning
756 * and decrement it on completion, waking up sb_wait
757 * if zero is reached.
758 * If an error occurred, call md_error
760 struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, mddev);
762 bio->bi_bdev = rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev;
763 bio->bi_iter.bi_sector = sector;
764 bio_add_page(bio, page, size, 0);
765 bio->bi_private = rdev;
766 bio->bi_end_io = super_written;
768 atomic_inc(&mddev->pending_writes);
769 submit_bio(WRITE_FLUSH_FUA, bio);
772 void md_super_wait(struct mddev *mddev)
774 /* wait for all superblock writes that were scheduled to complete */
775 wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes)==0);
778 int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
779 struct page *page, int rw, bool metadata_op)
781 struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev);
784 bio->bi_bdev = (metadata_op && rdev->meta_bdev) ?
785 rdev->meta_bdev : rdev->bdev;
787 bio->bi_iter.bi_sector = sector + rdev->sb_start;
788 else if (rdev->mddev->reshape_position != MaxSector &&
789 (rdev->mddev->reshape_backwards ==
790 (sector >= rdev->mddev->reshape_position)))
791 bio->bi_iter.bi_sector = sector + rdev->new_data_offset;
793 bio->bi_iter.bi_sector = sector + rdev->data_offset;
794 bio_add_page(bio, page, size, 0);
795 submit_bio_wait(rw, bio);
797 ret = test_bit(BIO_UPTODATE, &bio->bi_flags);
801 EXPORT_SYMBOL_GPL(sync_page_io);
803 static int read_disk_sb(struct md_rdev *rdev, int size)
805 char b[BDEVNAME_SIZE];
810 if (!sync_page_io(rdev, 0, size, rdev->sb_page, READ, true))
816 printk(KERN_WARNING "md: disabled device %s, could not read superblock.\n",
817 bdevname(rdev->bdev,b));
821 static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
823 return sb1->set_uuid0 == sb2->set_uuid0 &&
824 sb1->set_uuid1 == sb2->set_uuid1 &&
825 sb1->set_uuid2 == sb2->set_uuid2 &&
826 sb1->set_uuid3 == sb2->set_uuid3;
829 static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
832 mdp_super_t *tmp1, *tmp2;
834 tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
835 tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);
837 if (!tmp1 || !tmp2) {
839 printk(KERN_INFO "md.c sb_equal(): failed to allocate memory!\n");
847 * nr_disks is not constant
852 ret = (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4) == 0);
859 static u32 md_csum_fold(u32 csum)
861 csum = (csum & 0xffff) + (csum >> 16);
862 return (csum & 0xffff) + (csum >> 16);
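/*
 * Worked examples: md_csum_fold(0x0001fffe) == 0xffff and
 * md_csum_fold(0xffff0001) == 0x0001; the double fold handles the carry out
 * of the low 16 bits.
 */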
865 static unsigned int calc_sb_csum(mdp_super_t *sb)
868 u32 *sb32 = (u32*)sb;
870 unsigned int disk_csum, csum;
872 disk_csum = sb->sb_csum;
875 for (i = 0; i < MD_SB_BYTES/4 ; i++)
877 csum = (newcsum & 0xffffffff) + (newcsum>>32);
880 /* This used to use csum_partial, which was wrong for several
881 * reasons including that different results are returned on
882 * different architectures. It isn't critical that we get exactly
883 * the same return value as before (we always csum_fold before
884 * testing, and that removes any differences). However as we
885 * know that csum_partial always returned a 16bit value on
886 * alphas, do a fold to maximise conformity to previous behaviour.
888 sb->sb_csum = md_csum_fold(disk_csum);
890 sb->sb_csum = disk_csum;
896 * Handle superblock details.
897 * We want to be able to handle multiple superblock formats
898 * so we have a common interface to them all, and an array of
899 * different handlers.
900 * We rely on user-space to write the initial superblock, and support
901 * reading and updating of superblocks.
902 * Interface methods are:
903 * int load_super(struct md_rdev *dev, struct md_rdev *refdev, int minor_version)
904 * loads and validates a superblock on dev.
905 * if refdev != NULL, compare superblocks on both devices
907 * 0 - dev has a superblock that is compatible with refdev
908 * 1 - dev has a superblock that is compatible and newer than refdev
909 * so dev should be used as the refdev in future
910 * -EINVAL superblock incompatible or invalid
911 * -othererror e.g. -EIO
913 * int validate_super(struct mddev *mddev, struct md_rdev *dev)
914 * Verify that dev is acceptable into mddev.
915 * The first time, mddev->raid_disks will be 0, and data from
916 * dev should be merged in. Subsequent calls check that dev
917 * is new enough. Return 0 or -EINVAL
919 * void sync_super(struct mddev *mddev, struct md_rdev *dev)
920 * Update the superblock for rdev with data in mddev
921 * This does not write to disc.
927 struct module *owner;
928 int (*load_super)(struct md_rdev *rdev,
929 struct md_rdev *refdev,
931 int (*validate_super)(struct mddev *mddev,
932 struct md_rdev *rdev);
933 void (*sync_super)(struct mddev *mddev,
934 struct md_rdev *rdev);
935 unsigned long long (*rdev_size_change)(struct md_rdev *rdev,
936 sector_t num_sectors);
937 int (*allow_new_offset)(struct md_rdev *rdev,
938 unsigned long long new_offset);
942 * Check that the given mddev has no bitmap.
944 * This function is called from the run method of all personalities that do not
945 * support bitmaps. It prints an error message and returns non-zero if mddev
946 * has a bitmap. Otherwise, it returns 0.
949 int md_check_no_bitmap(struct mddev *mddev)
951 if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset)
953 printk(KERN_ERR "%s: bitmaps are not supported for %s\n",
954 mdname(mddev), mddev->pers->name);
957 EXPORT_SYMBOL(md_check_no_bitmap);
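/*
 * Usage sketch: personalities without bitmap support call this from their
 * ->run() method (raid0 does), e.g.
 *
 *	if (md_check_no_bitmap(mddev))
 *		return -EINVAL;
 */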
960 * load_super for 0.90.0
962 static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
964 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
969 * Calculate the position of the superblock (512byte sectors),
970 * it's at the end of the disk.
972 * It also happens to be a multiple of 4Kb.
974 rdev->sb_start = calc_dev_sboffset(rdev);
976 ret = read_disk_sb(rdev, MD_SB_BYTES);
981 bdevname(rdev->bdev, b);
982 sb = page_address(rdev->sb_page);
984 if (sb->md_magic != MD_SB_MAGIC) {
985 printk(KERN_ERR "md: invalid raid superblock magic on %s\n",
990 if (sb->major_version != 0 ||
991 sb->minor_version < 90 ||
992 sb->minor_version > 91) {
993 printk(KERN_WARNING "Bad version number %d.%d on %s\n",
994 sb->major_version, sb->minor_version,
999 if (sb->raid_disks <= 0)
1002 if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
1003 printk(KERN_WARNING "md: invalid superblock checksum on %s\n",
1008 rdev->preferred_minor = sb->md_minor;
1009 rdev->data_offset = 0;
1010 rdev->new_data_offset = 0;
1011 rdev->sb_size = MD_SB_BYTES;
1012 rdev->badblocks.shift = -1;
1014 if (sb->level == LEVEL_MULTIPATH)
1017 rdev->desc_nr = sb->this_disk.number;
1023 mdp_super_t *refsb = page_address(refdev->sb_page);
1024 if (!uuid_equal(refsb, sb)) {
1025 printk(KERN_WARNING "md: %s has different UUID to %s\n",
1026 b, bdevname(refdev->bdev,b2));
1029 if (!sb_equal(refsb, sb)) {
1030 printk(KERN_WARNING "md: %s has same UUID"
1031 " but different superblock to %s\n",
1032 b, bdevname(refdev->bdev, b2));
1036 ev2 = md_event(refsb);
1042 rdev->sectors = rdev->sb_start;
1043 /* Limit to 4TB as metadata cannot record more than that.
1044 * (not needed for Linear and RAID0 as metadata doesn't record this size)
1047 if (rdev->sectors >= (2ULL << 32) && sb->level >= 1)
1048 rdev->sectors = (2ULL << 32) - 2;
1050 if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1)
1051 /* "this cannot possibly happen" ... */
1059 * validate_super for 0.90.0
1061 static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
1064 mdp_super_t *sb = page_address(rdev->sb_page);
1065 __u64 ev1 = md_event(sb);
1067 rdev->raid_disk = -1;
1068 clear_bit(Faulty, &rdev->flags);
1069 clear_bit(In_sync, &rdev->flags);
1070 clear_bit(Bitmap_sync, &rdev->flags);
1071 clear_bit(WriteMostly, &rdev->flags);
1073 if (mddev->raid_disks == 0) {
1074 mddev->major_version = 0;
1075 mddev->minor_version = sb->minor_version;
1076 mddev->patch_version = sb->patch_version;
1077 mddev->external = 0;
1078 mddev->chunk_sectors = sb->chunk_size >> 9;
1079 mddev->ctime = sb->ctime;
1080 mddev->utime = sb->utime;
1081 mddev->level = sb->level;
1082 mddev->clevel[0] = 0;
1083 mddev->layout = sb->layout;
1084 mddev->raid_disks = sb->raid_disks;
1085 mddev->dev_sectors = ((sector_t)sb->size) * 2;
1086 mddev->events = ev1;
1087 mddev->bitmap_info.offset = 0;
1088 mddev->bitmap_info.space = 0;
1089 /* bitmap can use 60 K after the 4K superblocks */
1090 mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
1091 mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
1092 mddev->reshape_backwards = 0;
1094 if (mddev->minor_version >= 91) {
1095 mddev->reshape_position = sb->reshape_position;
1096 mddev->delta_disks = sb->delta_disks;
1097 mddev->new_level = sb->new_level;
1098 mddev->new_layout = sb->new_layout;
1099 mddev->new_chunk_sectors = sb->new_chunk >> 9;
1100 if (mddev->delta_disks < 0)
1101 mddev->reshape_backwards = 1;
1103 mddev->reshape_position = MaxSector;
1104 mddev->delta_disks = 0;
1105 mddev->new_level = mddev->level;
1106 mddev->new_layout = mddev->layout;
1107 mddev->new_chunk_sectors = mddev->chunk_sectors;
1110 if (sb->state & (1<<MD_SB_CLEAN))
1111 mddev->recovery_cp = MaxSector;
1113 if (sb->events_hi == sb->cp_events_hi &&
1114 sb->events_lo == sb->cp_events_lo) {
1115 mddev->recovery_cp = sb->recovery_cp;
1117 mddev->recovery_cp = 0;
1120 memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
1121 memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
1122 memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
1123 memcpy(mddev->uuid+12,&sb->set_uuid3, 4);
1125 mddev->max_disks = MD_SB_DISKS;
1127 if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
1128 mddev->bitmap_info.file == NULL) {
1129 mddev->bitmap_info.offset =
1130 mddev->bitmap_info.default_offset;
1131 mddev->bitmap_info.space =
1132 mddev->bitmap_info.default_space;
1135 } else if (mddev->pers == NULL) {
1136 /* Insist on good event counter while assembling, except
1137 * for spares (which don't need an event count) */
1139 if (sb->disks[rdev->desc_nr].state & (
1140 (1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE)))
1141 if (ev1 < mddev->events)
1143 } else if (mddev->bitmap) {
1144 /* if adding to array with a bitmap, then we can accept an
1145 * older device ... but not too old.
1147 if (ev1 < mddev->bitmap->events_cleared)
1149 if (ev1 < mddev->events)
1150 set_bit(Bitmap_sync, &rdev->flags);
1152 if (ev1 < mddev->events)
1153 /* just a hot-add of a new device, leave raid_disk at -1 */
1157 if (mddev->level != LEVEL_MULTIPATH) {
1158 desc = sb->disks + rdev->desc_nr;
1160 if (desc->state & (1<<MD_DISK_FAULTY))
1161 set_bit(Faulty, &rdev->flags);
1162 else if (desc->state & (1<<MD_DISK_SYNC) /* &&
1163 desc->raid_disk < mddev->raid_disks */) {
1164 set_bit(In_sync, &rdev->flags);
1165 rdev->raid_disk = desc->raid_disk;
1166 rdev->saved_raid_disk = desc->raid_disk;
1167 } else if (desc->state & (1<<MD_DISK_ACTIVE)) {
1168 /* active but not in sync implies recovery up to
1169 * reshape position. We don't know exactly where
1170 * that is, so set to zero for now */
1171 if (mddev->minor_version >= 91) {
1172 rdev->recovery_offset = 0;
1173 rdev->raid_disk = desc->raid_disk;
1176 if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
1177 set_bit(WriteMostly, &rdev->flags);
1178 } else /* MULTIPATH are always insync */
1179 set_bit(In_sync, &rdev->flags);
1184 * sync_super for 0.90.0
1186 static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev)
1189 struct md_rdev *rdev2;
1190 int next_spare = mddev->raid_disks;
1192 /* make rdev->sb match mddev data..
1195 * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
1196 * 3/ any empty disks < next_spare become removed
1198 * disks[0] gets initialised to REMOVED because
1199 * we cannot be sure from other fields if it has
1200 * been initialised or not.
1203 int active=0, working=0,failed=0,spare=0,nr_disks=0;
1205 rdev->sb_size = MD_SB_BYTES;
1207 sb = page_address(rdev->sb_page);
1209 memset(sb, 0, sizeof(*sb));
1211 sb->md_magic = MD_SB_MAGIC;
1212 sb->major_version = mddev->major_version;
1213 sb->patch_version = mddev->patch_version;
1214 sb->gvalid_words = 0; /* ignored */
1215 memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
1216 memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
1217 memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
1218 memcpy(&sb->set_uuid3, mddev->uuid+12,4);
1220 sb->ctime = mddev->ctime;
1221 sb->level = mddev->level;
1222 sb->size = mddev->dev_sectors / 2;
1223 sb->raid_disks = mddev->raid_disks;
1224 sb->md_minor = mddev->md_minor;
1225 sb->not_persistent = 0;
1226 sb->utime = mddev->utime;
1228 sb->events_hi = (mddev->events>>32);
1229 sb->events_lo = (u32)mddev->events;
1231 if (mddev->reshape_position == MaxSector)
1232 sb->minor_version = 90;
1234 sb->minor_version = 91;
1235 sb->reshape_position = mddev->reshape_position;
1236 sb->new_level = mddev->new_level;
1237 sb->delta_disks = mddev->delta_disks;
1238 sb->new_layout = mddev->new_layout;
1239 sb->new_chunk = mddev->new_chunk_sectors << 9;
1241 mddev->minor_version = sb->minor_version;
1244 sb->recovery_cp = mddev->recovery_cp;
1245 sb->cp_events_hi = (mddev->events>>32);
1246 sb->cp_events_lo = (u32)mddev->events;
1247 if (mddev->recovery_cp == MaxSector)
1248 sb->state = (1<< MD_SB_CLEAN);
1250 sb->recovery_cp = 0;
1252 sb->layout = mddev->layout;
1253 sb->chunk_size = mddev->chunk_sectors << 9;
1255 if (mddev->bitmap && mddev->bitmap_info.file == NULL)
1256 sb->state |= (1<<MD_SB_BITMAP_PRESENT);
1258 sb->disks[0].state = (1<<MD_DISK_REMOVED);
1259 rdev_for_each(rdev2, mddev) {
1262 int is_active = test_bit(In_sync, &rdev2->flags);
1264 if (rdev2->raid_disk >= 0 &&
1265 sb->minor_version >= 91)
1266 /* we have nowhere to store the recovery_offset,
1267 * but if it is not below the reshape_position,
1268 * we can piggy-back on that.
1271 if (rdev2->raid_disk < 0 ||
1272 test_bit(Faulty, &rdev2->flags))
1275 desc_nr = rdev2->raid_disk;
1277 desc_nr = next_spare++;
1278 rdev2->desc_nr = desc_nr;
1279 d = &sb->disks[rdev2->desc_nr];
1281 d->number = rdev2->desc_nr;
1282 d->major = MAJOR(rdev2->bdev->bd_dev);
1283 d->minor = MINOR(rdev2->bdev->bd_dev);
1285 d->raid_disk = rdev2->raid_disk;
1287 d->raid_disk = rdev2->desc_nr; /* compatibility */
1288 if (test_bit(Faulty, &rdev2->flags))
1289 d->state = (1<<MD_DISK_FAULTY);
1290 else if (is_active) {
1291 d->state = (1<<MD_DISK_ACTIVE);
1292 if (test_bit(In_sync, &rdev2->flags))
1293 d->state |= (1<<MD_DISK_SYNC);
1301 if (test_bit(WriteMostly, &rdev2->flags))
1302 d->state |= (1<<MD_DISK_WRITEMOSTLY);
1304 /* now set the "removed" and "faulty" bits on any missing devices */
1305 for (i=0 ; i < mddev->raid_disks ; i++) {
1306 mdp_disk_t *d = &sb->disks[i];
1307 if (d->state == 0 && d->number == 0) {
1310 d->state = (1<<MD_DISK_REMOVED);
1311 d->state |= (1<<MD_DISK_FAULTY);
1315 sb->nr_disks = nr_disks;
1316 sb->active_disks = active;
1317 sb->working_disks = working;
1318 sb->failed_disks = failed;
1319 sb->spare_disks = spare;
1321 sb->this_disk = sb->disks[rdev->desc_nr];
1322 sb->sb_csum = calc_sb_csum(sb);
1326 * rdev_size_change for 0.90.0
1328 static unsigned long long
1329 super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
1331 if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
1332 return 0; /* component must fit device */
1333 if (rdev->mddev->bitmap_info.offset)
1334 return 0; /* can't move bitmap */
1335 rdev->sb_start = calc_dev_sboffset(rdev);
1336 if (!num_sectors || num_sectors > rdev->sb_start)
1337 num_sectors = rdev->sb_start;
1338 /* Limit to 4TB as metadata cannot record more than that.
1339 * 4TB == 2^32 KB, or 2*2^32 sectors.
1341 if (num_sectors >= (2ULL << 32) && rdev->mddev->level >= 1)
1342 num_sectors = (2ULL << 32) - 2;
1343 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
1345 md_super_wait(rdev->mddev);
1350 super_90_allow_new_offset(struct md_rdev *rdev, unsigned long long new_offset)
1352 /* non-zero offset changes not possible with v0.90 */
1353 return new_offset == 0;
1357 * version 1 superblock
1360 static __le32 calc_sb_1_csum(struct mdp_superblock_1 *sb)
1364 unsigned long long newcsum;
1365 int size = 256 + le32_to_cpu(sb->max_dev)*2;
1366 __le32 *isuper = (__le32*)sb;
1368 disk_csum = sb->sb_csum;
1371 for (; size >= 4; size -= 4)
1372 newcsum += le32_to_cpu(*isuper++);
1375 newcsum += le16_to_cpu(*(__le16*) isuper);
1377 csum = (newcsum & 0xffffffff) + (newcsum >> 32);
1378 sb->sb_csum = disk_csum;
1379 return cpu_to_le32(csum);
1382 static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors,
1384 static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
1386 struct mdp_superblock_1 *sb;
1390 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
1394 * Calculate the position of the superblock in 512byte sectors.
1395 * It is always aligned to a 4K boundary and
1396 * depending on minor_version, it can be:
1397 * 0: At least 8K, but less than 12K, from end of device
1398 * 1: At start of device
1399 * 2: 4K from start of device.
1401 switch(minor_version) {
1403 sb_start = i_size_read(rdev->bdev->bd_inode) >> 9;
1405 sb_start &= ~(sector_t)(4*2-1);
1416 rdev->sb_start = sb_start;
1418 /* superblock is rarely larger than 1K, but it can be larger,
1419 * and it is safe to read 4k, so we do that
1421 ret = read_disk_sb(rdev, 4096);
1422 if (ret) return ret;
1424 sb = page_address(rdev->sb_page);
1426 if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
1427 sb->major_version != cpu_to_le32(1) ||
1428 le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
1429 le64_to_cpu(sb->super_offset) != rdev->sb_start ||
1430 (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
1433 if (calc_sb_1_csum(sb) != sb->sb_csum) {
1434 printk("md: invalid superblock checksum on %s\n",
1435 bdevname(rdev->bdev,b));
1438 if (le64_to_cpu(sb->data_size) < 10) {
1439 printk("md: data_size too small on %s\n",
1440 bdevname(rdev->bdev,b));
1445 memcmp(sb->pad3, sb->pad3+1, sizeof(sb->pad3) - sizeof(sb->pad3[1])))
1446 /* Some padding is non-zero, might be a new feature */
1449 rdev->preferred_minor = 0xffff;
1450 rdev->data_offset = le64_to_cpu(sb->data_offset);
1451 rdev->new_data_offset = rdev->data_offset;
1452 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
1453 (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
1454 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
1455 atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
1457 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
1458 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
1459 if (rdev->sb_size & bmask)
1460 rdev->sb_size = (rdev->sb_size | bmask) + 1;
1463 && rdev->data_offset < sb_start + (rdev->sb_size/512))
1466 && rdev->new_data_offset < sb_start + (rdev->sb_size/512))
1469 if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
1472 rdev->desc_nr = le32_to_cpu(sb->dev_number);
1474 if (!rdev->bb_page) {
1475 rdev->bb_page = alloc_page(GFP_KERNEL);
1479 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BAD_BLOCKS) &&
1480 rdev->badblocks.count == 0) {
1481 /* need to load the bad block list.
1482 * Currently we limit it to one page.
1488 int sectors = le16_to_cpu(sb->bblog_size);
1489 if (sectors > (PAGE_SIZE / 512))
1491 offset = le32_to_cpu(sb->bblog_offset);
1494 bb_sector = (long long)offset;
1495 if (!sync_page_io(rdev, bb_sector, sectors << 9,
1496 rdev->bb_page, READ, true))
1498 bbp = (u64 *)page_address(rdev->bb_page);
1499 rdev->badblocks.shift = sb->bblog_shift;
1500 for (i = 0 ; i < (sectors << (9-3)) ; i++, bbp++) {
1501 u64 bb = le64_to_cpu(*bbp);
1502 int count = bb & (0x3ff);
1503 u64 sector = bb >> 10;
1504 sector <<= sb->bblog_shift;
1505 count <<= sb->bblog_shift;
1508 if (md_set_badblocks(&rdev->badblocks,
1509 sector, count, 1) == 0)
1512 } else if (sb->bblog_offset != 0)
1513 rdev->badblocks.shift = 0;
1519 struct mdp_superblock_1 *refsb = page_address(refdev->sb_page);
1521 if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
1522 sb->level != refsb->level ||
1523 sb->layout != refsb->layout ||
1524 sb->chunksize != refsb->chunksize) {
1525 printk(KERN_WARNING "md: %s has strangely different"
1526 " superblock to %s\n",
1527 bdevname(rdev->bdev,b),
1528 bdevname(refdev->bdev,b2));
1531 ev1 = le64_to_cpu(sb->events);
1532 ev2 = le64_to_cpu(refsb->events);
1539 if (minor_version) {
1540 sectors = (i_size_read(rdev->bdev->bd_inode) >> 9);
1541 sectors -= rdev->data_offset;
1543 sectors = rdev->sb_start;
1544 if (sectors < le64_to_cpu(sb->data_size))
1546 rdev->sectors = le64_to_cpu(sb->data_size);
1550 static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
1552 struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
1553 __u64 ev1 = le64_to_cpu(sb->events);
1555 rdev->raid_disk = -1;
1556 clear_bit(Faulty, &rdev->flags);
1557 clear_bit(In_sync, &rdev->flags);
1558 clear_bit(Bitmap_sync, &rdev->flags);
1559 clear_bit(WriteMostly, &rdev->flags);
1561 if (mddev->raid_disks == 0) {
1562 mddev->major_version = 1;
1563 mddev->patch_version = 0;
1564 mddev->external = 0;
1565 mddev->chunk_sectors = le32_to_cpu(sb->chunksize);
1566 mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1);
1567 mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1);
1568 mddev->level = le32_to_cpu(sb->level);
1569 mddev->clevel[0] = 0;
1570 mddev->layout = le32_to_cpu(sb->layout);
1571 mddev->raid_disks = le32_to_cpu(sb->raid_disks);
1572 mddev->dev_sectors = le64_to_cpu(sb->size);
1573 mddev->events = ev1;
1574 mddev->bitmap_info.offset = 0;
1575 mddev->bitmap_info.space = 0;
1576 /* Default location for bitmap is 1K after superblock
1577 * using 3K - total of 4K
1579 mddev->bitmap_info.default_offset = 1024 >> 9;
1580 mddev->bitmap_info.default_space = (4096-1024) >> 9;
1581 mddev->reshape_backwards = 0;
1583 mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
1584 memcpy(mddev->uuid, sb->set_uuid, 16);
1586 mddev->max_disks = (4096-256)/2;
1588 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
1589 mddev->bitmap_info.file == NULL) {
1590 mddev->bitmap_info.offset =
1591 (__s32)le32_to_cpu(sb->bitmap_offset);
1592 /* Metadata doesn't record how much space is available.
1593 * For 1.0, we assume we can use up to the superblock
1594 * if before, else to 4K beyond superblock.
1595 * For others, assume no change is possible.
1597 if (mddev->minor_version > 0)
1598 mddev->bitmap_info.space = 0;
1599 else if (mddev->bitmap_info.offset > 0)
1600 mddev->bitmap_info.space =
1601 8 - mddev->bitmap_info.offset;
1603 mddev->bitmap_info.space =
1604 -mddev->bitmap_info.offset;
1607 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
1608 mddev->reshape_position = le64_to_cpu(sb->reshape_position);
1609 mddev->delta_disks = le32_to_cpu(sb->delta_disks);
1610 mddev->new_level = le32_to_cpu(sb->new_level);
1611 mddev->new_layout = le32_to_cpu(sb->new_layout);
1612 mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk);
1613 if (mddev->delta_disks < 0 ||
1614 (mddev->delta_disks == 0 &&
1615 (le32_to_cpu(sb->feature_map)
1616 & MD_FEATURE_RESHAPE_BACKWARDS)))
1617 mddev->reshape_backwards = 1;
1619 mddev->reshape_position = MaxSector;
1620 mddev->delta_disks = 0;
1621 mddev->new_level = mddev->level;
1622 mddev->new_layout = mddev->layout;
1623 mddev->new_chunk_sectors = mddev->chunk_sectors;
1626 } else if (mddev->pers == NULL) {
1627 /* Insist on good event counter while assembling, except for
1628 * spares (which don't need an event count) */
1630 if (rdev->desc_nr >= 0 &&
1631 rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
1632 le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < 0xfffe)
1633 if (ev1 < mddev->events)
1635 } else if (mddev->bitmap) {
1636 /* If adding to array with a bitmap, then we can accept an
1637 * older device, but not too old.
1639 if (ev1 < mddev->bitmap->events_cleared)
1641 if (ev1 < mddev->events)
1642 set_bit(Bitmap_sync, &rdev->flags);
1644 if (ev1 < mddev->events)
1645 /* just a hot-add of a new device, leave raid_disk at -1 */
1648 if (mddev->level != LEVEL_MULTIPATH) {
1650 if (rdev->desc_nr < 0 ||
1651 rdev->desc_nr >= le32_to_cpu(sb->max_dev)) {
1655 role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
1657 case 0xffff: /* spare */
1659 case 0xfffe: /* faulty */
1660 set_bit(Faulty, &rdev->flags);
1663 rdev->saved_raid_disk = role;
1664 if ((le32_to_cpu(sb->feature_map) &
1665 MD_FEATURE_RECOVERY_OFFSET)) {
1666 rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
1667 if (!(le32_to_cpu(sb->feature_map) &
1668 MD_FEATURE_RECOVERY_BITMAP))
1669 rdev->saved_raid_disk = -1;
1671 set_bit(In_sync, &rdev->flags);
1672 rdev->raid_disk = role;
1675 if (sb->devflags & WriteMostly1)
1676 set_bit(WriteMostly, &rdev->flags);
1677 if (le32_to_cpu(sb->feature_map) & MD_FEATURE_REPLACEMENT)
1678 set_bit(Replacement, &rdev->flags);
1679 } else /* MULTIPATH are always insync */
1680 set_bit(In_sync, &rdev->flags);
1685 static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
1687 struct mdp_superblock_1 *sb;
1688 struct md_rdev *rdev2;
1690 /* make rdev->sb match mddev and rdev data. */
1692 sb = page_address(rdev->sb_page);
1694 sb->feature_map = 0;
1696 sb->recovery_offset = cpu_to_le64(0);
1697 memset(sb->pad3, 0, sizeof(sb->pad3));
1699 sb->utime = cpu_to_le64((__u64)mddev->utime);
1700 sb->events = cpu_to_le64(mddev->events);
1702 sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
1704 sb->resync_offset = cpu_to_le64(0);
1706 sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
1708 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
1709 sb->size = cpu_to_le64(mddev->dev_sectors);
1710 sb->chunksize = cpu_to_le32(mddev->chunk_sectors);
1711 sb->level = cpu_to_le32(mddev->level);
1712 sb->layout = cpu_to_le32(mddev->layout);
1714 if (test_bit(WriteMostly, &rdev->flags))
1715 sb->devflags |= WriteMostly1;
1717 sb->devflags &= ~WriteMostly1;
1718 sb->data_offset = cpu_to_le64(rdev->data_offset);
1719 sb->data_size = cpu_to_le64(rdev->sectors);
1721 if (mddev->bitmap && mddev->bitmap_info.file == NULL) {
1722 sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset);
1723 sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
1726 if (rdev->raid_disk >= 0 &&
1727 !test_bit(In_sync, &rdev->flags)) {
1729 cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
1730 sb->recovery_offset =
1731 cpu_to_le64(rdev->recovery_offset);
1732 if (rdev->saved_raid_disk >= 0 && mddev->bitmap)
1734 cpu_to_le32(MD_FEATURE_RECOVERY_BITMAP);
1736 if (test_bit(Replacement, &rdev->flags))
1738 cpu_to_le32(MD_FEATURE_REPLACEMENT);
1740 if (mddev->reshape_position != MaxSector) {
1741 sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
1742 sb->reshape_position = cpu_to_le64(mddev->reshape_position);
1743 sb->new_layout = cpu_to_le32(mddev->new_layout);
1744 sb->delta_disks = cpu_to_le32(mddev->delta_disks);
1745 sb->new_level = cpu_to_le32(mddev->new_level);
1746 sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors);
1747 if (mddev->delta_disks == 0 &&
1748 mddev->reshape_backwards)
1750 |= cpu_to_le32(MD_FEATURE_RESHAPE_BACKWARDS);
1751 if (rdev->new_data_offset != rdev->data_offset) {
1753 |= cpu_to_le32(MD_FEATURE_NEW_OFFSET);
1754 sb->new_offset = cpu_to_le32((__u32)(rdev->new_data_offset
1755 - rdev->data_offset));
1759 if (rdev->badblocks.count == 0)
1760 /* Nothing to do for bad blocks*/ ;
1761 else if (sb->bblog_offset == 0)
1762 /* Cannot record bad blocks on this device */
1763 md_error(mddev, rdev);
1765 struct badblocks *bb = &rdev->badblocks;
1766 u64 *bbp = (u64 *)page_address(rdev->bb_page);
1768 sb->feature_map |= cpu_to_le32(MD_FEATURE_BAD_BLOCKS);
1773 seq = read_seqbegin(&bb->lock);
1775 memset(bbp, 0xff, PAGE_SIZE);
1777 for (i = 0 ; i < bb->count ; i++) {
1778 u64 internal_bb = p[i];
1779 u64 store_bb = ((BB_OFFSET(internal_bb) << 10)
1780 | BB_LEN(internal_bb));
1781 bbp[i] = cpu_to_le64(store_bb);
1784 if (read_seqretry(&bb->lock, seq))
1787 bb->sector = (rdev->sb_start +
1788 (int)le32_to_cpu(sb->bblog_offset));
1789 bb->size = le16_to_cpu(sb->bblog_size);
1794 rdev_for_each(rdev2, mddev)
1795 if (rdev2->desc_nr+1 > max_dev)
1796 max_dev = rdev2->desc_nr+1;
1798 if (max_dev > le32_to_cpu(sb->max_dev)) {
1800 sb->max_dev = cpu_to_le32(max_dev);
1801 rdev->sb_size = max_dev * 2 + 256;
1802 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
1803 if (rdev->sb_size & bmask)
1804 rdev->sb_size = (rdev->sb_size | bmask) + 1;
1806 max_dev = le32_to_cpu(sb->max_dev);
1808 for (i=0; i<max_dev;i++)
1809 sb->dev_roles[i] = cpu_to_le16(0xfffe);
1811 rdev_for_each(rdev2, mddev) {
1813 if (test_bit(Faulty, &rdev2->flags))
1814 sb->dev_roles[i] = cpu_to_le16(0xfffe);
1815 else if (test_bit(In_sync, &rdev2->flags))
1816 sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
1817 else if (rdev2->raid_disk >= 0)
1818 sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
1820 sb->dev_roles[i] = cpu_to_le16(0xffff);
1823 sb->sb_csum = calc_sb_1_csum(sb);
1826 static unsigned long long
1827 super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
1829 struct mdp_superblock_1 *sb;
1830 sector_t max_sectors;
1831 if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
1832 return 0; /* component must fit device */
1833 if (rdev->data_offset != rdev->new_data_offset)
1834 return 0; /* too confusing */
1835 if (rdev->sb_start < rdev->data_offset) {
1836 /* minor versions 1 and 2; superblock before data */
1837 max_sectors = i_size_read(rdev->bdev->bd_inode) >> 9;
1838 max_sectors -= rdev->data_offset;
1839 if (!num_sectors || num_sectors > max_sectors)
1840 num_sectors = max_sectors;
1841 } else if (rdev->mddev->bitmap_info.offset) {
1842 /* minor version 0 with bitmap we can't move */
1845 /* minor version 0; superblock after data */
1847 sb_start = (i_size_read(rdev->bdev->bd_inode) >> 9) - 8*2;
1848 sb_start &= ~(sector_t)(4*2 - 1);
1849 max_sectors = rdev->sectors + sb_start - rdev->sb_start;
1850 if (!num_sectors || num_sectors > max_sectors)
1851 num_sectors = max_sectors;
1852 rdev->sb_start = sb_start;
1854 sb = page_address(rdev->sb_page);
1855 sb->data_size = cpu_to_le64(num_sectors);
1856 sb->super_offset = rdev->sb_start;
1857 sb->sb_csum = calc_sb_1_csum(sb);
1858 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
1860 md_super_wait(rdev->mddev);
1866 super_1_allow_new_offset(struct md_rdev *rdev,
1867 unsigned long long new_offset)
1869 /* All necessary checks on new >= old have been done */
1870 struct bitmap *bitmap;
1871 if (new_offset >= rdev->data_offset)
1874 /* with 1.0 metadata, there is no metadata to tread on
1875 * so we can always move back */
1876 if (rdev->mddev->minor_version == 0)
1879 /* otherwise we must be sure not to step on
1880 * any metadata, so stay:
1881 * 36K beyond start of superblock
1882 * beyond end of badblocks
1883 * beyond write-intent bitmap
1885 if (rdev->sb_start + (32+4)*2 > new_offset)
1887 bitmap = rdev->mddev->bitmap;
1888 if (bitmap && !rdev->mddev->bitmap_info.file &&
1889 rdev->sb_start + rdev->mddev->bitmap_info.offset +
1890 bitmap->storage.file_pages * (PAGE_SIZE>>9) > new_offset)
1892 if (rdev->badblocks.sector + rdev->badblocks.size > new_offset)
1898 static struct super_type super_types[] = {
1901 .owner = THIS_MODULE,
1902 .load_super = super_90_load,
1903 .validate_super = super_90_validate,
1904 .sync_super = super_90_sync,
1905 .rdev_size_change = super_90_rdev_size_change,
1906 .allow_new_offset = super_90_allow_new_offset,
1910 .owner = THIS_MODULE,
1911 .load_super = super_1_load,
1912 .validate_super = super_1_validate,
1913 .sync_super = super_1_sync,
1914 .rdev_size_change = super_1_rdev_size_change,
1915 .allow_new_offset = super_1_allow_new_offset,
1919 static void sync_super(struct mddev *mddev, struct md_rdev *rdev)
1921 if (mddev->sync_super) {
1922 mddev->sync_super(mddev, rdev);
1926 BUG_ON(mddev->major_version >= ARRAY_SIZE(super_types));
1928 super_types[mddev->major_version].sync_super(mddev, rdev);
1931 static int match_mddev_units(struct mddev *mddev1, struct mddev *mddev2)
1933 struct md_rdev *rdev, *rdev2;
1936 rdev_for_each_rcu(rdev, mddev1)
1937 rdev_for_each_rcu(rdev2, mddev2)
1938 if (rdev->bdev->bd_contains ==
1939 rdev2->bdev->bd_contains) {
1947 static LIST_HEAD(pending_raid_disks);
1950 * Try to register data integrity profile for an mddev
1952 * This is called when an array is started and after a disk has been kicked
1953 * from the array. It only succeeds if all working and active component devices
1954 * are integrity capable with matching profiles.
1956 int md_integrity_register(struct mddev *mddev)
1958 struct md_rdev *rdev, *reference = NULL;
1960 if (list_empty(&mddev->disks))
1961 return 0; /* nothing to do */
1962 if (!mddev->gendisk || blk_get_integrity(mddev->gendisk))
1963 return 0; /* shouldn't register, or already is */
1964 rdev_for_each(rdev, mddev) {
1965 /* skip spares and non-functional disks */
1966 if (test_bit(Faulty, &rdev->flags))
1968 if (rdev->raid_disk < 0)
1971 /* Use the first rdev as the reference */
1975 /* does this rdev's profile match the reference profile? */
1976 if (blk_integrity_compare(reference->bdev->bd_disk,
1977 rdev->bdev->bd_disk) < 0)
1980 if (!reference || !bdev_get_integrity(reference->bdev))
1983 * All component devices are integrity capable and have matching
1984 * profiles, register the common profile for the md device.
1986 if (blk_integrity_register(mddev->gendisk,
1987 bdev_get_integrity(reference->bdev)) != 0) {
1988 printk(KERN_ERR "md: failed to register integrity for %s\n",
1992 printk(KERN_NOTICE "md: data integrity enabled on %s\n", mdname(mddev));
1993 if (bioset_integrity_create(mddev->bio_set, BIO_POOL_SIZE)) {
1994 printk(KERN_ERR "md: failed to create integrity pool for %s\n",
2000 EXPORT_SYMBOL(md_integrity_register);
2002 /* Disable data integrity if non-capable/non-matching disk is being added */
2003 void md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
2005 struct blk_integrity *bi_rdev;
2006 struct blk_integrity *bi_mddev;
2008 if (!mddev->gendisk)
2011 bi_rdev = bdev_get_integrity(rdev->bdev);
2012 bi_mddev = blk_get_integrity(mddev->gendisk);
2014 if (!bi_mddev) /* nothing to do */
2016 if (rdev->raid_disk < 0) /* skip spares */
2018 if (bi_rdev && blk_integrity_compare(mddev->gendisk,
2019 rdev->bdev->bd_disk) >= 0)
2021 printk(KERN_NOTICE "disabling data integrity on %s\n", mdname(mddev));
2022 blk_integrity_unregister(mddev->gendisk);
2024 EXPORT_SYMBOL(md_integrity_add_rdev);
2026 static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
2028 char b[BDEVNAME_SIZE];
2032 /* prevent duplicates */
2033 if (find_rdev(mddev, rdev->bdev->bd_dev))
2036 /* make sure rdev->sectors exceeds mddev->dev_sectors */
2037 if (rdev->sectors && (mddev->dev_sectors == 0 ||
2038 rdev->sectors < mddev->dev_sectors)) {
2040 /* Cannot change size, so fail
2041 * If mddev->level <= 0, then we don't care
2042 * about aligning sizes (e.g. linear)
2044 if (mddev->level > 0)
2047 mddev->dev_sectors = rdev->sectors;
2050 /* Verify rdev->desc_nr is unique.
2051 * If it is -1, assign a free number, else
2052 * check number is not in use
2055 if (rdev->desc_nr < 0) {
2058 choice = mddev->raid_disks;
2059 while (md_find_rdev_nr_rcu(mddev, choice))
2061 rdev->desc_nr = choice;
2063 if (md_find_rdev_nr_rcu(mddev, rdev->desc_nr)) {
2069 if (mddev->max_disks && rdev->desc_nr >= mddev->max_disks) {
2070 printk(KERN_WARNING "md: %s: array is limited to %d devices\n",
2071 mdname(mddev), mddev->max_disks);
2074 bdevname(rdev->bdev,b);
2075 strreplace(b, '/', '!');
2077 rdev->mddev = mddev;
2078 printk(KERN_INFO "md: bind<%s>\n", b);
2080 if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
2083 ko = &part_to_dev(rdev->bdev->bd_part)->kobj;
2084 if (sysfs_create_link(&rdev->kobj, ko, "block"))
2085 /* failure here is OK */;
2086 rdev->sysfs_state = sysfs_get_dirent_safe(rdev->kobj.sd, "state");
2088 list_add_rcu(&rdev->same_set, &mddev->disks);
2089 bd_link_disk_holder(rdev->bdev, mddev->gendisk);
2091 /* May as well allow recovery to be retried once */
2092 mddev->recovery_disabled++;
2097 printk(KERN_WARNING "md: failed to register dev-%s for %s\n",
2102 static void md_delayed_delete(struct work_struct *ws)
2104 struct md_rdev *rdev = container_of(ws, struct md_rdev, del_work);
2105 kobject_del(&rdev->kobj);
2106 kobject_put(&rdev->kobj);
2109 static void unbind_rdev_from_array(struct md_rdev *rdev)
2111 char b[BDEVNAME_SIZE];
2113 bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk);
2114 list_del_rcu(&rdev->same_set);
2115 printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b));
2117 sysfs_remove_link(&rdev->kobj, "block");
2118 sysfs_put(rdev->sysfs_state);
2119 rdev->sysfs_state = NULL;
2120 rdev->badblocks.count = 0;
2121 /* We need to delay this, otherwise we can deadlock when
2122 * writing 'remove' to "dev/state". We also need
2123 * to delay it due to rcu usage.
2126 INIT_WORK(&rdev->del_work, md_delayed_delete);
2127 kobject_get(&rdev->kobj);
2128 queue_work(md_misc_wq, &rdev->del_work);
2132 * prevent the device from being mounted, repartitioned or
2133 * otherwise reused by a RAID array (or any other kernel
2134 * subsystem), by bd_claiming the device.
2136 static int lock_rdev(struct md_rdev *rdev, dev_t dev, int shared)
2139 struct block_device *bdev;
2140 char b[BDEVNAME_SIZE];
2142 bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
2143 shared ? (struct md_rdev *)lock_rdev : rdev);
2145 printk(KERN_ERR "md: could not open %s.\n",
2146 __bdevname(dev, b));
2147 return PTR_ERR(bdev);
2153 static void unlock_rdev(struct md_rdev *rdev)
2155 struct block_device *bdev = rdev->bdev;
2157 blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
2160 void md_autodetect_dev(dev_t dev);
2162 static void export_rdev(struct md_rdev *rdev)
2164 char b[BDEVNAME_SIZE];
2166 printk(KERN_INFO "md: export_rdev(%s)\n",
2167 bdevname(rdev->bdev,b));
2168 md_rdev_clear(rdev);
2170 if (test_bit(AutoDetected, &rdev->flags))
2171 md_autodetect_dev(rdev->bdev->bd_dev);
2174 kobject_put(&rdev->kobj);
2177 void md_kick_rdev_from_array(struct md_rdev *rdev)
2179 unbind_rdev_from_array(rdev);
2182 EXPORT_SYMBOL_GPL(md_kick_rdev_from_array);
2184 static void export_array(struct mddev *mddev)
2186 struct md_rdev *rdev;
2188 while (!list_empty(&mddev->disks)) {
2189 rdev = list_first_entry(&mddev->disks, struct md_rdev,
2191 md_kick_rdev_from_array(rdev);
2193 mddev->raid_disks = 0;
2194 mddev->major_version = 0;
2197 static void sync_sbs(struct mddev *mddev, int nospares)
2199 /* Update each superblock (in-memory image), but
2200 * if we are allowed to, skip spares which already
2201 * have the right event counter, or have one earlier
2202 * (which would mean they aren't being marked as dirty
2203 * with the rest of the array)
2205 struct md_rdev *rdev;
2206 rdev_for_each(rdev, mddev) {
2207 if (rdev->sb_events == mddev->events ||
2209 rdev->raid_disk < 0 &&
2210 rdev->sb_events+1 == mddev->events)) {
2211 /* Don't update this superblock */
2212 rdev->sb_loaded = 2;
2214 sync_super(mddev, rdev);
2215 rdev->sb_loaded = 1;
2220 void md_update_sb(struct mddev *mddev, int force_change)
2222 struct md_rdev *rdev;
2225 int any_badblocks_changed = 0;
2229 set_bit(MD_CHANGE_DEVS, &mddev->flags);
2233 /* First make sure individual recovery_offsets are correct */
2234 rdev_for_each(rdev, mddev) {
2235 if (rdev->raid_disk >= 0 &&
2236 mddev->delta_disks >= 0 &&
2237 !test_bit(In_sync, &rdev->flags) &&
2238 mddev->curr_resync_completed > rdev->recovery_offset)
2239 rdev->recovery_offset = mddev->curr_resync_completed;
2242 if (!mddev->persistent) {
2243 clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
2244 clear_bit(MD_CHANGE_DEVS, &mddev->flags);
2245 if (!mddev->external) {
2246 clear_bit(MD_CHANGE_PENDING, &mddev->flags);
2247 rdev_for_each(rdev, mddev) {
2248 if (rdev->badblocks.changed) {
2249 rdev->badblocks.changed = 0;
2250 md_ack_all_badblocks(&rdev->badblocks);
2251 md_error(mddev, rdev);
2253 clear_bit(Blocked, &rdev->flags);
2254 clear_bit(BlockedBadBlocks, &rdev->flags);
2255 wake_up(&rdev->blocked_wait);
2258 wake_up(&mddev->sb_wait);
2262 spin_lock(&mddev->lock);
2264 mddev->utime = get_seconds();
2266 if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
2268 if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags))
2269 /* just a clean<->dirty transition, possibly leave spares alone,
2270 * though if events isn't the right even/odd, we will have to do it anyway.
2276 if (mddev->degraded)
2277 /* If the array is degraded, then skipping spares is both
2278 * dangerous and fairly pointless.
2279 * Dangerous because a device that was removed from the array
2280 * might have an event_count that still looks up-to-date,
2281 * so it can be re-added without a resync.
2282 * Pointless because if there are any spares to skip,
2283 * then a recovery will happen and soon that array won't
2284 * be degraded any more and the spare can go back to sleep then.
2288 sync_req = mddev->in_sync;
2290 /* If this is just a dirty<->clean transition, and the array is clean
2291 * and 'events' is odd, we can roll back to the previous clean state */
2293 && (mddev->in_sync && mddev->recovery_cp == MaxSector)
2294 && mddev->can_decrease_events
2295 && mddev->events != 1) {
2297 mddev->can_decrease_events = 0;
2299 /* otherwise we have to go forward and ... */
2301 mddev->can_decrease_events = nospares;
2305 * This 64-bit counter should never wrap.
2306 * Either we are in around ~1 trillion A.C., assuming
2307 * 1 reboot per second, or we have a bug...
2309 WARN_ON(mddev->events == 0);
2311 rdev_for_each(rdev, mddev) {
2312 if (rdev->badblocks.changed)
2313 any_badblocks_changed++;
2314 if (test_bit(Faulty, &rdev->flags))
2315 set_bit(FaultRecorded, &rdev->flags);
2318 sync_sbs(mddev, nospares);
2319 spin_unlock(&mddev->lock);
2321 pr_debug("md: updating %s RAID superblock on device (in sync %d)\n",
2322 mdname(mddev), mddev->in_sync);
2324 bitmap_update_sb(mddev->bitmap);
2325 rdev_for_each(rdev, mddev) {
2326 char b[BDEVNAME_SIZE];
2328 if (rdev->sb_loaded != 1)
2329 continue; /* no noise on spare devices */
2331 if (!test_bit(Faulty, &rdev->flags)) {
2332 md_super_write(mddev,rdev,
2333 rdev->sb_start, rdev->sb_size,
2335 pr_debug("md: (write) %s's sb offset: %llu\n",
2336 bdevname(rdev->bdev, b),
2337 (unsigned long long)rdev->sb_start);
2338 rdev->sb_events = mddev->events;
2339 if (rdev->badblocks.size) {
2340 md_super_write(mddev, rdev,
2341 rdev->badblocks.sector,
2342 rdev->badblocks.size << 9,
2344 rdev->badblocks.size = 0;
2348 pr_debug("md: %s (skipping faulty)\n",
2349 bdevname(rdev->bdev, b));
2351 if (mddev->level == LEVEL_MULTIPATH)
2352 /* only need to write one superblock... */
2355 md_super_wait(mddev);
2356 /* if there was a failure, MD_CHANGE_DEVS was set, and we re-write super */
2358 spin_lock(&mddev->lock);
2359 if (mddev->in_sync != sync_req ||
2360 test_bit(MD_CHANGE_DEVS, &mddev->flags)) {
2361 /* have to write it out again */
2362 spin_unlock(&mddev->lock);
2365 clear_bit(MD_CHANGE_PENDING, &mddev->flags);
2366 spin_unlock(&mddev->lock);
2367 wake_up(&mddev->sb_wait);
2368 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
2369 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
2371 rdev_for_each(rdev, mddev) {
2372 if (test_and_clear_bit(FaultRecorded, &rdev->flags))
2373 clear_bit(Blocked, &rdev->flags);
2375 if (any_badblocks_changed)
2376 md_ack_all_badblocks(&rdev->badblocks);
2377 clear_bit(BlockedBadBlocks, &rdev->flags);
2378 wake_up(&rdev->blocked_wait);
2381 EXPORT_SYMBOL(md_update_sb);
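/*
 * Editor's illustration (not part of md.c): a minimal userspace sketch of the
 * event-counter bookkeeping used in md_update_sb() above.  The names
 * toy_array and toy_record_transition are invented for this sketch; only the
 * rule itself - roll the counter back one step on a clean transition when
 * that is allowed, otherwise always move it forward - mirrors the code above.
 */
#if 0	/* illustrative only, not built */
#include <stdbool.h>

struct toy_array {
	unsigned long long events;
	bool can_decrease_events;
};

static void toy_record_transition(struct toy_array *a, bool nospares, bool in_sync)
{
	if (nospares && in_sync && a->can_decrease_events && a->events != 1) {
		a->events--;			/* reuse the previous clean state */
		a->can_decrease_events = false;
	} else {
		a->events++;			/* otherwise only ever move forward */
		a->can_decrease_events = nospares;
	}
}
#endif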
2383 static int add_bound_rdev(struct md_rdev *rdev)
2385 struct mddev *mddev = rdev->mddev;
2388 if (!mddev->pers->hot_remove_disk) {
2389 /* If there is hot_add_disk but no hot_remove_disk
2390 * then added disks are for geometry changes,
2391 * and should be added immediately.
2393 super_types[mddev->major_version].
2394 validate_super(mddev, rdev);
2395 err = mddev->pers->hot_add_disk(mddev, rdev);
2397 unbind_rdev_from_array(rdev);
2402 sysfs_notify_dirent_safe(rdev->sysfs_state);
2404 set_bit(MD_CHANGE_DEVS, &mddev->flags);
2405 if (mddev->degraded)
2406 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
2407 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2408 md_new_event(mddev);
2409 md_wakeup_thread(mddev->thread);
2413 /* words written to sysfs files may, or may not, be \n terminated.
2414 * We want to accept them either way. For this we use cmd_match.
2416 static int cmd_match(const char *cmd, const char *str)
2418 /* See if cmd, written into a sysfs file, matches
2419 * str. They must either be the same, or cmd can
2420 * have a trailing newline
2422 while (*cmd && *str && *cmd == *str) {
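/*
 * Editor's illustration (not part of md.c): a self-contained version of the
 * matching rule described above - the two strings must be identical except
 * that the sysfs buffer may carry one trailing newline.  toy_cmd_match is an
 * invented name; the real helper is cmd_match() above.
 */
#if 0	/* illustrative only, not built */
static int toy_cmd_match(const char *cmd, const char *str)
{
	while (*cmd && *str && *cmd == *str) {
		cmd++;
		str++;
	}
	if (*cmd == '\n')
		cmd++;
	return !*cmd && !*str;	/* both exhausted -> match */
}
/* toy_cmd_match("faulty\n", "faulty") == 1, toy_cmd_match("fault", "faulty") == 0 */
#endif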
2433 struct rdev_sysfs_entry {
2434 struct attribute attr;
2435 ssize_t (*show)(struct md_rdev *, char *);
2436 ssize_t (*store)(struct md_rdev *, const char *, size_t);
2440 state_show(struct md_rdev *rdev, char *page)
2444 unsigned long flags = ACCESS_ONCE(rdev->flags);
2446 if (test_bit(Faulty, &flags) ||
2447 rdev->badblocks.unacked_exist) {
2448 len+= sprintf(page+len, "%sfaulty",sep);
2451 if (test_bit(In_sync, &flags)) {
2452 len += sprintf(page+len, "%sin_sync",sep);
2455 if (test_bit(WriteMostly, &flags)) {
2456 len += sprintf(page+len, "%swrite_mostly",sep);
2459 if (test_bit(Blocked, &flags) ||
2460 (rdev->badblocks.unacked_exist
2461 && !test_bit(Faulty, &flags))) {
2462 len += sprintf(page+len, "%sblocked", sep);
2465 if (!test_bit(Faulty, &flags) &&
2466 !test_bit(In_sync, &flags)) {
2467 len += sprintf(page+len, "%sspare", sep);
2470 if (test_bit(WriteErrorSeen, &flags)) {
2471 len += sprintf(page+len, "%swrite_error", sep);
2474 if (test_bit(WantReplacement, &flags)) {
2475 len += sprintf(page+len, "%swant_replacement", sep);
2478 if (test_bit(Replacement, &flags)) {
2479 len += sprintf(page+len, "%sreplacement", sep);
2483 return len+sprintf(page+len, "\n");
2487 state_store(struct md_rdev *rdev, const char *buf, size_t len)
2490 * faulty - simulates an error
2491 * remove - disconnects the device
2492 * writemostly - sets write_mostly
2493 * -writemostly - clears write_mostly
2494 * blocked - sets the Blocked flags
2495 * -blocked - clears the Blocked and possibly simulates an error
2496 * insync - sets Insync providing device isn't active
2497 * -insync - clear Insync for a device with a slot assigned,
2498 * so that it gets rebuilt based on bitmap
2499 * write_error - sets WriteErrorSeen
2500 * -write_error - clears WriteErrorSeen
2503 if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
2504 md_error(rdev->mddev, rdev);
2505 if (test_bit(Faulty, &rdev->flags))
2509 } else if (cmd_match(buf, "remove")) {
2510 if (rdev->raid_disk >= 0)
2513 struct mddev *mddev = rdev->mddev;
2514 if (mddev_is_clustered(mddev))
2515 md_cluster_ops->remove_disk(mddev, rdev);
2516 md_kick_rdev_from_array(rdev);
2517 if (mddev_is_clustered(mddev))
2518 md_cluster_ops->metadata_update_start(mddev);
2520 md_update_sb(mddev, 1);
2521 md_new_event(mddev);
2522 if (mddev_is_clustered(mddev))
2523 md_cluster_ops->metadata_update_finish(mddev);
2526 } else if (cmd_match(buf, "writemostly")) {
2527 set_bit(WriteMostly, &rdev->flags);
2529 } else if (cmd_match(buf, "-writemostly")) {
2530 clear_bit(WriteMostly, &rdev->flags);
2532 } else if (cmd_match(buf, "blocked")) {
2533 set_bit(Blocked, &rdev->flags);
2535 } else if (cmd_match(buf, "-blocked")) {
2536 if (!test_bit(Faulty, &rdev->flags) &&
2537 rdev->badblocks.unacked_exist) {
2538 /* metadata handler doesn't understand badblocks,
2539 * so we need to fail the device
2541 md_error(rdev->mddev, rdev);
2543 clear_bit(Blocked, &rdev->flags);
2544 clear_bit(BlockedBadBlocks, &rdev->flags);
2545 wake_up(&rdev->blocked_wait);
2546 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
2547 md_wakeup_thread(rdev->mddev->thread);
2550 } else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) {
2551 set_bit(In_sync, &rdev->flags);
2553 } else if (cmd_match(buf, "-insync") && rdev->raid_disk >= 0) {
2554 if (rdev->mddev->pers == NULL) {
2555 clear_bit(In_sync, &rdev->flags);
2556 rdev->saved_raid_disk = rdev->raid_disk;
2557 rdev->raid_disk = -1;
2560 } else if (cmd_match(buf, "write_error")) {
2561 set_bit(WriteErrorSeen, &rdev->flags);
2563 } else if (cmd_match(buf, "-write_error")) {
2564 clear_bit(WriteErrorSeen, &rdev->flags);
2566 } else if (cmd_match(buf, "want_replacement")) {
2567 /* Any non-spare device that is not a replacement can
2568 * become want_replacement at any time, but we then need to
2569 * check if recovery is needed.
2571 if (rdev->raid_disk >= 0 &&
2572 !test_bit(Replacement, &rdev->flags))
2573 set_bit(WantReplacement, &rdev->flags);
2574 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
2575 md_wakeup_thread(rdev->mddev->thread);
2577 } else if (cmd_match(buf, "-want_replacement")) {
2578 /* Clearing 'want_replacement' is always allowed.
2579 * Once replacement starts it is too late though.
2582 clear_bit(WantReplacement, &rdev->flags);
2583 } else if (cmd_match(buf, "replacement")) {
2584 /* Can only set a device as a replacement when array has not
2585 * yet been started. Once running, replacement is automatic
2586 * from spares, or by assigning 'slot'.
2588 if (rdev->mddev->pers)
2591 set_bit(Replacement, &rdev->flags);
2594 } else if (cmd_match(buf, "-replacement")) {
2595 /* Similarly, can only clear Replacement before start */
2596 if (rdev->mddev->pers)
2599 clear_bit(Replacement, &rdev->flags);
2602 } else if (cmd_match(buf, "re-add")) {
2603 if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1)) {
2604 /* clear_bit is performed _after_ all the devices
2605 * have their local Faulty bit cleared. If any writes
2606 * happen in the meantime in the local node, they
2607 * will land in the local bitmap, which will be synced
2608 * by this node eventually
2610 if (!mddev_is_clustered(rdev->mddev) ||
2611 (err = md_cluster_ops->gather_bitmaps(rdev)) == 0) {
2612 clear_bit(Faulty, &rdev->flags);
2613 err = add_bound_rdev(rdev);
2619 sysfs_notify_dirent_safe(rdev->sysfs_state);
2620 return err ? err : len;
2622 static struct rdev_sysfs_entry rdev_state =
2623 __ATTR_PREALLOC(state, S_IRUGO|S_IWUSR, state_show, state_store);
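/*
 * Editor's illustration (not part of md.c): the commands accepted by
 * state_store() above are plain strings written to the per-device "state"
 * attribute under /sys/block/<md>/md/dev-<name>/.  The array and member
 * names below (md0, sdb1) are examples only; writing requires CAP_SYS_ADMIN.
 */
#if 0	/* illustrative only, not built */
#include <stdio.h>

static int set_rdev_state(const char *cmd)
{
	FILE *f = fopen("/sys/block/md0/md/dev-sdb1/state", "w");

	if (!f)
		return -1;
	fprintf(f, "%s\n", cmd);	/* e.g. "want_replacement" or "-blocked" */
	return fclose(f);
}
#endif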
2626 errors_show(struct md_rdev *rdev, char *page)
2628 return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
2632 errors_store(struct md_rdev *rdev, const char *buf, size_t len)
2637 rv = kstrtouint(buf, 10, &n);
2640 atomic_set(&rdev->corrected_errors, n);
2643 static struct rdev_sysfs_entry rdev_errors =
2644 __ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store);
2647 slot_show(struct md_rdev *rdev, char *page)
2649 if (rdev->raid_disk < 0)
2650 return sprintf(page, "none\n");
2652 return sprintf(page, "%d\n", rdev->raid_disk);
2656 slot_store(struct md_rdev *rdev, const char *buf, size_t len)
2661 if (strncmp(buf, "none", 4)==0)
2664 err = kstrtouint(buf, 10, (unsigned int *)&slot);
2668 if (rdev->mddev->pers && slot == -1) {
2669 /* Setting 'slot' on an active array requires also
2670 * updating the 'rd%d' link, and communicating
2671 * with the personality with ->hot_*_disk.
2672 * For now we only support removing
2673 * failed/spare devices. This normally happens automatically,
2674 * but not when the metadata is externally managed.
2676 if (rdev->raid_disk == -1)
2678 /* personality does all needed checks */
2679 if (rdev->mddev->pers->hot_remove_disk == NULL)
2681 clear_bit(Blocked, &rdev->flags);
2682 remove_and_add_spares(rdev->mddev, rdev);
2683 if (rdev->raid_disk >= 0)
2685 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
2686 md_wakeup_thread(rdev->mddev->thread);
2687 } else if (rdev->mddev->pers) {
2688 /* Activating a spare .. or possibly reactivating
2689 * if we ever get bitmaps working here.
2692 if (rdev->raid_disk != -1)
2695 if (test_bit(MD_RECOVERY_RUNNING, &rdev->mddev->recovery))
2698 if (rdev->mddev->pers->hot_add_disk == NULL)
2701 if (slot >= rdev->mddev->raid_disks &&
2702 slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
2705 rdev->raid_disk = slot;
2706 if (test_bit(In_sync, &rdev->flags))
2707 rdev->saved_raid_disk = slot;
2709 rdev->saved_raid_disk = -1;
2710 clear_bit(In_sync, &rdev->flags);
2711 clear_bit(Bitmap_sync, &rdev->flags);
2712 err = rdev->mddev->pers->
2713 hot_add_disk(rdev->mddev, rdev);
2715 rdev->raid_disk = -1;
2718 sysfs_notify_dirent_safe(rdev->sysfs_state);
2719 if (sysfs_link_rdev(rdev->mddev, rdev))
2720 /* failure here is OK */;
2721 /* don't wakeup anyone, leave that to userspace. */
2723 if (slot >= rdev->mddev->raid_disks &&
2724 slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
2726 rdev->raid_disk = slot;
2727 /* assume it is working */
2728 clear_bit(Faulty, &rdev->flags);
2729 clear_bit(WriteMostly, &rdev->flags);
2730 set_bit(In_sync, &rdev->flags);
2731 sysfs_notify_dirent_safe(rdev->sysfs_state);
2736 static struct rdev_sysfs_entry rdev_slot =
2737 __ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store);
2740 offset_show(struct md_rdev *rdev, char *page)
2742 return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset);
2746 offset_store(struct md_rdev *rdev, const char *buf, size_t len)
2748 unsigned long long offset;
2749 if (kstrtoull(buf, 10, &offset) < 0)
2751 if (rdev->mddev->pers && rdev->raid_disk >= 0)
2753 if (rdev->sectors && rdev->mddev->external)
2754 /* Must set offset before size, so overlap checks remain valid */
2757 rdev->data_offset = offset;
2758 rdev->new_data_offset = offset;
2762 static struct rdev_sysfs_entry rdev_offset =
2763 __ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store);
2765 static ssize_t new_offset_show(struct md_rdev *rdev, char *page)
2767 return sprintf(page, "%llu\n",
2768 (unsigned long long)rdev->new_data_offset);
2771 static ssize_t new_offset_store(struct md_rdev *rdev,
2772 const char *buf, size_t len)
2774 unsigned long long new_offset;
2775 struct mddev *mddev = rdev->mddev;
2777 if (kstrtoull(buf, 10, &new_offset) < 0)
2780 if (mddev->sync_thread ||
2781 test_bit(MD_RECOVERY_RUNNING,&mddev->recovery))
2783 if (new_offset == rdev->data_offset)
2784 /* reset is always permitted */
2786 else if (new_offset > rdev->data_offset) {
2787 /* must not push array size beyond rdev_sectors */
2788 if (new_offset - rdev->data_offset
2789 + mddev->dev_sectors > rdev->sectors)
2792 /* Metadata worries about other space details. */
2794 /* decreasing the offset is inconsistent with a backwards reshape. */
2797 if (new_offset < rdev->data_offset &&
2798 mddev->reshape_backwards)
2800 /* Increasing offset is inconsistent with forwards
2801 * reshape. reshape_direction should be set to
2802 * 'backwards' first.
2804 if (new_offset > rdev->data_offset &&
2805 !mddev->reshape_backwards)
2808 if (mddev->pers && mddev->persistent &&
2809 !super_types[mddev->major_version]
2810 .allow_new_offset(rdev, new_offset))
2812 rdev->new_data_offset = new_offset;
2813 if (new_offset > rdev->data_offset)
2814 mddev->reshape_backwards = 1;
2815 else if (new_offset < rdev->data_offset)
2816 mddev->reshape_backwards = 0;
2820 static struct rdev_sysfs_entry rdev_new_offset =
2821 __ATTR(new_offset, S_IRUGO|S_IWUSR, new_offset_show, new_offset_store);
2824 rdev_size_show(struct md_rdev *rdev, char *page)
2826 return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2);
2829 static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2)
2831 /* check if two start/length pairs overlap */
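/*
 * Editor's illustration (not part of md.c): one way to write the
 * start/length overlap test named above, treating each pair as the
 * half-open range [start, start + length).  toy_overlaps is an invented
 * name for this sketch.
 */
#if 0	/* illustrative only, not built */
static int toy_overlaps(unsigned long long s1, unsigned long long l1,
			unsigned long long s2, unsigned long long l2)
{
	/* disjoint iff one range ends at or before the other begins */
	if (s1 + l1 <= s2 || s2 + l2 <= s1)
		return 0;
	return 1;
}
#endif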
2839 static int strict_blocks_to_sectors(const char *buf, sector_t *sectors)
2841 unsigned long long blocks;
2844 if (kstrtoull(buf, 10, &blocks) < 0)
2847 if (blocks & 1ULL << (8 * sizeof(blocks) - 1))
2848 return -EINVAL; /* sector conversion overflow */
2851 if (new != blocks * 2)
2852 return -EINVAL; /* unsigned long long to sector_t overflow */
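/*
 * Editor's illustration (not part of md.c): sizes written through sysfs are
 * in 1KiB blocks and one block is two 512-byte sectors, so the conversion
 * above is "sectors = blocks * 2" plus overflow checks.  A self-contained
 * userspace sketch under that assumption:
 */
#if 0	/* illustrative only, not built */
#include <stdint.h>
#include <stdlib.h>

static int toy_blocks_to_sectors(const char *buf, uint64_t *sectors)
{
	uint64_t blocks = strtoull(buf, NULL, 10);

	if (blocks & (1ULL << 63))
		return -1;		/* blocks * 2 would overflow 64 bits */
	*sectors = blocks * 2;		/* 1KiB block -> two 512B sectors */
	return 0;
}
#endif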
2859 rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
2861 struct mddev *my_mddev = rdev->mddev;
2862 sector_t oldsectors = rdev->sectors;
2865 if (strict_blocks_to_sectors(buf, &sectors) < 0)
2867 if (rdev->data_offset != rdev->new_data_offset)
2868 return -EINVAL; /* too confusing */
2869 if (my_mddev->pers && rdev->raid_disk >= 0) {
2870 if (my_mddev->persistent) {
2871 sectors = super_types[my_mddev->major_version].
2872 rdev_size_change(rdev, sectors);
2875 } else if (!sectors)
2876 sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) -
2878 if (!my_mddev->pers->resize)
2879 /* Cannot change size for RAID0 or Linear etc */
2882 if (sectors < my_mddev->dev_sectors)
2883 return -EINVAL; /* component must fit device */
2885 rdev->sectors = sectors;
2886 if (sectors > oldsectors && my_mddev->external) {
2887 /* Need to check that all other rdevs with the same
2888 * ->bdev do not overlap. 'rcu' is sufficient to walk
2889 * the rdev lists safely.
2890 * This check does not provide a hard guarantee, it
2891 * just helps avoid dangerous mistakes.
2893 struct mddev *mddev;
2895 struct list_head *tmp;
2898 for_each_mddev(mddev, tmp) {
2899 struct md_rdev *rdev2;
2901 rdev_for_each(rdev2, mddev)
2902 if (rdev->bdev == rdev2->bdev &&
2904 overlaps(rdev->data_offset, rdev->sectors,
2917 /* Someone else could have slipped in a size
2918 * change here, but doing so is just silly.
2919 * We put oldsectors back because we *know* it is
2920 * safe, and trust userspace not to race with itself.
2923 rdev->sectors = oldsectors;
2930 static struct rdev_sysfs_entry rdev_size =
2931 __ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);
2933 static ssize_t recovery_start_show(struct md_rdev *rdev, char *page)
2935 unsigned long long recovery_start = rdev->recovery_offset;
2937 if (test_bit(In_sync, &rdev->flags) ||
2938 recovery_start == MaxSector)
2939 return sprintf(page, "none\n");
2941 return sprintf(page, "%llu\n", recovery_start);
2944 static ssize_t recovery_start_store(struct md_rdev *rdev, const char *buf, size_t len)
2946 unsigned long long recovery_start;
2948 if (cmd_match(buf, "none"))
2949 recovery_start = MaxSector;
2950 else if (kstrtoull(buf, 10, &recovery_start))
2953 if (rdev->mddev->pers &&
2954 rdev->raid_disk >= 0)
2957 rdev->recovery_offset = recovery_start;
2958 if (recovery_start == MaxSector)
2959 set_bit(In_sync, &rdev->flags);
2961 clear_bit(In_sync, &rdev->flags);
2965 static struct rdev_sysfs_entry rdev_recovery_start =
2966 __ATTR(recovery_start, S_IRUGO|S_IWUSR, recovery_start_show, recovery_start_store);
2969 badblocks_show(struct badblocks *bb, char *page, int unack);
2971 badblocks_store(struct badblocks *bb, const char *page, size_t len, int unack);
2973 static ssize_t bb_show(struct md_rdev *rdev, char *page)
2975 return badblocks_show(&rdev->badblocks, page, 0);
2977 static ssize_t bb_store(struct md_rdev *rdev, const char *page, size_t len)
2979 int rv = badblocks_store(&rdev->badblocks, page, len, 0);
2980 /* Maybe that ack was all we needed */
2981 if (test_and_clear_bit(BlockedBadBlocks, &rdev->flags))
2982 wake_up(&rdev->blocked_wait);
2985 static struct rdev_sysfs_entry rdev_bad_blocks =
2986 __ATTR(bad_blocks, S_IRUGO|S_IWUSR, bb_show, bb_store);
2988 static ssize_t ubb_show(struct md_rdev *rdev, char *page)
2990 return badblocks_show(&rdev->badblocks, page, 1);
2992 static ssize_t ubb_store(struct md_rdev *rdev, const char *page, size_t len)
2994 return badblocks_store(&rdev->badblocks, page, len, 1);
2996 static struct rdev_sysfs_entry rdev_unack_bad_blocks =
2997 __ATTR(unacknowledged_bad_blocks, S_IRUGO|S_IWUSR, ubb_show, ubb_store);
2999 static struct attribute *rdev_default_attrs[] = {
3004 &rdev_new_offset.attr,
3006 &rdev_recovery_start.attr,
3007 &rdev_bad_blocks.attr,
3008 &rdev_unack_bad_blocks.attr,
3012 rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
3014 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
3015 struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
3021 return entry->show(rdev, page);
3025 rdev_attr_store(struct kobject *kobj, struct attribute *attr,
3026 const char *page, size_t length)
3028 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
3029 struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
3031 struct mddev *mddev = rdev->mddev;
3035 if (!capable(CAP_SYS_ADMIN))
3037 rv = mddev ? mddev_lock(mddev): -EBUSY;
3039 if (rdev->mddev == NULL)
3042 rv = entry->store(rdev, page, length);
3043 mddev_unlock(mddev);
3048 static void rdev_free(struct kobject *ko)
3050 struct md_rdev *rdev = container_of(ko, struct md_rdev, kobj);
3053 static const struct sysfs_ops rdev_sysfs_ops = {
3054 .show = rdev_attr_show,
3055 .store = rdev_attr_store,
3057 static struct kobj_type rdev_ktype = {
3058 .release = rdev_free,
3059 .sysfs_ops = &rdev_sysfs_ops,
3060 .default_attrs = rdev_default_attrs,
3063 int md_rdev_init(struct md_rdev *rdev)
3066 rdev->saved_raid_disk = -1;
3067 rdev->raid_disk = -1;
3069 rdev->data_offset = 0;
3070 rdev->new_data_offset = 0;
3071 rdev->sb_events = 0;
3072 rdev->last_read_error.tv_sec = 0;
3073 rdev->last_read_error.tv_nsec = 0;
3074 rdev->sb_loaded = 0;
3075 rdev->bb_page = NULL;
3076 atomic_set(&rdev->nr_pending, 0);
3077 atomic_set(&rdev->read_errors, 0);
3078 atomic_set(&rdev->corrected_errors, 0);
3080 INIT_LIST_HEAD(&rdev->same_set);
3081 init_waitqueue_head(&rdev->blocked_wait);
3083 /* Add space to store bad block list.
3084 * This reserves the space even on arrays where it cannot
3085 * be used - I wonder if that matters
3087 rdev->badblocks.count = 0;
3088 rdev->badblocks.shift = -1; /* disabled until explicitly enabled */
3089 rdev->badblocks.page = kmalloc(PAGE_SIZE, GFP_KERNEL);
3090 seqlock_init(&rdev->badblocks.lock);
3091 if (rdev->badblocks.page == NULL)
3096 EXPORT_SYMBOL_GPL(md_rdev_init);
3098 * Import a device. If 'super_format' >= 0, then sanity check the superblock
3100 * mark the device faulty if:
3102 * - the device is nonexistent (zero size)
3103 * - the device has no valid superblock
3105 * a faulty rdev _never_ has rdev->sb set.
3107 static struct md_rdev *md_import_device(dev_t newdev, int super_format, int super_minor)
3109 char b[BDEVNAME_SIZE];
3111 struct md_rdev *rdev;
3114 rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
3116 printk(KERN_ERR "md: could not alloc mem for new device!\n");
3117 return ERR_PTR(-ENOMEM);
3120 err = md_rdev_init(rdev);
3123 err = alloc_disk_sb(rdev);
3127 err = lock_rdev(rdev, newdev, super_format == -2);
3131 kobject_init(&rdev->kobj, &rdev_ktype);
3133 size = i_size_read(rdev->bdev->bd_inode) >> BLOCK_SIZE_BITS;
3136 "md: %s has zero or unknown size, marking faulty!\n",
3137 bdevname(rdev->bdev,b));
3142 if (super_format >= 0) {
3143 err = super_types[super_format].
3144 load_super(rdev, NULL, super_minor);
3145 if (err == -EINVAL) {
3147 "md: %s does not have a valid v%d.%d "
3148 "superblock, not importing!\n",
3149 bdevname(rdev->bdev,b),
3150 super_format, super_minor);
3155 "md: could not read %s's sb, not importing!\n",
3156 bdevname(rdev->bdev,b));
3166 md_rdev_clear(rdev);
3168 return ERR_PTR(err);
3172 * Check a full RAID array for plausibility
3175 static void analyze_sbs(struct mddev *mddev)
3178 struct md_rdev *rdev, *freshest, *tmp;
3179 char b[BDEVNAME_SIZE];
3182 rdev_for_each_safe(rdev, tmp, mddev)
3183 switch (super_types[mddev->major_version].
3184 load_super(rdev, freshest, mddev->minor_version)) {
3192 "md: fatal superblock inconsistency in %s"
3193 " -- removing from array\n",
3194 bdevname(rdev->bdev,b));
3195 md_kick_rdev_from_array(rdev);
3198 super_types[mddev->major_version].
3199 validate_super(mddev, freshest);
3202 rdev_for_each_safe(rdev, tmp, mddev) {
3203 if (mddev->max_disks &&
3204 (rdev->desc_nr >= mddev->max_disks ||
3205 i > mddev->max_disks)) {
3207 "md: %s: %s: only %d devices permitted\n",
3208 mdname(mddev), bdevname(rdev->bdev, b),
3210 md_kick_rdev_from_array(rdev);
3213 if (rdev != freshest) {
3214 if (super_types[mddev->major_version].
3215 validate_super(mddev, rdev)) {
3216 printk(KERN_WARNING "md: kicking non-fresh %s"
3218 bdevname(rdev->bdev,b));
3219 md_kick_rdev_from_array(rdev);
3222 /* No device should have a Candidate flag
3223 * when reading devices
3225 if (test_bit(Candidate, &rdev->flags)) {
3226 pr_info("md: kicking Cluster Candidate %s from array!\n",
3227 bdevname(rdev->bdev, b));
3228 md_kick_rdev_from_array(rdev);
3231 if (mddev->level == LEVEL_MULTIPATH) {
3232 rdev->desc_nr = i++;
3233 rdev->raid_disk = rdev->desc_nr;
3234 set_bit(In_sync, &rdev->flags);
3235 } else if (rdev->raid_disk >= (mddev->raid_disks - min(0, mddev->delta_disks))) {
3236 rdev->raid_disk = -1;
3237 clear_bit(In_sync, &rdev->flags);
3242 /* Read a fixed-point number.
3243 * Numbers in sysfs attributes should be in "standard" units where
3244 * possible, so time should be in seconds.
3245 * However we internally use a much smaller unit such as
3246 * milliseconds or jiffies.
3247 * This function takes a decimal number with a possible fractional
3248 * component, and produces an integer which is the result of
3249 * multiplying that number by 10^'scale',
3250 * all without any floating-point arithmetic.
3252 int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale)
3254 unsigned long result = 0;
3256 while (isdigit(*cp) || (*cp == '.' && decimals < 0)) {
3259 else if (decimals < scale) {
3262 result = result * 10 + value;
3274 while (decimals < scale) {
3283 safe_delay_show(struct mddev *mddev, char *page)
3285 int msec = (mddev->safemode_delay*1000)/HZ;
3286 return sprintf(page, "%d.%03d\n", msec/1000, msec%1000);
3289 safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len)
3293 if (strict_strtoul_scaled(cbuf, &msec, 3) < 0)
3296 mddev->safemode_delay = 0;
3298 unsigned long old_delay = mddev->safemode_delay;
3299 unsigned long new_delay = (msec*HZ)/1000;
3303 mddev->safemode_delay = new_delay;
3304 if (new_delay < old_delay || old_delay == 0)
3305 mod_timer(&mddev->safemode_timer, jiffies+1);
3309 static struct md_sysfs_entry md_safe_delay =
3310 __ATTR(safe_mode_delay, S_IRUGO|S_IWUSR,safe_delay_show, safe_delay_store);
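/*
 * Editor's illustration (not part of md.c): safe_mode_delay is parsed with
 * strict_strtoul_scaled(..., 3) above, i.e. a decimal number of seconds
 * becomes an integer number of milliseconds, which is then converted to
 * jiffies.  With an assumed HZ = 1000:
 *
 *	write "0.200" -> msec = 200  -> safemode_delay = 200 jiffies
 *	write "1.5"   -> msec = 1500 -> safemode_delay = 1500 jiffies
 *	write "0"     -> the safe-mode delay is disabled
 *
 * Reading the file prints the value back as seconds.milliseconds.
 */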
3313 level_show(struct mddev *mddev, char *page)
3315 struct md_personality *p;
3317 spin_lock(&mddev->lock);
3320 ret = sprintf(page, "%s\n", p->name);
3321 else if (mddev->clevel[0])
3322 ret = sprintf(page, "%s\n", mddev->clevel);
3323 else if (mddev->level != LEVEL_NONE)
3324 ret = sprintf(page, "%d\n", mddev->level);
3327 spin_unlock(&mddev->lock);
3332 level_store(struct mddev *mddev, const char *buf, size_t len)
3337 struct md_personality *pers, *oldpers;
3339 void *priv, *oldpriv;
3340 struct md_rdev *rdev;
3342 if (slen == 0 || slen >= sizeof(clevel))
3345 rv = mddev_lock(mddev);
3349 if (mddev->pers == NULL) {
3350 strncpy(mddev->clevel, buf, slen);
3351 if (mddev->clevel[slen-1] == '\n')
3353 mddev->clevel[slen] = 0;
3354 mddev->level = LEVEL_NONE;
3362 /* request to change the personality. Need to ensure:
3363 * - array is not engaged in resync/recovery/reshape
3364 * - old personality can be suspended
3365 * - new personality will access other array.
3369 if (mddev->sync_thread ||
3370 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
3371 mddev->reshape_position != MaxSector ||
3372 mddev->sysfs_active)
3376 if (!mddev->pers->quiesce) {
3377 printk(KERN_WARNING "md: %s: %s does not support online personality change\n",
3378 mdname(mddev), mddev->pers->name);
3382 /* Now find the new personality */
3383 strncpy(clevel, buf, slen);
3384 if (clevel[slen-1] == '\n')
3387 if (kstrtol(clevel, 10, &level))
3390 if (request_module("md-%s", clevel) != 0)
3391 request_module("md-level-%s", clevel);
3392 spin_lock(&pers_lock);
3393 pers = find_pers(level, clevel);
3394 if (!pers || !try_module_get(pers->owner)) {
3395 spin_unlock(&pers_lock);
3396 printk(KERN_WARNING "md: personality %s not loaded\n", clevel);
3400 spin_unlock(&pers_lock);
3402 if (pers == mddev->pers) {
3403 /* Nothing to do! */
3404 module_put(pers->owner);
3408 if (!pers->takeover) {
3409 module_put(pers->owner);
3410 printk(KERN_WARNING "md: %s: %s does not support personality takeover\n",
3411 mdname(mddev), clevel);
3416 rdev_for_each(rdev, mddev)
3417 rdev->new_raid_disk = rdev->raid_disk;
3419 /* ->takeover must set new_* and/or delta_disks
3420 * if it succeeds, and may set them when it fails.
3422 priv = pers->takeover(mddev);
3424 mddev->new_level = mddev->level;
3425 mddev->new_layout = mddev->layout;
3426 mddev->new_chunk_sectors = mddev->chunk_sectors;
3427 mddev->raid_disks -= mddev->delta_disks;
3428 mddev->delta_disks = 0;
3429 mddev->reshape_backwards = 0;
3430 module_put(pers->owner);
3431 printk(KERN_WARNING "md: %s: %s would not accept array\n",
3432 mdname(mddev), clevel);
3437 /* Looks like we have a winner */
3438 mddev_suspend(mddev);
3439 mddev_detach(mddev);
3441 spin_lock(&mddev->lock);
3442 oldpers = mddev->pers;
3443 oldpriv = mddev->private;
3445 mddev->private = priv;
3446 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
3447 mddev->level = mddev->new_level;
3448 mddev->layout = mddev->new_layout;
3449 mddev->chunk_sectors = mddev->new_chunk_sectors;
3450 mddev->delta_disks = 0;
3451 mddev->reshape_backwards = 0;
3452 mddev->degraded = 0;
3453 spin_unlock(&mddev->lock);
3455 if (oldpers->sync_request == NULL &&
3457 /* We are converting from a no-redundancy array
3458 * to a redundancy array and metadata is managed
3459 * externally so we need to be sure that writes
3460 * won't block due to a need to transition clean->dirty
3462 * until external management is started.
3465 mddev->safemode_delay = 0;
3466 mddev->safemode = 0;
3469 oldpers->free(mddev, oldpriv);
3471 if (oldpers->sync_request == NULL &&
3472 pers->sync_request != NULL) {
3473 /* need to add the md_redundancy_group */
3474 if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
3476 "md: cannot register extra attributes for %s\n",
3478 mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action");
3480 if (oldpers->sync_request != NULL &&
3481 pers->sync_request == NULL) {
3482 /* need to remove the md_redundancy_group */
3483 if (mddev->to_remove == NULL)
3484 mddev->to_remove = &md_redundancy_group;
3487 rdev_for_each(rdev, mddev) {
3488 if (rdev->raid_disk < 0)
3490 if (rdev->new_raid_disk >= mddev->raid_disks)
3491 rdev->new_raid_disk = -1;
3492 if (rdev->new_raid_disk == rdev->raid_disk)
3494 sysfs_unlink_rdev(mddev, rdev);
3496 rdev_for_each(rdev, mddev) {
3497 if (rdev->raid_disk < 0)
3499 if (rdev->new_raid_disk == rdev->raid_disk)
3501 rdev->raid_disk = rdev->new_raid_disk;
3502 if (rdev->raid_disk < 0)
3503 clear_bit(In_sync, &rdev->flags);
3505 if (sysfs_link_rdev(mddev, rdev))
3506 printk(KERN_WARNING "md: cannot register rd%d"
3507 " for %s after level change\n",
3508 rdev->raid_disk, mdname(mddev));
3512 if (pers->sync_request == NULL) {
3513 /* this is now an array without redundancy, so
3514 * it must always be in_sync
3517 del_timer_sync(&mddev->safemode_timer);
3519 blk_set_stacking_limits(&mddev->queue->limits);
3521 set_bit(MD_CHANGE_DEVS, &mddev->flags);
3522 mddev_resume(mddev);
3524 md_update_sb(mddev, 1);
3525 sysfs_notify(&mddev->kobj, NULL, "level");
3526 md_new_event(mddev);
3529 mddev_unlock(mddev);
3533 static struct md_sysfs_entry md_level =
3534 __ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store);
3537 layout_show(struct mddev *mddev, char *page)
3539 /* just a number, not meaningful for all levels */
3540 if (mddev->reshape_position != MaxSector &&
3541 mddev->layout != mddev->new_layout)
3542 return sprintf(page, "%d (%d)\n",
3543 mddev->new_layout, mddev->layout);
3544 return sprintf(page, "%d\n", mddev->layout);
3548 layout_store(struct mddev *mddev, const char *buf, size_t len)
3553 err = kstrtouint(buf, 10, &n);
3556 err = mddev_lock(mddev);
3561 if (mddev->pers->check_reshape == NULL)
3566 mddev->new_layout = n;
3567 err = mddev->pers->check_reshape(mddev);
3569 mddev->new_layout = mddev->layout;
3572 mddev->new_layout = n;
3573 if (mddev->reshape_position == MaxSector)
3576 mddev_unlock(mddev);
3579 static struct md_sysfs_entry md_layout =
3580 __ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store);
3583 raid_disks_show(struct mddev *mddev, char *page)
3585 if (mddev->raid_disks == 0)
3587 if (mddev->reshape_position != MaxSector &&
3588 mddev->delta_disks != 0)
3589 return sprintf(page, "%d (%d)\n", mddev->raid_disks,
3590 mddev->raid_disks - mddev->delta_disks);
3591 return sprintf(page, "%d\n", mddev->raid_disks);
3594 static int update_raid_disks(struct mddev *mddev, int raid_disks);
3597 raid_disks_store(struct mddev *mddev, const char *buf, size_t len)
3602 err = kstrtouint(buf, 10, &n);
3606 err = mddev_lock(mddev);
3610 err = update_raid_disks(mddev, n);
3611 else if (mddev->reshape_position != MaxSector) {
3612 struct md_rdev *rdev;
3613 int olddisks = mddev->raid_disks - mddev->delta_disks;
3616 rdev_for_each(rdev, mddev) {
3618 rdev->data_offset < rdev->new_data_offset)
3621 rdev->data_offset > rdev->new_data_offset)
3625 mddev->delta_disks = n - olddisks;
3626 mddev->raid_disks = n;
3627 mddev->reshape_backwards = (mddev->delta_disks < 0);
3629 mddev->raid_disks = n;
3631 mddev_unlock(mddev);
3632 return err ? err : len;
3634 static struct md_sysfs_entry md_raid_disks =
3635 __ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store);
3638 chunk_size_show(struct mddev *mddev, char *page)
3640 if (mddev->reshape_position != MaxSector &&
3641 mddev->chunk_sectors != mddev->new_chunk_sectors)
3642 return sprintf(page, "%d (%d)\n",
3643 mddev->new_chunk_sectors << 9,
3644 mddev->chunk_sectors << 9);
3645 return sprintf(page, "%d\n", mddev->chunk_sectors << 9);
3649 chunk_size_store(struct mddev *mddev, const char *buf, size_t len)
3654 err = kstrtoul(buf, 10, &n);
3658 err = mddev_lock(mddev);
3662 if (mddev->pers->check_reshape == NULL)
3667 mddev->new_chunk_sectors = n >> 9;
3668 err = mddev->pers->check_reshape(mddev);
3670 mddev->new_chunk_sectors = mddev->chunk_sectors;
3673 mddev->new_chunk_sectors = n >> 9;
3674 if (mddev->reshape_position == MaxSector)
3675 mddev->chunk_sectors = n >> 9;
3677 mddev_unlock(mddev);
3680 static struct md_sysfs_entry md_chunk_size =
3681 __ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store);
3684 resync_start_show(struct mddev *mddev, char *page)
3686 if (mddev->recovery_cp == MaxSector)
3687 return sprintf(page, "none\n");
3688 return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp);
3692 resync_start_store(struct mddev *mddev, const char *buf, size_t len)
3694 unsigned long long n;
3697 if (cmd_match(buf, "none"))
3700 err = kstrtoull(buf, 10, &n);
3703 if (n != (sector_t)n)
3707 err = mddev_lock(mddev);
3710 if (mddev->pers && !test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
3714 mddev->recovery_cp = n;
3716 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
3718 mddev_unlock(mddev);
3721 static struct md_sysfs_entry md_resync_start =
3722 __ATTR_PREALLOC(resync_start, S_IRUGO|S_IWUSR,
3723 resync_start_show, resync_start_store);
3726 * The array state can be:
3729 * clear - No devices, no size, no level
3730 * Equivalent to STOP_ARRAY ioctl
3732 * inactive - May have some settings, but array is not active
3733 * all IO results in error
3734 * When written, doesn't tear down array, but just stops it
3735 * suspended (not supported yet)
3736 * All IO requests will block. The array can be reconfigured.
3737 * Writing this, if accepted, will block until array is quiescent
3739 * readonly - no resync can happen. no superblocks get written.
3740 * write requests fail
3742 * read-auto - like readonly, but behaves like 'clean' on a write request.
3744 * clean - no pending writes, but otherwise active.
3745 * When written to inactive array, starts without resync
3746 * If a write request arrives then
3747 * if metadata is known, mark 'dirty' and switch to 'active'.
3748 * if not known, block and switch to write-pending
3749 * If written to an active array that has pending writes, then fails.
3751 * active - fully active: IO and resync can be happening.
3752 * When written to inactive array, starts with resync
3755 * write-pending - clean, but writes are blocked waiting for 'active' to be written.
3758 * active-idle - like active, but no writes have been seen for a while (100msec).
3761 enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active,
3762 write_pending, active_idle, bad_word};
3763 static char *array_states[] = {
3764 "clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active",
3765 "write-pending", "active-idle", NULL };
3767 static int match_word(const char *word, char **list)
3770 for (n=0; list[n]; n++)
3771 if (cmd_match(word, list[n]))
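/*
 * Editor's note (not part of md.c): match_word() returns the index of the
 * matching entry, so e.g. writing "clean\n" yields the enum value 'clean';
 * an unrecognised word runs off the end of the list and returns the index
 * of the terminating NULL, which corresponds to bad_word above.
 */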
3777 array_state_show(struct mddev *mddev, char *page)
3779 enum array_state st = inactive;
3792 else if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
3794 else if (mddev->safemode)
3800 if (list_empty(&mddev->disks) &&
3801 mddev->raid_disks == 0 &&
3802 mddev->dev_sectors == 0)
3807 return sprintf(page, "%s\n", array_states[st]);
3810 static int do_md_stop(struct mddev *mddev, int ro, struct block_device *bdev);
3811 static int md_set_readonly(struct mddev *mddev, struct block_device *bdev);
3812 static int do_md_run(struct mddev *mddev);
3813 static int restart_array(struct mddev *mddev);
3816 array_state_store(struct mddev *mddev, const char *buf, size_t len)
3819 enum array_state st = match_word(buf, array_states);
3821 if (mddev->pers && (st == active || st == clean) && mddev->ro != 1) {
3822 /* don't take reconfig_mutex when toggling between
3825 spin_lock(&mddev->lock);
3827 restart_array(mddev);
3828 clear_bit(MD_CHANGE_PENDING, &mddev->flags);
3829 wake_up(&mddev->sb_wait);
3831 } else /* st == clean */ {
3832 restart_array(mddev);
3833 if (atomic_read(&mddev->writes_pending) == 0) {
3834 if (mddev->in_sync == 0) {
3836 if (mddev->safemode == 1)
3837 mddev->safemode = 0;
3838 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
3844 spin_unlock(&mddev->lock);
3847 err = mddev_lock(mddev);
3855 /* stopping an active array */
3856 err = do_md_stop(mddev, 0, NULL);
3859 /* stopping an active array */
3861 err = do_md_stop(mddev, 2, NULL);
3863 err = 0; /* already inactive */
3866 break; /* not supported yet */
3869 err = md_set_readonly(mddev, NULL);
3872 set_disk_ro(mddev->gendisk, 1);
3873 err = do_md_run(mddev);
3879 err = md_set_readonly(mddev, NULL);
3880 else if (mddev->ro == 1)
3881 err = restart_array(mddev);
3884 set_disk_ro(mddev->gendisk, 0);
3888 err = do_md_run(mddev);
3893 restart_array(mddev);
3894 spin_lock(&mddev->lock);
3895 if (atomic_read(&mddev->writes_pending) == 0) {
3896 if (mddev->in_sync == 0) {
3898 if (mddev->safemode == 1)
3899 mddev->safemode = 0;
3900 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
3905 spin_unlock(&mddev->lock);
3911 restart_array(mddev);
3912 clear_bit(MD_CHANGE_PENDING, &mddev->flags);
3913 wake_up(&mddev->sb_wait);
3917 set_disk_ro(mddev->gendisk, 0);
3918 err = do_md_run(mddev);
3923 /* these cannot be set */
3928 if (mddev->hold_active == UNTIL_IOCTL)
3929 mddev->hold_active = 0;
3930 sysfs_notify_dirent_safe(mddev->sysfs_state);
3932 mddev_unlock(mddev);
3935 static struct md_sysfs_entry md_array_state =
3936 __ATTR_PREALLOC(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
3939 max_corrected_read_errors_show(struct mddev *mddev, char *page) {
3940 return sprintf(page, "%d\n",
3941 atomic_read(&mddev->max_corr_read_errors));
3945 max_corrected_read_errors_store(struct mddev *mddev, const char *buf, size_t len)
3950 rv = kstrtouint(buf, 10, &n);
3953 atomic_set(&mddev->max_corr_read_errors, n);
3957 static struct md_sysfs_entry max_corr_read_errors =
3958 __ATTR(max_read_errors, S_IRUGO|S_IWUSR, max_corrected_read_errors_show,
3959 max_corrected_read_errors_store);
3962 null_show(struct mddev *mddev, char *page)
3968 new_dev_store(struct mddev *mddev, const char *buf, size_t len)
3970 /* buf must be %d:%d\n? giving major and minor numbers */
3971 /* The new device is added to the array.
3972 * If the array has a persistent superblock, we read the
3973 * superblock to initialise info and check validity.
3974 * Otherwise, the only checking done is that in bind_rdev_to_array,
3975 * which mainly checks size.
3978 int major = simple_strtoul(buf, &e, 10);
3981 struct md_rdev *rdev;
3984 if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
3986 minor = simple_strtoul(e+1, &e, 10);
3987 if (*e && *e != '\n')
3989 dev = MKDEV(major, minor);
3990 if (major != MAJOR(dev) ||
3991 minor != MINOR(dev))
3994 flush_workqueue(md_misc_wq);
3996 err = mddev_lock(mddev);
3999 if (mddev->persistent) {
4000 rdev = md_import_device(dev, mddev->major_version,
4001 mddev->minor_version);
4002 if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
4003 struct md_rdev *rdev0
4004 = list_entry(mddev->disks.next,
4005 struct md_rdev, same_set);
4006 err = super_types[mddev->major_version]
4007 .load_super(rdev, rdev0, mddev->minor_version);
4011 } else if (mddev->external)
4012 rdev = md_import_device(dev, -2, -1);
4014 rdev = md_import_device(dev, -1, -1);
4017 mddev_unlock(mddev);
4018 return PTR_ERR(rdev);
4020 err = bind_rdev_to_array(rdev, mddev);
4024 mddev_unlock(mddev);
4025 return err ? err : len;
4028 static struct md_sysfs_entry md_new_device =
4029 __ATTR(new_dev, S_IWUSR, null_show, new_dev_store);
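/*
 * Editor's illustration (not part of md.c): new_dev takes the "major:minor"
 * numbers of the block device to add, e.g. typically 8:17 for /dev/sdb1.
 * The device path and array name below are examples only.
 */
#if 0	/* illustrative only, not built */
#include <stdio.h>
#include <sys/stat.h>
#include <sys/sysmacros.h>

static int add_dev_to_array(const char *blkdev)
{
	struct stat st;
	FILE *f;

	if (stat(blkdev, &st) != 0)		/* e.g. "/dev/sdb1" */
		return -1;
	f = fopen("/sys/block/md0/md/new_dev", "w");
	if (!f)
		return -1;
	fprintf(f, "%u:%u\n", major(st.st_rdev), minor(st.st_rdev));
	return fclose(f);
}
#endif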
4032 bitmap_store(struct mddev *mddev, const char *buf, size_t len)
4035 unsigned long chunk, end_chunk;
4038 err = mddev_lock(mddev);
4043 /* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */
4045 chunk = end_chunk = simple_strtoul(buf, &end, 0);
4046 if (buf == end) break;
4047 if (*end == '-') { /* range */
4049 end_chunk = simple_strtoul(buf, &end, 0);
4050 if (buf == end) break;
4052 if (*end && !isspace(*end)) break;
4053 bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
4054 buf = skip_spaces(end);
4056 bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
4058 mddev_unlock(mddev);
4062 static struct md_sysfs_entry md_bitmap =
4063 __ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store);
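/*
 * Editor's illustration (not part of md.c): bitmap_store() above accepts a
 * whitespace separated list of chunk numbers and ranges, so writing
 * "5 10-15" dirties chunk 5 and chunks 10 through 15.  The array path below
 * is an example only.
 */
#if 0	/* illustrative only, not built */
#include <stdio.h>

static int dirty_bitmap_chunks(void)
{
	FILE *f = fopen("/sys/block/md0/md/bitmap_set_bits", "w");

	if (!f)
		return -1;
	fprintf(f, "5 10-15\n");
	return fclose(f);
}
#endif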
4066 size_show(struct mddev *mddev, char *page)
4068 return sprintf(page, "%llu\n",
4069 (unsigned long long)mddev->dev_sectors / 2);
4072 static int update_size(struct mddev *mddev, sector_t num_sectors);
4075 size_store(struct mddev *mddev, const char *buf, size_t len)
4077 /* If array is inactive, we can reduce the component size, but
4078 * not increase it (except from 0).
4079 * If array is active, we can try an on-line resize
4082 int err = strict_blocks_to_sectors(buf, &sectors);
4086 err = mddev_lock(mddev);
4090 if (mddev_is_clustered(mddev))
4091 md_cluster_ops->metadata_update_start(mddev);
4092 err = update_size(mddev, sectors);
4093 md_update_sb(mddev, 1);
4094 if (mddev_is_clustered(mddev))
4095 md_cluster_ops->metadata_update_finish(mddev);
4097 if (mddev->dev_sectors == 0 ||
4098 mddev->dev_sectors > sectors)
4099 mddev->dev_sectors = sectors;
4103 mddev_unlock(mddev);
4104 return err ? err : len;
4107 static struct md_sysfs_entry md_size =
4108 __ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);
4110 /* Metadata version.
4112 * 'none' for arrays with no metadata (good luck...)
4113 * 'external' for arrays with externally managed metadata,
4114 * or N.M for internally known formats
4117 metadata_show(struct mddev *mddev, char *page)
4119 if (mddev->persistent)
4120 return sprintf(page, "%d.%d\n",
4121 mddev->major_version, mddev->minor_version);
4122 else if (mddev->external)
4123 return sprintf(page, "external:%s\n", mddev->metadata_type);
4125 return sprintf(page, "none\n");
4129 metadata_store(struct mddev *mddev, const char *buf, size_t len)
4134 /* Changing the details of 'external' metadata is
4135 * always permitted. Otherwise there must be
4136 * no devices attached to the array.
4139 err = mddev_lock(mddev);
4143 if (mddev->external && strncmp(buf, "external:", 9) == 0)
4145 else if (!list_empty(&mddev->disks))
4149 if (cmd_match(buf, "none")) {
4150 mddev->persistent = 0;
4151 mddev->external = 0;
4152 mddev->major_version = 0;
4153 mddev->minor_version = 90;
4156 if (strncmp(buf, "external:", 9) == 0) {
4157 size_t namelen = len-9;
4158 if (namelen >= sizeof(mddev->metadata_type))
4159 namelen = sizeof(mddev->metadata_type)-1;
4160 strncpy(mddev->metadata_type, buf+9, namelen);
4161 mddev->metadata_type[namelen] = 0;
4162 if (namelen && mddev->metadata_type[namelen-1] == '\n')
4163 mddev->metadata_type[--namelen] = 0;
4164 mddev->persistent = 0;
4165 mddev->external = 1;
4166 mddev->major_version = 0;
4167 mddev->minor_version = 90;
4170 major = simple_strtoul(buf, &e, 10);
4172 if (e==buf || *e != '.')
4175 minor = simple_strtoul(buf, &e, 10);
4176 if (e==buf || (*e && *e != '\n') )
4179 if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL)
4181 mddev->major_version = major;
4182 mddev->minor_version = minor;
4183 mddev->persistent = 1;
4184 mddev->external = 0;
4187 mddev_unlock(mddev);
4191 static struct md_sysfs_entry md_metadata =
4192 __ATTR_PREALLOC(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
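/*
 * Editor's note (not part of md.c): per the parser above, metadata_version
 * accepts "none", "external:<name>" (for example "external:imsm"), or a
 * "major.minor" pair such as "0.90" or "1.2".  Apart from updating the
 * details of an existing 'external:' type, changes are only allowed while
 * no devices are attached to the array.
 */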
4195 action_show(struct mddev *mddev, char *page)
4197 char *type = "idle";
4198 unsigned long recovery = mddev->recovery;
4199 if (test_bit(MD_RECOVERY_FROZEN, &recovery))
4201 else if (test_bit(MD_RECOVERY_RUNNING, &recovery) ||
4202 (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &recovery))) {
4203 if (test_bit(MD_RECOVERY_RESHAPE, &recovery))
4205 else if (test_bit(MD_RECOVERY_SYNC, &recovery)) {
4206 if (!test_bit(MD_RECOVERY_REQUESTED, &recovery))
4208 else if (test_bit(MD_RECOVERY_CHECK, &recovery))
4212 } else if (test_bit(MD_RECOVERY_RECOVER, &recovery))
4214 else if (mddev->reshape_position != MaxSector)
4217 return sprintf(page, "%s\n", type);
4221 action_store(struct mddev *mddev, const char *page, size_t len)
4223 if (!mddev->pers || !mddev->pers->sync_request)
4227 if (cmd_match(page, "idle") || cmd_match(page, "frozen")) {
4228 if (cmd_match(page, "frozen"))
4229 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4231 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4232 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
4233 mddev_lock(mddev) == 0) {
4234 flush_workqueue(md_misc_wq);
4235 if (mddev->sync_thread) {
4236 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
4237 md_reap_sync_thread(mddev);
4239 mddev_unlock(mddev);
4241 } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
4242 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
4244 else if (cmd_match(page, "resync"))
4245 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4246 else if (cmd_match(page, "recover")) {
4247 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4248 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
4249 } else if (cmd_match(page, "reshape")) {
4251 if (mddev->pers->start_reshape == NULL)
4253 err = mddev_lock(mddev);
4255 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4256 err = mddev->pers->start_reshape(mddev);
4257 mddev_unlock(mddev);
4261 sysfs_notify(&mddev->kobj, NULL, "degraded");
4263 if (cmd_match(page, "check"))
4264 set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
4265 else if (!cmd_match(page, "repair"))
4267 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4268 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
4269 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
4271 if (mddev->ro == 2) {
4272 /* A write to sync_action is enough to justify
4273 * canceling read-auto mode
4276 md_wakeup_thread(mddev->sync_thread);
4278 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4279 md_wakeup_thread(mddev->thread);
4280 sysfs_notify_dirent_safe(mddev->sysfs_action);
4284 static struct md_sysfs_entry md_scan_mode =
4285 __ATTR_PREALLOC(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
4288 last_sync_action_show(struct mddev *mddev, char *page)
4290 return sprintf(page, "%s\n", mddev->last_sync_action);
4293 static struct md_sysfs_entry md_last_scan_mode = __ATTR_RO(last_sync_action);
4296 mismatch_cnt_show(struct mddev *mddev, char *page)
4298 return sprintf(page, "%llu\n",
4299 (unsigned long long)
4300 atomic64_read(&mddev->resync_mismatches));
4303 static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt);
4306 sync_min_show(struct mddev *mddev, char *page)
4308 return sprintf(page, "%d (%s)\n", speed_min(mddev),
4309 mddev->sync_speed_min ? "local": "system");
4313 sync_min_store(struct mddev *mddev, const char *buf, size_t len)
4318 if (strncmp(buf, "system", 6)==0) {
4321 rv = kstrtouint(buf, 10, &min);
4327 mddev->sync_speed_min = min;
4331 static struct md_sysfs_entry md_sync_min =
4332 __ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store);
4335 sync_max_show(struct mddev *mddev, char *page)
4337 return sprintf(page, "%d (%s)\n", speed_max(mddev),
4338 mddev->sync_speed_max ? "local": "system");
4342 sync_max_store(struct mddev *mddev, const char *buf, size_t len)
4347 if (strncmp(buf, "system", 6)==0) {
4350 rv = kstrtouint(buf, 10, &max);
4356 mddev->sync_speed_max = max;
4360 static struct md_sysfs_entry md_sync_max =
4361 __ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store);
4364 degraded_show(struct mddev *mddev, char *page)
4366 return sprintf(page, "%d\n", mddev->degraded);
4368 static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded);
4371 sync_force_parallel_show(struct mddev *mddev, char *page)
4373 return sprintf(page, "%d\n", mddev->parallel_resync);
4377 sync_force_parallel_store(struct mddev *mddev, const char *buf, size_t len)
4381 if (kstrtol(buf, 10, &n))
4384 if (n != 0 && n != 1)
4387 mddev->parallel_resync = n;
4389 if (mddev->sync_thread)
4390 wake_up(&resync_wait);
4395 /* force parallel resync, even with shared block devices */
4396 static struct md_sysfs_entry md_sync_force_parallel =
4397 __ATTR(sync_force_parallel, S_IRUGO|S_IWUSR,
4398 sync_force_parallel_show, sync_force_parallel_store);
4401 sync_speed_show(struct mddev *mddev, char *page)
4403 unsigned long resync, dt, db;
4404 if (mddev->curr_resync == 0)
4405 return sprintf(page, "none\n");
4406 resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active);
4407 dt = (jiffies - mddev->resync_mark) / HZ;
4409 db = resync - mddev->resync_mark_cnt;
4410 return sprintf(page, "%lu\n", db/dt/2); /* K/sec */
4413 static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed);
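/*
 * Editor's illustration (not part of md.c): the figure reported by
 * sync_speed above is in KiB/s.  With assumed numbers: if 409600 sectors
 * were resynced since the last mark (db) and the mark was taken 10 seconds
 * ago (dt), the file shows 409600 / 10 / 2 = 20480, i.e. roughly 20 MiB/s.
 */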
4416 sync_completed_show(struct mddev *mddev, char *page)
4418 unsigned long long max_sectors, resync;
4420 if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4421 return sprintf(page, "none\n");
4423 if (mddev->curr_resync == 1 ||
4424 mddev->curr_resync == 2)
4425 return sprintf(page, "delayed\n");
4427 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
4428 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
4429 max_sectors = mddev->resync_max_sectors;
4431 max_sectors = mddev->dev_sectors;
4433 resync = mddev->curr_resync_completed;
4434 return sprintf(page, "%llu / %llu\n", resync, max_sectors);
4437 static struct md_sysfs_entry md_sync_completed =
4438 __ATTR_PREALLOC(sync_completed, S_IRUGO, sync_completed_show, NULL);
4441 min_sync_show(struct mddev *mddev, char *page)
4443 return sprintf(page, "%llu\n",
4444 (unsigned long long)mddev->resync_min);
4447 min_sync_store(struct mddev *mddev, const char *buf, size_t len)
4449 unsigned long long min;
4452 if (kstrtoull(buf, 10, &min))
4455 spin_lock(&mddev->lock);
4457 if (min > mddev->resync_max)
4461 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4464 /* Round down to multiple of 4K for safety */
4465 mddev->resync_min = round_down(min, 8);
4469 spin_unlock(&mddev->lock);
4473 static struct md_sysfs_entry md_min_sync =
4474 __ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store);
4477 max_sync_show(struct mddev *mddev, char *page)
4479 if (mddev->resync_max == MaxSector)
4480 return sprintf(page, "max\n");
4482 return sprintf(page, "%llu\n",
4483 (unsigned long long)mddev->resync_max);
4486 max_sync_store(struct mddev *mddev, const char *buf, size_t len)
4489 spin_lock(&mddev->lock);
4490 if (strncmp(buf, "max", 3) == 0)
4491 mddev->resync_max = MaxSector;
4493 unsigned long long max;
4497 if (kstrtoull(buf, 10, &max))
4499 if (max < mddev->resync_min)
4503 if (max < mddev->resync_max &&
4505 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4508 /* Must be a multiple of chunk_size */
4509 chunk = mddev->chunk_sectors;
4511 sector_t temp = max;
4514 if (sector_div(temp, chunk))
4517 mddev->resync_max = max;
4519 wake_up(&mddev->recovery_wait);
4522 spin_unlock(&mddev->lock);
4526 static struct md_sysfs_entry md_max_sync =
4527 __ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store);
4530 suspend_lo_show(struct mddev *mddev, char *page)
4532 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo);
4536 suspend_lo_store(struct mddev *mddev, const char *buf, size_t len)
4538 unsigned long long old, new;
4541 err = kstrtoull(buf, 10, &new);
4544 if (new != (sector_t)new)
4547 err = mddev_lock(mddev);
4551 if (mddev->pers == NULL ||
4552 mddev->pers->quiesce == NULL)
4554 old = mddev->suspend_lo;
4555 mddev->suspend_lo = new;
4557 /* Shrinking suspended region */
4558 mddev->pers->quiesce(mddev, 2);
4560 /* Expanding suspended region - need to wait */
4561 mddev->pers->quiesce(mddev, 1);
4562 mddev->pers->quiesce(mddev, 0);
4566 mddev_unlock(mddev);
4569 static struct md_sysfs_entry md_suspend_lo =
4570 __ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store);
4573 suspend_hi_show(struct mddev *mddev, char *page)
4575 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi);
4579 suspend_hi_store(struct mddev *mddev, const char *buf, size_t len)
4581 unsigned long long old, new;
4584 err = kstrtoull(buf, 10, &new);
4587 if (new != (sector_t)new)
4590 err = mddev_lock(mddev);
4594 if (mddev->pers == NULL ||
4595 mddev->pers->quiesce == NULL)
4597 old = mddev->suspend_hi;
4598 mddev->suspend_hi = new;
4600 /* Shrinking suspended region */
4601 mddev->pers->quiesce(mddev, 2);
4603 /* Expanding suspended region - need to wait */
4604 mddev->pers->quiesce(mddev, 1);
4605 mddev->pers->quiesce(mddev, 0);
4609 mddev_unlock(mddev);
4612 static struct md_sysfs_entry md_suspend_hi =
4613 __ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
4616 reshape_position_show(struct mddev *mddev, char *page)
4618 if (mddev->reshape_position != MaxSector)
4619 return sprintf(page, "%llu\n",
4620 (unsigned long long)mddev->reshape_position);
4621 strcpy(page, "none\n");
4626 reshape_position_store(struct mddev *mddev, const char *buf, size_t len)
4628 struct md_rdev *rdev;
4629 unsigned long long new;
4632 err = kstrtoull(buf, 10, &new);
4635 if (new != (sector_t)new)
4637 err = mddev_lock(mddev);
4643 mddev->reshape_position = new;
4644 mddev->delta_disks = 0;
4645 mddev->reshape_backwards = 0;
4646 mddev->new_level = mddev->level;
4647 mddev->new_layout = mddev->layout;
4648 mddev->new_chunk_sectors = mddev->chunk_sectors;
4649 rdev_for_each(rdev, mddev)
4650 rdev->new_data_offset = rdev->data_offset;
4653 mddev_unlock(mddev);
4657 static struct md_sysfs_entry md_reshape_position =
4658 __ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show,
4659 reshape_position_store);
4662 reshape_direction_show(struct mddev *mddev, char *page)
4664 return sprintf(page, "%s\n",
4665 mddev->reshape_backwards ? "backwards" : "forwards");
4669 reshape_direction_store(struct mddev *mddev, const char *buf, size_t len)
4674 if (cmd_match(buf, "forwards"))
4676 else if (cmd_match(buf, "backwards"))
4680 if (mddev->reshape_backwards == backwards)
4683 err = mddev_lock(mddev);
4686 /* check if we are allowed to change */
4687 if (mddev->delta_disks)
4689 else if (mddev->persistent &&
4690 mddev->major_version == 0)
4693 mddev->reshape_backwards = backwards;
4694 mddev_unlock(mddev);
4698 static struct md_sysfs_entry md_reshape_direction =
4699 __ATTR(reshape_direction, S_IRUGO|S_IWUSR, reshape_direction_show,
4700 reshape_direction_store);
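/*
 * reshape_direction is only meaningful before a reshape has been set up:
 * the store above rejects a change once delta_disks is non-zero, and
 * refuses "backwards" on persistent 0.90-metadata arrays.  Illustrative
 * use (sysfs path assumed):
 *
 *	echo backwards > /sys/block/md0/md/reshape_direction
 */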
4703 array_size_show(struct mddev *mddev, char *page)
4705 if (mddev->external_size)
4706 return sprintf(page, "%llu\n",
4707 (unsigned long long)mddev->array_sectors/2);
4709 return sprintf(page, "default\n");
4713 array_size_store(struct mddev *mddev, const char *buf, size_t len)
4718 err = mddev_lock(mddev);
4722 if (strncmp(buf, "default", 7) == 0) {
4724 sectors = mddev->pers->size(mddev, 0, 0);
4726 sectors = mddev->array_sectors;
4728 mddev->external_size = 0;
4730 if (strict_blocks_to_sectors(buf, &sectors) < 0)
4732 else if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors)
4735 mddev->external_size = 1;
4739 mddev->array_sectors = sectors;
4741 set_capacity(mddev->gendisk, mddev->array_sectors);
4742 revalidate_disk(mddev->gendisk);
4745 mddev_unlock(mddev);
4749 static struct md_sysfs_entry md_array_size =
4750 __ATTR(array_size, S_IRUGO|S_IWUSR, array_size_show,
4753 static struct attribute *md_default_attrs[] = {
4756 &md_raid_disks.attr,
4757 &md_chunk_size.attr,
4759 &md_resync_start.attr,
4761 &md_new_device.attr,
4762 &md_safe_delay.attr,
4763 &md_array_state.attr,
4764 &md_reshape_position.attr,
4765 &md_reshape_direction.attr,
4766 &md_array_size.attr,
4767 &max_corr_read_errors.attr,
4771 static struct attribute *md_redundancy_attrs[] = {
4773 &md_last_scan_mode.attr,
4774 &md_mismatches.attr,
4777 &md_sync_speed.attr,
4778 &md_sync_force_parallel.attr,
4779 &md_sync_completed.attr,
4782 &md_suspend_lo.attr,
4783 &md_suspend_hi.attr,
4788 static struct attribute_group md_redundancy_group = {
4790 .attrs = md_redundancy_attrs,
4794 md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
4796 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
4797 struct mddev *mddev = container_of(kobj, struct mddev, kobj);
4802 spin_lock(&all_mddevs_lock);
4803 if (list_empty(&mddev->all_mddevs)) {
4804 spin_unlock(&all_mddevs_lock);
4808 spin_unlock(&all_mddevs_lock);
4810 rv = entry->show(mddev, page);
4816 md_attr_store(struct kobject *kobj, struct attribute *attr,
4817 const char *page, size_t length)
4819 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
4820 struct mddev *mddev = container_of(kobj, struct mddev, kobj);
4825 if (!capable(CAP_SYS_ADMIN))
4827 spin_lock(&all_mddevs_lock);
4828 if (list_empty(&mddev->all_mddevs)) {
4829 spin_unlock(&all_mddevs_lock);
4833 spin_unlock(&all_mddevs_lock);
4834 rv = entry->store(mddev, page, length);
4839 static void md_free(struct kobject *ko)
4841 struct mddev *mddev = container_of(ko, struct mddev, kobj);
4843 if (mddev->sysfs_state)
4844 sysfs_put(mddev->sysfs_state);
4847 blk_cleanup_queue(mddev->queue);
4848 if (mddev->gendisk) {
4849 del_gendisk(mddev->gendisk);
4850 put_disk(mddev->gendisk);
4856 static const struct sysfs_ops md_sysfs_ops = {
4857 .show = md_attr_show,
4858 .store = md_attr_store,
4860 static struct kobj_type md_ktype = {
4862 .sysfs_ops = &md_sysfs_ops,
4863 .default_attrs = md_default_attrs,
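/*
 * Every file under /sys/block/mdX/md/ is backed by one md_sysfs_entry;
 * md_attr_show()/md_attr_store() above look the entry up from the
 * attribute, check under all_mddevs_lock that the mddev is still on
 * all_mddevs (bailing out if it has already been removed), and then call
 * the entry's own show()/store() method.
 */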
4868 static void mddev_delayed_delete(struct work_struct *ws)
4870 struct mddev *mddev = container_of(ws, struct mddev, del_work);
4872 sysfs_remove_group(&mddev->kobj, &md_bitmap_group);
4873 kobject_del(&mddev->kobj);
4874 kobject_put(&mddev->kobj);
4877 static int md_alloc(dev_t dev, char *name)
4879 static DEFINE_MUTEX(disks_mutex);
4880 struct mddev *mddev = mddev_find(dev);
4881 struct gendisk *disk;
4890 partitioned = (MAJOR(mddev->unit) != MD_MAJOR);
4891 shift = partitioned ? MdpMinorShift : 0;
4892 unit = MINOR(mddev->unit) >> shift;
4894 /* wait for any previous instance of this device to be
4895 * completely removed (mddev_delayed_delete).
4897 flush_workqueue(md_misc_wq);
4899 mutex_lock(&disks_mutex);
4905 /* Need to ensure that 'name' is not a duplicate.
4907 struct mddev *mddev2;
4908 spin_lock(&all_mddevs_lock);
4910 list_for_each_entry(mddev2, &all_mddevs, all_mddevs)
4911 if (mddev2->gendisk &&
4912 strcmp(mddev2->gendisk->disk_name, name) == 0) {
4913 spin_unlock(&all_mddevs_lock);
4916 spin_unlock(&all_mddevs_lock);
4920 mddev->queue = blk_alloc_queue(GFP_KERNEL);
4923 mddev->queue->queuedata = mddev;
4925 blk_queue_make_request(mddev->queue, md_make_request);
4926 blk_set_stacking_limits(&mddev->queue->limits);
4928 disk = alloc_disk(1 << shift);
4930 blk_cleanup_queue(mddev->queue);
4931 mddev->queue = NULL;
4934 disk->major = MAJOR(mddev->unit);
4935 disk->first_minor = unit << shift;
4937 strcpy(disk->disk_name, name);
4938 else if (partitioned)
4939 sprintf(disk->disk_name, "md_d%d", unit);
4941 sprintf(disk->disk_name, "md%d", unit);
4942 disk->fops = &md_fops;
4943 disk->private_data = mddev;
4944 disk->queue = mddev->queue;
4945 blk_queue_flush(mddev->queue, REQ_FLUSH | REQ_FUA);
4946 /* Allow extended partitions. This makes the
4947 * 'mdp' device redundant, but we can't really
4950 disk->flags |= GENHD_FL_EXT_DEVT;
4951 mddev->gendisk = disk;
4952 /* As soon as we call add_disk(), another thread could get
4953 * through to md_open, so make sure it doesn't get too far
4955 mutex_lock(&mddev->open_mutex);
4958 error = kobject_init_and_add(&mddev->kobj, &md_ktype,
4959 &disk_to_dev(disk)->kobj, "%s", "md");
4961 /* This isn't possible, but as kobject_init_and_add is marked
4962 * __must_check, we must do something with the result
4964 printk(KERN_WARNING "md: cannot register %s/md - name in use\n",
4968 if (mddev->kobj.sd &&
4969 sysfs_create_group(&mddev->kobj, &md_bitmap_group))
4970 printk(KERN_DEBUG "pointless warning\n");
4971 mutex_unlock(&mddev->open_mutex);
4973 mutex_unlock(&disks_mutex);
4974 if (!error && mddev->kobj.sd) {
4975 kobject_uevent(&mddev->kobj, KOBJ_ADD);
4976 mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state");
4982 static struct kobject *md_probe(dev_t dev, int *part, void *data)
4984 md_alloc(dev, NULL);
4988 static int add_named_array(const char *val, struct kernel_param *kp)
4990 /* val must be "md_*" where * is not all digits.
4991 * We allocate an array with a large free minor number, and
4992 * set the name to val. val must not already be an active name.
4994 int len = strlen(val);
4995 char buf[DISK_NAME_LEN];
4997 while (len && val[len-1] == '\n')
4999 if (len >= DISK_NAME_LEN)
5001 strlcpy(buf, val, len+1);
5002 if (strncmp(buf, "md_", 3) != 0)
5004 return md_alloc(0, buf);
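/*
 * add_named_array() is the handler behind md_mod's "new_array" module
 * parameter (assumed wiring; the module_param_call() lives elsewhere in
 * this file).  Writing a name creates an otherwise empty array with a
 * large free minor, e.g.:
 *
 *	echo md_home > /sys/module/md_mod/parameters/new_array
 */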
5007 static void md_safemode_timeout(unsigned long data)
5009 struct mddev *mddev = (struct mddev *) data;
5011 if (!atomic_read(&mddev->writes_pending)) {
5012 mddev->safemode = 1;
5013 if (mddev->external)
5014 sysfs_notify_dirent_safe(mddev->sysfs_state);
5016 md_wakeup_thread(mddev->thread);
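/*
 * Safemode: once safemode_delay expires with no writes pending, the timer
 * above sets mddev->safemode and wakes the md thread, which can then mark
 * the array clean again.  md_run() below programs a default delay of
 * roughly 200ms.
 */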
5019 static int start_dirty_degraded;
5021 int md_run(struct mddev *mddev)
5024 struct md_rdev *rdev;
5025 struct md_personality *pers;
5027 if (list_empty(&mddev->disks))
5028 /* cannot run an array with no devices.. */
5033 /* Cannot run until previous stop completes properly */
5034 if (mddev->sysfs_active)
5038 * Analyze all RAID superblock(s)
5040 if (!mddev->raid_disks) {
5041 if (!mddev->persistent)
5046 if (mddev->level != LEVEL_NONE)
5047 request_module("md-level-%d", mddev->level);
5048 else if (mddev->clevel[0])
5049 request_module("md-%s", mddev->clevel);
5052 * Drop all container device buffers, from now on
5053 * the only valid external interface is through the md
5056 rdev_for_each(rdev, mddev) {
5057 if (test_bit(Faulty, &rdev->flags))
5059 sync_blockdev(rdev->bdev);
5060 invalidate_bdev(rdev->bdev);
5062 /* perform some consistency tests on the device.
5063 * We don't want the data to overlap the metadata.
5064 * Internal Bitmap issues have been handled elsewhere.
5066 if (rdev->meta_bdev) {
5067 /* Nothing to check */;
5068 } else if (rdev->data_offset < rdev->sb_start) {
5069 if (mddev->dev_sectors &&
5070 rdev->data_offset + mddev->dev_sectors
5072 printk("md: %s: data overlaps metadata\n",
5077 if (rdev->sb_start + rdev->sb_size/512
5078 > rdev->data_offset) {
5079 printk("md: %s: metadata overlaps data\n",
5084 sysfs_notify_dirent_safe(rdev->sysfs_state);
5087 if (mddev->bio_set == NULL)
5088 mddev->bio_set = bioset_create(BIO_POOL_SIZE, 0);
5090 spin_lock(&pers_lock);
5091 pers = find_pers(mddev->level, mddev->clevel);
5092 if (!pers || !try_module_get(pers->owner)) {
5093 spin_unlock(&pers_lock);
5094 if (mddev->level != LEVEL_NONE)
5095 printk(KERN_WARNING "md: personality for level %d is not loaded!\n",
5098 printk(KERN_WARNING "md: personality for level %s is not loaded!\n",
5102 spin_unlock(&pers_lock);
5103 if (mddev->level != pers->level) {
5104 mddev->level = pers->level;
5105 mddev->new_level = pers->level;
5107 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
5109 if (mddev->reshape_position != MaxSector &&
5110 pers->start_reshape == NULL) {
5111 /* This personality cannot handle reshaping... */
5112 module_put(pers->owner);
5116 if (pers->sync_request) {
5117 /* Warn if this is a potentially silly
5120 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
5121 struct md_rdev *rdev2;
5124 rdev_for_each(rdev, mddev)
5125 rdev_for_each(rdev2, mddev) {
5127 rdev->bdev->bd_contains ==
5128 rdev2->bdev->bd_contains) {
5130 "%s: WARNING: %s appears to be"
5131 " on the same physical disk as"
5134 bdevname(rdev->bdev,b),
5135 bdevname(rdev2->bdev,b2));
5142 "True protection against single-disk"
5143 " failure might be compromised.\n");
5146 mddev->recovery = 0;
5147 /* may be over-ridden by personality */
5148 mddev->resync_max_sectors = mddev->dev_sectors;
5150 mddev->ok_start_degraded = start_dirty_degraded;
5152 if (start_readonly && mddev->ro == 0)
5153 mddev->ro = 2; /* read-only, but switch on first write */
5155 err = pers->run(mddev);
5157 printk(KERN_ERR "md: pers->run() failed ...\n");
5158 else if (pers->size(mddev, 0, 0) < mddev->array_sectors) {
5159 WARN_ONCE(!mddev->external_size, "%s: default size too small,"
5160 " but 'external_size' not in effect?\n", __func__);
5162 "md: invalid array_size %llu > default size %llu\n",
5163 (unsigned long long)mddev->array_sectors / 2,
5164 (unsigned long long)pers->size(mddev, 0, 0) / 2);
5167 if (err == 0 && pers->sync_request &&
5168 (mddev->bitmap_info.file || mddev->bitmap_info.offset)) {
5169 struct bitmap *bitmap;
5171 bitmap = bitmap_create(mddev, -1);
5172 if (IS_ERR(bitmap)) {
5173 err = PTR_ERR(bitmap);
5174 printk(KERN_ERR "%s: failed to create bitmap (%d)\n",
5175 mdname(mddev), err);
5177 mddev->bitmap = bitmap;
5181 mddev_detach(mddev);
5183 pers->free(mddev, mddev->private);
5184 mddev->private = NULL;
5185 module_put(pers->owner);
5186 bitmap_destroy(mddev);
5190 mddev->queue->backing_dev_info.congested_data = mddev;
5191 mddev->queue->backing_dev_info.congested_fn = md_congested;
5192 blk_queue_merge_bvec(mddev->queue, md_mergeable_bvec);
5194 if (pers->sync_request) {
5195 if (mddev->kobj.sd &&
5196 sysfs_create_group(&mddev->kobj, &md_redundancy_group))
5198 "md: cannot register extra attributes for %s\n",
5200 mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action");
5201 } else if (mddev->ro == 2) /* auto-readonly not meaningful */
5204 atomic_set(&mddev->writes_pending,0);
5205 atomic_set(&mddev->max_corr_read_errors,
5206 MD_DEFAULT_MAX_CORRECTED_READ_ERRORS);
5207 mddev->safemode = 0;
5208 mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */
5211 spin_lock(&mddev->lock);
5214 spin_unlock(&mddev->lock);
5215 rdev_for_each(rdev, mddev)
5216 if (rdev->raid_disk >= 0)
5217 if (sysfs_link_rdev(mddev, rdev))
5218 /* failure here is OK */;
5220 if (mddev->degraded && !mddev->ro)
5221 /* This ensures that recovering status is reported immediately
5222 * via sysfs - until a lack of spares is confirmed.
5224 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
5225 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5227 if (mddev->flags & MD_UPDATE_SB_FLAGS)
5228 md_update_sb(mddev, 0);
5230 md_new_event(mddev);
5231 sysfs_notify_dirent_safe(mddev->sysfs_state);
5232 sysfs_notify_dirent_safe(mddev->sysfs_action);
5233 sysfs_notify(&mddev->kobj, NULL, "degraded");
5236 EXPORT_SYMBOL_GPL(md_run);
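/*
 * do_md_run() is the ioctl-level wrapper around md_run(): it additionally
 * loads the write-intent bitmap, kicks the per-array and resync threads,
 * publishes the capacity via set_capacity()/revalidate_disk(), and emits a
 * KOBJ_CHANGE uevent so userspace sees the newly running array.
 */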
5238 static int do_md_run(struct mddev *mddev)
5242 err = md_run(mddev);
5245 err = bitmap_load(mddev);
5247 bitmap_destroy(mddev);
5251 md_wakeup_thread(mddev->thread);
5252 md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
5254 set_capacity(mddev->gendisk, mddev->array_sectors);
5255 revalidate_disk(mddev->gendisk);
5257 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
5262 static int restart_array(struct mddev *mddev)
5264 struct gendisk *disk = mddev->gendisk;
5266 /* Complain if it has no devices */
5267 if (list_empty(&mddev->disks))
5273 mddev->safemode = 0;
5275 set_disk_ro(disk, 0);
5276 printk(KERN_INFO "md: %s switched to read-write mode.\n",
5278 /* Kick recovery or resync if necessary */
5279 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5280 md_wakeup_thread(mddev->thread);
5281 md_wakeup_thread(mddev->sync_thread);
5282 sysfs_notify_dirent_safe(mddev->sysfs_state);
5286 static void md_clean(struct mddev *mddev)
5288 mddev->array_sectors = 0;
5289 mddev->external_size = 0;
5290 mddev->dev_sectors = 0;
5291 mddev->raid_disks = 0;
5292 mddev->recovery_cp = 0;
5293 mddev->resync_min = 0;
5294 mddev->resync_max = MaxSector;
5295 mddev->reshape_position = MaxSector;
5296 mddev->external = 0;
5297 mddev->persistent = 0;
5298 mddev->level = LEVEL_NONE;
5299 mddev->clevel[0] = 0;
5302 mddev->metadata_type[0] = 0;
5303 mddev->chunk_sectors = 0;
5304 mddev->ctime = mddev->utime = 0;
5306 mddev->max_disks = 0;
5308 mddev->can_decrease_events = 0;
5309 mddev->delta_disks = 0;
5310 mddev->reshape_backwards = 0;
5311 mddev->new_level = LEVEL_NONE;
5312 mddev->new_layout = 0;
5313 mddev->new_chunk_sectors = 0;
5314 mddev->curr_resync = 0;
5315 atomic64_set(&mddev->resync_mismatches, 0);
5316 mddev->suspend_lo = mddev->suspend_hi = 0;
5317 mddev->sync_speed_min = mddev->sync_speed_max = 0;
5318 mddev->recovery = 0;
5321 mddev->degraded = 0;
5322 mddev->safemode = 0;
5323 mddev->private = NULL;
5324 mddev->merge_check_needed = 0;
5325 mddev->bitmap_info.offset = 0;
5326 mddev->bitmap_info.default_offset = 0;
5327 mddev->bitmap_info.default_space = 0;
5328 mddev->bitmap_info.chunksize = 0;
5329 mddev->bitmap_info.daemon_sleep = 0;
5330 mddev->bitmap_info.max_write_behind = 0;
5333 static void __md_stop_writes(struct mddev *mddev)
5335 if (mddev_is_clustered(mddev))
5336 md_cluster_ops->metadata_update_start(mddev);
5337 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5338 flush_workqueue(md_misc_wq);
5339 if (mddev->sync_thread) {
5340 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5341 md_reap_sync_thread(mddev);
5344 del_timer_sync(&mddev->safemode_timer);
5346 bitmap_flush(mddev);
5347 md_super_wait(mddev);
5349 if (mddev->ro == 0 &&
5350 (!mddev->in_sync || (mddev->flags & MD_UPDATE_SB_FLAGS))) {
5351 /* mark array as shutdown cleanly */
5353 md_update_sb(mddev, 1);
5355 if (mddev_is_clustered(mddev))
5356 md_cluster_ops->metadata_update_finish(mddev);
5359 void md_stop_writes(struct mddev *mddev)
5361 mddev_lock_nointr(mddev);
5362 __md_stop_writes(mddev);
5363 mddev_unlock(mddev);
5365 EXPORT_SYMBOL_GPL(md_stop_writes);
5367 static void mddev_detach(struct mddev *mddev)
5369 struct bitmap *bitmap = mddev->bitmap;
5370 /* wait for behind writes to complete */
5371 if (bitmap && atomic_read(&bitmap->behind_writes) > 0) {
5372 printk(KERN_INFO "md:%s: behind writes in progress - waiting to stop.\n",
5374 /* need to kick something here to make sure I/O goes? */
5375 wait_event(bitmap->behind_wait,
5376 atomic_read(&bitmap->behind_writes) == 0);
5378 if (mddev->pers && mddev->pers->quiesce) {
5379 mddev->pers->quiesce(mddev, 1);
5380 mddev->pers->quiesce(mddev, 0);
5382 md_unregister_thread(&mddev->thread);
5384 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
5387 static void __md_stop(struct mddev *mddev)
5389 struct md_personality *pers = mddev->pers;
5390 mddev_detach(mddev);
5391 /* Ensure ->event_work is done */
5392 flush_workqueue(md_misc_wq);
5393 spin_lock(&mddev->lock);
5396 spin_unlock(&mddev->lock);
5397 pers->free(mddev, mddev->private);
5398 mddev->private = NULL;
5399 if (pers->sync_request && mddev->to_remove == NULL)
5400 mddev->to_remove = &md_redundancy_group;
5401 module_put(pers->owner);
5402 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5405 void md_stop(struct mddev *mddev)
5407 /* stop the array and free any attached data structures.
5408 * This is called from dm-raid
5411 bitmap_destroy(mddev);
5413 bioset_free(mddev->bio_set);
5416 EXPORT_SYMBOL_GPL(md_stop);
5418 static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
5423 if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
5425 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5426 md_wakeup_thread(mddev->thread);
5428 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
5429 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5430 if (mddev->sync_thread)
5431 /* Thread might be blocked waiting for metadata update
5432 * which will now never happen */
5433 wake_up_process(mddev->sync_thread->tsk);
5435 mddev_unlock(mddev);
5436 wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING,
5438 mddev_lock_nointr(mddev);
5440 mutex_lock(&mddev->open_mutex);
5441 if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
5442 mddev->sync_thread ||
5443 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
5444 (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags))) {
5445 printk("md: %s still in use.\n",mdname(mddev));
5447 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5448 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5449 md_wakeup_thread(mddev->thread);
5455 __md_stop_writes(mddev);
5461 set_disk_ro(mddev->gendisk, 1);
5462 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5463 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5464 md_wakeup_thread(mddev->thread);
5465 sysfs_notify_dirent_safe(mddev->sysfs_state);
5469 mutex_unlock(&mddev->open_mutex);
5474 * 0 - completely stop and disassemble array
5475 * 2 - stop but do not disassemble array
5477 static int do_md_stop(struct mddev *mddev, int mode,
5478 struct block_device *bdev)
5480 struct gendisk *disk = mddev->gendisk;
5481 struct md_rdev *rdev;
5484 if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
5486 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5487 md_wakeup_thread(mddev->thread);
5489 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
5490 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5491 if (mddev->sync_thread)
5492 /* Thread might be blocked waiting for metadata update
5493 * which will now never happen */
5494 wake_up_process(mddev->sync_thread->tsk);
5496 mddev_unlock(mddev);
5497 wait_event(resync_wait, (mddev->sync_thread == NULL &&
5498 !test_bit(MD_RECOVERY_RUNNING,
5499 &mddev->recovery)));
5500 mddev_lock_nointr(mddev);
5502 mutex_lock(&mddev->open_mutex);
5503 if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
5504 mddev->sysfs_active ||
5505 mddev->sync_thread ||
5506 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
5507 (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags))) {
5508 printk("md: %s still in use.\n",mdname(mddev));
5509 mutex_unlock(&mddev->open_mutex);
5511 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5512 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5513 md_wakeup_thread(mddev->thread);
5519 set_disk_ro(disk, 0);
5521 __md_stop_writes(mddev);
5523 mddev->queue->merge_bvec_fn = NULL;
5524 mddev->queue->backing_dev_info.congested_fn = NULL;
5526 /* tell userspace to handle 'inactive' */
5527 sysfs_notify_dirent_safe(mddev->sysfs_state);
5529 rdev_for_each(rdev, mddev)
5530 if (rdev->raid_disk >= 0)
5531 sysfs_unlink_rdev(mddev, rdev);
5533 set_capacity(disk, 0);
5534 mutex_unlock(&mddev->open_mutex);
5536 revalidate_disk(disk);
5541 mutex_unlock(&mddev->open_mutex);
5543 * Free resources if final stop
5546 printk(KERN_INFO "md: %s stopped.\n", mdname(mddev));
5548 bitmap_destroy(mddev);
5549 if (mddev->bitmap_info.file) {
5550 struct file *f = mddev->bitmap_info.file;
5551 spin_lock(&mddev->lock);
5552 mddev->bitmap_info.file = NULL;
5553 spin_unlock(&mddev->lock);
5556 mddev->bitmap_info.offset = 0;
5558 export_array(mddev);
5561 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
5562 if (mddev->hold_active == UNTIL_STOP)
5563 mddev->hold_active = 0;
5565 blk_integrity_unregister(disk);
5566 md_new_event(mddev);
5567 sysfs_notify_dirent_safe(mddev->sysfs_state);
5572 static void autorun_array(struct mddev *mddev)
5574 struct md_rdev *rdev;
5577 if (list_empty(&mddev->disks))
5580 printk(KERN_INFO "md: running: ");
5582 rdev_for_each(rdev, mddev) {
5583 char b[BDEVNAME_SIZE];
5584 printk("<%s>", bdevname(rdev->bdev,b));
5588 err = do_md_run(mddev);
5590 printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
5591 do_md_stop(mddev, 0, NULL);
5596 * let's try to run arrays based on all disks that have arrived
5597 * until now. (those are in pending_raid_disks)
5599 * the method: pick the first pending disk, collect all disks with
5600 * the same UUID, remove all from the pending list and put them into
5601 * the 'same_array' list. Then order this list based on superblock
5602 * update time (freshest comes first), kick out 'old' disks and
5603 * compare superblocks. If everything's fine then run it.
5605 * If "unit" is allocated, then bump its reference count
5607 static void autorun_devices(int part)
5609 struct md_rdev *rdev0, *rdev, *tmp;
5610 struct mddev *mddev;
5611 char b[BDEVNAME_SIZE];
5613 printk(KERN_INFO "md: autorun ...\n");
5614 while (!list_empty(&pending_raid_disks)) {
5617 LIST_HEAD(candidates);
5618 rdev0 = list_entry(pending_raid_disks.next,
5619 struct md_rdev, same_set);
5621 printk(KERN_INFO "md: considering %s ...\n",
5622 bdevname(rdev0->bdev,b));
5623 INIT_LIST_HEAD(&candidates);
5624 rdev_for_each_list(rdev, tmp, &pending_raid_disks)
5625 if (super_90_load(rdev, rdev0, 0) >= 0) {
5626 printk(KERN_INFO "md: adding %s ...\n",
5627 bdevname(rdev->bdev,b));
5628 list_move(&rdev->same_set, &candidates);
5631 * now we have a set of devices, with all of them having
5632 * mostly sane superblocks. It's time to allocate the
5636 dev = MKDEV(mdp_major,
5637 rdev0->preferred_minor << MdpMinorShift);
5638 unit = MINOR(dev) >> MdpMinorShift;
5640 dev = MKDEV(MD_MAJOR, rdev0->preferred_minor);
5643 if (rdev0->preferred_minor != unit) {
5644 printk(KERN_INFO "md: unit number in %s is bad: %d\n",
5645 bdevname(rdev0->bdev, b), rdev0->preferred_minor);
5649 md_probe(dev, NULL, NULL);
5650 mddev = mddev_find(dev);
5651 if (!mddev || !mddev->gendisk) {
5655 "md: cannot allocate memory for md drive.\n");
5658 if (mddev_lock(mddev))
5659 printk(KERN_WARNING "md: %s locked, cannot run\n",
5661 else if (mddev->raid_disks || mddev->major_version
5662 || !list_empty(&mddev->disks)) {
5664 "md: %s already running, cannot run %s\n",
5665 mdname(mddev), bdevname(rdev0->bdev,b));
5666 mddev_unlock(mddev);
5668 printk(KERN_INFO "md: created %s\n", mdname(mddev));
5669 mddev->persistent = 1;
5670 rdev_for_each_list(rdev, tmp, &candidates) {
5671 list_del_init(&rdev->same_set);
5672 if (bind_rdev_to_array(rdev, mddev))
5675 autorun_array(mddev);
5676 mddev_unlock(mddev);
5678 /* on success, candidates will be empty, on error
5681 rdev_for_each_list(rdev, tmp, &candidates) {
5682 list_del_init(&rdev->same_set);
5687 printk(KERN_INFO "md: ... autorun DONE.\n");
5689 #endif /* !MODULE */
5691 static int get_version(void __user *arg)
5695 ver.major = MD_MAJOR_VERSION;
5696 ver.minor = MD_MINOR_VERSION;
5697 ver.patchlevel = MD_PATCHLEVEL_VERSION;
5699 if (copy_to_user(arg, &ver, sizeof(ver)))
5705 static int get_array_info(struct mddev *mddev, void __user *arg)
5707 mdu_array_info_t info;
5708 int nr,working,insync,failed,spare;
5709 struct md_rdev *rdev;
5711 nr = working = insync = failed = spare = 0;
5713 rdev_for_each_rcu(rdev, mddev) {
5715 if (test_bit(Faulty, &rdev->flags))
5719 if (test_bit(In_sync, &rdev->flags))
5727 info.major_version = mddev->major_version;
5728 info.minor_version = mddev->minor_version;
5729 info.patch_version = MD_PATCHLEVEL_VERSION;
5730 info.ctime = mddev->ctime;
5731 info.level = mddev->level;
5732 info.size = mddev->dev_sectors / 2;
5733 if (info.size != mddev->dev_sectors / 2) /* overflow */
5736 info.raid_disks = mddev->raid_disks;
5737 info.md_minor = mddev->md_minor;
5738 info.not_persistent= !mddev->persistent;
5740 info.utime = mddev->utime;
5743 info.state = (1<<MD_SB_CLEAN);
5744 if (mddev->bitmap && mddev->bitmap_info.offset)
5745 info.state |= (1<<MD_SB_BITMAP_PRESENT);
5746 if (mddev_is_clustered(mddev))
5747 info.state |= (1<<MD_SB_CLUSTERED);
5748 info.active_disks = insync;
5749 info.working_disks = working;
5750 info.failed_disks = failed;
5751 info.spare_disks = spare;
5753 info.layout = mddev->layout;
5754 info.chunk_size = mddev->chunk_sectors << 9;
5756 if (copy_to_user(arg, &info, sizeof(info)))
5762 static int get_bitmap_file(struct mddev *mddev, void __user * arg)
5764 mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */
5768 file = kzalloc(sizeof(*file), GFP_NOIO);
5773 spin_lock(&mddev->lock);
5774 /* bitmap enabled */
5775 if (mddev->bitmap_info.file) {
5776 ptr = file_path(mddev->bitmap_info.file, file->pathname,
5777 sizeof(file->pathname));
5781 memmove(file->pathname, ptr,
5782 sizeof(file->pathname)-(ptr-file->pathname));
5784 spin_unlock(&mddev->lock);
5787 copy_to_user(arg, file, sizeof(*file)))
5794 static int get_disk_info(struct mddev *mddev, void __user * arg)
5796 mdu_disk_info_t info;
5797 struct md_rdev *rdev;
5799 if (copy_from_user(&info, arg, sizeof(info)))
5803 rdev = md_find_rdev_nr_rcu(mddev, info.number);
5805 info.major = MAJOR(rdev->bdev->bd_dev);
5806 info.minor = MINOR(rdev->bdev->bd_dev);
5807 info.raid_disk = rdev->raid_disk;
5809 if (test_bit(Faulty, &rdev->flags))
5810 info.state |= (1<<MD_DISK_FAULTY);
5811 else if (test_bit(In_sync, &rdev->flags)) {
5812 info.state |= (1<<MD_DISK_ACTIVE);
5813 info.state |= (1<<MD_DISK_SYNC);
5815 if (test_bit(WriteMostly, &rdev->flags))
5816 info.state |= (1<<MD_DISK_WRITEMOSTLY);
5818 info.major = info.minor = 0;
5819 info.raid_disk = -1;
5820 info.state = (1<<MD_DISK_REMOVED);
5824 if (copy_to_user(arg, &info, sizeof(info)))
5830 static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info)
5832 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
5833 struct md_rdev *rdev;
5834 dev_t dev = MKDEV(info->major,info->minor);
5836 if (mddev_is_clustered(mddev) &&
5837 !(info->state & ((1 << MD_DISK_CLUSTER_ADD) | (1 << MD_DISK_CANDIDATE)))) {
5838 pr_err("%s: Cannot add to clustered mddev.\n",
5843 if (info->major != MAJOR(dev) || info->minor != MINOR(dev))
5846 if (!mddev->raid_disks) {
5848 /* expecting a device which has a superblock */
5849 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
5852 "md: md_import_device returned %ld\n",
5854 return PTR_ERR(rdev);
5856 if (!list_empty(&mddev->disks)) {
5857 struct md_rdev *rdev0
5858 = list_entry(mddev->disks.next,
5859 struct md_rdev, same_set);
5860 err = super_types[mddev->major_version]
5861 .load_super(rdev, rdev0, mddev->minor_version);
5864 "md: %s has different UUID to %s\n",
5865 bdevname(rdev->bdev,b),
5866 bdevname(rdev0->bdev,b2));
5871 err = bind_rdev_to_array(rdev, mddev);
5878 * add_new_disk can be used once the array is assembled
5879 * to add "hot spares". They must already have a superblock
5884 if (!mddev->pers->hot_add_disk) {
5886 "%s: personality does not support diskops!\n",
5890 if (mddev->persistent)
5891 rdev = md_import_device(dev, mddev->major_version,
5892 mddev->minor_version);
5894 rdev = md_import_device(dev, -1, -1);
5897 "md: md_import_device returned %ld\n",
5899 return PTR_ERR(rdev);
5901 /* set saved_raid_disk if appropriate */
5902 if (!mddev->persistent) {
5903 if (info->state & (1<<MD_DISK_SYNC) &&
5904 info->raid_disk < mddev->raid_disks) {
5905 rdev->raid_disk = info->raid_disk;
5906 set_bit(In_sync, &rdev->flags);
5907 clear_bit(Bitmap_sync, &rdev->flags);
5909 rdev->raid_disk = -1;
5910 rdev->saved_raid_disk = rdev->raid_disk;
5912 super_types[mddev->major_version].
5913 validate_super(mddev, rdev);
5914 if ((info->state & (1<<MD_DISK_SYNC)) &&
5915 rdev->raid_disk != info->raid_disk) {
5916 /* This was a hot-add request, but the event counts don't
5917 * match, so reject it.
5923 clear_bit(In_sync, &rdev->flags); /* just to be sure */
5924 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
5925 set_bit(WriteMostly, &rdev->flags);
5927 clear_bit(WriteMostly, &rdev->flags);
5930 * check whether the device shows up in other nodes
5932 if (mddev_is_clustered(mddev)) {
5933 if (info->state & (1 << MD_DISK_CANDIDATE)) {
5934 /* Through --cluster-confirm */
5935 set_bit(Candidate, &rdev->flags);
5936 err = md_cluster_ops->new_disk_ack(mddev, true);
5941 } else if (info->state & (1 << MD_DISK_CLUSTER_ADD)) {
5942 /* --add initiated by this node */
5943 err = md_cluster_ops->add_new_disk_start(mddev, rdev);
5945 md_cluster_ops->add_new_disk_finish(mddev);
5952 rdev->raid_disk = -1;
5953 err = bind_rdev_to_array(rdev, mddev);
5957 err = add_bound_rdev(rdev);
5958 if (mddev_is_clustered(mddev) &&
5959 (info->state & (1 << MD_DISK_CLUSTER_ADD)))
5960 md_cluster_ops->add_new_disk_finish(mddev);
5964 /* otherwise, add_new_disk is only allowed
5965 * for major_version==0 superblocks
5967 if (mddev->major_version != 0) {
5968 printk(KERN_WARNING "%s: ADD_NEW_DISK not supported\n",
5973 if (!(info->state & (1<<MD_DISK_FAULTY))) {
5975 rdev = md_import_device(dev, -1, 0);
5978 "md: error, md_import_device() returned %ld\n",
5980 return PTR_ERR(rdev);
5982 rdev->desc_nr = info->number;
5983 if (info->raid_disk < mddev->raid_disks)
5984 rdev->raid_disk = info->raid_disk;
5986 rdev->raid_disk = -1;
5988 if (rdev->raid_disk < mddev->raid_disks)
5989 if (info->state & (1<<MD_DISK_SYNC))
5990 set_bit(In_sync, &rdev->flags);
5992 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
5993 set_bit(WriteMostly, &rdev->flags);
5995 if (!mddev->persistent) {
5996 printk(KERN_INFO "md: nonpersistent superblock ...\n");
5997 rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
5999 rdev->sb_start = calc_dev_sboffset(rdev);
6000 rdev->sectors = rdev->sb_start;
6002 err = bind_rdev_to_array(rdev, mddev);
6012 static int hot_remove_disk(struct mddev *mddev, dev_t dev)
6014 char b[BDEVNAME_SIZE];
6015 struct md_rdev *rdev;
6017 rdev = find_rdev(mddev, dev);
6021 if (mddev_is_clustered(mddev))
6022 md_cluster_ops->metadata_update_start(mddev);
6024 clear_bit(Blocked, &rdev->flags);
6025 remove_and_add_spares(mddev, rdev);
6027 if (rdev->raid_disk >= 0)
6030 if (mddev_is_clustered(mddev))
6031 md_cluster_ops->remove_disk(mddev, rdev);
6033 md_kick_rdev_from_array(rdev);
6034 md_update_sb(mddev, 1);
6035 md_new_event(mddev);
6037 if (mddev_is_clustered(mddev))
6038 md_cluster_ops->metadata_update_finish(mddev);
6042 if (mddev_is_clustered(mddev))
6043 md_cluster_ops->metadata_update_cancel(mddev);
6044 printk(KERN_WARNING "md: cannot remove active disk %s from %s ...\n",
6045 bdevname(rdev->bdev,b), mdname(mddev));
6049 static int hot_add_disk(struct mddev *mddev, dev_t dev)
6051 char b[BDEVNAME_SIZE];
6053 struct md_rdev *rdev;
6058 if (mddev->major_version != 0) {
6059 printk(KERN_WARNING "%s: HOT_ADD may only be used with"
6060 " version-0 superblocks.\n",
6064 if (!mddev->pers->hot_add_disk) {
6066 "%s: personality does not support diskops!\n",
6071 rdev = md_import_device(dev, -1, 0);
6074 "md: error, md_import_device() returned %ld\n",
6079 if (mddev->persistent)
6080 rdev->sb_start = calc_dev_sboffset(rdev);
6082 rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
6084 rdev->sectors = rdev->sb_start;
6086 if (test_bit(Faulty, &rdev->flags)) {
6088 "md: can not hot-add faulty %s disk to %s!\n",
6089 bdevname(rdev->bdev,b), mdname(mddev));
6094 if (mddev_is_clustered(mddev))
6095 md_cluster_ops->metadata_update_start(mddev);
6096 clear_bit(In_sync, &rdev->flags);
6098 rdev->saved_raid_disk = -1;
6099 err = bind_rdev_to_array(rdev, mddev);
6101 goto abort_clustered;
6104 * The rest had better be atomic, as we can have disk failures
6105 * noticed in interrupt contexts ...
6108 rdev->raid_disk = -1;
6110 md_update_sb(mddev, 1);
6112 if (mddev_is_clustered(mddev))
6113 md_cluster_ops->metadata_update_finish(mddev);
6115 * Kick recovery, maybe this spare has to be added to the
6116 * array immediately.
6118 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6119 md_wakeup_thread(mddev->thread);
6120 md_new_event(mddev);
6124 if (mddev_is_clustered(mddev))
6125 md_cluster_ops->metadata_update_cancel(mddev);
6131 static int set_bitmap_file(struct mddev *mddev, int fd)
6136 if (!mddev->pers->quiesce || !mddev->thread)
6138 if (mddev->recovery || mddev->sync_thread)
6140 /* we should be able to change the bitmap.. */
6144 struct inode *inode;
6147 if (mddev->bitmap || mddev->bitmap_info.file)
6148 return -EEXIST; /* cannot add when bitmap is present */
6152 printk(KERN_ERR "%s: error: failed to get bitmap file\n",
6157 inode = f->f_mapping->host;
6158 if (!S_ISREG(inode->i_mode)) {
6159 printk(KERN_ERR "%s: error: bitmap file must be a regular file\n",
6162 } else if (!(f->f_mode & FMODE_WRITE)) {
6163 printk(KERN_ERR "%s: error: bitmap file must be opened for write\n",
6166 } else if (atomic_read(&inode->i_writecount) != 1) {
6167 printk(KERN_ERR "%s: error: bitmap file is already in use\n",
6175 mddev->bitmap_info.file = f;
6176 mddev->bitmap_info.offset = 0; /* file overrides offset */
6177 } else if (mddev->bitmap == NULL)
6178 return -ENOENT; /* cannot remove what isn't there */
6181 mddev->pers->quiesce(mddev, 1);
6183 struct bitmap *bitmap;
6185 bitmap = bitmap_create(mddev, -1);
6186 if (!IS_ERR(bitmap)) {
6187 mddev->bitmap = bitmap;
6188 err = bitmap_load(mddev);
6190 err = PTR_ERR(bitmap);
6192 if (fd < 0 || err) {
6193 bitmap_destroy(mddev);
6194 fd = -1; /* make sure to put the file */
6196 mddev->pers->quiesce(mddev, 0);
6199 struct file *f = mddev->bitmap_info.file;
6201 spin_lock(&mddev->lock);
6202 mddev->bitmap_info.file = NULL;
6203 spin_unlock(&mddev->lock);
6212 * set_array_info is used in two different ways.
6213 * The original usage is when creating a new array.
6214 * In this usage, raid_disks is > 0 and it together with
6215 * level, size, not_persistent,layout,chunksize determine the
6216 * shape of the array.
6217 * This will always create an array with a type-0.90.0 superblock.
6218 * The newer usage is when assembling an array.
6219 * In this case raid_disks will be 0, and the major_version field is
6220 * used to determine which style super-blocks are to be found on the devices.
6221 * The minor and patch _version numbers are also kept in case the
6222 * super_block handler wishes to interpret them.
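 *
 * Purely as an illustration (not part of this driver), the "create" usage
 * could be driven from userspace roughly like this; the device path and
 * field values are assumptions for the sketch (needs <fcntl.h>,
 * <sys/ioctl.h> and <linux/raid/md_u.h>):
 *
 *	int fd = open("/dev/md0", O_RDWR);
 *	mdu_array_info_t info = { 0 };
 *	info.level = 1;			// RAID1
 *	info.raid_disks = 2;
 *	info.size = 1048576;		// per-device size in KiB (1 GiB)
 *	ioctl(fd, SET_ARRAY_INFO, &info);	// raid_disks > 0: create path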
6224 static int set_array_info(struct mddev *mddev, mdu_array_info_t *info)
6227 if (info->raid_disks == 0) {
6228 /* just setting version number for superblock loading */
6229 if (info->major_version < 0 ||
6230 info->major_version >= ARRAY_SIZE(super_types) ||
6231 super_types[info->major_version].name == NULL) {
6232 /* maybe try to auto-load a module? */
6234 "md: superblock version %d not known\n",
6235 info->major_version);
6238 mddev->major_version = info->major_version;
6239 mddev->minor_version = info->minor_version;
6240 mddev->patch_version = info->patch_version;
6241 mddev->persistent = !info->not_persistent;
6242 /* ensure mddev_put doesn't delete this now that there
6243 * is some minimal configuration.
6245 mddev->ctime = get_seconds();
6248 mddev->major_version = MD_MAJOR_VERSION;
6249 mddev->minor_version = MD_MINOR_VERSION;
6250 mddev->patch_version = MD_PATCHLEVEL_VERSION;
6251 mddev->ctime = get_seconds();
6253 mddev->level = info->level;
6254 mddev->clevel[0] = 0;
6255 mddev->dev_sectors = 2 * (sector_t)info->size;
6256 mddev->raid_disks = info->raid_disks;
6257 /* don't set md_minor, it is determined by which /dev/md* was
6260 if (info->state & (1<<MD_SB_CLEAN))
6261 mddev->recovery_cp = MaxSector;
6263 mddev->recovery_cp = 0;
6264 mddev->persistent = ! info->not_persistent;
6265 mddev->external = 0;
6267 mddev->layout = info->layout;
6268 mddev->chunk_sectors = info->chunk_size >> 9;
6270 mddev->max_disks = MD_SB_DISKS;
6272 if (mddev->persistent)
6274 set_bit(MD_CHANGE_DEVS, &mddev->flags);
6276 mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
6277 mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
6278 mddev->bitmap_info.offset = 0;
6280 mddev->reshape_position = MaxSector;
6283 * Generate a 128 bit UUID
6285 get_random_bytes(mddev->uuid, 16);
6287 mddev->new_level = mddev->level;
6288 mddev->new_chunk_sectors = mddev->chunk_sectors;
6289 mddev->new_layout = mddev->layout;
6290 mddev->delta_disks = 0;
6291 mddev->reshape_backwards = 0;
6296 void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors)
6298 WARN(!mddev_is_locked(mddev), "%s: unlocked mddev!\n", __func__);
6300 if (mddev->external_size)
6303 mddev->array_sectors = array_sectors;
6305 EXPORT_SYMBOL(md_set_array_sectors);
6307 static int update_size(struct mddev *mddev, sector_t num_sectors)
6309 struct md_rdev *rdev;
6311 int fit = (num_sectors == 0);
6313 if (mddev->pers->resize == NULL)
6315 /* The "num_sectors" is the number of sectors of each device that
6316 * is used. This can only make sense for arrays with redundancy.
6317 * linear and raid0 always use whatever space is available. We can only
6318 * consider changing this number if no resync or reconstruction is
6319 * happening, and if the new size is acceptable. It must fit before the
6320 * sb_start or, if that is <data_offset, it must fit before the size
6321 * of each device. If num_sectors is zero, we find the largest size
6324 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
6330 rdev_for_each(rdev, mddev) {
6331 sector_t avail = rdev->sectors;
6333 if (fit && (num_sectors == 0 || num_sectors > avail))
6334 num_sectors = avail;
6335 if (avail < num_sectors)
6338 rv = mddev->pers->resize(mddev, num_sectors);
6340 revalidate_disk(mddev->gendisk);
6344 static int update_raid_disks(struct mddev *mddev, int raid_disks)
6347 struct md_rdev *rdev;
6348 /* change the number of raid disks */
6349 if (mddev->pers->check_reshape == NULL)
6353 if (raid_disks <= 0 ||
6354 (mddev->max_disks && raid_disks >= mddev->max_disks))
6356 if (mddev->sync_thread ||
6357 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
6358 mddev->reshape_position != MaxSector)
6361 rdev_for_each(rdev, mddev) {
6362 if (mddev->raid_disks < raid_disks &&
6363 rdev->data_offset < rdev->new_data_offset)
6365 if (mddev->raid_disks > raid_disks &&
6366 rdev->data_offset > rdev->new_data_offset)
6370 mddev->delta_disks = raid_disks - mddev->raid_disks;
6371 if (mddev->delta_disks < 0)
6372 mddev->reshape_backwards = 1;
6373 else if (mddev->delta_disks > 0)
6374 mddev->reshape_backwards = 0;
6376 rv = mddev->pers->check_reshape(mddev);
6378 mddev->delta_disks = 0;
6379 mddev->reshape_backwards = 0;
6385 * update_array_info is used to change the configuration of an
6387 * The version, ctime,level,size,raid_disks,not_persistent, layout,chunk_size
6388 * fields in the info are checked against the array.
6389 * Any differences that cannot be handled will cause an error.
6390 * Normally, only one change can be managed at a time.
6392 static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
6398 /* calculate expected state,ignoring low bits */
6399 if (mddev->bitmap && mddev->bitmap_info.offset)
6400 state |= (1 << MD_SB_BITMAP_PRESENT);
6402 if (mddev->major_version != info->major_version ||
6403 mddev->minor_version != info->minor_version ||
6404 /* mddev->patch_version != info->patch_version || */
6405 mddev->ctime != info->ctime ||
6406 mddev->level != info->level ||
6407 /* mddev->layout != info->layout || */
6408 mddev->persistent != !info->not_persistent ||
6409 mddev->chunk_sectors != info->chunk_size >> 9 ||
6410 /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
6411 ((state^info->state) & 0xfffffe00)
6414 /* Check there is only one change */
6415 if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
6417 if (mddev->raid_disks != info->raid_disks)
6419 if (mddev->layout != info->layout)
6421 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT))
6428 if (mddev->layout != info->layout) {
6430 * we don't need to do anything at the md level, the
6431 * personality will take care of it all.
6433 if (mddev->pers->check_reshape == NULL)
6436 mddev->new_layout = info->layout;
6437 rv = mddev->pers->check_reshape(mddev);
6439 mddev->new_layout = mddev->layout;
6443 if (mddev_is_clustered(mddev))
6444 md_cluster_ops->metadata_update_start(mddev);
6445 if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
6446 rv = update_size(mddev, (sector_t)info->size * 2);
6448 if (mddev->raid_disks != info->raid_disks)
6449 rv = update_raid_disks(mddev, info->raid_disks);
6451 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
6452 if (mddev->pers->quiesce == NULL || mddev->thread == NULL) {
6456 if (mddev->recovery || mddev->sync_thread) {
6460 if (info->state & (1<<MD_SB_BITMAP_PRESENT)) {
6461 struct bitmap *bitmap;
6462 /* add the bitmap */
6463 if (mddev->bitmap) {
6467 if (mddev->bitmap_info.default_offset == 0) {
6471 mddev->bitmap_info.offset =
6472 mddev->bitmap_info.default_offset;
6473 mddev->bitmap_info.space =
6474 mddev->bitmap_info.default_space;
6475 mddev->pers->quiesce(mddev, 1);
6476 bitmap = bitmap_create(mddev, -1);
6477 if (!IS_ERR(bitmap)) {
6478 mddev->bitmap = bitmap;
6479 rv = bitmap_load(mddev);
6481 rv = PTR_ERR(bitmap);
6483 bitmap_destroy(mddev);
6484 mddev->pers->quiesce(mddev, 0);
6486 /* remove the bitmap */
6487 if (!mddev->bitmap) {
6491 if (mddev->bitmap->storage.file) {
6495 mddev->pers->quiesce(mddev, 1);
6496 bitmap_destroy(mddev);
6497 mddev->pers->quiesce(mddev, 0);
6498 mddev->bitmap_info.offset = 0;
6501 md_update_sb(mddev, 1);
6502 if (mddev_is_clustered(mddev))
6503 md_cluster_ops->metadata_update_finish(mddev);
6506 if (mddev_is_clustered(mddev))
6507 md_cluster_ops->metadata_update_cancel(mddev);
6511 static int set_disk_faulty(struct mddev *mddev, dev_t dev)
6513 struct md_rdev *rdev;
6516 if (mddev->pers == NULL)
6520 rdev = find_rdev_rcu(mddev, dev);
6524 md_error(mddev, rdev);
6525 if (!test_bit(Faulty, &rdev->flags))
6533 * We have a problem here: there is no easy way to give a CHS
6534 * virtual geometry. We currently pretend that we have 2 heads and
6535 * 4 sectors (with a BIG number of cylinders...). This drives
6536 * dosfs just mad... ;-)
6538 static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
6540 struct mddev *mddev = bdev->bd_disk->private_data;
6544 geo->cylinders = mddev->array_sectors / 8;
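	/*
	 * With the fake 2-head, 4-sector geometry each "cylinder" is
	 * 8 sectors (4 KiB), so e.g. a 1 TiB array (2147483648 sectors)
	 * reports 268435456 cylinders - hence the note above about the
	 * BIG number of cylinders.
	 */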
6548 static inline bool md_ioctl_valid(unsigned int cmd)
6553 case GET_ARRAY_INFO:
6554 case GET_BITMAP_FILE:
6557 case HOT_REMOVE_DISK:
6560 case RESTART_ARRAY_RW:
6562 case SET_ARRAY_INFO:
6563 case SET_BITMAP_FILE:
6564 case SET_DISK_FAULTY:
6567 case CLUSTERED_DISK_NACK:
6574 static int md_ioctl(struct block_device *bdev, fmode_t mode,
6575 unsigned int cmd, unsigned long arg)
6578 void __user *argp = (void __user *)arg;
6579 struct mddev *mddev = NULL;
6582 if (!md_ioctl_valid(cmd))
6587 case GET_ARRAY_INFO:
6591 if (!capable(CAP_SYS_ADMIN))
6596 * Commands dealing with the RAID driver but not any
6601 err = get_version(argp);
6607 autostart_arrays(arg);
6614 * Commands creating/starting a new array:
6617 mddev = bdev->bd_disk->private_data;
6624 /* Some actions do not requires the mutex */
6626 case GET_ARRAY_INFO:
6627 if (!mddev->raid_disks && !mddev->external)
6630 err = get_array_info(mddev, argp);
6634 if (!mddev->raid_disks && !mddev->external)
6637 err = get_disk_info(mddev, argp);
6640 case SET_DISK_FAULTY:
6641 err = set_disk_faulty(mddev, new_decode_dev(arg));
6644 case GET_BITMAP_FILE:
6645 err = get_bitmap_file(mddev, argp);
6650 if (cmd == ADD_NEW_DISK)
6651 /* need to ensure md_delayed_delete() has completed */
6652 flush_workqueue(md_misc_wq);
6654 if (cmd == HOT_REMOVE_DISK)
6655 /* need to ensure recovery thread has run */
6656 wait_event_interruptible_timeout(mddev->sb_wait,
6657 !test_bit(MD_RECOVERY_NEEDED,
6659 msecs_to_jiffies(5000));
6660 if (cmd == STOP_ARRAY || cmd == STOP_ARRAY_RO) {
6661 /* Need to flush page cache, and ensure no-one else opens
6664 mutex_lock(&mddev->open_mutex);
6665 if (mddev->pers && atomic_read(&mddev->openers) > 1) {
6666 mutex_unlock(&mddev->open_mutex);
6670 set_bit(MD_STILL_CLOSED, &mddev->flags);
6671 mutex_unlock(&mddev->open_mutex);
6672 sync_blockdev(bdev);
6674 err = mddev_lock(mddev);
6677 "md: ioctl lock interrupted, reason %d, cmd %d\n",
6682 if (cmd == SET_ARRAY_INFO) {
6683 mdu_array_info_t info;
6685 memset(&info, 0, sizeof(info));
6686 else if (copy_from_user(&info, argp, sizeof(info))) {
6691 err = update_array_info(mddev, &info);
6693 printk(KERN_WARNING "md: couldn't update"
6694 " array info. %d\n", err);
6699 if (!list_empty(&mddev->disks)) {
6701 "md: array %s already has disks!\n",
6706 if (mddev->raid_disks) {
6708 "md: array %s already initialised!\n",
6713 err = set_array_info(mddev, &info);
6715 printk(KERN_WARNING "md: couldn't set"
6716 " array info. %d\n", err);
6723 * Commands querying/configuring an existing array:
6725 /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY,
6726 * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */
6727 if ((!mddev->raid_disks && !mddev->external)
6728 && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
6729 && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE
6730 && cmd != GET_BITMAP_FILE) {
6736 * Commands even a read-only array can execute:
6739 case RESTART_ARRAY_RW:
6740 err = restart_array(mddev);
6744 err = do_md_stop(mddev, 0, bdev);
6748 err = md_set_readonly(mddev, bdev);
6751 case HOT_REMOVE_DISK:
6752 err = hot_remove_disk(mddev, new_decode_dev(arg));
6756 /* We can support ADD_NEW_DISK on read-only arrays
6757 * only if we are re-adding a preexisting device.
6758 * So require mddev->pers and MD_DISK_SYNC.
6761 mdu_disk_info_t info;
6762 if (copy_from_user(&info, argp, sizeof(info)))
6764 else if (!(info.state & (1<<MD_DISK_SYNC)))
6765 /* Need to clear read-only for this */
6768 err = add_new_disk(mddev, &info);
6774 if (get_user(ro, (int __user *)(arg))) {
6780 /* if the bdev is going readonly the value of mddev->ro
6781 * does not matter, no writes are coming
6786 /* are we already prepared for writes? */
6790 /* transitioning to readauto need only happen for
6791 * arrays that call md_write_start
6794 err = restart_array(mddev);
6797 set_disk_ro(mddev->gendisk, 0);
6804 * The remaining ioctls are changing the state of the
6805 * superblock, so we do not allow them on read-only arrays.
6807 if (mddev->ro && mddev->pers) {
6808 if (mddev->ro == 2) {
6810 sysfs_notify_dirent_safe(mddev->sysfs_state);
6811 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6812 /* mddev_unlock will wake thread */
6813 /* If a device failed while we were read-only, we
6814 * need to make sure the metadata is updated now.
6816 if (test_bit(MD_CHANGE_DEVS, &mddev->flags)) {
6817 mddev_unlock(mddev);
6818 wait_event(mddev->sb_wait,
6819 !test_bit(MD_CHANGE_DEVS, &mddev->flags) &&
6820 !test_bit(MD_CHANGE_PENDING, &mddev->flags));
6821 mddev_lock_nointr(mddev);
6832 mdu_disk_info_t info;
6833 if (copy_from_user(&info, argp, sizeof(info)))
6836 err = add_new_disk(mddev, &info);
6840 case CLUSTERED_DISK_NACK:
6841 if (mddev_is_clustered(mddev))
6842 md_cluster_ops->new_disk_ack(mddev, false);
6848 err = hot_add_disk(mddev, new_decode_dev(arg));
6852 err = do_md_run(mddev);
6855 case SET_BITMAP_FILE:
6856 err = set_bitmap_file(mddev, (int)arg);
6865 if (mddev->hold_active == UNTIL_IOCTL &&
6867 mddev->hold_active = 0;
6868 mddev_unlock(mddev);
6872 #ifdef CONFIG_COMPAT
6873 static int md_compat_ioctl(struct block_device *bdev, fmode_t mode,
6874 unsigned int cmd, unsigned long arg)
6877 case HOT_REMOVE_DISK:
6879 case SET_DISK_FAULTY:
6880 case SET_BITMAP_FILE:
6881 /* These take in integer arg, do not convert */
6884 arg = (unsigned long)compat_ptr(arg);
6888 return md_ioctl(bdev, mode, cmd, arg);
6890 #endif /* CONFIG_COMPAT */
6892 static int md_open(struct block_device *bdev, fmode_t mode)
6895 * Succeed if we can lock the mddev, which confirms that
6896 * it isn't being stopped right now.
6898 struct mddev *mddev = mddev_find(bdev->bd_dev);
6904 if (mddev->gendisk != bdev->bd_disk) {
6905 /* we are racing with mddev_put which is discarding this
6909 /* Wait until bdev->bd_disk is definitely gone */
6910 flush_workqueue(md_misc_wq);
6911 /* Then retry the open from the top */
6912 return -ERESTARTSYS;
6914 BUG_ON(mddev != bdev->bd_disk->private_data);
6916 if ((err = mutex_lock_interruptible(&mddev->open_mutex)))
6920 atomic_inc(&mddev->openers);
6921 clear_bit(MD_STILL_CLOSED, &mddev->flags);
6922 mutex_unlock(&mddev->open_mutex);
6924 check_disk_change(bdev);
6929 static void md_release(struct gendisk *disk, fmode_t mode)
6931 struct mddev *mddev = disk->private_data;
6934 atomic_dec(&mddev->openers);
6938 static int md_media_changed(struct gendisk *disk)
6940 struct mddev *mddev = disk->private_data;
6942 return mddev->changed;
6945 static int md_revalidate(struct gendisk *disk)
6947 struct mddev *mddev = disk->private_data;
6952 static const struct block_device_operations md_fops =
6954 .owner = THIS_MODULE,
6956 .release = md_release,
6958 #ifdef CONFIG_COMPAT
6959 .compat_ioctl = md_compat_ioctl,
6961 .getgeo = md_getgeo,
6962 .media_changed = md_media_changed,
6963 .revalidate_disk= md_revalidate,
6966 static int md_thread(void *arg)
6968 struct md_thread *thread = arg;
6971 * md_thread is a 'system-thread'; its priority should be very
6972 * high. We avoid resource deadlocks individually in each
6973 * raid personality. (RAID5 does preallocation) We also use RR and
6974 * the very same RT priority as kswapd, thus we will never get
6975 * into a priority inversion deadlock.
6977 * we definitely have to have equal or higher priority than
6978 * bdflush, otherwise bdflush will deadlock if there are too
6979 * many dirty RAID5 blocks.
6982 allow_signal(SIGKILL);
6983 while (!kthread_should_stop()) {
6985 /* We need to wait INTERRUPTIBLE so that
6986 * we don't add to the load-average.
6987 * That means we need to be sure no signals are
6990 if (signal_pending(current))
6991 flush_signals(current);
6993 wait_event_interruptible_timeout
6995 test_bit(THREAD_WAKEUP, &thread->flags)
6996 || kthread_should_stop(),
6999 clear_bit(THREAD_WAKEUP, &thread->flags);
7000 if (!kthread_should_stop())
7001 thread->run(thread);
7007 void md_wakeup_thread(struct md_thread *thread)
7010 pr_debug("md: waking up MD thread %s.\n", thread->tsk->comm);
7011 set_bit(THREAD_WAKEUP, &thread->flags);
7012 wake_up(&thread->wqueue);
7015 EXPORT_SYMBOL(md_wakeup_thread);
7017 struct md_thread *md_register_thread(void (*run) (struct md_thread *),
7018 struct mddev *mddev, const char *name)
7020 struct md_thread *thread;
7022 thread = kzalloc(sizeof(struct md_thread), GFP_KERNEL);
7026 init_waitqueue_head(&thread->wqueue);
7029 thread->mddev = mddev;
7030 thread->timeout = MAX_SCHEDULE_TIMEOUT;
7031 thread->tsk = kthread_run(md_thread, thread,
7033 mdname(thread->mddev),
7035 if (IS_ERR(thread->tsk)) {
7041 EXPORT_SYMBOL(md_register_thread);
7043 void md_unregister_thread(struct md_thread **threadp)
7045 struct md_thread *thread = *threadp;
7048 pr_debug("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
7049 /* Locking ensures that mddev_unlock does not wake_up a
7050 * non-existent thread
7052 spin_lock(&pers_lock);
7054 spin_unlock(&pers_lock);
7056 kthread_stop(thread->tsk);
7059 EXPORT_SYMBOL(md_unregister_thread);
7061 void md_error(struct mddev *mddev, struct md_rdev *rdev)
7063 if (!rdev || test_bit(Faulty, &rdev->flags))
7066 if (!mddev->pers || !mddev->pers->error_handler)
7068 mddev->pers->error_handler(mddev,rdev);
7069 if (mddev->degraded)
7070 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
7071 sysfs_notify_dirent_safe(rdev->sysfs_state);
7072 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
7073 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7074 md_wakeup_thread(mddev->thread);
7075 if (mddev->event_work.func)
7076 queue_work(md_misc_wq, &mddev->event_work);
7077 md_new_event_inintr(mddev);
7079 EXPORT_SYMBOL(md_error);
7081 /* seq_file implementation /proc/mdstat */
7083 static void status_unused(struct seq_file *seq)
7086 struct md_rdev *rdev;
7088 seq_printf(seq, "unused devices: ");
7090 list_for_each_entry(rdev, &pending_raid_disks, same_set) {
7091 char b[BDEVNAME_SIZE];
7093 seq_printf(seq, "%s ",
7094 bdevname(rdev->bdev,b));
7097 seq_printf(seq, "<none>");
7099 seq_printf(seq, "\n");
7102 static int status_resync(struct seq_file *seq, struct mddev *mddev)
7104 sector_t max_sectors, resync, res;
7105 unsigned long dt, db;
7108 unsigned int per_milli;
7110 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
7111 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
7112 max_sectors = mddev->resync_max_sectors;
7114 max_sectors = mddev->dev_sectors;
7116 resync = mddev->curr_resync;
7118 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
7119 /* Still cleaning up */
7120 resync = max_sectors;
7122 resync -= atomic_read(&mddev->recovery_active);
7125 if (mddev->recovery_cp < MaxSector) {
7126 seq_printf(seq, "\tresync=PENDING");
7132 seq_printf(seq, "\tresync=DELAYED");
7136 WARN_ON(max_sectors == 0);
7137 /* Pick 'scale' such that (resync>>scale)*1000 will fit
7138 * in a sector_t, and (max_sectors>>scale) will fit in a
7139 * u32, as those are the requirements for sector_div.
7140 * Thus 'scale' must be at least 10
7143 if (sizeof(sector_t) > sizeof(unsigned long)) {
7144 while ( max_sectors/2 > (1ULL<<(scale+32)))
7147 res = (resync>>scale)*1000;
7148 sector_div(res, (u32)((max_sectors>>scale)+1));
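	/*
	 * 'res' is now progress in units of 0.1%.  Example: with
	 * resync = 1000000 and max_sectors = 10000000 sectors, scale
	 * stays at 10, so res = (1000000>>10)*1000 / ((10000000>>10)+1)
	 * = 976000 / 9766 = 99, i.e. 9.9% done.
	 */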
7152 int i, x = per_milli/50, y = 20-x;
7153 seq_printf(seq, "[");
7154 for (i = 0; i < x; i++)
7155 seq_printf(seq, "=");
7156 seq_printf(seq, ">");
7157 for (i = 0; i < y; i++)
7158 seq_printf(seq, ".");
7159 seq_printf(seq, "] ");
7161 seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)",
7162 (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)?
7164 (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)?
7166 (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
7167 "resync" : "recovery"))),
7168 per_milli/10, per_milli % 10,
7169 (unsigned long long) resync/2,
7170 (unsigned long long) max_sectors/2);
7173 * dt: time from mark until now
7174 * db: blocks written from mark until now
7175 * rt: remaining time
7177 * rt is a sector_t, so could be 32bit or 64bit.
7178 * So we divide before multiply in case it is 32bit and close
7180 * We scale the divisor (db) by 32 to avoid losing precision
7181 * near the end of resync when the number of remaining sectors
7183 * We then divide rt by 32 after multiplying by dt to compensate.
7184 * The '+1' avoids division by zero if db is very small.
7186 dt = ((jiffies - mddev->resync_mark) / HZ);
7188 db = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active))
7189 - mddev->resync_mark_cnt;
7191 rt = max_sectors - resync; /* number of remaining sectors */
7192 sector_div(rt, db/32+1);
7196 seq_printf(seq, " finish=%lu.%lumin", (unsigned long)rt / 60,
7197 ((unsigned long)rt % 60)/6);
7199 seq_printf(seq, " speed=%ldK/sec", db/2/dt);
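	/*
	 * Example: if 10s have elapsed since the mark (dt = 10) and
	 * db = 20480 sectors (10 MiB) were written in that time, the
	 * speed field shows 1024K/sec; with 2048000 sectors still to go,
	 * rt ~= 2048000/641 * 10 / 32 ~= 998 seconds, printed as
	 * "finish=16.6min".
	 */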
7203 static void *md_seq_start(struct seq_file *seq, loff_t *pos)
7205 struct list_head *tmp;
7207 struct mddev *mddev;
7215 spin_lock(&all_mddevs_lock);
7216 list_for_each(tmp,&all_mddevs)
7218 mddev = list_entry(tmp, struct mddev, all_mddevs);
7220 spin_unlock(&all_mddevs_lock);
7223 spin_unlock(&all_mddevs_lock);
7225 return (void*)2;/* tail */
7229 static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
7231 struct list_head *tmp;
7232 struct mddev *next_mddev, *mddev = v;
7238 spin_lock(&all_mddevs_lock);
7240 tmp = all_mddevs.next;
7242 tmp = mddev->all_mddevs.next;
7243 if (tmp != &all_mddevs)
7244 next_mddev = mddev_get(list_entry(tmp,struct mddev,all_mddevs));
7246 next_mddev = (void*)2;
7249 spin_unlock(&all_mddevs_lock);
7257 static void md_seq_stop(struct seq_file *seq, void *v)
7259 struct mddev *mddev = v;
7261 if (mddev && v != (void*)1 && v != (void*)2)
7265 static int md_seq_show(struct seq_file *seq, void *v)
7267 struct mddev *mddev = v;
7269 struct md_rdev *rdev;
7271 if (v == (void*)1) {
7272 struct md_personality *pers;
7273 seq_printf(seq, "Personalities : ");
7274 spin_lock(&pers_lock);
7275 list_for_each_entry(pers, &pers_list, list)
7276 seq_printf(seq, "[%s] ", pers->name);
7278 spin_unlock(&pers_lock);
7279 seq_printf(seq, "\n");
7280 seq->poll_event = atomic_read(&md_event_count);
7283 if (v == (void*)2) {
7288 spin_lock(&mddev->lock);
7289 if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
7290 seq_printf(seq, "%s : %sactive", mdname(mddev),
7291 mddev->pers ? "" : "in");
7294 seq_printf(seq, " (read-only)");
7296 seq_printf(seq, " (auto-read-only)");
7297 seq_printf(seq, " %s", mddev->pers->name);
7302 rdev_for_each_rcu(rdev, mddev) {
7303 char b[BDEVNAME_SIZE];
7304 seq_printf(seq, " %s[%d]",
7305 bdevname(rdev->bdev,b), rdev->desc_nr);
7306 if (test_bit(WriteMostly, &rdev->flags))
7307 seq_printf(seq, "(W)");
7308 if (test_bit(Faulty, &rdev->flags)) {
7309 seq_printf(seq, "(F)");
7312 if (rdev->raid_disk < 0)
7313 seq_printf(seq, "(S)"); /* spare */
7314 if (test_bit(Replacement, &rdev->flags))
7315 seq_printf(seq, "(R)");
7316 sectors += rdev->sectors;
7320 if (!list_empty(&mddev->disks)) {
7322 seq_printf(seq, "\n %llu blocks",
7323 (unsigned long long)
7324 mddev->array_sectors / 2);
7326 seq_printf(seq, "\n %llu blocks",
7327 (unsigned long long)sectors / 2);
7329 if (mddev->persistent) {
7330 if (mddev->major_version != 0 ||
7331 mddev->minor_version != 90) {
7332 seq_printf(seq," super %d.%d",
7333 mddev->major_version,
7334 mddev->minor_version);
7336 } else if (mddev->external)
7337 seq_printf(seq, " super external:%s",
7338 mddev->metadata_type);
7340 seq_printf(seq, " super non-persistent");
7343 mddev->pers->status(seq, mddev);
7344 seq_printf(seq, "\n ");
7345 if (mddev->pers->sync_request) {
7346 if (status_resync(seq, mddev))
7347 seq_printf(seq, "\n ");
7350 seq_printf(seq, "\n ");
7352 bitmap_status(seq, mddev->bitmap);
7354 seq_printf(seq, "\n");
7356 spin_unlock(&mddev->lock);
7361 static const struct seq_operations md_seq_ops = {
7362 .start = md_seq_start,
7363 .next = md_seq_next,
7364 .stop = md_seq_stop,
7365 .show = md_seq_show,
7368 static int md_seq_open(struct inode *inode, struct file *file)
7370 struct seq_file *seq;
7373 error = seq_open(file, &md_seq_ops);
7377 seq = file->private_data;
7378 seq->poll_event = atomic_read(&md_event_count);
7382 static int md_unloading;
7383 static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
7385 struct seq_file *seq = filp->private_data;
7389 return POLLIN|POLLRDNORM|POLLERR|POLLPRI;
7390 poll_wait(filp, &md_event_waiters, wait);
7392 /* always allow read */
7393 mask = POLLIN | POLLRDNORM;
7395 if (seq->poll_event != atomic_read(&md_event_count))
7396 mask |= POLLERR | POLLPRI;
7400 static const struct file_operations md_seq_fops = {
7401 .owner = THIS_MODULE,
7402 .open = md_seq_open,
7404 .llseek = seq_lseek,
7405 .release = seq_release_private,
7406 .poll = mdstat_poll,
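/*
 * Editor's sketch (userspace, illustrative, not compiled): how a monitor
 * can consume the poll support wired up above.  mdstat_poll() reports
 * POLLERR|POLLPRI once md_event_count has moved past the value sampled
 * when /proc/mdstat was last read, so the watcher re-reads the file on
 * every wakeup to pick up the new state and re-arm the event.  The
 * function name below is an assumption.
 */
#if 0
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

static void example_watch_mdstat(void)
{
	char buf[4096];
	int fd = open("/proc/mdstat", O_RDONLY);

	if (fd < 0)
		return;
	for (;;) {
		struct pollfd pfd = { .fd = fd, .events = POLLPRI };
		ssize_t n;

		if (poll(&pfd, 1, -1) <= 0)
			break;
		/* seek back and re-read to see (and re-arm on) the new state */
		lseek(fd, 0, SEEK_SET);
		n = read(fd, buf, sizeof(buf) - 1);
		if (n <= 0)
			break;
		buf[n] = '\0';
		fputs(buf, stdout);
	}
	close(fd);
}
#endif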
7409 int register_md_personality(struct md_personality *p)
7411 printk(KERN_INFO "md: %s personality registered for level %d\n",
7413 spin_lock(&pers_lock);
7414 list_add_tail(&p->list, &pers_list);
7415 spin_unlock(&pers_lock);
7418 EXPORT_SYMBOL(register_md_personality);
7420 int unregister_md_personality(struct md_personality *p)
7422 printk(KERN_INFO "md: %s personality unregistered\n", p->name);
7423 spin_lock(&pers_lock);
7424 list_del_init(&p->list);
7425 spin_unlock(&pers_lock);
7428 EXPORT_SYMBOL(unregister_md_personality);
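/*
 * Editor's sketch (illustrative, not compiled): the register/unregister
 * pair above is what a personality module calls from its init/exit
 * hooks.  Only a minimal subset of struct md_personality is shown and
 * all names below are assumptions; a real personality also fills in its
 * request handling and run/stop methods.
 */
#if 0
static struct md_personality example_personality = {
	.name	= "example",
	.level	= -1000,	/* a level number nothing else claims */
	.owner	= THIS_MODULE,
	/* .make_request, .run, ... */
};

static int __init example_md_init(void)
{
	return register_md_personality(&example_personality);
}

static void __exit example_md_exit(void)
{
	unregister_md_personality(&example_personality);
}
#endif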
7430 int register_md_cluster_operations(struct md_cluster_operations *ops,
7431 struct module *module)
7434 spin_lock(&pers_lock);
7435 if (md_cluster_ops != NULL)
7438 md_cluster_ops = ops;
7439 md_cluster_mod = module;
7441 spin_unlock(&pers_lock);
7444 EXPORT_SYMBOL(register_md_cluster_operations);
7446 int unregister_md_cluster_operations(void)
7448 spin_lock(&pers_lock);
7449 md_cluster_ops = NULL;
7450 spin_unlock(&pers_lock);
7453 EXPORT_SYMBOL(unregister_md_cluster_operations);
7455 int md_setup_cluster(struct mddev *mddev, int nodes)
7459 err = request_module("md-cluster");
7461 pr_err("md-cluster module not found.\n");
7465 spin_lock(&pers_lock);
7466 if (!md_cluster_ops || !try_module_get(md_cluster_mod)) {
7467 spin_unlock(&pers_lock);
7470 spin_unlock(&pers_lock);
7472 return md_cluster_ops->join(mddev, nodes);
7475 void md_cluster_stop(struct mddev *mddev)
7477 if (!md_cluster_ops)
7479 md_cluster_ops->leave(mddev);
7480 module_put(md_cluster_mod);
7483 static int is_mddev_idle(struct mddev *mddev, int init)
7485 struct md_rdev *rdev;
7491 rdev_for_each_rcu(rdev, mddev) {
7492 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
7493 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
7494 (int)part_stat_read(&disk->part0, sectors[1]) -
7495 atomic_read(&disk->sync_io);
7496 /* sync IO will cause sync_io to increase before the disk_stats
7497 * as sync_io is counted when a request starts, and
7498 * disk_stats is counted when it completes.
7499 * So resync activity will cause curr_events to be smaller than
7500 * when there was no such activity.
7501 * non-sync IO will cause disk_stat to increase without
7502 * increasing sync_io so curr_events will (eventually)
7503 * be larger than it was before. Once it becomes
7504 * substantially larger, the test below will cause
7505 * the array to appear non-idle, and resync will slow down.
7507 * If there is a lot of outstanding resync activity when
7508 * we set last_event to curr_events, then all that activity
7509 * completing might cause the array to appear non-idle
7510 * and resync will be slowed down even though there might
7511 * not have been non-resync activity. This will only
7512 * happen once though. 'last_events' will soon reflect
7513 * the state where there is little or no outstanding
7514 * resync requests, and further resync activity will
7515 * always make curr_events less than last_events.
7518 if (init || curr_events - rdev->last_events > 64) {
7519 rdev->last_events = curr_events;
7527 void md_done_sync(struct mddev *mddev, int blocks, int ok)
7529 /* another "blocks" worth of 512-byte sectors have been synced */
7530 atomic_sub(blocks, &mddev->recovery_active);
7531 wake_up(&mddev->recovery_wait);
7533 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
7534 set_bit(MD_RECOVERY_ERROR, &mddev->recovery);
7535 md_wakeup_thread(mddev->thread);
7536 // stop recovery, signal do_sync ....
7539 EXPORT_SYMBOL(md_done_sync);
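/*
 * Editor's sketch (illustrative, not compiled): the per-device test that
 * is_mddev_idle() above applies.  A device counts as busy once I/O that
 * resync did not initiate has pushed its sector counters more than ~64
 * sectors beyond the previously recorded baseline.  Names below are
 * assumptions.
 */
#if 0
static int example_rdev_saw_foreign_io(long long sectors_done,
				       long long sync_io_sectors,
				       long long *last_events)
{
	long long curr_events = sectors_done - sync_io_sectors;
	int busy = curr_events - *last_events > 64;

	if (busy)
		*last_events = curr_events;	/* new baseline */
	return busy;
}
#endif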
7541 /* md_write_start(mddev, bi)
7542 * If we need to update some array metadata (e.g. 'active' flag
7543 * in superblock) before writing, schedule a superblock update
7544 * and wait for it to complete.
7546 void md_write_start(struct mddev *mddev, struct bio *bi)
7549 if (bio_data_dir(bi) != WRITE)
7552 BUG_ON(mddev->ro == 1);
7553 if (mddev->ro == 2) {
7554 /* need to switch to read/write */
7556 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7557 md_wakeup_thread(mddev->thread);
7558 md_wakeup_thread(mddev->sync_thread);
7561 atomic_inc(&mddev->writes_pending);
7562 if (mddev->safemode == 1)
7563 mddev->safemode = 0;
7564 if (mddev->in_sync) {
7565 spin_lock(&mddev->lock);
7566 if (mddev->in_sync) {
7568 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
7569 set_bit(MD_CHANGE_PENDING, &mddev->flags);
7570 md_wakeup_thread(mddev->thread);
7573 spin_unlock(&mddev->lock);
7576 sysfs_notify_dirent_safe(mddev->sysfs_state);
7577 wait_event(mddev->sb_wait,
7578 !test_bit(MD_CHANGE_PENDING, &mddev->flags));
7580 EXPORT_SYMBOL(md_write_start);
7582 void md_write_end(struct mddev *mddev)
7584 if (atomic_dec_and_test(&mddev->writes_pending)) {
7585 if (mddev->safemode == 2)
7586 md_wakeup_thread(mddev->thread);
7587 else if (mddev->safemode_delay)
7588 mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay);
7591 EXPORT_SYMBOL(md_write_end);
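/*
 * Editor's sketch (illustrative, not compiled): how a personality
 * brackets an incoming write with the two helpers above.  The flow is
 * simplified and the function name is an assumption; md_write_end() is
 * really called when the write completes, not immediately after issue.
 */
#if 0
static void example_handle_write(struct mddev *mddev, struct bio *bio)
{
	/* may block until the superblock has been marked 'active' */
	md_write_start(mddev, bio);

	/* ... issue the write to the member devices ... */

	/* on completion: possibly arm the safemode timer / wake the thread */
	md_write_end(mddev);
}
#endif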
7593 /* md_allow_write(mddev)
7594 * Calling this ensures that the array is marked 'active' so that writes
7595 * may proceed without blocking. It is important to call this before
7596 * attempting a GFP_KERNEL allocation while holding the mddev lock.
7597 * Must be called with mddev_lock held.
7599 * In the ->external case MD_CHANGE_CLEAN can not be cleared until mddev->lock
7600 * is dropped, so return -EAGAIN after notifying userspace.
7602 int md_allow_write(struct mddev *mddev)
7608 if (!mddev->pers->sync_request)
7611 spin_lock(&mddev->lock);
7612 if (mddev->in_sync) {
7614 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
7615 set_bit(MD_CHANGE_PENDING, &mddev->flags);
7616 if (mddev->safemode_delay &&
7617 mddev->safemode == 0)
7618 mddev->safemode = 1;
7619 spin_unlock(&mddev->lock);
7620 if (mddev_is_clustered(mddev))
7621 md_cluster_ops->metadata_update_start(mddev);
7622 md_update_sb(mddev, 0);
7623 if (mddev_is_clustered(mddev))
7624 md_cluster_ops->metadata_update_finish(mddev);
7625 sysfs_notify_dirent_safe(mddev->sysfs_state);
7627 spin_unlock(&mddev->lock);
7629 if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
7634 EXPORT_SYMBOL_GPL(md_allow_write);
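/*
 * Editor's sketch (illustrative, not compiled) of the calling convention
 * documented above: call md_allow_write() before a GFP_KERNEL allocation
 * made under the mddev lock, and be prepared for -EAGAIN when metadata
 * is externally managed.  The function name is an assumption.
 */
#if 0
static int example_alloc_under_mddev_lock(struct mddev *mddev)
{
	int err = md_allow_write(mddev);

	if (err)
		return err;	/* -EAGAIN: userspace still has to mark the array active */

	/* ... now a GFP_KERNEL allocation cannot deadlock on the sb update ... */
	return 0;
}
#endif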
7636 #define SYNC_MARKS 10
7637 #define SYNC_MARK_STEP (3*HZ)
7638 #define UPDATE_FREQUENCY (5*60*HZ)
7639 void md_do_sync(struct md_thread *thread)
7641 struct mddev *mddev = thread->mddev;
7642 struct mddev *mddev2;
7643 unsigned int currspeed = 0,
7645 sector_t max_sectors,j, io_sectors, recovery_done;
7646 unsigned long mark[SYNC_MARKS];
7647 unsigned long update_time;
7648 sector_t mark_cnt[SYNC_MARKS];
7650 struct list_head *tmp;
7651 sector_t last_check;
7653 struct md_rdev *rdev;
7654 char *desc, *action = NULL;
7655 struct blk_plug plug;
7657 /* just in case thread restarts... */
7658 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
7660 if (mddev->ro) {/* never try to sync a read-only array */
7661 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
7665 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
7666 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
7667 desc = "data-check";
7669 } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
7670 desc = "requested-resync";
7674 } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
7679 mddev->last_sync_action = action ?: desc;
7681 /* we overload curr_resync somewhat here.
7682 * 0 == not engaged in resync at all
7683 * 2 == checking that there is no conflict with another sync
7684 * 1 == like 2, but have yielded to allow conflicting resync to pass
7686 * other == active in resync - this many blocks
7688 * Before starting a resync we must have set curr_resync to
7689 * 2, and then checked that every "conflicting" array has curr_resync
7690 * less than ours. When we find one that is the same or higher
7691 * we wait on resync_wait. To avoid deadlock, we reduce curr_resync
7692 * to 1 if we choose to yield (based arbitrarily on address of mddev structure).
7693 * This will mean we have to start checking from the beginning again.
7698 mddev->curr_resync = 2;
7701 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
7703 for_each_mddev(mddev2, tmp) {
7704 if (mddev2 == mddev)
7706 if (!mddev->parallel_resync
7707 && mddev2->curr_resync
7708 && match_mddev_units(mddev, mddev2)) {
7710 if (mddev < mddev2 && mddev->curr_resync == 2) {
7711 /* arbitrarily yield */
7712 mddev->curr_resync = 1;
7713 wake_up(&resync_wait);
7715 if (mddev > mddev2 && mddev->curr_resync == 1)
7716 /* no need to wait here, we can wait the next
7717 * time 'round when curr_resync == 2
7720 /* We need to wait 'interruptible' so as not to
7721 * contribute to the load average, and not to
7722 * be caught by 'softlockup'
7724 prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
7725 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
7726 mddev2->curr_resync >= mddev->curr_resync) {
7727 printk(KERN_INFO "md: delaying %s of %s"
7728 " until %s has finished (they"
7729 " share one or more physical units)\n",
7730 desc, mdname(mddev), mdname(mddev2));
7732 if (signal_pending(current))
7733 flush_signals(current);
7735 finish_wait(&resync_wait, &wq);
7738 finish_wait(&resync_wait, &wq);
7741 } while (mddev->curr_resync < 2);
7744 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
7745 /* resync follows the size requested by the personality,
7746 * which defaults to physical size, but can be virtual size
7748 max_sectors = mddev->resync_max_sectors;
7749 atomic64_set(&mddev->resync_mismatches, 0);
7750 /* we don't use the checkpoint if there's a bitmap */
7751 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
7752 j = mddev->resync_min;
7753 else if (!mddev->bitmap)
7754 j = mddev->recovery_cp;
7756 } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
7757 max_sectors = mddev->resync_max_sectors;
7759 /* recovery follows the physical size of devices */
7760 max_sectors = mddev->dev_sectors;
7763 rdev_for_each_rcu(rdev, mddev)
7764 if (rdev->raid_disk >= 0 &&
7765 !test_bit(Faulty, &rdev->flags) &&
7766 !test_bit(In_sync, &rdev->flags) &&
7767 rdev->recovery_offset < j)
7768 j = rdev->recovery_offset;
7771 /* If there is a bitmap, we need to make sure all
7772 * writes that started before we added a spare
7773 * complete before we start doing a recovery.
7774 * Otherwise the write might complete and (via
7775 * bitmap_endwrite) set a bit in the bitmap after the
7776 * recovery has checked that bit and skipped that region.
7779 if (mddev->bitmap) {
7780 mddev->pers->quiesce(mddev, 1);
7781 mddev->pers->quiesce(mddev, 0);
7785 printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev));
7786 printk(KERN_INFO "md: minimum _guaranteed_ speed:"
7787 " %d KB/sec/disk.\n", speed_min(mddev));
7788 printk(KERN_INFO "md: using maximum available idle IO bandwidth "
7789 "(but not more than %d KB/sec) for %s.\n",
7790 speed_max(mddev), desc);
7792 is_mddev_idle(mddev, 1); /* this initializes IO event counters */
7795 for (m = 0; m < SYNC_MARKS; m++) {
7797 mark_cnt[m] = io_sectors;
7800 mddev->resync_mark = mark[last_mark];
7801 mddev->resync_mark_cnt = mark_cnt[last_mark];
7804 * Tune reconstruction:
7806 window = 32*(PAGE_SIZE/512);
7807 printk(KERN_INFO "md: using %dk window, over a total of %lluk.\n",
7808 window/2, (unsigned long long)max_sectors/2);
7810 atomic_set(&mddev->recovery_active, 0);
7815 "md: resuming %s of %s from checkpoint.\n",
7816 desc, mdname(mddev));
7817 mddev->curr_resync = j;
7819 mddev->curr_resync = 3; /* no longer delayed */
7820 mddev->curr_resync_completed = j;
7821 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
7822 md_new_event(mddev);
7823 update_time = jiffies;
7825 if (mddev_is_clustered(mddev))
7826 md_cluster_ops->resync_start(mddev, j, max_sectors);
7828 blk_start_plug(&plug);
7829 while (j < max_sectors) {
7834 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
7835 ((mddev->curr_resync > mddev->curr_resync_completed &&
7836 (mddev->curr_resync - mddev->curr_resync_completed)
7837 > (max_sectors >> 4)) ||
7838 time_after_eq(jiffies, update_time + UPDATE_FREQUENCY) ||
7839 (j - mddev->curr_resync_completed)*2
7840 >= mddev->resync_max - mddev->curr_resync_completed ||
7841 mddev->curr_resync_completed > mddev->resync_max
7843 /* time to update curr_resync_completed */
7844 wait_event(mddev->recovery_wait,
7845 atomic_read(&mddev->recovery_active) == 0);
7846 mddev->curr_resync_completed = j;
7847 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
7848 j > mddev->recovery_cp)
7849 mddev->recovery_cp = j;
7850 update_time = jiffies;
7851 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
7852 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
7855 while (j >= mddev->resync_max &&
7856 !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
7857 /* As this condition is controlled by user-space,
7858 * we can block indefinitely, so use '_interruptible'
7859 * to avoid triggering warnings.
7861 flush_signals(current); /* just in case */
7862 wait_event_interruptible(mddev->recovery_wait,
7863 mddev->resync_max > j
7864 || test_bit(MD_RECOVERY_INTR,
7868 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
7871 sectors = mddev->pers->sync_request(mddev, j, &skipped);
7873 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
7877 if (!skipped) { /* actual IO requested */
7878 io_sectors += sectors;
7879 atomic_add(sectors, &mddev->recovery_active);
7882 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
7886 if (j > max_sectors)
7887 /* when skipping, extra large numbers can be returned. */
7890 mddev->curr_resync = j;
7891 if (mddev_is_clustered(mddev))
7892 md_cluster_ops->resync_info_update(mddev, j, max_sectors);
7893 mddev->curr_mark_cnt = io_sectors;
7894 if (last_check == 0)
7895 /* this is the earliest that rebuild will be
7896 * visible in /proc/mdstat
7898 md_new_event(mddev);
7900 if (last_check + window > io_sectors || j == max_sectors)
7903 last_check = io_sectors;
7905 if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) {
7907 int next = (last_mark+1) % SYNC_MARKS;
7909 mddev->resync_mark = mark[next];
7910 mddev->resync_mark_cnt = mark_cnt[next];
7911 mark[next] = jiffies;
7912 mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
7916 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
7920 * this loop exits only when we are slower than
7921 * the 'hard' speed limit, or the system was IO-idle for a jiffy.
7923 * the system might be non-idle CPU-wise, but we only care
7924 * about not overloading the IO subsystem. (things like an
7925 * e2fsck being done on the RAID array should execute fast)
7929 recovery_done = io_sectors - atomic_read(&mddev->recovery_active);
7930 currspeed = ((unsigned long)(recovery_done - mddev->resync_mark_cnt))/2
7931 /((jiffies-mddev->resync_mark)/HZ +1) +1;
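		/*
		 * currspeed is in KB/sec: sectors completed since the last
		 * mark, halved to kilobytes, over the seconds elapsed since
		 * that mark (the inner +1 avoids dividing by zero, the
		 * outer +1 keeps the result at least 1).
		 */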
7933 if (currspeed > speed_min(mddev)) {
7934 if (currspeed > speed_max(mddev)) {
7938 if (!is_mddev_idle(mddev, 0)) {
7940 * Give other IO more of a chance.
7941 * The faster the devices, the less we wait.
7943 wait_event(mddev->recovery_wait,
7944 !atomic_read(&mddev->recovery_active));
7948 printk(KERN_INFO "md: %s: %s %s.\n",mdname(mddev), desc,
7949 test_bit(MD_RECOVERY_INTR, &mddev->recovery)
7950 ? "interrupted" : "done");
7952 * this also signals 'finished resyncing' to md_stop
7954 blk_finish_plug(&plug);
7955 wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
7957 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
7958 !test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
7959 mddev->curr_resync > 2) {
7960 mddev->curr_resync_completed = mddev->curr_resync;
7961 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
7963 /* tell personality that we are finished */
7964 mddev->pers->sync_request(mddev, max_sectors, &skipped);
7966 if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
7967 mddev->curr_resync > 2) {
7968 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
7969 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
7970 if (mddev->curr_resync >= mddev->recovery_cp) {
7972 "md: checkpointing %s of %s.\n",
7973 desc, mdname(mddev));
7974 if (test_bit(MD_RECOVERY_ERROR,
7976 mddev->recovery_cp =
7977 mddev->curr_resync_completed;
7979 mddev->recovery_cp =
7983 mddev->recovery_cp = MaxSector;
7985 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
7986 mddev->curr_resync = MaxSector;
7988 rdev_for_each_rcu(rdev, mddev)
7989 if (rdev->raid_disk >= 0 &&
7990 mddev->delta_disks >= 0 &&
7991 !test_bit(Faulty, &rdev->flags) &&
7992 !test_bit(In_sync, &rdev->flags) &&
7993 rdev->recovery_offset < mddev->curr_resync)
7994 rdev->recovery_offset = mddev->curr_resync;
7999 if (mddev_is_clustered(mddev))
8000 md_cluster_ops->resync_finish(mddev);
8002 set_bit(MD_CHANGE_DEVS, &mddev->flags);
8004 spin_lock(&mddev->lock);
8005 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
8006 /* We completed so min/max setting can be forgotten if used. */
8007 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
8008 mddev->resync_min = 0;
8009 mddev->resync_max = MaxSector;
8010 } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
8011 mddev->resync_min = mddev->curr_resync_completed;
8012 set_bit(MD_RECOVERY_DONE, &mddev->recovery);
8013 mddev->curr_resync = 0;
8014 spin_unlock(&mddev->lock);
8016 wake_up(&resync_wait);
8017 md_wakeup_thread(mddev->thread);
8020 EXPORT_SYMBOL_GPL(md_do_sync);
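/*
 * Editor's note (illustrative, not compiled): a compact restatement of
 * the curr_resync convention documented at the top of md_do_sync().
 * The driver stores these states as raw sector_t values; the enum and
 * its names exist only for this sketch.
 */
#if 0
enum example_curr_resync_state {
	EXAMPLE_RESYNC_NONE	= 0,	/* not engaged in resync at all */
	EXAMPLE_RESYNC_YIELDED	= 1,	/* yielded to a conflicting resync */
	EXAMPLE_RESYNC_CHECKING	= 2,	/* checking for conflicting arrays */
	EXAMPLE_RESYNC_ACTIVE	= 3,	/* 3 or more: resync underway, value is the sector reached */
};
#endif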
8022 static int remove_and_add_spares(struct mddev *mddev,
8023 struct md_rdev *this)
8025 struct md_rdev *rdev;
8029 rdev_for_each(rdev, mddev)
8030 if ((this == NULL || rdev == this) &&
8031 rdev->raid_disk >= 0 &&
8032 !test_bit(Blocked, &rdev->flags) &&
8033 (test_bit(Faulty, &rdev->flags) ||
8034 ! test_bit(In_sync, &rdev->flags)) &&
8035 atomic_read(&rdev->nr_pending)==0) {
8036 if (mddev->pers->hot_remove_disk(
8037 mddev, rdev) == 0) {
8038 sysfs_unlink_rdev(mddev, rdev);
8039 rdev->raid_disk = -1;
8043 if (removed && mddev->kobj.sd)
8044 sysfs_notify(&mddev->kobj, NULL, "degraded");
8049 rdev_for_each(rdev, mddev) {
8050 if (rdev->raid_disk >= 0 &&
8051 !test_bit(In_sync, &rdev->flags) &&
8052 !test_bit(Faulty, &rdev->flags))
8054 if (rdev->raid_disk >= 0)
8056 if (test_bit(Faulty, &rdev->flags))
8059 ! (rdev->saved_raid_disk >= 0 &&
8060 !test_bit(Bitmap_sync, &rdev->flags)))
8063 if (rdev->saved_raid_disk < 0)
8064 rdev->recovery_offset = 0;
8066 hot_add_disk(mddev, rdev) == 0) {
8067 if (sysfs_link_rdev(mddev, rdev))
8068 /* failure here is OK */;
8070 md_new_event(mddev);
8071 set_bit(MD_CHANGE_DEVS, &mddev->flags);
8076 set_bit(MD_CHANGE_DEVS, &mddev->flags);
8080 static void md_start_sync(struct work_struct *ws)
8082 struct mddev *mddev = container_of(ws, struct mddev, del_work);
8084 mddev->sync_thread = md_register_thread(md_do_sync,
8087 if (!mddev->sync_thread) {
8088 printk(KERN_ERR "%s: could not start resync"
8091 /* leave the spares where they are, it shouldn't hurt */
8092 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
8093 clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
8094 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
8095 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
8096 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
8097 wake_up(&resync_wait);
8098 if (test_and_clear_bit(MD_RECOVERY_RECOVER,
8100 if (mddev->sysfs_action)
8101 sysfs_notify_dirent_safe(mddev->sysfs_action);
8103 md_wakeup_thread(mddev->sync_thread);
8104 sysfs_notify_dirent_safe(mddev->sysfs_action);
8105 md_new_event(mddev);
8109 * This routine is regularly called by all per-raid-array threads to
8110 * deal with generic issues like resync and super-block update.
8111 * Raid personalities that don't have a thread (linear/raid0) do not
8112 * need this as they never do any recovery or update the superblock.
8114 * It does not do any resync itself, but rather "forks" off other threads
8115 * to do that as needed.
8116 * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
8117 * "->recovery" and create a thread at ->sync_thread.
8118 * When the thread finishes it sets MD_RECOVERY_DONE
8119 * and wakes up this thread, which will reap the sync thread and finish up.
8120 * This thread also removes any faulty devices (with nr_pending == 0).
8122 * The overall approach is:
8123 * 1/ if the superblock needs updating, update it.
8124 * 2/ If a recovery thread is running, don't do anything else.
8125 * 3/ If recovery has finished, clean up, possibly marking spares active.
8126 * 4/ If there are any faulty devices, remove them.
8127 * 5/ If array is degraded, try to add spare devices
8128 * 6/ If array has spares or is not in-sync, start a resync thread.
8130 void md_check_recovery(struct mddev *mddev)
8132 if (mddev->suspended)
8136 bitmap_daemon_work(mddev);
8138 if (signal_pending(current)) {
8139 if (mddev->pers->sync_request && !mddev->external) {
8140 printk(KERN_INFO "md: %s in immediate safe mode\n",
8142 mddev->safemode = 2;
8144 flush_signals(current);
8147 if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
8150 (mddev->flags & MD_UPDATE_SB_FLAGS & ~ (1<<MD_CHANGE_PENDING)) ||
8151 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
8152 test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
8153 (mddev->external == 0 && mddev->safemode == 1) ||
8154 (mddev->safemode == 2 && ! atomic_read(&mddev->writes_pending)
8155 && !mddev->in_sync && mddev->recovery_cp == MaxSector)
8159 if (mddev_trylock(mddev)) {
8163 struct md_rdev *rdev;
8164 if (!mddev->external && mddev->in_sync)
8165 /* 'Blocked' flag not needed as failed devices
8166 * will be recorded if array switched to read/write.
8167 * Leaving it set will prevent the device
8168 * from being removed.
8170 rdev_for_each(rdev, mddev)
8171 clear_bit(Blocked, &rdev->flags);
8172 /* On a read-only array we can:
8173 * - remove failed devices
8174 * - add already-in_sync devices if the array itself is in-sync.
8176 * As we only add devices that are already in-sync,
8177 * we can activate the spares immediately.
8179 remove_and_add_spares(mddev, NULL);
8180 /* There is no thread, but we need to call
8181 * ->spare_active and clear saved_raid_disk
8183 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
8184 md_reap_sync_thread(mddev);
8185 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
8186 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
8190 if (!mddev->external) {
8192 spin_lock(&mddev->lock);
8193 if (mddev->safemode &&
8194 !atomic_read(&mddev->writes_pending) &&
8196 mddev->recovery_cp == MaxSector) {
8199 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
8201 if (mddev->safemode == 1)
8202 mddev->safemode = 0;
8203 spin_unlock(&mddev->lock);
8205 sysfs_notify_dirent_safe(mddev->sysfs_state);
8208 if (mddev->flags & MD_UPDATE_SB_FLAGS) {
8209 if (mddev_is_clustered(mddev))
8210 md_cluster_ops->metadata_update_start(mddev);
8211 md_update_sb(mddev, 0);
8212 if (mddev_is_clustered(mddev))
8213 md_cluster_ops->metadata_update_finish(mddev);
8216 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
8217 !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
8218 /* resync/recovery still happening */
8219 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
8222 if (mddev->sync_thread) {
8223 md_reap_sync_thread(mddev);
8226 /* Set RUNNING before clearing NEEDED to avoid
8227 * any transients in the value of "sync_action".
8229 mddev->curr_resync_completed = 0;
8230 spin_lock(&mddev->lock);
8231 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
8232 spin_unlock(&mddev->lock);
8233 /* Clear some bits that don't mean anything, but might be left set */
8236 clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
8237 clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
8239 if (!test_and_clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
8240 test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
8242 /* no recovery is running.
8243 * remove any failed drives, then
8244 * add spares if possible.
8245 * Spares are also removed and re-added, to allow
8246 * the personality to fail the re-add.
8249 if (mddev->reshape_position != MaxSector) {
8250 if (mddev->pers->check_reshape == NULL ||
8251 mddev->pers->check_reshape(mddev) != 0)
8252 /* Cannot proceed */
8254 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
8255 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
8256 } else if ((spares = remove_and_add_spares(mddev, NULL))) {
8257 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
8258 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
8259 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
8260 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
8261 } else if (mddev->recovery_cp < MaxSector) {
8262 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
8263 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
8264 } else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
8265 /* nothing to be done ... */
8268 if (mddev->pers->sync_request) {
8270 /* We are adding a device or devices to an array
8271 * which has the bitmap stored on all devices.
8272 * So make sure all bitmap pages get written
8274 bitmap_write_all(mddev->bitmap);
8276 INIT_WORK(&mddev->del_work, md_start_sync);
8277 queue_work(md_misc_wq, &mddev->del_work);
8281 if (!mddev->sync_thread) {
8282 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
8283 wake_up(&resync_wait);
8284 if (test_and_clear_bit(MD_RECOVERY_RECOVER,
8286 if (mddev->sysfs_action)
8287 sysfs_notify_dirent_safe(mddev->sysfs_action);
8290 wake_up(&mddev->sb_wait);
8291 mddev_unlock(mddev);
8294 EXPORT_SYMBOL(md_check_recovery);
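/*
 * Editor's sketch (illustrative, not compiled): personalities that run a
 * service thread call the helper above once per wakeup, before handling
 * their own work, so superblock updates and resync management happen
 * regularly.  The thread function below is an assumption, not a real
 * personality.
 */
#if 0
static void example_raid_thread(struct md_thread *thread)
{
	struct mddev *mddev = thread->mddev;

	md_check_recovery(mddev);
	/* ... then service the personality's own pending requests ... */
}
#endif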
8296 void md_reap_sync_thread(struct mddev *mddev)
8298 struct md_rdev *rdev;
8300 /* resync has finished, collect result */
8301 md_unregister_thread(&mddev->sync_thread);
8302 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
8303 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
8305 /* activate any spares */
8306 if (mddev->pers->spare_active(mddev)) {
8307 sysfs_notify(&mddev->kobj, NULL,
8309 set_bit(MD_CHANGE_DEVS, &mddev->flags);
8312 if (mddev_is_clustered(mddev))
8313 md_cluster_ops->metadata_update_start(mddev);
8314 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
8315 mddev->pers->finish_reshape)
8316 mddev->pers->finish_reshape(mddev);
8318 /* If array is no longer degraded, then any saved_raid_disk
8319 * information must be scrapped.
8321 if (!mddev->degraded)
8322 rdev_for_each(rdev, mddev)
8323 rdev->saved_raid_disk = -1;
8325 md_update_sb(mddev, 1);
8326 if (mddev_is_clustered(mddev))
8327 md_cluster_ops->metadata_update_finish(mddev);
8328 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
8329 clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
8330 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
8331 clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
8332 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
8333 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
8334 wake_up(&resync_wait);
8335 /* flag recovery needed just to double check */
8336 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
8337 sysfs_notify_dirent_safe(mddev->sysfs_action);
8338 md_new_event(mddev);
8339 if (mddev->event_work.func)
8340 queue_work(md_misc_wq, &mddev->event_work);
8342 EXPORT_SYMBOL(md_reap_sync_thread);
8344 void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev)
8346 sysfs_notify_dirent_safe(rdev->sysfs_state);
8347 wait_event_timeout(rdev->blocked_wait,
8348 !test_bit(Blocked, &rdev->flags) &&
8349 !test_bit(BlockedBadBlocks, &rdev->flags),
8350 msecs_to_jiffies(5000));
8351 rdev_dec_pending(rdev, mddev);
8353 EXPORT_SYMBOL(md_wait_for_blocked_rdev);
8355 void md_finish_reshape(struct mddev *mddev)
8357 /* called by personality module when reshape completes. */
8358 struct md_rdev *rdev;
8360 rdev_for_each(rdev, mddev) {
8361 if (rdev->data_offset > rdev->new_data_offset)
8362 rdev->sectors += rdev->data_offset - rdev->new_data_offset;
8364 rdev->sectors -= rdev->new_data_offset - rdev->data_offset;
8365 rdev->data_offset = rdev->new_data_offset;
8368 EXPORT_SYMBOL(md_finish_reshape);
8370 /* Bad block management.
8371 * We can record which blocks on each device are 'bad' and so just
8372 * fail those blocks, or that stripe, rather than the whole device.
8373 * Entries in the bad-block table are 64bits wide. This comprises:
8374 * Length of bad-range, in sectors: 0-511 for lengths 1-512
8375 * Start of bad-range, sector offset, 54 bits (allows 8 exbibytes)
8376 * A 'shift' can be set so that larger blocks are tracked and
8377 * consequently larger devices can be covered.
8378 * 'Acknowledged' flag - 1 bit. - the most significant bit.
8380 * Locking of the bad-block table uses a seqlock so md_is_badblock
8381 * might need to retry if it is very unlucky.
8382 * We will sometimes want to check for bad blocks in a bi_end_io function,
8383 * so we use the write_seqlock_irq variant.
8385 * When looking for a bad block we specify a range and want to
8386 * know if any block in the range is bad. So we binary-search
8387 * to the last range that starts at-or-before the given endpoint,
8388 * (or "before the sector after the target range")
8389 * then see if it ends after the given start.
8391 * 0 if there are no known bad blocks in the range
8392 * 1 if there are known bad blocks which are all acknowledged
8393 * -1 if there are bad blocks which have not yet been acknowledged in metadata.
8394 * plus the start/length of the first bad section we overlap.
8396 int md_is_badblock(struct badblocks *bb, sector_t s, int sectors,
8397 sector_t *first_bad, int *bad_sectors)
8403 sector_t target = s + sectors;
8406 if (bb->shift > 0) {
8407 /* round the start down, and the end up */
8409 target += (1<<bb->shift) - 1;
8410 target >>= bb->shift;
8411 sectors = target - s;
8413 /* 'target' is now the first block after the bad range */
8416 seq = read_seqbegin(&bb->lock);
8421 /* Binary search between lo and hi for 'target'
8422 * i.e. for the last range that starts before 'target'
8424 /* INVARIANT: ranges before 'lo' and at-or-after 'hi'
8425 * are known not to be the last range before target.
8426 * VARIANT: hi-lo is the number of possible
8427 * ranges, and decreases until it reaches 1
8429 while (hi - lo > 1) {
8430 int mid = (lo + hi) / 2;
8431 sector_t a = BB_OFFSET(p[mid]);
8433 /* This could still be the one, earlier ranges could not. */
8437 /* This and later ranges are definitely out. */
8440 /* 'lo' might be the last that started before target, but 'hi' isn't */
8442 /* need to check all ranges that end after 's' to see if
8443 * any are unacknowledged.
8446 BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > s) {
8447 if (BB_OFFSET(p[lo]) < target) {
8448 /* starts before the end, and finishes after
8449 * the start, so they must overlap
8451 if (rv != -1 && BB_ACK(p[lo]))
8455 *first_bad = BB_OFFSET(p[lo]);
8456 *bad_sectors = BB_LEN(p[lo]);
8462 if (read_seqretry(&bb->lock, seq))
8467 EXPORT_SYMBOL_GPL(md_is_badblock);
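/*
 * Editor's sketch (illustrative, not compiled) of the 64-bit entry
 * packing described above: 9 bits of length (stored as len-1, giving
 * 1..512), the start sector in the middle 54 bits, and the acknowledged
 * flag in the most significant bit.  The helpers below only restate that
 * layout; the driver's real accessor macros live elsewhere.
 */
#if 0
static inline u64 example_bb_make(u64 start, unsigned int len, int ack)
{
	return (start << 9) | (u64)(len - 1) | ((u64)(!!ack) << 63);
}

static inline u64 example_bb_offset(u64 entry)
{
	return (entry << 1) >> 10;	/* drop the ack bit, then the length bits */
}

static inline unsigned int example_bb_len(u64 entry)
{
	return (unsigned int)(entry & 0x1ff) + 1;
}

static inline int example_bb_ack(u64 entry)
{
	return (int)(entry >> 63);
}
#endif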
8470 * Add a range of bad blocks to the table.
8471 * This might extend the table, or might contract it
8472 * if two adjacent ranges can be merged.
8473 * We binary-search to find the 'insertion' point, then
8474 * decide how best to handle it.
8476 static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors,
8482 unsigned long flags;
8485 /* badblocks are disabled */
8489 /* round the start down, and the end up */
8490 sector_t next = s + sectors;
8492 next += (1<<bb->shift) - 1;
8497 write_seqlock_irqsave(&bb->lock, flags);
8502 /* Find the last range that starts at-or-before 's' */
8503 while (hi - lo > 1) {
8504 int mid = (lo + hi) / 2;
8505 sector_t a = BB_OFFSET(p[mid]);
8511 if (hi > lo && BB_OFFSET(p[lo]) > s)
8515 /* we found a range that might merge with the start
8518 sector_t a = BB_OFFSET(p[lo]);
8519 sector_t e = a + BB_LEN(p[lo]);
8520 int ack = BB_ACK(p[lo]);
8522 /* Yes, we can merge with a previous range */
8523 if (s == a && s + sectors >= e)
8524 /* new range covers old */
8527 ack = ack && acknowledged;
8529 if (e < s + sectors)
8531 if (e - a <= BB_MAX_LEN) {
8532 p[lo] = BB_MAKE(a, e-a, ack);
8535 /* does not all fit in one range,
8536 * make p[lo] maximal
8538 if (BB_LEN(p[lo]) != BB_MAX_LEN)
8539 p[lo] = BB_MAKE(a, BB_MAX_LEN, ack);
8545 if (sectors && hi < bb->count) {
8546 /* 'hi' points to the first range that starts after 's'.
8547 * Maybe we can merge with the start of that range */
8548 sector_t a = BB_OFFSET(p[hi]);
8549 sector_t e = a + BB_LEN(p[hi]);
8550 int ack = BB_ACK(p[hi]);
8551 if (a <= s + sectors) {
8552 /* merging is possible */
8553 if (e <= s + sectors) {
8558 ack = ack && acknowledged;
8561 if (e - a <= BB_MAX_LEN) {
8562 p[hi] = BB_MAKE(a, e-a, ack);
8565 p[hi] = BB_MAKE(a, BB_MAX_LEN, ack);
8573 if (sectors == 0 && hi < bb->count) {
8574 /* we might be able to combine lo and hi */
8575 /* Note: 's' is at the end of 'lo' */
8576 sector_t a = BB_OFFSET(p[hi]);
8577 int lolen = BB_LEN(p[lo]);
8578 int hilen = BB_LEN(p[hi]);
8579 int newlen = lolen + hilen - (s - a);
8580 if (s >= a && newlen < BB_MAX_LEN) {
8581 /* yes, we can combine them */
8582 int ack = BB_ACK(p[lo]) && BB_ACK(p[hi]);
8583 p[lo] = BB_MAKE(BB_OFFSET(p[lo]), newlen, ack);
8584 memmove(p + hi, p + hi + 1,
8585 (bb->count - hi - 1) * 8);
8590 /* didn't merge (it all).
8591 * Need to add a range just before 'hi' */
8592 if (bb->count >= MD_MAX_BADBLOCKS) {
8593 /* No room for more */
8597 int this_sectors = sectors;
8598 memmove(p + hi + 1, p + hi,
8599 (bb->count - hi) * 8);
8602 if (this_sectors > BB_MAX_LEN)
8603 this_sectors = BB_MAX_LEN;
8604 p[hi] = BB_MAKE(s, this_sectors, acknowledged);
8605 sectors -= this_sectors;
8612 bb->unacked_exist = 1;
8613 write_sequnlock_irqrestore(&bb->lock, flags);
8618 int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
8623 s += rdev->new_data_offset;
8625 s += rdev->data_offset;
8626 rv = md_set_badblocks(&rdev->badblocks,
8629 /* Make sure they get written out promptly */
8630 sysfs_notify_dirent_safe(rdev->sysfs_state);
8631 set_bit(MD_CHANGE_CLEAN, &rdev->mddev->flags);
8632 md_wakeup_thread(rdev->mddev->thread);
8636 EXPORT_SYMBOL_GPL(rdev_set_badblocks);
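/*
 * Editor's sketch (illustrative, not compiled) of the calling convention
 * above: rdev_set_badblocks() returns non-zero when the range was
 * recorded.  A personality that hits a write error typically tries to
 * record the bad range first and only fails the whole device when the
 * table has no room left.  The function name below is an assumption.
 */
#if 0
static void example_record_write_error(struct mddev *mddev,
				       struct md_rdev *rdev,
				       sector_t sector, int nr_sectors)
{
	if (!rdev_set_badblocks(rdev, sector, nr_sectors, 0))
		md_error(mddev, rdev);	/* table full: kick the device instead */
}
#endif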
8639 * Remove a range of bad blocks from the table.
8640 * This may involve extending the table if we split a region,
8641 * but it must not fail. So if the table becomes full, we just
8642 * drop the remove request.
8644 static int md_clear_badblocks(struct badblocks *bb, sector_t s, int sectors)
8648 sector_t target = s + sectors;
8651 if (bb->shift > 0) {
8652 /* When clearing we round the start up and the end down.
8653 * This should not matter as the shift should align with
8654 * the block size and no rounding should ever be needed.
8655 * However it is better to think a block is bad when it
8656 * isn't than to think a block is not bad when it is.
8658 s += (1<<bb->shift) - 1;
8660 target >>= bb->shift;
8661 sectors = target - s;
8664 write_seqlock_irq(&bb->lock);
8669 /* Find the last range that starts before 'target' */
8670 while (hi - lo > 1) {
8671 int mid = (lo + hi) / 2;
8672 sector_t a = BB_OFFSET(p[mid]);
8679 /* p[lo] is the last range that could overlap the
8680 * current range. Earlier ranges could also overlap,
8681 * but only this one can overlap the end of the range.
8683 if (BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > target) {
8684 /* Partial overlap, leave the tail of this range */
8685 int ack = BB_ACK(p[lo]);
8686 sector_t a = BB_OFFSET(p[lo]);
8687 sector_t end = a + BB_LEN(p[lo]);
8690 /* we need to split this range */
8691 if (bb->count >= MD_MAX_BADBLOCKS) {
8695 memmove(p+lo+1, p+lo, (bb->count - lo) * 8);
8697 p[lo] = BB_MAKE(a, s-a, ack);
8700 p[lo] = BB_MAKE(target, end - target, ack);
8701 /* there is no longer an overlap */
8706 BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > s) {
8707 /* This range does overlap */
8708 if (BB_OFFSET(p[lo]) < s) {
8709 /* Keep the early parts of this range. */
8710 int ack = BB_ACK(p[lo]);
8711 sector_t start = BB_OFFSET(p[lo]);
8712 p[lo] = BB_MAKE(start, s - start, ack);
8713 /* now low doesn't overlap, so.. */
8718 /* 'lo' is strictly before, 'hi' is strictly after,
8719 * anything between needs to be discarded
8722 memmove(p+lo+1, p+hi, (bb->count - hi) * 8);
8723 bb->count -= (hi - lo - 1);
8729 write_sequnlock_irq(&bb->lock);
8733 int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
8737 s += rdev->new_data_offset;
8739 s += rdev->data_offset;
8740 return md_clear_badblocks(&rdev->badblocks,
8743 EXPORT_SYMBOL_GPL(rdev_clear_badblocks);
8746 * Acknowledge all bad blocks in a list.
8747 * This only succeeds if ->changed is clear. It is used by
8748 * in-kernel metadata updates
8750 void md_ack_all_badblocks(struct badblocks *bb)
8752 if (bb->page == NULL || bb->changed)
8753 /* no point even trying */
8755 write_seqlock_irq(&bb->lock);
8757 if (bb->changed == 0 && bb->unacked_exist) {
8760 for (i = 0; i < bb->count ; i++) {
8761 if (!BB_ACK(p[i])) {
8762 sector_t start = BB_OFFSET(p[i]);
8763 int len = BB_LEN(p[i]);
8764 p[i] = BB_MAKE(start, len, 1);
8767 bb->unacked_exist = 0;
8769 write_sequnlock_irq(&bb->lock);
8771 EXPORT_SYMBOL_GPL(md_ack_all_badblocks);
8773 /* sysfs access to bad-blocks list.
8774 * We present two files.
8775 * 'bad-blocks' lists sector numbers and lengths of ranges that
8776 * are recorded as bad. The list is truncated to fit within
8777 * the one-page limit of sysfs.
8778 * Writing "sector length" to this file adds an acknowledged bad block.
8780 * 'unacknowledged-bad-blocks' lists bad blocks that have not yet
8781 * been acknowledged. Writing to this file adds bad blocks
8782 * without acknowledging them. This is largely for testing.
8786 badblocks_show(struct badblocks *bb, char *page, int unack)
8797 seq = read_seqbegin(&bb->lock);
8802 while (len < PAGE_SIZE && i < bb->count) {
8803 sector_t s = BB_OFFSET(p[i]);
8804 unsigned int length = BB_LEN(p[i]);
8805 int ack = BB_ACK(p[i]);
8811 len += snprintf(page+len, PAGE_SIZE-len, "%llu %u\n",
8812 (unsigned long long)s << bb->shift,
8813 length << bb->shift);
8815 if (unack && len == 0)
8816 bb->unacked_exist = 0;
8818 if (read_seqretry(&bb->lock, seq))
8827 badblocks_store(struct badblocks *bb, const char *page, size_t len, int unack)
8829 unsigned long long sector;
8833 /* Allow clearing via sysfs *only* for testing/debugging.
8834 * Normally only a successful write may clear a badblock
8837 if (page[0] == '-') {
8841 #endif /* DO_DEBUG */
8843 switch (sscanf(page, "%llu %d%c", &sector, &length, &newline)) {
8845 if (newline != '\n')
8857 md_clear_badblocks(bb, sector, length);
8860 #endif /* DO_DEBUG */
8861 if (md_set_badblocks(bb, sector, length, !unack))
8867 static int md_notify_reboot(struct notifier_block *this,
8868 unsigned long code, void *x)
8870 struct list_head *tmp;
8871 struct mddev *mddev;
8874 for_each_mddev(mddev, tmp) {
8875 if (mddev_trylock(mddev)) {
8877 __md_stop_writes(mddev);
8878 if (mddev->persistent)
8879 mddev->safemode = 2;
8880 mddev_unlock(mddev);
8885 * certain more exotic SCSI devices are known to be
8886 * volatile wrt too early system reboots. While the
8887 * right place to handle this issue is the given
8888 * driver, we do want to have a safe RAID driver ...
8896 static struct notifier_block md_notifier = {
8897 .notifier_call = md_notify_reboot,
8899 .priority = INT_MAX, /* before any real devices */
8902 static void md_geninit(void)
8904 pr_debug("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));
8906 proc_create("mdstat", S_IRUGO, NULL, &md_seq_fops);
8909 static int __init md_init(void)
8913 md_wq = alloc_workqueue("md", WQ_MEM_RECLAIM, 0);
8917 md_misc_wq = alloc_workqueue("md_misc", 0, 0);
8921 if ((ret = register_blkdev(MD_MAJOR, "md")) < 0)
8924 if ((ret = register_blkdev(0, "mdp")) < 0)
8928 blk_register_region(MKDEV(MD_MAJOR, 0), 512, THIS_MODULE,
8929 md_probe, NULL, NULL);
8930 blk_register_region(MKDEV(mdp_major, 0), 1UL<<MINORBITS, THIS_MODULE,
8931 md_probe, NULL, NULL);
8933 register_reboot_notifier(&md_notifier);
8934 raid_table_header = register_sysctl_table(raid_root_table);
8940 unregister_blkdev(MD_MAJOR, "md");
8942 destroy_workqueue(md_misc_wq);
8944 destroy_workqueue(md_wq);
8949 void md_reload_sb(struct mddev *mddev)
8951 struct md_rdev *rdev, *tmp;
8953 rdev_for_each_safe(rdev, tmp, mddev) {
8954 rdev->sb_loaded = 0;
8955 ClearPageUptodate(rdev->sb_page);
8957 mddev->raid_disks = 0;
8959 rdev_for_each_safe(rdev, tmp, mddev) {
8960 struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
8961 /* since we don't write to faulty devices, we figure out if the
8962 * disk is faulty by comparing events
8964 if (mddev->events > sb->events)
8965 set_bit(Faulty, &rdev->flags);
8969 EXPORT_SYMBOL(md_reload_sb);
8974 * Searches all registered partitions for autorun RAID arrays
8978 static LIST_HEAD(all_detected_devices);
8979 struct detected_devices_node {
8980 struct list_head list;
8984 void md_autodetect_dev(dev_t dev)
8986 struct detected_devices_node *node_detected_dev;
8988 node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL);
8989 if (node_detected_dev) {
8990 node_detected_dev->dev = dev;
8991 list_add_tail(&node_detected_dev->list, &all_detected_devices);
8993 printk(KERN_CRIT "md: md_autodetect_dev: kzalloc failed"
8994 ", skipping dev(%d,%d)\n", MAJOR(dev), MINOR(dev));
8998 static void autostart_arrays(int part)
9000 struct md_rdev *rdev;
9001 struct detected_devices_node *node_detected_dev;
9003 int i_scanned, i_passed;
9008 printk(KERN_INFO "md: Autodetecting RAID arrays.\n");
9010 while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) {
9012 node_detected_dev = list_entry(all_detected_devices.next,
9013 struct detected_devices_node, list);
9014 list_del(&node_detected_dev->list);
9015 dev = node_detected_dev->dev;
9016 kfree(node_detected_dev);
9017 rdev = md_import_device(dev,0, 90);
9021 if (test_bit(Faulty, &rdev->flags))
9024 set_bit(AutoDetected, &rdev->flags);
9025 list_add(&rdev->same_set, &pending_raid_disks);
9029 printk(KERN_INFO "md: Scanned %d and added %d devices.\n",
9030 i_scanned, i_passed);
9032 autorun_devices(part);
9035 #endif /* !MODULE */
9037 static __exit void md_exit(void)
9039 struct mddev *mddev;
9040 struct list_head *tmp;
9043 blk_unregister_region(MKDEV(MD_MAJOR,0), 512);
9044 blk_unregister_region(MKDEV(mdp_major,0), 1U << MINORBITS);
9046 unregister_blkdev(MD_MAJOR,"md");
9047 unregister_blkdev(mdp_major, "mdp");
9048 unregister_reboot_notifier(&md_notifier);
9049 unregister_sysctl_table(raid_table_header);
9051 /* We cannot unload the modules while some process is
9052 * waiting for us in select() or poll() - wake them up
9055 while (waitqueue_active(&md_event_waiters)) {
9056 /* not safe to leave yet */
9057 wake_up(&md_event_waiters);
9061 remove_proc_entry("mdstat", NULL);
9063 for_each_mddev(mddev, tmp) {
9064 export_array(mddev);
9065 mddev->hold_active = 0;
9067 destroy_workqueue(md_misc_wq);
9068 destroy_workqueue(md_wq);
9071 subsys_initcall(md_init);
9072 module_exit(md_exit)
9074 static int get_ro(char *buffer, struct kernel_param *kp)
9076 return sprintf(buffer, "%d", start_readonly);
9078 static int set_ro(const char *val, struct kernel_param *kp)
9080 return kstrtouint(val, 10, (unsigned int *)&start_readonly);
9083 module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
9084 module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);
9085 module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR);
9087 MODULE_LICENSE("GPL");
9088 MODULE_DESCRIPTION("MD RAID framework");
9090 MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);