/*
   md.c : Multiple Devices driver for Linux
	  Copyright (C) 1998, 1999, 2000 Ingo Molnar

     completely rewritten, based on the MD driver code from Marc Zyngier

   Changes:

   - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
   - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
   - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
   - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
   - kmod support by: Cyrus Durgin
   - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
   - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>

   - lots of fixes and improvements to the RAID1/RAID5 and generic
     RAID code (such as request based resynchronization):

     Neil Brown <neilb@cse.unsw.edu.au>.

   - persistent bitmap code
     Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kthread.h>
#include <linux/blkdev.h>
#include <linux/sysctl.h>
#include <linux/seq_file.h>
#include <linux/mutex.h>
#include <linux/buffer_head.h> /* for invalidate_bdev */
#include <linux/poll.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/hdreg.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/file.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/raid/md_p.h>
#include <linux/raid/md_u.h>
#include <linux/slab.h>
#include "md.h"
#include "bitmap.h"
#define DEBUG 0
#define dprintk(x...) ((void)(DEBUG && printk(x)))

#ifndef MODULE
static void autostart_arrays(int part);
#endif

static LIST_HEAD(pers_list);
static DEFINE_SPINLOCK(pers_lock);

static void md_print_devices(void);

static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
static struct workqueue_struct *md_wq;
static struct workqueue_struct *md_misc_wq;

#define MD_BUG(x...) { printk("md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); }
/*
 * Default number of read corrections we'll attempt on an rdev
 * before ejecting it from the array. We divide the read error
 * count by 2 for every hour elapsed between read errors.
 */
#define MD_DEFAULT_MAX_CORRECTED_READ_ERRORS 20
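/*
 * Editor's illustration of the decay rule above: a device that has
 * accumulated 16 corrected read errors and then runs error-free for two
 * hours is treated as having 16/2/2 = 4, so occasional media hiccups do
 * not slowly push an otherwise healthy disk over the ejection threshold.
 */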
/*
 * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
 * is 1000 KB/sec, so the extra system load does not show up that much.
 * Increase it if you want to have more _guaranteed_ speed. Note that
 * the RAID driver will use the maximum available bandwidth if the IO
 * subsystem is idle. There is also an 'absolute maximum' reconstruction
 * speed limit - in case reconstruction slows down your system despite
 * idle IO detection.
 *
 * You can change it via /proc/sys/dev/raid/speed_limit_min and _max,
 * or via /sys/block/mdX/md/sync_speed_{min,max}.
 */

static int sysctl_speed_limit_min = 1000;
static int sysctl_speed_limit_max = 200000;
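/*
 * Example (editor's illustration, not part of the original source): the
 * per-array sysfs values take precedence over the global sysctls, so
 *
 *	echo 5000 > /sys/block/md0/md/sync_speed_min
 *
 * raises the guaranteed resync speed for md0 only; a stored value of 0
 * means "fall back to /proc/sys/dev/raid/speed_limit_min".
 */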
static inline int speed_min(mddev_t *mddev)
{
	return mddev->sync_speed_min ?
		mddev->sync_speed_min : sysctl_speed_limit_min;
}

static inline int speed_max(mddev_t *mddev)
{
	return mddev->sync_speed_max ?
		mddev->sync_speed_max : sysctl_speed_limit_max;
}
static struct ctl_table_header *raid_table_header;

static ctl_table raid_table[] = {
	{
		.procname	= "speed_limit_min",
		.data		= &sysctl_speed_limit_min,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO|S_IWUSR,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "speed_limit_max",
		.data		= &sysctl_speed_limit_max,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO|S_IWUSR,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

static ctl_table raid_dir_table[] = {
	{
		.procname	= "raid",
		.maxlen		= 0,
		.mode		= S_IRUGO|S_IXUGO,
		.child		= raid_table,
	},
	{ }
};

static ctl_table raid_root_table[] = {
	{
		.procname	= "dev",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= raid_dir_table,
	},
	{ }
};

static const struct block_device_operations md_fops;

static int start_readonly;
/*
 * We have a system wide 'event count' that is incremented
 * on any 'interesting' event, and readers of /proc/mdstat
 * can use 'poll' or 'select' to find out when the event
 * count increases.
 *
 * Events are:
 *  start array, stop array, error, add device, remove device,
 *  start build, activate spare
 */
static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
static atomic_t md_event_count;
void md_new_event(mddev_t *mddev)
{
	atomic_inc(&md_event_count);
	wake_up(&md_event_waiters);
}
EXPORT_SYMBOL_GPL(md_new_event);
/* Alternate version that can be called from interrupts
 * when calling sysfs_notify isn't needed.
 */
static void md_new_event_inintr(mddev_t *mddev)
{
	atomic_inc(&md_event_count);
	wake_up(&md_event_waiters);
}
/*
 * Allows iterating over all existing md arrays;
 * all_mddevs_lock protects this list.
 */
static LIST_HEAD(all_mddevs);
static DEFINE_SPINLOCK(all_mddevs_lock);
/*
 * iterates through all used mddevs in the system.
 * We take care to grab the all_mddevs_lock whenever navigating
 * the list, and to always hold a refcount when unlocked.
 * Any code which breaks out of this loop while owning
 * a reference to the current mddev must mddev_put it.
 */
#define for_each_mddev(mddev,tmp)					\
									\
	for (({ spin_lock(&all_mddevs_lock);				\
		tmp = all_mddevs.next;					\
		mddev = NULL;});					\
	     ({ if (tmp != &all_mddevs)					\
			mddev_get(list_entry(tmp, mddev_t, all_mddevs));\
		spin_unlock(&all_mddevs_lock);				\
		if (mddev) mddev_put(mddev);				\
		mddev = list_entry(tmp, mddev_t, all_mddevs);		\
		tmp != &all_mddevs;});					\
	     ({ spin_lock(&all_mddevs_lock);				\
		tmp = tmp->next;})					\
		)
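/*
 * Example (editor's sketch): a typical walk over all arrays.  Breaking
 * out of the loop early leaves a reference held on the current mddev,
 * which the caller must drop itself:
 *
 *	mddev_t *mddev;
 *	struct list_head *tmp;
 *
 *	for_each_mddev(mddev, tmp) {
 *		if (is_interesting(mddev)) {	// hypothetical predicate
 *			...use mddev...
 *			mddev_put(mddev);	// drop the reference we still hold
 *			break;
 *		}
 *	}
 */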
/* Rather than calling directly into the personality make_request function,
 * IO requests come here first so that we can check if the device is
 * being suspended pending a reconfiguration.
 * We hold a refcount over the call to ->make_request.  By the time that
 * call has finished, the bio has been linked into some internal structure
 * and so is visible to ->quiesce(), so we don't need the refcount any more.
 */
static int md_make_request(struct request_queue *q, struct bio *bio)
{
	const int rw = bio_data_dir(bio);
	mddev_t *mddev = q->queuedata;
	int rv;
	int cpu;

	if (mddev == NULL || mddev->pers == NULL) {
		bio_io_error(bio);
		return 0;
	}
	rcu_read_lock();
	if (mddev->suspended) {
		DEFINE_WAIT(__wait);
		for (;;) {
			prepare_to_wait(&mddev->sb_wait, &__wait,
					TASK_UNINTERRUPTIBLE);
			if (!mddev->suspended)
				break;
			rcu_read_unlock();
			schedule();
			rcu_read_lock();
		}
		finish_wait(&mddev->sb_wait, &__wait);
	}
	atomic_inc(&mddev->active_io);
	rcu_read_unlock();

	rv = mddev->pers->make_request(mddev, bio);

	cpu = part_stat_lock();
	part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
	part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
		      bio_sectors(bio));
	part_stat_unlock();

	if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
		wake_up(&mddev->sb_wait);

	return rv;
}
/* mddev_suspend makes sure no new requests are submitted
 * to the device, and that any requests that have been submitted
 * are completely handled.
 * Once ->stop is called and completes, the module will be completely
 * unused.
 */
void mddev_suspend(mddev_t *mddev)
{
	BUG_ON(mddev->suspended);
	mddev->suspended = 1;
	synchronize_rcu();
	wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
	mddev->pers->quiesce(mddev, 1);
}
EXPORT_SYMBOL_GPL(mddev_suspend);

void mddev_resume(mddev_t *mddev)
{
	mddev->suspended = 0;
	wake_up(&mddev->sb_wait);
	mddev->pers->quiesce(mddev, 0);
}
EXPORT_SYMBOL_GPL(mddev_resume);

int mddev_congested(mddev_t *mddev, int bits)
{
	return mddev->suspended;
}
EXPORT_SYMBOL(mddev_congested);
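/*
 * Editor's sketch of the expected pairing: a caller quiesces the array,
 * performs its reconfiguration, and resumes:
 *
 *	mddev_suspend(mddev);	// waits for active_io to drain,
 *				// then calls ->quiesce(mddev, 1)
 *	...modify personality state...
 *	mddev_resume(mddev);	// ->quiesce(mddev, 0), wake sb_wait
 */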
/*
 * Generic flush handling for md
 */

static void md_end_flush(struct bio *bio, int err)
{
	mdk_rdev_t *rdev = bio->bi_private;
	mddev_t *mddev = rdev->mddev;

	rdev_dec_pending(rdev, mddev);

	if (atomic_dec_and_test(&mddev->flush_pending)) {
		/* The pre-request flush has finished */
		queue_work(md_wq, &mddev->flush_work);
	}
	bio_put(bio);
}
static void submit_flushes(mddev_t *mddev)
{
	mdk_rdev_t *rdev;

	rcu_read_lock();
	list_for_each_entry_rcu(rdev, &mddev->disks, same_set)
		if (rdev->raid_disk >= 0 &&
		    !test_bit(Faulty, &rdev->flags)) {
			/* Take two references, one is dropped
			 * when request finishes, one after
			 * we reclaim rcu_read_lock
			 */
			struct bio *bi;
			atomic_inc(&rdev->nr_pending);
			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();
			bi = bio_alloc(GFP_KERNEL, 0);
			bi->bi_end_io = md_end_flush;
			bi->bi_private = rdev;
			bi->bi_bdev = rdev->bdev;
			atomic_inc(&mddev->flush_pending);
			submit_bio(WRITE_FLUSH, bi);
			rcu_read_lock();
			rdev_dec_pending(rdev, mddev);
		}
	rcu_read_unlock();
}
static void md_submit_flush_data(struct work_struct *ws)
{
	mddev_t *mddev = container_of(ws, mddev_t, flush_work);
	struct bio *bio = mddev->flush_bio;

	atomic_set(&mddev->flush_pending, 1);

	if (bio->bi_size == 0)
		/* an empty barrier - all done */
		bio_endio(bio, 0);
	else {
		bio->bi_rw &= ~REQ_FLUSH;
		if (mddev->pers->make_request(mddev, bio))
			generic_make_request(bio);
	}
	if (atomic_dec_and_test(&mddev->flush_pending)) {
		mddev->flush_bio = NULL;
		wake_up(&mddev->sb_wait);
	}
}
void md_flush_request(mddev_t *mddev, struct bio *bio)
{
	spin_lock_irq(&mddev->write_lock);
	wait_event_lock_irq(mddev->sb_wait,
			    !mddev->flush_bio,
			    mddev->write_lock, /*nothing*/);
	mddev->flush_bio = bio;
	spin_unlock_irq(&mddev->write_lock);

	atomic_set(&mddev->flush_pending, 1);
	INIT_WORK(&mddev->flush_work, md_submit_flush_data);

	submit_flushes(mddev);

	if (atomic_dec_and_test(&mddev->flush_pending))
		queue_work(md_wq, &mddev->flush_work);
}
EXPORT_SYMBOL(md_flush_request);
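/*
 * Editor's summary of the sequencing above: only one flush is in flight
 * at a time (sb_wait gates on ->flush_bio being NULL).  flush_pending is
 * pre-loaded with 1 so it cannot hit zero while submit_flushes() is still
 * issuing the empty REQ_FLUSH bios; when the last md_end_flush() drops it
 * to zero, md_submit_flush_data() runs and passes the caller's bio down
 * with REQ_FLUSH cleared.
 */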
/* Support for plugging.
 * This mirrors the plugging support in request_queue, but does not
 * require having a whole queue
 */
static void plugger_work(struct work_struct *work)
{
	struct plug_handle *plug =
		container_of(work, struct plug_handle, unplug_work);
	plug->unplug_fn(plug);
}
static void plugger_timeout(unsigned long data)
{
	struct plug_handle *plug = (void *)data;
	kblockd_schedule_work(NULL, &plug->unplug_work);
}
void plugger_init(struct plug_handle *plug,
		  void (*unplug_fn)(struct plug_handle *))
{
	plug->unplug_flag = 0;
	plug->unplug_fn = unplug_fn;
	init_timer(&plug->unplug_timer);
	plug->unplug_timer.function = plugger_timeout;
	plug->unplug_timer.data = (unsigned long)plug;
	INIT_WORK(&plug->unplug_work, plugger_work);
}
EXPORT_SYMBOL_GPL(plugger_init);

void plugger_set_plug(struct plug_handle *plug)
{
	if (!test_and_set_bit(PLUGGED_FLAG, &plug->unplug_flag))
		mod_timer(&plug->unplug_timer, jiffies + msecs_to_jiffies(3)+1);
}
EXPORT_SYMBOL_GPL(plugger_set_plug);

int plugger_remove_plug(struct plug_handle *plug)
{
	if (test_and_clear_bit(PLUGGED_FLAG, &plug->unplug_flag)) {
		del_timer(&plug->unplug_timer);
		return 1;
	} else
		return 0;
}
EXPORT_SYMBOL_GPL(plugger_remove_plug);
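/*
 * Usage sketch (editor's illustration; `conf` and `raid_unplug` are
 * hypothetical names): a personality embeds a plug_handle, arms it while
 * batching requests, and the unplug_fn fires roughly 3ms later via the
 * timer -> kblockd path:
 *
 *	plugger_init(&conf->plug, raid_unplug);		// once, at setup
 *	plugger_set_plug(&conf->plug);			// per queued request
 *	if (plugger_remove_plug(&conf->plug))		// explicit flush point
 *		raid_unplug(&conf->plug);
 */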
static inline mddev_t *mddev_get(mddev_t *mddev)
{
	atomic_inc(&mddev->active);
	return mddev;
}

static void mddev_delayed_delete(struct work_struct *ws);

static void mddev_put(mddev_t *mddev)
{
	if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
		return;
	if (!mddev->raid_disks && list_empty(&mddev->disks) &&
	    mddev->ctime == 0 && !mddev->hold_active) {
		/* Array is not configured at all, and not held active,
		 * so destroy it */
		list_del(&mddev->all_mddevs);
		if (mddev->gendisk) {
			/* We did a probe so need to clean up.  Call
			 * queue_work inside the spinlock so that
			 * flush_workqueue() after mddev_find will
			 * succeed in waiting for the work to be done.
			 */
			INIT_WORK(&mddev->del_work, mddev_delayed_delete);
			queue_work(md_misc_wq, &mddev->del_work);
		} else
			kfree(mddev);
	}
	spin_unlock(&all_mddevs_lock);
}
void mddev_init(mddev_t *mddev)
{
	mutex_init(&mddev->open_mutex);
	mutex_init(&mddev->reconfig_mutex);
	mutex_init(&mddev->bitmap_info.mutex);
	INIT_LIST_HEAD(&mddev->disks);
	INIT_LIST_HEAD(&mddev->all_mddevs);
	init_timer(&mddev->safemode_timer);
	atomic_set(&mddev->active, 1);
	atomic_set(&mddev->openers, 0);
	atomic_set(&mddev->active_io, 0);
	spin_lock_init(&mddev->write_lock);
	atomic_set(&mddev->flush_pending, 0);
	init_waitqueue_head(&mddev->sb_wait);
	init_waitqueue_head(&mddev->recovery_wait);
	mddev->reshape_position = MaxSector;
	mddev->resync_min = 0;
	mddev->resync_max = MaxSector;
	mddev->level = LEVEL_NONE;
}
EXPORT_SYMBOL_GPL(mddev_init);
static mddev_t * mddev_find(dev_t unit)
{
	mddev_t *mddev, *new = NULL;

 retry:
	spin_lock(&all_mddevs_lock);

	if (unit) {
		list_for_each_entry(mddev, &all_mddevs, all_mddevs)
			if (mddev->unit == unit) {
				mddev_get(mddev);
				spin_unlock(&all_mddevs_lock);
				kfree(new);
				return mddev;
			}

		if (new) {
			list_add(&new->all_mddevs, &all_mddevs);
			spin_unlock(&all_mddevs_lock);
			new->hold_active = UNTIL_IOCTL;
			return new;
		}
	} else if (new) {
		/* find an unused unit number */
		static int next_minor = 512;
		int start = next_minor;
		int is_free = 0;
		int dev = 0;
		while (!is_free) {
			dev = MKDEV(MD_MAJOR, next_minor);
			next_minor++;
			if (next_minor > MINORMASK)
				next_minor = 0;
			if (next_minor == start) {
				/* Oh dear, all in use. */
				spin_unlock(&all_mddevs_lock);
				kfree(new);
				return NULL;
			}

			is_free = 1;
			list_for_each_entry(mddev, &all_mddevs, all_mddevs)
				if (mddev->unit == dev) {
					is_free = 0;
					break;
				}
		}
		new->unit = dev;
		new->md_minor = MINOR(dev);
		new->hold_active = UNTIL_STOP;
		list_add(&new->all_mddevs, &all_mddevs);
		spin_unlock(&all_mddevs_lock);
		return new;
	}
	spin_unlock(&all_mddevs_lock);

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return NULL;

	new->unit = unit;
	if (MAJOR(unit) == MD_MAJOR)
		new->md_minor = MINOR(unit);
	else
		new->md_minor = MINOR(unit) >> MdpMinorShift;

	mddev_init(new);

	goto retry;
}
static inline int mddev_lock(mddev_t * mddev)
{
	return mutex_lock_interruptible(&mddev->reconfig_mutex);
}

static inline int mddev_is_locked(mddev_t *mddev)
{
	return mutex_is_locked(&mddev->reconfig_mutex);
}

static inline int mddev_trylock(mddev_t * mddev)
{
	return mutex_trylock(&mddev->reconfig_mutex);
}

static struct attribute_group md_redundancy_group;

static void mddev_unlock(mddev_t * mddev)
{
	if (mddev->to_remove) {
		/* These cannot be removed under reconfig_mutex as
		 * an access to the files will try to take reconfig_mutex
		 * while holding the file unremovable, which leads to
		 * a deadlock.
		 * So hold sysfs_active while the remove is happening,
		 * and anything else which might set ->to_remove or might
		 * otherwise change the sysfs namespace will fail with
		 * -EBUSY if sysfs_active is still set.
		 * We set sysfs_active under reconfig_mutex and elsewhere
		 * test it under the same mutex to ensure its correct value
		 * is seen.
		 */
		struct attribute_group *to_remove = mddev->to_remove;
		mddev->to_remove = NULL;
		mddev->sysfs_active = 1;
		mutex_unlock(&mddev->reconfig_mutex);

		if (mddev->kobj.sd) {
			if (to_remove != &md_redundancy_group)
				sysfs_remove_group(&mddev->kobj, to_remove);
			if (mddev->pers == NULL ||
			    mddev->pers->sync_request == NULL) {
				sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
				if (mddev->sysfs_action)
					sysfs_put(mddev->sysfs_action);
				mddev->sysfs_action = NULL;
			}
		}
		mddev->sysfs_active = 0;
	} else
		mutex_unlock(&mddev->reconfig_mutex);

	md_wakeup_thread(mddev->thread);
}
static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr)
{
	mdk_rdev_t *rdev;

	list_for_each_entry(rdev, &mddev->disks, same_set)
		if (rdev->desc_nr == nr)
			return rdev;

	return NULL;
}

static mdk_rdev_t * find_rdev(mddev_t * mddev, dev_t dev)
{
	mdk_rdev_t *rdev;

	list_for_each_entry(rdev, &mddev->disks, same_set)
		if (rdev->bdev->bd_dev == dev)
			return rdev;

	return NULL;
}

static struct mdk_personality *find_pers(int level, char *clevel)
{
	struct mdk_personality *pers;
	list_for_each_entry(pers, &pers_list, list) {
		if (level != LEVEL_NONE && pers->level == level)
			return pers;
		if (strcmp(pers->name, clevel)==0)
			return pers;
	}
	return NULL;
}
/* return the offset of the super block in 512byte sectors */
static inline sector_t calc_dev_sboffset(struct block_device *bdev)
{
	sector_t num_sectors = bdev->bd_inode->i_size / 512;
	return MD_NEW_SIZE_SECTORS(num_sectors);
}

static int alloc_disk_sb(mdk_rdev_t * rdev)
{
	if (rdev->sb_page)
		MD_BUG();

	rdev->sb_page = alloc_page(GFP_KERNEL);
	if (!rdev->sb_page) {
		printk(KERN_ALERT "md: out of memory.\n");
		return -ENOMEM;
	}

	return 0;
}

static void free_disk_sb(mdk_rdev_t * rdev)
{
	if (rdev->sb_page) {
		put_page(rdev->sb_page);
		rdev->sb_loaded = 0;
		rdev->sb_page = NULL;
		rdev->sb_start = 0;
		rdev->sectors = 0;
	}
}
static void super_written(struct bio *bio, int error)
{
	mdk_rdev_t *rdev = bio->bi_private;
	mddev_t *mddev = rdev->mddev;

	if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
		printk("md: super_written gets error=%d, uptodate=%d\n",
		       error, test_bit(BIO_UPTODATE, &bio->bi_flags));
		WARN_ON(test_bit(BIO_UPTODATE, &bio->bi_flags));
		md_error(mddev, rdev);
	}

	if (atomic_dec_and_test(&mddev->pending_writes))
		wake_up(&mddev->sb_wait);
	bio_put(bio);
}
void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
		   sector_t sector, int size, struct page *page)
{
	/* write first size bytes of page to sector of rdev
	 * Increment mddev->pending_writes before returning
	 * and decrement it on completion, waking up sb_wait
	 * if zero is reached.
	 * If an error occurred, call md_error
	 */
	struct bio *bio = bio_alloc(GFP_NOIO, 1);

	bio->bi_bdev = rdev->bdev;
	bio->bi_sector = sector;
	bio_add_page(bio, page, size, 0);
	bio->bi_private = rdev;
	bio->bi_end_io = super_written;

	atomic_inc(&mddev->pending_writes);
	submit_bio(REQ_WRITE | REQ_SYNC | REQ_UNPLUG | REQ_FLUSH | REQ_FUA,
		   bio);
}

void md_super_wait(mddev_t *mddev)
{
	/* wait for all superblock writes that were scheduled to complete */
	DEFINE_WAIT(wq);
	for(;;) {
		prepare_to_wait(&mddev->sb_wait, &wq, TASK_UNINTERRUPTIBLE);
		if (atomic_read(&mddev->pending_writes)==0)
			break;
		schedule();
	}
	finish_wait(&mddev->sb_wait, &wq);
}
static void bi_complete(struct bio *bio, int error)
{
	complete((struct completion*)bio->bi_private);
}

int sync_page_io(mdk_rdev_t *rdev, sector_t sector, int size,
		 struct page *page, int rw)
{
	struct bio *bio = bio_alloc(GFP_NOIO, 1);
	struct completion event;
	int ret;

	rw |= REQ_SYNC | REQ_UNPLUG;

	bio->bi_bdev = rdev->bdev;
	bio->bi_sector = sector;
	bio_add_page(bio, page, size, 0);
	init_completion(&event);
	bio->bi_private = &event;
	bio->bi_end_io = bi_complete;
	submit_bio(rw, bio);
	wait_for_completion(&event);

	ret = test_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL_GPL(sync_page_io);
static int read_disk_sb(mdk_rdev_t * rdev, int size)
{
	char b[BDEVNAME_SIZE];
	if (!rdev->sb_page) {
		MD_BUG();
		return -EINVAL;
	}

	if (rdev->sb_loaded)
		return 0;

	if (!sync_page_io(rdev, rdev->sb_start, size, rdev->sb_page, READ))
		goto fail;
	rdev->sb_loaded = 1;
	return 0;

fail:
	printk(KERN_WARNING "md: disabled device %s, could not read superblock.\n",
		bdevname(rdev->bdev,b));
	return -EINVAL;
}
static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	return	sb1->set_uuid0 == sb2->set_uuid0 &&
		sb1->set_uuid1 == sb2->set_uuid1 &&
		sb1->set_uuid2 == sb2->set_uuid2 &&
		sb1->set_uuid3 == sb2->set_uuid3;
}

static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	int ret;
	mdp_super_t *tmp1, *tmp2;

	tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
	tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);

	if (!tmp1 || !tmp2) {
		ret = 0;
		printk(KERN_INFO "md.c sb_equal(): failed to allocate memory!\n");
		goto abort;
	}

	*tmp1 = *sb1;
	*tmp2 = *sb2;

	/*
	 * nr_disks is not constant
	 */
	tmp1->nr_disks = 0;
	tmp2->nr_disks = 0;

	ret = (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4) == 0);
abort:
	kfree(tmp1);
	kfree(tmp2);
	return ret;
}
static u32 md_csum_fold(u32 csum)
{
	csum = (csum & 0xffff) + (csum >> 16);
	return (csum & 0xffff) + (csum >> 16);
}
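/*
 * Worked example (editor's note): md_csum_fold(0x89ab1234) computes
 * 0x1234 + 0x89ab = 0x9bdf in the first round; the second round adds a
 * zero carry, so the 32-bit sum folds to the 16-bit value 0x9bdf.
 */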
static unsigned int calc_sb_csum(mdp_super_t * sb)
{
	u64 newcsum = 0;
	u32 *sb32 = (u32*)sb;
	int i;
	unsigned int disk_csum, csum;

	disk_csum = sb->sb_csum;
	sb->sb_csum = 0;

	for (i = 0; i < MD_SB_BYTES/4 ; i++)
		newcsum += sb32[i];
	csum = (newcsum & 0xffffffff) + (newcsum>>32);

#ifdef CONFIG_ALPHA
	/* This used to use csum_partial, which was wrong for several
	 * reasons including that different results are returned on
	 * different architectures.  It isn't critical that we get exactly
	 * the same return value as before (we always csum_fold before
	 * testing, and that removes any differences).  However as we
	 * know that csum_partial always returned a 16bit value on
	 * alphas, do a fold to maximise conformity to previous behaviour.
	 */
	sb->sb_csum = md_csum_fold(disk_csum);
#else
	sb->sb_csum = disk_csum;
#endif
	return csum;
}
/*
 * Handle superblock details.
 * We want to be able to handle multiple superblock formats
 * so we have a common interface to them all, and an array of
 * different handlers.
 * We rely on user-space to write the initial superblock, and support
 * reading and updating of superblocks.
 * Interface methods are:
 *   int load_super(mdk_rdev_t *dev, mdk_rdev_t *refdev, int minor_version)
 *      loads and validates a superblock on dev.
 *      if refdev != NULL, compare superblocks on both devices
 *    Return:
 *      0 - dev has a superblock that is compatible with refdev
 *      1 - dev has a superblock that is compatible and newer than refdev
 *          so dev should be used as the refdev in future
 *      -EINVAL superblock incompatible or invalid
 *      -othererror e.g. -EIO
 *
 *   int validate_super(mddev_t *mddev, mdk_rdev_t *dev)
 *      Verify that dev is acceptable into mddev.
 *       The first time, mddev->raid_disks will be 0, and data from
 *       dev should be merged in.  Subsequent calls check that dev
 *       is new enough.  Return 0 or -EINVAL
 *
 *   void sync_super(mddev_t *mddev, mdk_rdev_t *dev)
 *     Update the superblock for rdev with data in mddev
 *     This does not write to disc.
 *
 */

struct super_type  {
	char		    *name;
	struct module	    *owner;
	int		    (*load_super)(mdk_rdev_t *rdev, mdk_rdev_t *refdev,
					  int minor_version);
	int		    (*validate_super)(mddev_t *mddev, mdk_rdev_t *rdev);
	void		    (*sync_super)(mddev_t *mddev, mdk_rdev_t *rdev);
	unsigned long long  (*rdev_size_change)(mdk_rdev_t *rdev,
						sector_t num_sectors);
};
/*
 * Check that the given mddev has no bitmap.
 *
 * This function is called from the run method of all personalities that do not
 * support bitmaps. It prints an error message and returns non-zero if mddev
 * has a bitmap. Otherwise, it returns 0.
 *
 */
int md_check_no_bitmap(mddev_t *mddev)
{
	if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset)
		return 0;
	printk(KERN_ERR "%s: bitmaps are not supported for %s\n",
		mdname(mddev), mddev->pers->name);
	return 1;
}
EXPORT_SYMBOL(md_check_no_bitmap);
/*
 * load_super for 0.90.0
 */
static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
{
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	mdp_super_t *sb;
	int ret;

	/*
	 * Calculate the position of the superblock (512byte sectors),
	 * it's at the end of the disk.
	 *
	 * It also happens to be a multiple of 4Kb.
	 */
	rdev->sb_start = calc_dev_sboffset(rdev->bdev);

	ret = read_disk_sb(rdev, MD_SB_BYTES);
	if (ret) return ret;

	ret = -EINVAL;

	bdevname(rdev->bdev, b);
	sb = (mdp_super_t*)page_address(rdev->sb_page);

	if (sb->md_magic != MD_SB_MAGIC) {
		printk(KERN_ERR "md: invalid raid superblock magic on %s\n",
		       b);
		goto abort;
	}

	if (sb->major_version != 0 ||
	    sb->minor_version < 90 ||
	    sb->minor_version > 91) {
		printk(KERN_WARNING "Bad version number %d.%d on %s\n",
			sb->major_version, sb->minor_version,
			b);
		goto abort;
	}

	if (sb->raid_disks <= 0)
		goto abort;

	if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
		printk(KERN_WARNING "md: invalid superblock checksum on %s\n",
			b);
		goto abort;
	}

	rdev->preferred_minor = sb->md_minor;
	rdev->data_offset = 0;
	rdev->sb_size = MD_SB_BYTES;

	if (sb->level == LEVEL_MULTIPATH)
		rdev->desc_nr = -1;
	else
		rdev->desc_nr = sb->this_disk.number;

	if (!refdev) {
		ret = 1;
	} else {
		__u64 ev1, ev2;
		mdp_super_t *refsb = (mdp_super_t*)page_address(refdev->sb_page);
		if (!uuid_equal(refsb, sb)) {
			printk(KERN_WARNING "md: %s has different UUID to %s\n",
				b, bdevname(refdev->bdev,b2));
			goto abort;
		}
		if (!sb_equal(refsb, sb)) {
			printk(KERN_WARNING "md: %s has same UUID"
			       " but different superblock to %s\n",
			       b, bdevname(refdev->bdev, b2));
			goto abort;
		}
		ev1 = md_event(sb);
		ev2 = md_event(refsb);
		if (ev1 > ev2)
			ret = 1;
		else
			ret = 0;
	}
	rdev->sectors = rdev->sb_start;

	if (rdev->sectors < sb->size * 2 && sb->level > 1)
		/* "this cannot possibly happen" ... */
		ret = -EINVAL;

 abort:
	return ret;
}
/*
 * validate_super for 0.90.0
 */
static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
{
	mdp_disk_t *desc;
	mdp_super_t *sb = (mdp_super_t *)page_address(rdev->sb_page);
	__u64 ev1 = md_event(sb);

	rdev->raid_disk = -1;
	clear_bit(Faulty, &rdev->flags);
	clear_bit(In_sync, &rdev->flags);
	clear_bit(WriteMostly, &rdev->flags);

	if (mddev->raid_disks == 0) {
		mddev->major_version = 0;
		mddev->minor_version = sb->minor_version;
		mddev->patch_version = sb->patch_version;
		mddev->external = 0;
		mddev->chunk_sectors = sb->chunk_size >> 9;
		mddev->ctime = sb->ctime;
		mddev->utime = sb->utime;
		mddev->level = sb->level;
		mddev->clevel[0] = 0;
		mddev->layout = sb->layout;
		mddev->raid_disks = sb->raid_disks;
		mddev->dev_sectors = sb->size * 2;
		mddev->events = ev1;
		mddev->bitmap_info.offset = 0;
		mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;

		if (mddev->minor_version >= 91) {
			mddev->reshape_position = sb->reshape_position;
			mddev->delta_disks = sb->delta_disks;
			mddev->new_level = sb->new_level;
			mddev->new_layout = sb->new_layout;
			mddev->new_chunk_sectors = sb->new_chunk >> 9;
		} else {
			mddev->reshape_position = MaxSector;
			mddev->delta_disks = 0;
			mddev->new_level = mddev->level;
			mddev->new_layout = mddev->layout;
			mddev->new_chunk_sectors = mddev->chunk_sectors;
		}

		if (sb->state & (1<<MD_SB_CLEAN))
			mddev->recovery_cp = MaxSector;
		else {
			if (sb->events_hi == sb->cp_events_hi &&
			    sb->events_lo == sb->cp_events_lo) {
				mddev->recovery_cp = sb->recovery_cp;
			} else
				mddev->recovery_cp = 0;
		}

		memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
		memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
		memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
		memcpy(mddev->uuid+12,&sb->set_uuid3, 4);

		mddev->max_disks = MD_SB_DISKS;

		if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
		    mddev->bitmap_info.file == NULL)
			mddev->bitmap_info.offset =
				mddev->bitmap_info.default_offset;

	} else if (mddev->pers == NULL) {
		/* Insist on a good event counter while assembling, except
		 * for spares (which don't need an event count) */
		++ev1;
		if (sb->disks[rdev->desc_nr].state & (
			    (1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE)))
			if (ev1 < mddev->events)
				return -EINVAL;
	} else if (mddev->bitmap) {
		/* if adding to array with a bitmap, then we can accept an
		 * older device ... but not too old.
		 */
		if (ev1 < mddev->bitmap->events_cleared)
			return 0;
	} else {
		if (ev1 < mddev->events)
			/* just a hot-add of a new device, leave raid_disk at -1 */
			return 0;
	}

	if (mddev->level != LEVEL_MULTIPATH) {
		desc = sb->disks + rdev->desc_nr;

		if (desc->state & (1<<MD_DISK_FAULTY))
			set_bit(Faulty, &rdev->flags);
		else if (desc->state & (1<<MD_DISK_SYNC) /* &&
			    desc->raid_disk < mddev->raid_disks */) {
			set_bit(In_sync, &rdev->flags);
			rdev->raid_disk = desc->raid_disk;
		} else if (desc->state & (1<<MD_DISK_ACTIVE)) {
			/* active but not in sync implies recovery up to
			 * reshape position.  We don't know exactly where
			 * that is, so set to zero for now */
			if (mddev->minor_version >= 91) {
				rdev->recovery_offset = 0;
				rdev->raid_disk = desc->raid_disk;
			}
		}
		if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
			set_bit(WriteMostly, &rdev->flags);
	} else /* MULTIPATH are always insync */
		set_bit(In_sync, &rdev->flags);
	return 0;
}
/*
 * sync_super for 0.90.0
 */
static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
{
	mdp_super_t *sb;
	mdk_rdev_t *rdev2;
	int next_spare = mddev->raid_disks;

	/* make rdev->sb match mddev data..
	 *
	 * 1/ zero out disks
	 * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
	 * 3/ any empty disks < next_spare become removed
	 *
	 * disks[0] gets initialised to REMOVED because
	 * we cannot be sure from other fields if it has
	 * been initialised or not.
	 */
	int i;
	int active=0, working=0,failed=0,spare=0,nr_disks=0;

	rdev->sb_size = MD_SB_BYTES;

	sb = (mdp_super_t*)page_address(rdev->sb_page);

	memset(sb, 0, sizeof(*sb));

	sb->md_magic = MD_SB_MAGIC;
	sb->major_version = mddev->major_version;
	sb->patch_version = mddev->patch_version;
	sb->gvalid_words  = 0; /* ignored */
	memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
	memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
	memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
	memcpy(&sb->set_uuid3, mddev->uuid+12,4);

	sb->ctime = mddev->ctime;
	sb->level = mddev->level;
	sb->size = mddev->dev_sectors / 2;
	sb->raid_disks = mddev->raid_disks;
	sb->md_minor = mddev->md_minor;
	sb->not_persistent = 0;
	sb->utime = mddev->utime;
	sb->state = 0;
	sb->events_hi = (mddev->events>>32);
	sb->events_lo = (u32)mddev->events;

	if (mddev->reshape_position == MaxSector)
		sb->minor_version = 90;
	else {
		sb->minor_version = 91;
		sb->reshape_position = mddev->reshape_position;
		sb->new_level = mddev->new_level;
		sb->delta_disks = mddev->delta_disks;
		sb->new_layout = mddev->new_layout;
		sb->new_chunk = mddev->new_chunk_sectors << 9;
	}
	mddev->minor_version = sb->minor_version;
	if (mddev->in_sync) {
		sb->recovery_cp = mddev->recovery_cp;
		sb->cp_events_hi = (mddev->events>>32);
		sb->cp_events_lo = (u32)mddev->events;
		if (mddev->recovery_cp == MaxSector)
			sb->state = (1<< MD_SB_CLEAN);
	} else
		sb->recovery_cp = 0;

	sb->layout = mddev->layout;
	sb->chunk_size = mddev->chunk_sectors << 9;

	if (mddev->bitmap && mddev->bitmap_info.file == NULL)
		sb->state |= (1<<MD_SB_BITMAP_PRESENT);

	sb->disks[0].state = (1<<MD_DISK_REMOVED);
	list_for_each_entry(rdev2, &mddev->disks, same_set) {
		mdp_disk_t *d;
		int desc_nr;
		int is_active = test_bit(In_sync, &rdev2->flags);

		if (rdev2->raid_disk >= 0 &&
		    sb->minor_version >= 91)
			/* we have nowhere to store the recovery_offset,
			 * but if it is not below the reshape_position,
			 * we can piggy-back on that.
			 */
			is_active = 1;
		if (rdev2->raid_disk < 0 ||
		    test_bit(Faulty, &rdev2->flags))
			is_active = 0;
		if (is_active)
			desc_nr = rdev2->raid_disk;
		else
			desc_nr = next_spare++;
		rdev2->desc_nr = desc_nr;
		d = &sb->disks[rdev2->desc_nr];
		nr_disks++;
		d->number = rdev2->desc_nr;
		d->major = MAJOR(rdev2->bdev->bd_dev);
		d->minor = MINOR(rdev2->bdev->bd_dev);
		if (is_active)
			d->raid_disk = rdev2->raid_disk;
		else
			d->raid_disk = rdev2->desc_nr; /* compatibility */
		if (test_bit(Faulty, &rdev2->flags))
			d->state = (1<<MD_DISK_FAULTY);
		else if (is_active) {
			d->state = (1<<MD_DISK_ACTIVE);
			if (test_bit(In_sync, &rdev2->flags))
				d->state |= (1<<MD_DISK_SYNC);
			active++;
			working++;
		} else {
			d->state = 0;
			spare++;
			working++;
		}
		if (test_bit(WriteMostly, &rdev2->flags))
			d->state |= (1<<MD_DISK_WRITEMOSTLY);
	}
	/* now set the "removed" and "faulty" bits on any missing devices */
	for (i=0 ; i < mddev->raid_disks ; i++) {
		mdp_disk_t *d = &sb->disks[i];
		if (d->state == 0 && d->number == 0) {
			d->number = i;
			d->raid_disk = i;
			d->state = (1<<MD_DISK_REMOVED);
			d->state |= (1<<MD_DISK_FAULTY);
			failed++;
		}
	}
	sb->nr_disks = nr_disks;
	sb->active_disks = active;
	sb->working_disks = working;
	sb->failed_disks = failed;
	sb->spare_disks = spare;

	sb->this_disk = sb->disks[rdev->desc_nr];
	sb->sb_csum = calc_sb_csum(sb);
}
/*
 * rdev_size_change for 0.90.0
 */
static unsigned long long
super_90_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
{
	if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
		return 0; /* component must fit device */
	if (rdev->mddev->bitmap_info.offset)
		return 0; /* can't move bitmap */
	rdev->sb_start = calc_dev_sboffset(rdev->bdev);
	if (!num_sectors || num_sectors > rdev->sb_start)
		num_sectors = rdev->sb_start;
	md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
		       rdev->sb_page);
	md_super_wait(rdev->mddev);
	return num_sectors / 2; /* kB for sysfs */
}
/*
 * version 1 superblock
 */

static __le32 calc_sb_1_csum(struct mdp_superblock_1 * sb)
{
	__le32 disk_csum;
	u32 csum;
	unsigned long long newcsum;
	int size = 256 + le32_to_cpu(sb->max_dev)*2;
	__le32 *isuper = (__le32*)sb;
	int i;

	disk_csum = sb->sb_csum;
	sb->sb_csum = 0;
	newcsum = 0;
	for (i=0; size>=4; size -= 4 )
		newcsum += le32_to_cpu(*isuper++);

	if (size == 2)
		newcsum += le16_to_cpu(*(__le16*) isuper);

	csum = (newcsum & 0xffffffff) + (newcsum >> 32);
	sb->sb_csum = disk_csum;
	return cpu_to_le32(csum);
}
static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
{
	struct mdp_superblock_1 *sb;
	int ret;
	sector_t sb_start;
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	int bmask;

	/*
	 * Calculate the position of the superblock in 512byte sectors.
	 * It is always aligned to a 4K boundary and
	 * depending on minor_version, it can be:
	 * 0: At least 8K, but less than 12K, from end of device
	 * 1: At start of device
	 * 2: 4K from start of device.
	 */
	switch(minor_version) {
	case 0:
		sb_start = rdev->bdev->bd_inode->i_size >> 9;
		sb_start -= 8*2;
		sb_start &= ~(sector_t)(4*2-1);
		break;
	case 1:
		sb_start = 0;
		break;
	case 2:
		sb_start = 8;
		break;
	default:
		return -EINVAL;
	}
	rdev->sb_start = sb_start;

	/* superblock is rarely larger than 1K, but it can be larger,
	 * and it is safe to read 4k, so we do that
	 */
	ret = read_disk_sb(rdev, 4096);
	if (ret) return ret;

	sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);

	if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
	    sb->major_version != cpu_to_le32(1) ||
	    le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
	    le64_to_cpu(sb->super_offset) != rdev->sb_start ||
	    (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
		return -EINVAL;

	if (calc_sb_1_csum(sb) != sb->sb_csum) {
		printk("md: invalid superblock checksum on %s\n",
			bdevname(rdev->bdev,b));
		return -EINVAL;
	}
	if (le64_to_cpu(sb->data_size) < 10) {
		printk("md: data_size too small on %s\n",
		       bdevname(rdev->bdev,b));
		return -EINVAL;
	}

	rdev->preferred_minor = 0xffff;
	rdev->data_offset = le64_to_cpu(sb->data_offset);
	atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));

	rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
	bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
	if (rdev->sb_size & bmask)
		rdev->sb_size = (rdev->sb_size | bmask) + 1;

	if (minor_version
	    && rdev->data_offset < sb_start + (rdev->sb_size/512))
		return -EINVAL;

	if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
		rdev->desc_nr = -1;
	else
		rdev->desc_nr = le32_to_cpu(sb->dev_number);

	if (!refdev) {
		ret = 1;
	} else {
		__u64 ev1, ev2;
		struct mdp_superblock_1 *refsb =
			(struct mdp_superblock_1*)page_address(refdev->sb_page);

		if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
		    sb->level != refsb->level ||
		    sb->layout != refsb->layout ||
		    sb->chunksize != refsb->chunksize) {
			printk(KERN_WARNING "md: %s has strangely different"
				" superblock to %s\n",
				bdevname(rdev->bdev,b),
				bdevname(refdev->bdev,b2));
			return -EINVAL;
		}
		ev1 = le64_to_cpu(sb->events);
		ev2 = le64_to_cpu(refsb->events);

		if (ev1 > ev2)
			ret = 1;
		else
			ret = 0;
	}
	if (minor_version)
		rdev->sectors = (rdev->bdev->bd_inode->i_size >> 9) -
			le64_to_cpu(sb->data_offset);
	else
		rdev->sectors = rdev->sb_start;
	if (rdev->sectors < le64_to_cpu(sb->data_size))
		return -EINVAL;
	rdev->sectors = le64_to_cpu(sb->data_size);
	if (le64_to_cpu(sb->size) > rdev->sectors)
		return -EINVAL;
	return ret;
}
static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
{
	struct mdp_superblock_1 *sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);
	__u64 ev1 = le64_to_cpu(sb->events);

	rdev->raid_disk = -1;
	clear_bit(Faulty, &rdev->flags);
	clear_bit(In_sync, &rdev->flags);
	clear_bit(WriteMostly, &rdev->flags);

	if (mddev->raid_disks == 0) {
		mddev->major_version = 1;
		mddev->patch_version = 0;
		mddev->external = 0;
		mddev->chunk_sectors = le32_to_cpu(sb->chunksize);
		mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1);
		mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1);
		mddev->level = le32_to_cpu(sb->level);
		mddev->clevel[0] = 0;
		mddev->layout = le32_to_cpu(sb->layout);
		mddev->raid_disks = le32_to_cpu(sb->raid_disks);
		mddev->dev_sectors = le64_to_cpu(sb->size);
		mddev->events = ev1;
		mddev->bitmap_info.offset = 0;
		mddev->bitmap_info.default_offset = 1024 >> 9;

		mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
		memcpy(mddev->uuid, sb->set_uuid, 16);

		mddev->max_disks =  (4096-256)/2;

		if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
		    mddev->bitmap_info.file == NULL )
			mddev->bitmap_info.offset =
				(__s32)le32_to_cpu(sb->bitmap_offset);

		if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
			mddev->reshape_position = le64_to_cpu(sb->reshape_position);
			mddev->delta_disks = le32_to_cpu(sb->delta_disks);
			mddev->new_level = le32_to_cpu(sb->new_level);
			mddev->new_layout = le32_to_cpu(sb->new_layout);
			mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk);
		} else {
			mddev->reshape_position = MaxSector;
			mddev->delta_disks = 0;
			mddev->new_level = mddev->level;
			mddev->new_layout = mddev->layout;
			mddev->new_chunk_sectors = mddev->chunk_sectors;
		}

	} else if (mddev->pers == NULL) {
		/* Insist on a good event counter while assembling, except for
		 * spares (which don't need an event count) */
		++ev1;
		if (rdev->desc_nr >= 0 &&
		    rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
		    le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < 0xfffe)
			if (ev1 < mddev->events)
				return -EINVAL;
	} else if (mddev->bitmap) {
		/* If adding to array with a bitmap, then we can accept an
		 * older device, but not too old.
		 */
		if (ev1 < mddev->bitmap->events_cleared)
			return 0;
	} else {
		if (ev1 < mddev->events)
			/* just a hot-add of a new device, leave raid_disk at -1 */
			return 0;
	}
	if (mddev->level != LEVEL_MULTIPATH) {
		int role;
		if (rdev->desc_nr < 0 ||
		    rdev->desc_nr >= le32_to_cpu(sb->max_dev)) {
			role = 0xffff;
			rdev->desc_nr = -1;
		} else
			role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
		switch(role) {
		case 0xffff: /* spare */
			break;
		case 0xfffe: /* faulty */
			set_bit(Faulty, &rdev->flags);
			break;
		default:
			if ((le32_to_cpu(sb->feature_map) &
			     MD_FEATURE_RECOVERY_OFFSET))
				rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
			else
				set_bit(In_sync, &rdev->flags);
			rdev->raid_disk = role;
			break;
		}
		if (sb->devflags & WriteMostly1)
			set_bit(WriteMostly, &rdev->flags);
	} else /* MULTIPATH are always insync */
		set_bit(In_sync, &rdev->flags);

	return 0;
}
static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
{
	struct mdp_superblock_1 *sb;
	mdk_rdev_t *rdev2;
	int max_dev, i;
	/* make rdev->sb match mddev and rdev data. */

	sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);

	sb->feature_map = 0;
	sb->pad0 = 0;
	sb->recovery_offset = cpu_to_le64(0);
	memset(sb->pad1, 0, sizeof(sb->pad1));
	memset(sb->pad2, 0, sizeof(sb->pad2));
	memset(sb->pad3, 0, sizeof(sb->pad3));

	sb->utime = cpu_to_le64((__u64)mddev->utime);
	sb->events = cpu_to_le64(mddev->events);
	if (mddev->in_sync)
		sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
	else
		sb->resync_offset = cpu_to_le64(0);

	sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));

	sb->raid_disks = cpu_to_le32(mddev->raid_disks);
	sb->size = cpu_to_le64(mddev->dev_sectors);
	sb->chunksize = cpu_to_le32(mddev->chunk_sectors);
	sb->level = cpu_to_le32(mddev->level);
	sb->layout = cpu_to_le32(mddev->layout);

	if (mddev->bitmap && mddev->bitmap_info.file == NULL) {
		sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset);
		sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
	}

	if (rdev->raid_disk >= 0 &&
	    !test_bit(In_sync, &rdev->flags)) {
		sb->feature_map |=
			cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
		sb->recovery_offset =
			cpu_to_le64(rdev->recovery_offset);
	}

	if (mddev->reshape_position != MaxSector) {
		sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
		sb->reshape_position = cpu_to_le64(mddev->reshape_position);
		sb->new_layout = cpu_to_le32(mddev->new_layout);
		sb->delta_disks = cpu_to_le32(mddev->delta_disks);
		sb->new_level = cpu_to_le32(mddev->new_level);
		sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors);
	}

	max_dev = 0;
	list_for_each_entry(rdev2, &mddev->disks, same_set)
		if (rdev2->desc_nr+1 > max_dev)
			max_dev = rdev2->desc_nr+1;

	if (max_dev > le32_to_cpu(sb->max_dev)) {
		int bmask;
		sb->max_dev = cpu_to_le32(max_dev);
		rdev->sb_size = max_dev * 2 + 256;
		bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
		if (rdev->sb_size & bmask)
			rdev->sb_size = (rdev->sb_size | bmask) + 1;
	} else
		max_dev = le32_to_cpu(sb->max_dev);

	for (i=0; i<max_dev;i++)
		sb->dev_roles[i] = cpu_to_le16(0xfffe);

	list_for_each_entry(rdev2, &mddev->disks, same_set) {
		i = rdev2->desc_nr;
		if (test_bit(Faulty, &rdev2->flags))
			sb->dev_roles[i] = cpu_to_le16(0xfffe);
		else if (test_bit(In_sync, &rdev2->flags))
			sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
		else if (rdev2->raid_disk >= 0)
			sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
		else
			sb->dev_roles[i] = cpu_to_le16(0xffff);
	}

	sb->sb_csum = calc_sb_1_csum(sb);
}
static unsigned long long
super_1_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
{
	struct mdp_superblock_1 *sb;
	sector_t max_sectors;
	if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
		return 0; /* component must fit device */
	if (rdev->sb_start < rdev->data_offset) {
		/* minor versions 1 and 2; superblock before data */
		max_sectors = rdev->bdev->bd_inode->i_size >> 9;
		max_sectors -= rdev->data_offset;
		if (!num_sectors || num_sectors > max_sectors)
			num_sectors = max_sectors;
	} else if (rdev->mddev->bitmap_info.offset) {
		/* minor version 0 with bitmap we can't move */
		return 0;
	} else {
		/* minor version 0; superblock after data */
		sector_t sb_start;
		sb_start = (rdev->bdev->bd_inode->i_size >> 9) - 8*2;
		sb_start &= ~(sector_t)(4*2 - 1);
		max_sectors = rdev->sectors + sb_start - rdev->sb_start;
		if (!num_sectors || num_sectors > max_sectors)
			num_sectors = max_sectors;
		rdev->sb_start = sb_start;
	}
	sb = (struct mdp_superblock_1 *) page_address(rdev->sb_page);
	sb->data_size = cpu_to_le64(num_sectors);
	sb->super_offset = cpu_to_le64(rdev->sb_start);
	sb->sb_csum = calc_sb_1_csum(sb);
	md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
		       rdev->sb_page);
	md_super_wait(rdev->mddev);
	return num_sectors / 2; /* kB for sysfs */
}
static struct super_type super_types[] = {
	[0] = {
		.name	= "0.90.0",
		.owner	= THIS_MODULE,
		.load_super	    = super_90_load,
		.validate_super	    = super_90_validate,
		.sync_super	    = super_90_sync,
		.rdev_size_change   = super_90_rdev_size_change,
	},
	[1] = {
		.name	= "md-1",
		.owner	= THIS_MODULE,
		.load_super	    = super_1_load,
		.validate_super	    = super_1_validate,
		.sync_super	    = super_1_sync,
		.rdev_size_change   = super_1_rdev_size_change,
	},
};
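/*
 * Editor's note: this table is indexed by mddev->major_version, e.g.
 * super_types[mddev->major_version].sync_super(mddev, rdev) in sync_sbs()
 * below; all format-specific superblock handling is reached through these
 * four methods.
 */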
static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2)
{
	mdk_rdev_t *rdev, *rdev2;

	rcu_read_lock();
	rdev_for_each_rcu(rdev, mddev1)
		rdev_for_each_rcu(rdev2, mddev2)
			if (rdev->bdev->bd_contains ==
			    rdev2->bdev->bd_contains) {
				rcu_read_unlock();
				return 1;
			}
	rcu_read_unlock();
	return 0;
}

static LIST_HEAD(pending_raid_disks);
/*
 * Try to register data integrity profile for an mddev
 *
 * This is called when an array is started and after a disk has been kicked
 * from the array. It only succeeds if all working and active component devices
 * are integrity capable with matching profiles.
 */
int md_integrity_register(mddev_t *mddev)
{
	mdk_rdev_t *rdev, *reference = NULL;

	if (list_empty(&mddev->disks))
		return 0; /* nothing to do */
	if (blk_get_integrity(mddev->gendisk))
		return 0; /* already registered */
	list_for_each_entry(rdev, &mddev->disks, same_set) {
		/* skip spares and non-functional disks */
		if (test_bit(Faulty, &rdev->flags))
			continue;
		if (rdev->raid_disk < 0)
			continue;
		/*
		 * If at least one rdev is not integrity capable, we can not
		 * enable data integrity for the md device.
		 */
		if (!bdev_get_integrity(rdev->bdev))
			return -EINVAL;
		if (!reference) {
			/* Use the first rdev as the reference */
			reference = rdev;
			continue;
		}
		/* does this rdev's profile match the reference profile? */
		if (blk_integrity_compare(reference->bdev->bd_disk,
				rdev->bdev->bd_disk) < 0)
			return -EINVAL;
	}
	/*
	 * All component devices are integrity capable and have matching
	 * profiles, register the common profile for the md device.
	 */
	if (blk_integrity_register(mddev->gendisk,
			bdev_get_integrity(reference->bdev)) != 0) {
		printk(KERN_ERR "md: failed to register integrity for %s\n",
			mdname(mddev));
		return -EINVAL;
	}
	printk(KERN_NOTICE "md: data integrity on %s enabled\n",
		mdname(mddev));
	return 0;
}
EXPORT_SYMBOL(md_integrity_register);
/* Disable data integrity if non-capable/non-matching disk is being added */
void md_integrity_add_rdev(mdk_rdev_t *rdev, mddev_t *mddev)
{
	struct blk_integrity *bi_rdev = bdev_get_integrity(rdev->bdev);
	struct blk_integrity *bi_mddev = blk_get_integrity(mddev->gendisk);

	if (!bi_mddev) /* nothing to do */
		return;
	if (rdev->raid_disk < 0) /* skip spares */
		return;
	if (bi_rdev && blk_integrity_compare(mddev->gendisk,
					     rdev->bdev->bd_disk) >= 0)
		return;
	printk(KERN_NOTICE "disabling data integrity on %s\n", mdname(mddev));
	blk_integrity_unregister(mddev->gendisk);
}
EXPORT_SYMBOL(md_integrity_add_rdev);
static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
{
	char b[BDEVNAME_SIZE];
	struct kobject *ko;
	char *s;
	int err;

	if (rdev->mddev) {
		MD_BUG();
		return -EINVAL;
	}

	/* prevent duplicates */
	if (find_rdev(mddev, rdev->bdev->bd_dev))
		return -EEXIST;

	/* make sure rdev->sectors exceeds mddev->dev_sectors */
	if (rdev->sectors && (mddev->dev_sectors == 0 ||
			rdev->sectors < mddev->dev_sectors)) {
		if (mddev->pers) {
			/* Cannot change size, so fail
			 * If mddev->level <= 0, then we don't care
			 * about aligning sizes (e.g. linear)
			 */
			if (mddev->level > 0)
				return -ENOSPC;
		} else
			mddev->dev_sectors = rdev->sectors;
	}

	/* Verify rdev->desc_nr is unique.
	 * If it is -1, assign a free number, else
	 * check number is not in use
	 */
	if (rdev->desc_nr < 0) {
		int choice = 0;
		if (mddev->pers) choice = mddev->raid_disks;
		while (find_rdev_nr(mddev, choice))
			choice++;
		rdev->desc_nr = choice;
	} else {
		if (find_rdev_nr(mddev, rdev->desc_nr))
			return -EBUSY;
	}
	if (mddev->max_disks && rdev->desc_nr >= mddev->max_disks) {
		printk(KERN_WARNING "md: %s: array is limited to %d devices\n",
		       mdname(mddev), mddev->max_disks);
		return -EBUSY;
	}
	bdevname(rdev->bdev,b);
	while ( (s=strchr(b, '/')) != NULL)
		*s = '!';

	rdev->mddev = mddev;
	printk(KERN_INFO "md: bind<%s>\n", b);

	if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
		goto fail;

	ko = &part_to_dev(rdev->bdev->bd_part)->kobj;
	if (sysfs_create_link(&rdev->kobj, ko, "block"))
		/* failure here is OK */;
	rdev->sysfs_state = sysfs_get_dirent_safe(rdev->kobj.sd, "state");

	list_add_rcu(&rdev->same_set, &mddev->disks);
	bd_claim_by_disk(rdev->bdev, rdev->bdev->bd_holder, mddev->gendisk);

	/* May as well allow recovery to be retried once */
	mddev->recovery_disabled = 0;

	return 0;

 fail:
	printk(KERN_WARNING "md: failed to register dev-%s for %s\n",
	       b, mdname(mddev));
	return err;
}
static void md_delayed_delete(struct work_struct *ws)
{
	mdk_rdev_t *rdev = container_of(ws, mdk_rdev_t, del_work);
	kobject_del(&rdev->kobj);
	kobject_put(&rdev->kobj);
}

static void unbind_rdev_from_array(mdk_rdev_t * rdev)
{
	char b[BDEVNAME_SIZE];
	if (!rdev->mddev) {
		MD_BUG();
		return;
	}
	bd_release_from_disk(rdev->bdev, rdev->mddev->gendisk);
	list_del_rcu(&rdev->same_set);
	printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b));
	rdev->mddev = NULL;
	sysfs_remove_link(&rdev->kobj, "block");
	sysfs_put(rdev->sysfs_state);
	rdev->sysfs_state = NULL;
	/* We need to delay this, otherwise we can deadlock when
	 * writing to 'remove' to "dev/state".  We also need
	 * to delay it due to rcu usage.
	 */
	synchronize_rcu();
	INIT_WORK(&rdev->del_work, md_delayed_delete);
	kobject_get(&rdev->kobj);
	queue_work(md_misc_wq, &rdev->del_work);
}
/*
 * prevent the device from being mounted, repartitioned or
 * otherwise reused by a RAID array (or any other kernel
 * subsystem), by bd_claiming the device.
 */
static int lock_rdev(mdk_rdev_t *rdev, dev_t dev, int shared)
{
	int err = 0;
	struct block_device *bdev;
	char b[BDEVNAME_SIZE];

	bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE);
	if (IS_ERR(bdev)) {
		printk(KERN_ERR "md: could not open %s.\n",
			__bdevname(dev, b));
		return PTR_ERR(bdev);
	}
	err = bd_claim(bdev, shared ? (mdk_rdev_t *)lock_rdev : rdev);
	if (err) {
		printk(KERN_ERR "md: could not bd_claim %s.\n",
			bdevname(bdev, b));
		blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
		return err;
	}
	if (!shared)
		set_bit(AllReserved, &rdev->flags);
	rdev->bdev = bdev;
	return err;
}

static void unlock_rdev(mdk_rdev_t *rdev)
{
	struct block_device *bdev = rdev->bdev;
	rdev->bdev = NULL;
	if (!bdev)
		MD_BUG();
	bd_release(bdev);
	blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
}
void md_autodetect_dev(dev_t dev);

static void export_rdev(mdk_rdev_t * rdev)
{
	char b[BDEVNAME_SIZE];
	printk(KERN_INFO "md: export_rdev(%s)\n",
		bdevname(rdev->bdev,b));
	if (rdev->mddev)
		MD_BUG();
	free_disk_sb(rdev);
#ifndef MODULE
	if (test_bit(AutoDetected, &rdev->flags))
		md_autodetect_dev(rdev->bdev->bd_dev);
#endif
	unlock_rdev(rdev);
	kobject_put(&rdev->kobj);
}

static void kick_rdev_from_array(mdk_rdev_t * rdev)
{
	unbind_rdev_from_array(rdev);
	export_rdev(rdev);
}
static void export_array(mddev_t *mddev)
{
	mdk_rdev_t *rdev, *tmp;

	rdev_for_each(rdev, tmp, mddev) {
		if (!rdev->mddev) {
			MD_BUG();
			continue;
		}
		kick_rdev_from_array(rdev);
	}
	if (!list_empty(&mddev->disks))
		MD_BUG();
	mddev->raid_disks = 0;
	mddev->major_version = 0;
}
static void print_desc(mdp_disk_t *desc)
{
	printk(" DISK<N:%d,(%d,%d),R:%d,S:%d>\n", desc->number,
		desc->major,desc->minor,desc->raid_disk,desc->state);
}

static void print_sb_90(mdp_super_t *sb)
{
	int i;

	printk(KERN_INFO
		"md:  SB: (V:%d.%d.%d) ID:<%08x.%08x.%08x.%08x> CT:%08x\n",
		sb->major_version, sb->minor_version, sb->patch_version,
		sb->set_uuid0, sb->set_uuid1, sb->set_uuid2, sb->set_uuid3,
		sb->ctime);
	printk(KERN_INFO "md:     L%d S%08d ND:%d RD:%d md%d LO:%d CS:%d\n",
		sb->level, sb->size, sb->nr_disks, sb->raid_disks,
		sb->md_minor, sb->layout, sb->chunk_size);
	printk(KERN_INFO "md:     UT:%08x ST:%d AD:%d WD:%d"
		" FD:%d SD:%d CSUM:%08x E:%08lx\n",
		sb->utime, sb->state, sb->active_disks, sb->working_disks,
		sb->failed_disks, sb->spare_disks,
		sb->sb_csum, (unsigned long)sb->events_lo);

	printk(KERN_INFO);
	for (i = 0; i < MD_SB_DISKS; i++) {
		mdp_disk_t *desc;

		desc = sb->disks + i;
		if (desc->number || desc->major || desc->minor ||
		    desc->raid_disk || (desc->state && (desc->state != 4))) {
			printk("     D %2d: ", i);
			print_desc(desc);
		}
	}
	printk(KERN_INFO "md:     THIS: ");
	print_desc(&sb->this_disk);
}
static void print_sb_1(struct mdp_superblock_1 *sb)
{
	__u8 *uuid;

	uuid = sb->set_uuid;
	printk(KERN_INFO
	       "md:  SB: (V:%u) (F:0x%08x) Array-ID:<%pU>\n"
	       "md:    Name: \"%s\" CT:%llu\n",
		le32_to_cpu(sb->major_version),
		le32_to_cpu(sb->feature_map),
		uuid,
		sb->set_name,
		(unsigned long long)le64_to_cpu(sb->ctime)
		       & MD_SUPERBLOCK_1_TIME_SEC_MASK);

	uuid = sb->device_uuid;
	printk(KERN_INFO
	       "md:       L%u SZ%llu RD:%u LO:%u CS:%u DO:%llu DS:%llu SO:%llu"
			" RO:%llu\n"
	       "md:     Dev:%08x UUID: %pU\n"
	       "md:       (F:0x%08x) UT:%llu Events:%llu ResyncOffset:%llu CSUM:0x%08x\n"
	       "md:         (MaxDev:%u) \n",
		le32_to_cpu(sb->level),
		(unsigned long long)le64_to_cpu(sb->size),
		le32_to_cpu(sb->raid_disks),
		le32_to_cpu(sb->layout),
		le32_to_cpu(sb->chunksize),
		(unsigned long long)le64_to_cpu(sb->data_offset),
		(unsigned long long)le64_to_cpu(sb->data_size),
		(unsigned long long)le64_to_cpu(sb->super_offset),
		(unsigned long long)le64_to_cpu(sb->recovery_offset),
		le32_to_cpu(sb->dev_number),
		uuid,
		sb->devflags,
		(unsigned long long)le64_to_cpu(sb->utime) & MD_SUPERBLOCK_1_TIME_SEC_MASK,
		(unsigned long long)le64_to_cpu(sb->events),
		(unsigned long long)le64_to_cpu(sb->resync_offset),
		le32_to_cpu(sb->sb_csum),
		le32_to_cpu(sb->max_dev)
		);
}
static void print_rdev(mdk_rdev_t *rdev, int major_version)
{
	char b[BDEVNAME_SIZE];
	printk(KERN_INFO "md: rdev %s, Sect:%08llu F:%d S:%d DN:%u\n",
		bdevname(rdev->bdev, b), (unsigned long long)rdev->sectors,
	        test_bit(Faulty, &rdev->flags), test_bit(In_sync, &rdev->flags),
	        rdev->desc_nr);
	if (rdev->sb_loaded) {
		printk(KERN_INFO "md: rdev superblock (MJ:%d):\n", major_version);
		switch (major_version) {
		case 0:
			print_sb_90((mdp_super_t*)page_address(rdev->sb_page));
			break;
		case 1:
			print_sb_1((struct mdp_superblock_1 *)page_address(rdev->sb_page));
			break;
		}
	} else
		printk(KERN_INFO "md: no rdev superblock!\n");
}
static void md_print_devices(void)
{
	struct list_head *tmp;
	mdk_rdev_t *rdev;
	mddev_t *mddev;
	char b[BDEVNAME_SIZE];

	printk("\n");
	printk("md: **********************************\n");
	printk("md: * <COMPLETE RAID STATE PRINTOUT> *\n");
	printk("md: **********************************\n");
	for_each_mddev(mddev, tmp) {

		if (mddev->bitmap)
			bitmap_print_sb(mddev->bitmap);
		else
			printk("%s: ", mdname(mddev));
		list_for_each_entry(rdev, &mddev->disks, same_set)
			printk("<%s>", bdevname(rdev->bdev,b));
		printk("\n");

		list_for_each_entry(rdev, &mddev->disks, same_set)
			print_rdev(rdev, mddev->major_version);
	}
	printk("md: **********************************\n");
	printk("\n");
}
static void sync_sbs(mddev_t * mddev, int nospares)
{
	/* Update each superblock (in-memory image), but
	 * if we are allowed to, skip spares which already
	 * have the right event counter, or have one earlier
	 * (which would mean they aren't being marked as dirty
	 * with the rest of the array)
	 */
	mdk_rdev_t *rdev;
	list_for_each_entry(rdev, &mddev->disks, same_set) {
		if (rdev->sb_events == mddev->events ||
		    (nospares &&
		     rdev->raid_disk < 0 &&
		     rdev->sb_events+1 == mddev->events)) {
			/* Don't update this superblock */
			rdev->sb_loaded = 2;
		} else {
			super_types[mddev->major_version].
				sync_super(mddev, rdev);
			rdev->sb_loaded = 1;
		}
	}
}
static void md_update_sb(mddev_t * mddev, int force_change)
{
	mdk_rdev_t *rdev;
	int sync_req;
	int nospares = 0;

repeat:
	/* First make sure individual recovery_offsets are correct */
	list_for_each_entry(rdev, &mddev->disks, same_set) {
		if (rdev->raid_disk >= 0 &&
		    mddev->delta_disks >= 0 &&
		    !test_bit(In_sync, &rdev->flags) &&
		    mddev->curr_resync_completed > rdev->recovery_offset)
			rdev->recovery_offset = mddev->curr_resync_completed;
	}
	if (!mddev->persistent) {
		clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
		clear_bit(MD_CHANGE_DEVS, &mddev->flags);
		if (!mddev->external)
			clear_bit(MD_CHANGE_PENDING, &mddev->flags);
		wake_up(&mddev->sb_wait);
		return;
	}

	spin_lock_irq(&mddev->write_lock);

	mddev->utime = get_seconds();

	if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
		force_change = 1;
	if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags))
		/* just a clean<->dirty transition, possibly leave spares alone,
		 * though if events isn't the right even/odd, we will have to do
		 * spares after all
		 */
		nospares = 1;
	if (force_change)
		nospares = 0;
	if (mddev->degraded)
		/* If the array is degraded, then skipping spares is both
		 * dangerous and fairly pointless.
		 * Dangerous because a device that was removed from the array
		 * might have an event_count that still looks up-to-date,
		 * so it can be re-added without a resync.
		 * Pointless because if there are any spares to skip,
		 * then a recovery will happen and soon that array won't
		 * be degraded any more and the spare can go back to sleep then.
		 */
		nospares = 0;

	sync_req = mddev->in_sync;

	/* If this is just a dirty<->clean transition, and the array is clean
	 * and 'events' is odd, we can roll back to the previous clean state */
	if (nospares
	    && (mddev->in_sync && mddev->recovery_cp == MaxSector)
	    && mddev->can_decrease_events
	    && mddev->events != 1) {
		mddev->events--;
		mddev->can_decrease_events = 0;
	} else {
		/* otherwise we have to go forward and ... */
		mddev->events ++;
		mddev->can_decrease_events = nospares;
	}

	if (!mddev->events) {
		/*
		 * oops, this 64-bit counter should never wrap.
		 * Either we are in around ~1 trillion A.C., assuming
		 * 1 reboot per second, or we have a bug:
		 */
		MD_BUG();
		mddev->events --;
	}
	sync_sbs(mddev, nospares);
	spin_unlock_irq(&mddev->write_lock);

	dprintk(KERN_INFO
		"md: updating %s RAID superblock on device (in sync %d)\n",
		mdname(mddev),mddev->in_sync);

	bitmap_update_sb(mddev->bitmap);
	list_for_each_entry(rdev, &mddev->disks, same_set) {
		char b[BDEVNAME_SIZE];
		dprintk(KERN_INFO "md: ");
		if (rdev->sb_loaded != 1)
			continue; /* no noise on spare devices */
		if (test_bit(Faulty, &rdev->flags))
			dprintk("(skipping faulty ");

		dprintk("%s ", bdevname(rdev->bdev,b));
		if (!test_bit(Faulty, &rdev->flags)) {
			md_super_write(mddev,rdev,
				       rdev->sb_start, rdev->sb_size,
				       rdev->sb_page);
			dprintk(KERN_INFO "(write) %s's sb offset: %llu\n",
				bdevname(rdev->bdev,b),
				(unsigned long long)rdev->sb_start);
			rdev->sb_events = mddev->events;

		} else
			dprintk(")\n");
		if (mddev->level == LEVEL_MULTIPATH)
			/* only need to write one superblock... */
			break;
	}
	md_super_wait(mddev);
	/* if there was a failure, MD_CHANGE_DEVS was set, and we re-write super */

	spin_lock_irq(&mddev->write_lock);
	if (mddev->in_sync != sync_req ||
	    test_bit(MD_CHANGE_DEVS, &mddev->flags)) {
		/* have to write it out again */
		spin_unlock_irq(&mddev->write_lock);
		goto repeat;
	}
	clear_bit(MD_CHANGE_PENDING, &mddev->flags);
	spin_unlock_irq(&mddev->write_lock);
	wake_up(&mddev->sb_wait);
	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
		sysfs_notify(&mddev->kobj, NULL, "sync_completed");
}
/* words written to sysfs files may, or may not, be \n terminated.
 * We want to accept either case.  For this we use cmd_match.
 */
static int cmd_match(const char *cmd, const char *str)
{
	/* See if cmd, written into a sysfs file, matches
	 * str.  They must either be the same, or cmd can
	 * have a trailing newline
	 */
	while (*cmd && *str && *cmd == *str) {
		cmd++;
		str++;
	}
	if (*cmd == '\n')
		cmd++;
	if (*str || *cmd)
		return 0;
	return 1;
}
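/*
 * Example (editor's illustration): cmd_match("faulty\n", "faulty") and
 * cmd_match("faulty", "faulty") both return 1, while
 * cmd_match("fault", "faulty") returns 0 - so `echo faulty > .../state`
 * works whether or not echo appends a trailing newline.
 */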
struct rdev_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(mdk_rdev_t *, char *);
	ssize_t (*store)(mdk_rdev_t *, const char *, size_t);
};
static ssize_t
state_show(mdk_rdev_t *rdev, char *page)
{
	char *sep = "";
	size_t len = 0;

	if (test_bit(Faulty, &rdev->flags)) {
		len+= sprintf(page+len, "%sfaulty",sep);
		sep = ",";
	}
	if (test_bit(In_sync, &rdev->flags)) {
		len += sprintf(page+len, "%sin_sync",sep);
		sep = ",";
	}
	if (test_bit(WriteMostly, &rdev->flags)) {
		len += sprintf(page+len, "%swrite_mostly",sep);
		sep = ",";
	}
	if (test_bit(Blocked, &rdev->flags)) {
		len += sprintf(page+len, "%sblocked", sep);
		sep = ",";
	}
	if (!test_bit(Faulty, &rdev->flags) &&
	    !test_bit(In_sync, &rdev->flags)) {
		len += sprintf(page+len, "%sspare", sep);
		sep = ",";
	}
	return len+sprintf(page+len, "\n");
}
2274 state_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2277 * faulty - simulates an error
2278 * remove - disconnects the device
2279 * writemostly - sets write_mostly
2280 * -writemostly - clears write_mostly
2281 * blocked - sets the Blocked flag
2282 * -blocked - clears the Blocked flag
2283 * insync - sets In_sync provided the device isn't active
2286 if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
2287 md_error(rdev->mddev, rdev);
2289 } else if (cmd_match(buf, "remove")) {
2290 if (rdev->raid_disk >= 0)
2293 mddev_t *mddev = rdev->mddev;
2294 kick_rdev_from_array(rdev);
2296 md_update_sb(mddev, 1);
2297 md_new_event(mddev);
2300 } else if (cmd_match(buf, "writemostly")) {
2301 set_bit(WriteMostly, &rdev->flags);
2303 } else if (cmd_match(buf, "-writemostly")) {
2304 clear_bit(WriteMostly, &rdev->flags);
2306 } else if (cmd_match(buf, "blocked")) {
2307 set_bit(Blocked, &rdev->flags);
2309 } else if (cmd_match(buf, "-blocked")) {
2310 clear_bit(Blocked, &rdev->flags);
2311 wake_up(&rdev->blocked_wait);
2312 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
2313 md_wakeup_thread(rdev->mddev->thread);
2316 } else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) {
2317 set_bit(In_sync, &rdev->flags);
2321 sysfs_notify_dirent_safe(rdev->sysfs_state);
2322 return err ? err : len;
2324 static struct rdev_sysfs_entry rdev_state =
2325 __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
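/*
* This attribute appears as "state" in each member device's sysfs
* directory. A minimal userspace sketch, with an illustrative path
* and device name, that simulates an error on one component:
*
*     int fd = open("/sys/block/md0/md/dev-sda1/state", O_WRONLY);
*     if (fd >= 0) {
*             write(fd, "faulty", 6);
*             close(fd);
*     }
*/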
2328 errors_show(mdk_rdev_t *rdev, char *page)
2330 return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
2334 errors_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2337 unsigned long n = simple_strtoul(buf, &e, 10);
2338 if (*buf && (*e == 0 || *e == '\n')) {
2339 atomic_set(&rdev->corrected_errors, n);
2344 static struct rdev_sysfs_entry rdev_errors =
2345 __ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store);
2348 slot_show(mdk_rdev_t *rdev, char *page)
2350 if (rdev->raid_disk < 0)
2351 return sprintf(page, "none\n");
2353 return sprintf(page, "%d\n", rdev->raid_disk);
2357 slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2362 int slot = simple_strtoul(buf, &e, 10);
2363 if (strncmp(buf, "none", 4)==0)
2365 else if (e==buf || (*e && *e != '\n'))
2367 if (rdev->mddev->pers && slot == -1) {
2368 /* Setting 'slot' on an active array requires also
2369 * updating the 'rd%d' link, and communicating
2370 * with the personality with ->hot_*_disk.
2371 * For now we only support removing
2372 * failed/spare devices. This normally happens automatically,
2373 * but not when the metadata is externally managed.
2375 if (rdev->raid_disk == -1)
2377 /* personality does all needed checks */
2378 if (rdev->mddev->pers->hot_add_disk == NULL)
2380 err = rdev->mddev->pers->
2381 hot_remove_disk(rdev->mddev, rdev->raid_disk);
2384 sprintf(nm, "rd%d", rdev->raid_disk);
2385 sysfs_remove_link(&rdev->mddev->kobj, nm);
2386 rdev->raid_disk = -1;
2387 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
2388 md_wakeup_thread(rdev->mddev->thread);
2389 } else if (rdev->mddev->pers) {
2391 /* Activating a spare .. or possibly reactivating
2392 * if we ever get bitmaps working here.
2395 if (rdev->raid_disk != -1)
2398 if (rdev->mddev->pers->hot_add_disk == NULL)
2401 list_for_each_entry(rdev2, &rdev->mddev->disks, same_set)
2402 if (rdev2->raid_disk == slot)
2405 rdev->raid_disk = slot;
2406 if (test_bit(In_sync, &rdev->flags))
2407 rdev->saved_raid_disk = slot;
2409 rdev->saved_raid_disk = -1;
2410 err = rdev->mddev->pers->
2411 hot_add_disk(rdev->mddev, rdev);
2413 rdev->raid_disk = -1;
2416 sysfs_notify_dirent_safe(rdev->sysfs_state);
2417 sprintf(nm, "rd%d", rdev->raid_disk);
2418 if (sysfs_create_link(&rdev->mddev->kobj, &rdev->kobj, nm))
2419 /* failure here is OK */;
2420 /* don't wakeup anyone, leave that to userspace. */
2422 if (slot >= rdev->mddev->raid_disks)
2424 rdev->raid_disk = slot;
2425 /* assume it is working */
2426 clear_bit(Faulty, &rdev->flags);
2427 clear_bit(WriteMostly, &rdev->flags);
2428 set_bit(In_sync, &rdev->flags);
2429 sysfs_notify_dirent_safe(rdev->sysfs_state);
2435 static struct rdev_sysfs_entry rdev_slot =
2436 __ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store);
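/*
* For example (device paths illustrative): writing "none" to a
* member's "slot" attribute hot-removes a failed or spare device
* from a running array; writing a number such as "2" to a running
* array tries to activate the device in that (free) slot, while on
* a stopped array it simply records the slot and marks the device
* in-sync.
*/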
2439 offset_show(mdk_rdev_t *rdev, char *page)
2441 return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset);
2445 offset_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2448 unsigned long long offset = simple_strtoull(buf, &e, 10);
2449 if (e==buf || (*e && *e != '\n'))
2451 if (rdev->mddev->pers && rdev->raid_disk >= 0)
2453 if (rdev->sectors && rdev->mddev->external)
2454 /* Must set offset before size, so overlap checks
2457 rdev->data_offset = offset;
2461 static struct rdev_sysfs_entry rdev_offset =
2462 __ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store);
2465 rdev_size_show(mdk_rdev_t *rdev, char *page)
2467 return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2);
2470 static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2)
2472 /* check if two start/length pairs overlap */
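/*
* Two half-open ranges [s1, s1+l1) and [s2, s2+l2) overlap exactly
* when each one starts before the other ends, i.e.:
*
*     s1 < s2 + l2  &&  s2 < s1 + l1
*/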
2480 static int strict_blocks_to_sectors(const char *buf, sector_t *sectors)
2482 unsigned long long blocks;
2485 if (strict_strtoull(buf, 10, &blocks) < 0)
2488 if (blocks & 1ULL << (8 * sizeof(blocks) - 1))
2489 return -EINVAL; /* sector conversion overflow */
2492 if (new != blocks * 2)
2493 return -EINVAL; /* unsigned long long to sector_t overflow */
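/*
* The input is in 1K blocks while md uses 512-byte sectors, so a
* successful conversion is simply sectors = blocks * 2; for example
* "1024" (1 MiB) becomes 2048 sectors. The two checks above reject
* any value whose doubling would overflow.
*/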
2500 rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2502 mddev_t *my_mddev = rdev->mddev;
2503 sector_t oldsectors = rdev->sectors;
2506 if (strict_blocks_to_sectors(buf, &sectors) < 0)
2508 if (my_mddev->pers && rdev->raid_disk >= 0) {
2509 if (my_mddev->persistent) {
2510 sectors = super_types[my_mddev->major_version].
2511 rdev_size_change(rdev, sectors);
2514 } else if (!sectors)
2515 sectors = (rdev->bdev->bd_inode->i_size >> 9) -
2518 if (sectors < my_mddev->dev_sectors)
2519 return -EINVAL; /* component must fit device */
2521 rdev->sectors = sectors;
2522 if (sectors > oldsectors && my_mddev->external) {
2523 /* need to check that all other rdevs with the same ->bdev
2524 * do not overlap. We need to unlock the mddev to avoid
2525 * a deadlock. We have already changed rdev->sectors, and if
2526 * we have to change it back, we will have the lock again.
2530 struct list_head *tmp;
2532 mddev_unlock(my_mddev);
2533 for_each_mddev(mddev, tmp) {
2537 list_for_each_entry(rdev2, &mddev->disks, same_set)
2538 if (test_bit(AllReserved, &rdev2->flags) ||
2539 (rdev->bdev == rdev2->bdev &&
2541 overlaps(rdev->data_offset, rdev->sectors,
2547 mddev_unlock(mddev);
2553 mddev_lock(my_mddev);
2555 /* Someone else could have slipped in a size
2556 * change here, but doing so is just silly.
2557 * We put oldsectors back because we *know* it is
2558 * safe, and trust userspace not to race with
2561 rdev->sectors = oldsectors;
2568 static struct rdev_sysfs_entry rdev_size =
2569 __ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);
2572 static ssize_t recovery_start_show(mdk_rdev_t *rdev, char *page)
2574 unsigned long long recovery_start = rdev->recovery_offset;
2576 if (test_bit(In_sync, &rdev->flags) ||
2577 recovery_start == MaxSector)
2578 return sprintf(page, "none\n");
2580 return sprintf(page, "%llu\n", recovery_start);
2583 static ssize_t recovery_start_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2585 unsigned long long recovery_start;
2587 if (cmd_match(buf, "none"))
2588 recovery_start = MaxSector;
2589 else if (strict_strtoull(buf, 10, &recovery_start))
2592 if (rdev->mddev->pers &&
2593 rdev->raid_disk >= 0)
2596 rdev->recovery_offset = recovery_start;
2597 if (recovery_start == MaxSector)
2598 set_bit(In_sync, &rdev->flags);
2600 clear_bit(In_sync, &rdev->flags);
2604 static struct rdev_sysfs_entry rdev_recovery_start =
2605 __ATTR(recovery_start, S_IRUGO|S_IWUSR, recovery_start_show, recovery_start_store);
2607 static struct attribute *rdev_default_attrs[] = {
2613 &rdev_recovery_start.attr,
2617 rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
2619 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
2620 mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj);
2621 mddev_t *mddev = rdev->mddev;
2627 rv = mddev ? mddev_lock(mddev) : -EBUSY;
2629 if (rdev->mddev == NULL)
2632 rv = entry->show(rdev, page);
2633 mddev_unlock(mddev);
2639 rdev_attr_store(struct kobject *kobj, struct attribute *attr,
2640 const char *page, size_t length)
2642 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
2643 mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj);
2645 mddev_t *mddev = rdev->mddev;
2649 if (!capable(CAP_SYS_ADMIN))
2651 rv = mddev ? mddev_lock(mddev): -EBUSY;
2653 if (rdev->mddev == NULL)
2656 rv = entry->store(rdev, page, length);
2657 mddev_unlock(mddev);
2662 static void rdev_free(struct kobject *ko)
2664 mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
2667 static const struct sysfs_ops rdev_sysfs_ops = {
2668 .show = rdev_attr_show,
2669 .store = rdev_attr_store,
2671 static struct kobj_type rdev_ktype = {
2672 .release = rdev_free,
2673 .sysfs_ops = &rdev_sysfs_ops,
2674 .default_attrs = rdev_default_attrs,
2677 void md_rdev_init(mdk_rdev_t *rdev)
2680 rdev->saved_raid_disk = -1;
2681 rdev->raid_disk = -1;
2683 rdev->data_offset = 0;
2684 rdev->sb_events = 0;
2685 rdev->last_read_error.tv_sec = 0;
2686 rdev->last_read_error.tv_nsec = 0;
2687 atomic_set(&rdev->nr_pending, 0);
2688 atomic_set(&rdev->read_errors, 0);
2689 atomic_set(&rdev->corrected_errors, 0);
2691 INIT_LIST_HEAD(&rdev->same_set);
2692 init_waitqueue_head(&rdev->blocked_wait);
2694 EXPORT_SYMBOL_GPL(md_rdev_init);
2696 * Import a device. If 'super_format' >= 0, then sanity check the superblock
2698 * mark the device faulty if:
2700 * - the device is nonexistent (zero size)
2701 * - the device has no valid superblock
2703 * a faulty rdev _never_ has rdev->sb set.
2705 static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_minor)
2707 char b[BDEVNAME_SIZE];
2712 rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
2714 printk(KERN_ERR "md: could not alloc mem for new device!\n");
2715 return ERR_PTR(-ENOMEM);
2719 if ((err = alloc_disk_sb(rdev)))
2722 err = lock_rdev(rdev, newdev, super_format == -2);
2726 kobject_init(&rdev->kobj, &rdev_ktype);
2728 size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
2731 "md: %s has zero or unknown size, marking faulty!\n",
2732 bdevname(rdev->bdev,b));
2737 if (super_format >= 0) {
2738 err = super_types[super_format].
2739 load_super(rdev, NULL, super_minor);
2740 if (err == -EINVAL) {
2742 "md: %s does not have a valid v%d.%d "
2743 "superblock, not importing!\n",
2744 bdevname(rdev->bdev,b),
2745 super_format, super_minor);
2750 "md: could not read %s's sb, not importing!\n",
2751 bdevname(rdev->bdev,b));
2759 if (rdev->sb_page) {
2765 return ERR_PTR(err);
2769 * Check a full RAID array for plausibility
2773 static void analyze_sbs(mddev_t * mddev)
2776 mdk_rdev_t *rdev, *freshest, *tmp;
2777 char b[BDEVNAME_SIZE];
2780 rdev_for_each(rdev, tmp, mddev)
2781 switch (super_types[mddev->major_version].
2782 load_super(rdev, freshest, mddev->minor_version)) {
2790 "md: fatal superblock inconsistency in %s"
2791 " -- removing from array\n",
2792 bdevname(rdev->bdev,b));
2793 kick_rdev_from_array(rdev);
2797 super_types[mddev->major_version].
2798 validate_super(mddev, freshest);
2801 rdev_for_each(rdev, tmp, mddev) {
2802 if (mddev->max_disks &&
2803 (rdev->desc_nr >= mddev->max_disks ||
2804 i > mddev->max_disks)) {
2806 "md: %s: %s: only %d devices permitted\n",
2807 mdname(mddev), bdevname(rdev->bdev, b),
2809 kick_rdev_from_array(rdev);
2812 if (rdev != freshest)
2813 if (super_types[mddev->major_version].
2814 validate_super(mddev, rdev)) {
2815 printk(KERN_WARNING "md: kicking non-fresh %s"
2817 bdevname(rdev->bdev,b));
2818 kick_rdev_from_array(rdev);
2821 if (mddev->level == LEVEL_MULTIPATH) {
2822 rdev->desc_nr = i++;
2823 rdev->raid_disk = rdev->desc_nr;
2824 set_bit(In_sync, &rdev->flags);
2825 } else if (rdev->raid_disk >= (mddev->raid_disks - min(0, mddev->delta_disks))) {
2826 rdev->raid_disk = -1;
2827 clear_bit(In_sync, &rdev->flags);
2832 /* Read a fixed-point number.
2833 * Numbers in sysfs attributes should be in "standard" units where
2834 * possible, so time should be in seconds.
2835 * However we internally use a much smaller unit such as
2836 * milliseconds or jiffies.
2837 * This function takes a decimal number with a possible fractional
2838 * component, and produces an integer which is the result of
2839 * multiplying that number by 10^'scale',
2840 * all without any floating-point arithmetic.
2842 int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale)
2844 unsigned long result = 0;
2846 while (isdigit(*cp) || (*cp == '.' && decimals < 0)) {
2849 else if (decimals < scale) {
2852 result = result * 10 + value;
2864 while (decimals < scale) {
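/*
* For example, with scale == 3 (multiply by 10^3):
*
*     "0.350" -> 350
*     "2"     -> 2000
*     "2.5"   -> 2500
*
* Fractional digits beyond 'scale' are silently dropped, while
* other stray characters are rejected.
*/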
2873 static void md_safemode_timeout(unsigned long data);
2876 safe_delay_show(mddev_t *mddev, char *page)
2878 int msec = (mddev->safemode_delay*1000)/HZ;
2879 return sprintf(page, "%d.%03d\n", msec/1000, msec%1000);
2882 safe_delay_store(mddev_t *mddev, const char *cbuf, size_t len)
2886 if (strict_strtoul_scaled(cbuf, &msec, 3) < 0)
2889 mddev->safemode_delay = 0;
2891 unsigned long old_delay = mddev->safemode_delay;
2892 mddev->safemode_delay = (msec*HZ)/1000;
2893 if (mddev->safemode_delay == 0)
2894 mddev->safemode_delay = 1;
2895 if (mddev->safemode_delay < old_delay)
2896 md_safemode_timeout((unsigned long)mddev);
2900 static struct md_sysfs_entry md_safe_delay =
2901 __ATTR(safe_mode_delay, S_IRUGO|S_IWUSR,safe_delay_show, safe_delay_store);
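/*
* So writing "0.350" stores msec = 350 and hence safemode_delay =
* (350*HZ)/1000 jiffies; reading the attribute back converts the
* jiffies value to the same "0.350" form (modulo rounding at
* coarse HZ).
*/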
2904 level_show(mddev_t *mddev, char *page)
2906 struct mdk_personality *p = mddev->pers;
2908 return sprintf(page, "%s\n", p->name);
2909 else if (mddev->clevel[0])
2910 return sprintf(page, "%s\n", mddev->clevel);
2911 else if (mddev->level != LEVEL_NONE)
2912 return sprintf(page, "%d\n", mddev->level);
2918 level_store(mddev_t *mddev, const char *buf, size_t len)
2922 struct mdk_personality *pers;
2927 if (mddev->pers == NULL) {
2930 if (len >= sizeof(mddev->clevel))
2932 strncpy(mddev->clevel, buf, len);
2933 if (mddev->clevel[len-1] == '\n')
2935 mddev->clevel[len] = 0;
2936 mddev->level = LEVEL_NONE;
2940 /* request to change the personality. Need to ensure:
2941 * - array is not engaged in resync/recovery/reshape
2942 * - old personality can be suspended
2943 * - new personality can take over the array.
2946 if (mddev->sync_thread ||
2947 mddev->reshape_position != MaxSector ||
2948 mddev->sysfs_active)
2951 if (!mddev->pers->quiesce) {
2952 printk(KERN_WARNING "md: %s: %s does not support online personality change\n",
2953 mdname(mddev), mddev->pers->name);
2957 /* Now find the new personality */
2958 if (len == 0 || len >= sizeof(clevel))
2960 strncpy(clevel, buf, len);
2961 if (clevel[len-1] == '\n')
2964 if (strict_strtol(clevel, 10, &level))
2967 if (request_module("md-%s", clevel) != 0)
2968 request_module("md-level-%s", clevel);
2969 spin_lock(&pers_lock);
2970 pers = find_pers(level, clevel);
2971 if (!pers || !try_module_get(pers->owner)) {
2972 spin_unlock(&pers_lock);
2973 printk(KERN_WARNING "md: personality %s not loaded\n", clevel);
2976 spin_unlock(&pers_lock);
2978 if (pers == mddev->pers) {
2979 /* Nothing to do! */
2980 module_put(pers->owner);
2983 if (!pers->takeover) {
2984 module_put(pers->owner);
2985 printk(KERN_WARNING "md: %s: %s does not support personality takeover\n",
2986 mdname(mddev), clevel);
2990 list_for_each_entry(rdev, &mddev->disks, same_set)
2991 rdev->new_raid_disk = rdev->raid_disk;
2993 /* ->takeover must set new_* and/or delta_disks
2994 * if it succeeds, and may set them when it fails.
2996 priv = pers->takeover(mddev);
2998 mddev->new_level = mddev->level;
2999 mddev->new_layout = mddev->layout;
3000 mddev->new_chunk_sectors = mddev->chunk_sectors;
3001 mddev->raid_disks -= mddev->delta_disks;
3002 mddev->delta_disks = 0;
3003 module_put(pers->owner);
3004 printk(KERN_WARNING "md: %s: %s would not accept array\n",
3005 mdname(mddev), clevel);
3006 return PTR_ERR(priv);
3009 /* Looks like we have a winner */
3010 mddev_suspend(mddev);
3011 mddev->pers->stop(mddev);
3013 if (mddev->pers->sync_request == NULL &&
3014 pers->sync_request != NULL) {
3015 /* need to add the md_redundancy_group */
3016 if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
3018 "md: cannot register extra attributes for %s\n",
3020 mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, NULL, "sync_action");
3022 if (mddev->pers->sync_request != NULL &&
3023 pers->sync_request == NULL) {
3024 /* need to remove the md_redundancy_group */
3025 if (mddev->to_remove == NULL)
3026 mddev->to_remove = &md_redundancy_group;
3029 if (mddev->pers->sync_request == NULL &&
3031 /* We are converting from a no-redundancy array
3032 * to a redundancy array and metadata is managed
3033 * externally so we need to be sure that writes
3034 * won't block due to a need to transition
3036 * until external management is started.
3039 mddev->safemode_delay = 0;
3040 mddev->safemode = 0;
3043 list_for_each_entry(rdev, &mddev->disks, same_set) {
3045 if (rdev->raid_disk < 0)
3047 if (rdev->new_raid_disk >= mddev->raid_disks)
3048 rdev->new_raid_disk = -1;
3049 if (rdev->new_raid_disk == rdev->raid_disk)
3051 sprintf(nm, "rd%d", rdev->raid_disk);
3052 sysfs_remove_link(&mddev->kobj, nm);
3054 list_for_each_entry(rdev, &mddev->disks, same_set) {
3055 if (rdev->raid_disk < 0)
3057 if (rdev->new_raid_disk == rdev->raid_disk)
3059 rdev->raid_disk = rdev->new_raid_disk;
3060 if (rdev->raid_disk < 0)
3061 clear_bit(In_sync, &rdev->flags);
3064 sprintf(nm, "rd%d", rdev->raid_disk);
3065 if(sysfs_create_link(&mddev->kobj, &rdev->kobj, nm))
3066 printk("md: cannot register %s for %s after level change\n",
3071 module_put(mddev->pers->owner);
3073 mddev->private = priv;
3074 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
3075 mddev->level = mddev->new_level;
3076 mddev->layout = mddev->new_layout;
3077 mddev->chunk_sectors = mddev->new_chunk_sectors;
3078 mddev->delta_disks = 0;
3079 if (mddev->pers->sync_request == NULL) {
3080 /* this is now an array without redundancy, so
3081 * it must always be in_sync
3084 del_timer_sync(&mddev->safemode_timer);
3087 mddev_resume(mddev);
3088 set_bit(MD_CHANGE_DEVS, &mddev->flags);
3089 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3090 md_wakeup_thread(mddev->thread);
3091 sysfs_notify(&mddev->kobj, NULL, "level");
3092 md_new_event(mddev);
3096 static struct md_sysfs_entry md_level =
3097 __ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store);
3101 layout_show(mddev_t *mddev, char *page)
3103 /* just a number, not meaningful for all levels */
3104 if (mddev->reshape_position != MaxSector &&
3105 mddev->layout != mddev->new_layout)
3106 return sprintf(page, "%d (%d)\n",
3107 mddev->new_layout, mddev->layout);
3108 return sprintf(page, "%d\n", mddev->layout);
3112 layout_store(mddev_t *mddev, const char *buf, size_t len)
3115 unsigned long n = simple_strtoul(buf, &e, 10);
3117 if (!*buf || (*e && *e != '\n'))
3122 if (mddev->pers->check_reshape == NULL)
3124 mddev->new_layout = n;
3125 err = mddev->pers->check_reshape(mddev);
3127 mddev->new_layout = mddev->layout;
3131 mddev->new_layout = n;
3132 if (mddev->reshape_position == MaxSector)
3137 static struct md_sysfs_entry md_layout =
3138 __ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store);
3142 raid_disks_show(mddev_t *mddev, char *page)
3144 if (mddev->raid_disks == 0)
3146 if (mddev->reshape_position != MaxSector &&
3147 mddev->delta_disks != 0)
3148 return sprintf(page, "%d (%d)\n", mddev->raid_disks,
3149 mddev->raid_disks - mddev->delta_disks);
3150 return sprintf(page, "%d\n", mddev->raid_disks);
3153 static int update_raid_disks(mddev_t *mddev, int raid_disks);
3156 raid_disks_store(mddev_t *mddev, const char *buf, size_t len)
3160 unsigned long n = simple_strtoul(buf, &e, 10);
3162 if (!*buf || (*e && *e != '\n'))
3166 rv = update_raid_disks(mddev, n);
3167 else if (mddev->reshape_position != MaxSector) {
3168 int olddisks = mddev->raid_disks - mddev->delta_disks;
3169 mddev->delta_disks = n - olddisks;
3170 mddev->raid_disks = n;
3172 mddev->raid_disks = n;
3173 return rv ? rv : len;
3175 static struct md_sysfs_entry md_raid_disks =
3176 __ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store);
3179 chunk_size_show(mddev_t *mddev, char *page)
3181 if (mddev->reshape_position != MaxSector &&
3182 mddev->chunk_sectors != mddev->new_chunk_sectors)
3183 return sprintf(page, "%d (%d)\n",
3184 mddev->new_chunk_sectors << 9,
3185 mddev->chunk_sectors << 9);
3186 return sprintf(page, "%d\n", mddev->chunk_sectors << 9);
3190 chunk_size_store(mddev_t *mddev, const char *buf, size_t len)
3193 unsigned long n = simple_strtoul(buf, &e, 10);
3195 if (!*buf || (*e && *e != '\n'))
3200 if (mddev->pers->check_reshape == NULL)
3202 mddev->new_chunk_sectors = n >> 9;
3203 err = mddev->pers->check_reshape(mddev);
3205 mddev->new_chunk_sectors = mddev->chunk_sectors;
3209 mddev->new_chunk_sectors = n >> 9;
3210 if (mddev->reshape_position == MaxSector)
3211 mddev->chunk_sectors = n >> 9;
3215 static struct md_sysfs_entry md_chunk_size =
3216 __ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store);
3219 resync_start_show(mddev_t *mddev, char *page)
3221 if (mddev->recovery_cp == MaxSector)
3222 return sprintf(page, "none\n");
3223 return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp);
3227 resync_start_store(mddev_t *mddev, const char *buf, size_t len)
3230 unsigned long long n = simple_strtoull(buf, &e, 10);
3234 if (cmd_match(buf, "none"))
3236 else if (!*buf || (*e && *e != '\n'))
3239 mddev->recovery_cp = n;
3242 static struct md_sysfs_entry md_resync_start =
3243 __ATTR(resync_start, S_IRUGO|S_IWUSR, resync_start_show, resync_start_store);
3246 * The array state can be:
3249 * No devices, no size, no level
3250 * Equivalent to STOP_ARRAY ioctl
3252 * May have some settings, but array is not active
3253 * all IO results in error
3254 * When written, doesn't tear down array, but just stops it
3255 * suspended (not supported yet)
3256 * All IO requests will block. The array can be reconfigured.
3257 * Writing this, if accepted, will block until array is quiescent
3259 * no resync can happen. no superblocks get written.
3260 * write requests fail
3262 * like readonly, but behaves like 'clean' on a write request.
3264 * clean - no pending writes, but otherwise active.
3265 * When written to inactive array, starts without resync
3266 * If a write request arrives then
3267 * if metadata is known, mark 'dirty' and switch to 'active'.
3268 * if not known, block and switch to write-pending
3269 * If written to an active array that has pending writes, then fails.
3271 * fully active: IO and resync can be happening.
3272 * When written to inactive array, starts with resync
3275 * clean, but writes are blocked waiting for 'active' to be written.
3278 * like active, but no writes have been seen for a while (100msec).
3281 enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active,
3282 write_pending, active_idle, bad_word};
3283 static char *array_states[] = {
3284 "clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active",
3285 "write-pending", "active-idle", NULL };
3287 static int match_word(const char *word, char **list)
3290 for (n=0; list[n]; n++)
3291 if (cmd_match(word, list[n]))
3297 array_state_show(mddev_t *mddev, char *page)
3299 enum array_state st = inactive;
3312 else if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
3314 else if (mddev->safemode)
3320 if (list_empty(&mddev->disks) &&
3321 mddev->raid_disks == 0 &&
3322 mddev->dev_sectors == 0)
3327 return sprintf(page, "%s\n", array_states[st]);
3330 static int do_md_stop(mddev_t * mddev, int ro, int is_open);
3331 static int md_set_readonly(mddev_t * mddev, int is_open);
3332 static int do_md_run(mddev_t * mddev);
3333 static int restart_array(mddev_t *mddev);
3336 array_state_store(mddev_t *mddev, const char *buf, size_t len)
3339 enum array_state st = match_word(buf, array_states);
3344 /* stopping an active array */
3345 if (atomic_read(&mddev->openers) > 0)
3347 err = do_md_stop(mddev, 0, 0);
3350 /* stopping an active array */
3352 if (atomic_read(&mddev->openers) > 0)
3354 err = do_md_stop(mddev, 2, 0);
3356 err = 0; /* already inactive */
3359 break; /* not supported yet */
3362 err = md_set_readonly(mddev, 0);
3365 set_disk_ro(mddev->gendisk, 1);
3366 err = do_md_run(mddev);
3372 err = md_set_readonly(mddev, 0);
3373 else if (mddev->ro == 1)
3374 err = restart_array(mddev);
3377 set_disk_ro(mddev->gendisk, 0);
3381 err = do_md_run(mddev);
3386 restart_array(mddev);
3387 spin_lock_irq(&mddev->write_lock);
3388 if (atomic_read(&mddev->writes_pending) == 0) {
3389 if (mddev->in_sync == 0) {
3391 if (mddev->safemode == 1)
3392 mddev->safemode = 0;
3393 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
3398 spin_unlock_irq(&mddev->write_lock);
3404 restart_array(mddev);
3405 clear_bit(MD_CHANGE_PENDING, &mddev->flags);
3406 wake_up(&mddev->sb_wait);
3410 set_disk_ro(mddev->gendisk, 0);
3411 err = do_md_run(mddev);
3416 /* these cannot be set */
3422 sysfs_notify_dirent_safe(mddev->sysfs_state);
3426 static struct md_sysfs_entry md_array_state =
3427 __ATTR(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
3430 max_corrected_read_errors_show(mddev_t *mddev, char *page) {
3431 return sprintf(page, "%d\n",
3432 atomic_read(&mddev->max_corr_read_errors));
3436 max_corrected_read_errors_store(mddev_t *mddev, const char *buf, size_t len)
3439 unsigned long n = simple_strtoul(buf, &e, 10);
3441 if (*buf && (*e == 0 || *e == '\n')) {
3442 atomic_set(&mddev->max_corr_read_errors, n);
3448 static struct md_sysfs_entry max_corr_read_errors =
3449 __ATTR(max_read_errors, S_IRUGO|S_IWUSR, max_corrected_read_errors_show,
3450 max_corrected_read_errors_store);
3453 null_show(mddev_t *mddev, char *page)
3459 new_dev_store(mddev_t *mddev, const char *buf, size_t len)
3461 /* buf must be %d:%d\n? giving major and minor numbers */
3462 /* The new device is added to the array.
3463 * If the array has a persistent superblock, we read the
3464 * superblock to initialise info and check validity.
3465 * Otherwise, only checking done is that in bind_rdev_to_array,
3466 * which mainly checks size.
3469 int major = simple_strtoul(buf, &e, 10);
3475 if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
3477 minor = simple_strtoul(e+1, &e, 10);
3478 if (*e && *e != '\n')
3480 dev = MKDEV(major, minor);
3481 if (major != MAJOR(dev) ||
3482 minor != MINOR(dev))
3486 if (mddev->persistent) {
3487 rdev = md_import_device(dev, mddev->major_version,
3488 mddev->minor_version);
3489 if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
3490 mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
3491 mdk_rdev_t, same_set);
3492 err = super_types[mddev->major_version]
3493 .load_super(rdev, rdev0, mddev->minor_version);
3497 } else if (mddev->external)
3498 rdev = md_import_device(dev, -2, -1);
3500 rdev = md_import_device(dev, -1, -1);
3503 return PTR_ERR(rdev);
3504 err = bind_rdev_to_array(rdev, mddev);
3508 return err ? err : len;
3511 static struct md_sysfs_entry md_new_device =
3512 __ATTR(new_dev, S_IWUSR, null_show, new_dev_store);
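/*
* The string is the major:minor pair of the component device, e.g.
* "8:16" for a device with MKDEV(8, 16) (commonly /dev/sdb, an
* assumption here); a trailing newline is accepted:
*
*     write(fd, "8:16", 4);
*/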
3515 bitmap_store(mddev_t *mddev, const char *buf, size_t len)
3518 unsigned long chunk, end_chunk;
3522 /* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */
3524 chunk = end_chunk = simple_strtoul(buf, &end, 0);
3525 if (buf == end) break;
3526 if (*end == '-') { /* range */
3528 end_chunk = simple_strtoul(buf, &end, 0);
3529 if (buf == end) break;
3531 if (*end && !isspace(*end)) break;
3532 bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
3533 buf = skip_spaces(end);
3535 bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
3540 static struct md_sysfs_entry md_bitmap =
3541 __ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store);
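/*
* For example, writing "0-15 64" (chunk numbers illustrative) marks
* bitmap chunks 0 through 15 plus chunk 64 dirty, so those regions
* get resynced.
*/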
3544 size_show(mddev_t *mddev, char *page)
3546 return sprintf(page, "%llu\n",
3547 (unsigned long long)mddev->dev_sectors / 2);
3550 static int update_size(mddev_t *mddev, sector_t num_sectors);
3553 size_store(mddev_t *mddev, const char *buf, size_t len)
3555 /* If array is inactive, we can reduce the component size, but
3556 * not increase it (except from 0).
3557 * If array is active, we can try an on-line resize
3560 int err = strict_blocks_to_sectors(buf, &sectors);
3565 err = update_size(mddev, sectors);
3566 md_update_sb(mddev, 1);
3568 if (mddev->dev_sectors == 0 ||
3569 mddev->dev_sectors > sectors)
3570 mddev->dev_sectors = sectors;
3574 return err ? err : len;
3577 static struct md_sysfs_entry md_size =
3578 __ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);
3583 * 'none' for arrays with no metadata (good luck...)
3584 * 'external' for arrays with externally managed metadata,
3585 * or N.M for internally known formats
3588 metadata_show(mddev_t *mddev, char *page)
3590 if (mddev->persistent)
3591 return sprintf(page, "%d.%d\n",
3592 mddev->major_version, mddev->minor_version);
3593 else if (mddev->external)
3594 return sprintf(page, "external:%s\n", mddev->metadata_type);
3596 return sprintf(page, "none\n");
3600 metadata_store(mddev_t *mddev, const char *buf, size_t len)
3604 /* Changing the details of 'external' metadata is
3605 * always permitted. Otherwise there must be
3606 * no devices attached to the array.
3608 if (mddev->external && strncmp(buf, "external:", 9) == 0)
3610 else if (!list_empty(&mddev->disks))
3613 if (cmd_match(buf, "none")) {
3614 mddev->persistent = 0;
3615 mddev->external = 0;
3616 mddev->major_version = 0;
3617 mddev->minor_version = 90;
3620 if (strncmp(buf, "external:", 9) == 0) {
3621 size_t namelen = len-9;
3622 if (namelen >= sizeof(mddev->metadata_type))
3623 namelen = sizeof(mddev->metadata_type)-1;
3624 strncpy(mddev->metadata_type, buf+9, namelen);
3625 mddev->metadata_type[namelen] = 0;
3626 if (namelen && mddev->metadata_type[namelen-1] == '\n')
3627 mddev->metadata_type[--namelen] = 0;
3628 mddev->persistent = 0;
3629 mddev->external = 1;
3630 mddev->major_version = 0;
3631 mddev->minor_version = 90;
3634 major = simple_strtoul(buf, &e, 10);
3635 if (e==buf || *e != '.')
3638 minor = simple_strtoul(buf, &e, 10);
3639 if (e==buf || (*e && *e != '\n') )
3641 if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL)
3643 mddev->major_version = major;
3644 mddev->minor_version = minor;
3645 mddev->persistent = 1;
3646 mddev->external = 0;
3650 static struct md_sysfs_entry md_metadata =
3651 __ATTR(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
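/*
* Accepted values thus include "0.90" and "1.2" for in-kernel
* superblock formats, "none" for no metadata at all, and strings
* like "external:imsm" (type name illustrative) to hand metadata
* management to userspace.
*/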
3654 action_show(mddev_t *mddev, char *page)
3656 char *type = "idle";
3657 if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
3659 else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
3660 (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))) {
3661 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
3663 else if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
3664 if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
3666 else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
3670 } else if (test_bit(MD_RECOVERY_RECOVER, &mddev->recovery))
3673 return sprintf(page, "%s\n", type);
3677 action_store(mddev_t *mddev, const char *page, size_t len)
3679 if (!mddev->pers || !mddev->pers->sync_request)
3682 if (cmd_match(page, "frozen"))
3683 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
3685 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
3687 if (cmd_match(page, "idle") || cmd_match(page, "frozen")) {
3688 if (mddev->sync_thread) {
3689 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
3690 md_unregister_thread(mddev->sync_thread);
3691 mddev->sync_thread = NULL;
3692 mddev->recovery = 0;
3694 } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
3695 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
3697 else if (cmd_match(page, "resync"))
3698 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3699 else if (cmd_match(page, "recover")) {
3700 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
3701 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3702 } else if (cmd_match(page, "reshape")) {
3704 if (mddev->pers->start_reshape == NULL)
3706 err = mddev->pers->start_reshape(mddev);
3709 sysfs_notify(&mddev->kobj, NULL, "degraded");
3711 if (cmd_match(page, "check"))
3712 set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
3713 else if (!cmd_match(page, "repair"))
3715 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
3716 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
3718 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3719 md_wakeup_thread(mddev->thread);
3720 sysfs_notify_dirent_safe(mddev->sysfs_action);
3725 mismatch_cnt_show(mddev_t *mddev, char *page)
3727 return sprintf(page, "%llu\n",
3728 (unsigned long long) mddev->resync_mismatches);
3731 static struct md_sysfs_entry md_scan_mode =
3732 __ATTR(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
3735 static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt);
3738 sync_min_show(mddev_t *mddev, char *page)
3740 return sprintf(page, "%d (%s)\n", speed_min(mddev),
3741 mddev->sync_speed_min ? "local": "system");
3745 sync_min_store(mddev_t *mddev, const char *buf, size_t len)
3749 if (strncmp(buf, "system", 6)==0) {
3750 mddev->sync_speed_min = 0;
3753 min = simple_strtoul(buf, &e, 10);
3754 if (buf == e || (*e && *e != '\n') || min <= 0)
3756 mddev->sync_speed_min = min;
3760 static struct md_sysfs_entry md_sync_min =
3761 __ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store);
3764 sync_max_show(mddev_t *mddev, char *page)
3766 return sprintf(page, "%d (%s)\n", speed_max(mddev),
3767 mddev->sync_speed_max ? "local": "system");
3771 sync_max_store(mddev_t *mddev, const char *buf, size_t len)
3775 if (strncmp(buf, "system", 6)==0) {
3776 mddev->sync_speed_max = 0;
3779 max = simple_strtoul(buf, &e, 10);
3780 if (buf == e || (*e && *e != '\n') || max <= 0)
3782 mddev->sync_speed_max = max;
3786 static struct md_sysfs_entry md_sync_max =
3787 __ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store);
3790 degraded_show(mddev_t *mddev, char *page)
3792 return sprintf(page, "%d\n", mddev->degraded);
3794 static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded);
3797 sync_force_parallel_show(mddev_t *mddev, char *page)
3799 return sprintf(page, "%d\n", mddev->parallel_resync);
3803 sync_force_parallel_store(mddev_t *mddev, const char *buf, size_t len)
3807 if (strict_strtol(buf, 10, &n))
3810 if (n != 0 && n != 1)
3813 mddev->parallel_resync = n;
3815 if (mddev->sync_thread)
3816 wake_up(&resync_wait);
3821 /* force parallel resync, even with shared block devices */
3822 static struct md_sysfs_entry md_sync_force_parallel =
3823 __ATTR(sync_force_parallel, S_IRUGO|S_IWUSR,
3824 sync_force_parallel_show, sync_force_parallel_store);
3827 sync_speed_show(mddev_t *mddev, char *page)
3829 unsigned long resync, dt, db;
3830 if (mddev->curr_resync == 0)
3831 return sprintf(page, "none\n");
3832 resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active);
3833 dt = (jiffies - mddev->resync_mark) / HZ;
3835 db = resync - mddev->resync_mark_cnt;
3836 return sprintf(page, "%lu\n", db/dt/2); /* K/sec */
3839 static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed);
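/*
* Worked example: if db = 20480 sectors completed over the last
* dt = 10 seconds, the attribute reports 20480/10/2 = 1024, i.e.
* 1024 K/sec, the final /2 converting 512-byte sectors to
* kilobytes.
*/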
3842 sync_completed_show(mddev_t *mddev, char *page)
3844 unsigned long max_sectors, resync;
3846 if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
3847 return sprintf(page, "none\n");
3849 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
3850 max_sectors = mddev->resync_max_sectors;
3852 max_sectors = mddev->dev_sectors;
3854 resync = mddev->curr_resync_completed;
3855 return sprintf(page, "%lu / %lu\n", resync, max_sectors);
3858 static struct md_sysfs_entry md_sync_completed = __ATTR_RO(sync_completed);
3861 min_sync_show(mddev_t *mddev, char *page)
3863 return sprintf(page, "%llu\n",
3864 (unsigned long long)mddev->resync_min);
3867 min_sync_store(mddev_t *mddev, const char *buf, size_t len)
3869 unsigned long long min;
3870 if (strict_strtoull(buf, 10, &min))
3872 if (min > mddev->resync_max)
3874 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
3877 /* Must be a multiple of chunk_size */
3878 if (mddev->chunk_sectors) {
3879 sector_t temp = min;
3880 if (sector_div(temp, mddev->chunk_sectors))
3883 mddev->resync_min = min;
3888 static struct md_sysfs_entry md_min_sync =
3889 __ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store);
3892 max_sync_show(mddev_t *mddev, char *page)
3894 if (mddev->resync_max == MaxSector)
3895 return sprintf(page, "max\n");
3897 return sprintf(page, "%llu\n",
3898 (unsigned long long)mddev->resync_max);
3901 max_sync_store(mddev_t *mddev, const char *buf, size_t len)
3903 if (strncmp(buf, "max", 3) == 0)
3904 mddev->resync_max = MaxSector;
3906 unsigned long long max;
3907 if (strict_strtoull(buf, 10, &max))
3909 if (max < mddev->resync_min)
3911 if (max < mddev->resync_max &&
3913 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
3916 /* Must be a multiple of chunk_size */
3917 if (mddev->chunk_sectors) {
3918 sector_t temp = max;
3919 if (sector_div(temp, mddev->chunk_sectors))
3922 mddev->resync_max = max;
3924 wake_up(&mddev->recovery_wait);
3928 static struct md_sysfs_entry md_max_sync =
3929 __ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store);
3932 suspend_lo_show(mddev_t *mddev, char *page)
3934 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo);
3938 suspend_lo_store(mddev_t *mddev, const char *buf, size_t len)
3941 unsigned long long new = simple_strtoull(buf, &e, 10);
3943 if (mddev->pers == NULL ||
3944 mddev->pers->quiesce == NULL)
3946 if (buf == e || (*e && *e != '\n'))
3948 if (new >= mddev->suspend_hi ||
3949 (new > mddev->suspend_lo && new < mddev->suspend_hi)) {
3950 mddev->suspend_lo = new;
3951 mddev->pers->quiesce(mddev, 2);
3956 static struct md_sysfs_entry md_suspend_lo =
3957 __ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store);
3961 suspend_hi_show(mddev_t *mddev, char *page)
3963 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi);
3967 suspend_hi_store(mddev_t *mddev, const char *buf, size_t len)
3970 unsigned long long new = simple_strtoull(buf, &e, 10);
3972 if (mddev->pers == NULL ||
3973 mddev->pers->quiesce == NULL)
3975 if (buf == e || (*e && *e != '\n'))
3977 if ((new <= mddev->suspend_lo && mddev->suspend_lo >= mddev->suspend_hi) ||
3978 (new > mddev->suspend_lo && new > mddev->suspend_hi)) {
3979 mddev->suspend_hi = new;
3980 mddev->pers->quiesce(mddev, 1);
3981 mddev->pers->quiesce(mddev, 0);
3986 static struct md_sysfs_entry md_suspend_hi =
3987 __ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
3990 reshape_position_show(mddev_t *mddev, char *page)
3992 if (mddev->reshape_position != MaxSector)
3993 return sprintf(page, "%llu\n",
3994 (unsigned long long)mddev->reshape_position);
3995 strcpy(page, "none\n");
4000 reshape_position_store(mddev_t *mddev, const char *buf, size_t len)
4003 unsigned long long new = simple_strtoull(buf, &e, 10);
4006 if (buf == e || (*e && *e != '\n'))
4008 mddev->reshape_position = new;
4009 mddev->delta_disks = 0;
4010 mddev->new_level = mddev->level;
4011 mddev->new_layout = mddev->layout;
4012 mddev->new_chunk_sectors = mddev->chunk_sectors;
4016 static struct md_sysfs_entry md_reshape_position =
4017 __ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show,
4018 reshape_position_store);
4021 array_size_show(mddev_t *mddev, char *page)
4023 if (mddev->external_size)
4024 return sprintf(page, "%llu\n",
4025 (unsigned long long)mddev->array_sectors/2);
4027 return sprintf(page, "default\n");
4031 array_size_store(mddev_t *mddev, const char *buf, size_t len)
4035 if (strncmp(buf, "default", 7) == 0) {
4037 sectors = mddev->pers->size(mddev, 0, 0);
4039 sectors = mddev->array_sectors;
4041 mddev->external_size = 0;
4043 if (strict_blocks_to_sectors(buf, &sectors) < 0)
4045 if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors)
4048 mddev->external_size = 1;
4051 mddev->array_sectors = sectors;
4052 set_capacity(mddev->gendisk, mddev->array_sectors);
4054 revalidate_disk(mddev->gendisk);
4059 static struct md_sysfs_entry md_array_size =
4060 __ATTR(array_size, S_IRUGO|S_IWUSR, array_size_show,
4063 static struct attribute *md_default_attrs[] = {
4066 &md_raid_disks.attr,
4067 &md_chunk_size.attr,
4069 &md_resync_start.attr,
4071 &md_new_device.attr,
4072 &md_safe_delay.attr,
4073 &md_array_state.attr,
4074 &md_reshape_position.attr,
4075 &md_array_size.attr,
4076 &max_corr_read_errors.attr,
4080 static struct attribute *md_redundancy_attrs[] = {
4082 &md_mismatches.attr,
4085 &md_sync_speed.attr,
4086 &md_sync_force_parallel.attr,
4087 &md_sync_completed.attr,
4090 &md_suspend_lo.attr,
4091 &md_suspend_hi.attr,
4096 static struct attribute_group md_redundancy_group = {
4098 .attrs = md_redundancy_attrs,
4103 md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
4105 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
4106 mddev_t *mddev = container_of(kobj, struct mddev_s, kobj);
4111 rv = mddev_lock(mddev);
4113 rv = entry->show(mddev, page);
4114 mddev_unlock(mddev);
4120 md_attr_store(struct kobject *kobj, struct attribute *attr,
4121 const char *page, size_t length)
4123 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
4124 mddev_t *mddev = container_of(kobj, struct mddev_s, kobj);
4129 if (!capable(CAP_SYS_ADMIN))
4131 rv = mddev_lock(mddev);
4132 if (mddev->hold_active == UNTIL_IOCTL)
4133 mddev->hold_active = 0;
4135 rv = entry->store(mddev, page, length);
4136 mddev_unlock(mddev);
4141 static void md_free(struct kobject *ko)
4143 mddev_t *mddev = container_of(ko, mddev_t, kobj);
4145 if (mddev->sysfs_state)
4146 sysfs_put(mddev->sysfs_state);
4148 if (mddev->gendisk) {
4149 del_gendisk(mddev->gendisk);
4150 put_disk(mddev->gendisk);
4153 blk_cleanup_queue(mddev->queue);
4158 static const struct sysfs_ops md_sysfs_ops = {
4159 .show = md_attr_show,
4160 .store = md_attr_store,
4162 static struct kobj_type md_ktype = {
4164 .sysfs_ops = &md_sysfs_ops,
4165 .default_attrs = md_default_attrs,
4170 static void mddev_delayed_delete(struct work_struct *ws)
4172 mddev_t *mddev = container_of(ws, mddev_t, del_work);
4174 sysfs_remove_group(&mddev->kobj, &md_bitmap_group);
4175 kobject_del(&mddev->kobj);
4176 kobject_put(&mddev->kobj);
4179 static int md_alloc(dev_t dev, char *name)
4181 static DEFINE_MUTEX(disks_mutex);
4182 mddev_t *mddev = mddev_find(dev);
4183 struct gendisk *disk;
4192 partitioned = (MAJOR(mddev->unit) != MD_MAJOR);
4193 shift = partitioned ? MdpMinorShift : 0;
4194 unit = MINOR(mddev->unit) >> shift;
4196 /* wait for any previous instance of this device to be
4197 * completely removed (mddev_delayed_delete).
4199 flush_workqueue(md_misc_wq);
4201 mutex_lock(&disks_mutex);
4207 /* Need to ensure that 'name' is not a duplicate.
4210 spin_lock(&all_mddevs_lock);
4212 list_for_each_entry(mddev2, &all_mddevs, all_mddevs)
4213 if (mddev2->gendisk &&
4214 strcmp(mddev2->gendisk->disk_name, name) == 0) {
4215 spin_unlock(&all_mddevs_lock);
4218 spin_unlock(&all_mddevs_lock);
4222 mddev->queue = blk_alloc_queue(GFP_KERNEL);
4225 mddev->queue->queuedata = mddev;
4227 /* Can be unlocked because the queue is new: no concurrency */
4228 queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, mddev->queue);
4230 blk_queue_make_request(mddev->queue, md_make_request);
4232 disk = alloc_disk(1 << shift);
4234 blk_cleanup_queue(mddev->queue);
4235 mddev->queue = NULL;
4238 disk->major = MAJOR(mddev->unit);
4239 disk->first_minor = unit << shift;
4241 strcpy(disk->disk_name, name);
4242 else if (partitioned)
4243 sprintf(disk->disk_name, "md_d%d", unit);
4245 sprintf(disk->disk_name, "md%d", unit);
4246 disk->fops = &md_fops;
4247 disk->private_data = mddev;
4248 disk->queue = mddev->queue;
4249 /* Allow extended partitions. This makes the
4250 * 'mdp' device redundant, but we can't really
4253 disk->flags |= GENHD_FL_EXT_DEVT;
4255 mddev->gendisk = disk;
4256 error = kobject_init_and_add(&mddev->kobj, &md_ktype,
4257 &disk_to_dev(disk)->kobj, "%s", "md");
4259 /* This isn't possible, but as kobject_init_and_add is marked
4260 * __must_check, we must do something with the result
4262 printk(KERN_WARNING "md: cannot register %s/md - name in use\n",
4266 if (mddev->kobj.sd &&
4267 sysfs_create_group(&mddev->kobj, &md_bitmap_group))
4268 printk(KERN_DEBUG "pointless warning\n");
4270 mutex_unlock(&disks_mutex);
4271 if (!error && mddev->kobj.sd) {
4272 kobject_uevent(&mddev->kobj, KOBJ_ADD);
4273 mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state");
4279 static struct kobject *md_probe(dev_t dev, int *part, void *data)
4281 md_alloc(dev, NULL);
4285 static int add_named_array(const char *val, struct kernel_param *kp)
4287 /* val must be "md_*" where * is not all digits.
4288 * We allocate an array with a large free minor number, and
4289 * set the name to val. val must not already be an active name.
4291 int len = strlen(val);
4292 char buf[DISK_NAME_LEN];
4294 while (len && val[len-1] == '\n')
4296 if (len >= DISK_NAME_LEN)
4298 strlcpy(buf, val, len+1);
4299 if (strncmp(buf, "md_", 3) != 0)
4301 return md_alloc(0, buf);
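/*
* Assuming this handler is exposed as a writable module parameter
* (its kernel_param signature suggests so), writing e.g. "md_home\n"
* to that parameter allocates an array with a free minor number and
* the gendisk name "md_home".
*/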
4304 static void md_safemode_timeout(unsigned long data)
4306 mddev_t *mddev = (mddev_t *) data;
4308 if (!atomic_read(&mddev->writes_pending)) {
4309 mddev->safemode = 1;
4310 if (mddev->external)
4311 sysfs_notify_dirent_safe(mddev->sysfs_state);
4313 md_wakeup_thread(mddev->thread);
4316 static int start_dirty_degraded;
4318 int md_run(mddev_t *mddev)
4322 struct mdk_personality *pers;
4324 if (list_empty(&mddev->disks))
4325 /* cannot run an array with no devices.. */
4330 /* Cannot run until previous stop completes properly */
4331 if (mddev->sysfs_active)
4335 * Analyze all RAID superblock(s)
4337 if (!mddev->raid_disks) {
4338 if (!mddev->persistent)
4343 if (mddev->level != LEVEL_NONE)
4344 request_module("md-level-%d", mddev->level);
4345 else if (mddev->clevel[0])
4346 request_module("md-%s", mddev->clevel);
4349 * Drop all container device buffers; from now on
4350 * the only valid external interface is through the md
4353 list_for_each_entry(rdev, &mddev->disks, same_set) {
4354 if (test_bit(Faulty, &rdev->flags))
4356 sync_blockdev(rdev->bdev);
4357 invalidate_bdev(rdev->bdev);
4359 /* perform some consistency tests on the device.
4360 * We don't want the data to overlap the metadata;
4361 * internal bitmap issues have been handled elsewhere.
4363 if (rdev->data_offset < rdev->sb_start) {
4364 if (mddev->dev_sectors &&
4365 rdev->data_offset + mddev->dev_sectors
4367 printk("md: %s: data overlaps metadata\n",
4372 if (rdev->sb_start + rdev->sb_size/512
4373 > rdev->data_offset) {
4374 printk("md: %s: metadata overlaps data\n",
4379 sysfs_notify_dirent_safe(rdev->sysfs_state);
4382 spin_lock(&pers_lock);
4383 pers = find_pers(mddev->level, mddev->clevel);
4384 if (!pers || !try_module_get(pers->owner)) {
4385 spin_unlock(&pers_lock);
4386 if (mddev->level != LEVEL_NONE)
4387 printk(KERN_WARNING "md: personality for level %d is not loaded!\n",
4390 printk(KERN_WARNING "md: personality for level %s is not loaded!\n",
4395 spin_unlock(&pers_lock);
4396 if (mddev->level != pers->level) {
4397 mddev->level = pers->level;
4398 mddev->new_level = pers->level;
4400 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
4402 if (mddev->reshape_position != MaxSector &&
4403 pers->start_reshape == NULL) {
4404 /* This personality cannot handle reshaping... */
4406 module_put(pers->owner);
4410 if (pers->sync_request) {
4411 /* Warn if this is a potentially silly
4414 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
4418 list_for_each_entry(rdev, &mddev->disks, same_set)
4419 list_for_each_entry(rdev2, &mddev->disks, same_set) {
4421 rdev->bdev->bd_contains ==
4422 rdev2->bdev->bd_contains) {
4424 "%s: WARNING: %s appears to be"
4425 " on the same physical disk as"
4428 bdevname(rdev->bdev,b),
4429 bdevname(rdev2->bdev,b2));
4436 "True protection against single-disk"
4437 " failure might be compromised.\n");
4440 mddev->recovery = 0;
4441 /* may be over-ridden by personality */
4442 mddev->resync_max_sectors = mddev->dev_sectors;
4444 mddev->ok_start_degraded = start_dirty_degraded;
4446 if (start_readonly && mddev->ro == 0)
4447 mddev->ro = 2; /* read-only, but switch on first write */
4449 err = mddev->pers->run(mddev);
4451 printk(KERN_ERR "md: pers->run() failed ...\n");
4452 else if (mddev->pers->size(mddev, 0, 0) < mddev->array_sectors) {
4453 WARN_ONCE(!mddev->external_size, "%s: default size too small,"
4454 " but 'external_size' not in effect?\n", __func__);
4456 "md: invalid array_size %llu > default size %llu\n",
4457 (unsigned long long)mddev->array_sectors / 2,
4458 (unsigned long long)mddev->pers->size(mddev, 0, 0) / 2);
4460 mddev->pers->stop(mddev);
4462 if (err == 0 && mddev->pers->sync_request) {
4463 err = bitmap_create(mddev);
4465 printk(KERN_ERR "%s: failed to create bitmap (%d)\n",
4466 mdname(mddev), err);
4467 mddev->pers->stop(mddev);
4471 module_put(mddev->pers->owner);
4473 bitmap_destroy(mddev);
4476 if (mddev->pers->sync_request) {
4477 if (mddev->kobj.sd &&
4478 sysfs_create_group(&mddev->kobj, &md_redundancy_group))
4480 "md: cannot register extra attributes for %s\n",
4482 mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action");
4483 } else if (mddev->ro == 2) /* auto-readonly not meaningful */
4486 atomic_set(&mddev->writes_pending,0);
4487 atomic_set(&mddev->max_corr_read_errors,
4488 MD_DEFAULT_MAX_CORRECTED_READ_ERRORS);
4489 mddev->safemode = 0;
4490 mddev->safemode_timer.function = md_safemode_timeout;
4491 mddev->safemode_timer.data = (unsigned long) mddev;
4492 mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */
4495 list_for_each_entry(rdev, &mddev->disks, same_set)
4496 if (rdev->raid_disk >= 0) {
4498 sprintf(nm, "rd%d", rdev->raid_disk);
4499 if (sysfs_create_link(&mddev->kobj, &rdev->kobj, nm))
4500 /* failure here is OK */;
4503 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4506 md_update_sb(mddev, 0);
4508 md_wakeup_thread(mddev->thread);
4509 md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
4511 md_new_event(mddev);
4512 sysfs_notify_dirent_safe(mddev->sysfs_state);
4513 sysfs_notify_dirent_safe(mddev->sysfs_action);
4514 sysfs_notify(&mddev->kobj, NULL, "degraded");
4517 EXPORT_SYMBOL_GPL(md_run);
4519 static int do_md_run(mddev_t *mddev)
4523 err = md_run(mddev);
4526 err = bitmap_load(mddev);
4528 bitmap_destroy(mddev);
4531 set_capacity(mddev->gendisk, mddev->array_sectors);
4532 revalidate_disk(mddev->gendisk);
4533 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
4538 static int restart_array(mddev_t *mddev)
4540 struct gendisk *disk = mddev->gendisk;
4542 /* Complain if it has no devices */
4543 if (list_empty(&mddev->disks))
4549 mddev->safemode = 0;
4551 set_disk_ro(disk, 0);
4552 printk(KERN_INFO "md: %s switched to read-write mode.\n",
4554 /* Kick recovery or resync if necessary */
4555 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4556 md_wakeup_thread(mddev->thread);
4557 md_wakeup_thread(mddev->sync_thread);
4558 sysfs_notify_dirent_safe(mddev->sysfs_state);
4562 /* similar to deny_write_access, but accounts for our holding a reference
4563 * to the file ourselves */
4564 static int deny_bitmap_write_access(struct file * file)
4566 struct inode *inode = file->f_mapping->host;
4568 spin_lock(&inode->i_lock);
4569 if (atomic_read(&inode->i_writecount) > 1) {
4570 spin_unlock(&inode->i_lock);
4573 atomic_set(&inode->i_writecount, -1);
4574 spin_unlock(&inode->i_lock);
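/*
* The trick mirrors deny_write_access(): a positive i_writecount
* means the file is open for writing, a negative value makes
* further write opens fail. Since we hold one reference ourselves,
* anything > 1 means someone else has it too, and on success the
* count is parked at -1 until restore_bitmap_write_access().
*/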
4579 void restore_bitmap_write_access(struct file *file)
4581 struct inode *inode = file->f_mapping->host;
4583 spin_lock(&inode->i_lock);
4584 atomic_set(&inode->i_writecount, 1);
4585 spin_unlock(&inode->i_lock);
4588 static void md_clean(mddev_t *mddev)
4590 mddev->array_sectors = 0;
4591 mddev->external_size = 0;
4592 mddev->dev_sectors = 0;
4593 mddev->raid_disks = 0;
4594 mddev->recovery_cp = 0;
4595 mddev->resync_min = 0;
4596 mddev->resync_max = MaxSector;
4597 mddev->reshape_position = MaxSector;
4598 mddev->external = 0;
4599 mddev->persistent = 0;
4600 mddev->level = LEVEL_NONE;
4601 mddev->clevel[0] = 0;
4604 mddev->metadata_type[0] = 0;
4605 mddev->chunk_sectors = 0;
4606 mddev->ctime = mddev->utime = 0;
4608 mddev->max_disks = 0;
4610 mddev->can_decrease_events = 0;
4611 mddev->delta_disks = 0;
4612 mddev->new_level = LEVEL_NONE;
4613 mddev->new_layout = 0;
4614 mddev->new_chunk_sectors = 0;
4615 mddev->curr_resync = 0;
4616 mddev->resync_mismatches = 0;
4617 mddev->suspend_lo = mddev->suspend_hi = 0;
4618 mddev->sync_speed_min = mddev->sync_speed_max = 0;
4619 mddev->recovery = 0;
4621 mddev->degraded = 0;
4622 mddev->safemode = 0;
4623 mddev->bitmap_info.offset = 0;
4624 mddev->bitmap_info.default_offset = 0;
4625 mddev->bitmap_info.chunksize = 0;
4626 mddev->bitmap_info.daemon_sleep = 0;
4627 mddev->bitmap_info.max_write_behind = 0;
4631 void md_stop_writes(mddev_t *mddev)
4633 if (mddev->sync_thread) {
4634 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4635 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
4636 md_unregister_thread(mddev->sync_thread);
4637 mddev->sync_thread = NULL;
4640 del_timer_sync(&mddev->safemode_timer);
4642 bitmap_flush(mddev);
4643 md_super_wait(mddev);
4645 if (!mddev->in_sync || mddev->flags) {
4646 /* mark array as shutdown cleanly */
4648 md_update_sb(mddev, 1);
4651 EXPORT_SYMBOL_GPL(md_stop_writes);
4653 void md_stop(mddev_t *mddev)
4655 mddev->pers->stop(mddev);
4656 if (mddev->pers->sync_request && mddev->to_remove == NULL)
4657 mddev->to_remove = &md_redundancy_group;
4658 module_put(mddev->pers->owner);
4660 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4662 EXPORT_SYMBOL_GPL(md_stop);
4664 static int md_set_readonly(mddev_t *mddev, int is_open)
4667 mutex_lock(&mddev->open_mutex);
4668 if (atomic_read(&mddev->openers) > is_open) {
4669 printk("md: %s still in use.\n",mdname(mddev));
4674 md_stop_writes(mddev);
4680 set_disk_ro(mddev->gendisk, 1);
4681 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4682 sysfs_notify_dirent_safe(mddev->sysfs_state);
4686 mutex_unlock(&mddev->open_mutex);
4691 * 0 - completely stop and disassemble array
4692 * 2 - stop but do not disassemble array
4694 static int do_md_stop(mddev_t * mddev, int mode, int is_open)
4696 struct gendisk *disk = mddev->gendisk;
4699 mutex_lock(&mddev->open_mutex);
4700 if (atomic_read(&mddev->openers) > is_open ||
4701 mddev->sysfs_active) {
4702 printk("md: %s still in use.\n",mdname(mddev));
4703 mutex_unlock(&mddev->open_mutex);
4709 set_disk_ro(disk, 0);
4711 md_stop_writes(mddev);
4713 mddev->queue->merge_bvec_fn = NULL;
4714 mddev->queue->unplug_fn = NULL;
4715 mddev->queue->backing_dev_info.congested_fn = NULL;
4717 /* tell userspace to handle 'inactive' */
4718 sysfs_notify_dirent_safe(mddev->sysfs_state);
4720 list_for_each_entry(rdev, &mddev->disks, same_set)
4721 if (rdev->raid_disk >= 0) {
4723 sprintf(nm, "rd%d", rdev->raid_disk);
4724 sysfs_remove_link(&mddev->kobj, nm);
4727 set_capacity(disk, 0);
4728 mutex_unlock(&mddev->open_mutex);
4729 revalidate_disk(disk);
4734 mutex_unlock(&mddev->open_mutex);
4736 * Free resources if final stop
4739 printk(KERN_INFO "md: %s stopped.\n", mdname(mddev));
4741 bitmap_destroy(mddev);
4742 if (mddev->bitmap_info.file) {
4743 restore_bitmap_write_access(mddev->bitmap_info.file);
4744 fput(mddev->bitmap_info.file);
4745 mddev->bitmap_info.file = NULL;
4747 mddev->bitmap_info.offset = 0;
4749 export_array(mddev);
4752 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
4753 if (mddev->hold_active == UNTIL_STOP)
4754 mddev->hold_active = 0;
4756 blk_integrity_unregister(disk);
4757 md_new_event(mddev);
4758 sysfs_notify_dirent_safe(mddev->sysfs_state);
4763 static void autorun_array(mddev_t *mddev)
4768 if (list_empty(&mddev->disks))
4771 printk(KERN_INFO "md: running: ");
4773 list_for_each_entry(rdev, &mddev->disks, same_set) {
4774 char b[BDEVNAME_SIZE];
4775 printk("<%s>", bdevname(rdev->bdev,b));
4779 err = do_md_run(mddev);
4781 printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
4782 do_md_stop(mddev, 0, 0);
4787 * let's try to run arrays based on all disks that have arrived
4788 * until now. (those are in pending_raid_disks)
4790 * the method: pick the first pending disk, collect all disks with
4791 * the same UUID, remove all from the pending list and put them into
4792 * the 'same_array' list. Then order this list based on superblock
4793 * update time (freshest comes first), kick out 'old' disks and
4794 * compare superblocks. If everything's fine then run it.
4796 * If "unit" is allocated, then bump its reference count
static void autorun_devices(int part)
{
	mdk_rdev_t *rdev0, *rdev, *tmp;
	mddev_t *mddev;
	char b[BDEVNAME_SIZE];

	printk(KERN_INFO "md: autorun ...\n");
	while (!list_empty(&pending_raid_disks)) {
		int unit;
		dev_t dev;
		LIST_HEAD(candidates);
		rdev0 = list_entry(pending_raid_disks.next,
					 mdk_rdev_t, same_set);

		printk(KERN_INFO "md: considering %s ...\n",
			bdevname(rdev0->bdev,b));
		INIT_LIST_HEAD(&candidates);
		rdev_for_each_list(rdev, tmp, &pending_raid_disks)
			if (super_90_load(rdev, rdev0, 0) >= 0) {
				printk(KERN_INFO "md: adding %s ...\n",
					bdevname(rdev->bdev,b));
				list_move(&rdev->same_set, &candidates);
			}
		/*
		 * now we have a set of devices, with all of them having
		 * mostly sane superblocks. It's time to allocate the
		 * mddev.
		 */
		if (part) {
			dev = MKDEV(mdp_major,
				    rdev0->preferred_minor << MdpMinorShift);
			unit = MINOR(dev) >> MdpMinorShift;
		} else {
			dev = MKDEV(MD_MAJOR, rdev0->preferred_minor);
			unit = MINOR(dev);
		}
		if (rdev0->preferred_minor != unit) {
			printk(KERN_INFO "md: unit number in %s is bad: %d\n",
			       bdevname(rdev0->bdev, b), rdev0->preferred_minor);
			break;
		}

		md_probe(dev, NULL, NULL);
		mddev = mddev_find(dev);
		if (!mddev || !mddev->gendisk) {
			if (mddev)
				mddev_put(mddev);
			printk(KERN_ERR
				"md: cannot allocate memory for md drive.\n");
			break;
		}
		if (mddev_lock(mddev))
			printk(KERN_WARNING "md: %s locked, cannot run\n",
			       mdname(mddev));
		else if (mddev->raid_disks || mddev->major_version
			 || !list_empty(&mddev->disks)) {
			printk(KERN_WARNING
				"md: %s already running, cannot run %s\n",
				mdname(mddev), bdevname(rdev0->bdev,b));
			mddev_unlock(mddev);
		} else {
			printk(KERN_INFO "md: created %s\n", mdname(mddev));
			mddev->persistent = 1;
			rdev_for_each_list(rdev, tmp, &candidates) {
				list_del_init(&rdev->same_set);
				if (bind_rdev_to_array(rdev, mddev))
					export_rdev(rdev);
			}
			autorun_array(mddev);
			mddev_unlock(mddev);
		}
		/* on success, candidates will be empty, on error
		 * it won't...
		 */
		rdev_for_each_list(rdev, tmp, &candidates) {
			list_del_init(&rdev->same_set);
			export_rdev(rdev);
		}
		mddev_put(mddev);
	}
	printk(KERN_INFO "md: ... autorun DONE.\n");
}
#endif /* !MODULE */
static int get_version(void __user * arg)
{
	mdu_version_t ver;

	ver.major = MD_MAJOR_VERSION;
	ver.minor = MD_MINOR_VERSION;
	ver.patchlevel = MD_PATCHLEVEL_VERSION;

	if (copy_to_user(arg, &ver, sizeof(ver)))
		return -EFAULT;

	return 0;
}
static int get_array_info(mddev_t * mddev, void __user * arg)
{
	mdu_array_info_t info;
	int nr,working,insync,failed,spare;
	mdk_rdev_t *rdev;

	nr=working=insync=failed=spare=0;
	list_for_each_entry(rdev, &mddev->disks, same_set) {
		nr++;
		if (test_bit(Faulty, &rdev->flags))
			failed++;
		else {
			working++;
			if (test_bit(In_sync, &rdev->flags))
				insync++;
			else
				spare++;
		}
	}

	info.major_version = mddev->major_version;
	info.minor_version = mddev->minor_version;
	info.patch_version = MD_PATCHLEVEL_VERSION;
	info.ctime = mddev->ctime;
	info.level = mddev->level;
	info.size = mddev->dev_sectors / 2;
	if (info.size != mddev->dev_sectors / 2) /* overflow */
		info.size = -1;
	info.nr_disks = nr;
	info.raid_disks = mddev->raid_disks;
	info.md_minor = mddev->md_minor;
	info.not_persistent= !mddev->persistent;

	info.utime = mddev->utime;
	info.state = 0;
	if (mddev->in_sync)
		info.state = (1<<MD_SB_CLEAN);
	if (mddev->bitmap && mddev->bitmap_info.offset)
		info.state |= (1<<MD_SB_BITMAP_PRESENT);
	info.active_disks = insync;
	info.working_disks = working;
	info.failed_disks = failed;
	info.spare_disks = spare;

	info.layout = mddev->layout;
	info.chunk_size = mddev->chunk_sectors << 9;

	if (copy_to_user(arg, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}
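/*
 * Illustrative only: from userspace, GET_ARRAY_INFO is reached through
 * ioctl() on the array device node. A minimal sketch (the fd handling
 * below is hypothetical and not part of this driver):
 *
 *	mdu_array_info_t info;
 *	int fd = open("/dev/md0", O_RDONLY);
 *	if (fd >= 0 && ioctl(fd, GET_ARRAY_INFO, &info) == 0)
 *		printf("level=%d raid_disks=%d\n", info.level, info.raid_disks);
 */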
static int get_bitmap_file(mddev_t * mddev, void __user * arg)
{
	mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */
	char *ptr, *buf = NULL;
	int err = -ENOMEM;

	if (md_allow_write(mddev))
		file = kmalloc(sizeof(*file), GFP_NOIO);
	else
		file = kmalloc(sizeof(*file), GFP_KERNEL);
	if (!file)
		goto out;

	/* bitmap disabled, zero the first byte and copy out */
	if (!mddev->bitmap || !mddev->bitmap->file) {
		file->pathname[0] = '\0';
		goto copy_out;
	}

	buf = kmalloc(sizeof(file->pathname), GFP_KERNEL);
	if (!buf)
		goto out;

	ptr = d_path(&mddev->bitmap->file->f_path, buf, sizeof(file->pathname));
	if (IS_ERR(ptr))
		goto out;

	strcpy(file->pathname, ptr);

copy_out:
	err = 0;
	if (copy_to_user(arg, file, sizeof(*file)))
		err = -EFAULT;
out:
	kfree(buf);
	kfree(file);
	return err;
}
static int get_disk_info(mddev_t * mddev, void __user * arg)
{
	mdu_disk_info_t info;
	mdk_rdev_t *rdev;

	if (copy_from_user(&info, arg, sizeof(info)))
		return -EFAULT;

	rdev = find_rdev_nr(mddev, info.number);
	if (rdev) {
		info.major = MAJOR(rdev->bdev->bd_dev);
		info.minor = MINOR(rdev->bdev->bd_dev);
		info.raid_disk = rdev->raid_disk;
		info.state = 0;
		if (test_bit(Faulty, &rdev->flags))
			info.state |= (1<<MD_DISK_FAULTY);
		else if (test_bit(In_sync, &rdev->flags)) {
			info.state |= (1<<MD_DISK_ACTIVE);
			info.state |= (1<<MD_DISK_SYNC);
		}
		if (test_bit(WriteMostly, &rdev->flags))
			info.state |= (1<<MD_DISK_WRITEMOSTLY);
	} else {
		info.major = info.minor = 0;
		info.raid_disk = -1;
		info.state = (1<<MD_DISK_REMOVED);
	}

	if (copy_to_user(arg, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}
static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
{
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	mdk_rdev_t *rdev;
	dev_t dev = MKDEV(info->major,info->minor);

	if (info->major != MAJOR(dev) || info->minor != MINOR(dev))
		return -EOVERFLOW;

	if (!mddev->raid_disks) {
		int err;
		/* expecting a device which has a superblock */
		rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
		if (IS_ERR(rdev)) {
			printk(KERN_WARNING
				"md: md_import_device returned %ld\n",
				PTR_ERR(rdev));
			return PTR_ERR(rdev);
		}
		if (!list_empty(&mddev->disks)) {
			mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
							mdk_rdev_t, same_set);
			err = super_types[mddev->major_version]
				.load_super(rdev, rdev0, mddev->minor_version);
			if (err < 0) {
				printk(KERN_WARNING
					"md: %s has different UUID to %s\n",
					bdevname(rdev->bdev,b),
					bdevname(rdev0->bdev,b2));
				export_rdev(rdev);
				return -EINVAL;
			}
		}
		err = bind_rdev_to_array(rdev, mddev);
		if (err)
			export_rdev(rdev);
		return err;
	}

	/*
	 * add_new_disk can be used once the array is assembled
	 * to add "hot spares". They must already have a superblock
	 * written
	 */
	if (mddev->pers) {
		int err;
		if (!mddev->pers->hot_add_disk) {
			printk(KERN_WARNING
				"%s: personality does not support diskops!\n",
				mdname(mddev));
			return -EINVAL;
		}
		if (mddev->persistent)
			rdev = md_import_device(dev, mddev->major_version,
						mddev->minor_version);
		else
			rdev = md_import_device(dev, -1, -1);
		if (IS_ERR(rdev)) {
			printk(KERN_WARNING
				"md: md_import_device returned %ld\n",
				PTR_ERR(rdev));
			return PTR_ERR(rdev);
		}
		/* set save_raid_disk if appropriate */
		if (!mddev->persistent) {
			if (info->state & (1<<MD_DISK_SYNC) &&
			    info->raid_disk < mddev->raid_disks)
				rdev->raid_disk = info->raid_disk;
			else
				rdev->raid_disk = -1;
		} else
			super_types[mddev->major_version].
				validate_super(mddev, rdev);
		rdev->saved_raid_disk = rdev->raid_disk;

		clear_bit(In_sync, &rdev->flags); /* just to be sure */
		if (info->state & (1<<MD_DISK_WRITEMOSTLY))
			set_bit(WriteMostly, &rdev->flags);
		else
			clear_bit(WriteMostly, &rdev->flags);

		rdev->raid_disk = -1;
		err = bind_rdev_to_array(rdev, mddev);
		if (!err && !mddev->pers->hot_remove_disk) {
			/* If there is hot_add_disk but no hot_remove_disk
			 * then added disks are for geometry changes,
			 * and should be added immediately.
			 */
			super_types[mddev->major_version].
				validate_super(mddev, rdev);
			err = mddev->pers->hot_add_disk(mddev, rdev);
			if (err)
				unbind_rdev_from_array(rdev);
		}
		if (err)
			export_rdev(rdev);
		else
			sysfs_notify_dirent_safe(rdev->sysfs_state);

		md_update_sb(mddev, 1);
		if (mddev->degraded)
			set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
		md_wakeup_thread(mddev->thread);
		return err;
	}

	/* otherwise, add_new_disk is only allowed
	 * for major_version==0 superblocks
	 */
	if (mddev->major_version != 0) {
		printk(KERN_WARNING "%s: ADD_NEW_DISK not supported\n",
		       mdname(mddev));
		return -EINVAL;
	}

	if (!(info->state & (1<<MD_DISK_FAULTY))) {
		int err;
		rdev = md_import_device(dev, -1, 0);
		if (IS_ERR(rdev)) {
			printk(KERN_WARNING
				"md: error, md_import_device() returned %ld\n",
				PTR_ERR(rdev));
			return PTR_ERR(rdev);
		}
		rdev->desc_nr = info->number;
		if (info->raid_disk < mddev->raid_disks)
			rdev->raid_disk = info->raid_disk;
		else
			rdev->raid_disk = -1;

		if (rdev->raid_disk < mddev->raid_disks)
			if (info->state & (1<<MD_DISK_SYNC))
				set_bit(In_sync, &rdev->flags);

		if (info->state & (1<<MD_DISK_WRITEMOSTLY))
			set_bit(WriteMostly, &rdev->flags);

		if (!mddev->persistent) {
			printk(KERN_INFO "md: nonpersistent superblock ...\n");
			rdev->sb_start = rdev->bdev->bd_inode->i_size / 512;
		} else
			rdev->sb_start = calc_dev_sboffset(rdev->bdev);
		rdev->sectors = rdev->sb_start;

		err = bind_rdev_to_array(rdev, mddev);
		if (err) {
			export_rdev(rdev);
			return err;
		}
	}

	return 0;
}
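/*
 * Editorial summary of the three add_new_disk paths above:
 *  - mddev->raid_disks == 0: the array is being assembled; the device
 *    must carry a superblock, which is loaded and UUID-checked against
 *    the first device already bound.
 *  - mddev->pers != NULL: a running array; the device is added as a hot
 *    spare (or immediately, for personalities without hot_remove_disk).
 *  - otherwise: building a new version-0.90 array, where the kernel
 *    will write the superblocks itself.
 */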
static int hot_remove_disk(mddev_t * mddev, dev_t dev)
{
	char b[BDEVNAME_SIZE];
	mdk_rdev_t *rdev;

	rdev = find_rdev(mddev, dev);
	if (!rdev)
		return -ENXIO;

	if (rdev->raid_disk >= 0)
		goto busy;

	kick_rdev_from_array(rdev);
	md_update_sb(mddev, 1);
	md_new_event(mddev);

	return 0;
busy:
	printk(KERN_WARNING "md: cannot remove active disk %s from %s ...\n",
		bdevname(rdev->bdev,b), mdname(mddev));
	return -EBUSY;
}
static int hot_add_disk(mddev_t * mddev, dev_t dev)
{
	char b[BDEVNAME_SIZE];
	int err;
	mdk_rdev_t *rdev;

	if (!mddev->pers)
		return -ENODEV;

	if (mddev->major_version != 0) {
		printk(KERN_WARNING "%s: HOT_ADD may only be used with"
			" version-0 superblocks.\n",
			mdname(mddev));
		return -EINVAL;
	}
	if (!mddev->pers->hot_add_disk) {
		printk(KERN_WARNING
			"%s: personality does not support diskops!\n",
			mdname(mddev));
		return -EINVAL;
	}

	rdev = md_import_device(dev, -1, 0);
	if (IS_ERR(rdev)) {
		printk(KERN_WARNING
			"md: error, md_import_device() returned %ld\n",
			PTR_ERR(rdev));
		return -EINVAL;
	}

	if (mddev->persistent)
		rdev->sb_start = calc_dev_sboffset(rdev->bdev);
	else
		rdev->sb_start = rdev->bdev->bd_inode->i_size / 512;

	rdev->sectors = rdev->sb_start;

	if (test_bit(Faulty, &rdev->flags)) {
		printk(KERN_WARNING
			"md: can not hot-add faulty %s disk to %s!\n",
			bdevname(rdev->bdev,b), mdname(mddev));
		err = -EINVAL;
		goto abort_export;
	}
	clear_bit(In_sync, &rdev->flags);
	rdev->desc_nr = -1;
	rdev->saved_raid_disk = -1;
	err = bind_rdev_to_array(rdev, mddev);
	if (err)
		goto abort_export;

	/*
	 * The rest should better be atomic, we can have disk failures
	 * noticed in interrupt contexts ...
	 */
	rdev->raid_disk = -1;

	md_update_sb(mddev, 1);

	/*
	 * Kick recovery, maybe this spare has to be added to the
	 * array immediately.
	 */
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	md_new_event(mddev);
	return 0;

abort_export:
	export_rdev(rdev);
	return err;
}
static int set_bitmap_file(mddev_t *mddev, int fd)
{
	int err;

	if (mddev->pers) {
		if (!mddev->pers->quiesce)
			return -EBUSY;
		if (mddev->recovery || mddev->sync_thread)
			return -EBUSY;
		/* we should be able to change the bitmap.. */
	}

	if (fd >= 0) {
		if (mddev->bitmap)
			return -EEXIST; /* cannot add when bitmap is present */
		mddev->bitmap_info.file = fget(fd);

		if (mddev->bitmap_info.file == NULL) {
			printk(KERN_ERR "%s: error: failed to get bitmap file\n",
			       mdname(mddev));
			return -EBADF;
		}

		err = deny_bitmap_write_access(mddev->bitmap_info.file);
		if (err) {
			printk(KERN_ERR "%s: error: bitmap file is already in use\n",
			       mdname(mddev));
			fput(mddev->bitmap_info.file);
			mddev->bitmap_info.file = NULL;
			return err;
		}
		mddev->bitmap_info.offset = 0; /* file overrides offset */
	} else if (mddev->bitmap == NULL)
		return -ENOENT; /* cannot remove what isn't there */
	err = 0;
	if (mddev->pers) {
		mddev->pers->quiesce(mddev, 1);
		if (fd >= 0) {
			err = bitmap_create(mddev);
			if (!err)
				err = bitmap_load(mddev);
		}
		if (fd < 0 || err) {
			bitmap_destroy(mddev);
			fd = -1; /* make sure to put the file */
		}
		mddev->pers->quiesce(mddev, 0);
	}
	if (fd < 0) {
		if (mddev->bitmap_info.file) {
			restore_bitmap_write_access(mddev->bitmap_info.file);
			fput(mddev->bitmap_info.file);
		}
		mddev->bitmap_info.file = NULL;
	}

	return err;
}
/*
 * set_array_info is used in two different ways.
 * The original usage is when creating a new array.
 * In this usage, raid_disks is > 0 and it together with
 *  level, size, not_persistent, layout, chunksize determines the
 *  shape of the array.
 *  This will always create an array with a type-0.90.0 superblock.
 * The newer usage is when assembling an array.
 *  In this case raid_disks will be 0, and the major_version field is
 *  used to determine which style super-blocks are to be found on the devices.
 *  The minor and patch _version numbers are also kept in case the
 *  super_block handler wishes to interpret them.
 */
static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
{
	if (info->raid_disks == 0) {
		/* just setting version number for superblock loading */
		if (info->major_version < 0 ||
		    info->major_version >= ARRAY_SIZE(super_types) ||
		    super_types[info->major_version].name == NULL) {
			/* maybe try to auto-load a module? */
			printk(KERN_INFO
				"md: superblock version %d not known\n",
				info->major_version);
			return -EINVAL;
		}
		mddev->major_version = info->major_version;
		mddev->minor_version = info->minor_version;
		mddev->patch_version = info->patch_version;
		mddev->persistent = !info->not_persistent;
		/* ensure mddev_put doesn't delete this now that there
		 * is some minimal configuration.
		 */
		mddev->ctime = get_seconds();
		return 0;
	}
	mddev->major_version = MD_MAJOR_VERSION;
	mddev->minor_version = MD_MINOR_VERSION;
	mddev->patch_version = MD_PATCHLEVEL_VERSION;
	mddev->ctime = get_seconds();

	mddev->level = info->level;
	mddev->clevel[0] = 0;
	mddev->dev_sectors = 2 * (sector_t)info->size;
	mddev->raid_disks = info->raid_disks;
	/* don't set md_minor, it is determined by which /dev/md* was
	 * opened
	 */
	if (info->state & (1<<MD_SB_CLEAN))
		mddev->recovery_cp = MaxSector;
	else
		mddev->recovery_cp = 0;
	mddev->persistent = ! info->not_persistent;
	mddev->external = 0;

	mddev->layout = info->layout;
	mddev->chunk_sectors = info->chunk_size >> 9;

	mddev->max_disks = MD_SB_DISKS;

	if (mddev->persistent)
		mddev->flags = 0;
	set_bit(MD_CHANGE_DEVS, &mddev->flags);

	mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
	mddev->bitmap_info.offset = 0;

	mddev->reshape_position = MaxSector;

	/*
	 * Generate a 128 bit UUID
	 */
	get_random_bytes(mddev->uuid, 16);

	mddev->new_level = mddev->level;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->new_layout = mddev->layout;
	mddev->delta_disks = 0;

	return 0;
}
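/*
 * Illustrative only: the two SET_ARRAY_INFO usages described above, as
 * a hypothetical userspace caller might fill mdu_array_info_t (sketch,
 * not driver code):
 *
 *	mdu_array_info_t info = { 0 };
 *	info.major_version = 1;			// assemble: raid_disks == 0,
 *	ioctl(fd, SET_ARRAY_INFO, &info);	// only version numbers matter
 *
 *	info.raid_disks = 2;			// create: shape fields used,
 *	info.level = 1;				// implies a 0.90.0 superblock
 *	ioctl(fd, SET_ARRAY_INFO, &info);
 */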
void md_set_array_sectors(mddev_t *mddev, sector_t array_sectors)
{
	WARN(!mddev_is_locked(mddev), "%s: unlocked mddev!\n", __func__);

	if (mddev->external_size)
		return;

	mddev->array_sectors = array_sectors;
}
EXPORT_SYMBOL(md_set_array_sectors);
static int update_size(mddev_t *mddev, sector_t num_sectors)
{
	mdk_rdev_t *rdev;
	int rv;
	int fit = (num_sectors == 0);

	if (mddev->pers->resize == NULL)
		return -EINVAL;
	/* The "num_sectors" is the number of sectors of each device that
	 * is used. This can only make sense for arrays with redundancy.
	 * linear and raid0 always use whatever space is available. We can only
	 * consider changing this number if no resync or reconstruction is
	 * happening, and if the new size is acceptable. It must fit before the
	 * sb_start or, if that is <data_offset, it must fit before the size
	 * of each device. If num_sectors is zero, we find the largest size
	 * that fits.
	 */
	if (mddev->sync_thread)
		return -EBUSY;
	if (mddev->bitmap)
		/* Sorry, cannot grow a bitmap yet, just remove it,
		 * grow, and re-add.
		 */
		return -EBUSY;
	list_for_each_entry(rdev, &mddev->disks, same_set) {
		sector_t avail = rdev->sectors;

		if (fit && (num_sectors == 0 || num_sectors > avail))
			num_sectors = avail;
		if (avail < num_sectors)
			return -ENOSPC;
	}
	rv = mddev->pers->resize(mddev, num_sectors);
	if (!rv)
		revalidate_disk(mddev->gendisk);
	return rv;
}
static int update_raid_disks(mddev_t *mddev, int raid_disks)
{
	int rv;
	/* change the number of raid disks */
	if (mddev->pers->check_reshape == NULL)
		return -EINVAL;
	if (raid_disks <= 0 ||
	    (mddev->max_disks && raid_disks >= mddev->max_disks))
		return -EINVAL;
	if (mddev->sync_thread || mddev->reshape_position != MaxSector)
		return -EBUSY;
	mddev->delta_disks = raid_disks - mddev->raid_disks;

	rv = mddev->pers->check_reshape(mddev);
	if (rv < 0)
		mddev->delta_disks = 0;
	return rv;
}
/*
 * update_array_info is used to change the configuration of an
 * on-line array.
 * The version, ctime, level, size, raid_disks, not_persistent, layout, chunk_size
 * fields in the info are checked against the array.
 * Any differences that cannot be handled will cause an error.
 * Normally, only one change can be managed at a time.
 */
static int update_array_info(mddev_t *mddev, mdu_array_info_t *info)
{
	int rv = 0;
	int cnt = 0;
	int state = 0;

	/* calculate expected state, ignoring low bits */
	if (mddev->bitmap && mddev->bitmap_info.offset)
		state |= (1 << MD_SB_BITMAP_PRESENT);

	if (mddev->major_version != info->major_version ||
	    mddev->minor_version != info->minor_version ||
/*	    mddev->patch_version != info->patch_version || */
	    mddev->ctime != info->ctime ||
	    mddev->level != info->level ||
/*	    mddev->layout != info->layout || */
	    !mddev->persistent != info->not_persistent||
	    mddev->chunk_sectors != info->chunk_size >> 9 ||
	    /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
	    ((state^info->state) & 0xfffffe00)
		)
		return -EINVAL;
	/* Check there is only one change */
	if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
		cnt++;
	if (mddev->raid_disks != info->raid_disks)
		cnt++;
	if (mddev->layout != info->layout)
		cnt++;
	if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT))
		cnt++;
	if (cnt == 0)
		return 0;
	if (cnt > 1)
		return -EINVAL;

	if (mddev->layout != info->layout) {
		/* Change layout
		 * we don't need to do anything at the md level, the
		 * personality will take care of it all.
		 */
		if (mddev->pers->check_reshape == NULL)
			return -EINVAL;
		else {
			mddev->new_layout = info->layout;
			rv = mddev->pers->check_reshape(mddev);
			if (rv)
				mddev->new_layout = mddev->layout;
			return rv;
		}
	}
	if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
		rv = update_size(mddev, (sector_t)info->size * 2);

	if (mddev->raid_disks != info->raid_disks)
		rv = update_raid_disks(mddev, info->raid_disks);

	if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
		if (mddev->pers->quiesce == NULL)
			return -EINVAL;
		if (mddev->recovery || mddev->sync_thread)
			return -EBUSY;
		if (info->state & (1<<MD_SB_BITMAP_PRESENT)) {
			/* add the bitmap */
			if (mddev->bitmap)
				return -EEXIST;
			if (mddev->bitmap_info.default_offset == 0)
				return -EINVAL;
			mddev->bitmap_info.offset =
				mddev->bitmap_info.default_offset;
			mddev->pers->quiesce(mddev, 1);
			rv = bitmap_create(mddev);
			if (!rv)
				rv = bitmap_load(mddev);
			if (rv)
				bitmap_destroy(mddev);
			mddev->pers->quiesce(mddev, 0);
		} else {
			/* remove the bitmap */
			if (!mddev->bitmap)
				return -ENOENT;
			if (mddev->bitmap->file)
				return -EINVAL;
			mddev->pers->quiesce(mddev, 1);
			bitmap_destroy(mddev);
			mddev->pers->quiesce(mddev, 0);
			mddev->bitmap_info.offset = 0;
		}
	}
	md_update_sb(mddev, 1);
	return rv;
}
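/*
 * Example of the one-change rule above: a request that only bumps
 * raid_disks from 2 to 3 (all other fields matching the live array)
 * reaches update_raid_disks(); a request that changes both size and
 * layout makes cnt == 2 and fails with -EINVAL.
 */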
static int set_disk_faulty(mddev_t *mddev, dev_t dev)
{
	mdk_rdev_t *rdev;

	if (mddev->pers == NULL)
		return -ENODEV;

	rdev = find_rdev(mddev, dev);
	if (!rdev)
		return -ENODEV;

	md_error(mddev, rdev);
	return 0;
}
/*
 * We have a problem here : there is no easy way to give a CHS
 * virtual geometry. We currently pretend that we have a 2 heads
 * 4 sectors (with a BIG number of cylinders...). This drives
 * dosfs just mad... ;-)
 */
static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	mddev_t *mddev = bdev->bd_disk->private_data;

	geo->heads = 2;
	geo->sectors = 4;
	geo->cylinders = mddev->array_sectors / 8;
	return 0;
}
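/*
 * Worked example of the fake geometry: with 2 heads and 4 sectors per
 * track, one cylinder is 2 * 4 = 8 sectors, so a 1 TiB array
 * (2147483648 sectors of 512 bytes) reports 268435456 cylinders.
 */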
static int md_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	int err = 0;
	void __user *argp = (void __user *)arg;
	mddev_t *mddev = NULL;
	int ro;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	/*
	 * Commands dealing with the RAID driver but not any
	 * particular array:
	 */
	switch (cmd) {
	case RAID_VERSION:
		err = get_version(argp);
		goto done;
	case PRINT_RAID_DEBUG:
		err = 0;
		md_print_devices();
		goto done;
#ifndef MODULE
	case RAID_AUTORUN:
		err = 0;
		autostart_arrays(arg);
		goto done;
#endif
	default:;
	}

	/*
	 * Commands creating/starting a new array:
	 */
	mddev = bdev->bd_disk->private_data;
	if (!mddev) {
		BUG();
		goto abort;
	}

	err = mddev_lock(mddev);
	if (err) {
		printk(KERN_INFO
			"md: ioctl lock interrupted, reason %d, cmd %d\n",
			err, cmd);
		goto abort;
	}

	if (cmd == SET_ARRAY_INFO) {
		mdu_array_info_t info;
		if (!arg)
			memset(&info, 0, sizeof(info));
		else if (copy_from_user(&info, argp, sizeof(info))) {
			err = -EFAULT;
			goto abort_unlock;
		}
		if (mddev->pers) {
			err = update_array_info(mddev, &info);
			if (err) {
				printk(KERN_WARNING "md: couldn't update"
				       " array info. %d\n", err);
				goto abort_unlock;
			}
			goto done_unlock;
		}
		if (!list_empty(&mddev->disks)) {
			printk(KERN_WARNING
			       "md: array %s already has disks!\n",
			       mdname(mddev));
			err = -EBUSY;
			goto abort_unlock;
		}
		if (mddev->raid_disks) {
			printk(KERN_WARNING
			       "md: array %s already initialised!\n",
			       mdname(mddev));
			err = -EBUSY;
			goto abort_unlock;
		}
		err = set_array_info(mddev, &info);
		if (err) {
			printk(KERN_WARNING "md: couldn't set"
			       " array info. %d\n", err);
			goto abort_unlock;
		}
		goto done_unlock;
	}

	/*
	 * Commands querying/configuring an existing array:
	 */
	/* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY,
	 * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */
	if ((!mddev->raid_disks && !mddev->external)
	    && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
	    && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE
	    && cmd != GET_BITMAP_FILE) {
		err = -ENODEV;
		goto abort_unlock;
	}

	/*
	 * Commands even a read-only array can execute:
	 */
	switch (cmd) {
	case GET_ARRAY_INFO:
		err = get_array_info(mddev, argp);
		goto done_unlock;
	case GET_BITMAP_FILE:
		err = get_bitmap_file(mddev, argp);
		goto done_unlock;
	case GET_DISK_INFO:
		err = get_disk_info(mddev, argp);
		goto done_unlock;
	case RESTART_ARRAY_RW:
		err = restart_array(mddev);
		goto done_unlock;
	case STOP_ARRAY:
		err = do_md_stop(mddev, 0, 1);
		goto done_unlock;
	case STOP_ARRAY_RO:
		err = md_set_readonly(mddev, 1);
		goto done_unlock;
	case BLKROSET:
		if (get_user(ro, (int __user *)(arg))) {
			err = -EFAULT;
			goto done_unlock;
		}
		err = -EINVAL;
		/* if the bdev is going readonly the value of mddev->ro
		 * does not matter, no writes are coming
		 */
		if (ro)
			goto done_unlock;
		/* are we already prepared for writes? */
		if (mddev->ro != 1)
			goto done_unlock;
		/* transitioning to readauto need only happen for
		 * arrays that call md_write_start
		 */
		if (mddev->pers) {
			err = restart_array(mddev);
			if (err == 0) {
				mddev->ro = 2;
				set_disk_ro(mddev->gendisk, 0);
			}
		}
		goto done_unlock;
	}

	/*
	 * The remaining ioctls are changing the state of the
	 * superblock, so we do not allow them on read-only arrays.
	 * However non-MD ioctls (e.g. get-size) will still come through
	 * here and hit the 'default' below, so only disallow
	 * 'md' ioctls, and switch to rw mode if started auto-readonly.
	 */
	if (_IOC_TYPE(cmd) == MD_MAJOR && mddev->ro && mddev->pers) {
		if (mddev->ro == 2) {
			mddev->ro = 0;
			sysfs_notify_dirent_safe(mddev->sysfs_state);
			set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
			md_wakeup_thread(mddev->thread);
		} else {
			err = -EROFS;
			goto abort_unlock;
		}
	}

	switch (cmd) {
	case ADD_NEW_DISK:
	{
		mdu_disk_info_t info;
		if (copy_from_user(&info, argp, sizeof(info)))
			err = -EFAULT;
		else
			err = add_new_disk(mddev, &info);
		goto done_unlock;
	}
	case HOT_REMOVE_DISK:
		err = hot_remove_disk(mddev, new_decode_dev(arg));
		goto done_unlock;
	case HOT_ADD_DISK:
		err = hot_add_disk(mddev, new_decode_dev(arg));
		goto done_unlock;
	case SET_DISK_FAULTY:
		err = set_disk_faulty(mddev, new_decode_dev(arg));
		goto done_unlock;
	case RUN_ARRAY:
		err = do_md_run(mddev);
		goto done_unlock;
	case SET_BITMAP_FILE:
		err = set_bitmap_file(mddev, (int)arg);
		goto done_unlock;
	default:
		err = -EINVAL;
		goto abort_unlock;
	}

done_unlock:
abort_unlock:
	if (mddev->hold_active == UNTIL_IOCTL &&
	    err != -EINVAL)
		mddev->hold_active = 0;
	mddev_unlock(mddev);
	return err;
done:
	if (err)
		MD_BUG();
abort:
	return err;
}
#ifdef CONFIG_COMPAT
static int md_compat_ioctl(struct block_device *bdev, fmode_t mode,
		    unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case HOT_REMOVE_DISK:
	case HOT_ADD_DISK:
	case SET_DISK_FAULTY:
	case SET_BITMAP_FILE:
		/* These take in integer arg, do not convert */
		break;
	default:
		arg = (unsigned long)compat_ptr(arg);
		break;
	}

	return md_ioctl(bdev, mode, cmd, arg);
}
#endif /* CONFIG_COMPAT */
static int md_open(struct block_device *bdev, fmode_t mode)
{
	/*
	 * Succeed if we can lock the mddev, which confirms that
	 * it isn't being stopped right now.
	 */
	mddev_t *mddev = mddev_find(bdev->bd_dev);
	int err;

	if (mddev->gendisk != bdev->bd_disk) {
		/* we are racing with mddev_put which is discarding this
		 * bd_disk.
		 */
		mddev_put(mddev);
		/* Wait until bdev->bd_disk is definitely gone */
		flush_workqueue(md_misc_wq);
		/* Then retry the open from the top */
		return -ERESTARTSYS;
	}
	BUG_ON(mddev != bdev->bd_disk->private_data);

	if ((err = mutex_lock_interruptible(&mddev->open_mutex)))
		goto out;

	err = 0;
	atomic_inc(&mddev->openers);
	mutex_unlock(&mddev->open_mutex);

	check_disk_size_change(mddev->gendisk, bdev);
 out:
	return err;
}
static int md_release(struct gendisk *disk, fmode_t mode)
{
	mddev_t *mddev = disk->private_data;

	BUG_ON(!mddev);
	atomic_dec(&mddev->openers);
	mddev_put(mddev);

	return 0;
}
static const struct block_device_operations md_fops =
{
	.owner		= THIS_MODULE,
	.open		= md_open,
	.release	= md_release,
	.ioctl		= md_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= md_compat_ioctl,
#endif
	.getgeo		= md_getgeo,
};
static int md_thread(void * arg)
{
	mdk_thread_t *thread = arg;

	/*
	 * md_thread is a 'system-thread', its priority should be very
	 * high. We avoid resource deadlocks individually in each
	 * raid personality. (RAID5 does preallocation) We also use RR and
	 * the very same RT priority as kswapd, thus we will never get
	 * into a priority inversion deadlock.
	 *
	 * we definitely have to have equal or higher priority than
	 * bdflush, otherwise bdflush will deadlock if there are too
	 * many dirty RAID5 blocks.
	 */

	allow_signal(SIGKILL);
	while (!kthread_should_stop()) {

		/* We need to wait INTERRUPTIBLE so that
		 * we don't add to the load-average.
		 * That means we need to be sure no signals are
		 * pending
		 */
		if (signal_pending(current))
			flush_signals(current);

		wait_event_interruptible_timeout
			(thread->wqueue,
			 test_bit(THREAD_WAKEUP, &thread->flags)
			 || kthread_should_stop(),
			 thread->timeout);

		clear_bit(THREAD_WAKEUP, &thread->flags);
		if (!kthread_should_stop())
			thread->run(thread->mddev);
	}

	return 0;
}
void md_wakeup_thread(mdk_thread_t *thread)
{
	if (thread) {
		dprintk("md: waking up MD thread %s.\n", thread->tsk->comm);
		set_bit(THREAD_WAKEUP, &thread->flags);
		wake_up(&thread->wqueue);
	}
}
mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev,
				 const char *name)
{
	mdk_thread_t *thread;

	thread = kzalloc(sizeof(mdk_thread_t), GFP_KERNEL);
	if (!thread)
		return NULL;

	init_waitqueue_head(&thread->wqueue);

	thread->run = run;
	thread->mddev = mddev;
	thread->timeout = MAX_SCHEDULE_TIMEOUT;
	thread->tsk = kthread_run(md_thread, thread,
				  "%s_%s",
				  mdname(thread->mddev),
				  name ?: mddev->pers->name);
	if (IS_ERR(thread->tsk)) {
		kfree(thread);
		return NULL;
	}
	return thread;
}
void md_unregister_thread(mdk_thread_t *thread)
{
	dprintk("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));

	kthread_stop(thread->tsk);
	kfree(thread);
}
void md_error(mddev_t *mddev, mdk_rdev_t *rdev)
{
	if (!mddev) {
		MD_BUG();
		return;
	}

	if (!rdev || test_bit(Faulty, &rdev->flags))
		return;

	if (mddev->external)
		set_bit(Blocked, &rdev->flags);

	dprintk("md_error dev:%s, rdev:(%d:%d), (caller: %p,%p,%p,%p).\n",
		mdname(mddev),
		MAJOR(rdev->bdev->bd_dev), MINOR(rdev->bdev->bd_dev),
		__builtin_return_address(0),__builtin_return_address(1),
		__builtin_return_address(2),__builtin_return_address(3));

	if (!mddev->pers)
		return;
	if (!mddev->pers->error_handler)
		return;
	mddev->pers->error_handler(mddev,rdev);
	if (mddev->degraded)
		set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
	sysfs_notify_dirent_safe(rdev->sysfs_state);
	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	if (mddev->event_work.func)
		queue_work(md_misc_wq, &mddev->event_work);
	md_new_event_inintr(mddev);
}
/* seq_file implementation /proc/mdstat */

static void status_unused(struct seq_file *seq)
{
	int i = 0;
	mdk_rdev_t *rdev;

	seq_printf(seq, "unused devices: ");

	list_for_each_entry(rdev, &pending_raid_disks, same_set) {
		char b[BDEVNAME_SIZE];
		i++;
		seq_printf(seq, "%s ",
			      bdevname(rdev->bdev,b));
	}
	if (!i)
		seq_printf(seq, "<none>");

	seq_printf(seq, "\n");
}
static void status_resync(struct seq_file *seq, mddev_t * mddev)
{
	sector_t max_sectors, resync, res;
	unsigned long dt, db;
	sector_t rt;
	int scale;
	unsigned int per_milli;

	resync = mddev->curr_resync - atomic_read(&mddev->recovery_active);

	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
		max_sectors = mddev->resync_max_sectors;
	else
		max_sectors = mddev->dev_sectors;

	/*
	 * Should not happen.
	 */
	if (!max_sectors) {
		MD_BUG();
		return;
	}
	/* Pick 'scale' such that (resync>>scale)*1000 will fit
	 * in a sector_t, and (max_sectors>>scale) will fit in a
	 * u32, as those are the requirements for sector_div.
	 * Thus 'scale' must be at least 10
	 */
	scale = 10;
	if (sizeof(sector_t) > sizeof(unsigned long)) {
		while ( max_sectors/2 > (1ULL<<(scale+32)))
			scale++;
	}
	res = (resync>>scale)*1000;
	sector_div(res, (u32)((max_sectors>>scale)+1));

	per_milli = res;
	{
		int i, x = per_milli/50, y = 20-x;
		seq_printf(seq, "[");
		for (i = 0; i < x; i++)
			seq_printf(seq, "=");
		seq_printf(seq, ">");
		for (i = 0; i < y; i++)
			seq_printf(seq, ".");
		seq_printf(seq, "] ");
	}
	seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)",
		   (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)?
		    "reshape" :
		    (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)?
		     "check" :
		     (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
		      "resync" : "recovery"))),
		   per_milli/10, per_milli % 10,
		   (unsigned long long) resync/2,
		   (unsigned long long) max_sectors/2);

	/*
	 * dt: time from mark until now
	 * db: blocks written from mark until now
	 * rt: remaining time
	 *
	 * rt is a sector_t, so could be 32bit or 64bit.
	 * So we divide before multiply in case it is 32bit and close
	 * to the limit.
	 * We scale the divisor (db) by 32 to avoid losing precision
	 * near the end of resync when the number of remaining sectors
	 * is close to the 'db' value.
	 * We then divide rt by 32 after multiplying by db to compensate.
	 * The '+1' avoids division by zero if db is very small.
	 */
	dt = ((jiffies - mddev->resync_mark) / HZ);
	if (!dt) dt++;
	db = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active))
		- mddev->resync_mark_cnt;

	rt = max_sectors - resync;    /* number of remaining sectors */
	sector_div(rt, db/32+1);
	rt *= dt;
	rt >>= 5;

	seq_printf(seq, " finish=%lu.%lumin", (unsigned long)rt / 60,
		   ((unsigned long)rt % 60)/6);

	seq_printf(seq, " speed=%ldK/sec", db/2/dt);
}
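/*
 * Worked example of the arithmetic above, assuming a 1 TB member
 * (max_sectors = 1953125000) with resync = 976562500 on a 64-bit box:
 * scale stays 10, and res = (976562500>>10)*1000 / ((1953125000>>10)+1)
 * = 499, so the bar prints "=49.9%". If db = 100000 sectors were
 * written over dt = 3 seconds, the remaining 976562500 sectors give
 * rt = (976562500 / (100000/32 + 1)) * 3 >> 5 = 29287 seconds, printed
 * as "finish=488.1min", with "speed=16666K/sec" (db/2/dt).
 */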
static void *md_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct list_head *tmp;
	loff_t l = *pos;
	mddev_t *mddev;

	if (l >= 0x10000)
		return NULL;
	if (!l--)
		/* header */
		return (void*)1;

	spin_lock(&all_mddevs_lock);
	list_for_each(tmp,&all_mddevs)
		if (!l--) {
			mddev = list_entry(tmp, mddev_t, all_mddevs);
			mddev_get(mddev);
			spin_unlock(&all_mddevs_lock);
			return mddev;
		}
	spin_unlock(&all_mddevs_lock);
	if (!l--)
		return (void*)2;/* tail */
	return NULL;
}
static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct list_head *tmp;
	mddev_t *next_mddev, *mddev = v;

	++*pos;
	if (v == (void*)2)
		return NULL;

	spin_lock(&all_mddevs_lock);
	if (v == (void*)1)
		tmp = all_mddevs.next;
	else
		tmp = mddev->all_mddevs.next;
	if (tmp != &all_mddevs)
		next_mddev = mddev_get(list_entry(tmp,mddev_t,all_mddevs));
	else {
		next_mddev = (void*)2;
		*pos = 0x10000;
	}
	spin_unlock(&all_mddevs_lock);

	if (v != (void*)1)
		mddev_put(mddev);
	return next_mddev;
}
static void md_seq_stop(struct seq_file *seq, void *v)
{
	mddev_t *mddev = v;

	if (mddev && v != (void*)1 && v != (void*)2)
		mddev_put(mddev);
}

struct mdstat_info {
	int event;
};
static int md_seq_show(struct seq_file *seq, void *v)
{
	mddev_t *mddev = v;
	sector_t sectors;
	mdk_rdev_t *rdev;
	struct mdstat_info *mi = seq->private;
	struct bitmap *bitmap;

	if (v == (void*)1) {
		struct mdk_personality *pers;
		seq_printf(seq, "Personalities : ");
		spin_lock(&pers_lock);
		list_for_each_entry(pers, &pers_list, list)
			seq_printf(seq, "[%s] ", pers->name);

		spin_unlock(&pers_lock);
		seq_printf(seq, "\n");
		mi->event = atomic_read(&md_event_count);
		return 0;
	}
	if (v == (void*)2) {
		status_unused(seq);
		return 0;
	}

	if (mddev_lock(mddev) < 0)
		return -EINTR;

	if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
		seq_printf(seq, "%s : %sactive", mdname(mddev),
						mddev->pers ? "" : "in");
		if (mddev->pers) {
			if (mddev->ro==1)
				seq_printf(seq, " (read-only)");
			if (mddev->ro==2)
				seq_printf(seq, " (auto-read-only)");
			seq_printf(seq, " %s", mddev->pers->name);
		}

		sectors = 0;
		list_for_each_entry(rdev, &mddev->disks, same_set) {
			char b[BDEVNAME_SIZE];
			seq_printf(seq, " %s[%d]",
				bdevname(rdev->bdev,b), rdev->desc_nr);
			if (test_bit(WriteMostly, &rdev->flags))
				seq_printf(seq, "(W)");
			if (test_bit(Faulty, &rdev->flags)) {
				seq_printf(seq, "(F)");
				continue;
			} else if (rdev->raid_disk < 0)
				seq_printf(seq, "(S)"); /* spare */
			sectors += rdev->sectors;
		}

		if (!list_empty(&mddev->disks)) {
			if (mddev->pers)
				seq_printf(seq, "\n      %llu blocks",
					   (unsigned long long)
					   mddev->array_sectors / 2);
			else
				seq_printf(seq, "\n      %llu blocks",
					   (unsigned long long)sectors / 2);
		}
		if (mddev->persistent) {
			if (mddev->major_version != 0 ||
			    mddev->minor_version != 90) {
				seq_printf(seq," super %d.%d",
					   mddev->major_version,
					   mddev->minor_version);
			}
		} else if (mddev->external)
			seq_printf(seq, " super external:%s",
				   mddev->metadata_type);
		else
			seq_printf(seq, " super non-persistent");

		if (mddev->pers) {
			mddev->pers->status(seq, mddev);
			seq_printf(seq, "\n      ");
			if (mddev->pers->sync_request) {
				if (mddev->curr_resync > 2) {
					status_resync(seq, mddev);
					seq_printf(seq, "\n      ");
				} else if (mddev->curr_resync == 1 || mddev->curr_resync == 2)
					seq_printf(seq, "\tresync=DELAYED\n      ");
				else if (mddev->recovery_cp < MaxSector)
					seq_printf(seq, "\tresync=PENDING\n      ");
			}
		} else
			seq_printf(seq, "\n       ");

		if ((bitmap = mddev->bitmap)) {
			unsigned long chunk_kb;
			unsigned long flags;
			spin_lock_irqsave(&bitmap->lock, flags);
			chunk_kb = mddev->bitmap_info.chunksize >> 10;
			seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], "
				"%lu%s chunk",
				bitmap->pages - bitmap->missing_pages,
				bitmap->pages,
				(bitmap->pages - bitmap->missing_pages)
					<< (PAGE_SHIFT - 10),
				chunk_kb ? chunk_kb : mddev->bitmap_info.chunksize,
				chunk_kb ? "KB" : "B");
			if (bitmap->file) {
				seq_printf(seq, ", file: ");
				seq_path(seq, &bitmap->file->f_path, " \t\n");
			}

			seq_printf(seq, "\n");
			spin_unlock_irqrestore(&bitmap->lock, flags);
		}

		seq_printf(seq, "\n");
	}
	mddev_unlock(mddev);

	return 0;
}
static const struct seq_operations md_seq_ops = {
	.start  = md_seq_start,
	.next   = md_seq_next,
	.stop   = md_seq_stop,
	.show   = md_seq_show,
};
static int md_seq_open(struct inode *inode, struct file *file)
{
	int error;
	struct mdstat_info *mi = kmalloc(sizeof(*mi), GFP_KERNEL);
	if (mi == NULL)
		return -ENOMEM;

	error = seq_open(file, &md_seq_ops);
	if (error)
		kfree(mi);
	else {
		struct seq_file *p = file->private_data;
		p->private = mi;
		mi->event = atomic_read(&md_event_count);
	}
	return error;
}
static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
{
	struct seq_file *m = filp->private_data;
	struct mdstat_info *mi = m->private;
	int mask;

	poll_wait(filp, &md_event_waiters, wait);

	/* always allow read */
	mask = POLLIN | POLLRDNORM;

	if (mi->event != atomic_read(&md_event_count))
		mask |= POLLERR | POLLPRI;
	return mask;
}
static const struct file_operations md_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= md_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
	.poll		= mdstat_poll,
};
int register_md_personality(struct mdk_personality *p)
{
	spin_lock(&pers_lock);
	list_add_tail(&p->list, &pers_list);
	printk(KERN_INFO "md: %s personality registered for level %d\n", p->name, p->level);
	spin_unlock(&pers_lock);
	return 0;
}

int unregister_md_personality(struct mdk_personality *p)
{
	printk(KERN_INFO "md: %s personality unregistered\n", p->name);
	spin_lock(&pers_lock);
	list_del_init(&p->list);
	spin_unlock(&pers_lock);
	return 0;
}
static int is_mddev_idle(mddev_t *mddev, int init)
{
	mdk_rdev_t *rdev;
	int idle;
	int curr_events;

	idle = 1;
	rcu_read_lock();
	rdev_for_each_rcu(rdev, mddev) {
		struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
		curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
			      (int)part_stat_read(&disk->part0, sectors[1]) -
			      atomic_read(&disk->sync_io);
		/* sync IO will cause sync_io to increase before the disk_stats
		 * as sync_io is counted when a request starts, and
		 * disk_stats is counted when it completes.
		 * So resync activity will cause curr_events to be smaller than
		 * when there was no such activity.
		 * non-sync IO will cause disk_stats to increase without
		 * increasing sync_io so curr_events will (eventually)
		 * be larger than it was before. Once it becomes
		 * substantially larger, the test below will cause
		 * the array to appear non-idle, and resync will slow
		 * down.
		 * If there is a lot of outstanding resync activity when
		 * we set last_event to curr_events, then all that activity
		 * completing might cause the array to appear non-idle
		 * and resync will be slowed down even though there might
		 * not have been non-resync activity. This will only
		 * happen once though. 'last_events' will soon reflect
		 * the state where there is little or no outstanding
		 * resync requests, and further resync activity will
		 * always make curr_events less than last_events.
		 */
		if (init || curr_events - rdev->last_events > 64) {
			rdev->last_events = curr_events;
			idle = 0;
		}
	}
	rcu_read_unlock();
	return idle;
}
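/*
 * Example of the hysteresis above: if a member disk shows curr_events
 * of 1000 while rdev->last_events is 900, the difference (100 > 64)
 * marks the array non-idle and resync throttles toward speed_min; once
 * non-resync IO stops, curr_events stays within 64 of last_events and
 * full-speed resync resumes.
 */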
void md_done_sync(mddev_t *mddev, int blocks, int ok)
{
	/* another "blocks" (512byte) blocks have been synced */
	atomic_sub(blocks, &mddev->recovery_active);
	wake_up(&mddev->recovery_wait);
	if (!ok) {
		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
		md_wakeup_thread(mddev->thread);
		// stop recovery, signal do_sync ....
	}
}
/* md_write_start(mddev, bi)
 * If we need to update some array metadata (e.g. 'active' flag
 * in superblock) before writing, schedule a superblock update
 * and wait for it to complete.
 */
void md_write_start(mddev_t *mddev, struct bio *bi)
{
	int did_change = 0;
	if (bio_data_dir(bi) != WRITE)
		return;

	BUG_ON(mddev->ro == 1);
	if (mddev->ro == 2) {
		/* need to switch to read/write */
		mddev->ro = 0;
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
		md_wakeup_thread(mddev->thread);
		md_wakeup_thread(mddev->sync_thread);
		did_change = 1;
	}
	atomic_inc(&mddev->writes_pending);
	if (mddev->safemode == 1)
		mddev->safemode = 0;
	if (mddev->in_sync) {
		spin_lock_irq(&mddev->write_lock);
		if (mddev->in_sync) {
			mddev->in_sync = 0;
			set_bit(MD_CHANGE_CLEAN, &mddev->flags);
			set_bit(MD_CHANGE_PENDING, &mddev->flags);
			md_wakeup_thread(mddev->thread);
			did_change = 1;
		}
		spin_unlock_irq(&mddev->write_lock);
	}
	if (did_change)
		sysfs_notify_dirent_safe(mddev->sysfs_state);
	wait_event(mddev->sb_wait,
		   !test_bit(MD_CHANGE_PENDING, &mddev->flags));
}
void md_write_end(mddev_t *mddev)
{
	if (atomic_dec_and_test(&mddev->writes_pending)) {
		if (mddev->safemode == 2)
			md_wakeup_thread(mddev->thread);
		else if (mddev->safemode_delay)
			mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay);
	}
}
/* md_allow_write(mddev)
 * Calling this ensures that the array is marked 'active' so that writes
 * may proceed without blocking. It is important to call this before
 * attempting a GFP_KERNEL allocation while holding the mddev lock.
 * Must be called with mddev_lock held.
 *
 * In the ->external case MD_CHANGE_CLEAN cannot be cleared until mddev->lock
 * is dropped, so return -EAGAIN after notifying userspace.
 */
int md_allow_write(mddev_t *mddev)
{
	if (!mddev->pers)
		return 0;
	if (mddev->ro)
		return 0;
	if (!mddev->pers->sync_request)
		return 0;

	spin_lock_irq(&mddev->write_lock);
	if (mddev->in_sync) {
		mddev->in_sync = 0;
		set_bit(MD_CHANGE_CLEAN, &mddev->flags);
		set_bit(MD_CHANGE_PENDING, &mddev->flags);
		if (mddev->safemode_delay &&
		    mddev->safemode == 0)
			mddev->safemode = 1;
		spin_unlock_irq(&mddev->write_lock);
		md_update_sb(mddev, 0);
		sysfs_notify_dirent_safe(mddev->sysfs_state);
	} else
		spin_unlock_irq(&mddev->write_lock);

	if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
		return -EAGAIN;
	else
		return 0;
}
EXPORT_SYMBOL_GPL(md_allow_write);
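/*
 * Illustrative only: a personality that must kmalloc(GFP_KERNEL) while
 * holding mddev_lock would typically call this first and bubble up the
 * -EAGAIN (a sketch; the hypothetical helper below is not part of this
 * file):
 *
 *	static int example_prepare(mddev_t *mddev)
 *	{
 *		int err = md_allow_write(mddev);
 *		if (err)
 *			return err;	// caller retries after dropping lock
 *		return do_gfp_kernel_allocation(mddev);	// hypothetical
 *	}
 */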
void md_unplug(mddev_t *mddev)
{
	if (mddev->queue)
		blk_unplug(mddev->queue);
	if (mddev->plug)
		mddev->plug->unplug_fn(mddev->plug);
}
#define SYNC_MARKS	10
#define	SYNC_MARK_STEP	(3*HZ)
void md_do_sync(mddev_t *mddev)
{
	mddev_t *mddev2;
	unsigned int currspeed = 0,
		 window;
	sector_t max_sectors,j, io_sectors;
	unsigned long mark[SYNC_MARKS];
	sector_t mark_cnt[SYNC_MARKS];
	int last_mark,m;
	struct list_head *tmp;
	sector_t last_check;
	int skipped = 0;
	mdk_rdev_t *rdev;
	char *desc;

	/* just in case thread restarts... */
	if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
		return;
	if (mddev->ro) /* never try to sync a read-only array */
		return;

	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
		if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
			desc = "data-check";
		else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
			desc = "requested-resync";
		else
			desc = "resync";
	} else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
		desc = "reshape";
	else
		desc = "recovery";

	/* we overload curr_resync somewhat here.
	 * 0 == not engaged in resync at all
	 * 2 == checking that there is no conflict with another sync
	 * 1 == like 2, but have yielded to allow conflicting resync to
	 *		commence
	 * other == active in resync - this many blocks
	 *
	 * Before starting a resync we must have set curr_resync to
	 * 2, and then checked that every "conflicting" array has curr_resync
	 * less than ours. When we find one that is the same or higher
	 * we wait on resync_wait. To avoid deadlock, we reduce curr_resync
	 * to 1 if we choose to yield (based arbitrarily on address of mddev structure).
	 * This will mean we have to start checking from the beginning again.
	 */

	do {
		mddev->curr_resync = 2;

	try_again:
		if (kthread_should_stop())
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);

		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
			goto skip;
		for_each_mddev(mddev2, tmp) {
			if (mddev2 == mddev)
				continue;
			if (!mddev->parallel_resync
			&&  mddev2->curr_resync
			&&  match_mddev_units(mddev, mddev2)) {
				DEFINE_WAIT(wq);
				if (mddev < mddev2 && mddev->curr_resync == 2) {
					/* arbitrarily yield */
					mddev->curr_resync = 1;
					wake_up(&resync_wait);
				}
				if (mddev > mddev2 && mddev->curr_resync == 1)
					/* no need to wait here, we can wait the next
					 * time 'round when curr_resync == 2
					 */
					continue;
				/* We need to wait 'interruptible' so as not to
				 * contribute to the load average, and not to
				 * be caught by 'softlockup'
				 */
				prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
				if (!kthread_should_stop() &&
				    mddev2->curr_resync >= mddev->curr_resync) {
					printk(KERN_INFO "md: delaying %s of %s"
					       " until %s has finished (they"
					       " share one or more physical units)\n",
					       desc, mdname(mddev), mdname(mddev2));
					mddev_put(mddev2);
					if (signal_pending(current))
						flush_signals(current);
					schedule();
					finish_wait(&resync_wait, &wq);
					goto try_again;
				}
				finish_wait(&resync_wait, &wq);
			}
		}
	} while (mddev->curr_resync < 2);

	j = 0;
	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
		/* resync follows the size requested by the personality,
		 * which defaults to physical size, but can be virtual size
		 */
		max_sectors = mddev->resync_max_sectors;
		mddev->resync_mismatches = 0;
		/* we don't use the checkpoint if there's a bitmap */
		if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
			j = mddev->resync_min;
		else if (!mddev->bitmap)
			j = mddev->recovery_cp;

	} else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
		max_sectors = mddev->dev_sectors;
	else {
		/* recovery follows the physical size of devices */
		max_sectors = mddev->dev_sectors;
		j = MaxSector;
		rcu_read_lock();
		list_for_each_entry_rcu(rdev, &mddev->disks, same_set)
			if (rdev->raid_disk >= 0 &&
			    !test_bit(Faulty, &rdev->flags) &&
			    !test_bit(In_sync, &rdev->flags) &&
			    rdev->recovery_offset < j)
				j = rdev->recovery_offset;
		rcu_read_unlock();
	}

	printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev));
	printk(KERN_INFO "md: minimum _guaranteed_ speed:"
		" %d KB/sec/disk.\n", speed_min(mddev));
	printk(KERN_INFO "md: using maximum available idle IO bandwidth "
	       "(but not more than %d KB/sec) for %s.\n",
	       speed_max(mddev), desc);

	is_mddev_idle(mddev, 1); /* this initializes IO event counters */

	io_sectors = 0;
	for (m = 0; m < SYNC_MARKS; m++) {
		mark[m] = jiffies;
		mark_cnt[m] = io_sectors;
	}
	last_mark = 0;
	mddev->resync_mark = mark[last_mark];
	mddev->resync_mark_cnt = mark_cnt[last_mark];

	/*
	 * Tune reconstruction:
	 */
	window = 32*(PAGE_SIZE/512);
	printk(KERN_INFO "md: using %dk window, over a total of %llu blocks.\n",
		window/2,(unsigned long long) max_sectors/2);

	atomic_set(&mddev->recovery_active, 0);
	last_check = 0;

	if (j>2) {
		printk(KERN_INFO
		       "md: resuming %s of %s from checkpoint.\n",
		       desc, mdname(mddev));
		mddev->curr_resync = j;
	}
	mddev->curr_resync_completed = mddev->curr_resync;

	while (j < max_sectors) {
		sector_t sectors;

		skipped = 0;

		if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
		    ((mddev->curr_resync > mddev->curr_resync_completed &&
		      (mddev->curr_resync - mddev->curr_resync_completed)
		      > (max_sectors >> 4)) ||
		     (j - mddev->curr_resync_completed)*2
		     >= mddev->resync_max - mddev->curr_resync_completed
			    )) {
			/* time to update curr_resync_completed */
			md_unplug(mddev);
			wait_event(mddev->recovery_wait,
				   atomic_read(&mddev->recovery_active) == 0);
			mddev->curr_resync_completed =
				mddev->curr_resync;
			set_bit(MD_CHANGE_CLEAN, &mddev->flags);
			sysfs_notify(&mddev->kobj, NULL, "sync_completed");
		}

		while (j >= mddev->resync_max && !kthread_should_stop()) {
			/* As this condition is controlled by user-space,
			 * we can block indefinitely, so use '_interruptible'
			 * to avoid triggering warnings.
			 */
			flush_signals(current); /* just in case */
			wait_event_interruptible(mddev->recovery_wait,
						 mddev->resync_max > j
						 || kthread_should_stop());
		}

		if (kthread_should_stop())
			goto interrupted;

		sectors = mddev->pers->sync_request(mddev, j, &skipped,
						  currspeed < speed_min(mddev));
		if (sectors == 0) {
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
			goto out;
		}

		if (!skipped) { /* actual IO requested */
			io_sectors += sectors;
			atomic_add(sectors, &mddev->recovery_active);
		}

		j += sectors;
		if (j>1) mddev->curr_resync = j;
		mddev->curr_mark_cnt = io_sectors;
		if (last_check == 0)
			/* this is the earliest that rebuild will be
			 * visible in /proc/mdstat
			 */
			md_new_event(mddev);

		if (last_check + window > io_sectors || j == max_sectors)
			continue;

		last_check = io_sectors;

		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
			break;

	repeat:
		if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) {
			/* step marks */
			int next = (last_mark+1) % SYNC_MARKS;

			mddev->resync_mark = mark[next];
			mddev->resync_mark_cnt = mark_cnt[next];
			mark[next] = jiffies;
			mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
			last_mark = next;
		}

		if (kthread_should_stop())
			goto interrupted;

		/*
		 * this loop exits only when we are slower than
		 * the 'hard' speed limit, or the system was IO-idle for
		 * a jiffy.
		 * the system might be non-idle CPU-wise, but we only care
		 * about not overloading the IO subsystem. (things like an
		 * e2fsck being done on the RAID array should execute fast)
		 */
		cond_resched();

		currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2
			/((jiffies-mddev->resync_mark)/HZ +1) +1;

		if (currspeed > speed_min(mddev)) {
			if ((currspeed > speed_max(mddev)) ||
					!is_mddev_idle(mddev, 0)) {
				msleep(500);
				goto repeat;
			}
		}
	}
	printk(KERN_INFO "md: %s: %s done.\n",mdname(mddev), desc);
	/*
	 * this also signals 'finished resyncing' to md_stop
	 */
 out:
	md_unplug(mddev);

	wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));

	/* tell personality that we are finished */
	mddev->pers->sync_request(mddev, max_sectors, &skipped, 1);

	if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
	    mddev->curr_resync > 2) {
		if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
			if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
				if (mddev->curr_resync >= mddev->recovery_cp) {
					printk(KERN_INFO
					       "md: checkpointing %s of %s.\n",
					       desc, mdname(mddev));
					mddev->recovery_cp = mddev->curr_resync;
				}
			} else
				mddev->recovery_cp = MaxSector;
		} else {
			if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
				mddev->curr_resync = MaxSector;
			rcu_read_lock();
			list_for_each_entry_rcu(rdev, &mddev->disks, same_set)
				if (rdev->raid_disk >= 0 &&
				    mddev->delta_disks >= 0 &&
				    !test_bit(Faulty, &rdev->flags) &&
				    !test_bit(In_sync, &rdev->flags) &&
				    rdev->recovery_offset < mddev->curr_resync)
					rdev->recovery_offset = mddev->curr_resync;
			rcu_read_unlock();
		}
	}
	set_bit(MD_CHANGE_DEVS, &mddev->flags);

 skip:
	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
		/* We completed so min/max setting can be forgotten if used. */
		if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
			mddev->resync_min = 0;
		mddev->resync_max = MaxSector;
	} else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
		mddev->resync_min = mddev->curr_resync_completed;
	mddev->curr_resync = 0;
	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
		mddev->curr_resync_completed = 0;
	sysfs_notify(&mddev->kobj, NULL, "sync_completed");
	wake_up(&resync_wait);
	set_bit(MD_RECOVERY_DONE, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	return;

 interrupted:
	/*
	 * got a signal, exit.
	 */
	printk(KERN_INFO
	       "md: md_do_sync() got signal ... exiting\n");
	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
	goto out;
}
EXPORT_SYMBOL_GPL(md_do_sync);
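/*
 * Worked example of the throttle above: with io_sectors 60000 ahead of
 * resync_mark_cnt and 10 seconds since resync_mark, currspeed is
 * 60000/2/(10+1)+1 = 2728 KB/sec. That exceeds the default 1000 KB/sec
 * minimum, so if the array is not idle (or 2728 exceeded speed_max) the
 * thread sleeps 500ms and re-checks before issuing more resync IO.
 */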
static int remove_and_add_spares(mddev_t *mddev)
{
	mdk_rdev_t *rdev;
	int spares = 0;

	mddev->curr_resync_completed = 0;

	list_for_each_entry(rdev, &mddev->disks, same_set)
		if (rdev->raid_disk >= 0 &&
		    !test_bit(Blocked, &rdev->flags) &&
		    (test_bit(Faulty, &rdev->flags) ||
		     ! test_bit(In_sync, &rdev->flags)) &&
		    atomic_read(&rdev->nr_pending)==0) {
			if (mddev->pers->hot_remove_disk(
				    mddev, rdev->raid_disk)==0) {
				char nm[20];
				sprintf(nm,"rd%d", rdev->raid_disk);
				sysfs_remove_link(&mddev->kobj, nm);
				rdev->raid_disk = -1;
			}
		}

	if (mddev->degraded && !mddev->ro && !mddev->recovery_disabled) {
		list_for_each_entry(rdev, &mddev->disks, same_set) {
			if (rdev->raid_disk >= 0 &&
			    !test_bit(In_sync, &rdev->flags) &&
			    !test_bit(Blocked, &rdev->flags))
				spares++;
			if (rdev->raid_disk < 0
			    && !test_bit(Faulty, &rdev->flags)) {
				rdev->recovery_offset = 0;
				if (mddev->pers->
				    hot_add_disk(mddev, rdev) == 0) {
					char nm[20];
					sprintf(nm, "rd%d", rdev->raid_disk);
					if (sysfs_create_link(&mddev->kobj,
							      &rdev->kobj, nm))
						/* failure here is OK */;
					spares++;
					md_new_event(mddev);
					set_bit(MD_CHANGE_DEVS, &mddev->flags);
				} else
					break;
			}
		}
	}
	return spares;
}
/*
 * This routine is regularly called by all per-raid-array threads to
 * deal with generic issues like resync and super-block update.
 * Raid personalities that don't have a thread (linear/raid0) do not
 * need this as they never do any recovery or update the superblock.
 *
 * It does not do any resync itself, but rather "forks" off other threads
 * to do that as needed.
 * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
 * "->recovery" and create a thread at ->sync_thread.
 * When the thread finishes it sets MD_RECOVERY_DONE
 * and wakes up this thread, which will reap the thread and finish up.
 * This thread also removes any faulty devices (with nr_pending == 0).
 *
 * The overall approach is:
 *  1/ if the superblock needs updating, update it.
 *  2/ If a recovery thread is running, don't do anything else.
 *  3/ If recovery has finished, clean up, possibly marking spares active.
 *  4/ If there are any faulty devices, remove them.
 *  5/ If array is degraded, try to add spare devices
 *  6/ If array has spares or is not in-sync, start a resync thread.
 */
void md_check_recovery(mddev_t *mddev)
{
	mdk_rdev_t *rdev;

	if (mddev->bitmap)
		bitmap_daemon_work(mddev);

	if (signal_pending(current)) {
		if (mddev->pers->sync_request && !mddev->external) {
			printk(KERN_INFO "md: %s in immediate safe mode\n",
			       mdname(mddev));
			mddev->safemode = 2;
		}
		flush_signals(current);
	}

	if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
		return;
	if ( ! (
		(mddev->flags & ~ (1<<MD_CHANGE_PENDING)) ||
		test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
		test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
		(mddev->external == 0 && mddev->safemode == 1) ||
		(mddev->safemode == 2 && ! atomic_read(&mddev->writes_pending)
		 && !mddev->in_sync && mddev->recovery_cp == MaxSector)
		))
		return;

	if (mddev_trylock(mddev)) {
		int spares = 0;

		if (mddev->ro) {
			/* Only thing we do on a ro array is remove
			 * failed devices.
			 */
			remove_and_add_spares(mddev);
			clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
			goto unlock;
		}

		if (!mddev->external) {
			int did_change = 0;
			spin_lock_irq(&mddev->write_lock);
			if (mddev->safemode &&
			    !atomic_read(&mddev->writes_pending) &&
			    !mddev->in_sync &&
			    mddev->recovery_cp == MaxSector) {
				mddev->in_sync = 1;
				did_change = 1;
				set_bit(MD_CHANGE_CLEAN, &mddev->flags);
			}
			if (mddev->safemode == 1)
				mddev->safemode = 0;
			spin_unlock_irq(&mddev->write_lock);
			if (did_change)
				sysfs_notify_dirent_safe(mddev->sysfs_state);
		}

		if (mddev->flags)
			md_update_sb(mddev, 0);

		if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
		    !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
			/* resync/recovery still happening */
			clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
			goto unlock;
		}
		if (mddev->sync_thread) {
			/* resync has finished, collect result */
			md_unregister_thread(mddev->sync_thread);
			mddev->sync_thread = NULL;
			if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
			    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
				/* success...*/
				/* activate any spares */
				if (mddev->pers->spare_active(mddev))
					sysfs_notify(&mddev->kobj, NULL,
						     "degraded");
			}
			if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
			    mddev->pers->finish_reshape)
				mddev->pers->finish_reshape(mddev);
			md_update_sb(mddev, 1);

			/* if array is no-longer degraded, then any saved_raid_disk
			 * information must be scrapped
			 */
			if (!mddev->degraded)
				list_for_each_entry(rdev, &mddev->disks, same_set)
					rdev->saved_raid_disk = -1;

			mddev->recovery = 0;
			/* flag recovery needed just to double check */
			set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
			sysfs_notify_dirent_safe(mddev->sysfs_action);
			md_new_event(mddev);
			goto unlock;
		}
		/* Set RUNNING before clearing NEEDED to avoid
		 * any transients in the value of "sync_action".
		 */
		set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
		clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
		/* Clear some bits that don't mean anything, but
		 * might be left set
		 */
		clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
		clear_bit(MD_RECOVERY_DONE, &mddev->recovery);

		if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
			goto unlock;
		/* no recovery is running.
		 * remove any failed drives, then
		 * add spares if possible.
		 * Spares are also removed and re-added, to allow
		 * the personality to fail the re-add.
		 */

		if (mddev->reshape_position != MaxSector) {
			if (mddev->pers->check_reshape == NULL ||
			    mddev->pers->check_reshape(mddev) != 0)
				/* Cannot proceed */
				goto unlock;
			set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
			clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
		} else if ((spares = remove_and_add_spares(mddev))) {
			clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
			clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
			clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
			set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
		} else if (mddev->recovery_cp < MaxSector) {
			set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
			clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
		} else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
			/* nothing to be done ... */
			goto unlock;

		if (mddev->pers->sync_request) {
			if (spares && mddev->bitmap && ! mddev->bitmap->file) {
				/* We are adding a device or devices to an array
				 * which has the bitmap stored on all devices.
				 * So make sure all bitmap pages get written
				 */
				bitmap_write_all(mddev->bitmap);
			}
			mddev->sync_thread = md_register_thread(md_do_sync,
								mddev,
								"resync");
			if (!mddev->sync_thread) {
				printk(KERN_ERR "%s: could not start resync"
					" thread...\n",
					mdname(mddev));
				/* leave the spares where they are, it shouldn't hurt */
				mddev->recovery = 0;
			} else
				md_wakeup_thread(mddev->sync_thread);
			sysfs_notify_dirent_safe(mddev->sysfs_action);
			md_new_event(mddev);
		}
	unlock:
		if (!mddev->sync_thread) {
			clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
			if (test_and_clear_bit(MD_RECOVERY_RECOVER,
					       &mddev->recovery))
				if (mddev->sysfs_action)
					sysfs_notify_dirent_safe(mddev->sysfs_action);
		}
		mddev_unlock(mddev);
	}
}
void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev)
{
	sysfs_notify_dirent_safe(rdev->sysfs_state);
	wait_event_timeout(rdev->blocked_wait,
			   !test_bit(Blocked, &rdev->flags),
			   msecs_to_jiffies(5000));
	rdev_dec_pending(rdev, mddev);
}
EXPORT_SYMBOL(md_wait_for_blocked_rdev);
static int md_notify_reboot(struct notifier_block *this,
			    unsigned long code, void *x)
{
	struct list_head *tmp;
	mddev_t *mddev;

	if ((code == SYS_DOWN) || (code == SYS_HALT) || (code == SYS_POWER_OFF)) {

		printk(KERN_INFO "md: stopping all md devices.\n");

		for_each_mddev(mddev, tmp)
			if (mddev_trylock(mddev)) {
				/* Force a switch to readonly even if the
				 * array appears to still be in use. Hence
				 * the '100'.
				 */
				md_set_readonly(mddev, 100);
				mddev_unlock(mddev);
			}
		/*
		 * certain more exotic SCSI devices are known to be
		 * volatile wrt too early system reboots. While the
		 * right place to handle this issue is the given
		 * driver, we do want to have a safe RAID driver ...
		 */
		mdelay(1000*1);
	}
	return NOTIFY_DONE;
}

static struct notifier_block md_notifier = {
	.notifier_call	= md_notify_reboot,
	.next		= NULL,
	.priority	= INT_MAX, /* before any real devices */
};
static void md_geninit(void)
{
	dprintk("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));

	proc_create("mdstat", S_IRUGO, NULL, &md_seq_fops);
}

static int __init md_init(void)
{
	int ret = -ENOMEM;

	md_wq = alloc_workqueue("md", WQ_RESCUER, 0);
	if (!md_wq)
		goto err_wq;

	md_misc_wq = alloc_workqueue("md_misc", 0, 0);
	if (!md_misc_wq)
		goto err_misc_wq;

	if ((ret = register_blkdev(MD_MAJOR, "md")) < 0)
		goto err_md;

	if ((ret = register_blkdev(0, "mdp")) < 0)
		goto err_mdp;
	mdp_major = ret;

	blk_register_region(MKDEV(MD_MAJOR, 0), 1UL<<MINORBITS, THIS_MODULE,
			    md_probe, NULL, NULL);
	blk_register_region(MKDEV(mdp_major, 0), 1UL<<MINORBITS, THIS_MODULE,
			    md_probe, NULL, NULL);

	register_reboot_notifier(&md_notifier);
	raid_table_header = register_sysctl_table(raid_root_table);

	md_geninit();
	return 0;

err_mdp:
	unregister_blkdev(MD_MAJOR, "md");
err_md:
	destroy_workqueue(md_misc_wq);
err_misc_wq:
	destroy_workqueue(md_wq);
err_wq:
	return ret;
}
#ifndef MODULE

/*
 * Searches all registered partitions for autorun RAID arrays
 * at boot time.
 */

static LIST_HEAD(all_detected_devices);
struct detected_devices_node {
	struct list_head list;
	dev_t dev;
};

void md_autodetect_dev(dev_t dev)
{
	struct detected_devices_node *node_detected_dev;

	node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL);
	if (node_detected_dev) {
		node_detected_dev->dev = dev;
		list_add_tail(&node_detected_dev->list, &all_detected_devices);
	} else {
		printk(KERN_CRIT "md: md_autodetect_dev: kzalloc failed"
			", skipping dev(%d,%d)\n", MAJOR(dev), MINOR(dev));
	}
}

static void autostart_arrays(int part)
{
	mdk_rdev_t *rdev;
	struct detected_devices_node *node_detected_dev;
	dev_t dev;
	int i_scanned, i_passed;

	i_scanned = 0;
	i_passed = 0;

	printk(KERN_INFO "md: Autodetecting RAID arrays.\n");

	while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) {
		i_scanned++;
		node_detected_dev = list_entry(all_detected_devices.next,
					struct detected_devices_node, list);
		list_del(&node_detected_dev->list);
		dev = node_detected_dev->dev;
		kfree(node_detected_dev);
		rdev = md_import_device(dev, 0, 90);
		if (IS_ERR(rdev))
			continue;

		if (test_bit(Faulty, &rdev->flags)) {
			MD_BUG();
			continue;
		}
		set_bit(AutoDetected, &rdev->flags);
		list_add(&rdev->same_set, &pending_raid_disks);
		i_passed++;
	}

	printk(KERN_INFO "md: Scanned %d and added %d devices.\n",
					i_scanned, i_passed);

	autorun_devices(part);
}

#endif /* !MODULE */
static __exit void md_exit(void)
{
	mddev_t *mddev;
	struct list_head *tmp;

	blk_unregister_region(MKDEV(MD_MAJOR,0), 1U << MINORBITS);
	blk_unregister_region(MKDEV(mdp_major,0), 1U << MINORBITS);

	unregister_blkdev(MD_MAJOR,"md");
	unregister_blkdev(mdp_major, "mdp");
	unregister_reboot_notifier(&md_notifier);
	unregister_sysctl_table(raid_table_header);
	remove_proc_entry("mdstat", NULL);
	for_each_mddev(mddev, tmp) {
		export_array(mddev);
		mddev->hold_active = 0;
	}
	destroy_workqueue(md_misc_wq);
	destroy_workqueue(md_wq);
}

subsys_initcall(md_init);
module_exit(md_exit)
static int get_ro(char *buffer, struct kernel_param *kp)
{
	return sprintf(buffer, "%d", start_readonly);
}
static int set_ro(const char *val, struct kernel_param *kp)
{
	char *e;
	int num = simple_strtoul(val, &e, 10);
	if (*val && (*e == '\0' || *e == '\n')) {
		start_readonly = num;
		return 0;
	}
	return -EINVAL;
}

module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);

module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR);
EXPORT_SYMBOL(register_md_personality);
EXPORT_SYMBOL(unregister_md_personality);
EXPORT_SYMBOL(md_error);
EXPORT_SYMBOL(md_done_sync);
EXPORT_SYMBOL(md_write_start);
EXPORT_SYMBOL(md_write_end);
EXPORT_SYMBOL(md_register_thread);
EXPORT_SYMBOL(md_unregister_thread);
EXPORT_SYMBOL(md_wakeup_thread);
EXPORT_SYMBOL(md_check_recovery);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MD RAID framework");
MODULE_ALIAS("md");
MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);