/*
   md.c : Multiple Devices driver for Linux
	  Copyright (C) 1998, 1999, 2000 Ingo Molnar

     completely rewritten, based on the MD driver code from Marc Zyngier

   - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
   - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
   - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
   - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
   - kmod support by: Cyrus Durgin
   - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
   - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>

   - lots of fixes and improvements to the RAID1/RAID5 and generic
     RAID code (such as request based resynchronization):

     Neil Brown <neilb@cse.unsw.edu.au>.

   - persistent bitmap code
     Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kthread.h>
#include <linux/blkdev.h>
#include <linux/sysctl.h>
#include <linux/seq_file.h>
#include <linux/buffer_head.h> /* for invalidate_bdev */
#include <linux/poll.h>
#include <linux/ctype.h>
#include <linux/hdreg.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/file.h>
#include <linux/delay.h>
#include <linux/raid/md_p.h>
#include <linux/raid/md_u.h>

#define dprintk(x...) ((void)(DEBUG && printk(x)))

static void autostart_arrays(int part);

static LIST_HEAD(pers_list);
static DEFINE_SPINLOCK(pers_lock);

static void md_print_devices(void);

static DECLARE_WAIT_QUEUE_HEAD(resync_wait);

#define MD_BUG(x...) { printk("md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); }
/*
 * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
 * is 1000 KB/sec, so the extra system load does not show up that much.
 * Increase it if you want to have more _guaranteed_ speed. Note that
 * the RAID driver will use the maximum available bandwidth if the IO
 * subsystem is idle. There is also an 'absolute maximum' reconstruction
 * speed limit - in case reconstruction slows down your system despite
 * idle IO detection.
 *
 * You can change it via /proc/sys/dev/raid/speed_limit_{min,max}
 * or via /sys/block/mdX/md/sync_speed_{min,max}.
 */

static int sysctl_speed_limit_min = 1000;
static int sysctl_speed_limit_max = 200000;
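
/*
 * For example (a hypothetical admin session; the knobs are the ones
 * documented above, the array name md0 is an assumption):
 *
 *	# echo 5000 > /proc/sys/dev/raid/speed_limit_min
 *	# echo 100000 > /sys/block/md0/md/sync_speed_max
 *
 * The per-array sysfs values, when non-zero, take precedence over the
 * global sysctls, as speed_min()/speed_max() below show.
 */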
static inline int speed_min(mddev_t *mddev)
{
	return mddev->sync_speed_min ?
		mddev->sync_speed_min : sysctl_speed_limit_min;
}

static inline int speed_max(mddev_t *mddev)
{
	return mddev->sync_speed_max ?
		mddev->sync_speed_max : sysctl_speed_limit_max;
}
static struct ctl_table_header *raid_table_header;

static ctl_table raid_table[] = {
	{
		.ctl_name	= DEV_RAID_SPEED_LIMIT_MIN,
		.procname	= "speed_limit_min",
		.data		= &sysctl_speed_limit_min,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO|S_IWUSR,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= DEV_RAID_SPEED_LIMIT_MAX,
		.procname	= "speed_limit_max",
		.data		= &sysctl_speed_limit_max,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO|S_IWUSR,
		.proc_handler	= &proc_dointvec,
	},
	{ .ctl_name = 0 }
};

static ctl_table raid_dir_table[] = {
	{
		.ctl_name	= DEV_RAID,
		.procname	= "raid",
		.maxlen		= 0,
		.mode		= S_IRUGO|S_IXUGO,
		.child		= raid_table,
	},
	{ .ctl_name = 0 }
};

static ctl_table raid_root_table[] = {
	{
		.ctl_name	= CTL_DEV,
		.procname	= "dev",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= raid_dir_table,
	},
	{ .ctl_name = 0 }
};

static struct block_device_operations md_fops;

static int start_readonly;
/*
 * We have a system wide 'event count' that is incremented
 * on any 'interesting' event, and readers of /proc/mdstat
 * can use 'poll' or 'select' to find out when the event
 * count increases.
 *
 * Events are:
 *  start array, stop array, error, add device, remove device,
 *  start build, activate spare
 */
static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
static atomic_t md_event_count;
void md_new_event(mddev_t *mddev)
{
	atomic_inc(&md_event_count);
	wake_up(&md_event_waiters);
}
EXPORT_SYMBOL_GPL(md_new_event);
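
/*
 * A minimal userspace sketch of the mechanism described above (an
 * illustration, not part of this driver): read /proc/mdstat once, then
 * poll() for an exceptional event to learn when the event count changes.
 *
 *	#include <poll.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/proc/mdstat", O_RDONLY);
 *	char buf[4096];
 *	read(fd, buf, sizeof(buf));	// consume the current state
 *	struct pollfd pfd = { .fd = fd, .events = POLLPRI };
 *	poll(&pfd, 1, -1);		// returns when the event count changes
 *	lseek(fd, 0, SEEK_SET);		// then re-read for the new state
 */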
/* Alternate version that can be called from interrupts
 * when calling sysfs_notify isn't needed.
 */
static void md_new_event_inintr(mddev_t *mddev)
{
	atomic_inc(&md_event_count);
	wake_up(&md_event_waiters);
}

/*
 * Enables iteration over all existing md arrays.
 * all_mddevs_lock protects this list.
 */
static LIST_HEAD(all_mddevs);
static DEFINE_SPINLOCK(all_mddevs_lock);
/*
 * iterates through all used mddevs in the system.
 * We take care to grab the all_mddevs_lock whenever navigating
 * the list, and to always hold a refcount when unlocked.
 * Any code which breaks out of this loop while owning
 * a reference to the current mddev must mddev_put it.
 */
#define for_each_mddev(mddev,tmp)					\
									\
	for (({ spin_lock(&all_mddevs_lock);				\
		tmp = all_mddevs.next;					\
		mddev = NULL;});					\
	     ({ if (tmp != &all_mddevs)					\
			mddev_get(list_entry(tmp, mddev_t, all_mddevs));\
		spin_unlock(&all_mddevs_lock);				\
		if (mddev) mddev_put(mddev);				\
		mddev = list_entry(tmp, mddev_t, all_mddevs);		\
		tmp != &all_mddevs;});					\
	     ({ spin_lock(&all_mddevs_lock);				\
		tmp = tmp->next;})					\
		)
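
/*
 * Typical use of for_each_mddev() (the loop body is illustrative):
 *
 *	mddev_t *mddev;
 *	struct list_head *tmp;
 *
 *	for_each_mddev(mddev, tmp)
 *		printk("%s\n", mdname(mddev));
 *
 * Breaking out of the loop early leaves a reference held on the current
 * mddev, which the caller must drop with mddev_put().
 */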
/* Rather than calling directly into the personality make_request function,
 * IO requests come here first so that we can check if the device is
 * being suspended pending a reconfiguration.
 * We hold a refcount over the call to ->make_request. By the time that
 * call has finished, the bio has been linked into some internal structure
 * and so is visible to ->quiesce(), so we don't need the refcount any more.
 */
static int md_make_request(struct request_queue *q, struct bio *bio)
{
	mddev_t *mddev = q->queuedata;
	int rv;
	if (mddev == NULL || mddev->pers == NULL) {
		bio_io_error(bio);
		return 0;
	}
	rcu_read_lock();
	if (mddev->suspended) {
		DEFINE_WAIT(__wait);
		for (;;) {
			prepare_to_wait(&mddev->sb_wait, &__wait,
					TASK_UNINTERRUPTIBLE);
			if (!mddev->suspended)
				break;
			rcu_read_unlock();
			schedule();
			rcu_read_lock();
		}
		finish_wait(&mddev->sb_wait, &__wait);
	}
	atomic_inc(&mddev->active_io);
	rcu_read_unlock();
	rv = mddev->pers->make_request(q, bio);
	if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
		wake_up(&mddev->sb_wait);
	return rv;
}
static void mddev_suspend(mddev_t *mddev)
{
	BUG_ON(mddev->suspended);
	mddev->suspended = 1;
	synchronize_rcu();
	wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
	mddev->pers->quiesce(mddev, 1);
	md_unregister_thread(mddev->thread);
	mddev->thread = NULL;
	/* we now know that no code is executing in the personality module,
	 * except possibly the tail end of a ->bi_end_io function, but that
	 * is certain to complete before the module has a chance to get
	 * unloaded
	 */
}

static void mddev_resume(mddev_t *mddev)
{
	mddev->suspended = 0;
	wake_up(&mddev->sb_wait);
	mddev->pers->quiesce(mddev, 0);
}
static inline mddev_t *mddev_get(mddev_t *mddev)
{
	atomic_inc(&mddev->active);
	return mddev;
}

static void mddev_delayed_delete(struct work_struct *ws);

static void mddev_put(mddev_t *mddev)
{
	if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
		return;
	if (!mddev->raid_disks && list_empty(&mddev->disks) &&
	    !mddev->hold_active) {
		list_del(&mddev->all_mddevs);
		if (mddev->gendisk) {
			/* we did a probe so need to clean up.
			 * Call schedule_work inside the spinlock
			 * so that flush_scheduled_work() after
			 * mddev_find will succeed in waiting for the
			 * work to be done.
			 */
			INIT_WORK(&mddev->del_work, mddev_delayed_delete);
			schedule_work(&mddev->del_work);
		} else
			kfree(mddev);
	}
	spin_unlock(&all_mddevs_lock);
}
static mddev_t * mddev_find(dev_t unit)
{
	mddev_t *mddev, *new = NULL;

 retry:
	spin_lock(&all_mddevs_lock);

	if (unit) {
		list_for_each_entry(mddev, &all_mddevs, all_mddevs)
			if (mddev->unit == unit) {
				mddev_get(mddev);
				spin_unlock(&all_mddevs_lock);
				kfree(new);
				return mddev;
			}

		if (new) {
			list_add(&new->all_mddevs, &all_mddevs);
			spin_unlock(&all_mddevs_lock);
			new->hold_active = UNTIL_IOCTL;
			return new;
		}
	} else if (new) {
		/* find an unused unit number */
		static int next_minor = 512;
		int start = next_minor;
		int is_free = 0;
		int dev = 0;
		while (!is_free) {
			dev = MKDEV(MD_MAJOR, next_minor);
			next_minor++;
			if (next_minor > MINORMASK)
				next_minor = 0;
			if (next_minor == start) {
				/* Oh dear, all in use. */
				spin_unlock(&all_mddevs_lock);
				kfree(new);
				return NULL;
			}

			is_free = 1;
			list_for_each_entry(mddev, &all_mddevs, all_mddevs)
				if (mddev->unit == dev) {
					is_free = 0;
					break;
				}
		}
		new->unit = dev;
		new->md_minor = MINOR(dev);
		new->hold_active = UNTIL_STOP;
		list_add(&new->all_mddevs, &all_mddevs);
		spin_unlock(&all_mddevs_lock);
		return new;
	}
	spin_unlock(&all_mddevs_lock);

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return NULL;

	new->unit = unit;
	if (MAJOR(unit) == MD_MAJOR)
		new->md_minor = MINOR(unit);
	else
		new->md_minor = MINOR(unit) >> MdpMinorShift;

	mutex_init(&new->reconfig_mutex);
	INIT_LIST_HEAD(&new->disks);
	INIT_LIST_HEAD(&new->all_mddevs);
	init_timer(&new->safemode_timer);
	atomic_set(&new->active, 1);
	atomic_set(&new->openers, 0);
	atomic_set(&new->active_io, 0);
	spin_lock_init(&new->write_lock);
	init_waitqueue_head(&new->sb_wait);
	init_waitqueue_head(&new->recovery_wait);
	new->reshape_position = MaxSector;
	new->resync_max = MaxSector;
	new->level = LEVEL_NONE;

	goto retry;
}
static inline int mddev_lock(mddev_t * mddev)
{
	return mutex_lock_interruptible(&mddev->reconfig_mutex);
}

static inline int mddev_is_locked(mddev_t *mddev)
{
	return mutex_is_locked(&mddev->reconfig_mutex);
}

static inline int mddev_trylock(mddev_t * mddev)
{
	return mutex_trylock(&mddev->reconfig_mutex);
}

static inline void mddev_unlock(mddev_t * mddev)
{
	mutex_unlock(&mddev->reconfig_mutex);

	md_wakeup_thread(mddev->thread);
}
static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr)
{
	mdk_rdev_t *rdev;

	list_for_each_entry(rdev, &mddev->disks, same_set)
		if (rdev->desc_nr == nr)
			return rdev;

	return NULL;
}

static mdk_rdev_t * find_rdev(mddev_t * mddev, dev_t dev)
{
	mdk_rdev_t *rdev;

	list_for_each_entry(rdev, &mddev->disks, same_set)
		if (rdev->bdev->bd_dev == dev)
			return rdev;

	return NULL;
}

static struct mdk_personality *find_pers(int level, char *clevel)
{
	struct mdk_personality *pers;
	list_for_each_entry(pers, &pers_list, list) {
		if (level != LEVEL_NONE && pers->level == level)
			return pers;
		if (strcmp(pers->name, clevel)==0)
			return pers;
	}
	return NULL;
}
/* return the offset of the super block in 512byte sectors */
static inline sector_t calc_dev_sboffset(struct block_device *bdev)
{
	sector_t num_sectors = bdev->bd_inode->i_size / 512;
	return MD_NEW_SIZE_SECTORS(num_sectors);
}
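
/*
 * Worked example, assuming MD_NEW_SIZE_SECTORS() from md_p.h rounds down
 * to a 64KiB (128-sector) boundary and then steps back one full 64KiB
 * block: for a 1000000-sector device,
 *
 *	1000000 & ~127 = 999936		(round down to 128-sector boundary)
 *	999936 - 128   = 999808		(superblock offset in sectors)
 *
 * i.e. the 0.90 superblock occupies the last complete 64KiB block.
 */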
static int alloc_disk_sb(mdk_rdev_t * rdev)
{
	if (rdev->sb_page)
		MD_BUG();

	rdev->sb_page = alloc_page(GFP_KERNEL);
	if (!rdev->sb_page) {
		printk(KERN_ALERT "md: out of memory.\n");
		return -ENOMEM;
	}

	return 0;
}

static void free_disk_sb(mdk_rdev_t * rdev)
{
	if (rdev->sb_page) {
		put_page(rdev->sb_page);
		rdev->sb_loaded = 0;
		rdev->sb_page = NULL;
		rdev->sb_start = 0;
		rdev->sectors = 0;
	}
}
static void super_written(struct bio *bio, int error)
{
	mdk_rdev_t *rdev = bio->bi_private;
	mddev_t *mddev = rdev->mddev;

	if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
		printk("md: super_written gets error=%d, uptodate=%d\n",
		       error, test_bit(BIO_UPTODATE, &bio->bi_flags));
		WARN_ON(test_bit(BIO_UPTODATE, &bio->bi_flags));
		md_error(mddev, rdev);
	}

	if (atomic_dec_and_test(&mddev->pending_writes))
		wake_up(&mddev->sb_wait);
	bio_put(bio);
}

static void super_written_barrier(struct bio *bio, int error)
{
	struct bio *bio2 = bio->bi_private;
	mdk_rdev_t *rdev = bio2->bi_private;
	mddev_t *mddev = rdev->mddev;

	if (!test_bit(BIO_UPTODATE, &bio->bi_flags) &&
	    error == -EOPNOTSUPP) {
		unsigned long flags;
		/* barriers don't appear to be supported :-( */
		set_bit(BarriersNotsupp, &rdev->flags);
		mddev->barriers_work = 0;
		spin_lock_irqsave(&mddev->write_lock, flags);
		bio2->bi_next = mddev->biolist;
		mddev->biolist = bio2;
		spin_unlock_irqrestore(&mddev->write_lock, flags);
		wake_up(&mddev->sb_wait);
		bio_put(bio);
	} else {
		bio_put(bio2);
		bio->bi_private = rdev;
		super_written(bio, error);
	}
}
void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
		   sector_t sector, int size, struct page *page)
{
	/* write first size bytes of page to sector of rdev
	 * Increment mddev->pending_writes before returning
	 * and decrement it on completion, waking up sb_wait
	 * if zero is reached.
	 * If an error occurred, call md_error
	 *
	 * As we might need to resubmit the request if BIO_RW_BARRIER
	 * causes ENOTSUPP, we allocate a spare bio...
	 */
	struct bio *bio = bio_alloc(GFP_NOIO, 1);
	int rw = (1<<BIO_RW) | (1<<BIO_RW_SYNCIO) | (1<<BIO_RW_UNPLUG);

	bio->bi_bdev = rdev->bdev;
	bio->bi_sector = sector;
	bio_add_page(bio, page, size, 0);
	bio->bi_private = rdev;
	bio->bi_end_io = super_written;

	atomic_inc(&mddev->pending_writes);
	if (!test_bit(BarriersNotsupp, &rdev->flags)) {
		struct bio *rbio;
		rw |= (1<<BIO_RW_BARRIER);
		rbio = bio_clone(bio, GFP_NOIO);
		rbio->bi_private = bio;
		rbio->bi_end_io = super_written_barrier;
		submit_bio(rw, rbio);
	} else
		submit_bio(rw, bio);
}
void md_super_wait(mddev_t *mddev)
{
	/* wait for all superblock writes that were scheduled to complete.
	 * if any had to be retried (due to BARRIER problems), retry them
	 * again.
	 */
	DEFINE_WAIT(wq);
	for(;;) {
		prepare_to_wait(&mddev->sb_wait, &wq, TASK_UNINTERRUPTIBLE);
		if (atomic_read(&mddev->pending_writes)==0)
			break;
		while (mddev->biolist) {
			struct bio *bio;
			spin_lock_irq(&mddev->write_lock);
			bio = mddev->biolist;
			mddev->biolist = bio->bi_next;
			bio->bi_next = NULL;
			spin_unlock_irq(&mddev->write_lock);
			submit_bio(bio->bi_rw, bio);
		}
		schedule();
	}
	finish_wait(&mddev->sb_wait, &wq);
}
static void bi_complete(struct bio *bio, int error)
{
	complete((struct completion*)bio->bi_private);
}

int sync_page_io(struct block_device *bdev, sector_t sector, int size,
		   struct page *page, int rw)
{
	struct bio *bio = bio_alloc(GFP_NOIO, 1);
	struct completion event;
	int ret;

	rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);

	bio->bi_bdev = bdev;
	bio->bi_sector = sector;
	bio_add_page(bio, page, size, 0);
	init_completion(&event);
	bio->bi_private = &event;
	bio->bi_end_io = bi_complete;
	submit_bio(rw, bio);
	wait_for_completion(&event);

	ret = test_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL_GPL(sync_page_io);
static int read_disk_sb(mdk_rdev_t * rdev, int size)
{
	char b[BDEVNAME_SIZE];
	if (!rdev->sb_page) {
		MD_BUG();
		return -EINVAL;
	}
	if (rdev->sb_loaded)
		return 0;

	if (!sync_page_io(rdev->bdev, rdev->sb_start, size, rdev->sb_page, READ))
		goto fail;
	rdev->sb_loaded = 1;
	return 0;

fail:
	printk(KERN_WARNING "md: disabled device %s, could not read superblock.\n",
		bdevname(rdev->bdev,b));
	return -EINVAL;
}
static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	return sb1->set_uuid0 == sb2->set_uuid0 &&
		sb1->set_uuid1 == sb2->set_uuid1 &&
		sb1->set_uuid2 == sb2->set_uuid2 &&
		sb1->set_uuid3 == sb2->set_uuid3;
}

static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	int ret;
	mdp_super_t *tmp1, *tmp2;

	tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
	tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);

	if (!tmp1 || !tmp2) {
		ret = 0;
		printk(KERN_INFO "md.c sb_equal(): failed to allocate memory!\n");
		goto abort;
	}

	*tmp1 = *sb1;
	*tmp2 = *sb2;

	/*
	 * nr_disks is not constant
	 */
	tmp1->nr_disks = 0;
	tmp2->nr_disks = 0;

	ret = (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4) == 0);
abort:
	kfree(tmp1);
	kfree(tmp2);
	return ret;
}
static u32 md_csum_fold(u32 csum)
{
	csum = (csum & 0xffff) + (csum >> 16);
	return (csum & 0xffff) + (csum >> 16);
}
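
/*
 * Worked example of the double fold above, for csum = 0x2345ffff:
 *
 *	0xffff + 0x2345 = 0x12344	(first fold may still carry)
 *	0x2344 + 0x1    = 0x2345	(second fold fits in 16 bits)
 */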
static unsigned int calc_sb_csum(mdp_super_t * sb)
{
	u64 newcsum = 0;
	u32 *sb32 = (u32*)sb;
	int i;
	unsigned int disk_csum, csum;

	disk_csum = sb->sb_csum;
	sb->sb_csum = 0;

	for (i = 0; i < MD_SB_BYTES/4 ; i++)
		newcsum += sb32[i];
	csum = (newcsum & 0xffffffff) + (newcsum>>32);

#ifdef CONFIG_ALPHA
	/* This used to use csum_partial, which was wrong for several
	 * reasons including that different results are returned on
	 * different architectures. It isn't critical that we get exactly
	 * the same return value as before (we always csum_fold before
	 * testing, and that removes any differences). However as we
	 * know that csum_partial always returned a 16bit value on
	 * alphas, do a fold to maximise conformity to previous behaviour.
	 */
	sb->sb_csum = md_csum_fold(disk_csum);
#else
	sb->sb_csum = disk_csum;
#endif
	return csum;
}
/*
 * Handle superblock details.
 * We want to be able to handle multiple superblock formats
 * so we have a common interface to them all, and an array of
 * different handlers.
 * We rely on user-space to write the initial superblock, and support
 * reading and updating of superblocks.
 * Interface methods are:
 *   int load_super(mdk_rdev_t *dev, mdk_rdev_t *refdev, int minor_version)
 *      loads and validates a superblock on dev.
 *      if refdev != NULL, compare superblocks on both devices
 *    Return:
 *      0 - dev has a superblock that is compatible with refdev
 *      1 - dev has a superblock that is compatible and newer than refdev
 *          so dev should be used as the refdev in future
 *     -EINVAL superblock incompatible or invalid
 *     -othererror e.g. -EIO
 *
 *   int validate_super(mddev_t *mddev, mdk_rdev_t *dev)
 *      Verify that dev is acceptable into mddev.
 *      The first time, mddev->raid_disks will be 0, and data from
 *      dev should be merged in. Subsequent calls check that dev
 *      is new enough. Return 0 or -EINVAL
 *
 *   void sync_super(mddev_t *mddev, mdk_rdev_t *dev)
 *      Update the superblock for rdev with data in mddev
 *      This does not write to disc.
 */

struct super_type  {
	char		    *name;
	struct module	    *owner;
	int		    (*load_super)(mdk_rdev_t *rdev, mdk_rdev_t *refdev,
					  int minor_version);
	int		    (*validate_super)(mddev_t *mddev, mdk_rdev_t *rdev);
	void		    (*sync_super)(mddev_t *mddev, mdk_rdev_t *rdev);
	unsigned long long  (*rdev_size_change)(mdk_rdev_t *rdev,
						sector_t num_sectors);
};
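
/*
 * A sketch of how these handlers are dispatched elsewhere in this file
 * (illustrative; see the super_types[] array below for the instances):
 *
 *	err = super_types[mddev->major_version].load_super(rdev, refdev,
 *						mddev->minor_version);
 *	if (err >= 0)
 *		err = super_types[mddev->major_version].
 *			validate_super(mddev, rdev);
 */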
/*
 * Check that the given mddev has no bitmap.
 *
 * This function is called from the run method of all personalities that do not
 * support bitmaps. It prints an error message and returns non-zero if mddev
 * has a bitmap. Otherwise, it returns 0.
 */
int md_check_no_bitmap(mddev_t *mddev)
{
	if (!mddev->bitmap_file && !mddev->bitmap_offset)
		return 0;
	printk(KERN_ERR "%s: bitmaps are not supported for %s\n",
		mdname(mddev), mddev->pers->name);
	return 1;
}
EXPORT_SYMBOL(md_check_no_bitmap);
/*
 * load_super for 0.90.0
 */
static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
{
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	mdp_super_t *sb;
	int ret;

	/*
	 * Calculate the position of the superblock (512byte sectors),
	 * it's at the end of the disk.
	 *
	 * It also happens to be a multiple of 4Kb.
	 */
	rdev->sb_start = calc_dev_sboffset(rdev->bdev);

	ret = read_disk_sb(rdev, MD_SB_BYTES);
	if (ret) return ret;

	ret = -EINVAL;

	bdevname(rdev->bdev, b);
	sb = (mdp_super_t*)page_address(rdev->sb_page);

	if (sb->md_magic != MD_SB_MAGIC) {
		printk(KERN_ERR "md: invalid raid superblock magic on %s\n",
		       b);
		goto abort;
	}

	if (sb->major_version != 0 ||
	    sb->minor_version < 90 ||
	    sb->minor_version > 91) {
		printk(KERN_WARNING "Bad version number %d.%d on %s\n",
			sb->major_version, sb->minor_version,
			b);
		goto abort;
	}

	if (sb->raid_disks <= 0)
		goto abort;

	if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
		printk(KERN_WARNING "md: invalid superblock checksum on %s\n",
			b);
		goto abort;
	}

	rdev->preferred_minor = sb->md_minor;
	rdev->data_offset = 0;
	rdev->sb_size = MD_SB_BYTES;

	if (sb->level == LEVEL_MULTIPATH)
		rdev->desc_nr = -1;
	else
		rdev->desc_nr = sb->this_disk.number;

	if (!refdev) {
		ret = 1;
	} else {
		__u64 ev1, ev2;
		mdp_super_t *refsb = (mdp_super_t*)page_address(refdev->sb_page);
		if (!uuid_equal(refsb, sb)) {
			printk(KERN_WARNING "md: %s has different UUID to %s\n",
				b, bdevname(refdev->bdev,b2));
			goto abort;
		}
		if (!sb_equal(refsb, sb)) {
			printk(KERN_WARNING "md: %s has same UUID"
			       " but different superblock to %s\n",
			       b, bdevname(refdev->bdev, b2));
			goto abort;
		}
		ev1 = md_event(sb);
		ev2 = md_event(refsb);
		if (ev1 > ev2)
			ret = 1;
		else
			ret = 0;
	}
	rdev->sectors = rdev->sb_start;

	if (rdev->sectors < sb->size * 2 && sb->level > 1)
		/* "this cannot possibly happen" ... */
		ret = -EINVAL;

 abort:
	return ret;
}
/*
 * validate_super for 0.90.0
 */
static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
{
	mdp_disk_t *desc;
	mdp_super_t *sb = (mdp_super_t *)page_address(rdev->sb_page);
	__u64 ev1 = md_event(sb);

	rdev->raid_disk = -1;
	clear_bit(Faulty, &rdev->flags);
	clear_bit(In_sync, &rdev->flags);
	clear_bit(WriteMostly, &rdev->flags);
	clear_bit(BarriersNotsupp, &rdev->flags);

	if (mddev->raid_disks == 0) {
		mddev->major_version = 0;
		mddev->minor_version = sb->minor_version;
		mddev->patch_version = sb->patch_version;
		mddev->chunk_sectors = sb->chunk_size >> 9;
		mddev->ctime = sb->ctime;
		mddev->utime = sb->utime;
		mddev->level = sb->level;
		mddev->clevel[0] = 0;
		mddev->layout = sb->layout;
		mddev->raid_disks = sb->raid_disks;
		mddev->dev_sectors = sb->size * 2;
		mddev->events = ev1;
		mddev->bitmap_offset = 0;
		mddev->default_bitmap_offset = MD_SB_BYTES >> 9;

		if (mddev->minor_version >= 91) {
			mddev->reshape_position = sb->reshape_position;
			mddev->delta_disks = sb->delta_disks;
			mddev->new_level = sb->new_level;
			mddev->new_layout = sb->new_layout;
			mddev->new_chunk_sectors = sb->new_chunk >> 9;
		} else {
			mddev->reshape_position = MaxSector;
			mddev->delta_disks = 0;
			mddev->new_level = mddev->level;
			mddev->new_layout = mddev->layout;
			mddev->new_chunk_sectors = mddev->chunk_sectors;
		}

		if (sb->state & (1<<MD_SB_CLEAN))
			mddev->recovery_cp = MaxSector;
		else {
			if (sb->events_hi == sb->cp_events_hi &&
			    sb->events_lo == sb->cp_events_lo) {
				mddev->recovery_cp = sb->recovery_cp;
			} else
				mddev->recovery_cp = 0;
		}

		memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
		memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
		memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
		memcpy(mddev->uuid+12,&sb->set_uuid3, 4);

		mddev->max_disks = MD_SB_DISKS;

		if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
		    mddev->bitmap_file == NULL)
			mddev->bitmap_offset = mddev->default_bitmap_offset;

	} else if (mddev->pers == NULL) {
		/* Insist on good event counter while assembling */
		++ev1;
		if (ev1 < mddev->events)
			return -EINVAL;
	} else if (mddev->bitmap) {
		/* if adding to array with a bitmap, then we can accept an
		 * older device ... but not too old.
		 */
		if (ev1 < mddev->bitmap->events_cleared)
			return 0;
	} else {
		if (ev1 < mddev->events)
			/* just a hot-add of a new device, leave raid_disk at -1 */
			return 0;
	}

	if (mddev->level != LEVEL_MULTIPATH) {
		desc = sb->disks + rdev->desc_nr;

		if (desc->state & (1<<MD_DISK_FAULTY))
			set_bit(Faulty, &rdev->flags);
		else if (desc->state & (1<<MD_DISK_SYNC) /* &&
			    desc->raid_disk < mddev->raid_disks */) {
			set_bit(In_sync, &rdev->flags);
			rdev->raid_disk = desc->raid_disk;
		}
		if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
			set_bit(WriteMostly, &rdev->flags);
	} else /* MULTIPATH are always insync */
		set_bit(In_sync, &rdev->flags);
	return 0;
}
/*
 * sync_super for 0.90.0
 */
static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
{
	mdp_super_t *sb;
	mdk_rdev_t *rdev2;
	int next_spare = mddev->raid_disks;

	/* make rdev->sb match mddev data..
	 *
	 * 1/ zero out disks
	 * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
	 * 3/ any empty disks < next_spare become removed
	 *
	 * disks[0] gets initialised to REMOVED because
	 * we cannot be sure from other fields if it has
	 * been initialised or not.
	 */
	int i;
	int active=0, working=0,failed=0,spare=0,nr_disks=0;

	rdev->sb_size = MD_SB_BYTES;

	sb = (mdp_super_t*)page_address(rdev->sb_page);

	memset(sb, 0, sizeof(*sb));

	sb->md_magic = MD_SB_MAGIC;
	sb->major_version = mddev->major_version;
	sb->patch_version = mddev->patch_version;
	sb->gvalid_words = 0; /* ignored */
	memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
	memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
	memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
	memcpy(&sb->set_uuid3, mddev->uuid+12,4);

	sb->ctime = mddev->ctime;
	sb->level = mddev->level;
	sb->size = mddev->dev_sectors / 2;
	sb->raid_disks = mddev->raid_disks;
	sb->md_minor = mddev->md_minor;
	sb->not_persistent = 0;
	sb->utime = mddev->utime;
	sb->state = 0;
	sb->events_hi = (mddev->events>>32);
	sb->events_lo = (u32)mddev->events;

	if (mddev->reshape_position == MaxSector)
		sb->minor_version = 90;
	else {
		sb->minor_version = 91;
		sb->reshape_position = mddev->reshape_position;
		sb->new_level = mddev->new_level;
		sb->delta_disks = mddev->delta_disks;
		sb->new_layout = mddev->new_layout;
		sb->new_chunk = mddev->new_chunk_sectors << 9;
	}
	mddev->minor_version = sb->minor_version;
	if (mddev->in_sync) {
		sb->recovery_cp = mddev->recovery_cp;
		sb->cp_events_hi = (mddev->events>>32);
		sb->cp_events_lo = (u32)mddev->events;
		if (mddev->recovery_cp == MaxSector)
			sb->state = (1<< MD_SB_CLEAN);
	} else
		sb->recovery_cp = 0;

	sb->layout = mddev->layout;
	sb->chunk_size = mddev->chunk_sectors << 9;

	if (mddev->bitmap && mddev->bitmap_file == NULL)
		sb->state |= (1<<MD_SB_BITMAP_PRESENT);

	sb->disks[0].state = (1<<MD_DISK_REMOVED);
	list_for_each_entry(rdev2, &mddev->disks, same_set) {
		mdp_disk_t *d;
		int desc_nr;
		if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags)
		    && !test_bit(Faulty, &rdev2->flags))
			desc_nr = rdev2->raid_disk;
		else
			desc_nr = next_spare++;
		rdev2->desc_nr = desc_nr;
		d = &sb->disks[rdev2->desc_nr];
		nr_disks++;
		d->number = rdev2->desc_nr;
		d->major = MAJOR(rdev2->bdev->bd_dev);
		d->minor = MINOR(rdev2->bdev->bd_dev);
		if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags)
		    && !test_bit(Faulty, &rdev2->flags))
			d->raid_disk = rdev2->raid_disk;
		else
			d->raid_disk = rdev2->desc_nr; /* compatibility */
		if (test_bit(Faulty, &rdev2->flags))
			d->state = (1<<MD_DISK_FAULTY);
		else if (test_bit(In_sync, &rdev2->flags)) {
			d->state = (1<<MD_DISK_ACTIVE);
			d->state |= (1<<MD_DISK_SYNC);
			active++;
			working++;
		} else {
			d->state = 0;
			spare++;
			working++;
		}
		if (test_bit(WriteMostly, &rdev2->flags))
			d->state |= (1<<MD_DISK_WRITEMOSTLY);
	}
	/* now set the "removed" and "faulty" bits on any missing devices */
	for (i=0 ; i < mddev->raid_disks ; i++) {
		mdp_disk_t *d = &sb->disks[i];
		if (d->state == 0 && d->number == 0) {
			d->number = i;
			d->raid_disk = i;
			d->state = (1<<MD_DISK_REMOVED);
			d->state |= (1<<MD_DISK_FAULTY);
			failed++;
		}
	}
	sb->nr_disks = nr_disks;
	sb->active_disks = active;
	sb->working_disks = working;
	sb->failed_disks = failed;
	sb->spare_disks = spare;

	sb->this_disk = sb->disks[rdev->desc_nr];
	sb->sb_csum = calc_sb_csum(sb);
}
/*
 * rdev_size_change for 0.90.0
 */
static unsigned long long
super_90_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
{
	if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
		return 0; /* component must fit device */
	if (rdev->mddev->bitmap_offset)
		return 0; /* can't move bitmap */
	rdev->sb_start = calc_dev_sboffset(rdev->bdev);
	if (!num_sectors || num_sectors > rdev->sb_start)
		num_sectors = rdev->sb_start;
	md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
		       rdev->sb_page);
	md_super_wait(rdev->mddev);
	return num_sectors / 2; /* kB for sysfs */
}
/*
 * version 1 superblock
 */

static __le32 calc_sb_1_csum(struct mdp_superblock_1 * sb)
{
	__le32 disk_csum;
	u32 csum;
	unsigned long long newcsum;
	int size = 256 + le32_to_cpu(sb->max_dev)*2;
	__le32 *isuper = (__le32*)sb;
	int i;

	disk_csum = sb->sb_csum;
	sb->sb_csum = 0;
	newcsum = 0;
	for (i=0; size>=4; size -= 4 )
		newcsum += le32_to_cpu(*isuper++);

	if (size == 2)
		newcsum += le16_to_cpu(*(__le16*) isuper);

	csum = (newcsum & 0xffffffff) + (newcsum >> 32);
	sb->sb_csum = disk_csum;
	return cpu_to_le32(csum);
}
static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
{
	struct mdp_superblock_1 *sb;
	int ret;
	sector_t sb_start;
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	int bmask;

	/*
	 * Calculate the position of the superblock in 512byte sectors.
	 * It is always aligned to a 4K boundary and
	 * depending on minor_version, it can be:
	 * 0: At least 8K, but less than 12K, from end of device
	 * 1: At start of device
	 * 2: 4K from start of device.
	 */
	switch(minor_version) {
	case 0:
		sb_start = rdev->bdev->bd_inode->i_size >> 9;
		sb_start -= 8*2;
		sb_start &= ~(sector_t)(4*2-1);
		break;
	case 1:
		sb_start = 0;
		break;
	case 2:
		sb_start = 8;
		break;
	default:
		return -EINVAL;
	}
	rdev->sb_start = sb_start;
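
	/*
	 * Worked example for minor_version 0, using the arithmetic above:
	 * on a 1000000-sector device,
	 *
	 *	sb_start = (1000000 - 8*2) & ~7 = 999984
	 *
	 * which is 16 sectors (8KiB) from the end of the device, aligned
	 * down to a 4KiB boundary.
	 */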
	/* superblock is rarely larger than 1K, but it can be larger,
	 * and it is safe to read 4k, so we do that
	 */
	ret = read_disk_sb(rdev, 4096);
	if (ret) return ret;

	sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);

	if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
	    sb->major_version != cpu_to_le32(1) ||
	    le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
	    le64_to_cpu(sb->super_offset) != rdev->sb_start ||
	    (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
		return -EINVAL;

	if (calc_sb_1_csum(sb) != sb->sb_csum) {
		printk("md: invalid superblock checksum on %s\n",
			bdevname(rdev->bdev,b));
		return -EINVAL;
	}
	if (le64_to_cpu(sb->data_size) < 10) {
		printk("md: data_size too small on %s\n",
		       bdevname(rdev->bdev,b));
		return -EINVAL;
	}

	rdev->preferred_minor = 0xffff;
	rdev->data_offset = le64_to_cpu(sb->data_offset);
	atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));

	rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
	bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
	if (rdev->sb_size & bmask)
		rdev->sb_size = (rdev->sb_size | bmask) + 1;

	if (minor_version
	    && rdev->data_offset < sb_start + (rdev->sb_size/512))
		return -EINVAL;

	if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
		rdev->desc_nr = -1;
	else
		rdev->desc_nr = le32_to_cpu(sb->dev_number);

	if (!refdev) {
		ret = 1;
	} else {
		__u64 ev1, ev2;
		struct mdp_superblock_1 *refsb =
			(struct mdp_superblock_1*)page_address(refdev->sb_page);

		if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
		    sb->level != refsb->level ||
		    sb->layout != refsb->layout ||
		    sb->chunksize != refsb->chunksize) {
			printk(KERN_WARNING "md: %s has strangely different"
				" superblock to %s\n",
				bdevname(rdev->bdev,b),
				bdevname(refdev->bdev,b2));
			return -EINVAL;
		}
		ev1 = le64_to_cpu(sb->events);
		ev2 = le64_to_cpu(refsb->events);

		if (ev1 > ev2)
			ret = 1;
		else
			ret = 0;
	}
	if (minor_version)
		rdev->sectors = (rdev->bdev->bd_inode->i_size >> 9) -
			le64_to_cpu(sb->data_offset);
	else
		rdev->sectors = rdev->sb_start;
	if (rdev->sectors < le64_to_cpu(sb->data_size))
		return -EINVAL;
	rdev->sectors = le64_to_cpu(sb->data_size);
	if (le64_to_cpu(sb->size) > rdev->sectors)
		return -EINVAL;
	return ret;
}
static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
{
	struct mdp_superblock_1 *sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);
	__u64 ev1 = le64_to_cpu(sb->events);

	rdev->raid_disk = -1;
	clear_bit(Faulty, &rdev->flags);
	clear_bit(In_sync, &rdev->flags);
	clear_bit(WriteMostly, &rdev->flags);
	clear_bit(BarriersNotsupp, &rdev->flags);

	if (mddev->raid_disks == 0) {
		mddev->major_version = 1;
		mddev->patch_version = 0;
		mddev->external = 0;
		mddev->chunk_sectors = le32_to_cpu(sb->chunksize);
		mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1);
		mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1);
		mddev->level = le32_to_cpu(sb->level);
		mddev->clevel[0] = 0;
		mddev->layout = le32_to_cpu(sb->layout);
		mddev->raid_disks = le32_to_cpu(sb->raid_disks);
		mddev->dev_sectors = le64_to_cpu(sb->size);
		mddev->events = ev1;
		mddev->bitmap_offset = 0;
		mddev->default_bitmap_offset = 1024 >> 9;

		mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
		memcpy(mddev->uuid, sb->set_uuid, 16);

		mddev->max_disks = (4096-256)/2;

		if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
		    mddev->bitmap_file == NULL)
			mddev->bitmap_offset = (__s32)le32_to_cpu(sb->bitmap_offset);

		if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
			mddev->reshape_position = le64_to_cpu(sb->reshape_position);
			mddev->delta_disks = le32_to_cpu(sb->delta_disks);
			mddev->new_level = le32_to_cpu(sb->new_level);
			mddev->new_layout = le32_to_cpu(sb->new_layout);
			mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk);
		} else {
			mddev->reshape_position = MaxSector;
			mddev->delta_disks = 0;
			mddev->new_level = mddev->level;
			mddev->new_layout = mddev->layout;
			mddev->new_chunk_sectors = mddev->chunk_sectors;
		}

	} else if (mddev->pers == NULL) {
		/* Insist on good event counter while assembling */
		++ev1;
		if (ev1 < mddev->events)
			return -EINVAL;
	} else if (mddev->bitmap) {
		/* If adding to array with a bitmap, then we can accept an
		 * older device, but not too old.
		 */
		if (ev1 < mddev->bitmap->events_cleared)
			return 0;
	} else {
		if (ev1 < mddev->events)
			/* just a hot-add of a new device, leave raid_disk at -1 */
			return 0;
	}
	if (mddev->level != LEVEL_MULTIPATH) {
		int role;
		if (rdev->desc_nr < 0 ||
		    rdev->desc_nr >= le32_to_cpu(sb->max_dev)) {
			role = 0xfffe;
			rdev->desc_nr = -1;
		} else
			role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
		switch(role) {
		case 0xffff: /* spare */
			break;
		case 0xfffe: /* faulty */
			set_bit(Faulty, &rdev->flags);
			break;
		default:
			if ((le32_to_cpu(sb->feature_map) &
			     MD_FEATURE_RECOVERY_OFFSET))
				rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
			else
				set_bit(In_sync, &rdev->flags);
			rdev->raid_disk = role;
			break;
		}
		if (sb->devflags & WriteMostly1)
			set_bit(WriteMostly, &rdev->flags);
	} else /* MULTIPATH are always insync */
		set_bit(In_sync, &rdev->flags);

	return 0;
}
static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
{
	struct mdp_superblock_1 *sb;
	mdk_rdev_t *rdev2;
	int max_dev, i;
	/* make rdev->sb match mddev and rdev data. */

	sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);

	sb->feature_map = 0;
	sb->pad0 = 0;
	sb->recovery_offset = cpu_to_le64(0);
	memset(sb->pad1, 0, sizeof(sb->pad1));
	memset(sb->pad2, 0, sizeof(sb->pad2));
	memset(sb->pad3, 0, sizeof(sb->pad3));

	sb->utime = cpu_to_le64((__u64)mddev->utime);
	sb->events = cpu_to_le64(mddev->events);
	if (mddev->in_sync)
		sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
	else
		sb->resync_offset = cpu_to_le64(0);

	sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));

	sb->raid_disks = cpu_to_le32(mddev->raid_disks);
	sb->size = cpu_to_le64(mddev->dev_sectors);
	sb->chunksize = cpu_to_le32(mddev->chunk_sectors);
	sb->level = cpu_to_le32(mddev->level);
	sb->layout = cpu_to_le32(mddev->layout);

	if (mddev->bitmap && mddev->bitmap_file == NULL) {
		sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_offset);
		sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
	}

	if (rdev->raid_disk >= 0 &&
	    !test_bit(In_sync, &rdev->flags)) {
		if (mddev->curr_resync_completed > rdev->recovery_offset)
			rdev->recovery_offset = mddev->curr_resync_completed;
		if (rdev->recovery_offset > 0) {
			sb->feature_map |=
				cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
			sb->recovery_offset =
				cpu_to_le64(rdev->recovery_offset);
		}
	}

	if (mddev->reshape_position != MaxSector) {
		sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
		sb->reshape_position = cpu_to_le64(mddev->reshape_position);
		sb->new_layout = cpu_to_le32(mddev->new_layout);
		sb->delta_disks = cpu_to_le32(mddev->delta_disks);
		sb->new_level = cpu_to_le32(mddev->new_level);
		sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors);
	}

	max_dev = 0;
	list_for_each_entry(rdev2, &mddev->disks, same_set)
		if (rdev2->desc_nr+1 > max_dev)
			max_dev = rdev2->desc_nr+1;

	if (max_dev > le32_to_cpu(sb->max_dev))
		sb->max_dev = cpu_to_le32(max_dev);
	for (i=0; i<max_dev;i++)
		sb->dev_roles[i] = cpu_to_le16(0xfffe);

	list_for_each_entry(rdev2, &mddev->disks, same_set) {
		i = rdev2->desc_nr;
		if (test_bit(Faulty, &rdev2->flags))
			sb->dev_roles[i] = cpu_to_le16(0xfffe);
		else if (test_bit(In_sync, &rdev2->flags))
			sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
		else if (rdev2->raid_disk >= 0 && rdev2->recovery_offset > 0)
			sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
		else
			sb->dev_roles[i] = cpu_to_le16(0xffff);
	}

	sb->sb_csum = calc_sb_1_csum(sb);
}
static unsigned long long
super_1_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
{
	struct mdp_superblock_1 *sb;
	sector_t max_sectors;
	if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
		return 0; /* component must fit device */
	if (rdev->sb_start < rdev->data_offset) {
		/* minor versions 1 and 2; superblock before data */
		max_sectors = rdev->bdev->bd_inode->i_size >> 9;
		max_sectors -= rdev->data_offset;
		if (!num_sectors || num_sectors > max_sectors)
			num_sectors = max_sectors;
	} else if (rdev->mddev->bitmap_offset) {
		/* minor version 0 with bitmap we can't move */
		return 0;
	} else {
		/* minor version 0; superblock after data */
		sector_t sb_start;
		sb_start = (rdev->bdev->bd_inode->i_size >> 9) - 8*2;
		sb_start &= ~(sector_t)(4*2 - 1);
		max_sectors = rdev->sectors + sb_start - rdev->sb_start;
		if (!num_sectors || num_sectors > max_sectors)
			num_sectors = max_sectors;
		rdev->sb_start = sb_start;
	}
	sb = (struct mdp_superblock_1 *) page_address(rdev->sb_page);
	sb->data_size = cpu_to_le64(num_sectors);
	sb->super_offset = rdev->sb_start;
	sb->sb_csum = calc_sb_1_csum(sb);
	md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
		       rdev->sb_page);
	md_super_wait(rdev->mddev);
	return num_sectors / 2; /* kB for sysfs */
}
static struct super_type super_types[] = {
	[0] = {
		.name	= "0.90.0",
		.owner	= THIS_MODULE,
		.load_super	    = super_90_load,
		.validate_super	    = super_90_validate,
		.sync_super	    = super_90_sync,
		.rdev_size_change   = super_90_rdev_size_change,
	},
	[1] = {
		.name	= "md-1",
		.owner	= THIS_MODULE,
		.load_super	    = super_1_load,
		.validate_super	    = super_1_validate,
		.sync_super	    = super_1_sync,
		.rdev_size_change   = super_1_rdev_size_change,
	},
};
static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2)
{
	mdk_rdev_t *rdev, *rdev2;

	rcu_read_lock();
	rdev_for_each_rcu(rdev, mddev1)
		rdev_for_each_rcu(rdev2, mddev2)
			if (rdev->bdev->bd_contains ==
			    rdev2->bdev->bd_contains) {
				rcu_read_unlock();
				return 1;
			}
	rcu_read_unlock();
	return 0;
}

static LIST_HEAD(pending_raid_disks);
/*
 * Try to register data integrity profile for an mddev
 *
 * This is called when an array is started and after a disk has been kicked
 * from the array. It only succeeds if all working and active component devices
 * are integrity capable with matching profiles.
 */
int md_integrity_register(mddev_t *mddev)
{
	mdk_rdev_t *rdev, *reference = NULL;

	if (list_empty(&mddev->disks))
		return 0; /* nothing to do */
	if (blk_get_integrity(mddev->gendisk))
		return 0; /* already registered */
	list_for_each_entry(rdev, &mddev->disks, same_set) {
		/* skip spares and non-functional disks */
		if (test_bit(Faulty, &rdev->flags))
			continue;
		if (rdev->raid_disk < 0)
			continue;
		/*
		 * If at least one rdev is not integrity capable, we can not
		 * enable data integrity for the md device.
		 */
		if (!bdev_get_integrity(rdev->bdev))
			return -EINVAL;
		if (!reference) {
			/* Use the first rdev as the reference */
			reference = rdev;
			continue;
		}
		/* does this rdev's profile match the reference profile? */
		if (blk_integrity_compare(reference->bdev->bd_disk,
				rdev->bdev->bd_disk) < 0)
			return -EINVAL;
	}
	/*
	 * All component devices are integrity capable and have matching
	 * profiles, register the common profile for the md device.
	 */
	if (blk_integrity_register(mddev->gendisk,
			bdev_get_integrity(reference->bdev)) != 0) {
		printk(KERN_ERR "md: failed to register integrity for %s\n",
			mdname(mddev));
		return -EINVAL;
	}
	printk(KERN_NOTICE "md: data integrity on %s enabled\n",
		mdname(mddev));
	return 0;
}
EXPORT_SYMBOL(md_integrity_register);
/* Disable data integrity if non-capable/non-matching disk is being added */
void md_integrity_add_rdev(mdk_rdev_t *rdev, mddev_t *mddev)
{
	struct blk_integrity *bi_rdev = bdev_get_integrity(rdev->bdev);
	struct blk_integrity *bi_mddev = blk_get_integrity(mddev->gendisk);

	if (!bi_mddev) /* nothing to do */
		return;
	if (rdev->raid_disk < 0) /* skip spares */
		return;
	if (bi_rdev && blk_integrity_compare(mddev->gendisk,
					     rdev->bdev->bd_disk) >= 0)
		return;
	printk(KERN_NOTICE "disabling data integrity on %s\n", mdname(mddev));
	blk_integrity_unregister(mddev->gendisk);
}
EXPORT_SYMBOL(md_integrity_add_rdev);
static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
{
	char b[BDEVNAME_SIZE];
	struct kobject *ko;
	char *s;
	int err;

	if (rdev->mddev) {
		MD_BUG();
		return -EINVAL;
	}

	/* prevent duplicates */
	if (find_rdev(mddev, rdev->bdev->bd_dev))
		return -EEXIST;

	/* make sure rdev->sectors exceeds mddev->dev_sectors */
	if (rdev->sectors && (mddev->dev_sectors == 0 ||
			rdev->sectors < mddev->dev_sectors)) {
		if (mddev->pers) {
			/* Cannot change size, so fail
			 * If mddev->level <= 0, then we don't care
			 * about aligning sizes (e.g. linear)
			 */
			if (mddev->level > 0)
				return -ENOSPC;
		} else
			mddev->dev_sectors = rdev->sectors;
	}

	/* Verify rdev->desc_nr is unique.
	 * If it is -1, assign a free number, else
	 * check number is not in use
	 */
	if (rdev->desc_nr < 0) {
		int choice = 0;
		if (mddev->pers) choice = mddev->raid_disks;
		while (find_rdev_nr(mddev, choice))
			choice++;
		rdev->desc_nr = choice;
	} else {
		if (find_rdev_nr(mddev, rdev->desc_nr))
			return -EBUSY;
	}
	if (mddev->max_disks && rdev->desc_nr >= mddev->max_disks) {
		printk(KERN_WARNING "md: %s: array is limited to %d devices\n",
		       mdname(mddev), mddev->max_disks);
		return -EBUSY;
	}
	bdevname(rdev->bdev,b);
	while ( (s=strchr(b, '/')) != NULL)
		*s = '!';

	rdev->mddev = mddev;
	printk(KERN_INFO "md: bind<%s>\n", b);

	if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
		goto fail;

	ko = &part_to_dev(rdev->bdev->bd_part)->kobj;
	if ((err = sysfs_create_link(&rdev->kobj, ko, "block"))) {
		kobject_del(&rdev->kobj);
		goto fail;
	}
	rdev->sysfs_state = sysfs_get_dirent(rdev->kobj.sd, "state");

	list_add_rcu(&rdev->same_set, &mddev->disks);
	bd_claim_by_disk(rdev->bdev, rdev->bdev->bd_holder, mddev->gendisk);

	/* May as well allow recovery to be retried once */
	mddev->recovery_disabled = 0;

	return 0;

 fail:
	printk(KERN_WARNING "md: failed to register dev-%s for %s\n",
	       b, mdname(mddev));
	return err;
}
static void md_delayed_delete(struct work_struct *ws)
{
	mdk_rdev_t *rdev = container_of(ws, mdk_rdev_t, del_work);
	kobject_del(&rdev->kobj);
	kobject_put(&rdev->kobj);
}

static void unbind_rdev_from_array(mdk_rdev_t * rdev)
{
	char b[BDEVNAME_SIZE];
	if (!rdev->mddev) {
		MD_BUG();
		return;
	}
	bd_release_from_disk(rdev->bdev, rdev->mddev->gendisk);
	list_del_rcu(&rdev->same_set);
	printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b));
	rdev->mddev = NULL;
	sysfs_remove_link(&rdev->kobj, "block");
	sysfs_put(rdev->sysfs_state);
	rdev->sysfs_state = NULL;
	/* We need to delay this, otherwise we can deadlock when
	 * writing 'remove' to "dev/state". We also need
	 * to delay it due to rcu usage.
	 */
	synchronize_rcu();
	INIT_WORK(&rdev->del_work, md_delayed_delete);
	kobject_get(&rdev->kobj);
	schedule_work(&rdev->del_work);
}
/*
 * prevent the device from being mounted, repartitioned or
 * otherwise reused by a RAID array (or any other kernel
 * subsystem), by bd_claiming the device.
 */
static int lock_rdev(mdk_rdev_t *rdev, dev_t dev, int shared)
{
	int err = 0;
	struct block_device *bdev;
	char b[BDEVNAME_SIZE];

	bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE);
	if (IS_ERR(bdev)) {
		printk(KERN_ERR "md: could not open %s.\n",
			__bdevname(dev, b));
		return PTR_ERR(bdev);
	}
	err = bd_claim(bdev, shared ? (mdk_rdev_t *)lock_rdev : rdev);
	if (err) {
		printk(KERN_ERR "md: could not bd_claim %s.\n",
			bdevname(bdev, b));
		blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
		return err;
	}
	if (!shared)
		set_bit(AllReserved, &rdev->flags);
	rdev->bdev = bdev;
	return err;
}

static void unlock_rdev(mdk_rdev_t *rdev)
{
	struct block_device *bdev = rdev->bdev;
	rdev->bdev = NULL;
	if (!bdev)
		MD_BUG();
	blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
}
void md_autodetect_dev(dev_t dev);

static void export_rdev(mdk_rdev_t * rdev)
{
	char b[BDEVNAME_SIZE];
	printk(KERN_INFO "md: export_rdev(%s)\n",
		bdevname(rdev->bdev,b));
	if (rdev->mddev)
		MD_BUG();
	free_disk_sb(rdev);
#ifndef MODULE
	if (test_bit(AutoDetected, &rdev->flags))
		md_autodetect_dev(rdev->bdev->bd_dev);
#endif
	unlock_rdev(rdev);
	kobject_put(&rdev->kobj);
}

static void kick_rdev_from_array(mdk_rdev_t * rdev)
{
	unbind_rdev_from_array(rdev);
	export_rdev(rdev);
}

static void export_array(mddev_t *mddev)
{
	mdk_rdev_t *rdev, *tmp;

	rdev_for_each(rdev, tmp, mddev) {
		if (!rdev->mddev) {
			MD_BUG();
			continue;
		}
		kick_rdev_from_array(rdev);
	}
	if (!list_empty(&mddev->disks))
		MD_BUG();
	mddev->raid_disks = 0;
	mddev->major_version = 0;
}
static void print_desc(mdp_disk_t *desc)
{
	printk(" DISK<N:%d,(%d,%d),R:%d,S:%d>\n", desc->number,
		desc->major,desc->minor,desc->raid_disk,desc->state);
}

static void print_sb_90(mdp_super_t *sb)
{
	int i;

	printk(KERN_INFO
		"md: SB: (V:%d.%d.%d) ID:<%08x.%08x.%08x.%08x> CT:%08x\n",
		sb->major_version, sb->minor_version, sb->patch_version,
		sb->set_uuid0, sb->set_uuid1, sb->set_uuid2, sb->set_uuid3,
		sb->ctime);
	printk(KERN_INFO "md: L%d S%08d ND:%d RD:%d md%d LO:%d CS:%d\n",
		sb->level, sb->size, sb->nr_disks, sb->raid_disks,
		sb->md_minor, sb->layout, sb->chunk_size);
	printk(KERN_INFO "md: UT:%08x ST:%d AD:%d WD:%d"
		" FD:%d SD:%d CSUM:%08x E:%08lx\n",
		sb->utime, sb->state, sb->active_disks, sb->working_disks,
		sb->failed_disks, sb->spare_disks,
		sb->sb_csum, (unsigned long)sb->events_lo);

	for (i = 0; i < MD_SB_DISKS; i++) {
		mdp_disk_t *desc;

		desc = sb->disks + i;
		if (desc->number || desc->major || desc->minor ||
		    desc->raid_disk || (desc->state && (desc->state != 4))) {
			printk(" D %2d: ", i);
			print_desc(desc);
		}
	}
	printk(KERN_INFO "md: THIS: ");
	print_desc(&sb->this_disk);
}
static void print_sb_1(struct mdp_superblock_1 *sb)
{
	__u8 *uuid;

	uuid = sb->set_uuid;
	printk(KERN_INFO
	       "md: SB: (V:%u) (F:0x%08x) Array-ID:<%02x%02x%02x%02x"
	       ":%02x%02x:%02x%02x:%02x%02x:%02x%02x%02x%02x%02x%02x>\n"
	       "md: Name: \"%s\" CT:%llu\n",
		le32_to_cpu(sb->major_version),
		le32_to_cpu(sb->feature_map),
		uuid[0], uuid[1], uuid[2], uuid[3],
		uuid[4], uuid[5], uuid[6], uuid[7],
		uuid[8], uuid[9], uuid[10], uuid[11],
		uuid[12], uuid[13], uuid[14], uuid[15],
		sb->set_name,
		(unsigned long long)le64_to_cpu(sb->ctime)
		       & MD_SUPERBLOCK_1_TIME_SEC_MASK);

	uuid = sb->device_uuid;
	printk(KERN_INFO
	       "md: L%u SZ%llu RD:%u LO:%u CS:%u DO:%llu DS:%llu SO:%llu"
			" RO:%llu\n"
	       "md: Dev:%08x UUID: %02x%02x%02x%02x:%02x%02x:%02x%02x:%02x%02x"
			":%02x%02x%02x%02x%02x%02x\n"
	       "md: (F:0x%08x) UT:%llu Events:%llu ResyncOffset:%llu CSUM:0x%08x\n"
	       "md: (MaxDev:%u) \n",
		le32_to_cpu(sb->level),
		(unsigned long long)le64_to_cpu(sb->size),
		le32_to_cpu(sb->raid_disks),
		le32_to_cpu(sb->layout),
		le32_to_cpu(sb->chunksize),
		(unsigned long long)le64_to_cpu(sb->data_offset),
		(unsigned long long)le64_to_cpu(sb->data_size),
		(unsigned long long)le64_to_cpu(sb->super_offset),
		(unsigned long long)le64_to_cpu(sb->recovery_offset),
		le32_to_cpu(sb->dev_number),
		uuid[0], uuid[1], uuid[2], uuid[3],
		uuid[4], uuid[5], uuid[6], uuid[7],
		uuid[8], uuid[9], uuid[10], uuid[11],
		uuid[12], uuid[13], uuid[14], uuid[15],
		sb->devflags,
		(unsigned long long)le64_to_cpu(sb->utime) & MD_SUPERBLOCK_1_TIME_SEC_MASK,
		(unsigned long long)le64_to_cpu(sb->events),
		(unsigned long long)le64_to_cpu(sb->resync_offset),
		le32_to_cpu(sb->sb_csum),
		le32_to_cpu(sb->max_dev)
		);
}
static void print_rdev(mdk_rdev_t *rdev, int major_version)
{
	char b[BDEVNAME_SIZE];
	printk(KERN_INFO "md: rdev %s, Sect:%08llu F:%d S:%d DN:%u\n",
		bdevname(rdev->bdev, b), (unsigned long long)rdev->sectors,
		test_bit(Faulty, &rdev->flags), test_bit(In_sync, &rdev->flags),
		rdev->desc_nr);
	if (rdev->sb_loaded) {
		printk(KERN_INFO "md: rdev superblock (MJ:%d):\n", major_version);
		switch (major_version) {
		case 0:
			print_sb_90((mdp_super_t*)page_address(rdev->sb_page));
			break;
		case 1:
			print_sb_1((struct mdp_superblock_1 *)page_address(rdev->sb_page));
			break;
		}
	} else
		printk(KERN_INFO "md: no rdev superblock!\n");
}
static void md_print_devices(void)
{
	struct list_head *tmp;
	mdk_rdev_t *rdev;
	mddev_t *mddev;
	char b[BDEVNAME_SIZE];

	printk("\n");
	printk("md: **********************************\n");
	printk("md: * <COMPLETE RAID STATE PRINTOUT> *\n");
	printk("md: **********************************\n");
	for_each_mddev(mddev, tmp) {

		if (mddev->bitmap)
			bitmap_print_sb(mddev->bitmap);
		else
			printk("%s: ", mdname(mddev));
		list_for_each_entry(rdev, &mddev->disks, same_set)
			printk("<%s>", bdevname(rdev->bdev,b));
		printk("\n");

		list_for_each_entry(rdev, &mddev->disks, same_set)
			print_rdev(rdev, mddev->major_version);
	}
	printk("md: **********************************\n");
	printk("\n");
}
static void sync_sbs(mddev_t * mddev, int nospares)
{
	/* Update each superblock (in-memory image), but
	 * if we are allowed to, skip spares which already
	 * have the right event counter, or have one earlier
	 * (which would mean they aren't being marked as dirty
	 * with the rest of the array)
	 */
	mdk_rdev_t *rdev;

	list_for_each_entry(rdev, &mddev->disks, same_set) {
		if (rdev->sb_events == mddev->events ||
		    (nospares &&
		     rdev->raid_disk < 0 &&
		     (rdev->sb_events&1)==0 &&
		     rdev->sb_events+1 == mddev->events)) {
			/* Don't update this superblock */
			rdev->sb_loaded = 2;
		} else {
			super_types[mddev->major_version].
				sync_super(mddev, rdev);
			rdev->sb_loaded = 1;
		}
	}
}
static void md_update_sb(mddev_t * mddev, int force_change)
{
	mdk_rdev_t *rdev;
	int sync_req;
	int nospares = 0;

	mddev->utime = get_seconds();
	if (mddev->external)
		return;
repeat:
	spin_lock_irq(&mddev->write_lock);

	set_bit(MD_CHANGE_PENDING, &mddev->flags);
	if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
		force_change = 1;
	if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags))
		/* just a clean <-> dirty transition, possibly leave spares alone,
		 * though if events isn't the right even/odd, we will have to do
		 * that anyway.
		 */
		nospares = 1;
	if (force_change)
		nospares = 0;
	if (mddev->degraded)
		/* If the array is degraded, then skipping spares is both
		 * dangerous and fairly pointless.
		 * Dangerous because a device that was removed from the array
		 * might have an event_count that still looks up-to-date,
		 * so it can be re-added without a resync.
		 * Pointless because if there are any spares to skip,
		 * then a recovery will happen and soon that array won't
		 * be degraded any more and the spare can go back to sleep then.
		 */
		nospares = 0;

	sync_req = mddev->in_sync;

	/* If this is just a dirty<->clean transition, and the array is clean
	 * and 'events' is odd, we can roll back to the previous clean state */
	if (nospares
	    && (mddev->in_sync && mddev->recovery_cp == MaxSector)
	    && (mddev->events & 1)
	    && mddev->events != 1)
		mddev->events--;
	else {
		/* otherwise we have to go forward and ... */
		mddev->events ++;
		if (!mddev->in_sync || mddev->recovery_cp != MaxSector) { /* not clean */
			/* .. if the array isn't clean, insist on an odd 'events' */
			if ((mddev->events&1)==0) {
				mddev->events++;
				nospares = 0;
			}
		} else {
			/* otherwise insist on an even 'events' (for clean states) */
			if ((mddev->events&1)) {
				mddev->events++;
				nospares = 0;
			}
		}
	}

	if (!mddev->events) {
		/*
		 * oops, this 64-bit counter should never wrap.
		 * Either we are in around ~1 trillion A.C., assuming
		 * 1 reboot per second, or we have a bug:
		 */
		MD_BUG();
		mddev->events --;
	}

	/*
	 * do not write anything to disk if using
	 * nonpersistent superblocks
	 */
	if (!mddev->persistent) {
		if (!mddev->external)
			clear_bit(MD_CHANGE_PENDING, &mddev->flags);

		spin_unlock_irq(&mddev->write_lock);
		wake_up(&mddev->sb_wait);
		return;
	}

	sync_sbs(mddev, nospares);
	spin_unlock_irq(&mddev->write_lock);

	dprintk(KERN_INFO
		"md: updating %s RAID superblock on device (in sync %d)\n",
		mdname(mddev),mddev->in_sync);

	bitmap_update_sb(mddev->bitmap);
	list_for_each_entry(rdev, &mddev->disks, same_set) {
		char b[BDEVNAME_SIZE];
		dprintk(KERN_INFO "md: ");
		if (rdev->sb_loaded != 1)
			continue; /* no noise on spare devices */
		if (test_bit(Faulty, &rdev->flags))
			dprintk("(skipping faulty ");

		dprintk("%s ", bdevname(rdev->bdev,b));
		if (!test_bit(Faulty, &rdev->flags)) {
			md_super_write(mddev,rdev,
				       rdev->sb_start, rdev->sb_size,
				       rdev->sb_page);
			dprintk(KERN_INFO "(write) %s's sb offset: %llu\n",
				bdevname(rdev->bdev,b),
				(unsigned long long)rdev->sb_start);
			rdev->sb_events = mddev->events;

		} else
			dprintk(")\n");
		if (mddev->level == LEVEL_MULTIPATH)
			/* only need to write one superblock... */
			break;
	}
	md_super_wait(mddev);
	/* if there was a failure, MD_CHANGE_DEVS was set, and we re-write super */

	spin_lock_irq(&mddev->write_lock);
	if (mddev->in_sync != sync_req ||
	    test_bit(MD_CHANGE_DEVS, &mddev->flags)) {
		/* have to write it out again */
		spin_unlock_irq(&mddev->write_lock);
		goto repeat;
	}
	clear_bit(MD_CHANGE_PENDING, &mddev->flags);
	spin_unlock_irq(&mddev->write_lock);
	wake_up(&mddev->sb_wait);
	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
		sysfs_notify(&mddev->kobj, NULL, "sync_completed");
}
/* words written to sysfs files may, or may not, be \n terminated.
 * We want to accept either case. For this we use cmd_match.
 */
static int cmd_match(const char *cmd, const char *str)
{
	/* See if cmd, written into a sysfs file, matches
	 * str. They must either be the same, or cmd can
	 * have a trailing newline
	 */
	while (*cmd && *str && *cmd == *str) {
		cmd++;
		str++;
	}
	if (*cmd == '\n')
		cmd++;
	if (*str || *cmd)
		return 0;
	return 1;
}
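
/*
 * For example, both of these match:
 *
 *	cmd_match("faulty\n", "faulty")	-> 1
 *	cmd_match("faulty",   "faulty")	-> 1
 *
 * but a mere prefix does not:
 *
 *	cmd_match("fault",    "faulty")	-> 0
 */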
struct rdev_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(mdk_rdev_t *, char *);
	ssize_t (*store)(mdk_rdev_t *, const char *, size_t);
};
static ssize_t
state_show(mdk_rdev_t *rdev, char *page)
{
	char *sep = "";
	size_t len = 0;

	if (test_bit(Faulty, &rdev->flags)) {
		len += sprintf(page+len, "%sfaulty",sep);
		sep = ",";
	}
	if (test_bit(In_sync, &rdev->flags)) {
		len += sprintf(page+len, "%sin_sync",sep);
		sep = ",";
	}
	if (test_bit(WriteMostly, &rdev->flags)) {
		len += sprintf(page+len, "%swrite_mostly",sep);
		sep = ",";
	}
	if (test_bit(Blocked, &rdev->flags)) {
		len += sprintf(page+len, "%sblocked", sep);
		sep = ",";
	}
	if (!test_bit(Faulty, &rdev->flags) &&
	    !test_bit(In_sync, &rdev->flags)) {
		len += sprintf(page+len, "%sspare", sep);
		sep = ",";
	}
	return len+sprintf(page+len, "\n");
}
static ssize_t
state_store(mdk_rdev_t *rdev, const char *buf, size_t len)
{
	/* can write
	 *  faulty  - simulates an error
	 *  remove  - disconnects the device
	 *  writemostly - sets write_mostly
	 *  -writemostly - clears write_mostly
	 *  blocked - sets the Blocked flag
	 *  -blocked - clears the Blocked flag
	 *  insync - sets Insync providing device isn't active
	 */
	int err = -EINVAL;
	if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
		md_error(rdev->mddev, rdev);
		err = 0;
	} else if (cmd_match(buf, "remove")) {
		if (rdev->raid_disk >= 0)
			err = -EBUSY;
		else {
			mddev_t *mddev = rdev->mddev;
			kick_rdev_from_array(rdev);
			if (mddev->pers)
				md_update_sb(mddev, 1);
			md_new_event(mddev);
			err = 0;
		}
	} else if (cmd_match(buf, "writemostly")) {
		set_bit(WriteMostly, &rdev->flags);
		err = 0;
	} else if (cmd_match(buf, "-writemostly")) {
		clear_bit(WriteMostly, &rdev->flags);
		err = 0;
	} else if (cmd_match(buf, "blocked")) {
		set_bit(Blocked, &rdev->flags);
		err = 0;
	} else if (cmd_match(buf, "-blocked")) {
		clear_bit(Blocked, &rdev->flags);
		wake_up(&rdev->blocked_wait);
		set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
		md_wakeup_thread(rdev->mddev->thread);

		err = 0;
	} else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) {
		set_bit(In_sync, &rdev->flags);
		err = 0;
	}
	if (!err && rdev->sysfs_state)
		sysfs_notify_dirent(rdev->sysfs_state);
	return err ? err : len;
}
static struct rdev_sysfs_entry rdev_state =
__ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
static ssize_t
errors_show(mdk_rdev_t *rdev, char *page)
{
	return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
}

static ssize_t
errors_store(mdk_rdev_t *rdev, const char *buf, size_t len)
{
	char *e;
	unsigned long n = simple_strtoul(buf, &e, 10);
	if (*buf && (*e == 0 || *e == '\n')) {
		atomic_set(&rdev->corrected_errors, n);
		return len;
	}
	return -EINVAL;
}
static struct rdev_sysfs_entry rdev_errors =
__ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store);
static ssize_t
slot_show(mdk_rdev_t *rdev, char *page)
{
	if (rdev->raid_disk < 0)
		return sprintf(page, "none\n");
	else
		return sprintf(page, "%d\n", rdev->raid_disk);
}

static ssize_t
slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
{
	char *e;
	int err;
	char nm[20];
	int slot = simple_strtoul(buf, &e, 10);
	if (strncmp(buf, "none", 4)==0)
		slot = -1;
	else if (e==buf || (*e && *e!= '\n'))
		return -EINVAL;
	if (rdev->mddev->pers && slot == -1) {
		/* Setting 'slot' on an active array requires also
		 * updating the 'rd%d' link, and communicating
		 * with the personality with ->hot_*_disk.
		 * For now we only support removing
		 * failed/spare devices. This normally happens automatically,
		 * but not when the metadata is externally managed.
		 */
		if (rdev->raid_disk == -1)
			return -EEXIST;
		/* personality does all needed checks */
		if (rdev->mddev->pers->hot_add_disk == NULL)
			return -EINVAL;
		err = rdev->mddev->pers->
			hot_remove_disk(rdev->mddev, rdev->raid_disk);
		if (err)
			return err;
		sprintf(nm, "rd%d", rdev->raid_disk);
		sysfs_remove_link(&rdev->mddev->kobj, nm);
		set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
		md_wakeup_thread(rdev->mddev->thread);
	} else if (rdev->mddev->pers) {
		mdk_rdev_t *rdev2;
		/* Activating a spare .. or possibly reactivating
		 * if we ever get bitmaps working here.
		 */

		if (rdev->raid_disk != -1)
			return -EBUSY;

		if (rdev->mddev->pers->hot_add_disk == NULL)
			return -EINVAL;

		list_for_each_entry(rdev2, &rdev->mddev->disks, same_set)
			if (rdev2->raid_disk == slot)
				return -EEXIST;

		rdev->raid_disk = slot;
		if (test_bit(In_sync, &rdev->flags))
			rdev->saved_raid_disk = slot;
		else
			rdev->saved_raid_disk = -1;
		err = rdev->mddev->pers->
			hot_add_disk(rdev->mddev, rdev);
		if (err) {
			rdev->raid_disk = -1;
			return err;
		} else
			sysfs_notify_dirent(rdev->sysfs_state);
		sprintf(nm, "rd%d", rdev->raid_disk);
		if (sysfs_create_link(&rdev->mddev->kobj, &rdev->kobj, nm))
			printk(KERN_WARNING
			       "md: cannot register "
			       "%s for %s\n",
			       nm, mdname(rdev->mddev));

		/* don't wakeup anyone, leave that to userspace. */
	} else {
		if (slot >= rdev->mddev->raid_disks)
			return -ENOSPC;
		rdev->raid_disk = slot;
		/* assume it is working */
		clear_bit(Faulty, &rdev->flags);
		clear_bit(WriteMostly, &rdev->flags);
		set_bit(In_sync, &rdev->flags);
		sysfs_notify_dirent(rdev->sysfs_state);
	}
	return len;
}

static struct rdev_sysfs_entry rdev_slot =
__ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store);
2282 offset_show(mdk_rdev_t *rdev, char *page)
2284 return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset);
2288 offset_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2291 unsigned long long offset = simple_strtoull(buf, &e, 10);
2292 if (e==buf || (*e && *e != '\n'))
2293 return -EINVAL;
2294 if (rdev->mddev->pers && rdev->raid_disk >= 0)
2295 return -EBUSY;
2296 if (rdev->sectors && rdev->mddev->external)
2297 /* Must set offset before size, so overlap checks
2298 * can be sane */
2299 return -EBUSY;
2300 rdev->data_offset = offset;
2301 return len;
2302 }
2304 static struct rdev_sysfs_entry rdev_offset =
2305 __ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store);
2308 rdev_size_show(mdk_rdev_t *rdev, char *page)
2310 return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2);
2313 static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2)
2314 {
2315 /* check if two start/length pairs overlap */
2316 if (s1+l1 <= s2)
2317 return 0;
2318 if (s2+l2 <= s1)
2319 return 0;
2320 return 1;
2321 }
2323 static int strict_blocks_to_sectors(const char *buf, sector_t *sectors)
2324 {
2325 unsigned long long blocks;
2326 sector_t new;
2328 if (strict_strtoull(buf, 10, &blocks) < 0)
2329 return -EINVAL;
2331 if (blocks & 1ULL << (8 * sizeof(blocks) - 1))
2332 return -EINVAL; /* sector conversion overflow */
2334 new = blocks * 2;
2335 if (new != blocks * 2)
2336 return -EINVAL; /* unsigned long long to sector_t overflow */
2338 *sectors = new;
2339 return 0;
2340 }
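/*
 * Worked example (editor's note): sysfs sizes are in 1K blocks while
 * the kernel tracks 512-byte sectors, hence the doubling above.
 * Writing "1048576" (1 GiB in KiB) yields 1048576 * 2 = 2097152
 * sectors; a value with the top bit set would overflow the
 * multiplication and is rejected with -EINVAL.
 */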
2343 rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2345 mddev_t *my_mddev = rdev->mddev;
2346 sector_t oldsectors = rdev->sectors;
2349 if (strict_blocks_to_sectors(buf, &sectors) < 0)
2350 return -EINVAL;
2351 if (my_mddev->pers && rdev->raid_disk >= 0) {
2352 if (my_mddev->persistent) {
2353 sectors = super_types[my_mddev->major_version].
2354 rdev_size_change(rdev, sectors);
2355 if (!sectors)
2356 return -EBUSY;
2357 } else if (!sectors)
2358 sectors = (rdev->bdev->bd_inode->i_size >> 9) -
2359 rdev->data_offset;
2360 }
2361 if (sectors < my_mddev->dev_sectors)
2362 return -EINVAL; /* component must fit device */
2364 rdev->sectors = sectors;
2365 if (sectors > oldsectors && my_mddev->external) {
2366 /* need to check that all other rdevs with the same ->bdev
2367 * do not overlap. We need to unlock the mddev to avoid
2368 * a deadlock. We have already changed rdev->sectors, and if
2369 * we have to change it back, we will have the lock again.
2370 */
2371 mddev_t *mddev;
2372 int overlap = 0;
2373 struct list_head *tmp;
2375 mddev_unlock(my_mddev);
2376 for_each_mddev(mddev, tmp) {
2377 mdk_rdev_t *rdev2;
2379 mddev_lock(mddev);
2380 list_for_each_entry(rdev2, &mddev->disks, same_set)
2381 if (test_bit(AllReserved, &rdev2->flags) ||
2382 (rdev->bdev == rdev2->bdev &&
2383 rdev != rdev2 &&
2384 overlaps(rdev->data_offset, rdev->sectors,
2385 rdev2->data_offset,
2386 rdev2->sectors))) {
2387 overlap = 1;
2388 break;
2389 }
2390 mddev_unlock(mddev);
2391 if (overlap) {
2392 mddev_put(mddev);
2393 break;
2394 }
2395 }
2396 mddev_lock(my_mddev);
2397 if (overlap) {
2398 /* Someone else could have slipped in a size
2399 * change here, but doing so is just silly.
2400 * We put oldsectors back because we *know* it is
2401 * safe, and trust userspace not to race with
2402 * itself
2403 */
2404 rdev->sectors = oldsectors;
2405 return -EBUSY;
2406 }
2407 }
2408 return len;
2409 }
2411 static struct rdev_sysfs_entry rdev_size =
2412 __ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);
2414 static struct attribute *rdev_default_attrs[] = {
2415 &rdev_state.attr,
2416 &rdev_errors.attr,
2417 &rdev_slot.attr,
2418 &rdev_offset.attr,
2419 &rdev_size.attr,
2420 NULL,
2421 };
2422 static ssize_t
2423 rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
2425 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
2426 mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj);
2427 mddev_t *mddev = rdev->mddev;
2428 ssize_t rv;
2430 if (!entry->show)
2431 return -EIO;
2433 rv = mddev ? mddev_lock(mddev) : -EBUSY;
2434 if (rv == 0) {
2435 if (rdev->mddev == NULL)
2436 rv = -EBUSY;
2437 else
2438 rv = entry->show(rdev, page);
2439 mddev_unlock(mddev);
2440 }
2441 return rv;
2442 }
2444 static ssize_t
2445 rdev_attr_store(struct kobject *kobj, struct attribute *attr,
2446 const char *page, size_t length)
2448 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
2449 mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj);
2451 mddev_t *mddev = rdev->mddev;
2453 if (!entry->store)
2454 return -EIO;
2455 if (!capable(CAP_SYS_ADMIN))
2456 return -EACCES;
2457 rv = mddev ? mddev_lock(mddev): -EBUSY;
2458 if (rv == 0) {
2459 if (rdev->mddev == NULL)
2460 rv = -EBUSY;
2461 else
2462 rv = entry->store(rdev, page, length);
2463 mddev_unlock(mddev);
2464 }
2465 return rv;
2466 }
2468 static void rdev_free(struct kobject *ko)
2470 mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
2471 kfree(rdev);
2472 }
2473 static struct sysfs_ops rdev_sysfs_ops = {
2474 .show = rdev_attr_show,
2475 .store = rdev_attr_store,
2476 };
2477 static struct kobj_type rdev_ktype = {
2478 .release = rdev_free,
2479 .sysfs_ops = &rdev_sysfs_ops,
2480 .default_attrs = rdev_default_attrs,
2481 };
2484 * Import a device. If 'super_format' >= 0, then sanity check the superblock
2486 * mark the device faulty if:
2488 * - the device is nonexistent (zero size)
2489 * - the device has no valid superblock
2491 * a faulty rdev _never_ has rdev->sb set.
2493 static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_minor)
2495 char b[BDEVNAME_SIZE];
2500 rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
2502 printk(KERN_ERR "md: could not alloc mem for new device!\n");
2503 return ERR_PTR(-ENOMEM);
2506 if ((err = alloc_disk_sb(rdev)))
2509 err = lock_rdev(rdev, newdev, super_format == -2);
2513 kobject_init(&rdev->kobj, &rdev_ktype);
2516 rdev->saved_raid_disk = -1;
2517 rdev->raid_disk = -1;
2519 rdev->data_offset = 0;
2520 rdev->sb_events = 0;
2521 atomic_set(&rdev->nr_pending, 0);
2522 atomic_set(&rdev->read_errors, 0);
2523 atomic_set(&rdev->corrected_errors, 0);
2525 size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
2528 "md: %s has zero or unknown size, marking faulty!\n",
2529 bdevname(rdev->bdev,b));
2534 if (super_format >= 0) {
2535 err = super_types[super_format].
2536 load_super(rdev, NULL, super_minor);
2537 if (err == -EINVAL) {
2539 "md: %s does not have a valid v%d.%d "
2540 "superblock, not importing!\n",
2541 bdevname(rdev->bdev,b),
2542 super_format, super_minor);
2547 "md: could not read %s's sb, not importing!\n",
2548 bdevname(rdev->bdev,b));
2553 INIT_LIST_HEAD(&rdev->same_set);
2554 init_waitqueue_head(&rdev->blocked_wait);
2559 if (rdev->sb_page) {
2565 return ERR_PTR(err);
2569 * Check a full RAID array for plausibility
2573 static void analyze_sbs(mddev_t * mddev)
2576 mdk_rdev_t *rdev, *freshest, *tmp;
2577 char b[BDEVNAME_SIZE];
2580 rdev_for_each(rdev, tmp, mddev)
2581 switch (super_types[mddev->major_version].
2582 load_super(rdev, freshest, mddev->minor_version)) {
2590 "md: fatal superblock inconsistency in %s"
2591 " -- removing from array\n",
2592 bdevname(rdev->bdev,b));
2593 kick_rdev_from_array(rdev);
2597 super_types[mddev->major_version].
2598 validate_super(mddev, freshest);
2601 rdev_for_each(rdev, tmp, mddev) {
2602 if (rdev->desc_nr >= mddev->max_disks ||
2603 i > mddev->max_disks) {
2605 "md: %s: %s: only %d devices permitted\n",
2606 mdname(mddev), bdevname(rdev->bdev, b),
2608 kick_rdev_from_array(rdev);
2611 if (rdev != freshest)
2612 if (super_types[mddev->major_version].
2613 validate_super(mddev, rdev)) {
2614 printk(KERN_WARNING "md: kicking non-fresh %s"
2616 bdevname(rdev->bdev,b));
2617 kick_rdev_from_array(rdev);
2620 if (mddev->level == LEVEL_MULTIPATH) {
2621 rdev->desc_nr = i++;
2622 rdev->raid_disk = rdev->desc_nr;
2623 set_bit(In_sync, &rdev->flags);
2624 } else if (rdev->raid_disk >= mddev->raid_disks) {
2625 rdev->raid_disk = -1;
2626 clear_bit(In_sync, &rdev->flags);
2631 static void md_safemode_timeout(unsigned long data);
2634 safe_delay_show(mddev_t *mddev, char *page)
2636 int msec = (mddev->safemode_delay*1000)/HZ;
2637 return sprintf(page, "%d.%03d\n", msec/1000, msec%1000);
2640 safe_delay_store(mddev_t *mddev, const char *cbuf, size_t len)
2648 /* remove a period, and count digits after it */
2649 if (len >= sizeof(buf))
2651 strlcpy(buf, cbuf, sizeof(buf));
2652 for (i=0; i<len; i++) {
2654 if (isdigit(buf[i])) {
2659 } else if (buf[i] == '.') {
2664 if (strict_strtoul(buf, 10, &msec) < 0)
2666 msec = (msec * 1000) / scale;
2668 mddev->safemode_delay = 0;
2670 unsigned long old_delay = mddev->safemode_delay;
2671 mddev->safemode_delay = (msec*HZ)/1000;
2672 if (mddev->safemode_delay == 0)
2673 mddev->safemode_delay = 1;
2674 if (mddev->safemode_delay < old_delay)
2675 md_safemode_timeout((unsigned long)mddev);
2679 static struct md_sysfs_entry md_safe_delay =
2680 __ATTR(safe_mode_delay, S_IRUGO|S_IWUSR,safe_delay_show, safe_delay_store);
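/*
 * Worked example (editor's note): safe_mode_delay takes seconds with up
 * to three digits after an optional '.', so "0.200" parses to msec =
 * 200 and a delay of (200 * HZ) / 1000 jiffies; a nonzero request that
 * would round to 0 jiffies is bumped to 1. A hedged userspace sketch
 * (the md0 path is an assumption):
 *
 *	int fd = open("/sys/block/md0/md/safe_mode_delay", O_WRONLY);
 *	write(fd, "0.200", 5);	// 200 msec safemode delay
 *	close(fd);
 */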
2683 level_show(mddev_t *mddev, char *page)
2685 struct mdk_personality *p = mddev->pers;
2687 return sprintf(page, "%s\n", p->name);
2688 else if (mddev->clevel[0])
2689 return sprintf(page, "%s\n", mddev->clevel);
2690 else if (mddev->level != LEVEL_NONE)
2691 return sprintf(page, "%d\n", mddev->level);
2697 level_store(mddev_t *mddev, const char *buf, size_t len)
2701 struct mdk_personality *pers;
2705 if (mddev->pers == NULL) {
2708 if (len >= sizeof(mddev->clevel))
2710 strncpy(mddev->clevel, buf, len);
2711 if (mddev->clevel[len-1] == '\n')
2712 len--;
2713 mddev->clevel[len] = 0;
2714 mddev->level = LEVEL_NONE;
2718 /* request to change the personality. Need to ensure:
2719 * - array is not engaged in resync/recovery/reshape
2720 * - old personality can be suspended
2721 * - new personality will access other array.
2724 if (mddev->sync_thread || mddev->reshape_position != MaxSector)
2727 if (!mddev->pers->quiesce) {
2728 printk(KERN_WARNING "md: %s: %s does not support online personality change\n",
2729 mdname(mddev), mddev->pers->name);
2733 /* Now find the new personality */
2734 if (len == 0 || len >= sizeof(level))
2736 strncpy(level, buf, len);
2737 if (level[len-1] == '\n')
2738 len--;
2739 level[len] = 0;
2741 request_module("md-%s", level);
2742 spin_lock(&pers_lock);
2743 pers = find_pers(LEVEL_NONE, level);
2744 if (!pers || !try_module_get(pers->owner)) {
2745 spin_unlock(&pers_lock);
2746 printk(KERN_WARNING "md: personality %s not loaded\n", level);
2749 spin_unlock(&pers_lock);
2751 if (pers == mddev->pers) {
2752 /* Nothing to do! */
2753 module_put(pers->owner);
2756 if (!pers->takeover) {
2757 module_put(pers->owner);
2758 printk(KERN_WARNING "md: %s: %s does not support personality takeover\n",
2759 mdname(mddev), level);
2763 /* ->takeover must set new_* and/or delta_disks
2764 * if it succeeds, and may set them when it fails.
2766 priv = pers->takeover(mddev);
2768 mddev->new_level = mddev->level;
2769 mddev->new_layout = mddev->layout;
2770 mddev->new_chunk_sectors = mddev->chunk_sectors;
2771 mddev->raid_disks -= mddev->delta_disks;
2772 mddev->delta_disks = 0;
2773 module_put(pers->owner);
2774 printk(KERN_WARNING "md: %s: %s would not accept array\n",
2775 mdname(mddev), level);
2776 return PTR_ERR(priv);
2779 /* Looks like we have a winner */
2780 mddev_suspend(mddev);
2781 mddev->pers->stop(mddev);
2782 module_put(mddev->pers->owner);
2783 /* Invalidate devices that are now superfluous */
2784 list_for_each_entry(rdev, &mddev->disks, same_set)
2785 if (rdev->raid_disk >= mddev->raid_disks) {
2786 rdev->raid_disk = -1;
2787 clear_bit(In_sync, &rdev->flags);
2790 mddev->private = priv;
2791 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
2792 mddev->level = mddev->new_level;
2793 mddev->layout = mddev->new_layout;
2794 mddev->chunk_sectors = mddev->new_chunk_sectors;
2795 mddev->delta_disks = 0;
2797 mddev_resume(mddev);
2798 set_bit(MD_CHANGE_DEVS, &mddev->flags);
2799 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2800 md_wakeup_thread(mddev->thread);
2804 static struct md_sysfs_entry md_level =
2805 __ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store);
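/*
 * Editor's illustration: an online personality change goes through
 * ->takeover above. A hedged sketch, assuming the target personality
 * is built and supports takeover from the array's current shape:
 *
 *	int fd = open("/sys/block/md0/md/level", O_WRONLY);
 *	write(fd, "raid5", 5);	// triggers request_module("md-raid5") if needed
 *	close(fd);
 */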
2809 layout_show(mddev_t *mddev, char *page)
2811 /* just a number, not meaningful for all levels */
2812 if (mddev->reshape_position != MaxSector &&
2813 mddev->layout != mddev->new_layout)
2814 return sprintf(page, "%d (%d)\n",
2815 mddev->new_layout, mddev->layout);
2816 return sprintf(page, "%d\n", mddev->layout);
2820 layout_store(mddev_t *mddev, const char *buf, size_t len)
2823 unsigned long n = simple_strtoul(buf, &e, 10);
2825 if (!*buf || (*e && *e != '\n'))
2830 if (mddev->pers->check_reshape == NULL)
2832 mddev->new_layout = n;
2833 err = mddev->pers->check_reshape(mddev);
2835 mddev->new_layout = mddev->layout;
2839 mddev->new_layout = n;
2840 if (mddev->reshape_position == MaxSector)
2845 static struct md_sysfs_entry md_layout =
2846 __ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store);
2850 raid_disks_show(mddev_t *mddev, char *page)
2852 if (mddev->raid_disks == 0)
2854 if (mddev->reshape_position != MaxSector &&
2855 mddev->delta_disks != 0)
2856 return sprintf(page, "%d (%d)\n", mddev->raid_disks,
2857 mddev->raid_disks - mddev->delta_disks);
2858 return sprintf(page, "%d\n", mddev->raid_disks);
2861 static int update_raid_disks(mddev_t *mddev, int raid_disks);
2864 raid_disks_store(mddev_t *mddev, const char *buf, size_t len)
2868 unsigned long n = simple_strtoul(buf, &e, 10);
2870 if (!*buf || (*e && *e != '\n'))
2874 rv = update_raid_disks(mddev, n);
2875 else if (mddev->reshape_position != MaxSector) {
2876 int olddisks = mddev->raid_disks - mddev->delta_disks;
2877 mddev->delta_disks = n - olddisks;
2878 mddev->raid_disks = n;
2880 mddev->raid_disks = n;
2881 return rv ? rv : len;
2883 static struct md_sysfs_entry md_raid_disks =
2884 __ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store);
2887 chunk_size_show(mddev_t *mddev, char *page)
2889 if (mddev->reshape_position != MaxSector &&
2890 mddev->chunk_sectors != mddev->new_chunk_sectors)
2891 return sprintf(page, "%d (%d)\n",
2892 mddev->new_chunk_sectors << 9,
2893 mddev->chunk_sectors << 9);
2894 return sprintf(page, "%d\n", mddev->chunk_sectors << 9);
2898 chunk_size_store(mddev_t *mddev, const char *buf, size_t len)
2901 unsigned long n = simple_strtoul(buf, &e, 10);
2903 if (!*buf || (*e && *e != '\n'))
2908 if (mddev->pers->check_reshape == NULL)
2910 mddev->new_chunk_sectors = n >> 9;
2911 err = mddev->pers->check_reshape(mddev);
2913 mddev->new_chunk_sectors = mddev->chunk_sectors;
2917 mddev->new_chunk_sectors = n >> 9;
2918 if (mddev->reshape_position == MaxSector)
2919 mddev->chunk_sectors = n >> 9;
2923 static struct md_sysfs_entry md_chunk_size =
2924 __ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store);
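/*
 * Worked example (editor's note): chunk_size is shown and written in
 * bytes but stored in 512-byte sectors, hence the n >> 9 / << 9 pairs
 * above. Writing "65536" stores new_chunk_sectors = 65536 >> 9 = 128;
 * reading back reports 128 << 9 = 65536.
 */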
2927 resync_start_show(mddev_t *mddev, char *page)
2929 if (mddev->recovery_cp == MaxSector)
2930 return sprintf(page, "none\n");
2931 return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp);
2935 resync_start_store(mddev_t *mddev, const char *buf, size_t len)
2938 unsigned long long n = simple_strtoull(buf, &e, 10);
2942 if (!*buf || (*e && *e != '\n'))
2945 mddev->recovery_cp = n;
2948 static struct md_sysfs_entry md_resync_start =
2949 __ATTR(resync_start, S_IRUGO|S_IWUSR, resync_start_show, resync_start_store);
2952 * The array state can be:
2954 * clear
2955 * No devices, no size, no level
2956 * Equivalent to STOP_ARRAY ioctl
2957 * inactive
2958 * May have some settings, but array is not active
2959 * all IO results in error
2960 * When written, doesn't tear down array, but just stops it
2961 * suspended (not supported yet)
2962 * All IO requests will block. The array can be reconfigured.
2963 * Writing this, if accepted, will block until array is quiescent
2964 * readonly
2965 * no resync can happen. no superblocks get written.
2966 * write requests fail
2967 * read-auto
2968 * like readonly, but behaves like 'clean' on a write request.
2970 * clean - no pending writes, but otherwise active.
2971 * When written to inactive array, starts without resync
2972 * If a write request arrives then
2973 * if metadata is known, mark 'dirty' and switch to 'active'.
2974 * if not known, block and switch to write-pending
2975 * If written to an active array that has pending writes, then fails.
2976 * active
2977 * fully active: IO and resync can be happening.
2978 * When written to inactive array, starts with resync
2980 * write-pending
2981 * clean, but writes are blocked waiting for 'active' to be written.
2983 * active-idle
2984 * like active, but no writes have been seen for a while (100msec).
2985 */
2987 enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active,
2988 write_pending, active_idle, bad_word};
2989 static char *array_states[] = {
2990 "clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active",
2991 "write-pending", "active-idle", NULL };
2993 static int match_word(const char *word, char **list)
2996 for (n=0; list[n]; n++)
2997 if (cmd_match(word, list[n]))
2998 break;
2999 return n;
3000 }
3002 static ssize_t
3003 array_state_show(mddev_t *mddev, char *page)
3005 enum array_state st = inactive;
3018 else if (test_bit(MD_CHANGE_CLEAN, &mddev->flags))
3020 else if (mddev->safemode)
3026 if (list_empty(&mddev->disks) &&
3027 mddev->raid_disks == 0 &&
3028 mddev->dev_sectors == 0)
3033 return sprintf(page, "%s\n", array_states[st]);
3036 static int do_md_stop(mddev_t * mddev, int ro, int is_open);
3037 static int do_md_run(mddev_t * mddev);
3038 static int restart_array(mddev_t *mddev);
3041 array_state_store(mddev_t *mddev, const char *buf, size_t len)
3044 enum array_state st = match_word(buf, array_states);
3049 /* stopping an active array */
3050 if (atomic_read(&mddev->openers) > 0)
3052 err = do_md_stop(mddev, 0, 0);
3055 /* stopping an active array */
3057 if (atomic_read(&mddev->openers) > 0)
3059 err = do_md_stop(mddev, 2, 0);
3061 err = 0; /* already inactive */
3064 break; /* not supported yet */
3067 err = do_md_stop(mddev, 1, 0);
3070 set_disk_ro(mddev->gendisk, 1);
3071 err = do_md_run(mddev);
3077 err = do_md_stop(mddev, 1, 0);
3078 else if (mddev->ro == 1)
3079 err = restart_array(mddev);
3082 set_disk_ro(mddev->gendisk, 0);
3086 err = do_md_run(mddev);
3091 restart_array(mddev);
3092 spin_lock_irq(&mddev->write_lock);
3093 if (atomic_read(&mddev->writes_pending) == 0) {
3094 if (mddev->in_sync == 0) {
3096 if (mddev->safemode == 1)
3097 mddev->safemode = 0;
3098 if (mddev->persistent)
3099 set_bit(MD_CHANGE_CLEAN,
3105 spin_unlock_irq(&mddev->write_lock);
3111 restart_array(mddev);
3112 if (mddev->external)
3113 clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
3114 wake_up(&mddev->sb_wait);
3118 set_disk_ro(mddev->gendisk, 0);
3119 err = do_md_run(mddev);
3124 /* these cannot be set */
3130 sysfs_notify_dirent(mddev->sysfs_state);
3134 static struct md_sysfs_entry md_array_state =
3135 __ATTR(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
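/*
 * Editor's illustration: array_state ties the state diagram above to
 * userspace. For example, an external-metadata manager marks a
 * quiesced array clean with (path is an assumption for the example):
 *
 *	int fd = open("/sys/block/md0/md/array_state", O_WRONLY);
 *	write(fd, "clean", 5);	// -EBUSY if writes are still pending
 *	close(fd);
 */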
3138 null_show(mddev_t *mddev, char *page)
3144 new_dev_store(mddev_t *mddev, const char *buf, size_t len)
3146 /* buf must be %d:%d\n? giving major and minor numbers */
3147 /* The new device is added to the array.
3148 * If the array has a persistent superblock, we read the
3149 * superblock to initialise info and check validity.
3150 * Otherwise, the only checking done is that in bind_rdev_to_array,
3151 * which mainly checks size.
3154 int major = simple_strtoul(buf, &e, 10);
3160 if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
3162 minor = simple_strtoul(e+1, &e, 10);
3163 if (*e && *e != '\n')
3165 dev = MKDEV(major, minor);
3166 if (major != MAJOR(dev) ||
3167 minor != MINOR(dev))
3171 if (mddev->persistent) {
3172 rdev = md_import_device(dev, mddev->major_version,
3173 mddev->minor_version);
3174 if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
3175 mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
3176 mdk_rdev_t, same_set);
3177 err = super_types[mddev->major_version]
3178 .load_super(rdev, rdev0, mddev->minor_version);
3182 } else if (mddev->external)
3183 rdev = md_import_device(dev, -2, -1);
3185 rdev = md_import_device(dev, -1, -1);
3188 return PTR_ERR(rdev);
3189 err = bind_rdev_to_array(rdev, mddev);
3193 return err ? err : len;
3196 static struct md_sysfs_entry md_new_device =
3197 __ATTR(new_dev, S_IWUSR, null_show, new_dev_store);
3200 bitmap_store(mddev_t *mddev, const char *buf, size_t len)
3203 unsigned long chunk, end_chunk;
3207 /* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */
3209 chunk = end_chunk = simple_strtoul(buf, &end, 0);
3210 if (buf == end) break;
3211 if (*end == '-') { /* range */
3213 end_chunk = simple_strtoul(buf, &end, 0);
3214 if (buf == end) break;
3216 if (*end && !isspace(*end)) break;
3217 bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
3219 while (isspace(*buf)) buf++;
3221 bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
3226 static struct md_sysfs_entry md_bitmap =
3227 __ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store);
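/*
 * Format example (editor's note): bitmap_set_bits takes chunk numbers,
 * singly or as inclusive ranges, e.g. writing "0 5-8" dirties chunk 0
 * and chunks 5 through 8 so they are resynced; it is intended for
 * assembling arrays whose metadata is managed externally.
 */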
3230 size_show(mddev_t *mddev, char *page)
3232 return sprintf(page, "%llu\n",
3233 (unsigned long long)mddev->dev_sectors / 2);
3236 static int update_size(mddev_t *mddev, sector_t num_sectors);
3239 size_store(mddev_t *mddev, const char *buf, size_t len)
3241 /* If array is inactive, we can reduce the component size, but
3242 * not increase it (except from 0).
3243 * If array is active, we can try an on-line resize
3244 */
3245 sector_t sectors;
3246 int err = strict_blocks_to_sectors(buf, &sectors);
3248 if (err < 0)
3249 return err;
3250 if (mddev->pers) {
3251 err = update_size(mddev, sectors);
3252 md_update_sb(mddev, 1);
3253 } else {
3254 if (mddev->dev_sectors == 0 ||
3255 mddev->dev_sectors > sectors)
3256 mddev->dev_sectors = sectors;
3257 else
3258 err = -ENOSPC;
3259 }
3260 return err ? err : len;
3261 }
3263 static struct md_sysfs_entry md_size =
3264 __ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);
3269 * 'none' for arrays with no metadata (good luck...)
3270 * 'external' for arrays with externally managed metadata,
3271 * or N.M for internally known formats
3274 metadata_show(mddev_t *mddev, char *page)
3276 if (mddev->persistent)
3277 return sprintf(page, "%d.%d\n",
3278 mddev->major_version, mddev->minor_version);
3279 else if (mddev->external)
3280 return sprintf(page, "external:%s\n", mddev->metadata_type);
3282 return sprintf(page, "none\n");
3286 metadata_store(mddev_t *mddev, const char *buf, size_t len)
3290 /* Changing the details of 'external' metadata is
3291 * always permitted. Otherwise there must be
3292 * no devices attached to the array.
3294 if (mddev->external && strncmp(buf, "external:", 9) == 0)
3296 else if (!list_empty(&mddev->disks))
3299 if (cmd_match(buf, "none")) {
3300 mddev->persistent = 0;
3301 mddev->external = 0;
3302 mddev->major_version = 0;
3303 mddev->minor_version = 90;
3306 if (strncmp(buf, "external:", 9) == 0) {
3307 size_t namelen = len-9;
3308 if (namelen >= sizeof(mddev->metadata_type))
3309 namelen = sizeof(mddev->metadata_type)-1;
3310 strncpy(mddev->metadata_type, buf+9, namelen);
3311 mddev->metadata_type[namelen] = 0;
3312 if (namelen && mddev->metadata_type[namelen-1] == '\n')
3313 mddev->metadata_type[--namelen] = 0;
3314 mddev->persistent = 0;
3315 mddev->external = 1;
3316 mddev->major_version = 0;
3317 mddev->minor_version = 90;
3320 major = simple_strtoul(buf, &e, 10);
3321 if (e==buf || *e != '.')
3322 return -EINVAL;
3323 buf = e+1;
3324 minor = simple_strtoul(buf, &e, 10);
3325 if (e==buf || (*e && *e != '\n') )
3326 return -EINVAL;
3327 if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL)
3328 return -ENOENT;
3329 mddev->major_version = major;
3330 mddev->minor_version = minor;
3331 mddev->persistent = 1;
3332 mddev->external = 0;
3333 return len;
3334 }
3336 static struct md_sysfs_entry md_metadata =
3337 __ATTR(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
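/*
 * Value examples (editor's note): metadata_version accepts "none" (no
 * on-disk metadata), "external:<name>" for a userspace metadata agent,
 * or an internal format such as "0.90" or "1.2", whose major number
 * must name an entry in super_types[].
 */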
3340 action_show(mddev_t *mddev, char *page)
3342 char *type = "idle";
3343 if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
3345 else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
3346 (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))) {
3347 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
3349 else if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
3350 if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
3352 else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
3356 } else if (test_bit(MD_RECOVERY_RECOVER, &mddev->recovery))
3359 return sprintf(page, "%s\n", type);
3363 action_store(mddev_t *mddev, const char *page, size_t len)
3365 if (!mddev->pers || !mddev->pers->sync_request)
3368 if (cmd_match(page, "frozen"))
3369 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
3371 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
3373 if (cmd_match(page, "idle") || cmd_match(page, "frozen")) {
3374 if (mddev->sync_thread) {
3375 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
3376 md_unregister_thread(mddev->sync_thread);
3377 mddev->sync_thread = NULL;
3378 mddev->recovery = 0;
3380 } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
3381 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
3383 else if (cmd_match(page, "resync"))
3384 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3385 else if (cmd_match(page, "recover")) {
3386 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
3387 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3388 } else if (cmd_match(page, "reshape")) {
3390 if (mddev->pers->start_reshape == NULL)
3392 err = mddev->pers->start_reshape(mddev);
3395 sysfs_notify(&mddev->kobj, NULL, "degraded");
3397 if (cmd_match(page, "check"))
3398 set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
3399 else if (!cmd_match(page, "repair"))
3401 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
3402 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
3404 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3405 md_wakeup_thread(mddev->thread);
3406 sysfs_notify_dirent(mddev->sysfs_action);
3411 mismatch_cnt_show(mddev_t *mddev, char *page)
3413 return sprintf(page, "%llu\n",
3414 (unsigned long long) mddev->resync_mismatches);
3417 static struct md_sysfs_entry md_scan_mode =
3418 __ATTR(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
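/*
 * Editor's illustration: sync_action accepts "idle", "frozen",
 * "resync", "recover", "check", "repair" and "reshape". A scrub is
 * typically started with (path is an assumption for the example):
 *
 *	int fd = open("/sys/block/md0/md/sync_action", O_WRONLY);
 *	write(fd, "check", 5);	// read-only scrub; "repair" also rewrites
 *	close(fd);
 */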
3421 static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt);
3424 sync_min_show(mddev_t *mddev, char *page)
3426 return sprintf(page, "%d (%s)\n", speed_min(mddev),
3427 mddev->sync_speed_min ? "local": "system");
3431 sync_min_store(mddev_t *mddev, const char *buf, size_t len)
3435 if (strncmp(buf, "system", 6)==0) {
3436 mddev->sync_speed_min = 0;
3439 min = simple_strtoul(buf, &e, 10);
3440 if (buf == e || (*e && *e != '\n') || min <= 0)
3442 mddev->sync_speed_min = min;
3446 static struct md_sysfs_entry md_sync_min =
3447 __ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store);
3450 sync_max_show(mddev_t *mddev, char *page)
3452 return sprintf(page, "%d (%s)\n", speed_max(mddev),
3453 mddev->sync_speed_max ? "local": "system");
3457 sync_max_store(mddev_t *mddev, const char *buf, size_t len)
3461 if (strncmp(buf, "system", 6)==0) {
3462 mddev->sync_speed_max = 0;
3465 max = simple_strtoul(buf, &e, 10);
3466 if (buf == e || (*e && *e != '\n') || max <= 0)
3468 mddev->sync_speed_max = max;
3472 static struct md_sysfs_entry md_sync_max =
3473 __ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store);
3476 degraded_show(mddev_t *mddev, char *page)
3478 return sprintf(page, "%d\n", mddev->degraded);
3480 static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded);
3483 sync_force_parallel_show(mddev_t *mddev, char *page)
3485 return sprintf(page, "%d\n", mddev->parallel_resync);
3489 sync_force_parallel_store(mddev_t *mddev, const char *buf, size_t len)
3493 if (strict_strtol(buf, 10, &n))
3496 if (n != 0 && n != 1)
3499 mddev->parallel_resync = n;
3501 if (mddev->sync_thread)
3502 wake_up(&resync_wait);
3507 /* force parallel resync, even with shared block devices */
3508 static struct md_sysfs_entry md_sync_force_parallel =
3509 __ATTR(sync_force_parallel, S_IRUGO|S_IWUSR,
3510 sync_force_parallel_show, sync_force_parallel_store);
3513 sync_speed_show(mddev_t *mddev, char *page)
3515 unsigned long resync, dt, db;
3516 if (mddev->curr_resync == 0)
3517 return sprintf(page, "none\n");
3518 resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active);
3519 dt = (jiffies - mddev->resync_mark) / HZ;
3521 db = resync - mddev->resync_mark_cnt;
3522 return sprintf(page, "%lu\n", db/dt/2); /* K/sec */
3525 static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed);
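/*
 * Worked example (editor's note): sync_speed reports
 * (resync - resync_mark_cnt) / dt / 2, i.e. sectors per second halved
 * into KiB/sec. If 409600 sectors were covered over the last 10
 * seconds, the file reports 409600 / 10 / 2 = 20480.
 */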
3528 sync_completed_show(mddev_t *mddev, char *page)
3530 unsigned long max_sectors, resync;
3532 if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
3533 return sprintf(page, "none\n");
3535 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
3536 max_sectors = mddev->resync_max_sectors;
3538 max_sectors = mddev->dev_sectors;
3540 resync = mddev->curr_resync_completed;
3541 return sprintf(page, "%lu / %lu\n", resync, max_sectors);
3544 static struct md_sysfs_entry md_sync_completed = __ATTR_RO(sync_completed);
3547 min_sync_show(mddev_t *mddev, char *page)
3549 return sprintf(page, "%llu\n",
3550 (unsigned long long)mddev->resync_min);
3553 min_sync_store(mddev_t *mddev, const char *buf, size_t len)
3555 unsigned long long min;
3556 if (strict_strtoull(buf, 10, &min))
3558 if (min > mddev->resync_max)
3560 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
3563 /* Must be a multiple of chunk_size */
3564 if (mddev->chunk_sectors) {
3565 sector_t temp = min;
3566 if (sector_div(temp, mddev->chunk_sectors))
3569 mddev->resync_min = min;
3574 static struct md_sysfs_entry md_min_sync =
3575 __ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store);
3578 max_sync_show(mddev_t *mddev, char *page)
3580 if (mddev->resync_max == MaxSector)
3581 return sprintf(page, "max\n");
3583 return sprintf(page, "%llu\n",
3584 (unsigned long long)mddev->resync_max);
3587 max_sync_store(mddev_t *mddev, const char *buf, size_t len)
3589 if (strncmp(buf, "max", 3) == 0)
3590 mddev->resync_max = MaxSector;
3592 unsigned long long max;
3593 if (strict_strtoull(buf, 10, &max))
3595 if (max < mddev->resync_min)
3597 if (max < mddev->resync_max &&
3598 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
3601 /* Must be a multiple of chunk_size */
3602 if (mddev->chunk_sectors) {
3603 sector_t temp = max;
3604 if (sector_div(temp, mddev->chunk_sectors))
3607 mddev->resync_max = max;
3609 wake_up(&mddev->recovery_wait);
3613 static struct md_sysfs_entry md_max_sync =
3614 __ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store);
3617 suspend_lo_show(mddev_t *mddev, char *page)
3619 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo);
3623 suspend_lo_store(mddev_t *mddev, const char *buf, size_t len)
3626 unsigned long long new = simple_strtoull(buf, &e, 10);
3628 if (mddev->pers == NULL ||
3629 mddev->pers->quiesce == NULL)
3631 if (buf == e || (*e && *e != '\n'))
3633 if (new >= mddev->suspend_hi ||
3634 (new > mddev->suspend_lo && new < mddev->suspend_hi)) {
3635 mddev->suspend_lo = new;
3636 mddev->pers->quiesce(mddev, 2);
3641 static struct md_sysfs_entry md_suspend_lo =
3642 __ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store);
3646 suspend_hi_show(mddev_t *mddev, char *page)
3648 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi);
3652 suspend_hi_store(mddev_t *mddev, const char *buf, size_t len)
3655 unsigned long long new = simple_strtoull(buf, &e, 10);
3657 if (mddev->pers == NULL ||
3658 mddev->pers->quiesce == NULL)
3660 if (buf == e || (*e && *e != '\n'))
3662 if ((new <= mddev->suspend_lo && mddev->suspend_lo >= mddev->suspend_hi) ||
3663 (new > mddev->suspend_lo && new > mddev->suspend_hi)) {
3664 mddev->suspend_hi = new;
3665 mddev->pers->quiesce(mddev, 1);
3666 mddev->pers->quiesce(mddev, 0);
3671 static struct md_sysfs_entry md_suspend_hi =
3672 __ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
3675 reshape_position_show(mddev_t *mddev, char *page)
3677 if (mddev->reshape_position != MaxSector)
3678 return sprintf(page, "%llu\n",
3679 (unsigned long long)mddev->reshape_position);
3680 strcpy(page, "none\n");
3685 reshape_position_store(mddev_t *mddev, const char *buf, size_t len)
3688 unsigned long long new = simple_strtoull(buf, &e, 10);
3691 if (buf == e || (*e && *e != '\n'))
3693 mddev->reshape_position = new;
3694 mddev->delta_disks = 0;
3695 mddev->new_level = mddev->level;
3696 mddev->new_layout = mddev->layout;
3697 mddev->new_chunk_sectors = mddev->chunk_sectors;
3701 static struct md_sysfs_entry md_reshape_position =
3702 __ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show,
3703 reshape_position_store);
3706 array_size_show(mddev_t *mddev, char *page)
3708 if (mddev->external_size)
3709 return sprintf(page, "%llu\n",
3710 (unsigned long long)mddev->array_sectors/2);
3712 return sprintf(page, "default\n");
3716 array_size_store(mddev_t *mddev, const char *buf, size_t len)
3720 if (strncmp(buf, "default", 7) == 0) {
3722 sectors = mddev->pers->size(mddev, 0, 0);
3724 sectors = mddev->array_sectors;
3726 mddev->external_size = 0;
3728 if (strict_blocks_to_sectors(buf, &sectors) < 0)
3729 return -EINVAL;
3730 if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors)
3731 return -E2BIG;
3733 mddev->external_size = 1;
3736 mddev->array_sectors = sectors;
3737 set_capacity(mddev->gendisk, mddev->array_sectors);
3739 struct block_device *bdev = bdget_disk(mddev->gendisk, 0);
3742 mutex_lock(&bdev->bd_inode->i_mutex);
3743 i_size_write(bdev->bd_inode,
3744 (loff_t)mddev->array_sectors << 9);
3745 mutex_unlock(&bdev->bd_inode->i_mutex);
3753 static struct md_sysfs_entry md_array_size =
3754 __ATTR(array_size, S_IRUGO|S_IWUSR, array_size_show,
3757 static struct attribute *md_default_attrs[] = {
3760 &md_raid_disks.attr,
3761 &md_chunk_size.attr,
3763 &md_resync_start.attr,
3765 &md_new_device.attr,
3766 &md_safe_delay.attr,
3767 &md_array_state.attr,
3768 &md_reshape_position.attr,
3769 &md_array_size.attr,
3773 static struct attribute *md_redundancy_attrs[] = {
3775 &md_mismatches.attr,
3778 &md_sync_speed.attr,
3779 &md_sync_force_parallel.attr,
3780 &md_sync_completed.attr,
3783 &md_suspend_lo.attr,
3784 &md_suspend_hi.attr,
3789 static struct attribute_group md_redundancy_group = {
3791 .attrs = md_redundancy_attrs,
3796 md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
3798 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
3799 mddev_t *mddev = container_of(kobj, struct mddev_s, kobj);
3804 rv = mddev_lock(mddev);
3806 rv = entry->show(mddev, page);
3807 mddev_unlock(mddev);
3813 md_attr_store(struct kobject *kobj, struct attribute *attr,
3814 const char *page, size_t length)
3816 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
3817 mddev_t *mddev = container_of(kobj, struct mddev_s, kobj);
3822 if (!capable(CAP_SYS_ADMIN))
3824 rv = mddev_lock(mddev);
3825 if (mddev->hold_active == UNTIL_IOCTL)
3826 mddev->hold_active = 0;
3828 rv = entry->store(mddev, page, length);
3829 mddev_unlock(mddev);
3834 static void md_free(struct kobject *ko)
3836 mddev_t *mddev = container_of(ko, mddev_t, kobj);
3838 if (mddev->sysfs_state)
3839 sysfs_put(mddev->sysfs_state);
3841 if (mddev->gendisk) {
3842 del_gendisk(mddev->gendisk);
3843 put_disk(mddev->gendisk);
3846 blk_cleanup_queue(mddev->queue);
3851 static struct sysfs_ops md_sysfs_ops = {
3852 .show = md_attr_show,
3853 .store = md_attr_store,
3854 };
3855 static struct kobj_type md_ktype = {
3856 .release = md_free,
3857 .sysfs_ops = &md_sysfs_ops,
3858 .default_attrs = md_default_attrs,
3859 };
3863 static void mddev_delayed_delete(struct work_struct *ws)
3865 mddev_t *mddev = container_of(ws, mddev_t, del_work);
3867 if (mddev->private == &md_redundancy_group) {
3868 sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
3869 if (mddev->sysfs_action)
3870 sysfs_put(mddev->sysfs_action);
3871 mddev->sysfs_action = NULL;
3872 mddev->private = NULL;
3874 kobject_del(&mddev->kobj);
3875 kobject_put(&mddev->kobj);
3878 static int md_alloc(dev_t dev, char *name)
3880 static DEFINE_MUTEX(disks_mutex);
3881 mddev_t *mddev = mddev_find(dev);
3882 struct gendisk *disk;
3891 partitioned = (MAJOR(mddev->unit) != MD_MAJOR);
3892 shift = partitioned ? MdpMinorShift : 0;
3893 unit = MINOR(mddev->unit) >> shift;
3895 /* wait for any previous instance of this device
3896 * to be completely removed (mddev_delayed_delete).
3897 */
3898 flush_scheduled_work();
3900 mutex_lock(&disks_mutex);
3906 /* Need to ensure that 'name' is not a duplicate.
3907 */
3908 mddev_t *mddev2;
3909 spin_lock(&all_mddevs_lock);
3911 list_for_each_entry(mddev2, &all_mddevs, all_mddevs)
3912 if (mddev2->gendisk &&
3913 strcmp(mddev2->gendisk->disk_name, name) == 0) {
3914 spin_unlock(&all_mddevs_lock);
3917 spin_unlock(&all_mddevs_lock);
3921 mddev->queue = blk_alloc_queue(GFP_KERNEL);
3924 mddev->queue->queuedata = mddev;
3926 /* Can be unlocked because the queue is new: no concurrency */
3927 queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, mddev->queue);
3929 blk_queue_make_request(mddev->queue, md_make_request);
3931 disk = alloc_disk(1 << shift);
3933 blk_cleanup_queue(mddev->queue);
3934 mddev->queue = NULL;
3937 disk->major = MAJOR(mddev->unit);
3938 disk->first_minor = unit << shift;
3940 strcpy(disk->disk_name, name);
3941 else if (partitioned)
3942 sprintf(disk->disk_name, "md_d%d", unit);
3944 sprintf(disk->disk_name, "md%d", unit);
3945 disk->fops = &md_fops;
3946 disk->private_data = mddev;
3947 disk->queue = mddev->queue;
3948 /* Allow extended partitions. This makes the
3949 * 'mdp' device redundant, but we can't really
3950 * get rid of it now.
3951 */
3952 disk->flags |= GENHD_FL_EXT_DEVT;
3954 mddev->gendisk = disk;
3955 error = kobject_init_and_add(&mddev->kobj, &md_ktype,
3956 &disk_to_dev(disk)->kobj, "%s", "md");
3958 /* This isn't possible, but as kobject_init_and_add is marked
3959 * __must_check, we must do something with the result
3961 printk(KERN_WARNING "md: cannot register %s/md - name in use\n",
3966 mutex_unlock(&disks_mutex);
3968 kobject_uevent(&mddev->kobj, KOBJ_ADD);
3969 mddev->sysfs_state = sysfs_get_dirent(mddev->kobj.sd, "array_state");
3975 static struct kobject *md_probe(dev_t dev, int *part, void *data)
3977 md_alloc(dev, NULL);
3981 static int add_named_array(const char *val, struct kernel_param *kp)
3983 /* val must be "md_*" where * is not all digits.
3984 * We allocate an array with a large free minor number, and
3985 * set the name to val. val must not already be an active name.
3987 int len = strlen(val);
3988 char buf[DISK_NAME_LEN];
3990 while (len && val[len-1] == '\n')
3991 len--;
3992 if (len >= DISK_NAME_LEN)
3993 return -E2BIG;
3994 strlcpy(buf, val, len+1);
3995 if (strncmp(buf, "md_", 3) != 0)
3996 return -EINVAL;
3997 return md_alloc(0, buf);
3998 }
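/*
 * Usage sketch (editor's addition): add_named_array is wired up as a
 * writable module parameter, so a named array can be preallocated from
 * userspace; assuming the parameter is exposed as new_array:
 *
 *	int fd = open("/sys/module/md_mod/parameters/new_array", O_WRONLY);
 *	write(fd, "md_home", 7);	// allocates an array named md_home
 *	close(fd);
 */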
4000 static void md_safemode_timeout(unsigned long data)
4002 mddev_t *mddev = (mddev_t *) data;
4004 if (!atomic_read(&mddev->writes_pending)) {
4005 mddev->safemode = 1;
4006 if (mddev->external)
4007 sysfs_notify_dirent(mddev->sysfs_state);
4009 md_wakeup_thread(mddev->thread);
4012 static int start_dirty_degraded;
4014 static int do_md_run(mddev_t * mddev)
4018 struct gendisk *disk;
4019 struct mdk_personality *pers;
4021 if (list_empty(&mddev->disks))
4022 /* cannot run an array with no devices.. */
4023 return -EINVAL;
4029 * Analyze all RAID superblock(s)
4031 if (!mddev->raid_disks) {
4032 if (!mddev->persistent)
4037 if (mddev->level != LEVEL_NONE)
4038 request_module("md-level-%d", mddev->level);
4039 else if (mddev->clevel[0])
4040 request_module("md-%s", mddev->clevel);
4043 * Drop all container device buffers, from now on
4044 * the only valid external interface is through the md
4047 list_for_each_entry(rdev, &mddev->disks, same_set) {
4048 if (test_bit(Faulty, &rdev->flags))
4050 sync_blockdev(rdev->bdev);
4051 invalidate_bdev(rdev->bdev);
4053 /* perform some consistency tests on the device.
4054 * We don't want the data to overlap the metadata;
4055 * internal bitmap issues have been handled elsewhere.
4056 */
4057 if (rdev->data_offset < rdev->sb_start) {
4058 if (mddev->dev_sectors &&
4059 rdev->data_offset + mddev->dev_sectors
4061 printk("md: %s: data overlaps metadata\n",
4066 if (rdev->sb_start + rdev->sb_size/512
4067 > rdev->data_offset) {
4068 printk("md: %s: metadata overlaps data\n",
4073 sysfs_notify_dirent(rdev->sysfs_state);
4076 md_probe(mddev->unit, NULL, NULL);
4077 disk = mddev->gendisk;
4081 spin_lock(&pers_lock);
4082 pers = find_pers(mddev->level, mddev->clevel);
4083 if (!pers || !try_module_get(pers->owner)) {
4084 spin_unlock(&pers_lock);
4085 if (mddev->level != LEVEL_NONE)
4086 printk(KERN_WARNING "md: personality for level %d is not loaded!\n",
4089 printk(KERN_WARNING "md: personality for level %s is not loaded!\n",
4094 spin_unlock(&pers_lock);
4095 if (mddev->level != pers->level) {
4096 mddev->level = pers->level;
4097 mddev->new_level = pers->level;
4099 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
4101 if (mddev->reshape_position != MaxSector &&
4102 pers->start_reshape == NULL) {
4103 /* This personality cannot handle reshaping... */
4105 module_put(pers->owner);
4109 if (pers->sync_request) {
4110 /* Warn if this is a potentially silly
4111 * configuration.
4112 */
4113 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
4117 list_for_each_entry(rdev, &mddev->disks, same_set)
4118 list_for_each_entry(rdev2, &mddev->disks, same_set) {
4120 rdev->bdev->bd_contains ==
4121 rdev2->bdev->bd_contains) {
4123 "%s: WARNING: %s appears to be"
4124 " on the same physical disk as"
4127 bdevname(rdev->bdev,b),
4128 bdevname(rdev2->bdev,b2));
4135 "True protection against single-disk"
4136 " failure might be compromised.\n");
4139 mddev->recovery = 0;
4140 /* may be over-ridden by personality */
4141 mddev->resync_max_sectors = mddev->dev_sectors;
4143 mddev->barriers_work = 1;
4144 mddev->ok_start_degraded = start_dirty_degraded;
4147 mddev->ro = 2; /* read-only, but switch on first write */
4149 err = mddev->pers->run(mddev);
4151 printk(KERN_ERR "md: pers->run() failed ...\n");
4152 else if (mddev->pers->size(mddev, 0, 0) < mddev->array_sectors) {
4153 WARN_ONCE(!mddev->external_size, "%s: default size too small,"
4154 " but 'external_size' not in effect?\n", __func__);
4156 "md: invalid array_size %llu > default size %llu\n",
4157 (unsigned long long)mddev->array_sectors / 2,
4158 (unsigned long long)mddev->pers->size(mddev, 0, 0) / 2);
4160 mddev->pers->stop(mddev);
4162 if (err == 0 && mddev->pers->sync_request) {
4163 err = bitmap_create(mddev);
4165 printk(KERN_ERR "%s: failed to create bitmap (%d)\n",
4166 mdname(mddev), err);
4167 mddev->pers->stop(mddev);
4171 module_put(mddev->pers->owner);
4173 bitmap_destroy(mddev);
4176 if (mddev->pers->sync_request) {
4177 if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
4179 "md: cannot register extra attributes for %s\n",
4181 mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action");
4182 } else if (mddev->ro == 2) /* auto-readonly not meaningful */
4185 atomic_set(&mddev->writes_pending,0);
4186 mddev->safemode = 0;
4187 mddev->safemode_timer.function = md_safemode_timeout;
4188 mddev->safemode_timer.data = (unsigned long) mddev;
4189 mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */
4192 list_for_each_entry(rdev, &mddev->disks, same_set)
4193 if (rdev->raid_disk >= 0) {
4195 sprintf(nm, "rd%d", rdev->raid_disk);
4196 if (sysfs_create_link(&mddev->kobj, &rdev->kobj, nm))
4197 printk("md: cannot register %s for %s\n",
4201 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4204 md_update_sb(mddev, 0);
4206 set_capacity(disk, mddev->array_sectors);
4208 /* If there is a partially-recovered drive we need to
4209 * start recovery here. If we leave it to md_check_recovery,
4210 * it will remove the drives and not do the right thing
4211 */
4212 if (mddev->degraded && !mddev->sync_thread) {
4214 list_for_each_entry(rdev, &mddev->disks, same_set)
4215 if (rdev->raid_disk >= 0 &&
4216 !test_bit(In_sync, &rdev->flags) &&
4217 !test_bit(Faulty, &rdev->flags))
4218 /* complete an interrupted recovery */
4220 if (spares && mddev->pers->sync_request) {
4221 mddev->recovery = 0;
4222 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
4223 mddev->sync_thread = md_register_thread(md_do_sync,
4226 if (!mddev->sync_thread) {
4227 printk(KERN_ERR "%s: could not start resync"
4230 /* leave the spares where they are, it shouldn't hurt */
4231 mddev->recovery = 0;
4235 md_wakeup_thread(mddev->thread);
4236 md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
4239 md_new_event(mddev);
4240 sysfs_notify_dirent(mddev->sysfs_state);
4241 if (mddev->sysfs_action)
4242 sysfs_notify_dirent(mddev->sysfs_action);
4243 sysfs_notify(&mddev->kobj, NULL, "degraded");
4244 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
4248 static int restart_array(mddev_t *mddev)
4250 struct gendisk *disk = mddev->gendisk;
4252 /* Complain if it has no devices */
4253 if (list_empty(&mddev->disks))
4259 mddev->safemode = 0;
4261 set_disk_ro(disk, 0);
4262 printk(KERN_INFO "md: %s switched to read-write mode.\n",
4264 /* Kick recovery or resync if necessary */
4265 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4266 md_wakeup_thread(mddev->thread);
4267 md_wakeup_thread(mddev->sync_thread);
4268 sysfs_notify_dirent(mddev->sysfs_state);
4272 /* similar to deny_write_access, but accounts for our holding a reference
4273 * to the file ourselves */
4274 static int deny_bitmap_write_access(struct file * file)
4276 struct inode *inode = file->f_mapping->host;
4278 spin_lock(&inode->i_lock);
4279 if (atomic_read(&inode->i_writecount) > 1) {
4280 spin_unlock(&inode->i_lock);
4283 atomic_set(&inode->i_writecount, -1);
4284 spin_unlock(&inode->i_lock);
4289 static void restore_bitmap_write_access(struct file *file)
4291 struct inode *inode = file->f_mapping->host;
4293 spin_lock(&inode->i_lock);
4294 atomic_set(&inode->i_writecount, 1);
4295 spin_unlock(&inode->i_lock);
4298 /* mode:
4299 * 0 - completely stop and dis-assemble array
4300 * 1 - switch to readonly
4301 * 2 - stop but do not disassemble array
4303 static int do_md_stop(mddev_t * mddev, int mode, int is_open)
4306 struct gendisk *disk = mddev->gendisk;
4309 if (atomic_read(&mddev->openers) > is_open) {
4310 printk("md: %s still in use.\n",mdname(mddev));
4316 if (mddev->sync_thread) {
4317 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4318 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
4319 md_unregister_thread(mddev->sync_thread);
4320 mddev->sync_thread = NULL;
4323 del_timer_sync(&mddev->safemode_timer);
4326 case 1: /* readonly */
4332 case 0: /* disassemble */
4334 bitmap_flush(mddev);
4335 md_super_wait(mddev);
4337 set_disk_ro(disk, 0);
4339 mddev->pers->stop(mddev);
4340 mddev->queue->merge_bvec_fn = NULL;
4341 mddev->queue->unplug_fn = NULL;
4342 mddev->queue->backing_dev_info.congested_fn = NULL;
4343 module_put(mddev->pers->owner);
4344 if (mddev->pers->sync_request)
4345 mddev->private = &md_redundancy_group;
4347 /* tell userspace to handle 'inactive' */
4348 sysfs_notify_dirent(mddev->sysfs_state);
4350 list_for_each_entry(rdev, &mddev->disks, same_set)
4351 if (rdev->raid_disk >= 0) {
4353 sprintf(nm, "rd%d", rdev->raid_disk);
4354 sysfs_remove_link(&mddev->kobj, nm);
4357 set_capacity(disk, 0);
4363 if (!mddev->in_sync || mddev->flags) {
4364 /* mark array as shutdown cleanly */
4366 md_update_sb(mddev, 1);
4369 set_disk_ro(disk, 1);
4370 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4374 * Free resources if final stop
4378 printk(KERN_INFO "md: %s stopped.\n", mdname(mddev));
4380 bitmap_destroy(mddev);
4381 if (mddev->bitmap_file) {
4382 restore_bitmap_write_access(mddev->bitmap_file);
4383 fput(mddev->bitmap_file);
4384 mddev->bitmap_file = NULL;
4386 mddev->bitmap_offset = 0;
4388 /* make sure all md_delayed_delete calls have finished */
4389 flush_scheduled_work();
4391 export_array(mddev);
4393 mddev->array_sectors = 0;
4394 mddev->external_size = 0;
4395 mddev->dev_sectors = 0;
4396 mddev->raid_disks = 0;
4397 mddev->recovery_cp = 0;
4398 mddev->resync_min = 0;
4399 mddev->resync_max = MaxSector;
4400 mddev->reshape_position = MaxSector;
4401 mddev->external = 0;
4402 mddev->persistent = 0;
4403 mddev->level = LEVEL_NONE;
4404 mddev->clevel[0] = 0;
4407 mddev->metadata_type[0] = 0;
4408 mddev->chunk_sectors = 0;
4409 mddev->ctime = mddev->utime = 0;
4411 mddev->max_disks = 0;
4413 mddev->delta_disks = 0;
4414 mddev->new_level = LEVEL_NONE;
4415 mddev->new_layout = 0;
4416 mddev->new_chunk_sectors = 0;
4417 mddev->curr_resync = 0;
4418 mddev->resync_mismatches = 0;
4419 mddev->suspend_lo = mddev->suspend_hi = 0;
4420 mddev->sync_speed_min = mddev->sync_speed_max = 0;
4421 mddev->recovery = 0;
4424 mddev->degraded = 0;
4425 mddev->barriers_work = 0;
4426 mddev->safemode = 0;
4427 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
4428 if (mddev->hold_active == UNTIL_STOP)
4429 mddev->hold_active = 0;
4431 } else if (mddev->pers)
4432 printk(KERN_INFO "md: %s switched to read-only mode.\n",
4435 blk_integrity_unregister(disk);
4436 md_new_event(mddev);
4437 sysfs_notify_dirent(mddev->sysfs_state);
4443 static void autorun_array(mddev_t *mddev)
4448 if (list_empty(&mddev->disks))
4451 printk(KERN_INFO "md: running: ");
4453 list_for_each_entry(rdev, &mddev->disks, same_set) {
4454 char b[BDEVNAME_SIZE];
4455 printk("<%s>", bdevname(rdev->bdev,b));
4459 err = do_md_run(mddev);
4461 printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
4462 do_md_stop(mddev, 0, 0);
4467 * let's try to run arrays based on all disks that have arrived
4468 * until now. (those are in pending_raid_disks)
4470 * the method: pick the first pending disk, collect all disks with
4471 * the same UUID, remove all from the pending list and put them into
4472 * the 'same_array' list. Then order this list based on superblock
4473 * update time (freshest comes first), kick out 'old' disks and
4474 * compare superblocks. If everything's fine then run it.
4476 * If "unit" is allocated, then bump its reference count
4478 static void autorun_devices(int part)
4480 mdk_rdev_t *rdev0, *rdev, *tmp;
4482 char b[BDEVNAME_SIZE];
4484 printk(KERN_INFO "md: autorun ...\n");
4485 while (!list_empty(&pending_raid_disks)) {
4488 LIST_HEAD(candidates);
4489 rdev0 = list_entry(pending_raid_disks.next,
4490 mdk_rdev_t, same_set);
4492 printk(KERN_INFO "md: considering %s ...\n",
4493 bdevname(rdev0->bdev,b));
4494 INIT_LIST_HEAD(&candidates);
4495 rdev_for_each_list(rdev, tmp, &pending_raid_disks)
4496 if (super_90_load(rdev, rdev0, 0) >= 0) {
4497 printk(KERN_INFO "md: adding %s ...\n",
4498 bdevname(rdev->bdev,b));
4499 list_move(&rdev->same_set, &candidates);
4502 * now we have a set of devices, with all of them having
4503 * mostly sane superblocks. It's time to allocate the
4504 * mddev.
4505 */
4507 dev = MKDEV(mdp_major,
4508 rdev0->preferred_minor << MdpMinorShift);
4509 unit = MINOR(dev) >> MdpMinorShift;
4511 dev = MKDEV(MD_MAJOR, rdev0->preferred_minor);
4514 if (rdev0->preferred_minor != unit) {
4515 printk(KERN_INFO "md: unit number in %s is bad: %d\n",
4516 bdevname(rdev0->bdev, b), rdev0->preferred_minor);
4520 md_probe(dev, NULL, NULL);
4521 mddev = mddev_find(dev);
4522 if (!mddev || !mddev->gendisk) {
4526 "md: cannot allocate memory for md drive.\n");
4529 if (mddev_lock(mddev))
4530 printk(KERN_WARNING "md: %s locked, cannot run\n",
4532 else if (mddev->raid_disks || mddev->major_version
4533 || !list_empty(&mddev->disks)) {
4535 "md: %s already running, cannot run %s\n",
4536 mdname(mddev), bdevname(rdev0->bdev,b));
4537 mddev_unlock(mddev);
4539 printk(KERN_INFO "md: created %s\n", mdname(mddev));
4540 mddev->persistent = 1;
4541 rdev_for_each_list(rdev, tmp, &candidates) {
4542 list_del_init(&rdev->same_set);
4543 if (bind_rdev_to_array(rdev, mddev))
4546 autorun_array(mddev);
4547 mddev_unlock(mddev);
4549 /* on success, candidates will be empty, on error
4550 * it won't...
4551 */
4552 rdev_for_each_list(rdev, tmp, &candidates) {
4553 list_del_init(&rdev->same_set);
4558 printk(KERN_INFO "md: ... autorun DONE.\n");
4560 #endif /* !MODULE */
4562 static int get_version(void __user * arg)
4566 ver.major = MD_MAJOR_VERSION;
4567 ver.minor = MD_MINOR_VERSION;
4568 ver.patchlevel = MD_PATCHLEVEL_VERSION;
4570 if (copy_to_user(arg, &ver, sizeof(ver)))
4571 return -EFAULT;
4573 return 0;
4574 }
4576 static int get_array_info(mddev_t * mddev, void __user * arg)
4578 mdu_array_info_t info;
4579 int nr,working,active,failed,spare;
4582 nr=working=active=failed=spare=0;
4583 list_for_each_entry(rdev, &mddev->disks, same_set) {
4585 if (test_bit(Faulty, &rdev->flags))
4589 if (test_bit(In_sync, &rdev->flags))
4596 info.major_version = mddev->major_version;
4597 info.minor_version = mddev->minor_version;
4598 info.patch_version = MD_PATCHLEVEL_VERSION;
4599 info.ctime = mddev->ctime;
4600 info.level = mddev->level;
4601 info.size = mddev->dev_sectors / 2;
4602 if (info.size != mddev->dev_sectors / 2) /* overflow */
4603 info.size = -1;
4605 info.raid_disks = mddev->raid_disks;
4606 info.md_minor = mddev->md_minor;
4607 info.not_persistent= !mddev->persistent;
4609 info.utime = mddev->utime;
4610 info.state = 0;
4611 if (mddev->in_sync)
4612 info.state = (1<<MD_SB_CLEAN);
4613 if (mddev->bitmap && mddev->bitmap_offset)
4614 info.state = (1<<MD_SB_BITMAP_PRESENT);
4615 info.active_disks = active;
4616 info.working_disks = working;
4617 info.failed_disks = failed;
4618 info.spare_disks = spare;
4620 info.layout = mddev->layout;
4621 info.chunk_size = mddev->chunk_sectors << 9;
4623 if (copy_to_user(arg, &info, sizeof(info)))
4624 return -EFAULT;
4626 return 0;
4627 }
4629 static int get_bitmap_file(mddev_t * mddev, void __user * arg)
4631 mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */
4632 char *ptr, *buf = NULL;
4635 if (md_allow_write(mddev))
4636 file = kmalloc(sizeof(*file), GFP_NOIO);
4638 file = kmalloc(sizeof(*file), GFP_KERNEL);
4643 /* bitmap disabled, zero the first byte and copy out */
4644 if (!mddev->bitmap || !mddev->bitmap->file) {
4645 file->pathname[0] = '\0';
4649 buf = kmalloc(sizeof(file->pathname), GFP_KERNEL);
4653 ptr = d_path(&mddev->bitmap->file->f_path, buf, sizeof(file->pathname));
4657 strcpy(file->pathname, ptr);
4661 if (copy_to_user(arg, file, sizeof(*file)))
4669 static int get_disk_info(mddev_t * mddev, void __user * arg)
4671 mdu_disk_info_t info;
4674 if (copy_from_user(&info, arg, sizeof(info)))
4675 return -EFAULT;
4677 rdev = find_rdev_nr(mddev, info.number);
4678 if (rdev) {
4679 info.major = MAJOR(rdev->bdev->bd_dev);
4680 info.minor = MINOR(rdev->bdev->bd_dev);
4681 info.raid_disk = rdev->raid_disk;
4683 if (test_bit(Faulty, &rdev->flags))
4684 info.state |= (1<<MD_DISK_FAULTY);
4685 else if (test_bit(In_sync, &rdev->flags)) {
4686 info.state |= (1<<MD_DISK_ACTIVE);
4687 info.state |= (1<<MD_DISK_SYNC);
4689 if (test_bit(WriteMostly, &rdev->flags))
4690 info.state |= (1<<MD_DISK_WRITEMOSTLY);
4692 info.major = info.minor = 0;
4693 info.raid_disk = -1;
4694 info.state = (1<<MD_DISK_REMOVED);
4697 if (copy_to_user(arg, &info, sizeof(info)))
4698 return -EFAULT;
4700 return 0;
4701 }
4703 static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
4705 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
4707 dev_t dev = MKDEV(info->major,info->minor);
4709 if (info->major != MAJOR(dev) || info->minor != MINOR(dev))
4712 if (!mddev->raid_disks) {
4714 /* expecting a device which has a superblock */
4715 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
4718 "md: md_import_device returned %ld\n",
4720 return PTR_ERR(rdev);
4722 if (!list_empty(&mddev->disks)) {
4723 mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
4724 mdk_rdev_t, same_set);
4725 int err = super_types[mddev->major_version]
4726 .load_super(rdev, rdev0, mddev->minor_version);
4729 "md: %s has different UUID to %s\n",
4730 bdevname(rdev->bdev,b),
4731 bdevname(rdev0->bdev,b2));
4736 err = bind_rdev_to_array(rdev, mddev);
4743 * add_new_disk can be used once the array is assembled
4744 * to add "hot spares". They must already have a superblock
4745 * written
4746 */
4749 if (!mddev->pers->hot_add_disk) {
4751 "%s: personality does not support diskops!\n",
4755 if (mddev->persistent)
4756 rdev = md_import_device(dev, mddev->major_version,
4757 mddev->minor_version);
4759 rdev = md_import_device(dev, -1, -1);
4762 "md: md_import_device returned %ld\n",
4764 return PTR_ERR(rdev);
4766 /* set save_raid_disk if appropriate */
4767 if (!mddev->persistent) {
4768 if (info->state & (1<<MD_DISK_SYNC) &&
4769 info->raid_disk < mddev->raid_disks)
4770 rdev->raid_disk = info->raid_disk;
4772 rdev->raid_disk = -1;
4774 super_types[mddev->major_version].
4775 validate_super(mddev, rdev);
4776 rdev->saved_raid_disk = rdev->raid_disk;
4778 clear_bit(In_sync, &rdev->flags); /* just to be sure */
4779 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
4780 set_bit(WriteMostly, &rdev->flags);
4782 clear_bit(WriteMostly, &rdev->flags);
4784 rdev->raid_disk = -1;
4785 err = bind_rdev_to_array(rdev, mddev);
4786 if (!err && !mddev->pers->hot_remove_disk) {
4787 /* If there is hot_add_disk but no hot_remove_disk,
4788 * then the added disks are for geometry changes
4789 * and should be added immediately.
4790 */
4791 super_types[mddev->major_version].
4792 validate_super(mddev, rdev);
4793 err = mddev->pers->hot_add_disk(mddev, rdev);
4795 unbind_rdev_from_array(rdev);
4800 sysfs_notify_dirent(rdev->sysfs_state);
4802 md_update_sb(mddev, 1);
4803 if (mddev->degraded)
4804 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
4805 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4806 md_wakeup_thread(mddev->thread);
4810 /* otherwise, add_new_disk is only allowed
4811 * for major_version==0 superblocks
4813 if (mddev->major_version != 0) {
4814 printk(KERN_WARNING "%s: ADD_NEW_DISK not supported\n",
4819 if (!(info->state & (1<<MD_DISK_FAULTY))) {
4821 rdev = md_import_device(dev, -1, 0);
4824 "md: error, md_import_device() returned %ld\n",
4826 return PTR_ERR(rdev);
4828 rdev->desc_nr = info->number;
4829 if (info->raid_disk < mddev->raid_disks)
4830 rdev->raid_disk = info->raid_disk;
4832 rdev->raid_disk = -1;
4834 if (rdev->raid_disk < mddev->raid_disks)
4835 if (info->state & (1<<MD_DISK_SYNC))
4836 set_bit(In_sync, &rdev->flags);
4838 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
4839 set_bit(WriteMostly, &rdev->flags);
4841 if (!mddev->persistent) {
4842 printk(KERN_INFO "md: nonpersistent superblock ...\n");
4843 rdev->sb_start = rdev->bdev->bd_inode->i_size / 512;
4845 rdev->sb_start = calc_dev_sboffset(rdev->bdev);
4846 rdev->sectors = rdev->sb_start;
4848 err = bind_rdev_to_array(rdev, mddev);
static int hot_remove_disk(mddev_t * mddev, dev_t dev)
{
    char b[BDEVNAME_SIZE];
    mdk_rdev_t *rdev;

    rdev = find_rdev(mddev, dev);
    if (!rdev)
        return -ENXIO;

    if (rdev->raid_disk >= 0)
        goto busy;

    kick_rdev_from_array(rdev);
    md_update_sb(mddev, 1);
    md_new_event(mddev);

    return 0;
busy:
    printk(KERN_WARNING "md: cannot remove active disk %s from %s ...\n",
        bdevname(rdev->bdev,b), mdname(mddev));
    return -EBUSY;
}

static int hot_add_disk(mddev_t * mddev, dev_t dev)
{
    char b[BDEVNAME_SIZE];
    int err;
    mdk_rdev_t *rdev;

    if (mddev->major_version != 0) {
        printk(KERN_WARNING "%s: HOT_ADD may only be used with"
               " version-0 superblocks.\n",
               mdname(mddev));
        return -EINVAL;
    }
    if (!mddev->pers->hot_add_disk) {
        printk(KERN_WARNING
            "%s: personality does not support diskops!\n",
            mdname(mddev));
        return -EINVAL;
    }

    rdev = md_import_device(dev, -1, 0);
    if (IS_ERR(rdev)) {
        printk(KERN_WARNING
            "md: error, md_import_device() returned %ld\n",
            PTR_ERR(rdev));
        return -EINVAL;
    }

    if (mddev->persistent)
        rdev->sb_start = calc_dev_sboffset(rdev->bdev);
    else
        rdev->sb_start = rdev->bdev->bd_inode->i_size / 512;
    rdev->sectors = rdev->sb_start;

    if (test_bit(Faulty, &rdev->flags)) {
        printk(KERN_WARNING
            "md: can not hot-add faulty %s disk to %s!\n",
            bdevname(rdev->bdev,b), mdname(mddev));
        err = -EINVAL;
        goto abort_export;
    }
    clear_bit(In_sync, &rdev->flags);
    rdev->saved_raid_disk = -1;
    err = bind_rdev_to_array(rdev, mddev);
    if (err)
        goto abort_export;

    /*
     * The rest should better be atomic, we can have disk failures
     * noticed in interrupt contexts ...
     */
    rdev->raid_disk = -1;
    md_update_sb(mddev, 1);

    /*
     * Kick recovery, maybe this spare has to be added to the
     * array immediately.
     */
    set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
    md_wakeup_thread(mddev->thread);
    md_new_event(mddev);
    return 0;

abort_export:
    export_rdev(rdev);
    return err;
}

static int set_bitmap_file(mddev_t *mddev, int fd)
{
    int err;

    if (mddev->pers) {
        if (!mddev->pers->quiesce)
            return -EBUSY;
        if (mddev->recovery || mddev->sync_thread)
            return -EBUSY;
        /* we should be able to change the bitmap.. */
    }

    if (fd >= 0) {
        if (mddev->bitmap)
            return -EEXIST; /* cannot add when bitmap is present */
        mddev->bitmap_file = fget(fd);

        if (mddev->bitmap_file == NULL) {
            printk(KERN_ERR "%s: error: failed to get bitmap file\n",
                   mdname(mddev));
            return -EBADF;
        }

        err = deny_bitmap_write_access(mddev->bitmap_file);
        if (err) {
            printk(KERN_ERR "%s: error: bitmap file is already in use\n",
                   mdname(mddev));
            fput(mddev->bitmap_file);
            mddev->bitmap_file = NULL;
            return err;
        }
        mddev->bitmap_offset = 0; /* file overrides offset */
    } else if (mddev->bitmap == NULL)
        return -ENOENT; /* cannot remove what isn't there */
    err = 0;
    if (mddev->pers) {
        mddev->pers->quiesce(mddev, 1);
        if (fd >= 0)
            err = bitmap_create(mddev);
        if (fd < 0 || err) {
            bitmap_destroy(mddev);
            fd = -1; /* make sure to put the file */
        }
        mddev->pers->quiesce(mddev, 0);
    }
    if (fd < 0) {
        if (mddev->bitmap_file) {
            restore_bitmap_write_access(mddev->bitmap_file);
            fput(mddev->bitmap_file);
        }
        mddev->bitmap_file = NULL;
    }

    return err;
}

/*
 * set_array_info is used two different ways.
 * The original usage is when creating a new array.
 *  In this usage, raid_disks is > 0 and it together with
 *  level, size, not_persistent, layout and chunksize determine the
 *  shape of the array.
 *  This will always create an array with a type-0.90.0 superblock.
 * The newer usage is when assembling an array.
 *  In this case raid_disks will be 0, and the major_version field is
 *  used to determine which style super-blocks are to be found on the devices.
 *  The minor and patch _version numbers are also kept in case the
 *  super_block handler wishes to interpret them.
 */
static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
{
    if (info->raid_disks == 0) {
        /* just setting version number for superblock loading */
        if (info->major_version < 0 ||
            info->major_version >= ARRAY_SIZE(super_types) ||
            super_types[info->major_version].name == NULL) {
            /* maybe try to auto-load a module? */
            printk(KERN_INFO
                "md: superblock version %d not known\n",
                info->major_version);
            return -EINVAL;
        }
        mddev->major_version = info->major_version;
        mddev->minor_version = info->minor_version;
        mddev->patch_version = info->patch_version;
        mddev->persistent = !info->not_persistent;
        return 0;
    }
    mddev->major_version = MD_MAJOR_VERSION;
    mddev->minor_version = MD_MINOR_VERSION;
    mddev->patch_version = MD_PATCHLEVEL_VERSION;
    mddev->ctime         = get_seconds();

    mddev->level         = info->level;
    mddev->clevel[0]     = 0;
    mddev->dev_sectors   = 2 * (sector_t)info->size;
    mddev->raid_disks    = info->raid_disks;
    /* don't set md_minor, it is determined by which /dev/md* was
     * opened
     */
    if (info->state & (1<<MD_SB_CLEAN))
        mddev->recovery_cp = MaxSector;
    else
        mddev->recovery_cp = 0;
    mddev->persistent    = ! info->not_persistent;
    mddev->external      = 0;

    mddev->layout        = info->layout;
    mddev->chunk_sectors = info->chunk_size >> 9;

    mddev->max_disks     = MD_SB_DISKS;

    if (mddev->persistent)
        mddev->flags = 0;
    set_bit(MD_CHANGE_DEVS, &mddev->flags);

    mddev->default_bitmap_offset = MD_SB_BYTES >> 9;
    mddev->bitmap_offset = 0;

    mddev->reshape_position = MaxSector;

    /*
     * Generate a 128 bit UUID
     */
    get_random_bytes(mddev->uuid, 16);

    mddev->new_level = mddev->level;
    mddev->new_chunk_sectors = mddev->chunk_sectors;
    mddev->new_layout = mddev->layout;
    mddev->delta_disks = 0;

    return 0;
}

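/*
 * For illustration only (not driver code): a minimal userspace sketch of
 * the two SET_ARRAY_INFO usages described above.  The fd and the numbers
 * are hypothetical and error handling is omitted; mdadm is the real user
 * of this interface.
 */
#if 0
#include <sys/ioctl.h>
#include <linux/raid/md_u.h>

static void create_then_assemble_example(int md_fd)
{
    mdu_array_info_t info = {0};
    mdu_array_info_t ver = {0};

    /* creation: raid_disks > 0, shape fields define the array */
    info.level = 1;              /* RAID1 */
    info.raid_disks = 2;
    info.size = 1024 * 1024;     /* per-device size in KB */
    info.chunk_size = 64 * 1024; /* in bytes */
    ioctl(md_fd, SET_ARRAY_INFO, &info);

    /* assembly: raid_disks == 0, only the version fields matter */
    ver.major_version = 0;
    ver.minor_version = 90;
    ioctl(md_fd, SET_ARRAY_INFO, &ver);
}
#endif
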
void md_set_array_sectors(mddev_t *mddev, sector_t array_sectors)
{
    WARN(!mddev_is_locked(mddev), "%s: unlocked mddev!\n", __func__);

    if (mddev->external_size)
        return;

    mddev->array_sectors = array_sectors;
}
EXPORT_SYMBOL(md_set_array_sectors);

static int update_size(mddev_t *mddev, sector_t num_sectors)
{
    mdk_rdev_t *rdev;
    int rv;
    int fit = (num_sectors == 0);

    if (mddev->pers->resize == NULL)
        return -EINVAL;
    /* The "num_sectors" is the number of sectors of each device that
     * is used.  This can only make sense for arrays with redundancy.
     * linear and raid0 always use whatever space is available. We can only
     * consider changing this number if no resync or reconstruction is
     * happening, and if the new size is acceptable. It must fit before the
     * sb_start or, if that is <data_offset, it must fit before the size
     * of each device.  If num_sectors is zero, we find the largest size
     * that fits.
     */
    if (mddev->sync_thread)
        return -EBUSY;
    if (mddev->bitmap)
        /* Sorry, cannot grow a bitmap yet, just remove it,
         * grow, and re-add.
         */
        return -EBUSY;
    list_for_each_entry(rdev, &mddev->disks, same_set) {
        sector_t avail = rdev->sectors;

        if (fit && (num_sectors == 0 || num_sectors > avail))
            num_sectors = avail;
        if (avail < num_sectors)
            return -ENOSPC;
    }
    rv = mddev->pers->resize(mddev, num_sectors);
    if (!rv) {
        struct block_device *bdev;

        bdev = bdget_disk(mddev->gendisk, 0);
        if (bdev) {
            mutex_lock(&bdev->bd_inode->i_mutex);
            i_size_write(bdev->bd_inode,
                         (loff_t)mddev->array_sectors << 9);
            mutex_unlock(&bdev->bd_inode->i_mutex);
            bdput(bdev);
        }
    }
    return rv;
}

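/*
 * For illustration only (not driver code): the device-size selection in
 * update_size() reduces to "num_sectors == 0 means the largest size that
 * fits on every device".  A standalone sketch of that rule:
 */
#if 0
static long long pick_num_sectors(const long long *avail, int n,
                                  long long requested)
{
    int fit = (requested == 0);
    int i;

    for (i = 0; i < n; i++) {
        if (fit && (requested == 0 || requested > avail[i]))
            requested = avail[i];   /* shrink to the smallest device */
        if (avail[i] < requested)
            return -1;              /* -ENOSPC in the driver */
    }
    return requested;
}
#endif
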
static int update_raid_disks(mddev_t *mddev, int raid_disks)
{
    int rv;
    /* change the number of raid disks */
    if (mddev->pers->check_reshape == NULL)
        return -EINVAL;
    if (raid_disks <= 0 ||
        raid_disks >= mddev->max_disks)
        return -EINVAL;
    if (mddev->sync_thread || mddev->reshape_position != MaxSector)
        return -EBUSY;
    mddev->delta_disks = raid_disks - mddev->raid_disks;

    rv = mddev->pers->check_reshape(mddev);
    return rv;
}

/*
 * update_array_info is used to change the configuration of an
 * on-line array.
 * The version, ctime, level, size, raid_disks, not_persistent, layout
 * and chunk_size fields in the info are checked against the array.
 * Any differences that cannot be handled will cause an error.
 * Normally, only one change can be managed at a time.
 */
static int update_array_info(mddev_t *mddev, mdu_array_info_t *info)
{
    int rv = 0;
    int cnt = 0;
    int state = 0;

    /* calculate expected state, ignoring low bits */
    if (mddev->bitmap && mddev->bitmap_offset)
        state |= (1 << MD_SB_BITMAP_PRESENT);

    if (mddev->major_version != info->major_version ||
        mddev->minor_version != info->minor_version ||
/*      mddev->patch_version != info->patch_version || */
        mddev->ctime != info->ctime ||
        mddev->level != info->level ||
/*      mddev->layout != info->layout || */
        !mddev->persistent != info->not_persistent ||
        mddev->chunk_sectors != info->chunk_size >> 9 ||
        /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
        ((state^info->state) & 0xfffffe00)
        )
        return -EINVAL;
    /* Check there is only one change */
    if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
        cnt++;
    if (mddev->raid_disks != info->raid_disks)
        cnt++;
    if (mddev->layout != info->layout)
        cnt++;
    if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT))
        cnt++;
    if (cnt == 0)
        return 0;
    if (cnt > 1)
        return -EINVAL;

    if (mddev->layout != info->layout) {
        /* Change layout
         * we don't need to do anything at the md level, the
         * personality will take care of it all.
         */
        if (mddev->pers->check_reshape == NULL)
            return -EINVAL;
        else {
            mddev->new_layout = info->layout;
            rv = mddev->pers->check_reshape(mddev);
            if (rv)
                mddev->new_layout = mddev->layout;
            return rv;
        }
    }
    if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
        rv = update_size(mddev, (sector_t)info->size * 2);

    if (mddev->raid_disks != info->raid_disks)
        rv = update_raid_disks(mddev, info->raid_disks);

    if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
        if (mddev->pers->quiesce == NULL)
            return -EINVAL;
        if (mddev->recovery || mddev->sync_thread)
            return -EBUSY;
        if (info->state & (1<<MD_SB_BITMAP_PRESENT)) {
            /* add the bitmap */
            if (mddev->bitmap)
                return -EEXIST;
            if (mddev->default_bitmap_offset == 0)
                return -EINVAL;
            mddev->bitmap_offset = mddev->default_bitmap_offset;
            mddev->pers->quiesce(mddev, 1);
            rv = bitmap_create(mddev);
            if (rv)
                bitmap_destroy(mddev);
            mddev->pers->quiesce(mddev, 0);
        } else {
            /* remove the bitmap */
            if (!mddev->bitmap)
                return -ENOENT;
            if (mddev->bitmap->file)
                return -EINVAL;
            mddev->pers->quiesce(mddev, 1);
            bitmap_destroy(mddev);
            mddev->pers->quiesce(mddev, 0);
            mddev->bitmap_offset = 0;
        }
    }
    md_update_sb(mddev, 1);
    return rv;
}

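/*
 * For illustration only (not driver code): the "one change at a time"
 * rule above is just a count of differing fields, e.g.:
 */
#if 0
static int count_requested_changes(int size_differs, int disks_differ,
                                   int layout_differs, int bitmap_differs)
{
    int cnt = size_differs + disks_differ + layout_differs + bitmap_differs;

    if (cnt == 0)
        return 0;       /* nothing to do */
    if (cnt > 1)
        return -EINVAL; /* reject combined reconfigurations */
    return 1;           /* exactly one change: apply it */
}
#endif
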
static int set_disk_faulty(mddev_t *mddev, dev_t dev)
{
    mdk_rdev_t *rdev;

    if (mddev->pers == NULL)
        return -ENODEV;

    rdev = find_rdev(mddev, dev);
    if (!rdev)
        return -ENODEV;

    md_error(mddev, rdev);
    return 0;
}

/*
 * We have a problem here : there is no easy way to give a CHS
 * virtual geometry. We currently pretend that we have a 2 heads
 * 4 sectors (with a BIG number of cylinders...). This drives
 * dosfs just mad... ;-)
 */
static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
    mddev_t *mddev = bdev->bd_disk->private_data;

    geo->heads = 2;
    geo->sectors = 4;
    geo->cylinders = get_capacity(mddev->gendisk) / 8;
    return 0;
}

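/*
 * For illustration only (not driver code): with 2 heads and 4
 * sectors/track, one fake cylinder is 8 sectors, hence the "/ 8" above.
 * E.g. a 1 TiB array (2147483648 512-byte sectors) reports 268435456
 * cylinders:
 */
#if 0
static unsigned long long fake_cylinders(unsigned long long capacity_sectors)
{
    const unsigned heads = 2, sectors_per_track = 4;

    return capacity_sectors / (heads * sectors_per_track);
}
#endif
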
static int md_ioctl(struct block_device *bdev, fmode_t mode,
            unsigned int cmd, unsigned long arg)
{
    int err = 0;
    void __user *argp = (void __user *)arg;
    mddev_t *mddev = NULL;

    if (!capable(CAP_SYS_ADMIN))
        return -EACCES;

    /*
     * Commands dealing with the RAID driver but not any
     * particular array:
     */
    switch (cmd)
    {
        case RAID_VERSION:
            err = get_version(argp);
            goto done;

        case PRINT_RAID_DEBUG:
            err = 0;
            md_print_devices();
            goto done;

#ifndef MODULE
        case RAID_AUTORUN:
            err = 0;
            autostart_arrays(arg);
            goto done;
#endif
        default:;
    }

    /*
     * Commands creating/starting a new array:
     */
    mddev = bdev->bd_disk->private_data;

    if (!mddev) {
        BUG();
        goto abort;
    }

    err = mddev_lock(mddev);
    if (err) {
        printk(KERN_INFO
            "md: ioctl lock interrupted, reason %d, cmd %d\n",
            err, cmd);
        goto abort;
    }

    switch (cmd)
    {
        case SET_ARRAY_INFO:
            {
                mdu_array_info_t info;
                if (!arg)
                    memset(&info, 0, sizeof(info));
                else if (copy_from_user(&info, argp, sizeof(info))) {
                    err = -EFAULT;
                    goto abort_unlock;
                }
                if (mddev->pers) {
                    err = update_array_info(mddev, &info);
                    if (err) {
                        printk(KERN_WARNING "md: couldn't update"
                               " array info. %d\n", err);
                        goto abort_unlock;
                    }
                    goto done_unlock;
                }
                if (!list_empty(&mddev->disks)) {
                    printk(KERN_WARNING
                           "md: array %s already has disks!\n",
                           mdname(mddev));
                    err = -EBUSY;
                    goto abort_unlock;
                }
                if (mddev->raid_disks) {
                    printk(KERN_WARNING
                           "md: array %s already initialised!\n",
                           mdname(mddev));
                    err = -EBUSY;
                    goto abort_unlock;
                }
                err = set_array_info(mddev, &info);
                if (err) {
                    printk(KERN_WARNING "md: couldn't set"
                           " array info. %d\n", err);
                    goto abort_unlock;
                }
            }
            goto done_unlock;

        default:;
    }

    /*
     * Commands querying/configuring an existing array:
     */
    /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY,
     * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */
    if ((!mddev->raid_disks && !mddev->external)
        && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
        && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE
        && cmd != GET_BITMAP_FILE) {
        err = -ENODEV;
        goto abort_unlock;
    }

    /*
     * Commands even a read-only array can execute:
     */
    switch (cmd)
    {
        case GET_ARRAY_INFO:
            err = get_array_info(mddev, argp);
            goto done_unlock;

        case GET_BITMAP_FILE:
            err = get_bitmap_file(mddev, argp);
            goto done_unlock;

        case GET_DISK_INFO:
            err = get_disk_info(mddev, argp);
            goto done_unlock;

        case RESTART_ARRAY_RW:
            err = restart_array(mddev);
            goto done_unlock;

        case STOP_ARRAY:
            err = do_md_stop(mddev, 0, 1);
            goto done_unlock;

        case STOP_ARRAY_RO:
            err = do_md_stop(mddev, 1, 1);
            goto done_unlock;
    }

    /*
     * The remaining ioctls are changing the state of the
     * superblock, so we do not allow them on read-only arrays.
     * However non-MD ioctls (e.g. get-size) will still come through
     * here and hit the 'default' below, so only disallow
     * 'md' ioctls, and switch to rw mode if started auto-readonly.
     */
    if (_IOC_TYPE(cmd) == MD_MAJOR && mddev->ro && mddev->pers) {
        if (mddev->ro == 2) {
            mddev->ro = 0;
            sysfs_notify_dirent(mddev->sysfs_state);
            set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
            md_wakeup_thread(mddev->thread);
        } else {
            err = -EROFS;
            goto abort_unlock;
        }
    }

    switch (cmd)
    {
        case ADD_NEW_DISK:
        {
            mdu_disk_info_t info;
            if (copy_from_user(&info, argp, sizeof(info)))
                err = -EFAULT;
            else
                err = add_new_disk(mddev, &info);
            goto done_unlock;
        }

        case HOT_REMOVE_DISK:
            err = hot_remove_disk(mddev, new_decode_dev(arg));
            goto done_unlock;

        case HOT_ADD_DISK:
            err = hot_add_disk(mddev, new_decode_dev(arg));
            goto done_unlock;

        case SET_DISK_FAULTY:
            err = set_disk_faulty(mddev, new_decode_dev(arg));
            goto done_unlock;

        case RUN_ARRAY:
            err = do_md_run(mddev);
            goto done_unlock;

        case SET_BITMAP_FILE:
            err = set_bitmap_file(mddev, (int)arg);
            goto done_unlock;

        default:
            err = -EINVAL;
            goto abort_unlock;
    }

done_unlock:
abort_unlock:
    if (mddev->hold_active == UNTIL_IOCTL &&
        err != -EINVAL)
        mddev->hold_active = 0;
    mddev_unlock(mddev);

    return err;
done:
    if (err)
        MD_BUG();
abort:
    return err;
}

static int md_open(struct block_device *bdev, fmode_t mode)
{
    /*
     * Succeed if we can lock the mddev, which confirms that
     * it isn't being stopped right now.
     */
    mddev_t *mddev = mddev_find(bdev->bd_dev);
    int err;

    if (mddev->gendisk != bdev->bd_disk) {
        /* we are racing with mddev_put which is discarding this
         * bd_disk.
         */
        mddev_put(mddev);
        /* Wait until bdev->bd_disk is definitely gone */
        flush_scheduled_work();
        /* Then retry the open from the top */
        return -ERESTARTSYS;
    }
    BUG_ON(mddev != bdev->bd_disk->private_data);

    if ((err = mutex_lock_interruptible_nested(&mddev->reconfig_mutex, 1)))
        goto out;

    err = 0;
    atomic_inc(&mddev->openers);
    mddev_unlock(mddev);

    check_disk_change(bdev);
 out:
    return err;
}

static int md_release(struct gendisk *disk, fmode_t mode)
{
    mddev_t *mddev = disk->private_data;

    BUG_ON(!mddev);
    atomic_dec(&mddev->openers);
    mddev_put(mddev);

    return 0;
}

static int md_media_changed(struct gendisk *disk)
{
    mddev_t *mddev = disk->private_data;

    return mddev->changed;
}

static int md_revalidate(struct gendisk *disk)
{
    mddev_t *mddev = disk->private_data;

    mddev->changed = 0;
    return 0;
}

static struct block_device_operations md_fops =
{
    .owner          = THIS_MODULE,
    .open           = md_open,
    .release        = md_release,
    .ioctl          = md_ioctl,
    .getgeo         = md_getgeo,
    .media_changed  = md_media_changed,
    .revalidate_disk= md_revalidate,
};

static int md_thread(void * arg)
{
    mdk_thread_t *thread = arg;

    /*
     * md_thread is a 'system-thread', its priority should be very
     * high. We avoid resource deadlocks individually in each
     * raid personality. (RAID5 does preallocation) We also use RR and
     * the very same RT priority as kswapd, thus we will never get
     * into a priority inversion deadlock.
     *
     * we definitely have to have equal or higher priority than
     * bdflush, otherwise bdflush will deadlock if there are too
     * many dirty RAID5 blocks.
     */

    allow_signal(SIGKILL);
    while (!kthread_should_stop()) {

        /* We need to wait INTERRUPTIBLE so that
         * we don't add to the load-average.
         * That means we need to be sure no signals are
         * pending
         */
        if (signal_pending(current))
            flush_signals(current);

        wait_event_interruptible_timeout
            (thread->wqueue,
             test_bit(THREAD_WAKEUP, &thread->flags)
             || kthread_should_stop(),
             thread->timeout);

        clear_bit(THREAD_WAKEUP, &thread->flags);

        thread->run(thread->mddev);
    }

    return 0;
}

void md_wakeup_thread(mdk_thread_t *thread)
{
    if (thread) {
        dprintk("md: waking up MD thread %s.\n", thread->tsk->comm);
        set_bit(THREAD_WAKEUP, &thread->flags);
        wake_up(&thread->wqueue);
    }
}

mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev,
                                 const char *name)
{
    mdk_thread_t *thread;

    thread = kzalloc(sizeof(mdk_thread_t), GFP_KERNEL);
    if (!thread)
        return NULL;

    init_waitqueue_head(&thread->wqueue);

    thread->run = run;
    thread->mddev = mddev;
    thread->timeout = MAX_SCHEDULE_TIMEOUT;
    thread->tsk = kthread_run(md_thread, thread, name, mdname(thread->mddev));
    if (IS_ERR(thread->tsk)) {
        kfree(thread);
        return NULL;
    }
    return thread;
}

void md_unregister_thread(mdk_thread_t *thread)
{
    dprintk("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));

    kthread_stop(thread->tsk);
    kfree(thread);
}

void md_error(mddev_t *mddev, mdk_rdev_t *rdev)
{
    if (!mddev) {
        MD_BUG();
        return;
    }

    if (!rdev || test_bit(Faulty, &rdev->flags))
        return;

    if (mddev->external)
        set_bit(Blocked, &rdev->flags);

    dprintk("md_error dev:%s, rdev:(%d:%d), (caller: %p,%p,%p,%p).\n",
        mdname(mddev),
        MAJOR(rdev->bdev->bd_dev), MINOR(rdev->bdev->bd_dev),
        __builtin_return_address(0),__builtin_return_address(1),
        __builtin_return_address(2),__builtin_return_address(3));

    if (!mddev->pers)
        return;
    if (!mddev->pers->error_handler)
        return;
    mddev->pers->error_handler(mddev,rdev);
    if (mddev->degraded)
        set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
    set_bit(StateChanged, &rdev->flags);
    set_bit(MD_RECOVERY_INTR, &mddev->recovery);
    set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
    md_wakeup_thread(mddev->thread);
    md_new_event_inintr(mddev);
}

/* seq_file implementation /proc/mdstat */

static void status_unused(struct seq_file *seq)
{
    int i = 0;
    mdk_rdev_t *rdev;

    seq_printf(seq, "unused devices: ");

    list_for_each_entry(rdev, &pending_raid_disks, same_set) {
        char b[BDEVNAME_SIZE];
        i++;
        seq_printf(seq, "%s ",
                   bdevname(rdev->bdev,b));
    }
    if (!i)
        seq_printf(seq, "<none>");

    seq_printf(seq, "\n");
}

static void status_resync(struct seq_file *seq, mddev_t * mddev)
{
    sector_t max_sectors, resync, res;
    unsigned long dt, db;
    sector_t rt;
    int scale;
    unsigned int per_milli;

    resync = mddev->curr_resync - atomic_read(&mddev->recovery_active);

    if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
        max_sectors = mddev->resync_max_sectors;
    else
        max_sectors = mddev->dev_sectors;

    /*
     * Should not happen.
     */
    if (!max_sectors) {
        MD_BUG();
        return;
    }
    /* Pick 'scale' such that (resync>>scale)*1000 will fit
     * in a sector_t, and (max_sectors>>scale) will fit in a
     * u32, as those are the requirements for sector_div.
     * Thus 'scale' must be at least 10
     */
    scale = 10;
    if (sizeof(sector_t) > sizeof(unsigned long)) {
        while ( max_sectors/2 > (1ULL<<(scale+32)))
            scale++;
    }
    res = (resync>>scale)*1000;
    sector_div(res, (u32)((max_sectors>>scale)+1));

    per_milli = res;
    {
        int i, x = per_milli/50, y = 20-x;
        seq_printf(seq, "[");
        for (i = 0; i < x; i++)
            seq_printf(seq, "=");
        seq_printf(seq, ">");
        for (i = 0; i < y; i++)
            seq_printf(seq, ".");
        seq_printf(seq, "] ");
    }
    seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)",
               (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)?
                "reshape" :
                (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)?
                 "check" :
                 (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
                  "resync" : "recovery"))),
               per_milli/10, per_milli % 10,
               (unsigned long long) resync/2,
               (unsigned long long) max_sectors/2);

    /*
     * dt: time from mark until now
     * db: blocks written from mark until now
     * rt: remaining time
     *
     * rt is a sector_t, so could be 32bit or 64bit.
     * So we divide before multiply in case it is 32bit and close
     * to the limit.
     * We scale the divisor (db) by 32 to avoid losing precision
     * near the end of resync when the number of remaining sectors
     * is close to the 'db' divisor.
     * We then divide rt by 32 after multiplying by db to compensate.
     * The '+1' avoids division by zero if db is very small.
     */
    dt = ((jiffies - mddev->resync_mark) / HZ);
    if (!dt) dt++;
    db = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active))
        - mddev->resync_mark_cnt;

    rt = max_sectors - resync;    /* number of remaining sectors */
    sector_div(rt, db/32+1);
    rt *= dt;
    rt >>= 5;

    seq_printf(seq, " finish=%lu.%lumin", (unsigned long)rt / 60,
               ((unsigned long)rt % 60)/6);

    seq_printf(seq, " speed=%ldK/sec", db/2/dt);
}

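/*
 * For illustration only (not driver code): the progress/ETA arithmetic
 * above, restated without the 'scale' shifting (which exists only to keep
 * the intermediates within sector_div()'s u64/u32 limits):
 */
#if 0
static void resync_progress(unsigned long long resync,
                            unsigned long long max_sectors,
                            unsigned long dt /* seconds since mark */,
                            unsigned long db /* sectors since mark */)
{
    /* progress in 1/1000ths, as printed in /proc/mdstat */
    unsigned int per_milli = (resync * 1000) / (max_sectors + 1);

    /* remaining / rate, dividing before multiplying so a 32-bit
     * sector_t cannot overflow; db is pre-scaled by 32 and the
     * final ">> 5" compensates */
    unsigned long long rt = (max_sectors - resync) / (db / 32 + 1);
    rt = (rt * dt) >> 5;    /* estimated seconds to completion */

    (void)per_milli;
    (void)rt;
}
#endif
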
static void *md_seq_start(struct seq_file *seq, loff_t *pos)
{
    struct list_head *tmp;
    loff_t l = *pos;
    mddev_t *mddev;

    if (l >= 0x10000)
        return NULL;
    if (!l--)
        /* header */
        return (void*)1;

    spin_lock(&all_mddevs_lock);
    list_for_each(tmp,&all_mddevs)
        if (!l--) {
            mddev = list_entry(tmp, mddev_t, all_mddevs);
            mddev_get(mddev);
            spin_unlock(&all_mddevs_lock);
            return mddev;
        }
    spin_unlock(&all_mddevs_lock);
    if (!l--)
        return (void*)2;/* tail */
    return NULL;
}

static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
    struct list_head *tmp;
    mddev_t *next_mddev, *mddev = v;

    ++*pos;
    if (v == (void*)2)
        return NULL;

    spin_lock(&all_mddevs_lock);
    if (v == (void*)1)
        tmp = all_mddevs.next;
    else
        tmp = mddev->all_mddevs.next;
    if (tmp != &all_mddevs)
        next_mddev = mddev_get(list_entry(tmp,mddev_t,all_mddevs));
    else {
        next_mddev = (void*)2;
        *pos = 0x10000;
    }
    spin_unlock(&all_mddevs_lock);

    if (v != (void*)1)
        mddev_put(mddev);
    return next_mddev;
}

static void md_seq_stop(struct seq_file *seq, void *v)
{
    mddev_t *mddev = v;

    if (mddev && v != (void*)1 && v != (void*)2)
        mddev_put(mddev);
}

struct mdstat_info {
    int event;
};

static int md_seq_show(struct seq_file *seq, void *v)
{
    mddev_t *mddev = v;
    sector_t sectors;
    mdk_rdev_t *rdev;
    struct mdstat_info *mi = seq->private;
    struct bitmap *bitmap;

    if (v == (void*)1) {
        struct mdk_personality *pers;
        seq_printf(seq, "Personalities : ");
        spin_lock(&pers_lock);
        list_for_each_entry(pers, &pers_list, list)
            seq_printf(seq, "[%s] ", pers->name);

        spin_unlock(&pers_lock);
        seq_printf(seq, "\n");
        mi->event = atomic_read(&md_event_count);
        return 0;
    }
    if (v == (void*)2) {
        status_unused(seq);
        return 0;
    }

    if (mddev_lock(mddev) < 0)
        return -EINTR;

    if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
        seq_printf(seq, "%s : %sactive", mdname(mddev),
                   mddev->pers ? "" : "in");
        if (mddev->pers) {
            if (mddev->ro==1)
                seq_printf(seq, " (read-only)");
            if (mddev->ro==2)
                seq_printf(seq, " (auto-read-only)");
            seq_printf(seq, " %s", mddev->pers->name);
        }

        sectors = 0;
        list_for_each_entry(rdev, &mddev->disks, same_set) {
            char b[BDEVNAME_SIZE];
            seq_printf(seq, " %s[%d]",
                bdevname(rdev->bdev,b), rdev->desc_nr);
            if (test_bit(WriteMostly, &rdev->flags))
                seq_printf(seq, "(W)");
            if (test_bit(Faulty, &rdev->flags)) {
                seq_printf(seq, "(F)");
                continue;
            } else if (rdev->raid_disk < 0)
                seq_printf(seq, "(S)"); /* spare */
            sectors += rdev->sectors;
        }

        if (!list_empty(&mddev->disks)) {
            if (mddev->pers)
                seq_printf(seq, "\n      %llu blocks",
                           (unsigned long long)
                           mddev->array_sectors / 2);
            else
                seq_printf(seq, "\n      %llu blocks",
                           (unsigned long long)sectors / 2);
        }
        if (mddev->persistent) {
            if (mddev->major_version != 0 ||
                mddev->minor_version != 90) {
                seq_printf(seq," super %d.%d",
                           mddev->major_version,
                           mddev->minor_version);
            }
        } else if (mddev->external)
            seq_printf(seq, " super external:%s",
                       mddev->metadata_type);
        else
            seq_printf(seq, " super non-persistent");

        if (mddev->pers) {
            mddev->pers->status(seq, mddev);
            seq_printf(seq, "\n      ");
            if (mddev->pers->sync_request) {
                if (mddev->curr_resync > 2) {
                    status_resync(seq, mddev);
                    seq_printf(seq, "\n      ");
                } else if (mddev->curr_resync == 1 || mddev->curr_resync == 2)
                    seq_printf(seq, "\tresync=DELAYED\n      ");
                else if (mddev->recovery_cp < MaxSector)
                    seq_printf(seq, "\tresync=PENDING\n      ");
            }
        } else
            seq_printf(seq, "\n       ");

        if ((bitmap = mddev->bitmap)) {
            unsigned long chunk_kb;
            unsigned long flags;
            spin_lock_irqsave(&bitmap->lock, flags);
            chunk_kb = bitmap->chunksize >> 10;
            seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], "
                "%lu%s chunk",
                bitmap->pages - bitmap->missing_pages,
                bitmap->pages,
                (bitmap->pages - bitmap->missing_pages)
                    << (PAGE_SHIFT - 10),
                chunk_kb ? chunk_kb : bitmap->chunksize,
                chunk_kb ? "KB" : "B");
            if (bitmap->file) {
                seq_printf(seq, ", file: ");
                seq_path(seq, &bitmap->file->f_path, " \t\n");
            }

            seq_printf(seq, "\n");
            spin_unlock_irqrestore(&bitmap->lock, flags);
        }

        seq_printf(seq, "\n");
    }
    mddev_unlock(mddev);

    return 0;
}

static const struct seq_operations md_seq_ops = {
    .start  = md_seq_start,
    .next   = md_seq_next,
    .stop   = md_seq_stop,
    .show   = md_seq_show,
};

static int md_seq_open(struct inode *inode, struct file *file)
{
    int error;
    struct mdstat_info *mi = kmalloc(sizeof(*mi), GFP_KERNEL);
    if (mi == NULL)
        return -ENOMEM;

    error = seq_open(file, &md_seq_ops);
    if (error)
        kfree(mi);
    else {
        struct seq_file *p = file->private_data;
        p->private = mi;
        mi->event = atomic_read(&md_event_count);
    }
    return error;
}

static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
{
    struct seq_file *m = filp->private_data;
    struct mdstat_info *mi = m->private;
    int mask;

    poll_wait(filp, &md_event_waiters, wait);

    /* always allow read */
    mask = POLLIN | POLLRDNORM;

    if (mi->event != atomic_read(&md_event_count))
        mask |= POLLERR | POLLPRI;
    return mask;
}

static const struct file_operations md_seq_fops = {
    .owner          = THIS_MODULE,
    .open           = md_seq_open,
    .read           = seq_read,
    .llseek         = seq_lseek,
    .release        = seq_release_private,
    .poll           = mdstat_poll,
};

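/*
 * For illustration only (not driver code): because mdstat_poll() adds
 * POLLPRI whenever md_event_count has advanced, a userspace monitor can
 * sleep in poll(2) on /proc/mdstat and re-read it only on array events:
 */
#if 0
#include <fcntl.h>
#include <poll.h>
#include <unistd.h>

static void watch_mdstat(void)
{
    int fd = open("/proc/mdstat", O_RDONLY);
    struct pollfd pfd = { .fd = fd, .events = POLLPRI };
    char buf[4096];

    for (;;) {
        lseek(fd, 0, SEEK_SET);
        read(fd, buf, sizeof(buf));     /* consume current contents */
        poll(&pfd, 1, -1);              /* block until the next md event */
    }
}
#endif
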
int register_md_personality(struct mdk_personality *p)
{
    spin_lock(&pers_lock);
    list_add_tail(&p->list, &pers_list);
    printk(KERN_INFO "md: %s personality registered for level %d\n", p->name, p->level);
    spin_unlock(&pers_lock);
    return 0;
}

int unregister_md_personality(struct mdk_personality *p)
{
    printk(KERN_INFO "md: %s personality unregistered\n", p->name);
    spin_lock(&pers_lock);
    list_del_init(&p->list);
    spin_unlock(&pers_lock);
    return 0;
}

static int is_mddev_idle(mddev_t *mddev, int init)
{
    mdk_rdev_t * rdev;
    int idle;
    int curr_events;

    idle = 1;
    rcu_read_lock();
    rdev_for_each_rcu(rdev, mddev) {
        struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
        curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
                      (int)part_stat_read(&disk->part0, sectors[1]) -
                      atomic_read(&disk->sync_io);
        /* sync IO will cause sync_io to increase before the disk_stats
         * as sync_io is counted when a request starts, and
         * disk_stats is counted when it completes.
         * So resync activity will cause curr_events to be smaller than
         * when there was no such activity.
         * non-sync IO will cause disk_stat to increase without
         * increasing sync_io so curr_events will (eventually)
         * be larger than it was before.  Once it becomes
         * substantially larger, the test below will cause
         * the array to appear non-idle, and resync will slow
         * down.
         * If there is a lot of outstanding resync activity when
         * we set last_event to curr_events, then all that activity
         * completing might cause the array to appear non-idle
         * and resync will be slowed down even though there might
         * not have been non-resync activity.  This will only
         * happen once though.  'last_events' will soon reflect
         * the state where there is little or no outstanding
         * resync requests, and further resync activity will
         * always make curr_events less than last_events.
         */
        if (init || curr_events - rdev->last_events > 64) {
            rdev->last_events = curr_events;
            idle = 0;
        }
    }
    rcu_read_unlock();
    return idle;
}

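/*
 * For illustration only (not driver code): per device, the idle test
 * above is a simple delta comparison once the kernel accounting is
 * stripped away:
 */
#if 0
static int device_looks_idle(int total_io_sectors, int sync_io_sectors,
                             int *last_events, int init)
{
    int curr_events = total_io_sectors - sync_io_sectors;

    if (init || curr_events - *last_events > 64) {
        /* more than 64 sectors of non-resync IO since the last
         * check (or the priming pass): report not idle */
        *last_events = curr_events;
        return 0;
    }
    return 1;
}
#endif
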
void md_done_sync(mddev_t *mddev, int blocks, int ok)
{
    /* another "blocks" (512byte) blocks have been synced */
    atomic_sub(blocks, &mddev->recovery_active);
    wake_up(&mddev->recovery_wait);
    if (!ok) {
        set_bit(MD_RECOVERY_INTR, &mddev->recovery);
        md_wakeup_thread(mddev->thread);
        /* stop recovery, signal do_sync .... */
    }
}

/* md_write_start(mddev, bi)
 * If we need to update some array metadata (e.g. 'active' flag
 * in superblock) before writing, schedule a superblock update
 * and wait for it to complete.
 */
void md_write_start(mddev_t *mddev, struct bio *bi)
{
    int did_change = 0;
    if (bio_data_dir(bi) != WRITE)
        return;

    BUG_ON(mddev->ro == 1);
    if (mddev->ro == 2) {
        /* need to switch to read/write */
        mddev->ro = 0;
        set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
        md_wakeup_thread(mddev->thread);
        md_wakeup_thread(mddev->sync_thread);
        did_change = 1;
    }
    atomic_inc(&mddev->writes_pending);
    if (mddev->safemode == 1)
        mddev->safemode = 0;
    if (mddev->in_sync) {
        spin_lock_irq(&mddev->write_lock);
        if (mddev->in_sync) {
            mddev->in_sync = 0;
            set_bit(MD_CHANGE_CLEAN, &mddev->flags);
            md_wakeup_thread(mddev->thread);
            did_change = 1;
        }
        spin_unlock_irq(&mddev->write_lock);
    }
    if (did_change)
        sysfs_notify_dirent(mddev->sysfs_state);
    wait_event(mddev->sb_wait,
               !test_bit(MD_CHANGE_CLEAN, &mddev->flags) &&
               !test_bit(MD_CHANGE_PENDING, &mddev->flags));
}

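/*
 * For illustration only (not driver code): the ordering md_write_start()
 * enforces, as pseudo-steps with invented helper names:
 */
#if 0
static void write_start_flow(void)
{
    switch_to_rw_if_auto_readonly();    /* ro==2 -> ro==0, kick resync */
    increment_writes_pending();
    if (array_was_in_sync()) {
        mark_array_dirty();             /* in_sync=0, MD_CHANGE_CLEAN */
        wake_md_thread();               /* it rewrites the superblock */
    }
    wait_until_superblock_written();    /* only then may the bio proceed */
}
#endif
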
void md_write_end(mddev_t *mddev)
{
    if (atomic_dec_and_test(&mddev->writes_pending)) {
        if (mddev->safemode == 2)
            md_wakeup_thread(mddev->thread);
        else if (mddev->safemode_delay)
            mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay);
    }
}

/* md_allow_write(mddev)
 * Calling this ensures that the array is marked 'active' so that writes
 * may proceed without blocking.  It is important to call this before
 * attempting a GFP_KERNEL allocation while holding the mddev lock.
 * Must be called with mddev_lock held.
 *
 * In the ->external case MD_CHANGE_CLEAN can not be cleared until mddev->lock
 * is dropped, so return -EAGAIN after notifying userspace.
 */
int md_allow_write(mddev_t *mddev)
{
    if (!mddev->pers)
        return 0;
    if (mddev->ro)
        return 0;
    if (!mddev->pers->sync_request)
        return 0;

    spin_lock_irq(&mddev->write_lock);
    if (mddev->in_sync) {
        mddev->in_sync = 0;
        set_bit(MD_CHANGE_CLEAN, &mddev->flags);
        if (mddev->safemode_delay &&
            mddev->safemode == 0)
            mddev->safemode = 1;
        spin_unlock_irq(&mddev->write_lock);
        md_update_sb(mddev, 0);
        sysfs_notify_dirent(mddev->sysfs_state);
    } else
        spin_unlock_irq(&mddev->write_lock);

    if (test_bit(MD_CHANGE_CLEAN, &mddev->flags))
        return -EAGAIN;
    else
        return 0;
}
EXPORT_SYMBOL_GPL(md_allow_write);

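/*
 * For illustration only (not driver code): a caller holding the mddev
 * lock would use md_allow_write() roughly like this before a GFP_KERNEL
 * allocation (raid5's stripe-cache resize is an in-tree example):
 */
#if 0
static int prepare_allocation(mddev_t *mddev)
{
    int err = md_allow_write(mddev);

    if (err)    /* -EAGAIN: superblock change still pending; the
                 * caller should drop the lock and retry */
        return err;
    /* array is now marked active, so a GFP_KERNEL allocation here
     * cannot deadlock against a superblock update */
    return 0;
}
#endif
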
#define SYNC_MARKS      10
#define SYNC_MARK_STEP  (3*HZ)
void md_do_sync(mddev_t *mddev)
{
    mddev_t *mddev2;
    unsigned int currspeed = 0,
         window;
    sector_t max_sectors,j, io_sectors;
    unsigned long mark[SYNC_MARKS];
    sector_t mark_cnt[SYNC_MARKS];
    int last_mark,m;
    struct list_head *tmp;
    sector_t last_check;
    int skipped = 0;
    mdk_rdev_t *rdev;
    char *desc;

    /* just in case thread restarts... */
    if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
        return;
    if (mddev->ro) /* never try to sync a read-only array */
        return;

    if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
        if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
            desc = "data-check";
        else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
            desc = "requested-resync";
        else
            desc = "resync";
    } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
        desc = "reshape";
    else
        desc = "recovery";

    /* we overload curr_resync somewhat here.
     * 0 == not engaged in resync at all
     * 2 == checking that there is no conflict with another sync
     * 1 == like 2, but have yielded to allow conflicting resync to
     *      commence
     * other == active in resync - this many blocks
     *
     * Before starting a resync we must have set curr_resync to
     * 2, and then checked that every "conflicting" array has curr_resync
     * less than ours.  When we find one that is the same or higher
     * we wait on resync_wait.  To avoid deadlock, we reduce curr_resync
     * to 1 if we choose to yield (based arbitrarily on address of mddev
     * structure).  This will mean we have to start checking from the
     * beginning again.
     */

    do {
        mddev->curr_resync = 2;

    try_again:
        if (kthread_should_stop()) {
            set_bit(MD_RECOVERY_INTR, &mddev->recovery);
            goto skip;
        }
        for_each_mddev(mddev2, tmp) {
            if (mddev2 == mddev)
                continue;
            if (!mddev->parallel_resync
            &&  mddev2->curr_resync
            &&  match_mddev_units(mddev, mddev2)) {
                DEFINE_WAIT(wq);
                if (mddev < mddev2 && mddev->curr_resync == 2) {
                    /* arbitrarily yield */
                    mddev->curr_resync = 1;
                    wake_up(&resync_wait);
                }
                if (mddev > mddev2 && mddev->curr_resync == 1)
                    /* no need to wait here, we can wait the next
                     * time 'round when curr_resync == 2
                     */
                    continue;
                /* We need to wait 'interruptible' so as not to
                 * contribute to the load average, and not to
                 * be caught by 'softlockup'
                 */
                prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
                if (!kthread_should_stop() &&
                    mddev2->curr_resync >= mddev->curr_resync) {
                    printk(KERN_INFO "md: delaying %s of %s"
                           " until %s has finished (they"
                           " share one or more physical units)\n",
                           desc, mdname(mddev), mdname(mddev2));
                    mddev_put(mddev2);
                    if (signal_pending(current))
                        flush_signals(current);
                    schedule();
                    finish_wait(&resync_wait, &wq);
                    goto try_again;
                }
                finish_wait(&resync_wait, &wq);
            }
        }
    } while (mddev->curr_resync < 2);

    j = 0;
    if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
        /* resync follows the size requested by the personality,
         * which defaults to physical size, but can be virtual size
         */
        max_sectors = mddev->resync_max_sectors;
        mddev->resync_mismatches = 0;
        /* we don't use the checkpoint if there's a bitmap */
        if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
            j = mddev->resync_min;
        else if (!mddev->bitmap)
            j = mddev->recovery_cp;

    } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
        max_sectors = mddev->dev_sectors;
    else {
        /* recovery follows the physical size of devices */
        max_sectors = mddev->dev_sectors;
        j = MaxSector;
        list_for_each_entry(rdev, &mddev->disks, same_set)
            if (rdev->raid_disk >= 0 &&
                !test_bit(Faulty, &rdev->flags) &&
                !test_bit(In_sync, &rdev->flags) &&
                rdev->recovery_offset < j)
                j = rdev->recovery_offset;
    }

    printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev));
    printk(KERN_INFO "md: minimum _guaranteed_ speed:"
           " %d KB/sec/disk.\n", speed_min(mddev));
    printk(KERN_INFO "md: using maximum available idle IO bandwidth "
           "(but not more than %d KB/sec) for %s.\n",
           speed_max(mddev), desc);

    is_mddev_idle(mddev, 1); /* this initializes IO event counters */

    io_sectors = 0;
    for (m = 0; m < SYNC_MARKS; m++) {
        mark[m] = jiffies;
        mark_cnt[m] = io_sectors;
    }
    last_mark = 0;
    mddev->resync_mark = mark[last_mark];
    mddev->resync_mark_cnt = mark_cnt[last_mark];

    /*
     * Tune reconstruction:
     */
    window = 32*(PAGE_SIZE/512);
    printk(KERN_INFO "md: using %dk window, over a total of %llu blocks.\n",
           window/2,(unsigned long long) max_sectors/2);

    atomic_set(&mddev->recovery_active, 0);
    last_check = 0;

    if (j>2) {
        printk(KERN_INFO
               "md: resuming %s of %s from checkpoint.\n",
               desc, mdname(mddev));
        mddev->curr_resync = j;
    }

    while (j < max_sectors) {
        sector_t sectors;

        skipped = 0;

        if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
            ((mddev->curr_resync > mddev->curr_resync_completed &&
              (mddev->curr_resync - mddev->curr_resync_completed)
              > (max_sectors >> 4)) ||
             (j - mddev->curr_resync_completed)*2
             >= mddev->resync_max - mddev->curr_resync_completed
                )) {
            /* time to update curr_resync_completed */
            blk_unplug(mddev->queue);
            wait_event(mddev->recovery_wait,
                       atomic_read(&mddev->recovery_active) == 0);
            mddev->curr_resync_completed =
                mddev->curr_resync;
            set_bit(MD_CHANGE_CLEAN, &mddev->flags);
            sysfs_notify(&mddev->kobj, NULL, "sync_completed");
        }

        while (j >= mddev->resync_max && !kthread_should_stop()) {
            /* As this condition is controlled by user-space,
             * we can block indefinitely, so use '_interruptible'
             * to avoid triggering warnings.
             */
            flush_signals(current); /* just in case */
            wait_event_interruptible(mddev->recovery_wait,
                                     mddev->resync_max > j
                                     || kthread_should_stop());
        }

        if (kthread_should_stop())
            goto interrupted;

        sectors = mddev->pers->sync_request(mddev, j, &skipped,
                                            currspeed < speed_min(mddev));
        if (sectors == 0) {
            set_bit(MD_RECOVERY_INTR, &mddev->recovery);
            goto out;
        }

        if (!skipped) { /* actual IO requested */
            io_sectors += sectors;
            atomic_add(sectors, &mddev->recovery_active);
        }

        j += sectors;
        if (j>1) mddev->curr_resync = j;
        mddev->curr_mark_cnt = io_sectors;
        if (last_check == 0)
            /* this is the earliest that rebuild will be
             * visible in /proc/mdstat
             */
            md_new_event(mddev);

        if (last_check + window > io_sectors || j == max_sectors)
            continue;

        last_check = io_sectors;

        if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
            break;

    repeat:
        if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) {
            /* step marks */
            int next = (last_mark+1) % SYNC_MARKS;

            mddev->resync_mark = mark[next];
            mddev->resync_mark_cnt = mark_cnt[next];
            mark[next] = jiffies;
            mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
            last_mark = next;
        }

        if (kthread_should_stop())
            goto interrupted;

        /*
         * this loop exits only when we are slower than
         * the 'hard' speed limit, or the system was IO-idle for
         * a jiffy.
         * the system might be non-idle CPU-wise, but we only care
         * about not overloading the IO subsystem. (things like an
         * e2fsck being done on the RAID array should execute fast)
         */
        blk_unplug(mddev->queue);
        cond_resched();

        currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2
            /((jiffies-mddev->resync_mark)/HZ +1) +1;

        if (currspeed > speed_min(mddev)) {
            if ((currspeed > speed_max(mddev)) ||
                !is_mddev_idle(mddev, 0)) {
                msleep(500);
                goto repeat;
            }
        }
    }
    printk(KERN_INFO "md: %s: %s done.\n",mdname(mddev), desc);
    /*
     * this also signals 'finished resyncing' to md_stop
     */
 out:
    blk_unplug(mddev->queue);

    wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));

    /* tell personality that we are finished */
    mddev->pers->sync_request(mddev, max_sectors, &skipped, 1);

    if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
        mddev->curr_resync > 2) {
        if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
            if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
                if (mddev->curr_resync >= mddev->recovery_cp) {
                    printk(KERN_INFO
                           "md: checkpointing %s of %s.\n",
                           desc, mdname(mddev));
                    mddev->recovery_cp = mddev->curr_resync;
                }
            } else
                mddev->recovery_cp = MaxSector;
        } else {
            if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
                mddev->curr_resync = MaxSector;
            list_for_each_entry(rdev, &mddev->disks, same_set)
                if (rdev->raid_disk >= 0 &&
                    !test_bit(Faulty, &rdev->flags) &&
                    !test_bit(In_sync, &rdev->flags) &&
                    rdev->recovery_offset < mddev->curr_resync)
                    rdev->recovery_offset = mddev->curr_resync;
        }
    }
    set_bit(MD_CHANGE_DEVS, &mddev->flags);

 skip:
    mddev->curr_resync = 0;
    mddev->curr_resync_completed = 0;
    mddev->resync_min = 0;
    mddev->resync_max = MaxSector;
    sysfs_notify(&mddev->kobj, NULL, "sync_completed");
    wake_up(&resync_wait);
    set_bit(MD_RECOVERY_DONE, &mddev->recovery);
    md_wakeup_thread(mddev->thread);
    return;

 interrupted:
    /*
     * got a signal, exit.
     */
    printk(KERN_INFO
           "md: md_do_sync() got signal ... exiting\n");
    set_bit(MD_RECOVERY_INTR, &mddev->recovery);
    goto out;
}
EXPORT_SYMBOL_GPL(md_do_sync);

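/*
 * For illustration only (not driver code): the throttle at the bottom of
 * md_do_sync() computes the recent rate from the oldest rolling mark and
 * sleeps 500ms whenever the array is busy, which is what pins resync near
 * speed_limit_min under load:
 */
#if 0
static unsigned int resync_kb_per_sec(unsigned long io_sectors,
                                      unsigned long mark_cnt,
                                      unsigned long mark_jiffies,
                                      unsigned long now_jiffies,
                                      unsigned int hz)
{
    /* sectors are 512 bytes, so "/2" converts to KB; the "+1"s avoid
     * division by zero and a zero rate */
    return (io_sectors - mark_cnt) / 2
           / ((now_jiffies - mark_jiffies) / hz + 1) + 1;
}
#endif
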
static int remove_and_add_spares(mddev_t *mddev)
{
    mdk_rdev_t *rdev;
    int spares = 0;

    mddev->curr_resync_completed = 0;

    list_for_each_entry(rdev, &mddev->disks, same_set)
        if (rdev->raid_disk >= 0 &&
            !test_bit(Blocked, &rdev->flags) &&
            (test_bit(Faulty, &rdev->flags) ||
             ! test_bit(In_sync, &rdev->flags)) &&
            atomic_read(&rdev->nr_pending)==0) {
            if (mddev->pers->hot_remove_disk(
                    mddev, rdev->raid_disk)==0) {
                char nm[20];
                sprintf(nm,"rd%d", rdev->raid_disk);
                sysfs_remove_link(&mddev->kobj, nm);
                rdev->raid_disk = -1;
            }
        }

    if (mddev->degraded && ! mddev->ro && !mddev->recovery_disabled) {
        list_for_each_entry(rdev, &mddev->disks, same_set) {
            if (rdev->raid_disk >= 0 &&
                !test_bit(In_sync, &rdev->flags) &&
                !test_bit(Blocked, &rdev->flags))
                spares++;
            if (rdev->raid_disk < 0
                && !test_bit(Faulty, &rdev->flags)) {
                rdev->recovery_offset = 0;
                if (mddev->pers->
                    hot_add_disk(mddev, rdev) == 0) {
                    char nm[20];
                    sprintf(nm, "rd%d", rdev->raid_disk);
                    if (sysfs_create_link(&mddev->kobj,
                                          &rdev->kobj, nm))
                        printk(KERN_WARNING
                               "md: cannot register "
                               "%s for %s\n",
                               nm, mdname(mddev));
                    spares++;
                    md_new_event(mddev);
                } else
                    break;
            }
        }
    }
    return spares;
}

/*
 * This routine is regularly called by all per-raid-array threads to
 * deal with generic issues like resync and super-block update.
 * Raid personalities that don't have a thread (linear/raid0) do not
 * need this as they never do any recovery or update the superblock.
 *
 * It does not do any resync itself, but rather "forks" off other threads
 * to do that as needed.
 * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
 * "->recovery" and create a thread at ->sync_thread.
 * When the thread finishes it sets MD_RECOVERY_DONE
 * and wakes up this thread, which will reap the thread and finish up.
 * This thread also removes any faulty devices (with nr_pending == 0).
 *
 * The overall approach is:
 *  1/ if the superblock needs updating, update it.
 *  2/ If a recovery thread is running, don't do anything else.
 *  3/ If recovery has finished, clean up, possibly marking spares active.
 *  4/ If there are any faulty devices, remove them.
 *  5/ If array is degraded, try to add spare devices.
 *  6/ If array has spares or is not in-sync, start a resync thread.
 */
void md_check_recovery(mddev_t *mddev)
{
    mdk_rdev_t *rdev;

    if (mddev->bitmap)
        bitmap_daemon_work(mddev->bitmap);

    if (signal_pending(current)) {
        if (mddev->pers->sync_request && !mddev->external) {
            printk(KERN_INFO "md: %s in immediate safe mode\n",
                   mdname(mddev));
            mddev->safemode = 2;
        }
        flush_signals(current);
    }

    if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
        return;
    if ( ! (
        (mddev->flags && !mddev->external) ||
        test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
        test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
        (mddev->external == 0 && mddev->safemode == 1) ||
        (mddev->safemode == 2 && ! atomic_read(&mddev->writes_pending)
         && !mddev->in_sync && mddev->recovery_cp == MaxSector)
        ))
        return;

    if (mddev_trylock(mddev)) {
        int spares = 0;

        if (mddev->ro) {
            /* Only thing we do on a ro array is remove
             * failed devices.
             */
            remove_and_add_spares(mddev);
            clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
            goto unlock;
        }

        if (!mddev->external) {
            int did_change = 0;
            spin_lock_irq(&mddev->write_lock);
            if (mddev->safemode &&
                !atomic_read(&mddev->writes_pending) &&
                !mddev->in_sync &&
                mddev->recovery_cp == MaxSector) {
                mddev->in_sync = 1;
                did_change = 1;
                if (mddev->persistent)
                    set_bit(MD_CHANGE_CLEAN, &mddev->flags);
            }
            if (mddev->safemode == 1)
                mddev->safemode = 0;
            spin_unlock_irq(&mddev->write_lock);
            if (did_change)
                sysfs_notify_dirent(mddev->sysfs_state);
        }

        if (mddev->flags)
            md_update_sb(mddev, 0);

        list_for_each_entry(rdev, &mddev->disks, same_set)
            if (test_and_clear_bit(StateChanged, &rdev->flags))
                sysfs_notify_dirent(rdev->sysfs_state);

        if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
            !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
            /* resync/recovery still happening */
            clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
            goto unlock;
        }
        if (mddev->sync_thread) {
            /* resync has finished, collect result */
            md_unregister_thread(mddev->sync_thread);
            mddev->sync_thread = NULL;
            if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
                !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
                /* success...*/
                /* activate any spares */
                if (mddev->pers->spare_active(mddev))
                    sysfs_notify(&mddev->kobj, NULL,
                                 "degraded");
            }
            if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
                mddev->pers->finish_reshape)
                mddev->pers->finish_reshape(mddev);
            md_update_sb(mddev, 1);

            /* if array is no-longer degraded, then any saved_raid_disk
             * information must be scrapped
             */
            if (!mddev->degraded)
                list_for_each_entry(rdev, &mddev->disks, same_set)
                    rdev->saved_raid_disk = -1;

            mddev->recovery = 0;
            /* flag recovery needed just to double check */
            set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
            sysfs_notify_dirent(mddev->sysfs_action);
            md_new_event(mddev);
            goto unlock;
        }
        /* Set RUNNING before clearing NEEDED to avoid
         * any transients in the value of "sync_action".
         */
        set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
        clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
        /* Clear some bits that don't mean anything, but
         * might be left set
         */
        clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
        clear_bit(MD_RECOVERY_DONE, &mddev->recovery);

        if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
            goto unlock;
        /* no recovery is running.
         * remove any failed drives, then
         * add spares if possible.
         * Spares are also removed and re-added, to allow
         * the personality to fail the re-add.
         */

        if (mddev->reshape_position != MaxSector) {
            if (mddev->pers->check_reshape == NULL ||
                mddev->pers->check_reshape(mddev) != 0)
                /* Cannot proceed */
                goto unlock;
            set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
            clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
        } else if ((spares = remove_and_add_spares(mddev))) {
            clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
            clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
            clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
            set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
        } else if (mddev->recovery_cp < MaxSector) {
            set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
            clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
        } else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
            /* nothing to be done ... */
            goto unlock;

        if (mddev->pers->sync_request) {
            if (spares && mddev->bitmap && ! mddev->bitmap->file) {
                /* We are adding a device or devices to an array
                 * which has the bitmap stored on all devices.
                 * So make sure all bitmap pages get written
                 */
                bitmap_write_all(mddev->bitmap);
            }
            mddev->sync_thread = md_register_thread(md_do_sync,
                                                    mddev,
                                                    "%s_resync");
            if (!mddev->sync_thread) {
                printk(KERN_ERR "%s: could not start resync"
                       " thread...\n",
                       mdname(mddev));
                /* leave the spares where they are, it shouldn't hurt */
                mddev->recovery = 0;
            } else
                md_wakeup_thread(mddev->sync_thread);
            sysfs_notify_dirent(mddev->sysfs_action);
            md_new_event(mddev);
        }
    unlock:
        if (!mddev->sync_thread) {
            clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
            if (test_and_clear_bit(MD_RECOVERY_RECOVER,
                                   &mddev->recovery))
                if (mddev->sysfs_action)
                    sysfs_notify_dirent(mddev->sysfs_action);
        }
        mddev_unlock(mddev);
    }
}

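/*
 * For illustration only (not driver code): the sync_action decision made
 * above, reduced to its priority order (reshape, then recovery onto
 * spares, then resync):
 */
#if 0
static const char *pick_sync_action(int reshape_pending, int spares,
                                    unsigned long long recovery_cp,
                                    unsigned long long max_sector)
{
    if (reshape_pending)
        return "reshape";
    if (spares)
        return "recover";   /* rebuild onto hot spares */
    if (recovery_cp < max_sector)
        return "resync";    /* array not known to be in-sync */
    return "idle";
}
#endif
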
void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev)
{
    sysfs_notify_dirent(rdev->sysfs_state);
    wait_event_timeout(rdev->blocked_wait,
                       !test_bit(Blocked, &rdev->flags),
                       msecs_to_jiffies(5000));
    rdev_dec_pending(rdev, mddev);
}
EXPORT_SYMBOL(md_wait_for_blocked_rdev);

static int md_notify_reboot(struct notifier_block *this,
                            unsigned long code, void *x)
{
    struct list_head *tmp;
    mddev_t *mddev;

    if ((code == SYS_DOWN) || (code == SYS_HALT) || (code == SYS_POWER_OFF)) {

        printk(KERN_INFO "md: stopping all md devices.\n");

        for_each_mddev(mddev, tmp)
            if (mddev_trylock(mddev)) {
                /* Force a switch to readonly even if the array
                 * appears to still be in use.  Hence
                 * the '100'.
                 */
                do_md_stop(mddev, 1, 100);
                mddev_unlock(mddev);
            }
        /*
         * certain more exotic SCSI devices are known to be
         * volatile wrt too early system reboots. While the
         * right place to handle this issue is the given
         * driver, we do want to have a safe RAID driver ...
         */
        mdelay(1000*1);
    }
    return NOTIFY_DONE;
}

static struct notifier_block md_notifier = {
    .notifier_call  = md_notify_reboot,
    .next           = NULL,
    .priority       = INT_MAX, /* before any real devices */
};

static void md_geninit(void)
{
    dprintk("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));

    proc_create("mdstat", S_IRUGO, NULL, &md_seq_fops);
}

static int __init md_init(void)
{
    if (register_blkdev(MD_MAJOR, "md"))
        return -1;
    if ((mdp_major=register_blkdev(0, "mdp"))<=0) {
        unregister_blkdev(MD_MAJOR, "md");
        return -1;
    }
    blk_register_region(MKDEV(MD_MAJOR, 0), 1UL<<MINORBITS, THIS_MODULE,
                        md_probe, NULL, NULL);
    blk_register_region(MKDEV(mdp_major, 0), 1UL<<MINORBITS, THIS_MODULE,
                        md_probe, NULL, NULL);

    register_reboot_notifier(&md_notifier);
    raid_table_header = register_sysctl_table(raid_root_table);

    md_geninit();
    return 0;
}

#ifndef MODULE

/*
 * Searches all registered partitions for autorun RAID arrays
 * at boot time.
 */

static LIST_HEAD(all_detected_devices);
struct detected_devices_node {
    struct list_head list;
    dev_t dev;
};

void md_autodetect_dev(dev_t dev)
{
    struct detected_devices_node *node_detected_dev;

    node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL);
    if (node_detected_dev) {
        node_detected_dev->dev = dev;
        list_add_tail(&node_detected_dev->list, &all_detected_devices);
    } else {
        printk(KERN_CRIT "md: md_autodetect_dev: kzalloc failed"
            ", skipping dev(%d,%d)\n", MAJOR(dev), MINOR(dev));
    }
}

static void autostart_arrays(int part)
{
    mdk_rdev_t *rdev;
    struct detected_devices_node *node_detected_dev;
    dev_t dev;
    int i_scanned, i_passed;

    i_scanned = 0;
    i_passed = 0;

    printk(KERN_INFO "md: Autodetecting RAID arrays.\n");

    while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) {
        i_scanned++;
        node_detected_dev = list_entry(all_detected_devices.next,
                                       struct detected_devices_node, list);
        list_del(&node_detected_dev->list);
        dev = node_detected_dev->dev;
        kfree(node_detected_dev);
        rdev = md_import_device(dev,0, 90);
        if (IS_ERR(rdev))
            continue;

        if (test_bit(Faulty, &rdev->flags)) {
            MD_BUG();
            continue;
        }
        set_bit(AutoDetected, &rdev->flags);
        list_add(&rdev->same_set, &pending_raid_disks);
        i_passed++;
    }

    printk(KERN_INFO "md: Scanned %d and added %d devices.\n",
           i_scanned, i_passed);

    autorun_devices(part);
}

#endif /* !MODULE */

static __exit void md_exit(void)
{
    mddev_t *mddev;
    struct list_head *tmp;

    blk_unregister_region(MKDEV(MD_MAJOR,0), 1U << MINORBITS);
    blk_unregister_region(MKDEV(mdp_major,0), 1U << MINORBITS);

    unregister_blkdev(MD_MAJOR,"md");
    unregister_blkdev(mdp_major, "mdp");
    unregister_reboot_notifier(&md_notifier);
    unregister_sysctl_table(raid_table_header);
    remove_proc_entry("mdstat", NULL);
    for_each_mddev(mddev, tmp) {
        export_array(mddev);
        mddev->hold_active = 0;
    }
}

subsys_initcall(md_init);
module_exit(md_exit)

static int get_ro(char *buffer, struct kernel_param *kp)
{
    return sprintf(buffer, "%d", start_readonly);
}
static int set_ro(const char *val, struct kernel_param *kp)
{
    char *e;
    int num = simple_strtoul(val, &e, 10);
    if (*val && (*e == '\0' || *e == '\n')) {
        start_readonly = num;
        return 0;
    }
    return -EINVAL;
}

module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);

module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR);

EXPORT_SYMBOL(register_md_personality);
EXPORT_SYMBOL(unregister_md_personality);
EXPORT_SYMBOL(md_error);
EXPORT_SYMBOL(md_done_sync);
EXPORT_SYMBOL(md_write_start);
EXPORT_SYMBOL(md_write_end);
EXPORT_SYMBOL(md_register_thread);
EXPORT_SYMBOL(md_unregister_thread);
EXPORT_SYMBOL(md_wakeup_thread);
EXPORT_SYMBOL(md_check_recovery);
MODULE_LICENSE("GPL");
MODULE_ALIAS("md");
MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);