md: md_getgeo(): Move comment to proper position.
pandora-kernel.git: drivers/md/md.c
1 /*
2    md.c : Multiple Devices driver for Linux
3           Copyright (C) 1998, 1999, 2000 Ingo Molnar
4
5      completely rewritten, based on the MD driver code from Marc Zyngier
6
7    Changes:
8
9    - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
10    - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
11    - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
12    - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
13    - kmod support by: Cyrus Durgin
14    - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
15    - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>
16
17    - lots of fixes and improvements to the RAID1/RAID5 and generic
18      RAID code (such as request-based resynchronization):
19
20      Neil Brown <neilb@cse.unsw.edu.au>.
21
22    - persistent bitmap code
23      Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.
24
25    This program is free software; you can redistribute it and/or modify
26    it under the terms of the GNU General Public License as published by
27    the Free Software Foundation; either version 2, or (at your option)
28    any later version.
29
30    You should have received a copy of the GNU General Public License
31    (for example /usr/src/linux/COPYING); if not, write to the Free
32    Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
33 */
34
35 #include <linux/module.h>
36 #include <linux/kernel.h>
37 #include <linux/kthread.h>
38 #include <linux/linkage.h>
39 #include <linux/raid/md.h>
40 #include <linux/raid/bitmap.h>
41 #include <linux/sysctl.h>
42 #include <linux/buffer_head.h> /* for invalidate_bdev */
43 #include <linux/poll.h>
44 #include <linux/mutex.h>
45 #include <linux/ctype.h>
46 #include <linux/freezer.h>
47
48 #include <linux/init.h>
49
50 #include <linux/file.h>
51
52 #ifdef CONFIG_KMOD
53 #include <linux/kmod.h>
54 #endif
55
56 #include <asm/unaligned.h>
57
58 #define MAJOR_NR MD_MAJOR
59 #define MD_DRIVER
60
61 /* 63 partitions with the alternate major number (mdp) */
62 #define MdpMinorShift 6
63
64 #define DEBUG 0
65 #define dprintk(x...) ((void)(DEBUG && printk(x)))
66
67
68 #ifndef MODULE
69 static void autostart_arrays (int part);
70 #endif
71
72 static LIST_HEAD(pers_list);
73 static DEFINE_SPINLOCK(pers_lock);
74
75 static void md_print_devices(void);
76
77 static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
78
79 #define MD_BUG(x...) { printk("md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); }
80
81 /*
82  * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
83  * is 1000 KB/sec, so the extra system load does not show up that much.
84  * Increase it if you want to have more _guaranteed_ speed. Note that
85  * the RAID driver will use the maximum available bandwidth if the IO
86  * subsystem is idle. There is also an 'absolute maximum' reconstruction
87  * speed limit - in case reconstruction slows down your system despite
88  * idle IO detection.
89  *
90  * You can change them via /proc/sys/dev/raid/speed_limit_{min,max}
91  * or via /sys/block/mdX/md/sync_speed_{min,max}.
92  */
93
94 static int sysctl_speed_limit_min = 1000;
95 static int sysctl_speed_limit_max = 200000;
96 static inline int speed_min(mddev_t *mddev)
97 {
98         return mddev->sync_speed_min ?
99                 mddev->sync_speed_min : sysctl_speed_limit_min;
100 }
101
102 static inline int speed_max(mddev_t *mddev)
103 {
104         return mddev->sync_speed_max ?
105                 mddev->sync_speed_max : sysctl_speed_limit_max;
106 }
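/*
 * Editor's sketch (not part of md.c): how a resync loop might consult
 * the two limits above.  The per-array sysfs value, when non-zero,
 * overrides the global sysctl; below speed_min() the resync always
 * proceeds, above speed_max() it is always throttled.
 * resync_rate_ok() is a hypothetical name used for illustration only.
 */
#if 0 /* editor's illustration */
static int resync_rate_ok(mddev_t *mddev, int kb_per_sec)
{
        if (kb_per_sec < speed_min(mddev))
                return 1;       /* below the guaranteed floor: keep going */
        if (kb_per_sec > speed_max(mddev))
                return 0;       /* above the hard ceiling: back off */
        return 1;               /* in between: allowed while IO is idle */
}
#endif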
107
108 static struct ctl_table_header *raid_table_header;
109
110 static ctl_table raid_table[] = {
111         {
112                 .ctl_name       = DEV_RAID_SPEED_LIMIT_MIN,
113                 .procname       = "speed_limit_min",
114                 .data           = &sysctl_speed_limit_min,
115                 .maxlen         = sizeof(int),
116                 .mode           = S_IRUGO|S_IWUSR,
117                 .proc_handler   = &proc_dointvec,
118         },
119         {
120                 .ctl_name       = DEV_RAID_SPEED_LIMIT_MAX,
121                 .procname       = "speed_limit_max",
122                 .data           = &sysctl_speed_limit_max,
123                 .maxlen         = sizeof(int),
124                 .mode           = S_IRUGO|S_IWUSR,
125                 .proc_handler   = &proc_dointvec,
126         },
127         { .ctl_name = 0 }
128 };
129
130 static ctl_table raid_dir_table[] = {
131         {
132                 .ctl_name       = DEV_RAID,
133                 .procname       = "raid",
134                 .maxlen         = 0,
135                 .mode           = S_IRUGO|S_IXUGO,
136                 .child          = raid_table,
137         },
138         { .ctl_name = 0 }
139 };
140
141 static ctl_table raid_root_table[] = {
142         {
143                 .ctl_name       = CTL_DEV,
144                 .procname       = "dev",
145                 .maxlen         = 0,
146                 .mode           = 0555,
147                 .child          = raid_dir_table,
148         },
149         { .ctl_name = 0 }
150 };
151
152 static struct block_device_operations md_fops;
153
154 static int start_readonly;
155
156 /*
157  * We have a system wide 'event count' that is incremented
158  * on any 'interesting' event, and readers of /proc/mdstat
159  * can use 'poll' or 'select' to find out when the event
160  * count increases.
161  *
162  * Events are:
163  *  start array, stop array, error, add device, remove device,
164  *  start build, activate spare
165  */
166 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
167 static atomic_t md_event_count;
168 void md_new_event(mddev_t *mddev)
169 {
170         atomic_inc(&md_event_count);
171         wake_up(&md_event_waiters);
172 }
173 EXPORT_SYMBOL_GPL(md_new_event);
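/*
 * Editor's sketch of the user-space side (not part of md.c): readers
 * of /proc/mdstat can poll() for the event count bumped above.  This
 * is ordinary user-space C, shown only to illustrate the interface;
 * md reports a new event as an exceptional condition (POLLERR|POLLPRI),
 * and re-reading the file from the start re-arms the poll.  Error
 * handling is elided.
 */
#if 0 /* editor's illustration, user-space code */
#include <fcntl.h>
#include <poll.h>
#include <unistd.h>

static void wait_for_md_event(void)
{
        char buf[4096];
        struct pollfd pfd;

        pfd.fd = open("/proc/mdstat", O_RDONLY);
        pfd.events = POLLPRI;
        read(pfd.fd, buf, sizeof(buf)); /* latch current event count */
        poll(&pfd, 1, -1);              /* returns when md_new_event() fires */
        lseek(pfd.fd, 0, SEEK_SET);     /* re-read from start to re-arm */
        close(pfd.fd);
}
#endif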
174
175 /* Alternate version that can be called from interrupts
176  * when calling sysfs_notify isn't needed.
177  */
178 static void md_new_event_inintr(mddev_t *mddev)
179 {
180         atomic_inc(&md_event_count);
181         wake_up(&md_event_waiters);
182 }
183
184 /*
185  * Enables iteration over all existing md arrays;
186  * all_mddevs_lock protects this list.
187  */
188 static LIST_HEAD(all_mddevs);
189 static DEFINE_SPINLOCK(all_mddevs_lock);
190
191
192 /*
193  * Iterates through all used mddevs in the system.
194  * We take care to grab the all_mddevs_lock whenever navigating
195  * the list, and to always hold a refcount when unlocked.
196  * Any code which breaks out of this loop still owns a reference
197  * to the current mddev and must mddev_put it.
198  */
199 #define for_each_mddev(mddev,tmp)                                       \
200                                                                         \
201         for (({ spin_lock(&all_mddevs_lock);                            \
202                 tmp = all_mddevs.next;                                  \
203                 mddev = NULL;});                                        \
204              ({ if (tmp != &all_mddevs)                                 \
205                         mddev_get(list_entry(tmp, mddev_t, all_mddevs));\
206                 spin_unlock(&all_mddevs_lock);                          \
207                 if (mddev) mddev_put(mddev);                            \
208                 mddev = list_entry(tmp, mddev_t, all_mddevs);           \
209                 tmp != &all_mddevs;});                                  \
210              ({ spin_lock(&all_mddevs_lock);                            \
211                 tmp = tmp->next;})                                      \
212                 )
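/*
 * A hedged usage sketch (not in md.c): for_each_mddev() holds a
 * reference on the current mddev, so code that returns or breaks out
 * of the loop early keeps that reference and must mddev_put() it when
 * done.  find_active_mddev() is a hypothetical name for illustration.
 */
#if 0 /* editor's illustration */
static mddev_t *find_active_mddev(void)
{
        mddev_t *mddev;
        struct list_head *tmp;

        for_each_mddev(mddev, tmp) {
                if (mddev->pers)
                        return mddev;   /* caller must mddev_put() this */
        }
        return NULL;
}
#endif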
213
214
215 static int md_fail_request (struct request_queue *q, struct bio *bio)
216 {
217         bio_io_error(bio);
218         return 0;
219 }
220
221 static inline mddev_t *mddev_get(mddev_t *mddev)
222 {
223         atomic_inc(&mddev->active);
224         return mddev;
225 }
226
227 static void mddev_put(mddev_t *mddev)
228 {
229         if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
230                 return;
231         if (!mddev->raid_disks && list_empty(&mddev->disks)) {
232                 list_del(&mddev->all_mddevs);
233                 spin_unlock(&all_mddevs_lock);
234                 blk_cleanup_queue(mddev->queue);
235                 kobject_put(&mddev->kobj);
236         } else
237                 spin_unlock(&all_mddevs_lock);
238 }
239
240 static mddev_t * mddev_find(dev_t unit)
241 {
242         mddev_t *mddev, *new = NULL;
243
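        /*
         * Editor's note: we cannot allocate while holding
         * all_mddevs_lock, so the search below may run twice: once
         * before the kzalloc() further down and once after, via the
         * retry label.  If another thread created the same unit in
         * between, the kfree(new) in the found path discards our copy.
         */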
244  retry:
245         spin_lock(&all_mddevs_lock);
246         list_for_each_entry(mddev, &all_mddevs, all_mddevs)
247                 if (mddev->unit == unit) {
248                         mddev_get(mddev);
249                         spin_unlock(&all_mddevs_lock);
250                         kfree(new);
251                         return mddev;
252                 }
253
254         if (new) {
255                 list_add(&new->all_mddevs, &all_mddevs);
256                 spin_unlock(&all_mddevs_lock);
257                 return new;
258         }
259         spin_unlock(&all_mddevs_lock);
260
261         new = kzalloc(sizeof(*new), GFP_KERNEL);
262         if (!new)
263                 return NULL;
264
265         new->unit = unit;
266         if (MAJOR(unit) == MD_MAJOR)
267                 new->md_minor = MINOR(unit);
268         else
269                 new->md_minor = MINOR(unit) >> MdpMinorShift;
270
271         mutex_init(&new->reconfig_mutex);
272         INIT_LIST_HEAD(&new->disks);
273         INIT_LIST_HEAD(&new->all_mddevs);
274         init_timer(&new->safemode_timer);
275         atomic_set(&new->active, 1);
276         spin_lock_init(&new->write_lock);
277         init_waitqueue_head(&new->sb_wait);
278         init_waitqueue_head(&new->recovery_wait);
279         new->reshape_position = MaxSector;
280         new->resync_min = 0;
281         new->resync_max = MaxSector;
282         new->level = LEVEL_NONE;
283
284         new->queue = blk_alloc_queue(GFP_KERNEL);
285         if (!new->queue) {
286                 kfree(new);
287                 return NULL;
288         }
289         /* Can be unlocked because the queue is new: no concurrency */
290         queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, new->queue);
291
292         blk_queue_make_request(new->queue, md_fail_request);
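        /*
         * Editor's note: a freshly created array fails all IO via
         * md_fail_request() until it is actually run, at which point
         * the personality installs a real make_request function.
         */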
293
294         goto retry;
295 }
296
297 static inline int mddev_lock(mddev_t * mddev)
298 {
299         return mutex_lock_interruptible(&mddev->reconfig_mutex);
300 }
301
302 static inline int mddev_trylock(mddev_t * mddev)
303 {
304         return mutex_trylock(&mddev->reconfig_mutex);
305 }
306
307 static inline void mddev_unlock(mddev_t * mddev)
308 {
309         mutex_unlock(&mddev->reconfig_mutex);
310
311         md_wakeup_thread(mddev->thread);
312 }
313
314 static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr)
315 {
316         mdk_rdev_t * rdev;
317         struct list_head *tmp;
318
319         rdev_for_each(rdev, tmp, mddev) {
320                 if (rdev->desc_nr == nr)
321                         return rdev;
322         }
323         return NULL;
324 }
325
326 static mdk_rdev_t * find_rdev(mddev_t * mddev, dev_t dev)
327 {
328         struct list_head *tmp;
329         mdk_rdev_t *rdev;
330
331         rdev_for_each(rdev, tmp, mddev) {
332                 if (rdev->bdev->bd_dev == dev)
333                         return rdev;
334         }
335         return NULL;
336 }
337
338 static struct mdk_personality *find_pers(int level, char *clevel)
339 {
340         struct mdk_personality *pers;
341         list_for_each_entry(pers, &pers_list, list) {
342                 if (level != LEVEL_NONE && pers->level == level)
343                         return pers;
344                 if (strcmp(pers->name, clevel)==0)
345                         return pers;
346         }
347         return NULL;
348 }
349
350 static inline sector_t calc_dev_sboffset(struct block_device *bdev)
351 {
352         sector_t size = bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
353         return MD_NEW_SIZE_BLOCKS(size);
354 }
355
356 static sector_t calc_dev_size(mdk_rdev_t *rdev, unsigned chunk_size)
357 {
358         sector_t size;
359
360         size = rdev->sb_offset;
361
362         if (chunk_size)
363                 size &= ~((sector_t)chunk_size/1024 - 1);
364         return size;
365 }
366
367 static int alloc_disk_sb(mdk_rdev_t * rdev)
368 {
369         if (rdev->sb_page)
370                 MD_BUG();
371
372         rdev->sb_page = alloc_page(GFP_KERNEL);
373         if (!rdev->sb_page) {
374                 printk(KERN_ALERT "md: out of memory.\n");
375                 return -EINVAL;
376         }
377
378         return 0;
379 }
380
381 static void free_disk_sb(mdk_rdev_t * rdev)
382 {
383         if (rdev->sb_page) {
384                 put_page(rdev->sb_page);
385                 rdev->sb_loaded = 0;
386                 rdev->sb_page = NULL;
387                 rdev->sb_offset = 0;
388                 rdev->size = 0;
389         }
390 }
391
392
393 static void super_written(struct bio *bio, int error)
394 {
395         mdk_rdev_t *rdev = bio->bi_private;
396         mddev_t *mddev = rdev->mddev;
397
398         if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
399                 printk("md: super_written gets error=%d, uptodate=%d\n",
400                        error, test_bit(BIO_UPTODATE, &bio->bi_flags));
401                 WARN_ON(test_bit(BIO_UPTODATE, &bio->bi_flags));
402                 md_error(mddev, rdev);
403         }
404
405         if (atomic_dec_and_test(&mddev->pending_writes))
406                 wake_up(&mddev->sb_wait);
407         bio_put(bio);
408 }
409
410 static void super_written_barrier(struct bio *bio, int error)
411 {
412         struct bio *bio2 = bio->bi_private;
413         mdk_rdev_t *rdev = bio2->bi_private;
414         mddev_t *mddev = rdev->mddev;
415
416         if (!test_bit(BIO_UPTODATE, &bio->bi_flags) &&
417             error == -EOPNOTSUPP) {
418                 unsigned long flags;
419                 /* barriers don't appear to be supported :-( */
420                 set_bit(BarriersNotsupp, &rdev->flags);
421                 mddev->barriers_work = 0;
422                 spin_lock_irqsave(&mddev->write_lock, flags);
423                 bio2->bi_next = mddev->biolist;
424                 mddev->biolist = bio2;
425                 spin_unlock_irqrestore(&mddev->write_lock, flags);
426                 wake_up(&mddev->sb_wait);
427                 bio_put(bio);
428         } else {
429                 bio_put(bio2);
430                 bio->bi_private = rdev;
431                 super_written(bio, error);
432         }
433 }
434
435 void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
436                    sector_t sector, int size, struct page *page)
437 {
438         /* write first size bytes of page to sector of rdev
439          * Increment mddev->pending_writes before returning
440          * and decrement it on completion, waking up sb_wait
441          * if zero is reached.
442          * If an error occurred, call md_error
443          *
444          * As we might need to resubmit the request if BIO_RW_BARRIER
445          * causes ENOTSUPP, we allocate a spare bio...
446          */
447         struct bio *bio = bio_alloc(GFP_NOIO, 1);
448         int rw = (1<<BIO_RW) | (1<<BIO_RW_SYNC);
449
450         bio->bi_bdev = rdev->bdev;
451         bio->bi_sector = sector;
452         bio_add_page(bio, page, size, 0);
453         bio->bi_private = rdev;
454         bio->bi_end_io = super_written;
455         bio->bi_rw = rw;
456
457         atomic_inc(&mddev->pending_writes);
458         if (!test_bit(BarriersNotsupp, &rdev->flags)) {
459                 struct bio *rbio;
460                 rw |= (1<<BIO_RW_BARRIER);
461                 rbio = bio_clone(bio, GFP_NOIO);
462                 rbio->bi_private = bio;
463                 rbio->bi_end_io = super_written_barrier;
464                 submit_bio(rw, rbio);
465         } else
466                 submit_bio(rw, bio);
467 }
468
469 void md_super_wait(mddev_t *mddev)
470 {
471         /* wait for all superblock writes that were scheduled to complete.
472          * if any had to be retried (due to BARRIER problems), retry them
473          */
474         DEFINE_WAIT(wq);
475         for(;;) {
476                 prepare_to_wait(&mddev->sb_wait, &wq, TASK_UNINTERRUPTIBLE);
477                 if (atomic_read(&mddev->pending_writes)==0)
478                         break;
479                 while (mddev->biolist) {
480                         struct bio *bio;
481                         spin_lock_irq(&mddev->write_lock);
482                         bio = mddev->biolist;
483                         mddev->biolist = bio->bi_next ;
484                         bio->bi_next = NULL;
485                         spin_unlock_irq(&mddev->write_lock);
486                         submit_bio(bio->bi_rw, bio);
487                 }
488                 schedule();
489         }
490         finish_wait(&mddev->sb_wait, &wq);
491 }
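/*
 * A minimal sketch of the assumed caller pattern (not a quote from
 * md.c): superblock updates are queued with md_super_write() for every
 * member device and then flushed with md_super_wait(), which also
 * resubmits any writes whose barrier was rejected with -EOPNOTSUPP.
 * sync_all_sbs() is a hypothetical name for illustration.
 */
#if 0 /* editor's illustration */
static void sync_all_sbs(mddev_t *mddev)
{
        mdk_rdev_t *rdev;
        struct list_head *tmp;

        rdev_for_each(rdev, tmp, mddev)
                md_super_write(mddev, rdev, rdev->sb_offset << 1,
                               rdev->sb_size, rdev->sb_page);
        md_super_wait(mddev);   /* blocks until pending_writes reaches 0 */
}
#endif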
492
493 static void bi_complete(struct bio *bio, int error)
494 {
495         complete((struct completion*)bio->bi_private);
496 }
497
498 int sync_page_io(struct block_device *bdev, sector_t sector, int size,
499                    struct page *page, int rw)
500 {
501         struct bio *bio = bio_alloc(GFP_NOIO, 1);
502         struct completion event;
503         int ret;
504
505         rw |= (1 << BIO_RW_SYNC);
506
507         bio->bi_bdev = bdev;
508         bio->bi_sector = sector;
509         bio_add_page(bio, page, size, 0);
510         init_completion(&event);
511         bio->bi_private = &event;
512         bio->bi_end_io = bi_complete;
513         submit_bio(rw, bio);
514         wait_for_completion(&event);
515
516         ret = test_bit(BIO_UPTODATE, &bio->bi_flags);
517         bio_put(bio);
518         return ret;
519 }
520 EXPORT_SYMBOL_GPL(sync_page_io);
521
522 static int read_disk_sb(mdk_rdev_t * rdev, int size)
523 {
524         char b[BDEVNAME_SIZE];
525         if (!rdev->sb_page) {
526                 MD_BUG();
527                 return -EINVAL;
528         }
529         if (rdev->sb_loaded)
530                 return 0;
531
532
533         if (!sync_page_io(rdev->bdev, rdev->sb_offset<<1, size, rdev->sb_page, READ))
534                 goto fail;
535         rdev->sb_loaded = 1;
536         return 0;
537
538 fail:
539         printk(KERN_WARNING "md: disabled device %s, could not read superblock.\n",
540                 bdevname(rdev->bdev,b));
541         return -EINVAL;
542 }
543
544 static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
545 {
546         if (    (sb1->set_uuid0 == sb2->set_uuid0) &&
547                 (sb1->set_uuid1 == sb2->set_uuid1) &&
548                 (sb1->set_uuid2 == sb2->set_uuid2) &&
549                 (sb1->set_uuid3 == sb2->set_uuid3))
550
551                 return 1;
552
553         return 0;
554 }
555
556
557 static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
558 {
559         int ret;
560         mdp_super_t *tmp1, *tmp2;
561
562         tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
563         tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);
564
565         if (!tmp1 || !tmp2) {
566                 ret = 0;
567                 printk(KERN_INFO "md.c: out of memory, assuming sb1 and sb2 differ!\n");
568                 goto abort;
569         }
570
571         *tmp1 = *sb1;
572         *tmp2 = *sb2;
573
574         /*
575          * nr_disks is not constant
576          */
577         tmp1->nr_disks = 0;
578         tmp2->nr_disks = 0;
579
580         if (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4))
581                 ret = 0;
582         else
583                 ret = 1;
584
585 abort:
586         kfree(tmp1);
587         kfree(tmp2);
588         return ret;
589 }
590
591
592 static u32 md_csum_fold(u32 csum)
593 {
594         csum = (csum & 0xffff) + (csum >> 16);
595         return (csum & 0xffff) + (csum >> 16);
596 }
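/*
 * Worked example (editor's note): md_csum_fold(0x0001ffff) computes
 * 0xffff + 0x0001 = 0x10000 on the first pass, then 0x0000 + 0x0001 =
 * 0x0001 on the second, so the result always fits in 16 bits.
 */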
597
598 static unsigned int calc_sb_csum(mdp_super_t * sb)
599 {
600         u64 newcsum = 0;
601         u32 *sb32 = (u32*)sb;
602         int i;
603         unsigned int disk_csum, csum;
604
605         disk_csum = sb->sb_csum;
606         sb->sb_csum = 0;
607
608         for (i = 0; i < MD_SB_BYTES/4 ; i++)
609                 newcsum += sb32[i];
610         csum = (newcsum & 0xffffffff) + (newcsum>>32);
611
612
613 #ifdef CONFIG_ALPHA
614         /* This used to use csum_partial, which was wrong for several
615          * reasons including that different results are returned on
616          * different architectures.  It isn't critical that we get exactly
617          * the same return value as before (we always csum_fold before
618          * testing, and that removes any differences).  However as we
619          * know that csum_partial always returned a 16bit value on
620          * alphas, do a fold to maximise conformity to previous behaviour.
621          */
622         sb->sb_csum = md_csum_fold(disk_csum);
623 #else
624         sb->sb_csum = disk_csum;
625 #endif
626         return csum;
627 }
628
629
630 /*
631  * Handle superblock details.
632  * We want to be able to handle multiple superblock formats
633  * so we have a common interface to them all, and an array of
634  * different handlers.
635  * We rely on user-space to write the initial superblock, and support
636  * reading and updating of superblocks.
637  * Interface methods are:
638  *   int load_super(mdk_rdev_t *dev, mdk_rdev_t *refdev, int minor_version)
639  *      loads and validates a superblock on dev.
640  *      if refdev != NULL, compare superblocks on both devices
641  *    Return:
642  *      0 - dev has a superblock that is compatible with refdev
643  *      1 - dev has a superblock that is compatible and newer than refdev
644  *          so dev should be used as the refdev in future
645  *     -EINVAL superblock incompatible or invalid
646  *     -othererror e.g. -EIO
647  *
648  *   int validate_super(mddev_t *mddev, mdk_rdev_t *dev)
649  *      Verify that dev is acceptable into mddev.
650  *       The first time, mddev->raid_disks will be 0, and data from
651  *       dev should be merged in.  Subsequent calls check that dev
652  *       is new enough.  Return 0 or -EINVAL
653  *
654  *   void sync_super(mddev_t *mddev, mdk_rdev_t *dev)
655  *     Update the superblock for rdev with data in mddev
656  *     This does not write to disc.
657  *
658  */
659
660 struct super_type  {
661         char                *name;
662         struct module       *owner;
663         int                 (*load_super)(mdk_rdev_t *rdev, mdk_rdev_t *refdev,
664                                           int minor_version);
665         int                 (*validate_super)(mddev_t *mddev, mdk_rdev_t *rdev);
666         void                (*sync_super)(mddev_t *mddev, mdk_rdev_t *rdev);
667         unsigned long long  (*rdev_size_change)(mdk_rdev_t *rdev,
668                                                 unsigned long long size);
669 };
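/*
 * A hedged sketch of how this interface is driven (the real callers
 * appear later in this file; super_types[], defined below, is indexed
 * by the on-disk major version).  load_and_validate() is a
 * hypothetical name for illustration.
 */
#if 0 /* editor's illustration */
static int load_and_validate(mddev_t *mddev, mdk_rdev_t *rdev,
                             mdk_rdev_t *refdev, int super_format,
                             int minor_version)
{
        int err = super_types[super_format].load_super(rdev, refdev,
                                                       minor_version);
        if (err < 0)
                return err;     /* -EINVAL, -EIO, ... */
        return super_types[super_format].validate_super(mddev, rdev);
}
#endif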
670
671 /*
672  * load_super for 0.90.0 
673  */
674 static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
675 {
676         char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
677         mdp_super_t *sb;
678         int ret;
679         sector_t sb_offset;
680
681         /*
682          * Calculate the position of the superblock;
683          * it's at the end of the disk.
684          *
685          * It also happens to be a multiple of 4Kb.
686          */
687         sb_offset = calc_dev_sboffset(rdev->bdev);
688         rdev->sb_offset = sb_offset;
689
690         ret = read_disk_sb(rdev, MD_SB_BYTES);
691         if (ret) return ret;
692
693         ret = -EINVAL;
694
695         bdevname(rdev->bdev, b);
696         sb = (mdp_super_t*)page_address(rdev->sb_page);
697
698         if (sb->md_magic != MD_SB_MAGIC) {
699                 printk(KERN_ERR "md: invalid raid superblock magic on %s\n",
700                        b);
701                 goto abort;
702         }
703
704         if (sb->major_version != 0 ||
705             sb->minor_version < 90 ||
706             sb->minor_version > 91) {
707                 printk(KERN_WARNING "Bad version number %d.%d on %s\n",
708                         sb->major_version, sb->minor_version,
709                         b);
710                 goto abort;
711         }
712
713         if (sb->raid_disks <= 0)
714                 goto abort;
715
716         if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
717                 printk(KERN_WARNING "md: invalid superblock checksum on %s\n",
718                         b);
719                 goto abort;
720         }
721
722         rdev->preferred_minor = sb->md_minor;
723         rdev->data_offset = 0;
724         rdev->sb_size = MD_SB_BYTES;
725
726         if (sb->state & (1<<MD_SB_BITMAP_PRESENT)) {
727                 if (sb->level != 1 && sb->level != 4
728                     && sb->level != 5 && sb->level != 6
729                     && sb->level != 10) {
730                         /* FIXME use a better test */
731                         printk(KERN_WARNING
732                                "md: bitmaps not supported for this level.\n");
733                         goto abort;
734                 }
735         }
736
737         if (sb->level == LEVEL_MULTIPATH)
738                 rdev->desc_nr = -1;
739         else
740                 rdev->desc_nr = sb->this_disk.number;
741
742         if (!refdev) {
743                 ret = 1;
744         } else {
745                 __u64 ev1, ev2;
746                 mdp_super_t *refsb = (mdp_super_t*)page_address(refdev->sb_page);
747                 if (!uuid_equal(refsb, sb)) {
748                         printk(KERN_WARNING "md: %s has different UUID to %s\n",
749                                 b, bdevname(refdev->bdev,b2));
750                         goto abort;
751                 }
752                 if (!sb_equal(refsb, sb)) {
753                         printk(KERN_WARNING "md: %s has same UUID"
754                                " but different superblock to %s\n",
755                                b, bdevname(refdev->bdev, b2));
756                         goto abort;
757                 }
758                 ev1 = md_event(sb);
759                 ev2 = md_event(refsb);
760                 if (ev1 > ev2)
761                         ret = 1;
762                 else 
763                         ret = 0;
764         }
765         rdev->size = calc_dev_size(rdev, sb->chunk_size);
766
767         if (rdev->size < sb->size && sb->level > 1)
768                 /* "this cannot possibly happen" ... */
769                 ret = -EINVAL;
770
771  abort:
772         return ret;
773 }
774
775 /*
776  * validate_super for 0.90.0
777  */
778 static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
779 {
780         mdp_disk_t *desc;
781         mdp_super_t *sb = (mdp_super_t *)page_address(rdev->sb_page);
782         __u64 ev1 = md_event(sb);
783
784         rdev->raid_disk = -1;
785         clear_bit(Faulty, &rdev->flags);
786         clear_bit(In_sync, &rdev->flags);
787         clear_bit(WriteMostly, &rdev->flags);
788         clear_bit(BarriersNotsupp, &rdev->flags);
789
790         if (mddev->raid_disks == 0) {
791                 mddev->major_version = 0;
792                 mddev->minor_version = sb->minor_version;
793                 mddev->patch_version = sb->patch_version;
794                 mddev->external = 0;
795                 mddev->chunk_size = sb->chunk_size;
796                 mddev->ctime = sb->ctime;
797                 mddev->utime = sb->utime;
798                 mddev->level = sb->level;
799                 mddev->clevel[0] = 0;
800                 mddev->layout = sb->layout;
801                 mddev->raid_disks = sb->raid_disks;
802                 mddev->size = sb->size;
803                 mddev->events = ev1;
804                 mddev->bitmap_offset = 0;
805                 mddev->default_bitmap_offset = MD_SB_BYTES >> 9;
806
807                 if (mddev->minor_version >= 91) {
808                         mddev->reshape_position = sb->reshape_position;
809                         mddev->delta_disks = sb->delta_disks;
810                         mddev->new_level = sb->new_level;
811                         mddev->new_layout = sb->new_layout;
812                         mddev->new_chunk = sb->new_chunk;
813                 } else {
814                         mddev->reshape_position = MaxSector;
815                         mddev->delta_disks = 0;
816                         mddev->new_level = mddev->level;
817                         mddev->new_layout = mddev->layout;
818                         mddev->new_chunk = mddev->chunk_size;
819                 }
820
821                 if (sb->state & (1<<MD_SB_CLEAN))
822                         mddev->recovery_cp = MaxSector;
823                 else {
824                         if (sb->events_hi == sb->cp_events_hi && 
825                                 sb->events_lo == sb->cp_events_lo) {
826                                 mddev->recovery_cp = sb->recovery_cp;
827                         } else
828                                 mddev->recovery_cp = 0;
829                 }
830
831                 memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
832                 memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
833                 memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
834                 memcpy(mddev->uuid+12,&sb->set_uuid3, 4);
835
836                 mddev->max_disks = MD_SB_DISKS;
837
838                 if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
839                     mddev->bitmap_file == NULL)
840                         mddev->bitmap_offset = mddev->default_bitmap_offset;
841
842         } else if (mddev->pers == NULL) {
843                 /* Insist on good event counter while assembling */
844                 ++ev1;
845                 if (ev1 < mddev->events) 
846                         return -EINVAL;
847         } else if (mddev->bitmap) {
848                 /* if adding to array with a bitmap, then we can accept an
849                  * older device ... but not too old.
850                  */
851                 if (ev1 < mddev->bitmap->events_cleared)
852                         return 0;
853         } else {
854                 if (ev1 < mddev->events)
855                         /* just a hot-add of a new device, leave raid_disk at -1 */
856                         return 0;
857         }
858
859         if (mddev->level != LEVEL_MULTIPATH) {
860                 desc = sb->disks + rdev->desc_nr;
861
862                 if (desc->state & (1<<MD_DISK_FAULTY))
863                         set_bit(Faulty, &rdev->flags);
864                 else if (desc->state & (1<<MD_DISK_SYNC) /* &&
865                             desc->raid_disk < mddev->raid_disks */) {
866                         set_bit(In_sync, &rdev->flags);
867                         rdev->raid_disk = desc->raid_disk;
868                 }
869                 if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
870                         set_bit(WriteMostly, &rdev->flags);
871         } else /* MULTIPATH are always insync */
872                 set_bit(In_sync, &rdev->flags);
873         return 0;
874 }
875
876 /*
877  * sync_super for 0.90.0
878  */
879 static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
880 {
881         mdp_super_t *sb;
882         struct list_head *tmp;
883         mdk_rdev_t *rdev2;
884         int next_spare = mddev->raid_disks;
885
886
887         /* make rdev->sb match mddev data..
888          *
889          * 1/ zero out disks
890          * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
891          * 3/ any empty disks < next_spare become removed
892          *
893          * disks[0] gets initialised to REMOVED because
894          * we cannot be sure from other fields if it has
895          * been initialised or not.
896          */
897         int i;
898         int active=0, working=0,failed=0,spare=0,nr_disks=0;
899
900         rdev->sb_size = MD_SB_BYTES;
901
902         sb = (mdp_super_t*)page_address(rdev->sb_page);
903
904         memset(sb, 0, sizeof(*sb));
905
906         sb->md_magic = MD_SB_MAGIC;
907         sb->major_version = mddev->major_version;
908         sb->patch_version = mddev->patch_version;
909         sb->gvalid_words  = 0; /* ignored */
910         memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
911         memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
912         memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
913         memcpy(&sb->set_uuid3, mddev->uuid+12,4);
914
915         sb->ctime = mddev->ctime;
916         sb->level = mddev->level;
917         sb->size  = mddev->size;
918         sb->raid_disks = mddev->raid_disks;
919         sb->md_minor = mddev->md_minor;
920         sb->not_persistent = 0;
921         sb->utime = mddev->utime;
922         sb->state = 0;
923         sb->events_hi = (mddev->events>>32);
924         sb->events_lo = (u32)mddev->events;
925
926         if (mddev->reshape_position == MaxSector)
927                 sb->minor_version = 90;
928         else {
929                 sb->minor_version = 91;
930                 sb->reshape_position = mddev->reshape_position;
931                 sb->new_level = mddev->new_level;
932                 sb->delta_disks = mddev->delta_disks;
933                 sb->new_layout = mddev->new_layout;
934                 sb->new_chunk = mddev->new_chunk;
935         }
936         mddev->minor_version = sb->minor_version;
937         if (mddev->in_sync)
938         {
939                 sb->recovery_cp = mddev->recovery_cp;
940                 sb->cp_events_hi = (mddev->events>>32);
941                 sb->cp_events_lo = (u32)mddev->events;
942                 if (mddev->recovery_cp == MaxSector)
943                         sb->state = (1<< MD_SB_CLEAN);
944         } else
945                 sb->recovery_cp = 0;
946
947         sb->layout = mddev->layout;
948         sb->chunk_size = mddev->chunk_size;
949
950         if (mddev->bitmap && mddev->bitmap_file == NULL)
951                 sb->state |= (1<<MD_SB_BITMAP_PRESENT);
952
953         sb->disks[0].state = (1<<MD_DISK_REMOVED);
954         rdev_for_each(rdev2, tmp, mddev) {
955                 mdp_disk_t *d;
956                 int desc_nr;
957                 if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags)
958                     && !test_bit(Faulty, &rdev2->flags))
959                         desc_nr = rdev2->raid_disk;
960                 else
961                         desc_nr = next_spare++;
962                 rdev2->desc_nr = desc_nr;
963                 d = &sb->disks[rdev2->desc_nr];
964                 nr_disks++;
965                 d->number = rdev2->desc_nr;
966                 d->major = MAJOR(rdev2->bdev->bd_dev);
967                 d->minor = MINOR(rdev2->bdev->bd_dev);
968                 if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags)
969                     && !test_bit(Faulty, &rdev2->flags))
970                         d->raid_disk = rdev2->raid_disk;
971                 else
972                         d->raid_disk = rdev2->desc_nr; /* compatibility */
973                 if (test_bit(Faulty, &rdev2->flags))
974                         d->state = (1<<MD_DISK_FAULTY);
975                 else if (test_bit(In_sync, &rdev2->flags)) {
976                         d->state = (1<<MD_DISK_ACTIVE);
977                         d->state |= (1<<MD_DISK_SYNC);
978                         active++;
979                         working++;
980                 } else {
981                         d->state = 0;
982                         spare++;
983                         working++;
984                 }
985                 if (test_bit(WriteMostly, &rdev2->flags))
986                         d->state |= (1<<MD_DISK_WRITEMOSTLY);
987         }
988         /* now set the "removed" and "faulty" bits on any missing devices */
989         for (i=0 ; i < mddev->raid_disks ; i++) {
990                 mdp_disk_t *d = &sb->disks[i];
991                 if (d->state == 0 && d->number == 0) {
992                         d->number = i;
993                         d->raid_disk = i;
994                         d->state = (1<<MD_DISK_REMOVED);
995                         d->state |= (1<<MD_DISK_FAULTY);
996                         failed++;
997                 }
998         }
999         sb->nr_disks = nr_disks;
1000         sb->active_disks = active;
1001         sb->working_disks = working;
1002         sb->failed_disks = failed;
1003         sb->spare_disks = spare;
1004
1005         sb->this_disk = sb->disks[rdev->desc_nr];
1006         sb->sb_csum = calc_sb_csum(sb);
1007 }
1008
1009 /*
1010  * rdev_size_change for 0.90.0
1011  */
1012 static unsigned long long
1013 super_90_rdev_size_change(mdk_rdev_t *rdev, unsigned long long size)
1014 {
1015         if (size && size < rdev->mddev->size)
1016                 return 0; /* component must fit device */
1017         size *= 2; /* convert to sectors */
1018         if (rdev->mddev->bitmap_offset)
1019                 return 0; /* can't move bitmap */
1020         rdev->sb_offset = calc_dev_sboffset(rdev->bdev);
1021         if (!size || size > rdev->sb_offset*2)
1022                 size = rdev->sb_offset*2;
1023         md_super_write(rdev->mddev, rdev, rdev->sb_offset << 1, rdev->sb_size,
1024                        rdev->sb_page);
1025         md_super_wait(rdev->mddev);
1026         return size/2; /* kB for sysfs */
1027 }
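/*
 * Editor's note on units: the "size" argument and the return value are
 * in KiB (the sysfs convention), the intermediate arithmetic is in
 * 512-byte sectors (hence the *2 on entry and /2 on return), and
 * rdev->sb_offset is kept in KiB, so it is shifted left by one to form
 * the sector address passed to md_super_write().
 */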
1028
1029
1030 /*
1031  * version 1 superblock
1032  */
1033
1034 static __le32 calc_sb_1_csum(struct mdp_superblock_1 * sb)
1035 {
1036         __le32 disk_csum;
1037         u32 csum;
1038         unsigned long long newcsum;
1039         int size = 256 + le32_to_cpu(sb->max_dev)*2;
1040         __le32 *isuper = (__le32*)sb;
1041         int i;
1042
1043         disk_csum = sb->sb_csum;
1044         sb->sb_csum = 0;
1045         newcsum = 0;
1046         for (i=0; size>=4; size -= 4 )
1047                 newcsum += le32_to_cpu(*isuper++);
1048
1049         if (size == 2)
1050                 newcsum += le16_to_cpu(*(__le16*) isuper);
1051
1052         csum = (newcsum & 0xffffffff) + (newcsum >> 32);
1053         sb->sb_csum = disk_csum;
1054         return cpu_to_le32(csum);
1055 }
1056
1057 static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
1058 {
1059         struct mdp_superblock_1 *sb;
1060         int ret;
1061         sector_t sb_offset;
1062         char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
1063         int bmask;
1064
1065         /*
1066          * Calculate the position of the superblock.
1067          * It is always aligned to a 4K boundary and
1068          * depending on minor_version, it can be:
1069          * 0: At least 8K, but less than 12K, from end of device
1070          * 1: At start of device
1071          * 2: 4K from start of device.
1072          */
1073         switch(minor_version) {
1074         case 0:
1075                 sb_offset = rdev->bdev->bd_inode->i_size >> 9;
1076                 sb_offset -= 8*2;
1077                 sb_offset &= ~(sector_t)(4*2-1);
1078                 /* convert from sectors to K */
1079                 sb_offset /= 2;
1080                 break;
1081         case 1:
1082                 sb_offset = 0;
1083                 break;
1084         case 2:
1085                 sb_offset = 4;
1086                 break;
1087         default:
1088                 return -EINVAL;
1089         }
1090         rdev->sb_offset = sb_offset;
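        /*
         * Worked example (editor's note): on a 0x100000-sector device
         * (512 MiB), minor_version 0 gives 0x100000 - 16 = 1048560
         * sectors, which is already 4K (8-sector) aligned, so
         * sb_offset = 1048560 / 2 = 524280, in the same 1K units the
         * v0.90 path uses.
         */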
1091
1092         /* The superblock is rarely larger than 1K, but it can be,
1093          * and it is safe to read 4K, so we do that.
1094          */
1095         ret = read_disk_sb(rdev, 4096);
1096         if (ret) return ret;
1097
1098
1099         sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);
1100
1101         if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
1102             sb->major_version != cpu_to_le32(1) ||
1103             le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
1104             le64_to_cpu(sb->super_offset) != (rdev->sb_offset<<1) ||
1105             (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
1106                 return -EINVAL;
1107
1108         if (calc_sb_1_csum(sb) != sb->sb_csum) {
1109                 printk("md: invalid superblock checksum on %s\n",
1110                         bdevname(rdev->bdev,b));
1111                 return -EINVAL;
1112         }
1113         if (le64_to_cpu(sb->data_size) < 10) {
1114                 printk("md: data_size too small on %s\n",
1115                        bdevname(rdev->bdev,b));
1116                 return -EINVAL;
1117         }
1118         if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET)) {
1119                 if (sb->level != cpu_to_le32(1) &&
1120                     sb->level != cpu_to_le32(4) &&
1121                     sb->level != cpu_to_le32(5) &&
1122                     sb->level != cpu_to_le32(6) &&
1123                     sb->level != cpu_to_le32(10)) {
1124                         printk(KERN_WARNING
1125                                "md: bitmaps not supported for this level.\n");
1126                         return -EINVAL;
1127                 }
1128         }
1129
1130         rdev->preferred_minor = 0xffff;
1131         rdev->data_offset = le64_to_cpu(sb->data_offset);
1132         atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
1133
1134         rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
1135         bmask = queue_hardsect_size(rdev->bdev->bd_disk->queue)-1;
1136         if (rdev->sb_size & bmask)
1137                 rdev->sb_size = (rdev->sb_size | bmask) + 1;
1138
1139         if (minor_version
1140             && rdev->data_offset < sb_offset + (rdev->sb_size/512))
1141                 return -EINVAL;
1142
1143         if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
1144                 rdev->desc_nr = -1;
1145         else
1146                 rdev->desc_nr = le32_to_cpu(sb->dev_number);
1147
1148         if (!refdev) {
1149                 ret = 1;
1150         } else {
1151                 __u64 ev1, ev2;
1152                 struct mdp_superblock_1 *refsb = 
1153                         (struct mdp_superblock_1*)page_address(refdev->sb_page);
1154
1155                 if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
1156                     sb->level != refsb->level ||
1157                     sb->layout != refsb->layout ||
1158                     sb->chunksize != refsb->chunksize) {
1159                         printk(KERN_WARNING "md: %s has strangely different"
1160                                 " superblock to %s\n",
1161                                 bdevname(rdev->bdev,b),
1162                                 bdevname(refdev->bdev,b2));
1163                         return -EINVAL;
1164                 }
1165                 ev1 = le64_to_cpu(sb->events);
1166                 ev2 = le64_to_cpu(refsb->events);
1167
1168                 if (ev1 > ev2)
1169                         ret = 1;
1170                 else
1171                         ret = 0;
1172         }
1173         if (minor_version)
1174                 rdev->size = ((rdev->bdev->bd_inode->i_size>>9) - le64_to_cpu(sb->data_offset)) / 2;
1175         else
1176                 rdev->size = rdev->sb_offset;
1177         if (rdev->size < le64_to_cpu(sb->data_size)/2)
1178                 return -EINVAL;
1179         rdev->size = le64_to_cpu(sb->data_size)/2;
1180         if (le32_to_cpu(sb->chunksize))
1181                 rdev->size &= ~((sector_t)le32_to_cpu(sb->chunksize)/2 - 1);
1182
1183         if (le64_to_cpu(sb->size) > rdev->size*2)
1184                 return -EINVAL;
1185         return ret;
1186 }
1187
1188 static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
1189 {
1190         struct mdp_superblock_1 *sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);
1191         __u64 ev1 = le64_to_cpu(sb->events);
1192
1193         rdev->raid_disk = -1;
1194         clear_bit(Faulty, &rdev->flags);
1195         clear_bit(In_sync, &rdev->flags);
1196         clear_bit(WriteMostly, &rdev->flags);
1197         clear_bit(BarriersNotsupp, &rdev->flags);
1198
1199         if (mddev->raid_disks == 0) {
1200                 mddev->major_version = 1;
1201                 mddev->patch_version = 0;
1202                 mddev->external = 0;
1203                 mddev->chunk_size = le32_to_cpu(sb->chunksize) << 9;
1204                 mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1);
1205                 mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1);
1206                 mddev->level = le32_to_cpu(sb->level);
1207                 mddev->clevel[0] = 0;
1208                 mddev->layout = le32_to_cpu(sb->layout);
1209                 mddev->raid_disks = le32_to_cpu(sb->raid_disks);
1210                 mddev->size = le64_to_cpu(sb->size)/2;
1211                 mddev->events = ev1;
1212                 mddev->bitmap_offset = 0;
1213                 mddev->default_bitmap_offset = 1024 >> 9;
1214                 
1215                 mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
1216                 memcpy(mddev->uuid, sb->set_uuid, 16);
1217
1218                 mddev->max_disks =  (4096-256)/2;
1219
1220                 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
1221                     mddev->bitmap_file == NULL )
1222                         mddev->bitmap_offset = (__s32)le32_to_cpu(sb->bitmap_offset);
1223
1224                 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
1225                         mddev->reshape_position = le64_to_cpu(sb->reshape_position);
1226                         mddev->delta_disks = le32_to_cpu(sb->delta_disks);
1227                         mddev->new_level = le32_to_cpu(sb->new_level);
1228                         mddev->new_layout = le32_to_cpu(sb->new_layout);
1229                         mddev->new_chunk = le32_to_cpu(sb->new_chunk)<<9;
1230                 } else {
1231                         mddev->reshape_position = MaxSector;
1232                         mddev->delta_disks = 0;
1233                         mddev->new_level = mddev->level;
1234                         mddev->new_layout = mddev->layout;
1235                         mddev->new_chunk = mddev->chunk_size;
1236                 }
1237
1238         } else if (mddev->pers == NULL) {
1239                 /* Insist on good event counter while assembling */
1240                 ++ev1;
1241                 if (ev1 < mddev->events)
1242                         return -EINVAL;
1243         } else if (mddev->bitmap) {
1244                 /* If adding to array with a bitmap, then we can accept an
1245                  * older device, but not too old.
1246                  */
1247                 if (ev1 < mddev->bitmap->events_cleared)
1248                         return 0;
1249         } else {
1250                 if (ev1 < mddev->events)
1251                         /* just a hot-add of a new device, leave raid_disk at -1 */
1252                         return 0;
1253         }
1254         if (mddev->level != LEVEL_MULTIPATH) {
1255                 int role;
1256                 role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
1257                 switch(role) {
1258                 case 0xffff: /* spare */
1259                         break;
1260                 case 0xfffe: /* faulty */
1261                         set_bit(Faulty, &rdev->flags);
1262                         break;
1263                 default:
1264                         if ((le32_to_cpu(sb->feature_map) &
1265                              MD_FEATURE_RECOVERY_OFFSET))
1266                                 rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
1267                         else
1268                                 set_bit(In_sync, &rdev->flags);
1269                         rdev->raid_disk = role;
1270                         break;
1271                 }
1272                 if (sb->devflags & WriteMostly1)
1273                         set_bit(WriteMostly, &rdev->flags);
1274         } else /* MULTIPATH are always insync */
1275                 set_bit(In_sync, &rdev->flags);
1276
1277         return 0;
1278 }
1279
1280 static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
1281 {
1282         struct mdp_superblock_1 *sb;
1283         struct list_head *tmp;
1284         mdk_rdev_t *rdev2;
1285         int max_dev, i;
1286         /* make rdev->sb match mddev and rdev data. */
1287
1288         sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);
1289
1290         sb->feature_map = 0;
1291         sb->pad0 = 0;
1292         sb->recovery_offset = cpu_to_le64(0);
1293         memset(sb->pad1, 0, sizeof(sb->pad1));
1294         memset(sb->pad2, 0, sizeof(sb->pad2));
1295         memset(sb->pad3, 0, sizeof(sb->pad3));
1296
1297         sb->utime = cpu_to_le64((__u64)mddev->utime);
1298         sb->events = cpu_to_le64(mddev->events);
1299         if (mddev->in_sync)
1300                 sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
1301         else
1302                 sb->resync_offset = cpu_to_le64(0);
1303
1304         sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
1305
1306         sb->raid_disks = cpu_to_le32(mddev->raid_disks);
1307         sb->size = cpu_to_le64(mddev->size<<1);
1308
1309         if (mddev->bitmap && mddev->bitmap_file == NULL) {
1310                 sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_offset);
1311                 sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
1312         }
1313
1314         if (rdev->raid_disk >= 0 &&
1315             !test_bit(In_sync, &rdev->flags) &&
1316             rdev->recovery_offset > 0) {
1317                 sb->feature_map |= cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
1318                 sb->recovery_offset = cpu_to_le64(rdev->recovery_offset);
1319         }
1320
1321         if (mddev->reshape_position != MaxSector) {
1322                 sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
1323                 sb->reshape_position = cpu_to_le64(mddev->reshape_position);
1324                 sb->new_layout = cpu_to_le32(mddev->new_layout);
1325                 sb->delta_disks = cpu_to_le32(mddev->delta_disks);
1326                 sb->new_level = cpu_to_le32(mddev->new_level);
1327                 sb->new_chunk = cpu_to_le32(mddev->new_chunk>>9);
1328         }
1329
1330         max_dev = 0;
1331         rdev_for_each(rdev2, tmp, mddev)
1332                 if (rdev2->desc_nr+1 > max_dev)
1333                         max_dev = rdev2->desc_nr+1;
1334
1335         if (max_dev > le32_to_cpu(sb->max_dev))
1336                 sb->max_dev = cpu_to_le32(max_dev);
1337         for (i=0; i<max_dev;i++)
1338                 sb->dev_roles[i] = cpu_to_le16(0xfffe);
1339         
1340         rdev_for_each(rdev2, tmp, mddev) {
1341                 i = rdev2->desc_nr;
1342                 if (test_bit(Faulty, &rdev2->flags))
1343                         sb->dev_roles[i] = cpu_to_le16(0xfffe);
1344                 else if (test_bit(In_sync, &rdev2->flags))
1345                         sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
1346                 else if (rdev2->raid_disk >= 0 && rdev2->recovery_offset > 0)
1347                         sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
1348                 else
1349                         sb->dev_roles[i] = cpu_to_le16(0xffff);
1350         }
1351
1352         sb->sb_csum = calc_sb_1_csum(sb);
1353 }
1354
1355 static unsigned long long
1356 super_1_rdev_size_change(mdk_rdev_t *rdev, unsigned long long size)
1357 {
1358         struct mdp_superblock_1 *sb;
1359         unsigned long long max_size;
1360         if (size && size < rdev->mddev->size)
1361                 return 0; /* component must fit device */
1362         size *= 2; /* convert to sectors */
1363         if (rdev->sb_offset < rdev->data_offset/2) {
1364                 /* minor versions 1 and 2; superblock before data */
1365                 max_size = (rdev->bdev->bd_inode->i_size >> 9);
1366                 max_size -= rdev->data_offset;
1367                 if (!size || size > max_size)
1368                         size = max_size;
1369         } else if (rdev->mddev->bitmap_offset) {
1370                 /* minor version 0 with bitmap we can't move */
1371                 return 0;
1372         } else {
1373                 /* minor version 0; superblock after data */
1374                 sector_t sb_offset;
1375                 sb_offset = (rdev->bdev->bd_inode->i_size >> 9) - 8*2;
1376                 sb_offset &= ~(sector_t)(4*2 - 1);
1377                 max_size = rdev->size*2 + sb_offset - rdev->sb_offset*2;
1378                 if (!size || size > max_size)
1379                         size = max_size;
1380                 rdev->sb_offset = sb_offset/2;
1381         }
1382         sb = (struct mdp_superblock_1 *) page_address(rdev->sb_page);
1383         sb->data_size = cpu_to_le64(size);
1384         sb->super_offset = rdev->sb_offset*2;
1385         sb->sb_csum = calc_sb_1_csum(sb);
1386         md_super_write(rdev->mddev, rdev, rdev->sb_offset << 1, rdev->sb_size,
1387                        rdev->sb_page);
1388         md_super_wait(rdev->mddev);
1389         return size/2; /* kB for sysfs */
1390 }
1391
1392 static struct super_type super_types[] = {
1393         [0] = {
1394                 .name   = "0.90.0",
1395                 .owner  = THIS_MODULE,
1396                 .load_super         = super_90_load,
1397                 .validate_super     = super_90_validate,
1398                 .sync_super         = super_90_sync,
1399                 .rdev_size_change   = super_90_rdev_size_change,
1400         },
1401         [1] = {
1402                 .name   = "md-1",
1403                 .owner  = THIS_MODULE,
1404                 .load_super         = super_1_load,
1405                 .validate_super     = super_1_validate,
1406                 .sync_super         = super_1_sync,
1407                 .rdev_size_change   = super_1_rdev_size_change,
1408         },
1409 };
1410
1411 static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2)
1412 {
1413         struct list_head *tmp, *tmp2;
1414         mdk_rdev_t *rdev, *rdev2;
1415
1416         rdev_for_each(rdev, tmp, mddev1)
1417                 rdev_for_each(rdev2, tmp2, mddev2)
1418                         if (rdev->bdev->bd_contains ==
1419                             rdev2->bdev->bd_contains)
1420                                 return 1;
1421
1422         return 0;
1423 }
1424
1425 static LIST_HEAD(pending_raid_disks);
1426
1427 static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
1428 {
1429         char b[BDEVNAME_SIZE];
1430         struct kobject *ko;
1431         char *s;
1432         int err;
1433
1434         if (rdev->mddev) {
1435                 MD_BUG();
1436                 return -EINVAL;
1437         }
1438
1439         /* prevent duplicates */
1440         if (find_rdev(mddev, rdev->bdev->bd_dev))
1441                 return -EEXIST;
1442
1443         /* make sure rdev->size exceeds mddev->size */
1444         if (rdev->size && (mddev->size == 0 || rdev->size < mddev->size)) {
1445                 if (mddev->pers) {
1446                         /* Cannot change size, so fail
1447                          * If mddev->level <= 0, then we don't care
1448                          * about aligning sizes (e.g. linear)
1449                          */
1450                         if (mddev->level > 0)
1451                                 return -ENOSPC;
1452                 } else
1453                         mddev->size = rdev->size;
1454         }
1455
1456         /* Verify rdev->desc_nr is unique.
1457          * If it is -1, assign a free number, else
1458          * check number is not in use
1459          */
1460         if (rdev->desc_nr < 0) {
1461                 int choice = 0;
1462                 if (mddev->pers) choice = mddev->raid_disks;
1463                 while (find_rdev_nr(mddev, choice))
1464                         choice++;
1465                 rdev->desc_nr = choice;
1466         } else {
1467                 if (find_rdev_nr(mddev, rdev->desc_nr))
1468                         return -EBUSY;
1469         }
1470         bdevname(rdev->bdev,b);
1471         while ( (s=strchr(b, '/')) != NULL)
1472                 *s = '!';
1473
1474         rdev->mddev = mddev;
1475         printk(KERN_INFO "md: bind<%s>\n", b);
1476
1477         if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
1478                 goto fail;
1479
1480         if (rdev->bdev->bd_part)
1481                 ko = &rdev->bdev->bd_part->dev.kobj;
1482         else
1483                 ko = &rdev->bdev->bd_disk->dev.kobj;
1484         if ((err = sysfs_create_link(&rdev->kobj, ko, "block"))) {
1485                 kobject_del(&rdev->kobj);
1486                 goto fail;
1487         }
1488         list_add(&rdev->same_set, &mddev->disks);
1489         bd_claim_by_disk(rdev->bdev, rdev->bdev->bd_holder, mddev->gendisk);
1490         return 0;
1491
1492  fail:
1493         printk(KERN_WARNING "md: failed to register dev-%s for %s\n",
1494                b, mdname(mddev));
1495         return err;
1496 }
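/*
 * Illustrative sysfs layout after a successful bind (device names are
 * examples only): the kobject_add() and sysfs_create_link() calls above
 * produce roughly
 *
 *   /sys/block/md0/md/dev-sda1/        <- rdev->kobj
 *   /sys/block/md0/md/dev-sda1/block   -> link to the component device
 *
 * Any '/' in the bdev name is first rewritten to '!' so that the kobject
 * name stays a single path component (e.g. "cciss/c0d0p1" -> "cciss!c0d0p1").
 */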
1497
1498 static void md_delayed_delete(struct work_struct *ws)
1499 {
1500         mdk_rdev_t *rdev = container_of(ws, mdk_rdev_t, del_work);
1501         kobject_del(&rdev->kobj);
1502         kobject_put(&rdev->kobj);
1503 }
1504
1505 static void unbind_rdev_from_array(mdk_rdev_t * rdev)
1506 {
1507         char b[BDEVNAME_SIZE];
1508         if (!rdev->mddev) {
1509                 MD_BUG();
1510                 return;
1511         }
1512         bd_release_from_disk(rdev->bdev, rdev->mddev->gendisk);
1513         list_del_init(&rdev->same_set);
1514         printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b));
1515         rdev->mddev = NULL;
1516         sysfs_remove_link(&rdev->kobj, "block");
1517
1518         /* We need to delay this, otherwise we can deadlock when
1519          * writing 'remove' to "dev/state"
1520          */
1521         INIT_WORK(&rdev->del_work, md_delayed_delete);
1522         kobject_get(&rdev->kobj);
1523         schedule_work(&rdev->del_work);
1524 }
1525
1526 /*
1527  * prevent the device from being mounted, repartitioned or
1528  * otherwise reused by a RAID array (or any other kernel
1529  * subsystem), by bd_claiming the device.
1530  */
1531 static int lock_rdev(mdk_rdev_t *rdev, dev_t dev, int shared)
1532 {
1533         int err = 0;
1534         struct block_device *bdev;
1535         char b[BDEVNAME_SIZE];
1536
1537         bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE);
1538         if (IS_ERR(bdev)) {
1539                 printk(KERN_ERR "md: could not open %s.\n",
1540                         __bdevname(dev, b));
1541                 return PTR_ERR(bdev);
1542         }
1543         err = bd_claim(bdev, shared ? (mdk_rdev_t *)lock_rdev : rdev);
1544         if (err) {
1545                 printk(KERN_ERR "md: could not bd_claim %s.\n",
1546                         bdevname(bdev, b));
1547                 blkdev_put(bdev);
1548                 return err;
1549         }
1550         if (!shared)
1551                 set_bit(AllReserved, &rdev->flags);
1552         rdev->bdev = bdev;
1553         return err;
1554 }
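/*
 * Note on the bd_claim() holder above: a shared claim passes the same
 * cookie for every rdev (the address of lock_rdev itself) so that several
 * arrays can claim one device, while an exclusive claim uses the rdev and
 * sets AllReserved, which makes the overlap checks in rdev_size_store()
 * treat the whole device as reserved.
 */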
1555
1556 static void unlock_rdev(mdk_rdev_t *rdev)
1557 {
1558         struct block_device *bdev = rdev->bdev;
1559         rdev->bdev = NULL;
1560         if (!bdev)
1561                 MD_BUG();
1562         bd_release(bdev);
1563         blkdev_put(bdev);
1564 }
1565
1566 void md_autodetect_dev(dev_t dev);
1567
1568 static void export_rdev(mdk_rdev_t * rdev)
1569 {
1570         char b[BDEVNAME_SIZE];
1571         printk(KERN_INFO "md: export_rdev(%s)\n",
1572                 bdevname(rdev->bdev,b));
1573         if (rdev->mddev)
1574                 MD_BUG();
1575         free_disk_sb(rdev);
1576         list_del_init(&rdev->same_set);
1577 #ifndef MODULE
1578         if (test_bit(AutoDetected, &rdev->flags))
1579                 md_autodetect_dev(rdev->bdev->bd_dev);
1580 #endif
1581         unlock_rdev(rdev);
1582         kobject_put(&rdev->kobj);
1583 }
1584
1585 static void kick_rdev_from_array(mdk_rdev_t * rdev)
1586 {
1587         unbind_rdev_from_array(rdev);
1588         export_rdev(rdev);
1589 }
1590
1591 static void export_array(mddev_t *mddev)
1592 {
1593         struct list_head *tmp;
1594         mdk_rdev_t *rdev;
1595
1596         rdev_for_each(rdev, tmp, mddev) {
1597                 if (!rdev->mddev) {
1598                         MD_BUG();
1599                         continue;
1600                 }
1601                 kick_rdev_from_array(rdev);
1602         }
1603         if (!list_empty(&mddev->disks))
1604                 MD_BUG();
1605         mddev->raid_disks = 0;
1606         mddev->major_version = 0;
1607 }
1608
1609 static void print_desc(mdp_disk_t *desc)
1610 {
1611         printk(" DISK<N:%d,(%d,%d),R:%d,S:%d>\n", desc->number,
1612                 desc->major,desc->minor,desc->raid_disk,desc->state);
1613 }
1614
1615 static void print_sb(mdp_super_t *sb)
1616 {
1617         int i;
1618
1619         printk(KERN_INFO 
1620                 "md:  SB: (V:%d.%d.%d) ID:<%08x.%08x.%08x.%08x> CT:%08x\n",
1621                 sb->major_version, sb->minor_version, sb->patch_version,
1622                 sb->set_uuid0, sb->set_uuid1, sb->set_uuid2, sb->set_uuid3,
1623                 sb->ctime);
1624         printk(KERN_INFO "md:     L%d S%08d ND:%d RD:%d md%d LO:%d CS:%d\n",
1625                 sb->level, sb->size, sb->nr_disks, sb->raid_disks,
1626                 sb->md_minor, sb->layout, sb->chunk_size);
1627         printk(KERN_INFO "md:     UT:%08x ST:%d AD:%d WD:%d"
1628                 " FD:%d SD:%d CSUM:%08x E:%08lx\n",
1629                 sb->utime, sb->state, sb->active_disks, sb->working_disks,
1630                 sb->failed_disks, sb->spare_disks,
1631                 sb->sb_csum, (unsigned long)sb->events_lo);
1632
1633         printk(KERN_INFO);
1634         for (i = 0; i < MD_SB_DISKS; i++) {
1635                 mdp_disk_t *desc;
1636
1637                 desc = sb->disks + i;
1638                 if (desc->number || desc->major || desc->minor ||
1639                     desc->raid_disk || (desc->state && (desc->state != 4))) {
1640                         printk("     D %2d: ", i);
1641                         print_desc(desc);
1642                 }
1643         }
1644         printk(KERN_INFO "md:     THIS: ");
1645         print_desc(&sb->this_disk);
1646
1647 }
1648
1649 static void print_rdev(mdk_rdev_t *rdev)
1650 {
1651         char b[BDEVNAME_SIZE];
1652         printk(KERN_INFO "md: rdev %s, SZ:%08llu F:%d S:%d DN:%u\n",
1653                 bdevname(rdev->bdev,b), (unsigned long long)rdev->size,
1654                 test_bit(Faulty, &rdev->flags), test_bit(In_sync, &rdev->flags),
1655                 rdev->desc_nr);
1656         if (rdev->sb_loaded) {
1657                 printk(KERN_INFO "md: rdev superblock:\n");
1658                 print_sb((mdp_super_t*)page_address(rdev->sb_page));
1659         } else
1660                 printk(KERN_INFO "md: no rdev superblock!\n");
1661 }
1662
1663 static void md_print_devices(void)
1664 {
1665         struct list_head *tmp, *tmp2;
1666         mdk_rdev_t *rdev;
1667         mddev_t *mddev;
1668         char b[BDEVNAME_SIZE];
1669
1670         printk("\n");
1671         printk("md:     **********************************\n");
1672         printk("md:     * <COMPLETE RAID STATE PRINTOUT> *\n");
1673         printk("md:     **********************************\n");
1674         for_each_mddev(mddev, tmp) {
1675
1676                 if (mddev->bitmap)
1677                         bitmap_print_sb(mddev->bitmap);
1678                 else
1679                         printk("%s: ", mdname(mddev));
1680                 rdev_for_each(rdev, tmp2, mddev)
1681                         printk("<%s>", bdevname(rdev->bdev,b));
1682                 printk("\n");
1683
1684                 rdev_for_each(rdev, tmp2, mddev)
1685                         print_rdev(rdev);
1686         }
1687         printk("md:     **********************************\n");
1688         printk("\n");
1689 }
1690
1691
1692 static void sync_sbs(mddev_t * mddev, int nospares)
1693 {
1694         /* Update each superblock (in-memory image), but
1695          * if we are allowed to, skip spares which already
1696          * have the right event counter, or have one earlier
1697          * (which would mean they aren't being marked as dirty
1698          * with the rest of the array)
1699          */
1700         mdk_rdev_t *rdev;
1701         struct list_head *tmp;
1702
1703         rdev_for_each(rdev, tmp, mddev) {
1704                 if (rdev->sb_events == mddev->events ||
1705                     (nospares &&
1706                      rdev->raid_disk < 0 &&
1707                      (rdev->sb_events&1)==0 &&
1708                      rdev->sb_events+1 == mddev->events)) {
1709                         /* Don't update this superblock */
1710                         rdev->sb_loaded = 2;
1711                 } else {
1712                         super_types[mddev->major_version].
1713                                 sync_super(mddev, rdev);
1714                         rdev->sb_loaded = 1;
1715                 }
1716         }
1717 }
1718
1719 static void md_update_sb(mddev_t * mddev, int force_change)
1720 {
1721         struct list_head *tmp;
1722         mdk_rdev_t *rdev;
1723         int sync_req;
1724         int nospares = 0;
1725
1726         if (mddev->external)
1727                 return;
1728 repeat:
1729         spin_lock_irq(&mddev->write_lock);
1730
1731         set_bit(MD_CHANGE_PENDING, &mddev->flags);
1732         if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
1733                 force_change = 1;
1734         if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags))
1735                 /* just a clean <-> dirty transition; possibly leave spares alone,
1736                  * though if the events count doesn't have the right even/odd
1737                  * parity, we will have to do the spares after all
1738                  */
1739                 nospares = 1;
1740         if (force_change)
1741                 nospares = 0;
1742         if (mddev->degraded)
1743                 /* If the array is degraded, then skipping spares is both
1744                  * dangerous and fairly pointless.
1745                  * Dangerous because a device that was removed from the array
1746                  * might have an event_count that still looks up-to-date,
1747                  * so it can be re-added without a resync.
1748                  * Pointless because if there are any spares to skip,
1749                  * then a recovery will happen and soon that array won't
1750                  * be degraded any more and the spare can go back to sleep then.
1751                  */
1752                 nospares = 0;
1753
1754         sync_req = mddev->in_sync;
1755         mddev->utime = get_seconds();
1756
1757         /* If this is just a dirty<->clean transition, and the array is clean
1758          * and 'events' is odd, we can roll back to the previous clean state */
1759         if (nospares
1760             && (mddev->in_sync && mddev->recovery_cp == MaxSector)
1761             && (mddev->events & 1)
1762             && mddev->events != 1)
1763                 mddev->events--;
1764         else {
1765                 /* otherwise we have to go forward and ... */
1766                 mddev->events ++;
1767                 if (!mddev->in_sync || mddev->recovery_cp != MaxSector) { /* not clean */
1768                         /* .. if the array isn't clean, insist on an odd 'events' */
1769                         if ((mddev->events&1)==0) {
1770                                 mddev->events++;
1771                                 nospares = 0;
1772                         }
1773                 } else {
1774                         /* otherwise insist on an even 'events' (for clean states) */
1775                         if ((mddev->events&1)) {
1776                                 mddev->events++;
1777                                 nospares = 0;
1778                         }
1779                 }
1780         }
1781
1782         if (!mddev->events) {
1783                 /*
1784                  * oops, this 64-bit counter should never wrap.
1785                  * Either we are somewhere around ~1 trillion A.D.
1786                  * (assuming one reboot per second), or we have a bug:
1787                  */
1788                 MD_BUG();
1789                 mddev->events --;
1790         }
1791
1792         /*
1793          * do not write anything to disk if using
1794          * nonpersistent superblocks
1795          */
1796         if (!mddev->persistent) {
1797                 if (!mddev->external)
1798                         clear_bit(MD_CHANGE_PENDING, &mddev->flags);
1799
1800                 spin_unlock_irq(&mddev->write_lock);
1801                 wake_up(&mddev->sb_wait);
1802                 return;
1803         }
1804         sync_sbs(mddev, nospares);
1805         spin_unlock_irq(&mddev->write_lock);
1806
1807         dprintk(KERN_INFO 
1808                 "md: updating %s RAID superblock on device (in sync %d)\n",
1809                 mdname(mddev),mddev->in_sync);
1810
1811         bitmap_update_sb(mddev->bitmap);
1812         rdev_for_each(rdev, tmp, mddev) {
1813                 char b[BDEVNAME_SIZE];
1814                 dprintk(KERN_INFO "md: ");
1815                 if (rdev->sb_loaded != 1)
1816                         continue; /* no noise on spare devices */
1817                 if (test_bit(Faulty, &rdev->flags))
1818                         dprintk("(skipping faulty ");
1819
1820                 dprintk("%s ", bdevname(rdev->bdev,b));
1821                 if (!test_bit(Faulty, &rdev->flags)) {
1822                         md_super_write(mddev,rdev,
1823                                        rdev->sb_offset<<1, rdev->sb_size,
1824                                        rdev->sb_page);
1825                         dprintk(KERN_INFO "(write) %s's sb offset: %llu\n",
1826                                 bdevname(rdev->bdev,b),
1827                                 (unsigned long long)rdev->sb_offset);
1828                         rdev->sb_events = mddev->events;
1829
1830                 } else
1831                         dprintk(")\n");
1832                 if (mddev->level == LEVEL_MULTIPATH)
1833                         /* only need to write one superblock... */
1834                         break;
1835         }
1836         md_super_wait(mddev);
1837         /* if there was a failure, MD_CHANGE_DEVS was set, and we re-write super */
1838
1839         spin_lock_irq(&mddev->write_lock);
1840         if (mddev->in_sync != sync_req ||
1841             test_bit(MD_CHANGE_DEVS, &mddev->flags)) {
1842                 /* have to write it out again */
1843                 spin_unlock_irq(&mddev->write_lock);
1844                 goto repeat;
1845         }
1846         clear_bit(MD_CHANGE_PENDING, &mddev->flags);
1847         spin_unlock_irq(&mddev->write_lock);
1848         wake_up(&mddev->sb_wait);
1849
1850 }
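/*
 * The even/odd 'events' convention above, by (illustrative) example:
 * a clean array sits at an even count, say 100.  Going dirty bumps it to
 * 101, and spares still holding 100 need not be rewritten (sync_sbs()
 * skips them).  Going clean again would bump it to 102, but when nothing
 * else changed we roll back to 100 instead, again sparing the spares.
 */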
1851
1852 /* Words written to sysfs files may, or may not, be \n terminated.
1853  * We want to accept either case. For this we use cmd_match.
1854  */
1855 static int cmd_match(const char *cmd, const char *str)
1856 {
1857         /* See if cmd, written into a sysfs file, matches
1858          * str.  They must either be the same, or cmd can
1859          * have a trailing newline
1860          */
1861         while (*cmd && *str && *cmd == *str) {
1862                 cmd++;
1863                 str++;
1864         }
1865         if (*cmd == '\n')
1866                 cmd++;
1867         if (*str || *cmd)
1868                 return 0;
1869         return 1;
1870 }
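/*
 * For example: cmd_match("faulty\n", "faulty") and cmd_match("faulty",
 * "faulty") both return 1, while cmd_match("fault", "faulty") and
 * cmd_match("faultyX", "faulty") both return 0.
 */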
1871
1872 struct rdev_sysfs_entry {
1873         struct attribute attr;
1874         ssize_t (*show)(mdk_rdev_t *, char *);
1875         ssize_t (*store)(mdk_rdev_t *, const char *, size_t);
1876 };
1877
1878 static ssize_t
1879 state_show(mdk_rdev_t *rdev, char *page)
1880 {
1881         char *sep = "";
1882         size_t len = 0;
1883
1884         if (test_bit(Faulty, &rdev->flags)) {
1885                 len+= sprintf(page+len, "%sfaulty",sep);
1886                 sep = ",";
1887         }
1888         if (test_bit(In_sync, &rdev->flags)) {
1889                 len += sprintf(page+len, "%sin_sync",sep);
1890                 sep = ",";
1891         }
1892         if (test_bit(WriteMostly, &rdev->flags)) {
1893                 len += sprintf(page+len, "%swrite_mostly",sep);
1894                 sep = ",";
1895         }
1896         if (test_bit(Blocked, &rdev->flags)) {
1897                 len += sprintf(page+len, "%sblocked", sep);
1898                 sep = ",";
1899         }
1900         if (!test_bit(Faulty, &rdev->flags) &&
1901             !test_bit(In_sync, &rdev->flags)) {
1902                 len += sprintf(page+len, "%sspare", sep);
1903                 sep = ",";
1904         }
1905         return len+sprintf(page+len, "\n");
1906 }
1907
1908 static ssize_t
1909 state_store(mdk_rdev_t *rdev, const char *buf, size_t len)
1910 {
1911         /* can write
1912          *  faulty  - simulates an error
1913          *  remove  - disconnects the device
1914          *  writemostly - sets write_mostly
1915          *  -writemostly - clears write_mostly
1916          *  blocked - sets the Blocked flag
1917          *  -blocked - clears the Blocked flag
1918          */
1919         int err = -EINVAL;
1920         if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
1921                 md_error(rdev->mddev, rdev);
1922                 err = 0;
1923         } else if (cmd_match(buf, "remove")) {
1924                 if (rdev->raid_disk >= 0)
1925                         err = -EBUSY;
1926                 else {
1927                         mddev_t *mddev = rdev->mddev;
1928                         kick_rdev_from_array(rdev);
1929                         if (mddev->pers)
1930                                 md_update_sb(mddev, 1);
1931                         md_new_event(mddev);
1932                         err = 0;
1933                 }
1934         } else if (cmd_match(buf, "writemostly")) {
1935                 set_bit(WriteMostly, &rdev->flags);
1936                 err = 0;
1937         } else if (cmd_match(buf, "-writemostly")) {
1938                 clear_bit(WriteMostly, &rdev->flags);
1939                 err = 0;
1940         } else if (cmd_match(buf, "blocked")) {
1941                 set_bit(Blocked, &rdev->flags);
1942                 err = 0;
1943         } else if (cmd_match(buf, "-blocked")) {
1944                 clear_bit(Blocked, &rdev->flags);
1945                 wake_up(&rdev->blocked_wait);
1946                 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
1947                 md_wakeup_thread(rdev->mddev->thread);
1948
1949                 err = 0;
1950         }
1951         if (!err)
1952                 sysfs_notify(&rdev->kobj, NULL, "state");
1953         return err ? err : len;
1954 }
1955 static struct rdev_sysfs_entry rdev_state =
1956 __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
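/*
 * Illustrative use of the per-device "state" file (paths are examples):
 *
 *   echo faulty   > /sys/block/md0/md/dev-sda1/state  # simulate an error
 *   echo remove   > /sys/block/md0/md/dev-sda1/state  # drop failed/spare dev
 *   echo -blocked > /sys/block/md0/md/dev-sda1/state  # clear Blocked flag
 */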
1957
1958 static ssize_t
1959 errors_show(mdk_rdev_t *rdev, char *page)
1960 {
1961         return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
1962 }
1963
1964 static ssize_t
1965 errors_store(mdk_rdev_t *rdev, const char *buf, size_t len)
1966 {
1967         char *e;
1968         unsigned long n = simple_strtoul(buf, &e, 10);
1969         if (*buf && (*e == 0 || *e == '\n')) {
1970                 atomic_set(&rdev->corrected_errors, n);
1971                 return len;
1972         }
1973         return -EINVAL;
1974 }
1975 static struct rdev_sysfs_entry rdev_errors =
1976 __ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store);
1977
1978 static ssize_t
1979 slot_show(mdk_rdev_t *rdev, char *page)
1980 {
1981         if (rdev->raid_disk < 0)
1982                 return sprintf(page, "none\n");
1983         else
1984                 return sprintf(page, "%d\n", rdev->raid_disk);
1985 }
1986
1987 static ssize_t
1988 slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
1989 {
1990         char *e;
1991         int err;
1992         char nm[20];
1993         int slot = simple_strtoul(buf, &e, 10);
1994         if (strncmp(buf, "none", 4)==0)
1995                 slot = -1;
1996         else if (e==buf || (*e && *e!= '\n'))
1997                 return -EINVAL;
1998         if (rdev->mddev->pers && slot == -1) {
1999                 /* Setting 'slot' on an active array requires also
2000                  * updating the 'rd%d' link, and communicating
2001                  * with the personality via ->hot_*_disk.
2002                  * For now we only support removing
2003                  * failed/spare devices.  This normally happens automatically,
2004                  * but not when the metadata is externally managed.
2005                  */
2006                 if (rdev->raid_disk == -1)
2007                         return -EEXIST;
2008                 /* personality does all needed checks */
2009                 if (rdev->mddev->pers->hot_add_disk == NULL)
2010                         return -EINVAL;
2011                 err = rdev->mddev->pers->
2012                         hot_remove_disk(rdev->mddev, rdev->raid_disk);
2013                 if (err)
2014                         return err;
2015                 sprintf(nm, "rd%d", rdev->raid_disk);
2016                 sysfs_remove_link(&rdev->mddev->kobj, nm);
2017                 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
2018                 md_wakeup_thread(rdev->mddev->thread);
2019         } else if (rdev->mddev->pers) {
2020                 mdk_rdev_t *rdev2;
2021                 struct list_head *tmp;
2022                 /* Activating a spare .. or possibly reactivating
2023                  * if we ever get bitmaps working here.
2024                  */
2025
2026                 if (rdev->raid_disk != -1)
2027                         return -EBUSY;
2028
2029                 if (rdev->mddev->pers->hot_add_disk == NULL)
2030                         return -EINVAL;
2031
2032                 rdev_for_each(rdev2, tmp, rdev->mddev)
2033                         if (rdev2->raid_disk == slot)
2034                                 return -EEXIST;
2035
2036                 rdev->raid_disk = slot;
2037                 if (test_bit(In_sync, &rdev->flags))
2038                         rdev->saved_raid_disk = slot;
2039                 else
2040                         rdev->saved_raid_disk = -1;
2041                 err = rdev->mddev->pers->
2042                         hot_add_disk(rdev->mddev, rdev);
2043                 if (err) {
2044                         rdev->raid_disk = -1;
2045                         return err;
2046                 } else
2047                         sysfs_notify(&rdev->kobj, NULL, "state");
2048                 sprintf(nm, "rd%d", rdev->raid_disk);
2049                 if (sysfs_create_link(&rdev->mddev->kobj, &rdev->kobj, nm))
2050                         printk(KERN_WARNING
2051                                "md: cannot register "
2052                                "%s for %s\n",
2053                                nm, mdname(rdev->mddev));
2054
2055                 /* don't wake anyone up; leave that to userspace. */
2056         } else {
2057                 if (slot >= rdev->mddev->raid_disks)
2058                         return -ENOSPC;
2059                 rdev->raid_disk = slot;
2060                 /* assume it is working */
2061                 clear_bit(Faulty, &rdev->flags);
2062                 clear_bit(WriteMostly, &rdev->flags);
2063                 set_bit(In_sync, &rdev->flags);
2064                 sysfs_notify(&rdev->kobj, NULL, "state");
2065         }
2066         return len;
2067 }
2068
2069
2070 static struct rdev_sysfs_entry rdev_slot =
2071 __ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store);
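/*
 * Illustrative use of "slot": on an active array, writing a number
 * hot-adds this device into that raid slot and "none" removes a
 * failed/spare device; on an inactive array the slot is simply
 * recorded, e.g. (example path) echo 2 > /sys/block/md0/md/dev-sdc1/slot
 */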
2072
2073 static ssize_t
2074 offset_show(mdk_rdev_t *rdev, char *page)
2075 {
2076         return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset);
2077 }
2078
2079 static ssize_t
2080 offset_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2081 {
2082         char *e;
2083         unsigned long long offset = simple_strtoull(buf, &e, 10);
2084         if (e==buf || (*e && *e != '\n'))
2085                 return -EINVAL;
2086         if (rdev->mddev->pers && rdev->raid_disk >= 0)
2087                 return -EBUSY;
2088         if (rdev->size && rdev->mddev->external)
2089                 /* Must set offset before size, so overlap checks
2090                  * can be sane */
2091                 return -EBUSY;
2092         rdev->data_offset = offset;
2093         return len;
2094 }
2095
2096 static struct rdev_sysfs_entry rdev_offset =
2097 __ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store);
2098
2099 static ssize_t
2100 rdev_size_show(mdk_rdev_t *rdev, char *page)
2101 {
2102         return sprintf(page, "%llu\n", (unsigned long long)rdev->size);
2103 }
2104
2105 static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2)
2106 {
2107         /* check if two start/length pairs overlap */
2108         if (s1+l1 <= s2)
2109                 return 0;
2110         if (s2+l2 <= s1)
2111                 return 0;
2112         return 1;
2113 }
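/*
 * For example: overlaps(0, 100, 50, 100) returns 1 (the ranges
 * intersect), while overlaps(0, 100, 100, 50) returns 0 (they only
 * touch at sector 100).
 */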
2114
2115 static ssize_t
2116 rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2117 {
2118         char *e;
2119         unsigned long long size = simple_strtoull(buf, &e, 10);
2120         unsigned long long oldsize = rdev->size;
2121         mddev_t *my_mddev = rdev->mddev;
2122
2123         if (e==buf || (*e && *e != '\n'))
2124                 return -EINVAL;
2125         if (my_mddev->pers && rdev->raid_disk >= 0) {
2126                 if (rdev->mddev->persistent) {
2127                         size = super_types[rdev->mddev->major_version].
2128                                 rdev_size_change(rdev, size);
2129                         if (!size)
2130                                 return -EBUSY;
2131                 } else if (!size) {
2132                         size = (rdev->bdev->bd_inode->i_size >> 10);
2133                         size -= rdev->data_offset/2;
2134                 }
2135                 if (size < rdev->mddev->size)
2136                         return -EINVAL; /* component must fit device */
2137         }
2138
2139         rdev->size = size;
2140         if (size > oldsize && rdev->mddev->external) {
2141                 /* need to check that all other rdevs with the same ->bdev
2142                  * do not overlap.  We need to unlock the mddev to avoid
2143                  * a deadlock.  We have already changed rdev->size, and if
2144                  * we have to change it back, we will have the lock again.
2145                  */
2146                 mddev_t *mddev;
2147                 int overlap = 0;
2148                 struct list_head *tmp, *tmp2;
2149
2150                 mddev_unlock(my_mddev);
2151                 for_each_mddev(mddev, tmp) {
2152                         mdk_rdev_t *rdev2;
2153
2154                         mddev_lock(mddev);
2155                         rdev_for_each(rdev2, tmp2, mddev)
2156                                 if (test_bit(AllReserved, &rdev2->flags) ||
2157                                     (rdev->bdev == rdev2->bdev &&
2158                                      rdev != rdev2 &&
2159                                      overlaps(rdev->data_offset, rdev->size,
2160                                             rdev2->data_offset, rdev2->size))) {
2161                                         overlap = 1;
2162                                         break;
2163                                 }
2164                         mddev_unlock(mddev);
2165                         if (overlap) {
2166                                 mddev_put(mddev);
2167                                 break;
2168                         }
2169                 }
2170                 mddev_lock(my_mddev);
2171                 if (overlap) {
2172                         /* Someone else could have slipped in a size
2173                          * change here, but doing so is just silly.
2174                          * We put oldsize back because we *know* it is
2175                          * safe, and trust userspace not to race with
2176                          * itself
2177                          */
2178                         rdev->size = oldsize;
2179                         return -EBUSY;
2180                 }
2181         }
2182         if (size < my_mddev->size || my_mddev->size == 0)
2183                 my_mddev->size = size;
2184         return len;
2185 }
2186
2187 static struct rdev_sysfs_entry rdev_size =
2188 __ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);
2189
2190 static struct attribute *rdev_default_attrs[] = {
2191         &rdev_state.attr,
2192         &rdev_errors.attr,
2193         &rdev_slot.attr,
2194         &rdev_offset.attr,
2195         &rdev_size.attr,
2196         NULL,
2197 };
2198 static ssize_t
2199 rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
2200 {
2201         struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
2202         mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj);
2203         mddev_t *mddev = rdev->mddev;
2204         ssize_t rv;
2205
2206         if (!entry->show)
2207                 return -EIO;
2208
2209         rv = mddev ? mddev_lock(mddev) : -EBUSY;
2210         if (!rv) {
2211                 if (rdev->mddev == NULL)
2212                         rv = -EBUSY;
2213                 else
2214                         rv = entry->show(rdev, page);
2215                 mddev_unlock(mddev);
2216         }
2217         return rv;
2218 }
2219
2220 static ssize_t
2221 rdev_attr_store(struct kobject *kobj, struct attribute *attr,
2222               const char *page, size_t length)
2223 {
2224         struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
2225         mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj);
2226         ssize_t rv;
2227         mddev_t *mddev = rdev->mddev;
2228
2229         if (!entry->store)
2230                 return -EIO;
2231         if (!capable(CAP_SYS_ADMIN))
2232                 return -EACCES;
2233         rv = mddev ? mddev_lock(mddev): -EBUSY;
2234         if (!rv) {
2235                 if (rdev->mddev == NULL)
2236                         rv = -EBUSY;
2237                 else
2238                         rv = entry->store(rdev, page, length);
2239                 mddev_unlock(mddev);
2240         }
2241         return rv;
2242 }
2243
2244 static void rdev_free(struct kobject *ko)
2245 {
2246         mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
2247         kfree(rdev);
2248 }
2249 static struct sysfs_ops rdev_sysfs_ops = {
2250         .show           = rdev_attr_show,
2251         .store          = rdev_attr_store,
2252 };
2253 static struct kobj_type rdev_ktype = {
2254         .release        = rdev_free,
2255         .sysfs_ops      = &rdev_sysfs_ops,
2256         .default_attrs  = rdev_default_attrs,
2257 };
2258
2259 /*
2260  * Import a device. If 'super_format' >= 0, then sanity check the superblock
2261  *
2262  * mark the device faulty if:
2263  *
2264  *   - the device is nonexistent (zero size)
2265  *   - the device has no valid superblock
2266  *
2267  * a faulty rdev _never_ has rdev->sb set.
2268  */
2269 static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_minor)
2270 {
2271         char b[BDEVNAME_SIZE];
2272         int err;
2273         mdk_rdev_t *rdev;
2274         sector_t size;
2275
2276         rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
2277         if (!rdev) {
2278                 printk(KERN_ERR "md: could not alloc mem for new device!\n");
2279                 return ERR_PTR(-ENOMEM);
2280         }
2281
2282         if ((err = alloc_disk_sb(rdev)))
2283                 goto abort_free;
2284
2285         err = lock_rdev(rdev, newdev, super_format == -2);
2286         if (err)
2287                 goto abort_free;
2288
2289         kobject_init(&rdev->kobj, &rdev_ktype);
2290
2291         rdev->desc_nr = -1;
2292         rdev->saved_raid_disk = -1;
2293         rdev->raid_disk = -1;
2294         rdev->flags = 0;
2295         rdev->data_offset = 0;
2296         rdev->sb_events = 0;
2297         atomic_set(&rdev->nr_pending, 0);
2298         atomic_set(&rdev->read_errors, 0);
2299         atomic_set(&rdev->corrected_errors, 0);
2300
2301         size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
2302         if (!size) {
2303                 printk(KERN_WARNING 
2304                         "md: %s has zero or unknown size, marking faulty!\n",
2305                         bdevname(rdev->bdev,b));
2306                 err = -EINVAL;
2307                 goto abort_free;
2308         }
2309
2310         if (super_format >= 0) {
2311                 err = super_types[super_format].
2312                         load_super(rdev, NULL, super_minor);
2313                 if (err == -EINVAL) {
2314                         printk(KERN_WARNING
2315                                 "md: %s does not have a valid v%d.%d "
2316                                "superblock, not importing!\n",
2317                                 bdevname(rdev->bdev,b),
2318                                super_format, super_minor);
2319                         goto abort_free;
2320                 }
2321                 if (err < 0) {
2322                         printk(KERN_WARNING 
2323                                 "md: could not read %s's sb, not importing!\n",
2324                                 bdevname(rdev->bdev,b));
2325                         goto abort_free;
2326                 }
2327         }
2328
2329         INIT_LIST_HEAD(&rdev->same_set);
2330         init_waitqueue_head(&rdev->blocked_wait);
2331
2332         return rdev;
2333
2334 abort_free:
2335         if (rdev->sb_page) {
2336                 if (rdev->bdev)
2337                         unlock_rdev(rdev);
2338                 free_disk_sb(rdev);
2339         }
2340         kfree(rdev);
2341         return ERR_PTR(err);
2342 }
2343
2344 /*
2345  * Check a full RAID array for plausibility
2346  */
2347
2348
2349 static void analyze_sbs(mddev_t * mddev)
2350 {
2351         int i;
2352         struct list_head *tmp;
2353         mdk_rdev_t *rdev, *freshest;
2354         char b[BDEVNAME_SIZE];
2355
2356         freshest = NULL;
2357         rdev_for_each(rdev, tmp, mddev)
2358                 switch (super_types[mddev->major_version].
2359                         load_super(rdev, freshest, mddev->minor_version)) {
2360                 case 1:
2361                         freshest = rdev;
2362                         break;
2363                 case 0:
2364                         break;
2365                 default:
2366                         printk(KERN_ERR
2367                                 "md: fatal superblock inconsistency in %s"
2368                                 " -- removing from array\n", 
2369                                 bdevname(rdev->bdev,b));
2370                         kick_rdev_from_array(rdev);
2371                 }
2372
2373
2374         super_types[mddev->major_version].
2375                 validate_super(mddev, freshest);
2376
2377         i = 0;
2378         rdev_for_each(rdev, tmp, mddev) {
2379                 if (rdev != freshest)
2380                         if (super_types[mddev->major_version].
2381                             validate_super(mddev, rdev)) {
2382                                 printk(KERN_WARNING "md: kicking non-fresh %s"
2383                                         " from array!\n",
2384                                         bdevname(rdev->bdev,b));
2385                                 kick_rdev_from_array(rdev);
2386                                 continue;
2387                         }
2388                 if (mddev->level == LEVEL_MULTIPATH) {
2389                         rdev->desc_nr = i++;
2390                         rdev->raid_disk = rdev->desc_nr;
2391                         set_bit(In_sync, &rdev->flags);
2392                 } else if (rdev->raid_disk >= mddev->raid_disks) {
2393                         rdev->raid_disk = -1;
2394                         clear_bit(In_sync, &rdev->flags);
2395                 }
2396         }
2397
2398
2399
2400         if (mddev->recovery_cp != MaxSector &&
2401             mddev->level >= 1)
2402                 printk(KERN_ERR "md: %s: raid array is not clean"
2403                        " -- starting background reconstruction\n",
2404                        mdname(mddev));
2405
2406 }
2407
2408 static ssize_t
2409 safe_delay_show(mddev_t *mddev, char *page)
2410 {
2411         int msec = (mddev->safemode_delay*1000)/HZ;
2412         return sprintf(page, "%d.%03d\n", msec/1000, msec%1000);
2413 }
2414 static ssize_t
2415 safe_delay_store(mddev_t *mddev, const char *cbuf, size_t len)
2416 {
2417         int scale=1;
2418         int dot=0;
2419         int i;
2420         unsigned long msec;
2421         char buf[30];
2422         char *e;
2423         /* remove a period, and count digits after it */
2424         if (len >= sizeof(buf))
2425                 return -EINVAL;
2426         strlcpy(buf, cbuf, len+1); /* size len+1 so all 'len' chars are copied before the NUL */
2427         buf[len] = 0;
2428         for (i=0; i<len; i++) {
2429                 if (dot) {
2430                         if (isdigit(buf[i])) {
2431                                 buf[i-1] = buf[i];
2432                                 scale *= 10;
2433                         }
2434                         buf[i] = 0;
2435                 } else if (buf[i] == '.') {
2436                         dot=1;
2437                         buf[i] = 0;
2438                 }
2439         }
2440         msec = simple_strtoul(buf, &e, 10);
2441         if (e == buf || (*e && *e != '\n'))
2442                 return -EINVAL;
2443         msec = (msec * 1000) / scale;
2444         if (msec == 0)
2445                 mddev->safemode_delay = 0;
2446         else {
2447                 mddev->safemode_delay = (msec*HZ)/1000;
2448                 if (mddev->safemode_delay == 0)
2449                         mddev->safemode_delay = 1;
2450         }
2451         return len;
2452 }
2453 static struct md_sysfs_entry md_safe_delay =
2454 __ATTR(safe_mode_delay, S_IRUGO|S_IWUSR,safe_delay_show, safe_delay_store);
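/*
 * Illustrative example: writing "0.200" to safe_mode_delay is parsed as
 * msec = 200, so safemode_delay becomes (200*HZ)/1000 jiffies, rounded
 * up to 1 jiffy if the division would otherwise yield 0.
 */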
2455
2456 static ssize_t
2457 level_show(mddev_t *mddev, char *page)
2458 {
2459         struct mdk_personality *p = mddev->pers;
2460         if (p)
2461                 return sprintf(page, "%s\n", p->name);
2462         else if (mddev->clevel[0])
2463                 return sprintf(page, "%s\n", mddev->clevel);
2464         else if (mddev->level != LEVEL_NONE)
2465                 return sprintf(page, "%d\n", mddev->level);
2466         else
2467                 return 0;
2468 }
2469
2470 static ssize_t
2471 level_store(mddev_t *mddev, const char *buf, size_t len)
2472 {
2473         ssize_t rv = len;
2474         if (mddev->pers)
2475                 return -EBUSY;
2476         if (len == 0)
2477                 return 0;
2478         if (len >= sizeof(mddev->clevel))
2479                 return -ENOSPC;
2480         strncpy(mddev->clevel, buf, len);
2481         if (mddev->clevel[len-1] == '\n')
2482                 len--;
2483         mddev->clevel[len] = 0;
2484         mddev->level = LEVEL_NONE;
2485         return rv;
2486 }
2487
2488 static struct md_sysfs_entry md_level =
2489 __ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store);
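/*
 * Illustrative example: before an array is assembled one may seed the
 * level by name, e.g. echo raid5 > /sys/block/md0/md/level (example
 * path); once mddev->pers is set, level_store() refuses with -EBUSY.
 */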
2490
2491
2492 static ssize_t
2493 layout_show(mddev_t *mddev, char *page)
2494 {
2495         /* just a number, not meaningful for all levels */
2496         if (mddev->reshape_position != MaxSector &&
2497             mddev->layout != mddev->new_layout)
2498                 return sprintf(page, "%d (%d)\n",
2499                                mddev->new_layout, mddev->layout);
2500         return sprintf(page, "%d\n", mddev->layout);
2501 }
2502
2503 static ssize_t
2504 layout_store(mddev_t *mddev, const char *buf, size_t len)
2505 {
2506         char *e;
2507         unsigned long n = simple_strtoul(buf, &e, 10);
2508
2509         if (!*buf || (*e && *e != '\n'))
2510                 return -EINVAL;
2511
2512         if (mddev->pers)
2513                 return -EBUSY;
2514         if (mddev->reshape_position != MaxSector)
2515                 mddev->new_layout = n;
2516         else
2517                 mddev->layout = n;
2518         return len;
2519 }
2520 static struct md_sysfs_entry md_layout =
2521 __ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store);
2522
2523
2524 static ssize_t
2525 raid_disks_show(mddev_t *mddev, char *page)
2526 {
2527         if (mddev->raid_disks == 0)
2528                 return 0;
2529         if (mddev->reshape_position != MaxSector &&
2530             mddev->delta_disks != 0)
2531                 return sprintf(page, "%d (%d)\n", mddev->raid_disks,
2532                                mddev->raid_disks - mddev->delta_disks);
2533         return sprintf(page, "%d\n", mddev->raid_disks);
2534 }
2535
2536 static int update_raid_disks(mddev_t *mddev, int raid_disks);
2537
2538 static ssize_t
2539 raid_disks_store(mddev_t *mddev, const char *buf, size_t len)
2540 {
2541         char *e;
2542         int rv = 0;
2543         unsigned long n = simple_strtoul(buf, &e, 10);
2544
2545         if (!*buf || (*e && *e != '\n'))
2546                 return -EINVAL;
2547
2548         if (mddev->pers)
2549                 rv = update_raid_disks(mddev, n);
2550         else if (mddev->reshape_position != MaxSector) {
2551                 int olddisks = mddev->raid_disks - mddev->delta_disks;
2552                 mddev->delta_disks = n - olddisks;
2553                 mddev->raid_disks = n;
2554         } else
2555                 mddev->raid_disks = n;
2556         return rv ? rv : len;
2557 }
2558 static struct md_sysfs_entry md_raid_disks =
2559 __ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store);
2560
2561 static ssize_t
2562 chunk_size_show(mddev_t *mddev, char *page)
2563 {
2564         if (mddev->reshape_position != MaxSector &&
2565             mddev->chunk_size != mddev->new_chunk)
2566                 return sprintf(page, "%d (%d)\n", mddev->new_chunk,
2567                                mddev->chunk_size);
2568         return sprintf(page, "%d\n", mddev->chunk_size);
2569 }
2570
2571 static ssize_t
2572 chunk_size_store(mddev_t *mddev, const char *buf, size_t len)
2573 {
2574         /* can only set chunk_size if array is not yet active */
2575         char *e;
2576         unsigned long n = simple_strtoul(buf, &e, 10);
2577
2578         if (!*buf || (*e && *e != '\n'))
2579                 return -EINVAL;
2580
2581         if (mddev->pers)
2582                 return -EBUSY;
2583         else if (mddev->reshape_position != MaxSector)
2584                 mddev->new_chunk = n;
2585         else
2586                 mddev->chunk_size = n;
2587         return len;
2588 }
2589 static struct md_sysfs_entry md_chunk_size =
2590 __ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store);
2591
2592 static ssize_t
2593 resync_start_show(mddev_t *mddev, char *page)
2594 {
2595         return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp);
2596 }
2597
2598 static ssize_t
2599 resync_start_store(mddev_t *mddev, const char *buf, size_t len)
2600 {
2601         char *e;
2602         unsigned long long n = simple_strtoull(buf, &e, 10);
2603
2604         if (mddev->pers)
2605                 return -EBUSY;
2606         if (!*buf || (*e && *e != '\n'))
2607                 return -EINVAL;
2608
2609         mddev->recovery_cp = n;
2610         return len;
2611 }
2612 static struct md_sysfs_entry md_resync_start =
2613 __ATTR(resync_start, S_IRUGO|S_IWUSR, resync_start_show, resync_start_store);
2614
2615 /*
2616  * The array state can be:
2617  *
2618  * clear
2619  *     No devices, no size, no level
2620  *     Equivalent to STOP_ARRAY ioctl
2621  * inactive
2622  *     May have some settings, but array is not active
2623  *        all IO results in error
2624  *     When written, doesn't tear down array, but just stops it
2625  * suspended (not supported yet)
2626  *     All IO requests will block. The array can be reconfigured.
2627  *     Writing this, if accepted, will block until array is quiescent
2628  * readonly
2629  *     no resync can happen.  no superblocks get written.
2630  *     write requests fail
2631  * read-auto
2632  *     like readonly, but behaves like 'clean' on a write request.
2633  *
2634  * clean - no pending writes, but otherwise active.
2635  *     When written to inactive array, starts without resync
2636  *     If a write request arrives then
2637  *       if metadata is known, mark 'dirty' and switch to 'active'.
2638  *       if not known, block and switch to write-pending
2639  *     If written to an active array that has pending writes, then fails.
2640  * active
2641  *     fully active: IO and resync can be happening.
2642  *     When written to inactive array, starts with resync
2643  *
2644  * write-pending
2645  *     clean, but writes are blocked waiting for 'active' to be written.
2646  *
2647  * active-idle
2648  *     like active, but no writes have been seen for a while (100msec).
2649  *
2650  */
2651 enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active,
2652                    write_pending, active_idle, bad_word};
2653 static char *array_states[] = {
2654         "clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active",
2655         "write-pending", "active-idle", NULL };
2656
2657 static int match_word(const char *word, char **list)
2658 {
2659         int n;
2660         for (n=0; list[n]; n++)
2661                 if (cmd_match(word, list[n]))
2662                         break;
2663         return n;
2664 }
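/*
 * For example, match_word("clean\n", array_states) returns the index of
 * "clean", i.e. the enum value 'clean'; an unrecognised word runs off
 * the end of the list and yields 'bad_word'.
 */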
2665
2666 static ssize_t
2667 array_state_show(mddev_t *mddev, char *page)
2668 {
2669         enum array_state st = inactive;
2670
2671         if (mddev->pers)
2672                 switch(mddev->ro) {
2673                 case 1:
2674                         st = readonly;
2675                         break;
2676                 case 2:
2677                         st = read_auto;
2678                         break;
2679                 case 0:
2680                         if (mddev->in_sync)
2681                                 st = clean;
2682                         else if (test_bit(MD_CHANGE_CLEAN, &mddev->flags))
2683                                 st = write_pending;
2684                         else if (mddev->safemode)
2685                                 st = active_idle;
2686                         else
2687                                 st = active;
2688                 }
2689         else {
2690                 if (list_empty(&mddev->disks) &&
2691                     mddev->raid_disks == 0 &&
2692                     mddev->size == 0)
2693                         st = clear;
2694                 else
2695                         st = inactive;
2696         }
2697         return sprintf(page, "%s\n", array_states[st]);
2698 }
2699
2700 static int do_md_stop(mddev_t * mddev, int ro);
2701 static int do_md_run(mddev_t * mddev);
2702 static int restart_array(mddev_t *mddev);
2703
2704 static ssize_t
2705 array_state_store(mddev_t *mddev, const char *buf, size_t len)
2706 {
2707         int err = -EINVAL;
2708         enum array_state st = match_word(buf, array_states);
2709         switch(st) {
2710         case bad_word:
2711                 break;
2712         case clear:
2713                 /* stopping an active array */
2714                 if (atomic_read(&mddev->active) > 1)
2715                         return -EBUSY;
2716                 err = do_md_stop(mddev, 0);
2717                 break;
2718         case inactive:
2719                 /* stopping an active array */
2720                 if (mddev->pers) {
2721                         if (atomic_read(&mddev->active) > 1)
2722                                 return -EBUSY;
2723                         err = do_md_stop(mddev, 2);
2724                 } else
2725                         err = 0; /* already inactive */
2726                 break;
2727         case suspended:
2728                 break; /* not supported yet */
2729         case readonly:
2730                 if (mddev->pers)
2731                         err = do_md_stop(mddev, 1);
2732                 else {
2733                         mddev->ro = 1;
2734                         set_disk_ro(mddev->gendisk, 1);
2735                         err = do_md_run(mddev);
2736                 }
2737                 break;
2738         case read_auto:
2739                 if (mddev->pers) {
2740                         if (mddev->ro != 1)
2741                                 err = do_md_stop(mddev, 1);
2742                         else
2743                                 err = restart_array(mddev);
2744                         if (err == 0) {
2745                                 mddev->ro = 2;
2746                                 set_disk_ro(mddev->gendisk, 0);
2747                         }
2748                 } else {
2749                         mddev->ro = 2;
2750                         err = do_md_run(mddev);
2751                 }
2752                 break;
2753         case clean:
2754                 if (mddev->pers) {
2755                         restart_array(mddev);
2756                         spin_lock_irq(&mddev->write_lock);
2757                         if (atomic_read(&mddev->writes_pending) == 0) {
2758                                 if (mddev->in_sync == 0) {
2759                                         mddev->in_sync = 1;
2760                                         if (mddev->safemode == 1)
2761                                                 mddev->safemode = 0;
2762                                         if (mddev->persistent)
2763                                                 set_bit(MD_CHANGE_CLEAN,
2764                                                         &mddev->flags);
2765                                 }
2766                                 err = 0;
2767                         } else
2768                                 err = -EBUSY;
2769                         spin_unlock_irq(&mddev->write_lock);
2770                 } else {
2771                         mddev->ro = 0;
2772                         mddev->recovery_cp = MaxSector;
2773                         err = do_md_run(mddev);
2774                 }
2775                 break;
2776         case active:
2777                 if (mddev->pers) {
2778                         restart_array(mddev);
2779                         if (mddev->external)
2780                                 clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
2781                         wake_up(&mddev->sb_wait);
2782                         err = 0;
2783                 } else {
2784                         mddev->ro = 0;
2785                         set_disk_ro(mddev->gendisk, 0);
2786                         err = do_md_run(mddev);
2787                 }
2788                 break;
2789         case write_pending:
2790         case active_idle:
2791                 /* these cannot be set */
2792                 break;
2793         }
2794         if (err)
2795                 return err;
2796         else {
2797                 sysfs_notify(&mddev->kobj, NULL, "array_state");
2798                 return len;
2799         }
2800 }
2801 static struct md_sysfs_entry md_array_state =
2802 __ATTR(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
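/*
 * Illustrative use of "array_state" (paths are examples):
 *
 *   cat /sys/block/md0/md/array_state            # e.g. "active" or "clean"
 *   echo readonly > /sys/block/md0/md/array_state
 *
 * The write stops a running array read-only via do_md_stop(mddev, 1),
 * or sets up a read-only start on an inactive one.
 */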
2803
2804 static ssize_t
2805 null_show(mddev_t *mddev, char *page)
2806 {
2807         return -EINVAL;
2808 }
2809
2810 static ssize_t
2811 new_dev_store(mddev_t *mddev, const char *buf, size_t len)
2812 {
2813         /* buf must be of the form %d:%d (optionally \n terminated), giving major and minor numbers */
2814         /* The new device is added to the array.
2815          * If the array has a persistent superblock, we read the
2816          * superblock to initialise info and check validity.
2817          * Otherwise, only checking done is that in bind_rdev_to_array,
2818          * which mainly checks size.
2819          */
2820         char *e;
2821         int major = simple_strtoul(buf, &e, 10);
2822         int minor;
2823         dev_t dev;
2824         mdk_rdev_t *rdev;
2825         int err;
2826
2827         if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
2828                 return -EINVAL;
2829         minor = simple_strtoul(e+1, &e, 10);
2830         if (*e && *e != '\n')
2831                 return -EINVAL;
2832         dev = MKDEV(major, minor);
2833         if (major != MAJOR(dev) ||
2834             minor != MINOR(dev))
2835                 return -EOVERFLOW;
2836
2837
2838         if (mddev->persistent) {
2839                 rdev = md_import_device(dev, mddev->major_version,
2840                                         mddev->minor_version);
2841                 if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
2842                         mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
2843                                                        mdk_rdev_t, same_set);
2844                         err = super_types[mddev->major_version]
2845                                 .load_super(rdev, rdev0, mddev->minor_version);
2846                         if (err < 0)
2847                                 goto out;
2848                 }
2849         } else if (mddev->external)
2850                 rdev = md_import_device(dev, -2, -1);
2851         else
2852                 rdev = md_import_device(dev, -1, -1);
2853
2854         if (IS_ERR(rdev))
2855                 return PTR_ERR(rdev);
2856         err = bind_rdev_to_array(rdev, mddev);
2857  out:
2858         if (err)
2859                 export_rdev(rdev);
2860         return err ? err : len;
2861 }
2862
2863 static struct md_sysfs_entry md_new_device =
2864 __ATTR(new_dev, S_IWUSR, null_show, new_dev_store);
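/*
 * Illustrative example: echo 8:16 > /sys/block/md0/md/new_dev (example
 * path) binds the device with major 8, minor 16 (typically /dev/sdb) to
 * this array, loading its superblock first if the metadata is persistent.
 */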
2865
2866 static ssize_t
2867 bitmap_store(mddev_t *mddev, const char *buf, size_t len)
2868 {
2869         char *end;
2870         unsigned long chunk, end_chunk;
2871
2872         if (!mddev->bitmap)
2873                 goto out;
2874         /* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */
2875         while (*buf) {
2876                 chunk = end_chunk = simple_strtoul(buf, &end, 0);
2877                 if (buf == end) break;
2878                 if (*end == '-') { /* range */
2879                         buf = end + 1;
2880                         end_chunk = simple_strtoul(buf, &end, 0);
2881                         if (buf == end) break;
2882                 }
2883                 if (*end && !isspace(*end)) break;
2884                 bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
2885                 buf = end;
2886                 while (isspace(*buf)) buf++;
2887         }
2888         bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
2889 out:
2890         return len;
2891 }
2892
2893 static struct md_sysfs_entry md_bitmap =
2894 __ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store);
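/*
 * Illustrative example: echo "10-20 35" > .../bitmap_set_bits marks
 * bitmap chunks 10 through 20 and chunk 35 dirty, then flushes the bits
 * to disk via bitmap_unplug().
 */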
2895
2896 static ssize_t
2897 size_show(mddev_t *mddev, char *page)
2898 {
2899         return sprintf(page, "%llu\n", (unsigned long long)mddev->size);
2900 }
2901
2902 static int update_size(mddev_t *mddev, unsigned long size);
2903
2904 static ssize_t
2905 size_store(mddev_t *mddev, const char *buf, size_t len)
2906 {
2907         /* If array is inactive, we can reduce the component size, but
2908          * not increase it (except from 0).
2909          * If array is active, we can try an on-line resize
2910          */
2911         char *e;
2912         int err = 0;
2913         unsigned long long size = simple_strtoull(buf, &e, 10);
2914         if (!*buf || *buf == '\n' ||
2915             (*e && *e != '\n'))
2916                 return -EINVAL;
2917
2918         if (mddev->pers) {
2919                 err = update_size(mddev, size);
2920                 md_update_sb(mddev, 1);
2921         } else {
2922                 if (mddev->size == 0 ||
2923                     mddev->size > size)
2924                         mddev->size = size;
2925                 else
2926                         err = -ENOSPC;
2927         }
2928         return err ? err : len;
2929 }
2930
2931 static struct md_sysfs_entry md_size =
2932 __ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);
2933
2934
2935 /* Metadata version.
2936  * This is one of
2937  *   'none' for arrays with no metadata (good luck...)
2938  *   'external' for arrays with externally managed metadata,
2939  * or N.M for internally known formats
2940  */
2941 static ssize_t
2942 metadata_show(mddev_t *mddev, char *page)
2943 {
2944         if (mddev->persistent)
2945                 return sprintf(page, "%d.%d\n",
2946                                mddev->major_version, mddev->minor_version);
2947         else if (mddev->external)
2948                 return sprintf(page, "external:%s\n", mddev->metadata_type);
2949         else
2950                 return sprintf(page, "none\n");
2951 }
2952
2953 static ssize_t
2954 metadata_store(mddev_t *mddev, const char *buf, size_t len)
2955 {
2956         int major, minor;
2957         char *e;
2958         if (!list_empty(&mddev->disks))
2959                 return -EBUSY;
2960
2961         if (cmd_match(buf, "none")) {
2962                 mddev->persistent = 0;
2963                 mddev->external = 0;
2964                 mddev->major_version = 0;
2965                 mddev->minor_version = 90;
2966                 return len;
2967         }
2968         if (strncmp(buf, "external:", 9) == 0) {
2969                 size_t namelen = len-9;
2970                 if (namelen >= sizeof(mddev->metadata_type))
2971                         namelen = sizeof(mddev->metadata_type)-1;
2972                 strncpy(mddev->metadata_type, buf+9, namelen);
2973                 mddev->metadata_type[namelen] = 0;
2974                 if (namelen && mddev->metadata_type[namelen-1] == '\n')
2975                         mddev->metadata_type[--namelen] = 0;
2976                 mddev->persistent = 0;
2977                 mddev->external = 1;
2978                 mddev->major_version = 0;
2979                 mddev->minor_version = 90;
2980                 return len;
2981         }
2982         major = simple_strtoul(buf, &e, 10);
2983         if (e==buf || *e != '.')
2984                 return -EINVAL;
2985         buf = e+1;
2986         minor = simple_strtoul(buf, &e, 10);
2987         if (e==buf || (*e && *e != '\n') )
2988                 return -EINVAL;
2989         if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL)
2990                 return -ENOENT;
2991         mddev->major_version = major;
2992         mddev->minor_version = minor;
2993         mddev->persistent = 1;
2994         mddev->external = 0;
2995         return len;
2996 }
2997
2998 static struct md_sysfs_entry md_metadata =
2999 __ATTR(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
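
/* Usage sketch (device name and values illustrative): these writes
 * are parsed by metadata_store() above, and only succeed while the
 * array has no disks attached:
 *
 *	echo 0.90 > /sys/block/md0/md/metadata_version
 *	echo external:user-managed > /sys/block/md0/md/metadata_version
 *	echo none > /sys/block/md0/md/metadata_version
 */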
3000
3001 static ssize_t
3002 action_show(mddev_t *mddev, char *page)
3003 {
3004         char *type = "idle";
3005         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
3006             (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))) {
3007                 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
3008                         type = "reshape";
3009                 else if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
3010                         if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
3011                                 type = "resync";
3012                         else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
3013                                 type = "check";
3014                         else
3015                                 type = "repair";
3016                 } else if (test_bit(MD_RECOVERY_RECOVER, &mddev->recovery))
3017                         type = "recover";
3018         }
3019         return sprintf(page, "%s\n", type);
3020 }
3021
3022 static ssize_t
3023 action_store(mddev_t *mddev, const char *page, size_t len)
3024 {
3025         if (!mddev->pers || !mddev->pers->sync_request)
3026                 return -EINVAL;
3027
3028         if (cmd_match(page, "idle")) {
3029                 if (mddev->sync_thread) {
3030                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
3031                         md_unregister_thread(mddev->sync_thread);
3032                         mddev->sync_thread = NULL;
3033                         mddev->recovery = 0;
3034                 }
3035         } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
3036                    test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
3037                 return -EBUSY;
3038         else if (cmd_match(page, "resync"))
3039                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3040         else if (cmd_match(page, "recover")) {
3041                 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
3042                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3043         } else if (cmd_match(page, "reshape")) {
3044                 int err;
3045                 if (mddev->pers->start_reshape == NULL)
3046                         return -EINVAL;
3047                 err = mddev->pers->start_reshape(mddev);
3048                 if (err)
3049                         return err;
3050                 sysfs_notify(&mddev->kobj, NULL, "degraded");
3051         } else {
3052                 if (cmd_match(page, "check"))
3053                         set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
3054                 else if (!cmd_match(page, "repair"))
3055                         return -EINVAL;
3056                 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
3057                 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
3058         }
3059         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3060         md_wakeup_thread(mddev->thread);
3061         sysfs_notify(&mddev->kobj, NULL, "sync_action");
3062         return len;
3063 }
3064
3065 static ssize_t
3066 mismatch_cnt_show(mddev_t *mddev, char *page)
3067 {
3068         return sprintf(page, "%llu\n",
3069                        (unsigned long long) mddev->resync_mismatches);
3070 }
3071
3072 static struct md_sysfs_entry md_scan_mode =
3073 __ATTR(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
3074
3075
3076 static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt);
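
/* Usage sketch (device name illustrative): request a background
 * consistency check, then read how many mismatches were found:
 *
 *	echo check > /sys/block/md0/md/sync_action
 *	cat /sys/block/md0/md/mismatch_cnt
 *
 * "repair" additionally rewrites inconsistent data; "idle" interrupts
 * whatever action is running, as parsed in action_store() above.
 */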
3077
3078 static ssize_t
3079 sync_min_show(mddev_t *mddev, char *page)
3080 {
3081         return sprintf(page, "%d (%s)\n", speed_min(mddev),
3082                        mddev->sync_speed_min ? "local": "system");
3083 }
3084
3085 static ssize_t
3086 sync_min_store(mddev_t *mddev, const char *buf, size_t len)
3087 {
3088         int min;
3089         char *e;
3090         if (strncmp(buf, "system", 6)==0) {
3091                 mddev->sync_speed_min = 0;
3092                 return len;
3093         }
3094         min = simple_strtoul(buf, &e, 10);
3095         if (buf == e || (*e && *e != '\n') || min <= 0)
3096                 return -EINVAL;
3097         mddev->sync_speed_min = min;
3098         return len;
3099 }
3100
3101 static struct md_sysfs_entry md_sync_min =
3102 __ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store);
3103
3104 static ssize_t
3105 sync_max_show(mddev_t *mddev, char *page)
3106 {
3107         return sprintf(page, "%d (%s)\n", speed_max(mddev),
3108                        mddev->sync_speed_max ? "local": "system");
3109 }
3110
3111 static ssize_t
3112 sync_max_store(mddev_t *mddev, const char *buf, size_t len)
3113 {
3114         int max;
3115         char *e;
3116         if (strncmp(buf, "system", 6)==0) {
3117                 mddev->sync_speed_max = 0;
3118                 return len;
3119         }
3120         max = simple_strtoul(buf, &e, 10);
3121         if (buf == e || (*e && *e != '\n') || max <= 0)
3122                 return -EINVAL;
3123         mddev->sync_speed_max = max;
3124         return len;
3125 }
3126
3127 static struct md_sysfs_entry md_sync_max =
3128 __ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store);
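
/* Usage sketch (device name and value illustrative): per-array
 * overrides for the resync speed limits; writing "system" reverts
 * to the system-wide defaults:
 *
 *	echo 50000 > /sys/block/md0/md/sync_speed_max
 *	echo system > /sys/block/md0/md/sync_speed_min
 */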
3129
3130 static ssize_t
3131 degraded_show(mddev_t *mddev, char *page)
3132 {
3133         return sprintf(page, "%d\n", mddev->degraded);
3134 }
3135 static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded);
3136
3137 static ssize_t
3138 sync_force_parallel_show(mddev_t *mddev, char *page)
3139 {
3140         return sprintf(page, "%d\n", mddev->parallel_resync);
3141 }
3142
3143 static ssize_t
3144 sync_force_parallel_store(mddev_t *mddev, const char *buf, size_t len)
3145 {
3146         long n;
3147
3148         if (strict_strtol(buf, 10, &n))
3149                 return -EINVAL;
3150
3151         if (n != 0 && n != 1)
3152                 return -EINVAL;
3153
3154         mddev->parallel_resync = n;
3155
3156         if (mddev->sync_thread)
3157                 wake_up(&resync_wait);
3158
3159         return len;
3160 }
3161
3162 /* force parallel resync, even with shared block devices */
3163 static struct md_sysfs_entry md_sync_force_parallel =
3164 __ATTR(sync_force_parallel, S_IRUGO|S_IWUSR,
3165        sync_force_parallel_show, sync_force_parallel_store);
3166
3167 static ssize_t
3168 sync_speed_show(mddev_t *mddev, char *page)
3169 {
3170         unsigned long resync, dt, db;
3171         resync = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active));
3172         dt = ((jiffies - mddev->resync_mark) / HZ); /* seconds since mark */
3173         if (!dt) dt++; /* guard against a zero interval */
3174         db = resync - (mddev->resync_mark_cnt); /* sectors done since mark */
3175         return sprintf(page, "%ld\n", db/dt/2); /* 512-byte sectors -> K/sec */
3176 }
3177
3178 static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed);
3179
3180 static ssize_t
3181 sync_completed_show(mddev_t *mddev, char *page)
3182 {
3183         unsigned long max_blocks, resync;
3184
3185         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
3186                 max_blocks = mddev->resync_max_sectors;
3187         else
3188                 max_blocks = mddev->size << 1;
3189
3190         resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active));
3191         return sprintf(page, "%lu / %lu\n", resync, max_blocks);
3192 }
3193
3194 static struct md_sysfs_entry md_sync_completed = __ATTR_RO(sync_completed);
3195
3196 static ssize_t
3197 min_sync_show(mddev_t *mddev, char *page)
3198 {
3199         return sprintf(page, "%llu\n",
3200                        (unsigned long long)mddev->resync_min);
3201 }
3202 static ssize_t
3203 min_sync_store(mddev_t *mddev, const char *buf, size_t len)
3204 {
3205         unsigned long long min;
3206         if (strict_strtoull(buf, 10, &min))
3207                 return -EINVAL;
3208         if (min > mddev->resync_max)
3209                 return -EINVAL;
3210         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
3211                 return -EBUSY;
3212
3213         /* Must be a multiple of chunk_size */
3214         if (mddev->chunk_size) {
3215                 if (min & (sector_t)((mddev->chunk_size>>9)-1))
3216                         return -EINVAL;
3217         }
3218         mddev->resync_min = min;
3219
3220         return len;
3221 }
3222
3223 static struct md_sysfs_entry md_min_sync =
3224 __ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store);
3225
3226 static ssize_t
3227 max_sync_show(mddev_t *mddev, char *page)
3228 {
3229         if (mddev->resync_max == MaxSector)
3230                 return sprintf(page, "max\n");
3231         else
3232                 return sprintf(page, "%llu\n",
3233                                (unsigned long long)mddev->resync_max);
3234 }
3235 static ssize_t
3236 max_sync_store(mddev_t *mddev, const char *buf, size_t len)
3237 {
3238         if (strncmp(buf, "max", 3) == 0)
3239                 mddev->resync_max = MaxSector;
3240         else {
3241                 unsigned long long max;
3242                 if (strict_strtoull(buf, 10, &max))
3243                         return -EINVAL;
3244                 if (max < mddev->resync_min)
3245                         return -EINVAL;
3246                 if (max < mddev->resync_max &&
3247                     test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
3248                         return -EBUSY;
3249
3250                 /* Must be a multiple of chunk_size */
3251                 if (mddev->chunk_size) {
3252                         if (max & (sector_t)((mddev->chunk_size>>9)-1))
3253                                 return -EINVAL;
3254                 }
3255                 mddev->resync_max = max;
3256         }
3257         wake_up(&mddev->recovery_wait);
3258         return len;
3259 }
3260
3261 static struct md_sysfs_entry md_max_sync =
3262 __ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store);
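
/* Usage sketch (device name and numbers illustrative): constrain a
 * resync to a sector window; with 64KiB chunks both values must be a
 * multiple of 128 sectors, as enforced above:
 *
 *	echo 0 > /sys/block/md0/md/sync_min
 *	echo 2097152 > /sys/block/md0/md/sync_max
 */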
3263
3264 static ssize_t
3265 suspend_lo_show(mddev_t *mddev, char *page)
3266 {
3267         return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo);
3268 }
3269
3270 static ssize_t
3271 suspend_lo_store(mddev_t *mddev, const char *buf, size_t len)
3272 {
3273         char *e;
3274         unsigned long long new = simple_strtoull(buf, &e, 10);
3275
3276         if (mddev->pers == NULL || mddev->pers->quiesce == NULL)
3277                 return -EINVAL;
3278         if (buf == e || (*e && *e != '\n'))
3279                 return -EINVAL;
3280         if (new >= mddev->suspend_hi ||
3281             (new > mddev->suspend_lo && new < mddev->suspend_hi)) {
3282                 mddev->suspend_lo = new;
3283                 mddev->pers->quiesce(mddev, 2);
3284                 return len;
3285         } else
3286                 return -EINVAL;
3287 }
3288 static struct md_sysfs_entry md_suspend_lo =
3289 __ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store);
3290
3291
3292 static ssize_t
3293 suspend_hi_show(mddev_t *mddev, char *page)
3294 {
3295         return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi);
3296 }
3297
3298 static ssize_t
3299 suspend_hi_store(mddev_t *mddev, const char *buf, size_t len)
3300 {
3301         char *e;
3302         unsigned long long new = simple_strtoull(buf, &e, 10);
3303
3304         if (mddev->pers == NULL || mddev->pers->quiesce == NULL)
3305                 return -EINVAL;
3306         if (buf == e || (*e && *e != '\n'))
3307                 return -EINVAL;
3308         if ((new <= mddev->suspend_lo && mddev->suspend_lo >= mddev->suspend_hi) ||
3309             (new > mddev->suspend_lo && new > mddev->suspend_hi)) {
3310                 mddev->suspend_hi = new;
3311                 mddev->pers->quiesce(mddev, 1);
3312                 mddev->pers->quiesce(mddev, 0);
3313                 return len;
3314         } else
3315                 return -EINVAL;
3316 }
3317 static struct md_sysfs_entry md_suspend_hi =
3318 __ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
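
/* Usage sketch (device name and sectors illustrative): this pair of
 * attributes asks the personality to quiesce I/O to the half-open
 * sector range [suspend_lo, suspend_hi):
 *
 *	echo 0 > /sys/block/md0/md/suspend_lo
 *	echo 8192 > /sys/block/md0/md/suspend_hi
 */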
3319
3320 static ssize_t
3321 reshape_position_show(mddev_t *mddev, char *page)
3322 {
3323         if (mddev->reshape_position != MaxSector)
3324                 return sprintf(page, "%llu\n",
3325                                (unsigned long long)mddev->reshape_position);
3326         strcpy(page, "none\n");
3327         return 5;
3328 }
3329
3330 static ssize_t
3331 reshape_position_store(mddev_t *mddev, const char *buf, size_t len)
3332 {
3333         char *e;
3334         unsigned long long new = simple_strtoull(buf, &e, 10);
3335         if (mddev->pers)
3336                 return -EBUSY;
3337         if (buf == e || (*e && *e != '\n'))
3338                 return -EINVAL;
3339         mddev->reshape_position = new;
3340         mddev->delta_disks = 0;
3341         mddev->new_level = mddev->level;
3342         mddev->new_layout = mddev->layout;
3343         mddev->new_chunk = mddev->chunk_size;
3344         return len;
3345 }
3346
3347 static struct md_sysfs_entry md_reshape_position =
3348 __ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show,
3349        reshape_position_store);
3350
3351
3352 static struct attribute *md_default_attrs[] = {
3353         &md_level.attr,
3354         &md_layout.attr,
3355         &md_raid_disks.attr,
3356         &md_chunk_size.attr,
3357         &md_size.attr,
3358         &md_resync_start.attr,
3359         &md_metadata.attr,
3360         &md_new_device.attr,
3361         &md_safe_delay.attr,
3362         &md_array_state.attr,
3363         &md_reshape_position.attr,
3364         NULL,
3365 };
3366
3367 static struct attribute *md_redundancy_attrs[] = {
3368         &md_scan_mode.attr,
3369         &md_mismatches.attr,
3370         &md_sync_min.attr,
3371         &md_sync_max.attr,
3372         &md_sync_speed.attr,
3373         &md_sync_force_parallel.attr,
3374         &md_sync_completed.attr,
3375         &md_min_sync.attr,
3376         &md_max_sync.attr,
3377         &md_suspend_lo.attr,
3378         &md_suspend_hi.attr,
3379         &md_bitmap.attr,
3380         &md_degraded.attr,
3381         NULL,
3382 };
3383 static struct attribute_group md_redundancy_group = {
3384         .name = NULL,
3385         .attrs = md_redundancy_attrs,
3386 };
3387
3388
3389 static ssize_t
3390 md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
3391 {
3392         struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
3393         mddev_t *mddev = container_of(kobj, struct mddev_s, kobj);
3394         ssize_t rv;
3395
3396         if (!entry->show)
3397                 return -EIO;
3398         rv = mddev_lock(mddev);
3399         if (!rv) {
3400                 rv = entry->show(mddev, page);
3401                 mddev_unlock(mddev);
3402         }
3403         return rv;
3404 }
3405
3406 static ssize_t
3407 md_attr_store(struct kobject *kobj, struct attribute *attr,
3408               const char *page, size_t length)
3409 {
3410         struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
3411         mddev_t *mddev = container_of(kobj, struct mddev_s, kobj);
3412         ssize_t rv;
3413
3414         if (!entry->store)
3415                 return -EIO;
3416         if (!capable(CAP_SYS_ADMIN))
3417                 return -EACCES;
3418         rv = mddev_lock(mddev);
3419         if (!rv) {
3420                 rv = entry->store(mddev, page, length);
3421                 mddev_unlock(mddev);
3422         }
3423         return rv;
3424 }
3425
3426 static void md_free(struct kobject *ko)
3427 {
3428         mddev_t *mddev = container_of(ko, mddev_t, kobj);
3429         kfree(mddev);
3430 }
3431
3432 static struct sysfs_ops md_sysfs_ops = {
3433         .show   = md_attr_show,
3434         .store  = md_attr_store,
3435 };
3436 static struct kobj_type md_ktype = {
3437         .release        = md_free,
3438         .sysfs_ops      = &md_sysfs_ops,
3439         .default_attrs  = md_default_attrs,
3440 };
3441
3442 int mdp_major = 0;
3443
3444 static struct kobject *md_probe(dev_t dev, int *part, void *data)
3445 {
3446         static DEFINE_MUTEX(disks_mutex);
3447         mddev_t *mddev = mddev_find(dev);
3448         struct gendisk *disk;
3449         int partitioned = (MAJOR(dev) != MD_MAJOR);
3450         int shift = partitioned ? MdpMinorShift : 0;
3451         int unit = MINOR(dev) >> shift;
3452         int error;
3453
3454         if (!mddev)
3455                 return NULL;
3456
3457         mutex_lock(&disks_mutex);
3458         if (mddev->gendisk) {
3459                 mutex_unlock(&disks_mutex);
3460                 mddev_put(mddev);
3461                 return NULL;
3462         }
3463         disk = alloc_disk(1 << shift);
3464         if (!disk) {
3465                 mutex_unlock(&disks_mutex);
3466                 mddev_put(mddev);
3467                 return NULL;
3468         }
3469         disk->major = MAJOR(dev);
3470         disk->first_minor = unit << shift;
3471         if (partitioned)
3472                 sprintf(disk->disk_name, "md_d%d", unit);
3473         else
3474                 sprintf(disk->disk_name, "md%d", unit);
3475         disk->fops = &md_fops;
3476         disk->private_data = mddev;
3477         disk->queue = mddev->queue;
3478         add_disk(disk);
3479         mddev->gendisk = disk;
3480         error = kobject_init_and_add(&mddev->kobj, &md_ktype, &disk->dev.kobj,
3481                                      "%s", "md");
3482         mutex_unlock(&disks_mutex);
3483         if (error)
3484                 printk(KERN_WARNING "md: cannot register %s/md - name in use\n",
3485                        disk->disk_name);
3486         else
3487                 kobject_uevent(&mddev->kobj, KOBJ_ADD);
3488         return NULL;
3489 }
3490
3491 static void md_safemode_timeout(unsigned long data)
3492 {
3493         mddev_t *mddev = (mddev_t *) data;
3494
3495         if (!atomic_read(&mddev->writes_pending)) {
3496                 mddev->safemode = 1;
3497                 if (mddev->external)
3498                         sysfs_notify(&mddev->kobj, NULL, "array_state");
3499         }
3500         md_wakeup_thread(mddev->thread);
3501 }
3502
3503 static int start_dirty_degraded;
3504
3505 static int do_md_run(mddev_t * mddev)
3506 {
3507         int err;
3508         int chunk_size;
3509         struct list_head *tmp;
3510         mdk_rdev_t *rdev;
3511         struct gendisk *disk;
3512         struct mdk_personality *pers;
3513         char b[BDEVNAME_SIZE];
3514
3515         if (list_empty(&mddev->disks))
3516                 /* cannot run an array with no devices. */
3517                 return -EINVAL;
3518
3519         if (mddev->pers)
3520                 return -EBUSY;
3521
3522         /*
3523          * Analyze all RAID superblock(s)
3524          */
3525         if (!mddev->raid_disks) {
3526                 if (!mddev->persistent)
3527                         return -EINVAL;
3528                 analyze_sbs(mddev);
3529         }
3530
3531         chunk_size = mddev->chunk_size;
3532
3533         if (chunk_size) {
3534                 if (chunk_size > MAX_CHUNK_SIZE) {
3535                         printk(KERN_ERR "too big chunk_size: %d > %d\n",
3536                                 chunk_size, MAX_CHUNK_SIZE);
3537                         return -EINVAL;
3538                 }
3539                 /*
3540                  * chunk-size has to be a power of 2 and a multiple of PAGE_SIZE
3541                  */
3542                 if ((1 << ffz(~chunk_size)) != chunk_size) { /* power of 2? */
3543                         printk(KERN_ERR "chunk_size of %d not valid\n", chunk_size);
3544                         return -EINVAL;
3545                 }
3546                 if (chunk_size < PAGE_SIZE) {
3547                         printk(KERN_ERR "too small chunk_size: %d < %ld\n",
3548                                 chunk_size, PAGE_SIZE);
3549                         return -EINVAL;
3550                 }
3551
3552                 /* devices must have minimum size of one chunk */
3553                 rdev_for_each(rdev, tmp, mddev) {
3554                         if (test_bit(Faulty, &rdev->flags))
3555                                 continue;
3556                         if (rdev->size < chunk_size / 1024) {
3557                                 printk(KERN_WARNING
3558                                         "md: Dev %s smaller than chunk_size:"
3559                                         " %lluk < %dk\n",
3560                                         bdevname(rdev->bdev,b),
3561                                         (unsigned long long)rdev->size,
3562                                         chunk_size / 1024);
3563                                 return -EINVAL;
3564                         }
3565                 }
3566         }
3567
3568 #ifdef CONFIG_KMOD
3569         if (mddev->level != LEVEL_NONE)
3570                 request_module("md-level-%d", mddev->level);
3571         else if (mddev->clevel[0])
3572                 request_module("md-%s", mddev->clevel);
3573 #endif
3574
3575         /*
3576          * Drop all container device buffers, from now on
3577          * the only valid external interface is through the md
3578          * device.
3579          */
3580         rdev_for_each(rdev, tmp, mddev) {
3581                 if (test_bit(Faulty, &rdev->flags))
3582                         continue;
3583                 sync_blockdev(rdev->bdev);
3584                 invalidate_bdev(rdev->bdev);
3585
3586                 /* perform some consistency tests on the device.
3587                  * We don't want the data to overlap the metadata.
3588                  * Internal bitmap issues are handled elsewhere.
3589                  */
3590                 if (rdev->data_offset < rdev->sb_offset) {
3591                         if (mddev->size &&
3592                             rdev->data_offset + mddev->size*2
3593                             > rdev->sb_offset*2) {
3594                                 printk("md: %s: data overlaps metadata\n",
3595                                        mdname(mddev));
3596                                 return -EINVAL;
3597                         }
3598                 } else {
3599                         if (rdev->sb_offset*2 + rdev->sb_size/512
3600                             > rdev->data_offset) {
3601                                 printk("md: %s: metadata overlaps data\n",
3602                                        mdname(mddev));
3603                                 return -EINVAL;
3604                         }
3605                 }
3606                 sysfs_notify(&rdev->kobj, NULL, "state");
3607         }
3608
3609         md_probe(mddev->unit, NULL, NULL);
3610         disk = mddev->gendisk;
3611         if (!disk)
3612                 return -ENOMEM;
3613
3614         spin_lock(&pers_lock);
3615         pers = find_pers(mddev->level, mddev->clevel);
3616         if (!pers || !try_module_get(pers->owner)) {
3617                 spin_unlock(&pers_lock);
3618                 if (mddev->level != LEVEL_NONE)
3619                         printk(KERN_WARNING "md: personality for level %d is not loaded!\n",
3620                                mddev->level);
3621                 else
3622                         printk(KERN_WARNING "md: personality for level %s is not loaded!\n",
3623                                mddev->clevel);
3624                 return -EINVAL;
3625         }
3626         mddev->pers = pers;
3627         spin_unlock(&pers_lock);
3628         mddev->level = pers->level;
3629         strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
3630
3631         if (mddev->reshape_position != MaxSector &&
3632             pers->start_reshape == NULL) {
3633                 /* This personality cannot handle reshaping... */
3634                 mddev->pers = NULL;
3635                 module_put(pers->owner);
3636                 return -EINVAL;
3637         }
3638
3639         if (pers->sync_request) {
3640                 /* Warn if this is a potentially silly
3641                  * configuration.
3642                  */
3643                 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
3644                 mdk_rdev_t *rdev2;
3645                 struct list_head *tmp2;
3646                 int warned = 0;
3647                 rdev_for_each(rdev, tmp, mddev) {
3648                         rdev_for_each(rdev2, tmp2, mddev) {
3649                                 if (rdev < rdev2 &&
3650                                     rdev->bdev->bd_contains ==
3651                                     rdev2->bdev->bd_contains) {
3652                                         printk(KERN_WARNING
3653                                                "%s: WARNING: %s appears to be"
3654                                                " on the same physical disk as"
3655                                                " %s.\n",
3656                                                mdname(mddev),
3657                                                bdevname(rdev->bdev,b),
3658                                                bdevname(rdev2->bdev,b2));
3659                                         warned = 1;
3660                                 }
3661                         }
3662                 }
3663                 if (warned)
3664                         printk(KERN_WARNING
3665                                "True protection against single-disk"
3666                                " failure might be compromised.\n");
3667         }
3668
3669         mddev->recovery = 0;
3670         mddev->resync_max_sectors = mddev->size << 1; /* may be over-ridden by personality */
3671         mddev->barriers_work = 1;
3672         mddev->ok_start_degraded = start_dirty_degraded;
3673
3674         if (start_readonly)
3675                 mddev->ro = 2; /* read-only, but switch on first write */
3676
3677         err = mddev->pers->run(mddev);
3678         if (!err && mddev->pers->sync_request) {
3679                 err = bitmap_create(mddev);
3680                 if (err) {
3681                         printk(KERN_ERR "%s: failed to create bitmap (%d)\n",
3682                                mdname(mddev), err);
3683                         mddev->pers->stop(mddev);
3684                 }
3685         }
3686         if (err) {
3687                 printk(KERN_ERR "md: pers->run() failed ...\n");
3688                 module_put(mddev->pers->owner);
3689                 mddev->pers = NULL;
3690                 bitmap_destroy(mddev);
3691                 return err;
3692         }
3693         if (mddev->pers->sync_request) {
3694                 if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
3695                         printk(KERN_WARNING
3696                                "md: cannot register extra attributes for %s\n",
3697                                mdname(mddev));
3698         } else if (mddev->ro == 2) /* auto-readonly not meaningful */
3699                 mddev->ro = 0;
3700
3701         atomic_set(&mddev->writes_pending,0);
3702         mddev->safemode = 0;
3703         mddev->safemode_timer.function = md_safemode_timeout;
3704         mddev->safemode_timer.data = (unsigned long) mddev;
3705         mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */
3706         mddev->in_sync = 1;
3707
3708         rdev_for_each(rdev, tmp, mddev)
3709                 if (rdev->raid_disk >= 0) {
3710                         char nm[20];
3711                         sprintf(nm, "rd%d", rdev->raid_disk);
3712                         if (sysfs_create_link(&mddev->kobj, &rdev->kobj, nm))
3713                                 printk("md: cannot register %s for %s\n",
3714                                        nm, mdname(mddev));
3715                 }
3716
3717         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3718
3719         if (mddev->flags)
3720                 md_update_sb(mddev, 0);
3721
3722         set_capacity(disk, mddev->array_size<<1);
3723
3724         /* If we call blk_queue_make_request here, it will
3725          * re-initialise max_sectors etc which may have been
3726          * refined inside -> run.  So just set the bits we need to set.
3727                  * Most initialisation happened when we called
3728          * blk_queue_make_request(..., md_fail_request)
3729          * earlier.
3730          */
3731         mddev->queue->queuedata = mddev;
3732         mddev->queue->make_request_fn = mddev->pers->make_request;
3733
3734         /* If there is a partially-recovered drive we need to
3735          * start recovery here.  If we leave it to md_check_recovery,
3736          * it will remove the drives and not do the right thing
3737          */
3738         if (mddev->degraded && !mddev->sync_thread) {
3739                 struct list_head *rtmp;
3740                 int spares = 0;
3741                 rdev_for_each(rdev, rtmp, mddev)
3742                         if (rdev->raid_disk >= 0 &&
3743                             !test_bit(In_sync, &rdev->flags) &&
3744                             !test_bit(Faulty, &rdev->flags))
3745                                 /* complete an interrupted recovery */
3746                                 spares++;
3747                 if (spares && mddev->pers->sync_request) {
3748                         mddev->recovery = 0;
3749                         set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
3750                         mddev->sync_thread = md_register_thread(md_do_sync,
3751                                                                 mddev,
3752                                                                 "%s_resync");
3753                         if (!mddev->sync_thread) {
3754                                 printk(KERN_ERR "%s: could not start resync"
3755                                        " thread...\n",
3756                                        mdname(mddev));
3757                                 /* leave the spares where they are, it shouldn't hurt */
3758                                 mddev->recovery = 0;
3759                         }
3760                 }
3761         }
3762         md_wakeup_thread(mddev->thread);
3763         md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
3764
3765         mddev->changed = 1;
3766         md_new_event(mddev);
3767         sysfs_notify(&mddev->kobj, NULL, "array_state");
3768         sysfs_notify(&mddev->kobj, NULL, "sync_action");
3769         sysfs_notify(&mddev->kobj, NULL, "degraded");
3770         kobject_uevent(&mddev->gendisk->dev.kobj, KOBJ_CHANGE);
3771         return 0;
3772 }
3773
3774 static int restart_array(mddev_t *mddev)
3775 {
3776         struct gendisk *disk = mddev->gendisk;
3777         int err;
3778
3779         /*
3780          * Complain if it has no devices
3781          */
3782         err = -ENXIO;
3783         if (list_empty(&mddev->disks))
3784                 goto out;
3785
3786         if (mddev->pers) {
3787                 err = -EBUSY;
3788                 if (!mddev->ro)
3789                         goto out;
3790
3791                 mddev->safemode = 0;
3792                 mddev->ro = 0;
3793                 set_disk_ro(disk, 0);
3794
3795                 printk(KERN_INFO "md: %s switched to read-write mode.\n",
3796                         mdname(mddev));
3797                 /*
3798                  * Kick recovery or resync if necessary
3799                  */
3800                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3801                 md_wakeup_thread(mddev->thread);
3802                 md_wakeup_thread(mddev->sync_thread);
3803                 err = 0;
3804                 sysfs_notify(&mddev->kobj, NULL, "array_state");
3805
3806         } else
3807                 err = -EINVAL;
3808
3809 out:
3810         return err;
3811 }
3812
3813 /* similar to deny_write_access, but accounts for our holding a reference
3814  * to the file ourselves */
3815 static int deny_bitmap_write_access(struct file * file)
3816 {
3817         struct inode *inode = file->f_mapping->host;
3818
3819         spin_lock(&inode->i_lock);
3820         if (atomic_read(&inode->i_writecount) > 1) {
3821                 spin_unlock(&inode->i_lock);
3822                 return -ETXTBSY;
3823         }
3824         atomic_set(&inode->i_writecount, -1);
3825         spin_unlock(&inode->i_lock);
3826
3827         return 0;
3828 }
3829
3830 static void restore_bitmap_write_access(struct file *file)
3831 {
3832         struct inode *inode = file->f_mapping->host;
3833
3834         spin_lock(&inode->i_lock);
3835         atomic_set(&inode->i_writecount, 1);
3836         spin_unlock(&inode->i_lock);
3837 }
3838
3839 /* mode:
3840  *   0 - completely stop and dis-assemble array
3841  *   1 - switch to readonly
3842  *   2 - stop but do not disassemble array
3843  */
3844 static int do_md_stop(mddev_t * mddev, int mode)
3845 {
3846         int err = 0;
3847         struct gendisk *disk = mddev->gendisk;
3848
3849         if (mddev->pers) {
3850                 if (atomic_read(&mddev->active)>2) {
3851                         printk("md: %s still in use.\n",mdname(mddev));
3852                         return -EBUSY;
3853                 }
3854
3855                 if (mddev->sync_thread) {
3856                         set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
3857                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
3858                         md_unregister_thread(mddev->sync_thread);
3859                         mddev->sync_thread = NULL;
3860                 }
3861
3862                 del_timer_sync(&mddev->safemode_timer);
3863
3864                 invalidate_partition(disk, 0);
3865
3866                 switch(mode) {
3867                 case 1: /* readonly */
3868                         err  = -ENXIO;
3869                         if (mddev->ro==1)
3870                                 goto out;
3871                         mddev->ro = 1;
3872                         break;
3873                 case 0: /* disassemble */
3874                 case 2: /* stop */
3875                         bitmap_flush(mddev);
3876                         md_super_wait(mddev);
3877                         if (mddev->ro)
3878                                 set_disk_ro(disk, 0);
3879                         blk_queue_make_request(mddev->queue, md_fail_request);
3880                         mddev->pers->stop(mddev);
3881                         mddev->queue->merge_bvec_fn = NULL;
3882                         mddev->queue->unplug_fn = NULL;
3883                         mddev->queue->backing_dev_info.congested_fn = NULL;
3884                         if (mddev->pers->sync_request)
3885                                 sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
3886
3887                         module_put(mddev->pers->owner);
3888                         mddev->pers = NULL;
3889                         /* tell userspace to handle 'inactive' */
3890                         sysfs_notify(&mddev->kobj, NULL, "array_state");
3891
3892                         set_capacity(disk, 0);
3893                         mddev->changed = 1;
3894
3895                         if (mddev->ro)
3896                                 mddev->ro = 0;
3897                 }
3898                 if (!mddev->in_sync || mddev->flags) {
3899                         /* mark array as shutdown cleanly */
3900                         mddev->in_sync = 1;
3901                         md_update_sb(mddev, 1);
3902                 }
3903                 if (mode == 1)
3904                         set_disk_ro(disk, 1);
3905                 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
3906         }
3907
3908         /*
3909          * Free resources if final stop
3910          */
3911         if (mode == 0) {
3912                 mdk_rdev_t *rdev;
3913                 struct list_head *tmp;
3914
3915                 printk(KERN_INFO "md: %s stopped.\n", mdname(mddev));
3916
3917                 bitmap_destroy(mddev);
3918                 if (mddev->bitmap_file) {
3919                         restore_bitmap_write_access(mddev->bitmap_file);
3920                         fput(mddev->bitmap_file);
3921                         mddev->bitmap_file = NULL;
3922                 }
3923                 mddev->bitmap_offset = 0;
3924
3925                 rdev_for_each(rdev, tmp, mddev)
3926                         if (rdev->raid_disk >= 0) {
3927                                 char nm[20];
3928                                 sprintf(nm, "rd%d", rdev->raid_disk);
3929                                 sysfs_remove_link(&mddev->kobj, nm);
3930                         }
3931
3932                 /* make sure all md_delayed_delete calls have finished */
3933                 flush_scheduled_work();
3934
3935                 export_array(mddev);
3936
3937                 mddev->array_size = 0;
3938                 mddev->size = 0;
3939                 mddev->raid_disks = 0;
3940                 mddev->recovery_cp = 0;
3941                 mddev->resync_min = 0;
3942                 mddev->resync_max = MaxSector;
3943                 mddev->reshape_position = MaxSector;
3944                 mddev->external = 0;
3945                 mddev->persistent = 0;
3946                 mddev->level = LEVEL_NONE;
3947                 mddev->clevel[0] = 0;
3948                 mddev->flags = 0;
3949                 mddev->ro = 0;
3950                 mddev->metadata_type[0] = 0;
3951                 mddev->chunk_size = 0;
3952                 mddev->ctime = mddev->utime = 0;
3953                 mddev->layout = 0;
3954                 mddev->max_disks = 0;
3955                 mddev->events = 0;
3956                 mddev->delta_disks = 0;
3957                 mddev->new_level = LEVEL_NONE;
3958                 mddev->new_layout = 0;
3959                 mddev->new_chunk = 0;
3960                 mddev->curr_resync = 0;
3961                 mddev->resync_mismatches = 0;
3962                 mddev->suspend_lo = mddev->suspend_hi = 0;
3963                 mddev->sync_speed_min = mddev->sync_speed_max = 0;
3964                 mddev->recovery = 0;
3965                 mddev->in_sync = 0;
3966                 mddev->changed = 0;
3967                 mddev->degraded = 0;
3968                 mddev->barriers_work = 0;
3969                 mddev->safemode = 0;
3970
3971         } else if (mddev->pers)
3972                 printk(KERN_INFO "md: %s switched to read-only mode.\n",
3973                         mdname(mddev));
3974         err = 0;
3975         md_new_event(mddev);
3976         sysfs_notify(&mddev->kobj, NULL, "array_state");
3977 out:
3978         return err;
3979 }
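
/*
 * Userspace sketch (illustrative; not part of md.c): the stop modes
 * above are reached from ioctls handled later in this file --
 * STOP_ARRAY maps to mode 0 and STOP_ARRAY_RO to mode 1, while mode 2
 * is used internally.  "/dev/md0" is an assumed device name.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/major.h>
#include <linux/raid/md_u.h>

int main(void)
{
	int fd = open("/dev/md0", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* switch the array to read-only: do_md_stop(mddev, 1) */
	if (ioctl(fd, STOP_ARRAY_RO, 0) < 0) {
		perror("STOP_ARRAY_RO");
		return 1;
	}
	return 0;
}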
3980
3981 #ifndef MODULE
3982 static void autorun_array(mddev_t *mddev)
3983 {
3984         mdk_rdev_t *rdev;
3985         struct list_head *tmp;
3986         int err;
3987
3988         if (list_empty(&mddev->disks))
3989                 return;
3990
3991         printk(KERN_INFO "md: running: ");
3992
3993         rdev_for_each(rdev, tmp, mddev) {
3994                 char b[BDEVNAME_SIZE];
3995                 printk("<%s>", bdevname(rdev->bdev,b));
3996         }
3997         printk("\n");
3998
3999         err = do_md_run (mddev);
4000         if (err) {
4001                 printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
4002                 do_md_stop (mddev, 0);
4003         }
4004 }
4005
4006 /*
4007  * let's try to run arrays based on all disks that have arrived
4008  * until now. (those are in pending_raid_disks)
4009  *
4010  * the method: pick the first pending disk, collect all disks with
4011  * the same UUID, remove all from the pending list and put them into
4012  * the 'same_array' list. Then order this list based on superblock
4013  * update time (freshest comes first), kick out 'old' disks and
4014  * compare superblocks. If everything's fine then run it.
4015  *
4016  * If "unit" is allocated, then bump its reference count
4017  */
4018 static void autorun_devices(int part)
4019 {
4020         struct list_head *tmp;
4021         mdk_rdev_t *rdev0, *rdev;
4022         mddev_t *mddev;
4023         char b[BDEVNAME_SIZE];
4024
4025         printk(KERN_INFO "md: autorun ...\n");
4026         while (!list_empty(&pending_raid_disks)) {
4027                 int unit;
4028                 dev_t dev;
4029                 LIST_HEAD(candidates);
4030                 rdev0 = list_entry(pending_raid_disks.next,
4031                                          mdk_rdev_t, same_set);
4032
4033                 printk(KERN_INFO "md: considering %s ...\n",
4034                         bdevname(rdev0->bdev,b));
4035                 INIT_LIST_HEAD(&candidates);
4036                 rdev_for_each_list(rdev, tmp, pending_raid_disks)
4037                         if (super_90_load(rdev, rdev0, 0) >= 0) {
4038                                 printk(KERN_INFO "md:  adding %s ...\n",
4039                                         bdevname(rdev->bdev,b));
4040                                 list_move(&rdev->same_set, &candidates);
4041                         }
4042                 /*
4043                  * now we have a set of devices, with all of them having
4044                  * mostly sane superblocks. It's time to allocate the
4045                  * mddev.
4046                  */
4047                 if (part) {
4048                         dev = MKDEV(mdp_major,
4049                                     rdev0->preferred_minor << MdpMinorShift);
4050                         unit = MINOR(dev) >> MdpMinorShift;
4051                 } else {
4052                         dev = MKDEV(MD_MAJOR, rdev0->preferred_minor);
4053                         unit = MINOR(dev);
4054                 }
4055                 if (rdev0->preferred_minor != unit) {
4056                         printk(KERN_INFO "md: unit number in %s is bad: %d\n",
4057                                bdevname(rdev0->bdev, b), rdev0->preferred_minor);
4058                         break;
4059                 }
4060
4061                 md_probe(dev, NULL, NULL);
4062                 mddev = mddev_find(dev);
4063                 if (!mddev || !mddev->gendisk) {
4064                         if (mddev)
4065                                 mddev_put(mddev);
4066                         printk(KERN_ERR
4067                                 "md: cannot allocate memory for md drive.\n");
4068                         break;
4069                 }
4070                 if (mddev_lock(mddev)) 
4071                         printk(KERN_WARNING "md: %s locked, cannot run\n",
4072                                mdname(mddev));
4073                 else if (mddev->raid_disks || mddev->major_version
4074                          || !list_empty(&mddev->disks)) {
4075                         printk(KERN_WARNING 
4076                                 "md: %s already running, cannot run %s\n",
4077                                 mdname(mddev), bdevname(rdev0->bdev,b));
4078                         mddev_unlock(mddev);
4079                 } else {
4080                         printk(KERN_INFO "md: created %s\n", mdname(mddev));
4081                         mddev->persistent = 1;
4082                         rdev_for_each_list(rdev, tmp, candidates) {
4083                                 list_del_init(&rdev->same_set);
4084                                 if (bind_rdev_to_array(rdev, mddev))
4085                                         export_rdev(rdev);
4086                         }
4087                         autorun_array(mddev);
4088                         mddev_unlock(mddev);
4089                 }
4090                 /* on success, candidates will be empty, on error
4091                  * it won't...
4092                  */
4093                 rdev_for_each_list(rdev, tmp, candidates)
4094                         export_rdev(rdev);
4095                 mddev_put(mddev);
4096         }
4097         printk(KERN_INFO "md: ... autorun DONE.\n");
4098 }
4099 #endif /* !MODULE */
4100
4101 static int get_version(void __user * arg)
4102 {
4103         mdu_version_t ver;
4104
4105         ver.major = MD_MAJOR_VERSION;
4106         ver.minor = MD_MINOR_VERSION;
4107         ver.patchlevel = MD_PATCHLEVEL_VERSION;
4108
4109         if (copy_to_user(arg, &ver, sizeof(ver)))
4110                 return -EFAULT;
4111
4112         return 0;
4113 }
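
/*
 * Userspace sketch (illustrative; not part of md.c): get_version() is
 * serviced through the RAID_VERSION ioctl.  "/dev/md0" is an assumed
 * device name.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/major.h>
#include <linux/raid/md_u.h>

int main(void)
{
	mdu_version_t ver;
	int fd = open("/dev/md0", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, RAID_VERSION, &ver) < 0) {
		perror("RAID_VERSION");
		return 1;
	}
	/* prints the MD_MAJOR/MINOR/PATCHLEVEL_VERSION triple from above */
	printf("md driver %d.%d.%d\n", ver.major, ver.minor, ver.patchlevel);
	return 0;
}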
4114
4115 static int get_array_info(mddev_t * mddev, void __user * arg)
4116 {
4117         mdu_array_info_t info;
4118         int nr,working,active,failed,spare;
4119         mdk_rdev_t *rdev;
4120         struct list_head *tmp;
4121
4122         nr=working=active=failed=spare=0;
4123         rdev_for_each(rdev, tmp, mddev) {
4124                 nr++;
4125                 if (test_bit(Faulty, &rdev->flags))
4126                         failed++;
4127                 else {
4128                         working++;
4129                         if (test_bit(In_sync, &rdev->flags))
4130                                 active++;       
4131                         else
4132                                 spare++;
4133                 }
4134         }
4135
4136         info.major_version = mddev->major_version;
4137         info.minor_version = mddev->minor_version;
4138         info.patch_version = MD_PATCHLEVEL_VERSION;
4139         info.ctime         = mddev->ctime;
4140         info.level         = mddev->level;
4141         info.size          = mddev->size;
4142         if (info.size != mddev->size) /* overflow */
4143                 info.size = -1;
4144         info.nr_disks      = nr;
4145         info.raid_disks    = mddev->raid_disks;
4146         info.md_minor      = mddev->md_minor;
4147         info.not_persistent= !mddev->persistent;
4148
4149         info.utime         = mddev->utime;
4150         info.state         = 0;
4151         if (mddev->in_sync)
4152                 info.state = (1<<MD_SB_CLEAN);
4153         if (mddev->bitmap && mddev->bitmap_offset)
4154                 info.state |= (1<<MD_SB_BITMAP_PRESENT); /* keep MD_SB_CLEAN */
4155         info.active_disks  = active;
4156         info.working_disks = working;
4157         info.failed_disks  = failed;
4158         info.spare_disks   = spare;
4159
4160         info.layout        = mddev->layout;
4161         info.chunk_size    = mddev->chunk_size;
4162
4163         if (copy_to_user(arg, &info, sizeof(info)))
4164                 return -EFAULT;
4165
4166         return 0;
4167 }
4168
4169 static int get_bitmap_file(mddev_t * mddev, void __user * arg)
4170 {
4171         mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */
4172         char *ptr, *buf = NULL;
4173         int err = -ENOMEM;
4174
4175         if (md_allow_write(mddev))
4176                 file = kmalloc(sizeof(*file), GFP_NOIO);
4177         else
4178                 file = kmalloc(sizeof(*file), GFP_KERNEL);
4179
4180         if (!file)
4181                 goto out;
4182
4183         /* bitmap disabled, zero the first byte and copy out */
4184         if (!mddev->bitmap || !mddev->bitmap->file) {
4185                 file->pathname[0] = '\0';
4186                 goto copy_out;
4187         }
4188
4189         buf = kmalloc(sizeof(file->pathname), GFP_KERNEL);
4190         if (!buf)
4191                 goto out;
4192
4193         ptr = d_path(&mddev->bitmap->file->f_path, buf, sizeof(file->pathname));
4194         if (IS_ERR(ptr))
4195                 goto out;
4196
4197         strcpy(file->pathname, ptr);
4198
4199 copy_out:
4200         err = 0;
4201         if (copy_to_user(arg, file, sizeof(*file)))
4202                 err = -EFAULT;
4203 out:
4204         kfree(buf);
4205         kfree(file);
4206         return err;
4207 }
4208
4209 static int get_disk_info(mddev_t * mddev, void __user * arg)
4210 {
4211         mdu_disk_info_t info;
4212         unsigned int nr;
4213         mdk_rdev_t *rdev;
4214
4215         if (copy_from_user(&info, arg, sizeof(info)))
4216                 return -EFAULT;
4217
4218         nr = info.number;
4219
4220         rdev = find_rdev_nr(mddev, nr);
4221         if (rdev) {
4222                 info.major = MAJOR(rdev->bdev->bd_dev);
4223                 info.minor = MINOR(rdev->bdev->bd_dev);
4224                 info.raid_disk = rdev->raid_disk;
4225                 info.state = 0;
4226                 if (test_bit(Faulty, &rdev->flags))
4227                         info.state |= (1<<MD_DISK_FAULTY);
4228                 else if (test_bit(In_sync, &rdev->flags)) {
4229                         info.state |= (1<<MD_DISK_ACTIVE);
4230                         info.state |= (1<<MD_DISK_SYNC);
4231                 }
4232                 if (test_bit(WriteMostly, &rdev->flags))
4233                         info.state |= (1<<MD_DISK_WRITEMOSTLY);
4234         } else {
4235                 info.major = info.minor = 0;
4236                 info.raid_disk = -1;
4237                 info.state = (1<<MD_DISK_REMOVED);
4238         }
4239
4240         if (copy_to_user(arg, &info, sizeof(info)))
4241                 return -EFAULT;
4242
4243         return 0;
4244 }
4245
4246 static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
4247 {
4248         char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
4249         mdk_rdev_t *rdev;
4250         dev_t dev = MKDEV(info->major,info->minor);
4251
4252         if (info->major != MAJOR(dev) || info->minor != MINOR(dev))
4253                 return -EOVERFLOW;
4254
4255         if (!mddev->raid_disks) {
4256                 int err;
4257                 /* expecting a device which has a superblock */
4258                 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
4259                 if (IS_ERR(rdev)) {
4260                         printk(KERN_WARNING 
4261                                 "md: md_import_device returned %ld\n",
4262                                 PTR_ERR(rdev));
4263                         return PTR_ERR(rdev);
4264                 }
4265                 if (!list_empty(&mddev->disks)) {
4266                         mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
4267                                                         mdk_rdev_t, same_set);
4268                         err = super_types[mddev->major_version]
4269                                 .load_super(rdev, rdev0, mddev->minor_version);
4270                         if (err < 0) {
4271                                 printk(KERN_WARNING 
4272                                         "md: %s has different UUID to %s\n",
4273                                         bdevname(rdev->bdev,b), 
4274                                         bdevname(rdev0->bdev,b2));
4275                                 export_rdev(rdev);
4276                                 return -EINVAL;
4277                         }
4278                 }
4279                 err = bind_rdev_to_array(rdev, mddev);
4280                 if (err)
4281                         export_rdev(rdev);
4282                 return err;
4283         }
4284
4285         /*
4286          * add_new_disk can be used once the array is assembled
4287          * to add "hot spares".  They must already have a superblock
4288          * written
4289          */
4290         if (mddev->pers) {
4291                 int err;
4292                 if (!mddev->pers->hot_add_disk) {
4293                         printk(KERN_WARNING 
4294                                 "%s: personality does not support diskops!\n",
4295                                mdname(mddev));
4296                         return -EINVAL;
4297                 }
4298                 if (mddev->persistent)
4299                         rdev = md_import_device(dev, mddev->major_version,
4300                                                 mddev->minor_version);
4301                 else
4302                         rdev = md_import_device(dev, -1, -1);
4303                 if (IS_ERR(rdev)) {
4304                         printk(KERN_WARNING 
4305                                 "md: md_import_device returned %ld\n",
4306                                 PTR_ERR(rdev));
4307                         return PTR_ERR(rdev);
4308                 }
4309                 /* set save_raid_disk if appropriate */
4310                 if (!mddev->persistent) {
4311                         if (info->state & (1<<MD_DISK_SYNC)  &&
4312                             info->raid_disk < mddev->raid_disks)
4313                                 rdev->raid_disk = info->raid_disk;
4314                         else
4315                                 rdev->raid_disk = -1;
4316                 } else
4317                         super_types[mddev->major_version].
4318                                 validate_super(mddev, rdev);
4319                 rdev->saved_raid_disk = rdev->raid_disk;
4320
4321                 clear_bit(In_sync, &rdev->flags); /* just to be sure */
4322                 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
4323                         set_bit(WriteMostly, &rdev->flags);
4324
4325                 rdev->raid_disk = -1;
4326                 err = bind_rdev_to_array(rdev, mddev);
4327                 if (!err && !mddev->pers->hot_remove_disk) {
4328                         /* If there is hot_add_disk but no hot_remove_disk
4329                          * then added disks are for geometry changes
4330                          * and should be added immediately.
4331                          */
4332                         super_types[mddev->major_version].
4333                                 validate_super(mddev, rdev);
4334                         err = mddev->pers->hot_add_disk(mddev, rdev);
4335                         if (err)
4336                                 unbind_rdev_from_array(rdev);
4337                 }
4338                 if (err)
4339                         export_rdev(rdev);
4340                 else
4341                         sysfs_notify(&rdev->kobj, NULL, "state");
4342
4343                 md_update_sb(mddev, 1);
4344                 if (mddev->degraded)
4345                         set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
4346                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4347                 md_wakeup_thread(mddev->thread);
4348                 return err;
4349         }
4350
4351         /* otherwise, add_new_disk is only allowed
4352          * for major_version==0 superblocks
4353          */
4354         if (mddev->major_version != 0) {
4355                 printk(KERN_WARNING "%s: ADD_NEW_DISK not supported\n",
4356                        mdname(mddev));
4357                 return -EINVAL;
4358         }
4359
4360         if (!(info->state & (1<<MD_DISK_FAULTY))) {
4361                 int err;
4362                 rdev = md_import_device (dev, -1, 0);
4363                 if (IS_ERR(rdev)) {
4364                         printk(KERN_WARNING 
4365                                 "md: error, md_import_device() returned %ld\n",
4366                                 PTR_ERR(rdev));
4367                         return PTR_ERR(rdev);
4368                 }
4369                 rdev->desc_nr = info->number;
4370                 if (info->raid_disk < mddev->raid_disks)
4371                         rdev->raid_disk = info->raid_disk;
4372                 else
4373                         rdev->raid_disk = -1;
4374
4375                 if (rdev->raid_disk < mddev->raid_disks)
4376                         if (info->state & (1<<MD_DISK_SYNC))
4377                                 set_bit(In_sync, &rdev->flags);
4378
4379                 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
4380                         set_bit(WriteMostly, &rdev->flags);
4381
4382                 if (!mddev->persistent) {
4383                         printk(KERN_INFO "md: non-persistent superblock ...\n");
4384                         rdev->sb_offset = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
4385                 } else 
4386                         rdev->sb_offset = calc_dev_sboffset(rdev->bdev);
4387                 rdev->size = calc_dev_size(rdev, mddev->chunk_size);
4388
4389                 err = bind_rdev_to_array(rdev, mddev);
4390                 if (err) {
4391                         export_rdev(rdev);
4392                         return err;
4393                 }
4394         }
4395
4396         return 0;
4397 }
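
/*
 * Illustrative sketch (not built with the driver): how a userspace
 * tool might drive the ADD_NEW_DISK paths above to hot-add a spare.
 * The paths and the helper name are hypothetical; the ioctl number
 * and mdu_disk_info_t come from <linux/raid/md_u.h>.
 */
#if 0
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <sys/sysmacros.h>
#include <linux/raid/md_u.h>

static int example_add_spare(const char *md_path, const char *disk_path)
{
        mdu_disk_info_t info;
        struct stat st;
        int fd;

        if (stat(disk_path, &st) < 0)
                return -1;
        fd = open(md_path, O_RDWR);
        if (fd < 0)
                return -1;
        memset(&info, 0, sizeof(info));
        info.major = major(st.st_rdev); /* identifies the component device */
        info.minor = minor(st.st_rdev);
        info.raid_disk = -1;            /* spare: no active slot yet */
        info.state = 0;                 /* neither faulty nor in sync */
        return ioctl(fd, ADD_NEW_DISK, &info);
}
#endif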
4398
4399 static int hot_remove_disk(mddev_t * mddev, dev_t dev)
4400 {
4401         char b[BDEVNAME_SIZE];
4402         mdk_rdev_t *rdev;
4403
4404         rdev = find_rdev(mddev, dev);
4405         if (!rdev)
4406                 return -ENXIO;
4407
4408         if (rdev->raid_disk >= 0)
4409                 goto busy;
4410
4411         kick_rdev_from_array(rdev);
4412         md_update_sb(mddev, 1);
4413         md_new_event(mddev);
4414
4415         return 0;
4416 busy:
4417         printk(KERN_WARNING "md: cannot remove active disk %s from %s ...\n",
4418                 bdevname(rdev->bdev,b), mdname(mddev));
4419         return -EBUSY;
4420 }
4421
4422 static int hot_add_disk(mddev_t * mddev, dev_t dev)
4423 {
4424         char b[BDEVNAME_SIZE];
4425         int err;
4426         unsigned int size;
4427         mdk_rdev_t *rdev;
4428
4429         if (!mddev->pers)
4430                 return -ENODEV;
4431
4432         if (mddev->major_version != 0) {
4433                 printk(KERN_WARNING "%s: HOT_ADD may only be used with"
4434                         " version-0 superblocks.\n",
4435                         mdname(mddev));
4436                 return -EINVAL;
4437         }
4438         if (!mddev->pers->hot_add_disk) {
4439                 printk(KERN_WARNING 
4440                         "%s: personality does not support diskops!\n",
4441                         mdname(mddev));
4442                 return -EINVAL;
4443         }
4444
4445         rdev = md_import_device (dev, -1, 0);
4446         if (IS_ERR(rdev)) {
4447                 printk(KERN_WARNING 
4448                         "md: error, md_import_device() returned %ld\n",
4449                         PTR_ERR(rdev));
4450                 return -EINVAL;
4451         }
4452
4453         if (mddev->persistent)
4454                 rdev->sb_offset = calc_dev_sboffset(rdev->bdev);
4455         else
4456                 rdev->sb_offset =
4457                         rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
4458
4459         size = calc_dev_size(rdev, mddev->chunk_size);
4460         rdev->size = size;
4461
4462         if (test_bit(Faulty, &rdev->flags)) {
4463                 printk(KERN_WARNING 
4464                         "md: cannot hot-add faulty %s disk to %s!\n",
4465                         bdevname(rdev->bdev,b), mdname(mddev));
4466                 err = -EINVAL;
4467                 goto abort_export;
4468         }
4469         clear_bit(In_sync, &rdev->flags);
4470         rdev->desc_nr = -1;
4471         rdev->saved_raid_disk = -1;
4472         err = bind_rdev_to_array(rdev, mddev);
4473         if (err)
4474                 goto abort_export;
4475
4476         /*
4477          * The rest should better be atomic, we can have disk failures
4478          * noticed in interrupt contexts ...
4479          */
4480
4481         if (rdev->desc_nr == mddev->max_disks) {
4482                 printk(KERN_WARNING "%s: cannot hot-add to full array!\n",
4483                         mdname(mddev));
4484                 err = -EBUSY;
4485                 goto abort_unbind_export;
4486         }
4487
4488         rdev->raid_disk = -1;
4489
4490         md_update_sb(mddev, 1);
4491
4492         /*
4493          * Kick recovery, maybe this spare has to be added to the
4494          * array immediately.
4495          */
4496         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4497         md_wakeup_thread(mddev->thread);
4498         md_new_event(mddev);
4499         return 0;
4500
4501 abort_unbind_export:
4502         unbind_rdev_from_array(rdev);
4503
4504 abort_export:
4505         export_rdev(rdev);
4506         return err;
4507 }
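
/*
 * Illustrative sketch (not built with the driver): HOT_ADD_DISK and
 * HOT_REMOVE_DISK carry the component device number in the ioctl
 * argument itself; new_decode_dev() above also understands the old
 * (major << 8) | minor encoding for majors below 4096 and minors
 * below 256.  Helper name and paths are hypothetical.
 */
#if 0
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <sys/sysmacros.h>
#include <linux/raid/md_u.h>

static int example_hot_add(const char *md_path, const char *disk_path)
{
        struct stat st;
        int fd;

        if (stat(disk_path, &st) < 0)
                return -1;
        fd = open(md_path, O_RDWR);
        if (fd < 0)
                return -1;
        /* old-style encoding; fine for small major/minor numbers */
        return ioctl(fd, HOT_ADD_DISK,
                     (major(st.st_rdev) << 8) | minor(st.st_rdev));
}
#endif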
4508
4509 static int set_bitmap_file(mddev_t *mddev, int fd)
4510 {
4511         int err;
4512
4513         if (mddev->pers) {
4514                 if (!mddev->pers->quiesce)
4515                         return -EBUSY;
4516                 if (mddev->recovery || mddev->sync_thread)
4517                         return -EBUSY;
4518                 /* we should be able to change the bitmap.. */
4519         }
4520
4521
4522         if (fd >= 0) {
4523                 if (mddev->bitmap)
4524                         return -EEXIST; /* cannot add when bitmap is present */
4525                 mddev->bitmap_file = fget(fd);
4526
4527                 if (mddev->bitmap_file == NULL) {
4528                         printk(KERN_ERR "%s: error: failed to get bitmap file\n",
4529                                mdname(mddev));
4530                         return -EBADF;
4531                 }
4532
4533                 err = deny_bitmap_write_access(mddev->bitmap_file);
4534                 if (err) {
4535                         printk(KERN_ERR "%s: error: bitmap file is already in use\n",
4536                                mdname(mddev));
4537                         fput(mddev->bitmap_file);
4538                         mddev->bitmap_file = NULL;
4539                         return err;
4540                 }
4541                 mddev->bitmap_offset = 0; /* file overrides offset */
4542         } else if (mddev->bitmap == NULL)
4543                 return -ENOENT; /* cannot remove what isn't there */
4544         err = 0;
4545         if (mddev->pers) {
4546                 mddev->pers->quiesce(mddev, 1);
4547                 if (fd >= 0)
4548                         err = bitmap_create(mddev);
4549                 if (fd < 0 || err) {
4550                         bitmap_destroy(mddev);
4551                         fd = -1; /* make sure to put the file */
4552                 }
4553                 mddev->pers->quiesce(mddev, 0);
4554         }
4555         if (fd < 0) {
4556                 if (mddev->bitmap_file) {
4557                         restore_bitmap_write_access(mddev->bitmap_file);
4558                         fput(mddev->bitmap_file);
4559                 }
4560                 mddev->bitmap_file = NULL;
4561         }
4562
4563         return err;
4564 }
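
/*
 * Illustrative sketch (not built with the driver): SET_BITMAP_FILE
 * takes an open file descriptor for the bitmap file as the ioctl
 * argument, and -1 removes a file-backed bitmap again, matching the
 * fd >= 0 / fd < 0 branches above.  The path is hypothetical.
 */
#if 0
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/raid/md_u.h>

static int example_set_bitmap(int md_fd)
{
        int bitmap_fd = open("/var/lib/md0-bitmap", O_RDWR);

        if (bitmap_fd < 0)
                return -1;
        return ioctl(md_fd, SET_BITMAP_FILE, bitmap_fd);
        /* ioctl(md_fd, SET_BITMAP_FILE, -1) removes the bitmap again */
}
#endif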
4565
4566 /*
4567  * set_array_info is used in two different ways.
4568  * The original usage is when creating a new array.
4569  * In this usage, raid_disks is > 0 and, together with
4570  *  level, size, not_persistent, layout and chunksize, it determines
4571  *  the shape of the array.
4572  *  This will always create an array with a type-0.90.0 superblock.
4573  * The newer usage is when assembling an array.
4574  *  In this case raid_disks will be 0, and the major_version field is
4575  *  used to determine which style of super-blocks is to be found on
4576  *  the devices.
4577  *  The minor and patch _version numbers are also kept in case the
4578  *  super_block handler wishes to interpret them.
4578  */
4579 static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
4580 {
4581
4582         if (info->raid_disks == 0) {
4583                 /* just setting version number for superblock loading */
4584                 if (info->major_version < 0 ||
4585                     info->major_version >= ARRAY_SIZE(super_types) ||
4586                     super_types[info->major_version].name == NULL) {
4587                         /* maybe try to auto-load a module? */
4588                         printk(KERN_INFO 
4589                                 "md: superblock version %d not known\n",
4590                                 info->major_version);
4591                         return -EINVAL;
4592                 }
4593                 mddev->major_version = info->major_version;
4594                 mddev->minor_version = info->minor_version;
4595                 mddev->patch_version = info->patch_version;
4596                 mddev->persistent = !info->not_persistent;
4597                 return 0;
4598         }
4599         mddev->major_version = MD_MAJOR_VERSION;
4600         mddev->minor_version = MD_MINOR_VERSION;
4601         mddev->patch_version = MD_PATCHLEVEL_VERSION;
4602         mddev->ctime         = get_seconds();
4603
4604         mddev->level         = info->level;
4605         mddev->clevel[0]     = 0;
4606         mddev->size          = info->size;
4607         mddev->raid_disks    = info->raid_disks;
4608         /* don't set md_minor, it is determined by which /dev/md* was
4609          * opened
4610          */
4611         if (info->state & (1<<MD_SB_CLEAN))
4612                 mddev->recovery_cp = MaxSector;
4613         else
4614                 mddev->recovery_cp = 0;
4615         mddev->persistent    = ! info->not_persistent;
4616         mddev->external      = 0;
4617
4618         mddev->layout        = info->layout;
4619         mddev->chunk_size    = info->chunk_size;
4620
4621         mddev->max_disks     = MD_SB_DISKS;
4622
4623         if (mddev->persistent)
4624                 mddev->flags         = 0;
4625         set_bit(MD_CHANGE_DEVS, &mddev->flags);
4626
4627         mddev->default_bitmap_offset = MD_SB_BYTES >> 9;
4628         mddev->bitmap_offset = 0;
4629
4630         mddev->reshape_position = MaxSector;
4631
4632         /*
4633          * Generate a 128 bit UUID
4634          */
4635         get_random_bytes(mddev->uuid, 16);
4636
4637         mddev->new_level = mddev->level;
4638         mddev->new_chunk = mddev->chunk_size;
4639         mddev->new_layout = mddev->layout;
4640         mddev->delta_disks = 0;
4641
4642         return 0;
4643 }
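
/*
 * Illustrative sketch (not built with the driver): the two usages
 * described in the comment above, as seen from userspace.  All the
 * concrete values are hypothetical examples.
 */
#if 0
#include <string.h>
#include <sys/ioctl.h>
#include <linux/raid/md_u.h>

static int example_set_array_info(int md_fd, int assembling)
{
        mdu_array_info_t info;

        memset(&info, 0, sizeof(info));
        if (assembling) {
                /* raid_disks == 0: just pick the superblock style */
                info.major_version = 1;
                info.minor_version = 2;
        } else {
                /* raid_disks > 0: create, always a 0.90.0 superblock */
                info.level = 1;                 /* RAID-1 */
                info.raid_disks = 2;
                info.size = 524288;             /* per-device size in KB */
                info.chunk_size = 64 * 1024;    /* in bytes */
        }
        return ioctl(md_fd, SET_ARRAY_INFO, &info);
}
#endif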
4644
4645 static int update_size(mddev_t *mddev, unsigned long size)
4646 {
4647         mdk_rdev_t * rdev;
4648         int rv;
4649         struct list_head *tmp;
4650         int fit = (size == 0);
4651
4652         if (mddev->pers->resize == NULL)
4653                 return -EINVAL;
4654         /* The "size" is the amount of each device that is used.
4655          * This can only make sense for arrays with redundancy.
4656          * Linear and raid0 always use whatever space is available.
4657          * We can only consider changing the size if no resync
4658          * or reconstruction is happening, and if the new size
4659          * is acceptable. It must fit before the sb_offset or,
4660          * if that is <data_offset, it must fit before the
4661          * size of each device.
4662          * If size is zero, we find the largest size that fits.
4663          */
4664         if (mddev->sync_thread)
4665                 return -EBUSY;
4666         rdev_for_each(rdev, tmp, mddev) {
4667                 sector_t avail;
4668                 avail = rdev->size * 2;
4669
4670                 if (fit && (size == 0 || size > avail/2))
4671                         size = avail/2;
4672                 if (avail < ((sector_t)size << 1))
4673                         return -ENOSPC;
4674         }
4675         rv = mddev->pers->resize(mddev, (sector_t)size *2);
4676         if (!rv) {
4677                 struct block_device *bdev;
4678
4679                 bdev = bdget_disk(mddev->gendisk, 0);
4680                 if (bdev) {
4681                         mutex_lock(&bdev->bd_inode->i_mutex);
4682                         i_size_write(bdev->bd_inode, (loff_t)mddev->array_size << 10);
4683                         mutex_unlock(&bdev->bd_inode->i_mutex);
4684                         bdput(bdev);
4685                 }
4686         }
4687         return rv;
4688 }
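
/*
 * Worked example for the "fit" logic above (hypothetical numbers):
 * with 500 GiB and 400 GiB members, size == 0 is lowered to each
 * member's capacity in turn and ends up at the 400 GiB member's size
 * in KB, while an explicit size of 450 GiB would fail with -ENOSPC
 * on the smaller member.
 */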
4689
4690 static int update_raid_disks(mddev_t *mddev, int raid_disks)
4691 {
4692         int rv;
4693         /* change the number of raid disks */
4694         if (mddev->pers->check_reshape == NULL)
4695                 return -EINVAL;
4696         if (raid_disks <= 0 ||
4697             raid_disks >= mddev->max_disks)
4698                 return -EINVAL;
4699         if (mddev->sync_thread || mddev->reshape_position != MaxSector)
4700                 return -EBUSY;
4701         mddev->delta_disks = raid_disks - mddev->raid_disks;
4702
4703         rv = mddev->pers->check_reshape(mddev);
4704         return rv;
4705 }
4706
4707
4708 /*
4709  * update_array_info is used to change the configuration of an
4710  * on-line array.
4711  * The version, ctime, level, size, raid_disks, not_persistent, layout and chunk_size
4712  * fields in the info are checked against the array.
4713  * Any differences that cannot be handled will cause an error.
4714  * Normally, only one change can be managed at a time.
4715  */
4716 static int update_array_info(mddev_t *mddev, mdu_array_info_t *info)
4717 {
4718         int rv = 0;
4719         int cnt = 0;
4720         int state = 0;
4721
4722         /* calculate expected state, ignoring low bits */
4723         if (mddev->bitmap && mddev->bitmap_offset)
4724                 state |= (1 << MD_SB_BITMAP_PRESENT);
4725
4726         if (mddev->major_version != info->major_version ||
4727             mddev->minor_version != info->minor_version ||
4728 /*          mddev->patch_version != info->patch_version || */
4729             mddev->ctime         != info->ctime         ||
4730             mddev->level         != info->level         ||
4731 /*          mddev->layout        != info->layout        || */
4732             !mddev->persistent   != info->not_persistent||
4733             mddev->chunk_size    != info->chunk_size    ||
4734             /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
4735             ((state^info->state) & 0xfffffe00)
4736                 )
4737                 return -EINVAL;
4738         /* Check there is only one change */
4739         if (info->size >= 0 && mddev->size != info->size) cnt++;
4740         if (mddev->raid_disks != info->raid_disks) cnt++;
4741         if (mddev->layout != info->layout) cnt++;
4742         if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) cnt++;
4743         if (cnt == 0) return 0;
4744         if (cnt > 1) return -EINVAL;
4745
4746         if (mddev->layout != info->layout) {
4747                 /* Change layout:
4748                  * we don't need to do anything at the md level; the
4749                  * personality will take care of it all.
4750                  */
4751                 if (mddev->pers->reconfig == NULL)
4752                         return -EINVAL;
4753                 else
4754                         return mddev->pers->reconfig(mddev, info->layout, -1);
4755         }
4756         if (info->size >= 0 && mddev->size != info->size)
4757                 rv = update_size(mddev, info->size);
4758
4759         if (mddev->raid_disks    != info->raid_disks)
4760                 rv = update_raid_disks(mddev, info->raid_disks);
4761
4762         if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
4763                 if (mddev->pers->quiesce == NULL)
4764                         return -EINVAL;
4765                 if (mddev->recovery || mddev->sync_thread)
4766                         return -EBUSY;
4767                 if (info->state & (1<<MD_SB_BITMAP_PRESENT)) {
4768                         /* add the bitmap */
4769                         if (mddev->bitmap)
4770                                 return -EEXIST;
4771                         if (mddev->default_bitmap_offset == 0)
4772                                 return -EINVAL;
4773                         mddev->bitmap_offset = mddev->default_bitmap_offset;
4774                         mddev->pers->quiesce(mddev, 1);
4775                         rv = bitmap_create(mddev);
4776                         if (rv)
4777                                 bitmap_destroy(mddev);
4778                         mddev->pers->quiesce(mddev, 0);
4779                 } else {
4780                         /* remove the bitmap */
4781                         if (!mddev->bitmap)
4782                                 return -ENOENT;
4783                         if (mddev->bitmap->file)
4784                                 return -EINVAL;
4785                         mddev->pers->quiesce(mddev, 1);
4786                         bitmap_destroy(mddev);
4787                         mddev->pers->quiesce(mddev, 0);
4788                         mddev->bitmap_offset = 0;
4789                 }
4790         }
4791         md_update_sb(mddev, 1);
4792         return rv;
4793 }
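
/*
 * Illustrative sketch (not built with the driver): since only one
 * change is accepted per call, growing the per-device size is a
 * read-modify-write of the array info touching nothing but "size".
 * Helper name is hypothetical.
 */
#if 0
#include <sys/ioctl.h>
#include <linux/raid/md_u.h>

static int example_grow_array(int md_fd, int new_size_kb)
{
        mdu_array_info_t info;

        if (ioctl(md_fd, GET_ARRAY_INFO, &info) < 0)
                return -1;
        info.size = new_size_kb;        /* the single permitted change */
        return ioctl(md_fd, SET_ARRAY_INFO, &info);
}
#endif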
4794
4795 static int set_disk_faulty(mddev_t *mddev, dev_t dev)
4796 {
4797         mdk_rdev_t *rdev;
4798
4799         if (mddev->pers == NULL)
4800                 return -ENODEV;
4801
4802         rdev = find_rdev(mddev, dev);
4803         if (!rdev)
4804                 return -ENODEV;
4805
4806         md_error(mddev, rdev);
4807         return 0;
4808 }
4809
4810 /*
4811  * We have a problem here : there is no easy way to give a CHS
4812  * virtual geometry. We currently pretend that we have a 2 heads
4813  * 4 sectors (with a BIG number of cylinders...). This drives
4814  * dosfs just mad... ;-)
4815  */
4816 static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
4817 {
4818         mddev_t *mddev = bdev->bd_disk->private_data;
4819
4820         geo->heads = 2;
4821         geo->sectors = 4;
4822         geo->cylinders = get_capacity(mddev->gendisk) / 8;
4823         return 0;
4824 }
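
/*
 * Worked example: with 2 heads and 4 sectors, one cylinder is 8
 * sectors, so a 64 MiB array (131072 sectors) reports 16384
 * cylinders.  Note that struct hd_geometry carries cylinders in a
 * 16-bit field, so the value is truncated for really big arrays.
 * The userspace sketch below is illustrative only.
 */
#if 0
#include <sys/ioctl.h>
#include <linux/hdreg.h>

static int example_getgeo(int md_fd)
{
        struct hd_geometry geo;

        if (ioctl(md_fd, HDIO_GETGEO, &geo) < 0)
                return -1;
        return geo.cylinders;   /* capacity in sectors / 8, truncated */
}
#endif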
4825
4826 static int md_ioctl(struct inode *inode, struct file *file,
4827                         unsigned int cmd, unsigned long arg)
4828 {
4829         int err = 0;
4830         void __user *argp = (void __user *)arg;
4831         mddev_t *mddev = NULL;
4832
4833         if (!capable(CAP_SYS_ADMIN))
4834                 return -EACCES;
4835
4836         /*
4837          * Commands dealing with the RAID driver but not any
4838          * particular array:
4839          */
4840         switch (cmd)
4841         {
4842                 case RAID_VERSION:
4843                         err = get_version(argp);
4844                         goto done;
4845
4846                 case PRINT_RAID_DEBUG:
4847                         err = 0;
4848                         md_print_devices();
4849                         goto done;
4850
4851 #ifndef MODULE
4852                 case RAID_AUTORUN:
4853                         err = 0;
4854                         autostart_arrays(arg);
4855                         goto done;
4856 #endif
4857                 default:;
4858         }
4859
4860         /*
4861          * Commands creating/starting a new array:
4862          */
4863
4864         mddev = inode->i_bdev->bd_disk->private_data;
4865
4866         if (!mddev) {
4867                 BUG();
4868                 goto abort;
4869         }
4870
4871         err = mddev_lock(mddev);
4872         if (err) {
4873                 printk(KERN_INFO 
4874                         "md: ioctl lock interrupted, reason %d, cmd %d\n",
4875                         err, cmd);
4876                 goto abort;
4877         }
4878
4879         switch (cmd)
4880         {
4881                 case SET_ARRAY_INFO:
4882                         {
4883                                 mdu_array_info_t info;
4884                                 if (!arg)
4885                                         memset(&info, 0, sizeof(info));
4886                                 else if (copy_from_user(&info, argp, sizeof(info))) {
4887                                         err = -EFAULT;
4888                                         goto abort_unlock;
4889                                 }
4890                                 if (mddev->pers) {
4891                                         err = update_array_info(mddev, &info);
4892                                         if (err) {
4893                                                 printk(KERN_WARNING "md: couldn't update"
4894                                                        " array info. %d\n", err);
4895                                                 goto abort_unlock;
4896                                         }
4897                                         goto done_unlock;
4898                                 }
4899                                 if (!list_empty(&mddev->disks)) {
4900                                         printk(KERN_WARNING
4901                                                "md: array %s already has disks!\n",
4902                                                mdname(mddev));
4903                                         err = -EBUSY;
4904                                         goto abort_unlock;
4905                                 }
4906                                 if (mddev->raid_disks) {
4907                                         printk(KERN_WARNING
4908                                                "md: array %s already initialised!\n",
4909                                                mdname(mddev));
4910                                         err = -EBUSY;
4911                                         goto abort_unlock;
4912                                 }
4913                                 err = set_array_info(mddev, &info);
4914                                 if (err) {
4915                                         printk(KERN_WARNING "md: couldn't set"
4916                                                " array info. %d\n", err);
4917                                         goto abort_unlock;
4918                                 }
4919                         }
4920                         goto done_unlock;
4921
4922                 default:;
4923         }
4924
4925         /*
4926          * Commands querying/configuring an existing array:
4927          */
4928         /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY,
4929          * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */
4930         if ((!mddev->raid_disks && !mddev->external)
4931             && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
4932             && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE
4933             && cmd != GET_BITMAP_FILE) {
4934                 err = -ENODEV;
4935                 goto abort_unlock;
4936         }
4937
4938         /*
4939          * Commands even a read-only array can execute:
4940          */
4941         switch (cmd)
4942         {
4943                 case GET_ARRAY_INFO:
4944                         err = get_array_info(mddev, argp);
4945                         goto done_unlock;
4946
4947                 case GET_BITMAP_FILE:
4948                         err = get_bitmap_file(mddev, argp);
4949                         goto done_unlock;
4950
4951                 case GET_DISK_INFO:
4952                         err = get_disk_info(mddev, argp);
4953                         goto done_unlock;
4954
4955                 case RESTART_ARRAY_RW:
4956                         err = restart_array(mddev);
4957                         goto done_unlock;
4958
4959                 case STOP_ARRAY:
4960                         err = do_md_stop (mddev, 0);
4961                         goto done_unlock;
4962
4963                 case STOP_ARRAY_RO:
4964                         err = do_md_stop (mddev, 1);
4965                         goto done_unlock;
4966
4967         }
4968
4969         /*
4970          * The remaining ioctls are changing the state of the
4971          * superblock, so we do not allow them on read-only arrays.
4972          * However non-MD ioctls (e.g. get-size) will still come through
4973          * here and hit the 'default' below, so only disallow
4974          * 'md' ioctls, and switch to rw mode if started auto-readonly.
4975          */
4976         if (_IOC_TYPE(cmd) == MD_MAJOR && mddev->ro && mddev->pers) {
4977                 if (mddev->ro == 2) {
4978                         mddev->ro = 0;
4979                         sysfs_notify(&mddev->kobj, NULL, "array_state");
4980                         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4981                         md_wakeup_thread(mddev->thread);
4982                 } else {
4983                         err = -EROFS;
4984                         goto abort_unlock;
4985                 }
4986         }
4987
4988         switch (cmd)
4989         {
4990                 case ADD_NEW_DISK:
4991                 {
4992                         mdu_disk_info_t info;
4993                         if (copy_from_user(&info, argp, sizeof(info)))
4994                                 err = -EFAULT;
4995                         else
4996                                 err = add_new_disk(mddev, &info);
4997                         goto done_unlock;
4998                 }
4999
5000                 case HOT_REMOVE_DISK:
5001                         err = hot_remove_disk(mddev, new_decode_dev(arg));
5002                         goto done_unlock;
5003
5004                 case HOT_ADD_DISK:
5005                         err = hot_add_disk(mddev, new_decode_dev(arg));
5006                         goto done_unlock;
5007
5008                 case SET_DISK_FAULTY:
5009                         err = set_disk_faulty(mddev, new_decode_dev(arg));
5010                         goto done_unlock;
5011
5012                 case RUN_ARRAY:
5013                         err = do_md_run (mddev);
5014                         goto done_unlock;
5015
5016                 case SET_BITMAP_FILE:
5017                         err = set_bitmap_file(mddev, (int)arg);
5018                         goto done_unlock;
5019
5020                 default:
5021                         err = -EINVAL;
5022                         goto abort_unlock;
5023         }
5024
5025 done_unlock:
5026 abort_unlock:
5027         mddev_unlock(mddev);
5028
5029         return err;
5030 done:
5031         if (err)
5032                 MD_BUG();
5033 abort:
5034         return err;
5035 }
5036
5037 static int md_open(struct inode *inode, struct file *file)
5038 {
5039         /*
5040          * Succeed if we can lock the mddev, which confirms that
5041          * it isn't being stopped right now.
5042          */
5043         mddev_t *mddev = inode->i_bdev->bd_disk->private_data;
5044         int err;
5045
5046         if ((err = mutex_lock_interruptible_nested(&mddev->reconfig_mutex, 1)))
5047                 goto out;
5048
5049         err = 0;
5050         mddev_get(mddev);
5051         mddev_unlock(mddev);
5052
5053         check_disk_change(inode->i_bdev);
5054  out:
5055         return err;
5056 }
5057
5058 static int md_release(struct inode *inode, struct file * file)
5059 {
5060         mddev_t *mddev = inode->i_bdev->bd_disk->private_data;
5061
5062         BUG_ON(!mddev);
5063         mddev_put(mddev);
5064
5065         return 0;
5066 }
5067
5068 static int md_media_changed(struct gendisk *disk)
5069 {
5070         mddev_t *mddev = disk->private_data;
5071
5072         return mddev->changed;
5073 }
5074
5075 static int md_revalidate(struct gendisk *disk)
5076 {
5077         mddev_t *mddev = disk->private_data;
5078
5079         mddev->changed = 0;
5080         return 0;
5081 }
5082 static struct block_device_operations md_fops =
5083 {
5084         .owner          = THIS_MODULE,
5085         .open           = md_open,
5086         .release        = md_release,
5087         .ioctl          = md_ioctl,
5088         .getgeo         = md_getgeo,
5089         .media_changed  = md_media_changed,
5090         .revalidate_disk= md_revalidate,
5091 };
5092
5093 static int md_thread(void * arg)
5094 {
5095         mdk_thread_t *thread = arg;
5096
5097         /*
5098          * md_thread is a 'system-thread'; its priority should be very
5099          * high. We avoid resource deadlocks individually in each
5100          * raid personality. (RAID5 does preallocation) We also use RR and
5101          * the very same RT priority as kswapd, thus we will never get
5102          * into a priority inversion deadlock.
5103          *
5104          * we definitely have to have equal or higher priority than
5105          * bdflush, otherwise bdflush will deadlock if there are too
5106          * many dirty RAID5 blocks.
5107          */
5108
5109         allow_signal(SIGKILL);
5110         while (!kthread_should_stop()) {
5111
5112                 /* We need to wait INTERRUPTIBLE so that
5113                  * we don't add to the load-average.
5114                  * That means we need to be sure no signals are
5115                  * pending.
5116                  */
5117                 if (signal_pending(current))
5118                         flush_signals(current);
5119
5120                 wait_event_interruptible_timeout
5121                         (thread->wqueue,
5122                          test_bit(THREAD_WAKEUP, &thread->flags)
5123                          || kthread_should_stop(),
5124                          thread->timeout);
5125
5126                 clear_bit(THREAD_WAKEUP, &thread->flags);
5127
5128                 thread->run(thread->mddev);
5129         }
5130
5131         return 0;
5132 }
5133
5134 void md_wakeup_thread(mdk_thread_t *thread)
5135 {
5136         if (thread) {
5137                 dprintk("md: waking up MD thread %s.\n", thread->tsk->comm);
5138                 set_bit(THREAD_WAKEUP, &thread->flags);
5139                 wake_up(&thread->wqueue);
5140         }
5141 }
5142
5143 mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev,
5144                                  const char *name)
5145 {
5146         mdk_thread_t *thread;
5147
5148         thread = kzalloc(sizeof(mdk_thread_t), GFP_KERNEL);
5149         if (!thread)
5150                 return NULL;
5151
5152         init_waitqueue_head(&thread->wqueue);
5153
5154         thread->run = run;
5155         thread->mddev = mddev;
5156         thread->timeout = MAX_SCHEDULE_TIMEOUT;
5157         thread->tsk = kthread_run(md_thread, thread, name, mdname(thread->mddev));
5158         if (IS_ERR(thread->tsk)) {
5159                 kfree(thread);
5160                 return NULL;
5161         }
5162         return thread;
5163 }
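
/*
 * Illustrative sketch (not built with the driver): the usual
 * life-cycle of an md thread as a personality would drive it.  The
 * "foo" personality and its handlers are hypothetical.
 */
#if 0
static void food(mddev_t *mddev)
{
        /* called whenever THREAD_WAKEUP is set, and on every timeout */
}

static int foo_run(mddev_t *mddev)
{
        mddev->thread = md_register_thread(food, mddev, "%s_foo");
        return mddev->thread ? 0 : -ENOMEM;
}

static int foo_stop(mddev_t *mddev)
{
        md_unregister_thread(mddev->thread);
        mddev->thread = NULL;
        return 0;
}
#endif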
5164
5165 void md_unregister_thread(mdk_thread_t *thread)
5166 {
5167         dprintk("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
5168
5169         kthread_stop(thread->tsk);
5170         kfree(thread);
5171 }
5172
5173 void md_error(mddev_t *mddev, mdk_rdev_t *rdev)
5174 {
5175         if (!mddev) {
5176                 MD_BUG();
5177                 return;
5178         }
5179
5180         if (!rdev || test_bit(Faulty, &rdev->flags))
5181                 return;
5182
5183         if (mddev->external)
5184                 set_bit(Blocked, &rdev->flags);
5185 /*
5186         dprintk("md_error dev:%s, rdev:(%d:%d), (caller: %p,%p,%p,%p).\n",
5187                 mdname(mddev),
5188                 MAJOR(rdev->bdev->bd_dev), MINOR(rdev->bdev->bd_dev),
5189                 __builtin_return_address(0),__builtin_return_address(1),
5190                 __builtin_return_address(2),__builtin_return_address(3));
5191 */
5192         if (!mddev->pers)
5193                 return;
5194         if (!mddev->pers->error_handler)
5195                 return;
5196         mddev->pers->error_handler(mddev,rdev);
5197         if (mddev->degraded)
5198                 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
5199         set_bit(StateChanged, &rdev->flags);
5200         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5201         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5202         md_wakeup_thread(mddev->thread);
5203         md_new_event_inintr(mddev);
5204 }
5205
5206 /* seq_file implementation /proc/mdstat */
5207
5208 static void status_unused(struct seq_file *seq)
5209 {
5210         int i = 0;
5211         mdk_rdev_t *rdev;
5212         struct list_head *tmp;
5213
5214         seq_printf(seq, "unused devices: ");
5215
5216         rdev_for_each_list(rdev, tmp, pending_raid_disks) {
5217                 char b[BDEVNAME_SIZE];
5218                 i++;
5219                 seq_printf(seq, "%s ",
5220                               bdevname(rdev->bdev,b));
5221         }
5222         if (!i)
5223                 seq_printf(seq, "<none>");
5224
5225         seq_printf(seq, "\n");
5226 }
5227
5228
5229 static void status_resync(struct seq_file *seq, mddev_t * mddev)
5230 {
5231         sector_t max_blocks, resync, res;
5232         unsigned long dt, db, rt;
5233         int scale;
5234         unsigned int per_milli;
5235
5236         resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active))/2;
5237
5238         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
5239                 max_blocks = mddev->resync_max_sectors >> 1;
5240         else
5241                 max_blocks = mddev->size;
5242
5243         /*
5244          * Should not happen.
5245          */
5246         if (!max_blocks) {
5247                 MD_BUG();
5248                 return;
5249         }
5250         /* Pick 'scale' such that (resync>>scale)*1000 will fit
5251          * in a sector_t, and (max_blocks>>scale) will fit in a
5252          * u32, as those are the requirements for sector_div.
5253          * Thus 'scale' must be at least 10
5254          */
5255         scale = 10;
5256         if (sizeof(sector_t) > sizeof(unsigned long)) {
5257                 while ( max_blocks/2 > (1ULL<<(scale+32)))
5258                         scale++;
5259         }
5260         res = (resync>>scale)*1000;
5261         sector_div(res, (u32)((max_blocks>>scale)+1));
5262
5263         per_milli = res;
5264         {
5265                 int i, x = per_milli/50, y = 20-x;
5266                 seq_printf(seq, "[");
5267                 for (i = 0; i < x; i++)
5268                         seq_printf(seq, "=");
5269                 seq_printf(seq, ">");
5270                 for (i = 0; i < y; i++)
5271                         seq_printf(seq, ".");
5272                 seq_printf(seq, "] ");
5273         }
5274         seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)",
5275                    (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)?
5276                     "reshape" :
5277                     (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)?
5278                      "check" :
5279                      (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
5280                       "resync" : "recovery"))),
5281                    per_milli/10, per_milli % 10,
5282                    (unsigned long long) resync,
5283                    (unsigned long long) max_blocks);
5284
5285         /*
5286          * We do not want to overflow, so the order of operands and
5287          * the * 100 / 100 trick are important. We do a +1 to be
5288          * safe against division by zero. We only estimate anyway.
5289          *
5290          * dt: time from mark until now
5291          * db: blocks written from mark until now
5292          * rt: remaining time
5293          */
5294         dt = ((jiffies - mddev->resync_mark) / HZ);
5295         if (!dt) dt++;
5296         db = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active))
5297                 - mddev->resync_mark_cnt;
5298         rt = (dt * ((unsigned long)(max_blocks-resync) / (db/2/100+1)))/100;
5299
5300         seq_printf(seq, " finish=%lu.%lumin", rt / 60, (rt % 60)/6);
5301
5302         seq_printf(seq, " speed=%ldK/sec", db/2/dt);
5303 }
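
/*
 * Worked example for the 'scale' calculation above (hypothetical
 * numbers): a 16 TiB resync has max_blocks = 2^34 1K blocks, so
 * max_blocks >> 10 = 2^24 fits the u32 divisor and
 * (resync >> 10) * 1000 stays well inside sector_t; scale only
 * grows past 10 for arrays beyond 2^43 blocks.
 */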
5304
5305 static void *md_seq_start(struct seq_file *seq, loff_t *pos)
5306 {
5307         struct list_head *tmp;
5308         loff_t l = *pos;
5309         mddev_t *mddev;
5310
5311         if (l >= 0x10000)
5312                 return NULL;
5313         if (!l--)
5314                 /* header */
5315                 return (void*)1;
5316
5317         spin_lock(&all_mddevs_lock);
5318         list_for_each(tmp,&all_mddevs)
5319                 if (!l--) {
5320                         mddev = list_entry(tmp, mddev_t, all_mddevs);
5321                         mddev_get(mddev);
5322                         spin_unlock(&all_mddevs_lock);
5323                         return mddev;
5324                 }
5325         spin_unlock(&all_mddevs_lock);
5326         if (!l--)
5327                 return (void*)2;/* tail */
5328         return NULL;
5329 }
5330
5331 static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
5332 {
5333         struct list_head *tmp;
5334         mddev_t *next_mddev, *mddev = v;
5335         
5336         ++*pos;
5337         if (v == (void*)2)
5338                 return NULL;
5339
5340         spin_lock(&all_mddevs_lock);
5341         if (v == (void*)1)
5342                 tmp = all_mddevs.next;
5343         else
5344                 tmp = mddev->all_mddevs.next;
5345         if (tmp != &all_mddevs)
5346                 next_mddev = mddev_get(list_entry(tmp,mddev_t,all_mddevs));
5347         else {
5348                 next_mddev = (void*)2;
5349                 *pos = 0x10000;
5350         }               
5351         spin_unlock(&all_mddevs_lock);
5352
5353         if (v != (void*)1)
5354                 mddev_put(mddev);
5355         return next_mddev;
5356
5357 }
5358
5359 static void md_seq_stop(struct seq_file *seq, void *v)
5360 {
5361         mddev_t *mddev = v;
5362
5363         if (mddev && v != (void*)1 && v != (void*)2)
5364                 mddev_put(mddev);
5365 }
5366
5367 struct mdstat_info {
5368         int event;
5369 };
5370
5371 static int md_seq_show(struct seq_file *seq, void *v)
5372 {
5373         mddev_t *mddev = v;
5374         sector_t size;
5375         struct list_head *tmp2;
5376         mdk_rdev_t *rdev;
5377         struct mdstat_info *mi = seq->private;
5378         struct bitmap *bitmap;
5379
5380         if (v == (void*)1) {
5381                 struct mdk_personality *pers;
5382                 seq_printf(seq, "Personalities : ");
5383                 spin_lock(&pers_lock);
5384                 list_for_each_entry(pers, &pers_list, list)
5385                         seq_printf(seq, "[%s] ", pers->name);
5386
5387                 spin_unlock(&pers_lock);
5388                 seq_printf(seq, "\n");
5389                 mi->event = atomic_read(&md_event_count);
5390                 return 0;
5391         }
5392         if (v == (void*)2) {
5393                 status_unused(seq);
5394                 return 0;
5395         }
5396
5397         if (mddev_lock(mddev) < 0)
5398                 return -EINTR;
5399
5400         if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
5401                 seq_printf(seq, "%s : %sactive", mdname(mddev),
5402                                                 mddev->pers ? "" : "in");
5403                 if (mddev->pers) {
5404                         if (mddev->ro==1)
5405                                 seq_printf(seq, " (read-only)");
5406                         if (mddev->ro==2)
5407                                 seq_printf(seq, " (auto-read-only)");
5408                         seq_printf(seq, " %s", mddev->pers->name);
5409                 }
5410
5411                 size = 0;
5412                 rdev_for_each(rdev, tmp2, mddev) {
5413                         char b[BDEVNAME_SIZE];
5414                         seq_printf(seq, " %s[%d]",
5415                                 bdevname(rdev->bdev,b), rdev->desc_nr);
5416                         if (test_bit(WriteMostly, &rdev->flags))
5417                                 seq_printf(seq, "(W)");
5418                         if (test_bit(Faulty, &rdev->flags)) {
5419                                 seq_printf(seq, "(F)");
5420                                 continue;
5421                         } else if (rdev->raid_disk < 0)
5422                                 seq_printf(seq, "(S)"); /* spare */
5423                         size += rdev->size;
5424                 }
5425
5426                 if (!list_empty(&mddev->disks)) {
5427                         if (mddev->pers)
5428                                 seq_printf(seq, "\n      %llu blocks",
5429                                         (unsigned long long)mddev->array_size);
5430                         else
5431                                 seq_printf(seq, "\n      %llu blocks",
5432                                         (unsigned long long)size);
5433                 }
5434                 if (mddev->persistent) {
5435                         if (mddev->major_version != 0 ||
5436                             mddev->minor_version != 90) {
5437                                 seq_printf(seq," super %d.%d",
5438                                            mddev->major_version,
5439                                            mddev->minor_version);
5440                         }
5441                 } else if (mddev->external)
5442                         seq_printf(seq, " super external:%s",
5443                                    mddev->metadata_type);
5444                 else
5445                         seq_printf(seq, " super non-persistent");
5446
5447                 if (mddev->pers) {
5448                         mddev->pers->status (seq, mddev);
5449                         seq_printf(seq, "\n      ");
5450                         if (mddev->pers->sync_request) {
5451                                 if (mddev->curr_resync > 2) {
5452                                         status_resync (seq, mddev);
5453                                         seq_printf(seq, "\n      ");
5454                                 } else if (mddev->curr_resync == 1 || mddev->curr_resync == 2)
5455                                         seq_printf(seq, "\tresync=DELAYED\n      ");
5456                                 else if (mddev->recovery_cp < MaxSector)
5457                                         seq_printf(seq, "\tresync=PENDING\n      ");
5458                         }
5459                 } else
5460                         seq_printf(seq, "\n       ");
5461
5462                 if ((bitmap = mddev->bitmap)) {
5463                         unsigned long chunk_kb;
5464                         unsigned long flags;
5465                         spin_lock_irqsave(&bitmap->lock, flags);
5466                         chunk_kb = bitmap->chunksize >> 10;
5467                         seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], "
5468                                 "%lu%s chunk",
5469                                 bitmap->pages - bitmap->missing_pages,
5470                                 bitmap->pages,
5471                                 (bitmap->pages - bitmap->missing_pages)
5472                                         << (PAGE_SHIFT - 10),
5473                                 chunk_kb ? chunk_kb : bitmap->chunksize,
5474                                 chunk_kb ? "KB" : "B");
5475                         if (bitmap->file) {
5476                                 seq_printf(seq, ", file: ");
5477                                 seq_path(seq, &bitmap->file->f_path, " \t\n");
5478                         }
5479
5480                         seq_printf(seq, "\n");
5481                         spin_unlock_irqrestore(&bitmap->lock, flags);
5482                 }
5483
5484                 seq_printf(seq, "\n");
5485         }
5486         mddev_unlock(mddev);
5487         
5488         return 0;
5489 }
5490
5491 static struct seq_operations md_seq_ops = {
5492         .start  = md_seq_start,
5493         .next   = md_seq_next,
5494         .stop   = md_seq_stop,
5495         .show   = md_seq_show,
5496 };
5497
5498 static int md_seq_open(struct inode *inode, struct file *file)
5499 {
5500         int error;
5501         struct mdstat_info *mi = kmalloc(sizeof(*mi), GFP_KERNEL);
5502         if (mi == NULL)
5503                 return -ENOMEM;
5504
5505         error = seq_open(file, &md_seq_ops);
5506         if (error)
5507                 kfree(mi);
5508         else {
5509                 struct seq_file *p = file->private_data;
5510                 p->private = mi;
5511                 mi->event = atomic_read(&md_event_count);
5512         }
5513         return error;
5514 }
5515
5516 static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
5517 {
5518         struct seq_file *m = filp->private_data;
5519         struct mdstat_info *mi = m->private;
5520         int mask;
5521
5522         poll_wait(filp, &md_event_waiters, wait);
5523
5524         /* always allow read */
5525         mask = POLLIN | POLLRDNORM;
5526
5527         if (mi->event != atomic_read(&md_event_count))
5528                 mask |= POLLERR | POLLPRI;
5529         return mask;
5530 }
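
/*
 * Illustrative sketch (not built with the driver): a monitor can
 * sleep on /proc/mdstat and be woken by md events.  Each read of the
 * file records the current event count in mdstat_info, so the file
 * must be re-read to re-arm before every poll.  Helper name is
 * hypothetical and error handling is elided.
 */
#if 0
#include <fcntl.h>
#include <poll.h>
#include <unistd.h>

static void example_watch_mdstat(void)
{
        char buf[4096];
        struct pollfd pfd;

        pfd.fd = open("/proc/mdstat", O_RDONLY);
        pfd.events = POLLPRI;
        if (pfd.fd < 0)
                return;
        for (;;) {
                lseek(pfd.fd, 0, SEEK_SET);
                read(pfd.fd, buf, sizeof(buf)); /* re-arms the event count */
                poll(&pfd, 1, -1);              /* returns on md_new_event() */
        }
}
#endif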
5531
5532 static const struct file_operations md_seq_fops = {
5533         .owner          = THIS_MODULE,
5534         .open           = md_seq_open,
5535         .read           = seq_read,
5536         .llseek         = seq_lseek,
5537         .release        = seq_release_private,
5538         .poll           = mdstat_poll,
5539 };
5540
5541 int register_md_personality(struct mdk_personality *p)
5542 {
5543         spin_lock(&pers_lock);
5544         list_add_tail(&p->list, &pers_list);
5545         printk(KERN_INFO "md: %s personality registered for level %d\n", p->name, p->level);
5546         spin_unlock(&pers_lock);
5547         return 0;
5548 }
5549
5550 int unregister_md_personality(struct mdk_personality *p)
5551 {
5552         printk(KERN_INFO "md: %s personality unregistered\n", p->name);
5553         spin_lock(&pers_lock);
5554         list_del_init(&p->list);
5555         spin_unlock(&pers_lock);
5556         return 0;
5557 }
5558
5559 static int is_mddev_idle(mddev_t *mddev)
5560 {
5561         mdk_rdev_t * rdev;
5562         struct list_head *tmp;
5563         int idle;
5564         long curr_events;
5565
5566         idle = 1;
5567         rdev_for_each(rdev, tmp, mddev) {
5568                 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
5569                 curr_events = disk_stat_read(disk, sectors[0]) + 
5570                                 disk_stat_read(disk, sectors[1]) - 
5571                                 atomic_read(&disk->sync_io);
5572                 /* sync IO will cause sync_io to increase before the disk_stats
5573                  * as sync_io is counted when a request starts, and
5574                  * disk_stats is counted when it completes.
5575                  * So resync activity will cause curr_events to be smaller than
5576                  * when there was no such activity.
5577                  * non-sync IO will cause disk_stats to increase without
5578                  * increasing sync_io so curr_events will (eventually)
5579                  * be larger than it was before.  Once it becomes
5580                  * substantially larger, the test below will cause
5581                  * the array to appear non-idle, and resync will slow
5582                  * down.
5583                  * If there is a lot of outstanding resync activity when
5584                  * we set last_event to curr_events, then all that activity
5585                  * completing might cause the array to appear non-idle
5586                  * and resync will be slowed down even though there might
5587                  * not have been non-resync activity.  This will only
5588                  * happen once though.  'last_events' will soon reflect
5589                  * the state where there is little or no outstanding
5590                  * resync requests, and further resync activity will
5591                  * always make curr_events less than last_events.
5592                  *
5593                  */
5594                 if (curr_events - rdev->last_events > 4096) {
5595                         rdev->last_events = curr_events;
5596                         idle = 0;
5597                 }
5598         }
5599         return idle;
5600 }
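
/*
 * Worked example (hypothetical numbers): if 4096 sectors of resync
 * complete with no other traffic, disk_stats and sync_io both grow
 * by 4096 and curr_events does not move, so the array still counts
 * as idle.  More than 4096 sectors of ordinary IO grow only the
 * disk_stats side, trip the test above and throttle the resync.
 */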
5601
5602 void md_done_sync(mddev_t *mddev, int blocks, int ok)
5603 {
5604         /* another "blocks" (512byte) blocks have been synced */
5605         atomic_sub(blocks, &mddev->recovery_active);
5606         wake_up(&mddev->recovery_wait);
5607         if (!ok) {
5608                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5609                 md_wakeup_thread(mddev->thread);
5610                 /* stop recovery, signal do_sync ... */
5611         }
5612 }
5613
5614
5615 /* md_write_start(mddev, bi)
5616  * If we need to update some array metadata (e.g. 'active' flag
5617  * in superblock) before writing, schedule a superblock update
5618  * and wait for it to complete.
5619  */
5620 void md_write_start(mddev_t *mddev, struct bio *bi)
5621 {
5622         int did_change = 0;
5623         if (bio_data_dir(bi) != WRITE)
5624                 return;
5625
5626         BUG_ON(mddev->ro == 1);
5627         if (mddev->ro == 2) {
5628                 /* need to switch to read/write */
5629                 mddev->ro = 0;
5630                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5631                 md_wakeup_thread(mddev->thread);
5632                 md_wakeup_thread(mddev->sync_thread);
5633                 did_change = 1;
5634         }
5635         atomic_inc(&mddev->writes_pending);
5636         if (mddev->safemode == 1)
5637                 mddev->safemode = 0;
5638         if (mddev->in_sync) {
5639                 spin_lock_irq(&mddev->write_lock);
5640                 if (mddev->in_sync) {
5641                         mddev->in_sync = 0;
5642                         set_bit(MD_CHANGE_CLEAN, &mddev->flags);
5643                         md_wakeup_thread(mddev->thread);
5644                         did_change = 1;
5645                 }
5646                 spin_unlock_irq(&mddev->write_lock);
5647         }
5648         if (did_change)
5649                 sysfs_notify(&mddev->kobj, NULL, "array_state");
5650         wait_event(mddev->sb_wait,
5651                    !test_bit(MD_CHANGE_CLEAN, &mddev->flags) &&
5652                    !test_bit(MD_CHANGE_PENDING, &mddev->flags));
5653 }
5654
5655 void md_write_end(mddev_t *mddev)
5656 {
5657         if (atomic_dec_and_test(&mddev->writes_pending)) {
5658                 if (mddev->safemode == 2)
5659                         md_wakeup_thread(mddev->thread);
5660                 else if (mddev->safemode_delay)
5661                         mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay);
5662         }
5663 }
5664
5665 /* md_allow_write(mddev)
5666  * Calling this ensures that the array is marked 'active' so that writes
5667  * may proceed without blocking.  It is important to call this before
5668  * attempting a GFP_KERNEL allocation while holding the mddev lock.
5669  * Must be called with mddev_lock held.
5670  *
5671  * In the ->external case MD_CHANGE_CLEAN can not be cleared until mddev->lock
5672  * is dropped, so return -EAGAIN after notifying userspace.
5673  */
5674 int md_allow_write(mddev_t *mddev)
5675 {
5676         if (!mddev->pers)
5677                 return 0;
5678         if (mddev->ro)
5679                 return 0;
5680         if (!mddev->pers->sync_request)
5681                 return 0;
5682
5683         spin_lock_irq(&mddev->write_lock);
5684         if (mddev->in_sync) {
5685                 mddev->in_sync = 0;
5686                 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
5687                 if (mddev->safemode_delay &&
5688                     mddev->safemode == 0)
5689                         mddev->safemode = 1;
5690                 spin_unlock_irq(&mddev->write_lock);
5691                 md_update_sb(mddev, 0);
5692                 sysfs_notify(&mddev->kobj, NULL, "array_state");
5693         } else
5694                 spin_unlock_irq(&mddev->write_lock);
5695
5696         if (test_bit(MD_CHANGE_CLEAN, &mddev->flags))
5697                 return -EAGAIN;
5698         else
5699                 return 0;
5700 }
5701 EXPORT_SYMBOL_GPL(md_allow_write);
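
/*
 * Illustrative sketch (not built with the driver): a personality
 * calling md_allow_write() before a GFP_KERNEL allocation made under
 * the mddev lock, as the comment above prescribes.  Names are
 * hypothetical.
 */
#if 0
static int foo_grow_buffers(mddev_t *mddev)
{
        void *buf;
        int err = md_allow_write(mddev);

        if (err)        /* -EAGAIN: retry once userspace updated metadata */
                return err;
        buf = kzalloc(4096, GFP_KERNEL);        /* safe: array marked active */
        if (!buf)
                return -ENOMEM;
        kfree(buf);
        return 0;
}
#endif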
5702
5703 #define SYNC_MARKS      10
5704 #define SYNC_MARK_STEP  (3*HZ)
5705 void md_do_sync(mddev_t *mddev)
5706 {
5707         mddev_t *mddev2;
5708         unsigned int currspeed = 0,
5709                  window;
5710         sector_t max_sectors,j, io_sectors;
5711         unsigned long mark[SYNC_MARKS];
5712         sector_t mark_cnt[SYNC_MARKS];
5713         int last_mark,m;
5714         struct list_head *tmp;
5715         sector_t last_check;
5716         int skipped = 0;
5717         struct list_head *rtmp;
5718         mdk_rdev_t *rdev;
5719         char *desc;
5720
5721         /* just in case the thread restarts... */
5722         if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
5723                 return;
5724         if (mddev->ro) /* never try to sync a read-only array */
5725                 return;
5726
5727         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
5728                 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
5729                         desc = "data-check";
5730                 else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
5731                         desc = "requested-resync";
5732                 else
5733                         desc = "resync";
5734         } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
5735                 desc = "reshape";
5736         else
5737                 desc = "recovery";
5738
5739         /* we overload curr_resync somewhat here.
5740          * 0 == not engaged in resync at all
5741          * 2 == checking that there is no conflict with another sync
5742          * 1 == like 2, but have yielded to allow conflicting resync to
5743          *              commence
5744          * other == active in resync - this many blocks
5745          *
5746          * Before starting a resync we must have set curr_resync to
5747          * 2, and then checked that every "conflicting" array has curr_resync
5748          * less than ours.  When we find one that is the same or higher
5749          * we wait on resync_wait.  To avoid deadlock, we reduce curr_resync
5750          * to 1 if we choose to yield (based arbitrarily on the address of the mddev structure).
5751          * This will mean we have to start checking from the beginning again.
5752          *
5753          */

        do {
                mddev->curr_resync = 2;

        try_again:
                if (kthread_should_stop()) {
                        set_bit(MD_RECOVERY_INTR, &mddev->recovery);
                        goto skip;
                }
                for_each_mddev(mddev2, tmp) {
                        if (mddev2 == mddev)
                                continue;
                        if (!mddev->parallel_resync
                        &&  mddev2->curr_resync
                        &&  match_mddev_units(mddev, mddev2)) {
                                DEFINE_WAIT(wq);
                                if (mddev < mddev2 && mddev->curr_resync == 2) {
                                        /* arbitrarily yield */
                                        mddev->curr_resync = 1;
                                        wake_up(&resync_wait);
                                }
                                if (mddev > mddev2 && mddev->curr_resync == 1)
                                        /* no need to wait here, we can wait the next
                                         * time 'round when curr_resync == 2
                                         */
                                        continue;
                                prepare_to_wait(&resync_wait, &wq, TASK_UNINTERRUPTIBLE);
                                if (!kthread_should_stop() &&
                                    mddev2->curr_resync >= mddev->curr_resync) {
                                        printk(KERN_INFO "md: delaying %s of %s"
                                               " until %s has finished (they"
                                               " share one or more physical units)\n",
                                               desc, mdname(mddev), mdname(mddev2));
                                        mddev_put(mddev2);
                                        schedule();
                                        finish_wait(&resync_wait, &wq);
                                        goto try_again;
                                }
                                finish_wait(&resync_wait, &wq);
                        }
                }
        } while (mddev->curr_resync < 2);

        j = 0;
        if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
                /* resync follows the size requested by the personality,
                 * which defaults to physical size, but can be virtual size
                 */
                max_sectors = mddev->resync_max_sectors;
                mddev->resync_mismatches = 0;
                /* we don't use the checkpoint if there's a bitmap */
                if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
                        j = mddev->resync_min;
                else if (!mddev->bitmap)
                        j = mddev->recovery_cp;

        } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
                max_sectors = mddev->size << 1;
        else {
                /* recovery follows the physical size of devices */
                max_sectors = mddev->size << 1;
                j = MaxSector;
                rdev_for_each(rdev, rtmp, mddev)
                        if (rdev->raid_disk >= 0 &&
                            !test_bit(Faulty, &rdev->flags) &&
                            !test_bit(In_sync, &rdev->flags) &&
                            rdev->recovery_offset < j)
                                j = rdev->recovery_offset;
        }

        printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev));
        printk(KERN_INFO "md: minimum _guaranteed_  speed:"
                " %d KB/sec/disk.\n", speed_min(mddev));
        printk(KERN_INFO "md: using maximum available idle IO bandwidth "
               "(but not more than %d KB/sec) for %s.\n",
               speed_max(mddev), desc);

        is_mddev_idle(mddev); /* this also initializes IO event counters */

        io_sectors = 0;
        for (m = 0; m < SYNC_MARKS; m++) {
                mark[m] = jiffies;
                mark_cnt[m] = io_sectors;
        }
        last_mark = 0;
        mddev->resync_mark = mark[last_mark];
        mddev->resync_mark_cnt = mark_cnt[last_mark];

        /*
         * Tune reconstruction:
         */
        window = 32*(PAGE_SIZE/512);
        printk(KERN_INFO "md: using %dk window, over a total of %llu blocks.\n",
                window/2, (unsigned long long) max_sectors/2);

        atomic_set(&mddev->recovery_active, 0);
        last_check = 0;

        if (j > 2) {
                printk(KERN_INFO
                       "md: resuming %s of %s from checkpoint.\n",
                       desc, mdname(mddev));
                mddev->curr_resync = j;
        }

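        /*
         * Main resync/recovery loop: ask the personality to sync the next
         * chunk, throttle to the configured speed limits, and honour
         * resync_max and kthread stop requests as we go.
         */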
        while (j < max_sectors) {
                sector_t sectors;

                skipped = 0;
                if (j >= mddev->resync_max) {
                        sysfs_notify(&mddev->kobj, NULL, "sync_completed");
                        wait_event(mddev->recovery_wait,
                                   mddev->resync_max > j
                                   || kthread_should_stop());
                }
                if (kthread_should_stop())
                        goto interrupted;
                sectors = mddev->pers->sync_request(mddev, j, &skipped,
                                                  currspeed < speed_min(mddev));
                if (sectors == 0) {
                        set_bit(MD_RECOVERY_INTR, &mddev->recovery);
                        goto out;
                }

                if (!skipped) { /* actual IO requested */
                        io_sectors += sectors;
                        atomic_add(sectors, &mddev->recovery_active);
                }

                j += sectors;
                if (j > 1)
                        mddev->curr_resync = j;
                mddev->curr_mark_cnt = io_sectors;
                if (last_check == 0)
                        /* this is the earliest that rebuild will be
                         * visible in /proc/mdstat
                         */
                        md_new_event(mddev);

                if (last_check + window > io_sectors || j == max_sectors)
                        continue;

                last_check = io_sectors;

                if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
                        break;

        repeat:
                if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP)) {
                        /* step marks */
                        int next = (last_mark+1) % SYNC_MARKS;

                        mddev->resync_mark = mark[next];
                        mddev->resync_mark_cnt = mark_cnt[next];
                        mark[next] = jiffies;
                        mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
                        last_mark = next;
                }

                if (kthread_should_stop())
                        goto interrupted;

                /*
                 * This loop exits only if we are either slower than the
                 * 'hard' speed limit, or the system was IO-idle for a jiffy.
                 * The system might be non-idle CPU-wise, but we only care
                 * about not overloading the IO subsystem (things like an
                 * e2fsck being done on the RAID array should execute fast).
                 */
                blk_unplug(mddev->queue);
                cond_resched();

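                /*
                 * Estimate the current rebuild speed in KB/sec: sectors
                 * completed since the last mark are halved to get KB and
                 * divided by the elapsed whole seconds; the "+1" terms
                 * avoid a division by zero and a reported speed of zero.
                 */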
                currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2
                        /((jiffies-mddev->resync_mark)/HZ +1) +1;

                if (currspeed > speed_min(mddev)) {
                        if ((currspeed > speed_max(mddev)) ||
                                        !is_mddev_idle(mddev)) {
                                msleep(500);
                                goto repeat;
                        }
                }
        }
        printk(KERN_INFO "md: %s: %s done.\n", mdname(mddev), desc);
        /*
         * this also signals 'finished resyncing' to md_stop
         */
 out:
        blk_unplug(mddev->queue);

        wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));

        /* tell personality that we are finished */
        mddev->pers->sync_request(mddev, max_sectors, &skipped, 1);

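        /*
         * Record how far we got: for an interrupted resync, remember the
         * checkpoint in recovery_cp; for a completed recovery, advance each
         * device's recovery_offset so a restart can continue from here
         * rather than from the beginning.
         */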
        if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
            mddev->curr_resync > 2) {
                if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
                        if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
                                if (mddev->curr_resync >= mddev->recovery_cp) {
                                        printk(KERN_INFO
                                               "md: checkpointing %s of %s.\n",
                                               desc, mdname(mddev));
                                        mddev->recovery_cp = mddev->curr_resync;
                                }
                        } else
                                mddev->recovery_cp = MaxSector;
                } else {
                        if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
                                mddev->curr_resync = MaxSector;
                        rdev_for_each(rdev, rtmp, mddev)
                                if (rdev->raid_disk >= 0 &&
                                    !test_bit(Faulty, &rdev->flags) &&
                                    !test_bit(In_sync, &rdev->flags) &&
                                    rdev->recovery_offset < mddev->curr_resync)
                                        rdev->recovery_offset = mddev->curr_resync;
                }
        }
        set_bit(MD_CHANGE_DEVS, &mddev->flags);

 skip:
        mddev->curr_resync = 0;
        mddev->resync_min = 0;
        mddev->resync_max = MaxSector;
        sysfs_notify(&mddev->kobj, NULL, "sync_completed");
        wake_up(&resync_wait);
        set_bit(MD_RECOVERY_DONE, &mddev->recovery);
        md_wakeup_thread(mddev->thread);
        return;

 interrupted:
        /*
         * got a signal, exit.
         */
        printk(KERN_INFO
               "md: md_do_sync() got signal ... exiting\n");
        set_bit(MD_RECOVERY_INTR, &mddev->recovery);
        goto out;

}
EXPORT_SYMBOL_GPL(md_do_sync);

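/*
 * Remove any device that is Faulty or not In_sync (and not Blocked) and
 * has no pending IO, then try to hot-add every unused non-faulty device
 * as a spare.  Returns the number of devices available as recovery
 * targets.
 */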
static int remove_and_add_spares(mddev_t *mddev)
{
        mdk_rdev_t *rdev;
        struct list_head *rtmp;
        int spares = 0;

        rdev_for_each(rdev, rtmp, mddev)
                if (rdev->raid_disk >= 0 &&
                    !test_bit(Blocked, &rdev->flags) &&
                    (test_bit(Faulty, &rdev->flags) ||
                     !test_bit(In_sync, &rdev->flags)) &&
                    atomic_read(&rdev->nr_pending) == 0) {
                        if (mddev->pers->hot_remove_disk(
                                    mddev, rdev->raid_disk) == 0) {
                                char nm[20];
                                sprintf(nm, "rd%d", rdev->raid_disk);
                                sysfs_remove_link(&mddev->kobj, nm);
                                rdev->raid_disk = -1;
                        }
                }

        if (mddev->degraded) {
                rdev_for_each(rdev, rtmp, mddev) {
                        if (rdev->raid_disk >= 0 &&
                            !test_bit(In_sync, &rdev->flags))
                                spares++;
                        if (rdev->raid_disk < 0
                            && !test_bit(Faulty, &rdev->flags)) {
                                rdev->recovery_offset = 0;
                                if (mddev->pers->
                                    hot_add_disk(mddev, rdev) == 0) {
                                        char nm[20];
                                        sprintf(nm, "rd%d", rdev->raid_disk);
                                        if (sysfs_create_link(&mddev->kobj,
                                                              &rdev->kobj, nm))
                                                printk(KERN_WARNING
                                                       "md: cannot register "
                                                       "%s for %s\n",
                                                       nm, mdname(mddev));
                                        spares++;
                                        md_new_event(mddev);
                                } else
                                        break;
                        }
                }
        }
        return spares;
}
/*
 * This routine is regularly called by all per-raid-array threads to
 * deal with generic issues like resync and super-block update.
 * Raid personalities that don't have a thread (linear/raid0) do not
 * need this as they never do any recovery or update the superblock.
 *
 * It does not do any resync itself, but rather "forks" off other threads
 * to do that as needed.
 * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
 * "->recovery" and create a thread at ->sync_thread.
 * When the thread finishes it sets MD_RECOVERY_DONE
 * and wakes up this thread, which will reap the thread and finish up.
 * This thread also removes any faulty devices (with nr_pending == 0).
 *
 * The overall approach is:
 *  1/ if the superblock needs updating, update it.
 *  2/ If a recovery thread is running, don't do anything else.
 *  3/ If recovery has finished, clean up, possibly marking spares active.
 *  4/ If there are any faulty devices, remove them.
 *  5/ If array is degraded, try to add spare devices.
 *  6/ If array has spares or is not in-sync, start a resync thread.
 */
void md_check_recovery(mddev_t *mddev)
{
        mdk_rdev_t *rdev;
        struct list_head *rtmp;

        if (mddev->bitmap)
                bitmap_daemon_work(mddev->bitmap);

        if (mddev->ro)
                return;

        if (signal_pending(current)) {
                if (mddev->pers->sync_request && !mddev->external) {
                        printk(KERN_INFO "md: %s in immediate safe mode\n",
                               mdname(mddev));
                        mddev->safemode = 2;
                }
                flush_signals(current);
        }

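        /*
         * Quick exit unless there is something to do: a dirty superblock
         * (for internal metadata), recovery that is needed or has just
         * finished, or a pending safemode transition.
         */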
        if (!(
                (mddev->flags && !mddev->external) ||
                test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
                test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
                (mddev->external == 0 && mddev->safemode == 1) ||
                (mddev->safemode == 2 && !atomic_read(&mddev->writes_pending)
                 && !mddev->in_sync && mddev->recovery_cp == MaxSector)
                ))
                return;

        if (mddev_trylock(mddev)) {
                int spares = 0;

                if (!mddev->external) {
                        int did_change = 0;
                        spin_lock_irq(&mddev->write_lock);
                        if (mddev->safemode &&
                            !atomic_read(&mddev->writes_pending) &&
                            !mddev->in_sync &&
                            mddev->recovery_cp == MaxSector) {
                                mddev->in_sync = 1;
                                did_change = 1;
                                if (mddev->persistent)
                                        set_bit(MD_CHANGE_CLEAN, &mddev->flags);
                        }
                        if (mddev->safemode == 1)
                                mddev->safemode = 0;
                        spin_unlock_irq(&mddev->write_lock);
                        if (did_change)
                                sysfs_notify(&mddev->kobj, NULL, "array_state");
                }

                if (mddev->flags)
                        md_update_sb(mddev, 0);

                rdev_for_each(rdev, rtmp, mddev)
                        if (test_and_clear_bit(StateChanged, &rdev->flags))
                                sysfs_notify(&rdev->kobj, NULL, "state");

                if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
                    !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
                        /* resync/recovery still happening */
                        clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
                        goto unlock;
                }
                if (mddev->sync_thread) {
                        /* resync has finished, collect result */
                        md_unregister_thread(mddev->sync_thread);
                        mddev->sync_thread = NULL;
                        if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
                                /* success...*/
                                /* activate any spares */
                                if (mddev->pers->spare_active(mddev))
                                        sysfs_notify(&mddev->kobj, NULL,
                                                     "degraded");
                        }
                        md_update_sb(mddev, 1);

                        /* if array is no longer degraded, then any saved_raid_disk
                         * information must be scrapped
                         */
                        if (!mddev->degraded)
                                rdev_for_each(rdev, rtmp, mddev)
                                        rdev->saved_raid_disk = -1;

                        mddev->recovery = 0;
                        /* flag recovery needed just to double check */
                        set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
                        sysfs_notify(&mddev->kobj, NULL, "sync_action");
                        md_new_event(mddev);
                        goto unlock;
                }
                /* Set RUNNING before clearing NEEDED to avoid
                 * any transients in the value of "sync_action".
                 */
                set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
                clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
                /* Clear some bits that don't mean anything, but
                 * might be left set
                 */
                clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
                clear_bit(MD_RECOVERY_DONE, &mddev->recovery);

                if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
                        goto unlock;
                /* No recovery is running.
                 * Remove any failed drives, then add spares if possible.
                 * Spares are also removed and re-added, to allow
                 * the personality to fail the re-add.
                 */

                if (mddev->reshape_position != MaxSector) {
                        if (mddev->pers->check_reshape(mddev) != 0)
                                /* Cannot proceed */
                                goto unlock;
                        set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
                        clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
                } else if ((spares = remove_and_add_spares(mddev))) {
                        clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
                        clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
                        set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
                } else if (mddev->recovery_cp < MaxSector) {
                        set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
                        clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
                } else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
                        /* nothing to be done ... */
                        goto unlock;

                if (mddev->pers->sync_request) {
                        if (spares && mddev->bitmap && !mddev->bitmap->file) {
                                /* We are adding a device or devices to an array
                                 * which has the bitmap stored on all devices.
                                 * So make sure all bitmap pages get written
                                 */
                                bitmap_write_all(mddev->bitmap);
                        }
                        mddev->sync_thread = md_register_thread(md_do_sync,
                                                                mddev,
                                                                "%s_resync");
                        if (!mddev->sync_thread) {
                                printk(KERN_ERR "%s: could not start resync"
                                        " thread...\n",
                                        mdname(mddev));
                                /* leave the spares where they are, it shouldn't hurt */
                                mddev->recovery = 0;
                        } else
                                md_wakeup_thread(mddev->sync_thread);
                        sysfs_notify(&mddev->kobj, NULL, "sync_action");
                        md_new_event(mddev);
                }
        unlock:
                if (!mddev->sync_thread) {
                        clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
                        if (test_and_clear_bit(MD_RECOVERY_RECOVER,
                                               &mddev->recovery))
                                sysfs_notify(&mddev->kobj, NULL, "sync_action");
                }
                mddev_unlock(mddev);
        }
}

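/*
 * Wait (for up to five seconds) for a Blocked device to be unblocked,
 * letting userspace know about the state change first, then drop the
 * pending reference taken by the caller.
 */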
void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev)
{
        sysfs_notify(&rdev->kobj, NULL, "state");
        wait_event_timeout(rdev->blocked_wait,
                           !test_bit(Blocked, &rdev->flags),
                           msecs_to_jiffies(5000));
        rdev_dec_pending(rdev, mddev);
}
EXPORT_SYMBOL(md_wait_for_blocked_rdev);

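/*
 * On shutdown, halt or power-off, stop every array we can get the lock
 * on, so the superblocks are marked clean before the machine goes down.
 */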
static int md_notify_reboot(struct notifier_block *this,
                            unsigned long code, void *x)
{
        struct list_head *tmp;
        mddev_t *mddev;

        if ((code == SYS_DOWN) || (code == SYS_HALT) || (code == SYS_POWER_OFF)) {

                printk(KERN_INFO "md: stopping all md devices.\n");

                for_each_mddev(mddev, tmp)
                        if (mddev_trylock(mddev)) {
                                do_md_stop(mddev, 1);
                                mddev_unlock(mddev);
                        }
                /*
                 * certain more exotic SCSI devices are known to be
                 * volatile wrt too early system reboots. While the
                 * right place to handle this issue is the given
                 * driver, we do want to have a safe RAID driver ...
                 */
                mdelay(1000*1);
        }
        return NOTIFY_DONE;
}

static struct notifier_block md_notifier = {
        .notifier_call  = md_notify_reboot,
        .next           = NULL,
        .priority       = INT_MAX, /* before any real devices */
};

static void md_geninit(void)
{
        dprintk("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));

        proc_create("mdstat", S_IRUGO, NULL, &md_seq_fops);
}

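/*
 * Register the "md" and "mdp" (partitionable) block majors, the probe
 * regions that create arrays on first open, the reboot notifier and the
 * raid sysctl table, then set up /proc/mdstat.
 */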
static int __init md_init(void)
{
        if (register_blkdev(MAJOR_NR, "md"))
                return -1;
        if ((mdp_major = register_blkdev(0, "mdp")) <= 0) {
                unregister_blkdev(MAJOR_NR, "md");
                return -1;
        }
        blk_register_region(MKDEV(MAJOR_NR, 0), 1UL << MINORBITS, THIS_MODULE,
                            md_probe, NULL, NULL);
        blk_register_region(MKDEV(mdp_major, 0), 1UL << MINORBITS, THIS_MODULE,
                            md_probe, NULL, NULL);

        register_reboot_notifier(&md_notifier);
        raid_table_header = register_sysctl_table(raid_root_table);

        md_geninit();
        return 0;
}

#ifndef MODULE

/*
 * Searches all registered partitions for autorun RAID arrays
 * at boot time.
 */

static LIST_HEAD(all_detected_devices);
struct detected_devices_node {
        struct list_head list;
        dev_t dev;
};

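/*
 * Called by the partition code for partitions of type 0xfd (Linux raid
 * autodetect); the device is queued here and picked up later by
 * autostart_arrays().
 */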
void md_autodetect_dev(dev_t dev)
{
        struct detected_devices_node *node_detected_dev;

        node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL);
        if (node_detected_dev) {
                node_detected_dev->dev = dev;
                list_add_tail(&node_detected_dev->list, &all_detected_devices);
        } else {
                printk(KERN_CRIT "md: md_autodetect_dev: kzalloc failed"
                        ", skipping dev(%d,%d)\n", MAJOR(dev), MINOR(dev));
        }
}

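/*
 * Import every device queued by md_autodetect_dev() and hand the
 * resulting list to autorun_devices(), which assembles and starts the
 * arrays it finds.
 */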
static void autostart_arrays(int part)
{
        mdk_rdev_t *rdev;
        struct detected_devices_node *node_detected_dev;
        dev_t dev;
        int i_scanned, i_passed;

        i_scanned = 0;
        i_passed = 0;

        printk(KERN_INFO "md: Autodetecting RAID arrays.\n");

        while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) {
                i_scanned++;
                node_detected_dev = list_entry(all_detected_devices.next,
                                        struct detected_devices_node, list);
                list_del(&node_detected_dev->list);
                dev = node_detected_dev->dev;
                kfree(node_detected_dev);
                rdev = md_import_device(dev, 0, 90);
                if (IS_ERR(rdev))
                        continue;

                if (test_bit(Faulty, &rdev->flags)) {
                        MD_BUG();
                        continue;
                }
                set_bit(AutoDetected, &rdev->flags);
                list_add(&rdev->same_set, &pending_raid_disks);
                i_passed++;
        }

        printk(KERN_INFO "md: Scanned %d and added %d devices.\n",
                                                i_scanned, i_passed);

        autorun_devices(part);
}

#endif /* !MODULE */

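/*
 * Undo everything md_init() registered, then stop and export any arrays
 * that are still around so their devices are released cleanly.
 */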
static __exit void md_exit(void)
{
        mddev_t *mddev;
        struct list_head *tmp;

        blk_unregister_region(MKDEV(MAJOR_NR, 0), 1U << MINORBITS);
        blk_unregister_region(MKDEV(mdp_major, 0), 1U << MINORBITS);

        unregister_blkdev(MAJOR_NR, "md");
        unregister_blkdev(mdp_major, "mdp");
        unregister_reboot_notifier(&md_notifier);
        unregister_sysctl_table(raid_table_header);
        remove_proc_entry("mdstat", NULL);
        for_each_mddev(mddev, tmp) {
                struct gendisk *disk = mddev->gendisk;
                if (!disk)
                        continue;
                export_array(mddev);
                del_gendisk(disk);
                put_disk(disk);
                mddev->gendisk = NULL;
                mddev_put(mddev);
        }
}

subsys_initcall(md_init);
module_exit(md_exit)

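/*
 * Handlers for the "start_ro" module parameter, which controls whether
 * newly assembled arrays come up read-only until the first write.  For
 * example (assuming md is built as the usual md-mod module, so the
 * parameter appears under /sys/module/md_mod/):
 *
 *      echo 1 > /sys/module/md_mod/parameters/start_ro
 */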
static int get_ro(char *buffer, struct kernel_param *kp)
{
        return sprintf(buffer, "%d", start_readonly);
}
static int set_ro(const char *val, struct kernel_param *kp)
{
        char *e;
        int num = simple_strtoul(val, &e, 10);
        if (*val && (*e == '\0' || *e == '\n')) {
                start_readonly = num;
                return 0;
        }
        return -EINVAL;
}

module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);

EXPORT_SYMBOL(register_md_personality);
EXPORT_SYMBOL(unregister_md_personality);
EXPORT_SYMBOL(md_error);
EXPORT_SYMBOL(md_done_sync);
EXPORT_SYMBOL(md_write_start);
EXPORT_SYMBOL(md_write_end);
EXPORT_SYMBOL(md_register_thread);
EXPORT_SYMBOL(md_unregister_thread);
EXPORT_SYMBOL(md_wakeup_thread);
EXPORT_SYMBOL(md_check_recovery);
MODULE_LICENSE("GPL");
MODULE_ALIAS("md");
MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);