1 /*
2    md.c : Multiple Devices driver for Linux
3           Copyright (C) 1998, 1999, 2000 Ingo Molnar
4
5      completely rewritten, based on the MD driver code from Marc Zyngier
6
7    Changes:
8
9    - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
10    - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
11    - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
12    - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
13    - kmod support by: Cyrus Durgin
14    - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
15    - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>
16
17    - lots of fixes and improvements to the RAID1/RAID5 and generic
18      RAID code (such as request based resynchronization):
19
20      Neil Brown <neilb@cse.unsw.edu.au>.
21
22    - persistent bitmap code
23      Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.
24
25    This program is free software; you can redistribute it and/or modify
26    it under the terms of the GNU General Public License as published by
27    the Free Software Foundation; either version 2, or (at your option)
28    any later version.
29
30    You should have received a copy of the GNU General Public License
31    (for example /usr/src/linux/COPYING); if not, write to the Free
32    Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
33 */
34
35 #include <linux/module.h>
36 #include <linux/kthread.h>
37 #include <linux/linkage.h>
38 #include <linux/raid/md.h>
39 #include <linux/raid/bitmap.h>
40 #include <linux/sysctl.h>
41 #include <linux/buffer_head.h> /* for invalidate_bdev */
42 #include <linux/suspend.h>
43 #include <linux/poll.h>
44 #include <linux/mutex.h>
45 #include <linux/ctype.h>
46
47 #include <linux/init.h>
48
49 #include <linux/file.h>
50
51 #ifdef CONFIG_KMOD
52 #include <linux/kmod.h>
53 #endif
54
55 #include <asm/unaligned.h>
56
57 #define MAJOR_NR MD_MAJOR
58 #define MD_DRIVER
59
60 /* 63 partitions with the alternate major number (mdp) */
61 #define MdpMinorShift 6
62
63 #define DEBUG 0
64 #define dprintk(x...) ((void)(DEBUG && printk(x)))
65
66
67 #ifndef MODULE
68 static void autostart_arrays (int part);
69 #endif
70
71 static LIST_HEAD(pers_list);
72 static DEFINE_SPINLOCK(pers_lock);
73
74 static void md_print_devices(void);
75
76 #define MD_BUG(x...) { printk("md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); }
77
78 /*
79  * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
80  * is 1000 KB/sec, so the extra system load does not show up that much.
81  * Increase it if you want to have more _guaranteed_ speed. Note that
82  * the RAID driver will use the maximum available bandwidth if the IO
83  * subsystem is idle. There is also an 'absolute maximum' reconstruction
84  * speed limit - in case reconstruction slows down your system despite
85  * idle IO detection.
86  *
87  * you can change it via /proc/sys/dev/raid/speed_limit_min and _max.
88  * or /sys/block/mdX/md/sync_speed_{min,max}
89  */
90
91 static int sysctl_speed_limit_min = 1000;
92 static int sysctl_speed_limit_max = 200000;
93 static inline int speed_min(mddev_t *mddev)
94 {
95         return mddev->sync_speed_min ?
96                 mddev->sync_speed_min : sysctl_speed_limit_min;
97 }
98
99 static inline int speed_max(mddev_t *mddev)
100 {
101         return mddev->sync_speed_max ?
102                 mddev->sync_speed_max : sysctl_speed_limit_max;
103 }
104
105 static struct ctl_table_header *raid_table_header;
106
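/*
 * The three ctl_table levels below nest as "dev" -> "raid" -> the two
 * speed limit entries, so they show up as
 * /proc/sys/dev/raid/speed_limit_{min,max}, matching the paths mentioned
 * in the comment above.
 */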
107 static ctl_table raid_table[] = {
108         {
109                 .ctl_name       = DEV_RAID_SPEED_LIMIT_MIN,
110                 .procname       = "speed_limit_min",
111                 .data           = &sysctl_speed_limit_min,
112                 .maxlen         = sizeof(int),
113                 .mode           = S_IRUGO|S_IWUSR,
114                 .proc_handler   = &proc_dointvec,
115         },
116         {
117                 .ctl_name       = DEV_RAID_SPEED_LIMIT_MAX,
118                 .procname       = "speed_limit_max",
119                 .data           = &sysctl_speed_limit_max,
120                 .maxlen         = sizeof(int),
121                 .mode           = S_IRUGO|S_IWUSR,
122                 .proc_handler   = &proc_dointvec,
123         },
124         { .ctl_name = 0 }
125 };
126
127 static ctl_table raid_dir_table[] = {
128         {
129                 .ctl_name       = DEV_RAID,
130                 .procname       = "raid",
131                 .maxlen         = 0,
132                 .mode           = S_IRUGO|S_IXUGO,
133                 .child          = raid_table,
134         },
135         { .ctl_name = 0 }
136 };
137
138 static ctl_table raid_root_table[] = {
139         {
140                 .ctl_name       = CTL_DEV,
141                 .procname       = "dev",
142                 .maxlen         = 0,
143                 .mode           = 0555,
144                 .child          = raid_dir_table,
145         },
146         { .ctl_name = 0 }
147 };
148
149 static struct block_device_operations md_fops;
150
151 static int start_readonly;
152
153 /*
154  * We have a system wide 'event count' that is incremented
155  * on any 'interesting' event, and readers of /proc/mdstat
156  * can use 'poll' or 'select' to find out when the event
157  * count increases.
158  *
159  * Events are:
160  *  start array, stop array, error, add device, remove device,
161  *  start build, activate spare
162  */
163 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
164 static atomic_t md_event_count;
165 void md_new_event(mddev_t *mddev)
166 {
167         atomic_inc(&md_event_count);
168         wake_up(&md_event_waiters);
169         sysfs_notify(&mddev->kobj, NULL, "sync_action");
170 }
171 EXPORT_SYMBOL_GPL(md_new_event);
172
173 /* Alternate version that can be called from interrupts
174  * when calling sysfs_notify isn't needed.
175  */
176 static void md_new_event_inintr(mddev_t *mddev)
177 {
178         atomic_inc(&md_event_count);
179         wake_up(&md_event_waiters);
180 }
181
182 /*
183  * Enables iteration over all existing md arrays;
184  * all_mddevs_lock protects this list.
185  */
186 static LIST_HEAD(all_mddevs);
187 static DEFINE_SPINLOCK(all_mddevs_lock);
188
189
190 /*
191  * iterates through all used mddevs in the system.
192  * We take care to grab the all_mddevs_lock whenever navigating
193  * the list, and to always hold a refcount when unlocked.
194  * Any code which breaks out of this loop while owning
195  * a reference to the current mddev must mddev_put it.
196  */
197 #define ITERATE_MDDEV(mddev,tmp)                                        \
198                                                                         \
199         for (({ spin_lock(&all_mddevs_lock);                            \
200                 tmp = all_mddevs.next;                                  \
201                 mddev = NULL;});                                        \
202              ({ if (tmp != &all_mddevs)                                 \
203                         mddev_get(list_entry(tmp, mddev_t, all_mddevs));\
204                 spin_unlock(&all_mddevs_lock);                          \
205                 if (mddev) mddev_put(mddev);                            \
206                 mddev = list_entry(tmp, mddev_t, all_mddevs);           \
207                 tmp != &all_mddevs;});                                  \
208              ({ spin_lock(&all_mddevs_lock);                            \
209                 tmp = tmp->next;})                                      \
210                 )
211
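/*
 * Illustrative use (see md_print_devices() later in this file): the caller
 * supplies an mddev_t * and a struct list_head * cursor, e.g.
 *
 *	mddev_t *mddev;
 *	struct list_head *tmp;
 *
 *	ITERATE_MDDEV(mddev, tmp) {
 *		... mddev is pinned by a reference for this pass ...
 *	}
 *
 * A "break" out of the loop leaves that reference held, so such code must
 * call mddev_put(mddev) itself.
 */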
212
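/*
 * Default make_request handler installed on a freshly allocated queue in
 * mddev_find() below: until something replaces it, any I/O submitted to
 * the device is failed immediately.
 */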
213 static int md_fail_request (request_queue_t *q, struct bio *bio)
214 {
215         bio_io_error(bio, bio->bi_size);
216         return 0;
217 }
218
219 static inline mddev_t *mddev_get(mddev_t *mddev)
220 {
221         atomic_inc(&mddev->active);
222         return mddev;
223 }
224
225 static void mddev_put(mddev_t *mddev)
226 {
227         if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
228                 return;
229         if (!mddev->raid_disks && list_empty(&mddev->disks)) {
230                 list_del(&mddev->all_mddevs);
231                 spin_unlock(&all_mddevs_lock);
232                 blk_cleanup_queue(mddev->queue);
233                 kobject_unregister(&mddev->kobj);
234         } else
235                 spin_unlock(&all_mddevs_lock);
236 }
237
238 static mddev_t * mddev_find(dev_t unit)
239 {
240         mddev_t *mddev, *new = NULL;
241
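        /*
         * Find-or-create: look the unit up under all_mddevs_lock; if it is
         * not there, drop the lock, allocate and initialise a new mddev, and
         * jump back to "retry" so the list is re-checked before the new
         * entry is inserted (a racing caller may have added it meanwhile,
         * in which case the fresh allocation is simply freed).
         */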
242  retry:
243         spin_lock(&all_mddevs_lock);
244         list_for_each_entry(mddev, &all_mddevs, all_mddevs)
245                 if (mddev->unit == unit) {
246                         mddev_get(mddev);
247                         spin_unlock(&all_mddevs_lock);
248                         kfree(new);
249                         return mddev;
250                 }
251
252         if (new) {
253                 list_add(&new->all_mddevs, &all_mddevs);
254                 spin_unlock(&all_mddevs_lock);
255                 return new;
256         }
257         spin_unlock(&all_mddevs_lock);
258
259         new = kzalloc(sizeof(*new), GFP_KERNEL);
260         if (!new)
261                 return NULL;
262
263         new->unit = unit;
264         if (MAJOR(unit) == MD_MAJOR)
265                 new->md_minor = MINOR(unit);
266         else
267                 new->md_minor = MINOR(unit) >> MdpMinorShift;
268
269         mutex_init(&new->reconfig_mutex);
270         INIT_LIST_HEAD(&new->disks);
271         INIT_LIST_HEAD(&new->all_mddevs);
272         init_timer(&new->safemode_timer);
273         atomic_set(&new->active, 1);
274         spin_lock_init(&new->write_lock);
275         init_waitqueue_head(&new->sb_wait);
276
277         new->queue = blk_alloc_queue(GFP_KERNEL);
278         if (!new->queue) {
279                 kfree(new);
280                 return NULL;
281         }
282         set_bit(QUEUE_FLAG_CLUSTER, &new->queue->queue_flags);
283
284         blk_queue_make_request(new->queue, md_fail_request);
285
286         goto retry;
287 }
288
289 static inline int mddev_lock(mddev_t * mddev)
290 {
291         return mutex_lock_interruptible(&mddev->reconfig_mutex);
292 }
293
294 static inline int mddev_trylock(mddev_t * mddev)
295 {
296         return mutex_trylock(&mddev->reconfig_mutex);
297 }
298
299 static inline void mddev_unlock(mddev_t * mddev)
300 {
301         mutex_unlock(&mddev->reconfig_mutex);
302
303         md_wakeup_thread(mddev->thread);
304 }
305
306 static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr)
307 {
308         mdk_rdev_t * rdev;
309         struct list_head *tmp;
310
311         ITERATE_RDEV(mddev,rdev,tmp) {
312                 if (rdev->desc_nr == nr)
313                         return rdev;
314         }
315         return NULL;
316 }
317
318 static mdk_rdev_t * find_rdev(mddev_t * mddev, dev_t dev)
319 {
320         struct list_head *tmp;
321         mdk_rdev_t *rdev;
322
323         ITERATE_RDEV(mddev,rdev,tmp) {
324                 if (rdev->bdev->bd_dev == dev)
325                         return rdev;
326         }
327         return NULL;
328 }
329
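/*
 * Look up a registered personality by numeric level (when 'level' is not
 * LEVEL_NONE) or by the textual name passed in 'clevel'.
 */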
330 static struct mdk_personality *find_pers(int level, char *clevel)
331 {
332         struct mdk_personality *pers;
333         list_for_each_entry(pers, &pers_list, list) {
334                 if (level != LEVEL_NONE && pers->level == level)
335                         return pers;
336                 if (strcmp(pers->name, clevel)==0)
337                         return pers;
338         }
339         return NULL;
340 }
341
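/*
 * For 0.90 superblocks the metadata lives near the end of the device: the
 * device size is expressed in 1K blocks and MD_NEW_SIZE_BLOCKS() rounds it
 * down to the superblock's offset (a 64K-aligned slot at the end, per the
 * classic 0.90 layout).
 */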
342 static inline sector_t calc_dev_sboffset(struct block_device *bdev)
343 {
344         sector_t size = bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
345         return MD_NEW_SIZE_BLOCKS(size);
346 }
347
348 static sector_t calc_dev_size(mdk_rdev_t *rdev, unsigned chunk_size)
349 {
350         sector_t size;
351
352         size = rdev->sb_offset;
353
354         if (chunk_size)
355                 size &= ~((sector_t)chunk_size/1024 - 1);
356         return size;
357 }
358
359 static int alloc_disk_sb(mdk_rdev_t * rdev)
360 {
361         if (rdev->sb_page)
362                 MD_BUG();
363
364         rdev->sb_page = alloc_page(GFP_KERNEL);
365         if (!rdev->sb_page) {
366                 printk(KERN_ALERT "md: out of memory.\n");
367                 return -EINVAL;
368         }
369
370         return 0;
371 }
372
373 static void free_disk_sb(mdk_rdev_t * rdev)
374 {
375         if (rdev->sb_page) {
376                 put_page(rdev->sb_page);
377                 rdev->sb_loaded = 0;
378                 rdev->sb_page = NULL;
379                 rdev->sb_offset = 0;
380                 rdev->size = 0;
381         }
382 }
383
384
385 static int super_written(struct bio *bio, unsigned int bytes_done, int error)
386 {
387         mdk_rdev_t *rdev = bio->bi_private;
388         mddev_t *mddev = rdev->mddev;
389         if (bio->bi_size)
390                 return 1;
391
392         if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
393                 printk("md: super_written gets error=%d, uptodate=%d\n",
394                        error, test_bit(BIO_UPTODATE, &bio->bi_flags));
395                 WARN_ON(test_bit(BIO_UPTODATE, &bio->bi_flags));
396                 md_error(mddev, rdev);
397         }
398
399         if (atomic_dec_and_test(&mddev->pending_writes))
400                 wake_up(&mddev->sb_wait);
401         bio_put(bio);
402         return 0;
403 }
404
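/*
 * Completion for the barrier-flagged clone allocated in md_super_write()
 * below.  If the device rejects barriers (-EOPNOTSUPP), the original bio
 * (bio2, carried in bi_private) is queued on mddev->biolist so that
 * md_super_wait() can resubmit it without the barrier flag; otherwise the
 * result is handed on to super_written().
 */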
405 static int super_written_barrier(struct bio *bio, unsigned int bytes_done, int error)
406 {
407         struct bio *bio2 = bio->bi_private;
408         mdk_rdev_t *rdev = bio2->bi_private;
409         mddev_t *mddev = rdev->mddev;
410         if (bio->bi_size)
411                 return 1;
412
413         if (!test_bit(BIO_UPTODATE, &bio->bi_flags) &&
414             error == -EOPNOTSUPP) {
415                 unsigned long flags;
416                 /* barriers don't appear to be supported :-( */
417                 set_bit(BarriersNotsupp, &rdev->flags);
418                 mddev->barriers_work = 0;
419                 spin_lock_irqsave(&mddev->write_lock, flags);
420                 bio2->bi_next = mddev->biolist;
421                 mddev->biolist = bio2;
422                 spin_unlock_irqrestore(&mddev->write_lock, flags);
423                 wake_up(&mddev->sb_wait);
424                 bio_put(bio);
425                 return 0;
426         }
427         bio_put(bio2);
428         bio->bi_private = rdev;
429         return super_written(bio, bytes_done, error);
430 }
431
432 void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
433                    sector_t sector, int size, struct page *page)
434 {
435         /* write first size bytes of page to sector of rdev
436          * Increment mddev->pending_writes before returning
437          * and decrement it on completion, waking up sb_wait
438          * if zero is reached.
439          * If an error occurred, call md_error
440          *
441          * As we might need to resubmit the request if BIO_RW_BARRIER
442          * causes -EOPNOTSUPP, we allocate a spare bio...
443          */
444         struct bio *bio = bio_alloc(GFP_NOIO, 1);
445         int rw = (1<<BIO_RW) | (1<<BIO_RW_SYNC);
446
447         bio->bi_bdev = rdev->bdev;
448         bio->bi_sector = sector;
449         bio_add_page(bio, page, size, 0);
450         bio->bi_private = rdev;
451         bio->bi_end_io = super_written;
452         bio->bi_rw = rw;
453
454         atomic_inc(&mddev->pending_writes);
455         if (!test_bit(BarriersNotsupp, &rdev->flags)) {
456                 struct bio *rbio;
457                 rw |= (1<<BIO_RW_BARRIER);
458                 rbio = bio_clone(bio, GFP_NOIO);
459                 rbio->bi_private = bio;
460                 rbio->bi_end_io = super_written_barrier;
461                 submit_bio(rw, rbio);
462         } else
463                 submit_bio(rw, bio);
464 }
465
466 void md_super_wait(mddev_t *mddev)
467 {
468         /* wait for all superblock writes that were scheduled to complete.
469          * if any had to be retried (due to BARRIER problems), retry them
470          */
471         DEFINE_WAIT(wq);
472         for(;;) {
473                 prepare_to_wait(&mddev->sb_wait, &wq, TASK_UNINTERRUPTIBLE);
474                 if (atomic_read(&mddev->pending_writes)==0)
475                         break;
476                 while (mddev->biolist) {
477                         struct bio *bio;
478                         spin_lock_irq(&mddev->write_lock);
479                         bio = mddev->biolist;
480                         mddev->biolist = bio->bi_next ;
481                         bio->bi_next = NULL;
482                         spin_unlock_irq(&mddev->write_lock);
483                         submit_bio(bio->bi_rw, bio);
484                 }
485                 schedule();
486         }
487         finish_wait(&mddev->sb_wait, &wq);
488 }
489
490 static int bi_complete(struct bio *bio, unsigned int bytes_done, int error)
491 {
492         if (bio->bi_size)
493                 return 1;
494
495         complete((struct completion*)bio->bi_private);
496         return 0;
497 }
498
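/*
 * Synchronously read or write 'size' bytes of 'page' at 'sector' on 'bdev'.
 * Returns 1 if the bio completed with BIO_UPTODATE set, 0 otherwise.
 */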
499 int sync_page_io(struct block_device *bdev, sector_t sector, int size,
500                    struct page *page, int rw)
501 {
502         struct bio *bio = bio_alloc(GFP_NOIO, 1);
503         struct completion event;
504         int ret;
505
506         rw |= (1 << BIO_RW_SYNC);
507
508         bio->bi_bdev = bdev;
509         bio->bi_sector = sector;
510         bio_add_page(bio, page, size, 0);
511         init_completion(&event);
512         bio->bi_private = &event;
513         bio->bi_end_io = bi_complete;
514         submit_bio(rw, bio);
515         wait_for_completion(&event);
516
517         ret = test_bit(BIO_UPTODATE, &bio->bi_flags);
518         bio_put(bio);
519         return ret;
520 }
521 EXPORT_SYMBOL_GPL(sync_page_io);
522
523 static int read_disk_sb(mdk_rdev_t * rdev, int size)
524 {
525         char b[BDEVNAME_SIZE];
526         if (!rdev->sb_page) {
527                 MD_BUG();
528                 return -EINVAL;
529         }
530         if (rdev->sb_loaded)
531                 return 0;
532
533
534         if (!sync_page_io(rdev->bdev, rdev->sb_offset<<1, size, rdev->sb_page, READ))
535                 goto fail;
536         rdev->sb_loaded = 1;
537         return 0;
538
539 fail:
540         printk(KERN_WARNING "md: disabled device %s, could not read superblock.\n",
541                 bdevname(rdev->bdev,b));
542         return -EINVAL;
543 }
544
545 static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
546 {
547         if (    (sb1->set_uuid0 == sb2->set_uuid0) &&
548                 (sb1->set_uuid1 == sb2->set_uuid1) &&
549                 (sb1->set_uuid2 == sb2->set_uuid2) &&
550                 (sb1->set_uuid3 == sb2->set_uuid3))
551
552                 return 1;
553
554         return 0;
555 }
556
557
558 static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
559 {
560         int ret;
561         mdp_super_t *tmp1, *tmp2;
562
563         tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
564         tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);
565
566         if (!tmp1 || !tmp2) {
567                 ret = 0;
568                 printk(KERN_INFO "md.c: sb1 is not equal to sb2!\n");
569                 goto abort;
570         }
571
572         *tmp1 = *sb1;
573         *tmp2 = *sb2;
574
575         /*
576          * nr_disks is not constant
577          */
578         tmp1->nr_disks = 0;
579         tmp2->nr_disks = 0;
580
581         if (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4))
582                 ret = 0;
583         else
584                 ret = 1;
585
586 abort:
587         kfree(tmp1);
588         kfree(tmp2);
589         return ret;
590 }
591
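/*
 * Checksum of a 0.90 superblock: csum_partial() over MD_SB_BYTES with the
 * sb_csum field temporarily cleared so it does not checksum itself.
 */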
592 static unsigned int calc_sb_csum(mdp_super_t * sb)
593 {
594         unsigned int disk_csum, csum;
595
596         disk_csum = sb->sb_csum;
597         sb->sb_csum = 0;
598         csum = csum_partial((void *)sb, MD_SB_BYTES, 0);
599         sb->sb_csum = disk_csum;
600         return csum;
601 }
602
603
604 /*
605  * Handle superblock details.
606  * We want to be able to handle multiple superblock formats
607  * so we have a common interface to them all, and an array of
608  * different handlers.
609  * We rely on user-space to write the initial superblock, and support
610  * reading and updating of superblocks.
611  * Interface methods are:
612  *   int load_super(mdk_rdev_t *dev, mdk_rdev_t *refdev, int minor_version)
613  *      loads and validates a superblock on dev.
614  *      if refdev != NULL, compare superblocks on both devices
615  *    Return:
616  *      0 - dev has a superblock that is compatible with refdev
617  *      1 - dev has a superblock that is compatible and newer than refdev
618  *          so dev should be used as the refdev in future
619  *     -EINVAL superblock incompatible or invalid
620  *     -othererror e.g. -EIO
621  *
622  *   int validate_super(mddev_t *mddev, mdk_rdev_t *dev)
623  *      Verify that dev is acceptable into mddev.
624  *       The first time, mddev->raid_disks will be 0, and data from
625  *       dev should be merged in.  Subsequent calls check that dev
626  *       is new enough.  Return 0 or -EINVAL
627  *
628  *   void sync_super(mddev_t *mddev, mdk_rdev_t *dev)
629  *     Update the superblock for rdev with data in mddev
630  *     This does not write to disc.
631  *
632  */
633
634 struct super_type  {
635         char            *name;
636         struct module   *owner;
637         int             (*load_super)(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version);
638         int             (*validate_super)(mddev_t *mddev, mdk_rdev_t *rdev);
639         void            (*sync_super)(mddev_t *mddev, mdk_rdev_t *rdev);
640 };
641
642 /*
643  * load_super for 0.90.0 
644  */
645 static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
646 {
647         char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
648         mdp_super_t *sb;
649         int ret;
650         sector_t sb_offset;
651
652         /*
653          * Calculate the position of the superblock;
654          * it's at the end of the disk.
655          *
656          * It also happens to be a multiple of 4Kb.
657          */
658         sb_offset = calc_dev_sboffset(rdev->bdev);
659         rdev->sb_offset = sb_offset;
660
661         ret = read_disk_sb(rdev, MD_SB_BYTES);
662         if (ret) return ret;
663
664         ret = -EINVAL;
665
666         bdevname(rdev->bdev, b);
667         sb = (mdp_super_t*)page_address(rdev->sb_page);
668
669         if (sb->md_magic != MD_SB_MAGIC) {
670                 printk(KERN_ERR "md: invalid raid superblock magic on %s\n",
671                        b);
672                 goto abort;
673         }
674
675         if (sb->major_version != 0 ||
676             sb->minor_version < 90 ||
677             sb->minor_version > 91) {
678                 printk(KERN_WARNING "Bad version number %d.%d on %s\n",
679                         sb->major_version, sb->minor_version,
680                         b);
681                 goto abort;
682         }
683
684         if (sb->raid_disks <= 0)
685                 goto abort;
686
687         if (csum_fold(calc_sb_csum(sb)) != csum_fold(sb->sb_csum)) {
688                 printk(KERN_WARNING "md: invalid superblock checksum on %s\n",
689                         b);
690                 goto abort;
691         }
692
693         rdev->preferred_minor = sb->md_minor;
694         rdev->data_offset = 0;
695         rdev->sb_size = MD_SB_BYTES;
696
697         if (sb->level == LEVEL_MULTIPATH)
698                 rdev->desc_nr = -1;
699         else
700                 rdev->desc_nr = sb->this_disk.number;
701
702         if (refdev == 0)
703                 ret = 1;
704         else {
705                 __u64 ev1, ev2;
706                 mdp_super_t *refsb = (mdp_super_t*)page_address(refdev->sb_page);
707                 if (!uuid_equal(refsb, sb)) {
708                         printk(KERN_WARNING "md: %s has different UUID to %s\n",
709                                 b, bdevname(refdev->bdev,b2));
710                         goto abort;
711                 }
712                 if (!sb_equal(refsb, sb)) {
713                         printk(KERN_WARNING "md: %s has same UUID"
714                                " but different superblock to %s\n",
715                                b, bdevname(refdev->bdev, b2));
716                         goto abort;
717                 }
718                 ev1 = md_event(sb);
719                 ev2 = md_event(refsb);
720                 if (ev1 > ev2)
721                         ret = 1;
722                 else 
723                         ret = 0;
724         }
725         rdev->size = calc_dev_size(rdev, sb->chunk_size);
726
727         if (rdev->size < sb->size && sb->level > 1)
728                 /* "this cannot possibly happen" ... */
729                 ret = -EINVAL;
730
731  abort:
732         return ret;
733 }
734
735 /*
736  * validate_super for 0.90.0
737  */
738 static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
739 {
740         mdp_disk_t *desc;
741         mdp_super_t *sb = (mdp_super_t *)page_address(rdev->sb_page);
742         __u64 ev1 = md_event(sb);
743
744         rdev->raid_disk = -1;
745         rdev->flags = 0;
746         if (mddev->raid_disks == 0) {
747                 mddev->major_version = 0;
748                 mddev->minor_version = sb->minor_version;
749                 mddev->patch_version = sb->patch_version;
750                 mddev->persistent = ! sb->not_persistent;
751                 mddev->chunk_size = sb->chunk_size;
752                 mddev->ctime = sb->ctime;
753                 mddev->utime = sb->utime;
754                 mddev->level = sb->level;
755                 mddev->clevel[0] = 0;
756                 mddev->layout = sb->layout;
757                 mddev->raid_disks = sb->raid_disks;
758                 mddev->size = sb->size;
759                 mddev->events = ev1;
760                 mddev->bitmap_offset = 0;
761                 mddev->default_bitmap_offset = MD_SB_BYTES >> 9;
762
763                 if (mddev->minor_version >= 91) {
764                         mddev->reshape_position = sb->reshape_position;
765                         mddev->delta_disks = sb->delta_disks;
766                         mddev->new_level = sb->new_level;
767                         mddev->new_layout = sb->new_layout;
768                         mddev->new_chunk = sb->new_chunk;
769                 } else {
770                         mddev->reshape_position = MaxSector;
771                         mddev->delta_disks = 0;
772                         mddev->new_level = mddev->level;
773                         mddev->new_layout = mddev->layout;
774                         mddev->new_chunk = mddev->chunk_size;
775                 }
776
777                 if (sb->state & (1<<MD_SB_CLEAN))
778                         mddev->recovery_cp = MaxSector;
779                 else {
780                         if (sb->events_hi == sb->cp_events_hi && 
781                                 sb->events_lo == sb->cp_events_lo) {
782                                 mddev->recovery_cp = sb->recovery_cp;
783                         } else
784                                 mddev->recovery_cp = 0;
785                 }
786
787                 memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
788                 memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
789                 memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
790                 memcpy(mddev->uuid+12,&sb->set_uuid3, 4);
791
792                 mddev->max_disks = MD_SB_DISKS;
793
794                 if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
795                     mddev->bitmap_file == NULL) {
796                         if (mddev->level != 1 && mddev->level != 4
797                             && mddev->level != 5 && mddev->level != 6
798                             && mddev->level != 10) {
799                                 /* FIXME use a better test */
800                                 printk(KERN_WARNING "md: bitmaps not supported for this level.\n");
801                                 return -EINVAL;
802                         }
803                         mddev->bitmap_offset = mddev->default_bitmap_offset;
804                 }
805
806         } else if (mddev->pers == NULL) {
807                 /* Insist on good event counter while assembling */
808                 ++ev1;
809                 if (ev1 < mddev->events) 
810                         return -EINVAL;
811         } else if (mddev->bitmap) {
812                 /* if adding to array with a bitmap, then we can accept an
813                  * older device ... but not too old.
814                  */
815                 if (ev1 < mddev->bitmap->events_cleared)
816                         return 0;
817         } else {
818                 if (ev1 < mddev->events)
819                         /* just a hot-add of a new device, leave raid_disk at -1 */
820                         return 0;
821         }
822
823         if (mddev->level != LEVEL_MULTIPATH) {
824                 desc = sb->disks + rdev->desc_nr;
825
826                 if (desc->state & (1<<MD_DISK_FAULTY))
827                         set_bit(Faulty, &rdev->flags);
828                 else if (desc->state & (1<<MD_DISK_SYNC) /* &&
829                             desc->raid_disk < mddev->raid_disks */) {
830                         set_bit(In_sync, &rdev->flags);
831                         rdev->raid_disk = desc->raid_disk;
832                 }
833                 if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
834                         set_bit(WriteMostly, &rdev->flags);
835         } else /* MULTIPATH are always insync */
836                 set_bit(In_sync, &rdev->flags);
837         return 0;
838 }
839
840 /*
841  * sync_super for 0.90.0
842  */
843 static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
844 {
845         mdp_super_t *sb;
846         struct list_head *tmp;
847         mdk_rdev_t *rdev2;
848         int next_spare = mddev->raid_disks;
849
850
851         /* make rdev->sb match mddev data..
852          *
853          * 1/ zero out disks
854          * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
855          * 3/ any empty disks < next_spare become removed
856          *
857          * disks[0] gets initialised to REMOVED because
858          * we cannot be sure from other fields if it has
859          * been initialised or not.
860          */
861         int i;
862         int active=0, working=0,failed=0,spare=0,nr_disks=0;
863
864         rdev->sb_size = MD_SB_BYTES;
865
866         sb = (mdp_super_t*)page_address(rdev->sb_page);
867
868         memset(sb, 0, sizeof(*sb));
869
870         sb->md_magic = MD_SB_MAGIC;
871         sb->major_version = mddev->major_version;
872         sb->patch_version = mddev->patch_version;
873         sb->gvalid_words  = 0; /* ignored */
874         memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
875         memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
876         memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
877         memcpy(&sb->set_uuid3, mddev->uuid+12,4);
878
879         sb->ctime = mddev->ctime;
880         sb->level = mddev->level;
881         sb->size  = mddev->size;
882         sb->raid_disks = mddev->raid_disks;
883         sb->md_minor = mddev->md_minor;
884         sb->not_persistent = !mddev->persistent;
885         sb->utime = mddev->utime;
886         sb->state = 0;
887         sb->events_hi = (mddev->events>>32);
888         sb->events_lo = (u32)mddev->events;
889
890         if (mddev->reshape_position == MaxSector)
891                 sb->minor_version = 90;
892         else {
893                 sb->minor_version = 91;
894                 sb->reshape_position = mddev->reshape_position;
895                 sb->new_level = mddev->new_level;
896                 sb->delta_disks = mddev->delta_disks;
897                 sb->new_layout = mddev->new_layout;
898                 sb->new_chunk = mddev->new_chunk;
899         }
900         mddev->minor_version = sb->minor_version;
901         if (mddev->in_sync)
902         {
903                 sb->recovery_cp = mddev->recovery_cp;
904                 sb->cp_events_hi = (mddev->events>>32);
905                 sb->cp_events_lo = (u32)mddev->events;
906                 if (mddev->recovery_cp == MaxSector)
907                         sb->state = (1<< MD_SB_CLEAN);
908         } else
909                 sb->recovery_cp = 0;
910
911         sb->layout = mddev->layout;
912         sb->chunk_size = mddev->chunk_size;
913
914         if (mddev->bitmap && mddev->bitmap_file == NULL)
915                 sb->state |= (1<<MD_SB_BITMAP_PRESENT);
916
917         sb->disks[0].state = (1<<MD_DISK_REMOVED);
918         ITERATE_RDEV(mddev,rdev2,tmp) {
919                 mdp_disk_t *d;
920                 int desc_nr;
921                 if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags)
922                     && !test_bit(Faulty, &rdev2->flags))
923                         desc_nr = rdev2->raid_disk;
924                 else
925                         desc_nr = next_spare++;
926                 rdev2->desc_nr = desc_nr;
927                 d = &sb->disks[rdev2->desc_nr];
928                 nr_disks++;
929                 d->number = rdev2->desc_nr;
930                 d->major = MAJOR(rdev2->bdev->bd_dev);
931                 d->minor = MINOR(rdev2->bdev->bd_dev);
932                 if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags)
933                     && !test_bit(Faulty, &rdev2->flags))
934                         d->raid_disk = rdev2->raid_disk;
935                 else
936                         d->raid_disk = rdev2->desc_nr; /* compatibility */
937                 if (test_bit(Faulty, &rdev2->flags))
938                         d->state = (1<<MD_DISK_FAULTY);
939                 else if (test_bit(In_sync, &rdev2->flags)) {
940                         d->state = (1<<MD_DISK_ACTIVE);
941                         d->state |= (1<<MD_DISK_SYNC);
942                         active++;
943                         working++;
944                 } else {
945                         d->state = 0;
946                         spare++;
947                         working++;
948                 }
949                 if (test_bit(WriteMostly, &rdev2->flags))
950                         d->state |= (1<<MD_DISK_WRITEMOSTLY);
951         }
952         /* now set the "removed" and "faulty" bits on any missing devices */
953         for (i=0 ; i < mddev->raid_disks ; i++) {
954                 mdp_disk_t *d = &sb->disks[i];
955                 if (d->state == 0 && d->number == 0) {
956                         d->number = i;
957                         d->raid_disk = i;
958                         d->state = (1<<MD_DISK_REMOVED);
959                         d->state |= (1<<MD_DISK_FAULTY);
960                         failed++;
961                 }
962         }
963         sb->nr_disks = nr_disks;
964         sb->active_disks = active;
965         sb->working_disks = working;
966         sb->failed_disks = failed;
967         sb->spare_disks = spare;
968
969         sb->this_disk = sb->disks[rdev->desc_nr];
970         sb->sb_csum = calc_sb_csum(sb);
971 }
972
973 /*
974  * version 1 superblock
975  */
976
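/*
 * Checksum of a version-1 superblock: 256 header bytes plus two bytes per
 * possible device (max_dev), summed as little-endian 32-bit words (plus a
 * trailing 16-bit word when the length is not a multiple of four) and
 * folded down to 32 bits.
 */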
977 static unsigned int calc_sb_1_csum(struct mdp_superblock_1 * sb)
978 {
979         unsigned int disk_csum, csum;
980         unsigned long long newcsum;
981         int size = 256 + le32_to_cpu(sb->max_dev)*2;
982         unsigned int *isuper = (unsigned int*)sb;
983         int i;
984
985         disk_csum = sb->sb_csum;
986         sb->sb_csum = 0;
987         newcsum = 0;
988         for (i=0; size>=4; size -= 4 )
989                 newcsum += le32_to_cpu(*isuper++);
990
991         if (size == 2)
992                 newcsum += le16_to_cpu(*(unsigned short*) isuper);
993
994         csum = (newcsum & 0xffffffff) + (newcsum >> 32);
995         sb->sb_csum = disk_csum;
996         return cpu_to_le32(csum);
997 }
998
999 static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
1000 {
1001         struct mdp_superblock_1 *sb;
1002         int ret;
1003         sector_t sb_offset;
1004         char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
1005         int bmask;
1006
1007         /*
1008          * Calculate the position of the superblock.
1009          * It is always aligned to a 4K boundary and
1010          * depending on minor_version, it can be:
1011          * 0: At least 8K, but less than 12K, from end of device
1012          * 1: At start of device
1013          * 2: 4K from start of device.
1014          */
1015         switch(minor_version) {
1016         case 0:
1017                 sb_offset = rdev->bdev->bd_inode->i_size >> 9;
1018                 sb_offset -= 8*2;
1019                 sb_offset &= ~(sector_t)(4*2-1);
1020                 /* convert from sectors to K */
1021                 sb_offset /= 2;
1022                 break;
1023         case 1:
1024                 sb_offset = 0;
1025                 break;
1026         case 2:
1027                 sb_offset = 4;
1028                 break;
1029         default:
1030                 return -EINVAL;
1031         }
1032         rdev->sb_offset = sb_offset;
1033
1034         /* superblock is rarely larger than 1K, but it can be larger,
1035          * and it is safe to read 4k, so we do that
1036          */
1037         ret = read_disk_sb(rdev, 4096);
1038         if (ret) return ret;
1039
1040
1041         sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);
1042
1043         if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
1044             sb->major_version != cpu_to_le32(1) ||
1045             le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
1046             le64_to_cpu(sb->super_offset) != (rdev->sb_offset<<1) ||
1047             (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
1048                 return -EINVAL;
1049
1050         if (calc_sb_1_csum(sb) != sb->sb_csum) {
1051                 printk("md: invalid superblock checksum on %s\n",
1052                         bdevname(rdev->bdev,b));
1053                 return -EINVAL;
1054         }
1055         if (le64_to_cpu(sb->data_size) < 10) {
1056                 printk("md: data_size too small on %s\n",
1057                        bdevname(rdev->bdev,b));
1058                 return -EINVAL;
1059         }
1060         rdev->preferred_minor = 0xffff;
1061         rdev->data_offset = le64_to_cpu(sb->data_offset);
1062         atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
1063
1064         rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
1065         bmask = queue_hardsect_size(rdev->bdev->bd_disk->queue)-1;
1066         if (rdev->sb_size & bmask)
1067                 rdev-> sb_size = (rdev->sb_size | bmask)+1;
1068
1069         if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
1070                 rdev->desc_nr = -1;
1071         else
1072                 rdev->desc_nr = le32_to_cpu(sb->dev_number);
1073
1074         if (refdev == 0)
1075                 ret = 1;
1076         else {
1077                 __u64 ev1, ev2;
1078                 struct mdp_superblock_1 *refsb = 
1079                         (struct mdp_superblock_1*)page_address(refdev->sb_page);
1080
1081                 if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
1082                     sb->level != refsb->level ||
1083                     sb->layout != refsb->layout ||
1084                     sb->chunksize != refsb->chunksize) {
1085                         printk(KERN_WARNING "md: %s has strangely different"
1086                                 " superblock to %s\n",
1087                                 bdevname(rdev->bdev,b),
1088                                 bdevname(refdev->bdev,b2));
1089                         return -EINVAL;
1090                 }
1091                 ev1 = le64_to_cpu(sb->events);
1092                 ev2 = le64_to_cpu(refsb->events);
1093
1094                 if (ev1 > ev2)
1095                         ret = 1;
1096                 else
1097                         ret = 0;
1098         }
1099         if (minor_version) 
1100                 rdev->size = ((rdev->bdev->bd_inode->i_size>>9) - le64_to_cpu(sb->data_offset)) / 2;
1101         else
1102                 rdev->size = rdev->sb_offset;
1103         if (rdev->size < le64_to_cpu(sb->data_size)/2)
1104                 return -EINVAL;
1105         rdev->size = le64_to_cpu(sb->data_size)/2;
1106         if (le32_to_cpu(sb->chunksize))
1107                 rdev->size &= ~((sector_t)le32_to_cpu(sb->chunksize)/2 - 1);
1108
1109         if (le32_to_cpu(sb->size) > rdev->size*2)
1110                 return -EINVAL;
1111         return ret;
1112 }
1113
1114 static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
1115 {
1116         struct mdp_superblock_1 *sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);
1117         __u64 ev1 = le64_to_cpu(sb->events);
1118
1119         rdev->raid_disk = -1;
1120         rdev->flags = 0;
1121         if (mddev->raid_disks == 0) {
1122                 mddev->major_version = 1;
1123                 mddev->patch_version = 0;
1124                 mddev->persistent = 1;
1125                 mddev->chunk_size = le32_to_cpu(sb->chunksize) << 9;
1126                 mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1);
1127                 mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1);
1128                 mddev->level = le32_to_cpu(sb->level);
1129                 mddev->clevel[0] = 0;
1130                 mddev->layout = le32_to_cpu(sb->layout);
1131                 mddev->raid_disks = le32_to_cpu(sb->raid_disks);
1132                 mddev->size = le64_to_cpu(sb->size)/2;
1133                 mddev->events = ev1;
1134                 mddev->bitmap_offset = 0;
1135                 mddev->default_bitmap_offset = 1024 >> 9;
1136                 
1137                 mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
1138                 memcpy(mddev->uuid, sb->set_uuid, 16);
1139
1140                 mddev->max_disks =  (4096-256)/2;
1141
1142                 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
1143                     mddev->bitmap_file == NULL ) {
1144                         if (mddev->level != 1 && mddev->level != 5 && mddev->level != 6
1145                             && mddev->level != 10) {
1146                                 printk(KERN_WARNING "md: bitmaps not supported for this level.\n");
1147                                 return -EINVAL;
1148                         }
1149                         mddev->bitmap_offset = (__s32)le32_to_cpu(sb->bitmap_offset);
1150                 }
1151                 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
1152                         mddev->reshape_position = le64_to_cpu(sb->reshape_position);
1153                         mddev->delta_disks = le32_to_cpu(sb->delta_disks);
1154                         mddev->new_level = le32_to_cpu(sb->new_level);
1155                         mddev->new_layout = le32_to_cpu(sb->new_layout);
1156                         mddev->new_chunk = le32_to_cpu(sb->new_chunk)<<9;
1157                 } else {
1158                         mddev->reshape_position = MaxSector;
1159                         mddev->delta_disks = 0;
1160                         mddev->new_level = mddev->level;
1161                         mddev->new_layout = mddev->layout;
1162                         mddev->new_chunk = mddev->chunk_size;
1163                 }
1164
1165         } else if (mddev->pers == NULL) {
1166                 /* Insist on good event counter while assembling */
1167                 ++ev1;
1168                 if (ev1 < mddev->events)
1169                         return -EINVAL;
1170         } else if (mddev->bitmap) {
1171                 /* If adding to array with a bitmap, then we can accept an
1172                  * older device, but not too old.
1173                  */
1174                 if (ev1 < mddev->bitmap->events_cleared)
1175                         return 0;
1176         } else {
1177                 if (ev1 < mddev->events)
1178                         /* just a hot-add of a new device, leave raid_disk at -1 */
1179                         return 0;
1180         }
1181         if (mddev->level != LEVEL_MULTIPATH) {
1182                 int role;
1183                 role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
1184                 switch(role) {
1185                 case 0xffff: /* spare */
1186                         break;
1187                 case 0xfffe: /* faulty */
1188                         set_bit(Faulty, &rdev->flags);
1189                         break;
1190                 default:
1191                         if ((le32_to_cpu(sb->feature_map) &
1192                              MD_FEATURE_RECOVERY_OFFSET))
1193                                 rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
1194                         else
1195                                 set_bit(In_sync, &rdev->flags);
1196                         rdev->raid_disk = role;
1197                         break;
1198                 }
1199                 if (sb->devflags & WriteMostly1)
1200                         set_bit(WriteMostly, &rdev->flags);
1201         } else /* MULTIPATH are always insync */
1202                 set_bit(In_sync, &rdev->flags);
1203
1204         return 0;
1205 }
1206
1207 static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
1208 {
1209         struct mdp_superblock_1 *sb;
1210         struct list_head *tmp;
1211         mdk_rdev_t *rdev2;
1212         int max_dev, i;
1213         /* make rdev->sb match mddev and rdev data. */
1214
1215         sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);
1216
1217         sb->feature_map = 0;
1218         sb->pad0 = 0;
1219         sb->recovery_offset = cpu_to_le64(0);
1220         memset(sb->pad1, 0, sizeof(sb->pad1));
1221         memset(sb->pad2, 0, sizeof(sb->pad2));
1222         memset(sb->pad3, 0, sizeof(sb->pad3));
1223
1224         sb->utime = cpu_to_le64((__u64)mddev->utime);
1225         sb->events = cpu_to_le64(mddev->events);
1226         if (mddev->in_sync)
1227                 sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
1228         else
1229                 sb->resync_offset = cpu_to_le64(0);
1230
1231         sb->cnt_corrected_read = atomic_read(&rdev->corrected_errors);
1232
1233         sb->raid_disks = cpu_to_le32(mddev->raid_disks);
1234         sb->size = cpu_to_le64(mddev->size<<1);
1235
1236         if (mddev->bitmap && mddev->bitmap_file == NULL) {
1237                 sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_offset);
1238                 sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
1239         }
1240
1241         if (rdev->raid_disk >= 0 &&
1242             !test_bit(In_sync, &rdev->flags) &&
1243             rdev->recovery_offset > 0) {
1244                 sb->feature_map |= cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
1245                 sb->recovery_offset = cpu_to_le64(rdev->recovery_offset);
1246         }
1247
1248         if (mddev->reshape_position != MaxSector) {
1249                 sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
1250                 sb->reshape_position = cpu_to_le64(mddev->reshape_position);
1251                 sb->new_layout = cpu_to_le32(mddev->new_layout);
1252                 sb->delta_disks = cpu_to_le32(mddev->delta_disks);
1253                 sb->new_level = cpu_to_le32(mddev->new_level);
1254                 sb->new_chunk = cpu_to_le32(mddev->new_chunk>>9);
1255         }
1256
1257         max_dev = 0;
1258         ITERATE_RDEV(mddev,rdev2,tmp)
1259                 if (rdev2->desc_nr+1 > max_dev)
1260                         max_dev = rdev2->desc_nr+1;
1261         
1262         sb->max_dev = cpu_to_le32(max_dev);
1263         for (i=0; i<max_dev;i++)
1264                 sb->dev_roles[i] = cpu_to_le16(0xfffe);
1265         
1266         ITERATE_RDEV(mddev,rdev2,tmp) {
1267                 i = rdev2->desc_nr;
1268                 if (test_bit(Faulty, &rdev2->flags))
1269                         sb->dev_roles[i] = cpu_to_le16(0xfffe);
1270                 else if (test_bit(In_sync, &rdev2->flags))
1271                         sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
1272                 else if (rdev2->raid_disk >= 0 && rdev2->recovery_offset > 0)
1273                         sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
1274                 else
1275                         sb->dev_roles[i] = cpu_to_le16(0xffff);
1276         }
1277
1278         sb->sb_csum = calc_sb_1_csum(sb);
1279 }
1280
1281
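/*
 * Table of superblock handlers; the entries are indexed by superblock
 * major version (0 -> 0.90.0, 1 -> the v1.x "md-1" format).
 */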
1282 static struct super_type super_types[] = {
1283         [0] = {
1284                 .name   = "0.90.0",
1285                 .owner  = THIS_MODULE,
1286                 .load_super     = super_90_load,
1287                 .validate_super = super_90_validate,
1288                 .sync_super     = super_90_sync,
1289         },
1290         [1] = {
1291                 .name   = "md-1",
1292                 .owner  = THIS_MODULE,
1293                 .load_super     = super_1_load,
1294                 .validate_super = super_1_validate,
1295                 .sync_super     = super_1_sync,
1296         },
1297 };
1298         
1299 static mdk_rdev_t * match_dev_unit(mddev_t *mddev, mdk_rdev_t *dev)
1300 {
1301         struct list_head *tmp;
1302         mdk_rdev_t *rdev;
1303
1304         ITERATE_RDEV(mddev,rdev,tmp)
1305                 if (rdev->bdev->bd_contains == dev->bdev->bd_contains)
1306                         return rdev;
1307
1308         return NULL;
1309 }
1310
1311 static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2)
1312 {
1313         struct list_head *tmp;
1314         mdk_rdev_t *rdev;
1315
1316         ITERATE_RDEV(mddev1,rdev,tmp)
1317                 if (match_dev_unit(mddev2, rdev))
1318                         return 1;
1319
1320         return 0;
1321 }
1322
1323 static LIST_HEAD(pending_raid_disks);
1324
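/*
 * Attach rdev to the array: ensure it has a unique desc_nr, add it to
 * mddev->disks, register its kobject under the array's kobject with a
 * "block" symlink to the underlying device, and claim the disk on behalf
 * of the array's gendisk.
 */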
1325 static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
1326 {
1327         mdk_rdev_t *same_pdev;
1328         char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
1329         struct kobject *ko;
1330         char *s;
1331
1332         if (rdev->mddev) {
1333                 MD_BUG();
1334                 return -EINVAL;
1335         }
1336         /* make sure rdev->size exceeds mddev->size */
1337         if (rdev->size && (mddev->size == 0 || rdev->size < mddev->size)) {
1338                 if (mddev->pers)
1339                         /* Cannot change size, so fail */
1340                         return -ENOSPC;
1341                 else
1342                         mddev->size = rdev->size;
1343         }
1344         same_pdev = match_dev_unit(mddev, rdev);
1345         if (same_pdev)
1346                 printk(KERN_WARNING
1347                         "%s: WARNING: %s appears to be on the same physical"
1348                         " disk as %s. True\n     protection against single-disk"
1349                         " failure might be compromised.\n",
1350                         mdname(mddev), bdevname(rdev->bdev,b),
1351                         bdevname(same_pdev->bdev,b2));
1352
1353         /* Verify rdev->desc_nr is unique.
1354          * If it is -1, assign a free number, else
1355          * check number is not in use
1356          */
1357         if (rdev->desc_nr < 0) {
1358                 int choice = 0;
1359                 if (mddev->pers) choice = mddev->raid_disks;
1360                 while (find_rdev_nr(mddev, choice))
1361                         choice++;
1362                 rdev->desc_nr = choice;
1363         } else {
1364                 if (find_rdev_nr(mddev, rdev->desc_nr))
1365                         return -EBUSY;
1366         }
1367         bdevname(rdev->bdev,b);
1368         if (kobject_set_name(&rdev->kobj, "dev-%s", b) < 0)
1369                 return -ENOMEM;
1370         while ( (s=strchr(rdev->kobj.k_name, '/')) != NULL)
1371                 *s = '!';
1372                         
1373         list_add(&rdev->same_set, &mddev->disks);
1374         rdev->mddev = mddev;
1375         printk(KERN_INFO "md: bind<%s>\n", b);
1376
1377         rdev->kobj.parent = &mddev->kobj;
1378         kobject_add(&rdev->kobj);
1379
1380         if (rdev->bdev->bd_part)
1381                 ko = &rdev->bdev->bd_part->kobj;
1382         else
1383                 ko = &rdev->bdev->bd_disk->kobj;
1384         sysfs_create_link(&rdev->kobj, ko, "block");
1385         bd_claim_by_disk(rdev->bdev, rdev, mddev->gendisk);
1386         return 0;
1387 }
1388
1389 static void unbind_rdev_from_array(mdk_rdev_t * rdev)
1390 {
1391         char b[BDEVNAME_SIZE];
1392         if (!rdev->mddev) {
1393                 MD_BUG();
1394                 return;
1395         }
1396         bd_release_from_disk(rdev->bdev, rdev->mddev->gendisk);
1397         list_del_init(&rdev->same_set);
1398         printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b));
1399         rdev->mddev = NULL;
1400         sysfs_remove_link(&rdev->kobj, "block");
1401         kobject_del(&rdev->kobj);
1402 }
1403
1404 /*
1405  * prevent the device from being mounted, repartitioned or
1406  * otherwise reused by a RAID array (or any other kernel
1407  * subsystem), by bd_claiming the device.
1408  */
1409 static int lock_rdev(mdk_rdev_t *rdev, dev_t dev)
1410 {
1411         int err = 0;
1412         struct block_device *bdev;
1413         char b[BDEVNAME_SIZE];
1414
1415         bdev = open_partition_by_devnum(dev, FMODE_READ|FMODE_WRITE);
1416         if (IS_ERR(bdev)) {
1417                 printk(KERN_ERR "md: could not open %s.\n",
1418                         __bdevname(dev, b));
1419                 return PTR_ERR(bdev);
1420         }
1421         err = bd_claim(bdev, rdev);
1422         if (err) {
1423                 printk(KERN_ERR "md: could not bd_claim %s.\n",
1424                         bdevname(bdev, b));
1425                 blkdev_put_partition(bdev);
1426                 return err;
1427         }
1428         rdev->bdev = bdev;
1429         return err;
1430 }
1431
1432 static void unlock_rdev(mdk_rdev_t *rdev)
1433 {
1434         struct block_device *bdev = rdev->bdev;
1435         rdev->bdev = NULL;
1436         if (!bdev)
1437                 MD_BUG();
1438         bd_release(bdev);
1439         blkdev_put_partition(bdev);
1440 }
1441
1442 void md_autodetect_dev(dev_t dev);
1443
1444 static void export_rdev(mdk_rdev_t * rdev)
1445 {
1446         char b[BDEVNAME_SIZE];
1447         printk(KERN_INFO "md: export_rdev(%s)\n",
1448                 bdevname(rdev->bdev,b));
1449         if (rdev->mddev)
1450                 MD_BUG();
1451         free_disk_sb(rdev);
1452         list_del_init(&rdev->same_set);
1453 #ifndef MODULE
1454         md_autodetect_dev(rdev->bdev->bd_dev);
1455 #endif
1456         unlock_rdev(rdev);
1457         kobject_put(&rdev->kobj);
1458 }
1459
1460 static void kick_rdev_from_array(mdk_rdev_t * rdev)
1461 {
1462         unbind_rdev_from_array(rdev);
1463         export_rdev(rdev);
1464 }
1465
1466 static void export_array(mddev_t *mddev)
1467 {
1468         struct list_head *tmp;
1469         mdk_rdev_t *rdev;
1470
1471         ITERATE_RDEV(mddev,rdev,tmp) {
1472                 if (!rdev->mddev) {
1473                         MD_BUG();
1474                         continue;
1475                 }
1476                 kick_rdev_from_array(rdev);
1477         }
1478         if (!list_empty(&mddev->disks))
1479                 MD_BUG();
1480         mddev->raid_disks = 0;
1481         mddev->major_version = 0;
1482 }
1483
1484 static void print_desc(mdp_disk_t *desc)
1485 {
1486         printk(" DISK<N:%d,(%d,%d),R:%d,S:%d>\n", desc->number,
1487                 desc->major,desc->minor,desc->raid_disk,desc->state);
1488 }
1489
1490 static void print_sb(mdp_super_t *sb)
1491 {
1492         int i;
1493
1494         printk(KERN_INFO 
1495                 "md:  SB: (V:%d.%d.%d) ID:<%08x.%08x.%08x.%08x> CT:%08x\n",
1496                 sb->major_version, sb->minor_version, sb->patch_version,
1497                 sb->set_uuid0, sb->set_uuid1, sb->set_uuid2, sb->set_uuid3,
1498                 sb->ctime);
1499         printk(KERN_INFO "md:     L%d S%08d ND:%d RD:%d md%d LO:%d CS:%d\n",
1500                 sb->level, sb->size, sb->nr_disks, sb->raid_disks,
1501                 sb->md_minor, sb->layout, sb->chunk_size);
1502         printk(KERN_INFO "md:     UT:%08x ST:%d AD:%d WD:%d"
1503                 " FD:%d SD:%d CSUM:%08x E:%08lx\n",
1504                 sb->utime, sb->state, sb->active_disks, sb->working_disks,
1505                 sb->failed_disks, sb->spare_disks,
1506                 sb->sb_csum, (unsigned long)sb->events_lo);
1507
1508         printk(KERN_INFO);
1509         for (i = 0; i < MD_SB_DISKS; i++) {
1510                 mdp_disk_t *desc;
1511
1512                 desc = sb->disks + i;
1513                 if (desc->number || desc->major || desc->minor ||
1514                     desc->raid_disk || (desc->state && (desc->state != 4))) {
1515                         printk("     D %2d: ", i);
1516                         print_desc(desc);
1517                 }
1518         }
1519         printk(KERN_INFO "md:     THIS: ");
1520         print_desc(&sb->this_disk);
1521
1522 }
1523
1524 static void print_rdev(mdk_rdev_t *rdev)
1525 {
1526         char b[BDEVNAME_SIZE];
1527         printk(KERN_INFO "md: rdev %s, SZ:%08llu F:%d S:%d DN:%u\n",
1528                 bdevname(rdev->bdev,b), (unsigned long long)rdev->size,
1529                 test_bit(Faulty, &rdev->flags), test_bit(In_sync, &rdev->flags),
1530                 rdev->desc_nr);
1531         if (rdev->sb_loaded) {
1532                 printk(KERN_INFO "md: rdev superblock:\n");
1533                 print_sb((mdp_super_t*)page_address(rdev->sb_page));
1534         } else
1535                 printk(KERN_INFO "md: no rdev superblock!\n");
1536 }
1537
1538 static void md_print_devices(void)
1539 {
1540         struct list_head *tmp, *tmp2;
1541         mdk_rdev_t *rdev;
1542         mddev_t *mddev;
1543         char b[BDEVNAME_SIZE];
1544
1545         printk("\n");
1546         printk("md:     **********************************\n");
1547         printk("md:     * <COMPLETE RAID STATE PRINTOUT> *\n");
1548         printk("md:     **********************************\n");
1549         ITERATE_MDDEV(mddev,tmp) {
1550
1551                 if (mddev->bitmap)
1552                         bitmap_print_sb(mddev->bitmap);
1553                 else
1554                         printk("%s: ", mdname(mddev));
1555                 ITERATE_RDEV(mddev,rdev,tmp2)
1556                         printk("<%s>", bdevname(rdev->bdev,b));
1557                 printk("\n");
1558
1559                 ITERATE_RDEV(mddev,rdev,tmp2)
1560                         print_rdev(rdev);
1561         }
1562         printk("md:     **********************************\n");
1563         printk("\n");
1564 }
1565
1566
1567 static void sync_sbs(mddev_t * mddev, int nospares)
1568 {
1569         /* Update each superblock (in-memory image), but
1570          * if we are allowed to, skip spares which already
1571          * have the right event counter, or have one earlier
1572          * (which would mean they aren't being marked as dirty
1573          * with the rest of the array)
1574          */
1575         mdk_rdev_t *rdev;
1576         struct list_head *tmp;
1577
1578         ITERATE_RDEV(mddev,rdev,tmp) {
1579                 if (rdev->sb_events == mddev->events ||
1580                     (nospares &&
1581                      rdev->raid_disk < 0 &&
1582                      (rdev->sb_events&1)==0 &&
1583                      rdev->sb_events+1 == mddev->events)) {
1584                         /* Don't update this superblock */
1585                         rdev->sb_loaded = 2;
1586                 } else {
1587                         super_types[mddev->major_version].
1588                                 sync_super(mddev, rdev);
1589                         rdev->sb_loaded = 1;
1590                 }
1591         }
1592 }
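/*
 * Illustration of the skip condition above (values invented for the
 * example): a spare (raid_disk < 0) whose superblock recorded
 * sb_events == 8 (even, i.e. written while clean) need not be rewritten
 * when the array merely went dirty and is now at events == 9, because a
 * later clean transition may roll the counter back to 8, leaving the
 * spare consistent without any extra superblock writes.
 */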
1593
1594 static void md_update_sb(mddev_t * mddev, int force_change)
1595 {
1596         int err;
1597         struct list_head *tmp;
1598         mdk_rdev_t *rdev;
1599         int sync_req;
1600         int nospares = 0;
1601
1602 repeat:
1603         spin_lock_irq(&mddev->write_lock);
1604
1605         set_bit(MD_CHANGE_PENDING, &mddev->flags);
1606         if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
1607                 force_change = 1;
1608         if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags))
1609                 /* just a clean <-> dirty transition; possibly leave spares alone,
1610                  * though if 'events' isn't the right even/odd, we will have to
1611                  * update the spares after all
1612                  */
1613                 nospares = 1;
1614         if (force_change)
1615                 nospares = 0;
1616         if (mddev->degraded)
1617                 /* If the array is degraded, then skipping spares is both
1618                  * dangerous and fairly pointless.
1619                  * Dangerous because a device that was removed from the array
1620                  * might have an event_count that still looks up-to-date,
1621                  * so it can be re-added without a resync.
1622                  * Pointless because if there are any spares to skip,
1623                  * then a recovery will happen and soon that array won't
1624                  * be degraded any more and the spare can go back to sleep then.
1625                  */
1626                 nospares = 0;
1627
1628         sync_req = mddev->in_sync;
1629         mddev->utime = get_seconds();
1630
1631         /* If this is just a dirty<->clean transition, and the array is clean
1632          * and 'events' is odd, we can roll back to the previous clean state */
1633         if (nospares
1634             && (mddev->in_sync && mddev->recovery_cp == MaxSector)
1635             && (mddev->events & 1))
1636                 mddev->events--;
1637         else {
1638                 /* otherwise we have to go forward and ... */
1639                 mddev->events ++;
1640                 if (!mddev->in_sync || mddev->recovery_cp != MaxSector) { /* not clean */
1641                         /* .. if the array isn't clean, insist on an odd 'events' */
1642                         if ((mddev->events&1)==0) {
1643                                 mddev->events++;
1644                                 nospares = 0;
1645                         }
1646                 } else {
1647                         /* otherwise insist on an even 'events' (for clean states) */
1648                         if ((mddev->events&1)) {
1649                                 mddev->events++;
1650                                 nospares = 0;
1651                         }
1652                 }
1653         }
1654
1655         if (!mddev->events) {
1656                 /*
1657                  * oops, this 64-bit counter should never wrap.
1658                  * Either we are in around ~1 trillion A.C., assuming
1659                  * 1 reboot per second, or we have a bug:
1660                  */
1661                 MD_BUG();
1662                 mddev->events --;
1663         }
1664         sync_sbs(mddev, nospares);
1665
1666         /*
1667          * do not write anything to disk if using
1668          * nonpersistent superblocks
1669          */
1670         if (!mddev->persistent) {
1671                 clear_bit(MD_CHANGE_PENDING, &mddev->flags);
1672                 spin_unlock_irq(&mddev->write_lock);
1673                 wake_up(&mddev->sb_wait);
1674                 return;
1675         }
1676         spin_unlock_irq(&mddev->write_lock);
1677
1678         dprintk(KERN_INFO 
1679                 "md: updating %s RAID superblock on device (in sync %d)\n",
1680                 mdname(mddev),mddev->in_sync);
1681
1682         err = bitmap_update_sb(mddev->bitmap);
1683         ITERATE_RDEV(mddev,rdev,tmp) {
1684                 char b[BDEVNAME_SIZE];
1685                 dprintk(KERN_INFO "md: ");
1686                 if (rdev->sb_loaded != 1)
1687                         continue; /* no noise on spare devices */
1688                 if (test_bit(Faulty, &rdev->flags))
1689                         dprintk("(skipping faulty ");
1690
1691                 dprintk("%s ", bdevname(rdev->bdev,b));
1692                 if (!test_bit(Faulty, &rdev->flags)) {
1693                         md_super_write(mddev,rdev,
1694                                        rdev->sb_offset<<1, rdev->sb_size,
1695                                        rdev->sb_page);
1696                         dprintk(KERN_INFO "(write) %s's sb offset: %llu\n",
1697                                 bdevname(rdev->bdev,b),
1698                                 (unsigned long long)rdev->sb_offset);
1699                         rdev->sb_events = mddev->events;
1700
1701                 } else
1702                         dprintk(")\n");
1703                 if (mddev->level == LEVEL_MULTIPATH)
1704                         /* only need to write one superblock... */
1705                         break;
1706         }
1707         md_super_wait(mddev);
1708         /* if there was a failure, MD_CHANGE_DEVS was set, and we re-write super */
1709
1710         spin_lock_irq(&mddev->write_lock);
1711         if (mddev->in_sync != sync_req ||
1712             test_bit(MD_CHANGE_DEVS, &mddev->flags)) {
1713                 /* have to write it out again */
1714                 spin_unlock_irq(&mddev->write_lock);
1715                 goto repeat;
1716         }
1717         clear_bit(MD_CHANGE_PENDING, &mddev->flags);
1718         spin_unlock_irq(&mddev->write_lock);
1719         wake_up(&mddev->sb_wait);
1720
1721 }
1722
1723 /* Words written to sysfs files may, or may not, be \n terminated.
1724  * We want to accept either case. For this we use cmd_match.
1725  */
1726 static int cmd_match(const char *cmd, const char *str)
1727 {
1728         /* See if cmd, written into a sysfs file, matches
1729          * str.  They must either be the same, or cmd can
1730          * have a trailing newline
1731          */
1732         while (*cmd && *str && *cmd == *str) {
1733                 cmd++;
1734                 str++;
1735         }
1736         if (*cmd == '\n')
1737                 cmd++;
1738         if (*str || *cmd)
1739                 return 0;
1740         return 1;
1741 }
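/*
 * For example (inputs invented for illustration):
 *   cmd_match("idle\n", "idle") == 1    and   cmd_match("idle", "idle") == 1
 *   cmd_match("idle2", "idle") == 0     and   cmd_match("idle", "idler") == 0
 * so a keyword written via sysfs matches with or without a trailing newline.
 */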
1742
1743 struct rdev_sysfs_entry {
1744         struct attribute attr;
1745         ssize_t (*show)(mdk_rdev_t *, char *);
1746         ssize_t (*store)(mdk_rdev_t *, const char *, size_t);
1747 };
1748
1749 static ssize_t
1750 state_show(mdk_rdev_t *rdev, char *page)
1751 {
1752         char *sep = "";
1753         int len=0;
1754
1755         if (test_bit(Faulty, &rdev->flags)) {
1756                 len+= sprintf(page+len, "%sfaulty",sep);
1757                 sep = ",";
1758         }
1759         if (test_bit(In_sync, &rdev->flags)) {
1760                 len += sprintf(page+len, "%sin_sync",sep);
1761                 sep = ",";
1762         }
1763         if (test_bit(WriteMostly, &rdev->flags)) {
1764                 len += sprintf(page+len, "%swrite_mostly",sep);
1765                 sep = ",";
1766         }
1767         if (!test_bit(Faulty, &rdev->flags) &&
1768             !test_bit(In_sync, &rdev->flags)) {
1769                 len += sprintf(page+len, "%sspare", sep);
1770                 sep = ",";
1771         }
1772         return len+sprintf(page+len, "\n");
1773 }
1774
1775 static ssize_t
1776 state_store(mdk_rdev_t *rdev, const char *buf, size_t len)
1777 {
1778         /* can write
1779          *  faulty  - simulates an error
1780          *  remove  - disconnects the device
1781          *  writemostly - sets write_mostly
1782          *  -writemostly - clears write_mostly
1783          */
1784         int err = -EINVAL;
1785         if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
1786                 md_error(rdev->mddev, rdev);
1787                 err = 0;
1788         } else if (cmd_match(buf, "remove")) {
1789                 if (rdev->raid_disk >= 0)
1790                         err = -EBUSY;
1791                 else {
1792                         mddev_t *mddev = rdev->mddev;
1793                         kick_rdev_from_array(rdev);
1794                         md_update_sb(mddev, 1);
1795                         md_new_event(mddev);
1796                         err = 0;
1797                 }
1798         } else if (cmd_match(buf, "writemostly")) {
1799                 set_bit(WriteMostly, &rdev->flags);
1800                 err = 0;
1801         } else if (cmd_match(buf, "-writemostly")) {
1802                 clear_bit(WriteMostly, &rdev->flags);
1803                 err = 0;
1804         }
1805         return err ? err : len;
1806 }
1807 static struct rdev_sysfs_entry rdev_state =
1808 __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
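/*
 * Typical use of the per-device 'state' file from user space (a sketch;
 * the exact directory name for the member device is an assumption here,
 * it depends on how the rdev kobject was registered):
 *
 *   cat .../md/dev-sda1/state        # e.g. "in_sync" or "faulty"
 *   echo writemostly  > .../state    # set WriteMostly
 *   echo -writemostly > .../state    # clear WriteMostly
 *   echo remove       > .../state    # drop a device that has no raid slot
 */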
1809
1810 static ssize_t
1811 super_show(mdk_rdev_t *rdev, char *page)
1812 {
1813         if (rdev->sb_loaded && rdev->sb_size) {
1814                 memcpy(page, page_address(rdev->sb_page), rdev->sb_size);
1815                 return rdev->sb_size;
1816         } else
1817                 return 0;
1818 }
1819 static struct rdev_sysfs_entry rdev_super = __ATTR_RO(super);
1820
1821 static ssize_t
1822 errors_show(mdk_rdev_t *rdev, char *page)
1823 {
1824         return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
1825 }
1826
1827 static ssize_t
1828 errors_store(mdk_rdev_t *rdev, const char *buf, size_t len)
1829 {
1830         char *e;
1831         unsigned long n = simple_strtoul(buf, &e, 10);
1832         if (*buf && (*e == 0 || *e == '\n')) {
1833                 atomic_set(&rdev->corrected_errors, n);
1834                 return len;
1835         }
1836         return -EINVAL;
1837 }
1838 static struct rdev_sysfs_entry rdev_errors =
1839 __ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store);
1840
1841 static ssize_t
1842 slot_show(mdk_rdev_t *rdev, char *page)
1843 {
1844         if (rdev->raid_disk < 0)
1845                 return sprintf(page, "none\n");
1846         else
1847                 return sprintf(page, "%d\n", rdev->raid_disk);
1848 }
1849
1850 static ssize_t
1851 slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
1852 {
1853         char *e;
1854         int slot = simple_strtoul(buf, &e, 10);
1855         if (strncmp(buf, "none", 4)==0)
1856                 slot = -1;
1857         else if (e==buf || (*e && *e!= '\n'))
1858                 return -EINVAL;
1859         if (rdev->mddev->pers)
1860                 /* Cannot set slot in active array (yet) */
1861                 return -EBUSY;
1862         if (slot >= rdev->mddev->raid_disks)
1863                 return -ENOSPC;
1864         rdev->raid_disk = slot;
1865         /* assume it is working */
1866         rdev->flags = 0;
1867         set_bit(In_sync, &rdev->flags);
1868         return len;
1869 }
1870
1871
1872 static struct rdev_sysfs_entry rdev_slot =
1873 __ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store);
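/*
 * Example for 'slot' (illustrative): reading gives "none" for a spare or
 * the raid_disk number otherwise; writing is only accepted while the
 * array is not active, e.g.
 *
 *   echo 2    > slot    # treat this device as raid disk 2
 *   echo none > slot    # make it a spare (raid_disk = -1) again
 */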
1874
1875 static ssize_t
1876 offset_show(mdk_rdev_t *rdev, char *page)
1877 {
1878         return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset);
1879 }
1880
1881 static ssize_t
1882 offset_store(mdk_rdev_t *rdev, const char *buf, size_t len)
1883 {
1884         char *e;
1885         unsigned long long offset = simple_strtoull(buf, &e, 10);
1886         if (e==buf || (*e && *e != '\n'))
1887                 return -EINVAL;
1888         if (rdev->mddev->pers)
1889                 return -EBUSY;
1890         rdev->data_offset = offset;
1891         return len;
1892 }
1893
1894 static struct rdev_sysfs_entry rdev_offset =
1895 __ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store);
1896
1897 static ssize_t
1898 rdev_size_show(mdk_rdev_t *rdev, char *page)
1899 {
1900         return sprintf(page, "%llu\n", (unsigned long long)rdev->size);
1901 }
1902
1903 static ssize_t
1904 rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len)
1905 {
1906         char *e;
1907         unsigned long long size = simple_strtoull(buf, &e, 10);
1908         if (e==buf || (*e && *e != '\n'))
1909                 return -EINVAL;
1910         if (rdev->mddev->pers)
1911                 return -EBUSY;
1912         rdev->size = size;
1913         if (size < rdev->mddev->size || rdev->mddev->size == 0)
1914                 rdev->mddev->size = size;
1915         return len;
1916 }
1917
1918 static struct rdev_sysfs_entry rdev_size =
1919 __ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);
1920
1921 static struct attribute *rdev_default_attrs[] = {
1922         &rdev_state.attr,
1923         &rdev_super.attr,
1924         &rdev_errors.attr,
1925         &rdev_slot.attr,
1926         &rdev_offset.attr,
1927         &rdev_size.attr,
1928         NULL,
1929 };
1930 static ssize_t
1931 rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
1932 {
1933         struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
1934         mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj);
1935
1936         if (!entry->show)
1937                 return -EIO;
1938         return entry->show(rdev, page);
1939 }
1940
1941 static ssize_t
1942 rdev_attr_store(struct kobject *kobj, struct attribute *attr,
1943               const char *page, size_t length)
1944 {
1945         struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
1946         mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj);
1947
1948         if (!entry->store)
1949                 return -EIO;
1950         if (!capable(CAP_SYS_ADMIN))
1951                 return -EACCES;
1952         return entry->store(rdev, page, length);
1953 }
1954
1955 static void rdev_free(struct kobject *ko)
1956 {
1957         mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
1958         kfree(rdev);
1959 }
1960 static struct sysfs_ops rdev_sysfs_ops = {
1961         .show           = rdev_attr_show,
1962         .store          = rdev_attr_store,
1963 };
1964 static struct kobj_type rdev_ktype = {
1965         .release        = rdev_free,
1966         .sysfs_ops      = &rdev_sysfs_ops,
1967         .default_attrs  = rdev_default_attrs,
1968 };
1969
1970 /*
1971  * Import a device. If 'super_format' >= 0, then sanity check the superblock
1972  *
1973  * mark the device faulty if:
1974  *
1975  *   - the device is nonexistent (zero size)
1976  *   - the device has no valid superblock
1977  *
1978  * a faulty rdev _never_ has rdev->sb set.
1979  */
1980 static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_minor)
1981 {
1982         char b[BDEVNAME_SIZE];
1983         int err;
1984         mdk_rdev_t *rdev;
1985         sector_t size;
1986
1987         rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
1988         if (!rdev) {
1989                 printk(KERN_ERR "md: could not alloc mem for new device!\n");
1990                 return ERR_PTR(-ENOMEM);
1991         }
1992
1993         if ((err = alloc_disk_sb(rdev)))
1994                 goto abort_free;
1995
1996         err = lock_rdev(rdev, newdev);
1997         if (err)
1998                 goto abort_free;
1999
2000         rdev->kobj.parent = NULL;
2001         rdev->kobj.ktype = &rdev_ktype;
2002         kobject_init(&rdev->kobj);
2003
2004         rdev->desc_nr = -1;
2005         rdev->flags = 0;
2006         rdev->data_offset = 0;
2007         rdev->sb_events = 0;
2008         atomic_set(&rdev->nr_pending, 0);
2009         atomic_set(&rdev->read_errors, 0);
2010         atomic_set(&rdev->corrected_errors, 0);
2011
2012         size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
2013         if (!size) {
2014                 printk(KERN_WARNING 
2015                         "md: %s has zero or unknown size, marking faulty!\n",
2016                         bdevname(rdev->bdev,b));
2017                 err = -EINVAL;
2018                 goto abort_free;
2019         }
2020
2021         if (super_format >= 0) {
2022                 err = super_types[super_format].
2023                         load_super(rdev, NULL, super_minor);
2024                 if (err == -EINVAL) {
2025                         printk(KERN_WARNING 
2026                                 "md: %s has invalid sb, not importing!\n",
2027                                 bdevname(rdev->bdev,b));
2028                         goto abort_free;
2029                 }
2030                 if (err < 0) {
2031                         printk(KERN_WARNING 
2032                                 "md: could not read %s's sb, not importing!\n",
2033                                 bdevname(rdev->bdev,b));
2034                         goto abort_free;
2035                 }
2036         }
2037         INIT_LIST_HEAD(&rdev->same_set);
2038
2039         return rdev;
2040
2041 abort_free:
2042         if (rdev->sb_page) {
2043                 if (rdev->bdev)
2044                         unlock_rdev(rdev);
2045                 free_disk_sb(rdev);
2046         }
2047         kfree(rdev);
2048         return ERR_PTR(err);
2049 }
2050
2051 /*
2052  * Check a full RAID array for plausibility
2053  */
2054
2055
2056 static void analyze_sbs(mddev_t * mddev)
2057 {
2058         int i;
2059         struct list_head *tmp;
2060         mdk_rdev_t *rdev, *freshest;
2061         char b[BDEVNAME_SIZE];
2062
2063         freshest = NULL;
2064         ITERATE_RDEV(mddev,rdev,tmp)
2065                 switch (super_types[mddev->major_version].
2066                         load_super(rdev, freshest, mddev->minor_version)) {
2067                 case 1:
2068                         freshest = rdev;
2069                         break;
2070                 case 0:
2071                         break;
2072                 default:
2073                         printk( KERN_ERR \
2074                                 "md: fatal superblock inconsistency in %s"
2075                                 " -- removing from array\n", 
2076                                 bdevname(rdev->bdev,b));
2077                         kick_rdev_from_array(rdev);
2078                 }
2079
2080
2081         super_types[mddev->major_version].
2082                 validate_super(mddev, freshest);
2083
2084         i = 0;
2085         ITERATE_RDEV(mddev,rdev,tmp) {
2086                 if (rdev != freshest)
2087                         if (super_types[mddev->major_version].
2088                             validate_super(mddev, rdev)) {
2089                                 printk(KERN_WARNING "md: kicking non-fresh %s"
2090                                         " from array!\n",
2091                                         bdevname(rdev->bdev,b));
2092                                 kick_rdev_from_array(rdev);
2093                                 continue;
2094                         }
2095                 if (mddev->level == LEVEL_MULTIPATH) {
2096                         rdev->desc_nr = i++;
2097                         rdev->raid_disk = rdev->desc_nr;
2098                         set_bit(In_sync, &rdev->flags);
2099                 }
2100         }
2101
2102
2103
2104         if (mddev->recovery_cp != MaxSector &&
2105             mddev->level >= 1)
2106                 printk(KERN_ERR "md: %s: raid array is not clean"
2107                        " -- starting background reconstruction\n",
2108                        mdname(mddev));
2109
2110 }
2111
2112 static ssize_t
2113 safe_delay_show(mddev_t *mddev, char *page)
2114 {
2115         int msec = (mddev->safemode_delay*1000)/HZ;
2116         return sprintf(page, "%d.%03d\n", msec/1000, msec%1000);
2117 }
2118 static ssize_t
2119 safe_delay_store(mddev_t *mddev, const char *cbuf, size_t len)
2120 {
2121         int scale=1;
2122         int dot=0;
2123         int i;
2124         unsigned long msec;
2125         char buf[30];
2126         char *e;
2127         /* remove a period, and count digits after it */
2128         if (len >= sizeof(buf))
2129                 return -EINVAL;
2130         strlcpy(buf, cbuf, sizeof(buf)); /* length check above guarantees it fits */
2131         buf[len] = 0;
2132         for (i=0; i<len; i++) {
2133                 if (dot) {
2134                         if (isdigit(buf[i])) {
2135                                 buf[i-1] = buf[i];
2136                                 scale *= 10;
2137                         }
2138                         buf[i] = 0;
2139                 } else if (buf[i] == '.') {
2140                         dot=1;
2141                         buf[i] = 0;
2142                 }
2143         }
2144         msec = simple_strtoul(buf, &e, 10);
2145         if (e == buf || (*e && *e != '\n'))
2146                 return -EINVAL;
2147         msec = (msec * 1000) / scale;
2148         if (msec == 0)
2149                 mddev->safemode_delay = 0;
2150         else {
2151                 mddev->safemode_delay = (msec*HZ)/1000;
2152                 if (mddev->safemode_delay == 0)
2153                         mddev->safemode_delay = 1;
2154         }
2155         return len;
2156 }
2157 static struct md_sysfs_entry md_safe_delay =
2158 __ATTR(safe_mode_delay, S_IRUGO|S_IWUSR,safe_delay_show, safe_delay_store);
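/*
 * 'safe_mode_delay' is shown and parsed as seconds with an optional
 * fractional part (illustrative, assuming an array named md0):
 *
 *   cat  /sys/block/md0/md/safe_mode_delay      # e.g. "0.200"
 *   echo 1.5 > /sys/block/md0/md/safe_mode_delay
 *
 * The value is stored in jiffies; a non-zero request that would round
 * down to 0 jiffies is bumped up to 1.
 */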
2159
2160 static ssize_t
2161 level_show(mddev_t *mddev, char *page)
2162 {
2163         struct mdk_personality *p = mddev->pers;
2164         if (p)
2165                 return sprintf(page, "%s\n", p->name);
2166         else if (mddev->clevel[0])
2167                 return sprintf(page, "%s\n", mddev->clevel);
2168         else if (mddev->level != LEVEL_NONE)
2169                 return sprintf(page, "%d\n", mddev->level);
2170         else
2171                 return 0;
2172 }
2173
2174 static ssize_t
2175 level_store(mddev_t *mddev, const char *buf, size_t len)
2176 {
2177         int rv = len;
2178         if (mddev->pers)
2179                 return -EBUSY;
2180         if (len == 0)
2181                 return 0;
2182         if (len >= sizeof(mddev->clevel))
2183                 return -ENOSPC;
2184         strncpy(mddev->clevel, buf, len);
2185         if (mddev->clevel[len-1] == '\n')
2186                 len--;
2187         mddev->clevel[len] = 0;
2188         mddev->level = LEVEL_NONE;
2189         return rv;
2190 }
2191
2192 static struct md_sysfs_entry md_level =
2193 __ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store);
2194
2195
2196 static ssize_t
2197 layout_show(mddev_t *mddev, char *page)
2198 {
2199         /* just a number, not meaningful for all levels */
2200         return sprintf(page, "%d\n", mddev->layout);
2201 }
2202
2203 static ssize_t
2204 layout_store(mddev_t *mddev, const char *buf, size_t len)
2205 {
2206         char *e;
2207         unsigned long n = simple_strtoul(buf, &e, 10);
2208         if (mddev->pers)
2209                 return -EBUSY;
2210
2211         if (!*buf || (*e && *e != '\n'))
2212                 return -EINVAL;
2213
2214         mddev->layout = n;
2215         return len;
2216 }
2217 static struct md_sysfs_entry md_layout =
2218 __ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store);
2219
2220
2221 static ssize_t
2222 raid_disks_show(mddev_t *mddev, char *page)
2223 {
2224         if (mddev->raid_disks == 0)
2225                 return 0;
2226         return sprintf(page, "%d\n", mddev->raid_disks);
2227 }
2228
2229 static int update_raid_disks(mddev_t *mddev, int raid_disks);
2230
2231 static ssize_t
2232 raid_disks_store(mddev_t *mddev, const char *buf, size_t len)
2233 {
2234         /* if the array is inactive, set raid_disks directly; otherwise ask update_raid_disks() to change it on-line */
2235         char *e;
2236         int rv = 0;
2237         unsigned long n = simple_strtoul(buf, &e, 10);
2238
2239         if (!*buf || (*e && *e != '\n'))
2240                 return -EINVAL;
2241
2242         if (mddev->pers)
2243                 rv = update_raid_disks(mddev, n);
2244         else
2245                 mddev->raid_disks = n;
2246         return rv ? rv : len;
2247 }
2248 static struct md_sysfs_entry md_raid_disks =
2249 __ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store);
2250
2251 static ssize_t
2252 chunk_size_show(mddev_t *mddev, char *page)
2253 {
2254         return sprintf(page, "%d\n", mddev->chunk_size);
2255 }
2256
2257 static ssize_t
2258 chunk_size_store(mddev_t *mddev, const char *buf, size_t len)
2259 {
2260         /* can only set chunk_size if array is not yet active */
2261         char *e;
2262         unsigned long n = simple_strtoul(buf, &e, 10);
2263
2264         if (mddev->pers)
2265                 return -EBUSY;
2266         if (!*buf || (*e && *e != '\n'))
2267                 return -EINVAL;
2268
2269         mddev->chunk_size = n;
2270         return len;
2271 }
2272 static struct md_sysfs_entry md_chunk_size =
2273 __ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store);
2274
2275 static ssize_t
2276 resync_start_show(mddev_t *mddev, char *page)
2277 {
2278         return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp);
2279 }
2280
2281 static ssize_t
2282 resync_start_store(mddev_t *mddev, const char *buf, size_t len)
2283 {
2284         /* can only set resync_start if array is not yet active */
2285         char *e;
2286         unsigned long long n = simple_strtoull(buf, &e, 10);
2287
2288         if (mddev->pers)
2289                 return -EBUSY;
2290         if (!*buf || (*e && *e != '\n'))
2291                 return -EINVAL;
2292
2293         mddev->recovery_cp = n;
2294         return len;
2295 }
2296 static struct md_sysfs_entry md_resync_start =
2297 __ATTR(resync_start, S_IRUGO|S_IWUSR, resync_start_show, resync_start_store);
2298
2299 /*
2300  * The array state can be:
2301  *
2302  * clear
2303  *     No devices, no size, no level
2304  *     Equivalent to STOP_ARRAY ioctl
2305  * inactive
2306  *     May have some settings, but array is not active
2307  *        all IO results in error
2308  *     When written, doesn't tear down array, but just stops it
2309  * suspended (not supported yet)
2310  *     All IO requests will block. The array can be reconfigured.
2311  *     Writing this, if accepted, will block until the array is quiescent
2312  * readonly
2313  *     no resync can happen.  no superblocks get written.
2314  *     write requests fail
2315  * read-auto
2316  *     like readonly, but behaves like 'clean' on a write request.
2317  *
2318  * clean - no pending writes, but otherwise active.
2319  *     When written to inactive array, starts without resync
2320  *     If a write request arrives then
2321  *       if metadata is known, mark 'dirty' and switch to 'active'.
2322  *       if not known, block and switch to write-pending
2323  *     If written to an active array that has pending writes, then fails.
2324  * active
2325  *     fully active: IO and resync can be happening.
2326  *     When written to inactive array, starts with resync
2327  *
2328  * write-pending
2329  *     clean, but writes are blocked waiting for 'active' to be written.
2330  *
2331  * active-idle
2332  *     like active, but no writes have been seen for a while (100msec).
2333  *
2334  */
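/*
 * Illustrative transitions (a sketch, assuming an array named md0):
 *
 *   cat  /sys/block/md0/md/array_state              # e.g. "clean"
 *   echo readonly > /sys/block/md0/md/array_state
 *   echo inactive > /sys/block/md0/md/array_state   # stop it, keep config
 *   echo clear    > /sys/block/md0/md/array_state   # full STOP_ARRAY
 *
 * "suspended", "write-pending" and "active-idle" are only ever reported;
 * writing them is rejected below.
 */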
2335 enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active,
2336                    write_pending, active_idle, bad_word};
2337 static char *array_states[] = {
2338         "clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active",
2339         "write-pending", "active-idle", NULL };
2340
2341 static int match_word(const char *word, char **list)
2342 {
2343         int n;
2344         for (n=0; list[n]; n++)
2345                 if (cmd_match(word, list[n]))
2346                         break;
2347         return n;
2348 }
2349
2350 static ssize_t
2351 array_state_show(mddev_t *mddev, char *page)
2352 {
2353         enum array_state st = inactive;
2354
2355         if (mddev->pers)
2356                 switch(mddev->ro) {
2357                 case 1:
2358                         st = readonly;
2359                         break;
2360                 case 2:
2361                         st = read_auto;
2362                         break;
2363                 case 0:
2364                         if (mddev->in_sync)
2365                                 st = clean;
2366                         else if (mddev->safemode)
2367                                 st = active_idle;
2368                         else
2369                                 st = active;
2370                 }
2371         else {
2372                 if (list_empty(&mddev->disks) &&
2373                     mddev->raid_disks == 0 &&
2374                     mddev->size == 0)
2375                         st = clear;
2376                 else
2377                         st = inactive;
2378         }
2379         return sprintf(page, "%s\n", array_states[st]);
2380 }
2381
2382 static int do_md_stop(mddev_t * mddev, int ro);
2383 static int do_md_run(mddev_t * mddev);
2384 static int restart_array(mddev_t *mddev);
2385
2386 static ssize_t
2387 array_state_store(mddev_t *mddev, const char *buf, size_t len)
2388 {
2389         int err = -EINVAL;
2390         enum array_state st = match_word(buf, array_states);
2391         switch(st) {
2392         case bad_word:
2393                 break;
2394         case clear:
2395                 /* stopping an active array */
2396                 if (mddev->pers) {
2397                         if (atomic_read(&mddev->active) > 1)
2398                                 return -EBUSY;
2399                         err = do_md_stop(mddev, 0);
2400                 }
2401                 break;
2402         case inactive:
2403                 /* stopping an active array */
2404                 if (mddev->pers) {
2405                         if (atomic_read(&mddev->active) > 1)
2406                                 return -EBUSY;
2407                         err = do_md_stop(mddev, 2);
2408                 }
2409                 break;
2410         case suspended:
2411                 break; /* not supported yet */
2412         case readonly:
2413                 if (mddev->pers)
2414                         err = do_md_stop(mddev, 1);
2415                 else {
2416                         mddev->ro = 1;
2417                         err = do_md_run(mddev);
2418                 }
2419                 break;
2420         case read_auto:
2421                 /* stopping an active array */
2422                 if (mddev->pers) {
2423                         err = do_md_stop(mddev, 1);
2424                         if (err == 0)
2425                                 mddev->ro = 2; /* FIXME mark devices writable */
2426                 } else {
2427                         mddev->ro = 2;
2428                         err = do_md_run(mddev);
2429                 }
2430                 break;
2431         case clean:
2432                 if (mddev->pers) {
2433                         restart_array(mddev);
2434                         spin_lock_irq(&mddev->write_lock);
2435                         if (atomic_read(&mddev->writes_pending) == 0) {
2436                                 mddev->in_sync = 1;
2437                                 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
2438                         }
2439                         spin_unlock_irq(&mddev->write_lock);
2440                 } else {
2441                         mddev->ro = 0;
2442                         mddev->recovery_cp = MaxSector;
2443                         err = do_md_run(mddev);
2444                 }
2445                 break;
2446         case active:
2447                 if (mddev->pers) {
2448                         restart_array(mddev);
2449                         clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
2450                         wake_up(&mddev->sb_wait);
2451                         err = 0;
2452                 } else {
2453                         mddev->ro = 0;
2454                         err = do_md_run(mddev);
2455                 }
2456                 break;
2457         case write_pending:
2458         case active_idle:
2459                 /* these cannot be set */
2460                 break;
2461         }
2462         if (err)
2463                 return err;
2464         else
2465                 return len;
2466 }
2467 static struct md_sysfs_entry md_array_state =
2468 __ATTR(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
2469
2470 static ssize_t
2471 null_show(mddev_t *mddev, char *page)
2472 {
2473         return -EINVAL;
2474 }
2475
2476 static ssize_t
2477 new_dev_store(mddev_t *mddev, const char *buf, size_t len)
2478 {
2479         /* buf must be of the form %d:%d, optionally \n terminated, giving major and minor numbers */
2480         /* The new device is added to the array.
2481          * If the array has a persistent superblock, we read the
2482          * superblock to initialise info and check validity.
2483          * Otherwise, only checking done is that in bind_rdev_to_array,
2484          * which mainly checks size.
2485          */
2486         char *e;
2487         int major = simple_strtoul(buf, &e, 10);
2488         int minor;
2489         dev_t dev;
2490         mdk_rdev_t *rdev;
2491         int err;
2492
2493         if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
2494                 return -EINVAL;
2495         minor = simple_strtoul(e+1, &e, 10);
2496         if (*e && *e != '\n')
2497                 return -EINVAL;
2498         dev = MKDEV(major, minor);
2499         if (major != MAJOR(dev) ||
2500             minor != MINOR(dev))
2501                 return -EOVERFLOW;
2502
2503
2504         if (mddev->persistent) {
2505                 rdev = md_import_device(dev, mddev->major_version,
2506                                         mddev->minor_version);
2507                 if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
2508                         mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
2509                                                        mdk_rdev_t, same_set);
2510                         err = super_types[mddev->major_version]
2511                                 .load_super(rdev, rdev0, mddev->minor_version);
2512                         if (err < 0)
2513                                 goto out;
2514                 }
2515         } else
2516                 rdev = md_import_device(dev, -1, -1);
2517
2518         if (IS_ERR(rdev))
2519                 return PTR_ERR(rdev);
2520         err = bind_rdev_to_array(rdev, mddev);
2521  out:
2522         if (err)
2523                 export_rdev(rdev);
2524         return err ? err : len;
2525 }
2526
2527 static struct md_sysfs_entry md_new_device =
2528 __ATTR(new_dev, S_IWUSR, null_show, new_dev_store);
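/*
 * Example for 'new_dev' (illustrative): the value is the major:minor pair
 * of the component device to add, e.g.
 *
 *   echo 8:17 > /sys/block/md0/md/new_dev
 *
 * For a persistent array the new device's superblock is first read and
 * checked against an existing member before bind_rdev_to_array().
 */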
2529
2530 static ssize_t
2531 bitmap_store(mddev_t *mddev, const char *buf, size_t len)
2532 {
2533         char *end;
2534         unsigned long chunk, end_chunk;
2535
2536         if (!mddev->bitmap)
2537                 goto out;
2538         /* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */
2539         while (*buf) {
2540                 chunk = end_chunk = simple_strtoul(buf, &end, 0);
2541                 if (buf == end) break;
2542                 if (*end == '-') { /* range */
2543                         buf = end + 1;
2544                         end_chunk = simple_strtoul(buf, &end, 0);
2545                         if (buf == end) break;
2546                 }
2547                 if (*end && !isspace(*end)) break;
2548                 bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
2549                 buf = end;
2550                 while (isspace(*buf)) buf++;
2551         }
2552         bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
2553 out:
2554         return len;
2555 }
2556
2557 static struct md_sysfs_entry md_bitmap =
2558 __ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store);
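/*
 * Example for 'bitmap_set_bits' (illustrative): the input is a whitespace
 * separated list of chunk numbers and/or ranges to mark dirty, e.g.
 *
 *   echo "100-200 205" > /sys/block/md0/md/bitmap_set_bits
 *
 * after which bitmap_unplug() flushes the newly dirtied bits out.
 */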
2559
2560 static ssize_t
2561 size_show(mddev_t *mddev, char *page)
2562 {
2563         return sprintf(page, "%llu\n", (unsigned long long)mddev->size);
2564 }
2565
2566 static int update_size(mddev_t *mddev, unsigned long size);
2567
2568 static ssize_t
2569 size_store(mddev_t *mddev, const char *buf, size_t len)
2570 {
2571         /* If array is inactive, we can reduce the component size, but
2572          * not increase it (except from 0).
2573          * If array is active, we can try an on-line resize
2574          */
2575         char *e;
2576         int err = 0;
2577         unsigned long long size = simple_strtoull(buf, &e, 10);
2578         if (!*buf || *buf == '\n' ||
2579             (*e && *e != '\n'))
2580                 return -EINVAL;
2581
2582         if (mddev->pers) {
2583                 err = update_size(mddev, size);
2584                 md_update_sb(mddev, 1);
2585         } else {
2586                 if (mddev->size == 0 ||
2587                     mddev->size > size)
2588                         mddev->size = size;
2589                 else
2590                         err = -ENOSPC;
2591         }
2592         return err ? err : len;
2593 }
2594
2595 static struct md_sysfs_entry md_size =
2596 __ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);
2597
2598
2599 /* Metadata version.
2600  * This is either 'none' for arrays with externally managed metadata,
2601  * or N.M for internally known formats
2602  */
2603 static ssize_t
2604 metadata_show(mddev_t *mddev, char *page)
2605 {
2606         if (mddev->persistent)
2607                 return sprintf(page, "%d.%d\n",
2608                                mddev->major_version, mddev->minor_version);
2609         else
2610                 return sprintf(page, "none\n");
2611 }
2612
2613 static ssize_t
2614 metadata_store(mddev_t *mddev, const char *buf, size_t len)
2615 {
2616         int major, minor;
2617         char *e;
2618         if (!list_empty(&mddev->disks))
2619                 return -EBUSY;
2620
2621         if (cmd_match(buf, "none")) {
2622                 mddev->persistent = 0;
2623                 mddev->major_version = 0;
2624                 mddev->minor_version = 90;
2625                 return len;
2626         }
2627         major = simple_strtoul(buf, &e, 10);
2628         if (e==buf || *e != '.')
2629                 return -EINVAL;
2630         buf = e+1;
2631         minor = simple_strtoul(buf, &e, 10);
2632         if (e==buf || *e != '\n')
2633                 return -EINVAL;
2634         if (major >= sizeof(super_types)/sizeof(super_types[0]) ||
2635             super_types[major].name == NULL)
2636                 return -ENOENT;
2637         mddev->major_version = major;
2638         mddev->minor_version = minor;
2639         mddev->persistent = 1;
2640         return len;
2641 }
2642
2643 static struct md_sysfs_entry md_metadata =
2644 __ATTR(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
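/*
 * Example for 'metadata_version' (illustrative, only allowed while no
 * devices are bound to the array):
 *
 *   echo 0.90 > /sys/block/md0/md/metadata_version   # internal v0.90 sb
 *   echo none > /sys/block/md0/md/metadata_version   # externally managed
 */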
2645
2646 static ssize_t
2647 action_show(mddev_t *mddev, char *page)
2648 {
2649         char *type = "idle";
2650         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
2651             test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) {
2652                 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
2653                         type = "reshape";
2654                 else if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
2655                         if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
2656                                 type = "resync";
2657                         else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
2658                                 type = "check";
2659                         else
2660                                 type = "repair";
2661                 } else
2662                         type = "recover";
2663         }
2664         return sprintf(page, "%s\n", type);
2665 }
2666
2667 static ssize_t
2668 action_store(mddev_t *mddev, const char *page, size_t len)
2669 {
2670         if (!mddev->pers || !mddev->pers->sync_request)
2671                 return -EINVAL;
2672
2673         if (cmd_match(page, "idle")) {
2674                 if (mddev->sync_thread) {
2675                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
2676                         md_unregister_thread(mddev->sync_thread);
2677                         mddev->sync_thread = NULL;
2678                         mddev->recovery = 0;
2679                 }
2680         } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
2681                    test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
2682                 return -EBUSY;
2683         else if (cmd_match(page, "resync") || cmd_match(page, "recover"))
2684                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2685         else if (cmd_match(page, "reshape")) {
2686                 int err;
2687                 if (mddev->pers->start_reshape == NULL)
2688                         return -EINVAL;
2689                 err = mddev->pers->start_reshape(mddev);
2690                 if (err)
2691                         return err;
2692         } else {
2693                 if (cmd_match(page, "check"))
2694                         set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
2695                 else if (!cmd_match(page, "repair"))
2696                         return -EINVAL;
2697                 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
2698                 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
2699         }
2700         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2701         md_wakeup_thread(mddev->thread);
2702         return len;
2703 }
2704
2705 static ssize_t
2706 mismatch_cnt_show(mddev_t *mddev, char *page)
2707 {
2708         return sprintf(page, "%llu\n",
2709                        (unsigned long long) mddev->resync_mismatches);
2710 }
2711
2712 static struct md_sysfs_entry md_scan_mode =
2713 __ATTR(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
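/*
 * Example for 'sync_action' (illustrative): reading reports the current
 * activity ("idle", "resync", "recover", "check", "repair" or "reshape");
 * writing requests one, e.g.
 *
 *   echo check  > /sys/block/md0/md/sync_action   # requested check pass
 *   echo repair > /sys/block/md0/md/sync_action   # requested repair pass
 *   echo idle   > /sys/block/md0/md/sync_action   # interrupt a running sync
 */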
2714
2715
2716 static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt);
2717
2718 static ssize_t
2719 sync_min_show(mddev_t *mddev, char *page)
2720 {
2721         return sprintf(page, "%d (%s)\n", speed_min(mddev),
2722                        mddev->sync_speed_min ? "local": "system");
2723 }
2724
2725 static ssize_t
2726 sync_min_store(mddev_t *mddev, const char *buf, size_t len)
2727 {
2728         int min;
2729         char *e;
2730         if (strncmp(buf, "system", 6)==0) {
2731                 mddev->sync_speed_min = 0;
2732                 return len;
2733         }
2734         min = simple_strtoul(buf, &e, 10);
2735         if (buf == e || (*e && *e != '\n') || min <= 0)
2736                 return -EINVAL;
2737         mddev->sync_speed_min = min;
2738         return len;
2739 }
2740
2741 static struct md_sysfs_entry md_sync_min =
2742 __ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store);
2743
2744 static ssize_t
2745 sync_max_show(mddev_t *mddev, char *page)
2746 {
2747         return sprintf(page, "%d (%s)\n", speed_max(mddev),
2748                        mddev->sync_speed_max ? "local": "system");
2749 }
2750
2751 static ssize_t
2752 sync_max_store(mddev_t *mddev, const char *buf, size_t len)
2753 {
2754         int max;
2755         char *e;
2756         if (strncmp(buf, "system", 6)==0) {
2757                 mddev->sync_speed_max = 0;
2758                 return len;
2759         }
2760         max = simple_strtoul(buf, &e, 10);
2761         if (buf == e || (*e && *e != '\n') || max <= 0)
2762                 return -EINVAL;
2763         mddev->sync_speed_max = max;
2764         return len;
2765 }
2766
2767 static struct md_sysfs_entry md_sync_max =
2768 __ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store);
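/*
 * Example for the per-array speed limits (illustrative): a number sets a
 * local limit, the word "system" reverts to the global default, e.g.
 *
 *   echo 50000  > /sys/block/md0/md/sync_speed_max
 *   echo system > /sys/block/md0/md/sync_speed_min
 */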
2769
2770
2771 static ssize_t
2772 sync_speed_show(mddev_t *mddev, char *page)
2773 {
2774         unsigned long resync, dt, db;
2775         resync = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active));
2776         dt = ((jiffies - mddev->resync_mark) / HZ);
2777         if (!dt) dt++;
2778         db = resync - (mddev->resync_mark_cnt);
2779         return sprintf(page, "%ld\n", db/dt/2); /* db is in 512-byte sectors, so db/dt/2 is K/sec */
2780 }
2781
2782 static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed);
2783
2784 static ssize_t
2785 sync_completed_show(mddev_t *mddev, char *page)
2786 {
2787         unsigned long max_blocks, resync;
2788
2789         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
2790                 max_blocks = mddev->resync_max_sectors;
2791         else
2792                 max_blocks = mddev->size << 1;
2793
2794         resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active));
2795         return sprintf(page, "%lu / %lu\n", resync, max_blocks);
2796 }
2797
2798 static struct md_sysfs_entry md_sync_completed = __ATTR_RO(sync_completed);
2799
2800 static ssize_t
2801 suspend_lo_show(mddev_t *mddev, char *page)
2802 {
2803         return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo);
2804 }
2805
2806 static ssize_t
2807 suspend_lo_store(mddev_t *mddev, const char *buf, size_t len)
2808 {
2809         char *e;
2810         unsigned long long new = simple_strtoull(buf, &e, 10);
2811
2812         if (mddev->pers->quiesce == NULL)
2813                 return -EINVAL;
2814         if (buf == e || (*e && *e != '\n'))
2815                 return -EINVAL;
2816         if (new >= mddev->suspend_hi ||
2817             (new > mddev->suspend_lo && new < mddev->suspend_hi)) {
2818                 mddev->suspend_lo = new;
2819                 mddev->pers->quiesce(mddev, 2);
2820                 return len;
2821         } else
2822                 return -EINVAL;
2823 }
2824 static struct md_sysfs_entry md_suspend_lo =
2825 __ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store);
2826
2827
2828 static ssize_t
2829 suspend_hi_show(mddev_t *mddev, char *page)
2830 {
2831         return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi);
2832 }
2833
2834 static ssize_t
2835 suspend_hi_store(mddev_t *mddev, const char *buf, size_t len)
2836 {
2837         char *e;
2838         unsigned long long new = simple_strtoull(buf, &e, 10);
2839
2840         if (mddev->pers->quiesce == NULL)
2841                 return -EINVAL;
2842         if (buf == e || (*e && *e != '\n'))
2843                 return -EINVAL;
2844         if ((new <= mddev->suspend_lo && mddev->suspend_lo >= mddev->suspend_hi) ||
2845             (new > mddev->suspend_lo && new > mddev->suspend_hi)) {
2846                 mddev->suspend_hi = new;
2847                 mddev->pers->quiesce(mddev, 1);
2848                 mddev->pers->quiesce(mddev, 0);
2849                 return len;
2850         } else
2851                 return -EINVAL;
2852 }
2853 static struct md_sysfs_entry md_suspend_hi =
2854 __ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
2855
2856
2857 static struct attribute *md_default_attrs[] = {
2858         &md_level.attr,
2859         &md_layout.attr,
2860         &md_raid_disks.attr,
2861         &md_chunk_size.attr,
2862         &md_size.attr,
2863         &md_resync_start.attr,
2864         &md_metadata.attr,
2865         &md_new_device.attr,
2866         &md_safe_delay.attr,
2867         &md_array_state.attr,
2868         NULL,
2869 };
2870
2871 static struct attribute *md_redundancy_attrs[] = {
2872         &md_scan_mode.attr,
2873         &md_mismatches.attr,
2874         &md_sync_min.attr,
2875         &md_sync_max.attr,
2876         &md_sync_speed.attr,
2877         &md_sync_completed.attr,
2878         &md_suspend_lo.attr,
2879         &md_suspend_hi.attr,
2880         &md_bitmap.attr,
2881         NULL,
2882 };
2883 static struct attribute_group md_redundancy_group = {
2884         .name = NULL,
2885         .attrs = md_redundancy_attrs,
2886 };
2887
2888
2889 static ssize_t
2890 md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
2891 {
2892         struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
2893         mddev_t *mddev = container_of(kobj, struct mddev_s, kobj);
2894         ssize_t rv;
2895
2896         if (!entry->show)
2897                 return -EIO;
2898         rv = mddev_lock(mddev);
2899         if (!rv) {
2900                 rv = entry->show(mddev, page);
2901                 mddev_unlock(mddev);
2902         }
2903         return rv;
2904 }
2905
2906 static ssize_t
2907 md_attr_store(struct kobject *kobj, struct attribute *attr,
2908               const char *page, size_t length)
2909 {
2910         struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
2911         mddev_t *mddev = container_of(kobj, struct mddev_s, kobj);
2912         ssize_t rv;
2913
2914         if (!entry->store)
2915                 return -EIO;
2916         if (!capable(CAP_SYS_ADMIN))
2917                 return -EACCES;
2918         rv = mddev_lock(mddev);
2919         if (!rv) {
2920                 rv = entry->store(mddev, page, length);
2921                 mddev_unlock(mddev);
2922         }
2923         return rv;
2924 }
2925
2926 static void md_free(struct kobject *ko)
2927 {
2928         mddev_t *mddev = container_of(ko, mddev_t, kobj);
2929         kfree(mddev);
2930 }
2931
2932 static struct sysfs_ops md_sysfs_ops = {
2933         .show   = md_attr_show,
2934         .store  = md_attr_store,
2935 };
2936 static struct kobj_type md_ktype = {
2937         .release        = md_free,
2938         .sysfs_ops      = &md_sysfs_ops,
2939         .default_attrs  = md_default_attrs,
2940 };
2941
2942 int mdp_major = 0;
2943
2944 static struct kobject *md_probe(dev_t dev, int *part, void *data)
2945 {
2946         static DEFINE_MUTEX(disks_mutex);
2947         mddev_t *mddev = mddev_find(dev);
2948         struct gendisk *disk;
2949         int partitioned = (MAJOR(dev) != MD_MAJOR);
2950         int shift = partitioned ? MdpMinorShift : 0;
2951         int unit = MINOR(dev) >> shift;
2952
2953         if (!mddev)
2954                 return NULL;
2955
2956         mutex_lock(&disks_mutex);
2957         if (mddev->gendisk) {
2958                 mutex_unlock(&disks_mutex);
2959                 mddev_put(mddev);
2960                 return NULL;
2961         }
2962         disk = alloc_disk(1 << shift);
2963         if (!disk) {
2964                 mutex_unlock(&disks_mutex);
2965                 mddev_put(mddev);
2966                 return NULL;
2967         }
2968         disk->major = MAJOR(dev);
2969         disk->first_minor = unit << shift;
2970         if (partitioned)
2971                 sprintf(disk->disk_name, "md_d%d", unit);
2972         else
2973                 sprintf(disk->disk_name, "md%d", unit);
2974         disk->fops = &md_fops;
2975         disk->private_data = mddev;
2976         disk->queue = mddev->queue;
2977         add_disk(disk);
2978         mddev->gendisk = disk;
2979         mutex_unlock(&disks_mutex);
2980         mddev->kobj.parent = &disk->kobj;
2981         mddev->kobj.k_name = NULL;
2982         snprintf(mddev->kobj.name, KOBJ_NAME_LEN, "%s", "md");
2983         mddev->kobj.ktype = &md_ktype;
2984         kobject_register(&mddev->kobj);
2985         return NULL;
2986 }
2987
2988 static void md_safemode_timeout(unsigned long data)
2989 {
2990         mddev_t *mddev = (mddev_t *) data;
2991
2992         mddev->safemode = 1;
2993         md_wakeup_thread(mddev->thread);
2994 }
2995
2996 static int start_dirty_degraded;
2997
2998 static int do_md_run(mddev_t * mddev)
2999 {
3000         int err;
3001         int chunk_size;
3002         struct list_head *tmp;
3003         mdk_rdev_t *rdev;
3004         struct gendisk *disk;
3005         struct mdk_personality *pers;
3006         char b[BDEVNAME_SIZE];
3007
3008         if (list_empty(&mddev->disks))
3009                 /* cannot run an array with no devices.. */
3010                 return -EINVAL;
3011
3012         if (mddev->pers)
3013                 return -EBUSY;
3014
3015         /*
3016          * Analyze all RAID superblock(s)
3017          */
3018         if (!mddev->raid_disks)
3019                 analyze_sbs(mddev);
3020
3021         chunk_size = mddev->chunk_size;
3022
3023         if (chunk_size) {
3024                 if (chunk_size > MAX_CHUNK_SIZE) {
3025                         printk(KERN_ERR "too big chunk_size: %d > %d\n",
3026                                 chunk_size, MAX_CHUNK_SIZE);
3027                         return -EINVAL;
3028                 }
3029                 /*
3030                  * chunk-size has to be a power of 2 and a multiple of PAGE_SIZE
3031                  */
3032                 if ( (1 << ffz(~chunk_size)) != chunk_size) {
3033                         printk(KERN_ERR "chunk_size of %d not valid\n", chunk_size);
3034                         return -EINVAL;
3035                 }
3036                 if (chunk_size < PAGE_SIZE) {
3037                         printk(KERN_ERR "too small chunk_size: %d < %ld\n",
3038                                 chunk_size, PAGE_SIZE);
3039                         return -EINVAL;
3040                 }
3041
3042                 /* devices must have minimum size of one chunk */
3043                 ITERATE_RDEV(mddev,rdev,tmp) {
3044                         if (test_bit(Faulty, &rdev->flags))
3045                                 continue;
3046                         if (rdev->size < chunk_size / 1024) {
3047                                 printk(KERN_WARNING
3048                                         "md: Dev %s smaller than chunk_size:"
3049                                         " %lluk < %dk\n",
3050                                         bdevname(rdev->bdev,b),
3051                                         (unsigned long long)rdev->size,
3052                                         chunk_size / 1024);
3053                                 return -EINVAL;
3054                         }
3055                 }
3056         }
3057
3058 #ifdef CONFIG_KMOD
3059         if (mddev->level != LEVEL_NONE)
3060                 request_module("md-level-%d", mddev->level);
3061         else if (mddev->clevel[0])
3062                 request_module("md-%s", mddev->clevel);
3063 #endif
3064
3065         /*
3066          * Drop all container device buffers, from now on
3067          * the only valid external interface is through the md
3068          * device.
3069          * Also find largest hardsector size
3070          */
3071         ITERATE_RDEV(mddev,rdev,tmp) {
3072                 if (test_bit(Faulty, &rdev->flags))
3073                         continue;
3074                 sync_blockdev(rdev->bdev);
3075                 invalidate_bdev(rdev->bdev, 0);
3076         }
3077
3078         md_probe(mddev->unit, NULL, NULL);
3079         disk = mddev->gendisk;
3080         if (!disk)
3081                 return -ENOMEM;
3082
3083         spin_lock(&pers_lock);
3084         pers = find_pers(mddev->level, mddev->clevel);
3085         if (!pers || !try_module_get(pers->owner)) {
3086                 spin_unlock(&pers_lock);
3087                 if (mddev->level != LEVEL_NONE)
3088                         printk(KERN_WARNING "md: personality for level %d is not loaded!\n",
3089                                mddev->level);
3090                 else
3091                         printk(KERN_WARNING "md: personality for level %s is not loaded!\n",
3092                                mddev->clevel);
3093                 return -EINVAL;
3094         }
3095         mddev->pers = pers;
3096         spin_unlock(&pers_lock);
3097         mddev->level = pers->level;
3098         strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
3099
3100         if (mddev->reshape_position != MaxSector &&
3101             pers->start_reshape == NULL) {
3102                 /* This personality cannot handle reshaping... */
3103                 mddev->pers = NULL;
3104                 module_put(pers->owner);
3105                 return -EINVAL;
3106         }
3107
3108         mddev->recovery = 0;
3109         mddev->resync_max_sectors = mddev->size << 1; /* may be overridden by personality */
3110         mddev->barriers_work = 1;
3111         mddev->ok_start_degraded = start_dirty_degraded;
3112
3113         if (start_readonly)
3114                 mddev->ro = 2; /* read-only, but switch on first write */
3115
3116         err = mddev->pers->run(mddev);
3117         if (!err && mddev->pers->sync_request) {
3118                 err = bitmap_create(mddev);
3119                 if (err) {
3120                         printk(KERN_ERR "%s: failed to create bitmap (%d)\n",
3121                                mdname(mddev), err);
3122                         mddev->pers->stop(mddev);
3123                 }
3124         }
3125         if (err) {
3126                 printk(KERN_ERR "md: pers->run() failed ...\n");
3127                 module_put(mddev->pers->owner);
3128                 mddev->pers = NULL;
3129                 bitmap_destroy(mddev);
3130                 return err;
3131         }
3132         if (mddev->pers->sync_request)
3133                 sysfs_create_group(&mddev->kobj, &md_redundancy_group);
3134         else if (mddev->ro == 2) /* auto-readonly not meaningful */
3135                 mddev->ro = 0;
3136
3137         atomic_set(&mddev->writes_pending,0);
3138         mddev->safemode = 0;
3139         mddev->safemode_timer.function = md_safemode_timeout;
3140         mddev->safemode_timer.data = (unsigned long) mddev;
3141         mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */
3142         mddev->in_sync = 1;
3143
3144         ITERATE_RDEV(mddev,rdev,tmp)
3145                 if (rdev->raid_disk >= 0) {
3146                         char nm[20];
3147                         sprintf(nm, "rd%d", rdev->raid_disk);
3148                         sysfs_create_link(&mddev->kobj, &rdev->kobj, nm);
3149                 }
3150         
3151         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3152         
3153         if (mddev->flags)
3154                 md_update_sb(mddev, 0);
3155
3156         set_capacity(disk, mddev->array_size<<1);
3157
3158         /* If we call blk_queue_make_request here, it will
3159          * re-initialise max_sectors etc which may have been
3160          * refined inside -> run.  So just set the bits we need to set.
3161          * Most initialisation happened when we called
3162          * blk_queue_make_request(..., md_fail_request)
3163          * earlier.
3164          */
3165         mddev->queue->queuedata = mddev;
3166         mddev->queue->make_request_fn = mddev->pers->make_request;
3167
3168         /* If there is a partially-recovered drive we need to
3169          * start recovery here.  If we leave it to md_check_recovery,
3170          * it will remove the drives and not do the right thing
3171          */
3172         if (mddev->degraded && !mddev->sync_thread) {
3173                 struct list_head *rtmp;
3174                 int spares = 0;
3175                 ITERATE_RDEV(mddev,rdev,rtmp)
3176                         if (rdev->raid_disk >= 0 &&
3177                             !test_bit(In_sync, &rdev->flags) &&
3178                             !test_bit(Faulty, &rdev->flags))
3179                                 /* complete an interrupted recovery */
3180                                 spares++;
3181                 if (spares && mddev->pers->sync_request) {
3182                         mddev->recovery = 0;
3183                         set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
3184                         mddev->sync_thread = md_register_thread(md_do_sync,
3185                                                                 mddev,
3186                                                                 "%s_resync");
3187                         if (!mddev->sync_thread) {
3188                                 printk(KERN_ERR "%s: could not start resync"
3189                                        " thread...\n",
3190                                        mdname(mddev));
3191                                 /* leave the spares where they are, it shouldn't hurt */
3192                                 mddev->recovery = 0;
3193                         }
3194                 }
3195         }
3196         md_wakeup_thread(mddev->thread);
3197         md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
3198
3199         mddev->changed = 1;
3200         md_new_event(mddev);
3201         return 0;
3202 }
3203
3204 static int restart_array(mddev_t *mddev)
3205 {
3206         struct gendisk *disk = mddev->gendisk;
3207         int err;
3208
3209         /*
3210          * Complain if it has no devices
3211          */
3212         err = -ENXIO;
3213         if (list_empty(&mddev->disks))
3214                 goto out;
3215
3216         if (mddev->pers) {
3217                 err = -EBUSY;
3218                 if (!mddev->ro)
3219                         goto out;
3220
3221                 mddev->safemode = 0;
3222                 mddev->ro = 0;
3223                 set_disk_ro(disk, 0);
3224
3225                 printk(KERN_INFO "md: %s switched to read-write mode.\n",
3226                         mdname(mddev));
3227                 /*
3228                  * Kick recovery or resync if necessary
3229                  */
3230                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3231                 md_wakeup_thread(mddev->thread);
3232                 md_wakeup_thread(mddev->sync_thread);
3233                 err = 0;
3234         } else
3235                 err = -EINVAL;
3236
3237 out:
3238         return err;
3239 }
3240
3241 /* similar to deny_write_access, but accounts for our holding a reference
3242  * to the file ourselves */
3243 static int deny_bitmap_write_access(struct file * file)
3244 {
3245         struct inode *inode = file->f_mapping->host;
3246
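        /*
         * Allow at most one writer - the open file descriptor handed to us
         * by SET_BITMAP_FILE.  Setting i_writecount negative then blocks any
         * further write opens, just as deny_write_access() would.
         */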
3247         spin_lock(&inode->i_lock);
3248         if (atomic_read(&inode->i_writecount) > 1) {
3249                 spin_unlock(&inode->i_lock);
3250                 return -ETXTBSY;
3251         }
3252         atomic_set(&inode->i_writecount, -1);
3253         spin_unlock(&inode->i_lock);
3254
3255         return 0;
3256 }
3257
3258 static void restore_bitmap_write_access(struct file *file)
3259 {
3260         struct inode *inode = file->f_mapping->host;
3261
3262         spin_lock(&inode->i_lock);
3263         atomic_set(&inode->i_writecount, 1);
3264         spin_unlock(&inode->i_lock);
3265 }
3266
3267 /* mode:
3268  *   0 - completely stop and disassemble array
3269  *   1 - switch to readonly
3270  *   2 - stop but do not disassemble array
3271  */
3272 static int do_md_stop(mddev_t * mddev, int mode)
3273 {
3274         int err = 0;
3275         struct gendisk *disk = mddev->gendisk;
3276
3277         if (mddev->pers) {
3278                 if (atomic_read(&mddev->active)>2) {
3279                         printk("md: %s still in use.\n",mdname(mddev));
3280                         return -EBUSY;
3281                 }
3282
3283                 if (mddev->sync_thread) {
3284                         set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
3285                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
3286                         md_unregister_thread(mddev->sync_thread);
3287                         mddev->sync_thread = NULL;
3288                 }
3289
3290                 del_timer_sync(&mddev->safemode_timer);
3291
3292                 invalidate_partition(disk, 0);
3293
3294                 switch(mode) {
3295                 case 1: /* readonly */
3296                         err  = -ENXIO;
3297                         if (mddev->ro==1)
3298                                 goto out;
3299                         mddev->ro = 1;
3300                         break;
3301                 case 0: /* disassemble */
3302                 case 2: /* stop */
3303                         bitmap_flush(mddev);
3304                         md_super_wait(mddev);
3305                         if (mddev->ro)
3306                                 set_disk_ro(disk, 0);
3307                         blk_queue_make_request(mddev->queue, md_fail_request);
3308                         mddev->pers->stop(mddev);
3309                         if (mddev->pers->sync_request)
3310                                 sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
3311
3312                         module_put(mddev->pers->owner);
3313                         mddev->pers = NULL;
3314                         if (mddev->ro)
3315                                 mddev->ro = 0;
3316                 }
3317                 if (!mddev->in_sync || mddev->flags) {
3318                         /* mark array as shutdown cleanly */
3319                         mddev->in_sync = 1;
3320                         md_update_sb(mddev, 1);
3321                 }
3322                 if (mode == 1)
3323                         set_disk_ro(disk, 1);
3324                 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
3325         }
3326
3327         /*
3328          * Free resources if final stop
3329          */
3330         if (mode == 0) {
3331                 mdk_rdev_t *rdev;
3332                 struct list_head *tmp;
3333                 struct gendisk *disk;
3334                 printk(KERN_INFO "md: %s stopped.\n", mdname(mddev));
3335
3336                 bitmap_destroy(mddev);
3337                 if (mddev->bitmap_file) {
3338                         restore_bitmap_write_access(mddev->bitmap_file);
3339                         fput(mddev->bitmap_file);
3340                         mddev->bitmap_file = NULL;
3341                 }
3342                 mddev->bitmap_offset = 0;
3343
3344                 ITERATE_RDEV(mddev,rdev,tmp)
3345                         if (rdev->raid_disk >= 0) {
3346                                 char nm[20];
3347                                 sprintf(nm, "rd%d", rdev->raid_disk);
3348                                 sysfs_remove_link(&mddev->kobj, nm);
3349                         }
3350
3351                 export_array(mddev);
3352
3353                 mddev->array_size = 0;
3354                 mddev->size = 0;
3355                 mddev->raid_disks = 0;
3356                 mddev->recovery_cp = 0;
3357
3358                 disk = mddev->gendisk;
3359                 if (disk)
3360                         set_capacity(disk, 0);
3361                 mddev->changed = 1;
3362         } else if (mddev->pers)
3363                 printk(KERN_INFO "md: %s switched to read-only mode.\n",
3364                         mdname(mddev));
3365         err = 0;
3366         md_new_event(mddev);
3367 out:
3368         return err;
3369 }
3370
3371 static void autorun_array(mddev_t *mddev)
3372 {
3373         mdk_rdev_t *rdev;
3374         struct list_head *tmp;
3375         int err;
3376
3377         if (list_empty(&mddev->disks))
3378                 return;
3379
3380         printk(KERN_INFO "md: running: ");
3381
3382         ITERATE_RDEV(mddev,rdev,tmp) {
3383                 char b[BDEVNAME_SIZE];
3384                 printk("<%s>", bdevname(rdev->bdev,b));
3385         }
3386         printk("\n");
3387
3388         err = do_md_run (mddev);
3389         if (err) {
3390                 printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
3391                 do_md_stop (mddev, 0);
3392         }
3393 }
3394
3395 /*
3396  * let's try to run arrays based on all disks that have arrived
3397  * until now. (those are in pending_raid_disks)
3398  *
3399  * the method: pick the first pending disk, collect all disks with
3400  * the same UUID, remove all from the pending list and put them into
3401  * the 'same_array' list. Then order this list based on superblock
3402  * update time (freshest comes first), kick out 'old' disks and
3403  * compare superblocks. If everything's fine then run it.
3404  *
3405  * If "unit" is allocated, then bump its reference count
3406  */
3407 static void autorun_devices(int part)
3408 {
3409         struct list_head *tmp;
3410         mdk_rdev_t *rdev0, *rdev;
3411         mddev_t *mddev;
3412         char b[BDEVNAME_SIZE];
3413
3414         printk(KERN_INFO "md: autorun ...\n");
3415         while (!list_empty(&pending_raid_disks)) {
3416                 int unit;
3417                 dev_t dev;
3418                 LIST_HEAD(candidates);
3419                 rdev0 = list_entry(pending_raid_disks.next,
3420                                          mdk_rdev_t, same_set);
3421
3422                 printk(KERN_INFO "md: considering %s ...\n",
3423                         bdevname(rdev0->bdev,b));
3424                 INIT_LIST_HEAD(&candidates);
3425                 ITERATE_RDEV_PENDING(rdev,tmp)
3426                         if (super_90_load(rdev, rdev0, 0) >= 0) {
3427                                 printk(KERN_INFO "md:  adding %s ...\n",
3428                                         bdevname(rdev->bdev,b));
3429                                 list_move(&rdev->same_set, &candidates);
3430                         }
3431                 /*
3432                  * now we have a set of devices, with all of them having
3433                  * mostly sane superblocks. It's time to allocate the
3434                  * mddev.
3435                  */
3436                 if (part) {
3437                         dev = MKDEV(mdp_major,
3438                                     rdev0->preferred_minor << MdpMinorShift);
3439                         unit = MINOR(dev) >> MdpMinorShift;
3440                 } else {
3441                         dev = MKDEV(MD_MAJOR, rdev0->preferred_minor);
3442                         unit = MINOR(dev);
3443                 }
3444                 if (rdev0->preferred_minor != unit) {
3445                         printk(KERN_INFO "md: unit number in %s is bad: %d\n",
3446                                bdevname(rdev0->bdev, b), rdev0->preferred_minor);
3447                         break;
3448                 }
3449
3450                 md_probe(dev, NULL, NULL);
3451                 mddev = mddev_find(dev);
3452                 if (!mddev) {
3453                         printk(KERN_ERR 
3454                                 "md: cannot allocate memory for md drive.\n");
3455                         break;
3456                 }
3457                 if (mddev_lock(mddev)) 
3458                         printk(KERN_WARNING "md: %s locked, cannot run\n",
3459                                mdname(mddev));
3460                 else if (mddev->raid_disks || mddev->major_version
3461                          || !list_empty(&mddev->disks)) {
3462                         printk(KERN_WARNING 
3463                                 "md: %s already running, cannot run %s\n",
3464                                 mdname(mddev), bdevname(rdev0->bdev,b));
3465                         mddev_unlock(mddev);
3466                 } else {
3467                         printk(KERN_INFO "md: created %s\n", mdname(mddev));
3468                         ITERATE_RDEV_GENERIC(candidates,rdev,tmp) {
3469                                 list_del_init(&rdev->same_set);
3470                                 if (bind_rdev_to_array(rdev, mddev))
3471                                         export_rdev(rdev);
3472                         }
3473                         autorun_array(mddev);
3474                         mddev_unlock(mddev);
3475                 }
3476                 /* on success, candidates will be empty, on error
3477                  * it won't...
3478                  */
3479                 ITERATE_RDEV_GENERIC(candidates,rdev,tmp)
3480                         export_rdev(rdev);
3481                 mddev_put(mddev);
3482         }
3483         printk(KERN_INFO "md: ... autorun DONE.\n");
3484 }
3485
3486 static int get_version(void __user * arg)
3487 {
3488         mdu_version_t ver;
3489
3490         ver.major = MD_MAJOR_VERSION;
3491         ver.minor = MD_MINOR_VERSION;
3492         ver.patchlevel = MD_PATCHLEVEL_VERSION;
3493
3494         if (copy_to_user(arg, &ver, sizeof(ver)))
3495                 return -EFAULT;
3496
3497         return 0;
3498 }
3499
3500 static int get_array_info(mddev_t * mddev, void __user * arg)
3501 {
3502         mdu_array_info_t info;
3503         int nr,working,active,failed,spare;
3504         mdk_rdev_t *rdev;
3505         struct list_head *tmp;
3506
3507         nr=working=active=failed=spare=0;
3508         ITERATE_RDEV(mddev,rdev,tmp) {
3509                 nr++;
3510                 if (test_bit(Faulty, &rdev->flags))
3511                         failed++;
3512                 else {
3513                         working++;
3514                         if (test_bit(In_sync, &rdev->flags))
3515                                 active++;       
3516                         else
3517                                 spare++;
3518                 }
3519         }
3520
3521         info.major_version = mddev->major_version;
3522         info.minor_version = mddev->minor_version;
3523         info.patch_version = MD_PATCHLEVEL_VERSION;
3524         info.ctime         = mddev->ctime;
3525         info.level         = mddev->level;
3526         info.size          = mddev->size;
3527         if (info.size != mddev->size) /* overflow */
3528                 info.size = -1;
3529         info.nr_disks      = nr;
3530         info.raid_disks    = mddev->raid_disks;
3531         info.md_minor      = mddev->md_minor;
3532         info.not_persistent= !mddev->persistent;
3533
3534         info.utime         = mddev->utime;
3535         info.state         = 0;
3536         if (mddev->in_sync)
3537                 info.state = (1<<MD_SB_CLEAN);
3538         if (mddev->bitmap && mddev->bitmap_offset)
3539                 info.state |= (1<<MD_SB_BITMAP_PRESENT);
3540         info.active_disks  = active;
3541         info.working_disks = working;
3542         info.failed_disks  = failed;
3543         info.spare_disks   = spare;
3544
3545         info.layout        = mddev->layout;
3546         info.chunk_size    = mddev->chunk_size;
3547
3548         if (copy_to_user(arg, &info, sizeof(info)))
3549                 return -EFAULT;
3550
3551         return 0;
3552 }
3553
3554 static int get_bitmap_file(mddev_t * mddev, void __user * arg)
3555 {
3556         mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */
3557         char *ptr, *buf = NULL;
3558         int err = -ENOMEM;
3559
3560         file = kmalloc(sizeof(*file), GFP_KERNEL);
3561         if (!file)
3562                 goto out;
3563
3564         /* bitmap disabled, zero the first byte and copy out */
3565         if (!mddev->bitmap || !mddev->bitmap->file) {
3566                 file->pathname[0] = '\0';
3567                 goto copy_out;
3568         }
3569
3570         buf = kmalloc(sizeof(file->pathname), GFP_KERNEL);
3571         if (!buf)
3572                 goto out;
3573
3574         ptr = file_path(mddev->bitmap->file, buf, sizeof(file->pathname));
3575         if (!ptr)
3576                 goto out;
3577
3578         strcpy(file->pathname, ptr);
3579
3580 copy_out:
3581         err = 0;
3582         if (copy_to_user(arg, file, sizeof(*file)))
3583                 err = -EFAULT;
3584 out:
3585         kfree(buf);
3586         kfree(file);
3587         return err;
3588 }
3589
3590 static int get_disk_info(mddev_t * mddev, void __user * arg)
3591 {
3592         mdu_disk_info_t info;
3593         unsigned int nr;
3594         mdk_rdev_t *rdev;
3595
3596         if (copy_from_user(&info, arg, sizeof(info)))
3597                 return -EFAULT;
3598
3599         nr = info.number;
3600
3601         rdev = find_rdev_nr(mddev, nr);
3602         if (rdev) {
3603                 info.major = MAJOR(rdev->bdev->bd_dev);
3604                 info.minor = MINOR(rdev->bdev->bd_dev);
3605                 info.raid_disk = rdev->raid_disk;
3606                 info.state = 0;
3607                 if (test_bit(Faulty, &rdev->flags))
3608                         info.state |= (1<<MD_DISK_FAULTY);
3609                 else if (test_bit(In_sync, &rdev->flags)) {
3610                         info.state |= (1<<MD_DISK_ACTIVE);
3611                         info.state |= (1<<MD_DISK_SYNC);
3612                 }
3613                 if (test_bit(WriteMostly, &rdev->flags))
3614                         info.state |= (1<<MD_DISK_WRITEMOSTLY);
3615         } else {
3616                 info.major = info.minor = 0;
3617                 info.raid_disk = -1;
3618                 info.state = (1<<MD_DISK_REMOVED);
3619         }
3620
3621         if (copy_to_user(arg, &info, sizeof(info)))
3622                 return -EFAULT;
3623
3624         return 0;
3625 }
3626
3627 static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
3628 {
3629         char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
3630         mdk_rdev_t *rdev;
3631         dev_t dev = MKDEV(info->major,info->minor);
3632
3633         if (info->major != MAJOR(dev) || info->minor != MINOR(dev))
3634                 return -EOVERFLOW;
3635
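        /*
         * Three cases follow: the array is not yet assembled (load and check
         * the on-disk superblock), the array is already running (hot-add a
         * spare), or a new 0.90 array is being created (build the rdev from
         * the ioctl info alone).
         */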
3636         if (!mddev->raid_disks) {
3637                 int err;
3638                 /* expecting a device which has a superblock */
3639                 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
3640                 if (IS_ERR(rdev)) {
3641                         printk(KERN_WARNING 
3642                                 "md: md_import_device returned %ld\n",
3643                                 PTR_ERR(rdev));
3644                         return PTR_ERR(rdev);
3645                 }
3646                 if (!list_empty(&mddev->disks)) {
3647                         mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
3648                                                         mdk_rdev_t, same_set);
3649                         int err = super_types[mddev->major_version]
3650                                 .load_super(rdev, rdev0, mddev->minor_version);
3651                         if (err < 0) {
3652                                 printk(KERN_WARNING 
3653                                         "md: %s has different UUID to %s\n",
3654                                         bdevname(rdev->bdev,b), 
3655                                         bdevname(rdev0->bdev,b2));
3656                                 export_rdev(rdev);
3657                                 return -EINVAL;
3658                         }
3659                 }
3660                 err = bind_rdev_to_array(rdev, mddev);
3661                 if (err)
3662                         export_rdev(rdev);
3663                 return err;
3664         }
3665
3666         /*
3667          * add_new_disk can be used once the array is assembled
3668          * to add "hot spares".  They must already have a superblock
3669          * written
3670          */
3671         if (mddev->pers) {
3672                 int err;
3673                 if (!mddev->pers->hot_add_disk) {
3674                         printk(KERN_WARNING 
3675                                 "%s: personality does not support diskops!\n",
3676                                mdname(mddev));
3677                         return -EINVAL;
3678                 }
3679                 if (mddev->persistent)
3680                         rdev = md_import_device(dev, mddev->major_version,
3681                                                 mddev->minor_version);
3682                 else
3683                         rdev = md_import_device(dev, -1, -1);
3684                 if (IS_ERR(rdev)) {
3685                         printk(KERN_WARNING 
3686                                 "md: md_import_device returned %ld\n",
3687                                 PTR_ERR(rdev));
3688                         return PTR_ERR(rdev);
3689                 }
3690                 /* set save_raid_disk if appropriate */
3691                 if (!mddev->persistent) {
3692                         if (info->state & (1<<MD_DISK_SYNC)  &&
3693                             info->raid_disk < mddev->raid_disks)
3694                                 rdev->raid_disk = info->raid_disk;
3695                         else
3696                                 rdev->raid_disk = -1;
3697                 } else
3698                         super_types[mddev->major_version].
3699                                 validate_super(mddev, rdev);
3700                 rdev->saved_raid_disk = rdev->raid_disk;
3701
3702                 clear_bit(In_sync, &rdev->flags); /* just to be sure */
3703                 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
3704                         set_bit(WriteMostly, &rdev->flags);
3705
3706                 rdev->raid_disk = -1;
3707                 err = bind_rdev_to_array(rdev, mddev);
3708                 if (!err && !mddev->pers->hot_remove_disk) {
3709                         /* If there is hot_add_disk but no hot_remove_disk
3710                          * then added disks for geometry changes,
3711                          * then added disks are for geometry changes
3712                          * and should be added immediately.
3713                         super_types[mddev->major_version].
3714                                 validate_super(mddev, rdev);
3715                         err = mddev->pers->hot_add_disk(mddev, rdev);
3716                         if (err)
3717                                 unbind_rdev_from_array(rdev);
3718                 }
3719                 if (err)
3720                         export_rdev(rdev);
3721
3722                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3723                 md_wakeup_thread(mddev->thread);
3724                 return err;
3725         }
3726
3727         /* otherwise, add_new_disk is only allowed
3728          * for major_version==0 superblocks
3729          */
3730         if (mddev->major_version != 0) {
3731                 printk(KERN_WARNING "%s: ADD_NEW_DISK not supported\n",
3732                        mdname(mddev));
3733                 return -EINVAL;
3734         }
3735
3736         if (!(info->state & (1<<MD_DISK_FAULTY))) {
3737                 int err;
3738                 rdev = md_import_device (dev, -1, 0);
3739                 if (IS_ERR(rdev)) {
3740                         printk(KERN_WARNING 
3741                                 "md: error, md_import_device() returned %ld\n",
3742                                 PTR_ERR(rdev));
3743                         return PTR_ERR(rdev);
3744                 }
3745                 rdev->desc_nr = info->number;
3746                 if (info->raid_disk < mddev->raid_disks)
3747                         rdev->raid_disk = info->raid_disk;
3748                 else
3749                         rdev->raid_disk = -1;
3750
3751                 rdev->flags = 0;
3752
3753                 if (rdev->raid_disk < mddev->raid_disks)
3754                         if (info->state & (1<<MD_DISK_SYNC))
3755                                 set_bit(In_sync, &rdev->flags);
3756
3757                 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
3758                         set_bit(WriteMostly, &rdev->flags);
3759
3760                 if (!mddev->persistent) {
3761                         printk(KERN_INFO "md: nonpersistent superblock ...\n");
3762                         rdev->sb_offset = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
3763                 } else 
3764                         rdev->sb_offset = calc_dev_sboffset(rdev->bdev);
3765                 rdev->size = calc_dev_size(rdev, mddev->chunk_size);
3766
3767                 err = bind_rdev_to_array(rdev, mddev);
3768                 if (err) {
3769                         export_rdev(rdev);
3770                         return err;
3771                 }
3772         }
3773
3774         return 0;
3775 }
3776
3777 static int hot_remove_disk(mddev_t * mddev, dev_t dev)
3778 {
3779         char b[BDEVNAME_SIZE];
3780         mdk_rdev_t *rdev;
3781
3782         if (!mddev->pers)
3783                 return -ENODEV;
3784
3785         rdev = find_rdev(mddev, dev);
3786         if (!rdev)
3787                 return -ENXIO;
3788
3789         if (rdev->raid_disk >= 0)
3790                 goto busy;
3791
3792         kick_rdev_from_array(rdev);
3793         md_update_sb(mddev, 1);
3794         md_new_event(mddev);
3795
3796         return 0;
3797 busy:
3798         printk(KERN_WARNING "md: cannot remove active disk %s from %s ... \n",
3799                 bdevname(rdev->bdev,b), mdname(mddev));
3800         return -EBUSY;
3801 }
3802
3803 static int hot_add_disk(mddev_t * mddev, dev_t dev)
3804 {
3805         char b[BDEVNAME_SIZE];
3806         int err;
3807         unsigned int size;
3808         mdk_rdev_t *rdev;
3809
3810         if (!mddev->pers)
3811                 return -ENODEV;
3812
3813         if (mddev->major_version != 0) {
3814                 printk(KERN_WARNING "%s: HOT_ADD may only be used with"
3815                         " version-0 superblocks.\n",
3816                         mdname(mddev));
3817                 return -EINVAL;
3818         }
3819         if (!mddev->pers->hot_add_disk) {
3820                 printk(KERN_WARNING 
3821                         "%s: personality does not support diskops!\n",
3822                         mdname(mddev));
3823                 return -EINVAL;
3824         }
3825
3826         rdev = md_import_device (dev, -1, 0);
3827         if (IS_ERR(rdev)) {
3828                 printk(KERN_WARNING 
3829                         "md: error, md_import_device() returned %ld\n",
3830                         PTR_ERR(rdev));
3831                 return -EINVAL;
3832         }
3833
3834         if (mddev->persistent)
3835                 rdev->sb_offset = calc_dev_sboffset(rdev->bdev);
3836         else
3837                 rdev->sb_offset =
3838                         rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
3839
3840         size = calc_dev_size(rdev, mddev->chunk_size);
3841         rdev->size = size;
3842
3843         if (test_bit(Faulty, &rdev->flags)) {
3844                 printk(KERN_WARNING 
3845                         "md: can not hot-add faulty %s disk to %s!\n",
3846                         bdevname(rdev->bdev,b), mdname(mddev));
3847                 err = -EINVAL;
3848                 goto abort_export;
3849         }
3850         clear_bit(In_sync, &rdev->flags);
3851         rdev->desc_nr = -1;
3852         err = bind_rdev_to_array(rdev, mddev);
3853         if (err)
3854                 goto abort_export;
3855
3856         /*
3857          * The rest had better be atomic: disk failures can be noticed in
3858          * interrupt context ...
3859          */
3860
3861         if (rdev->desc_nr == mddev->max_disks) {
3862                 printk(KERN_WARNING "%s: can not hot-add to full array!\n",
3863                         mdname(mddev));
3864                 err = -EBUSY;
3865                 goto abort_unbind_export;
3866         }
3867
3868         rdev->raid_disk = -1;
3869
3870         md_update_sb(mddev, 1);
3871
3872         /*
3873          * Kick recovery, maybe this spare has to be added to the
3874          * array immediately.
3875          */
3876         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3877         md_wakeup_thread(mddev->thread);
3878         md_new_event(mddev);
3879         return 0;
3880
3881 abort_unbind_export:
3882         unbind_rdev_from_array(rdev);
3883
3884 abort_export:
3885         export_rdev(rdev);
3886         return err;
3887 }
3888
3889 static int set_bitmap_file(mddev_t *mddev, int fd)
3890 {
3891         int err;
3892
3893         if (mddev->pers) {
3894                 if (!mddev->pers->quiesce)
3895                         return -EBUSY;
3896                 if (mddev->recovery || mddev->sync_thread)
3897                         return -EBUSY;
3898                 /* we should be able to change the bitmap.. */
3899         }
3900
3901
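        /* fd >= 0 attaches a new external bitmap file; fd < 0 detaches the
         * current bitmap. */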
3902         if (fd >= 0) {
3903                 if (mddev->bitmap)
3904                         return -EEXIST; /* cannot add when bitmap is present */
3905                 mddev->bitmap_file = fget(fd);
3906
3907                 if (mddev->bitmap_file == NULL) {
3908                         printk(KERN_ERR "%s: error: failed to get bitmap file\n",
3909                                mdname(mddev));
3910                         return -EBADF;
3911                 }
3912
3913                 err = deny_bitmap_write_access(mddev->bitmap_file);
3914                 if (err) {
3915                         printk(KERN_ERR "%s: error: bitmap file is already in use\n",
3916                                mdname(mddev));
3917                         fput(mddev->bitmap_file);
3918                         mddev->bitmap_file = NULL;
3919                         return err;
3920                 }
3921                 mddev->bitmap_offset = 0; /* file overrides offset */
3922         } else if (mddev->bitmap == NULL)
3923                 return -ENOENT; /* cannot remove what isn't there */
3924         err = 0;
3925         if (mddev->pers) {
3926                 mddev->pers->quiesce(mddev, 1);
3927                 if (fd >= 0)
3928                         err = bitmap_create(mddev);
3929                 if (fd < 0 || err) {
3930                         bitmap_destroy(mddev);
3931                         fd = -1; /* make sure to put the file */
3932                 }
3933                 mddev->pers->quiesce(mddev, 0);
3934         }
3935         if (fd < 0) {
3936                 if (mddev->bitmap_file) {
3937                         restore_bitmap_write_access(mddev->bitmap_file);
3938                         fput(mddev->bitmap_file);
3939                 }
3940                 mddev->bitmap_file = NULL;
3941         }
3942
3943         return err;
3944 }
3945
3946 /*
3947  * set_array_info is used in two different ways
3948  * The original usage is when creating a new array.
3949  * In this usage, raid_disks is > 0 and it together with
3950  *  level, size, not_persistent, layout, chunksize determine the
3951  *  shape of the array.
3952  *  This will always create an array with a type-0.90.0 superblock.
3953  * The newer usage is when assembling an array.
3954  *  In this case raid_disks will be 0, and the major_version field is
3955  *  used to determine which style super-blocks are to be found on the devices.
3956  *  The minor and patch _version numbers are also kept in case the
3957  *  super_block handler wishes to interpret them.
3958  */
3959 static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
3960 {
3961
3962         if (info->raid_disks == 0) {
3963                 /* just setting version number for superblock loading */
3964                 if (info->major_version < 0 ||
3965                     info->major_version >= sizeof(super_types)/sizeof(super_types[0]) ||
3966                     super_types[info->major_version].name == NULL) {
3967                         /* maybe try to auto-load a module? */
3968                         printk(KERN_INFO 
3969                                 "md: superblock version %d not known\n",
3970                                 info->major_version);
3971                         return -EINVAL;
3972                 }
3973                 mddev->major_version = info->major_version;
3974                 mddev->minor_version = info->minor_version;
3975                 mddev->patch_version = info->patch_version;
3976                 return 0;
3977         }
3978         mddev->major_version = MD_MAJOR_VERSION;
3979         mddev->minor_version = MD_MINOR_VERSION;
3980         mddev->patch_version = MD_PATCHLEVEL_VERSION;
3981         mddev->ctime         = get_seconds();
3982
3983         mddev->level         = info->level;
3984         mddev->clevel[0]     = 0;
3985         mddev->size          = info->size;
3986         mddev->raid_disks    = info->raid_disks;
3987         /* don't set md_minor, it is determined by which /dev/md* was
3988          * opened
3989          */
3990         if (info->state & (1<<MD_SB_CLEAN))
3991                 mddev->recovery_cp = MaxSector;
3992         else
3993                 mddev->recovery_cp = 0;
3994         mddev->persistent    = ! info->not_persistent;
3995
3996         mddev->layout        = info->layout;
3997         mddev->chunk_size    = info->chunk_size;
3998
3999         mddev->max_disks     = MD_SB_DISKS;
4000
4001         mddev->flags         = 0;
4002         set_bit(MD_CHANGE_DEVS, &mddev->flags);
4003
4004         mddev->default_bitmap_offset = MD_SB_BYTES >> 9;
4005         mddev->bitmap_offset = 0;
4006
4007         mddev->reshape_position = MaxSector;
4008
4009         /*
4010          * Generate a 128 bit UUID
4011          */
4012         get_random_bytes(mddev->uuid, 16);
4013
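        /* a freshly created array has no reshape pending, so the 'new_'
         * geometry simply mirrors the current one */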
4014         mddev->new_level = mddev->level;
4015         mddev->new_chunk = mddev->chunk_size;
4016         mddev->new_layout = mddev->layout;
4017         mddev->delta_disks = 0;
4018
4019         return 0;
4020 }
4021
4022 static int update_size(mddev_t *mddev, unsigned long size)
4023 {
4024         mdk_rdev_t * rdev;
4025         int rv;
4026         struct list_head *tmp;
4027         int fit = (size == 0);
4028
4029         if (mddev->pers->resize == NULL)
4030                 return -EINVAL;
4031         /* The "size" is the amount of each device that is used.
4032          * This can only make sense for arrays with redundancy.
4033          * linear and raid0 always use whatever space is available
4034          * We can only consider changing the size if no resync
4035          * or reconstruction is happening, and if the new size
4036          * is acceptable. It must fit before the sb_offset or,
4037          * if that is < data_offset, it must fit before the
4038          * size of each device.
4039          * If size is zero, we find the largest size that fits.
4040          */
4041         if (mddev->sync_thread)
4042                 return -EBUSY;
4043         ITERATE_RDEV(mddev,rdev,tmp) {
4044                 sector_t avail;
4045                 if (rdev->sb_offset > rdev->data_offset)
4046                         avail = (rdev->sb_offset*2) - rdev->data_offset;
4047                 else
4048                         avail = get_capacity(rdev->bdev->bd_disk)
4049                                 - rdev->data_offset;
4050                 if (fit && (size == 0 || size > avail/2))
4051                         size = avail/2;
4052                 if (avail < ((sector_t)size << 1))
4053                         return -ENOSPC;
4054         }
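        /* 'size' is in KiB per device; the personality's resize() takes
         * sectors, hence the doubling */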
4055         rv = mddev->pers->resize(mddev, (sector_t)size *2);
4056         if (!rv) {
4057                 struct block_device *bdev;
4058
4059                 bdev = bdget_disk(mddev->gendisk, 0);
4060                 if (bdev) {
4061                         mutex_lock(&bdev->bd_inode->i_mutex);
4062                         i_size_write(bdev->bd_inode, (loff_t)mddev->array_size << 10);
4063                         mutex_unlock(&bdev->bd_inode->i_mutex);
4064                         bdput(bdev);
4065                 }
4066         }
4067         return rv;
4068 }
4069
4070 static int update_raid_disks(mddev_t *mddev, int raid_disks)
4071 {
4072         int rv;
4073         /* change the number of raid disks */
4074         if (mddev->pers->check_reshape == NULL)
4075                 return -EINVAL;
4076         if (raid_disks <= 0 ||
4077             raid_disks >= mddev->max_disks)
4078                 return -EINVAL;
4079         if (mddev->sync_thread || mddev->reshape_position != MaxSector)
4080                 return -EBUSY;
4081         mddev->delta_disks = raid_disks - mddev->raid_disks;
4082
4083         rv = mddev->pers->check_reshape(mddev);
4084         return rv;
4085 }
4086
4087
4088 /*
4089  * update_array_info is used to change the configuration of an
4090  * on-line array.
4091  * The version, ctime, level, size, raid_disks, not_persistent, layout, chunk_size
4092  * fields in the info are checked against the array.
4093  * Any differences that cannot be handled will cause an error.
4094  * Normally, only one change can be managed at a time.
4095  */
4096 static int update_array_info(mddev_t *mddev, mdu_array_info_t *info)
4097 {
4098         int rv = 0;
4099         int cnt = 0;
4100         int state = 0;
4101
4102         /* calculate expected state, ignoring low bits */
4103         if (mddev->bitmap && mddev->bitmap_offset)
4104                 state |= (1 << MD_SB_BITMAP_PRESENT);
4105
4106         if (mddev->major_version != info->major_version ||
4107             mddev->minor_version != info->minor_version ||
4108 /*          mddev->patch_version != info->patch_version || */
4109             mddev->ctime         != info->ctime         ||
4110             mddev->level         != info->level         ||
4111 /*          mddev->layout        != info->layout        || */
4112             !mddev->persistent   != info->not_persistent||
4113             mddev->chunk_size    != info->chunk_size    ||
4114             /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
4115             ((state^info->state) & 0xfffffe00)
4116                 )
4117                 return -EINVAL;
4118         /* Check there is only one change */
4119         if (info->size >= 0 && mddev->size != info->size) cnt++;
4120         if (mddev->raid_disks != info->raid_disks) cnt++;
4121         if (mddev->layout != info->layout) cnt++;
4122         if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) cnt++;
4123         if (cnt == 0) return 0;
4124         if (cnt > 1) return -EINVAL;
4125
4126         if (mddev->layout != info->layout) {
4127                 /* Change layout
4128                  * we don't need to do anything at the md level, the
4129                  * personality will take care of it all.
4130                  */
4131                 if (mddev->pers->reconfig == NULL)
4132                         return -EINVAL;
4133                 else
4134                         return mddev->pers->reconfig(mddev, info->layout, -1);
4135         }
4136         if (info->size >= 0 && mddev->size != info->size)
4137                 rv = update_size(mddev, info->size);
4138
4139         if (mddev->raid_disks    != info->raid_disks)
4140                 rv = update_raid_disks(mddev, info->raid_disks);
4141
4142         if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
4143                 if (mddev->pers->quiesce == NULL)
4144                         return -EINVAL;
4145                 if (mddev->recovery || mddev->sync_thread)
4146                         return -EBUSY;
4147                 if (info->state & (1<<MD_SB_BITMAP_PRESENT)) {
4148                         /* add the bitmap */
4149                         if (mddev->bitmap)
4150                                 return -EEXIST;
4151                         if (mddev->default_bitmap_offset == 0)
4152                                 return -EINVAL;
4153                         mddev->bitmap_offset = mddev->default_bitmap_offset;
4154                         mddev->pers->quiesce(mddev, 1);
4155                         rv = bitmap_create(mddev);
4156                         if (rv)
4157                                 bitmap_destroy(mddev);
4158                         mddev->pers->quiesce(mddev, 0);
4159                 } else {
4160                         /* remove the bitmap */
4161                         if (!mddev->bitmap)
4162                                 return -ENOENT;
4163                         if (mddev->bitmap->file)
4164                                 return -EINVAL;
4165                         mddev->pers->quiesce(mddev, 1);
4166                         bitmap_destroy(mddev);
4167                         mddev->pers->quiesce(mddev, 0);
4168                         mddev->bitmap_offset = 0;
4169                 }
4170         }
4171         md_update_sb(mddev, 1);
4172         return rv;
4173 }
4174
4175 static int set_disk_faulty(mddev_t *mddev, dev_t dev)
4176 {
4177         mdk_rdev_t *rdev;
4178
4179         if (mddev->pers == NULL)
4180                 return -ENODEV;
4181
4182         rdev = find_rdev(mddev, dev);
4183         if (!rdev)
4184                 return -ENODEV;
4185
4186         md_error(mddev, rdev);
4187         return 0;
4188 }
4189
4190 static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
4191 {
4192         mddev_t *mddev = bdev->bd_disk->private_data;
4193
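        /* md has no physical geometry; fake 2 heads and 4 sectors per track
         * so legacy CHS users see capacity/8 cylinders */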
4194         geo->heads = 2;
4195         geo->sectors = 4;
4196         geo->cylinders = get_capacity(mddev->gendisk) / 8;
4197         return 0;
4198 }
4199
4200 static int md_ioctl(struct inode *inode, struct file *file,
4201                         unsigned int cmd, unsigned long arg)
4202 {
4203         int err = 0;
4204         void __user *argp = (void __user *)arg;
4205         mddev_t *mddev = NULL;
4206
4207         if (!capable(CAP_SYS_ADMIN))
4208                 return -EACCES;
4209
4210         /*
4211          * Commands dealing with the RAID driver but not any
4212          * particular array:
4213          */
4214         switch (cmd)
4215         {
4216                 case RAID_VERSION:
4217                         err = get_version(argp);
4218                         goto done;
4219
4220                 case PRINT_RAID_DEBUG:
4221                         err = 0;
4222                         md_print_devices();
4223                         goto done;
4224
4225 #ifndef MODULE
4226                 case RAID_AUTORUN:
4227                         err = 0;
4228                         autostart_arrays(arg);
4229                         goto done;
4230 #endif
4231                 default:;
4232         }
4233
4234         /*
4235          * Commands creating/starting a new array:
4236          */
4237
4238         mddev = inode->i_bdev->bd_disk->private_data;
4239
4240         if (!mddev) {
4241                 BUG();
4242                 goto abort;
4243         }
4244
4245         err = mddev_lock(mddev);
4246         if (err) {
4247                 printk(KERN_INFO 
4248                         "md: ioctl lock interrupted, reason %d, cmd %d\n",
4249                         err, cmd);
4250                 goto abort;
4251         }
4252
4253         switch (cmd)
4254         {
4255                 case SET_ARRAY_INFO:
4256                         {
4257                                 mdu_array_info_t info;
4258                                 if (!arg)
4259                                         memset(&info, 0, sizeof(info));
4260                                 else if (copy_from_user(&info, argp, sizeof(info))) {
4261                                         err = -EFAULT;
4262                                         goto abort_unlock;
4263                                 }
4264                                 if (mddev->pers) {
4265                                         err = update_array_info(mddev, &info);
4266                                         if (err) {
4267                                                 printk(KERN_WARNING "md: couldn't update"
4268                                                        " array info. %d\n", err);
4269                                                 goto abort_unlock;
4270                                         }
4271                                         goto done_unlock;
4272                                 }
4273                                 if (!list_empty(&mddev->disks)) {
4274                                         printk(KERN_WARNING
4275                                                "md: array %s already has disks!\n",
4276                                                mdname(mddev));
4277                                         err = -EBUSY;
4278                                         goto abort_unlock;
4279                                 }
4280                                 if (mddev->raid_disks) {
4281                                         printk(KERN_WARNING
4282                                                "md: array %s already initialised!\n",
4283                                                mdname(mddev));
4284                                         err = -EBUSY;
4285                                         goto abort_unlock;
4286                                 }
4287                                 err = set_array_info(mddev, &info);
4288                                 if (err) {
4289                                         printk(KERN_WARNING "md: couldn't set"
4290                                                " array info. %d\n", err);
4291                                         goto abort_unlock;
4292                                 }
4293                         }
4294                         goto done_unlock;
4295
4296                 default:;
4297         }
4298
4299         /*
4300          * Commands querying/configuring an existing array:
4301          */
4302         /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY,
4303          * RUN_ARRAY, and SET_BITMAP_FILE are allowed */
4304         if (!mddev->raid_disks && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
4305                         && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE) {
4306                 err = -ENODEV;
4307                 goto abort_unlock;
4308         }
4309
4310         /*
4311          * Commands even a read-only array can execute:
4312          */
4313         switch (cmd)
4314         {
4315                 case GET_ARRAY_INFO:
4316                         err = get_array_info(mddev, argp);
4317                         goto done_unlock;
4318
4319                 case GET_BITMAP_FILE:
4320                         err = get_bitmap_file(mddev, argp);
4321                         goto done_unlock;
4322
4323                 case GET_DISK_INFO:
4324                         err = get_disk_info(mddev, argp);
4325                         goto done_unlock;
4326
4327                 case RESTART_ARRAY_RW:
4328                         err = restart_array(mddev);
4329                         goto done_unlock;
4330
4331                 case STOP_ARRAY:
4332                         err = do_md_stop (mddev, 0);
4333                         goto done_unlock;
4334
4335                 case STOP_ARRAY_RO:
4336                         err = do_md_stop (mddev, 1);
4337                         goto done_unlock;
4338
4339         /*
4340          * We have a problem here: there is no easy way to give a CHS
4341          * virtual geometry. We currently pretend that we have 2 heads and
4342          * 4 sectors (with a BIG number of cylinders...). This drives
4343          * dosfs just mad... ;-)
4344          */
4345         }
4346
4347         /*
4348          * The remaining ioctls are changing the state of the
4349          * superblock, so we do not allow them on read-only arrays.
4350          * However non-MD ioctls (e.g. get-size) will still come through
4351          * here and hit the 'default' below, so only disallow
4352          * 'md' ioctls, and switch to rw mode if started auto-readonly.
4353          */
4354         if (_IOC_TYPE(cmd) == MD_MAJOR &&
4355             mddev->ro && mddev->pers) {
4356                 if (mddev->ro == 2) {
4357                         mddev->ro = 0;
4358                         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4359                         md_wakeup_thread(mddev->thread);
4360
4361                 } else {
4362                         err = -EROFS;
4363                         goto abort_unlock;
4364                 }
4365         }
4366
4367         switch (cmd)
4368         {
4369                 case ADD_NEW_DISK:
4370                 {
4371                         mdu_disk_info_t info;
4372                         if (copy_from_user(&info, argp, sizeof(info)))
4373                                 err = -EFAULT;
4374                         else
4375                                 err = add_new_disk(mddev, &info);
4376                         goto done_unlock;
4377                 }
4378
4379                 case HOT_REMOVE_DISK:
4380                         err = hot_remove_disk(mddev, new_decode_dev(arg));
4381                         goto done_unlock;
4382
4383                 case HOT_ADD_DISK:
4384                         err = hot_add_disk(mddev, new_decode_dev(arg));
4385                         goto done_unlock;
4386
4387                 case SET_DISK_FAULTY:
4388                         err = set_disk_faulty(mddev, new_decode_dev(arg));
4389                         goto done_unlock;
4390
4391                 case RUN_ARRAY:
4392                         err = do_md_run (mddev);
4393                         goto done_unlock;
4394
4395                 case SET_BITMAP_FILE:
4396                         err = set_bitmap_file(mddev, (int)arg);
4397                         goto done_unlock;
4398
4399                 default:
4400                         err = -EINVAL;
4401                         goto abort_unlock;
4402         }
4403
4404 done_unlock:
4405 abort_unlock:
4406         mddev_unlock(mddev);
4407
4408         return err;
4409 done:
4410         if (err)
4411                 MD_BUG();
4412 abort:
4413         return err;
4414 }
4415
4416 static int md_open(struct inode *inode, struct file *file)
4417 {
4418         /*
4419          * Succeed if we can lock the mddev, which confirms that
4420          * it isn't being stopped right now.
4421          */
4422         mddev_t *mddev = inode->i_bdev->bd_disk->private_data;
4423         int err;
4424
4425         if ((err = mddev_lock(mddev)))
4426                 goto out;
4427
4428         err = 0;
4429         mddev_get(mddev);
4430         mddev_unlock(mddev);
4431
4432         check_disk_change(inode->i_bdev);
4433  out:
4434         return err;
4435 }
4436
4437 static int md_release(struct inode *inode, struct file * file)
4438 {
4439         mddev_t *mddev = inode->i_bdev->bd_disk->private_data;
4440
4441         BUG_ON(!mddev);
4442         mddev_put(mddev);
4443
4444         return 0;
4445 }
4446
4447 static int md_media_changed(struct gendisk *disk)
4448 {
4449         mddev_t *mddev = disk->private_data;
4450
4451         return mddev->changed;
4452 }
4453
4454 static int md_revalidate(struct gendisk *disk)
4455 {
4456         mddev_t *mddev = disk->private_data;
4457
4458         mddev->changed = 0;
4459         return 0;
4460 }
4461 static struct block_device_operations md_fops =
4462 {
4463         .owner          = THIS_MODULE,
4464         .open           = md_open,
4465         .release        = md_release,
4466         .ioctl          = md_ioctl,
4467         .getgeo         = md_getgeo,
4468         .media_changed  = md_media_changed,
4469         .revalidate_disk= md_revalidate,
4470 };
4471
4472 static int md_thread(void * arg)
4473 {
4474         mdk_thread_t *thread = arg;
4475
4476         /*
4477          * md_thread is a 'system-thread', its priority should be very
4478          * high. We avoid resource deadlocks individually in each
4479          * raid personality. (RAID5 does preallocation) We also use RR and
4480          * the very same RT priority as kswapd, thus we will never get
4481          * into a priority inversion deadlock.
4482          *
4483          * we definitely have to have equal or higher priority than
4484          * bdflush, otherwise bdflush will deadlock if there are too
4485          * many dirty RAID5 blocks.
4486          */
4487
4488         allow_signal(SIGKILL);
4489         while (!kthread_should_stop()) {
4490
4491                 /* We need to wait INTERRUPTIBLE so that
4492                  * we don't add to the load-average.
4493                  * That means we need to be sure no signals are
4494                  * pending
4495                  */
4496                 if (signal_pending(current))
4497                         flush_signals(current);
4498
4499                 wait_event_interruptible_timeout
4500                         (thread->wqueue,
4501                          test_bit(THREAD_WAKEUP, &thread->flags)
4502                          || kthread_should_stop(),
4503                          thread->timeout);
4504                 try_to_freeze();
4505
4506                 clear_bit(THREAD_WAKEUP, &thread->flags);
4507
4508                 thread->run(thread->mddev);
4509         }
4510
4511         return 0;
4512 }
4513
4514 void md_wakeup_thread(mdk_thread_t *thread)
4515 {
4516         if (thread) {
4517                 dprintk("md: waking up MD thread %s.\n", thread->tsk->comm);
4518                 set_bit(THREAD_WAKEUP, &thread->flags);
4519                 wake_up(&thread->wqueue);
4520         }
4521 }
4522
4523 mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev,
4524                                  const char *name)
4525 {
4526         mdk_thread_t *thread;
4527
4528         thread = kzalloc(sizeof(mdk_thread_t), GFP_KERNEL);
4529         if (!thread)
4530                 return NULL;
4531
4532         init_waitqueue_head(&thread->wqueue);
4533
4534         thread->run = run;
4535         thread->mddev = mddev;
4536         thread->timeout = MAX_SCHEDULE_TIMEOUT;
4537         thread->tsk = kthread_run(md_thread, thread, name, mdname(thread->mddev));
4538         if (IS_ERR(thread->tsk)) {
4539                 kfree(thread);
4540                 return NULL;
4541         }
4542         return thread;
4543 }
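/*
 * Illustrative only: a personality typically creates its service thread
 * from its run() method with something like
 *	mddev->thread = md_register_thread(raid1d, mddev, "%s_raid1");
 * (the names here are just an example) and tears it down again with
 * md_unregister_thread() when the array is stopped.
 */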
4544
4545 void md_unregister_thread(mdk_thread_t *thread)
4546 {
4547         dprintk("interrupting MD-thread pid %d\n", thread->tsk->pid);
4548
4549         kthread_stop(thread->tsk);
4550         kfree(thread);
4551 }
4552
4553 void md_error(mddev_t *mddev, mdk_rdev_t *rdev)
4554 {
4555         if (!mddev) {
4556                 MD_BUG();
4557                 return;
4558         }
4559
4560         if (!rdev || test_bit(Faulty, &rdev->flags))
4561                 return;
4562 /*
4563         dprintk("md_error dev:%s, rdev:(%d:%d), (caller: %p,%p,%p,%p).\n",
4564                 mdname(mddev),
4565                 MAJOR(rdev->bdev->bd_dev), MINOR(rdev->bdev->bd_dev),
4566                 __builtin_return_address(0),__builtin_return_address(1),
4567                 __builtin_return_address(2),__builtin_return_address(3));
4568 */
4569         if (!mddev->pers)
4570                 return;
4571         if (!mddev->pers->error_handler)
4572                 return;
4573         mddev->pers->error_handler(mddev,rdev);
4574         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
4575         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4576         md_wakeup_thread(mddev->thread);
4577         md_new_event_inintr(mddev);
4578 }
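/*
 * Illustrative only: personalities call md_error(mddev, rdev) from their
 * I/O completion paths when a member device fails, for example raid1 on a
 * failed write, so the device gets marked Faulty and recovery is kicked off.
 */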
4579
4580 /* seq_file implementation /proc/mdstat */
4581
4582 static void status_unused(struct seq_file *seq)
4583 {
4584         int i = 0;
4585         mdk_rdev_t *rdev;
4586         struct list_head *tmp;
4587
4588         seq_printf(seq, "unused devices: ");
4589
4590         ITERATE_RDEV_PENDING(rdev,tmp) {
4591                 char b[BDEVNAME_SIZE];
4592                 i++;
4593                 seq_printf(seq, "%s ",
4594                               bdevname(rdev->bdev,b));
4595         }
4596         if (!i)
4597                 seq_printf(seq, "<none>");
4598
4599         seq_printf(seq, "\n");
4600 }
4601
4602
4603 static void status_resync(struct seq_file *seq, mddev_t * mddev)
4604 {
4605         sector_t max_blocks, resync, res;
4606         unsigned long dt, db, rt;
4607         int scale;
4608         unsigned int per_milli;
4609
4610         resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active))/2;
4611
4612         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
4613                 max_blocks = mddev->resync_max_sectors >> 1;
4614         else
4615                 max_blocks = mddev->size;
4616
4617         /*
4618          * Should not happen.
4619          */
4620         if (!max_blocks) {
4621                 MD_BUG();
4622                 return;
4623         }
4624         /* Pick 'scale' such that (resync>>scale)*1000 will fit
4625          * in a sector_t, and (max_blocks>>scale) will fit in a
4626          * u32, as those are the requirements for sector_div.
4627          * Thus 'scale' must be at least 10
4628          */
4629         scale = 10;
4630         if (sizeof(sector_t) > sizeof(unsigned long)) {
4631                 while ( max_blocks/2 > (1ULL<<(scale+32)))
4632                         scale++;
4633         }
4634         res = (resync>>scale)*1000;
4635         sector_div(res, (u32)((max_blocks>>scale)+1));
4636
4637         per_milli = res;
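        /* per_milli is tenths of a percent complete: the 20 character bar
         * below gets one '=' per 50 per_milli (i.e. per 5%), and the
         * "%3u.%u%%" output is simply per_milli/10 and per_milli%10,
         * so e.g. per_milli == 298 prints five '=' and "29.8%".
         */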
4638         {
4639                 int i, x = per_milli/50, y = 20-x;
4640                 seq_printf(seq, "[");
4641                 for (i = 0; i < x; i++)
4642                         seq_printf(seq, "=");
4643                 seq_printf(seq, ">");
4644                 for (i = 0; i < y; i++)
4645                         seq_printf(seq, ".");
4646                 seq_printf(seq, "] ");
4647         }
4648         seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)",
4649                    (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)?
4650                     "reshape" :
4651                     (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)?
4652                      "check" :
4653                      (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
4654                       "resync" : "recovery"))),
4655                    per_milli/10, per_milli % 10,
4656                    (unsigned long long) resync,
4657                    (unsigned long long) max_blocks);
4658
4659         /*
4660          * We do not want to overflow, so the order of operands and
4661          * the * 100 / 100 trick are important. We do a +1 to be
4662          * safe against division by zero. We only estimate anyway.
4663          *
4664          * dt: time from mark until now
4665          * db: blocks written from mark until now
4666          * rt: remaining time
4667          */
4668         dt = ((jiffies - mddev->resync_mark) / HZ);
4669         if (!dt) dt++;
4670         db = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active))
4671                 - mddev->resync_mark_cnt;
4672         rt = (dt * ((unsigned long)(max_blocks-resync) / (db/2/100+1)))/100;
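        /* dt is in seconds and db in 512-byte sectors, so db/2/dt below is
         * KB/sec; rt is the remaining-time estimate in seconds, shown as
         * whole minutes plus tenths of a minute.
         */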
4673
4674         seq_printf(seq, " finish=%lu.%lumin", rt / 60, (rt % 60)/6);
4675
4676         seq_printf(seq, " speed=%ldK/sec", db/2/dt);
4677 }
4678
4679 static void *md_seq_start(struct seq_file *seq, loff_t *pos)
4680 {
4681         struct list_head *tmp;
4682         loff_t l = *pos;
4683         mddev_t *mddev;
4684
4685         if (l >= 0x10000)
4686                 return NULL;
4687         if (!l--)
4688                 /* header */
4689                 return (void*)1;
4690
4691         spin_lock(&all_mddevs_lock);
4692         list_for_each(tmp,&all_mddevs)
4693                 if (!l--) {
4694                         mddev = list_entry(tmp, mddev_t, all_mddevs);
4695                         mddev_get(mddev);
4696                         spin_unlock(&all_mddevs_lock);
4697                         return mddev;
4698                 }
4699         spin_unlock(&all_mddevs_lock);
4700         if (!l--)
4701                 return (void*)2;/* tail */
4702         return NULL;
4703 }
4704
4705 static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4706 {
4707         struct list_head *tmp;
4708         mddev_t *next_mddev, *mddev = v;
4709         
4710         ++*pos;
4711         if (v == (void*)2)
4712                 return NULL;
4713
4714         spin_lock(&all_mddevs_lock);
4715         if (v == (void*)1)
4716                 tmp = all_mddevs.next;
4717         else
4718                 tmp = mddev->all_mddevs.next;
4719         if (tmp != &all_mddevs)
4720                 next_mddev = mddev_get(list_entry(tmp,mddev_t,all_mddevs));
4721         else {
4722                 next_mddev = (void*)2;
4723                 *pos = 0x10000;
4724         }               
4725         spin_unlock(&all_mddevs_lock);
4726
4727         if (v != (void*)1)
4728                 mddev_put(mddev);
4729         return next_mddev;
4730
4731 }
4732
4733 static void md_seq_stop(struct seq_file *seq, void *v)
4734 {
4735         mddev_t *mddev = v;
4736
4737         if (mddev && v != (void*)1 && v != (void*)2)
4738                 mddev_put(mddev);
4739 }
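/*
 * Besides real mddevs, the iterator above passes md_seq_show() two magic
 * cookies: (void*)1 asks for the "Personalities :" header and (void*)2 for
 * the trailing "unused devices:" line.
 */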
4740
4741 struct mdstat_info {
4742         int event;
4743 };
4744
4745 static int md_seq_show(struct seq_file *seq, void *v)
4746 {
4747         mddev_t *mddev = v;
4748         sector_t size;
4749         struct list_head *tmp2;
4750         mdk_rdev_t *rdev;
4751         struct mdstat_info *mi = seq->private;
4752         struct bitmap *bitmap;
4753
4754         if (v == (void*)1) {
4755                 struct mdk_personality *pers;
4756                 seq_printf(seq, "Personalities : ");
4757                 spin_lock(&pers_lock);
4758                 list_for_each_entry(pers, &pers_list, list)
4759                         seq_printf(seq, "[%s] ", pers->name);
4760
4761                 spin_unlock(&pers_lock);
4762                 seq_printf(seq, "\n");
4763                 mi->event = atomic_read(&md_event_count);
4764                 return 0;
4765         }
4766         if (v == (void*)2) {
4767                 status_unused(seq);
4768                 return 0;
4769         }
4770
4771         if (mddev_lock(mddev) < 0)
4772                 return -EINTR;
4773
4774         if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
4775                 seq_printf(seq, "%s : %sactive", mdname(mddev),
4776                                                 mddev->pers ? "" : "in");
4777                 if (mddev->pers) {
4778                         if (mddev->ro==1)
4779                                 seq_printf(seq, " (read-only)");
4780                         if (mddev->ro==2)
4781                                 seq_printf(seq, "(auto-read-only)");
4782                         seq_printf(seq, " %s", mddev->pers->name);
4783                 }
4784
4785                 size = 0;
4786                 ITERATE_RDEV(mddev,rdev,tmp2) {
4787                         char b[BDEVNAME_SIZE];
4788                         seq_printf(seq, " %s[%d]",
4789                                 bdevname(rdev->bdev,b), rdev->desc_nr);
4790                         if (test_bit(WriteMostly, &rdev->flags))
4791                                 seq_printf(seq, "(W)");
4792                         if (test_bit(Faulty, &rdev->flags)) {
4793                                 seq_printf(seq, "(F)");
4794                                 continue;
4795                         } else if (rdev->raid_disk < 0)
4796                                 seq_printf(seq, "(S)"); /* spare */
4797                         size += rdev->size;
4798                 }
4799
4800                 if (!list_empty(&mddev->disks)) {
4801                         if (mddev->pers)
4802                                 seq_printf(seq, "\n      %llu blocks",
4803                                         (unsigned long long)mddev->array_size);
4804                         else
4805                                 seq_printf(seq, "\n      %llu blocks",
4806                                         (unsigned long long)size);
4807                 }
4808                 if (mddev->persistent) {
4809                         if (mddev->major_version != 0 ||
4810                             mddev->minor_version != 90) {
4811                                 seq_printf(seq," super %d.%d",
4812                                            mddev->major_version,
4813                                            mddev->minor_version);
4814                         }
4815                 } else
4816                         seq_printf(seq, " super non-persistent");
4817
4818                 if (mddev->pers) {
4819                         mddev->pers->status (seq, mddev);
4820                         seq_printf(seq, "\n      ");
4821                         if (mddev->pers->sync_request) {
4822                                 if (mddev->curr_resync > 2) {
4823                                         status_resync (seq, mddev);
4824                                         seq_printf(seq, "\n      ");
4825                                 } else if (mddev->curr_resync == 1 || mddev->curr_resync == 2)
4826                                         seq_printf(seq, "\tresync=DELAYED\n      ");
4827                                 else if (mddev->recovery_cp < MaxSector)
4828                                         seq_printf(seq, "\tresync=PENDING\n      ");
4829                         }
4830                 } else
4831                         seq_printf(seq, "\n       ");
4832
4833                 if ((bitmap = mddev->bitmap)) {
4834                         unsigned long chunk_kb;
4835                         unsigned long flags;
4836                         spin_lock_irqsave(&bitmap->lock, flags);
4837                         chunk_kb = bitmap->chunksize >> 10;
4838                         seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], "
4839                                 "%lu%s chunk",
4840                                 bitmap->pages - bitmap->missing_pages,
4841                                 bitmap->pages,
4842                                 (bitmap->pages - bitmap->missing_pages)
4843                                         << (PAGE_SHIFT - 10),
4844                                 chunk_kb ? chunk_kb : bitmap->chunksize,
4845                                 chunk_kb ? "KB" : "B");
4846                         if (bitmap->file) {
4847                                 seq_printf(seq, ", file: ");
4848                                 seq_path(seq, bitmap->file->f_vfsmnt,
4849                                          bitmap->file->f_dentry," \t\n");
4850                         }
4851
4852                         seq_printf(seq, "\n");
4853                         spin_unlock_irqrestore(&bitmap->lock, flags);
4854                 }
4855
4856                 seq_printf(seq, "\n");
4857         }
4858         mddev_unlock(mddev);
4859         
4860         return 0;
4861 }
4862
4863 static struct seq_operations md_seq_ops = {
4864         .start  = md_seq_start,
4865         .next   = md_seq_next,
4866         .stop   = md_seq_stop,
4867         .show   = md_seq_show,
4868 };
4869
4870 static int md_seq_open(struct inode *inode, struct file *file)
4871 {
4872         int error;
4873         struct mdstat_info *mi = kmalloc(sizeof(*mi), GFP_KERNEL);
4874         if (mi == NULL)
4875                 return -ENOMEM;
4876
4877         error = seq_open(file, &md_seq_ops);
4878         if (error)
4879                 kfree(mi);
4880         else {
4881                 struct seq_file *p = file->private_data;
4882                 p->private = mi;
4883                 mi->event = atomic_read(&md_event_count);
4884         }
4885         return error;
4886 }
4887
4888 static int md_seq_release(struct inode *inode, struct file *file)
4889 {
4890         struct seq_file *m = file->private_data;
4891         struct mdstat_info *mi = m->private;
4892         m->private = NULL;
4893         kfree(mi);
4894         return seq_release(inode, file);
4895 }
4896
4897 static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
4898 {
4899         struct seq_file *m = filp->private_data;
4900         struct mdstat_info *mi = m->private;
4901         int mask;
4902
4903         poll_wait(filp, &md_event_waiters, wait);
4904
4905         /* always allow read */
4906         mask = POLLIN | POLLRDNORM;
4907
4908         if (mi->event != atomic_read(&md_event_count))
4909                 mask |= POLLERR | POLLPRI;
4910         return mask;
4911 }
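/*
 * Illustrative note: this lets a monitor such as mdadm --monitor sleep in
 * select()/poll() on /proc/mdstat and be woken (POLLERR|POLLPRI) whenever
 * md_event_count has changed since the header was last generated.
 */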
4912
4913 static struct file_operations md_seq_fops = {
4914         .open           = md_seq_open,
4915         .read           = seq_read,
4916         .llseek         = seq_lseek,
4917         .release        = md_seq_release,
4918         .poll           = mdstat_poll,
4919 };
4920
4921 int register_md_personality(struct mdk_personality *p)
4922 {
4923         spin_lock(&pers_lock);
4924         list_add_tail(&p->list, &pers_list);
4925         printk(KERN_INFO "md: %s personality registered for level %d\n", p->name, p->level);
4926         spin_unlock(&pers_lock);
4927         return 0;
4928 }
4929
4930 int unregister_md_personality(struct mdk_personality *p)
4931 {
4932         printk(KERN_INFO "md: %s personality unregistered\n", p->name);
4933         spin_lock(&pers_lock);
4934         list_del_init(&p->list);
4935         spin_unlock(&pers_lock);
4936         return 0;
4937 }
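/*
 * Illustrative only: personality modules call these from their module
 * init/exit routines, e.g. raid1's init does roughly
 *	register_md_personality(&raid1_personality);
 * which makes that level available when an array is run.
 */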
4938
4939 static int is_mddev_idle(mddev_t *mddev)
4940 {
4941         mdk_rdev_t * rdev;
4942         struct list_head *tmp;
4943         int idle;
4944         unsigned long curr_events;
4945
4946         idle = 1;
4947         ITERATE_RDEV(mddev,rdev,tmp) {
4948                 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
4949                 curr_events = disk_stat_read(disk, sectors[0]) + 
4950                                 disk_stat_read(disk, sectors[1]) - 
4951                                 atomic_read(&disk->sync_io);
4952                 /* The difference between curr_events and last_events
4953                  * will be affected by any new non-sync IO (making
4954                  * curr_events bigger) and any difference in the amount of
4955                  * in-flight sync IO (making curr_events bigger or smaller)
4956                  * The amount in-flight is currently limited to
4957                  * 32*64K in raid1/10 and 256*PAGE_SIZE in raid5/6
4958                  * which is at most 4096 sectors.
4959                  * These numbers are fairly fragile and should be made
4960                  * more robust, probably by enforcing the
4961                  * 'window size' that md_do_sync sort-of uses.
4962                  *
4963                  * Note: the following is an unsigned comparison.
4964                  */
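                /* That is, adding 4096 and comparing against 8192 treats the
                 * disk as busy once curr_events has drifted more than about
                 * 4096 sectors either side of last_events, without needing a
                 * signed subtraction.
                 */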
4965                 if ((curr_events - rdev->last_events + 4096) > 8192) {
4966                         rdev->last_events = curr_events;
4967                         idle = 0;
4968                 }
4969         }
4970         return idle;
4971 }
4972
4973 void md_done_sync(mddev_t *mddev, int blocks, int ok)
4974 {
4975         /* another "blocks" (512byte) blocks have been synced */
4976         atomic_sub(blocks, &mddev->recovery_active);
4977         wake_up(&mddev->recovery_wait);
4978         if (!ok) {
4979                 set_bit(MD_RECOVERY_ERR, &mddev->recovery);
4980                 md_wakeup_thread(mddev->thread);
4981                 /* stop recovery, signal do_sync ... */
4982         }
4983 }
4984
4985
4986 /* md_write_start(mddev, bi)
4987  * If we need to update some array metadata (e.g. 'active' flag
4988  * in superblock) before writing, schedule a superblock update
4989  * and wait for it to complete.
4990  */
4991 void md_write_start(mddev_t *mddev, struct bio *bi)
4992 {
4993         if (bio_data_dir(bi) != WRITE)
4994                 return;
4995
4996         BUG_ON(mddev->ro == 1);
4997         if (mddev->ro == 2) {
4998                 /* need to switch to read/write */
4999                 mddev->ro = 0;
5000                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5001                 md_wakeup_thread(mddev->thread);
5002         }
5003         atomic_inc(&mddev->writes_pending);
5004         if (mddev->in_sync) {
5005                 spin_lock_irq(&mddev->write_lock);
5006                 if (mddev->in_sync) {
5007                         mddev->in_sync = 0;
5008                         set_bit(MD_CHANGE_CLEAN, &mddev->flags);
5009                         md_wakeup_thread(mddev->thread);
5010                 }
5011                 spin_unlock_irq(&mddev->write_lock);
5012         }
5013         wait_event(mddev->sb_wait, mddev->flags==0);
5014 }
5015
5016 void md_write_end(mddev_t *mddev)
5017 {
5018         if (atomic_dec_and_test(&mddev->writes_pending)) {
5019                 if (mddev->safemode == 2)
5020                         md_wakeup_thread(mddev->thread);
5021                 else if (mddev->safemode_delay)
5022                         mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay);
5023         }
5024 }
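/*
 * Illustrative only: a write-capable personality brackets each array write
 * with this pair, calling md_write_start(mddev, bio) before issuing the
 * write and md_write_end(mddev) once it completes, so the superblock
 * 'active'/'clean' state can follow the number of writes in flight.
 */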
5025
5026 static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
5027
5028 #define SYNC_MARKS      10
5029 #define SYNC_MARK_STEP  (3*HZ)
5030 void md_do_sync(mddev_t *mddev)
5031 {
5032         mddev_t *mddev2;
5033         unsigned int currspeed = 0,
5034                  window;
5035         sector_t max_sectors,j, io_sectors;
5036         unsigned long mark[SYNC_MARKS];
5037         sector_t mark_cnt[SYNC_MARKS];
5038         int last_mark,m;
5039         struct list_head *tmp;
5040         sector_t last_check;
5041         int skipped = 0;
5042         struct list_head *rtmp;
5043         mdk_rdev_t *rdev;
5044         char *desc;
5045
5046         /* just in case the thread restarts... */
5047         if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
5048                 return;
5049         if (mddev->ro) /* never try to sync a read-only array */
5050                 return;
5051
5052         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
5053                 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
5054                         desc = "data-check";
5055                 else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
5056                         desc = "requested-resync";
5057                 else
5058                         desc = "resync";
5059         } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
5060                 desc = "reshape";
5061         else
5062                 desc = "recovery";
5063
5064         /* we overload curr_resync somewhat here.
5065          * 0 == not engaged in resync at all
5066          * 2 == checking that there is no conflict with another sync
5067          * 1 == like 2, but have yielded to allow conflicting resync to
5068          *              commence
5069          * other == active in resync - this many blocks
5070          *
5071          * Before starting a resync we must have set curr_resync to
5072          * 2, and then checked that every "conflicting" array has curr_resync
5073          * less than ours.  When we find one that is the same or higher
5074          * we wait on resync_wait.  To avoid deadlock, we reduce curr_resync
5075          * to 1 if we choose to yield (based arbitrarily on address of mddev structure).
5076          * This will mean we have to start checking from the beginning again.
5077          *
5078          */
5079
5080         do {
5081                 mddev->curr_resync = 2;
5082
5083         try_again:
5084                 if (kthread_should_stop()) {
5085                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5086                         goto skip;
5087                 }
5088                 ITERATE_MDDEV(mddev2,tmp) {
5089                         if (mddev2 == mddev)
5090                                 continue;
5091                         if (mddev2->curr_resync && 
5092                             match_mddev_units(mddev,mddev2)) {
5093                                 DEFINE_WAIT(wq);
5094                                 if (mddev < mddev2 && mddev->curr_resync == 2) {
5095                                         /* arbitrarily yield */
5096                                         mddev->curr_resync = 1;
5097                                         wake_up(&resync_wait);
5098                                 }
5099                                 if (mddev > mddev2 && mddev->curr_resync == 1)
5100                                         /* no need to wait here, we can wait the next
5101                                          * time 'round when curr_resync == 2
5102                                          */
5103                                         continue;
5104                                 prepare_to_wait(&resync_wait, &wq, TASK_UNINTERRUPTIBLE);
5105                                 if (!kthread_should_stop() &&
5106                                     mddev2->curr_resync >= mddev->curr_resync) {
5107                                         printk(KERN_INFO "md: delaying %s of %s"
5108                                                " until %s has finished (they"
5109                                                " share one or more physical units)\n",
5110                                                desc, mdname(mddev), mdname(mddev2));
5111                                         mddev_put(mddev2);
5112                                         schedule();
5113                                         finish_wait(&resync_wait, &wq);
5114                                         goto try_again;
5115                                 }
5116                                 finish_wait(&resync_wait, &wq);
5117                         }
5118                 }
5119         } while (mddev->curr_resync < 2);
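        /* We only get here with curr_resync == 2, after checking that no
         * array sharing physical devices is ahead of us, so this sync can
         * now proceed.
         */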
5120
5121         j = 0;
5122         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
5123                 /* resync follows the size requested by the personality,
5124                  * which defaults to physical size, but can be virtual size
5125                  */
5126                 max_sectors = mddev->resync_max_sectors;
5127                 mddev->resync_mismatches = 0;
5128                 /* we don't use the checkpoint if there's a bitmap */
5129                 if (!mddev->bitmap &&
5130                     !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
5131                         j = mddev->recovery_cp;
5132         } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
5133                 max_sectors = mddev->size << 1;
5134         else {
5135                 /* recovery follows the physical size of devices */
5136                 max_sectors = mddev->size << 1;
5137                 j = MaxSector;
5138                 ITERATE_RDEV(mddev,rdev,rtmp)
5139                         if (rdev->raid_disk >= 0 &&
5140                             !test_bit(Faulty, &rdev->flags) &&
5141                             !test_bit(In_sync, &rdev->flags) &&
5142                             rdev->recovery_offset < j)
5143                                 j = rdev->recovery_offset;
5144         }
5145
5146         printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev));
5147         printk(KERN_INFO "md: minimum _guaranteed_  speed:"
5148                 " %d KB/sec/disk.\n", speed_min(mddev));
5149         printk(KERN_INFO "md: using maximum available idle IO bandwidth "
5150                "(but not more than %d KB/sec) for %s.\n",
5151                speed_max(mddev), desc);
5152
5153         is_mddev_idle(mddev); /* this also initializes IO event counters */
5154
5155         io_sectors = 0;
5156         for (m = 0; m < SYNC_MARKS; m++) {
5157                 mark[m] = jiffies;
5158                 mark_cnt[m] = io_sectors;
5159         }
5160         last_mark = 0;
5161         mddev->resync_mark = mark[last_mark];
5162         mddev->resync_mark_cnt = mark_cnt[last_mark];
5163
5164         /*
5165          * Tune reconstruction:
5166          */
5167         window = 32*(PAGE_SIZE/512);
5168         printk(KERN_INFO "md: using %dk window, over a total of %llu blocks.\n",
5169                 window/2,(unsigned long long) max_sectors/2);
5170
5171         atomic_set(&mddev->recovery_active, 0);
5172         init_waitqueue_head(&mddev->recovery_wait);
5173         last_check = 0;
5174
5175         if (j>2) {
5176                 printk(KERN_INFO 
5177                        "md: resuming %s of %s from checkpoint.\n",
5178                        desc, mdname(mddev));
5179                 mddev->curr_resync = j;
5180         }
5181
5182         while (j < max_sectors) {
5183                 sector_t sectors;
5184
5185                 skipped = 0;
5186                 sectors = mddev->pers->sync_request(mddev, j, &skipped,
5187                                             currspeed < speed_min(mddev));
5188                 if (sectors == 0) {
5189                         set_bit(MD_RECOVERY_ERR, &mddev->recovery);
5190                         goto out;
5191                 }
5192
5193                 if (!skipped) { /* actual IO requested */
5194                         io_sectors += sectors;
5195                         atomic_add(sectors, &mddev->recovery_active);
5196                 }
5197
5198                 j += sectors;
5199                 if (j>1) mddev->curr_resync = j;
5200                 mddev->curr_mark_cnt = io_sectors;
5201                 if (last_check == 0)
5202                         /* this is the earliest that the rebuild will be
5203                          * visible in /proc/mdstat
5204                          */
5205                         md_new_event(mddev);
5206
5207                 if (last_check + window > io_sectors || j == max_sectors)
5208                         continue;
5209
5210                 last_check = io_sectors;
5211
5212                 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery) ||
5213                     test_bit(MD_RECOVERY_ERR, &mddev->recovery))
5214                         break;
5215
5216         repeat:
5217                 if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) {
5218                         /* step marks */
5219                         int next = (last_mark+1) % SYNC_MARKS;
5220
5221                         mddev->resync_mark = mark[next];
5222                         mddev->resync_mark_cnt = mark_cnt[next];
5223                         mark[next] = jiffies;
5224                         mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
5225                         last_mark = next;
5226                 }
5227
5228
5229                 if (kthread_should_stop()) {
5230                         /*
5231                          * got a signal, exit.
5232                          */
5233                         printk(KERN_INFO 
5234                                 "md: md_do_sync() got signal ... exiting\n");
5235                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5236                         goto out;
5237                 }
5238
5239                 /*
5240                  * this loop exits only when we are either slower than
5241                  * the 'hard' speed limit, or the system was IO-idle for
5242                  * a jiffy.
5243                  * the system might be non-idle CPU-wise, but we only care
5244                  * about not overloading the IO subsystem. (things like an
5245                  * e2fsck being done on the RAID array should execute fast)
5246                  */
5247                 mddev->queue->unplug_fn(mddev->queue);
5248                 cond_resched();
5249
5250                 currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2
5251                         /((jiffies-mddev->resync_mark)/HZ +1) +1;
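                /* io_sectors counts 512-byte sectors, so halving it and
                 * dividing by the seconds elapsed since resync_mark gives
                 * currspeed in KB/sec, directly comparable with
                 * speed_min()/speed_max() below.
                 */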
5252
5253                 if (currspeed > speed_min(mddev)) {
5254                         if ((currspeed > speed_max(mddev)) ||
5255                                         !is_mddev_idle(mddev)) {
5256                                 msleep(500);
5257                                 goto repeat;
5258                         }
5259                 }
5260         }
5261         printk(KERN_INFO "md: %s: %s done.\n",mdname(mddev), desc);
5262         /*
5263          * this also signals 'finished resyncing' to md_stop
5264          */
5265  out:
5266         mddev->queue->unplug_fn(mddev->queue);
5267
5268         wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
5269
5270         /* tell personality that we are finished */
5271         mddev->pers->sync_request(mddev, max_sectors, &skipped, 1);
5272
5273         if (!test_bit(MD_RECOVERY_ERR, &mddev->recovery) &&
5274             test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
5275             !test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
5276             mddev->curr_resync > 2) {
5277                 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
5278                         if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
5279                                 if (mddev->curr_resync >= mddev->recovery_cp) {
5280                                         printk(KERN_INFO
5281                                                "md: checkpointing %s of %s.\n",
5282                                                desc, mdname(mddev));
5283                                         mddev->recovery_cp = mddev->curr_resync;
5284                                 }
5285                         } else
5286                                 mddev->recovery_cp = MaxSector;
5287                 } else {
5288                         if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
5289                                 mddev->curr_resync = MaxSector;
5290                         ITERATE_RDEV(mddev,rdev,rtmp)
5291                                 if (rdev->raid_disk >= 0 &&
5292                                     !test_bit(Faulty, &rdev->flags) &&
5293                                     !test_bit(In_sync, &rdev->flags) &&
5294                                     rdev->recovery_offset < mddev->curr_resync)
5295                                         rdev->recovery_offset = mddev->curr_resync;
5296                 }
5297         }
5298
5299  skip:
5300         mddev->curr_resync = 0;
5301         wake_up(&resync_wait);
5302         set_bit(MD_RECOVERY_DONE, &mddev->recovery);
5303         md_wakeup_thread(mddev->thread);
5304 }
5305 EXPORT_SYMBOL_GPL(md_do_sync);
5306
5307
5308 /*
5309  * This routine is regularly called by all per-raid-array threads to
5310  * deal with generic issues like resync and super-block update.
5311  * Raid personalities that don't have a thread (linear/raid0) do not
5312  * need this as they never do any recovery or update the superblock.
5313  *
5314  * It does not do any resync itself, but rather "forks" off other threads
5315  * to do that as needed.
5316  * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
5317  * "->recovery" and create a thread at ->sync_thread.
5318  * When the thread finishes it sets MD_RECOVERY_DONE (and might set MD_RECOVERY_ERR)
5319  * and wakes up this thread, which will reap it and finish up.
5320  * This thread also removes any faulty devices (with nr_pending == 0).
5321  *
5322  * The overall approach is:
5323  *  1/ if the superblock needs updating, update it.
5324  *  2/ If a recovery thread is running, don't do anything else.
5325  *  3/ If recovery has finished, clean up, possibly marking spares active.
5326  *  4/ If there are any faulty devices, remove them.
5327  *  5/ If the array is degraded, try to add spare devices
5328  *  6/ If array has spares or is not in-sync, start a resync thread.
5329  */
5330 void md_check_recovery(mddev_t *mddev)
5331 {
5332         mdk_rdev_t *rdev;
5333         struct list_head *rtmp;
5334
5335
5336         if (mddev->bitmap)
5337                 bitmap_daemon_work(mddev->bitmap);
5338
5339         if (mddev->ro)
5340                 return;
5341
5342         if (signal_pending(current)) {
5343                 if (mddev->pers->sync_request) {
5344                         printk(KERN_INFO "md: %s in immediate safe mode\n",
5345                                mdname(mddev));
5346                         mddev->safemode = 2;
5347                 }
5348                 flush_signals(current);
5349         }
5350
5351         if ( ! (
5352                 mddev->flags ||
5353                 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
5354                 test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
5355                 (mddev->safemode == 1) ||
5356                 (mddev->safemode == 2 && ! atomic_read(&mddev->writes_pending)
5357                  && !mddev->in_sync && mddev->recovery_cp == MaxSector)
5358                 ))
5359                 return;
5360
5361         if (mddev_trylock(mddev)) {
5362                 int spares =0;
5363
5364                 spin_lock_irq(&mddev->write_lock);
5365                 if (mddev->safemode && !atomic_read(&mddev->writes_pending) &&
5366                     !mddev->in_sync && mddev->recovery_cp == MaxSector) {
5367                         mddev->in_sync = 1;
5368                         set_bit(MD_CHANGE_CLEAN, &mddev->flags);
5369                 }
5370                 if (mddev->safemode == 1)
5371                         mddev->safemode = 0;
5372                 spin_unlock_irq(&mddev->write_lock);
5373
5374                 if (mddev->flags)
5375                         md_update_sb(mddev, 0);
5376
5377
5378                 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
5379                     !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
5380                         /* resync/recovery still happening */
5381                         clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5382                         goto unlock;
5383                 }
5384                 if (mddev->sync_thread) {
5385                         /* resync has finished, collect result */
5386                         md_unregister_thread(mddev->sync_thread);
5387                         mddev->sync_thread = NULL;
5388                         if (!test_bit(MD_RECOVERY_ERR, &mddev->recovery) &&
5389                             !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
5390                                 /* success...*/
5391                                 /* activate any spares */
5392                                 mddev->pers->spare_active(mddev);
5393                         }
5394                         md_update_sb(mddev, 1);
5395
5396                         /* if the array is no longer degraded, then any saved_raid_disk
5397                          * information must be scrapped
5398                          */
5399                         if (!mddev->degraded)
5400                                 ITERATE_RDEV(mddev,rdev,rtmp)
5401                                         rdev->saved_raid_disk = -1;
5402
5403                         mddev->recovery = 0;
5404                         /* flag recovery needed just to double check */
5405                         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5406                         md_new_event(mddev);
5407                         goto unlock;
5408                 }
5409                 /* Clear some bits that don't mean anything, but
5410                  * might be left set
5411                  */
5412                 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5413                 clear_bit(MD_RECOVERY_ERR, &mddev->recovery);
5414                 clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
5415                 clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
5416
5417                 if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
5418                         goto unlock;
5419                 /* no recovery is running.
5420                  * remove any failed drives, then
5421                  * add spares if possible.
5422                  * Spares are also removed and re-added, to allow
5423                  * the personality to fail the re-add.
5424                  */
5425                 ITERATE_RDEV(mddev,rdev,rtmp)
5426                         if (rdev->raid_disk >= 0 &&
5427                             (test_bit(Faulty, &rdev->flags) || ! test_bit(In_sync, &rdev->flags)) &&
5428                             atomic_read(&rdev->nr_pending)==0) {
5429                                 if (mddev->pers->hot_remove_disk(mddev, rdev->raid_disk)==0) {
5430                                         char nm[20];
5431                                         sprintf(nm,"rd%d", rdev->raid_disk);
5432                                         sysfs_remove_link(&mddev->kobj, nm);
5433                                         rdev->raid_disk = -1;
5434                                 }
5435                         }
5436
5437                 if (mddev->degraded) {
5438                         ITERATE_RDEV(mddev,rdev,rtmp)
5439                                 if (rdev->raid_disk < 0
5440                                     && !test_bit(Faulty, &rdev->flags)) {
5441                                         rdev->recovery_offset = 0;
5442                                         if (mddev->pers->hot_add_disk(mddev,rdev)) {
5443                                                 char nm[20];
5444                                                 sprintf(nm, "rd%d", rdev->raid_disk);
5445                                                 sysfs_create_link(&mddev->kobj, &rdev->kobj, nm);
5446                                                 spares++;
5447                                                 md_new_event(mddev);
5448                                         } else
5449                                                 break;
5450                                 }
5451                 }
5452
5453                 if (spares) {
5454                         clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
5455                         clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
5456                 } else if (mddev->recovery_cp < MaxSector) {
5457                         set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
5458                 } else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
5459                         /* nothing to be done ... */
5460                         goto unlock;
5461
5462                 if (mddev->pers->sync_request) {
5463                         set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
5464                         if (spares && mddev->bitmap && ! mddev->bitmap->file) {
5465                                 /* We are adding a device or devices to an array
5466                                  * which has the bitmap stored on all devices.
5467                                  * So make sure all bitmap pages get written
5468                                  */
5469                                 bitmap_write_all(mddev->bitmap);
5470                         }
5471                         mddev->sync_thread = md_register_thread(md_do_sync,
5472                                                                 mddev,
5473                                                                 "%s_resync");
5474                         if (!mddev->sync_thread) {
5475                                 printk(KERN_ERR "%s: could not start resync"
5476                                         " thread...\n", 
5477                                         mdname(mddev));
5478                                 /* leave the spares where they are, it shouldn't hurt */
5479                                 mddev->recovery = 0;
5480                         } else
5481                                 md_wakeup_thread(mddev->sync_thread);
5482                         md_new_event(mddev);
5483                 }
5484         unlock:
5485                 mddev_unlock(mddev);
5486         }
5487 }
5488
5489 static int md_notify_reboot(struct notifier_block *this,
5490                             unsigned long code, void *x)
5491 {
5492         struct list_head *tmp;
5493         mddev_t *mddev;
5494
5495         if ((code == SYS_DOWN) || (code == SYS_HALT) || (code == SYS_POWER_OFF)) {
5496
5497                 printk(KERN_INFO "md: stopping all md devices.\n");
5498
5499                 ITERATE_MDDEV(mddev,tmp)
5500                         if (mddev_trylock(mddev)) {
5501                                 do_md_stop (mddev, 1);
5502                                 mddev_unlock(mddev);
5503                         }
5504                 /*
5505                  * certain more exotic SCSI devices are known to be
5506                  * volatile wrt being rebooted too early. While the
5507                  * right place to handle this issue is the given
5508                  * driver, we do want to have a safe RAID driver ...
5509                  */
5510                 mdelay(1000*1);
5511         }
5512         return NOTIFY_DONE;
5513 }
5514
5515 static struct notifier_block md_notifier = {
5516         .notifier_call  = md_notify_reboot,
5517         .next           = NULL,
5518         .priority       = INT_MAX, /* before any real devices */
5519 };
5520
5521 static void md_geninit(void)
5522 {
5523         struct proc_dir_entry *p;
5524
5525         dprintk("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));
5526
5527         p = create_proc_entry("mdstat", S_IRUGO, NULL);
5528         if (p)
5529                 p->proc_fops = &md_seq_fops;
5530 }
5531
5532 static int __init md_init(void)
5533 {
5534         if (register_blkdev(MAJOR_NR, "md"))
5535                 return -1;
5536         if ((mdp_major=register_blkdev(0, "mdp"))<=0) {
5537                 unregister_blkdev(MAJOR_NR, "md");
5538                 return -1;
5539         }
5540         blk_register_region(MKDEV(MAJOR_NR, 0), 1UL<<MINORBITS, THIS_MODULE,
5541                             md_probe, NULL, NULL);
5542         blk_register_region(MKDEV(mdp_major, 0), 1UL<<MINORBITS, THIS_MODULE,
5543                             md_probe, NULL, NULL);
5544
5545         register_reboot_notifier(&md_notifier);
5546         raid_table_header = register_sysctl_table(raid_root_table, 1);
5547
5548         md_geninit();
5549         return (0);
5550 }
5551
5552
5553 #ifndef MODULE
5554
5555 /*
5556  * Searches all registered partitions for autorun RAID arrays
5557  * at boot time.
5558  */
5559 static dev_t detected_devices[128];
5560 static int dev_cnt;
5561
5562 void md_autodetect_dev(dev_t dev)
5563 {
5564         if (dev_cnt >= 0 && dev_cnt < 127)
5565                 detected_devices[dev_cnt++] = dev;
5566 }
5567
5568
5569 static void autostart_arrays(int part)
5570 {
5571         mdk_rdev_t *rdev;
5572         int i;
5573
5574         printk(KERN_INFO "md: Autodetecting RAID arrays.\n");
5575
5576         for (i = 0; i < dev_cnt; i++) {
5577                 dev_t dev = detected_devices[i];
5578
5579                 rdev = md_import_device(dev,0, 0);
5580                 if (IS_ERR(rdev))
5581                         continue;
5582
5583                 if (test_bit(Faulty, &rdev->flags)) {
5584                         MD_BUG();
5585                         continue;
5586                 }
5587                 list_add(&rdev->same_set, &pending_raid_disks);
5588         }
5589         dev_cnt = 0;
5590
5591         autorun_devices(part);
5592 }
5593
5594 #endif
5595
5596 static __exit void md_exit(void)
5597 {
5598         mddev_t *mddev;
5599         struct list_head *tmp;
5600
5601         blk_unregister_region(MKDEV(MAJOR_NR,0), 1U << MINORBITS);
5602         blk_unregister_region(MKDEV(mdp_major,0), 1U << MINORBITS);
5603
5604         unregister_blkdev(MAJOR_NR,"md");
5605         unregister_blkdev(mdp_major, "mdp");
5606         unregister_reboot_notifier(&md_notifier);
5607         unregister_sysctl_table(raid_table_header);
5608         remove_proc_entry("mdstat", NULL);
5609         ITERATE_MDDEV(mddev,tmp) {
5610                 struct gendisk *disk = mddev->gendisk;
5611                 if (!disk)
5612                         continue;
5613                 export_array(mddev);
5614                 del_gendisk(disk);
5615                 put_disk(disk);
5616                 mddev->gendisk = NULL;
5617                 mddev_put(mddev);
5618         }
5619 }
5620
5621 module_init(md_init)
5622 module_exit(md_exit)
5623
5624 static int get_ro(char *buffer, struct kernel_param *kp)
5625 {
5626         return sprintf(buffer, "%d", start_readonly);
5627 }
5628 static int set_ro(const char *val, struct kernel_param *kp)
5629 {
5630         char *e;
5631         int num = simple_strtoul(val, &e, 10);
5632         if (*val && (*e == '\0' || *e == '\n')) {
5633                 start_readonly = num;
5634                 return 0;
5635         }
5636         return -EINVAL;
5637 }
5638
5639 module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
5640 module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);
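/*
 * Illustrative note: these typically appear as module parameters, e.g.
 * "modprobe md-mod start_ro=1" at load time or, where sysfs is available,
 * /sys/module/md_mod/parameters/start_ro at runtime; exact paths depend on
 * how md is built.
 */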
5641
5642
5643 EXPORT_SYMBOL(register_md_personality);
5644 EXPORT_SYMBOL(unregister_md_personality);
5645 EXPORT_SYMBOL(md_error);
5646 EXPORT_SYMBOL(md_done_sync);
5647 EXPORT_SYMBOL(md_write_start);
5648 EXPORT_SYMBOL(md_write_end);
5649 EXPORT_SYMBOL(md_register_thread);
5650 EXPORT_SYMBOL(md_unregister_thread);
5651 EXPORT_SYMBOL(md_wakeup_thread);
5652 EXPORT_SYMBOL(md_check_recovery);
5653 MODULE_LICENSE("GPL");
5654 MODULE_ALIAS("md");
5655 MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);