[PATCH] md: add write-intent-bitmap support to raid5
drivers/md/md.c
/*
   md.c : Multiple Devices driver for Linux
          Copyright (C) 1998, 1999, 2000 Ingo Molnar

     completely rewritten, based on the MD driver code from Marc Zyngier

   Changes:

   - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
   - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
   - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
   - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
   - kmod support by: Cyrus Durgin
   - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
   - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>

   - lots of fixes and improvements to the RAID1/RAID5 and generic
     RAID code (such as request based resynchronization):

     Neil Brown <neilb@cse.unsw.edu.au>.

   - persistent bitmap code
     Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#include <linux/module.h>
#include <linux/config.h>
#include <linux/linkage.h>
#include <linux/raid/md.h>
#include <linux/raid/bitmap.h>
#include <linux/sysctl.h>
#include <linux/devfs_fs_kernel.h>
#include <linux/buffer_head.h> /* for invalidate_bdev */
#include <linux/suspend.h>

#include <linux/init.h>

#include <linux/file.h>

#ifdef CONFIG_KMOD
#include <linux/kmod.h>
#endif

#include <asm/unaligned.h>

#define MAJOR_NR MD_MAJOR
#define MD_DRIVER

/* 63 partitions with the alternate major number (mdp) */
#define MdpMinorShift 6
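/*
 * Worked example: with MdpMinorShift == 6, each mdp unit spans 1<<6 == 64
 * minors, so minors 64..127 all belong to unit 1 (64 >> 6 == 1): one whole
 * device plus 63 partitions.
 */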

#define DEBUG 0
#define dprintk(x...) ((void)(DEBUG && printk(x)))


#ifndef MODULE
static void autostart_arrays (int part);
#endif

static mdk_personality_t *pers[MAX_PERSONALITY];
static DEFINE_SPINLOCK(pers_lock);

/*
 * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
 * is 1000 KB/sec, so the extra system load does not show up that much.
 * Increase it if you want to have more _guaranteed_ speed. Note that
 * the RAID driver will use the maximum available bandwidth if the IO
 * subsystem is idle. There is also an 'absolute maximum' reconstruction
 * speed limit - in case reconstruction slows down your system despite
 * idle IO detection.
 *
 * you can change it via /proc/sys/dev/raid/speed_limit_min and _max.
 */
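/*
 * For example (illustrative only):
 *     echo 50000 > /proc/sys/dev/raid/speed_limit_min
 * raises the guaranteed resync floor to ~50 MB/sec.
 */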

static int sysctl_speed_limit_min = 1000;
static int sysctl_speed_limit_max = 200000;

static struct ctl_table_header *raid_table_header;

static ctl_table raid_table[] = {
        {
                .ctl_name       = DEV_RAID_SPEED_LIMIT_MIN,
                .procname       = "speed_limit_min",
                .data           = &sysctl_speed_limit_min,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = &proc_dointvec,
        },
        {
                .ctl_name       = DEV_RAID_SPEED_LIMIT_MAX,
                .procname       = "speed_limit_max",
                .data           = &sysctl_speed_limit_max,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = &proc_dointvec,
        },
        { .ctl_name = 0 }
};

static ctl_table raid_dir_table[] = {
        {
                .ctl_name       = DEV_RAID,
                .procname       = "raid",
                .maxlen         = 0,
                .mode           = 0555,
                .child          = raid_table,
        },
        { .ctl_name = 0 }
};

static ctl_table raid_root_table[] = {
        {
                .ctl_name       = CTL_DEV,
                .procname       = "dev",
                .maxlen         = 0,
                .mode           = 0555,
                .child          = raid_dir_table,
        },
        { .ctl_name = 0 }
};

static struct block_device_operations md_fops;

/*
 * Allows iteration over all existing md arrays.
 * all_mddevs_lock protects this list.
 */
static LIST_HEAD(all_mddevs);
static DEFINE_SPINLOCK(all_mddevs_lock);


/*
 * iterates through all used mddevs in the system.
 * We take care to grab the all_mddevs_lock whenever navigating
 * the list, and to always hold a refcount when unlocked.
 * Any code which breaks out of this loop while owning
 * a reference to the current mddev must mddev_put it.
 */
#define ITERATE_MDDEV(mddev,tmp)                                        \
                                                                        \
        for (({ spin_lock(&all_mddevs_lock);                            \
                tmp = all_mddevs.next;                                  \
                mddev = NULL;});                                        \
             ({ if (tmp != &all_mddevs)                                 \
                        mddev_get(list_entry(tmp, mddev_t, all_mddevs));\
                spin_unlock(&all_mddevs_lock);                          \
                if (mddev) mddev_put(mddev);                            \
                mddev = list_entry(tmp, mddev_t, all_mddevs);           \
                tmp != &all_mddevs;});                                  \
             ({ spin_lock(&all_mddevs_lock);                            \
                tmp = tmp->next;})                                      \
                )
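
/*
 * Illustrative sketch (not compiled): walking every array with
 * ITERATE_MDDEV.  The macro handles the get/put itself, but a caller
 * that breaks out early still owns a reference to the current mddev
 * and must drop it with mddev_put().
 */
#if 0
static void example_walk_mddevs(void)
{
        mddev_t *mddev;
        struct list_head *tmp;

        ITERATE_MDDEV(mddev,tmp) {
                printk(KERN_INFO "md: visiting %s\n", mdname(mddev));
                if (mddev->raid_disks == 0) {
                        mddev_put(mddev);       /* early exit: drop our ref */
                        break;
                }
        }
}
#endif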


static int md_fail_request (request_queue_t *q, struct bio *bio)
{
        bio_io_error(bio, bio->bi_size);
        return 0;
}

static inline mddev_t *mddev_get(mddev_t *mddev)
{
        atomic_inc(&mddev->active);
        return mddev;
}

static void mddev_put(mddev_t *mddev)
{
        if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
                return;
        if (!mddev->raid_disks && list_empty(&mddev->disks)) {
                list_del(&mddev->all_mddevs);
                blk_put_queue(mddev->queue);
                kfree(mddev);
        }
        spin_unlock(&all_mddevs_lock);
}

static mddev_t * mddev_find(dev_t unit)
{
        mddev_t *mddev, *new = NULL;

 retry:
        spin_lock(&all_mddevs_lock);
        list_for_each_entry(mddev, &all_mddevs, all_mddevs)
                if (mddev->unit == unit) {
                        mddev_get(mddev);
                        spin_unlock(&all_mddevs_lock);
                        kfree(new);
                        return mddev;
                }

        if (new) {
                list_add(&new->all_mddevs, &all_mddevs);
                spin_unlock(&all_mddevs_lock);
                return new;
        }
        spin_unlock(&all_mddevs_lock);

        new = (mddev_t *) kmalloc(sizeof(*new), GFP_KERNEL);
        if (!new)
                return NULL;

        memset(new, 0, sizeof(*new));

        new->unit = unit;
        if (MAJOR(unit) == MD_MAJOR)
                new->md_minor = MINOR(unit);
        else
                new->md_minor = MINOR(unit) >> MdpMinorShift;

        init_MUTEX(&new->reconfig_sem);
        INIT_LIST_HEAD(&new->disks);
        INIT_LIST_HEAD(&new->all_mddevs);
        init_timer(&new->safemode_timer);
        atomic_set(&new->active, 1);
        spin_lock_init(&new->write_lock);
        init_waitqueue_head(&new->sb_wait);

        new->queue = blk_alloc_queue(GFP_KERNEL);
        if (!new->queue) {
                kfree(new);
                return NULL;
        }

        blk_queue_make_request(new->queue, md_fail_request);

        goto retry;
}
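
/*
 * Illustrative sketch (not compiled): mddev_find() returns its result
 * with a reference held (new arrays start with active == 1, existing
 * ones are mddev_get()ed), so every successful call must be balanced
 * by mddev_put().
 */
#if 0
static int example_touch_unit(dev_t unit)
{
        mddev_t *mddev = mddev_find(unit);

        if (!mddev)
                return -ENOMEM;
        /* ... use the array ... */
        mddev_put(mddev);
        return 0;
}
#endif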

static inline int mddev_lock(mddev_t * mddev)
{
        return down_interruptible(&mddev->reconfig_sem);
}

static inline void mddev_lock_uninterruptible(mddev_t * mddev)
{
        down(&mddev->reconfig_sem);
}

static inline int mddev_trylock(mddev_t * mddev)
{
        return down_trylock(&mddev->reconfig_sem);
}

static inline void mddev_unlock(mddev_t * mddev)
{
        up(&mddev->reconfig_sem);

        md_wakeup_thread(mddev->thread);
}
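
/*
 * Illustrative sketch (not compiled): the usual reconfiguration
 * critical section.  mddev_unlock() also wakes the per-array thread
 * so it can notice whatever was changed.
 */
#if 0
if (mddev_lock(mddev))
        return -EINTR;          /* interrupted while waiting */
/* ... reconfigure the array ... */
mddev_unlock(mddev);
#endif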

mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr)
{
        mdk_rdev_t * rdev;
        struct list_head *tmp;

        ITERATE_RDEV(mddev,rdev,tmp) {
                if (rdev->desc_nr == nr)
                        return rdev;
        }
        return NULL;
}

static mdk_rdev_t * find_rdev(mddev_t * mddev, dev_t dev)
{
        struct list_head *tmp;
        mdk_rdev_t *rdev;

        ITERATE_RDEV(mddev,rdev,tmp) {
                if (rdev->bdev->bd_dev == dev)
                        return rdev;
        }
        return NULL;
}

static inline sector_t calc_dev_sboffset(struct block_device *bdev)
{
        sector_t size = bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
        return MD_NEW_SIZE_BLOCKS(size);
}

static sector_t calc_dev_size(mdk_rdev_t *rdev, unsigned chunk_size)
{
        sector_t size;

        size = rdev->sb_offset;

        if (chunk_size)
                size &= ~((sector_t)chunk_size/1024 - 1);
        return size;
}
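
/*
 * Worked example: with a 64KiB chunk, chunk_size/1024 == 64 and the mask
 * above rounds the size (in 1K blocks) down to a multiple of 64, so a
 * 100000K device yields 99968K of usable space.
 */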

static int alloc_disk_sb(mdk_rdev_t * rdev)
{
        if (rdev->sb_page)
                MD_BUG();

        rdev->sb_page = alloc_page(GFP_KERNEL);
        if (!rdev->sb_page) {
                printk(KERN_ALERT "md: out of memory.\n");
                return -EINVAL;
        }

        return 0;
}

static void free_disk_sb(mdk_rdev_t * rdev)
{
        if (rdev->sb_page) {
                page_cache_release(rdev->sb_page);
                rdev->sb_loaded = 0;
                rdev->sb_page = NULL;
                rdev->sb_offset = 0;
                rdev->size = 0;
        }
}


static int super_written(struct bio *bio, unsigned int bytes_done, int error)
{
        mdk_rdev_t *rdev = bio->bi_private;
        if (bio->bi_size)
                return 1;

        if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags))
                md_error(rdev->mddev, rdev);

        if (atomic_dec_and_test(&rdev->mddev->pending_writes))
                wake_up(&rdev->mddev->sb_wait);
        bio_put(bio);
        return 0;
}

void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
                   sector_t sector, int size, struct page *page)
{
        /* write first size bytes of page to sector of rdev
         * Increment mddev->pending_writes before returning
         * and decrement it on completion, waking up sb_wait
         * if zero is reached.
         * If an error occurred, call md_error
         */
        struct bio *bio = bio_alloc(GFP_NOIO, 1);

        bio->bi_bdev = rdev->bdev;
        bio->bi_sector = sector;
        bio_add_page(bio, page, size, 0);
        bio->bi_private = rdev;
        bio->bi_end_io = super_written;
        atomic_inc(&mddev->pending_writes);
        submit_bio((1<<BIO_RW)|(1<<BIO_RW_SYNC), bio);
}
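
/*
 * Illustrative sketch (not compiled): md_super_write() is asynchronous.
 * A caller issues one write per device and then waits for
 * mddev->pending_writes to drain, exactly as md_update_sb() does below.
 */
#if 0
md_super_write(mddev, rdev, rdev->sb_offset<<1, rdev->sb_size,
               rdev->sb_page);
/* ... more md_super_write() calls ... */
wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes) == 0);
#endif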

static int bi_complete(struct bio *bio, unsigned int bytes_done, int error)
{
        if (bio->bi_size)
                return 1;

        complete((struct completion*)bio->bi_private);
        return 0;
}

int sync_page_io(struct block_device *bdev, sector_t sector, int size,
                   struct page *page, int rw)
{
        struct bio *bio = bio_alloc(GFP_NOIO, 1);
        struct completion event;
        int ret;

        rw |= (1 << BIO_RW_SYNC);

        bio->bi_bdev = bdev;
        bio->bi_sector = sector;
        bio_add_page(bio, page, size, 0);
        init_completion(&event);
        bio->bi_private = &event;
        bio->bi_end_io = bi_complete;
        submit_bio(rw, bio);
        wait_for_completion(&event);

        ret = test_bit(BIO_UPTODATE, &bio->bi_flags);
        bio_put(bio);
        return ret;
}

static int read_disk_sb(mdk_rdev_t * rdev, int size)
{
        char b[BDEVNAME_SIZE];
        if (!rdev->sb_page) {
                MD_BUG();
                return -EINVAL;
        }
        if (rdev->sb_loaded)
                return 0;


        if (!sync_page_io(rdev->bdev, rdev->sb_offset<<1, size, rdev->sb_page, READ))
                goto fail;
        rdev->sb_loaded = 1;
        return 0;

fail:
        printk(KERN_WARNING "md: disabled device %s, could not read superblock.\n",
                bdevname(rdev->bdev,b));
        return -EINVAL;
}

static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
        if (    (sb1->set_uuid0 == sb2->set_uuid0) &&
                (sb1->set_uuid1 == sb2->set_uuid1) &&
                (sb1->set_uuid2 == sb2->set_uuid2) &&
                (sb1->set_uuid3 == sb2->set_uuid3))

                return 1;

        return 0;
}


static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
        int ret;
        mdp_super_t *tmp1, *tmp2;

        tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
        tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);

        if (!tmp1 || !tmp2) {
                ret = 0;
                printk(KERN_INFO "md.c: sb_equal: out of memory, assuming sb1 != sb2!\n");
                goto abort;
        }

        *tmp1 = *sb1;
        *tmp2 = *sb2;

        /*
         * nr_disks is not constant
         */
        tmp1->nr_disks = 0;
        tmp2->nr_disks = 0;

        if (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4))
                ret = 0;
        else
                ret = 1;

abort:
        kfree(tmp1);
        kfree(tmp2);
        return ret;
}

static unsigned int calc_sb_csum(mdp_super_t * sb)
{
        unsigned int disk_csum, csum;

        disk_csum = sb->sb_csum;
        sb->sb_csum = 0;
        csum = csum_partial((void *)sb, MD_SB_BYTES, 0);
        sb->sb_csum = disk_csum;
        return csum;
}


/*
 * Handle superblock details.
 * We want to be able to handle multiple superblock formats
 * so we have a common interface to them all, and an array of
 * different handlers.
 * We rely on user-space to write the initial superblock, and support
 * reading and updating of superblocks.
 * Interface methods are:
 *   int load_super(mdk_rdev_t *dev, mdk_rdev_t *refdev, int minor_version)
 *      loads and validates a superblock on dev.
 *      if refdev != NULL, compare superblocks on both devices
 *    Return:
 *      0 - dev has a superblock that is compatible with refdev
 *      1 - dev has a superblock that is compatible and newer than refdev
 *          so dev should be used as the refdev in future
 *     -EINVAL superblock incompatible or invalid
 *     -othererror e.g. -EIO
 *
 *   int validate_super(mddev_t *mddev, mdk_rdev_t *dev)
 *      Verify that dev is acceptable into mddev.
 *       The first time, mddev->raid_disks will be 0, and data from
 *       dev should be merged in.  Subsequent calls check that dev
 *       is new enough.  Return 0 or -EINVAL
 *
 *   void sync_super(mddev_t *mddev, mdk_rdev_t *dev)
 *     Update the superblock for rdev with data in mddev
 *     This does not write to disc.
 *
 */

struct super_type  {
        char            *name;
        struct module   *owner;
        int             (*load_super)(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version);
        int             (*validate_super)(mddev_t *mddev, mdk_rdev_t *rdev);
        void            (*sync_super)(mddev_t *mddev, mdk_rdev_t *rdev);
};

/*
 * load_super for 0.90.0
 */
static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
{
        char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
        mdp_super_t *sb;
        int ret;
        sector_t sb_offset;

        /*
         * Calculate the position of the superblock,
         * it's at the end of the disk.
         *
         * It also happens to be a multiple of 4Kb.
         */
        sb_offset = calc_dev_sboffset(rdev->bdev);
        rdev->sb_offset = sb_offset;

        ret = read_disk_sb(rdev, MD_SB_BYTES);
        if (ret) return ret;

        ret = -EINVAL;

        bdevname(rdev->bdev, b);
        sb = (mdp_super_t*)page_address(rdev->sb_page);

        if (sb->md_magic != MD_SB_MAGIC) {
                printk(KERN_ERR "md: invalid raid superblock magic on %s\n",
                       b);
                goto abort;
        }

        if (sb->major_version != 0 ||
            sb->minor_version != 90) {
                printk(KERN_WARNING "Bad version number %d.%d on %s\n",
                        sb->major_version, sb->minor_version,
                        b);
                goto abort;
        }

        if (sb->raid_disks <= 0)
                goto abort;

        if (csum_fold(calc_sb_csum(sb)) != csum_fold(sb->sb_csum)) {
                printk(KERN_WARNING "md: invalid superblock checksum on %s\n",
                        b);
                goto abort;
        }

        rdev->preferred_minor = sb->md_minor;
        rdev->data_offset = 0;
        rdev->sb_size = MD_SB_BYTES;

        if (sb->level == LEVEL_MULTIPATH)
                rdev->desc_nr = -1;
        else
                rdev->desc_nr = sb->this_disk.number;

        if (refdev == 0)
                ret = 1;
        else {
                __u64 ev1, ev2;
                mdp_super_t *refsb = (mdp_super_t*)page_address(refdev->sb_page);
                if (!uuid_equal(refsb, sb)) {
                        printk(KERN_WARNING "md: %s has different UUID to %s\n",
                                b, bdevname(refdev->bdev,b2));
                        goto abort;
                }
                if (!sb_equal(refsb, sb)) {
                        printk(KERN_WARNING "md: %s has same UUID"
                               " but different superblock to %s\n",
                               b, bdevname(refdev->bdev, b2));
                        goto abort;
                }
                ev1 = md_event(sb);
                ev2 = md_event(refsb);
                if (ev1 > ev2)
                        ret = 1;
                else
                        ret = 0;
        }
        rdev->size = calc_dev_size(rdev, sb->chunk_size);

 abort:
        return ret;
}

/*
 * validate_super for 0.90.0
 */
static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
{
        mdp_disk_t *desc;
        mdp_super_t *sb = (mdp_super_t *)page_address(rdev->sb_page);

        rdev->raid_disk = -1;
        rdev->in_sync = 0;
        if (mddev->raid_disks == 0) {
                mddev->major_version = 0;
                mddev->minor_version = sb->minor_version;
                mddev->patch_version = sb->patch_version;
                mddev->persistent = ! sb->not_persistent;
                mddev->chunk_size = sb->chunk_size;
                mddev->ctime = sb->ctime;
                mddev->utime = sb->utime;
                mddev->level = sb->level;
                mddev->layout = sb->layout;
                mddev->raid_disks = sb->raid_disks;
                mddev->size = sb->size;
                mddev->events = md_event(sb);
                mddev->bitmap_offset = 0;
                mddev->default_bitmap_offset = MD_SB_BYTES >> 9;

                if (sb->state & (1<<MD_SB_CLEAN))
                        mddev->recovery_cp = MaxSector;
                else {
                        if (sb->events_hi == sb->cp_events_hi &&
                                sb->events_lo == sb->cp_events_lo) {
                                mddev->recovery_cp = sb->recovery_cp;
                        } else
                                mddev->recovery_cp = 0;
                }

                memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
                memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
                memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
                memcpy(mddev->uuid+12,&sb->set_uuid3, 4);

                mddev->max_disks = MD_SB_DISKS;

                if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
                    mddev->bitmap_file == NULL) {
                        if (mddev->level != 1 && mddev->level != 5) {
                                /* FIXME use a better test */
                                printk(KERN_WARNING "md: bitmaps only supported for raid1 and raid5\n");
                                return -EINVAL;
                        }
                        mddev->bitmap_offset = mddev->default_bitmap_offset;
                }

        } else if (mddev->pers == NULL) {
                /* Insist on good event counter while assembling */
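                /* (a device may be at most one event behind the array) */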
                __u64 ev1 = md_event(sb);
                ++ev1;
                if (ev1 < mddev->events)
                        return -EINVAL;
        } else if (mddev->bitmap) {
                /* if adding to array with a bitmap, then we can accept an
                 * older device ... but not too old.
                 */
                __u64 ev1 = md_event(sb);
                if (ev1 < mddev->bitmap->events_cleared)
                        return 0;
        } else /* just a hot-add of a new device, leave raid_disk at -1 */
                return 0;

        if (mddev->level != LEVEL_MULTIPATH) {
                rdev->faulty = 0;
                rdev->flags = 0;
                desc = sb->disks + rdev->desc_nr;

                if (desc->state & (1<<MD_DISK_FAULTY))
                        rdev->faulty = 1;
                else if (desc->state & (1<<MD_DISK_SYNC) &&
                         desc->raid_disk < mddev->raid_disks) {
                        rdev->in_sync = 1;
                        rdev->raid_disk = desc->raid_disk;
                }
                if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
                        set_bit(WriteMostly, &rdev->flags);
        } else /* MULTIPATH are always insync */
                rdev->in_sync = 1;
        return 0;
}

/*
 * sync_super for 0.90.0
 */
static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
{
        mdp_super_t *sb;
        struct list_head *tmp;
        mdk_rdev_t *rdev2;
        int next_spare = mddev->raid_disks;

        /* make rdev->sb match mddev data..
         *
         * 1/ zero out disks
         * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
         * 3/ any empty disks < next_spare become removed
         *
         * disks[0] gets initialised to REMOVED because
         * we cannot be sure from other fields if it has
         * been initialised or not.
         */
        int i;
        int active=0, working=0,failed=0,spare=0,nr_disks=0;

        sb = (mdp_super_t*)page_address(rdev->sb_page);

        memset(sb, 0, sizeof(*sb));

        sb->md_magic = MD_SB_MAGIC;
        sb->major_version = mddev->major_version;
        sb->minor_version = mddev->minor_version;
        sb->patch_version = mddev->patch_version;
        sb->gvalid_words  = 0; /* ignored */
        memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
        memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
        memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
        memcpy(&sb->set_uuid3, mddev->uuid+12,4);

        sb->ctime = mddev->ctime;
        sb->level = mddev->level;
        sb->size  = mddev->size;
        sb->raid_disks = mddev->raid_disks;
        sb->md_minor = mddev->md_minor;
        sb->not_persistent = !mddev->persistent;
        sb->utime = mddev->utime;
        sb->state = 0;
        sb->events_hi = (mddev->events>>32);
        sb->events_lo = (u32)mddev->events;

        if (mddev->in_sync)
        {
                sb->recovery_cp = mddev->recovery_cp;
                sb->cp_events_hi = (mddev->events>>32);
                sb->cp_events_lo = (u32)mddev->events;
                if (mddev->recovery_cp == MaxSector)
                        sb->state = (1<< MD_SB_CLEAN);
        } else
                sb->recovery_cp = 0;

        sb->layout = mddev->layout;
        sb->chunk_size = mddev->chunk_size;

        if (mddev->bitmap && mddev->bitmap_file == NULL)
                sb->state |= (1<<MD_SB_BITMAP_PRESENT);

        sb->disks[0].state = (1<<MD_DISK_REMOVED);
        ITERATE_RDEV(mddev,rdev2,tmp) {
                mdp_disk_t *d;
                if (rdev2->raid_disk >= 0 && rdev2->in_sync && !rdev2->faulty)
                        rdev2->desc_nr = rdev2->raid_disk;
                else
                        rdev2->desc_nr = next_spare++;
                d = &sb->disks[rdev2->desc_nr];
                nr_disks++;
                d->number = rdev2->desc_nr;
                d->major = MAJOR(rdev2->bdev->bd_dev);
                d->minor = MINOR(rdev2->bdev->bd_dev);
                if (rdev2->raid_disk >= 0 && rdev2->in_sync && !rdev2->faulty)
                        d->raid_disk = rdev2->raid_disk;
                else
                        d->raid_disk = rdev2->desc_nr; /* compatibility */
                if (rdev2->faulty) {
                        d->state = (1<<MD_DISK_FAULTY);
                        failed++;
                } else if (rdev2->in_sync) {
                        d->state = (1<<MD_DISK_ACTIVE);
                        d->state |= (1<<MD_DISK_SYNC);
                        active++;
                        working++;
                } else {
                        d->state = 0;
                        spare++;
                        working++;
                }
                if (test_bit(WriteMostly, &rdev2->flags))
                        d->state |= (1<<MD_DISK_WRITEMOSTLY);
        }

        /* now set the "removed" and "faulty" bits on any missing devices */
        for (i=0 ; i < mddev->raid_disks ; i++) {
                mdp_disk_t *d = &sb->disks[i];
                if (d->state == 0 && d->number == 0) {
                        d->number = i;
                        d->raid_disk = i;
                        d->state = (1<<MD_DISK_REMOVED);
                        d->state |= (1<<MD_DISK_FAULTY);
                        failed++;
                }
        }
        sb->nr_disks = nr_disks;
        sb->active_disks = active;
        sb->working_disks = working;
        sb->failed_disks = failed;
        sb->spare_disks = spare;

        sb->this_disk = sb->disks[rdev->desc_nr];
        sb->sb_csum = calc_sb_csum(sb);
}

/*
 * version 1 superblock
 */

static unsigned int calc_sb_1_csum(struct mdp_superblock_1 * sb)
{
        unsigned int disk_csum, csum;
        unsigned long long newcsum;
        int size = 256 + le32_to_cpu(sb->max_dev)*2;
        unsigned int *isuper = (unsigned int*)sb;
        int i;

        disk_csum = sb->sb_csum;
        sb->sb_csum = 0;
        newcsum = 0;
        for (i=0; size>=4; size -= 4)
                newcsum += le32_to_cpu(*isuper++);

        if (size == 2)
                newcsum += le16_to_cpu(*(unsigned short*) isuper);

        csum = (newcsum & 0xffffffff) + (newcsum >> 32);
        sb->sb_csum = disk_csum;
        return cpu_to_le32(csum);
}
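
/*
 * Worked example of the fold above: if the 64-bit sum is 0x123456789,
 * the returned checksum is 0x23456789 + 0x1 == 0x2345678a.
 */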

static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
{
        struct mdp_superblock_1 *sb;
        int ret;
        sector_t sb_offset;
        char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
        int bmask;

        /*
         * Calculate the position of the superblock.
         * It is always aligned to a 4K boundary and
         * depending on minor_version, it can be:
         * 0: At least 8K, but less than 12K, from end of device
         * 1: At start of device
         * 2: 4K from start of device.
         */
        switch(minor_version) {
        case 0:
                sb_offset = rdev->bdev->bd_inode->i_size >> 9;
                sb_offset -= 8*2;
                sb_offset &= ~(sector_t)(4*2-1);
                /* convert from sectors to K */
                sb_offset /= 2;
                break;
        case 1:
                sb_offset = 0;
                break;
        case 2:
                sb_offset = 4;
                break;
        default:
                return -EINVAL;
        }
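        /*
         * Worked example (minor_version 0): a 10000000-sector device
         * gives sb_offset = (10000000 - 16) & ~7 = 9999984 sectors
         * = 4999992K, i.e. 8K below the end of the device.
         */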
        rdev->sb_offset = sb_offset;

        /* superblock is rarely larger than 1K, but it can be larger,
         * and it is safe to read 4k, so we do that
         */
        ret = read_disk_sb(rdev, 4096);
        if (ret) return ret;


        sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);

        if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
            sb->major_version != cpu_to_le32(1) ||
            le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
            le64_to_cpu(sb->super_offset) != (rdev->sb_offset<<1) ||
            (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
                return -EINVAL;

        if (calc_sb_1_csum(sb) != sb->sb_csum) {
                printk("md: invalid superblock checksum on %s\n",
                        bdevname(rdev->bdev,b));
                return -EINVAL;
        }
        if (le64_to_cpu(sb->data_size) < 10) {
                printk("md: data_size too small on %s\n",
                       bdevname(rdev->bdev,b));
                return -EINVAL;
        }
        rdev->preferred_minor = 0xffff;
        rdev->data_offset = le64_to_cpu(sb->data_offset);

        rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
        bmask = block_size(rdev->bdev)-1;
        if (rdev->sb_size & bmask)
                rdev->sb_size = (rdev->sb_size | bmask)+1;

        if (refdev == 0)
                return 1;
        else {
                __u64 ev1, ev2;
                struct mdp_superblock_1 *refsb =
                        (struct mdp_superblock_1*)page_address(refdev->sb_page);

                if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
                    sb->level != refsb->level ||
                    sb->layout != refsb->layout ||
                    sb->chunksize != refsb->chunksize) {
                        printk(KERN_WARNING "md: %s has strangely different"
                                " superblock to %s\n",
                                bdevname(rdev->bdev,b),
                                bdevname(refdev->bdev,b2));
                        return -EINVAL;
                }
                ev1 = le64_to_cpu(sb->events);
                ev2 = le64_to_cpu(refsb->events);

                if (ev1 > ev2)
                        return 1;
        }
        if (minor_version)
                rdev->size = ((rdev->bdev->bd_inode->i_size>>9) - le64_to_cpu(sb->data_offset)) / 2;
        else
                rdev->size = rdev->sb_offset;
        if (rdev->size < le64_to_cpu(sb->data_size)/2)
                return -EINVAL;
        rdev->size = le64_to_cpu(sb->data_size)/2;
        if (le32_to_cpu(sb->chunksize))
                rdev->size &= ~((sector_t)le32_to_cpu(sb->chunksize)/2 - 1);
        return 0;
}

static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
{
        struct mdp_superblock_1 *sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);

        rdev->raid_disk = -1;
        rdev->in_sync = 0;
        if (mddev->raid_disks == 0) {
                mddev->major_version = 1;
                mddev->patch_version = 0;
                mddev->persistent = 1;
                mddev->chunk_size = le32_to_cpu(sb->chunksize) << 9;
                mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1);
                mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1);
                mddev->level = le32_to_cpu(sb->level);
                mddev->layout = le32_to_cpu(sb->layout);
                mddev->raid_disks = le32_to_cpu(sb->raid_disks);
                mddev->size = le64_to_cpu(sb->size)/2;
                mddev->events = le64_to_cpu(sb->events);
                mddev->bitmap_offset = 0;
                mddev->default_bitmap_offset = 0;
                if (mddev->minor_version == 0)
                        mddev->default_bitmap_offset = -(64*1024)/512;

                mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
                memcpy(mddev->uuid, sb->set_uuid, 16);

                mddev->max_disks = (4096-256)/2;

                if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
                    mddev->bitmap_file == NULL) {
                        if (mddev->level != 1) {
                                printk(KERN_WARNING "md: bitmaps only supported for raid1\n");
                                return -EINVAL;
                        }
                        mddev->bitmap_offset = (__s32)le32_to_cpu(sb->bitmap_offset);
                }
        } else if (mddev->pers == NULL) {
                /* Insist on good event counter while assembling */
                __u64 ev1 = le64_to_cpu(sb->events);
                ++ev1;
                if (ev1 < mddev->events)
                        return -EINVAL;
        } else if (mddev->bitmap) {
                /* If adding to array with a bitmap, then we can accept an
                 * older device, but not too old.
                 */
                __u64 ev1 = le64_to_cpu(sb->events);
                if (ev1 < mddev->bitmap->events_cleared)
                        return 0;
        } else /* just a hot-add of a new device, leave raid_disk at -1 */
                return 0;

        if (mddev->level != LEVEL_MULTIPATH) {
                int role;
                rdev->desc_nr = le32_to_cpu(sb->dev_number);
                role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
                switch(role) {
                case 0xffff: /* spare */
                        rdev->faulty = 0;
                        break;
                case 0xfffe: /* faulty */
                        rdev->faulty = 1;
                        break;
                default:
                        rdev->in_sync = 1;
                        rdev->faulty = 0;
                        rdev->raid_disk = role;
                        break;
                }
                rdev->flags = 0;
                if (sb->devflags & WriteMostly1)
                        set_bit(WriteMostly, &rdev->flags);
        } else /* MULTIPATH are always insync */
                rdev->in_sync = 1;

        return 0;
}

static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
{
        struct mdp_superblock_1 *sb;
        struct list_head *tmp;
        mdk_rdev_t *rdev2;
        int max_dev, i;
        /* make rdev->sb match mddev and rdev data. */

        sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);

        sb->feature_map = 0;
        sb->pad0 = 0;
        memset(sb->pad1, 0, sizeof(sb->pad1));
        memset(sb->pad2, 0, sizeof(sb->pad2));
        memset(sb->pad3, 0, sizeof(sb->pad3));

        sb->utime = cpu_to_le64((__u64)mddev->utime);
        sb->events = cpu_to_le64(mddev->events);
        if (mddev->in_sync)
                sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
        else
                sb->resync_offset = cpu_to_le64(0);

        if (mddev->bitmap && mddev->bitmap_file == NULL) {
                sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_offset);
                sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
        }

        max_dev = 0;
        ITERATE_RDEV(mddev,rdev2,tmp)
                if (rdev2->desc_nr+1 > max_dev)
                        max_dev = rdev2->desc_nr+1;

        sb->max_dev = cpu_to_le32(max_dev);
        for (i=0; i<max_dev;i++)
                sb->dev_roles[i] = cpu_to_le16(0xfffe);

        ITERATE_RDEV(mddev,rdev2,tmp) {
                i = rdev2->desc_nr;
                if (rdev2->faulty)
                        sb->dev_roles[i] = cpu_to_le16(0xfffe);
                else if (rdev2->in_sync)
                        sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
                else
                        sb->dev_roles[i] = cpu_to_le16(0xffff);
        }

        sb->recovery_offset = cpu_to_le64(0); /* not supported yet */
        sb->sb_csum = calc_sb_1_csum(sb);
}


static struct super_type super_types[] = {
        [0] = {
                .name   = "0.90.0",
                .owner  = THIS_MODULE,
                .load_super     = super_90_load,
                .validate_super = super_90_validate,
                .sync_super     = super_90_sync,
        },
        [1] = {
                .name   = "md-1",
                .owner  = THIS_MODULE,
                .load_super     = super_1_load,
                .validate_super = super_1_validate,
                .sync_super     = super_1_sync,
        },
};
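
/*
 * Illustrative sketch (not compiled): all superblock handling is
 * dispatched through this table, as analyze_sbs() does further down;
 * mddev->major_version selects the format.
 */
#if 0
err = super_types[mddev->major_version].load_super(rdev, refdev,
                                                   mddev->minor_version);
#endif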

static mdk_rdev_t * match_dev_unit(mddev_t *mddev, mdk_rdev_t *dev)
{
        struct list_head *tmp;
        mdk_rdev_t *rdev;

        ITERATE_RDEV(mddev,rdev,tmp)
                if (rdev->bdev->bd_contains == dev->bdev->bd_contains)
                        return rdev;

        return NULL;
}

static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2)
{
        struct list_head *tmp;
        mdk_rdev_t *rdev;

        ITERATE_RDEV(mddev1,rdev,tmp)
                if (match_dev_unit(mddev2, rdev))
                        return 1;

        return 0;
}

static LIST_HEAD(pending_raid_disks);

static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
{
        mdk_rdev_t *same_pdev;
        char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];

        if (rdev->mddev) {
                MD_BUG();
                return -EINVAL;
        }
        same_pdev = match_dev_unit(mddev, rdev);
        if (same_pdev)
                printk(KERN_WARNING
                        "%s: WARNING: %s appears to be on the same physical"
                        " disk as %s. True\n     protection against single-disk"
                        " failure might be compromised.\n",
                        mdname(mddev), bdevname(rdev->bdev,b),
                        bdevname(same_pdev->bdev,b2));

        /* Verify rdev->desc_nr is unique.
         * If it is -1, assign a free number, else
         * check number is not in use
         */
        if (rdev->desc_nr < 0) {
                int choice = 0;
                if (mddev->pers) choice = mddev->raid_disks;
                while (find_rdev_nr(mddev, choice))
                        choice++;
                rdev->desc_nr = choice;
        } else {
                if (find_rdev_nr(mddev, rdev->desc_nr))
                        return -EBUSY;
        }

        list_add(&rdev->same_set, &mddev->disks);
        rdev->mddev = mddev;
        printk(KERN_INFO "md: bind<%s>\n", bdevname(rdev->bdev,b));
        return 0;
}

static void unbind_rdev_from_array(mdk_rdev_t * rdev)
{
        char b[BDEVNAME_SIZE];
        if (!rdev->mddev) {
                MD_BUG();
                return;
        }
        list_del_init(&rdev->same_set);
        printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b));
        rdev->mddev = NULL;
}

/*
 * prevent the device from being mounted, repartitioned or
 * otherwise reused by a RAID array (or any other kernel
 * subsystem), by bd_claiming the device.
 */
static int lock_rdev(mdk_rdev_t *rdev, dev_t dev)
{
        int err = 0;
        struct block_device *bdev;
        char b[BDEVNAME_SIZE];

        bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE);
        if (IS_ERR(bdev)) {
                printk(KERN_ERR "md: could not open %s.\n",
                        __bdevname(dev, b));
                return PTR_ERR(bdev);
        }
        err = bd_claim(bdev, rdev);
        if (err) {
                printk(KERN_ERR "md: could not bd_claim %s.\n",
                        bdevname(bdev, b));
                blkdev_put(bdev);
                return err;
        }
        rdev->bdev = bdev;
        return err;
}

static void unlock_rdev(mdk_rdev_t *rdev)
{
        struct block_device *bdev = rdev->bdev;
        rdev->bdev = NULL;
        if (!bdev)
                MD_BUG();
        bd_release(bdev);
        blkdev_put(bdev);
}

void md_autodetect_dev(dev_t dev);

static void export_rdev(mdk_rdev_t * rdev)
{
        char b[BDEVNAME_SIZE];
        printk(KERN_INFO "md: export_rdev(%s)\n",
                bdevname(rdev->bdev,b));
        if (rdev->mddev)
                MD_BUG();
        free_disk_sb(rdev);
        list_del_init(&rdev->same_set);
#ifndef MODULE
        md_autodetect_dev(rdev->bdev->bd_dev);
#endif
        unlock_rdev(rdev);
        kfree(rdev);
}

static void kick_rdev_from_array(mdk_rdev_t * rdev)
{
        unbind_rdev_from_array(rdev);
        export_rdev(rdev);
}

static void export_array(mddev_t *mddev)
{
        struct list_head *tmp;
        mdk_rdev_t *rdev;

        ITERATE_RDEV(mddev,rdev,tmp) {
                if (!rdev->mddev) {
                        MD_BUG();
                        continue;
                }
                kick_rdev_from_array(rdev);
        }
        if (!list_empty(&mddev->disks))
                MD_BUG();
        mddev->raid_disks = 0;
        mddev->major_version = 0;
}

static void print_desc(mdp_disk_t *desc)
{
        printk(" DISK<N:%d,(%d,%d),R:%d,S:%d>\n", desc->number,
                desc->major,desc->minor,desc->raid_disk,desc->state);
}

static void print_sb(mdp_super_t *sb)
{
        int i;

        printk(KERN_INFO
                "md:  SB: (V:%d.%d.%d) ID:<%08x.%08x.%08x.%08x> CT:%08x\n",
                sb->major_version, sb->minor_version, sb->patch_version,
                sb->set_uuid0, sb->set_uuid1, sb->set_uuid2, sb->set_uuid3,
                sb->ctime);
        printk(KERN_INFO "md:     L%d S%08d ND:%d RD:%d md%d LO:%d CS:%d\n",
                sb->level, sb->size, sb->nr_disks, sb->raid_disks,
                sb->md_minor, sb->layout, sb->chunk_size);
        printk(KERN_INFO "md:     UT:%08x ST:%d AD:%d WD:%d"
                " FD:%d SD:%d CSUM:%08x E:%08lx\n",
                sb->utime, sb->state, sb->active_disks, sb->working_disks,
                sb->failed_disks, sb->spare_disks,
                sb->sb_csum, (unsigned long)sb->events_lo);

        printk(KERN_INFO);
        for (i = 0; i < MD_SB_DISKS; i++) {
                mdp_disk_t *desc;

                desc = sb->disks + i;
                if (desc->number || desc->major || desc->minor ||
                    desc->raid_disk || (desc->state && (desc->state != 4))) {
                        printk("     D %2d: ", i);
                        print_desc(desc);
                }
        }
        printk(KERN_INFO "md:     THIS: ");
        print_desc(&sb->this_disk);

}

static void print_rdev(mdk_rdev_t *rdev)
{
        char b[BDEVNAME_SIZE];
        printk(KERN_INFO "md: rdev %s, SZ:%08llu F:%d S:%d DN:%u\n",
                bdevname(rdev->bdev,b), (unsigned long long)rdev->size,
                rdev->faulty, rdev->in_sync, rdev->desc_nr);
        if (rdev->sb_loaded) {
                printk(KERN_INFO "md: rdev superblock:\n");
                print_sb((mdp_super_t*)page_address(rdev->sb_page));
        } else
                printk(KERN_INFO "md: no rdev superblock!\n");
}

void md_print_devices(void)
{
        struct list_head *tmp, *tmp2;
        mdk_rdev_t *rdev;
        mddev_t *mddev;
        char b[BDEVNAME_SIZE];

        printk("\n");
        printk("md:     **********************************\n");
        printk("md:     * <COMPLETE RAID STATE PRINTOUT> *\n");
        printk("md:     **********************************\n");
        ITERATE_MDDEV(mddev,tmp) {

                if (mddev->bitmap)
                        bitmap_print_sb(mddev->bitmap);
                else
                        printk("%s: ", mdname(mddev));
                ITERATE_RDEV(mddev,rdev,tmp2)
                        printk("<%s>", bdevname(rdev->bdev,b));
                printk("\n");

                ITERATE_RDEV(mddev,rdev,tmp2)
                        print_rdev(rdev);
        }
        printk("md:     **********************************\n");
        printk("\n");
}


static void sync_sbs(mddev_t * mddev)
{
        mdk_rdev_t *rdev;
        struct list_head *tmp;

        ITERATE_RDEV(mddev,rdev,tmp) {
                super_types[mddev->major_version].
                        sync_super(mddev, rdev);
                rdev->sb_loaded = 1;
        }
}

static void md_update_sb(mddev_t * mddev)
{
        int err;
        struct list_head *tmp;
        mdk_rdev_t *rdev;
        int sync_req;

repeat:
        spin_lock(&mddev->write_lock);
        sync_req = mddev->in_sync;
        mddev->utime = get_seconds();
        mddev->events ++;

        if (!mddev->events) {
                /*
                 * oops, this 64-bit counter should never wrap.
                 * Either we are in around ~1 trillion A.C., assuming
                 * 1 reboot per second, or we have a bug:
                 */
                MD_BUG();
                mddev->events --;
        }
        mddev->sb_dirty = 2;
        sync_sbs(mddev);

        /*
         * do not write anything to disk if using
         * nonpersistent superblocks
         */
        if (!mddev->persistent) {
                mddev->sb_dirty = 0;
                spin_unlock(&mddev->write_lock);
                wake_up(&mddev->sb_wait);
                return;
        }
        spin_unlock(&mddev->write_lock);

        dprintk(KERN_INFO
                "md: updating %s RAID superblock on device (in sync %d)\n",
                mdname(mddev),mddev->in_sync);

        err = bitmap_update_sb(mddev->bitmap);
        ITERATE_RDEV(mddev,rdev,tmp) {
                char b[BDEVNAME_SIZE];
                dprintk(KERN_INFO "md: ");
                if (rdev->faulty)
                        dprintk("(skipping faulty ");

                dprintk("%s ", bdevname(rdev->bdev,b));
                if (!rdev->faulty) {
                        md_super_write(mddev,rdev,
                                       rdev->sb_offset<<1, rdev->sb_size,
                                       rdev->sb_page);
                        dprintk(KERN_INFO "(write) %s's sb offset: %llu\n",
                                bdevname(rdev->bdev,b),
                                (unsigned long long)rdev->sb_offset);

                } else
                        dprintk(")\n");
                if (mddev->level == LEVEL_MULTIPATH)
                        /* only need to write one superblock... */
                        break;
        }
        wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes)==0);
        /* if there was a failure, sb_dirty was set to 1, and we re-write super */

        spin_lock(&mddev->write_lock);
        if (mddev->in_sync != sync_req || mddev->sb_dirty == 1) {
                /* have to write it out again */
                spin_unlock(&mddev->write_lock);
                goto repeat;
        }
        mddev->sb_dirty = 0;
        spin_unlock(&mddev->write_lock);
        wake_up(&mddev->sb_wait);

}

/*
 * Import a device. If 'super_format' >= 0, then sanity check the superblock
 *
 * mark the device faulty if:
 *
 *   - the device is nonexistent (zero size)
 *   - the device has no valid superblock
 *
 * a faulty rdev _never_ has rdev->sb set.
 */
static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_minor)
{
        char b[BDEVNAME_SIZE];
        int err;
        mdk_rdev_t *rdev;
        sector_t size;

        rdev = (mdk_rdev_t *) kmalloc(sizeof(*rdev), GFP_KERNEL);
        if (!rdev) {
                printk(KERN_ERR "md: could not alloc mem for new device!\n");
                return ERR_PTR(-ENOMEM);
        }
        memset(rdev, 0, sizeof(*rdev));

        if ((err = alloc_disk_sb(rdev)))
                goto abort_free;

        err = lock_rdev(rdev, newdev);
        if (err)
                goto abort_free;

        rdev->desc_nr = -1;
        rdev->faulty = 0;
        rdev->in_sync = 0;
        rdev->data_offset = 0;
        atomic_set(&rdev->nr_pending, 0);

        size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
        if (!size) {
                printk(KERN_WARNING
                        "md: %s has zero or unknown size, marking faulty!\n",
                        bdevname(rdev->bdev,b));
                err = -EINVAL;
                goto abort_free;
        }

        if (super_format >= 0) {
                err = super_types[super_format].
                        load_super(rdev, NULL, super_minor);
                if (err == -EINVAL) {
                        printk(KERN_WARNING
                                "md: %s has invalid sb, not importing!\n",
                                bdevname(rdev->bdev,b));
                        goto abort_free;
                }
                if (err < 0) {
                        printk(KERN_WARNING
                                "md: could not read %s's sb, not importing!\n",
                                bdevname(rdev->bdev,b));
                        goto abort_free;
                }
        }
        INIT_LIST_HEAD(&rdev->same_set);

        return rdev;

abort_free:
        if (rdev->sb_page) {
                if (rdev->bdev)
                        unlock_rdev(rdev);
                free_disk_sb(rdev);
        }
        kfree(rdev);
        return ERR_PTR(err);
}

/*
 * Check a full RAID array for plausibility
 */


static void analyze_sbs(mddev_t * mddev)
{
        int i;
        struct list_head *tmp;
        mdk_rdev_t *rdev, *freshest;
        char b[BDEVNAME_SIZE];

        freshest = NULL;
        ITERATE_RDEV(mddev,rdev,tmp)
                switch (super_types[mddev->major_version].
                        load_super(rdev, freshest, mddev->minor_version)) {
                case 1:
                        freshest = rdev;
                        break;
                case 0:
                        break;
                default:
                        printk(KERN_ERR
                                "md: fatal superblock inconsistency in %s"
                                " -- removing from array\n",
                                bdevname(rdev->bdev,b));
                        kick_rdev_from_array(rdev);
                }


        super_types[mddev->major_version].
                validate_super(mddev, freshest);

        i = 0;
        ITERATE_RDEV(mddev,rdev,tmp) {
                if (rdev != freshest)
                        if (super_types[mddev->major_version].
                            validate_super(mddev, rdev)) {
                                printk(KERN_WARNING "md: kicking non-fresh %s"
                                        " from array!\n",
                                        bdevname(rdev->bdev,b));
                                kick_rdev_from_array(rdev);
                                continue;
                        }
                if (mddev->level == LEVEL_MULTIPATH) {
                        rdev->desc_nr = i++;
                        rdev->raid_disk = rdev->desc_nr;
                        rdev->in_sync = 1;
                }
        }



        if (mddev->recovery_cp != MaxSector &&
            mddev->level >= 1)
                printk(KERN_ERR "md: %s: raid array is not clean"
                       " -- starting background reconstruction\n",
                       mdname(mddev));

}
1552 int mdp_major = 0;
1553
1554 static struct kobject *md_probe(dev_t dev, int *part, void *data)
1555 {
1556         static DECLARE_MUTEX(disks_sem);
1557         mddev_t *mddev = mddev_find(dev);
1558         struct gendisk *disk;
1559         int partitioned = (MAJOR(dev) != MD_MAJOR);
1560         int shift = partitioned ? MdpMinorShift : 0;
1561         int unit = MINOR(dev) >> shift;
1562
1563         if (!mddev)
1564                 return NULL;
1565
1566         down(&disks_sem);
1567         if (mddev->gendisk) {
1568                 up(&disks_sem);
1569                 mddev_put(mddev);
1570                 return NULL;
1571         }
1572         disk = alloc_disk(1 << shift);
1573         if (!disk) {
1574                 up(&disks_sem);
1575                 mddev_put(mddev);
1576                 return NULL;
1577         }
1578         disk->major = MAJOR(dev);
1579         disk->first_minor = unit << shift;
1580         if (partitioned) {
1581                 sprintf(disk->disk_name, "md_d%d", unit);
1582                 sprintf(disk->devfs_name, "md/d%d", unit);
1583         } else {
1584                 sprintf(disk->disk_name, "md%d", unit);
1585                 sprintf(disk->devfs_name, "md/%d", unit);
1586         }
1587         disk->fops = &md_fops;
1588         disk->private_data = mddev;
1589         disk->queue = mddev->queue;
1590         add_disk(disk);
1591         mddev->gendisk = disk;
1592         up(&disks_sem);
1593         return NULL;
1594 }
1595
1596 void md_wakeup_thread(mdk_thread_t *thread);
1597
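/* the safemode timer is re-armed as writes complete; when it finally
 * fires, the md thread gets a chance to mark the array clean again
 */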
1598 static void md_safemode_timeout(unsigned long data)
1599 {
1600         mddev_t *mddev = (mddev_t *) data;
1601
1602         mddev->safemode = 1;
1603         md_wakeup_thread(mddev->thread);
1604 }
1605
1606
1607 static int do_md_run(mddev_t * mddev)
1608 {
1609         int pnum, err;
1610         int chunk_size;
1611         struct list_head *tmp;
1612         mdk_rdev_t *rdev;
1613         struct gendisk *disk;
1614         char b[BDEVNAME_SIZE];
1615
1616         if (list_empty(&mddev->disks))
1617                 /* cannot run an array with no devices.. */
1618                 return -EINVAL;
1619
1620         if (mddev->pers)
1621                 return -EBUSY;
1622
1623         /*
1624          * Analyze all RAID superblock(s)
1625          */
1626         if (!mddev->raid_disks)
1627                 analyze_sbs(mddev);
1628
1629         chunk_size = mddev->chunk_size;
1630         pnum = level_to_pers(mddev->level);
1631
1632         if ((pnum != MULTIPATH) && (pnum != RAID1)) {
1633                 if (!chunk_size) {
1634                         /*
1635                          * 'default chunksize' in the old md code used to
1636                          * be PAGE_SIZE, baaad.
1637                          * we abort here to be on the safe side. We don't
1638                          * want to continue the bad practice.
1639                          */
1640                         printk(KERN_ERR 
1641                                 "no chunksize specified, see 'man raidtab'\n");
1642                         return -EINVAL;
1643                 }
1644                 if (chunk_size > MAX_CHUNK_SIZE) {
1645                         printk(KERN_ERR "too big chunk_size: %d > %d\n",
1646                                 chunk_size, MAX_CHUNK_SIZE);
1647                         return -EINVAL;
1648                 }
1649                 /*
                 * chunk-size has to be a power of 2 and a multiple of PAGE_SIZE
1651                  */
1652                 if ( (1 << ffz(~chunk_size)) != chunk_size) {
1653                         printk(KERN_ERR "chunk_size of %d not valid\n", chunk_size);
1654                         return -EINVAL;
1655                 }
1656                 if (chunk_size < PAGE_SIZE) {
1657                         printk(KERN_ERR "too small chunk_size: %d < %ld\n",
1658                                 chunk_size, PAGE_SIZE);
1659                         return -EINVAL;
1660                 }
1661
1662                 /* devices must have minimum size of one chunk */
1663                 ITERATE_RDEV(mddev,rdev,tmp) {
1664                         if (rdev->faulty)
1665                                 continue;
1666                         if (rdev->size < chunk_size / 1024) {
1667                                 printk(KERN_WARNING
1668                                         "md: Dev %s smaller than chunk_size:"
1669                                         " %lluk < %dk\n",
1670                                         bdevname(rdev->bdev,b),
1671                                         (unsigned long long)rdev->size,
1672                                         chunk_size / 1024);
1673                                 return -EINVAL;
1674                         }
1675                 }
1676         }
1677
1678 #ifdef CONFIG_KMOD
        if (!pers[pnum])
                request_module("md-personality-%d", pnum);
1683 #endif
1684
1685         /*
1686          * Drop all container device buffers, from now on
1687          * the only valid external interface is through the md
1688          * device.
1689          * Also find largest hardsector size
1690          */
1691         ITERATE_RDEV(mddev,rdev,tmp) {
1692                 if (rdev->faulty)
1693                         continue;
1694                 sync_blockdev(rdev->bdev);
1695                 invalidate_bdev(rdev->bdev, 0);
1696         }
1697
1698         md_probe(mddev->unit, NULL, NULL);
1699         disk = mddev->gendisk;
1700         if (!disk)
1701                 return -ENOMEM;
1702
1703         spin_lock(&pers_lock);
1704         if (!pers[pnum] || !try_module_get(pers[pnum]->owner)) {
1705                 spin_unlock(&pers_lock);
1706                 printk(KERN_WARNING "md: personality %d is not loaded!\n",
1707                        pnum);
1708                 return -EINVAL;
1709         }
1710
1711         mddev->pers = pers[pnum];
1712         spin_unlock(&pers_lock);
1713
1714         mddev->recovery = 0;
        mddev->resync_max_sectors = mddev->size << 1; /* may be overridden by personality */
1716
1717         /* before we start the array running, initialise the bitmap */
1718         err = bitmap_create(mddev);
1719         if (err)
1720                 printk(KERN_ERR "%s: failed to create bitmap (%d)\n",
1721                         mdname(mddev), err);
1722         else
1723                 err = mddev->pers->run(mddev);
1724         if (err) {
1725                 printk(KERN_ERR "md: pers->run() failed ...\n");
1726                 module_put(mddev->pers->owner);
1727                 mddev->pers = NULL;
1728                 bitmap_destroy(mddev);
1729                 return err;
1730         }
1731         atomic_set(&mddev->writes_pending,0);
1732         mddev->safemode = 0;
1733         mddev->safemode_timer.function = md_safemode_timeout;
1734         mddev->safemode_timer.data = (unsigned long) mddev;
1735         mddev->safemode_delay = (20 * HZ)/1000 +1; /* 20 msec delay */
1736         mddev->in_sync = 1;
1737         
1738         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
1739         md_wakeup_thread(mddev->thread);
1740         
1741         if (mddev->sb_dirty)
1742                 md_update_sb(mddev);
1743
1744         set_capacity(disk, mddev->array_size<<1);
1745
1746         /* If we call blk_queue_make_request here, it will
1747          * re-initialise max_sectors etc which may have been
         * refined inside ->run().  So just set the bits we need to set.
         * Most initialisation happened when we called
1750          * blk_queue_make_request(..., md_fail_request)
1751          * earlier.
1752          */
1753         mddev->queue->queuedata = mddev;
1754         mddev->queue->make_request_fn = mddev->pers->make_request;
1755
1756         mddev->changed = 1;
1757         return 0;
1758 }
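/* do_md_run() is normally reached through the RUN_ARRAY ioctl from
 * userspace (mdadm/raidtools).  A minimal, hypothetical sketch of that
 * call - names invented for illustration, never compiled into the driver:
 */
#if 0
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/raid/md_u.h>

static int start_md_array(const char *node)	/* e.g. "/dev/md0" */
{
	int fd = open(node, O_RDWR);
	int err;

	if (fd < 0)
		return -1;
	/* component devices must already be attached via ADD_NEW_DISK */
	err = ioctl(fd, RUN_ARRAY, 0);
	close(fd);
	return err;
}
#endif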
1759
1760 static int restart_array(mddev_t *mddev)
1761 {
1762         struct gendisk *disk = mddev->gendisk;
1763         int err;
1764
1765         /*
1766          * Complain if it has no devices
1767          */
1768         err = -ENXIO;
1769         if (list_empty(&mddev->disks))
1770                 goto out;
1771
1772         if (mddev->pers) {
1773                 err = -EBUSY;
1774                 if (!mddev->ro)
1775                         goto out;
1776
1777                 mddev->safemode = 0;
1778                 mddev->ro = 0;
1779                 set_disk_ro(disk, 0);
1780
1781                 printk(KERN_INFO "md: %s switched to read-write mode.\n",
1782                         mdname(mddev));
1783                 /*
1784                  * Kick recovery or resync if necessary
1785                  */
1786                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
1787                 md_wakeup_thread(mddev->thread);
1788                 err = 0;
1789         } else {
1790                 printk(KERN_ERR "md: %s has no personality assigned.\n",
1791                         mdname(mddev));
1792                 err = -EINVAL;
1793         }
1794
1795 out:
1796         return err;
1797 }
1798
1799 static int do_md_stop(mddev_t * mddev, int ro)
1800 {
1801         int err = 0;
1802         struct gendisk *disk = mddev->gendisk;
1803
1804         if (mddev->pers) {
1805                 if (atomic_read(&mddev->active)>2) {
1806                         printk("md: %s still in use.\n",mdname(mddev));
1807                         return -EBUSY;
1808                 }
1809
1810                 if (mddev->sync_thread) {
1811                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1812                         md_unregister_thread(mddev->sync_thread);
1813                         mddev->sync_thread = NULL;
1814                 }
1815
1816                 del_timer_sync(&mddev->safemode_timer);
1817
1818                 invalidate_partition(disk, 0);
1819
1820                 if (ro) {
1821                         err  = -ENXIO;
1822                         if (mddev->ro)
1823                                 goto out;
1824                         mddev->ro = 1;
1825                 } else {
1826                         bitmap_flush(mddev);
1827                         wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes)==0);
1828                         if (mddev->ro)
1829                                 set_disk_ro(disk, 0);
1830                         blk_queue_make_request(mddev->queue, md_fail_request);
1831                         mddev->pers->stop(mddev);
1832                         module_put(mddev->pers->owner);
1833                         mddev->pers = NULL;
1834                         if (mddev->ro)
1835                                 mddev->ro = 0;
1836                 }
1837                 if (!mddev->in_sync) {
1838                         /* mark array as shutdown cleanly */
1839                         mddev->in_sync = 1;
1840                         md_update_sb(mddev);
1841                 }
1842                 if (ro)
1843                         set_disk_ro(disk, 1);
1844         }
1845
1846         bitmap_destroy(mddev);
1847         if (mddev->bitmap_file) {
1848                 atomic_set(&mddev->bitmap_file->f_dentry->d_inode->i_writecount, 1);
1849                 fput(mddev->bitmap_file);
1850                 mddev->bitmap_file = NULL;
1851         }
1852         mddev->bitmap_offset = 0;
1853
1854         /*
1855          * Free resources if final stop
1856          */
1857         if (!ro) {
1858                 struct gendisk *disk;
1859                 printk(KERN_INFO "md: %s stopped.\n", mdname(mddev));
1860
1861                 export_array(mddev);
1862
1863                 mddev->array_size = 0;
1864                 disk = mddev->gendisk;
1865                 if (disk)
1866                         set_capacity(disk, 0);
1867                 mddev->changed = 1;
1868         } else
1869                 printk(KERN_INFO "md: %s switched to read-only mode.\n",
1870                         mdname(mddev));
1871         err = 0;
1872 out:
1873         return err;
1874 }
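/* do_md_stop() backs both STOP_ARRAY (ro == 0, full stop) and
 * STOP_ARRAY_RO (ro == 1, switch to read-only).  Hypothetical sketch,
 * illustrative only:
 */
#if 0
#include <sys/ioctl.h>
#include <linux/raid/md_u.h>

static int stop_md_array(int md_fd, int read_only)
{
	return ioctl(md_fd, read_only ? STOP_ARRAY_RO : STOP_ARRAY, 0);
}
#endif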
1875
1876 static void autorun_array(mddev_t *mddev)
1877 {
1878         mdk_rdev_t *rdev;
1879         struct list_head *tmp;
1880         int err;
1881
1882         if (list_empty(&mddev->disks))
1883                 return;
1884
1885         printk(KERN_INFO "md: running: ");
1886
1887         ITERATE_RDEV(mddev,rdev,tmp) {
1888                 char b[BDEVNAME_SIZE];
1889                 printk("<%s>", bdevname(rdev->bdev,b));
1890         }
1891         printk("\n");
1892
1893         err = do_md_run (mddev);
1894         if (err) {
1895                 printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
1896                 do_md_stop (mddev, 0);
1897         }
1898 }
1899
1900 /*
 * let's try to run arrays based on all disks that have arrived
1902  * until now. (those are in pending_raid_disks)
1903  *
1904  * the method: pick the first pending disk, collect all disks with
1905  * the same UUID, remove all from the pending list and put them into
1906  * the 'same_array' list. Then order this list based on superblock
1907  * update time (freshest comes first), kick out 'old' disks and
1908  * compare superblocks. If everything's fine then run it.
1909  *
1910  * If "unit" is allocated, then bump its reference count
1911  */
1912 static void autorun_devices(int part)
1913 {
1914         struct list_head candidates;
1915         struct list_head *tmp;
1916         mdk_rdev_t *rdev0, *rdev;
1917         mddev_t *mddev;
1918         char b[BDEVNAME_SIZE];
1919
1920         printk(KERN_INFO "md: autorun ...\n");
1921         while (!list_empty(&pending_raid_disks)) {
1922                 dev_t dev;
1923                 rdev0 = list_entry(pending_raid_disks.next,
1924                                          mdk_rdev_t, same_set);
1925
1926                 printk(KERN_INFO "md: considering %s ...\n",
1927                         bdevname(rdev0->bdev,b));
1928                 INIT_LIST_HEAD(&candidates);
1929                 ITERATE_RDEV_PENDING(rdev,tmp)
1930                         if (super_90_load(rdev, rdev0, 0) >= 0) {
1931                                 printk(KERN_INFO "md:  adding %s ...\n",
1932                                         bdevname(rdev->bdev,b));
1933                                 list_move(&rdev->same_set, &candidates);
1934                         }
1935                 /*
1936                  * now we have a set of devices, with all of them having
1937                  * mostly sane superblocks. It's time to allocate the
1938                  * mddev.
1939                  */
1940                 if (rdev0->preferred_minor < 0 || rdev0->preferred_minor >= MAX_MD_DEVS) {
1941                         printk(KERN_INFO "md: unit number in %s is bad: %d\n",
1942                                bdevname(rdev0->bdev, b), rdev0->preferred_minor);
1943                         break;
1944                 }
1945                 if (part)
1946                         dev = MKDEV(mdp_major,
1947                                     rdev0->preferred_minor << MdpMinorShift);
1948                 else
1949                         dev = MKDEV(MD_MAJOR, rdev0->preferred_minor);
1950
1951                 md_probe(dev, NULL, NULL);
1952                 mddev = mddev_find(dev);
1953                 if (!mddev) {
1954                         printk(KERN_ERR 
1955                                 "md: cannot allocate memory for md drive.\n");
1956                         break;
1957                 }
1958                 if (mddev_lock(mddev)) 
1959                         printk(KERN_WARNING "md: %s locked, cannot run\n",
1960                                mdname(mddev));
1961                 else if (mddev->raid_disks || mddev->major_version
1962                          || !list_empty(&mddev->disks)) {
1963                         printk(KERN_WARNING 
1964                                 "md: %s already running, cannot run %s\n",
1965                                 mdname(mddev), bdevname(rdev0->bdev,b));
1966                         mddev_unlock(mddev);
1967                 } else {
1968                         printk(KERN_INFO "md: created %s\n", mdname(mddev));
1969                         ITERATE_RDEV_GENERIC(candidates,rdev,tmp) {
1970                                 list_del_init(&rdev->same_set);
1971                                 if (bind_rdev_to_array(rdev, mddev))
1972                                         export_rdev(rdev);
1973                         }
1974                         autorun_array(mddev);
1975                         mddev_unlock(mddev);
1976                 }
                /* on success, the candidates list will be empty, on
                 * error it won't...
1979                  */
1980                 ITERATE_RDEV_GENERIC(candidates,rdev,tmp)
1981                         export_rdev(rdev);
1982                 mddev_put(mddev);
1983         }
1984         printk(KERN_INFO "md: ... autorun DONE.\n");
1985 }
1986
1987 /*
1988  * import RAID devices based on one partition
1989  * if possible, the array gets run as well.
1990  */
1991
1992 static int autostart_array(dev_t startdev)
1993 {
1994         char b[BDEVNAME_SIZE];
1995         int err = -EINVAL, i;
1996         mdp_super_t *sb = NULL;
1997         mdk_rdev_t *start_rdev = NULL, *rdev;
1998
1999         start_rdev = md_import_device(startdev, 0, 0);
2000         if (IS_ERR(start_rdev))
2001                 return err;
2002
2003
2004         /* NOTE: this can only work for 0.90.0 superblocks */
2005         sb = (mdp_super_t*)page_address(start_rdev->sb_page);
2006         if (sb->major_version != 0 ||
2007             sb->minor_version != 90 ) {
2008                 printk(KERN_WARNING "md: can only autostart 0.90.0 arrays\n");
2009                 export_rdev(start_rdev);
2010                 return err;
2011         }
2012
2013         if (start_rdev->faulty) {
2014                 printk(KERN_WARNING 
2015                         "md: can not autostart based on faulty %s!\n",
2016                         bdevname(start_rdev->bdev,b));
2017                 export_rdev(start_rdev);
2018                 return err;
2019         }
2020         list_add(&start_rdev->same_set, &pending_raid_disks);
2021
2022         for (i = 0; i < MD_SB_DISKS; i++) {
2023                 mdp_disk_t *desc = sb->disks + i;
2024                 dev_t dev = MKDEV(desc->major, desc->minor);
2025
2026                 if (!dev)
2027                         continue;
2028                 if (dev == startdev)
2029                         continue;
2030                 if (MAJOR(dev) != desc->major || MINOR(dev) != desc->minor)
2031                         continue;
2032                 rdev = md_import_device(dev, 0, 0);
2033                 if (IS_ERR(rdev))
2034                         continue;
2035
2036                 list_add(&rdev->same_set, &pending_raid_disks);
2037         }
2038
2039         /*
         * possibly return error codes from autorun_devices() here
2041          */
2042         autorun_devices(0);
2043         return 0;
2044
2045 }
2046
2047
2048 static int get_version(void __user * arg)
2049 {
2050         mdu_version_t ver;
2051
2052         ver.major = MD_MAJOR_VERSION;
2053         ver.minor = MD_MINOR_VERSION;
2054         ver.patchlevel = MD_PATCHLEVEL_VERSION;
2055
2056         if (copy_to_user(arg, &ver, sizeof(ver)))
2057                 return -EFAULT;
2058
2059         return 0;
2060 }
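/* RAID_VERSION reports the driver's ioctl interface version.
 * Hypothetical sketch, illustrative only:
 */
#if 0
#include <sys/ioctl.h>
#include <linux/raid/md_u.h>

static int md_iface_version(int md_fd, mdu_version_t *ver)
{
	return ioctl(md_fd, RAID_VERSION, ver);
}
#endif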
2061
2062 static int get_array_info(mddev_t * mddev, void __user * arg)
2063 {
2064         mdu_array_info_t info;
2065         int nr,working,active,failed,spare;
2066         mdk_rdev_t *rdev;
2067         struct list_head *tmp;
2068
2069         nr=working=active=failed=spare=0;
2070         ITERATE_RDEV(mddev,rdev,tmp) {
2071                 nr++;
2072                 if (rdev->faulty)
2073                         failed++;
2074                 else {
2075                         working++;
2076                         if (rdev->in_sync)
2077                                 active++;       
2078                         else
2079                                 spare++;
2080                 }
2081         }
2082
2083         info.major_version = mddev->major_version;
2084         info.minor_version = mddev->minor_version;
2085         info.patch_version = MD_PATCHLEVEL_VERSION;
2086         info.ctime         = mddev->ctime;
2087         info.level         = mddev->level;
2088         info.size          = mddev->size;
2089         info.nr_disks      = nr;
2090         info.raid_disks    = mddev->raid_disks;
2091         info.md_minor      = mddev->md_minor;
2092         info.not_persistent= !mddev->persistent;
2093
2094         info.utime         = mddev->utime;
2095         info.state         = 0;
2096         if (mddev->in_sync)
2097                 info.state = (1<<MD_SB_CLEAN);
2098         if (mddev->bitmap && mddev->bitmap_offset)
2099                 info.state = (1<<MD_SB_BITMAP_PRESENT);
2100         info.active_disks  = active;
2101         info.working_disks = working;
2102         info.failed_disks  = failed;
2103         info.spare_disks   = spare;
2104
2105         info.layout        = mddev->layout;
2106         info.chunk_size    = mddev->chunk_size;
2107
2108         if (copy_to_user(arg, &info, sizeof(info)))
2109                 return -EFAULT;
2110
2111         return 0;
2112 }
2113
2114 static int get_bitmap_file(mddev_t * mddev, void __user * arg)
2115 {
2116         mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */
2117         char *ptr, *buf = NULL;
2118         int err = -ENOMEM;
2119
2120         file = kmalloc(sizeof(*file), GFP_KERNEL);
2121         if (!file)
2122                 goto out;
2123
2124         /* bitmap disabled, zero the first byte and copy out */
2125         if (!mddev->bitmap || !mddev->bitmap->file) {
2126                 file->pathname[0] = '\0';
2127                 goto copy_out;
2128         }
2129
2130         buf = kmalloc(sizeof(file->pathname), GFP_KERNEL);
2131         if (!buf)
2132                 goto out;
2133
2134         ptr = file_path(mddev->bitmap->file, buf, sizeof(file->pathname));
2135         if (!ptr)
2136                 goto out;
2137
2138         strcpy(file->pathname, ptr);
2139
2140 copy_out:
2141         err = 0;
2142         if (copy_to_user(arg, file, sizeof(*file)))
2143                 err = -EFAULT;
2144 out:
2145         kfree(buf);
2146         kfree(file);
2147         return err;
2148 }
2149
2150 static int get_disk_info(mddev_t * mddev, void __user * arg)
2151 {
2152         mdu_disk_info_t info;
2153         unsigned int nr;
2154         mdk_rdev_t *rdev;
2155
2156         if (copy_from_user(&info, arg, sizeof(info)))
2157                 return -EFAULT;
2158
2159         nr = info.number;
2160
2161         rdev = find_rdev_nr(mddev, nr);
2162         if (rdev) {
2163                 info.major = MAJOR(rdev->bdev->bd_dev);
2164                 info.minor = MINOR(rdev->bdev->bd_dev);
2165                 info.raid_disk = rdev->raid_disk;
2166                 info.state = 0;
2167                 if (rdev->faulty)
2168                         info.state |= (1<<MD_DISK_FAULTY);
2169                 else if (rdev->in_sync) {
2170                         info.state |= (1<<MD_DISK_ACTIVE);
2171                         info.state |= (1<<MD_DISK_SYNC);
2172                 }
2173                 if (test_bit(WriteMostly, &rdev->flags))
2174                         info.state |= (1<<MD_DISK_WRITEMOSTLY);
2175         } else {
2176                 info.major = info.minor = 0;
2177                 info.raid_disk = -1;
2178                 info.state = (1<<MD_DISK_REMOVED);
2179         }
2180
2181         if (copy_to_user(arg, &info, sizeof(info)))
2182                 return -EFAULT;
2183
2184         return 0;
2185 }
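/* GET_ARRAY_INFO and GET_DISK_INFO together let userspace enumerate an
 * array, much as mdadm -D does.  Hypothetical sketch, illustrative only:
 */
#if 0
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/raid/md_p.h>	/* MD_SB_DISKS */
#include <linux/raid/md_u.h>

static void dump_array(int md_fd)
{
	mdu_array_info_t array;
	mdu_disk_info_t disk;
	int i;

	if (ioctl(md_fd, GET_ARRAY_INFO, &array) < 0)
		return;
	printf("level %d, %d raid disks\n", array.level, array.raid_disks);
	for (i = 0; i < MD_SB_DISKS; i++) {
		disk.number = i;
		if (ioctl(md_fd, GET_DISK_INFO, &disk) < 0)
			continue;
		if (disk.major || disk.minor)
			printf(" slot %d: dev %d:%d state %#x\n",
			       disk.raid_disk, disk.major, disk.minor,
			       disk.state);
	}
}
#endif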
2186
2187 static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
2188 {
2189         char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
2190         mdk_rdev_t *rdev;
2191         dev_t dev = MKDEV(info->major,info->minor);
2192
2193         if (info->major != MAJOR(dev) || info->minor != MINOR(dev))
2194                 return -EOVERFLOW;
2195
2196         if (!mddev->raid_disks) {
2197                 int err;
2198                 /* expecting a device which has a superblock */
2199                 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
2200                 if (IS_ERR(rdev)) {
2201                         printk(KERN_WARNING 
2202                                 "md: md_import_device returned %ld\n",
2203                                 PTR_ERR(rdev));
2204                         return PTR_ERR(rdev);
2205                 }
2206                 if (!list_empty(&mddev->disks)) {
2207                         mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
2208                                                         mdk_rdev_t, same_set);
2209                         int err = super_types[mddev->major_version]
2210                                 .load_super(rdev, rdev0, mddev->minor_version);
2211                         if (err < 0) {
2212                                 printk(KERN_WARNING 
2213                                         "md: %s has different UUID to %s\n",
2214                                         bdevname(rdev->bdev,b), 
2215                                         bdevname(rdev0->bdev,b2));
2216                                 export_rdev(rdev);
2217                                 return -EINVAL;
2218                         }
2219                 }
2220                 err = bind_rdev_to_array(rdev, mddev);
2221                 if (err)
2222                         export_rdev(rdev);
2223                 return err;
2224         }
2225
2226         /*
2227          * add_new_disk can be used once the array is assembled
2228          * to add "hot spares".  They must already have a superblock
2229          * written
2230          */
2231         if (mddev->pers) {
2232                 int err;
2233                 if (!mddev->pers->hot_add_disk) {
2234                         printk(KERN_WARNING 
2235                                 "%s: personality does not support diskops!\n",
2236                                mdname(mddev));
2237                         return -EINVAL;
2238                 }
2239                 if (mddev->persistent)
2240                         rdev = md_import_device(dev, mddev->major_version,
2241                                                 mddev->minor_version);
2242                 else
2243                         rdev = md_import_device(dev, -1, -1);
2244                 if (IS_ERR(rdev)) {
2245                         printk(KERN_WARNING 
2246                                 "md: md_import_device returned %ld\n",
2247                                 PTR_ERR(rdev));
2248                         return PTR_ERR(rdev);
2249                 }
2250                 /* set save_raid_disk if appropriate */
2251                 if (!mddev->persistent) {
2252                         if (info->state & (1<<MD_DISK_SYNC)  &&
2253                             info->raid_disk < mddev->raid_disks)
2254                                 rdev->raid_disk = info->raid_disk;
2255                         else
2256                                 rdev->raid_disk = -1;
2257                 } else
2258                         super_types[mddev->major_version].
2259                                 validate_super(mddev, rdev);
2260                 rdev->saved_raid_disk = rdev->raid_disk;
2261
2262                 rdev->in_sync = 0; /* just to be sure */
2263                 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
2264                         set_bit(WriteMostly, &rdev->flags);
2265
2266                 rdev->raid_disk = -1;
2267                 err = bind_rdev_to_array(rdev, mddev);
2268                 if (err)
2269                         export_rdev(rdev);
2270
2271                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2272                 md_wakeup_thread(mddev->thread);
2273                 return err;
2274         }
2275
2276         /* otherwise, add_new_disk is only allowed
2277          * for major_version==0 superblocks
2278          */
2279         if (mddev->major_version != 0) {
2280                 printk(KERN_WARNING "%s: ADD_NEW_DISK not supported\n",
2281                        mdname(mddev));
2282                 return -EINVAL;
2283         }
2284
2285         if (!(info->state & (1<<MD_DISK_FAULTY))) {
2286                 int err;
2287                 rdev = md_import_device (dev, -1, 0);
2288                 if (IS_ERR(rdev)) {
2289                         printk(KERN_WARNING 
2290                                 "md: error, md_import_device() returned %ld\n",
2291                                 PTR_ERR(rdev));
2292                         return PTR_ERR(rdev);
2293                 }
2294                 rdev->desc_nr = info->number;
2295                 if (info->raid_disk < mddev->raid_disks)
2296                         rdev->raid_disk = info->raid_disk;
2297                 else
2298                         rdev->raid_disk = -1;
2299
2300                 rdev->faulty = 0;
2301                 if (rdev->raid_disk < mddev->raid_disks)
2302                         rdev->in_sync = (info->state & (1<<MD_DISK_SYNC));
2303                 else
2304                         rdev->in_sync = 0;
2305
2306                 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
2307                         set_bit(WriteMostly, &rdev->flags);
2308
2309                 err = bind_rdev_to_array(rdev, mddev);
2310                 if (err) {
2311                         export_rdev(rdev);
2312                         return err;
2313                 }
2314
2315                 if (!mddev->persistent) {
2316                         printk(KERN_INFO "md: nonpersistent superblock ...\n");
2317                         rdev->sb_offset = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
2318                 } else 
2319                         rdev->sb_offset = calc_dev_sboffset(rdev->bdev);
2320                 rdev->size = calc_dev_size(rdev, mddev->chunk_size);
2321
2322                 if (!mddev->size || (mddev->size > rdev->size))
2323                         mddev->size = rdev->size;
2324         }
2325
2326         return 0;
2327 }
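/* When the array is already running, ADD_NEW_DISK takes the hot-spare
 * branch above.  Hypothetical sketch, illustrative only; the field
 * values are assumptions about a typical spare, not a fixed contract:
 */
#if 0
#include <string.h>
#include <sys/types.h>
#include <sys/sysmacros.h>	/* major()/minor() */
#include <sys/ioctl.h>
#include <linux/raid/md_u.h>

static int add_spare(int md_fd, dev_t spare)
{
	mdu_disk_info_t info;

	memset(&info, 0, sizeof(info));
	info.major = major(spare);
	info.minor = minor(spare);
	info.number = -1;	/* let the kernel pick a descriptor slot */
	info.raid_disk = -1;	/* spare: no raid slot yet */
	info.state = 0;
	return ioctl(md_fd, ADD_NEW_DISK, &info);
}
#endif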
2328
2329 static int hot_remove_disk(mddev_t * mddev, dev_t dev)
2330 {
2331         char b[BDEVNAME_SIZE];
2332         mdk_rdev_t *rdev;
2333
2334         if (!mddev->pers)
2335                 return -ENODEV;
2336
2337         rdev = find_rdev(mddev, dev);
2338         if (!rdev)
2339                 return -ENXIO;
2340
2341         if (rdev->raid_disk >= 0)
2342                 goto busy;
2343
2344         kick_rdev_from_array(rdev);
2345         md_update_sb(mddev);
2346
2347         return 0;
2348 busy:
2349         printk(KERN_WARNING "md: cannot remove active disk %s from %s ... \n",
2350                 bdevname(rdev->bdev,b), mdname(mddev));
2351         return -EBUSY;
2352 }
2353
2354 static int hot_add_disk(mddev_t * mddev, dev_t dev)
2355 {
2356         char b[BDEVNAME_SIZE];
2357         int err;
2358         unsigned int size;
2359         mdk_rdev_t *rdev;
2360
2361         if (!mddev->pers)
2362                 return -ENODEV;
2363
2364         if (mddev->major_version != 0) {
2365                 printk(KERN_WARNING "%s: HOT_ADD may only be used with"
2366                         " version-0 superblocks.\n",
2367                         mdname(mddev));
2368                 return -EINVAL;
2369         }
2370         if (!mddev->pers->hot_add_disk) {
2371                 printk(KERN_WARNING 
2372                         "%s: personality does not support diskops!\n",
2373                         mdname(mddev));
2374                 return -EINVAL;
2375         }
2376
2377         rdev = md_import_device (dev, -1, 0);
2378         if (IS_ERR(rdev)) {
2379                 printk(KERN_WARNING 
2380                         "md: error, md_import_device() returned %ld\n",
2381                         PTR_ERR(rdev));
2382                 return -EINVAL;
2383         }
2384
2385         if (mddev->persistent)
2386                 rdev->sb_offset = calc_dev_sboffset(rdev->bdev);
2387         else
2388                 rdev->sb_offset =
2389                         rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
2390
2391         size = calc_dev_size(rdev, mddev->chunk_size);
2392         rdev->size = size;
2393
2394         if (size < mddev->size) {
2395                 printk(KERN_WARNING 
2396                         "%s: disk size %llu blocks < array size %llu\n",
2397                         mdname(mddev), (unsigned long long)size,
2398                         (unsigned long long)mddev->size);
2399                 err = -ENOSPC;
2400                 goto abort_export;
2401         }
2402
2403         if (rdev->faulty) {
2404                 printk(KERN_WARNING 
2405                         "md: can not hot-add faulty %s disk to %s!\n",
2406                         bdevname(rdev->bdev,b), mdname(mddev));
2407                 err = -EINVAL;
2408                 goto abort_export;
2409         }
2410         rdev->in_sync = 0;
2411         rdev->desc_nr = -1;
2412         bind_rdev_to_array(rdev, mddev);
2413
2414         /*
2415          * The rest should better be atomic, we can have disk failures
2416          * noticed in interrupt contexts ...
2417          */
2418
2419         if (rdev->desc_nr == mddev->max_disks) {
2420                 printk(KERN_WARNING "%s: can not hot-add to full array!\n",
2421                         mdname(mddev));
2422                 err = -EBUSY;
2423                 goto abort_unbind_export;
2424         }
2425
2426         rdev->raid_disk = -1;
2427
2428         md_update_sb(mddev);
2429
2430         /*
2431          * Kick recovery, maybe this spare has to be added to the
2432          * array immediately.
2433          */
2434         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2435         md_wakeup_thread(mddev->thread);
2436
2437         return 0;
2438
2439 abort_unbind_export:
2440         unbind_rdev_from_array(rdev);
2441
2442 abort_export:
2443         export_rdev(rdev);
2444         return err;
2445 }
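/* HOT_ADD_DISK and HOT_REMOVE_DISK take the device number directly as
 * the ioctl argument (decoded with new_decode_dev() above).  Hypothetical
 * sketch, illustrative only:
 */
#if 0
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <linux/raid/md_u.h>

static int hot_swap(int md_fd, const char *bad, const char *fresh)
{
	struct stat st;

	if (stat(bad, &st) < 0 ||
	    ioctl(md_fd, HOT_REMOVE_DISK, (unsigned long)st.st_rdev) < 0)
		return -1;
	if (stat(fresh, &st) < 0 ||
	    ioctl(md_fd, HOT_ADD_DISK, (unsigned long)st.st_rdev) < 0)
		return -1;
	return 0;
}
#endif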
2446
2447 /* similar to deny_write_access, but accounts for our holding a reference
2448  * to the file ourselves */
2449 static int deny_bitmap_write_access(struct file * file)
2450 {
2451         struct inode *inode = file->f_mapping->host;
2452
2453         spin_lock(&inode->i_lock);
2454         if (atomic_read(&inode->i_writecount) > 1) {
2455                 spin_unlock(&inode->i_lock);
2456                 return -ETXTBSY;
2457         }
2458         atomic_set(&inode->i_writecount, -1);
2459         spin_unlock(&inode->i_lock);
2460
2461         return 0;
2462 }
2463
2464 static int set_bitmap_file(mddev_t *mddev, int fd)
2465 {
2466         int err;
2467
2468         if (mddev->pers) {
2469                 if (!mddev->pers->quiesce)
2470                         return -EBUSY;
2471                 if (mddev->recovery || mddev->sync_thread)
2472                         return -EBUSY;
2473                 /* we should be able to change the bitmap.. */
2474         }
2475
2476
2477         if (fd >= 0) {
2478                 if (mddev->bitmap)
2479                         return -EEXIST; /* cannot add when bitmap is present */
2480                 mddev->bitmap_file = fget(fd);
2481
2482                 if (mddev->bitmap_file == NULL) {
2483                         printk(KERN_ERR "%s: error: failed to get bitmap file\n",
2484                                mdname(mddev));
2485                         return -EBADF;
2486                 }
2487
2488                 err = deny_bitmap_write_access(mddev->bitmap_file);
2489                 if (err) {
2490                         printk(KERN_ERR "%s: error: bitmap file is already in use\n",
2491                                mdname(mddev));
2492                         fput(mddev->bitmap_file);
2493                         mddev->bitmap_file = NULL;
2494                         return err;
2495                 }
2496                 mddev->bitmap_offset = 0; /* file overrides offset */
2497         } else if (mddev->bitmap == NULL)
2498                 return -ENOENT; /* cannot remove what isn't there */
2499         err = 0;
2500         if (mddev->pers) {
2501                 mddev->pers->quiesce(mddev, 1);
2502                 if (fd >= 0)
2503                         err = bitmap_create(mddev);
2504                 if (fd < 0 || err)
2505                         bitmap_destroy(mddev);
2506                 mddev->pers->quiesce(mddev, 0);
2507         } else if (fd < 0) {
2508                 if (mddev->bitmap_file)
2509                         fput(mddev->bitmap_file);
2510                 mddev->bitmap_file = NULL;
2511         }
2512
2513         return err;
2514 }
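/* SET_BITMAP_FILE carries the bitmap file as an already-open descriptor,
 * or -1 to drop a file-backed bitmap.  Hypothetical sketch, illustrative
 * only; the bitmap file itself is assumed to have been initialised
 * beforehand:
 */
#if 0
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/raid/md_u.h>

static int attach_bitmap(int md_fd, const char *path)
{
	int bfd = open(path, O_RDWR);
	int err;

	if (bfd < 0)
		return -1;
	err = ioctl(md_fd, SET_BITMAP_FILE, bfd);
	close(bfd);	/* the driver holds its own reference via fget() */
	return err;
}
#endif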
2515
2516 /*
 * set_array_info is used in two different ways.
 * The original usage is when creating a new array.
 * In this usage, raid_disks is > 0 and it together with
 *  level, size, not_persistent, layout, chunksize determine the
 *  shape of the array.
 *  This will always create an array with a type-0.90.0 superblock.
 * The newer usage is when assembling an array.
 *  In this case raid_disks will be 0, and the major_version field is
 *  used to determine which style super-blocks are to be found on the devices.
 *  The minor and patch _version numbers are also kept in case the
 *  super_block handler wishes to interpret them.
2528  */
2529 static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
2530 {
2531
2532         if (info->raid_disks == 0) {
2533                 /* just setting version number for superblock loading */
2534                 if (info->major_version < 0 ||
2535                     info->major_version >= sizeof(super_types)/sizeof(super_types[0]) ||
2536                     super_types[info->major_version].name == NULL) {
2537                         /* maybe try to auto-load a module? */
2538                         printk(KERN_INFO 
2539                                 "md: superblock version %d not known\n",
2540                                 info->major_version);
2541                         return -EINVAL;
2542                 }
2543                 mddev->major_version = info->major_version;
2544                 mddev->minor_version = info->minor_version;
2545                 mddev->patch_version = info->patch_version;
2546                 return 0;
2547         }
2548         mddev->major_version = MD_MAJOR_VERSION;
2549         mddev->minor_version = MD_MINOR_VERSION;
2550         mddev->patch_version = MD_PATCHLEVEL_VERSION;
2551         mddev->ctime         = get_seconds();
2552
2553         mddev->level         = info->level;
2554         mddev->size          = info->size;
2555         mddev->raid_disks    = info->raid_disks;
2556         /* don't set md_minor, it is determined by which /dev/md* was
         * opened
2558          */
2559         if (info->state & (1<<MD_SB_CLEAN))
2560                 mddev->recovery_cp = MaxSector;
2561         else
2562                 mddev->recovery_cp = 0;
2563         mddev->persistent    = ! info->not_persistent;
2564
2565         mddev->layout        = info->layout;
2566         mddev->chunk_size    = info->chunk_size;
2567
2568         mddev->max_disks     = MD_SB_DISKS;
2569
2570         mddev->sb_dirty      = 1;
2571
2572         /*
2573          * Generate a 128 bit UUID
2574          */
2575         get_random_bytes(mddev->uuid, 16);
2576
2577         return 0;
2578 }
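/* The two usages described above, as a hypothetical userspace sketch
 * (illustrative only): raid_disks > 0 creates an array, raid_disks == 0
 * merely selects a superblock version for a subsequent assembly.
 */
#if 0
#include <string.h>
#include <sys/ioctl.h>
#include <linux/raid/md_u.h>

static int create_raid1(int md_fd, int ndisks, int chunk_bytes)
{
	mdu_array_info_t info;

	memset(&info, 0, sizeof(info));
	info.level = 1;			/* RAID1 */
	info.raid_disks = ndisks;	/* > 0: create */
	info.chunk_size = chunk_bytes;
	return ioctl(md_fd, SET_ARRAY_INFO, &info);
}

static int select_super_version(int md_fd, int major, int minor)
{
	mdu_array_info_t info;

	memset(&info, 0, sizeof(info));	/* raid_disks == 0: assembly */
	info.major_version = major;
	info.minor_version = minor;
	return ioctl(md_fd, SET_ARRAY_INFO, &info);
}
#endif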
2579
2580 /*
2581  * update_array_info is used to change the configuration of an
2582  * on-line array.
 * The version, ctime, level, size, raid_disks, not_persistent, layout, chunk_size
2584  * fields in the info are checked against the array.
2585  * Any differences that cannot be handled will cause an error.
2586  * Normally, only one change can be managed at a time.
2587  */
2588 static int update_array_info(mddev_t *mddev, mdu_array_info_t *info)
2589 {
2590         int rv = 0;
2591         int cnt = 0;
2592         int state = 0;
2593
        /* calculate expected state, ignoring low bits */
2595         if (mddev->bitmap && mddev->bitmap_offset)
2596                 state |= (1 << MD_SB_BITMAP_PRESENT);
2597
2598         if (mddev->major_version != info->major_version ||
2599             mddev->minor_version != info->minor_version ||
2600 /*          mddev->patch_version != info->patch_version || */
2601             mddev->ctime         != info->ctime         ||
2602             mddev->level         != info->level         ||
2603 /*          mddev->layout        != info->layout        || */
2604             !mddev->persistent   != info->not_persistent||
2605             mddev->chunk_size    != info->chunk_size    ||
2606             /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
2607             ((state^info->state) & 0xfffffe00)
2608                 )
2609                 return -EINVAL;
2610         /* Check there is only one change */
2611         if (mddev->size != info->size) cnt++;
2612         if (mddev->raid_disks != info->raid_disks) cnt++;
2613         if (mddev->layout != info->layout) cnt++;
2614         if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) cnt++;
2615         if (cnt == 0) return 0;
2616         if (cnt > 1) return -EINVAL;
2617
2618         if (mddev->layout != info->layout) {
2619                 /* Change layout
2620                  * we don't need to do anything at the md level, the
2621                  * personality will take care of it all.
2622                  */
2623                 if (mddev->pers->reconfig == NULL)
2624                         return -EINVAL;
2625                 else
2626                         return mddev->pers->reconfig(mddev, info->layout, -1);
2627         }
2628         if (mddev->size != info->size) {
2629                 mdk_rdev_t * rdev;
2630                 struct list_head *tmp;
2631                 if (mddev->pers->resize == NULL)
2632                         return -EINVAL;
2633                 /* The "size" is the amount of each device that is used.
2634                  * This can only make sense for arrays with redundancy.
2635                  * linear and raid0 always use whatever space is available
2636                  * We can only consider changing the size if no resync
2637                  * or reconstruction is happening, and if the new size
2638                  * is acceptable. It must fit before the sb_offset or,
2639                  * if that is <data_offset, it must fit before the
2640                  * size of each device.
2641                  * If size is zero, we find the largest size that fits.
2642                  */
2643                 if (mddev->sync_thread)
2644                         return -EBUSY;
2645                 ITERATE_RDEV(mddev,rdev,tmp) {
2646                         sector_t avail;
2647                         int fit = (info->size == 0);
2648                         if (rdev->sb_offset > rdev->data_offset)
2649                                 avail = (rdev->sb_offset*2) - rdev->data_offset;
2650                         else
2651                                 avail = get_capacity(rdev->bdev->bd_disk)
2652                                         - rdev->data_offset;
2653                         if (fit && (info->size == 0 || info->size > avail/2))
2654                                 info->size = avail/2;
2655                         if (avail < ((sector_t)info->size << 1))
2656                                 return -ENOSPC;
2657                 }
2658                 rv = mddev->pers->resize(mddev, (sector_t)info->size *2);
2659                 if (!rv) {
2660                         struct block_device *bdev;
2661
2662                         bdev = bdget_disk(mddev->gendisk, 0);
2663                         if (bdev) {
2664                                 down(&bdev->bd_inode->i_sem);
2665                                 i_size_write(bdev->bd_inode, mddev->array_size << 10);
2666                                 up(&bdev->bd_inode->i_sem);
2667                                 bdput(bdev);
2668                         }
2669                 }
2670         }
2671         if (mddev->raid_disks    != info->raid_disks) {
2672                 /* change the number of raid disks */
2673                 if (mddev->pers->reshape == NULL)
2674                         return -EINVAL;
2675                 if (info->raid_disks <= 0 ||
2676                     info->raid_disks >= mddev->max_disks)
2677                         return -EINVAL;
2678                 if (mddev->sync_thread)
2679                         return -EBUSY;
2680                 rv = mddev->pers->reshape(mddev, info->raid_disks);
2681                 if (!rv) {
2682                         struct block_device *bdev;
2683
2684                         bdev = bdget_disk(mddev->gendisk, 0);
2685                         if (bdev) {
2686                                 down(&bdev->bd_inode->i_sem);
2687                                 i_size_write(bdev->bd_inode, mddev->array_size << 10);
2688                                 up(&bdev->bd_inode->i_sem);
2689                                 bdput(bdev);
2690                         }
2691                 }
2692         }
2693         if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
2694                 if (mddev->pers->quiesce == NULL)
2695                         return -EINVAL;
2696                 if (mddev->recovery || mddev->sync_thread)
2697                         return -EBUSY;
2698                 if (info->state & (1<<MD_SB_BITMAP_PRESENT)) {
2699                         /* add the bitmap */
2700                         if (mddev->bitmap)
2701                                 return -EEXIST;
2702                         if (mddev->default_bitmap_offset == 0)
2703                                 return -EINVAL;
2704                         mddev->bitmap_offset = mddev->default_bitmap_offset;
2705                         mddev->pers->quiesce(mddev, 1);
2706                         rv = bitmap_create(mddev);
2707                         if (rv)
2708                                 bitmap_destroy(mddev);
2709                         mddev->pers->quiesce(mddev, 0);
2710                 } else {
2711                         /* remove the bitmap */
2712                         if (!mddev->bitmap)
2713                                 return -ENOENT;
2714                         if (mddev->bitmap->file)
2715                                 return -EINVAL;
2716                         mddev->pers->quiesce(mddev, 1);
2717                         bitmap_destroy(mddev);
2718                         mddev->pers->quiesce(mddev, 0);
2719                         mddev->bitmap_offset = 0;
2720                 }
2721         }
2722         md_update_sb(mddev);
2723         return rv;
2724 }
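/* Because only one change is accepted per call, on-line reconfiguration
 * is a read-modify-write of the array info.  Hypothetical sketch of
 * growing the per-device size (illustrative only):
 */
#if 0
#include <sys/ioctl.h>
#include <linux/raid/md_u.h>

static int grow_size(int md_fd, int new_kb)	/* 0 = use max available */
{
	mdu_array_info_t info;

	if (ioctl(md_fd, GET_ARRAY_INFO, &info) < 0)
		return -1;
	info.size = new_kb;	/* the single field being changed */
	return ioctl(md_fd, SET_ARRAY_INFO, &info);
}
#endif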
2725
2726 static int set_disk_faulty(mddev_t *mddev, dev_t dev)
2727 {
2728         mdk_rdev_t *rdev;
2729
2730         if (mddev->pers == NULL)
2731                 return -ENODEV;
2732
2733         rdev = find_rdev(mddev, dev);
2734         if (!rdev)
2735                 return -ENODEV;
2736
2737         md_error(mddev, rdev);
2738         return 0;
2739 }
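/* SET_DISK_FAULTY feeds a device straight into md_error(), the same path
 * a real I/O failure takes - handy for exercising recovery.  Hypothetical
 * sketch, illustrative only:
 */
#if 0
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <linux/raid/md_u.h>

static int fail_disk(int md_fd, const char *path)
{
	struct stat st;

	if (stat(path, &st) < 0)
		return -1;
	return ioctl(md_fd, SET_DISK_FAULTY, (unsigned long)st.st_rdev);
}
#endif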
2740
2741 static int md_ioctl(struct inode *inode, struct file *file,
2742                         unsigned int cmd, unsigned long arg)
2743 {
2744         int err = 0;
2745         void __user *argp = (void __user *)arg;
2746         struct hd_geometry __user *loc = argp;
2747         mddev_t *mddev = NULL;
2748
2749         if (!capable(CAP_SYS_ADMIN))
2750                 return -EACCES;
2751
2752         /*
2753          * Commands dealing with the RAID driver but not any
2754          * particular array:
2755          */
2756         switch (cmd)
2757         {
2758                 case RAID_VERSION:
2759                         err = get_version(argp);
2760                         goto done;
2761
2762                 case PRINT_RAID_DEBUG:
2763                         err = 0;
2764                         md_print_devices();
2765                         goto done;
2766
2767 #ifndef MODULE
2768                 case RAID_AUTORUN:
2769                         err = 0;
2770                         autostart_arrays(arg);
2771                         goto done;
2772 #endif
2773                 default:;
2774         }
2775
2776         /*
2777          * Commands creating/starting a new array:
2778          */
2779
2780         mddev = inode->i_bdev->bd_disk->private_data;
2781
2782         if (!mddev) {
2783                 BUG();
2784                 goto abort;
2785         }
2786
2787
2788         if (cmd == START_ARRAY) {
2789                 /* START_ARRAY doesn't need to lock the array as autostart_array
2790                  * does the locking, and it could even be a different array
2791                  */
2792                 static int cnt = 3;
2793                 if (cnt > 0 ) {
2794                         printk(KERN_WARNING
2795                                "md: %s(pid %d) used deprecated START_ARRAY ioctl. "
2796                                "This will not be supported beyond 2.6\n",
2797                                current->comm, current->pid);
2798                         cnt--;
2799                 }
2800                 err = autostart_array(new_decode_dev(arg));
2801                 if (err) {
2802                         printk(KERN_WARNING "md: autostart failed!\n");
2803                         goto abort;
2804                 }
2805                 goto done;
2806         }
2807
2808         err = mddev_lock(mddev);
2809         if (err) {
2810                 printk(KERN_INFO 
2811                         "md: ioctl lock interrupted, reason %d, cmd %d\n",
2812                         err, cmd);
2813                 goto abort;
2814         }
2815
2816         switch (cmd)
2817         {
2818                 case SET_ARRAY_INFO:
2819                         {
2820                                 mdu_array_info_t info;
2821                                 if (!arg)
2822                                         memset(&info, 0, sizeof(info));
2823                                 else if (copy_from_user(&info, argp, sizeof(info))) {
2824                                         err = -EFAULT;
2825                                         goto abort_unlock;
2826                                 }
2827                                 if (mddev->pers) {
2828                                         err = update_array_info(mddev, &info);
2829                                         if (err) {
2830                                                 printk(KERN_WARNING "md: couldn't update"
2831                                                        " array info. %d\n", err);
2832                                                 goto abort_unlock;
2833                                         }
2834                                         goto done_unlock;
2835                                 }
2836                                 if (!list_empty(&mddev->disks)) {
2837                                         printk(KERN_WARNING
2838                                                "md: array %s already has disks!\n",
2839                                                mdname(mddev));
2840                                         err = -EBUSY;
2841                                         goto abort_unlock;
2842                                 }
2843                                 if (mddev->raid_disks) {
2844                                         printk(KERN_WARNING
2845                                                "md: array %s already initialised!\n",
2846                                                mdname(mddev));
2847                                         err = -EBUSY;
2848                                         goto abort_unlock;
2849                                 }
2850                                 err = set_array_info(mddev, &info);
2851                                 if (err) {
2852                                         printk(KERN_WARNING "md: couldn't set"
2853                                                " array info. %d\n", err);
2854                                         goto abort_unlock;
2855                                 }
2856                         }
2857                         goto done_unlock;
2858
2859                 default:;
2860         }
2861
2862         /*
2863          * Commands querying/configuring an existing array:
2864          */
2865         /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY,
2866          * RUN_ARRAY, and SET_BITMAP_FILE are allowed */
2867         if (!mddev->raid_disks && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
2868                         && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE) {
2869                 err = -ENODEV;
2870                 goto abort_unlock;
2871         }
2872
2873         /*
2874          * Commands even a read-only array can execute:
2875          */
2876         switch (cmd)
2877         {
2878                 case GET_ARRAY_INFO:
2879                         err = get_array_info(mddev, argp);
2880                         goto done_unlock;
2881
2882                 case GET_BITMAP_FILE:
2883                         err = get_bitmap_file(mddev, argp);
2884                         goto done_unlock;
2885
2886                 case GET_DISK_INFO:
2887                         err = get_disk_info(mddev, argp);
2888                         goto done_unlock;
2889
2890                 case RESTART_ARRAY_RW:
2891                         err = restart_array(mddev);
2892                         goto done_unlock;
2893
2894                 case STOP_ARRAY:
2895                         err = do_md_stop (mddev, 0);
2896                         goto done_unlock;
2897
2898                 case STOP_ARRAY_RO:
2899                         err = do_md_stop (mddev, 1);
2900                         goto done_unlock;
2901
2902         /*
         * We have a problem here: there is no easy way to give a CHS
         * virtual geometry. We currently pretend that we have 2 heads,
2905          * 4 sectors (with a BIG number of cylinders...). This drives
2906          * dosfs just mad... ;-)
2907          */
2908                 case HDIO_GETGEO:
2909                         if (!loc) {
2910                                 err = -EINVAL;
2911                                 goto abort_unlock;
2912                         }
2913                         err = put_user (2, (char __user *) &loc->heads);
2914                         if (err)
2915                                 goto abort_unlock;
2916                         err = put_user (4, (char __user *) &loc->sectors);
2917                         if (err)
2918                                 goto abort_unlock;
2919                         err = put_user(get_capacity(mddev->gendisk)/8,
2920                                         (short __user *) &loc->cylinders);
2921                         if (err)
2922                                 goto abort_unlock;
2923                         err = put_user (get_start_sect(inode->i_bdev),
2924                                                 (long __user *) &loc->start);
2925                         goto done_unlock;
2926         }
2927
2928         /*
2929          * The remaining ioctls are changing the state of the
2930          * superblock, so we do not allow read-only arrays
2931          * here:
2932          */
2933         if (mddev->ro) {
2934                 err = -EROFS;
2935                 goto abort_unlock;
2936         }
2937
2938         switch (cmd)
2939         {
2940                 case ADD_NEW_DISK:
2941                 {
2942                         mdu_disk_info_t info;
2943                         if (copy_from_user(&info, argp, sizeof(info)))
2944                                 err = -EFAULT;
2945                         else
2946                                 err = add_new_disk(mddev, &info);
2947                         goto done_unlock;
2948                 }
2949
2950                 case HOT_REMOVE_DISK:
2951                         err = hot_remove_disk(mddev, new_decode_dev(arg));
2952                         goto done_unlock;
2953
2954                 case HOT_ADD_DISK:
2955                         err = hot_add_disk(mddev, new_decode_dev(arg));
2956                         goto done_unlock;
2957
2958                 case SET_DISK_FAULTY:
2959                         err = set_disk_faulty(mddev, new_decode_dev(arg));
2960                         goto done_unlock;
2961
2962                 case RUN_ARRAY:
2963                         err = do_md_run (mddev);
2964                         goto done_unlock;
2965
2966                 case SET_BITMAP_FILE:
2967                         err = set_bitmap_file(mddev, (int)arg);
2968                         goto done_unlock;
2969
2970                 default:
2971                         if (_IOC_TYPE(cmd) == MD_MAJOR)
2972                                 printk(KERN_WARNING "md: %s(pid %d) used"
2973                                         " obsolete MD ioctl, upgrade your"
2974                                         " software to use new ioctls.\n",
2975                                         current->comm, current->pid);
2976                         err = -EINVAL;
2977                         goto abort_unlock;
2978         }
2979
2980 done_unlock:
2981 abort_unlock:
2982         mddev_unlock(mddev);
2983
2984         return err;
2985 done:
2986         if (err)
2987                 MD_BUG();
2988 abort:
2989         return err;
2990 }
2991
2992 static int md_open(struct inode *inode, struct file *file)
2993 {
2994         /*
2995          * Succeed if we can lock the mddev, which confirms that
2996          * it isn't being stopped right now.
2997          */
2998         mddev_t *mddev = inode->i_bdev->bd_disk->private_data;
2999         int err;
3000
3001         if ((err = mddev_lock(mddev)))
3002                 goto out;
3003
3004         err = 0;
3005         mddev_get(mddev);
3006         mddev_unlock(mddev);
3007
3008         check_disk_change(inode->i_bdev);
3009  out:
3010         return err;
3011 }
3012
3013 static int md_release(struct inode *inode, struct file * file)
3014 {
3015         mddev_t *mddev = inode->i_bdev->bd_disk->private_data;
3016
3017         if (!mddev)
3018                 BUG();
3019         mddev_put(mddev);
3020
3021         return 0;
3022 }
3023
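/*
 * Support for the check_disk_change() call in md_open() above:
 * media_changed reports mddev->changed (set elsewhere in this file when
 * an array is stopped) and revalidate_disk clears it again.
 */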
3024 static int md_media_changed(struct gendisk *disk)
3025 {
3026         mddev_t *mddev = disk->private_data;
3027
3028         return mddev->changed;
3029 }
3030
3031 static int md_revalidate(struct gendisk *disk)
3032 {
3033         mddev_t *mddev = disk->private_data;
3034
3035         mddev->changed = 0;
3036         return 0;
3037 }
3038 static struct block_device_operations md_fops =
3039 {
3040         .owner          = THIS_MODULE,
3041         .open           = md_open,
3042         .release        = md_release,
3043         .ioctl          = md_ioctl,
3044         .media_changed  = md_media_changed,
3045         .revalidate_disk= md_revalidate,
3046 };
3047
3048 static int md_thread(void * arg)
3049 {
3050         mdk_thread_t *thread = arg;
3051
3052         lock_kernel();
3053
3054         /*
3055          * Detach thread
3056          */
3057
3058         daemonize(thread->name, mdname(thread->mddev));
3059
3060         current->exit_signal = SIGCHLD;
3061         allow_signal(SIGKILL);
3062         thread->tsk = current;
3063
3064         /*
3065          * md_thread is a 'system-thread'; its priority should be very
3066          * high. We avoid resource deadlocks individually in each
3067          * raid personality. (RAID5 does preallocation) We also use RR and
3068          * the very same RT priority as kswapd, thus we will never get
3069          * into a priority inversion deadlock.
3070          *
3071          * we definitely have to have equal or higher priority than
3072          * bdflush, otherwise bdflush will deadlock if there are too
3073          * many dirty RAID5 blocks.
3074          */
3075         unlock_kernel();
3076
3077         complete(thread->event);
3078         while (thread->run) {
3079                 void (*run)(mddev_t *);
3080
3081                 wait_event_interruptible_timeout(thread->wqueue,
3082                                                  test_bit(THREAD_WAKEUP, &thread->flags),
3083                                                  thread->timeout);
3084                 try_to_freeze();
3085
3086                 clear_bit(THREAD_WAKEUP, &thread->flags);
3087
3088                 run = thread->run;
3089                 if (run)
3090                         run(thread->mddev);
3091
3092                 if (signal_pending(current))
3093                         flush_signals(current);
3094         }
3095         complete(thread->event);
3096         return 0;
3097 }
3098
3099 void md_wakeup_thread(mdk_thread_t *thread)
3100 {
3101         if (thread) {
3102                 dprintk("md: waking up MD thread %s.\n", thread->tsk->comm);
3103                 set_bit(THREAD_WAKEUP, &thread->flags);
3104                 wake_up(&thread->wqueue);
3105         }
3106 }
3107
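/*
 * Create and start a helper thread for an array.  "name" is a
 * printf-style format whose single %s becomes the array name via the
 * daemonize() call in md_thread() above, so e.g. "%s_resync" names the
 * thread "md0_resync" for /dev/md0.
 */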
3108 mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev,
3109                                  const char *name)
3110 {
3111         mdk_thread_t *thread;
3112         int ret;
3113         struct completion event;
3114
3115         thread = kmalloc(sizeof(mdk_thread_t), GFP_KERNEL);
3117         if (!thread)
3118                 return NULL;
3119
3120         memset(thread, 0, sizeof(mdk_thread_t));
3121         init_waitqueue_head(&thread->wqueue);
3122
3123         init_completion(&event);
3124         thread->event = &event;
3125         thread->run = run;
3126         thread->mddev = mddev;
3127         thread->name = name;
3128         thread->timeout = MAX_SCHEDULE_TIMEOUT;
3129         ret = kernel_thread(md_thread, thread, 0);
3130         if (ret < 0) {
3131                 kfree(thread);
3132                 return NULL;
3133         }
3134         wait_for_completion(&event);
3135         return thread;
3136 }
3137
3138 void md_unregister_thread(mdk_thread_t *thread)
3139 {
3140         struct completion event;
3141
3142         init_completion(&event);
3143
3144         thread->event = &event;
3145
3146         /* As soon as ->run is set to NULL, the task could disappear,
3147          * so we need to hold tasklist_lock until we have sent the signal
3148          */
3149         dprintk("interrupting MD-thread pid %d\n", thread->tsk->pid);
3150         read_lock(&tasklist_lock);
3151         thread->run = NULL;
3152         send_sig(SIGKILL, thread->tsk, 1);
3153         read_unlock(&tasklist_lock);
3154         wait_for_completion(&event);
3155         kfree(thread);
3156 }
3157
3158 void md_error(mddev_t *mddev, mdk_rdev_t *rdev)
3159 {
3160         if (!mddev) {
3161                 MD_BUG();
3162                 return;
3163         }
3164
3165         if (!rdev || rdev->faulty)
3166                 return;
3167 /*
3168         dprintk("md_error dev:%s, rdev:(%d:%d), (caller: %p,%p,%p,%p).\n",
3169                 mdname(mddev),
3170                 MAJOR(rdev->bdev->bd_dev), MINOR(rdev->bdev->bd_dev),
3171                 __builtin_return_address(0),__builtin_return_address(1),
3172                 __builtin_return_address(2),__builtin_return_address(3));
3173 */
3174         if (!mddev->pers->error_handler)
3175                 return;
3176         mddev->pers->error_handler(mddev,rdev);
3177         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
3178         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3179         md_wakeup_thread(mddev->thread);
3180 }
3181
3182 /* seq_file implementation /proc/mdstat */
3183
3184 static void status_unused(struct seq_file *seq)
3185 {
3186         int i = 0;
3187         mdk_rdev_t *rdev;
3188         struct list_head *tmp;
3189
3190         seq_printf(seq, "unused devices: ");
3191
3192         ITERATE_RDEV_PENDING(rdev,tmp) {
3193                 char b[BDEVNAME_SIZE];
3194                 i++;
3195                 seq_printf(seq, "%s ",
3196                               bdevname(rdev->bdev,b));
3197         }
3198         if (!i)
3199                 seq_printf(seq, "<none>");
3200
3201         seq_printf(seq, "\n");
3202 }
3203
3204
3205 static void status_resync(struct seq_file *seq, mddev_t * mddev)
3206 {
3207         unsigned long max_blocks, resync, res, dt, db, rt;
3208
3209         resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active))/2;
3210
3211         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
3212                 max_blocks = mddev->resync_max_sectors >> 1;
3213         else
3214                 max_blocks = mddev->size;
3215
3216         /*
3217          * Should not happen.
3218          */
3219         if (!max_blocks) {
3220                 MD_BUG();
3221                 return;
3222         }
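        /*
         * Progress in per-mille: both operands are pre-scaled by 1024 so
         * the multiplication by 1000 stays well inside an unsigned long.
         */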
3223         res = (resync/1024)*1000/(max_blocks/1024 + 1);
3224         {
3225                 int i, x = res/50, y = 20-x;
3226                 seq_printf(seq, "[");
3227                 for (i = 0; i < x; i++)
3228                         seq_printf(seq, "=");
3229                 seq_printf(seq, ">");
3230                 for (i = 0; i < y; i++)
3231                         seq_printf(seq, ".");
3232                 seq_printf(seq, "] ");
3233         }
3234         seq_printf(seq, " %s =%3lu.%lu%% (%lu/%lu)",
3235                       (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
3236                        "resync" : "recovery"),
3237                       res/10, res % 10, resync, max_blocks);
3238
3239         /*
3240          * We do not want to overflow, so the order of operands and
3241          * the * 100 / 100 trick are important. We do a +1 to be
3242          * safe against division by zero. We only estimate anyway.
3243          *
3244          * dt: time from mark until now
3245          * db: blocks written from mark until now
3246          * rt: remaining time
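         *
         * Worked example (made-up numbers): with dt = 100s, db = 50000
         * blocks and 1000000 blocks left, rt = (100 * (1000000/501))/100
         * = 1996s, close to the exact 1000000/500 = 2000s.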
3247          */
3248         dt = ((jiffies - mddev->resync_mark) / HZ);
3249         if (!dt) dt++;
3250         db = resync - (mddev->resync_mark_cnt/2);
3251         rt = (dt * ((max_blocks-resync) / (db/100+1)))/100;
3252
3253         seq_printf(seq, " finish=%lu.%lumin", rt / 60, (rt % 60)/6);
3254
3255         seq_printf(seq, " speed=%ldK/sec", db/dt);
3256 }
3257
3258 static void *md_seq_start(struct seq_file *seq, loff_t *pos)
3259 {
3260         struct list_head *tmp;
3261         loff_t l = *pos;
3262         mddev_t *mddev;
3263
3264         if (l >= 0x10000)
3265                 return NULL;
3266         if (!l--)
3267                 /* header */
3268                 return (void*)1;
3269
3270         spin_lock(&all_mddevs_lock);
3271         list_for_each(tmp,&all_mddevs)
3272                 if (!l--) {
3273                         mddev = list_entry(tmp, mddev_t, all_mddevs);
3274                         mddev_get(mddev);
3275                         spin_unlock(&all_mddevs_lock);
3276                         return mddev;
3277                 }
3278         spin_unlock(&all_mddevs_lock);
3279         if (!l--)
3280                 return (void*)2;/* tail */
3281         return NULL;
3282 }
3283
3284 static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3285 {
3286         struct list_head *tmp;
3287         mddev_t *next_mddev, *mddev = v;
3288         
3289         ++*pos;
3290         if (v == (void*)2)
3291                 return NULL;
3292
3293         spin_lock(&all_mddevs_lock);
3294         if (v == (void*)1)
3295                 tmp = all_mddevs.next;
3296         else
3297                 tmp = mddev->all_mddevs.next;
3298         if (tmp != &all_mddevs)
3299                 next_mddev = mddev_get(list_entry(tmp,mddev_t,all_mddevs));
3300         else {
3301                 next_mddev = (void*)2;
3302                 *pos = 0x10000;
3303         }               
3304         spin_unlock(&all_mddevs_lock);
3305
3306         if (v != (void*)1)
3307                 mddev_put(mddev);
3308         return next_mddev;
3309
3310 }
3311
3312 static void md_seq_stop(struct seq_file *seq, void *v)
3313 {
3314         mddev_t *mddev = v;
3315
3316         if (mddev && v != (void*)1 && v != (void*)2)
3317                 mddev_put(mddev);
3318 }
3319
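/*
 * Shape of the /proc/mdstat text assembled below (illustrative only:
 * device names and numbers are made up, and the per-personality detail
 * line comes from pers->status()):
 *
 *      Personalities : [raid1]
 *      md0 : active raid1 sdb1[1] sda1[0]
 *            1048512 blocks
 *            bitmap: 3/128 pages [12KB], 256KB chunk
 *
 *      unused devices: <none>
 */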
3320 static int md_seq_show(struct seq_file *seq, void *v)
3321 {
3322         mddev_t *mddev = v;
3323         sector_t size;
3324         struct list_head *tmp2;
3325         mdk_rdev_t *rdev;
3326         int i;
3327         struct bitmap *bitmap;
3328
3329         if (v == (void*)1) {
3330                 seq_printf(seq, "Personalities : ");
3331                 spin_lock(&pers_lock);
3332                 for (i = 0; i < MAX_PERSONALITY; i++)
3333                         if (pers[i])
3334                                 seq_printf(seq, "[%s] ", pers[i]->name);
3335
3336                 spin_unlock(&pers_lock);
3337                 seq_printf(seq, "\n");
3338                 return 0;
3339         }
3340         if (v == (void*)2) {
3341                 status_unused(seq);
3342                 return 0;
3343         }
3344
3345         if (mddev_lock(mddev)!=0) 
3346                 return -EINTR;
3347         if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
3348                 seq_printf(seq, "%s : %sactive", mdname(mddev),
3349                                                 mddev->pers ? "" : "in");
3350                 if (mddev->pers) {
3351                         if (mddev->ro)
3352                                 seq_printf(seq, " (read-only)");
3353                         seq_printf(seq, " %s", mddev->pers->name);
3354                 }
3355
3356                 size = 0;
3357                 ITERATE_RDEV(mddev,rdev,tmp2) {
3358                         char b[BDEVNAME_SIZE];
3359                         seq_printf(seq, " %s[%d]",
3360                                 bdevname(rdev->bdev,b), rdev->desc_nr);
3361                         if (test_bit(WriteMostly, &rdev->flags))
3362                                 seq_printf(seq, "(W)");
3363                         if (rdev->faulty) {
3364                                 seq_printf(seq, "(F)");
3365                                 continue;
3366                         }
3367                         size += rdev->size;
3368                 }
3369
3370                 if (!list_empty(&mddev->disks)) {
3371                         if (mddev->pers)
3372                                 seq_printf(seq, "\n      %llu blocks",
3373                                         (unsigned long long)mddev->array_size);
3374                         else
3375                                 seq_printf(seq, "\n      %llu blocks",
3376                                         (unsigned long long)size);
3377                 }
3378
3379                 if (mddev->pers) {
3380                         mddev->pers->status (seq, mddev);
3381                         seq_printf(seq, "\n      ");
3382                         if (mddev->curr_resync > 2) {
3383                                 status_resync (seq, mddev);
3384                                 seq_printf(seq, "\n      ");
3385                         } else if (mddev->curr_resync == 1 || mddev->curr_resync == 2)
3386                                 seq_printf(seq, "       resync=DELAYED\n      ");
3387                 } else
3388                         seq_printf(seq, "\n       ");
3389
3390                 if ((bitmap = mddev->bitmap)) {
3391                         unsigned long chunk_kb;
3392                         unsigned long flags;
3393                         spin_lock_irqsave(&bitmap->lock, flags);
3394                         chunk_kb = bitmap->chunksize >> 10;
3395                         seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], "
3396                                 "%lu%s chunk",
3397                                 bitmap->pages - bitmap->missing_pages,
3398                                 bitmap->pages,
3399                                 (bitmap->pages - bitmap->missing_pages)
3400                                         << (PAGE_SHIFT - 10),
3401                                 chunk_kb ? chunk_kb : bitmap->chunksize,
3402                                 chunk_kb ? "KB" : "B");
3403                         if (bitmap->file) {
3404                                 seq_printf(seq, ", file: ");
3405                                 seq_path(seq, bitmap->file->f_vfsmnt,
3406                                          bitmap->file->f_dentry," \t\n");
3407                         }
3408
3409                         seq_printf(seq, "\n");
3410                         spin_unlock_irqrestore(&bitmap->lock, flags);
3411                 }
3412
3413                 seq_printf(seq, "\n");
3414         }
3415         mddev_unlock(mddev);
3416         
3417         return 0;
3418 }
3419
3420 static struct seq_operations md_seq_ops = {
3421         .start  = md_seq_start,
3422         .next   = md_seq_next,
3423         .stop   = md_seq_stop,
3424         .show   = md_seq_show,
3425 };
3426
3427 static int md_seq_open(struct inode *inode, struct file *file)
3428 {
3429         int error;
3430
3431         error = seq_open(file, &md_seq_ops);
3432         return error;
3433 }
3434
3435 static struct file_operations md_seq_fops = {
3436         .open           = md_seq_open,
3437         .read           = seq_read,
3438         .llseek         = seq_lseek,
3439         .release        = seq_release,
3440 };
3441
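/*
 * A personality typically hooks itself in from its module init, along
 * these lines (a sketch; RAID1 and raid1_personality are the raid1.c
 * names):
 *
 *      static int __init raid_init(void)
 *      {
 *              return register_md_personality(RAID1, &raid1_personality);
 *      }
 */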
3442 int register_md_personality(int pnum, mdk_personality_t *p)
3443 {
3444         if (pnum >= MAX_PERSONALITY) {
3445                 printk(KERN_ERR
3446                        "md: tried to install personality %s as nr %d, but max is %lu\n",
3447                        p->name, pnum, MAX_PERSONALITY-1);
3448                 return -EINVAL;
3449         }
3450
3451         spin_lock(&pers_lock);
3452         if (pers[pnum]) {
3453                 spin_unlock(&pers_lock);
3454                 return -EBUSY;
3455         }
3456
3457         pers[pnum] = p;
3458         printk(KERN_INFO "md: %s personality registered as nr %d\n", p->name, pnum);
3459         spin_unlock(&pers_lock);
3460         return 0;
3461 }
3462
3463 int unregister_md_personality(int pnum)
3464 {
3465         if (pnum >= MAX_PERSONALITY)
3466                 return -EINVAL;
3467
3468         printk(KERN_INFO "md: %s personality unregistered\n", pers[pnum]->name);
3469         spin_lock(&pers_lock);
3470         pers[pnum] = NULL;
3471         spin_unlock(&pers_lock);
3472         return 0;
3473 }
3474
3475 static int is_mddev_idle(mddev_t *mddev)
3476 {
3477         mdk_rdev_t * rdev;
3478         struct list_head *tmp;
3479         int idle;
3480         unsigned long curr_events;
3481
3482         idle = 1;
3483         ITERATE_RDEV(mddev,rdev,tmp) {
3484                 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
3485                 curr_events = disk_stat_read(disk, read_sectors) + 
3486                                 disk_stat_read(disk, write_sectors) - 
3487                                 atomic_read(&disk->sync_io);
3488                 /* Allow some slack between the values of curr_events and last_events,
3489                  * as there are some uninteresting races.
3490                  * Note: the following is an unsigned comparison.
3491                  */
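                /* Being unsigned, the single test below catches both
                 * curr_events - last_events > 32 and
                 * curr_events - last_events < -32 (which wraps around
                 * to a huge unsigned value).
                 */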
3492                 if ((curr_events - rdev->last_events + 32) > 64) {
3493                         rdev->last_events = curr_events;
3494                         idle = 0;
3495                 }
3496         }
3497         return idle;
3498 }
3499
3500 void md_done_sync(mddev_t *mddev, int blocks, int ok)
3501 {
3502         /* another "blocks" 512-byte blocks have been synced */
3503         atomic_sub(blocks, &mddev->recovery_active);
3504         wake_up(&mddev->recovery_wait);
3505         if (!ok) {
3506                 set_bit(MD_RECOVERY_ERR, &mddev->recovery);
3507                 md_wakeup_thread(mddev->thread);
3508                 /* stop recovery, signal do_sync ... */
3509         }
3510 }
3511
3512
3513 /* md_write_start(mddev, bi)
3514  * If we need to update some array metadata (e.g. 'active' flag
3515  * in superblock) before writing, schedule a superblock update
3516  * and wait for it to complete.
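 *
 * Callers pass in each incoming bio (non-writes return immediately) and
 * pair this with md_write_end() when the write completes, so
 * writes_pending tracks in-flight writes.  This is a usage sketch
 * inferred from the code below, not a documented contract.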
3517  */
3518 void md_write_start(mddev_t *mddev, struct bio *bi)
3519 {
3520         if (bio_data_dir(bi) != WRITE)
3521                 return;
3522
3523         atomic_inc(&mddev->writes_pending);
3524         if (mddev->in_sync) {
3525                 spin_lock(&mddev->write_lock);
3526                 if (mddev->in_sync) {
3527                         mddev->in_sync = 0;
3528                         mddev->sb_dirty = 1;
3529                         md_wakeup_thread(mddev->thread);
3530                 }
3531                 spin_unlock(&mddev->write_lock);
3532         }
3533         wait_event(mddev->sb_wait, mddev->sb_dirty==0);
3534 }
3535
3536 void md_write_end(mddev_t *mddev)
3537 {
3538         if (atomic_dec_and_test(&mddev->writes_pending)) {
3539                 if (mddev->safemode == 2)
3540                         md_wakeup_thread(mddev->thread);
3541                 else
3542                         mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay);
3543         }
3544 }
3545
3546 static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
3547
3548 #define SYNC_MARKS      10
3549 #define SYNC_MARK_STEP  (3*HZ)
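/*
 * Rolling window of SYNC_MARKS timestamped sector counts, stepped every
 * SYNC_MARK_STEP jiffies; resync_mark/resync_mark_cnt always point at
 * the oldest sample, which status_resync() above uses for the speed and
 * ETA estimates.
 */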
3550 static void md_do_sync(mddev_t *mddev)
3551 {
3552         mddev_t *mddev2;
3553         unsigned int currspeed = 0,
3554                  window;
3555         sector_t max_sectors,j, io_sectors;
3556         unsigned long mark[SYNC_MARKS];
3557         sector_t mark_cnt[SYNC_MARKS];
3558         int last_mark,m;
3559         struct list_head *tmp;
3560         sector_t last_check;
3561         int skipped = 0;
3562
3563         /* just in case the thread restarts... */
3564         if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
3565                 return;
3566
3567         /* we overload curr_resync somewhat here.
3568          * 0 == not engaged in resync at all
3569          * 2 == checking that there is no conflict with another sync
3570          * 1 == like 2, but have yielded to allow conflicting resync to
3571          *              commence
3572          * other == active in resync - this many blocks
3573          *
3574          * Before starting a resync we must have set curr_resync to
3575          * 2, and then checked that every "conflicting" array has curr_resync
3576          * less than ours.  When we find one that is the same or higher
3577          * we wait on resync_wait.  To avoid deadlock, we reduce curr_resync
3578          * to 1 if we choose to yield (based arbitrarily on the address of the mddev structure).
3579          * This will mean we have to start checking from the beginning again.
3580          *
3581          */
3582
3583         do {
3584                 mddev->curr_resync = 2;
3585
3586         try_again:
3587                 if (signal_pending(current)) {
3588                         flush_signals(current);
3589                         goto skip;
3590                 }
3591                 ITERATE_MDDEV(mddev2,tmp) {
3592                         if (mddev2 == mddev)
3593                                 continue;
3594                         if (mddev2->curr_resync && 
3595                             match_mddev_units(mddev,mddev2)) {
3596                                 DEFINE_WAIT(wq);
3597                                 if (mddev < mddev2 && mddev->curr_resync == 2) {
3598                                         /* arbitrarily yield */
3599                                         mddev->curr_resync = 1;
3600                                         wake_up(&resync_wait);
3601                                 }
3602                                 if (mddev > mddev2 && mddev->curr_resync == 1)
3603                                         /* no need to wait here, we can wait the next
3604                                          * time 'round when curr_resync == 2
3605                                          */
3606                                         continue;
3607                                 prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
3608                                 if (!signal_pending(current)
3609                                     && mddev2->curr_resync >= mddev->curr_resync) {
3610                                         printk(KERN_INFO "md: delaying resync of %s"
3611                                                " until %s has finished resync (they"
3612                                                " share one or more physical units)\n",
3613                                                mdname(mddev), mdname(mddev2));
3614                                         mddev_put(mddev2);
3615                                         schedule();
3616                                         finish_wait(&resync_wait, &wq);
3617                                         goto try_again;
3618                                 }
3619                                 finish_wait(&resync_wait, &wq);
3620                         }
3621                 }
3622         } while (mddev->curr_resync < 2);
3623
3624         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
3625                 /* resync follows the size requested by the personality,
3626                  * which defaults to physical size, but can be virtual size
3627                  */
3628                 max_sectors = mddev->resync_max_sectors;
3629         else
3630                 /* recovery follows the physical size of devices */
3631                 max_sectors = mddev->size << 1;
3632
3633         printk(KERN_INFO "md: syncing RAID array %s\n", mdname(mddev));
3634         printk(KERN_INFO "md: minimum _guaranteed_ reconstruction speed:"
3635                 " %d KB/sec/disk.\n", sysctl_speed_limit_min);
3636         printk(KERN_INFO "md: using maximum available idle IO bandwidth "
3637                "(but not more than %d KB/sec) for reconstruction.\n",
3638                sysctl_speed_limit_max);
3639
3640         is_mddev_idle(mddev); /* this also initializes IO event counters */
3641         /* we don't use the checkpoint if there's a bitmap */
3642         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && !mddev->bitmap)
3643                 j = mddev->recovery_cp;
3644         else
3645                 j = 0;
3646         io_sectors = 0;
3647         for (m = 0; m < SYNC_MARKS; m++) {
3648                 mark[m] = jiffies;
3649                 mark_cnt[m] = io_sectors;
3650         }
3651         last_mark = 0;
3652         mddev->resync_mark = mark[last_mark];
3653         mddev->resync_mark_cnt = mark_cnt[last_mark];
3654
3655         /*
3656          * Tune reconstruction:
3657          */
3658         window = 32*(PAGE_SIZE/512);
3659         printk(KERN_INFO "md: using %dk window, over a total of %llu blocks.\n",
3660                 window/2,(unsigned long long) max_sectors/2);
3661
3662         atomic_set(&mddev->recovery_active, 0);
3663         init_waitqueue_head(&mddev->recovery_wait);
3664         last_check = 0;
3665
3666         if (j>2) {
3667                 printk(KERN_INFO 
3668                         "md: resuming recovery of %s from checkpoint.\n",
3669                         mdname(mddev));
3670                 mddev->curr_resync = j;
3671         }
3672
3673         while (j < max_sectors) {
3674                 sector_t sectors;
3675
3676                 skipped = 0;
3677                 sectors = mddev->pers->sync_request(mddev, j, &skipped,
3678                                             currspeed < sysctl_speed_limit_min);
3679                 if (sectors == 0) {
3680                         set_bit(MD_RECOVERY_ERR, &mddev->recovery);
3681                         goto out;
3682                 }
3683
3684                 if (!skipped) { /* actual IO requested */
3685                         io_sectors += sectors;
3686                         atomic_add(sectors, &mddev->recovery_active);
3687                 }
3688
3689                 j += sectors;
3690                 if (j>1) mddev->curr_resync = j;
3691
3692
3693                 if (last_check + window > io_sectors || j == max_sectors)
3694                         continue;
3695
3696                 last_check = io_sectors;
3697
3698                 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery) ||
3699                     test_bit(MD_RECOVERY_ERR, &mddev->recovery))
3700                         break;
3701
3702         repeat:
3703                 if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) {
3704                         /* step marks */
3705                         int next = (last_mark+1) % SYNC_MARKS;
3706
3707                         mddev->resync_mark = mark[next];
3708                         mddev->resync_mark_cnt = mark_cnt[next];
3709                         mark[next] = jiffies;
3710                         mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
3711                         last_mark = next;
3712                 }
3713
3714
3715                 if (signal_pending(current)) {
3716                         /*
3717                          * got a signal, exit.
3718                          */
3719                         printk(KERN_INFO 
3720                                 "md: md_do_sync() got signal ... exiting\n");
3721                         flush_signals(current);
3722                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
3723                         goto out;
3724                 }
3725
3726                 /*
3727          * this loop exits only when we are slower than the 'hard'
3728          * speed limit, or when the system was IO-idle for
3729          * a jiffy.
3730                  * the system might be non-idle CPU-wise, but we only care
3731                  * about not overloading the IO subsystem. (things like an
3732                  * e2fsck being done on the RAID array should execute fast)
3733                  */
3734                 mddev->queue->unplug_fn(mddev->queue);
3735                 cond_resched();
3736
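                /* currspeed is in KB/sec: sectors since the oldest mark,
                 * halved to KB, over whole seconds elapsed; the inner +1
                 * avoids division by zero and the outer +1 keeps the
                 * result nonzero.
                 */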
3737                 currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2
3738                         /((jiffies-mddev->resync_mark)/HZ +1) +1;
3739
3740                 if (currspeed > sysctl_speed_limit_min) {
3741                         if ((currspeed > sysctl_speed_limit_max) ||
3742                                         !is_mddev_idle(mddev)) {
3743                                 msleep_interruptible(250);
3744                                 goto repeat;
3745                         }
3746                 }
3747         }
3748         printk(KERN_INFO "md: %s: sync done.\n",mdname(mddev));
3749         /*
3750          * this also signals 'finished resyncing' to md_stop
3751          */
3752  out:
3753         mddev->queue->unplug_fn(mddev->queue);
3754
3755         wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
3756
3757         /* tell personality that we are finished */
3758         mddev->pers->sync_request(mddev, max_sectors, &skipped, 1);
3759
3760         if (!test_bit(MD_RECOVERY_ERR, &mddev->recovery) &&
3761             mddev->curr_resync > 2 &&
3762             mddev->curr_resync >= mddev->recovery_cp) {
3763                 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
3764                         printk(KERN_INFO 
3765                                 "md: checkpointing recovery of %s.\n",
3766                                 mdname(mddev));
3767                         mddev->recovery_cp = mddev->curr_resync;
3768                 } else
3769                         mddev->recovery_cp = MaxSector;
3770         }
3771
3772  skip:
3773         mddev->curr_resync = 0;
3774         wake_up(&resync_wait);
3775         set_bit(MD_RECOVERY_DONE, &mddev->recovery);
3776         md_wakeup_thread(mddev->thread);
3777 }
3778
3779
3780 /*
3781  * This routine is regularly called by all per-raid-array threads to
3782  * deal with generic issues like resync and super-block update.
3783  * Raid personalities that don't have a thread (linear/raid0) do not
3784  * need this as they never do any recovery or update the superblock.
3785  *
3786  * It does not do any resync itself, but rather "forks" off other threads
3787  * to do that as needed.
3788  * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
3789  * "->recovery" and create a thread at ->sync_thread.
3790  * When the thread finishes it sets MD_RECOVERY_DONE (and might set MD_RECOVERY_ERR)
3791  * and wakes up this thread, which will reap the sync thread and finish up.
3792  * This thread also removes any faulty devices (with nr_pending == 0).
3793  *
3794  * The overall approach is:
3795  *  1/ if the superblock needs updating, update it.
3796  *  2/ If a recovery thread is running, don't do anything else.
3797  *  3/ If recovery has finished, clean up, possibly marking spares active.
3798  *  4/ If there are any faulty devices, remove them.
3799  *  5/ If array is degraded, try to add spare devices
3800  *  6/ If array has spares or is not in-sync, start a resync thread.
3801  */
3802 void md_check_recovery(mddev_t *mddev)
3803 {
3804         mdk_rdev_t *rdev;
3805         struct list_head *rtmp;
3806
3807
3808         if (mddev->bitmap)
3809                 bitmap_daemon_work(mddev->bitmap);
3810
3811         if (mddev->ro)
3812                 return;
3813
3814         if (signal_pending(current)) {
3815                 if (mddev->pers->sync_request) {
3816                         printk(KERN_INFO "md: %s in immediate safe mode\n",
3817                                mdname(mddev));
3818                         mddev->safemode = 2;
3819                 }
3820                 flush_signals(current);
3821         }
3822
3823         if ( ! (
3824                 mddev->sb_dirty ||
3825                 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
3826                 test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
3827                 (mddev->safemode == 1) ||
3828                 (mddev->safemode == 2 && ! atomic_read(&mddev->writes_pending)
3829                  && !mddev->in_sync && mddev->recovery_cp == MaxSector)
3830                 ))
3831                 return;
3832
3833         if (mddev_trylock(mddev)==0) {
3834                 int spares =0;
3835
3836                 spin_lock(&mddev->write_lock);
3837                 if (mddev->safemode && !atomic_read(&mddev->writes_pending) &&
3838                     !mddev->in_sync && mddev->recovery_cp == MaxSector) {
3839                         mddev->in_sync = 1;
3840                         mddev->sb_dirty = 1;
3841                 }
3842                 if (mddev->safemode == 1)
3843                         mddev->safemode = 0;
3844                 spin_unlock(&mddev->write_lock);
3845
3846                 if (mddev->sb_dirty)
3847                         md_update_sb(mddev);
3848
3849
3850                 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
3851                     !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
3852                         /* resync/recovery still happening */
3853                         clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3854                         goto unlock;
3855                 }
3856                 if (mddev->sync_thread) {
3857                         /* resync has finished, collect result */
3858                         md_unregister_thread(mddev->sync_thread);
3859                         mddev->sync_thread = NULL;
3860                         if (!test_bit(MD_RECOVERY_ERR, &mddev->recovery) &&
3861                             !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
3862                                 /* success...*/
3863                                 /* activate any spares */
3864                                 mddev->pers->spare_active(mddev);
3865                         }
3866                         md_update_sb(mddev);
3867
3868                         /* if array is no-longer degraded, then any saved_raid_disk
3869                          * information must be scrapped
3870                          */
3871                         if (!mddev->degraded)
3872                                 ITERATE_RDEV(mddev,rdev,rtmp)
3873                                         rdev->saved_raid_disk = -1;
3874
3875                         mddev->recovery = 0;
3876                         /* flag recovery needed just to double check */
3877                         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3878                         goto unlock;
3879                 }
3880                 if (mddev->recovery)
3881                         /* probably just the RECOVERY_NEEDED flag */
3882                         mddev->recovery = 0;
3883
3884                 /* no recovery is running.
3885                  * remove any failed drives, then
3886                  * add spares if possible.
3887                  * Spares are also removed and re-added, to allow
3888                  * the personality to fail the re-add.
3889                  */
3890                 ITERATE_RDEV(mddev,rdev,rtmp)
3891                         if (rdev->raid_disk >= 0 &&
3892                             (rdev->faulty || ! rdev->in_sync) &&
3893                             atomic_read(&rdev->nr_pending)==0) {
3894                                 if (mddev->pers->hot_remove_disk(mddev, rdev->raid_disk)==0)
3895                                         rdev->raid_disk = -1;
3896                         }
3897
3898                 if (mddev->degraded) {
3899                         ITERATE_RDEV(mddev,rdev,rtmp)
3900                                 if (rdev->raid_disk < 0
3901                                     && !rdev->faulty) {
3902                                         if (mddev->pers->hot_add_disk(mddev,rdev))
3903                                                 spares++;
3904                                         else
3905                                                 break;
3906                                 }
3907                 }
3908
3909                 if (!spares && (mddev->recovery_cp == MaxSector)) {
3910                         /* nothing we can do ... */
3911                         goto unlock;
3912                 }
3913                 if (mddev->pers->sync_request) {
3914                         set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
3915                         if (!spares)
3916                                 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
3917                         if (spares && mddev->bitmap && ! mddev->bitmap->file) {
3918                                 /* We are adding a device or devices to an array
3919                                  * which has the bitmap stored on all devices.
3920                                  * So make sure all bitmap pages get written
3921                                  */
3922                                 bitmap_write_all(mddev->bitmap);
3923                         }
3924                         mddev->sync_thread = md_register_thread(md_do_sync,
3925                                                                 mddev,
3926                                                                 "%s_resync");
3927                         if (!mddev->sync_thread) {
3928                                 printk(KERN_ERR "%s: could not start resync"
3929                                         " thread...\n", 
3930                                         mdname(mddev));
3931                                 /* leave the spares where they are, it shouldn't hurt */
3932                                 mddev->recovery = 0;
3933                         } else {
3934                                 md_wakeup_thread(mddev->sync_thread);
3935                         }
3936                 }
3937         unlock:
3938                 mddev_unlock(mddev);
3939         }
3940 }
3941
3942 static int md_notify_reboot(struct notifier_block *this,
3943                             unsigned long code, void *x)
3944 {
3945         struct list_head *tmp;
3946         mddev_t *mddev;
3947
3948         if ((code == SYS_DOWN) || (code == SYS_HALT) || (code == SYS_POWER_OFF)) {
3949
3950                 printk(KERN_INFO "md: stopping all md devices.\n");
3951
3952                 ITERATE_MDDEV(mddev,tmp)
3953                         if (mddev_trylock(mddev)==0)
3954                                 do_md_stop (mddev, 1);
3955                 /*
3956                  * certain more exotic SCSI devices are known to be
3957                  * volatile with respect to too-early system reboots. While the
3958                  * right place to handle this issue is the given
3959                  * driver, we do want to have a safe RAID driver ...
3960                  */
3961                 mdelay(1000);
3962         }
3963         return NOTIFY_DONE;
3964 }
3965
3966 static struct notifier_block md_notifier = {
3967         .notifier_call  = md_notify_reboot,
3968         .next           = NULL,
3969         .priority       = INT_MAX, /* before any real devices */
3970 };
3971
3972 static void md_geninit(void)
3973 {
3974         struct proc_dir_entry *p;
3975
3976         dprintk("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));
3977
3978         p = create_proc_entry("mdstat", S_IRUGO, NULL);
3979         if (p)
3980                 p->proc_fops = &md_seq_fops;
3981 }
3982
3983 static int __init md_init(void)
3984 {
3985         int minor;
3986
3987         printk(KERN_INFO "md: md driver %d.%d.%d MAX_MD_DEVS=%d,"
3988                         " MD_SB_DISKS=%d\n",
3989                         MD_MAJOR_VERSION, MD_MINOR_VERSION,
3990                         MD_PATCHLEVEL_VERSION, MAX_MD_DEVS, MD_SB_DISKS);
3991         printk(KERN_INFO "md: bitmap version %d.%d\n", BITMAP_MAJOR,
3992                         BITMAP_MINOR);
3993
3994         if (register_blkdev(MAJOR_NR, "md"))
3995                 return -1;
3996         if ((mdp_major=register_blkdev(0, "mdp"))<=0) {
3997                 unregister_blkdev(MAJOR_NR, "md");
3998                 return -1;
3999         }
4000         devfs_mk_dir("md");
4001         blk_register_region(MKDEV(MAJOR_NR, 0), MAX_MD_DEVS, THIS_MODULE,
4002                                 md_probe, NULL, NULL);
4003         blk_register_region(MKDEV(mdp_major, 0), MAX_MD_DEVS<<MdpMinorShift, THIS_MODULE,
4004                             md_probe, NULL, NULL);
4005
4006         for (minor=0; minor < MAX_MD_DEVS; ++minor)
4007                 devfs_mk_bdev(MKDEV(MAJOR_NR, minor),
4008                                 S_IFBLK|S_IRUSR|S_IWUSR,
4009                                 "md/%d", minor);
4010
4011         for (minor=0; minor < MAX_MD_DEVS; ++minor)
4012                 devfs_mk_bdev(MKDEV(mdp_major, minor<<MdpMinorShift),
4013                               S_IFBLK|S_IRUSR|S_IWUSR,
4014                               "md/mdp%d", minor);
4015
4016
4017         register_reboot_notifier(&md_notifier);
4018         raid_table_header = register_sysctl_table(raid_root_table, 1);
4019
4020         md_geninit();
4021         return 0;
4022 }
4023
4024
4025 #ifndef MODULE
4026
4027 /*
4028  * Searches all registered partitions for autorun RAID arrays
4029  * at boot time.
4030  */
4031 static dev_t detected_devices[128];
4032 static int dev_cnt;
4033
4034 void md_autodetect_dev(dev_t dev)
4035 {
4036         if (dev_cnt >= 0 && dev_cnt < 127)
4037                 detected_devices[dev_cnt++] = dev;
4038 }
4039
4040
4041 static void autostart_arrays(int part)
4042 {
4043         mdk_rdev_t *rdev;
4044         int i;
4045
4046         printk(KERN_INFO "md: Autodetecting RAID arrays.\n");
4047
4048         for (i = 0; i < dev_cnt; i++) {
4049                 dev_t dev = detected_devices[i];
4050
4051                 rdev = md_import_device(dev,0, 0);
4052                 if (IS_ERR(rdev))
4053                         continue;
4054
4055                 if (rdev->faulty) {
4056                         MD_BUG();
4057                         continue;
4058                 }
4059                 list_add(&rdev->same_set, &pending_raid_disks);
4060         }
4061         dev_cnt = 0;
4062
4063         autorun_devices(part);
4064 }
4065
4066 #endif
4067
4068 static __exit void md_exit(void)
4069 {
4070         mddev_t *mddev;
4071         struct list_head *tmp;
4072         int i;
4073         blk_unregister_region(MKDEV(MAJOR_NR,0), MAX_MD_DEVS);
4074         blk_unregister_region(MKDEV(mdp_major,0), MAX_MD_DEVS << MdpMinorShift);
4075         for (i=0; i < MAX_MD_DEVS; i++)
4076                 devfs_remove("md/%d", i);
4077         for (i=0; i < MAX_MD_DEVS; i++)
4078                 devfs_remove("md/mdp%d", i);
4079
4080         devfs_remove("md");
4081
4082         unregister_blkdev(MAJOR_NR,"md");
4083         unregister_blkdev(mdp_major, "mdp");
4084         unregister_reboot_notifier(&md_notifier);
4085         unregister_sysctl_table(raid_table_header);
4086         remove_proc_entry("mdstat", NULL);
4087         ITERATE_MDDEV(mddev,tmp) {
4088                 struct gendisk *disk = mddev->gendisk;
4089                 if (!disk)
4090                         continue;
4091                 export_array(mddev);
4092                 del_gendisk(disk);
4093                 put_disk(disk);
4094                 mddev->gendisk = NULL;
4095                 mddev_put(mddev);
4096         }
4097 }
4098
4099 module_init(md_init)
4100 module_exit(md_exit)
4101
4102 EXPORT_SYMBOL(register_md_personality);
4103 EXPORT_SYMBOL(unregister_md_personality);
4104 EXPORT_SYMBOL(md_error);
4105 EXPORT_SYMBOL(md_done_sync);
4106 EXPORT_SYMBOL(md_write_start);
4107 EXPORT_SYMBOL(md_write_end);
4108 EXPORT_SYMBOL(md_register_thread);
4109 EXPORT_SYMBOL(md_unregister_thread);
4110 EXPORT_SYMBOL(md_wakeup_thread);
4111 EXPORT_SYMBOL(md_print_devices);
4112 EXPORT_SYMBOL(md_check_recovery);
4113 MODULE_LICENSE("GPL");
4114 MODULE_ALIAS("md");
4115 MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);