md: have raid0 report its formation
drivers/md/raid0.c
/*
   raid0.c : Multiple Devices driver for Linux
             Copyright (C) 1994-96 Marc ZYNGIER
             <zyngier@ufr-info-p7.ibp.fr> or
             <maz@gloups.fdn.fr>
             Copyright (C) 1999, 2000 Ingo Molnar, Red Hat

   RAID-0 management functions.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#include <linux/blkdev.h>
#include <linux/seq_file.h>
#include "md.h"
#include "raid0.h"

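/*
 * raid0 keeps no request backlog of its own, so unplugging the array
 * simply unplugs the queue of every member device.
 */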
static void raid0_unplug(struct request_queue *q)
{
        mddev_t *mddev = q->queuedata;
        raid0_conf_t *conf = mddev->private;
        mdk_rdev_t **devlist = conf->devlist;
        int i;

        for (i = 0; i < mddev->raid_disks; i++) {
                struct request_queue *r_queue = bdev_get_queue(devlist[i]->bdev);

                blk_unplug(r_queue);
        }
}

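/*
 * The array is reported congested as soon as any member device's
 * backing device reports congestion for the queried bits.
 */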
static int raid0_congested(void *data, int bits)
{
        mddev_t *mddev = data;
        raid0_conf_t *conf = mddev->private;
        mdk_rdev_t **devlist = conf->devlist;
        int i, ret = 0;

        for (i = 0; i < mddev->raid_disks && !ret; i++) {
                struct request_queue *q = bdev_get_queue(devlist[i]->bdev);

                ret |= bdi_congested(&q->backing_dev_info, bits);
        }
        return ret;
}

/*
 * inform the user of the raid configuration
 */
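/*
 * Illustrative output for a hypothetical two-disk array md0 built from
 * two 1GiB members sda and sdb (names and sizes are made up):
 *
 *   ******* md0 configuration *********
 *   zone0=[sda/sdb/]
 *           zone offset=0kb device offset=0kb size=2097152kb
 *   **********************************
 */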
static void dump_zones(mddev_t *mddev)
{
        int j, k, h;
        sector_t zone_size = 0;
        sector_t zone_start = 0;
        char b[BDEVNAME_SIZE];
        raid0_conf_t *conf = mddev->private;
        printk(KERN_INFO "******* %s configuration *********\n",
                mdname(mddev));
        h = 0;
        for (j = 0; j < conf->nr_strip_zones; j++) {
                printk(KERN_INFO "zone%d=[", j);
                for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
                        printk("%s/",
                        bdevname(conf->devlist[j*mddev->raid_disks
                                                + k]->bdev, b));
                printk("]\n");

                zone_size  = conf->strip_zone[j].zone_end - zone_start;
                printk(KERN_INFO "        zone offset=%llukb "
                                "device offset=%llukb size=%llukb\n",
                        (unsigned long long)zone_start>>1,
                        (unsigned long long)conf->strip_zone[j].dev_start>>1,
                        (unsigned long long)zone_size>>1);
                zone_start = conf->strip_zone[j].zone_end;
        }
        printk(KERN_INFO "**********************************\n\n");
}

static int create_strip_zones(mddev_t *mddev)
{
        int i, c, j, err;
        sector_t curr_zone_end, sectors;
        mdk_rdev_t *smallest, *rdev1, *rdev2, *rdev, **dev;
        struct strip_zone *zone;
        int cnt;
        char b[BDEVNAME_SIZE];
        raid0_conf_t *conf = kzalloc(sizeof(*conf), GFP_KERNEL);

        if (!conf)
                return -ENOMEM;
        list_for_each_entry(rdev1, &mddev->disks, same_set) {
                printk(KERN_INFO "raid0: looking at %s\n",
                        bdevname(rdev1->bdev, b));
                c = 0;
                list_for_each_entry(rdev2, &mddev->disks, same_set) {
                        printk(KERN_INFO "raid0:   comparing %s(%llu)",
                               bdevname(rdev1->bdev, b),
                               (unsigned long long)rdev1->sectors);
                        printk(" with %s(%llu)\n",
                               bdevname(rdev2->bdev, b),
                               (unsigned long long)rdev2->sectors);
                        if (rdev2 == rdev1) {
                                printk(KERN_INFO "raid0:   END\n");
                                break;
                        }
                        if (rdev2->sectors == rdev1->sectors) {
                                /*
                                 * Not unique, don't count it as a new
                                 * group
                                 */
                                printk(KERN_INFO "raid0:   EQUAL\n");
                                c = 1;
                                break;
                        }
                        printk(KERN_INFO "raid0:   NOT EQUAL\n");
                }
                if (!c) {
                        printk(KERN_INFO "raid0:   ==> UNIQUE\n");
                        conf->nr_strip_zones++;
                        printk(KERN_INFO "raid0: %d zones\n",
                                conf->nr_strip_zones);
                }
        }
        printk(KERN_INFO "raid0: FINAL %d zones\n", conf->nr_strip_zones);
        err = -ENOMEM;
        conf->strip_zone = kzalloc(sizeof(struct strip_zone)*
                                conf->nr_strip_zones, GFP_KERNEL);
        if (!conf->strip_zone)
                goto abort;
        conf->devlist = kzalloc(sizeof(mdk_rdev_t*)*
                                conf->nr_strip_zones*mddev->raid_disks,
                                GFP_KERNEL);
        if (!conf->devlist)
                goto abort;

        /* The first zone must contain all devices, so here we check that
         * there is a proper alignment of slots to devices and find them all
         */
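        /* Worked example with hypothetical sizes: members of 100, 200 and
         * 200 sectors give two zones.  Zone 0 stripes the first 100 sectors
         * of all three devices (zone_end = 300); zone 1 stripes the
         * remaining 100 sectors of the two larger devices (zone_end = 500).
         */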
        zone = &conf->strip_zone[0];
        cnt = 0;
        smallest = NULL;
        dev = conf->devlist;
        err = -EINVAL;
        list_for_each_entry(rdev1, &mddev->disks, same_set) {
                int j = rdev1->raid_disk;

                if (j < 0 || j >= mddev->raid_disks) {
                        printk(KERN_ERR "raid0: bad disk number %d - "
                                "aborting!\n", j);
                        goto abort;
                }
                if (dev[j]) {
                        printk(KERN_ERR "raid0: multiple devices for %d - "
                                "aborting!\n", j);
                        goto abort;
                }
                dev[j] = rdev1;

                blk_queue_stack_limits(mddev->queue,
                                       rdev1->bdev->bd_disk->queue);
                /* as we don't honour merge_bvec_fn, we must never risk
                 * violating it, so limit ->max_sector to one PAGE, as
                 * a one page request is never in violation.
                 */

                if (rdev1->bdev->bd_disk->queue->merge_bvec_fn &&
                    queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
                        blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);

                if (!smallest || (rdev1->sectors < smallest->sectors))
                        smallest = rdev1;
                cnt++;
        }
        if (cnt != mddev->raid_disks) {
                printk(KERN_ERR "raid0: too few disks (%d of %d) - "
                        "aborting!\n", cnt, mddev->raid_disks);
                goto abort;
        }
        zone->nb_dev = cnt;
        zone->zone_end = smallest->sectors * cnt;

        curr_zone_end = zone->zone_end;

        /* now do the other zones */
        for (i = 1; i < conf->nr_strip_zones; i++) {
                zone = conf->strip_zone + i;
                dev = conf->devlist + i * mddev->raid_disks;

                printk(KERN_INFO "raid0: zone %d\n", i);
                zone->dev_start = smallest->sectors;
                smallest = NULL;
                c = 0;

                for (j = 0; j < cnt; j++) {
                        char b[BDEVNAME_SIZE];
                        rdev = conf->devlist[j];
                        printk(KERN_INFO "raid0: checking %s ...",
                                bdevname(rdev->bdev, b));
                        if (rdev->sectors <= zone->dev_start) {
                                printk(" nope.\n");
                                continue;
                        }
                        printk(" contained as device %d\n", c);
                        dev[c] = rdev;
                        c++;
                        if (!smallest || rdev->sectors < smallest->sectors) {
                                smallest = rdev;
                                printk(KERN_INFO "  (%llu) is smallest!\n",
                                        (unsigned long long)rdev->sectors);
                        }
                }

                zone->nb_dev = c;
                sectors = (smallest->sectors - zone->dev_start) * c;
                printk(KERN_INFO "raid0: zone->nb_dev: %d, sectors: %llu\n",
                        zone->nb_dev, (unsigned long long)sectors);

                curr_zone_end += sectors;
                zone->zone_end = curr_zone_end;

                printk(KERN_INFO "raid0: current zone start: %llu\n",
                        (unsigned long long)smallest->sectors);
        }
        mddev->queue->unplug_fn = raid0_unplug;
        mddev->queue->backing_dev_info.congested_fn = raid0_congested;
        mddev->queue->backing_dev_info.congested_data = mddev;

        printk(KERN_INFO "raid0: done.\n");
        mddev->private = conf;
        return 0;
abort:
        kfree(conf->strip_zone);
        kfree(conf->devlist);
        kfree(conf);
        mddev->private = NULL;
        return err;
}

/**
 *      raid0_mergeable_bvec -- tell the bio layer whether two requests can be merged
 *      @q: request queue
 *      @bvm: properties of the new bio
 *      @biovec: the request that could be merged to it
 *
 *      Return the number of bytes we can accept at this offset
 */
static int raid0_mergeable_bvec(struct request_queue *q,
                                struct bvec_merge_data *bvm,
                                struct bio_vec *biovec)
{
        mddev_t *mddev = q->queuedata;
        sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
        int max;
        unsigned int chunk_sectors = mddev->chunk_size >> 9;
        unsigned int bio_sectors = bvm->bi_size >> 9;
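        /* Worked example with made-up numbers: with 64KiB chunks
         * (chunk_sectors = 128), a 4-sector bio starting at offset 120
         * within its chunk leaves max = (128 - (120 + 4)) << 9 = 2048
         * bytes before the chunk boundary.
         */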
        max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
        if (max < 0)
                max = 0; /* bio_add cannot handle a negative return */
        if (max <= biovec->bv_len && bio_sectors == 0)
                return biovec->bv_len;
        else
                return max;
}

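/* A raid0 array's capacity is simply the sum of its member device sizes. */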
static sector_t raid0_size(mddev_t *mddev, sector_t sectors, int raid_disks)
{
        sector_t array_sectors = 0;
        mdk_rdev_t *rdev;

        WARN_ONCE(sectors || raid_disks,
                  "%s does not support generic reshape\n", __func__);

        list_for_each_entry(rdev, &mddev->disks, same_set)
                array_sectors += rdev->sectors;

        return array_sectors;
}

static int raid0_run(mddev_t *mddev)
{
        int ret;

        if (mddev->chunk_size == 0) {
                printk(KERN_ERR "md/raid0: non-zero chunk size required.\n");
                return -EINVAL;
        }
        blk_queue_max_sectors(mddev->queue, mddev->chunk_size >> 9);
        mddev->queue->queue_lock = &mddev->queue->__queue_lock;

        ret = create_strip_zones(mddev);
        if (ret < 0)
                return ret;

        /* calculate array device size */
        md_set_array_sectors(mddev, raid0_size(mddev, 0, 0));

        printk(KERN_INFO "raid0: md_size is %llu sectors.\n",
                (unsigned long long)mddev->array_sectors);
        /* calculate the max read-ahead size.
         * For read-ahead of large files to be effective, we need to
         * readahead at least twice a whole stripe, i.e. the number of
         * devices multiplied by chunk size times 2.
         * If an individual device has an ra_pages greater than the
         * chunk size, then we will not drive that device as hard as it
         * wants.  We consider this a configuration error: a larger
         * chunksize should be used in that case.
         */
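        /* Worked arithmetic for a hypothetical array: 4 disks with 64KiB
         * chunks and 4KiB pages give stripe = 4 * 65536 / 4096 = 64 pages,
         * so ra_pages is raised to at least 128 pages (512KiB).
         */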
        {
                int stripe = mddev->raid_disks * mddev->chunk_size / PAGE_SIZE;
                if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
                        mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
        }

        blk_queue_merge_bvec(mddev->queue, raid0_mergeable_bvec);
        dump_zones(mddev);
        return 0;
}

static int raid0_stop(mddev_t *mddev)
{
        raid0_conf_t *conf = mddev->private;

        blk_sync_queue(mddev->queue); /* the unplug fn references 'conf' */
        kfree(conf->strip_zone);
        kfree(conf->devlist);
        kfree(conf);
        mddev->private = NULL;
        return 0;
}

/* Find the zone which holds a particular offset.
 * Update *sectorp to be an offset in that zone.
 */
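/*
 * Example with a hypothetical zone table: given zone_end = {300, 500},
 * sector 350 falls in zone 1 and *sectorp becomes 350 - 300 = 50, the
 * offset within that zone.
 */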
static struct strip_zone *find_zone(struct raid0_private_data *conf,
                                    sector_t *sectorp)
{
        int i;
        struct strip_zone *z = conf->strip_zone;
        sector_t sector = *sectorp;

        for (i = 0; i < conf->nr_strip_zones; i++)
                if (sector < z[i].zone_end) {
                        if (i)
                                *sectorp = sector - z[i-1].zone_end;
                        return z + i;
                }
        BUG();
}

static int raid0_make_request(struct request_queue *q, struct bio *bio)
{
        mddev_t *mddev = q->queuedata;
        unsigned int sect_in_chunk, chunksect_bits, chunk_sects;
        raid0_conf_t *conf = mddev->private;
        struct strip_zone *zone;
        mdk_rdev_t *tmp_dev;
        sector_t chunk;
        sector_t sector, rsect, sector_offset;
        const int rw = bio_data_dir(bio);
        int cpu;

        if (unlikely(bio_barrier(bio))) {
                bio_endio(bio, -EOPNOTSUPP);
                return 0;
        }

        cpu = part_stat_lock();
        part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
        part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
                      bio_sectors(bio));
        part_stat_unlock();

        chunk_sects = mddev->chunk_size >> 9;
        chunksect_bits = ffz(~chunk_sects); /* log2(chunk_sects); chunk_sects is a power of two */
        sector = bio->bi_sector;

        if (unlikely(chunk_sects < (bio->bi_sector & (chunk_sects - 1)) + (bio->bi_size >> 9))) {
                struct bio_pair *bp;
                /* Sanity check -- queue functions should prevent this happening */
                if (bio->bi_vcnt != 1 ||
                    bio->bi_idx != 0)
                        goto bad_map;
                /* This is a one page bio that upper layers
                 * refuse to split for us, so we need to split it.
                 */
                bp = bio_split(bio, chunk_sects - (bio->bi_sector & (chunk_sects - 1)));
                if (raid0_make_request(q, &bp->bio1))
                        generic_make_request(&bp->bio1);
                if (raid0_make_request(q, &bp->bio2))
                        generic_make_request(&bp->bio2);

                bio_pair_release(bp);
                return 0;
        }
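        /* Worked mapping example with a hypothetical geometry: with 64KiB
         * chunks (chunk_sects = 128) and a 4-device zone 0 (dev_start = 0),
         * sector 1000 is array chunk 7 (1000 >> 7), which round-robin
         * places on device 3 as that device's chunk 1, so
         * rsect = (1 << 7) + 0 + (1000 & 127) = 232.
         */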
        sector_offset = sector;
        zone = find_zone(conf, &sector_offset);
        sect_in_chunk = bio->bi_sector & (chunk_sects - 1);
        {
                sector_t x = sector_offset >> chunksect_bits;

                sector_div(x, zone->nb_dev);
                chunk = x;

                x = sector >> chunksect_bits;
                tmp_dev = conf->devlist[(zone - conf->strip_zone)*mddev->raid_disks
                                        + sector_div(x, zone->nb_dev)];
        }
        rsect = (chunk << chunksect_bits) + zone->dev_start + sect_in_chunk;

        bio->bi_bdev = tmp_dev->bdev;
        bio->bi_sector = rsect + tmp_dev->data_offset;

        /*
         * Let the main block layer submit the IO and resolve recursion:
         */
        return 1;

bad_map:
        printk(KERN_ERR "raid0_make_request bug: can't convert block across chunks"
                " or bigger than %dk %llu %d\n", chunk_sects / 2,
                (unsigned long long)bio->bi_sector, bio->bi_size >> 10);

        bio_io_error(bio);
        return 0;
}

static void raid0_status(struct seq_file *seq, mddev_t *mddev)
{
#undef MD_DEBUG
#ifdef MD_DEBUG
        int j, k, h;
        char b[BDEVNAME_SIZE];
        raid0_conf_t *conf = mddev->private;

        sector_t zone_size;
        sector_t zone_start = 0;
        h = 0;

        for (j = 0; j < conf->nr_strip_zones; j++) {
                seq_printf(seq, "      z%d", j);
                seq_printf(seq, "=[");
                for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
                        seq_printf(seq, "%s/", bdevname(
                                conf->devlist[j*mddev->raid_disks + k]
                                                ->bdev, b));

                zone_size  = conf->strip_zone[j].zone_end - zone_start;
                seq_printf(seq, "] ze=%lld ds=%lld s=%lld\n",
                        (unsigned long long)zone_start>>1,
                        (unsigned long long)conf->strip_zone[j].dev_start>>1,
                        (unsigned long long)zone_size>>1);
                zone_start = conf->strip_zone[j].zone_end;
        }
#endif
        seq_printf(seq, " %dk chunks", mddev->chunk_size / 1024);
        return;
}

static struct mdk_personality raid0_personality =
{
        .name           = "raid0",
        .level          = 0,
        .owner          = THIS_MODULE,
        .make_request   = raid0_make_request,
        .run            = raid0_run,
        .stop           = raid0_stop,
        .status         = raid0_status,
        .size           = raid0_size,
};

static int __init raid0_init(void)
{
        return register_md_personality(&raid0_personality);
}

static void raid0_exit(void)
{
        unregister_md_personality(&raid0_personality);
}

module_init(raid0_init);
module_exit(raid0_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS("md-personality-2"); /* RAID0 */
MODULE_ALIAS("md-raid0");
MODULE_ALIAS("md-level-0");