diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 62fde23..335f490 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -100,6 +100,12 @@ static int create_strip_zones(mddev_t *mddev)
                printk(KERN_INFO "raid0: looking at %s\n",
                        bdevname(rdev1->bdev,b));
                c = 0;
+
+               /* round size to chunk_size */
+               sectors = rdev1->sectors;
+               sector_div(sectors, mddev->chunk_sectors);
+               rdev1->sectors = sectors * mddev->chunk_sectors;
+
                list_for_each_entry(rdev2, &mddev->disks, same_set) {
                        printk(KERN_INFO "raid0:   comparing %s(%llu)",
                               bdevname(rdev1->bdev,b),
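
For reference, a minimal userspace sketch of the rounding step above. sector_div_emul() is a stand-in for the kernel's sector_div(), which divides in place and returns the remainder; multiplying the quotient back by chunk_sectors truncates each member device to a whole number of chunks. The sizes are hypothetical.

    #include <stdio.h>
    #include <stdint.h>

    typedef uint64_t sector_t;

    /* stand-in for the kernel's sector_div(): divide in place,
     * return the remainder */
    static uint32_t sector_div_emul(sector_t *n, uint32_t base)
    {
            uint32_t rem = *n % base;
            *n /= base;
            return rem;
    }

    int main(void)
    {
            sector_t sectors = 1000005;     /* example rdev size in sectors */
            uint32_t chunk_sectors = 128;   /* 64 KiB chunks */

            sector_div_emul(&sectors, chunk_sectors);
            printf("rounded: %llu sectors\n",
                   (unsigned long long)(sectors * chunk_sectors));
            return 0;
    }
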
@@ -164,8 +170,8 @@ static int create_strip_zones(mddev_t *mddev)
                }
                dev[j] = rdev1;
 
-               blk_queue_stack_limits(mddev->queue,
-                                      rdev1->bdev->bd_disk->queue);
+               disk_stack_limits(mddev->gendisk, rdev1->bdev,
+                                 rdev1->data_offset << 9);
                /* as we don't honour merge_bvec_fn, we must never risk
                 * violating it, so limit ->max_sector to one PAGE, as
                 * a one page request is never in violation.
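
The switch to disk_stack_limits() also folds in the member's start offset, which the function takes in bytes; data_offset is kept in 512-byte sectors, hence the << 9. A trivial check with a hypothetical offset:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t data_offset = 2048;                /* sectors */
            assert((data_offset << 9) == 2048 * 512);   /* 1 MiB in bytes */
            return 0;
    }
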
@@ -234,6 +240,21 @@ static int create_strip_zones(mddev_t *mddev)
        mddev->queue->backing_dev_info.congested_fn = raid0_congested;
        mddev->queue->backing_dev_info.congested_data = mddev;
 
+       /*
+        * now since we have the hard sector sizes, we can make sure
+        * chunk size is a multiple of that sector size
+        */
+       if ((mddev->chunk_sectors << 9) % queue_logical_block_size(mddev->queue)) {
+               printk(KERN_ERR "%s chunk_size of %d not valid\n",
+                      mdname(mddev),
+                      mddev->chunk_sectors << 9);
+               goto abort;
+       }
+
+       blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
+       blk_queue_io_opt(mddev->queue,
+                        (mddev->chunk_sectors << 9) * mddev->raid_disks);
+
        printk(KERN_INFO "raid0: done.\n");
        mddev->private = conf;
        return 0;
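
A sketch of the validation and hint arithmetic above, assuming 512-byte sectors and hypothetical values: the chunk must be a multiple of the logical block size, io_min advertises one chunk, and io_opt one full stripe.

    #include <stdio.h>

    int main(void)
    {
            unsigned int chunk_sectors = 128;   /* 64 KiB chunk */
            unsigned int raid_disks = 4;
            unsigned int lbs = 512;             /* logical block size */
            unsigned int chunk_bytes = chunk_sectors << 9;

            if (chunk_bytes % lbs) {
                    fprintf(stderr, "chunk_size of %u not valid\n",
                            chunk_bytes);
                    return 1;
            }
            printf("io_min=%u io_opt=%u\n",
                   chunk_bytes, chunk_bytes * raid_disks);
            return 0;
    }
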
@@ -260,10 +281,15 @@ static int raid0_mergeable_bvec(struct request_queue *q,
        mddev_t *mddev = q->queuedata;
        sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
        int max;
-       unsigned int chunk_sectors = mddev->chunk_size >> 9;
+       unsigned int chunk_sectors = mddev->chunk_sectors;
        unsigned int bio_sectors = bvm->bi_size >> 9;
 
-       max =  (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
+       if (is_power_of_2(chunk_sectors))
+               max = (chunk_sectors - ((sector & (chunk_sectors - 1))
+                                       + bio_sectors)) << 9;
+       else
+               max = (chunk_sectors - (sector_div(sector, chunk_sectors)
+                                       + bio_sectors)) << 9;
        if (max < 0) max = 0; /* bio_add cannot handle a negative return */
        if (max <= biovec->bv_len && bio_sectors == 0)
                return biovec->bv_len;
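
Both branches above compute the same quantity, the room left in the current chunk in bytes; the mask form is just cheaper when chunk_sectors is a power of 2. A userspace sketch with the kernel helpers replaced by plain C (it multiplies by 512 where the kernel shifts by 9, to keep the negative case well defined):

    #include <stdio.h>
    #include <stdint.h>

    static int room_in_chunk(uint64_t sector, unsigned int chunk_sects,
                             unsigned int bio_sectors)
    {
            int64_t in_chunk, max;

            if ((chunk_sects & (chunk_sects - 1)) == 0)     /* power of 2 */
                    in_chunk = sector & (chunk_sects - 1);
            else
                    in_chunk = sector % chunk_sects;
            max = ((int64_t)chunk_sects - (in_chunk + bio_sectors)) * 512;
            return max < 0 ? 0 : (int)max;  /* bio_add cannot take a negative */
    }

    int main(void)
    {
            /* 100 sectors into a 96-sector chunk leaves 92 sectors of room */
            printf("%d bytes\n", room_in_chunk(100, 96, 0));
            return 0;
    }
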
@@ -289,11 +315,13 @@ static int raid0_run(mddev_t *mddev)
 {
        int ret;
 
-       if (mddev->chunk_size == 0) {
-               printk(KERN_ERR "md/raid0: non-zero chunk size required.\n");
+       if (mddev->chunk_sectors == 0) {
+               printk(KERN_ERR "md/raid0: chunk size must be set.\n");
                return -EINVAL;
        }
-       blk_queue_max_sectors(mddev->queue, mddev->chunk_size >> 9);
+       if (md_check_no_bitmap(mddev))
+               return -EINVAL;
+       blk_queue_max_sectors(mddev->queue, mddev->chunk_sectors);
        mddev->queue->queue_lock = &mddev->queue->__queue_lock;
 
        ret = create_strip_zones(mddev);
@@ -315,7 +343,8 @@ static int raid0_run(mddev_t *mddev)
         * chunksize should be used in that case.
         */
        {
-               int stripe = mddev->raid_disks * mddev->chunk_size / PAGE_SIZE;
+               int stripe = mddev->raid_disks *
+                       (mddev->chunk_sectors << 9) / PAGE_SIZE;
                if (mddev->queue->backing_dev_info.ra_pages < 2* stripe)
                        mddev->queue->backing_dev_info.ra_pages = 2* stripe;
        }
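
Worked example of the readahead sizing above, with assumed numbers (4 KiB pages, 4 disks, 64 KiB chunks): one stripe spans 64 pages, so ra_pages is raised to at least two stripes' worth.

    #include <stdio.h>

    int main(void)
    {
            unsigned int raid_disks = 4;
            unsigned int chunk_sectors = 128;   /* 64 KiB */
            unsigned long page_size = 4096;
            unsigned long ra_pages = 32;        /* assumed current default */
            int stripe = raid_disks * (chunk_sectors << 9) / page_size;

            if (ra_pages < 2 * stripe)
                    ra_pages = 2 * stripe;
            printf("stripe=%d pages, ra_pages=%lu\n", stripe, ra_pages);
            return 0;
    }
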
@@ -356,15 +385,65 @@ static struct strip_zone *find_zone(struct raid0_private_data *conf,
        BUG();
 }
 
-static int raid0_make_request (struct request_queue *q, struct bio *bio)
+/*
+ * Remap the bio to the target device. We separate two flows:
+ * a power-of-2 flow and a general flow, for the sake of performance.
+ */
+static mdk_rdev_t *map_sector(mddev_t *mddev, struct strip_zone *zone,
+                               sector_t sector, sector_t *sector_offset)
 {
-       mddev_t *mddev = q->queuedata;
-       unsigned int sect_in_chunk, chunksect_bits, chunk_sects;
+       unsigned int sect_in_chunk;
+       sector_t chunk;
        raid0_conf_t *conf = mddev->private;
+       unsigned int chunk_sects = mddev->chunk_sectors;
+
+       if (is_power_of_2(chunk_sects)) {
+               int chunksect_bits = ffz(~chunk_sects);
+               /* find the sector offset inside the chunk */
+               sect_in_chunk = sector & (chunk_sects - 1);
+               sector >>= chunksect_bits;
+               /* chunk in zone */
+               chunk = *sector_offset;
+               /* quotient is the chunk on the real device */
+               sector_div(chunk, zone->nb_dev << chunksect_bits);
+       } else {
+               sect_in_chunk = sector_div(sector, chunk_sects);
+               chunk = *sector_offset;
+               sector_div(chunk, chunk_sects * zone->nb_dev);
+       }
+       /*
+        * Position the bio over the real device:
+        * real sector = chunk in device + start of zone
+        *               + position in the chunk
+        */
+       *sector_offset = (chunk * chunk_sects) + sect_in_chunk;
+       return conf->devlist[(zone - conf->strip_zone)*mddev->raid_disks
+                            + sector_div(sector, zone->nb_dev)];
+}
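+
+/*
+ * The general flow above is plain long division; a userspace
+ * walk-through with hypothetical zone numbers (96-sector chunk,
+ * 3 devices in the zone), mirroring the sector_div() steps:
+ *
+ *     chunk_sects = 96, nb_dev = 3
+ *     sector = 1000, zone_offset = 1000
+ *
+ *     sect_in_chunk = sector % chunk_sects;              -> 40
+ *     chunk = zone_offset / (chunk_sects * nb_dev);      -> 3
+ *     dev_index = (sector / chunk_sects) % nb_dev;       -> 1
+ *     dev_sector = chunk * chunk_sects + sect_in_chunk;  -> 328
+ *
+ * so the bio lands 328 sectors into the zone on device 1, before
+ * zone->dev_start and data_offset are added in raid0_make_request().
+ */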
+
+/*
+ * Does the IO span one chunk or more?
+ */
+static inline int is_io_in_chunk_boundary(mddev_t *mddev,
+                       unsigned int chunk_sects, struct bio *bio)
+{
+       if (likely(is_power_of_2(chunk_sects))) {
+               return chunk_sects >= ((bio->bi_sector & (chunk_sects - 1))
+                                       + (bio->bi_size >> 9));
+       } else {
+               sector_t sector = bio->bi_sector;
+               return chunk_sects >= (sector_div(sector, chunk_sects)
+                                               + (bio->bi_size >> 9));
+       }
+}
+
+static int raid0_make_request(struct request_queue *q, struct bio *bio)
+{
+       mddev_t *mddev = q->queuedata;
+       unsigned int chunk_sects;
+       sector_t sector_offset;
        struct strip_zone *zone;
        mdk_rdev_t *tmp_dev;
-       sector_t chunk;
-       sector_t sector, rsect, sector_offset;
        const int rw = bio_data_dir(bio);
        int cpu;
 
@@ -379,11 +458,9 @@ static int raid0_make_request (struct request_queue *q, struct bio *bio)
                      bio_sectors(bio));
        part_stat_unlock();
 
-       chunk_sects = mddev->chunk_size >> 9;
-       chunksect_bits = ffz(~chunk_sects);
-       sector = bio->bi_sector;
-
-       if (unlikely(chunk_sects < (bio->bi_sector & (chunk_sects - 1)) + (bio->bi_size >> 9))) {
+       chunk_sects = mddev->chunk_sectors;
+       if (unlikely(!is_io_in_chunk_boundary(mddev, chunk_sects, bio))) {
+               sector_t sector = bio->bi_sector;
                struct bio_pair *bp;
                /* Sanity check -- queue functions should prevent this happening */
                if (bio->bi_vcnt != 1 ||
@@ -392,7 +469,12 @@ static int raid0_make_request (struct request_queue *q, struct bio *bio)
                /* This is a one page bio that upper layers
                 * refuse to split for us, so we need to split it.
                 */
-               bp = bio_split(bio, chunk_sects - (bio->bi_sector & (chunk_sects - 1)));
+               if (likely(is_power_of_2(chunk_sects)))
+                       bp = bio_split(bio, chunk_sects - (sector &
+                                                          (chunk_sects - 1)));
+               else
+                       bp = bio_split(bio, chunk_sects -
+                                      sector_div(sector, chunk_sects));
                if (raid0_make_request(q, &bp->bio1))
                        generic_make_request(&bp->bio1);
                if (raid0_make_request(q, &bp->bio2))
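
The split point above is just the room left in the chunk, so the first fragment ends exactly on a chunk boundary. A sketch with the power-of-2 mask and the general remainder side by side (split_sectors() is a hypothetical helper, the numbers are examples):

    #include <stdio.h>
    #include <stdint.h>

    static unsigned int split_sectors(uint64_t sector, unsigned int chunk_sects)
    {
            if ((chunk_sects & (chunk_sects - 1)) == 0)     /* power of 2 */
                    return chunk_sects - (sector & (chunk_sects - 1));
            return chunk_sects - sector % chunk_sects;
    }

    int main(void)
    {
            /* a bio starting 40 sectors into a 96-sector chunk is split
             * after 56 sectors */
            printf("%u\n", split_sectors(1000, 96));
            return 0;
    }
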
@@ -401,24 +483,14 @@ static int raid0_make_request (struct request_queue *q, struct bio *bio)
                bio_pair_release(bp);
                return 0;
        }
-       sector_offset = sector;
-       zone = find_zone(conf, &sector_offset);
-       sect_in_chunk = bio->bi_sector & (chunk_sects - 1);
-       {
-               sector_t x = sector_offset >> chunksect_bits;
 
-               sector_div(x, zone->nb_dev);
-               chunk = x;
-
-               x = sector >> chunksect_bits;
-               tmp_dev = conf->devlist[(zone - conf->strip_zone)*mddev->raid_disks
-                                       + sector_div(x, zone->nb_dev)];
-       }
-       rsect = (chunk << chunksect_bits) + zone->dev_start + sect_in_chunk;
+       sector_offset = bio->bi_sector;
+       zone = find_zone(mddev->private, &sector_offset);
+       tmp_dev = map_sector(mddev, zone, bio->bi_sector,
+                            &sector_offset);
        bio->bi_bdev = tmp_dev->bdev;
-       bio->bi_sector = rsect + tmp_dev->data_offset;
-
+       bio->bi_sector = sector_offset + zone->dev_start +
+               tmp_dev->data_offset;
        /*
         * Let the main block layer submit the IO and resolve recursion:
         */
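
Putting the pieces together: map_sector() leaves the offset within the zone in sector_offset, and the final on-disk sector adds the zone's start and the member's data offset. A worked check using the hypothetical numbers from the earlier walk-through (dev_start and data_offset are assumed values):

    #include <assert.h>

    int main(void)
    {
            unsigned long long sector_offset = 328; /* from map_sector sketch */
            unsigned long long dev_start = 5000;    /* assumed zone->dev_start */
            unsigned long long data_offset = 2048;  /* assumed member offset */

            assert(sector_offset + dev_start + data_offset == 7376ULL);
            return 0;
    }
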
@@ -461,7 +533,7 @@ static void raid0_status(struct seq_file *seq, mddev_t *mddev)
                zone_start = conf->strip_zone[j].zone_end;
        }
 #endif
-       seq_printf(seq, " %dk chunks", mddev->chunk_size/1024);
+       seq_printf(seq, " %dk chunks", mddev->chunk_sectors / 2);
        return;
 }
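
chunk_sectors counts 512-byte sectors, so halving it yields the chunk size in KiB, matching what the old chunk_size/1024 printed. A quick check with an example value:

    #include <stdio.h>

    int main(void)
    {
            int chunk_sectors = 128;                      /* 64 KiB chunk */
            printf(" %dk chunks\n", chunk_sectors / 2);   /* " 64k chunks" */
            return 0;
    }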