[PATCH] Centralise definitions of sector_t and blkcnt_t
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 83425fb..0f82e12 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -56,11 +56,6 @@ static kmem_cache_t *requestq_cachep;
  */
 static kmem_cache_t *iocontext_cachep;
 
-static wait_queue_head_t congestion_wqh[2] = {
-               __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
-               __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
-       };
-
 /*
  * Controlling structure to kblockd
  */
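
The waiters do not disappear with this hunk: under this series the wait queues are presumably re-homed outside the block layer (mm/backing-dev.c in mainline 2.6.19), so that congestion can be tracked per backing_dev_info rather than only per request queue. A sketch of the moved state, assuming that layout:

    static wait_queue_head_t congestion_wqh[2] = {
                    __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
                    __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
            };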
@@ -112,35 +107,6 @@ static void blk_queue_congestion_threshold(struct request_queue *q)
        q->nr_congestion_off = nr;
 }
 
-/*
- * A queue has just exitted congestion.  Note this in the global counter of
- * congested queues, and wake up anyone who was waiting for requests to be
- * put back.
- */
-static void clear_queue_congested(request_queue_t *q, int rw)
-{
-       enum bdi_state bit;
-       wait_queue_head_t *wqh = &congestion_wqh[rw];
-
-       bit = (rw == WRITE) ? BDI_write_congested : BDI_read_congested;
-       clear_bit(bit, &q->backing_dev_info.state);
-       smp_mb__after_clear_bit();
-       if (waitqueue_active(wqh))
-               wake_up(wqh);
-}
-
-/*
- * A queue has just entered congestion.  Flag that in the queue's VM-visible
- * state flags and increment the global gounter of congested queues.
- */
-static void set_queue_congested(request_queue_t *q, int rw)
-{
-       enum bdi_state bit;
-
-       bit = (rw == WRITE) ? BDI_write_congested : BDI_read_congested;
-       set_bit(bit, &q->backing_dev_info.state);
-}
-
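
The removed helpers live on as thin queue-level wrappers around bdi-level primitives; the later hunks in __freed_request(), get_request() and queue_requests_store() simply switch to the blk_-prefixed names. A minimal sketch of the presumed split, following the mainline 2.6.19 layout (clear_bdi_congested()/set_bdi_congested() reuse the bodies deleted above):

    /* include/linux/blkdev.h (assumed location) */
    static inline void blk_clear_queue_congested(request_queue_t *q, int rw)
    {
            clear_bdi_congested(&q->backing_dev_info, rw);
    }

    static inline void blk_set_queue_congested(request_queue_t *q, int rw)
    {
            set_bdi_congested(&q->backing_dev_info, rw);
    }

    /* mm/backing-dev.c (assumed location): the old body, keyed on a bdi */
    void clear_bdi_congested(struct backing_dev_info *bdi, int rw)
    {
            enum bdi_state bit;
            wait_queue_head_t *wqh = &congestion_wqh[rw];

            bit = (rw == WRITE) ? BDI_write_congested : BDI_read_congested;
            clear_bit(bit, &bdi->state);
            smp_mb__after_clear_bit();
            if (waitqueue_active(wqh))
                    wake_up(wqh);
    }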
 /**
  * blk_get_backing_dev_info - get the address of a queue's backing_dev_info
  * @bdev:      device
@@ -159,7 +125,6 @@ struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
                ret = &q->backing_dev_info;
        return ret;
 }
-
 EXPORT_SYMBOL(blk_get_backing_dev_info);
 
 void blk_queue_activity_fn(request_queue_t *q, activity_fn *fn, void *data)
@@ -167,7 +132,6 @@ void blk_queue_activity_fn(request_queue_t *q, activity_fn *fn, void *data)
        q->activity_fn = fn;
        q->activity_data = data;
 }
-
 EXPORT_SYMBOL(blk_queue_activity_fn);
 
 /**
@@ -840,12 +804,7 @@ EXPORT_SYMBOL(blk_queue_dma_alignment);
  **/
 struct request *blk_queue_find_tag(request_queue_t *q, int tag)
 {
-       struct blk_queue_tag *bqt = q->queue_tags;
-
-       if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
-               return NULL;
-
-       return bqt->tag_index[tag];
+       return blk_map_queue_find_tag(q->queue_tags, tag);
 }
 
 EXPORT_SYMBOL(blk_queue_find_tag);
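
blk_queue_find_tag() is reduced to a wrapper so that drivers sharing one tag map across several queues can do the lookup without a queue. The old body presumably moves verbatim into the new helper (a static inline in include/linux/blkdev.h in mainline):

    static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
                                                         int tag)
    {
            if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
                    return NULL;

            return bqt->tag_index[tag];
    }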
@@ -2072,7 +2031,7 @@ static void __freed_request(request_queue_t *q, int rw)
        struct request_list *rl = &q->rq;
 
        if (rl->count[rw] < queue_congestion_off_threshold(q))
-               clear_queue_congested(q, rw);
+               blk_clear_queue_congested(q, rw);
 
        if (rl->count[rw] + 1 <= q->nr_requests) {
                if (waitqueue_active(&rl->wait[rw]))
@@ -2142,7 +2101,7 @@ static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
                                }
                        }
                }
-               set_queue_congested(q, rw);
+               blk_set_queue_congested(q, rw);
        }
 
        /*
@@ -2363,6 +2322,84 @@ void blk_insert_request(request_queue_t *q, struct request *rq,
 
 EXPORT_SYMBOL(blk_insert_request);
 
+static int __blk_rq_unmap_user(struct bio *bio)
+{
+       int ret = 0;
+
+       if (bio) {
+               if (bio_flagged(bio, BIO_USER_MAPPED))
+                       bio_unmap_user(bio);
+               else
+                       ret = bio_uncopy_user(bio);
+       }
+
+       return ret;
+}
+
+static int __blk_rq_map_user(request_queue_t *q, struct request *rq,
+                            void __user *ubuf, unsigned int len)
+{
+       unsigned long uaddr;
+       struct bio *bio, *orig_bio;
+       int reading, ret;
+
+       reading = rq_data_dir(rq) == READ;
+
+       /*
+        * if alignment requirement is satisfied, map in user pages for
+        * direct dma. else, set up kernel bounce buffers
+        */
+       uaddr = (unsigned long) ubuf;
+       if (!(uaddr & queue_dma_alignment(q)) && !(len & queue_dma_alignment(q)))
+               bio = bio_map_user(q, NULL, uaddr, len, reading);
+       else
+               bio = bio_copy_user(q, uaddr, len, reading);
+
+       if (IS_ERR(bio))
+               return PTR_ERR(bio);
+
+       orig_bio = bio;
+       blk_queue_bounce(q, &bio);
+       /*
+        * We link the bounce buffer in and could have to traverse it
+        * later so we have to get a ref to prevent it from being freed
+        */
+       bio_get(bio);
+
+       /*
+        * for most (all? don't know of any) queues we could
+        * skip grabbing the queue lock here. only drivers with
+        * a funky private ->back_merge_fn() could be
+        * problematic.
+        */
+       spin_lock_irq(q->queue_lock);
+       if (!rq->bio)
+               blk_rq_bio_prep(q, rq, bio);
+       else if (!q->back_merge_fn(q, rq, bio)) {
+               ret = -EINVAL;
+               spin_unlock_irq(q->queue_lock);
+               goto unmap_bio;
+       } else {
+               rq->biotail->bi_next = bio;
+               rq->biotail = bio;
+
+               rq->nr_sectors += bio_sectors(bio);
+               rq->hard_nr_sectors = rq->nr_sectors;
+               rq->data_len += bio->bi_size;
+       }
+       spin_unlock_irq(q->queue_lock);
+
+       return bio->bi_size;
+
+unmap_bio:
+       /* if it was bounced we must call the end io function */
+       bio_endio(bio, bio->bi_size, 0);
+       __blk_rq_unmap_user(orig_bio);
+       bio_put(bio);
+       return ret;
+}
+
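
The alignment test in __blk_rq_map_user() decides between zero-copy mapping and bouncing: queue_dma_alignment() returns a mask (blk_queue_make_request() defaults it to 511), so both the user address and the length must be 512-byte aligned to take the bio_map_user() path. A hypothetical predicate spelling the test out:

    /*
     * Illustration only, not part of the patch: with the default mask of
     * 511, ubuf == 0x1000 with len == 4096 maps the user pages directly,
     * while ubuf == 0x1001 falls back to a bio_copy_user() bounce buffer.
     */
    static inline int blk_rq_needs_copy(request_queue_t *q,
                                        unsigned long uaddr, unsigned int len)
    {
            return (uaddr & queue_dma_alignment(q)) ||
                   (len & queue_dma_alignment(q));
    }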
 /**
  * blk_rq_map_user - map user data to a request, for REQ_BLOCK_PC usage
  * @q:         request queue where request should be inserted
@@ -2384,42 +2421,44 @@ EXPORT_SYMBOL(blk_insert_request);
  *    unmapping.
  */
 int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf,
-                   unsigned int len)
+                   unsigned long len)
 {
-       unsigned long uaddr;
-       struct bio *bio;
-       int reading;
+       unsigned long bytes_read = 0;
+       int ret;
 
        if (len > (q->max_hw_sectors << 9))
                return -EINVAL;
        if (!len || !ubuf)
                return -EINVAL;
 
-       reading = rq_data_dir(rq) == READ;
+       while (bytes_read != len) {
+               unsigned long map_len, end, start;
 
-       /*
-        * if alignment requirement is satisfied, map in user pages for
-        * direct dma. else, set up kernel bounce buffers
-        */
-       uaddr = (unsigned long) ubuf;
-       if (!(uaddr & queue_dma_alignment(q)) && !(len & queue_dma_alignment(q)))
-               bio = bio_map_user(q, NULL, uaddr, len, reading);
-       else
-               bio = bio_copy_user(q, uaddr, len, reading);
+               map_len = min_t(unsigned long, len - bytes_read, BIO_MAX_SIZE);
+               end = ((unsigned long)ubuf + map_len + PAGE_SIZE - 1)
+                                                               >> PAGE_SHIFT;
+               start = (unsigned long)ubuf >> PAGE_SHIFT;
 
-       if (!IS_ERR(bio)) {
-               rq->bio = rq->biotail = bio;
-               blk_rq_bio_prep(q, rq, bio);
+               /*
+                * A bad offset could cause us to require BIO_MAX_PAGES + 1
+                * pages. If this happens we just lower the requested
+                * mapping len by a page so that we can fit
+                */
+               if (end - start > BIO_MAX_PAGES)
+                       map_len -= PAGE_SIZE;
 
-               rq->buffer = rq->data = NULL;
-               rq->data_len = len;
-               return 0;
+               ret = __blk_rq_map_user(q, rq, ubuf, map_len);
+               if (ret < 0)
+                       goto unmap_rq;
+               bytes_read += ret;
+               ubuf += ret;
        }
 
-       /*
-        * bio is the err-ptr
-        */
-       return PTR_ERR(bio);
+       rq->buffer = rq->data = NULL;
+       return 0;
+unmap_rq:
+       blk_rq_unmap_user(rq);
+       return ret;
 }
 
 EXPORT_SYMBOL(blk_rq_map_user);
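
The BIO_MAX_PAGES trim above is worth a worked example. Assuming 4K pages and BIO_MAX_PAGES == 256 (so BIO_MAX_SIZE == 1M), a buffer that starts mid-page needs one page more than a full-size segment may use:

    /*
     * ubuf == 0x1800, map_len == 1M:
     *
     *   start = 0x1800 >> 12                 =   1
     *   end   = (0x1800 + 1M + 4095) >> 12   = 258
     *
     * end - start == 257 > BIO_MAX_PAGES, so map_len is trimmed by one
     * page and the segment fits in a single bio again.
     */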
@@ -2445,7 +2484,7 @@ EXPORT_SYMBOL(blk_rq_map_user);
  *    unmapping.
  */
 int blk_rq_map_user_iov(request_queue_t *q, struct request *rq,
-                       struct sg_iovec *iov, int iov_count)
+                       struct sg_iovec *iov, int iov_count, unsigned int len)
 {
        struct bio *bio;
 
@@ -2459,10 +2498,15 @@ int blk_rq_map_user_iov(request_queue_t *q, struct request *rq,
        if (IS_ERR(bio))
                return PTR_ERR(bio);
 
-       rq->bio = rq->biotail = bio;
+       if (bio->bi_size != len) {
+               bio_endio(bio, bio->bi_size, 0);
+               bio_unmap_user(bio);
+               return -EINVAL;
+       }
+
+       bio_get(bio);
        blk_rq_bio_prep(q, rq, bio);
        rq->buffer = rq->data = NULL;
-       rq->data_len = bio->bi_size;
        return 0;
 }
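
Partial mappings are not supported for iovecs, hence the new check that the bio covers exactly the announced length. Callers are expected to pass the total iovec length, e.g. via a (hypothetical) helper like:

    static unsigned int iov_length_total(struct sg_iovec *iov, int iov_count)
    {
            unsigned int len = 0;
            int i;

            for (i = 0; i < iov_count; i++)
                    len += iov[i].iov_len;

            return len;
    }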
 
@@ -2470,23 +2514,26 @@ EXPORT_SYMBOL(blk_rq_map_user_iov);
 
 /**
  * blk_rq_unmap_user - unmap a request with user data
- * @bio:       bio to be unmapped
- * @ulen:      length of user buffer
+ * @rq:                rq to be unmapped
  *
  * Description:
- *    Unmap a bio previously mapped by blk_rq_map_user().
+ *    Unmap a rq previously mapped by blk_rq_map_user().
+ *    rq->bio must be set to the original head of the request.
  */
-int blk_rq_unmap_user(struct bio *bio, unsigned int ulen)
+int blk_rq_unmap_user(struct request *rq)
 {
-       int ret = 0;
+       struct bio *bio, *mapped_bio;
 
-       if (bio) {
-               if (bio_flagged(bio, BIO_USER_MAPPED))
-                       bio_unmap_user(bio);
+       while ((bio = rq->bio)) {
+               if (bio_flagged(bio, BIO_BOUNCED))
+                       mapped_bio = bio->bi_private;
                else
-                       ret = bio_uncopy_user(bio);
-       }
+                       mapped_bio = bio;
 
+               __blk_rq_unmap_user(mapped_bio);
+               rq->bio = bio->bi_next;
+               bio_put(bio);
+       }
        return 0;
 }
 
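
With unmapping keyed on the request instead of a (bio, length) pair, callers keep the request around rather than the original bio. A sketch of the expected calling sequence, modelled on the SG_IO path with error handling trimmed:

    struct request *rq = blk_get_request(q, READ, __GFP_WAIT);

    ret = blk_rq_map_user(q, rq, ubuf, len);
    if (!ret) {
            blk_execute_rq(q, bd_disk, rq, 0);
            ret = blk_rq_unmap_user(rq);    /* by request, not by bio */
    }
    blk_put_request(rq);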
@@ -2517,11 +2564,8 @@ int blk_rq_map_kern(request_queue_t *q, struct request *rq, void *kbuf,
        if (rq_data_dir(rq) == WRITE)
                bio->bi_rw |= (1 << BIO_RW);
 
-       rq->bio = rq->biotail = bio;
        blk_rq_bio_prep(q, rq, bio);
-
        rq->buffer = rq->data = NULL;
-       rq->data_len = len;
        return 0;
 }
 
@@ -2760,41 +2804,6 @@ void blk_end_sync_rq(struct request *rq, int error)
 }
 EXPORT_SYMBOL(blk_end_sync_rq);
 
-/**
- * blk_congestion_wait - wait for a queue to become uncongested
- * @rw: READ or WRITE
- * @timeout: timeout in jiffies
- *
- * Waits for up to @timeout jiffies for a queue (any queue) to exit congestion.
- * If no queues are congested then just wait for the next request to be
- * returned.
- */
-long blk_congestion_wait(int rw, long timeout)
-{
-       long ret;
-       DEFINE_WAIT(wait);
-       wait_queue_head_t *wqh = &congestion_wqh[rw];
-
-       prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
-       ret = io_schedule_timeout(timeout);
-       finish_wait(wqh, &wait);
-       return ret;
-}
-
-EXPORT_SYMBOL(blk_congestion_wait);
-
-/**
- * blk_congestion_end - wake up sleepers on a congestion queue
- * @rw: READ or WRITE
- */
-void blk_congestion_end(int rw)
-{
-       wait_queue_head_t *wqh = &congestion_wqh[rw];
-
-       if (waitqueue_active(wqh))
-               wake_up(wqh);
-}
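
As with the set/clear helpers above, the waiting side presumably reappears outside the block layer; in mainline 2.6.19 it becomes congestion_wait()/congestion_end() in mm/backing-dev.c, with the old bodies intact:

    /* mm/backing-dev.c (assumed location) */
    long congestion_wait(int rw, long timeout)
    {
            long ret;
            DEFINE_WAIT(wait);
            wait_queue_head_t *wqh = &congestion_wqh[rw];

            prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
            ret = io_schedule_timeout(timeout);
            finish_wait(wqh, &wait);
            return ret;
    }
    EXPORT_SYMBOL(congestion_wait);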
-
 /*
  * Has to be called with the request spinlock acquired
  */
@@ -3075,6 +3084,7 @@ void generic_make_request(struct bio *bio)
 {
        request_queue_t *q;
        sector_t maxsector;
+       sector_t old_sector;
        int ret, nr_sectors = bio_sectors(bio);
        dev_t old_dev;
 
@@ -3103,7 +3113,7 @@ void generic_make_request(struct bio *bio)
         * NOTE: we don't repeat the blk_size check for each new device.
         * Stacking drivers are expected to know what they are doing.
         */
-       maxsector = -1;
+       old_sector = -1;
        old_dev = 0;
        do {
                char b[BDEVNAME_SIZE];
@@ -3137,15 +3147,31 @@ end_io:
                 */
                blk_partition_remap(bio);
 
-               if (maxsector != -1)
+               if (old_sector != -1)
                        blk_add_trace_remap(q, bio, old_dev, bio->bi_sector, 
-                                           maxsector);
+                                           old_sector);
 
                blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
 
-               maxsector = bio->bi_sector;
+               old_sector = bio->bi_sector;
                old_dev = bio->bi_bdev->bd_dev;
 
+               maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
+               if (maxsector) {
+                       sector_t sector = bio->bi_sector;
+
+                       if (maxsector < nr_sectors ||
+                                       maxsector - nr_sectors < sector) {
+                               /*
+                                * This may well happen - partitions are not
+                                * checked to make sure they are within the size
+                                * of the whole device.
+                                */
+                               handle_bad_sector(bio);
+                               goto end_io;
+                       }
+               }
+
                ret = q->make_request_fn(q, bio);
        } while (ret);
 }
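
The shape of the new bounds test is deliberate: the obvious sector + nr_sectors > maxsector can wrap. With a 32-bit sector_t, for instance:

    /*
     * sector     = 0xfffffff0
     * nr_sectors = 0x20
     *
     * sector + nr_sectors wraps to 0x10, so the naive test passes.
     * Checking maxsector < nr_sectors first, and then
     * maxsector - nr_sectors < sector, rejects the same range without
     * ever overflowing.
     */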
@@ -3554,6 +3580,7 @@ void blk_rq_bio_prep(request_queue_t *q, struct request *rq, struct bio *bio)
        rq->hard_cur_sectors = rq->current_nr_sectors;
        rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio);
        rq->buffer = bio_data(bio);
+       rq->data_len = bio->bi_size;
 
        rq->bio = rq->biotail = bio;
 }
@@ -3770,14 +3797,14 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
        blk_queue_congestion_threshold(q);
 
        if (rl->count[READ] >= queue_congestion_on_threshold(q))
-               set_queue_congested(q, READ);
+               blk_set_queue_congested(q, READ);
        else if (rl->count[READ] < queue_congestion_off_threshold(q))
-               clear_queue_congested(q, READ);
+               blk_clear_queue_congested(q, READ);
 
        if (rl->count[WRITE] >= queue_congestion_on_threshold(q))
-               set_queue_congested(q, WRITE);
+               blk_set_queue_congested(q, WRITE);
        else if (rl->count[WRITE] < queue_congestion_off_threshold(q))
-               clear_queue_congested(q, WRITE);
+               blk_clear_queue_congested(q, WRITE);
 
        if (rl->count[READ] >= q->nr_requests) {
                blk_set_queue_full(q, READ);