diff --git a/fs/bio.c b/fs/bio.c
index b1fe82c..081747c 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -507,11 +507,12 @@ int bio_get_nr_vecs(struct block_device *bdev)
        struct request_queue *q = bdev_get_queue(bdev);
        int nr_pages;
 
-       nr_pages = ((queue_max_sectors(q) << 9) + PAGE_SIZE - 1) >> PAGE_SHIFT;
-       if (nr_pages > queue_max_segments(q))
-               nr_pages = queue_max_segments(q);
+       nr_pages = min_t(unsigned,
+                    queue_max_segments(q),
+                    queue_max_sectors(q) / (PAGE_SIZE >> 9) + 1);
+
+       return min_t(unsigned, nr_pages, BIO_MAX_PAGES);
 
-       return nr_pages;
 }
 EXPORT_SYMBOL(bio_get_nr_vecs);
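
A note on the hunk above: the old (queue_max_sectors(q) << 9) byte count can wrap in 32-bit unsigned arithmetic for very large queue limits, so the new code divides by the page size in sectors instead and clamps the result to both the queue's segment limit and BIO_MAX_PAGES. Below is a minimal userspace sketch of the same arithmetic; the queue limits, PAGE_SIZE and BIO_MAX_PAGES values are made up, and min_t is re-created as a plain macro.

#include <stdio.h>

/* userspace stand-ins for the kernel macros; values are illustrative,
 * and this min_t has no typeof/double-evaluation protection */
#define PAGE_SIZE     4096u
#define BIO_MAX_PAGES 256u
#define min_t(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

/* same shape as the patched bio_get_nr_vecs() */
static unsigned nr_vecs(unsigned max_sectors, unsigned max_segments)
{
	unsigned nr_pages;

	nr_pages = min_t(unsigned, max_segments,
			 max_sectors / (PAGE_SIZE >> 9) + 1);

	return min_t(unsigned, nr_pages, BIO_MAX_PAGES);
}

int main(void)
{
	printf("%u\n", nr_vecs(2560, 128));          /* segment-limited: 128 */
	printf("%u\n", nr_vecs(0xffffffffu, 65536)); /* clamped to BIO_MAX_PAGES: 256 */
	return 0;
}

With the removed formula, the second case would have shifted 0xffffffff left by 9 and wrapped before the rounding even happened; the division avoids that entirely.
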
 
@@ -733,7 +734,7 @@ static int __bio_copy_iov(struct bio *bio, struct bio_vec *iovecs,
        int iov_idx = 0;
        unsigned int iov_off = 0;
 
-       __bio_for_each_segment(bvec, bio, i, 0) {
+       bio_for_each_segment_all(bvec, bio, i) {
                char *bv_addr = page_address(bvec->bv_page);
                unsigned int bv_len = iovecs[i].bv_len;
 
@@ -786,12 +787,26 @@ static int __bio_copy_iov(struct bio *bio, struct bio_vec *iovecs,
 int bio_uncopy_user(struct bio *bio)
 {
        struct bio_map_data *bmd = bio->bi_private;
-       int ret = 0;
+       struct bio_vec *bvec;
+       int ret = 0, i;
 
-       if (!bio_flagged(bio, BIO_NULL_MAPPED))
-               ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs,
-                                    bmd->nr_sgvecs, bio_data_dir(bio) == READ,
-                                    0, bmd->is_our_pages);
+       if (!bio_flagged(bio, BIO_NULL_MAPPED)) {
+               /*
+                * if we're in a workqueue, the request is orphaned, so
+                * don't copy into a random user address space, just free
+                * and return -EINTR so user space doesn't expect any data.
+                */
+               if (current->mm)
+                       ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs,
+                                            bmd->nr_sgvecs, bio_data_dir(bio) == READ,
+                                            0, bmd->is_our_pages);
+               else {
+                       ret = -EINTR;
+                       if (bmd->is_our_pages)
+                               bio_for_each_segment_all(bvec, bio, i)
+                                       __free_page(bvec->bv_page);
+               }
+       }
        bio_free_map_data(bmd);
        bio_put(bio);
        return ret;
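
The current->mm check above is how the completion path distinguishes an ordinary task context, where copying the bounce data back to the submitter is safe, from a kernel thread or workqueue, where the submitting address space is no longer reachable. Here is a compact userspace sketch of just that decision, with a made-up bounce structure standing in for bio_map_data and plain heap buffers standing in for the pages.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* hypothetical stand-in for the kernel's bio_map_data bookkeeping */
struct bounce {
	char *pages[2];
	int   nr;
	bool  is_our_pages;
};

/* mirrors the control flow of the patched bio_uncopy_user(): copy back
 * only when a user address space exists, otherwise free the pages and
 * report -EINTR so the caller does not expect any data */
static int uncopy(struct bounce *b, char *dst, size_t chunk, bool have_mm)
{
	int i, ret = 0;

	if (have_mm) {
		for (i = 0; i < b->nr; i++)
			memcpy(dst + (size_t)i * chunk, b->pages[i], chunk);
	} else {
		ret = -EINTR;
		if (b->is_our_pages)
			for (i = 0; i < b->nr; i++)
				free(b->pages[i]);
	}
	return ret;
}

int main(void)
{
	struct bounce b = { { malloc(4), malloc(4) }, 2, true };
	char out[8];

	memcpy(b.pages[0], "abcd", 4);
	memcpy(b.pages[1], "efgh", 4);
	printf("normal:   %d\n", uncopy(&b, out, 4, true));  /* 0, data copied back */
	printf("orphaned: %d\n", uncopy(&b, out, 4, false)); /* -EINTR, pages freed */
	return 0;
}
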
@@ -915,7 +930,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
        return bio;
 cleanup:
        if (!map_data)
-               bio_for_each_segment(bvec, bio, i)
+               bio_for_each_segment_all(bvec, bio, i)
                        __free_page(bvec->bv_page);
 
        bio_put(bio);
@@ -961,6 +976,7 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
        struct bio *bio;
        int cur_page = 0;
        int ret, offset;
+       struct bio_vec *bvec;
 
        for (i = 0; i < iov_count; i++) {
                unsigned long uaddr = (unsigned long)iov[i].iov_base;
@@ -1004,7 +1020,12 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
 
                ret = get_user_pages_fast(uaddr, local_nr_pages,
                                write_to_vm, &pages[cur_page]);
-               if (ret < local_nr_pages) {
+               if (unlikely(ret < local_nr_pages)) {
+                       for (j = cur_page; j < page_limit; j++) {
+                               if (!pages[j])
+                                       break;
+                               put_page(pages[j]);
+                       }
                        ret = -EFAULT;
                        goto out_unmap;
                }
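
get_user_pages_fast() may pin fewer pages than requested. Since the out_unmap path now walks only the pages that actually made it into the bio (see the later hunk), this error path has to release the freshly pinned pages itself before returning -EFAULT. The shape of that pattern in a standalone sketch, with a hypothetical pin/unpin pair in place of the real page reference APIs:

#include <stdio.h>
#include <stdlib.h>

/* hypothetical stand-ins for get_user_pages_fast()/put_page(): pinning is
 * modelled as allocating a token per page, and it may stop early */
static int pin_pages(int want, void **out)
{
	int got = want > 3 ? 3 : want;	/* pretend only 3 pins succeed */

	for (int i = 0; i < got; i++)
		out[i] = malloc(1);
	return got;
}

static void unpin_page(void *p)
{
	free(p);
}

int main(void)
{
	enum { WANT = 5 };
	void *pages[WANT] = { 0 };
	int got = pin_pages(WANT, pages);

	if (got < WANT) {
		/* same idea as the patched error path: drop every page that
		 * was pinned before reporting the failure to the caller */
		for (int j = 0; j < WANT; j++) {
			if (!pages[j])
				break;
			unpin_page(pages[j]);
		}
		fprintf(stderr, "pinned %d of %d, cleaned up\n", got, WANT);
		return 1;
	}
	return 0;
}
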
@@ -1012,6 +1033,7 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
                offset = uaddr & ~PAGE_MASK;
                for (j = cur_page; j < page_limit; j++) {
                        unsigned int bytes = PAGE_SIZE - offset;
+                       unsigned short prev_bi_vcnt = bio->bi_vcnt;
 
                        if (len <= 0)
                                break;
@@ -1026,6 +1048,13 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
                                            bytes)
                                break;
 
+                       /*
+                        * check if vector was merged with previous
+                        * drop page reference if needed
+                        */
+                       if (bio->bi_vcnt == prev_bi_vcnt)
+                               put_page(pages[j]);
+
                        len -= bytes;
                        offset = 0;
                }
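
bio_add_pc_page() may merge the new chunk into the previous bio_vec instead of appending a fresh one; in that case bi_vcnt does not grow, and since the unmap paths put one reference per bio_vec, the reference taken by get_user_pages_fast() would never be dropped. Comparing bi_vcnt before and after the call is how the patch detects the merge. Below is a toy model of the same detect-a-merge-by-watching-the-count idea, with an invented vec array and add helper in place of the bio.

#include <stdbool.h>
#include <stdio.h>

struct vec { int page_id; unsigned len, off; };

/* hypothetical add helper: merge into the last vec when the new chunk is
 * contiguous with it in the same page, otherwise append a new vec */
static bool add_page(struct vec *v, unsigned *cnt, int page_id,
		     unsigned len, unsigned off)
{
	if (*cnt && v[*cnt - 1].page_id == page_id &&
	    v[*cnt - 1].off + v[*cnt - 1].len == off) {
		v[*cnt - 1].len += len;	/* merged, count unchanged */
		return true;
	}
	v[*cnt] = (struct vec){ page_id, len, off };
	(*cnt)++;
	return true;
}

int main(void)
{
	struct vec v[8];
	unsigned cnt = 0, refs = 0;
	struct { int page; unsigned len, off; } chunks[] =
		{ { 1, 512, 0 }, { 1, 512, 512 }, { 2, 1024, 0 } };

	for (int i = 0; i < 3; i++) {
		unsigned prev = cnt;

		refs++;			/* reference taken when "pinning" */
		add_page(v, &cnt, chunks[i].page, chunks[i].len, chunks[i].off);
		if (cnt == prev)
			refs--;		/* merged: drop the extra reference */
	}
	printf("vecs=%u refs=%u\n", cnt, refs);	/* vecs=2 refs=2 */
	return 0;
}
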
@@ -1051,10 +1080,8 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
        return bio;
 
  out_unmap:
-       for (i = 0; i < nr_pages; i++) {
-               if(!pages[i])
-                       break;
-               page_cache_release(pages[i]);
+       bio_for_each_segment_all(bvec, bio, j) {
+               put_page(bvec->bv_page);
        }
  out:
        kfree(pages);
@@ -1129,7 +1156,7 @@ static void __bio_unmap_user(struct bio *bio)
        /*
         * make sure we dirty pages we wrote to
         */
-       __bio_for_each_segment(bvec, bio, i, 0) {
+       bio_for_each_segment_all(bvec, bio, i) {
                if (bio_data_dir(bio) == READ)
                        set_page_dirty_lock(bvec->bv_page);
 
@@ -1235,7 +1262,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
        int i;
        char *p = bmd->sgvecs[0].iov_base;
 
-       __bio_for_each_segment(bvec, bio, i, 0) {
+       bio_for_each_segment_all(bvec, bio, i) {
                char *addr = page_address(bvec->bv_page);
                int len = bmd->iovecs[i].bv_len;
 
@@ -1275,7 +1302,7 @@ struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
        if (!reading) {
                void *p = data;
 
-               bio_for_each_segment(bvec, bio, i) {
+               bio_for_each_segment_all(bvec, bio, i) {
                        char *addr = page_address(bvec->bv_page);
 
                        memcpy(addr, p, bvec->bv_len);
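
For reference, the bio_copy_kern() hunk above only changes the iterator; the loop body is a plain scatter copy of a contiguous kernel buffer into the bio's bounce pages, one bv_len-sized piece per vector. A tiny standalone version of that scatter step (buffer contents and chunk size are arbitrary):

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* arbitrary stand-ins: a contiguous source and fixed-size "pages" */
	const char data[] = "0123456789abcdef";
	char pages[4][5] = { { 0 } };
	size_t chunk = 4, len = strlen(data);
	const char *p = data;

	/* same shape as the bio_for_each_segment_all() loop in
	 * bio_copy_kern(): copy one chunk per vector, advancing p */
	for (size_t i = 0; i < len / chunk; i++) {
		memcpy(pages[i], p, chunk);
		p += chunk;
	}
	for (size_t i = 0; i < 4; i++)
		printf("page %zu: %.4s\n", i, pages[i]);
	return 0;
}
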
@@ -1555,7 +1582,7 @@ sector_t bio_sector_offset(struct bio *bio, unsigned short index,
        if (index >= bio->bi_idx)
                index = bio->bi_vcnt - 1;
 
-       __bio_for_each_segment(bv, bio, i, 0) {
+       bio_for_each_segment_all(bv, bio, i) {
                if (i == index) {
                        if (offset > bv->bv_offset)
                                sectors += (offset - bv->bv_offset) / sector_sz;
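
Several hunks above do nothing more than switch __bio_for_each_segment(bvec, bio, i, 0) or bio_for_each_segment(bvec, bio, i) to bio_for_each_segment_all(bvec, bio, i). The difference that matters for these owner-side paths (freeing, copying back, dirtying pages) is the starting point: bio_for_each_segment begins at the bio's current index bi_idx, while the _all variant always walks every vector that was added, from 0 to bi_vcnt - 1. A stripped-down illustration of the two traversal rules over a plain array, with invented field names mirroring bi_idx and bi_vcnt:

#include <stdio.h>

/* invented miniature of the bio fields that matter for iteration */
struct mini_bio {
	int vecs[4];	/* stand-ins for bio_vec entries */
	unsigned vcnt;	/* like bi_vcnt: vectors added    */
	unsigned idx;	/* like bi_idx: current position  */
};

int main(void)
{
	struct mini_bio b = { { 10, 11, 12, 13 }, 4, 2 };
	unsigned i;

	/* bio_for_each_segment-style: starts at the current index,
	 * which suits a driver consuming the remaining data */
	for (i = b.idx; i < b.vcnt; i++)
		printf("current-onward: %d\n", b.vecs[i]);	/* 12 13 */

	/* bio_for_each_segment_all-style: starts at 0 and sees everything,
	 * which is what the owner needs when freeing/copying/dirtying pages */
	for (i = 0; i < b.vcnt; i++)
		printf("all:            %d\n", b.vecs[i]);	/* 10 11 12 13 */
	return 0;
}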