index b84d851..081747c 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -793,15 +793,19 @@ int bio_uncopy_user(struct bio *bio)
        if (!bio_flagged(bio, BIO_NULL_MAPPED)) {
                /*
                 * if we're in a workqueue, the request is orphaned, so
-                * don't copy into a random user address space, just free.
+                * don't copy into a random user address space, just free
+                * and return -EINTR so user space doesn't expect any data.
                 */
                if (current->mm)
                        ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs,
                                             bmd->nr_sgvecs, bio_data_dir(bio) == READ,
                                             0, bmd->is_our_pages);
-               else if (bmd->is_our_pages)
-                       bio_for_each_segment_all(bvec, bio, i)
-                               __free_page(bvec->bv_page);
+               else {
+                       ret = -EINTR;
+                       if (bmd->is_our_pages)
+                               bio_for_each_segment_all(bvec, bio, i)
+                                       __free_page(bvec->bv_page);
+               }
        }
        bio_free_map_data(bmd);
        bio_put(bio);
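
In miniature, the behaviour this hunk introduces: when the completion runs without a user mm (i.e. from a workqueue, so the request is orphaned), there is no address space to copy back into, so the bounce pages are freed and -EINTR is returned instead of 0. The userspace sketch below models that control flow; every type and function in it (fake_page, uncopy_model and so on) is an invented stand-in, not the kernel API:

/* Model of bio_uncopy_user() after this hunk: no user mm means no
 * copy-back target, so free the pages and report -EINTR instead. */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_page { void *data; };	/* stand-in for struct page */

static void free_page_model(struct fake_page *p)
{
	free(p->data);
	p->data = NULL;
}

static int uncopy_model(struct fake_page *pages, int nr,
			bool has_user_mm, bool our_pages)
{
	int ret = 0;

	if (has_user_mm) {
		/* safe: the submitting task's mm is current */
		printf("copying %d pages back to user space\n", nr);
	} else {
		/* orphaned: signal "no data" instead of returning 0 */
		ret = -EINTR;
		if (our_pages)
			for (int i = 0; i < nr; i++)
				free_page_model(&pages[i]);
	}
	return ret;
}

int main(void)
{
	struct fake_page pages[2] = { { malloc(16) }, { malloc(16) } };

	printf("orphaned completion -> %d (-EINTR = %d)\n",
	       uncopy_model(pages, 2, false, true), -EINTR);
	return 0;
}
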
@@ -972,6 +976,7 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
        struct bio *bio;
        int cur_page = 0;
        int ret, offset;
+       struct bio_vec *bvec;
 
        for (i = 0; i < iov_count; i++) {
                unsigned long uaddr = (unsigned long)iov[i].iov_base;
@@ -1015,7 +1020,12 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
 
                ret = get_user_pages_fast(uaddr, local_nr_pages,
                                write_to_vm, &pages[cur_page]);
-               if (ret < local_nr_pages) {
+               if (unlikely(ret < local_nr_pages)) {
+                       for (j = cur_page; j < page_limit; j++) {
+                               if (!pages[j])
+                                       break;
+                               put_page(pages[j]);
+                       }
                        ret = -EFAULT;
                        goto out_unmap;
                }
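
get_user_pages_fast() can stop short of local_nr_pages, and each page it did pin holds an extra reference. Since the reworked out_unmap path (last hunk) releases only pages that made it into the bio, anything pinned here but never added must be dropped right at the failure site, or the references leak. A userspace model of the balance the new loop restores; pin_pages_model and the plain-int refcounts are invented for illustration:

/* Model of the partial-pin cleanup: a pinner that may stop short,
 * and a caller that unpins exactly what was pinned before failing. */
#include <errno.h>
#include <stdio.h>

#define NR_PAGES 4

static int refcount[NR_PAGES];
static int pinned[NR_PAGES];	/* models non-NULL entries in pages[] */

/* pretend only the first `ok` pages are pinnable, like a fault
 * partway through the iovec */
static int pin_pages_model(int nr, int ok)
{
	int i;

	for (i = 0; i < nr && i < ok; i++) {
		refcount[i]++;
		pinned[i] = 1;
	}
	return i;	/* may be < nr, like get_user_pages_fast() */
}

int main(void)
{
	int ret = pin_pages_model(NR_PAGES, 2);

	if (ret < NR_PAGES) {
		/* mirror of the new loop: put only what was pinned */
		for (int j = 0; j < NR_PAGES; j++) {
			if (!pinned[j])
				break;
			refcount[j]--;	/* put_page() */
		}
		ret = -EFAULT;
	}

	for (int j = 0; j < NR_PAGES; j++)
		printf("page %d refcount %d\n", j, refcount[j]); /* all 0 */
	printf("ret = %d\n", ret);
	return 0;
}
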
@@ -1023,6 +1033,7 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
                offset = uaddr & ~PAGE_MASK;
                for (j = cur_page; j < page_limit; j++) {
                        unsigned int bytes = PAGE_SIZE - offset;
+                       unsigned short prev_bi_vcnt = bio->bi_vcnt;
 
                        if (len <= 0)
                                break;
@@ -1037,6 +1048,13 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
                                            bytes)
                                break;
 
+                       /*
+                        * if the vector was merged with the previous one,
+                        * drop the extra page reference taken when pinning
+                        */
+                       if (bio->bi_vcnt == prev_bi_vcnt)
+                               put_page(pages[j]);
+
                        len -= bytes;
                        offset = 0;
                }
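
The subtlety behind prev_bi_vcnt: the bio page-add call above can merge a physically contiguous page into the previous bio_vec instead of creating a new segment, so bi_vcnt stays the same while the page still carries the reference taken by get_user_pages_fast(). Because the release paths drop one reference per segment, a merged page's extra pin has to be dropped on the spot. A toy model of that bookkeeping, with all names invented for illustration:

/* Model of the merge check: add_page_model() either appends a new
 * segment or merges into the previous one (vcnt unchanged); a merge
 * means the caller must drop the extra pin so that one put per
 * segment later leaves every refcount balanced. */
#include <stdbool.h>
#include <stdio.h>

#define NR 3

static int refcount[NR];
static int vcnt;	/* models bio->bi_vcnt */

static void add_page_model(bool merges)
{
	if (!merges)
		vcnt++;	/* new segment: bi_vcnt grows */
	/* on merge, vcnt is unchanged: the page joined the previous
	 * segment */
}

int main(void)
{
	/* pretend pages 1 and 2 are contiguous with page 0 and merge */
	bool merges[NR] = { false, true, true };

	for (int j = 0; j < NR; j++) {
		int prev_vcnt = vcnt;

		refcount[j]++;		/* pin: get_user_pages_fast() */
		add_page_model(merges[j]);
		if (vcnt == prev_vcnt)
			refcount[j]--;	/* merged: drop the extra pin */
	}

	/* release path: one put_page() per segment; the single merged
	 * segment here holds page 0 */
	refcount[0]--;

	for (int j = 0; j < NR; j++)
		printf("page %d refcount %d\n", j, refcount[j]); /* all 0 */
	return 0;
}
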
@@ -1062,10 +1080,8 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
        return bio;
 
  out_unmap:
-       for (i = 0; i < nr_pages; i++) {
-               if(!pages[i])
-                       break;
-               page_cache_release(pages[i]);
+       bio_for_each_segment_all(bvec, bio, j) {
+               put_page(bvec->bv_page);
        }
  out:
        kfree(pages);
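
With the two fixes above, every page reference the function still holds on failure is owned by the bio itself: partially pinned pages were already put at the fault site, and merged pages had their extra pin dropped. Walking the bio's segments, as the new out_unmap does, therefore releases exactly one reference per segment, whereas the old scan of the flat pages[] array no longer matched what the bio owned. A stand-in model of that unwind (fake_bio and friends are illustrative, not kernel types):

/* Model of the reworked error path: put one reference per segment the
 * bio holds, like bio_for_each_segment_all() + put_page(). */
#include <stdio.h>

struct seg { int page; };	/* models struct bio_vec */

struct fake_bio {
	struct seg segs[4];
	int vcnt;		/* models bi_vcnt */
};

static int refcount[4];

int main(void)
{
	/* a bio holding two segments, backed by pages 0 and 2 */
	struct fake_bio bio = { .segs = { { 0 }, { 2 } }, .vcnt = 2 };

	/* the bio owns one reference per segment */
	for (int j = 0; j < bio.vcnt; j++)
		refcount[bio.segs[j].page]++;

	/* out_unmap: release per segment, not per pages[] entry */
	for (int j = 0; j < bio.vcnt; j++)
		refcount[bio.segs[j].page]--;	/* put_page() */

	for (int p = 0; p < 4; p++)
		printf("page %d refcount %d\n", p, refcount[p]); /* all 0 */
	return 0;
}
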