Merge branch 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 5023f0b..5efc9e7 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -32,7 +32,7 @@ void blk_recalc_rq_sectors(struct request *rq, int nsect)
                 * size, something has gone terribly wrong
                 */
                if (rq->nr_sectors < rq->current_nr_sectors) {
-                       printk("blk: request botched\n");
+                       printk(KERN_ERR "blk: request botched\n");
                        rq->nr_sectors = rq->current_nr_sectors;
                }
        }
@@ -55,7 +55,7 @@ void blk_recalc_rq_segments(struct request *rq)
        if (!rq->bio)
                return;
 
-       cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
+       cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
        hw_seg_size = seg_size = 0;
        phys_size = hw_size = nr_phys_segs = nr_hw_segs = 0;
        rq_for_each_segment(bv, rq, iter) {
@@ -128,7 +128,7 @@ EXPORT_SYMBOL(blk_recount_segments);
 static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
                                   struct bio *nxt)
 {
-       if (!(q->queue_flags & (1 << QUEUE_FLAG_CLUSTER)))
+       if (!test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags))
                return 0;
 
        if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
@@ -149,9 +149,9 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
 static int blk_hw_contig_segment(struct request_queue *q, struct bio *bio,
                                 struct bio *nxt)
 {
-       if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
+       if (!bio_flagged(bio, BIO_SEG_VALID))
                blk_recount_segments(q, bio);
-       if (unlikely(!bio_flagged(nxt, BIO_SEG_VALID)))
+       if (!bio_flagged(nxt, BIO_SEG_VALID))
                blk_recount_segments(q, nxt);
        if (!BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)) ||
            BIOVEC_VIRT_OVERSIZE(bio->bi_hw_back_size + nxt->bi_hw_front_size))
@@ -175,7 +175,7 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
        int nsegs, cluster;
 
        nsegs = 0;
-       cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
+       cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
 
        /*
         * for each bio in rq
@@ -220,7 +220,19 @@ new_segment:
                bvprv = bvec;
        } /* segments in rq */
 
-       if (q->dma_drain_size) {
+
+       if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
+           (rq->data_len & q->dma_pad_mask)) {
+               unsigned int pad_len = (q->dma_pad_mask & ~rq->data_len) + 1;
+
+               sg->length += pad_len;
+               rq->extra_len += pad_len;
+       }
+
+       if (q->dma_drain_size && q->dma_drain_needed(rq)) {
+               if (rq->cmd_flags & REQ_RW)
+                       memset(q->dma_drain_buffer, 0, q->dma_drain_size);
+
                sg->page_link &= ~0x02;
                sg = sg_next(sg);
                sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
@@ -228,6 +240,7 @@ new_segment:
                            ((unsigned long)q->dma_drain_buffer) &
                            (PAGE_SIZE - 1));
                nsegs++;
+               rq->extra_len += q->dma_drain_size;
        }
 
        if (sg)
@@ -235,7 +248,6 @@ new_segment:
 
        return nsegs;
 }
-
 EXPORT_SYMBOL(blk_rq_map_sg);
 
 static inline int ll_new_mergeable(struct request_queue *q,
@@ -300,13 +312,13 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
                        q->last_merge = NULL;
                return 0;
        }
-       if (unlikely(!bio_flagged(req->biotail, BIO_SEG_VALID)))
+       if (!bio_flagged(req->biotail, BIO_SEG_VALID))
                blk_recount_segments(q, req->biotail);
-       if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
+       if (!bio_flagged(bio, BIO_SEG_VALID))
                blk_recount_segments(q, bio);
        len = req->biotail->bi_hw_back_size + bio->bi_hw_front_size;
-       if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(req->biotail), __BVEC_START(bio)) &&
-           !BIOVEC_VIRT_OVERSIZE(len)) {
+       if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(req->biotail), __BVEC_START(bio))
+           && !BIOVEC_VIRT_OVERSIZE(len)) {
                int mergeable =  ll_new_mergeable(q, req, bio);
 
                if (mergeable) {
@@ -321,7 +333,7 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
        return ll_new_hw_segment(q, req, bio);
 }
 
-int ll_front_merge_fn(struct request_queue *q, struct request *req, 
+int ll_front_merge_fn(struct request_queue *q, struct request *req,
                      struct bio *bio)
 {
        unsigned short max_sectors;
@@ -340,9 +352,9 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
                return 0;
        }
        len = bio->bi_hw_back_size + req->bio->bi_hw_front_size;
-       if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
+       if (!bio_flagged(bio, BIO_SEG_VALID))
                blk_recount_segments(q, bio);
-       if (unlikely(!bio_flagged(req->bio, BIO_SEG_VALID)))
+       if (!bio_flagged(req->bio, BIO_SEG_VALID))
                blk_recount_segments(q, req->bio);
        if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(req->bio)) &&
            !BIOVEC_VIRT_OVERSIZE(len)) {
@@ -388,7 +400,8 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
 
        total_hw_segments = req->nr_hw_segments + next->nr_hw_segments;
        if (blk_hw_contig_segment(q, req->biotail, next->bio)) {
-               int len = req->biotail->bi_hw_back_size + next->bio->bi_hw_front_size;
+               int len = req->biotail->bi_hw_back_size +
+                               next->bio->bi_hw_front_size;
                /*
                 * propagate the combined length to the end of the requests
                 */
@@ -428,6 +441,9 @@ static int attempt_merge(struct request_queue *q, struct request *req,
            || next->special)
                return 0;
 
+       if (blk_integrity_rq(req) != blk_integrity_rq(next))
+               return 0;
+
        /*
         * If we are allowed to merge, then append bio list
         * from next to rq and release next. merge_requests_fn
@@ -454,8 +470,14 @@ static int attempt_merge(struct request_queue *q, struct request *req,
        elv_merge_requests(q, req, next);
 
        if (req->rq_disk) {
+               struct hd_struct *part
+                       = get_part(req->rq_disk, req->sector);
                disk_round_stats(req->rq_disk);
                req->rq_disk->in_flight--;
+               if (part) {
+                       part_round_stats(part);
+                       part->in_flight--;
+               }
        }
 
        req->ioprio = ioprio_best(req->ioprio, next->ioprio);