block: add queue flag for disabling SG merging
author		Jens Axboe <axboe@fb.com>	Thu, 29 May 2014 15:53:32 +0000 (09:53 -0600)
committer	Jens Axboe <axboe@fb.com>	Thu, 29 May 2014 15:53:32 +0000 (09:53 -0600)

If devices are not SG starved, we waste a lot of time potentially
collapsing SG segments: enough that 1.5% of CPU time goes to this at
only 400K IOPS. Add a queue flag, QUEUE_FLAG_NO_SG_MERGE, which just
returns the number of vectors in a bio instead of looping over all
segments and checking for collapsible ones.
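
With the flag set, the recount reduces to using the bio's vector count
directly. A minimal sketch of that fast path (it mirrors the
blk_recount_segments() change in blk-merge.c below; the function name
here is illustrative only):

#include <linux/blkdev.h>
#include <linux/bio.h>

/*
 * Sketch (illustrative name): mirrors the blk_recount_segments() change
 * below. __blk_recalc_rq_segments() is the static helper in blk-merge.c
 * that walks the segments and merges collapsible ones.
 */
static void recount_segments_sketch(struct request_queue *q, struct bio *bio)
{
        if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags))
                bio->bi_phys_segments = bio->bi_vcnt;   /* one segment per bvec */
        else
                bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio);
}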

Add a BLK_MQ_F_SG_MERGE flag so that drivers can opt in to SG merging,
if they so desire.
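
A blk-mq driver that still wants merged segments opts in through its tag
set before queue setup. A minimal sketch, assuming a hypothetical driver
tag set named my_tag_set (nr_hw_queues, queue_depth, ops and the
blk_mq_alloc_tag_set() call are omitted):

#include <linux/blk-mq.h>

static struct blk_mq_tag_set my_tag_set;        /* hypothetical driver tag set */

static struct request_queue *my_driver_init_queue(void)
{
        /* opt in to SG segment merging alongside the usual merge hint */
        my_tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
        return blk_mq_init_queue(&my_tag_set);
}

Without BLK_MQ_F_SG_MERGE, blk_mq_init_queue() sets QUEUE_FLAG_NO_SG_MERGE
on the queue, as the blk-mq.c hunk below shows.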

Signed-off-by: Jens Axboe <axboe@fb.com>
block/blk-merge.c
block/blk-mq.c
include/linux/blk-mq.h
include/linux/blkdev.h

diff --git a/block/blk-merge.c b/block/blk-merge.c
index 6c583f9..b3bf0df 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -13,7 +13,7 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
                                             struct bio *bio)
 {
        struct bio_vec bv, bvprv = { NULL };
-       int cluster, high, highprv = 1;
+       int cluster, high, highprv = 1, no_sg_merge;
        unsigned int seg_size, nr_phys_segs;
        struct bio *fbio, *bbio;
        struct bvec_iter iter;
@@ -35,12 +35,21 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
        cluster = blk_queue_cluster(q);
        seg_size = 0;
        nr_phys_segs = 0;
+       no_sg_merge = test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags);
+       high = 0;
        for_each_bio(bio) {
                bio_for_each_segment(bv, bio, iter) {
+                       /*
+                        * If SG merging is disabled, each bio vector is
+                        * a segment
+                        */
+                       if (no_sg_merge)
+                               goto new_segment;
+
                        /*
                         * the trick here is making sure that a high page is
-                        * never considered part of another segment, since that
-                        * might change with the bounce page.
+                        * never considered part of another segment, since
+                        * that might change with the bounce page.
                         */
                        high = page_to_pfn(bv.bv_page) > queue_bounce_pfn(q);
                        if (!high && !highprv && cluster) {
@@ -84,11 +93,16 @@ void blk_recalc_rq_segments(struct request *rq)
 
 void blk_recount_segments(struct request_queue *q, struct bio *bio)
 {
-       struct bio *nxt = bio->bi_next;
+       if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags))
+               bio->bi_phys_segments = bio->bi_vcnt;
+       else {
+               struct bio *nxt = bio->bi_next;
+
+               bio->bi_next = NULL;
+               bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio);
+               bio->bi_next = nxt;
+       }
 
-       bio->bi_next = NULL;
-       bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio);
-       bio->bi_next = nxt;
        bio->bi_flags |= (1 << BIO_SEG_VALID);
 }
 EXPORT_SYMBOL(blk_recount_segments);
diff --git a/block/blk-mq.c b/block/blk-mq.c
index f27fe44..f98d977 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1829,6 +1829,9 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
        q->mq_ops = set->ops;
        q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
 
+       if (!(set->flags & BLK_MQ_F_SG_MERGE))
+               q->queue_flags |= 1 << QUEUE_FLAG_NO_SG_MERGE;
+
        q->sg_reserved_size = INT_MAX;
 
        INIT_WORK(&q->requeue_work, blk_mq_requeue_work);
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 91dfb75..95de239 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -129,6 +129,7 @@ enum {
        BLK_MQ_F_SHOULD_MERGE   = 1 << 0,
        BLK_MQ_F_SHOULD_SORT    = 1 << 1,
        BLK_MQ_F_TAG_SHARED     = 1 << 2,
+       BLK_MQ_F_SG_MERGE       = 1 << 3,
 
        BLK_MQ_S_STOPPED        = 0,
        BLK_MQ_S_TAG_ACTIVE     = 1,
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 0983045..695b9fd 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -510,6 +510,7 @@ struct request_queue {
 #define QUEUE_FLAG_SAME_FORCE  18      /* force complete on same CPU */
 #define QUEUE_FLAG_DEAD        19      /* queue tear-down finished */
 #define QUEUE_FLAG_INIT_DONE   20      /* queue is initialized */
+#define QUEUE_FLAG_NO_SG_MERGE 21      /* don't attempt to merge SG segments*/
 
 #define QUEUE_FLAG_DEFAULT     ((1 << QUEUE_FLAG_IO_STAT) |            \
                                 (1 << QUEUE_FLAG_STACKABLE)    |       \