Merge branch 'for-3.16/blk-mq-tagging' into for-3.16/core
author Jens Axboe <axboe@fb.com>
Mon, 19 May 2014 17:52:35 +0000 (11:52 -0600)
committer Jens Axboe <axboe@fb.com>
Mon, 19 May 2014 17:52:35 +0000 (11:52 -0600)
Signed-off-by: Jens Axboe <axboe@fb.com>
Conflicts:
block/blk-mq-tag.c

block/blk-mq-tag.c
block/blk-mq-tag.h
block/blk-mq.c
include/linux/blk-mq.h

@@@ -40,7 -39,85 +39,85 @@@ bool blk_mq_has_free_tags(struct blk_mq
        return bt_has_free_tags(&tags->bitmap_tags);
  }
  
 -static int __bt_get_word(struct blk_mq_bitmap *bm, unsigned int last_tag)
+ static inline void bt_index_inc(unsigned int *index)
+ {
+       *index = (*index + 1) & (BT_WAIT_QUEUES - 1);
+ }
+
+ /*
+  * If a previously inactive queue goes active, bump the active user count.
+  */
+ bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
+ {
+       if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
+           !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
+               atomic_inc(&hctx->tags->active_queues);
+
+       return true;
+ }
+
+ /*
+  * If a previously busy queue goes inactive, potential waiters could now
+  * be allowed to queue. Wake them up and check.
+  */
+ void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
+ {
+       struct blk_mq_tags *tags = hctx->tags;
+       struct blk_mq_bitmap_tags *bt;
+       int i, wake_index;
+
+       if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
+               return;
+
+       atomic_dec(&tags->active_queues);
+
+       /*
+        * Will only throttle depth on non-reserved tags
+        */
+       bt = &tags->bitmap_tags;
+       wake_index = bt->wake_index;
+       for (i = 0; i < BT_WAIT_QUEUES; i++) {
+               struct bt_wait_state *bs = &bt->bs[wake_index];
+
+               if (waitqueue_active(&bs->wait))
+                       wake_up(&bs->wait);
+
+               bt_index_inc(&wake_index);
+       }
+ }
+
+ /*
+  * For shared tag users, we track the number of currently active users
+  * and attempt to provide a fair share of the tag depth for each of them.
+  */
+ static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
+                                 struct blk_mq_bitmap_tags *bt)
+ {
+       unsigned int depth, users;
+
+       if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED))
+               return true;
+       if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
+               return true;
+
+       /*
+        * Don't try dividing an ant
+        */
+       if (bt->depth == 1)
+               return true;
+
+       users = atomic_read(&hctx->tags->active_queues);
+       if (!users)
+               return true;
+
+       /*
+        * Allow at least some tags
+        */
+       depth = max((bt->depth + users - 1) / users, 4U);
+       return atomic_read(&hctx->nr_active) < depth;
+ }
 +static int __bt_get_word(struct blk_align_bitmap *bm, unsigned int last_tag)
  {
        int tag, org_last_tag, end;
  
Simple merge
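
Note: the depth calculation in hctx_may_queue() above is ceiling division with a floor: each of the `users` active queues gets roughly ceil(depth / users) tags, but never fewer than four. A minimal userspace sketch of that arithmetic (fair_share_depth() is a hypothetical name for illustration, not kernel code):

    #include <stdio.h>

    /* Hypothetical helper mirroring the depth math in hctx_may_queue() */
    static unsigned int fair_share_depth(unsigned int depth, unsigned int users)
    {
            /* (depth + users - 1) / users is integer ceiling division */
            unsigned int share = (depth + users - 1) / users;

            return share > 4U ? share : 4U; /* same effect as max(share, 4U) */
    }

    int main(void)
    {
            /* 64 tags shared by 5 active queues: ceil(64/5) = 13 tags each */
            printf("%u\n", fair_share_depth(64, 5));
            /* 8 tags shared by 6 queues: ceil(8/6) = 2, raised to the floor of 4 */
            printf("%u\n", fair_share_depth(8, 6));
            return 0;
    }

Compiled and run, this prints 13 and 4. It also shows why __blk_mq_tag_idle() wakes waiters: with one fewer active user, the per-queue share grows and blocked submitters may now pass the depth check.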
diff --cc block/blk-mq.c
@@@ -1605,7 -1611,10 +1673,9 @@@ void blk_mq_free_queue(struct request_q
        struct blk_mq_hw_ctx *hctx;
        int i;
  
+       blk_mq_del_queue_tag_set(q);
+
        queue_for_each_hw_ctx(q, hctx, i) {
 -              kfree(hctx->ctx_map);
                kfree(hctx->ctxs);
                blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
                if (q->mq_ops->exit_hctx)
Simple merge
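
One more note on the blk-mq-tag.c hunk above: bt_index_inc() wraps the wake-queue index with `& (BT_WAIT_QUEUES - 1)`, which is only a valid modulo because BT_WAIT_QUEUES is a power of two. A standalone sketch, assuming the value 8 (treat that constant as an assumption here, not a quote from this tree):

    #include <stdio.h>

    #define BT_WAIT_QUEUES 8 /* must be a power of two for the mask trick */

    /* Mirrors bt_index_inc(): advance and wrap without a division */
    static void bt_index_inc(unsigned int *index)
    {
            *index = (*index + 1) & (BT_WAIT_QUEUES - 1);
    }

    int main(void)
    {
            unsigned int i, wake_index = 6;

            /* Starting at 6, a full sweep visits: 6 7 0 1 2 3 4 5 */
            for (i = 0; i < BT_WAIT_QUEUES; i++) {
                    printf("%u ", wake_index);
                    bt_index_inc(&wake_index);
            }
            printf("\n");
            return 0;
    }

This round-robin sweep is what lets __blk_mq_tag_idle() distribute wakeups across all wait queues instead of always hitting the first one.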