/*
 * Fast and scalable bitmap tagging variant. Uses sparser bitmaps spread
 * over multiple cachelines to avoid ping-pong between multiple submitters
 * or submitter and completer. Uses rolling wakeups to avoid falling off
 * the scaling cliff when we run out of tags and have to start putting
 * submitters to sleep.
 *
 * Uses active queue tracking to support fairer distribution of tags
 * between multiple submitters when a shared tag map is used.
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/random.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"
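
/*
 * For reference, the per-word bitmap wrapped by blk_mq_bitmap_tags is
 * (as defined in blk-mq-tag.h) essentially:
 *
 *	struct blk_align_bitmap {
 *		unsigned long word;
 *		unsigned long depth;
 *	} ____cacheline_aligned_in_smp;
 *
 * i.e. each word of tag bits sits in its own cacheline, which is what
 * keeps concurrent submitters from ping-ponging a single line.
 */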
static bool bt_has_free_tags(struct blk_mq_bitmap_tags *bt)
{
	int i;

	for (i = 0; i < bt->map_nr; i++) {
		struct blk_align_bitmap *bm = &bt->map[i];
		int ret;

		ret = find_first_zero_bit(&bm->word, bm->depth);
		if (ret < bm->depth)
			return true;
	}

	return false;
}

bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
{
	if (!tags)
		return true;

	return bt_has_free_tags(&tags->bitmap_tags);
}
static inline int bt_index_inc(int index)
{
	return (index + 1) & (BT_WAIT_QUEUES - 1);
}

static inline void bt_index_atomic_inc(atomic_t *index)
{
	int old = atomic_read(index);
	int new = bt_index_inc(old);
	atomic_cmpxchg(index, old, new);
}
/*
 * If a previously inactive queue goes active, bump the active user count.
 */
bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
	    !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		atomic_inc(&hctx->tags->active_queues);

	return true;
}
/*
 * Wake up all tasks potentially sleeping on tags.
 */
void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
{
	struct blk_mq_bitmap_tags *bt;
	int i, wake_index;

	bt = &tags->bitmap_tags;
	wake_index = atomic_read(&bt->wake_index);
	for (i = 0; i < BT_WAIT_QUEUES; i++) {
		struct bt_wait_state *bs = &bt->bs[wake_index];

		if (waitqueue_active(&bs->wait))
			wake_up(&bs->wait);

		wake_index = bt_index_inc(wake_index);
	}

	if (include_reserve) {
		bt = &tags->breserved_tags;
		if (waitqueue_active(&bt->bs[0].wait))
			wake_up(&bt->bs[0].wait);
	}
}
/*
 * If a previously busy queue goes inactive, potential waiters could now
 * be allowed to queue. Wake them up and check.
 */
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_tags *tags = hctx->tags;

	if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		return;

	atomic_dec(&tags->active_queues);

	blk_mq_tag_wakeup_all(tags, false);
}
/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
				  struct blk_mq_bitmap_tags *bt)
{
	unsigned int depth, users;

	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED))
		return true;
	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		return true;

	/*
	 * Don't try dividing an ant
	 */
	if (bt->depth == 1)
		return true;

	users = atomic_read(&hctx->tags->active_queues);
	if (!users)
		return true;

	/*
	 * Allow at least some tags
	 */
	depth = max((bt->depth + users - 1) / users, 4U);
	return atomic_read(&hctx->nr_active) < depth;
}
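
/*
 * Worked example for the fair-share check above (illustrative numbers):
 * with bt->depth == 128 and three active shared users, each hctx may have
 * up to max((128 + 2) / 3, 4U) == 43 tags in flight; with 32 active users
 * the per-queue limit bottoms out at the 4-tag floor.
 */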
static int __bt_get_word(struct blk_align_bitmap *bm, unsigned int last_tag)
{
	int tag, org_last_tag = last_tag;

	while (1) {
		tag = find_next_zero_bit(&bm->word, bm->depth, last_tag);
		if (unlikely(tag >= bm->depth)) {
			/*
			 * We started with an offset, and we didn't reset the
			 * offset to 0 in a failure case, so start from 0 to
			 * exhaust the map.
			 */
			if (org_last_tag && last_tag) {
				last_tag = org_last_tag = 0;
				continue;
			}
			return -1;
		}

		if (!test_and_set_bit(tag, &bm->word))
			break;

		last_tag = tag + 1;
		if (last_tag >= bm->depth - 1)
			last_tag = 0;
	}

	return tag;
}
/*
 * Straightforward bitmap tag implementation, where each bit is a tag
 * (cleared == free, and set == busy). The small twist is using per-cpu
 * last_tag caches, which blk-mq stores in the blk_mq_ctx software queue
 * contexts. This enables us to drastically limit the space searched,
 * without dirtying an extra shared cacheline like we would if we stored
 * the cache value inside the shared blk_mq_bitmap_tags structure. On top
 * of that, each word of tags is in a separate cacheline. This means that
 * multiple users will tend to stick to different cachelines, at least
 * until the map is exhausted.
 */
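
/*
 * For example (assuming the TAG_TO_INDEX()/TAG_TO_BIT() helpers from
 * blk-mq-tag.h split a tag as tag >> bits_per_word and
 * tag & (tags_per_word - 1)): with bits_per_word == 6, tag 200 lives in
 * map[3], bit 8, so a submitter whose cached last_tag is near 200 only
 * touches that one cacheline.
 */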
static int __bt_get(struct blk_mq_hw_ctx *hctx, struct blk_mq_bitmap_tags *bt,
		    unsigned int *tag_cache)
{
	unsigned int last_tag, org_last_tag;
	int index, i, tag;

	if (!hctx_may_queue(hctx, bt))
		return -1;

	last_tag = org_last_tag = *tag_cache;
	index = TAG_TO_INDEX(bt, last_tag);

	for (i = 0; i < bt->map_nr; i++) {
		tag = __bt_get_word(&bt->map[index], TAG_TO_BIT(bt, last_tag));
		if (tag != -1) {
			tag += (index << bt->bits_per_word);
			goto done;
		}

		/*
		 * Jump to next index, and reset the last tag to be the
		 * first tag of that index
		 */
		index++;
		last_tag = (index << bt->bits_per_word);

		if (index >= bt->map_nr) {
			index = 0;
			last_tag = 0;
		}
	}

	*tag_cache = 0;
	return -1;

	/*
	 * Only update the cache from the allocation path if we ended
	 * up using the specific cached tag.
	 */
done:
	if (tag == org_last_tag) {
		last_tag = tag + 1;
		if (last_tag >= bt->depth - 1)
			last_tag = 0;

		*tag_cache = last_tag;
	}

	return tag;
}
static struct bt_wait_state *bt_wait_ptr(struct blk_mq_bitmap_tags *bt,
					 struct blk_mq_hw_ctx *hctx)
{
	struct bt_wait_state *bs;
	int wait_index;

	if (!hctx)
		return &bt->bs[0];

	wait_index = atomic_read(&hctx->wait_index);
	bs = &bt->bs[wait_index];
	bt_index_atomic_inc(&hctx->wait_index);
	return bs;
}
static int bt_get(struct blk_mq_alloc_data *data,
		struct blk_mq_bitmap_tags *bt,
		struct blk_mq_hw_ctx *hctx,
		unsigned int *last_tag)
{
	struct bt_wait_state *bs;
	DEFINE_WAIT(wait);
	int tag;

	tag = __bt_get(hctx, bt, last_tag);
	if (tag != -1)
		return tag;

	if (!(data->gfp & __GFP_WAIT))
		return -1;

	bs = bt_wait_ptr(bt, hctx);
	do {
		prepare_to_wait(&bs->wait, &wait, TASK_UNINTERRUPTIBLE);

		tag = __bt_get(hctx, bt, last_tag);
		if (tag != -1)
			break;

		/*
		 * We're out of tags on this hardware queue, kick any
		 * pending IO submits before going to sleep waiting for
		 * some to complete. Note that hctx can be NULL here for
		 * reserved tag allocation, so only kick a real queue.
		 */
		if (hctx)
			blk_mq_run_hw_queue(hctx, false);

		/*
		 * Retry tag allocation after running the hardware queue,
		 * as running the queue may also have found completions.
		 */
		tag = __bt_get(hctx, bt, last_tag);
		if (tag != -1)
			break;

		blk_mq_put_ctx(data->ctx);

		io_schedule();

		data->ctx = blk_mq_get_ctx(data->q);
		data->hctx = data->q->mq_ops->map_queue(data->q,
				data->ctx->cpu);
		if (data->reserved) {
			bt = &data->hctx->tags->breserved_tags;
		} else {
			last_tag = &data->ctx->last_tag;
			hctx = data->hctx;
			bt = &hctx->tags->bitmap_tags;
		}
		finish_wait(&bs->wait, &wait);
		bs = bt_wait_ptr(bt, hctx);
	} while (1);

	finish_wait(&bs->wait, &wait);
	return tag;
}
static unsigned int __blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
	int tag;

	tag = bt_get(data, &data->hctx->tags->bitmap_tags, data->hctx,
			&data->ctx->last_tag);
	if (tag >= 0)
		return tag + data->hctx->tags->nr_reserved_tags;

	return BLK_MQ_TAG_FAIL;
}
static unsigned int __blk_mq_get_reserved_tag(struct blk_mq_alloc_data *data)
{
	int tag, zero = 0;

	if (unlikely(!data->hctx->tags->nr_reserved_tags)) {
		WARN_ON_ONCE(1);
		return BLK_MQ_TAG_FAIL;
	}

	tag = bt_get(data, &data->hctx->tags->breserved_tags, NULL, &zero);
	if (tag < 0)
		return BLK_MQ_TAG_FAIL;

	return tag;
}
unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
	if (!data->reserved)
		return __blk_mq_get_tag(data);

	return __blk_mq_get_reserved_tag(data);
}
static struct bt_wait_state *bt_wake_ptr(struct blk_mq_bitmap_tags *bt)
{
	int i, wake_index;

	wake_index = atomic_read(&bt->wake_index);
	for (i = 0; i < BT_WAIT_QUEUES; i++) {
		struct bt_wait_state *bs = &bt->bs[wake_index];

		if (waitqueue_active(&bs->wait)) {
			int o = atomic_read(&bt->wake_index);
			if (wake_index != o)
				atomic_cmpxchg(&bt->wake_index, o, wake_index);

			return bs;
		}

		wake_index = bt_index_inc(wake_index);
	}

	return NULL;
}
static void bt_clear_tag(struct blk_mq_bitmap_tags *bt, unsigned int tag)
{
	const int index = TAG_TO_INDEX(bt, tag);
	struct bt_wait_state *bs;
	int wait_cnt;

	clear_bit(TAG_TO_BIT(bt, tag), &bt->map[index].word);

	/* Ensure that the wait list checks occur after clear_bit(). */
	smp_mb__after_atomic();

	bs = bt_wake_ptr(bt);
	if (!bs)
		return;

	wait_cnt = atomic_dec_return(&bs->wait_cnt);
	if (unlikely(wait_cnt < 0))
		wait_cnt = atomic_inc_return(&bs->wait_cnt);
	if (wait_cnt == 0) {
		atomic_add(bt->wake_cnt, &bs->wait_cnt);
		bt_index_atomic_inc(&bt->wake_index);
		wake_up(&bs->wait);
	}
}
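
/*
 * The rolling wakeup above means that with, say, wake_cnt == 8, only
 * every 8th freed tag issues a wake_up(), and consecutive batches hit
 * different bt->bs[] wait queues as wake_index rotates. That avoids
 * waking every sleeper for every single completion once the tag map has
 * been exhausted.
 */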
void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, unsigned int tag,
		    unsigned int *last_tag)
{
	struct blk_mq_tags *tags = hctx->tags;

	if (tag >= tags->nr_reserved_tags) {
		const int real_tag = tag - tags->nr_reserved_tags;

		BUG_ON(real_tag >= tags->nr_tags);
		bt_clear_tag(&tags->bitmap_tags, real_tag);
		*last_tag = real_tag;
	} else {
		BUG_ON(tag >= tags->nr_reserved_tags);
		bt_clear_tag(&tags->breserved_tags, tag);
	}
}
static void bt_for_each(struct blk_mq_hw_ctx *hctx,
		struct blk_mq_bitmap_tags *bt, unsigned int off,
		busy_iter_fn *fn, void *data, bool reserved)
{
	struct request *rq;
	int bit, i;

	for (i = 0; i < bt->map_nr; i++) {
		struct blk_align_bitmap *bm = &bt->map[i];

		for (bit = find_first_bit(&bm->word, bm->depth);
		     bit < bm->depth;
		     bit = find_next_bit(&bm->word, bm->depth, bit + 1)) {
			rq = blk_mq_tag_to_rq(hctx->tags, off + bit);
			if (rq->q == hctx->queue)
				fn(hctx, rq, data, reserved);
		}

		off += (1 << bt->bits_per_word);
	}
}
void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn,
		void *priv)
{
	struct blk_mq_tags *tags = hctx->tags;

	if (tags->nr_reserved_tags)
		bt_for_each(hctx, &tags->breserved_tags, 0, fn, priv, true);
	bt_for_each(hctx, &tags->bitmap_tags, tags->nr_reserved_tags, fn, priv,
			false);
}
EXPORT_SYMBOL(blk_mq_tag_busy_iter);
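
/*
 * Usage sketch (not part of this file): a caller can walk all started
 * requests on a hardware queue with a callback matching busy_iter_fn,
 * e.g. to count them; count_inflight below is a hypothetical helper:
 *
 *	static void count_inflight(struct blk_mq_hw_ctx *hctx,
 *				   struct request *rq, void *priv,
 *				   bool reserved)
 *	{
 *		(*(unsigned int *)priv)++;
 *	}
 *
 *	unsigned int busy = 0;
 *	blk_mq_tag_busy_iter(hctx, count_inflight, &busy);
 */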
static unsigned int bt_unused_tags(struct blk_mq_bitmap_tags *bt)
{
	unsigned int i, used;

	for (i = 0, used = 0; i < bt->map_nr; i++) {
		struct blk_align_bitmap *bm = &bt->map[i];

		used += bitmap_weight(&bm->word, bm->depth);
	}

	return bt->depth - used;
}
static void bt_update_count(struct blk_mq_bitmap_tags *bt,
			    unsigned int depth)
{
	unsigned int tags_per_word = 1U << bt->bits_per_word;
	unsigned int map_depth = depth;

	if (depth) {
		int i;

		for (i = 0; i < bt->map_nr; i++) {
			bt->map[i].depth = min(map_depth, tags_per_word);
			map_depth -= bt->map[i].depth;
		}
	}

	bt->wake_cnt = BT_WAIT_BATCH;
	if (bt->wake_cnt > depth / BT_WAIT_QUEUES)
		bt->wake_cnt = max(1U, depth / BT_WAIT_QUEUES);

	bt->depth = depth;
}
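
/*
 * Example of the wake_cnt clamp above: assuming BT_WAIT_QUEUES == 8 and
 * BT_WAIT_BATCH == 8 (per blk-mq-tag.h), a 256-tag map keeps the full
 * batch of 8, while a 32-tag map drops to 32 / 8 == 4 so waiters are not
 * starved at shallow depths.
 */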
static int bt_alloc(struct blk_mq_bitmap_tags *bt, unsigned int depth,
			int node, bool reserved)
{
	int i;

	bt->bits_per_word = ilog2(BITS_PER_LONG);

	/*
	 * Depth can be zero for reserved tags, that's not a failure
	 * condition.
	 */
	if (depth) {
		unsigned int nr, tags_per_word;

		tags_per_word = (1 << bt->bits_per_word);

		/*
		 * If the tag space is small, shrink the number of tags
		 * per word so we spread over a few cachelines, at least.
		 * If fewer than 4 tags, just forget about it, it's not
		 * going to work optimally anyway.
		 */
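		/*
		 * For example, depth == 32 on a 64-bit host shrinks
		 * bits_per_word from 6 to 3: tags_per_word goes 64 -> 8,
		 * and the 32 tags end up spread over four cachelines
		 * instead of sharing a single word.
		 */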
		if (depth >= 4) {
			while (tags_per_word * 4 > depth) {
				bt->bits_per_word--;
				tags_per_word = (1 << bt->bits_per_word);
			}
		}

		nr = ALIGN(depth, tags_per_word) / tags_per_word;
		bt->map = kzalloc_node(nr * sizeof(struct blk_align_bitmap),
						GFP_KERNEL, node);
		if (!bt->map)
			return -ENOMEM;

		bt->map_nr = nr;
	}

	bt->bs = kzalloc(BT_WAIT_QUEUES * sizeof(*bt->bs), GFP_KERNEL);
	if (!bt->bs) {
		kfree(bt->map);
		bt->map = NULL;
		return -ENOMEM;
	}

	bt_update_count(bt, depth);

	for (i = 0; i < BT_WAIT_QUEUES; i++) {
		init_waitqueue_head(&bt->bs[i].wait);
		atomic_set(&bt->bs[i].wait_cnt, bt->wake_cnt);
	}

	return 0;
}
static void bt_free(struct blk_mq_bitmap_tags *bt)
{
	kfree(bt->map);
	kfree(bt->bs);
}

static struct blk_mq_tags *blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
						   int node)
{
	unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;

	if (bt_alloc(&tags->bitmap_tags, depth, node, false))
		goto enomem;
	if (bt_alloc(&tags->breserved_tags, tags->nr_reserved_tags, node, true))
		goto enomem;

	return tags;
enomem:
	bt_free(&tags->bitmap_tags);
	kfree(tags);
	return NULL;
}
struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
				     unsigned int reserved_tags, int node)
{
	struct blk_mq_tags *tags;

	if (total_tags > BLK_MQ_TAG_MAX) {
		pr_err("blk-mq: tag depth too large\n");
		return NULL;
	}

	tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
	if (!tags)
		return NULL;

	tags->nr_tags = total_tags;
	tags->nr_reserved_tags = reserved_tags;

	return blk_mq_init_bitmap_tags(tags, node);
}
void blk_mq_free_tags(struct blk_mq_tags *tags)
{
	bt_free(&tags->bitmap_tags);
	bt_free(&tags->breserved_tags);
	kfree(tags);
}
void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *tag)
{
	unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;

	*tag = prandom_u32() % depth;
}
int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int tdepth)
{
	tdepth -= tags->nr_reserved_tags;
	if (tdepth > tags->nr_tags)
		return -EINVAL;

	/*
	 * We don't need to (and can't) update reserved tags here; they
	 * remain static and should never need resizing.
	 */
	bt_update_count(&tags->bitmap_tags, tdepth);
	blk_mq_tag_wakeup_all(tags, false);
	return 0;
}
/**
 * blk_mq_unique_tag() - return a tag that is unique queue-wide
 * @rq: request for which to compute a unique tag
 *
 * The tag field in struct request is unique per hardware queue but not over
 * all hardware queues. Hence this function, which returns a tag with the
 * hardware context index in the upper bits and the per hardware queue tag in
 * the lower bits.
 *
 * Note: When called for a request that is queued on a non-multiqueue request
 * queue, the hardware context index is set to zero.
 */
u32 blk_mq_unique_tag(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct blk_mq_hw_ctx *hctx;
	int hwq = 0;

	if (q->mq_ops) {
		hctx = q->mq_ops->map_queue(q, rq->mq_ctx->cpu);
		hwq = hctx->queue_num;
	}

	return (hwq << BLK_MQ_UNIQUE_TAG_BITS) |
		(rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
}
EXPORT_SYMBOL(blk_mq_unique_tag);
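
/*
 * Decoding sketch: the matching helpers declared in blk-mq.h
 * (blk_mq_unique_tag_to_hwq() and blk_mq_unique_tag_to_tag()) simply
 * reverse the packing above, e.g. in a driver:
 *
 *	u32 unique = blk_mq_unique_tag(rq);
 *	u16 hwq = blk_mq_unique_tag_to_hwq(unique);
 *	u16 tag = blk_mq_unique_tag_to_tag(unique);
 */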
ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page)
{
	char *orig_page = page;
	unsigned int free, res;

	if (!tags)
		return 0;

	page += sprintf(page, "nr_tags=%u, reserved_tags=%u, "
			"bits_per_word=%u\n",
			tags->nr_tags, tags->nr_reserved_tags,
			tags->bitmap_tags.bits_per_word);

	free = bt_unused_tags(&tags->bitmap_tags);
	res = bt_unused_tags(&tags->breserved_tags);

	page += sprintf(page, "nr_free=%u, nr_reserved=%u\n", free, res);
	page += sprintf(page, "active_queues=%u\n", atomic_read(&tags->active_queues));

	return page - orig_page;
}