blk-mq: add blk_mq_free_hctx_request()
block/blk-mq.c

/*
 * Block multiqueue core code
 *
 * Copyright (C) 2013-2014 Jens Axboe
 * Copyright (C) 2013-2014 Christoph Hellwig
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>
#include <linux/llist.h>
#include <linux/list_sort.h>
#include <linux/cpu.h>
#include <linux/cache.h>
#include <linux/sched/sysctl.h>
#include <linux/delay.h>
#include <linux/crash_dump.h>

#include <trace/events/block.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

static DEFINE_MUTEX(all_q_mutex);
static LIST_HEAD(all_q_list);

static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx);

/*
 * Check if any of the ctx's have pending work in this hardware queue
 */
static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
{
        unsigned int i;

        for (i = 0; i < hctx->ctx_map.map_size; i++)
                if (hctx->ctx_map.map[i].word)
                        return true;

        return false;
}

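/*
 * Return the bitmap word that holds the pending bit for this ctx.
 */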
static inline struct blk_align_bitmap *get_bm(struct blk_mq_hw_ctx *hctx,
                                              struct blk_mq_ctx *ctx)
{
        return &hctx->ctx_map.map[ctx->index_hw / hctx->ctx_map.bits_per_word];
}

#define CTX_TO_BIT(hctx, ctx)   \
        ((ctx)->index_hw & ((hctx)->ctx_map.bits_per_word - 1))

/*
 * Mark this ctx as having pending work in this hardware queue
 */
static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
                                     struct blk_mq_ctx *ctx)
{
        struct blk_align_bitmap *bm = get_bm(hctx, ctx);

        if (!test_bit(CTX_TO_BIT(hctx, ctx), &bm->word))
                set_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
}

static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
                                      struct blk_mq_ctx *ctx)
{
        struct blk_align_bitmap *bm = get_bm(hctx, ctx);

        clear_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
}

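/*
 * Take a reference on the queue's usage counter, sleeping while the queue
 * is frozen. Returns -ENODEV if the queue dies while we wait.
 */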
static int blk_mq_queue_enter(struct request_queue *q)
{
        while (true) {
                int ret;

                if (percpu_ref_tryget_live(&q->mq_usage_counter))
                        return 0;

                ret = wait_event_interruptible(q->mq_freeze_wq,
                                !q->mq_freeze_depth || blk_queue_dying(q));
                if (blk_queue_dying(q))
                        return -ENODEV;
                if (ret)
                        return ret;
        }
}

static void blk_mq_queue_exit(struct request_queue *q)
{
        percpu_ref_put(&q->mq_usage_counter);
}

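/*
 * percpu_ref release callback: the usage counter has dropped to zero, so
 * wake up anyone waiting for the queue to be frozen.
 */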
static void blk_mq_usage_counter_release(struct percpu_ref *ref)
{
        struct request_queue *q =
                container_of(ref, struct request_queue, mq_usage_counter);

        wake_up_all(&q->mq_freeze_wq);
}

/*
 * Guarantee no request is in use, so we can change any data structure of
 * the queue afterward.
 */
void blk_mq_freeze_queue(struct request_queue *q)
{
        bool freeze;

        spin_lock_irq(q->queue_lock);
        freeze = !q->mq_freeze_depth++;
        spin_unlock_irq(q->queue_lock);

        if (freeze) {
                percpu_ref_kill(&q->mq_usage_counter);
                blk_mq_run_queues(q, false);
        }
        wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->mq_usage_counter));
}

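/*
 * Undo a freeze: once the last freezer is done, switch the usage counter
 * back to percpu mode and wake up anyone blocked in blk_mq_queue_enter().
 */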
static void blk_mq_unfreeze_queue(struct request_queue *q)
{
        bool wake;

        spin_lock_irq(q->queue_lock);
        wake = !--q->mq_freeze_depth;
        WARN_ON_ONCE(q->mq_freeze_depth < 0);
        spin_unlock_irq(q->queue_lock);
        if (wake) {
                percpu_ref_reinit(&q->mq_usage_counter);
                wake_up_all(&q->mq_freeze_wq);
        }
}

bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
{
        return blk_mq_has_free_tags(hctx->tags);
}
EXPORT_SYMBOL(blk_mq_can_queue);

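/*
 * Bring a freshly tagged request into a known state for the given ctx and
 * rw flags. The tag and the atomic flags are deliberately left untouched.
 */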
static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
                               struct request *rq, unsigned int rw_flags)
{
        if (blk_queue_io_stat(q))
                rw_flags |= REQ_IO_STAT;

        INIT_LIST_HEAD(&rq->queuelist);
        /* csd/requeue_work/fifo_time is initialized before use */
        rq->q = q;
        rq->mq_ctx = ctx;
        rq->cmd_flags |= rw_flags;
        /* do not touch atomic flags, it needs atomic ops against the timer */
        rq->cpu = -1;
        INIT_HLIST_NODE(&rq->hash);
        RB_CLEAR_NODE(&rq->rb_node);
        rq->rq_disk = NULL;
        rq->part = NULL;
        rq->start_time = jiffies;
#ifdef CONFIG_BLK_CGROUP
        rq->rl = NULL;
        set_start_time_ns(rq);
        rq->io_start_time_ns = 0;
#endif
        rq->nr_phys_segments = 0;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
        rq->nr_integrity_segments = 0;
#endif
        rq->special = NULL;
        /* tag was already set */
        rq->errors = 0;

        rq->cmd = rq->__cmd;

        rq->extra_len = 0;
        rq->sense_len = 0;
        rq->resid_len = 0;
        rq->sense = NULL;

        INIT_LIST_HEAD(&rq->timeout_list);
        rq->timeout = 0;

        rq->end_io = NULL;
        rq->end_io_data = NULL;
        rq->next_rq = NULL;

        ctx->rq_dispatched[rw_is_sync(rw_flags)]++;
}

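/*
 * Grab a tag for the hardware queue in @data and initialize the request
 * backing it. Returns NULL if no tag could be obtained under the caller's
 * constraints.
 */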
static struct request *
__blk_mq_alloc_request(struct blk_mq_alloc_data *data, int rw)
{
        struct request *rq;
        unsigned int tag;

        tag = blk_mq_get_tag(data);
        if (tag != BLK_MQ_TAG_FAIL) {
                rq = data->hctx->tags->rqs[tag];

                if (blk_mq_tag_busy(data->hctx)) {
                        rq->cmd_flags = REQ_MQ_INFLIGHT;
                        atomic_inc(&data->hctx->nr_active);
                }

                rq->tag = tag;
                blk_mq_rq_ctx_init(data->q, data->ctx, rq, rw);
                return rq;
        }

        return NULL;
}

struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
                bool reserved)
{
        struct blk_mq_ctx *ctx;
        struct blk_mq_hw_ctx *hctx;
        struct request *rq;
        struct blk_mq_alloc_data alloc_data;
        int ret;

        ret = blk_mq_queue_enter(q);
        if (ret)
                return ERR_PTR(ret);

        ctx = blk_mq_get_ctx(q);
        hctx = q->mq_ops->map_queue(q, ctx->cpu);
        blk_mq_set_alloc_data(&alloc_data, q, gfp & ~__GFP_WAIT,
                        reserved, ctx, hctx);

        rq = __blk_mq_alloc_request(&alloc_data, rw);
        if (!rq && (gfp & __GFP_WAIT)) {
                __blk_mq_run_hw_queue(hctx);
                blk_mq_put_ctx(ctx);

                ctx = blk_mq_get_ctx(q);
                hctx = q->mq_ops->map_queue(q, ctx->cpu);
                blk_mq_set_alloc_data(&alloc_data, q, gfp, reserved, ctx,
                                hctx);
                rq = __blk_mq_alloc_request(&alloc_data, rw);
                ctx = alloc_data.ctx;
        }
        blk_mq_put_ctx(ctx);
        if (!rq)
                return ERR_PTR(-EWOULDBLOCK);
        return rq;
}
EXPORT_SYMBOL(blk_mq_alloc_request);

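/*
 * Release the tag and the queue reference held by a request. The caller
 * is expected to have accounted the completion on @ctx already.
 */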
static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
                                  struct blk_mq_ctx *ctx, struct request *rq)
{
        const int tag = rq->tag;
        struct request_queue *q = rq->q;

        if (rq->cmd_flags & REQ_MQ_INFLIGHT)
                atomic_dec(&hctx->nr_active);
        rq->cmd_flags = 0;

        clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
        blk_mq_put_tag(hctx, tag, &ctx->last_tag);
        blk_mq_queue_exit(q);
}

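/**
 * blk_mq_free_hctx_request - free a request, hardware queue already known
 * @hctx:       the hardware queue the request was mapped to
 * @rq:         the request to free
 *
 * Like blk_mq_free_request(), but for callers that already hold the
 * hctx mapping and can skip the map_queue() lookup.
 */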
void blk_mq_free_hctx_request(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
        struct blk_mq_ctx *ctx = rq->mq_ctx;

        ctx->rq_completed[rq_is_sync(rq)]++;
        __blk_mq_free_request(hctx, ctx, rq);
}
EXPORT_SYMBOL_GPL(blk_mq_free_hctx_request);

void blk_mq_free_request(struct request *rq)
{
        struct blk_mq_hw_ctx *hctx;
        struct request_queue *q = rq->q;

        hctx = q->mq_ops->map_queue(q, rq->mq_ctx->cpu);
        blk_mq_free_hctx_request(hctx, rq);
}
EXPORT_SYMBOL_GPL(blk_mq_free_request);

inline void __blk_mq_end_request(struct request *rq, int error)
{
        blk_account_io_done(rq);

        if (rq->end_io) {
                rq->end_io(rq, error);
        } else {
                if (unlikely(blk_bidi_rq(rq)))
                        blk_mq_free_request(rq->next_rq);
                blk_mq_free_request(rq);
        }
}
EXPORT_SYMBOL(__blk_mq_end_request);

void blk_mq_end_request(struct request *rq, int error)
{
        if (blk_update_request(rq, error, blk_rq_bytes(rq)))
                BUG();
        __blk_mq_end_request(rq, error);
}
EXPORT_SYMBOL(blk_mq_end_request);

static void __blk_mq_complete_request_remote(void *data)
{
        struct request *rq = data;

        rq->q->softirq_done_fn(rq);
}

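/*
 * Complete a request on the CPU it was submitted from, using an IPI if
 * that CPU is online and doesn't share a cache with the current one.
 * Falls back to completing locally otherwise.
 */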
static void blk_mq_ipi_complete_request(struct request *rq)
{
        struct blk_mq_ctx *ctx = rq->mq_ctx;
        bool shared = false;
        int cpu;

        if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) {
                rq->q->softirq_done_fn(rq);
                return;
        }

        cpu = get_cpu();
        if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
                shared = cpus_share_cache(cpu, ctx->cpu);

        if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
                rq->csd.func = __blk_mq_complete_request_remote;
                rq->csd.info = rq;
                rq->csd.flags = 0;
                smp_call_function_single_async(ctx->cpu, &rq->csd);
        } else {
                rq->q->softirq_done_fn(rq);
        }
        put_cpu();
}

void __blk_mq_complete_request(struct request *rq)
{
        struct request_queue *q = rq->q;

        if (!q->softirq_done_fn)
                blk_mq_end_request(rq, rq->errors);
        else
                blk_mq_ipi_complete_request(rq);
}

/**
 * blk_mq_complete_request - end I/O on a request
 * @rq:         the request being processed
 *
 * Description:
 *      Ends all I/O on a request. It does not handle partial completions.
 *      The actual completion happens out-of-order, through an IPI handler.
 **/
void blk_mq_complete_request(struct request *rq)
{
        struct request_queue *q = rq->q;

        if (unlikely(blk_should_fake_timeout(q)))
                return;
        if (!blk_mark_rq_complete(rq))
                __blk_mq_complete_request(rq);
}
EXPORT_SYMBOL(blk_mq_complete_request);

void blk_mq_start_request(struct request *rq)
{
        struct request_queue *q = rq->q;

        trace_block_rq_issue(q, rq);

        rq->resid_len = blk_rq_bytes(rq);
        if (unlikely(blk_bidi_rq(rq)))
                rq->next_rq->resid_len = blk_rq_bytes(rq->next_rq);

        blk_add_timer(rq);

        /*
         * Ensure that ->deadline is visible before we set the started
         * flag and clear the completed flag.
         */
        smp_mb__before_atomic();

        /*
         * Mark us as started and clear complete. Complete might have been
         * set if requeue raced with timeout, which then marked it as
         * complete. So be sure to clear complete again when we start
         * the request, otherwise we'll ignore the completion event.
         */
        if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
                set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
        if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
                clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);

        if (q->dma_drain_size && blk_rq_bytes(rq)) {
                /*
                 * Make sure space for the drain appears.  We know we can do
                 * this because max_hw_segments has been adjusted to be one
                 * fewer than the device can handle.
                 */
                rq->nr_phys_segments++;
        }
}
EXPORT_SYMBOL(blk_mq_start_request);

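/*
 * Wind a started request back to a not-started state so it can be issued
 * again, undoing the drain segment accounting if needed.
 */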
static void __blk_mq_requeue_request(struct request *rq)
{
        struct request_queue *q = rq->q;

        trace_block_rq_requeue(q, rq);

        if (test_and_clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
                if (q->dma_drain_size && blk_rq_bytes(rq))
                        rq->nr_phys_segments--;
        }
}

void blk_mq_requeue_request(struct request *rq)
{
        __blk_mq_requeue_request(rq);

        BUG_ON(blk_queued_rq(rq));
        blk_mq_add_to_requeue_list(rq, true);
}
EXPORT_SYMBOL(blk_mq_requeue_request);

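/*
 * Work handler that drains q->requeue_list: head insertions (marked with
 * REQ_SOFTBARRIER) go in first, the rest are appended, and then the
 * queues are restarted.
 */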
static void blk_mq_requeue_work(struct work_struct *work)
{
        struct request_queue *q =
                container_of(work, struct request_queue, requeue_work);
        LIST_HEAD(rq_list);
        struct request *rq, *next;
        unsigned long flags;

        spin_lock_irqsave(&q->requeue_lock, flags);
        list_splice_init(&q->requeue_list, &rq_list);
        spin_unlock_irqrestore(&q->requeue_lock, flags);

        list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
                if (!(rq->cmd_flags & REQ_SOFTBARRIER))
                        continue;

                rq->cmd_flags &= ~REQ_SOFTBARRIER;
                list_del_init(&rq->queuelist);
                blk_mq_insert_request(rq, true, false, false);
        }

        while (!list_empty(&rq_list)) {
                rq = list_entry(rq_list.next, struct request, queuelist);
                list_del_init(&rq->queuelist);
                blk_mq_insert_request(rq, false, false, false);
        }

        /*
         * Use the start variant of queue running here, so that running
         * the requeue work will kick stopped queues.
         */
        blk_mq_start_hw_queues(q);
}

void blk_mq_add_to_requeue_list(struct request *rq, bool at_head)
{
        struct request_queue *q = rq->q;
        unsigned long flags;

        /*
         * We abuse this flag that is otherwise used by the I/O scheduler to
         * request head insertion from the workqueue.
         */
        BUG_ON(rq->cmd_flags & REQ_SOFTBARRIER);

        spin_lock_irqsave(&q->requeue_lock, flags);
        if (at_head) {
                rq->cmd_flags |= REQ_SOFTBARRIER;
                list_add(&rq->queuelist, &q->requeue_list);
        } else {
                list_add_tail(&rq->queuelist, &q->requeue_list);
        }
        spin_unlock_irqrestore(&q->requeue_lock, flags);
}
EXPORT_SYMBOL(blk_mq_add_to_requeue_list);

void blk_mq_kick_requeue_list(struct request_queue *q)
{
        kblockd_schedule_work(&q->requeue_work);
}
EXPORT_SYMBOL(blk_mq_kick_requeue_list);

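/*
 * A flush request borrows the tag of the request that triggered it, so
 * match on the flush sequence flag plus the tag of the internal flush_rq.
 */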
static inline bool is_flush_request(struct request *rq,
                struct blk_flush_queue *fq, unsigned int tag)
{
        return ((rq->cmd_flags & REQ_FLUSH_SEQ) &&
                        fq->flush_rq->tag == tag);
}

struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
{
        struct request *rq = tags->rqs[tag];
        /* mq_ctx of flush rq is always cloned from the corresponding req */
        struct blk_flush_queue *fq = blk_get_flush_queue(rq->q, rq->mq_ctx);

        if (!is_flush_request(rq, fq, tag))
                return rq;

        return fq->flush_rq;
}
EXPORT_SYMBOL(blk_mq_tag_to_rq);

struct blk_mq_timeout_data {
        unsigned long next;
        unsigned int next_set;
};

void blk_mq_rq_timed_out(struct request *req, bool reserved)
{
        struct blk_mq_ops *ops = req->q->mq_ops;
        enum blk_eh_timer_return ret = BLK_EH_RESET_TIMER;

        /*
         * We know that complete is set at this point. If STARTED isn't set
         * anymore, then the request isn't active and the "timeout" should
         * just be ignored. This can happen due to the bitflag ordering.
         * Timeout first checks if STARTED is set, and if it is, assumes
         * the request is active. But if we race with completion, then
         * both flags will get cleared. So check here again, and ignore
         * a timeout event with a request that isn't active.
         */
        if (!test_bit(REQ_ATOM_STARTED, &req->atomic_flags))
                return;

        if (ops->timeout)
                ret = ops->timeout(req, reserved);

        switch (ret) {
        case BLK_EH_HANDLED:
                __blk_mq_complete_request(req);
                break;
        case BLK_EH_RESET_TIMER:
                blk_add_timer(req);
                blk_clear_rq_complete(req);
                break;
        case BLK_EH_NOT_HANDLED:
                break;
        default:
                printk(KERN_ERR "block: bad eh return: %d\n", ret);
                break;
        }
}

static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
                struct request *rq, void *priv, bool reserved)
{
        struct blk_mq_timeout_data *data = priv;

        if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
                return;

        if (time_after_eq(jiffies, rq->deadline)) {
                if (!blk_mark_rq_complete(rq))
                        blk_mq_rq_timed_out(rq, reserved);
        } else if (!data->next_set || time_after(data->next, rq->deadline)) {
                data->next = rq->deadline;
                data->next_set = 1;
        }
}

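/*
 * Queue timeout handler: scan every busy tag on every hardware queue for
 * expired requests, re-arming the timer for the nearest future deadline.
 * If nothing is pending, let the tags go idle instead.
 */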
static void blk_mq_rq_timer(unsigned long priv)
{
        struct request_queue *q = (struct request_queue *)priv;
        struct blk_mq_timeout_data data = {
                .next           = 0,
                .next_set       = 0,
        };
        struct blk_mq_hw_ctx *hctx;
        int i;

        queue_for_each_hw_ctx(q, hctx, i) {
                /*
                 * If no software queues are currently mapped to this
                 * hardware queue, there's nothing to check
                 */
                if (!hctx->nr_ctx || !hctx->tags)
                        continue;

                blk_mq_tag_busy_iter(hctx, blk_mq_check_expired, &data);
        }

        if (data.next_set) {
                data.next = blk_rq_timeout(round_jiffies_up(data.next));
                mod_timer(&q->timeout, data.next);
        } else {
                queue_for_each_hw_ctx(q, hctx, i)
                        blk_mq_tag_idle(hctx);
        }
}

/*
 * Reverse check our software queue for entries that we could potentially
 * merge with. Currently includes a hand-wavy stop count of 8, to not spend
 * too much time checking for merges.
 */
static bool blk_mq_attempt_merge(struct request_queue *q,
                                 struct blk_mq_ctx *ctx, struct bio *bio)
{
        struct request *rq;
        int checked = 8;

        list_for_each_entry_reverse(rq, &ctx->rq_list, queuelist) {
                int el_ret;

                if (!checked--)
                        break;

                if (!blk_rq_merge_ok(rq, bio))
                        continue;

                el_ret = blk_try_merge(rq, bio);
                if (el_ret == ELEVATOR_BACK_MERGE) {
                        if (bio_attempt_back_merge(q, rq, bio)) {
                                ctx->rq_merged++;
                                return true;
                        }
                        break;
                } else if (el_ret == ELEVATOR_FRONT_MERGE) {
                        if (bio_attempt_front_merge(q, rq, bio)) {
                                ctx->rq_merged++;
                                return true;
                        }
                        break;
                }
        }

        return false;
}

/*
 * Process software queues that have been marked busy, splicing them
 * to the for-dispatch list
 */
static void flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
{
        struct blk_mq_ctx *ctx;
        int i;

        for (i = 0; i < hctx->ctx_map.map_size; i++) {
                struct blk_align_bitmap *bm = &hctx->ctx_map.map[i];
                unsigned int off, bit;

                if (!bm->word)
                        continue;

                bit = 0;
                off = i * hctx->ctx_map.bits_per_word;
                do {
                        bit = find_next_bit(&bm->word, bm->depth, bit);
                        if (bit >= bm->depth)
                                break;

                        ctx = hctx->ctxs[bit + off];
                        clear_bit(bit, &bm->word);
                        spin_lock(&ctx->lock);
                        list_splice_tail_init(&ctx->rq_list, list);
                        spin_unlock(&ctx->lock);

                        bit++;
                } while (1);
        }
}

/*
 * Run this hardware queue, pulling any software queues mapped to it in.
 * Note that this function currently has various problems around ordering
 * of IO. In particular, we'd like FIFO behaviour on handling existing
 * items on the hctx->dispatch list. Ignore that for now.
 */
static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
{
        struct request_queue *q = hctx->queue;
        struct request *rq;
        LIST_HEAD(rq_list);
        LIST_HEAD(driver_list);
        struct list_head *dptr;
        int queued;

        WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask));

        if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
                return;

        hctx->run++;

        /*
         * Touch any software queue that has pending entries.
         */
        flush_busy_ctxs(hctx, &rq_list);

        /*
         * If we have previous entries on our dispatch list, grab them
         * and stuff them at the front for more fair dispatch.
         */
        if (!list_empty_careful(&hctx->dispatch)) {
                spin_lock(&hctx->lock);
                if (!list_empty(&hctx->dispatch))
                        list_splice_init(&hctx->dispatch, &rq_list);
                spin_unlock(&hctx->lock);
        }

        /*
         * Start off with dptr being NULL, so we start the first request
         * immediately, even if we have more pending.
         */
        dptr = NULL;

        /*
         * Now process all the entries, sending them to the driver.
         */
        queued = 0;
        while (!list_empty(&rq_list)) {
                struct blk_mq_queue_data bd;
                int ret;

                rq = list_first_entry(&rq_list, struct request, queuelist);
                list_del_init(&rq->queuelist);

                bd.rq = rq;
                bd.list = dptr;
                bd.last = list_empty(&rq_list);

                ret = q->mq_ops->queue_rq(hctx, &bd);
                switch (ret) {
                case BLK_MQ_RQ_QUEUE_OK:
                        queued++;
                        continue;
                case BLK_MQ_RQ_QUEUE_BUSY:
                        list_add(&rq->queuelist, &rq_list);
                        __blk_mq_requeue_request(rq);
                        break;
                default:
                        pr_err("blk-mq: bad return on queue: %d\n", ret);
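                        /* no break: fall through and fail the request */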
                case BLK_MQ_RQ_QUEUE_ERROR:
                        rq->errors = -EIO;
                        blk_mq_end_request(rq, rq->errors);
                        break;
                }

                if (ret == BLK_MQ_RQ_QUEUE_BUSY)
                        break;

                /*
                 * We've done the first request. If we have more than 1
                 * left in the list, set dptr to defer issue.
                 */
                if (!dptr && rq_list.next != rq_list.prev)
                        dptr = &driver_list;
        }

        if (!queued)
                hctx->dispatched[0]++;
        else if (queued < (1 << (BLK_MQ_MAX_DISPATCH_ORDER - 1)))
                hctx->dispatched[ilog2(queued) + 1]++;

        /*
         * Any items that need requeuing? Stuff them into hctx->dispatch,
         * which is where we will continue on the next queue run.
         */
        if (!list_empty(&rq_list)) {
                spin_lock(&hctx->lock);
                list_splice(&rq_list, &hctx->dispatch);
                spin_unlock(&hctx->lock);
        }
}

/*
 * It'd be great if the workqueue API had a way to pass
 * in a mask and had some smarts for more clever placement.
 * For now we just round-robin here, switching for every
 * BLK_MQ_CPU_WORK_BATCH queued items.
 */
static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
{
        int cpu = hctx->next_cpu;

        if (--hctx->next_cpu_batch <= 0) {
                int next_cpu;

                next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask);
                if (next_cpu >= nr_cpu_ids)
                        next_cpu = cpumask_first(hctx->cpumask);

                hctx->next_cpu = next_cpu;
                hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
        }

        return cpu;
}

void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
{
        if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
                return;

        if (!async) {
                int cpu = get_cpu();
                if (cpumask_test_cpu(cpu, hctx->cpumask)) {
                        __blk_mq_run_hw_queue(hctx);
                        put_cpu();
                        return;
                }

                put_cpu();
        }

        if (hctx->queue->nr_hw_queues == 1)
                kblockd_schedule_delayed_work(&hctx->run_work, 0);
        else {
                unsigned int cpu;

                cpu = blk_mq_hctx_next_cpu(hctx);
                kblockd_schedule_delayed_work_on(cpu, &hctx->run_work, 0);
        }
}

void blk_mq_run_queues(struct request_queue *q, bool async)
{
        struct blk_mq_hw_ctx *hctx;
        int i;

        queue_for_each_hw_ctx(q, hctx, i) {
                if ((!blk_mq_hctx_has_pending(hctx) &&
                    list_empty_careful(&hctx->dispatch)) ||
                    test_bit(BLK_MQ_S_STOPPED, &hctx->state))
                        continue;

                blk_mq_run_hw_queue(hctx, async);
        }
}
EXPORT_SYMBOL(blk_mq_run_queues);

void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
{
        cancel_delayed_work(&hctx->run_work);
        cancel_delayed_work(&hctx->delay_work);
        set_bit(BLK_MQ_S_STOPPED, &hctx->state);
}
EXPORT_SYMBOL(blk_mq_stop_hw_queue);

void blk_mq_stop_hw_queues(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        int i;

        queue_for_each_hw_ctx(q, hctx, i)
                blk_mq_stop_hw_queue(hctx);
}
EXPORT_SYMBOL(blk_mq_stop_hw_queues);

void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
{
        clear_bit(BLK_MQ_S_STOPPED, &hctx->state);

        blk_mq_run_hw_queue(hctx, false);
}
EXPORT_SYMBOL(blk_mq_start_hw_queue);

void blk_mq_start_hw_queues(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        int i;

        queue_for_each_hw_ctx(q, hctx, i)
                blk_mq_start_hw_queue(hctx);
}
EXPORT_SYMBOL(blk_mq_start_hw_queues);

void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
{
        struct blk_mq_hw_ctx *hctx;
        int i;

        queue_for_each_hw_ctx(q, hctx, i) {
                if (!test_bit(BLK_MQ_S_STOPPED, &hctx->state))
                        continue;

                clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
                blk_mq_run_hw_queue(hctx, async);
        }
}
EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);

static void blk_mq_run_work_fn(struct work_struct *work)
{
        struct blk_mq_hw_ctx *hctx;

        hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work);

        __blk_mq_run_hw_queue(hctx);
}

static void blk_mq_delay_work_fn(struct work_struct *work)
{
        struct blk_mq_hw_ctx *hctx;

        hctx = container_of(work, struct blk_mq_hw_ctx, delay_work.work);

        if (test_and_clear_bit(BLK_MQ_S_STOPPED, &hctx->state))
                __blk_mq_run_hw_queue(hctx);
}

void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
{
        unsigned long tmo = msecs_to_jiffies(msecs);

        if (hctx->queue->nr_hw_queues == 1)
                kblockd_schedule_delayed_work(&hctx->delay_work, tmo);
        else {
                unsigned int cpu;

                cpu = blk_mq_hctx_next_cpu(hctx);
                kblockd_schedule_delayed_work_on(cpu, &hctx->delay_work, tmo);
        }
}
EXPORT_SYMBOL(blk_mq_delay_queue);

static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
                                    struct request *rq, bool at_head)
{
        struct blk_mq_ctx *ctx = rq->mq_ctx;

        trace_block_rq_insert(hctx->queue, rq);

        if (at_head)
                list_add(&rq->queuelist, &ctx->rq_list);
        else
                list_add_tail(&rq->queuelist, &ctx->rq_list);

        blk_mq_hctx_mark_pending(hctx, ctx);
}

void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
                bool async)
{
        struct request_queue *q = rq->q;
        struct blk_mq_hw_ctx *hctx;
        struct blk_mq_ctx *ctx = rq->mq_ctx, *current_ctx;

        current_ctx = blk_mq_get_ctx(q);
        if (!cpu_online(ctx->cpu))
                rq->mq_ctx = ctx = current_ctx;

        hctx = q->mq_ops->map_queue(q, ctx->cpu);

        spin_lock(&ctx->lock);
        __blk_mq_insert_request(hctx, rq, at_head);
        spin_unlock(&ctx->lock);

        if (run_queue)
                blk_mq_run_hw_queue(hctx, async);

        blk_mq_put_ctx(current_ctx);
}

static void blk_mq_insert_requests(struct request_queue *q,
                                   struct blk_mq_ctx *ctx,
                                   struct list_head *list,
                                   int depth,
                                   bool from_schedule)
{
        struct blk_mq_hw_ctx *hctx;
        struct blk_mq_ctx *current_ctx;

        trace_block_unplug(q, depth, !from_schedule);

        current_ctx = blk_mq_get_ctx(q);

        if (!cpu_online(ctx->cpu))
                ctx = current_ctx;
        hctx = q->mq_ops->map_queue(q, ctx->cpu);

        /*
         * Preemption doesn't flush the plug list, so it's possible that
         * ctx->cpu is offline now
         */
        spin_lock(&ctx->lock);
        while (!list_empty(list)) {
                struct request *rq;

                rq = list_first_entry(list, struct request, queuelist);
                list_del_init(&rq->queuelist);
                rq->mq_ctx = ctx;
                __blk_mq_insert_request(hctx, rq, false);
        }
        spin_unlock(&ctx->lock);

        blk_mq_run_hw_queue(hctx, from_schedule);
        blk_mq_put_ctx(current_ctx);
}

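/*
 * Sort plugged requests by software queue first, then by start sector,
 * so blk_mq_flush_plug_list() can insert them in per-ctx batches.
 */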
static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b)
{
        struct request *rqa = container_of(a, struct request, queuelist);
        struct request *rqb = container_of(b, struct request, queuelist);

        return !(rqa->mq_ctx < rqb->mq_ctx ||
                 (rqa->mq_ctx == rqb->mq_ctx &&
                  blk_rq_pos(rqa) < blk_rq_pos(rqb)));
}

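/*
 * Called when a plug is flushed: sort the plugged requests and hand each
 * run of same-ctx requests to blk_mq_insert_requests() in one go.
 */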
void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
{
        struct blk_mq_ctx *this_ctx;
        struct request_queue *this_q;
        struct request *rq;
        LIST_HEAD(list);
        LIST_HEAD(ctx_list);
        unsigned int depth;

        list_splice_init(&plug->mq_list, &list);

        list_sort(NULL, &list, plug_ctx_cmp);

        this_q = NULL;
        this_ctx = NULL;
        depth = 0;

        while (!list_empty(&list)) {
                rq = list_entry_rq(list.next);
                list_del_init(&rq->queuelist);
                BUG_ON(!rq->q);
                if (rq->mq_ctx != this_ctx) {
                        if (this_ctx) {
                                blk_mq_insert_requests(this_q, this_ctx,
                                                        &ctx_list, depth,
                                                        from_schedule);
                        }

                        this_ctx = rq->mq_ctx;
                        this_q = rq->q;
                        depth = 0;
                }

                depth++;
                list_add_tail(&rq->queuelist, &ctx_list);
        }

        /*
         * If 'this_ctx' is set, we know we have entries to complete
         * on 'ctx_list'. Do those.
         */
        if (this_ctx) {
                blk_mq_insert_requests(this_q, this_ctx, &ctx_list, depth,
                                       from_schedule);
        }
}

static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
{
        init_request_from_bio(rq, bio);

        if (blk_do_io_stat(rq))
                blk_account_io_start(rq, 1);
}

static inline bool hctx_allow_merges(struct blk_mq_hw_ctx *hctx)
{
        return (hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
                !blk_queue_nomerges(hctx->queue);
}

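/*
 * Either merge the bio into an existing request in the software queue, or
 * turn it into a request and insert it. Returns true if the bio was
 * merged (the pre-allocated request is freed), false if it was inserted.
 */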
static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx,
                                         struct blk_mq_ctx *ctx,
                                         struct request *rq, struct bio *bio)
{
        if (!hctx_allow_merges(hctx)) {
                blk_mq_bio_to_request(rq, bio);
                spin_lock(&ctx->lock);
insert_rq:
                __blk_mq_insert_request(hctx, rq, false);
                spin_unlock(&ctx->lock);
                return false;
        } else {
                struct request_queue *q = hctx->queue;

                spin_lock(&ctx->lock);
                if (!blk_mq_attempt_merge(q, ctx, bio)) {
                        blk_mq_bio_to_request(rq, bio);
                        goto insert_rq;
                }

                spin_unlock(&ctx->lock);
                __blk_mq_free_request(hctx, ctx, rq);
                return true;
        }
}

struct blk_map_ctx {
        struct blk_mq_hw_ctx *hctx;
        struct blk_mq_ctx *ctx;
};

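/*
 * Map the bio submitter's CPU to a ctx/hctx pair and allocate a request,
 * first with GFP_ATOMIC and then, after kicking the queue, with
 * __GFP_WAIT. The chosen mapping is returned through @data.
 */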
static struct request *blk_mq_map_request(struct request_queue *q,
                                          struct bio *bio,
                                          struct blk_map_ctx *data)
{
        struct blk_mq_hw_ctx *hctx;
        struct blk_mq_ctx *ctx;
        struct request *rq;
        int rw = bio_data_dir(bio);
        struct blk_mq_alloc_data alloc_data;

        if (unlikely(blk_mq_queue_enter(q))) {
                bio_endio(bio, -EIO);
                return NULL;
        }

        ctx = blk_mq_get_ctx(q);
        hctx = q->mq_ops->map_queue(q, ctx->cpu);

        if (rw_is_sync(bio->bi_rw))
                rw |= REQ_SYNC;

        trace_block_getrq(q, bio, rw);
        blk_mq_set_alloc_data(&alloc_data, q, GFP_ATOMIC, false, ctx,
                        hctx);
        rq = __blk_mq_alloc_request(&alloc_data, rw);
        if (unlikely(!rq)) {
                __blk_mq_run_hw_queue(hctx);
                blk_mq_put_ctx(ctx);
                trace_block_sleeprq(q, bio, rw);

                ctx = blk_mq_get_ctx(q);
                hctx = q->mq_ops->map_queue(q, ctx->cpu);
                blk_mq_set_alloc_data(&alloc_data, q,
                                __GFP_WAIT|GFP_ATOMIC, false, ctx, hctx);
                rq = __blk_mq_alloc_request(&alloc_data, rw);
                ctx = alloc_data.ctx;
                hctx = alloc_data.hctx;
        }

        hctx->queued++;
        data->hctx = hctx;
        data->ctx = ctx;
        return rq;
}

/*
 * Multiple hardware queue variant. This will not use per-process plugs,
 * but will attempt to bypass the hctx queueing if we can go straight to
 * hardware for SYNC IO.
 */
static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
{
        const int is_sync = rw_is_sync(bio->bi_rw);
        const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
        struct blk_map_ctx data;
        struct request *rq;

        blk_queue_bounce(q, &bio);

        if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
                bio_endio(bio, -EIO);
                return;
        }

        rq = blk_mq_map_request(q, bio, &data);
        if (unlikely(!rq))
                return;

        if (unlikely(is_flush_fua)) {
                blk_mq_bio_to_request(rq, bio);
                blk_insert_flush(rq);
                goto run_queue;
        }

        /*
         * If the driver supports deferred issue based on 'last', then
         * queue it up like normal since we can potentially save some
         * CPU this way.
         */
        if (is_sync && !(data.hctx->flags & BLK_MQ_F_DEFER_ISSUE)) {
                struct blk_mq_queue_data bd = {
                        .rq = rq,
                        .list = NULL,
                        .last = 1
                };
                int ret;

                blk_mq_bio_to_request(rq, bio);

                /*
                 * For an OK return, we are done. For an error, kill the
                 * request. Any other return (busy) just means we add it
                 * to our list as we previously would have done.
                 */
                ret = q->mq_ops->queue_rq(data.hctx, &bd);
                if (ret == BLK_MQ_RQ_QUEUE_OK)
                        goto done;
                else {
                        __blk_mq_requeue_request(rq);

                        if (ret == BLK_MQ_RQ_QUEUE_ERROR) {
                                rq->errors = -EIO;
                                blk_mq_end_request(rq, rq->errors);
                                goto done;
                        }
                }
        }

        if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
                /*
                 * For a SYNC request, send it to the hardware immediately. For
                 * an ASYNC request, just ensure that we run it later on. The
                 * latter allows for merging opportunities and more efficient
                 * dispatching.
                 */
run_queue:
                blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
        }
done:
        blk_mq_put_ctx(data.ctx);
}

/*
 * Single hardware queue variant. This will attempt to use any per-process
 * plug for merging and IO deferral.
 */
static void blk_sq_make_request(struct request_queue *q, struct bio *bio)
{
        const int is_sync = rw_is_sync(bio->bi_rw);
        const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
        unsigned int use_plug, request_count = 0;
        struct blk_map_ctx data;
        struct request *rq;

        /*
         * Use a per-process plug only for async IO that is not a
         * flush/FUA request; sync IO still goes straight to hardware.
         */
        use_plug = !is_flush_fua && !is_sync;

        blk_queue_bounce(q, &bio);

        if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
                bio_endio(bio, -EIO);
                return;
        }

        if (use_plug && !blk_queue_nomerges(q) &&
            blk_attempt_plug_merge(q, bio, &request_count))
                return;

        rq = blk_mq_map_request(q, bio, &data);
        if (unlikely(!rq))
                return;

        if (unlikely(is_flush_fua)) {
                blk_mq_bio_to_request(rq, bio);
                blk_insert_flush(rq);
                goto run_queue;
        }

        /*
         * If a task plug exists, use it: since this is completely lockless,
         * we can temporarily store requests there until the task is either
         * done or scheduled away.
         */
        if (use_plug) {
                struct blk_plug *plug = current->plug;

                if (plug) {
                        blk_mq_bio_to_request(rq, bio);
                        if (list_empty(&plug->mq_list))
                                trace_block_plug(q);
                        else if (request_count >= BLK_MAX_REQUEST_COUNT) {
                                blk_flush_plug_list(plug, false);
                                trace_block_plug(q);
                        }
                        list_add_tail(&rq->queuelist, &plug->mq_list);
                        blk_mq_put_ctx(data.ctx);
                        return;
                }
        }

        if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
                /*
                 * For a SYNC request, send it to the hardware immediately. For
                 * an ASYNC request, just ensure that we run it later on. The
                 * latter allows for merging opportunities and more efficient
                 * dispatching.
                 */
run_queue:
                blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
        }

        blk_mq_put_ctx(data.ctx);
}

/*
 * Default mapping to a software queue, since we use one per CPU.
 */
struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, const int cpu)
{
        return q->queue_hw_ctx[q->mq_map[cpu]];
}
EXPORT_SYMBOL(blk_mq_map_queue);

static void blk_mq_free_rq_map(struct blk_mq_tag_set *set,
                struct blk_mq_tags *tags, unsigned int hctx_idx)
{
        struct page *page;

        if (tags->rqs && set->ops->exit_request) {
                int i;

                for (i = 0; i < tags->nr_tags; i++) {
                        if (!tags->rqs[i])
                                continue;
                        set->ops->exit_request(set->driver_data, tags->rqs[i],
                                                hctx_idx, i);
                        tags->rqs[i] = NULL;
                }
        }

        while (!list_empty(&tags->page_list)) {
                page = list_first_entry(&tags->page_list, struct page, lru);
                list_del_init(&page->lru);
                __free_pages(page, page->private);
        }

        kfree(tags->rqs);

        blk_mq_free_tags(tags);
}

static size_t order_to_size(unsigned int order)
{
        return (size_t)PAGE_SIZE << order;
}

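/*
 * Build the tag map for one hardware queue: allocate the request pointer
 * array, then carve the requests themselves out of bulk page allocations,
 * falling back to smaller orders when high-order pages are unavailable.
 */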
static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
                unsigned int hctx_idx)
{
        struct blk_mq_tags *tags;
        unsigned int i, j, entries_per_page, max_order = 4;
        size_t rq_size, left;

        tags = blk_mq_init_tags(set->queue_depth, set->reserved_tags,
                                set->numa_node);
        if (!tags)
                return NULL;

        INIT_LIST_HEAD(&tags->page_list);

        tags->rqs = kzalloc_node(set->queue_depth * sizeof(struct request *),
                                 GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
                                 set->numa_node);
        if (!tags->rqs) {
                blk_mq_free_tags(tags);
                return NULL;
        }

        /*
         * rq_size is the size of the request plus driver payload, rounded
         * to the cacheline size
         */
        rq_size = round_up(sizeof(struct request) + set->cmd_size,
                                cache_line_size());
        left = rq_size * set->queue_depth;

        for (i = 0; i < set->queue_depth; ) {
                int this_order = max_order;
                struct page *page;
                int to_do;
                void *p;

                while (left < order_to_size(this_order - 1) && this_order)
                        this_order--;

                do {
                        page = alloc_pages_node(set->numa_node,
                                GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
                                this_order);
                        if (page)
                                break;
                        if (!this_order--)
                                break;
                        if (order_to_size(this_order) < rq_size)
                                break;
                } while (1);

                if (!page)
                        goto fail;

                page->private = this_order;
                list_add_tail(&page->lru, &tags->page_list);

                p = page_address(page);
                entries_per_page = order_to_size(this_order) / rq_size;
                to_do = min(entries_per_page, set->queue_depth - i);
                left -= to_do * rq_size;
                for (j = 0; j < to_do; j++) {
                        tags->rqs[i] = p;
                        tags->rqs[i]->atomic_flags = 0;
                        tags->rqs[i]->cmd_flags = 0;
                        if (set->ops->init_request) {
                                if (set->ops->init_request(set->driver_data,
                                                tags->rqs[i], hctx_idx, i,
                                                set->numa_node)) {
                                        tags->rqs[i] = NULL;
                                        goto fail;
                                }
                        }

                        p += rq_size;
                        i++;
                }
        }

        return tags;

fail:
        blk_mq_free_rq_map(set, tags, hctx_idx);
        return NULL;
}

static void blk_mq_free_bitmap(struct blk_mq_ctxmap *bitmap)
{
        kfree(bitmap->map);
}

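/*
 * Size the pending-work bitmap for nr_cpu_ids software queues, using one
 * cacheline-aligned word per group of bits_per_word CPUs.
 */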
static int blk_mq_alloc_bitmap(struct blk_mq_ctxmap *bitmap, int node)
{
        unsigned int bpw = 8, total, num_maps, i;

        bitmap->bits_per_word = bpw;

        num_maps = ALIGN(nr_cpu_ids, bpw) / bpw;
        bitmap->map = kzalloc_node(num_maps * sizeof(struct blk_align_bitmap),
                                        GFP_KERNEL, node);
        if (!bitmap->map)
                return -ENOMEM;

        bitmap->map_size = num_maps;

        total = nr_cpu_ids;
        for (i = 0; i < num_maps; i++) {
                bitmap->map[i].depth = min(total, bitmap->bits_per_word);
                total -= bitmap->map[i].depth;
        }

        return 0;
}

static int blk_mq_hctx_cpu_offline(struct blk_mq_hw_ctx *hctx, int cpu)
{
        struct request_queue *q = hctx->queue;
        struct blk_mq_ctx *ctx;
        LIST_HEAD(tmp);

        /*
         * Move ctx entries to new CPU, if this one is going away.
         */
        ctx = __blk_mq_get_ctx(q, cpu);

        spin_lock(&ctx->lock);
        if (!list_empty(&ctx->rq_list)) {
                list_splice_init(&ctx->rq_list, &tmp);
                blk_mq_hctx_clear_pending(hctx, ctx);
        }
        spin_unlock(&ctx->lock);

        if (list_empty(&tmp))
                return NOTIFY_OK;

        ctx = blk_mq_get_ctx(q);
        spin_lock(&ctx->lock);

        while (!list_empty(&tmp)) {
                struct request *rq;

                rq = list_first_entry(&tmp, struct request, queuelist);
                rq->mq_ctx = ctx;
                list_move_tail(&rq->queuelist, &ctx->rq_list);
        }

        hctx = q->mq_ops->map_queue(q, ctx->cpu);
        blk_mq_hctx_mark_pending(hctx, ctx);

        spin_unlock(&ctx->lock);

        blk_mq_run_hw_queue(hctx, true);
        blk_mq_put_ctx(ctx);
        return NOTIFY_OK;
}

static int blk_mq_hctx_cpu_online(struct blk_mq_hw_ctx *hctx, int cpu)
{
        struct request_queue *q = hctx->queue;
        struct blk_mq_tag_set *set = q->tag_set;

        if (set->tags[hctx->queue_num])
                return NOTIFY_OK;

        set->tags[hctx->queue_num] = blk_mq_init_rq_map(set, hctx->queue_num);
        if (!set->tags[hctx->queue_num])
                return NOTIFY_STOP;

        hctx->tags = set->tags[hctx->queue_num];
        return NOTIFY_OK;
}

static int blk_mq_hctx_notify(void *data, unsigned long action,
                              unsigned int cpu)
{
        struct blk_mq_hw_ctx *hctx = data;

        if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
                return blk_mq_hctx_cpu_offline(hctx, cpu);
        else if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
                return blk_mq_hctx_cpu_online(hctx, cpu);

        return NOTIFY_OK;
}

static void blk_mq_exit_hctx(struct request_queue *q,
                struct blk_mq_tag_set *set,
                struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
        unsigned flush_start_tag = set->queue_depth;

        blk_mq_tag_idle(hctx);

        if (set->ops->exit_request)
                set->ops->exit_request(set->driver_data,
                                       hctx->fq->flush_rq, hctx_idx,
                                       flush_start_tag + hctx_idx);

        if (set->ops->exit_hctx)
                set->ops->exit_hctx(hctx, hctx_idx);

        blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
        blk_free_flush_queue(hctx->fq);
        kfree(hctx->ctxs);
        blk_mq_free_bitmap(&hctx->ctx_map);
}

static void blk_mq_exit_hw_queues(struct request_queue *q,
                struct blk_mq_tag_set *set, int nr_queue)
{
        struct blk_mq_hw_ctx *hctx;
        unsigned int i;

        queue_for_each_hw_ctx(q, hctx, i) {
                if (i == nr_queue)
                        break;
                blk_mq_exit_hctx(q, set, hctx, i);
        }
}

static void blk_mq_free_hw_queues(struct request_queue *q,
                struct blk_mq_tag_set *set)
{
        struct blk_mq_hw_ctx *hctx;
        unsigned int i;

        queue_for_each_hw_ctx(q, hctx, i) {
                free_cpumask_var(hctx->cpumask);
                kfree(hctx);
        }
}

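/*
 * Set up one hardware queue: work items, locks, cpu notifier, per-ctx
 * map and flush queue, then give the driver its init_hctx/init_request
 * callbacks. Unwinds everything on failure.
 */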
1583 static int blk_mq_init_hctx(struct request_queue *q,
1584                 struct blk_mq_tag_set *set,
1585                 struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
1586 {
1587         int node;
1588         unsigned flush_start_tag = set->queue_depth;
1589
1590         node = hctx->numa_node;
1591         if (node == NUMA_NO_NODE)
1592                 node = hctx->numa_node = set->numa_node;
1593
1594         INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
1595         INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn);
1596         spin_lock_init(&hctx->lock);
1597         INIT_LIST_HEAD(&hctx->dispatch);
1598         hctx->queue = q;
1599         hctx->queue_num = hctx_idx;
1600         hctx->flags = set->flags;
1601         hctx->cmd_size = set->cmd_size;
1602
1603         blk_mq_init_cpu_notifier(&hctx->cpu_notifier,
1604                                         blk_mq_hctx_notify, hctx);
1605         blk_mq_register_cpu_notifier(&hctx->cpu_notifier);
1606
1607         hctx->tags = set->tags[hctx_idx];
1608
1609         /*
1610          * Allocate space for all possible cpus to avoid allocation at
1611          * runtime
1612          */
1613         hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *),
1614                                         GFP_KERNEL, node);
1615         if (!hctx->ctxs)
1616                 goto unregister_cpu_notifier;
1617
1618         if (blk_mq_alloc_bitmap(&hctx->ctx_map, node))
1619                 goto free_ctxs;
1620
1621         hctx->nr_ctx = 0;
1622
1623         if (set->ops->init_hctx &&
1624             set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
1625                 goto free_bitmap;
1626
1627         hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size);
1628         if (!hctx->fq)
1629                 goto exit_hctx;
1630
1631         if (set->ops->init_request &&
1632             set->ops->init_request(set->driver_data,
1633                                    hctx->fq->flush_rq, hctx_idx,
1634                                    flush_start_tag + hctx_idx, node))
1635                 goto free_fq;
1636
1637         return 0;
1638
1639  free_fq:
1640         blk_free_flush_queue(hctx->fq);
1641  exit_hctx:
1642         if (set->ops->exit_hctx)
1643                 set->ops->exit_hctx(hctx, hctx_idx);
1644  free_bitmap:
1645         blk_mq_free_bitmap(&hctx->ctx_map);
1646  free_ctxs:
1647         kfree(hctx->ctxs);
1648  unregister_cpu_notifier:
1649         blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
1650
1651         return -1;
1652 }
1653
1654 static int blk_mq_init_hw_queues(struct request_queue *q,
1655                 struct blk_mq_tag_set *set)
1656 {
1657         struct blk_mq_hw_ctx *hctx;
1658         unsigned int i;
1659
1660         /*
1661          * Initialize hardware queues
1662          */
1663         queue_for_each_hw_ctx(q, hctx, i) {
1664                 if (blk_mq_init_hctx(q, set, hctx, i))
1665                         break;
1666         }
1667
1668         if (i == q->nr_hw_queues)
1669                 return 0;
1670
1671         /*
1672          * Init failed
1673          */
1674         blk_mq_exit_hw_queues(q, set, i);
1675
1676         return 1;
1677 }
1678
1679 static void blk_mq_init_cpu_queues(struct request_queue *q,
1680                                    unsigned int nr_hw_queues)
1681 {
1682         unsigned int i;
1683
1684         for_each_possible_cpu(i) {
1685                 struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
1686                 struct blk_mq_hw_ctx *hctx;
1687
1688                 memset(__ctx, 0, sizeof(*__ctx));
1689                 __ctx->cpu = i;
1690                 spin_lock_init(&__ctx->lock);
1691                 INIT_LIST_HEAD(&__ctx->rq_list);
1692                 __ctx->queue = q;
1693
1694                 /* If the CPU isn't online, it is mapped to the first hctx */
1695                 if (!cpu_online(i))
1696                         continue;
1697
1698                 hctx = q->mq_ops->map_queue(q, i);
1699                 cpumask_set_cpu(i, hctx->cpumask);
1700                 hctx->nr_ctx++;
1701
1702                 /*
1703                  * Set local node, IFF we have more than one hw queue. If
1704                  * not, we remain on the home node of the device
1705                  */
1706                 if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
1707                         hctx->numa_node = cpu_to_node(i);
1708         }
1709 }
1710
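/*
 * (Re)build the ctx -> hctx mapping for all online CPUs.  ctx->index_hw
 * is both the ctx's slot in hctx->ctxs[] and the bit it owns in the
 * hctx's pending bitmap, so it must be assigned here, where the software
 * queues are handed out in order.
 */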
1711 static void blk_mq_map_swqueue(struct request_queue *q)
1712 {
1713         unsigned int i;
1714         struct blk_mq_hw_ctx *hctx;
1715         struct blk_mq_ctx *ctx;
1716
1717         queue_for_each_hw_ctx(q, hctx, i) {
1718                 cpumask_clear(hctx->cpumask);
1719                 hctx->nr_ctx = 0;
1720         }
1721
1722         /*
1723          * Map software to hardware queues
1724          */
1725         queue_for_each_ctx(q, ctx, i) {
1726                 /* If the CPU isn't online, it is mapped to the first hctx */
1727                 if (!cpu_online(i))
1728                         continue;
1729
1730                 hctx = q->mq_ops->map_queue(q, i);
1731                 cpumask_set_cpu(i, hctx->cpumask);
1732                 ctx->index_hw = hctx->nr_ctx;
1733                 hctx->ctxs[hctx->nr_ctx++] = ctx;
1734         }
1735
1736         queue_for_each_hw_ctx(q, hctx, i) {
1737                 /*
1738                  * If no software queues are mapped to this hardware queue,
1739                  * disable it and free the request entries.
1740                  */
1741                 if (!hctx->nr_ctx) {
1742                         struct blk_mq_tag_set *set = q->tag_set;
1743
1744                         if (set->tags[i]) {
1745                                 blk_mq_free_rq_map(set, set->tags[i], i);
1746                                 set->tags[i] = NULL;
1747                                 hctx->tags = NULL;
1748                         }
1749                         continue;
1750                 }
1751
1752                 /*
1753                  * Initialize batch roundrobin counts
1754                  */
1755                 hctx->next_cpu = cpumask_first(hctx->cpumask);
1756                 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
1757         }
1758 }
1759
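/*
 * A tag set becomes "shared" once more than one queue is registered on
 * it, and every hctx then needs BLK_MQ_F_TAG_SHARED so tag allocation is
 * throttled fairly between the active users.  Each queue is frozen while
 * the flag is flipped, so no request in flight can observe a stale value.
 */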
1760 static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set)
1761 {
1762         struct blk_mq_hw_ctx *hctx;
1763         struct request_queue *q;
1764         bool shared;
1765         int i;
1766
1767         if (set->tag_list.next == set->tag_list.prev)
1768                 shared = false;
1769         else
1770                 shared = true;
1771
1772         list_for_each_entry(q, &set->tag_list, tag_set_list) {
1773                 blk_mq_freeze_queue(q);
1774
1775                 queue_for_each_hw_ctx(q, hctx, i) {
1776                         if (shared)
1777                                 hctx->flags |= BLK_MQ_F_TAG_SHARED;
1778                         else
1779                                 hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
1780                 }
1781                 blk_mq_unfreeze_queue(q);
1782         }
1783 }
1784
1785 static void blk_mq_del_queue_tag_set(struct request_queue *q)
1786 {
1787         struct blk_mq_tag_set *set = q->tag_set;
1788
1789         mutex_lock(&set->tag_list_lock);
1790         list_del_init(&q->tag_set_list);
1791         blk_mq_update_tag_set_depth(set);
1792         mutex_unlock(&set->tag_list_lock);
1793 }
1794
1795 static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
1796                                      struct request_queue *q)
1797 {
1798         q->tag_set = set;
1799
1800         mutex_lock(&set->tag_list_lock);
1801         list_add_tail(&q->tag_set_list, &set->tag_list);
1802         blk_mq_update_tag_set_depth(set);
1803         mutex_unlock(&set->tag_list_lock);
1804 }
1805
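/*
 * Build a request queue on top of a tag set (see blk_mq_alloc_tag_set()
 * below).  A minimal caller, sketched with purely illustrative names
 * (my_mq_ops, my_queue_rq and struct my_cmd are hypothetical), does
 * roughly:
 *
 *	static struct blk_mq_ops my_mq_ops = {
 *		.queue_rq	= my_queue_rq,
 *		.map_queue	= blk_mq_map_queue,
 *	};
 *
 *	set->ops = &my_mq_ops;
 *	set->nr_hw_queues = 1;
 *	set->queue_depth = 64;
 *	set->cmd_size = sizeof(struct my_cmd);
 *	if (blk_mq_alloc_tag_set(set))
 *		return -ENOMEM;
 *	q = blk_mq_init_queue(set);
 *	if (IS_ERR(q)) {
 *		blk_mq_free_tag_set(set);
 *		return PTR_ERR(q);
 *	}
 */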
1806 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
1807 {
1808         struct blk_mq_hw_ctx **hctxs;
1809         struct blk_mq_ctx __percpu *ctx;
1810         struct request_queue *q;
1811         unsigned int *map;
1812         int i;
1813
1814         ctx = alloc_percpu(struct blk_mq_ctx);
1815         if (!ctx)
1816                 return ERR_PTR(-ENOMEM);
1817
1818         /*
1819          * If a crashdump is active, then we are potentially in a very
1820          * memory constrained environment. Limit us to 1 queue and
1821          * 64 tags to prevent using too much memory.
1822          */
1823         if (is_kdump_kernel()) {
1824                 set->nr_hw_queues = 1;
1825                 set->queue_depth = min(64U, set->queue_depth);
1826         }
1827
             /*
              * Zeroed allocation, so the unwind path below can stop at the
              * first entry that was never set up.
              */
1828         hctxs = kzalloc_node(set->nr_hw_queues * sizeof(*hctxs), GFP_KERNEL,
1829                         set->numa_node);
1830
1831         if (!hctxs)
1832                 goto err_percpu;
1833
1834         map = blk_mq_make_queue_map(set);
1835         if (!map)
1836                 goto err_map;
1837
1838         for (i = 0; i < set->nr_hw_queues; i++) {
1839                 int node = blk_mq_hw_queue_to_node(map, i);
1840
1841                 hctxs[i] = kzalloc_node(sizeof(struct blk_mq_hw_ctx),
1842                                         GFP_KERNEL, node);
1843                 if (!hctxs[i])
1844                         goto err_hctxs;
1845
1846                 if (!zalloc_cpumask_var_node(&hctxs[i]->cpumask, GFP_KERNEL,
1847                                                 node))
1848                         goto err_hctxs;
1849
1850                 atomic_set(&hctxs[i]->nr_active, 0);
1851                 hctxs[i]->numa_node = node;
1852                 hctxs[i]->queue_num = i;
1853         }
1854
1855         q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
1856         if (!q)
1857                 goto err_hctxs;
1858
1859         /*
1860          * Init percpu_ref in atomic mode so that it's faster to shutdown.
1861          * See blk_register_queue() for details.
1862          */
1863         if (percpu_ref_init(&q->mq_usage_counter, blk_mq_usage_counter_release,
1864                             PERCPU_REF_INIT_ATOMIC, GFP_KERNEL)) {
                     /*
                      * q->mq_ops is not set yet, so blk_cleanup_queue() only
                      * tears down the generic queue state; the blk-mq pieces
                      * are freed through the error labels below.
                      */
                     blk_cleanup_queue(q);
1865                 goto err_hctxs;
             }
1866
1867         setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
1868         blk_queue_rq_timeout(q, 30000);
1869
1870         q->nr_queues = nr_cpu_ids;
1871         q->nr_hw_queues = set->nr_hw_queues;
1872         q->mq_map = map;
1873
1874         q->queue_ctx = ctx;
1875         q->queue_hw_ctx = hctxs;
1876
1877         q->mq_ops = set->ops;
1878         q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
1879
1880         if (!(set->flags & BLK_MQ_F_SG_MERGE))
1881                 q->queue_flags |= 1 << QUEUE_FLAG_NO_SG_MERGE;
1882
1883         q->sg_reserved_size = INT_MAX;
1884
1885         INIT_WORK(&q->requeue_work, blk_mq_requeue_work);
1886         INIT_LIST_HEAD(&q->requeue_list);
1887         spin_lock_init(&q->requeue_lock);
1888
1889         if (q->nr_hw_queues > 1)
1890                 blk_queue_make_request(q, blk_mq_make_request);
1891         else
1892                 blk_queue_make_request(q, blk_sq_make_request);
1893
1894         if (set->timeout)
1895                 blk_queue_rq_timeout(q, set->timeout);
1896
1897         /*
1898          * Do this after blk_queue_make_request() overrides it...
1899          */
1900         q->nr_requests = set->queue_depth;
1901
1902         if (set->ops->complete)
1903                 blk_queue_softirq_done(q, set->ops->complete);
1904
1905         blk_mq_init_cpu_queues(q, set->nr_hw_queues);
1906
1907         if (blk_mq_init_hw_queues(q, set))
1908                 goto err_hw;
1909
1910         mutex_lock(&all_q_mutex);
1911         list_add_tail(&q->all_q_node, &all_q_list);
1912         mutex_unlock(&all_q_mutex);
1913
1914         blk_mq_add_queue_tag_set(set, q);
1915
1916         blk_mq_map_swqueue(q);
1917
1918         return q;
1919
1920 err_hw:
             /*
              * blk_mq_init_hw_queues() already exited the hardware contexts
              * it managed to set up, and q->tag_set was never assigned, so
              * blk_mq_free_queue() must not be reached from here.  Detach
              * blk-mq from the queue and free the remaining pieces exactly
              * once via the labels below.
              */
             q->mq_ops = NULL;
             percpu_ref_exit(&q->mq_usage_counter);
1921         blk_cleanup_queue(q);
1922 err_hctxs:
1923         kfree(map);
1924         for (i = 0; i < set->nr_hw_queues; i++) {
1925                 if (!hctxs[i])
1926                         break;
1927                 free_cpumask_var(hctxs[i]->cpumask);
1928                 kfree(hctxs[i]);
1929         }
1930 err_map:
1931         kfree(hctxs);
1932 err_percpu:
1933         free_percpu(ctx);
1934         return ERR_PTR(-ENOMEM);
1935 }
1936 EXPORT_SYMBOL(blk_mq_init_queue);
1937
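/*
 * Tear down a queue created by blk_mq_init_queue(): drop it from its tag
 * set, exit and free the hardware contexts, then release the percpu
 * reference, the software queues, the hctx array and the cpu -> hctx map.
 */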
1938 void blk_mq_free_queue(struct request_queue *q)
1939 {
1940         struct blk_mq_tag_set   *set = q->tag_set;
1941
1942         blk_mq_del_queue_tag_set(q);
1943
1944         blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
1945         blk_mq_free_hw_queues(q, set);
1946
1947         percpu_ref_exit(&q->mq_usage_counter);
1948
1949         free_percpu(q->queue_ctx);
1950         kfree(q->queue_hw_ctx);
1951         kfree(q->mq_map);
1952
1953         q->queue_ctx = NULL;
1954         q->queue_hw_ctx = NULL;
1955         q->mq_map = NULL;
1956
1957         mutex_lock(&all_q_mutex);
1958         list_del_init(&q->all_q_node);
1959         mutex_unlock(&all_q_mutex);
1960 }
1961
1962 /* Basically redo blk_mq_init_queue() with the queue frozen */
1963 static void blk_mq_queue_reinit(struct request_queue *q)
1964 {
1965         blk_mq_freeze_queue(q);
1966
1967         blk_mq_sysfs_unregister(q);
1968
1969         blk_mq_update_queue_map(q->mq_map, q->nr_hw_queues);
1970
1971         /*
1972          * Redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe
1973          * we should change hctx->numa_node according to the new topology (this
1974          * involves freeing and re-allocating memory; worth doing?)
1975          */
1976
1977         blk_mq_map_swqueue(q);
1978
1979         blk_mq_sysfs_register(q);
1980
1981         blk_mq_unfreeze_queue(q);
1982 }
1983
1984 static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
1985                                       unsigned long action, void *hcpu)
1986 {
1987         struct request_queue *q;
1988
1989         /*
1990          * Before new mappings are established, a hot-added CPU might already
1991          * have started handling requests. This doesn't break anything, as we
1992          * map offline CPUs to the first hardware queue. We will re-init the
1993          * queues below to get optimal settings.
1994          */
1995         if (action != CPU_DEAD && action != CPU_DEAD_FROZEN &&
1996             action != CPU_ONLINE && action != CPU_ONLINE_FROZEN)
1997                 return NOTIFY_OK;
1998
1999         mutex_lock(&all_q_mutex);
2000         list_for_each_entry(q, &all_q_list, all_q_node)
2001                 blk_mq_queue_reinit(q);
2002         mutex_unlock(&all_q_mutex);
2003         return NOTIFY_OK;
2004 }
2005
2006 static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
2007 {
2008         int i;
2009
2010         for (i = 0; i < set->nr_hw_queues; i++) {
2011                 set->tags[i] = blk_mq_init_rq_map(set, i);
2012                 if (!set->tags[i])
2013                         goto out_unwind;
2014         }
2015
2016         return 0;
2017
2018 out_unwind:
2019         while (--i >= 0)
2020                 blk_mq_free_rq_map(set, set->tags[i], i);
2021
2022         return -ENOMEM;
2023 }
2024
2025 /*
2026  * Allocate the request maps associated with this tag_set. Note that this
2027  * may reduce the depth asked for, if memory is tight. set->queue_depth
2028  * will be updated to reflect the allocated depth.
2029  */
2030 static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
2031 {
2032         unsigned int depth;
2033         int err;
2034
2035         depth = set->queue_depth;
2036         do {
2037                 err = __blk_mq_alloc_rq_maps(set);
2038                 if (!err)
2039                         break;
2040
2041                 set->queue_depth >>= 1;
2042                 if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
2043                         err = -ENOMEM;
2044                         break;
2045                 }
2046         } while (set->queue_depth);
2047
2048         if (!set->queue_depth || err) {
2049                 pr_err("blk-mq: failed to allocate request map\n");
2050                 return -ENOMEM;
2051         }
2052
2053         if (depth != set->queue_depth)
2054                 pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
2055                                                 depth, set->queue_depth);
2056
2057         return 0;
2058 }
2059
2060 /*
2061  * Alloc a tag set to be associated with one or more request queues.
2062  * May fail with EINVAL for various error conditions. May adjust the
2063  * requested depth down, if it is too large. In that case, the adjusted
2064  * value will be stored in set->queue_depth.
2065  */
2066 int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
2067 {
2068         if (!set->nr_hw_queues)
2069                 return -EINVAL;
2070         if (!set->queue_depth)
2071                 return -EINVAL;
2072         if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
2073                 return -EINVAL;
2074
2075         if (!set->ops->queue_rq || !set->ops->map_queue)
2076                 return -EINVAL;
2077
2078         if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
2079                 pr_info("blk-mq: reduced tag depth to %u\n",
2080                         BLK_MQ_MAX_DEPTH);
2081                 set->queue_depth = BLK_MQ_MAX_DEPTH;
2082         }
2083
2084         set->tags = kmalloc_node(set->nr_hw_queues *
2085                                  sizeof(struct blk_mq_tags *),
2086                                  GFP_KERNEL, set->numa_node);
2087         if (!set->tags)
2088                 return -ENOMEM;
2089
2090         if (blk_mq_alloc_rq_maps(set))
2091                 goto enomem;
2092
2093         mutex_init(&set->tag_list_lock);
2094         INIT_LIST_HEAD(&set->tag_list);
2095
2096         return 0;
2097 enomem:
2098         kfree(set->tags);
2099         set->tags = NULL;
2100         return -ENOMEM;
2101 }
2102 EXPORT_SYMBOL(blk_mq_alloc_tag_set);
2103
2104 void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
2105 {
2106         int i;
2107
2108         for (i = 0; i < set->nr_hw_queues; i++) {
2109                 if (set->tags[i])
2110                         blk_mq_free_rq_map(set, set->tags[i], i);
2111         }
2112
2113         kfree(set->tags);
2114         set->tags = NULL;
2115 }
2116 EXPORT_SYMBOL(blk_mq_free_tag_set);
2117
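/*
 * Adjust the queue's effective depth (q->nr_requests).  The new value
 * cannot exceed the tag space sized when the set was allocated; each
 * hctx's tag map is resized in turn, and the update is aborted on the
 * first failure.
 */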
2118 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
2119 {
2120         struct blk_mq_tag_set *set = q->tag_set;
2121         struct blk_mq_hw_ctx *hctx;
2122         int i, ret;
2123
2124         if (!set || nr > set->queue_depth)
2125                 return -EINVAL;
2126
2127         ret = 0;
2128         queue_for_each_hw_ctx(q, hctx, i) {
2129                 ret = blk_mq_tag_update_depth(hctx->tags, nr);
2130                 if (ret)
2131                         break;
2132         }
2133
2134         if (!ret)
2135                 q->nr_requests = nr;
2136
2137         return ret;
2138 }
2139
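/*
 * all_q_mutex is the same lock taken by the CPU hotplug notifier above;
 * holding it via blk_mq_disable_hotplug() keeps queues from being
 * remapped while the caller works.
 */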
2140 void blk_mq_disable_hotplug(void)
2141 {
2142         mutex_lock(&all_q_mutex);
2143 }
2144
2145 void blk_mq_enable_hotplug(void)
2146 {
2147         mutex_unlock(&all_q_mutex);
2148 }
2149
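/*
 * Registered as a subsys_initcall so the CPU notifier machinery is in
 * place before block drivers start creating queues.
 */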
2150 static int __init blk_mq_init(void)
2151 {
2152         blk_mq_cpu_init();
2153
2154         hotcpu_notifier(blk_mq_queue_reinit_notify, 0);
2155
2156         return 0;
2157 }
2158 subsys_initcall(blk_mq_init);