Prepare this for the next patch which adds more smarts in the
plugging logic, so that we can save some memory.
Signed-off-by: Jens Axboe <axboe@fb.com>
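
For illustration only, not part of this patch: with notify() now returning an
int, a registered callback can stop the list walk in the first hunk below
(blk_mq_main_cpu_notify) by returning something other than NOTIFY_OK. A
minimal sketch under that assumption; my_driver_data, my_driver_cpu_notify
and my_driver_drain_cpu are hypothetical names, while the blk_mq_* calls are
the ones this patch touches:

	#include <linux/cpu.h>
	#include <linux/notifier.h>

	struct my_driver_data {				/* hypothetical */
		struct blk_mq_cpu_notifier notifier;
	};

	/* hypothetical per-CPU teardown helper */
	static bool my_driver_drain_cpu(struct my_driver_data *d,
					unsigned int cpu);

	static int my_driver_cpu_notify(void *data, unsigned long action,
					unsigned int cpu)
	{
		struct my_driver_data *d = data;

		/* Nothing to do unless the CPU is going away */
		if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
			return NOTIFY_OK;

		/*
		 * Returning anything but NOTIFY_OK makes the patched
		 * notifier walk break out early.
		 */
		if (!my_driver_drain_cpu(d, cpu))
			return NOTIFY_BAD;

		return NOTIFY_OK;
	}

Registration is unchanged apart from the callback type:

	blk_mq_init_cpu_notifier(&d->notifier, my_driver_cpu_notify, d);
	blk_mq_register_cpu_notifier(&d->notifier);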
 {
 	unsigned int cpu = (unsigned long) hcpu;
 	struct blk_mq_cpu_notifier *notify;
+	int ret = NOTIFY_OK;

 	raw_spin_lock(&blk_mq_cpu_notify_lock);

-	list_for_each_entry(notify, &blk_mq_cpu_notify_list, list)
-		notify->notify(notify->data, action, cpu);
+	list_for_each_entry(notify, &blk_mq_cpu_notify_list, list) {
+		ret = notify->notify(notify->data, action, cpu);
+		if (ret != NOTIFY_OK)
+			break;
+	}

 	raw_spin_unlock(&blk_mq_cpu_notify_lock);
-	return NOTIFY_OK;
+	return ret;
 }

 void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier)
 }

 void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier,
-			      void (*fn)(void *, unsigned long, unsigned int),
+			      int (*fn)(void *, unsigned long, unsigned int),
 			      void *data)
 {
 	notifier->notify = fn;
 }
 EXPORT_SYMBOL(blk_mq_free_single_hw_queue);

-static void blk_mq_hctx_notify(void *data, unsigned long action,
-			       unsigned int cpu)
+static int blk_mq_hctx_notify(void *data, unsigned long action,
+			      unsigned int cpu)
 {
 	struct blk_mq_hw_ctx *hctx = data;
 	struct request_queue *q = hctx->queue;
 	struct blk_mq_ctx *ctx;
 	LIST_HEAD(tmp);

 	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
-		return;
+		return NOTIFY_OK;

 	/*
 	 * Move ctx entries to new CPU, if this one is going away.
 	spin_unlock(&ctx->lock);

 	if (list_empty(&tmp))
-		return;
+		return NOTIFY_OK;

 	ctx = blk_mq_get_ctx(q);
 	spin_lock(&ctx->lock);

 	blk_mq_run_hw_queue(hctx, true);
 	blk_mq_put_ctx(ctx);
+
+	return NOTIFY_OK;
 }

 static void blk_mq_free_rq_map(struct blk_mq_tag_set *set,
 */
 struct blk_mq_cpu_notifier;
 void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier,
-			      void (*fn)(void *, unsigned long, unsigned int),
+			      int (*fn)(void *, unsigned long, unsigned int),
 			      void *data);
 void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier);
 void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier);
 struct blk_mq_cpu_notifier {
 	struct list_head list;
 	void *data;
-	void (*notify)(void *data, unsigned long action, unsigned int cpu);
+	int (*notify)(void *data, unsigned long action, unsigned int cpu);
 };

 struct blk_mq_ctxmap {