**/
void blk_start_queue(struct request_queue *q)
{
- WARN_ON(!irqs_disabled());
+ WARN_ON(!in_interrupt() && !irqs_disabled());
queue_flag_clear(QUEUE_FLAG_STOPPED, q);
__blk_run_queue(q);
}
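
blk_start_queue() must still be called with the queue lock held; a minimal caller sketch (example_restart_queue is hypothetical, not part of this patch) restarting a stopped queue from driver context:

static void example_restart_queue(struct request_queue *q)
{
	unsigned long flags;

	/* take the queue lock with interrupts disabled, then clear the
	 * stop flag and rerun the queue via blk_start_queue() */
	spin_lock_irqsave(q->queue_lock, flags);
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
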
if (blk_throtl_init(q)) {
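+ /* release the bdi set up by the earlier bdi_init() before freeing the queue */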
+ bdi_destroy(&q->backing_dev_info);
kmem_cache_free(blk_requestq_cachep, q);
return NULL;
}

kobject_init(&q->kobj, &blk_queue_ktype);
+#ifdef CONFIG_BLK_DEV_IO_TRACE
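+ /* serializes blktrace setup/teardown on this queue */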
+ mutex_init(&q->blk_trace_mutex);
+#endif
mutex_init(&q->sysfs_lock);
spin_lock_init(&q->__queue_lock);

int blk_get_queue(struct request_queue *q)
{
- if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
+ if (likely(!blk_queue_dead(q))) {
kobject_get(&q->kobj);
return 0;
}
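
For reference, blk_queue_dead() used above is assumed to be the usual queue-flag test helper from include/linux/blkdev.h (a sketch, not part of this excerpt):

#define blk_queue_dead(q)	test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
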
const bool is_sync = rw_is_sync(rw_flags) != 0;
int may_queue;
- if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
+ if (unlikely(blk_queue_dead(q)))
return NULL;
may_queue = elv_may_queue(q, rw_flags);

struct io_context *ioc;
struct request_list *rl = &q->rq;
- if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
+ if (unlikely(blk_queue_dead(q)))
return NULL;
prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
		TASK_UNINTERRUPTIBLE);

if (unlikely(blk_bidi_rq(req)))
req->next_rq->resid_len = blk_rq_bytes(req->next_rq);
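+ /* the request must not already be marked complete when the timeout is armed */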
+ BUG_ON(test_bit(REQ_ATOM_COMPLETE, &req->atomic_flags));
blk_add_timer(req);
}
EXPORT_SYMBOL(blk_start_request);

if (!req->bio)
return false;
- trace_block_rq_complete(req->q, req);
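+ /* pass the number of bytes completed in this update to the tracepoint */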
+ trace_block_rq_complete(req->q, req, nr_bytes);
/*
* For fs requests, rq is just carrier of independent bio's