X-Git-Url: https://git.openpandora.org/cgi-bin/gitweb.cgi?p=pandora-kernel.git;a=blobdiff_plain;f=block%2Fblk-core.c;h=416b263921bc89272d0e94d3c3663bce4dfef4b8;hp=ea70e6c80cd34f0c2b563ceabcc7189027fc5182;hb=refs%2Fheads%2Fpandora-3.2;hpb=0efebaa72d3b8cf377c45930c78e1a0969d6355a

diff --git a/block/blk-core.c b/block/blk-core.c
index ea70e6c80cd3..416b263921bc 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -235,7 +235,7 @@ EXPORT_SYMBOL(blk_delay_queue);
  **/
 void blk_start_queue(struct request_queue *q)
 {
-	WARN_ON(!irqs_disabled());
+	WARN_ON(!in_interrupt() && !irqs_disabled());
 
 	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
 	__blk_run_queue(q);
@@ -366,7 +366,14 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
 		if (drain_all)
 			blk_throtl_drain(q);
 
-		__blk_run_queue(q);
+		/*
+		 * This function might be called on a queue which failed
+		 * driver init after queue creation. Some drivers
+		 * (e.g. fd) get unhappy in such cases. Kick queue iff
+		 * dispatch queue has something on it.
+		 */
+		if (!list_empty(&q->queue_head))
+			__blk_run_queue(q);
 
 		if (drain_all)
 			nr_rqs = q->rq.count[0] + q->rq.count[1];
@@ -467,6 +474,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 	q->backing_dev_info.state = 0;
 	q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
 	q->backing_dev_info.name = "block";
+	q->node = node_id;
 
 	err = bdi_init(&q->backing_dev_info);
 	if (err) {
@@ -475,6 +483,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 	}
 
 	if (blk_throtl_init(q)) {
+		bdi_destroy(&q->backing_dev_info);
 		kmem_cache_free(blk_requestq_cachep, q);
 		return NULL;
 	}
@@ -490,6 +499,9 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 
 	kobject_init(&q->kobj, &blk_queue_ktype);
 
+#ifdef CONFIG_BLK_DEV_IO_TRACE
+	mutex_init(&q->blk_trace_mutex);
+#endif
 	mutex_init(&q->sysfs_lock);
 	spin_lock_init(&q->__queue_lock);
 
@@ -551,7 +563,7 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
 	if (!uninit_q)
 		return NULL;
 
-	q = blk_init_allocated_queue_node(uninit_q, rfn, lock, node_id);
+	q = blk_init_allocated_queue(uninit_q, rfn, lock);
 	if (!q)
 		blk_cleanup_queue(uninit_q);
 
@@ -562,19 +574,10 @@ EXPORT_SYMBOL(blk_init_queue_node);
 struct request_queue *
 blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
 			 spinlock_t *lock)
-{
-	return blk_init_allocated_queue_node(q, rfn, lock, -1);
-}
-EXPORT_SYMBOL(blk_init_allocated_queue);
-
-struct request_queue *
-blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn,
-			      spinlock_t *lock, int node_id)
 {
 	if (!q)
 		return NULL;
 
-	q->node = node_id;
 	if (blk_init_free_list(q))
 		return NULL;
 
@@ -604,11 +607,11 @@ blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn,
 	return NULL;
 }
-EXPORT_SYMBOL(blk_init_allocated_queue_node);
+EXPORT_SYMBOL(blk_init_allocated_queue);
 
 int blk_get_queue(struct request_queue *q)
 {
-	if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
+	if (likely(!blk_queue_dead(q))) {
 		kobject_get(&q->kobj);
 		return 0;
 	}
@@ -755,7 +758,7 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
 	const bool is_sync = rw_is_sync(rw_flags) != 0;
 	int may_queue;
 
-	if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
+	if (unlikely(blk_queue_dead(q)))
 		return NULL;
 
 	may_queue = elv_may_queue(q, rw_flags);
@@ -875,7 +878,7 @@ static struct request *get_request_wait(struct request_queue *q, int rw_flags,
 		struct io_context *ioc;
 		struct request_list *rl = &q->rq;
 
-		if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
+		if (unlikely(blk_queue_dead(q)))
 			return NULL;
 
 		prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
@@ -2016,6 +2019,7 @@ void blk_start_request(struct request *req)
 	if (unlikely(blk_bidi_rq(req)))
 		req->next_rq->resid_len = blk_rq_bytes(req->next_rq);
 
+	BUG_ON(test_bit(REQ_ATOM_COMPLETE, &req->atomic_flags));
 	blk_add_timer(req);
 }
 EXPORT_SYMBOL(blk_start_request);
@@ -2076,7 +2080,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
 	if (!req->bio)
 		return false;
 
-	trace_block_rq_complete(req->q, req);
+	trace_block_rq_complete(req->q, req, nr_bytes);
 
 	/*
 	 * For fs requests, rq is just carrier of independent bio's
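
Note: the recurring change across the hunks above is mechanical: each open-coded
test_bit(QUEUE_FLAG_DEAD, &q->queue_flags) check becomes a call to blk_queue_dead().
As a minimal sketch of what that helper looks like, assuming it matches the
upstream 3.2-era form in include/linux/blkdev.h (the pandora-3.2 tree may differ):

/* Assumed 3.2-era definition of blk_queue_dead(); a sketch, not verified
 * against this tree. It wraps the same test_bit() check that the hunks
 * above remove from each call site. */
#define blk_queue_dead(q)	test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)

Funneling every dead-queue check through one helper keeps the call sites
identical, so a later patch can change how queue liveness is tested in a
single place instead of at each caller.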