diff --git a/block/blk-core.c b/block/blk-core.c
index ea70e6c..a219c89 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -366,7 +366,14 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
                if (drain_all)
                        blk_throtl_drain(q);
 
-               __blk_run_queue(q);
+               /*
+                * This function might be called on a queue which failed
+                * driver init after queue creation.  Some drivers
+                * (e.g. fd) get unhappy in such cases.  Kick queue iff
+                * dispatch queue has something on it.
+                */
+               if (!list_empty(&q->queue_head))
+                       __blk_run_queue(q);
 
                if (drain_all)
                        nr_rqs = q->rq.count[0] + q->rq.count[1];
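A note on the hunk above: blk_drain_queue() can be reached for a queue whose
driver failed to initialize after queue creation, and kicking such a queue
runs its request_fn against a device that was never fully set up; the comment
cites the floppy driver as a victim. A self-contained sketch of that failure
path, with hypothetical driver names (my_request_fn, my_driver_init, my_lock):

    #include <linux/blkdev.h>

    static DEFINE_SPINLOCK(my_lock);

    /* Never reached below: the queue is torn down before any I/O is queued. */
    static void my_request_fn(struct request_queue *q)
    {
    }

    static int __init my_driver_init(void)
    {
            struct request_queue *q = blk_init_queue(my_request_fn, &my_lock);

            if (!q)
                    return -ENOMEM;
            /* suppose hardware probing fails after queue creation */
            blk_cleanup_queue(q);   /* drains a queue that never held a request */
            return -ENODEV;
    }

With the guard, the drain loop only calls __blk_run_queue() when the dispatch
list actually holds requests, so half-initialized queues are left alone.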
@@ -467,6 +474,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
        q->backing_dev_info.state = 0;
        q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
        q->backing_dev_info.name = "block";
+       q->node = node_id;
 
        err = bdi_init(&q->backing_dev_info);
        if (err) {
@@ -475,6 +483,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
        }
 
        if (blk_throtl_init(q)) {
+               bdi_destroy(&q->backing_dev_info);
                kmem_cache_free(blk_requestq_cachep, q);
                return NULL;
        }
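The two hunks above stamp the NUMA node onto the queue at allocation time and
plug a leak in the error path: a blk_throtl_init() failure used to free the
queue without undoing bdi_init(). The fix is open-coded here; the same unwind
ordering in the more idiomatic goto style would look roughly like this (a
sketch of the pattern, not the code in this file):

    err = bdi_init(&q->backing_dev_info);
    if (err)
            goto fail_q;
    if (blk_throtl_init(q))
            goto fail_bdi;
    return q;

    fail_bdi:
            bdi_destroy(&q->backing_dev_info);
    fail_q:
            kmem_cache_free(blk_requestq_cachep, q);
            return NULL;

Teardown runs in the reverse order of setup, so each label cleans up exactly
what had succeeded before the jump.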
@@ -551,7 +560,7 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
        if (!uninit_q)
                return NULL;
 
-       q = blk_init_allocated_queue_node(uninit_q, rfn, lock, node_id);
+       q = blk_init_allocated_queue(uninit_q, rfn, lock);
        if (!q)
                blk_cleanup_queue(uninit_q);
 
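Since blk_alloc_queue_node() now records the node id on the queue itself, the
node-aware init variant is redundant, and the hunks below fold
blk_init_allocated_queue_node() into blk_init_allocated_queue(). Pieced
together from the context lines, blk_init_queue_node() ends up reading roughly
as follows (a reconstruction of the surrounding function, not part of the
patch):

    struct request_queue *
    blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
    {
            struct request_queue *uninit_q, *q;

            uninit_q = blk_alloc_queue_node(GFP_KERNEL, node_id); /* sets q->node */
            if (!uninit_q)
                    return NULL;

            q = blk_init_allocated_queue(uninit_q, rfn, lock);
            if (!q)
                    blk_cleanup_queue(uninit_q);

            return q;
    }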
@@ -562,19 +571,10 @@ EXPORT_SYMBOL(blk_init_queue_node);
 struct request_queue *
 blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
                         spinlock_t *lock)
-{
-       return blk_init_allocated_queue_node(q, rfn, lock, -1);
-}
-EXPORT_SYMBOL(blk_init_allocated_queue);
-
-struct request_queue *
-blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn,
-                             spinlock_t *lock, int node_id)
 {
        if (!q)
                return NULL;
 
-       q->node = node_id;
        if (blk_init_free_list(q))
                return NULL;
 
@@ -604,11 +604,11 @@ blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn,
 
        return NULL;
 }
-EXPORT_SYMBOL(blk_init_allocated_queue_node);
+EXPORT_SYMBOL(blk_init_allocated_queue);
 
 int blk_get_queue(struct request_queue *q)
 {
-       if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
+       if (likely(!blk_queue_dead(q))) {
                kobject_get(&q->kobj);
                return 0;
        }
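blk_queue_dead() replaces the open-coded QUEUE_FLAG_DEAD tests here and in
get_request()/get_request_wait() below, so every path that could hand out a
request checks queue death the same way. The helper itself is not in this
file; presumably, as in mainline of this vintage, it is a one-line wrapper in
include/linux/blkdev.h:

    #define blk_queue_dead(q)       test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)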
@@ -755,7 +755,7 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
        const bool is_sync = rw_is_sync(rw_flags) != 0;
        int may_queue;
 
-       if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
+       if (unlikely(blk_queue_dead(q)))
                return NULL;
 
        may_queue = elv_may_queue(q, rw_flags);
@@ -875,7 +875,7 @@ static struct request *get_request_wait(struct request_queue *q, int rw_flags,
                struct io_context *ioc;
                struct request_list *rl = &q->rq;
 
-               if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
+               if (unlikely(blk_queue_dead(q)))
                        return NULL;
 
                prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
@@ -2016,6 +2016,7 @@ void blk_start_request(struct request *req)
        if (unlikely(blk_bidi_rq(req)))
                req->next_rq->resid_len = blk_rq_bytes(req->next_rq);
 
+       BUG_ON(test_bit(REQ_ATOM_COMPLETE, &req->atomic_flags));
        blk_add_timer(req);
 }
 EXPORT_SYMBOL(blk_start_request);
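The new BUG_ON catches a request that is started while still marked complete,
e.g. a driver starting the same request twice or requeueing one whose
completion raced with the timeout handler, before blk_add_timer() arms a
fresh timeout on it. REQ_ATOM_COMPLETE is owned by atomic bit helpers in
block/blk.h; from memory of this era (worth verifying against the tree):

    /* Test-and-set, so normal completion and the timeout handler cannot
     * both claim the same request. */
    static inline int blk_mark_rq_complete(struct request *rq)
    {
            return test_and_set_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
    }

    static inline void blk_clear_rq_complete(struct request *rq)
    {
            clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
    }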