}
EXPORT_SYMBOL(blk_dump_rq_flags);
-/*
- * Make sure that plugs that were pending when this function was entered,
- * are now complete and requests pushed to the queue.
-*/
-static inline void queue_sync_plugs(struct request_queue *q)
-{
- /*
- * If the current process is plugged and has barriers submitted,
- * we will livelock if we don't unplug first.
- */
- blk_flush_plug(current);
-}
-
static void blk_delay_work(struct work_struct *work)
{
struct request_queue *q;
void blk_sync_queue(struct request_queue *q)
{
del_timer_sync(&q->timeout);
cancel_delayed_work_sync(&q->delay_work);
- queue_sync_plugs(q);
}
EXPORT_SYMBOL(blk_sync_queue);
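With plugging moved off the queue and onto the task, blk_sync_queue() no
longer has queue-level plug state to flush, which is why queue_sync_plugs()
goes away above. The per-task pattern this series introduces looks roughly
like the sketch below (an illustrative caller, assuming the
blk_start_plug()/blk_finish_plug() API from this series; not part of this
hunk):

	#include <linux/blkdev.h>
	#include <linux/bio.h>

	/* Hypothetical caller: batch a bio behind an on-stack plug. */
	static void example_plugged_submit(struct bio *bio)
	{
		struct blk_plug plug;

		blk_start_plug(&plug);		/* current->plug = &plug */
		submit_bio(bio->bi_rw, bio);	/* queues on plug->list */
		blk_finish_plug(&plug);		/* flush_plug_list() runs */
	}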
/*
* Attempts to merge with the plugged list in the current process. Returns
- * true if merge was succesful, otherwise false.
+ * true if merge was successful, otherwise false.
*/
static bool attempt_plug_merge(struct task_struct *tsk, struct request_queue *q,
struct bio *bio)
plug = current->plug;
if (plug) {
- if (!plug->should_sort && !list_empty(&plug->list)) {
+ /*
+ * If this is the first request added after a plug, fire
+ * off a plug trace. If others have been added before, check
+ * if we have multiple devices in this plug. If so, make a
+ * note to sort the list before dispatch.
+ */
+ if (list_empty(&plug->list))
+ trace_block_plug(q);
+ else if (!plug->should_sort) {
struct request *__rq;
__rq = list_entry_rq(plug->list.prev);
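The hunk above is truncated; reconstructed from the surrounding context, the
resulting branch reads roughly:

	if (list_empty(&plug->list))
		trace_block_plug(q);
	else if (!plug->should_sort) {
		struct request *__rq;

		/* a second queue on the plug? note it for sorting */
		__rq = list_entry_rq(plug->list.prev);
		if (__rq->q != q)
			plug->should_sort = 1;
	}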
/*
 * If total number of sectors is less than the first segment
 * size, something has gone terribly wrong.
*/
if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
- printk(KERN_ERR "blk: request botched\n");
+ blk_dump_rq_flags(req, "request botched");
req->__data_len = blk_rq_cur_bytes(req);
}
static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
{
struct request *rqa = container_of(a, struct request, queuelist);
struct request *rqb = container_of(b, struct request, queuelist);
- return !(rqa->q == rqb->q);
+ return !(rqa->q <= rqb->q);
+}
+
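list_sort() only ever tests cmp(priv, a, b) <= 0, so the comparator must
implement a less-or-equal on the sort key. The old !(rqa->q == rqb->q)
returns 1 for any two distinct queues in either direction, which is not
anticommutative, so requests for the same queue were not reliably grouped
together. A userspace sketch of the property (hypothetical code, purely to
show the difference; comparing unrelated pointers with <= is tolerated here
for illustration, as the kernel does):

	#include <stdio.h>

	struct req { const void *q; };

	/* broken: says "goes after" for distinct queues either way round */
	static int cmp_eq(const struct req *a, const struct req *b)
	{
		return !(a->q == b->q);
	}

	/* fixed: a proper less-or-equal on the queue pointer */
	static int cmp_le(const struct req *a, const struct req *b)
	{
		return !(a->q <= b->q);
	}

	int main(void)
	{
		int x, y;
		struct req a = { &x }, b = { &y };

		printf("eq: %d %d\n", cmp_eq(&a, &b), cmp_eq(&b, &a));	/* 1 1 */
		printf("le: %d %d\n", cmp_le(&a, &b), cmp_le(&b, &a));	/* one 0, one 1 */
		return 0;
	}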
+static void queue_unplugged(struct request_queue *q, unsigned int depth)
+{
+ trace_block_unplug_io(q, depth);
+ __blk_run_queue(q, true);
+
+ if (q->unplugged_fn)
+ q->unplugged_fn(q);
}
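queue_unplugged() also gives the driver a notification once the plugged
requests have been pushed down. A consumer would look something like this
(sketch; example_dev and its wait queue are invented for illustration, only
the unplugged_fn hook and the signature implied by q->unplugged_fn(q) come
from this patch):

	/* hypothetical driver state */
	struct example_dev {
		wait_queue_head_t unplug_wait;
	};

	static void example_unplugged(struct request_queue *q)
	{
		struct example_dev *dev = q->queuedata;

		/* the plug has drained into our queue; kick waiters */
		wake_up(&dev->unplug_wait);
	}

	/* at queue setup time */
	q->unplugged_fn = example_unplugged;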
static void flush_plug_list(struct blk_plug *plug)
{
struct request_queue *q;
unsigned long flags;
struct request *rq;
+ LIST_HEAD(list);
+ unsigned int depth;
BUG_ON(plug->magic != PLUG_MAGIC);
if (list_empty(&plug->list))
return;
- if (plug->should_sort)
- list_sort(NULL, &plug->list, plug_rq_cmp);
+ list_splice_init(&plug->list, &list);
+
+ if (plug->should_sort) {
+ list_sort(NULL, &list, plug_rq_cmp);
+ plug->should_sort = 0;
+ }
q = NULL;
+ depth = 0;
+
+ /*
+ * Save and disable interrupts here, to avoid doing it for every
+ * queue lock we have to take.
+ */
local_irq_save(flags);
- while (!list_empty(&plug->list)) {
- rq = list_entry_rq(plug->list.next);
+ while (!list_empty(&list)) {
+ rq = list_entry_rq(list.next);
list_del_init(&rq->queuelist);
BUG_ON(!(rq->cmd_flags & REQ_ON_PLUG));
BUG_ON(!rq->q);
if (rq->q != q) {
if (q) {
- __blk_run_queue(q, false);
+ queue_unplugged(q, depth);
spin_unlock(q->queue_lock);
}
q = rq->q;
+ depth = 0;
spin_lock(q->queue_lock);
}
rq->cmd_flags &= ~REQ_ON_PLUG;
if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA))
__elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH);
else
__elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE);
+
+ depth++;
}
if (q) {
- __blk_run_queue(q, false);
+ queue_unplugged(q, depth);
spin_unlock(q->queue_lock);
}
- BUG_ON(!list_empty(&plug->list));
local_irq_restore(flags);
}
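On the locking: disabling interrupts once around the whole walk lets each
per-queue lock be a plain spin_lock() instead of paying spin_lock_irqsave()
per queue. The two forms below are equivalent for a single queue; the
batched one just hoists the flags save/restore out of the loop:

	/* per-queue form the loop avoids ... */
	spin_lock_irqsave(q->queue_lock, flags);
	__elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE);
	spin_unlock_irqrestore(q->queue_lock, flags);

	/* ... versus flags saved once for the whole walk */
	local_irq_save(flags);
	spin_lock(q->queue_lock);
	__elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE);
	spin_unlock(q->queue_lock);
	local_irq_restore(flags);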