[PATCH] elevator=as back-compatibility
diff --git a/block/elevator.c b/block/elevator.c
index 6c3fc8a..e8025b2 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -64,7 +64,7 @@ inline int elv_rq_merge_ok(struct request *rq, struct bio *bio)
 }
 EXPORT_SYMBOL(elv_rq_merge_ok);
 
-inline int elv_try_merge(struct request *__rq, struct bio *bio)
+static inline int elv_try_merge(struct request *__rq, struct bio *bio)
 {
        int ret = ELEVATOR_NO_MERGE;
 
@@ -80,7 +80,6 @@ inline int elv_try_merge(struct request *__rq, struct bio *bio)
 
        return ret;
 }
-EXPORT_SYMBOL(elv_try_merge);
 
 static struct elevator_type *elevator_find(const char *name)
 {
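The companion change makes elv_try_merge() static and drops its EXPORT_SYMBOL; with no users outside this file, it becomes file-local. For orientation, here is a standalone sketch (not part of the patch) of the adjacency test this helper performs in kernels of this era: a bio that starts where a request ends is a back merge, one that ends where the request starts is a front merge. Plain integers stand in for sector_t.

/*
 * Minimal sketch of elv_try_merge()'s adjacency decision,
 * assumed from the surrounding 2.6-era code, not copied from it.
 */
#include <stdio.h>

enum { NO_MERGE, BACK_MERGE, FRONT_MERGE };

static int try_merge(unsigned long rq_sector, unsigned long rq_nr_sectors,
		     unsigned long bio_sector, unsigned long bio_sectors)
{
	if (rq_sector + rq_nr_sectors == bio_sector)
		return BACK_MERGE;	/* bio appends to the request */
	if (rq_sector - bio_sectors == bio_sector)
		return FRONT_MERGE;	/* bio prepends to the request */
	return NO_MERGE;
}

int main(void)
{
	/* request covers sectors 100..107; bios of 8 sectors each */
	printf("%d\n", try_merge(100, 8, 108, 8));	/* BACK_MERGE */
	printf("%d\n", try_merge(100, 8, 92, 8));	/* FRONT_MERGE */
	return 0;
}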
@@ -150,6 +149,13 @@ static void elevator_setup_default(void)
        if (!chosen_elevator[0])
                strcpy(chosen_elevator, CONFIG_DEFAULT_IOSCHED);
 
+       /*
+        * Be backwards-compatible with previous kernels, so users
+        * won't get the wrong elevator.
+        */
+       if (!strcmp(chosen_elevator, "as"))
+               strcpy(chosen_elevator, "anticipatory");
+
        /*
         * If the given scheduler is not available, fall back to no-op.
         */
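The remap is a plain string compare done before scheduler lookup, so a kernel booted with elevator=as transparently gets the anticipatory scheduler; without it, the lookup for "as" would find nothing and the setup code would fall back to no-op, per the comment above. A standalone sketch of the aliasing, outside the kernel:

/*
 * Sketch of the boot-parameter alias: "as" keeps working after
 * the scheduler was renamed "anticipatory".
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char chosen[16];

	strcpy(chosen, "as");		/* as if booted with elevator=as */
	if (!strcmp(chosen, "as"))
		strcpy(chosen, "anticipatory");

	printf("%s\n", chosen);		/* prints "anticipatory" */
	return 0;
}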
@@ -304,15 +310,7 @@ void elv_requeue_request(request_queue_t *q, struct request *rq)
 
        rq->flags &= ~REQ_STARTED;
 
-       /*
-        * if this is the flush, requeue the original instead and drop the flush
-        */
-       if (rq->flags & REQ_BAR_FLUSH) {
-               clear_bit(QUEUE_FLAG_FLUSH, &q->queue_flags);
-               rq = rq->end_io_data;
-       }
-
-       __elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0);
+       __elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE, 0);
 }
 
 static void elv_drain_elevator(request_queue_t *q)
@@ -332,7 +330,18 @@ static void elv_drain_elevator(request_queue_t *q)
 void __elv_add_request(request_queue_t *q, struct request *rq, int where,
                       int plug)
 {
+       struct list_head *pos;
+       unsigned ordseq;
+
+       if (q->ordcolor)
+               rq->flags |= REQ_ORDERED_COLOR;
+
        if (rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
+               /*
+                * toggle ordered color
+                */
+               q->ordcolor ^= 1;
+
                /*
                 * barriers implicitly indicate back insertion
                 */
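Every request is stamped with the queue's current one-bit color, and each barrier flips that bit, so completion code can tell requests issued before a barrier from those issued after it even when both are in flight. A compact sketch of the generation tag (hypothetical types, not kernel code; the fields mirror q->ordcolor and REQ_ORDERED_COLOR):

#include <stdbool.h>
#include <stdio.h>

struct queue { unsigned color; };			/* like q->ordcolor */
struct req   { unsigned color; bool barrier; };		/* like REQ_ORDERED_COLOR */

static void add_request(struct queue *q, struct req *rq)
{
	rq->color = q->color;		/* stamp with the current generation */
	if (rq->barrier)
		q->color ^= 1;		/* barrier opens a new generation */
}

int main(void)
{
	struct queue q = { 0 };
	struct req a = { 0, false }, bar = { 0, true }, b = { 0, false };

	add_request(&q, &a);	/* a.color == 0 */
	add_request(&q, &bar);	/* bar.color == 0, queue flips to 1 */
	add_request(&q, &b);	/* b.color == 1: issued after the barrier */
	printf("%u %u %u\n", a.color, bar.color, b.color);
	return 0;
}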
@@ -393,6 +402,30 @@ void __elv_add_request(request_queue_t *q, struct request *rq, int where,
                q->elevator->ops->elevator_add_req_fn(q, rq);
                break;
 
+       case ELEVATOR_INSERT_REQUEUE:
+               /*
+                * If ordered flush isn't in progress, we do front
+                * insertion; otherwise, requests should be requeued
+                * in ordseq order.
+                */
+               rq->flags |= REQ_SOFTBARRIER;
+
+               if (q->ordseq == 0) {
+                       list_add(&rq->queuelist, &q->queue_head);
+                       break;
+               }
+
+               ordseq = blk_ordered_req_seq(rq);
+
+               list_for_each(pos, &q->queue_head) {
+                       struct request *pos_rq = list_entry_rq(pos);
+                       if (ordseq <= blk_ordered_req_seq(pos_rq))
+                               break;
+               }
+
+               list_add_tail(&rq->queuelist, pos);
+               break;
+
        default:
                printk(KERN_ERR "%s: bad insertion point %d\n",
                       __FUNCTION__, where);
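While an ordered (flush) sequence is in flight, a requeued request must not jump ahead of requests belonging to an earlier stage of the sequence, so the list walk above finds the first queued request with an equal or higher sequence number and inserts in front of it (list_add_tail() on the found position). A standalone sketch of that sorted insert, with an array of sequence numbers standing in for the dispatch list:

/*
 * Sketch of the ordered requeue above: stop at the first entry
 * whose sequence is >= ours and insert in front of it, keeping
 * the list sorted by ordered-sequence number.
 */
#include <stdio.h>

static void insert_sorted(int *seq, int *n, int ordseq)
{
	int pos;

	/* find the first entry that should come after the new one */
	for (pos = 0; pos < *n; pos++)
		if (ordseq <= seq[pos])
			break;

	/* shift the tail and insert, mirroring list_add_tail(rq, pos) */
	for (int i = *n; i > pos; i--)
		seq[i] = seq[i - 1];
	seq[pos] = ordseq;
	(*n)++;
}

int main(void)
{
	int seq[8] = { 1, 2, 4 };
	int n = 3;

	insert_sorted(seq, &n, 3);
	for (int i = 0; i < n; i++)
		printf("%d ", seq[i]);	/* 1 2 3 4 */
	printf("\n");
	return 0;
}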
@@ -422,25 +455,16 @@ static inline struct request *__elv_next_request(request_queue_t *q)
 {
        struct request *rq;
 
-       if (unlikely(list_empty(&q->queue_head) &&
-                    !q->elevator->ops->elevator_dispatch_fn(q, 0)))
-               return NULL;
-
-       rq = list_entry_rq(q->queue_head.next);
-
-       /*
-        * if this is a barrier write and the device has to issue a
-        * flush sequence to support it, check how far we are
-        */
-       if (blk_fs_request(rq) && blk_barrier_rq(rq)) {
-               BUG_ON(q->ordered == QUEUE_ORDERED_NONE);
+       while (1) {
+               while (!list_empty(&q->queue_head)) {
+                       rq = list_entry_rq(q->queue_head.next);
+                       if (blk_do_ordered(q, &rq))
+                               return rq;
+               }
 
-               if (q->ordered == QUEUE_ORDERED_FLUSH &&
-                   !blk_barrier_preflush(rq))
-                       rq = blk_start_pre_flush(q, rq);
+               if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
+                       return NULL;
        }
-
-       return rq;
 }
 
 struct request *elv_next_request(request_queue_t *q)
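__elv_next_request() now alternates between draining the dispatch list through blk_do_ordered() and refilling it from the elevator, instead of special-casing barrier writes inline. A standalone sketch of that two-level loop follows; gate() and refill() are hypothetical stand-ins for blk_do_ordered() and the elevator's dispatch_fn, and the dequeue is simplified (the kernel only peeks here and lets the driver dequeue; the real gate may also substitute or consume the request while sequencing a flush).

#include <stdbool.h>
#include <stdio.h>
#include <stddef.h>

struct req { int id; struct req *next; };

struct queue {
	struct req *head;	/* dispatch list, like q->queue_head */
	int left;		/* how many times refill() still succeeds */
};

/* stand-in for blk_do_ordered(): here, everything may be dispatched */
static bool gate(struct queue *q, struct req **rqp)
{
	(void)q;
	(void)rqp;
	return true;
}

/* stand-in for elevator_dispatch_fn: push one more request */
static bool refill(struct queue *q)
{
	static struct req pool[3] = { { 1, NULL }, { 2, NULL }, { 3, NULL } };

	if (!q->left)
		return false;
	q->left--;
	pool[q->left].next = q->head;
	q->head = &pool[q->left];
	return true;
}

static struct req *next_request(struct queue *q)
{
	struct req *rq;

	while (1) {
		while (q->head) {
			rq = q->head;
			if (gate(q, &rq)) {
				q->head = rq->next;	/* driver-side dequeue, simplified */
				return rq;
			}
		}
		if (!refill(q))
			return NULL;	/* elevator is empty too */
	}
}

int main(void)
{
	struct queue q = { NULL, 3 };
	struct req *rq;

	while ((rq = next_request(&q)))
		printf("dispatch %d\n", rq->id);
	return 0;
}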
@@ -498,7 +522,7 @@ struct request *elv_next_request(request_queue_t *q)
                        blkdev_dequeue_request(rq);
                        rq->flags |= REQ_QUIET;
                        end_that_request_chunk(rq, 0, nr_bytes);
-                       end_that_request_last(rq);
+                       end_that_request_last(rq, 0);
                } else {
                        printk(KERN_ERR "%s: bad return=%d\n", __FUNCTION__,
                                                                ret);
@@ -597,6 +621,20 @@ void elv_completed_request(request_queue_t *q, struct request *rq)
                if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
                        e->ops->elevator_completed_req_fn(q, rq);
        }
+
+       /*
+        * Check if the queue is waiting for fs requests to be
+        * drained for flush sequence.
+        */
+       if (unlikely(q->ordseq)) {
+               struct request *first_rq = list_entry_rq(q->queue_head.next);
+               if (q->in_flight == 0 &&
+                   blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN &&
+                   blk_ordered_req_seq(first_rq) > QUEUE_ORDSEQ_DRAIN) {
+                       blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0);
+                       q->request_fn(q);
+               }
+       }
 }
 
 int elv_register_queue(struct request_queue *q)
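The completion hook now also notices when an ordered sequence stuck in its drain stage has nothing left to wait for: once in_flight reaches zero and the request at the head of the queue already belongs to a later stage than DRAIN, the drain step is completed and the queue is kicked via q->request_fn(). A sketch of just that predicate (hypothetical fields mirroring q->in_flight, blk_ordered_cur_seq() and blk_ordered_req_seq()):

#include <stdbool.h>
#include <stdio.h>

enum ordseq { ORDSEQ_DRAIN = 1, ORDSEQ_PREFLUSH = 2 };	/* subset of the stages */

struct queue {
	int in_flight;	/* requests the driver has not completed yet */
	int cur_seq;	/* current stage of the ordered sequence */
	int head_seq;	/* stage of the request at the queue head */
};

/* true when the DRAIN stage may be marked complete */
static bool drain_complete(const struct queue *q)
{
	return q->in_flight == 0 &&
	       q->cur_seq == ORDSEQ_DRAIN &&
	       q->head_seq > ORDSEQ_DRAIN;
}

int main(void)
{
	struct queue q = { 0, ORDSEQ_DRAIN, ORDSEQ_PREFLUSH };

	printf("%d\n", drain_complete(&q));	/* 1: drain may finish */
	q.in_flight = 2;
	printf("%d\n", drain_complete(&q));	/* 0: still waiting */
	return 0;
}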