Merge branch 'for-linus' of git://git.kernel.dk/linux-2.6-block
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 023f4e6..5f127cf 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -7,6 +7,7 @@
  *  Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
  */
 #include <linux/module.h>
+#include <linux/slab.h>
 #include <linux/blkdev.h>
 #include <linux/elevator.h>
 #include <linux/jiffies.h>
@@ -19,7 +20,7 @@
  * tunables
  */
 /* max queue in one round of service */
-static const int cfq_quantum = 4;
+static const int cfq_quantum = 8;
 static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
 /* maximum backwards seek, in KiB */
 static const int cfq_back_max = 16 * 1024;
@@ -46,8 +47,10 @@ static const int cfq_hist_divisor = 4;
 #define CFQ_HW_QUEUE_MIN       (5)
 #define CFQ_SERVICE_SHIFT       12
 
-#define CFQQ_SEEK_THR          8 * 1024
-#define CFQQ_SEEKY(cfqq)       ((cfqq)->seek_mean > CFQQ_SEEK_THR)
+#define CFQQ_SEEK_THR          (sector_t)(8 * 100)
+#define CFQQ_CLOSE_THR         (sector_t)(8 * 1024)
+#define CFQQ_SECT_THR_NONROT   (sector_t)(2 * 32)
+#define CFQQ_SEEKY(cfqq)       (hweight32(cfqq->seek_history) > 32/8)
 
 #define RQ_CIC(rq)             \
        ((struct cfq_io_context *) (rq)->elevator_private)
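
A minimal user-space sketch of the new seekiness test (popcount32 stands in for the kernel's hweight32(), via the GCC/Clang __builtin_popcount; the history pattern is invented):

    #include <stdint.h>
    #include <stdio.h>

    /* stand-in for the kernel's hweight32() */
    static unsigned int popcount32(uint32_t v)
    {
            return (unsigned int)__builtin_popcount(v);
    }

    int main(void)
    {
            uint32_t seek_history = 0;
            int i;

            /* mark 5 of the last 32 requests as seeks */
            for (i = 0; i < 5; i++)
                    seek_history = (seek_history << 1) | 1;

            /* CFQQ_SEEKY: seeky once more than 32/8 == 4 bits are set */
            printf("seeky: %d\n", popcount32(seek_history) > 32 / 8);
            return 0;
    }
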
@@ -77,11 +80,12 @@ struct cfq_rb_root {
        struct rb_root rb;
        struct rb_node *left;
        unsigned count;
+       unsigned total_weight;
        u64 min_vdisktime;
        struct rb_node *active;
-       unsigned total_weight;
 };
-#define CFQ_RB_ROOT    (struct cfq_rb_root) { RB_ROOT, NULL, 0, 0, }
+#define CFQ_RB_ROOT    (struct cfq_rb_root) { .rb = RB_ROOT, .left = NULL, \
+                       .count = 0, .min_vdisktime = 0, }
 
 /*
  * Per process-grouping structure
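
The initializer change above is what makes the field move safe: a positional initializer silently shifts values onto the wrong members when the struct layout changes, while designated initializers stay attached to their names and zero everything unnamed. A toy illustration with invented names:

    #include <stdio.h>

    struct demo {
            int count;
            unsigned int total_weight;      /* moved up, as in the hunk above */
            unsigned long long min_vdisktime;
    };

    int main(void)
    {
            /* positional: 7 now lands in total_weight, not min_vdisktime */
            struct demo pos = { 0, 7 };
            /* designated: stays attached to the named member */
            struct demo des = { .count = 0, .min_vdisktime = 7 };

            printf("%u %llu\n", pos.total_weight, des.min_vdisktime);
            return 0;
    }
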
@@ -115,11 +119,11 @@ struct cfq_queue {
        /* time when queue got scheduled in to dispatch first request. */
        unsigned long dispatch_start;
        unsigned int allocated_slice;
+       unsigned int slice_dispatch;
        /* time when first request from queue completed and slice started. */
        unsigned long slice_start;
        unsigned long slice_end;
        long slice_resid;
-       unsigned int slice_dispatch;
 
        /* pending metadata requests */
        int meta_pending;
@@ -130,13 +134,11 @@ struct cfq_queue {
        unsigned short ioprio, org_ioprio;
        unsigned short ioprio_class, org_ioprio_class;
 
-       unsigned int seek_samples;
-       u64 seek_total;
-       sector_t seek_mean;
-       sector_t last_request_pos;
-
        pid_t pid;
 
+       u32 seek_history;
+       sector_t last_request_pos;
+
        struct cfq_rb_root *service_tree;
        struct cfq_queue *new_cfqq;
        struct cfq_group *cfqg;
@@ -223,8 +225,8 @@ struct cfq_data {
 
        unsigned int busy_queues;
 
-       int rq_in_driver[2];
-       int sync_flight;
+       int rq_in_driver;
+       int rq_in_flight[2];
 
        /*
         * queue-depth detection
@@ -417,11 +419,6 @@ static struct cfq_queue *cfq_get_queue(struct cfq_data *, bool,
 static struct cfq_io_context *cfq_cic_lookup(struct cfq_data *,
                                                struct io_context *);
 
-static inline int rq_in_driver(struct cfq_data *cfqd)
-{
-       return cfqd->rq_in_driver[0] + cfqd->rq_in_driver[1];
-}
-
 static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_context *cic,
                                            bool is_sync)
 {
@@ -951,11 +948,12 @@ cfq_find_alloc_cfqg(struct cfq_data *cfqd, struct cgroup *cgroup, int create)
        struct backing_dev_info *bdi = &cfqd->queue->backing_dev_info;
        unsigned int major, minor;
 
-       /* Do we need to take this reference */
-       if (!blkiocg_css_tryget(blkcg))
-               return NULL;;
-
        cfqg = cfqg_of_blkg(blkiocg_lookup_group(blkcg, key));
+       if (cfqg && !cfqg->blkg.dev && bdi->dev && dev_name(bdi->dev)) {
+               sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
+               cfqg->blkg.dev = MKDEV(major, minor);
+               goto done;
+       }
        if (cfqg || !create)
                goto done;
 
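
A hedged user-space sketch of the dev_name() parse added above; MKDEV_SKETCH mirrors the kernel's 20-bit-minor MKDEV() and the device string "8:16" is invented:

    #include <stdio.h>

    #define MINORBITS               20
    #define MKDEV_SKETCH(ma, mi)    (((ma) << MINORBITS) | (mi))

    int main(void)
    {
            unsigned int major, minor;

            /* backing_dev_info device names look like "8:16" */
            if (sscanf("8:16", "%u:%u", &major, &minor) == 2)
                    printf("dev = %u\n", MKDEV_SKETCH(major, minor));
            return 0;
    }
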
@@ -985,7 +983,6 @@ cfq_find_alloc_cfqg(struct cfq_data *cfqd, struct cgroup *cgroup, int create)
        hlist_add_head(&cfqg->cfqd_node, &cfqd->cfqg_list);
 
 done:
-       blkiocg_css_put(blkcg);
        return cfqg;
 }
 
@@ -1420,9 +1417,9 @@ static void cfq_activate_request(struct request_queue *q, struct request *rq)
 {
        struct cfq_data *cfqd = q->elevator->elevator_data;
 
-       cfqd->rq_in_driver[rq_is_sync(rq)]++;
+       cfqd->rq_in_driver++;
        cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
-                                               rq_in_driver(cfqd));
+                                               cfqd->rq_in_driver);
 
        cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
 }
@@ -1430,12 +1427,11 @@ static void cfq_activate_request(struct request_queue *q, struct request *rq)
 static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
 {
        struct cfq_data *cfqd = q->elevator->elevator_data;
-       const int sync = rq_is_sync(rq);
 
-       WARN_ON(!cfqd->rq_in_driver[sync]);
-       cfqd->rq_in_driver[sync]--;
+       WARN_ON(!cfqd->rq_in_driver);
+       cfqd->rq_in_driver--;
        cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
-                                               rq_in_driver(cfqd));
+                                               cfqd->rq_in_driver);
 }
 
 static void cfq_remove_request(struct request *rq)
@@ -1528,7 +1524,8 @@ static void __cfq_set_active_queue(struct cfq_data *cfqd,
                                   struct cfq_queue *cfqq)
 {
        if (cfqq) {
-               cfq_log_cfqq(cfqd, cfqq, "set_active");
+               cfq_log_cfqq(cfqd, cfqq, "set_active wl_prio:%d wl_type:%d",
+                               cfqd->serving_prio, cfqd->serving_type);
                cfqq->slice_start = 0;
                cfqq->dispatch_start = jiffies;
                cfqq->allocated_slice = 0;
@@ -1671,18 +1668,9 @@ static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
 }
 
 static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
-                              struct request *rq, bool for_preempt)
+                              struct request *rq)
 {
-       sector_t sdist = cfqq->seek_mean;
-
-       if (!sample_valid(cfqq->seek_samples))
-               sdist = CFQQ_SEEK_THR;
-
-       /* if seek_mean is big, using it as close criteria is meaningless */
-       if (sdist > CFQQ_SEEK_THR && !for_preempt)
-               sdist = CFQQ_SEEK_THR;
-
-       return cfq_dist_from_last(cfqd, rq) <= sdist;
+       return cfq_dist_from_last(cfqd, rq) <= CFQQ_CLOSE_THR;
 }
 
 static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
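
For reference, a small sketch of the simplified closeness test (dist_from_last mirrors cfq_dist_from_last(); CLOSE_THR is the new CFQQ_CLOSE_THR of 8*1024 sectors, i.e. 4MB at 512-byte sectors; positions are invented):

    #include <stdio.h>

    typedef unsigned long long sector_t;

    #define CLOSE_THR ((sector_t)(8 * 1024))        /* sectors */

    static sector_t dist_from_last(sector_t last_pos, sector_t rq_pos)
    {
            return rq_pos >= last_pos ? rq_pos - last_pos
                                      : last_pos - rq_pos;
    }

    int main(void)
    {
            sector_t last = 100000, rq = 104096;    /* 4096 sectors apart */

            printf("close: %d\n", dist_from_last(last, rq) <= CLOSE_THR);
            return 0;
    }
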
@@ -1709,7 +1697,7 @@ static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
         * will contain the closest sector.
         */
        __cfqq = rb_entry(parent, struct cfq_queue, p_node);
-       if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq, false))
+       if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
                return __cfqq;
 
        if (blk_rq_pos(__cfqq->next_rq) < sector)
@@ -1720,7 +1708,7 @@ static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
                return NULL;
 
        __cfqq = rb_entry(node, struct cfq_queue, p_node);
-       if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq, false))
+       if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
                return __cfqq;
 
        return NULL;
@@ -1741,6 +1729,8 @@ static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
 {
        struct cfq_queue *cfqq;
 
+       if (cfq_class_idle(cur_cfqq))
+               return NULL;
        if (!cfq_cfqq_sync(cur_cfqq))
                return NULL;
        if (CFQQ_SEEKY(cur_cfqq))
@@ -1807,7 +1797,11 @@ static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
         * Otherwise, we do only if they are the last ones
         * in their service tree.
         */
-       return service_tree->count == 1 && cfq_cfqq_sync(cfqq);
+       if (service_tree->count == 1 && cfq_cfqq_sync(cfqq))
+               return 1;
+       cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d",
+                       service_tree->count);
+       return 0;
 }
 
 static void cfq_arm_slice_timer(struct cfq_data *cfqd)
@@ -1852,8 +1846,11 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
         * time slice.
         */
        if (sample_valid(cic->ttime_samples) &&
-           (cfqq->slice_end - jiffies < cic->ttime_mean))
+           (cfqq->slice_end - jiffies < cic->ttime_mean)) {
+               cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%d",
+                               cic->ttime_mean);
                return;
+       }
 
        cfq_mark_cfqq_wait_request(cfqq);
 
@@ -1878,8 +1875,7 @@ static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
        cfqq->dispatched++;
        elv_dispatch_sort(q, rq);
 
-       if (cfq_cfqq_sync(cfqq))
-               cfqd->sync_flight++;
+       cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
        cfqq->nr_sectors += blk_rq_sectors(rq);
 }
 
@@ -2062,6 +2058,7 @@ static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
                slice = max(slice, 2 * cfqd->cfq_slice_idle);
 
        slice = max_t(unsigned, slice, CFQ_MIN_TT);
+       cfq_log(cfqd, "workload slice:%d", slice);
        cfqd->workload_expires = jiffies + slice;
        cfqd->noidle_tree_requires_idle = false;
 }
@@ -2209,16 +2206,32 @@ static int cfq_forced_dispatch(struct cfq_data *cfqd)
        struct cfq_queue *cfqq;
        int dispatched = 0;
 
-       while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL)
+       /* Expire the timeslice of the current active queue first */
+       cfq_slice_expired(cfqd, 0);
+       while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL) {
+               __cfq_set_active_queue(cfqd, cfqq);
                dispatched += __cfq_forced_dispatch_cfqq(cfqq);
+       }
 
-       cfq_slice_expired(cfqd, 0);
        BUG_ON(cfqd->busy_queues);
 
        cfq_log(cfqd, "forced_dispatch=%d", dispatched);
        return dispatched;
 }
 
+static inline bool cfq_slice_used_soon(struct cfq_data *cfqd,
+       struct cfq_queue *cfqq)
+{
+       /* the queue hasn't finished any request, so we can't estimate */
+       if (cfq_cfqq_slice_new(cfqq))
+               return 1;
+       if (time_after(jiffies + cfqd->cfq_slice_idle * cfqq->dispatched,
+               cfqq->slice_end))
+               return 1;
+
+       return 0;
+}
+
 static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
        unsigned int max_dispatch;
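
The helper above assumes each already-dispatched request costs roughly one cfq_slice_idle to drain; a worked sketch (plain comparison in place of the wrap-safe time_after(), all numbers invented):

    #include <stdio.h>

    static int slice_used_soon(unsigned long jiffies, unsigned long slice_idle,
                               unsigned int dispatched, unsigned long slice_end)
    {
            return jiffies + slice_idle * dispatched > slice_end;
    }

    int main(void)
    {
            /* 10 in flight at ~8 jiffies each overruns 50 jiffies of slice */
            printf("%d\n", slice_used_soon(1000, 8, 10, 1050));     /* 1 */
            return 0;
    }
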
@@ -2226,16 +2239,16 @@ static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
        /*
         * Drain async requests before we start sync IO
         */
-       if (cfq_should_idle(cfqd, cfqq) && cfqd->rq_in_driver[BLK_RW_ASYNC])
+       if (cfq_should_idle(cfqd, cfqq) && cfqd->rq_in_flight[BLK_RW_ASYNC])
                return false;
 
        /*
         * If this is an async queue and we have sync IO in flight, let it wait
         */
-       if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq))
+       if (cfqd->rq_in_flight[BLK_RW_SYNC] && !cfq_cfqq_sync(cfqq))
                return false;
 
-       max_dispatch = cfqd->cfq_quantum;
+       max_dispatch = max_t(unsigned int, cfqd->cfq_quantum / 2, 1);
        if (cfq_class_idle(cfqq))
                max_dispatch = 1;
 
@@ -2252,13 +2265,22 @@ static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
                /*
                 * We have other queues, don't allow more IO from this one
                 */
-               if (cfqd->busy_queues > 1)
+               if (cfqd->busy_queues > 1 && cfq_slice_used_soon(cfqd, cfqq))
                        return false;
 
                /*
                 * Sole queue user, no limit
                 */
-               max_dispatch = -1;
+               if (cfqd->busy_queues == 1)
+                       max_dispatch = -1;
+               else
+                       /*
+                        * Normally we start throttling cfqq when cfq_quantum/2
+                        * requests have been dispatched. But we can drive
+                        * deeper queue depths at the beginning of the slice,
+                        * subject to the upper limit of cfq_quantum.
+                        */
+                       max_dispatch = cfqd->cfq_quantum;
        }
 
        /*
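
Condensing the branch above, the effective dispatch cap works out roughly as below (a sketch, not the exact control flow; cfq_quantum defaults to 8 after this patch):

    #include <stdio.h>

    static unsigned int dispatch_cap(unsigned int quantum, int busy_queues,
                                     int slice_used_soon)
    {
            if (busy_queues == 1)
                    return (unsigned int)-1; /* sole queue user, no limit */
            if (slice_used_soon)
                    return quantum / 2 > 1 ? quantum / 2 : 1; /* throttle */
            return quantum;  /* early in the slice: drive deeper depths */
    }

    int main(void)
    {
            printf("%u %u\n", dispatch_cap(8, 4, 1), dispatch_cap(8, 4, 0));
            return 0;
    }
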
@@ -2980,30 +3002,20 @@ static void
 cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                       struct request *rq)
 {
-       sector_t sdist;
-       u64 total;
-
-       if (!cfqq->last_request_pos)
-               sdist = 0;
-       else if (cfqq->last_request_pos < blk_rq_pos(rq))
-               sdist = blk_rq_pos(rq) - cfqq->last_request_pos;
-       else
-               sdist = cfqq->last_request_pos - blk_rq_pos(rq);
+       sector_t sdist = 0;
+       sector_t n_sec = blk_rq_sectors(rq);
+       if (cfqq->last_request_pos) {
+               if (cfqq->last_request_pos < blk_rq_pos(rq))
+                       sdist = blk_rq_pos(rq) - cfqq->last_request_pos;
+               else
+                       sdist = cfqq->last_request_pos - blk_rq_pos(rq);
+       }
 
-       /*
-        * Don't allow the seek distance to get too large from the
-        * odd fragment, pagein, etc
-        */
-       if (cfqq->seek_samples <= 60) /* second&third seek */
-               sdist = min(sdist, (cfqq->seek_mean * 4) + 2*1024*1024);
+       cfqq->seek_history <<= 1;
+       if (blk_queue_nonrot(cfqd->queue))
+               cfqq->seek_history |= (n_sec < CFQQ_SECT_THR_NONROT);
        else
-               sdist = min(sdist, (cfqq->seek_mean * 4) + 2*1024*64);
-
-       cfqq->seek_samples = (7*cfqq->seek_samples + 256) / 8;
-       cfqq->seek_total = (7*cfqq->seek_total + (u64)256*sdist) / 8;
-       total = cfqq->seek_total + (cfqq->seek_samples/2);
-       do_div(total, cfqq->seek_samples);
-       cfqq->seek_mean = (sector_t)total;
+               cfqq->seek_history |= (sdist > CFQQ_SEEK_THR);
 }
 
 /*
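
A user-space sketch of the replacement heuristic (thresholds copied from the macros near the top of the file; __builtin_popcount would stand in for hweight32()): each request shifts one bit into a 32-entry history, and on non-rotational media it is small requests, not long seeks, that mark a queue seeky:

    #include <stdint.h>
    #include <stdio.h>

    #define SEEK_THR        ((uint64_t)(8 * 100))  /* sectors */
    #define SECT_THR_NONROT ((uint64_t)(2 * 32))   /* sectors */

    static void update_seek_history(uint32_t *history, uint64_t sdist,
                                    uint64_t n_sec, int nonrot)
    {
            *history <<= 1;
            if (nonrot)
                    *history |= (n_sec < SECT_THR_NONROT);  /* small request */
            else
                    *history |= (sdist > SEEK_THR);         /* long seek */
    }

    int main(void)
    {
            uint32_t history = 0;

            /* rotational disk: a 1600-sector seek before an 8-sector read */
            update_seek_history(&history, 1600, 8, 0);
            printf("seeky bit: %u\n", history & 1);         /* 1 */
            return 0;
    }
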
@@ -3028,8 +3040,7 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                cfq_mark_cfqq_deep(cfqq);
 
        if (!atomic_read(&cic->ioc->nr_tasks) || !cfqd->cfq_slice_idle ||
-           (!cfq_cfqq_deep(cfqq) && sample_valid(cfqq->seek_samples)
-            && CFQQ_SEEKY(cfqq)))
+           (!cfq_cfqq_deep(cfqq) && CFQQ_SEEKY(cfqq)))
                enable_idle = 0;
        else if (sample_valid(cic->ttime_samples)) {
                if (cic->ttime_mean > cfqd->cfq_slice_idle)
@@ -3113,7 +3124,7 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
         * if this request is as-good as one we would expect from the
         * current cfqq, let it preempt
         */
-       if (cfq_rq_close(cfqd, cfqq, rq, true))
+       if (cfq_rq_close(cfqd, cfqq, rq))
                return true;
 
        return false;
@@ -3215,14 +3226,14 @@ static void cfq_update_hw_tag(struct cfq_data *cfqd)
 {
        struct cfq_queue *cfqq = cfqd->active_queue;
 
-       if (rq_in_driver(cfqd) > cfqd->hw_tag_est_depth)
-               cfqd->hw_tag_est_depth = rq_in_driver(cfqd);
+       if (cfqd->rq_in_driver > cfqd->hw_tag_est_depth)
+               cfqd->hw_tag_est_depth = cfqd->rq_in_driver;
 
        if (cfqd->hw_tag == 1)
                return;
 
        if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
-           rq_in_driver(cfqd) <= CFQ_HW_QUEUE_MIN)
+           cfqd->rq_in_driver <= CFQ_HW_QUEUE_MIN)
                return;
 
        /*
@@ -3232,7 +3243,7 @@ static void cfq_update_hw_tag(struct cfq_data *cfqd)
         */
        if (cfqq && cfq_cfqq_idle_window(cfqq) &&
            cfqq->dispatched + cfqq->queued[0] + cfqq->queued[1] <
-           CFQ_HW_QUEUE_MIN && rq_in_driver(cfqd) < CFQ_HW_QUEUE_MIN)
+           CFQ_HW_QUEUE_MIN && cfqd->rq_in_driver < CFQ_HW_QUEUE_MIN)
                return;
 
        if (cfqd->hw_tag_samples++ < 50)
@@ -3285,13 +3296,12 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
 
        cfq_update_hw_tag(cfqd);
 
-       WARN_ON(!cfqd->rq_in_driver[sync]);
+       WARN_ON(!cfqd->rq_in_driver);
        WARN_ON(!cfqq->dispatched);
-       cfqd->rq_in_driver[sync]--;
+       cfqd->rq_in_driver--;
        cfqq->dispatched--;
 
-       if (cfq_cfqq_sync(cfqq))
-               cfqd->sync_flight--;
+       cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;
 
        if (sync) {
                RQ_CIC(rq)->last_end_request = now;
@@ -3318,6 +3328,7 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
                if (cfq_should_wait_busy(cfqd, cfqq)) {
                        cfqq->slice_end = jiffies + cfqd->cfq_slice_idle;
                        cfq_mark_cfqq_wait_busy(cfqq);
+                       cfq_log_cfqq(cfqd, cfqq, "will busy wait");
                }
 
                /*
@@ -3345,7 +3356,7 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
                }
        }
 
-       if (!rq_in_driver(cfqd))
+       if (!cfqd->rq_in_driver)
                cfq_schedule_dispatch(cfqd);
 }
 
@@ -3683,8 +3694,10 @@ static void *cfq_init_queue(struct request_queue *q)
         * to make sure that cfq_put_cfqg() does not try to kfree root group
         */
        atomic_set(&cfqg->ref, 1);
+       rcu_read_lock();
        blkiocg_add_blkio_group(&blkio_root_cgroup, &cfqg->blkg, (void *)cfqd,
                                        0);
+       rcu_read_unlock();
 #endif
        /*
         * Not strictly needed (since RB_ROOT just clears the node and we