Merge branch 'devel' of master.kernel.org:/home/rmk/linux-2.6-arm
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 64df3fa..d148ccb 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -92,6 +92,12 @@ struct cfq_data {
        struct cfq_queue *active_queue;
        struct cfq_io_context *active_cic;
 
+       /*
+        * async queues, one per priority class and level
+        */
+       struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
+       struct cfq_queue *async_idle_cfqq;
+
        struct timer_list idle_class_timer;
 
        sector_t last_position;
@@ -109,9 +115,6 @@ struct cfq_data {
        unsigned int cfq_slice_idle;
 
        struct list_head cic_list;
-
-       sector_t new_seek_mean;
-       u64 new_seek_total;
 };
 
 /*
@@ -151,8 +154,6 @@ struct cfq_queue {
 
        /* various state flags, see below */
        unsigned int flags;
-
-       sector_t last_request_pos;
 };
 
 enum cfqq_state_flags {
@@ -1249,9 +1250,9 @@ cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
 {
        struct cfq_io_context *cic;
 
-       cic = kmem_cache_alloc_node(cfq_ioc_pool, gfp_mask, cfqd->queue->node);
+       cic = kmem_cache_alloc_node(cfq_ioc_pool, gfp_mask | __GFP_ZERO,
+                                                       cfqd->queue->node);
        if (cic) {
-               memset(cic, 0, sizeof(*cic));
                cic->last_end_request = jiffies;
                INIT_LIST_HEAD(&cic->queue_list);
                cic->dtor = cfq_free_io_context;
@@ -1351,8 +1352,8 @@ static void cfq_ioc_set_ioprio(struct io_context *ioc)
 }
 
 static struct cfq_queue *
-cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct task_struct *tsk,
-             gfp_t gfp_mask)
+cfq_find_alloc_queue(struct cfq_data *cfqd, int is_sync,
+                    struct task_struct *tsk, gfp_t gfp_mask)
 {
        struct cfq_queue *cfqq, *new_cfqq = NULL;
        struct cfq_io_context *cic;
@@ -1374,17 +1375,19 @@ retry:
                         * free memory.
                         */
                        spin_unlock_irq(cfqd->queue->queue_lock);
-                       new_cfqq = kmem_cache_alloc_node(cfq_pool, gfp_mask|__GFP_NOFAIL, cfqd->queue->node);
+                       new_cfqq = kmem_cache_alloc_node(cfq_pool,
+                                       gfp_mask | __GFP_NOFAIL | __GFP_ZERO,
+                                       cfqd->queue->node);
                        spin_lock_irq(cfqd->queue->queue_lock);
                        goto retry;
                } else {
-                       cfqq = kmem_cache_alloc_node(cfq_pool, gfp_mask, cfqd->queue->node);
+                       cfqq = kmem_cache_alloc_node(cfq_pool,
+                                       gfp_mask | __GFP_ZERO,
+                                       cfqd->queue->node);
                        if (!cfqq)
                                goto out;
                }
 
-               memset(cfqq, 0, sizeof(*cfqq));
-
                RB_CLEAR_NODE(&cfqq->rb_node);
                INIT_LIST_HEAD(&cfqq->fifo);
 
@@ -1405,12 +1408,58 @@ retry:
        if (new_cfqq)
                kmem_cache_free(cfq_pool, new_cfqq);
 
-       atomic_inc(&cfqq->ref);
 out:
        WARN_ON((gfp_mask & __GFP_WAIT) && !cfqq);
        return cfqq;
 }
 
+static struct cfq_queue **
+cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
+{
+       switch(ioprio_class) {
+       case IOPRIO_CLASS_RT:
+               return &cfqd->async_cfqq[0][ioprio];
+       case IOPRIO_CLASS_BE:
+               return &cfqd->async_cfqq[1][ioprio];
+       case IOPRIO_CLASS_IDLE:
+               return &cfqd->async_idle_cfqq;
+       default:
+               BUG();
+       }
+}
+
+static struct cfq_queue *
+cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct task_struct *tsk,
+             gfp_t gfp_mask)
+{
+       const int ioprio = task_ioprio(tsk);
+       const int ioprio_class = task_ioprio_class(tsk);
+       struct cfq_queue **async_cfqq = NULL;
+       struct cfq_queue *cfqq = NULL;
+
+       if (!is_sync) {
+               async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio);
+               cfqq = *async_cfqq;
+       }
+
+       if (!cfqq) {
+               cfqq = cfq_find_alloc_queue(cfqd, is_sync, tsk, gfp_mask);
+               if (!cfqq)
+                       return NULL;
+       }
+
+       /*
+        * pin the queue now that it's allocated; scheduler exit will prune it
+        */
+       if (!is_sync && !(*async_cfqq)) {
+               atomic_inc(&cfqq->ref);
+               *async_cfqq = cfqq;
+       }
+
+       atomic_inc(&cfqq->ref);
+       return cfqq;
+}
+
 /*
  * We drop cfq io contexts lazily, so we may find a dead one.
  */
@@ -1570,11 +1619,6 @@ cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_io_context *cic,
        else
                sdist = cic->last_request_pos - rq->sector;
 
-       if (!cic->seek_samples) {
-               cfqd->new_seek_total = (7*cic->seek_total + (u64)256*sdist) / 8;
-               cfqd->new_seek_mean = cfqd->new_seek_total / 256;
-       }
-
        /*
         * Don't allow the seek distance to get too large from the
         * odd fragment, pagein, etc
@@ -1710,7 +1754,6 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
        cfq_update_idle_window(cfqd, cfqq, cic);
 
        cic->last_request_pos = rq->sector + rq->nr_sectors;
-       cfqq->last_request_pos = cic->last_request_pos;
 
        if (cfqq == cfqd->active_queue) {
                /*
@@ -2015,6 +2058,21 @@ static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
        blk_sync_queue(cfqd->queue);
 }
 
+static void cfq_put_async_queues(struct cfq_data *cfqd)
+{
+       int i;
+
+       for (i = 0; i < IOPRIO_BE_NR; i++) {
+               if (cfqd->async_cfqq[0][i])
+                       cfq_put_queue(cfqd->async_cfqq[0][i]);
+               if (cfqd->async_cfqq[1][i])
+                       cfq_put_queue(cfqd->async_cfqq[1][i]);
+       }
+
+       if (cfqd->async_idle_cfqq)
+               cfq_put_queue(cfqd->async_idle_cfqq);
+}
+
 static void cfq_exit_queue(elevator_t *e)
 {
        struct cfq_data *cfqd = e->elevator_data;
@@ -2035,6 +2093,8 @@ static void cfq_exit_queue(elevator_t *e)
                __cfq_exit_single_io_context(cfqd, cic);
        }
 
+       cfq_put_async_queues(cfqd);
+
        spin_unlock_irq(q->queue_lock);
 
        cfq_shutdown_timer_wq(cfqd);
@@ -2046,12 +2106,10 @@ static void *cfq_init_queue(request_queue_t *q)
 {
        struct cfq_data *cfqd;
 
-       cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL, q->node);
+       cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
        if (!cfqd)
                return NULL;
 
-       memset(cfqd, 0, sizeof(*cfqd));
-
        cfqd->service_tree = CFQ_RB_ROOT;
        INIT_LIST_HEAD(&cfqd->cic_list);
 
@@ -2090,13 +2148,11 @@ static void cfq_slab_kill(void)
 
 static int __init cfq_slab_setup(void)
 {
-       cfq_pool = kmem_cache_create("cfq_pool", sizeof(struct cfq_queue), 0, 0,
-                                       NULL, NULL);
+       cfq_pool = KMEM_CACHE(cfq_queue, 0);
        if (!cfq_pool)
                goto fail;
 
-       cfq_ioc_pool = kmem_cache_create("cfq_ioc_pool",
-                       sizeof(struct cfq_io_context), 0, 0, NULL, NULL);
+       cfq_ioc_pool = KMEM_CACHE(cfq_io_context, 0);
        if (!cfq_ioc_pool)
                goto fail;
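
To summarize the mechanism introduced above: async (non-sync) queues are no longer per-process but shared per (ioprio class, ioprio level), looked up through cfqd->async_cfqq[2][IOPRIO_BE_NR] plus a single async_idle_cfqq, pinned with one extra reference owned by the scheduler, and released in cfq_put_async_queues() when the elevator exits. The following is a minimal, hypothetical userspace sketch of that lifetime rule; it is not part of the patch, and the TOY_*/toy_* names and constants are made-up stand-ins for the kernel ones.

/*
 * Toy model (hypothetical, userspace) of the per-priority async queue table:
 * RT and BE levels use rows 0 and 1, IDLE shares a single queue, the table
 * pins each queue with one reference, and teardown drops the pinned refs.
 */
#include <stdio.h>
#include <stdlib.h>

#define TOY_IOPRIO_BE_NR 8                 /* stand-in for IOPRIO_BE_NR */
enum { TOY_RT, TOY_BE, TOY_IDLE };         /* stand-ins for IOPRIO_CLASS_* */

struct toy_queue { int ref; };

static struct toy_queue *async_cfqq[2][TOY_IOPRIO_BE_NR];
static struct toy_queue *async_idle_cfqq;

static struct toy_queue **toy_async_queue_prio(int klass, int prio)
{
	switch (klass) {
	case TOY_RT:  return &async_cfqq[0][prio];
	case TOY_BE:  return &async_cfqq[1][prio];
	default:      return &async_idle_cfqq;   /* TOY_IDLE */
	}
}

/* Mirrors cfq_get_queue(): reuse the shared async queue, pin it on first use. */
static struct toy_queue *toy_get_queue(int klass, int prio)
{
	struct toy_queue **slot = toy_async_queue_prio(klass, prio);
	struct toy_queue *q = *slot;

	if (!q) {
		q = calloc(1, sizeof(*q));
		if (!q)
			return NULL;
		q->ref++;                /* pinned reference held by the table */
		*slot = q;
	}
	q->ref++;                        /* the caller's own reference */
	return q;
}

static void toy_put_queue(struct toy_queue *q)
{
	if (--q->ref == 0)
		free(q);
}

/* Mirrors cfq_put_async_queues(): elevator exit drops the pinned references. */
static void toy_put_async_queues(void)
{
	for (int i = 0; i < TOY_IOPRIO_BE_NR; i++) {
		if (async_cfqq[0][i])
			toy_put_queue(async_cfqq[0][i]);
		if (async_cfqq[1][i])
			toy_put_queue(async_cfqq[1][i]);
	}
	if (async_idle_cfqq)
		toy_put_queue(async_idle_cfqq);
}

int main(void)
{
	struct toy_queue *a = toy_get_queue(TOY_BE, 4);
	struct toy_queue *b = toy_get_queue(TOY_BE, 4); /* second task, same class/prio */

	printf("shared queue: %s, refs held: %d\n", a == b ? "yes" : "no", a->ref);
	toy_put_queue(a);                /* callers drop their own references */
	toy_put_queue(b);
	toy_put_async_queues();          /* exit path frees the queue via the pinned ref */
	return 0;
}

Built with any C99 compiler, the sketch reports that both lookups return the same shared queue holding three references: one pinned by the table and one per caller, matching the accounting the patch introduces.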