[PATCH] cfq-iosched: many performance fixes
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 2540dfa..13c4793 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -26,14 +26,14 @@ static const int cfq_back_penalty = 2;              /* penalty of a backwards seek */
 static const int cfq_slice_sync = HZ / 10;
 static int cfq_slice_async = HZ / 25;
 static const int cfq_slice_async_rq = 2;
-static int cfq_slice_idle = HZ / 70;
+static int cfq_slice_idle = HZ / 125;
 
 #define CFQ_IDLE_GRACE         (HZ / 10)
 #define CFQ_SLICE_SCALE                (5)
 
 #define CFQ_KEY_ASYNC          (0)
 
-static DEFINE_RWLOCK(cfq_exit_lock);
+static DEFINE_SPINLOCK(cfq_exit_lock);
 
 /*
  * for the hash of cfqq inside the cfqd
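
Side note on the tuning change above: cfq_slice_idle moves from HZ/70 to HZ/125, shrinking the per-queue idle window from roughly 14 ms to 8 ms at HZ=1000 (and truncating to 0, i.e. idling disabled, at HZ=100). A quick userspace check of the arithmetic, not part of the patch:

#include <stdio.h>

/* Illustration only: what cfq_slice_idle comes out to, in jiffies and
 * milliseconds, for the old (HZ/70) and new (HZ/125) defaults. */
int main(void)
{
	static const int hz_values[] = { 100, 250, 1000 };
	int i;

	for (i = 0; i < 3; i++) {
		int hz = hz_values[i];
		int old_j = hz / 70;	/* old default */
		int new_j = hz / 125;	/* new default; 0 at HZ=100! */

		printf("HZ=%4d: old %2d jiffies (%2d ms), new %2d jiffies (%2d ms)\n",
		       hz, old_j, old_j * 1000 / hz, new_j, new_j * 1000 / hz);
	}
	return 0;
}
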
@@ -60,14 +60,9 @@ static DEFINE_RWLOCK(cfq_exit_lock);
 /*
  * rb-tree defines
  */
-#define RB_NONE                        (2)
 #define RB_EMPTY(node)         ((node)->rb_node == NULL)
-#define RB_CLEAR_COLOR(node)   (node)->rb_color = RB_NONE
 #define RB_CLEAR(node)         do {    \
-       (node)->rb_parent = NULL;       \
-       RB_CLEAR_COLOR((node));         \
-       (node)->rb_right = NULL;        \
-       (node)->rb_left = NULL;         \
+               memset(node, 0, sizeof(*node)); \
 } while (0)
 #define RB_CLEAR_ROOT(root)    ((root)->rb_node = NULL)
 #define rb_entry_crq(node)     rb_entry((node), struct cfq_rq, rb_node)
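
The RB_CLEAR rewrite zeroes the whole node instead of clearing fields one by one. Note the old macro parked the RB_NONE sentinel (2) in rb_color while memset() leaves it 0; the sentinel can go because the color of a node that is not linked into a tree is never examined, which is also why cfq_del_crq_rb() below drops its RB_CLEAR_COLOR call. A minimal userspace sketch, using a stand-in for the 2.6.16-era node layout:

#include <stdio.h>
#include <string.h>

/* Stand-in for the 2.6.16-era struct rb_node layout; later kernels pack
 * parent and color into a single word. */
struct rb_node {
	struct rb_node *rb_parent;
	int rb_color;			/* old code parked RB_NONE (2) here */
	struct rb_node *rb_right;
	struct rb_node *rb_left;
};

/* new-style clear: one memset instead of four assignments */
#define RB_CLEAR(node)	memset((node), 0, sizeof(*(node)))

int main(void)
{
	struct rb_node n;

	RB_CLEAR(&n);

	/* all fields are zero; rb_parent == NULL already says "not in a
	 * tree", so no color sentinel is needed */
	printf("parent=%p color=%d right=%p left=%p\n",
	       (void *)n.rb_parent, n.rb_color,
	       (void *)n.rb_right, (void *)n.rb_left);
	return 0;
}
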
@@ -128,11 +123,10 @@ struct cfq_data {
         */
        struct hlist_head *crq_hash;
 
-       unsigned int max_queued;
-
        mempool_t *crq_pool;
 
        int rq_in_driver;
+       int hw_tag;
 
        /*
         * schedule slice state info
@@ -283,8 +277,6 @@ static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *, unsigned int, unsi
 static void cfq_dispatch_insert(request_queue_t *, struct cfq_rq *);
 static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, unsigned int key, struct task_struct *tsk, gfp_t gfp_mask);
 
-#define process_sync(tsk)      ((tsk)->flags & PF_SYNCWRITE)
-
 /*
  * lots of deadline iosched dupes, can be abstracted later...
  */
@@ -340,7 +332,7 @@ static int cfq_queue_empty(request_queue_t *q)
 
 static inline pid_t cfq_queue_pid(struct task_struct *task, int rw)
 {
-       if (rw == READ || process_sync(task))
+       if (rw == READ || rw == WRITE_SYNC)
                return task->pid;
 
        return CFQ_KEY_ASYNC;
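
The hunk above stops keying on the per-process PF_SYNCWRITE flag and instead looks at the request itself: reads and explicitly sync writes get a per-task queue, all other writes collapse into the shared async key. A userspace sketch of the keying rule, with illustrative stand-ins for the kernel's READ/WRITE_SYNC values:

#include <stdio.h>
#include <sys/types.h>

/* Illustrative stand-ins; the kernel's READ/WRITE_SYNC are request flag
 * values, not an enum. */
enum rq_kind { RQ_READ, RQ_WRITE, RQ_WRITE_SYNC };

#define CFQ_KEY_ASYNC	0

/* mirror of the new cfq_queue_pid() rule: sync io (reads and sync
 * writes) is keyed to the submitting task, plain writes share one
 * async queue */
static pid_t queue_key(pid_t pid, enum rq_kind rw)
{
	if (rw == RQ_READ || rw == RQ_WRITE_SYNC)
		return pid;

	return CFQ_KEY_ASYNC;
}

int main(void)
{
	printf("read       by pid 42 -> key %d\n", (int)queue_key(42, RQ_READ));
	printf("write      by pid 42 -> key %d\n", (int)queue_key(42, RQ_WRITE));
	printf("sync write by pid 42 -> key %d\n", (int)queue_key(42, RQ_WRITE_SYNC));
	return 0;
}
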
@@ -500,10 +492,13 @@ static void cfq_resort_rr_list(struct cfq_queue *cfqq, int preempted)
 
        /*
         * if queue was preempted, just add to front to be fair. busy_rr
-        * isn't sorted.
+        * isn't sorted, so insert those at the back for fairness.
         */
        if (preempted || list == &cfqd->busy_rr) {
-               list_add(&cfqq->cfq_list, list);
+               if (preempted)
+                       list = list->prev;
+
+               list_add_tail(&cfqq->cfq_list, list);
                return;
        }
 
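
For readers less used to the kernel list API: list_add() inserts right after the given position (the front, when handed the list head) and list_add_tail() inserts right before it (the back), which is what lets the hunk above steer preempted queues and busy_rr queues to different spots. A self-contained sketch of those semantics, mirroring include/linux/list.h:

#include <stdio.h>

/* Minimal re-implementation of the kernel's circular doubly-linked
 * list, just enough to show where list_add()/list_add_tail() place
 * nodes. */
struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void __list_add(struct list_head *new,
		       struct list_head *prev, struct list_head *next)
{
	next->prev = new;
	new->next = next;
	new->prev = prev;
	prev->next = new;
}

/* insert right after pos: the front, when pos is the list head */
static void list_add(struct list_head *new, struct list_head *pos)
{
	__list_add(new, pos, pos->next);
}

/* insert right before pos: the back, when pos is the list head */
static void list_add_tail(struct list_head *new, struct list_head *pos)
{
	__list_add(new, pos->prev, pos);
}

struct item { struct list_head list; char name; };

int main(void)
{
	struct list_head head;
	struct item a = { .name = 'a' }, b = { .name = 'b' }, c = { .name = 'c' };
	struct list_head *p;

	INIT_LIST_HEAD(&head);
	list_add_tail(&a.list, &head);	/* back:  [a]     */
	list_add_tail(&b.list, &head);	/* back:  [a b]   */
	list_add(&c.list, &head);	/* front: [c a b] */

	for (p = head.next; p != &head; p = p->next)
		printf("%c ", ((struct item *)p)->name);
	printf("\n");			/* prints: c a b */
	return 0;
}
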
@@ -563,7 +558,6 @@ static inline void cfq_del_crq_rb(struct cfq_rq *crq)
        cfq_update_next_crq(crq);
 
        rb_erase(&crq->rb_node, &cfqq->sort_list);
-       RB_CLEAR_COLOR(&crq->rb_node);
 
        if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY(&cfqq->sort_list))
                cfq_del_cfqq_rr(cfqd, cfqq);
@@ -664,6 +658,15 @@ static void cfq_activate_request(request_queue_t *q, struct request *rq)
        struct cfq_data *cfqd = q->elevator->elevator_data;
 
        cfqd->rq_in_driver++;
+
+       /*
+        * If the depth is larger than 1, it really could be queueing. But
+        * let's make the mark a little higher - idling could still be good
+        * for low queueing, and a low queueing number could also just
+        * indicate a SCSI mid layer like behaviour where limit+1 is often
+        * seen.
+        */
+       if (!cfqd->hw_tag && cfqd->rq_in_driver > 4)
+               cfqd->hw_tag = 1;
 }
 
 static void cfq_deactivate_request(request_queue_t *q, struct request *rq)
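
The hw_tag heuristic above latches on permanently once more than four requests are observed outstanding in the driver, taking that as evidence of real hardware command queueing. A toy model of the detection; the request trace is made up, the threshold of 4 is the one from the hunk:

#include <stdio.h>

struct disk_state {
	int rq_in_driver;
	int hw_tag;
};

/* mirror of the activate-side heuristic: once observed driver depth
 * exceeds 4, assume the device really queues commands */
static void activate(struct disk_state *d)
{
	d->rq_in_driver++;
	if (!d->hw_tag && d->rq_in_driver > 4)
		d->hw_tag = 1;
}

static void complete(struct disk_state *d)
{
	d->rq_in_driver--;
}

int main(void)
{
	struct disk_state d = { 0, 0 };
	int i;

	/* a burst of six dispatches with no completions in between */
	for (i = 0; i < 6; i++)
		activate(&d);
	printf("after burst: depth=%d hw_tag=%d\n", d.rq_in_driver, d.hw_tag);

	while (d.rq_in_driver)
		complete(&d);
	/* hw_tag stays latched once set */
	printf("after drain: depth=%d hw_tag=%d\n", d.rq_in_driver, d.hw_tag);
	return 0;
}
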
@@ -878,6 +881,13 @@ static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd)
        if (!list_empty(&cfqd->cur_rr) || cfq_get_next_prio_level(cfqd) != -1)
                cfqq = list_entry_cfqq(cfqd->cur_rr.next);
 
+       /*
+        * If no new queues are available, check if the busy list has
+        * queues waiting before falling back to idle io.
+        */
+       if (!cfqq && !list_empty(&cfqd->busy_rr))
+               cfqq = list_entry_cfqq(cfqd->busy_rr.next);
+
        /*
         * if we have idle queues and no rt or be queues had pending
         * requests, either allow immediate service if the grace period
@@ -896,6 +906,8 @@ static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd)
        return cfqq;
 }
 
+#define CIC_SEEKY(cic) ((cic)->seek_mean > (128 * 1024))
+
 static int cfq_arm_slice_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 
 {
@@ -929,7 +941,7 @@ static int cfq_arm_slice_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
         * fair distribution of slice time for a process doing back-to-back
         * seeks. so allow a little bit of time for him to submit a new rq
         */
-       if (sample_valid(cic->seek_samples) && cic->seek_mean > 131072)
+       if (sample_valid(cic->seek_samples) && CIC_SEEKY(cic))
                sl = 2;
 
        mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
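
CIC_SEEKY() flags a process whose decaying mean seek distance exceeds 128 * 1024, the same 131072 constant the open-coded test used. A rough userspace sketch of such a classifier; the 7/8 decay and the sample cutoff here are assumptions for illustration, not the kernel's exact arithmetic:

#include <stdio.h>
#include <stdlib.h>

/* Illustrative seekiness classifier: keep an exponentially decaying
 * mean of the distance between consecutive request positions and call
 * the stream "seeky" past a fixed threshold. */
struct seek_state {
	unsigned long long last_pos;
	unsigned long long seek_mean;
	int samples;
};

#define SEEK_THRESHOLD	(128 * 1024)	/* same constant as CIC_SEEKY() */

static void update(struct seek_state *s, unsigned long long pos)
{
	unsigned long long dist = pos > s->last_pos ? pos - s->last_pos
						    : s->last_pos - pos;

	s->seek_mean = (7 * s->seek_mean + dist) / 8;
	s->last_pos = pos;
	s->samples++;
}

static int seeky(const struct seek_state *s)
{
	/* require a few samples before trusting the mean */
	return s->samples > 4 && s->seek_mean > SEEK_THRESHOLD;
}

int main(void)
{
	struct seek_state seq = { 0 }, rnd = { 0 };
	int i;

	for (i = 0; i < 32; i++) {
		update(&seq, i * 8ULL);			  /* sequential io */
		update(&rnd, (unsigned long long)rand()); /* random io */
	}
	printf("sequential: mean=%llu seeky=%d\n", seq.seek_mean, seeky(&seq));
	printf("random:     mean=%llu seeky=%d\n", rnd.seek_mean, seeky(&rnd));
	return 0;
}
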
@@ -1028,8 +1040,10 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
         */
        if (!RB_EMPTY(&cfqq->sort_list))
                goto keep_queue;
-       else if (cfq_cfqq_class_sync(cfqq) &&
-                time_before(now, cfqq->slice_end)) {
+       else if (cfq_cfqq_dispatched(cfqq)) {
+               cfqq = NULL;
+               goto keep_queue;
+       } else if (cfq_cfqq_class_sync(cfqq)) {
                if (cfq_arm_slice_timer(cfqd, cfqq))
                        return NULL;
        }
@@ -1078,8 +1092,7 @@ __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
        } while (dispatched < max_dispatch);
 
        /*
-        * if slice end isn't set yet, set it. if at least one request was
-        * sync, use the sync time slice value
+        * if slice end isn't set yet, set it.
         */
        if (!cfqq->slice_end)
                cfq_set_prio_slice(cfqd, cfqq);
@@ -1090,7 +1103,8 @@ __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
         */
        if ((!cfq_cfqq_sync(cfqq) &&
            cfqd->dispatch_slice >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
-           cfq_class_idle(cfqq))
+           cfq_class_idle(cfqq) ||
+           !cfq_cfqq_idle_window(cfqq))
                cfq_slice_expired(cfqd, 0);
 
        return dispatched;
@@ -1099,10 +1113,11 @@ __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 static int
 cfq_forced_dispatch_cfqqs(struct list_head *list)
 {
-       int dispatched = 0;
        struct cfq_queue *cfqq, *next;
        struct cfq_rq *crq;
+       int dispatched;
 
+       dispatched = 0;
        list_for_each_entry_safe(cfqq, next, list, cfq_list) {
                while ((crq = cfqq->next_crq)) {
                        cfq_dispatch_insert(cfqq->cfqd->queue, crq);
@@ -1110,6 +1125,7 @@ cfq_forced_dispatch_cfqqs(struct list_head *list)
                }
                BUG_ON(!list_empty(&cfqq->fifo));
        }
+
        return dispatched;
 }
 
@@ -1136,7 +1152,8 @@ static int
 cfq_dispatch_requests(request_queue_t *q, int force)
 {
        struct cfq_data *cfqd = q->elevator->elevator_data;
-       struct cfq_queue *cfqq;
+       struct cfq_queue *cfqq, *prev_cfqq;
+       int dispatched;
 
        if (!cfqd->busy_queues)
                return 0;
@@ -1144,10 +1161,17 @@ cfq_dispatch_requests(request_queue_t *q, int force)
        if (unlikely(force))
                return cfq_forced_dispatch(cfqd);
 
-       cfqq = cfq_select_queue(cfqd);
-       if (cfqq) {
+       dispatched = 0;
+       prev_cfqq = NULL;
+       while ((cfqq = cfq_select_queue(cfqd)) != NULL) {
                int max_dispatch;
 
+               /*
+                * Don't repeat dispatch from the previous queue.
+                */
+               if (prev_cfqq == cfqq)
+                       break;
+
                cfq_clear_cfqq_must_dispatch(cfqq);
                cfq_clear_cfqq_wait_request(cfqq);
                del_timer(&cfqd->idle_slice_timer);
@@ -1156,10 +1180,19 @@ cfq_dispatch_requests(request_queue_t *q, int force)
                if (cfq_class_idle(cfqq))
                        max_dispatch = 1;
 
-               return __cfq_dispatch_requests(cfqd, cfqq, max_dispatch);
+               dispatched += __cfq_dispatch_requests(cfqd, cfqq, max_dispatch);
+
+               /*
+                * If the dispatch cfqq has idling enabled and is still
+                * the active queue, break out.
+                */
+               if (cfq_cfqq_idle_window(cfqq) && cfqd->active_queue)
+                       break;
+
+               prev_cfqq = cfqq;
        }
 
-       return 0;
+       return dispatched;
 }
 
 /*
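
The rewritten cfq_dispatch_requests() above keeps selecting and draining queues instead of servicing just one per invocation, stopping when the selector hands back the queue just serviced or when a dispatched queue wants to idle while staying active. A schematic of that loop shape with the cfq specifics stripped out; the queues and their contents are made up:

#include <stdio.h>

/* Schematic only: the per-queue dispatch cap is ignored here (the real
 * loop stops each queue at max_dispatch, which is what makes the
 * prev_cfqq guard reachable). */
struct queue { const char *name; int pending; int idles; };

static struct queue queues[] = {
	{ "q0", 3, 0 }, { "q1", 2, 0 }, { "q2", 1, 1 },
};

static struct queue *select_queue(void)
{
	int i;

	for (i = 0; i < 3; i++)
		if (queues[i].pending)
			return &queues[i];
	return NULL;
}

int main(void)
{
	struct queue *q, *prev = NULL;
	int dispatched = 0;

	while ((q = select_queue()) != NULL) {
		/* don't dispatch from the same queue twice in a row */
		if (q == prev)
			break;

		while (q->pending) {
			q->pending--;
			dispatched++;
		}
		printf("drained %s\n", q->name);

		/* an idling queue that stays active ends the round */
		if (q->idles)
			break;

		prev = q;
	}
	printf("dispatched %d requests\n", dispatched);
	return 0;
}
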
@@ -1284,7 +1317,7 @@ static void cfq_exit_io_context(struct io_context *ioc)
        /*
         * put the reference this task is holding to the various queues
         */
-       read_lock_irqsave(&cfq_exit_lock, flags);
+       spin_lock_irqsave(&cfq_exit_lock, flags);
 
        n = rb_first(&ioc->cic_root);
        while (n != NULL) {
@@ -1294,7 +1327,7 @@ static void cfq_exit_io_context(struct io_context *ioc)
                n = rb_next(n);
        }
 
-       read_unlock_irqrestore(&cfq_exit_lock, flags);
+       spin_unlock_irqrestore(&cfq_exit_lock, flags);
 }
 
 static struct cfq_io_context *
@@ -1303,17 +1336,11 @@ cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
        struct cfq_io_context *cic = kmem_cache_alloc(cfq_ioc_pool, gfp_mask);
 
        if (cic) {
-               RB_CLEAR(&cic->rb_node);
-               cic->key = NULL;
-               cic->cfqq[ASYNC] = NULL;
-               cic->cfqq[SYNC] = NULL;
+               memset(cic, 0, sizeof(*cic));
                cic->last_end_request = jiffies;
-               cic->ttime_total = 0;
-               cic->ttime_samples = 0;
-               cic->ttime_mean = 0;
+               INIT_LIST_HEAD(&cic->queue_list);
                cic->dtor = cfq_free_io_context;
                cic->exit = cfq_exit_io_context;
-               INIT_LIST_HEAD(&cic->queue_list);
                atomic_inc(&ioc_count);
        }
 
@@ -1371,25 +1398,28 @@ static inline void changed_ioprio(struct cfq_io_context *cic)
 {
        struct cfq_data *cfqd = cic->key;
        struct cfq_queue *cfqq;
-       if (cfqd) {
-               spin_lock(cfqd->queue->queue_lock);
-               cfqq = cic->cfqq[ASYNC];
-               if (cfqq) {
-                       struct cfq_queue *new_cfqq;
-                       new_cfqq = cfq_get_queue(cfqd, CFQ_KEY_ASYNC,
-                                               cic->ioc->task, GFP_ATOMIC);
-                       if (new_cfqq) {
-                               cic->cfqq[ASYNC] = new_cfqq;
-                               cfq_put_queue(cfqq);
-                       }
-               }
-               cfqq = cic->cfqq[SYNC];
-               if (cfqq) {
-                       cfq_mark_cfqq_prio_changed(cfqq);
-                       cfq_init_prio_data(cfqq);
+
+       if (unlikely(!cfqd))
+               return;
+
+       spin_lock(cfqd->queue->queue_lock);
+
+       cfqq = cic->cfqq[ASYNC];
+       if (cfqq) {
+               struct cfq_queue *new_cfqq;
+               new_cfqq = cfq_get_queue(cfqd, CFQ_KEY_ASYNC, cic->ioc->task,
+                                        GFP_ATOMIC);
+               if (new_cfqq) {
+                       cic->cfqq[ASYNC] = new_cfqq;
+                       cfq_put_queue(cfqq);
                }
-               spin_unlock(cfqd->queue->queue_lock);
        }
+
+       cfqq = cic->cfqq[SYNC];
+       if (cfqq)
+               cfq_mark_cfqq_prio_changed(cfqq);
+
+       spin_unlock(cfqd->queue->queue_lock);
 }
 
 /*
@@ -1400,17 +1430,17 @@ static int cfq_ioc_set_ioprio(struct io_context *ioc, unsigned int ioprio)
        struct cfq_io_context *cic;
        struct rb_node *n;
 
-       write_lock(&cfq_exit_lock);
+       spin_lock(&cfq_exit_lock);
 
        n = rb_first(&ioc->cic_root);
        while (n != NULL) {
                cic = rb_entry(n, struct cfq_io_context, rb_node);
+
                changed_ioprio(cic);
                n = rb_next(n);
        }
 
-       write_unlock(&cfq_exit_lock);
+       spin_unlock(&cfq_exit_lock);
 
        return 0;
 }
@@ -1475,9 +1505,10 @@ out:
 static void
 cfq_drop_dead_cic(struct io_context *ioc, struct cfq_io_context *cic)
 {
-       read_lock(&cfq_exit_lock);
+       spin_lock(&cfq_exit_lock);
        rb_erase(&cic->rb_node, &ioc->cic_root);
-       read_unlock(&cfq_exit_lock);
+       list_del_init(&cic->queue_list);
+       spin_unlock(&cfq_exit_lock);
        kmem_cache_free(cfq_ioc_pool, cic);
        atomic_dec(&ioc_count);
 }
@@ -1545,11 +1576,11 @@ restart:
                        BUG();
        }
 
-       read_lock(&cfq_exit_lock);
+       spin_lock(&cfq_exit_lock);
        rb_link_node(&cic->rb_node, parent, p);
        rb_insert_color(&cic->rb_node, &ioc->cic_root);
        list_add(&cic->queue_list, &cfqd->cic_list);
-       read_unlock(&cfq_exit_lock);
+       spin_unlock(&cfq_exit_lock);
 }
 
 /*
@@ -1648,7 +1679,8 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 {
        int enable_idle = cfq_cfqq_idle_window(cfqq);
 
-       if (!cic->ioc->task || !cfqd->cfq_slice_idle)
+       if (!cic->ioc->task || !cfqd->cfq_slice_idle ||
+           (cfqd->hw_tag && CIC_SEEKY(cic)))
                enable_idle = 0;
        else if (sample_valid(cic->ttime_samples)) {
                if (cic->ttime_mean > cfqd->cfq_slice_idle)
@@ -1678,7 +1710,7 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
                return 0;
 
        if (!cfqq)
-               return 1;
+               return 0;
 
        if (cfq_class_idle(cfqq))
                return 1;
@@ -1710,7 +1742,7 @@ static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
                cfqq->slice_left = cfq_prio_to_slice(cfqd, cfqq) / 2;
 
        cfqq->slice_end = cfqq->slice_left + jiffies;
-       __cfq_slice_expired(cfqd, cfqq, 1);
+       cfq_slice_expired(cfqd, 1);
        __cfq_set_active_queue(cfqd, cfqq);
 }
 
@@ -1739,14 +1771,24 @@ cfq_crq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 
        cfqq->next_crq = cfq_choose_req(cfqd, cfqq->next_crq, crq);
 
+       cic = crq->io_context;
+
        /*
         * we never wait for an async request and we don't allow preemption
         * of an async request. so just return early
         */
-       if (!cfq_crq_is_sync(crq))
+       if (!cfq_crq_is_sync(crq)) {
+               /*
+        * a sync process issued an async request: if it is currently
+        * waiting on the idle timer, expire the slice and kick rq
+        * handling.
+                */
+               if (cic == cfqd->active_cic &&
+                   del_timer(&cfqd->idle_slice_timer)) {
+                       cfq_slice_expired(cfqd, 0);
+                       cfq_start_queueing(cfqd, cfqq);
+               }
                return;
-
-       cic = crq->io_context;
+       }
 
        cfq_update_io_thinktime(cfqd, cic);
        cfq_update_io_seektime(cfqd, cic, crq);
@@ -1819,11 +1861,23 @@ static void cfq_completed_request(request_queue_t *q, struct request *rq)
                        cfqq->service_last = now;
                        cfq_resort_rr_list(cfqq, 0);
                }
-               cfq_schedule_dispatch(cfqd);
        }
 
-       if (cfq_crq_is_sync(crq))
+       if (sync)
                crq->io_context->last_end_request = now;
+
+       /*
+        * If this is the active queue, check if it needs to be expired,
+        * or if we want to idle in case it has no pending requests.
+        */
+       if (cfqd->active_queue == cfqq) {
+               if (time_after(now, cfqq->slice_end))
+                       cfq_slice_expired(cfqd, 0);
+               else if (sync && RB_EMPTY(&cfqq->sort_list)) {
+                       if (!cfq_arm_slice_timer(cfqd, cfqq))
+                               cfq_schedule_dispatch(cfqd);
+               }
+       }
 }
 
 static struct request *
@@ -1890,7 +1944,6 @@ static inline int
 __cfq_may_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                struct task_struct *task, int rw)
 {
-#if 1
        if ((cfq_cfqq_wait_request(cfqq) || cfq_cfqq_must_alloc(cfqq)) &&
            !cfq_cfqq_must_alloc_slice(cfqq)) {
                cfq_mark_cfqq_must_alloc_slice(cfqq);
@@ -1898,39 +1951,6 @@ __cfq_may_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq,
        }
 
        return ELV_MQUEUE_MAY;
-#else
-       if (!cfqq || task->flags & PF_MEMALLOC)
-               return ELV_MQUEUE_MAY;
-       if (!cfqq->allocated[rw] || cfq_cfqq_must_alloc(cfqq)) {
-               if (cfq_cfqq_wait_request(cfqq))
-                       return ELV_MQUEUE_MUST;
-
-               /*
-                * only allow 1 ELV_MQUEUE_MUST per slice, otherwise we
-                * can quickly flood the queue with writes from a single task
-                */
-               if (rw == READ || !cfq_cfqq_must_alloc_slice(cfqq)) {
-                       cfq_mark_cfqq_must_alloc_slice(cfqq);
-                       return ELV_MQUEUE_MUST;
-               }
-
-               return ELV_MQUEUE_MAY;
-       }
-       if (cfq_class_idle(cfqq))
-               return ELV_MQUEUE_NO;
-       if (cfqq->allocated[rw] >= cfqd->max_queued) {
-               struct io_context *ioc = get_io_context(GFP_ATOMIC);
-               int ret = ELV_MQUEUE_NO;
-
-               if (ioc && ioc->nr_batch_requests)
-                       ret = ELV_MQUEUE_MAY;
-
-               put_io_context(ioc);
-               return ret;
-       }
-
-       return ELV_MQUEUE_MAY;
-#endif
 }
 
 static int cfq_may_queue(request_queue_t *q, int rw, struct bio *bio)
@@ -1959,16 +1979,13 @@ static int cfq_may_queue(request_queue_t *q, int rw, struct bio *bio)
 static void cfq_check_waiters(request_queue_t *q, struct cfq_queue *cfqq)
 {
        struct cfq_data *cfqd = q->elevator->elevator_data;
-       struct request_list *rl = &q->rq;
 
-       if (cfqq->allocated[READ] <= cfqd->max_queued || cfqd->rq_starved) {
+       if (unlikely(cfqd->rq_starved)) {
+               struct request_list *rl = &q->rq;
+
                smp_mb();
                if (waitqueue_active(&rl->wait[READ]))
                        wake_up(&rl->wait[READ]);
-       }
-
-       if (cfqq->allocated[WRITE] <= cfqd->max_queued || cfqd->rq_starved) {
-               smp_mb();
                if (waitqueue_active(&rl->wait[WRITE]))
                        wake_up(&rl->wait[WRITE]);
        }
@@ -2128,11 +2145,8 @@ static void cfq_idle_slice_timer(unsigned long data)
                 * only expire and reinvoke request handler, if there are
                 * other queues with pending requests
                 */
-               if (!cfqd->busy_queues) {
-                       cfqd->idle_slice_timer.expires = min(now + cfqd->cfq_slice_idle, cfqq->slice_end);
-                       add_timer(&cfqd->idle_slice_timer);
+               if (!cfqd->busy_queues)
                        goto out_cont;
-               }
 
                /*
                 * not expired and it has a request pending, let it dispatch
@@ -2164,10 +2178,9 @@ static void cfq_idle_class_timer(unsigned long data)
         * race with a non-idle queue, reset timer
         */
        end = cfqd->last_end_request + CFQ_IDLE_GRACE;
-       if (!time_after_eq(jiffies, end)) {
-               cfqd->idle_class_timer.expires = end;
-               add_timer(&cfqd->idle_class_timer);
-       } else
+       if (!time_after_eq(jiffies, end))
+               mod_timer(&cfqd->idle_class_timer, end);
+       else
                cfq_schedule_dispatch(cfqd);
 
        spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
@@ -2187,7 +2200,7 @@ static void cfq_exit_queue(elevator_t *e)
 
        cfq_shutdown_timer_wq(cfqd);
 
-       write_lock(&cfq_exit_lock);
+       spin_lock(&cfq_exit_lock);
        spin_lock_irq(q->queue_lock);
 
        if (cfqd->active_queue)
@@ -2210,7 +2223,7 @@ static void cfq_exit_queue(elevator_t *e)
        }
 
        spin_unlock_irq(q->queue_lock);
-       write_unlock(&cfq_exit_lock);
+       spin_unlock(&cfq_exit_lock);
 
        cfq_shutdown_timer_wq(cfqd);
 
@@ -2220,14 +2233,14 @@ static void cfq_exit_queue(elevator_t *e)
        kfree(cfqd);
 }
 
-static int cfq_init_queue(request_queue_t *q, elevator_t *e)
+static void *cfq_init_queue(request_queue_t *q, elevator_t *e)
 {
        struct cfq_data *cfqd;
        int i;
 
        cfqd = kmalloc(sizeof(*cfqd), GFP_KERNEL);
        if (!cfqd)
-               return -ENOMEM;
+               return NULL;
 
        memset(cfqd, 0, sizeof(*cfqd));
 
@@ -2257,13 +2270,8 @@ static int cfq_init_queue(request_queue_t *q, elevator_t *e)
        for (i = 0; i < CFQ_QHASH_ENTRIES; i++)
                INIT_HLIST_HEAD(&cfqd->cfq_hash[i]);
 
-       e->elevator_data = cfqd;
-
        cfqd->queue = q;
 
-       cfqd->max_queued = q->nr_requests / 4;
-       q->nr_batching = cfq_queued;
-
        init_timer(&cfqd->idle_slice_timer);
        cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
        cfqd->idle_slice_timer.data = (unsigned long) cfqd;
@@ -2285,14 +2293,14 @@ static int cfq_init_queue(request_queue_t *q, elevator_t *e)
        cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
        cfqd->cfq_slice_idle = cfq_slice_idle;
 
-       return 0;
+       return cfqd;
 out_crqpool:
        kfree(cfqd->cfq_hash);
 out_cfqhash:
        kfree(cfqd->crq_hash);
 out_crqhash:
        kfree(cfqd);
-       return -ENOMEM;
+       return NULL;
 }
 
 static void cfq_slab_kill(void)
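
The cfq_init_queue() change at the end tracks an elevator-core interface change from this kernel era: elevator_init_fn now returns the scheduler's private data pointer (NULL on failure) and the core stores it in e->elevator_data, instead of the scheduler assigning it and returning an errno. A hedged userspace sketch of an init function under the new contract; the names and the malloc() stand-in for kmalloc() are illustrative:

#include <stdlib.h>
#include <string.h>

/* Illustrative stand-ins for kernel types; real code would use the
 * block layer's request_queue_t/elevator_t and kmalloc(). */
typedef struct request_queue request_queue_t;
typedef struct elevator elevator_t;

struct my_sched_data {
	int nr_queued;			/* whatever the scheduler tracks */
};

/* new-style init: hand the private data back to the elevator core
 * (which now assigns e->elevator_data itself), returning NULL on
 * failure instead of -ENOMEM */
static void *my_init_queue(request_queue_t *q, elevator_t *e)
{
	struct my_sched_data *d;

	(void)q; (void)e;		/* unused in this sketch */

	d = malloc(sizeof(*d));		/* kernel: kmalloc(..., GFP_KERNEL) */
	if (!d)
		return NULL;

	memset(d, 0, sizeof(*d));
	return d;
}

int main(void)
{
	void *data = my_init_queue(NULL, NULL);

	free(data);
	return data ? 0 : 1;
}
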