static const int cfq_slice_sync = HZ / 10;
static int cfq_slice_async = HZ / 25;
static const int cfq_slice_async_rq = 2;
-static int cfq_slice_idle = HZ / 100;
+static int cfq_slice_idle = HZ / 125;
#define CFQ_IDLE_GRACE (HZ / 10)
#define CFQ_SLICE_SCALE (5)
#define CFQ_KEY_ASYNC (0)
-#define CFQ_KEY_ANY (0xffff)
-/*
- * disable queueing at the driver/hardware level
- */
-static const int cfq_max_depth = 2;
-
-static DEFINE_RWLOCK(cfq_exit_lock);
+static DEFINE_SPINLOCK(cfq_exit_lock);
/*
* for the hash of cfqq inside the cfqd
#define cfq_cfqq_sync(cfqq) \
(cfq_cfqq_class_sync(cfqq) || (cfqq)->on_dispatch[SYNC])
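+/*
+ * think time and seek statistics are only trusted once we have gathered
+ * enough samples
+ */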
+#define sample_valid(samples) ((samples) > 80)
+
/*
* Per block device queue structure
*/
mempool_t *crq_pool;
int rq_in_driver;
+ int hw_tag;	/* set when the device/driver appears to do its own command queueing */
/*
* schedule slice state info
unsigned int cfq_slice[2];
unsigned int cfq_slice_async_rq;
unsigned int cfq_slice_idle;
- unsigned int cfq_max_depth;
struct list_head cic_list;
};
return !cfqd->busy_queues;
}
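+/*
+ * sync io from a process gets a queue of its own, keyed by pid; async io
+ * from all processes shares a single queue keyed by CFQ_KEY_ASYNC
+ */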
+static inline pid_t cfq_queue_pid(struct task_struct *task, int rw)
+{
+ if (rw == READ || process_sync(task))
+ return task->pid;
+
+ return CFQ_KEY_ASYNC;
+}
+
/*
* Lifted from AS - choose which of crq1 and crq2 that is best served now.
* We choose the request that is closest to the head right now. Distance
- * behind the head are penalized and only allowed to a certain extent.
+ * behind the head is penalized and only allowed to a certain extent.
*/
static struct cfq_rq *
cfq_choose_req(struct cfq_data *cfqd, struct cfq_rq *crq1, struct cfq_rq *crq2)
{
sector_t last, s1, s2, d1 = 0, d2 = 0;
- int r1_wrap = 0, r2_wrap = 0; /* requests are behind the disk head */
unsigned long back_max;
+#define CFQ_RQ1_WRAP 0x01 /* request 1 wraps */
+#define CFQ_RQ2_WRAP 0x02 /* request 2 wraps */
+ unsigned wrap = 0; /* bit mask: requests behind the disk head? */
if (crq1 == NULL || crq1 == crq2)
return crq2;
else if (s1 + back_max >= last)
d1 = (last - s1) * cfqd->cfq_back_penalty;
else
- r1_wrap = 1;
+ wrap |= CFQ_RQ1_WRAP;
if (s2 >= last)
d2 = s2 - last;
else if (s2 + back_max >= last)
d2 = (last - s2) * cfqd->cfq_back_penalty;
else
- r2_wrap = 1;
+ wrap |= CFQ_RQ2_WRAP;
/* Found required data */
- if (!r1_wrap && r2_wrap)
- return crq1;
- else if (!r2_wrap && r1_wrap)
- return crq2;
- else if (r1_wrap && r2_wrap) {
- /* both behind the head */
- if (s1 <= s2)
+
+ /*
+ * By doing switch() on the bit mask "wrap" we avoid having to
+ * check two variables for all permutations: --> faster!
+ */
+ switch (wrap) {
+ case 0: /* common case for CFQ: crq1 and crq2 not wrapped */
+ if (d1 < d2)
return crq1;
- else
+ else if (d2 < d1)
return crq2;
- }
+ else {
+ if (s1 >= s2)
+ return crq1;
+ else
+ return crq2;
+ }
- /* Both requests in front of the head */
- if (d1 < d2)
+ case CFQ_RQ2_WRAP:
return crq1;
- else if (d2 < d1)
+ case CFQ_RQ1_WRAP:
return crq2;
- else {
- if (s1 >= s2)
+ case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both crqs wrapped */
+ default:
+ /*
+ * Since both rqs are wrapped,
+ * start with the one that's further behind head
+ * (--> only *one* back seek required),
+ * since back seek takes more time than forward.
+ */
+ if (s1 <= s2)
return crq1;
else
return crq2;
/*
* if queue was preempted, just add to front to be fair. busy_rr
- * isn't sorted.
+ * isn't sorted, but insert at the back for fairness.
*/
if (preempted || list == &cfqd->busy_rr) {
- list_add(&cfqq->cfq_list, list);
+ if (preempted)
+ list = list->prev;
+
+ list_add_tail(&cfqq->cfq_list, list);
return;
}
cfq_add_crq_rb(crq);
}
-static struct request *cfq_find_rq_rb(struct cfq_data *cfqd, sector_t sector)
-
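+/*
+ * find a queued request that @bio could be front merged with, i.e. one
+ * starting at the sector where the bio ends
+ */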
+static struct request *
+cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
{
- struct cfq_queue *cfqq = cfq_find_cfq_hash(cfqd, current->pid, CFQ_KEY_ANY);
+ struct task_struct *tsk = current;
+ pid_t key = cfq_queue_pid(tsk, bio_data_dir(bio));
+ struct cfq_queue *cfqq;
struct rb_node *n;
+ sector_t sector;
+ cfqq = cfq_find_cfq_hash(cfqd, key, tsk->ioprio);
if (!cfqq)
goto out;
+ sector = bio->bi_sector + bio_sectors(bio);
n = cfqq->sort_list.rb_node;
while (n) {
struct cfq_rq *crq = rb_entry_crq(n);
struct cfq_data *cfqd = q->elevator->elevator_data;
cfqd->rq_in_driver++;
+
+ /*
+ * If the depth is larger than 1, it really could be queueing. But let's
+ * make the mark a little higher - idling could still be good for
+ * low queueing, and a low queueing number could also just indicate
+ * a SCSI mid layer like behaviour where limit+1 is often seen.
+ */
+ if (!cfqd->hw_tag && cfqd->rq_in_driver > 4)
+ cfqd->hw_tag = 1;
}
static void cfq_deactivate_request(request_queue_t *q, struct request *rq)
goto out;
}
- __rq = cfq_find_rq_rb(cfqd, bio->bi_sector + bio_sectors(bio));
+ __rq = cfq_find_rq_fmerge(cfqd, bio);
if (__rq && elv_rq_merge_ok(__rq, bio)) {
ret = ELEVATOR_FRONT_MERGE;
goto out;
if (!list_empty(&cfqd->cur_rr) || cfq_get_next_prio_level(cfqd) != -1)
cfqq = list_entry_cfqq(cfqd->cur_rr.next);
+ /*
+ * If no new queues are available, check if the busy list has some
+ * before falling back to idle io.
+ */
+ if (!cfqq && !list_empty(&cfqd->busy_rr))
+ cfqq = list_entry_cfqq(cfqd->busy_rr.next);
+
/*
* if we have idle queues and no rt or be queues had pending
* requests, either allow immediate service if the grace period
static int cfq_arm_slice_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
+ struct cfq_io_context *cic;
unsigned long sl;
WARN_ON(!RB_EMPTY(&cfqq->sort_list));
/*
* task has exited, don't wait
*/
- if (cfqd->active_cic && !cfqd->active_cic->ioc->task)
+ cic = cfqd->active_cic;
+ if (!cic || !cic->ioc->task)
return 0;
cfq_mark_cfqq_must_dispatch(cfqq);
cfq_mark_cfqq_wait_request(cfqq);
sl = min(cfqq->slice_end - 1, (unsigned long) cfqd->cfq_slice_idle);
+
+ /*
+ * we don't want to idle for seeks, but we do want to allow
+ * fair distribution of slice time for a process doing back-to-back
+ * seeks. so allow a little bit of time for him to submit a new rq
+ */
+ if (sample_valid(cic->seek_samples) && cic->seek_mean > 131072)
+ sl = 2;
+
mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
return 1;
}
if (cfqq) {
int max_dispatch;
- /*
- * if idle window is disabled, allow queue buildup
- */
- if (!cfq_cfqq_idle_window(cfqq) &&
- cfqd->rq_in_driver >= cfqd->cfq_max_depth)
- return 0;
-
cfq_clear_cfqq_must_dispatch(cfqq);
cfq_clear_cfqq_wait_request(cfqq);
del_timer(&cfqd->idle_slice_timer);
const int hashval)
{
struct hlist_head *hash_list = &cfqd->cfq_hash[hashval];
- struct hlist_node *entry, *next;
+ struct hlist_node *entry;
+ struct cfq_queue *__cfqq;
- hlist_for_each_safe(entry, next, hash_list) {
- struct cfq_queue *__cfqq = list_entry_qhash(entry);
+ hlist_for_each_entry(__cfqq, entry, hash_list, cfq_hash) {
const unsigned short __p = IOPRIO_PRIO_VALUE(__cfqq->org_ioprio_class, __cfqq->org_ioprio);
- if (__cfqq->key == key && (__p == prio || prio == CFQ_KEY_ANY))
+ if (__cfqq->key == key && (__p == prio || !prio))
return __cfqq;
}
return __cfq_find_cfq_hash(cfqd, key, prio, hash_long(key, CFQ_QHASH_SHIFT));
}
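+/*
+ * cic->dtor hook: tear down every cfq_io_context attached to this
+ * io_context and drop their references on ioc_count
+ */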
-static void cfq_free_io_context(struct cfq_io_context *cic)
+static void cfq_free_io_context(struct io_context *ioc)
{
struct cfq_io_context *__cic;
- struct list_head *entry, *next;
- int freed = 1;
+ struct rb_node *n;
+ int freed = 0;
- list_for_each_safe(entry, next, &cic->list) {
- __cic = list_entry(entry, struct cfq_io_context, list);
+ while ((n = rb_first(&ioc->cic_root)) != NULL) {
+ __cic = rb_entry(n, struct cfq_io_context, rb_node);
+ rb_erase(&__cic->rb_node, &ioc->cic_root);
kmem_cache_free(cfq_ioc_pool, __cic);
freed++;
}
- kmem_cache_free(cfq_ioc_pool, cic);
if (atomic_sub_and_test(freed, &ioc_count) && ioc_gone)
complete(ioc_gone);
}
static void cfq_trim(struct io_context *ioc)
{
ioc->set_ioprio = NULL;
- if (ioc->cic)
- cfq_free_io_context(ioc->cic);
+ cfq_free_io_context(ioc);
}
/*
spin_unlock(q->queue_lock);
}
-static void cfq_exit_io_context(struct cfq_io_context *cic)
+static void cfq_exit_io_context(struct io_context *ioc)
{
struct cfq_io_context *__cic;
- struct list_head *entry;
unsigned long flags;
-
- local_irq_save(flags);
+ struct rb_node *n;
/*
* put the reference this task is holding to the various queues
*/
- read_lock(&cfq_exit_lock);
- list_for_each(entry, &cic->list) {
- __cic = list_entry(entry, struct cfq_io_context, list);
+ spin_lock_irqsave(&cfq_exit_lock, flags);
+
+ n = rb_first(&ioc->cic_root);
+ while (n != NULL) {
+ __cic = rb_entry(n, struct cfq_io_context, rb_node);
+
cfq_exit_single_io_context(__cic);
+ n = rb_next(n);
}
- cfq_exit_single_io_context(cic);
- read_unlock(&cfq_exit_lock);
- local_irq_restore(flags);
+ spin_unlock_irqrestore(&cfq_exit_lock, flags);
}
static struct cfq_io_context *
struct cfq_io_context *cic = kmem_cache_alloc(cfq_ioc_pool, gfp_mask);
if (cic) {
- INIT_LIST_HEAD(&cic->list);
- cic->cfqq[ASYNC] = NULL;
- cic->cfqq[SYNC] = NULL;
- cic->key = NULL;
+ memset(cic, 0, sizeof(*cic));
+ RB_CLEAR_COLOR(&cic->rb_node);
cic->last_end_request = jiffies;
- cic->ttime_total = 0;
- cic->ttime_samples = 0;
- cic->ttime_mean = 0;
+ INIT_LIST_HEAD(&cic->queue_list);
cic->dtor = cfq_free_io_context;
cic->exit = cfq_exit_io_context;
- INIT_LIST_HEAD(&cic->queue_list);
atomic_inc(&ioc_count);
}
static int cfq_ioc_set_ioprio(struct io_context *ioc, unsigned int ioprio)
{
struct cfq_io_context *cic;
+ struct rb_node *n;
- write_lock(&cfq_exit_lock);
-
- cic = ioc->cic;
+ spin_lock(&cfq_exit_lock);
- changed_ioprio(cic);
+ n = rb_first(&ioc->cic_root);
+ while (n != NULL) {
+ cic = rb_entry(n, struct cfq_io_context, rb_node);
- list_for_each_entry(cic, &cic->list, list)
changed_ioprio(cic);
+ n = rb_next(n);
+ }
- write_unlock(&cfq_exit_lock);
+ spin_unlock(&cfq_exit_lock);
return 0;
}
* set ->slice_left to allow preemption for a new process
*/
cfqq->slice_left = 2 * cfqd->cfq_slice_idle;
- cfq_mark_cfqq_idle_window(cfqq);
+ if (!cfqd->hw_tag)
+ cfq_mark_cfqq_idle_window(cfqq);
cfq_mark_cfqq_prio_changed(cfqq);
cfq_init_prio_data(cfqq);
}
return cfqq;
}
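+/*
+ * a cic whose key is NULL belongs to a cfqd that has already exited;
+ * unlink it from the io_context and free it
+ */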
+static void
+cfq_drop_dead_cic(struct io_context *ioc, struct cfq_io_context *cic)
+{
+ spin_lock(&cfq_exit_lock);
+ rb_erase(&cic->rb_node, &ioc->cic_root);
+ list_del_init(&cic->queue_list);
+ spin_unlock(&cfq_exit_lock);
+ kmem_cache_free(cfq_ioc_pool, cic);
+ atomic_dec(&ioc_count);
+}
+
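+/*
+ * lookup in the per-ioc rbtree of cfq_io_contexts, keyed by the cfqd
+ * pointer; dead entries are dropped as they are found
+ */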
+static struct cfq_io_context *
+cfq_cic_rb_lookup(struct cfq_data *cfqd, struct io_context *ioc)
+{
+ struct rb_node *n;
+ struct cfq_io_context *cic;
+ void *k, *key = cfqd;
+
+restart:
+ n = ioc->cic_root.rb_node;
+ while (n) {
+ cic = rb_entry(n, struct cfq_io_context, rb_node);
+ /* ->key must be copied to avoid race with cfq_exit_queue() */
+ k = cic->key;
+ if (unlikely(!k)) {
+ cfq_drop_dead_cic(ioc, cic);
+ goto restart;
+ }
+
+ if (key < k)
+ n = n->rb_left;
+ else if (key > k)
+ n = n->rb_right;
+ else
+ return cic;
+ }
+
+ return NULL;
+}
+
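+/*
+ * link a new cic into the ioc rbtree and onto the cfqd cic_list, under
+ * cfq_exit_lock so queue and process exit cannot race with us
+ */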
+static inline void
+cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
+ struct cfq_io_context *cic)
+{
+ struct rb_node **p;
+ struct rb_node *parent;
+ struct cfq_io_context *__cic;
+ void *k;
+
+ cic->ioc = ioc;
+ cic->key = cfqd;
+
+ ioc->set_ioprio = cfq_ioc_set_ioprio;
+restart:
+ parent = NULL;
+ p = &ioc->cic_root.rb_node;
+ while (*p) {
+ parent = *p;
+ __cic = rb_entry(parent, struct cfq_io_context, rb_node);
+ /* ->key must be copied to avoid race with cfq_exit_queue() */
+ k = __cic->key;
+ if (unlikely(!k)) {
+ cfq_drop_dead_cic(ioc, cic);
+ goto restart;
+ }
+
+ if (cic->key < k)
+ p = &(*p)->rb_left;
+ else if (cic->key > k)
+ p = &(*p)->rb_right;
+ else
+ BUG();
+ }
+
+ spin_lock(&cfq_exit_lock);
+ rb_link_node(&cic->rb_node, parent, p);
+ rb_insert_color(&cic->rb_node, &ioc->cic_root);
+ list_add(&cic->queue_list, &cfqd->cic_list);
+ spin_unlock(&cfq_exit_lock);
+}
+
/*
* Setup general io context and cfq io context. There can be several cfq
* io contexts per general io context, if this process is doing io to more
- * than one device managed by cfq. Note that caller is holding a reference to
- * cfqq, so we don't need to worry about it disappearing
+ * than one device managed by cfq.
*/
static struct cfq_io_context *
-cfq_get_io_context(struct cfq_data *cfqd, pid_t pid, gfp_t gfp_mask)
+cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
{
struct io_context *ioc = NULL;
struct cfq_io_context *cic;
if (!ioc)
return NULL;
-restart:
- if ((cic = ioc->cic) == NULL) {
- cic = cfq_alloc_io_context(cfqd, gfp_mask);
-
- if (cic == NULL)
- goto err;
-
- /*
- * manually increment generic io_context usage count, it
- * cannot go away since we are already holding one ref to it
- */
- cic->ioc = ioc;
- cic->key = cfqd;
- read_lock(&cfq_exit_lock);
- ioc->set_ioprio = cfq_ioc_set_ioprio;
- ioc->cic = cic;
- list_add(&cic->queue_list, &cfqd->cic_list);
- read_unlock(&cfq_exit_lock);
- } else {
- struct cfq_io_context *__cic;
-
- /*
- * the first cic on the list is actually the head itself
- */
- if (cic->key == cfqd)
- goto out;
-
- if (unlikely(!cic->key)) {
- read_lock(&cfq_exit_lock);
- if (list_empty(&cic->list))
- ioc->cic = NULL;
- else
- ioc->cic = list_entry(cic->list.next,
- struct cfq_io_context,
- list);
- read_unlock(&cfq_exit_lock);
- kmem_cache_free(cfq_ioc_pool, cic);
- atomic_dec(&ioc_count);
- goto restart;
- }
-
- /*
- * cic exists, check if we already are there. linear search
- * should be ok here, the list will usually not be more than
- * 1 or a few entries long
- */
- list_for_each_entry(__cic, &cic->list, list) {
- /*
- * this process is already holding a reference to
- * this queue, so no need to get one more
- */
- if (__cic->key == cfqd) {
- cic = __cic;
- goto out;
- }
- if (unlikely(!__cic->key)) {
- read_lock(&cfq_exit_lock);
- list_del(&__cic->list);
- read_unlock(&cfq_exit_lock);
- kmem_cache_free(cfq_ioc_pool, __cic);
- atomic_dec(&ioc_count);
- goto restart;
- }
- }
+ cic = cfq_cic_rb_lookup(cfqd, ioc);
+ if (cic)
+ goto out;
- /*
- * nope, process doesn't have a cic assoicated with this
- * cfqq yet. get a new one and add to list
- */
- __cic = cfq_alloc_io_context(cfqd, gfp_mask);
- if (__cic == NULL)
- goto err;
-
- __cic->ioc = ioc;
- __cic->key = cfqd;
- read_lock(&cfq_exit_lock);
- list_add(&__cic->list, &cic->list);
- list_add(&__cic->queue_list, &cfqd->cic_list);
- read_unlock(&cfq_exit_lock);
- cic = __cic;
- }
+ cic = cfq_alloc_io_context(cfqd, gfp_mask);
+ if (cic == NULL)
+ goto err;
+ cfq_cic_link(cfqd, ioc, cic);
out:
return cic;
err:
cic->ttime_mean = (cic->ttime_total + 128) / cic->ttime_samples;
}
-#define sample_valid(samples) ((samples) > 80)
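+/*
+ * track a decaying average of the seek distance for this process: samples
+ * and total are aged by 7/8 on each update, and seek_mean = total / samples
+ */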
+static void
+cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_io_context *cic,
+ struct cfq_rq *crq)
+{
+ sector_t sdist;
+ u64 total;
+
+ if (cic->last_request_pos < crq->request->sector)
+ sdist = crq->request->sector - cic->last_request_pos;
+ else
+ sdist = cic->last_request_pos - crq->request->sector;
+
+ /*
+ * Don't allow the seek distance to get too large from the
+ * odd fragment, pagein, etc
+ */
+ if (cic->seek_samples <= 60) /* second&third seek */
+ sdist = min(sdist, (cic->seek_mean * 4) + 2*1024*1024);
+ else
+ sdist = min(sdist, (cic->seek_mean * 4) + 2*1024*64);
+
+ cic->seek_samples = (7*cic->seek_samples + 256) / 8;
+ cic->seek_total = (7*cic->seek_total + (u64)256*sdist) / 8;
+ total = cic->seek_total + (cic->seek_samples/2);
+ do_div(total, cic->seek_samples);
+ cic->seek_mean = (sector_t)total;
+}
/*
* Disable idle window if the process thinks too long or seeks so much that
{
int enable_idle = cfq_cfqq_idle_window(cfqq);
- if (!cic->ioc->task || !cfqd->cfq_slice_idle)
+ if (!cic->ioc->task || !cfqd->cfq_slice_idle || cfqd->hw_tag)
enable_idle = 0;
else if (sample_valid(cic->ttime_samples)) {
if (cic->ttime_mean > cfqd->cfq_slice_idle)
cfqq->next_crq = cfq_choose_req(cfqd, cfqq->next_crq, crq);
+ cic = crq->io_context;
+
/*
* we never wait for an async request and we don't allow preemption
* of an async request. so just return early
*/
- if (!cfq_crq_is_sync(crq))
+ if (!cfq_crq_is_sync(crq)) {
+ /*
+ * sync process issued an async request, if it's waiting
+ * then expire it and kick rq handling.
+ */
+ if (cic == cfqd->active_cic &&
+ del_timer(&cfqd->idle_slice_timer)) {
+ cfq_slice_expired(cfqd, 0);
+ cfq_start_queueing(cfqd, cfqq);
+ }
return;
-
- cic = crq->io_context;
+ }
cfq_update_io_thinktime(cfqd, cic);
+ cfq_update_io_seektime(cfqd, cic, crq);
cfq_update_idle_window(cfqd, cfqq, cic);
cic->last_queue = jiffies;
+ cic->last_request_pos = crq->request->sector + crq->request->nr_sectors;
if (cfqq == cfqd->active_queue) {
/*
cfq_resort_rr_list(cfqq, 0);
}
-static inline pid_t cfq_queue_pid(struct task_struct *task, int rw)
-{
- if (rw == READ || process_sync(task))
- return task->pid;
-
- return CFQ_KEY_ASYNC;
-}
-
static inline int
__cfq_may_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq,
struct task_struct *task, int rw)
might_sleep_if(gfp_mask & __GFP_WAIT);
- cic = cfq_get_io_context(cfqd, key, gfp_mask);
+ cic = cfq_get_io_context(cfqd, gfp_mask);
spin_lock_irqsave(q->queue_lock, flags);
* race with a non-idle queue, reset timer
*/
end = cfqd->last_end_request + CFQ_IDLE_GRACE;
- if (!time_after_eq(jiffies, end)) {
- cfqd->idle_class_timer.expires = end;
- add_timer(&cfqd->idle_class_timer);
- } else
+ if (!time_after_eq(jiffies, end))
+ mod_timer(&cfqd->idle_class_timer, end);
+ else
cfq_schedule_dispatch(cfqd);
spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
request_queue_t *q = cfqd->queue;
cfq_shutdown_timer_wq(cfqd);
- write_lock(&cfq_exit_lock);
+
+ spin_lock(&cfq_exit_lock);
spin_lock_irq(q->queue_lock);
+
if (cfqd->active_queue)
__cfq_slice_expired(cfqd, cfqd->active_queue, 0);
- while(!list_empty(&cfqd->cic_list)) {
+
+ while (!list_empty(&cfqd->cic_list)) {
struct cfq_io_context *cic = list_entry(cfqd->cic_list.next,
struct cfq_io_context,
queue_list);
cic->key = NULL;
list_del_init(&cic->queue_list);
}
+
spin_unlock_irq(q->queue_lock);
- write_unlock(&cfq_exit_lock);
+ spin_unlock(&cfq_exit_lock);
cfq_shutdown_timer_wq(cfqd);
kfree(cfqd);
}
-static int cfq_init_queue(request_queue_t *q, elevator_t *e)
+static void *cfq_init_queue(request_queue_t *q, elevator_t *e)
{
struct cfq_data *cfqd;
int i;
cfqd = kmalloc(sizeof(*cfqd), GFP_KERNEL);
if (!cfqd)
- return -ENOMEM;
+ return NULL;
memset(cfqd, 0, sizeof(*cfqd));
if (!cfqd->cfq_hash)
goto out_cfqhash;
- cfqd->crq_pool = mempool_create(BLKDEV_MIN_RQ, mempool_alloc_slab, mempool_free_slab, crq_pool);
+ cfqd->crq_pool = mempool_create_slab_pool(BLKDEV_MIN_RQ, crq_pool);
if (!cfqd->crq_pool)
goto out_crqpool;
for (i = 0; i < CFQ_QHASH_ENTRIES; i++)
INIT_HLIST_HEAD(&cfqd->cfq_hash[i]);
- e->elevator_data = cfqd;
-
cfqd->queue = q;
cfqd->max_queued = q->nr_requests / 4;
cfqd->cfq_slice[1] = cfq_slice_sync;
cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
cfqd->cfq_slice_idle = cfq_slice_idle;
- cfqd->cfq_max_depth = cfq_max_depth;
- return 0;
+ return cfqd;
out_crqpool:
kfree(cfqd->cfq_hash);
out_cfqhash:
kfree(cfqd->crq_hash);
out_crqhash:
kfree(cfqd);
- return -ENOMEM;
+ return NULL;
}
static void cfq_slab_kill(void)
SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
-SHOW_FUNCTION(cfq_max_depth_show, cfqd->cfq_max_depth, 0);
#undef SHOW_FUNCTION
#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1, UINT_MAX, 0);
-STORE_FUNCTION(cfq_max_depth_store, &cfqd->cfq_max_depth, 1, UINT_MAX, 0);
#undef STORE_FUNCTION
#define CFQ_ATTR(name) \
CFQ_ATTR(slice_async),
CFQ_ATTR(slice_async_rq),
CFQ_ATTR(slice_idle),
- CFQ_ATTR(max_depth),
__ATTR_NULL
};
DECLARE_COMPLETION(all_gone);
elv_unregister(&iosched_cfq);
ioc_gone = &all_gone;
- barrier();
+ /* ioc_gone's update must be visible before reading ioc_count */
+ smp_wmb();
if (atomic_read(&ioc_count))
- complete(ioc_gone);
+ wait_for_completion(ioc_gone);
synchronize_rcu();
cfq_slab_kill();
}