* Based on ideas from a previously unfinished io
* scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
*
- * Copyright (C) 2003 Jens Axboe <axboe@suse.de>
+ * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
*/
#include <linux/module.h>
#include <linux/blkdev.h>
* tunables
*/
static const int cfq_quantum = 4; /* max requests dispatched in one round of service */
-static const int cfq_queued = 8; /* minimum rq allocate limit per-queue*/
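+/* fifo expiry per class: [0] async (HZ / 4), [1] sync (HZ / 8) */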
static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
static const int cfq_back_max = 16 * 1024; /* maximum backwards seek, in KiB */
static const int cfq_back_penalty = 2; /* penalty of a backwards seek */
#define CFQ_KEY_ASYNC (0)
-static DEFINE_SPINLOCK(cfq_exit_lock);
-
/*
* for the hash of cfqq inside the cfqd
*/
static kmem_cache_t *cfq_pool;
static kmem_cache_t *cfq_ioc_pool;
-static atomic_t ioc_count = ATOMIC_INIT(0);
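+/*
+ * per-cpu count of allocated cfq io contexts, summed with
+ * elv_ioc_count_read() when waiting for them to drain on exit
+ */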
+static DEFINE_PER_CPU(unsigned long, ioc_count);
static struct completion *ioc_gone;
#define CFQ_PRIO_LISTS IOPRIO_BE_NR
#define cfq_class_idle(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
-#define cfq_class_be(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_BE)
#define cfq_class_rt(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_RT)
#define ASYNC (0)
struct list_head idle_rr;
unsigned int busy_queues;
- /*
- * non-ordered list of empty cfqq's
- */
- struct list_head empty_list;
-
/*
* cfqq lookup hash
*/
int rq_in_driver;
int hw_tag;
- /*
- * schedule slice state info
- */
/*
* idle window management
*/
sector_t last_sector;
unsigned long last_end_request;
- unsigned int rq_starved;
-
/*
* tunables, see top of file
*/
unsigned int cfq_quantum;
- unsigned int cfq_queued;
unsigned int cfq_fifo_expire[2];
unsigned int cfq_back_penalty;
unsigned int cfq_back_max;
struct hlist_node cfq_hash;
/* hash key */
unsigned int key;
- /* on either rr or empty list of cfqd */
+ /* member of the rr/busy/cur/idle cfqd list */
struct list_head cfq_list;
/* sorted list of pending requests */
struct rb_root sort_list;
int queued[2];
/* currently allocated requests */
int allocated[2];
+ /* pending metadata requests */
+ int meta_pending;
/* fifo list of requests in sort_list */
struct list_head fifo;
unsigned long slice_start;
unsigned long slice_end;
unsigned long slice_left;
- unsigned long service_last;
/* number of requests that are on the dispatch list */
int on_dispatch[2];
CFQ_CFQQ_FLAG_fifo_expire,
CFQ_CFQQ_FLAG_idle_window,
CFQ_CFQQ_FLAG_prio_changed,
+ CFQ_CFQQ_FLAG_queue_new,
};
#define CFQ_CFQQ_FNS(name) \
CFQ_CFQQ_FNS(fifo_expire);
CFQ_CFQQ_FNS(idle_window);
CFQ_CFQQ_FNS(prio_changed);
+CFQ_CFQQ_FNS(queue_new);
#undef CFQ_CFQQ_FNS
static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *, unsigned int, unsigned short);
return rq1;
else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
return rq2;
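+ /* sync status being equal, prefer the metadata request */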
+ if (rq_is_meta(rq1) && !rq_is_meta(rq2))
+ return rq1;
+ else if (rq_is_meta(rq2) && !rq_is_meta(rq1))
+ return rq2;
s1 = rq1->sector;
s2 = rq2->sector;
static void cfq_resort_rr_list(struct cfq_queue *cfqq, int preempted)
{
struct cfq_data *cfqd = cfqq->cfqd;
- struct list_head *list, *entry;
+ struct list_head *list;
BUG_ON(!cfq_cfqq_on_rr(cfqq));
}
/*
- * if queue was preempted, just add to front to be fair. busy_rr
- * isn't sorted, but insert at the back for fairness.
+ * If this queue was preempted or is new (never been serviced), let
+ * it be added first for fairness, but behind other new queues.
+ * Otherwise, just add to the back of the list.
*/
- if (preempted || list == &cfqd->busy_rr) {
- if (preempted)
- list = list->prev;
+ if (preempted || cfq_cfqq_queue_new(cfqq)) {
+ struct list_head *n = list;
+ struct cfq_queue *__cfqq;
- list_add_tail(&cfqq->cfq_list, list);
- return;
- }
+ while (n->next != list) {
+ __cfqq = list_entry_cfqq(n->next);
+ if (!cfq_cfqq_queue_new(__cfqq))
+ break;
- /*
- * sort by when queue was last serviced
- */
- entry = list;
- while ((entry = entry->prev) != list) {
- struct cfq_queue *__cfqq = list_entry_cfqq(entry);
+ n = n->next;
+ }
- if (!__cfqq->service_last)
- break;
- if (time_before(__cfqq->service_last, cfqq->service_last))
- break;
+ list = n;
}
- list_add(&cfqq->cfq_list, entry);
+ list_add_tail(&cfqq->cfq_list, list);
}
/*
{
BUG_ON(!cfq_cfqq_on_rr(cfqq));
cfq_clear_cfqq_on_rr(cfqq);
- list_move(&cfqq->cfq_list, &cfqd->empty_list);
+ list_del_init(&cfqq->cfq_list);
BUG_ON(!cfqd->busy_queues);
cfqd->busy_queues--;
{
struct task_struct *tsk = current;
pid_t key = cfq_queue_pid(tsk, bio_data_dir(bio));
- sector_t sector = bio->bi_sector + bio_sectors(bio);
struct cfq_queue *cfqq;
cfqq = cfq_find_cfq_hash(cfqd, key, tsk->ioprio);
- if (cfqq)
+ if (cfqq) {
+ sector_t sector = bio->bi_sector + bio_sectors(bio);
+
return elv_rb_find(&cfqq->sort_list, sector);
+ }
return NULL;
}
list_del_init(&rq->queuelist);
cfq_del_rq_rb(rq);
+
+ if (rq_is_meta(rq)) {
+ WARN_ON(!cfqq->meta_pending);
+ cfqq->meta_pending--;
+ }
}
static int
if (cfq_cfqq_wait_request(cfqq))
del_timer(&cfqd->idle_slice_timer);
- if (!preempted && !cfq_cfqq_dispatched(cfqq)) {
- cfqq->service_last = now;
+ if (!preempted && !cfq_cfqq_dispatched(cfqq))
cfq_schedule_dispatch(cfqd);
- }
cfq_clear_cfqq_must_dispatch(cfqq);
cfq_clear_cfqq_wait_request(cfqq);
+ cfq_clear_cfqq_queue_new(cfqq);
/*
* store what was left of this slice, if the queue idled out
{
struct cfq_queue *cfqq = NULL;
- /*
- * if current list is non-empty, grab first entry. if it is empty,
- * get next prio level and grab first entry then if any are spliced
- */
- if (!list_empty(&cfqd->cur_rr) || cfq_get_next_prio_level(cfqd) != -1)
+ if (!list_empty(&cfqd->cur_rr) || cfq_get_next_prio_level(cfqd) != -1) {
+ /*
+ * if the current list is non-empty, grab its first entry. if it
+ * is empty, pull in the next prio level and grab the first of
+ * any queues spliced onto the current list
+ */
cfqq = list_entry_cfqq(cfqd->cur_rr.next);
-
- /*
- * If no new queues are available, check if the busy list has some
- * before falling back to idle io.
- */
- if (!cfqq && !list_empty(&cfqd->busy_rr))
+ } else if (!list_empty(&cfqd->busy_rr)) {
+ /*
+ * If no new queues are available, check if the busy list has
+ * some before falling back to idle io.
+ */
cfqq = list_entry_cfqq(cfqd->busy_rr.next);
-
- /*
- * if we have idle queues and no rt or be queues had pending
- * requests, either allow immediate service if the grace period
- * has passed or arm the idle grace timer
- */
- if (!cfqq && !list_empty(&cfqd->idle_rr)) {
+ } else if (!list_empty(&cfqd->idle_rr)) {
+ /*
+ * if we have idle queues and no rt or be queues had pending
+ * requests, either allow immediate service if the grace period
+ * has passed or arm the idle grace timer
+ */
unsigned long end = cfqd->last_end_request + CFQ_IDLE_GRACE;
if (time_after_eq(jiffies, end))
{
struct cfq_data *cfqd = cfqq->cfqd;
struct request *rq;
+ int fifo;
if (cfq_cfqq_fifo_expire(cfqq))
return NULL;
+ if (list_empty(&cfqq->fifo))
+ return NULL;
- if (!list_empty(&cfqq->fifo)) {
- int fifo = cfq_cfqq_class_sync(cfqq);
+ fifo = cfq_cfqq_class_sync(cfqq);
+ rq = rq_entry_fifo(cfqq->fifo.next);
- rq = rq_entry_fifo(cfqq->fifo.next);
- if (time_after(jiffies, rq->start_time + cfqd->cfq_fifo_expire[fifo])) {
- cfq_mark_cfqq_fifo_expire(cfqq);
- return rq;
- }
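+ /* the fifo is time-ordered, so only the head request can have expired */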
+ if (time_after(jiffies, rq->start_time + cfqd->cfq_fifo_expire[fifo])) {
+ cfq_mark_cfqq_fifo_expire(cfqq);
+ return rq;
}
return NULL;
kmem_cache_free(cfq_pool, cfqq);
}
-static inline struct cfq_queue *
+static struct cfq_queue *
__cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned int key, unsigned int prio,
const int hashval)
{
freed++;
}
- if (atomic_sub_and_test(freed, &ioc_count) && ioc_gone)
+ elv_ioc_count_mod(ioc_count, -freed);
+
+ if (ioc_gone && !elv_ioc_count_read(ioc_count))
complete(ioc_gone);
}
-static void cfq_trim(struct io_context *ioc)
+static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
- ioc->set_ioprio = NULL;
- cfq_free_io_context(ioc);
+ if (unlikely(cfqq == cfqd->active_queue))
+ __cfq_slice_expired(cfqd, cfqq, 0);
+
+ cfq_put_queue(cfqq);
}
-/*
- * Called with interrupts disabled
- */
-static void cfq_exit_single_io_context(struct cfq_io_context *cic)
+static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
+ struct cfq_io_context *cic)
{
- struct cfq_data *cfqd = cic->key;
- request_queue_t *q;
-
- if (!cfqd)
- return;
-
- q = cfqd->queue;
-
- WARN_ON(!irqs_disabled());
-
- spin_lock(q->queue_lock);
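+ /*
+ * unhook the cic from the cfqd and mark it dead; lookups reap
+ * NULL-key cics via cfq_drop_dead_cic()
+ */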
+ list_del_init(&cic->queue_list);
+ smp_wmb();
+ cic->key = NULL;
if (cic->cfqq[ASYNC]) {
- if (unlikely(cic->cfqq[ASYNC] == cfqd->active_queue))
- __cfq_slice_expired(cfqd, cic->cfqq[ASYNC], 0);
- cfq_put_queue(cic->cfqq[ASYNC]);
+ cfq_exit_cfqq(cfqd, cic->cfqq[ASYNC]);
cic->cfqq[ASYNC] = NULL;
}
if (cic->cfqq[SYNC]) {
- if (unlikely(cic->cfqq[SYNC] == cfqd->active_queue))
- __cfq_slice_expired(cfqd, cic->cfqq[SYNC], 0);
- cfq_put_queue(cic->cfqq[SYNC]);
+ cfq_exit_cfqq(cfqd, cic->cfqq[SYNC]);
cic->cfqq[SYNC] = NULL;
}
+}
- cic->key = NULL;
- list_del_init(&cic->queue_list);
- spin_unlock(q->queue_lock);
+
+/*
+ * Called with interrupts disabled
+ */
+static void cfq_exit_single_io_context(struct cfq_io_context *cic)
+{
+ struct cfq_data *cfqd = cic->key;
+
+ if (cfqd) {
+ request_queue_t *q = cfqd->queue;
+
+ spin_lock_irq(q->queue_lock);
+ __cfq_exit_single_io_context(cfqd, cic);
+ spin_unlock_irq(q->queue_lock);
+ }
}
static void cfq_exit_io_context(struct io_context *ioc)
{
struct cfq_io_context *__cic;
- unsigned long flags;
struct rb_node *n;
/*
* put the reference this task is holding to the various queues
*/
- spin_lock_irqsave(&cfq_exit_lock, flags);
n = rb_first(&ioc->cic_root);
while (n != NULL) {
cfq_exit_single_io_context(__cic);
n = rb_next(n);
}
-
- spin_unlock_irqrestore(&cfq_exit_lock, flags);
}
static struct cfq_io_context *
cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
{
- struct cfq_io_context *cic = kmem_cache_alloc(cfq_ioc_pool, gfp_mask);
+ struct cfq_io_context *cic;
+ cic = kmem_cache_alloc_node(cfq_ioc_pool, gfp_mask, cfqd->queue->node);
if (cic) {
memset(cic, 0, sizeof(*cic));
cic->last_end_request = jiffies;
INIT_LIST_HEAD(&cic->queue_list);
cic->dtor = cfq_free_io_context;
cic->exit = cfq_exit_io_context;
- atomic_inc(&ioc_count);
+ elv_ioc_count_inc(ioc_count);
}
return cic;
spin_unlock(cfqd->queue->queue_lock);
}
-/*
- * callback from sys_ioprio_set, irqs are disabled
- */
-static int cfq_ioc_set_ioprio(struct io_context *ioc, unsigned int ioprio)
+static void cfq_ioc_set_ioprio(struct io_context *ioc)
{
struct cfq_io_context *cic;
struct rb_node *n;
- spin_lock(&cfq_exit_lock);
+ ioc->ioprio_changed = 0;
n = rb_first(&ioc->cic_root);
while (n != NULL) {
changed_ioprio(cic);
n = rb_next(n);
}
-
- spin_unlock(&cfq_exit_lock);
-
- return 0;
}
static struct cfq_queue *
cfqq = new_cfqq;
new_cfqq = NULL;
} else if (gfp_mask & __GFP_WAIT) {
+ /*
+ * Inform the allocator that we will just repeat this
+ * allocation if it fails, so that it can do whatever it
+ * needs to in order to free up memory.
+ */
spin_unlock_irq(cfqd->queue->queue_lock);
- new_cfqq = kmem_cache_alloc(cfq_pool, gfp_mask);
+ new_cfqq = kmem_cache_alloc_node(cfq_pool, gfp_mask|__GFP_NOFAIL, cfqd->queue->node);
spin_lock_irq(cfqd->queue->queue_lock);
goto retry;
} else {
- cfqq = kmem_cache_alloc(cfq_pool, gfp_mask);
+ cfqq = kmem_cache_alloc_node(cfq_pool, gfp_mask, cfqd->queue->node);
if (!cfqq)
goto out;
}
hlist_add_head(&cfqq->cfq_hash, &cfqd->cfq_hash[hashval]);
atomic_set(&cfqq->ref, 0);
cfqq->cfqd = cfqd;
- cfqq->service_last = 0;
/*
* set ->slice_left to allow preemption for a new process
*/
cfqq->slice_left = 2 * cfqd->cfq_slice_idle;
cfq_mark_cfqq_idle_window(cfqq);
cfq_mark_cfqq_prio_changed(cfqq);
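+ /* flag as never-serviced; cfq_resort_rr_list() slots new queues near the front */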
+ cfq_mark_cfqq_queue_new(cfqq);
cfq_init_prio_data(cfqq);
}
static void
cfq_drop_dead_cic(struct io_context *ioc, struct cfq_io_context *cic)
{
- spin_lock(&cfq_exit_lock);
+ WARN_ON(!list_empty(&cic->queue_list));
rb_erase(&cic->rb_node, &ioc->cic_root);
- list_del_init(&cic->queue_list);
- spin_unlock(&cfq_exit_lock);
kmem_cache_free(cfq_ioc_pool, cic);
- atomic_dec(&ioc_count);
+ elv_ioc_count_dec(ioc_count);
}
static struct cfq_io_context *
cic->ioc = ioc;
cic->key = cfqd;
- ioc->set_ioprio = cfq_ioc_set_ioprio;
restart:
parent = NULL;
p = &ioc->cic_root.rb_node;
BUG();
}
- spin_lock(&cfq_exit_lock);
rb_link_node(&cic->rb_node, parent, p);
rb_insert_color(&cic->rb_node, &ioc->cic_root);
+
+ spin_lock_irq(cfqd->queue->queue_lock);
list_add(&cic->queue_list, &cfqd->cic_list);
- spin_unlock(&cfq_exit_lock);
+ spin_unlock_irq(cfqd->queue->queue_lock);
}
/*
might_sleep_if(gfp_mask & __GFP_WAIT);
- ioc = get_io_context(gfp_mask);
+ ioc = get_io_context(gfp_mask, cfqd->queue->node);
if (!ioc)
return NULL;
cfq_cic_link(cfqd, ioc, cic);
out:
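+ /* the ->set_ioprio callback is gone; apply any pending ioprio change lazily here */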
+ smp_read_barrier_depends();
+ if (unlikely(ioc->ioprio_changed))
+ cfq_ioc_set_ioprio(ioc);
+
return cic;
err:
put_io_context(ioc);
*/
if (new_cfqq->slice_left < cfqd->cfq_slice_idle)
return 0;
+ /*
+ * if the new request is sync, but the currently running queue is
+ * not, let the sync request have priority.
+ */
if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
return 1;
+ /*
+ * So both queues are sync. Let the new request get disk time if
+ * it's a metadata request and the current queue is doing regular IO.
+ */
+ if (rq_is_meta(rq) && !cfqq->meta_pending)
+ return 1;
return 0;
}
*/
static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
- struct cfq_queue *__cfqq, *next;
-
- list_for_each_entry_safe(__cfqq, next, &cfqd->cur_rr, cfq_list)
- cfq_resort_rr_list(__cfqq, 1);
+ cfq_slice_expired(cfqd, 1);
if (!cfqq->slice_left)
cfqq->slice_left = cfq_prio_to_slice(cfqd, cfqq) / 2;
- cfqq->slice_end = cfqq->slice_left + jiffies;
- cfq_slice_expired(cfqd, 1);
- __cfq_set_active_queue(cfqd, cfqq);
-}
-
-/*
- * should really be a ll_rw_blk.c helper
- */
-static void cfq_start_queueing(struct cfq_data *cfqd, struct cfq_queue *cfqq)
-{
- request_queue_t *q = cfqd->queue;
+ /*
+ * Put the new queue at the front of the current list, so we
+ * know that it will be selected next.
+ */
+ BUG_ON(!cfq_cfqq_on_rr(cfqq));
+ list_move(&cfqq->cfq_list, &cfqd->cur_rr);
- if (!blk_queue_plugged(q))
- q->request_fn(q);
- else
- __generic_unplug_device(q);
+ cfqq->slice_end = cfqq->slice_left + jiffies;
}
/*
{
struct cfq_io_context *cic = RQ_CIC(rq);
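+ /* account metadata requests; cfq_should_preempt() favors queues issuing metadata IO */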
+ if (rq_is_meta(rq))
+ cfqq->meta_pending++;
+
/*
* check if this request is a better next-serve candidate
*/
if (cic == cfqd->active_cic &&
del_timer(&cfqd->idle_slice_timer)) {
cfq_slice_expired(cfqd, 0);
- cfq_start_queueing(cfqd, cfqq);
+ blk_start_queueing(cfqd->queue);
}
return;
}
if (cfq_cfqq_wait_request(cfqq)) {
cfq_mark_cfqq_must_dispatch(cfqq);
del_timer(&cfqd->idle_slice_timer);
- cfq_start_queueing(cfqd, cfqq);
+ blk_start_queueing(cfqd->queue);
}
} else if (cfq_should_preempt(cfqd, cfqq, rq)) {
/*
*/
cfq_preempt_queue(cfqd, cfqq);
cfq_mark_cfqq_must_dispatch(cfqq);
- cfq_start_queueing(cfqd, cfqq);
+ blk_start_queueing(cfqd->queue);
}
}
if (!cfq_class_idle(cfqq))
cfqd->last_end_request = now;
- if (!cfq_cfqq_dispatched(cfqq)) {
- if (cfq_cfqq_on_rr(cfqq)) {
- cfqq->service_last = now;
- cfq_resort_rr_list(cfqq, 0);
- }
- }
+ if (!cfq_cfqq_dispatched(cfqq) && cfq_cfqq_on_rr(cfqq))
+ cfq_resort_rr_list(cfqq, 0);
if (sync)
RQ_CIC(rq)->last_end_request = now;
cfq_resort_rr_list(cfqq, 0);
}
-static inline int
-__cfq_may_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq,
- struct task_struct *task, int rw)
+static inline int __cfq_may_queue(struct cfq_queue *cfqq)
{
if ((cfq_cfqq_wait_request(cfqq) || cfq_cfqq_must_alloc(cfqq)) &&
!cfq_cfqq_must_alloc_slice(cfqq)) {
return ELV_MQUEUE_MAY;
}
-static int cfq_may_queue(request_queue_t *q, int rw, struct bio *bio)
+static int cfq_may_queue(request_queue_t *q, int rw)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
struct task_struct *tsk = current;
cfq_init_prio_data(cfqq);
cfq_prio_boost(cfqq);
- return __cfq_may_queue(cfqd, cfqq, tsk, rw);
+ return __cfq_may_queue(cfqq);
}
return ELV_MQUEUE_MAY;
}
-static void cfq_check_waiters(request_queue_t *q, struct cfq_queue *cfqq)
-{
- struct cfq_data *cfqd = q->elevator->elevator_data;
-
- if (unlikely(cfqd->rq_starved)) {
- struct request_list *rl = &q->rq;
-
- smp_mb();
- if (waitqueue_active(&rl->wait[READ]))
- wake_up(&rl->wait[READ]);
- if (waitqueue_active(&rl->wait[WRITE]))
- wake_up(&rl->wait[WRITE]);
- }
-}
-
/*
* queue lock held here
*/
rq->elevator_private = NULL;
rq->elevator_private2 = NULL;
- cfq_check_waiters(q, cfqq);
cfq_put_queue(cfqq);
}
}
* Allocate cfq data structures associated with this request.
*/
static int
-cfq_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
- gfp_t gfp_mask)
+cfq_set_request(request_queue_t *q, struct request *rq, gfp_t gfp_mask)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
struct task_struct *tsk = current;
cfqq->allocated[rw]++;
cfq_clear_cfqq_must_alloc(cfqq);
- cfqd->rq_starved = 0;
atomic_inc(&cfqq->ref);
spin_unlock_irqrestore(q->queue_lock, flags);
queue_fail:
if (cic)
put_io_context(cic->ioc);
- /*
- * mark us rq allocation starved. we need to kickstart the process
- * ourselves if there are no pending requests that can do it for us.
- * that would be an extremely rare OOM situation
- */
- cfqd->rq_starved = 1;
+
cfq_schedule_dispatch(cfqd);
spin_unlock_irqrestore(q->queue_lock, flags);
return 1;
static void cfq_kick_queue(void *data)
{
request_queue_t *q = data;
- struct cfq_data *cfqd = q->elevator->elevator_data;
unsigned long flags;
spin_lock_irqsave(q->queue_lock, flags);
-
- if (cfqd->rq_starved) {
- struct request_list *rl = &q->rq;
-
- /*
- * we aren't guaranteed to get a request after this, but we
- * have to be opportunistic
- */
- smp_mb();
- if (waitqueue_active(&rl->wait[READ]))
- wake_up(&rl->wait[READ]);
- if (waitqueue_active(&rl->wait[WRITE]))
- wake_up(&rl->wait[WRITE]);
- }
-
- blk_remove_plug(q);
- q->request_fn(q);
+ blk_start_queueing(q);
spin_unlock_irqrestore(q->queue_lock, flags);
}
cfq_shutdown_timer_wq(cfqd);
- spin_lock(&cfq_exit_lock);
spin_lock_irq(q->queue_lock);
if (cfqd->active_queue)
struct cfq_io_context *cic = list_entry(cfqd->cic_list.next,
struct cfq_io_context,
queue_list);
- if (cic->cfqq[ASYNC]) {
- cfq_put_queue(cic->cfqq[ASYNC]);
- cic->cfqq[ASYNC] = NULL;
- }
- if (cic->cfqq[SYNC]) {
- cfq_put_queue(cic->cfqq[SYNC]);
- cic->cfqq[SYNC] = NULL;
- }
- cic->key = NULL;
- list_del_init(&cic->queue_list);
+
+ __cfq_exit_single_io_context(cfqd, cic);
}
spin_unlock_irq(q->queue_lock);
- spin_unlock(&cfq_exit_lock);
cfq_shutdown_timer_wq(cfqd);
struct cfq_data *cfqd;
int i;
- cfqd = kmalloc(sizeof(*cfqd), GFP_KERNEL);
+ cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL, q->node);
if (!cfqd)
return NULL;
INIT_LIST_HEAD(&cfqd->busy_rr);
INIT_LIST_HEAD(&cfqd->cur_rr);
INIT_LIST_HEAD(&cfqd->idle_rr);
- INIT_LIST_HEAD(&cfqd->empty_list);
INIT_LIST_HEAD(&cfqd->cic_list);
- cfqd->cfq_hash = kmalloc(sizeof(struct hlist_head) * CFQ_QHASH_ENTRIES, GFP_KERNEL);
+ cfqd->cfq_hash = kmalloc_node(sizeof(struct hlist_head) * CFQ_QHASH_ENTRIES, GFP_KERNEL, q->node);
if (!cfqd->cfq_hash)
goto out_free;
INIT_WORK(&cfqd->unplug_work, cfq_kick_queue, q);
- cfqd->cfq_queued = cfq_queued;
cfqd->cfq_quantum = cfq_quantum;
cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
return cfq_var_show(__data, (page)); \
}
SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
-SHOW_FUNCTION(cfq_queued_show, cfqd->cfq_queued, 0);
SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
return ret; \
}
STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
-STORE_FUNCTION(cfq_queued_store, &cfqd->cfq_queued, 1, UINT_MAX, 0);
STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
static struct elv_fs_entry cfq_attrs[] = {
CFQ_ATTR(quantum),
- CFQ_ATTR(queued),
CFQ_ATTR(fifo_expire_sync),
CFQ_ATTR(fifo_expire_async),
CFQ_ATTR(back_seek_max),
.elevator_may_queue_fn = cfq_may_queue,
.elevator_init_fn = cfq_init_queue,
.elevator_exit_fn = cfq_exit_queue,
- .trim = cfq_trim,
+ .trim = cfq_free_io_context,
},
.elevator_attrs = cfq_attrs,
.elevator_name = "cfq",
static void __exit cfq_exit(void)
{
- DECLARE_COMPLETION(all_gone);
+ DECLARE_COMPLETION_ONSTACK(all_gone);
elv_unregister(&iosched_cfq);
ioc_gone = &all_gone;
/* ioc_gone's update must be visible before reading ioc_count */
smp_wmb();
- if (atomic_read(&ioc_count))
+ if (elv_ioc_count_read(ioc_count))
wait_for_completion(ioc_gone);
synchronize_rcu();
cfq_slab_kill();