X-Git-Url: https://git.openpandora.org/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=block%2Fcfq-iosched.c;h=6200d9b9af28efbbd5aaac5373995ca8334dbaaf;hb=be883da7594b0a2a02074e683673ae0e522566a4;hp=67d446de0227a3ac71aaa1563728717a21af5eb9;hpb=206dc69b31ca05baac68c75b8ed2ba7dd857d273;p=pandora-kernel.git

diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 67d446de0227..6200d9b9af28 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -33,7 +33,7 @@ static int cfq_slice_idle = HZ / 70;
 #define CFQ_KEY_ASYNC		(0)
 
-static DEFINE_RWLOCK(cfq_exit_lock);
+static DEFINE_SPINLOCK(cfq_exit_lock);
 
 /*
  * for the hash of cfqq inside the cfqd
  */
@@ -60,14 +60,9 @@ static DEFINE_RWLOCK(cfq_exit_lock);
 /*
  * rb-tree defines
  */
-#define RB_NONE			(2)
 #define RB_EMPTY(node)		((node)->rb_node == NULL)
-#define RB_CLEAR_COLOR(node)	(node)->rb_color = RB_NONE
 #define RB_CLEAR(node)		do {	\
-	(node)->rb_parent = NULL; \
-	RB_CLEAR_COLOR((node)); \
-	(node)->rb_right = NULL; \
-	(node)->rb_left = NULL; \
+	memset(node, 0, sizeof(*node)); \
 } while (0)
 #define RB_CLEAR_ROOT(root)	((root)->rb_node = NULL)
 #define rb_entry_crq(node)	rb_entry((node), struct cfq_rq, rb_node)
@@ -133,6 +128,7 @@ struct cfq_data {
 	mempool_t *crq_pool;
 
 	int rq_in_driver;
+	int hw_tag;
 
 	/*
 	 * schedule slice state info
@@ -500,10 +496,13 @@ static void cfq_resort_rr_list(struct cfq_queue *cfqq, int preempted)
 
 	/*
 	 * if queue was preempted, just add to front to be fair. busy_rr
-	 * isn't sorted.
+	 * isn't sorted, but insert at the back for fairness.
	 */
 	if (preempted || list == &cfqd->busy_rr) {
-		list_add(&cfqq->cfq_list, list);
+		if (preempted)
+			list = list->prev;
+
+		list_add_tail(&cfqq->cfq_list, list);
 		return;
 	}
 
@@ -563,7 +562,6 @@ static inline void cfq_del_crq_rb(struct cfq_rq *crq)
 	cfq_update_next_crq(crq);
 
 	rb_erase(&crq->rb_node, &cfqq->sort_list);
-	RB_CLEAR_COLOR(&crq->rb_node);
 
 	if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY(&cfqq->sort_list))
 		cfq_del_cfqq_rr(cfqd, cfqq);
@@ -664,6 +662,15 @@ static void cfq_activate_request(request_queue_t *q, struct request *rq)
 	struct cfq_data *cfqd = q->elevator->elevator_data;
 
 	cfqd->rq_in_driver++;
+
+	/*
+	 * If the depth is larger than 1, it really could be queueing. But
+	 * let's make the mark a little higher - idling could still be good
+	 * for low queueing, and a low queueing number could also just
+	 * indicate a SCSI mid layer like behaviour where limit+1 is often seen.
+	 */
+	if (!cfqd->hw_tag && cfqd->rq_in_driver > 4)
+		cfqd->hw_tag = 1;
 }
 
 static void cfq_deactivate_request(request_queue_t *q, struct request *rq)
@@ -878,6 +885,13 @@ static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd)
 	if (!list_empty(&cfqd->cur_rr) || cfq_get_next_prio_level(cfqd) != -1)
 		cfqq = list_entry_cfqq(cfqd->cur_rr.next);
 
+	/*
+	 * If no new queues are available, check if the busy list has some
+	 * before falling back to idle io.
+	 */
+	if (!cfqq && !list_empty(&cfqd->busy_rr))
+		cfqq = list_entry_cfqq(cfqd->busy_rr.next);
+
 	/*
 	 * if we have idle queues and no rt or be queues had pending
 	 * requests, either allow immediate service if the grace period
@@ -1284,7 +1298,7 @@ static void cfq_exit_io_context(struct io_context *ioc)
 	/*
 	 * put the reference this task is holding to the various queues
 	 */
-	read_lock_irqsave(&cfq_exit_lock, flags);
+	spin_lock_irqsave(&cfq_exit_lock, flags);
 
 	n = rb_first(&ioc->cic_root);
 	while (n != NULL) {
@@ -1294,7 +1308,7 @@ static void cfq_exit_io_context(struct io_context *ioc)
 		n = rb_next(n);
 	}
 
-	read_unlock_irqrestore(&cfq_exit_lock, flags);
+	spin_unlock_irqrestore(&cfq_exit_lock, flags);
 }
 
 static struct cfq_io_context *
@@ -1303,17 +1317,11 @@ cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
 {
 	struct cfq_io_context *cic = kmem_cache_alloc(cfq_ioc_pool, gfp_mask);
 
 	if (cic) {
-		RB_CLEAR(&cic->rb_node);
-		cic->key = NULL;
-		cic->cfqq[ASYNC] = NULL;
-		cic->cfqq[SYNC] = NULL;
+		memset(cic, 0, sizeof(*cic));
 		cic->last_end_request = jiffies;
-		cic->ttime_total = 0;
-		cic->ttime_samples = 0;
-		cic->ttime_mean = 0;
+		INIT_LIST_HEAD(&cic->queue_list);
 		cic->dtor = cfq_free_io_context;
 		cic->exit = cfq_exit_io_context;
-		INIT_LIST_HEAD(&cic->queue_list);
 		atomic_inc(&ioc_count);
 	}
@@ -1400,17 +1409,17 @@ static int cfq_ioc_set_ioprio(struct io_context *ioc, unsigned int ioprio)
 	struct cfq_io_context *cic;
 	struct rb_node *n;
 
-	write_lock(&cfq_exit_lock);
+	spin_lock(&cfq_exit_lock);
 
 	n = rb_first(&ioc->cic_root);
 	while (n != NULL) {
 		cic = rb_entry(n, struct cfq_io_context, rb_node);
-
+
 		changed_ioprio(cic);
 		n = rb_next(n);
 	}
 
-	write_unlock(&cfq_exit_lock);
+	spin_unlock(&cfq_exit_lock);
 
 	return 0;
 }
@@ -1458,7 +1467,8 @@ retry:
 		 * set ->slice_left to allow preemption for a new process
 		 */
 		cfqq->slice_left = 2 * cfqd->cfq_slice_idle;
-		cfq_mark_cfqq_idle_window(cfqq);
+		if (!cfqd->hw_tag)
+			cfq_mark_cfqq_idle_window(cfqq);
 		cfq_mark_cfqq_prio_changed(cfqq);
 		cfq_init_prio_data(cfqq);
 	}
@@ -1472,19 +1482,38 @@ out:
 	return cfqq;
 }
 
+static void
+cfq_drop_dead_cic(struct io_context *ioc, struct cfq_io_context *cic)
+{
+	spin_lock(&cfq_exit_lock);
+	rb_erase(&cic->rb_node, &ioc->cic_root);
+	list_del_init(&cic->queue_list);
+	spin_unlock(&cfq_exit_lock);
+	kmem_cache_free(cfq_ioc_pool, cic);
+	atomic_dec(&ioc_count);
+}
+
 static struct cfq_io_context *
 cfq_cic_rb_lookup(struct cfq_data *cfqd, struct io_context *ioc)
 {
-	struct rb_node *n = ioc->cic_root.rb_node;
+	struct rb_node *n;
 	struct cfq_io_context *cic;
-	void *key = cfqd;
+	void *k, *key = cfqd;
 
+restart:
+	n = ioc->cic_root.rb_node;
 	while (n) {
 		cic = rb_entry(n, struct cfq_io_context, rb_node);
+		/* ->key must be copied to avoid race with cfq_exit_queue() */
+		k = cic->key;
+		if (unlikely(!k)) {
+			cfq_drop_dead_cic(ioc, cic);
+			goto restart;
+		}
 
-		if (key < cic->key)
+		if (key < k)
 			n = n->rb_left;
-		else if (key > cic->key)
+		else if (key > k)
 			n = n->rb_right;
 		else
 			return cic;
@@ -1497,33 +1526,41 @@ static inline void
 cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
 	     struct cfq_io_context *cic)
 {
-	struct rb_node **p = &ioc->cic_root.rb_node;
-	struct rb_node *parent = NULL;
+	struct rb_node **p;
+	struct rb_node *parent;
 	struct cfq_io_context *__cic;
-
-	read_lock(&cfq_exit_lock);
+	void *k;
 
 	cic->ioc = ioc;
 	cic->key = cfqd;
 
 	ioc->set_ioprio = cfq_ioc_set_ioprio;
-
+restart:
+	parent = NULL;
+	p = &ioc->cic_root.rb_node;
 	while (*p) {
 		parent = *p;
 		__cic = rb_entry(parent, struct cfq_io_context, rb_node);
+		/* ->key must be copied to avoid race with cfq_exit_queue() */
+		k = __cic->key;
+		if (unlikely(!k)) {
+			cfq_drop_dead_cic(ioc, __cic);
+			goto restart;
+		}
 
-		if (cic->key < __cic->key)
+		if (cic->key < k)
 			p = &(*p)->rb_left;
-		else if (cic->key > __cic->key)
+		else if (cic->key > k)
 			p = &(*p)->rb_right;
 		else
 			BUG();
 	}
 
+	spin_lock(&cfq_exit_lock);
 	rb_link_node(&cic->rb_node, parent, p);
 	rb_insert_color(&cic->rb_node, &ioc->cic_root);
 	list_add(&cic->queue_list, &cfqd->cic_list);
-	read_unlock(&cfq_exit_lock);
+	spin_unlock(&cfq_exit_lock);
 }
 
 /*
@@ -1622,7 +1659,7 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 {
 	int enable_idle = cfq_cfqq_idle_window(cfqq);
 
-	if (!cic->ioc->task || !cfqd->cfq_slice_idle)
+	if (!cic->ioc->task || !cfqd->cfq_slice_idle || cfqd->hw_tag)
 		enable_idle = 0;
 	else if (sample_valid(cic->ttime_samples)) {
 		if (cic->ttime_mean > cfqd->cfq_slice_idle)
@@ -1713,14 +1750,24 @@ cfq_crq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 
 	cfqq->next_crq = cfq_choose_req(cfqd, cfqq->next_crq, crq);
 
+	cic = crq->io_context;
+
 	/*
 	 * we never wait for an async request and we don't allow preemption
 	 * of an async request. so just return early
 	 */
-	if (!cfq_crq_is_sync(crq))
+	if (!cfq_crq_is_sync(crq)) {
+		/*
+		 * sync process issued an async request; if it's waiting
+		 * then expire it and kick rq handling.
+		 */
+		if (cic == cfqd->active_cic &&
+		    del_timer(&cfqd->idle_slice_timer)) {
+			cfq_slice_expired(cfqd, 0);
+			cfq_start_queueing(cfqd, cfqq);
+		}
 		return;
-
-	cic = crq->io_context;
+	}
 
 	cfq_update_io_thinktime(cfqd, cic);
 	cfq_update_io_seektime(cfqd, cic, crq);
@@ -2138,10 +2185,9 @@ static void cfq_idle_class_timer(unsigned long data)
 	 * race with a non-idle queue, reset timer
 	 */
 	end = cfqd->last_end_request + CFQ_IDLE_GRACE;
-	if (!time_after_eq(jiffies, end)) {
-		cfqd->idle_class_timer.expires = end;
-		add_timer(&cfqd->idle_class_timer);
-	} else
+	if (!time_after_eq(jiffies, end))
+		mod_timer(&cfqd->idle_class_timer, end);
+	else
 		cfq_schedule_dispatch(cfqd);
 
 	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
@@ -2161,7 +2207,7 @@ static void cfq_exit_queue(elevator_t *e)
 
 	cfq_shutdown_timer_wq(cfqd);
 
-	write_lock(&cfq_exit_lock);
+	spin_lock(&cfq_exit_lock);
 	spin_lock_irq(q->queue_lock);
 
 	if (cfqd->active_queue)
@@ -2184,7 +2230,7 @@ static void cfq_exit_queue(elevator_t *e)
 	}
 
 	spin_unlock_irq(q->queue_lock);
-	write_unlock(&cfq_exit_lock);
+	spin_unlock(&cfq_exit_lock);
 
 	cfq_shutdown_timer_wq(cfqd);
 
@@ -2194,14 +2240,14 @@ static void cfq_exit_queue(elevator_t *e)
 	kfree(cfqd);
 }
 
-static int cfq_init_queue(request_queue_t *q, elevator_t *e)
+static void *cfq_init_queue(request_queue_t *q, elevator_t *e)
 {
 	struct cfq_data *cfqd;
 	int i;
 
 	cfqd = kmalloc(sizeof(*cfqd), GFP_KERNEL);
 	if (!cfqd)
-		return -ENOMEM;
+		return NULL;
 
 	memset(cfqd, 0, sizeof(*cfqd));
 
@@ -2231,8 +2277,6 @@ static int cfq_init_queue(request_queue_t *q, elevator_t *e)
 	for (i = 0; i < CFQ_QHASH_ENTRIES; i++)
 		INIT_HLIST_HEAD(&cfqd->cfq_hash[i]);
 
-	e->elevator_data = cfqd;
-
 	cfqd->queue = q;
 
 	cfqd->max_queued = q->nr_requests / 4;
@@ -2259,14 +2303,14 @@ static int cfq_init_queue(request_queue_t *q, elevator_t *e)
 	cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
 	cfqd->cfq_slice_idle = cfq_slice_idle;
 
-	return 0;
+	return cfqd;
 out_crqpool:
 	kfree(cfqd->cfq_hash);
 out_cfqhash:
 	kfree(cfqd->crq_hash);
 out_crqhash:
 	kfree(cfqd);
-	return -ENOMEM;
+	return NULL;
 }
 
 static void cfq_slab_kill(void)
@@ -2439,9 +2483,10 @@ static void __exit cfq_exit(void)
 	DECLARE_COMPLETION(all_gone);
 	elv_unregister(&iosched_cfq);
 	ioc_gone = &all_gone;
-	barrier();
+	/* ioc_gone's update must be visible before reading ioc_count */
+	smp_wmb();
 	if (atomic_read(&ioc_count))
-		complete(ioc_gone);
+		wait_for_completion(ioc_gone);
 	synchronize_rcu();
 	cfq_slab_kill();
 }
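
Aside: the hw_tag change in cfq_activate_request() above infers hardware command queueing (TCQ/NCQ) purely from the observed driver queue depth, and the threshold of 4 is deliberately higher than 1 to avoid mistaking a SCSI-midlayer-style limit+1 burst for real queueing. Below is a minimal standalone sketch of just that heuristic, in plain C; struct dev_state and activate_request() are illustrative names, not kernel symbols, and the deactivate path that would decrement the counter is omitted.

#include <stdio.h>

struct dev_state {
	int rq_in_driver;	/* requests currently handed to the driver */
	int hw_tag;		/* set once the device is believed to queue internally */
};

static void activate_request(struct dev_state *d)
{
	d->rq_in_driver++;

	/*
	 * Depth > 4 is taken as evidence of real command queueing; once
	 * set, hw_tag stays set and idling is disabled, since idling only
	 * pays off when the device works on one request at a time.
	 */
	if (!d->hw_tag && d->rq_in_driver > 4)
		d->hw_tag = 1;
}

int main(void)
{
	struct dev_state d = { 0, 0 };

	for (int i = 0; i < 6; i++)
		activate_request(&d);
	printf("hw_tag = %d\n", d.hw_tag);	/* prints 1 */
	return 0;
}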
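A second aside, on the cic changes: cfq_cic_rb_lookup() and cfq_cic_link() now reap "dead" cfq_io_contexts lazily. cfq_exit_queue() only clears cic->key, and the next walker that trips over a NULL key unlinks and frees the node, then restarts the walk because rb_erase() may rebalance the tree. The following self-contained userspace sketch shows the same pattern with a singly linked list standing in for the kernel rbtree; all names (struct cic, cic_lookup, drop_dead_cic) are illustrative only.

#include <stdio.h>
#include <stdlib.h>

struct cic {
	void *key;		/* NULL means the owning queue has exited */
	struct cic *next;
};

static struct cic *cic_root;

/* unlink and free a dead entry; stands in for cfq_drop_dead_cic() */
static void drop_dead_cic(struct cic **pp)
{
	struct cic *dead = *pp;

	*pp = dead->next;
	free(dead);
}

static struct cic *cic_lookup(void *key)
{
	struct cic **pp;

restart:
	for (pp = &cic_root; *pp; pp = &(*pp)->next) {
		/* ->key is read once; the owner may clear it concurrently */
		void *k = (*pp)->key;

		if (!k) {
			drop_dead_cic(pp);
			/*
			 * The kernel restarts because rb_erase() can
			 * rebalance the tree; a plain list would not
			 * strictly need it, but the shape is the same.
			 */
			goto restart;
		}
		if (k == key)
			return *pp;
	}
	return NULL;
}

int main(void)
{
	static int key_a, key_b;
	struct cic *a = calloc(1, sizeof(*a));
	struct cic *b = calloc(1, sizeof(*b));

	a->key = &key_a;
	a->next = b;
	b->key = NULL;		/* b's owner has "exited" */
	cic_root = a;

	printf("live lookup: %s\n", cic_lookup(&key_a) == a ? "hit" : "miss");
	printf("dead lookup: %s\n", cic_lookup(&key_b) ? "hit" : "miss (b reaped)");
	free(a);
	return 0;
}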