X-Git-Url: https://git.openpandora.org/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=block%2Fll_rw_blk.c;h=e112d1a5dab6b8c2dcae45e2a6b18bbfc4529760;hb=f17a2686b11453680e9662ef8bdc8d948d0dce18;hp=062067fa7ead0340ee3f5da91f82af80ec097e82;hpb=1b9a3917366028cc451a98dd22e3bcd537d4e5c1;p=pandora-kernel.git

diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 062067fa7ead..eee03a3876a3 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -638,7 +638,7 @@ void blk_queue_bounce_limit(request_queue_t *q, u64 dma_addr)
 	/* Assume anything <= 4GB can be handled by IOMMU.
 	   Actually some IOMMUs can handle everything, but I don't
 	   know of a way to test this here. */
-	if (bounce_pfn < (0xffffffff>>PAGE_SHIFT))
+	if (bounce_pfn < (min_t(u64,0xffffffff,BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
 		dma = 1;
 	q->bounce_pfn = max_low_pfn;
 #else
@@ -785,6 +785,8 @@ void blk_queue_stack_limits(request_queue_t *t, request_queue_t *b)
 	t->max_hw_segments = min(t->max_hw_segments,b->max_hw_segments);
 	t->max_segment_size = min(t->max_segment_size,b->max_segment_size);
 	t->hardsect_size = max(t->hardsect_size,b->hardsect_size);
+	if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags))
+		clear_bit(QUEUE_FLAG_CLUSTER, &t->queue_flags);
 }
 
 EXPORT_SYMBOL(blk_queue_stack_limits);
@@ -906,17 +908,15 @@ init_tag_map(request_queue_t *q, struct blk_queue_tag *tags, int depth)
 			__FUNCTION__, depth);
 	}
 
-	tag_index = kmalloc(depth * sizeof(struct request *), GFP_ATOMIC);
+	tag_index = kzalloc(depth * sizeof(struct request *), GFP_ATOMIC);
 	if (!tag_index)
 		goto fail;
 
 	nr_ulongs = ALIGN(depth, BITS_PER_LONG) / BITS_PER_LONG;
-	tag_map = kmalloc(nr_ulongs * sizeof(unsigned long), GFP_ATOMIC);
+	tag_map = kzalloc(nr_ulongs * sizeof(unsigned long), GFP_ATOMIC);
 	if (!tag_map)
 		goto fail;
 
-	memset(tag_index, 0, depth * sizeof(struct request *));
-	memset(tag_map, 0, nr_ulongs * sizeof(unsigned long));
 	tags->real_max_depth = depth;
 	tags->max_depth = depth;
 	tags->tag_index = tag_index;
@@ -1554,7 +1554,7 @@ void blk_plug_device(request_queue_t *q)
 	 * don't plug a stopped queue, it must be paired with blk_start_queue()
 	 * which will restart the queueing
 	 */
-	if (test_bit(QUEUE_FLAG_STOPPED, &q->queue_flags))
+	if (blk_queue_stopped(q))
 		return;
 
 	if (!test_and_set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) {
@@ -1587,7 +1587,7 @@ EXPORT_SYMBOL(blk_remove_plug);
  */
 void __generic_unplug_device(request_queue_t *q)
 {
-	if (unlikely(test_bit(QUEUE_FLAG_STOPPED, &q->queue_flags)))
+	if (unlikely(blk_queue_stopped(q)))
 		return;
 
 	if (!blk_remove_plug(q))
@@ -1663,6 +1663,8 @@ static void blk_unplug_timeout(unsigned long data)
  **/
 void blk_start_queue(request_queue_t *q)
 {
+	WARN_ON(!irqs_disabled());
+
 	clear_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
 
 	/*
@@ -1732,15 +1734,28 @@ void blk_run_queue(struct request_queue *q)
 
 	spin_lock_irqsave(q->queue_lock, flags);
 	blk_remove_plug(q);
-	if (!elv_queue_empty(q))
-		q->request_fn(q);
+
+	/*
+	 * Only recurse once to avoid overrunning the stack, let the unplug
+	 * handling reinvoke the handler shortly if we already got there.
+	 */
+	if (!elv_queue_empty(q)) {
+		if (!test_and_set_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
+			q->request_fn(q);
+			clear_bit(QUEUE_FLAG_REENTER, &q->queue_flags);
+		} else {
+			blk_plug_device(q);
+			kblockd_schedule_work(&q->unplug_work);
+		}
+	}
+
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(blk_run_queue);
 
 /**
  * blk_cleanup_queue: - release a &request_queue_t when it is no longer needed
- * @q: the request queue to be released
+ * @kobj: the kobj belonging of the request queue to be released
  *
  * Description:
  *     blk_cleanup_queue is the pair to blk_init_queue() or
@@ -1865,7 +1880,8 @@ EXPORT_SYMBOL(blk_alloc_queue_node);
  * get dealt with eventually.
  *
  * The queue spin lock must be held while manipulating the requests on the
- * request queue.
+ * request queue; this lock will be taken also from interrupt context, so irq
+ * disabling is needed for it.
  *
  * Function returns a pointer to the initialized request queue, or NULL if
  * it didn't succeed.
@@ -2479,10 +2495,12 @@ void blk_execute_rq_nowait(request_queue_t *q, struct gendisk *bd_disk,
 	rq->rq_disk = bd_disk;
 	rq->flags |= REQ_NOMERGE;
 	rq->end_io = done;
-	elv_add_request(q, rq, where, 1);
-	generic_unplug_device(q);
+	WARN_ON(irqs_disabled());
+	spin_lock_irq(q->queue_lock);
+	__elv_add_request(q, rq, where, 1);
+	__generic_unplug_device(q);
+	spin_unlock_irq(q->queue_lock);
 }
-
 EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
 
 /**
@@ -2727,7 +2745,7 @@ static int attempt_merge(request_queue_t *q, struct request *req,
 		return 0;
 
 	/*
-	 * not contigious
+	 * not contiguous
 	 */
 	if (req->sector + req->nr_sectors != next->sector)
 		return 0;
@@ -2809,6 +2827,9 @@ static void init_request_from_bio(struct request *req, struct bio *bio)
 	if (unlikely(bio_barrier(bio)))
 		req->flags |= (REQ_HARDBARRIER | REQ_NOMERGE);
 
+	if (bio_sync(bio))
+		req->flags |= REQ_RW_SYNC;
+
 	req->errors = 0;
 	req->hard_sector = req->sector = bio->bi_sector;
 	req->hard_nr_sectors = req->nr_sectors = bio_sectors(bio);
@@ -3344,12 +3365,11 @@ EXPORT_SYMBOL(end_that_request_chunk);
  */
 static void blk_done_softirq(struct softirq_action *h)
 {
-	struct list_head *cpu_list;
-	LIST_HEAD(local_list);
+	struct list_head *cpu_list, local_list;
 
 	local_irq_disable();
 	cpu_list = &__get_cpu_var(blk_cpu_done);
-	list_splice_init(cpu_list, &local_list);
+	list_replace_init(cpu_list, &local_list);
 	local_irq_enable();
 
 	while (!list_empty(&local_list)) {
@@ -3395,7 +3415,7 @@ static struct notifier_block __devinitdata blk_cpu_notifier = {
  *
  * Description:
  *     Ends all I/O on a request. It does not handle partial completions,
- *     unless the driver actually implements this in its completionc callback
+ *     unless the driver actually implements this in its completion callback
 *     through requeueing. Theh actual completion happens out-of-order,
 *     through a softirq handler. The user must have registered a completion
 *     callback through blk_queue_softirq_done().
@@ -3437,7 +3457,12 @@ void end_that_request_last(struct request *req, int uptodate)
 	if (unlikely(laptop_mode) && blk_fs_request(req))
 		laptop_io_completion();
 
-	if (disk && blk_fs_request(req)) {
+	/*
+	 * Account IO completion. bar_rq isn't accounted as a normal
+	 * IO on queueing nor completion.  Accounting the containing
+	 * request is enough.
+	 */
+	if (disk && blk_fs_request(req) && req != &req->q->bar_rq) {
 		unsigned long duration = jiffies - req->start_time;
 		const int rw = rq_data_dir(req);
 
@@ -3512,13 +3537,11 @@ int __init blk_dev_init(void)
 	iocontext_cachep = kmem_cache_create("blkdev_ioc",
 			sizeof(struct io_context), 0, SLAB_PANIC, NULL, NULL);
 
-	for_each_cpu(i)
+	for_each_possible_cpu(i)
 		INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
 
 	open_softirq(BLOCK_SOFTIRQ, blk_done_softirq, NULL);
-#ifdef CONFIG_HOTPLUG_CPU
-	register_cpu_notifier(&blk_cpu_notifier);
-#endif
+	register_hotcpu_notifier(&blk_cpu_notifier);
 
 	blk_max_low_pfn = max_low_pfn;
 	blk_max_pfn = max_pfn;
@@ -3537,11 +3560,17 @@ void put_io_context(struct io_context *ioc)
 	BUG_ON(atomic_read(&ioc->refcount) == 0);
 
 	if (atomic_dec_and_test(&ioc->refcount)) {
+		struct cfq_io_context *cic;
+
 		rcu_read_lock();
 		if (ioc->aic && ioc->aic->dtor)
 			ioc->aic->dtor(ioc->aic);
-		if (ioc->cic && ioc->cic->dtor)
-			ioc->cic->dtor(ioc->cic);
+		if (ioc->cic_root.rb_node != NULL) {
+			struct rb_node *n = rb_first(&ioc->cic_root);
+
+			cic = rb_entry(n, struct cfq_io_context, rb_node);
+			cic->dtor(ioc);
+		}
 		rcu_read_unlock();
 
 		kmem_cache_free(iocontext_cachep, ioc);
@@ -3554,6 +3583,7 @@ void exit_io_context(void)
 {
 	unsigned long flags;
 	struct io_context *ioc;
+	struct cfq_io_context *cic;
 
 	local_irq_save(flags);
 	task_lock(current);
@@ -3565,9 +3595,11 @@ void exit_io_context(void)
 
 	if (ioc->aic && ioc->aic->exit)
 		ioc->aic->exit(ioc->aic);
-	if (ioc->cic && ioc->cic->exit)
-		ioc->cic->exit(ioc->cic);
-
+	if (ioc->cic_root.rb_node != NULL) {
+		cic = rb_entry(rb_first(&ioc->cic_root), struct cfq_io_context, rb_node);
+		cic->exit(ioc);
+	}
+
 	put_io_context(ioc);
 }
 
@@ -3596,7 +3628,7 @@ struct io_context *current_io_context(gfp_t gfp_flags)
 		ret->last_waited = jiffies; /* doesn't matter... */
 		ret->nr_batch_requests = 0; /* because this is 0 */
 		ret->aic = NULL;
-		ret->cic = NULL;
+		ret->cic_root.rb_node = NULL;
 		tsk->io_context = ret;
 	}
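The blk_run_queue() hunk above stops calling q->request_fn(q) unconditionally and instead guards it with QUEUE_FLAG_REENTER: the handler is invoked directly only when we are not already inside it, otherwise the queue is re-plugged and the work is handed to kblockd. A minimal userspace sketch of that re-entrancy guard pattern follows; run_queue(), request_fn() and defer_work() are hypothetical stand-ins for illustration, not the real block-layer API.

/*
 * Userspace sketch (not kernel code) of the re-entrancy guard added to
 * blk_run_queue(): run the handler only if we are not already inside it,
 * otherwise defer the work instead of recursing.
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_flag reenter = ATOMIC_FLAG_INIT;	/* analogue of QUEUE_FLAG_REENTER */
static int pending = 3;

static void run_queue(void);

/* Stand-in for q->request_fn(); may call back into run_queue(). */
static void request_fn(void)
{
	if (--pending > 0)
		run_queue();	/* would recurse without the guard */
}

/* Stand-in for kblockd_schedule_work(): just note that work was deferred. */
static void defer_work(void)
{
	printf("deferred: already inside the handler\n");
}

static void run_queue(void)
{
	/* Analogue of test_and_set_bit(QUEUE_FLAG_REENTER, &q->queue_flags). */
	if (!atomic_flag_test_and_set(&reenter)) {
		request_fn();
		atomic_flag_clear(&reenter);
	} else {
		defer_work();
	}
}

int main(void)
{
	run_queue();
	return 0;
}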
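The init_tag_map() hunks fold kmalloc() followed by memset() into kzalloc(), which returns already-zeroed memory. A userspace analogue of the same simplification, assuming calloc() as the stand-in for kzalloc() and a hypothetical alloc_tag_index() helper:

/*
 * Userspace sketch (not kernel code) of the kmalloc()+memset() -> kzalloc()
 * change in init_tag_map(), with calloc() playing the role of kzalloc().
 */
#include <stdlib.h>
#include <string.h>

struct request;

/* Before: allocate, then zero by hand. */
static struct request **alloc_tag_index_old(int depth)
{
	struct request **tag_index = malloc(depth * sizeof(struct request *));

	if (!tag_index)
		return NULL;
	memset(tag_index, 0, depth * sizeof(struct request *));
	return tag_index;
}

/* After: one call that returns zeroed memory. */
static struct request **alloc_tag_index_new(int depth)
{
	return calloc(depth, sizeof(struct request *));
}

int main(void)
{
	struct request **a = alloc_tag_index_old(16);
	struct request **b = alloc_tag_index_new(16);

	free(a);
	free(b);
	return 0;
}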