Merge branch 'upstream' of git://ftp.linux-mips.org/pub/scm/upstream-linus
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index f9fc07e..6c793b1 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -454,7 +454,7 @@ static void queue_flush(request_queue_t *q, unsigned which)
        rq->end_io = end_io;
        q->prepare_flush_fn(q, rq);
 
-       __elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0);
+       elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
 }
 
 static inline struct request *start_ordered(request_queue_t *q,
@@ -490,7 +490,7 @@ static inline struct request *start_ordered(request_queue_t *q,
        else
                q->ordseq |= QUEUE_ORDSEQ_POSTFLUSH;
 
-       __elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0);
+       elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
 
        if (q->ordered & QUEUE_ORDERED_PREFLUSH) {
                queue_flush(q, QUEUE_ORDERED_PREFLUSH);
@@ -508,7 +508,7 @@ static inline struct request *start_ordered(request_queue_t *q,
 
 int blk_do_ordered(request_queue_t *q, struct request **rqp)
 {
-       struct request *rq = *rqp, *allowed_rq;
+       struct request *rq = *rqp;
        int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq);
 
        if (!q->ordseq) {
@@ -532,32 +532,26 @@ int blk_do_ordered(request_queue_t *q, struct request **rqp)
                }
        }
 
+       /*
+        * Ordered sequence in progress
+        */
+
+       /* Special requests are not subject to ordering rules. */
+       if (!blk_fs_request(rq) &&
+           rq != &q->pre_flush_rq && rq != &q->post_flush_rq)
+               return 1;
+
        if (q->ordered & QUEUE_ORDERED_TAG) {
+               /* Ordered by tag.  Blocking the next barrier is enough. */
                if (is_barrier && rq != &q->bar_rq)
                        *rqp = NULL;
-               return 1;
-       }
-
-       switch (blk_ordered_cur_seq(q)) {
-       case QUEUE_ORDSEQ_PREFLUSH:
-               allowed_rq = &q->pre_flush_rq;
-               break;
-       case QUEUE_ORDSEQ_BAR:
-               allowed_rq = &q->bar_rq;
-               break;
-       case QUEUE_ORDSEQ_POSTFLUSH:
-               allowed_rq = &q->post_flush_rq;
-               break;
-       default:
-               allowed_rq = NULL;
-               break;
+       } else {
+               /* Ordered by draining.  Wait for turn. */
+               WARN_ON(blk_ordered_req_seq(rq) < blk_ordered_cur_seq(q));
+               if (blk_ordered_req_seq(rq) > blk_ordered_cur_seq(q))
+                       *rqp = NULL;
        }
 
-       if (rq != allowed_rq &&
-           (blk_fs_request(rq) || rq == &q->pre_flush_rq ||
-            rq == &q->post_flush_rq))
-               *rqp = NULL;
-
        return 1;
 }
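
For context, a rough sketch of the caller side, written from memory rather than taken from this diff: the elevator's next-request path peeks at the head of the dispatch list and lets blk_do_ordered() either pass the request through or park it (by clearing the pointer) until the ordered sequence catches up.

	/* Sketch only -- approximate shape of the dispatch loop that consumes
	 * blk_do_ordered(); not part of this patch. */
	while (!list_empty(&q->queue_head)) {
		struct request *rq = list_entry_rq(q->queue_head.next);

		if (blk_do_ordered(q, &rq))
			return rq;	/* rq may have been set to NULL: nothing
					 * dispatchable until the sequence advances */
	}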
 
@@ -631,26 +625,31 @@ static inline int ordered_bio_endio(struct request *rq, struct bio *bio,
  *    Different hardware can have different requirements as to what pages
  *    it can do I/O directly to. A low level driver can call
  *    blk_queue_bounce_limit to have lower memory pages allocated as bounce
- *    buffers for doing I/O to pages residing above @page. By default
- *    the block layer sets this to the highest numbered "low" memory page.
+ *    buffers for doing I/O to pages residing above @page.
  **/
 void blk_queue_bounce_limit(request_queue_t *q, u64 dma_addr)
 {
        unsigned long bounce_pfn = dma_addr >> PAGE_SHIFT;
-
-       /*
-        * set appropriate bounce gfp mask -- unfortunately we don't have a
-        * full 4GB zone, so we have to resort to low memory for any bounces.
-        * ISA has its own < 16MB zone.
-        */
-       if (bounce_pfn < blk_max_low_pfn) {
-               BUG_ON(dma_addr < BLK_BOUNCE_ISA);
+       int dma = 0;
+
+       q->bounce_gfp = GFP_NOIO;
+#if BITS_PER_LONG == 64
+       /* Assume anything <= 4GB can be handled by IOMMU.
+          Actually some IOMMUs can handle everything, but I don't
+          know of a way to test this here. */
+       if (bounce_pfn < (0xffffffff>>PAGE_SHIFT))
+               dma = 1;
+       q->bounce_pfn = max_low_pfn;
+#else
+       if (bounce_pfn < blk_max_low_pfn)
+               dma = 1;
+       q->bounce_pfn = bounce_pfn;
+#endif
+       if (dma) {
                init_emergency_isa_pool();
                q->bounce_gfp = GFP_NOIO | GFP_DMA;
-       } else
-               q->bounce_gfp = GFP_NOIO;
-
-       q->bounce_pfn = bounce_pfn;
+               q->bounce_pfn = bounce_pfn;
+       }
 }
 
 EXPORT_SYMBOL(blk_queue_bounce_limit);
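
A usage sketch, not taken from this patch: a low-level driver whose device can only DMA into the ISA range would set the limit once at queue setup time. The function name below is hypothetical.

	/* Hypothetical driver setup: pages above the 16MB ISA boundary will be
	 * bounced through the ISA emergency pool initialised above. */
	static void exampledev_setup_queue(request_queue_t *q)
	{
		blk_queue_bounce_limit(q, BLK_BOUNCE_ISA);
	}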
@@ -1741,16 +1740,11 @@ EXPORT_SYMBOL(blk_run_queue);
  *     Hopefully the low level driver will have finished any
  *     outstanding requests first...
  **/
-void blk_cleanup_queue(request_queue_t * q)
+static void blk_release_queue(struct kobject *kobj)
 {
+       request_queue_t *q = container_of(kobj, struct request_queue, kobj);
        struct request_list *rl = &q->rq;
 
-       if (!atomic_dec_and_test(&q->refcnt))
-               return;
-
-       if (q->elevator)
-               elevator_exit(q->elevator);
-
        blk_sync_queue(q);
 
        if (rl->rq_pool)
@@ -1762,6 +1756,24 @@ void blk_cleanup_queue(request_queue_t * q)
        kmem_cache_free(requestq_cachep, q);
 }
 
+void blk_put_queue(request_queue_t *q)
+{
+       kobject_put(&q->kobj);
+}
+EXPORT_SYMBOL(blk_put_queue);
+
+void blk_cleanup_queue(request_queue_t * q)
+{
+       mutex_lock(&q->sysfs_lock);
+       set_bit(QUEUE_FLAG_DEAD, &q->queue_flags);
+       mutex_unlock(&q->sysfs_lock);
+
+       if (q->elevator)
+               elevator_exit(q->elevator);
+
+       blk_put_queue(q);
+}
+
 EXPORT_SYMBOL(blk_cleanup_queue);
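
A hedged lifetime sketch with made-up names, not from this patch: after the switch from the atomic refcnt to the queue kobject, a driver still pairs blk_init_queue() with blk_cleanup_queue(), but the queue memory is only freed once the last reference is dropped and queue_ktype's release method (blk_release_queue() above) runs.

	/* Hypothetical driver teardown illustrating the split. */
	static void exampledev_remove(struct exampledev *dev)
	{
		blk_cleanup_queue(dev->queue);	/* marks QUEUE_FLAG_DEAD, exits the
						 * elevator, drops the driver's ref */
		dev->queue = NULL;		/* freeing happens later, in
						 * blk_release_queue(), after the
						 * final blk_put_queue()/kobject_put() */
	}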
 
 static int blk_init_free_list(request_queue_t *q)
@@ -1789,6 +1801,8 @@ request_queue_t *blk_alloc_queue(gfp_t gfp_mask)
 }
 EXPORT_SYMBOL(blk_alloc_queue);
 
+static struct kobj_type queue_ktype;
+
 request_queue_t *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 {
        request_queue_t *q;
@@ -1799,11 +1813,16 @@ request_queue_t *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 
        memset(q, 0, sizeof(*q));
        init_timer(&q->unplug_timer);
-       atomic_set(&q->refcnt, 1);
+
+       snprintf(q->kobj.name, KOBJ_NAME_LEN, "%s", "queue");
+       q->kobj.ktype = &queue_ktype;
+       kobject_init(&q->kobj);
 
        q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug;
        q->backing_dev_info.unplug_io_data = q;
 
+       mutex_init(&q->sysfs_lock);
+
        return q;
 }
 EXPORT_SYMBOL(blk_alloc_queue_node);
@@ -1855,8 +1874,10 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
                return NULL;
 
        q->node = node_id;
-       if (blk_init_free_list(q))
-               goto out_init;
+       if (blk_init_free_list(q)) {
+               kmem_cache_free(requestq_cachep, q);
+               return NULL;
+       }
 
        /*
         * if caller didn't supply a lock, they get per-queue locking with
@@ -1892,9 +1913,7 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
                return q;
        }
 
-       blk_cleanup_queue(q);
-out_init:
-       kmem_cache_free(requestq_cachep, q);
+       blk_put_queue(q);
        return NULL;
 }
 EXPORT_SYMBOL(blk_init_queue_node);
@@ -1902,7 +1921,7 @@ EXPORT_SYMBOL(blk_init_queue_node);
 int blk_get_queue(request_queue_t *q)
 {
        if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
-               atomic_inc(&q->refcnt);
+               kobject_get(&q->kobj);
                return 0;
        }
 
@@ -3453,7 +3472,7 @@ int __init blk_dev_init(void)
        iocontext_cachep = kmem_cache_create("blkdev_ioc",
                        sizeof(struct io_context), 0, SLAB_PANIC, NULL, NULL);
 
-       for (i = 0; i < NR_CPUS; i++)
+       for_each_cpu(i)
                INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
 
        open_softirq(BLOCK_SOFTIRQ, blk_done_softirq, NULL);
@@ -3478,10 +3497,12 @@ void put_io_context(struct io_context *ioc)
        BUG_ON(atomic_read(&ioc->refcount) == 0);
 
        if (atomic_dec_and_test(&ioc->refcount)) {
+               rcu_read_lock();
                if (ioc->aic && ioc->aic->dtor)
                        ioc->aic->dtor(ioc->aic);
                if (ioc->cic && ioc->cic->dtor)
                        ioc->cic->dtor(ioc->cic);
+               rcu_read_unlock();
 
                kmem_cache_free(iocontext_cachep, ioc);
        }
@@ -3615,10 +3636,13 @@ static ssize_t
 queue_requests_store(struct request_queue *q, const char *page, size_t count)
 {
        struct request_list *rl = &q->rq;
+       unsigned long nr;
+       int ret = queue_var_store(&nr, page, count);
+       if (nr < BLKDEV_MIN_RQ)
+               nr = BLKDEV_MIN_RQ;
 
-       int ret = queue_var_store(&q->nr_requests, page, count);
-       if (q->nr_requests < BLKDEV_MIN_RQ)
-               q->nr_requests = BLKDEV_MIN_RQ;
+       spin_lock_irq(q->queue_lock);
+       q->nr_requests = nr;
        blk_queue_congestion_threshold(q);
 
        if (rl->count[READ] >= queue_congestion_on_threshold(q))
@@ -3644,6 +3668,7 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
                blk_clear_queue_full(q, WRITE);
                wake_up(&rl->wait[WRITE]);
        }
+       spin_unlock_irq(q->queue_lock);
        return ret;
 }
 
@@ -3759,13 +3784,19 @@ static ssize_t
 queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
 {
        struct queue_sysfs_entry *entry = to_queue(attr);
-       struct request_queue *q;
+       request_queue_t *q = container_of(kobj, struct request_queue, kobj);
+       ssize_t res;
 
-       q = container_of(kobj, struct request_queue, kobj);
        if (!entry->show)
                return -EIO;
-
-       return entry->show(q, page);
+       mutex_lock(&q->sysfs_lock);
+       if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
+               mutex_unlock(&q->sysfs_lock);
+               return -ENOENT;
+       }
+       res = entry->show(q, page);
+       mutex_unlock(&q->sysfs_lock);
+       return res;
 }
 
 static ssize_t
@@ -3773,13 +3804,20 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
                    const char *page, size_t length)
 {
        struct queue_sysfs_entry *entry = to_queue(attr);
-       struct request_queue *q;
+       request_queue_t *q = container_of(kobj, struct request_queue, kobj);
+
+       ssize_t res;
 
-       q = container_of(kobj, struct request_queue, kobj);
        if (!entry->store)
                return -EIO;
-
-       return entry->store(q, page, length);
+       mutex_lock(&q->sysfs_lock);
+       if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
+               mutex_unlock(&q->sysfs_lock);
+               return -ENOENT;
+       }
+       res = entry->store(q, page, length);
+       mutex_unlock(&q->sysfs_lock);
+       return res;
 }
 
 static struct sysfs_ops queue_sysfs_ops = {
@@ -3790,6 +3828,7 @@ static struct sysfs_ops queue_sysfs_ops = {
 static struct kobj_type queue_ktype = {
        .sysfs_ops      = &queue_sysfs_ops,
        .default_attrs  = default_attrs,
+       .release        = blk_release_queue,
 };
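
With .release wired up, the final kobject_put() on q->kobj is what ends up in blk_release_queue(). A hedged sketch of a reference holder, with a made-up context structure and error code chosen for illustration:

	/* Hypothetical code that stashes a queue pointer: it must pin the queue
	 * and may fail once blk_cleanup_queue() has marked it dead. */
	static int example_attach(request_queue_t *q, struct example_ctx *ctx)
	{
		if (blk_get_queue(q))		/* fails after QUEUE_FLAG_DEAD is set */
			return -ENXIO;
		ctx->queue = q;
		return 0;
	}

	static void example_detach(struct example_ctx *ctx)
	{
		blk_put_queue(ctx->queue);	/* last put triggers blk_release_queue() */
		ctx->queue = NULL;
	}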
 
 int blk_register_queue(struct gendisk *disk)
@@ -3802,19 +3841,17 @@ int blk_register_queue(struct gendisk *disk)
                return -ENXIO;
 
        q->kobj.parent = kobject_get(&disk->kobj);
-       if (!q->kobj.parent)
-               return -EBUSY;
 
-       snprintf(q->kobj.name, KOBJ_NAME_LEN, "%s", "queue");
-       q->kobj.ktype = &queue_ktype;
-
-       ret = kobject_register(&q->kobj);
+       ret = kobject_add(&q->kobj);
        if (ret < 0)
                return ret;
 
+       kobject_uevent(&q->kobj, KOBJ_ADD);
+
        ret = elv_register_queue(q);
        if (ret) {
-               kobject_unregister(&q->kobj);
+               kobject_uevent(&q->kobj, KOBJ_REMOVE);
+               kobject_del(&q->kobj);
                return ret;
        }
 
@@ -3828,7 +3865,8 @@ void blk_unregister_queue(struct gendisk *disk)
        if (q && q->request_fn) {
                elv_unregister_queue(q);
 
-               kobject_unregister(&q->kobj);
+               kobject_uevent(&q->kobj, KOBJ_REMOVE);
+               kobject_del(&q->kobj);
                kobject_put(&disk->kobj);
        }
 }