diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 4720f68..d7e6399 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
 
 #define DM_MSG_PREFIX "core"
 
-#ifdef CONFIG_PRINTK
-/*
- * ratelimit state to be used in DMXXX_LIMIT().
- */
-DEFINE_RATELIMIT_STATE(dm_ratelimit_state,
-                      DEFAULT_RATELIMIT_INTERVAL,
-                      DEFAULT_RATELIMIT_BURST);
-EXPORT_SYMBOL(dm_ratelimit_state);
-#endif
-
 /*
  * Cookies are numeric values sent with CHANGE and REMOVE
  * uevents while resuming, removing or renaming the device.
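The hunk above removes the shared ratelimit state behind the DMXXX_LIMIT() logging macros (DMERR_LIMIT() still appears later in this patch). For context, a minimal userspace sketch of the interval/burst pattern that state implements — all names here are invented, not the kernel API:

```c
#include <stdio.h>
#include <time.h>

/* Allow up to `burst` messages per `interval` seconds, in the spirit
 * of DEFINE_RATELIMIT_STATE(). Sketch only; the kernel version is
 * jiffies-based and also tracks suppressed messages. */
struct ratelimit_sketch {
	time_t window_start;
	int interval;	/* seconds per window */
	int burst;	/* messages allowed per window */
	int seen;	/* messages attempted in this window */
};

static int ratelimit_ok(struct ratelimit_sketch *rs)
{
	time_t now = time(NULL);

	if (now - rs->window_start >= rs->interval) {
		rs->window_start = now;	/* new window: reset the budget */
		rs->seen = 0;
	}
	return rs->seen++ < rs->burst;
}

int main(void)
{
	struct ratelimit_sketch rs = { time(NULL), 5, 10, 0 };

	for (int i = 0; i < 100; i++)
		if (ratelimit_ok(&rs))
			printf("message %d\n", i);	/* at most 10 per 5s */
	return 0;
}
```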
@@ -192,8 +182,8 @@ struct mapped_device {
        /* forced geometry settings */
        struct hd_geometry geometry;
 
-       /* sysfs handle */
-       struct kobject kobj;
+       /* kobject and completion */
+       struct dm_kobject_holder kobj_holder;
 
        /* zero-length flush that will be cloned and submitted to targets */
        struct bio flush_bio;
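Replacing the bare kobject with a dm_kobject_holder pairs it with a completion (initialized in alloc_dev() further down), so teardown can wait for the last sysfs reference to be released before the mapped_device is freed. A minimal sketch of that release-then-wait pattern, with invented userspace names:

```c
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/* Completion stand-in: a one-shot "this has happened" signal. */
struct completion_sketch {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	int done;
};

/* Holder pairing the refcounted object with its completion,
 * in the spirit of dm_kobject_holder. */
struct holder_sketch {
	atomic_int refcount;
	struct completion_sketch released;
};

static void complete_sketch(struct completion_sketch *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = 1;
	pthread_cond_signal(&c->cond);
	pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion_sketch(struct completion_sketch *c)
{
	pthread_mutex_lock(&c->lock);
	while (!c->done)
		pthread_cond_wait(&c->cond, &c->lock);
	pthread_mutex_unlock(&c->lock);
}

static void holder_put(struct holder_sketch *h)
{
	/* The final put signals the waiter instead of freeing directly. */
	if (atomic_fetch_sub(&h->refcount, 1) == 1)
		complete_sketch(&h->released);
}

int main(void)
{
	struct holder_sketch h;

	atomic_init(&h.refcount, 1);
	pthread_mutex_init(&h.released.lock, NULL);
	pthread_cond_init(&h.released.cond, NULL);
	h.released.done = 0;

	holder_put(&h);			/* last reference goes away */
	wait_for_completion_sketch(&h.released);
	printf("safe to free\n");	/* nothing can still touch h */
	return 0;
}
```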
@@ -755,8 +745,14 @@ static void rq_completed(struct mapped_device *md, int rw, int run_queue)
        if (!md_in_flight(md))
                wake_up(&md->wait);
 
+       /*
+        * Run this off this callpath, as drivers could invoke end_io while
+        * inside their request_fn (and holding the queue lock). Calling
+        * back into ->request_fn() could deadlock attempting to grab the
+        * queue lock again.
+        */
        if (run_queue)
-               blk_run_queue(md->queue);
+               blk_run_queue_async(md->queue);
 
        /*
         * dm_put() must be at the end of this function. See the comment above
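blk_run_queue_async() matters here because rq_completed() can run from a driver's request_fn with the queue lock already held; re-entering dispatch synchronously would deadlock on that lock. A small sketch of the defer-instead-of-recurse idea, with invented names:

```c
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static int rerun_pending;	/* stand-in for an async queue kick */

/* Must be entered with queue_lock held. */
static void dispatch(void)
{
	printf("dispatching requests\n");
}

static void complete_request(void)
{
	pthread_mutex_lock(&queue_lock);
	/*
	 * Running dispatch via a fresh lock acquisition here would
	 * self-deadlock on the non-recursive queue_lock, so only mark
	 * the queue as needing another run.
	 */
	rerun_pending = 1;
	pthread_mutex_unlock(&queue_lock);
}

int main(void)
{
	complete_request();
	if (rerun_pending) {	/* later context: lock is not held */
		pthread_mutex_lock(&queue_lock);
		dispatch();
		pthread_mutex_unlock(&queue_lock);
	}
	return 0;
}
```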
@@ -866,10 +862,14 @@ static void dm_done(struct request *clone, int error, bool mapped)
 {
        int r = error;
        struct dm_rq_target_io *tio = clone->end_io_data;
-       dm_request_endio_fn rq_end_io = tio->ti->type->rq_end_io;
+       dm_request_endio_fn rq_end_io = NULL;
 
-       if (mapped && rq_end_io)
-               r = rq_end_io(tio->ti, clone, error, &tio->info);
+       if (tio->ti) {
+               rq_end_io = tio->ti->type->rq_end_io;
+
+               if (mapped && rq_end_io)
+                       r = rq_end_io(tio->ti, clone, error, &tio->info);
+       }
 
        if (r <= 0)
                /* The target wants to complete the I/O */
@@ -1566,15 +1566,6 @@ static int map_request(struct dm_target *ti, struct request *clone,
        int r, requeued = 0;
        struct dm_rq_target_io *tio = clone->end_io_data;
 
-       /*
-        * Hold the md reference here for the in-flight I/O.
-        * We can't rely on the reference count by device opener,
-        * because the device may be closed during the request completion
-        * when all bios are completed.
-        * See the comment in rq_completed() too.
-        */
-       dm_get(md);
-
        tio->ti = ti;
        r = ti->type->map_rq(ti, clone, &tio->info);
        switch (r) {
@@ -1606,6 +1597,26 @@ static int map_request(struct dm_target *ti, struct request *clone,
        return requeued;
 }
 
+static struct request *dm_start_request(struct mapped_device *md, struct request *orig)
+{
+       struct request *clone;
+
+       blk_start_request(orig);
+       clone = orig->special;
+       atomic_inc(&md->pending[rq_data_dir(clone)]);
+
+       /*
+        * Hold the md reference here for the in-flight I/O.
+        * We can't rely on the reference count by device opener,
+        * because the device may be closed during the request completion
+        * when all bios are completed.
+        * See the comment in rq_completed() too.
+        */
+       dm_get(md);
+
+       return clone;
+}
+
 /*
  * q->request_fn for request-based dm.
  * Called with the queue lock held.
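The refcounting comment moved into dm_start_request() above: the md reference is now taken exactly when a request is started, and rq_completed() drops it, so the device outlives its opener even if the opener closes mid-I/O. A minimal userspace sketch of that per-I/O pin, with invented names:

```c
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct dev_sketch {
	atomic_int refcount;
};

static void dev_get(struct dev_sketch *d)
{
	atomic_fetch_add(&d->refcount, 1);
}

static void dev_put(struct dev_sketch *d)
{
	/* Free only when the last reference, of any kind, is dropped. */
	if (atomic_fetch_sub(&d->refcount, 1) == 1) {
		printf("last reference dropped, freeing\n");
		free(d);
	}
}

static void start_request(struct dev_sketch *d)
{
	dev_get(d);	/* pin the device for this in-flight I/O */
}

static void complete_request(struct dev_sketch *d)
{
	dev_put(d);	/* completion drops the per-I/O pin */
}

int main(void)
{
	struct dev_sketch *d = malloc(sizeof(*d));

	atomic_init(&d->refcount, 1);	/* the opener's reference */
	start_request(d);
	dev_put(d);		/* opener closes while I/O is in flight */
	complete_request(d);	/* device survives until here */
	return 0;
}
```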
@@ -1635,14 +1646,21 @@ static void dm_request_fn(struct request_queue *q)
                        pos = blk_rq_pos(rq);
 
                ti = dm_table_find_target(map, pos);
-               BUG_ON(!dm_target_is_valid(ti));
+               if (!dm_target_is_valid(ti)) {
+                       /*
+                        * Must perform the setup that dm_done() requires
+                        * before calling dm_kill_unmapped_request().
+                        */
+                       DMERR_LIMIT("request attempted access beyond the end of device");
+                       clone = dm_start_request(md, rq);
+                       dm_kill_unmapped_request(clone, -EIO);
+                       continue;
+               }
 
                if (ti->type->busy && ti->type->busy(ti))
                        goto delay_and_out;
 
-               blk_start_request(rq);
-               clone = rq->special;
-               atomic_inc(&md->pending[rq_data_dir(clone)]);
+               clone = dm_start_request(md, rq);
 
                spin_unlock(q->queue_lock);
                if (map_request(ti, clone, md))
@@ -1662,8 +1680,6 @@ delay_and_out:
        blk_delay_queue(q, HZ / 10);
 out:
        dm_table_put(map);
-
-       return;
 }
 
 int dm_underlying_device_busy(struct request_queue *q)
@@ -1865,6 +1881,7 @@ static struct mapped_device *alloc_dev(int minor)
        init_waitqueue_head(&md->wait);
        INIT_WORK(&md->work, dm_wq_work);
        init_waitqueue_head(&md->eventq);
+       init_completion(&md->kobj_holder.completion);
 
        md->disk->major = _major;
        md->disk->first_minor = minor;
@@ -2205,7 +2222,7 @@ int dm_setup_md_queue(struct mapped_device *md)
        return 0;
 }
 
-static struct mapped_device *dm_find_md(dev_t dev)
+struct mapped_device *dm_get_md(dev_t dev)
 {
        struct mapped_device *md;
        unsigned minor = MINOR(dev);
@@ -2216,12 +2233,15 @@ static struct mapped_device *dm_find_md(dev_t dev)
        spin_lock(&_minor_lock);
 
        md = idr_find(&_minor_idr, minor);
-       if (md && (md == MINOR_ALLOCED ||
-                  (MINOR(disk_devt(dm_disk(md))) != minor) ||
-                  dm_deleting_md(md) ||
-                  test_bit(DMF_FREEING, &md->flags))) {
-               md = NULL;
-               goto out;
+       if (md) {
+               if ((md == MINOR_ALLOCED ||
+                    (MINOR(disk_devt(dm_disk(md))) != minor) ||
+                    dm_deleting_md(md) ||
+                    test_bit(DMF_FREEING, &md->flags))) {
+                       md = NULL;
+                       goto out;
+               }
+               dm_get(md);
        }
 
 out:
@@ -2229,16 +2249,6 @@ out:
 
        return md;
 }
-
-struct mapped_device *dm_get_md(dev_t dev)
-{
-       struct mapped_device *md = dm_find_md(dev);
-
-       if (md)
-               dm_get(md);
-
-       return md;
-}
 EXPORT_SYMBOL_GPL(dm_get_md);
 
 void *dm_get_mdptr(struct mapped_device *md)
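Folding the old dm_find_md() into dm_get_md() moves the dm_get() inside the _minor_lock critical section, closing the window in which the device could be freed between lookup and reference take. The shape of the fix, as an invented-name userspace sketch:

```c
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct obj_sketch {
	atomic_int refcount;
	int freeing;	/* set under table_lock before teardown begins */
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct obj_sketch *table_slot;	/* stand-in for the idr */

static struct obj_sketch *lookup_and_get(void)
{
	struct obj_sketch *o;

	pthread_mutex_lock(&table_lock);
	o = table_slot;
	if (o && o->freeing)
		o = NULL;	/* mid-teardown: report "not found" */
	if (o)			/* take the reference under the same lock */
		atomic_fetch_add(&o->refcount, 1);
	pthread_mutex_unlock(&table_lock);
	return o;
}

int main(void)
{
	static struct obj_sketch o;

	atomic_init(&o.refcount, 1);
	table_slot = &o;
	if (lookup_and_get())
		printf("refcount now %d\n", atomic_load(&o.refcount));
	return 0;
}
```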
@@ -2275,10 +2285,16 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
        set_bit(DMF_FREEING, &md->flags);
        spin_unlock(&_minor_lock);
 
+       /*
+        * Take suspend_lock so that presuspend and postsuspend methods
+        * do not race with internal suspend.
+        */
+       mutex_lock(&md->suspend_lock);
        if (!dm_suspended_md(md)) {
                dm_table_presuspend_targets(map);
                dm_table_postsuspend_targets(map);
        }
+       mutex_unlock(&md->suspend_lock);
 
        /*
         * Rare, but there may be I/O requests still going to complete,
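Taking md->suspend_lock here makes the "already suspended?" check and the presuspend/postsuspend calls atomic with respect to internal suspend, which takes the same lock. Schematically, with invented names:

```c
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t suspend_lock = PTHREAD_MUTEX_INITIALIZER;
static int suspended;

static void internal_suspend(void)
{
	pthread_mutex_lock(&suspend_lock);
	suspended = 1;		/* state change serialized by the lock */
	pthread_mutex_unlock(&suspend_lock);
}

static void destroy(void)
{
	pthread_mutex_lock(&suspend_lock);
	/* Check and act under one lock so a concurrent suspend cannot
	 * flip the state between the test and the hooks. */
	if (!suspended)
		printf("running pre/postsuspend hooks\n");
	pthread_mutex_unlock(&suspend_lock);
}

int main(void)
{
	destroy();		/* not suspended: hooks run */
	internal_suspend();
	destroy();		/* suspended: hooks skipped */
	return 0;
}
```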
@@ -2656,7 +2672,7 @@ struct gendisk *dm_disk(struct mapped_device *md)
 
 struct kobject *dm_kobject(struct mapped_device *md)
 {
-       return &md->kobj;
+       return &md->kobj_holder.kobj;
 }
 
 /*
@@ -2667,15 +2683,17 @@ struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
 {
        struct mapped_device *md;
 
-       md = container_of(kobj, struct mapped_device, kobj);
-       if (&md->kobj != kobj)
-               return NULL;
-
-       if (test_bit(DMF_FREEING, &md->flags) ||
-           dm_deleting_md(md))
-               return NULL;
+       md = container_of(kobj, struct mapped_device, kobj_holder.kobj);
 
+       spin_lock(&_minor_lock);
+       if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
+               md = NULL;
+               goto out;
+       }
        dm_get(md);
+out:
+       spin_unlock(&_minor_lock);
+
        return md;
 }
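The rewritten dm_get_from_kobject() recovers the mapped_device from its embedded kobject via container_of(), then validates and pins it under _minor_lock, mirroring dm_get_md() above. The container_of() recovery itself, as a standalone illustration with invented types:

```c
#include <stddef.h>
#include <stdio.h>

/* Given a pointer to an embedded member, subtract the member's
 * offset to reach the enclosing structure (what container_of()
 * does in the kernel). */
#define container_of_sketch(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct inner { int x; };

struct outer {
	int id;
	struct inner kobj;	/* embedded, like kobj_holder.kobj */
};

int main(void)
{
	struct outer o = { .id = 42 };
	struct inner *ip = &o.kobj;	/* callers hand us this pointer */
	struct outer *op = container_of_sketch(ip, struct outer, kobj);

	printf("recovered id = %d\n", op->id);	/* prints 42 */
	return 0;
}
```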