diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 3fd8b1e..3c6d4ee 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
 #include <linux/slab.h>
 #include <linux/idr.h>
 #include <linux/hdreg.h>
-#include <linux/blktrace_api.h>
 
 #include <trace/events/block.h>
 
 #define DM_MSG_PREFIX "core"
 
+/*
+ * Cookies are numeric values sent with CHANGE and REMOVE
+ * uevents while resuming, removing or renaming the device.
+ */
+#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
+#define DM_COOKIE_LENGTH 24
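
The cookie is later rendered into a single environment string (see dm_kobject_uevent() near the bottom of this file). As a sizing sketch mirroring that snprintf(): "DM_COOKIE" is 9 characters, '=' adds one, a 32-bit value needs at most 10 decimal digits, and one byte remains for the NUL, so the worst case fits in DM_COOKIE_LENGTH (24):

	char udev_cookie[DM_COOKIE_LENGTH];
	unsigned cookie = 4294967295U;	/* worst case: 10 decimal digits */

	/* 9 ("DM_COOKIE") + 1 ('=') + 10 digits + 1 NUL = 21 bytes <= 24 */
	snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
		 DM_COOKIE_ENV_VAR_NAME, cookie);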
+
 static const char *_name = DM_NAME;
 
 static unsigned int major = 0;
@@ -72,7 +78,7 @@ struct dm_rq_target_io {
  */
 struct dm_rq_clone_bio_info {
        struct bio *orig;
-       struct request *rq;
+       struct dm_rq_target_io *tio;
 };
 
 union map_info *dm_get_mapinfo(struct bio *bio)
@@ -82,6 +88,14 @@ union map_info *dm_get_mapinfo(struct bio *bio)
        return NULL;
 }
 
+union map_info *dm_get_rq_mapinfo(struct request *rq)
+{
+       if (rq && rq->end_io_data)
+               return &((struct dm_rq_target_io *)rq->end_io_data)->info;
+       return NULL;
+}
+EXPORT_SYMBOL_GPL(dm_get_rq_mapinfo);
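
A hedged usage sketch, not part of this patch: a request-based target could recover its per-request map_info from a clone in a completion callback of its own. The callback name and the my_io type are illustrative; info->ptr is assumed to have been set by the target's map_rq().

	static void my_clone_done(struct request *clone, int error)
	{
		union map_info *info = dm_get_rq_mapinfo(clone);
		struct my_io *io = info ? info->ptr : NULL;	/* stored by map_rq() */

		/* ... per-I/O bookkeeping using io ... */
	}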
+
 #define MINOR_ALLOCED ((void *)-1)
 
 /*
@@ -158,13 +172,31 @@ struct mapped_device {
         * freeze/thaw support require holding onto a super block
         */
        struct super_block *frozen_sb;
-       struct block_device *suspended_bdev;
+       struct block_device *bdev;
 
        /* forced geometry settings */
        struct hd_geometry geometry;
 
+       /* marker of flush suspend for request-based dm */
+       struct request suspend_rq;
+
+       /* For saving the address of __make_request for request-based dm */
+       make_request_fn *saved_make_request_fn;
+
        /* sysfs handle */
        struct kobject kobj;
+
+       /* zero-length barrier that will be cloned and submitted to targets */
+       struct bio barrier_bio;
+};
+
+/*
+ * For mempools pre-allocation at the table loading time.
+ */
+struct dm_md_mempools {
+       mempool_t *io_pool;
+       mempool_t *tio_pool;
+       struct bio_set *bs;
 };
 
 #define MIN_IOS 256
@@ -392,14 +424,29 @@ static void free_io(struct mapped_device *md, struct dm_io *io)
        mempool_free(io, md->io_pool);
 }
 
-static struct dm_target_io *alloc_tio(struct mapped_device *md)
+static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
 {
-       return mempool_alloc(md->tio_pool, GFP_NOIO);
+       mempool_free(tio, md->tio_pool);
 }
 
-static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
+static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md)
 {
-       mempool_free(tio, md->tio_pool);
+       return mempool_alloc(md->tio_pool, GFP_ATOMIC);
+}
+
+static void free_rq_tio(struct dm_rq_target_io *tio)
+{
+       mempool_free(tio, tio->md->tio_pool);
+}
+
+static struct dm_rq_clone_bio_info *alloc_bio_info(struct mapped_device *md)
+{
+       return mempool_alloc(md->io_pool, GFP_ATOMIC);
+}
+
+static void free_bio_info(struct dm_rq_clone_bio_info *info)
+{
+       mempool_free(info, info->tio->md->io_pool);
 }
 
 static void start_io_acct(struct dm_io *io)
@@ -465,12 +512,13 @@ static void queue_io(struct mapped_device *md, struct bio *bio)
 struct dm_table *dm_get_table(struct mapped_device *md)
 {
        struct dm_table *t;
+       unsigned long flags;
 
-       read_lock(&md->map_lock);
+       read_lock_irqsave(&md->map_lock, flags);
        t = md->map;
        if (t)
                dm_table_get(t);
-       read_unlock(&md->map_lock);
+       read_unlock_irqrestore(&md->map_lock, flags);
 
        return t;
 }
@@ -537,9 +585,11 @@ static void dec_pending(struct dm_io *io, int error)
                         * Target requested pushing back the I/O.
                         */
                        spin_lock_irqsave(&md->deferred_lock, flags);
-                       if (__noflush_suspending(md))
-                               bio_list_add_head(&md->deferred, io->bio);
-                       else
+                       if (__noflush_suspending(md)) {
+                               if (!bio_barrier(io->bio))
+                                       bio_list_add_head(&md->deferred,
+                                                         io->bio);
+                       } else
                                /* noflush suspend was interrupted. */
                                io->error = -EIO;
                        spin_unlock_irqrestore(&md->deferred_lock, flags);
@@ -554,7 +604,8 @@ static void dec_pending(struct dm_io *io, int error)
                         * a per-device variable for error reporting.
                         * Note that you can't touch the bio after end_io_acct
                         */
-                       md->barrier_error = io_error;
+                       if (!md->barrier_error && io_error != -EOPNOTSUPP)
+                               md->barrier_error = io_error;
                        end_io_acct(io);
                } else {
                        end_io_acct(io);
@@ -608,6 +659,262 @@ static void clone_endio(struct bio *bio, int error)
        dec_pending(io, error);
 }
 
+/*
+ * Partial completion handling for request-based dm
+ */
+static void end_clone_bio(struct bio *clone, int error)
+{
+       struct dm_rq_clone_bio_info *info = clone->bi_private;
+       struct dm_rq_target_io *tio = info->tio;
+       struct bio *bio = info->orig;
+       unsigned int nr_bytes = info->orig->bi_size;
+
+       bio_put(clone);
+
+       if (tio->error)
+               /*
+                * An error has already been detected on the request.
+                * Once an error has occurred, just let clone->end_io()
+                * handle the remainder.
+                */
+               return;
+       else if (error) {
+               /*
+                * Don't notify the upper layer of the error yet.
+                * The error handling decision is made by the target driver
+                * when the request is completed.
+                */
+               tio->error = error;
+               return;
+       }
+
+       /*
+        * I/O for the bio successfully completed.
+        * Notify the upper layer of the data completion.
+        */
+
+       /*
+        * bios are processed from the head of the list.
+        * So the completing bio should always be rq->bio.
+        * If it's not, something wrong is happening.
+        */
+       if (tio->orig->bio != bio)
+               DMERR("bio completion is going in the middle of the request");
+
+       /*
+        * Update the original request.
+        * Do not use blk_end_request() here, because it may complete
+        * the original request before the clone, and break the ordering.
+        */
+       blk_update_request(tio->orig, 0, nr_bytes);
+}
+
+/*
+ * Don't touch any member of the md after calling this function because
+ * the md may be freed in dm_put() at the end of this function.
+ * Or do dm_get() before calling this function and dm_put() later.
+ */
+static void rq_completed(struct mapped_device *md, int run_queue)
+{
+       int wakeup_waiters = 0;
+       struct request_queue *q = md->queue;
+       unsigned long flags;
+
+       spin_lock_irqsave(q->queue_lock, flags);
+       if (!queue_in_flight(q))
+               wakeup_waiters = 1;
+       spin_unlock_irqrestore(q->queue_lock, flags);
+
+       /* nudge anyone waiting on suspend queue */
+       if (wakeup_waiters)
+               wake_up(&md->wait);
+
+       if (run_queue)
+               blk_run_queue(q);
+
+       /*
+        * dm_put() must be at the end of this function. See the comment above
+        */
+       dm_put(md);
+}
+
+static void dm_unprep_request(struct request *rq)
+{
+       struct request *clone = rq->special;
+       struct dm_rq_target_io *tio = clone->end_io_data;
+
+       rq->special = NULL;
+       rq->cmd_flags &= ~REQ_DONTPREP;
+
+       blk_rq_unprep_clone(clone);
+       free_rq_tio(tio);
+}
+
+/*
+ * Requeue the original request of a clone.
+ */
+void dm_requeue_unmapped_request(struct request *clone)
+{
+       struct dm_rq_target_io *tio = clone->end_io_data;
+       struct mapped_device *md = tio->md;
+       struct request *rq = tio->orig;
+       struct request_queue *q = rq->q;
+       unsigned long flags;
+
+       dm_unprep_request(rq);
+
+       spin_lock_irqsave(q->queue_lock, flags);
+       if (elv_queue_empty(q))
+               blk_plug_device(q);
+       blk_requeue_request(q, rq);
+       spin_unlock_irqrestore(q->queue_lock, flags);
+
+       rq_completed(md, 0);
+}
+EXPORT_SYMBOL_GPL(dm_requeue_unmapped_request);
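
Targets normally reach this requeue path indirectly, by returning DM_ENDIO_REQUEUE from their rq_end_io hook (handled in dm_softirq_done() below). A minimal hedged sketch; the -EBUSY retry policy is invented for the example:

	static int my_rq_end_io(struct dm_target *ti, struct request *clone,
				int error, union map_info *map_context)
	{
		if (error == -EBUSY)		/* transient: have the core requeue */
			return DM_ENDIO_REQUEUE;

		return error;			/* <= 0: complete the I/O with this result */
	}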
+
+static void __stop_queue(struct request_queue *q)
+{
+       blk_stop_queue(q);
+}
+
+static void stop_queue(struct request_queue *q)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(q->queue_lock, flags);
+       __stop_queue(q);
+       spin_unlock_irqrestore(q->queue_lock, flags);
+}
+
+static void __start_queue(struct request_queue *q)
+{
+       if (blk_queue_stopped(q))
+               blk_start_queue(q);
+}
+
+static void start_queue(struct request_queue *q)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(q->queue_lock, flags);
+       __start_queue(q);
+       spin_unlock_irqrestore(q->queue_lock, flags);
+}
+
+/*
+ * Complete the clone and the original request.
+ * Must be called without queue lock.
+ */
+static void dm_end_request(struct request *clone, int error)
+{
+       struct dm_rq_target_io *tio = clone->end_io_data;
+       struct mapped_device *md = tio->md;
+       struct request *rq = tio->orig;
+
+       if (blk_pc_request(rq)) {
+               rq->errors = clone->errors;
+               rq->resid_len = clone->resid_len;
+
+               if (rq->sense)
+                       /*
+                        * We are using the sense buffer of the original
+                        * request.
+                        * So setting the length of the sense data is enough.
+                        */
+                       rq->sense_len = clone->sense_len;
+       }
+
+       BUG_ON(clone->bio);
+       free_rq_tio(tio);
+
+       blk_end_request_all(rq, error);
+
+       rq_completed(md, 1);
+}
+
+/*
+ * Request completion handler for request-based dm
+ */
+static void dm_softirq_done(struct request *rq)
+{
+       struct request *clone = rq->completion_data;
+       struct dm_rq_target_io *tio = clone->end_io_data;
+       dm_request_endio_fn rq_end_io = tio->ti->type->rq_end_io;
+       int error = tio->error;
+
+       if (!(rq->cmd_flags & REQ_FAILED) && rq_end_io)
+               error = rq_end_io(tio->ti, clone, error, &tio->info);
+
+       if (error <= 0)
+               /* The target wants to complete the I/O */
+               dm_end_request(clone, error);
+       else if (error == DM_ENDIO_INCOMPLETE)
+               /* The target will handle the I/O */
+               return;
+       else if (error == DM_ENDIO_REQUEUE)
+               /* The target wants to requeue the I/O */
+               dm_requeue_unmapped_request(clone);
+       else {
+               DMWARN("unimplemented target endio return value: %d", error);
+               BUG();
+       }
+}
+
+/*
+ * Complete the clone and the original request with the error status
+ * through softirq context.
+ */
+static void dm_complete_request(struct request *clone, int error)
+{
+       struct dm_rq_target_io *tio = clone->end_io_data;
+       struct request *rq = tio->orig;
+
+       tio->error = error;
+       rq->completion_data = clone;
+       blk_complete_request(rq);
+}
+
+/*
+ * Complete the unmapped clone and the original request with the error status
+ * through softirq context.
+ * Target's rq_end_io() function isn't called.
+ * This may be used when the target's map_rq() function fails.
+ */
+void dm_kill_unmapped_request(struct request *clone, int error)
+{
+       struct dm_rq_target_io *tio = clone->end_io_data;
+       struct request *rq = tio->orig;
+
+       rq->cmd_flags |= REQ_FAILED;
+       dm_complete_request(clone, error);
+}
+EXPORT_SYMBOL_GPL(dm_kill_unmapped_request);
+
+/*
+ * Called with the queue lock held
+ */
+static void end_clone_request(struct request *clone, int error)
+{
+       /*
+        * Just clean up the information of the queue on which
+        * the clone was dispatched.
+        * The clone is *NOT* actually freed here, because it was allocated
+        * from dm's own mempool and REQ_ALLOCED isn't set in clone->cmd_flags.
+        */
+       __blk_put_request(clone->q, clone);
+
+       /*
+        * Actual request completion is done in a softirq context which doesn't
+        * hold the queue lock.  Otherwise, deadlock could occur because:
+        *     - another request may be submitted by the upper level driver
+        *       of the stacking during the completion
+        *     - the submission which requires queue lock may be done
+        *       against this queue
+        */
+       dm_complete_request(clone, error);
+}
+
 static sector_t max_io_len(struct mapped_device *md,
                           sector_t sector, struct dm_target *ti)
 {
@@ -635,11 +942,6 @@ static void __map_bio(struct dm_target *ti, struct bio *clone,
        sector_t sector;
        struct mapped_device *md;
 
-       /*
-        * Sanity checks.
-        */
-       BUG_ON(!clone->bi_size);
-
        clone->bi_end_io = clone_endio;
        clone->bi_private = tio;
 
@@ -753,6 +1055,48 @@ static struct bio *clone_bio(struct bio *bio, sector_t sector,
        return clone;
 }
 
+static struct dm_target_io *alloc_tio(struct clone_info *ci,
+                                     struct dm_target *ti)
+{
+       struct dm_target_io *tio = mempool_alloc(ci->md->tio_pool, GFP_NOIO);
+
+       tio->io = ci->io;
+       tio->ti = ti;
+       memset(&tio->info, 0, sizeof(tio->info));
+
+       return tio;
+}
+
+static void __flush_target(struct clone_info *ci, struct dm_target *ti,
+                         unsigned flush_nr)
+{
+       struct dm_target_io *tio = alloc_tio(ci, ti);
+       struct bio *clone;
+
+       tio->info.flush_request = flush_nr;
+
+       clone = bio_alloc_bioset(GFP_NOIO, 0, ci->md->bs);
+       __bio_clone(clone, ci->bio);
+       clone->bi_destructor = dm_bio_destructor;
+
+       __map_bio(ti, clone, tio);
+}
+
+static int __clone_and_map_empty_barrier(struct clone_info *ci)
+{
+       unsigned target_nr = 0, flush_nr;
+       struct dm_target *ti;
+
+       while ((ti = dm_table_get_target(ci->map, target_nr++)))
+               for (flush_nr = 0; flush_nr < ti->num_flush_requests;
+                    flush_nr++)
+                       __flush_target(ci, ti, flush_nr);
+
+       ci->sector_count = 0;
+
+       return 0;
+}
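
For context, a hedged sketch of the target side: a target only receives these zero-length barrier clones if it advertises num_flush_requests in its constructor, and it can tell the copies apart through info.flush_request (set in __flush_target() above). The constructor body is illustrative:

	static int my_target_ctr(struct dm_target *ti, unsigned argc, char **argv)
	{
		ti->num_flush_requests = 1;	/* accept one empty-barrier clone per flush */
		/* ... parse argv, set up ti->private ... */
		return 0;
	}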
+
 static int __clone_and_map(struct clone_info *ci)
 {
        struct bio *clone, *bio = ci->bio;
@@ -760,6 +1104,9 @@ static int __clone_and_map(struct clone_info *ci)
        sector_t len = 0, max;
        struct dm_target_io *tio;
 
+       if (unlikely(bio_empty_barrier(bio)))
+               return __clone_and_map_empty_barrier(ci);
+
        ti = dm_table_find_target(ci->map, ci->sector);
        if (!dm_target_is_valid(ti))
                return -EIO;
@@ -769,10 +1116,7 @@ static int __clone_and_map(struct clone_info *ci)
        /*
         * Allocate a target io object.
         */
-       tio = alloc_tio(ci->md);
-       tio->io = ci->io;
-       tio->ti = ti;
-       memset(&tio->info, 0, sizeof(tio->info));
+       tio = alloc_tio(ci, ti);
 
        if (ci->sector_count <= max) {
                /*
@@ -828,10 +1172,7 @@ static int __clone_and_map(struct clone_info *ci)
 
                                max = max_io_len(ci->md, ci->sector, ti);
 
-                               tio = alloc_tio(ci->md);
-                               tio->io = ci->io;
-                               tio->ti = ti;
-                               memset(&tio->info, 0, sizeof(tio->info));
+                               tio = alloc_tio(ci, ti);
                        }
 
                        len = min(remaining, max);
@@ -866,7 +1207,8 @@ static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
                if (!bio_barrier(bio))
                        bio_io_error(bio);
                else
-                       md->barrier_error = -EIO;
+                       if (!md->barrier_error)
+                               md->barrier_error = -EIO;
                return;
        }
 
@@ -879,6 +1221,8 @@ static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
        ci.io->md = md;
        ci.sector = bio->bi_sector;
        ci.sector_count = bio_sectors(bio);
+       if (unlikely(bio_empty_barrier(bio)))
+               ci.sector_count = 1;
        ci.idx = bio->bi_idx;
 
        start_io_acct(ci.io);
@@ -926,6 +1270,16 @@ static int dm_merge_bvec(struct request_queue *q,
         */
        if (max_size && ti->type->merge)
                max_size = ti->type->merge(ti, bvm, biovec, max_size);
+       /*
+        * If the target doesn't support merge method and some of the devices
+        * provided their merge_bvec method (we know this by looking at
+        * queue_max_hw_sectors), then we can't allow bios with multiple vector
+        * entries.  So always set max_size to 0, and the code below allows
+        * just one page.
+        */
+       else if (queue_max_hw_sectors(q) <= PAGE_SIZE >> 9)
+               max_size = 0;
 
 out_table:
        dm_table_put(map);
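
The arithmetic behind the queue_max_hw_sectors() test above, spelled out for a 4 KiB-page machine (illustration only): with 512-byte sectors, PAGE_SIZE >> 9 converts bytes to sectors, so 4096 >> 9 == 8 sectors == exactly one page. queue_max_hw_sectors(q) <= 8 therefore means the stacked limits already confine each I/O to a single page, and forcing max_size to 0 keeps each bio down to one vector entry.
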
@@ -944,7 +1298,7 @@ out:
  * The request function that just remaps the bio built up by
  * dm_merge_bvec.
  */
-static int dm_request(struct request_queue *q, struct bio *bio)
+static int _dm_request(struct request_queue *q, struct bio *bio)
 {
        int rw = bio_data_dir(bio);
        struct mapped_device *md = q->queuedata;
@@ -981,12 +1335,274 @@ static int dm_request(struct request_queue *q, struct bio *bio)
        return 0;
 }
 
+static int dm_make_request(struct request_queue *q, struct bio *bio)
+{
+       struct mapped_device *md = q->queuedata;
+
+       if (unlikely(bio_barrier(bio))) {
+               bio_endio(bio, -EOPNOTSUPP);
+               return 0;
+       }
+
+       return md->saved_make_request_fn(q, bio); /* call __make_request() */
+}
+
+static int dm_request_based(struct mapped_device *md)
+{
+       return blk_queue_stackable(md->queue);
+}
+
+static int dm_request(struct request_queue *q, struct bio *bio)
+{
+       struct mapped_device *md = q->queuedata;
+
+       if (dm_request_based(md))
+               return dm_make_request(q, bio);
+
+       return _dm_request(q, bio);
+}
+
+void dm_dispatch_request(struct request *rq)
+{
+       int r;
+
+       if (blk_queue_io_stat(rq->q))
+               rq->cmd_flags |= REQ_IO_STAT;
+
+       rq->start_time = jiffies;
+       r = blk_insert_cloned_request(rq->q, rq);
+       if (r)
+               dm_complete_request(rq, r);
+}
+EXPORT_SYMBOL_GPL(dm_dispatch_request);
+
+static void dm_rq_bio_destructor(struct bio *bio)
+{
+       struct dm_rq_clone_bio_info *info = bio->bi_private;
+       struct mapped_device *md = info->tio->md;
+
+       free_bio_info(info);
+       bio_free(bio, md->bs);
+}
+
+static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
+                                void *data)
+{
+       struct dm_rq_target_io *tio = data;
+       struct mapped_device *md = tio->md;
+       struct dm_rq_clone_bio_info *info = alloc_bio_info(md);
+
+       if (!info)
+               return -ENOMEM;
+
+       info->orig = bio_orig;
+       info->tio = tio;
+       bio->bi_end_io = end_clone_bio;
+       bio->bi_private = info;
+       bio->bi_destructor = dm_rq_bio_destructor;
+
+       return 0;
+}
+
+static int setup_clone(struct request *clone, struct request *rq,
+                      struct dm_rq_target_io *tio)
+{
+       int r = blk_rq_prep_clone(clone, rq, tio->md->bs, GFP_ATOMIC,
+                                 dm_rq_bio_constructor, tio);
+
+       if (r)
+               return r;
+
+       clone->cmd = rq->cmd;
+       clone->cmd_len = rq->cmd_len;
+       clone->sense = rq->sense;
+       clone->buffer = rq->buffer;
+       clone->end_io = end_clone_request;
+       clone->end_io_data = tio;
+
+       return 0;
+}
+
+static int dm_rq_flush_suspending(struct mapped_device *md)
+{
+       return !md->suspend_rq.special;
+}
+
+/*
+ * Called with the queue lock held.
+ */
+static int dm_prep_fn(struct request_queue *q, struct request *rq)
+{
+       struct mapped_device *md = q->queuedata;
+       struct dm_rq_target_io *tio;
+       struct request *clone;
+
+       if (unlikely(rq == &md->suspend_rq)) {
+               if (dm_rq_flush_suspending(md))
+                       return BLKPREP_OK;
+               else
+                       /* The flush suspend was interrupted */
+                       return BLKPREP_KILL;
+       }
+
+       if (unlikely(rq->special)) {
+               DMWARN("Already has something in rq->special.");
+               return BLKPREP_KILL;
+       }
+
+       tio = alloc_rq_tio(md); /* Only one for each original request */
+       if (!tio)
+               /* -ENOMEM */
+               return BLKPREP_DEFER;
+
+       tio->md = md;
+       tio->ti = NULL;
+       tio->orig = rq;
+       tio->error = 0;
+       memset(&tio->info, 0, sizeof(tio->info));
+
+       clone = &tio->clone;
+       if (setup_clone(clone, rq, tio)) {
+               /* -ENOMEM */
+               free_rq_tio(tio);
+               return BLKPREP_DEFER;
+       }
+
+       rq->special = clone;
+       rq->cmd_flags |= REQ_DONTPREP;
+
+       return BLKPREP_OK;
+}
+
+static void map_request(struct dm_target *ti, struct request *rq,
+                       struct mapped_device *md)
+{
+       int r;
+       struct request *clone = rq->special;
+       struct dm_rq_target_io *tio = clone->end_io_data;
+
+       /*
+        * Hold the md reference here for the in-flight I/O.
+        * We can't rely on the reference count by device opener,
+        * because the device may be closed during the request completion
+        * when all bios are completed.
+        * See the comment in rq_completed() too.
+        */
+       dm_get(md);
+
+       tio->ti = ti;
+       r = ti->type->map_rq(ti, clone, &tio->info);
+       switch (r) {
+       case DM_MAPIO_SUBMITTED:
+               /* The target has taken the I/O to submit by itself later */
+               break;
+       case DM_MAPIO_REMAPPED:
+               /* The target has remapped the I/O so dispatch it */
+               dm_dispatch_request(clone);
+               break;
+       case DM_MAPIO_REQUEUE:
+               /* The target wants to requeue the I/O */
+               dm_requeue_unmapped_request(clone);
+               break;
+       default:
+               if (r > 0) {
+                       DMWARN("unimplemented target map return value: %d", r);
+                       BUG();
+               }
+
+               /* The target wants to complete the I/O */
+               dm_kill_unmapped_request(clone, r);
+               break;
+       }
+}
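
A hedged sketch of a map_rq hook honouring the contract in the switch above: remap the clone onto an underlying device and let the core dispatch it. my_pick_path() and the per-target context are invented for the example:

	static int my_map_rq(struct dm_target *ti, struct request *clone,
			     union map_info *map_context)
	{
		struct block_device *bdev = my_pick_path(ti->private);	/* hypothetical */

		if (!bdev)
			return DM_MAPIO_REQUEUE;	/* core requeues the original */

		clone->q = bdev_get_queue(bdev);	/* route the clone */
		clone->rq_disk = bdev->bd_disk;

		return DM_MAPIO_REMAPPED;		/* core calls dm_dispatch_request() */
	}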
+
+/*
+ * q->request_fn for request-based dm.
+ * Called with the queue lock held.
+ */
+static void dm_request_fn(struct request_queue *q)
+{
+       struct mapped_device *md = q->queuedata;
+       struct dm_table *map = dm_get_table(md);
+       struct dm_target *ti;
+       struct request *rq;
+
+       /*
+        * For noflush suspend, check blk_queue_stopped() to immediately
+        * quit I/O dispatching.
+        */
+       while (!blk_queue_plugged(q) && !blk_queue_stopped(q)) {
+               rq = blk_peek_request(q);
+               if (!rq)
+                       goto plug_and_out;
+
+               if (unlikely(rq == &md->suspend_rq)) { /* Flush suspend marker */
+                       if (queue_in_flight(q))
+                               /* Not quiet yet.  Wait more */
+                               goto plug_and_out;
+
+                       /* This device should be quiet now */
+                       __stop_queue(q);
+                       blk_start_request(rq);
+                       __blk_end_request_all(rq, 0);
+                       wake_up(&md->wait);
+                       goto out;
+               }
+
+               ti = dm_table_find_target(map, blk_rq_pos(rq));
+               if (ti->type->busy && ti->type->busy(ti))
+                       goto plug_and_out;
+
+               blk_start_request(rq);
+               spin_unlock(q->queue_lock);
+               map_request(ti, rq, md);
+               spin_lock_irq(q->queue_lock);
+       }
+
+       goto out;
+
+plug_and_out:
+       if (!elv_queue_empty(q))
+               /* Some requests still remain, retry later */
+               blk_plug_device(q);
+
+out:
+       dm_table_put(map);
+
+       return;
+}
+
+int dm_underlying_device_busy(struct request_queue *q)
+{
+       return blk_lld_busy(q);
+}
+EXPORT_SYMBOL_GPL(dm_underlying_device_busy);
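
A hedged sketch of the intended consumer: a target's busy hook, consulted by dm_request_fn() above before dispatching, could simply forward the question to its underlying queue. The my_ctx type is illustrative:

	static int my_target_busy(struct dm_target *ti)
	{
		struct my_ctx *ctx = ti->private;	/* illustrative per-target context */

		/* defer dispatching while the underlying device can't take more work */
		return dm_underlying_device_busy(ctx->queue);
	}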
+
+static int dm_lld_busy(struct request_queue *q)
+{
+       int r;
+       struct mapped_device *md = q->queuedata;
+       struct dm_table *map = dm_get_table(md);
+
+       if (!map || test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))
+               r = 1;
+       else
+               r = dm_table_any_busy_target(map);
+
+       dm_table_put(map);
+
+       return r;
+}
+
 static void dm_unplug_all(struct request_queue *q)
 {
        struct mapped_device *md = q->queuedata;
        struct dm_table *map = dm_get_table(md);
 
        if (map) {
+               if (dm_request_based(md))
+                       generic_unplug_device(q);
+
                dm_table_unplug_all(map);
                dm_table_put(map);
        }
@@ -1001,7 +1617,16 @@ static int dm_any_congested(void *congested_data, int bdi_bits)
        if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
                map = dm_get_table(md);
                if (map) {
-                       r = dm_table_any_congested(map, bdi_bits);
+                       /*
+                        * Request-based dm cares about only own queue for
+                        * the query about congestion status of request_queue
+                        */
+                       if (dm_request_based(md))
+                               r = md->queue->backing_dev_info.state &
+                                   bdi_bits;
+                       else
+                               r = dm_table_any_congested(map, bdi_bits);
+
                        dm_table_put(map);
                }
        }
@@ -1124,30 +1749,32 @@ static struct mapped_device *alloc_dev(int minor)
        INIT_LIST_HEAD(&md->uevent_list);
        spin_lock_init(&md->uevent_lock);
 
-       md->queue = blk_alloc_queue(GFP_KERNEL);
+       md->queue = blk_init_queue(dm_request_fn, NULL);
        if (!md->queue)
                goto bad_queue;
 
+       /*
+        * Request-based dm devices cannot be stacked on top of bio-based dm
+        * devices.  The type of this dm device has not been decided yet,
+        * although we initialized the queue using blk_init_queue().
+        * The type is decided at the first table loading time.
+        * To prevent problematic device stacking, clear the queue flag
+        * for request stacking support until then.
+        *
+        * This queue is new, so no concurrency on the queue_flags.
+        */
+       queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue);
+       md->saved_make_request_fn = md->queue->make_request_fn;
        md->queue->queuedata = md;
        md->queue->backing_dev_info.congested_fn = dm_any_congested;
        md->queue->backing_dev_info.congested_data = md;
        blk_queue_make_request(md->queue, dm_request);
-       blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN, NULL);
        blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
        md->queue->unplug_fn = dm_unplug_all;
        blk_queue_merge_bvec(md->queue, dm_merge_bvec);
-
-       md->io_pool = mempool_create_slab_pool(MIN_IOS, _io_cache);
-       if (!md->io_pool)
-               goto bad_io_pool;
-
-       md->tio_pool = mempool_create_slab_pool(MIN_IOS, _tio_cache);
-       if (!md->tio_pool)
-               goto bad_tio_pool;
-
-       md->bs = bioset_create(16, 0);
-       if (!md->bs)
-               goto bad_no_bioset;
+       blk_queue_softirq_done(md->queue, dm_softirq_done);
+       blk_queue_prep_rq(md->queue, dm_prep_fn);
+       blk_queue_lld_busy(md->queue, dm_lld_busy);
 
        md->disk = alloc_disk(1);
        if (!md->disk)
@@ -1171,6 +1798,10 @@ static struct mapped_device *alloc_dev(int minor)
        if (!md->wq)
                goto bad_thread;
 
+       md->bdev = bdget_disk(md->disk, 0);
+       if (!md->bdev)
+               goto bad_bdev;
+
        /* Populate the mapping, nobody knows we exist yet */
        spin_lock(&_minor_lock);
        old_md = idr_replace(&_minor_idr, md, minor);
@@ -1180,15 +1811,11 @@ static struct mapped_device *alloc_dev(int minor)
 
        return md;
 
+bad_bdev:
+       destroy_workqueue(md->wq);
 bad_thread:
        put_disk(md->disk);
 bad_disk:
-       bioset_free(md->bs);
-bad_no_bioset:
-       mempool_destroy(md->tio_pool);
-bad_tio_pool:
-       mempool_destroy(md->io_pool);
-bad_io_pool:
        blk_cleanup_queue(md->queue);
 bad_queue:
        free_minor(minor);
@@ -1205,14 +1832,15 @@ static void free_dev(struct mapped_device *md)
 {
        int minor = MINOR(disk_devt(md->disk));
 
-       if (md->suspended_bdev) {
-               unlock_fs(md);
-               bdput(md->suspended_bdev);
-       }
+       unlock_fs(md);
+       bdput(md->bdev);
        destroy_workqueue(md->wq);
-       mempool_destroy(md->tio_pool);
-       mempool_destroy(md->io_pool);
-       bioset_free(md->bs);
+       if (md->tio_pool)
+               mempool_destroy(md->tio_pool);
+       if (md->io_pool)
+               mempool_destroy(md->io_pool);
+       if (md->bs)
+               bioset_free(md->bs);
        blk_integrity_unregister(md->disk);
        del_gendisk(md->disk);
        free_minor(minor);
@@ -1227,6 +1855,29 @@ static void free_dev(struct mapped_device *md)
        kfree(md);
 }
 
+static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
+{
+       struct dm_md_mempools *p;
+
+       if (md->io_pool && md->tio_pool && md->bs)
+               /* the md already has necessary mempools */
+               goto out;
+
+       p = dm_table_get_md_mempools(t);
+       BUG_ON(!p || md->io_pool || md->tio_pool || md->bs);
+
+       md->io_pool = p->io_pool;
+       p->io_pool = NULL;
+       md->tio_pool = p->tio_pool;
+       p->tio_pool = NULL;
+       md->bs = p->bs;
+       p->bs = NULL;
+
+out:
+       /* mempool bind completed, the table no longer needs any mempools */
+       dm_table_free_md_mempools(t);
+}
+
 /*
  * Bind a table to the device.
  */
@@ -1250,15 +1901,17 @@ static void __set_size(struct mapped_device *md, sector_t size)
 {
        set_capacity(md->disk, size);
 
-       mutex_lock(&md->suspended_bdev->bd_inode->i_mutex);
-       i_size_write(md->suspended_bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
-       mutex_unlock(&md->suspended_bdev->bd_inode->i_mutex);
+       mutex_lock(&md->bdev->bd_inode->i_mutex);
+       i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
+       mutex_unlock(&md->bdev->bd_inode->i_mutex);
 }
 
-static int __bind(struct mapped_device *md, struct dm_table *t)
+static int __bind(struct mapped_device *md, struct dm_table *t,
+                 struct queue_limits *limits)
 {
        struct request_queue *q = md->queue;
        sector_t size;
+       unsigned long flags;
 
        size = dm_table_get_size(t);
 
@@ -1268,8 +1921,7 @@ static int __bind(struct mapped_device *md, struct dm_table *t)
        if (size != get_capacity(md->disk))
                memset(&md->geometry, 0, sizeof(md->geometry));
 
-       if (md->suspended_bdev)
-               __set_size(md, size);
+       __set_size(md, size);
 
        if (!size) {
                dm_table_destroy(t);
@@ -1278,10 +1930,22 @@ static int __bind(struct mapped_device *md, struct dm_table *t)
 
        dm_table_event_callback(t, event_callback, md);
 
-       write_lock(&md->map_lock);
+       /*
+        * The queue hasn't been stopped yet if the old table type wasn't
+        * request-based during suspension.  So stop it to prevent
+        * I/O mapping before resume.
+        * This must be done before setting the queue restrictions,
+        * because request-based dm may be run just after the setting.
+        */
+       if (dm_table_request_based(t) && !blk_queue_stopped(q))
+               stop_queue(q);
+
+       __bind_mempools(md, t);
+
+       write_lock_irqsave(&md->map_lock, flags);
        md->map = t;
-       dm_table_set_restrictions(t, q);
-       write_unlock(&md->map_lock);
+       dm_table_set_restrictions(t, q, limits);
+       write_unlock_irqrestore(&md->map_lock, flags);
 
        return 0;
 }
@@ -1289,14 +1953,15 @@ static int __bind(struct mapped_device *md, struct dm_table *t)
 static void __unbind(struct mapped_device *md)
 {
        struct dm_table *map = md->map;
+       unsigned long flags;
 
        if (!map)
                return;
 
        dm_table_event_callback(map, NULL, NULL);
-       write_lock(&md->map_lock);
+       write_lock_irqsave(&md->map_lock, flags);
        md->map = NULL;
-       write_unlock(&md->map_lock);
+       write_unlock_irqrestore(&md->map_lock, flags);
        dm_table_destroy(map);
 }
 
@@ -1400,6 +2065,8 @@ static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
 {
        int r = 0;
        DECLARE_WAITQUEUE(wait, current);
+       struct request_queue *q = md->queue;
+       unsigned long flags;
 
        dm_unplug_all(md->queue);
 
@@ -1409,7 +2076,14 @@ static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
                set_current_state(interruptible);
 
                smp_mb();
-               if (!atomic_read(&md->pending))
+               if (dm_request_based(md)) {
+                       spin_lock_irqsave(q->queue_lock, flags);
+                       if (!queue_in_flight(q) && blk_queue_stopped(q)) {
+                               spin_unlock_irqrestore(q->queue_lock, flags);
+                               break;
+                       }
+                       spin_unlock_irqrestore(q->queue_lock, flags);
+               } else if (!atomic_read(&md->pending))
                        break;
 
                if (interruptible == TASK_INTERRUPTIBLE &&
@@ -1427,34 +2101,36 @@ static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
        return r;
 }
 
-static int dm_flush(struct mapped_device *md)
+static void dm_flush(struct mapped_device *md)
 {
        dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
-       return 0;
+
+       bio_init(&md->barrier_bio);
+       md->barrier_bio.bi_bdev = md->bdev;
+       md->barrier_bio.bi_rw = WRITE_BARRIER;
+       __split_and_process_bio(md, &md->barrier_bio);
+
+       dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
 }
 
 static void process_barrier(struct mapped_device *md, struct bio *bio)
 {
-       int error = dm_flush(md);
-
-       if (unlikely(error)) {
-               bio_endio(bio, error);
-               return;
-       }
-       if (bio_empty_barrier(bio)) {
-               bio_endio(bio, 0);
-               return;
-       }
-
-       __split_and_process_bio(md, bio);
+       md->barrier_error = 0;
 
-       error = dm_flush(md);
+       dm_flush(md);
 
-       if (!error && md->barrier_error)
-               error = md->barrier_error;
+       if (!bio_empty_barrier(bio)) {
+               __split_and_process_bio(md, bio);
+               dm_flush(md);
+       }
 
        if (md->barrier_error != DM_ENDIO_REQUEUE)
-               bio_endio(bio, error);
+               bio_endio(bio, md->barrier_error);
+       else {
+               spin_lock_irq(&md->deferred_lock);
+               bio_list_add_head(&md->deferred, bio);
+               spin_unlock_irq(&md->deferred_lock);
+       }
 }
 
 /*
@@ -1480,10 +2156,14 @@ static void dm_wq_work(struct work_struct *work)
 
                up_write(&md->io_lock);
 
-               if (bio_barrier(c))
-                       process_barrier(md, c);
-               else
-                       __split_and_process_bio(md, c);
+               if (dm_request_based(md))
+                       generic_make_request(c);
+               else {
+                       if (bio_barrier(c))
+                               process_barrier(md, c);
+                       else
+                               __split_and_process_bio(md, c);
+               }
 
                down_write(&md->io_lock);
        }
@@ -1503,6 +2183,7 @@ static void dm_queue_flush(struct mapped_device *md)
  */
 int dm_swap_table(struct mapped_device *md, struct dm_table *table)
 {
+       struct queue_limits limits;
        int r = -EINVAL;
 
        mutex_lock(&md->suspend_lock);
@@ -1511,19 +2192,96 @@ int dm_swap_table(struct mapped_device *md, struct dm_table *table)
        if (!dm_suspended(md))
                goto out;
 
-       /* without bdev, the device size cannot be changed */
-       if (!md->suspended_bdev)
-               if (get_capacity(md->disk) != dm_table_get_size(table))
-                       goto out;
+       r = dm_calculate_queue_limits(table, &limits);
+       if (r)
+               goto out;
+
+       /* cannot change the device type, once a table is bound */
+       if (md->map &&
+           (dm_table_get_type(md->map) != dm_table_get_type(table))) {
+               DMWARN("can't change the device type after a table is bound");
+               goto out;
+       }
+
+       /*
+        * It is enough that blk_queue_ordered() is called only once when
+        * the first bio-based table is bound.
+        *
+        * This setting should be moved to alloc_dev() when request-based dm
+        * supports barriers.
+        */
+       if (!md->map && dm_table_bio_based(table))
+               blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN, NULL);
 
        __unbind(md);
-       r = __bind(md, table);
+       r = __bind(md, table, &limits);
 
 out:
        mutex_unlock(&md->suspend_lock);
        return r;
 }
 
+static void dm_rq_invalidate_suspend_marker(struct mapped_device *md)
+{
+       md->suspend_rq.special = (void *)0x1;
+}
+
+static void dm_rq_abort_suspend(struct mapped_device *md, int noflush)
+{
+       struct request_queue *q = md->queue;
+       unsigned long flags;
+
+       spin_lock_irqsave(q->queue_lock, flags);
+       if (!noflush)
+               dm_rq_invalidate_suspend_marker(md);
+       __start_queue(q);
+       spin_unlock_irqrestore(q->queue_lock, flags);
+}
+
+static void dm_rq_start_suspend(struct mapped_device *md, int noflush)
+{
+       struct request *rq = &md->suspend_rq;
+       struct request_queue *q = md->queue;
+
+       if (noflush)
+               stop_queue(q);
+       else {
+               blk_rq_init(q, rq);
+               blk_insert_request(q, rq, 0, NULL);
+       }
+}
+
+static int dm_rq_suspend_available(struct mapped_device *md, int noflush)
+{
+       int r = 1;
+       struct request *rq = &md->suspend_rq;
+       struct request_queue *q = md->queue;
+       unsigned long flags;
+
+       if (noflush)
+               return r;
+
+       /* The marker must be protected by queue lock if it is in use */
+       spin_lock_irqsave(q->queue_lock, flags);
+       if (unlikely(rq->ref_count)) {
+               /*
+                * This can happen, when the previous flush suspend was
+                * interrupted, the marker is still in the queue and
+                * this flush suspend has been invoked, because we don't
+                * remove the marker at the time of suspend interruption.
+                * We have only one marker per mapped_device, so we can't
+                * start another flush suspend while it is in use.
+                */
+               BUG_ON(!rq->special); /* The marker should be invalidated */
+               DMWARN("Invalidating the previous flush suspend is still in"
+                      " progress.  Please retry later.");
+               r = 0;
+       }
+       spin_unlock_irqrestore(q->queue_lock, flags);
+
+       return r;
+}
+
 /*
  * Functions to lock and unlock any filesystem running on the
  * device.
@@ -1534,7 +2292,7 @@ static int lock_fs(struct mapped_device *md)
 
        WARN_ON(md->frozen_sb);
 
-       md->frozen_sb = freeze_bdev(md->suspended_bdev);
+       md->frozen_sb = freeze_bdev(md->bdev);
        if (IS_ERR(md->frozen_sb)) {
                r = PTR_ERR(md->frozen_sb);
                md->frozen_sb = NULL;
@@ -1543,9 +2301,6 @@ static int lock_fs(struct mapped_device *md)
 
        set_bit(DMF_FROZEN, &md->flags);
 
-       /* don't bdput right now, we don't want the bdev
-        * to go away while it is locked.
-        */
        return 0;
 }
 
@@ -1554,7 +2309,7 @@ static void unlock_fs(struct mapped_device *md)
        if (!test_bit(DMF_FROZEN, &md->flags))
                return;
 
-       thaw_bdev(md->suspended_bdev, md->frozen_sb);
+       thaw_bdev(md->bdev, md->frozen_sb);
        md->frozen_sb = NULL;
        clear_bit(DMF_FROZEN, &md->flags);
 }
@@ -1566,6 +2321,53 @@ static void unlock_fs(struct mapped_device *md)
  * dm_bind_table, dm_suspend must be called to flush any in
  * flight bios and ensure that any further io gets deferred.
  */
+/*
+ * Suspend mechanism in request-based dm.
+ *
+ * After the suspend starts, further incoming requests are kept in
+ * the request_queue and deferred.
+ * Remaining requests in the request_queue at the start of suspend are flushed
+ * if it is flush suspend.
+ * The suspend completes when the following conditions have been satisfied,
+ * so wait for it:
+ *    1. q->in_flight is 0 (which means no in_flight request)
+ *    2. queue has been stopped (which means no request dispatching)
+ *
+ *
+ * Noflush suspend
+ * ---------------
+ * Noflush suspend doesn't need to dispatch remaining requests.
+ * So stop the queue immediately.  Then, wait for all in_flight requests
+ * to be completed or requeued.
+ *
+ * To abort noflush suspend, start the queue.
+ *
+ *
+ * Flush suspend
+ * -------------
+ * Flush suspend needs to dispatch remaining requests.  So stop the queue
+ * after the remaining requests are completed. (Requeued requests must also
+ * be re-dispatched and completed.  Until then, we can't stop the queue.)
+ *
+ * During flushing the remaining requests, further incoming requests are also
+ * inserted to the same queue.  To distinguish which requests are to be
+ * flushed, we insert a marker request to the queue at the time of starting
+ * flush suspend, like a barrier.
+ * The dispatching is blocked when the marker is found on the top of the queue.
+ * And the queue is stopped when all in_flight requests are completed, since
+ * that means the remaining requests are completely flushed.
+ * Then, the marker is removed from the queue.
+ *
+ * To abort flush suspend, we also need to take care of the marker, not only
+ * starting the queue.
+ * We don't remove the marker forcibly from the queue since it's against
+ * the block-layer manner.  Instead, we put an invalidated mark on the marker.
+ * When the invalidated marker is found on the top of the queue, it is
+ * immediately removed from the queue, so it doesn't block dispatching.
+ * Because we have only one marker per mapped_device, we can't start another
+ * flush suspend until the invalidated marker is removed from the queue.
+ * So fail and return with -EBUSY in such a case.
+ */
 int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
 {
        struct dm_table *map = NULL;
@@ -1580,6 +2382,11 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
                goto out_unlock;
        }
 
+       if (dm_request_based(md) && !dm_rq_suspend_available(md, noflush)) {
+               r = -EBUSY;
+               goto out_unlock;
+       }
+
        map = dm_get_table(md);
 
        /*
@@ -1592,24 +2399,14 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
        /* This does not get reverted if there's an error later. */
        dm_table_presuspend_targets(map);
 
-       /* bdget() can stall if the pending I/Os are not flushed */
-       if (!noflush) {
-               md->suspended_bdev = bdget_disk(md->disk, 0);
-               if (!md->suspended_bdev) {
-                       DMWARN("bdget failed in dm_suspend");
-                       r = -ENOMEM;
+       /*
+        * Flush I/O to the device. noflush supersedes do_lockfs,
+        * because lock_fs() needs to flush I/Os.
+        */
+       if (!noflush && do_lockfs) {
+               r = lock_fs(md);
+               if (r)
                        goto out;
-               }
-
-               /*
-                * Flush I/O to the device. noflush supersedes do_lockfs,
-                * because lock_fs() needs to flush I/Os.
-                */
-               if (do_lockfs) {
-                       r = lock_fs(md);
-                       if (r)
-                               goto out;
-               }
        }
 
        /*
@@ -1635,6 +2432,9 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
 
        flush_workqueue(md->wq);
 
+       if (dm_request_based(md))
+               dm_rq_start_suspend(md, noflush);
+
        /*
         * At this point no more requests are entering target request routines.
         * We call dm_wait_for_completion to wait for all existing requests
@@ -1651,6 +2451,9 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
        if (r < 0) {
                dm_queue_flush(md);
 
+               if (dm_request_based(md))
+                       dm_rq_abort_suspend(md, noflush);
+
                unlock_fs(md);
                goto out; /* pushback list is already flushed, so skip flush */
        }
@@ -1666,11 +2469,6 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
        set_bit(DMF_SUSPENDED, &md->flags);
 
 out:
-       if (r && md->suspended_bdev) {
-               bdput(md->suspended_bdev);
-               md->suspended_bdev = NULL;
-       }
-
        dm_table_put(map);
 
 out_unlock:
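
A caller-side illustration (hedged: the suspend flags are defined in drivers/md/dm.h of this era, and error handling is trimmed) of a noflush suspend/resume cycle against the mechanism described above:

	int r;

	/* stop the queue at once; in-flight requests complete or get requeued */
	r = dm_suspend(md, DM_SUSPEND_NOFLUSH_FLAG);
	if (r == -EBUSY)
		return r;	/* a previous flush-suspend marker is still in use */

	/* ... swap tables, etc. ... */

	r = dm_resume(md);	/* restarts the queue; deferred I/O is re-issued */
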
@@ -1697,21 +2495,20 @@ int dm_resume(struct mapped_device *md)
 
        dm_queue_flush(md);
 
-       unlock_fs(md);
+       /*
+        * Flushing deferred I/Os must be done after targets are resumed
+        * so that mapping of targets can work correctly.
+        * Request-based dm is queueing the deferred I/Os in its request_queue.
+        */
+       if (dm_request_based(md))
+               start_queue(md->queue);
 
-       if (md->suspended_bdev) {
-               bdput(md->suspended_bdev);
-               md->suspended_bdev = NULL;
-       }
+       unlock_fs(md);
 
        clear_bit(DMF_SUSPENDED, &md->flags);
 
        dm_table_unplug_all(map);
-
-       dm_kobject_uevent(md);
-
        r = 0;
-
 out:
        dm_table_put(map);
        mutex_unlock(&md->suspend_lock);
@@ -1722,9 +2519,19 @@ out:
 /*-----------------------------------------------------------------
  * Event notification.
  *---------------------------------------------------------------*/
-void dm_kobject_uevent(struct mapped_device *md)
-{
-       kobject_uevent(&disk_to_dev(md->disk)->kobj, KOBJ_CHANGE);
+void dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
+                      unsigned cookie)
+{
+       char udev_cookie[DM_COOKIE_LENGTH];
+       char *envp[] = { udev_cookie, NULL };
+
+       if (!cookie)
+               kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
+       else {
+               snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
+                        DM_COOKIE_ENV_VAR_NAME, cookie);
+               kobject_uevent_env(&disk_to_dev(md->disk)->kobj, action, envp);
+       }
 }
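
An illustrative call (the cookie value is invented; in practice the ioctl layer would pass the cookie it received from userspace):

	dm_kobject_uevent(md, KOBJ_CHANGE, 1234);
	/* emits a CHANGE uevent carrying "DM_COOKIE=1234" */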
 
 uint32_t dm_next_uevent_seq(struct mapped_device *md)
@@ -1778,6 +2585,10 @@ struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
        if (&md->kobj != kobj)
                return NULL;
 
+       if (test_bit(DMF_FREEING, &md->flags) ||
+           test_bit(DMF_DELETING, &md->flags))
+               return NULL;
+
        dm_get(md);
        return md;
 }
@@ -1798,6 +2609,61 @@ int dm_noflush_suspending(struct dm_target *ti)
 }
 EXPORT_SYMBOL_GPL(dm_noflush_suspending);
 
+struct dm_md_mempools *dm_alloc_md_mempools(unsigned type)
+{
+       struct dm_md_mempools *pools = kmalloc(sizeof(*pools), GFP_KERNEL);
+
+       if (!pools)
+               return NULL;
+
+       pools->io_pool = (type == DM_TYPE_BIO_BASED) ?
+                        mempool_create_slab_pool(MIN_IOS, _io_cache) :
+                        mempool_create_slab_pool(MIN_IOS, _rq_bio_info_cache);
+       if (!pools->io_pool)
+               goto free_pools_and_out;
+
+       pools->tio_pool = (type == DM_TYPE_BIO_BASED) ?
+                         mempool_create_slab_pool(MIN_IOS, _tio_cache) :
+                         mempool_create_slab_pool(MIN_IOS, _rq_tio_cache);
+       if (!pools->tio_pool)
+               goto free_io_pool_and_out;
+
+       pools->bs = (type == DM_TYPE_BIO_BASED) ?
+                   bioset_create(16, 0) : bioset_create(MIN_IOS, 0);
+       if (!pools->bs)
+               goto free_tio_pool_and_out;
+
+       return pools;
+
+free_tio_pool_and_out:
+       mempool_destroy(pools->tio_pool);
+
+free_io_pool_and_out:
+       mempool_destroy(pools->io_pool);
+
+free_pools_and_out:
+       kfree(pools);
+
+       return NULL;
+}
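
A hedged sketch of the intended table-load-time pairing (the real call sites live in dm-table.c; DM_TYPE_BIO_BASED matches the checks above): allocate the pools once the table's type is known and let __bind_mempools() take ownership at bind time:

	struct dm_md_mempools *pools = dm_alloc_md_mempools(DM_TYPE_BIO_BASED);

	if (!pools)
		return -ENOMEM;
	/* stash pools in the table; __bind_mempools() moves them into the md
	 * and then calls dm_table_free_md_mempools() on the emptied struct */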
+
+void dm_free_md_mempools(struct dm_md_mempools *pools)
+{
+       if (!pools)
+               return;
+
+       if (pools->io_pool)
+               mempool_destroy(pools->io_pool);
+
+       if (pools->tio_pool)
+               mempool_destroy(pools->tio_pool);
+
+       if (pools->bs)
+               bioset_free(pools->bs);
+
+       kfree(pools);
+}
+
 static struct block_device_operations dm_blk_dops = {
        .open = dm_blk_open,
        .release = dm_blk_close,