/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-uevent.h"
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/moduleparam.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/hdreg.h>

#include <trace/events/block.h>
#define DM_MSG_PREFIX "core"

/*
 * Cookies are numeric values sent with CHANGE and REMOVE
 * uevents while resuming, removing or renaming the device.
 */
#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
#define DM_COOKIE_LENGTH 24
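/*
 * A cookie value of 2998, for instance, reaches userspace as the
 * environment string "DM_COOKIE=2998"; see dm_kobject_uevent() near the
 * bottom of this file, which formats it with "%s=%u".
 */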
static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static DEFINE_SPINLOCK(_minor_lock);
/*
 * One of these is allocated per bio.
 */
struct dm_io {
	struct mapped_device *md;
	int error;
	atomic_t io_count;
	struct bio *bio;
	unsigned long start_time;
};

/*
 * One of these is allocated per target within a bio.  Hopefully
 * this will be simplified out one day.
 */
struct dm_target_io {
	struct dm_io *io;
	struct dm_target *ti;
	union map_info info;
};

/*
 * For request-based dm.
 * One of these is allocated per request.
 */
struct dm_rq_target_io {
	struct mapped_device *md;
	struct dm_target *ti;
	struct request *orig, clone;
	int error;
	union map_info info;
};

/*
 * For request-based dm.
 * One of these is allocated per bio.
 */
struct dm_rq_clone_bio_info {
	struct bio *orig;
	struct dm_rq_target_io *tio;
};

union map_info *dm_get_mapinfo(struct bio *bio)
{
	if (bio && bio->bi_private)
		return &((struct dm_target_io *)bio->bi_private)->info;
	return NULL;
}

union map_info *dm_get_rq_mapinfo(struct request *rq)
{
	if (rq && rq->end_io_data)
		return &((struct dm_rq_target_io *)rq->end_io_data)->info;
	return NULL;
}
EXPORT_SYMBOL_GPL(dm_get_rq_mapinfo);
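/*
 * Per-io context is stashed in bio->bi_private (bio-based) or in
 * rq->end_io_data (request-based); targets recover their map_info
 * through the two accessors above.
 */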
#define MINOR_ALLOCED ((void *)-1)

/*
 * Bits for the md->flags field.
 */
#define DMF_BLOCK_IO_FOR_SUSPEND 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_QUEUE_IO_TO_THREAD 6
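/*
 * DMF_QUEUE_IO_TO_THREAD diverts incoming bios to the per-device
 * workqueue, while DMF_BLOCK_IO_FOR_SUSPEND additionally stops the
 * workqueue itself from issuing them; suspend sets both (see
 * dm_suspend() below).
 */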
/*
 * Work processed by per-device workqueue.
 */
struct mapped_device {
	struct rw_semaphore io_lock;
	struct mutex suspend_lock;
	rwlock_t map_lock;
	atomic_t holders;
	atomic_t open_count;

	unsigned long flags;

	struct request_queue *queue;
	struct gendisk *disk;
	char name[16];

	void *interface_ptr;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
	atomic_t pending;
	wait_queue_head_t wait;
	struct work_struct work;
	struct bio_list deferred;
	spinlock_t deferred_lock;

	/*
	 * An error from the barrier request currently being processed.
	 */
	int barrier_error;

	/*
	 * Processing queue (flush/barriers)
	 */
	struct workqueue_struct *wq;

	/*
	 * The current mapping.
	 */
	struct dm_table *map;

	/*
	 * io objects are allocated from here.
	 */
	mempool_t *io_pool;
	mempool_t *tio_pool;

	struct bio_set *bs;

	/*
	 * Event handling.
	 */
	atomic_t event_nr;
	wait_queue_head_t eventq;
	atomic_t uevent_seq;
	struct list_head uevent_list;
	spinlock_t uevent_lock; /* Protect access to uevent_list */

	/*
	 * freeze/thaw support require holding onto a super block
	 */
	struct super_block *frozen_sb;
	struct block_device *bdev;

	/* forced geometry settings */
	struct hd_geometry geometry;

	/* marker of flush suspend for request-based dm */
	struct request suspend_rq;

	/* For saving the address of __make_request for request based dm */
	make_request_fn *saved_make_request_fn;

	/* sysfs handle */
	struct kobject kobj;

	/* zero-length barrier that will be cloned and submitted to targets */
	struct bio barrier_bio;
};

/*
 * For mempools pre-allocation at the table loading time.
 */
struct dm_md_mempools {
	mempool_t *io_pool;
	mempool_t *tio_pool;
	struct bio_set *bs;
};

#define MIN_IOS 256
static struct kmem_cache *_io_cache;
static struct kmem_cache *_tio_cache;
static struct kmem_cache *_rq_tio_cache;
static struct kmem_cache *_rq_bio_info_cache;
static int __init local_init(void)
{
	int r = -ENOMEM;

	/* allocate a slab for the dm_ios */
	_io_cache = KMEM_CACHE(dm_io, 0);
	if (!_io_cache)
		return r;

	/* allocate a slab for the target ios */
	_tio_cache = KMEM_CACHE(dm_target_io, 0);
	if (!_tio_cache)
		goto out_free_io_cache;

	_rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
	if (!_rq_tio_cache)
		goto out_free_tio_cache;

	_rq_bio_info_cache = KMEM_CACHE(dm_rq_clone_bio_info, 0);
	if (!_rq_bio_info_cache)
		goto out_free_rq_tio_cache;

	r = dm_uevent_init();
	if (r)
		goto out_free_rq_bio_info_cache;

	_major = major;
	r = register_blkdev(_major, _name);
	if (r < 0)
		goto out_uevent_exit;

	if (!_major)
		_major = r;

	return 0;

out_uevent_exit:
	dm_uevent_exit();
out_free_rq_bio_info_cache:
	kmem_cache_destroy(_rq_bio_info_cache);
out_free_rq_tio_cache:
	kmem_cache_destroy(_rq_tio_cache);
out_free_tio_cache:
	kmem_cache_destroy(_tio_cache);
out_free_io_cache:
	kmem_cache_destroy(_io_cache);

	return r;
}

static void local_exit(void)
{
	kmem_cache_destroy(_rq_bio_info_cache);
	kmem_cache_destroy(_rq_tio_cache);
	kmem_cache_destroy(_tio_cache);
	kmem_cache_destroy(_io_cache);
	unregister_blkdev(_major, _name);
	dm_uevent_exit();

	_major = 0;

	DMINFO("cleaned up");
}
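/*
 * Note: local_init() unwinds with gotos in reverse order of setup, so a
 * failure at any step frees exactly the caches created before it;
 * local_exit() releases the same resources on module unload.
 */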
static int (*_inits[])(void) __initdata = {
	local_init,
	dm_target_init,
	dm_linear_init,
	dm_stripe_init,
	dm_io_init,
	dm_kcopyd_init,
	dm_interface_init,
};

static void (*_exits[])(void) = {
	local_exit,
	dm_target_exit,
	dm_linear_exit,
	dm_stripe_exit,
	dm_io_exit,
	dm_kcopyd_exit,
	dm_interface_exit,
};

static int __init dm_init(void)
{
	const int count = ARRAY_SIZE(_inits);
	int r, i;

	for (i = 0; i < count; i++) {
		r = _inits[i]();
		if (r)
			goto bad;
	}

	return 0;

bad:
	while (i--)
		_exits[i]();

	return r;
}

static void __exit dm_exit(void)
{
	int i = ARRAY_SIZE(_exits);

	while (i--)
		_exits[i]();
}
/*-----------------------------------------------------------------
 * Block device functions
 *---------------------------------------------------------------*/
static int dm_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = bdev->bd_disk->private_data;
	if (!md)
		goto out;

	if (test_bit(DMF_FREEING, &md->flags) ||
	    test_bit(DMF_DELETING, &md->flags)) {
		md = NULL;
		goto out;
	}

	dm_get(md);
	atomic_inc(&md->open_count);

out:
	spin_unlock(&_minor_lock);

	return md ? 0 : -ENXIO;
}

static int dm_blk_close(struct gendisk *disk, fmode_t mode)
{
	struct mapped_device *md = disk->private_data;

	atomic_dec(&md->open_count);
	dm_put(md);

	return 0;
}

int dm_open_count(struct mapped_device *md)
{
	return atomic_read(&md->open_count);
}

/*
 * Guarantees nothing is using the device before it's deleted.
 */
int dm_lock_for_deletion(struct mapped_device *md)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (dm_open_count(md))
		r = -EBUSY;
	else
		set_bit(DMF_DELETING, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mapped_device *md = bdev->bd_disk->private_data;

	return dm_get_geometry(md, geo);
}
static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	struct dm_table *map = dm_get_table(md);
	struct dm_target *tgt;
	int r = -ENOTTY;

	if (!map || !dm_table_get_size(map))
		goto out;

	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(map) != 1)
		goto out;

	tgt = dm_table_get_target(map, 0);

	if (dm_suspended(md)) {
		r = -EAGAIN;
		goto out;
	}

	if (tgt->type->ioctl)
		r = tgt->type->ioctl(tgt, cmd, arg);

out:
	dm_table_put(map);

	return r;
}

static struct dm_io *alloc_io(struct mapped_device *md)
{
	return mempool_alloc(md->io_pool, GFP_NOIO);
}

static void free_io(struct mapped_device *md, struct dm_io *io)
{
	mempool_free(io, md->io_pool);
}

static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
{
	mempool_free(tio, md->tio_pool);
}

static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md)
{
	return mempool_alloc(md->tio_pool, GFP_ATOMIC);
}

static void free_rq_tio(struct dm_rq_target_io *tio)
{
	mempool_free(tio, tio->md->tio_pool);
}

static struct dm_rq_clone_bio_info *alloc_bio_info(struct mapped_device *md)
{
	return mempool_alloc(md->io_pool, GFP_ATOMIC);
}

static void free_bio_info(struct dm_rq_clone_bio_info *info)
{
	mempool_free(info, info->tio->md->io_pool);
}
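/*
 * The same two mempools back both modes: for a bio-based table they
 * hold dm_io/dm_target_io objects, for a request-based table they hold
 * dm_rq_clone_bio_info/dm_rq_target_io objects instead (see
 * dm_alloc_md_mempools() at the bottom of this file).
 */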
static void start_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	int cpu;

	io->start_time = jiffies;

	cpu = part_stat_lock();
	part_round_stats(cpu, &dm_disk(md)->part0);
	part_stat_unlock();
	dm_disk(md)->part0.in_flight = atomic_inc_return(&md->pending);
}

static void end_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->bio;
	unsigned long duration = jiffies - io->start_time;
	int pending, cpu;
	int rw = bio_data_dir(bio);

	cpu = part_stat_lock();
	part_round_stats(cpu, &dm_disk(md)->part0);
	part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration);
	part_stat_unlock();

	/*
	 * After this is decremented the bio must not be touched if it is
	 * a barrier.
	 */
	dm_disk(md)->part0.in_flight = pending =
		atomic_dec_return(&md->pending);

	/* nudge anyone waiting on suspend queue */
	if (!pending)
		wake_up(&md->wait);
}

/*
 * Add the bio to the list of deferred io.
 */
static void queue_io(struct mapped_device *md, struct bio *bio)
{
	down_write(&md->io_lock);

	spin_lock_irq(&md->deferred_lock);
	bio_list_add(&md->deferred, bio);
	spin_unlock_irq(&md->deferred_lock);

	if (!test_and_set_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags))
		queue_work(md->wq, &md->work);

	up_write(&md->io_lock);
}
/*
 * Everyone (including functions in this file) should use this
 * function to access the md->map field, and make sure they call
 * dm_table_put() when finished.
 */
struct dm_table *dm_get_table(struct mapped_device *md)
{
	struct dm_table *t;

	read_lock(&md->map_lock);
	t = md->map;
	if (t)
		dm_table_get(t);
	read_unlock(&md->map_lock);

	return t;
}

/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	*geo = md->geometry;

	return 0;
}

/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

	if (geo->start > sz) {
		DMWARN("Start sector is beyond the geometry limits.");
		return -EINVAL;
	}

	md->geometry = *geo;

	return 0;
}

/*-----------------------------------------------------------------
 * CRUD START:
 *   A more elegant soln is in the works that uses the queue
 *   merge fn, unfortunately there are a couple of changes to
 *   the block layer that I want to make for this.  So in the
 *   interests of getting something for people to use I give
 *   you this clearly demarcated crap.
 *---------------------------------------------------------------*/

static int __noflush_suspending(struct mapped_device *md)
{
	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}
/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necessary.
 */
static void dec_pending(struct dm_io *io, int error)
{
	unsigned long flags;
	int io_error;
	struct bio *bio;
	struct mapped_device *md = io->md;

	/* Push-back supersedes any I/O errors */
	if (error && !(io->error > 0 && __noflush_suspending(md)))
		io->error = error;

	if (atomic_dec_and_test(&io->io_count)) {
		if (io->error == DM_ENDIO_REQUEUE) {
			/*
			 * Target requested pushing back the I/O.
			 */
			spin_lock_irqsave(&md->deferred_lock, flags);
			if (__noflush_suspending(md)) {
				if (!bio_barrier(io->bio))
					bio_list_add_head(&md->deferred,
							  io->bio);
			} else
				/* noflush suspend was interrupted. */
				io->error = -EIO;
			spin_unlock_irqrestore(&md->deferred_lock, flags);
		}

		io_error = io->error;
		bio = io->bio;

		if (bio_barrier(bio)) {
			/*
			 * There can be just one barrier request so we use
			 * a per-device variable for error reporting.
			 * Note that you can't touch the bio after end_io_acct
			 */
			if (!md->barrier_error && io_error != -EOPNOTSUPP)
				md->barrier_error = io_error;
			end_io_acct(io);
			free_io(md, io);
		} else {
			end_io_acct(io);
			free_io(md, io);

			if (io_error != DM_ENDIO_REQUEUE) {
				trace_block_bio_complete(md->queue, bio);

				bio_endio(bio, io_error);
			}
		}
	}
}
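/*
 * io_count starts at 1 in __split_and_process_bio() and is bumped once
 * per clone in __map_bio(); the submitter drops its extra reference
 * when cloning is done, so e.g. a bio split across two targets reaches
 * zero (and completes) only after both clones and the submitter have
 * called dec_pending().
 */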
static void clone_endio(struct bio *bio, int error)
{
	int r = 0;
	struct dm_target_io *tio = bio->bi_private;
	struct dm_io *io = tio->io;
	struct mapped_device *md = tio->io->md;
	dm_endio_fn endio = tio->ti->type->end_io;

	if (!bio_flagged(bio, BIO_UPTODATE) && !error)
		error = -EIO;

	if (endio) {
		r = endio(tio->ti, bio, error, &tio->info);
		if (r < 0 || r == DM_ENDIO_REQUEUE)
			/*
			 * error and requeue request are handled
			 * in dec_pending().
			 */
			error = r;
		else if (r == DM_ENDIO_INCOMPLETE)
			/* The target will handle the io */
			return;
		else if (r) {
			DMWARN("unimplemented target endio return value: %d", r);
			BUG();
		}
	}

	/*
	 * Store md for cleanup instead of tio which is about to get freed.
	 */
	bio->bi_private = md->bs;

	free_tio(md, tio);
	bio_put(bio);
	dec_pending(io, error);
}
/*
 * Partial completion handling for request-based dm
 */
static void end_clone_bio(struct bio *clone, int error)
{
	struct dm_rq_clone_bio_info *info = clone->bi_private;
	struct dm_rq_target_io *tio = info->tio;
	struct bio *bio = info->orig;
	unsigned int nr_bytes = info->orig->bi_size;

	bio_put(clone);

	if (tio->error)
		/*
		 * An error has already been detected on the request.
		 * Once an error occurred, just let clone->end_io() handle
		 * the remainder.
		 */
		return;
	else if (error) {
		/*
		 * Don't report the error to the upper layer yet.
		 * The error handling decision is made by the target driver,
		 * when the request is completed.
		 */
		tio->error = error;
		return;
	}

	/*
	 * I/O for the bio successfully completed.
	 * Report the data completion to the upper layer.
	 */

	/*
	 * bios are processed from the head of the list.
	 * So the completing bio should always be rq->bio.
	 * If it's not, something is wrong.
	 */
	if (tio->orig->bio != bio)
		DMERR("bio completion is going in the middle of the request");

	/*
	 * Update the original request.
	 * Do not use blk_end_request() here, because it may complete
	 * the original request before the clone, and break the ordering.
	 */
	blk_update_request(tio->orig, 0, nr_bytes);
}
/*
 * Don't touch any member of the md after calling this function because
 * the md may be freed in dm_put() at the end of this function.
 * Or do dm_get() before calling this function and dm_put() later.
 */
static void rq_completed(struct mapped_device *md, int run_queue)
{
	int wakeup_waiters = 0;
	struct request_queue *q = md->queue;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	if (!queue_in_flight(q))
		wakeup_waiters = 1;
	spin_unlock_irqrestore(q->queue_lock, flags);

	/* nudge anyone waiting on suspend queue */
	if (wakeup_waiters)
		wake_up(&md->wait);

	if (run_queue)
		blk_run_queue(q);

	/*
	 * dm_put() must be at the end of this function. See the comment above
	 */
	dm_put(md);
}

static void dm_unprep_request(struct request *rq)
{
	struct request *clone = rq->special;
	struct dm_rq_target_io *tio = clone->end_io_data;

	rq->special = NULL;
	rq->cmd_flags &= ~REQ_DONTPREP;

	blk_rq_unprep_clone(clone);
	free_rq_tio(tio);
}

/*
 * Requeue the original request of a clone.
 */
void dm_requeue_unmapped_request(struct request *clone)
{
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;
	struct request_queue *q = rq->q;
	unsigned long flags;

	dm_unprep_request(rq);

	spin_lock_irqsave(q->queue_lock, flags);
	if (elv_queue_empty(q))
		blk_plug_device(q);
	blk_requeue_request(q, rq);
	spin_unlock_irqrestore(q->queue_lock, flags);

	rq_completed(md, 0);
}
EXPORT_SYMBOL_GPL(dm_requeue_unmapped_request);

static void __stop_queue(struct request_queue *q)
{
	blk_stop_queue(q);
}

static void stop_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__stop_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

static void __start_queue(struct request_queue *q)
{
	if (blk_queue_stopped(q))
		blk_start_queue(q);
}

static void start_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
/*
 * Complete the clone and the original request.
 * Must be called without queue lock.
 */
static void dm_end_request(struct request *clone, int error)
{
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;

	if (blk_pc_request(rq)) {
		rq->errors = clone->errors;
		rq->resid_len = clone->resid_len;

		if (rq->sense)
			/*
			 * We are using the sense buffer of the original
			 * request.
			 * So setting the length of the sense data is enough.
			 */
			rq->sense_len = clone->sense_len;
	}

	blk_rq_unprep_clone(clone);
	free_rq_tio(tio);

	blk_end_request_all(rq, error);

	rq_completed(md, 1);
}

/*
 * Request completion handler for request-based dm
 */
static void dm_softirq_done(struct request *rq)
{
	struct request *clone = rq->completion_data;
	struct dm_rq_target_io *tio = clone->end_io_data;
	dm_request_endio_fn rq_end_io = tio->ti->type->rq_end_io;
	int error = tio->error;

	if (!(rq->cmd_flags & REQ_FAILED) && rq_end_io)
		error = rq_end_io(tio->ti, clone, error, &tio->info);

	if (error <= 0)
		/* The target wants to complete the I/O */
		dm_end_request(clone, error);
	else if (error == DM_ENDIO_INCOMPLETE)
		/* The target will handle the I/O */
		return;
	else if (error == DM_ENDIO_REQUEUE)
		/* The target wants to requeue the I/O */
		dm_requeue_unmapped_request(clone);
	else {
		DMWARN("unimplemented target endio return value: %d", error);
		BUG();
	}
}

/*
 * Complete the clone and the original request with the error status
 * through softirq context.
 */
static void dm_complete_request(struct request *clone, int error)
{
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct request *rq = tio->orig;

	tio->error = error;
	rq->completion_data = clone;
	blk_complete_request(rq);
}

/*
 * Complete the not-mapped clone and the original request with the error status
 * through softirq context.
 * Target's rq_end_io() function isn't called.
 * This may be used when the target's map_rq() function fails.
 */
void dm_kill_unmapped_request(struct request *clone, int error)
{
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct request *rq = tio->orig;

	rq->cmd_flags |= REQ_FAILED;
	dm_complete_request(clone, error);
}
EXPORT_SYMBOL_GPL(dm_kill_unmapped_request);
/*
 * Called with the queue lock held
 */
static void end_clone_request(struct request *clone, int error)
{
	/*
	 * For just cleaning up the information of the queue in which
	 * the clone was dispatched.
	 * The clone is *NOT* freed actually here because it is alloced from
	 * dm own mempool and REQ_ALLOCED isn't set in clone->cmd_flags.
	 */
	__blk_put_request(clone->q, clone);

	/*
	 * Actual request completion is done in a softirq context which doesn't
	 * hold the queue lock.  Otherwise, deadlock could occur because:
	 *   - another request may be submitted by the upper level driver
	 *     of the stacking during the completion
	 *   - the submission which requires queue lock may be done
	 *     against this queue
	 */
	dm_complete_request(clone, error);
}

static sector_t max_io_len(struct mapped_device *md,
			   sector_t sector, struct dm_target *ti)
{
	sector_t offset = sector - ti->begin;
	sector_t len = ti->len - offset;

	/*
	 * Does the target need to split even further ?
	 */
	if (ti->split_io) {
		sector_t boundary;
		boundary = ((offset + ti->split_io) & ~(ti->split_io - 1))
			   - offset;
		if (len > boundary)
			len = boundary;
	}

	return len;
}
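/*
 * Worked example: with split_io = 4 (a power of two, as the mask
 * arithmetic assumes) and offset = 3, boundary = ((3 + 4) & ~3) - 3 = 1,
 * so at most one sector may be issued before the next 4-sector boundary.
 */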
static void __map_bio(struct dm_target *ti, struct bio *clone,
		      struct dm_target_io *tio)
{
	int r;
	sector_t sector;
	struct mapped_device *md;

	clone->bi_end_io = clone_endio;
	clone->bi_private = tio;

	/*
	 * Map the clone.  If r == 0 we don't need to do
	 * anything, the target has assumed ownership of
	 * this io.
	 */
	atomic_inc(&tio->io->io_count);
	sector = clone->bi_sector;
	r = ti->type->map(ti, clone, &tio->info);
	if (r == DM_MAPIO_REMAPPED) {
		/* the bio has been remapped so dispatch it */

		trace_block_remap(bdev_get_queue(clone->bi_bdev), clone,
				  tio->io->bio->bi_bdev->bd_dev, sector);

		generic_make_request(clone);
	} else if (r < 0 || r == DM_MAPIO_REQUEUE) {
		/* error the io and bail out, or requeue it if needed */
		md = tio->io->md;
		dec_pending(tio->io, r);
		/*
		 * Store bio_set for cleanup.
		 */
		clone->bi_private = md->bs;
		bio_put(clone);
		free_tio(md, tio);
	} else if (r) {
		DMWARN("unimplemented target map return value: %d", r);
		BUG();
	}
}

struct clone_info {
	struct mapped_device *md;
	struct dm_table *map;
	struct bio *bio;
	struct dm_io *io;
	sector_t sector;
	sector_t sector_count;
	unsigned short idx;
};

static void dm_bio_destructor(struct bio *bio)
{
	struct bio_set *bs = bio->bi_private;

	bio_free(bio, bs);
}
/*
 * Creates a little bio that just does part of a bvec.
 */
static struct bio *split_bvec(struct bio *bio, sector_t sector,
			      unsigned short idx, unsigned int offset,
			      unsigned int len, struct bio_set *bs)
{
	struct bio *clone;
	struct bio_vec *bv = bio->bi_io_vec + idx;

	clone = bio_alloc_bioset(GFP_NOIO, 1, bs);
	clone->bi_destructor = dm_bio_destructor;
	*clone->bi_io_vec = *bv;

	clone->bi_sector = sector;
	clone->bi_bdev = bio->bi_bdev;
	clone->bi_rw = bio->bi_rw & ~(1 << BIO_RW_BARRIER);
	clone->bi_vcnt = 1;
	clone->bi_size = to_bytes(len);
	clone->bi_io_vec->bv_offset = offset;
	clone->bi_io_vec->bv_len = clone->bi_size;
	clone->bi_flags |= 1 << BIO_CLONED;

	if (bio_integrity(bio)) {
		bio_integrity_clone(clone, bio, GFP_NOIO);
		bio_integrity_trim(clone,
				   bio_sector_offset(bio, idx, offset), len);
	}

	return clone;
}

/*
 * Creates a bio that consists of a range of complete bvecs.
 */
static struct bio *clone_bio(struct bio *bio, sector_t sector,
			     unsigned short idx, unsigned short bv_count,
			     unsigned int len, struct bio_set *bs)
{
	struct bio *clone;

	clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs);
	__bio_clone(clone, bio);
	clone->bi_rw &= ~(1 << BIO_RW_BARRIER);
	clone->bi_destructor = dm_bio_destructor;
	clone->bi_sector = sector;
	clone->bi_idx = idx;
	clone->bi_vcnt = idx + bv_count;
	clone->bi_size = to_bytes(len);
	clone->bi_flags &= ~(1 << BIO_SEG_VALID);

	if (bio_integrity(bio)) {
		bio_integrity_clone(clone, bio, GFP_NOIO);

		if (idx != bio->bi_idx || clone->bi_size < bio->bi_size)
			bio_integrity_trim(clone,
					   bio_sector_offset(bio, idx, 0), len);
	}

	return clone;
}
static struct dm_target_io *alloc_tio(struct clone_info *ci,
				      struct dm_target *ti)
{
	struct dm_target_io *tio = mempool_alloc(ci->md->tio_pool, GFP_NOIO);

	tio->io = ci->io;
	tio->ti = ti;
	memset(&tio->info, 0, sizeof(tio->info));

	return tio;
}

static void __flush_target(struct clone_info *ci, struct dm_target *ti,
			   unsigned flush_nr)
{
	struct dm_target_io *tio = alloc_tio(ci, ti);
	struct bio *clone;

	tio->info.flush_request = flush_nr;

	clone = bio_alloc_bioset(GFP_NOIO, 0, ci->md->bs);
	__bio_clone(clone, ci->bio);
	clone->bi_destructor = dm_bio_destructor;

	__map_bio(ti, clone, tio);
}

static int __clone_and_map_empty_barrier(struct clone_info *ci)
{
	unsigned target_nr = 0, flush_nr;
	struct dm_target *ti;

	while ((ti = dm_table_get_target(ci->map, target_nr++)))
		for (flush_nr = 0; flush_nr < ti->num_flush_requests;
		     flush_nr++)
			__flush_target(ci, ti, flush_nr);

	ci->sector_count = 0;

	return 0;
}
static int __clone_and_map(struct clone_info *ci)
{
	struct bio *clone, *bio = ci->bio;
	struct dm_target *ti;
	sector_t len = 0, max;
	struct dm_target_io *tio;

	if (unlikely(bio_empty_barrier(bio)))
		return __clone_and_map_empty_barrier(ci);

	ti = dm_table_find_target(ci->map, ci->sector);
	if (!dm_target_is_valid(ti))
		return -EIO;

	max = max_io_len(ci->md, ci->sector, ti);

	/*
	 * Allocate a target io object.
	 */
	tio = alloc_tio(ci, ti);

	if (ci->sector_count <= max) {
		/*
		 * Optimise for the simple case where we can do all of
		 * the remaining io with a single clone.
		 */
		clone = clone_bio(bio, ci->sector, ci->idx,
				  bio->bi_vcnt - ci->idx, ci->sector_count,
				  ci->md->bs);
		__map_bio(ti, clone, tio);
		ci->sector_count = 0;

	} else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
		/*
		 * There are some bvecs that don't span targets.
		 * Do as many of these as possible.
		 */
		int i;
		sector_t remaining = max;
		sector_t bv_len;

		for (i = ci->idx; remaining && (i < bio->bi_vcnt); i++) {
			bv_len = to_sector(bio->bi_io_vec[i].bv_len);

			if (bv_len > remaining)
				break;

			remaining -= bv_len;
			len += bv_len;
		}

		clone = clone_bio(bio, ci->sector, ci->idx, i - ci->idx, len,
				  ci->md->bs);
		__map_bio(ti, clone, tio);

		ci->sector += len;
		ci->sector_count -= len;
		ci->idx = i;

	} else {
		/*
		 * Handle a bvec that must be split between two or more targets.
		 */
		struct bio_vec *bv = bio->bi_io_vec + ci->idx;
		sector_t remaining = to_sector(bv->bv_len);
		unsigned int offset = 0;

		do {
			if (offset) {
				ti = dm_table_find_target(ci->map, ci->sector);
				if (!dm_target_is_valid(ti))
					return -EIO;

				max = max_io_len(ci->md, ci->sector, ti);

				tio = alloc_tio(ci, ti);
			}

			len = min(remaining, max);

			clone = split_bvec(bio, ci->sector, ci->idx,
					   bv->bv_offset + offset, len,
					   ci->md->bs);

			__map_bio(ti, clone, tio);

			ci->sector += len;
			ci->sector_count -= len;
			offset += to_bytes(len);
		} while (remaining -= len);

		ci->idx++;
	}

	return 0;
}
/*
 * Split the bio into several clones and submit it to targets.
 */
static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
{
	struct clone_info ci;
	int error = 0;

	ci.map = dm_get_table(md);
	if (unlikely(!ci.map)) {
		if (!bio_barrier(bio))
			bio_io_error(bio);
		else
			if (!md->barrier_error)
				md->barrier_error = -EIO;
		return;
	}

	ci.md = md;
	ci.bio = bio;
	ci.io = alloc_io(md);
	ci.io->error = 0;
	atomic_set(&ci.io->io_count, 1);
	ci.io->bio = bio;
	ci.io->md = md;
	ci.sector = bio->bi_sector;
	ci.sector_count = bio_sectors(bio);
	if (unlikely(bio_empty_barrier(bio)))
		ci.sector_count = 1;
	ci.idx = bio->bi_idx;

	start_io_acct(ci.io);
	while (ci.sector_count && !error)
		error = __clone_and_map(&ci);

	/* drop the extra reference count */
	dec_pending(ci.io, error);
	dm_table_put(ci.map);
}
/*-----------------------------------------------------------------
 * CRUD END
 *---------------------------------------------------------------*/

static int dm_merge_bvec(struct request_queue *q,
			 struct bvec_merge_data *bvm,
			 struct bio_vec *biovec)
{
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_table(md);
	struct dm_target *ti;
	sector_t max_sectors;
	int max_size = 0;

	if (unlikely(!map))
		goto out;

	ti = dm_table_find_target(map, bvm->bi_sector);
	if (!dm_target_is_valid(ti))
		goto out_table;

	/*
	 * Find maximum amount of I/O that won't need splitting
	 */
	max_sectors = min(max_io_len(md, bvm->bi_sector, ti),
			  (sector_t) BIO_MAX_SECTORS);
	max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;
	if (max_size < 0)
		max_size = 0;

	/*
	 * merge_bvec_fn() returns number of bytes
	 * it can accept at this offset
	 * max is precomputed maximal io size
	 */
	if (max_size && ti->type->merge)
		max_size = ti->type->merge(ti, bvm, biovec, max_size);
	/*
	 * If the target doesn't support merge method and some of the devices
	 * provided their merge_bvec method (we know this by looking at
	 * queue_max_hw_sectors), then we can't allow bios with multiple vector
	 * entries.  So always set max_size to 0, and the code below allows
	 * just one page.
	 */
	else if (queue_max_hw_sectors(q) <= PAGE_SIZE >> 9)
		max_size = 0;

out_table:
	dm_table_put(map);

out:
	/*
	 * Always allow an entire first page
	 */
	if (max_size <= biovec->bv_len && !(bvm->bi_size >> SECTOR_SHIFT))
		max_size = biovec->bv_len;

	return max_size;
}
/*
 * The request function that just remaps the bio built up by
 * dm_merge_bvec.
 */
static int _dm_request(struct request_queue *q, struct bio *bio)
{
	int rw = bio_data_dir(bio);
	struct mapped_device *md = q->queuedata;
	int cpu;

	down_read(&md->io_lock);

	cpu = part_stat_lock();
	part_stat_inc(cpu, &dm_disk(md)->part0, ios[rw]);
	part_stat_add(cpu, &dm_disk(md)->part0, sectors[rw], bio_sectors(bio));
	part_stat_unlock();

	/*
	 * If we're suspended or the thread is processing barriers
	 * we have to queue this io for later.
	 */
	if (unlikely(test_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags)) ||
	    unlikely(bio_barrier(bio))) {
		up_read(&md->io_lock);

		if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) &&
		    bio_rw(bio) == READA) {
			bio_io_error(bio);
			return 0;
		}

		queue_io(md, bio);

		return 0;
	}

	__split_and_process_bio(md, bio);
	up_read(&md->io_lock);
	return 0;
}

static int dm_make_request(struct request_queue *q, struct bio *bio)
{
	struct mapped_device *md = q->queuedata;

	if (unlikely(bio_barrier(bio))) {
		bio_endio(bio, -EOPNOTSUPP);
		return 0;
	}

	return md->saved_make_request_fn(q, bio); /* call __make_request() */
}

static int dm_request_based(struct mapped_device *md)
{
	return blk_queue_stackable(md->queue);
}

static int dm_request(struct request_queue *q, struct bio *bio)
{
	struct mapped_device *md = q->queuedata;

	if (dm_request_based(md))
		return dm_make_request(q, bio);

	return _dm_request(q, bio);
}
void dm_dispatch_request(struct request *rq)
{
	int r;

	if (blk_queue_io_stat(rq->q))
		rq->cmd_flags |= REQ_IO_STAT;

	rq->start_time = jiffies;
	r = blk_insert_cloned_request(rq->q, rq);
	if (r)
		dm_complete_request(rq, r);
}
EXPORT_SYMBOL_GPL(dm_dispatch_request);

static void dm_rq_bio_destructor(struct bio *bio)
{
	struct dm_rq_clone_bio_info *info = bio->bi_private;
	struct mapped_device *md = info->tio->md;

	free_bio_info(info);
	bio_free(bio, md->bs);
}

static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
				 void *data)
{
	struct dm_rq_target_io *tio = data;
	struct mapped_device *md = tio->md;
	struct dm_rq_clone_bio_info *info = alloc_bio_info(md);

	if (!info)
		return -ENOMEM;

	info->orig = bio_orig;
	info->tio = tio;
	bio->bi_end_io = end_clone_bio;
	bio->bi_private = info;
	bio->bi_destructor = dm_rq_bio_destructor;

	return 0;
}

static int setup_clone(struct request *clone, struct request *rq,
		       struct dm_rq_target_io *tio)
{
	int r = blk_rq_prep_clone(clone, rq, tio->md->bs, GFP_ATOMIC,
				  dm_rq_bio_constructor, tio);

	if (r)
		return r;

	clone->cmd = rq->cmd;
	clone->cmd_len = rq->cmd_len;
	clone->sense = rq->sense;
	clone->buffer = rq->buffer;
	clone->end_io = end_clone_request;
	clone->end_io_data = tio;

	return 0;
}

static int dm_rq_flush_suspending(struct mapped_device *md)
{
	return !md->suspend_rq.special;
}
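/*
 * The marker request's ->special field doubles as an "invalidated"
 * flag: it is NULL while a flush suspend is in progress and is set to
 * a dummy value by dm_rq_invalidate_suspend_marker() when the suspend
 * is aborted (see dm_rq_abort_suspend() below).
 */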
/*
 * Called with the queue lock held.
 */
static int dm_prep_fn(struct request_queue *q, struct request *rq)
{
	struct mapped_device *md = q->queuedata;
	struct dm_rq_target_io *tio;
	struct request *clone;

	if (unlikely(rq == &md->suspend_rq)) {
		if (dm_rq_flush_suspending(md))
			return BLKPREP_OK;
		else
			/* The flush suspend was interrupted */
			return BLKPREP_KILL;
	}

	if (unlikely(rq->special)) {
		DMWARN("Already has something in rq->special.");
		return BLKPREP_KILL;
	}

	tio = alloc_rq_tio(md); /* Only one for each original request */
	if (!tio)
		/* -ENOMEM */
		return BLKPREP_DEFER;

	tio->md = md;
	tio->ti = NULL;
	tio->orig = rq;
	tio->error = 0;
	memset(&tio->info, 0, sizeof(tio->info));

	clone = &tio->clone;
	if (setup_clone(clone, rq, tio)) {
		/* -ENOMEM */
		free_rq_tio(tio);
		return BLKPREP_DEFER;
	}

	rq->special = clone;
	rq->cmd_flags |= REQ_DONTPREP;

	return BLKPREP_OK;
}
static void map_request(struct dm_target *ti, struct request *rq,
			struct mapped_device *md)
{
	int r;
	struct request *clone = rq->special;
	struct dm_rq_target_io *tio = clone->end_io_data;

	/*
	 * Hold the md reference here for the in-flight I/O.
	 * We can't rely on the reference count by device opener,
	 * because the device may be closed during the request completion
	 * when all bios are completed.
	 * See the comment in rq_completed() too.
	 */
	dm_get(md);

	tio->ti = ti;
	r = ti->type->map_rq(ti, clone, &tio->info);
	switch (r) {
	case DM_MAPIO_SUBMITTED:
		/* The target has taken the I/O to submit by itself later */
		break;
	case DM_MAPIO_REMAPPED:
		/* The target has remapped the I/O so dispatch it */
		dm_dispatch_request(clone);
		break;
	case DM_MAPIO_REQUEUE:
		/* The target wants to requeue the I/O */
		dm_requeue_unmapped_request(clone);
		break;
	default:
		if (r > 0) {
			DMWARN("unimplemented target map return value: %d", r);
			BUG();
		}

		/* The target wants to complete the I/O */
		dm_kill_unmapped_request(clone, r);
		break;
	}
}
/*
 * q->request_fn for request-based dm.
 * Called with the queue lock held.
 */
static void dm_request_fn(struct request_queue *q)
{
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_table(md);
	struct dm_target *ti;
	struct request *rq;

	/*
	 * For noflush suspend, check blk_queue_stopped() to immediately
	 * quit I/O dispatching.
	 */
	while (!blk_queue_plugged(q) && !blk_queue_stopped(q)) {
		rq = blk_peek_request(q);
		if (!rq)
			goto plug_and_out;

		if (unlikely(rq == &md->suspend_rq)) { /* Flush suspend marker */
			if (queue_in_flight(q))
				/* Not quiet yet.  Wait more */
				goto plug_and_out;

			/* This device should be quiet now */
			__stop_queue(q);
			blk_start_request(rq);
			__blk_end_request_all(rq, 0);
			wake_up(&md->wait);
			goto out;
		}

		ti = dm_table_find_target(map, blk_rq_pos(rq));
		if (ti->type->busy && ti->type->busy(ti))
			goto plug_and_out;

		blk_start_request(rq);
		spin_unlock(q->queue_lock);
		map_request(ti, rq, md);
		spin_lock_irq(q->queue_lock);
	}

	goto out;

plug_and_out:
	if (!elv_queue_empty(q))
		/* Some requests still remain, retry later */
		blk_plug_device(q);

out:
	dm_table_put(map);
}
int dm_underlying_device_busy(struct request_queue *q)
{
	return blk_lld_busy(q);
}
EXPORT_SYMBOL_GPL(dm_underlying_device_busy);

static int dm_lld_busy(struct request_queue *q)
{
	int r;
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_table(md);

	if (!map || test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))
		r = 1;
	else
		r = dm_table_any_busy_target(map);

	dm_table_put(map);

	return r;
}

static void dm_unplug_all(struct request_queue *q)
{
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_table(md);

	if (map) {
		if (dm_request_based(md))
			generic_unplug_device(q);

		dm_table_unplug_all(map);
		dm_table_put(map);
	}
}

static int dm_any_congested(void *congested_data, int bdi_bits)
{
	int r = bdi_bits;
	struct mapped_device *md = congested_data;
	struct dm_table *map;

	if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
		map = dm_get_table(md);
		if (map) {
			/*
			 * Request-based dm cares about only own queue for
			 * the query about congestion status of request_queue
			 */
			if (dm_request_based(md))
				r = md->queue->backing_dev_info.state &
				    bdi_bits;
			else
				r = dm_table_any_congested(map, bdi_bits);

			dm_table_put(map);
		}
	}

	return r;
}
/*-----------------------------------------------------------------
 * An IDR is used to keep track of allocated minor numbers.
 *---------------------------------------------------------------*/
static DEFINE_IDR(_minor_idr);

static void free_minor(int minor)
{
	spin_lock(&_minor_lock);
	idr_remove(&_minor_idr, minor);
	spin_unlock(&_minor_lock);
}

/*
 * See if the device with a specific minor # is free.
 */
static int specific_minor(int minor)
{
	int r, m;

	if (minor >= (1 << MINORBITS))
		return -EINVAL;

	r = idr_pre_get(&_minor_idr, GFP_KERNEL);
	if (!r)
		return -ENOMEM;

	spin_lock(&_minor_lock);

	if (idr_find(&_minor_idr, minor)) {
		r = -EBUSY;
		goto out;
	}

	r = idr_get_new_above(&_minor_idr, MINOR_ALLOCED, minor, &m);
	if (r)
		goto out;

	if (m != minor) {
		idr_remove(&_minor_idr, m);
		r = -EBUSY;
		goto out;
	}

out:
	spin_unlock(&_minor_lock);
	return r;
}

static int next_free_minor(int *minor)
{
	int r, m;

	r = idr_pre_get(&_minor_idr, GFP_KERNEL);
	if (!r)
		return -ENOMEM;

	spin_lock(&_minor_lock);

	r = idr_get_new(&_minor_idr, MINOR_ALLOCED, &m);
	if (r)
		goto out;

	if (m >= (1 << MINORBITS)) {
		idr_remove(&_minor_idr, m);
		r = -ENOSPC;
		goto out;
	}

	*minor = m;

out:
	spin_unlock(&_minor_lock);
	return r;
}
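/*
 * The old two-step IDR API is used here: idr_pre_get() preloads tree
 * nodes with GFP_KERNEL outside the spinlock, then idr_get_new()/
 * idr_get_new_above() performs the actual allocation under _minor_lock.
 */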
static struct block_device_operations dm_blk_dops;

static void dm_wq_work(struct work_struct *work);
/*
 * Allocate and initialise a blank device with a given minor.
 */
static struct mapped_device *alloc_dev(int minor)
{
	int r;
	struct mapped_device *md = kzalloc(sizeof(*md), GFP_KERNEL);
	void *old_md;

	if (!md) {
		DMWARN("unable to allocate device, out of memory.");
		return NULL;
	}

	if (!try_module_get(THIS_MODULE))
		goto bad_module_get;

	/* get a minor number for the dev */
	if (minor == DM_ANY_MINOR)
		r = next_free_minor(&minor);
	else
		r = specific_minor(minor);
	if (r < 0)
		goto bad_minor;

	init_rwsem(&md->io_lock);
	mutex_init(&md->suspend_lock);
	spin_lock_init(&md->deferred_lock);
	rwlock_init(&md->map_lock);
	atomic_set(&md->holders, 1);
	atomic_set(&md->open_count, 0);
	atomic_set(&md->event_nr, 0);
	atomic_set(&md->uevent_seq, 0);
	INIT_LIST_HEAD(&md->uevent_list);
	spin_lock_init(&md->uevent_lock);

	md->queue = blk_init_queue(dm_request_fn, NULL);
	if (!md->queue)
		goto bad_queue;

	/*
	 * Request-based dm devices cannot be stacked on top of bio-based dm
	 * devices.  The type of this dm device has not been decided yet,
	 * although we initialized the queue using blk_init_queue().
	 * The type is decided at the first table loading time.
	 * To prevent problematic device stacking, clear the queue flag
	 * for request stacking support until then.
	 *
	 * This queue is new, so no concurrency on the queue_flags.
	 */
	queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue);
	md->saved_make_request_fn = md->queue->make_request_fn;
	md->queue->queuedata = md;
	md->queue->backing_dev_info.congested_fn = dm_any_congested;
	md->queue->backing_dev_info.congested_data = md;
	blk_queue_make_request(md->queue, dm_request);
	blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
	md->queue->unplug_fn = dm_unplug_all;
	blk_queue_merge_bvec(md->queue, dm_merge_bvec);
	blk_queue_softirq_done(md->queue, dm_softirq_done);
	blk_queue_prep_rq(md->queue, dm_prep_fn);
	blk_queue_lld_busy(md->queue, dm_lld_busy);

	md->disk = alloc_disk(1);
	if (!md->disk)
		goto bad_disk;

	atomic_set(&md->pending, 0);
	init_waitqueue_head(&md->wait);
	INIT_WORK(&md->work, dm_wq_work);
	init_waitqueue_head(&md->eventq);

	md->disk->major = _major;
	md->disk->first_minor = minor;
	md->disk->fops = &dm_blk_dops;
	md->disk->queue = md->queue;
	md->disk->private_data = md;
	sprintf(md->disk->disk_name, "dm-%d", minor);
	add_disk(md->disk);
	format_dev_t(md->name, MKDEV(_major, minor));

	md->wq = create_singlethread_workqueue("kdmflush");
	if (!md->wq)
		goto bad_thread;

	md->bdev = bdget_disk(md->disk, 0);
	if (!md->bdev)
		goto bad_bdev;

	/* Populate the mapping, nobody knows we exist yet */
	spin_lock(&_minor_lock);
	old_md = idr_replace(&_minor_idr, md, minor);
	spin_unlock(&_minor_lock);

	BUG_ON(old_md != MINOR_ALLOCED);

	return md;
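	/*
	 * Until the idr_replace() above, the minor maps to the
	 * MINOR_ALLOCED sentinel reserved in specific_minor()/
	 * next_free_minor(); lookups in dm_find_md() treat that
	 * sentinel as "not there yet".
	 */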
bad_bdev:
	destroy_workqueue(md->wq);
bad_thread:
	del_gendisk(md->disk);
	put_disk(md->disk);
bad_disk:
	blk_cleanup_queue(md->queue);
bad_queue:
	free_minor(minor);
bad_minor:
	module_put(THIS_MODULE);
bad_module_get:
	kfree(md);
	return NULL;
}
static void unlock_fs(struct mapped_device *md);

static void free_dev(struct mapped_device *md)
{
	int minor = MINOR(disk_devt(md->disk));

	unlock_fs(md);
	bdput(md->bdev);
	destroy_workqueue(md->wq);
	if (md->tio_pool)
		mempool_destroy(md->tio_pool);
	if (md->io_pool)
		mempool_destroy(md->io_pool);
	if (md->bs)
		bioset_free(md->bs);
	blk_integrity_unregister(md->disk);
	del_gendisk(md->disk);
	free_minor(minor);

	spin_lock(&_minor_lock);
	md->disk->private_data = NULL;
	spin_unlock(&_minor_lock);

	put_disk(md->disk);
	blk_cleanup_queue(md->queue);
	module_put(THIS_MODULE);
	kfree(md);
}
static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
{
	struct dm_md_mempools *p;

	if (md->io_pool && md->tio_pool && md->bs)
		/* the md already has necessary mempools */
		goto out;

	p = dm_table_get_md_mempools(t);
	BUG_ON(!p || md->io_pool || md->tio_pool || md->bs);

	md->io_pool = p->io_pool;
	p->io_pool = NULL;
	md->tio_pool = p->tio_pool;
	p->tio_pool = NULL;
	md->bs = p->bs;
	p->bs = NULL;

out:
	/* mempool bind completed, the table no longer needs any mempools */
	dm_table_free_md_mempools(t);
}
/*
 * Bind a table to the device.
 */
static void event_callback(void *context)
{
	unsigned long flags;
	LIST_HEAD(uevents);
	struct mapped_device *md = (struct mapped_device *) context;

	spin_lock_irqsave(&md->uevent_lock, flags);
	list_splice_init(&md->uevent_list, &uevents);
	spin_unlock_irqrestore(&md->uevent_lock, flags);

	dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);

	atomic_inc(&md->event_nr);
	wake_up(&md->eventq);
}

static void __set_size(struct mapped_device *md, sector_t size)
{
	set_capacity(md->disk, size);

	mutex_lock(&md->bdev->bd_inode->i_mutex);
	i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
	mutex_unlock(&md->bdev->bd_inode->i_mutex);
}
static int __bind(struct mapped_device *md, struct dm_table *t,
		  struct queue_limits *limits)
{
	struct request_queue *q = md->queue;
	sector_t size;

	size = dm_table_get_size(t);

	/*
	 * Wipe any geometry if the size of the table changed.
	 */
	if (size != get_capacity(md->disk))
		memset(&md->geometry, 0, sizeof(md->geometry));

	__set_size(md, size);

	if (!size) {
		dm_table_destroy(t);
		return 0;
	}

	dm_table_event_callback(t, event_callback, md);

	/*
	 * The queue hasn't been stopped yet, if the old table type wasn't
	 * for request-based during suspension.  So stop it to prevent
	 * I/O mapping before resume.
	 * This must be done before setting the queue restrictions,
	 * because request-based dm may be run just after the setting.
	 */
	if (dm_table_request_based(t) && !blk_queue_stopped(q))
		stop_queue(q);

	__bind_mempools(md, t);

	write_lock(&md->map_lock);
	md->map = t;
	dm_table_set_restrictions(t, q, limits);
	write_unlock(&md->map_lock);

	return 0;
}

static void __unbind(struct mapped_device *md)
{
	struct dm_table *map = md->map;

	if (!map)
		return;

	dm_table_event_callback(map, NULL, NULL);
	write_lock(&md->map_lock);
	md->map = NULL;
	write_unlock(&md->map_lock);
	dm_table_destroy(map);
}
/*
 * Constructor for a new device.
 */
int dm_create(int minor, struct mapped_device **result)
{
	struct mapped_device *md;

	md = alloc_dev(minor);
	if (!md)
		return -ENXIO;

	dm_sysfs_init(md);

	*result = md;
	return 0;
}

static struct mapped_device *dm_find_md(dev_t dev)
{
	struct mapped_device *md;
	unsigned minor = MINOR(dev);

	if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
		return NULL;

	spin_lock(&_minor_lock);

	md = idr_find(&_minor_idr, minor);
	if (md && (md == MINOR_ALLOCED ||
		   (MINOR(disk_devt(dm_disk(md))) != minor) ||
		   test_bit(DMF_FREEING, &md->flags))) {
		md = NULL;
		goto out;
	}

out:
	spin_unlock(&_minor_lock);

	return md;
}

struct mapped_device *dm_get_md(dev_t dev)
{
	struct mapped_device *md = dm_find_md(dev);

	if (md)
		dm_get(md);

	return md;
}
void *dm_get_mdptr(struct mapped_device *md)
{
	return md->interface_ptr;
}

void dm_set_mdptr(struct mapped_device *md, void *ptr)
{
	md->interface_ptr = ptr;
}

void dm_get(struct mapped_device *md)
{
	atomic_inc(&md->holders);
}

const char *dm_device_name(struct mapped_device *md)
{
	return md->name;
}
EXPORT_SYMBOL_GPL(dm_device_name);

void dm_put(struct mapped_device *md)
{
	struct dm_table *map;

	BUG_ON(test_bit(DMF_FREEING, &md->flags));

	if (atomic_dec_and_lock(&md->holders, &_minor_lock)) {
		map = dm_get_table(md);
		idr_replace(&_minor_idr, MINOR_ALLOCED,
			    MINOR(disk_devt(dm_disk(md))));
		set_bit(DMF_FREEING, &md->flags);
		spin_unlock(&_minor_lock);
		if (!dm_suspended(md)) {
			dm_table_presuspend_targets(map);
			dm_table_postsuspend_targets(map);
		}
		dm_sysfs_exit(md);
		dm_table_put(map);
		__unbind(md);
		free_dev(md);
	}
}
EXPORT_SYMBOL_GPL(dm_put);
static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
{
	int r = 0;
	DECLARE_WAITQUEUE(wait, current);
	struct request_queue *q = md->queue;
	unsigned long flags;

	dm_unplug_all(md->queue);

	add_wait_queue(&md->wait, &wait);

	while (1) {
		set_current_state(interruptible);

		smp_mb();
		if (dm_request_based(md)) {
			spin_lock_irqsave(q->queue_lock, flags);
			if (!queue_in_flight(q) && blk_queue_stopped(q)) {
				spin_unlock_irqrestore(q->queue_lock, flags);
				break;
			}
			spin_unlock_irqrestore(q->queue_lock, flags);
		} else if (!atomic_read(&md->pending))
			break;

		if (interruptible == TASK_INTERRUPTIBLE &&
		    signal_pending(current)) {
			r = -EINTR;
			break;
		}

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	remove_wait_queue(&md->wait, &wait);

	return r;
}

static void dm_flush(struct mapped_device *md)
{
	dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);

	bio_init(&md->barrier_bio);
	md->barrier_bio.bi_bdev = md->bdev;
	md->barrier_bio.bi_rw = WRITE_BARRIER;
	__split_and_process_bio(md, &md->barrier_bio);

	dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
}
static void process_barrier(struct mapped_device *md, struct bio *bio)
{
	md->barrier_error = 0;

	dm_flush(md);

	if (!bio_empty_barrier(bio)) {
		__split_and_process_bio(md, bio);
		dm_flush(md);
	}

	if (md->barrier_error != DM_ENDIO_REQUEUE)
		bio_endio(bio, md->barrier_error);
	else {
		spin_lock_irq(&md->deferred_lock);
		bio_list_add_head(&md->deferred, bio);
		spin_unlock_irq(&md->deferred_lock);
	}
}
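/*
 * If the barrier failed with DM_ENDIO_REQUEUE, a noflush suspend is in
 * progress: the barrier goes back on the head of the deferred list so
 * it is the first bio reissued after resume.
 */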
/*
 * Process the deferred bios
 */
static void dm_wq_work(struct work_struct *work)
{
	struct mapped_device *md = container_of(work, struct mapped_device,
						work);
	struct bio *c;

	down_write(&md->io_lock);

	while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
		spin_lock_irq(&md->deferred_lock);
		c = bio_list_pop(&md->deferred);
		spin_unlock_irq(&md->deferred_lock);

		if (!c) {
			clear_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags);
			break;
		}

		up_write(&md->io_lock);

		if (dm_request_based(md))
			generic_make_request(c);
		else {
			if (bio_barrier(c))
				process_barrier(md, c);
			else
				__split_and_process_bio(md, c);
		}

		down_write(&md->io_lock);
	}

	up_write(&md->io_lock);
}

static void dm_queue_flush(struct mapped_device *md)
{
	clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	smp_mb__after_clear_bit();
	queue_work(md->wq, &md->work);
}
/*
 * Swap in a new table (destroying old one).
 */
int dm_swap_table(struct mapped_device *md, struct dm_table *table)
{
	struct queue_limits limits;
	int r = -EINVAL;

	mutex_lock(&md->suspend_lock);

	/* device must be suspended */
	if (!dm_suspended(md))
		goto out;

	r = dm_calculate_queue_limits(table, &limits);
	if (r)
		goto out;

	/* cannot change the device type, once a table is bound */
	if (md->map &&
	    (dm_table_get_type(md->map) != dm_table_get_type(table))) {
		DMWARN("can't change the device type after a table is bound");
		goto out;
	}

	/*
	 * It is enough that blk_queue_ordered() is called only once when
	 * the first bio-based table is bound.
	 */
	if (!md->map && dm_table_bio_based(table))
		blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN, NULL);

	__unbind(md);
	r = __bind(md, table, &limits);

out:
	mutex_unlock(&md->suspend_lock);
	return r;
}
static void dm_rq_invalidate_suspend_marker(struct mapped_device *md)
{
	md->suspend_rq.special = (void *)0x1;
}

static void dm_rq_abort_suspend(struct mapped_device *md, int noflush)
{
	struct request_queue *q = md->queue;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	if (!noflush)
		dm_rq_invalidate_suspend_marker(md);
	__start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

static void dm_rq_start_suspend(struct mapped_device *md, int noflush)
{
	struct request *rq = &md->suspend_rq;
	struct request_queue *q = md->queue;

	if (noflush)
		stop_queue(q);
	else {
		blk_rq_init(q, rq);
		blk_insert_request(q, rq, 0, NULL);
	}
}

static int dm_rq_suspend_available(struct mapped_device *md, int noflush)
{
	int r = 1;
	struct request *rq = &md->suspend_rq;
	struct request_queue *q = md->queue;
	unsigned long flags;

	if (noflush)
		return r;

	/* The marker must be protected by queue lock if it is in use */
	spin_lock_irqsave(q->queue_lock, flags);
	if (unlikely(rq->ref_count)) {
		/*
		 * This can happen, when the previous flush suspend was
		 * interrupted, the marker is still in the queue and
		 * this flush suspend has been invoked, because we don't
		 * remove the marker at the time of suspend interruption.
		 * We have only one marker per mapped_device, so we can't
		 * start another flush suspend while it is in use.
		 */
		BUG_ON(!rq->special); /* The marker should be invalidated */
		DMWARN("Invalidating the previous flush suspend is still in"
		       " progress.  Please retry later.");
		r = 0;
	}
	spin_unlock_irqrestore(q->queue_lock, flags);

	return r;
}
/*
 * Functions to lock and unlock any filesystem running on the
 * device.
 */
static int lock_fs(struct mapped_device *md)
{
	int r;

	WARN_ON(md->frozen_sb);

	md->frozen_sb = freeze_bdev(md->bdev);
	if (IS_ERR(md->frozen_sb)) {
		r = PTR_ERR(md->frozen_sb);
		md->frozen_sb = NULL;
		return r;
	}

	set_bit(DMF_FROZEN, &md->flags);

	return 0;
}

static void unlock_fs(struct mapped_device *md)
{
	if (!test_bit(DMF_FROZEN, &md->flags))
		return;

	thaw_bdev(md->bdev, md->frozen_sb);
	md->frozen_sb = NULL;
	clear_bit(DMF_FROZEN, &md->flags);
}
/*
 * We need to be able to change a mapping table under a mounted
 * filesystem.  For example we might want to move some data in
 * the background.  Before the table can be swapped with
 * dm_bind_table, dm_suspend must be called to flush any in
 * flight bios and ensure that any further io gets deferred.
 */
/*
 * Suspend mechanism in request-based dm.
 *
 * After the suspend starts, further incoming requests are kept in
 * the request_queue and deferred.
 * Remaining requests in the request_queue at the start of suspend are flushed
 * if it is flush suspend.
 * The suspend completes when the following conditions have been satisfied,
 * so wait for it:
 *    1. q->in_flight is 0 (which means no in_flight request)
 *    2. queue has been stopped (which means no request dispatching)
 *
 * Noflush suspend
 * ---------------
 * Noflush suspend doesn't need to dispatch remaining requests.
 * So stop the queue immediately.  Then, wait for all in_flight requests
 * to be completed or requeued.
 *
 * To abort noflush suspend, start the queue.
 *
 * Flush suspend
 * -------------
 * Flush suspend needs to dispatch remaining requests.  So stop the queue
 * after the remaining requests are completed.  (Requeued request must be also
 * re-dispatched and completed.  Until then, we can't stop the queue.)
 *
 * During flushing the remaining requests, further incoming requests are also
 * inserted to the same queue.  To distinguish which requests are to be
 * flushed, we insert a marker request to the queue at the time of starting
 * flush suspend, like a barrier.
 * The dispatching is blocked when the marker is found on the top of the queue.
 * And the queue is stopped when all in_flight requests are completed, since
 * that means the remaining requests are completely flushed.
 * Then, the marker is removed from the queue.
 *
 * To abort flush suspend, we also need to take care of the marker, not only
 * starting the queue.
 * We don't remove the marker forcibly from the queue since it's against
 * the block-layer manner.  Instead, we put an invalidated mark on the marker.
 * When the invalidated marker is found on the top of the queue, it is
 * immediately removed from the queue, so it doesn't block dispatching.
 * Because we have only one marker per mapped_device, we can't start another
 * flush suspend until the invalidated marker is removed from the queue.
 * So fail and return with -EBUSY in such a case.
 */
int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
{
	struct dm_table *map = NULL;
	int r = 0;
	int do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG ? 1 : 0;
	int noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG ? 1 : 0;

	mutex_lock(&md->suspend_lock);

	if (dm_suspended(md)) {
		r = -EINVAL;
		goto out_unlock;
	}

	if (dm_request_based(md) && !dm_rq_suspend_available(md, noflush)) {
		r = -EBUSY;
		goto out_unlock;
	}

	map = dm_get_table(md);

	/*
	 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
	 * This flag is cleared before dm_suspend returns.
	 */
	if (noflush)
		set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);

	/* This does not get reverted if there's an error later. */
	dm_table_presuspend_targets(map);

	/*
	 * Flush I/O to the device.  noflush supersedes do_lockfs,
	 * because lock_fs() needs to flush I/Os.
	 */
	if (!noflush && do_lockfs) {
		r = lock_fs(md);
		if (r)
			goto out;
	}

	/*
	 * Here we must make sure that no processes are submitting requests
	 * to target drivers i.e. no one may be executing
	 * __split_and_process_bio. This is called from dm_request and
	 * dm_wq_work.
	 *
	 * To get all processes out of __split_and_process_bio in dm_request,
	 * we take the write lock. To prevent any process from reentering
	 * __split_and_process_bio from dm_request, we set
	 * DMF_QUEUE_IO_TO_THREAD.
	 *
	 * To quiesce the thread (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND
	 * and call flush_workqueue(md->wq). flush_workqueue will wait until
	 * dm_wq_work exits and DMF_BLOCK_IO_FOR_SUSPEND will prevent any
	 * further calls to __split_and_process_bio from dm_wq_work.
	 */
	down_write(&md->io_lock);
	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	set_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags);
	up_write(&md->io_lock);

	flush_workqueue(md->wq);

	if (dm_request_based(md))
		dm_rq_start_suspend(md, noflush);

	/*
	 * At this point no more requests are entering target request routines.
	 * We call dm_wait_for_completion to wait for all existing requests
	 * to finish.
	 */
	r = dm_wait_for_completion(md, TASK_INTERRUPTIBLE);

	down_write(&md->io_lock);
	if (noflush)
		clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
	up_write(&md->io_lock);

	/* were we interrupted ? */
	if (r < 0) {
		dm_queue_flush(md);

		if (dm_request_based(md))
			dm_rq_abort_suspend(md, noflush);

		unlock_fs(md);
		goto out; /* pushback list is already flushed, so skip flush */
	}

	/*
	 * If dm_wait_for_completion returned 0, the device is completely
	 * quiescent now. There is no request-processing activity. All new
	 * requests are being added to md->deferred list.
	 */

	dm_table_postsuspend_targets(map);

	set_bit(DMF_SUSPENDED, &md->flags);

out:
	dm_table_put(map);

out_unlock:
	mutex_unlock(&md->suspend_lock);
	return r;
}
int dm_resume(struct mapped_device *md)
{
	int r = -EINVAL;
	struct dm_table *map = NULL;

	mutex_lock(&md->suspend_lock);
	if (!dm_suspended(md))
		goto out;

	map = dm_get_table(md);
	if (!map || !dm_table_get_size(map))
		goto out;

	r = dm_table_resume_targets(map);
	if (r)
		goto out;

	dm_queue_flush(md);

	/*
	 * Flushing deferred I/Os must be done after targets are resumed
	 * so that mapping of targets can work correctly.
	 * Request-based dm is queueing the deferred I/Os in its request_queue.
	 */
	if (dm_request_based(md))
		start_queue(md->queue);

	unlock_fs(md);

	clear_bit(DMF_SUSPENDED, &md->flags);

	dm_table_unplug_all(map);
	r = 0;

out:
	dm_table_put(map);
	mutex_unlock(&md->suspend_lock);

	return r;
}
/*-----------------------------------------------------------------
 * Event notification.
 *---------------------------------------------------------------*/
void dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
		       unsigned cookie)
{
	char udev_cookie[DM_COOKIE_LENGTH];
	char *envp[] = { udev_cookie, NULL };

	if (!cookie)
		kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
	else {
		snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
			 DM_COOKIE_ENV_VAR_NAME, cookie);
		kobject_uevent_env(&disk_to_dev(md->disk)->kobj, action, envp);
	}
}
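/*
 * With cookie 2998, for example, the event carries the extra
 * environment string "DM_COOKIE=2998", which userspace (e.g.
 * libdevmapper's udev synchronisation) can use to match the uevent to
 * the operation that triggered it.
 */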
uint32_t dm_next_uevent_seq(struct mapped_device *md)
{
	return atomic_add_return(1, &md->uevent_seq);
}

uint32_t dm_get_event_nr(struct mapped_device *md)
{
	return atomic_read(&md->event_nr);
}

int dm_wait_event(struct mapped_device *md, int event_nr)
{
	return wait_event_interruptible(md->eventq,
			(event_nr != atomic_read(&md->event_nr)));
}

void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
{
	unsigned long flags;

	spin_lock_irqsave(&md->uevent_lock, flags);
	list_add(elist, &md->uevent_list);
	spin_unlock_irqrestore(&md->uevent_lock, flags);
}
/*
 * The gendisk is only valid as long as you have a reference
 * count on 'md'.
 */
struct gendisk *dm_disk(struct mapped_device *md)
{
	return md->disk;
}

struct kobject *dm_kobject(struct mapped_device *md)
{
	return &md->kobj;
}

/*
 * struct mapped_device should not be exported outside of dm.c
 * so use this check to verify that kobj is part of md structure
 */
struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
{
	struct mapped_device *md;

	md = container_of(kobj, struct mapped_device, kobj);
	if (&md->kobj != kobj)
		return NULL;

	if (test_bit(DMF_FREEING, &md->flags) ||
	    test_bit(DMF_DELETING, &md->flags))
		return NULL;

	dm_get(md);
	return md;
}

int dm_suspended(struct mapped_device *md)
{
	return test_bit(DMF_SUSPENDED, &md->flags);
}

int dm_noflush_suspending(struct dm_target *ti)
{
	struct mapped_device *md = dm_table_get_md(ti->table);
	int r = __noflush_suspending(md);

	dm_put(md);

	return r;
}
EXPORT_SYMBOL_GPL(dm_noflush_suspending);
struct dm_md_mempools *dm_alloc_md_mempools(unsigned type)
{
	struct dm_md_mempools *pools = kmalloc(sizeof(*pools), GFP_KERNEL);

	if (!pools)
		return NULL;

	pools->io_pool = (type == DM_TYPE_BIO_BASED) ?
			 mempool_create_slab_pool(MIN_IOS, _io_cache) :
			 mempool_create_slab_pool(MIN_IOS, _rq_bio_info_cache);
	if (!pools->io_pool)
		goto free_pools_and_out;

	pools->tio_pool = (type == DM_TYPE_BIO_BASED) ?
			  mempool_create_slab_pool(MIN_IOS, _tio_cache) :
			  mempool_create_slab_pool(MIN_IOS, _rq_tio_cache);
	if (!pools->tio_pool)
		goto free_io_pool_and_out;

	pools->bs = (type == DM_TYPE_BIO_BASED) ?
		    bioset_create(16, 0) : bioset_create(MIN_IOS, 0);
	if (!pools->bs)
		goto free_tio_pool_and_out;

	return pools;

free_tio_pool_and_out:
	mempool_destroy(pools->tio_pool);

free_io_pool_and_out:
	mempool_destroy(pools->io_pool);

free_pools_and_out:
	kfree(pools);

	return NULL;
}

void dm_free_md_mempools(struct dm_md_mempools *pools)
{
	if (!pools)
		return;

	if (pools->io_pool)
		mempool_destroy(pools->io_pool);

	if (pools->tio_pool)
		mempool_destroy(pools->tio_pool);

	if (pools->bs)
		bioset_free(pools->bs);

	kfree(pools);
}
static struct block_device_operations dm_blk_dops = {
	.open = dm_blk_open,
	.release = dm_blk_close,
	.ioctl = dm_blk_ioctl,
	.getgeo = dm_blk_getgeo,
	.owner = THIS_MODULE
};

EXPORT_SYMBOL(dm_get_mapinfo);

/*
 * module hooks
 */
module_init(dm_init);
module_exit(dm_exit);

module_param(major, uint, 0);
MODULE_PARM_DESC(major, "The major number of the device mapper");
MODULE_DESCRIPTION(DM_NAME " driver");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");