dm: convey that all flushes are processed as empty
Author: Mike Snitzer <snitzer@redhat.com>
Wed, 8 Sep 2010 16:07:01 +0000 (18:07 +0200)
Committer: Jens Axboe <jaxboe@fusionio.com>
Fri, 10 Sep 2010 10:35:38 +0000 (12:35 +0200)
Rename __clone_and_map_flush to __clone_and_map_empty_flush for added
clarity.

Simplify logic associated with REQ_FLUSH conditionals.

Introduce a BUG_ON() and add a few more helpful comments to the code
so that it is clear that all flushes are empty.

Cleanup __split_and_process_bio() so that an empty flush isn't processed
by a 'sector_count' focused while loop.

Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
drivers/md/dm.c

index cd2f7e7..f934e98 100644
@@ -621,16 +621,17 @@ static void dec_pending(struct dm_io *io, int error)
                if (io_error == DM_ENDIO_REQUEUE)
                        return;
 
-               if (!(bio->bi_rw & REQ_FLUSH) || !bio->bi_size) {
-                       trace_block_bio_complete(md->queue, bio);
-                       bio_endio(bio, io_error);
-               } else {
+               if ((bio->bi_rw & REQ_FLUSH) && bio->bi_size) {
                        /*
                         * Preflush done for flush with data, reissue
                         * without REQ_FLUSH.
                         */
                        bio->bi_rw &= ~REQ_FLUSH;
                        queue_io(md, bio);
+               } else {
+                       /* done with normal IO or empty flush */
+                       trace_block_bio_complete(md->queue, bio);
+                       bio_endio(bio, io_error);
                }
        }
 }
@@ -1132,16 +1133,15 @@ static void __issue_target_requests(struct clone_info *ci, struct dm_target *ti,
                __issue_target_request(ci, ti, request_nr, len);
 }
 
-static int __clone_and_map_flush(struct clone_info *ci)
+static int __clone_and_map_empty_flush(struct clone_info *ci)
 {
        unsigned target_nr = 0;
        struct dm_target *ti;
 
+       BUG_ON(bio_has_data(ci->bio));
        while ((ti = dm_table_get_target(ci->map, target_nr++)))
                __issue_target_requests(ci, ti, ti->num_flush_requests, 0);
 
-       ci->sector_count = 0;
-
        return 0;
 }
 
@@ -1282,7 +1282,6 @@ static int __clone_and_map(struct clone_info *ci)
  */
 static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
 {
-       bool is_flush = bio->bi_rw & REQ_FLUSH;
        struct clone_info ci;
        int error = 0;
 
@@ -1302,20 +1301,17 @@ static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
        ci.sector = bio->bi_sector;
        ci.idx = bio->bi_idx;
 
-       if (!is_flush) {
+       start_io_acct(ci.io);
+       if (bio->bi_rw & REQ_FLUSH) {
+               ci.bio = &ci.md->flush_bio;
+               ci.sector_count = 0;
+               error = __clone_and_map_empty_flush(&ci);
+               /* dec_pending submits any data associated with flush */
+       } else {
                ci.bio = bio;
                ci.sector_count = bio_sectors(bio);
-       } else {
-               ci.bio = &ci.md->flush_bio;
-               ci.sector_count = 1;
-       }
-
-       start_io_acct(ci.io);
-       while (ci.sector_count && !error) {
-               if (!is_flush)
+               while (ci.sector_count && !error)
                        error = __clone_and_map(&ci);
-               else
-                       error = __clone_and_map_flush(&ci);
        }
 
        /* drop the extra reference count */