Merge branch 'for-john' of git://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi...
authorJohn W. Linville <linville@tuxdriver.com>
Tue, 4 Feb 2014 20:14:34 +0000 (15:14 -0500)
committerJohn W. Linville <linville@tuxdriver.com>
Tue, 4 Feb 2014 20:14:34 +0000 (15:14 -0500)
191 files changed:
Documentation/block/biodoc.txt
Documentation/block/biovecs.txt [new file with mode: 0644]
Documentation/filesystems/nfs/nfs41-server.txt
arch/m68k/emu/nfblock.c
arch/powerpc/sysdev/axonram.c
arch/xtensa/platforms/iss/simdisk.c
block/blk-core.c
block/blk-exec.c
block/blk-flush.c
block/blk-integrity.c
block/blk-lib.c
block/blk-map.c
block/blk-merge.c
block/blk-mq-cpu.c
block/blk-mq.c
block/blk-mq.h
block/blk-settings.c
block/blk-sysfs.c
block/blk-throttle.c
block/cmdline-parser.c
block/elevator.c
block/scsi_ioctl.c
drivers/block/aoe/aoe.h
drivers/block/aoe/aoecmd.c
drivers/block/brd.c
drivers/block/cciss.c
drivers/block/drbd/drbd_actlog.c
drivers/block/drbd/drbd_bitmap.c
drivers/block/drbd/drbd_main.c
drivers/block/drbd/drbd_receiver.c
drivers/block/drbd/drbd_req.c
drivers/block/drbd/drbd_req.h
drivers/block/drbd/drbd_worker.c
drivers/block/floppy.c
drivers/block/loop.c
drivers/block/mg_disk.c
drivers/block/mtip32xx/mtip32xx.c
drivers/block/mtip32xx/mtip32xx.h
drivers/block/nbd.c
drivers/block/null_blk.c
drivers/block/nvme-core.c
drivers/block/paride/pg.c
drivers/block/pktcdvd.c
drivers/block/ps3disk.c
drivers/block/ps3vram.c
drivers/block/rbd.c
drivers/block/rsxx/dev.c
drivers/block/rsxx/dma.c
drivers/block/sx8.c
drivers/block/umem.c
drivers/block/xen-blkback/blkback.c
drivers/block/xen-blkfront.c
drivers/cdrom/gdrom.c
drivers/char/ipmi/ipmi_si_intf.c
drivers/md/bcache/Makefile
drivers/md/bcache/alloc.c
drivers/md/bcache/bcache.h
drivers/md/bcache/bset.c
drivers/md/bcache/bset.h
drivers/md/bcache/btree.c
drivers/md/bcache/btree.h
drivers/md/bcache/closure.c
drivers/md/bcache/closure.h
drivers/md/bcache/debug.c
drivers/md/bcache/debug.h
drivers/md/bcache/extents.c [new file with mode: 0644]
drivers/md/bcache/extents.h [new file with mode: 0644]
drivers/md/bcache/io.c
drivers/md/bcache/journal.c
drivers/md/bcache/journal.h
drivers/md/bcache/movinggc.c
drivers/md/bcache/request.c
drivers/md/bcache/request.h
drivers/md/bcache/super.c
drivers/md/bcache/sysfs.c
drivers/md/bcache/util.c
drivers/md/bcache/util.h
drivers/md/bcache/writeback.c
drivers/md/bcache/writeback.h
drivers/md/dm-bio-record.h
drivers/md/dm-bufio.c
drivers/md/dm-cache-policy-mq.c
drivers/md/dm-cache-target.c
drivers/md/dm-crypt.c
drivers/md/dm-delay.c
drivers/md/dm-flakey.c
drivers/md/dm-io.c
drivers/md/dm-linear.c
drivers/md/dm-raid1.c
drivers/md/dm-region-hash.c
drivers/md/dm-snap.c
drivers/md/dm-stripe.c
drivers/md/dm-switch.c
drivers/md/dm-thin.c
drivers/md/dm-verity.c
drivers/md/dm.c
drivers/md/faulty.c
drivers/md/linear.c
drivers/md/md.c
drivers/md/multipath.c
drivers/md/raid0.c
drivers/md/raid1.c
drivers/md/raid10.c
drivers/md/raid5.c
drivers/message/fusion/mptsas.c
drivers/s390/block/dasd_diag.c
drivers/s390/block/dasd_eckd.c
drivers/s390/block/dasd_fba.c
drivers/s390/block/dcssblk.c
drivers/s390/block/scm_blk.c
drivers/s390/block/scm_blk_cluster.c
drivers/s390/block/xpram.c
drivers/scsi/libsas/sas_expander.c
drivers/scsi/mpt2sas/mpt2sas_transport.c
drivers/scsi/mpt3sas/mpt3sas_transport.c
drivers/scsi/osd/osd_initiator.c
drivers/scsi/sd.c
drivers/scsi/sd_dif.c
drivers/staging/lustre/lustre/llite/lloop.c
drivers/staging/zram/zram_drv.c
drivers/target/target_core_iblock.c
fs/bio-integrity.c
fs/bio.c
fs/btrfs/check-integrity.c
fs/btrfs/compression.c
fs/btrfs/disk-io.c
fs/btrfs/extent_io.c
fs/btrfs/file-item.c
fs/btrfs/inode.c
fs/btrfs/raid56.c
fs/btrfs/scrub.c
fs/btrfs/volumes.c
fs/buffer.c
fs/direct-io.c
fs/ext4/page-io.c
fs/f2fs/data.c
fs/gfs2/lops.c
fs/gfs2/ops_fstype.c
fs/hfsplus/wrapper.c
fs/jfs/jfs_logmgr.c
fs/jfs/jfs_metapage.c
fs/logfs/dev_bdev.c
fs/mpage.c
fs/nfs/blocklayout/blocklayout.c
fs/nfs/nfs3proc.c
fs/nfsd/acl.h
fs/nfsd/cache.h
fs/nfsd/idmap.h
fs/nfsd/netns.h
fs/nfsd/nfs3xdr.c
fs/nfsd/nfs4acl.c
fs/nfsd/nfs4idmap.c
fs/nfsd/nfs4proc.c
fs/nfsd/nfs4state.c
fs/nfsd/nfs4xdr.c
fs/nfsd/nfscache.c
fs/nfsd/nfssvc.c
fs/nfsd/nfsxdr.c
fs/nfsd/vfs.c
fs/nfsd/vfs.h
fs/nfsd/xdr3.h
fs/nfsd/xdr4.h
fs/nilfs2/segbuf.c
fs/ocfs2/cluster/heartbeat.c
fs/xfs/xfs_aops.c
fs/xfs/xfs_buf.c
include/linux/bio.h
include/linux/blk-mq.h
include/linux/blk_types.h
include/linux/blkdev.h
include/linux/ceph/messenger.h
include/linux/cmdline-parser.h
include/linux/dm-io.h
include/linux/sunrpc/svc.h
include/trace/events/bcache.h
include/trace/events/block.h
include/trace/events/f2fs.h
include/uapi/linux/bcache.h
include/uapi/linux/fd.h
kernel/power/block_io.c
kernel/trace/blktrace.c
mm/bounce.c
mm/page_io.c
net/ceph/messenger.c
net/sunrpc/auth_gss/gss_krb5_keys.c
net/sunrpc/auth_gss/gss_rpc_upcall.c
net/sunrpc/auth_gss/svcauth_gss.c
net/sunrpc/cache.c
net/sunrpc/netns.h
net/sunrpc/svc.c
net/sunrpc/xprtsock.c

index 8df5e8e..2101e71 100644 (file)
@@ -447,14 +447,13 @@ struct bio_vec {
  * main unit of I/O for the block layer and lower layers (ie drivers)
  */
 struct bio {
-       sector_t            bi_sector;
        struct bio          *bi_next;    /* request queue link */
        struct block_device *bi_bdev;   /* target device */
        unsigned long       bi_flags;    /* status, command, etc */
        unsigned long       bi_rw;       /* low bits: r/w, high: priority */
 
        unsigned int    bi_vcnt;     /* how many bio_vec's */
-       unsigned int    bi_idx;         /* current index into bio_vec array */
+       struct bvec_iter        bi_iter;        /* current index into bio_vec array */
 
        unsigned int    bi_size;     /* total size in bytes */
        unsigned short  bi_phys_segments; /* segments after physaddr coalesce*/
@@ -480,7 +479,7 @@ With this multipage bio design:
 - Code that traverses the req list can find all the segments of a bio
   by using rq_for_each_segment.  This handles the fact that a request
   has multiple bios, each of which can have multiple segments.
-- Drivers which can't process a large bio in one shot can use the bi_idx
+- Drivers which can't process a large bio in one shot can use the bi_iter
   field to keep track of the next bio_vec entry to process.
   (e.g. a 1MB bio_vec needs to be handled in max 128kB chunks for IDE)
   [TBD: Should preferably also have a bi_voffset and bi_vlen to avoid modifying
@@ -589,7 +588,7 @@ driver should not modify these values. The block layer sets up the
 nr_sectors and current_nr_sectors fields (based on the corresponding
 hard_xxx values and the number of bytes transferred) and updates it on
 every transfer that invokes end_that_request_first. It does the same for the
-buffer, bio, bio->bi_idx fields too.
+buffer, bio, bio->bi_iter fields too.
 
 The buffer field is just a virtual address mapping of the current segment
 of the i/o buffer in cases where the buffer resides in low-memory. For high
diff --git a/Documentation/block/biovecs.txt b/Documentation/block/biovecs.txt
new file mode 100644 (file)
index 0000000..74a32ad
--- /dev/null
@@ -0,0 +1,111 @@
+
+Immutable biovecs and biovec iterators:
+=======================================
+
+Kent Overstreet <kmo@daterainc.com>
+
+As of 3.13, biovecs should never be modified after a bio has been submitted.
+Instead, we have a new struct bvec_iter which represents a range of a biovec -
+the iterator will be modified as the bio is completed, not the biovec.
+
+More specifically, old code that needed to partially complete a bio would
+update bi_sector and bi_size, and advance bi_idx to the next biovec. If it
+ended up partway through a biovec, it would increment bv_offset and decrement
+bv_len by the number of bytes completed in that biovec.
+
+In the new scheme of things, everything that must be mutated in order to
+partially complete a bio is segregated into struct bvec_iter: bi_sector,
+bi_size and bi_idx have been moved there; and instead of modifying bv_offset
+and bv_len, struct bvec_iter has bi_bvec_done, which represents the number of
+bytes completed in the current bvec.
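+
+For reference, the iterator itself is small; it looks roughly like this
+(see include/linux/bio.h):
+
+	struct bvec_iter {
+		sector_t	bi_sector;	/* device address in 512 byte
+						   sectors */
+		unsigned int	bi_size;	/* residual I/O count */
+		unsigned int	bi_idx;		/* current index into bvl_vec */
+		unsigned int	bi_bvec_done;	/* number of bytes completed in
+						   current bvec */
+	};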
+
+There are a bunch of new helper macros for hiding the gory details - in
+particular, presenting the illusion of partially completed biovecs so that
+normal code doesn't have to deal with bi_bvec_done.
+
+ * Driver code should no longer refer to biovecs directly; we now have
+   bio_iovec() and bio_iter_iovec() macros that return literal struct biovecs,
+   constructed from the raw biovecs but taking into account bi_bvec_done and
+   bi_size.
+
+   bio_for_each_segment() has been updated to take a bvec_iter argument
+   instead of an integer (that corresponded to bi_idx); for a lot of code the
+   conversion just required changing the types of the arguments to
+   bio_for_each_segment().
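+
+   A minimal sketch of the new calling convention (do_something() is just a
+   placeholder for per-segment work):
+
+	struct bio_vec bv;
+	struct bvec_iter iter;
+
+	bio_for_each_segment(bv, bio, iter)
+		do_something(bv.bv_page, bv.bv_offset, bv.bv_len);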
+
+ * Advancing a bvec_iter is done with bio_advance_iter(); bio_advance() is a
+   wrapper around bio_advance_iter() that operates on bio->bi_iter, and also
+   advances the bio integrity's iter if present.
+
+   There is a lower level advance function - bvec_iter_advance() - which takes
+   a pointer to a biovec, not a bio; this is used by the bio integrity code.
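+
+   So a driver that has finished the first 'bytes' bytes of a bio might do
+   something like this (sketch; error handling omitted):
+
+	bio_advance(bio, bytes);
+	if (!bio->bi_iter.bi_size)
+		bio_endio(bio, 0);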
+
+What does all this get us?
+==========================
+
+Having a real iterator, and making biovecs immutable, has a number of
+advantages:
+
+ * Before, iterating over bios was very awkward when you weren't processing
+   exactly one bvec at a time - for example, bio_copy_data() in fs/bio.c,
+   which copies the contents of one bio into another. Because the biovecs
+   wouldn't necessarily be the same size, the old code was tricky and
+   convoluted - it had to walk two different bios at the same time, keeping
+   both bi_idx and an offset into the current biovec for each.
+
+   The new code is much more straightforward - have a look. This sort of
+   pattern comes up in a lot of places; a lot of drivers were essentially open
+   coding bvec iterators before, and having a common implementation
+   considerably simplifies a lot of code.
+
+ * Before, any code that might need to use the biovec after the bio had been
+   completed (perhaps to copy the data somewhere else, or perhaps to resubmit
+   it somewhere else if there was an error) had to save the entire bvec array
+   - again, this was being done in a fair number of places.
+
+ * Biovecs can be shared between multiple bios - a bvec iter can represent an
+   arbitrary range of an existing biovec, both starting and ending midway
+   through biovecs. This is what enables efficient splitting of arbitrary
+   bios. Note that this means we _only_ use bi_size to determine when we've
+   reached the end of a bio, not bi_vcnt - and the bio_iovec() macro takes
+   bi_size into account when constructing biovecs.
+
+ * Splitting bios is now much simpler. The old bio_split() didn't even work on
+   bios with more than a single bvec! Now, we can efficiently split arbitrary
+   size bios - because the new bio can share the old bio's biovec.
+
+   Care must be taken, though, to ensure the biovec isn't freed while the
+   split bio is still using it, e.g. if the original bio completes first.
+   Using bio_chain() when splitting bios helps with this.
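+
+   Roughly, a stacking driver splitting off the first 'sectors' sectors of a
+   bio might do this (sketch; assumes allocating from fs_bio_set is safe in
+   the caller's context):
+
+	struct bio *split = bio_split(bio, sectors, GFP_NOIO, fs_bio_set);
+
+	bio_chain(split, bio);	/* bio won't complete until split does */
+	generic_make_request(split);
+	generic_make_request(bio);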
+
+ * Submitting partially completed bios is now perfectly fine - this comes up
+   occasionally in stacking block drivers, and various code (e.g. md and
+   bcache) had some ugly workarounds for this.
+
+   It used to be the case that submitting a partially completed bio would work
+   fine with _most_ devices, but since accessing the raw bvec array was the
+   norm, not all drivers would respect bi_idx and those would break. Now,
+   since all drivers _must_ go through the bvec iterator - and have been
+   audited to make sure they are - submitting partially completed bios is
+   perfectly fine.
+
+Other implications:
+===================
+
+ * Almost all usage of bi_idx is now incorrect and has been removed; instead,
+   where previously you would have used bi_idx you'd now use a bvec_iter,
+   probably passing it to one of the helper macros.
+
+   I.e. instead of using bio_iovec_idx() (or bio->bi_io_vec[bio->bi_idx]), you
+   now use bio_iter_iovec(), which takes a bvec_iter and returns a
+   literal struct bio_vec - constructed on the fly from the raw biovec but
+   taking into account bi_bvec_done (and bi_size).
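+
+   E.g., to peek at the current segment without a full loop (sketch):
+
+	struct bio_vec bv = bio_iter_iovec(bio, bio->bi_iter);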
+
+ * bi_vcnt can't be trusted or relied upon by driver code - i.e. anything that
+   doesn't actually own the bio. The reason is twofold: firstly, it's not
+   actually needed for iterating over the bio anymore - we only use bi_size.
+   Secondly, when cloning a bio and reusing (a portion of) the original bio's
+   biovec, in order to calculate bi_vcnt for the new bio we'd have to iterate
+   over all the biovecs in the new bio - which is silly as it's not needed.
+
+   So, don't use bi_vcnt anymore.
index 01c2db7..b930ad0 100644 (file)
@@ -5,11 +5,11 @@ Server support for minorversion 1 can be controlled using the
 by reading this file will contain either "+4.1" or "-4.1"
 correspondingly.
 
-Currently, server support for minorversion 1 is disabled by default.
-It can be enabled at run time by writing the string "+4.1" to
+Currently, server support for minorversion 1 is enabled by default.
+It can be disabled at run time by writing the string "-4.1" to
 the /proc/fs/nfsd/versions control file.  Note that to write this
-control file, the nfsd service must be taken down.  Use your user-mode
-nfs-utils to set this up; see rpc.nfsd(8)
+control file, the nfsd service must be taken down.  You can use rpc.nfsd
+for this; see rpc.nfsd(8).
 
 (Warning: older servers will interpret "+4.1" and "-4.1" as "+4" and
 "-4", respectively.  Therefore, code meant to work on both new and old
@@ -29,29 +29,6 @@ are still under development out of tree.
 See http://wiki.linux-nfs.org/wiki/index.php/PNFS_prototype_design
 for more information.
 
-The current implementation is intended for developers only: while it
-does support ordinary file operations on clients we have tested against
-(including the linux client), it is incomplete in ways which may limit
-features unexpectedly, cause known bugs in rare cases, or cause
-interoperability problems with future clients.  Known issues:
-
-       - gss support is questionable: currently mounts with kerberos
-         from a linux client are possible, but we aren't really
-         conformant with the spec (for example, we don't use kerberos
-         on the backchannel correctly).
-       - We do not support SSV, which provides security for shared
-         client-server state (thus preventing unauthorized tampering
-         with locks and opens, for example).  It is mandatory for
-         servers to support this, though no clients use it yet.
-
-In addition, some limitations are inherited from the current NFSv4
-implementation:
-
-       - Incomplete delegation enforcement: if a file is renamed or
-         unlinked by a local process, a client holding a delegation may
-         continue to indefinitely allow opens of the file under the old
-         name.
-
 The table below, taken from the NFSv4.1 document, lists
 the operations that are mandatory to implement (REQ), optional
 (OPT), and NFSv4.0 operations that are required not to implement (MNI)
@@ -169,6 +146,16 @@ NS*| CB_WANTS_CANCELLED      | OPT       | FDELG,      | Section 20.10 |
 
 Implementation notes:
 
+SSV:
+* The spec claims this is mandatory, but we don't actually know of any
+  implementations, so we're ignoring it for now.  The server returns
+  NFS4ERR_ENCR_ALG_UNSUPP on EXCHANGE_ID, which should be future-proof.
+
+GSS on the backchannel:
+* Again, theoretically required but not widely implemented (in
+  particular, the current Linux client doesn't request it).  We return
+  NFS4ERR_ENCR_ALG_UNSUPP on CREATE_SESSION.
+
 DELEGPURGE:
 * mandatory only for servers that support CLAIM_DELEGATE_PREV and/or
   CLAIM_DELEG_PREV_FH (which allows clients to keep delegations that
@@ -176,7 +163,6 @@ DELEGPURGE:
   now.
 
 EXCHANGE_ID:
-* only SP4_NONE state protection supported
 * implementation ids are ignored
 
 CREATE_SESSION:
index 0721858..2d75ae2 100644 (file)
@@ -62,17 +62,18 @@ struct nfhd_device {
 static void nfhd_make_request(struct request_queue *queue, struct bio *bio)
 {
        struct nfhd_device *dev = queue->queuedata;
-       struct bio_vec *bvec;
-       int i, dir, len, shift;
-       sector_t sec = bio->bi_sector;
+       struct bio_vec bvec;
+       struct bvec_iter iter;
+       int dir, len, shift;
+       sector_t sec = bio->bi_iter.bi_sector;
 
        dir = bio_data_dir(bio);
        shift = dev->bshift;
-       bio_for_each_segment(bvec, bio, i) {
-               len = bvec->bv_len;
+       bio_for_each_segment(bvec, bio, iter) {
+               len = bvec.bv_len;
                len >>= 9;
                nfhd_read_write(dev->id, 0, dir, sec >> shift, len >> shift,
-                               bvec_to_phys(bvec));
+                               bvec_to_phys(&bvec));
                sec += len;
        }
        bio_endio(bio, 0);
index 1c16141..47b6b9f 100644 (file)
@@ -109,27 +109,28 @@ axon_ram_make_request(struct request_queue *queue, struct bio *bio)
        struct axon_ram_bank *bank = bio->bi_bdev->bd_disk->private_data;
        unsigned long phys_mem, phys_end;
        void *user_mem;
-       struct bio_vec *vec;
+       struct bio_vec vec;
        unsigned int transfered;
-       unsigned short idx;
+       struct bvec_iter iter;
 
-       phys_mem = bank->io_addr + (bio->bi_sector << AXON_RAM_SECTOR_SHIFT);
+       phys_mem = bank->io_addr + (bio->bi_iter.bi_sector <<
+                                   AXON_RAM_SECTOR_SHIFT);
        phys_end = bank->io_addr + bank->size;
        transfered = 0;
-       bio_for_each_segment(vec, bio, idx) {
-               if (unlikely(phys_mem + vec->bv_len > phys_end)) {
+       bio_for_each_segment(vec, bio, iter) {
+               if (unlikely(phys_mem + vec.bv_len > phys_end)) {
                        bio_io_error(bio);
                        return;
                }
 
-               user_mem = page_address(vec->bv_page) + vec->bv_offset;
+               user_mem = page_address(vec.bv_page) + vec.bv_offset;
                if (bio_data_dir(bio) == READ)
-                       memcpy(user_mem, (void *) phys_mem, vec->bv_len);
+                       memcpy(user_mem, (void *) phys_mem, vec.bv_len);
                else
-                       memcpy((void *) phys_mem, user_mem, vec->bv_len);
+                       memcpy((void *) phys_mem, user_mem, vec.bv_len);
 
-               phys_mem += vec->bv_len;
-               transfered += vec->bv_len;
+               phys_mem += vec.bv_len;
+               transfered += vec.bv_len;
        }
        bio_endio(bio, 0);
 }
index 8c6e819..48eebac 100644 (file)
@@ -103,18 +103,18 @@ static void simdisk_transfer(struct simdisk *dev, unsigned long sector,
 
 static int simdisk_xfer_bio(struct simdisk *dev, struct bio *bio)
 {
-       int i;
-       struct bio_vec *bvec;
-       sector_t sector = bio->bi_sector;
+       struct bio_vec bvec;
+       struct bvec_iter iter;
+       sector_t sector = bio->bi_iter.bi_sector;
 
-       bio_for_each_segment(bvec, bio, i) {
-               char *buffer = __bio_kmap_atomic(bio, i);
-               unsigned len = bvec->bv_len >> SECTOR_SHIFT;
+       bio_for_each_segment(bvec, bio, iter) {
+               char *buffer = __bio_kmap_atomic(bio, iter);
+               unsigned len = bvec.bv_len >> SECTOR_SHIFT;
 
                simdisk_transfer(dev, sector, len, buffer,
                                bio_data_dir(bio) == WRITE);
                sector += len;
-               __bio_kunmap_atomic(bio);
+               __bio_kunmap_atomic(buffer);
        }
        return 0;
 }
index 8bdd012..c00e0bd 100644 (file)
@@ -38,6 +38,7 @@
 
 #include "blk.h"
 #include "blk-cgroup.h"
+#include "blk-mq.h"
 
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
@@ -130,7 +131,7 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
        bio_advance(bio, nbytes);
 
        /* don't actually finish bio if it's part of flush sequence */
-       if (bio->bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ))
+       if (bio->bi_iter.bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ))
                bio_endio(bio, error);
 }
 
@@ -245,7 +246,16 @@ EXPORT_SYMBOL(blk_stop_queue);
 void blk_sync_queue(struct request_queue *q)
 {
        del_timer_sync(&q->timeout);
-       cancel_delayed_work_sync(&q->delay_work);
+
+       if (q->mq_ops) {
+               struct blk_mq_hw_ctx *hctx;
+               int i;
+
+               queue_for_each_hw_ctx(q, hctx, i)
+                       cancel_delayed_work_sync(&hctx->delayed_work);
+       } else {
+               cancel_delayed_work_sync(&q->delay_work);
+       }
 }
 EXPORT_SYMBOL(blk_sync_queue);
 
@@ -497,8 +507,13 @@ void blk_cleanup_queue(struct request_queue *q)
         * Drain all requests queued before DYING marking. Set DEAD flag to
         * prevent that q->request_fn() gets invoked after draining finished.
         */
-       spin_lock_irq(lock);
-       __blk_drain_queue(q, true);
+       if (q->mq_ops) {
+               blk_mq_drain_queue(q);
+               spin_lock_irq(lock);
+       } else {
+               spin_lock_irq(lock);
+               __blk_drain_queue(q, true);
+       }
        queue_flag_set(QUEUE_FLAG_DEAD, q);
        spin_unlock_irq(lock);
 
@@ -1326,7 +1341,7 @@ void blk_add_request_payload(struct request *rq, struct page *page,
        bio->bi_io_vec->bv_offset = 0;
        bio->bi_io_vec->bv_len = len;
 
-       bio->bi_size = len;
+       bio->bi_iter.bi_size = len;
        bio->bi_vcnt = 1;
        bio->bi_phys_segments = 1;
 
@@ -1351,7 +1366,7 @@ bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
 
        req->biotail->bi_next = bio;
        req->biotail = bio;
-       req->__data_len += bio->bi_size;
+       req->__data_len += bio->bi_iter.bi_size;
        req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
 
        blk_account_io_start(req, false);
@@ -1380,8 +1395,8 @@ bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
         * not touch req->buffer either...
         */
        req->buffer = bio_data(bio);
-       req->__sector = bio->bi_sector;
-       req->__data_len += bio->bi_size;
+       req->__sector = bio->bi_iter.bi_sector;
+       req->__data_len += bio->bi_iter.bi_size;
        req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
 
        blk_account_io_start(req, false);
@@ -1459,7 +1474,7 @@ void init_request_from_bio(struct request *req, struct bio *bio)
                req->cmd_flags |= REQ_FAILFAST_MASK;
 
        req->errors = 0;
-       req->__sector = bio->bi_sector;
+       req->__sector = bio->bi_iter.bi_sector;
        req->ioprio = bio_prio(bio);
        blk_rq_bio_prep(req->q, req, bio);
 }
@@ -1583,12 +1598,12 @@ static inline void blk_partition_remap(struct bio *bio)
        if (bio_sectors(bio) && bdev != bdev->bd_contains) {
                struct hd_struct *p = bdev->bd_part;
 
-               bio->bi_sector += p->start_sect;
+               bio->bi_iter.bi_sector += p->start_sect;
                bio->bi_bdev = bdev->bd_contains;
 
                trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), bio,
                                      bdev->bd_dev,
-                                     bio->bi_sector - p->start_sect);
+                                     bio->bi_iter.bi_sector - p->start_sect);
        }
 }
 
@@ -1654,7 +1669,7 @@ static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
        /* Test device or partition size, when known. */
        maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9;
        if (maxsector) {
-               sector_t sector = bio->bi_sector;
+               sector_t sector = bio->bi_iter.bi_sector;
 
                if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
                        /*
@@ -1690,7 +1705,7 @@ generic_make_request_checks(struct bio *bio)
                       "generic_make_request: Trying to access "
                        "nonexistent block-device %s (%Lu)\n",
                        bdevname(bio->bi_bdev, b),
-                       (long long) bio->bi_sector);
+                       (long long) bio->bi_iter.bi_sector);
                goto end_io;
        }
 
@@ -1704,9 +1719,9 @@ generic_make_request_checks(struct bio *bio)
        }
 
        part = bio->bi_bdev->bd_part;
-       if (should_fail_request(part, bio->bi_size) ||
+       if (should_fail_request(part, bio->bi_iter.bi_size) ||
            should_fail_request(&part_to_disk(part)->part0,
-                               bio->bi_size))
+                               bio->bi_iter.bi_size))
                goto end_io;
 
        /*
@@ -1865,7 +1880,7 @@ void submit_bio(int rw, struct bio *bio)
                if (rw & WRITE) {
                        count_vm_events(PGPGOUT, count);
                } else {
-                       task_io_account_read(bio->bi_size);
+                       task_io_account_read(bio->bi_iter.bi_size);
                        count_vm_events(PGPGIN, count);
                }
 
@@ -1874,7 +1889,7 @@ void submit_bio(int rw, struct bio *bio)
                        printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n",
                        current->comm, task_pid_nr(current),
                                (rw & WRITE) ? "WRITE" : "READ",
-                               (unsigned long long)bio->bi_sector,
+                               (unsigned long long)bio->bi_iter.bi_sector,
                                bdevname(bio->bi_bdev, b),
                                count);
                }
@@ -2007,7 +2022,7 @@ unsigned int blk_rq_err_bytes(const struct request *rq)
        for (bio = rq->bio; bio; bio = bio->bi_next) {
                if ((bio->bi_rw & ff) != ff)
                        break;
-               bytes += bio->bi_size;
+               bytes += bio->bi_iter.bi_size;
        }
 
        /* this could lead to infinite loop */
@@ -2378,9 +2393,9 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
        total_bytes = 0;
        while (req->bio) {
                struct bio *bio = req->bio;
-               unsigned bio_bytes = min(bio->bi_size, nr_bytes);
+               unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);
 
-               if (bio_bytes == bio->bi_size)
+               if (bio_bytes == bio->bi_iter.bi_size)
                        req->bio = bio->bi_next;
 
                req_bio_endio(req, bio, bio_bytes, error);
@@ -2728,7 +2743,7 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
                rq->nr_phys_segments = bio_phys_segments(q, bio);
                rq->buffer = bio_data(bio);
        }
-       rq->__data_len = bio->bi_size;
+       rq->__data_len = bio->bi_iter.bi_size;
        rq->bio = rq->biotail = bio;
 
        if (bio->bi_bdev)
@@ -2746,10 +2761,10 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
 void rq_flush_dcache_pages(struct request *rq)
 {
        struct req_iterator iter;
-       struct bio_vec *bvec;
+       struct bio_vec bvec;
 
        rq_for_each_segment(bvec, rq, iter)
-               flush_dcache_page(bvec->bv_page);
+               flush_dcache_page(bvec.bv_page);
 }
 EXPORT_SYMBOL_GPL(rq_flush_dcache_pages);
 #endif
index c3edf9d..bbfc072 100644 (file)
@@ -60,6 +60,10 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
        rq->rq_disk = bd_disk;
        rq->end_io = done;
 
+       /*
+        * don't check dying flag for MQ because the request won't
+        * be reused after dying flag is set
+        */
        if (q->mq_ops) {
                blk_mq_insert_request(q, rq, true);
                return;
index fb6f3c0..9288aaf 100644 (file)
@@ -548,7 +548,7 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
         * copied from blk_rq_pos(rq).
         */
        if (error_sector)
-               *error_sector = bio->bi_sector;
+               *error_sector = bio->bi_iter.bi_sector;
 
        bio_put(bio);
        return ret;
index 03cf717..7fbab84 100644 (file)
@@ -43,30 +43,32 @@ static const char *bi_unsupported_name = "unsupported";
  */
 int blk_rq_count_integrity_sg(struct request_queue *q, struct bio *bio)
 {
-       struct bio_vec *iv, *ivprv = NULL;
+       struct bio_vec iv, ivprv = { NULL };
        unsigned int segments = 0;
        unsigned int seg_size = 0;
-       unsigned int i = 0;
+       struct bvec_iter iter;
+       int prev = 0;
 
-       bio_for_each_integrity_vec(iv, bio, i) {
+       bio_for_each_integrity_vec(iv, bio, iter) {
 
-               if (ivprv) {
-                       if (!BIOVEC_PHYS_MERGEABLE(ivprv, iv))
+               if (prev) {
+                       if (!BIOVEC_PHYS_MERGEABLE(&ivprv, &iv))
                                goto new_segment;
 
-                       if (!BIOVEC_SEG_BOUNDARY(q, ivprv, iv))
+                       if (!BIOVEC_SEG_BOUNDARY(q, &ivprv, &iv))
                                goto new_segment;
 
-                       if (seg_size + iv->bv_len > queue_max_segment_size(q))
+                       if (seg_size + iv.bv_len > queue_max_segment_size(q))
                                goto new_segment;
 
-                       seg_size += iv->bv_len;
+                       seg_size += iv.bv_len;
                } else {
 new_segment:
                        segments++;
-                       seg_size = iv->bv_len;
+                       seg_size = iv.bv_len;
                }
 
+               prev = 1;
                ivprv = iv;
        }
 
@@ -87,24 +89,25 @@ EXPORT_SYMBOL(blk_rq_count_integrity_sg);
 int blk_rq_map_integrity_sg(struct request_queue *q, struct bio *bio,
                            struct scatterlist *sglist)
 {
-       struct bio_vec *iv, *ivprv = NULL;
+       struct bio_vec iv, ivprv = { NULL };
        struct scatterlist *sg = NULL;
        unsigned int segments = 0;
-       unsigned int i = 0;
+       struct bvec_iter iter;
+       int prev = 0;
 
-       bio_for_each_integrity_vec(iv, bio, i) {
+       bio_for_each_integrity_vec(iv, bio, iter) {
 
-               if (ivprv) {
-                       if (!BIOVEC_PHYS_MERGEABLE(ivprv, iv))
+               if (prev) {
+                       if (!BIOVEC_PHYS_MERGEABLE(&ivprv, &iv))
                                goto new_segment;
 
-                       if (!BIOVEC_SEG_BOUNDARY(q, ivprv, iv))
+                       if (!BIOVEC_SEG_BOUNDARY(q, &ivprv, &iv))
                                goto new_segment;
 
-                       if (sg->length + iv->bv_len > queue_max_segment_size(q))
+                       if (sg->length + iv.bv_len > queue_max_segment_size(q))
                                goto new_segment;
 
-                       sg->length += iv->bv_len;
+                       sg->length += iv.bv_len;
                } else {
 new_segment:
                        if (!sg)
@@ -114,10 +117,11 @@ new_segment:
                                sg = sg_next(sg);
                        }
 
-                       sg_set_page(sg, iv->bv_page, iv->bv_len, iv->bv_offset);
+                       sg_set_page(sg, iv.bv_page, iv.bv_len, iv.bv_offset);
                        segments++;
                }
 
+               prev = 1;
                ivprv = iv;
        }
 
index 9b5b561..2da76c9 100644 (file)
@@ -108,12 +108,12 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                        req_sects = end_sect - sector;
                }
 
-               bio->bi_sector = sector;
+               bio->bi_iter.bi_sector = sector;
                bio->bi_end_io = bio_batch_end_io;
                bio->bi_bdev = bdev;
                bio->bi_private = &bb;
 
-               bio->bi_size = req_sects << 9;
+               bio->bi_iter.bi_size = req_sects << 9;
                nr_sects -= req_sects;
                sector = end_sect;
 
@@ -174,7 +174,7 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
                        break;
                }
 
-               bio->bi_sector = sector;
+               bio->bi_iter.bi_sector = sector;
                bio->bi_end_io = bio_batch_end_io;
                bio->bi_bdev = bdev;
                bio->bi_private = &bb;
@@ -184,11 +184,11 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
                bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
 
                if (nr_sects > max_write_same_sectors) {
-                       bio->bi_size = max_write_same_sectors << 9;
+                       bio->bi_iter.bi_size = max_write_same_sectors << 9;
                        nr_sects -= max_write_same_sectors;
                        sector += max_write_same_sectors;
                } else {
-                       bio->bi_size = nr_sects << 9;
+                       bio->bi_iter.bi_size = nr_sects << 9;
                        nr_sects = 0;
                }
 
@@ -240,7 +240,7 @@ int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
                        break;
                }
 
-               bio->bi_sector = sector;
+               bio->bi_iter.bi_sector = sector;
                bio->bi_bdev   = bdev;
                bio->bi_end_io = bio_batch_end_io;
                bio->bi_private = &bb;
index 623e1cd..ae4ae10 100644 (file)
@@ -20,7 +20,7 @@ int blk_rq_append_bio(struct request_queue *q, struct request *rq,
                rq->biotail->bi_next = bio;
                rq->biotail = bio;
 
-               rq->__data_len += bio->bi_size;
+               rq->__data_len += bio->bi_iter.bi_size;
        }
        return 0;
 }
@@ -76,7 +76,7 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
 
        ret = blk_rq_append_bio(q, rq, bio);
        if (!ret)
-               return bio->bi_size;
+               return bio->bi_iter.bi_size;
 
        /* if it was bounced we must call the end io function */
        bio_endio(bio, 0);
@@ -220,7 +220,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
        if (IS_ERR(bio))
                return PTR_ERR(bio);
 
-       if (bio->bi_size != len) {
+       if (bio->bi_iter.bi_size != len) {
                /*
                 * Grab an extra reference to this bio, as bio_unmap_user()
                 * expects to be able to drop it twice as it happens on the
index 1ffc589..8f8adaa 100644 (file)
 static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
                                             struct bio *bio)
 {
-       struct bio_vec *bv, *bvprv = NULL;
-       int cluster, i, high, highprv = 1;
+       struct bio_vec bv, bvprv = { NULL };
+       int cluster, high, highprv = 1;
        unsigned int seg_size, nr_phys_segs;
        struct bio *fbio, *bbio;
+       struct bvec_iter iter;
 
        if (!bio)
                return 0;
@@ -25,25 +26,23 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
        seg_size = 0;
        nr_phys_segs = 0;
        for_each_bio(bio) {
-               bio_for_each_segment(bv, bio, i) {
+               bio_for_each_segment(bv, bio, iter) {
                        /*
                         * the trick here is making sure that a high page is
                         * never considered part of another segment, since that
                         * might change with the bounce page.
                         */
-                       high = page_to_pfn(bv->bv_page) > queue_bounce_pfn(q);
-                       if (high || highprv)
-                               goto new_segment;
-                       if (cluster) {
-                               if (seg_size + bv->bv_len
+                       high = page_to_pfn(bv.bv_page) > queue_bounce_pfn(q);
+                       if (!high && !highprv && cluster) {
+                               if (seg_size + bv.bv_len
                                    > queue_max_segment_size(q))
                                        goto new_segment;
-                               if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
+                               if (!BIOVEC_PHYS_MERGEABLE(&bvprv, &bv))
                                        goto new_segment;
-                               if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
+                               if (!BIOVEC_SEG_BOUNDARY(q, &bvprv, &bv))
                                        goto new_segment;
 
-                               seg_size += bv->bv_len;
+                               seg_size += bv.bv_len;
                                bvprv = bv;
                                continue;
                        }
@@ -54,7 +53,7 @@ new_segment:
 
                        nr_phys_segs++;
                        bvprv = bv;
-                       seg_size = bv->bv_len;
+                       seg_size = bv.bv_len;
                        highprv = high;
                }
                bbio = bio;
@@ -87,6 +86,9 @@ EXPORT_SYMBOL(blk_recount_segments);
 static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
                                   struct bio *nxt)
 {
+       struct bio_vec end_bv = { NULL }, nxt_bv;
+       struct bvec_iter iter;
+
        if (!blk_queue_cluster(q))
                return 0;
 
@@ -97,34 +99,40 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
        if (!bio_has_data(bio))
                return 1;
 
-       if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
+       bio_for_each_segment(end_bv, bio, iter)
+               if (end_bv.bv_len == iter.bi_size)
+                       break;
+
+       nxt_bv = bio_iovec(nxt);
+
+       if (!BIOVEC_PHYS_MERGEABLE(&end_bv, &nxt_bv))
                return 0;
 
        /*
         * bio and nxt are contiguous in memory; check if the queue allows
         * these two to be merged into one
         */
-       if (BIO_SEG_BOUNDARY(q, bio, nxt))
+       if (BIOVEC_SEG_BOUNDARY(q, &end_bv, &nxt_bv))
                return 1;
 
        return 0;
 }
 
-static void
+static inline void
 __blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
-                    struct scatterlist *sglist, struct bio_vec **bvprv,
+                    struct scatterlist *sglist, struct bio_vec *bvprv,
                     struct scatterlist **sg, int *nsegs, int *cluster)
 {
 
        int nbytes = bvec->bv_len;
 
-       if (*bvprv && *cluster) {
+       if (*sg && *cluster) {
                if ((*sg)->length + nbytes > queue_max_segment_size(q))
                        goto new_segment;
 
-               if (!BIOVEC_PHYS_MERGEABLE(*bvprv, bvec))
+               if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
                        goto new_segment;
-               if (!BIOVEC_SEG_BOUNDARY(q, *bvprv, bvec))
+               if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
                        goto new_segment;
 
                (*sg)->length += nbytes;
@@ -150,7 +158,7 @@ new_segment:
                sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
                (*nsegs)++;
        }
-       *bvprv = bvec;
+       *bvprv = *bvec;
 }
 
 /*
@@ -160,7 +168,7 @@ new_segment:
 int blk_rq_map_sg(struct request_queue *q, struct request *rq,
                  struct scatterlist *sglist)
 {
-       struct bio_vec *bvec, *bvprv;
+       struct bio_vec bvec, bvprv = { NULL };
        struct req_iterator iter;
        struct scatterlist *sg;
        int nsegs, cluster;
@@ -171,10 +179,9 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
        /*
         * for each bio in rq
         */
-       bvprv = NULL;
        sg = NULL;
        rq_for_each_segment(bvec, rq, iter) {
-               __blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg,
+               __blk_segment_map_sg(q, &bvec, sglist, &bvprv, &sg,
                                     &nsegs, &cluster);
        } /* segments in rq */
 
@@ -223,18 +230,17 @@ EXPORT_SYMBOL(blk_rq_map_sg);
 int blk_bio_map_sg(struct request_queue *q, struct bio *bio,
                   struct scatterlist *sglist)
 {
-       struct bio_vec *bvec, *bvprv;
+       struct bio_vec bvec, bvprv = { NULL };
        struct scatterlist *sg;
        int nsegs, cluster;
-       unsigned long i;
+       struct bvec_iter iter;
 
        nsegs = 0;
        cluster = blk_queue_cluster(q);
 
-       bvprv = NULL;
        sg = NULL;
-       bio_for_each_segment(bvec, bio, i) {
-               __blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg,
+       bio_for_each_segment(bvec, bio, iter) {
+               __blk_segment_map_sg(q, &bvec, sglist, &bvprv, &sg,
                                     &nsegs, &cluster);
        } /* segments in bio */
 
@@ -543,9 +549,9 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
 
 int blk_try_merge(struct request *rq, struct bio *bio)
 {
-       if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_sector)
+       if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
                return ELEVATOR_BACK_MERGE;
-       else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_sector)
+       else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
                return ELEVATOR_FRONT_MERGE;
        return ELEVATOR_NO_MERGE;
 }
index 0045ace..3146bef 100644 (file)
@@ -28,36 +28,6 @@ static int blk_mq_main_cpu_notify(struct notifier_block *self,
        return NOTIFY_OK;
 }
 
-static void blk_mq_cpu_notify(void *data, unsigned long action,
-                             unsigned int cpu)
-{
-       if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
-               /*
-                * If the CPU goes away, ensure that we run any pending
-                * completions.
-                */
-               struct llist_node *node;
-               struct request *rq;
-
-               local_irq_disable();
-
-               node = llist_del_all(&per_cpu(ipi_lists, cpu));
-               while (node) {
-                       struct llist_node *next = node->next;
-
-                       rq = llist_entry(node, struct request, ll_list);
-                       __blk_mq_end_io(rq, rq->errors);
-                       node = next;
-               }
-
-               local_irq_enable();
-       }
-}
-
-static struct notifier_block __cpuinitdata blk_mq_main_cpu_notifier = {
-       .notifier_call  = blk_mq_main_cpu_notify,
-};
-
 void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier)
 {
        BUG_ON(!notifier->notify);
@@ -82,12 +52,7 @@ void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier,
        notifier->data = data;
 }
 
-static struct blk_mq_cpu_notifier __cpuinitdata cpu_notifier = {
-       .notify = blk_mq_cpu_notify,
-};
-
 void __init blk_mq_cpu_init(void)
 {
-       register_hotcpu_notifier(&blk_mq_main_cpu_notifier);
-       blk_mq_register_cpu_notifier(&cpu_notifier);
+       hotcpu_notifier(blk_mq_main_cpu_notify, 0);
 }
index c79126e..57039fc 100644 (file)
@@ -27,8 +27,6 @@ static LIST_HEAD(all_q_list);
 
 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx);
 
-DEFINE_PER_CPU(struct llist_head, ipi_lists);
-
 static struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
                                           unsigned int cpu)
 {
@@ -106,10 +104,13 @@ static int blk_mq_queue_enter(struct request_queue *q)
 
        spin_lock_irq(q->queue_lock);
        ret = wait_event_interruptible_lock_irq(q->mq_freeze_wq,
-               !blk_queue_bypass(q), *q->queue_lock);
+               !blk_queue_bypass(q) || blk_queue_dying(q),
+               *q->queue_lock);
        /* inc usage with lock hold to avoid freeze_queue runs here */
-       if (!ret)
+       if (!ret && !blk_queue_dying(q))
                __percpu_counter_add(&q->mq_usage_counter, 1, 1000000);
+       else if (blk_queue_dying(q))
+               ret = -ENODEV;
        spin_unlock_irq(q->queue_lock);
 
        return ret;
@@ -120,6 +121,22 @@ static void blk_mq_queue_exit(struct request_queue *q)
        __percpu_counter_add(&q->mq_usage_counter, -1, 1000000);
 }
 
+static void __blk_mq_drain_queue(struct request_queue *q)
+{
+       while (true) {
+               s64 count;
+
+               spin_lock_irq(q->queue_lock);
+               count = percpu_counter_sum(&q->mq_usage_counter);
+               spin_unlock_irq(q->queue_lock);
+
+               if (count == 0)
+                       break;
+               blk_mq_run_queues(q, false);
+               msleep(10);
+       }
+}
+
 /*
  * Guarantee no request is in use, so we can change any data structure of
  * the queue afterward.
@@ -133,21 +150,13 @@ static void blk_mq_freeze_queue(struct request_queue *q)
        queue_flag_set(QUEUE_FLAG_BYPASS, q);
        spin_unlock_irq(q->queue_lock);
 
-       if (!drain)
-               return;
-
-       while (true) {
-               s64 count;
-
-               spin_lock_irq(q->queue_lock);
-               count = percpu_counter_sum(&q->mq_usage_counter);
-               spin_unlock_irq(q->queue_lock);
+       if (drain)
+               __blk_mq_drain_queue(q);
+}
 
-               if (count == 0)
-                       break;
-               blk_mq_run_queues(q, false);
-               msleep(10);
-       }
+void blk_mq_drain_queue(struct request_queue *q)
+{
+       __blk_mq_drain_queue(q);
 }
 
 static void blk_mq_unfreeze_queue(struct request_queue *q)
@@ -179,6 +188,8 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
 
        rq->mq_ctx = ctx;
        rq->cmd_flags = rw_flags;
+       rq->start_time = jiffies;
+       set_start_time_ns(rq);
        ctx->rq_dispatched[rw_is_sync(rw_flags)]++;
 }
 
@@ -305,7 +316,7 @@ void blk_mq_complete_request(struct request *rq, int error)
                struct bio *next = bio->bi_next;
 
                bio->bi_next = NULL;
-               bytes += bio->bi_size;
+               bytes += bio->bi_iter.bi_size;
                blk_mq_bio_endio(rq, bio, error);
                bio = next;
        }
@@ -326,55 +337,12 @@ void __blk_mq_end_io(struct request *rq, int error)
                blk_mq_complete_request(rq, error);
 }
 
-#if defined(CONFIG_SMP)
-
-/*
- * Called with interrupts disabled.
- */
-static void ipi_end_io(void *data)
-{
-       struct llist_head *list = &per_cpu(ipi_lists, smp_processor_id());
-       struct llist_node *entry, *next;
-       struct request *rq;
-
-       entry = llist_del_all(list);
-
-       while (entry) {
-               next = entry->next;
-               rq = llist_entry(entry, struct request, ll_list);
-               __blk_mq_end_io(rq, rq->errors);
-               entry = next;
-       }
-}
-
-static int ipi_remote_cpu(struct blk_mq_ctx *ctx, const int cpu,
-                         struct request *rq, const int error)
+static void blk_mq_end_io_remote(void *data)
 {
-       struct call_single_data *data = &rq->csd;
-
-       rq->errors = error;
-       rq->ll_list.next = NULL;
+       struct request *rq = data;
 
-       /*
-        * If the list is non-empty, an existing IPI must already
-        * be "in flight". If that is the case, we need not schedule
-        * a new one.
-        */
-       if (llist_add(&rq->ll_list, &per_cpu(ipi_lists, ctx->cpu))) {
-               data->func = ipi_end_io;
-               data->flags = 0;
-               __smp_call_function_single(ctx->cpu, data, 0);
-       }
-
-       return true;
-}
-#else /* CONFIG_SMP */
-static int ipi_remote_cpu(struct blk_mq_ctx *ctx, const int cpu,
-                         struct request *rq, const int error)
-{
-       return false;
+       __blk_mq_end_io(rq, rq->errors);
 }
-#endif
 
 /*
  * End IO on this request on a multiqueue enabled driver. We'll either do
@@ -390,11 +358,15 @@ void blk_mq_end_io(struct request *rq, int error)
                return __blk_mq_end_io(rq, error);
 
        cpu = get_cpu();
-
-       if (cpu == ctx->cpu || !cpu_online(ctx->cpu) ||
-           !ipi_remote_cpu(ctx, cpu, rq, error))
+       if (cpu != ctx->cpu && cpu_online(ctx->cpu)) {
+               rq->errors = error;
+               rq->csd.func = blk_mq_end_io_remote;
+               rq->csd.info = rq;
+               rq->csd.flags = 0;
+               __smp_call_function_single(ctx->cpu, &rq->csd, 0);
+       } else {
                __blk_mq_end_io(rq, error);
-
+       }
        put_cpu();
 }
 EXPORT_SYMBOL(blk_mq_end_io);
@@ -1091,8 +1063,8 @@ static void blk_mq_free_rq_map(struct blk_mq_hw_ctx *hctx)
        struct page *page;
 
        while (!list_empty(&hctx->page_list)) {
-               page = list_first_entry(&hctx->page_list, struct page, list);
-               list_del_init(&page->list);
+               page = list_first_entry(&hctx->page_list, struct page, lru);
+               list_del_init(&page->lru);
                __free_pages(page, page->private);
        }
 
@@ -1156,7 +1128,7 @@ static int blk_mq_init_rq_map(struct blk_mq_hw_ctx *hctx,
                        break;
 
                page->private = this_order;
-               list_add_tail(&page->list, &hctx->page_list);
+               list_add_tail(&page->lru, &hctx->page_list);
 
                p = page_address(page);
                entries_per_page = order_to_size(this_order) / rq_size;
@@ -1429,7 +1401,6 @@ void blk_mq_free_queue(struct request_queue *q)
        int i;
 
        queue_for_each_hw_ctx(q, hctx, i) {
-               cancel_delayed_work_sync(&hctx->delayed_work);
                kfree(hctx->ctx_map);
                kfree(hctx->ctxs);
                blk_mq_free_rq_map(hctx);
@@ -1451,7 +1422,6 @@ void blk_mq_free_queue(struct request_queue *q)
        list_del_init(&q->all_q_node);
        mutex_unlock(&all_q_mutex);
 }
-EXPORT_SYMBOL(blk_mq_free_queue);
 
 /* Basically redo blk_mq_init_queue with queue frozen */
 static void blk_mq_queue_reinit(struct request_queue *q)
@@ -1495,11 +1465,6 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
 
 static int __init blk_mq_init(void)
 {
-       unsigned int i;
-
-       for_each_possible_cpu(i)
-               init_llist_head(&per_cpu(ipi_lists, i));
-
        blk_mq_cpu_init();
 
        /* Must be called after percpu_counter_hotcpu_callback() */
index 52bf1f9..5c39179 100644 (file)
@@ -27,6 +27,8 @@ void blk_mq_complete_request(struct request *rq, int error);
 void blk_mq_run_request(struct request *rq, bool run_queue, bool async);
 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
 void blk_mq_init_flush(struct request_queue *q);
+void blk_mq_drain_queue(struct request_queue *q);
+void blk_mq_free_queue(struct request_queue *q);
 
 /*
  * CPU hotplug helpers
@@ -38,7 +40,6 @@ void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier,
 void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier);
 void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier);
 void blk_mq_cpu_init(void);
-DECLARE_PER_CPU(struct llist_head, ipi_lists);
 
 /*
  * CPU -> queue mappings
index 05e8267..5d21239 100644 (file)
@@ -592,6 +592,10 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
                ret = -1;
        }
 
+       t->raid_partial_stripes_expensive =
+               max(t->raid_partial_stripes_expensive,
+                   b->raid_partial_stripes_expensive);
+
        /* Find lowest common alignment_offset */
        t->alignment_offset = lcm(t->alignment_offset, alignment)
                & (max(t->physical_block_size, t->io_min) - 1);
index 9777952..8095c4a 100644 (file)
@@ -11,6 +11,7 @@
 
 #include "blk.h"
 #include "blk-cgroup.h"
+#include "blk-mq.h"
 
 struct queue_sysfs_entry {
        struct attribute attr;
index a760857..1474c3a 100644 (file)
@@ -877,14 +877,14 @@ static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
        do_div(tmp, HZ);
        bytes_allowed = tmp;
 
-       if (tg->bytes_disp[rw] + bio->bi_size <= bytes_allowed) {
+       if (tg->bytes_disp[rw] + bio->bi_iter.bi_size <= bytes_allowed) {
                if (wait)
                        *wait = 0;
                return 1;
        }
 
        /* Calc approx time to dispatch */
-       extra_bytes = tg->bytes_disp[rw] + bio->bi_size - bytes_allowed;
+       extra_bytes = tg->bytes_disp[rw] + bio->bi_iter.bi_size - bytes_allowed;
        jiffy_wait = div64_u64(extra_bytes * HZ, tg->bps[rw]);
 
        if (!jiffy_wait)
@@ -987,7 +987,7 @@ static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
        bool rw = bio_data_dir(bio);
 
        /* Charge the bio to the group */
-       tg->bytes_disp[rw] += bio->bi_size;
+       tg->bytes_disp[rw] += bio->bi_iter.bi_size;
        tg->io_disp[rw]++;
 
        /*
@@ -1003,8 +1003,8 @@ static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
         */
        if (!(bio->bi_rw & REQ_THROTTLED)) {
                bio->bi_rw |= REQ_THROTTLED;
-               throtl_update_dispatch_stats(tg_to_blkg(tg), bio->bi_size,
-                                            bio->bi_rw);
+               throtl_update_dispatch_stats(tg_to_blkg(tg),
+                                            bio->bi_iter.bi_size, bio->bi_rw);
        }
 }
 
@@ -1503,7 +1503,7 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
        if (tg) {
                if (!tg->has_rules[rw]) {
                        throtl_update_dispatch_stats(tg_to_blkg(tg),
-                                                    bio->bi_size, bio->bi_rw);
+                                       bio->bi_iter.bi_size, bio->bi_rw);
                        goto out_unlock_rcu;
                }
        }
@@ -1559,7 +1559,7 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
        /* out-of-limit, queue to @tg */
        throtl_log(sq, "[%c] bio. bdisp=%llu sz=%u bps=%llu iodisp=%u iops=%u queued=%d/%d",
                   rw == READ ? 'R' : 'W',
-                  tg->bytes_disp[rw], bio->bi_size, tg->bps[rw],
+                  tg->bytes_disp[rw], bio->bi_iter.bi_size, tg->bps[rw],
                   tg->io_disp[rw], tg->iops[rw],
                   sq->nr_queued[READ], sq->nr_queued[WRITE]);
 
index cc2637f..9dbc67e 100644 (file)
@@ -4,8 +4,7 @@
  * Written by Cai Zhiyong <caizhiyong@huawei.com>
  *
  */
-#include <linux/buffer_head.h>
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/cmdline-parser.h>
 
 static int parse_subpart(struct cmdline_subpart **subpart, char *partdef)
@@ -159,6 +158,7 @@ void cmdline_parts_free(struct cmdline_parts **parts)
                *parts = next_parts;
        }
 }
+EXPORT_SYMBOL(cmdline_parts_free);
 
 int cmdline_parts_parse(struct cmdline_parts **parts, const char *cmdline)
 {
@@ -206,6 +206,7 @@ fail:
        cmdline_parts_free(parts);
        goto done;
 }
+EXPORT_SYMBOL(cmdline_parts_parse);
 
 struct cmdline_parts *cmdline_parts_find(struct cmdline_parts *parts,
                                         const char *bdev)
@@ -214,17 +215,17 @@ struct cmdline_parts *cmdline_parts_find(struct cmdline_parts *parts,
                parts = parts->next_parts;
        return parts;
 }
+EXPORT_SYMBOL(cmdline_parts_find);
 
 /*
  *  add_part()
  *    0 success.
 *    1 cannot add so many partitions.
  */
-void cmdline_parts_set(struct cmdline_parts *parts, sector_t disk_size,
-                      int slot,
-                      int (*add_part)(int, struct cmdline_subpart *, void *),
-                      void *param)
-
+int cmdline_parts_set(struct cmdline_parts *parts, sector_t disk_size,
+                     int slot,
+                     int (*add_part)(int, struct cmdline_subpart *, void *),
+                     void *param)
 {
        sector_t from = 0;
        struct cmdline_subpart *subpart;
@@ -247,4 +248,7 @@ void cmdline_parts_set(struct cmdline_parts *parts, sector_t disk_size,
                if (add_part(slot, subpart, param))
                        break;
        }
+
+       return slot;
 }
+EXPORT_SYMBOL(cmdline_parts_set);
index b7ff286..42c45a7 100644 (file)
@@ -440,7 +440,7 @@ int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
        /*
         * See if our hash lookup can find a potential backmerge.
         */
-       __rq = elv_rqhash_find(q, bio->bi_sector);
+       __rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);
        if (__rq && elv_rq_merge_ok(__rq, bio)) {
                *req = __rq;
                return ELEVATOR_BACK_MERGE;
index 625e3e4..2648797 100644 (file)
@@ -323,12 +323,14 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
 
        if (hdr->iovec_count) {
                size_t iov_data_len;
-               struct iovec *iov;
+               struct iovec *iov = NULL;
 
                ret = rw_copy_check_uvector(-1, hdr->dxferp, hdr->iovec_count,
                                            0, NULL, &iov);
-               if (ret < 0)
+               if (ret < 0) {
+                       kfree(iov);
                        goto out;
+               }
 
                iov_data_len = ret;
                ret = 0;
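The leak fix above hinges on a detail of rw_copy_check_uvector(): it may have allocated the iovec array before it reports failure, so the caller owns the memory on both paths. Initializing iov to NULL makes the error-path kfree() safe even when nothing was allocated, since kfree(NULL) is a no-op. The same caller-frees contract in a self-contained userspace model (names are illustrative):

#include <stdlib.h>

/* Model of a callee that may allocate *out and still fail afterwards. */
static int copy_check(char **out, int fail)
{
	*out = malloc(64);	/* allocation succeeds... */
	if (!*out)
		return -1;
	if (fail)
		return -1;	/* ...but later validation fails */
	return 64;
}

int main(void)
{
	char *buf = NULL;	/* like: struct iovec *iov = NULL; */
	int ret = copy_check(&buf, 1);

	if (ret < 0) {
		free(buf);	/* free(NULL) is a no-op, so always safe */
		return 1;
	}
	free(buf);
	return 0;
}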
index 14a9d19..9220f8e 100644
@@ -100,11 +100,8 @@ enum {
 
 struct buf {
        ulong nframesout;
-       ulong resid;
-       ulong bv_resid;
-       sector_t sector;
        struct bio *bio;
-       struct bio_vec *bv;
+       struct bvec_iter iter;
        struct request *rq;
 };
 
@@ -120,13 +117,10 @@ struct frame {
        ulong waited;
        ulong waited_total;
        struct aoetgt *t;               /* parent target I belong to */
-       sector_t lba;
        struct sk_buff *skb;            /* command skb freed on module exit */
        struct sk_buff *r_skb;          /* response skb for async processing */
        struct buf *buf;
-       struct bio_vec *bv;
-       ulong bcnt;
-       ulong bv_off;
+       struct bvec_iter iter;
        char flags;
 };
 
index d251543..8184451 100644
@@ -196,8 +196,7 @@ aoe_freetframe(struct frame *f)
 
        t = f->t;
        f->buf = NULL;
-       f->lba = 0;
-       f->bv = NULL;
+       memset(&f->iter, 0, sizeof(f->iter));
        f->r_skb = NULL;
        f->flags = 0;
        list_add(&f->head, &t->ffree);
@@ -295,21 +294,14 @@ newframe(struct aoedev *d)
 }
 
 static void
-skb_fillup(struct sk_buff *skb, struct bio_vec *bv, ulong off, ulong cnt)
+skb_fillup(struct sk_buff *skb, struct bio *bio, struct bvec_iter iter)
 {
        int frag = 0;
-       ulong fcnt;
-loop:
-       fcnt = bv->bv_len - (off - bv->bv_offset);
-       if (fcnt > cnt)
-               fcnt = cnt;
-       skb_fill_page_desc(skb, frag++, bv->bv_page, off, fcnt);
-       cnt -= fcnt;
-       if (cnt <= 0)
-               return;
-       bv++;
-       off = bv->bv_offset;
-       goto loop;
+       struct bio_vec bv;
+
+       __bio_for_each_segment(bv, bio, iter, iter)
+               skb_fill_page_desc(skb, frag++, bv.bv_page,
+                                  bv.bv_offset, bv.bv_len);
 }
 
 static void
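skb_fillup() now shows the new iteration idiom: the loop variable is a struct bio_vec held by value, and the walk is driven by whatever struct bvec_iter the caller passes in, so a frame can traverse its own private slice of the bio. A self-contained userspace model of that value-iterator walk (the clamp mirrors what the kernel's bvec_iter_len() does):

#include <stdio.h>

struct bio_vec { unsigned int bv_len; };

struct bvec_iter { unsigned int bi_idx; unsigned int bi_size; };

/* Model of bio_for_each_segment(): yield one segment, advance the iter. */
static int next_segment(const struct bio_vec *vecs, struct bvec_iter *iter,
			struct bio_vec *bv)
{
	if (!iter->bi_size)
		return 0;
	*bv = vecs[iter->bi_idx++];		/* copy out, like the kernel */
	if (bv->bv_len > iter->bi_size)
		bv->bv_len = iter->bi_size;	/* clamp to remaining bytes */
	iter->bi_size -= bv->bv_len;
	return 1;
}

int main(void)
{
	struct bio_vec vecs[] = { { 512 }, { 1024 }, { 512 } };
	struct bvec_iter iter = { 0, 1536 };	/* walk only the first 1536 bytes */
	struct bio_vec bv;

	while (next_segment(vecs, &iter, &bv))
		printf("segment of %u bytes\n", bv.bv_len);
	return 0;
}

That clamp is also why bvcpy() below can truncate a copy simply by setting iter.bi_size = cnt before walking.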
@@ -346,12 +338,10 @@ ata_rw_frameinit(struct frame *f)
        t->nout++;
        f->waited = 0;
        f->waited_total = 0;
-       if (f->buf)
-               f->lba = f->buf->sector;
 
        /* set up ata header */
-       ah->scnt = f->bcnt >> 9;
-       put_lba(ah, f->lba);
+       ah->scnt = f->iter.bi_size >> 9;
+       put_lba(ah, f->iter.bi_sector);
        if (t->d->flags & DEVFL_EXT) {
                ah->aflags |= AOEAFL_EXT;
        } else {
@@ -360,11 +350,11 @@ ata_rw_frameinit(struct frame *f)
                ah->lba3 |= 0xe0;       /* LBA bit + obsolete 0xa0 */
        }
        if (f->buf && bio_data_dir(f->buf->bio) == WRITE) {
-               skb_fillup(skb, f->bv, f->bv_off, f->bcnt);
+               skb_fillup(skb, f->buf->bio, f->iter);
                ah->aflags |= AOEAFL_WRITE;
-               skb->len += f->bcnt;
-               skb->data_len = f->bcnt;
-               skb->truesize += f->bcnt;
+               skb->len += f->iter.bi_size;
+               skb->data_len = f->iter.bi_size;
+               skb->truesize += f->iter.bi_size;
                t->wpkts++;
        } else {
                t->rpkts++;
@@ -382,7 +372,6 @@ aoecmd_ata_rw(struct aoedev *d)
        struct buf *buf;
        struct sk_buff *skb;
        struct sk_buff_head queue;
-       ulong bcnt, fbcnt;
 
        buf = nextbuf(d);
        if (buf == NULL)
@@ -390,39 +379,22 @@ aoecmd_ata_rw(struct aoedev *d)
        f = newframe(d);
        if (f == NULL)
                return 0;
-       bcnt = d->maxbcnt;
-       if (bcnt == 0)
-               bcnt = DEFAULTBCNT;
-       if (bcnt > buf->resid)
-               bcnt = buf->resid;
-       fbcnt = bcnt;
-       f->bv = buf->bv;
-       f->bv_off = f->bv->bv_offset + (f->bv->bv_len - buf->bv_resid);
-       do {
-               if (fbcnt < buf->bv_resid) {
-                       buf->bv_resid -= fbcnt;
-                       buf->resid -= fbcnt;
-                       break;
-               }
-               fbcnt -= buf->bv_resid;
-               buf->resid -= buf->bv_resid;
-               if (buf->resid == 0) {
-                       d->ip.buf = NULL;
-                       break;
-               }
-               buf->bv++;
-               buf->bv_resid = buf->bv->bv_len;
-               WARN_ON(buf->bv_resid == 0);
-       } while (fbcnt);
 
        /* initialize the headers & frame */
        f->buf = buf;
-       f->bcnt = bcnt;
-       ata_rw_frameinit(f);
+       f->iter = buf->iter;
+       f->iter.bi_size = min_t(unsigned long,
+                               d->maxbcnt ?: DEFAULTBCNT,
+                               f->iter.bi_size);
+       bio_advance_iter(buf->bio, &buf->iter, f->iter.bi_size);
+
+       if (!buf->iter.bi_size)
+               d->ip.buf = NULL;
 
        /* mark all tracking fields and load out */
        buf->nframesout += 1;
-       buf->sector += bcnt >> 9;
+
+       ata_rw_frameinit(f);
 
        skb = skb_clone(f->skb, GFP_ATOMIC);
        if (skb) {
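The aoecmd_ata_rw() rewrite collapses all of the old bv/bv_off/resid bookkeeping into one pattern: copy the buf's iterator into the frame, clamp the copy's bi_size to the per-frame maximum, then advance the original past what the frame consumed. Under the same minimal model as above, bio_advance_iter() is mostly sector/size arithmetic (the real helper also steps bi_idx and bi_bvec_done through the biovec):

#include <stdio.h>

struct bvec_iter { unsigned long long bi_sector; unsigned int bi_size; };

/* Model of bio_advance_iter(): consume 'bytes' from the front. */
static void advance_iter(struct bvec_iter *iter, unsigned int bytes)
{
	iter->bi_sector += bytes >> 9;		/* bytes -> 512-byte sectors */
	iter->bi_size -= bytes;
}

int main(void)
{
	struct bvec_iter buf = { 100, 8192 };	/* whole request */
	struct bvec_iter frame = buf;		/* f->iter = buf->iter */

	if (frame.bi_size > 4096)		/* clamp to per-frame max */
		frame.bi_size = 4096;
	advance_iter(&buf, frame.bi_size);	/* buf now starts after frame */

	printf("frame: sector=%llu size=%u\n", frame.bi_sector, frame.bi_size);
	printf("buf:   sector=%llu size=%u\n", buf.bi_sector, buf.bi_size);
	return 0;
}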
@@ -613,10 +585,7 @@ reassign_frame(struct frame *f)
        skb = nf->skb;
        nf->skb = f->skb;
        nf->buf = f->buf;
-       nf->bcnt = f->bcnt;
-       nf->lba = f->lba;
-       nf->bv = f->bv;
-       nf->bv_off = f->bv_off;
+       nf->iter = f->iter;
        nf->waited = 0;
        nf->waited_total = f->waited_total;
        nf->sent = f->sent;
@@ -648,19 +617,19 @@ probe(struct aoetgt *t)
        }
        f->flags |= FFL_PROBE;
        ifrotate(t);
-       f->bcnt = t->d->maxbcnt ? t->d->maxbcnt : DEFAULTBCNT;
+       f->iter.bi_size = t->d->maxbcnt ? t->d->maxbcnt : DEFAULTBCNT;
        ata_rw_frameinit(f);
        skb = f->skb;
-       for (frag = 0, n = f->bcnt; n > 0; ++frag, n -= m) {
+       for (frag = 0, n = f->iter.bi_size; n > 0; ++frag, n -= m) {
                if (n < PAGE_SIZE)
                        m = n;
                else
                        m = PAGE_SIZE;
                skb_fill_page_desc(skb, frag, empty_page, 0, m);
        }
-       skb->len += f->bcnt;
-       skb->data_len = f->bcnt;
-       skb->truesize += f->bcnt;
+       skb->len += f->iter.bi_size;
+       skb->data_len = f->iter.bi_size;
+       skb->truesize += f->iter.bi_size;
 
        skb = skb_clone(f->skb, GFP_ATOMIC);
        if (skb) {
@@ -897,15 +866,15 @@ rqbiocnt(struct request *r)
 static void
 bio_pageinc(struct bio *bio)
 {
-       struct bio_vec *bv;
+       struct bio_vec bv;
        struct page *page;
-       int i;
+       struct bvec_iter iter;
 
-       bio_for_each_segment(bv, bio, i) {
+       bio_for_each_segment(bv, bio, iter) {
                /* Non-zero page count for non-head members of
                 * compound pages is no longer allowed by the kernel.
                 */
-               page = compound_trans_head(bv->bv_page);
+               page = compound_trans_head(bv.bv_page);
                atomic_inc(&page->_count);
        }
 }
@@ -913,12 +882,12 @@ bio_pageinc(struct bio *bio)
 static void
 bio_pagedec(struct bio *bio)
 {
-       struct bio_vec *bv;
        struct page *page;
-       int i;
+       struct bio_vec bv;
+       struct bvec_iter iter;
 
-       bio_for_each_segment(bv, bio, i) {
-               page = compound_trans_head(bv->bv_page);
+       bio_for_each_segment(bv, bio, iter) {
+               page = compound_trans_head(bv.bv_page);
                atomic_dec(&page->_count);
        }
 }
@@ -929,12 +898,8 @@ bufinit(struct buf *buf, struct request *rq, struct bio *bio)
        memset(buf, 0, sizeof(*buf));
        buf->rq = rq;
        buf->bio = bio;
-       buf->resid = bio->bi_size;
-       buf->sector = bio->bi_sector;
+       buf->iter = bio->bi_iter;
        bio_pageinc(bio);
-       buf->bv = bio_iovec(bio);
-       buf->bv_resid = buf->bv->bv_len;
-       WARN_ON(buf->bv_resid == 0);
 }
 
 static struct buf *
@@ -1119,24 +1084,18 @@ gettgt(struct aoedev *d, char *addr)
 }
 
 static void
-bvcpy(struct bio_vec *bv, ulong off, struct sk_buff *skb, long cnt)
+bvcpy(struct sk_buff *skb, struct bio *bio, struct bvec_iter iter, long cnt)
 {
-       ulong fcnt;
-       char *p;
        int soff = 0;
-loop:
-       fcnt = bv->bv_len - (off - bv->bv_offset);
-       if (fcnt > cnt)
-               fcnt = cnt;
-       p = page_address(bv->bv_page) + off;
-       skb_copy_bits(skb, soff, p, fcnt);
-       soff += fcnt;
-       cnt -= fcnt;
-       if (cnt <= 0)
-               return;
-       bv++;
-       off = bv->bv_offset;
-       goto loop;
+       struct bio_vec bv;
+
+       iter.bi_size = cnt;
+
+       __bio_for_each_segment(bv, bio, iter, iter) {
+               char *p = page_address(bv.bv_page) + bv.bv_offset;
+               skb_copy_bits(skb, soff, p, bv.bv_len);
+               soff += bv.bv_len;
+       }
 }
 
 void
@@ -1152,7 +1111,7 @@ aoe_end_request(struct aoedev *d, struct request *rq, int fastfail)
        do {
                bio = rq->bio;
                bok = !fastfail && test_bit(BIO_UPTODATE, &bio->bi_flags);
-       } while (__blk_end_request(rq, bok ? 0 : -EIO, bio->bi_size));
+       } while (__blk_end_request(rq, bok ? 0 : -EIO, bio->bi_iter.bi_size));
 
        /* cf. http://lkml.org/lkml/2006/10/31/28 */
        if (!fastfail)
@@ -1229,7 +1188,15 @@ noskb:           if (buf)
                        clear_bit(BIO_UPTODATE, &buf->bio->bi_flags);
                        break;
                }
-               bvcpy(f->bv, f->bv_off, skb, n);
+               if (n > f->iter.bi_size) {
+                       pr_err_ratelimited("%s e%ld.%d.  bytes=%ld need=%u\n",
+                               "aoe: too-large data size in read from",
+                               (long) d->aoemajor, d->aoeminor,
+                               n, f->iter.bi_size);
+                       clear_bit(BIO_UPTODATE, &buf->bio->bi_flags);
+                       break;
+               }
+               bvcpy(skb, f->buf->bio, f->iter, n);
        case ATA_CMD_PIO_WRITE:
        case ATA_CMD_PIO_WRITE_EXT:
                spin_lock_irq(&d->lock);
@@ -1272,7 +1239,7 @@ out:
 
        aoe_freetframe(f);
 
-       if (buf && --buf->nframesout == 0 && buf->resid == 0)
+       if (buf && --buf->nframesout == 0 && buf->iter.bi_size == 0)
                aoe_end_buf(d, buf);
 
        spin_unlock_irq(&d->lock);
@@ -1727,7 +1694,7 @@ aoe_failbuf(struct aoedev *d, struct buf *buf)
 {
        if (buf == NULL)
                return;
-       buf->resid = 0;
+       buf->iter.bi_size = 0;
        clear_bit(BIO_UPTODATE, &buf->bio->bi_flags);
        if (buf->nframesout == 0)
                aoe_end_buf(d, buf);
index d91f1a5..e73b85c 100644
@@ -328,18 +328,18 @@ static void brd_make_request(struct request_queue *q, struct bio *bio)
        struct block_device *bdev = bio->bi_bdev;
        struct brd_device *brd = bdev->bd_disk->private_data;
        int rw;
-       struct bio_vec *bvec;
+       struct bio_vec bvec;
        sector_t sector;
-       int i;
+       struct bvec_iter iter;
        int err = -EIO;
 
-       sector = bio->bi_sector;
+       sector = bio->bi_iter.bi_sector;
        if (bio_end_sector(bio) > get_capacity(bdev->bd_disk))
                goto out;
 
        if (unlikely(bio->bi_rw & REQ_DISCARD)) {
                err = 0;
-               discard_from_brd(brd, sector, bio->bi_size);
+               discard_from_brd(brd, sector, bio->bi_iter.bi_size);
                goto out;
        }
 
@@ -347,10 +347,10 @@ static void brd_make_request(struct request_queue *q, struct bio *bio)
        if (rw == READA)
                rw = READ;
 
-       bio_for_each_segment(bvec, bio, i) {
-               unsigned int len = bvec->bv_len;
-               err = brd_do_bvec(brd, bvec->bv_page, len,
-                                       bvec->bv_offset, rw, sector);
+       bio_for_each_segment(bvec, bio, iter) {
+               unsigned int len = bvec.bv_len;
+               err = brd_do_bvec(brd, bvec.bv_page, len,
+                                       bvec.bv_offset, rw, sector);
                if (err)
                        break;
                sector += len >> SECTOR_SHIFT;
index b35fc4f..036e8ab 100644
@@ -5004,7 +5004,7 @@ reinit_after_soft_reset:
 
        i = alloc_cciss_hba(pdev);
        if (i < 0)
-               return -1;
+               return -ENOMEM;
 
        h = hba[i];
        h->pdev = pdev;
@@ -5205,7 +5205,7 @@ clean_no_release_regions:
         */
        pci_set_drvdata(pdev, NULL);
        free_hba(h);
-       return -1;
+       return -ENODEV;
 }
 
 static void cciss_shutdown(struct pci_dev *pdev)
index 28c73ca..a9b13f2 100644
@@ -159,7 +159,7 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
 
        bio = bio_alloc_drbd(GFP_NOIO);
        bio->bi_bdev = bdev->md_bdev;
-       bio->bi_sector = sector;
+       bio->bi_iter.bi_sector = sector;
        err = -EIO;
        if (bio_add_page(bio, page, size, 0) != size)
                goto out;
index b12c11e..597f111 100644
@@ -1028,7 +1028,7 @@ static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must
        } else
                page = b->bm_pages[page_nr];
        bio->bi_bdev = mdev->ldev->md_bdev;
-       bio->bi_sector = on_disk_sector;
+       bio->bi_iter.bi_sector = on_disk_sector;
        /* bio_add_page of a single page to an empty bio will always succeed,
         * according to api.  Do we want to assert that? */
        bio_add_page(bio, page, len, 0);
index 9e3818b..929468e 100644
@@ -1537,15 +1537,17 @@ static int _drbd_send_page(struct drbd_conf *mdev, struct page *page,
 
 static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio)
 {
-       struct bio_vec *bvec;
-       int i;
+       struct bio_vec bvec;
+       struct bvec_iter iter;
+
        /* hint all but last page with MSG_MORE */
-       bio_for_each_segment(bvec, bio, i) {
+       bio_for_each_segment(bvec, bio, iter) {
                int err;
 
-               err = _drbd_no_send_page(mdev, bvec->bv_page,
-                                        bvec->bv_offset, bvec->bv_len,
-                                        i == bio->bi_vcnt - 1 ? 0 : MSG_MORE);
+               err = _drbd_no_send_page(mdev, bvec.bv_page,
+                                        bvec.bv_offset, bvec.bv_len,
+                                        bio_iter_last(bvec, iter)
+                                        ? 0 : MSG_MORE);
                if (err)
                        return err;
        }
@@ -1554,15 +1556,16 @@ static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio)
 
 static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio)
 {
-       struct bio_vec *bvec;
-       int i;
+       struct bio_vec bvec;
+       struct bvec_iter iter;
+
        /* hint all but last page with MSG_MORE */
-       bio_for_each_segment(bvec, bio, i) {
+       bio_for_each_segment(bvec, bio, iter) {
                int err;
 
-               err = _drbd_send_page(mdev, bvec->bv_page,
-                                     bvec->bv_offset, bvec->bv_len,
-                                     i == bio->bi_vcnt - 1 ? 0 : MSG_MORE);
+               err = _drbd_send_page(mdev, bvec.bv_page,
+                                     bvec.bv_offset, bvec.bv_len,
+                                     bio_iter_last(bvec, iter) ? 0 : MSG_MORE);
                if (err)
                        return err;
        }
index 6fa6673..d073305 100644
@@ -1333,7 +1333,7 @@ next_bio:
                goto fail;
        }
        /* > peer_req->i.sector, unless this is the first bio */
-       bio->bi_sector = sector;
+       bio->bi_iter.bi_sector = sector;
        bio->bi_bdev = mdev->ldev->backing_bdev;
        bio->bi_rw = rw;
        bio->bi_private = peer_req;
@@ -1353,7 +1353,7 @@ next_bio:
                                dev_err(DEV,
                                        "bio_add_page failed for len=%u, "
                                        "bi_vcnt=0 (bi_sector=%llu)\n",
-                                       len, (unsigned long long)bio->bi_sector);
+                                       len, (uint64_t)bio->bi_iter.bi_sector);
                                err = -ENOSPC;
                                goto fail;
                        }
@@ -1595,9 +1595,10 @@ static int drbd_drain_block(struct drbd_conf *mdev, int data_size)
 static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
                           sector_t sector, int data_size)
 {
-       struct bio_vec *bvec;
+       struct bio_vec bvec;
+       struct bvec_iter iter;
        struct bio *bio;
-       int dgs, err, i, expect;
+       int dgs, err, expect;
        void *dig_in = mdev->tconn->int_dig_in;
        void *dig_vv = mdev->tconn->int_dig_vv;
 
@@ -1615,13 +1616,13 @@ static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
        mdev->recv_cnt += data_size>>9;
 
        bio = req->master_bio;
-       D_ASSERT(sector == bio->bi_sector);
+       D_ASSERT(sector == bio->bi_iter.bi_sector);
 
-       bio_for_each_segment(bvec, bio, i) {
-               void *mapped = kmap(bvec->bv_page) + bvec->bv_offset;
-               expect = min_t(int, data_size, bvec->bv_len);
+       bio_for_each_segment(bvec, bio, iter) {
+               void *mapped = kmap(bvec.bv_page) + bvec.bv_offset;
+               expect = min_t(int, data_size, bvec.bv_len);
                err = drbd_recv_all_warn(mdev->tconn, mapped, expect);
-               kunmap(bvec->bv_page);
+               kunmap(bvec.bv_page);
                if (err)
                        return err;
                data_size -= expect;
index fec7bef..104a040 100644
@@ -77,8 +77,8 @@ static struct drbd_request *drbd_req_new(struct drbd_conf *mdev,
        req->epoch       = 0;
 
        drbd_clear_interval(&req->i);
-       req->i.sector     = bio_src->bi_sector;
-       req->i.size      = bio_src->bi_size;
+       req->i.sector     = bio_src->bi_iter.bi_sector;
+       req->i.size      = bio_src->bi_iter.bi_size;
        req->i.local = true;
        req->i.waiting = false;
 
@@ -1280,7 +1280,7 @@ void drbd_make_request(struct request_queue *q, struct bio *bio)
        /*
         * what we "blindly" assume:
         */
-       D_ASSERT(IS_ALIGNED(bio->bi_size, 512));
+       D_ASSERT(IS_ALIGNED(bio->bi_iter.bi_size, 512));
 
        inc_ap_bio(mdev);
        __drbd_make_request(mdev, bio, start_time);
index 978cb1a..28e15d9 100644
@@ -269,7 +269,7 @@ static inline void drbd_req_make_private_bio(struct drbd_request *req, struct bi
 
 /* Short lived temporary struct on the stack.
  * We could squirrel the error to be returned into
- * bio->bi_size, or similar. But that would be too ugly. */
+ * bio->bi_iter.bi_size, or similar. But that would be too ugly. */
 struct bio_and_error {
        struct bio *bio;
        int error;
index 891c0ec..84d3175 100644
@@ -313,8 +313,8 @@ void drbd_csum_bio(struct drbd_conf *mdev, struct crypto_hash *tfm, struct bio *
 {
        struct hash_desc desc;
        struct scatterlist sg;
-       struct bio_vec *bvec;
-       int i;
+       struct bio_vec bvec;
+       struct bvec_iter iter;
 
        desc.tfm = tfm;
        desc.flags = 0;
@@ -322,8 +322,8 @@ void drbd_csum_bio(struct drbd_conf *mdev, struct crypto_hash *tfm, struct bio *
        sg_init_table(&sg, 1);
        crypto_hash_init(&desc);
 
-       bio_for_each_segment(bvec, bio, i) {
-               sg_set_page(&sg, bvec->bv_page, bvec->bv_len, bvec->bv_offset);
+       bio_for_each_segment(bvec, bio, iter) {
+               sg_set_page(&sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset);
                crypto_hash_update(&desc, &sg, sg.length);
        }
        crypto_hash_final(&desc, digest);
index 000abe2..2023043 100644
@@ -2351,7 +2351,7 @@ static void rw_interrupt(void)
 /* Compute maximal contiguous buffer size. */
 static int buffer_chain_size(void)
 {
-       struct bio_vec *bv;
+       struct bio_vec bv;
        int size;
        struct req_iterator iter;
        char *base;
@@ -2360,10 +2360,10 @@ static int buffer_chain_size(void)
        size = 0;
 
        rq_for_each_segment(bv, current_req, iter) {
-               if (page_address(bv->bv_page) + bv->bv_offset != base + size)
+               if (page_address(bv.bv_page) + bv.bv_offset != base + size)
                        break;
 
-               size += bv->bv_len;
+               size += bv.bv_len;
        }
 
        return size >> 9;
@@ -2389,7 +2389,7 @@ static int transfer_size(int ssize, int max_sector, int max_size)
 static void copy_buffer(int ssize, int max_sector, int max_sector_2)
 {
        int remaining;          /* number of transferred 512-byte sectors */
-       struct bio_vec *bv;
+       struct bio_vec bv;
        char *buffer;
        char *dma_buffer;
        int size;
@@ -2427,10 +2427,10 @@ static void copy_buffer(int ssize, int max_sector, int max_sector_2)
                if (!remaining)
                        break;
 
-               size = bv->bv_len;
+               size = bv.bv_len;
                SUPBOUND(size, remaining);
 
-               buffer = page_address(bv->bv_page) + bv->bv_offset;
+               buffer = page_address(bv.bv_page) + bv.bv_offset;
                if (dma_buffer + size >
                    floppy_track_buffer + (max_buffer_sectors << 10) ||
                    dma_buffer < floppy_track_buffer) {
@@ -3691,9 +3691,12 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
        if (!(mode & FMODE_NDELAY)) {
                if (mode & (FMODE_READ|FMODE_WRITE)) {
                        UDRS->last_checked = 0;
+                       clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
                        check_disk_change(bdev);
                        if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags))
                                goto out;
+                       if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags))
+                               goto out;
                }
                res = -EROFS;
                if ((mode & FMODE_WRITE) &&
@@ -3746,17 +3749,29 @@ static unsigned int floppy_check_events(struct gendisk *disk,
  * a disk in the drive, and whether that disk is writable.
  */
 
-static void floppy_rb0_complete(struct bio *bio, int err)
+struct rb0_cbdata {
+       int drive;
+       struct completion complete;
+};
+
+static void floppy_rb0_cb(struct bio *bio, int err)
 {
-       complete((struct completion *)bio->bi_private);
+       struct rb0_cbdata *cbdata = (struct rb0_cbdata *)bio->bi_private;
+       int drive = cbdata->drive;
+
+       if (err) {
+               pr_info("floppy: error %d while reading block 0\n", err);
+               set_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
+       }
+       complete(&cbdata->complete);
 }
 
-static int __floppy_read_block_0(struct block_device *bdev)
+static int __floppy_read_block_0(struct block_device *bdev, int drive)
 {
        struct bio bio;
        struct bio_vec bio_vec;
-       struct completion complete;
        struct page *page;
+       struct rb0_cbdata cbdata;
        size_t size;
 
        page = alloc_page(GFP_NOIO);
@@ -3769,23 +3784,26 @@ static int __floppy_read_block_0(struct block_device *bdev)
        if (!size)
                size = 1024;
 
+       cbdata.drive = drive;
+
        bio_init(&bio);
        bio.bi_io_vec = &bio_vec;
        bio_vec.bv_page = page;
        bio_vec.bv_len = size;
        bio_vec.bv_offset = 0;
        bio.bi_vcnt = 1;
-       bio.bi_size = size;
+       bio.bi_iter.bi_size = size;
        bio.bi_bdev = bdev;
-       bio.bi_sector = 0;
+       bio.bi_iter.bi_sector = 0;
        bio.bi_flags = (1 << BIO_QUIET);
-       init_completion(&complete);
-       bio.bi_private = &complete;
-       bio.bi_end_io = floppy_rb0_complete;
+       init_completion(&cbdata.complete);
+       bio.bi_private = &cbdata;
+       bio.bi_end_io = floppy_rb0_cb;
 
        submit_bio(READ, &bio);
        process_fd_request();
-       wait_for_completion(&complete);
+
+       wait_for_completion(&cbdata.complete);
 
        __free_page(page);
 
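The rb0 path is the standard shape for synchronous I/O on an on-stack bio: set up a completion before the bio is submitted, have the end_io callback complete() it, and block in wait_for_completion(). Initializing the completion first matters because the end_io callback may run before the submitter reaches the wait. A hedged kernel-style sketch of that shape (my_end_io is a hypothetical callback that calls complete() on bi_private):

	struct completion done;
	struct bio bio;

	init_completion(&done);		/* must precede submit_bio() */
	bio_init(&bio);
	bio.bi_private = &done;
	bio.bi_end_io = my_end_io;	/* calls complete(bio->bi_private) */

	submit_bio(READ, &bio);
	wait_for_completion(&done);	/* end_io may already have fired */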
@@ -3827,7 +3845,7 @@ static int floppy_revalidate(struct gendisk *disk)
                        UDRS->generation++;
                if (drive_no_geom(drive)) {
                        /* auto-sensing */
-                       res = __floppy_read_block_0(opened_bdev[drive]);
+                       res = __floppy_read_block_0(opened_bdev[drive], drive);
                } else {
                        if (cf)
                                poll_drive(false, FD_RAW_NEED_DISK);
index c8dac73..66e8c3b 100644
@@ -288,9 +288,10 @@ static int lo_send(struct loop_device *lo, struct bio *bio, loff_t pos)
 {
        int (*do_lo_send)(struct loop_device *, struct bio_vec *, loff_t,
                        struct page *page);
-       struct bio_vec *bvec;
+       struct bio_vec bvec;
+       struct bvec_iter iter;
        struct page *page = NULL;
-       int i, ret = 0;
+       int ret = 0;
 
        if (lo->transfer != transfer_none) {
                page = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
@@ -302,11 +303,11 @@ static int lo_send(struct loop_device *lo, struct bio *bio, loff_t pos)
                do_lo_send = do_lo_send_direct_write;
        }
 
-       bio_for_each_segment(bvec, bio, i) {
-               ret = do_lo_send(lo, bvec, pos, page);
+       bio_for_each_segment(bvec, bio, iter) {
+               ret = do_lo_send(lo, &bvec, pos, page);
                if (ret < 0)
                        break;
-               pos += bvec->bv_len;
+               pos += bvec.bv_len;
        }
        if (page) {
                kunmap(page);
@@ -392,20 +393,20 @@ do_lo_receive(struct loop_device *lo,
 static int
 lo_receive(struct loop_device *lo, struct bio *bio, int bsize, loff_t pos)
 {
-       struct bio_vec *bvec;
+       struct bio_vec bvec;
+       struct bvec_iter iter;
        ssize_t s;
-       int i;
 
-       bio_for_each_segment(bvec, bio, i) {
-               s = do_lo_receive(lo, bvec, bsize, pos);
+       bio_for_each_segment(bvec, bio, iter) {
+               s = do_lo_receive(lo, &bvec, bsize, pos);
                if (s < 0)
                        return s;
 
-               if (s != bvec->bv_len) {
+               if (s != bvec.bv_len) {
                        zero_fill_bio(bio);
                        break;
                }
-               pos += bvec->bv_len;
+               pos += bvec.bv_len;
        }
        return 0;
 }
@@ -415,7 +416,7 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
        loff_t pos;
        int ret;
 
-       pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset;
+       pos = ((loff_t) bio->bi_iter.bi_sector << 9) + lo->lo_offset;
 
        if (bio_rw(bio) == WRITE) {
                struct file *file = lo->lo_backing_file;
@@ -444,7 +445,7 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
                                goto out;
                        }
                        ret = file->f_op->fallocate(file, mode, pos,
-                                                   bio->bi_size);
+                                                   bio->bi_iter.bi_size);
                        if (unlikely(ret && ret != -EINVAL &&
                                     ret != -EOPNOTSUPP))
                                ret = -EIO;
@@ -798,7 +799,7 @@ static void loop_config_discard(struct loop_device *lo)
 
        /*
         * We use punch hole to reclaim the free space used by the
-        * image a.k.a. discard. However we do support discard if
+        * image a.k.a. discard. However we do not support discard if
         * encryption is enabled, because it may give an attacker
         * useful information.
         */
index 7bc363f..eb59b12 100644
@@ -915,7 +915,7 @@ static int mg_probe(struct platform_device *plat_dev)
 
        /* disk reset */
        if (prv_data->dev_attr == MG_STORAGE_DEV) {
-               /* If POR seq. not yet finised, wait */
+               /* If POR seq. not yet finished, wait */
                err = mg_wait_rstout(host->rstout, MG_TMAX_RSTOUT);
                if (err)
                        goto probe_err_3b;
index 050c712..5160269 100644
 #include "mtip32xx.h"
 
 #define HW_CMD_SLOT_SZ         (MTIP_MAX_COMMAND_SLOTS * 32)
-#define HW_CMD_TBL_SZ          (AHCI_CMD_TBL_HDR_SZ + (MTIP_MAX_SG * 16))
-#define HW_CMD_TBL_AR_SZ       (HW_CMD_TBL_SZ * MTIP_MAX_COMMAND_SLOTS)
-#define HW_PORT_PRIV_DMA_SZ \
-               (HW_CMD_SLOT_SZ + HW_CMD_TBL_AR_SZ + AHCI_RX_FIS_SZ)
+
+/* DMA region containing RX Fis, Identify, RLE10, and SMART buffers */
+#define AHCI_RX_FIS_SZ          0x100
+#define AHCI_RX_FIS_OFFSET      0x0
+#define AHCI_IDFY_SZ            ATA_SECT_SIZE
+#define AHCI_IDFY_OFFSET        0x400
+#define AHCI_SECTBUF_SZ         ATA_SECT_SIZE
+#define AHCI_SECTBUF_OFFSET     0x800
+#define AHCI_SMARTBUF_SZ        ATA_SECT_SIZE
+#define AHCI_SMARTBUF_OFFSET    0xC00
+/* 0x100 + 0x200 + 0x200 + 0x200 is smaller than 4k but we pad it out */
+#define BLOCK_DMA_ALLOC_SZ      4096
+
+/* DMA region containing command table (should be 8192 bytes) */
+#define AHCI_CMD_SLOT_SZ        sizeof(struct mtip_cmd_hdr)
+#define AHCI_CMD_TBL_SZ         (MTIP_MAX_COMMAND_SLOTS * AHCI_CMD_SLOT_SZ)
+#define AHCI_CMD_TBL_OFFSET     0x0
+
+/* DMA region per command (contains header and SGL) */
+#define AHCI_CMD_TBL_HDR_SZ     0x80
+#define AHCI_CMD_TBL_HDR_OFFSET 0x0
+#define AHCI_CMD_TBL_SGL_SZ     (MTIP_MAX_SG * sizeof(struct mtip_cmd_sg))
+#define AHCI_CMD_TBL_SGL_OFFSET AHCI_CMD_TBL_HDR_SZ
+#define CMD_DMA_ALLOC_SZ        (AHCI_CMD_TBL_SGL_SZ + AHCI_CMD_TBL_HDR_SZ)
+
 
 #define HOST_CAP_NZDMA         (1 << 19)
 #define HOST_HSORG             0xFC
@@ -899,8 +920,9 @@ static void mtip_handle_tfe(struct driver_data *dd)
                        fail_reason = "thermal shutdown";
                }
                if (buf[288] == 0xBF) {
+                       set_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag);
                        dev_info(&dd->pdev->dev,
-                               "Drive indicates rebuild has failed.\n");
+                               "Drive indicates rebuild has failed. Secure erase required.\n");
                        fail_all_ncq_cmds = 1;
                        fail_reason = "rebuild failed";
                }
@@ -1566,6 +1588,12 @@ static int mtip_get_identify(struct mtip_port *port, void __user *user_buffer)
        }
 #endif
 
+       /* Check security locked state */
+       if (port->identify[128] & 0x4)
+               set_bit(MTIP_DDF_SEC_LOCK_BIT, &port->dd->dd_flag);
+       else
+               clear_bit(MTIP_DDF_SEC_LOCK_BIT, &port->dd->dd_flag);
+
 #ifdef MTIP_TRIM /* Disabling TRIM support temporarily */
        /* Demux ID.DRAT & ID.RZAT to determine trim support */
        if (port->identify[69] & (1 << 14) && port->identify[69] & (1 << 5))
@@ -1887,6 +1915,10 @@ static void mtip_dump_identify(struct mtip_port *port)
        strlcpy(cbuf, (char *)(port->identify+27), 41);
        dev_info(&port->dd->pdev->dev, "Model: %s\n", cbuf);
 
+       dev_info(&port->dd->pdev->dev, "Security: %04x %s\n",
+               port->identify[128],
+               port->identify[128] & 0x4 ? "(LOCKED)" : "");
+
        if (mtip_hw_get_capacity(port->dd, &sectors))
                dev_info(&port->dd->pdev->dev,
                        "Capacity: %llu sectors (%llu MB)\n",
@@ -3312,6 +3344,118 @@ st_out:
        return 0;
 }
 
+/*
+ * DMA region teardown
+ *
+ * @dd Pointer to driver_data structure
+ *
+ * return value
+ *      None
+ */
+static void mtip_dma_free(struct driver_data *dd)
+{
+       int i;
+       struct mtip_port *port = dd->port;
+
+       if (port->block1)
+               dmam_free_coherent(&dd->pdev->dev, BLOCK_DMA_ALLOC_SZ,
+                                       port->block1, port->block1_dma);
+
+       if (port->command_list) {
+               dmam_free_coherent(&dd->pdev->dev, AHCI_CMD_TBL_SZ,
+                               port->command_list, port->command_list_dma);
+       }
+
+       for (i = 0; i < MTIP_MAX_COMMAND_SLOTS; i++) {
+               if (port->commands[i].command)
+                       dmam_free_coherent(&dd->pdev->dev, CMD_DMA_ALLOC_SZ,
+                               port->commands[i].command,
+                               port->commands[i].command_dma);
+       }
+}
+
+/*
+ * DMA region setup
+ *
+ * @dd Pointer to driver_data structure
+ *
+ * return value
+ *      -ENOMEM Not enough free DMA region space to initialize driver
+ */
+static int mtip_dma_alloc(struct driver_data *dd)
+{
+       struct mtip_port *port = dd->port;
+       int i, rv = 0;
+       u32 host_cap_64 = readl(dd->mmio + HOST_CAP) & HOST_CAP_64;
+
+       /* Allocate dma memory for RX Fis, Identify, and Sector Buffer */
+       port->block1 =
+               dmam_alloc_coherent(&dd->pdev->dev, BLOCK_DMA_ALLOC_SZ,
+                                       &port->block1_dma, GFP_KERNEL);
+       if (!port->block1)
+               return -ENOMEM;
+       memset(port->block1, 0, BLOCK_DMA_ALLOC_SZ);
+
+       /* Allocate dma memory for command list */
+       port->command_list =
+               dmam_alloc_coherent(&dd->pdev->dev, AHCI_CMD_TBL_SZ,
+                                       &port->command_list_dma, GFP_KERNEL);
+       if (!port->command_list) {
+               dmam_free_coherent(&dd->pdev->dev, BLOCK_DMA_ALLOC_SZ,
+                                       port->block1, port->block1_dma);
+               port->block1 = NULL;
+               port->block1_dma = 0;
+               return -ENOMEM;
+       }
+       memset(port->command_list, 0, AHCI_CMD_TBL_SZ);
+
+       /* Setup all pointers into first DMA region */
+       port->rxfis         = port->block1 + AHCI_RX_FIS_OFFSET;
+       port->rxfis_dma     = port->block1_dma + AHCI_RX_FIS_OFFSET;
+       port->identify      = port->block1 + AHCI_IDFY_OFFSET;
+       port->identify_dma  = port->block1_dma + AHCI_IDFY_OFFSET;
+       port->log_buf       = port->block1 + AHCI_SECTBUF_OFFSET;
+       port->log_buf_dma   = port->block1_dma + AHCI_SECTBUF_OFFSET;
+       port->smart_buf     = port->block1 + AHCI_SMARTBUF_OFFSET;
+       port->smart_buf_dma = port->block1_dma + AHCI_SMARTBUF_OFFSET;
+
+       /* Setup per command SGL DMA region */
+
+       /* Point the command headers at the command tables */
+       for (i = 0; i < MTIP_MAX_COMMAND_SLOTS; i++) {
+               port->commands[i].command =
+                       dmam_alloc_coherent(&dd->pdev->dev, CMD_DMA_ALLOC_SZ,
+                               &port->commands[i].command_dma, GFP_KERNEL);
+               if (!port->commands[i].command) {
+                       rv = -ENOMEM;
+                       mtip_dma_free(dd);
+                       return rv;
+               }
+               memset(port->commands[i].command, 0, CMD_DMA_ALLOC_SZ);
+
+               port->commands[i].command_header = port->command_list +
+                                       (sizeof(struct mtip_cmd_hdr) * i);
+               port->commands[i].command_header_dma =
+                                       dd->port->command_list_dma +
+                                       (sizeof(struct mtip_cmd_hdr) * i);
+
+               if (host_cap_64)
+                       port->commands[i].command_header->ctbau =
+                               __force_bit2int cpu_to_le32(
+                               (port->commands[i].command_dma >> 16) >> 16);
+
+               port->commands[i].command_header->ctba =
+                               __force_bit2int cpu_to_le32(
+                               port->commands[i].command_dma & 0xFFFFFFFF);
+
+               sg_init_table(port->commands[i].sg, MTIP_MAX_SG);
+
+               /* Mark command as currently inactive */
+               atomic_set(&dd->port->commands[i].active, 0);
+       }
+       return 0;
+}
+
 /*
  * Called once for each card.
  *
@@ -3370,83 +3514,10 @@ static int mtip_hw_init(struct driver_data *dd)
        dd->port->mmio  = dd->mmio + PORT_OFFSET;
        dd->port->dd    = dd;
 
-       /* Allocate memory for the command list. */
-       dd->port->command_list =
-               dmam_alloc_coherent(&dd->pdev->dev,
-                       HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 4),
-                       &dd->port->command_list_dma,
-                       GFP_KERNEL);
-       if (!dd->port->command_list) {
-               dev_err(&dd->pdev->dev,
-                       "Memory allocation: command list\n");
-               rv = -ENOMEM;
+       /* DMA allocations */
+       rv = mtip_dma_alloc(dd);
+       if (rv < 0)
                goto out1;
-       }
-
-       /* Clear the memory we have allocated. */
-       memset(dd->port->command_list,
-               0,
-               HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 4));
-
-       /* Setup the addresse of the RX FIS. */
-       dd->port->rxfis     = dd->port->command_list + HW_CMD_SLOT_SZ;
-       dd->port->rxfis_dma = dd->port->command_list_dma + HW_CMD_SLOT_SZ;
-
-       /* Setup the address of the command tables. */
-       dd->port->command_table   = dd->port->rxfis + AHCI_RX_FIS_SZ;
-       dd->port->command_tbl_dma = dd->port->rxfis_dma + AHCI_RX_FIS_SZ;
-
-       /* Setup the address of the identify data. */
-       dd->port->identify     = dd->port->command_table +
-                                       HW_CMD_TBL_AR_SZ;
-       dd->port->identify_dma = dd->port->command_tbl_dma +
-                                       HW_CMD_TBL_AR_SZ;
-
-       /* Setup the address of the sector buffer - for some non-ncq cmds */
-       dd->port->sector_buffer = (void *) dd->port->identify + ATA_SECT_SIZE;
-       dd->port->sector_buffer_dma = dd->port->identify_dma + ATA_SECT_SIZE;
-
-       /* Setup the address of the log buf - for read log command */
-       dd->port->log_buf = (void *)dd->port->sector_buffer  + ATA_SECT_SIZE;
-       dd->port->log_buf_dma = dd->port->sector_buffer_dma + ATA_SECT_SIZE;
-
-       /* Setup the address of the smart buf - for smart read data command */
-       dd->port->smart_buf = (void *)dd->port->log_buf  + ATA_SECT_SIZE;
-       dd->port->smart_buf_dma = dd->port->log_buf_dma + ATA_SECT_SIZE;
-
-
-       /* Point the command headers at the command tables. */
-       for (i = 0; i < num_command_slots; i++) {
-               dd->port->commands[i].command_header =
-                                       dd->port->command_list +
-                                       (sizeof(struct mtip_cmd_hdr) * i);
-               dd->port->commands[i].command_header_dma =
-                                       dd->port->command_list_dma +
-                                       (sizeof(struct mtip_cmd_hdr) * i);
-
-               dd->port->commands[i].command =
-                       dd->port->command_table + (HW_CMD_TBL_SZ * i);
-               dd->port->commands[i].command_dma =
-                       dd->port->command_tbl_dma + (HW_CMD_TBL_SZ * i);
-
-               if (readl(dd->mmio + HOST_CAP) & HOST_CAP_64)
-                       dd->port->commands[i].command_header->ctbau =
-                       __force_bit2int cpu_to_le32(
-                       (dd->port->commands[i].command_dma >> 16) >> 16);
-               dd->port->commands[i].command_header->ctba =
-                       __force_bit2int cpu_to_le32(
-                       dd->port->commands[i].command_dma & 0xFFFFFFFF);
-
-               /*
-                * If this is not done, a bug is reported by the stock
-                * FC11 i386. Due to the fact that it has lots of kernel
-                * debugging enabled.
-                */
-               sg_init_table(dd->port->commands[i].sg, MTIP_MAX_SG);
-
-               /* Mark all commands as currently inactive.*/
-               atomic_set(&dd->port->commands[i].active, 0);
-       }
 
        /* Setup the pointers to the extended s_active and CI registers. */
        for (i = 0; i < dd->slot_groups; i++) {
@@ -3594,12 +3665,8 @@ out3:
 
 out2:
        mtip_deinit_port(dd->port);
+       mtip_dma_free(dd);
 
-       /* Free the command/command header memory. */
-       dmam_free_coherent(&dd->pdev->dev,
-                               HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 4),
-                               dd->port->command_list,
-                               dd->port->command_list_dma);
 out1:
        /* Free the memory allocated for the for structure. */
        kfree(dd->port);
@@ -3622,7 +3689,8 @@ static int mtip_hw_exit(struct driver_data *dd)
         * saves its state.
         */
        if (!dd->sr) {
-               if (!test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag))
+               if (!test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags) &&
+                   !test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag))
                        if (mtip_standby_immediate(dd->port))
                                dev_warn(&dd->pdev->dev,
                                        "STANDBY IMMEDIATE failed\n");
@@ -3641,11 +3709,9 @@ static int mtip_hw_exit(struct driver_data *dd)
        irq_set_affinity_hint(dd->pdev->irq, NULL);
        devm_free_irq(&dd->pdev->dev, dd->pdev->irq, dd);
 
-       /* Free the command/command header memory. */
-       dmam_free_coherent(&dd->pdev->dev,
-                       HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 4),
-                       dd->port->command_list,
-                       dd->port->command_list_dma);
+       /* Free dma regions */
+       mtip_dma_free(dd);
+
        /* Free the memory allocated for the for structure. */
        kfree(dd->port);
        dd->port = NULL;
@@ -3962,8 +4028,9 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)
 {
        struct driver_data *dd = queue->queuedata;
        struct scatterlist *sg;
-       struct bio_vec *bvec;
-       int i, nents = 0;
+       struct bio_vec bvec;
+       struct bvec_iter iter;
+       int nents = 0;
        int tag = 0, unaligned = 0;
 
        if (unlikely(dd->dd_flag & MTIP_DDF_STOP_IO)) {
@@ -3993,7 +4060,7 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)
        }
 
        if (unlikely(bio->bi_rw & REQ_DISCARD)) {
-               bio_endio(bio, mtip_send_trim(dd, bio->bi_sector,
+               bio_endio(bio, mtip_send_trim(dd, bio->bi_iter.bi_sector,
                                                bio_sectors(bio)));
                return;
        }
@@ -4006,7 +4073,8 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)
 
        if (bio_data_dir(bio) == WRITE && bio_sectors(bio) <= 64 &&
                                                        dd->unal_qdepth) {
-               if (bio->bi_sector % 8 != 0) /* Unaligned on 4k boundaries */
+               if (bio->bi_iter.bi_sector % 8 != 0)
+                       /* Unaligned on 4k boundaries */
                        unaligned = 1;
                else if (bio_sectors(bio) % 8 != 0) /* Aligned but not 4k/8k */
                        unaligned = 1;
@@ -4025,17 +4093,17 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)
                }
 
                /* Create the scatter list for this bio. */
-               bio_for_each_segment(bvec, bio, i) {
+               bio_for_each_segment(bvec, bio, iter) {
                        sg_set_page(&sg[nents],
-                                       bvec->bv_page,
-                                       bvec->bv_len,
-                                       bvec->bv_offset);
+                                       bvec.bv_page,
+                                       bvec.bv_len,
+                                       bvec.bv_offset);
                        nents++;
                }
 
                /* Issue the read/write. */
                mtip_hw_submit_io(dd,
-                               bio->bi_sector,
+                               bio->bi_iter.bi_sector,
                                bio_sectors(bio),
                                nents,
                                tag,
index 9be7a15..b52e9a6 100644
@@ -69,7 +69,7 @@
  * Maximum number of scatter gather entries
  * a single command may have.
  */
-#define MTIP_MAX_SG            128
+#define MTIP_MAX_SG            504
 
 /*
  * Maximum number of slot groups (Command Issue & s_active registers)
@@ -92,7 +92,7 @@
 
 /* Driver name and version strings */
 #define MTIP_DRV_NAME          "mtip32xx"
-#define MTIP_DRV_VERSION       "1.2.6os3"
+#define MTIP_DRV_VERSION       "1.3.0"
 
 /* Maximum number of minor device numbers per device. */
 #define MTIP_MAX_MINORS                16
@@ -391,15 +391,13 @@ struct mtip_port {
         */
        dma_addr_t rxfis_dma;
        /*
-        * Pointer to the beginning of the command table memory as used
-        * by the driver.
+        * Pointer to the DMA region for RX Fis, Identify, RLE10, and SMART
         */
-       void *command_table;
+       void *block1;
        /*
-        * Pointer to the beginning of the command table memory as used
-        * by the DMA.
+        * DMA address of region for RX Fis, Identify, RLE10, and SMART
         */
-       dma_addr_t command_tbl_dma;
+       dma_addr_t block1_dma;
        /*
         * Pointer to the beginning of the identify data memory as used
         * by the driver.
index 2dc3b51..55298db 100644
@@ -271,18 +271,18 @@ static int nbd_send_req(struct nbd_device *nbd, struct request *req)
 
        if (nbd_cmd(req) == NBD_CMD_WRITE) {
                struct req_iterator iter;
-               struct bio_vec *bvec;
+               struct bio_vec bvec;
                /*
                 * we are really probing at internals to determine
                 * whether to set MSG_MORE or not...
                 */
                rq_for_each_segment(bvec, req, iter) {
                        flags = 0;
-                       if (!rq_iter_last(req, iter))
+                       if (!rq_iter_last(bvec, iter))
                                flags = MSG_MORE;
                        dprintk(DBG_TX, "%s: request %p: sending %d bytes data\n",
-                                       nbd->disk->disk_name, req, bvec->bv_len);
-                       result = sock_send_bvec(nbd, bvec, flags);
+                                       nbd->disk->disk_name, req, bvec.bv_len);
+                       result = sock_send_bvec(nbd, &bvec, flags);
                        if (result <= 0) {
                                dev_err(disk_to_dev(nbd->disk),
                                        "Send data failed (result %d)\n",
@@ -378,10 +378,10 @@ static struct request *nbd_read_stat(struct nbd_device *nbd)
                        nbd->disk->disk_name, req);
        if (nbd_cmd(req) == NBD_CMD_READ) {
                struct req_iterator iter;
-               struct bio_vec *bvec;
+               struct bio_vec bvec;
 
                rq_for_each_segment(bvec, req, iter) {
-                       result = sock_recv_bvec(nbd, bvec);
+                       result = sock_recv_bvec(nbd, &bvec);
                        if (result <= 0) {
                                dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
                                        result);
@@ -389,7 +389,7 @@ static struct request *nbd_read_stat(struct nbd_device *nbd)
                                return req;
                        }
                        dprintk(DBG_RX, "%s: request %p: got %d bytes data\n",
-                               nbd->disk->disk_name, req, bvec->bv_len);
+                               nbd->disk->disk_name, req, bvec.bv_len);
                }
        }
        return req;
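The nbd conversion shows how last-segment detection changes: rq_iter_last() now takes the current bio_vec and the iterator rather than the request, because the by-value iterator itself knows whether anything remains. A hedged kernel-style sketch of the send loop's shape (send_one_bvec is a hypothetical helper standing in for sock_send_bvec()):

	struct req_iterator iter;
	struct bio_vec bvec;
	int flags;

	rq_for_each_segment(bvec, req, iter) {
		/* MSG_MORE on every segment but the last lets the socket
		 * coalesce; rq_iter_last(bvec, iter) is true only at the end. */
		flags = rq_iter_last(bvec, iter) ? 0 : MSG_MORE;
		send_one_bvec(sock, &bvec, flags);
	}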
index 83a598e..3107282 100644
@@ -616,6 +616,11 @@ static int __init null_init(void)
                irqmode = NULL_IRQ_NONE;
        }
 #endif
+       if (bs > PAGE_SIZE) {
+               pr_warn("null_blk: invalid block size\n");
+               pr_warn("null_blk: defaulting block size to %lu\n", PAGE_SIZE);
+               bs = PAGE_SIZE;
+       }
 
        if (queue_mode == NULL_Q_MQ && use_per_node_hctx) {
                if (submit_queues < nr_online_nodes) {
index 26d03fa..1f14ac4 100644
@@ -441,104 +441,19 @@ int nvme_setup_prps(struct nvme_dev *dev, struct nvme_common_command *cmd,
        return total_len;
 }
 
-struct nvme_bio_pair {
-       struct bio b1, b2, *parent;
-       struct bio_vec *bv1, *bv2;
-       int err;
-       atomic_t cnt;
-};
-
-static void nvme_bio_pair_endio(struct bio *bio, int err)
-{
-       struct nvme_bio_pair *bp = bio->bi_private;
-
-       if (err)
-               bp->err = err;
-
-       if (atomic_dec_and_test(&bp->cnt)) {
-               bio_endio(bp->parent, bp->err);
-               kfree(bp->bv1);
-               kfree(bp->bv2);
-               kfree(bp);
-       }
-}
-
-static struct nvme_bio_pair *nvme_bio_split(struct bio *bio, int idx,
-                                                       int len, int offset)
-{
-       struct nvme_bio_pair *bp;
-
-       BUG_ON(len > bio->bi_size);
-       BUG_ON(idx > bio->bi_vcnt);
-
-       bp = kmalloc(sizeof(*bp), GFP_ATOMIC);
-       if (!bp)
-               return NULL;
-       bp->err = 0;
-
-       bp->b1 = *bio;
-       bp->b2 = *bio;
-
-       bp->b1.bi_size = len;
-       bp->b2.bi_size -= len;
-       bp->b1.bi_vcnt = idx;
-       bp->b2.bi_idx = idx;
-       bp->b2.bi_sector += len >> 9;
-
-       if (offset) {
-               bp->bv1 = kmalloc(bio->bi_max_vecs * sizeof(struct bio_vec),
-                                                               GFP_ATOMIC);
-               if (!bp->bv1)
-                       goto split_fail_1;
-
-               bp->bv2 = kmalloc(bio->bi_max_vecs * sizeof(struct bio_vec),
-                                                               GFP_ATOMIC);
-               if (!bp->bv2)
-                       goto split_fail_2;
-
-               memcpy(bp->bv1, bio->bi_io_vec,
-                       bio->bi_max_vecs * sizeof(struct bio_vec));
-               memcpy(bp->bv2, bio->bi_io_vec,
-                       bio->bi_max_vecs * sizeof(struct bio_vec));
-
-               bp->b1.bi_io_vec = bp->bv1;
-               bp->b2.bi_io_vec = bp->bv2;
-               bp->b2.bi_io_vec[idx].bv_offset += offset;
-               bp->b2.bi_io_vec[idx].bv_len -= offset;
-               bp->b1.bi_io_vec[idx].bv_len = offset;
-               bp->b1.bi_vcnt++;
-       } else
-               bp->bv1 = bp->bv2 = NULL;
-
-       bp->b1.bi_private = bp;
-       bp->b2.bi_private = bp;
-
-       bp->b1.bi_end_io = nvme_bio_pair_endio;
-       bp->b2.bi_end_io = nvme_bio_pair_endio;
-
-       bp->parent = bio;
-       atomic_set(&bp->cnt, 2);
-
-       return bp;
-
- split_fail_2:
-       kfree(bp->bv1);
- split_fail_1:
-       kfree(bp);
-       return NULL;
-}
-
 static int nvme_split_and_submit(struct bio *bio, struct nvme_queue *nvmeq,
-                                               int idx, int len, int offset)
+                                int len)
 {
-       struct nvme_bio_pair *bp = nvme_bio_split(bio, idx, len, offset);
-       if (!bp)
+       struct bio *split = bio_split(bio, len >> 9, GFP_ATOMIC, NULL);
+       if (!split)
                return -ENOMEM;
 
+       bio_chain(split, bio);
+
        if (bio_list_empty(&nvmeq->sq_cong))
                add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
-       bio_list_add(&nvmeq->sq_cong, &bp->b1);
-       bio_list_add(&nvmeq->sq_cong, &bp->b2);
+       bio_list_add(&nvmeq->sq_cong, split);
+       bio_list_add(&nvmeq->sq_cong, bio);
 
        return 0;
 }
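This hunk is the payoff of the series for a driver like nvme: the hand-rolled nvme_bio_pair machinery (duplicated biovecs, a private refcount, a custom endio) reduces to bio_split() plus bio_chain(), where the chain makes the parent bio's completion wait on the split-off child. A hedged kernel-style sketch of the pattern (submit_one is a hypothetical stand-in for queueing; error handling elided):

	/* Carve the first 'sectors' off 'bio' as a child request. */
	struct bio *split = bio_split(bio, sectors, GFP_NOIO, NULL);

	bio_chain(split, bio);	/* parent completes only after the child */
	submit_one(split);	/* front half */
	submit_one(bio);	/* remainder; its iterator is already advanced */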
@@ -550,41 +465,44 @@ static int nvme_split_and_submit(struct bio *bio, struct nvme_queue *nvmeq,
 static int nvme_map_bio(struct nvme_queue *nvmeq, struct nvme_iod *iod,
                struct bio *bio, enum dma_data_direction dma_dir, int psegs)
 {
-       struct bio_vec *bvec, *bvprv = NULL;
+       struct bio_vec bvec, bvprv;
+       struct bvec_iter iter;
        struct scatterlist *sg = NULL;
-       int i, length = 0, nsegs = 0, split_len = bio->bi_size;
+       int length = 0, nsegs = 0, split_len = bio->bi_iter.bi_size;
+       int first = 1;
 
        if (nvmeq->dev->stripe_size)
                split_len = nvmeq->dev->stripe_size -
-                       ((bio->bi_sector << 9) & (nvmeq->dev->stripe_size - 1));
+                       ((bio->bi_iter.bi_sector << 9) &
+                        (nvmeq->dev->stripe_size - 1));
 
        sg_init_table(iod->sg, psegs);
-       bio_for_each_segment(bvec, bio, i) {
-               if (bvprv && BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) {
-                       sg->length += bvec->bv_len;
+       bio_for_each_segment(bvec, bio, iter) {
+               if (!first && BIOVEC_PHYS_MERGEABLE(&bvprv, &bvec)) {
+                       sg->length += bvec.bv_len;
                } else {
-                       if (bvprv && BIOVEC_NOT_VIRT_MERGEABLE(bvprv, bvec))
-                               return nvme_split_and_submit(bio, nvmeq, i,
-                                                               length, 0);
+                       if (!first && BIOVEC_NOT_VIRT_MERGEABLE(&bvprv, &bvec))
+                               return nvme_split_and_submit(bio, nvmeq,
+                                                            length);
 
                        sg = sg ? sg + 1 : iod->sg;
-                       sg_set_page(sg, bvec->bv_page, bvec->bv_len,
-                                                       bvec->bv_offset);
+                       sg_set_page(sg, bvec.bv_page,
+                                   bvec.bv_len, bvec.bv_offset);
                        nsegs++;
                }
 
-               if (split_len - length < bvec->bv_len)
-                       return nvme_split_and_submit(bio, nvmeq, i, split_len,
-                                                       split_len - length);
-               length += bvec->bv_len;
+               if (split_len - length < bvec.bv_len)
+                       return nvme_split_and_submit(bio, nvmeq, split_len);
+               length += bvec.bv_len;
                bvprv = bvec;
+               first = 0;
        }
        iod->nents = nsegs;
        sg_mark_end(sg);
        if (dma_map_sg(nvmeq->q_dmadev, iod->sg, iod->nents, dma_dir) == 0)
                return -ENOMEM;
 
-       BUG_ON(length != bio->bi_size);
+       BUG_ON(length != bio->bi_iter.bi_size);
        return length;
 }
 
@@ -608,8 +526,8 @@ static int nvme_submit_discard(struct nvme_queue *nvmeq, struct nvme_ns *ns,
        iod->npages = 0;
 
        range->cattr = cpu_to_le32(0);
-       range->nlb = cpu_to_le32(bio->bi_size >> ns->lba_shift);
-       range->slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_sector));
+       range->nlb = cpu_to_le32(bio->bi_iter.bi_size >> ns->lba_shift);
+       range->slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_iter.bi_sector));
 
        memset(cmnd, 0, sizeof(*cmnd));
        cmnd->dsm.opcode = nvme_cmd_dsm;
@@ -674,7 +592,7 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
        }
 
        result = -ENOMEM;
-       iod = nvme_alloc_iod(psegs, bio->bi_size, GFP_ATOMIC);
+       iod = nvme_alloc_iod(psegs, bio->bi_iter.bi_size, GFP_ATOMIC);
        if (!iod)
                goto nomem;
        iod->private = bio;
@@ -723,7 +641,7 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
        cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
        length = nvme_setup_prps(nvmeq->dev, &cmnd->common, iod, length,
                                                                GFP_ATOMIC);
-       cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_sector));
+       cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_iter.bi_sector));
        cmnd->rw.length = cpu_to_le16((length >> ns->lba_shift) - 1);
        cmnd->rw.control = cpu_to_le16(control);
        cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
index 4a27b1d..2ce3dfd 100644
@@ -581,7 +581,7 @@ static ssize_t pg_write(struct file *filp, const char __user *buf, size_t count,
 
        if (hdr.magic != PG_MAGIC)
                return -EINVAL;
-       if (hdr.dlen > PG_MAX_DATA)
+       if (hdr.dlen < 0 || hdr.dlen > PG_MAX_DATA)
                return -EINVAL;
        if ((count - hs) > PG_MAX_DATA)
                return -EINVAL;
index ff8668c..a2af73d 100644
@@ -651,7 +651,7 @@ static struct pkt_rb_node *pkt_rbtree_find(struct pktcdvd_device *pd, sector_t s
 
        for (;;) {
                tmp = rb_entry(n, struct pkt_rb_node, rb_node);
-               if (s <= tmp->bio->bi_sector)
+               if (s <= tmp->bio->bi_iter.bi_sector)
                        next = n->rb_left;
                else
                        next = n->rb_right;
@@ -660,12 +660,12 @@ static struct pkt_rb_node *pkt_rbtree_find(struct pktcdvd_device *pd, sector_t s
                n = next;
        }
 
-       if (s > tmp->bio->bi_sector) {
+       if (s > tmp->bio->bi_iter.bi_sector) {
                tmp = pkt_rbtree_next(tmp);
                if (!tmp)
                        return NULL;
        }
-       BUG_ON(s > tmp->bio->bi_sector);
+       BUG_ON(s > tmp->bio->bi_iter.bi_sector);
        return tmp;
 }
 
@@ -676,13 +676,13 @@ static void pkt_rbtree_insert(struct pktcdvd_device *pd, struct pkt_rb_node *nod
 {
        struct rb_node **p = &pd->bio_queue.rb_node;
        struct rb_node *parent = NULL;
-       sector_t s = node->bio->bi_sector;
+       sector_t s = node->bio->bi_iter.bi_sector;
        struct pkt_rb_node *tmp;
 
        while (*p) {
                parent = *p;
                tmp = rb_entry(parent, struct pkt_rb_node, rb_node);
-               if (s < tmp->bio->bi_sector)
+               if (s < tmp->bio->bi_iter.bi_sector)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
@@ -706,7 +706,9 @@ static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *
                             WRITE : READ, __GFP_WAIT);
 
        if (cgc->buflen) {
-               if (blk_rq_map_kern(q, rq, cgc->buffer, cgc->buflen, __GFP_WAIT))
+               ret = blk_rq_map_kern(q, rq, cgc->buffer, cgc->buflen,
+                                     __GFP_WAIT);
+               if (ret)
                        goto out;
        }
 
@@ -857,7 +859,8 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
                        spin_lock(&pd->iosched.lock);
                        bio = bio_list_peek(&pd->iosched.write_queue);
                        spin_unlock(&pd->iosched.lock);
-                       if (bio && (bio->bi_sector == pd->iosched.last_write))
+                       if (bio && (bio->bi_iter.bi_sector ==
+                                   pd->iosched.last_write))
                                need_write_seek = 0;
                        if (need_write_seek && reads_queued) {
                                if (atomic_read(&pd->cdrw.pending_bios) > 0) {
@@ -888,7 +891,8 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
                        continue;
 
                if (bio_data_dir(bio) == READ)
-                       pd->iosched.successive_reads += bio->bi_size >> 10;
+                       pd->iosched.successive_reads +=
+                               bio->bi_iter.bi_size >> 10;
                else {
                        pd->iosched.successive_reads = 0;
                        pd->iosched.last_write = bio_end_sector(bio);
@@ -978,7 +982,7 @@ static void pkt_end_io_read(struct bio *bio, int err)
 
        pkt_dbg(2, pd, "bio=%p sec0=%llx sec=%llx err=%d\n",
                bio, (unsigned long long)pkt->sector,
-               (unsigned long long)bio->bi_sector, err);
+               (unsigned long long)bio->bi_iter.bi_sector, err);
 
        if (err)
                atomic_inc(&pkt->io_errors);
@@ -1026,8 +1030,9 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
        memset(written, 0, sizeof(written));
        spin_lock(&pkt->lock);
        bio_list_for_each(bio, &pkt->orig_bios) {
-               int first_frame = (bio->bi_sector - pkt->sector) / (CD_FRAMESIZE >> 9);
-               int num_frames = bio->bi_size / CD_FRAMESIZE;
+               int first_frame = (bio->bi_iter.bi_sector - pkt->sector) /
+                       (CD_FRAMESIZE >> 9);
+               int num_frames = bio->bi_iter.bi_size / CD_FRAMESIZE;
                pd->stats.secs_w += num_frames * (CD_FRAMESIZE >> 9);
                BUG_ON(first_frame < 0);
                BUG_ON(first_frame + num_frames > pkt->frames);
@@ -1053,7 +1058,7 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
 
                bio = pkt->r_bios[f];
                bio_reset(bio);
-               bio->bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
+               bio->bi_iter.bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
                bio->bi_bdev = pd->bdev;
                bio->bi_end_io = pkt_end_io_read;
                bio->bi_private = pkt;
@@ -1150,8 +1155,8 @@ static int pkt_start_recovery(struct packet_data *pkt)
        bio_reset(pkt->bio);
        pkt->bio->bi_bdev = pd->bdev;
        pkt->bio->bi_rw = REQ_WRITE;
-       pkt->bio->bi_sector = new_sector;
-       pkt->bio->bi_size = pkt->frames * CD_FRAMESIZE;
+       pkt->bio->bi_iter.bi_sector = new_sector;
+       pkt->bio->bi_iter.bi_size = pkt->frames * CD_FRAMESIZE;
        pkt->bio->bi_vcnt = pkt->frames;
 
        pkt->bio->bi_end_io = pkt_end_io_packet_write;
@@ -1213,7 +1218,7 @@ static int pkt_handle_queue(struct pktcdvd_device *pd)
        node = first_node;
        while (node) {
                bio = node->bio;
-               zone = get_zone(bio->bi_sector, pd);
+               zone = get_zone(bio->bi_iter.bi_sector, pd);
                list_for_each_entry(p, &pd->cdrw.pkt_active_list, list) {
                        if (p->sector == zone) {
                                bio = NULL;
@@ -1252,14 +1257,14 @@ try_next_bio:
        pkt_dbg(2, pd, "looking for zone %llx\n", (unsigned long long)zone);
        while ((node = pkt_rbtree_find(pd, zone)) != NULL) {
                bio = node->bio;
-               pkt_dbg(2, pd, "found zone=%llx\n",
-                       (unsigned long long)get_zone(bio->bi_sector, pd));
-               if (get_zone(bio->bi_sector, pd) != zone)
+               pkt_dbg(2, pd, "found zone=%llx\n", (unsigned long long)
+                       get_zone(bio->bi_iter.bi_sector, pd));
+               if (get_zone(bio->bi_iter.bi_sector, pd) != zone)
                        break;
                pkt_rbtree_erase(pd, node);
                spin_lock(&pkt->lock);
                bio_list_add(&pkt->orig_bios, bio);
-               pkt->write_size += bio->bi_size / CD_FRAMESIZE;
+               pkt->write_size += bio->bi_iter.bi_size / CD_FRAMESIZE;
                spin_unlock(&pkt->lock);
        }
        /* check write congestion marks, and if bio_queue_size is
@@ -1293,7 +1298,7 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
        struct bio_vec *bvec = pkt->w_bio->bi_io_vec;
 
        bio_reset(pkt->w_bio);
-       pkt->w_bio->bi_sector = pkt->sector;
+       pkt->w_bio->bi_iter.bi_sector = pkt->sector;
        pkt->w_bio->bi_bdev = pd->bdev;
        pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
        pkt->w_bio->bi_private = pkt;
@@ -2335,75 +2340,29 @@ static void pkt_end_io_read_cloned(struct bio *bio, int err)
        pkt_bio_finished(pd);
 }
 
-static void pkt_make_request(struct request_queue *q, struct bio *bio)
+static void pkt_make_request_read(struct pktcdvd_device *pd, struct bio *bio)
 {
-       struct pktcdvd_device *pd;
-       char b[BDEVNAME_SIZE];
+       struct bio *cloned_bio = bio_clone(bio, GFP_NOIO);
+       struct packet_stacked_data *psd = mempool_alloc(psd_pool, GFP_NOIO);
+
+       psd->pd = pd;
+       psd->bio = bio;
+       cloned_bio->bi_bdev = pd->bdev;
+       cloned_bio->bi_private = psd;
+       cloned_bio->bi_end_io = pkt_end_io_read_cloned;
+       pd->stats.secs_r += bio_sectors(bio);
+       pkt_queue_bio(pd, cloned_bio);
+}
+
+static void pkt_make_request_write(struct request_queue *q, struct bio *bio)
+{
+       struct pktcdvd_device *pd = q->queuedata;
        sector_t zone;
        struct packet_data *pkt;
        int was_empty, blocked_bio;
        struct pkt_rb_node *node;
 
-       pd = q->queuedata;
-       if (!pd) {
-               pr_err("%s incorrect request queue\n",
-                      bdevname(bio->bi_bdev, b));
-               goto end_io;
-       }
-
-       /*
-        * Clone READ bios so we can have our own bi_end_io callback.
-        */
-       if (bio_data_dir(bio) == READ) {
-               struct bio *cloned_bio = bio_clone(bio, GFP_NOIO);
-               struct packet_stacked_data *psd = mempool_alloc(psd_pool, GFP_NOIO);
-
-               psd->pd = pd;
-               psd->bio = bio;
-               cloned_bio->bi_bdev = pd->bdev;
-               cloned_bio->bi_private = psd;
-               cloned_bio->bi_end_io = pkt_end_io_read_cloned;
-               pd->stats.secs_r += bio_sectors(bio);
-               pkt_queue_bio(pd, cloned_bio);
-               return;
-       }
-
-       if (!test_bit(PACKET_WRITABLE, &pd->flags)) {
-               pkt_notice(pd, "WRITE for ro device (%llu)\n",
-                          (unsigned long long)bio->bi_sector);
-               goto end_io;
-       }
-
-       if (!bio->bi_size || (bio->bi_size % CD_FRAMESIZE)) {
-               pkt_err(pd, "wrong bio size\n");
-               goto end_io;
-       }
-
-       blk_queue_bounce(q, &bio);
-
-       zone = get_zone(bio->bi_sector, pd);
-       pkt_dbg(2, pd, "start = %6llx stop = %6llx\n",
-               (unsigned long long)bio->bi_sector,
-               (unsigned long long)bio_end_sector(bio));
-
-       /* Check if we have to split the bio */
-       {
-               struct bio_pair *bp;
-               sector_t last_zone;
-               int first_sectors;
-
-               last_zone = get_zone(bio_end_sector(bio) - 1, pd);
-               if (last_zone != zone) {
-                       BUG_ON(last_zone != zone + pd->settings.size);
-                       first_sectors = last_zone - bio->bi_sector;
-                       bp = bio_split(bio, first_sectors);
-                       BUG_ON(!bp);
-                       pkt_make_request(q, &bp->bio1);
-                       pkt_make_request(q, &bp->bio2);
-                       bio_pair_release(bp);
-                       return;
-               }
-       }
+       zone = get_zone(bio->bi_iter.bi_sector, pd);
 
        /*
         * If we find a matching packet in state WAITING or READ_WAIT, we can
@@ -2417,7 +2376,8 @@ static void pkt_make_request(struct request_queue *q, struct bio *bio)
                        if ((pkt->state == PACKET_WAITING_STATE) ||
                            (pkt->state == PACKET_READ_WAIT_STATE)) {
                                bio_list_add(&pkt->orig_bios, bio);
-                               pkt->write_size += bio->bi_size / CD_FRAMESIZE;
+                               pkt->write_size +=
+                                       bio->bi_iter.bi_size / CD_FRAMESIZE;
                                if ((pkt->write_size >= pkt->frames) &&
                                    (pkt->state == PACKET_WAITING_STATE)) {
                                        atomic_inc(&pkt->run_sm);
@@ -2476,6 +2436,64 @@ static void pkt_make_request(struct request_queue *q, struct bio *bio)
                 */
                wake_up(&pd->wqueue);
        }
+}
+
+static void pkt_make_request(struct request_queue *q, struct bio *bio)
+{
+       struct pktcdvd_device *pd;
+       char b[BDEVNAME_SIZE];
+       struct bio *split;
+
+       pd = q->queuedata;
+       if (!pd) {
+               pr_err("%s incorrect request queue\n",
+                      bdevname(bio->bi_bdev, b));
+               goto end_io;
+       }
+
+       pkt_dbg(2, pd, "start = %6llx stop = %6llx\n",
+               (unsigned long long)bio->bi_iter.bi_sector,
+               (unsigned long long)bio_end_sector(bio));
+
+       /*
+        * Clone READ bios so we can have our own bi_end_io callback.
+        */
+       if (bio_data_dir(bio) == READ) {
+               pkt_make_request_read(pd, bio);
+               return;
+       }
+
+       if (!test_bit(PACKET_WRITABLE, &pd->flags)) {
+               pkt_notice(pd, "WRITE for ro device (%llu)\n",
+                          (unsigned long long)bio->bi_iter.bi_sector);
+               goto end_io;
+       }
+
+       if (!bio->bi_iter.bi_size || (bio->bi_iter.bi_size % CD_FRAMESIZE)) {
+               pkt_err(pd, "wrong bio size\n");
+               goto end_io;
+       }
+
+       blk_queue_bounce(q, &bio);
+
+       do {
+               sector_t zone = get_zone(bio->bi_iter.bi_sector, pd);
+               sector_t last_zone = get_zone(bio_end_sector(bio) - 1, pd);
+
+               if (last_zone != zone) {
+                       BUG_ON(last_zone != zone + pd->settings.size);
+
+                       split = bio_split(bio, last_zone -
+                                         bio->bi_iter.bi_sector,
+                                         GFP_NOIO, fs_bio_set);
+                       bio_chain(split, bio);
+               } else {
+                       split = bio;
+               }
+
+               pkt_make_request_write(q, split);
+       } while (split != bio);
+
        return;
 end_io:
        bio_io_error(bio);
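
The do/while loop above is the pattern that replaced bio_pair splitting: bio_split() carves a piece off the front of the bio, bio_chain() ties the fragment's completion to the parent so the parent's bio_endio() is deferred until all fragments finish, and the loop exits once the remainder (split == bio) has been submitted. A minimal sketch of the same pattern for a generic per-chunk limit, where "max_sectors" and "submit_chunk" are hypothetical:

        struct bio *split;

        do {
                if (bio_sectors(bio) > max_sectors) {
                        split = bio_split(bio, max_sectors,
                                          GFP_NOIO, fs_bio_set);
                        bio_chain(split, bio);
                } else {
                        split = bio;
                }
                submit_chunk(split);    /* hypothetical per-chunk handler */
        } while (split != bio);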
index d754a88..c120d70 100644 (file)
@@ -94,26 +94,25 @@ static void ps3disk_scatter_gather(struct ps3_storage_device *dev,
 {
        unsigned int offset = 0;
        struct req_iterator iter;
-       struct bio_vec *bvec;
+       struct bio_vec bvec;
        unsigned int i = 0;
        size_t size;
        void *buf;
 
        rq_for_each_segment(bvec, req, iter) {
                unsigned long flags;
-               dev_dbg(&dev->sbd.core,
-                       "%s:%u: bio %u: %u segs %u sectors from %lu\n",
-                       __func__, __LINE__, i, bio_segments(iter.bio),
-                       bio_sectors(iter.bio), iter.bio->bi_sector);
+               dev_dbg(&dev->sbd.core, "%s:%u: bio %u: %u sectors from %lu\n",
+                       __func__, __LINE__, i, bio_sectors(iter.bio),
+                       iter.bio->bi_iter.bi_sector);
 
-               size = bvec->bv_len;
-               buf = bvec_kmap_irq(bvec, &flags);
+               size = bvec.bv_len;
+               buf = bvec_kmap_irq(&bvec, &flags);
                if (gather)
                        memcpy(dev->bounce_buf+offset, buf, size);
                else
                        memcpy(buf, dev->bounce_buf+offset, size);
                offset += size;
-               flush_kernel_dcache_page(bvec->bv_page);
+               flush_kernel_dcache_page(bvec.bv_page);
                bvec_kunmap_irq(buf, &flags);
                i++;
        }
@@ -130,7 +129,7 @@ static int ps3disk_submit_request_sg(struct ps3_storage_device *dev,
 
 #ifdef DEBUG
        unsigned int n = 0;
-       struct bio_vec *bv;
+       struct bio_vec bv;
        struct req_iterator iter;
 
        rq_for_each_segment(bv, req, iter)
index 06a2e53..ef45cfb 100644 (file)
@@ -553,16 +553,16 @@ static struct bio *ps3vram_do_bio(struct ps3_system_bus_device *dev,
        struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
        int write = bio_data_dir(bio) == WRITE;
        const char *op = write ? "write" : "read";
-       loff_t offset = bio->bi_sector << 9;
+       loff_t offset = bio->bi_iter.bi_sector << 9;
        int error = 0;
-       struct bio_vec *bvec;
-       unsigned int i;
+       struct bio_vec bvec;
+       struct bvec_iter iter;
        struct bio *next;
 
-       bio_for_each_segment(bvec, bio, i) {
+       bio_for_each_segment(bvec, bio, iter) {
                /* PS3 is ppc64, so we don't handle highmem */
-               char *ptr = page_address(bvec->bv_page) + bvec->bv_offset;
-               size_t len = bvec->bv_len, retlen;
+               char *ptr = page_address(bvec.bv_page) + bvec.bv_offset;
+               size_t len = bvec.bv_len, retlen;
 
                dev_dbg(&dev->core, "    %s %zu bytes at offset %llu\n", op,
                        len, offset);
index 16cab66..b365e0d 100644 (file)
@@ -1156,23 +1156,23 @@ static void bio_chain_put(struct bio *chain)
  */
 static void zero_bio_chain(struct bio *chain, int start_ofs)
 {
-       struct bio_vec *bv;
+       struct bio_vec bv;
+       struct bvec_iter iter;
        unsigned long flags;
        void *buf;
-       int i;
        int pos = 0;
 
        while (chain) {
-               bio_for_each_segment(bv, chain, i) {
-                       if (pos + bv->bv_len > start_ofs) {
+               bio_for_each_segment(bv, chain, iter) {
+                       if (pos + bv.bv_len > start_ofs) {
                                int remainder = max(start_ofs - pos, 0);
-                               buf = bvec_kmap_irq(bv, &flags);
+                               buf = bvec_kmap_irq(&bv, &flags);
                                memset(buf + remainder, 0,
-                                      bv->bv_len - remainder);
-                               flush_dcache_page(bv->bv_page);
+                                      bv.bv_len - remainder);
+                               flush_dcache_page(bv.bv_page);
                                bvec_kunmap_irq(buf, &flags);
                        }
-                       pos += bv->bv_len;
+                       pos += bv.bv_len;
                }
 
                chain = chain->bi_next;
@@ -1220,74 +1220,14 @@ static struct bio *bio_clone_range(struct bio *bio_src,
                                        unsigned int len,
                                        gfp_t gfpmask)
 {
-       struct bio_vec *bv;
-       unsigned int resid;
-       unsigned short idx;
-       unsigned int voff;
-       unsigned short end_idx;
-       unsigned short vcnt;
        struct bio *bio;
 
-       /* Handle the easy case for the caller */
-
-       if (!offset && len == bio_src->bi_size)
-               return bio_clone(bio_src, gfpmask);
-
-       if (WARN_ON_ONCE(!len))
-               return NULL;
-       if (WARN_ON_ONCE(len > bio_src->bi_size))
-               return NULL;
-       if (WARN_ON_ONCE(offset > bio_src->bi_size - len))
-               return NULL;
-
-       /* Find first affected segment... */
-
-       resid = offset;
-       bio_for_each_segment(bv, bio_src, idx) {
-               if (resid < bv->bv_len)
-                       break;
-               resid -= bv->bv_len;
-       }
-       voff = resid;
-
-       /* ...and the last affected segment */
-
-       resid += len;
-       __bio_for_each_segment(bv, bio_src, end_idx, idx) {
-               if (resid <= bv->bv_len)
-                       break;
-               resid -= bv->bv_len;
-       }
-       vcnt = end_idx - idx + 1;
-
-       /* Build the clone */
-
-       bio = bio_alloc(gfpmask, (unsigned int) vcnt);
+       bio = bio_clone(bio_src, gfpmask);
        if (!bio)
                return NULL;    /* ENOMEM */
 
-       bio->bi_bdev = bio_src->bi_bdev;
-       bio->bi_sector = bio_src->bi_sector + (offset >> SECTOR_SHIFT);
-       bio->bi_rw = bio_src->bi_rw;
-       bio->bi_flags |= 1 << BIO_CLONED;
-
-       /*
-        * Copy over our part of the bio_vec, then update the first
-        * and last (or only) entries.
-        */
-       memcpy(&bio->bi_io_vec[0], &bio_src->bi_io_vec[idx],
-                       vcnt * sizeof (struct bio_vec));
-       bio->bi_io_vec[0].bv_offset += voff;
-       if (vcnt > 1) {
-               bio->bi_io_vec[0].bv_len -= voff;
-               bio->bi_io_vec[vcnt - 1].bv_len = resid;
-       } else {
-               bio->bi_io_vec[0].bv_len = len;
-       }
-
-       bio->bi_vcnt = vcnt;
-       bio->bi_size = len;
-       bio->bi_idx = 0;
+       bio_advance(bio, offset);
+       bio->bi_iter.bi_size = len;
 
        return bio;
 }
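
With immutable biovecs the range clone collapses to two steps: bio_advance() moves the clone's bi_iter past the first "offset" bytes without touching the shared bio_vec array, and shrinking bi_iter.bi_size bounds the tail. Expressed in sectors this is the same operation bio_trim() performs; an equivalent sketch, assuming offset and len are multiples of 512:

        bio = bio_clone(bio_src, gfpmask);
        if (bio)
                bio_trim(bio, offset >> SECTOR_SHIFT, len >> SECTOR_SHIFT);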
@@ -1318,7 +1258,7 @@ static struct bio *bio_chain_clone_range(struct bio **bio_src,
 
        /* Build up a chain of clone bios up to the limit */
 
-       if (!bi || off >= bi->bi_size || !len)
+       if (!bi || off >= bi->bi_iter.bi_size || !len)
                return NULL;            /* Nothing to clone */
 
        end = &chain;
@@ -1330,7 +1270,7 @@ static struct bio *bio_chain_clone_range(struct bio **bio_src,
                        rbd_warn(NULL, "bio_chain exhausted with %u left", len);
                        goto out_err;   /* EINVAL; ran out of bio's */
                }
-               bi_size = min_t(unsigned int, bi->bi_size - off, len);
+               bi_size = min_t(unsigned int, bi->bi_iter.bi_size - off, len);
                bio = bio_clone_range(bi, off, bi_size, gfpmask);
                if (!bio)
                        goto out_err;   /* ENOMEM */
@@ -1339,7 +1279,7 @@ static struct bio *bio_chain_clone_range(struct bio **bio_src,
                end = &bio->bi_next;
 
                off += bi_size;
-               if (off == bi->bi_size) {
+               if (off == bi->bi_iter.bi_size) {
                        bi = bi->bi_next;
                        off = 0;
                }
@@ -2227,7 +2167,8 @@ static int rbd_img_request_fill(struct rbd_img_request *img_request,
 
        if (type == OBJ_REQUEST_BIO) {
                bio_list = data_desc;
-               rbd_assert(img_offset == bio_list->bi_sector << SECTOR_SHIFT);
+               rbd_assert(img_offset ==
+                          bio_list->bi_iter.bi_sector << SECTOR_SHIFT);
        } else {
                rbd_assert(type == OBJ_REQUEST_PAGES);
                pages = data_desc;
index 2284f5d..2839d37 100644 (file)
@@ -174,7 +174,7 @@ static void rsxx_make_request(struct request_queue *q, struct bio *bio)
        if (!card)
                goto req_err;
 
-       if (bio->bi_sector + (bio->bi_size >> 9) > get_capacity(card->gendisk))
+       if (bio_end_sector(bio) > get_capacity(card->gendisk))
                goto req_err;
 
        if (unlikely(card->halt)) {
@@ -187,7 +187,7 @@ static void rsxx_make_request(struct request_queue *q, struct bio *bio)
                goto req_err;
        }
 
-       if (bio->bi_size == 0) {
+       if (bio->bi_iter.bi_size == 0) {
                dev_err(CARD_TO_DEV(card), "size zero BIO!\n");
                goto req_err;
        }
@@ -208,7 +208,7 @@ static void rsxx_make_request(struct request_queue *q, struct bio *bio)
 
        dev_dbg(CARD_TO_DEV(card), "BIO[%c]: meta: %p addr8: x%llx size: %d\n",
                 bio_data_dir(bio) ? 'W' : 'R', bio_meta,
-                (u64)bio->bi_sector << 9, bio->bi_size);
+                (u64)bio->bi_iter.bi_sector << 9, bio->bi_iter.bi_size);
 
        st = rsxx_dma_queue_bio(card, bio, &bio_meta->pending_dmas,
                                    bio_dma_done_cb, bio_meta);
index fc88ba3..cf8cd29 100644 (file)
@@ -684,7 +684,8 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
                           void *cb_data)
 {
        struct list_head dma_list[RSXX_MAX_TARGETS];
-       struct bio_vec *bvec;
+       struct bio_vec bvec;
+       struct bvec_iter iter;
        unsigned long long addr8;
        unsigned int laddr;
        unsigned int bv_len;
@@ -696,7 +697,7 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
        int st;
        int i;
 
-       addr8 = bio->bi_sector << 9; /* sectors are 512 bytes */
+       addr8 = bio->bi_iter.bi_sector << 9; /* sectors are 512 bytes */
        atomic_set(n_dmas, 0);
 
        for (i = 0; i < card->n_targets; i++) {
@@ -705,7 +706,7 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
        }
 
        if (bio->bi_rw & REQ_DISCARD) {
-               bv_len = bio->bi_size;
+               bv_len = bio->bi_iter.bi_size;
 
                while (bv_len > 0) {
                        tgt   = rsxx_get_dma_tgt(card, addr8);
@@ -722,9 +723,9 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
                        bv_len -= RSXX_HW_BLK_SIZE;
                }
        } else {
-               bio_for_each_segment(bvec, bio, i) {
-                       bv_len = bvec->bv_len;
-                       bv_off = bvec->bv_offset;
+               bio_for_each_segment(bvec, bio, iter) {
+                       bv_len = bvec.bv_len;
+                       bv_off = bvec.bv_offset;
 
                        while (bv_len > 0) {
                                tgt   = rsxx_get_dma_tgt(card, addr8);
@@ -736,7 +737,7 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
                                st = rsxx_queue_dma(card, &dma_list[tgt],
                                                        bio_data_dir(bio),
                                                        dma_off, dma_len,
-                                                       laddr, bvec->bv_page,
+                                                       laddr, bvec.bv_page,
                                                        bv_off, cb, cb_data);
                                if (st)
                                        goto bvec_err;
index 3fb6ab4..d5e2d12 100644 (file)
@@ -1744,20 +1744,6 @@ static void carm_remove_one (struct pci_dev *pdev)
        kfree(host);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
 }
 
-static int __init carm_init(void)
-{
-       return pci_register_driver(&carm_driver);
-}
-
-static void __exit carm_exit(void)
-{
-       pci_unregister_driver(&carm_driver);
-}
-
-module_init(carm_init);
-module_exit(carm_exit);
-
-
+module_pci_driver(carm_driver);
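
module_pci_driver() is a convenience macro that generates the same init/exit pair deleted above; it expands to roughly:

        static int __init carm_driver_init(void)
        {
                return pci_register_driver(&carm_driver);
        }
        module_init(carm_driver_init);

        static void __exit carm_driver_exit(void)
        {
                pci_unregister_driver(&carm_driver);
        }
        module_exit(carm_driver_exit);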
index ad70868..4cf81b5 100644 (file)
@@ -108,8 +108,7 @@ struct cardinfo {
                                    * have been written
                                    */
        struct bio      *bio, *currentbio, **biotail;
-       int             current_idx;
-       sector_t        current_sector;
+       struct bvec_iter current_iter;
 
        struct request_queue *queue;
 
@@ -118,7 +117,7 @@ struct cardinfo {
                struct mm_dma_desc      *desc;
                int                     cnt, headcnt;
                struct bio              *bio, **biotail;
-               int                     idx;
+               struct bvec_iter        iter;
        } mm_pages[2];
 #define DESC_PER_PAGE ((PAGE_SIZE*2)/sizeof(struct mm_dma_desc))
 
@@ -344,16 +343,13 @@ static int add_bio(struct cardinfo *card)
        dma_addr_t dma_handle;
        int offset;
        struct bio *bio;
-       struct bio_vec *vec;
-       int idx;
+       struct bio_vec vec;
        int rw;
-       int len;
 
        bio = card->currentbio;
        if (!bio && card->bio) {
                card->currentbio = card->bio;
-               card->current_idx = card->bio->bi_idx;
-               card->current_sector = card->bio->bi_sector;
+               card->current_iter = card->bio->bi_iter;
                card->bio = card->bio->bi_next;
                if (card->bio == NULL)
                        card->biotail = &card->bio;
@@ -362,18 +358,17 @@ static int add_bio(struct cardinfo *card)
        }
        if (!bio)
                return 0;
-       idx = card->current_idx;
 
        rw = bio_rw(bio);
        if (card->mm_pages[card->Ready].cnt >= DESC_PER_PAGE)
                return 0;
 
-       vec = bio_iovec_idx(bio, idx);
-       len = vec->bv_len;
+       vec = bio_iter_iovec(bio, card->current_iter);
+
        dma_handle = pci_map_page(card->dev,
-                                 vec->bv_page,
-                                 vec->bv_offset,
-                                 len,
+                                 vec.bv_page,
+                                 vec.bv_offset,
+                                 vec.bv_len,
                                  (rw == READ) ?
                                  PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE);
 
@@ -381,7 +376,7 @@ static int add_bio(struct cardinfo *card)
        desc = &p->desc[p->cnt];
        p->cnt++;
        if (p->bio == NULL)
-               p->idx = idx;
+               p->iter = card->current_iter;
        if ((p->biotail) != &bio->bi_next) {
                *(p->biotail) = bio;
                p->biotail = &(bio->bi_next);
@@ -391,8 +386,8 @@ static int add_bio(struct cardinfo *card)
        desc->data_dma_handle = dma_handle;
 
        desc->pci_addr = cpu_to_le64((u64)desc->data_dma_handle);
-       desc->local_addr = cpu_to_le64(card->current_sector << 9);
-       desc->transfer_size = cpu_to_le32(len);
+       desc->local_addr = cpu_to_le64(card->current_iter.bi_sector << 9);
+       desc->transfer_size = cpu_to_le32(vec.bv_len);
        offset = (((char *)&desc->sem_control_bits) - ((char *)p->desc));
        desc->sem_addr = cpu_to_le64((u64)(p->page_dma+offset));
        desc->zero1 = desc->zero2 = 0;
@@ -407,10 +402,9 @@ static int add_bio(struct cardinfo *card)
                desc->control_bits |= cpu_to_le32(DMASCR_TRANSFER_READ);
        desc->sem_control_bits = desc->control_bits;
 
-       card->current_sector += (len >> 9);
-       idx++;
-       card->current_idx = idx;
-       if (idx >= bio->bi_vcnt)
+
+       bio_advance_iter(bio, &card->current_iter, vec.bv_len);
+       if (!card->current_iter.bi_size)
                card->currentbio = NULL;
 
        return 1;
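
bio_iter_iovec() and bio_advance_iter() provide the same traversal as bio_for_each_segment(), but let the driver keep the iterator in its own state (card->current_iter, page->iter above) and resume it across calls. A self-contained sketch of one full pass over a bio using these primitives:

        struct bvec_iter iter = bio->bi_iter;

        while (iter.bi_size) {
                struct bio_vec bv = bio_iter_iovec(bio, iter);

                /* consume bv.bv_page / bv.bv_offset / bv.bv_len here */
                bio_advance_iter(bio, &iter, bv.bv_len);
        }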
@@ -439,23 +433,25 @@ static void process_page(unsigned long data)
                struct mm_dma_desc *desc = &page->desc[page->headcnt];
                int control = le32_to_cpu(desc->sem_control_bits);
                int last = 0;
-               int idx;
+               struct bio_vec vec;
 
                if (!(control & DMASCR_DMA_COMPLETE)) {
                        control = dma_status;
                        last = 1;
                }
+
                page->headcnt++;
-               idx = page->idx;
-               page->idx++;
-               if (page->idx >= bio->bi_vcnt) {
+               vec = bio_iter_iovec(bio, page->iter);
+               bio_advance_iter(bio, &page->iter, vec.bv_len);
+
+               if (!page->iter.bi_size) {
                        page->bio = bio->bi_next;
                        if (page->bio)
-                               page->idx = page->bio->bi_idx;
+                               page->iter = page->bio->bi_iter;
                }
 
                pci_unmap_page(card->dev, desc->data_dma_handle,
-                              bio_iovec_idx(bio, idx)->bv_len,
+                              vec.bv_len,
                                 (control & DMASCR_TRANSFER_READ) ?
                                PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
                if (control & DMASCR_HARD_ERROR) {
@@ -532,7 +528,8 @@ static void mm_make_request(struct request_queue *q, struct bio *bio)
 {
        struct cardinfo *card = q->queuedata;
        pr_debug("mm_make_request %llu %u\n",
-                (unsigned long long)bio->bi_sector, bio->bi_size);
+                (unsigned long long)bio->bi_iter.bi_sector,
+                bio->bi_iter.bi_size);
 
        spin_lock_irq(&card->lock);
        *card->biotail = bio;
index 6620b73..4b97b86 100644 (file)
@@ -1257,7 +1257,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
                        bio->bi_bdev    = preq.bdev;
                        bio->bi_private = pending_req;
                        bio->bi_end_io  = end_block_io_op;
-                       bio->bi_sector  = preq.sector_number;
+                       bio->bi_iter.bi_sector  = preq.sector_number;
                }
 
                preq.sector_number += seg[i].nsec;
index f9c43f9..8dcfb54 100644 (file)
@@ -1547,7 +1547,7 @@ static int blkif_recover(struct blkfront_info *info)
                        for (i = 0; i < pending; i++) {
                                offset = (i * segs * PAGE_SIZE) >> 9;
                                size = min((unsigned int)(segs * PAGE_SIZE) >> 9,
-                                          (unsigned int)(bio->bi_size >> 9) - offset);
+                                          (unsigned int)bio_sectors(bio) - offset);
                                cloned_bio = bio_clone(bio, GFP_NOIO);
                                BUG_ON(cloned_bio == NULL);
                                bio_trim(cloned_bio, offset, size);
index 5980cb9..51e75ad 100644 (file)
@@ -561,11 +561,11 @@ static int gdrom_set_interrupt_handlers(void)
        int err;
 
        err = request_irq(HW_EVENT_GDROM_CMD, gdrom_command_interrupt,
-               IRQF_DISABLED, "gdrom_command", &gd);
+               0, "gdrom_command", &gd);
        if (err)
                return err;
        err = request_irq(HW_EVENT_GDROM_DMA, gdrom_dma_interrupt,
-               IRQF_DISABLED, "gdrom_dma", &gd);
+               0, "gdrom_dma", &gd);
        if (err)
                free_irq(HW_EVENT_GDROM_CMD, &gd);
        return err;
index 671c385..03f4189 100644 (file)
@@ -2724,6 +2724,7 @@ static struct platform_driver ipmi_driver = {
 static int ipmi_parisc_probe(struct parisc_device *dev)
 {
        struct smi_info *info;
+       int rv;
 
        info = smi_info_alloc();
 
index 0e9c825..c488b84 100644 (file)
@@ -1,7 +1,8 @@
 
 obj-$(CONFIG_BCACHE)   += bcache.o
 
-bcache-y               := alloc.o btree.o bset.o io.o journal.o writeback.o\
-       movinggc.o request.o super.o sysfs.o debug.o util.o trace.o stats.o closure.o
+bcache-y               := alloc.o bset.o btree.o closure.o debug.o extents.o\
+       io.o journal.o movinggc.o request.o stats.o super.o sysfs.o trace.o\
+       util.o writeback.o
 
 CFLAGS_request.o       += -Iblock
index 4c9852d..c0d37d0 100644 (file)
@@ -132,10 +132,16 @@ bool bch_bucket_add_unused(struct cache *ca, struct bucket *b)
 {
        BUG_ON(GC_MARK(b) || GC_SECTORS_USED(b));
 
-       if (fifo_used(&ca->free) > ca->watermark[WATERMARK_MOVINGGC] &&
-           CACHE_REPLACEMENT(&ca->sb) == CACHE_REPLACEMENT_FIFO)
-               return false;
+       if (CACHE_REPLACEMENT(&ca->sb) == CACHE_REPLACEMENT_FIFO) {
+               unsigned i;
+
+               for (i = 0; i < RESERVE_NONE; i++)
+                       if (!fifo_full(&ca->free[i]))
+                               goto add;
 
+               return false;
+       }
+add:
        b->prio = 0;
 
        if (can_inc_bucket_gen(b) &&
@@ -162,8 +168,21 @@ static void invalidate_one_bucket(struct cache *ca, struct bucket *b)
        fifo_push(&ca->free_inc, b - ca->buckets);
 }
 
-#define bucket_prio(b)                         \
-       (((unsigned) (b->prio - ca->set->min_prio)) * GC_SECTORS_USED(b))
+/*
+ * Determines the order in which we reuse buckets, smallest bucket_prio()
+ * first: we also factor in the number of sectors of live data in each
+ * bucket, and for that multiplication to make sense the bucket priorities
+ * have to be scaled.
+ *
+ * Thus, we scale the bucket priorities so that the bucket with the smallest
+ * prio is worth 1/8th of what INITIAL_PRIO is worth.
+ */
+
+#define bucket_prio(b)                                                 \
+({                                                                     \
+       unsigned min_prio = (INITIAL_PRIO - ca->set->min_prio) / 8;     \
+                                                                       \
+       (b->prio - ca->set->min_prio + min_prio) * GC_SECTORS_USED(b);  \
+})
 
 #define bucket_max_cmp(l, r)   (bucket_prio(l) < bucket_prio(r))
 #define bucket_min_cmp(l, r)   (bucket_prio(l) > bucket_prio(r))
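
A worked example of the scaling: with INITIAL_PRIO at 32768 and ca->set->min_prio at 0, the local min_prio term is (32768 - 0) / 8 = 4096, so even a bucket whose prio equals the set-wide minimum scores 4096 * GC_SECTORS_USED(b) rather than 0, keeping the live-data multiplier meaningful for the heap comparisons below.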
@@ -304,6 +323,21 @@ do {                                                                       \
        __set_current_state(TASK_RUNNING);                              \
 } while (0)
 
+static int bch_allocator_push(struct cache *ca, long bucket)
+{
+       unsigned i;
+
+       /* Prios/gens are actually the most important reserve */
+       if (fifo_push(&ca->free[RESERVE_PRIO], bucket))
+               return true;
+
+       for (i = 0; i < RESERVE_NR; i++)
+               if (fifo_push(&ca->free[i], bucket))
+                       return true;
+
+       return false;
+}
+
 static int bch_allocator_thread(void *arg)
 {
        struct cache *ca = arg;
@@ -336,9 +370,7 @@ static int bch_allocator_thread(void *arg)
                                mutex_lock(&ca->set->bucket_lock);
                        }
 
-                       allocator_wait(ca, !fifo_full(&ca->free));
-
-                       fifo_push(&ca->free, bucket);
+                       allocator_wait(ca, bch_allocator_push(ca, bucket));
                        wake_up(&ca->set->bucket_wait);
                }
 
@@ -365,34 +397,29 @@ static int bch_allocator_thread(void *arg)
        }
 }
 
-long bch_bucket_alloc(struct cache *ca, unsigned watermark, bool wait)
+long bch_bucket_alloc(struct cache *ca, unsigned reserve, bool wait)
 {
        DEFINE_WAIT(w);
        struct bucket *b;
        long r;
 
        /* fastpath */
-       if (fifo_used(&ca->free) > ca->watermark[watermark]) {
-               fifo_pop(&ca->free, r);
+       if (fifo_pop(&ca->free[RESERVE_NONE], r) ||
+           fifo_pop(&ca->free[reserve], r))
                goto out;
-       }
 
        if (!wait)
                return -1;
 
-       while (1) {
-               if (fifo_used(&ca->free) > ca->watermark[watermark]) {
-                       fifo_pop(&ca->free, r);
-                       break;
-               }
-
+       do {
                prepare_to_wait(&ca->set->bucket_wait, &w,
                                TASK_UNINTERRUPTIBLE);
 
                mutex_unlock(&ca->set->bucket_lock);
                schedule();
                mutex_lock(&ca->set->bucket_lock);
-       }
+       } while (!fifo_pop(&ca->free[RESERVE_NONE], r) &&
+                !fifo_pop(&ca->free[reserve], r));
 
        finish_wait(&ca->set->bucket_wait, &w);
 out:
@@ -401,12 +428,14 @@ out:
        if (expensive_debug_checks(ca->set)) {
                size_t iter;
                long i;
+               unsigned j;
 
                for (iter = 0; iter < prio_buckets(ca) * 2; iter++)
                        BUG_ON(ca->prio_buckets[iter] == (uint64_t) r);
 
-               fifo_for_each(i, &ca->free, iter)
-                       BUG_ON(i == r);
+               for (j = 0; j < RESERVE_NR; j++)
+                       fifo_for_each(i, &ca->free[j], iter)
+                               BUG_ON(i == r);
                fifo_for_each(i, &ca->free_inc, iter)
                        BUG_ON(i == r);
                fifo_for_each(i, &ca->unused, iter)
@@ -419,7 +448,7 @@ out:
 
        SET_GC_SECTORS_USED(b, ca->sb.bucket_size);
 
-       if (watermark <= WATERMARK_METADATA) {
+       if (reserve <= RESERVE_PRIO) {
                SET_GC_MARK(b, GC_MARK_METADATA);
                SET_GC_MOVE(b, 0);
                b->prio = BTREE_PRIO;
@@ -445,7 +474,7 @@ void bch_bucket_free(struct cache_set *c, struct bkey *k)
        }
 }
 
-int __bch_bucket_alloc_set(struct cache_set *c, unsigned watermark,
+int __bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
                           struct bkey *k, int n, bool wait)
 {
        int i;
@@ -459,7 +488,7 @@ int __bch_bucket_alloc_set(struct cache_set *c, unsigned watermark,
 
        for (i = 0; i < n; i++) {
                struct cache *ca = c->cache_by_alloc[i];
-               long b = bch_bucket_alloc(ca, watermark, wait);
+               long b = bch_bucket_alloc(ca, reserve, wait);
 
                if (b == -1)
                        goto err;
@@ -478,12 +507,12 @@ err:
        return -1;
 }
 
-int bch_bucket_alloc_set(struct cache_set *c, unsigned watermark,
+int bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
                         struct bkey *k, int n, bool wait)
 {
        int ret;
        mutex_lock(&c->bucket_lock);
-       ret = __bch_bucket_alloc_set(c, watermark, k, n, wait);
+       ret = __bch_bucket_alloc_set(c, reserve, k, n, wait);
        mutex_unlock(&c->bucket_lock);
        return ret;
 }
@@ -573,8 +602,8 @@ bool bch_alloc_sectors(struct cache_set *c, struct bkey *k, unsigned sectors,
 
        while (!(b = pick_data_bucket(c, k, write_point, &alloc.key))) {
                unsigned watermark = write_prio
-                       ? WATERMARK_MOVINGGC
-                       : WATERMARK_NONE;
+                       ? RESERVE_MOVINGGC
+                       : RESERVE_NONE;
 
                spin_unlock(&c->data_bucket_lock);
 
@@ -689,7 +718,7 @@ int bch_cache_allocator_init(struct cache *ca)
         * Then 8 for btree allocations
         * Then half for the moving garbage collector
         */
-
+#if 0
        ca->watermark[WATERMARK_PRIO] = 0;
 
        ca->watermark[WATERMARK_METADATA] = prio_buckets(ca);
@@ -699,6 +728,6 @@ int bch_cache_allocator_init(struct cache *ca)
 
        ca->watermark[WATERMARK_NONE] = ca->free.size / 2 +
                ca->watermark[WATERMARK_MOVINGGC];
-
+#endif
        return 0;
 }
index 754f431..0c707e4 100644 (file)
 #include <linux/types.h>
 #include <linux/workqueue.h>
 
+#include "bset.h"
 #include "util.h"
 #include "closure.h"
 
@@ -280,7 +281,6 @@ struct bcache_device {
        unsigned long           sectors_dirty_last;
        long                    sectors_dirty_derivative;
 
-       mempool_t               *unaligned_bvec;
        struct bio_set          *bio_split;
 
        unsigned                data_csum:1;
@@ -310,7 +310,8 @@ struct cached_dev {
        struct cache_sb         sb;
        struct bio              sb_bio;
        struct bio_vec          sb_bv[1];
-       struct closure_with_waitlist sb_write;
+       struct closure          sb_write;
+       struct semaphore        sb_write_mutex;
 
        /* Refcount on the cache set. Always nonzero when we're caching. */
        atomic_t                count;
@@ -383,12 +384,12 @@ struct cached_dev {
        unsigned                writeback_rate_p_term_inverse;
 };
 
-enum alloc_watermarks {
-       WATERMARK_PRIO,
-       WATERMARK_METADATA,
-       WATERMARK_MOVINGGC,
-       WATERMARK_NONE,
-       WATERMARK_MAX
+enum alloc_reserve {
+       RESERVE_BTREE,
+       RESERVE_PRIO,
+       RESERVE_MOVINGGC,
+       RESERVE_NONE,
+       RESERVE_NR,
 };
 
 struct cache {
@@ -400,8 +401,6 @@ struct cache {
        struct kobject          kobj;
        struct block_device     *bdev;
 
-       unsigned                watermark[WATERMARK_MAX];
-
        struct task_struct      *alloc_thread;
 
        struct closure          prio;
@@ -430,7 +429,7 @@ struct cache {
         * because all the data they contained was overwritten), so we only
         * need to discard them before they can be moved to the free list.
         */
-       DECLARE_FIFO(long, free);
+       DECLARE_FIFO(long, free)[RESERVE_NR];
        DECLARE_FIFO(long, free_inc);
        DECLARE_FIFO(long, unused);
 
@@ -515,7 +514,8 @@ struct cache_set {
        uint64_t                cached_dev_sectors;
        struct closure          caching;
 
-       struct closure_with_waitlist sb_write;
+       struct closure          sb_write;
+       struct semaphore        sb_write_mutex;
 
        mempool_t               *search;
        mempool_t               *bio_meta;
@@ -630,13 +630,15 @@ struct cache_set {
 
 #ifdef CONFIG_BCACHE_DEBUG
        struct btree            *verify_data;
+       struct bset             *verify_ondisk;
        struct mutex            verify_lock;
 #endif
 
        unsigned                nr_uuids;
        struct uuid_entry       *uuids;
        BKEY_PADDED(uuid_bucket);
-       struct closure_with_waitlist uuid_write;
+       struct closure          uuid_write;
+       struct semaphore        uuid_write_mutex;
 
        /*
         * A btree node on disk could have too many bsets for an iterator to fit
@@ -644,13 +646,7 @@ struct cache_set {
         */
        mempool_t               *fill_iter;
 
-       /*
-        * btree_sort() is a merge sort and requires temporary space - single
-        * element mempool
-        */
-       struct mutex            sort_lock;
-       struct bset             *sort;
-       unsigned                sort_crit_factor;
+       struct bset_sort_state  sort;
 
        /* List of buckets we're currently writing data to */
        struct list_head        data_buckets;
@@ -666,7 +662,6 @@ struct cache_set {
        unsigned                congested_read_threshold_us;
        unsigned                congested_write_threshold_us;
 
-       struct time_stats       sort_time;
        struct time_stats       btree_gc_time;
        struct time_stats       btree_split_time;
        struct time_stats       btree_read_time;
@@ -684,9 +679,9 @@ struct cache_set {
        unsigned                error_decay;
 
        unsigned short          journal_delay_ms;
+       bool                    expensive_debug_checks;
        unsigned                verify:1;
        unsigned                key_merging_disabled:1;
-       unsigned                expensive_debug_checks:1;
        unsigned                gc_always_rewrite:1;
        unsigned                shrinker_disabled:1;
        unsigned                copy_gc_enabled:1;
@@ -708,13 +703,8 @@ struct bbio {
        struct bio              bio;
 };
 
-static inline unsigned local_clock_us(void)
-{
-       return local_clock() >> 10;
-}
-
 #define BTREE_PRIO             USHRT_MAX
-#define INITIAL_PRIO           32768
+#define INITIAL_PRIO           32768U
 
 #define btree_bytes(c)         ((c)->btree_pages * PAGE_SIZE)
 #define btree_blocks(b)                                                        \
@@ -727,21 +717,6 @@ static inline unsigned local_clock_us(void)
 #define bucket_bytes(c)                ((c)->sb.bucket_size << 9)
 #define block_bytes(c)         ((c)->sb.block_size << 9)
 
-#define __set_bytes(i, k)      (sizeof(*(i)) + (k) * sizeof(uint64_t))
-#define set_bytes(i)           __set_bytes(i, i->keys)
-
-#define __set_blocks(i, k, c)  DIV_ROUND_UP(__set_bytes(i, k), block_bytes(c))
-#define set_blocks(i, c)       __set_blocks(i, (i)->keys, c)
-
-#define node(i, j)             ((struct bkey *) ((i)->d + (j)))
-#define end(i)                 node(i, (i)->keys)
-
-#define index(i, b)                                                    \
-       ((size_t) (((void *) i - (void *) (b)->sets[0].data) /          \
-                  block_bytes(b->c)))
-
-#define btree_data_space(b)    (PAGE_SIZE << (b)->page_order)
-
 #define prios_per_bucket(c)                            \
        ((bucket_bytes(c) - sizeof(struct prio_set)) /  \
         sizeof(struct bucket_disk))
@@ -784,20 +759,34 @@ static inline struct bucket *PTR_BUCKET(struct cache_set *c,
        return PTR_CACHE(c, k, ptr)->buckets + PTR_BUCKET_NR(c, k, ptr);
 }
 
-/* Btree key macros */
+static inline uint8_t gen_after(uint8_t a, uint8_t b)
+{
+       uint8_t r = a - b;
+       return r > 128U ? 0 : r;
+}
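
gen_after() compares 8-bit generation counters modulo 256: a difference of up to 128 means a is genuinely ahead of b by that amount, while anything larger is taken to mean b has wrapped past a, so the result clamps to 0. For example, gen_after(1, 250) is 7 (a wrapped around), while gen_after(250, 1) is 0; ptr_stale() below uses this to measure how far a pointer's generation lags its bucket's.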
 
-static inline void bkey_init(struct bkey *k)
+static inline uint8_t ptr_stale(struct cache_set *c, const struct bkey *k,
+                               unsigned i)
 {
-       *k = ZERO_KEY;
+       return gen_after(PTR_BUCKET(c, k, i)->gen, PTR_GEN(k, i));
 }
 
+static inline bool ptr_available(struct cache_set *c, const struct bkey *k,
+                                unsigned i)
+{
+       return (PTR_DEV(k, i) < MAX_CACHES_PER_SET) && PTR_CACHE(c, k, i);
+}
+
+/* Btree key macros */
+
 /*
  * This is used for various on disk data structures - cache_sb, prio_set, bset,
  * jset: The checksum is _always_ the first 8 bytes of these structs
  */
 #define csum_set(i)                                                    \
        bch_crc64(((void *) (i)) + sizeof(uint64_t),                    \
-             ((void *) end(i)) - (((void *) (i)) + sizeof(uint64_t)))
+                 ((void *) bset_bkey_last(i)) -                        \
+                 (((void *) (i)) + sizeof(uint64_t)))
 
 /* Error handling macros */
 
@@ -902,7 +891,6 @@ void bch_bbio_endio(struct cache_set *, struct bio *, int, const char *);
 void bch_bbio_free(struct bio *, struct cache_set *);
 struct bio *bch_bbio_alloc(struct cache_set *);
 
-struct bio *bch_bio_split(struct bio *, int, gfp_t, struct bio_set *);
 void bch_generic_make_request(struct bio *, struct bio_split_pool *);
 void __bch_submit_bbio(struct bio *, struct cache_set *);
 void bch_submit_bbio(struct bio *, struct cache_set *, struct bkey *, unsigned);
index 7d388b8..4f6b594 100644 (file)
  * Copyright 2012 Google, Inc.
  */
 
-#include "bcache.h"
-#include "btree.h"
-#include "debug.h"
+#define pr_fmt(fmt) "bcache: %s() " fmt "\n", __func__
 
+#include "util.h"
+#include "bset.h"
+
+#include <linux/console.h>
 #include <linux/random.h>
 #include <linux/prefetch.h>
 
+#ifdef CONFIG_BCACHE_DEBUG
+
+void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned set)
+{
+       struct bkey *k, *next;
+
+       for (k = i->start; k < bset_bkey_last(i); k = next) {
+               next = bkey_next(k);
+
+               printk(KERN_ERR "block %u key %zi/%u: ", set,
+                      (uint64_t *) k - i->d, i->keys);
+
+               if (b->ops->key_dump)
+                       b->ops->key_dump(b, k);
+               else
+                       printk("%llu:%llu\n", KEY_INODE(k), KEY_OFFSET(k));
+
+               if (next < bset_bkey_last(i) &&
+                   bkey_cmp(k, b->ops->is_extents ?
+                            &START_KEY(next) : next) > 0)
+                       printk(KERN_ERR "Key skipped backwards\n");
+       }
+}
+
+void bch_dump_bucket(struct btree_keys *b)
+{
+       unsigned i;
+
+       console_lock();
+       for (i = 0; i <= b->nsets; i++)
+               bch_dump_bset(b, b->set[i].data,
+                             bset_sector_offset(b, b->set[i].data));
+       console_unlock();
+}
+
+int __bch_count_data(struct btree_keys *b)
+{
+       unsigned ret = 0;
+       struct btree_iter iter;
+       struct bkey *k;
+
+       if (b->ops->is_extents)
+               for_each_key(b, k, &iter)
+                       ret += KEY_SIZE(k);
+       return ret;
+}
+
+void __bch_check_keys(struct btree_keys *b, const char *fmt, ...)
+{
+       va_list args;
+       struct bkey *k, *p = NULL;
+       struct btree_iter iter;
+       const char *err;
+
+       for_each_key(b, k, &iter) {
+               if (b->ops->is_extents) {
+                       err = "Keys out of order";
+                       if (p && bkey_cmp(&START_KEY(p), &START_KEY(k)) > 0)
+                               goto bug;
+
+                       if (bch_ptr_invalid(b, k))
+                               continue;
+
+                       err =  "Overlapping keys";
+                       if (p && bkey_cmp(p, &START_KEY(k)) > 0)
+                               goto bug;
+               } else {
+                       if (bch_ptr_bad(b, k))
+                               continue;
+
+                       err = "Duplicate keys";
+                       if (p && !bkey_cmp(p, k))
+                               goto bug;
+               }
+               p = k;
+       }
+#if 0
+       err = "Key larger than btree node key";
+       if (p && bkey_cmp(p, &b->key) > 0)
+               goto bug;
+#endif
+       return;
+bug:
+       bch_dump_bucket(b);
+
+       va_start(args, fmt);
+       vprintk(fmt, args);
+       va_end(args);
+
+       panic("bch_check_keys error:  %s:\n", err);
+}
+
+static void bch_btree_iter_next_check(struct btree_iter *iter)
+{
+       struct bkey *k = iter->data->k, *next = bkey_next(k);
+
+       if (next < iter->data->end &&
+           bkey_cmp(k, iter->b->ops->is_extents ?
+                    &START_KEY(next) : next) > 0) {
+               bch_dump_bucket(iter->b);
+               panic("Key skipped backwards\n");
+       }
+}
+
+#else
+
+static inline void bch_btree_iter_next_check(struct btree_iter *iter) {}
+
+#endif
+
 /* Keylists */
 
-int bch_keylist_realloc(struct keylist *l, int nptrs, struct cache_set *c)
+int __bch_keylist_realloc(struct keylist *l, unsigned u64s)
 {
        size_t oldsize = bch_keylist_nkeys(l);
-       size_t newsize = oldsize + 2 + nptrs;
+       size_t newsize = oldsize + u64s;
        uint64_t *old_keys = l->keys_p == l->inline_keys ? NULL : l->keys_p;
        uint64_t *new_keys;
 
-       /* The journalling code doesn't handle the case where the keys to insert
-        * is bigger than an empty write: If we just return -ENOMEM here,
-        * bio_insert() and bio_invalidate() will insert the keys created so far
-        * and finish the rest when the keylist is empty.
-        */
-       if (newsize * sizeof(uint64_t) > block_bytes(c) - sizeof(struct jset))
-               return -ENOMEM;
-
        newsize = roundup_pow_of_two(newsize);
 
        if (newsize <= KEYLIST_INLINE ||
@@ -71,136 +175,6 @@ void bch_keylist_pop_front(struct keylist *l)
                bch_keylist_bytes(l));
 }
 
-/* Pointer validation */
-
-static bool __ptr_invalid(struct cache_set *c, const struct bkey *k)
-{
-       unsigned i;
-
-       for (i = 0; i < KEY_PTRS(k); i++)
-               if (ptr_available(c, k, i)) {
-                       struct cache *ca = PTR_CACHE(c, k, i);
-                       size_t bucket = PTR_BUCKET_NR(c, k, i);
-                       size_t r = bucket_remainder(c, PTR_OFFSET(k, i));
-
-                       if (KEY_SIZE(k) + r > c->sb.bucket_size ||
-                           bucket <  ca->sb.first_bucket ||
-                           bucket >= ca->sb.nbuckets)
-                               return true;
-               }
-
-       return false;
-}
-
-bool bch_btree_ptr_invalid(struct cache_set *c, const struct bkey *k)
-{
-       char buf[80];
-
-       if (!KEY_PTRS(k) || !KEY_SIZE(k) || KEY_DIRTY(k))
-               goto bad;
-
-       if (__ptr_invalid(c, k))
-               goto bad;
-
-       return false;
-bad:
-       bch_bkey_to_text(buf, sizeof(buf), k);
-       cache_bug(c, "spotted btree ptr %s: %s", buf, bch_ptr_status(c, k));
-       return true;
-}
-
-bool bch_extent_ptr_invalid(struct cache_set *c, const struct bkey *k)
-{
-       char buf[80];
-
-       if (!KEY_SIZE(k))
-               return true;
-
-       if (KEY_SIZE(k) > KEY_OFFSET(k))
-               goto bad;
-
-       if (__ptr_invalid(c, k))
-               goto bad;
-
-       return false;
-bad:
-       bch_bkey_to_text(buf, sizeof(buf), k);
-       cache_bug(c, "spotted extent %s: %s", buf, bch_ptr_status(c, k));
-       return true;
-}
-
-static bool ptr_bad_expensive_checks(struct btree *b, const struct bkey *k,
-                                    unsigned ptr)
-{
-       struct bucket *g = PTR_BUCKET(b->c, k, ptr);
-       char buf[80];
-
-       if (mutex_trylock(&b->c->bucket_lock)) {
-               if (b->level) {
-                       if (KEY_DIRTY(k) ||
-                           g->prio != BTREE_PRIO ||
-                           (b->c->gc_mark_valid &&
-                            GC_MARK(g) != GC_MARK_METADATA))
-                               goto err;
-
-               } else {
-                       if (g->prio == BTREE_PRIO)
-                               goto err;
-
-                       if (KEY_DIRTY(k) &&
-                           b->c->gc_mark_valid &&
-                           GC_MARK(g) != GC_MARK_DIRTY)
-                               goto err;
-               }
-               mutex_unlock(&b->c->bucket_lock);
-       }
-
-       return false;
-err:
-       mutex_unlock(&b->c->bucket_lock);
-       bch_bkey_to_text(buf, sizeof(buf), k);
-       btree_bug(b,
-"inconsistent pointer %s: bucket %zu pin %i prio %i gen %i last_gc %i mark %llu gc_gen %i",
-                 buf, PTR_BUCKET_NR(b->c, k, ptr), atomic_read(&g->pin),
-                 g->prio, g->gen, g->last_gc, GC_MARK(g), g->gc_gen);
-       return true;
-}
-
-bool bch_ptr_bad(struct btree *b, const struct bkey *k)
-{
-       struct bucket *g;
-       unsigned i, stale;
-
-       if (!bkey_cmp(k, &ZERO_KEY) ||
-           !KEY_PTRS(k) ||
-           bch_ptr_invalid(b, k))
-               return true;
-
-       for (i = 0; i < KEY_PTRS(k); i++) {
-               if (!ptr_available(b->c, k, i))
-                       return true;
-
-               g = PTR_BUCKET(b->c, k, i);
-               stale = ptr_stale(b->c, k, i);
-
-               btree_bug_on(stale > 96, b,
-                            "key too stale: %i, need_gc %u",
-                            stale, b->c->need_gc);
-
-               btree_bug_on(stale && KEY_DIRTY(k) && KEY_SIZE(k),
-                            b, "stale dirty pointer");
-
-               if (stale)
-                       return true;
-
-               if (expensive_debug_checks(b->c) &&
-                   ptr_bad_expensive_checks(b, k, i))
-                       return true;
-       }
-
-       return false;
-}
-
 /* Key/pointer manipulation */
 
 void bch_bkey_copy_single_ptr(struct bkey *dest, const struct bkey *src,
@@ -255,56 +229,138 @@ bool __bch_cut_back(const struct bkey *where, struct bkey *k)
        return true;
 }
 
-static uint64_t merge_chksums(struct bkey *l, struct bkey *r)
+/* Auxiliary search trees */
+
+/* 32 bits total: */
+#define BKEY_MID_BITS          3
+#define BKEY_EXPONENT_BITS     7
+#define BKEY_MANTISSA_BITS     (32 - BKEY_MID_BITS - BKEY_EXPONENT_BITS)
+#define BKEY_MANTISSA_MASK     ((1 << BKEY_MANTISSA_BITS) - 1)
+
+struct bkey_float {
+       unsigned        exponent:BKEY_EXPONENT_BITS;
+       unsigned        m:BKEY_MID_BITS;
+       unsigned        mantissa:BKEY_MANTISSA_BITS;
+} __packed;
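+
+/*
+ * A bkey_float compresses a pivot key down to 32 bits: mantissa is the
+ * BKEY_MANTISSA_BITS-wide slice of the key starting at bit exponent,
+ * picked so that it distinguishes the pivot from the key before it.
+ * Comparing mantissas taken at the same exponent then orders keys without
+ * touching the full bkeys; when no distinguishing slice exists, exponent
+ * is set to 127 and the lookup falls back to a full key comparison.
+ */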
+
+/*
+ * BSET_CACHELINE was originally intended to match the hardware cacheline size -
+ * it used to be 64, but I realized the lookup code would touch slightly less
+ * memory if it was 128.
+ *
+ * It defines the number of bytes (in struct bset) per struct bkey_float in
+ * the auxiliary search tree - when we're done searching the bkey_float tree we
+ * have this many bytes left that we do a linear search over.
+ *
+ * Since (after level 5) every level of the bset_tree is on a new cacheline,
+ * we're touching one fewer cacheline in the bset tree in exchange for one more
+ * cacheline in the linear search - but the linear search might stop before it
+ * gets to the second cacheline.
+ */
+
+#define BSET_CACHELINE         128
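+
+/*
+ * A worked example, assuming 4 KiB pages: with page_order = 2 a btree node
+ * holds 16384 bytes of keys, i.e. 16384 / 128 = 128 cachelines, so the
+ * auxiliary tree costs 128 * sizeof(struct bkey_float) = 512 bytes and the
+ * prev array another 128 bytes - under 4% overhead.
+ */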
+
+/* Space required for the btree node keys */
+static inline size_t btree_keys_bytes(struct btree_keys *b)
 {
-       return (l->ptr[KEY_PTRS(l)] + r->ptr[KEY_PTRS(r)]) &
-               ~((uint64_t)1 << 63);
+       return PAGE_SIZE << b->page_order;
 }
 
-/* Tries to merge l and r: l should be lower than r
- * Returns true if we were able to merge. If we did merge, l will be the merged
- * key, r will be untouched.
- */
-bool bch_bkey_try_merge(struct btree *b, struct bkey *l, struct bkey *r)
+static inline size_t btree_keys_cachelines(struct btree_keys *b)
 {
-       unsigned i;
+       return btree_keys_bytes(b) / BSET_CACHELINE;
+}
 
-       if (key_merging_disabled(b->c))
-               return false;
+/* Space required for the auxiliary search trees */
+static inline size_t bset_tree_bytes(struct btree_keys *b)
+{
+       return btree_keys_cachelines(b) * sizeof(struct bkey_float);
+}
 
-       if (KEY_PTRS(l) != KEY_PTRS(r) ||
-           KEY_DIRTY(l) != KEY_DIRTY(r) ||
-           bkey_cmp(l, &START_KEY(r)))
-               return false;
+/* Space required for the prev pointers */
+static inline size_t bset_prev_bytes(struct btree_keys *b)
+{
+       return btree_keys_cachelines(b) * sizeof(uint8_t);
+}
 
-       for (i = 0; i < KEY_PTRS(l); i++)
-               if (l->ptr[i] + PTR(0, KEY_SIZE(l), 0) != r->ptr[i] ||
-                   PTR_BUCKET_NR(b->c, l, i) != PTR_BUCKET_NR(b->c, r, i))
-                       return false;
+/* Memory allocation */
 
-       /* Keys with no pointers aren't restricted to one bucket and could
-        * overflow KEY_SIZE
-        */
-       if (KEY_SIZE(l) + KEY_SIZE(r) > USHRT_MAX) {
-               SET_KEY_OFFSET(l, KEY_OFFSET(l) + USHRT_MAX - KEY_SIZE(l));
-               SET_KEY_SIZE(l, USHRT_MAX);
+void bch_btree_keys_free(struct btree_keys *b)
+{
+       struct bset_tree *t = b->set;
 
-               bch_cut_front(l, r);
-               return false;
-       }
+       if (bset_prev_bytes(b) < PAGE_SIZE)
+               kfree(t->prev);
+       else
+               free_pages((unsigned long) t->prev,
+                          get_order(bset_prev_bytes(b)));
 
-       if (KEY_CSUM(l)) {
-               if (KEY_CSUM(r))
-                       l->ptr[KEY_PTRS(l)] = merge_chksums(l, r);
-               else
-                       SET_KEY_CSUM(l, 0);
-       }
+       if (bset_tree_bytes(b) < PAGE_SIZE)
+               kfree(t->tree);
+       else
+               free_pages((unsigned long) t->tree,
+                          get_order(bset_tree_bytes(b)));
 
-       SET_KEY_OFFSET(l, KEY_OFFSET(l) + KEY_SIZE(r));
-       SET_KEY_SIZE(l, KEY_SIZE(l) + KEY_SIZE(r));
+       free_pages((unsigned long) t->data, b->page_order);
 
-       return true;
+       t->prev = NULL;
+       t->tree = NULL;
+       t->data = NULL;
+}
+EXPORT_SYMBOL(bch_btree_keys_free);
+
+int bch_btree_keys_alloc(struct btree_keys *b, unsigned page_order, gfp_t gfp)
+{
+       struct bset_tree *t = b->set;
+
+       BUG_ON(t->data);
+
+       b->page_order = page_order;
+
+       t->data = (void *) __get_free_pages(gfp, b->page_order);
+       if (!t->data)
+               goto err;
+
+       t->tree = bset_tree_bytes(b) < PAGE_SIZE
+               ? kmalloc(bset_tree_bytes(b), gfp)
+               : (void *) __get_free_pages(gfp, get_order(bset_tree_bytes(b)));
+       if (!t->tree)
+               goto err;
+
+       t->prev = bset_prev_bytes(b) < PAGE_SIZE
+               ? kmalloc(bset_prev_bytes(b), gfp)
+               : (void *) __get_free_pages(gfp, get_order(bset_prev_bytes(b)));
+       if (!t->prev)
+               goto err;
+
+       return 0;
+err:
+       bch_btree_keys_free(b);
+       return -ENOMEM;
 }
+EXPORT_SYMBOL(bch_btree_keys_alloc);
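+
+/*
+ * A minimal usage sketch; the ops table, page order, gfp flags and the
+ * debug flag pointer here are all the caller's to choose:
+ *
+ *	if (bch_btree_keys_alloc(b, page_order, GFP_KERNEL))
+ *		return -ENOMEM;
+ *	bch_btree_keys_init(b, &my_btree_keys_ops, &expensive_checks);
+ *	...
+ *	bch_btree_keys_free(b);
+ */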
+
+void bch_btree_keys_init(struct btree_keys *b, const struct btree_keys_ops *ops,
+                        bool *expensive_debug_checks)
+{
+       unsigned i;
+
+       b->ops = ops;
+       b->expensive_debug_checks = expensive_debug_checks;
+       b->nsets = 0;
+       b->last_set_unwritten = 0;
+
+       /* XXX: shouldn't be needed */
+       for (i = 0; i < MAX_BSETS; i++)
+               b->set[i].size = 0;
+       /*
+        * Second loop starts at 1 because b->set[0].data is the memory we
+        * allocated
+        */
+       for (i = 1; i < MAX_BSETS; i++)
+               b->set[i].data = NULL;
+}
+EXPORT_SYMBOL(bch_btree_keys_init);
 
 /* Binary tree stuff for auxiliary search trees */
 
@@ -455,9 +511,11 @@ static unsigned bkey_to_cacheline(struct bset_tree *t, struct bkey *k)
        return ((void *) k - (void *) t->data) / BSET_CACHELINE;
 }
 
-static unsigned bkey_to_cacheline_offset(struct bkey *k)
+static unsigned bkey_to_cacheline_offset(struct bset_tree *t,
+                                        unsigned cacheline,
+                                        struct bkey *k)
 {
-       return ((size_t) k & (BSET_CACHELINE - 1)) / sizeof(uint64_t);
+       return (u64 *) k - (u64 *) cacheline_to_bkey(t, cacheline, 0);
 }
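+
+/*
+ * The offset is now measured in u64s from the first key of the given
+ * cacheline, rather than taken from the low bits of k's address, so it no
+ * longer assumes cachelines within t->data are BSET_CACHELINE-aligned.
+ */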
 
 static struct bkey *tree_to_bkey(struct bset_tree *t, unsigned j)
@@ -504,7 +562,7 @@ static void make_bfloat(struct bset_tree *t, unsigned j)
                : tree_to_prev_bkey(t, j >> ffs(j));
 
        struct bkey *r = is_power_of_2(j + 1)
-               ? node(t->data, t->data->keys - bkey_u64s(&t->end))
+               ? bset_bkey_idx(t->data, t->data->keys - bkey_u64s(&t->end))
                : tree_to_bkey(t, j >> (ffz(j) + 1));
 
        BUG_ON(m < l || m > r);
@@ -528,9 +586,9 @@ static void make_bfloat(struct bset_tree *t, unsigned j)
                f->exponent = 127;
 }
 
-static void bset_alloc_tree(struct btree *b, struct bset_tree *t)
+static void bset_alloc_tree(struct btree_keys *b, struct bset_tree *t)
 {
-       if (t != b->sets) {
+       if (t != b->set) {
                unsigned j = roundup(t[-1].size,
                                     64 / sizeof(struct bkey_float));
 
@@ -538,33 +596,54 @@ static void bset_alloc_tree(struct btree *b, struct bset_tree *t)
                t->prev = t[-1].prev + j;
        }
 
-       while (t < b->sets + MAX_BSETS)
+       while (t < b->set + MAX_BSETS)
                t++->size = 0;
 }
 
-static void bset_build_unwritten_tree(struct btree *b)
+static void bch_bset_build_unwritten_tree(struct btree_keys *b)
 {
-       struct bset_tree *t = b->sets + b->nsets;
+       struct bset_tree *t = bset_tree_last(b);
+
+       BUG_ON(b->last_set_unwritten);
+       b->last_set_unwritten = 1;
 
        bset_alloc_tree(b, t);
 
-       if (t->tree != b->sets->tree + bset_tree_space(b)) {
-               t->prev[0] = bkey_to_cacheline_offset(t->data->start);
+       if (t->tree != b->set->tree + btree_keys_cachelines(b)) {
+               t->prev[0] = bkey_to_cacheline_offset(t, 0, t->data->start);
                t->size = 1;
        }
 }
 
-static void bset_build_written_tree(struct btree *b)
+void bch_bset_init_next(struct btree_keys *b, struct bset *i, uint64_t magic)
+{
+       if (i != b->set->data) {
+               b->set[++b->nsets].data = i;
+               i->seq = b->set->data->seq;
+       } else
+               get_random_bytes(&i->seq, sizeof(uint64_t));
+
+       i->magic        = magic;
+       i->version      = 0;
+       i->keys         = 0;
+
+       bch_bset_build_unwritten_tree(b);
+}
+EXPORT_SYMBOL(bch_bset_init_next);
+
+void bch_bset_build_written_tree(struct btree_keys *b)
 {
-       struct bset_tree *t = b->sets + b->nsets;
-       struct bkey *k = t->data->start;
+       struct bset_tree *t = bset_tree_last(b);
+       struct bkey *prev = NULL, *k = t->data->start;
        unsigned j, cacheline = 1;
 
+       b->last_set_unwritten = 0;
+
        bset_alloc_tree(b, t);
 
        t->size = min_t(unsigned,
-                       bkey_to_cacheline(t, end(t->data)),
-                       b->sets->tree + bset_tree_space(b) - t->tree);
+                       bkey_to_cacheline(t, bset_bkey_last(t->data)),
+                       b->set->tree + btree_keys_cachelines(b) - t->tree);
 
        if (t->size < 2) {
                t->size = 0;
@@ -577,16 +656,14 @@ static void bset_build_written_tree(struct btree *b)
        for (j = inorder_next(0, t->size);
             j;
             j = inorder_next(j, t->size)) {
-               while (bkey_to_cacheline(t, k) != cacheline)
-                       k = bkey_next(k);
+               while (bkey_to_cacheline(t, k) < cacheline)
+                       prev = k, k = bkey_next(k);
 
-               t->prev[j] = bkey_u64s(k);
-               k = bkey_next(k);
-               cacheline++;
-               t->tree[j].m = bkey_to_cacheline_offset(k);
+               t->prev[j] = bkey_u64s(prev);
+               t->tree[j].m = bkey_to_cacheline_offset(t, cacheline++, k);
        }
 
-       while (bkey_next(k) != end(t->data))
+       while (bkey_next(k) != bset_bkey_last(t->data))
                k = bkey_next(k);
 
        t->end = *k;
@@ -597,14 +674,17 @@ static void bset_build_written_tree(struct btree *b)
             j = inorder_next(j, t->size))
                make_bfloat(t, j);
 }
+EXPORT_SYMBOL(bch_bset_build_written_tree);
 
-void bch_bset_fix_invalidated_key(struct btree *b, struct bkey *k)
+/* Insert */
+
+void bch_bset_fix_invalidated_key(struct btree_keys *b, struct bkey *k)
 {
        struct bset_tree *t;
        unsigned inorder, j = 1;
 
-       for (t = b->sets; t <= &b->sets[b->nsets]; t++)
-               if (k < end(t->data))
+       for (t = b->set; t <= bset_tree_last(b); t++)
+               if (k < bset_bkey_last(t->data))
                        goto found_set;
 
        BUG();
@@ -617,7 +697,7 @@ found_set:
        if (k == t->data->start)
                goto fix_left;
 
-       if (bkey_next(k) == end(t->data)) {
+       if (bkey_next(k) == bset_bkey_last(t->data)) {
                t->end = *k;
                goto fix_right;
        }
@@ -642,10 +722,12 @@ fix_right:        do {
                        j = j * 2 + 1;
                } while (j < t->size);
 }
+EXPORT_SYMBOL(bch_bset_fix_invalidated_key);
 
-void bch_bset_fix_lookup_table(struct btree *b, struct bkey *k)
+static void bch_bset_fix_lookup_table(struct btree_keys *b,
+                                     struct bset_tree *t,
+                                     struct bkey *k)
 {
-       struct bset_tree *t = &b->sets[b->nsets];
        unsigned shift = bkey_u64s(k);
        unsigned j = bkey_to_cacheline(t, k);
 
@@ -657,8 +739,8 @@ void bch_bset_fix_lookup_table(struct btree *b, struct bkey *k)
         * lookup table for the first key that is strictly greater than k:
         * it's either k's cacheline or the next one
         */
-       if (j < t->size &&
-           table_to_bkey(t, j) <= k)
+       while (j < t->size &&
+              table_to_bkey(t, j) <= k)
                j++;
 
        /* Adjust all the lookup table entries, and find a new key for any that
@@ -673,54 +755,124 @@ void bch_bset_fix_lookup_table(struct btree *b, struct bkey *k)
                        while (k < cacheline_to_bkey(t, j, 0))
                                k = bkey_next(k);
 
-                       t->prev[j] = bkey_to_cacheline_offset(k);
+                       t->prev[j] = bkey_to_cacheline_offset(t, j, k);
                }
        }
 
-       if (t->size == b->sets->tree + bset_tree_space(b) - t->tree)
+       if (t->size == b->set->tree + btree_keys_cachelines(b) - t->tree)
                return;
 
        /* Possibly add a new entry to the end of the lookup table */
 
        for (k = table_to_bkey(t, t->size - 1);
-            k != end(t->data);
+            k != bset_bkey_last(t->data);
             k = bkey_next(k))
                if (t->size == bkey_to_cacheline(t, k)) {
-                       t->prev[t->size] = bkey_to_cacheline_offset(k);
+                       t->prev[t->size] = bkey_to_cacheline_offset(t, t->size, k);
                        t->size++;
                }
 }
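+
+/*
+ * For example, inserting a three-u64 key shifts every key after it up by
+ * three u64s, so each lookup table entry from that cacheline onwards grows
+ * by 3; any entry pushed past the end of its own cacheline is re-pointed
+ * at the first key that still falls within that cacheline.
+ */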
 
-void bch_bset_init_next(struct btree *b)
+/*
+ * Tries to merge l and r: l should be lower than r
+ * Returns true if we were able to merge. If we did merge, l will be the merged
+ * key, r will be untouched.
+ */
+bool bch_bkey_try_merge(struct btree_keys *b, struct bkey *l, struct bkey *r)
 {
-       struct bset *i = write_block(b);
+       if (!b->ops->key_merge)
+               return false;
 
-       if (i != b->sets[0].data) {
-               b->sets[++b->nsets].data = i;
-               i->seq = b->sets[0].data->seq;
-       } else
-               get_random_bytes(&i->seq, sizeof(uint64_t));
+       /*
+        * Generic header checks: assumes l and r are in order; they must
+        * have identical headers (dirty, ptrs, csum) and l must end exactly
+        * where r starts.
+        */
+       if (!bch_bkey_equal_header(l, r) ||
+            bkey_cmp(l, &START_KEY(r)))
+               return false;
 
-       i->magic        = bset_magic(&b->c->sb);
-       i->version      = 0;
-       i->keys         = 0;
+       return b->ops->key_merge(b, l, r);
+}
+EXPORT_SYMBOL(bch_bkey_try_merge);
 
-       bset_build_unwritten_tree(b);
+void bch_bset_insert(struct btree_keys *b, struct bkey *where,
+                    struct bkey *insert)
+{
+       struct bset_tree *t = bset_tree_last(b);
+
+       BUG_ON(!b->last_set_unwritten);
+       BUG_ON(bset_byte_offset(b, t->data) +
+              __set_bytes(t->data, t->data->keys + bkey_u64s(insert)) >
+              PAGE_SIZE << b->page_order);
+
+       memmove((uint64_t *) where + bkey_u64s(insert),
+               where,
+               (void *) bset_bkey_last(t->data) - (void *) where);
+
+       t->data->keys += bkey_u64s(insert);
+       bkey_copy(where, insert);
+       bch_bset_fix_lookup_table(b, t, where);
 }
+EXPORT_SYMBOL(bch_bset_insert);
+
+unsigned bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
+                             struct bkey *replace_key)
+{
+       unsigned status = BTREE_INSERT_STATUS_NO_INSERT;
+       struct bset *i = bset_tree_last(b)->data;
+       struct bkey *m, *prev = NULL;
+       struct btree_iter iter;
+
+       BUG_ON(b->ops->is_extents && !KEY_SIZE(k));
+
+       m = bch_btree_iter_init(b, &iter, b->ops->is_extents
+                               ? PRECEDING_KEY(&START_KEY(k))
+                               : PRECEDING_KEY(k));
+
+       if (b->ops->insert_fixup(b, k, &iter, replace_key))
+               return status;
+
+       status = BTREE_INSERT_STATUS_INSERT;
+
+       while (m != bset_bkey_last(i) &&
+              bkey_cmp(k, b->ops->is_extents ? &START_KEY(m) : m) > 0)
+               prev = m, m = bkey_next(m);
+
+       /* prev is in the tree, if we merge we're done */
+       status = BTREE_INSERT_STATUS_BACK_MERGE;
+       if (prev &&
+           bch_bkey_try_merge(b, prev, k))
+               goto merged;
+#if 0
+       status = BTREE_INSERT_STATUS_OVERWROTE;
+       if (m != bset_bkey_last(i) &&
+           KEY_PTRS(m) == KEY_PTRS(k) && !KEY_SIZE(m))
+               goto copy;
+#endif
+       status = BTREE_INSERT_STATUS_FRONT_MERGE;
+       if (m != bset_bkey_last(i) &&
+           bch_bkey_try_merge(b, k, m))
+               goto copy;
+
+       bch_bset_insert(b, m, k);
+copy:  bkey_copy(m, k);
+merged:
+       return status;
+}
+EXPORT_SYMBOL(bch_btree_insert_key);
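+
+/*
+ * So a successful insert ends one of three ways: the key before the insert
+ * position absorbs the new key (BACK_MERGE), the new key absorbs the key
+ * after it and is copied over it (FRONT_MERGE), or it is memmoved into
+ * place as a genuinely new key (INSERT). NO_INSERT means insert_fixup()
+ * consumed the key itself, e.g. a replace whose old key was not found.
+ */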
+
+/* Lookup */
 
 struct bset_search_iter {
        struct bkey *l, *r;
 };
 
-static struct bset_search_iter bset_search_write_set(struct btree *b,
-                                                    struct bset_tree *t,
+static struct bset_search_iter bset_search_write_set(struct bset_tree *t,
                                                     const struct bkey *search)
 {
        unsigned li = 0, ri = t->size;
 
-       BUG_ON(!b->nsets &&
-              t->size < bkey_to_cacheline(t, end(t->data)));
-
        while (li + 1 != ri) {
                unsigned m = (li + ri) >> 1;
 
@@ -732,12 +884,11 @@ static struct bset_search_iter bset_search_write_set(struct btree *b,
 
        return (struct bset_search_iter) {
                table_to_bkey(t, li),
-               ri < t->size ? table_to_bkey(t, ri) : end(t->data)
+               ri < t->size ? table_to_bkey(t, ri) : bset_bkey_last(t->data)
        };
 }
 
-static struct bset_search_iter bset_search_tree(struct btree *b,
-                                               struct bset_tree *t,
+static struct bset_search_iter bset_search_tree(struct bset_tree *t,
                                                const struct bkey *search)
 {
        struct bkey *l, *r;
@@ -784,7 +935,7 @@ static struct bset_search_iter bset_search_tree(struct btree *b,
                        f = &t->tree[inorder_next(j, t->size)];
                        r = cacheline_to_bkey(t, inorder, f->m);
                } else
-                       r = end(t->data);
+                       r = bset_bkey_last(t->data);
        } else {
                r = cacheline_to_bkey(t, inorder, f->m);
 
@@ -798,7 +949,7 @@ static struct bset_search_iter bset_search_tree(struct btree *b,
        return (struct bset_search_iter) {l, r};
 }
 
-struct bkey *__bch_bset_search(struct btree *b, struct bset_tree *t,
+struct bkey *__bch_bset_search(struct btree_keys *b, struct bset_tree *t,
                               const struct bkey *search)
 {
        struct bset_search_iter i;
@@ -820,7 +971,7 @@ struct bkey *__bch_bset_search(struct btree *b, struct bset_tree *t,
 
        if (unlikely(!t->size)) {
                i.l = t->data->start;
-               i.r = end(t->data);
+               i.r = bset_bkey_last(t->data);
        } else if (bset_written(b, t)) {
                /*
                 * Each node in the auxiliary search tree covers a certain range
@@ -830,23 +981,27 @@ struct bkey *__bch_bset_search(struct btree *b, struct bset_tree *t,
                 */
 
                if (unlikely(bkey_cmp(search, &t->end) >= 0))
-                       return end(t->data);
+                       return bset_bkey_last(t->data);
 
                if (unlikely(bkey_cmp(search, t->data->start) < 0))
                        return t->data->start;
 
-               i = bset_search_tree(b, t, search);
-       } else
-               i = bset_search_write_set(b, t, search);
+               i = bset_search_tree(t, search);
+       } else {
+               BUG_ON(!b->nsets &&
+                      t->size < bkey_to_cacheline(t, bset_bkey_last(t->data)));
 
-       if (expensive_debug_checks(b->c)) {
+               i = bset_search_write_set(t, search);
+       }
+
+       if (btree_keys_expensive_checks(b)) {
                BUG_ON(bset_written(b, t) &&
                       i.l != t->data->start &&
                       bkey_cmp(tree_to_prev_bkey(t,
                          inorder_to_tree(bkey_to_cacheline(t, i.l), t)),
                                search) > 0);
 
-               BUG_ON(i.r != end(t->data) &&
+               BUG_ON(i.r != bset_bkey_last(t->data) &&
                       bkey_cmp(i.r, search) <= 0);
        }
 
@@ -856,22 +1011,17 @@ struct bkey *__bch_bset_search(struct btree *b, struct bset_tree *t,
 
        return i.l;
 }
+EXPORT_SYMBOL(__bch_bset_search);
 
 /* Btree iterator */
 
-/*
- * Returns true if l > r - unless l == r, in which case returns true if l is
- * older than r.
- *
- * Necessary for btree_sort_fixup() - if there are multiple keys that compare
- * equal in different sets, we have to process them newest to oldest.
- */
+typedef bool (btree_iter_cmp_fn)(struct btree_iter_set,
+                                struct btree_iter_set);
+
 static inline bool btree_iter_cmp(struct btree_iter_set l,
                                  struct btree_iter_set r)
 {
-       int64_t c = bkey_cmp(&START_KEY(l.k), &START_KEY(r.k));
-
-       return c ? c > 0 : l.k < r.k;
+       return bkey_cmp(l.k, r.k) > 0;
 }
 
 static inline bool btree_iter_end(struct btree_iter *iter)
@@ -888,8 +1038,10 @@ void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k,
                                 btree_iter_cmp));
 }
 
-struct bkey *__bch_btree_iter_init(struct btree *b, struct btree_iter *iter,
-                                  struct bkey *search, struct bset_tree *start)
+static struct bkey *__bch_btree_iter_init(struct btree_keys *b,
+                                         struct btree_iter *iter,
+                                         struct bkey *search,
+                                         struct bset_tree *start)
 {
        struct bkey *ret = NULL;
        iter->size = ARRAY_SIZE(iter->data);
@@ -899,15 +1051,24 @@ struct bkey *__bch_btree_iter_init(struct btree *b, struct btree_iter *iter,
        iter->b = b;
 #endif
 
-       for (; start <= &b->sets[b->nsets]; start++) {
+       for (; start <= bset_tree_last(b); start++) {
                ret = bch_bset_search(b, start, search);
-               bch_btree_iter_push(iter, ret, end(start->data));
+               bch_btree_iter_push(iter, ret, bset_bkey_last(start->data));
        }
 
        return ret;
 }
 
-struct bkey *bch_btree_iter_next(struct btree_iter *iter)
+struct bkey *bch_btree_iter_init(struct btree_keys *b,
+                                struct btree_iter *iter,
+                                struct bkey *search)
+{
+       return __bch_btree_iter_init(b, iter, search, b->set);
+}
+EXPORT_SYMBOL(bch_btree_iter_init);
+
+static inline struct bkey *__bch_btree_iter_next(struct btree_iter *iter,
+                                                btree_iter_cmp_fn *cmp)
 {
        struct btree_iter_set unused;
        struct bkey *ret = NULL;
@@ -924,16 +1085,23 @@ struct bkey *bch_btree_iter_next(struct btree_iter *iter)
                }
 
                if (iter->data->k == iter->data->end)
-                       heap_pop(iter, unused, btree_iter_cmp);
+                       heap_pop(iter, unused, cmp);
                else
-                       heap_sift(iter, 0, btree_iter_cmp);
+                       heap_sift(iter, 0, cmp);
        }
 
        return ret;
 }
 
+struct bkey *bch_btree_iter_next(struct btree_iter *iter)
+{
+       return __bch_btree_iter_next(iter, btree_iter_cmp);
+}
+EXPORT_SYMBOL(bch_btree_iter_next);
+
 struct bkey *bch_btree_iter_next_filter(struct btree_iter *iter,
-                                       struct btree *b, ptr_filter_fn fn)
+                                       struct btree_keys *b, ptr_filter_fn fn)
 {
        struct bkey *ret;
 
@@ -946,70 +1114,58 @@ struct bkey *bch_btree_iter_next_filter(struct btree_iter *iter,
 
 /* Mergesort */
 
-static void sort_key_next(struct btree_iter *iter,
-                         struct btree_iter_set *i)
+void bch_bset_sort_state_free(struct bset_sort_state *state)
 {
-       i->k = bkey_next(i->k);
-
-       if (i->k == i->end)
-               *i = iter->data[--iter->used];
+       if (state->pool)
+               mempool_destroy(state->pool);
 }
 
-static void btree_sort_fixup(struct btree_iter *iter)
+int bch_bset_sort_state_init(struct bset_sort_state *state, unsigned page_order)
 {
-       while (iter->used > 1) {
-               struct btree_iter_set *top = iter->data, *i = top + 1;
+       spin_lock_init(&state->time.lock);
 
-               if (iter->used > 2 &&
-                   btree_iter_cmp(i[0], i[1]))
-                       i++;
+       state->page_order = page_order;
+       state->crit_factor = int_sqrt(1 << page_order);
 
-               if (bkey_cmp(top->k, &START_KEY(i->k)) <= 0)
-                       break;
-
-               if (!KEY_SIZE(i->k)) {
-                       sort_key_next(iter, i);
-                       heap_sift(iter, i - top, btree_iter_cmp);
-                       continue;
-               }
-
-               if (top->k > i->k) {
-                       if (bkey_cmp(top->k, i->k) >= 0)
-                               sort_key_next(iter, i);
-                       else
-                               bch_cut_front(top->k, i->k);
+       state->pool = mempool_create_page_pool(1, page_order);
+       if (!state->pool)
+               return -ENOMEM;
 
-                       heap_sift(iter, i - top, btree_iter_cmp);
-               } else {
-                       /* can't happen because of comparison func */
-                       BUG_ON(!bkey_cmp(&START_KEY(top->k), &START_KEY(i->k)));
-                       bch_cut_back(&START_KEY(i->k), top->k);
-               }
-       }
+       return 0;
 }
+EXPORT_SYMBOL(bch_bset_sort_state_init);
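+
+/*
+ * A sketch of how a cache set might wire this up, assuming bucket_pages()
+ * yields the btree node size in pages:
+ *
+ *	if (bch_bset_sort_state_init(&c->sort, ilog2(bucket_pages(c))))
+ *		goto err;
+ *	...
+ *	bch_bset_sort_state_free(&c->sort);
+ */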
 
-static void btree_mergesort(struct btree *b, struct bset *out,
+static void btree_mergesort(struct btree_keys *b, struct bset *out,
                            struct btree_iter *iter,
                            bool fixup, bool remove_stale)
 {
+       int i;
        struct bkey *k, *last = NULL;
-       bool (*bad)(struct btree *, const struct bkey *) = remove_stale
+       BKEY_PADDED(k) tmp;
+       bool (*bad)(struct btree_keys *, const struct bkey *) = remove_stale
                ? bch_ptr_bad
                : bch_ptr_invalid;
 
+       /* Heapify the iterator, using our comparison function */
+       for (i = iter->used / 2 - 1; i >= 0; --i)
+               heap_sift(iter, i, b->ops->sort_cmp);
+
        while (!btree_iter_end(iter)) {
-               if (fixup && !b->level)
-                       btree_sort_fixup(iter);
+               if (b->ops->sort_fixup && fixup)
+                       k = b->ops->sort_fixup(iter, &tmp.k);
+               else
+                       k = NULL;
+
+               if (!k)
+                       k = __bch_btree_iter_next(iter, b->ops->sort_cmp);
 
-               k = bch_btree_iter_next(iter);
                if (bad(b, k))
                        continue;
 
                if (!last) {
                        last = out->start;
                        bkey_copy(last, k);
-               } else if (b->level ||
-                          !bch_bkey_try_merge(b, last, k)) {
+               } else if (!bch_bkey_try_merge(b, last, k)) {
                        last = bkey_next(last);
                        bkey_copy(last, k);
                }
@@ -1020,27 +1176,27 @@ static void btree_mergesort(struct btree *b, struct bset *out,
        pr_debug("sorted %i keys", out->keys);
 }
 
-static void __btree_sort(struct btree *b, struct btree_iter *iter,
-                        unsigned start, unsigned order, bool fixup)
+static void __btree_sort(struct btree_keys *b, struct btree_iter *iter,
+                        unsigned start, unsigned order, bool fixup,
+                        struct bset_sort_state *state)
 {
        uint64_t start_time;
-       bool remove_stale = !b->written;
+       bool used_mempool = false;
        struct bset *out = (void *) __get_free_pages(__GFP_NOWARN|GFP_NOIO,
                                                     order);
        if (!out) {
-               mutex_lock(&b->c->sort_lock);
-               out = b->c->sort;
-               order = ilog2(bucket_pages(b->c));
+               BUG_ON(order > state->page_order);
+
+               out = page_address(mempool_alloc(state->pool, GFP_NOIO));
+               used_mempool = true;
+               order = state->page_order;
        }
 
        start_time = local_clock();
 
-       btree_mergesort(b, out, iter, fixup, remove_stale);
+       btree_mergesort(b, out, iter, fixup, false);
        b->nsets = start;
 
-       if (!fixup && !start && b->written)
-               bch_btree_verify(b, out);
-
        if (!start && order == b->page_order) {
                /*
                 * Our temporary buffer is the same size as the btree node's
@@ -1048,84 +1204,76 @@ static void __btree_sort(struct btree *b, struct btree_iter *iter,
                 * memcpy()
                 */
 
-               out->magic      = bset_magic(&b->c->sb);
-               out->seq        = b->sets[0].data->seq;
-               out->version    = b->sets[0].data->version;
-               swap(out, b->sets[0].data);
-
-               if (b->c->sort == b->sets[0].data)
-                       b->c->sort = out;
+               out->magic      = b->set->data->magic;
+               out->seq        = b->set->data->seq;
+               out->version    = b->set->data->version;
+               swap(out, b->set->data);
        } else {
-               b->sets[start].data->keys = out->keys;
-               memcpy(b->sets[start].data->start, out->start,
-                      (void *) end(out) - (void *) out->start);
+               b->set[start].data->keys = out->keys;
+               memcpy(b->set[start].data->start, out->start,
+                      (void *) bset_bkey_last(out) - (void *) out->start);
        }
 
-       if (out == b->c->sort)
-               mutex_unlock(&b->c->sort_lock);
+       if (used_mempool)
+               mempool_free(virt_to_page(out), state->pool);
        else
                free_pages((unsigned long) out, order);
 
-       if (b->written)
-               bset_build_written_tree(b);
+       bch_bset_build_written_tree(b);
 
        if (!start)
-               bch_time_stats_update(&b->c->sort_time, start_time);
+               bch_time_stats_update(&state->time, start_time);
 }
 
-void bch_btree_sort_partial(struct btree *b, unsigned start)
+void bch_btree_sort_partial(struct btree_keys *b, unsigned start,
+                           struct bset_sort_state *state)
 {
        size_t order = b->page_order, keys = 0;
        struct btree_iter iter;
        int oldsize = bch_count_data(b);
 
-       __bch_btree_iter_init(b, &iter, NULL, &b->sets[start]);
-
-       BUG_ON(b->sets[b->nsets].data == write_block(b) &&
-              (b->sets[b->nsets].size || b->nsets));
-
+       __bch_btree_iter_init(b, &iter, NULL, &b->set[start]);
 
        if (start) {
                unsigned i;
 
                for (i = start; i <= b->nsets; i++)
-                       keys += b->sets[i].data->keys;
+                       keys += b->set[i].data->keys;
 
-               order = roundup_pow_of_two(__set_bytes(b->sets->data,
-                                                      keys)) / PAGE_SIZE;
-               if (order)
-                       order = ilog2(order);
+               order = get_order(__set_bytes(b->set->data, keys));
        }
 
-       __btree_sort(b, &iter, start, order, false);
+       __btree_sort(b, &iter, start, order, false, state);
 
-       EBUG_ON(b->written && oldsize >= 0 && bch_count_data(b) != oldsize);
+       EBUG_ON(oldsize >= 0 && bch_count_data(b) != oldsize);
 }
+EXPORT_SYMBOL(bch_btree_sort_partial);
 
-void bch_btree_sort_and_fix_extents(struct btree *b, struct btree_iter *iter)
+void bch_btree_sort_and_fix_extents(struct btree_keys *b,
+                                   struct btree_iter *iter,
+                                   struct bset_sort_state *state)
 {
-       BUG_ON(!b->written);
-       __btree_sort(b, iter, 0, b->page_order, true);
+       __btree_sort(b, iter, 0, b->page_order, true, state);
 }
 
-void bch_btree_sort_into(struct btree *b, struct btree *new)
+void bch_btree_sort_into(struct btree_keys *b, struct btree_keys *new,
+                        struct bset_sort_state *state)
 {
        uint64_t start_time = local_clock();
 
        struct btree_iter iter;
        bch_btree_iter_init(b, &iter, NULL);
 
-       btree_mergesort(b, new->sets->data, &iter, false, true);
+       btree_mergesort(b, new->set->data, &iter, false, true);
 
-       bch_time_stats_update(&b->c->sort_time, start_time);
+       bch_time_stats_update(&state->time, start_time);
 
-       bkey_copy_key(&new->key, &b->key);
-       new->sets->size = 0;
+       new->set->size = 0; /* XXX: why? */
 }
 
 #define SORT_CRIT      (4096 / sizeof(uint64_t))
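+
+/*
+ * SORT_CRIT works out to 512 keys. Walking sets newest to oldest, the
+ * threshold is multiplied by crit_factor (int_sqrt(1 << page_order), e.g. 2
+ * for a four-page node) at every step, so sets i..nsets are merged once set
+ * i holds fewer than 512 * crit_factor^(nsets - i) keys - older, larger
+ * sets are rewritten exponentially less often.
+ */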
 
-void bch_btree_sort_lazy(struct btree *b)
+void bch_btree_sort_lazy(struct btree_keys *b, struct bset_sort_state *state)
 {
        unsigned crit = SORT_CRIT;
        int i;
@@ -1134,50 +1282,32 @@ void bch_btree_sort_lazy(struct btree *b)
        if (!b->nsets)
                goto out;
 
-       /* If not a leaf node, always sort */
-       if (b->level) {
-               bch_btree_sort(b);
-               return;
-       }
-
        for (i = b->nsets - 1; i >= 0; --i) {
-               crit *= b->c->sort_crit_factor;
+               crit *= state->crit_factor;
 
-               if (b->sets[i].data->keys < crit) {
-                       bch_btree_sort_partial(b, i);
+               if (b->set[i].data->keys < crit) {
+                       bch_btree_sort_partial(b, i, state);
                        return;
                }
        }
 
        /* Sort if we'd overflow */
        if (b->nsets + 1 == MAX_BSETS) {
-               bch_btree_sort(b);
+               bch_btree_sort(b, state);
                return;
        }
 
 out:
-       bset_build_written_tree(b);
+       bch_bset_build_written_tree(b);
 }
+EXPORT_SYMBOL(bch_btree_sort_lazy);
 
-/* Sysfs stuff */
-
-struct bset_stats {
-       struct btree_op op;
-       size_t nodes;
-       size_t sets_written, sets_unwritten;
-       size_t bytes_written, bytes_unwritten;
-       size_t floats, failed;
-};
-
-static int btree_bset_stats(struct btree_op *op, struct btree *b)
+void bch_btree_keys_stats(struct btree_keys *b, struct bset_stats *stats)
 {
-       struct bset_stats *stats = container_of(op, struct bset_stats, op);
        unsigned i;
 
-       stats->nodes++;
-
        for (i = 0; i <= b->nsets; i++) {
-               struct bset_tree *t = &b->sets[i];
+               struct bset_tree *t = &b->set[i];
                size_t bytes = t->data->keys * sizeof(uint64_t);
                size_t j;
 
@@ -1195,32 +1325,4 @@ static int btree_bset_stats(struct btree_op *op, struct btree *b)
                        stats->bytes_unwritten += bytes;
                }
        }
-
-       return MAP_CONTINUE;
-}
-
-int bch_bset_print_stats(struct cache_set *c, char *buf)
-{
-       struct bset_stats t;
-       int ret;
-
-       memset(&t, 0, sizeof(struct bset_stats));
-       bch_btree_op_init(&t.op, -1);
-
-       ret = bch_btree_map_nodes(&t.op, c, &ZERO_KEY, btree_bset_stats);
-       if (ret < 0)
-               return ret;
-
-       return snprintf(buf, PAGE_SIZE,
-                       "btree nodes:           %zu\n"
-                       "written sets:          %zu\n"
-                       "unwritten sets:                %zu\n"
-                       "written key bytes:     %zu\n"
-                       "unwritten key bytes:   %zu\n"
-                       "floats:                        %zu\n"
-                       "failed:                        %zu\n",
-                       t.nodes,
-                       t.sets_written, t.sets_unwritten,
-                       t.bytes_written, t.bytes_unwritten,
-                       t.floats, t.failed);
 }
index 1d3c24f..003260f 100644
@@ -1,7 +1,11 @@
 #ifndef _BCACHE_BSET_H
 #define _BCACHE_BSET_H
 
-#include <linux/slab.h>
+#include <linux/bcache.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+#include "util.h" /* for time_stats */
 
 /*
  * BKEYS:
  * first key in that range of bytes again.
  */
 
-/* Btree key comparison/iteration */
+struct btree_keys;
+struct btree_iter;
+struct btree_iter_set;
+struct bkey_float;
 
 #define MAX_BSETS              4U
 
-struct btree_iter {
-       size_t size, used;
-#ifdef CONFIG_BCACHE_DEBUG
-       struct btree *b;
-#endif
-       struct btree_iter_set {
-               struct bkey *k, *end;
-       } data[MAX_BSETS];
-};
-
 struct bset_tree {
        /*
         * We construct a binary tree in an array as if the array
@@ -165,14 +162,14 @@ struct bset_tree {
         */
 
        /* size of the binary tree and prev array */
-       unsigned        size;
+       unsigned                size;
 
        /* function of size - precalculated for to_inorder() */
-       unsigned        extra;
+       unsigned                extra;
 
        /* copy of the last key in the set */
-       struct bkey     end;
-       struct bkey_float *tree;
+       struct bkey             end;
+       struct bkey_float       *tree;
 
        /*
         * The nodes in the bset tree point to specific keys - this
@@ -182,12 +179,219 @@ struct bset_tree {
         * to keep bkey_float to 4 bytes and prev isn't used in the fast
         * path.
         */
-       uint8_t         *prev;
+       uint8_t                 *prev;
 
        /* The actual btree node, with pointers to each sorted set */
-       struct bset     *data;
+       struct bset             *data;
+};
+
+struct btree_keys_ops {
+       bool            (*sort_cmp)(struct btree_iter_set,
+                                   struct btree_iter_set);
+       struct bkey     *(*sort_fixup)(struct btree_iter *, struct bkey *);
+       bool            (*insert_fixup)(struct btree_keys *, struct bkey *,
+                                       struct btree_iter *, struct bkey *);
+       bool            (*key_invalid)(struct btree_keys *,
+                                      const struct bkey *);
+       bool            (*key_bad)(struct btree_keys *, const struct bkey *);
+       bool            (*key_merge)(struct btree_keys *,
+                                    struct bkey *, struct bkey *);
+       void            (*key_to_text)(char *, size_t, const struct bkey *);
+       void            (*key_dump)(struct btree_keys *, const struct bkey *);
+
+       /*
+        * Only used for deciding whether to use START_KEY(k) or just the key
+        * itself in a couple places
+        */
+       bool            is_extents;
+};
+
+struct btree_keys {
+       const struct btree_keys_ops     *ops;
+       uint8_t                 page_order;
+       uint8_t                 nsets;
+       unsigned                last_set_unwritten:1;
+       bool                    *expensive_debug_checks;
+
+       /*
+        * Sets of sorted keys - the real btree node - plus a binary search tree
+        *
+        * set[0] is special; set[0].tree, set[0].prev and set[0].data point
+        * to the memory we have allocated for this btree node. Additionally,
+        * set[0].data points to the entire btree node as it exists on disk.
+        */
+       struct bset_tree        set[MAX_BSETS];
+};
+
+static inline struct bset_tree *bset_tree_last(struct btree_keys *b)
+{
+       return b->set + b->nsets;
+}
+
+static inline bool bset_written(struct btree_keys *b, struct bset_tree *t)
+{
+       return t <= b->set + b->nsets - b->last_set_unwritten;
+}
+
+static inline bool bkey_written(struct btree_keys *b, struct bkey *k)
+{
+       return !b->last_set_unwritten || k < b->set[b->nsets].data->start;
+}
+
+static inline unsigned bset_byte_offset(struct btree_keys *b, struct bset *i)
+{
+       return ((size_t) i) - ((size_t) b->set->data);
+}
+
+static inline unsigned bset_sector_offset(struct btree_keys *b, struct bset *i)
+{
+       return bset_byte_offset(b, i) >> 9;
+}
+
+#define __set_bytes(i, k)      (sizeof(*(i)) + (k) * sizeof(uint64_t))
+#define set_bytes(i)           __set_bytes(i, i->keys)
+
+#define __set_blocks(i, k, block_bytes)                                \
+       DIV_ROUND_UP(__set_bytes(i, k), block_bytes)
+#define set_blocks(i, block_bytes)                             \
+       __set_blocks(i, (i)->keys, block_bytes)
+
+static inline size_t bch_btree_keys_u64s_remaining(struct btree_keys *b)
+{
+       struct bset_tree *t = bset_tree_last(b);
+
+       BUG_ON((PAGE_SIZE << b->page_order) <
+              (bset_byte_offset(b, t->data) + set_bytes(t->data)));
+
+       if (!b->last_set_unwritten)
+               return 0;
+
+       return ((PAGE_SIZE << b->page_order) -
+               (bset_byte_offset(b, t->data) + set_bytes(t->data))) /
+               sizeof(u64);
+}
+
+static inline struct bset *bset_next_set(struct btree_keys *b,
+                                        unsigned block_bytes)
+{
+       struct bset *i = bset_tree_last(b)->data;
+
+       return ((void *) i) + roundup(set_bytes(i), block_bytes);
+}
+
+void bch_btree_keys_free(struct btree_keys *);
+int bch_btree_keys_alloc(struct btree_keys *, unsigned, gfp_t);
+void bch_btree_keys_init(struct btree_keys *, const struct btree_keys_ops *,
+                        bool *);
+
+void bch_bset_init_next(struct btree_keys *, struct bset *, uint64_t);
+void bch_bset_build_written_tree(struct btree_keys *);
+void bch_bset_fix_invalidated_key(struct btree_keys *, struct bkey *);
+bool bch_bkey_try_merge(struct btree_keys *, struct bkey *, struct bkey *);
+void bch_bset_insert(struct btree_keys *, struct bkey *, struct bkey *);
+unsigned bch_btree_insert_key(struct btree_keys *, struct bkey *,
+                             struct bkey *);
+
+enum {
+       BTREE_INSERT_STATUS_NO_INSERT = 0,
+       BTREE_INSERT_STATUS_INSERT,
+       BTREE_INSERT_STATUS_BACK_MERGE,
+       BTREE_INSERT_STATUS_OVERWROTE,
+       BTREE_INSERT_STATUS_FRONT_MERGE,
 };
 
+/* Btree key iteration */
+
+struct btree_iter {
+       size_t size, used;
+#ifdef CONFIG_BCACHE_DEBUG
+       struct btree_keys *b;
+#endif
+       struct btree_iter_set {
+               struct bkey *k, *end;
+       } data[MAX_BSETS];
+};
+
+typedef bool (*ptr_filter_fn)(struct btree_keys *, const struct bkey *);
+
+struct bkey *bch_btree_iter_next(struct btree_iter *);
+struct bkey *bch_btree_iter_next_filter(struct btree_iter *,
+                                       struct btree_keys *, ptr_filter_fn);
+
+void bch_btree_iter_push(struct btree_iter *, struct bkey *, struct bkey *);
+struct bkey *bch_btree_iter_init(struct btree_keys *, struct btree_iter *,
+                                struct bkey *);
+
+struct bkey *__bch_bset_search(struct btree_keys *, struct bset_tree *,
+                              const struct bkey *);
+
+/*
+ * Returns the first key that is strictly greater than search
+ */
+static inline struct bkey *bch_bset_search(struct btree_keys *b,
+                                          struct bset_tree *t,
+                                          const struct bkey *search)
+{
+       return search ? __bch_bset_search(b, t, search) : t->data->start;
+}
+
+#define for_each_key_filter(b, k, iter, filter)                                \
+       for (bch_btree_iter_init((b), (iter), NULL);                    \
+            ((k) = bch_btree_iter_next_filter((iter), (b), filter));)
+
+#define for_each_key(b, k, iter)                                       \
+       for (bch_btree_iter_init((b), (iter), NULL);                    \
+            ((k) = bch_btree_iter_next(iter));)
+
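+/*
+ * Typical iteration over a node's keys, skipping bad pointers;
+ * do_something() is a placeholder:
+ *
+ *	struct btree_iter iter;
+ *	struct bkey *k;
+ *
+ *	for_each_key_filter(b, k, &iter, bch_ptr_bad)
+ *		do_something(b, k);
+ */
+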
+/* Sorting */
+
+struct bset_sort_state {
+       mempool_t               *pool;
+
+       unsigned                page_order;
+       unsigned                crit_factor;
+
+       struct time_stats       time;
+};
+
+void bch_bset_sort_state_free(struct bset_sort_state *);
+int bch_bset_sort_state_init(struct bset_sort_state *, unsigned);
+void bch_btree_sort_lazy(struct btree_keys *, struct bset_sort_state *);
+void bch_btree_sort_into(struct btree_keys *, struct btree_keys *,
+                        struct bset_sort_state *);
+void bch_btree_sort_and_fix_extents(struct btree_keys *, struct btree_iter *,
+                                   struct bset_sort_state *);
+void bch_btree_sort_partial(struct btree_keys *, unsigned,
+                           struct bset_sort_state *);
+
+static inline void bch_btree_sort(struct btree_keys *b,
+                                 struct bset_sort_state *state)
+{
+       bch_btree_sort_partial(b, 0, state);
+}
+
+struct bset_stats {
+       size_t sets_written, sets_unwritten;
+       size_t bytes_written, bytes_unwritten;
+       size_t floats, failed;
+};
+
+void bch_btree_keys_stats(struct btree_keys *, struct bset_stats *);
+
+/* Bkey utility code */
+
+#define bset_bkey_last(i)      bkey_idx((struct bkey *) (i)->d, (i)->keys)
+
+static inline struct bkey *bset_bkey_idx(struct bset *i, unsigned idx)
+{
+       return bkey_idx(i->start, idx);
+}
+
+static inline void bkey_init(struct bkey *k)
+{
+       *k = ZERO_KEY;
+}
+
 static __always_inline int64_t bkey_cmp(const struct bkey *l,
                                        const struct bkey *r)
 {
@@ -196,6 +400,62 @@ static __always_inline int64_t bkey_cmp(const struct bkey *l,
                : (int64_t) KEY_OFFSET(l) - (int64_t) KEY_OFFSET(r);
 }
 
+void bch_bkey_copy_single_ptr(struct bkey *, const struct bkey *,
+                             unsigned);
+bool __bch_cut_front(const struct bkey *, struct bkey *);
+bool __bch_cut_back(const struct bkey *, struct bkey *);
+
+static inline bool bch_cut_front(const struct bkey *where, struct bkey *k)
+{
+       BUG_ON(bkey_cmp(where, k) > 0);
+       return __bch_cut_front(where, k);
+}
+
+static inline bool bch_cut_back(const struct bkey *where, struct bkey *k)
+{
+       BUG_ON(bkey_cmp(where, &START_KEY(k)) < 0);
+       return __bch_cut_back(where, k);
+}
+
+#define PRECEDING_KEY(_k)                                      \
+({                                                             \
+       struct bkey *_ret = NULL;                               \
+                                                               \
+       if (KEY_INODE(_k) || KEY_OFFSET(_k)) {                  \
+               _ret = &KEY(KEY_INODE(_k), KEY_OFFSET(_k), 0);  \
+                                                               \
+               if (!_ret->low)                                 \
+                       _ret->high--;                           \
+               _ret->low--;                                    \
+       }                                                       \
+                                                               \
+       _ret;                                                   \
+})
+
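+/*
+ * PRECEDING_KEY() builds, on the caller's stack, the key immediately before
+ * _k in sort order: KEY(1, 10, 0) yields (1, 9), and KEY(1, 0, 0) borrows
+ * into the high word, yielding (0, U64_MAX). For the zero key there is no
+ * predecessor, so the macro evaluates to NULL, which bch_bset_search()
+ * treats as "start from the first key".
+ */
+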
+static inline bool bch_ptr_invalid(struct btree_keys *b, const struct bkey *k)
+{
+       return b->ops->key_invalid(b, k);
+}
+
+static inline bool bch_ptr_bad(struct btree_keys *b, const struct bkey *k)
+{
+       return b->ops->key_bad(b, k);
+}
+
+static inline void bch_bkey_to_text(struct btree_keys *b, char *buf,
+                                   size_t size, const struct bkey *k)
+{
+       return b->ops->key_to_text(buf, size, k);
+}
+
+static inline bool bch_bkey_equal_header(const struct bkey *l,
+                                        const struct bkey *r)
+{
+       return (KEY_DIRTY(l) == KEY_DIRTY(r) &&
+               KEY_PTRS(l) == KEY_PTRS(r) &&
+               KEY_CSUM(l) == KEY_CSUM(r));
+}
+
 /* Keylists */
 
 struct keylist {
@@ -257,136 +517,44 @@ static inline size_t bch_keylist_bytes(struct keylist *l)
 
 struct bkey *bch_keylist_pop(struct keylist *);
 void bch_keylist_pop_front(struct keylist *);
-int bch_keylist_realloc(struct keylist *, int, struct cache_set *);
-
-void bch_bkey_copy_single_ptr(struct bkey *, const struct bkey *,
-                             unsigned);
-bool __bch_cut_front(const struct bkey *, struct bkey *);
-bool __bch_cut_back(const struct bkey *, struct bkey *);
+int __bch_keylist_realloc(struct keylist *, unsigned);
 
-static inline bool bch_cut_front(const struct bkey *where, struct bkey *k)
-{
-       BUG_ON(bkey_cmp(where, k) > 0);
-       return __bch_cut_front(where, k);
-}
+/* Debug stuff */
 
-static inline bool bch_cut_back(const struct bkey *where, struct bkey *k)
-{
-       BUG_ON(bkey_cmp(where, &START_KEY(k)) < 0);
-       return __bch_cut_back(where, k);
-}
-
-const char *bch_ptr_status(struct cache_set *, const struct bkey *);
-bool bch_btree_ptr_invalid(struct cache_set *, const struct bkey *);
-bool bch_extent_ptr_invalid(struct cache_set *, const struct bkey *);
-
-bool bch_ptr_bad(struct btree *, const struct bkey *);
-
-static inline uint8_t gen_after(uint8_t a, uint8_t b)
-{
-       uint8_t r = a - b;
-       return r > 128U ? 0 : r;
-}
-
-static inline uint8_t ptr_stale(struct cache_set *c, const struct bkey *k,
-                               unsigned i)
-{
-       return gen_after(PTR_BUCKET(c, k, i)->gen, PTR_GEN(k, i));
-}
-
-static inline bool ptr_available(struct cache_set *c, const struct bkey *k,
-                                unsigned i)
-{
-       return (PTR_DEV(k, i) < MAX_CACHES_PER_SET) && PTR_CACHE(c, k, i);
-}
-
-
-typedef bool (*ptr_filter_fn)(struct btree *, const struct bkey *);
-
-struct bkey *bch_btree_iter_next(struct btree_iter *);
-struct bkey *bch_btree_iter_next_filter(struct btree_iter *,
-                                       struct btree *, ptr_filter_fn);
-
-void bch_btree_iter_push(struct btree_iter *, struct bkey *, struct bkey *);
-struct bkey *__bch_btree_iter_init(struct btree *, struct btree_iter *,
-                                  struct bkey *, struct bset_tree *);
-
-/* 32 bits total: */
-#define BKEY_MID_BITS          3
-#define BKEY_EXPONENT_BITS     7
-#define BKEY_MANTISSA_BITS     22
-#define BKEY_MANTISSA_MASK     ((1 << BKEY_MANTISSA_BITS) - 1)
-
-struct bkey_float {
-       unsigned        exponent:BKEY_EXPONENT_BITS;
-       unsigned        m:BKEY_MID_BITS;
-       unsigned        mantissa:BKEY_MANTISSA_BITS;
-} __packed;
-
-/*
- * BSET_CACHELINE was originally intended to match the hardware cacheline size -
- * it used to be 64, but I realized the lookup code would touch slightly less
- * memory if it was 128.
- *
- * It definites the number of bytes (in struct bset) per struct bkey_float in
- * the auxiliar search tree - when we're done searching the bset_float tree we
- * have this many bytes left that we do a linear search over.
- *
- * Since (after level 5) every level of the bset_tree is on a new cacheline,
- * we're touching one fewer cacheline in the bset tree in exchange for one more
- * cacheline in the linear search - but the linear search might stop before it
- * gets to the second cacheline.
- */
-
-#define BSET_CACHELINE         128
-#define bset_tree_space(b)     (btree_data_space(b) / BSET_CACHELINE)
+#ifdef CONFIG_BCACHE_DEBUG
 
-#define bset_tree_bytes(b)     (bset_tree_space(b) * sizeof(struct bkey_float))
-#define bset_prev_bytes(b)     (bset_tree_space(b) * sizeof(uint8_t))
+int __bch_count_data(struct btree_keys *);
+void __bch_check_keys(struct btree_keys *, const char *, ...);
+void bch_dump_bset(struct btree_keys *, struct bset *, unsigned);
+void bch_dump_bucket(struct btree_keys *);
 
-void bch_bset_init_next(struct btree *);
+#else
 
-void bch_bset_fix_invalidated_key(struct btree *, struct bkey *);
-void bch_bset_fix_lookup_table(struct btree *, struct bkey *);
+static inline int __bch_count_data(struct btree_keys *b) { return -1; }
+static inline void __bch_check_keys(struct btree_keys *b, const char *fmt, ...) {}
+static inline void bch_dump_bucket(struct btree_keys *b) {}
+void bch_dump_bset(struct btree_keys *, struct bset *, unsigned);
 
-struct bkey *__bch_bset_search(struct btree *, struct bset_tree *,
-                          const struct bkey *);
+#endif
 
-/*
- * Returns the first key that is strictly greater than search
- */
-static inline struct bkey *bch_bset_search(struct btree *b, struct bset_tree *t,
-                                          const struct bkey *search)
+static inline bool btree_keys_expensive_checks(struct btree_keys *b)
 {
-       return search ? __bch_bset_search(b, t, search) : t->data->start;
+#ifdef CONFIG_BCACHE_DEBUG
+       return *b->expensive_debug_checks;
+#else
+       return false;
+#endif
 }
 
-#define PRECEDING_KEY(_k)                                      \
-({                                                             \
-       struct bkey *_ret = NULL;                               \
-                                                               \
-       if (KEY_INODE(_k) || KEY_OFFSET(_k)) {                  \
-               _ret = &KEY(KEY_INODE(_k), KEY_OFFSET(_k), 0);  \
-                                                               \
-               if (!_ret->low)                                 \
-                       _ret->high--;                           \
-               _ret->low--;                                    \
-       }                                                       \
-                                                               \
-       _ret;                                                   \
-})
-
-bool bch_bkey_try_merge(struct btree *, struct bkey *, struct bkey *);
-void bch_btree_sort_lazy(struct btree *);
-void bch_btree_sort_into(struct btree *, struct btree *);
-void bch_btree_sort_and_fix_extents(struct btree *, struct btree_iter *);
-void bch_btree_sort_partial(struct btree *, unsigned);
-
-static inline void bch_btree_sort(struct btree *b)
+static inline int bch_count_data(struct btree_keys *b)
 {
-       bch_btree_sort_partial(b, 0);
+       return btree_keys_expensive_checks(b) ? __bch_count_data(b) : -1;
 }
 
-int bch_bset_print_stats(struct cache_set *, char *);
+#define bch_check_keys(b, ...)                                         \
+do {                                                                   \
+       if (btree_keys_expensive_checks(b))                             \
+               __bch_check_keys(b, __VA_ARGS__);                       \
+} while (0)
 
 #endif
index 31bb53f..98cc0a8 100644
@@ -23,7 +23,7 @@
 #include "bcache.h"
 #include "btree.h"
 #include "debug.h"
-#include "writeback.h"
+#include "extents.h"
 
 #include <linux/slab.h>
 #include <linux/bitops.h>
  * Test module load/unload
  */
 
-enum {
-       BTREE_INSERT_STATUS_INSERT,
-       BTREE_INSERT_STATUS_BACK_MERGE,
-       BTREE_INSERT_STATUS_OVERWROTE,
-       BTREE_INSERT_STATUS_FRONT_MERGE,
-};
-
 #define MAX_NEED_GC            64
 #define MAX_SAVE_PRIO          72
 
@@ -106,14 +99,6 @@ enum {
 
 static struct workqueue_struct *btree_io_wq;
 
-static inline bool should_split(struct btree *b)
-{
-       struct bset *i = write_block(b);
-       return b->written >= btree_blocks(b) ||
-               (b->written + __set_blocks(i, i->keys + 15, b->c)
-                > btree_blocks(b));
-}
-
 #define insert_lock(s, b)      ((b)->level <= (s)->lock)
 
 /*
@@ -167,6 +152,8 @@ static inline bool should_split(struct btree *b)
                        _r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__);   \
                }                                                       \
                rw_unlock(_w, _b);                                      \
+               if (_r == -EINTR)                                       \
+                       schedule();                                     \
                bch_cannibalize_unlock(c);                              \
                if (_r == -ENOSPC) {                                    \
                        wait_event((c)->try_wait,                       \
@@ -175,9 +162,15 @@ static inline bool should_split(struct btree *b)
                }                                                       \
        } while (_r == -EINTR);                                         \
                                                                        \
+       finish_wait(&(c)->bucket_wait, &(op)->wait);                    \
        _r;                                                             \
 })
 
+static inline struct bset *write_block(struct btree *b)
+{
+       return ((void *) btree_bset_first(b)) + b->written * block_bytes(b->c);
+}
+
 /* Btree key manipulation */
 
 void bkey_put(struct cache_set *c, struct bkey *k)
@@ -194,16 +187,16 @@ void bkey_put(struct cache_set *c, struct bkey *k)
 static uint64_t btree_csum_set(struct btree *b, struct bset *i)
 {
        uint64_t crc = b->key.ptr[0];
-       void *data = (void *) i + 8, *end = end(i);
+       void *data = (void *) i + 8, *end = bset_bkey_last(i);
 
        crc = bch_crc64_update(crc, data, end - data);
        return crc ^ 0xffffffffffffffffULL;
 }
 
-static void bch_btree_node_read_done(struct btree *b)
+void bch_btree_node_read_done(struct btree *b)
 {
        const char *err = "bad btree header";
-       struct bset *i = b->sets[0].data;
+       struct bset *i = btree_bset_first(b);
        struct btree_iter *iter;
 
        iter = mempool_alloc(b->c->fill_iter, GFP_NOWAIT);
@@ -211,21 +204,22 @@ static void bch_btree_node_read_done(struct btree *b)
        iter->used = 0;
 
 #ifdef CONFIG_BCACHE_DEBUG
-       iter->b = b;
+       iter->b = &b->keys;
 #endif
 
        if (!i->seq)
                goto err;
 
        for (;
-            b->written < btree_blocks(b) && i->seq == b->sets[0].data->seq;
+            b->written < btree_blocks(b) && i->seq == b->keys.set[0].data->seq;
             i = write_block(b)) {
                err = "unsupported bset version";
                if (i->version > BCACHE_BSET_VERSION)
                        goto err;
 
                err = "bad btree header";
-               if (b->written + set_blocks(i, b->c) > btree_blocks(b))
+               if (b->written + set_blocks(i, block_bytes(b->c)) >
+                   btree_blocks(b))
                        goto err;
 
                err = "bad magic";
@@ -245,39 +239,40 @@ static void bch_btree_node_read_done(struct btree *b)
                }
 
                err = "empty set";
-               if (i != b->sets[0].data && !i->keys)
+               if (i != b->keys.set[0].data && !i->keys)
                        goto err;
 
-               bch_btree_iter_push(iter, i->start, end(i));
+               bch_btree_iter_push(iter, i->start, bset_bkey_last(i));
 
-               b->written += set_blocks(i, b->c);
+               b->written += set_blocks(i, block_bytes(b->c));
        }
 
        err = "corrupted btree";
        for (i = write_block(b);
-            index(i, b) < btree_blocks(b);
+            bset_sector_offset(&b->keys, i) < KEY_SIZE(&b->key);
             i = ((void *) i) + block_bytes(b->c))
-               if (i->seq == b->sets[0].data->seq)
+               if (i->seq == b->keys.set[0].data->seq)
                        goto err;
 
-       bch_btree_sort_and_fix_extents(b, iter);
+       bch_btree_sort_and_fix_extents(&b->keys, iter, &b->c->sort);
 
-       i = b->sets[0].data;
+       i = b->keys.set[0].data;
        err = "short btree key";
-       if (b->sets[0].size &&
-           bkey_cmp(&b->key, &b->sets[0].end) < 0)
+       if (b->keys.set[0].size &&
+           bkey_cmp(&b->key, &b->keys.set[0].end) < 0)
                goto err;
 
        if (b->written < btree_blocks(b))
-               bch_bset_init_next(b);
+               bch_bset_init_next(&b->keys, write_block(b),
+                                  bset_magic(&b->c->sb));
 out:
        mempool_free(iter, b->c->fill_iter);
        return;
 err:
        set_btree_node_io_error(b);
-       bch_cache_set_error(b->c, "%s at bucket %zu, block %zu, %u keys",
+       bch_cache_set_error(b->c, "%s at bucket %zu, block %u, %u keys",
                            err, PTR_BUCKET_NR(b->c, &b->key, 0),
-                           index(i, b), i->keys);
+                           bset_block_offset(b, i), i->keys);
        goto out;
 }
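
The read path above walks bsets laid out back to back on disk, accepting each one whose seq matches the first bset's, then scans the remaining blocks to make sure no stray bset with the same seq follows (the "corrupted btree" check). A stripped-down sketch of the accept loop, with the per-bset validation (version, magic, size bounds) elided and an invented header struct:

#include <stdint.h>
#include <stdio.h>

struct bset_hdr { uint64_t seq; uint32_t blocks; };     /* illustrative */

/*
 * bsets sit back to back; every bset that belongs to the node carries
 * the same seq as the first. Stop at the first mismatch.
 */
static unsigned count_valid_bsets(const struct bset_hdr *set,
                                  unsigned nblocks)
{
        uint64_t seq = set[0].seq;
        unsigned i, n = 0;

        for (i = 0; i < nblocks && set[i].seq == seq; i += set[i].blocks)
                n++;
        return n;
}

int main(void)
{
        const struct bset_hdr sets[4] = {
                { 7, 1 }, { 7, 2 }, { 0, 1 }, { 0, 1 }
        };

        printf("%u valid bsets\n", count_valid_bsets(sets, 4));
        return 0;
}
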
 
@@ -287,7 +282,7 @@ static void btree_node_read_endio(struct bio *bio, int error)
        closure_put(cl);
 }
 
-void bch_btree_node_read(struct btree *b)
+static void bch_btree_node_read(struct btree *b)
 {
        uint64_t start_time = local_clock();
        struct closure cl;
@@ -299,11 +294,11 @@ void bch_btree_node_read(struct btree *b)
 
        bio = bch_bbio_alloc(b->c);
        bio->bi_rw      = REQ_META|READ_SYNC;
-       bio->bi_size    = KEY_SIZE(&b->key) << 9;
+       bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9;
        bio->bi_end_io  = btree_node_read_endio;
        bio->bi_private = &cl;
 
-       bch_bio_map(bio, b->sets[0].data);
+       bch_bio_map(bio, b->keys.set[0].data);
 
        bch_submit_bbio(bio, b->c, &b->key, 0);
        closure_sync(&cl);
@@ -340,9 +335,16 @@ static void btree_complete_write(struct btree *b, struct btree_write *w)
        w->journal      = NULL;
 }
 
+static void btree_node_write_unlock(struct closure *cl)
+{
+       struct btree *b = container_of(cl, struct btree, io);
+
+       up(&b->io_mutex);
+}
+
 static void __btree_node_write_done(struct closure *cl)
 {
-       struct btree *b = container_of(cl, struct btree, io.cl);
+       struct btree *b = container_of(cl, struct btree, io);
        struct btree_write *w = btree_prev_write(b);
 
        bch_bbio_free(b->bio, b->c);
@@ -353,16 +355,16 @@ static void __btree_node_write_done(struct closure *cl)
                queue_delayed_work(btree_io_wq, &b->work,
                                   msecs_to_jiffies(30000));
 
-       closure_return(cl);
+       closure_return_with_destructor(cl, btree_node_write_unlock);
 }
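
btree_node_write_unlock() above is why b->io_mutex is a semaphore rather than a mutex: a mutex must be released by the task that took it, but here down() happens in bch_btree_node_write() while the matching up() runs from whichever context drops the last reference on the closure. A userspace sketch of that cross-context hand-off using a POSIX semaphore (build with -pthread; all names invented):

#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>

static sem_t io_mutex;

/* Stands in for the closure destructor: it runs in a different
 * context than the one that took the semaphore, which a mutex's
 * ownership rules would forbid. */
static void *completion(void *arg)
{
        (void)arg;
        puts("write done, releasing io_mutex");
        sem_post(&io_mutex);            /* up(&b->io_mutex) */
        return NULL;
}

int main(void)
{
        pthread_t t;

        sem_init(&io_mutex, 0, 1);
        sem_wait(&io_mutex);            /* down() before issuing the write */
        pthread_create(&t, NULL, completion, NULL);

        sem_wait(&io_mutex);            /* blocks until completion() posts */
        puts("no write in flight any more");
        sem_post(&io_mutex);
        pthread_join(t, NULL);
        return 0;
}
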
 
 static void btree_node_write_done(struct closure *cl)
 {
-       struct btree *b = container_of(cl, struct btree, io.cl);
+       struct btree *b = container_of(cl, struct btree, io);
        struct bio_vec *bv;
        int n;
 
-       __bio_for_each_segment(bv, b->bio, n, 0)
+       bio_for_each_segment_all(bv, b->bio, n)
                __free_page(bv->bv_page);
 
        __btree_node_write_done(cl);
@@ -371,7 +373,7 @@ static void btree_node_write_done(struct closure *cl)
 static void btree_node_write_endio(struct bio *bio, int error)
 {
        struct closure *cl = bio->bi_private;
-       struct btree *b = container_of(cl, struct btree, io.cl);
+       struct btree *b = container_of(cl, struct btree, io);
 
        if (error)
                set_btree_node_io_error(b);
@@ -382,8 +384,8 @@ static void btree_node_write_endio(struct bio *bio, int error)
 
 static void do_btree_node_write(struct btree *b)
 {
-       struct closure *cl = &b->io.cl;
-       struct bset *i = b->sets[b->nsets].data;
+       struct closure *cl = &b->io;
+       struct bset *i = btree_bset_last(b);
        BKEY_PADDED(key) k;
 
        i->version      = BCACHE_BSET_VERSION;
@@ -395,7 +397,7 @@ static void do_btree_node_write(struct btree *b)
        b->bio->bi_end_io       = btree_node_write_endio;
        b->bio->bi_private      = cl;
        b->bio->bi_rw           = REQ_META|WRITE_SYNC|REQ_FUA;
-       b->bio->bi_size         = set_blocks(i, b->c) * block_bytes(b->c);
+       b->bio->bi_iter.bi_size = roundup(set_bytes(i), block_bytes(b->c));
        bch_bio_map(b->bio, i);
 
        /*
@@ -414,14 +416,15 @@ static void do_btree_node_write(struct btree *b)
         */
 
        bkey_copy(&k.key, &b->key);
-       SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) + bset_offset(b, i));
+       SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) +
+                      bset_sector_offset(&b->keys, i));
 
        if (!bio_alloc_pages(b->bio, GFP_NOIO)) {
                int j;
                struct bio_vec *bv;
                void *base = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1));
 
-               bio_for_each_segment(bv, b->bio, j)
+               bio_for_each_segment_all(bv, b->bio, j)
                        memcpy(page_address(bv->bv_page),
                               base + j * PAGE_SIZE, PAGE_SIZE);
 
@@ -435,40 +438,54 @@ static void do_btree_node_write(struct btree *b)
                bch_submit_bbio(b->bio, b->c, &k.key, 0);
 
                closure_sync(cl);
-               __btree_node_write_done(cl);
+               continue_at_nobarrier(cl, __btree_node_write_done, NULL);
        }
 }
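
When bio_alloc_pages() succeeds, do_btree_node_write() copies the bset into those private pages one page at a time, presumably so the in-memory bset can keep changing while the write is in flight. The copy itself is a strided memcpy from the page-aligned base; a small sketch of just that loop (PAGE_SIZE and all names are illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096

/* Copy 'npages' pages from a page-aligned base into private pages,
 * mirroring the bio_for_each_segment_all() memcpy loop above. */
static int bounce_pages(void **pages, const char *base, unsigned npages)
{
        unsigned j;

        for (j = 0; j < npages; j++) {
                pages[j] = malloc(PAGE_SIZE);
                if (!pages[j])
                        return -1;
                memcpy(pages[j], base + (size_t)j * PAGE_SIZE, PAGE_SIZE);
        }
        return 0;
}

int main(void)
{
        static char bset[2 * PAGE_SIZE] = "live btree data";
        void *pages[2] = { NULL, NULL };
        unsigned j;

        if (!bounce_pages(pages, bset, 2))
                printf("copied: %s\n", (char *)pages[0]);
        for (j = 0; j < 2; j++)
                free(pages[j]);
        return 0;
}
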
 
 void bch_btree_node_write(struct btree *b, struct closure *parent)
 {
-       struct bset *i = b->sets[b->nsets].data;
+       struct bset *i = btree_bset_last(b);
 
        trace_bcache_btree_write(b);
 
        BUG_ON(current->bio_list);
        BUG_ON(b->written >= btree_blocks(b));
        BUG_ON(b->written && !i->keys);
-       BUG_ON(b->sets->data->seq != i->seq);
-       bch_check_keys(b, "writing");
+       BUG_ON(btree_bset_first(b)->seq != i->seq);
+       bch_check_keys(&b->keys, "writing");
 
        cancel_delayed_work(&b->work);
 
        /* If caller isn't waiting for write, parent refcount is cache set */
-       closure_lock(&b->io, parent ?: &b->c->cl);
+       down(&b->io_mutex);
+       closure_init(&b->io, parent ?: &b->c->cl);
 
        clear_bit(BTREE_NODE_dirty,      &b->flags);
        change_bit(BTREE_NODE_write_idx, &b->flags);
 
        do_btree_node_write(b);
 
-       b->written += set_blocks(i, b->c);
-       atomic_long_add(set_blocks(i, b->c) * b->c->sb.block_size,
+       atomic_long_add(set_blocks(i, block_bytes(b->c)) * b->c->sb.block_size,
                        &PTR_CACHE(b->c, &b->key, 0)->btree_sectors_written);
 
-       bch_btree_sort_lazy(b);
+       b->written += set_blocks(i, block_bytes(b->c));
+
+       /* If not a leaf node, always sort */
+       if (b->level && b->keys.nsets)
+               bch_btree_sort(&b->keys, &b->c->sort);
+       else
+               bch_btree_sort_lazy(&b->keys, &b->c->sort);
+
+       /*
+        * Run the verify pass if there was more than one set initially
+        * (i.e. we did a sort) and we sorted down to a single set:
+        */
+       if (i != b->keys.set->data && !b->keys.nsets)
+               bch_btree_verify(b);
 
        if (b->written < btree_blocks(b))
-               bch_bset_init_next(b);
+               bch_bset_init_next(&b->keys, write_block(b),
+                                  bset_magic(&b->c->sb));
 }
 
 static void bch_btree_node_write_sync(struct btree *b)
@@ -493,7 +510,7 @@ static void btree_node_write_work(struct work_struct *w)
 
 static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref)
 {
-       struct bset *i = b->sets[b->nsets].data;
+       struct bset *i = btree_bset_last(b);
        struct btree_write *w = btree_current_write(b);
 
        BUG_ON(!b->written);
@@ -528,24 +545,6 @@ static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref)
  * mca -> memory cache
  */
 
-static void mca_reinit(struct btree *b)
-{
-       unsigned i;
-
-       b->flags        = 0;
-       b->written      = 0;
-       b->nsets        = 0;
-
-       for (i = 0; i < MAX_BSETS; i++)
-               b->sets[i].size = 0;
-       /*
-        * Second loop starts at 1 because b->sets[0]->data is the memory we
-        * allocated
-        */
-       for (i = 1; i < MAX_BSETS; i++)
-               b->sets[i].data = NULL;
-}
-
 #define mca_reserve(c) (((c->root && c->root->level)           \
                          ? c->root->level : 1) * 8 + 16)
 #define mca_can_free(c)                                                \
@@ -553,28 +552,12 @@ static void mca_reinit(struct btree *b)
 
 static void mca_data_free(struct btree *b)
 {
-       struct bset_tree *t = b->sets;
-       BUG_ON(!closure_is_unlocked(&b->io.cl));
+       BUG_ON(b->io_mutex.count != 1);
 
-       if (bset_prev_bytes(b) < PAGE_SIZE)
-               kfree(t->prev);
-       else
-               free_pages((unsigned long) t->prev,
-                          get_order(bset_prev_bytes(b)));
+       bch_btree_keys_free(&b->keys);
 
-       if (bset_tree_bytes(b) < PAGE_SIZE)
-               kfree(t->tree);
-       else
-               free_pages((unsigned long) t->tree,
-                          get_order(bset_tree_bytes(b)));
-
-       free_pages((unsigned long) t->data, b->page_order);
-
-       t->prev = NULL;
-       t->tree = NULL;
-       t->data = NULL;
-       list_move(&b->list, &b->c->btree_cache_freed);
        b->c->bucket_cache_used--;
+       list_move(&b->list, &b->c->btree_cache_freed);
 }
 
 static void mca_bucket_free(struct btree *b)
@@ -593,34 +576,16 @@ static unsigned btree_order(struct bkey *k)
 
 static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp)
 {
-       struct bset_tree *t = b->sets;
-       BUG_ON(t->data);
-
-       b->page_order = max_t(unsigned,
-                             ilog2(b->c->btree_pages),
-                             btree_order(k));
-
-       t->data = (void *) __get_free_pages(gfp, b->page_order);
-       if (!t->data)
-               goto err;
-
-       t->tree = bset_tree_bytes(b) < PAGE_SIZE
-               ? kmalloc(bset_tree_bytes(b), gfp)
-               : (void *) __get_free_pages(gfp, get_order(bset_tree_bytes(b)));
-       if (!t->tree)
-               goto err;
-
-       t->prev = bset_prev_bytes(b) < PAGE_SIZE
-               ? kmalloc(bset_prev_bytes(b), gfp)
-               : (void *) __get_free_pages(gfp, get_order(bset_prev_bytes(b)));
-       if (!t->prev)
-               goto err;
-
-       list_move(&b->list, &b->c->btree_cache);
-       b->c->bucket_cache_used++;
-       return;
-err:
-       mca_data_free(b);
+       if (!bch_btree_keys_alloc(&b->keys,
+                                 max_t(unsigned,
+                                       ilog2(b->c->btree_pages),
+                                       btree_order(k)),
+                                 gfp)) {
+               b->c->bucket_cache_used++;
+               list_move(&b->list, &b->c->btree_cache);
+       } else {
+               list_move(&b->list, &b->c->btree_cache_freed);
+       }
 }
 
 static struct btree *mca_bucket_alloc(struct cache_set *c,
@@ -635,7 +600,7 @@ static struct btree *mca_bucket_alloc(struct cache_set *c,
        INIT_LIST_HEAD(&b->list);
        INIT_DELAYED_WORK(&b->work, btree_node_write_work);
        b->c = c;
-       closure_init_unlocked(&b->io);
+       sema_init(&b->io_mutex, 1);
 
        mca_data_alloc(b, k, gfp);
        return b;
@@ -651,24 +616,31 @@ static int mca_reap(struct btree *b, unsigned min_order, bool flush)
        if (!down_write_trylock(&b->lock))
                return -ENOMEM;
 
-       BUG_ON(btree_node_dirty(b) && !b->sets[0].data);
+       BUG_ON(btree_node_dirty(b) && !b->keys.set[0].data);
 
-       if (b->page_order < min_order ||
-           (!flush &&
-            (btree_node_dirty(b) ||
-             atomic_read(&b->io.cl.remaining) != -1))) {
-               rw_unlock(true, b);
-               return -ENOMEM;
+       if (b->keys.page_order < min_order)
+               goto out_unlock;
+
+       if (!flush) {
+               if (btree_node_dirty(b))
+                       goto out_unlock;
+
+               if (down_trylock(&b->io_mutex))
+                       goto out_unlock;
+               up(&b->io_mutex);
        }
 
        if (btree_node_dirty(b))
                bch_btree_node_write_sync(b);
 
        /* wait for any in flight btree write */
-       closure_wait_event(&b->io.wait, &cl,
-                          atomic_read(&b->io.cl.remaining) == -1);
+       down(&b->io_mutex);
+       up(&b->io_mutex);
 
        return 0;
+out_unlock:
+       rw_unlock(true, b);
+       return -ENOMEM;
 }
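
mca_reap() replaces the old peek at the closure refcount with two semaphore idioms: down_trylock()/up() as a non-blocking "is a write in flight?" probe, and down()/up() as "wait for any in-flight write to finish". Both in miniature, using a POSIX semaphore and invented names:

#include <semaphore.h>
#include <stdbool.h>
#include <stdio.h>

/* "Is an I/O in flight?" - try to take the sem; if that fails, it is
 * held by a writer. If we got it, nothing is in flight: give it back,
 * since we only wanted to peek. */
static bool io_in_flight(sem_t *io_mutex)
{
        if (sem_trywait(io_mutex))
                return true;            /* held: write in progress */
        sem_post(io_mutex);
        return false;
}

/* "Wait for any in-flight I/O" - acquire, then immediately release. */
static void wait_for_io(sem_t *io_mutex)
{
        sem_wait(io_mutex);
        sem_post(io_mutex);
}

int main(void)
{
        sem_t io_mutex;

        sem_init(&io_mutex, 0, 1);
        printf("in flight? %d\n", io_in_flight(&io_mutex));
        wait_for_io(&io_mutex);         /* returns immediately here */
        return 0;
}
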
 
 static unsigned long bch_mca_scan(struct shrinker *shrink,
@@ -714,14 +686,10 @@ static unsigned long bch_mca_scan(struct shrinker *shrink,
                }
        }
 
-       /*
-        * Can happen right when we first start up, before we've read in any
-        * btree nodes
-        */
-       if (list_empty(&c->btree_cache))
-               goto out;
-
        for (i = 0; (nr--) && i < c->bucket_cache_used; i++) {
+               if (list_empty(&c->btree_cache))
+                       goto out;
+
                b = list_first_entry(&c->btree_cache, struct btree, list);
                list_rotate_left(&c->btree_cache);
 
@@ -767,6 +735,8 @@ void bch_btree_cache_free(struct cache_set *c)
 #ifdef CONFIG_BCACHE_DEBUG
        if (c->verify_data)
                list_move(&c->verify_data->list, &c->btree_cache);
+
+       free_pages((unsigned long) c->verify_ondisk, ilog2(bucket_pages(c)));
 #endif
 
        list_splice(&c->btree_cache_freeable,
@@ -807,10 +777,13 @@ int bch_btree_cache_alloc(struct cache_set *c)
 #ifdef CONFIG_BCACHE_DEBUG
        mutex_init(&c->verify_lock);
 
+       c->verify_ondisk = (void *)
+               __get_free_pages(GFP_KERNEL, ilog2(bucket_pages(c)));
+
        c->verify_data = mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL);
 
        if (c->verify_data &&
-           c->verify_data->sets[0].data)
+           c->verify_data->keys.set->data)
                list_del_init(&c->verify_data->list);
        else
                c->verify_data = NULL;
@@ -908,7 +881,7 @@ static struct btree *mca_alloc(struct cache_set *c, struct bkey *k, int level)
        list_for_each_entry(b, &c->btree_cache_freed, list)
                if (!mca_reap(b, 0, false)) {
                        mca_data_alloc(b, k, __GFP_NOWARN|GFP_NOIO);
-                       if (!b->sets[0].data)
+                       if (!b->keys.set[0].data)
                                goto err;
                        else
                                goto out;
@@ -919,10 +892,10 @@ static struct btree *mca_alloc(struct cache_set *c, struct bkey *k, int level)
                goto err;
 
        BUG_ON(!down_write_trylock(&b->lock));
-       if (!b->sets->data)
+       if (!b->keys.set->data)
                goto err;
 out:
-       BUG_ON(!closure_is_unlocked(&b->io.cl));
+       BUG_ON(b->io_mutex.count != 1);
 
        bkey_copy(&b->key, k);
        list_move(&b->list, &c->btree_cache);
@@ -930,10 +903,17 @@ out:
        hlist_add_head_rcu(&b->hash, mca_hash(c, k));
 
        lock_set_subclass(&b->lock.dep_map, level + 1, _THIS_IP_);
-       b->level        = level;
        b->parent       = (void *) ~0UL;
+       b->flags        = 0;
+       b->written      = 0;
+       b->level        = level;
 
-       mca_reinit(b);
+       if (!b->level)
+               bch_btree_keys_init(&b->keys, &bch_extent_keys_ops,
+                                   &b->c->expensive_debug_checks);
+       else
+               bch_btree_keys_init(&b->keys, &bch_btree_keys_ops,
+                                   &b->c->expensive_debug_checks);
 
        return b;
 err:
@@ -994,13 +974,13 @@ retry:
 
        b->accessed = 1;
 
-       for (; i <= b->nsets && b->sets[i].size; i++) {
-               prefetch(b->sets[i].tree);
-               prefetch(b->sets[i].data);
+       for (; i <= b->keys.nsets && b->keys.set[i].size; i++) {
+               prefetch(b->keys.set[i].tree);
+               prefetch(b->keys.set[i].data);
        }
 
-       for (; i <= b->nsets; i++)
-               prefetch(b->sets[i].data);
+       for (; i <= b->keys.nsets; i++)
+               prefetch(b->keys.set[i].data);
 
        if (btree_node_io_error(b)) {
                rw_unlock(write, b);
@@ -1063,7 +1043,7 @@ struct btree *bch_btree_node_alloc(struct cache_set *c, int level, bool wait)
 
        mutex_lock(&c->bucket_lock);
 retry:
-       if (__bch_bucket_alloc_set(c, WATERMARK_METADATA, &k.key, 1, wait))
+       if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, wait))
                goto err;
 
        bkey_put(c, &k.key);
@@ -1080,7 +1060,7 @@ retry:
        }
 
        b->accessed = 1;
-       bch_bset_init_next(b);
+       bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->sb));
 
        mutex_unlock(&c->bucket_lock);
 
@@ -1098,8 +1078,10 @@ err:
 static struct btree *btree_node_alloc_replacement(struct btree *b, bool wait)
 {
        struct btree *n = bch_btree_node_alloc(b->c, b->level, wait);
-       if (!IS_ERR_OR_NULL(n))
-               bch_btree_sort_into(b, n);
+       if (!IS_ERR_OR_NULL(n)) {
+               bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort);
+               bkey_copy_key(&n->key, &b->key);
+       }
 
        return n;
 }
@@ -1120,6 +1102,28 @@ static void make_btree_freeing_key(struct btree *b, struct bkey *k)
        atomic_inc(&b->c->prio_blocked);
 }
 
+static int btree_check_reserve(struct btree *b, struct btree_op *op)
+{
+       struct cache_set *c = b->c;
+       struct cache *ca;
+       unsigned i, reserve = c->root->level * 2 + 1;
+       int ret = 0;
+
+       mutex_lock(&c->bucket_lock);
+
+       for_each_cache(ca, c, i)
+               if (fifo_used(&ca->free[RESERVE_BTREE]) < reserve) {
+                       if (op)
+                               prepare_to_wait(&c->bucket_wait, &op->wait,
+                                               TASK_UNINTERRUPTIBLE);
+                       ret = -EINTR;
+                       break;
+               }
+
+       mutex_unlock(&c->bucket_lock);
+       return ret;
+}
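
btree_check_reserve() is the prepare-to-wait half of the new reserve throttling: still holding bucket_lock, it registers op->wait on bucket_wait before returning -EINTR, so a wakeup cannot be lost between the check and the eventual sleep; the finish_wait() added to the btree() macro is the other half. A compressed userspace rendering of the check-register-unwind step (mutex and names invented; the actual sleep is left to the caller, as in the patch):

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t bucket_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned free_buckets = 1;

/*
 * Under the allocator lock: if the reserve is short, register as a
 * waiter *before* unlocking (so a concurrent free can't be missed),
 * then return -EINTR so the caller unwinds its btree locks first and
 * only then sleeps.
 */
static int check_reserve(unsigned reserve, bool *registered)
{
        int ret = 0;

        pthread_mutex_lock(&bucket_lock);
        if (free_buckets < reserve) {
                *registered = true;     /* prepare_to_wait() equivalent */
                ret = -EINTR;
        }
        pthread_mutex_unlock(&bucket_lock);
        return ret;
}

int main(void)
{
        bool registered = false;

        if (check_reserve(3, &registered) == -EINTR)
                printf("reserve short, registered to wait: %d\n", registered);
        return 0;
}
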
+
 /* Garbage collection */
 
 uint8_t __bch_btree_mark_key(struct cache_set *c, int level, struct bkey *k)
@@ -1183,11 +1187,11 @@ static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
 
        gc->nodes++;
 
-       for_each_key_filter(b, k, &iter, bch_ptr_invalid) {
+       for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) {
                stale = max(stale, btree_mark_key(b, k));
                keys++;
 
-               if (bch_ptr_bad(b, k))
+               if (bch_ptr_bad(&b->keys, k))
                        continue;
 
                gc->key_bytes += bkey_u64s(k);
@@ -1197,9 +1201,9 @@ static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
                gc->data += KEY_SIZE(k);
        }
 
-       for (t = b->sets; t <= &b->sets[b->nsets]; t++)
+       for (t = b->keys.set; t <= &b->keys.set[b->keys.nsets]; t++)
                btree_bug_on(t->size &&
-                            bset_written(b, t) &&
+                            bset_written(&b->keys, t) &&
                             bkey_cmp(&b->key, &t->end) < 0,
                             b, "found short btree key in gc");
 
@@ -1243,7 +1247,8 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
        blocks = btree_default_blocks(b->c) * 2 / 3;
 
        if (nodes < 2 ||
-           __set_blocks(b->sets[0].data, keys, b->c) > blocks * (nodes - 1))
+           __set_blocks(b->keys.set[0].data, keys,
+                        block_bytes(b->c)) > blocks * (nodes - 1))
                return 0;
 
        for (i = 0; i < nodes; i++) {
@@ -1253,18 +1258,19 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
        }
 
        for (i = nodes - 1; i > 0; --i) {
-               struct bset *n1 = new_nodes[i]->sets->data;
-               struct bset *n2 = new_nodes[i - 1]->sets->data;
+               struct bset *n1 = btree_bset_first(new_nodes[i]);
+               struct bset *n2 = btree_bset_first(new_nodes[i - 1]);
                struct bkey *k, *last = NULL;
 
                keys = 0;
 
                if (i > 1) {
                        for (k = n2->start;
-                            k < end(n2);
+                            k < bset_bkey_last(n2);
                             k = bkey_next(k)) {
                                if (__set_blocks(n1, n1->keys + keys +
-                                                bkey_u64s(k), b->c) > blocks)
+                                                bkey_u64s(k),
+                                                block_bytes(b->c)) > blocks)
                                        break;
 
                                last = k;
@@ -1280,7 +1286,8 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
                         * though)
                         */
                        if (__set_blocks(n1, n1->keys + n2->keys,
-                                        b->c) > btree_blocks(new_nodes[i]))
+                                        block_bytes(b->c)) >
+                           btree_blocks(new_nodes[i]))
                                goto out_nocoalesce;
 
                        keys = n2->keys;
@@ -1288,27 +1295,28 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
                        last = &r->b->key;
                }
 
-               BUG_ON(__set_blocks(n1, n1->keys + keys,
-                                   b->c) > btree_blocks(new_nodes[i]));
+               BUG_ON(__set_blocks(n1, n1->keys + keys, block_bytes(b->c)) >
+                      btree_blocks(new_nodes[i]));
 
                if (last)
                        bkey_copy_key(&new_nodes[i]->key, last);
 
-               memcpy(end(n1),
+               memcpy(bset_bkey_last(n1),
                       n2->start,
-                      (void *) node(n2, keys) - (void *) n2->start);
+                      (void *) bset_bkey_idx(n2, keys) - (void *) n2->start);
 
                n1->keys += keys;
                r[i].keys = n1->keys;
 
                memmove(n2->start,
-                       node(n2, keys),
-                       (void *) end(n2) - (void *) node(n2, keys));
+                       bset_bkey_idx(n2, keys),
+                       (void *) bset_bkey_last(n2) -
+                       (void *) bset_bkey_idx(n2, keys));
 
                n2->keys -= keys;
 
-               if (bch_keylist_realloc(keylist,
-                                       KEY_PTRS(&new_nodes[i]->key), b->c))
+               if (__bch_keylist_realloc(keylist,
+                                         bkey_u64s(&new_nodes[i]->key)))
                        goto out_nocoalesce;
 
                bch_btree_node_write(new_nodes[i], &cl);
@@ -1316,7 +1324,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
        }
 
        for (i = 0; i < nodes; i++) {
-               if (bch_keylist_realloc(keylist, KEY_PTRS(&r[i].b->key), b->c))
+               if (__bch_keylist_realloc(keylist, bkey_u64s(&r[i].b->key)))
                        goto out_nocoalesce;
 
                make_btree_freeing_key(r[i].b, keylist->top);
@@ -1324,7 +1332,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
        }
 
        /* We emptied out this node */
-       BUG_ON(new_nodes[0]->sets->data->keys);
+       BUG_ON(btree_bset_first(new_nodes[0])->keys);
        btree_node_free(new_nodes[0]);
        rw_unlock(true, new_nodes[0]);
 
@@ -1370,7 +1378,7 @@ static unsigned btree_gc_count_keys(struct btree *b)
        struct btree_iter iter;
        unsigned ret = 0;
 
-       for_each_key_filter(b, k, &iter, bch_ptr_bad)
+       for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
                ret += bkey_u64s(k);
 
        return ret;
@@ -1390,13 +1398,13 @@ static int btree_gc_recurse(struct btree *b, struct btree_op *op,
        struct gc_merge_info *last = r + GC_MERGE_NODES - 1;
 
        bch_keylist_init(&keys);
-       bch_btree_iter_init(b, &iter, &b->c->gc_done);
+       bch_btree_iter_init(&b->keys, &iter, &b->c->gc_done);
 
        for (i = 0; i < GC_MERGE_NODES; i++)
                r[i].b = ERR_PTR(-EINTR);
 
        while (1) {
-               k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad);
+               k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad);
                if (k) {
                        r->b = bch_btree_node_get(b->c, k, b->level - 1, true);
                        if (IS_ERR(r->b)) {
@@ -1416,7 +1424,8 @@ static int btree_gc_recurse(struct btree *b, struct btree_op *op,
 
                if (!IS_ERR(last->b)) {
                        should_rewrite = btree_gc_mark_node(last->b, gc);
-                       if (should_rewrite) {
+                       if (should_rewrite &&
+                           !btree_check_reserve(b, NULL)) {
                                n = btree_node_alloc_replacement(last->b,
                                                                 false);
 
@@ -1705,7 +1714,7 @@ static int bch_btree_check_recurse(struct btree *b, struct btree_op *op,
        struct bucket *g;
        struct btree_iter iter;
 
-       for_each_key_filter(b, k, &iter, bch_ptr_invalid) {
+       for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) {
                for (i = 0; i < KEY_PTRS(k); i++) {
                        if (!ptr_available(b->c, k, i))
                                continue;
@@ -1728,10 +1737,11 @@ static int bch_btree_check_recurse(struct btree *b, struct btree_op *op,
        }
 
        if (b->level) {
-               bch_btree_iter_init(b, &iter, NULL);
+               bch_btree_iter_init(&b->keys, &iter, NULL);
 
                do {
-                       k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad);
+                       k = bch_btree_iter_next_filter(&iter, &b->keys,
+                                                      bch_ptr_bad);
                        if (k)
                                btree_node_prefetch(b->c, k, b->level - 1);
 
@@ -1774,235 +1784,36 @@ err:
 
 /* Btree insertion */
 
-static void shift_keys(struct btree *b, struct bkey *where, struct bkey *insert)
-{
-       struct bset *i = b->sets[b->nsets].data;
-
-       memmove((uint64_t *) where + bkey_u64s(insert),
-               where,
-               (void *) end(i) - (void *) where);
-
-       i->keys += bkey_u64s(insert);
-       bkey_copy(where, insert);
-       bch_bset_fix_lookup_table(b, where);
-}
-
-static bool fix_overlapping_extents(struct btree *b, struct bkey *insert,
-                                   struct btree_iter *iter,
-                                   struct bkey *replace_key)
+static bool btree_insert_key(struct btree *b, struct bkey *k,
+                            struct bkey *replace_key)
 {
-       void subtract_dirty(struct bkey *k, uint64_t offset, int sectors)
-       {
-               if (KEY_DIRTY(k))
-                       bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
-                                                    offset, -sectors);
-       }
-
-       uint64_t old_offset;
-       unsigned old_size, sectors_found = 0;
-
-       while (1) {
-               struct bkey *k = bch_btree_iter_next(iter);
-               if (!k ||
-                   bkey_cmp(&START_KEY(k), insert) >= 0)
-                       break;
-
-               if (bkey_cmp(k, &START_KEY(insert)) <= 0)
-                       continue;
-
-               old_offset = KEY_START(k);
-               old_size = KEY_SIZE(k);
-
-               /*
-                * We might overlap with 0 size extents; we can't skip these
-                * because if they're in the set we're inserting to we have to
-                * adjust them so they don't overlap with the key we're
-                * inserting. But we don't want to check them for replace
-                * operations.
-                */
-
-               if (replace_key && KEY_SIZE(k)) {
-                       /*
-                        * k might have been split since we inserted/found the
-                        * key we're replacing
-                        */
-                       unsigned i;
-                       uint64_t offset = KEY_START(k) -
-                               KEY_START(replace_key);
-
-                       /* But it must be a subset of the replace key */
-                       if (KEY_START(k) < KEY_START(replace_key) ||
-                           KEY_OFFSET(k) > KEY_OFFSET(replace_key))
-                               goto check_failed;
-
-                       /* We didn't find a key that we were supposed to */
-                       if (KEY_START(k) > KEY_START(insert) + sectors_found)
-                               goto check_failed;
-
-                       if (KEY_PTRS(k) != KEY_PTRS(replace_key) ||
-                           KEY_DIRTY(k) != KEY_DIRTY(replace_key))
-                               goto check_failed;
-
-                       /* skip past gen */
-                       offset <<= 8;
-
-                       BUG_ON(!KEY_PTRS(replace_key));
+       unsigned status;
 
-                       for (i = 0; i < KEY_PTRS(replace_key); i++)
-                               if (k->ptr[i] != replace_key->ptr[i] + offset)
-                                       goto check_failed;
-
-                       sectors_found = KEY_OFFSET(k) - KEY_START(insert);
-               }
-
-               if (bkey_cmp(insert, k) < 0 &&
-                   bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0) {
-                       /*
-                        * We overlapped in the middle of an existing key: that
-                        * means we have to split the old key. But we have to do
-                        * slightly different things depending on whether the
-                        * old key has been written out yet.
-                        */
-
-                       struct bkey *top;
-
-                       subtract_dirty(k, KEY_START(insert), KEY_SIZE(insert));
-
-                       if (bkey_written(b, k)) {
-                               /*
-                                * We insert a new key to cover the top of the
-                                * old key, and the old key is modified in place
-                                * to represent the bottom split.
-                                *
-                                * It's completely arbitrary whether the new key
-                                * is the top or the bottom, but it has to match
-                                * up with what btree_sort_fixup() does - it
-                                * doesn't check for this kind of overlap, it
-                                * depends on us inserting a new key for the top
-                                * here.
-                                */
-                               top = bch_bset_search(b, &b->sets[b->nsets],
-                                                     insert);
-                               shift_keys(b, top, k);
-                       } else {
-                               BKEY_PADDED(key) temp;
-                               bkey_copy(&temp.key, k);
-                               shift_keys(b, k, &temp.key);
-                               top = bkey_next(k);
-                       }
-
-                       bch_cut_front(insert, top);
-                       bch_cut_back(&START_KEY(insert), k);
-                       bch_bset_fix_invalidated_key(b, k);
-                       return false;
-               }
-
-               if (bkey_cmp(insert, k) < 0) {
-                       bch_cut_front(insert, k);
-               } else {
-                       if (bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0)
-                               old_offset = KEY_START(insert);
-
-                       if (bkey_written(b, k) &&
-                           bkey_cmp(&START_KEY(insert), &START_KEY(k)) <= 0) {
-                               /*
-                                * Completely overwrote, so we don't have to
-                                * invalidate the binary search tree
-                                */
-                               bch_cut_front(k, k);
-                       } else {
-                               __bch_cut_back(&START_KEY(insert), k);
-                               bch_bset_fix_invalidated_key(b, k);
-                       }
-               }
-
-               subtract_dirty(k, old_offset, old_size - KEY_SIZE(k));
-       }
+       BUG_ON(bkey_cmp(k, &b->key) > 0);
 
-check_failed:
-       if (replace_key) {
-               if (!sectors_found) {
-                       return true;
-               } else if (sectors_found < KEY_SIZE(insert)) {
-                       SET_KEY_OFFSET(insert, KEY_OFFSET(insert) -
-                                      (KEY_SIZE(insert) - sectors_found));
-                       SET_KEY_SIZE(insert, sectors_found);
-               }
-       }
+       status = bch_btree_insert_key(&b->keys, k, replace_key);
+       if (status != BTREE_INSERT_STATUS_NO_INSERT) {
+               bch_check_keys(&b->keys, "%u for %s", status,
+                              replace_key ? "replace" : "insert");
 
-       return false;
+               trace_bcache_btree_insert_key(b, k, replace_key != NULL,
+                                             status);
+               return true;
+       } else
+               return false;
 }
 
-static bool btree_insert_key(struct btree *b, struct btree_op *op,
-                            struct bkey *k, struct bkey *replace_key)
+static size_t insert_u64s_remaining(struct btree *b)
 {
-       struct bset *i = b->sets[b->nsets].data;
-       struct bkey *m, *prev;
-       unsigned status = BTREE_INSERT_STATUS_INSERT;
-
-       BUG_ON(bkey_cmp(k, &b->key) > 0);
-       BUG_ON(b->level && !KEY_PTRS(k));
-       BUG_ON(!b->level && !KEY_OFFSET(k));
-
-       if (!b->level) {
-               struct btree_iter iter;
-
-               /*
-                * bset_search() returns the first key that is strictly greater
-                * than the search key - but for back merging, we want to find
-                * the previous key.
-                */
-               prev = NULL;
-               m = bch_btree_iter_init(b, &iter, PRECEDING_KEY(&START_KEY(k)));
+       ssize_t ret = bch_btree_keys_u64s_remaining(&b->keys);
 
-               if (fix_overlapping_extents(b, k, &iter, replace_key)) {
-                       op->insert_collision = true;
-                       return false;
-               }
-
-               if (KEY_DIRTY(k))
-                       bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
-                                                    KEY_START(k), KEY_SIZE(k));
-
-               while (m != end(i) &&
-                      bkey_cmp(k, &START_KEY(m)) > 0)
-                       prev = m, m = bkey_next(m);
-
-               if (key_merging_disabled(b->c))
-                       goto insert;
-
-               /* prev is in the tree, if we merge we're done */
-               status = BTREE_INSERT_STATUS_BACK_MERGE;
-               if (prev &&
-                   bch_bkey_try_merge(b, prev, k))
-                       goto merged;
-
-               status = BTREE_INSERT_STATUS_OVERWROTE;
-               if (m != end(i) &&
-                   KEY_PTRS(m) == KEY_PTRS(k) && !KEY_SIZE(m))
-                       goto copy;
-
-               status = BTREE_INSERT_STATUS_FRONT_MERGE;
-               if (m != end(i) &&
-                   bch_bkey_try_merge(b, k, m))
-                       goto copy;
-       } else {
-               BUG_ON(replace_key);
-               m = bch_bset_search(b, &b->sets[b->nsets], k);
-       }
-
-insert:        shift_keys(b, m, k);
-copy:  bkey_copy(m, k);
-merged:
-       bch_check_keys(b, "%u for %s", status,
-                      replace_key ? "replace" : "insert");
-
-       if (b->level && !KEY_OFFSET(k))
-               btree_current_write(b)->prio_blocked++;
-
-       trace_bcache_btree_insert_key(b, k, replace_key != NULL, status);
+       /*
+        * Might land in the middle of an existing extent and have to split it
+        */
+       if (b->keys.ops->is_extents)
+               ret -= KEY_MAX_U64S;
 
-       return true;
+       return max(ret, 0L);
 }
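
insert_u64s_remaining() is the heart of the simplified split test further down: take the raw u64s left in the node and, for extent nodes, hold back room for one worst-case key, because an insert that lands mid-extent *adds* a key when the extent is split. The arithmetic reduced to a standalone function (the KEY_MAX_U64S value here is invented):

#include <stdio.h>

#define KEY_MAX_U64S 8  /* illustrative worst-case size of one key */

/* Clamp at zero so callers can safely compare unsigned key sizes. */
static long insert_u64s_remaining(long raw_u64s_left, int is_extents)
{
        long ret = raw_u64s_left;

        /* Might land in the middle of an existing extent: reserve
         * space for the extra key a split would create. */
        if (is_extents)
                ret -= KEY_MAX_U64S;

        return ret > 0 ? ret : 0;
}

int main(void)
{
        printf("%ld\n", insert_u64s_remaining(5, 1));   /* 0: too tight */
        printf("%ld\n", insert_u64s_remaining(64, 1));  /* 56 */
        return 0;
}
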
 
 static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
@@ -2010,21 +1821,19 @@ static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
                                  struct bkey *replace_key)
 {
        bool ret = false;
-       int oldsize = bch_count_data(b);
+       int oldsize = bch_count_data(&b->keys);
 
        while (!bch_keylist_empty(insert_keys)) {
-               struct bset *i = write_block(b);
                struct bkey *k = insert_keys->keys;
 
-               if (b->written + __set_blocks(i, i->keys + bkey_u64s(k), b->c)
-                   > btree_blocks(b))
+               if (bkey_u64s(k) > insert_u64s_remaining(b))
                        break;
 
                if (bkey_cmp(k, &b->key) <= 0) {
                        if (!b->level)
                                bkey_put(b->c, k);
 
-                       ret |= btree_insert_key(b, op, k, replace_key);
+                       ret |= btree_insert_key(b, k, replace_key);
                        bch_keylist_pop_front(insert_keys);
                } else if (bkey_cmp(&START_KEY(k), &b->key) < 0) {
                        BKEY_PADDED(key) temp;
@@ -2033,16 +1842,19 @@ static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
                        bch_cut_back(&b->key, &temp.key);
                        bch_cut_front(&b->key, insert_keys->keys);
 
-                       ret |= btree_insert_key(b, op, &temp.key, replace_key);
+                       ret |= btree_insert_key(b, &temp.key, replace_key);
                        break;
                } else {
                        break;
                }
        }
 
+       if (!ret)
+               op->insert_collision = true;
+
        BUG_ON(!bch_keylist_empty(insert_keys) && b->level);
 
-       BUG_ON(bch_count_data(b) < oldsize);
+       BUG_ON(bch_count_data(&b->keys) < oldsize);
        return ret;
 }
 
@@ -2059,16 +1871,21 @@ static int btree_split(struct btree *b, struct btree_op *op,
        closure_init_stack(&cl);
        bch_keylist_init(&parent_keys);
 
+       if (!b->level &&
+           btree_check_reserve(b, op))
+               return -EINTR;
+
        n1 = btree_node_alloc_replacement(b, true);
        if (IS_ERR(n1))
                goto err;
 
-       split = set_blocks(n1->sets[0].data, n1->c) > (btree_blocks(b) * 4) / 5;
+       split = set_blocks(btree_bset_first(n1),
+                          block_bytes(n1->c)) > (btree_blocks(b) * 4) / 5;
 
        if (split) {
                unsigned keys = 0;
 
-               trace_bcache_btree_node_split(b, n1->sets[0].data->keys);
+               trace_bcache_btree_node_split(b, btree_bset_first(n1)->keys);
 
                n2 = bch_btree_node_alloc(b->c, b->level, true);
                if (IS_ERR(n2))
@@ -2087,18 +1904,20 @@ static int btree_split(struct btree *b, struct btree_op *op,
                 * search tree yet
                 */
 
-               while (keys < (n1->sets[0].data->keys * 3) / 5)
-                       keys += bkey_u64s(node(n1->sets[0].data, keys));
+               while (keys < (btree_bset_first(n1)->keys * 3) / 5)
+                       keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1),
+                                                       keys));
 
-               bkey_copy_key(&n1->key, node(n1->sets[0].data, keys));
-               keys += bkey_u64s(node(n1->sets[0].data, keys));
+               bkey_copy_key(&n1->key,
+                             bset_bkey_idx(btree_bset_first(n1), keys));
+               keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1), keys));
 
-               n2->sets[0].data->keys = n1->sets[0].data->keys - keys;
-               n1->sets[0].data->keys = keys;
+               btree_bset_first(n2)->keys = btree_bset_first(n1)->keys - keys;
+               btree_bset_first(n1)->keys = keys;
 
-               memcpy(n2->sets[0].data->start,
-                      end(n1->sets[0].data),
-                      n2->sets[0].data->keys * sizeof(uint64_t));
+               memcpy(btree_bset_first(n2)->start,
+                      bset_bkey_last(btree_bset_first(n1)),
+                      btree_bset_first(n2)->keys * sizeof(uint64_t));
 
                bkey_copy_key(&n2->key, &b->key);
 
@@ -2106,7 +1925,7 @@ static int btree_split(struct btree *b, struct btree_op *op,
                bch_btree_node_write(n2, &cl);
                rw_unlock(true, n2);
        } else {
-               trace_bcache_btree_node_compact(b, n1->sets[0].data->keys);
+               trace_bcache_btree_node_compact(b, btree_bset_first(n1)->keys);
 
                bch_btree_insert_keys(n1, op, insert_keys, replace_key);
        }
@@ -2149,18 +1968,21 @@ static int btree_split(struct btree *b, struct btree_op *op,
 
        return 0;
 err_free2:
+       bkey_put(b->c, &n2->key);
        btree_node_free(n2);
        rw_unlock(true, n2);
 err_free1:
+       bkey_put(b->c, &n1->key);
        btree_node_free(n1);
        rw_unlock(true, n1);
 err:
+       WARN(1, "bcache: btree split failed");
+
        if (n3 == ERR_PTR(-EAGAIN) ||
            n2 == ERR_PTR(-EAGAIN) ||
            n1 == ERR_PTR(-EAGAIN))
                return -EAGAIN;
 
-       pr_warn("couldn't split");
        return -ENOMEM;
 }
 
@@ -2171,7 +1993,7 @@ static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
 {
        BUG_ON(b->level && replace_key);
 
-       if (should_split(b)) {
+       if (bch_keylist_nkeys(insert_keys) > insert_u64s_remaining(b)) {
                if (current->bio_list) {
                        op->lock = b->c->root->level + 1;
                        return -EAGAIN;
@@ -2180,11 +2002,13 @@ static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
                        return -EINTR;
                } else {
                        /* Invalidated all iterators */
-                       return btree_split(b, op, insert_keys, replace_key) ?:
-                               -EINTR;
+                       int ret = btree_split(b, op, insert_keys, replace_key);
+
+                       return bch_keylist_empty(insert_keys) ?
+                               0 : ret ?: -EINTR;
                }
        } else {
-               BUG_ON(write_block(b) != b->sets[b->nsets].data);
+               BUG_ON(write_block(b) != btree_bset_last(b));
 
                if (bch_btree_insert_keys(b, op, insert_keys, replace_key)) {
                        if (!b->level)
@@ -2323,9 +2147,9 @@ static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op,
                struct bkey *k;
                struct btree_iter iter;
 
-               bch_btree_iter_init(b, &iter, from);
+               bch_btree_iter_init(&b->keys, &iter, from);
 
-               while ((k = bch_btree_iter_next_filter(&iter, b,
+               while ((k = bch_btree_iter_next_filter(&iter, &b->keys,
                                                       bch_ptr_bad))) {
                        ret = btree(map_nodes_recurse, k, b,
                                    op, from, fn, flags);
@@ -2356,9 +2180,9 @@ static int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
        struct bkey *k;
        struct btree_iter iter;
 
-       bch_btree_iter_init(b, &iter, from);
+       bch_btree_iter_init(&b->keys, &iter, from);
 
-       while ((k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad))) {
+       while ((k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad))) {
                ret = !b->level
                        ? fn(op, b, k)
                        : btree(map_keys_recurse, k, b, op, from, fn, flags);
index 767e755..af065e9 100644 (file)
@@ -130,20 +130,12 @@ struct btree {
        unsigned long           flags;
        uint16_t                written;        /* would be nice to kill */
        uint8_t                 level;
-       uint8_t                 nsets;
-       uint8_t                 page_order;
-
-       /*
-        * Set of sorted keys - the real btree node - plus a binary search tree
-        *
-        * sets[0] is special; set[0]->tree, set[0]->prev and set[0]->data point
-        * to the memory we have allocated for this btree node. Additionally,
-        * set[0]->data points to the entire btree node as it exists on disk.
-        */
-       struct bset_tree        sets[MAX_BSETS];
+
+       struct btree_keys       keys;
 
        /* For outstanding btree writes, used as a lock - protects write_idx */
-       struct closure_with_waitlist    io;
+       struct closure          io;
+       struct semaphore        io_mutex;
 
        struct list_head        list;
        struct delayed_work     work;
@@ -179,24 +171,19 @@ static inline struct btree_write *btree_prev_write(struct btree *b)
        return b->writes + (btree_node_write_idx(b) ^ 1);
 }
 
-static inline unsigned bset_offset(struct btree *b, struct bset *i)
+static inline struct bset *btree_bset_first(struct btree *b)
 {
-       return (((size_t) i) - ((size_t) b->sets->data)) >> 9;
+       return b->keys.set->data;
 }
 
-static inline struct bset *write_block(struct btree *b)
+static inline struct bset *btree_bset_last(struct btree *b)
 {
-       return ((void *) b->sets[0].data) + b->written * block_bytes(b->c);
+       return bset_tree_last(&b->keys)->data;
 }
 
-static inline bool bset_written(struct btree *b, struct bset_tree *t)
+static inline unsigned bset_block_offset(struct btree *b, struct bset *i)
 {
-       return t->data < write_block(b);
-}
-
-static inline bool bkey_written(struct btree *b, struct bkey *k)
-{
-       return k < write_block(b)->start;
+       return bset_sector_offset(&b->keys, i) >> b->c->block_bits;
 }
 
 static inline void set_gc_sectors(struct cache_set *c)
@@ -204,21 +191,6 @@ static inline void set_gc_sectors(struct cache_set *c)
        atomic_set(&c->sectors_to_gc, c->sb.bucket_size * c->nbuckets / 16);
 }
 
-static inline struct bkey *bch_btree_iter_init(struct btree *b,
-                                              struct btree_iter *iter,
-                                              struct bkey *search)
-{
-       return __bch_btree_iter_init(b, iter, search, b->sets);
-}
-
-static inline bool bch_ptr_invalid(struct btree *b, const struct bkey *k)
-{
-       if (b->level)
-               return bch_btree_ptr_invalid(b->c, k);
-       else
-               return bch_extent_ptr_invalid(b->c, k);
-}
-
 void bkey_put(struct cache_set *c, struct bkey *k);
 
 /* Looping macros */
@@ -229,17 +201,12 @@ void bkey_put(struct cache_set *c, struct bkey *k);
             iter++)                                                    \
                hlist_for_each_entry_rcu((b), (c)->bucket_hash + iter, hash)
 
-#define for_each_key_filter(b, k, iter, filter)                                \
-       for (bch_btree_iter_init((b), (iter), NULL);                    \
-            ((k) = bch_btree_iter_next_filter((iter), b, filter));)
-
-#define for_each_key(b, k, iter)                                       \
-       for (bch_btree_iter_init((b), (iter), NULL);                    \
-            ((k) = bch_btree_iter_next(iter));)
-
 /* Recursing down the btree */
 
 struct btree_op {
+       /* for waiting on btree reserve in btree_split() */
+       wait_queue_t            wait;
+
        /* Btree level at which we start taking write locks */
        short                   lock;
 
@@ -249,6 +216,7 @@ struct btree_op {
 static inline void bch_btree_op_init(struct btree_op *op, int write_lock_level)
 {
        memset(op, 0, sizeof(struct btree_op));
+       init_wait(&op->wait);
        op->lock = write_lock_level;
 }
 
@@ -267,7 +235,7 @@ static inline void rw_unlock(bool w, struct btree *b)
        (w ? up_write : up_read)(&b->lock);
 }
 
-void bch_btree_node_read(struct btree *);
+void bch_btree_node_read_done(struct btree *);
 void bch_btree_node_write(struct btree *, struct closure *);
 
 void bch_btree_set_root(struct btree *);
index dfff241..7a228de 100644 (file)
 
 #include "closure.h"
 
-#define CL_FIELD(type, field)                                  \
-       case TYPE_ ## type:                                     \
-       return &container_of(cl, struct type, cl)->field
-
-static struct closure_waitlist *closure_waitlist(struct closure *cl)
-{
-       switch (cl->type) {
-               CL_FIELD(closure_with_waitlist, wait);
-       default:
-               return NULL;
-       }
-}
-
 static inline void closure_put_after_sub(struct closure *cl, int flags)
 {
        int r = flags & CLOSURE_REMAINING_MASK;
@@ -42,17 +29,10 @@ static inline void closure_put_after_sub(struct closure *cl, int flags)
                        closure_queue(cl);
                } else {
                        struct closure *parent = cl->parent;
-                       struct closure_waitlist *wait = closure_waitlist(cl);
                        closure_fn *destructor = cl->fn;
 
                        closure_debug_destroy(cl);
 
-                       smp_mb();
-                       atomic_set(&cl->remaining, -1);
-
-                       if (wait)
-                               closure_wake_up(wait);
-
                        if (destructor)
                                destructor(cl);
 
@@ -69,19 +49,18 @@ void closure_sub(struct closure *cl, int v)
 }
 EXPORT_SYMBOL(closure_sub);
 
+/**
+ * closure_put - decrement a closure's refcount
+ */
 void closure_put(struct closure *cl)
 {
        closure_put_after_sub(cl, atomic_dec_return(&cl->remaining));
 }
 EXPORT_SYMBOL(closure_put);
 
-static void set_waiting(struct closure *cl, unsigned long f)
-{
-#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
-       cl->waiting_on = f;
-#endif
-}
-
+/**
+ * __closure_wake_up - wake up all closures on a wait list, without a memory barrier
+ */
 void __closure_wake_up(struct closure_waitlist *wait_list)
 {
        struct llist_node *list;
@@ -106,27 +85,34 @@ void __closure_wake_up(struct closure_waitlist *wait_list)
                cl = container_of(reverse, struct closure, list);
                reverse = llist_next(reverse);
 
-               set_waiting(cl, 0);
+               closure_set_waiting(cl, 0);
                closure_sub(cl, CLOSURE_WAITING + 1);
        }
 }
 EXPORT_SYMBOL(__closure_wake_up);
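
__closure_wake_up() drains the lock-free llist in one shot and then walks it through the reverse pointer, which (in code elided from this hunk) holds a reversed copy of the list: llist_add() pushes LIFO, so reversing restores the order in which closures queued. The reversal is the only nontrivial step; a standalone sketch of it and of the resulting FIFO wakeup order:

#include <stdio.h>

struct node { struct node *next; int id; };

/* Reverse a singly linked list, so waiters pushed onto a LIFO stack
 * come back in the order they queued. */
static struct node *reverse(struct node *l)
{
        struct node *prev = NULL;

        while (l) {
                struct node *next = l->next;

                l->next = prev;
                prev = l;
                l = next;
        }
        return prev;
}

int main(void)
{
        struct node n[3] = { { NULL, 0 }, { &n[0], 1 }, { &n[1], 2 } };
        struct node *l;

        /* n[2] is the stack head: pushed 0, 1, 2; woken 0, 1, 2. */
        for (l = reverse(&n[2]); l; l = l->next)
                printf("wake %d\n", l->id);
        return 0;
}
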
 
-bool closure_wait(struct closure_waitlist *list, struct closure *cl)
+/**
+ * closure_wait - add a closure to a waitlist
+ *
+ * @waitlist will own a ref on @cl, which will be released when
+ * closure_wake_up() is called on @waitlist.
+ */
+bool closure_wait(struct closure_waitlist *waitlist, struct closure *cl)
 {
        if (atomic_read(&cl->remaining) & CLOSURE_WAITING)
                return false;
 
-       set_waiting(cl, _RET_IP_);
+       closure_set_waiting(cl, _RET_IP_);
        atomic_add(CLOSURE_WAITING + 1, &cl->remaining);
-       llist_add(&cl->list, &list->list);
+       llist_add(&cl->list, &waitlist->list);
 
        return true;
 }
 EXPORT_SYMBOL(closure_wait);
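
closure_wait() packs state into the same atomic as the refcount: CLOSURE_WAITING is a high flag bit, and a single atomic add of CLOSURE_WAITING + 1 both marks the closure as queued and takes the reference the waitlist owns (released later by the closure_sub(cl, CLOSURE_WAITING + 1) in __closure_wake_up()). A miniature of the flag-plus-count encoding in C11 atomics; the bit position is invented, and like the original this assumes callers serialize the check against the add:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* High bits are state flags, low bits count outstanding references. */
#define WAITING         (1u << 20)
#define REMAINING_MASK  (WAITING - 1)

static atomic_uint remaining = 1;

/* Refuse to double-queue; otherwise set the flag and take a ref in
 * one atomic add. */
static bool wait_on_list(void)
{
        if (atomic_load(&remaining) & WAITING)
                return false;           /* already on a waitlist */
        atomic_fetch_add(&remaining, WAITING + 1);
        return true;
}

int main(void)
{
        printf("%d\n", wait_on_list());  /* 1: queued, flag + ref taken */
        printf("%d\n", wait_on_list());  /* 0: already queued */
        printf("refs = %u\n", atomic_load(&remaining) & REMAINING_MASK);
        return 0;
}
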
 
 /**
- * closure_sync() - sleep until a closure a closure has nothing left to wait on
+ * closure_sync - sleep until a closure has nothing left to wait on
  *
  * Sleeps until the refcount hits 1 - the thread that's running the closure owns
  * the last refcount.
@@ -148,46 +134,6 @@ void closure_sync(struct closure *cl)
 }
 EXPORT_SYMBOL(closure_sync);
 
-/**
- * closure_trylock() - try to acquire the closure, without waiting
- * @cl:                closure to lock
- *
- * Returns true if the closure was succesfully locked.
- */
-bool closure_trylock(struct closure *cl, struct closure *parent)
-{
-       if (atomic_cmpxchg(&cl->remaining, -1,
-                          CLOSURE_REMAINING_INITIALIZER) != -1)
-               return false;
-
-       smp_mb();
-
-       cl->parent = parent;
-       if (parent)
-               closure_get(parent);
-
-       closure_set_ret_ip(cl);
-       closure_debug_create(cl);
-       return true;
-}
-EXPORT_SYMBOL(closure_trylock);
-
-void __closure_lock(struct closure *cl, struct closure *parent,
-                   struct closure_waitlist *wait_list)
-{
-       struct closure wait;
-       closure_init_stack(&wait);
-
-       while (1) {
-               if (closure_trylock(cl, parent))
-                       return;
-
-               closure_wait_event(wait_list, &wait,
-                                  atomic_read(&cl->remaining) == -1);
-       }
-}
-EXPORT_SYMBOL(__closure_lock);
-
 #ifdef CONFIG_BCACHE_CLOSURES_DEBUG
 
 static LIST_HEAD(closure_list);
index 9762f1b..7ef7461 100644 (file)
  * closure - _always_ use continue_at(). Doing so consistently will help
  * eliminate an entire class of particularly pernicious races.
  *
- * For a closure to wait on an arbitrary event, we need to introduce waitlists:
- *
- * struct closure_waitlist list;
- * closure_wait_event(list, cl, condition);
- * closure_wake_up(wait_list);
- *
- * These work analagously to wait_event() and wake_up() - except that instead of
- * operating on the current thread (for wait_event()) and lists of threads, they
- * operate on an explicit closure and lists of closures.
- *
- * Because it's a closure we can now wait either synchronously or
- * asynchronously. closure_wait_event() returns the current value of the
- * condition, and if it returned false continue_at() or closure_sync() can be
- * used to wait for it to become true.
- *
- * It's useful for waiting on things when you can't sleep in the context in
- * which you must check the condition (perhaps a spinlock held, or you might be
- * beneath generic_make_request() - in which case you can't sleep on IO).
- *
- * closure_wait_event() will wait either synchronously or asynchronously,
- * depending on whether the closure is in blocking mode or not. You can pick a
- * mode explicitly with closure_wait_event_sync() and
- * closure_wait_event_async(), which do just what you might expect.
- *
  * Lastly, you might have a wait list dedicated to a specific event, and have no
  * need for specifying the condition - you just want to wait until someone runs
  * closure_wake_up() on the appropriate wait list. In that case, just use
  * All this implies that a closure should typically be embedded in a particular
  * struct (which its refcount will normally control the lifetime of), and that
  * struct can very much be thought of as a stack frame.
- *
- * Locking:
- *
- * Closures are based on work items but they can be thought of as more like
- * threads - in that like threads and unlike work items they have a well
- * defined lifetime; they are created (with closure_init()) and eventually
- * complete after a continue_at(cl, NULL, NULL).
- *
- * Suppose you've got some larger structure with a closure embedded in it that's
- * used for periodically doing garbage collection. You only want one garbage
- * collection happening at a time, so the natural thing to do is protect it with
- * a lock. However, it's difficult to use a lock protecting a closure correctly
- * because the unlock should come after the last continue_at() (additionally, if
- * you're using the closure asynchronously a mutex won't work since a mutex has
- * to be unlocked by the same process that locked it).
- *
- * So to make it less error prone and more efficient, we also have the ability
- * to use closures as locks:
- *
- * closure_init_unlocked();
- * closure_trylock();
- *
- * That's all we need for trylock() - the last closure_put() implicitly unlocks
- * it for you.  But for closure_lock(), we also need a wait list:
- *
- * struct closure_with_waitlist frobnicator_cl;
- *
- * closure_init_unlocked(&frobnicator_cl);
- * closure_lock(&frobnicator_cl);
- *
- * A closure_with_waitlist embeds a closure and a wait list - much like struct
- * delayed_work embeds a work item and a timer_list. The important thing is, use
- * it exactly like you would a regular closure and closure_put() will magically
- * handle everything for you.
  */
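
The waitlist primitives that remain still follow the pattern below - a hedged
sketch with hypothetical names, where a closure parks itself on a dedicated
wait list and resumes once someone wakes the list:

        static struct closure_waitlist journal_wait;

        static void journal_done(struct closure *cl);  /* hypothetical next stage */

        static void wait_for_journal(struct closure *cl)
        {
                closure_wait(&journal_wait, cl);        /* takes a ref, parks cl */
                continue_at(cl, journal_done, system_wq);
                /* journal_done runs once closure_wake_up() drops that ref */
        }

        static void journal_write_done(void)
        {
                closure_wake_up(&journal_wait);         /* resume all waiters */
        }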
 
 struct closure;
@@ -164,12 +106,6 @@ struct closure_waitlist {
        struct llist_head       list;
 };
 
-enum closure_type {
-       TYPE_closure                            = 0,
-       TYPE_closure_with_waitlist              = 1,
-       MAX_CLOSURE_TYPE                        = 1,
-};
-
 enum closure_state {
        /*
         * CLOSURE_WAITING: Set iff the closure is on a waitlist. Must be set by
@@ -224,8 +160,6 @@ struct closure {
 
        atomic_t                remaining;
 
-       enum closure_type       type;
-
 #ifdef CONFIG_BCACHE_CLOSURES_DEBUG
 #define CLOSURE_MAGIC_DEAD     0xc054dead
 #define CLOSURE_MAGIC_ALIVE    0xc054a11e
@@ -237,34 +171,12 @@ struct closure {
 #endif
 };
 
-struct closure_with_waitlist {
-       struct closure          cl;
-       struct closure_waitlist wait;
-};
-
-extern unsigned invalid_closure_type(void);
-
-#define __CLOSURE_TYPE(cl, _t)                                         \
-         __builtin_types_compatible_p(typeof(cl), struct _t)           \
-               ? TYPE_ ## _t :                                         \
-
-#define __closure_type(cl)                                             \
-(                                                                      \
-       __CLOSURE_TYPE(cl, closure)                                     \
-       __CLOSURE_TYPE(cl, closure_with_waitlist)                       \
-       invalid_closure_type()                                          \
-)
-
 void closure_sub(struct closure *cl, int v);
 void closure_put(struct closure *cl);
 void __closure_wake_up(struct closure_waitlist *list);
 bool closure_wait(struct closure_waitlist *list, struct closure *cl);
 void closure_sync(struct closure *cl);
 
-bool closure_trylock(struct closure *cl, struct closure *parent);
-void __closure_lock(struct closure *cl, struct closure *parent,
-                   struct closure_waitlist *wait_list);
-
 #ifdef CONFIG_BCACHE_CLOSURES_DEBUG
 
 void closure_debug_init(void);
@@ -293,134 +205,97 @@ static inline void closure_set_ret_ip(struct closure *cl)
 #endif
 }
 
-static inline void closure_get(struct closure *cl)
+static inline void closure_set_waiting(struct closure *cl, unsigned long f)
 {
 #ifdef CONFIG_BCACHE_CLOSURES_DEBUG
-       BUG_ON((atomic_inc_return(&cl->remaining) &
-               CLOSURE_REMAINING_MASK) <= 1);
-#else
-       atomic_inc(&cl->remaining);
+       cl->waiting_on = f;
 #endif
 }
 
-static inline void closure_set_stopped(struct closure *cl)
+static inline void __closure_end_sleep(struct closure *cl)
 {
-       atomic_sub(CLOSURE_RUNNING, &cl->remaining);
+       __set_current_state(TASK_RUNNING);
+
+       if (atomic_read(&cl->remaining) & CLOSURE_SLEEPING)
+               atomic_sub(CLOSURE_SLEEPING, &cl->remaining);
 }
 
-static inline bool closure_is_unlocked(struct closure *cl)
+static inline void __closure_start_sleep(struct closure *cl)
 {
-       return atomic_read(&cl->remaining) == -1;
+       closure_set_ip(cl);
+       cl->task = current;
+       set_current_state(TASK_UNINTERRUPTIBLE);
+
+       if (!(atomic_read(&cl->remaining) & CLOSURE_SLEEPING))
+               atomic_add(CLOSURE_SLEEPING, &cl->remaining);
 }
 
-static inline void do_closure_init(struct closure *cl, struct closure *parent,
-                                  bool running)
+static inline void closure_set_stopped(struct closure *cl)
 {
-       cl->parent = parent;
-       if (parent)
-               closure_get(parent);
-
-       if (running) {
-               closure_debug_create(cl);
-               atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER);
-       } else
-               atomic_set(&cl->remaining, -1);
+       atomic_sub(CLOSURE_RUNNING, &cl->remaining);
+}
 
+static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
+                                 struct workqueue_struct *wq)
+{
+       BUG_ON(object_is_on_stack(cl));
        closure_set_ip(cl);
+       cl->fn = fn;
+       cl->wq = wq;
+       /* ordered before the atomic_dec() in closure_put() */
+       smp_mb__before_atomic_dec();
 }
 
-/*
- * Hack to get at the embedded closure if there is one, by doing an unsafe cast:
- * the result of __closure_type() is thrown away, it's used merely for type
- * checking.
- */
-#define __to_internal_closure(cl)                              \
-({                                                             \
-       BUILD_BUG_ON(__closure_type(*cl) > MAX_CLOSURE_TYPE);   \
-       (struct closure *) cl;                                  \
-})
-
-#define closure_init_type(cl, parent, running)                 \
-do {                                                           \
-       struct closure *_cl = __to_internal_closure(cl);        \
-       _cl->type = __closure_type(*(cl));                      \
-       do_closure_init(_cl, parent, running);                  \
-} while (0)
+static inline void closure_queue(struct closure *cl)
+{
+       struct workqueue_struct *wq = cl->wq;
+       if (wq) {
+               INIT_WORK(&cl->work, cl->work.func);
+               BUG_ON(!queue_work(wq, &cl->work));
+       } else
+               cl->fn(cl);
+}
 
 /**
- * __closure_init() - Initialize a closure, skipping the memset()
- *
- * May be used instead of closure_init() when memory has already been zeroed.
+ * closure_get - increment a closure's refcount
  */
-#define __closure_init(cl, parent)                             \
-       closure_init_type(cl, parent, true)
+static inline void closure_get(struct closure *cl)
+{
+#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
+       BUG_ON((atomic_inc_return(&cl->remaining) &
+               CLOSURE_REMAINING_MASK) <= 1);
+#else
+       atomic_inc(&cl->remaining);
+#endif
+}
 
 /**
- * closure_init() - Initialize a closure, setting the refcount to 1
+ * closure_init - Initialize a closure, setting the refcount to 1
  * @cl:                closure to initialize
  * @parent:    parent of the new closure. cl will take a refcount on it for its
  *             lifetime; may be NULL.
  */
-#define closure_init(cl, parent)                               \
-do {                                                           \
-       memset((cl), 0, sizeof(*(cl)));                         \
-       __closure_init(cl, parent);                             \
-} while (0)
-
-static inline void closure_init_stack(struct closure *cl)
+static inline void closure_init(struct closure *cl, struct closure *parent)
 {
        memset(cl, 0, sizeof(struct closure));
-       atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER|CLOSURE_STACK);
-}
-
-/**
- * closure_init_unlocked() - Initialize a closure but leave it unlocked.
- * @cl:                closure to initialize
- *
- * For when the closure will be used as a lock. The closure may not be used
- * until after a closure_lock() or closure_trylock().
- */
-#define closure_init_unlocked(cl)                              \
-do {                                                           \
-       memset((cl), 0, sizeof(*(cl)));                         \
-       closure_init_type(cl, NULL, false);                     \
-} while (0)
-
-/**
- * closure_lock() - lock and initialize a closure.
- * @cl:                the closure to lock
- * @parent:    the new parent for this closure
- *
- * The closure must be of one of the types that has a waitlist (otherwise we
- * wouldn't be able to sleep on contention).
- *
- * @parent has exactly the same meaning as in closure_init(); if non null, the
- * closure will take a reference on @parent which will be released when it is
- * unlocked.
- */
-#define closure_lock(cl, parent)                               \
-       __closure_lock(__to_internal_closure(cl), parent, &(cl)->wait)
+       cl->parent = parent;
+       if (parent)
+               closure_get(parent);
 
-static inline void __closure_end_sleep(struct closure *cl)
-{
-       __set_current_state(TASK_RUNNING);
+       atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER);
 
-       if (atomic_read(&cl->remaining) & CLOSURE_SLEEPING)
-               atomic_sub(CLOSURE_SLEEPING, &cl->remaining);
+       closure_debug_create(cl);
+       closure_set_ip(cl);
 }
 
-static inline void __closure_start_sleep(struct closure *cl)
+static inline void closure_init_stack(struct closure *cl)
 {
-       closure_set_ip(cl);
-       cl->task = current;
-       set_current_state(TASK_UNINTERRUPTIBLE);
-
-       if (!(atomic_read(&cl->remaining) & CLOSURE_SLEEPING))
-               atomic_add(CLOSURE_SLEEPING, &cl->remaining);
+       memset(cl, 0, sizeof(struct closure));
+       atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER|CLOSURE_STACK);
 }
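
As a usage sketch of the two initializers (device and I/O helpers hypothetical):
closure_init() suits a closure embedded in a long-lived struct, while
closure_init_stack() plus closure_sync() gives synchronous waiting on the stack:

        static void read_sync(struct my_dev *d)
        {
                struct closure cl;

                closure_init_stack(&cl);
                submit_reads(d, &cl);   /* each in-flight I/O holds a closure_get() */
                closure_sync(&cl);      /* sleep until every ref has been put */
        }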
 
 /**
- * closure_wake_up() - wake up all closures on a wait list.
+ * closure_wake_up - wake up all closures on a wait list.
  */
 static inline void closure_wake_up(struct closure_waitlist *list)
 {
@@ -428,69 +303,19 @@ static inline void closure_wake_up(struct closure_waitlist *list)
        __closure_wake_up(list);
 }
 
-/*
- * Wait on an event, synchronously or asynchronously - analogous to wait_event()
- * but for closures.
- *
- * The loop is oddly structured so as to avoid a race; we must check the
- * condition again after we've added ourselves to the waitlist. We know if we were
- * already on the waitlist because closure_wait() returns false; thus, we only
- * schedule or break if closure_wait() returns false. If it returns true, we
- * just loop again - rechecking the condition.
- *
- * The __closure_wake_up() is necessary because we may race with the event
- * becoming true; i.e. we see event false -> wait -> recheck condition, but the
- * thread that made the event true may have called closure_wake_up() before we
- * added ourselves to the wait list.
- *
- * We have to call closure_sync() at the end instead of just
- * __closure_end_sleep() because a different thread might've called
- * closure_wake_up() before us and gotten preempted before they dropped the
- * refcount on our closure. If this was a stack allocated closure, that would be
- * bad.
+/**
+ * continue_at - jump to another function with barrier
+ *
+ * After @cl is no longer waiting on anything (i.e. all outstanding refs have
+ * been dropped with closure_put()), it will resume execution at @fn running out
+ * of @wq (or, if @wq is NULL, @fn will be called by closure_put() directly).
+ *
+ * NOTE: This macro expands to a return in the calling function!
+ *
+ * This is because after calling continue_at() you no longer have a ref on @cl,
+ * and whatever @cl owns may be freed out from under you - a running closure fn
+ * has a ref on its own closure which continue_at() drops.
  */
-#define closure_wait_event(list, cl, condition)                                \
-({                                                                     \
-       typeof(condition) ret;                                          \
-                                                                       \
-       while (1) {                                                     \
-               ret = (condition);                                      \
-               if (ret) {                                              \
-                       __closure_wake_up(list);                        \
-                       closure_sync(cl);                               \
-                       break;                                          \
-               }                                                       \
-                                                                       \
-               __closure_start_sleep(cl);                              \
-                                                                       \
-               if (!closure_wait(list, cl))                            \
-                       schedule();                                     \
-       }                                                               \
-                                                                       \
-       ret;                                                            \
-})
-
-static inline void closure_queue(struct closure *cl)
-{
-       struct workqueue_struct *wq = cl->wq;
-       if (wq) {
-               INIT_WORK(&cl->work, cl->work.func);
-               BUG_ON(!queue_work(wq, &cl->work));
-       } else
-               cl->fn(cl);
-}
-
-static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
-                                 struct workqueue_struct *wq)
-{
-       BUG_ON(object_is_on_stack(cl));
-       closure_set_ip(cl);
-       cl->fn = fn;
-       cl->wq = wq;
-       /* between atomic_dec() in closure_put() */
-       smp_mb__before_atomic_dec();
-}
-
 #define continue_at(_cl, _fn, _wq)                                     \
 do {                                                                   \
        set_closure_fn(_cl, _fn, _wq);                                  \
@@ -498,8 +323,28 @@ do {                                                                       \
        return;                                                         \
 } while (0)
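
A minimal sketch of the continue_at() pattern (struct, workqueue and helper
names hypothetical): each stage ends with continue_at(), after which the
function must not touch the closure again:

        static void write_done(struct closure *cl)
        {
                struct my_op *op = container_of(cl, struct my_op, cl);

                finish(op);
                closure_return(cl);     /* drops the ref on op's parent */
        }

        static void write_start(struct closure *cl)
        {
                struct my_op *op = container_of(cl, struct my_op, cl);

                submit_io(op);          /* I/O completion does closure_put(cl) */
                continue_at(cl, write_done, op->wq);
                /* unreachable: we no longer own a ref on cl */
        }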
 
+/**
+ * closure_return - finish execution of a closure
+ *
+ * This is used to indicate that @cl is finished: when all outstanding refs on
+ * @cl have been dropped @cl's ref on its parent closure (as passed to
+ * closure_init()) will be dropped, if one was specified - thus this can be
+ * thought of as returning to the parent closure.
+ */
 #define closure_return(_cl)    continue_at((_cl), NULL, NULL)
 
+/**
+ * continue_at_nobarrier - jump to another function without barrier
+ *
+ * Causes @fn to be executed out of @cl, in @wq context (or called directly if
+ * @wq is NULL).
+ *
+ * NOTE: like continue_at(), this macro expands to a return in the caller!
+ *
+ * The ref the caller of continue_at_nobarrier() had on @cl is now owned by @fn,
+ * thus it's not safe to touch anything protected by @cl after a
+ * continue_at_nobarrier().
+ */
 #define continue_at_nobarrier(_cl, _fn, _wq)                           \
 do {                                                                   \
        set_closure_fn(_cl, _fn, _wq);                                  \
@@ -507,6 +352,15 @@ do {                                                                       \
        return;                                                         \
 } while (0)
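
A short sketch of the nobarrier variant (names hypothetical): the caller's ref
is handed straight to @fn, typically to hop into workqueue context:

        static void heavy_work(struct closure *cl)
        {
                do_expensive_thing(container_of(cl, struct my_op, cl));
                closure_return(cl);
        }

        static void bounce(struct closure *cl)
        {
                continue_at_nobarrier(cl, heavy_work, system_wq);
                /* unreachable: the ref on cl now belongs to heavy_work() */
        }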
 
+/**
+ * closure_return_with_destructor - finish execution of a closure, with destructor
+ *
+ * Works like closure_return(), except @destructor will be called when all
+ * outstanding refs on @cl have been dropped; @destructor may be used to safely
+ * free the memory occupied by @cl, and it is called with the ref on the parent
+ * closure still held - so @destructor could safely return an item to a
+ * freelist protected by @cl's parent.
+ */
 #define closure_return_with_destructor(_cl, _destructor)               \
 do {                                                                   \
        set_closure_fn(_cl, _destructor, NULL);                         \
@@ -514,6 +368,13 @@ do {                                                                       \
        return;                                                         \
 } while (0)
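
A minimal sketch (hypothetical op structure) of pairing this with a
heap-allocated closure:

        static void op_destroy(struct closure *cl)
        {
                /* all refs are gone; the parent ref is still held while we run */
                kfree(container_of(cl, struct my_op, cl));
        }

        static void op_done(struct closure *cl)
        {
                closure_return_with_destructor(cl, op_destroy);
        }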
 
+/**
+ * closure_call - execute @fn out of a new, uninitialized closure
+ *
+ * Typically used when running out of one closure, and we want to run @fn
+ * asynchronously out of a new closure - @parent will then wait for @cl to
+ * finish.
+ */
 static inline void closure_call(struct closure *cl, closure_fn fn,
                                struct workqueue_struct *wq,
                                struct closure *parent)
@@ -522,12 +383,4 @@ static inline void closure_call(struct closure *cl, closure_fn fn,
        continue_at_nobarrier(cl, fn, wq);
 }
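
And a usage sketch (names hypothetical): fire off a child operation whose
completion the parent will then wait for:

        static void child_fn(struct closure *cl);      /* hypothetical child stage */

        static void start_child(struct my_op *op, struct closure *parent)
        {
                closure_call(&op->cl, child_fn, op->wq, parent);
                /* parent waits on op->cl; child_fn ends with closure_return() */
        }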
 
-static inline void closure_trylock_call(struct closure *cl, closure_fn fn,
-                                       struct workqueue_struct *wq,
-                                       struct closure *parent)
-{
-       if (closure_trylock(cl, parent))
-               continue_at_nobarrier(cl, fn, wq);
-}
-
 #endif /* _LINUX_CLOSURE_H */
index 264fcfb..8b1f1d5 100644 (file)
@@ -8,6 +8,7 @@
 #include "bcache.h"
 #include "btree.h"
 #include "debug.h"
+#include "extents.h"
 
 #include <linux/console.h>
 #include <linux/debugfs.h>
 
 static struct dentry *debug;
 
-const char *bch_ptr_status(struct cache_set *c, const struct bkey *k)
-{
-       unsigned i;
-
-       for (i = 0; i < KEY_PTRS(k); i++)
-               if (ptr_available(c, k, i)) {
-                       struct cache *ca = PTR_CACHE(c, k, i);
-                       size_t bucket = PTR_BUCKET_NR(c, k, i);
-                       size_t r = bucket_remainder(c, PTR_OFFSET(k, i));
-
-                       if (KEY_SIZE(k) + r > c->sb.bucket_size)
-                               return "bad, length too big";
-                       if (bucket <  ca->sb.first_bucket)
-                               return "bad, short offset";
-                       if (bucket >= ca->sb.nbuckets)
-                               return "bad, offset past end of device";
-                       if (ptr_stale(c, k, i))
-                               return "stale";
-               }
-
-       if (!bkey_cmp(k, &ZERO_KEY))
-               return "bad, null key";
-       if (!KEY_PTRS(k))
-               return "bad, no pointers";
-       if (!KEY_SIZE(k))
-               return "zeroed key";
-       return "";
-}
-
-int bch_bkey_to_text(char *buf, size_t size, const struct bkey *k)
-{
-       unsigned i = 0;
-       char *out = buf, *end = buf + size;
-
-#define p(...) (out += scnprintf(out, end - out, __VA_ARGS__))
-
-       p("%llu:%llu len %llu -> [", KEY_INODE(k), KEY_OFFSET(k), KEY_SIZE(k));
-
-       if (KEY_PTRS(k))
-               while (1) {
-                       p("%llu:%llu gen %llu",
-                         PTR_DEV(k, i), PTR_OFFSET(k, i), PTR_GEN(k, i));
-
-                       if (++i == KEY_PTRS(k))
-                               break;
-
-                       p(", ");
-               }
-
-       p("]");
-
-       if (KEY_DIRTY(k))
-               p(" dirty");
-       if (KEY_CSUM(k))
-               p(" cs%llu %llx", KEY_CSUM(k), k->ptr[1]);
-#undef p
-       return out - buf;
-}
-
 #ifdef CONFIG_BCACHE_DEBUG
 
-static void dump_bset(struct btree *b, struct bset *i)
-{
-       struct bkey *k, *next;
-       unsigned j;
-       char buf[80];
-
-       for (k = i->start; k < end(i); k = next) {
-               next = bkey_next(k);
-
-               bch_bkey_to_text(buf, sizeof(buf), k);
-               printk(KERN_ERR "block %zu key %zi/%u: %s", index(i, b),
-                      (uint64_t *) k - i->d, i->keys, buf);
-
-               for (j = 0; j < KEY_PTRS(k); j++) {
-                       size_t n = PTR_BUCKET_NR(b->c, k, j);
-                       printk(" bucket %zu", n);
-
-                       if (n >= b->c->sb.first_bucket && n < b->c->sb.nbuckets)
-                               printk(" prio %i",
-                                      PTR_BUCKET(b->c, k, j)->prio);
-               }
+#define for_each_written_bset(b, start, i)                             \
+       for (i = (start);                                               \
+            (void *) i < (void *) (start) + (KEY_SIZE(&b->key) << 9) &&\
+            i->seq == (start)->seq;                                    \
+            i = (void *) i + set_blocks(i, block_bytes(b->c)) *        \
+                block_bytes(b->c))
 
-               printk(" %s\n", bch_ptr_status(b->c, k));
-
-               if (next < end(i) &&
-                   bkey_cmp(k, !b->level ? &START_KEY(next) : next) > 0)
-                       printk(KERN_ERR "Key skipped backwards\n");
-       }
-}
-
-static void bch_dump_bucket(struct btree *b)
-{
-       unsigned i;
-
-       console_lock();
-       for (i = 0; i <= b->nsets; i++)
-               dump_bset(b, b->sets[i].data);
-       console_unlock();
-}
-
-void bch_btree_verify(struct btree *b, struct bset *new)
+void bch_btree_verify(struct btree *b)
 {
        struct btree *v = b->c->verify_data;
-       struct closure cl;
-       closure_init_stack(&cl);
+       struct bset *ondisk, *sorted, *inmemory;
+       struct bio *bio;
 
-       if (!b->c->verify)
+       if (!b->c->verify || !b->c->verify_ondisk)
                return;
 
-       closure_wait_event(&b->io.wait, &cl,
-                          atomic_read(&b->io.cl.remaining) == -1);
-
+       down(&b->io_mutex);
        mutex_lock(&b->c->verify_lock);
 
+       ondisk = b->c->verify_ondisk;
+       sorted = b->c->verify_data->keys.set->data;
+       inmemory = b->keys.set->data;
+
        bkey_copy(&v->key, &b->key);
        v->written = 0;
        v->level = b->level;
+       v->keys.ops = b->keys.ops;
+
+       bio = bch_bbio_alloc(b->c);
+       bio->bi_bdev            = PTR_CACHE(b->c, &b->key, 0)->bdev;
+       bio->bi_iter.bi_sector  = PTR_OFFSET(&b->key, 0);
+       bio->bi_iter.bi_size    = KEY_SIZE(&v->key) << 9;
+       bch_bio_map(bio, sorted);
 
-       bch_btree_node_read(v);
-       closure_wait_event(&v->io.wait, &cl,
-                          atomic_read(&b->io.cl.remaining) == -1);
+       submit_bio_wait(REQ_META|READ_SYNC, bio);
+       bch_bbio_free(bio, b->c);
 
-       if (new->keys != v->sets[0].data->keys ||
-           memcmp(new->start,
-                  v->sets[0].data->start,
-                  (void *) end(new) - (void *) new->start)) {
-               unsigned i, j;
+       memcpy(ondisk, sorted, KEY_SIZE(&v->key) << 9);
+
+       bch_btree_node_read_done(v);
+       sorted = v->keys.set->data;
+
+       if (inmemory->keys != sorted->keys ||
+           memcmp(inmemory->start,
+                  sorted->start,
+                  (void *) bset_bkey_last(inmemory) - (void *) inmemory->start)) {
+               struct bset *i;
+               unsigned j;
 
                console_lock();
 
-               printk(KERN_ERR "*** original memory node:\n");
-               for (i = 0; i <= b->nsets; i++)
-                       dump_bset(b, b->sets[i].data);
+               printk(KERN_ERR "*** in memory:\n");
+               bch_dump_bset(&b->keys, inmemory, 0);
 
-               printk(KERN_ERR "*** sorted memory node:\n");
-               dump_bset(b, new);
+               printk(KERN_ERR "*** read back in:\n");
+               bch_dump_bset(&v->keys, sorted, 0);
 
-               printk(KERN_ERR "*** on disk node:\n");
-               dump_bset(v, v->sets[0].data);
+               for_each_written_bset(b, ondisk, i) {
+                       unsigned block = ((void *) i - (void *) ondisk) /
+                               block_bytes(b->c);
+
+                       printk(KERN_ERR "*** on disk block %u:\n", block);
+                       bch_dump_bset(&b->keys, i, block);
+               }
 
-               for (j = 0; j < new->keys; j++)
-                       if (new->d[j] != v->sets[0].data->d[j])
+               printk(KERN_ERR "*** block %zu not written\n",
+                      ((void *) i - (void *) ondisk) / block_bytes(b->c));
+
+               for (j = 0; j < inmemory->keys; j++)
+                       if (inmemory->d[j] != sorted->d[j])
                                break;
 
+               printk(KERN_ERR "b->written %u\n", b->written);
+
                console_unlock();
                panic("verify failed at %u\n", j);
        }
 
        mutex_unlock(&b->c->verify_lock);
+       up(&b->io_mutex);
 }
 
 void bch_data_verify(struct cached_dev *dc, struct bio *bio)
 {
        char name[BDEVNAME_SIZE];
        struct bio *check;
-       struct bio_vec *bv;
+       struct bio_vec bv, *bv2;
+       struct bvec_iter iter;
        int i;
 
        check = bio_clone(bio, GFP_NOIO);
@@ -185,95 +119,27 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio)
 
        submit_bio_wait(READ_SYNC, check);
 
-       bio_for_each_segment(bv, bio, i) {
-               void *p1 = kmap_atomic(bv->bv_page);
-               void *p2 = page_address(check->bi_io_vec[i].bv_page);
+       bio_for_each_segment(bv, bio, iter) {
+               void *p1 = kmap_atomic(bv.bv_page);
+               void *p2 = page_address(check->bi_io_vec[iter.bi_idx].bv_page);
 
-               cache_set_err_on(memcmp(p1 + bv->bv_offset,
-                                       p2 + bv->bv_offset,
-                                       bv->bv_len),
+               cache_set_err_on(memcmp(p1 + bv.bv_offset,
+                                       p2 + bv.bv_offset,
+                                       bv.bv_len),
                                 dc->disk.c,
                                 "verify failed at dev %s sector %llu",
                                 bdevname(dc->bdev, name),
-                                (uint64_t) bio->bi_sector);
+                                (uint64_t) bio->bi_iter.bi_sector);
 
                kunmap_atomic(p1);
        }
 
-       bio_for_each_segment_all(bv, check, i)
-               __free_page(bv->bv_page);
+       bio_for_each_segment_all(bv2, check, i)
+               __free_page(bv2->bv_page);
 out_put:
        bio_put(check);
 }
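
The hunk above is the immutable-biovec conversion: bio_for_each_segment() now
yields a struct bio_vec by value and advances a struct bvec_iter. A minimal
sketch of the new iteration style (hypothetical helper):

        static unsigned count_bytes(struct bio *bio)
        {
                struct bio_vec bv;
                struct bvec_iter iter;
                unsigned bytes = 0;

                bio_for_each_segment(bv, bio, iter)     /* bv is a copy, not a pointer */
                        bytes += bv.bv_len;

                return bytes;
        }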
 
-int __bch_count_data(struct btree *b)
-{
-       unsigned ret = 0;
-       struct btree_iter iter;
-       struct bkey *k;
-
-       if (!b->level)
-               for_each_key(b, k, &iter)
-                       ret += KEY_SIZE(k);
-       return ret;
-}
-
-void __bch_check_keys(struct btree *b, const char *fmt, ...)
-{
-       va_list args;
-       struct bkey *k, *p = NULL;
-       struct btree_iter iter;
-       const char *err;
-
-       for_each_key(b, k, &iter) {
-               if (!b->level) {
-                       err = "Keys out of order";
-                       if (p && bkey_cmp(&START_KEY(p), &START_KEY(k)) > 0)
-                               goto bug;
-
-                       if (bch_ptr_invalid(b, k))
-                               continue;
-
-                       err =  "Overlapping keys";
-                       if (p && bkey_cmp(p, &START_KEY(k)) > 0)
-                               goto bug;
-               } else {
-                       if (bch_ptr_bad(b, k))
-                               continue;
-
-                       err = "Duplicate keys";
-                       if (p && !bkey_cmp(p, k))
-                               goto bug;
-               }
-               p = k;
-       }
-
-       err = "Key larger than btree node key";
-       if (p && bkey_cmp(p, &b->key) > 0)
-               goto bug;
-
-       return;
-bug:
-       bch_dump_bucket(b);
-
-       va_start(args, fmt);
-       vprintk(fmt, args);
-       va_end(args);
-
-       panic("bcache error: %s:\n", err);
-}
-
-void bch_btree_iter_next_check(struct btree_iter *iter)
-{
-       struct bkey *k = iter->data->k, *next = bkey_next(k);
-
-       if (next < iter->data->end &&
-           bkey_cmp(k, iter->b->level ? next : &START_KEY(next)) > 0) {
-               bch_dump_bucket(iter->b);
-               panic("Key skipped backwards\n");
-       }
-}
-
 #endif
 
 #ifdef CONFIG_DEBUG_FS
@@ -320,7 +186,7 @@ static ssize_t bch_dump_read(struct file *file, char __user *buf,
                if (!w)
                        break;
 
-               bch_bkey_to_text(kbuf, sizeof(kbuf), &w->key);
+               bch_extent_to_text(kbuf, sizeof(kbuf), &w->key);
                i->bytes = snprintf(i->buf, PAGE_SIZE, "%s\n", kbuf);
                bch_keybuf_del(&i->keys, w);
        }
index 2ede60e..1f63c19 100644 (file)
@@ -1,47 +1,30 @@
 #ifndef _BCACHE_DEBUG_H
 #define _BCACHE_DEBUG_H
 
-/* Btree/bkey debug printing */
-
-int bch_bkey_to_text(char *buf, size_t size, const struct bkey *k);
+struct bio;
+struct cached_dev;
+struct cache_set;
 
 #ifdef CONFIG_BCACHE_DEBUG
 
-void bch_btree_verify(struct btree *, struct bset *);
+void bch_btree_verify(struct btree *);
 void bch_data_verify(struct cached_dev *, struct bio *);
-int __bch_count_data(struct btree *);
-void __bch_check_keys(struct btree *, const char *, ...);
-void bch_btree_iter_next_check(struct btree_iter *);
 
-#define EBUG_ON(cond)                  BUG_ON(cond)
 #define expensive_debug_checks(c)      ((c)->expensive_debug_checks)
 #define key_merging_disabled(c)                ((c)->key_merging_disabled)
 #define bypass_torture_test(d)         ((d)->bypass_torture_test)
 
 #else /* DEBUG */
 
-static inline void bch_btree_verify(struct btree *b, struct bset *i) {}
+static inline void bch_btree_verify(struct btree *b) {}
 static inline void bch_data_verify(struct cached_dev *dc, struct bio *bio) {}
-static inline int __bch_count_data(struct btree *b) { return -1; }
-static inline void __bch_check_keys(struct btree *b, const char *fmt, ...) {}
-static inline void bch_btree_iter_next_check(struct btree_iter *iter) {}
 
-#define EBUG_ON(cond)                  do { if (cond); } while (0)
 #define expensive_debug_checks(c)      0
 #define key_merging_disabled(c)                0
 #define bypass_torture_test(d)         0
 
 #endif
 
-#define bch_count_data(b)                                              \
-       (expensive_debug_checks((b)->c) ? __bch_count_data(b) : -1)
-
-#define bch_check_keys(b, ...)                                         \
-do {                                                                   \
-       if (expensive_debug_checks((b)->c))                             \
-               __bch_check_keys(b, __VA_ARGS__);                       \
-} while (0)
-
 #ifdef CONFIG_DEBUG_FS
 void bch_debug_init_cache_set(struct cache_set *);
 #else
diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c
new file mode 100644 (file)
index 0000000..c3ead58
--- /dev/null
@@ -0,0 +1,616 @@
+/*
+ * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
+ *
+ * Uses a block device as cache for other block devices; optimized for SSDs.
+ * All allocation is done in buckets, which should match the erase block size
+ * of the device.
+ *
+ * Buckets containing cached data are kept on a heap sorted by priority;
+ * bucket priority is increased on cache hit, and periodically all the buckets
+ * on the heap have their priority scaled down. This currently is just used as
+ * an LRU but in the future should allow for more intelligent heuristics.
+ *
+ * Buckets have an 8 bit counter; freeing is accomplished by incrementing the
+ * counter. Garbage collection is used to remove stale pointers.
+ *
+ * Indexing is done via a btree; nodes are not necessarily fully sorted, rather
+ * as keys are inserted we only sort the pages that have not yet been written.
+ * When garbage collection is run, we re-sort the entire node.
+ *
+ * All configuration is done via sysfs; see Documentation/bcache.txt.
+ */
+
+#include "bcache.h"
+#include "btree.h"
+#include "debug.h"
+#include "extents.h"
+#include "writeback.h"
+
+static void sort_key_next(struct btree_iter *iter,
+                         struct btree_iter_set *i)
+{
+       i->k = bkey_next(i->k);
+
+       if (i->k == i->end)
+               *i = iter->data[--iter->used];
+}
+
+static bool bch_key_sort_cmp(struct btree_iter_set l,
+                            struct btree_iter_set r)
+{
+       int64_t c = bkey_cmp(l.k, r.k);
+
+       return c ? c > 0 : l.k < r.k;
+}
+
+static bool __ptr_invalid(struct cache_set *c, const struct bkey *k)
+{
+       unsigned i;
+
+       for (i = 0; i < KEY_PTRS(k); i++)
+               if (ptr_available(c, k, i)) {
+                       struct cache *ca = PTR_CACHE(c, k, i);
+                       size_t bucket = PTR_BUCKET_NR(c, k, i);
+                       size_t r = bucket_remainder(c, PTR_OFFSET(k, i));
+
+                       if (KEY_SIZE(k) + r > c->sb.bucket_size ||
+                           bucket <  ca->sb.first_bucket ||
+                           bucket >= ca->sb.nbuckets)
+                               return true;
+               }
+
+       return false;
+}
+
+/* Common among btree and extent ptrs */
+
+static const char *bch_ptr_status(struct cache_set *c, const struct bkey *k)
+{
+       unsigned i;
+
+       for (i = 0; i < KEY_PTRS(k); i++)
+               if (ptr_available(c, k, i)) {
+                       struct cache *ca = PTR_CACHE(c, k, i);
+                       size_t bucket = PTR_BUCKET_NR(c, k, i);
+                       size_t r = bucket_remainder(c, PTR_OFFSET(k, i));
+
+                       if (KEY_SIZE(k) + r > c->sb.bucket_size)
+                               return "bad, length too big";
+                       if (bucket <  ca->sb.first_bucket)
+                               return "bad, short offset";
+                       if (bucket >= ca->sb.nbuckets)
+                               return "bad, offset past end of device";
+                       if (ptr_stale(c, k, i))
+                               return "stale";
+               }
+
+       if (!bkey_cmp(k, &ZERO_KEY))
+               return "bad, null key";
+       if (!KEY_PTRS(k))
+               return "bad, no pointers";
+       if (!KEY_SIZE(k))
+               return "zeroed key";
+       return "";
+}
+
+void bch_extent_to_text(char *buf, size_t size, const struct bkey *k)
+{
+       unsigned i = 0;
+       char *out = buf, *end = buf + size;
+
+#define p(...) (out += scnprintf(out, end - out, __VA_ARGS__))
+
+       p("%llu:%llu len %llu -> [", KEY_INODE(k), KEY_START(k), KEY_SIZE(k));
+
+       for (i = 0; i < KEY_PTRS(k); i++) {
+               if (i)
+                       p(", ");
+
+               if (PTR_DEV(k, i) == PTR_CHECK_DEV)
+                       p("check dev");
+               else
+                       p("%llu:%llu gen %llu", PTR_DEV(k, i),
+                         PTR_OFFSET(k, i), PTR_GEN(k, i));
+       }
+
+       p("]");
+
+       if (KEY_DIRTY(k))
+               p(" dirty");
+       if (KEY_CSUM(k))
+               p(" cs%llu %llx", KEY_CSUM(k), k->ptr[1]);
+#undef p
+}
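
For reference, a hypothetical extent with one pointer renders along these lines
(values invented): inode:start, length, then dev:offset and generation for each
pointer, plus flags:

        1:4096 len 512 -> [0:1024 gen 5] dirty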
+
+static void bch_bkey_dump(struct btree_keys *keys, const struct bkey *k)
+{
+       struct btree *b = container_of(keys, struct btree, keys);
+       unsigned j;
+       char buf[80];
+
+       bch_extent_to_text(buf, sizeof(buf), k);
+       printk(" %s", buf);
+
+       for (j = 0; j < KEY_PTRS(k); j++) {
+               size_t n = PTR_BUCKET_NR(b->c, k, j);
+               printk(" bucket %zu", n);
+
+               if (n >= b->c->sb.first_bucket && n < b->c->sb.nbuckets)
+                       printk(" prio %i",
+                              PTR_BUCKET(b->c, k, j)->prio);
+       }
+
+       printk(" %s\n", bch_ptr_status(b->c, k));
+}
+
+/* Btree ptrs */
+
+bool __bch_btree_ptr_invalid(struct cache_set *c, const struct bkey *k)
+{
+       char buf[80];
+
+       if (!KEY_PTRS(k) || !KEY_SIZE(k) || KEY_DIRTY(k))
+               goto bad;
+
+       if (__ptr_invalid(c, k))
+               goto bad;
+
+       return false;
+bad:
+       bch_extent_to_text(buf, sizeof(buf), k);
+       cache_bug(c, "spotted btree ptr %s: %s", buf, bch_ptr_status(c, k));
+       return true;
+}
+
+static bool bch_btree_ptr_invalid(struct btree_keys *bk, const struct bkey *k)
+{
+       struct btree *b = container_of(bk, struct btree, keys);
+       return __bch_btree_ptr_invalid(b->c, k);
+}
+
+static bool btree_ptr_bad_expensive(struct btree *b, const struct bkey *k)
+{
+       unsigned i;
+       char buf[80];
+       struct bucket *g;
+
+       if (mutex_trylock(&b->c->bucket_lock)) {
+               for (i = 0; i < KEY_PTRS(k); i++)
+                       if (ptr_available(b->c, k, i)) {
+                               g = PTR_BUCKET(b->c, k, i);
+
+                               if (KEY_DIRTY(k) ||
+                                   g->prio != BTREE_PRIO ||
+                                   (b->c->gc_mark_valid &&
+                                    GC_MARK(g) != GC_MARK_METADATA))
+                                       goto err;
+                       }
+
+               mutex_unlock(&b->c->bucket_lock);
+       }
+
+       return false;
+err:
+       mutex_unlock(&b->c->bucket_lock);
+       bch_extent_to_text(buf, sizeof(buf), k);
+       btree_bug(b,
+"inconsistent btree pointer %s: bucket %li pin %i prio %i gen %i last_gc %i mark %llu gc_gen %i",
+                 buf, PTR_BUCKET_NR(b->c, k, i), atomic_read(&g->pin),
+                 g->prio, g->gen, g->last_gc, GC_MARK(g), g->gc_gen);
+       return true;
+}
+
+static bool bch_btree_ptr_bad(struct btree_keys *bk, const struct bkey *k)
+{
+       struct btree *b = container_of(bk, struct btree, keys);
+       unsigned i;
+
+       if (!bkey_cmp(k, &ZERO_KEY) ||
+           !KEY_PTRS(k) ||
+           bch_ptr_invalid(bk, k))
+               return true;
+
+       for (i = 0; i < KEY_PTRS(k); i++)
+               if (!ptr_available(b->c, k, i) ||
+                   ptr_stale(b->c, k, i))
+                       return true;
+
+       if (expensive_debug_checks(b->c) &&
+           btree_ptr_bad_expensive(b, k))
+               return true;
+
+       return false;
+}
+
+static bool bch_btree_ptr_insert_fixup(struct btree_keys *bk,
+                                      struct bkey *insert,
+                                      struct btree_iter *iter,
+                                      struct bkey *replace_key)
+{
+       struct btree *b = container_of(bk, struct btree, keys);
+
+       if (!KEY_OFFSET(insert))
+               btree_current_write(b)->prio_blocked++;
+
+       return false;
+}
+
+const struct btree_keys_ops bch_btree_keys_ops = {
+       .sort_cmp       = bch_key_sort_cmp,
+       .insert_fixup   = bch_btree_ptr_insert_fixup,
+       .key_invalid    = bch_btree_ptr_invalid,
+       .key_bad        = bch_btree_ptr_bad,
+       .key_to_text    = bch_extent_to_text,
+       .key_dump       = bch_bkey_dump,
+};
+
+/* Extents */
+
+/*
+ * Returns true if l > r - unless l == r, in which case returns true if l is
+ * older than r.
+ *
+ * Necessary for btree_sort_fixup() - if there are multiple keys that compare
+ * equal in different sets, we have to process them newest to oldest.
+ */
+static bool bch_extent_sort_cmp(struct btree_iter_set l,
+                               struct btree_iter_set r)
+{
+       int64_t c = bkey_cmp(&START_KEY(l.k), &START_KEY(r.k));
+
+       return c ? c > 0 : l.k < r.k;
+}
+
+static struct bkey *bch_extent_sort_fixup(struct btree_iter *iter,
+                                         struct bkey *tmp)
+{
+       while (iter->used > 1) {
+               struct btree_iter_set *top = iter->data, *i = top + 1;
+
+               if (iter->used > 2 &&
+                   bch_extent_sort_cmp(i[0], i[1]))
+                       i++;
+
+               if (bkey_cmp(top->k, &START_KEY(i->k)) <= 0)
+                       break;
+
+               if (!KEY_SIZE(i->k)) {
+                       sort_key_next(iter, i);
+                       heap_sift(iter, i - top, bch_extent_sort_cmp);
+                       continue;
+               }
+
+               if (top->k > i->k) {
+                       if (bkey_cmp(top->k, i->k) >= 0)
+                               sort_key_next(iter, i);
+                       else
+                               bch_cut_front(top->k, i->k);
+
+                       heap_sift(iter, i - top, bch_extent_sort_cmp);
+               } else {
+                       /* can't happen because of comparison func */
+                       BUG_ON(!bkey_cmp(&START_KEY(top->k), &START_KEY(i->k)));
+
+                       if (bkey_cmp(i->k, top->k) < 0) {
+                               bkey_copy(tmp, top->k);
+
+                               bch_cut_back(&START_KEY(i->k), tmp);
+                               bch_cut_front(i->k, top->k);
+                               heap_sift(iter, 0, bch_extent_sort_cmp);
+
+                               return tmp;
+                       } else {
+                               bch_cut_back(&START_KEY(i->k), top->k);
+                       }
+               }
+       }
+
+       return NULL;
+}
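
Concretely (a hedged example): if the newer set's top key covers sectors 0..8
and an older set holds a key covering 4..12, the fixup calls bch_cut_front() on
the older key so the sorted output becomes 0..8 followed by 8..12 - on overlap,
newer data always wins.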
+
+static bool bch_extent_insert_fixup(struct btree_keys *b,
+                                   struct bkey *insert,
+                                   struct btree_iter *iter,
+                                   struct bkey *replace_key)
+{
+       struct cache_set *c = container_of(b, struct btree, keys)->c;
+
+       void subtract_dirty(struct bkey *k, uint64_t offset, int sectors)
+       {
+               if (KEY_DIRTY(k))
+                       bcache_dev_sectors_dirty_add(c, KEY_INODE(k),
+                                                    offset, -sectors);
+       }
+
+       uint64_t old_offset;
+       unsigned old_size, sectors_found = 0;
+
+       BUG_ON(!KEY_OFFSET(insert));
+       BUG_ON(!KEY_SIZE(insert));
+
+       while (1) {
+               struct bkey *k = bch_btree_iter_next(iter);
+               if (!k)
+                       break;
+
+               if (bkey_cmp(&START_KEY(k), insert) >= 0) {
+                       if (KEY_SIZE(k))
+                               break;
+                       else
+                               continue;
+               }
+
+               if (bkey_cmp(k, &START_KEY(insert)) <= 0)
+                       continue;
+
+               old_offset = KEY_START(k);
+               old_size = KEY_SIZE(k);
+
+               /*
+                * We might overlap with 0 size extents; we can't skip these
+                * because if they're in the set we're inserting to we have to
+                * adjust them so they don't overlap with the key we're
+                * inserting. But we don't want to check them for replace
+                * operations.
+                */
+
+               if (replace_key && KEY_SIZE(k)) {
+                       /*
+                        * k might have been split since we inserted/found the
+                        * key we're replacing
+                        */
+                       unsigned i;
+                       uint64_t offset = KEY_START(k) -
+                               KEY_START(replace_key);
+
+                       /* But it must be a subset of the replace key */
+                       if (KEY_START(k) < KEY_START(replace_key) ||
+                           KEY_OFFSET(k) > KEY_OFFSET(replace_key))
+                               goto check_failed;
+
+                       /* We didn't find a key that we were supposed to */
+                       if (KEY_START(k) > KEY_START(insert) + sectors_found)
+                               goto check_failed;
+
+                       if (!bch_bkey_equal_header(k, replace_key))
+                               goto check_failed;
+
+                       /* skip past gen */
+                       offset <<= 8;
+
+                       BUG_ON(!KEY_PTRS(replace_key));
+
+                       for (i = 0; i < KEY_PTRS(replace_key); i++)
+                               if (k->ptr[i] != replace_key->ptr[i] + offset)
+                                       goto check_failed;
+
+                       sectors_found = KEY_OFFSET(k) - KEY_START(insert);
+               }
+
+               if (bkey_cmp(insert, k) < 0 &&
+                   bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0) {
+                       /*
+                        * We overlapped in the middle of an existing key: that
+                        * means we have to split the old key. But we have to do
+                        * slightly different things depending on whether the
+                        * old key has been written out yet.
+                        */
+
+                       struct bkey *top;
+
+                       subtract_dirty(k, KEY_START(insert), KEY_SIZE(insert));
+
+                       if (bkey_written(b, k)) {
+                               /*
+                                * We insert a new key to cover the top of the
+                                * old key, and the old key is modified in place
+                                * to represent the bottom split.
+                                *
+                                * It's completely arbitrary whether the new key
+                                * is the top or the bottom, but it has to match
+                                * up with what btree_sort_fixup() does - it
+                                * doesn't check for this kind of overlap, it
+                                * depends on us inserting a new key for the top
+                                * here.
+                                */
+                               top = bch_bset_search(b, bset_tree_last(b),
+                                                     insert);
+                               bch_bset_insert(b, top, k);
+                       } else {
+                               BKEY_PADDED(key) temp;
+                               bkey_copy(&temp.key, k);
+                               bch_bset_insert(b, k, &temp.key);
+                               top = bkey_next(k);
+                       }
+
+                       bch_cut_front(insert, top);
+                       bch_cut_back(&START_KEY(insert), k);
+                       bch_bset_fix_invalidated_key(b, k);
+                       goto out;
+               }
+
+               if (bkey_cmp(insert, k) < 0) {
+                       bch_cut_front(insert, k);
+               } else {
+                       if (bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0)
+                               old_offset = KEY_START(insert);
+
+                       if (bkey_written(b, k) &&
+                           bkey_cmp(&START_KEY(insert), &START_KEY(k)) <= 0) {
+                               /*
+                                * Completely overwrote, so we don't have to
+                                * invalidate the binary search tree
+                                */
+                               bch_cut_front(k, k);
+                       } else {
+                               __bch_cut_back(&START_KEY(insert), k);
+                               bch_bset_fix_invalidated_key(b, k);
+                       }
+               }
+
+               subtract_dirty(k, old_offset, old_size - KEY_SIZE(k));
+       }
+
+check_failed:
+       if (replace_key) {
+               if (!sectors_found) {
+                       return true;
+               } else if (sectors_found < KEY_SIZE(insert)) {
+                       SET_KEY_OFFSET(insert, KEY_OFFSET(insert) -
+                                      (KEY_SIZE(insert) - sectors_found));
+                       SET_KEY_SIZE(insert, sectors_found);
+               }
+       }
+out:
+       if (KEY_DIRTY(insert))
+               bcache_dev_sectors_dirty_add(c, KEY_INODE(insert),
+                                            KEY_START(insert),
+                                            KEY_SIZE(insert));
+
+       return false;
+}
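
As a worked example of the middle-overlap path above: inserting a key covering
sectors 4..8 on top of an existing written key covering 0..12 inserts a new key
for the top slice (8..12) and cuts the existing key back to 0..4, matching the
overlap handling btree_sort_fixup() relies on.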
+
+static bool bch_extent_invalid(struct btree_keys *bk, const struct bkey *k)
+{
+       struct btree *b = container_of(bk, struct btree, keys);
+       char buf[80];
+
+       if (!KEY_SIZE(k))
+               return true;
+
+       if (KEY_SIZE(k) > KEY_OFFSET(k))
+               goto bad;
+
+       if (__ptr_invalid(b->c, k))
+               goto bad;
+
+       return false;
+bad:
+       bch_extent_to_text(buf, sizeof(buf), k);
+       cache_bug(b->c, "spotted extent %s: %s", buf, bch_ptr_status(b->c, k));
+       return true;
+}
+
+static bool bch_extent_bad_expensive(struct btree *b, const struct bkey *k,
+                                    unsigned ptr)
+{
+       struct bucket *g = PTR_BUCKET(b->c, k, ptr);
+       char buf[80];
+
+       if (mutex_trylock(&b->c->bucket_lock)) {
+               if (b->c->gc_mark_valid &&
+                   ((GC_MARK(g) != GC_MARK_DIRTY &&
+                     KEY_DIRTY(k)) ||
+                    GC_MARK(g) == GC_MARK_METADATA))
+                       goto err;
+
+               if (g->prio == BTREE_PRIO)
+                       goto err;
+
+               mutex_unlock(&b->c->bucket_lock);
+       }
+
+       return false;
+err:
+       mutex_unlock(&b->c->bucket_lock);
+       bch_extent_to_text(buf, sizeof(buf), k);
+       btree_bug(b,
+"inconsistent extent pointer %s:\nbucket %zu pin %i prio %i gen %i last_gc %i mark %llu gc_gen %i",
+                 buf, PTR_BUCKET_NR(b->c, k, ptr), atomic_read(&g->pin),
+                 g->prio, g->gen, g->last_gc, GC_MARK(g), g->gc_gen);
+       return true;
+}
+
+static bool bch_extent_bad(struct btree_keys *bk, const struct bkey *k)
+{
+       struct btree *b = container_of(bk, struct btree, keys);
+       struct bucket *g;
+       unsigned i, stale;
+
+       if (!KEY_PTRS(k) ||
+           bch_extent_invalid(bk, k))
+               return true;
+
+       for (i = 0; i < KEY_PTRS(k); i++)
+               if (!ptr_available(b->c, k, i))
+                       return true;
+
+       if (!expensive_debug_checks(b->c) && KEY_DIRTY(k))
+               return false;
+
+       for (i = 0; i < KEY_PTRS(k); i++) {
+               g = PTR_BUCKET(b->c, k, i);
+               stale = ptr_stale(b->c, k, i);
+
+               btree_bug_on(stale > 96, b,
+                            "key too stale: %i, need_gc %u",
+                            stale, b->c->need_gc);
+
+               btree_bug_on(stale && KEY_DIRTY(k) && KEY_SIZE(k),
+                            b, "stale dirty pointer");
+
+               if (stale)
+                       return true;
+
+               if (expensive_debug_checks(b->c) &&
+                   bch_extent_bad_expensive(b, k, i))
+                       return true;
+       }
+
+       return false;
+}
+
+static uint64_t merge_chksums(struct bkey *l, struct bkey *r)
+{
+       return (l->ptr[KEY_PTRS(l)] + r->ptr[KEY_PTRS(r)]) &
+               ~((uint64_t)1 << 63);
+}
+
+static bool bch_extent_merge(struct btree_keys *bk, struct bkey *l, struct bkey *r)
+{
+       struct btree *b = container_of(bk, struct btree, keys);
+       unsigned i;
+
+       if (key_merging_disabled(b->c))
+               return false;
+
+       for (i = 0; i < KEY_PTRS(l); i++)
+               if (l->ptr[i] + PTR(0, KEY_SIZE(l), 0) != r->ptr[i] ||
+                   PTR_BUCKET_NR(b->c, l, i) != PTR_BUCKET_NR(b->c, r, i))
+                       return false;
+
+       /* Keys with no pointers aren't restricted to one bucket and could
+        * overflow KEY_SIZE
+        */
+       if (KEY_SIZE(l) + KEY_SIZE(r) > USHRT_MAX) {
+               SET_KEY_OFFSET(l, KEY_OFFSET(l) + USHRT_MAX - KEY_SIZE(l));
+               SET_KEY_SIZE(l, USHRT_MAX);
+
+               bch_cut_front(l, r);
+               return false;
+       }
+
+       if (KEY_CSUM(l)) {
+               if (KEY_CSUM(r))
+                       l->ptr[KEY_PTRS(l)] = merge_chksums(l, r);
+               else
+                       SET_KEY_CSUM(l, 0);
+       }
+
+       SET_KEY_OFFSET(l, KEY_OFFSET(l) + KEY_SIZE(r));
+       SET_KEY_SIZE(l, KEY_SIZE(l) + KEY_SIZE(r));
+
+       return true;
+}
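
Example with invented numbers: if l covers sectors 0..4 and r covers 4..8 with
physically contiguous pointers into the same bucket, the merge leaves l
covering 0..8 (KEY_OFFSET(l) == 8, KEY_SIZE(l) == 8) and returns true.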
+
+const struct btree_keys_ops bch_extent_keys_ops = {
+       .sort_cmp       = bch_extent_sort_cmp,
+       .sort_fixup     = bch_extent_sort_fixup,
+       .insert_fixup   = bch_extent_insert_fixup,
+       .key_invalid    = bch_extent_invalid,
+       .key_bad        = bch_extent_bad,
+       .key_merge      = bch_extent_merge,
+       .key_to_text    = bch_extent_to_text,
+       .key_dump       = bch_bkey_dump,
+       .is_extents     = true,
+};
diff --git a/drivers/md/bcache/extents.h b/drivers/md/bcache/extents.h
new file mode 100644 (file)
index 0000000..e4e2340
--- /dev/null
@@ -0,0 +1,13 @@
+#ifndef _BCACHE_EXTENTS_H
+#define _BCACHE_EXTENTS_H
+
+extern const struct btree_keys_ops bch_btree_keys_ops;
+extern const struct btree_keys_ops bch_extent_keys_ops;
+
+struct bkey;
+struct cache_set;
+
+void bch_extent_to_text(char *, size_t, const struct bkey *);
+bool __bch_btree_ptr_invalid(struct cache_set *, const struct bkey *);
+
+#endif /* _BCACHE_EXTENTS_H */
index 9056632..fa028fa 100644 (file)
 
 #include <linux/blkdev.h>
 
-static void bch_bi_idx_hack_endio(struct bio *bio, int error)
-{
-       struct bio *p = bio->bi_private;
-
-       bio_endio(p, error);
-       bio_put(bio);
-}
-
-static void bch_generic_make_request_hack(struct bio *bio)
-{
-       if (bio->bi_idx) {
-               struct bio *clone = bio_alloc(GFP_NOIO, bio_segments(bio));
-
-               memcpy(clone->bi_io_vec,
-                      bio_iovec(bio),
-                      bio_segments(bio) * sizeof(struct bio_vec));
-
-               clone->bi_sector        = bio->bi_sector;
-               clone->bi_bdev          = bio->bi_bdev;
-               clone->bi_rw            = bio->bi_rw;
-               clone->bi_vcnt          = bio_segments(bio);
-               clone->bi_size          = bio->bi_size;
-
-               clone->bi_private       = bio;
-               clone->bi_end_io        = bch_bi_idx_hack_endio;
-
-               bio = clone;
-       }
-
-       /*
-        * Hack, since drivers that clone bios clone up to bi_max_vecs, but our
-        * bios might have had more than that (before we split them per device
-        * limitations).
-        *
-        * To be taken out once immutable bvec stuff is in.
-        */
-       bio->bi_max_vecs = bio->bi_vcnt;
-
-       generic_make_request(bio);
-}
-
-/**
- * bch_bio_split - split a bio
- * @bio:       bio to split
- * @sectors:   number of sectors to split from the front of @bio
- * @gfp:       gfp mask
- * @bs:                bio set to allocate from
- *
- * Allocates and returns a new bio which represents @sectors from the start of
- * @bio, and updates @bio to represent the remaining sectors.
- *
- * If bio_sectors(@bio) was less than or equal to @sectors, returns @bio
- * unchanged.
- *
- * The newly allocated bio will point to @bio's bi_io_vec, if the split was on a
- * bvec boundary; it is the caller's responsibility to ensure that @bio is not
- * freed before the split.
- */
-struct bio *bch_bio_split(struct bio *bio, int sectors,
-                         gfp_t gfp, struct bio_set *bs)
-{
-       unsigned idx = bio->bi_idx, vcnt = 0, nbytes = sectors << 9;
-       struct bio_vec *bv;
-       struct bio *ret = NULL;
-
-       BUG_ON(sectors <= 0);
-
-       if (sectors >= bio_sectors(bio))
-               return bio;
-
-       if (bio->bi_rw & REQ_DISCARD) {
-               ret = bio_alloc_bioset(gfp, 1, bs);
-               if (!ret)
-                       return NULL;
-               idx = 0;
-               goto out;
-       }
-
-       bio_for_each_segment(bv, bio, idx) {
-               vcnt = idx - bio->bi_idx;
-
-               if (!nbytes) {
-                       ret = bio_alloc_bioset(gfp, vcnt, bs);
-                       if (!ret)
-                               return NULL;
-
-                       memcpy(ret->bi_io_vec, bio_iovec(bio),
-                              sizeof(struct bio_vec) * vcnt);
-
-                       break;
-               } else if (nbytes < bv->bv_len) {
-                       ret = bio_alloc_bioset(gfp, ++vcnt, bs);
-                       if (!ret)
-                               return NULL;
-
-                       memcpy(ret->bi_io_vec, bio_iovec(bio),
-                              sizeof(struct bio_vec) * vcnt);
-
-                       ret->bi_io_vec[vcnt - 1].bv_len = nbytes;
-                       bv->bv_offset   += nbytes;
-                       bv->bv_len      -= nbytes;
-                       break;
-               }
-
-               nbytes -= bv->bv_len;
-       }
-out:
-       ret->bi_bdev    = bio->bi_bdev;
-       ret->bi_sector  = bio->bi_sector;
-       ret->bi_size    = sectors << 9;
-       ret->bi_rw      = bio->bi_rw;
-       ret->bi_vcnt    = vcnt;
-       ret->bi_max_vecs = vcnt;
-
-       bio->bi_sector  += sectors;
-       bio->bi_size    -= sectors << 9;
-       bio->bi_idx      = idx;
-
-       if (bio_integrity(bio)) {
-               if (bio_integrity_clone(ret, bio, gfp)) {
-                       bio_put(ret);
-                       return NULL;
-               }
-
-               bio_integrity_trim(ret, 0, bio_sectors(ret));
-               bio_integrity_trim(bio, bio_sectors(ret), bio_sectors(bio));
-       }
-
-       return ret;
-}
-
 static unsigned bch_bio_max_sectors(struct bio *bio)
 {
-       unsigned ret = bio_sectors(bio);
        struct request_queue *q = bdev_get_queue(bio->bi_bdev);
-       unsigned max_segments = min_t(unsigned, BIO_MAX_PAGES,
-                                     queue_max_segments(q));
+       struct bio_vec bv;
+       struct bvec_iter iter;
+       unsigned ret = 0, seg = 0;
 
        if (bio->bi_rw & REQ_DISCARD)
-               return min(ret, q->limits.max_discard_sectors);
-
-       if (bio_segments(bio) > max_segments ||
-           q->merge_bvec_fn) {
-               struct bio_vec *bv;
-               int i, seg = 0;
-
-               ret = 0;
-
-               bio_for_each_segment(bv, bio, i) {
-                       struct bvec_merge_data bvm = {
-                               .bi_bdev        = bio->bi_bdev,
-                               .bi_sector      = bio->bi_sector,
-                               .bi_size        = ret << 9,
-                               .bi_rw          = bio->bi_rw,
-                       };
-
-                       if (seg == max_segments)
-                               break;
+               return min(bio_sectors(bio), q->limits.max_discard_sectors);
+
+       bio_for_each_segment(bv, bio, iter) {
+               struct bvec_merge_data bvm = {
+                       .bi_bdev        = bio->bi_bdev,
+                       .bi_sector      = bio->bi_iter.bi_sector,
+                       .bi_size        = ret << 9,
+                       .bi_rw          = bio->bi_rw,
+               };
+
+               if (seg == min_t(unsigned, BIO_MAX_PAGES,
+                                queue_max_segments(q)))
+                       break;
 
-                       if (q->merge_bvec_fn &&
-                           q->merge_bvec_fn(q, &bvm, bv) < (int) bv->bv_len)
-                               break;
+               if (q->merge_bvec_fn &&
+                   q->merge_bvec_fn(q, &bvm, &bv) < (int) bv.bv_len)
+                       break;
 
-                       seg++;
-                       ret += bv->bv_len >> 9;
-               }
+               seg++;
+               ret += bv.bv_len >> 9;
        }
 
        ret = min(ret, queue_max_sectors(q));
 
        WARN_ON(!ret);
-       ret = max_t(int, ret, bio_iovec(bio)->bv_len >> 9);
+       ret = max_t(int, ret, bio_iovec(bio).bv_len >> 9);
 
        return ret;
 }
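
The rewrite above is the immutable-biovec idiom this series converts to:
bio_for_each_segment() now yields struct bio_vec by value and advances a
struct bvec_iter, leaving bi_io_vec itself untouched. A minimal userspace
sketch of such an iterator, with hypothetical stand-in types rather than
the kernel's structures:

#include <stdio.h>

struct seg { unsigned len; };             /* stand-in for struct bio_vec   */
struct it { unsigned idx, done, size; };  /* stand-in for struct bvec_iter */

static void it_advance(const struct seg *v, struct it *i, unsigned bytes)
{
        i->size -= bytes;
        while (bytes) {
                unsigned left = v[i->idx].len - i->done;
                unsigned step = bytes < left ? bytes : left;

                i->done += step;
                bytes -= step;
                if (i->done == v[i->idx].len) {  /* segment exhausted */
                        i->idx++;
                        i->done = 0;
                }
        }
}

int main(void)
{
        struct seg v[] = { {4096}, {4096}, {2048} };
        struct it i = { 0, 0, 4096 + 4096 + 2048 };

        while (i.size) {                         /* bio_for_each_segment */
                unsigned len = v[i.idx].len - i.done;

                if (len > i.size)
                        len = i.size;
                printf("segment %u: %u bytes\n", i.idx, len);
                it_advance(v, &i, len);
        }
        return 0;
}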
@@ -193,7 +55,7 @@ static void bch_bio_submit_split_done(struct closure *cl)
 
        s->bio->bi_end_io = s->bi_end_io;
        s->bio->bi_private = s->bi_private;
-       bio_endio(s->bio, 0);
+       bio_endio_nodec(s->bio, 0);
 
        closure_debug_destroy(&s->cl);
        mempool_free(s, s->p->bio_split_hook);
@@ -232,19 +94,19 @@ void bch_generic_make_request(struct bio *bio, struct bio_split_pool *p)
        bio_get(bio);
 
        do {
-               n = bch_bio_split(bio, bch_bio_max_sectors(bio),
-                                 GFP_NOIO, s->p->bio_split);
+               n = bio_next_split(bio, bch_bio_max_sectors(bio),
+                                  GFP_NOIO, s->p->bio_split);
 
                n->bi_end_io    = bch_bio_submit_split_endio;
                n->bi_private   = &s->cl;
 
                closure_get(&s->cl);
-               bch_generic_make_request_hack(n);
+               generic_make_request(n);
        } while (n != bio);
 
        continue_at(&s->cl, bch_bio_submit_split_done, NULL);
 submit:
-       bch_generic_make_request_hack(bio);
+       generic_make_request(bio);
 }
 
 /* Bios with headers */
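
bch_generic_make_request() above keeps splitting with bio_next_split()
until the returned bio is the original one, i.e. the remainder fits in a
single submission. A hedged model of that termination logic, with
illustrative numbers rather than bcache's limits:

#include <stdio.h>

static unsigned split_front(unsigned *remaining, unsigned max)
{
        unsigned n = *remaining < max ? *remaining : max;

        *remaining -= n;
        return n;
}

int main(void)
{
        unsigned sectors = 10000, max = 2048;

        /* The kernel loop stops when bio_next_split() hands back the
         * original bio (n == bio); here the remainder reaching zero
         * plays that role. */
        while (sectors) {
                unsigned n = split_front(&sectors, max);

                printf("submit %u sectors (%u left)\n", n, sectors);
        }
        return 0;
}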
@@ -272,8 +134,8 @@ void __bch_submit_bbio(struct bio *bio, struct cache_set *c)
 {
        struct bbio *b = container_of(bio, struct bbio, bio);
 
-       bio->bi_sector  = PTR_OFFSET(&b->key, 0);
-       bio->bi_bdev    = PTR_CACHE(c, &b->key, 0)->bdev;
+       bio->bi_iter.bi_sector  = PTR_OFFSET(&b->key, 0);
+       bio->bi_bdev            = PTR_CACHE(c, &b->key, 0)->bdev;
 
        b->submit_time_us = local_clock_us();
        closure_bio_submit(bio, bio->bi_private, PTR_CACHE(c, &b->key, 0));
index ecdaa67..18039af 100644 (file)
@@ -44,17 +44,17 @@ static int journal_read_bucket(struct cache *ca, struct list_head *list,
 
        closure_init_stack(&cl);
 
-       pr_debug("reading %llu", (uint64_t) bucket);
+       pr_debug("reading %u", bucket_index);
 
        while (offset < ca->sb.bucket_size) {
 reread:                left = ca->sb.bucket_size - offset;
-               len = min_t(unsigned, left, PAGE_SECTORS * 8);
+               len = min_t(unsigned, left, PAGE_SECTORS << JSET_BITS);
 
                bio_reset(bio);
-               bio->bi_sector  = bucket + offset;
+               bio->bi_iter.bi_sector  = bucket + offset;
                bio->bi_bdev    = ca->bdev;
                bio->bi_rw      = READ;
-               bio->bi_size    = len << 9;
+               bio->bi_iter.bi_size    = len << 9;
 
                bio->bi_end_io  = journal_read_endio;
                bio->bi_private = &cl;
@@ -74,19 +74,28 @@ reread:             left = ca->sb.bucket_size - offset;
                        struct list_head *where;
                        size_t blocks, bytes = set_bytes(j);
 
-                       if (j->magic != jset_magic(&ca->sb))
+                       if (j->magic != jset_magic(&ca->sb)) {
+                               pr_debug("%u: bad magic", bucket_index);
                                return ret;
+                       }
 
-                       if (bytes > left << 9)
+                       if (bytes > left << 9 ||
+                           bytes > PAGE_SIZE << JSET_BITS) {
+                               pr_info("%u: too big, %zu bytes, offset %u",
+                                       bucket_index, bytes, offset);
                                return ret;
+                       }
 
                        if (bytes > len << 9)
                                goto reread;
 
-                       if (j->csum != csum_set(j))
+                       if (j->csum != csum_set(j)) {
+                               pr_info("%u: bad csum, %zu bytes, offset %u",
+                                       bucket_index, bytes, offset);
                                return ret;
+                       }
 
-                       blocks = set_blocks(j, ca->set);
+                       blocks = set_blocks(j, block_bytes(ca->set));
 
                        while (!list_empty(list)) {
                                i = list_first_entry(list,
@@ -275,7 +284,7 @@ void bch_journal_mark(struct cache_set *c, struct list_head *list)
                }
 
                for (k = i->j.start;
-                    k < end(&i->j);
+                    k < bset_bkey_last(&i->j);
                     k = bkey_next(k)) {
                        unsigned j;
 
@@ -313,7 +322,7 @@ int bch_journal_replay(struct cache_set *s, struct list_head *list)
                                 n, i->j.seq - 1, start, end);
 
                for (k = i->j.start;
-                    k < end(&i->j);
+                    k < bset_bkey_last(&i->j);
                     k = bkey_next(k)) {
                        trace_bcache_journal_replay_key(k);
 
@@ -437,13 +446,13 @@ static void do_journal_discard(struct cache *ca)
                atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT);
 
                bio_init(bio);
-               bio->bi_sector          = bucket_to_sector(ca->set,
+               bio->bi_iter.bi_sector  = bucket_to_sector(ca->set,
                                                ca->sb.d[ja->discard_idx]);
                bio->bi_bdev            = ca->bdev;
                bio->bi_rw              = REQ_WRITE|REQ_DISCARD;
                bio->bi_max_vecs        = 1;
                bio->bi_io_vec          = bio->bi_inline_vecs;
-               bio->bi_size            = bucket_bytes(ca);
+               bio->bi_iter.bi_size    = bucket_bytes(ca);
                bio->bi_end_io          = journal_discard_endio;
 
                closure_get(&ca->set->cl);
@@ -555,6 +564,14 @@ static void journal_write_done(struct closure *cl)
        continue_at_nobarrier(cl, journal_write, system_wq);
 }
 
+static void journal_write_unlock(struct closure *cl)
+{
+       struct cache_set *c = container_of(cl, struct cache_set, journal.io);
+
+       c->journal.io_in_flight = 0;
+       spin_unlock(&c->journal.lock);
+}
+
 static void journal_write_unlocked(struct closure *cl)
        __releases(c->journal.lock)
 {
@@ -562,22 +579,15 @@ static void journal_write_unlocked(struct closure *cl)
        struct cache *ca;
        struct journal_write *w = c->journal.cur;
        struct bkey *k = &c->journal.key;
-       unsigned i, sectors = set_blocks(w->data, c) * c->sb.block_size;
+       unsigned i, sectors = set_blocks(w->data, block_bytes(c)) *
+               c->sb.block_size;
 
        struct bio *bio;
        struct bio_list list;
        bio_list_init(&list);
 
        if (!w->need_write) {
-               /*
-                * XXX: have to unlock closure before we unlock journal lock,
-                * else we race with bch_journal(). But this way we race
-                * against cache set unregister. Doh.
-                */
-               set_closure_fn(cl, NULL, NULL);
-               closure_sub(cl, CLOSURE_RUNNING + 1);
-               spin_unlock(&c->journal.lock);
-               return;
+               closure_return_with_destructor(cl, journal_write_unlock);
        } else if (journal_full(&c->journal)) {
                journal_reclaim(c);
                spin_unlock(&c->journal.lock);
@@ -586,7 +596,7 @@ static void journal_write_unlocked(struct closure *cl)
                continue_at(cl, journal_write, system_wq);
        }
 
-       c->journal.blocks_free -= set_blocks(w->data, c);
+       c->journal.blocks_free -= set_blocks(w->data, block_bytes(c));
 
        w->data->btree_level = c->root->level;
 
@@ -608,10 +618,10 @@ static void journal_write_unlocked(struct closure *cl)
                atomic_long_add(sectors, &ca->meta_sectors_written);
 
                bio_reset(bio);
-               bio->bi_sector  = PTR_OFFSET(k, i);
+               bio->bi_iter.bi_sector  = PTR_OFFSET(k, i);
                bio->bi_bdev    = ca->bdev;
                bio->bi_rw      = REQ_WRITE|REQ_SYNC|REQ_META|REQ_FLUSH|REQ_FUA;
-               bio->bi_size    = sectors << 9;
+               bio->bi_iter.bi_size = sectors << 9;
 
                bio->bi_end_io  = journal_write_endio;
                bio->bi_private = w;
@@ -653,10 +663,12 @@ static void journal_try_write(struct cache_set *c)
 
        w->need_write = true;
 
-       if (closure_trylock(cl, &c->cl))
-               journal_write_unlocked(cl);
-       else
+       if (!c->journal.io_in_flight) {
+               c->journal.io_in_flight = 1;
+               closure_call(cl, journal_write_unlocked, NULL, &c->cl);
+       } else {
                spin_unlock(&c->journal.lock);
+       }
 }
 
 static struct journal_write *journal_wait_for_write(struct cache_set *c,
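
The io_in_flight flag replaces the old closure_trylock(): tested under
journal.lock, it elects exactly one caller to run journal_write_unlocked(),
and journal_write_unlock() above re-arms the gate from the closure
destructor. A simplified pthread model of the gate (the real code clears
the flag while still holding journal.lock):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t journal_lock = PTHREAD_MUTEX_INITIALIZER;
static int io_in_flight;

/* journal_try_write() analogue: exactly one caller wins the flag. */
static void try_write(int caller)
{
        pthread_mutex_lock(&journal_lock);
        if (!io_in_flight) {
                io_in_flight = 1;
                printf("caller %d issues the write\n", caller);
        } else {
                printf("caller %d: already in flight, keys ride along\n",
                       caller);
        }
        pthread_mutex_unlock(&journal_lock);
}

/* journal_write_unlock() analogue: completion re-arms the gate. */
static void write_done(void)
{
        pthread_mutex_lock(&journal_lock);
        io_in_flight = 0;
        pthread_mutex_unlock(&journal_lock);
}

int main(void)
{
        try_write(1);
        try_write(2);
        write_done();
        try_write(3);
        return 0;
}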
@@ -664,6 +676,7 @@ static struct journal_write *journal_wait_for_write(struct cache_set *c,
 {
        size_t sectors;
        struct closure cl;
+       bool wait = false;
 
        closure_init_stack(&cl);
 
@@ -673,16 +686,19 @@ static struct journal_write *journal_wait_for_write(struct cache_set *c,
                struct journal_write *w = c->journal.cur;
 
                sectors = __set_blocks(w->data, w->data->keys + nkeys,
-                                      c) * c->sb.block_size;
+                                      block_bytes(c)) * c->sb.block_size;
 
                if (sectors <= min_t(size_t,
                                     c->journal.blocks_free * c->sb.block_size,
                                     PAGE_SECTORS << JSET_BITS))
                        return w;
 
-               /* XXX: tracepoint */
+               if (wait)
+                       closure_wait(&c->journal.wait, &cl);
+
                if (!journal_full(&c->journal)) {
-                       trace_bcache_journal_entry_full(c);
+                       if (wait)
+                               trace_bcache_journal_entry_full(c);
 
                        /*
                         * XXX: If we were inserting so many keys that they
@@ -692,12 +708,11 @@ static struct journal_write *journal_wait_for_write(struct cache_set *c,
                         */
                        BUG_ON(!w->data->keys);
 
-                       closure_wait(&w->wait, &cl);
                        journal_try_write(c); /* unlocks */
                } else {
-                       trace_bcache_journal_full(c);
+                       if (wait)
+                               trace_bcache_journal_full(c);
 
-                       closure_wait(&c->journal.wait, &cl);
                        journal_reclaim(c);
                        spin_unlock(&c->journal.lock);
 
@@ -706,6 +721,7 @@ static struct journal_write *journal_wait_for_write(struct cache_set *c,
 
                closure_sync(&cl);
                spin_lock(&c->journal.lock);
+               wait = true;
        }
 }
 
@@ -736,7 +752,7 @@ atomic_t *bch_journal(struct cache_set *c,
 
        w = journal_wait_for_write(c, bch_keylist_nkeys(keys));
 
-       memcpy(end(w->data), keys->keys, bch_keylist_bytes(keys));
+       memcpy(bset_bkey_last(w->data), keys->keys, bch_keylist_bytes(keys));
        w->data->keys += bch_keylist_nkeys(keys);
 
        ret = &fifo_back(&c->journal.pin);
@@ -780,7 +796,6 @@ int bch_journal_alloc(struct cache_set *c)
 {
        struct journal *j = &c->journal;
 
-       closure_init_unlocked(&j->io);
        spin_lock_init(&j->lock);
        INIT_DELAYED_WORK(&j->work, journal_write_work);
 
index a6472fd..9180c44 100644 (file)
@@ -104,6 +104,7 @@ struct journal {
        /* used when waiting because the journal was full */
        struct closure_waitlist wait;
        struct closure          io;
+       int                     io_in_flight;
        struct delayed_work     work;
 
        /* Number of blocks free in the bucket(s) we're currently writing to */
index f2f0998..9eb60d1 100644 (file)
@@ -86,7 +86,7 @@ static void moving_init(struct moving_io *io)
        bio_get(bio);
        bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
 
-       bio->bi_size            = KEY_SIZE(&io->w->key) << 9;
+       bio->bi_iter.bi_size    = KEY_SIZE(&io->w->key) << 9;
        bio->bi_max_vecs        = DIV_ROUND_UP(KEY_SIZE(&io->w->key),
                                               PAGE_SECTORS);
        bio->bi_private         = &io->cl;
@@ -102,7 +102,7 @@ static void write_moving(struct closure *cl)
        if (!op->error) {
                moving_init(io);
 
-               io->bio.bio.bi_sector = KEY_START(&io->w->key);
+               io->bio.bio.bi_iter.bi_sector = KEY_START(&io->w->key);
                op->write_prio          = 1;
                op->bio                 = &io->bio.bio;
 
@@ -211,7 +211,7 @@ void bch_moving_gc(struct cache_set *c)
        for_each_cache(ca, c, i) {
                unsigned sectors_to_move = 0;
                unsigned reserve_sectors = ca->sb.bucket_size *
-                       min(fifo_used(&ca->free), ca->free.size / 2);
+                       fifo_used(&ca->free[RESERVE_MOVINGGC]);
 
                ca->heap.used = 0;
 
index 61bcfc2..72cd213 100644 (file)
@@ -197,14 +197,14 @@ static bool verify(struct cached_dev *dc, struct bio *bio)
 
 static void bio_csum(struct bio *bio, struct bkey *k)
 {
-       struct bio_vec *bv;
+       struct bio_vec bv;
+       struct bvec_iter iter;
        uint64_t csum = 0;
-       int i;
 
-       bio_for_each_segment(bv, bio, i) {
-               void *d = kmap(bv->bv_page) + bv->bv_offset;
-               csum = bch_crc64_update(csum, d, bv->bv_len);
-               kunmap(bv->bv_page);
+       bio_for_each_segment(bv, bio, iter) {
+               void *d = kmap(bv.bv_page) + bv.bv_offset;
+               csum = bch_crc64_update(csum, d, bv.bv_len);
+               kunmap(bv.bv_page);
        }
 
        k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1);
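
bio_csum() above shows the mechanical side of the conversion: the segment
is now a value, so kmap() takes bv.bv_page instead of bv->bv_page. A toy
fold over segments in the same shape, with a trivial additive hash standing
in for bch_crc64_update (which this sketch does not reimplement):

#include <stdint.h>
#include <stdio.h>

struct seg { const uint8_t *data; unsigned len; };

static uint64_t fold(uint64_t csum, const uint8_t *d, unsigned len)
{
        while (len--)
                csum = csum * 31 + *d++;
        return csum;
}

int main(void)
{
        struct seg v[] = { { (const uint8_t *)"hello ", 6 },
                           { (const uint8_t *)"world", 5 } };
        uint64_t csum = 0;
        unsigned i;

        for (i = 0; i < 2; i++)  /* bio_for_each_segment stand-in */
                csum = fold(csum, v[i].data, v[i].len);

        /* same top-bit mask as the kernel line above */
        printf("csum = %#llx\n",
               (unsigned long long)(csum & (~0ULL >> 1)));
        return 0;
}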
@@ -254,26 +254,44 @@ static void bch_data_insert_keys(struct closure *cl)
        closure_return(cl);
 }
 
+static int bch_keylist_realloc(struct keylist *l, unsigned u64s,
+                              struct cache_set *c)
+{
+       size_t oldsize = bch_keylist_nkeys(l);
+       size_t newsize = oldsize + u64s;
+
+       /*
+        * The journalling code doesn't handle the case where the keys to insert
+        * is bigger than an empty write: If we just return -ENOMEM here,
+        * bio_insert() and bio_invalidate() will insert the keys created so far
+        * and finish the rest when the keylist is empty.
+        */
+       if (newsize * sizeof(uint64_t) > block_bytes(c) - sizeof(struct jset))
+               return -ENOMEM;
+
+       return __bch_keylist_realloc(l, u64s);
+}
+
 static void bch_data_invalidate(struct closure *cl)
 {
        struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
        struct bio *bio = op->bio;
 
        pr_debug("invalidating %i sectors from %llu",
-                bio_sectors(bio), (uint64_t) bio->bi_sector);
+                bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector);
 
        while (bio_sectors(bio)) {
                unsigned sectors = min(bio_sectors(bio),
                                       1U << (KEY_SIZE_BITS - 1));
 
-               if (bch_keylist_realloc(&op->insert_keys, 0, op->c))
+               if (bch_keylist_realloc(&op->insert_keys, 2, op->c))
                        goto out;
 
-               bio->bi_sector  += sectors;
-               bio->bi_size    -= sectors << 9;
+               bio->bi_iter.bi_sector  += sectors;
+               bio->bi_iter.bi_size    -= sectors << 9;
 
                bch_keylist_add(&op->insert_keys,
-                               &KEY(op->inode, bio->bi_sector, sectors));
+                               &KEY(op->inode, bio->bi_iter.bi_sector, sectors));
        }
 
        op->insert_data_done = true;
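
The new bch_keylist_realloc() wrapper earlier in this hunk caps a keylist
at what fits in one empty journal write: one block minus the jset header.
Illustrative arithmetic only, using a hypothetical header size rather than
the real sizeof(struct jset):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        size_t block_bytes = 4096;  /* illustrative block size          */
        size_t jset_header = 64;    /* stand-in for sizeof(struct jset) */
        size_t max_u64s = (block_bytes - jset_header) / sizeof(uint64_t);

        printf("keylist may grow to %zu u64s\n", max_u64s);  /* 504 */
        return 0;
}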
@@ -356,21 +374,21 @@ static void bch_data_insert_start(struct closure *cl)
 
                /* 3 u64s for the key header and device pointer, 1 for the chksum */
                if (bch_keylist_realloc(&op->insert_keys,
-                                       1 + (op->csum ? 1 : 0),
+                                       3 + (op->csum ? 1 : 0),
                                        op->c))
                        continue_at(cl, bch_data_insert_keys, bcache_wq);
 
                k = op->insert_keys.top;
                bkey_init(k);
                SET_KEY_INODE(k, op->inode);
-               SET_KEY_OFFSET(k, bio->bi_sector);
+               SET_KEY_OFFSET(k, bio->bi_iter.bi_sector);
 
                if (!bch_alloc_sectors(op->c, k, bio_sectors(bio),
                                       op->write_point, op->write_prio,
                                       op->writeback))
                        goto err;
 
-               n = bch_bio_split(bio, KEY_SIZE(k), GFP_NOIO, split);
+               n = bio_next_split(bio, KEY_SIZE(k), GFP_NOIO, split);
 
                n->bi_end_io    = bch_data_insert_endio;
                n->bi_private   = cl;
@@ -521,7 +539,7 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
             (bio->bi_rw & REQ_WRITE)))
                goto skip;
 
-       if (bio->bi_sector & (c->sb.block_size - 1) ||
+       if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) ||
            bio_sectors(bio) & (c->sb.block_size - 1)) {
                pr_debug("skipping unaligned io");
                goto skip;
@@ -545,8 +563,8 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
 
        spin_lock(&dc->io_lock);
 
-       hlist_for_each_entry(i, iohash(dc, bio->bi_sector), hash)
-               if (i->last == bio->bi_sector &&
+       hlist_for_each_entry(i, iohash(dc, bio->bi_iter.bi_sector), hash)
+               if (i->last == bio->bi_iter.bi_sector &&
                    time_before(jiffies, i->jiffies))
                        goto found;
 
@@ -555,8 +573,8 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
        add_sequential(task);
        i->sequential = 0;
 found:
-       if (i->sequential + bio->bi_size > i->sequential)
-               i->sequential   += bio->bi_size;
+       if (i->sequential + bio->bi_iter.bi_size > i->sequential)
+               i->sequential   += bio->bi_iter.bi_size;
 
        i->last                  = bio_end_sector(bio);
        i->jiffies               = jiffies + msecs_to_jiffies(5000);
@@ -596,16 +614,13 @@ struct search {
        /* Stack frame for bio_complete */
        struct closure          cl;
 
-       struct bcache_device    *d;
-
        struct bbio             bio;
        struct bio              *orig_bio;
        struct bio              *cache_miss;
+       struct bcache_device    *d;
 
        unsigned                insert_bio_sectors;
-
        unsigned                recoverable:1;
-       unsigned                unaligned_bvec:1;
        unsigned                write:1;
        unsigned                read_dirty_data:1;
 
@@ -630,7 +645,8 @@ static void bch_cache_read_endio(struct bio *bio, int error)
 
        if (error)
                s->iop.error = error;
-       else if (ptr_stale(s->iop.c, &b->key, 0)) {
+       else if (!KEY_DIRTY(&b->key) &&
+                ptr_stale(s->iop.c, &b->key, 0)) {
                atomic_long_inc(&s->iop.c->cache_read_races);
                s->iop.error = -EINTR;
        }
@@ -649,15 +665,15 @@ static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
        struct bkey *bio_key;
        unsigned ptr;
 
-       if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_sector, 0)) <= 0)
+       if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0)
                return MAP_CONTINUE;
 
        if (KEY_INODE(k) != s->iop.inode ||
-           KEY_START(k) > bio->bi_sector) {
+           KEY_START(k) > bio->bi_iter.bi_sector) {
                unsigned bio_sectors = bio_sectors(bio);
                unsigned sectors = KEY_INODE(k) == s->iop.inode
                        ? min_t(uint64_t, INT_MAX,
-                               KEY_START(k) - bio->bi_sector)
+                               KEY_START(k) - bio->bi_iter.bi_sector)
                        : INT_MAX;
 
                int ret = s->d->cache_miss(b, s, bio, sectors);
@@ -679,14 +695,14 @@ static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
        if (KEY_DIRTY(k))
                s->read_dirty_data = true;
 
-       n = bch_bio_split(bio, min_t(uint64_t, INT_MAX,
-                                    KEY_OFFSET(k) - bio->bi_sector),
-                         GFP_NOIO, s->d->bio_split);
+       n = bio_next_split(bio, min_t(uint64_t, INT_MAX,
+                                     KEY_OFFSET(k) - bio->bi_iter.bi_sector),
+                          GFP_NOIO, s->d->bio_split);
 
        bio_key = &container_of(n, struct bbio, bio)->key;
        bch_bkey_copy_single_ptr(bio_key, k, ptr);
 
-       bch_cut_front(&KEY(s->iop.inode, n->bi_sector, 0), bio_key);
+       bch_cut_front(&KEY(s->iop.inode, n->bi_iter.bi_sector, 0), bio_key);
        bch_cut_back(&KEY(s->iop.inode, bio_end_sector(n), 0), bio_key);
 
        n->bi_end_io    = bch_cache_read_endio;
@@ -711,10 +727,13 @@ static void cache_lookup(struct closure *cl)
 {
        struct search *s = container_of(cl, struct search, iop.cl);
        struct bio *bio = &s->bio.bio;
+       int ret;
 
-       int ret = bch_btree_map_keys(&s->op, s->iop.c,
-                                    &KEY(s->iop.inode, bio->bi_sector, 0),
-                                    cache_lookup_fn, MAP_END_KEY);
+       bch_btree_op_init(&s->op, -1);
+
+       ret = bch_btree_map_keys(&s->op, s->iop.c,
+                                &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0),
+                                cache_lookup_fn, MAP_END_KEY);
        if (ret == -EAGAIN)
                continue_at(cl, cache_lookup, bcache_wq);
 
@@ -755,13 +774,15 @@ static void bio_complete(struct search *s)
        }
 }
 
-static void do_bio_hook(struct search *s)
+static void do_bio_hook(struct search *s, struct bio *orig_bio)
 {
        struct bio *bio = &s->bio.bio;
-       memcpy(bio, s->orig_bio, sizeof(struct bio));
 
+       bio_init(bio);
+       __bio_clone_fast(bio, orig_bio);
        bio->bi_end_io          = request_endio;
        bio->bi_private         = &s->cl;
+
        atomic_set(&bio->bi_cnt, 3);
 }
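
do_bio_hook() no longer memcpy()s the whole struct bio; __bio_clone_fast()
can copy just a handful of fields because the iterator is now
self-contained. A hedged model of why a fast clone is cheap, with
hypothetical miniature structs:

#include <stdio.h>

struct seg { unsigned len; };
struct it { unsigned idx, done, size; };

struct b {
        struct seg *vec;  /* shared; the clone never writes it */
        struct it iter;   /* private cursor                    */
};

static void clone_fast(struct b *dst, const struct b *src)
{
        dst->vec = src->vec;    /* no per-segment memcpy */
        dst->iter = src->iter;  /* independent position  */
}

int main(void)
{
        struct seg v[] = { {4096}, {512} };
        struct b orig = { v, { 0, 0, 4608 } };
        struct b c;

        clone_fast(&c, &orig);
        c.iter.size -= 4096;  /* advancing the clone...          */
        printf("orig %u, clone %u\n", orig.iter.size, c.iter.size);
        return 0;             /* ...leaves the original untouched */
}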
 
@@ -773,43 +794,36 @@ static void search_free(struct closure *cl)
        if (s->iop.bio)
                bio_put(s->iop.bio);
 
-       if (s->unaligned_bvec)
-               mempool_free(s->bio.bio.bi_io_vec, s->d->unaligned_bvec);
-
        closure_debug_destroy(cl);
        mempool_free(s, s->d->c->search);
 }
 
-static struct search *search_alloc(struct bio *bio, struct bcache_device *d)
+static inline struct search *search_alloc(struct bio *bio,
+                                         struct bcache_device *d)
 {
        struct search *s;
-       struct bio_vec *bv;
 
        s = mempool_alloc(d->c->search, GFP_NOIO);
-       memset(s, 0, offsetof(struct search, iop.insert_keys));
 
-       __closure_init(&s->cl, NULL);
+       closure_init(&s->cl, NULL);
+       do_bio_hook(s, bio);
 
-       s->iop.inode            = d->id;
-       s->iop.c                = d->c;
-       s->d                    = d;
-       s->op.lock              = -1;
-       s->iop.write_point      = hash_long((unsigned long) current, 16);
        s->orig_bio             = bio;
-       s->write                = (bio->bi_rw & REQ_WRITE) != 0;
-       s->iop.flush_journal    = (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0;
+       s->cache_miss           = NULL;
+       s->d                    = d;
        s->recoverable          = 1;
+       s->write                = (bio->bi_rw & REQ_WRITE) != 0;
+       s->read_dirty_data      = 0;
        s->start_time           = jiffies;
-       do_bio_hook(s);
 
-       if (bio->bi_size != bio_segments(bio) * PAGE_SIZE) {
-               bv = mempool_alloc(d->unaligned_bvec, GFP_NOIO);
-               memcpy(bv, bio_iovec(bio),
-                      sizeof(struct bio_vec) * bio_segments(bio));
-
-               s->bio.bio.bi_io_vec    = bv;
-               s->unaligned_bvec       = 1;
-       }
+       s->iop.c                = d->c;
+       s->iop.bio              = NULL;
+       s->iop.inode            = d->id;
+       s->iop.write_point      = hash_long((unsigned long) current, 16);
+       s->iop.write_prio       = 0;
+       s->iop.error            = 0;
+       s->iop.flags            = 0;
+       s->iop.flush_journal    = (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0;
 
        return s;
 }
@@ -849,26 +863,13 @@ static void cached_dev_read_error(struct closure *cl)
 {
        struct search *s = container_of(cl, struct search, cl);
        struct bio *bio = &s->bio.bio;
-       struct bio_vec *bv;
-       int i;
 
        if (s->recoverable) {
                /* Retry from the backing device: */
                trace_bcache_read_retry(s->orig_bio);
 
                s->iop.error = 0;
-               bv = s->bio.bio.bi_io_vec;
-               do_bio_hook(s);
-               s->bio.bio.bi_io_vec = bv;
-
-               if (!s->unaligned_bvec)
-                       bio_for_each_segment(bv, s->orig_bio, i)
-                               bv->bv_offset = 0, bv->bv_len = PAGE_SIZE;
-               else
-                       memcpy(s->bio.bio.bi_io_vec,
-                              bio_iovec(s->orig_bio),
-                              sizeof(struct bio_vec) *
-                              bio_segments(s->orig_bio));
+               do_bio_hook(s, s->orig_bio);
 
                /* XXX: invalidate cache */
 
@@ -893,9 +894,9 @@ static void cached_dev_read_done(struct closure *cl)
 
        if (s->iop.bio) {
                bio_reset(s->iop.bio);
-               s->iop.bio->bi_sector = s->cache_miss->bi_sector;
+               s->iop.bio->bi_iter.bi_sector = s->cache_miss->bi_iter.bi_sector;
                s->iop.bio->bi_bdev = s->cache_miss->bi_bdev;
-               s->iop.bio->bi_size = s->insert_bio_sectors << 9;
+               s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
                bch_bio_map(s->iop.bio, NULL);
 
                bio_copy_data(s->cache_miss, s->iop.bio);
@@ -904,8 +905,7 @@ static void cached_dev_read_done(struct closure *cl)
                s->cache_miss = NULL;
        }
 
-       if (verify(dc, &s->bio.bio) && s->recoverable &&
-           !s->unaligned_bvec && !s->read_dirty_data)
+       if (verify(dc, &s->bio.bio) && s->recoverable && !s->read_dirty_data)
                bch_data_verify(dc, s->orig_bio);
 
        bio_complete(s);
@@ -945,7 +945,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
        struct bio *miss, *cache_bio;
 
        if (s->cache_miss || s->iop.bypass) {
-               miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
+               miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split);
                ret = miss == bio ? MAP_DONE : MAP_CONTINUE;
                goto out_submit;
        }
@@ -959,7 +959,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
        s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);
 
        s->iop.replace_key = KEY(s->iop.inode,
-                                bio->bi_sector + s->insert_bio_sectors,
+                                bio->bi_iter.bi_sector + s->insert_bio_sectors,
                                 s->insert_bio_sectors);
 
        ret = bch_btree_insert_check_key(b, &s->op, &s->iop.replace_key);
@@ -968,7 +968,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
 
        s->iop.replace = true;
 
-       miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
+       miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split);
 
        /* btree_search_recurse()'s btree iterator is no good anymore */
        ret = miss == bio ? MAP_DONE : -EINTR;
@@ -979,9 +979,9 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
        if (!cache_bio)
                goto out_submit;
 
-       cache_bio->bi_sector    = miss->bi_sector;
-       cache_bio->bi_bdev      = miss->bi_bdev;
-       cache_bio->bi_size      = s->insert_bio_sectors << 9;
+       cache_bio->bi_iter.bi_sector    = miss->bi_iter.bi_sector;
+       cache_bio->bi_bdev              = miss->bi_bdev;
+       cache_bio->bi_iter.bi_size      = s->insert_bio_sectors << 9;
 
        cache_bio->bi_end_io    = request_endio;
        cache_bio->bi_private   = &s->cl;
@@ -1031,7 +1031,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
 {
        struct closure *cl = &s->cl;
        struct bio *bio = &s->bio.bio;
-       struct bkey start = KEY(dc->disk.id, bio->bi_sector, 0);
+       struct bkey start = KEY(dc->disk.id, bio->bi_iter.bi_sector, 0);
        struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0);
 
        bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &start, &end);
@@ -1087,8 +1087,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
                        closure_bio_submit(flush, cl, s->d);
                }
        } else {
-               s->iop.bio = bio_clone_bioset(bio, GFP_NOIO,
-                                             dc->disk.bio_split);
+               s->iop.bio = bio_clone_fast(bio, GFP_NOIO, dc->disk.bio_split);
 
                closure_bio_submit(bio, cl, s->d);
        }
@@ -1126,13 +1125,13 @@ static void cached_dev_make_request(struct request_queue *q, struct bio *bio)
        part_stat_unlock();
 
        bio->bi_bdev = dc->bdev;
-       bio->bi_sector += dc->sb.data_offset;
+       bio->bi_iter.bi_sector += dc->sb.data_offset;
 
        if (cached_dev_get(dc)) {
                s = search_alloc(bio, d);
                trace_bcache_request_start(s->d, bio);
 
-               if (!bio->bi_size) {
+               if (!bio->bi_iter.bi_size) {
                        /*
                         * can't call bch_journal_meta from under
                         * generic_make_request
@@ -1204,24 +1203,24 @@ void bch_cached_dev_request_init(struct cached_dev *dc)
 static int flash_dev_cache_miss(struct btree *b, struct search *s,
                                struct bio *bio, unsigned sectors)
 {
-       struct bio_vec *bv;
-       int i;
+       struct bio_vec bv;
+       struct bvec_iter iter;
 
        /* Zero fill bio */
 
-       bio_for_each_segment(bv, bio, i) {
-               unsigned j = min(bv->bv_len >> 9, sectors);
+       bio_for_each_segment(bv, bio, iter) {
+               unsigned j = min(bv.bv_len >> 9, sectors);
 
-               void *p = kmap(bv->bv_page);
-               memset(p + bv->bv_offset, 0, j << 9);
-               kunmap(bv->bv_page);
+               void *p = kmap(bv.bv_page);
+               memset(p + bv.bv_offset, 0, j << 9);
+               kunmap(bv.bv_page);
 
                sectors -= j;
        }
 
-       bio_advance(bio, min(sectors << 9, bio->bi_size));
+       bio_advance(bio, min(sectors << 9, bio->bi_iter.bi_size));
 
-       if (!bio->bi_size)
+       if (!bio->bi_iter.bi_size)
                return MAP_DONE;
 
        return MAP_CONTINUE;
@@ -1255,7 +1254,7 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
 
        trace_bcache_request_start(s->d, bio);
 
-       if (!bio->bi_size) {
+       if (!bio->bi_iter.bi_size) {
                /*
                 * can't call bch_journal_meta from under
                 * generic_make_request
@@ -1265,7 +1264,7 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
                                      bcache_wq);
        } else if (rw) {
                bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
-                                       &KEY(d->id, bio->bi_sector, 0),
+                                       &KEY(d->id, bio->bi_iter.bi_sector, 0),
                                        &KEY(d->id, bio_end_sector(bio), 0));
 
                s->iop.bypass           = (bio->bi_rw & REQ_DISCARD) != 0;
index 2cd65bf..39f21db 100644 (file)
@@ -13,17 +13,22 @@ struct data_insert_op {
        uint16_t                write_prio;
        short                   error;
 
-       unsigned                bypass:1;
-       unsigned                writeback:1;
-       unsigned                flush_journal:1;
-       unsigned                csum:1;
+       union {
+               uint16_t        flags;
 
-       unsigned                replace:1;
-       unsigned                replace_collision:1;
+               struct {
+                       unsigned        bypass:1;
+                       unsigned        writeback:1;
+                       unsigned        flush_journal:1;
+                       unsigned        csum:1;
 
-       unsigned                insert_data_done:1;
+                       unsigned        replace:1;
+                       unsigned        replace_collision:1;
+
+                       unsigned        insert_data_done:1;
+               };
+       };
 
-       /* Anything past this point won't get zeroed in search_alloc() */
        struct keylist          insert_keys;
        BKEY_PADDED(replace_key);
 };
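
Overlaying a u16 on the bitfields is what lets search_alloc() above
initialize every flag with a single s->iop.flags = 0 instead of the old
offsetof()-based memset. A small sketch of the same trick (C11 anonymous
members; the exact bit layout is implementation-defined):

#include <stdint.h>
#include <stdio.h>

struct op {
        union {
                uint16_t flags;
                struct {
                        unsigned bypass:1;
                        unsigned writeback:1;
                        unsigned csum:1;
                };
        };
};

int main(void)
{
        struct op op = { .flags = 0 };

        op.bypass = 1;
        op.csum = 1;
        printf("flags = %#x\n", op.flags);  /* both bits visible at once */

        op.flags = 0;                       /* clears every bitfield */
        printf("bypass = %u\n", op.bypass); /* 0 */
        return 0;
}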
index c57bfa0..24a3a15 100644 (file)
@@ -9,6 +9,7 @@
 #include "bcache.h"
 #include "btree.h"
 #include "debug.h"
+#include "extents.h"
 #include "request.h"
 #include "writeback.h"
 
@@ -225,7 +226,7 @@ static void write_bdev_super_endio(struct bio *bio, int error)
        struct cached_dev *dc = bio->bi_private;
        /* XXX: error checking */
 
-       closure_put(&dc->sb_write.cl);
+       closure_put(&dc->sb_write);
 }
 
 static void __write_super(struct cache_sb *sb, struct bio *bio)
@@ -233,9 +234,9 @@ static void __write_super(struct cache_sb *sb, struct bio *bio)
        struct cache_sb *out = page_address(bio->bi_io_vec[0].bv_page);
        unsigned i;
 
-       bio->bi_sector  = SB_SECTOR;
-       bio->bi_rw      = REQ_SYNC|REQ_META;
-       bio->bi_size    = SB_SIZE;
+       bio->bi_iter.bi_sector  = SB_SECTOR;
+       bio->bi_rw              = REQ_SYNC|REQ_META;
+       bio->bi_iter.bi_size    = SB_SIZE;
        bch_bio_map(bio, NULL);
 
        out->offset             = cpu_to_le64(sb->offset);
@@ -263,12 +264,20 @@ static void __write_super(struct cache_sb *sb, struct bio *bio)
        submit_bio(REQ_WRITE, bio);
 }
 
+static void bch_write_bdev_super_unlock(struct closure *cl)
+{
+       struct cached_dev *dc = container_of(cl, struct cached_dev, sb_write);
+
+       up(&dc->sb_write_mutex);
+}
+
 void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent)
 {
-       struct closure *cl = &dc->sb_write.cl;
+       struct closure *cl = &dc->sb_write;
        struct bio *bio = &dc->sb_bio;
 
-       closure_lock(&dc->sb_write, parent);
+       down(&dc->sb_write_mutex);
+       closure_init(cl, parent);
 
        bio_reset(bio);
        bio->bi_bdev    = dc->bdev;
@@ -278,7 +287,7 @@ void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent)
        closure_get(cl);
        __write_super(&dc->sb, bio);
 
-       closure_return(cl);
+       closure_return_with_destructor(cl, bch_write_bdev_super_unlock);
 }
 
 static void write_super_endio(struct bio *bio, int error)
@@ -286,16 +295,24 @@ static void write_super_endio(struct bio *bio, int error)
        struct cache *ca = bio->bi_private;
 
        bch_count_io_errors(ca, error, "writing superblock");
-       closure_put(&ca->set->sb_write.cl);
+       closure_put(&ca->set->sb_write);
+}
+
+static void bcache_write_super_unlock(struct closure *cl)
+{
+       struct cache_set *c = container_of(cl, struct cache_set, sb_write);
+
+       up(&c->sb_write_mutex);
 }
 
 void bcache_write_super(struct cache_set *c)
 {
-       struct closure *cl = &c->sb_write.cl;
+       struct closure *cl = &c->sb_write;
        struct cache *ca;
        unsigned i;
 
-       closure_lock(&c->sb_write, &c->cl);
+       down(&c->sb_write_mutex);
+       closure_init(cl, &c->cl);
 
        c->sb.seq++;
 
@@ -317,7 +334,7 @@ void bcache_write_super(struct cache_set *c)
                __write_super(&ca->sb, bio);
        }
 
-       closure_return(cl);
+       closure_return_with_destructor(cl, bcache_write_super_unlock);
 }
 
 /* UUID io */
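
These hunks retire the closure_lock() wrappers: a plain semaphore now
serializes superblock writers, and closure_return_with_destructor()
releases it only after the last reference drops (the UUID path below
follows the same pattern with uuid_write_mutex). A simplified userspace
model of that hand-off:

#include <semaphore.h>
#include <stdio.h>

static sem_t sb_write_mutex;  /* down()/up() in the kernel version */
static int refs;              /* closure reference count analogue  */

static void destructor(void)  /* bch_write_bdev_super_unlock()     */
{
        sem_post(&sb_write_mutex);
}

static void put(void)         /* closure_put()                     */
{
        if (--refs == 0)
                destructor();
}

int main(void)
{
        sem_init(&sb_write_mutex, 0, 1);

        sem_wait(&sb_write_mutex);  /* down(&dc->sb_write_mutex)   */
        refs = 2;                   /* submitter + in-flight bio   */
        put();                      /* closure_return(...)         */
        put();                      /* bio completion              */

        sem_wait(&sb_write_mutex);  /* next writer may now proceed */
        printf("writers serialized\n");
        sem_post(&sb_write_mutex);
        sem_destroy(&sb_write_mutex);
        return 0;
}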
@@ -325,29 +342,37 @@ void bcache_write_super(struct cache_set *c)
 static void uuid_endio(struct bio *bio, int error)
 {
        struct closure *cl = bio->bi_private;
-       struct cache_set *c = container_of(cl, struct cache_set, uuid_write.cl);
+       struct cache_set *c = container_of(cl, struct cache_set, uuid_write);
 
        cache_set_err_on(error, c, "accessing uuids");
        bch_bbio_free(bio, c);
        closure_put(cl);
 }
 
+static void uuid_io_unlock(struct closure *cl)
+{
+       struct cache_set *c = container_of(cl, struct cache_set, uuid_write);
+
+       up(&c->uuid_write_mutex);
+}
+
 static void uuid_io(struct cache_set *c, unsigned long rw,
                    struct bkey *k, struct closure *parent)
 {
-       struct closure *cl = &c->uuid_write.cl;
+       struct closure *cl = &c->uuid_write;
        struct uuid_entry *u;
        unsigned i;
        char buf[80];
 
        BUG_ON(!parent);
-       closure_lock(&c->uuid_write, parent);
+       down(&c->uuid_write_mutex);
+       closure_init(cl, parent);
 
        for (i = 0; i < KEY_PTRS(k); i++) {
                struct bio *bio = bch_bbio_alloc(c);
 
                bio->bi_rw      = REQ_SYNC|REQ_META|rw;
-               bio->bi_size    = KEY_SIZE(k) << 9;
+               bio->bi_iter.bi_size = KEY_SIZE(k) << 9;
 
                bio->bi_end_io  = uuid_endio;
                bio->bi_private = cl;
@@ -359,7 +384,7 @@ static void uuid_io(struct cache_set *c, unsigned long rw,
                        break;
        }
 
-       bch_bkey_to_text(buf, sizeof(buf), k);
+       bch_extent_to_text(buf, sizeof(buf), k);
        pr_debug("%s UUIDs at %s", rw & REQ_WRITE ? "wrote" : "read", buf);
 
        for (u = c->uuids; u < c->uuids + c->nr_uuids; u++)
@@ -368,14 +393,14 @@ static void uuid_io(struct cache_set *c, unsigned long rw,
                                 u - c->uuids, u->uuid, u->label,
                                 u->first_reg, u->last_reg, u->invalidated);
 
-       closure_return(cl);
+       closure_return_with_destructor(cl, uuid_io_unlock);
 }
 
 static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl)
 {
        struct bkey *k = &j->uuid_bucket;
 
-       if (bch_btree_ptr_invalid(c, k))
+       if (__bch_btree_ptr_invalid(c, k))
                return "bad uuid pointer";
 
        bkey_copy(&c->uuid_bucket, k);
@@ -420,7 +445,7 @@ static int __uuid_write(struct cache_set *c)
 
        lockdep_assert_held(&bch_register_lock);
 
-       if (bch_bucket_alloc_set(c, WATERMARK_METADATA, &k.key, 1, true))
+       if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, true))
                return 1;
 
        SET_KEY_SIZE(&k.key, c->sb.bucket_size);
@@ -503,10 +528,10 @@ static void prio_io(struct cache *ca, uint64_t bucket, unsigned long rw)
 
        closure_init_stack(cl);
 
-       bio->bi_sector  = bucket * ca->sb.bucket_size;
-       bio->bi_bdev    = ca->bdev;
-       bio->bi_rw      = REQ_SYNC|REQ_META|rw;
-       bio->bi_size    = bucket_bytes(ca);
+       bio->bi_iter.bi_sector  = bucket * ca->sb.bucket_size;
+       bio->bi_bdev            = ca->bdev;
+       bio->bi_rw              = REQ_SYNC|REQ_META|rw;
+       bio->bi_iter.bi_size    = bucket_bytes(ca);
 
        bio->bi_end_io  = prio_endio;
        bio->bi_private = ca;
@@ -538,8 +563,8 @@ void bch_prio_write(struct cache *ca)
        atomic_long_add(ca->sb.bucket_size * prio_buckets(ca),
                        &ca->meta_sectors_written);
 
-       pr_debug("free %zu, free_inc %zu, unused %zu", fifo_used(&ca->free),
-                fifo_used(&ca->free_inc), fifo_used(&ca->unused));
+       //pr_debug("free %zu, free_inc %zu, unused %zu", fifo_used(&ca->free),
+       //       fifo_used(&ca->free_inc), fifo_used(&ca->unused));
 
        for (i = prio_buckets(ca) - 1; i >= 0; --i) {
                long bucket;
@@ -558,7 +583,7 @@ void bch_prio_write(struct cache *ca)
                p->magic        = pset_magic(&ca->sb);
                p->csum         = bch_crc64(&p->magic, bucket_bytes(ca) - 8);
 
-               bucket = bch_bucket_alloc(ca, WATERMARK_PRIO, true);
+               bucket = bch_bucket_alloc(ca, RESERVE_PRIO, true);
                BUG_ON(bucket == -1);
 
                mutex_unlock(&ca->set->bucket_lock);
@@ -739,8 +764,6 @@ static void bcache_device_free(struct bcache_device *d)
        }
 
        bio_split_pool_free(&d->bio_split_hook);
-       if (d->unaligned_bvec)
-               mempool_destroy(d->unaligned_bvec);
        if (d->bio_split)
                bioset_free(d->bio_split);
        if (is_vmalloc_addr(d->full_dirty_stripes))
@@ -793,8 +816,6 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size,
                return minor;
 
        if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
-           !(d->unaligned_bvec = mempool_create_kmalloc_pool(1,
-                               sizeof(struct bio_vec) * BIO_MAX_PAGES)) ||
            bio_split_pool_init(&d->bio_split_hook) ||
            !(d->disk = alloc_disk(1))) {
                ida_simple_remove(&bcache_minor, minor);
@@ -1102,7 +1123,7 @@ static int cached_dev_init(struct cached_dev *dc, unsigned block_size)
        set_closure_fn(&dc->disk.cl, cached_dev_flush, system_wq);
        kobject_init(&dc->disk.kobj, &bch_cached_dev_ktype);
        INIT_WORK(&dc->detach, cached_dev_detach_finish);
-       closure_init_unlocked(&dc->sb_write);
+       sema_init(&dc->sb_write_mutex, 1);
        INIT_LIST_HEAD(&dc->io_lru);
        spin_lock_init(&dc->io_lock);
        bch_cache_accounting_init(&dc->accounting, &dc->disk.cl);
@@ -1114,6 +1135,12 @@ static int cached_dev_init(struct cached_dev *dc, unsigned block_size)
                hlist_add_head(&io->hash, dc->io_hash + RECENT_IO);
        }
 
+       dc->disk.stripe_size = q->limits.io_opt >> 9;
+
+       if (dc->disk.stripe_size)
+               dc->partial_stripes_expensive =
+                       q->limits.raid_partial_stripes_expensive;
+
        ret = bcache_device_init(&dc->disk, block_size,
                         dc->bdev->bd_part->nr_sects - dc->sb.data_offset);
        if (ret)
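
The stripe_size assignment above converts the queue's optimal I/O size
(q->limits.io_opt, in bytes) to 512-byte sectors with a >> 9. A one-line
check with an illustrative RAID stripe size:

#include <stdio.h>

int main(void)
{
        unsigned io_opt = 262144;  /* bytes; illustrative RAID stripe */

        printf("stripe_size = %u sectors\n", io_opt >> 9);  /* 512 */
        return 0;
}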
@@ -1325,8 +1352,8 @@ static void cache_set_free(struct closure *cl)
                if (ca)
                        kobject_put(&ca->kobj);
 
+       bch_bset_sort_state_free(&c->sort);
        free_pages((unsigned long) c->uuids, ilog2(bucket_pages(c)));
-       free_pages((unsigned long) c->sort, ilog2(bucket_pages(c)));
 
        if (c->bio_split)
                bioset_free(c->bio_split);
@@ -1451,21 +1478,17 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
        c->block_bits           = ilog2(sb->block_size);
        c->nr_uuids             = bucket_bytes(c) / sizeof(struct uuid_entry);
 
-       c->btree_pages          = c->sb.bucket_size / PAGE_SECTORS;
+       c->btree_pages          = bucket_pages(c);
        if (c->btree_pages > BTREE_MAX_PAGES)
                c->btree_pages = max_t(int, c->btree_pages / 4,
                                       BTREE_MAX_PAGES);
 
-       c->sort_crit_factor = int_sqrt(c->btree_pages);
-
-       closure_init_unlocked(&c->sb_write);
+       sema_init(&c->sb_write_mutex, 1);
        mutex_init(&c->bucket_lock);
        init_waitqueue_head(&c->try_wait);
        init_waitqueue_head(&c->bucket_wait);
-       closure_init_unlocked(&c->uuid_write);
-       mutex_init(&c->sort_lock);
+       sema_init(&c->uuid_write_mutex, 1);
 
-       spin_lock_init(&c->sort_time.lock);
        spin_lock_init(&c->btree_gc_time.lock);
        spin_lock_init(&c->btree_split_time.lock);
        spin_lock_init(&c->btree_read_time.lock);
@@ -1493,11 +1516,11 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
                                bucket_pages(c))) ||
            !(c->fill_iter = mempool_create_kmalloc_pool(1, iter_size)) ||
            !(c->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
-           !(c->sort = alloc_bucket_pages(GFP_KERNEL, c)) ||
            !(c->uuids = alloc_bucket_pages(GFP_KERNEL, c)) ||
            bch_journal_alloc(c) ||
            bch_btree_cache_alloc(c) ||
-           bch_open_buckets_alloc(c))
+           bch_open_buckets_alloc(c) ||
+           bch_bset_sort_state_init(&c->sort, ilog2(c->btree_pages)))
                goto err;
 
        c->congested_read_threshold_us  = 2000;
@@ -1553,7 +1576,7 @@ static void run_cache_set(struct cache_set *c)
                k = &j->btree_root;
 
                err = "bad btree root";
-               if (bch_btree_ptr_invalid(c, k))
+               if (__bch_btree_ptr_invalid(c, k))
                        goto err;
 
                err = "error reading btree root";
@@ -1747,6 +1770,7 @@ err:
 void bch_cache_release(struct kobject *kobj)
 {
        struct cache *ca = container_of(kobj, struct cache, kobj);
+       unsigned i;
 
        if (ca->set)
                ca->set->cache[ca->sb.nr_this_dev] = NULL;
@@ -1760,7 +1784,9 @@ void bch_cache_release(struct kobject *kobj)
        free_heap(&ca->heap);
        free_fifo(&ca->unused);
        free_fifo(&ca->free_inc);
-       free_fifo(&ca->free);
+
+       for (i = 0; i < RESERVE_NR; i++)
+               free_fifo(&ca->free[i]);
 
        if (ca->sb_bio.bi_inline_vecs[0].bv_page)
                put_page(ca->sb_bio.bi_io_vec[0].bv_page);
@@ -1786,10 +1812,12 @@ static int cache_alloc(struct cache_sb *sb, struct cache *ca)
        ca->journal.bio.bi_max_vecs = 8;
        ca->journal.bio.bi_io_vec = ca->journal.bio.bi_inline_vecs;
 
-       free = roundup_pow_of_two(ca->sb.nbuckets) >> 9;
-       free = max_t(size_t, free, (prio_buckets(ca) + 8) * 2);
+       free = roundup_pow_of_two(ca->sb.nbuckets) >> 10;
 
-       if (!init_fifo(&ca->free,       free, GFP_KERNEL) ||
+       if (!init_fifo(&ca->free[RESERVE_BTREE], 8, GFP_KERNEL) ||
+           !init_fifo(&ca->free[RESERVE_PRIO], prio_buckets(ca), GFP_KERNEL) ||
+           !init_fifo(&ca->free[RESERVE_MOVINGGC], free, GFP_KERNEL) ||
+           !init_fifo(&ca->free[RESERVE_NONE], free, GFP_KERNEL) ||
            !init_fifo(&ca->free_inc,   free << 2, GFP_KERNEL) ||
            !init_fifo(&ca->unused,     free << 2, GFP_KERNEL) ||
            !init_heap(&ca->heap,       free << 3, GFP_KERNEL) ||
@@ -2034,7 +2062,8 @@ static void bcache_exit(void)
                kobject_put(bcache_kobj);
        if (bcache_wq)
                destroy_workqueue(bcache_wq);
-       unregister_blkdev(bcache_major, "bcache");
+       if (bcache_major)
+               unregister_blkdev(bcache_major, "bcache");
        unregister_reboot_notifier(&reboot);
 }
 
index a1f8561..c6ab693 100644 (file)
@@ -102,7 +102,6 @@ rw_attribute(bypass_torture_test);
 rw_attribute(key_merging_disabled);
 rw_attribute(gc_always_rewrite);
 rw_attribute(expensive_debug_checks);
-rw_attribute(freelist_percent);
 rw_attribute(cache_replacement_policy);
 rw_attribute(btree_shrinker_disabled);
 rw_attribute(copy_gc_enabled);
@@ -401,6 +400,48 @@ static struct attribute *bch_flash_dev_files[] = {
 };
 KTYPE(bch_flash_dev);
 
+struct bset_stats_op {
+       struct btree_op op;
+       size_t nodes;
+       struct bset_stats stats;
+};
+
+static int btree_bset_stats(struct btree_op *b_op, struct btree *b)
+{
+       struct bset_stats_op *op = container_of(b_op, struct bset_stats_op, op);
+
+       op->nodes++;
+       bch_btree_keys_stats(&b->keys, &op->stats);
+
+       return MAP_CONTINUE;
+}
+
+int bch_bset_print_stats(struct cache_set *c, char *buf)
+{
+       struct bset_stats_op op;
+       int ret;
+
+       memset(&op, 0, sizeof(op));
+       bch_btree_op_init(&op.op, -1);
+
+       ret = bch_btree_map_nodes(&op.op, c, &ZERO_KEY, btree_bset_stats);
+       if (ret < 0)
+               return ret;
+
+       return snprintf(buf, PAGE_SIZE,
+                       "btree nodes:           %zu\n"
+                       "written sets:          %zu\n"
+                       "unwritten sets:                %zu\n"
+                       "written key bytes:     %zu\n"
+                       "unwritten key bytes:   %zu\n"
+                       "floats:                        %zu\n"
+                       "failed:                        %zu\n",
+                       op.nodes,
+                       op.stats.sets_written, op.stats.sets_unwritten,
+                       op.stats.bytes_written, op.stats.bytes_unwritten,
+                       op.stats.floats, op.stats.failed);
+}
+
 SHOW(__bch_cache_set)
 {
        unsigned root_usage(struct cache_set *c)
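
bch_bset_print_stats() in the hunk above is a fold: bch_btree_map_nodes()
owns the traversal while the btree_bset_stats() callback only accumulates
into the op embedded beside the btree_op. The same shape reduced to a
plain array walk, with hypothetical trimmed-down types:

#include <stdio.h>

struct bset_stats { unsigned sets_written, floats; };  /* trimmed down */
struct node { unsigned sets, floats; };

/* per-node callback: accumulate only, never traverse */
static void visit(struct bset_stats *acc, unsigned *nodes,
                  const struct node *n)
{
        (*nodes)++;
        acc->sets_written += n->sets;
        acc->floats += n->floats;
}

int main(void)
{
        struct node tree[] = { {2, 10}, {1, 4}, {3, 7} };
        struct bset_stats acc = { 0, 0 };
        unsigned i, nodes = 0;

        for (i = 0; i < 3; i++)  /* bch_btree_map_nodes() stand-in */
                visit(&acc, &nodes, &tree[i]);

        printf("nodes %u, sets %u, floats %u\n",
               nodes, acc.sets_written, acc.floats);
        return 0;
}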
@@ -419,7 +460,7 @@ lock_root:
                        rw_lock(false, b, b->level);
                } while (b != c->root);
 
-               for_each_key_filter(b, k, &iter, bch_ptr_bad)
+               for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
                        bytes += bkey_bytes(k);
 
                rw_unlock(false, b);
@@ -434,7 +475,7 @@ lock_root:
 
                mutex_lock(&c->bucket_lock);
                list_for_each_entry(b, &c->btree_cache, list)
-                       ret += 1 << (b->page_order + PAGE_SHIFT);
+                       ret += 1 << (b->keys.page_order + PAGE_SHIFT);
 
                mutex_unlock(&c->bucket_lock);
                return ret;
@@ -491,7 +532,7 @@ lock_root:
 
        sysfs_print_time_stats(&c->btree_gc_time,       btree_gc, sec, ms);
        sysfs_print_time_stats(&c->btree_split_time,    btree_split, sec, us);
-       sysfs_print_time_stats(&c->sort_time,           btree_sort, ms, us);
+       sysfs_print_time_stats(&c->sort.time,           btree_sort, ms, us);
        sysfs_print_time_stats(&c->btree_read_time,     btree_read, ms, us);
        sysfs_print_time_stats(&c->try_harder_time,     try_harder, ms, us);
 
@@ -711,9 +752,6 @@ SHOW(__bch_cache)
        sysfs_print(io_errors,
                    atomic_read(&ca->io_errors) >> IO_ERROR_SHIFT);
 
-       sysfs_print(freelist_percent, ca->free.size * 100 /
-                   ((size_t) ca->sb.nbuckets));
-
        if (attr == &sysfs_cache_replacement_policy)
                return bch_snprint_string_list(buf, PAGE_SIZE,
                                               cache_replacement_policies,
@@ -820,32 +858,6 @@ STORE(__bch_cache)
                }
        }
 
-       if (attr == &sysfs_freelist_percent) {
-               DECLARE_FIFO(long, free);
-               long i;
-               size_t p = strtoul_or_return(buf);
-
-               p = clamp_t(size_t,
-                           ((size_t) ca->sb.nbuckets * p) / 100,
-                           roundup_pow_of_two(ca->sb.nbuckets) >> 9,
-                           ca->sb.nbuckets / 2);
-
-               if (!init_fifo_exact(&free, p, GFP_KERNEL))
-                       return -ENOMEM;
-
-               mutex_lock(&ca->set->bucket_lock);
-
-               fifo_move(&free, &ca->free);
-               fifo_swap(&free, &ca->free);
-
-               mutex_unlock(&ca->set->bucket_lock);
-
-               while (fifo_pop(&free, i))
-                       atomic_dec(&ca->buckets[i].pin);
-
-               free_fifo(&free);
-       }
-
        if (attr == &sysfs_clear_stats) {
                atomic_long_set(&ca->sectors_written, 0);
                atomic_long_set(&ca->btree_sectors_written, 0);
@@ -869,7 +881,6 @@ static struct attribute *bch_cache_files[] = {
        &sysfs_metadata_written,
        &sysfs_io_errors,
        &sysfs_clear_stats,
-       &sysfs_freelist_percent,
        &sysfs_cache_replacement_policy,
        NULL
 };
index bb37618..db3ae4c 100644 (file)
@@ -224,10 +224,10 @@ uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done)
 
 void bch_bio_map(struct bio *bio, void *base)
 {
-       size_t size = bio->bi_size;
+       size_t size = bio->bi_iter.bi_size;
        struct bio_vec *bv = bio->bi_io_vec;
 
-       BUG_ON(!bio->bi_size);
+       BUG_ON(!bio->bi_iter.bi_size);
        BUG_ON(bio->bi_vcnt);
 
        bv->bv_offset = base ? ((unsigned long) base) % PAGE_SIZE : 0;
index 1030c60..ac7d0d1 100644 (file)
@@ -2,6 +2,7 @@
 #ifndef _BCACHE_UTIL_H
 #define _BCACHE_UTIL_H
 
+#include <linux/blkdev.h>
 #include <linux/errno.h>
 #include <linux/kernel.h>
 #include <linux/llist.h>
@@ -17,11 +18,13 @@ struct closure;
 
 #ifdef CONFIG_BCACHE_DEBUG
 
+#define EBUG_ON(cond)                  BUG_ON(cond)
 #define atomic_dec_bug(v)      BUG_ON(atomic_dec_return(v) < 0)
 #define atomic_inc_bug(v, i)   BUG_ON(atomic_inc_return(v) <= i)
 
 #else /* DEBUG */
 
+#define EBUG_ON(cond)                  do { if (cond); } while (0)
 #define atomic_dec_bug(v)      atomic_dec(v)
 #define atomic_inc_bug(v, i)   atomic_inc(v)
 
@@ -391,6 +394,11 @@ struct time_stats {
 
 void bch_time_stats_update(struct time_stats *stats, uint64_t time);
 
+static inline unsigned local_clock_us(void)
+{
+       return local_clock() >> 10;
+}
+
 #define NSEC_PER_ns                    1L
 #define NSEC_PER_us                    NSEC_PER_USEC
 #define NSEC_PER_ms                    NSEC_PER_MSEC
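
local_clock_us() above trades accuracy for speed: >> 10 divides by 1024
rather than 1000, under-reading microseconds by about 2.3%, which is fine
for latency bookkeeping. A quick check:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t ns = 1000000000ULL;  /* one second of nanoseconds */

        printf("/1000: %llu us, >>10: %llu us\n",
               (unsigned long long)(ns / 1000),   /* 1000000 */
               (unsigned long long)(ns >> 10));   /*  976562 */
        return 0;
}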
index 6c44fe0..f4300e4 100644 (file)
@@ -111,7 +111,7 @@ static void dirty_init(struct keybuf_key *w)
        if (!io->dc->writeback_percent)
                bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
 
-       bio->bi_size            = KEY_SIZE(&w->key) << 9;
+       bio->bi_iter.bi_size    = KEY_SIZE(&w->key) << 9;
        bio->bi_max_vecs        = DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS);
        bio->bi_private         = w;
        bio->bi_io_vec          = bio->bi_inline_vecs;
@@ -184,7 +184,7 @@ static void write_dirty(struct closure *cl)
 
        dirty_init(w);
        io->bio.bi_rw           = WRITE;
-       io->bio.bi_sector       = KEY_START(&w->key);
+       io->bio.bi_iter.bi_sector = KEY_START(&w->key);
        io->bio.bi_bdev         = io->dc->bdev;
        io->bio.bi_end_io       = dirty_endio;
 
@@ -253,7 +253,7 @@ static void read_dirty(struct cached_dev *dc)
                io->dc          = dc;
 
                dirty_init(w);
-               io->bio.bi_sector       = PTR_OFFSET(&w->key, 0);
+               io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0);
                io->bio.bi_bdev         = PTR_CACHE(dc->disk.c,
                                                    &w->key, 0)->bdev;
                io->bio.bi_rw           = READ;
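
A unit note on the writeback hunks: KEY_SIZE() is in 512-byte sectors while bi_iter.bi_size is in bytes, hence the << 9 (i.e. << SECTOR_SHIFT). The same sectors-to-bytes shift recurs throughout the series:

	bio->bi_iter.bi_size = nr_sectors << 9;	/* sectors to bytes */
	nr_sectors = bio->bi_iter.bi_size >> 9;	/* and back */

(nr_sectors here is illustrative, not a field from the hunks.)
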
index c9ddcf4..e2f8598 100644 (file)
@@ -50,7 +50,7 @@ static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
                return false;
 
        if (dc->partial_stripes_expensive &&
-           bcache_dev_stripe_dirty(dc, bio->bi_sector,
+           bcache_dev_stripe_dirty(dc, bio->bi_iter.bi_sector,
                                    bio_sectors(bio)))
                return true;
 
index 3a8cfa2..dd36461 100644 (file)
  * original bio state.
  */
 
-struct dm_bio_vec_details {
-#if PAGE_SIZE < 65536
-       __u16 bv_len;
-       __u16 bv_offset;
-#else
-       unsigned bv_len;
-       unsigned bv_offset;
-#endif
-};
-
 struct dm_bio_details {
-       sector_t bi_sector;
        struct block_device *bi_bdev;
-       unsigned int bi_size;
-       unsigned short bi_idx;
        unsigned long bi_flags;
-       struct dm_bio_vec_details bi_io_vec[BIO_MAX_PAGES];
+       struct bvec_iter bi_iter;
 };
 
 static inline void dm_bio_record(struct dm_bio_details *bd, struct bio *bio)
 {
-       unsigned i;
-
-       bd->bi_sector = bio->bi_sector;
        bd->bi_bdev = bio->bi_bdev;
-       bd->bi_size = bio->bi_size;
-       bd->bi_idx = bio->bi_idx;
        bd->bi_flags = bio->bi_flags;
-
-       for (i = 0; i < bio->bi_vcnt; i++) {
-               bd->bi_io_vec[i].bv_len = bio->bi_io_vec[i].bv_len;
-               bd->bi_io_vec[i].bv_offset = bio->bi_io_vec[i].bv_offset;
-       }
+       bd->bi_iter = bio->bi_iter;
 }
 
 static inline void dm_bio_restore(struct dm_bio_details *bd, struct bio *bio)
 {
-       unsigned i;
-
-       bio->bi_sector = bd->bi_sector;
        bio->bi_bdev = bd->bi_bdev;
-       bio->bi_size = bd->bi_size;
-       bio->bi_idx = bd->bi_idx;
        bio->bi_flags = bd->bi_flags;
-
-       for (i = 0; i < bio->bi_vcnt; i++) {
-               bio->bi_io_vec[i].bv_len = bd->bi_io_vec[i].bv_len;
-               bio->bi_io_vec[i].bv_offset = bd->bi_io_vec[i].bv_offset;
-       }
+       bio->bi_iter = bd->bi_iter;
 }
 
 #endif
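
With the iterator in place, dm-bio-record.h collapses to plain struct copies: bi_iter carries the sector, size, index, and intra-bvec offset that previously had to be saved field by field, along with a bounded snapshot of every bio_vec. A sketch of the retry pattern this header exists for, as used by targets such as dm-raid1:

	struct dm_bio_details bd;

	dm_bio_record(&bd, bio);
	/* submit bio; if it fails and another mirror/path is available: */
	dm_bio_restore(&bd, bio);
	/* bio is now as it was before submission */
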
index 9ed4212..66c5d13 100644 (file)
@@ -540,7 +540,7 @@ static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block,
        bio_init(&b->bio);
        b->bio.bi_io_vec = b->bio_vec;
        b->bio.bi_max_vecs = DM_BUFIO_INLINE_VECS;
-       b->bio.bi_sector = block << b->c->sectors_per_block_bits;
+       b->bio.bi_iter.bi_sector = block << b->c->sectors_per_block_bits;
        b->bio.bi_bdev = b->c->bdev;
        b->bio.bi_end_io = end_io;
 
index 930e8c3..1e018e9 100644 (file)
@@ -72,7 +72,7 @@ static enum io_pattern iot_pattern(struct io_tracker *t)
 
 static void iot_update_stats(struct io_tracker *t, struct bio *bio)
 {
-       if (bio->bi_sector == from_oblock(t->last_end_oblock) + 1)
+       if (bio->bi_iter.bi_sector == from_oblock(t->last_end_oblock) + 1)
                t->nr_seq_samples++;
        else {
                /*
@@ -87,7 +87,7 @@ static void iot_update_stats(struct io_tracker *t, struct bio *bio)
                t->nr_rand_samples++;
        }
 
-       t->last_end_oblock = to_oblock(bio->bi_sector + bio_sectors(bio) - 1);
+       t->last_end_oblock = to_oblock(bio_end_sector(bio) - 1);
 }
 
 static void iot_check_for_pattern_switch(struct io_tracker *t)
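
bio_end_sector(), which replaces the open-coded bi_sector + bio_sectors(bio) above, is (modulo whitespace) the following in the 3.14 tree:

	#define bio_end_sector(bio)	((bio)->bi_iter.bi_sector + bio_sectors((bio)))

so the two forms are identical; the helper simply survives future layout changes.
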
index 09334c2..ffd472e 100644 (file)
@@ -85,6 +85,12 @@ static void dm_unhook_bio(struct dm_hook_info *h, struct bio *bio)
 {
        bio->bi_end_io = h->bi_end_io;
        bio->bi_private = h->bi_private;
+
+       /*
+        * Must bump bi_remaining to allow bio to complete with
+        * restored bi_end_io.
+        */
+       atomic_inc(&bio->bi_remaining);
 }
 
 /*----------------------------------------------------------------*/
@@ -664,15 +670,17 @@ static void remap_to_origin(struct cache *cache, struct bio *bio)
 static void remap_to_cache(struct cache *cache, struct bio *bio,
                           dm_cblock_t cblock)
 {
-       sector_t bi_sector = bio->bi_sector;
+       sector_t bi_sector = bio->bi_iter.bi_sector;
 
        bio->bi_bdev = cache->cache_dev->bdev;
        if (!block_size_is_power_of_two(cache))
-               bio->bi_sector = (from_cblock(cblock) * cache->sectors_per_block) +
-                               sector_div(bi_sector, cache->sectors_per_block);
+               bio->bi_iter.bi_sector =
+                       (from_cblock(cblock) * cache->sectors_per_block) +
+                       sector_div(bi_sector, cache->sectors_per_block);
        else
-               bio->bi_sector = (from_cblock(cblock) << cache->sectors_per_block_shift) |
-                               (bi_sector & (cache->sectors_per_block - 1));
+               bio->bi_iter.bi_sector =
+                       (from_cblock(cblock) << cache->sectors_per_block_shift) |
+                       (bi_sector & (cache->sectors_per_block - 1));
 }
 
 static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
@@ -712,7 +720,7 @@ static void remap_to_cache_dirty(struct cache *cache, struct bio *bio,
 
 static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio)
 {
-       sector_t block_nr = bio->bi_sector;
+       sector_t block_nr = bio->bi_iter.bi_sector;
 
        if (!block_size_is_power_of_two(cache))
                (void) sector_div(block_nr, cache->sectors_per_block);
@@ -1027,7 +1035,7 @@ static void issue_overwrite(struct dm_cache_migration *mg, struct bio *bio)
 static bool bio_writes_complete_block(struct cache *cache, struct bio *bio)
 {
        return (bio_data_dir(bio) == WRITE) &&
-               (bio->bi_size == (cache->sectors_per_block << SECTOR_SHIFT));
+               (bio->bi_iter.bi_size == (cache->sectors_per_block << SECTOR_SHIFT));
 }
 
 static void avoid_copy(struct dm_cache_migration *mg)
@@ -1252,7 +1260,7 @@ static void process_flush_bio(struct cache *cache, struct bio *bio)
        size_t pb_data_size = get_per_bio_data_size(cache);
        struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
 
-       BUG_ON(bio->bi_size);
+       BUG_ON(bio->bi_iter.bi_size);
        if (!pb->req_nr)
                remap_to_origin(cache, bio);
        else
@@ -1275,9 +1283,9 @@ static void process_flush_bio(struct cache *cache, struct bio *bio)
  */
 static void process_discard_bio(struct cache *cache, struct bio *bio)
 {
-       dm_block_t start_block = dm_sector_div_up(bio->bi_sector,
+       dm_block_t start_block = dm_sector_div_up(bio->bi_iter.bi_sector,
                                                  cache->discard_block_size);
-       dm_block_t end_block = bio->bi_sector + bio_sectors(bio);
+       dm_block_t end_block = bio_end_sector(bio);
        dm_block_t b;
 
        end_block = block_div(end_block, cache->discard_block_size);
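
The atomic_inc(&bio->bi_remaining) added to dm_unhook_bio() above (and, below, to dm-snapshot and dm-thin) follows from the bio-chaining infrastructure this series introduces. Abridged from 3.14 fs/bio.c, bio_endio() now begins:

	BUG_ON(atomic_read(&bio->bi_remaining) <= 0);
	/* (BIO_UPTODATE/error propagation elided) */
	if (!atomic_dec_and_test(&bio->bi_remaining))
		return;

By the time a hooked ->bi_end_io runs, the count has already reached zero; a target that restores the original ->bi_end_io and expects bio_endio() to be called again must therefore re-increment bi_remaining first, or the second call trips the BUG_ON.
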
index 81b0fa6..784695d 100644 (file)
@@ -39,10 +39,8 @@ struct convert_context {
        struct completion restart;
        struct bio *bio_in;
        struct bio *bio_out;
-       unsigned int offset_in;
-       unsigned int offset_out;
-       unsigned int idx_in;
-       unsigned int idx_out;
+       struct bvec_iter iter_in;
+       struct bvec_iter iter_out;
        sector_t cc_sector;
        atomic_t cc_pending;
 };
@@ -826,10 +824,10 @@ static void crypt_convert_init(struct crypt_config *cc,
 {
        ctx->bio_in = bio_in;
        ctx->bio_out = bio_out;
-       ctx->offset_in = 0;
-       ctx->offset_out = 0;
-       ctx->idx_in = bio_in ? bio_in->bi_idx : 0;
-       ctx->idx_out = bio_out ? bio_out->bi_idx : 0;
+       if (bio_in)
+               ctx->iter_in = bio_in->bi_iter;
+       if (bio_out)
+               ctx->iter_out = bio_out->bi_iter;
        ctx->cc_sector = sector + cc->iv_offset;
        init_completion(&ctx->restart);
 }
@@ -857,8 +855,8 @@ static int crypt_convert_block(struct crypt_config *cc,
                               struct convert_context *ctx,
                               struct ablkcipher_request *req)
 {
-       struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in);
-       struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out);
+       struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in);
+       struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
        struct dm_crypt_request *dmreq;
        u8 *iv;
        int r;
@@ -869,24 +867,15 @@ static int crypt_convert_block(struct crypt_config *cc,
        dmreq->iv_sector = ctx->cc_sector;
        dmreq->ctx = ctx;
        sg_init_table(&dmreq->sg_in, 1);
-       sg_set_page(&dmreq->sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT,
-                   bv_in->bv_offset + ctx->offset_in);
+       sg_set_page(&dmreq->sg_in, bv_in.bv_page, 1 << SECTOR_SHIFT,
+                   bv_in.bv_offset);
 
        sg_init_table(&dmreq->sg_out, 1);
-       sg_set_page(&dmreq->sg_out, bv_out->bv_page, 1 << SECTOR_SHIFT,
-                   bv_out->bv_offset + ctx->offset_out);
+       sg_set_page(&dmreq->sg_out, bv_out.bv_page, 1 << SECTOR_SHIFT,
+                   bv_out.bv_offset);
 
-       ctx->offset_in += 1 << SECTOR_SHIFT;
-       if (ctx->offset_in >= bv_in->bv_len) {
-               ctx->offset_in = 0;
-               ctx->idx_in++;
-       }
-
-       ctx->offset_out += 1 << SECTOR_SHIFT;
-       if (ctx->offset_out >= bv_out->bv_len) {
-               ctx->offset_out = 0;
-               ctx->idx_out++;
-       }
+       bio_advance_iter(ctx->bio_in, &ctx->iter_in, 1 << SECTOR_SHIFT);
+       bio_advance_iter(ctx->bio_out, &ctx->iter_out, 1 << SECTOR_SHIFT);
 
        if (cc->iv_gen_ops) {
                r = cc->iv_gen_ops->generator(cc, iv, dmreq);
@@ -937,8 +926,7 @@ static int crypt_convert(struct crypt_config *cc,
 
        atomic_set(&ctx->cc_pending, 1);
 
-       while(ctx->idx_in < ctx->bio_in->bi_vcnt &&
-             ctx->idx_out < ctx->bio_out->bi_vcnt) {
+       while (ctx->iter_in.bi_size && ctx->iter_out.bi_size) {
 
                crypt_alloc_req(cc, ctx);
 
@@ -1021,7 +1009,7 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size,
                size -= len;
        }
 
-       if (!clone->bi_size) {
+       if (!clone->bi_iter.bi_size) {
                bio_put(clone);
                return NULL;
        }
@@ -1161,7 +1149,7 @@ static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
        crypt_inc_pending(io);
 
        clone_init(io, clone);
-       clone->bi_sector = cc->start + io->sector;
+       clone->bi_iter.bi_sector = cc->start + io->sector;
 
        generic_make_request(clone);
        return 0;
@@ -1207,9 +1195,9 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
        }
 
        /* crypt_convert should have filled the clone bio */
-       BUG_ON(io->ctx.idx_out < clone->bi_vcnt);
+       BUG_ON(io->ctx.iter_out.bi_size);
 
-       clone->bi_sector = cc->start + io->sector;
+       clone->bi_iter.bi_sector = cc->start + io->sector;
 
        if (async)
                kcryptd_queue_io(io);
@@ -1224,7 +1212,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
        struct dm_crypt_io *new_io;
        int crypt_finished;
        unsigned out_of_pages = 0;
-       unsigned remaining = io->base_bio->bi_size;
+       unsigned remaining = io->base_bio->bi_iter.bi_size;
        sector_t sector = io->sector;
        int r;
 
@@ -1246,9 +1234,9 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
                }
 
                io->ctx.bio_out = clone;
-               io->ctx.idx_out = 0;
+               io->ctx.iter_out = clone->bi_iter;
 
-               remaining -= clone->bi_size;
+               remaining -= clone->bi_iter.bi_size;
                sector += bio_sectors(clone);
 
                crypt_inc_pending(io);
@@ -1290,8 +1278,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
                        crypt_inc_pending(new_io);
                        crypt_convert_init(cc, &new_io->ctx, NULL,
                                           io->base_bio, sector);
-                       new_io->ctx.idx_in = io->ctx.idx_in;
-                       new_io->ctx.offset_in = io->ctx.offset_in;
+                       new_io->ctx.iter_in = io->ctx.iter_in;
 
                        /*
                         * Fragments after the first use the base_io
@@ -1869,11 +1856,12 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
        if (unlikely(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))) {
                bio->bi_bdev = cc->dev->bdev;
                if (bio_sectors(bio))
-                       bio->bi_sector = cc->start + dm_target_offset(ti, bio->bi_sector);
+                       bio->bi_iter.bi_sector = cc->start +
+                               dm_target_offset(ti, bio->bi_iter.bi_sector);
                return DM_MAPIO_REMAPPED;
        }
 
-       io = crypt_io_alloc(cc, bio, dm_target_offset(ti, bio->bi_sector));
+       io = crypt_io_alloc(cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
 
        if (bio_data_dir(io->base_bio) == READ) {
                if (kcryptd_io_read(io, GFP_NOWAIT))
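
The dm-crypt hunks show the new iteration idiom in full: instead of hand-stepping (idx, offset) pairs, keep a private struct bvec_iter, fetch the current position as a by-value bio_vec with bio_iter_iovec(), and move on with bio_advance_iter(), which handles partial-bvec consumption. The generic shape, assuming the helper names match 3.14 include/linux/bio.h:

	struct bvec_iter iter = bio->bi_iter;	/* private copy; bio itself untouched */

	while (iter.bi_size) {
		struct bio_vec bv = bio_iter_iovec(bio, iter);

		/* process bv.bv_len bytes at bv.bv_page + bv.bv_offset */

		bio_advance_iter(bio, &iter, bv.bv_len);
	}

dm-crypt advances in 1 << SECTOR_SHIFT steps rather than bv.bv_len because it encrypts sector by sector; dm-verity (further down) uses exactly this whole-bvec form.
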
index a8a511c..42c3a27 100644 (file)
@@ -277,14 +277,15 @@ static int delay_map(struct dm_target *ti, struct bio *bio)
        if ((bio_data_dir(bio) == WRITE) && (dc->dev_write)) {
                bio->bi_bdev = dc->dev_write->bdev;
                if (bio_sectors(bio))
-                       bio->bi_sector = dc->start_write +
-                                        dm_target_offset(ti, bio->bi_sector);
+                       bio->bi_iter.bi_sector = dc->start_write +
+                               dm_target_offset(ti, bio->bi_iter.bi_sector);
 
                return delay_bio(dc, dc->write_delay, bio);
        }
 
        bio->bi_bdev = dc->dev_read->bdev;
-       bio->bi_sector = dc->start_read + dm_target_offset(ti, bio->bi_sector);
+       bio->bi_iter.bi_sector = dc->start_read +
+               dm_target_offset(ti, bio->bi_iter.bi_sector);
 
        return delay_bio(dc, dc->read_delay, bio);
 }
index c80a0ec..b257e46 100644 (file)
@@ -248,7 +248,8 @@ static void flakey_map_bio(struct dm_target *ti, struct bio *bio)
 
        bio->bi_bdev = fc->dev->bdev;
        if (bio_sectors(bio))
-               bio->bi_sector = flakey_map_sector(ti, bio->bi_sector);
+               bio->bi_iter.bi_sector =
+                       flakey_map_sector(ti, bio->bi_iter.bi_sector);
 }
 
 static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc)
@@ -265,8 +266,8 @@ static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc)
                DMDEBUG("Corrupting data bio=%p by writing %u to byte %u "
                        "(rw=%c bi_rw=%lu bi_sector=%llu cur_bytes=%u)\n",
                        bio, fc->corrupt_bio_value, fc->corrupt_bio_byte,
-                       (bio_data_dir(bio) == WRITE) ? 'w' : 'r',
-                       bio->bi_rw, (unsigned long long)bio->bi_sector, bio_bytes);
+                       (bio_data_dir(bio) == WRITE) ? 'w' : 'r', bio->bi_rw,
+                       (unsigned long long)bio->bi_iter.bi_sector, bio_bytes);
        }
 }
 
index 2a20986..b2b8a10 100644 (file)
@@ -201,26 +201,29 @@ static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offse
 /*
  * Functions for getting the pages from a bvec.
  */
-static void bvec_get_page(struct dpages *dp,
+static void bio_get_page(struct dpages *dp,
                  struct page **p, unsigned long *len, unsigned *offset)
 {
-       struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
-       *p = bvec->bv_page;
-       *len = bvec->bv_len;
-       *offset = bvec->bv_offset;
+       struct bio *bio = dp->context_ptr;
+       struct bio_vec bvec = bio_iovec(bio);
+       *p = bvec.bv_page;
+       *len = bvec.bv_len;
+       *offset = bvec.bv_offset;
 }
 
-static void bvec_next_page(struct dpages *dp)
+static void bio_next_page(struct dpages *dp)
 {
-       struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
-       dp->context_ptr = bvec + 1;
+       struct bio *bio = dp->context_ptr;
+       struct bio_vec bvec = bio_iovec(bio);
+
+       bio_advance(bio, bvec.bv_len);
 }
 
-static void bvec_dp_init(struct dpages *dp, struct bio_vec *bvec)
+static void bio_dp_init(struct dpages *dp, struct bio *bio)
 {
-       dp->get_page = bvec_get_page;
-       dp->next_page = bvec_next_page;
-       dp->context_ptr = bvec;
+       dp->get_page = bio_get_page;
+       dp->next_page = bio_next_page;
+       dp->context_ptr = bio;
 }
 
 /*
@@ -304,14 +307,14 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
                                          dm_sector_div_up(remaining, (PAGE_SIZE >> SECTOR_SHIFT)));
 
                bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
-               bio->bi_sector = where->sector + (where->count - remaining);
+               bio->bi_iter.bi_sector = where->sector + (where->count - remaining);
                bio->bi_bdev = where->bdev;
                bio->bi_end_io = endio;
                store_io_and_region_in_bio(bio, io, region);
 
                if (rw & REQ_DISCARD) {
                        num_sectors = min_t(sector_t, q->limits.max_discard_sectors, remaining);
-                       bio->bi_size = num_sectors << SECTOR_SHIFT;
+                       bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
                        remaining -= num_sectors;
                } else if (rw & REQ_WRITE_SAME) {
                        /*
@@ -320,7 +323,7 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
                        dp->get_page(dp, &page, &len, &offset);
                        bio_add_page(bio, page, logical_block_size, offset);
                        num_sectors = min_t(sector_t, q->limits.max_write_same_sectors, remaining);
-                       bio->bi_size = num_sectors << SECTOR_SHIFT;
+                       bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
 
                        offset = 0;
                        remaining -= num_sectors;
@@ -457,8 +460,8 @@ static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
                list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
                break;
 
-       case DM_IO_BVEC:
-               bvec_dp_init(dp, io_req->mem.ptr.bvec);
+       case DM_IO_BIO:
+               bio_dp_init(dp, io_req->mem.ptr.bio);
                break;
 
        case DM_IO_VMA:
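
DM_IO_BVEC becomes DM_IO_BIO: with immutable biovecs a request may start partway into a bvec, so handing dm-io a raw bio_vec pointer no longer describes the data. Callers now pass the whole bio and dm-io walks it with bio_iovec()/bio_advance(); note that bio_next_page() advances the bio itself, consuming it as its pages are queued. The caller side, mirroring the dm-raid1 hunks below:

	struct dm_io_request io_req = {
		.bi_rw		= READ,
		.mem.type	= DM_IO_BIO,	/* was DM_IO_BVEC */
		.mem.ptr.bio	= bio,		/* was bio->bi_io_vec + bio->bi_idx */
		/* .notify and .client as before */
	};
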
index 4f99d26..53e848c 100644 (file)
@@ -85,7 +85,8 @@ static void linear_map_bio(struct dm_target *ti, struct bio *bio)
 
        bio->bi_bdev = lc->dev->bdev;
        if (bio_sectors(bio))
-               bio->bi_sector = linear_map_sector(ti, bio->bi_sector);
+               bio->bi_iter.bi_sector =
+                       linear_map_sector(ti, bio->bi_iter.bi_sector);
 }
 
 static int linear_map(struct dm_target *ti, struct bio *bio)
index 9584443..f284e0b 100644 (file)
@@ -432,7 +432,7 @@ static int mirror_available(struct mirror_set *ms, struct bio *bio)
        region_t region = dm_rh_bio_to_region(ms->rh, bio);
 
        if (log->type->in_sync(log, region, 0))
-               return choose_mirror(ms,  bio->bi_sector) ? 1 : 0;
+               return choose_mirror(ms,  bio->bi_iter.bi_sector) ? 1 : 0;
 
        return 0;
 }
@@ -442,15 +442,15 @@ static int mirror_available(struct mirror_set *ms, struct bio *bio)
  */
 static sector_t map_sector(struct mirror *m, struct bio *bio)
 {
-       if (unlikely(!bio->bi_size))
+       if (unlikely(!bio->bi_iter.bi_size))
                return 0;
-       return m->offset + dm_target_offset(m->ms->ti, bio->bi_sector);
+       return m->offset + dm_target_offset(m->ms->ti, bio->bi_iter.bi_sector);
 }
 
 static void map_bio(struct mirror *m, struct bio *bio)
 {
        bio->bi_bdev = m->dev->bdev;
-       bio->bi_sector = map_sector(m, bio);
+       bio->bi_iter.bi_sector = map_sector(m, bio);
 }
 
 static void map_region(struct dm_io_region *io, struct mirror *m,
@@ -526,8 +526,8 @@ static void read_async_bio(struct mirror *m, struct bio *bio)
        struct dm_io_region io;
        struct dm_io_request io_req = {
                .bi_rw = READ,
-               .mem.type = DM_IO_BVEC,
-               .mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
+               .mem.type = DM_IO_BIO,
+               .mem.ptr.bio = bio,
                .notify.fn = read_callback,
                .notify.context = bio,
                .client = m->ms->io_client,
@@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
                 * We can only read balance if the region is in sync.
                 */
                if (likely(region_in_sync(ms, region, 1)))
-                       m = choose_mirror(ms, bio->bi_sector);
+                       m = choose_mirror(ms, bio->bi_iter.bi_sector);
                else if (m && atomic_read(&m->error_count))
                        m = NULL;
 
@@ -629,8 +629,8 @@ static void do_write(struct mirror_set *ms, struct bio *bio)
        struct mirror *m;
        struct dm_io_request io_req = {
                .bi_rw = WRITE | (bio->bi_rw & WRITE_FLUSH_FUA),
-               .mem.type = DM_IO_BVEC,
-               .mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
+               .mem.type = DM_IO_BIO,
+               .mem.ptr.bio = bio,
                .notify.fn = write_callback,
                .notify.context = bio,
                .client = ms->io_client,
@@ -1181,7 +1181,7 @@ static int mirror_map(struct dm_target *ti, struct bio *bio)
         * The region is in-sync and we can perform reads directly.
         * Store enough information so we can retry if it fails.
         */
-       m = choose_mirror(ms, bio->bi_sector);
+       m = choose_mirror(ms, bio->bi_iter.bi_sector);
        if (unlikely(!m))
                return -EIO;
 
index 69732e0..b929fd5 100644 (file)
@@ -126,7 +126,8 @@ EXPORT_SYMBOL_GPL(dm_rh_region_to_sector);
 
 region_t dm_rh_bio_to_region(struct dm_region_hash *rh, struct bio *bio)
 {
-       return dm_rh_sector_to_region(rh, bio->bi_sector - rh->target_begin);
+       return dm_rh_sector_to_region(rh, bio->bi_iter.bi_sector -
+                                     rh->target_begin);
 }
 EXPORT_SYMBOL_GPL(dm_rh_bio_to_region);
 
index 7177185..ebddef5 100644 (file)
@@ -1438,6 +1438,7 @@ out:
        if (full_bio) {
                full_bio->bi_end_io = pe->full_bio_end_io;
                full_bio->bi_private = pe->full_bio_private;
+               atomic_inc(&full_bio->bi_remaining);
        }
        free_pending_exception(pe);
 
@@ -1619,11 +1620,10 @@ static void remap_exception(struct dm_snapshot *s, struct dm_exception *e,
                            struct bio *bio, chunk_t chunk)
 {
        bio->bi_bdev = s->cow->bdev;
-       bio->bi_sector = chunk_to_sector(s->store,
-                                        dm_chunk_number(e->new_chunk) +
-                                        (chunk - e->old_chunk)) +
-                                        (bio->bi_sector &
-                                         s->store->chunk_mask);
+       bio->bi_iter.bi_sector =
+               chunk_to_sector(s->store, dm_chunk_number(e->new_chunk) +
+                               (chunk - e->old_chunk)) +
+               (bio->bi_iter.bi_sector & s->store->chunk_mask);
 }
 
 static int snapshot_map(struct dm_target *ti, struct bio *bio)
@@ -1641,7 +1641,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
                return DM_MAPIO_REMAPPED;
        }
 
-       chunk = sector_to_chunk(s->store, bio->bi_sector);
+       chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);
 
        /* Full snapshots are not usable */
        /* To get here the table must be live so s->active is always set. */
@@ -1702,7 +1702,8 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
                r = DM_MAPIO_SUBMITTED;
 
                if (!pe->started &&
-                   bio->bi_size == (s->store->chunk_size << SECTOR_SHIFT)) {
+                   bio->bi_iter.bi_size ==
+                   (s->store->chunk_size << SECTOR_SHIFT)) {
                        pe->started = 1;
                        up_write(&s->lock);
                        start_full_bio(pe, bio);
@@ -1758,7 +1759,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
                return DM_MAPIO_REMAPPED;
        }
 
-       chunk = sector_to_chunk(s->store, bio->bi_sector);
+       chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);
 
        down_write(&s->lock);
 
@@ -2095,7 +2096,7 @@ static int do_origin(struct dm_dev *origin, struct bio *bio)
        down_read(&_origins_lock);
        o = __lookup_origin(origin->bdev);
        if (o)
-               r = __origin_write(&o->snapshots, bio->bi_sector, bio);
+               r = __origin_write(&o->snapshots, bio->bi_iter.bi_sector, bio);
        up_read(&_origins_lock);
 
        return r;
index 73c1712..d1600d2 100644 (file)
@@ -259,13 +259,15 @@ static int stripe_map_range(struct stripe_c *sc, struct bio *bio,
 {
        sector_t begin, end;
 
-       stripe_map_range_sector(sc, bio->bi_sector, target_stripe, &begin);
+       stripe_map_range_sector(sc, bio->bi_iter.bi_sector,
+                               target_stripe, &begin);
        stripe_map_range_sector(sc, bio_end_sector(bio),
                                target_stripe, &end);
        if (begin < end) {
                bio->bi_bdev = sc->stripe[target_stripe].dev->bdev;
-               bio->bi_sector = begin + sc->stripe[target_stripe].physical_start;
-               bio->bi_size = to_bytes(end - begin);
+               bio->bi_iter.bi_sector = begin +
+                       sc->stripe[target_stripe].physical_start;
+               bio->bi_iter.bi_size = to_bytes(end - begin);
                return DM_MAPIO_REMAPPED;
        } else {
                /* The range doesn't map to the target stripe */
@@ -293,9 +295,10 @@ static int stripe_map(struct dm_target *ti, struct bio *bio)
                return stripe_map_range(sc, bio, target_bio_nr);
        }
 
-       stripe_map_sector(sc, bio->bi_sector, &stripe, &bio->bi_sector);
+       stripe_map_sector(sc, bio->bi_iter.bi_sector,
+                         &stripe, &bio->bi_iter.bi_sector);
 
-       bio->bi_sector += sc->stripe[stripe].physical_start;
+       bio->bi_iter.bi_sector += sc->stripe[stripe].physical_start;
        bio->bi_bdev = sc->stripe[stripe].dev->bdev;
 
        return DM_MAPIO_REMAPPED;
index ff9ac4b..09a688b 100644 (file)
@@ -311,11 +311,11 @@ error:
 static int switch_map(struct dm_target *ti, struct bio *bio)
 {
        struct switch_ctx *sctx = ti->private;
-       sector_t offset = dm_target_offset(ti, bio->bi_sector);
+       sector_t offset = dm_target_offset(ti, bio->bi_iter.bi_sector);
        unsigned path_nr = switch_get_path_nr(sctx, offset);
 
        bio->bi_bdev = sctx->path_list[path_nr].dmdev->bdev;
-       bio->bi_sector = sctx->path_list[path_nr].start + offset;
+       bio->bi_iter.bi_sector = sctx->path_list[path_nr].start + offset;
 
        return DM_MAPIO_REMAPPED;
 }
index 726228b..faaf944 100644 (file)
@@ -414,7 +414,7 @@ static bool block_size_is_power_of_two(struct pool *pool)
 static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
 {
        struct pool *pool = tc->pool;
-       sector_t block_nr = bio->bi_sector;
+       sector_t block_nr = bio->bi_iter.bi_sector;
 
        if (block_size_is_power_of_two(pool))
                block_nr >>= pool->sectors_per_block_shift;
@@ -427,14 +427,15 @@ static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
 static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
 {
        struct pool *pool = tc->pool;
-       sector_t bi_sector = bio->bi_sector;
+       sector_t bi_sector = bio->bi_iter.bi_sector;
 
        bio->bi_bdev = tc->pool_dev->bdev;
        if (block_size_is_power_of_two(pool))
-               bio->bi_sector = (block << pool->sectors_per_block_shift) |
-                               (bi_sector & (pool->sectors_per_block - 1));
+               bio->bi_iter.bi_sector =
+                       (block << pool->sectors_per_block_shift) |
+                       (bi_sector & (pool->sectors_per_block - 1));
        else
-               bio->bi_sector = (block * pool->sectors_per_block) +
+               bio->bi_iter.bi_sector = (block * pool->sectors_per_block) +
                                 sector_div(bi_sector, pool->sectors_per_block);
 }
 
@@ -612,8 +613,10 @@ static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *c
 
 static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
 {
-       if (m->bio)
+       if (m->bio) {
                m->bio->bi_end_io = m->saved_bi_end_io;
+               atomic_inc(&m->bio->bi_remaining);
+       }
        cell_error(m->tc->pool, m->cell);
        list_del(&m->list);
        mempool_free(m, m->tc->pool->mapping_pool);
@@ -627,8 +630,10 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
        int r;
 
        bio = m->bio;
-       if (bio)
+       if (bio) {
                bio->bi_end_io = m->saved_bi_end_io;
+               atomic_inc(&bio->bi_remaining);
+       }
 
        if (m->err) {
                cell_error(pool, m->cell);
@@ -731,7 +736,8 @@ static void process_prepared(struct pool *pool, struct list_head *head,
  */
 static int io_overlaps_block(struct pool *pool, struct bio *bio)
 {
-       return bio->bi_size == (pool->sectors_per_block << SECTOR_SHIFT);
+       return bio->bi_iter.bi_size ==
+               (pool->sectors_per_block << SECTOR_SHIFT);
 }
 
 static int io_overwrites_block(struct pool *pool, struct bio *bio)
@@ -1136,7 +1142,7 @@ static void process_shared_bio(struct thin_c *tc, struct bio *bio,
        if (bio_detain(pool, &key, bio, &cell))
                return;
 
-       if (bio_data_dir(bio) == WRITE && bio->bi_size)
+       if (bio_data_dir(bio) == WRITE && bio->bi_iter.bi_size)
                break_sharing(tc, bio, block, &key, lookup_result, cell);
        else {
                struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
@@ -1159,7 +1165,7 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block
        /*
         * Remap empty bios (flushes) immediately, without provisioning.
         */
-       if (!bio->bi_size) {
+       if (!bio->bi_iter.bi_size) {
                inc_all_io_entry(pool, bio);
                cell_defer_no_holder(tc, cell);
 
@@ -1258,7 +1264,7 @@ static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
        r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
        switch (r) {
        case 0:
-               if (lookup_result.shared && (rw == WRITE) && bio->bi_size)
+               if (lookup_result.shared && (rw == WRITE) && bio->bi_iter.bi_size)
                        handle_unserviceable_bio(tc->pool, bio);
                else {
                        inc_all_io_entry(tc->pool, bio);
@@ -2939,7 +2945,7 @@ out_unlock:
 
 static int thin_map(struct dm_target *ti, struct bio *bio)
 {
-       bio->bi_sector = dm_target_offset(ti, bio->bi_sector);
+       bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
 
        return thin_bio_map(ti, bio);
 }
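
remap() here and remap_to_cache() in dm-cache share the same arithmetic: for power-of-two block sizes the target sector is the block number shifted up, OR-ed with the offset inside the block; otherwise it costs a multiply plus a sector_div(). A worked example with hypothetical numbers, assuming sectors_per_block = 128 (shift 7):

	/*
	 * Remap virtual sector 1000 to physical block 5:
	 *
	 *	offset within block:	1000 & 127	== 104
	 *	block 5 starts at:	5 << 7		== 640
	 *	remapped sector:	640 | 104	== 744
	 *
	 * The 104-sector offset inside the block is preserved.
	 */
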
index 4b7941d..796007a 100644 (file)
@@ -73,15 +73,10 @@ struct dm_verity_io {
        sector_t block;
        unsigned n_blocks;
 
-       /* saved bio vector */
-       struct bio_vec *io_vec;
-       unsigned io_vec_size;
+       struct bvec_iter iter;
 
        struct work_struct work;
 
-       /* A space for short vectors; longer vectors are allocated separately. */
-       struct bio_vec io_vec_inline[DM_VERITY_IO_VEC_INLINE];
-
        /*
         * Three variably-size fields follow this struct:
         *
@@ -284,9 +279,10 @@ release_ret_r:
 static int verity_verify_io(struct dm_verity_io *io)
 {
        struct dm_verity *v = io->v;
+       struct bio *bio = dm_bio_from_per_bio_data(io,
+                                                  v->ti->per_bio_data_size);
        unsigned b;
        int i;
-       unsigned vector = 0, offset = 0;
 
        for (b = 0; b < io->n_blocks; b++) {
                struct shash_desc *desc;
@@ -336,31 +332,22 @@ test_block_hash:
                }
 
                todo = 1 << v->data_dev_block_bits;
-               do {
-                       struct bio_vec *bv;
+               while (io->iter.bi_size) {
                        u8 *page;
-                       unsigned len;
-
-                       BUG_ON(vector >= io->io_vec_size);
-                       bv = &io->io_vec[vector];
-                       page = kmap_atomic(bv->bv_page);
-                       len = bv->bv_len - offset;
-                       if (likely(len >= todo))
-                               len = todo;
-                       r = crypto_shash_update(desc,
-                                       page + bv->bv_offset + offset, len);
+                       struct bio_vec bv = bio_iter_iovec(bio, io->iter);
+
+                       page = kmap_atomic(bv.bv_page);
+                       r = crypto_shash_update(desc, page + bv.bv_offset,
+                                               bv.bv_len);
                        kunmap_atomic(page);
+
                        if (r < 0) {
                                DMERR("crypto_shash_update failed: %d", r);
                                return r;
                        }
-                       offset += len;
-                       if (likely(offset == bv->bv_len)) {
-                               offset = 0;
-                               vector++;
-                       }
-                       todo -= len;
-               } while (todo);
+
+                       bio_advance_iter(bio, &io->iter, bv.bv_len);
+               }
 
                if (!v->version) {
                        r = crypto_shash_update(desc, v->salt, v->salt_size);
@@ -383,8 +370,6 @@ test_block_hash:
                        return -EIO;
                }
        }
-       BUG_ON(vector != io->io_vec_size);
-       BUG_ON(offset);
 
        return 0;
 }
@@ -400,10 +385,7 @@ static void verity_finish_io(struct dm_verity_io *io, int error)
        bio->bi_end_io = io->orig_bi_end_io;
        bio->bi_private = io->orig_bi_private;
 
-       if (io->io_vec != io->io_vec_inline)
-               mempool_free(io->io_vec, v->vec_mempool);
-
-       bio_endio(bio, error);
+       bio_endio_nodec(bio, error);
 }
 
 static void verity_work(struct work_struct *w)
@@ -493,9 +475,9 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
        struct dm_verity_io *io;
 
        bio->bi_bdev = v->data_dev->bdev;
-       bio->bi_sector = verity_map_sector(v, bio->bi_sector);
+       bio->bi_iter.bi_sector = verity_map_sector(v, bio->bi_iter.bi_sector);
 
-       if (((unsigned)bio->bi_sector | bio_sectors(bio)) &
+       if (((unsigned)bio->bi_iter.bi_sector | bio_sectors(bio)) &
            ((1 << (v->data_dev_block_bits - SECTOR_SHIFT)) - 1)) {
                DMERR_LIMIT("unaligned io");
                return -EIO;
@@ -514,18 +496,12 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
        io->v = v;
        io->orig_bi_end_io = bio->bi_end_io;
        io->orig_bi_private = bio->bi_private;
-       io->block = bio->bi_sector >> (v->data_dev_block_bits - SECTOR_SHIFT);
-       io->n_blocks = bio->bi_size >> v->data_dev_block_bits;
+       io->block = bio->bi_iter.bi_sector >> (v->data_dev_block_bits - SECTOR_SHIFT);
+       io->n_blocks = bio->bi_iter.bi_size >> v->data_dev_block_bits;
 
        bio->bi_end_io = verity_end_io;
        bio->bi_private = io;
-       io->io_vec_size = bio_segments(bio);
-       if (io->io_vec_size < DM_VERITY_IO_VEC_INLINE)
-               io->io_vec = io->io_vec_inline;
-       else
-               io->io_vec = mempool_alloc(v->vec_mempool, GFP_NOIO);
-       memcpy(io->io_vec, bio_iovec(bio),
-              io->io_vec_size * sizeof(struct bio_vec));
+       io->iter = bio->bi_iter;
 
        verity_submit_prefetch(v, io);
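
dm-verity no longer snapshots the bvec array: io->iter is a by-value copy of bio->bi_iter taken at map time, which is all the verification work item needs to re-walk the data pages later, even after lower layers have advanced the bio's own iterator. verity_finish_io() also shows the second idiom for completing a bio whose ->bi_end_io was restored; the two forms below are equivalent, since bio_endio_nodec() simply skips the bi_remaining decrement:

	/* as in the dm-cache/dm-snapshot/dm-thin hunks: */
	atomic_inc(&bio->bi_remaining);
	bio_endio(bio, error);

	/* as here, avoiding the inc/dec pair: */
	bio_endio_nodec(bio, error);
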
 
index b49c762..8c53b09 100644 (file)
@@ -575,7 +575,7 @@ static void start_io_acct(struct dm_io *io)
                atomic_inc_return(&md->pending[rw]));
 
        if (unlikely(dm_stats_used(&md->stats)))
-               dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_sector,
+               dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector,
                                    bio_sectors(bio), false, 0, &io->stats_aux);
 }
 
@@ -593,7 +593,7 @@ static void end_io_acct(struct dm_io *io)
        part_stat_unlock();
 
        if (unlikely(dm_stats_used(&md->stats)))
-               dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_sector,
+               dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector,
                                    bio_sectors(bio), true, duration, &io->stats_aux);
 
        /*
@@ -742,7 +742,7 @@ static void dec_pending(struct dm_io *io, int error)
                if (io_error == DM_ENDIO_REQUEUE)
                        return;
 
-               if ((bio->bi_rw & REQ_FLUSH) && bio->bi_size) {
+               if ((bio->bi_rw & REQ_FLUSH) && bio->bi_iter.bi_size) {
                        /*
                         * Preflush done for flush with data, reissue
                         * without REQ_FLUSH.
@@ -797,7 +797,7 @@ static void end_clone_bio(struct bio *clone, int error)
        struct dm_rq_clone_bio_info *info = clone->bi_private;
        struct dm_rq_target_io *tio = info->tio;
        struct bio *bio = info->orig;
-       unsigned int nr_bytes = info->orig->bi_size;
+       unsigned int nr_bytes = info->orig->bi_iter.bi_size;
 
        bio_put(clone);
 
@@ -1128,7 +1128,7 @@ static void __map_bio(struct dm_target_io *tio)
         * this io.
         */
        atomic_inc(&tio->io->io_count);
-       sector = clone->bi_sector;
+       sector = clone->bi_iter.bi_sector;
        r = ti->type->map(ti, clone);
        if (r == DM_MAPIO_REMAPPED) {
                /* the bio has been remapped so dispatch it */
@@ -1155,76 +1155,32 @@ struct clone_info {
        struct dm_io *io;
        sector_t sector;
        sector_t sector_count;
-       unsigned short idx;
 };
 
 static void bio_setup_sector(struct bio *bio, sector_t sector, sector_t len)
 {
-       bio->bi_sector = sector;
-       bio->bi_size = to_bytes(len);
-}
-
-static void bio_setup_bv(struct bio *bio, unsigned short idx, unsigned short bv_count)
-{
-       bio->bi_idx = idx;
-       bio->bi_vcnt = idx + bv_count;
-       bio->bi_flags &= ~(1 << BIO_SEG_VALID);
-}
-
-static void clone_bio_integrity(struct bio *bio, struct bio *clone,
-                               unsigned short idx, unsigned len, unsigned offset,
-                               unsigned trim)
-{
-       if (!bio_integrity(bio))
-               return;
-
-       bio_integrity_clone(clone, bio, GFP_NOIO);
-
-       if (trim)
-               bio_integrity_trim(clone, bio_sector_offset(bio, idx, offset), len);
-}
-
-/*
- * Creates a little bio that just does part of a bvec.
- */
-static void clone_split_bio(struct dm_target_io *tio, struct bio *bio,
-                           sector_t sector, unsigned short idx,
-                           unsigned offset, unsigned len)
-{
-       struct bio *clone = &tio->clone;
-       struct bio_vec *bv = bio->bi_io_vec + idx;
-
-       *clone->bi_io_vec = *bv;
-
-       bio_setup_sector(clone, sector, len);
-
-       clone->bi_bdev = bio->bi_bdev;
-       clone->bi_rw = bio->bi_rw;
-       clone->bi_vcnt = 1;
-       clone->bi_io_vec->bv_offset = offset;
-       clone->bi_io_vec->bv_len = clone->bi_size;
-       clone->bi_flags |= 1 << BIO_CLONED;
-
-       clone_bio_integrity(bio, clone, idx, len, offset, 1);
+       bio->bi_iter.bi_sector = sector;
+       bio->bi_iter.bi_size = to_bytes(len);
 }
 
 /*
  * Creates a bio that consists of range of complete bvecs.
  */
 static void clone_bio(struct dm_target_io *tio, struct bio *bio,
-                     sector_t sector, unsigned short idx,
-                     unsigned short bv_count, unsigned len)
+                     sector_t sector, unsigned len)
 {
        struct bio *clone = &tio->clone;
-       unsigned trim = 0;
 
-       __bio_clone(clone, bio);
-       bio_setup_sector(clone, sector, len);
-       bio_setup_bv(clone, idx, bv_count);
+       __bio_clone_fast(clone, bio);
+
+       if (bio_integrity(bio))
+               bio_integrity_clone(clone, bio, GFP_NOIO);
+
+       bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
+       clone->bi_iter.bi_size = to_bytes(len);
 
-       if (idx != bio->bi_idx || clone->bi_size < bio->bi_size)
-               trim = 1;
-       clone_bio_integrity(bio, clone, idx, len, 0, trim);
+       if (bio_integrity(bio))
+               bio_integrity_trim(clone, 0, len);
 }
 
 static struct dm_target_io *alloc_tio(struct clone_info *ci,
@@ -1257,7 +1213,7 @@ static void __clone_and_map_simple_bio(struct clone_info *ci,
         * ci->bio->bi_max_vecs is BIO_INLINE_VECS anyway, for both flush
         * and discard, so no need for concern about wasted bvec allocations.
         */
-        __bio_clone(clone, ci->bio);
+        __bio_clone_fast(clone, ci->bio);
        if (len)
                bio_setup_sector(clone, ci->sector, len);
 
@@ -1286,10 +1242,7 @@ static int __send_empty_flush(struct clone_info *ci)
 }
 
 static void __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
-                                    sector_t sector, int nr_iovecs,
-                                    unsigned short idx, unsigned short bv_count,
-                                    unsigned offset, unsigned len,
-                                    unsigned split_bvec)
+                                    sector_t sector, unsigned len)
 {
        struct bio *bio = ci->bio;
        struct dm_target_io *tio;
@@ -1303,11 +1256,8 @@ static void __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti
                num_target_bios = ti->num_write_bios(ti, bio);
 
        for (target_bio_nr = 0; target_bio_nr < num_target_bios; target_bio_nr++) {
-               tio = alloc_tio(ci, ti, nr_iovecs, target_bio_nr);
-               if (split_bvec)
-                       clone_split_bio(tio, bio, sector, idx, offset, len);
-               else
-                       clone_bio(tio, bio, sector, idx, bv_count, len);
+               tio = alloc_tio(ci, ti, 0, target_bio_nr);
+               clone_bio(tio, bio, sector, len);
                __map_bio(tio);
        }
 }
@@ -1378,60 +1328,6 @@ static int __send_write_same(struct clone_info *ci)
        return __send_changing_extent_only(ci, get_num_write_same_bios, NULL);
 }
 
-/*
- * Find maximum number of sectors / bvecs we can process with a single bio.
- */
-static sector_t __len_within_target(struct clone_info *ci, sector_t max, int *idx)
-{
-       struct bio *bio = ci->bio;
-       sector_t bv_len, total_len = 0;
-
-       for (*idx = ci->idx; max && (*idx < bio->bi_vcnt); (*idx)++) {
-               bv_len = to_sector(bio->bi_io_vec[*idx].bv_len);
-
-               if (bv_len > max)
-                       break;
-
-               max -= bv_len;
-               total_len += bv_len;
-       }
-
-       return total_len;
-}
-
-static int __split_bvec_across_targets(struct clone_info *ci,
-                                      struct dm_target *ti, sector_t max)
-{
-       struct bio *bio = ci->bio;
-       struct bio_vec *bv = bio->bi_io_vec + ci->idx;
-       sector_t remaining = to_sector(bv->bv_len);
-       unsigned offset = 0;
-       sector_t len;
-
-       do {
-               if (offset) {
-                       ti = dm_table_find_target(ci->map, ci->sector);
-                       if (!dm_target_is_valid(ti))
-                               return -EIO;
-
-                       max = max_io_len(ci->sector, ti);
-               }
-
-               len = min(remaining, max);
-
-               __clone_and_map_data_bio(ci, ti, ci->sector, 1, ci->idx, 0,
-                                        bv->bv_offset + offset, len, 1);
-
-               ci->sector += len;
-               ci->sector_count -= len;
-               offset += to_bytes(len);
-       } while (remaining -= len);
-
-       ci->idx++;
-
-       return 0;
-}
-
 /*
  * Select the correct strategy for processing a non-flush bio.
  */
@@ -1439,8 +1335,7 @@ static int __split_and_process_non_flush(struct clone_info *ci)
 {
        struct bio *bio = ci->bio;
        struct dm_target *ti;
-       sector_t len, max;
-       int idx;
+       unsigned len;
 
        if (unlikely(bio->bi_rw & REQ_DISCARD))
                return __send_discard(ci);
@@ -1451,41 +1346,14 @@ static int __split_and_process_non_flush(struct clone_info *ci)
        if (!dm_target_is_valid(ti))
                return -EIO;
 
-       max = max_io_len(ci->sector, ti);
-
-       /*
-        * Optimise for the simple case where we can do all of
-        * the remaining io with a single clone.
-        */
-       if (ci->sector_count <= max) {
-               __clone_and_map_data_bio(ci, ti, ci->sector, bio->bi_max_vecs,
-                                        ci->idx, bio->bi_vcnt - ci->idx, 0,
-                                        ci->sector_count, 0);
-               ci->sector_count = 0;
-               return 0;
-       }
-
-       /*
-        * There are some bvecs that don't span targets.
-        * Do as many of these as possible.
-        */
-       if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
-               len = __len_within_target(ci, max, &idx);
-
-               __clone_and_map_data_bio(ci, ti, ci->sector, bio->bi_max_vecs,
-                                        ci->idx, idx - ci->idx, 0, len, 0);
+       len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count);
 
-               ci->sector += len;
-               ci->sector_count -= len;
-               ci->idx = idx;
+       __clone_and_map_data_bio(ci, ti, ci->sector, len);
 
-               return 0;
-       }
+       ci->sector += len;
+       ci->sector_count -= len;
 
-       /*
-        * Handle a bvec that must be split between two or more targets.
-        */
-       return __split_bvec_across_targets(ci, ti, max);
+       return 0;
 }
 
 /*
@@ -1510,8 +1378,7 @@ static void __split_and_process_bio(struct mapped_device *md,
        ci.io->bio = bio;
        ci.io->md = md;
        spin_lock_init(&ci.io->endio_lock);
-       ci.sector = bio->bi_sector;
-       ci.idx = bio->bi_idx;
+       ci.sector = bio->bi_iter.bi_sector;
 
        start_io_acct(ci.io);
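
This dm.c hunk is the payoff: because a clone's bvec_iter can begin and end mid-bvec (bi_bvec_done tracks partial progress), clone_split_bio(), __len_within_target(), and __split_bvec_across_targets(), roughly sixty lines of bvec bookkeeping, are simply deleted. Any [sector, sector + len) range is now expressed on the parent's own bvec array in three steps:

	__bio_clone_fast(clone, bio);	/* share the parent's bi_io_vec */
	bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
	clone->bi_iter.bi_size = to_bytes(len);	/* truncate to the range */
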
 
index 3193aef..e8b4574 100644 (file)
@@ -74,8 +74,8 @@ static void faulty_fail(struct bio *bio, int error)
 {
        struct bio *b = bio->bi_private;
 
-       b->bi_size = bio->bi_size;
-       b->bi_sector = bio->bi_sector;
+       b->bi_iter.bi_size = bio->bi_iter.bi_size;
+       b->bi_iter.bi_sector = bio->bi_iter.bi_sector;
 
        bio_put(bio);
 
@@ -185,26 +185,31 @@ static void make_request(struct mddev *mddev, struct bio *bio)
                        return;
                }
 
-               if (check_sector(conf, bio->bi_sector, bio_end_sector(bio), WRITE))
+               if (check_sector(conf, bio->bi_iter.bi_sector,
+                                bio_end_sector(bio), WRITE))
                        failit = 1;
                if (check_mode(conf, WritePersistent)) {
-                       add_sector(conf, bio->bi_sector, WritePersistent);
+                       add_sector(conf, bio->bi_iter.bi_sector,
+                                  WritePersistent);
                        failit = 1;
                }
                if (check_mode(conf, WriteTransient))
                        failit = 1;
        } else {
                /* read request */
-               if (check_sector(conf, bio->bi_sector, bio_end_sector(bio), READ))
+               if (check_sector(conf, bio->bi_iter.bi_sector,
+                                bio_end_sector(bio), READ))
                        failit = 1;
                if (check_mode(conf, ReadTransient))
                        failit = 1;
                if (check_mode(conf, ReadPersistent)) {
-                       add_sector(conf, bio->bi_sector, ReadPersistent);
+                       add_sector(conf, bio->bi_iter.bi_sector,
+                                  ReadPersistent);
                        failit = 1;
                }
                if (check_mode(conf, ReadFixable)) {
-                       add_sector(conf, bio->bi_sector, ReadFixable);
+                       add_sector(conf, bio->bi_iter.bi_sector,
+                                  ReadFixable);
                        failit = 1;
                }
        }
index f03fabd..56f534b 100644 (file)
@@ -288,65 +288,65 @@ static int linear_stop (struct mddev *mddev)
 
 static void linear_make_request(struct mddev *mddev, struct bio *bio)
 {
+       char b[BDEVNAME_SIZE];
        struct dev_info *tmp_dev;
-       sector_t start_sector;
+       struct bio *split;
+       sector_t start_sector, end_sector, data_offset;
 
        if (unlikely(bio->bi_rw & REQ_FLUSH)) {
                md_flush_request(mddev, bio);
                return;
        }
 
-       rcu_read_lock();
-       tmp_dev = which_dev(mddev, bio->bi_sector);
-       start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors;
-
-
-       if (unlikely(bio->bi_sector >= (tmp_dev->end_sector)
-                    || (bio->bi_sector < start_sector))) {
-               char b[BDEVNAME_SIZE];
-
-               printk(KERN_ERR
-                      "md/linear:%s: make_request: Sector %llu out of bounds on "
-                      "dev %s: %llu sectors, offset %llu\n",
-                      mdname(mddev),
-                      (unsigned long long)bio->bi_sector,
-                      bdevname(tmp_dev->rdev->bdev, b),
-                      (unsigned long long)tmp_dev->rdev->sectors,
-                      (unsigned long long)start_sector);
-               rcu_read_unlock();
-               bio_io_error(bio);
-               return;
-       }
-       if (unlikely(bio_end_sector(bio) > tmp_dev->end_sector)) {
-               /* This bio crosses a device boundary, so we have to
-                * split it.
-                */
-               struct bio_pair *bp;
-               sector_t end_sector = tmp_dev->end_sector;
+       do {
+               rcu_read_lock();
 
-               rcu_read_unlock();
-
-               bp = bio_split(bio, end_sector - bio->bi_sector);
+               tmp_dev = which_dev(mddev, bio->bi_iter.bi_sector);
+               start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors;
+               end_sector = tmp_dev->end_sector;
+               data_offset = tmp_dev->rdev->data_offset;
+               bio->bi_bdev = tmp_dev->rdev->bdev;
 
-               linear_make_request(mddev, &bp->bio1);
-               linear_make_request(mddev, &bp->bio2);
-               bio_pair_release(bp);
-               return;
-       }
-                   
-       bio->bi_bdev = tmp_dev->rdev->bdev;
-       bio->bi_sector = bio->bi_sector - start_sector
-               + tmp_dev->rdev->data_offset;
-       rcu_read_unlock();
+               rcu_read_unlock();
 
-       if (unlikely((bio->bi_rw & REQ_DISCARD) &&
-                    !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) {
-               /* Just ignore it */
-               bio_endio(bio, 0);
-               return;
-       }
+               if (unlikely(bio->bi_iter.bi_sector >= end_sector ||
+                            bio->bi_iter.bi_sector < start_sector))
+                       goto out_of_bounds;
+
+               if (unlikely(bio_end_sector(bio) > end_sector)) {
+                       /* This bio crosses a device boundary, so we have to
+                        * split it.
+                        */
+                       split = bio_split(bio, end_sector -
+                                         bio->bi_iter.bi_sector,
+                                         GFP_NOIO, fs_bio_set);
+                       bio_chain(split, bio);
+               } else {
+                       split = bio;
+               }
 
-       generic_make_request(bio);
+               split->bi_iter.bi_sector = split->bi_iter.bi_sector -
+                       start_sector + data_offset;
+
+               if (unlikely((split->bi_rw & REQ_DISCARD) &&
+                        !blk_queue_discard(bdev_get_queue(split->bi_bdev)))) {
+                       /* Just ignore it */
+                       bio_endio(split, 0);
+               } else
+                       generic_make_request(split);
+       } while (split != bio);
+       return;
+
+out_of_bounds:
+       printk(KERN_ERR
+              "md/linear:%s: make_request: Sector %llu out of bounds on "
+              "dev %s: %llu sectors, offset %llu\n",
+              mdname(mddev),
+              (unsigned long long)bio->bi_iter.bi_sector,
+              bdevname(tmp_dev->rdev->bdev, b),
+              (unsigned long long)tmp_dev->rdev->sectors,
+              (unsigned long long)start_sector);
+       bio_io_error(bio);
 }
 
 static void linear_status (struct seq_file *seq, struct mddev *mddev)
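
md/linear is the template for the md conversions that follow: instead of the old recursive bio_pair split, which could only cut once and only on a bvec boundary, the request loop repeatedly carves off the largest prefix that fits in one component device with bio_split(), ties each fragment's completion to the parent with bio_chain(), and loops until the remainder fits. The generic shape, with spans_device_boundary() and sectors_to_boundary() as hypothetical stand-ins for the driver's geometry checks:

	struct bio *split;

	do {
		if (spans_device_boundary(bio)) {
			split = bio_split(bio, sectors_to_boundary(bio),
					  GFP_NOIO, fs_bio_set);
			bio_chain(split, bio);	/* parent completes last */
		} else {
			split = bio;		/* final (or only) fragment */
		}

		/* remap split->bi_iter.bi_sector, set split->bi_bdev, then: */
		generic_make_request(split);
	} while (split != bio);
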
index 40c5313..4ad5cc4 100644 (file)
@@ -393,7 +393,7 @@ static void md_submit_flush_data(struct work_struct *ws)
        struct mddev *mddev = container_of(ws, struct mddev, flush_work);
        struct bio *bio = mddev->flush_bio;
 
-       if (bio->bi_size == 0)
+       if (bio->bi_iter.bi_size == 0)
                /* an empty barrier - all done */
                bio_endio(bio, 0);
        else {
@@ -754,7 +754,7 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
        struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, mddev);
 
        bio->bi_bdev = rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev;
-       bio->bi_sector = sector;
+       bio->bi_iter.bi_sector = sector;
        bio_add_page(bio, page, size, 0);
        bio->bi_private = rdev;
        bio->bi_end_io = super_written;
@@ -782,18 +782,16 @@ int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
        struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev);
        int ret;
 
-       rw |= REQ_SYNC;
-
        bio->bi_bdev = (metadata_op && rdev->meta_bdev) ?
                rdev->meta_bdev : rdev->bdev;
        if (metadata_op)
-               bio->bi_sector = sector + rdev->sb_start;
+               bio->bi_iter.bi_sector = sector + rdev->sb_start;
        else if (rdev->mddev->reshape_position != MaxSector &&
                 (rdev->mddev->reshape_backwards ==
                  (sector >= rdev->mddev->reshape_position)))
-               bio->bi_sector = sector + rdev->new_data_offset;
+               bio->bi_iter.bi_sector = sector + rdev->new_data_offset;
        else
-               bio->bi_sector = sector + rdev->data_offset;
+               bio->bi_iter.bi_sector = sector + rdev->data_offset;
        bio_add_page(bio, page, size, 0);
        submit_bio_wait(rw, bio);
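
Dropping rw |= REQ_SYNC from sync_page_io() is not a behaviour change: submit_bio_wait() sets the flag itself before submitting, so the caller's OR was redundant. An abridged sketch of 3.14's submit_bio_wait() from fs/bio.c:

	int submit_bio_wait(int rw, struct bio *bio)
	{
		struct submit_bio_ret ret;

		rw |= REQ_SYNC;		/* why callers may drop it */
		init_completion(&ret.event);
		bio->bi_private = &ret;
		bio->bi_end_io = submit_bio_wait_endio;
		submit_bio(rw, bio);
		wait_for_completion(&ret.event);

		return ret.error;
	}
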
 
index 1642eae..849ad39 100644 (file)
@@ -100,7 +100,7 @@ static void multipath_end_request(struct bio *bio, int error)
                md_error (mp_bh->mddev, rdev);
                printk(KERN_ERR "multipath: %s: rescheduling sector %llu\n", 
                       bdevname(rdev->bdev,b), 
-                      (unsigned long long)bio->bi_sector);
+                      (unsigned long long)bio->bi_iter.bi_sector);
                multipath_reschedule_retry(mp_bh);
        } else
                multipath_end_bh_io(mp_bh, error);
@@ -132,7 +132,7 @@ static void multipath_make_request(struct mddev *mddev, struct bio * bio)
        multipath = conf->multipaths + mp_bh->path;
 
        mp_bh->bio = *bio;
-       mp_bh->bio.bi_sector += multipath->rdev->data_offset;
+       mp_bh->bio.bi_iter.bi_sector += multipath->rdev->data_offset;
        mp_bh->bio.bi_bdev = multipath->rdev->bdev;
        mp_bh->bio.bi_rw |= REQ_FAILFAST_TRANSPORT;
        mp_bh->bio.bi_end_io = multipath_end_request;
@@ -355,21 +355,22 @@ static void multipathd(struct md_thread *thread)
                spin_unlock_irqrestore(&conf->device_lock, flags);
 
                bio = &mp_bh->bio;
-               bio->bi_sector = mp_bh->master_bio->bi_sector;
+               bio->bi_iter.bi_sector = mp_bh->master_bio->bi_iter.bi_sector;
                
                if ((mp_bh->path = multipath_map (conf))<0) {
                        printk(KERN_ALERT "multipath: %s: unrecoverable IO read"
                                " error for block %llu\n",
                                bdevname(bio->bi_bdev,b),
-                               (unsigned long long)bio->bi_sector);
+                               (unsigned long long)bio->bi_iter.bi_sector);
                        multipath_end_bh_io(mp_bh, -EIO);
                } else {
                        printk(KERN_ERR "multipath: %s: redirecting sector %llu"
                                " to another IO path\n",
                                bdevname(bio->bi_bdev,b),
-                               (unsigned long long)bio->bi_sector);
+                               (unsigned long long)bio->bi_iter.bi_sector);
                        *bio = *(mp_bh->master_bio);
-                       bio->bi_sector += conf->multipaths[mp_bh->path].rdev->data_offset;
+                       bio->bi_iter.bi_sector +=
+                               conf->multipaths[mp_bh->path].rdev->data_offset;
                        bio->bi_bdev = conf->multipaths[mp_bh->path].rdev->bdev;
                        bio->bi_rw |= REQ_FAILFAST_TRANSPORT;
                        bio->bi_end_io = multipath_end_request;
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index c4d420b..407a99e 100644
@@ -501,10 +501,11 @@ static inline int is_io_in_chunk_boundary(struct mddev *mddev,
                        unsigned int chunk_sects, struct bio *bio)
 {
        if (likely(is_power_of_2(chunk_sects))) {
-               return chunk_sects >= ((bio->bi_sector & (chunk_sects-1))
+               return chunk_sects >=
+                       ((bio->bi_iter.bi_sector & (chunk_sects-1))
                                        + bio_sectors(bio));
        } else{
-               sector_t sector = bio->bi_sector;
+               sector_t sector = bio->bi_iter.bi_sector;
                return chunk_sects >= (sector_div(sector, chunk_sects)
                                                + bio_sectors(bio));
        }
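
Both branches of the check above compute the same quantity: the sectors remaining before the next chunk boundary. For power-of-two chunk sizes the modulo reduces to a mask; otherwise sector_div() performs the 64-bit division and yields the remainder. A self-contained sketch of that arithmetic (the helper name is illustrative):

#include <linux/blkdev.h>
#include <linux/log2.h>

/* Sectors from 'sector' up to the next chunk boundary.  sector_div()
 * divides in place and returns the remainder, so 'sector' is taken
 * by value. */
static unsigned sectors_to_chunk_end(sector_t sector, unsigned chunk_sects)
{
	if (likely(is_power_of_2(chunk_sects)))
		return chunk_sects - (sector & (chunk_sects - 1));
	return chunk_sects - sector_div(sector, chunk_sects);
}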
@@ -512,64 +513,44 @@ static inline int is_io_in_chunk_boundary(struct mddev *mddev,
 
 static void raid0_make_request(struct mddev *mddev, struct bio *bio)
 {
-       unsigned int chunk_sects;
-       sector_t sector_offset;
        struct strip_zone *zone;
        struct md_rdev *tmp_dev;
+       struct bio *split;
 
        if (unlikely(bio->bi_rw & REQ_FLUSH)) {
                md_flush_request(mddev, bio);
                return;
        }
 
-       chunk_sects = mddev->chunk_sectors;
-       if (unlikely(!is_io_in_chunk_boundary(mddev, chunk_sects, bio))) {
-               sector_t sector = bio->bi_sector;
-               struct bio_pair *bp;
-               /* Sanity check -- queue functions should prevent this happening */
-               if (bio_segments(bio) > 1)
-                       goto bad_map;
-               /* This is a one page bio that upper layers
-                * refuse to split for us, so we need to split it.
-                */
-               if (likely(is_power_of_2(chunk_sects)))
-                       bp = bio_split(bio, chunk_sects - (sector &
-                                                          (chunk_sects-1)));
-               else
-                       bp = bio_split(bio, chunk_sects -
-                                      sector_div(sector, chunk_sects));
-               raid0_make_request(mddev, &bp->bio1);
-               raid0_make_request(mddev, &bp->bio2);
-               bio_pair_release(bp);
-               return;
-       }
+       do {
+               sector_t sector = bio->bi_iter.bi_sector;
+               unsigned chunk_sects = mddev->chunk_sectors;
 
-       sector_offset = bio->bi_sector;
-       zone = find_zone(mddev->private, &sector_offset);
-       tmp_dev = map_sector(mddev, zone, bio->bi_sector,
-                            &sector_offset);
-       bio->bi_bdev = tmp_dev->bdev;
-       bio->bi_sector = sector_offset + zone->dev_start +
-               tmp_dev->data_offset;
-
-       if (unlikely((bio->bi_rw & REQ_DISCARD) &&
-                    !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) {
-               /* Just ignore it */
-               bio_endio(bio, 0);
-               return;
-       }
+               unsigned sectors = chunk_sects -
+                       (likely(is_power_of_2(chunk_sects))
+                        ? (sector & (chunk_sects-1))
+                        : sector_div(sector, chunk_sects));
 
-       generic_make_request(bio);
-       return;
-
-bad_map:
-       printk("md/raid0:%s: make_request bug: can't convert block across chunks"
-              " or bigger than %dk %llu %d\n",
-              mdname(mddev), chunk_sects / 2,
-              (unsigned long long)bio->bi_sector, bio_sectors(bio) / 2);
+               if (sectors < bio_sectors(bio)) {
+                       split = bio_split(bio, sectors, GFP_NOIO, fs_bio_set);
+                       bio_chain(split, bio);
+               } else {
+                       split = bio;
+               }
 
-       bio_io_error(bio);
-       return;
+               zone = find_zone(mddev->private, &sector);
+               tmp_dev = map_sector(mddev, zone, sector, &sector);
+               split->bi_bdev = tmp_dev->bdev;
+               split->bi_iter.bi_sector = sector + zone->dev_start +
+                       tmp_dev->data_offset;
+
+               if (unlikely((split->bi_rw & REQ_DISCARD) &&
+                        !blk_queue_discard(bdev_get_queue(split->bi_bdev)))) {
+                       /* Just ignore it */
+                       bio_endio(split, 0);
+               } else
+                       generic_make_request(split);
+       } while (split != bio);
 }
 
 static void raid0_status(struct seq_file *seq, struct mddev *mddev)
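
The rewritten raid0_make_request() above replaces the old one-page bio_pair split with a generic split-and-chain loop. The idiom, reduced to its skeleton (device mapping elided; assumes the bio_split()/bio_chain() signatures used in the hunk):

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Carve off the largest prefix the driver can place, chain its
 * completion to the parent, and loop on the remainder.  bio_endio()
 * on the parent fires only after every chained child has ended. */
static void submit_in_pieces(struct bio *bio, unsigned max_sectors)
{
	struct bio *split;

	do {
		if (bio_sectors(bio) > max_sectors) {
			split = bio_split(bio, max_sectors, GFP_NOIO,
					  fs_bio_set);
			bio_chain(split, bio);
		} else {
			split = bio;
		}
		generic_make_request(split);
	} while (split != bio);
}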
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index a49cfcc..fd3a2a1 100644
@@ -229,7 +229,7 @@ static void call_bio_endio(struct r1bio *r1_bio)
        int done;
        struct r1conf *conf = r1_bio->mddev->private;
        sector_t start_next_window = r1_bio->start_next_window;
-       sector_t bi_sector = bio->bi_sector;
+       sector_t bi_sector = bio->bi_iter.bi_sector;
 
        if (bio->bi_phys_segments) {
                unsigned long flags;
@@ -265,9 +265,8 @@ static void raid_end_bio_io(struct r1bio *r1_bio)
        if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
                pr_debug("raid1: sync end %s on sectors %llu-%llu\n",
                         (bio_data_dir(bio) == WRITE) ? "write" : "read",
-                        (unsigned long long) bio->bi_sector,
-                        (unsigned long long) bio->bi_sector +
-                        bio_sectors(bio) - 1);
+                        (unsigned long long) bio->bi_iter.bi_sector,
+                        (unsigned long long) bio_end_sector(bio) - 1);
 
                call_bio_endio(r1_bio);
        }
@@ -466,9 +465,8 @@ static void raid1_end_write_request(struct bio *bio, int error)
                                struct bio *mbio = r1_bio->master_bio;
                                pr_debug("raid1: behind end write sectors"
                                         " %llu-%llu\n",
-                                        (unsigned long long) mbio->bi_sector,
-                                        (unsigned long long) mbio->bi_sector +
-                                        bio_sectors(mbio) - 1);
+                                        (unsigned long long) mbio->bi_iter.bi_sector,
+                                        (unsigned long long) bio_end_sector(mbio) - 1);
                                call_bio_endio(r1_bio);
                        }
                }
@@ -875,7 +873,7 @@ static bool need_to_wait_for_sync(struct r1conf *conf, struct bio *bio)
                else if ((conf->next_resync - RESYNC_WINDOW_SECTORS
                                >= bio_end_sector(bio)) ||
                         (conf->next_resync + NEXT_NORMALIO_DISTANCE
-                               <= bio->bi_sector))
+                               <= bio->bi_iter.bi_sector))
                        wait = false;
                else
                        wait = true;
@@ -913,14 +911,14 @@ static sector_t wait_barrier(struct r1conf *conf, struct bio *bio)
 
        if (bio && bio_data_dir(bio) == WRITE) {
                if (conf->next_resync + NEXT_NORMALIO_DISTANCE
-                   <= bio->bi_sector) {
+                   <= bio->bi_iter.bi_sector) {
                        if (conf->start_next_window == MaxSector)
                                conf->start_next_window =
                                        conf->next_resync +
                                        NEXT_NORMALIO_DISTANCE;
 
                        if ((conf->start_next_window + NEXT_NORMALIO_DISTANCE)
-                           <= bio->bi_sector)
+                           <= bio->bi_iter.bi_sector)
                                conf->next_window_requests++;
                        else
                                conf->current_window_requests++;
@@ -1027,7 +1025,8 @@ do_sync_io:
                if (bvecs[i].bv_page)
                        put_page(bvecs[i].bv_page);
        kfree(bvecs);
-       pr_debug("%dB behind alloc failed, doing sync I/O\n", bio->bi_size);
+       pr_debug("%dB behind alloc failed, doing sync I/O\n",
+                bio->bi_iter.bi_size);
 }
 
 struct raid1_plug_cb {
@@ -1107,7 +1106,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
 
        if (bio_data_dir(bio) == WRITE &&
            bio_end_sector(bio) > mddev->suspend_lo &&
-           bio->bi_sector < mddev->suspend_hi) {
+           bio->bi_iter.bi_sector < mddev->suspend_hi) {
                /* As the suspend_* range is controlled by
                 * userspace, we want an interruptible
                 * wait.
@@ -1118,7 +1117,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
                        prepare_to_wait(&conf->wait_barrier,
                                        &w, TASK_INTERRUPTIBLE);
                        if (bio_end_sector(bio) <= mddev->suspend_lo ||
-                           bio->bi_sector >= mddev->suspend_hi)
+                           bio->bi_iter.bi_sector >= mddev->suspend_hi)
                                break;
                        schedule();
                }
@@ -1140,7 +1139,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
        r1_bio->sectors = bio_sectors(bio);
        r1_bio->state = 0;
        r1_bio->mddev = mddev;
-       r1_bio->sector = bio->bi_sector;
+       r1_bio->sector = bio->bi_iter.bi_sector;
 
        /* We might need to issue multiple reads to different
         * devices if there are bad blocks around, so we keep
@@ -1180,12 +1179,13 @@ read_again:
                r1_bio->read_disk = rdisk;
 
                read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
-               bio_trim(read_bio, r1_bio->sector - bio->bi_sector,
+               bio_trim(read_bio, r1_bio->sector - bio->bi_iter.bi_sector,
                         max_sectors);
 
                r1_bio->bios[rdisk] = read_bio;
 
-               read_bio->bi_sector = r1_bio->sector + mirror->rdev->data_offset;
+               read_bio->bi_iter.bi_sector = r1_bio->sector +
+                       mirror->rdev->data_offset;
                read_bio->bi_bdev = mirror->rdev->bdev;
                read_bio->bi_end_io = raid1_end_read_request;
                read_bio->bi_rw = READ | do_sync;
@@ -1197,7 +1197,7 @@ read_again:
                         */
 
                        sectors_handled = (r1_bio->sector + max_sectors
-                                          - bio->bi_sector);
+                                          - bio->bi_iter.bi_sector);
                        r1_bio->sectors = max_sectors;
                        spin_lock_irq(&conf->device_lock);
                        if (bio->bi_phys_segments == 0)
@@ -1218,7 +1218,8 @@ read_again:
                        r1_bio->sectors = bio_sectors(bio) - sectors_handled;
                        r1_bio->state = 0;
                        r1_bio->mddev = mddev;
-                       r1_bio->sector = bio->bi_sector + sectors_handled;
+                       r1_bio->sector = bio->bi_iter.bi_sector +
+                               sectors_handled;
                        goto read_again;
                } else
                        generic_make_request(read_bio);
@@ -1321,7 +1322,7 @@ read_again:
                        if (r1_bio->bios[j])
                                rdev_dec_pending(conf->mirrors[j].rdev, mddev);
                r1_bio->state = 0;
-               allow_barrier(conf, start_next_window, bio->bi_sector);
+               allow_barrier(conf, start_next_window, bio->bi_iter.bi_sector);
                md_wait_for_blocked_rdev(blocked_rdev, mddev);
                start_next_window = wait_barrier(conf, bio);
                /*
@@ -1348,7 +1349,7 @@ read_again:
                        bio->bi_phys_segments++;
                spin_unlock_irq(&conf->device_lock);
        }
-       sectors_handled = r1_bio->sector + max_sectors - bio->bi_sector;
+       sectors_handled = r1_bio->sector + max_sectors - bio->bi_iter.bi_sector;
 
        atomic_set(&r1_bio->remaining, 1);
        atomic_set(&r1_bio->behind_remaining, 0);
@@ -1360,7 +1361,7 @@ read_again:
                        continue;
 
                mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
-               bio_trim(mbio, r1_bio->sector - bio->bi_sector, max_sectors);
+               bio_trim(mbio, r1_bio->sector - bio->bi_iter.bi_sector, max_sectors);
 
                if (first_clone) {
                        /* do behind I/O ?
@@ -1394,7 +1395,7 @@ read_again:
 
                r1_bio->bios[i] = mbio;
 
-               mbio->bi_sector = (r1_bio->sector +
+               mbio->bi_iter.bi_sector = (r1_bio->sector +
                                   conf->mirrors[i].rdev->data_offset);
                mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
                mbio->bi_end_io = raid1_end_write_request;
@@ -1434,7 +1435,7 @@ read_again:
                r1_bio->sectors = bio_sectors(bio) - sectors_handled;
                r1_bio->state = 0;
                r1_bio->mddev = mddev;
-               r1_bio->sector = bio->bi_sector + sectors_handled;
+               r1_bio->sector = bio->bi_iter.bi_sector + sectors_handled;
                goto retry_write;
        }
 
@@ -1958,14 +1959,14 @@ static int process_checks(struct r1bio *r1_bio)
                /* fixup the bio for reuse */
                bio_reset(b);
                b->bi_vcnt = vcnt;
-               b->bi_size = r1_bio->sectors << 9;
-               b->bi_sector = r1_bio->sector +
+               b->bi_iter.bi_size = r1_bio->sectors << 9;
+               b->bi_iter.bi_sector = r1_bio->sector +
                        conf->mirrors[i].rdev->data_offset;
                b->bi_bdev = conf->mirrors[i].rdev->bdev;
                b->bi_end_io = end_sync_read;
                b->bi_private = r1_bio;
 
-               size = b->bi_size;
+               size = b->bi_iter.bi_size;
                for (j = 0; j < vcnt ; j++) {
                        struct bio_vec *bi;
                        bi = &b->bi_io_vec[j];
@@ -2220,11 +2221,11 @@ static int narrow_write_error(struct r1bio *r1_bio, int i)
                }
 
                wbio->bi_rw = WRITE;
-               wbio->bi_sector = r1_bio->sector;
-               wbio->bi_size = r1_bio->sectors << 9;
+               wbio->bi_iter.bi_sector = r1_bio->sector;
+               wbio->bi_iter.bi_size = r1_bio->sectors << 9;
 
                bio_trim(wbio, sector - r1_bio->sector, sectors);
-               wbio->bi_sector += rdev->data_offset;
+               wbio->bi_iter.bi_sector += rdev->data_offset;
                wbio->bi_bdev = rdev->bdev;
                if (submit_bio_wait(WRITE, wbio) == 0)
                        /* failure! */
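
Reusing a cloned bio for the narrowed retry takes two steps: rewind bi_iter to the full logical range, then let bio_trim() advance bi_sector and shrink bi_size to the wanted window (both arguments in sectors). A sketch under those assumptions (the helper name is illustrative):

#include <linux/bio.h>

/* Point 'wbio' at [sub_start, sub_start + sub_sectors) within a
 * range that starts at 'start' and spans 'total_sectors'. */
static void point_at_subrange(struct bio *wbio, sector_t start,
			      unsigned total_sectors,
			      sector_t sub_start, unsigned sub_sectors)
{
	wbio->bi_iter.bi_sector = start;
	wbio->bi_iter.bi_size = total_sectors << 9;
	bio_trim(wbio, sub_start - start, sub_sectors);
}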
@@ -2338,7 +2339,8 @@ read_more:
                }
                r1_bio->read_disk = disk;
                bio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev);
-               bio_trim(bio, r1_bio->sector - bio->bi_sector, max_sectors);
+               bio_trim(bio, r1_bio->sector - bio->bi_iter.bi_sector,
+                        max_sectors);
                r1_bio->bios[r1_bio->read_disk] = bio;
                rdev = conf->mirrors[disk].rdev;
                printk_ratelimited(KERN_ERR
@@ -2347,7 +2349,7 @@ read_more:
                                   mdname(mddev),
                                   (unsigned long long)r1_bio->sector,
                                   bdevname(rdev->bdev, b));
-               bio->bi_sector = r1_bio->sector + rdev->data_offset;
+               bio->bi_iter.bi_sector = r1_bio->sector + rdev->data_offset;
                bio->bi_bdev = rdev->bdev;
                bio->bi_end_io = raid1_end_read_request;
                bio->bi_rw = READ | do_sync;
@@ -2356,7 +2358,7 @@ read_more:
                        /* Drat - have to split this up more */
                        struct bio *mbio = r1_bio->master_bio;
                        int sectors_handled = (r1_bio->sector + max_sectors
-                                              - mbio->bi_sector);
+                                              - mbio->bi_iter.bi_sector);
                        r1_bio->sectors = max_sectors;
                        spin_lock_irq(&conf->device_lock);
                        if (mbio->bi_phys_segments == 0)
@@ -2374,7 +2376,8 @@ read_more:
                        r1_bio->state = 0;
                        set_bit(R1BIO_ReadError, &r1_bio->state);
                        r1_bio->mddev = mddev;
-                       r1_bio->sector = mbio->bi_sector + sectors_handled;
+                       r1_bio->sector = mbio->bi_iter.bi_sector +
+                               sectors_handled;
 
                        goto read_more;
                } else
@@ -2598,7 +2601,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
                }
                if (bio->bi_end_io) {
                        atomic_inc(&rdev->nr_pending);
-                       bio->bi_sector = sector_nr + rdev->data_offset;
+                       bio->bi_iter.bi_sector = sector_nr + rdev->data_offset;
                        bio->bi_bdev = rdev->bdev;
                        bio->bi_private = r1_bio;
                }
@@ -2698,7 +2701,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
                                                        continue;
                                                /* remove last page from this bio */
                                                bio->bi_vcnt--;
-                                               bio->bi_size -= len;
+                                               bio->bi_iter.bi_size -= len;
                                                bio->bi_flags &= ~(1<< BIO_SEG_VALID);
                                        }
                                        goto bio_full;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 8d39d63..33fc408 100644
@@ -1152,14 +1152,12 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
        kfree(plug);
 }
 
-static void make_request(struct mddev *mddev, struct bio * bio)
+static void __make_request(struct mddev *mddev, struct bio *bio)
 {
        struct r10conf *conf = mddev->private;
        struct r10bio *r10_bio;
        struct bio *read_bio;
        int i;
-       sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask);
-       int chunk_sects = chunk_mask + 1;
        const int rw = bio_data_dir(bio);
        const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
        const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
@@ -1174,88 +1172,27 @@ static void make_request(struct mddev *mddev, struct bio * bio)
        int max_sectors;
        int sectors;
 
-       if (unlikely(bio->bi_rw & REQ_FLUSH)) {
-               md_flush_request(mddev, bio);
-               return;
-       }
-
-       /* If this request crosses a chunk boundary, we need to
-        * split it.  This will only happen for 1 PAGE (or less) requests.
-        */
-       if (unlikely((bio->bi_sector & chunk_mask) + bio_sectors(bio)
-                    > chunk_sects
-                    && (conf->geo.near_copies < conf->geo.raid_disks
-                        || conf->prev.near_copies < conf->prev.raid_disks))) {
-               struct bio_pair *bp;
-               /* Sanity check -- queue functions should prevent this happening */
-               if (bio_segments(bio) > 1)
-                       goto bad_map;
-               /* This is a one page bio that upper layers
-                * refuse to split for us, so we need to split it.
-                */
-               bp = bio_split(bio,
-                              chunk_sects - (bio->bi_sector & (chunk_sects - 1)) );
-
-               /* Each of these 'make_request' calls will call 'wait_barrier'.
-                * If the first succeeds but the second blocks due to the resync
-                * thread raising the barrier, we will deadlock because the
-                * IO to the underlying device will be queued in generic_make_request
-                * and will never complete, so will never reduce nr_pending.
-                * So increment nr_waiting here so no new raise_barriers will
-                * succeed, and so the second wait_barrier cannot block.
-                */
-               spin_lock_irq(&conf->resync_lock);
-               conf->nr_waiting++;
-               spin_unlock_irq(&conf->resync_lock);
-
-               make_request(mddev, &bp->bio1);
-               make_request(mddev, &bp->bio2);
-
-               spin_lock_irq(&conf->resync_lock);
-               conf->nr_waiting--;
-               wake_up(&conf->wait_barrier);
-               spin_unlock_irq(&conf->resync_lock);
-
-               bio_pair_release(bp);
-               return;
-       bad_map:
-               printk("md/raid10:%s: make_request bug: can't convert block across chunks"
-                      " or bigger than %dk %llu %d\n", mdname(mddev), chunk_sects/2,
-                      (unsigned long long)bio->bi_sector, bio_sectors(bio) / 2);
-
-               bio_io_error(bio);
-               return;
-       }
-
-       md_write_start(mddev, bio);
-
-       /*
-        * Register the new request and wait if the reconstruction
-        * thread has put up a bar for new requests.
-        * Continue immediately if no resync is active currently.
-        */
-       wait_barrier(conf);
-
        sectors = bio_sectors(bio);
        while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
-           bio->bi_sector < conf->reshape_progress &&
-           bio->bi_sector + sectors > conf->reshape_progress) {
+           bio->bi_iter.bi_sector < conf->reshape_progress &&
+           bio->bi_iter.bi_sector + sectors > conf->reshape_progress) {
                /* IO spans the reshape position.  Need to wait for
                 * reshape to pass
                 */
                allow_barrier(conf);
                wait_event(conf->wait_barrier,
-                          conf->reshape_progress <= bio->bi_sector ||
-                          conf->reshape_progress >= bio->bi_sector + sectors);
+                          conf->reshape_progress <= bio->bi_iter.bi_sector ||
+                          conf->reshape_progress >= bio->bi_iter.bi_sector +
+                          sectors);
                wait_barrier(conf);
        }
        if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
            bio_data_dir(bio) == WRITE &&
            (mddev->reshape_backwards
-            ? (bio->bi_sector < conf->reshape_safe &&
-               bio->bi_sector + sectors > conf->reshape_progress)
-            : (bio->bi_sector + sectors > conf->reshape_safe &&
-               bio->bi_sector < conf->reshape_progress))) {
+            ? (bio->bi_iter.bi_sector < conf->reshape_safe &&
+               bio->bi_iter.bi_sector + sectors > conf->reshape_progress)
+            : (bio->bi_iter.bi_sector + sectors > conf->reshape_safe &&
+               bio->bi_iter.bi_sector < conf->reshape_progress))) {
                /* Need to update reshape_position in metadata */
                mddev->reshape_position = conf->reshape_progress;
                set_bit(MD_CHANGE_DEVS, &mddev->flags);
@@ -1273,7 +1210,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
        r10_bio->sectors = sectors;
 
        r10_bio->mddev = mddev;
-       r10_bio->sector = bio->bi_sector;
+       r10_bio->sector = bio->bi_iter.bi_sector;
        r10_bio->state = 0;
 
        /* We might need to issue multiple reads to different
@@ -1302,13 +1239,13 @@ read_again:
                slot = r10_bio->read_slot;
 
                read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
-               bio_trim(read_bio, r10_bio->sector - bio->bi_sector,
+               bio_trim(read_bio, r10_bio->sector - bio->bi_iter.bi_sector,
                         max_sectors);
 
                r10_bio->devs[slot].bio = read_bio;
                r10_bio->devs[slot].rdev = rdev;
 
-               read_bio->bi_sector = r10_bio->devs[slot].addr +
+               read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr +
                        choose_data_offset(r10_bio, rdev);
                read_bio->bi_bdev = rdev->bdev;
                read_bio->bi_end_io = raid10_end_read_request;
@@ -1320,7 +1257,7 @@ read_again:
                         * need another r10_bio.
                         */
                        sectors_handled = (r10_bio->sector + max_sectors
-                                          - bio->bi_sector);
+                                          - bio->bi_iter.bi_sector);
                        r10_bio->sectors = max_sectors;
                        spin_lock_irq(&conf->device_lock);
                        if (bio->bi_phys_segments == 0)
@@ -1341,7 +1278,8 @@ read_again:
                        r10_bio->sectors = bio_sectors(bio) - sectors_handled;
                        r10_bio->state = 0;
                        r10_bio->mddev = mddev;
-                       r10_bio->sector = bio->bi_sector + sectors_handled;
+                       r10_bio->sector = bio->bi_iter.bi_sector +
+                               sectors_handled;
                        goto read_again;
                } else
                        generic_make_request(read_bio);
@@ -1499,7 +1437,8 @@ retry_write:
                        bio->bi_phys_segments++;
                spin_unlock_irq(&conf->device_lock);
        }
-       sectors_handled = r10_bio->sector + max_sectors - bio->bi_sector;
+       sectors_handled = r10_bio->sector + max_sectors -
+               bio->bi_iter.bi_sector;
 
        atomic_set(&r10_bio->remaining, 1);
        bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0);
@@ -1510,11 +1449,11 @@ retry_write:
                if (r10_bio->devs[i].bio) {
                        struct md_rdev *rdev = conf->mirrors[d].rdev;
                        mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
-                       bio_trim(mbio, r10_bio->sector - bio->bi_sector,
+                       bio_trim(mbio, r10_bio->sector - bio->bi_iter.bi_sector,
                                 max_sectors);
                        r10_bio->devs[i].bio = mbio;
 
-                       mbio->bi_sector = (r10_bio->devs[i].addr+
+                       mbio->bi_iter.bi_sector = (r10_bio->devs[i].addr+
                                           choose_data_offset(r10_bio,
                                                              rdev));
                        mbio->bi_bdev = rdev->bdev;
@@ -1553,11 +1492,11 @@ retry_write:
                                rdev = conf->mirrors[d].rdev;
                        }
                        mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
-                       bio_trim(mbio, r10_bio->sector - bio->bi_sector,
+                       bio_trim(mbio, r10_bio->sector - bio->bi_iter.bi_sector,
                                 max_sectors);
                        r10_bio->devs[i].repl_bio = mbio;
 
-                       mbio->bi_sector = (r10_bio->devs[i].addr +
+                       mbio->bi_iter.bi_sector = (r10_bio->devs[i].addr +
                                           choose_data_offset(
                                                   r10_bio, rdev));
                        mbio->bi_bdev = rdev->bdev;
@@ -1591,11 +1530,57 @@ retry_write:
                r10_bio->sectors = bio_sectors(bio) - sectors_handled;
 
                r10_bio->mddev = mddev;
-               r10_bio->sector = bio->bi_sector + sectors_handled;
+               r10_bio->sector = bio->bi_iter.bi_sector + sectors_handled;
                r10_bio->state = 0;
                goto retry_write;
        }
        one_write_done(r10_bio);
+}
+
+static void make_request(struct mddev *mddev, struct bio *bio)
+{
+       struct r10conf *conf = mddev->private;
+       sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask);
+       int chunk_sects = chunk_mask + 1;
+
+       struct bio *split;
+
+       if (unlikely(bio->bi_rw & REQ_FLUSH)) {
+               md_flush_request(mddev, bio);
+               return;
+       }
+
+       md_write_start(mddev, bio);
+
+       /*
+        * Register the new request and wait if the reconstruction
+        * thread has put up a bar for new requests.
+        * Continue immediately if no resync is active currently.
+        */
+       wait_barrier(conf);
+
+       do {
+
+               /*
+                * If this request crosses a chunk boundary, we need to split
+                * it.
+                */
+               if (unlikely((bio->bi_iter.bi_sector & chunk_mask) +
+                            bio_sectors(bio) > chunk_sects
+                            && (conf->geo.near_copies < conf->geo.raid_disks
+                                || conf->prev.near_copies <
+                                conf->prev.raid_disks))) {
+                       split = bio_split(bio, chunk_sects -
+                                         (bio->bi_iter.bi_sector &
+                                          (chunk_sects - 1)),
+                                         GFP_NOIO, fs_bio_set);
+                       bio_chain(split, bio);
+               } else {
+                       split = bio;
+               }
+
+               __make_request(mddev, split);
+       } while (split != bio);
 
        /* In case raid10d snuck in to freeze_array */
        wake_up(&conf->wait_barrier);
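
The new wrapper can drop the old nr_waiting dance because the splits share one completion through bio_chain() instead of recursing into make_request(). Roughly what bio_chain() arranges, as an approximation from memory rather than from this diff (the real helper lives in fs/bio.c):

#include <linux/bio.h>

/* The child borrows the parent's completion: raise the parent's
 * remaining count, and have the child's endio drop it again, so the
 * parent's real bi_end_io runs only once everything has finished. */
static void chain_endio_sketch(struct bio *bio, int error)
{
	bio_endio(bio->bi_private, error);	/* forward to the parent */
	bio_put(bio);
}

static void bio_chain_sketch(struct bio *bio, struct bio *parent)
{
	BUG_ON(bio->bi_private || bio->bi_end_io);

	bio->bi_private = parent;
	bio->bi_end_io = chain_endio_sketch;
	atomic_inc(&parent->bi_remaining);
}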
@@ -2124,10 +2109,10 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
                bio_reset(tbio);
 
                tbio->bi_vcnt = vcnt;
-               tbio->bi_size = r10_bio->sectors << 9;
+               tbio->bi_iter.bi_size = r10_bio->sectors << 9;
                tbio->bi_rw = WRITE;
                tbio->bi_private = r10_bio;
-               tbio->bi_sector = r10_bio->devs[i].addr;
+               tbio->bi_iter.bi_sector = r10_bio->devs[i].addr;
 
                for (j=0; j < vcnt ; j++) {
                        tbio->bi_io_vec[j].bv_offset = 0;
@@ -2144,7 +2129,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
                atomic_inc(&r10_bio->remaining);
                md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio));
 
-               tbio->bi_sector += conf->mirrors[d].rdev->data_offset;
+               tbio->bi_iter.bi_sector += conf->mirrors[d].rdev->data_offset;
                tbio->bi_bdev = conf->mirrors[d].rdev->bdev;
                generic_make_request(tbio);
        }
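
bio_reset() keeps the allocation and its biovec array but wipes the control fields, so each resubmission must re-initialise everything it needs, as the tbio code above does. A compact sketch of that re-arming step (assumes the bio_reset() semantics used here):

#include <linux/bio.h>

/* Re-arm a recycled bio for a write; callers fill the biovec
 * entries separately, as sync_request_write() does for tbio. */
static void rearm_bio_for_write(struct bio *bio, struct block_device *bdev,
				sector_t sector, unsigned sectors,
				bio_end_io_t *end_io, void *private)
{
	bio_reset(bio);		/* keeps bi_io_vec and bi_max_vecs */
	bio->bi_bdev = bdev;
	bio->bi_iter.bi_sector = sector;
	bio->bi_iter.bi_size = sectors << 9;
	bio->bi_rw = WRITE;
	bio->bi_end_io = end_io;
	bio->bi_private = private;
}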
@@ -2614,8 +2599,8 @@ static int narrow_write_error(struct r10bio *r10_bio, int i)
                        sectors = sect_to_write;
                /* Write at 'sector' for 'sectors' */
                wbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
-               bio_trim(wbio, sector - bio->bi_sector, sectors);
-               wbio->bi_sector = (r10_bio->devs[i].addr+
+               bio_trim(wbio, sector - bio->bi_iter.bi_sector, sectors);
+               wbio->bi_iter.bi_sector = (r10_bio->devs[i].addr+
                                   choose_data_offset(r10_bio, rdev) +
                                   (sector - r10_bio->sector));
                wbio->bi_bdev = rdev->bdev;
@@ -2687,10 +2672,10 @@ read_more:
                (unsigned long long)r10_bio->sector);
        bio = bio_clone_mddev(r10_bio->master_bio,
                              GFP_NOIO, mddev);
-       bio_trim(bio, r10_bio->sector - bio->bi_sector, max_sectors);
+       bio_trim(bio, r10_bio->sector - bio->bi_iter.bi_sector, max_sectors);
        r10_bio->devs[slot].bio = bio;
        r10_bio->devs[slot].rdev = rdev;
-       bio->bi_sector = r10_bio->devs[slot].addr
+       bio->bi_iter.bi_sector = r10_bio->devs[slot].addr
                + choose_data_offset(r10_bio, rdev);
        bio->bi_bdev = rdev->bdev;
        bio->bi_rw = READ | do_sync;
@@ -2701,7 +2686,7 @@ read_more:
                struct bio *mbio = r10_bio->master_bio;
                int sectors_handled =
                        r10_bio->sector + max_sectors
-                       - mbio->bi_sector;
+                       - mbio->bi_iter.bi_sector;
                r10_bio->sectors = max_sectors;
                spin_lock_irq(&conf->device_lock);
                if (mbio->bi_phys_segments == 0)
@@ -2719,7 +2704,7 @@ read_more:
                set_bit(R10BIO_ReadError,
                        &r10_bio->state);
                r10_bio->mddev = mddev;
-               r10_bio->sector = mbio->bi_sector
+               r10_bio->sector = mbio->bi_iter.bi_sector
                        + sectors_handled;
 
                goto read_more;
@@ -3157,7 +3142,8 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
                                bio->bi_end_io = end_sync_read;
                                bio->bi_rw = READ;
                                from_addr = r10_bio->devs[j].addr;
-                               bio->bi_sector = from_addr + rdev->data_offset;
+                               bio->bi_iter.bi_sector = from_addr +
+                                       rdev->data_offset;
                                bio->bi_bdev = rdev->bdev;
                                atomic_inc(&rdev->nr_pending);
                                /* and we write to 'i' (if not in_sync) */
@@ -3181,7 +3167,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
                                        bio->bi_private = r10_bio;
                                        bio->bi_end_io = end_sync_write;
                                        bio->bi_rw = WRITE;
-                                       bio->bi_sector = to_addr
+                                       bio->bi_iter.bi_sector = to_addr
                                                + rdev->data_offset;
                                        bio->bi_bdev = rdev->bdev;
                                        atomic_inc(&r10_bio->remaining);
@@ -3210,7 +3196,8 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
                                bio->bi_private = r10_bio;
                                bio->bi_end_io = end_sync_write;
                                bio->bi_rw = WRITE;
-                               bio->bi_sector = to_addr + rdev->data_offset;
+                               bio->bi_iter.bi_sector = to_addr +
+                                       rdev->data_offset;
                                bio->bi_bdev = rdev->bdev;
                                atomic_inc(&r10_bio->remaining);
                                break;
@@ -3328,7 +3315,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
                        bio->bi_private = r10_bio;
                        bio->bi_end_io = end_sync_read;
                        bio->bi_rw = READ;
-                       bio->bi_sector = sector +
+                       bio->bi_iter.bi_sector = sector +
                                conf->mirrors[d].rdev->data_offset;
                        bio->bi_bdev = conf->mirrors[d].rdev->bdev;
                        count++;
@@ -3350,7 +3337,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
                        bio->bi_private = r10_bio;
                        bio->bi_end_io = end_sync_write;
                        bio->bi_rw = WRITE;
-                       bio->bi_sector = sector +
+                       bio->bi_iter.bi_sector = sector +
                                conf->mirrors[d].replacement->data_offset;
                        bio->bi_bdev = conf->mirrors[d].replacement->bdev;
                        count++;
@@ -3397,7 +3384,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
                             bio2 = bio2->bi_next) {
                                /* remove last page from this bio */
                                bio2->bi_vcnt--;
-                               bio2->bi_size -= len;
+                               bio2->bi_iter.bi_size -= len;
                                bio2->bi_flags &= ~(1<< BIO_SEG_VALID);
                        }
                        goto bio_full;
@@ -4418,7 +4405,7 @@ read_more:
        read_bio = bio_alloc_mddev(GFP_KERNEL, RESYNC_PAGES, mddev);
 
        read_bio->bi_bdev = rdev->bdev;
-       read_bio->bi_sector = (r10_bio->devs[r10_bio->read_slot].addr
+       read_bio->bi_iter.bi_sector = (r10_bio->devs[r10_bio->read_slot].addr
                               + rdev->data_offset);
        read_bio->bi_private = r10_bio;
        read_bio->bi_end_io = end_sync_read;
@@ -4426,7 +4413,7 @@ read_more:
        read_bio->bi_flags &= ~(BIO_POOL_MASK - 1);
        read_bio->bi_flags |= 1 << BIO_UPTODATE;
        read_bio->bi_vcnt = 0;
-       read_bio->bi_size = 0;
+       read_bio->bi_iter.bi_size = 0;
        r10_bio->master_bio = read_bio;
        r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum;
 
@@ -4452,7 +4439,8 @@ read_more:
 
                bio_reset(b);
                b->bi_bdev = rdev2->bdev;
-               b->bi_sector = r10_bio->devs[s/2].addr + rdev2->new_data_offset;
+               b->bi_iter.bi_sector = r10_bio->devs[s/2].addr +
+                       rdev2->new_data_offset;
                b->bi_private = r10_bio;
                b->bi_end_io = end_reshape_write;
                b->bi_rw = WRITE;
@@ -4479,7 +4467,7 @@ read_more:
                             bio2 = bio2->bi_next) {
                                /* Remove last page from this bio */
                                bio2->bi_vcnt--;
-                               bio2->bi_size -= len;
+                               bio2->bi_iter.bi_size -= len;
                                bio2->bi_flags &= ~(1<<BIO_SEG_VALID);
                        }
                        goto bio_full;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 03f82ab..f1feade 100644
@@ -133,7 +133,7 @@ static inline void unlock_all_device_hash_locks_irq(struct r5conf *conf)
 static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector)
 {
        int sectors = bio_sectors(bio);
-       if (bio->bi_sector + sectors < sector + STRIPE_SECTORS)
+       if (bio->bi_iter.bi_sector + sectors < sector + STRIPE_SECTORS)
                return bio->bi_next;
        else
                return NULL;
@@ -225,7 +225,7 @@ static void return_io(struct bio *return_bi)
 
                return_bi = bi->bi_next;
                bi->bi_next = NULL;
-               bi->bi_size = 0;
+               bi->bi_iter.bi_size = 0;
                trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
                                         bi, 0);
                bio_endio(bi, 0);
@@ -852,10 +852,10 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
                                bi->bi_rw, i);
                        atomic_inc(&sh->count);
                        if (use_new_offset(conf, sh))
-                               bi->bi_sector = (sh->sector
+                               bi->bi_iter.bi_sector = (sh->sector
                                                 + rdev->new_data_offset);
                        else
-                               bi->bi_sector = (sh->sector
+                               bi->bi_iter.bi_sector = (sh->sector
                                                 + rdev->data_offset);
                        if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
                                bi->bi_rw |= REQ_NOMERGE;
@@ -863,7 +863,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
                        bi->bi_vcnt = 1;
                        bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
                        bi->bi_io_vec[0].bv_offset = 0;
-                       bi->bi_size = STRIPE_SIZE;
+                       bi->bi_iter.bi_size = STRIPE_SIZE;
                        /*
                         * If this is discard request, set bi_vcnt 0. We don't
                         * want to confuse SCSI because SCSI will replace payload
@@ -899,15 +899,15 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
                                rbi->bi_rw, i);
                        atomic_inc(&sh->count);
                        if (use_new_offset(conf, sh))
-                               rbi->bi_sector = (sh->sector
+                               rbi->bi_iter.bi_sector = (sh->sector
                                                  + rrdev->new_data_offset);
                        else
-                               rbi->bi_sector = (sh->sector
+                               rbi->bi_iter.bi_sector = (sh->sector
                                                  + rrdev->data_offset);
                        rbi->bi_vcnt = 1;
                        rbi->bi_io_vec[0].bv_len = STRIPE_SIZE;
                        rbi->bi_io_vec[0].bv_offset = 0;
-                       rbi->bi_size = STRIPE_SIZE;
+                       rbi->bi_iter.bi_size = STRIPE_SIZE;
                        /*
                         * If this is discard request, set bi_vcnt 0. We don't
                         * want to confuse SCSI because SCSI will replace payload
@@ -935,24 +935,24 @@ static struct dma_async_tx_descriptor *
 async_copy_data(int frombio, struct bio *bio, struct page *page,
        sector_t sector, struct dma_async_tx_descriptor *tx)
 {
-       struct bio_vec *bvl;
+       struct bio_vec bvl;
+       struct bvec_iter iter;
        struct page *bio_page;
-       int i;
        int page_offset;
        struct async_submit_ctl submit;
        enum async_tx_flags flags = 0;
 
-       if (bio->bi_sector >= sector)
-               page_offset = (signed)(bio->bi_sector - sector) * 512;
+       if (bio->bi_iter.bi_sector >= sector)
+               page_offset = (signed)(bio->bi_iter.bi_sector - sector) * 512;
        else
-               page_offset = (signed)(sector - bio->bi_sector) * -512;
+               page_offset = (signed)(sector - bio->bi_iter.bi_sector) * -512;
 
        if (frombio)
                flags |= ASYNC_TX_FENCE;
        init_async_submit(&submit, flags, tx, NULL, NULL, NULL);
 
-       bio_for_each_segment(bvl, bio, i) {
-               int len = bvl->bv_len;
+       bio_for_each_segment(bvl, bio, iter) {
+               int len = bvl.bv_len;
                int clen;
                int b_offset = 0;
 
@@ -968,8 +968,8 @@ async_copy_data(int frombio, struct bio *bio, struct page *page,
                        clen = len;
 
                if (clen > 0) {
-                       b_offset += bvl->bv_offset;
-                       bio_page = bvl->bv_page;
+                       b_offset += bvl.bv_offset;
+                       bio_page = bvl.bv_page;
                        if (frombio)
                                tx = async_memcpy(page, bio_page, page_offset,
                                                  b_offset, clen, &submit);
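
This hunk shows the other half of the conversion: bio_for_each_segment() now walks a struct bvec_iter and hands out each bio_vec by value, so the loop body uses '.' and never writes to the bio's biovec array. A self-contained sketch of the new iteration style:

#include <linux/bio.h>

/* Sum the payload of a bio with the iterator-based walk; 'bvec' is
 * a by-value copy produced for each segment. */
static unsigned count_payload_bytes(struct bio *bio)
{
	struct bio_vec bvec;
	struct bvec_iter iter;
	unsigned bytes = 0;

	bio_for_each_segment(bvec, bio, iter)
		bytes += bvec.bv_len;

	return bytes;
}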
@@ -1012,7 +1012,7 @@ static void ops_complete_biofill(void *stripe_head_ref)
                        BUG_ON(!dev->read);
                        rbi = dev->read;
                        dev->read = NULL;
-                       while (rbi && rbi->bi_sector <
+                       while (rbi && rbi->bi_iter.bi_sector <
                                dev->sector + STRIPE_SECTORS) {
                                rbi2 = r5_next_bio(rbi, dev->sector);
                                if (!raid5_dec_bi_active_stripes(rbi)) {
@@ -1048,7 +1048,7 @@ static void ops_run_biofill(struct stripe_head *sh)
                        dev->read = rbi = dev->toread;
                        dev->toread = NULL;
                        spin_unlock_irq(&sh->stripe_lock);
-                       while (rbi && rbi->bi_sector <
+                       while (rbi && rbi->bi_iter.bi_sector <
                                dev->sector + STRIPE_SECTORS) {
                                tx = async_copy_data(0, rbi, dev->page,
                                        dev->sector, tx);
@@ -1390,7 +1390,7 @@ ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
                        wbi = dev->written = chosen;
                        spin_unlock_irq(&sh->stripe_lock);
 
-                       while (wbi && wbi->bi_sector <
+                       while (wbi && wbi->bi_iter.bi_sector <
                                dev->sector + STRIPE_SECTORS) {
                                if (wbi->bi_rw & REQ_FUA)
                                        set_bit(R5_WantFUA, &dev->flags);
@@ -2615,7 +2615,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
        int firstwrite=0;
 
        pr_debug("adding bi b#%llu to stripe s#%llu\n",
-               (unsigned long long)bi->bi_sector,
+               (unsigned long long)bi->bi_iter.bi_sector,
                (unsigned long long)sh->sector);
 
        /*
@@ -2633,12 +2633,12 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
                        firstwrite = 1;
        } else
                bip = &sh->dev[dd_idx].toread;
-       while (*bip && (*bip)->bi_sector < bi->bi_sector) {
-               if (bio_end_sector(*bip) > bi->bi_sector)
+       while (*bip && (*bip)->bi_iter.bi_sector < bi->bi_iter.bi_sector) {
+               if (bio_end_sector(*bip) > bi->bi_iter.bi_sector)
                        goto overlap;
                bip = & (*bip)->bi_next;
        }
-       if (*bip && (*bip)->bi_sector < bio_end_sector(bi))
+       if (*bip && (*bip)->bi_iter.bi_sector < bio_end_sector(bi))
                goto overlap;
 
        BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next);
@@ -2652,7 +2652,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
                sector_t sector = sh->dev[dd_idx].sector;
                for (bi=sh->dev[dd_idx].towrite;
                     sector < sh->dev[dd_idx].sector + STRIPE_SECTORS &&
-                            bi && bi->bi_sector <= sector;
+                            bi && bi->bi_iter.bi_sector <= sector;
                     bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) {
                        if (bio_end_sector(bi) >= sector)
                                sector = bio_end_sector(bi);
@@ -2662,7 +2662,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
        }
 
        pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n",
-               (unsigned long long)(*bip)->bi_sector,
+               (unsigned long long)(*bip)->bi_iter.bi_sector,
                (unsigned long long)sh->sector, dd_idx);
        spin_unlock_irq(&sh->stripe_lock);
 
@@ -2737,7 +2737,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
                if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
                        wake_up(&conf->wait_for_overlap);
 
-               while (bi && bi->bi_sector <
+               while (bi && bi->bi_iter.bi_sector <
                        sh->dev[i].sector + STRIPE_SECTORS) {
                        struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
                        clear_bit(BIO_UPTODATE, &bi->bi_flags);
@@ -2756,7 +2756,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
                bi = sh->dev[i].written;
                sh->dev[i].written = NULL;
                if (bi) bitmap_end = 1;
-               while (bi && bi->bi_sector <
+               while (bi && bi->bi_iter.bi_sector <
                       sh->dev[i].sector + STRIPE_SECTORS) {
                        struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
                        clear_bit(BIO_UPTODATE, &bi->bi_flags);
@@ -2780,7 +2780,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
                        spin_unlock_irq(&sh->stripe_lock);
                        if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
                                wake_up(&conf->wait_for_overlap);
-                       while (bi && bi->bi_sector <
+                       while (bi && bi->bi_iter.bi_sector <
                               sh->dev[i].sector + STRIPE_SECTORS) {
                                struct bio *nextbi =
                                        r5_next_bio(bi, sh->dev[i].sector);
@@ -3004,7 +3004,7 @@ static void handle_stripe_clean_event(struct r5conf *conf,
                                        clear_bit(R5_UPTODATE, &dev->flags);
                                wbi = dev->written;
                                dev->written = NULL;
-                               while (wbi && wbi->bi_sector <
+                               while (wbi && wbi->bi_iter.bi_sector <
                                        dev->sector + STRIPE_SECTORS) {
                                        wbi2 = r5_next_bio(wbi, dev->sector);
                                        if (!raid5_dec_bi_active_stripes(wbi)) {
@@ -4096,7 +4096,7 @@ static int raid5_mergeable_bvec(struct request_queue *q,
 
 static int in_chunk_boundary(struct mddev *mddev, struct bio *bio)
 {
-       sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
+       sector_t sector = bio->bi_iter.bi_sector + get_start_sect(bio->bi_bdev);
        unsigned int chunk_sectors = mddev->chunk_sectors;
        unsigned int bio_sectors = bio_sectors(bio);
 
@@ -4233,9 +4233,9 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
        /*
         *      compute position
         */
-       align_bi->bi_sector =  raid5_compute_sector(conf, raid_bio->bi_sector,
-                                                   0,
-                                                   &dd_idx, NULL);
+       align_bi->bi_iter.bi_sector =
+               raid5_compute_sector(conf, raid_bio->bi_iter.bi_sector,
+                                    0, &dd_idx, NULL);
 
        end_sector = bio_end_sector(align_bi);
        rcu_read_lock();
@@ -4260,7 +4260,8 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
                align_bi->bi_flags &= ~(1 << BIO_SEG_VALID);
 
                if (!bio_fits_rdev(align_bi) ||
-                   is_badblock(rdev, align_bi->bi_sector, bio_sectors(align_bi),
+                   is_badblock(rdev, align_bi->bi_iter.bi_sector,
+                               bio_sectors(align_bi),
                                &first_bad, &bad_sectors)) {
                        /* too big in some way, or has a known bad block */
                        bio_put(align_bi);
@@ -4269,7 +4270,7 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
                }
 
                /* No reshape active, so we can trust rdev->data_offset */
-               align_bi->bi_sector += rdev->data_offset;
+               align_bi->bi_iter.bi_sector += rdev->data_offset;
 
                spin_lock_irq(&conf->device_lock);
                wait_event_lock_irq(conf->wait_for_stripe,
@@ -4281,7 +4282,7 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
                if (mddev->gendisk)
                        trace_block_bio_remap(bdev_get_queue(align_bi->bi_bdev),
                                              align_bi, disk_devt(mddev->gendisk),
-                                             raid_bio->bi_sector);
+                                             raid_bio->bi_iter.bi_sector);
                generic_make_request(align_bi);
                return 1;
        } else {
@@ -4464,8 +4465,8 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
                /* Skip discard while reshape is happening */
                return;
 
-       logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
-       last_sector = bi->bi_sector + (bi->bi_size>>9);
+       logical_sector = bi->bi_iter.bi_sector & ~((sector_t)STRIPE_SECTORS-1);
+       last_sector = bi->bi_iter.bi_sector + (bi->bi_iter.bi_size>>9);
 
        bi->bi_next = NULL;
        bi->bi_phys_segments = 1; /* over-loaded to count active stripes */
@@ -4569,7 +4570,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
                return;
        }
 
-       logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
+       logical_sector = bi->bi_iter.bi_sector & ~((sector_t)STRIPE_SECTORS-1);
        last_sector = bio_end_sector(bi);
        bi->bi_next = NULL;
        bi->bi_phys_segments = 1;       /* over-loaded to count active stripes */
@@ -5053,7 +5054,8 @@ static int  retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
        int remaining;
        int handled = 0;
 
-       logical_sector = raid_bio->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
+       logical_sector = raid_bio->bi_iter.bi_sector &
+               ~((sector_t)STRIPE_SECTORS-1);
        sector = raid5_compute_sector(conf, logical_sector,
                                      0, &dd_idx, NULL);
        last_sector = bio_end_sector(raid_bio);
@@ -6101,6 +6103,7 @@ static int run(struct mddev *mddev)
                blk_queue_io_min(mddev->queue, chunk_size);
                blk_queue_io_opt(mddev->queue, chunk_size *
                                 (conf->raid_disks - conf->max_degraded));
+               mddev->queue->limits.raid_partial_stripes_expensive = 1;
                /*
                 * We can only discard a whole stripe. It doesn't make sense to
                 * discard data disk but write parity disk
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index dd239bd..00d339c 100644
@@ -2235,10 +2235,10 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
        }
 
        /* do we need to support multiple segments? */
-       if (bio_segments(req->bio) > 1 || bio_segments(rsp->bio) > 1) {
-               printk(MYIOC_s_ERR_FMT "%s: multiple segments req %u %u, rsp %u %u\n",
-                   ioc->name, __func__, bio_segments(req->bio), blk_rq_bytes(req),
-                   bio_segments(rsp->bio), blk_rq_bytes(rsp));
+       if (bio_multiple_segments(req->bio) ||
+           bio_multiple_segments(rsp->bio)) {
+               printk(MYIOC_s_ERR_FMT "%s: multiple segments req %u, rsp %u\n",
+                   ioc->name, __func__, blk_rq_bytes(req), blk_rq_bytes(rsp));
                return -EINVAL;
        }
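
bio_segments() becomes a genuine walk over the iterator in this series, so the cheap question "more than one segment?" gets its own predicate. An assumed approximation of bio_multiple_segments(), reconstructed from memory rather than from this diff:

#include <linux/bio.h>

/* Assumption: a bio spans multiple segments iff its total size
 * exceeds the length of its first biovec. */
#define bio_multiple_segments_sketch(bio) \
	((bio)->bi_iter.bi_size != bio_iovec(bio).bv_len)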
 
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index 92bd22c..9cbc567 100644
@@ -504,7 +504,7 @@ static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev,
        struct dasd_diag_req *dreq;
        struct dasd_diag_bio *dbio;
        struct req_iterator iter;
-       struct bio_vec *bv;
+       struct bio_vec bv;
        char *dst;
        unsigned int count, datasize;
        sector_t recid, first_rec, last_rec;
@@ -525,10 +525,10 @@ static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev,
        /* Check struct bio and count the number of blocks for the request. */
        count = 0;
        rq_for_each_segment(bv, req, iter) {
-               if (bv->bv_len & (blksize - 1))
+               if (bv.bv_len & (blksize - 1))
                        /* Fba can only do full blocks. */
                        return ERR_PTR(-EINVAL);
-               count += bv->bv_len >> (block->s2b_shift + 9);
+               count += bv.bv_len >> (block->s2b_shift + 9);
        }
        /* Paranoia. */
        if (count != last_rec - first_rec + 1)
@@ -545,8 +545,8 @@ static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev,
        dbio = dreq->bio;
        recid = first_rec;
        rq_for_each_segment(bv, req, iter) {
-               dst = page_address(bv->bv_page) + bv->bv_offset;
-               for (off = 0; off < bv->bv_len; off += blksize) {
+               dst = page_address(bv.bv_page) + bv.bv_offset;
+               for (off = 0; off < bv.bv_len; off += blksize) {
                        memset(dbio, 0, sizeof (struct dasd_diag_bio));
                        dbio->type = rw_cmd;
                        dbio->block_number = recid + 1;
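
The request-level iterator changes the same way: rq_for_each_segment() now yields each struct bio_vec by value, which is why 'bv' loses its '*' and the loop bodies switch from '->' to '.'. A minimal sketch of the converted loop shape:

#include <linux/blkdev.h>

/* Count whole blocks in a request, mirroring the dasd check above;
 * 'bv' is a by-value copy for each segment. */
static unsigned count_request_blocks(struct request *req, unsigned blksize)
{
	struct req_iterator iter;
	struct bio_vec bv;
	unsigned count = 0;

	rq_for_each_segment(bv, req, iter)
		count += bv.bv_len / blksize;

	return count;
}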
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 95e4578..2e8e075 100644
@@ -2551,7 +2551,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
        struct dasd_ccw_req *cqr;
        struct ccw1 *ccw;
        struct req_iterator iter;
-       struct bio_vec *bv;
+       struct bio_vec bv;
        char *dst;
        unsigned int off;
        int count, cidaw, cplength, datasize;
@@ -2573,13 +2573,13 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
        count = 0;
        cidaw = 0;
        rq_for_each_segment(bv, req, iter) {
-               if (bv->bv_len & (blksize - 1))
+               if (bv.bv_len & (blksize - 1))
                        /* Eckd can only do full blocks. */
                        return ERR_PTR(-EINVAL);
-               count += bv->bv_len >> (block->s2b_shift + 9);
+               count += bv.bv_len >> (block->s2b_shift + 9);
 #if defined(CONFIG_64BIT)
-               if (idal_is_needed (page_address(bv->bv_page), bv->bv_len))
-                       cidaw += bv->bv_len >> (block->s2b_shift + 9);
+               if (idal_is_needed (page_address(bv.bv_page), bv.bv_len))
+                       cidaw += bv.bv_len >> (block->s2b_shift + 9);
 #endif
        }
        /* Paranoia. */
@@ -2650,16 +2650,16 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
                              last_rec - recid + 1, cmd, basedev, blksize);
        }
        rq_for_each_segment(bv, req, iter) {
-               dst = page_address(bv->bv_page) + bv->bv_offset;
+               dst = page_address(bv.bv_page) + bv.bv_offset;
                if (dasd_page_cache) {
                        char *copy = kmem_cache_alloc(dasd_page_cache,
                                                      GFP_DMA | __GFP_NOWARN);
                        if (copy && rq_data_dir(req) == WRITE)
-                               memcpy(copy + bv->bv_offset, dst, bv->bv_len);
+                               memcpy(copy + bv.bv_offset, dst, bv.bv_len);
                        if (copy)
-                               dst = copy + bv->bv_offset;
+                               dst = copy + bv.bv_offset;
                }
-               for (off = 0; off < bv->bv_len; off += blksize) {
+               for (off = 0; off < bv.bv_len; off += blksize) {
                        sector_t trkid = recid;
                        unsigned int recoffs = sector_div(trkid, blk_per_trk);
                        rcmd = cmd;
@@ -2735,7 +2735,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
        struct dasd_ccw_req *cqr;
        struct ccw1 *ccw;
        struct req_iterator iter;
-       struct bio_vec *bv;
+       struct bio_vec bv;
        char *dst, *idaw_dst;
        unsigned int cidaw, cplength, datasize;
        unsigned int tlf;
@@ -2813,8 +2813,8 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
        idaw_dst = NULL;
        idaw_len = 0;
        rq_for_each_segment(bv, req, iter) {
-               dst = page_address(bv->bv_page) + bv->bv_offset;
-               seg_len = bv->bv_len;
+               dst = page_address(bv.bv_page) + bv.bv_offset;
+               seg_len = bv.bv_len;
                while (seg_len) {
                        if (new_track) {
                                trkid = recid;
@@ -3039,7 +3039,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
 {
        struct dasd_ccw_req *cqr;
        struct req_iterator iter;
-       struct bio_vec *bv;
+       struct bio_vec bv;
        char *dst;
        unsigned int trkcount, ctidaw;
        unsigned char cmd;
@@ -3125,8 +3125,8 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
                new_track = 1;
                recid = first_rec;
                rq_for_each_segment(bv, req, iter) {
-                       dst = page_address(bv->bv_page) + bv->bv_offset;
-                       seg_len = bv->bv_len;
+                       dst = page_address(bv.bv_page) + bv.bv_offset;
+                       seg_len = bv.bv_len;
                        while (seg_len) {
                                if (new_track) {
                                        trkid = recid;
@@ -3158,9 +3158,9 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
                }
        } else {
                rq_for_each_segment(bv, req, iter) {
-                       dst = page_address(bv->bv_page) + bv->bv_offset;
+                       dst = page_address(bv.bv_page) + bv.bv_offset;
                        last_tidaw = itcw_add_tidaw(itcw, 0x00,
-                                                   dst, bv->bv_len);
+                                                   dst, bv.bv_len);
                        if (IS_ERR(last_tidaw)) {
                                ret = -EINVAL;
                                goto out_error;
@@ -3278,7 +3278,7 @@ static struct dasd_ccw_req *dasd_raw_build_cp(struct dasd_device *startdev,
        struct dasd_ccw_req *cqr;
        struct ccw1 *ccw;
        struct req_iterator iter;
-       struct bio_vec *bv;
+       struct bio_vec bv;
        char *dst;
        unsigned char cmd;
        unsigned int trkcount;
@@ -3378,8 +3378,8 @@ static struct dasd_ccw_req *dasd_raw_build_cp(struct dasd_device *startdev,
                        idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE);
        }
        rq_for_each_segment(bv, req, iter) {
-               dst = page_address(bv->bv_page) + bv->bv_offset;
-               seg_len = bv->bv_len;
+               dst = page_address(bv.bv_page) + bv.bv_offset;
+               seg_len = bv.bv_len;
                if (cmd == DASD_ECKD_CCW_READ_TRACK)
                        memset(dst, 0, seg_len);
                if (!len_to_track_end) {
@@ -3424,7 +3424,7 @@ dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
        struct dasd_eckd_private *private;
        struct ccw1 *ccw;
        struct req_iterator iter;
-       struct bio_vec *bv;
+       struct bio_vec bv;
        char *dst, *cda;
        unsigned int blksize, blk_per_trk, off;
        sector_t recid;
@@ -3442,8 +3442,8 @@ dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
        if (private->uses_cdl == 0 || recid > 2*blk_per_trk)
                ccw++;
        rq_for_each_segment(bv, req, iter) {
-               dst = page_address(bv->bv_page) + bv->bv_offset;
-               for (off = 0; off < bv->bv_len; off += blksize) {
+               dst = page_address(bv.bv_page) + bv.bv_offset;
+               for (off = 0; off < bv.bv_len; off += blksize) {
                        /* Skip locate record. */
                        if (private->uses_cdl && recid <= 2*blk_per_trk)
                                ccw++;
@@ -3454,7 +3454,7 @@ dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
                                        cda = (char *)((addr_t) ccw->cda);
                                if (dst != cda) {
                                        if (rq_data_dir(req) == READ)
-                                               memcpy(dst, cda, bv->bv_len);
+                                               memcpy(dst, cda, bv.bv_len);
                                        kmem_cache_free(dasd_page_cache,
                                            (void *)((addr_t)cda & PAGE_MASK));
                                }
index 9cbc8c3..2c8e68b 100644 (file)
@@ -260,7 +260,7 @@ static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev,
        struct dasd_ccw_req *cqr;
        struct ccw1 *ccw;
        struct req_iterator iter;
-       struct bio_vec *bv;
+       struct bio_vec bv;
        char *dst;
        int count, cidaw, cplength, datasize;
        sector_t recid, first_rec, last_rec;
@@ -283,13 +283,13 @@ static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev,
        count = 0;
        cidaw = 0;
        rq_for_each_segment(bv, req, iter) {
-               if (bv->bv_len & (blksize - 1))
+               if (bv.bv_len & (blksize - 1))
                        /* Fba can only do full blocks. */
                        return ERR_PTR(-EINVAL);
-               count += bv->bv_len >> (block->s2b_shift + 9);
+               count += bv.bv_len >> (block->s2b_shift + 9);
 #if defined(CONFIG_64BIT)
-               if (idal_is_needed (page_address(bv->bv_page), bv->bv_len))
-                       cidaw += bv->bv_len / blksize;
+               if (idal_is_needed (page_address(bv.bv_page), bv.bv_len))
+                       cidaw += bv.bv_len / blksize;
 #endif
        }
        /* Paranoia. */
@@ -326,16 +326,16 @@ static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev,
        }
        recid = first_rec;
        rq_for_each_segment(bv, req, iter) {
-               dst = page_address(bv->bv_page) + bv->bv_offset;
+               dst = page_address(bv.bv_page) + bv.bv_offset;
                if (dasd_page_cache) {
                        char *copy = kmem_cache_alloc(dasd_page_cache,
                                                      GFP_DMA | __GFP_NOWARN);
                        if (copy && rq_data_dir(req) == WRITE)
-                               memcpy(copy + bv->bv_offset, dst, bv->bv_len);
+                               memcpy(copy + bv.bv_offset, dst, bv.bv_len);
                        if (copy)
-                               dst = copy + bv->bv_offset;
+                               dst = copy + bv.bv_offset;
                }
-               for (off = 0; off < bv->bv_len; off += blksize) {
+               for (off = 0; off < bv.bv_len; off += blksize) {
                        /* Locate record for stupid devices. */
                        if (private->rdc_data.mode.bits.data_chain == 0) {
                                ccw[-1].flags |= CCW_FLAG_CC;
@@ -384,7 +384,7 @@ dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req)
        struct dasd_fba_private *private;
        struct ccw1 *ccw;
        struct req_iterator iter;
-       struct bio_vec *bv;
+       struct bio_vec bv;
        char *dst, *cda;
        unsigned int blksize, off;
        int status;
@@ -399,8 +399,8 @@ dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req)
        if (private->rdc_data.mode.bits.data_chain != 0)
                ccw++;
        rq_for_each_segment(bv, req, iter) {
-               dst = page_address(bv->bv_page) + bv->bv_offset;
-               for (off = 0; off < bv->bv_len; off += blksize) {
+               dst = page_address(bv.bv_page) + bv.bv_offset;
+               for (off = 0; off < bv.bv_len; off += blksize) {
                        /* Skip locate record. */
                        if (private->rdc_data.mode.bits.data_chain == 0)
                                ccw++;
@@ -411,7 +411,7 @@ dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req)
                                        cda = (char *)((addr_t) ccw->cda);
                                if (dst != cda) {
                                        if (rq_data_dir(req) == READ)
-                                               memcpy(dst, cda, bv->bv_len);
+                                               memcpy(dst, cda, bv.bv_len);
                                        kmem_cache_free(dasd_page_cache,
                                            (void *)((addr_t)cda & PAGE_MASK));
                                }
index 6eca019..ebf41e2 100644 (file)
@@ -808,18 +808,19 @@ static void
 dcssblk_make_request(struct request_queue *q, struct bio *bio)
 {
        struct dcssblk_dev_info *dev_info;
-       struct bio_vec *bvec;
+       struct bio_vec bvec;
+       struct bvec_iter iter;
        unsigned long index;
        unsigned long page_addr;
        unsigned long source_addr;
        unsigned long bytes_done;
-       int i;
 
        bytes_done = 0;
        dev_info = bio->bi_bdev->bd_disk->private_data;
        if (dev_info == NULL)
                goto fail;
-       if ((bio->bi_sector & 7) != 0 || (bio->bi_size & 4095) != 0)
+       if ((bio->bi_iter.bi_sector & 7) != 0 ||
+           (bio->bi_iter.bi_size & 4095) != 0)
                /* Request is not page-aligned. */
                goto fail;
        if (bio_end_sector(bio) > get_capacity(bio->bi_bdev->bd_disk)) {
@@ -842,22 +843,22 @@ dcssblk_make_request(struct request_queue *q, struct bio *bio)
                }
        }
 
-       index = (bio->bi_sector >> 3);
-       bio_for_each_segment(bvec, bio, i) {
+       index = (bio->bi_iter.bi_sector >> 3);
+       bio_for_each_segment(bvec, bio, iter) {
                page_addr = (unsigned long)
-                       page_address(bvec->bv_page) + bvec->bv_offset;
+                       page_address(bvec.bv_page) + bvec.bv_offset;
                source_addr = dev_info->start + (index<<12) + bytes_done;
-               if (unlikely((page_addr & 4095) != 0) || (bvec->bv_len & 4095) != 0)
+               if (unlikely((page_addr & 4095) != 0) || (bvec.bv_len & 4095) != 0)
                        // More paranoia.
                        goto fail;
                if (bio_data_dir(bio) == READ) {
                        memcpy((void*)page_addr, (void*)source_addr,
-                               bvec->bv_len);
+                               bvec.bv_len);
                } else {
                        memcpy((void*)source_addr, (void*)page_addr,
-                               bvec->bv_len);
+                               bvec.bv_len);
                }
-               bytes_done += bvec->bv_len;
+               bytes_done += bvec.bv_len;
        }
        bio_endio(bio, 0);
        return;
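
dcssblk (and xpram below) reject misaligned requests purely by arithmetic on the iterator fields: with 512-byte sectors there are 8 sectors per 4 KiB page, so bi_sector & 7 and bi_size & 4095 test page alignment. The two checks in isolation:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t sector_t;

    /* 512-byte sectors: 8 per 4 KiB page. */
    static bool page_aligned(sector_t sector, uint32_t bytes)
    {
        return (sector & 7) == 0 && (bytes & 4095) == 0;
    }

    int main(void)
    {
        printf("%d %d\n", page_aligned(8, 8192),
               page_aligned(9, 8192));   /* 1 0 */
        return 0;
    }
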
index d0ab501..76bed17 100644 (file)
@@ -130,7 +130,7 @@ static void scm_request_prepare(struct scm_request *scmrq)
        struct aidaw *aidaw = scmrq->aidaw;
        struct msb *msb = &scmrq->aob->msb[0];
        struct req_iterator iter;
-       struct bio_vec *bv;
+       struct bio_vec bv;
 
        msb->bs = MSB_BS_4K;
        scmrq->aob->request.msb_count = 1;
@@ -142,9 +142,9 @@ static void scm_request_prepare(struct scm_request *scmrq)
        msb->data_addr = (u64) aidaw;
 
        rq_for_each_segment(bv, scmrq->request, iter) {
-               WARN_ON(bv->bv_offset);
-               msb->blk_count += bv->bv_len >> 12;
-               aidaw->data_addr = (u64) page_address(bv->bv_page);
+               WARN_ON(bv.bv_offset);
+               msb->blk_count += bv.bv_len >> 12;
+               aidaw->data_addr = (u64) page_address(bv.bv_page);
                aidaw++;
        }
 }
index 27f930c..9aae909 100644 (file)
@@ -122,7 +122,7 @@ static void scm_prepare_cluster_request(struct scm_request *scmrq)
        struct aidaw *aidaw = scmrq->aidaw;
        struct msb *msb = &scmrq->aob->msb[0];
        struct req_iterator iter;
-       struct bio_vec *bv;
+       struct bio_vec bv;
        int i = 0;
        u64 addr;
 
@@ -163,7 +163,7 @@ static void scm_prepare_cluster_request(struct scm_request *scmrq)
                        i++;
                }
                rq_for_each_segment(bv, req, iter) {
-                       aidaw->data_addr = (u64) page_address(bv->bv_page);
+                       aidaw->data_addr = (u64) page_address(bv.bv_page);
                        aidaw++;
                        i++;
                }
index 58141f0..6969d39 100644 (file)
@@ -184,25 +184,26 @@ static unsigned long xpram_highest_page_index(void)
 static void xpram_make_request(struct request_queue *q, struct bio *bio)
 {
        xpram_device_t *xdev = bio->bi_bdev->bd_disk->private_data;
-       struct bio_vec *bvec;
+       struct bio_vec bvec;
+       struct bvec_iter iter;
        unsigned int index;
        unsigned long page_addr;
        unsigned long bytes;
-       int i;
 
-       if ((bio->bi_sector & 7) != 0 || (bio->bi_size & 4095) != 0)
+       if ((bio->bi_iter.bi_sector & 7) != 0 ||
+           (bio->bi_iter.bi_size & 4095) != 0)
                /* Request is not page-aligned. */
                goto fail;
-       if ((bio->bi_size >> 12) > xdev->size)
+       if ((bio->bi_iter.bi_size >> 12) > xdev->size)
                /* Request size exceeds the device size. */
                goto fail;
-       if ((bio->bi_sector >> 3) > 0xffffffffU - xdev->offset)
+       if ((bio->bi_iter.bi_sector >> 3) > 0xffffffffU - xdev->offset)
                goto fail;
-       index = (bio->bi_sector >> 3) + xdev->offset;
-       bio_for_each_segment(bvec, bio, i) {
+       index = (bio->bi_iter.bi_sector >> 3) + xdev->offset;
+       bio_for_each_segment(bvec, bio, iter) {
                page_addr = (unsigned long)
-                       kmap(bvec->bv_page) + bvec->bv_offset;
-               bytes = bvec->bv_len;
+                       kmap(bvec.bv_page) + bvec.bv_offset;
+               bytes = bvec.bv_len;
                if ((page_addr & 4095) != 0 || (bytes & 4095) != 0)
                        /* More paranoia. */
                        goto fail;
index 446b851..0cac7d8 100644 (file)
@@ -2163,10 +2163,10 @@ int sas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
        }
 
        /* do we need to support multiple segments? */
-       if (bio_segments(req->bio) > 1 || bio_segments(rsp->bio) > 1) {
-               printk("%s: multiple segments req %u %u, rsp %u %u\n",
-                      __func__, bio_segments(req->bio), blk_rq_bytes(req),
-                      bio_segments(rsp->bio), blk_rq_bytes(rsp));
+       if (bio_multiple_segments(req->bio) ||
+           bio_multiple_segments(rsp->bio)) {
+               printk("%s: multiple segments req %u, rsp %u\n",
+                      __func__, blk_rq_bytes(req), blk_rq_bytes(rsp));
                return -EINVAL;
        }
 
index 9d26637..410f4a3 100644 (file)
@@ -1901,7 +1901,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
        struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
        Mpi2SmpPassthroughRequest_t *mpi_request;
        Mpi2SmpPassthroughReply_t *mpi_reply;
-       int rc, i;
+       int rc;
        u16 smid;
        u32 ioc_state;
        unsigned long timeleft;
@@ -1916,7 +1916,8 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
        void *pci_addr_out = NULL;
        u16 wait_state_count;
        struct request *rsp = req->next_rq;
-       struct bio_vec *bvec = NULL;
+       struct bio_vec bvec;
+       struct bvec_iter iter;
 
        if (!rsp) {
                printk(MPT2SAS_ERR_FMT "%s: the smp response space is "
@@ -1942,7 +1943,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
        ioc->transport_cmds.status = MPT2_CMD_PENDING;
 
        /* Check if the request is split across multiple segments */
-       if (bio_segments(req->bio) > 1) {
+       if (bio_multiple_segments(req->bio)) {
                u32 offset = 0;
 
                /* Allocate memory and copy the request */
@@ -1955,11 +1956,11 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
                        goto out;
                }
 
-               bio_for_each_segment(bvec, req->bio, i) {
+               bio_for_each_segment(bvec, req->bio, iter) {
                        memcpy(pci_addr_out + offset,
-                           page_address(bvec->bv_page) + bvec->bv_offset,
-                           bvec->bv_len);
-                       offset += bvec->bv_len;
+                           page_address(bvec.bv_page) + bvec.bv_offset,
+                           bvec.bv_len);
+                       offset += bvec.bv_len;
                }
        } else {
                dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio),
@@ -1974,7 +1975,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
 
        /* Check if the response needs to be populated across
         * multiple segments */
-       if (bio_segments(rsp->bio) > 1) {
+       if (bio_multiple_segments(rsp->bio)) {
                pci_addr_in = pci_alloc_consistent(ioc->pdev, blk_rq_bytes(rsp),
                    &pci_dma_in);
                if (!pci_addr_in) {
@@ -2041,7 +2042,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
        sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
            MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
        sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
-       if (bio_segments(req->bio) > 1) {
+       if (bio_multiple_segments(req->bio)) {
                ioc->base_add_sg_single(psge, sgl_flags |
                    (blk_rq_bytes(req) - 4), pci_dma_out);
        } else {
@@ -2057,7 +2058,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
            MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
            MPI2_SGE_FLAGS_END_OF_LIST);
        sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
-       if (bio_segments(rsp->bio) > 1) {
+       if (bio_multiple_segments(rsp->bio)) {
                ioc->base_add_sg_single(psge, sgl_flags |
                    (blk_rq_bytes(rsp) + 4), pci_dma_in);
        } else {
@@ -2102,23 +2103,23 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
                    le16_to_cpu(mpi_reply->ResponseDataLength);
                /* check if the resp needs to be copied from the allocated
                 * pci mem */
-               if (bio_segments(rsp->bio) > 1) {
+               if (bio_multiple_segments(rsp->bio)) {
                        u32 offset = 0;
                        u32 bytes_to_copy =
                            le16_to_cpu(mpi_reply->ResponseDataLength);
-                       bio_for_each_segment(bvec, rsp->bio, i) {
-                               if (bytes_to_copy <= bvec->bv_len) {
-                                       memcpy(page_address(bvec->bv_page) +
-                                           bvec->bv_offset, pci_addr_in +
+                       bio_for_each_segment(bvec, rsp->bio, iter) {
+                               if (bytes_to_copy <= bvec.bv_len) {
+                                       memcpy(page_address(bvec.bv_page) +
+                                           bvec.bv_offset, pci_addr_in +
                                            offset, bytes_to_copy);
                                        break;
                                } else {
-                                       memcpy(page_address(bvec->bv_page) +
-                                           bvec->bv_offset, pci_addr_in +
-                                           offset, bvec->bv_len);
-                                       bytes_to_copy -= bvec->bv_len;
+                                       memcpy(page_address(bvec.bv_page) +
+                                           bvec.bv_offset, pci_addr_in +
+                                           offset, bvec.bv_len);
+                                       bytes_to_copy -= bvec.bv_len;
                                }
-                               offset += bvec->bv_len;
+                               offset += bvec.bv_len;
                        }
                }
        } else {
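
The copy-out path above distributes a contiguous PCI reply buffer across the response bio's segments, stopping once bytes_to_copy is exhausted. The same loop shape in plain C, with flat buffers standing in for mapped pages (names here are hypothetical):

    #include <string.h>

    struct seg { char *buf; unsigned len; };

    void scatter_copy(struct seg *segs, unsigned nsegs,
                      const char *src, unsigned bytes_to_copy)
    {
        unsigned offset = 0, i;

        for (i = 0; i < nsegs && bytes_to_copy; i++) {
            if (bytes_to_copy <= segs[i].len) {
                memcpy(segs[i].buf, src + offset, bytes_to_copy);
                break;      /* last, possibly partial, segment */
            }
            memcpy(segs[i].buf, src + offset, segs[i].len);
            bytes_to_copy -= segs[i].len;
            offset += segs[i].len;
        }
    }
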
index e771a88..65170cb 100644 (file)
@@ -1884,7 +1884,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
        struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
        Mpi2SmpPassthroughRequest_t *mpi_request;
        Mpi2SmpPassthroughReply_t *mpi_reply;
-       int rc, i;
+       int rc;
        u16 smid;
        u32 ioc_state;
        unsigned long timeleft;
@@ -1898,7 +1898,8 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
        void *pci_addr_out = NULL;
        u16 wait_state_count;
        struct request *rsp = req->next_rq;
-       struct bio_vec *bvec = NULL;
+       struct bio_vec bvec;
+       struct bvec_iter iter;
 
        if (!rsp) {
                pr_err(MPT3SAS_FMT "%s: the smp response space is missing\n",
@@ -1925,7 +1926,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
        ioc->transport_cmds.status = MPT3_CMD_PENDING;
 
        /* Check if the request is split across multiple segments */
-       if (req->bio->bi_vcnt > 1) {
+       if (bio_multiple_segments(req->bio)) {
                u32 offset = 0;
 
                /* Allocate memory and copy the request */
@@ -1938,11 +1939,11 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
                        goto out;
                }
 
-               bio_for_each_segment(bvec, req->bio, i) {
+               bio_for_each_segment(bvec, req->bio, iter) {
                        memcpy(pci_addr_out + offset,
-                           page_address(bvec->bv_page) + bvec->bv_offset,
-                           bvec->bv_len);
-                       offset += bvec->bv_len;
+                           page_address(bvec.bv_page) + bvec.bv_offset,
+                           bvec.bv_len);
+                       offset += bvec.bv_len;
                }
        } else {
                dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio),
@@ -1957,7 +1958,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
 
        /* Check if the response needs to be populated across
         * multiple segments */
-       if (rsp->bio->bi_vcnt > 1) {
+       if (bio_multiple_segments(rsp->bio)) {
                pci_addr_in = pci_alloc_consistent(ioc->pdev, blk_rq_bytes(rsp),
                    &pci_dma_in);
                if (!pci_addr_in) {
@@ -2018,7 +2019,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
        mpi_request->RequestDataLength = cpu_to_le16(blk_rq_bytes(req) - 4);
        psge = &mpi_request->SGL;
 
-       if (req->bio->bi_vcnt > 1)
+       if (bio_multiple_segments(req->bio))
                ioc->build_sg(ioc, psge, pci_dma_out, (blk_rq_bytes(req) - 4),
                    pci_dma_in, (blk_rq_bytes(rsp) + 4));
        else
@@ -2063,23 +2064,23 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
 
                /* check if the resp needs to be copied from the allocated
                 * pci mem */
-               if (rsp->bio->bi_vcnt > 1) {
+               if (bio_multiple_segments(rsp->bio)) {
                        u32 offset = 0;
                        u32 bytes_to_copy =
                            le16_to_cpu(mpi_reply->ResponseDataLength);
-                       bio_for_each_segment(bvec, rsp->bio, i) {
-                               if (bytes_to_copy <= bvec->bv_len) {
-                                       memcpy(page_address(bvec->bv_page) +
-                                           bvec->bv_offset, pci_addr_in +
+                       bio_for_each_segment(bvec, rsp->bio, iter) {
+                               if (bytes_to_copy <= bvec.bv_len) {
+                                       memcpy(page_address(bvec.bv_page) +
+                                           bvec.bv_offset, pci_addr_in +
                                            offset, bytes_to_copy);
                                        break;
                                } else {
-                                       memcpy(page_address(bvec->bv_page) +
-                                           bvec->bv_offset, pci_addr_in +
-                                           offset, bvec->bv_len);
-                                       bytes_to_copy -= bvec->bv_len;
+                                       memcpy(page_address(bvec.bv_page) +
+                                           bvec.bv_offset, pci_addr_in +
+                                           offset, bvec.bv_len);
+                                       bytes_to_copy -= bvec.bv_len;
                                }
-                               offset += bvec->bv_len;
+                               offset += bvec.bv_len;
                        }
                }
        } else {
index aa66361..bac04c2 100644 (file)
@@ -731,7 +731,7 @@ static int _osd_req_list_objects(struct osd_request *or,
 
        bio->bi_rw &= ~REQ_WRITE;
        or->in.bio = bio;
-       or->in.total_bytes = bio->bi_size;
+       or->in.total_bytes = bio->bi_iter.bi_size;
        return 0;
 }
 
index 9846c6a..470954a 100644 (file)
@@ -801,7 +801,7 @@ static int sd_setup_write_same_cmnd(struct scsi_device *sdp, struct request *rq)
        if (sdkp->device->no_write_same)
                return BLKPREP_KILL;
 
-       BUG_ON(bio_offset(bio) || bio_iovec(bio)->bv_len != sdp->sector_size);
+       BUG_ON(bio_offset(bio) || bio_iovec(bio).bv_len != sdp->sector_size);
 
        sector >>= ilog2(sdp->sector_size) - 9;
        nr_sectors >>= ilog2(sdp->sector_size) - 9;
index 6174ca4..a7a691d 100644 (file)
@@ -365,7 +365,6 @@ void sd_dif_prepare(struct request *rq, sector_t hw_sector,
        struct bio *bio;
        struct scsi_disk *sdkp;
        struct sd_dif_tuple *sdt;
-       unsigned int i, j;
        u32 phys, virt;
 
        sdkp = rq->bio->bi_bdev->bd_disk->private_data;
@@ -376,19 +375,21 @@ void sd_dif_prepare(struct request *rq, sector_t hw_sector,
        phys = hw_sector & 0xffffffff;
 
        __rq_for_each_bio(bio, rq) {
-               struct bio_vec *iv;
+               struct bio_vec iv;
+               struct bvec_iter iter;
+               unsigned int j;
 
                /* Already remapped? */
                if (bio_flagged(bio, BIO_MAPPED_INTEGRITY))
                        break;
 
-               virt = bio->bi_integrity->bip_sector & 0xffffffff;
+               virt = bio->bi_integrity->bip_iter.bi_sector & 0xffffffff;
 
-               bip_for_each_vec(iv, bio->bi_integrity, i) {
-                       sdt = kmap_atomic(iv->bv_page)
-                               + iv->bv_offset;
+               bip_for_each_vec(iv, bio->bi_integrity, iter) {
+                       sdt = kmap_atomic(iv.bv_page)
+                               + iv.bv_offset;
 
-                       for (j = 0 ; j < iv->bv_len ; j += tuple_sz, sdt++) {
+                       for (j = 0; j < iv.bv_len; j += tuple_sz, sdt++) {
 
                                if (be32_to_cpu(sdt->ref_tag) == virt)
                                        sdt->ref_tag = cpu_to_be32(phys);
@@ -414,7 +415,7 @@ void sd_dif_complete(struct scsi_cmnd *scmd, unsigned int good_bytes)
        struct scsi_disk *sdkp;
        struct bio *bio;
        struct sd_dif_tuple *sdt;
-       unsigned int i, j, sectors, sector_sz;
+       unsigned int j, sectors, sector_sz;
        u32 phys, virt;
 
        sdkp = scsi_disk(scmd->request->rq_disk);
@@ -430,15 +431,16 @@ void sd_dif_complete(struct scsi_cmnd *scmd, unsigned int good_bytes)
                phys >>= 3;
 
        __rq_for_each_bio(bio, scmd->request) {
-               struct bio_vec *iv;
+               struct bio_vec iv;
+               struct bvec_iter iter;
 
-               virt = bio->bi_integrity->bip_sector & 0xffffffff;
+               virt = bio->bi_integrity->bip_iter.bi_sector & 0xffffffff;
 
-               bip_for_each_vec(iv, bio->bi_integrity, i) {
-                       sdt = kmap_atomic(iv->bv_page)
-                               + iv->bv_offset;
+               bip_for_each_vec(iv, bio->bi_integrity, iter) {
+                       sdt = kmap_atomic(iv.bv_page)
+                               + iv.bv_offset;
 
-                       for (j = 0 ; j < iv->bv_len ; j += tuple_sz, sdt++) {
+                       for (j = 0; j < iv.bv_len; j += tuple_sz, sdt++) {
 
                                if (sectors == 0) {
                                        kunmap_atomic(sdt);
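
sd_dif walks the protection buffer one tuple at a time per bvec; the remap swaps the virtual reference tag seen by the upper layers for the physical LBA before the command goes out. A reduced model of the inner loop (host-endian fields instead of the on-wire big-endian, simplified tuple layout):

    #include <stdint.h>

    struct dif_tuple { uint16_t guard; uint16_t app; uint32_t ref_tag; };

    void remap_ref_tags(struct dif_tuple *sdt, unsigned bv_len,
                        unsigned tuple_sz, uint32_t *virt, uint32_t *phys)
    {
        unsigned j;

        for (j = 0; j < bv_len; j += tuple_sz, sdt++) {
            if (sdt->ref_tag == *virt)  /* only remap matching tags */
                sdt->ref_tag = *phys;
            (*virt)++;
            (*phys)++;
        }
    }
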
index 5338e8d..0718905 100644 (file)
@@ -194,10 +194,10 @@ static int do_bio_lustrebacked(struct lloop_device *lo, struct bio *head)
        struct cl_object     *obj = ll_i2info(inode)->lli_clob;
        pgoff_t        offset;
        int                ret;
-       int                i;
        int                rw;
        obd_count            page_count = 0;
-       struct bio_vec       *bvec;
+       struct bio_vec       bvec;
+       struct bvec_iter   iter;
        struct bio         *bio;
        ssize_t        bytes;
 
@@ -220,15 +220,15 @@ static int do_bio_lustrebacked(struct lloop_device *lo, struct bio *head)
        for (bio = head; bio != NULL; bio = bio->bi_next) {
                LASSERT(rw == bio->bi_rw);
 
-               offset = (pgoff_t)(bio->bi_sector << 9) + lo->lo_offset;
-               bio_for_each_segment(bvec, bio, i) {
-                       BUG_ON(bvec->bv_offset != 0);
-                       BUG_ON(bvec->bv_len != PAGE_CACHE_SIZE);
+               offset = (pgoff_t)(bio->bi_iter.bi_sector << 9) + lo->lo_offset;
+               bio_for_each_segment(bvec, bio, iter) {
+                       BUG_ON(bvec.bv_offset != 0);
+                       BUG_ON(bvec.bv_len != PAGE_CACHE_SIZE);
 
-                       pages[page_count] = bvec->bv_page;
+                       pages[page_count] = bvec.bv_page;
                        offsets[page_count] = offset;
                        page_count++;
-                       offset += bvec->bv_len;
+                       offset += bvec.bv_len;
                }
                LASSERT(page_count <= LLOOP_MAX_SEGMENTS);
        }
@@ -313,7 +313,8 @@ static unsigned int loop_get_bio(struct lloop_device *lo, struct bio **req)
        bio = &lo->lo_bio;
        while (*bio && (*bio)->bi_rw == rw) {
                CDEBUG(D_INFO, "bio sector %llu size %u count %u vcnt %u\n",
-                      (unsigned long long)(*bio)->bi_sector, (*bio)->bi_size,
+                      (unsigned long long)(*bio)->bi_iter.bi_sector,
+                      (*bio)->bi_iter.bi_size,
                       page_count, (*bio)->bi_vcnt);
                if (page_count + (*bio)->bi_vcnt > LLOOP_MAX_SEGMENTS)
                        break;
@@ -347,7 +348,8 @@ static void loop_make_request(struct request_queue *q, struct bio *old_bio)
                goto err;
 
        CDEBUG(D_INFO, "submit bio sector %llu size %u\n",
-              (unsigned long long)old_bio->bi_sector, old_bio->bi_size);
+              (unsigned long long)old_bio->bi_iter.bi_sector,
+              old_bio->bi_iter.bi_size);
 
        spin_lock_irq(&lo->lo_lock);
        inactive = (lo->lo_state != LLOOP_BOUND);
@@ -367,7 +369,7 @@ static void loop_make_request(struct request_queue *q, struct bio *old_bio)
        loop_add_bio(lo, old_bio);
        return;
 err:
-       cfs_bio_io_error(old_bio, old_bio->bi_size);
+       cfs_bio_io_error(old_bio, old_bio->bi_iter.bi_size);
 }
 
 
@@ -378,7 +380,7 @@ static inline void loop_handle_bio(struct lloop_device *lo, struct bio *bio)
        while (bio) {
                struct bio *tmp = bio->bi_next;
                bio->bi_next = NULL;
-               cfs_bio_endio(bio, bio->bi_size, ret);
+               cfs_bio_endio(bio, bio->bi_iter.bi_size, ret);
                bio = tmp;
        }
 }
index 3277d98..108f273 100644 (file)
@@ -171,13 +171,14 @@ static inline int valid_io_request(struct zram *zram, struct bio *bio)
        u64 start, end, bound;
 
        /* unaligned request */
-       if (unlikely(bio->bi_sector & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
+       if (unlikely(bio->bi_iter.bi_sector &
+                    (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
                return 0;
-       if (unlikely(bio->bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
+       if (unlikely(bio->bi_iter.bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
                return 0;
 
-       start = bio->bi_sector;
-       end = start + (bio->bi_size >> SECTOR_SHIFT);
+       start = bio->bi_iter.bi_sector;
+       end = start + (bio->bi_iter.bi_size >> SECTOR_SHIFT);
        bound = zram->disksize >> SECTOR_SHIFT;
        /* out of range */
        if (unlikely(start >= bound || end > bound || start > end))
@@ -680,9 +681,10 @@ out:
 
 static void __zram_make_request(struct zram *zram, struct bio *bio, int rw)
 {
-       int i, offset;
+       int offset;
        u32 index;
-       struct bio_vec *bvec;
+       struct bio_vec bvec;
+       struct bvec_iter iter;
 
        switch (rw) {
        case READ:
@@ -693,36 +695,37 @@ static void __zram_make_request(struct zram *zram, struct bio *bio, int rw)
                break;
        }
 
-       index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;
-       offset = (bio->bi_sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;
+       index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
+       offset = (bio->bi_iter.bi_sector &
+                 (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;
 
-       bio_for_each_segment(bvec, bio, i) {
+       bio_for_each_segment(bvec, bio, iter) {
                int max_transfer_size = PAGE_SIZE - offset;
 
-               if (bvec->bv_len > max_transfer_size) {
+               if (bvec.bv_len > max_transfer_size) {
                        /*
                         * zram_bvec_rw() can only make operation on a single
                         * zram page. Split the bio vector.
                         */
                        struct bio_vec bv;
 
-                       bv.bv_page = bvec->bv_page;
+                       bv.bv_page = bvec.bv_page;
                        bv.bv_len = max_transfer_size;
-                       bv.bv_offset = bvec->bv_offset;
+                       bv.bv_offset = bvec.bv_offset;
 
                        if (zram_bvec_rw(zram, &bv, index, offset, bio, rw) < 0)
                                goto out;
 
-                       bv.bv_len = bvec->bv_len - max_transfer_size;
+                       bv.bv_len = bvec.bv_len - max_transfer_size;
                        bv.bv_offset += max_transfer_size;
                        if (zram_bvec_rw(zram, &bv, index+1, 0, bio, rw) < 0)
                                goto out;
                } else
-                       if (zram_bvec_rw(zram, bvec, index, offset, bio, rw)
+                       if (zram_bvec_rw(zram, &bvec, index, offset, bio, rw)
                            < 0)
                                goto out;
 
-               update_position(&index, &offset, bvec);
+               update_position(&index, &offset, &bvec);
        }
 
        set_bit(BIO_UPTODATE, &bio->bi_flags);
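
Because zram_bvec_rw() operates on one zram page at a time, a segment that straddles a page boundary is split into two stack-local bvecs before dispatch, exactly as above. The split in isolation (user-space types; max is the number of bytes that still fit in the current page):

    struct bio_vec { void *bv_page; unsigned bv_len; unsigned bv_offset; };

    void split_bvec(const struct bio_vec *bvec, unsigned max,
                    struct bio_vec *first, struct bio_vec *second)
    {
        *first = *bvec;
        first->bv_len = max;            /* fills the current page */

        *second = *bvec;
        second->bv_offset += max;       /* remainder starts in the next page */
        second->bv_len -= max;
    }
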
index c87959f..2d29356 100644 (file)
@@ -319,7 +319,7 @@ iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num)
        bio->bi_bdev = ib_dev->ibd_bd;
        bio->bi_private = cmd;
        bio->bi_end_io = &iblock_bio_done;
-       bio->bi_sector = lba;
+       bio->bi_iter.bi_sector = lba;
 
        return bio;
 }
index fc60b31..0bad24d 100644 (file)
@@ -134,8 +134,7 @@ int bio_integrity_add_page(struct bio *bio, struct page *page,
                return 0;
        }
 
-       iv = bip_vec_idx(bip, bip->bip_vcnt);
-       BUG_ON(iv == NULL);
+       iv = bip->bip_vec + bip->bip_vcnt;
 
        iv->bv_page = page;
        iv->bv_len = len;
@@ -203,6 +202,12 @@ static inline unsigned int bio_integrity_hw_sectors(struct blk_integrity *bi,
        return sectors;
 }
 
+static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
+                                              unsigned int sectors)
+{
+       return bio_integrity_hw_sectors(bi, sectors) * bi->tuple_size;
+}
+
 /**
  * bio_integrity_tag_size - Retrieve integrity tag space
  * @bio:       bio to inspect
@@ -215,9 +220,9 @@ unsigned int bio_integrity_tag_size(struct bio *bio)
 {
        struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
 
-       BUG_ON(bio->bi_size == 0);
+       BUG_ON(bio->bi_iter.bi_size == 0);
 
-       return bi->tag_size * (bio->bi_size / bi->sector_size);
+       return bi->tag_size * (bio->bi_iter.bi_size / bi->sector_size);
 }
 EXPORT_SYMBOL(bio_integrity_tag_size);
 
@@ -235,9 +240,9 @@ int bio_integrity_tag(struct bio *bio, void *tag_buf, unsigned int len, int set)
        nr_sectors = bio_integrity_hw_sectors(bi,
                                        DIV_ROUND_UP(len, bi->tag_size));
 
-       if (nr_sectors * bi->tuple_size > bip->bip_size) {
-               printk(KERN_ERR "%s: tag too big for bio: %u > %u\n",
-                      __func__, nr_sectors * bi->tuple_size, bip->bip_size);
+       if (nr_sectors * bi->tuple_size > bip->bip_iter.bi_size) {
+               printk(KERN_ERR "%s: tag too big for bio: %u > %u\n", __func__,
+                      nr_sectors * bi->tuple_size, bip->bip_iter.bi_size);
                return -1;
        }
 
@@ -299,29 +304,30 @@ static void bio_integrity_generate(struct bio *bio)
 {
        struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
        struct blk_integrity_exchg bix;
-       struct bio_vec *bv;
-       sector_t sector = bio->bi_sector;
-       unsigned int i, sectors, total;
+       struct bio_vec bv;
+       struct bvec_iter iter;
+       sector_t sector = bio->bi_iter.bi_sector;
+       unsigned int sectors, total;
        void *prot_buf = bio->bi_integrity->bip_buf;
 
        total = 0;
        bix.disk_name = bio->bi_bdev->bd_disk->disk_name;
        bix.sector_size = bi->sector_size;
 
-       bio_for_each_segment(bv, bio, i) {
-               void *kaddr = kmap_atomic(bv->bv_page);
-               bix.data_buf = kaddr + bv->bv_offset;
-               bix.data_size = bv->bv_len;
+       bio_for_each_segment(bv, bio, iter) {
+               void *kaddr = kmap_atomic(bv.bv_page);
+               bix.data_buf = kaddr + bv.bv_offset;
+               bix.data_size = bv.bv_len;
                bix.prot_buf = prot_buf;
                bix.sector = sector;
 
                bi->generate_fn(&bix);
 
-               sectors = bv->bv_len / bi->sector_size;
+               sectors = bv.bv_len / bi->sector_size;
                sector += sectors;
                prot_buf += sectors * bi->tuple_size;
                total += sectors * bi->tuple_size;
-               BUG_ON(total > bio->bi_integrity->bip_size);
+               BUG_ON(total > bio->bi_integrity->bip_iter.bi_size);
 
                kunmap_atomic(kaddr);
        }
@@ -386,8 +392,8 @@ int bio_integrity_prep(struct bio *bio)
 
        bip->bip_owns_buf = 1;
        bip->bip_buf = buf;
-       bip->bip_size = len;
-       bip->bip_sector = bio->bi_sector;
+       bip->bip_iter.bi_size = len;
+       bip->bip_iter.bi_sector = bio->bi_iter.bi_sector;
 
        /* Map it */
        offset = offset_in_page(buf);
@@ -442,16 +448,18 @@ static int bio_integrity_verify(struct bio *bio)
        struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
        struct blk_integrity_exchg bix;
        struct bio_vec *bv;
-       sector_t sector = bio->bi_integrity->bip_sector;
-       unsigned int i, sectors, total, ret;
+       sector_t sector = bio->bi_integrity->bip_iter.bi_sector;
+       unsigned int sectors, total, ret;
        void *prot_buf = bio->bi_integrity->bip_buf;
+       int i;
 
        ret = total = 0;
        bix.disk_name = bio->bi_bdev->bd_disk->disk_name;
        bix.sector_size = bi->sector_size;
 
-       bio_for_each_segment(bv, bio, i) {
+       bio_for_each_segment_all(bv, bio, i) {
                void *kaddr = kmap_atomic(bv->bv_page);
+
                bix.data_buf = kaddr + bv->bv_offset;
                bix.data_size = bv->bv_len;
                bix.prot_buf = prot_buf;
@@ -468,7 +476,7 @@ static int bio_integrity_verify(struct bio *bio)
                sector += sectors;
                prot_buf += sectors * bi->tuple_size;
                total += sectors * bi->tuple_size;
-               BUG_ON(total > bio->bi_integrity->bip_size);
+               BUG_ON(total > bio->bi_integrity->bip_iter.bi_size);
 
                kunmap_atomic(kaddr);
        }
@@ -495,7 +503,7 @@ static void bio_integrity_verify_fn(struct work_struct *work)
 
        /* Restore original bio completion handler */
        bio->bi_end_io = bip->bip_end_io;
-       bio_endio(bio, error);
+       bio_endio_nodec(bio, error);
 }
 
 /**
@@ -532,56 +540,6 @@ void bio_integrity_endio(struct bio *bio, int error)
 }
 EXPORT_SYMBOL(bio_integrity_endio);
 
-/**
- * bio_integrity_mark_head - Advance bip_vec skip bytes
- * @bip:       Integrity vector to advance
- * @skip:      Number of bytes to advance it
- */
-void bio_integrity_mark_head(struct bio_integrity_payload *bip,
-                            unsigned int skip)
-{
-       struct bio_vec *iv;
-       unsigned int i;
-
-       bip_for_each_vec(iv, bip, i) {
-               if (skip == 0) {
-                       bip->bip_idx = i;
-                       return;
-               } else if (skip >= iv->bv_len) {
-                       skip -= iv->bv_len;
-               } else { /* skip < iv->bv_len) */
-                       iv->bv_offset += skip;
-                       iv->bv_len -= skip;
-                       bip->bip_idx = i;
-                       return;
-               }
-       }
-}
-
-/**
- * bio_integrity_mark_tail - Truncate bip_vec to be len bytes long
- * @bip:       Integrity vector to truncate
- * @len:       New length of integrity vector
- */
-void bio_integrity_mark_tail(struct bio_integrity_payload *bip,
-                            unsigned int len)
-{
-       struct bio_vec *iv;
-       unsigned int i;
-
-       bip_for_each_vec(iv, bip, i) {
-               if (len == 0) {
-                       bip->bip_vcnt = i;
-                       return;
-               } else if (len >= iv->bv_len) {
-                       len -= iv->bv_len;
-               } else { /* len < iv->bv_len) */
-                       iv->bv_len = len;
-                       len = 0;
-               }
-       }
-}
-
 /**
  * bio_integrity_advance - Advance integrity vector
  * @bio:       bio whose integrity vector to update
@@ -595,13 +553,9 @@ void bio_integrity_advance(struct bio *bio, unsigned int bytes_done)
 {
        struct bio_integrity_payload *bip = bio->bi_integrity;
        struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
-       unsigned int nr_sectors;
+       unsigned bytes = bio_integrity_bytes(bi, bytes_done >> 9);
 
-       BUG_ON(bip == NULL);
-       BUG_ON(bi == NULL);
-
-       nr_sectors = bio_integrity_hw_sectors(bi, bytes_done >> 9);
-       bio_integrity_mark_head(bip, nr_sectors * bi->tuple_size);
+       bvec_iter_advance(bip->bip_vec, &bip->bip_iter, bytes);
 }
 EXPORT_SYMBOL(bio_integrity_advance);
 
@@ -621,63 +575,12 @@ void bio_integrity_trim(struct bio *bio, unsigned int offset,
 {
        struct bio_integrity_payload *bip = bio->bi_integrity;
        struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
-       unsigned int nr_sectors;
-
-       BUG_ON(bip == NULL);
-       BUG_ON(bi == NULL);
-       BUG_ON(!bio_flagged(bio, BIO_CLONED));
 
-       nr_sectors = bio_integrity_hw_sectors(bi, sectors);
-       bip->bip_sector = bip->bip_sector + offset;
-       bio_integrity_mark_head(bip, offset * bi->tuple_size);
-       bio_integrity_mark_tail(bip, sectors * bi->tuple_size);
+       bio_integrity_advance(bio, offset << 9);
+       bip->bip_iter.bi_size = bio_integrity_bytes(bi, sectors);
 }
 EXPORT_SYMBOL(bio_integrity_trim);
 
-/**
- * bio_integrity_split - Split integrity metadata
- * @bio:       Protected bio
- * @bp:                Resulting bio_pair
- * @sectors:   Offset
- *
- * Description: Splits an integrity page into a bio_pair.
- */
-void bio_integrity_split(struct bio *bio, struct bio_pair *bp, int sectors)
-{
-       struct blk_integrity *bi;
-       struct bio_integrity_payload *bip = bio->bi_integrity;
-       unsigned int nr_sectors;
-
-       if (bio_integrity(bio) == 0)
-               return;
-
-       bi = bdev_get_integrity(bio->bi_bdev);
-       BUG_ON(bi == NULL);
-       BUG_ON(bip->bip_vcnt != 1);
-
-       nr_sectors = bio_integrity_hw_sectors(bi, sectors);
-
-       bp->bio1.bi_integrity = &bp->bip1;
-       bp->bio2.bi_integrity = &bp->bip2;
-
-       bp->iv1 = bip->bip_vec[bip->bip_idx];
-       bp->iv2 = bip->bip_vec[bip->bip_idx];
-
-       bp->bip1.bip_vec = &bp->iv1;
-       bp->bip2.bip_vec = &bp->iv2;
-
-       bp->iv1.bv_len = sectors * bi->tuple_size;
-       bp->iv2.bv_offset += sectors * bi->tuple_size;
-       bp->iv2.bv_len -= sectors * bi->tuple_size;
-
-       bp->bip1.bip_sector = bio->bi_integrity->bip_sector;
-       bp->bip2.bip_sector = bio->bi_integrity->bip_sector + nr_sectors;
-
-       bp->bip1.bip_vcnt = bp->bip2.bip_vcnt = 1;
-       bp->bip1.bip_idx = bp->bip2.bip_idx = 0;
-}
-EXPORT_SYMBOL(bio_integrity_split);
-
 /**
  * bio_integrity_clone - Callback for cloning bios with integrity metadata
  * @bio:       New bio
@@ -702,9 +605,8 @@ int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
        memcpy(bip->bip_vec, bip_src->bip_vec,
               bip_src->bip_vcnt * sizeof(struct bio_vec));
 
-       bip->bip_sector = bip_src->bip_sector;
        bip->bip_vcnt = bip_src->bip_vcnt;
-       bip->bip_idx = bip_src->bip_idx;
+       bip->bip_iter = bip_src->bip_iter;
 
        return 0;
 }
index 33d79a4..75c49a3 100644 (file)
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -38,8 +38,6 @@
  */
 #define BIO_INLINE_VECS                4
 
-static mempool_t *bio_split_pool __read_mostly;
-
 /*
  * if you change this list, also change bvec_alloc or things will
  * break badly! cannot be bigger than what you can fit into an
@@ -273,6 +271,7 @@ void bio_init(struct bio *bio)
 {
        memset(bio, 0, sizeof(*bio));
        bio->bi_flags = 1 << BIO_UPTODATE;
+       atomic_set(&bio->bi_remaining, 1);
        atomic_set(&bio->bi_cnt, 1);
 }
 EXPORT_SYMBOL(bio_init);
@@ -295,9 +294,35 @@ void bio_reset(struct bio *bio)
 
        memset(bio, 0, BIO_RESET_BYTES);
        bio->bi_flags = flags|(1 << BIO_UPTODATE);
+       atomic_set(&bio->bi_remaining, 1);
 }
 EXPORT_SYMBOL(bio_reset);
 
+static void bio_chain_endio(struct bio *bio, int error)
+{
+       bio_endio(bio->bi_private, error);
+       bio_put(bio);
+}
+
+/**
+ * bio_chain - chain bio completions
+ *
+ * The caller won't have a bi_end_io called when @bio completes - instead,
+ * @parent's bi_end_io won't be called until both @parent and @bio have
+ * completed; the chained bio will also be freed when it completes.
+ *
+ * The caller must not set bi_private or bi_end_io in @bio.
+ */
+void bio_chain(struct bio *bio, struct bio *parent)
+{
+       BUG_ON(bio->bi_private || bio->bi_end_io);
+
+       bio->bi_private = parent;
+       bio->bi_end_io  = bio_chain_endio;
+       atomic_inc(&parent->bi_remaining);
+}
+EXPORT_SYMBOL(bio_chain);
+
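
bio_chain() builds on the new bi_remaining count: the parent starts at one, each chained child adds one, and the end-io callback only fires when the count drops to zero, so completions may arrive in any order. A user-space model of the counting (C11 atomics standing in for the kernel's atomic_t; the real bio_endio also propagates errors and frees the chained bio):

    #include <stdatomic.h>
    #include <stdio.h>

    struct bio {
        atomic_int bi_remaining;
        struct bio *parent;     /* stands in for bi_private */
    };

    static void bio_endio(struct bio *bio)
    {
        if (atomic_fetch_sub(&bio->bi_remaining, 1) == 1)
            printf("completion delivered\n");   /* would call bi_end_io */
    }

    static void bio_chain(struct bio *child, struct bio *parent)
    {
        child->parent = parent;
        atomic_fetch_add(&parent->bi_remaining, 1);
    }

    int main(void)
    {
        struct bio parent = { 1, NULL }, child = { 1, NULL };

        bio_chain(&child, &parent);
        bio_endio(child.parent); /* child finished: parent still pending */
        bio_endio(&parent);      /* parent's own I/O done: fires now */
        return 0;
    }
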
 static void bio_alloc_rescue(struct work_struct *work)
 {
        struct bio_set *bs = container_of(work, struct bio_set, rescue_work);
@@ -473,13 +498,13 @@ EXPORT_SYMBOL(bio_alloc_bioset);
 void zero_fill_bio(struct bio *bio)
 {
        unsigned long flags;
-       struct bio_vec *bv;
-       int i;
+       struct bio_vec bv;
+       struct bvec_iter iter;
 
-       bio_for_each_segment(bv, bio, i) {
-               char *data = bvec_kmap_irq(bv, &flags);
-               memset(data, 0, bv->bv_len);
-               flush_dcache_page(bv->bv_page);
+       bio_for_each_segment(bv, bio, iter) {
+               char *data = bvec_kmap_irq(&bv, &flags);
+               memset(data, 0, bv.bv_len);
+               flush_dcache_page(bv.bv_page);
                bvec_kunmap_irq(data, &flags);
        }
 }
@@ -515,51 +540,49 @@ inline int bio_phys_segments(struct request_queue *q, struct bio *bio)
 EXPORT_SYMBOL(bio_phys_segments);
 
 /**
- *     __bio_clone     -       clone a bio
+ *     __bio_clone_fast - clone a bio that shares the original bio's biovec
  *     @bio: destination bio
  *     @bio_src: bio to clone
  *
  *     Clone a &bio. Caller will own the returned bio, but not
  *     the actual data it points to. Reference count of returned
  *     bio will be one.
+ *
+ *     Caller must ensure that @bio_src is not freed before @bio.
  */
-void __bio_clone(struct bio *bio, struct bio *bio_src)
+void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
 {
-       memcpy(bio->bi_io_vec, bio_src->bi_io_vec,
-               bio_src->bi_max_vecs * sizeof(struct bio_vec));
+       BUG_ON(bio->bi_pool && BIO_POOL_IDX(bio) != BIO_POOL_NONE);
 
        /*
         * most users will be overriding ->bi_bdev with a new target,
         * so we don't set nor calculate new physical/hw segment counts here
         */
-       bio->bi_sector = bio_src->bi_sector;
        bio->bi_bdev = bio_src->bi_bdev;
        bio->bi_flags |= 1 << BIO_CLONED;
        bio->bi_rw = bio_src->bi_rw;
-       bio->bi_vcnt = bio_src->bi_vcnt;
-       bio->bi_size = bio_src->bi_size;
-       bio->bi_idx = bio_src->bi_idx;
+       bio->bi_iter = bio_src->bi_iter;
+       bio->bi_io_vec = bio_src->bi_io_vec;
 }
-EXPORT_SYMBOL(__bio_clone);
+EXPORT_SYMBOL(__bio_clone_fast);
 
 /**
- *     bio_clone_bioset -      clone a bio
+ *     bio_clone_fast - clone a bio that shares the original bio's biovec
  *     @bio: bio to clone
  *     @gfp_mask: allocation priority
  *     @bs: bio_set to allocate from
  *
- *     Like __bio_clone, only also allocates the returned bio
+ *     Like __bio_clone_fast, only also allocates the returned bio
  */
-struct bio *bio_clone_bioset(struct bio *bio, gfp_t gfp_mask,
-                            struct bio_set *bs)
+struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs)
 {
        struct bio *b;
 
-       b = bio_alloc_bioset(gfp_mask, bio->bi_max_vecs, bs);
+       b = bio_alloc_bioset(gfp_mask, 0, bs);
        if (!b)
                return NULL;
 
-       __bio_clone(b, bio);
+       __bio_clone_fast(b, bio);
 
        if (bio_integrity(bio)) {
                int ret;
@@ -574,6 +597,74 @@ struct bio *bio_clone_bioset(struct bio *bio, gfp_t gfp_mask,
 
        return b;
 }
+EXPORT_SYMBOL(bio_clone_fast);
+
+/**
+ *     bio_clone_bioset - clone a bio
+ *     @bio_src: bio to clone
+ *     @gfp_mask: allocation priority
+ *     @bs: bio_set to allocate from
+ *
+ *     Clone bio. Caller will own the returned bio, but not the actual data it
+ *     points to. Reference count of returned bio will be one.
+ */
+struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
+                            struct bio_set *bs)
+{
+       unsigned nr_iovecs = 0;
+       struct bvec_iter iter;
+       struct bio_vec bv;
+       struct bio *bio;
+
+       /*
+        * Pre immutable biovecs, __bio_clone() used to just do a memcpy from
+        * bio_src->bi_io_vec to bio->bi_io_vec.
+        *
+        * We can't do that anymore, because:
+        *
+        *  - The point of cloning the biovec is to produce a bio with a biovec
+        *    the caller can modify: bi_idx and bi_bvec_done should be 0.
+        *
+        *  - The original bio could've had more than BIO_MAX_PAGES biovecs; if
+        *    we tried to clone the whole thing bio_alloc_bioset() would fail.
+        *    But the clone should succeed as long as the number of biovecs we
+        *    actually need to allocate is fewer than BIO_MAX_PAGES.
+        *
+        *  - Lastly, bi_vcnt should not be looked at or relied upon by code
+        *    that does not own the bio - reason being drivers don't use it for
+        *    iterating over the biovec anymore, so expecting it to be kept up
+        *    to date (i.e. for clones that share the parent biovec) is just
+        *    asking for trouble and would force extra work on
+        *    __bio_clone_fast() anyways.
+        */
+
+       bio_for_each_segment(bv, bio_src, iter)
+               nr_iovecs++;
+
+       bio = bio_alloc_bioset(gfp_mask, nr_iovecs, bs);
+       if (!bio)
+               return NULL;
+
+       bio->bi_bdev            = bio_src->bi_bdev;
+       bio->bi_rw              = bio_src->bi_rw;
+       bio->bi_iter.bi_sector  = bio_src->bi_iter.bi_sector;
+       bio->bi_iter.bi_size    = bio_src->bi_iter.bi_size;
+
+       bio_for_each_segment(bv, bio_src, iter)
+               bio->bi_io_vec[bio->bi_vcnt++] = bv;
+
+       if (bio_integrity(bio_src)) {
+               int ret;
+
+               ret = bio_integrity_clone(bio, bio_src, gfp_mask);
+               if (ret < 0) {
+                       bio_put(bio);
+                       return NULL;
+               }
+       }
+
+       return bio;
+}
 EXPORT_SYMBOL(bio_clone_bioset);
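
The comment block above is the heart of the change: a clone must count the segments the source iterator will still visit rather than trusting bi_vcnt, because the source bio may be partially advanced or sharing its biovec. A user-space model of that counting pass (simplified structures; mirrors what the first bio_for_each_segment loop in the new bio_clone_bioset() computes):

    #include <stdio.h>

    struct bio_vec  { unsigned bv_len; };
    struct bvec_iter { unsigned bi_size; unsigned bi_idx; unsigned bi_bvec_done; };

    /* Count the segments the iterator will still visit. */
    static unsigned remaining_segments(const struct bio_vec *bv,
                                       struct bvec_iter iter)
    {
        unsigned n = 0;

        while (iter.bi_size) {
            unsigned left = bv[iter.bi_idx].bv_len - iter.bi_bvec_done;
            unsigned step = left < iter.bi_size ? left : iter.bi_size;

            iter.bi_size -= step;
            iter.bi_idx++;
            iter.bi_bvec_done = 0;
            n++;
        }
        return n;
    }

    int main(void)
    {
        struct bio_vec vecs[] = { { 4096 }, { 4096 }, { 4096 } };
        /* Iterator already advanced 2048 bytes into the first bvec. */
        struct bvec_iter iter = { 10240, 0, 2048 };

        printf("segments to clone: %u\n", remaining_segments(vecs, iter));
        /* -> 3: the tail of vec 0, all of vecs 1 and 2 */
        return 0;
    }
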
 
 /**
@@ -612,7 +703,7 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
        if (unlikely(bio_flagged(bio, BIO_CLONED)))
                return 0;
 
-       if (((bio->bi_size + len) >> 9) > max_sectors)
+       if (((bio->bi_iter.bi_size + len) >> 9) > max_sectors)
                return 0;
 
        /*
@@ -635,8 +726,9 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
                                           simulate merging updated prev_bvec
                                           as new bvec. */
                                        .bi_bdev = bio->bi_bdev,
-                                       .bi_sector = bio->bi_sector,
-                                       .bi_size = bio->bi_size - prev_bv_len,
+                                       .bi_sector = bio->bi_iter.bi_sector,
+                                       .bi_size = bio->bi_iter.bi_size -
+                                               prev_bv_len,
                                        .bi_rw = bio->bi_rw,
                                };
 
@@ -684,8 +776,8 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
        if (q->merge_bvec_fn) {
                struct bvec_merge_data bvm = {
                        .bi_bdev = bio->bi_bdev,
-                       .bi_sector = bio->bi_sector,
-                       .bi_size = bio->bi_size,
+                       .bi_sector = bio->bi_iter.bi_sector,
+                       .bi_size = bio->bi_iter.bi_size,
                        .bi_rw = bio->bi_rw,
                };
 
@@ -708,7 +800,7 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
        bio->bi_vcnt++;
        bio->bi_phys_segments++;
  done:
-       bio->bi_size += len;
+       bio->bi_iter.bi_size += len;
        return len;
 }
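
[Editor's note: an aside on the return convention visible above. __bio_add_page() (and the bio_add_page() wrapper) returns the number of bytes added, with 0 meaning the page could not be merged. A hedged caller-side sketch, assuming <linux/bio.h>:]

#include <linux/bio.h>

/* Sketch: try to add a page; a short return means the bio is full
 * (or the queue's merge rules refused), so the caller should submit
 * the current bio and start a new one. */
static int try_add_page(struct bio *bio, struct page *page, unsigned len)
{
	if (bio_add_page(bio, page, len, 0) < len)
		return -EAGAIN;	/* submit 'bio' and allocate a fresh one */
	return 0;
}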
 
@@ -807,28 +899,7 @@ void bio_advance(struct bio *bio, unsigned bytes)
        if (bio_integrity(bio))
                bio_integrity_advance(bio, bytes);
 
-       bio->bi_sector += bytes >> 9;
-       bio->bi_size -= bytes;
-
-       if (bio->bi_rw & BIO_NO_ADVANCE_ITER_MASK)
-               return;
-
-       while (bytes) {
-               if (unlikely(bio->bi_idx >= bio->bi_vcnt)) {
-                       WARN_ONCE(1, "bio idx %d >= vcnt %d\n",
-                                 bio->bi_idx, bio->bi_vcnt);
-                       break;
-               }
-
-               if (bytes >= bio_iovec(bio)->bv_len) {
-                       bytes -= bio_iovec(bio)->bv_len;
-                       bio->bi_idx++;
-               } else {
-                       bio_iovec(bio)->bv_len -= bytes;
-                       bio_iovec(bio)->bv_offset += bytes;
-                       bytes = 0;
-               }
-       }
+       bio_advance_iter(bio, &bio->bi_iter, bytes);
 }
 EXPORT_SYMBOL(bio_advance);
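
[Editor's note: the open-coded loop deleted above now lives behind bio_advance_iter()/bvec_iter_advance(). The following is a simplified sketch of the semantics, illustrative only, not the exact header implementation.]

#include <linux/bio.h>

/* Illustrative: advance a cursor by 'bytes' without touching the
 * (possibly shared) biovec array itself - only the iter changes. */
static void example_iter_advance(const struct bio_vec *bv,
				 struct bvec_iter *iter, unsigned bytes)
{
	iter->bi_sector += bytes >> 9;
	iter->bi_size -= bytes;

	while (bytes) {
		unsigned left = bv[iter->bi_idx].bv_len - iter->bi_bvec_done;
		unsigned len = bytes < left ? bytes : left;

		bytes -= len;
		iter->bi_bvec_done += len;

		if (iter->bi_bvec_done == bv[iter->bi_idx].bv_len) {
			iter->bi_bvec_done = 0;	/* move to the next vec */
			iter->bi_idx++;
		}
	}
}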
 
@@ -874,117 +945,80 @@ EXPORT_SYMBOL(bio_alloc_pages);
  */
 void bio_copy_data(struct bio *dst, struct bio *src)
 {
-       struct bio_vec *src_bv, *dst_bv;
-       unsigned src_offset, dst_offset, bytes;
+       struct bvec_iter src_iter, dst_iter;
+       struct bio_vec src_bv, dst_bv;
        void *src_p, *dst_p;
+       unsigned bytes;
 
-       src_bv = bio_iovec(src);
-       dst_bv = bio_iovec(dst);
-
-       src_offset = src_bv->bv_offset;
-       dst_offset = dst_bv->bv_offset;
+       src_iter = src->bi_iter;
+       dst_iter = dst->bi_iter;
 
        while (1) {
-               if (src_offset == src_bv->bv_offset + src_bv->bv_len) {
-                       src_bv++;
-                       if (src_bv == bio_iovec_idx(src, src->bi_vcnt)) {
-                               src = src->bi_next;
-                               if (!src)
-                                       break;
-
-                               src_bv = bio_iovec(src);
-                       }
+               if (!src_iter.bi_size) {
+                       src = src->bi_next;
+                       if (!src)
+                               break;
 
-                       src_offset = src_bv->bv_offset;
+                       src_iter = src->bi_iter;
                }
 
-               if (dst_offset == dst_bv->bv_offset + dst_bv->bv_len) {
-                       dst_bv++;
-                       if (dst_bv == bio_iovec_idx(dst, dst->bi_vcnt)) {
-                               dst = dst->bi_next;
-                               if (!dst)
-                                       break;
-
-                               dst_bv = bio_iovec(dst);
-                       }
+               if (!dst_iter.bi_size) {
+                       dst = dst->bi_next;
+                       if (!dst)
+                               break;
 
-                       dst_offset = dst_bv->bv_offset;
+                       dst_iter = dst->bi_iter;
                }
 
-               bytes = min(dst_bv->bv_offset + dst_bv->bv_len - dst_offset,
-                           src_bv->bv_offset + src_bv->bv_len - src_offset);
+               src_bv = bio_iter_iovec(src, src_iter);
+               dst_bv = bio_iter_iovec(dst, dst_iter);
+
+               bytes = min(src_bv.bv_len, dst_bv.bv_len);
 
-               src_p = kmap_atomic(src_bv->bv_page);
-               dst_p = kmap_atomic(dst_bv->bv_page);
+               src_p = kmap_atomic(src_bv.bv_page);
+               dst_p = kmap_atomic(dst_bv.bv_page);
 
-               memcpy(dst_p + dst_offset,
-                      src_p + src_offset,
+               memcpy(dst_p + dst_bv.bv_offset,
+                      src_p + src_bv.bv_offset,
                       bytes);
 
                kunmap_atomic(dst_p);
                kunmap_atomic(src_p);
 
-               src_offset += bytes;
-               dst_offset += bytes;
+               bio_advance_iter(src, &src_iter, bytes);
+               bio_advance_iter(dst, &dst_iter, bytes);
        }
 }
 EXPORT_SYMBOL(bio_copy_data);
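
[Editor's note: a hedged usage sketch for bio_copy_data(), the classic bounce-buffer completion where read data is copied from a private bio back into the caller's. The bounce_read_endio name and the bi_private convention are assumptions of this example.]

#include <linux/bio.h>

static void bounce_read_endio(struct bio *bounce, int error)
{
	struct bio *orig = bounce->bi_private;

	if (!error)
		bio_copy_data(orig, bounce);	/* args are (dst, src) */

	bio_put(bounce);
	bio_endio(orig, error);
}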
 
 struct bio_map_data {
-       struct bio_vec *iovecs;
-       struct sg_iovec *sgvecs;
        int nr_sgvecs;
        int is_our_pages;
+       struct sg_iovec sgvecs[];
 };
 
 static void bio_set_map_data(struct bio_map_data *bmd, struct bio *bio,
                             struct sg_iovec *iov, int iov_count,
                             int is_our_pages)
 {
-       memcpy(bmd->iovecs, bio->bi_io_vec, sizeof(struct bio_vec) * bio->bi_vcnt);
        memcpy(bmd->sgvecs, iov, sizeof(struct sg_iovec) * iov_count);
        bmd->nr_sgvecs = iov_count;
        bmd->is_our_pages = is_our_pages;
        bio->bi_private = bmd;
 }
 
-static void bio_free_map_data(struct bio_map_data *bmd)
-{
-       kfree(bmd->iovecs);
-       kfree(bmd->sgvecs);
-       kfree(bmd);
-}
-
 static struct bio_map_data *bio_alloc_map_data(int nr_segs,
                                               unsigned int iov_count,
                                               gfp_t gfp_mask)
 {
-       struct bio_map_data *bmd;
-
        if (iov_count > UIO_MAXIOV)
                return NULL;
 
-       bmd = kmalloc(sizeof(*bmd), gfp_mask);
-       if (!bmd)
-               return NULL;
-
-       bmd->iovecs = kmalloc(sizeof(struct bio_vec) * nr_segs, gfp_mask);
-       if (!bmd->iovecs) {
-               kfree(bmd);
-               return NULL;
-       }
-
-       bmd->sgvecs = kmalloc(sizeof(struct sg_iovec) * iov_count, gfp_mask);
-       if (bmd->sgvecs)
-               return bmd;
-
-       kfree(bmd->iovecs);
-       kfree(bmd);
-       return NULL;
+       return kmalloc(sizeof(struct bio_map_data) +
+                      sizeof(struct sg_iovec) * iov_count, gfp_mask);
 }
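
[Editor's note: the rewrite above collapses three allocations (bmd, bmd->iovecs, bmd->sgvecs) into one by using a C99 flexible array member. The same pattern in isolation, as an illustrative, self-contained sketch with a stand-in element type:]

#include <linux/slab.h>

struct example_elem {			/* stand-in for struct sg_iovec */
	void __user *base;
	size_t len;
};

struct example_map {
	int nr;
	struct example_elem vecs[];	/* flexible array member */
};

static struct example_map *example_alloc(int n, gfp_t gfp)
{
	struct example_map *m;

	/* one allocation covers the header plus n trailing elements */
	m = kmalloc(sizeof(*m) + sizeof(struct example_elem) * n, gfp);
	if (m)
		m->nr = n;
	return m;	/* released with a single kfree(m) */
}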
 
-static int __bio_copy_iov(struct bio *bio, struct bio_vec *iovecs,
-                         struct sg_iovec *iov, int iov_count,
+static int __bio_copy_iov(struct bio *bio, struct sg_iovec *iov, int iov_count,
                          int to_user, int from_user, int do_free_page)
 {
        int ret = 0, i;
@@ -994,7 +1028,7 @@ static int __bio_copy_iov(struct bio *bio, struct bio_vec *iovecs,
 
        bio_for_each_segment_all(bvec, bio, i) {
                char *bv_addr = page_address(bvec->bv_page);
-               unsigned int bv_len = iovecs[i].bv_len;
+               unsigned int bv_len = bvec->bv_len;
 
                while (bv_len && iov_idx < iov_count) {
                        unsigned int bytes;
@@ -1054,14 +1088,14 @@ int bio_uncopy_user(struct bio *bio)
                 * don't copy into a random user address space, just free.
                 */
                if (current->mm)
-                       ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs,
-                                            bmd->nr_sgvecs, bio_data_dir(bio) == READ,
+                       ret = __bio_copy_iov(bio, bmd->sgvecs, bmd->nr_sgvecs,
+                                            bio_data_dir(bio) == READ,
                                             0, bmd->is_our_pages);
                else if (bmd->is_our_pages)
                        bio_for_each_segment_all(bvec, bio, i)
                                __free_page(bvec->bv_page);
        }
-       bio_free_map_data(bmd);
+       kfree(bmd);
        bio_put(bio);
        return ret;
 }
@@ -1175,7 +1209,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
         */
        if ((!write_to_vm && (!map_data || !map_data->null_mapped)) ||
            (map_data && map_data->from_user)) {
-               ret = __bio_copy_iov(bio, bio->bi_io_vec, iov, iov_count, 0, 1, 0);
+               ret = __bio_copy_iov(bio, iov, iov_count, 0, 1, 0);
                if (ret)
                        goto cleanup;
        }
@@ -1189,7 +1223,7 @@ cleanup:
 
        bio_put(bio);
 out_bmd:
-       bio_free_map_data(bmd);
+       kfree(bmd);
        return ERR_PTR(ret);
 }
 
@@ -1485,7 +1519,7 @@ struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
        if (IS_ERR(bio))
                return bio;
 
-       if (bio->bi_size == len)
+       if (bio->bi_iter.bi_size == len)
                return bio;
 
        /*
@@ -1506,16 +1540,15 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
 
        bio_for_each_segment_all(bvec, bio, i) {
                char *addr = page_address(bvec->bv_page);
-               int len = bmd->iovecs[i].bv_len;
 
                if (read)
-                       memcpy(p, addr, len);
+                       memcpy(p, addr, bvec->bv_len);
 
                __free_page(bvec->bv_page);
-               p += len;
+               p += bvec->bv_len;
        }
 
-       bio_free_map_data(bmd);
+       kfree(bmd);
        bio_put(bio);
 }
 
@@ -1686,11 +1719,11 @@ void bio_check_pages_dirty(struct bio *bio)
 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
 void bio_flush_dcache_pages(struct bio *bi)
 {
-       int i;
-       struct bio_vec *bvec;
+       struct bio_vec bvec;
+       struct bvec_iter iter;
 
-       bio_for_each_segment(bvec, bi, i)
-               flush_dcache_page(bvec->bv_page);
+       bio_for_each_segment(bvec, bi, iter)
+               flush_dcache_page(bvec.bv_page);
 }
 EXPORT_SYMBOL(bio_flush_dcache_pages);
 #endif
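
[Editor's note: the conversion just above is the general pattern of this series - the loop variable becomes a struct bio_vec by value and the cursor a struct bvec_iter, so iteration never writes to a biovec that may be shared with a clone. A minimal sketch:]

#include <linux/bio.h>

/* Count the payload bytes of a bio using the immutable-biovec
 * iterator; bio->bi_io_vec is only ever read. */
static unsigned bio_payload_bytes(struct bio *bio)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	unsigned bytes = 0;

	bio_for_each_segment(bv, bio, iter)
		bytes += bv.bv_len;

	return bytes;	/* equals bio->bi_iter.bi_size */
}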
@@ -1711,96 +1744,86 @@ EXPORT_SYMBOL(bio_flush_dcache_pages);
  **/
 void bio_endio(struct bio *bio, int error)
 {
-       if (error)
-               clear_bit(BIO_UPTODATE, &bio->bi_flags);
-       else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
-               error = -EIO;
+       while (bio) {
+               BUG_ON(atomic_read(&bio->bi_remaining) <= 0);
 
-       if (bio->bi_end_io)
-               bio->bi_end_io(bio, error);
-}
-EXPORT_SYMBOL(bio_endio);
+               if (error)
+                       clear_bit(BIO_UPTODATE, &bio->bi_flags);
+               else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
+                       error = -EIO;
 
-void bio_pair_release(struct bio_pair *bp)
-{
-       if (atomic_dec_and_test(&bp->cnt)) {
-               struct bio *master = bp->bio1.bi_private;
+               if (!atomic_dec_and_test(&bio->bi_remaining))
+                       return;
 
-               bio_endio(master, bp->error);
-               mempool_free(bp, bp->bio2.bi_private);
+               /*
+                * Need to have a real endio function for chained bios,
+                * otherwise various corner cases will break (like stacking
+                * block devices that save/restore bi_end_io) - however, we want
+                * to avoid unbounded recursion and blowing the stack. Tail call
+                * optimization would handle this, but compiling with frame
+                * pointers also disables gcc's sibling call optimization.
+                */
+               if (bio->bi_end_io == bio_chain_endio) {
+                       struct bio *parent = bio->bi_private;
+                       bio_put(bio);
+                       bio = parent;
+               } else {
+                       if (bio->bi_end_io)
+                               bio->bi_end_io(bio, error);
+                       bio = NULL;
+               }
        }
 }
-EXPORT_SYMBOL(bio_pair_release);
+EXPORT_SYMBOL(bio_endio);
 
-static void bio_pair_end_1(struct bio *bi, int err)
+/**
+ * bio_endio_nodec - end I/O on a bio, without decrementing bi_remaining
+ * @bio:       bio
+ * @error:     error, if any
+ *
+ * For code that has saved and restored bi_end_io; think hard before using this
+ * function - most likely you should have cloned the entire bio.
+ **/
+void bio_endio_nodec(struct bio *bio, int error)
 {
-       struct bio_pair *bp = container_of(bi, struct bio_pair, bio1);
-
-       if (err)
-               bp->error = err;
-
-       bio_pair_release(bp);
+       atomic_inc(&bio->bi_remaining);
+       bio_endio(bio, error);
 }
+EXPORT_SYMBOL(bio_endio_nodec);
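
[Editor's note: for context, a hedged sketch of the save/restore pattern bio_endio_nodec() exists for; struct endio_stash and the hook function are hypothetical.]

#include <linux/bio.h>

struct endio_stash {
	bio_end_io_t	*saved_end_io;
	void		*saved_private;
};

/* Our own endio ran (bio_endio() already decremented bi_remaining),
 * so after restoring the saved completion we must finish the bio
 * without decrementing it a second time. */
static void restore_and_complete(struct bio *bio, int error,
				 struct endio_stash *s)
{
	bio->bi_end_io = s->saved_end_io;
	bio->bi_private = s->saved_private;
	bio_endio_nodec(bio, error);
}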
 
-static void bio_pair_end_2(struct bio *bi, int err)
-{
-       struct bio_pair *bp = container_of(bi, struct bio_pair, bio2);
-
-       if (err)
-               bp->error = err;
-
-       bio_pair_release(bp);
-}
-
-/*
- * split a bio - only worry about a bio with a single page in its iovec
+/**
+ * bio_split - split a bio
+ * @bio:       bio to split
+ * @sectors:   number of sectors to split from the front of @bio
+ * @gfp:       gfp mask
+ * @bs:                bio set to allocate from
+ *
+ * Allocates and returns a new bio which represents @sectors from the start of
+ * @bio, and updates @bio to represent the remaining sectors.
+ *
+ * The newly allocated bio will point to @bio's bi_io_vec; it is the caller's
+ * responsibility to ensure that @bio is not freed before the split.
  */
-struct bio_pair *bio_split(struct bio *bi, int first_sectors)
+struct bio *bio_split(struct bio *bio, int sectors,
+                     gfp_t gfp, struct bio_set *bs)
 {
-       struct bio_pair *bp = mempool_alloc(bio_split_pool, GFP_NOIO);
-
-       if (!bp)
-               return bp;
-
-       trace_block_split(bdev_get_queue(bi->bi_bdev), bi,
-                               bi->bi_sector + first_sectors);
-
-       BUG_ON(bio_segments(bi) > 1);
-       atomic_set(&bp->cnt, 3);
-       bp->error = 0;
-       bp->bio1 = *bi;
-       bp->bio2 = *bi;
-       bp->bio2.bi_sector += first_sectors;
-       bp->bio2.bi_size -= first_sectors << 9;
-       bp->bio1.bi_size = first_sectors << 9;
-
-       if (bi->bi_vcnt != 0) {
-               bp->bv1 = *bio_iovec(bi);
-               bp->bv2 = *bio_iovec(bi);
-
-               if (bio_is_rw(bi)) {
-                       bp->bv2.bv_offset += first_sectors << 9;
-                       bp->bv2.bv_len -= first_sectors << 9;
-                       bp->bv1.bv_len = first_sectors << 9;
-               }
+       struct bio *split = NULL;
 
-               bp->bio1.bi_io_vec = &bp->bv1;
-               bp->bio2.bi_io_vec = &bp->bv2;
+       BUG_ON(sectors <= 0);
+       BUG_ON(sectors >= bio_sectors(bio));
 
-               bp->bio1.bi_max_vecs = 1;
-               bp->bio2.bi_max_vecs = 1;
-       }
+       split = bio_clone_fast(bio, gfp, bs);
+       if (!split)
+               return NULL;
 
-       bp->bio1.bi_end_io = bio_pair_end_1;
-       bp->bio2.bi_end_io = bio_pair_end_2;
+       split->bi_iter.bi_size = sectors << 9;
 
-       bp->bio1.bi_private = bi;
-       bp->bio2.bi_private = bio_split_pool;
+       if (bio_integrity(split))
+               bio_integrity_trim(split, 0, sectors);
 
-       if (bio_integrity(bi))
-               bio_integrity_split(bi, bp, first_sectors);
+       bio_advance(bio, split->bi_iter.bi_size);
 
-       return bp;
+       return split;
 }
 EXPORT_SYMBOL(bio_split);
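
[Editor's note: a hedged usage sketch for the new bio_split() - split the front half off and chain it to the remainder so the parent completes only once both halves finish. bio_chain() is the chaining helper this series adds (its endio is referenced above); treat the exact flow as illustrative.]

#include <linux/bio.h>

static void submit_in_two_halves(struct bio *bio, struct bio_set *bs)
{
	struct bio *front;

	/* bio_split() requires 0 < sectors < bio_sectors(bio) */
	front = bio_split(bio, bio_sectors(bio) / 2, GFP_NOIO, bs);
	if (!front) {
		bio_endio(bio, -ENOMEM);
		return;
	}

	bio_chain(front, bio);	/* parent waits on the split half too */
	submit_bio(bio->bi_rw, front);
	submit_bio(bio->bi_rw, bio);
}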
 
@@ -1814,80 +1837,20 @@ void bio_trim(struct bio *bio, int offset, int size)
 {
        /* 'bio' is a cloned bio which we need to trim to match
         * the given offset and size.
-        * This requires adjusting bi_sector, bi_size, and bi_io_vec
         */
-       int i;
-       struct bio_vec *bvec;
-       int sofar = 0;
 
        size <<= 9;
-       if (offset == 0 && size == bio->bi_size)
+       if (offset == 0 && size == bio->bi_iter.bi_size)
                return;
 
        clear_bit(BIO_SEG_VALID, &bio->bi_flags);
 
        bio_advance(bio, offset << 9);
 
-       bio->bi_size = size;
-
-       /* avoid any complications with bi_idx being non-zero*/
-       if (bio->bi_idx) {
-               memmove(bio->bi_io_vec, bio->bi_io_vec+bio->bi_idx,
-                       (bio->bi_vcnt - bio->bi_idx) * sizeof(struct bio_vec));
-               bio->bi_vcnt -= bio->bi_idx;
-               bio->bi_idx = 0;
-       }
-       /* Make sure vcnt and last bv are not too big */
-       bio_for_each_segment(bvec, bio, i) {
-               if (sofar + bvec->bv_len > size)
-                       bvec->bv_len = size - sofar;
-               if (bvec->bv_len == 0) {
-                       bio->bi_vcnt = i;
-                       break;
-               }
-               sofar += bvec->bv_len;
-       }
+       bio->bi_iter.bi_size = size;
 }
 EXPORT_SYMBOL_GPL(bio_trim);
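
[Editor's note: a matching sketch for the slimmed-down bio_trim(), which now only advances the iterator and clamps bi_size. Offsets and sizes are in 512-byte sectors; the values here are illustrative.]

#include <linux/bio.h>

/* Clone a bio and trim the clone to 8 sectors starting 4 sectors
 * into the original; the clone shares the parent's biovec. */
static struct bio *clone_middle(struct bio *bio, struct bio_set *bs)
{
	struct bio *clone = bio_clone_fast(bio, GFP_NOIO, bs);

	if (clone)
		bio_trim(clone, 4, 8);	/* (bio, offset, size) in sectors */
	return clone;
}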
 
-/**
- *      bio_sector_offset - Find hardware sector offset in bio
- *      @bio:           bio to inspect
- *      @index:         bio_vec index
- *      @offset:        offset in bv_page
- *
- *      Return the number of hardware sectors between beginning of bio
- *      and an end point indicated by a bio_vec index and an offset
- *      within that vector's page.
- */
-sector_t bio_sector_offset(struct bio *bio, unsigned short index,
-                          unsigned int offset)
-{
-       unsigned int sector_sz;
-       struct bio_vec *bv;
-       sector_t sectors;
-       int i;
-
-       sector_sz = queue_logical_block_size(bio->bi_bdev->bd_disk->queue);
-       sectors = 0;
-
-       if (index >= bio->bi_idx)
-               index = bio->bi_vcnt - 1;
-
-       bio_for_each_segment_all(bv, bio, i) {
-               if (i == index) {
-                       if (offset > bv->bv_offset)
-                               sectors += (offset - bv->bv_offset) / sector_sz;
-                       break;
-               }
-
-               sectors += bv->bv_len / sector_sz;
-       }
-
-       return sectors;
-}
-EXPORT_SYMBOL(bio_sector_offset);
-
 /*
  * create memory pools for biovec's in a bio_set.
  * use the global biovec slabs created for general use.
@@ -2065,11 +2028,6 @@ static int __init init_bio(void)
        if (bioset_integrity_create(fs_bio_set, BIO_POOL_SIZE))
                panic("bio: can't create integrity pool\n");
 
-       bio_split_pool = mempool_create_kmalloc_pool(BIO_SPLIT_ENTRIES,
-                                                    sizeof(struct bio_pair));
-       if (!bio_split_pool)
-               panic("bio: can't create split pool\n");
-
        return 0;
 }
 subsys_initcall(init_bio);
index 131d828..cb05e1c 100644 (file)
@@ -1695,7 +1695,7 @@ static int btrfsic_read_block(struct btrfsic_state *state,
                        return -1;
                }
                bio->bi_bdev = block_ctx->dev->bdev;
-               bio->bi_sector = dev_bytenr >> 9;
+               bio->bi_iter.bi_sector = dev_bytenr >> 9;
 
                for (j = i; j < num_pages; j++) {
                        ret = bio_add_page(bio, block_ctx->pagev[j],
@@ -3013,7 +3013,7 @@ static void __btrfsic_submit_bio(int rw, struct bio *bio)
                int bio_is_patched;
                char **mapped_datav;
 
-               dev_bytenr = 512 * bio->bi_sector;
+               dev_bytenr = 512 * bio->bi_iter.bi_sector;
                bio_is_patched = 0;
                if (dev_state->state->print_mask &
                    BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
@@ -3021,8 +3021,8 @@ static void __btrfsic_submit_bio(int rw, struct bio *bio)
                               "submit_bio(rw=0x%x, bi_vcnt=%u,"
                               " bi_sector=%llu (bytenr %llu), bi_bdev=%p)\n",
                               rw, bio->bi_vcnt,
-                              (unsigned long long)bio->bi_sector, dev_bytenr,
-                              bio->bi_bdev);
+                              (unsigned long long)bio->bi_iter.bi_sector,
+                              dev_bytenr, bio->bi_bdev);
 
                mapped_datav = kmalloc(sizeof(*mapped_datav) * bio->bi_vcnt,
                                       GFP_NOFS);
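
[Editor's note: the btrfs hunks here and below repeat one conversion over and over - 512-byte sectors to bytes via << 9 (or * 512), now sourced from bio->bi_iter. A hedged helper-style sketch of the arithmetic; btrfs itself open-codes these.]

#include <linux/bio.h>

static inline u64 bio_start_bytes(struct bio *bio)
{
	return (u64)bio->bi_iter.bi_sector << 9;	/* sector 1 = byte 512 */
}

static inline u64 bio_end_bytes(struct bio *bio)
{
	return bio_start_bytes(bio) + bio->bi_iter.bi_size;
}
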
index 1499b27..f5cdeb4 100644 (file)
@@ -172,7 +172,8 @@ static void end_compressed_bio_read(struct bio *bio, int err)
                goto out;
 
        inode = cb->inode;
-       ret = check_compressed_csum(inode, cb, (u64)bio->bi_sector << 9);
+       ret = check_compressed_csum(inode, cb,
+                                   (u64)bio->bi_iter.bi_sector << 9);
        if (ret)
                goto csum_failed;
 
@@ -201,18 +202,16 @@ csum_failed:
        if (cb->errors) {
                bio_io_error(cb->orig_bio);
        } else {
-               int bio_index = 0;
-               struct bio_vec *bvec = cb->orig_bio->bi_io_vec;
+               int i;
+               struct bio_vec *bvec;
 
                /*
                 * we have verified the checksum already, set page
                 * checked so the end_io handlers know about it
                 */
-               while (bio_index < cb->orig_bio->bi_vcnt) {
+               bio_for_each_segment_all(bvec, cb->orig_bio, i)
                        SetPageChecked(bvec->bv_page);
-                       bvec++;
-                       bio_index++;
-               }
+
                bio_endio(cb->orig_bio, 0);
        }
 
@@ -372,7 +371,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
        for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) {
                page = compressed_pages[pg_index];
                page->mapping = inode->i_mapping;
-               if (bio->bi_size)
+               if (bio->bi_iter.bi_size)
                        ret = io_tree->ops->merge_bio_hook(WRITE, page, 0,
                                                           PAGE_CACHE_SIZE,
                                                           bio, 0);
@@ -506,7 +505,7 @@ static noinline int add_ra_bio_pages(struct inode *inode,
 
                if (!em || last_offset < em->start ||
                    (last_offset + PAGE_CACHE_SIZE > extent_map_end(em)) ||
-                   (em->block_start >> 9) != cb->orig_bio->bi_sector) {
+                   (em->block_start >> 9) != cb->orig_bio->bi_iter.bi_sector) {
                        free_extent_map(em);
                        unlock_extent(tree, last_offset, end);
                        unlock_page(page);
@@ -552,7 +551,7 @@ next:
  * in it.  We don't actually do IO on those pages but allocate new ones
  * to hold the compressed pages on disk.
  *
- * bio->bi_sector points to the compressed extent on disk
+ * bio->bi_iter.bi_sector points to the compressed extent on disk
  * bio->bi_io_vec points to all of the inode pages
  * bio->bi_vcnt is a count of pages
  *
@@ -573,7 +572,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
        struct page *page;
        struct block_device *bdev;
        struct bio *comp_bio;
-       u64 cur_disk_byte = (u64)bio->bi_sector << 9;
+       u64 cur_disk_byte = (u64)bio->bi_iter.bi_sector << 9;
        u64 em_len;
        u64 em_start;
        struct extent_map *em;
@@ -659,7 +658,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
                page->mapping = inode->i_mapping;
                page->index = em_start >> PAGE_CACHE_SHIFT;
 
-               if (comp_bio->bi_size)
+               if (comp_bio->bi_iter.bi_size)
                        ret = tree->ops->merge_bio_hook(READ, page, 0,
                                                        PAGE_CACHE_SIZE,
                                                        comp_bio, 0);
@@ -687,8 +686,8 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
                                                        comp_bio, sums);
                                BUG_ON(ret); /* -ENOMEM */
                        }
-                       sums += (comp_bio->bi_size + root->sectorsize - 1) /
-                               root->sectorsize;
+                       sums += (comp_bio->bi_iter.bi_size +
+                                root->sectorsize - 1) / root->sectorsize;
 
                        ret = btrfs_map_bio(root, READ, comp_bio,
                                            mirror_num, 0);
index 8072cfa..e71039e 100644 (file)
@@ -842,20 +842,17 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
 
 static int btree_csum_one_bio(struct bio *bio)
 {
-       struct bio_vec *bvec = bio->bi_io_vec;
-       int bio_index = 0;
+       struct bio_vec *bvec;
        struct btrfs_root *root;
-       int ret = 0;
+       int i, ret = 0;
 
-       WARN_ON(bio->bi_vcnt <= 0);
-       while (bio_index < bio->bi_vcnt) {
+       bio_for_each_segment_all(bvec, bio, i) {
                root = BTRFS_I(bvec->bv_page->mapping->host)->root;
                ret = csum_dirty_buffer(root, bvec->bv_page);
                if (ret)
                        break;
-               bio_index++;
-               bvec++;
        }
+
        return ret;
 }
 
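[Editor's note: the other iterator used throughout these filesystem conversions is bio_for_each_segment_all(), which walks the raw biovec with a pointer and an integer index. That is only legal for a bio the caller owns outright, typically in its own endio. A minimal sketch of the contrast:]

#include <linux/bio.h>
#include <linux/mm.h>

/* Owner-only iteration: yields pointers into bio->bi_io_vec and
 * ignores bi_iter entirely, so it must never be used on a clone
 * that shares its biovec with a parent. */
static void dirty_all_pages(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i)
		set_page_dirty(bvec->bv_page);
}
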
@@ -1695,7 +1692,7 @@ static void end_workqueue_fn(struct btrfs_work *work)
        bio->bi_private = end_io_wq->private;
        bio->bi_end_io = end_io_wq->end_io;
        kfree(end_io_wq);
-       bio_endio(bio, error);
+       bio_endio_nodec(bio, error);
 }
 
 static int cleaner_kthread(void *arg)
index ff43802..bcb6f1b 100644 (file)
@@ -1984,7 +1984,7 @@ int repair_io_failure(struct btrfs_fs_info *fs_info, u64 start,
        bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
        if (!bio)
                return -EIO;
-       bio->bi_size = 0;
+       bio->bi_iter.bi_size = 0;
        map_length = length;
 
        ret = btrfs_map_block(fs_info, WRITE, logical,
@@ -1995,7 +1995,7 @@ int repair_io_failure(struct btrfs_fs_info *fs_info, u64 start,
        }
        BUG_ON(mirror_num != bbio->mirror_num);
        sector = bbio->stripes[mirror_num-1].physical >> 9;
-       bio->bi_sector = sector;
+       bio->bi_iter.bi_sector = sector;
        dev = bbio->stripes[mirror_num-1].dev;
        kfree(bbio);
        if (!dev || !dev->bdev || !dev->writeable) {
@@ -2268,9 +2268,9 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
                return -EIO;
        }
        bio->bi_end_io = failed_bio->bi_end_io;
-       bio->bi_sector = failrec->logical >> 9;
+       bio->bi_iter.bi_sector = failrec->logical >> 9;
        bio->bi_bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
-       bio->bi_size = 0;
+       bio->bi_iter.bi_size = 0;
 
        btrfs_failed_bio = btrfs_io_bio(failed_bio);
        if (btrfs_failed_bio->csum) {
@@ -2332,12 +2332,13 @@ int end_extent_writepage(struct page *page, int err, u64 start, u64 end)
  */
 static void end_bio_extent_writepage(struct bio *bio, int err)
 {
-       struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
+       struct bio_vec *bvec;
        struct extent_io_tree *tree;
        u64 start;
        u64 end;
+       int i;
 
-       do {
+       bio_for_each_segment_all(bvec, bio, i) {
                struct page *page = bvec->bv_page;
                tree = &BTRFS_I(page->mapping->host)->io_tree;
 
@@ -2355,14 +2356,11 @@ static void end_bio_extent_writepage(struct bio *bio, int err)
                start = page_offset(page);
                end = start + bvec->bv_offset + bvec->bv_len - 1;
 
-               if (--bvec >= bio->bi_io_vec)
-                       prefetchw(&bvec->bv_page->flags);
-
                if (end_extent_writepage(page, err, start, end))
                        continue;
 
                end_page_writeback(page);
-       } while (bvec >= bio->bi_io_vec);
+       }
 
        bio_put(bio);
 }
@@ -2392,9 +2390,8 @@ endio_readpage_release_extent(struct extent_io_tree *tree, u64 start, u64 len,
  */
 static void end_bio_extent_readpage(struct bio *bio, int err)
 {
+       struct bio_vec *bvec;
        int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
-       struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1;
-       struct bio_vec *bvec = bio->bi_io_vec;
        struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
        struct extent_io_tree *tree;
        u64 offset = 0;
@@ -2405,16 +2402,17 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
        u64 extent_len = 0;
        int mirror;
        int ret;
+       int i;
 
        if (err)
                uptodate = 0;
 
-       do {
+       bio_for_each_segment_all(bvec, bio, i) {
                struct page *page = bvec->bv_page;
                struct inode *inode = page->mapping->host;
 
                pr_debug("end_bio_extent_readpage: bi_sector=%llu, err=%d, "
-                        "mirror=%lu\n", (u64)bio->bi_sector, err,
+                        "mirror=%lu\n", (u64)bio->bi_iter.bi_sector, err,
                         io_bio->mirror_num);
                tree = &BTRFS_I(inode)->io_tree;
 
@@ -2433,9 +2431,6 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
                end = start + bvec->bv_offset + bvec->bv_len - 1;
                len = bvec->bv_len;
 
-               if (++bvec <= bvec_end)
-                       prefetchw(&bvec->bv_page->flags);
-
                mirror = io_bio->mirror_num;
                if (likely(uptodate && tree->ops &&
                           tree->ops->readpage_end_io_hook)) {
@@ -2516,7 +2511,7 @@ readpage_ok:
                        extent_start = start;
                        extent_len = end + 1 - start;
                }
-       } while (bvec <= bvec_end);
+       }
 
        if (extent_len)
                endio_readpage_release_extent(tree, extent_start, extent_len,
@@ -2547,9 +2542,8 @@ btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
        }
 
        if (bio) {
-               bio->bi_size = 0;
                bio->bi_bdev = bdev;
-               bio->bi_sector = first_sector;
+               bio->bi_iter.bi_sector = first_sector;
                btrfs_bio = btrfs_io_bio(bio);
                btrfs_bio->csum = NULL;
                btrfs_bio->csum_allocated = NULL;
@@ -2643,7 +2637,7 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree,
        if (bio_ret && *bio_ret) {
                bio = *bio_ret;
                if (old_compressed)
-                       contig = bio->bi_sector == sector;
+                       contig = bio->bi_iter.bi_sector == sector;
                else
                        contig = bio_end_sector(bio) == sector;
 
@@ -3410,20 +3404,18 @@ static void end_extent_buffer_writeback(struct extent_buffer *eb)
 
 static void end_bio_extent_buffer_writepage(struct bio *bio, int err)
 {
-       int uptodate = err == 0;
-       struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
+       struct bio_vec *bvec;
        struct extent_buffer *eb;
-       int done;
+       int i, done;
 
-       do {
+       bio_for_each_segment_all(bvec, bio, i) {
                struct page *page = bvec->bv_page;
 
-               bvec--;
                eb = (struct extent_buffer *)page->private;
                BUG_ON(!eb);
                done = atomic_dec_and_test(&eb->io_pages);
 
-               if (!uptodate || test_bit(EXTENT_BUFFER_IOERR, &eb->bflags)) {
+               if (err || test_bit(EXTENT_BUFFER_IOERR, &eb->bflags)) {
                        set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
                        ClearPageUptodate(page);
                        SetPageError(page);
@@ -3435,10 +3427,9 @@ static void end_bio_extent_buffer_writepage(struct bio *bio, int err)
                        continue;
 
                end_extent_buffer_writeback(eb);
-       } while (bvec >= bio->bi_io_vec);
+       }
 
        bio_put(bio);
-
 }
 
 static int write_one_eb(struct extent_buffer *eb,
index 6f38488..84a46a4 100644 (file)
@@ -182,7 +182,7 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
        if (!path)
                return -ENOMEM;
 
-       nblocks = bio->bi_size >> inode->i_sb->s_blocksize_bits;
+       nblocks = bio->bi_iter.bi_size >> inode->i_sb->s_blocksize_bits;
        if (!dst) {
                if (nblocks * csum_size > BTRFS_BIO_INLINE_CSUM_SIZE) {
                        btrfs_bio->csum_allocated = kmalloc(nblocks * csum_size,
@@ -201,7 +201,7 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
                csum = (u8 *)dst;
        }
 
-       if (bio->bi_size > PAGE_CACHE_SIZE * 8)
+       if (bio->bi_iter.bi_size > PAGE_CACHE_SIZE * 8)
                path->reada = 2;
 
        WARN_ON(bio->bi_vcnt <= 0);
@@ -217,7 +217,7 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
                path->skip_locking = 1;
        }
 
-       disk_bytenr = (u64)bio->bi_sector << 9;
+       disk_bytenr = (u64)bio->bi_iter.bi_sector << 9;
        if (dio)
                offset = logical_offset;
        while (bio_index < bio->bi_vcnt) {
@@ -302,7 +302,7 @@ int btrfs_lookup_bio_sums_dio(struct btrfs_root *root, struct inode *inode,
                              struct btrfs_dio_private *dip, struct bio *bio,
                              u64 offset)
 {
-       int len = (bio->bi_sector << 9) - dip->disk_bytenr;
+       int len = (bio->bi_iter.bi_sector << 9) - dip->disk_bytenr;
        u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
        int ret;
 
@@ -447,11 +447,12 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
        u64 offset;
 
        WARN_ON(bio->bi_vcnt <= 0);
-       sums = kzalloc(btrfs_ordered_sum_size(root, bio->bi_size), GFP_NOFS);
+       sums = kzalloc(btrfs_ordered_sum_size(root, bio->bi_iter.bi_size),
+                      GFP_NOFS);
        if (!sums)
                return -ENOMEM;
 
-       sums->len = bio->bi_size;
+       sums->len = bio->bi_iter.bi_size;
        INIT_LIST_HEAD(&sums->list);
 
        if (contig)
@@ -461,7 +462,7 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
 
        ordered = btrfs_lookup_ordered_extent(inode, offset);
        BUG_ON(!ordered); /* Logic error */
-       sums->bytenr = (u64)bio->bi_sector << 9;
+       sums->bytenr = (u64)bio->bi_iter.bi_sector << 9;
        index = 0;
 
        while (bio_index < bio->bi_vcnt) {
@@ -476,7 +477,7 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
                        btrfs_add_ordered_sum(inode, ordered, sums);
                        btrfs_put_ordered_extent(ordered);
 
-                       bytes_left = bio->bi_size - total_bytes;
+                       bytes_left = bio->bi_iter.bi_size - total_bytes;
 
                        sums = kzalloc(btrfs_ordered_sum_size(root, bytes_left),
                                       GFP_NOFS);
@@ -484,7 +485,7 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
                        sums->len = bytes_left;
                        ordered = btrfs_lookup_ordered_extent(inode, offset);
                        BUG_ON(!ordered); /* Logic error */
-                       sums->bytenr = ((u64)bio->bi_sector << 9) +
+                       sums->bytenr = ((u64)bio->bi_iter.bi_sector << 9) +
                                       total_bytes;
                        index = 0;
                }
index 514b291..d546d8c 100644 (file)
@@ -1577,7 +1577,7 @@ int btrfs_merge_bio_hook(int rw, struct page *page, unsigned long offset,
                         unsigned long bio_flags)
 {
        struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
-       u64 logical = (u64)bio->bi_sector << 9;
+       u64 logical = (u64)bio->bi_iter.bi_sector << 9;
        u64 length = 0;
        u64 map_length;
        int ret;
@@ -1585,7 +1585,7 @@ int btrfs_merge_bio_hook(int rw, struct page *page, unsigned long offset,
        if (bio_flags & EXTENT_BIO_COMPRESSED)
                return 0;
 
-       length = bio->bi_size;
+       length = bio->bi_iter.bi_size;
        map_length = length;
        ret = btrfs_map_block(root->fs_info, rw, logical,
                              &map_length, NULL, 0);
@@ -6783,17 +6783,16 @@ unlock_err:
 static void btrfs_endio_direct_read(struct bio *bio, int err)
 {
        struct btrfs_dio_private *dip = bio->bi_private;
-       struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1;
-       struct bio_vec *bvec = bio->bi_io_vec;
+       struct bio_vec *bvec;
        struct inode *inode = dip->inode;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct bio *dio_bio;
        u32 *csums = (u32 *)dip->csum;
-       int index = 0;
        u64 start;
+       int i;
 
        start = dip->logical_offset;
-       do {
+       bio_for_each_segment_all(bvec, bio, i) {
                if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
                        struct page *page = bvec->bv_page;
                        char *kaddr;
@@ -6809,18 +6808,16 @@ static void btrfs_endio_direct_read(struct bio *bio, int err)
                        local_irq_restore(flags);
 
                        flush_dcache_page(bvec->bv_page);
-                       if (csum != csums[index]) {
+                       if (csum != csums[i]) {
                                btrfs_err(root->fs_info, "csum failed ino %llu off %llu csum %u expected csum %u",
                                          btrfs_ino(inode), start, csum,
-                                         csums[index]);
+                                         csums[i]);
                                err = -EIO;
                        }
                }
 
                start += bvec->bv_len;
-               bvec++;
-               index++;
-       } while (bvec <= bvec_end);
+       }
 
        unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset,
                      dip->logical_offset + dip->bytes - 1);
@@ -6901,7 +6898,8 @@ static void btrfs_end_dio_bio(struct bio *bio, int err)
                printk(KERN_ERR "btrfs direct IO failed ino %llu rw %lu "
                      "sector %#Lx len %u err no %d\n",
                      btrfs_ino(dip->inode), bio->bi_rw,
-                     (unsigned long long)bio->bi_sector, bio->bi_size, err);
+                     (unsigned long long)bio->bi_iter.bi_sector,
+                     bio->bi_iter.bi_size, err);
                dip->errors = 1;
 
                /*
@@ -6992,7 +6990,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
        struct bio *bio;
        struct bio *orig_bio = dip->orig_bio;
        struct bio_vec *bvec = orig_bio->bi_io_vec;
-       u64 start_sector = orig_bio->bi_sector;
+       u64 start_sector = orig_bio->bi_iter.bi_sector;
        u64 file_offset = dip->logical_offset;
        u64 submit_len = 0;
        u64 map_length;
@@ -7000,7 +6998,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
        int ret = 0;
        int async_submit = 0;
 
-       map_length = orig_bio->bi_size;
+       map_length = orig_bio->bi_iter.bi_size;
        ret = btrfs_map_block(root->fs_info, rw, start_sector << 9,
                              &map_length, NULL, 0);
        if (ret) {
@@ -7008,7 +7006,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
                return -EIO;
        }
 
-       if (map_length >= orig_bio->bi_size) {
+       if (map_length >= orig_bio->bi_iter.bi_size) {
                bio = orig_bio;
                goto submit;
        }
@@ -7060,7 +7058,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
                        bio->bi_private = dip;
                        bio->bi_end_io = btrfs_end_dio_bio;
 
-                       map_length = orig_bio->bi_size;
+                       map_length = orig_bio->bi_iter.bi_size;
                        ret = btrfs_map_block(root->fs_info, rw,
                                              start_sector << 9,
                                              &map_length, NULL, 0);
@@ -7118,7 +7116,8 @@ static void btrfs_submit_direct(int rw, struct bio *dio_bio,
 
        if (!skip_sum && !write) {
                csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
-               sum_len = dio_bio->bi_size >> inode->i_sb->s_blocksize_bits;
+               sum_len = dio_bio->bi_iter.bi_size >>
+                       inode->i_sb->s_blocksize_bits;
                sum_len *= csum_size;
        } else {
                sum_len = 0;
@@ -7133,8 +7132,8 @@ static void btrfs_submit_direct(int rw, struct bio *dio_bio,
        dip->private = dio_bio->bi_private;
        dip->inode = inode;
        dip->logical_offset = file_offset;
-       dip->bytes = dio_bio->bi_size;
-       dip->disk_bytenr = (u64)dio_bio->bi_sector << 9;
+       dip->bytes = dio_bio->bi_iter.bi_size;
+       dip->disk_bytenr = (u64)dio_bio->bi_iter.bi_sector << 9;
        io_bio->bi_private = dip;
        dip->errors = 0;
        dip->orig_bio = io_bio;
index 24ac218..9af0b25 100644 (file)
@@ -1032,8 +1032,8 @@ static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
 
        /* see if we can add this page onto our existing bio */
        if (last) {
-               last_end = (u64)last->bi_sector << 9;
-               last_end += last->bi_size;
+               last_end = (u64)last->bi_iter.bi_sector << 9;
+               last_end += last->bi_iter.bi_size;
 
                /*
                 * we can't merge these if they are from different
@@ -1053,9 +1053,9 @@ static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
        if (!bio)
                return -ENOMEM;
 
-       bio->bi_size = 0;
+       bio->bi_iter.bi_size = 0;
        bio->bi_bdev = stripe->dev->bdev;
-       bio->bi_sector = disk_start >> 9;
+       bio->bi_iter.bi_sector = disk_start >> 9;
        set_bit(BIO_UPTODATE, &bio->bi_flags);
 
        bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
@@ -1111,7 +1111,7 @@ static void index_rbio_pages(struct btrfs_raid_bio *rbio)
 
        spin_lock_irq(&rbio->bio_list_lock);
        bio_list_for_each(bio, &rbio->bio_list) {
-               start = (u64)bio->bi_sector << 9;
+               start = (u64)bio->bi_iter.bi_sector << 9;
                stripe_offset = start - rbio->raid_map[0];
                page_index = stripe_offset >> PAGE_CACHE_SHIFT;
 
@@ -1272,7 +1272,7 @@ cleanup:
 static int find_bio_stripe(struct btrfs_raid_bio *rbio,
                           struct bio *bio)
 {
-       u64 physical = bio->bi_sector;
+       u64 physical = bio->bi_iter.bi_sector;
        u64 stripe_start;
        int i;
        struct btrfs_bio_stripe *stripe;
@@ -1298,7 +1298,7 @@ static int find_bio_stripe(struct btrfs_raid_bio *rbio,
 static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio,
                                   struct bio *bio)
 {
-       u64 logical = bio->bi_sector;
+       u64 logical = bio->bi_iter.bi_sector;
        u64 stripe_start;
        int i;
 
@@ -1602,8 +1602,8 @@ static int plug_cmp(void *priv, struct list_head *a, struct list_head *b)
                                                 plug_list);
        struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio,
                                                 plug_list);
-       u64 a_sector = ra->bio_list.head->bi_sector;
-       u64 b_sector = rb->bio_list.head->bi_sector;
+       u64 a_sector = ra->bio_list.head->bi_iter.bi_sector;
+       u64 b_sector = rb->bio_list.head->bi_iter.bi_sector;
 
        if (a_sector < b_sector)
                return -1;
@@ -1691,7 +1691,7 @@ int raid56_parity_write(struct btrfs_root *root, struct bio *bio,
        if (IS_ERR(rbio))
                return PTR_ERR(rbio);
        bio_list_add(&rbio->bio_list, bio);
-       rbio->bio_list_bytes = bio->bi_size;
+       rbio->bio_list_bytes = bio->bi_iter.bi_size;
 
        /*
         * don't plug on full rbios, just get them out the door
@@ -2044,7 +2044,7 @@ int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
 
        rbio->read_rebuild = 1;
        bio_list_add(&rbio->bio_list, bio);
-       rbio->bio_list_bytes = bio->bi_size;
+       rbio->bio_list_bytes = bio->bi_iter.bi_size;
 
        rbio->faila = find_logical_bio_stripe(rbio, bio);
        if (rbio->faila == -1) {
index 1fd3f33..bb9a928 100644 (file)
@@ -1308,7 +1308,7 @@ static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
                        continue;
                }
                bio->bi_bdev = page->dev->bdev;
-               bio->bi_sector = page->physical >> 9;
+               bio->bi_iter.bi_sector = page->physical >> 9;
 
                bio_add_page(bio, page->page, PAGE_SIZE, 0);
                if (btrfsic_submit_bio_wait(READ, bio))
@@ -1427,7 +1427,7 @@ static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
                if (!bio)
                        return -EIO;
                bio->bi_bdev = page_bad->dev->bdev;
-               bio->bi_sector = page_bad->physical >> 9;
+               bio->bi_iter.bi_sector = page_bad->physical >> 9;
 
                ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0);
                if (PAGE_SIZE != ret) {
@@ -1520,7 +1520,7 @@ again:
                bio->bi_private = sbio;
                bio->bi_end_io = scrub_wr_bio_end_io;
                bio->bi_bdev = sbio->dev->bdev;
-               bio->bi_sector = sbio->physical >> 9;
+               bio->bi_iter.bi_sector = sbio->physical >> 9;
                sbio->err = 0;
        } else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
                   spage->physical_for_dev_replace ||
@@ -1926,7 +1926,7 @@ again:
                bio->bi_private = sbio;
                bio->bi_end_io = scrub_bio_end_io;
                bio->bi_bdev = sbio->dev->bdev;
-               bio->bi_sector = sbio->physical >> 9;
+               bio->bi_iter.bi_sector = sbio->physical >> 9;
                sbio->err = 0;
        } else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
                   spage->physical ||
@@ -3371,8 +3371,8 @@ static int write_page_nocow(struct scrub_ctx *sctx,
                spin_unlock(&sctx->stat_lock);
                return -ENOMEM;
        }
-       bio->bi_size = 0;
-       bio->bi_sector = physical_for_dev_replace >> 9;
+       bio->bi_iter.bi_size = 0;
+       bio->bi_iter.bi_sector = physical_for_dev_replace >> 9;
        bio->bi_bdev = dev->bdev;
        ret = bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
        if (ret != PAGE_CACHE_SIZE) {
index 92303f4..54d2685 100644 (file)
@@ -5298,6 +5298,13 @@ static void btrfs_end_bio(struct bio *bio, int err)
                        bio_put(bio);
                        bio = bbio->orig_bio;
                }
+
+               /*
+                * We have the original bio now, so increment bi_remaining to
+                * account for it in endio.
+                */
+               atomic_inc(&bio->bi_remaining);
+
                bio->bi_private = bbio->private;
                bio->bi_end_io = bbio->end_io;
                btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
@@ -5411,7 +5418,7 @@ static int bio_size_ok(struct block_device *bdev, struct bio *bio,
        if (!q->merge_bvec_fn)
                return 1;
 
-       bvm.bi_size = bio->bi_size - prev->bv_len;
+       bvm.bi_size = bio->bi_iter.bi_size - prev->bv_len;
        if (q->merge_bvec_fn(q, &bvm, prev) < prev->bv_len)
                return 0;
        return 1;
@@ -5426,7 +5433,7 @@ static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
        bio->bi_private = bbio;
        btrfs_io_bio(bio)->stripe_index = dev_nr;
        bio->bi_end_io = btrfs_end_bio;
-       bio->bi_sector = physical >> 9;
+       bio->bi_iter.bi_sector = physical >> 9;
 #ifdef DEBUG
        {
                struct rcu_string *name;
@@ -5464,7 +5471,7 @@ again:
        while (bvec <= (first_bio->bi_io_vec + first_bio->bi_vcnt - 1)) {
                if (bio_add_page(bio, bvec->bv_page, bvec->bv_len,
                                 bvec->bv_offset) < bvec->bv_len) {
-                       u64 len = bio->bi_size;
+                       u64 len = bio->bi_iter.bi_size;
 
                        atomic_inc(&bbio->stripes_pending);
                        submit_stripe_bio(root, bbio, bio, physical, dev_nr,
@@ -5486,7 +5493,7 @@ static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
                bio->bi_private = bbio->private;
                bio->bi_end_io = bbio->end_io;
                btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
-               bio->bi_sector = logical >> 9;
+               bio->bi_iter.bi_sector = logical >> 9;
                kfree(bbio);
                bio_endio(bio, -EIO);
        }
@@ -5497,7 +5504,7 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
 {
        struct btrfs_device *dev;
        struct bio *first_bio = bio;
-       u64 logical = (u64)bio->bi_sector << 9;
+       u64 logical = (u64)bio->bi_iter.bi_sector << 9;
        u64 length = 0;
        u64 map_length;
        u64 *raid_map = NULL;
@@ -5506,7 +5513,7 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
        int total_devs = 1;
        struct btrfs_bio *bbio = NULL;
 
-       length = bio->bi_size;
+       length = bio->bi_iter.bi_size;
        map_length = length;
 
        ret = __btrfs_map_block(root->fs_info, rw, logical, &map_length, &bbio,
index 6024877..651dba1 100644 (file)
@@ -1312,7 +1312,7 @@ static void bh_lru_install(struct buffer_head *bh)
                }
                while (out < BH_LRU_SIZE)
                        bhs[out++] = NULL;
-               memcpy(__this_cpu_ptr(&bh_lrus.bhs), bhs, sizeof(bhs));
+               memcpy(this_cpu_ptr(&bh_lrus.bhs), bhs, sizeof(bhs));
        }
        bh_lru_unlock();
 
@@ -2982,11 +2982,11 @@ static void guard_bh_eod(int rw, struct bio *bio, struct buffer_head *bh)
         * let it through, and the IO layer will turn it into
         * an EIO.
         */
-       if (unlikely(bio->bi_sector >= maxsector))
+       if (unlikely(bio->bi_iter.bi_sector >= maxsector))
                return;
 
-       maxsector -= bio->bi_sector;
-       bytes = bio->bi_size;
+       maxsector -= bio->bi_iter.bi_sector;
+       bytes = bio->bi_iter.bi_size;
        if (likely((bytes >> 9) <= maxsector))
                return;
 
@@ -2994,7 +2994,7 @@ static void guard_bh_eod(int rw, struct bio *bio, struct buffer_head *bh)
        bytes = maxsector << 9;
 
        /* Truncate the bio.. */
-       bio->bi_size = bytes;
+       bio->bi_iter.bi_size = bytes;
        bio->bi_io_vec[0].bv_len = bytes;
 
        /* ..and clear the end of the buffer for reads */
@@ -3029,14 +3029,14 @@ int _submit_bh(int rw, struct buffer_head *bh, unsigned long bio_flags)
         */
        bio = bio_alloc(GFP_NOIO, 1);
 
-       bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
+       bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
        bio->bi_bdev = bh->b_bdev;
        bio->bi_io_vec[0].bv_page = bh->b_page;
        bio->bi_io_vec[0].bv_len = bh->b_size;
        bio->bi_io_vec[0].bv_offset = bh_offset(bh);
 
        bio->bi_vcnt = 1;
-       bio->bi_size = bh->b_size;
+       bio->bi_iter.bi_size = bh->b_size;
 
        bio->bi_end_io = end_bio_bh_io_sync;
        bio->bi_private = bh;
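
[Editor's note: the _submit_bh() hunk above is a handy before/after of where position now lives - the sector goes into bio->bi_iter.bi_sector and bi_size is maintained by the iterator. A hedged sketch of the same single-page setup using bio_add_page() instead of poking bi_io_vec directly:]

#include <linux/bio.h>

static struct bio *one_page_bio(struct block_device *bdev, sector_t sector,
				struct page *page, unsigned len)
{
	struct bio *bio = bio_alloc(GFP_NOIO, 1);

	bio->bi_bdev = bdev;
	bio->bi_iter.bi_sector = sector;
	bio_add_page(bio, page, len, 0);	/* sets bi_iter.bi_size */
	return bio;
}
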
index 0e04142..160a548 100644 (file)
@@ -375,7 +375,7 @@ dio_bio_alloc(struct dio *dio, struct dio_submit *sdio,
        bio = bio_alloc(GFP_KERNEL, nr_vecs);
 
        bio->bi_bdev = bdev;
-       bio->bi_sector = first_sector;
+       bio->bi_iter.bi_sector = first_sector;
        if (dio->is_async)
                bio->bi_end_io = dio_bio_end_aio;
        else
@@ -719,7 +719,7 @@ static inline int dio_send_cur_page(struct dio *dio, struct dio_submit *sdio,
        if (sdio->bio) {
                loff_t cur_offset = sdio->cur_page_fs_offset;
                loff_t bio_next_offset = sdio->logical_offset_in_bio +
-                       sdio->bio->bi_size;
+                       sdio->bio->bi_iter.bi_size;
 
                /*
                 * See whether this new request is contiguous with the old.
index d488f80..ab95508 100644 (file)
@@ -65,9 +65,9 @@ static void ext4_finish_bio(struct bio *bio)
 {
        int i;
        int error = !test_bit(BIO_UPTODATE, &bio->bi_flags);
+       struct bio_vec *bvec;
 
-       for (i = 0; i < bio->bi_vcnt; i++) {
-               struct bio_vec *bvec = &bio->bi_io_vec[i];
+       bio_for_each_segment_all(bvec, bio, i) {
                struct page *page = bvec->bv_page;
                struct buffer_head *bh, *head;
                unsigned bio_start = bvec->bv_offset;
@@ -298,7 +298,7 @@ ext4_io_end_t *ext4_get_io_end(ext4_io_end_t *io_end)
 static void ext4_end_bio(struct bio *bio, int error)
 {
        ext4_io_end_t *io_end = bio->bi_private;
-       sector_t bi_sector = bio->bi_sector;
+       sector_t bi_sector = bio->bi_iter.bi_sector;
 
        BUG_ON(!io_end);
        bio->bi_end_io = NULL;
@@ -366,7 +366,7 @@ static int io_submit_init_bio(struct ext4_io_submit *io,
        bio = bio_alloc(GFP_NOIO, min(nvecs, BIO_MAX_PAGES));
        if (!bio)
                return -ENOMEM;
-       bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
+       bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
        bio->bi_bdev = bh->b_bdev;
        bio->bi_end_io = ext4_end_bio;
        bio->bi_private = ext4_get_io_end(io->io_end);
index 0ae5587..2261ccd 100644 (file)
 
 static void f2fs_read_end_io(struct bio *bio, int err)
 {
-       const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
-       struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
+       struct bio_vec *bvec;
+       int i;
 
-       do {
+       bio_for_each_segment_all(bvec, bio, i) {
                struct page *page = bvec->bv_page;
 
-               if (--bvec >= bio->bi_io_vec)
-                       prefetchw(&bvec->bv_page->flags);
-
-               if (unlikely(!uptodate)) {
+               if (!err) {
+                       SetPageUptodate(page);
+               } else {
                        ClearPageUptodate(page);
                        SetPageError(page);
-               } else {
-                       SetPageUptodate(page);
                }
                unlock_page(page);
-       } while (bvec >= bio->bi_io_vec);
-
+       }
        bio_put(bio);
 }
 
 static void f2fs_write_end_io(struct bio *bio, int err)
 {
-       const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
-       struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
-       struct f2fs_sb_info *sbi = F2FS_SB(bvec->bv_page->mapping->host->i_sb);
+       struct f2fs_sb_info *sbi = F2FS_SB(bio->bi_io_vec->bv_page->mapping->host->i_sb);
+       struct bio_vec *bvec;
+       int i;
 
-       do {
+       bio_for_each_segment_all(bvec, bio, i) {
                struct page *page = bvec->bv_page;
 
-               if (--bvec >= bio->bi_io_vec)
-                       prefetchw(&bvec->bv_page->flags);
-
-               if (unlikely(!uptodate)) {
+               if (unlikely(err)) {
                        SetPageError(page);
                        set_bit(AS_EIO, &page->mapping->flags);
                        set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG);
@@ -67,7 +60,7 @@ static void f2fs_write_end_io(struct bio *bio, int err)
                }
                end_page_writeback(page);
                dec_page_count(sbi, F2FS_WRITEBACK);
-       } while (bvec >= bio->bi_io_vec);
+       }
 
        if (bio->bi_private)
                complete(bio->bi_private);
@@ -91,7 +84,7 @@ static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
        bio = bio_alloc(GFP_NOIO, npages);
 
        bio->bi_bdev = sbi->sb->s_bdev;
-       bio->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
+       bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
        bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io;
 
        return bio;
index 58f0640..7669379 100644 (file)
@@ -273,7 +273,7 @@ static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno)
                nrvecs = max(nrvecs/2, 1U);
        }
 
-       bio->bi_sector = blkno * (sb->s_blocksize >> 9);
+       bio->bi_iter.bi_sector = blkno * (sb->s_blocksize >> 9);
        bio->bi_bdev = sb->s_bdev;
        bio->bi_end_io = gfs2_end_log_write;
        bio->bi_private = sdp;
index 1e712b5..c6872d0 100644 (file)
@@ -238,7 +238,7 @@ static int gfs2_read_super(struct gfs2_sbd *sdp, sector_t sector, int silent)
        lock_page(page);
 
        bio = bio_alloc(GFP_NOFS, 1);
-       bio->bi_sector = sector * (sb->s_blocksize >> 9);
+       bio->bi_iter.bi_sector = sector * (sb->s_blocksize >> 9);
        bio->bi_bdev = sb->s_bdev;
        bio_add_page(bio, page, PAGE_SIZE, 0);
 
index e9a97a0..3f99964 100644 (file)
@@ -63,7 +63,7 @@ int hfsplus_submit_bio(struct super_block *sb, sector_t sector,
        sector &= ~((io_size >> HFSPLUS_SECTOR_SHIFT) - 1);
 
        bio = bio_alloc(GFP_NOIO, 1);
-       bio->bi_sector = sector;
+       bio->bi_iter.bi_sector = sector;
        bio->bi_bdev = sb->s_bdev;
 
        if (!(rw & WRITE) && data)
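
Every bi_sector assignment converted in this merge (hfsplus above, jfs, logfs, nilfs2, ocfs2 and xfs below) is the same mechanical substitution: the fields that used to live directly in struct bio (bi_sector, bi_size, bi_idx) now sit in an embedded struct bvec_iter, so a bio's position can be advanced, saved, and restored without mutating the biovec itself. Roughly, as a sketch rather than an authoritative definition:

    struct bvec_iter {
            sector_t        bi_sector;      /* device address, in 512-byte sectors */
            unsigned int    bi_size;        /* residual I/O count, in bytes */
            unsigned int    bi_idx;         /* current index into bi_io_vec */
            unsigned int    bi_bvec_done;   /* bytes completed in the current bvec */
    };
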
index 360d27c..8d811e0 100644 (file)
@@ -1998,20 +1998,20 @@ static int lbmRead(struct jfs_log * log, int pn, struct lbuf ** bpp)
 
        bio = bio_alloc(GFP_NOFS, 1);
 
-       bio->bi_sector = bp->l_blkno << (log->l2bsize - 9);
+       bio->bi_iter.bi_sector = bp->l_blkno << (log->l2bsize - 9);
        bio->bi_bdev = log->bdev;
        bio->bi_io_vec[0].bv_page = bp->l_page;
        bio->bi_io_vec[0].bv_len = LOGPSIZE;
        bio->bi_io_vec[0].bv_offset = bp->l_offset;
 
        bio->bi_vcnt = 1;
-       bio->bi_size = LOGPSIZE;
+       bio->bi_iter.bi_size = LOGPSIZE;
 
        bio->bi_end_io = lbmIODone;
        bio->bi_private = bp;
        /*check if journaling to disk has been disabled*/
        if (log->no_integrity) {
-               bio->bi_size = 0;
+               bio->bi_iter.bi_size = 0;
                lbmIODone(bio, 0);
        } else {
                submit_bio(READ_SYNC, bio);
@@ -2144,21 +2144,21 @@ static void lbmStartIO(struct lbuf * bp)
        jfs_info("lbmStartIO\n");
 
        bio = bio_alloc(GFP_NOFS, 1);
-       bio->bi_sector = bp->l_blkno << (log->l2bsize - 9);
+       bio->bi_iter.bi_sector = bp->l_blkno << (log->l2bsize - 9);
        bio->bi_bdev = log->bdev;
        bio->bi_io_vec[0].bv_page = bp->l_page;
        bio->bi_io_vec[0].bv_len = LOGPSIZE;
        bio->bi_io_vec[0].bv_offset = bp->l_offset;
 
        bio->bi_vcnt = 1;
-       bio->bi_size = LOGPSIZE;
+       bio->bi_iter.bi_size = LOGPSIZE;
 
        bio->bi_end_io = lbmIODone;
        bio->bi_private = bp;
 
        /* check if journaling to disk has been disabled */
        if (log->no_integrity) {
-               bio->bi_size = 0;
+               bio->bi_iter.bi_size = 0;
                lbmIODone(bio, 0);
        } else {
                submit_bio(WRITE_SYNC, bio);
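
One detail shared by both jfs hunks: when journaling to disk is disabled, the log buffer is completed by hand instead of being submitted. Annotated, the idiom is (comments ours):

    if (log->no_integrity) {
            bio->bi_iter.bi_size = 0;       /* no residual bytes to transfer */
            lbmIODone(bio, 0);              /* invoke the completion path directly */
    } else {
            submit_bio(WRITE_SYNC, bio);    /* normal asynchronous submission */
    }

Zeroing bi_iter.bi_size (formerly bi_size) marks the transfer as fully complete, so the end_io handler sees a clean, finished bio without any device I/O having taken place.
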
index d165cde..49ba7ff 100644 (file)
@@ -416,7 +416,7 @@ static int metapage_writepage(struct page *page, struct writeback_control *wbc)
                         * count from hitting zero before we're through
                         */
                        inc_io(page);
-                       if (!bio->bi_size)
+                       if (!bio->bi_iter.bi_size)
                                goto dump_bio;
                        submit_bio(WRITE, bio);
                        nr_underway++;
@@ -438,7 +438,7 @@ static int metapage_writepage(struct page *page, struct writeback_control *wbc)
 
                bio = bio_alloc(GFP_NOFS, 1);
                bio->bi_bdev = inode->i_sb->s_bdev;
-               bio->bi_sector = pblock << (inode->i_blkbits - 9);
+               bio->bi_iter.bi_sector = pblock << (inode->i_blkbits - 9);
                bio->bi_end_io = metapage_write_end_io;
                bio->bi_private = page;
 
@@ -452,7 +452,7 @@ static int metapage_writepage(struct page *page, struct writeback_control *wbc)
        if (bio) {
                if (bio_add_page(bio, page, bio_bytes, bio_offset) < bio_bytes)
                                goto add_failed;
-               if (!bio->bi_size)
+               if (!bio->bi_iter.bi_size)
                        goto dump_bio;
 
                submit_bio(WRITE, bio);
@@ -517,7 +517,8 @@ static int metapage_readpage(struct file *fp, struct page *page)
 
                        bio = bio_alloc(GFP_NOFS, 1);
                        bio->bi_bdev = inode->i_sb->s_bdev;
-                       bio->bi_sector = pblock << (inode->i_blkbits - 9);
+                       bio->bi_iter.bi_sector =
+                               pblock << (inode->i_blkbits - 9);
                        bio->bi_end_io = metapage_read_end_io;
                        bio->bi_private = page;
                        len = xlen << inode->i_blkbits;
index 0f95f0d..76279e1 100644 (file)
@@ -26,9 +26,9 @@ static int sync_request(struct page *page, struct block_device *bdev, int rw)
        bio_vec.bv_len = PAGE_SIZE;
        bio_vec.bv_offset = 0;
        bio.bi_vcnt = 1;
-       bio.bi_size = PAGE_SIZE;
        bio.bi_bdev = bdev;
-       bio.bi_sector = page->index * (PAGE_SIZE >> 9);
+       bio.bi_iter.bi_sector = page->index * (PAGE_SIZE >> 9);
+       bio.bi_iter.bi_size = PAGE_SIZE;
 
        return submit_bio_wait(rw, &bio);
 }
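
sync_request() is a reminder that a bio and its single bio_vec can live entirely on the stack when the I/O is synchronous. A sketch of the whole function after this patch, with the lines above the hunk filled in on the assumption that they follow the usual bio_init() pattern (treat those first lines as illustrative):

    static int sync_request(struct page *page, struct block_device *bdev, int rw)
    {
            struct bio bio;
            struct bio_vec bio_vec;

            bio_init(&bio);
            bio.bi_max_vecs = 1;
            bio.bi_io_vec = &bio_vec;
            bio_vec.bv_page = page;
            bio_vec.bv_len = PAGE_SIZE;
            bio_vec.bv_offset = 0;
            bio.bi_vcnt = 1;
            bio.bi_bdev = bdev;
            bio.bi_iter.bi_sector = page->index * (PAGE_SIZE >> 9);
            bio.bi_iter.bi_size = PAGE_SIZE;

            return submit_bio_wait(rw, &bio);       /* blocks until the I/O completes */
    }
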
@@ -56,22 +56,18 @@ static DECLARE_WAIT_QUEUE_HEAD(wq);
 static void writeseg_end_io(struct bio *bio, int err)
 {
        const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
-       struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
+       struct bio_vec *bvec;
+       int i;
        struct super_block *sb = bio->bi_private;
        struct logfs_super *super = logfs_super(sb);
-       struct page *page;
 
        BUG_ON(!uptodate); /* FIXME: Retry io or write elsewhere */
        BUG_ON(err);
-       BUG_ON(bio->bi_vcnt == 0);
-       do {
-               page = bvec->bv_page;
-               if (--bvec >= bio->bi_io_vec)
-                       prefetchw(&bvec->bv_page->flags);
-
-               end_page_writeback(page);
-               page_cache_release(page);
-       } while (bvec >= bio->bi_io_vec);
+
+       bio_for_each_segment_all(bvec, bio, i) {
+               end_page_writeback(bvec->bv_page);
+               page_cache_release(bvec->bv_page);
+       }
        bio_put(bio);
        if (atomic_dec_and_test(&super->s_pending_writes))
                wake_up(&wq);
@@ -96,9 +92,9 @@ static int __bdev_writeseg(struct super_block *sb, u64 ofs, pgoff_t index,
                if (i >= max_pages) {
                        /* Block layer cannot split bios :( */
                        bio->bi_vcnt = i;
-                       bio->bi_size = i * PAGE_SIZE;
+                       bio->bi_iter.bi_size = i * PAGE_SIZE;
                        bio->bi_bdev = super->s_bdev;
-                       bio->bi_sector = ofs >> 9;
+                       bio->bi_iter.bi_sector = ofs >> 9;
                        bio->bi_private = sb;
                        bio->bi_end_io = writeseg_end_io;
                        atomic_inc(&super->s_pending_writes);
@@ -123,9 +119,9 @@ static int __bdev_writeseg(struct super_block *sb, u64 ofs, pgoff_t index,
                unlock_page(page);
        }
        bio->bi_vcnt = nr_pages;
-       bio->bi_size = nr_pages * PAGE_SIZE;
+       bio->bi_iter.bi_size = nr_pages * PAGE_SIZE;
        bio->bi_bdev = super->s_bdev;
-       bio->bi_sector = ofs >> 9;
+       bio->bi_iter.bi_sector = ofs >> 9;
        bio->bi_private = sb;
        bio->bi_end_io = writeseg_end_io;
        atomic_inc(&super->s_pending_writes);
@@ -188,9 +184,9 @@ static int do_erase(struct super_block *sb, u64 ofs, pgoff_t index,
                if (i >= max_pages) {
                        /* Block layer cannot split bios :( */
                        bio->bi_vcnt = i;
-                       bio->bi_size = i * PAGE_SIZE;
+                       bio->bi_iter.bi_size = i * PAGE_SIZE;
                        bio->bi_bdev = super->s_bdev;
-                       bio->bi_sector = ofs >> 9;
+                       bio->bi_iter.bi_sector = ofs >> 9;
                        bio->bi_private = sb;
                        bio->bi_end_io = erase_end_io;
                        atomic_inc(&super->s_pending_writes);
@@ -209,9 +205,9 @@ static int do_erase(struct super_block *sb, u64 ofs, pgoff_t index,
                bio->bi_io_vec[i].bv_offset = 0;
        }
        bio->bi_vcnt = nr_pages;
-       bio->bi_size = nr_pages * PAGE_SIZE;
+       bio->bi_iter.bi_size = nr_pages * PAGE_SIZE;
        bio->bi_bdev = super->s_bdev;
-       bio->bi_sector = ofs >> 9;
+       bio->bi_iter.bi_sector = ofs >> 9;
        bio->bi_private = sb;
        bio->bi_end_io = erase_end_io;
        atomic_inc(&super->s_pending_writes);
index 0face1c..4979ffa 100644 (file)
  */
 static void mpage_end_io(struct bio *bio, int err)
 {
-       const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
-       struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
+       struct bio_vec *bv;
+       int i;
 
-       do {
-               struct page *page = bvec->bv_page;
+       bio_for_each_segment_all(bv, bio, i) {
+               struct page *page = bv->bv_page;
 
-               if (--bvec >= bio->bi_io_vec)
-                       prefetchw(&bvec->bv_page->flags);
                if (bio_data_dir(bio) == READ) {
-                       if (uptodate) {
+                       if (!err) {
                                SetPageUptodate(page);
                        } else {
                                ClearPageUptodate(page);
@@ -60,14 +58,15 @@ static void mpage_end_io(struct bio *bio, int err)
                        }
                        unlock_page(page);
                } else { /* bio_data_dir(bio) == WRITE */
-                       if (!uptodate) {
+                       if (err) {
                                SetPageError(page);
                                if (page->mapping)
                                        set_bit(AS_EIO, &page->mapping->flags);
                        }
                        end_page_writeback(page);
                }
-       } while (bvec >= bio->bi_io_vec);
+       }
+
        bio_put(bio);
 }
 
@@ -94,7 +93,7 @@ mpage_alloc(struct block_device *bdev,
 
        if (bio) {
                bio->bi_bdev = bdev;
-               bio->bi_sector = first_sector;
+               bio->bi_iter.bi_sector = first_sector;
        }
        return bio;
 }
index e242bbf..56ff823 100644 (file)
@@ -134,8 +134,8 @@ bl_submit_bio(int rw, struct bio *bio)
        if (bio) {
                get_parallel(bio->bi_private);
                dprintk("%s submitting %s bio %u@%llu\n", __func__,
-                       rw == READ ? "read" : "write",
-                       bio->bi_size, (unsigned long long)bio->bi_sector);
+                       rw == READ ? "read" : "write", bio->bi_iter.bi_size,
+                       (unsigned long long)bio->bi_iter.bi_sector);
                submit_bio(rw, bio);
        }
        return NULL;
@@ -156,7 +156,8 @@ static struct bio *bl_alloc_init_bio(int npg, sector_t isect,
        }
 
        if (bio) {
-               bio->bi_sector = isect - be->be_f_offset + be->be_v_offset;
+               bio->bi_iter.bi_sector = isect - be->be_f_offset +
+                       be->be_v_offset;
                bio->bi_bdev = be->be_mdev;
                bio->bi_end_io = end_io;
                bio->bi_private = par;
@@ -201,18 +202,14 @@ static struct bio *bl_add_page_to_bio(struct bio *bio, int npg, int rw,
 static void bl_end_io_read(struct bio *bio, int err)
 {
        struct parallel_io *par = bio->bi_private;
-       const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
-       struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
+       struct bio_vec *bvec;
+       int i;
 
-       do {
-               struct page *page = bvec->bv_page;
+       if (!err)
+               bio_for_each_segment_all(bvec, bio, i)
+                       SetPageUptodate(bvec->bv_page);
 
-               if (--bvec >= bio->bi_io_vec)
-                       prefetchw(&bvec->bv_page->flags);
-               if (uptodate)
-                       SetPageUptodate(page);
-       } while (bvec >= bio->bi_io_vec);
-       if (!uptodate) {
+       if (err) {
                struct nfs_read_data *rdata = par->data;
                struct nfs_pgio_header *header = rdata->header;
 
@@ -383,20 +380,16 @@ static void mark_extents_written(struct pnfs_block_layout *bl,
 static void bl_end_io_write_zero(struct bio *bio, int err)
 {
        struct parallel_io *par = bio->bi_private;
-       const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
-       struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
-
-       do {
-               struct page *page = bvec->bv_page;
+       struct bio_vec *bvec;
+       int i;
 
-               if (--bvec >= bio->bi_io_vec)
-                       prefetchw(&bvec->bv_page->flags);
+       bio_for_each_segment_all(bvec, bio, i) {
                /* This is the zeroing page we added */
-               end_page_writeback(page);
-               page_cache_release(page);
-       } while (bvec >= bio->bi_io_vec);
+               end_page_writeback(bvec->bv_page);
+               page_cache_release(bvec->bv_page);
+       }
 
-       if (unlikely(!uptodate)) {
+       if (unlikely(err)) {
                struct nfs_write_data *data = par->data;
                struct nfs_pgio_header *header = data->header;
 
@@ -519,7 +512,7 @@ bl_do_readpage_sync(struct page *page, struct pnfs_block_extent *be,
        isect = (page->index << PAGE_CACHE_SECTOR_SHIFT) +
                (offset / SECTOR_SIZE);
 
-       bio->bi_sector = isect - be->be_f_offset + be->be_v_offset;
+       bio->bi_iter.bi_sector = isect - be->be_f_offset + be->be_v_offset;
        bio->bi_bdev = be->be_mdev;
        bio->bi_end_io = bl_read_single_end_io;
 
index d2255d7..aa9bc97 100644 (file)
@@ -924,11 +924,11 @@ static const struct inode_operations nfs3_dir_inode_operations = {
        .permission     = nfs_permission,
        .getattr        = nfs_getattr,
        .setattr        = nfs_setattr,
+#ifdef CONFIG_NFS_V3_ACL
        .listxattr      = generic_listxattr,
        .getxattr       = generic_getxattr,
        .setxattr       = generic_setxattr,
        .removexattr    = generic_removexattr,
-#ifdef CONFIG_NFS_V3_ACL
        .get_acl        = nfs3_get_acl,
        .set_acl        = nfs3_set_acl,
 #endif
@@ -938,11 +938,11 @@ static const struct inode_operations nfs3_file_inode_operations = {
        .permission     = nfs_permission,
        .getattr        = nfs_getattr,
        .setattr        = nfs_setattr,
+#ifdef CONFIG_NFS_V3_ACL
        .listxattr      = generic_listxattr,
        .getxattr       = generic_getxattr,
        .setxattr       = generic_setxattr,
        .removexattr    = generic_removexattr,
-#ifdef CONFIG_NFS_V3_ACL
        .get_acl        = nfs3_get_acl,
        .set_acl        = nfs3_set_acl,
 #endif
index 8b68218..a812fd1 100644 (file)
@@ -45,7 +45,7 @@ struct svc_rqst;
 
 struct nfs4_acl *nfs4_acl_new(int);
 int nfs4_acl_get_whotype(char *, u32);
-int nfs4_acl_write_who(int who, char *p);
+__be32 nfs4_acl_write_who(int who, __be32 **p, int *len);
 
 int nfsd4_get_nfs4_acl(struct svc_rqst *rqstp, struct dentry *dentry,
                struct nfs4_acl **acl);
index d5c5b3e..b582f9a 100644 (file)
@@ -84,12 +84,4 @@ int  nfsd_cache_lookup(struct svc_rqst *);
 void   nfsd_cache_update(struct svc_rqst *, int, __be32 *);
 int    nfsd_reply_cache_stats_open(struct inode *, struct file *);
 
-#ifdef CONFIG_NFSD_V4
-void   nfsd4_set_statp(struct svc_rqst *rqstp, __be32 *statp);
-#else  /* CONFIG_NFSD_V4 */
-static inline void nfsd4_set_statp(struct svc_rqst *rqstp, __be32 *statp)
-{
-}
-#endif /* CONFIG_NFSD_V4 */
-
 #endif /* NFSCACHE_H */
index bf95f6b..66e58db 100644 (file)
@@ -56,7 +56,7 @@ static inline void nfsd_idmap_shutdown(struct net *net)
 
 __be32 nfsd_map_name_to_uid(struct svc_rqst *, const char *, size_t, kuid_t *);
 __be32 nfsd_map_name_to_gid(struct svc_rqst *, const char *, size_t, kgid_t *);
-int nfsd_map_uid_to_name(struct svc_rqst *, kuid_t, char *);
-int nfsd_map_gid_to_name(struct svc_rqst *, kgid_t, char *);
+__be32 nfsd4_encode_user(struct svc_rqst *, kuid_t, __be32 **, int *);
+__be32 nfsd4_encode_group(struct svc_rqst *, kgid_t, __be32 **, int *);
 
 #endif /* LINUX_NFSD_IDMAP_H */
index 849a7c3..d32b3aa 100644 (file)
@@ -95,6 +95,7 @@ struct nfsd_net {
        time_t nfsd4_grace;
 
        bool nfsd_net_up;
+       bool lockd_up;
 
        /*
         * Time of server startup
index 14d9ecb..de6e39e 100644 (file)
@@ -168,7 +168,7 @@ encode_fattr3(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp,
              struct kstat *stat)
 {
        *p++ = htonl(nfs3_ftypes[(stat->mode & S_IFMT) >> 12]);
-       *p++ = htonl((u32) stat->mode);
+       *p++ = htonl((u32) (stat->mode & S_IALLUGO));
        *p++ = htonl((u32) stat->nlink);
        *p++ = htonl((u32) from_kuid(&init_user_ns, stat->uid));
        *p++ = htonl((u32) from_kgid(&init_user_ns, stat->gid));
@@ -842,21 +842,21 @@ out:
 
 static __be32 *encode_entryplus_baggage(struct nfsd3_readdirres *cd, __be32 *p, const char *name, int namlen)
 {
-       struct svc_fh   fh;
+       struct svc_fh   *fh = &cd->scratch;
        __be32 err;
 
-       fh_init(&fh, NFS3_FHSIZE);
-       err = compose_entry_fh(cd, &fh, name, namlen);
+       fh_init(fh, NFS3_FHSIZE);
+       err = compose_entry_fh(cd, fh, name, namlen);
        if (err) {
                *p++ = 0;
                *p++ = 0;
                goto out;
        }
-       p = encode_post_op_attr(cd->rqstp, p, &fh);
+       p = encode_post_op_attr(cd->rqstp, p, fh);
        *p++ = xdr_one;                 /* yes, a file handle follows */
-       p = encode_fh(p, &fh);
+       p = encode_fh(p, fh);
 out:
-       fh_put(&fh);
+       fh_put(fh);
        return p;
 }
 
index 649ad7c..d3a5871 100644 (file)
@@ -38,6 +38,7 @@
 #include <linux/nfs_fs.h>
 #include <linux/export.h>
 #include "nfsfh.h"
+#include "nfsd.h"
 #include "acl.h"
 #include "vfs.h"
 
@@ -916,17 +917,22 @@ nfs4_acl_get_whotype(char *p, u32 len)
        return NFS4_ACL_WHO_NAMED;
 }
 
-int
-nfs4_acl_write_who(int who, char *p)
+__be32 nfs4_acl_write_who(int who, __be32 **p, int *len)
 {
        int i;
+       int bytes;
 
        for (i = 0; i < ARRAY_SIZE(s2t_map); i++) {
-               if (s2t_map[i].type == who) {
-                       memcpy(p, s2t_map[i].string, s2t_map[i].stringlen);
-                       return s2t_map[i].stringlen;
-               }
+               if (s2t_map[i].type != who)
+                       continue;
+               bytes = 4 + (XDR_QUADLEN(s2t_map[i].stringlen) << 2);
+               if (bytes > *len)
+                       return nfserr_resource;
+               *p = xdr_encode_opaque(*p, s2t_map[i].string,
+                                       s2t_map[i].stringlen);
+               *len -= bytes;
+               return 0;
        }
-       BUG();
+       WARN_ON_ONCE(1);
        return -1;
 }
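
The sizing check added to nfs4_acl_write_who() is the standard arithmetic for an XDR opaque: a four-byte length prefix plus the payload rounded up to a four-byte boundary, where XDR_QUADLEN(l) is ((l) + 3) >> 2, the number of 32-bit words needed. A worked example with a hypothetical input:

    /* who maps to "OWNER@", stringlen = 6:
     *   XDR_QUADLEN(6) = (6 + 3) >> 2 = 2 words
     *   bytes = 4 + (2 << 2) = 12  (length word + 8 bytes of padded data)
     * If fewer than 12 bytes remain in *len, return nfserr_resource
     * instead of overrunning the reply buffer.
     */
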
index 4832fd8..c0dfde6 100644 (file)
@@ -551,27 +551,46 @@ idmap_name_to_id(struct svc_rqst *rqstp, int type, const char *name, u32 namelen
        return 0;
 }
 
-static int
-idmap_id_to_name(struct svc_rqst *rqstp, int type, u32 id, char *name)
+static __be32 encode_ascii_id(u32 id, __be32 **p, int *buflen)
+{
+       char buf[11];
+       int len;
+       int bytes;
+
+       len = sprintf(buf, "%u", id);
+       bytes = 4 + (XDR_QUADLEN(len) << 2);
+       if (bytes > *buflen)
+               return nfserr_resource;
+       *p = xdr_encode_opaque(*p, buf, len);
+       *buflen -= bytes;
+       return 0;
+}
+
+static __be32 idmap_id_to_name(struct svc_rqst *rqstp, int type, u32 id, __be32 **p, int *buflen)
 {
        struct ent *item, key = {
                .id = id,
                .type = type,
        };
        int ret;
+       int bytes;
        struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
 
        strlcpy(key.authname, rqst_authname(rqstp), sizeof(key.authname));
        ret = idmap_lookup(rqstp, idtoname_lookup, &key, nn->idtoname_cache, &item);
        if (ret == -ENOENT)
-               return sprintf(name, "%u", id);
+               return encode_ascii_id(id, p, buflen);
        if (ret)
-               return ret;
+               return nfserrno(ret);
        ret = strlen(item->name);
-       BUG_ON(ret > IDMAP_NAMESZ);
-       memcpy(name, item->name, ret);
+       WARN_ON_ONCE(ret > IDMAP_NAMESZ);
+       bytes = 4 + (XDR_QUADLEN(ret) << 2);
+       if (bytes > *buflen)
+               return nfserr_resource;
+       *p = xdr_encode_opaque(*p, item->name, ret);
+       *buflen -= bytes;
        cache_put(&item->h, nn->idtoname_cache);
-       return ret;
+       return 0;
 }
 
 static bool
@@ -603,12 +622,11 @@ do_name_to_id(struct svc_rqst *rqstp, int type, const char *name, u32 namelen, u
        return idmap_name_to_id(rqstp, type, name, namelen, id);
 }
 
-static int
-do_id_to_name(struct svc_rqst *rqstp, int type, u32 id, char *name)
+static __be32 encode_name_from_id(struct svc_rqst *rqstp, int type, u32 id, __be32 **p, int *buflen)
 {
        if (nfs4_disable_idmapping && rqstp->rq_cred.cr_flavor < RPC_AUTH_GSS)
-               return sprintf(name, "%u", id);
-       return idmap_id_to_name(rqstp, type, id, name);
+               return encode_ascii_id(id, p, buflen);
+       return idmap_id_to_name(rqstp, type, id, p, buflen);
 }
 
 __be32
@@ -637,16 +655,14 @@ nfsd_map_name_to_gid(struct svc_rqst *rqstp, const char *name, size_t namelen,
        return status;
 }
 
-int
-nfsd_map_uid_to_name(struct svc_rqst *rqstp, kuid_t uid, char *name)
+__be32 nfsd4_encode_user(struct svc_rqst *rqstp, kuid_t uid,  __be32 **p, int *buflen)
 {
        u32 id = from_kuid(&init_user_ns, uid);
-       return do_id_to_name(rqstp, IDMAP_TYPE_USER, id, name);
+       return encode_name_from_id(rqstp, IDMAP_TYPE_USER, id, p, buflen);
 }
 
-int
-nfsd_map_gid_to_name(struct svc_rqst *rqstp, kgid_t gid, char *name)
+__be32 nfsd4_encode_group(struct svc_rqst *rqstp, kgid_t gid, __be32 **p, int *buflen)
 {
        u32 id = from_kgid(&init_user_ns, gid);
-       return do_id_to_name(rqstp, IDMAP_TYPE_GROUP, id, name);
+       return encode_name_from_id(rqstp, IDMAP_TYPE_GROUP, id, p, buflen);
 }
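
encode_ascii_id()'s char buf[11] is sized for the worst case of printing a u32 in decimal: 4294967295 is ten digits, plus sprintf()'s terminating NUL. The length check then mirrors nfs4_acl_write_who() above (comments ours):

    len = sprintf(buf, "%u", id);           /* at most 10 digits for a u32 */
    bytes = 4 + (XDR_QUADLEN(len) << 2);    /* length word + padded payload */
    if (bytes > *buflen)
            return nfserr_resource;         /* would overrun the remaining reply space */
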
index 825b8a9..82189b2 100644 (file)
@@ -231,17 +231,16 @@ static void nfsd4_set_open_owner_reply_cache(struct nfsd4_compound_state *cstate
 }
 
 static __be32
-do_open_lookup(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_open *open)
+do_open_lookup(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_open *open, struct svc_fh **resfh)
 {
        struct svc_fh *current_fh = &cstate->current_fh;
-       struct svc_fh *resfh;
        int accmode;
        __be32 status;
 
-       resfh = kmalloc(sizeof(struct svc_fh), GFP_KERNEL);
-       if (!resfh)
+       *resfh = kmalloc(sizeof(struct svc_fh), GFP_KERNEL);
+       if (!*resfh)
                return nfserr_jukebox;
-       fh_init(resfh, NFS4_FHSIZE);
+       fh_init(*resfh, NFS4_FHSIZE);
        open->op_truncate = 0;
 
        if (open->op_create) {
@@ -266,12 +265,12 @@ do_open_lookup(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, stru
                 */
                status = do_nfsd_create(rqstp, current_fh, open->op_fname.data,
                                        open->op_fname.len, &open->op_iattr,
-                                       resfh, open->op_createmode,
+                                       *resfh, open->op_createmode,
                                        (u32 *)open->op_verf.data,
                                        &open->op_truncate, &open->op_created);
 
                if (!status && open->op_label.len)
-                       nfsd4_security_inode_setsecctx(resfh, &open->op_label, open->op_bmval);
+                       nfsd4_security_inode_setsecctx(*resfh, &open->op_label, open->op_bmval);
 
                /*
                 * Following rfc 3530 14.2.16, use the returned bitmask
@@ -281,31 +280,32 @@ do_open_lookup(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, stru
                if (open->op_createmode == NFS4_CREATE_EXCLUSIVE && status == 0)
                        open->op_bmval[1] = (FATTR4_WORD1_TIME_ACCESS |
                                                        FATTR4_WORD1_TIME_MODIFY);
-       } else {
+       } else
+               /*
+                * Note this may exit with the parent still locked.
+                * We will hold the lock until nfsd4_open's final
+                * lookup, to prevent renames or unlinks until we've had
+                * a chance to acquire a delegation if appropriate.
+                */
                status = nfsd_lookup(rqstp, current_fh,
-                                    open->op_fname.data, open->op_fname.len, resfh);
-               fh_unlock(current_fh);
-       }
+                                    open->op_fname.data, open->op_fname.len, *resfh);
        if (status)
                goto out;
-       status = nfsd_check_obj_isreg(resfh);
+       status = nfsd_check_obj_isreg(*resfh);
        if (status)
                goto out;
 
        if (is_create_with_attrs(open) && open->op_acl != NULL)
-               do_set_nfs4_acl(rqstp, resfh, open->op_acl, open->op_bmval);
+               do_set_nfs4_acl(rqstp, *resfh, open->op_acl, open->op_bmval);
 
-       nfsd4_set_open_owner_reply_cache(cstate, open, resfh);
+       nfsd4_set_open_owner_reply_cache(cstate, open, *resfh);
        accmode = NFSD_MAY_NOP;
        if (open->op_created ||
                        open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR)
                accmode |= NFSD_MAY_OWNER_OVERRIDE;
-       status = do_open_permission(rqstp, resfh, open, accmode);
+       status = do_open_permission(rqstp, *resfh, open, accmode);
        set_change_info(&open->op_cinfo, current_fh);
-       fh_dup2(current_fh, resfh);
 out:
-       fh_put(resfh);
-       kfree(resfh);
        return status;
 }
 
@@ -358,6 +358,7 @@ nfsd4_open(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
           struct nfsd4_open *open)
 {
        __be32 status;
+       struct svc_fh *resfh = NULL;
        struct nfsd4_compoundres *resp;
        struct net *net = SVC_NET(rqstp);
        struct nfsd_net *nn = net_generic(net, nfsd_net_id);
@@ -424,7 +425,7 @@ nfsd4_open(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
        switch (open->op_claim_type) {
                case NFS4_OPEN_CLAIM_DELEGATE_CUR:
                case NFS4_OPEN_CLAIM_NULL:
-                       status = do_open_lookup(rqstp, cstate, open);
+                       status = do_open_lookup(rqstp, cstate, open, &resfh);
                        if (status)
                                goto out;
                        break;
@@ -440,6 +441,7 @@ nfsd4_open(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
                        status = do_open_fhandle(rqstp, cstate, open);
                        if (status)
                                goto out;
+                       resfh = &cstate->current_fh;
                        break;
                case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
                case NFS4_OPEN_CLAIM_DELEGATE_PREV:
@@ -459,9 +461,14 @@ nfsd4_open(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
         * successful, it (1) truncates the file if open->op_truncate was
         * set, (2) sets open->op_stateid, (3) sets open->op_delegation.
         */
-       status = nfsd4_process_open2(rqstp, &cstate->current_fh, open);
+       status = nfsd4_process_open2(rqstp, resfh, open);
        WARN_ON(status && open->op_created);
 out:
+       if (resfh && resfh != &cstate->current_fh) {
+               fh_dup2(&cstate->current_fh, resfh);
+               fh_put(resfh);
+               kfree(resfh);
+       }
        nfsd4_cleanup_open_state(open, status);
        if (open->op_openowner && !nfsd4_has_session(cstate))
                cstate->replay_owner = &open->op_openowner->oo_owner;
@@ -1070,8 +1077,10 @@ _nfsd4_verify(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
                                    cstate->current_fh.fh_dentry, &p,
                                    count, verify->ve_bmval,
                                    rqstp, 0);
-
-       /* this means that nfsd4_encode_fattr() ran out of space */
+       /*
+        * If nfsd4_encode_fattr() ran out of space, assume that's because
+        * the attributes are longer (hence different) than those given:
+        */
        if (status == nfserr_resource)
                status = nfserr_not_same;
        if (status)
@@ -1525,7 +1534,8 @@ static inline u32 nfsd4_write_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
 static inline u32 nfsd4_exchange_id_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
 {
        return (op_encode_hdr_size + 2 + 1 + /* eir_clientid, eir_sequenceid */\
-               1 + 1 + 2 + /* eir_flags, spr_how, spo_must_enforce & _allow */\
+               1 + 1 + /* eir_flags, spr_how */\
+               4 + /* spo_must_enforce & _allow with bitmap */\
                2 + /*eir_server_owner.so_minor_id */\
                /* eir_server_owner.so_major_id<> */\
                XDR_QUADLEN(NFS4_OPAQUE_LIMIT) + 1 +\
@@ -1882,6 +1892,7 @@ struct svc_version        nfsd_version4 = {
                .vs_proc        = nfsd_procedures4,
                .vs_dispatch    = nfsd_dispatch,
                .vs_xdrsize     = NFS4_SVC_XDRSIZE,
+               .vs_rpcb_optnl  = 1,
 };
 
 /*
index 105d6fa..d5d070f 100644 (file)
@@ -832,10 +832,11 @@ static void nfsd4_put_drc_mem(struct nfsd4_channel_attrs *ca)
        spin_unlock(&nfsd_drc_lock);
 }
 
-static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *attrs)
+static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *fattrs,
+                                          struct nfsd4_channel_attrs *battrs)
 {
-       int numslots = attrs->maxreqs;
-       int slotsize = slot_bytes(attrs);
+       int numslots = fattrs->maxreqs;
+       int slotsize = slot_bytes(fattrs);
        struct nfsd4_session *new;
        int mem, i;
 
@@ -852,6 +853,10 @@ static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *attrs)
                if (!new->se_slots[i])
                        goto out_free;
        }
+
+       memcpy(&new->se_fchannel, fattrs, sizeof(struct nfsd4_channel_attrs));
+       memcpy(&new->se_bchannel, battrs, sizeof(struct nfsd4_channel_attrs));
+
        return new;
 out_free:
        while (i--)
@@ -997,8 +1002,7 @@ static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, stru
        list_add(&new->se_perclnt, &clp->cl_sessions);
        spin_unlock(&clp->cl_lock);
        spin_unlock(&nn->client_lock);
-       memcpy(&new->se_fchannel, &cses->fore_channel,
-                       sizeof(struct nfsd4_channel_attrs));
+
        if (cses->flags & SESSION4_BACK_CHAN) {
                struct sockaddr *sa = svc_addr(rqstp);
                /*
@@ -1851,6 +1855,11 @@ static __be32 check_forechannel_attrs(struct nfsd4_channel_attrs *ca, struct nfs
        return nfs_ok;
 }
 
+#define NFSD_CB_MAX_REQ_SZ     ((NFS4_enc_cb_recall_sz + \
+                                RPC_MAX_HEADER_WITH_AUTH) * sizeof(__be32))
+#define NFSD_CB_MAX_RESP_SZ    ((NFS4_dec_cb_recall_sz + \
+                                RPC_MAX_REPHEADER_WITH_AUTH) * sizeof(__be32))
+
 static __be32 check_backchannel_attrs(struct nfsd4_channel_attrs *ca)
 {
        ca->headerpadsz = 0;
@@ -1861,9 +1870,9 @@ static __be32 check_backchannel_attrs(struct nfsd4_channel_attrs *ca)
         * less than 1k.  Tighten up this estimate in the unlikely event
         * it turns out to be a problem for some client:
         */
-       if (ca->maxreq_sz < NFS4_enc_cb_recall_sz + RPC_MAX_HEADER_WITH_AUTH)
+       if (ca->maxreq_sz < NFSD_CB_MAX_REQ_SZ)
                return nfserr_toosmall;
-       if (ca->maxresp_sz < NFS4_dec_cb_recall_sz + RPC_MAX_REPHEADER_WITH_AUTH)
+       if (ca->maxresp_sz < NFSD_CB_MAX_RESP_SZ)
                return nfserr_toosmall;
        ca->maxresp_cached = 0;
        if (ca->maxops < 2)
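
The two NFSD_CB_MAX_*_SZ macros fix a units mismatch: NFS4_enc_cb_recall_sz, NFS4_dec_cb_recall_sz and the RPC header constants are XDR word counts, while the client-supplied ca->maxreq_sz and ca->maxresp_sz are byte counts, so the old comparisons understated the server's requirement by a factor of four. Multiplying by sizeof(__be32) puts both sides in bytes; with hypothetical numbers:

    /* if NFS4_enc_cb_recall_sz + RPC_MAX_HEADER_WITH_AUTH were 26 words,
     * NFSD_CB_MAX_REQ_SZ would be 26 * 4 = 104 bytes, so a client offering
     * a 64-byte backchannel maxreq_sz is now correctly refused as toosmall.
     */
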
@@ -1913,9 +1922,9 @@ nfsd4_create_session(struct svc_rqst *rqstp,
                return status;
        status = check_backchannel_attrs(&cr_ses->back_channel);
        if (status)
-               return status;
+               goto out_release_drc_mem;
        status = nfserr_jukebox;
-       new = alloc_session(&cr_ses->fore_channel);
+       new = alloc_session(&cr_ses->fore_channel, &cr_ses->back_channel);
        if (!new)
                goto out_release_drc_mem;
        conn = alloc_conn_from_crses(rqstp, cr_ses);
@@ -3034,18 +3043,18 @@ static int nfs4_setlease(struct nfs4_delegation *dp)
        if (!fl)
                return -ENOMEM;
        fl->fl_file = find_readable_file(fp);
-       list_add(&dp->dl_perclnt, &dp->dl_stid.sc_client->cl_delegations);
        status = vfs_setlease(fl->fl_file, fl->fl_type, &fl);
-       if (status) {
-               list_del_init(&dp->dl_perclnt);
-               locks_free_lock(fl);
-               return status;
-       }
+       if (status)
+               goto out_free;
+       list_add(&dp->dl_perclnt, &dp->dl_stid.sc_client->cl_delegations);
        fp->fi_lease = fl;
        fp->fi_deleg_file = get_file(fl->fl_file);
        atomic_set(&fp->fi_delegees, 1);
        list_add(&dp->dl_perfile, &fp->fi_delegations);
        return 0;
+out_free:
+       locks_free_lock(fl);
+       return status;
 }
 
 static int nfs4_set_delegation(struct nfs4_delegation *dp, struct nfs4_file *fp)
@@ -3125,6 +3134,7 @@ nfs4_open_delegation(struct net *net, struct svc_fh *fh,
                                goto out_no_deleg;
                        break;
                case NFS4_OPEN_CLAIM_NULL:
+               case NFS4_OPEN_CLAIM_FH:
                        /*
                         * Let's not give out any delegations till everyone's
                         * had the chance to reclaim theirs....
index ee7237f..63f2395 100644 (file)
@@ -103,11 +103,6 @@ xdr_error:                                 \
        (x) = (u64)ntohl(*p++) << 32;           \
        (x) |= ntohl(*p++);                     \
 } while (0)
-#define READTIME(x)       do {                 \
-       p++;                                    \
-       (x) = ntohl(*p++);                      \
-       p++;                                    \
-} while (0)
 #define READMEM(x,nbytes) do {                 \
        x = (char *)p;                          \
        p += XDR_QUADLEN(nbytes);               \
@@ -190,6 +185,15 @@ static int zero_clientid(clientid_t *clid)
        return (clid->cl_boot == 0) && (clid->cl_id == 0);
 }
 
+/**
+ * defer_free - mark an allocation for deferred freeing
+ * @argp: NFSv4 compound argument structure @p is to be freed with
+ * @release: release callback to free @p, typically kfree()
+ * @p: pointer to be freed
+ *
+ * Marks @p to be freed when processing the compound operation
+ * described in @argp finishes.
+ */
 static int
 defer_free(struct nfsd4_compoundargs *argp,
                void (*release)(const void *), void *p)
@@ -206,6 +210,16 @@ defer_free(struct nfsd4_compoundargs *argp,
        return 0;
 }
 
+/**
+ * savemem - duplicate a chunk of memory for later processing
+ * @argp: NFSv4 compound argument structure @p is to be freed with
+ * @p: pointer to be duplicated
+ * @nbytes: length to be duplicated
+ *
+ * Returns a pointer to a copy of @nbytes bytes of memory at @p
+ * that are preserved until processing of the NFSv4 compound
+ * operation described by @argp finishes.
+ */
 static char *savemem(struct nfsd4_compoundargs *argp, __be32 *p, int nbytes)
 {
        if (p == argp->tmp) {
@@ -257,7 +271,6 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval,
        int expected_len, len = 0;
        u32 dummy32;
        char *buf;
-       int host_err;
 
        DECODE_HEAD;
        iattr->ia_valid = 0;
@@ -284,10 +297,9 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval,
                        return nfserr_resource;
 
                *acl = nfs4_acl_new(nace);
-               if (*acl == NULL) {
-                       host_err = -ENOMEM;
-                       goto out_nfserr;
-               }
+               if (*acl == NULL)
+                       return nfserr_jukebox;
+
                defer_free(argp, kfree, *acl);
 
                (*acl)->naces = nace;
@@ -425,10 +437,6 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval,
                goto xdr_error;
 
        DECODE_TAIL;
-
-out_nfserr:
-       status = nfserrno(host_err);
-       goto out;
 }
 
 static __be32
@@ -1957,56 +1965,16 @@ static u32 nfs4_file_type(umode_t mode)
        };
 }
 
-static __be32
-nfsd4_encode_name(struct svc_rqst *rqstp, int whotype, kuid_t uid, kgid_t gid,
-                       __be32 **p, int *buflen)
-{
-       int status;
-
-       if (*buflen < (XDR_QUADLEN(IDMAP_NAMESZ) << 2) + 4)
-               return nfserr_resource;
-       if (whotype != NFS4_ACL_WHO_NAMED)
-               status = nfs4_acl_write_who(whotype, (u8 *)(*p + 1));
-       else if (gid_valid(gid))
-               status = nfsd_map_gid_to_name(rqstp, gid, (u8 *)(*p + 1));
-       else
-               status = nfsd_map_uid_to_name(rqstp, uid, (u8 *)(*p + 1));
-       if (status < 0)
-               return nfserrno(status);
-       *p = xdr_encode_opaque(*p, NULL, status);
-       *buflen -= (XDR_QUADLEN(status) << 2) + 4;
-       BUG_ON(*buflen < 0);
-       return 0;
-}
-
-static inline __be32
-nfsd4_encode_user(struct svc_rqst *rqstp, kuid_t user, __be32 **p, int *buflen)
-{
-       return nfsd4_encode_name(rqstp, NFS4_ACL_WHO_NAMED, user, INVALID_GID,
-                                p, buflen);
-}
-
-static inline __be32
-nfsd4_encode_group(struct svc_rqst *rqstp, kgid_t group, __be32 **p, int *buflen)
-{
-       return nfsd4_encode_name(rqstp, NFS4_ACL_WHO_NAMED, INVALID_UID, group,
-                                p, buflen);
-}
-
 static inline __be32
 nfsd4_encode_aclname(struct svc_rqst *rqstp, struct nfs4_ace *ace,
                __be32 **p, int *buflen)
 {
-       kuid_t uid = INVALID_UID;
-       kgid_t gid = INVALID_GID;
-
-       if (ace->whotype == NFS4_ACL_WHO_NAMED) {
-               if (ace->flag & NFS4_ACE_IDENTIFIER_GROUP)
-                       gid = ace->who_gid;
-               else
-                       uid = ace->who_uid;
-       }
-       return nfsd4_encode_name(rqstp, ace->whotype, uid, gid, p, buflen);
+       if (ace->whotype != NFS4_ACL_WHO_NAMED)
+               return nfs4_acl_write_who(ace->whotype, p, buflen);
+       else if (ace->flag & NFS4_ACE_IDENTIFIER_GROUP)
+               return nfsd4_encode_group(rqstp, ace->who_gid, p, buflen);
+       else
+               return nfsd4_encode_user(rqstp, ace->who_uid, p, buflen);
 }
 
 #define WORD0_ABSENT_FS_ATTRS (FATTR4_WORD0_FS_LOCATIONS | FATTR4_WORD0_FSID | \
@@ -2090,7 +2058,7 @@ nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
        u32 bmval1 = bmval[1];
        u32 bmval2 = bmval[2];
        struct kstat stat;
-       struct svc_fh tempfh;
+       struct svc_fh *tempfh = NULL;
        struct kstatfs statfs;
        int buflen = count << 2;
        __be32 *attrlenp;
@@ -2137,11 +2105,15 @@ nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
                        goto out_nfserr;
        }
        if ((bmval0 & (FATTR4_WORD0_FILEHANDLE | FATTR4_WORD0_FSID)) && !fhp) {
-               fh_init(&tempfh, NFS4_FHSIZE);
-               status = fh_compose(&tempfh, exp, dentry, NULL);
+               tempfh = kmalloc(sizeof(struct svc_fh), GFP_KERNEL);
+               status = nfserr_jukebox;
+               if (!tempfh)
+                       goto out;
+               fh_init(tempfh, NFS4_FHSIZE);
+               status = fh_compose(tempfh, exp, dentry, NULL);
                if (status)
                        goto out;
-               fhp = &tempfh;
+               fhp = tempfh;
        }
        if (bmval0 & (FATTR4_WORD0_ACL | FATTR4_WORD0_ACLSUPPORT
                        | FATTR4_WORD0_SUPPORTED_ATTRS)) {
@@ -2222,8 +2194,10 @@ nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
                if ((buflen -= 4) < 0)
                        goto out_resource;
                dummy = nfs4_file_type(stat.mode);
-               if (dummy == NF4BAD)
-                       goto out_serverfault;
+               if (dummy == NF4BAD) {
+                       status = nfserr_serverfault;
+                       goto out;
+               }
                WRITE32(dummy);
        }
        if (bmval0 & FATTR4_WORD0_FH_EXPIRE_TYPE) {
@@ -2317,8 +2291,6 @@ nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
                        WRITE32(ace->flag);
                        WRITE32(ace->access_mask & NFS4_ACE_MASK_ALL);
                        status = nfsd4_encode_aclname(rqstp, ace, &p, &buflen);
-                       if (status == nfserr_resource)
-                               goto out_resource;
                        if (status)
                                goto out;
                }
@@ -2379,8 +2351,6 @@ out_acl:
        }
        if (bmval0 & FATTR4_WORD0_FS_LOCATIONS) {
                status = nfsd4_encode_fs_locations(rqstp, exp, &p, &buflen);
-               if (status == nfserr_resource)
-                       goto out_resource;
                if (status)
                        goto out;
        }
@@ -2431,15 +2401,11 @@ out_acl:
        }
        if (bmval1 & FATTR4_WORD1_OWNER) {
                status = nfsd4_encode_user(rqstp, stat.uid, &p, &buflen);
-               if (status == nfserr_resource)
-                       goto out_resource;
                if (status)
                        goto out;
        }
        if (bmval1 & FATTR4_WORD1_OWNER_GROUP) {
                status = nfsd4_encode_group(rqstp, stat.gid, &p, &buflen);
-               if (status == nfserr_resource)
-                       goto out_resource;
                if (status)
                        goto out;
        }
@@ -2533,8 +2499,8 @@ out:
                security_release_secctx(context, contextlen);
 #endif /* CONFIG_NFSD_V4_SECURITY_LABEL */
        kfree(acl);
-       if (fhp == &tempfh)
-               fh_put(&tempfh);
+       if (tempfh)
+               fh_put(tempfh);
        return status;
 out_nfserr:
        status = nfserrno(err);
@@ -2542,9 +2508,6 @@ out_nfserr:
 out_resource:
        status = nfserr_resource;
        goto out;
-out_serverfault:
-       status = nfserr_serverfault;
-       goto out;
 }
 
 static inline int attributes_need_mount(u32 *bmval)
@@ -2621,17 +2584,14 @@ out_put:
 static __be32 *
 nfsd4_encode_rdattr_error(__be32 *p, int buflen, __be32 nfserr)
 {
-       __be32 *attrlenp;
-
        if (buflen < 6)
                return NULL;
        *p++ = htonl(2);
        *p++ = htonl(FATTR4_WORD0_RDATTR_ERROR); /* bmval0 */
        *p++ = htonl(0);                         /* bmval1 */
 
-       attrlenp = p++;
+       *p++ = htonl(4);     /* attribute length */
        *p++ = nfserr;       /* no htonl */
-       *attrlenp = htonl((char *)p - (char *)attrlenp - 4);
        return p;
 }
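
The deferred attrlenp backpatch was unnecessary here: the attribute block for a directory entry's rdattr_error carries exactly one fixed-size attribute, so its length is always the constant 4. The words the function now emits, for reference (comments ours):

    /*   htonl(2)                           bitmap length, in words
     *   htonl(FATTR4_WORD0_RDATTR_ERROR)   bmval0
     *   htonl(0)                           bmval1
     *   htonl(4)                           attribute data length, in bytes
     *   nfserr                             the rdattr_error status, already big-endian
     */
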
 
@@ -3244,7 +3204,7 @@ nfsd4_do_encode_secinfo(struct nfsd4_compoundres *resp,
 
                if (rpcauth_get_gssinfo(pf, &info) == 0) {
                        supported++;
-                       RESERVE_SPACE(4 + 4 + info.oid.len + 4 + 4);
+                       RESERVE_SPACE(4 + 4 + XDR_LEN(info.oid.len) + 4 + 4);
                        WRITE32(RPC_AUTH_GSS);
                        WRITE32(info.oid.len);
                        WRITEMEM(info.oid.data, info.oid.len);
@@ -3379,35 +3339,43 @@ nfsd4_encode_exchange_id(struct nfsd4_compoundres *resp, __be32 nfserr,
                8 /* eir_clientid */ +
                4 /* eir_sequenceid */ +
                4 /* eir_flags */ +
-               4 /* spr_how */ +
-               8 /* spo_must_enforce, spo_must_allow */ +
-               8 /* so_minor_id */ +
-               4 /* so_major_id.len */ +
-               (XDR_QUADLEN(major_id_sz) * 4) +
-               4 /* eir_server_scope.len */ +
-               (XDR_QUADLEN(server_scope_sz) * 4) +
-               4 /* eir_server_impl_id.count (0) */);
+               4 /* spr_how */);
 
        WRITEMEM(&exid->clientid, 8);
        WRITE32(exid->seqid);
        WRITE32(exid->flags);
 
        WRITE32(exid->spa_how);
+       ADJUST_ARGS();
+
        switch (exid->spa_how) {
        case SP4_NONE:
                break;
        case SP4_MACH_CRED:
+               /* spo_must_enforce, spo_must_allow */
+               RESERVE_SPACE(16);
+
                /* spo_must_enforce bitmap: */
                WRITE32(2);
                WRITE32(nfs4_minimal_spo_must_enforce[0]);
                WRITE32(nfs4_minimal_spo_must_enforce[1]);
                /* empty spo_must_allow bitmap: */
                WRITE32(0);
+
+               ADJUST_ARGS();
                break;
        default:
                WARN_ON_ONCE(1);
        }
 
+       RESERVE_SPACE(
+               8 /* so_minor_id */ +
+               4 /* so_major_id.len */ +
+               (XDR_QUADLEN(major_id_sz) * 4) +
+               4 /* eir_server_scope.len */ +
+               (XDR_QUADLEN(server_scope_sz) * 4) +
+               4 /* eir_server_impl_id.count (0) */);
+
        /* The server_owner struct */
        WRITE64(minor_id);      /* Minor id */
        /* major id */
@@ -3473,28 +3441,6 @@ nfsd4_encode_create_session(struct nfsd4_compoundres *resp, __be32 nfserr,
        return 0;
 }
 
-static __be32
-nfsd4_encode_destroy_session(struct nfsd4_compoundres *resp, __be32 nfserr,
-                            struct nfsd4_destroy_session *destroy_session)
-{
-       return nfserr;
-}
-
-static __be32
-nfsd4_encode_free_stateid(struct nfsd4_compoundres *resp, __be32 nfserr,
-                         struct nfsd4_free_stateid *free_stateid)
-{
-       __be32 *p;
-
-       if (nfserr)
-               return nfserr;
-
-       RESERVE_SPACE(4);
-       *p++ = nfserr;
-       ADJUST_ARGS();
-       return nfserr;
-}
-
 static __be32
 nfsd4_encode_sequence(struct nfsd4_compoundres *resp, __be32 nfserr,
                      struct nfsd4_sequence *seq)
@@ -3593,8 +3539,8 @@ static nfsd4_enc nfsd4_enc_ops[] = {
        [OP_BIND_CONN_TO_SESSION] = (nfsd4_enc)nfsd4_encode_bind_conn_to_session,
        [OP_EXCHANGE_ID]        = (nfsd4_enc)nfsd4_encode_exchange_id,
        [OP_CREATE_SESSION]     = (nfsd4_enc)nfsd4_encode_create_session,
-       [OP_DESTROY_SESSION]    = (nfsd4_enc)nfsd4_encode_destroy_session,
-       [OP_FREE_STATEID]       = (nfsd4_enc)nfsd4_encode_free_stateid,
+       [OP_DESTROY_SESSION]    = (nfsd4_enc)nfsd4_encode_noop,
+       [OP_FREE_STATEID]       = (nfsd4_enc)nfsd4_encode_noop,
        [OP_GET_DIR_DELEGATION] = (nfsd4_enc)nfsd4_encode_noop,
        [OP_GETDEVICEINFO]      = (nfsd4_enc)nfsd4_encode_noop,
        [OP_GETDEVICELIST]      = (nfsd4_enc)nfsd4_encode_noop,
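
Both dropped encoders were dead weight: nfsd4_encode_destroy_session() was already a bare "return nfserr", and nfsd4_encode_free_stateid() wrote the status a second time into the reply body even though the compound framing already carries it (its fr_status field is removed from struct nfsd4_free_stateid later in this merge). The shared replacement, as defined elsewhere in this file, amounts to the following sketch:

    static __be32
    nfsd4_encode_noop(struct nfsd4_compoundres *resp, __be32 nfserr, void *p)
    {
            return nfserr;  /* per-op status only; no op-specific body */
    }
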
index b6af150..f8f060f 100644 (file)
@@ -131,13 +131,6 @@ nfsd_reply_cache_alloc(void)
        return rp;
 }
 
-static void
-nfsd_reply_cache_unhash(struct svc_cacherep *rp)
-{
-       hlist_del_init(&rp->c_hash);
-       list_del_init(&rp->c_lru);
-}
-
 static void
 nfsd_reply_cache_free_locked(struct svc_cacherep *rp)
 {
@@ -416,22 +409,8 @@ nfsd_cache_lookup(struct svc_rqst *rqstp)
 
        /*
         * Since the common case is a cache miss followed by an insert,
-        * preallocate an entry. First, try to reuse the first entry on the LRU
-        * if it works, then go ahead and prune the LRU list.
+        * preallocate an entry.
         */
-       spin_lock(&cache_lock);
-       if (!list_empty(&lru_head)) {
-               rp = list_first_entry(&lru_head, struct svc_cacherep, c_lru);
-               if (nfsd_cache_entry_expired(rp) ||
-                   num_drc_entries >= max_drc_entries) {
-                       nfsd_reply_cache_unhash(rp);
-                       prune_cache_entries();
-                       goto search_cache;
-               }
-       }
-
-       /* No expired ones available, allocate a new one. */
-       spin_unlock(&cache_lock);
        rp = nfsd_reply_cache_alloc();
        spin_lock(&cache_lock);
        if (likely(rp)) {
@@ -439,7 +418,9 @@ nfsd_cache_lookup(struct svc_rqst *rqstp)
                drc_mem_usage += sizeof(*rp);
        }
 
-search_cache:
+       /* go ahead and prune the cache */
+       prune_cache_entries();
+
        found = nfsd_cache_search(rqstp, csum);
        if (found) {
                if (likely(rp))
@@ -453,15 +434,6 @@ search_cache:
                goto out;
        }
 
-       /*
-        * We're keeping the one we just allocated. Are we now over the
-        * limit? Prune one off the tip of the LRU in trade for the one we
-        * just allocated if so.
-        */
-       if (num_drc_entries >= max_drc_entries)
-               nfsd_reply_cache_free_locked(list_first_entry(&lru_head,
-                                               struct svc_cacherep, c_lru));
-
        nfsdstats.rcmisses++;
        rqstp->rq_cacherep = rp;
        rp->c_state = RC_INPROG;
index 760c85a..9a4a5f9 100644 (file)
@@ -241,6 +241,15 @@ static void nfsd_shutdown_generic(void)
        nfsd_racache_shutdown();
 }
 
+static bool nfsd_needs_lockd(void)
+{
+#if defined(CONFIG_NFSD_V3)
+       return (nfsd_versions[2] != NULL) || (nfsd_versions[3] != NULL);
+#else
+       return (nfsd_versions[2] != NULL);
+#endif
+}
+
 static int nfsd_startup_net(int nrservs, struct net *net)
 {
        struct nfsd_net *nn = net_generic(net, nfsd_net_id);
@@ -255,9 +264,14 @@ static int nfsd_startup_net(int nrservs, struct net *net)
        ret = nfsd_init_socks(net);
        if (ret)
                goto out_socks;
-       ret = lockd_up(net);
-       if (ret)
-               goto out_socks;
+
+       if (nfsd_needs_lockd() && !nn->lockd_up) {
+               ret = lockd_up(net);
+               if (ret)
+                       goto out_socks;
+               nn->lockd_up = 1;
+       }
+
        ret = nfs4_state_start_net(net);
        if (ret)
                goto out_lockd;
@@ -266,7 +280,10 @@ static int nfsd_startup_net(int nrservs, struct net *net)
        return 0;
 
 out_lockd:
-       lockd_down(net);
+       if (nn->lockd_up) {
+               lockd_down(net);
+               nn->lockd_up = 0;
+       }
 out_socks:
        nfsd_shutdown_generic();
        return ret;
@@ -277,7 +294,10 @@ static void nfsd_shutdown_net(struct net *net)
        struct nfsd_net *nn = net_generic(net, nfsd_net_id);
 
        nfs4_state_shutdown_net(net);
-       lockd_down(net);
+       if (nn->lockd_up) {
+               lockd_down(net);
+               nn->lockd_up = 0;
+       }
        nn->nfsd_net_up = false;
        nfsd_shutdown_generic();
 }
index 9c769a4..b17d932 100644 (file)
@@ -152,7 +152,7 @@ encode_fattr(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp,
        type = (stat->mode & S_IFMT);
 
        *p++ = htonl(nfs_ftypes[type >> 12]);
-       *p++ = htonl((u32) stat->mode);
+       *p++ = htonl((u32) (stat->mode & S_IALLUGO));
        *p++ = htonl((u32) stat->nlink);
        *p++ = htonl((u32) from_kuid(&init_user_ns, stat->uid));
        *p++ = htonl((u32) from_kgid(&init_user_ns, stat->gid));
index 1426eb6..017d3cb 100644 (file)
@@ -207,7 +207,12 @@ nfsd_lookup_dentry(struct svc_rqst *rqstp, struct svc_fh *fhp,
                                goto out_nfserr;
                }
        } else {
-               fh_lock(fhp);
+               /*
+                * In the nfsd4_open() case, this may be held across
+                * subsequent open and delegation acquisition which may
+                * need to take the child's i_mutex:
+                */
+               fh_lock_nested(fhp, I_MUTEX_PARENT);
                dentry = lookup_one_len(name, dparent, len);
                host_err = PTR_ERR(dentry);
                if (IS_ERR(dentry))
@@ -273,13 +278,6 @@ out:
        return err;
 }
 
-static int nfsd_break_lease(struct inode *inode)
-{
-       if (!S_ISREG(inode->i_mode))
-               return 0;
-       return break_lease(inode, O_WRONLY | O_NONBLOCK);
-}
-
 /*
  * Commit metadata changes to stable storage.
  */
@@ -348,8 +346,7 @@ nfsd_sanitize_attrs(struct inode *inode, struct iattr *iap)
 
        /* Revoke setuid/setgid on chown */
        if (!S_ISDIR(inode->i_mode) &&
-           (((iap->ia_valid & ATTR_UID) && !uid_eq(iap->ia_uid, inode->i_uid)) ||
-            ((iap->ia_valid & ATTR_GID) && !gid_eq(iap->ia_gid, inode->i_gid)))) {
+           ((iap->ia_valid & ATTR_UID) || (iap->ia_valid & ATTR_GID))) {
                iap->ia_valid |= ATTR_KILL_PRIV;
                if (iap->ia_valid & ATTR_MODE) {
                        /* we're setting mode too, just clear the s*id bits */
@@ -449,16 +446,10 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
                goto out_put_write_access;
        }
 
-       host_err = nfsd_break_lease(inode);
-       if (host_err)
-               goto out_put_write_access_nfserror;
-
        fh_lock(fhp);
        host_err = notify_change(dentry, iap, NULL);
        fh_unlock(fhp);
 
-out_put_write_access_nfserror:
-       err = nfserrno(host_err);
 out_put_write_access:
        if (size_change)
                put_write_access(inode);
@@ -1609,11 +1600,6 @@ nfsd_link(struct svc_rqst *rqstp, struct svc_fh *ffhp,
        err = nfserr_noent;
        if (!dold->d_inode)
                goto out_dput;
-       host_err = nfsd_break_lease(dold->d_inode);
-       if (host_err) {
-               err = nfserrno(host_err);
-               goto out_dput;
-       }
        host_err = vfs_link(dold, dirp, dnew, NULL);
        if (!host_err) {
                err = nfserrno(commit_metadata(ffhp));
@@ -1707,14 +1693,6 @@ nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen,
        if (ffhp->fh_export->ex_path.dentry != tfhp->fh_export->ex_path.dentry)
                goto out_dput_new;
 
-       host_err = nfsd_break_lease(odentry->d_inode);
-       if (host_err)
-               goto out_dput_new;
-       if (ndentry->d_inode) {
-               host_err = nfsd_break_lease(ndentry->d_inode);
-               if (host_err)
-                       goto out_dput_new;
-       }
        host_err = vfs_rename(fdir, odentry, tdir, ndentry, NULL);
        if (!host_err) {
                host_err = commit_metadata(tfhp);
@@ -1784,16 +1762,12 @@ nfsd_unlink(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
        if (!type)
                type = rdentry->d_inode->i_mode & S_IFMT;
 
-       host_err = nfsd_break_lease(rdentry->d_inode);
-       if (host_err)
-               goto out_put;
        if (type != S_IFDIR)
                host_err = vfs_unlink(dirp, rdentry, NULL);
        else
                host_err = vfs_rmdir(dirp, rdentry);
        if (!host_err)
                host_err = commit_metadata(fhp);
-out_put:
        dput(rdentry);
 
 out_nfserr:
index 1bc1d44..fbe90bd 100644 (file)
@@ -86,8 +86,6 @@ __be32                nfsd_link(struct svc_rqst *, struct svc_fh *,
 __be32         nfsd_rename(struct svc_rqst *,
                                struct svc_fh *, char *, int,
                                struct svc_fh *, char *, int);
-__be32         nfsd_remove(struct svc_rqst *,
-                               struct svc_fh *, char *, int);
 __be32         nfsd_unlink(struct svc_rqst *, struct svc_fh *, int type,
                                char *name, int len);
 __be32         nfsd_readdir(struct svc_rqst *, struct svc_fh *,
index b6d5542..335e04a 100644 (file)
@@ -174,6 +174,9 @@ struct nfsd3_linkres {
 struct nfsd3_readdirres {
        __be32                  status;
        struct svc_fh           fh;
+       /* Just to save kmalloc on every readdirplus entry (svc_fh is a
+        * little large for the stack): */
+       struct svc_fh           scratch;
        int                     count;
        __be32                  verf[2];
 
index b3ed644..d278a0d 100644 (file)
@@ -228,7 +228,7 @@ struct nfsd4_open {
        u32             op_create;          /* request */
        u32             op_createmode;      /* request */
        u32             op_bmval[3];        /* request */
-       struct iattr    iattr;              /* UNCHECKED4, GUARDED4, EXCLUSIVE4_1 */
+       struct iattr    op_iattr;           /* UNCHECKED4, GUARDED4, EXCLUSIVE4_1 */
        nfs4_verifier   op_verf __attribute__((aligned(32)));
                                            /* EXCLUSIVE4 */
        clientid_t      op_clientid;        /* request */
@@ -250,7 +250,6 @@ struct nfsd4_open {
        struct nfs4_acl *op_acl;
        struct xdr_netobj op_label;
 };
-#define op_iattr       iattr
 
 struct nfsd4_open_confirm {
        stateid_t       oc_req_stateid          /* request */;
@@ -374,7 +373,6 @@ struct nfsd4_test_stateid {
 
 struct nfsd4_free_stateid {
        stateid_t       fr_stateid;         /* request */
-       __be32          fr_status;          /* response */
 };
 
 /* also used for NVERIFY */
index 2d8be51..dc3a9ef 100644 (file)
@@ -416,7 +416,8 @@ static struct bio *nilfs_alloc_seg_bio(struct the_nilfs *nilfs, sector_t start,
        }
        if (likely(bio)) {
                bio->bi_bdev = nilfs->ns_bdev;
-               bio->bi_sector = start << (nilfs->ns_blocksize_bits - 9);
+               bio->bi_iter.bi_sector =
+                       start << (nilfs->ns_blocksize_bits - 9);
        }
        return bio;
 }
index 73920ff..bf482df 100644 (file)
@@ -413,7 +413,7 @@ static struct bio *o2hb_setup_one_bio(struct o2hb_region *reg,
        }
 
        /* Must put everything in 512 byte sectors for the bio... */
-       bio->bi_sector = (reg->hr_start_block + cs) << (bits - 9);
+       bio->bi_iter.bi_sector = (reg->hr_start_block + cs) << (bits - 9);
        bio->bi_bdev = reg->hr_bdev;
        bio->bi_private = wc;
        bio->bi_end_io = o2hb_bio_end_io;
index a267394..db2cfb0 100644 (file)
@@ -407,7 +407,7 @@ xfs_alloc_ioend_bio(
        struct bio              *bio = bio_alloc(GFP_NOIO, nvecs);
 
        ASSERT(bio->bi_private == NULL);
-       bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
+       bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
        bio->bi_bdev = bh->b_bdev;
        return bio;
 }
index 5175711..9c061ef 100644 (file)
@@ -1240,7 +1240,7 @@ next_chunk:
 
        bio = bio_alloc(GFP_NOIO, nr_pages);
        bio->bi_bdev = bp->b_target->bt_bdev;
-       bio->bi_sector = sector;
+       bio->bi_iter.bi_sector = sector;
        bio->bi_end_io = xfs_buf_bio_end_io;
        bio->bi_private = bp;
 
@@ -1262,7 +1262,7 @@ next_chunk:
                total_nr_pages--;
        }
 
-       if (likely(bio->bi_size)) {
+       if (likely(bio->bi_iter.bi_size)) {
                if (xfs_buf_is_vmapped(bp)) {
                        flush_kernel_vmap_range(bp->b_addr,
                                                xfs_buf_vmap_len(bp));
index 060ff69..7065452 100644 (file)
  * various member access, note that bio_data should of course not be used
  * on highmem page vectors
  */
-#define bio_iovec_idx(bio, idx)        (&((bio)->bi_io_vec[(idx)]))
-#define bio_iovec(bio)         bio_iovec_idx((bio), (bio)->bi_idx)
-#define bio_page(bio)          bio_iovec((bio))->bv_page
-#define bio_offset(bio)                bio_iovec((bio))->bv_offset
-#define bio_segments(bio)      ((bio)->bi_vcnt - (bio)->bi_idx)
-#define bio_sectors(bio)       ((bio)->bi_size >> 9)
-#define bio_end_sector(bio)    ((bio)->bi_sector + bio_sectors((bio)))
+#define __bvec_iter_bvec(bvec, iter)   (&(bvec)[(iter).bi_idx])
+
+#define bvec_iter_page(bvec, iter)                             \
+       (__bvec_iter_bvec((bvec), (iter))->bv_page)
+
+#define bvec_iter_len(bvec, iter)                              \
+       min((iter).bi_size,                                     \
+           __bvec_iter_bvec((bvec), (iter))->bv_len - (iter).bi_bvec_done)
+
+#define bvec_iter_offset(bvec, iter)                           \
+       (__bvec_iter_bvec((bvec), (iter))->bv_offset + (iter).bi_bvec_done)
+
+#define bvec_iter_bvec(bvec, iter)                             \
+((struct bio_vec) {                                            \
+       .bv_page        = bvec_iter_page((bvec), (iter)),       \
+       .bv_len         = bvec_iter_len((bvec), (iter)),        \
+       .bv_offset      = bvec_iter_offset((bvec), (iter)),     \
+})
+
+#define bio_iter_iovec(bio, iter)                              \
+       bvec_iter_bvec((bio)->bi_io_vec, (iter))
+
+#define bio_iter_page(bio, iter)                               \
+       bvec_iter_page((bio)->bi_io_vec, (iter))
+#define bio_iter_len(bio, iter)                                        \
+       bvec_iter_len((bio)->bi_io_vec, (iter))
+#define bio_iter_offset(bio, iter)                             \
+       bvec_iter_offset((bio)->bi_io_vec, (iter))
+
+#define bio_page(bio)          bio_iter_page((bio), (bio)->bi_iter)
+#define bio_offset(bio)                bio_iter_offset((bio), (bio)->bi_iter)
+#define bio_iovec(bio)         bio_iter_iovec((bio), (bio)->bi_iter)
+
+#define bio_multiple_segments(bio)                             \
+       ((bio)->bi_iter.bi_size != bio_iovec(bio).bv_len)
+#define bio_sectors(bio)       ((bio)->bi_iter.bi_size >> 9)
+#define bio_end_sector(bio)    ((bio)->bi_iter.bi_sector + bio_sectors((bio)))
+
+/*
+ * Check whether this bio carries any data or not. A NULL bio is allowed.
+ */
+static inline bool bio_has_data(struct bio *bio)
+{
+       if (bio &&
+           bio->bi_iter.bi_size &&
+           !(bio->bi_rw & REQ_DISCARD))
+               return true;
+
+       return false;
+}
+
+static inline bool bio_is_rw(struct bio *bio)
+{
+       if (!bio_has_data(bio))
+               return false;
+
+       if (bio->bi_rw & BIO_NO_ADVANCE_ITER_MASK)
+               return false;
+
+       return true;
+}
+
+static inline bool bio_mergeable(struct bio *bio)
+{
+       if (bio->bi_rw & REQ_NOMERGE_FLAGS)
+               return false;
+
+       return true;
+}
 
 static inline unsigned int bio_cur_bytes(struct bio *bio)
 {
-       if (bio->bi_vcnt)
-               return bio_iovec(bio)->bv_len;
+       if (bio_has_data(bio))
+               return bio_iovec(bio).bv_len;
        else /* dataless requests such as discard */
-               return bio->bi_size;
+               return bio->bi_iter.bi_size;
 }
 
 static inline void *bio_data(struct bio *bio)
 {
-       if (bio->bi_vcnt)
+       if (bio_has_data(bio))
                return page_address(bio_page(bio)) + bio_offset(bio);
 
        return NULL;
@@ -97,19 +159,16 @@ static inline void *bio_data(struct bio *bio)
  * permanent PIO fall back, user is probably better off disabling highmem
  * I/O completely on that queue (see ide-dma for example)
  */
-#define __bio_kmap_atomic(bio, idx)                            \
-       (kmap_atomic(bio_iovec_idx((bio), (idx))->bv_page) +    \
-               bio_iovec_idx((bio), (idx))->bv_offset)
+#define __bio_kmap_atomic(bio, iter)                           \
+       (kmap_atomic(bio_iter_iovec((bio), (iter)).bv_page) +   \
+               bio_iter_iovec((bio), (iter)).bv_offset)
 
-#define __bio_kunmap_atomic(addr) kunmap_atomic(addr)
+#define __bio_kunmap_atomic(addr)      kunmap_atomic(addr)
 
 /*
  * merge helpers etc
  */
 
-#define __BVEC_END(bio)                bio_iovec_idx((bio), (bio)->bi_vcnt - 1)
-#define __BVEC_START(bio)      bio_iovec_idx((bio), (bio)->bi_idx)
-
 /* Default implementation of BIOVEC_PHYS_MERGEABLE */
 #define __BIOVEC_PHYS_MERGEABLE(vec1, vec2)    \
        ((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))
@@ -126,33 +185,76 @@ static inline void *bio_data(struct bio *bio)
        (((addr1) | (mask)) == (((addr2) - 1) | (mask)))
 #define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
        __BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((q)))
-#define BIO_SEG_BOUNDARY(q, b1, b2) \
-       BIOVEC_SEG_BOUNDARY((q), __BVEC_END((b1)), __BVEC_START((b2)))
 
 #define bio_io_error(bio) bio_endio((bio), -EIO)
 
-/*
- * drivers should not use the __ version unless they _really_ know what
- * they're doing
- */
-#define __bio_for_each_segment(bvl, bio, i, start_idx)                 \
-       for (bvl = bio_iovec_idx((bio), (start_idx)), i = (start_idx);  \
-            i < (bio)->bi_vcnt;                                        \
-            bvl++, i++)
-
 /*
  * drivers should _never_ use the all version - the bio may have been split
  * before it got to the driver and the driver won't own all of it
  */
 #define bio_for_each_segment_all(bvl, bio, i)                          \
-       for (i = 0;                                                     \
-            bvl = bio_iovec_idx((bio), (i)), i < (bio)->bi_vcnt;       \
-            i++)
+       for (i = 0, bvl = (bio)->bi_io_vec; i < (bio)->bi_vcnt; i++, bvl++)
+
+static inline void bvec_iter_advance(struct bio_vec *bv, struct bvec_iter *iter,
+                                    unsigned bytes)
+{
+       WARN_ONCE(bytes > iter->bi_size,
+                 "Attempted to advance past end of bvec iter\n");
+
+       while (bytes) {
+               unsigned len = min(bytes, bvec_iter_len(bv, *iter));
+
+               bytes -= len;
+               iter->bi_size -= len;
+               iter->bi_bvec_done += len;
+
+               if (iter->bi_bvec_done == __bvec_iter_bvec(bv, *iter)->bv_len) {
+                       iter->bi_bvec_done = 0;
+                       iter->bi_idx++;
+               }
+       }
+}
+
+#define for_each_bvec(bvl, bio_vec, iter, start)                       \
+       for ((iter) = start;                                            \
+            (bvl) = bvec_iter_bvec((bio_vec), (iter)),                 \
+               (iter).bi_size;                                         \
+            bvec_iter_advance((bio_vec), &(iter), (bvl).bv_len))
+
+
+static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter,
+                                   unsigned bytes)
+{
+       iter->bi_sector += bytes >> 9;
+
+       if (bio->bi_rw & BIO_NO_ADVANCE_ITER_MASK)
+               iter->bi_size -= bytes;
+       else
+               bvec_iter_advance(bio->bi_io_vec, iter, bytes);
+}
 
-#define bio_for_each_segment(bvl, bio, i)                              \
-       for (i = (bio)->bi_idx;                                         \
-            bvl = bio_iovec_idx((bio), (i)), i < (bio)->bi_vcnt;       \
-            i++)
+#define __bio_for_each_segment(bvl, bio, iter, start)                  \
+       for (iter = (start);                                            \
+            (iter).bi_size &&                                          \
+               ((bvl = bio_iter_iovec((bio), (iter))), 1);             \
+            bio_advance_iter((bio), &(iter), (bvl).bv_len))
+
+#define bio_for_each_segment(bvl, bio, iter)                           \
+       __bio_for_each_segment(bvl, bio, iter, (bio)->bi_iter)
+
+#define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len)
+
+static inline unsigned bio_segments(struct bio *bio)
+{
+       unsigned segs = 0;
+       struct bio_vec bv;
+       struct bvec_iter iter;
+
+       bio_for_each_segment(bv, bio, iter)
+               segs++;
+
+       return segs;
+}
 
 /*
  * get a reference to a bio, so it won't disappear. the intended use is
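bio_for_each_segment() is now driven by a struct bvec_iter rather than an integer index, and yields bio_vecs by value, so the loop does the right thing on a bio that has been split or partially advanced. A checksum walk under the new convention (a sketch, assuming <linux/crc32c.h> and CONFIG_LIBCRC32C):

        struct bio_vec bv;
        struct bvec_iter iter;
        u32 crc = ~0;

        bio_for_each_segment(bv, bio, iter) {
                char *p = kmap_atomic(bv.bv_page);

                crc = crc32c(crc, p + bv.bv_offset, bv.bv_len);
                kunmap_atomic(p);
        }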
@@ -177,16 +279,15 @@ static inline void *bio_data(struct bio *bio)
 struct bio_integrity_payload {
        struct bio              *bip_bio;       /* parent bio */
 
-       sector_t                bip_sector;     /* virtual start sector */
+       struct bvec_iter        bip_iter;
 
+       /* kill - should just use bip_vec */
        void                    *bip_buf;       /* generated integrity data */
-       bio_end_io_t            *bip_end_io;    /* saved I/O completion fn */
 
-       unsigned int            bip_size;
+       bio_end_io_t            *bip_end_io;    /* saved I/O completion fn */
 
        unsigned short          bip_slab;       /* slab the bip came from */
        unsigned short          bip_vcnt;       /* # of integrity bio_vecs */
-       unsigned short          bip_idx;        /* current bip_vec index */
        unsigned                bip_owns_buf:1; /* should free bip_buf */
 
        struct work_struct      bip_work;       /* I/O completion */
@@ -196,29 +297,28 @@ struct bio_integrity_payload {
 };
 #endif /* CONFIG_BLK_DEV_INTEGRITY */
 
-/*
- * A bio_pair is used when we need to split a bio.
- * This can only happen for a bio that refers to just one
- * page of data, and in the unusual situation when the
- * page crosses a chunk/device boundary
+extern void bio_trim(struct bio *bio, int offset, int size);
+extern struct bio *bio_split(struct bio *bio, int sectors,
+                            gfp_t gfp, struct bio_set *bs);
+
+/**
+ * bio_next_split - get next @sectors from a bio, splitting if necessary
+ * @bio:       bio to split
+ * @sectors:   number of sectors to split from the front of @bio
+ * @gfp:       gfp mask
+ * @bs:                bio set to allocate from
  *
- * The address of the master bio is stored in bio1.bi_private
- * The address of the pool the pair was allocated from is stored
- *   in bio2.bi_private
+ * Returns a bio representing the next @sectors of @bio - if the bio is smaller
+ * than @sectors, returns the original bio unchanged.
  */
-struct bio_pair {
-       struct bio                      bio1, bio2;
-       struct bio_vec                  bv1, bv2;
-#if defined(CONFIG_BLK_DEV_INTEGRITY)
-       struct bio_integrity_payload    bip1, bip2;
-       struct bio_vec                  iv1, iv2;
-#endif
-       atomic_t                        cnt;
-       int                             error;
-};
-extern struct bio_pair *bio_split(struct bio *bi, int first_sectors);
-extern void bio_pair_release(struct bio_pair *dbio);
-extern void bio_trim(struct bio *bio, int offset, int size);
+static inline struct bio *bio_next_split(struct bio *bio, int sectors,
+                                        gfp_t gfp, struct bio_set *bs)
+{
+       if (sectors >= bio_sectors(bio))
+               return bio;
+
+       return bio_split(bio, sectors, gfp, bs);
+}
 
 extern struct bio_set *bioset_create(unsigned int, unsigned int);
 extern void bioset_free(struct bio_set *);
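bio_split() changes meaning here: the old bio_pair machinery could only split a single-page bio, while the new function returns a front split of arbitrary size allocated from a bio_set, with bio_next_split() as the convenience wrapper above. The intended driver pattern, chunking at most 8 sectors at a time (a sketch; bs is an assumed bio_set):

        struct bio *split;

        do {
                split = bio_next_split(bio, 8, GFP_NOIO, bs);
                if (split != bio)
                        bio_chain(split, bio);  /* parent completes last */
                generic_make_request(split);
        } while (split != bio);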
@@ -227,7 +327,8 @@ extern mempool_t *biovec_create_pool(struct bio_set *bs, int pool_entries);
 extern struct bio *bio_alloc_bioset(gfp_t, int, struct bio_set *);
 extern void bio_put(struct bio *);
 
-extern void __bio_clone(struct bio *, struct bio *);
+extern void __bio_clone_fast(struct bio *, struct bio *);
+extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *);
 extern struct bio *bio_clone_bioset(struct bio *, gfp_t, struct bio_set *bs);
 
 extern struct bio_set *fs_bio_set;
@@ -254,6 +355,7 @@ static inline struct bio *bio_clone_kmalloc(struct bio *bio, gfp_t gfp_mask)
 }
 
 extern void bio_endio(struct bio *, int);
+extern void bio_endio_nodec(struct bio *, int);
 struct request_queue;
 extern int bio_phys_segments(struct request_queue *, struct bio *);
 
@@ -262,12 +364,12 @@ extern void bio_advance(struct bio *, unsigned);
 
 extern void bio_init(struct bio *);
 extern void bio_reset(struct bio *);
+void bio_chain(struct bio *, struct bio *);
 
 extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int);
 extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
                           unsigned int, unsigned int);
 extern int bio_get_nr_vecs(struct block_device *);
-extern sector_t bio_sector_offset(struct bio *, unsigned short, unsigned int);
 extern struct bio *bio_map_user(struct request_queue *, struct block_device *,
                                unsigned long, unsigned int, int, gfp_t);
 struct sg_iovec;
@@ -357,47 +459,17 @@ static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
 }
 #endif
 
-static inline char *__bio_kmap_irq(struct bio *bio, unsigned short idx,
+static inline char *__bio_kmap_irq(struct bio *bio, struct bvec_iter iter,
                                   unsigned long *flags)
 {
-       return bvec_kmap_irq(bio_iovec_idx(bio, idx), flags);
+       return bvec_kmap_irq(&bio_iter_iovec(bio, iter), flags);
 }
 #define __bio_kunmap_irq(buf, flags)   bvec_kunmap_irq(buf, flags)
 
 #define bio_kmap_irq(bio, flags) \
-       __bio_kmap_irq((bio), (bio)->bi_idx, (flags))
+       __bio_kmap_irq((bio), (bio)->bi_iter, (flags))
 #define bio_kunmap_irq(buf,flags)      __bio_kunmap_irq(buf, flags)
 
-/*
- * Check whether this bio carries any data or not. A NULL bio is allowed.
- */
-static inline bool bio_has_data(struct bio *bio)
-{
-       if (bio && bio->bi_vcnt)
-               return true;
-
-       return false;
-}
-
-static inline bool bio_is_rw(struct bio *bio)
-{
-       if (!bio_has_data(bio))
-               return false;
-
-       if (bio->bi_rw & REQ_WRITE_SAME)
-               return false;
-
-       return true;
-}
-
-static inline bool bio_mergeable(struct bio *bio)
-{
-       if (bio->bi_rw & REQ_NOMERGE_FLAGS)
-               return false;
-
-       return true;
-}
-
 /*
  * BIO list management for use by remapping drivers (e.g. DM or MD) and loop.
  *
@@ -559,16 +631,12 @@ struct biovec_slab {
 
 #if defined(CONFIG_BLK_DEV_INTEGRITY)
 
-#define bip_vec_idx(bip, idx)  (&(bip->bip_vec[(idx)]))
-#define bip_vec(bip)           bip_vec_idx(bip, 0)
 
-#define __bip_for_each_vec(bvl, bip, i, start_idx)                     \
-       for (bvl = bip_vec_idx((bip), (start_idx)), i = (start_idx);    \
-            i < (bip)->bip_vcnt;                                       \
-            bvl++, i++)
 
-#define bip_for_each_vec(bvl, bip, i)                                  \
-       __bip_for_each_vec(bvl, bip, i, (bip)->bip_idx)
+#define bip_vec_idx(bip, idx)  (&(bip->bip_vec[(idx)]))
+
+#define bip_for_each_vec(bvl, bip, iter)                               \
+       for_each_bvec(bvl, (bip)->bip_vec, iter, (bip)->bip_iter)
 
 #define bio_for_each_integrity_vec(_bvl, _bio, _iter)                  \
        for_each_bio(_bio)                                              \
@@ -586,7 +654,6 @@ extern int bio_integrity_prep(struct bio *);
 extern void bio_integrity_endio(struct bio *, int);
 extern void bio_integrity_advance(struct bio *, unsigned int);
 extern void bio_integrity_trim(struct bio *, unsigned int, unsigned int);
-extern void bio_integrity_split(struct bio *, struct bio_pair *, int);
 extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t);
 extern int bioset_integrity_create(struct bio_set *, int);
 extern void bioset_integrity_free(struct bio_set *);
@@ -630,12 +697,6 @@ static inline int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
        return 0;
 }
 
-static inline void bio_integrity_split(struct bio *bio, struct bio_pair *bp,
-                                      int sectors)
-{
-       return;
-}
-
 static inline void bio_integrity_advance(struct bio *bio,
                                         unsigned int bytes_done)
 {
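Across these hunks __bio_clone() gives way to the fast-clone pair: with immutable biovecs a clone can share the parent's bi_io_vec outright and copy only bi_iter, which is what makes splitting cheap. A sketch of trimming such a clone (bs is an assumed bio_set, len a placeholder byte count, my_endio a hypothetical completion):

        struct bio *clone = bio_clone_fast(bio, GFP_NOIO, bs);

        /* clone shares bio->bi_io_vec; only the iterator is private */
        clone->bi_iter.bi_size = len;   /* trim the clone, parent untouched */
        clone->bi_end_io = my_endio;
        generic_make_request(clone);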
index ab0e9b2..161b231 100644 (file)
@@ -113,7 +113,6 @@ enum {
 };
 
 struct request_queue *blk_mq_init_queue(struct blk_mq_reg *, void *);
-void blk_mq_free_queue(struct request_queue *);
 int blk_mq_register_disk(struct gendisk *);
 void blk_mq_unregister_disk(struct gendisk *);
 void blk_mq_init_commands(struct request_queue *, void (*init)(void *data, struct blk_mq_hw_ctx *, struct request *, unsigned int), void *data);
@@ -159,16 +158,16 @@ static inline struct request *blk_mq_tag_to_rq(struct blk_mq_hw_ctx *hctx,
 }
 
 #define queue_for_each_hw_ctx(q, hctx, i)                              \
-       for ((i) = 0, hctx = (q)->queue_hw_ctx[0];                      \
-            (i) < (q)->nr_hw_queues; (i)++, hctx = (q)->queue_hw_ctx[i])
+       for ((i) = 0; (i) < (q)->nr_hw_queues &&                        \
+            ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++)
 
 #define queue_for_each_ctx(q, ctx, i)                                  \
-       for ((i) = 0, ctx = per_cpu_ptr((q)->queue_ctx, 0);             \
-            (i) < (q)->nr_queues; (i)++, ctx = per_cpu_ptr(q->queue_ctx, (i)))
+       for ((i) = 0; (i) < (q)->nr_queues &&                           \
+            ({ ctx = per_cpu_ptr((q)->queue_ctx, (i)); 1; }); (i)++)
 
 #define hctx_for_each_ctx(hctx, ctx, i)                                        \
-       for ((i) = 0, ctx = (hctx)->ctxs[0];                            \
-            (i) < (hctx)->nr_ctx; (i)++, ctx = (hctx)->ctxs[(i)])
+       for ((i) = 0; (i) < (hctx)->nr_ctx &&                           \
+            ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)
 
 #define blk_ctx_sum(q, sum)                                            \
 ({                                                                     \
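The rewritten iterators fix an out-of-bounds read: in the old comma-expression form, the increment step loaded queue_hw_ctx[i] after i had already reached nr_hw_queues, one slot past the array, before the condition could terminate the loop. The new shape tests the bound first and does the load inside a statement expression; the same idiom as a hypothetical generic macro:

        #define array_for_each(elem, arr, n, i)                         \
                for ((i) = 0; (i) < (n) &&                              \
                     ({ (elem) = (arr)[(i)]; 1; }); (i)++)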
index 238ef0e..bbc3a6c 100644 (file)
@@ -28,13 +28,22 @@ struct bio_vec {
        unsigned int    bv_offset;
 };
 
+struct bvec_iter {
+       sector_t                bi_sector;      /* device address in 512 byte
+                                                  sectors */
+       unsigned int            bi_size;        /* residual I/O count */
+
+       unsigned int            bi_idx;         /* current index into bvl_vec */
+
+       unsigned int            bi_bvec_done;   /* number of bytes completed in
+                                                  current bvec */
+};
+
 /*
  * main unit of I/O for the block layer and lower layers (ie drivers and
  * stacking drivers)
  */
 struct bio {
-       sector_t                bi_sector;      /* device address in 512 byte
-                                                  sectors */
        struct bio              *bi_next;       /* request queue link */
        struct block_device     *bi_bdev;
        unsigned long           bi_flags;       /* status, command, etc */
@@ -42,16 +51,13 @@ struct bio {
                                                 * top bits priority
                                                 */
 
-       unsigned short          bi_vcnt;        /* how many bio_vec's */
-       unsigned short          bi_idx;         /* current index into bvl_vec */
+       struct bvec_iter        bi_iter;
 
        /* Number of segments in this BIO after
         * physical address coalescing is performed.
         */
        unsigned int            bi_phys_segments;
 
-       unsigned int            bi_size;        /* residual I/O count */
-
        /*
         * To keep track of the max segment size, we account for the
         * sizes of the first and last mergeable segments in this bio.
@@ -59,6 +65,8 @@ struct bio {
        unsigned int            bi_seg_front_size;
        unsigned int            bi_seg_back_size;
 
+       atomic_t                bi_remaining;
+
        bio_end_io_t            *bi_end_io;
 
        void                    *bi_private;
@@ -74,11 +82,13 @@ struct bio {
        struct bio_integrity_payload *bi_integrity;  /* data integrity */
 #endif
 
+       unsigned short          bi_vcnt;        /* how many bio_vec's */
+
        /*
         * Everything starting with bi_max_vecs will be preserved by bio_reset()
         */
 
-       unsigned int            bi_max_vecs;    /* max bvl_vecs we can hold */
+       unsigned short          bi_max_vecs;    /* max bvl_vecs we can hold */
 
        atomic_t                bi_cnt;         /* pin count */
 
index 1b135d4..0375654 100644 (file)
@@ -291,6 +291,7 @@ struct queue_limits {
        unsigned char           discard_misaligned;
        unsigned char           cluster;
        unsigned char           discard_zeroes_data;
+       unsigned char           raid_partial_stripes_expensive;
 };
 
 struct request_queue {
@@ -735,7 +736,7 @@ struct rq_map_data {
 };
 
 struct req_iterator {
-       int i;
+       struct bvec_iter iter;
        struct bio *bio;
 };
 
@@ -748,10 +749,11 @@ struct req_iterator {
 
 #define rq_for_each_segment(bvl, _rq, _iter)                   \
        __rq_for_each_bio(_iter.bio, _rq)                       \
-               bio_for_each_segment(bvl, _iter.bio, _iter.i)
+               bio_for_each_segment(bvl, _iter.bio, _iter.iter)
 
-#define rq_iter_last(rq, _iter)                                        \
-               (_iter.bio->bi_next == NULL && _iter.i == _iter.bio->bi_vcnt-1)
+#define rq_iter_last(bvec, _iter)                              \
+               (_iter.bio->bi_next == NULL &&                  \
+                bio_iter_last(bvec, _iter.iter))
 
 #ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
 # error        "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
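req_iterator now carries a bvec_iter, and rq_iter_last() takes the current bio_vec by value rather than comparing indices against bi_vcnt. Walking a request's payload under the new convention, e.g. to total its bytes:

        struct req_iterator iter;
        struct bio_vec bvec;
        unsigned int bytes = 0;

        rq_for_each_segment(bvec, rq, iter)
                bytes += bvec.bv_len;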
index 20ee8b6..d21f2db 100644 (file)
@@ -1,6 +1,7 @@
 #ifndef __FS_CEPH_MESSENGER_H
 #define __FS_CEPH_MESSENGER_H
 
+#include <linux/blk_types.h>
 #include <linux/kref.h>
 #include <linux/mutex.h>
 #include <linux/net.h>
@@ -119,8 +120,7 @@ struct ceph_msg_data_cursor {
 #ifdef CONFIG_BLOCK
                struct {                                /* bio */
                        struct bio      *bio;           /* bio from list */
-                       unsigned int    vector_index;   /* vector from bio */
-                       unsigned int    vector_offset;  /* bytes from vector */
+                       struct bvec_iter bvec_iter;
                };
 #endif /* CONFIG_BLOCK */
                struct {                                /* pages */
index a0f9280..2e6dce6 100644 (file)
@@ -37,9 +37,9 @@ int cmdline_parts_parse(struct cmdline_parts **parts, const char *cmdline);
 struct cmdline_parts *cmdline_parts_find(struct cmdline_parts *parts,
                                         const char *bdev);
 
-void cmdline_parts_set(struct cmdline_parts *parts, sector_t disk_size,
-                      int slot,
-                      int (*add_part)(int, struct cmdline_subpart *, void *),
-                      void *param);
+int cmdline_parts_set(struct cmdline_parts *parts, sector_t disk_size,
+                     int slot,
+                     int (*add_part)(int, struct cmdline_subpart *, void *),
+                     void *param);
 
 #endif /* CMDLINEPARSEH */
index f4b0aa3..a68cbe5 100644 (file)
@@ -29,7 +29,7 @@ typedef void (*io_notify_fn)(unsigned long error, void *context);
 
 enum dm_io_mem_type {
        DM_IO_PAGE_LIST,/* Page list */
-       DM_IO_BVEC,     /* Bio vector */
+       DM_IO_BIO,      /* Bio vector */
        DM_IO_VMA,      /* Virtual memory area */
        DM_IO_KMEM,     /* Kernel memory */
 };
@@ -41,7 +41,7 @@ struct dm_io_memory {
 
        union {
                struct page_list *pl;
-               struct bio_vec *bvec;
+               struct bio *bio;
                void *vma;
                void *addr;
        } ptr;
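DM_IO_BVEC carried a bare bio_vec pointer, which cannot express a position partway through a biovec; handing dm-io the whole bio plus its iterator can. A request using the renamed type (a sketch; bio, client and region are placeholders):

        struct dm_io_request io_req = {
                .bi_rw = READ,
                .mem.type = DM_IO_BIO,          /* was DM_IO_BVEC */
                .mem.ptr.bio = bio,
                .notify.fn = NULL,              /* synchronous */
                .client = client,
        };
        unsigned long error_bits;

        dm_io(&io_req, 1, &region, &error_bits);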
index 6eecfc2..04e7632 100644 (file)
@@ -368,7 +368,7 @@ struct svc_program {
        struct svc_program *    pg_next;        /* other programs (same xprt) */
        u32                     pg_prog;        /* program number */
        unsigned int            pg_lovers;      /* lowest version */
-       unsigned int            pg_hivers;      /* lowest version */
+       unsigned int            pg_hivers;      /* highest version */
        unsigned int            pg_nvers;       /* number of versions */
        struct svc_version **   pg_vers;        /* version array */
        char *                  pg_name;        /* service name */
@@ -386,8 +386,10 @@ struct svc_version {
        struct svc_procedure *  vs_proc;        /* per-procedure info */
        u32                     vs_xdrsize;     /* xdrsize needed for this version */
 
-       unsigned int            vs_hidden : 1;  /* Don't register with portmapper.
+       unsigned int            vs_hidden : 1,  /* Don't register with portmapper.
                                                 * Only used for nfsacl so far. */
+                               vs_rpcb_optnl:1;/* Don't care about the result
+                                                * of registration. Only used
+                                                * for NFSv4. */
 
        /* Override dispatch function (e.g. when caching replies).
         * A return value of 0 means drop the request. 
index e2b9576..7110897 100644 (file)
@@ -24,10 +24,10 @@ DECLARE_EVENT_CLASS(bcache_request,
                __entry->dev            = bio->bi_bdev->bd_dev;
                __entry->orig_major     = d->disk->major;
                __entry->orig_minor     = d->disk->first_minor;
-               __entry->sector         = bio->bi_sector;
-               __entry->orig_sector    = bio->bi_sector - 16;
-               __entry->nr_sector      = bio->bi_size >> 9;
-               blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+               __entry->sector         = bio->bi_iter.bi_sector;
+               __entry->orig_sector    = bio->bi_iter.bi_sector - 16;
+               __entry->nr_sector      = bio->bi_iter.bi_size >> 9;
+               blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
        ),
 
        TP_printk("%d,%d %s %llu + %u (from %d,%d @ %llu)",
@@ -99,9 +99,9 @@ DECLARE_EVENT_CLASS(bcache_bio,
 
        TP_fast_assign(
                __entry->dev            = bio->bi_bdev->bd_dev;
-               __entry->sector         = bio->bi_sector;
-               __entry->nr_sector      = bio->bi_size >> 9;
-               blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+               __entry->sector         = bio->bi_iter.bi_sector;
+               __entry->nr_sector      = bio->bi_iter.bi_size >> 9;
+               blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
        ),
 
        TP_printk("%d,%d  %s %llu + %u",
@@ -134,9 +134,9 @@ TRACE_EVENT(bcache_read,
 
        TP_fast_assign(
                __entry->dev            = bio->bi_bdev->bd_dev;
-               __entry->sector         = bio->bi_sector;
-               __entry->nr_sector      = bio->bi_size >> 9;
-               blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+               __entry->sector         = bio->bi_iter.bi_sector;
+               __entry->nr_sector      = bio->bi_iter.bi_size >> 9;
+               blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
                __entry->cache_hit = hit;
                __entry->bypass = bypass;
        ),
@@ -162,9 +162,9 @@ TRACE_EVENT(bcache_write,
 
        TP_fast_assign(
                __entry->dev            = bio->bi_bdev->bd_dev;
-               __entry->sector         = bio->bi_sector;
-               __entry->nr_sector      = bio->bi_size >> 9;
-               blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+               __entry->sector         = bio->bi_iter.bi_sector;
+               __entry->nr_sector      = bio->bi_iter.bi_size >> 9;
+               blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
                __entry->writeback = writeback;
                __entry->bypass = bypass;
        ),
@@ -247,7 +247,7 @@ TRACE_EVENT(bcache_btree_write,
        TP_fast_assign(
                __entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0);
                __entry->block  = b->written;
-               __entry->keys   = b->sets[b->nsets].data->keys;
+               __entry->keys   = b->keys.set[b->keys.nsets].data->keys;
        ),
 
        TP_printk("bucket %zu", __entry->bucket)
@@ -411,7 +411,7 @@ TRACE_EVENT(bcache_alloc_invalidate,
        ),
 
        TP_fast_assign(
-               __entry->free           = fifo_used(&ca->free);
+               __entry->free           = fifo_used(&ca->free[RESERVE_NONE]);
                __entry->free_inc       = fifo_used(&ca->free_inc);
                __entry->free_inc_size  = ca->free_inc.size;
                __entry->unused         = fifo_used(&ca->unused);
@@ -422,8 +422,8 @@ TRACE_EVENT(bcache_alloc_invalidate,
 );
 
 TRACE_EVENT(bcache_alloc_fail,
-       TP_PROTO(struct cache *ca),
-       TP_ARGS(ca),
+       TP_PROTO(struct cache *ca, unsigned reserve),
+       TP_ARGS(ca, reserve),
 
        TP_STRUCT__entry(
                __field(unsigned,       free                    )
@@ -433,7 +433,7 @@ TRACE_EVENT(bcache_alloc_fail,
        ),
 
        TP_fast_assign(
-               __entry->free           = fifo_used(&ca->free);
+               __entry->free           = fifo_used(&ca->free[reserve]);
                __entry->free_inc       = fifo_used(&ca->free_inc);
                __entry->unused         = fifo_used(&ca->unused);
                __entry->blocked        = atomic_read(&ca->set->prio_blocked);
index 4c2301d..e76ae19 100644 (file)
@@ -243,9 +243,9 @@ TRACE_EVENT(block_bio_bounce,
        TP_fast_assign(
                __entry->dev            = bio->bi_bdev ?
                                          bio->bi_bdev->bd_dev : 0;
-               __entry->sector         = bio->bi_sector;
+               __entry->sector         = bio->bi_iter.bi_sector;
                __entry->nr_sector      = bio_sectors(bio);
-               blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+               blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
                memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
        ),
 
@@ -280,10 +280,10 @@ TRACE_EVENT(block_bio_complete,
 
        TP_fast_assign(
                __entry->dev            = bio->bi_bdev->bd_dev;
-               __entry->sector         = bio->bi_sector;
+               __entry->sector         = bio->bi_iter.bi_sector;
                __entry->nr_sector      = bio_sectors(bio);
                __entry->error          = error;
-               blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+               blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
        ),
 
        TP_printk("%d,%d %s %llu + %u [%d]",
@@ -308,9 +308,9 @@ DECLARE_EVENT_CLASS(block_bio_merge,
 
        TP_fast_assign(
                __entry->dev            = bio->bi_bdev->bd_dev;
-               __entry->sector         = bio->bi_sector;
+               __entry->sector         = bio->bi_iter.bi_sector;
                __entry->nr_sector      = bio_sectors(bio);
-               blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+               blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
                memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
        ),
 
@@ -375,9 +375,9 @@ TRACE_EVENT(block_bio_queue,
 
        TP_fast_assign(
                __entry->dev            = bio->bi_bdev->bd_dev;
-               __entry->sector         = bio->bi_sector;
+               __entry->sector         = bio->bi_iter.bi_sector;
                __entry->nr_sector      = bio_sectors(bio);
-               blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+               blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
                memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
        ),
 
@@ -403,7 +403,7 @@ DECLARE_EVENT_CLASS(block_get_rq,
 
        TP_fast_assign(
                __entry->dev            = bio ? bio->bi_bdev->bd_dev : 0;
-               __entry->sector         = bio ? bio->bi_sector : 0;
+               __entry->sector         = bio ? bio->bi_iter.bi_sector : 0;
                __entry->nr_sector      = bio ? bio_sectors(bio) : 0;
                blk_fill_rwbs(__entry->rwbs,
                              bio ? bio->bi_rw : 0, __entry->nr_sector);
@@ -538,9 +538,9 @@ TRACE_EVENT(block_split,
 
        TP_fast_assign(
                __entry->dev            = bio->bi_bdev->bd_dev;
-               __entry->sector         = bio->bi_sector;
+               __entry->sector         = bio->bi_iter.bi_sector;
                __entry->new_sector     = new_sector;
-               blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+               blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
                memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
        ),
 
@@ -579,11 +579,11 @@ TRACE_EVENT(block_bio_remap,
 
        TP_fast_assign(
                __entry->dev            = bio->bi_bdev->bd_dev;
-               __entry->sector         = bio->bi_sector;
+               __entry->sector         = bio->bi_iter.bi_sector;
                __entry->nr_sector      = bio_sectors(bio);
                __entry->old_dev        = dev;
                __entry->old_sector     = from;
-               blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+               blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
        ),
 
        TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
index 3b9f28d..67f38fa 100644 (file)
@@ -629,8 +629,8 @@ DECLARE_EVENT_CLASS(f2fs__submit_bio,
                __entry->dev            = sb->s_dev;
                __entry->rw             = rw;
                __entry->type           = type;
-               __entry->sector         = bio->bi_sector;
-               __entry->size           = bio->bi_size;
+               __entry->sector         = bio->bi_iter.bi_sector;
+               __entry->size           = bio->bi_iter.bi_size;
        ),
 
        TP_printk("dev = (%d,%d), %s%s, %s, sector = %lld, size = %u",
index 164a7e2..22b6ad3 100644 (file)
@@ -39,6 +39,7 @@ static inline void SET_##name(struct bkey *k, unsigned i, __u64 v)    \
 }
 
 #define KEY_SIZE_BITS          16
+#define KEY_MAX_U64S           8
 
 KEY_FIELD(KEY_PTRS,    high, 60, 3)
 KEY_FIELD(HEADER_SIZE, high, 58, 2)
@@ -118,7 +119,7 @@ static inline struct bkey *bkey_next(const struct bkey *k)
        return (struct bkey *) (d + bkey_u64s(k));
 }
 
-static inline struct bkey *bkey_last(const struct bkey *k, unsigned nr_keys)
+static inline struct bkey *bkey_idx(const struct bkey *k, unsigned nr_keys)
 {
        __u64 *d = (void *) k;
        return (struct bkey *) (d + nr_keys);
index f1f3dd5..84c517c 100644 (file)
@@ -185,7 +185,8 @@ enum {
                                 * to clear media change status */
        FD_UNUSED_BIT,
        FD_DISK_CHANGED_BIT,    /* disk has been changed since last i/o */
-       FD_DISK_WRITABLE_BIT    /* disk is writable */
+       FD_DISK_WRITABLE_BIT,   /* disk is writable */
+       FD_OPEN_SHOULD_FAIL_BIT
 };
 
 #define FDSETDRVPRM _IOW(2, 0x90, struct floppy_drive_params)
index d09dd10..9a58bc2 100644 (file)
@@ -32,7 +32,7 @@ static int submit(int rw, struct block_device *bdev, sector_t sector,
        struct bio *bio;
 
        bio = bio_alloc(__GFP_WAIT | __GFP_HIGH, 1);
-       bio->bi_sector = sector;
+       bio->bi_iter.bi_sector = sector;
        bio->bi_bdev = bdev;
        bio->bi_end_io = end_swap_bio_read;
 
index f785aef..b418cb0 100644 (file)
@@ -781,8 +781,8 @@ static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
        if (!error && !bio_flagged(bio, BIO_UPTODATE))
                error = EIO;
 
-       __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what,
-                       error, 0, NULL);
+       __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
+                       bio->bi_rw, what, error, 0, NULL);
 }
 
 static void blk_add_trace_bio_bounce(void *ignore,
@@ -885,8 +885,9 @@ static void blk_add_trace_split(void *ignore,
        if (bt) {
                __be64 rpdu = cpu_to_be64(pdu);
 
-               __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw,
-                               BLK_TA_SPLIT, !bio_flagged(bio, BIO_UPTODATE),
+               __blk_add_trace(bt, bio->bi_iter.bi_sector,
+                               bio->bi_iter.bi_size, bio->bi_rw, BLK_TA_SPLIT,
+                               !bio_flagged(bio, BIO_UPTODATE),
                                sizeof(rpdu), &rpdu);
        }
 }
@@ -918,9 +919,9 @@ static void blk_add_trace_bio_remap(void *ignore,
        r.device_to   = cpu_to_be32(bio->bi_bdev->bd_dev);
        r.sector_from = cpu_to_be64(from);
 
-       __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw,
-                       BLK_TA_REMAP, !bio_flagged(bio, BIO_UPTODATE),
-                       sizeof(r), &r);
+       __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
+                       bio->bi_rw, BLK_TA_REMAP,
+                       !bio_flagged(bio, BIO_UPTODATE), sizeof(r), &r);
 }
 
 /**
index 5a7d58f..523918b 100644 (file)
@@ -98,27 +98,24 @@ int init_emergency_isa_pool(void)
 static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
 {
        unsigned char *vfrom;
-       struct bio_vec *tovec, *fromvec;
-       int i;
-
-       bio_for_each_segment(tovec, to, i) {
-               fromvec = from->bi_io_vec + i;
-
-               /*
-                * not bounced
-                */
-               if (tovec->bv_page == fromvec->bv_page)
-                       continue;
-
-               /*
-                * fromvec->bv_offset and fromvec->bv_len might have been
-                * modified by the block layer, so use the original copy,
-                * bounce_copy_vec already uses tovec->bv_len
-                */
-               vfrom = page_address(fromvec->bv_page) + tovec->bv_offset;
+       struct bio_vec tovec, *fromvec = from->bi_io_vec;
+       struct bvec_iter iter;
+
+       bio_for_each_segment(tovec, to, iter) {
+               if (tovec.bv_page != fromvec->bv_page) {
+                       /*
+                        * fromvec->bv_offset and fromvec->bv_len might have
+                        * been modified by the block layer, so use the original
+                        * copy, bounce_copy_vec already uses tovec->bv_len
+                        */
+                       vfrom = page_address(fromvec->bv_page) +
+                               tovec.bv_offset;
+
+                       bounce_copy_vec(&tovec, vfrom);
+                       flush_dcache_page(tovec.bv_page);
+               }
 
-               bounce_copy_vec(tovec, vfrom);
-               flush_dcache_page(tovec->bv_page);
+               fromvec++;
        }
 }
 
@@ -201,13 +198,14 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
 {
        struct bio *bio;
        int rw = bio_data_dir(*bio_orig);
-       struct bio_vec *to, *from;
+       struct bio_vec *to, from;
+       struct bvec_iter iter;
        unsigned i;
 
        if (force)
                goto bounce;
-       bio_for_each_segment(from, *bio_orig, i)
-               if (page_to_pfn(from->bv_page) > queue_bounce_pfn(q))
+       bio_for_each_segment(from, *bio_orig, iter)
+               if (page_to_pfn(from.bv_page) > queue_bounce_pfn(q))
                        goto bounce;
 
        return;
index 7247be6..7c59ef6 100644 (file)
@@ -31,13 +31,13 @@ static struct bio *get_swap_bio(gfp_t gfp_flags,
 
        bio = bio_alloc(gfp_flags, 1);
        if (bio) {
-               bio->bi_sector = map_swap_page(page, &bio->bi_bdev);
-               bio->bi_sector <<= PAGE_SHIFT - 9;
+               bio->bi_iter.bi_sector = map_swap_page(page, &bio->bi_bdev);
+               bio->bi_iter.bi_sector <<= PAGE_SHIFT - 9;
                bio->bi_io_vec[0].bv_page = page;
                bio->bi_io_vec[0].bv_len = PAGE_SIZE;
                bio->bi_io_vec[0].bv_offset = 0;
                bio->bi_vcnt = 1;
-               bio->bi_size = PAGE_SIZE;
+               bio->bi_iter.bi_size = PAGE_SIZE;
                bio->bi_end_io = end_io;
        }
        return bio;
@@ -62,7 +62,7 @@ void end_swap_bio_write(struct bio *bio, int err)
                printk(KERN_ALERT "Write-error on swap-device (%u:%u:%Lu)\n",
                                imajor(bio->bi_bdev->bd_inode),
                                iminor(bio->bi_bdev->bd_inode),
-                               (unsigned long long)bio->bi_sector);
+                               (unsigned long long)bio->bi_iter.bi_sector);
                ClearPageReclaim(page);
        }
        end_page_writeback(page);
@@ -80,7 +80,7 @@ void end_swap_bio_read(struct bio *bio, int err)
                printk(KERN_ALERT "Read-error on swap-device (%u:%u:%Lu)\n",
                                imajor(bio->bi_bdev->bd_inode),
                                iminor(bio->bi_bdev->bd_inode),
-                               (unsigned long long)bio->bi_sector);
+                               (unsigned long long)bio->bi_iter.bi_sector);
                goto out;
        }
 
index 2ed1304..0e478a0 100644 (file)
@@ -778,13 +778,12 @@ static void ceph_msg_data_bio_cursor_init(struct ceph_msg_data_cursor *cursor,
 
        bio = data->bio;
        BUG_ON(!bio);
-       BUG_ON(!bio->bi_vcnt);
 
        cursor->resid = min(length, data->bio_length);
        cursor->bio = bio;
-       cursor->vector_index = 0;
-       cursor->vector_offset = 0;
-       cursor->last_piece = length <= bio->bi_io_vec[0].bv_len;
+       cursor->bvec_iter = bio->bi_iter;
+       cursor->last_piece =
+               cursor->resid <= bio_iter_len(bio, cursor->bvec_iter);
 }
 
 static struct page *ceph_msg_data_bio_next(struct ceph_msg_data_cursor *cursor,
@@ -793,71 +792,63 @@ static struct page *ceph_msg_data_bio_next(struct ceph_msg_data_cursor *cursor,
 {
        struct ceph_msg_data *data = cursor->data;
        struct bio *bio;
-       struct bio_vec *bio_vec;
-       unsigned int index;
+       struct bio_vec bio_vec;
 
        BUG_ON(data->type != CEPH_MSG_DATA_BIO);
 
        bio = cursor->bio;
        BUG_ON(!bio);
 
-       index = cursor->vector_index;
-       BUG_ON(index >= (unsigned int) bio->bi_vcnt);
+       bio_vec = bio_iter_iovec(bio, cursor->bvec_iter);
 
-       bio_vec = &bio->bi_io_vec[index];
-       BUG_ON(cursor->vector_offset >= bio_vec->bv_len);
-       *page_offset = (size_t) (bio_vec->bv_offset + cursor->vector_offset);
+       *page_offset = (size_t) bio_vec.bv_offset;
        BUG_ON(*page_offset >= PAGE_SIZE);
        if (cursor->last_piece) /* pagelist offset is always 0 */
                *length = cursor->resid;
        else
-               *length = (size_t) (bio_vec->bv_len - cursor->vector_offset);
+               *length = (size_t) bio_vec.bv_len;
        BUG_ON(*length > cursor->resid);
        BUG_ON(*page_offset + *length > PAGE_SIZE);
 
-       return bio_vec->bv_page;
+       return bio_vec.bv_page;
 }
 
 static bool ceph_msg_data_bio_advance(struct ceph_msg_data_cursor *cursor,
                                        size_t bytes)
 {
        struct bio *bio;
-       struct bio_vec *bio_vec;
-       unsigned int index;
+       struct bio_vec bio_vec;
 
        BUG_ON(cursor->data->type != CEPH_MSG_DATA_BIO);
 
        bio = cursor->bio;
        BUG_ON(!bio);
 
-       index = cursor->vector_index;
-       BUG_ON(index >= (unsigned int) bio->bi_vcnt);
-       bio_vec = &bio->bi_io_vec[index];
+       bio_vec = bio_iter_iovec(bio, cursor->bvec_iter);
 
        /* Advance the cursor offset */
 
        BUG_ON(cursor->resid < bytes);
        cursor->resid -= bytes;
-       cursor->vector_offset += bytes;
-       if (cursor->vector_offset < bio_vec->bv_len)
+
+       bio_advance_iter(bio, &cursor->bvec_iter, bytes);
+
+       if (bytes < bio_vec.bv_len)
                return false;   /* more bytes to process in this segment */
-       BUG_ON(cursor->vector_offset != bio_vec->bv_len);
 
        /* Move on to the next segment, and possibly the next bio */
 
-       if (++index == (unsigned int) bio->bi_vcnt) {
+       if (!cursor->bvec_iter.bi_size) {
                bio = bio->bi_next;
-               index = 0;
+               cursor->bvec_iter = bio->bi_iter;
        }
        cursor->bio = bio;
-       cursor->vector_index = index;
-       cursor->vector_offset = 0;
 
        if (!cursor->last_piece) {
                BUG_ON(!cursor->resid);
                BUG_ON(!bio);
                /* A short read is OK, so use <= rather than == */
-               if (cursor->resid <= bio->bi_io_vec[index].bv_len)
+               if (cursor->resid <= bio_iter_len(bio, cursor->bvec_iter))
                        cursor->last_piece = true;
        }
 
index 76e42e6..24589bd 100644 (file)
@@ -59,6 +59,7 @@
 #include <linux/crypto.h>
 #include <linux/sunrpc/gss_krb5.h>
 #include <linux/sunrpc/xdr.h>
+#include <linux/lcm.h>
 
 #ifdef RPC_DEBUG
 # define RPCDBG_FACILITY        RPCDBG_AUTH
@@ -72,7 +73,7 @@
 static void krb5_nfold(u32 inbits, const u8 *in,
                       u32 outbits, u8 *out)
 {
-       int a, b, c, lcm;
+       unsigned long ulcm;
        int byte, i, msbit;
 
        /* the code below is more readable if I make these bytes
@@ -82,17 +83,7 @@ static void krb5_nfold(u32 inbits, const u8 *in,
        outbits >>= 3;
 
        /* first compute lcm(n,k) */
-
-       a = outbits;
-       b = inbits;
-
-       while (b != 0) {
-               c = b;
-               b = a%b;
-               a = c;
-       }
-
-       lcm = outbits*inbits/a;
+       ulcm = lcm(inbits, outbits);
 
        /* now do the real work */
 
@@ -101,7 +92,7 @@ static void krb5_nfold(u32 inbits, const u8 *in,
 
        /* this will end up cycling through k lcm(k,n)/k times, which
           is correct */
-       for (i = lcm-1; i >= 0; i--) {
+       for (i = ulcm-1; i >= 0; i--) {
                /* compute the msbit in k which gets added into this byte */
                msbit = (
                        /* first, start with the msbit in the first,
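The Euclid loop being deleted computed gcd only to derive the least common multiple; lib/lcm.c already provides that, via lcm(a, b) = a / gcd(a, b) * b. For example:

        #include <linux/lcm.h>

        unsigned long cycle = lcm(24, 16);      /* gcd is 8, so 24/8*16 = 48 */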
index 458f85e..abbb7dc 100644 (file)
@@ -137,7 +137,6 @@ void init_gssp_clnt(struct sunrpc_net *sn)
 {
        mutex_init(&sn->gssp_lock);
        sn->gssp_clnt = NULL;
-       init_waitqueue_head(&sn->gssp_wq);
 }
 
 int set_gssp_clnt(struct net *net)
@@ -154,7 +153,6 @@ int set_gssp_clnt(struct net *net)
                sn->gssp_clnt = clnt;
        }
        mutex_unlock(&sn->gssp_lock);
-       wake_up(&sn->gssp_wq);
        return ret;
 }
 
index 008cdad..0f73f45 100644 (file)
@@ -1263,65 +1263,34 @@ out:
        return ret;
 }
 
-DEFINE_SPINLOCK(use_gssp_lock);
-
-static bool use_gss_proxy(struct net *net)
-{
-       struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
-
-       if (sn->use_gss_proxy != -1)
-               return sn->use_gss_proxy;
-       spin_lock(&use_gssp_lock);
-       /*
-        * If you wanted gss-proxy, you should have said so before
-        * starting to accept requests:
-        */
-       sn->use_gss_proxy = 0;
-       spin_unlock(&use_gssp_lock);
-       return 0;
-}
-
-#ifdef CONFIG_PROC_FS
-
+/*
+ * Try to set the sn->use_gss_proxy variable to a new value. We only allow
+ * it to be changed if it's currently undefined (-1). If it's any other value
+ * then return -EBUSY unless the type wouldn't have changed anyway.
+ */
 static int set_gss_proxy(struct net *net, int type)
 {
        struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
-       int ret = 0;
+       int ret;
 
        WARN_ON_ONCE(type != 0 && type != 1);
-       spin_lock(&use_gssp_lock);
-       if (sn->use_gss_proxy == -1 || sn->use_gss_proxy == type)
-               sn->use_gss_proxy = type;
-       else
-               ret = -EBUSY;
-       spin_unlock(&use_gssp_lock);
-       wake_up(&sn->gssp_wq);
-       return ret;
-}
-
-static inline bool gssp_ready(struct sunrpc_net *sn)
-{
-       switch (sn->use_gss_proxy) {
-               case -1:
-                       return false;
-               case 0:
-                       return true;
-               case 1:
-                       return sn->gssp_clnt;
-       }
-       WARN_ON_ONCE(1);
-       return false;
+       ret = cmpxchg(&sn->use_gss_proxy, -1, type);
+       if (ret != -1 && ret != type)
+               return -EBUSY;
+       return 0;
 }
 
-static int wait_for_gss_proxy(struct net *net, struct file *file)
+static bool use_gss_proxy(struct net *net)
 {
        struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
 
-       if (file->f_flags & O_NONBLOCK && !gssp_ready(sn))
-               return -EAGAIN;
-       return wait_event_interruptible(sn->gssp_wq, gssp_ready(sn));
+       /* If use_gss_proxy is still undefined, then try to disable it */
+       if (sn->use_gss_proxy == -1)
+               set_gss_proxy(net, 0);
+       return sn->use_gss_proxy;
 }
 
+#ifdef CONFIG_PROC_FS
 
 static ssize_t write_gssp(struct file *file, const char __user *buf,
                         size_t count, loff_t *ppos)
@@ -1342,10 +1311,10 @@ static ssize_t write_gssp(struct file *file, const char __user *buf,
                return res;
        if (i != 1)
                return -EINVAL;
-       res = set_gss_proxy(net, 1);
+       res = set_gssp_clnt(net);
        if (res)
                return res;
-       res = set_gssp_clnt(net);
+       res = set_gss_proxy(net, 1);
        if (res)
                return res;
        return count;
@@ -1355,16 +1324,12 @@ static ssize_t read_gssp(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
 {
        struct net *net = PDE_DATA(file_inode(file));
+       struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
        unsigned long p = *ppos;
        char tbuf[10];
        size_t len;
-       int ret;
 
-       ret = wait_for_gss_proxy(net, file);
-       if (ret)
-               return ret;
-
-       snprintf(tbuf, sizeof(tbuf), "%d\n", use_gss_proxy(net));
+       snprintf(tbuf, sizeof(tbuf), "%d\n", sn->use_gss_proxy);
        len = strlen(tbuf);
        if (p >= len)
                return 0;
@@ -1626,8 +1591,7 @@ svcauth_gss_wrap_resp_integ(struct svc_rqst *rqstp)
        BUG_ON(integ_len % 4);
        *p++ = htonl(integ_len);
        *p++ = htonl(gc->gc_seq);
-       if (xdr_buf_subsegment(resbuf, &integ_buf, integ_offset,
-                               integ_len))
+       if (xdr_buf_subsegment(resbuf, &integ_buf, integ_offset, integ_len))
                BUG();
        if (resbuf->tail[0].iov_base == NULL) {
                if (resbuf->head[0].iov_len + RPC_MAX_AUTH_SIZE > PAGE_SIZE)
@@ -1635,10 +1599,8 @@ svcauth_gss_wrap_resp_integ(struct svc_rqst *rqstp)
                resbuf->tail[0].iov_base = resbuf->head[0].iov_base
                                                + resbuf->head[0].iov_len;
                resbuf->tail[0].iov_len = 0;
-               resv = &resbuf->tail[0];
-       } else {
-               resv = &resbuf->tail[0];
        }
+       resv = &resbuf->tail[0];
        mic.data = (u8 *)resv->iov_base + resv->iov_len + 4;
        if (gss_get_mic(gsd->rsci->mechctx, &integ_buf, &mic))
                goto out_err;
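The spinlock and wait queue around use_gss_proxy collapse into a single cmpxchg(): it atomically stores the new value only if the old one matches, and always returns what it found, so all three outcomes in set_gss_proxy() are read off the return value. Illustratively:

        int v = -1;

        cmpxchg(&v, -1, 1);     /* finds -1: stores 1, returns -1 (we won)    */
        cmpxchg(&v, -1, 1);     /* finds  1: no store, returns 1 (same type)  */
        cmpxchg(&v, -1, 0);     /* finds  1: no store, returns 1, so -EBUSY   */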
index e521d20..ae333c1 100644 (file)
@@ -1111,9 +1111,7 @@ void qword_addhex(char **bpp, int *lp, char *buf, int blen)
                *bp++ = 'x';
                len -= 2;
                while (blen && len >= 2) {
-                       unsigned char c = *buf++;
-                       *bp++ = '0' + ((c&0xf0)>>4) + (c>=0xa0)*('a'-'9'-1);
-                       *bp++ = '0' + (c&0x0f) + ((c&0x0f)>=0x0a)*('a'-'9'-1);
+                       bp = hex_byte_pack(bp, *buf++);
                        len -= 2;
                        blen--;
                }
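The two magic expressions being removed were an open-coded nibble-to-hex conversion; hex_byte_pack() from <linux/kernel.h> does the same, emitting two lowercase hex digits and returning the advanced buffer pointer:

        char buf[3];
        char *p = hex_byte_pack(buf, 0x3f);     /* buf[0..1] = "3f" */

        *p = '\0';                              /* p == buf + 2 */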
index 94e506f..df58268 100644 (file)
@@ -27,7 +27,6 @@ struct sunrpc_net {
        unsigned int rpcb_is_af_local : 1;
 
        struct mutex gssp_lock;
-       wait_queue_head_t gssp_wq;
        struct rpc_clnt *gssp_clnt;
        int use_gss_proxy;
        int pipe_version;
index e7fbe36..5de6801 100644 (file)
@@ -916,9 +916,6 @@ static int __svc_register(struct net *net, const char *progname,
 #endif
        }
 
-       if (error < 0)
-               printk(KERN_WARNING "svc: failed to register %sv%u RPC "
-                       "service (errno %d).\n", progname, version, -error);
        return error;
 }
 
@@ -937,6 +934,7 @@ int svc_register(const struct svc_serv *serv, struct net *net,
                 const unsigned short port)
 {
        struct svc_program      *progp;
+       struct svc_version      *vers;
        unsigned int            i;
        int                     error = 0;
 
@@ -946,7 +944,8 @@ int svc_register(const struct svc_serv *serv, struct net *net,
 
        for (progp = serv->sv_program; progp; progp = progp->pg_next) {
                for (i = 0; i < progp->pg_nvers; i++) {
-                       if (progp->pg_vers[i] == NULL)
+                       vers = progp->pg_vers[i];
+                       if (vers == NULL)
                                continue;
 
                        dprintk("svc: svc_register(%sv%d, %s, %u, %u)%s\n",
@@ -955,16 +954,26 @@ int svc_register(const struct svc_serv *serv, struct net *net,
                                        proto == IPPROTO_UDP?  "udp" : "tcp",
                                        port,
                                        family,
-                                       progp->pg_vers[i]->vs_hidden?
-                                               " (but not telling portmap)" : "");
+                                       vers->vs_hidden ?
+                                       " (but not telling portmap)" : "");
 
-                       if (progp->pg_vers[i]->vs_hidden)
+                       if (vers->vs_hidden)
                                continue;
 
                        error = __svc_register(net, progp->pg_name, progp->pg_prog,
                                                i, family, proto, port);
-                       if (error < 0)
+
+                       if (vers->vs_rpcb_optnl) {
+                               error = 0;
+                               continue;
+                       }
+
+                       if (error < 0) {
+                               printk(KERN_WARNING "svc: failed to register "
+                                       "%sv%u RPC service (errno %d).\n",
+                                       progp->pg_name, i, -error);
                                break;
+                       }
                }
        }
 
index 2a7ca8f..817a1e5 100644 (file)
@@ -2964,10 +2964,9 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
 
        /*
         * Once we've associated a backchannel xprt with a connection,
-        * we want to keep it around as long as long as the connection
-        * lasts, in case we need to start using it for a backchannel
-        * again; this reference won't be dropped until bc_xprt is
-        * destroyed.
+        * we want to keep it around as long as the connection lasts,
+        * in case we need to start using it for a backchannel again;
+        * this reference won't be dropped until bc_xprt is destroyed.
         */
        xprt_get(xprt);
        args->bc_xprt->xpt_bc_xprt = xprt;