commit d61b7f972dab2a7d187c38254845546dfc8eed85 upstream.
A user noticed that write performance was horrible over loopback, and we
traced it to an inversion of when we need to set MSG_MORE. It should be
set when we have more bvecs to send, not when we are on the last bvec.
This patch made the test go from 20 iops to 78k iops.
Signed-off-by: Josef Bacik <jbacik@fb.com>
Fixes: 429a787be679 ("nbd: fix use-after-free of rq/bio in the xmit path")
Signed-off-by: Jens Axboe <axboe@fb.com>
[bwh: Backported to 3.2: adjust context]
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
/* always call with the tx_lock held */
static int nbd_send_req(struct nbd_device *lo, struct request *req)
{
/* always call with the tx_lock held */
static int nbd_send_req(struct nbd_device *lo, struct request *req)
{
struct nbd_request request;
unsigned long size = blk_rq_bytes(req);
struct bio *bio;
struct nbd_request request;
unsigned long size = blk_rq_bytes(req);
struct bio *bio;
if (nbd_cmd(req) != NBD_CMD_WRITE)
return 0;
if (nbd_cmd(req) != NBD_CMD_WRITE)
return 0;
bio = req->bio;
while (bio) {
struct bio *next = bio->bi_next;
bio = req->bio;
while (bio) {
struct bio *next = bio->bi_next;
bio_for_each_segment(bvec, bio, i) {
bool is_last = !next && i == bio->bi_vcnt - 1;
bio_for_each_segment(bvec, bio, i) {
bool is_last = !next && i == bio->bi_vcnt - 1;
+ int flags = is_last ? 0 : MSG_MORE;
- if (is_last)
- flags = MSG_MORE;
dprintk(DBG_TX, "%s: request %p: sending %d bytes data\n",
lo->disk->disk_name, req, bvec->bv_len);
result = sock_send_bvec(lo, bvec, flags);
dprintk(DBG_TX, "%s: request %p: sending %d bytes data\n",
lo->disk->disk_name, req, bvec->bv_len);
result = sock_send_bvec(lo, bvec, flags);