virtio-blk: Use block layer provided spinlock
author Asias He <asias@redhat.com>
Fri, 25 May 2012 08:03:27 +0000 (16:03 +0800)
committer Ben Hutchings <ben@decadent.org.uk>
Thu, 9 Aug 2012 23:24:58 +0000 (00:24 +0100)
commit 2c95a3290919541b846bee3e0fbaa75860929f53 upstream.

The block layer allocates a spinlock for the queue if the driver does not
provide one to blk_init_queue().
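
For reference, this is roughly what the block layer does (a paraphrase of
block/blk-core.c from the v3.2-v3.4 era, not a verbatim quote): the queue
starts out on its internal lock, and a driver-supplied lock only overrides
that default when one is actually passed in.

        /* blk_alloc_queue_node(), paraphrased */
        spin_lock_init(&q->__queue_lock);
        q->queue_lock = &q->__queue_lock;

        /* blk_init_allocated_queue(), paraphrased */
        if (lock)
                q->queue_lock = lock;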

The reason to use the internal spinlock is that blk_cleanup_queue() switches
back to the internal spinlock in its cleanup path:

        if (q->queue_lock != &q->__queue_lock)
                q->queue_lock = &q->__queue_lock;

However, processes in D state might have taken the driver-provided spinlock;
when they wake up, they end up releasing the block-layer-provided spinlock
instead, which trips lockdep:

=====================================
[ BUG: bad unlock balance detected! ]
3.4.0-rc7+ #238 Not tainted
-------------------------------------
fio/3587 is trying to release lock (&(&q->__queue_lock)->rlock) at:
[<ffffffff813274d2>] blk_queue_bio+0x2a2/0x380
but there are no more locks to release!

other info that might help us debug this:
1 lock held by fio/3587:
 #0:  (&(&vblk->lock)->rlock){......}, at:
[<ffffffff8132661a>] get_request_wait+0x19a/0x250
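
In other words, both the acquire and the release go through the q->queue_lock
pointer, so once blk_cleanup_queue() has repointed it, a task that took the
driver's lock ends up unlocking the internal one (a simplified sketch of the
sequence described above, not the exact interleaving):

        spin_lock_irq(q->queue_lock);    /* takes &vblk->lock */
        ...                              /* q->queue_lock is repointed to
                                            &q->__queue_lock in the meantime */
        spin_unlock_irq(q->queue_lock);  /* releases &q->__queue_lock */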

Other drivers use the block-layer-provided spinlock as well, e.g. SCSI.

Switching to the block-layer-provided spinlock saves a bit of memory and does
not increase lock contention. Performance testing shows no real difference
before and after this patch.

Changes in v2: Improve commit log as Michael suggested.

Cc: virtualization@lists.linux-foundation.org
Cc: kvm@vger.kernel.org
Signed-off-by: Asias He <asias@redhat.com>
Acked-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
[bwh: Backported to 3.2: adjust context]
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
drivers/block/virtio_blk.c

index e46f2f7..650a308 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -20,8 +20,6 @@ struct workqueue_struct *virtblk_wq;
 
 struct virtio_blk
 {
-       spinlock_t lock;
-
        struct virtio_device *vdev;
        struct virtqueue *vq;
 
@@ -62,7 +60,7 @@ static void blk_done(struct virtqueue *vq)
        unsigned int len;
        unsigned long flags;
 
-       spin_lock_irqsave(&vblk->lock, flags);
+       spin_lock_irqsave(vblk->disk->queue->queue_lock, flags);
        while ((vbr = virtqueue_get_buf(vblk->vq, &len)) != NULL) {
                int error;
 
@@ -97,7 +95,7 @@ static void blk_done(struct virtqueue *vq)
        }
        /* In case queue is stopped waiting for more buffers. */
        blk_start_queue(vblk->disk->queue);
-       spin_unlock_irqrestore(&vblk->lock, flags);
+       spin_unlock_irqrestore(vblk->disk->queue->queue_lock, flags);
 }
 
 static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
@@ -384,7 +382,6 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
        }
 
        INIT_LIST_HEAD(&vblk->reqs);
-       spin_lock_init(&vblk->lock);
        vblk->vdev = vdev;
        vblk->sg_elems = sg_elems;
        sg_init_table(vblk->sg, vblk->sg_elems);
@@ -410,7 +407,7 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
                goto out_mempool;
        }
 
-       q = vblk->disk->queue = blk_init_queue(do_virtblk_request, &vblk->lock);
+       q = vblk->disk->queue = blk_init_queue(do_virtblk_request, NULL);
        if (!q) {
                err = -ENOMEM;
                goto out_put_disk;