#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
+#include <linux/bitmap.h>
#include <xen/xen.h>
#include <xen/xenbus.h>
/*
 * We have one of these per vbd, whether ide, scsi or 'other'.  They
 * hang in private_data off the gendisk structure. We may end up
 * putting all kinds of interesting stuff here :-)
 */
struct blkfront_info
{
+ spinlock_t io_lock; /* per-device lock: protects this vbd's shared ring and request queue */
struct mutex mutex;
struct xenbus_device *xbdev;
struct gendisk *gd;
int is_ready;
};
-static DEFINE_SPINLOCK(blkif_io_lock);
-
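The hunks above replace the driver-wide blkif_io_lock with a lock embedded in each blkfront_info, so I/O on one virtual disk no longer contends with every other disk in the guest. A minimal sketch of the resulting pattern (helper names are hypothetical; the real call sites appear in the hunks below):

/* Per-device locking sketch: each vbd initializes and uses only its
 * own io_lock, so two disks can service ring work on two CPUs
 * concurrently instead of serializing on one global spinlock. */
static void example_init(struct blkfront_info *info)      /* hypothetical */
{
	spin_lock_init(&info->io_lock);  /* done once, at probe time */
}

static void example_ring_work(struct blkfront_info *info) /* hypothetical */
{
	spin_lock_irq(&info->io_lock);   /* covers this device's ring only */
	/* ... produce/consume requests on this device's shared ring ... */
	spin_unlock_irq(&info->io_lock);
}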
static unsigned int nr_minors;
static unsigned long *minors;
static DEFINE_SPINLOCK(minor_lock);
spin_lock(&minor_lock);
if (find_next_bit(minors, end, minor) >= end) {
- for (; minor < end; ++minor)
- __set_bit(minor, minors);
+ bitmap_set(minors, minor, nr);
rc = 0;
} else
rc = -EBUSY;
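For reference, the whole reservation helper after this change, reconstructed as a sketch (the preamble that grows the minors bitmap when end > nr_minors is elided): find_next_bit() scans [minor, end) for an already-reserved bit, and bitmap_set() then claims the whole range in one call instead of a per-bit __set_bit() loop.

static int xlbd_reserve_minors(unsigned int minor, unsigned int nr)
{
	unsigned int end = minor + nr;
	int rc;

	/* ... grow the minors bitmap here if end > nr_minors ... */

	spin_lock(&minor_lock);
	if (find_next_bit(minors, end, minor) >= end) {
		bitmap_set(minors, minor, nr);  /* claim [minor, minor+nr) */
		rc = 0;
	} else
		rc = -EBUSY;                    /* range partly taken */
	spin_unlock(&minor_lock);

	return rc;
}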
BUG_ON(end > nr_minors);
spin_lock(&minor_lock);
- for (; minor < end; ++minor)
- __clear_bit(minor, minors);
+ bitmap_clear(minors, minor, nr);
spin_unlock(&minor_lock);
}
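The release path is the mirror image. Neither bitmap_set() nor bitmap_clear() is atomic, which is fine here because every caller holds minor_lock. Reconstructed as a sketch:

static void xlbd_release_minors(unsigned int minor, unsigned int nr)
{
	unsigned int end = minor + nr;

	BUG_ON(end > nr_minors);
	spin_lock(&minor_lock);
	bitmap_clear(minors, minor, nr);  /* free [minor, minor+nr) */
	spin_unlock(&minor_lock);
}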
struct request_queue *rq;
struct blkfront_info *info = gd->private_data;
- rq = blk_init_queue(do_blkif_request, &blkif_io_lock);
+ rq = blk_init_queue(do_blkif_request, &info->io_lock);
if (rq == NULL)
return -1;
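The lock passed to blk_init_queue() becomes the queue's queue_lock: the block layer acquires it before calling the request function, so do_blkif_request() already runs under info->io_lock. A condensed sketch of that request function (the ring-submission body is elided):

static void do_blkif_request(struct request_queue *rq)
{
	struct request *req;

	/* Called by the block layer with rq->queue_lock held; after
	 * this change that lock is the owning device's info->io_lock. */
	while ((req = blk_peek_request(rq)) != NULL) {
		blk_start_request(req);  /* dequeue under the lock */
		/* ... translate req into shared-ring entries ... */
	}
}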
if (info->rq == NULL)
return;
- spin_lock_irqsave(&blkif_io_lock, flags);
+ spin_lock_irqsave(&info->io_lock, flags);
/* No more blkif_request(). */
blk_stop_queue(info->rq);
/* No more gnttab callback work. */
gnttab_cancel_free_callback(&info->callback);
- spin_unlock_irqrestore(&blkif_io_lock, flags);
+ spin_unlock_irqrestore(&info->io_lock, flags);
/* Flush gnttab callback work. Must be done with no locks held. */
flush_work_sync(&info->work);
{
struct blkfront_info *info = container_of(work, struct blkfront_info, work);
- spin_lock_irq(&blkif_io_lock);
+ spin_lock_irq(&info->io_lock);
if (info->connected == BLKIF_STATE_CONNECTED)
kick_pending_request_queues(info);
- spin_unlock_irq(&blkif_io_lock);
+ spin_unlock_irq(&info->io_lock);
}
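blkif_restart_queue() above is exactly why the teardown paths flush with no locks held: the work item takes info->io_lock itself, so calling flush_work_sync() under that lock would deadlock waiting on a handler that is blocked on the lock we hold. The safe ordering, restated:

/* 1. Under io_lock, stop the queue and cancel the gnttab callback so
 *    no new blkif_restart_queue() work gets scheduled.
 * 2. Drop the lock, then wait out any instance already running. */
spin_lock_irq(&info->io_lock);
blk_stop_queue(info->rq);
gnttab_cancel_free_callback(&info->callback);
spin_unlock_irq(&info->io_lock);

flush_work_sync(&info->work);  /* must run with no locks held */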
static void blkif_free(struct blkfront_info *info, int suspend)
{
/* Prevent new requests being issued until we fix things up. */
- spin_lock_irq(&blkif_io_lock);
+ spin_lock_irq(&info->io_lock);
info->connected = suspend ?
BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED;
/* No more blkif_request(). */
blk_stop_queue(info->rq);
/* No more gnttab callback work. */
gnttab_cancel_free_callback(&info->callback);
- spin_unlock_irq(&blkif_io_lock);
+ spin_unlock_irq(&info->io_lock);
/* Flush gnttab callback work. Must be done with no locks held. */
flush_work_sync(&info->work);
struct blkfront_info *info = (struct blkfront_info *)dev_id;
int error;
- spin_lock_irqsave(&blkif_io_lock, flags);
+ spin_lock_irqsave(&info->io_lock, flags);
if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) {
- spin_unlock_irqrestore(&blkif_io_lock, flags);
+ spin_unlock_irqrestore(&info->io_lock, flags);
return IRQ_HANDLED;
}
kick_pending_request_queues(info);
- spin_unlock_irqrestore(&blkif_io_lock, flags);
+ spin_unlock_irqrestore(&info->io_lock, flags);
return IRQ_HANDLED;
}
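Assembled as a whole, the interrupt handler shows the pattern the per-device lock enables: take io_lock, then verify the ring is still live. blkif_free() flips info->connected under the same lock, so an interrupt that fires during suspend or disconnect bails out before touching torn-down ring state (response-processing loop elided):

static irqreturn_t blkif_interrupt(int irq, void *dev_id)
{
	struct blkfront_info *info = (struct blkfront_info *)dev_id;
	unsigned long flags;

	spin_lock_irqsave(&info->io_lock, flags);

	if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) {
		spin_unlock_irqrestore(&info->io_lock, flags);
		return IRQ_HANDLED;  /* stale irq: ring already torn down */
	}

	/* ... consume responses from the shared ring, complete requests ... */

	kick_pending_request_queues(info);
	spin_unlock_irqrestore(&info->io_lock, flags);
	return IRQ_HANDLED;
}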
}
mutex_init(&info->mutex);
+ spin_lock_init(&info->io_lock); /* must be ready before the ring irq is bound */
info->xbdev = dev;
info->vdevice = vdevice;
info->connected = BLKIF_STATE_DISCONNECTED;
xenbus_switch_state(info->xbdev, XenbusStateConnected);
- spin_lock_irq(&blkif_io_lock);
+ spin_lock_irq(&info->io_lock);
/* Now safe for us to use the shared ring */
info->connected = BLKIF_STATE_CONNECTED;
/* Kick any other new requests queued since we resumed */
kick_pending_request_queues(info);
- spin_unlock_irq(&blkif_io_lock);
+ spin_unlock_irq(&info->io_lock);
return 0;
}
xenbus_switch_state(info->xbdev, XenbusStateConnected);
/* Kick pending requests. */
- spin_lock_irq(&blkif_io_lock);
+ spin_lock_irq(&info->io_lock);
info->connected = BLKIF_STATE_CONNECTED;
kick_pending_request_queues(info);
- spin_unlock_irq(&blkif_io_lock);
+ spin_unlock_irq(&info->io_lock);
add_disk(info->gd);
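Both connect paths kick the queue only while holding io_lock because kick_pending_request_queues() ends in blk_start_queue(), which must be called with the queue lock held. A reconstructed sketch of that helper:

static void kick_pending_request_queues(struct blkfront_info *info)
{
	if (!RING_FULL(&info->ring)) {
		/* Re-enable dispatch; requires queue_lock (io_lock) held. */
		blk_start_queue(info->rq);
		/* Push out anything that queued up while we were stopped. */
		do_blkif_request(info->rq);
	}
}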
mutex_lock(&blkfront_mutex);
bdev = bdget_disk(disk, 0);
- bdput(bdev);
if (bdev->bd_openers)
goto out;
}
out:
+ bdput(bdev);
mutex_unlock(&blkfront_mutex);
return 0;
}
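The blkif_release() hunk fixes a use-after-free: bdget_disk() returns a referenced block_device, and the old code dropped that reference with bdput() immediately, then went on to read bdev->bd_openers. Moving bdput() to the common exit label keeps the reference live across the check. Condensed sketch of the fixed flow (last-closer teardown elided):

static int blkif_release(struct gendisk *disk, fmode_t mode)
{
	struct block_device *bdev;

	mutex_lock(&blkfront_mutex);

	bdev = bdget_disk(disk, 0);  /* takes a reference on the bdev */

	if (bdev->bd_openers)
		goto out;            /* still open elsewhere: nothing to do */

	/* ... last closer: tear down if the backend already went away ... */

out:
	bdput(bdev);                 /* drop the reference on every path */
	mutex_unlock(&blkfront_mutex);
	return 0;
}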
if (!xen_domain())
return -ENODEV;
- if (!xen_platform_pci_unplug)
+ /* The emulated-device unplug protocol is HVM-only; PV guests never
+  * set xen_platform_pci_unplug and must still be allowed to probe. */
+ if (xen_hvm_domain() && !xen_platform_pci_unplug)
return -ENODEV;
if (register_blkdev(XENVBD_MAJOR, DEV_NAME)) {