/*
 * blkfront.c
 *
 * XenLinux virtual block device driver.
 *
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
 * Copyright (c) 2004, Christian Limpach
 * Copyright (c) 2004, Andrew Warfield
 * Copyright (c) 2005, Christopher Clark
 * Copyright (c) 2005, XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/module.h>

#include <xen/xenbus.h>
#include <xen/grant_table.h>
#include <xen/events.h>
#include <xen/page.h>

#include <xen/interface/grant_table.h>
#include <xen/interface/io/blkif.h>

#include <asm/xen/hypervisor.h>

enum blkif_state {
	BLKIF_STATE_DISCONNECTED,
	BLKIF_STATE_CONNECTED,
	BLKIF_STATE_SUSPENDED,
};

struct blk_shadow {
	struct blkif_request req;
	unsigned long request;
	unsigned long frame[BLKIF_MAX_SEGMENTS_PER_REQUEST];
};

static struct block_device_operations xlvbd_block_fops;

#define BLK_RING_SIZE __RING_SIZE((struct blkif_sring *)0, PAGE_SIZE)
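
/*
 * __RING_SIZE() yields the number of request/response slots that fit in
 * the single page shared with the backend, rounded down to a power of
 * two (32 slots with 4 KiB pages and the standard blkif request layout).
 */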

/*
 * We have one of these per vbd, whether ide, scsi or 'other'.  They
 * hang in private_data off the gendisk structure. We may end up
 * putting all kinds of interesting stuff here :-)
 */
struct blkfront_info
{
	struct xenbus_device *xbdev;
	dev_t dev;
	struct gendisk *gd;
	int vdevice;
	blkif_vdev_t handle;
	enum blkif_state connected;
	int ring_ref;
	struct blkif_front_ring ring;
	unsigned int evtchn, irq;
	struct request_queue *rq;
	struct work_struct work;
	struct gnttab_free_callback callback;
	struct blk_shadow shadow[BLK_RING_SIZE];
	unsigned long shadow_free;
	int feature_barrier;

	/*
	 * The number of people holding this device open.  We won't allow a
	 * hot-unplug unless this is 0.
	 */
	int users;
};

static DEFINE_SPINLOCK(blkif_io_lock);

#define MAXIMUM_OUTSTANDING_BLOCK_REQS \
	(BLKIF_MAX_SEGMENTS_PER_REQUEST * BLK_RING_SIZE)
#define GRANT_INVALID_REF	0

#define PARTS_PER_DISK		16

#define BLKIF_MAJOR(dev) ((dev)>>8)
#define BLKIF_MINOR(dev) ((dev) & 0xff)
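
/*
 * The "virtual-device" value read from xenstore in blkfront_probe()
 * packs the device's major number in the upper bits and its minor in
 * the low 8 bits; these macros unpack it.
 */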

#define DEV_NAME	"xvd"	/* name in /dev */

/* Information about our VBDs. */
#define MAX_VBDS 64
static LIST_HEAD(vbds_list);
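
/*
 * The shadow entries double as a free list: for an unused slot, the
 * otherwise-idle req.id field holds the index of the next free slot,
 * and shadow_free is the index of the list head.
 */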
static int get_id_from_freelist(struct blkfront_info *info)
{
	unsigned long free = info->shadow_free;
	BUG_ON(free > BLK_RING_SIZE);
	info->shadow_free = info->shadow[free].req.id;
	info->shadow[free].req.id = 0x0fffffee; /* debug */
	return free;
}

static void add_id_to_freelist(struct blkfront_info *info,
			       unsigned long id)
{
	info->shadow[id].req.id  = info->shadow_free;
	info->shadow[id].request = 0;
	info->shadow_free = id;
}

static void blkif_restart_queue_callback(void *arg)
{
	struct blkfront_info *info = (struct blkfront_info *)arg;
	schedule_work(&info->work);
}

/*
 * blkif_queue_request
 *
 * request block io
 *
 * id: for guest use only.
 * operation: BLKIF_OP_{READ,WRITE,PROBE}
 * buffer: buffer to read/write into. this should be a
 *   virtual address in the guest os.
 */
static int blkif_queue_request(struct request *req)
{
	struct blkfront_info *info = req->rq_disk->private_data;
	unsigned long buffer_mfn;
	struct blkif_request *ring_req;
	struct req_iterator iter;
	struct bio_vec *bvec;
	unsigned long id;
	unsigned int fsect, lsect;
	int ref;
	grant_ref_t gref_head;

	if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
		return 1;

	if (gnttab_alloc_grant_references(
		BLKIF_MAX_SEGMENTS_PER_REQUEST, &gref_head) < 0) {
		gnttab_request_free_callback(
			&info->callback,
			blkif_restart_queue_callback,
			info,
			BLKIF_MAX_SEGMENTS_PER_REQUEST);
		return 1;
	}

	/* Fill out a communications ring structure. */
	ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
	id = get_id_from_freelist(info);
	info->shadow[id].request = (unsigned long)req;

	ring_req->id = id;
	ring_req->sector_number = (blkif_sector_t)req->sector;
	ring_req->handle = info->handle;

	ring_req->operation = rq_data_dir(req) ?
		BLKIF_OP_WRITE : BLKIF_OP_READ;
	if (blk_barrier_rq(req))
		ring_req->operation = BLKIF_OP_WRITE_BARRIER;

	ring_req->nr_segments = 0;
	rq_for_each_segment(bvec, req, iter) {
		BUG_ON(ring_req->nr_segments
		       == BLKIF_MAX_SEGMENTS_PER_REQUEST);
		buffer_mfn = pfn_to_mfn(page_to_pfn(bvec->bv_page));
		fsect = bvec->bv_offset >> 9;
		lsect = fsect + (bvec->bv_len >> 9) - 1;
		/* install a grant reference. */
		ref = gnttab_claim_grant_reference(&gref_head);
		BUG_ON(ref == -ENOSPC);

		gnttab_grant_foreign_access_ref(
				ref,
				info->xbdev->otherend_id,
				buffer_mfn,
				rq_data_dir(req));

		info->shadow[id].frame[ring_req->nr_segments] =
				mfn_to_pfn(buffer_mfn);

		ring_req->seg[ring_req->nr_segments] =
				(struct blkif_request_segment) {
					.gref       = ref,
					.first_sect = fsect,
					.last_sect  = lsect };

		ring_req->nr_segments++;
	}

	info->ring.req_prod_pvt++;

	/* Keep a private copy so we can reissue requests when recovering. */
	info->shadow[id].req = *ring_req;

	gnttab_free_grant_references(gref_head);

	return 0;
}
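
/*
 * Note that blkif_queue_request() only advances the private producer
 * index (req_prod_pvt); requests become visible to the backend when
 * flush_requests() publishes them via RING_PUSH_REQUESTS_AND_CHECK_NOTIFY,
 * which asks for an event-channel notification only when the backend is
 * actually waiting for one.
 */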
static inline void flush_requests(struct blkfront_info *info)
{
	int notify;

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->ring, notify);

	if (notify)
		notify_remote_via_irq(info->irq);
}

/*
 * read a block; request is in a request queue
 */
static void do_blkif_request(struct request_queue *rq)
{
	struct blkfront_info *info = NULL;
	struct request *req;
	int queued;

	pr_debug("Entered do_blkif_request\n");

	queued = 0;

	while ((req = elv_next_request(rq)) != NULL) {
		info = req->rq_disk->private_data;
		if (!blk_fs_request(req)) {
			end_request(req, 0);
			continue;
		}

		if (RING_FULL(&info->ring))
			goto wait;

		pr_debug("do_blk_req %p: cmd %p, sec %lx, "
			 "(%u/%li) buffer:%p [%s]\n",
			 req, req->cmd, (unsigned long)req->sector,
			 req->current_nr_sectors,
			 req->nr_sectors, req->buffer,
			 rq_data_dir(req) ? "write" : "read");

		blkdev_dequeue_request(req);
		if (blkif_queue_request(req)) {
			blk_requeue_request(rq, req);
wait:
			/* Avoid pointless unplugs. */
			blk_stop_queue(rq);
			break;
		}

		queued++;
	}

	if (queued != 0)
		flush_requests(info);
}

static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
{
	struct request_queue *rq;

	rq = blk_init_queue(do_blkif_request, &blkif_io_lock);
	if (rq == NULL)
		return -1;

	elevator_init(rq, "noop");

	/* Hard sector size and max sectors impersonate the equiv. hardware. */
	blk_queue_hardsect_size(rq, sector_size);
	blk_queue_max_sectors(rq, 512);

	/* Each segment in a request is up to an aligned page in size. */
	blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
	blk_queue_max_segment_size(rq, PAGE_SIZE);

	/* Ensure a merged request will fit in a single I/O ring slot. */
	blk_queue_max_phys_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST);
	blk_queue_max_hw_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST);

	/* Make sure buffer addresses are sector-aligned. */
	blk_queue_dma_alignment(rq, 511);

	gd->queue = rq;

	return 0;
}

static int xlvbd_barrier(struct blkfront_info *info)
{
	int err;

	err = blk_queue_ordered(info->rq,
				info->feature_barrier ? QUEUE_ORDERED_DRAIN : QUEUE_ORDERED_NONE,
				NULL);
	if (err)
		return err;

	printk(KERN_INFO "blkfront: %s: barriers %s\n",
	       info->gd->disk_name,
	       info->feature_barrier ? "enabled" : "disabled");
	return 0;
}
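
/*
 * QUEUE_ORDERED_DRAIN makes the block layer drain the queue around a
 * barrier request rather than relying on ordered tags; the barrier
 * itself reaches the backend as BLKIF_OP_WRITE_BARRIER (see
 * blkif_queue_request() above).
 */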

static int xlvbd_alloc_gendisk(int minor, blkif_sector_t capacity,
			       int vdevice, u16 vdisk_info, u16 sector_size,
			       struct blkfront_info *info)
{
	struct gendisk *gd;
	int nr_minors = 1;
	int err = -ENODEV;

	BUG_ON(info->gd != NULL);
	BUG_ON(info->rq != NULL);

	if ((minor % PARTS_PER_DISK) == 0)
		nr_minors = PARTS_PER_DISK;

	gd = alloc_disk(nr_minors);
	if (gd == NULL)
		goto out;

	if (nr_minors > 1)
		sprintf(gd->disk_name, "%s%c", DEV_NAME,
			'a' + minor / PARTS_PER_DISK);
	else
		sprintf(gd->disk_name, "%s%c%d", DEV_NAME,
			'a' + minor / PARTS_PER_DISK,
			minor % PARTS_PER_DISK);

	gd->major = XENVBD_MAJOR;
	gd->first_minor = minor;
	gd->fops = &xlvbd_block_fops;
	gd->private_data = info;
	gd->driverfs_dev = &(info->xbdev->dev);
	set_capacity(gd, capacity);

	if (xlvbd_init_blk_queue(gd, sector_size)) {
		del_gendisk(gd);
		goto out;
	}

	info->rq = gd->queue;

	if (info->feature_barrier)
		xlvbd_barrier(info);

	if (vdisk_info & VDISK_READONLY)
		set_disk_ro(gd, 1);

	if (vdisk_info & VDISK_REMOVABLE)
		gd->flags |= GENHD_FL_REMOVABLE;

	if (vdisk_info & VDISK_CDROM)
		gd->flags |= GENHD_FL_CD;

	info->gd = gd;

	return 0;

 out:
	return err;
}

static void kick_pending_request_queues(struct blkfront_info *info)
{
	if (!RING_FULL(&info->ring)) {
		/* Re-enable calldowns. */
		blk_start_queue(info->rq);
		/* Kick things off immediately. */
		do_blkif_request(info->rq);
	}
}

static void blkif_restart_queue(struct work_struct *work)
{
	struct blkfront_info *info = container_of(work, struct blkfront_info, work);

	spin_lock_irq(&blkif_io_lock);
	if (info->connected == BLKIF_STATE_CONNECTED)
		kick_pending_request_queues(info);
	spin_unlock_irq(&blkif_io_lock);
}

static void blkif_free(struct blkfront_info *info, int suspend)
{
	/* Prevent new requests being issued until we fix things up. */
	spin_lock_irq(&blkif_io_lock);
	info->connected = suspend ?
		BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED;
	/* No more blkif_request(). */
	if (info->rq)
		blk_stop_queue(info->rq);
	/* No more gnttab callback work. */
	gnttab_cancel_free_callback(&info->callback);
	spin_unlock_irq(&blkif_io_lock);

	/* Flush gnttab callback work. Must be done with no locks held. */
	flush_scheduled_work();

	/* Free resources associated with old device channel. */
	if (info->ring_ref != GRANT_INVALID_REF) {
		gnttab_end_foreign_access(info->ring_ref, 0,
					  (unsigned long)info->ring.sring);
		info->ring_ref = GRANT_INVALID_REF;
		info->ring.sring = NULL;
	}
	if (info->irq)
		unbind_from_irqhandler(info->irq, info);
	info->evtchn = info->irq = 0;
}

static void blkif_completion(struct blk_shadow *s)
{
	int i;
	for (i = 0; i < s->req.nr_segments; i++)
		gnttab_end_foreign_access(s->req.seg[i].gref, 0, 0UL);
}
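
/*
 * Response handling: blkif_interrupt() snapshots rsp_prod once (with a
 * read barrier), consumes responses up to that point, and then relies
 * on RING_FINAL_CHECK_FOR_RESPONSES() to close the race with the
 * backend posting further responses after the snapshot was taken.
 */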

static irqreturn_t blkif_interrupt(int irq, void *dev_id)
{
	struct request *req;
	struct blkif_response *bret;
	RING_IDX i, rp;
	unsigned long flags;
	struct blkfront_info *info = (struct blkfront_info *)dev_id;
	int uptodate;

	spin_lock_irqsave(&blkif_io_lock, flags);

	if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) {
		spin_unlock_irqrestore(&blkif_io_lock, flags);
		return IRQ_HANDLED;
	}

 again:
	rp = info->ring.sring->rsp_prod;
	rmb(); /* Ensure we see queued responses up to 'rp'. */

	for (i = info->ring.rsp_cons; i != rp; i++) {
		unsigned long id;
		int ret;

		bret = RING_GET_RESPONSE(&info->ring, i);
		id = bret->id;
		req = (struct request *)info->shadow[id].request;

		blkif_completion(&info->shadow[id]);

		add_id_to_freelist(info, id);

		uptodate = (bret->status == BLKIF_RSP_OKAY);
		switch (bret->operation) {
		case BLKIF_OP_WRITE_BARRIER:
			if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
				printk(KERN_WARNING "blkfront: %s: write barrier op failed\n",
				       info->gd->disk_name);
				uptodate = -EOPNOTSUPP;
				info->feature_barrier = 0;
				xlvbd_barrier(info);
			}
			/* fall through */
		case BLKIF_OP_READ:
		case BLKIF_OP_WRITE:
			if (unlikely(bret->status != BLKIF_RSP_OKAY))
				dev_dbg(&info->xbdev->dev, "Bad return from blkdev data "
					"request: %x\n", bret->status);

			ret = end_that_request_first(req, uptodate,
						     req->hard_nr_sectors);
			BUG_ON(ret);
			end_that_request_last(req, uptodate);
			break;
		default:
			BUG();
		}
	}

	info->ring.rsp_cons = i;

	if (i != info->ring.req_prod_pvt) {
		int more_to_do;
		RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do);
		if (more_to_do)
			goto again;
	} else
		info->ring.sring->rsp_event = i + 1;

	kick_pending_request_queues(info);

	spin_unlock_irqrestore(&blkif_io_lock, flags);

	return IRQ_HANDLED;
}
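
/*
 * Ring setup: allocate one page for the shared ring, grant the backend
 * access to it, allocate an event channel for completion notifications,
 * and bind that channel to blkif_interrupt().
 */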
static int setup_blkring(struct xenbus_device *dev,
			 struct blkfront_info *info)
{
	struct blkif_sring *sring;
	int err;

	info->ring_ref = GRANT_INVALID_REF;

	sring = (struct blkif_sring *)__get_free_page(GFP_KERNEL);
	if (!sring) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
		return -ENOMEM;
	}
	SHARED_RING_INIT(sring);
	FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);

	err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring));
	if (err < 0) {
		free_page((unsigned long)sring);
		info->ring.sring = NULL;
		goto fail;
	}
	info->ring_ref = err;

	err = xenbus_alloc_evtchn(dev, &info->evtchn);
	if (err)
		goto fail;

	err = bind_evtchn_to_irqhandler(info->evtchn, blkif_interrupt,
					IRQF_SAMPLE_RANDOM, "blkif", info);
	if (err <= 0) {
		xenbus_dev_fatal(dev, err,
				 "bind_evtchn_to_irqhandler failed");
		goto fail;
	}
	info->irq = err;

	return 0;
fail:
	blkif_free(info, 0);
	return err;
}

/* Common code used when first setting up, and when resuming. */
static int talk_to_backend(struct xenbus_device *dev,
			   struct blkfront_info *info)
{
	const char *message = NULL;
	struct xenbus_transaction xbt;
	int err;

	/* Create shared ring, alloc event channel. */
	err = setup_blkring(dev, info);
	if (err)
		goto out;

again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_dev_fatal(dev, err, "starting transaction");
		goto destroy_blkring;
	}

	err = xenbus_printf(xbt, dev->nodename,
			    "ring-ref", "%u", info->ring_ref);
	if (err) {
		message = "writing ring-ref";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, dev->nodename,
			    "event-channel", "%u", info->evtchn);
	if (err) {
		message = "writing event-channel";
		goto abort_transaction;
	}

	err = xenbus_transaction_end(xbt, 0);
	if (err) {
		if (err == -EAGAIN)
			goto again;
		xenbus_dev_fatal(dev, err, "completing transaction");
		goto destroy_blkring;
	}

	xenbus_switch_state(dev, XenbusStateInitialised);

	return 0;

 abort_transaction:
	xenbus_transaction_end(xbt, 1);
	if (message)
		xenbus_dev_fatal(dev, err, "%s", message);
 destroy_blkring:
	blkif_free(info, 0);
 out:
	return err;
}
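
/*
 * After talk_to_backend() the frontend's xenbus directory holds, for
 * example (the exact prefix comes from dev->nodename, so the path below
 * is illustrative only):
 *
 *	.../device/vbd/<id>/ring-ref      = "<grant ref of the shared page>"
 *	.../device/vbd/<id>/event-channel = "<event channel port>"
 *
 * The backend reads these to map the ring and bind the event channel.
 */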

/**
 * Entry point to this code when a new device is created.  Allocate the basic
 * structures and the ring buffer for communication with the backend, and
 * inform the backend of the appropriate details for those.  Switch to
 * Initialised state.
 */
static int blkfront_probe(struct xenbus_device *dev,
			  const struct xenbus_device_id *id)
{
	int err, vdevice, i;
	struct blkfront_info *info;

	/* FIXME: Use dynamic device id if this is not set. */
	err = xenbus_scanf(XBT_NIL, dev->nodename,
			   "virtual-device", "%i", &vdevice);
	if (err != 1) {
		xenbus_dev_fatal(dev, err, "reading virtual-device");
		return err;
	}

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
		return -ENOMEM;
	}

	info->xbdev = dev;
	info->vdevice = vdevice;
	info->connected = BLKIF_STATE_DISCONNECTED;
	INIT_WORK(&info->work, blkif_restart_queue);

	for (i = 0; i < BLK_RING_SIZE; i++)
		info->shadow[i].req.id = i+1;
	info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff;

	/* Front end dir is a number, which is used as the id. */
	info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0);
	dev->dev.driver_data = info;

	err = talk_to_backend(dev, info);
	if (err) {
		kfree(info);
		dev->dev.driver_data = NULL;
		return err;
	}

	return 0;
}

static int blkif_recover(struct blkfront_info *info)
{
	int i;
	struct blkif_request *req;
	struct blk_shadow *copy;
	int j;

	/* Stage 1: Make a safe copy of the shadow state. */
	copy = kmalloc(sizeof(info->shadow), GFP_KERNEL);
	if (!copy)
		return -ENOMEM;
	memcpy(copy, info->shadow, sizeof(info->shadow));

	/* Stage 2: Set up free list. */
	memset(&info->shadow, 0, sizeof(info->shadow));
	for (i = 0; i < BLK_RING_SIZE; i++)
		info->shadow[i].req.id = i+1;
	info->shadow_free = info->ring.req_prod_pvt;
	info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff;

	/* Stage 3: Find pending requests and requeue them. */
	for (i = 0; i < BLK_RING_SIZE; i++) {
		/* Not in use? */
		if (copy[i].request == 0)
			continue;

		/* Grab a request slot and copy shadow state into it. */
		req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
		*req = copy[i].req;

		/* We get a new request id, and must reset the shadow state. */
		req->id = get_id_from_freelist(info);
		memcpy(&info->shadow[req->id], &copy[i], sizeof(copy[i]));

		/* Rewrite any grant references invalidated by susp/resume. */
		for (j = 0; j < req->nr_segments; j++)
			gnttab_grant_foreign_access_ref(
				req->seg[j].gref,
				info->xbdev->otherend_id,
				pfn_to_mfn(info->shadow[req->id].frame[j]),
				rq_data_dir((struct request *)
					    info->shadow[req->id].request));
		info->shadow[req->id].req = *req;

		info->ring.req_prod_pvt++;
	}

	kfree(copy);

	xenbus_switch_state(info->xbdev, XenbusStateConnected);

	spin_lock_irq(&blkif_io_lock);

	/* Now safe for us to use the shared ring */
	info->connected = BLKIF_STATE_CONNECTED;

	/* Send off requeued requests */
	flush_requests(info);

	/* Kick any other new requests queued since we resumed */
	kick_pending_request_queues(info);

	spin_unlock_irq(&blkif_io_lock);

	return 0;
}

/**
 * We are reconnecting to the backend, due to a suspend/resume, or a backend
 * driver restart.  We tear down our blkif structure and recreate it, but
 * leave the device-layer structures intact so that this is transparent to the
 * rest of the kernel.
 */
static int blkfront_resume(struct xenbus_device *dev)
{
	struct blkfront_info *info = dev->dev.driver_data;
	int err;

	dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename);

	blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);

	err = talk_to_backend(dev, info);
	if (info->connected == BLKIF_STATE_SUSPENDED && !err)
		err = blkif_recover(info);

	return err;
}

/*
 * Invoked when the backend is finally 'ready' (and has published the
 * details about the physical device - #sectors, size, etc).
 */
static void blkfront_connect(struct blkfront_info *info)
{
	unsigned long long sectors;
	unsigned long sector_size;
	unsigned int binfo;
	int err;

	if ((info->connected == BLKIF_STATE_CONNECTED) ||
	    (info->connected == BLKIF_STATE_SUSPENDED))
		return;

	dev_dbg(&info->xbdev->dev, "%s:%s.\n",
		__func__, info->xbdev->otherend);

	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
			    "sectors", "%llu", &sectors,
			    "info", "%u", &binfo,
			    "sector-size", "%lu", &sector_size,
			    NULL);
	if (err) {
		xenbus_dev_fatal(info->xbdev, err,
				 "reading backend fields at %s",
				 info->xbdev->otherend);
		return;
	}

	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
			    "feature-barrier", "%lu", &info->feature_barrier,
			    NULL);
	if (err)
		info->feature_barrier = 0;

	err = xlvbd_alloc_gendisk(BLKIF_MINOR(info->vdevice),
				  sectors, info->vdevice,
				  binfo, sector_size, info);
	if (err) {
		xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
				 info->xbdev->otherend);
		return;
	}

	xenbus_switch_state(info->xbdev, XenbusStateConnected);

	/* Kick pending requests. */
	spin_lock_irq(&blkif_io_lock);
	info->connected = BLKIF_STATE_CONNECTED;
	kick_pending_request_queues(info);
	spin_unlock_irq(&blkif_io_lock);

	add_disk(info->gd);
}

/**
 * Handle the change of state of the backend to Closing.  We must delete our
 * device-layer structures now, to ensure that writes are flushed through to
 * the backend.  Once this is done, we can switch to Closed in
 * acknowledgement.
 */
static void blkfront_closing(struct xenbus_device *dev)
{
	struct blkfront_info *info = dev->dev.driver_data;
	unsigned long flags;

	dev_dbg(&dev->dev, "blkfront_closing: %s removed\n", dev->nodename);

	if (info->rq == NULL)
		goto out;

	spin_lock_irqsave(&blkif_io_lock, flags);

	del_gendisk(info->gd);

	/* No more blkif_request(). */
	blk_stop_queue(info->rq);

	/* No more gnttab callback work. */
	gnttab_cancel_free_callback(&info->callback);
	spin_unlock_irqrestore(&blkif_io_lock, flags);

	/* Flush gnttab callback work. Must be done with no locks held. */
	flush_scheduled_work();

	blk_cleanup_queue(info->rq);
	info->rq = NULL;

 out:
	xenbus_frontend_closed(dev);
}

/**
 * Callback received when the backend's state changes.
 */
static void backend_changed(struct xenbus_device *dev,
			    enum xenbus_state backend_state)
{
	struct blkfront_info *info = dev->dev.driver_data;
	struct block_device *bd;

	dev_dbg(&dev->dev, "blkfront:backend_changed.\n");

	switch (backend_state) {
	case XenbusStateInitialising:
	case XenbusStateInitWait:
	case XenbusStateInitialised:
	case XenbusStateUnknown:
	case XenbusStateClosed:
		break;

	case XenbusStateConnected:
		blkfront_connect(info);
		break;

	case XenbusStateClosing:
		bd = bdget(info->dev);
		if (bd == NULL)
			xenbus_dev_fatal(dev, -ENODEV, "bdget failed");

		mutex_lock(&bd->bd_mutex);
		if (info->users > 0)
			xenbus_dev_error(dev, -EBUSY,
					 "Device in use; refusing to close");
		else
			blkfront_closing(dev);
		mutex_unlock(&bd->bd_mutex);
		bdput(bd);
		break;
	}
}

static int blkfront_remove(struct xenbus_device *dev)
{
	struct blkfront_info *info = dev->dev.driver_data;

	dev_dbg(&dev->dev, "blkfront_remove: %s removed\n", dev->nodename);

	blkif_free(info, 0);

	kfree(info);

	return 0;
}

static int blkif_open(struct inode *inode, struct file *filep)
{
	struct blkfront_info *info = inode->i_bdev->bd_disk->private_data;
	info->users++;
	return 0;
}

static int blkif_release(struct inode *inode, struct file *filep)
{
	struct blkfront_info *info = inode->i_bdev->bd_disk->private_data;
	info->users--;
	if (info->users == 0) {
		/* Check whether we have been instructed to close.  We will
		   have ignored this request initially, as the device was
		   still mounted. */
		struct xenbus_device *dev = info->xbdev;
		enum xenbus_state state = xenbus_read_driver_state(dev->otherend);

		if (state == XenbusStateClosing)
			blkfront_closing(dev);
	}
	return 0;
}

static struct block_device_operations xlvbd_block_fops =
{
	.owner = THIS_MODULE,
	.open = blkif_open,
	.release = blkif_release,
};

static struct xenbus_device_id blkfront_ids[] = {
	{ "vbd" },
	{ "" }
};

static struct xenbus_driver blkfront = {
	.name = "vbd",
	.owner = THIS_MODULE,
	.ids = blkfront_ids,
	.probe = blkfront_probe,
	.remove = blkfront_remove,
	.resume = blkfront_resume,
	.otherend_changed = backend_changed,
};

static int __init xlblk_init(void)
{
	if (!is_running_on_xen())
		return -ENODEV;

	if (register_blkdev(XENVBD_MAJOR, DEV_NAME)) {
		printk(KERN_WARNING "xen_blk: can't get major %d with name %s\n",
		       XENVBD_MAJOR, DEV_NAME);
		return -ENODEV;
	}

	return xenbus_register_frontend(&blkfront);
}
module_init(xlblk_init);

static void xlblk_exit(void)
{
	return xenbus_unregister_driver(&blkfront);
}
module_exit(xlblk_exit);

MODULE_DESCRIPTION("Xen virtual block device frontend");
MODULE_LICENSE("GPL");
MODULE_ALIAS_BLOCKDEV_MAJOR(XENVBD_MAJOR);