/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/blkdev.h>
#include <linux/major.h>
#include <linux/delay.h>
#include <linux/hdreg.h>
#include <linux/slab.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_dbg.h>

#include "version_info.h"
#include "storvsc_api.h"

#define BLKVSC_MINORS	64

enum blkvsc_device_type {
	UNKNOWN_DEV_TYPE,
	HARDDISK_TYPE,
	DVD_TYPE,
};

enum blkvsc_op_type {
	DO_INQUIRY,
	DO_CAPACITY,
	DO_FLUSH,
};

/*
 * This request ties the struct request and struct
 * blkvsc_request/hv_storvsc_request together. A struct request may be
 * represented by 1 or more struct blkvsc_request.
 */
struct blkvsc_request_group {
	int outstanding;
	int status;
	struct list_head blkvsc_req_list;	/* list of blkvsc_requests */
};

struct blkvsc_request {
	/* blkvsc_request_group.blkvsc_req_list */
	struct list_head req_entry;

	/* block_device_context.pending_list */
	struct list_head pend_entry;

	/* This may be null if we generate a request internally */
	struct request *req;

	struct block_device_context *dev;

	/* The group this request is part of. May be null */
	struct blkvsc_request_group *group;

	int write;
	sector_t sector_start;
	unsigned long sector_count;

	unsigned char sense_buffer[SCSI_SENSE_BUFFERSIZE];
	unsigned char cmd_len;
	unsigned char cmnd[MAX_COMMAND_SIZE];

	struct hv_storvsc_request request;
};

/* Per device structure */
struct block_device_context {
	/* point back to our device context */
	struct hv_device *device_ctx;
	struct kmem_cache *request_pool;
	spinlock_t lock;
	struct gendisk *gd;
	enum blkvsc_device_type device_type;
	struct list_head pending_list;

	unsigned char device_id[64];
	unsigned int device_id_len;
	int num_outstanding_reqs;
	int shutting_down;
	unsigned int sector_size;
	sector_t capacity;
	unsigned int port;
	unsigned char path;
	unsigned char target;
	int users;
};

static const char *drv_name = "blkvsc";

/* {32412632-86cb-44a2-9b5c-50d1417354f5} */
static const struct hv_guid dev_type = {
	.data = {
		0x32, 0x26, 0x41, 0x32, 0xcb, 0x86, 0xa2, 0x44,
		0x9b, 0x5c, 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5
	}
};

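/*
 * Note on byte order: struct hv_guid holds the GUID in its wire format,
 * so the first three fields of {32412632-86cb-44a2-...} appear above in
 * little-endian byte order (0x32, 0x26, 0x41, 0x32, then 0xcb, 0x86,
 * then 0xa2, 0x44) while the final eight bytes are stored as written.
 */
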
/*
 * There is a circular dependency involving blkvsc_request_completion()
 * and blkvsc_do_request().
 */
static void blkvsc_request_completion(struct hv_storvsc_request *request);

static int blkvsc_ringbuffer_size = BLKVSC_RING_BUFFER_SIZE;

module_param(blkvsc_ringbuffer_size, int, S_IRUGO);
MODULE_PARM_DESC(blkvsc_ringbuffer_size, "Ring buffer size (in bytes)");

/*
 * There is a circular dependency involving blkvsc_probe()
 * and block_ops.
 */
static int blkvsc_probe(struct hv_device *dev);

static int blkvsc_device_add(struct hv_device *device,
			     void *additional_info)
{
	struct storvsc_device_info *device_info;
	int ret = 0;

	device_info = (struct storvsc_device_info *)additional_info;

	ret = storvsc_dev_add(device, additional_info);
	if (ret != 0)
		return ret;

	/*
	 * We need to use the device instance guid to set the path and target
	 * id. For IDE devices, the device instance id is formatted as
	 * <bus id> - <device id> - 8899 - 000000000000.
	 */
	device_info->path_id = device->dev_instance.data[3] << 24 |
			       device->dev_instance.data[2] << 16 |
			       device->dev_instance.data[1] << 8 |
			       device->dev_instance.data[0];

	device_info->target_id = device->dev_instance.data[5] << 8 |
				 device->dev_instance.data[4];

	return ret;
}

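/*
 * Build the vm_srb for a blkvsc_request and hand it to the storvsc layer;
 * the supplied completion routine runs when the host responds. Every
 * caller holds blkdev->lock (blkvsc_request() runs with it as the queue
 * lock, and blkvsc_do_operation() takes it explicitly), which is what
 * protects num_outstanding_reqs.
 */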
static int blkvsc_submit_request(struct blkvsc_request *blkvsc_req,
		void (*request_completion)(struct hv_storvsc_request *))
{
	struct block_device_context *blkdev = blkvsc_req->dev;
	struct hv_storvsc_request *storvsc_req;
	struct vmscsi_request *vm_srb;
	int ret;

	storvsc_req = &blkvsc_req->request;
	vm_srb = &storvsc_req->vstor_packet.vm_srb;

	vm_srb->data_in = blkvsc_req->write ? WRITE_TYPE : READ_TYPE;

	storvsc_req->on_io_completion = request_completion;
	storvsc_req->context = blkvsc_req;

	vm_srb->port_number = blkdev->port;
	vm_srb->path_id = blkdev->path;
	vm_srb->target_id = blkdev->target;
	vm_srb->lun = 0;	/* this is not really used at all */

	vm_srb->cdb_length = blkvsc_req->cmd_len;

	memcpy(vm_srb->cdb, blkvsc_req->cmnd, vm_srb->cdb_length);

	storvsc_req->sense_buffer = blkvsc_req->sense_buffer;

	ret = storvsc_do_io(blkdev->device_ctx,
			    &blkvsc_req->request);
	if (ret == 0)
		blkdev->num_outstanding_reqs++;

	return ret;
}

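/*
 * Open/release only maintain the user count under blkdev->lock;
 * blkvsc_release() issues a cache flush when the last opener is about
 * to go away (users == 1 on entry).
 */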
static int blkvsc_open(struct block_device *bdev, fmode_t mode)
{
	struct block_device_context *blkdev = bdev->bd_disk->private_data;
	unsigned long flags;

	spin_lock_irqsave(&blkdev->lock, flags);

	blkdev->users++;

	spin_unlock_irqrestore(&blkdev->lock, flags);

	return 0;
}

static int blkvsc_getgeo(struct block_device *bd, struct hd_geometry *hg)
{
	sector_t nsect = get_capacity(bd->bd_disk);
	sector_t cylinders = nsect;

	/*
	 * We are making up these values; let us keep it simple.
	 */
	hg->heads = 0xff;
	hg->sectors = 0x3f;
	sector_div(cylinders, hg->heads * hg->sectors);
	hg->cylinders = cylinders;
	if ((sector_t)(hg->cylinders + 1) * hg->heads * hg->sectors < nsect)
		hg->cylinders = 0xffff;
	return 0;
}

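/*
 * Fill in a READ_16/WRITE_16 CDB for the request: the opcode goes in
 * byte 0, the FUA bit (0x08) in byte 1, a big-endian 64-bit starting
 * LBA in bytes 2-9 and a big-endian 32-bit transfer length in bytes
 * 10-13. The unaligned multi-byte stores below are acceptable on x86,
 * the only architecture Hyper-V guests run on.
 */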
static void blkvsc_init_rw(struct blkvsc_request *blkvsc_req)
{
	blkvsc_req->cmd_len = 16;

	if (rq_data_dir(blkvsc_req->req)) {
		blkvsc_req->write = 1;
		blkvsc_req->cmnd[0] = WRITE_16;
	} else {
		blkvsc_req->write = 0;
		blkvsc_req->cmnd[0] = READ_16;
	}

	blkvsc_req->cmnd[1] |=
		(blkvsc_req->req->cmd_flags & REQ_FUA) ? 0x8 : 0;

	*(unsigned long long *)&blkvsc_req->cmnd[2] =
		cpu_to_be64(blkvsc_req->sector_start);
	*(unsigned int *)&blkvsc_req->cmnd[10] =
		cpu_to_be32(blkvsc_req->sector_count);
}

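/*
 * Note that HDIO_GET_IDENTITY here does not return an ATA identify
 * page; it copies out the raw INQUIRY VPD page 0x83 identifier bytes
 * collected by blkvsc_do_operation(DO_INQUIRY) at probe time.
 */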
static int blkvsc_ioctl(struct block_device *bd, fmode_t mode,
			unsigned cmd, unsigned long arg)
{
	struct block_device_context *blkdev = bd->bd_disk->private_data;
	int ret = 0;

	switch (cmd) {
	case HDIO_GET_IDENTITY:
		if (copy_to_user((void __user *)arg, blkdev->device_id,
				 blkdev->device_id_len))
			ret = -EFAULT;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

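/*
 * Completion routine for commands generated internally by
 * blkvsc_do_operation(): update the outstanding-request count, print
 * any sense data, and wake up the waiter.
 */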
static void blkvsc_cmd_completion(struct hv_storvsc_request *request)
{
	struct blkvsc_request *blkvsc_req =
			(struct blkvsc_request *)request->context;
	struct block_device_context *blkdev =
			(struct block_device_context *)blkvsc_req->dev;
	struct scsi_sense_hdr sense_hdr;
	struct vmscsi_request *vm_srb;
	unsigned long flags;

	vm_srb = &blkvsc_req->request.vstor_packet.vm_srb;

	spin_lock_irqsave(&blkdev->lock, flags);
	blkdev->num_outstanding_reqs--;
	spin_unlock_irqrestore(&blkdev->lock, flags);

	if (vm_srb->scsi_status)
		if (scsi_normalize_sense(blkvsc_req->sense_buffer,
					 SCSI_SENSE_BUFFERSIZE, &sense_hdr))
			scsi_print_sense_hdr("blkvsc", &sense_hdr);

	complete(&blkvsc_req->request.wait_event);
}

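/*
 * Synchronously execute one internally generated command (INQUIRY,
 * READ_CAPACITY or SYNCHRONIZE_CACHE): build the CDB by hand, point the
 * data buffer at a single scratch page, submit the request with
 * blkvsc_cmd_completion() as the completion routine, block on the
 * wait_event, and then parse the result out of the scratch page.
 */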
static int blkvsc_do_operation(struct block_device_context *blkdev,
			       enum blkvsc_op_type op)
{
	struct blkvsc_request *blkvsc_req;
	struct page *page_buf;
	unsigned char *buf;
	unsigned char device_type;
	struct scsi_sense_hdr sense_hdr;
	struct vmscsi_request *vm_srb;
	unsigned long flags;
	int ret = 0;

	blkvsc_req = kmem_cache_zalloc(blkdev->request_pool, GFP_KERNEL);
	if (!blkvsc_req)
		return -ENOMEM;

	page_buf = alloc_page(GFP_KERNEL);
	if (!page_buf) {
		kmem_cache_free(blkdev->request_pool, blkvsc_req);
		return -ENOMEM;
	}

	vm_srb = &blkvsc_req->request.vstor_packet.vm_srb;
	init_completion(&blkvsc_req->request.wait_event);
	blkvsc_req->dev = blkdev;
	blkvsc_req->req = NULL;
	blkvsc_req->write = 0;

	blkvsc_req->request.data_buffer.pfn_array[0] =
			page_to_pfn(page_buf);
	blkvsc_req->request.data_buffer.offset = 0;

	switch (op) {
	case DO_INQUIRY:
		blkvsc_req->cmnd[0] = INQUIRY;
		blkvsc_req->cmnd[1] = 0x1;	/* EVPD: vital product data */
		blkvsc_req->cmnd[2] = 0x83;	/* VPD page 0x83: device id */
		blkvsc_req->cmnd[4] = 64;
		blkvsc_req->cmd_len = 6;
		blkvsc_req->request.data_buffer.len = 64;
		break;

	case DO_CAPACITY:
		blkdev->sector_size = 0;
		blkdev->capacity = 0;

		blkvsc_req->cmnd[0] = READ_CAPACITY;
		blkvsc_req->cmd_len = 16;
		blkvsc_req->request.data_buffer.len = 8;
		break;

	case DO_FLUSH:
		blkvsc_req->cmnd[0] = SYNCHRONIZE_CACHE;
		blkvsc_req->cmd_len = 10;
		blkvsc_req->request.data_buffer.pfn_array[0] = 0;
		blkvsc_req->request.data_buffer.len = 0;
		break;

	default:
		ret = -EINVAL;
		goto cleanup;
	}

	spin_lock_irqsave(&blkdev->lock, flags);
	blkvsc_submit_request(blkvsc_req, blkvsc_cmd_completion);
	spin_unlock_irqrestore(&blkdev->lock, flags);

	wait_for_completion_interruptible(&blkvsc_req->request.wait_event);

	/* check error */
	if (vm_srb->scsi_status) {
		scsi_normalize_sense(blkvsc_req->sense_buffer,
				     SCSI_SENSE_BUFFERSIZE, &sense_hdr);
		goto cleanup;
	}

	buf = kmap(page_buf);

	switch (op) {
	case DO_INQUIRY:
		device_type = buf[0] & 0x1F;

		if (device_type == 0x0)
			blkdev->device_type = HARDDISK_TYPE;
		else
			blkdev->device_type = UNKNOWN_DEV_TYPE;

		blkdev->device_id_len = buf[7];
		if (blkdev->device_id_len > 64)
			blkdev->device_id_len = 64;

		memcpy(blkdev->device_id, &buf[8], blkdev->device_id_len);
		break;

	case DO_CAPACITY:
		/* be to le */
		blkdev->capacity =
			((buf[0] << 24) | (buf[1] << 16) |
			 (buf[2] << 8) | buf[3]) + 1;

		blkdev->sector_size =
			(buf[4] << 24) | (buf[5] << 16) |
			(buf[6] << 8) | buf[7];
		break;

	default:
		break;
	}

	kunmap(page_buf);

cleanup:
	__free_page(page_buf);

	kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);

	return ret;
}

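/*
 * Fail everything on the pending list with -EIO, first completing any
 * already-finished members of each request's group so the block layer
 * sees sectors retired in order. Called from blkvsc_remove() and
 * blkvsc_shutdown() with blkdev->lock held.
 */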
static int blkvsc_cancel_pending_reqs(struct block_device_context *blkdev)
{
	struct blkvsc_request *pend_req, *tmp;
	struct blkvsc_request *comp_req, *tmp2;
	struct vmscsi_request *vm_srb;
	int ret = 0;

	/* Flush the pending list first */
	list_for_each_entry_safe(pend_req, tmp, &blkdev->pending_list,
				 pend_entry) {
		/*
		 * The pend_req could be part of a partially completed
		 * request. If so, complete those req first until we
		 * hit the pend_req.
		 */
		list_for_each_entry_safe(comp_req, tmp2,
					 &pend_req->group->blkvsc_req_list,
					 req_entry) {
			if (comp_req == pend_req)
				break;

			list_del(&comp_req->req_entry);

			if (comp_req->req) {
				vm_srb =
				&comp_req->request.vstor_packet.
				vm_srb;
				ret = __blk_end_request(comp_req->req,
					(!vm_srb->scsi_status ? 0 : -EIO),
					comp_req->sector_count *
					blkdev->sector_size);

				/* FIXME: shouldn't this do more than return? */
				if (ret)
					goto out;
			}

			kmem_cache_free(blkdev->request_pool, comp_req);
		}

		list_del(&pend_req->pend_entry);

		list_del(&pend_req->req_entry);

		if (!__blk_end_request(pend_req->req, -EIO,
				       pend_req->sector_count *
				       blkdev->sector_size)) {
			/*
			 * All the sectors have been xferred ie the
			 * request is done.
			 */
			kmem_cache_free(blkdev->request_pool,
					pend_req->group);
		}

		kmem_cache_free(blkdev->request_pool, pend_req);
	}

out:
	return ret;
}

/*
 * blkvsc_remove() - Callback when our device is removed
 */
static int blkvsc_remove(struct hv_device *dev)
{
	struct block_device_context *blkdev = dev_get_drvdata(&dev->device);
	unsigned long flags;

	/* Get to a known state */
	spin_lock_irqsave(&blkdev->lock, flags);

	blkdev->shutting_down = 1;

	blk_stop_queue(blkdev->gd->queue);

	blkvsc_cancel_pending_reqs(blkdev);

	spin_unlock_irqrestore(&blkdev->lock, flags);

	blkvsc_do_operation(blkdev, DO_FLUSH);

	blk_cleanup_queue(blkdev->gd->queue);

	/*
	 * Call to the vsc driver to let it know that the device is being
	 * removed.
	 */
	storvsc_dev_remove(dev);

	del_gendisk(blkdev->gd);

	kmem_cache_destroy(blkdev->request_pool);

	kfree(blkdev);

	return 0;
}

static void blkvsc_shutdown(struct hv_device *dev)
{
	struct block_device_context *blkdev = dev_get_drvdata(&dev->device);
	unsigned long flags;

	if (!blkdev)
		return;

	spin_lock_irqsave(&blkdev->lock, flags);

	blkdev->shutting_down = 1;

	blk_stop_queue(blkdev->gd->queue);

	blkvsc_cancel_pending_reqs(blkdev);

	spin_unlock_irqrestore(&blkdev->lock, flags);

	blkvsc_do_operation(blkdev, DO_FLUSH);

	/*
	 * Now wait for all outgoing I/O to be drained.
	 */
	storvsc_wait_to_drain((struct storvsc_device *)dev->ext);
}

static int blkvsc_release(struct gendisk *disk, fmode_t mode)
{
	struct block_device_context *blkdev = disk->private_data;
	unsigned long flags;

	if (blkdev->users == 1)
		blkvsc_do_operation(blkdev, DO_FLUSH);

	spin_lock_irqsave(&blkdev->lock, flags);
	blkdev->users--;
	spin_unlock_irqrestore(&blkdev->lock, flags);

	return 0;
}

/*
 * We break the request into 1 or more blkvsc_requests and submit
 * them. If we can't submit them all, we put them on the
 * pending_list. The blkvsc_request() will work on the pending_list.
 */
static int blkvsc_do_request(struct block_device_context *blkdev,
			     struct request *req)
{
	struct bio *bio = NULL;
	struct bio_vec *bvec = NULL;
	struct bio_vec *prev_bvec = NULL;
	struct blkvsc_request *blkvsc_req = NULL;
	struct blkvsc_request *tmp;
	int databuf_idx = 0;
	int seg_idx = 0;
	sector_t start_sector;
	unsigned long num_sectors = 0;
	int ret = 0;
	int pending = 0;
	struct blkvsc_request_group *group = NULL;

	/* Create a group to tie req to list of blkvsc_reqs */
	group = kmem_cache_zalloc(blkdev->request_pool, GFP_ATOMIC);
	if (!group)
		return -ENOMEM;

	INIT_LIST_HEAD(&group->blkvsc_req_list);
	group->outstanding = group->status = 0;

	start_sector = blk_rq_pos(req);

	/* foreach bio in the request */
	for (bio = req->bio; bio; bio = bio->bi_next) {
		/*
		 * Map this bio into an existing or new storvsc request
		 */
		bio_for_each_segment(bvec, bio, seg_idx) {
			/* Get a new storvsc request */
			/* 1st-time */
			if ((!blkvsc_req) ||
			    (databuf_idx >= MAX_MULTIPAGE_BUFFER_COUNT)
			    /* hole at the begin of page */
			    || (bvec->bv_offset != 0) ||
			    /* hole at the end of page */
			    (prev_bvec &&
			     (prev_bvec->bv_len != PAGE_SIZE))) {
				/* submit the prev one */
				if (blkvsc_req) {
					blkvsc_req->sector_start =
						start_sector;
					sector_div(
						blkvsc_req->sector_start,
						(blkdev->sector_size >> 9));

					blkvsc_req->sector_count =
						num_sectors /
						(blkdev->sector_size >> 9);
					blkvsc_init_rw(blkvsc_req);
				}

				/*
				 * Create new blkvsc_req to represent
				 * the current bvec
				 */
				blkvsc_req = kmem_cache_zalloc(
					blkdev->request_pool, GFP_ATOMIC);
				if (!blkvsc_req) {
					/* free up everything */
					list_for_each_entry_safe(
						blkvsc_req, tmp,
						&group->blkvsc_req_list,
						req_entry) {
						list_del(
						&blkvsc_req->req_entry);
						kmem_cache_free(
						blkdev->request_pool,
						blkvsc_req);
					}

					kmem_cache_free(
					blkdev->request_pool, group);
					return -ENOMEM;
				}

				memset(blkvsc_req, 0,
				       sizeof(struct blkvsc_request));

				blkvsc_req->dev = blkdev;
				blkvsc_req->req = req;
				blkvsc_req->request.data_buffer.offset
					= bvec->bv_offset;
				blkvsc_req->request.data_buffer.len = 0;

				/* Add to the group */
				blkvsc_req->group = group;
				blkvsc_req->group->outstanding++;
				list_add_tail(&blkvsc_req->req_entry,
					&blkvsc_req->group->blkvsc_req_list);

				start_sector += num_sectors;
				num_sectors = 0;
				databuf_idx = 0;
			}

			/*
			 * Add the curr bvec/segment to the curr
			 * blkvsc_req
			 */
			blkvsc_req->request.data_buffer.
				pfn_array[databuf_idx]
				= page_to_pfn(bvec->bv_page);
			blkvsc_req->request.data_buffer.len
				+= bvec->bv_len;

			prev_bvec = bvec;

			databuf_idx++;
			num_sectors += bvec->bv_len >> 9;

		} /* bio_for_each_segment */

	} /* rq_for_each_bio */

	/* Handle the last one */
	if (blkvsc_req) {
		blkvsc_req->sector_start = start_sector;
		sector_div(blkvsc_req->sector_start,
			   (blkdev->sector_size >> 9));

		blkvsc_req->sector_count = num_sectors /
					   (blkdev->sector_size >> 9);

		blkvsc_init_rw(blkvsc_req);
	}

	list_for_each_entry(blkvsc_req, &group->blkvsc_req_list, req_entry) {
		if (pending) {
			list_add_tail(&blkvsc_req->pend_entry,
				      &blkdev->pending_list);
		} else {
			ret = blkvsc_submit_request(blkvsc_req,
						    blkvsc_request_completion);
			if (ret == -1) {
				list_add_tail(&blkvsc_req->pend_entry,
					      &blkdev->pending_list);
				pending = 1;
			}
		}
	}

	return pending;
}

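/*
 * Try to push previously deferred blkvsc_requests down to the host.
 * Stops at the first submit failure and returns nonzero if anything is
 * still pending; requests that do go out are removed from pending_list.
 */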
static int blkvsc_do_pending_reqs(struct block_device_context *blkdev)
{
	struct blkvsc_request *pend_req, *tmp;
	int ret = 0;

	/* Flush the pending list first */
	list_for_each_entry_safe(pend_req, tmp, &blkdev->pending_list,
				 pend_entry) {

		ret = blkvsc_submit_request(pend_req,
					    blkvsc_request_completion);
		if (ret != 0)
			break;
		else
			list_del(&pend_req->pend_entry);
	}

	return ret;
}

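/*
 * Request function: runs with blkdev->lock held as the queue lock (see
 * blk_init_queue() in blkvsc_probe()). When the host cannot take more
 * I/O we stop the queue; blkvsc_request_completion() restarts it once a
 * group completes.
 */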
static void blkvsc_request(struct request_queue *queue)
{
	struct block_device_context *blkdev = NULL;
	struct request *req;
	int ret = 0;

	while ((req = blk_peek_request(queue)) != NULL) {

		blkdev = req->rq_disk->private_data;
		if (blkdev->shutting_down || req->cmd_type != REQ_TYPE_FS) {
			__blk_end_request_cur(req, 0);
			continue;
		}

		ret = blkvsc_do_pending_reqs(blkdev);

		if (ret != 0) {
			blk_stop_queue(queue);
			break;
		}

		blk_start_request(req);

		ret = blkvsc_do_request(blkdev, req);
		if (ret > 0) {
			blk_stop_queue(queue);
			break;
		} else if (ret < 0) {
			blk_requeue_request(queue, req);
			blk_stop_queue(queue);
			break;
		}
	}
}

/* The one and only one */
static struct storvsc_driver blkvsc_drv = {
	.base.probe = blkvsc_probe,
	.base.remove = blkvsc_remove,
	.base.shutdown = blkvsc_shutdown,
};

static const struct block_device_operations block_ops = {
	.owner = THIS_MODULE,
	.open = blkvsc_open,
	.release = blkvsc_release,
	.getgeo = blkvsc_getgeo,
	.ioctl = blkvsc_ioctl,
};

/*
 * blkvsc_drv_init - BlkVsc driver initialization.
 */
static int blkvsc_drv_init(void)
{
	struct storvsc_driver *storvsc_drv = &blkvsc_drv;
	struct hv_driver *drv = &blkvsc_drv.base;
	int ret;

	BUILD_BUG_ON(sizeof(sector_t) != 8);

	storvsc_drv->ring_buffer_size = blkvsc_ringbuffer_size;

	memcpy(&drv->dev_type, &dev_type, sizeof(struct hv_guid));
	drv->name = drv_name;
	drv->driver.name = drv_name;

	/* The driver belongs to vmbus */
	ret = vmbus_child_driver_register(&drv->driver);

	return ret;
}

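/*
 * driver_for_each_device() callback used by blkvsc_drv_exit(): stash
 * the first device found and stop iterating, so the exit path can
 * unregister devices one at a time.
 */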
static int blkvsc_drv_exit_cb(struct device *dev, void *data)
{
	struct device **curr = (struct device **)data;

	*curr = dev;
	return 1;	/* stop iterating */
}

static void blkvsc_drv_exit(void)
{
	struct hv_driver *drv = &blkvsc_drv.base;
	struct device *current_dev;
	int ret;

	while (1) {
		current_dev = NULL;

		/* Get the device */
		ret = driver_for_each_device(&drv->driver, NULL,
					     (void *)&current_dev,
					     blkvsc_drv_exit_cb);

		if (ret)
			DPRINT_WARN(BLKVSC_DRV,
				    "driver_for_each_device returned %d", ret);

		if (current_dev == NULL)
			break;

		/* Initiate removal from the top-down */
		device_unregister(current_dev);
	}

	vmbus_child_driver_unregister(&drv->driver);
}

/*
 * blkvsc_probe - Add a new device for this driver
 */
static int blkvsc_probe(struct hv_device *dev)
{
	struct block_device_context *blkdev = NULL;
	struct storvsc_device_info device_info;
	struct storvsc_major_info major_info;
	int ret = 0;

	blkdev = kzalloc(sizeof(struct block_device_context), GFP_KERNEL);
	if (!blkdev) {
		ret = -ENOMEM;
		goto cleanup;
	}

	INIT_LIST_HEAD(&blkdev->pending_list);

	/* Initialize what we can here */
	spin_lock_init(&blkdev->lock);

	blkdev->request_pool = kmem_cache_create(dev_name(&dev->device),
					sizeof(struct blkvsc_request), 0,
					SLAB_HWCACHE_ALIGN, NULL);
	if (!blkdev->request_pool) {
		ret = -ENOMEM;
		goto cleanup;
	}

	ret = blkvsc_device_add(dev, &device_info);
	if (ret != 0)
		goto cleanup;

	blkdev->device_ctx = dev;
	/* this identified the device 0 or 1 */
	blkdev->target = device_info.target_id;
	/* this identified the ide ctrl 0 or 1 */
	blkdev->path = device_info.path_id;

	dev_set_drvdata(&dev->device, blkdev);

	ret = storvsc_get_major_info(&device_info, &major_info);
	if (ret)
		goto cleanup;

	if (major_info.do_register) {
		ret = register_blkdev(major_info.major, major_info.devname);

		if (ret != 0) {
			DPRINT_ERR(BLKVSC_DRV,
				   "register_blkdev() failed! ret %d", ret);
			goto remove;
		}
	}

	DPRINT_INFO(BLKVSC_DRV, "blkvsc registered for major %d!!",
		    major_info.major);

	blkdev->gd = alloc_disk(BLKVSC_MINORS);
	if (!blkdev->gd) {
		ret = -ENOMEM;
		goto cleanup;
	}

	blkdev->gd->queue = blk_init_queue(blkvsc_request, &blkdev->lock);

	blk_queue_max_segment_size(blkdev->gd->queue, PAGE_SIZE);
	blk_queue_max_segments(blkdev->gd->queue, MAX_MULTIPAGE_BUFFER_COUNT);
	blk_queue_segment_boundary(blkdev->gd->queue, PAGE_SIZE-1);
	blk_queue_bounce_limit(blkdev->gd->queue, BLK_BOUNCE_ANY);
	blk_queue_dma_alignment(blkdev->gd->queue, 511);

	blkdev->gd->major = major_info.major;
	if (major_info.index == 1 || major_info.index == 3)
		blkdev->gd->first_minor = BLKVSC_MINORS;
	else
		blkdev->gd->first_minor = 0;
	blkdev->gd->fops = &block_ops;
	blkdev->gd->events = DISK_EVENT_MEDIA_CHANGE;
	blkdev->gd->private_data = blkdev;
	blkdev->gd->driverfs_dev = &(blkdev->device_ctx->device);
	sprintf(blkdev->gd->disk_name, "hd%c", 'a' + major_info.index);

	blkvsc_do_operation(blkdev, DO_INQUIRY);
	blkvsc_do_operation(blkdev, DO_CAPACITY);

	set_capacity(blkdev->gd, blkdev->capacity * (blkdev->sector_size/512));
	blk_queue_logical_block_size(blkdev->gd->queue, blkdev->sector_size);

	add_disk(blkdev->gd);

	DPRINT_INFO(BLKVSC_DRV, "%s added!! capacity %lu sector_size %d",
		    blkdev->gd->disk_name, (unsigned long)blkdev->capacity,
		    blkdev->sector_size);

	return ret;

remove:
	storvsc_dev_remove(dev);

cleanup:
	if (blkdev) {
		if (blkdev->request_pool) {
			kmem_cache_destroy(blkdev->request_pool);
			blkdev->request_pool = NULL;
		}
		kfree(blkdev);
		blkdev = NULL;
	}

	return ret;
}

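/*
 * Completion routine for block-layer I/O. Each struct request may have
 * been split into several blkvsc_requests that share a group; the group
 * is only torn down (and __blk_end_request() called) once every member
 * has completed, so sectors are always retired in order. While we hold
 * blkdev->lock we also kick the pending list and restart the queue.
 */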
static void blkvsc_request_completion(struct hv_storvsc_request *request)
{
	struct blkvsc_request *blkvsc_req =
			(struct blkvsc_request *)request->context;
	struct block_device_context *blkdev =
			(struct block_device_context *)blkvsc_req->dev;
	unsigned long flags;
	struct blkvsc_request *comp_req, *tmp;
	struct vmscsi_request *vm_srb;

	spin_lock_irqsave(&blkdev->lock, flags);

	blkdev->num_outstanding_reqs--;
	blkvsc_req->group->outstanding--;

	/*
	 * Only start processing when all the blkvsc_reqs are
	 * completed. This guarantees no out-of-order blkvsc_req
	 * completion when calling end_that_request_first()
	 */
	if (blkvsc_req->group->outstanding == 0) {
		list_for_each_entry_safe(comp_req, tmp,
					 &blkvsc_req->group->blkvsc_req_list,
					 req_entry) {

			list_del(&comp_req->req_entry);

			vm_srb =
			&comp_req->request.vstor_packet.vm_srb;
			if (!__blk_end_request(comp_req->req,
				(!vm_srb->scsi_status ? 0 : -EIO),
				comp_req->sector_count * blkdev->sector_size)) {
				/*
				 * All the sectors have been xferred ie the
				 * request is done.
				 */
				kmem_cache_free(blkdev->request_pool,
						comp_req->group);
			}

			kmem_cache_free(blkdev->request_pool, comp_req);
		}

		if (!blkdev->shutting_down) {
			blkvsc_do_pending_reqs(blkdev);
			blk_start_queue(blkdev->gd->queue);
			blkvsc_request(blkdev->gd->queue);
		}
	}

	spin_unlock_irqrestore(&blkdev->lock, flags);
}

static int __init blkvsc_init(void)
{
	int ret;

	ret = blkvsc_drv_init();

	return ret;
}

static void __exit blkvsc_exit(void)
{
	blkvsc_drv_exit();
}

MODULE_LICENSE("GPL");
MODULE_VERSION(HV_DRV_VERSION);
MODULE_DESCRIPTION("Microsoft Hyper-V virtual block driver");
module_init(blkvsc_init);
module_exit(blkvsc_exit);