drivers/staging/hv/storvsc_drv.c
/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 */

#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/completion.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/hyperv.h>
#include <linux/blkdev.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_devinfo.h>
#include <scsi/scsi_dbg.h>


#define STORVSC_RING_BUFFER_SIZE                        (20*PAGE_SIZE)
static int storvsc_ringbuffer_size = STORVSC_RING_BUFFER_SIZE;

module_param(storvsc_ringbuffer_size, int, S_IRUGO);
MODULE_PARM_DESC(storvsc_ringbuffer_size, "Ring buffer size (bytes)");

/* The revision mechanism below alerts the user that structure sizes may be */
/* mismatched even though the protocol versions match. */


#define REVISION_STRING(REVISION_) #REVISION_
#define FILL_VMSTOR_REVISION(RESULT_LVALUE_)                            \
        do {                                                            \
                char *revision_string                                   \
                        = REVISION_STRING($Rev : 6 $) + 6;              \
                RESULT_LVALUE_ = 0;                                     \
                while (*revision_string >= '0'                          \
                        && *revision_string <= '9') {                   \
                        RESULT_LVALUE_ *= 10;                           \
                        RESULT_LVALUE_ += *revision_string - '0';       \
                        revision_string++;                              \
                }                                                       \
        } while (0)
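
/*
 * Worked example (editor's note): REVISION_STRING() stringizes its
 * argument, so a CVS-style tag such as $Rev: 6 $ would become the
 * string "$Rev: 6 $"; the "+ 6" skips the 6-character "$Rev: " prefix
 * and the digit loop then accumulates the decimal value, leaving 6 in
 * the result lvalue.  With the tag written as above (a space before
 * the colon), the first scanned character is not a digit, so the
 * revision parses as 0 -- harmless, as long as both ends are built
 * from the same source.
 */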

/* Major/minor macros.  Minor version is in LSB, meaning that earlier flat */
/* version numbers will be interpreted as "0.x" (i.e., 1 becomes 0.1). */
#define VMSTOR_PROTOCOL_MAJOR(VERSION_)         (((VERSION_) >> 8) & 0xff)
#define VMSTOR_PROTOCOL_MINOR(VERSION_)         (((VERSION_))      & 0xff)
#define VMSTOR_PROTOCOL_VERSION(MAJOR_, MINOR_) ((((MAJOR_) & 0xff) << 8) | \
                                                 (((MINOR_) & 0xff)))
#define VMSTOR_INVALID_PROTOCOL_VERSION         (-1)

/* Version history: */
/* V1 Beta                    0.1 */
/* V1 RC < 2008/1/31          1.0 */
/* V1 RC > 2008/1/31          2.0 */
#define VMSTOR_PROTOCOL_VERSION_CURRENT VMSTOR_PROTOCOL_VERSION(2, 0)
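
/*
 * Encoding example (editor's note): VMSTOR_PROTOCOL_VERSION(2, 0) packs
 * major 2 into the high byte and minor 0 into the low byte, giving
 * 0x0200; VMSTOR_PROTOCOL_MAJOR(0x0200) recovers 2 and
 * VMSTOR_PROTOCOL_MINOR(0x0200) recovers 0.  A legacy flat version of 1
 * decodes as major 0, minor 1, i.e. "0.1" as noted above.
 */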


/*  This will get replaced with the max transfer length that is possible on */
/*  the host adapter. */
/*  The max transfer length will be published when we offer a vmbus channel. */
#define MAX_TRANSFER_LENGTH     0x40000
#define DEFAULT_PACKET_SIZE (sizeof(struct vmdata_gpa_direct) + \
                        sizeof(struct vstor_packet) +           \
                        (sizeof(u64) * (MAX_TRANSFER_LENGTH / PAGE_SIZE)))


/*  Packet structure describing virtual storage requests. */
enum vstor_packet_operation {
        VSTOR_OPERATION_COMPLETE_IO             = 1,
        VSTOR_OPERATION_REMOVE_DEVICE           = 2,
        VSTOR_OPERATION_EXECUTE_SRB             = 3,
        VSTOR_OPERATION_RESET_LUN               = 4,
        VSTOR_OPERATION_RESET_ADAPTER           = 5,
        VSTOR_OPERATION_RESET_BUS               = 6,
        VSTOR_OPERATION_BEGIN_INITIALIZATION    = 7,
        VSTOR_OPERATION_END_INITIALIZATION      = 8,
        VSTOR_OPERATION_QUERY_PROTOCOL_VERSION  = 9,
        VSTOR_OPERATION_QUERY_PROPERTIES        = 10,
        VSTOR_OPERATION_MAXIMUM                 = 10
};

/*
 * Platform neutral description of a scsi request -
 * this remains the same across the wire regardless of 32/64 bit
 * note: it's patterned off the SCSI_PASS_THROUGH structure
 */
#define CDB16GENERIC_LENGTH                     0x10

#ifndef SENSE_BUFFER_SIZE
#define SENSE_BUFFER_SIZE                       0x12
#endif

#define MAX_DATA_BUF_LEN_WITH_PADDING           0x14

struct vmscsi_request {
        unsigned short length;
        unsigned char srb_status;
        unsigned char scsi_status;

        unsigned char port_number;
        unsigned char path_id;
        unsigned char target_id;
        unsigned char lun;

        unsigned char cdb_length;
        unsigned char sense_info_length;
        unsigned char data_in;
        unsigned char reserved;

        unsigned int data_transfer_length;

        union {
                unsigned char cdb[CDB16GENERIC_LENGTH];
                unsigned char sense_data[SENSE_BUFFER_SIZE];
                unsigned char reserved_array[MAX_DATA_BUF_LEN_WITH_PADDING];
        };
} __packed;
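
/*
 * Sizing note (editor's): the union occupies
 * MAX_DATA_BUF_LEN_WITH_PADDING (0x14 = 20) bytes via reserved_array,
 * padding both the 16-byte CDB area and the 18-byte (0x12) sense area
 * to a common, 4-byte-aligned length.
 */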


/*
 * This structure is sent during the initialization phase to get the different
 * properties of the channel.
 */
struct vmstorage_channel_properties {
        unsigned short protocol_version;
        unsigned char path_id;
        unsigned char target_id;

        /* Note: port number is only really known on the client side */
        unsigned int port_number;
        unsigned int flags;
        unsigned int max_transfer_bytes;

        /*  This id is unique for each channel and will correspond with */
        /*  vendor specific data in the inquiry data */
        unsigned long long unique_id;
} __packed;

/*  This structure is sent during the storage protocol negotiations. */
struct vmstorage_protocol_version {
        /* Major (MSW) and minor (LSW) version numbers. */
        unsigned short major_minor;

        /*
         * Revision number is auto-incremented whenever this file is changed
         * (See FILL_VMSTOR_REVISION macro above).  Mismatch does not
         * definitely indicate incompatibility--but it does indicate mismatched
         * builds.
         */
        unsigned short revision;
} __packed;

/* Channel Property Flags */
#define STORAGE_CHANNEL_REMOVABLE_FLAG          0x1
#define STORAGE_CHANNEL_EMULATED_IDE_FLAG       0x2

struct vstor_packet {
        /* Requested operation type */
        enum vstor_packet_operation operation;

        /*  Flags - see below for values */
        unsigned int flags;

        /* Status of the request returned from the server side. */
        unsigned int status;

        /* Data payload area */
        union {
                /*
                 * Structure used to forward SCSI commands from the
                 * client to the server.
                 */
                struct vmscsi_request vm_srb;

                /* Structure used to query channel properties. */
                struct vmstorage_channel_properties storage_channel_properties;

                /* Used during version negotiations. */
                struct vmstorage_protocol_version version;
        };
} __packed;

/* Packet flags */
/*
 * This flag indicates that the server should send back a completion for this
 * packet.
 */
#define REQUEST_COMPLETION_FLAG 0x1

/*  This is the set of flags that the vsc can set in any packets it sends */
#define VSC_LEGAL_FLAGS         (REQUEST_COMPLETION_FLAG)


/* Defines */

#define STORVSC_MAX_IO_REQUESTS                         128

/*
 * In Hyper-V, each port/path/target maps to 1 scsi host adapter.  In
 * reality, the path/target is not used (i.e. always set to 0) so our
 * scsi host adapter essentially has 1 bus with 1 target that contains
 * up to 256 luns.
 */
#define STORVSC_MAX_LUNS_PER_TARGET                     64
#define STORVSC_MAX_TARGETS                             1
#define STORVSC_MAX_CHANNELS                            1
#define STORVSC_MAX_CMD_LEN                             16

struct hv_storvsc_request;

/* Matches Windows-end */
enum storvsc_request_type {
        WRITE_TYPE,
        READ_TYPE,
        UNKNOWN_TYPE,
};


struct hv_storvsc_request {
        struct hv_device *device;

        /* Synchronize the request/response if needed */
        struct completion wait_event;

        unsigned char *sense_buffer;
        void *context;
        void (*on_io_completion)(struct hv_storvsc_request *request);
        struct hv_multipage_buffer data_buffer;

        struct vstor_packet vstor_packet;
};


/* A storvsc device is a device object that contains a vmbus channel */
struct storvsc_device {
        struct hv_device *device;

        bool     destroy;
        bool     drain_notify;
        atomic_t num_outstanding_req;
        struct Scsi_Host *host;

        wait_queue_head_t waiting_to_drain;

        /*
         * Each unique Port/Path/Target represents one channel, i.e. one
         * scsi controller.  In reality, the path id and target id are
         * always 0 and the port is set by us.
         */
        unsigned int port_number;
        unsigned char path_id;
        unsigned char target_id;

        /* Used for vsc/vsp channel reset process */
        struct hv_storvsc_request init_request;
        struct hv_storvsc_request reset_request;
};

struct hv_host_device {
        struct hv_device *dev;
        struct kmem_cache *request_pool;
        unsigned int port;
        unsigned char path;
        unsigned char target;
};

struct storvsc_cmd_request {
        struct list_head entry;
        struct scsi_cmnd *cmd;

        unsigned int bounce_sgl_count;
        struct scatterlist *bounce_sgl;

        struct hv_storvsc_request request;
};

static inline struct storvsc_device *get_out_stor_device(
                                        struct hv_device *device)
{
        struct storvsc_device *stor_device;

        stor_device = hv_get_drvdata(device);

        if (stor_device && stor_device->destroy)
                stor_device = NULL;

        return stor_device;
}


static inline void storvsc_wait_to_drain(struct storvsc_device *dev)
{
        dev->drain_notify = true;
        wait_event(dev->waiting_to_drain,
                   atomic_read(&dev->num_outstanding_req) == 0);
        dev->drain_notify = false;
}

static inline struct storvsc_device *get_in_stor_device(
                                        struct hv_device *device)
{
        struct storvsc_device *stor_device;

        stor_device = hv_get_drvdata(device);

        if (!stor_device)
                goto get_in_err;

        /*
         * If the device is being destroyed, allow incoming
         * traffic only to clean up outstanding requests.
         */

        if (stor_device->destroy &&
                (atomic_read(&stor_device->num_outstanding_req) == 0))
                stor_device = NULL;

get_in_err:
        return stor_device;

}

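/*
 * Channel initialization handshake (editor's summary): four vstor
 * packets are exchanged over the freshly opened vmbus channel, each
 * sent with REQUEST_COMPLETION_FLAG and awaited for up to 5 seconds:
 * BEGIN_INITIALIZATION, then QUERY_PROTOCOL_VERSION (carrying our
 * major/minor and build revision), then QUERY_PROPERTIES (whose reply
 * supplies the path/target ids we cache), and finally
 * END_INITIALIZATION.  Any step that times out aborts the sequence.
 */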
static int storvsc_channel_init(struct hv_device *device)
{
        struct storvsc_device *stor_device;
        struct hv_storvsc_request *request;
        struct vstor_packet *vstor_packet;
        int ret, t;

        stor_device = get_out_stor_device(device);
        if (!stor_device)
                return -ENODEV;

        request = &stor_device->init_request;
        vstor_packet = &request->vstor_packet;

        /*
         * Now, initiate the vsc/vsp initialization protocol on the open
         * channel
         */
        memset(request, 0, sizeof(struct hv_storvsc_request));
        init_completion(&request->wait_event);
        vstor_packet->operation = VSTOR_OPERATION_BEGIN_INITIALIZATION;
        vstor_packet->flags = REQUEST_COMPLETION_FLAG;

        ret = vmbus_sendpacket(device->channel, vstor_packet,
                               sizeof(struct vstor_packet),
                               (unsigned long)request,
                               VM_PKT_DATA_INBAND,
                               VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
        if (ret != 0)
                goto cleanup;

        t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
        if (t == 0) {
                ret = -ETIMEDOUT;
                goto cleanup;
        }

        if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
            vstor_packet->status != 0)
                goto cleanup;


        /* reuse the packet for version range supported */
        memset(vstor_packet, 0, sizeof(struct vstor_packet));
        vstor_packet->operation = VSTOR_OPERATION_QUERY_PROTOCOL_VERSION;
        vstor_packet->flags = REQUEST_COMPLETION_FLAG;

        vstor_packet->version.major_minor = VMSTOR_PROTOCOL_VERSION_CURRENT;
        FILL_VMSTOR_REVISION(vstor_packet->version.revision);

        ret = vmbus_sendpacket(device->channel, vstor_packet,
                               sizeof(struct vstor_packet),
                               (unsigned long)request,
                               VM_PKT_DATA_INBAND,
                               VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
        if (ret != 0)
                goto cleanup;

        t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
        if (t == 0) {
                ret = -ETIMEDOUT;
                goto cleanup;
        }

        if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
            vstor_packet->status != 0)
                goto cleanup;


        memset(vstor_packet, 0, sizeof(struct vstor_packet));
        vstor_packet->operation = VSTOR_OPERATION_QUERY_PROPERTIES;
        vstor_packet->flags = REQUEST_COMPLETION_FLAG;
        vstor_packet->storage_channel_properties.port_number =
                                        stor_device->port_number;

        ret = vmbus_sendpacket(device->channel, vstor_packet,
                               sizeof(struct vstor_packet),
                               (unsigned long)request,
                               VM_PKT_DATA_INBAND,
                               VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);

        if (ret != 0)
                goto cleanup;

        t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
        if (t == 0) {
                ret = -ETIMEDOUT;
                goto cleanup;
        }

        if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
            vstor_packet->status != 0)
                goto cleanup;

        stor_device->path_id = vstor_packet->storage_channel_properties.path_id;
        stor_device->target_id
                = vstor_packet->storage_channel_properties.target_id;

        memset(vstor_packet, 0, sizeof(struct vstor_packet));
        vstor_packet->operation = VSTOR_OPERATION_END_INITIALIZATION;
        vstor_packet->flags = REQUEST_COMPLETION_FLAG;

        ret = vmbus_sendpacket(device->channel, vstor_packet,
                               sizeof(struct vstor_packet),
                               (unsigned long)request,
                               VM_PKT_DATA_INBAND,
                               VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);

        if (ret != 0)
                goto cleanup;

        t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
        if (t == 0) {
                ret = -ETIMEDOUT;
                goto cleanup;
        }

        if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
            vstor_packet->status != 0)
                goto cleanup;


cleanup:
        return ret;
}

static void storvsc_on_io_completion(struct hv_device *device,
                                  struct vstor_packet *vstor_packet,
                                  struct hv_storvsc_request *request)
{
        struct storvsc_device *stor_device;
        struct vstor_packet *stor_pkt;

        stor_device = hv_get_drvdata(device);
        stor_pkt = &request->vstor_packet;

        /*
         * The current SCSI handling on the host side does
         * not correctly handle:
         * INQUIRY command with page code parameter set to 0x80
         * MODE_SENSE command with cmd[2] == 0x1c
         *
         * Set up the srb and scsi status so this won't be fatal.
         * We do this so we can distinguish truly fatal failures
         * (srb status == 0x4) and off-line the device in that case.
         */

        if ((stor_pkt->vm_srb.cdb[0] == INQUIRY) ||
                (stor_pkt->vm_srb.cdb[0] == MODE_SENSE)) {
                vstor_packet->vm_srb.scsi_status = 0;
                vstor_packet->vm_srb.srb_status = 0x1;
        }


        /* Copy over the status...etc */
        stor_pkt->vm_srb.scsi_status = vstor_packet->vm_srb.scsi_status;
        stor_pkt->vm_srb.srb_status = vstor_packet->vm_srb.srb_status;
        stor_pkt->vm_srb.sense_info_length =
        vstor_packet->vm_srb.sense_info_length;

        if (vstor_packet->vm_srb.scsi_status != 0 ||
                vstor_packet->vm_srb.srb_status != 1) {
                dev_warn(&device->device,
                         "cmd 0x%x scsi status 0x%x srb status 0x%x\n",
                         stor_pkt->vm_srb.cdb[0],
                         vstor_packet->vm_srb.scsi_status,
                         vstor_packet->vm_srb.srb_status);
        }

        if ((vstor_packet->vm_srb.scsi_status & 0xFF) == 0x02) {
                /* CHECK_CONDITION */
                if (vstor_packet->vm_srb.srb_status & 0x80) {
                        /* autosense data available */
                        dev_warn(&device->device,
                                 "stor pkt %p autosense data valid - len %d\n",
                                 request,
                                 vstor_packet->vm_srb.sense_info_length);

                        memcpy(request->sense_buffer,
                               vstor_packet->vm_srb.sense_data,
                               vstor_packet->vm_srb.sense_info_length);

                }
        }

        stor_pkt->vm_srb.data_transfer_length =
        vstor_packet->vm_srb.data_transfer_length;

        request->on_io_completion(request);

        if (atomic_dec_and_test(&stor_device->num_outstanding_req) &&
                stor_device->drain_notify)
                wake_up(&stor_device->waiting_to_drain);


}

static void storvsc_on_receive(struct hv_device *device,
                             struct vstor_packet *vstor_packet,
                             struct hv_storvsc_request *request)
{
        switch (vstor_packet->operation) {
        case VSTOR_OPERATION_COMPLETE_IO:
                storvsc_on_io_completion(device, vstor_packet, request);
                break;
        case VSTOR_OPERATION_REMOVE_DEVICE:
                /* fall through */
        default:
                break;
        }
}

static void storvsc_on_channel_callback(void *context)
{
        struct hv_device *device = (struct hv_device *)context;
        struct storvsc_device *stor_device;
        u32 bytes_recvd;
        u64 request_id;
        unsigned char packet[ALIGN(sizeof(struct vstor_packet), 8)];
        struct hv_storvsc_request *request;
        int ret;


        stor_device = get_in_stor_device(device);
        if (!stor_device)
                return;

        do {
                ret = vmbus_recvpacket(device->channel, packet,
                                       ALIGN(sizeof(struct vstor_packet), 8),
                                       &bytes_recvd, &request_id);
                if (ret == 0 && bytes_recvd > 0) {

                        request = (struct hv_storvsc_request *)
                                        (unsigned long)request_id;

                        if ((request == &stor_device->init_request) ||
                            (request == &stor_device->reset_request)) {

                                memcpy(&request->vstor_packet, packet,
                                       sizeof(struct vstor_packet));
                                complete(&request->wait_event);
                        } else {
                                storvsc_on_receive(device,
                                                (struct vstor_packet *)packet,
                                                request);
                        }
                } else {
                        break;
                }
        } while (1);

        return;
}

static int storvsc_connect_to_vsp(struct hv_device *device, u32 ring_size)
{
        struct vmstorage_channel_properties props;
        int ret;

        memset(&props, 0, sizeof(struct vmstorage_channel_properties));

        /* Open the channel */
        ret = vmbus_open(device->channel,
                         ring_size,
                         ring_size,
                         (void *)&props,
                         sizeof(struct vmstorage_channel_properties),
                         storvsc_on_channel_callback, device);

        if (ret != 0)
                return ret;

        ret = storvsc_channel_init(device);

        return ret;
}

static int storvsc_dev_remove(struct hv_device *device)
{
        struct storvsc_device *stor_device;
        unsigned long flags;

        stor_device = hv_get_drvdata(device);

        spin_lock_irqsave(&device->channel->inbound_lock, flags);
        stor_device->destroy = true;
        spin_unlock_irqrestore(&device->channel->inbound_lock, flags);

        /*
         * At this point, all outbound traffic should be disabled.  We
         * only allow inbound traffic (responses) to proceed so that
         * outstanding requests can be completed.
         */

        storvsc_wait_to_drain(stor_device);

        /*
         * Since we have already drained, we don't need to busy wait
         * as was done in final_release_stor_device()
         * Note that we cannot set the ext pointer to NULL until
         * we have drained - to drain the outgoing packets, we need to
         * allow incoming packets.
         */
        spin_lock_irqsave(&device->channel->inbound_lock, flags);
        hv_set_drvdata(device, NULL);
        spin_unlock_irqrestore(&device->channel->inbound_lock, flags);

        /* Close the channel */
        vmbus_close(device->channel);

        kfree(stor_device);
        return 0;
}

static int storvsc_do_io(struct hv_device *device,
                              struct hv_storvsc_request *request)
{
        struct storvsc_device *stor_device;
        struct vstor_packet *vstor_packet;
        int ret = 0;

        vstor_packet = &request->vstor_packet;
        stor_device = get_out_stor_device(device);

        if (!stor_device)
                return -ENODEV;


        request->device  = device;


        vstor_packet->flags |= REQUEST_COMPLETION_FLAG;

        vstor_packet->vm_srb.length = sizeof(struct vmscsi_request);


        vstor_packet->vm_srb.sense_info_length = SENSE_BUFFER_SIZE;


        vstor_packet->vm_srb.data_transfer_length =
        request->data_buffer.len;

        vstor_packet->operation = VSTOR_OPERATION_EXECUTE_SRB;

        if (request->data_buffer.len) {
                ret = vmbus_sendpacket_multipagebuffer(device->channel,
                                &request->data_buffer,
                                vstor_packet,
                                sizeof(struct vstor_packet),
                                (unsigned long)request);
        } else {
                ret = vmbus_sendpacket(device->channel, vstor_packet,
                               sizeof(struct vstor_packet),
                               (unsigned long)request,
                               VM_PKT_DATA_INBAND,
                               VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
        }

        if (ret != 0)
                return ret;

        atomic_inc(&stor_device->num_outstanding_req);

        return ret;
}

static void storvsc_get_ide_info(struct hv_device *dev, int *target, int *path)
{
        *target =
                dev->dev_instance.b[5] << 8 | dev->dev_instance.b[4];

        *path =
                dev->dev_instance.b[3] << 24 |
                dev->dev_instance.b[2] << 16 |
                dev->dev_instance.b[1] << 8  | dev->dev_instance.b[0];
}


static int storvsc_device_alloc(struct scsi_device *sdevice)
{
        /*
         * This enables luns to be located sparsely.  Otherwise, we may
         * not discover them.
         */
        sdevice->sdev_bflags |= BLIST_SPARSELUN | BLIST_LARGELUN;
        return 0;
}

static int storvsc_merge_bvec(struct request_queue *q,
                              struct bvec_merge_data *bmd, struct bio_vec *bvec)
{
        /* checking done by caller. */
        return bvec->bv_len;
}

static int storvsc_device_configure(struct scsi_device *sdevice)
{
        scsi_adjust_queue_depth(sdevice, MSG_SIMPLE_TAG,
                                STORVSC_MAX_IO_REQUESTS);

        blk_queue_max_segment_size(sdevice->request_queue, PAGE_SIZE);

        blk_queue_merge_bvec(sdevice->request_queue, storvsc_merge_bvec);

        blk_queue_bounce_limit(sdevice->request_queue, BLK_BOUNCE_ANY);

        return 0;
}

static void destroy_bounce_buffer(struct scatterlist *sgl,
                                  unsigned int sg_count)
{
        int i;
        struct page *page_buf;

        for (i = 0; i < sg_count; i++) {
                page_buf = sg_page((&sgl[i]));
                if (page_buf != NULL)
                        __free_page(page_buf);
        }

        kfree(sgl);
}

static int do_bounce_buffer(struct scatterlist *sgl, unsigned int sg_count)
{
        int i;

        /* No need to check */
        if (sg_count < 2)
                return -1;

        /* We have at least 2 sg entries */
        for (i = 0; i < sg_count; i++) {
                if (i == 0) {
                        /* make sure the 1st entry does not have a hole */
                        if (sgl[i].offset + sgl[i].length != PAGE_SIZE)
                                return i;
                } else if (i == sg_count - 1) {
                        /* make sure the last entry does not have a hole */
                        if (sgl[i].offset != 0)
                                return i;
                } else {
                        /* make sure there is no hole in the middle */
                        if (sgl[i].length != PAGE_SIZE || sgl[i].offset != 0)
                                return i;
                }
        }
        return -1;
}
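
/*
 * Example (editor's note): a request whose scatterlist is
 *   [0] offset 0, length PAGE_SIZE
 *   [1] offset 0, length 512
 *   [2] offset 0, length PAGE_SIZE
 * fails the middle-entry check (entry 1 is a partial page, leaving a
 * hole before entry 2), so do_bounce_buffer() returns 1 and the data
 * must be bounced: the multi-page buffer handed to the host is just a
 * PFN array plus a starting offset and length, so it can only describe
 * data that is contiguous within and across whole pages.
 */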

static struct scatterlist *create_bounce_buffer(struct scatterlist *sgl,
                                                unsigned int sg_count,
                                                unsigned int len)
{
        int i;
        int num_pages;
        struct scatterlist *bounce_sgl;
        struct page *page_buf;

        num_pages = ALIGN(len, PAGE_SIZE) >> PAGE_SHIFT;

        bounce_sgl = kcalloc(num_pages, sizeof(struct scatterlist), GFP_ATOMIC);
        if (!bounce_sgl)
                return NULL;

        sg_init_table(bounce_sgl, num_pages);
        for (i = 0; i < num_pages; i++) {
                page_buf = alloc_page(GFP_ATOMIC);
                if (!page_buf)
                        goto cleanup;
                sg_set_page(&bounce_sgl[i], page_buf, 0, 0);
        }

        return bounce_sgl;

cleanup:
        destroy_bounce_buffer(bounce_sgl, num_pages);
        return NULL;
}
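
/*
 * Editor's note: each bounce entry is created with offset 0 and length
 * 0 on purpose.  copy_to_bounce_buffer() below uses .length as a write
 * cursor (how many bytes of the page are filled), while
 * copy_from_bounce_buffer() uses .offset as a read cursor, so these
 * fields serve as progress counters rather than describing a fixed
 * mapping.
 */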


/* Assume the original sgl has enough room */
static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
                                            struct scatterlist *bounce_sgl,
                                            unsigned int orig_sgl_count)
{
        int i;
        int j = 0;
        unsigned long src, dest;
        unsigned int srclen, destlen, copylen;
        unsigned int total_copied = 0;
        unsigned long bounce_addr = 0;
        unsigned long dest_addr = 0;
        unsigned long flags;

        local_irq_save(flags);

        for (i = 0; i < orig_sgl_count; i++) {
                dest_addr = (unsigned long)kmap_atomic(sg_page((&orig_sgl[i])),
                                        KM_IRQ0) + orig_sgl[i].offset;
                dest = dest_addr;
                destlen = orig_sgl[i].length;

                if (bounce_addr == 0)
                        bounce_addr =
                        (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])),
                                                        KM_IRQ0);

                while (destlen) {
                        src = bounce_addr + bounce_sgl[j].offset;
                        srclen = bounce_sgl[j].length - bounce_sgl[j].offset;

                        copylen = min(srclen, destlen);
                        memcpy((void *)dest, (void *)src, copylen);

                        total_copied += copylen;
                        bounce_sgl[j].offset += copylen;
                        destlen -= copylen;
                        dest += copylen;

                        if (bounce_sgl[j].offset == bounce_sgl[j].length) {
                                /* full */
                                kunmap_atomic((void *)bounce_addr, KM_IRQ0);
                                j++;

                                /* if we need to use another bounce buffer */
                                if (destlen || i != orig_sgl_count - 1)
                                        bounce_addr =
                                        (unsigned long)kmap_atomic(
                                        sg_page((&bounce_sgl[j])), KM_IRQ0);
                        } else if (destlen == 0 && i == orig_sgl_count - 1) {
                                /* unmap the last bounce that is < PAGE_SIZE */
                                kunmap_atomic((void *)bounce_addr, KM_IRQ0);
                        }
                }

                kunmap_atomic((void *)(dest_addr - orig_sgl[i].offset),
                              KM_IRQ0);
        }

        local_irq_restore(flags);

        return total_copied;
}


/* Assume the bounce_sgl has enough room, i.e. it was sized by */
/* create_bounce_buffer(). */
static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl,
                                          struct scatterlist *bounce_sgl,
                                          unsigned int orig_sgl_count)
{
        int i;
        int j = 0;
        unsigned long src, dest;
        unsigned int srclen, destlen, copylen;
        unsigned int total_copied = 0;
        unsigned long bounce_addr = 0;
        unsigned long src_addr = 0;
        unsigned long flags;

        local_irq_save(flags);

        for (i = 0; i < orig_sgl_count; i++) {
                src_addr = (unsigned long)kmap_atomic(sg_page((&orig_sgl[i])),
                                KM_IRQ0) + orig_sgl[i].offset;
                src = src_addr;
                srclen = orig_sgl[i].length;

                if (bounce_addr == 0)
                        bounce_addr =
                        (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])),
                                                KM_IRQ0);

                while (srclen) {
                        /* assume bounce offset always == 0 */
                        dest = bounce_addr + bounce_sgl[j].length;
                        destlen = PAGE_SIZE - bounce_sgl[j].length;

                        copylen = min(srclen, destlen);
                        memcpy((void *)dest, (void *)src, copylen);

                        total_copied += copylen;
                        bounce_sgl[j].length += copylen;
                        srclen -= copylen;
                        src += copylen;

                        if (bounce_sgl[j].length == PAGE_SIZE) {
                                /* full..move to next entry */
                                kunmap_atomic((void *)bounce_addr, KM_IRQ0);
                                j++;

                                /* if we need to use another bounce buffer */
                                if (srclen || i != orig_sgl_count - 1)
                                        bounce_addr =
                                        (unsigned long)kmap_atomic(
                                        sg_page((&bounce_sgl[j])), KM_IRQ0);

                        } else if (srclen == 0 && i == orig_sgl_count - 1) {
                                /* unmap the last bounce that is < PAGE_SIZE */
                                kunmap_atomic((void *)bounce_addr, KM_IRQ0);
                        }
                }

                kunmap_atomic((void *)(src_addr - orig_sgl[i].offset), KM_IRQ0);
        }

        local_irq_restore(flags);

        return total_copied;
}


static int storvsc_remove(struct hv_device *dev)
{
        struct storvsc_device *stor_device = hv_get_drvdata(dev);
        struct Scsi_Host *host = stor_device->host;
        struct hv_host_device *host_dev =
                        (struct hv_host_device *)host->hostdata;

        scsi_remove_host(host);

        scsi_host_put(host);

        storvsc_dev_remove(dev);
        if (host_dev->request_pool) {
                kmem_cache_destroy(host_dev->request_pool);
                host_dev->request_pool = NULL;
        }
        return 0;
}


static int storvsc_get_chs(struct scsi_device *sdev, struct block_device *bdev,
                           sector_t capacity, int *info)
{
        sector_t nsect = capacity;
        sector_t cylinders = nsect;
        int heads, sectors_pt;

        /*
         * We are making up these values; let us keep it simple.
         */
        heads = 0xff;
        sectors_pt = 0x3f;      /* Sectors per track */
        sector_div(cylinders, heads * sectors_pt);
        if ((sector_t)(cylinders + 1) * heads * sectors_pt < nsect)
                cylinders = 0xffff;

        info[0] = heads;
        info[1] = sectors_pt;
        info[2] = (int)cylinders;

        return 0;
}
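
/*
 * Worked example (editor's note): with heads = 0xff (255) and
 * sectors_pt = 0x3f (63), a 16 GiB disk has 33554432 512-byte sectors,
 * so cylinders = 33554432 / (255 * 63) = 33554432 / 16065, which
 * truncates to 2088.  Only when even (cylinders + 1) full cylinders
 * cannot reach the capacity is the count clamped to 0xffff.
 */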

static int storvsc_host_reset(struct hv_device *device)
{
        struct storvsc_device *stor_device;
        struct hv_storvsc_request *request;
        struct vstor_packet *vstor_packet;
        int ret, t;


        stor_device = get_out_stor_device(device);
        if (!stor_device)
                return -ENODEV;

        request = &stor_device->reset_request;
        vstor_packet = &request->vstor_packet;

        init_completion(&request->wait_event);

        vstor_packet->operation = VSTOR_OPERATION_RESET_BUS;
        vstor_packet->flags = REQUEST_COMPLETION_FLAG;
        vstor_packet->vm_srb.path_id = stor_device->path_id;

        ret = vmbus_sendpacket(device->channel, vstor_packet,
                               sizeof(struct vstor_packet),
                               (unsigned long)&stor_device->reset_request,
                               VM_PKT_DATA_INBAND,
                               VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
        if (ret != 0)
                goto cleanup;

        t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
        if (t == 0) {
                ret = -ETIMEDOUT;
                goto cleanup;
        }


        /*
         * At this point, all outstanding requests in the adapter
         * should have been flushed out and returned to us.
         * There is a potential race here where the host may be in
         * the process of responding when we return from here.
         * Just wait for all in-transit packets to be accounted for
         * before we return from here.
         */
        storvsc_wait_to_drain(stor_device);

cleanup:
        return ret;
}


/*
 * storvsc_host_reset_handler - Reset the scsi HBA
 */
static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd)
{
        struct hv_host_device *host_dev =
                (struct hv_host_device *)scmnd->device->host->hostdata;
        struct hv_device *dev = host_dev->dev;

        return storvsc_host_reset(dev);
}


/*
 * storvsc_command_completion - Command completion processing
 */
static void storvsc_command_completion(struct hv_storvsc_request *request)
{
        struct storvsc_cmd_request *cmd_request =
                (struct storvsc_cmd_request *)request->context;
        struct scsi_cmnd *scmnd = cmd_request->cmd;
        struct hv_host_device *host_dev =
                (struct hv_host_device *)scmnd->device->host->hostdata;
        void (*scsi_done_fn)(struct scsi_cmnd *);
        struct scsi_sense_hdr sense_hdr;
        struct vmscsi_request *vm_srb;

        vm_srb = &request->vstor_packet.vm_srb;
        if (cmd_request->bounce_sgl_count) {
                if (vm_srb->data_in == READ_TYPE) {
                        copy_from_bounce_buffer(scsi_sglist(scmnd),
                                        cmd_request->bounce_sgl,
                                        scsi_sg_count(scmnd));
                        destroy_bounce_buffer(cmd_request->bounce_sgl,
                                        cmd_request->bounce_sgl_count);
                }
        }

        /*
         * If there is an error, offline the device, since all
         * error recovery strategies would have already been
         * deployed on the host side.
         */
        if (vm_srb->srb_status == 0x4)
                scmnd->result = DID_TARGET_FAILURE << 16;
        else
                scmnd->result = vm_srb->scsi_status;

        if (scmnd->result) {
                if (scsi_normalize_sense(scmnd->sense_buffer,
                                SCSI_SENSE_BUFFERSIZE, &sense_hdr))
                        scsi_print_sense_hdr("storvsc", &sense_hdr);
        }

        scsi_set_resid(scmnd,
                request->data_buffer.len -
                vm_srb->data_transfer_length);

        scsi_done_fn = scmnd->scsi_done;

        scmnd->host_scribble = NULL;
        scmnd->scsi_done = NULL;

        scsi_done_fn(scmnd);

        kmem_cache_free(host_dev->request_pool, cmd_request);
}

/*
 * The host guarantees to respond to each command, although I/O latencies might
 * be unbounded on Azure.  Reset the timer unconditionally to give the host a
 * chance to perform EH.
 */
static enum blk_eh_timer_return storvsc_eh_timed_out(struct scsi_cmnd *scmnd)
{
        return BLK_EH_RESET_TIMER;
}

static bool storvsc_check_scsi_cmd(struct scsi_cmnd *scmnd)
{
        bool allowed = true;
        u8 scsi_op = scmnd->cmnd[0];

        switch (scsi_op) {
        /* smartd sends this command, which will offline the device */
        case SET_WINDOW:
                scmnd->result = DID_ERROR << 16;
                allowed = false;
                break;
        default:
                break;
        }
        return allowed;
}
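
/*
 * Editor's note: SET_WINDOW (opcode 0x24) is a scanner-class command;
 * per the comment above, the Hyper-V host mishandles it badly enough
 * to offline the device, so it is failed here with DID_ERROR before it
 * is ever queued.
 */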

/*
 * storvsc_queuecommand - Initiate command processing
 */
static int storvsc_queuecommand_lck(struct scsi_cmnd *scmnd,
                                void (*done)(struct scsi_cmnd *))
{
        int ret;
        struct hv_host_device *host_dev =
                (struct hv_host_device *)scmnd->device->host->hostdata;
        struct hv_device *dev = host_dev->dev;
        struct hv_storvsc_request *request;
        struct storvsc_cmd_request *cmd_request;
        unsigned int request_size = 0;
        int i;
        struct scatterlist *sgl;
        unsigned int sg_count = 0;
        struct vmscsi_request *vm_srb;

        if (!storvsc_check_scsi_cmd(scmnd)) {
                done(scmnd);
                return 0;
        }

        /* If retrying, no need to prep the cmd */
        if (scmnd->host_scribble) {

                cmd_request =
                        (struct storvsc_cmd_request *)scmnd->host_scribble;

                goto retry_request;
        }

        scmnd->scsi_done = done;

        request_size = sizeof(struct storvsc_cmd_request);

        cmd_request = kmem_cache_zalloc(host_dev->request_pool,
                                       GFP_ATOMIC);
        if (!cmd_request) {
                scmnd->scsi_done = NULL;
                return SCSI_MLQUEUE_DEVICE_BUSY;
        }

        /* Setup the cmd request */
        cmd_request->bounce_sgl_count = 0;
        cmd_request->bounce_sgl = NULL;
        cmd_request->cmd = scmnd;

        scmnd->host_scribble = (unsigned char *)cmd_request;

        request = &cmd_request->request;
        vm_srb = &request->vstor_packet.vm_srb;


        /* Build the SRB */
        switch (scmnd->sc_data_direction) {
        case DMA_TO_DEVICE:
                vm_srb->data_in = WRITE_TYPE;
                break;
        case DMA_FROM_DEVICE:
                vm_srb->data_in = READ_TYPE;
                break;
        default:
                vm_srb->data_in = UNKNOWN_TYPE;
                break;
        }

        request->on_io_completion = storvsc_command_completion;
        request->context = cmd_request;/* scmnd; */

        vm_srb->port_number = host_dev->port;
        vm_srb->path_id = scmnd->device->channel;
        vm_srb->target_id = scmnd->device->id;
        vm_srb->lun = scmnd->device->lun;

        vm_srb->cdb_length = scmnd->cmd_len;

        memcpy(vm_srb->cdb, scmnd->cmnd, vm_srb->cdb_length);

        request->sense_buffer = scmnd->sense_buffer;


        request->data_buffer.len = scsi_bufflen(scmnd);
        if (scsi_sg_count(scmnd)) {
                sgl = (struct scatterlist *)scsi_sglist(scmnd);
                sg_count = scsi_sg_count(scmnd);

                /* check if we need to bounce the sgl */
                if (do_bounce_buffer(sgl, scsi_sg_count(scmnd)) != -1) {
                        cmd_request->bounce_sgl =
                                create_bounce_buffer(sgl, scsi_sg_count(scmnd),
                                                     scsi_bufflen(scmnd));
                        if (!cmd_request->bounce_sgl) {
                                scmnd->scsi_done = NULL;
                                scmnd->host_scribble = NULL;
                                kmem_cache_free(host_dev->request_pool,
                                                cmd_request);

                                return SCSI_MLQUEUE_HOST_BUSY;
                        }

                        cmd_request->bounce_sgl_count =
                                ALIGN(scsi_bufflen(scmnd), PAGE_SIZE) >>
                                        PAGE_SHIFT;

                        if (vm_srb->data_in == WRITE_TYPE)
                                copy_to_bounce_buffer(sgl,
                                        cmd_request->bounce_sgl,
                                        scsi_sg_count(scmnd));

                        sgl = cmd_request->bounce_sgl;
                        sg_count = cmd_request->bounce_sgl_count;
                }

                request->data_buffer.offset = sgl[0].offset;

                for (i = 0; i < sg_count; i++)
                        request->data_buffer.pfn_array[i] =
                                page_to_pfn(sg_page((&sgl[i])));

        } else if (scsi_sglist(scmnd)) {
                request->data_buffer.offset =
                        virt_to_phys(scsi_sglist(scmnd)) & (PAGE_SIZE-1);
                request->data_buffer.pfn_array[0] =
                        virt_to_phys(scsi_sglist(scmnd)) >> PAGE_SHIFT;
        }

retry_request:
        /* Invokes the vsc to start an IO */
        ret = storvsc_do_io(dev, &cmd_request->request);

        if (ret == -EAGAIN) {
                /* no more space */

                if (cmd_request->bounce_sgl_count)
                        destroy_bounce_buffer(cmd_request->bounce_sgl,
                                        cmd_request->bounce_sgl_count);

                kmem_cache_free(host_dev->request_pool, cmd_request);

                scmnd->scsi_done = NULL;
                scmnd->host_scribble = NULL;

                ret = SCSI_MLQUEUE_DEVICE_BUSY;
        }

        return ret;
}

static DEF_SCSI_QCMD(storvsc_queuecommand)
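
/*
 * Editor's note: DEF_SCSI_QCMD generates the storvsc_queuecommand()
 * wrapper referenced in the host template below; it acquires the
 * Scsi_Host lock and invokes storvsc_queuecommand_lck() with
 * cmd->scsi_done as the completion callback, preserving the legacy
 * locked queuecommand semantics.
 */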

/* Scsi driver */
static struct scsi_host_template scsi_driver = {
        .module =               THIS_MODULE,
        .name =                 "storvsc_host_t",
        .bios_param =           storvsc_get_chs,
        .queuecommand =         storvsc_queuecommand,
        .eh_host_reset_handler =        storvsc_host_reset_handler,
        .eh_timed_out =         storvsc_eh_timed_out,
        .slave_alloc =          storvsc_device_alloc,
        .slave_configure =      storvsc_device_configure,
        .cmd_per_lun =          1,
        /* STORVSC_MAX_IO_REQUESTS (128) requests * 1 target */
        .can_queue =            STORVSC_MAX_IO_REQUESTS*STORVSC_MAX_TARGETS,
        .this_id =              -1,
        /* no use setting to 0 since ll_blk_rw reset it to 1 */
        /* currently 32 */
        .sg_tablesize =         MAX_MULTIPAGE_BUFFER_COUNT,
        /*
         * ENABLE_CLUSTERING allows multiple physically contiguous bio_vecs
         * to merge into one sg element.  If set, we must limit the
         * max_segment_size to PAGE_SIZE, otherwise we may get one sg element
         * that represents multiple physically contiguous pfns (i.e.
         * sg[x].length > PAGE_SIZE).
         */
        .use_clustering =       ENABLE_CLUSTERING,
        /* Make sure we don't get an sg segment that crosses a page boundary */
        .dma_boundary =         PAGE_SIZE-1,
};

enum {
        SCSI_GUID,
        IDE_GUID,
};

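/*
 * Editor's note: VMBUS_DEVICE() takes the class GUID as raw bytes in
 * little-endian order, so the byte dumps below correspond to the
 * Hyper-V synthetic SCSI controller class id
 * {ba6163d9-04a1-4d29-b605-72e2ffb1dc7f} and the emulated IDE class id
 * {32412632-86cb-44a2-9b5c-50d1417354f5}.
 */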
static const struct hv_vmbus_device_id id_table[] = {
        /* SCSI guid */
        { VMBUS_DEVICE(0xd9, 0x63, 0x61, 0xba, 0xa1, 0x04, 0x29, 0x4d,
                       0xb6, 0x05, 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f)
          .driver_data = SCSI_GUID },
        /* IDE guid */
        { VMBUS_DEVICE(0x32, 0x26, 0x41, 0x32, 0xcb, 0x86, 0xa2, 0x44,
                       0x9b, 0x5c, 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5)
          .driver_data = IDE_GUID },
        { },
};

MODULE_DEVICE_TABLE(vmbus, id_table);


/*
 * storvsc_probe - Add a new device for this driver
 */

static int storvsc_probe(struct hv_device *device,
                        const struct hv_vmbus_device_id *dev_id)
{
        int ret;
        struct Scsi_Host *host;
        struct hv_host_device *host_dev;
        bool dev_is_ide = (dev_id->driver_data == IDE_GUID);
        int path = 0;
        int target = 0;
        struct storvsc_device *stor_device;

        host = scsi_host_alloc(&scsi_driver,
                               sizeof(struct hv_host_device));
        if (!host)
                return -ENOMEM;

        host_dev = (struct hv_host_device *)host->hostdata;
        memset(host_dev, 0, sizeof(struct hv_host_device));

        host_dev->port = host->host_no;
        host_dev->dev = device;

        host_dev->request_pool =
                                kmem_cache_create(dev_name(&device->device),
                                        sizeof(struct storvsc_cmd_request), 0,
                                        SLAB_HWCACHE_ALIGN, NULL);

        if (!host_dev->request_pool) {
                scsi_host_put(host);
                return -ENOMEM;
        }

        stor_device = kzalloc(sizeof(struct storvsc_device), GFP_KERNEL);
        if (!stor_device) {
                kmem_cache_destroy(host_dev->request_pool);
                scsi_host_put(host);
                return -ENOMEM;
        }

        stor_device->destroy = false;
        init_waitqueue_head(&stor_device->waiting_to_drain);
        stor_device->device = device;
        stor_device->host = host;
        hv_set_drvdata(device, stor_device);

        stor_device->port_number = host->host_no;
        ret = storvsc_connect_to_vsp(device, storvsc_ringbuffer_size);
        if (ret) {
                kmem_cache_destroy(host_dev->request_pool);
                scsi_host_put(host);
                kfree(stor_device);
                return ret;
        }

        if (dev_is_ide)
                storvsc_get_ide_info(device, &target, &path);

        host_dev->path = stor_device->path_id;
        host_dev->target = stor_device->target_id;

        /* max # of devices per target */
        host->max_lun = STORVSC_MAX_LUNS_PER_TARGET;
        /* max # of targets per channel */
        host->max_id = STORVSC_MAX_TARGETS;
        /* max # of channels */
        host->max_channel = STORVSC_MAX_CHANNELS - 1;
        /* max cmd length */
        host->max_cmd_len = STORVSC_MAX_CMD_LEN;

        /* Register the HBA and start the scsi bus scan */
        ret = scsi_add_host(host, &device->device);
        if (ret != 0)
                goto err_out;

        if (!dev_is_ide) {
                scsi_scan_host(host);
                return 0;
        }
        ret = scsi_add_device(host, 0, target, 0);
        if (ret) {
                scsi_remove_host(host);
                goto err_out;
        }
        return 0;

err_out:
        storvsc_dev_remove(device);
        kmem_cache_destroy(host_dev->request_pool);
        scsi_host_put(host);
        return -ENODEV;
}

/* The one and only one */

static struct hv_driver storvsc_drv = {
        .name = "storvsc",
        .id_table = id_table,
        .probe = storvsc_probe,
        .remove = storvsc_remove,
};

static int __init storvsc_drv_init(void)
{
        u32 max_outstanding_req_per_channel;

        /*
         * Divide the ring buffer data size (which is 1 page less
         * than the ring buffer size since that page is reserved for
         * the ring buffer indices) by the max request size (which is
         * vmbus_channel_packet_multipage_buffer + struct vstor_packet + u64)
         */
        max_outstanding_req_per_channel =
                ((storvsc_ringbuffer_size - PAGE_SIZE) /
                ALIGN(MAX_MULTIPAGE_BUFFER_PACKET +
                sizeof(struct vstor_packet) + sizeof(u64),
                sizeof(u64)));

        if (max_outstanding_req_per_channel <
            STORVSC_MAX_IO_REQUESTS)
                return -EINVAL;

        return vmbus_driver_register(&storvsc_drv);
}
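
/*
 * Sanity-check example (editor's note, numbers assume 4 KiB pages):
 * the default ring is 20 pages (81920 bytes), leaving 77824 bytes of
 * data area after the index page.  Dividing that by the 8-byte-aligned
 * worst-case packet (multipage-buffer header + vstor_packet + trailing
 * u64) must leave room for at least STORVSC_MAX_IO_REQUESTS (128)
 * outstanding requests; otherwise a user-supplied
 * storvsc_ringbuffer_size is rejected with -EINVAL.
 */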

static void __exit storvsc_drv_exit(void)
{
        vmbus_driver_unregister(&storvsc_drv);
}

MODULE_LICENSE("GPL");
MODULE_VERSION(HV_DRV_VERSION);
MODULE_DESCRIPTION("Microsoft Hyper-V virtual storage driver");
module_init(storvsc_drv_init);
module_exit(storvsc_drv_exit);