/*******************************************************************************
 * Vhost kernel TCM fabric driver for virtio SCSI initiators
 *
 * (C) Copyright 2010-2013 Datera, Inc.
 * (C) Copyright 2010-2012 IBM Corp.
 *
 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
 *
 * Authors: Nicholas A. Bellinger <nab@daterainc.com>
 *          Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 ****************************************************************************/
23
24 #include <linux/module.h>
25 #include <linux/moduleparam.h>
26 #include <generated/utsrelease.h>
27 #include <linux/utsname.h>
28 #include <linux/init.h>
29 #include <linux/slab.h>
30 #include <linux/kthread.h>
31 #include <linux/types.h>
32 #include <linux/string.h>
33 #include <linux/configfs.h>
34 #include <linux/ctype.h>
35 #include <linux/compat.h>
36 #include <linux/eventfd.h>
37 #include <linux/fs.h>
38 #include <linux/miscdevice.h>
39 #include <asm/unaligned.h>
40 #include <scsi/scsi.h>
41 #include <scsi/scsi_tcq.h>
42 #include <target/target_core_base.h>
43 #include <target/target_core_fabric.h>
44 #include <target/target_core_fabric_configfs.h>
45 #include <target/target_core_configfs.h>
46 #include <target/configfs_macros.h>
47 #include <linux/vhost.h>
48 #include <linux/virtio_scsi.h>
49 #include <linux/llist.h>
50 #include <linux/bitmap.h>
51 #include <linux/percpu_ida.h>
52
53 #include "vhost.h"
54
55 #define TCM_VHOST_VERSION  "v0.1"
56 #define TCM_VHOST_NAMELEN 256
57 #define TCM_VHOST_MAX_CDB_SIZE 32
58 #define TCM_VHOST_DEFAULT_TAGS 256
59 #define TCM_VHOST_PREALLOC_SGLS 2048
60 #define TCM_VHOST_PREALLOC_UPAGES 2048
61 #define TCM_VHOST_PREALLOC_PROT_SGLS 512
62
struct vhost_scsi_inflight {
        /* Wait for the flush operation to finish */
        struct completion comp;
        /* Refcount for the inflight reqs */
        struct kref kref;
};

struct tcm_vhost_cmd {
        /* Descriptor from vhost_get_vq_desc() for virt_queue segment */
        int tvc_vq_desc;
        /* virtio-scsi initiator task attribute */
        int tvc_task_attr;
        /* virtio-scsi initiator data direction */
        enum dma_data_direction tvc_data_direction;
        /* Expected data transfer length from virtio-scsi header */
        u32 tvc_exp_data_len;
        /* The Tag from include/linux/virtio_scsi.h:struct virtio_scsi_cmd_req */
        u64 tvc_tag;
        /* The number of scatterlists associated with this cmd */
        u32 tvc_sgl_count;
        u32 tvc_prot_sgl_count;
        /* Saved unpacked SCSI LUN for tcm_vhost_submission_work() */
        u32 tvc_lun;
        /* Pointer to the SGL formatted memory from virtio-scsi */
        struct scatterlist *tvc_sgl;
        struct scatterlist *tvc_prot_sgl;
        struct page **tvc_upages;
        /* Pointer to response */
        struct virtio_scsi_cmd_resp __user *tvc_resp;
        /* Pointer to vhost_scsi for our device */
        struct vhost_scsi *tvc_vhost;
        /* Pointer to vhost_virtqueue for the cmd */
        struct vhost_virtqueue *tvc_vq;
        /* Pointer to vhost nexus memory */
        struct tcm_vhost_nexus *tvc_nexus;
        /* The TCM I/O descriptor that is accessed via container_of() */
        struct se_cmd tvc_se_cmd;
        /* work item used for cmwq dispatch to tcm_vhost_submission_work() */
        struct work_struct work;
        /* Copy of the incoming SCSI command descriptor block (CDB) */
        unsigned char tvc_cdb[TCM_VHOST_MAX_CDB_SIZE];
        /* Sense buffer that will be mapped into outgoing status */
        unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER];
        /* Completed commands list, serviced from vhost worker thread */
        struct llist_node tvc_completion_list;
        /* Used to track inflight cmd */
        struct vhost_scsi_inflight *inflight;
};

struct tcm_vhost_nexus {
        /* Pointer to TCM session for I_T Nexus */
        struct se_session *tvn_se_sess;
};

struct tcm_vhost_nacl {
        /* Binary World Wide unique Port Name for Vhost Initiator port */
        u64 iport_wwpn;
        /* ASCII formatted WWPN for SAS Initiator port */
        char iport_name[TCM_VHOST_NAMELEN];
        /* Returned by tcm_vhost_make_nodeacl() */
        struct se_node_acl se_node_acl;
};

struct tcm_vhost_tpg {
        /* Vhost port target portal group tag for TCM */
        u16 tport_tpgt;
        /* Used to track number of TPG Port/Lun Links with respect to explicit I_T Nexus shutdown */
        int tv_tpg_port_count;
        /* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */
        int tv_tpg_vhost_count;
        /* list for tcm_vhost_list */
        struct list_head tv_tpg_list;
        /* Used to protect access for tpg_nexus */
        struct mutex tv_tpg_mutex;
        /* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */
        struct tcm_vhost_nexus *tpg_nexus;
        /* Pointer back to tcm_vhost_tport */
        struct tcm_vhost_tport *tport;
        /* Returned by tcm_vhost_make_tpg() */
        struct se_portal_group se_tpg;
        /* Pointer back to vhost_scsi, protected by tv_tpg_mutex */
        struct vhost_scsi *vhost_scsi;
};

struct tcm_vhost_tport {
        /* SCSI protocol the tport is providing */
        u8 tport_proto_id;
        /* Binary World Wide unique Port Name for Vhost Target port */
        u64 tport_wwpn;
        /* ASCII formatted WWPN for Vhost Target port */
        char tport_name[TCM_VHOST_NAMELEN];
        /* Returned by tcm_vhost_make_tport() */
        struct se_wwn tport_wwn;
};

struct tcm_vhost_evt {
        /* event to be sent to guest */
        struct virtio_scsi_event event;
        /* event list, serviced from vhost worker thread */
        struct llist_node list;
};

enum {
        VHOST_SCSI_VQ_CTL = 0,
        VHOST_SCSI_VQ_EVT = 1,
        VHOST_SCSI_VQ_IO = 2,
};

enum {
        VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG) |
                                               (1ULL << VIRTIO_SCSI_F_T10_PI)
};

#define VHOST_SCSI_MAX_TARGET   256
#define VHOST_SCSI_MAX_VQ       128
#define VHOST_SCSI_MAX_EVENT    128

struct vhost_scsi_virtqueue {
        struct vhost_virtqueue vq;
        /*
         * Reference counting for inflight reqs, used for flush operation. At
         * any time, one of the two counters tracks newly submitted commands,
         * while a flush waits for the other to drop to zero.
         */
        struct vhost_scsi_inflight inflights[2];
        /*
         * Indicate current inflight in use, protected by vq->mutex.
         * Writers must also take dev mutex and flush under it.
         */
        int inflight_idx;
};
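
/*
 * Flush timeline sketch: with inflight_idx == 0, each new command takes a
 * reference on inflights[0].  A flush flips inflight_idx to 1 so that new
 * commands land on inflights[1], drops the initial kref on inflights[0],
 * and then sleeps on inflights[0].comp until the last pre-flush command
 * releases its reference.  See tcm_vhost_init_inflight() and
 * vhost_scsi_flush() below.
 */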

struct vhost_scsi {
        /* Protected by vhost_scsi->dev.mutex */
        struct tcm_vhost_tpg **vs_tpg;
        char vs_vhost_wwpn[TRANSPORT_IQN_LEN];

        struct vhost_dev dev;
        struct vhost_scsi_virtqueue vqs[VHOST_SCSI_MAX_VQ];

        struct vhost_work vs_completion_work; /* cmd completion work item */
        struct llist_head vs_completion_list; /* cmd completion queue */

        struct vhost_work vs_event_work; /* evt injection work item */
        struct llist_head vs_event_list; /* evt injection queue */

        bool vs_events_missed; /* any missed events, protected by vq->mutex */
        int vs_events_nr; /* num of pending events, protected by vq->mutex */
};

/* Local pointer to allocated TCM configfs fabric module */
static struct target_fabric_configfs *tcm_vhost_fabric_configfs;

static struct workqueue_struct *tcm_vhost_workqueue;

/* Global mutex to protect the tcm_vhost TPG list for vhost IOCTL access */
static DEFINE_MUTEX(tcm_vhost_mutex);
static LIST_HEAD(tcm_vhost_list);

static int iov_num_pages(struct iovec *iov)
{
        return (PAGE_ALIGN((unsigned long)iov->iov_base + iov->iov_len) -
               ((unsigned long)iov->iov_base & PAGE_MASK)) >> PAGE_SHIFT;
}
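
/*
 * Example (assuming 4 KiB pages, hypothetical values): for iov_base == 0x1800
 * and iov_len == 8192, the range [0x1800, 0x3800) touches pages 0x1000,
 * 0x2000 and 0x3000, so iov_num_pages() returns 3 and the mapping helpers
 * below consume three scatterlist entries for it.
 */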

static void tcm_vhost_done_inflight(struct kref *kref)
{
        struct vhost_scsi_inflight *inflight;

        inflight = container_of(kref, struct vhost_scsi_inflight, kref);
        complete(&inflight->comp);
}

static void tcm_vhost_init_inflight(struct vhost_scsi *vs,
                                    struct vhost_scsi_inflight *old_inflight[])
{
        struct vhost_scsi_inflight *new_inflight;
        struct vhost_virtqueue *vq;
        int idx, i;

        for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
                vq = &vs->vqs[i].vq;

                mutex_lock(&vq->mutex);

                /* store old inflight */
                idx = vs->vqs[i].inflight_idx;
                if (old_inflight)
                        old_inflight[i] = &vs->vqs[i].inflights[idx];

                /* set up new inflight */
                vs->vqs[i].inflight_idx = idx ^ 1;
                new_inflight = &vs->vqs[i].inflights[idx ^ 1];
                kref_init(&new_inflight->kref);
                init_completion(&new_inflight->comp);

                mutex_unlock(&vq->mutex);
        }
}

static struct vhost_scsi_inflight *
tcm_vhost_get_inflight(struct vhost_virtqueue *vq)
{
        struct vhost_scsi_inflight *inflight;
        struct vhost_scsi_virtqueue *svq;

        svq = container_of(vq, struct vhost_scsi_virtqueue, vq);
        inflight = &svq->inflights[svq->inflight_idx];
        kref_get(&inflight->kref);

        return inflight;
}

static void tcm_vhost_put_inflight(struct vhost_scsi_inflight *inflight)
{
        kref_put(&inflight->kref, tcm_vhost_done_inflight);
}

static int tcm_vhost_check_true(struct se_portal_group *se_tpg)
{
        return 1;
}

static int tcm_vhost_check_false(struct se_portal_group *se_tpg)
{
        return 0;
}

static char *tcm_vhost_get_fabric_name(void)
{
        return "vhost";
}

static u8 tcm_vhost_get_fabric_proto_ident(struct se_portal_group *se_tpg)
{
        struct tcm_vhost_tpg *tpg = container_of(se_tpg,
                                struct tcm_vhost_tpg, se_tpg);
        struct tcm_vhost_tport *tport = tpg->tport;

        switch (tport->tport_proto_id) {
        case SCSI_PROTOCOL_SAS:
                return sas_get_fabric_proto_ident(se_tpg);
        case SCSI_PROTOCOL_FCP:
                return fc_get_fabric_proto_ident(se_tpg);
        case SCSI_PROTOCOL_ISCSI:
                return iscsi_get_fabric_proto_ident(se_tpg);
        default:
                pr_err("Unknown tport_proto_id: 0x%02x, using"
                        " SAS emulation\n", tport->tport_proto_id);
                break;
        }

        return sas_get_fabric_proto_ident(se_tpg);
}

static char *tcm_vhost_get_fabric_wwn(struct se_portal_group *se_tpg)
{
        struct tcm_vhost_tpg *tpg = container_of(se_tpg,
                                struct tcm_vhost_tpg, se_tpg);
        struct tcm_vhost_tport *tport = tpg->tport;

        return &tport->tport_name[0];
}

static u16 tcm_vhost_get_tag(struct se_portal_group *se_tpg)
{
        struct tcm_vhost_tpg *tpg = container_of(se_tpg,
                                struct tcm_vhost_tpg, se_tpg);
        return tpg->tport_tpgt;
}

static u32 tcm_vhost_get_default_depth(struct se_portal_group *se_tpg)
{
        return 1;
}

static u32
tcm_vhost_get_pr_transport_id(struct se_portal_group *se_tpg,
                              struct se_node_acl *se_nacl,
                              struct t10_pr_registration *pr_reg,
                              int *format_code,
                              unsigned char *buf)
{
        struct tcm_vhost_tpg *tpg = container_of(se_tpg,
                                struct tcm_vhost_tpg, se_tpg);
        struct tcm_vhost_tport *tport = tpg->tport;

        switch (tport->tport_proto_id) {
        case SCSI_PROTOCOL_SAS:
                return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
                                        format_code, buf);
        case SCSI_PROTOCOL_FCP:
                return fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
                                        format_code, buf);
        case SCSI_PROTOCOL_ISCSI:
                return iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
                                        format_code, buf);
        default:
                pr_err("Unknown tport_proto_id: 0x%02x, using"
                        " SAS emulation\n", tport->tport_proto_id);
                break;
        }

        return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
                        format_code, buf);
}

static u32
tcm_vhost_get_pr_transport_id_len(struct se_portal_group *se_tpg,
                                  struct se_node_acl *se_nacl,
                                  struct t10_pr_registration *pr_reg,
                                  int *format_code)
{
        struct tcm_vhost_tpg *tpg = container_of(se_tpg,
                                struct tcm_vhost_tpg, se_tpg);
        struct tcm_vhost_tport *tport = tpg->tport;

        switch (tport->tport_proto_id) {
        case SCSI_PROTOCOL_SAS:
                return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
                                        format_code);
        case SCSI_PROTOCOL_FCP:
                return fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
                                        format_code);
        case SCSI_PROTOCOL_ISCSI:
                return iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
                                        format_code);
        default:
                pr_err("Unknown tport_proto_id: 0x%02x, using"
                        " SAS emulation\n", tport->tport_proto_id);
                break;
        }

        return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
                        format_code);
}

static char *
tcm_vhost_parse_pr_out_transport_id(struct se_portal_group *se_tpg,
                                    const char *buf,
                                    u32 *out_tid_len,
                                    char **port_nexus_ptr)
{
        struct tcm_vhost_tpg *tpg = container_of(se_tpg,
                                struct tcm_vhost_tpg, se_tpg);
        struct tcm_vhost_tport *tport = tpg->tport;

        switch (tport->tport_proto_id) {
        case SCSI_PROTOCOL_SAS:
                return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
                                        port_nexus_ptr);
        case SCSI_PROTOCOL_FCP:
                return fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
                                        port_nexus_ptr);
        case SCSI_PROTOCOL_ISCSI:
                return iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
                                        port_nexus_ptr);
        default:
                pr_err("Unknown tport_proto_id: 0x%02x, using"
                        " SAS emulation\n", tport->tport_proto_id);
                break;
        }

        return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
                        port_nexus_ptr);
}

static struct se_node_acl *
tcm_vhost_alloc_fabric_acl(struct se_portal_group *se_tpg)
{
        struct tcm_vhost_nacl *nacl;

        nacl = kzalloc(sizeof(struct tcm_vhost_nacl), GFP_KERNEL);
        if (!nacl) {
                pr_err("Unable to allocate struct tcm_vhost_nacl\n");
                return NULL;
        }

        return &nacl->se_node_acl;
}

static void
tcm_vhost_release_fabric_acl(struct se_portal_group *se_tpg,
                             struct se_node_acl *se_nacl)
{
        struct tcm_vhost_nacl *nacl = container_of(se_nacl,
                        struct tcm_vhost_nacl, se_node_acl);
        kfree(nacl);
}

static u32 tcm_vhost_tpg_get_inst_index(struct se_portal_group *se_tpg)
{
        return 1;
}

static void tcm_vhost_release_cmd(struct se_cmd *se_cmd)
{
        struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd,
                                struct tcm_vhost_cmd, tvc_se_cmd);
        struct se_session *se_sess = se_cmd->se_sess;
        int i;

        if (tv_cmd->tvc_sgl_count) {
                for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
                        put_page(sg_page(&tv_cmd->tvc_sgl[i]));
        }
        if (tv_cmd->tvc_prot_sgl_count) {
                for (i = 0; i < tv_cmd->tvc_prot_sgl_count; i++)
                        put_page(sg_page(&tv_cmd->tvc_prot_sgl[i]));
        }

        tcm_vhost_put_inflight(tv_cmd->inflight);
        percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
}

static int tcm_vhost_shutdown_session(struct se_session *se_sess)
{
        return 0;
}

static void tcm_vhost_close_session(struct se_session *se_sess)
{
        return;
}

static u32 tcm_vhost_sess_get_index(struct se_session *se_sess)
{
        return 0;
}

static int tcm_vhost_write_pending(struct se_cmd *se_cmd)
{
        /* Go ahead and process the write immediately */
        target_execute_cmd(se_cmd);
        return 0;
}

static int tcm_vhost_write_pending_status(struct se_cmd *se_cmd)
{
        return 0;
}

static void tcm_vhost_set_default_node_attrs(struct se_node_acl *nacl)
{
        return;
}

static u32 tcm_vhost_get_task_tag(struct se_cmd *se_cmd)
{
        return 0;
}

static int tcm_vhost_get_cmd_state(struct se_cmd *se_cmd)
{
        return 0;
}

static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *cmd)
{
        struct vhost_scsi *vs = cmd->tvc_vhost;

        llist_add(&cmd->tvc_completion_list, &vs->vs_completion_list);

        vhost_work_queue(&vs->dev, &vs->vs_completion_work);
}
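
/*
 * The completion path is lock-free: TCM invokes the queue_data_in/
 * queue_status callbacks below from target context, which only push the
 * command onto vs_completion_list; the vring itself is touched exclusively
 * by vhost_scsi_complete_cmd_work() running in the vhost worker thread.
 */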

static int tcm_vhost_queue_data_in(struct se_cmd *se_cmd)
{
        struct tcm_vhost_cmd *cmd = container_of(se_cmd,
                                struct tcm_vhost_cmd, tvc_se_cmd);
        vhost_scsi_complete_cmd(cmd);
        return 0;
}

static int tcm_vhost_queue_status(struct se_cmd *se_cmd)
{
        struct tcm_vhost_cmd *cmd = container_of(se_cmd,
                                struct tcm_vhost_cmd, tvc_se_cmd);
        vhost_scsi_complete_cmd(cmd);
        return 0;
}

static void tcm_vhost_queue_tm_rsp(struct se_cmd *se_cmd)
{
        return;
}

static void tcm_vhost_aborted_task(struct se_cmd *se_cmd)
{
        return;
}

static void tcm_vhost_free_evt(struct vhost_scsi *vs, struct tcm_vhost_evt *evt)
{
        vs->vs_events_nr--;
        kfree(evt);
}

static struct tcm_vhost_evt *
tcm_vhost_allocate_evt(struct vhost_scsi *vs,
                       u32 event, u32 reason)
{
        struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
        struct tcm_vhost_evt *evt;

        if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
                vs->vs_events_missed = true;
                return NULL;
        }

        evt = kzalloc(sizeof(*evt), GFP_KERNEL);
        if (!evt) {
                vq_err(vq, "Failed to allocate tcm_vhost_evt\n");
                vs->vs_events_missed = true;
                return NULL;
        }

        evt->event.event = event;
        evt->event.reason = reason;
        vs->vs_events_nr++;

        return evt;
}

static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *cmd)
{
        struct se_cmd *se_cmd = &cmd->tvc_se_cmd;

        /* TODO locking against target/backend threads? */
        transport_generic_free_cmd(se_cmd, 0);
}

static int vhost_scsi_check_stop_free(struct se_cmd *se_cmd)
{
        return target_put_sess_cmd(se_cmd->se_sess, se_cmd);
}

static void
tcm_vhost_do_evt_work(struct vhost_scsi *vs, struct tcm_vhost_evt *evt)
{
        struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
        struct virtio_scsi_event *event = &evt->event;
        struct virtio_scsi_event __user *eventp;
        unsigned out, in;
        int head, ret;

        if (!vq->private_data) {
                vs->vs_events_missed = true;
                return;
        }

again:
        vhost_disable_notify(&vs->dev, vq);
        head = vhost_get_vq_desc(vq, vq->iov,
                        ARRAY_SIZE(vq->iov), &out, &in,
                        NULL, NULL);
        if (head < 0) {
                vs->vs_events_missed = true;
                return;
        }
        if (head == vq->num) {
                if (vhost_enable_notify(&vs->dev, vq))
                        goto again;
                vs->vs_events_missed = true;
                return;
        }

        if ((vq->iov[out].iov_len != sizeof(struct virtio_scsi_event))) {
                vq_err(vq, "Expecting virtio_scsi_event, got %zu bytes\n",
                                vq->iov[out].iov_len);
                vs->vs_events_missed = true;
                return;
        }

        if (vs->vs_events_missed) {
                event->event |= VIRTIO_SCSI_T_EVENTS_MISSED;
                vs->vs_events_missed = false;
        }

        eventp = vq->iov[out].iov_base;
        ret = __copy_to_user(eventp, event, sizeof(*event));
        if (!ret)
                vhost_add_used_and_signal(&vs->dev, vq, head, 0);
        else
                vq_err(vq, "Faulted on tcm_vhost_send_event\n");
}

static void tcm_vhost_evt_work(struct vhost_work *work)
{
        struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
                                        vs_event_work);
        struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
        struct tcm_vhost_evt *evt;
        struct llist_node *llnode;

        mutex_lock(&vq->mutex);
        llnode = llist_del_all(&vs->vs_event_list);
        while (llnode) {
                evt = llist_entry(llnode, struct tcm_vhost_evt, list);
                llnode = llist_next(llnode);
                tcm_vhost_do_evt_work(vs, evt);
                tcm_vhost_free_evt(vs, evt);
        }
        mutex_unlock(&vq->mutex);
}

/* Fill in status and signal that we are done processing this command
 *
 * This is scheduled in the vhost work queue so we are called with the owner
 * process mm and can access the vring.
 */
static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
{
        struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
                                        vs_completion_work);
        DECLARE_BITMAP(signal, VHOST_SCSI_MAX_VQ);
        struct virtio_scsi_cmd_resp v_rsp;
        struct tcm_vhost_cmd *cmd;
        struct llist_node *llnode;
        struct se_cmd *se_cmd;
        int ret, vq;

        bitmap_zero(signal, VHOST_SCSI_MAX_VQ);
        llnode = llist_del_all(&vs->vs_completion_list);
        while (llnode) {
                cmd = llist_entry(llnode, struct tcm_vhost_cmd,
                                     tvc_completion_list);
                llnode = llist_next(llnode);
                se_cmd = &cmd->tvc_se_cmd;

                pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
                        cmd, se_cmd->residual_count, se_cmd->scsi_status);

                memset(&v_rsp, 0, sizeof(v_rsp));
                v_rsp.resid = se_cmd->residual_count;
                /* TODO is status_qualifier field needed? */
                v_rsp.status = se_cmd->scsi_status;
                v_rsp.sense_len = se_cmd->scsi_sense_length;
                memcpy(v_rsp.sense, cmd->tvc_sense_buf,
                       v_rsp.sense_len);
                ret = copy_to_user(cmd->tvc_resp, &v_rsp, sizeof(v_rsp));
                if (likely(ret == 0)) {
                        struct vhost_scsi_virtqueue *q;
                        vhost_add_used(cmd->tvc_vq, cmd->tvc_vq_desc, 0);
                        q = container_of(cmd->tvc_vq, struct vhost_scsi_virtqueue, vq);
                        vq = q - vs->vqs;
                        __set_bit(vq, signal);
                } else
                        pr_err("Faulted on virtio_scsi_cmd_resp\n");

                vhost_scsi_free_cmd(cmd);
        }

        vq = -1;
        while ((vq = find_next_bit(signal, VHOST_SCSI_MAX_VQ, vq + 1))
                < VHOST_SCSI_MAX_VQ)
                vhost_signal(&vs->dev, &vs->vqs[vq].vq);
}

static struct tcm_vhost_cmd *
vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct tcm_vhost_tpg *tpg,
                   unsigned char *cdb, u64 scsi_tag, u16 lun, u8 task_attr,
                   u32 exp_data_len, int data_direction)
{
        struct tcm_vhost_cmd *cmd;
        struct tcm_vhost_nexus *tv_nexus;
        struct se_session *se_sess;
        struct scatterlist *sg, *prot_sg;
        struct page **pages;
        int tag;

        tv_nexus = tpg->tpg_nexus;
        if (!tv_nexus) {
                pr_err("Unable to locate active struct tcm_vhost_nexus\n");
                return ERR_PTR(-EIO);
        }
        se_sess = tv_nexus->tvn_se_sess;

        tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
        if (tag < 0) {
                pr_err("Unable to obtain tag for tcm_vhost_cmd\n");
                return ERR_PTR(-ENOMEM);
        }

        cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[tag];
        sg = cmd->tvc_sgl;
        prot_sg = cmd->tvc_prot_sgl;
        pages = cmd->tvc_upages;
        memset(cmd, 0, sizeof(struct tcm_vhost_cmd));

        cmd->tvc_sgl = sg;
        cmd->tvc_prot_sgl = prot_sg;
        cmd->tvc_upages = pages;
        cmd->tvc_se_cmd.map_tag = tag;
        cmd->tvc_tag = scsi_tag;
        cmd->tvc_lun = lun;
        cmd->tvc_task_attr = task_attr;
        cmd->tvc_exp_data_len = exp_data_len;
        cmd->tvc_data_direction = data_direction;
        cmd->tvc_nexus = tv_nexus;
        cmd->inflight = tcm_vhost_get_inflight(vq);

        memcpy(cmd->tvc_cdb, cdb, TCM_VHOST_MAX_CDB_SIZE);

        return cmd;
}
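
/*
 * Note that the memset() above deliberately saves and restores the tvc_sgl,
 * tvc_prot_sgl and tvc_upages pointers: those arrays are preallocated per
 * tag (see the TCM_VHOST_PREALLOC_* constants) and must survive the
 * descriptor being recycled through the session tag pool.
 */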

/*
 * Map a user memory range into a scatterlist
 *
 * Returns the number of scatterlist entries used or -errno on error.
 */
static int
vhost_scsi_map_to_sgl(struct tcm_vhost_cmd *tv_cmd,
                      struct scatterlist *sgl,
                      unsigned int sgl_count,
                      struct iovec *iov,
                      struct page **pages,
                      bool write)
{
        unsigned int npages = 0, pages_nr, offset, nbytes;
        struct scatterlist *sg = sgl;
        void __user *ptr = iov->iov_base;
        size_t len = iov->iov_len;
        int ret, i;

        pages_nr = iov_num_pages(iov);
        if (pages_nr > sgl_count) {
                pr_err("vhost_scsi_map_to_sgl() pages_nr: %u greater than"
                       " sgl_count: %u\n", pages_nr, sgl_count);
                return -ENOBUFS;
        }
        if (pages_nr > TCM_VHOST_PREALLOC_UPAGES) {
                pr_err("vhost_scsi_map_to_sgl() pages_nr: %u greater than"
                       " preallocated TCM_VHOST_PREALLOC_UPAGES: %u\n",
                        pages_nr, TCM_VHOST_PREALLOC_UPAGES);
                return -ENOBUFS;
        }

        ret = get_user_pages_fast((unsigned long)ptr, pages_nr, write, pages);
        /* No pages were pinned */
        if (ret < 0)
                goto out;
        /* Fewer pages pinned than wanted */
        if (ret != pages_nr) {
                for (i = 0; i < ret; i++)
                        put_page(pages[i]);
                ret = -EFAULT;
                goto out;
        }

        while (len > 0) {
                offset = (uintptr_t)ptr & ~PAGE_MASK;
                nbytes = min_t(unsigned int, PAGE_SIZE - offset, len);
                sg_set_page(sg, pages[npages], nbytes, offset);
                ptr += nbytes;
                len -= nbytes;
                sg++;
                npages++;
        }

out:
        return ret;
}
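
/*
 * On success the return value equals pages_nr: the while loop above fills
 * exactly one scatterlist entry per pinned page, the first starting at the
 * iovec's in-page offset and the last possibly ending short of a page
 * boundary.
 */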

static int
vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *cmd,
                          struct iovec *iov,
                          int niov,
                          bool write)
{
        struct scatterlist *sg = cmd->tvc_sgl;
        unsigned int sgl_count = 0;
        int ret, i;

        for (i = 0; i < niov; i++)
                sgl_count += iov_num_pages(&iov[i]);

        if (sgl_count > TCM_VHOST_PREALLOC_SGLS) {
                pr_err("vhost_scsi_map_iov_to_sgl() sgl_count: %u greater than"
                        " preallocated TCM_VHOST_PREALLOC_SGLS: %u\n",
                        sgl_count, TCM_VHOST_PREALLOC_SGLS);
                return -ENOBUFS;
        }

        pr_debug("%s sg %p sgl_count %u\n", __func__, sg, sgl_count);
        sg_init_table(sg, sgl_count);
        cmd->tvc_sgl_count = sgl_count;

        pr_debug("Mapping iovec %p for %u pages\n", &iov[0], sgl_count);

        for (i = 0; i < niov; i++) {
                ret = vhost_scsi_map_to_sgl(cmd, sg, sgl_count, &iov[i],
                                            cmd->tvc_upages, write);
                if (ret < 0) {
                        for (i = 0; i < cmd->tvc_sgl_count; i++)
                                put_page(sg_page(&cmd->tvc_sgl[i]));

                        cmd->tvc_sgl_count = 0;
                        return ret;
                }
                sg += ret;
                sgl_count -= ret;
        }
        return 0;
}

static int
vhost_scsi_map_iov_to_prot(struct tcm_vhost_cmd *cmd,
                           struct iovec *iov,
                           int niov,
                           bool write)
{
        struct scatterlist *prot_sg = cmd->tvc_prot_sgl;
        unsigned int prot_sgl_count = 0;
        int ret, i;

        for (i = 0; i < niov; i++)
                prot_sgl_count += iov_num_pages(&iov[i]);

        if (prot_sgl_count > TCM_VHOST_PREALLOC_PROT_SGLS) {
                pr_err("vhost_scsi_map_iov_to_prot() sgl_count: %u greater than"
                        " preallocated TCM_VHOST_PREALLOC_PROT_SGLS: %u\n",
                        prot_sgl_count, TCM_VHOST_PREALLOC_PROT_SGLS);
                return -ENOBUFS;
        }

        pr_debug("%s prot_sg %p prot_sgl_count %u\n", __func__,
                 prot_sg, prot_sgl_count);
        sg_init_table(prot_sg, prot_sgl_count);
        cmd->tvc_prot_sgl_count = prot_sgl_count;

        for (i = 0; i < niov; i++) {
                ret = vhost_scsi_map_to_sgl(cmd, prot_sg, prot_sgl_count, &iov[i],
                                            cmd->tvc_upages, write);
                if (ret < 0) {
                        for (i = 0; i < cmd->tvc_prot_sgl_count; i++)
                                put_page(sg_page(&cmd->tvc_prot_sgl[i]));

                        cmd->tvc_prot_sgl_count = 0;
                        return ret;
                }
                prot_sg += ret;
                prot_sgl_count -= ret;
        }
        return 0;
}

static void tcm_vhost_submission_work(struct work_struct *work)
{
        struct tcm_vhost_cmd *cmd =
                container_of(work, struct tcm_vhost_cmd, work);
        struct tcm_vhost_nexus *tv_nexus;
        struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
        struct scatterlist *sg_ptr, *sg_prot_ptr = NULL;
        int rc;

        /* FIXME: BIDI operation */
        if (cmd->tvc_sgl_count) {
                sg_ptr = cmd->tvc_sgl;

                if (cmd->tvc_prot_sgl_count)
                        sg_prot_ptr = cmd->tvc_prot_sgl;
                else
                        se_cmd->prot_pto = true;
        } else {
                sg_ptr = NULL;
        }
        tv_nexus = cmd->tvc_nexus;

        rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess,
                        cmd->tvc_cdb, &cmd->tvc_sense_buf[0],
                        cmd->tvc_lun, cmd->tvc_exp_data_len,
                        cmd->tvc_task_attr, cmd->tvc_data_direction,
                        TARGET_SCF_ACK_KREF, sg_ptr, cmd->tvc_sgl_count,
                        NULL, 0, sg_prot_ptr, cmd->tvc_prot_sgl_count);
        if (rc < 0) {
                transport_send_check_condition_and_sense(se_cmd,
                                TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
                transport_generic_free_cmd(se_cmd, 0);
        }
}

static void
vhost_scsi_send_bad_target(struct vhost_scsi *vs,
                           struct vhost_virtqueue *vq,
                           int head, unsigned out)
{
        struct virtio_scsi_cmd_resp __user *resp;
        struct virtio_scsi_cmd_resp rsp;
        int ret;

        memset(&rsp, 0, sizeof(rsp));
        rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
        resp = vq->iov[out].iov_base;
        ret = __copy_to_user(resp, &rsp, sizeof(rsp));
        if (!ret)
                vhost_add_used_and_signal(&vs->dev, vq, head, 0);
        else
                pr_err("Faulted on virtio_scsi_cmd_resp\n");
}

static void
vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
{
        struct tcm_vhost_tpg **vs_tpg;
        struct virtio_scsi_cmd_req v_req;
        struct virtio_scsi_cmd_req_pi v_req_pi;
        struct tcm_vhost_tpg *tpg;
        struct tcm_vhost_cmd *cmd;
        u64 tag;
        u32 exp_data_len, data_first, data_num, data_direction, prot_first;
        unsigned out, in, i;
        int head, ret, data_niov, prot_niov, prot_bytes;
        size_t req_size;
        u16 lun;
        u8 *target, *lunp, task_attr;
        bool hdr_pi;
        void *req, *cdb;

        mutex_lock(&vq->mutex);
        /*
         * We can handle the vq only after the endpoint is set up by calling
         * the VHOST_SCSI_SET_ENDPOINT ioctl.
         */
        vs_tpg = vq->private_data;
        if (!vs_tpg)
                goto out;

        vhost_disable_notify(&vs->dev, vq);

        for (;;) {
                head = vhost_get_vq_desc(vq, vq->iov,
                                        ARRAY_SIZE(vq->iov), &out, &in,
                                        NULL, NULL);
                pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
                                        head, out, in);
                /* On error, stop handling until the next kick. */
                if (unlikely(head < 0))
                        break;
                /* Nothing new?  Wait for eventfd to tell us they refilled. */
                if (head == vq->num) {
                        if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
                                vhost_disable_notify(&vs->dev, vq);
                                continue;
                        }
                        break;
                }

                /* FIXME: BIDI operation */
                if (out == 1 && in == 1) {
                        data_direction = DMA_NONE;
                        data_first = 0;
                        data_num = 0;
                } else if (out == 1 && in > 1) {
                        data_direction = DMA_FROM_DEVICE;
                        data_first = out + 1;
                        data_num = in - 1;
                } else if (out > 1 && in == 1) {
                        data_direction = DMA_TO_DEVICE;
                        data_first = 1;
                        data_num = out - 1;
                } else {
                        vq_err(vq, "Invalid buffer layout out: %u in: %u\n",
                                        out, in);
                        break;
                }
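
                /*
                 * Descriptor layout recap: iov[0] is always the
                 * virtio_scsi_cmd_req header and iov[out] the first
                 * guest-writable descriptor, i.e. the virtio_scsi_cmd_resp.
                 * Write payloads occupy the remaining readable descriptors
                 * (1 .. out - 1), read payloads the writable ones after the
                 * response (out + 1 .. out + in - 1), matching the
                 * data_first/data_num values chosen above.
                 */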

                /*
                 * Check for a sane resp buffer so we can report errors to
                 * the guest.
                 */
                if (unlikely(vq->iov[out].iov_len !=
                                        sizeof(struct virtio_scsi_cmd_resp))) {
                        vq_err(vq, "Expecting virtio_scsi_cmd_resp, got %zu"
                                " bytes\n", vq->iov[out].iov_len);
                        break;
                }

                if (vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI)) {
                        req = &v_req_pi;
                        lunp = &v_req_pi.lun[0];
                        target = &v_req_pi.lun[1];
                        req_size = sizeof(v_req_pi);
                        hdr_pi = true;
                } else {
                        req = &v_req;
                        lunp = &v_req.lun[0];
                        target = &v_req.lun[1];
                        req_size = sizeof(v_req);
                        hdr_pi = false;
                }

                if (unlikely(vq->iov[0].iov_len < req_size)) {
                        pr_err("Expecting virtio-scsi header: %zu, got %zu\n",
                               req_size, vq->iov[0].iov_len);
                        break;
                }
                ret = memcpy_fromiovecend(req, &vq->iov[0], 0, req_size);
                if (unlikely(ret)) {
                        vq_err(vq, "Faulted on virtio_scsi_cmd_req\n");
                        break;
                }

                /* virtio-scsi spec requires byte 0 of the lun to be 1 */
                if (unlikely(*lunp != 1)) {
                        vhost_scsi_send_bad_target(vs, vq, head, out);
                        continue;
                }

                tpg = ACCESS_ONCE(vs_tpg[*target]);

                /* Target does not exist, fail the request */
                if (unlikely(!tpg)) {
                        vhost_scsi_send_bad_target(vs, vq, head, out);
                        continue;
                }

                data_niov = data_num;
                prot_niov = prot_first = prot_bytes = 0;
                /*
                 * Determine if any protection information iovecs are preceding
                 * the actual data payload, and adjust data_first + data_niov
                 * values accordingly for vhost_scsi_map_iov_to_sgl() below.
                 *
                 * Also extract virtio_scsi header bits for vhost_scsi_get_tag()
                 */
                if (hdr_pi) {
                        if (v_req_pi.pi_bytesout) {
                                if (data_direction != DMA_TO_DEVICE) {
                                        vq_err(vq, "Received non zero do_pi_niov"
                                                ", but wrong data_direction\n");
                                        goto err_cmd;
                                }
                                prot_bytes = v_req_pi.pi_bytesout;
                        } else if (v_req_pi.pi_bytesin) {
                                if (data_direction != DMA_FROM_DEVICE) {
                                        vq_err(vq, "Received non zero di_pi_niov"
                                                ", but wrong data_direction\n");
                                        goto err_cmd;
                                }
                                prot_bytes = v_req_pi.pi_bytesin;
                        }
                        if (prot_bytes) {
                                int tmp = 0;

                                for (i = 0; i < data_num; i++) {
                                        tmp += vq->iov[data_first + i].iov_len;
                                        prot_niov++;
                                        if (tmp >= prot_bytes)
                                                break;
                                }
                                prot_first = data_first;
                                data_first += prot_niov;
                                data_niov = data_num - prot_niov;
                        }
                        tag = v_req_pi.tag;
                        task_attr = v_req_pi.task_attr;
                        cdb = &v_req_pi.cdb[0];
                        lun = ((v_req_pi.lun[2] << 8) | v_req_pi.lun[3]) & 0x3FFF;
                } else {
                        tag = v_req.tag;
                        task_attr = v_req.task_attr;
                        cdb = &v_req.cdb[0];
                        lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;
                }
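
                /*
                 * The 8-byte virtio-scsi LUN field encodes byte 0 == 1
                 * (checked above), byte 1 == target id, and bytes 2-3 the
                 * LUN in SAM flat addressing format, so only the low 14
                 * bits are kept here.
                 */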
                exp_data_len = 0;
                for (i = 0; i < data_niov; i++)
                        exp_data_len += vq->iov[data_first + i].iov_len;
                /*
                 * Check that the received CDB size does not exceed our
                 * hardcoded max for vhost-scsi
                 *
                 * TODO what if cdb was too small for varlen cdb header?
                 */
                if (unlikely(scsi_command_size(cdb) > TCM_VHOST_MAX_CDB_SIZE)) {
                        vq_err(vq, "Received SCSI CDB with command_size: %d that"
                                " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
                                scsi_command_size(cdb), TCM_VHOST_MAX_CDB_SIZE);
                        goto err_cmd;
                }

                cmd = vhost_scsi_get_tag(vq, tpg, cdb, tag, lun, task_attr,
                                         exp_data_len + prot_bytes,
                                         data_direction);
                if (IS_ERR(cmd)) {
                        vq_err(vq, "vhost_scsi_get_tag failed %ld\n",
                                        PTR_ERR(cmd));
                        goto err_cmd;
                }

                pr_debug("Allocated tv_cmd: %p exp_data_len: %d, data_direction"
                        ": %d\n", cmd, exp_data_len, data_direction);

                cmd->tvc_vhost = vs;
                cmd->tvc_vq = vq;
                cmd->tvc_resp = vq->iov[out].iov_base;

                pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
                        cmd->tvc_cdb[0], cmd->tvc_lun);

                if (prot_niov) {
                        ret = vhost_scsi_map_iov_to_prot(cmd,
                                        &vq->iov[prot_first], prot_niov,
                                        data_direction == DMA_FROM_DEVICE);
                        if (unlikely(ret)) {
                                vq_err(vq, "Failed to map iov to"
                                        " prot_sgl\n");
                                goto err_free;
                        }
                }
                if (data_direction != DMA_NONE) {
                        ret = vhost_scsi_map_iov_to_sgl(cmd,
                                        &vq->iov[data_first], data_niov,
                                        data_direction == DMA_FROM_DEVICE);
                        if (unlikely(ret)) {
                                vq_err(vq, "Failed to map iov to sgl\n");
                                goto err_free;
                        }
                }
                /*
                 * Save the descriptor from vhost_get_vq_desc() to be used to
                 * complete the virtio-scsi request in TCM callback context via
                 * tcm_vhost_queue_data_in() and tcm_vhost_queue_status()
                 */
                cmd->tvc_vq_desc = head;
                /*
                 * Dispatch tv_cmd descriptor for cmwq execution in process
                 * context provided by tcm_vhost_workqueue.  This also ensures
                 * tv_cmd is executed on the same kworker CPU as this vhost
                 * thread to gain positive L2 cache locality effects.
                 */
                INIT_WORK(&cmd->work, tcm_vhost_submission_work);
                queue_work(tcm_vhost_workqueue, &cmd->work);
        }

        mutex_unlock(&vq->mutex);
        return;

err_free:
        vhost_scsi_free_cmd(cmd);
err_cmd:
        vhost_scsi_send_bad_target(vs, vq, head, out);
out:
        mutex_unlock(&vq->mutex);
}

static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
{
        pr_debug("%s: The handling func for control queue.\n", __func__);
}

static void
tcm_vhost_send_evt(struct vhost_scsi *vs,
                   struct tcm_vhost_tpg *tpg,
                   struct se_lun *lun,
                   u32 event,
                   u32 reason)
{
        struct tcm_vhost_evt *evt;

        evt = tcm_vhost_allocate_evt(vs, event, reason);
        if (!evt)
                return;

        if (tpg && lun) {
                /* TODO: share lun setup code with virtio-scsi.ko */
                /*
                 * Note: evt->event is zeroed when we allocate it and
                 * lun[4-7] need to be zero according to virtio-scsi spec.
                 */
                evt->event.lun[0] = 0x01;
                evt->event.lun[1] = tpg->tport_tpgt & 0xFF;
                if (lun->unpacked_lun >= 256)
                        evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40;
                evt->event.lun[3] = lun->unpacked_lun & 0xFF;
        }

        llist_add(&evt->list, &vs->vs_event_list);
        vhost_work_queue(&vs->dev, &vs->vs_event_work);
}

static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
{
        struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
                                                poll.work);
        struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);

        mutex_lock(&vq->mutex);
        if (!vq->private_data)
                goto out;

        if (vs->vs_events_missed)
                tcm_vhost_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
out:
        mutex_unlock(&vq->mutex);
}

static void vhost_scsi_handle_kick(struct vhost_work *work)
{
        struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
                                                poll.work);
        struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);

        vhost_scsi_handle_vq(vs, vq);
}

static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
{
        vhost_poll_flush(&vs->vqs[index].vq.poll);
}

/* Callers must hold dev mutex */
static void vhost_scsi_flush(struct vhost_scsi *vs)
{
        struct vhost_scsi_inflight *old_inflight[VHOST_SCSI_MAX_VQ];
        int i;

        /* Init new inflight and remember the old inflight */
        tcm_vhost_init_inflight(vs, old_inflight);

        /*
         * The inflight->kref was initialized to 1. We decrement it here to
         * indicate the start of the flush operation so that it will reach 0
         * when all the reqs are finished.
         */
        for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
                kref_put(&old_inflight[i]->kref, tcm_vhost_done_inflight);

        /* Flush both the vhost poll and vhost work */
        for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
                vhost_scsi_flush_vq(vs, i);
        vhost_work_flush(&vs->dev, &vs->vs_completion_work);
        vhost_work_flush(&vs->dev, &vs->vs_event_work);

        /* Wait for all reqs issued before the flush to be finished */
        for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
                wait_for_completion(&old_inflight[i]->comp);
}

/*
 * Called from vhost_scsi_ioctl() context to walk the list of available
 * tcm_vhost_tpg with an active struct tcm_vhost_nexus
 *
 *  The lock nesting rule is:
 *    tcm_vhost_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex
 */
static int
vhost_scsi_set_endpoint(struct vhost_scsi *vs,
                        struct vhost_scsi_target *t)
{
        struct tcm_vhost_tport *tv_tport;
        struct tcm_vhost_tpg *tpg;
        struct tcm_vhost_tpg **vs_tpg;
        struct vhost_virtqueue *vq;
        int index, ret, i, len;
        bool match = false;

        mutex_lock(&tcm_vhost_mutex);
        mutex_lock(&vs->dev.mutex);

        /* Verify that the ring has been set up correctly. */
        for (index = 0; index < vs->dev.nvqs; ++index) {
                if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
                        ret = -EFAULT;
                        goto out;
                }
        }

        len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
        vs_tpg = kzalloc(len, GFP_KERNEL);
        if (!vs_tpg) {
                ret = -ENOMEM;
                goto out;
        }
        if (vs->vs_tpg)
                memcpy(vs_tpg, vs->vs_tpg, len);

        list_for_each_entry(tpg, &tcm_vhost_list, tv_tpg_list) {
                mutex_lock(&tpg->tv_tpg_mutex);
                if (!tpg->tpg_nexus) {
                        mutex_unlock(&tpg->tv_tpg_mutex);
                        continue;
                }
                if (tpg->tv_tpg_vhost_count != 0) {
                        mutex_unlock(&tpg->tv_tpg_mutex);
                        continue;
                }
                tv_tport = tpg->tport;

                if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
                        if (vs->vs_tpg && vs->vs_tpg[tpg->tport_tpgt]) {
                                kfree(vs_tpg);
                                mutex_unlock(&tpg->tv_tpg_mutex);
                                ret = -EEXIST;
                                goto out;
                        }
                        tpg->tv_tpg_vhost_count++;
                        tpg->vhost_scsi = vs;
                        vs_tpg[tpg->tport_tpgt] = tpg;
                        smp_mb__after_atomic();
                        match = true;
                }
                mutex_unlock(&tpg->tv_tpg_mutex);
        }

        if (match) {
                memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
                       sizeof(vs->vs_vhost_wwpn));
                for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
                        vq = &vs->vqs[i].vq;
                        mutex_lock(&vq->mutex);
                        vq->private_data = vs_tpg;
                        vhost_init_used(vq);
                        mutex_unlock(&vq->mutex);
                }
                ret = 0;
        } else {
                ret = -EEXIST;
        }

        /*
         * Act as synchronize_rcu to make sure access to
         * old vs->vs_tpg is finished.
         */
        vhost_scsi_flush(vs);
        kfree(vs->vs_tpg);
        vs->vs_tpg = vs_tpg;

out:
        mutex_unlock(&vs->dev.mutex);
        mutex_unlock(&tcm_vhost_mutex);
        return ret;
}
1399
1400 static int
1401 vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
1402                           struct vhost_scsi_target *t)
1403 {
1404         struct tcm_vhost_tport *tv_tport;
1405         struct tcm_vhost_tpg *tpg;
1406         struct vhost_virtqueue *vq;
1407         bool match = false;
1408         int index, ret, i;
1409         u8 target;
1410
1411         mutex_lock(&tcm_vhost_mutex);
1412         mutex_lock(&vs->dev.mutex);
1413         /* Verify that ring has been setup correctly. */
1414         for (index = 0; index < vs->dev.nvqs; ++index) {
1415                 if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
1416                         ret = -EFAULT;
1417                         goto err_dev;
1418                 }
1419         }
1420
1421         if (!vs->vs_tpg) {
1422                 ret = 0;
1423                 goto err_dev;
1424         }
1425
1426         for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
1427                 target = i;
1428                 tpg = vs->vs_tpg[target];
1429                 if (!tpg)
1430                         continue;
1431
1432                 mutex_lock(&tpg->tv_tpg_mutex);
1433                 tv_tport = tpg->tport;
1434                 if (!tv_tport) {
1435                         ret = -ENODEV;
1436                         goto err_tpg;
1437                 }
1438
1439                 if (strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
1440                         pr_warn("tv_tport->tport_name: %s, tpg->tport_tpgt: %hu"
1441                                 " does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n",
1442                                 tv_tport->tport_name, tpg->tport_tpgt,
1443                                 t->vhost_wwpn, t->vhost_tpgt);
1444                         ret = -EINVAL;
1445                         goto err_tpg;
1446                 }
1447                 tpg->tv_tpg_vhost_count--;
1448                 tpg->vhost_scsi = NULL;
1449                 vs->vs_tpg[target] = NULL;
1450                 match = true;
1451                 mutex_unlock(&tpg->tv_tpg_mutex);
1452         }
1453         if (match) {
1454                 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
1455                         vq = &vs->vqs[i].vq;
1456                         mutex_lock(&vq->mutex);
1457                         vq->private_data = NULL;
1458                         mutex_unlock(&vq->mutex);
1459                 }
1460         }
1461         /*
1462          * Acts as synchronize_rcu(): ensure all in-flight accesses
1463          * to the old vs->vs_tpg have finished before it is freed.
1464          */
1465         vhost_scsi_flush(vs);
1466         kfree(vs->vs_tpg);
1467         vs->vs_tpg = NULL;
1468         WARN_ON(vs->vs_events_nr);
1469         mutex_unlock(&vs->dev.mutex);
1470         mutex_unlock(&tcm_vhost_mutex);
1471         return 0;
1472
1473 err_tpg:
1474         mutex_unlock(&tpg->tv_tpg_mutex);
1475 err_dev:
1476         mutex_unlock(&vs->dev.mutex);
1477         mutex_unlock(&tcm_vhost_mutex);
1478         return ret;
1479 }
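
/*
 * Teardown mirror of the above, again only as a caller sketch: pass
 * the same struct vhost_scsi_target that was used to set the endpoint.
 *
 *	ioctl(vhost_fd, VHOST_SCSI_CLEAR_ENDPOINT, &backend);
 *
 * As in vhost_scsi_set_endpoint(), vhost_scsi_flush() stands in for
 * synchronize_rcu() before the old vs_tpg array is freed.
 */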
1480
1481 static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
1482 {
1483         struct vhost_virtqueue *vq;
1484         int i;
1485
1486         if (features & ~VHOST_SCSI_FEATURES)
1487                 return -EOPNOTSUPP;
1488
1489         mutex_lock(&vs->dev.mutex);
1490         if ((features & (1 << VHOST_F_LOG_ALL)) &&
1491             !vhost_log_access_ok(&vs->dev)) {
1492                 mutex_unlock(&vs->dev.mutex);
1493                 return -EFAULT;
1494         }
1495
1496         for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
1497                 vq = &vs->vqs[i].vq;
1498                 mutex_lock(&vq->mutex);
1499                 vq->acked_features = features;
1500                 mutex_unlock(&vq->mutex);
1501         }
1502         mutex_unlock(&vs->dev.mutex);
1503         return 0;
1504 }
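
/*
 * Feature negotiation sketch (vhost_fd as above): userspace reads the
 * offered mask, intersects it with what the device model supports
 * (model_features below is hypothetical), and writes the result back.
 *
 *	__u64 features;
 *
 *	ioctl(vhost_fd, VHOST_GET_FEATURES, &features);
 *	features &= model_features;
 *	ioctl(vhost_fd, VHOST_SET_FEATURES, &features);
 *
 * Bits outside VHOST_SCSI_FEATURES are rejected with -EOPNOTSUPP
 * before any per-virtqueue state is touched.
 */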
1505
1506 static int vhost_scsi_open(struct inode *inode, struct file *f)
1507 {
1508         struct vhost_scsi *vs;
1509         struct vhost_virtqueue **vqs;
1510         int r = -ENOMEM, i;
1511
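        /*
         * Prefer a physically contiguous allocation but fall back to
         * vmalloc() under memory pressure: __GFP_NOWARN silences the
         * expected kzalloc() failure and __GFP_REPEAT asks the page
         * allocator to try a bit harder first.  kvfree() handles
         * either allocation on the error and release paths.
         */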
1512         vs = kzalloc(sizeof(*vs), GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
1513         if (!vs) {
1514                 vs = vzalloc(sizeof(*vs));
1515                 if (!vs)
1516                         goto err_vs;
1517         }
1518
1519         vqs = kmalloc(VHOST_SCSI_MAX_VQ * sizeof(*vqs), GFP_KERNEL);
1520         if (!vqs)
1521                 goto err_vqs;
1522
1523         vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work);
1524         vhost_work_init(&vs->vs_event_work, tcm_vhost_evt_work);
1525
1526         vs->vs_events_nr = 0;
1527         vs->vs_events_missed = false;
1528
1529         vqs[VHOST_SCSI_VQ_CTL] = &vs->vqs[VHOST_SCSI_VQ_CTL].vq;
1530         vqs[VHOST_SCSI_VQ_EVT] = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1531         vs->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick;
1532         vs->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick;
1533         for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) {
1534                 vqs[i] = &vs->vqs[i].vq;
1535                 vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
1536         }
1537         vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ);
1538
1539         tcm_vhost_init_inflight(vs, NULL);
1540
1541         f->private_data = vs;
1542         return 0;
1543
1544 err_vqs:
1545         kvfree(vs);
1546 err_vs:
1547         return r;
1548 }
1549
1550 static int vhost_scsi_release(struct inode *inode, struct file *f)
1551 {
1552         struct vhost_scsi *vs = f->private_data;
1553         struct vhost_scsi_target t;
1554
1555         mutex_lock(&vs->dev.mutex);
1556         memcpy(t.vhost_wwpn, vs->vs_vhost_wwpn, sizeof(t.vhost_wwpn));
1557         mutex_unlock(&vs->dev.mutex);
1558         vhost_scsi_clear_endpoint(vs, &t);
1559         vhost_dev_stop(&vs->dev);
1560         vhost_dev_cleanup(&vs->dev, false);
1561         /* Jobs can re-queue themselves in evt kick handler. Do extra flush. */
1562         vhost_scsi_flush(vs);
1563         kfree(vs->dev.vqs);
1564         kvfree(vs);
1565         return 0;
1566 }
1567
1568 static long
1569 vhost_scsi_ioctl(struct file *f,
1570                  unsigned int ioctl,
1571                  unsigned long arg)
1572 {
1573         struct vhost_scsi *vs = f->private_data;
1574         struct vhost_scsi_target backend;
1575         void __user *argp = (void __user *)arg;
1576         u64 __user *featurep = argp;
1577         u32 __user *eventsp = argp;
1578         u32 events_missed;
1579         u64 features;
1580         int r, abi_version = VHOST_SCSI_ABI_VERSION;
1581         struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1582
1583         switch (ioctl) {
1584         case VHOST_SCSI_SET_ENDPOINT:
1585                 if (copy_from_user(&backend, argp, sizeof backend))
1586                         return -EFAULT;
1587                 if (backend.reserved != 0)
1588                         return -EOPNOTSUPP;
1589
1590                 return vhost_scsi_set_endpoint(vs, &backend);
1591         case VHOST_SCSI_CLEAR_ENDPOINT:
1592                 if (copy_from_user(&backend, argp, sizeof backend))
1593                         return -EFAULT;
1594                 if (backend.reserved != 0)
1595                         return -EOPNOTSUPP;
1596
1597                 return vhost_scsi_clear_endpoint(vs, &backend);
1598         case VHOST_SCSI_GET_ABI_VERSION:
1599                 if (copy_to_user(argp, &abi_version, sizeof abi_version))
1600                         return -EFAULT;
1601                 return 0;
1602         case VHOST_SCSI_SET_EVENTS_MISSED:
1603                 if (get_user(events_missed, eventsp))
1604                         return -EFAULT;
1605                 mutex_lock(&vq->mutex);
1606                 vs->vs_events_missed = events_missed;
1607                 mutex_unlock(&vq->mutex);
1608                 return 0;
1609         case VHOST_SCSI_GET_EVENTS_MISSED:
1610                 mutex_lock(&vq->mutex);
1611                 events_missed = vs->vs_events_missed;
1612                 mutex_unlock(&vq->mutex);
1613                 if (put_user(events_missed, eventsp))
1614                         return -EFAULT;
1615                 return 0;
1616         case VHOST_GET_FEATURES:
1617                 features = VHOST_SCSI_FEATURES;
1618                 if (copy_to_user(featurep, &features, sizeof features))
1619                         return -EFAULT;
1620                 return 0;
1621         case VHOST_SET_FEATURES:
1622                 if (copy_from_user(&features, featurep, sizeof features))
1623                         return -EFAULT;
1624                 return vhost_scsi_set_features(vs, features);
1625         default:
1626                 mutex_lock(&vs->dev.mutex);
1627                 r = vhost_dev_ioctl(&vs->dev, ioctl, argp);
1628                 /* TODO: flush backend after dev ioctl. */
1629                 if (r == -ENOIOCTLCMD)
1630                         r = vhost_vring_ioctl(&vs->dev, ioctl, argp);
1631                 mutex_unlock(&vs->dev.mutex);
1632                 return r;
1633         }
1634 }
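
/*
 * End-to-end ioctl ordering, as a hypothetical caller sketch (QEMU's
 * vhost-scsi backend follows roughly this shape):
 *
 *	int vhost_fd = open("/dev/vhost-scsi", O_RDWR);
 *	int abi;
 *
 *	ioctl(vhost_fd, VHOST_SCSI_GET_ABI_VERSION, &abi);
 *	ioctl(vhost_fd, VHOST_SET_OWNER, NULL);
 *	... VHOST_SET_MEM_TABLE and per-queue VHOST_SET_VRING_* calls ...
 *	ioctl(vhost_fd, VHOST_SCSI_SET_ENDPOINT, &backend);
 *	...
 *	ioctl(vhost_fd, VHOST_SCSI_CLEAR_ENDPOINT, &backend);
 *
 * Anything not handled in the switch above falls through to
 * vhost_dev_ioctl() and, on -ENOIOCTLCMD, vhost_vring_ioctl().
 */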
1635
1636 #ifdef CONFIG_COMPAT
1637 static long vhost_scsi_compat_ioctl(struct file *f, unsigned int ioctl,
1638                                 unsigned long arg)
1639 {
1640         return vhost_scsi_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
1641 }
1642 #endif
1643
1644 static const struct file_operations vhost_scsi_fops = {
1645         .owner          = THIS_MODULE,
1646         .release        = vhost_scsi_release,
1647         .unlocked_ioctl = vhost_scsi_ioctl,
1648 #ifdef CONFIG_COMPAT
1649         .compat_ioctl   = vhost_scsi_compat_ioctl,
1650 #endif
1651         .open           = vhost_scsi_open,
1652         .llseek         = noop_llseek,
1653 };
1654
1655 static struct miscdevice vhost_scsi_misc = {
1656         MISC_DYNAMIC_MINOR,
1657         "vhost-scsi",
1658         &vhost_scsi_fops,
1659 };
1660
1661 static int __init vhost_scsi_register(void)
1662 {
1663         return misc_register(&vhost_scsi_misc);
1664 }
1665
1666 static int vhost_scsi_deregister(void)
1667 {
1668         return misc_deregister(&vhost_scsi_misc);
1669 }
1670
1671 static char *tcm_vhost_dump_proto_id(struct tcm_vhost_tport *tport)
1672 {
1673         switch (tport->tport_proto_id) {
1674         case SCSI_PROTOCOL_SAS:
1675                 return "SAS";
1676         case SCSI_PROTOCOL_FCP:
1677                 return "FCP";
1678         case SCSI_PROTOCOL_ISCSI:
1679                 return "iSCSI";
1680         default:
1681                 break;
1682         }
1683
1684         return "Unknown";
1685 }
1686
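/*
 * Propagate a LUN hotplug or hotunplug to the guest as a virtio-scsi
 * transport reset event.  The event is only queued when the guest has
 * negotiated VIRTIO_SCSI_F_HOTPLUG; otherwise the change stays
 * invisible to it until the next rescan.
 */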
1687 static void
1688 tcm_vhost_do_plug(struct tcm_vhost_tpg *tpg,
1689                   struct se_lun *lun, bool plug)
1690 {
1692         struct vhost_scsi *vs = tpg->vhost_scsi;
1693         struct vhost_virtqueue *vq;
1694         u32 reason;
1695
1696         if (!vs)
1697                 return;
1698
1699         mutex_lock(&vs->dev.mutex);
1700
1701         if (plug)
1702                 reason = VIRTIO_SCSI_EVT_RESET_RESCAN;
1703         else
1704                 reason = VIRTIO_SCSI_EVT_RESET_REMOVED;
1705
1706         vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1707         mutex_lock(&vq->mutex);
1708         if (vhost_has_feature(vq, VIRTIO_SCSI_F_HOTPLUG))
1709                 tcm_vhost_send_evt(vs, tpg, lun,
1710                                    VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
1711         mutex_unlock(&vq->mutex);
1712         mutex_unlock(&vs->dev.mutex);
1713 }
1714
1715 static void tcm_vhost_hotplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun)
1716 {
1717         tcm_vhost_do_plug(tpg, lun, true);
1718 }
1719
1720 static void tcm_vhost_hotunplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun)
1721 {
1722         tcm_vhost_do_plug(tpg, lun, false);
1723 }
1724
1725 static int tcm_vhost_port_link(struct se_portal_group *se_tpg,
1726                                struct se_lun *lun)
1727 {
1728         struct tcm_vhost_tpg *tpg = container_of(se_tpg,
1729                                 struct tcm_vhost_tpg, se_tpg);
1730
1731         mutex_lock(&tcm_vhost_mutex);
1732
1733         mutex_lock(&tpg->tv_tpg_mutex);
1734         tpg->tv_tpg_port_count++;
1735         mutex_unlock(&tpg->tv_tpg_mutex);
1736
1737         tcm_vhost_hotplug(tpg, lun);
1738
1739         mutex_unlock(&tcm_vhost_mutex);
1740
1741         return 0;
1742 }
1743
1744 static void tcm_vhost_port_unlink(struct se_portal_group *se_tpg,
1745                                   struct se_lun *lun)
1746 {
1747         struct tcm_vhost_tpg *tpg = container_of(se_tpg,
1748                                 struct tcm_vhost_tpg, se_tpg);
1749
1750         mutex_lock(&tcm_vhost_mutex);
1751
1752         mutex_lock(&tpg->tv_tpg_mutex);
1753         tpg->tv_tpg_port_count--;
1754         mutex_unlock(&tpg->tv_tpg_mutex);
1755
1756         tcm_vhost_hotunplug(tpg, lun);
1757
1758         mutex_unlock(&tcm_vhost_mutex);
1759 }
1760
1761 static struct se_node_acl *
1762 tcm_vhost_make_nodeacl(struct se_portal_group *se_tpg,
1763                        struct config_group *group,
1764                        const char *name)
1765 {
1766         struct se_node_acl *se_nacl, *se_nacl_new;
1767         struct tcm_vhost_nacl *nacl;
1768         u64 wwpn = 0;
1769         u32 nexus_depth;
1770
1771         /* if (tcm_vhost_parse_wwn(name, &wwpn, 1) < 0)
1772                 return ERR_PTR(-EINVAL); */
1773         se_nacl_new = tcm_vhost_alloc_fabric_acl(se_tpg);
1774         if (!se_nacl_new)
1775                 return ERR_PTR(-ENOMEM);
1776
1777         nexus_depth = 1;
1778         /*
1779          * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
1780          * when converting a NodeACL from demo mode -> explicit
1781          */
1782         se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
1783                                 name, nexus_depth);
1784         if (IS_ERR(se_nacl)) {
1785                 tcm_vhost_release_fabric_acl(se_tpg, se_nacl_new);
1786                 return se_nacl;
1787         }
1788         /*
1789          * Locate our struct tcm_vhost_nacl and set the FC Nport WWPN
1790          */
1791         nacl = container_of(se_nacl, struct tcm_vhost_nacl, se_node_acl);
1792         nacl->iport_wwpn = wwpn;
1793
1794         return se_nacl;
1795 }
1796
1797 static void tcm_vhost_drop_nodeacl(struct se_node_acl *se_acl)
1798 {
1799         struct tcm_vhost_nacl *nacl = container_of(se_acl,
1800                                 struct tcm_vhost_nacl, se_node_acl);
1801         core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);
1802         kfree(nacl);
1803 }
1804
1805 static void tcm_vhost_free_cmd_map_res(struct tcm_vhost_nexus *nexus,
1806                                        struct se_session *se_sess)
1807 {
1808         struct tcm_vhost_cmd *tv_cmd;
1809         unsigned int i;
1810
1811         if (!se_sess->sess_cmd_map)
1812                 return;
1813
1814         for (i = 0; i < TCM_VHOST_DEFAULT_TAGS; i++) {
1815                 tv_cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[i];
1816
1817                 kfree(tv_cmd->tvc_sgl);
1818                 kfree(tv_cmd->tvc_prot_sgl);
1819                 kfree(tv_cmd->tvc_upages);
1820         }
1821 }
1822
1823 static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg,
1824                                 const char *name)
1825 {
1826         struct se_portal_group *se_tpg;
1827         struct se_session *se_sess;
1828         struct tcm_vhost_nexus *tv_nexus;
1829         struct tcm_vhost_cmd *tv_cmd;
1830         unsigned int i;
1831
1832         mutex_lock(&tpg->tv_tpg_mutex);
1833         if (tpg->tpg_nexus) {
1834                 mutex_unlock(&tpg->tv_tpg_mutex);
1835                 pr_debug("tpg->tpg_nexus already exists\n");
1836                 return -EEXIST;
1837         }
1838         se_tpg = &tpg->se_tpg;
1839
1840         tv_nexus = kzalloc(sizeof(struct tcm_vhost_nexus), GFP_KERNEL);
1841         if (!tv_nexus) {
1842                 mutex_unlock(&tpg->tv_tpg_mutex);
1843                 pr_err("Unable to allocate struct tcm_vhost_nexus\n");
1844                 return -ENOMEM;
1845         }
1846         /*
1847          * Initialize the struct se_session pointer and set up the tag pool
1848          * for struct tcm_vhost_cmd descriptors.
1849          */
1850         tv_nexus->tvn_se_sess = transport_init_session_tags(
1851                                         TCM_VHOST_DEFAULT_TAGS,
1852                                         sizeof(struct tcm_vhost_cmd),
1853                                         TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS);
1854         if (IS_ERR(tv_nexus->tvn_se_sess)) {
1855                 mutex_unlock(&tpg->tv_tpg_mutex);
1856                 kfree(tv_nexus);
1857                 return -ENOMEM;
1858         }
1859         se_sess = tv_nexus->tvn_se_sess;
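        /*
         * Pre-populate every tagged command descriptor with its
         * scatterlist, protection scatterlist and user-page arrays so
         * the I/O fast path never allocates; the tags themselves come
         * from the pool created by transport_init_session_tags() above.
         */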
1860         for (i = 0; i < TCM_VHOST_DEFAULT_TAGS; i++) {
1861                 tv_cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[i];
1862
1863                 tv_cmd->tvc_sgl = kzalloc(sizeof(struct scatterlist) *
1864                                         TCM_VHOST_PREALLOC_SGLS, GFP_KERNEL);
1865                 if (!tv_cmd->tvc_sgl) {
1866                         mutex_unlock(&tpg->tv_tpg_mutex);
1867                         pr_err("Unable to allocate tv_cmd->tvc_sgl\n");
1868                         goto out;
1869                 }
1870
1871                 tv_cmd->tvc_upages = kzalloc(sizeof(struct page *) *
1872                                         TCM_VHOST_PREALLOC_UPAGES, GFP_KERNEL);
1873                 if (!tv_cmd->tvc_upages) {
1874                         mutex_unlock(&tpg->tv_tpg_mutex);
1875                         pr_err("Unable to allocate tv_cmd->tvc_upages\n");
1876                         goto out;
1877                 }
1878
1879                 tv_cmd->tvc_prot_sgl = kzalloc(sizeof(struct scatterlist) *
1880                                         TCM_VHOST_PREALLOC_PROT_SGLS, GFP_KERNEL);
1881                 if (!tv_cmd->tvc_prot_sgl) {
1882                         mutex_unlock(&tpg->tv_tpg_mutex);
1883                         pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n");
1884                         goto out;
1885                 }
1886         }
1887         /*
1888          * Since we are running in 'demo mode' this call will generate a
1889          * struct se_node_acl for the tcm_vhost struct se_portal_group with
1890          * the SCSI Initiator port name of the passed configfs group 'name'.
1891          */
1892         tv_nexus->tvn_se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
1893                                 se_tpg, (unsigned char *)name);
1894         if (!tv_nexus->tvn_se_sess->se_node_acl) {
1895                 mutex_unlock(&tpg->tv_tpg_mutex);
1896                 pr_debug("core_tpg_check_initiator_node_acl() failed"
1897                                 " for %s\n", name);
1898                 goto out;
1899         }
1900         /*
1901          * Now register the TCM vhost virtual I_T Nexus as active with the
1902          * call to __transport_register_session()
1903          */
1904         __transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
1905                         tv_nexus->tvn_se_sess, tv_nexus);
1906         tpg->tpg_nexus = tv_nexus;
1907
1908         mutex_unlock(&tpg->tv_tpg_mutex);
1909         return 0;
1910
1911 out:
1912         tcm_vhost_free_cmd_map_res(tv_nexus, se_sess);
1913         transport_free_session(se_sess);
1914         kfree(tv_nexus);
1915         return -ENOMEM;
1916 }
1917
1918 static int tcm_vhost_drop_nexus(struct tcm_vhost_tpg *tpg)
1919 {
1920         struct se_session *se_sess;
1921         struct tcm_vhost_nexus *tv_nexus;
1922
1923         mutex_lock(&tpg->tv_tpg_mutex);
1924         tv_nexus = tpg->tpg_nexus;
1925         if (!tv_nexus) {
1926                 mutex_unlock(&tpg->tv_tpg_mutex);
1927                 return -ENODEV;
1928         }
1929
1930         se_sess = tv_nexus->tvn_se_sess;
1931         if (!se_sess) {
1932                 mutex_unlock(&tpg->tv_tpg_mutex);
1933                 return -ENODEV;
1934         }
1935
1936         if (tpg->tv_tpg_port_count != 0) {
1937                 mutex_unlock(&tpg->tv_tpg_mutex);
1938                 pr_err("Unable to remove TCM_vhost I_T Nexus with"
1939                         " active TPG port count: %d\n",
1940                         tpg->tv_tpg_port_count);
1941                 return -EBUSY;
1942         }
1943
1944         if (tpg->tv_tpg_vhost_count != 0) {
1945                 mutex_unlock(&tpg->tv_tpg_mutex);
1946                 pr_err("Unable to remove TCM_vhost I_T Nexus with"
1947                         " active TPG vhost count: %d\n",
1948                         tpg->tv_tpg_vhost_count);
1949                 return -EBUSY;
1950         }
1951
1952         pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated"
1953                 " %s Initiator Port: %s\n", tcm_vhost_dump_proto_id(tpg->tport),
1954                 tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
1955
1956         tcm_vhost_free_cmd_map_res(tv_nexus, se_sess);
1957         /*
1958          * Release the SCSI I_T Nexus to the emulated vhost Target Port
1959          */
1960         transport_deregister_session(tv_nexus->tvn_se_sess);
1961         tpg->tpg_nexus = NULL;
1962         mutex_unlock(&tpg->tv_tpg_mutex);
1963
1964         kfree(tv_nexus);
1965         return 0;
1966 }
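
/*
 * Reached from configfs when "NULL" is written to the nexus attribute;
 * an illustrative shell invocation (example WWPN):
 *
 *	echo NULL > /sys/kernel/config/target/vhost/naa.60014052cc816eb4/tpgt_1/nexus
 */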
1967
1968 static ssize_t tcm_vhost_tpg_show_nexus(struct se_portal_group *se_tpg,
1969                                         char *page)
1970 {
1971         struct tcm_vhost_tpg *tpg = container_of(se_tpg,
1972                                 struct tcm_vhost_tpg, se_tpg);
1973         struct tcm_vhost_nexus *tv_nexus;
1974         ssize_t ret;
1975
1976         mutex_lock(&tpg->tv_tpg_mutex);
1977         tv_nexus = tpg->tpg_nexus;
1978         if (!tv_nexus) {
1979                 mutex_unlock(&tpg->tv_tpg_mutex);
1980                 return -ENODEV;
1981         }
1982         ret = snprintf(page, PAGE_SIZE, "%s\n",
1983                         tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
1984         mutex_unlock(&tpg->tv_tpg_mutex);
1985
1986         return ret;
1987 }
1988
1989 static ssize_t tcm_vhost_tpg_store_nexus(struct se_portal_group *se_tpg,
1990                                          const char *page,
1991                                          size_t count)
1992 {
1993         struct tcm_vhost_tpg *tpg = container_of(se_tpg,
1994                                 struct tcm_vhost_tpg, se_tpg);
1995         struct tcm_vhost_tport *tport_wwn = tpg->tport;
1996         unsigned char i_port[TCM_VHOST_NAMELEN], *ptr, *port_ptr;
1997         int ret;
1998         /*
1999          * Shut down the active I_T nexus if 'NULL' is passed.
2000          */
2001         if (!strncmp(page, "NULL", 4)) {
2002                 ret = tcm_vhost_drop_nexus(tpg);
2003                 return (!ret) ? count : ret;
2004         }
2005         /*
2006          * Otherwise make sure the passed virtual Initiator port WWN matches
2007          * the fabric protocol_id set in tcm_vhost_make_tport(), and call
2008          * tcm_vhost_make_nexus().
2009          */
2010         if (strlen(page) >= TCM_VHOST_NAMELEN) {
2011                 pr_err("Emulated Initiator Port WWN: %s, exceeds"
2012                                 " max: %d\n", page, TCM_VHOST_NAMELEN);
2013                 return -EINVAL;
2014         }
2015         snprintf(&i_port[0], TCM_VHOST_NAMELEN, "%s", page);
2016
2017         ptr = strstr(i_port, "naa.");
2018         if (ptr) {
2019                 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) {
2020                         pr_err("Passed SAS Initiator Port %s does not"
2021                                 " match target port protoid: %s\n", i_port,
2022                                 tcm_vhost_dump_proto_id(tport_wwn));
2023                         return -EINVAL;
2024                 }
2025                 port_ptr = &i_port[0];
2026                 goto check_newline;
2027         }
2028         ptr = strstr(i_port, "fc.");
2029         if (ptr) {
2030                 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) {
2031                         pr_err("Passed FCP Initiator Port %s does not"
2032                                 " match target port protoid: %s\n", i_port,
2033                                 tcm_vhost_dump_proto_id(tport_wwn));
2034                         return -EINVAL;
2035                 }
2036                 port_ptr = &i_port[3]; /* Skip over "fc." */
2037                 goto check_newline;
2038         }
2039         ptr = strstr(i_port, "iqn.");
2040         if (ptr) {
2041                 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) {
2042                         pr_err("Passed iSCSI Initiator Port %s does not"
2043                                 " match target port protoid: %s\n", i_port,
2044                                 tcm_vhost_dump_proto_id(tport_wwn));
2045                         return -EINVAL;
2046                 }
2047                 port_ptr = &i_port[0];
2048                 goto check_newline;
2049         }
2050         pr_err("Unable to locate prefix for emulated Initiator Port:"
2051                         " %s\n", i_port);
2052         return -EINVAL;
2053         /*
2054          * Clear any trailing newline for the NAA WWN
2055          */
2056 check_newline:
2057         if (i_port[strlen(i_port)-1] == '\n')
2058                 i_port[strlen(i_port)-1] = '\0';
2059
2060         ret = tcm_vhost_make_nexus(tpg, port_ptr);
2061         if (ret < 0)
2062                 return ret;
2063
2064         return count;
2065 }
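
/*
 * ConfigFS usage sketch for the nexus attribute handled above (paths
 * assume the standard configfs mount point; the SAS WWPN is only an
 * example):
 *
 *	mkdir -p /sys/kernel/config/target/vhost/naa.60014052cc816eb4/tpgt_1
 *	echo -n naa.60014052cc816eb4 > \
 *		/sys/kernel/config/target/vhost/naa.60014052cc816eb4/tpgt_1/nexus
 *
 * The write lands in tcm_vhost_tpg_store_nexus(), which checks the
 * prefix against the tport protocol and calls tcm_vhost_make_nexus().
 */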
2066
2067 TF_TPG_BASE_ATTR(tcm_vhost, nexus, S_IRUGO | S_IWUSR);
2068
2069 static struct configfs_attribute *tcm_vhost_tpg_attrs[] = {
2070         &tcm_vhost_tpg_nexus.attr,
2071         NULL,
2072 };
2073
2074 static struct se_portal_group *
2075 tcm_vhost_make_tpg(struct se_wwn *wwn,
2076                    struct config_group *group,
2077                    const char *name)
2078 {
2079         struct tcm_vhost_tport *tport = container_of(wwn,
2080                         struct tcm_vhost_tport, tport_wwn);
2081
2082         struct tcm_vhost_tpg *tpg;
2083         unsigned long tpgt;
2084         int ret;
2085
2086         if (strstr(name, "tpgt_") != name)
2087                 return ERR_PTR(-EINVAL);
2088         if (kstrtoul(name + 5, 10, &tpgt) || tpgt >= VHOST_SCSI_MAX_TARGET)
2089                 return ERR_PTR(-EINVAL);
2090
2091         tpg = kzalloc(sizeof(struct tcm_vhost_tpg), GFP_KERNEL);
2092         if (!tpg) {
2093                 pr_err("Unable to allocate struct tcm_vhost_tpg\n");
2094                 return ERR_PTR(-ENOMEM);
2095         }
2096         mutex_init(&tpg->tv_tpg_mutex);
2097         INIT_LIST_HEAD(&tpg->tv_tpg_list);
2098         tpg->tport = tport;
2099         tpg->tport_tpgt = tpgt;
2100
2101         ret = core_tpg_register(&tcm_vhost_fabric_configfs->tf_ops, wwn,
2102                                 &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
2103         if (ret < 0) {
2104                 kfree(tpg);
2105                 return ERR_PTR(ret);
2106         }
2107         mutex_lock(&tcm_vhost_mutex);
2108         list_add_tail(&tpg->tv_tpg_list, &tcm_vhost_list);
2109         mutex_unlock(&tcm_vhost_mutex);
2110
2111         return &tpg->se_tpg;
2112 }
2113
2114 static void tcm_vhost_drop_tpg(struct se_portal_group *se_tpg)
2115 {
2116         struct tcm_vhost_tpg *tpg = container_of(se_tpg,
2117                                 struct tcm_vhost_tpg, se_tpg);
2118
2119         mutex_lock(&tcm_vhost_mutex);
2120         list_del(&tpg->tv_tpg_list);
2121         mutex_unlock(&tcm_vhost_mutex);
2122         /*
2123          * Release the virtual I_T Nexus for this vhost TPG
2124          */
2125         tcm_vhost_drop_nexus(tpg);
2126         /*
2127          * Deregister the se_tpg from TCM.
2128          */
2129         core_tpg_deregister(se_tpg);
2130         kfree(tpg);
2131 }
2132
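/*
 * The configfs directory name selects the emulated protocol; accepted
 * prefixes, with illustrative values:
 *
 *	naa.60014052cc816eb4	-> SCSI_PROTOCOL_SAS
 *	fc.20000025b500cafe	-> SCSI_PROTOCOL_FCP  ("fc." is stripped)
 *	iqn.2013-01.org.foo:t1	-> SCSI_PROTOCOL_ISCSI
 */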
2133 static struct se_wwn *
2134 tcm_vhost_make_tport(struct target_fabric_configfs *tf,
2135                      struct config_group *group,
2136                      const char *name)
2137 {
2138         struct tcm_vhost_tport *tport;
2139         char *ptr;
2140         u64 wwpn = 0;
2141         int off = 0;
2142
2143         /* if (tcm_vhost_parse_wwn(name, &wwpn, 1) < 0)
2144                 return ERR_PTR(-EINVAL); */
2145
2146         tport = kzalloc(sizeof(struct tcm_vhost_tport), GFP_KERNEL);
2147         if (!tport) {
2148                 pr_err("Unable to allocate struct tcm_vhost_tport\n");
2149                 return ERR_PTR(-ENOMEM);
2150         }
2151         tport->tport_wwpn = wwpn;
2152         /*
2153          * Determine the emulated Protocol Identifier and Target Port Name
2154          * based on the incoming configfs directory name.
2155          */
2156         ptr = strstr(name, "naa.");
2157         if (ptr) {
2158                 tport->tport_proto_id = SCSI_PROTOCOL_SAS;
2159                 goto check_len;
2160         }
2161         ptr = strstr(name, "fc.");
2162         if (ptr) {
2163                 tport->tport_proto_id = SCSI_PROTOCOL_FCP;
2164                 off = 3; /* Skip over "fc." */
2165                 goto check_len;
2166         }
2167         ptr = strstr(name, "iqn.");
2168         if (ptr) {
2169                 tport->tport_proto_id = SCSI_PROTOCOL_ISCSI;
2170                 goto check_len;
2171         }
2172
2173         pr_err("Unable to locate prefix for emulated Target Port:"
2174                         " %s\n", name);
2175         kfree(tport);
2176         return ERR_PTR(-EINVAL);
2177
2178 check_len:
2179         if (strlen(name) >= TCM_VHOST_NAMELEN) {
2180                 pr_err("Emulated %s Address: %s, exceeds"
2181                         " max: %d\n", tcm_vhost_dump_proto_id(tport), name,
2182                         TCM_VHOST_NAMELEN);
2183                 kfree(tport);
2184                 return ERR_PTR(-EINVAL);
2185         }
2186         snprintf(&tport->tport_name[0], TCM_VHOST_NAMELEN, "%s", &name[off]);
2187
2188         pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target"
2189                 " %s Address: %s\n", tcm_vhost_dump_proto_id(tport), name);
2190
2191         return &tport->tport_wwn;
2192 }
2193
2194 static void tcm_vhost_drop_tport(struct se_wwn *wwn)
2195 {
2196         struct tcm_vhost_tport *tport = container_of(wwn,
2197                                 struct tcm_vhost_tport, tport_wwn);
2198
2199         pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target"
2200                 " %s Address: %s\n", tcm_vhost_dump_proto_id(tport),
2201                 tport->tport_name);
2202
2203         kfree(tport);
2204 }
2205
2206 static ssize_t
2207 tcm_vhost_wwn_show_attr_version(struct target_fabric_configfs *tf,
2208                                 char *page)
2209 {
2210         return sprintf(page, "TCM_VHOST fabric module %s on %s/%s"
2211                 " on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname,
2212                 utsname()->machine);
2213 }
2214
2215 TF_WWN_ATTR_RO(tcm_vhost, version);
2216
2217 static struct configfs_attribute *tcm_vhost_wwn_attrs[] = {
2218         &tcm_vhost_wwn_version.attr,
2219         NULL,
2220 };
2221
2222 static struct target_core_fabric_ops tcm_vhost_ops = {
2223         .get_fabric_name                = tcm_vhost_get_fabric_name,
2224         .get_fabric_proto_ident         = tcm_vhost_get_fabric_proto_ident,
2225         .tpg_get_wwn                    = tcm_vhost_get_fabric_wwn,
2226         .tpg_get_tag                    = tcm_vhost_get_tag,
2227         .tpg_get_default_depth          = tcm_vhost_get_default_depth,
2228         .tpg_get_pr_transport_id        = tcm_vhost_get_pr_transport_id,
2229         .tpg_get_pr_transport_id_len    = tcm_vhost_get_pr_transport_id_len,
2230         .tpg_parse_pr_out_transport_id  = tcm_vhost_parse_pr_out_transport_id,
2231         .tpg_check_demo_mode            = tcm_vhost_check_true,
2232         .tpg_check_demo_mode_cache      = tcm_vhost_check_true,
2233         .tpg_check_demo_mode_write_protect = tcm_vhost_check_false,
2234         .tpg_check_prod_mode_write_protect = tcm_vhost_check_false,
2235         .tpg_alloc_fabric_acl           = tcm_vhost_alloc_fabric_acl,
2236         .tpg_release_fabric_acl         = tcm_vhost_release_fabric_acl,
2237         .tpg_get_inst_index             = tcm_vhost_tpg_get_inst_index,
2238         .release_cmd                    = tcm_vhost_release_cmd,
2239         .check_stop_free                = vhost_scsi_check_stop_free,
2240         .shutdown_session               = tcm_vhost_shutdown_session,
2241         .close_session                  = tcm_vhost_close_session,
2242         .sess_get_index                 = tcm_vhost_sess_get_index,
2243         .sess_get_initiator_sid         = NULL,
2244         .write_pending                  = tcm_vhost_write_pending,
2245         .write_pending_status           = tcm_vhost_write_pending_status,
2246         .set_default_node_attributes    = tcm_vhost_set_default_node_attrs,
2247         .get_task_tag                   = tcm_vhost_get_task_tag,
2248         .get_cmd_state                  = tcm_vhost_get_cmd_state,
2249         .queue_data_in                  = tcm_vhost_queue_data_in,
2250         .queue_status                   = tcm_vhost_queue_status,
2251         .queue_tm_rsp                   = tcm_vhost_queue_tm_rsp,
2252         .aborted_task                   = tcm_vhost_aborted_task,
2253         /*
2254          * Setup callers for generic logic in target_core_fabric_configfs.c
2255          */
2256         .fabric_make_wwn                = tcm_vhost_make_tport,
2257         .fabric_drop_wwn                = tcm_vhost_drop_tport,
2258         .fabric_make_tpg                = tcm_vhost_make_tpg,
2259         .fabric_drop_tpg                = tcm_vhost_drop_tpg,
2260         .fabric_post_link               = tcm_vhost_port_link,
2261         .fabric_pre_unlink              = tcm_vhost_port_unlink,
2262         .fabric_make_np                 = NULL,
2263         .fabric_drop_np                 = NULL,
2264         .fabric_make_nodeacl            = tcm_vhost_make_nodeacl,
2265         .fabric_drop_nodeacl            = tcm_vhost_drop_nodeacl,
2266 };
2267
2268 static int tcm_vhost_register_configfs(void)
2269 {
2270         struct target_fabric_configfs *fabric;
2271         int ret;
2272
2273         pr_debug("TCM_VHOST fabric module %s on %s/%s"
2274                 " on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname,
2275                 utsname()->machine);
2276         /*
2277          * Register the top level struct config_item_type with TCM core
2278          */
2279         fabric = target_fabric_configfs_init(THIS_MODULE, "vhost");
2280         if (IS_ERR(fabric)) {
2281                 pr_err("target_fabric_configfs_init() failed\n");
2282                 return PTR_ERR(fabric);
2283         }
2284         /*
2285          * Setup fabric->tf_ops from our local tcm_vhost_ops
2286          */
2287         fabric->tf_ops = tcm_vhost_ops;
2288         /*
2289          * Setup default attribute lists for various fabric->tf_cit_tmpl
2290          */
2291         fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_vhost_wwn_attrs;
2292         fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = tcm_vhost_tpg_attrs;
2293         fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;
2294         fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
2295         fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
2296         fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL;
2297         fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
2298         fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
2299         fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;
2300         /*
2301          * Register the fabric for use within TCM
2302          */
2303         ret = target_fabric_configfs_register(fabric);
2304         if (ret < 0) {
2305                 pr_err("target_fabric_configfs_register() failed"
2306                                 " for TCM_VHOST\n");
2307                 return ret;
2308         }
2309         /*
2310          * Setup our local pointer to *fabric
2311          */
2312         tcm_vhost_fabric_configfs = fabric;
2313         pr_debug("TCM_VHOST[0] - Set fabric -> tcm_vhost_fabric_configfs\n");
2314         return 0;
2315 }
2316
2317 static void tcm_vhost_deregister_configfs(void)
2318 {
2319         if (!tcm_vhost_fabric_configfs)
2320                 return;
2321
2322         target_fabric_configfs_deregister(tcm_vhost_fabric_configfs);
2323         tcm_vhost_fabric_configfs = NULL;
2324         pr_debug("TCM_VHOST[0] - Cleared tcm_vhost_fabric_configfs\n");
2325 }
2326
2327 static int __init tcm_vhost_init(void)
2328 {
2329         int ret = -ENOMEM;
2330         /*
2331          * Use our own dedicated workqueue for submitting I/O into
2332          * target core to avoid contention within system_wq.
2333          */
2334         tcm_vhost_workqueue = alloc_workqueue("tcm_vhost", 0, 0);
2335         if (!tcm_vhost_workqueue)
2336                 goto out;
2337
2338         ret = vhost_scsi_register();
2339         if (ret < 0)
2340                 goto out_destroy_workqueue;
2341
2342         ret = tcm_vhost_register_configfs();
2343         if (ret < 0)
2344                 goto out_vhost_scsi_deregister;
2345
2346         return 0;
2347
2348 out_vhost_scsi_deregister:
2349         vhost_scsi_deregister();
2350 out_destroy_workqueue:
2351         destroy_workqueue(tcm_vhost_workqueue);
2352 out:
2353         return ret;
2354 }
2355
2356 static void tcm_vhost_exit(void)
2357 {
2358         tcm_vhost_deregister_configfs();
2359         vhost_scsi_deregister();
2360         destroy_workqueue(tcm_vhost_workqueue);
2361 }
2362
2363 MODULE_DESCRIPTION("VHOST_SCSI series fabric driver");
2364 MODULE_ALIAS("tcm_vhost");
2365 MODULE_LICENSE("GPL");
2366 module_init(tcm_vhost_init);
2367 module_exit(tcm_vhost_exit);