1 /*******************************************************************************
2  * Vhost kernel TCM fabric driver for virtio SCSI initiators
3  *
4  * (C) Copyright 2010-2013 Datera, Inc.
5  * (C) Copyright 2010-2012 IBM Corp.
6  *
7  * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
8  *
9  * Authors: Nicholas A. Bellinger <nab@daterainc.com>
10  *          Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
11  *
12  * This program is free software; you can redistribute it and/or modify
13  * it under the terms of the GNU General Public License as published by
14  * the Free Software Foundation; either version 2 of the License, or
15  * (at your option) any later version.
16  *
17  * This program is distributed in the hope that it will be useful,
18  * but WITHOUT ANY WARRANTY; without even the implied warranty of
19  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
20  * GNU General Public License for more details.
21  *
22  ****************************************************************************/
23
24 #include <linux/module.h>
25 #include <linux/moduleparam.h>
26 #include <generated/utsrelease.h>
27 #include <linux/utsname.h>
28 #include <linux/init.h>
29 #include <linux/slab.h>
30 #include <linux/kthread.h>
31 #include <linux/types.h>
32 #include <linux/string.h>
33 #include <linux/configfs.h>
34 #include <linux/ctype.h>
35 #include <linux/compat.h>
36 #include <linux/eventfd.h>
37 #include <linux/fs.h>
38 #include <linux/miscdevice.h>
39 #include <asm/unaligned.h>
40 #include <scsi/scsi.h>
41 #include <scsi/scsi_tcq.h>
42 #include <target/target_core_base.h>
43 #include <target/target_core_fabric.h>
44 #include <target/target_core_fabric_configfs.h>
45 #include <target/target_core_configfs.h>
46 #include <target/configfs_macros.h>
47 #include <linux/vhost.h>
48 #include <linux/virtio_scsi.h>
49 #include <linux/llist.h>
50 #include <linux/bitmap.h>
51 #include <linux/percpu_ida.h>
52
53 #include "vhost.h"
54
55 #define TCM_VHOST_VERSION  "v0.1"
56 #define TCM_VHOST_NAMELEN 256
57 #define TCM_VHOST_MAX_CDB_SIZE 32
58 #define TCM_VHOST_DEFAULT_TAGS 256
59 #define TCM_VHOST_PREALLOC_SGLS 2048
60 #define TCM_VHOST_PREALLOC_UPAGES 2048
61 #define TCM_VHOST_PREALLOC_PROT_SGLS 512
62
63 struct vhost_scsi_inflight {
64         /* Wait for the flush operation to finish */
65         struct completion comp;
66         /* Refcount for the inflight reqs */
67         struct kref kref;
68 };
69
70 struct tcm_vhost_cmd {
71         /* Descriptor from vhost_get_vq_desc() for virt_queue segment */
72         int tvc_vq_desc;
73         /* virtio-scsi initiator task attribute */
74         int tvc_task_attr;
75         /* virtio-scsi initiator data direction */
76         enum dma_data_direction tvc_data_direction;
77         /* Expected data transfer length from virtio-scsi header */
78         u32 tvc_exp_data_len;
79         /* The Tag from include/linux/virtio_scsi.h:struct virtio_scsi_cmd_req */
80         u64 tvc_tag;
81         /* The number of scatterlists associated with this cmd */
82         u32 tvc_sgl_count;
83         u32 tvc_prot_sgl_count;
84         /* Saved unpacked SCSI LUN for tcm_vhost_submission_work() */
85         u32 tvc_lun;
86         /* Pointer to the SGL formatted memory from virtio-scsi */
87         struct scatterlist *tvc_sgl;
88         struct scatterlist *tvc_prot_sgl;
89         struct page **tvc_upages;
90         /* Pointer to response */
91         struct virtio_scsi_cmd_resp __user *tvc_resp;
92         /* Pointer to vhost_scsi for our device */
93         struct vhost_scsi *tvc_vhost;
94         /* Pointer to vhost_virtqueue for the cmd */
95         struct vhost_virtqueue *tvc_vq;
96         /* Pointer to vhost nexus memory */
97         struct tcm_vhost_nexus *tvc_nexus;
98         /* The TCM I/O descriptor that is accessed via container_of() */
99         struct se_cmd tvc_se_cmd;
100         /* work item used for cmwq dispatch to tcm_vhost_submission_work() */
101         struct work_struct work;
102         /* Copy of the incoming SCSI command descriptor block (CDB) */
103         unsigned char tvc_cdb[TCM_VHOST_MAX_CDB_SIZE];
104         /* Sense buffer that will be mapped into outgoing status */
105         unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER];
106         /* Completed commands list, serviced from vhost worker thread */
107         struct llist_node tvc_completion_list;
108         /* Used to track inflight cmd */
109         struct vhost_scsi_inflight *inflight;
110 };
111
112 struct tcm_vhost_nexus {
113         /* Pointer to TCM session for I_T Nexus */
114         struct se_session *tvn_se_sess;
115 };
116
117 struct tcm_vhost_nacl {
118         /* Binary World Wide unique Port Name for Vhost Initiator port */
119         u64 iport_wwpn;
120         /* ASCII formatted WWPN for SAS Initiator port */
121         char iport_name[TCM_VHOST_NAMELEN];
122         /* Returned by tcm_vhost_make_nodeacl() */
123         struct se_node_acl se_node_acl;
124 };
125
126 struct tcm_vhost_tpg {
127         /* Vhost port target portal group tag for TCM */
128         u16 tport_tpgt;
129         /* Used to track number of TPG Port/Lun Links wrt explicit I_T Nexus shutdown */
130         int tv_tpg_port_count;
131         /* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */
132         int tv_tpg_vhost_count;
133         /* list for tcm_vhost_list */
134         struct list_head tv_tpg_list;
135         /* Used to protect access for tpg_nexus */
136         struct mutex tv_tpg_mutex;
137         /* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */
138         struct tcm_vhost_nexus *tpg_nexus;
139         /* Pointer back to tcm_vhost_tport */
140         struct tcm_vhost_tport *tport;
141         /* Returned by tcm_vhost_make_tpg() */
142         struct se_portal_group se_tpg;
143         /* Pointer back to vhost_scsi, protected by tv_tpg_mutex */
144         struct vhost_scsi *vhost_scsi;
145 };
146
147 struct tcm_vhost_tport {
148         /* SCSI protocol the tport is providing */
149         u8 tport_proto_id;
150         /* Binary World Wide unique Port Name for Vhost Target port */
151         u64 tport_wwpn;
152         /* ASCII formatted WWPN for Vhost Target port */
153         char tport_name[TCM_VHOST_NAMELEN];
154         /* Returned by tcm_vhost_make_tport() */
155         struct se_wwn tport_wwn;
156 };
157
158 struct tcm_vhost_evt {
159         /* event to be sent to guest */
160         struct virtio_scsi_event event;
161         /* event list, serviced from vhost worker thread */
162         struct llist_node list;
163 };
164
165 enum {
166         VHOST_SCSI_VQ_CTL = 0,
167         VHOST_SCSI_VQ_EVT = 1,
168         VHOST_SCSI_VQ_IO = 2,
169 };
170
171 enum {
172         VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG) |
173                                                (1ULL << VIRTIO_SCSI_F_T10_PI)
174 };
175
176 #define VHOST_SCSI_MAX_TARGET   256
177 #define VHOST_SCSI_MAX_VQ       128
178 #define VHOST_SCSI_MAX_EVENT    128
179
180 struct vhost_scsi_virtqueue {
181         struct vhost_virtqueue vq;
182         /*
183          * Reference counting for inflight reqs, used by the flush operation.
184          * At any given time one counter tracks newly submitted commands, while
185          * we wait for the other one to drop to 0.
186          */
187         struct vhost_scsi_inflight inflights[2];
188         /*
189          * Indicate current inflight in use, protected by vq->mutex.
190          * Writers must also take dev mutex and flush under it.
191          */
192         int inflight_idx;
193 };
194
195 struct vhost_scsi {
196         /* Protected by vhost_scsi->dev.mutex */
197         struct tcm_vhost_tpg **vs_tpg;
198         char vs_vhost_wwpn[TRANSPORT_IQN_LEN];
199
200         struct vhost_dev dev;
201         struct vhost_scsi_virtqueue vqs[VHOST_SCSI_MAX_VQ];
202
203         struct vhost_work vs_completion_work; /* cmd completion work item */
204         struct llist_head vs_completion_list; /* cmd completion queue */
205
206         struct vhost_work vs_event_work; /* evt injection work item */
207         struct llist_head vs_event_list; /* evt injection queue */
208
209         bool vs_events_missed; /* any missed events, protected by vq->mutex */
210         int vs_events_nr; /* num of pending events, protected by vq->mutex */
211 };
212
213 /* Local pointer to allocated TCM configfs fabric module */
214 static struct target_fabric_configfs *tcm_vhost_fabric_configfs;
215
216 static struct workqueue_struct *tcm_vhost_workqueue;
217
218 /* Global mutex to protect the tcm_vhost TPG list for vhost IOCTL access */
219 static DEFINE_MUTEX(tcm_vhost_mutex);
220 static LIST_HEAD(tcm_vhost_list);
221
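/* Number of pages spanned by a single iovec, accounting for a non page-aligned base. */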
222 static int iov_num_pages(struct iovec *iov)
223 {
224         return (PAGE_ALIGN((unsigned long)iov->iov_base + iov->iov_len) -
225                ((unsigned long)iov->iov_base & PAGE_MASK)) >> PAGE_SHIFT;
226 }
227
228 static void tcm_vhost_done_inflight(struct kref *kref)
229 {
230         struct vhost_scsi_inflight *inflight;
231
232         inflight = container_of(kref, struct vhost_scsi_inflight, kref);
233         complete(&inflight->comp);
234 }
235
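/*
 * Flip each virtqueue over to a fresh inflight counter and, when old_inflight
 * is provided, hand the previous counters back to the caller (the flush path)
 * so it can wait for their outstanding requests to drain.
 */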
236 static void tcm_vhost_init_inflight(struct vhost_scsi *vs,
237                                     struct vhost_scsi_inflight *old_inflight[])
238 {
239         struct vhost_scsi_inflight *new_inflight;
240         struct vhost_virtqueue *vq;
241         int idx, i;
242
243         for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
244                 vq = &vs->vqs[i].vq;
245
246                 mutex_lock(&vq->mutex);
247
248                 /* store old inflight */
249                 idx = vs->vqs[i].inflight_idx;
250                 if (old_inflight)
251                         old_inflight[i] = &vs->vqs[i].inflights[idx];
252
253                 /* set up new inflight */
254                 vs->vqs[i].inflight_idx = idx ^ 1;
255                 new_inflight = &vs->vqs[i].inflights[idx ^ 1];
256                 kref_init(&new_inflight->kref);
257                 init_completion(&new_inflight->comp);
258
259                 mutex_unlock(&vq->mutex);
260         }
261 }
262
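/*
 * Take a reference on the inflight counter currently active for this
 * virtqueue; it is dropped via tcm_vhost_put_inflight() when the command
 * is released.
 */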
263 static struct vhost_scsi_inflight *
264 tcm_vhost_get_inflight(struct vhost_virtqueue *vq)
265 {
266         struct vhost_scsi_inflight *inflight;
267         struct vhost_scsi_virtqueue *svq;
268
269         svq = container_of(vq, struct vhost_scsi_virtqueue, vq);
270         inflight = &svq->inflights[svq->inflight_idx];
271         kref_get(&inflight->kref);
272
273         return inflight;
274 }
275
276 static void tcm_vhost_put_inflight(struct vhost_scsi_inflight *inflight)
277 {
278         kref_put(&inflight->kref, tcm_vhost_done_inflight);
279 }
280
281 static int tcm_vhost_check_true(struct se_portal_group *se_tpg)
282 {
283         return 1;
284 }
285
286 static int tcm_vhost_check_false(struct se_portal_group *se_tpg)
287 {
288         return 0;
289 }
290
291 static char *tcm_vhost_get_fabric_name(void)
292 {
293         return "vhost";
294 }
295
296 static u8 tcm_vhost_get_fabric_proto_ident(struct se_portal_group *se_tpg)
297 {
298         struct tcm_vhost_tpg *tpg = container_of(se_tpg,
299                                 struct tcm_vhost_tpg, se_tpg);
300         struct tcm_vhost_tport *tport = tpg->tport;
301
302         switch (tport->tport_proto_id) {
303         case SCSI_PROTOCOL_SAS:
304                 return sas_get_fabric_proto_ident(se_tpg);
305         case SCSI_PROTOCOL_FCP:
306                 return fc_get_fabric_proto_ident(se_tpg);
307         case SCSI_PROTOCOL_ISCSI:
308                 return iscsi_get_fabric_proto_ident(se_tpg);
309         default:
310                 pr_err("Unknown tport_proto_id: 0x%02x, using"
311                         " SAS emulation\n", tport->tport_proto_id);
312                 break;
313         }
314
315         return sas_get_fabric_proto_ident(se_tpg);
316 }
317
318 static char *tcm_vhost_get_fabric_wwn(struct se_portal_group *se_tpg)
319 {
320         struct tcm_vhost_tpg *tpg = container_of(se_tpg,
321                                 struct tcm_vhost_tpg, se_tpg);
322         struct tcm_vhost_tport *tport = tpg->tport;
323
324         return &tport->tport_name[0];
325 }
326
327 static u16 tcm_vhost_get_tag(struct se_portal_group *se_tpg)
328 {
329         struct tcm_vhost_tpg *tpg = container_of(se_tpg,
330                                 struct tcm_vhost_tpg, se_tpg);
331         return tpg->tport_tpgt;
332 }
333
334 static u32 tcm_vhost_get_default_depth(struct se_portal_group *se_tpg)
335 {
336         return 1;
337 }
338
339 static u32
340 tcm_vhost_get_pr_transport_id(struct se_portal_group *se_tpg,
341                               struct se_node_acl *se_nacl,
342                               struct t10_pr_registration *pr_reg,
343                               int *format_code,
344                               unsigned char *buf)
345 {
346         struct tcm_vhost_tpg *tpg = container_of(se_tpg,
347                                 struct tcm_vhost_tpg, se_tpg);
348         struct tcm_vhost_tport *tport = tpg->tport;
349
350         switch (tport->tport_proto_id) {
351         case SCSI_PROTOCOL_SAS:
352                 return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
353                                         format_code, buf);
354         case SCSI_PROTOCOL_FCP:
355                 return fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
356                                         format_code, buf);
357         case SCSI_PROTOCOL_ISCSI:
358                 return iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
359                                         format_code, buf);
360         default:
361                 pr_err("Unknown tport_proto_id: 0x%02x, using"
362                         " SAS emulation\n", tport->tport_proto_id);
363                 break;
364         }
365
366         return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
367                         format_code, buf);
368 }
369
370 static u32
371 tcm_vhost_get_pr_transport_id_len(struct se_portal_group *se_tpg,
372                                   struct se_node_acl *se_nacl,
373                                   struct t10_pr_registration *pr_reg,
374                                   int *format_code)
375 {
376         struct tcm_vhost_tpg *tpg = container_of(se_tpg,
377                                 struct tcm_vhost_tpg, se_tpg);
378         struct tcm_vhost_tport *tport = tpg->tport;
379
380         switch (tport->tport_proto_id) {
381         case SCSI_PROTOCOL_SAS:
382                 return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
383                                         format_code);
384         case SCSI_PROTOCOL_FCP:
385                 return fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
386                                         format_code);
387         case SCSI_PROTOCOL_ISCSI:
388                 return iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
389                                         format_code);
390         default:
391                 pr_err("Unknown tport_proto_id: 0x%02x, using"
392                         " SAS emulation\n", tport->tport_proto_id);
393                 break;
394         }
395
396         return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
397                         format_code);
398 }
399
400 static char *
401 tcm_vhost_parse_pr_out_transport_id(struct se_portal_group *se_tpg,
402                                     const char *buf,
403                                     u32 *out_tid_len,
404                                     char **port_nexus_ptr)
405 {
406         struct tcm_vhost_tpg *tpg = container_of(se_tpg,
407                                 struct tcm_vhost_tpg, se_tpg);
408         struct tcm_vhost_tport *tport = tpg->tport;
409
410         switch (tport->tport_proto_id) {
411         case SCSI_PROTOCOL_SAS:
412                 return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
413                                         port_nexus_ptr);
414         case SCSI_PROTOCOL_FCP:
415                 return fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
416                                         port_nexus_ptr);
417         case SCSI_PROTOCOL_ISCSI:
418                 return iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
419                                         port_nexus_ptr);
420         default:
421                 pr_err("Unknown tport_proto_id: 0x%02x, using"
422                         " SAS emulation\n", tport->tport_proto_id);
423                 break;
424         }
425
426         return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
427                         port_nexus_ptr);
428 }
429
430 static struct se_node_acl *
431 tcm_vhost_alloc_fabric_acl(struct se_portal_group *se_tpg)
432 {
433         struct tcm_vhost_nacl *nacl;
434
435         nacl = kzalloc(sizeof(struct tcm_vhost_nacl), GFP_KERNEL);
436         if (!nacl) {
437                 pr_err("Unable to allocate struct tcm_vhost_nacl\n");
438                 return NULL;
439         }
440
441         return &nacl->se_node_acl;
442 }
443
444 static void
445 tcm_vhost_release_fabric_acl(struct se_portal_group *se_tpg,
446                              struct se_node_acl *se_nacl)
447 {
448         struct tcm_vhost_nacl *nacl = container_of(se_nacl,
449                         struct tcm_vhost_nacl, se_node_acl);
450         kfree(nacl);
451 }
452
453 static u32 tcm_vhost_tpg_get_inst_index(struct se_portal_group *se_tpg)
454 {
455         return 1;
456 }
457
458 static void tcm_vhost_release_cmd(struct se_cmd *se_cmd)
459 {
460         struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd,
461                                 struct tcm_vhost_cmd, tvc_se_cmd);
462         struct se_session *se_sess = se_cmd->se_sess;
463         int i;
464
465         if (tv_cmd->tvc_sgl_count) {
466                 for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
467                         put_page(sg_page(&tv_cmd->tvc_sgl[i]));
468         }
469         if (tv_cmd->tvc_prot_sgl_count) {
470                 for (i = 0; i < tv_cmd->tvc_prot_sgl_count; i++)
471                         put_page(sg_page(&tv_cmd->tvc_prot_sgl[i]));
472         }
473
474         tcm_vhost_put_inflight(tv_cmd->inflight);
475         percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
476 }
477
478 static int tcm_vhost_shutdown_session(struct se_session *se_sess)
479 {
480         return 0;
481 }
482
483 static void tcm_vhost_close_session(struct se_session *se_sess)
484 {
485         return;
486 }
487
488 static u32 tcm_vhost_sess_get_index(struct se_session *se_sess)
489 {
490         return 0;
491 }
492
493 static int tcm_vhost_write_pending(struct se_cmd *se_cmd)
494 {
495         /* Go ahead and process the write immediately */
496         target_execute_cmd(se_cmd);
497         return 0;
498 }
499
500 static int tcm_vhost_write_pending_status(struct se_cmd *se_cmd)
501 {
502         return 0;
503 }
504
505 static void tcm_vhost_set_default_node_attrs(struct se_node_acl *nacl)
506 {
507         return;
508 }
509
510 static u32 tcm_vhost_get_task_tag(struct se_cmd *se_cmd)
511 {
512         return 0;
513 }
514
515 static int tcm_vhost_get_cmd_state(struct se_cmd *se_cmd)
516 {
517         return 0;
518 }
519
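/*
 * Queue a finished command on the lockless completion list and kick the
 * vhost worker, which runs vhost_scsi_complete_cmd_work() to fill in the
 * virtio-scsi response.
 */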
520 static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *cmd)
521 {
522         struct vhost_scsi *vs = cmd->tvc_vhost;
523
524         llist_add(&cmd->tvc_completion_list, &vs->vs_completion_list);
525
526         vhost_work_queue(&vs->dev, &vs->vs_completion_work);
527 }
528
529 static int tcm_vhost_queue_data_in(struct se_cmd *se_cmd)
530 {
531         struct tcm_vhost_cmd *cmd = container_of(se_cmd,
532                                 struct tcm_vhost_cmd, tvc_se_cmd);
533         vhost_scsi_complete_cmd(cmd);
534         return 0;
535 }
536
537 static int tcm_vhost_queue_status(struct se_cmd *se_cmd)
538 {
539         struct tcm_vhost_cmd *cmd = container_of(se_cmd,
540                                 struct tcm_vhost_cmd, tvc_se_cmd);
541         vhost_scsi_complete_cmd(cmd);
542         return 0;
543 }
544
545 static void tcm_vhost_queue_tm_rsp(struct se_cmd *se_cmd)
546 {
547         return;
548 }
549
550 static void tcm_vhost_aborted_task(struct se_cmd *se_cmd)
551 {
552         return;
553 }
554
555 static void tcm_vhost_free_evt(struct vhost_scsi *vs, struct tcm_vhost_evt *evt)
556 {
557         vs->vs_events_nr--;
558         kfree(evt);
559 }
560
561 static struct tcm_vhost_evt *
562 tcm_vhost_allocate_evt(struct vhost_scsi *vs,
563                        u32 event, u32 reason)
564 {
565         struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
566         struct tcm_vhost_evt *evt;
567
568         if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
569                 vs->vs_events_missed = true;
570                 return NULL;
571         }
572
573         evt = kzalloc(sizeof(*evt), GFP_KERNEL);
574         if (!evt) {
575                 vq_err(vq, "Failed to allocate tcm_vhost_evt\n");
576                 vs->vs_events_missed = true;
577                 return NULL;
578         }
579
580         evt->event.event = event;
581         evt->event.reason = reason;
582         vs->vs_events_nr++;
583
584         return evt;
585 }
586
587 static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *cmd)
588 {
589         struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
590
591         /* TODO locking against target/backend threads? */
592         transport_generic_free_cmd(se_cmd, 0);
593
594 }
595
596 static int vhost_scsi_check_stop_free(struct se_cmd *se_cmd)
597 {
598         return target_put_sess_cmd(se_cmd->se_sess, se_cmd);
599 }
600
601 static void
602 tcm_vhost_do_evt_work(struct vhost_scsi *vs, struct tcm_vhost_evt *evt)
603 {
604         struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
605         struct virtio_scsi_event *event = &evt->event;
606         struct virtio_scsi_event __user *eventp;
607         unsigned out, in;
608         int head, ret;
609
610         if (!vq->private_data) {
611                 vs->vs_events_missed = true;
612                 return;
613         }
614
615 again:
616         vhost_disable_notify(&vs->dev, vq);
617         head = vhost_get_vq_desc(vq, vq->iov,
618                         ARRAY_SIZE(vq->iov), &out, &in,
619                         NULL, NULL);
620         if (head < 0) {
621                 vs->vs_events_missed = true;
622                 return;
623         }
624         if (head == vq->num) {
625                 if (vhost_enable_notify(&vs->dev, vq))
626                         goto again;
627                 vs->vs_events_missed = true;
628                 return;
629         }
630
631         if (vq->iov[out].iov_len != sizeof(struct virtio_scsi_event)) {
632                 vq_err(vq, "Expecting virtio_scsi_event, got %zu bytes\n",
633                                 vq->iov[out].iov_len);
634                 vs->vs_events_missed = true;
635                 return;
636         }
637
638         if (vs->vs_events_missed) {
639                 event->event |= VIRTIO_SCSI_T_EVENTS_MISSED;
640                 vs->vs_events_missed = false;
641         }
642
643         eventp = vq->iov[out].iov_base;
644         ret = __copy_to_user(eventp, event, sizeof(*event));
645         if (!ret)
646                 vhost_add_used_and_signal(&vs->dev, vq, head, 0);
647         else
648                 vq_err(vq, "Faulted on tcm_vhost_send_event\n");
649 }
650
651 static void tcm_vhost_evt_work(struct vhost_work *work)
652 {
653         struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
654                                         vs_event_work);
655         struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
656         struct tcm_vhost_evt *evt;
657         struct llist_node *llnode;
658
659         mutex_lock(&vq->mutex);
660         llnode = llist_del_all(&vs->vs_event_list);
661         while (llnode) {
662                 evt = llist_entry(llnode, struct tcm_vhost_evt, list);
663                 llnode = llist_next(llnode);
664                 tcm_vhost_do_evt_work(vs, evt);
665                 tcm_vhost_free_evt(vs, evt);
666         }
667         mutex_unlock(&vq->mutex);
668 }
669
670 /* Fill in status and signal that we are done processing this command
671  *
672  * This is scheduled in the vhost work queue so we are called with the owner
673  * process mm and can access the vring.
674  */
675 static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
676 {
677         struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
678                                         vs_completion_work);
679         DECLARE_BITMAP(signal, VHOST_SCSI_MAX_VQ);
680         struct virtio_scsi_cmd_resp v_rsp;
681         struct tcm_vhost_cmd *cmd;
682         struct llist_node *llnode;
683         struct se_cmd *se_cmd;
684         int ret, vq;
685
686         bitmap_zero(signal, VHOST_SCSI_MAX_VQ);
687         llnode = llist_del_all(&vs->vs_completion_list);
688         while (llnode) {
689                 cmd = llist_entry(llnode, struct tcm_vhost_cmd,
690                                      tvc_completion_list);
691                 llnode = llist_next(llnode);
692                 se_cmd = &cmd->tvc_se_cmd;
693
694                 pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
695                         cmd, se_cmd->residual_count, se_cmd->scsi_status);
696
697                 memset(&v_rsp, 0, sizeof(v_rsp));
698                 v_rsp.resid = se_cmd->residual_count;
699                 /* TODO is status_qualifier field needed? */
700                 v_rsp.status = se_cmd->scsi_status;
701                 v_rsp.sense_len = se_cmd->scsi_sense_length;
702                 memcpy(v_rsp.sense, cmd->tvc_sense_buf,
703                        v_rsp.sense_len);
704                 ret = copy_to_user(cmd->tvc_resp, &v_rsp, sizeof(v_rsp));
705                 if (likely(ret == 0)) {
706                         struct vhost_scsi_virtqueue *q;
707                         vhost_add_used(cmd->tvc_vq, cmd->tvc_vq_desc, 0);
708                         q = container_of(cmd->tvc_vq, struct vhost_scsi_virtqueue, vq);
709                         vq = q - vs->vqs;
710                         __set_bit(vq, signal);
711                 } else
712                         pr_err("Faulted on virtio_scsi_cmd_resp\n");
713
714                 vhost_scsi_free_cmd(cmd);
715         }
716
717         vq = -1;
718         while ((vq = find_next_bit(signal, VHOST_SCSI_MAX_VQ, vq + 1))
719                 < VHOST_SCSI_MAX_VQ)
720                 vhost_signal(&vs->dev, &vs->vqs[vq].vq);
721 }
722
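/*
 * Allocate a command descriptor from the session's percpu_ida tag pool,
 * reuse its preallocated scatterlist and page arrays, and fill in the
 * per-request fields parsed from the virtio-scsi header.
 */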
723 static struct tcm_vhost_cmd *
724 vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct tcm_vhost_tpg *tpg,
725                    unsigned char *cdb, u64 scsi_tag, u16 lun, u8 task_attr,
726                    u32 exp_data_len, int data_direction)
727 {
728         struct tcm_vhost_cmd *cmd;
729         struct tcm_vhost_nexus *tv_nexus;
730         struct se_session *se_sess;
731         struct scatterlist *sg, *prot_sg;
732         struct page **pages;
733         int tag;
734
735         tv_nexus = tpg->tpg_nexus;
736         if (!tv_nexus) {
737                 pr_err("Unable to locate active struct tcm_vhost_nexus\n");
738                 return ERR_PTR(-EIO);
739         }
740         se_sess = tv_nexus->tvn_se_sess;
741
742         tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
743         if (tag < 0) {
744                 pr_err("Unable to obtain tag for tcm_vhost_cmd\n");
745                 return ERR_PTR(-ENOMEM);
746         }
747
748         cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[tag];
749         sg = cmd->tvc_sgl;
750         prot_sg = cmd->tvc_prot_sgl;
751         pages = cmd->tvc_upages;
752         memset(cmd, 0, sizeof(struct tcm_vhost_cmd));
753
754         cmd->tvc_sgl = sg;
755         cmd->tvc_prot_sgl = prot_sg;
756         cmd->tvc_upages = pages;
757         cmd->tvc_se_cmd.map_tag = tag;
758         cmd->tvc_tag = scsi_tag;
759         cmd->tvc_lun = lun;
760         cmd->tvc_task_attr = task_attr;
761         cmd->tvc_exp_data_len = exp_data_len;
762         cmd->tvc_data_direction = data_direction;
763         cmd->tvc_nexus = tv_nexus;
764         cmd->inflight = tcm_vhost_get_inflight(vq);
765
766         memcpy(cmd->tvc_cdb, cdb, TCM_VHOST_MAX_CDB_SIZE);
767
768         return cmd;
769 }
770
771 /*
772  * Map a user memory range into a scatterlist
773  *
774  * Returns the number of scatterlist entries used or -errno on error.
775  */
776 static int
777 vhost_scsi_map_to_sgl(struct tcm_vhost_cmd *tv_cmd,
778                       struct scatterlist *sgl,
779                       unsigned int sgl_count,
780                       struct iovec *iov,
781                       struct page **pages,
782                       bool write)
783 {
784         unsigned int npages = 0, pages_nr, offset, nbytes;
785         struct scatterlist *sg = sgl;
786         void __user *ptr = iov->iov_base;
787         size_t len = iov->iov_len;
788         int ret, i;
789
790         pages_nr = iov_num_pages(iov);
791         if (pages_nr > sgl_count) {
792                 pr_err("vhost_scsi_map_to_sgl() pages_nr: %u greater than"
793                        " sgl_count: %u\n", pages_nr, sgl_count);
794                 return -ENOBUFS;
795         }
796         if (pages_nr > TCM_VHOST_PREALLOC_UPAGES) {
797                 pr_err("vhost_scsi_map_to_sgl() pages_nr: %u greater than"
798                        " preallocated TCM_VHOST_PREALLOC_UPAGES: %u\n",
799                         pages_nr, TCM_VHOST_PREALLOC_UPAGES);
800                 return -ENOBUFS;
801         }
802
803         ret = get_user_pages_fast((unsigned long)ptr, pages_nr, write, pages);
804         /* No pages were pinned */
805         if (ret < 0)
806                 goto out;
807         /* Fewer pages pinned than requested */
808         if (ret != pages_nr) {
809                 for (i = 0; i < ret; i++)
810                         put_page(pages[i]);
811                 ret = -EFAULT;
812                 goto out;
813         }
814
815         while (len > 0) {
816                 offset = (uintptr_t)ptr & ~PAGE_MASK;
817                 nbytes = min_t(unsigned int, PAGE_SIZE - offset, len);
818                 sg_set_page(sg, pages[npages], nbytes, offset);
819                 ptr += nbytes;
820                 len -= nbytes;
821                 sg++;
822                 npages++;
823         }
824
825 out:
826         return ret;
827 }
828
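/*
 * Map the guest data iovecs into the command's preallocated scatterlist,
 * pinning the backing user pages; on failure, any pages pinned so far are
 * released and the sgl count is reset.
 */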
829 static int
830 vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *cmd,
831                           struct iovec *iov,
832                           int niov,
833                           bool write)
834 {
835         struct scatterlist *sg = cmd->tvc_sgl;
836         unsigned int sgl_count = 0;
837         int ret, i;
838
839         for (i = 0; i < niov; i++)
840                 sgl_count += iov_num_pages(&iov[i]);
841
842         if (sgl_count > TCM_VHOST_PREALLOC_SGLS) {
843                 pr_err("vhost_scsi_map_iov_to_sgl() sgl_count: %u greater than"
844                         " preallocated TCM_VHOST_PREALLOC_SGLS: %u\n",
845                         sgl_count, TCM_VHOST_PREALLOC_SGLS);
846                 return -ENOBUFS;
847         }
848
849         pr_debug("%s sg %p sgl_count %u\n", __func__, sg, sgl_count);
850         sg_init_table(sg, sgl_count);
851         cmd->tvc_sgl_count = sgl_count;
852
853         pr_debug("Mapping iovec %p for %u pages\n", &iov[0], sgl_count);
854
855         for (i = 0; i < niov; i++) {
856                 ret = vhost_scsi_map_to_sgl(cmd, sg, sgl_count, &iov[i],
857                                             cmd->tvc_upages, write);
858                 if (ret < 0) {
859                         for (i = 0; i < cmd->tvc_sgl_count; i++)
860                                 put_page(sg_page(&cmd->tvc_sgl[i]));
861
862                         cmd->tvc_sgl_count = 0;
863                         return ret;
864                 }
865                 sg += ret;
866                 sgl_count -= ret;
867         }
868         return 0;
869 }
870
871 static int
872 vhost_scsi_map_iov_to_prot(struct tcm_vhost_cmd *cmd,
873                            struct iovec *iov,
874                            int niov,
875                            bool write)
876 {
877         struct scatterlist *prot_sg = cmd->tvc_prot_sgl;
878         unsigned int prot_sgl_count = 0;
879         int ret, i;
880
881         for (i = 0; i < niov; i++)
882                 prot_sgl_count += iov_num_pages(&iov[i]);
883
884         if (prot_sgl_count > TCM_VHOST_PREALLOC_PROT_SGLS) {
885                 pr_err("vhost_scsi_map_iov_to_prot() sgl_count: %u greater than"
886                         " preallocated TCM_VHOST_PREALLOC_PROT_SGLS: %u\n",
887                         prot_sgl_count, TCM_VHOST_PREALLOC_PROT_SGLS);
888                 return -ENOBUFS;
889         }
890
891         pr_debug("%s prot_sg %p prot_sgl_count %u\n", __func__,
892                  prot_sg, prot_sgl_count);
893         sg_init_table(prot_sg, prot_sgl_count);
894         cmd->tvc_prot_sgl_count = prot_sgl_count;
895
896         for (i = 0; i < niov; i++) {
897                 ret = vhost_scsi_map_to_sgl(cmd, prot_sg, prot_sgl_count, &iov[i],
898                                             cmd->tvc_upages, write);
899                 if (ret < 0) {
900                         for (i = 0; i < cmd->tvc_prot_sgl_count; i++)
901                                 put_page(sg_page(&cmd->tvc_prot_sgl[i]));
902
903                         cmd->tvc_prot_sgl_count = 0;
904                         return ret;
905                 }
906                 prot_sg += ret;
907                 prot_sgl_count -= ret;
908         }
909         return 0;
910 }
911
912 static void tcm_vhost_submission_work(struct work_struct *work)
913 {
914         struct tcm_vhost_cmd *cmd =
915                 container_of(work, struct tcm_vhost_cmd, work);
916         struct tcm_vhost_nexus *tv_nexus;
917         struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
918         struct scatterlist *sg_ptr, *sg_prot_ptr = NULL;
919         int rc;
920
921         /* FIXME: BIDI operation */
922         if (cmd->tvc_sgl_count) {
923                 sg_ptr = cmd->tvc_sgl;
924
925                 if (cmd->tvc_prot_sgl_count)
926                         sg_prot_ptr = cmd->tvc_prot_sgl;
927                 else
928                         se_cmd->prot_pto = true;
929         } else {
930                 sg_ptr = NULL;
931         }
932         tv_nexus = cmd->tvc_nexus;
933
934         rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess,
935                         cmd->tvc_cdb, &cmd->tvc_sense_buf[0],
936                         cmd->tvc_lun, cmd->tvc_exp_data_len,
937                         cmd->tvc_task_attr, cmd->tvc_data_direction,
938                         TARGET_SCF_ACK_KREF, sg_ptr, cmd->tvc_sgl_count,
939                         NULL, 0, sg_prot_ptr, cmd->tvc_prot_sgl_count);
940         if (rc < 0) {
941                 transport_send_check_condition_and_sense(se_cmd,
942                                 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
943                 transport_generic_free_cmd(se_cmd, 0);
944         }
945 }
946
947 static void
948 vhost_scsi_send_bad_target(struct vhost_scsi *vs,
949                            struct vhost_virtqueue *vq,
950                            int head, unsigned out)
951 {
952         struct virtio_scsi_cmd_resp __user *resp;
953         struct virtio_scsi_cmd_resp rsp;
954         int ret;
955
956         memset(&rsp, 0, sizeof(rsp));
957         rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
958         resp = vq->iov[out].iov_base;
959         ret = __copy_to_user(resp, &rsp, sizeof(rsp));
960         if (!ret)
961                 vhost_add_used_and_signal(&vs->dev, vq, head, 0);
962         else
963                 pr_err("Faulted on virtio_scsi_cmd_resp\n");
964 }
965
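/*
 * Main request-queue handler: pull descriptors from the virtqueue, parse the
 * virtio-scsi command header (with or without T10 PI), map any data and
 * protection iovecs, and dispatch each command to tcm_vhost_workqueue.
 */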
966 static void
967 vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
968 {
969         struct tcm_vhost_tpg **vs_tpg;
970         struct virtio_scsi_cmd_req v_req;
971         struct virtio_scsi_cmd_req_pi v_req_pi;
972         struct tcm_vhost_tpg *tpg;
973         struct tcm_vhost_cmd *cmd;
974         u64 tag;
975         u32 exp_data_len, data_first, data_num, data_direction, prot_first;
976         unsigned out, in, i;
977         int head, ret, data_niov, prot_niov, prot_bytes;
978         size_t req_size;
979         u16 lun;
980         u8 *target, *lunp, task_attr;
981         bool hdr_pi;
982         void *req, *cdb;
983
984         mutex_lock(&vq->mutex);
985         /*
986          * We can handle the vq only after the endpoint is setup by calling the
987          * VHOST_SCSI_SET_ENDPOINT ioctl.
988          */
989         vs_tpg = vq->private_data;
990         if (!vs_tpg)
991                 goto out;
992
993         vhost_disable_notify(&vs->dev, vq);
994
995         for (;;) {
996                 head = vhost_get_vq_desc(vq, vq->iov,
997                                         ARRAY_SIZE(vq->iov), &out, &in,
998                                         NULL, NULL);
999                 pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
1000                                         head, out, in);
1001                 /* On error, stop handling until the next kick. */
1002                 if (unlikely(head < 0))
1003                         break;
1004                 /* Nothing new?  Wait for eventfd to tell us they refilled. */
1005                 if (head == vq->num) {
1006                         if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
1007                                 vhost_disable_notify(&vs->dev, vq);
1008                                 continue;
1009                         }
1010                         break;
1011                 }
1012
1013                 /* FIXME: BIDI operation */
1014                 if (out == 1 && in == 1) {
1015                         data_direction = DMA_NONE;
1016                         data_first = 0;
1017                         data_num = 0;
1018                 } else if (out == 1 && in > 1) {
1019                         data_direction = DMA_FROM_DEVICE;
1020                         data_first = out + 1;
1021                         data_num = in - 1;
1022                 } else if (out > 1 && in == 1) {
1023                         data_direction = DMA_TO_DEVICE;
1024                         data_first = 1;
1025                         data_num = out - 1;
1026                 } else {
1027                         vq_err(vq, "Invalid buffer layout out: %u in: %u\n",
1028                                         out, in);
1029                         break;
1030                 }
1031
1032                 /*
1033                  * Check for a sane resp buffer so we can report errors to
1034                  * the guest.
1035                  */
1036                 if (unlikely(vq->iov[out].iov_len !=
1037                                         sizeof(struct virtio_scsi_cmd_resp))) {
1038                         vq_err(vq, "Expecting virtio_scsi_cmd_resp, got %zu"
1039                                 " bytes\n", vq->iov[out].iov_len);
1040                         break;
1041                 }
1042
1043                 if (vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI)) {
1044                         req = &v_req_pi;
1045                         lunp = &v_req_pi.lun[0];
1046                         target = &v_req_pi.lun[1];
1047                         req_size = sizeof(v_req_pi);
1048                         hdr_pi = true;
1049                 } else {
1050                         req = &v_req;
1051                         lunp = &v_req.lun[0];
1052                         target = &v_req.lun[1];
1053                         req_size = sizeof(v_req);
1054                         hdr_pi = false;
1055                 }
1056
1057                 if (unlikely(vq->iov[0].iov_len < req_size)) {
1058                         pr_err("Expecting virtio-scsi header: %zu, got %zu\n",
1059                                req_size, vq->iov[0].iov_len);
1060                         break;
1061                 }
1062                 ret = memcpy_fromiovecend(req, &vq->iov[0], 0, req_size);
1063                 if (unlikely(ret)) {
1064                         vq_err(vq, "Faulted on virtio_scsi_cmd_req\n");
1065                         break;
1066                 }
1067
1068                 /* virtio-scsi spec requires byte 0 of the lun to be 1 */
1069                 if (unlikely(*lunp != 1)) {
1070                         vhost_scsi_send_bad_target(vs, vq, head, out);
1071                         continue;
1072                 }
1073
1074                 tpg = ACCESS_ONCE(vs_tpg[*target]);
1075
1076                 /* Target does not exist, fail the request */
1077                 if (unlikely(!tpg)) {
1078                         vhost_scsi_send_bad_target(vs, vq, head, out);
1079                         continue;
1080                 }
1081
1082                 data_niov = data_num;
1083                 prot_niov = prot_first = prot_bytes = 0;
1084                 /*
1085                  * Determine if any protection information iovecs are preceding
1086                  * the actual data payload, and adjust data_first + data_niov
1087                  * values accordingly for vhost_scsi_map_iov_to_sgl() below.
1088                  *
1089                  * Also extract virtio_scsi header bits for vhost_scsi_get_tag()
1090                  */
1091                 if (hdr_pi) {
1092                         if (v_req_pi.pi_bytesout) {
1093                                 if (data_direction != DMA_TO_DEVICE) {
1094                                         vq_err(vq, "Received non zero do_pi_niov"
1095                                                 ", but wrong data_direction\n");
1096                                         goto err_cmd;
1097                                 }
1098                                 prot_bytes = v_req_pi.pi_bytesout;
1099                         } else if (v_req_pi.pi_bytesin) {
1100                                 if (data_direction != DMA_FROM_DEVICE) {
1101                                         vq_err(vq, "Received non zero di_pi_niov"
1102                                                 ", but wrong data_direction\n");
1103                                         goto err_cmd;
1104                                 }
1105                                 prot_bytes = v_req_pi.pi_bytesin;
1106                         }
1107                         if (prot_bytes) {
1108                                 int tmp = 0;
1109
1110                                 for (i = 0; i < data_num; i++) {
1111                                         tmp += vq->iov[data_first + i].iov_len;
1112                                         prot_niov++;
1113                                         if (tmp >= prot_bytes)
1114                                                 break;
1115                                 }
1116                                 prot_first = data_first;
1117                                 data_first += prot_niov;
1118                                 data_niov = data_num - prot_niov;
1119                         }
1120                         tag = v_req_pi.tag;
1121                         task_attr = v_req_pi.task_attr;
1122                         cdb = &v_req_pi.cdb[0];
1123                         lun = ((v_req_pi.lun[2] << 8) | v_req_pi.lun[3]) & 0x3FFF;
1124                 } else {
1125                         tag = v_req.tag;
1126                         task_attr = v_req.task_attr;
1127                         cdb = &v_req.cdb[0];
1128                         lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;
1129                 }
1130                 exp_data_len = 0;
1131                 for (i = 0; i < data_niov; i++)
1132                         exp_data_len += vq->iov[data_first + i].iov_len;
1133                 /*
1134                  * Check that the received CDB size does not exceed our
1135                  * hardcoded max for vhost-scsi
1136                  *
1137                  * TODO what if cdb was too small for varlen cdb header?
1138                  */
1139                 if (unlikely(scsi_command_size(cdb) > TCM_VHOST_MAX_CDB_SIZE)) {
1140                         vq_err(vq, "Received SCSI CDB with command_size: %d that"
1141                                 " exceeds TCM_VHOST_MAX_CDB_SIZE: %d\n",
1142                                 scsi_command_size(cdb), TCM_VHOST_MAX_CDB_SIZE);
1143                         goto err_cmd;
1144                 }
1145
1146                 cmd = vhost_scsi_get_tag(vq, tpg, cdb, tag, lun, task_attr,
1147                                          exp_data_len + prot_bytes,
1148                                          data_direction);
1149                 if (IS_ERR(cmd)) {
1150                         vq_err(vq, "vhost_scsi_get_tag failed %ld\n",
1151                                         PTR_ERR(cmd));
1152                         goto err_cmd;
1153                 }
1154
1155                 pr_debug("Allocated tv_cmd: %p exp_data_len: %d, data_direction"
1156                         ": %d\n", cmd, exp_data_len, data_direction);
1157
1158                 cmd->tvc_vhost = vs;
1159                 cmd->tvc_vq = vq;
1160                 cmd->tvc_resp = vq->iov[out].iov_base;
1161
1162                 pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
1163                         cmd->tvc_cdb[0], cmd->tvc_lun);
1164
1165                 if (prot_niov) {
1166                         ret = vhost_scsi_map_iov_to_prot(cmd,
1167                                         &vq->iov[prot_first], prot_niov,
1168                                         data_direction == DMA_FROM_DEVICE);
1169                         if (unlikely(ret)) {
1170                                 vq_err(vq, "Failed to map iov to"
1171                                         " prot_sgl\n");
1172                                 goto err_free;
1173                         }
1174                 }
1175                 if (data_direction != DMA_NONE) {
1176                         ret = vhost_scsi_map_iov_to_sgl(cmd,
1177                                         &vq->iov[data_first], data_niov,
1178                                         data_direction == DMA_FROM_DEVICE);
1179                         if (unlikely(ret)) {
1180                                 vq_err(vq, "Failed to map iov to sgl\n");
1181                                 goto err_free;
1182                         }
1183                 }
1184                 /*
1185                  * Save the descriptor from vhost_get_vq_desc() to be used to
1186                  * complete the virtio-scsi request in TCM callback context via
1187                  * tcm_vhost_queue_data_in() and tcm_vhost_queue_status()
1188                  */
1189                 cmd->tvc_vq_desc = head;
1190                 /*
1191                  * Dispatch tv_cmd descriptor for cmwq execution in process
1192                  * context provided by tcm_vhost_workqueue.  This also ensures
1193                  * tv_cmd is executed on the same kworker CPU as this vhost
1194                  * thread to gain positive L2 cache locality effects.
1195                  */
1196                 INIT_WORK(&cmd->work, tcm_vhost_submission_work);
1197                 queue_work(tcm_vhost_workqueue, &cmd->work);
1198         }
1199
1200         mutex_unlock(&vq->mutex);
1201         return;
1202
1203 err_free:
1204         vhost_scsi_free_cmd(cmd);
1205 err_cmd:
1206         vhost_scsi_send_bad_target(vs, vq, head, out);
1207 out:
1208         mutex_unlock(&vq->mutex);
1209 }
1210
1211 static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
1212 {
1213         pr_debug("%s: The handling func for control queue.\n", __func__);
1214 }
1215
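/*
 * Allocate a virtio-scsi event (e.g. LUN hotplug/hotunplug), encode the LUN
 * in the format the guest expects, and queue it for delivery by the event
 * work item.
 */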
1216 static void
1217 tcm_vhost_send_evt(struct vhost_scsi *vs,
1218                    struct tcm_vhost_tpg *tpg,
1219                    struct se_lun *lun,
1220                    u32 event,
1221                    u32 reason)
1222 {
1223         struct tcm_vhost_evt *evt;
1224
1225         evt = tcm_vhost_allocate_evt(vs, event, reason);
1226         if (!evt)
1227                 return;
1228
1229         if (tpg && lun) {
1230                 /* TODO: share lun setup code with virtio-scsi.ko */
1231                 /*
1232                  * Note: evt->event is zeroed when we allocate it and
1233                  * lun[4-7] need to be zero according to virtio-scsi spec.
1234                  */
1235                 evt->event.lun[0] = 0x01;
1236                 evt->event.lun[1] = tpg->tport_tpgt & 0xFF;
1237                 if (lun->unpacked_lun >= 256)
1238                         evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40;
1239                 evt->event.lun[3] = lun->unpacked_lun & 0xFF;
1240         }
1241
1242         llist_add(&evt->list, &vs->vs_event_list);
1243         vhost_work_queue(&vs->dev, &vs->vs_event_work);
1244 }
1245
1246 static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
1247 {
1248         struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
1249                                                 poll.work);
1250         struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
1251
1252         mutex_lock(&vq->mutex);
1253         if (!vq->private_data)
1254                 goto out;
1255
1256         if (vs->vs_events_missed)
1257                 tcm_vhost_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
1258 out:
1259         mutex_unlock(&vq->mutex);
1260 }
1261
1262 static void vhost_scsi_handle_kick(struct vhost_work *work)
1263 {
1264         struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
1265                                                 poll.work);
1266         struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
1267
1268         vhost_scsi_handle_vq(vs, vq);
1269 }
1270
1271 static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
1272 {
1273         vhost_poll_flush(&vs->vqs[index].vq.poll);
1274 }
1275
1276 /* Callers must hold dev mutex */
1277 static void vhost_scsi_flush(struct vhost_scsi *vs)
1278 {
1279         struct vhost_scsi_inflight *old_inflight[VHOST_SCSI_MAX_VQ];
1280         int i;
1281
1282         /* Init new inflight and remember the old inflight */
1283         tcm_vhost_init_inflight(vs, old_inflight);
1284
1285         /*
1286          * The inflight->kref was initialized to 1. We decrement it here to
1287          * indicate the start of the flush operation so that it will reach 0
1288          * when all the reqs are finished.
1289          */
1290         for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
1291                 kref_put(&old_inflight[i]->kref, tcm_vhost_done_inflight);
1292
1293         /* Flush both the vhost poll and vhost work */
1294         for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
1295                 vhost_scsi_flush_vq(vs, i);
1296         vhost_work_flush(&vs->dev, &vs->vs_completion_work);
1297         vhost_work_flush(&vs->dev, &vs->vs_event_work);
1298
1299         /* Wait for all reqs issued before the flush to be finished */
1300         for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
1301                 wait_for_completion(&old_inflight[i]->comp);
1302 }
1303
1304 /*
1305  * Called from vhost_scsi_ioctl() context to walk the list of available
1306  * tcm_vhost_tpg with an active struct tcm_vhost_nexus
1307  *
1308  *  The lock nesting rule is:
1309  *    tcm_vhost_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex
1310  */
1311 static int
1312 vhost_scsi_set_endpoint(struct vhost_scsi *vs,
1313                         struct vhost_scsi_target *t)
1314 {
1315         struct se_portal_group *se_tpg;
1316         struct tcm_vhost_tport *tv_tport;
1317         struct tcm_vhost_tpg *tpg;
1318         struct tcm_vhost_tpg **vs_tpg;
1319         struct vhost_virtqueue *vq;
1320         int index, ret, i, len;
1321         bool match = false;
1322
1323         mutex_lock(&tcm_vhost_mutex);
1324         mutex_lock(&vs->dev.mutex);
1325
1326         /* Verify that ring has been setup correctly. */
1327         for (index = 0; index < vs->dev.nvqs; ++index) {
1329                 if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
1330                         ret = -EFAULT;
1331                         goto out;
1332                 }
1333         }
1334
1335         len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
1336         vs_tpg = kzalloc(len, GFP_KERNEL);
1337         if (!vs_tpg) {
1338                 ret = -ENOMEM;
1339                 goto out;
1340         }
1341         if (vs->vs_tpg)
1342                 memcpy(vs_tpg, vs->vs_tpg, len);
1343
1344         list_for_each_entry(tpg, &tcm_vhost_list, tv_tpg_list) {
1345                 mutex_lock(&tpg->tv_tpg_mutex);
1346                 if (!tpg->tpg_nexus) {
1347                         mutex_unlock(&tpg->tv_tpg_mutex);
1348                         continue;
1349                 }
1350                 if (tpg->tv_tpg_vhost_count != 0) {
1351                         mutex_unlock(&tpg->tv_tpg_mutex);
1352                         continue;
1353                 }
1354                 tv_tport = tpg->tport;
1355
1356                 if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
1357                         if (vs->vs_tpg && vs->vs_tpg[tpg->tport_tpgt]) {
1358                                 kfree(vs_tpg);
1359                                 mutex_unlock(&tpg->tv_tpg_mutex);
1360                                 ret = -EEXIST;
1361                                 goto out;
1362                         }
1363                         /*
1364                          * In order to ensure individual vhost-scsi configfs
1365                          * groups cannot be removed while in use by vhost ioctl,
1366                          * go ahead and take an explicit se_tpg->tpg_group.cg_item
1367                          * dependency now.
1368                          */
1369                         se_tpg = &tpg->se_tpg;
1370                         ret = configfs_depend_item(se_tpg->se_tpg_tfo->tf_subsys,
1371                                                    &se_tpg->tpg_group.cg_item);
1372                         if (ret) {
1373                                 pr_warn("configfs_depend_item() failed: %d\n", ret);
1374                                 kfree(vs_tpg);
1375                                 mutex_unlock(&tpg->tv_tpg_mutex);
1376                                 goto out;
1377                         }
1378                         tpg->tv_tpg_vhost_count++;
1379                         tpg->vhost_scsi = vs;
1380                         vs_tpg[tpg->tport_tpgt] = tpg;
1381                         smp_mb__after_atomic();
1382                         match = true;
1383                 }
1384                 mutex_unlock(&tpg->tv_tpg_mutex);
1385         }
1386
1387         if (match) {
1388                 memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
1389                        sizeof(vs->vs_vhost_wwpn));
1390                 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
1391                         vq = &vs->vqs[i].vq;
1392                         mutex_lock(&vq->mutex);
1393                         vq->private_data = vs_tpg;
1394                         vhost_init_used(vq);
1395                         mutex_unlock(&vq->mutex);
1396                 }
1397                 ret = 0;
1398         } else {
1399                 ret = -EEXIST;
1400         }
1401
1402         /*
1403          * Act as synchronize_rcu to make sure access to
1404          * old vs->vs_tpg is finished.
1405          */
1406         vhost_scsi_flush(vs);
1407         kfree(vs->vs_tpg);
1408         vs->vs_tpg = vs_tpg;
1409
1410 out:
1411         mutex_unlock(&vs->dev.mutex);
1412         mutex_unlock(&tcm_vhost_mutex);
1413         return ret;
1414 }
1415
1416 static int
1417 vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
1418                           struct vhost_scsi_target *t)
1419 {
1420         struct se_portal_group *se_tpg;
1421         struct tcm_vhost_tport *tv_tport;
1422         struct tcm_vhost_tpg *tpg;
1423         struct vhost_virtqueue *vq;
1424         bool match = false;
1425         int index, ret, i;
1426         u8 target;
1427
1428         mutex_lock(&tcm_vhost_mutex);
1429         mutex_lock(&vs->dev.mutex);
1430         /* Verify that ring has been setup correctly. */
1431         for (index = 0; index < vs->dev.nvqs; ++index) {
1432                 if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
1433                         ret = -EFAULT;
1434                         goto err_dev;
1435                 }
1436         }
1437
1438         if (!vs->vs_tpg) {
1439                 ret = 0;
1440                 goto err_dev;
1441         }
1442
1443         for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
1444                 target = i;
1445                 tpg = vs->vs_tpg[target];
1446                 if (!tpg)
1447                         continue;
1448
1449                 mutex_lock(&tpg->tv_tpg_mutex);
1450                 tv_tport = tpg->tport;
1451                 if (!tv_tport) {
1452                         ret = -ENODEV;
1453                         goto err_tpg;
1454                 }
1455
1456                 if (strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
1457                         pr_warn("tv_tport->tport_name: %s, tpg->tport_tpgt: %hu"
1458                                 " does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n",
1459                                 tv_tport->tport_name, tpg->tport_tpgt,
1460                                 t->vhost_wwpn, t->vhost_tpgt);
1461                         ret = -EINVAL;
1462                         goto err_tpg;
1463                 }
1464                 tpg->tv_tpg_vhost_count--;
1465                 tpg->vhost_scsi = NULL;
1466                 vs->vs_tpg[target] = NULL;
1467                 match = true;
1468                 mutex_unlock(&tpg->tv_tpg_mutex);
1469                 /*
1470                  * Release se_tpg->tpg_group.cg_item configfs dependency now
1471                  * to allow vhost-scsi WWPN se_tpg->tpg_group shutdown to occur.
1472                  */
1473                 se_tpg = &tpg->se_tpg;
1474                 configfs_undepend_item(se_tpg->se_tpg_tfo->tf_subsys,
1475                                        &se_tpg->tpg_group.cg_item);
1476         }
1477         if (match) {
1478                 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
1479                         vq = &vs->vqs[i].vq;
1480                         mutex_lock(&vq->mutex);
1481                         vq->private_data = NULL;
1482                         mutex_unlock(&vq->mutex);
1483                 }
1484         }
1485         /*
1486          * Act as synchronize_rcu to make sure access to
1487          * old vs->vs_tpg is finished.
1488          */
1489         vhost_scsi_flush(vs);
1490         kfree(vs->vs_tpg);
1491         vs->vs_tpg = NULL;
1492         WARN_ON(vs->vs_events_nr);
1493         mutex_unlock(&vs->dev.mutex);
1494         mutex_unlock(&tcm_vhost_mutex);
1495         return 0;
1496
1497 err_tpg:
1498         mutex_unlock(&tpg->tv_tpg_mutex);
1499 err_dev:
1500         mutex_unlock(&vs->dev.mutex);
1501         mutex_unlock(&tcm_vhost_mutex);
1502         return ret;
1503 }
1504
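/*
 * VHOST_SET_FEATURES handler: reject anything outside VHOST_SCSI_FEATURES,
 * require log access when VHOST_F_LOG_ALL is requested, then publish the
 * acked feature bits to every virtqueue under its own mutex.
 */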
1505 static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
1506 {
1507         struct vhost_virtqueue *vq;
1508         int i;
1509
1510         if (features & ~VHOST_SCSI_FEATURES)
1511                 return -EOPNOTSUPP;
1512
1513         mutex_lock(&vs->dev.mutex);
1514         if ((features & (1 << VHOST_F_LOG_ALL)) &&
1515             !vhost_log_access_ok(&vs->dev)) {
1516                 mutex_unlock(&vs->dev.mutex);
1517                 return -EFAULT;
1518         }
1519
1520         for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
1521                 vq = &vs->vqs[i].vq;
1522                 mutex_lock(&vq->mutex);
1523                 vq->acked_features = features;
1524                 mutex_unlock(&vq->mutex);
1525         }
1526         mutex_unlock(&vs->dev.mutex);
1527         return 0;
1528 }
1529
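/*
 * Char-device open: allocate struct vhost_scsi (falling back from kzalloc to
 * vzalloc for large allocations), wire up the control/event/IO virtqueue kick
 * handlers, and initialize the vhost_dev and inflight tracking.
 */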
1530 static int vhost_scsi_open(struct inode *inode, struct file *f)
1531 {
1532         struct vhost_scsi *vs;
1533         struct vhost_virtqueue **vqs;
1534         int r = -ENOMEM, i;
1535
1536         vs = kzalloc(sizeof(*vs), GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
1537         if (!vs) {
1538                 vs = vzalloc(sizeof(*vs));
1539                 if (!vs)
1540                         goto err_vs;
1541         }
1542
1543         vqs = kmalloc(VHOST_SCSI_MAX_VQ * sizeof(*vqs), GFP_KERNEL);
1544         if (!vqs)
1545                 goto err_vqs;
1546
1547         vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work);
1548         vhost_work_init(&vs->vs_event_work, tcm_vhost_evt_work);
1549
1550         vs->vs_events_nr = 0;
1551         vs->vs_events_missed = false;
1552
1553         vqs[VHOST_SCSI_VQ_CTL] = &vs->vqs[VHOST_SCSI_VQ_CTL].vq;
1554         vqs[VHOST_SCSI_VQ_EVT] = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1555         vs->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick;
1556         vs->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick;
1557         for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) {
1558                 vqs[i] = &vs->vqs[i].vq;
1559                 vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
1560         }
1561         vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ);
1562
1563         tcm_vhost_init_inflight(vs, NULL);
1564
1565         f->private_data = vs;
1566         return 0;
1567
1568 err_vqs:
1569         kvfree(vs);
1570 err_vs:
1571         return r;
1572 }
1573
1574 static int vhost_scsi_release(struct inode *inode, struct file *f)
1575 {
1576         struct vhost_scsi *vs = f->private_data;
1577         struct vhost_scsi_target t;
1578
1579         mutex_lock(&vs->dev.mutex);
1580         memcpy(t.vhost_wwpn, vs->vs_vhost_wwpn, sizeof(t.vhost_wwpn));
1581         mutex_unlock(&vs->dev.mutex);
1582         vhost_scsi_clear_endpoint(vs, &t);
1583         vhost_dev_stop(&vs->dev);
1584         vhost_dev_cleanup(&vs->dev, false);
1585         /* Jobs can re-queue themselves in evt kick handler. Do extra flush. */
1586         vhost_scsi_flush(vs);
1587         kfree(vs->dev.vqs);
1588         kvfree(vs);
1589         return 0;
1590 }
1591
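/*
 * Dispatch for the vhost-scsi specific ioctls (endpoint setup/teardown, ABI
 * version, the events-missed flag and feature negotiation); anything else is
 * passed through to the generic vhost_dev_ioctl()/vhost_vring_ioctl() paths.
 *
 * Illustrative userspace sketch (hypothetical WWPN; assumes <linux/vhost.h>,
 * and that the usual VHOST_SET_OWNER / memory table / vring setup has already
 * been done; error handling omitted):
 *
 *	int vfd = open("/dev/vhost-scsi", O_RDWR);
 *	struct vhost_scsi_target backend = { 0 };
 *
 *	snprintf(backend.vhost_wwpn, sizeof(backend.vhost_wwpn),
 *		 "naa.600140554cf3a18e");
 *	backend.vhost_tpgt = 1;
 *	ioctl(vfd, VHOST_SCSI_SET_ENDPOINT, &backend);
 *	...
 *	ioctl(vfd, VHOST_SCSI_CLEAR_ENDPOINT, &backend);
 */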
1592 static long
1593 vhost_scsi_ioctl(struct file *f,
1594                  unsigned int ioctl,
1595                  unsigned long arg)
1596 {
1597         struct vhost_scsi *vs = f->private_data;
1598         struct vhost_scsi_target backend;
1599         void __user *argp = (void __user *)arg;
1600         u64 __user *featurep = argp;
1601         u32 __user *eventsp = argp;
1602         u32 events_missed;
1603         u64 features;
1604         int r, abi_version = VHOST_SCSI_ABI_VERSION;
1605         struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1606
1607         switch (ioctl) {
1608         case VHOST_SCSI_SET_ENDPOINT:
1609                 if (copy_from_user(&backend, argp, sizeof backend))
1610                         return -EFAULT;
1611                 if (backend.reserved != 0)
1612                         return -EOPNOTSUPP;
1613
1614                 return vhost_scsi_set_endpoint(vs, &backend);
1615         case VHOST_SCSI_CLEAR_ENDPOINT:
1616                 if (copy_from_user(&backend, argp, sizeof backend))
1617                         return -EFAULT;
1618                 if (backend.reserved != 0)
1619                         return -EOPNOTSUPP;
1620
1621                 return vhost_scsi_clear_endpoint(vs, &backend);
1622         case VHOST_SCSI_GET_ABI_VERSION:
1623                 if (copy_to_user(argp, &abi_version, sizeof abi_version))
1624                         return -EFAULT;
1625                 return 0;
1626         case VHOST_SCSI_SET_EVENTS_MISSED:
1627                 if (get_user(events_missed, eventsp))
1628                         return -EFAULT;
1629                 mutex_lock(&vq->mutex);
1630                 vs->vs_events_missed = events_missed;
1631                 mutex_unlock(&vq->mutex);
1632                 return 0;
1633         case VHOST_SCSI_GET_EVENTS_MISSED:
1634                 mutex_lock(&vq->mutex);
1635                 events_missed = vs->vs_events_missed;
1636                 mutex_unlock(&vq->mutex);
1637                 if (put_user(events_missed, eventsp))
1638                         return -EFAULT;
1639                 return 0;
1640         case VHOST_GET_FEATURES:
1641                 features = VHOST_SCSI_FEATURES;
1642                 if (copy_to_user(featurep, &features, sizeof features))
1643                         return -EFAULT;
1644                 return 0;
1645         case VHOST_SET_FEATURES:
1646                 if (copy_from_user(&features, featurep, sizeof features))
1647                         return -EFAULT;
1648                 return vhost_scsi_set_features(vs, features);
1649         default:
1650                 mutex_lock(&vs->dev.mutex);
1651                 r = vhost_dev_ioctl(&vs->dev, ioctl, argp);
1652                 /* TODO: flush backend after dev ioctl. */
1653                 if (r == -ENOIOCTLCMD)
1654                         r = vhost_vring_ioctl(&vs->dev, ioctl, argp);
1655                 mutex_unlock(&vs->dev.mutex);
1656                 return r;
1657         }
1658 }
1659
1660 #ifdef CONFIG_COMPAT
1661 static long vhost_scsi_compat_ioctl(struct file *f, unsigned int ioctl,
1662                                 unsigned long arg)
1663 {
1664         return vhost_scsi_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
1665 }
1666 #endif
1667
1668 static const struct file_operations vhost_scsi_fops = {
1669         .owner          = THIS_MODULE,
1670         .release        = vhost_scsi_release,
1671         .unlocked_ioctl = vhost_scsi_ioctl,
1672 #ifdef CONFIG_COMPAT
1673         .compat_ioctl   = vhost_scsi_compat_ioctl,
1674 #endif
1675         .open           = vhost_scsi_open,
1676         .llseek         = noop_llseek,
1677 };
1678
1679 static struct miscdevice vhost_scsi_misc = {
1680         MISC_DYNAMIC_MINOR,
1681         "vhost-scsi",
1682         &vhost_scsi_fops,
1683 };
1684
1685 static int __init vhost_scsi_register(void)
1686 {
1687         return misc_register(&vhost_scsi_misc);
1688 }
1689
1690 static int vhost_scsi_deregister(void)
1691 {
1692         return misc_deregister(&vhost_scsi_misc);
1693 }
1694
1695 static char *tcm_vhost_dump_proto_id(struct tcm_vhost_tport *tport)
1696 {
1697         switch (tport->tport_proto_id) {
1698         case SCSI_PROTOCOL_SAS:
1699                 return "SAS";
1700         case SCSI_PROTOCOL_FCP:
1701                 return "FCP";
1702         case SCSI_PROTOCOL_ISCSI:
1703                 return "iSCSI";
1704         default:
1705                 break;
1706         }
1707
1708         return "Unknown";
1709 }
1710
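/*
 * Push a VIRTIO_SCSI_T_TRANSPORT_RESET event (rescan on plug, removed on
 * unplug) to the guest via the event virtqueue, but only when the guest has
 * negotiated VIRTIO_SCSI_F_HOTPLUG and the tpg is attached to a vhost_scsi
 * instance.
 */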
1711 static void
1712 tcm_vhost_do_plug(struct tcm_vhost_tpg *tpg,
1713                   struct se_lun *lun, bool plug)
1714 {
1715
1716         struct vhost_scsi *vs = tpg->vhost_scsi;
1717         struct vhost_virtqueue *vq;
1718         u32 reason;
1719
1720         if (!vs)
1721                 return;
1722
1723         mutex_lock(&vs->dev.mutex);
1724
1725         if (plug)
1726                 reason = VIRTIO_SCSI_EVT_RESET_RESCAN;
1727         else
1728                 reason = VIRTIO_SCSI_EVT_RESET_REMOVED;
1729
1730         vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1731         mutex_lock(&vq->mutex);
1732         if (vhost_has_feature(vq, VIRTIO_SCSI_F_HOTPLUG))
1733                 tcm_vhost_send_evt(vs, tpg, lun,
1734                                    VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
1735         mutex_unlock(&vq->mutex);
1736         mutex_unlock(&vs->dev.mutex);
1737 }
1738
1739 static void tcm_vhost_hotplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun)
1740 {
1741         tcm_vhost_do_plug(tpg, lun, true);
1742 }
1743
1744 static void tcm_vhost_hotunplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun)
1745 {
1746         tcm_vhost_do_plug(tpg, lun, false);
1747 }
1748
1749 static int tcm_vhost_port_link(struct se_portal_group *se_tpg,
1750                                struct se_lun *lun)
1751 {
1752         struct tcm_vhost_tpg *tpg = container_of(se_tpg,
1753                                 struct tcm_vhost_tpg, se_tpg);
1754
1755         mutex_lock(&tcm_vhost_mutex);
1756
1757         mutex_lock(&tpg->tv_tpg_mutex);
1758         tpg->tv_tpg_port_count++;
1759         mutex_unlock(&tpg->tv_tpg_mutex);
1760
1761         tcm_vhost_hotplug(tpg, lun);
1762
1763         mutex_unlock(&tcm_vhost_mutex);
1764
1765         return 0;
1766 }
1767
1768 static void tcm_vhost_port_unlink(struct se_portal_group *se_tpg,
1769                                   struct se_lun *lun)
1770 {
1771         struct tcm_vhost_tpg *tpg = container_of(se_tpg,
1772                                 struct tcm_vhost_tpg, se_tpg);
1773
1774         mutex_lock(&tcm_vhost_mutex);
1775
1776         mutex_lock(&tpg->tv_tpg_mutex);
1777         tpg->tv_tpg_port_count--;
1778         mutex_unlock(&tpg->tv_tpg_mutex);
1779
1780         tcm_vhost_hotunplug(tpg, lun);
1781
1782         mutex_unlock(&tcm_vhost_mutex);
1783 }
1784
1785 static struct se_node_acl *
1786 tcm_vhost_make_nodeacl(struct se_portal_group *se_tpg,
1787                        struct config_group *group,
1788                        const char *name)
1789 {
1790         struct se_node_acl *se_nacl, *se_nacl_new;
1791         struct tcm_vhost_nacl *nacl;
1792         u64 wwpn = 0;
1793         u32 nexus_depth;
1794
1795         /* if (tcm_vhost_parse_wwn(name, &wwpn, 1) < 0)
1796                 return ERR_PTR(-EINVAL); */
1797         se_nacl_new = tcm_vhost_alloc_fabric_acl(se_tpg);
1798         if (!se_nacl_new)
1799                 return ERR_PTR(-ENOMEM);
1800
1801         nexus_depth = 1;
1802         /*
1803          * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
1804          * when converting a NodeACL from demo mode -> explicit
1805          */
1806         se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
1807                                 name, nexus_depth);
1808         if (IS_ERR(se_nacl)) {
1809                 tcm_vhost_release_fabric_acl(se_tpg, se_nacl_new);
1810                 return se_nacl;
1811         }
1812         /*
1813          * Locate our struct tcm_vhost_nacl and set the FC Nport WWPN
1814          */
1815         nacl = container_of(se_nacl, struct tcm_vhost_nacl, se_node_acl);
1816         nacl->iport_wwpn = wwpn;
1817
1818         return se_nacl;
1819 }
1820
1821 static void tcm_vhost_drop_nodeacl(struct se_node_acl *se_acl)
1822 {
1823         struct tcm_vhost_nacl *nacl = container_of(se_acl,
1824                                 struct tcm_vhost_nacl, se_node_acl);
1825         core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);
1826         kfree(nacl);
1827 }
1828
1829 static void tcm_vhost_free_cmd_map_res(struct tcm_vhost_nexus *nexus,
1830                                        struct se_session *se_sess)
1831 {
1832         struct tcm_vhost_cmd *tv_cmd;
1833         unsigned int i;
1834
1835         if (!se_sess->sess_cmd_map)
1836                 return;
1837
1838         for (i = 0; i < TCM_VHOST_DEFAULT_TAGS; i++) {
1839                 tv_cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[i];
1840
1841                 kfree(tv_cmd->tvc_sgl);
1842                 kfree(tv_cmd->tvc_prot_sgl);
1843                 kfree(tv_cmd->tvc_upages);
1844         }
1845 }
1846
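/*
 * Create the single I_T nexus for a tpg: set up a tag-pool backed se_session
 * with TCM_VHOST_DEFAULT_TAGS descriptors and preallocate the per-command
 * scatterlists, protection scatterlists and user page arrays so the
 * per-command I/O path does not have to allocate them.
 */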
1847 static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg,
1848                                 const char *name)
1849 {
1850         struct se_portal_group *se_tpg;
1851         struct se_session *se_sess;
1852         struct tcm_vhost_nexus *tv_nexus;
1853         struct tcm_vhost_cmd *tv_cmd;
1854         unsigned int i;
1855
1856         mutex_lock(&tpg->tv_tpg_mutex);
1857         if (tpg->tpg_nexus) {
1858                 mutex_unlock(&tpg->tv_tpg_mutex);
1859                 pr_debug("tpg->tpg_nexus already exists\n");
1860                 return -EEXIST;
1861         }
1862         se_tpg = &tpg->se_tpg;
1863
1864         tv_nexus = kzalloc(sizeof(struct tcm_vhost_nexus), GFP_KERNEL);
1865         if (!tv_nexus) {
1866                 mutex_unlock(&tpg->tv_tpg_mutex);
1867                 pr_err("Unable to allocate struct tcm_vhost_nexus\n");
1868                 return -ENOMEM;
1869         }
1870         /*
1871          *  Initialize the struct se_session pointer and setup tagpool
1872          *  for struct tcm_vhost_cmd descriptors
1873          */
1874         tv_nexus->tvn_se_sess = transport_init_session_tags(
1875                                         TCM_VHOST_DEFAULT_TAGS,
1876                                         sizeof(struct tcm_vhost_cmd),
1877                                         TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS);
1878         if (IS_ERR(tv_nexus->tvn_se_sess)) {
1879                 mutex_unlock(&tpg->tv_tpg_mutex);
1880                 kfree(tv_nexus);
1881                 return -ENOMEM;
1882         }
1883         se_sess = tv_nexus->tvn_se_sess;
1884         for (i = 0; i < TCM_VHOST_DEFAULT_TAGS; i++) {
1885                 tv_cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[i];
1886
1887                 tv_cmd->tvc_sgl = kzalloc(sizeof(struct scatterlist) *
1888                                         TCM_VHOST_PREALLOC_SGLS, GFP_KERNEL);
1889                 if (!tv_cmd->tvc_sgl) {
1890                         mutex_unlock(&tpg->tv_tpg_mutex);
1891                         pr_err("Unable to allocate tv_cmd->tvc_sgl\n");
1892                         goto out;
1893                 }
1894
1895                 tv_cmd->tvc_upages = kzalloc(sizeof(struct page *) *
1896                                         TCM_VHOST_PREALLOC_UPAGES, GFP_KERNEL);
1897                 if (!tv_cmd->tvc_upages) {
1898                         mutex_unlock(&tpg->tv_tpg_mutex);
1899                         pr_err("Unable to allocate tv_cmd->tvc_upages\n");
1900                         goto out;
1901                 }
1902
1903                 tv_cmd->tvc_prot_sgl = kzalloc(sizeof(struct scatterlist) *
1904                                         TCM_VHOST_PREALLOC_PROT_SGLS, GFP_KERNEL);
1905                 if (!tv_cmd->tvc_prot_sgl) {
1906                         mutex_unlock(&tpg->tv_tpg_mutex);
1907                         pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n");
1908                         goto out;
1909                 }
1910         }
1911         /*
1912          * Since we are running in 'demo mode' this call will generate a
1913          * struct se_node_acl for the tcm_vhost struct se_portal_group with
1914          * the SCSI Initiator port name of the passed configfs group 'name'.
1915          */
1916         tv_nexus->tvn_se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
1917                                 se_tpg, (unsigned char *)name);
1918         if (!tv_nexus->tvn_se_sess->se_node_acl) {
1919                 mutex_unlock(&tpg->tv_tpg_mutex);
1920                 pr_debug("core_tpg_check_initiator_node_acl() failed"
1921                                 " for %s\n", name);
1922                 goto out;
1923         }
1924         /*
1925          * Now register the TCM vhost virtual I_T Nexus as active with the
1926          * call to __transport_register_session()
1927          */
1928         __transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
1929                         tv_nexus->tvn_se_sess, tv_nexus);
1930         tpg->tpg_nexus = tv_nexus;
1931
1932         mutex_unlock(&tpg->tv_tpg_mutex);
1933         return 0;
1934
1935 out:
1936         tcm_vhost_free_cmd_map_res(tv_nexus, se_sess);
1937         transport_free_session(se_sess);
1938         kfree(tv_nexus);
1939         return -ENOMEM;
1940 }
1941
1942 static int tcm_vhost_drop_nexus(struct tcm_vhost_tpg *tpg)
1943 {
1944         struct se_session *se_sess;
1945         struct tcm_vhost_nexus *tv_nexus;
1946
1947         mutex_lock(&tpg->tv_tpg_mutex);
1948         tv_nexus = tpg->tpg_nexus;
1949         if (!tv_nexus) {
1950                 mutex_unlock(&tpg->tv_tpg_mutex);
1951                 return -ENODEV;
1952         }
1953
1954         se_sess = tv_nexus->tvn_se_sess;
1955         if (!se_sess) {
1956                 mutex_unlock(&tpg->tv_tpg_mutex);
1957                 return -ENODEV;
1958         }
1959
1960         if (tpg->tv_tpg_port_count != 0) {
1961                 mutex_unlock(&tpg->tv_tpg_mutex);
1962                 pr_err("Unable to remove TCM_vhost I_T Nexus with"
1963                         " active TPG port count: %d\n",
1964                         tpg->tv_tpg_port_count);
1965                 return -EBUSY;
1966         }
1967
1968         if (tpg->tv_tpg_vhost_count != 0) {
1969                 mutex_unlock(&tpg->tv_tpg_mutex);
1970                 pr_err("Unable to remove TCM_vhost I_T Nexus with"
1971                         " active TPG vhost count: %d\n",
1972                         tpg->tv_tpg_vhost_count);
1973                 return -EBUSY;
1974         }
1975
1976         pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated"
1977                 " %s Initiator Port: %s\n", tcm_vhost_dump_proto_id(tpg->tport),
1978                 tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
1979
1980         tcm_vhost_free_cmd_map_res(tv_nexus, se_sess);
1981         /*
1982          * Release the SCSI I_T Nexus to the emulated vhost Target Port
1983          */
1984         transport_deregister_session(tv_nexus->tvn_se_sess);
1985         tpg->tpg_nexus = NULL;
1986         mutex_unlock(&tpg->tv_tpg_mutex);
1987
1988         kfree(tv_nexus);
1989         return 0;
1990 }
1991
1992 static ssize_t tcm_vhost_tpg_show_nexus(struct se_portal_group *se_tpg,
1993                                         char *page)
1994 {
1995         struct tcm_vhost_tpg *tpg = container_of(se_tpg,
1996                                 struct tcm_vhost_tpg, se_tpg);
1997         struct tcm_vhost_nexus *tv_nexus;
1998         ssize_t ret;
1999
2000         mutex_lock(&tpg->tv_tpg_mutex);
2001         tv_nexus = tpg->tpg_nexus;
2002         if (!tv_nexus) {
2003                 mutex_unlock(&tpg->tv_tpg_mutex);
2004                 return -ENODEV;
2005         }
2006         ret = snprintf(page, PAGE_SIZE, "%s\n",
2007                         tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
2008         mutex_unlock(&tpg->tv_tpg_mutex);
2009
2010         return ret;
2011 }
2012
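/*
 * ConfigFS "nexus" attribute store: writing an initiator port name whose
 * prefix (naa./fc./iqn.) matches the tport protocol creates the I_T nexus
 * for this tpg; writing the literal string "NULL" drops it again.  With the
 * usual configfs mount this is the nexus file under
 * /sys/kernel/config/target/vhost/<target wwpn>/tpgt_<N>/ (path shown for
 * illustration).
 */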
2013 static ssize_t tcm_vhost_tpg_store_nexus(struct se_portal_group *se_tpg,
2014                                          const char *page,
2015                                          size_t count)
2016 {
2017         struct tcm_vhost_tpg *tpg = container_of(se_tpg,
2018                                 struct tcm_vhost_tpg, se_tpg);
2019         struct tcm_vhost_tport *tport_wwn = tpg->tport;
2020         unsigned char i_port[TCM_VHOST_NAMELEN], *ptr, *port_ptr;
2021         int ret;
2022         /*
2023          * Shut down the active I_T nexus if 'NULL' is passed.
2024          */
2025         if (!strncmp(page, "NULL", 4)) {
2026                 ret = tcm_vhost_drop_nexus(tpg);
2027                 return (!ret) ? count : ret;
2028         }
2029         /*
2030          * Otherwise make sure the passed virtual Initiator port WWN matches
2031          * the fabric protocol_id set in tcm_vhost_make_tport(), and call
2032          * tcm_vhost_make_nexus().
2033          */
2034         if (strlen(page) >= TCM_VHOST_NAMELEN) {
2035                 pr_err("Emulated NAA Sas Address: %s, exceeds"
2036                                 " max: %d\n", page, TCM_VHOST_NAMELEN);
2037                 return -EINVAL;
2038         }
2039         snprintf(&i_port[0], TCM_VHOST_NAMELEN, "%s", page);
2040
2041         ptr = strstr(i_port, "naa.");
2042         if (ptr) {
2043                 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) {
2044                         pr_err("Passed SAS Initiator Port %s does not"
2045                                 " match target port protoid: %s\n", i_port,
2046                                 tcm_vhost_dump_proto_id(tport_wwn));
2047                         return -EINVAL;
2048                 }
2049                 port_ptr = &i_port[0];
2050                 goto check_newline;
2051         }
2052         ptr = strstr(i_port, "fc.");
2053         if (ptr) {
2054                 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) {
2055                         pr_err("Passed FCP Initiator Port %s does not"
2056                                 " match target port protoid: %s\n", i_port,
2057                                 tcm_vhost_dump_proto_id(tport_wwn));
2058                         return -EINVAL;
2059                 }
2060                 port_ptr = &i_port[3]; /* Skip over "fc." */
2061                 goto check_newline;
2062         }
2063         ptr = strstr(i_port, "iqn.");
2064         if (ptr) {
2065                 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) {
2066                         pr_err("Passed iSCSI Initiator Port %s does not"
2067                                 " match target port protoid: %s\n", i_port,
2068                                 tcm_vhost_dump_proto_id(tport_wwn));
2069                         return -EINVAL;
2070                 }
2071                 port_ptr = &i_port[0];
2072                 goto check_newline;
2073         }
2074         pr_err("Unable to locate prefix for emulated Initiator Port:"
2075                         " %s\n", i_port);
2076         return -EINVAL;
2077         /*
2078          * Clear any trailing newline for the NAA WWN
2079          */
2080 check_newline:
2081         if (i_port[strlen(i_port)-1] == '\n')
2082                 i_port[strlen(i_port)-1] = '\0';
2083
2084         ret = tcm_vhost_make_nexus(tpg, port_ptr);
2085         if (ret < 0)
2086                 return ret;
2087
2088         return count;
2089 }
2090
2091 TF_TPG_BASE_ATTR(tcm_vhost, nexus, S_IRUGO | S_IWUSR);
2092
2093 static struct configfs_attribute *tcm_vhost_tpg_attrs[] = {
2094         &tcm_vhost_tpg_nexus.attr,
2095         NULL,
2096 };
2097
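/*
 * Called when a tpgt_N directory is created under a vhost WWN in configfs:
 * parse the tag from the directory name, register the se_tpg with the target
 * core and add the new tpg to the global tcm_vhost_list.
 */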
2098 static struct se_portal_group *
2099 tcm_vhost_make_tpg(struct se_wwn *wwn,
2100                    struct config_group *group,
2101                    const char *name)
2102 {
2103         struct tcm_vhost_tport *tport = container_of(wwn,
2104                         struct tcm_vhost_tport, tport_wwn);
2105
2106         struct tcm_vhost_tpg *tpg;
2107         unsigned long tpgt;
2108         int ret;
2109
2110         if (strstr(name, "tpgt_") != name)
2111                 return ERR_PTR(-EINVAL);
2112         if (kstrtoul(name + 5, 10, &tpgt) || tpgt >= VHOST_SCSI_MAX_TARGET)
2113                 return ERR_PTR(-EINVAL);
2114
2115         tpg = kzalloc(sizeof(struct tcm_vhost_tpg), GFP_KERNEL);
2116         if (!tpg) {
2117                 pr_err("Unable to allocate struct tcm_vhost_tpg\n");
2118                 return ERR_PTR(-ENOMEM);
2119         }
2120         mutex_init(&tpg->tv_tpg_mutex);
2121         INIT_LIST_HEAD(&tpg->tv_tpg_list);
2122         tpg->tport = tport;
2123         tpg->tport_tpgt = tpgt;
2124
2125         ret = core_tpg_register(&tcm_vhost_fabric_configfs->tf_ops, wwn,
2126                                 &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
2127         if (ret < 0) {
2128                 kfree(tpg);
2129                 return NULL;
2130         }
2131         mutex_lock(&tcm_vhost_mutex);
2132         list_add_tail(&tpg->tv_tpg_list, &tcm_vhost_list);
2133         mutex_unlock(&tcm_vhost_mutex);
2134
2135         return &tpg->se_tpg;
2136 }
2137
2138 static void tcm_vhost_drop_tpg(struct se_portal_group *se_tpg)
2139 {
2140         struct tcm_vhost_tpg *tpg = container_of(se_tpg,
2141                                 struct tcm_vhost_tpg, se_tpg);
2142
2143         mutex_lock(&tcm_vhost_mutex);
2144         list_del(&tpg->tv_tpg_list);
2145         mutex_unlock(&tcm_vhost_mutex);
2146         /*
2147          * Release the virtual I_T Nexus for this vhost TPG
2148          */
2149         tcm_vhost_drop_nexus(tpg);
2150         /*
2151          * Deregister the se_tpg from TCM.
2152          */
2153         core_tpg_deregister(se_tpg);
2154         kfree(tpg);
2155 }
2156
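/*
 * Called when a WWN directory is created under the vhost fabric in configfs
 * (/sys/kernel/config/target/vhost/ with the usual mount): derive the
 * emulated protocol (SAS/FCP/iSCSI) from the naa./fc./iqn. prefix of the
 * directory name and record the target port name.
 */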
2157 static struct se_wwn *
2158 tcm_vhost_make_tport(struct target_fabric_configfs *tf,
2159                      struct config_group *group,
2160                      const char *name)
2161 {
2162         struct tcm_vhost_tport *tport;
2163         char *ptr;
2164         u64 wwpn = 0;
2165         int off = 0;
2166
2167         /* if (tcm_vhost_parse_wwn(name, &wwpn, 1) < 0)
2168                 return ERR_PTR(-EINVAL); */
2169
2170         tport = kzalloc(sizeof(struct tcm_vhost_tport), GFP_KERNEL);
2171         if (!tport) {
2172                 pr_err("Unable to allocate struct tcm_vhost_tport\n");
2173                 return ERR_PTR(-ENOMEM);
2174         }
2175         tport->tport_wwpn = wwpn;
2176         /*
2177          * Determine the emulated Protocol Identifier and Target Port Name
2178          * based on the incoming configfs directory name.
2179          */
2180         ptr = strstr(name, "naa.");
2181         if (ptr) {
2182                 tport->tport_proto_id = SCSI_PROTOCOL_SAS;
2183                 goto check_len;
2184         }
2185         ptr = strstr(name, "fc.");
2186         if (ptr) {
2187                 tport->tport_proto_id = SCSI_PROTOCOL_FCP;
2188                 off = 3; /* Skip over "fc." */
2189                 goto check_len;
2190         }
2191         ptr = strstr(name, "iqn.");
2192         if (ptr) {
2193                 tport->tport_proto_id = SCSI_PROTOCOL_ISCSI;
2194                 goto check_len;
2195         }
2196
2197         pr_err("Unable to locate prefix for emulated Target Port:"
2198                         " %s\n", name);
2199         kfree(tport);
2200         return ERR_PTR(-EINVAL);
2201
2202 check_len:
2203         if (strlen(name) >= TCM_VHOST_NAMELEN) {
2204                 pr_err("Emulated %s Address: %s, exceeds"
2205                         " max: %d\n", name, tcm_vhost_dump_proto_id(tport),
2206                         TCM_VHOST_NAMELEN);
2207                 kfree(tport);
2208                 return ERR_PTR(-EINVAL);
2209         }
2210         snprintf(&tport->tport_name[0], TCM_VHOST_NAMELEN, "%s", &name[off]);
2211
2212         pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target"
2213                 " %s Address: %s\n", tcm_vhost_dump_proto_id(tport), name);
2214
2215         return &tport->tport_wwn;
2216 }
2217
2218 static void tcm_vhost_drop_tport(struct se_wwn *wwn)
2219 {
2220         struct tcm_vhost_tport *tport = container_of(wwn,
2221                                 struct tcm_vhost_tport, tport_wwn);
2222
2223         pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target"
2224                 " %s Address: %s\n", tcm_vhost_dump_proto_id(tport),
2225                 tport->tport_name);
2226
2227         kfree(tport);
2228 }
2229
2230 static ssize_t
2231 tcm_vhost_wwn_show_attr_version(struct target_fabric_configfs *tf,
2232                                 char *page)
2233 {
2234         return sprintf(page, "TCM_VHOST fabric module %s on %s/%s"
2235                 "on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname,
2236                 utsname()->machine);
2237 }
2238
2239 TF_WWN_ATTR_RO(tcm_vhost, version);
2240
2241 static struct configfs_attribute *tcm_vhost_wwn_attrs[] = {
2242         &tcm_vhost_wwn_version.attr,
2243         NULL,
2244 };
2245
2246 static struct target_core_fabric_ops tcm_vhost_ops = {
2247         .get_fabric_name                = tcm_vhost_get_fabric_name,
2248         .get_fabric_proto_ident         = tcm_vhost_get_fabric_proto_ident,
2249         .tpg_get_wwn                    = tcm_vhost_get_fabric_wwn,
2250         .tpg_get_tag                    = tcm_vhost_get_tag,
2251         .tpg_get_default_depth          = tcm_vhost_get_default_depth,
2252         .tpg_get_pr_transport_id        = tcm_vhost_get_pr_transport_id,
2253         .tpg_get_pr_transport_id_len    = tcm_vhost_get_pr_transport_id_len,
2254         .tpg_parse_pr_out_transport_id  = tcm_vhost_parse_pr_out_transport_id,
2255         .tpg_check_demo_mode            = tcm_vhost_check_true,
2256         .tpg_check_demo_mode_cache      = tcm_vhost_check_true,
2257         .tpg_check_demo_mode_write_protect = tcm_vhost_check_false,
2258         .tpg_check_prod_mode_write_protect = tcm_vhost_check_false,
2259         .tpg_alloc_fabric_acl           = tcm_vhost_alloc_fabric_acl,
2260         .tpg_release_fabric_acl         = tcm_vhost_release_fabric_acl,
2261         .tpg_get_inst_index             = tcm_vhost_tpg_get_inst_index,
2262         .release_cmd                    = tcm_vhost_release_cmd,
2263         .check_stop_free                = vhost_scsi_check_stop_free,
2264         .shutdown_session               = tcm_vhost_shutdown_session,
2265         .close_session                  = tcm_vhost_close_session,
2266         .sess_get_index                 = tcm_vhost_sess_get_index,
2267         .sess_get_initiator_sid         = NULL,
2268         .write_pending                  = tcm_vhost_write_pending,
2269         .write_pending_status           = tcm_vhost_write_pending_status,
2270         .set_default_node_attributes    = tcm_vhost_set_default_node_attrs,
2271         .get_task_tag                   = tcm_vhost_get_task_tag,
2272         .get_cmd_state                  = tcm_vhost_get_cmd_state,
2273         .queue_data_in                  = tcm_vhost_queue_data_in,
2274         .queue_status                   = tcm_vhost_queue_status,
2275         .queue_tm_rsp                   = tcm_vhost_queue_tm_rsp,
2276         .aborted_task                   = tcm_vhost_aborted_task,
2277         /*
2278          * Setup callers for generic logic in target_core_fabric_configfs.c
2279          */
2280         .fabric_make_wwn                = tcm_vhost_make_tport,
2281         .fabric_drop_wwn                = tcm_vhost_drop_tport,
2282         .fabric_make_tpg                = tcm_vhost_make_tpg,
2283         .fabric_drop_tpg                = tcm_vhost_drop_tpg,
2284         .fabric_post_link               = tcm_vhost_port_link,
2285         .fabric_pre_unlink              = tcm_vhost_port_unlink,
2286         .fabric_make_np                 = NULL,
2287         .fabric_drop_np                 = NULL,
2288         .fabric_make_nodeacl            = tcm_vhost_make_nodeacl,
2289         .fabric_drop_nodeacl            = tcm_vhost_drop_nodeacl,
2290 };
2291
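/*
 * Register the "vhost" fabric with the generic target configfs code at module
 * init time; the fabric name chosen here is what shows up as the
 * /sys/kernel/config/target/vhost/ directory (path shown for the usual
 * configfs mount point).
 */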
2292 static int tcm_vhost_register_configfs(void)
2293 {
2294         struct target_fabric_configfs *fabric;
2295         int ret;
2296
2297         pr_debug("TCM_VHOST fabric module %s on %s/%s"
2298                 " on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname,
2299                 utsname()->machine);
2300         /*
2301          * Register the top level struct config_item_type with TCM core
2302          */
2303         fabric = target_fabric_configfs_init(THIS_MODULE, "vhost");
2304         if (IS_ERR(fabric)) {
2305                 pr_err("target_fabric_configfs_init() failed\n");
2306                 return PTR_ERR(fabric);
2307         }
2308         /*
2309          * Setup fabric->tf_ops from our local tcm_vhost_ops
2310          */
2311         fabric->tf_ops = tcm_vhost_ops;
2312         /*
2313          * Setup default attribute lists for various fabric->tf_cit_tmpl
2314          */
2315         fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_vhost_wwn_attrs;
2316         fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = tcm_vhost_tpg_attrs;
2317         fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;
2318         fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
2319         fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
2320         fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL;
2321         fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
2322         fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
2323         fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;
2324         /*
2325          * Register the fabric for use within TCM
2326          */
2327         ret = target_fabric_configfs_register(fabric);
2328         if (ret < 0) {
2329                 pr_err("target_fabric_configfs_register() failed"
2330                                 " for TCM_VHOST\n");
2331                 return ret;
2332         }
2333         /*
2334          * Setup our local pointer to *fabric
2335          */
2336         tcm_vhost_fabric_configfs = fabric;
2337         pr_debug("TCM_VHOST[0] - Set fabric -> tcm_vhost_fabric_configfs\n");
2338         return 0;
2339 }
2340
2341 static void tcm_vhost_deregister_configfs(void)
2342 {
2343         if (!tcm_vhost_fabric_configfs)
2344                 return;
2345
2346         target_fabric_configfs_deregister(tcm_vhost_fabric_configfs);
2347         tcm_vhost_fabric_configfs = NULL;
2348         pr_debug("TCM_VHOST[0] - Cleared tcm_vhost_fabric_configfs\n");
2349 }
2350
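/*
 * Module init: bring up the dedicated submission workqueue, register the
 * /dev/vhost-scsi misc device, then register the fabric with configfs,
 * unwinding in reverse order if any step fails.
 */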
2351 static int __init tcm_vhost_init(void)
2352 {
2353         int ret = -ENOMEM;
2354         /*
2355          * Use our own dedicated workqueue for submitting I/O into
2356          * target core to avoid contention within system_wq.
2357          */
2358         tcm_vhost_workqueue = alloc_workqueue("tcm_vhost", 0, 0);
2359         if (!tcm_vhost_workqueue)
2360                 goto out;
2361
2362         ret = vhost_scsi_register();
2363         if (ret < 0)
2364                 goto out_destroy_workqueue;
2365
2366         ret = tcm_vhost_register_configfs();
2367         if (ret < 0)
2368                 goto out_vhost_scsi_deregister;
2369
2370         return 0;
2371
2372 out_vhost_scsi_deregister:
2373         vhost_scsi_deregister();
2374 out_destroy_workqueue:
2375         destroy_workqueue(tcm_vhost_workqueue);
2376 out:
2377         return ret;
2378 }
2379
2380 static void tcm_vhost_exit(void)
2381 {
2382         tcm_vhost_deregister_configfs();
2383         vhost_scsi_deregister();
2384         destroy_workqueue(tcm_vhost_workqueue);
2385 }
2386
2387 MODULE_DESCRIPTION("VHOST_SCSI series fabric driver");
2388 MODULE_ALIAS("tcm_vhost");
2389 MODULE_LICENSE("GPL");
2390 module_init(tcm_vhost_init);
2391 module_exit(tcm_vhost_exit);