/*
 * Copyright (c) 2010 Cisco Systems, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

/* XXX TBD some includes may be extraneous */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>
#include <generated/utsrelease.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/hash.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>
#include <scsi/libfc.h>
#include <scsi/fc_encode.h>

#include <target/target_core_base.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>
#include <target/target_core_device.h>
#include <target/target_core_tpg.h>
#include <target/target_core_configfs.h>
#include <target/target_core_tmr.h>
#include <target/configfs_macros.h>

#include "tcm_fc.h"

/*
 * Dump cmd state for debugging.
 */
void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
{
        struct fc_exch *ep;
        struct fc_seq *sp;
        struct se_cmd *se_cmd;
        struct se_mem *mem;
        struct se_transport_task *task;

        if (!(ft_debug_logging & FT_DEBUG_IO))
                return;

        se_cmd = &cmd->se_cmd;
        printk(KERN_INFO "%s: cmd %p state %d sess %p seq %p se_cmd %p\n",
                caller, cmd, cmd->state, cmd->sess, cmd->seq, se_cmd);
        printk(KERN_INFO "%s: cmd %p cdb %p\n",
                caller, cmd, cmd->cdb);
        printk(KERN_INFO "%s: cmd %p lun %d\n", caller, cmd, cmd->lun);

        task = T_TASK(se_cmd);
        printk(KERN_INFO "%s: cmd %p task %p se_num %u buf %p len %u se_cmd_flags <0x%x>\n",
               caller, cmd, task, task->t_tasks_se_num,
               task->t_task_buf, se_cmd->data_length, se_cmd->se_cmd_flags);
        if (task->t_mem_list)
                list_for_each_entry(mem, task->t_mem_list, se_list)
                        printk(KERN_INFO "%s: cmd %p mem %p page %p "
                               "len 0x%x off 0x%x\n",
                               caller, cmd, mem,
                               mem->se_page, mem->se_len, mem->se_off);
        sp = cmd->seq;
        if (sp) {
                ep = fc_seq_exch(sp);
                printk(KERN_INFO "%s: cmd %p sid %x did %x "
                        "ox_id %x rx_id %x seq_id %x e_stat %x\n",
                        caller, cmd, ep->sid, ep->did, ep->oxid, ep->rxid,
                        sp->id, ep->esb_stat);
        }
        print_hex_dump(KERN_INFO, "ft_dump_cmd ", DUMP_PREFIX_NONE,
                16, 4, cmd->cdb, MAX_COMMAND_SIZE, 0);
}

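/*
 * Add an incoming command to the tpg's queue and wake the processing thread.
 */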
static void ft_queue_cmd(struct ft_sess *sess, struct ft_cmd *cmd)
{
        struct se_queue_obj *qobj;
        unsigned long flags;

        qobj = &sess->tport->tpg->qobj;
        spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
        list_add_tail(&cmd->se_req.qr_list, &qobj->qobj_list);
        spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
        atomic_inc(&qobj->queue_cnt);
        wake_up_interruptible(&qobj->thread_wq);
}

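/*
 * Remove and return the next queued command, or NULL if the queue is empty.
 */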
static struct ft_cmd *ft_dequeue_cmd(struct se_queue_obj *qobj)
{
        unsigned long flags;
        struct se_queue_req *qr;

        spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
        if (list_empty(&qobj->qobj_list)) {
                spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
                return NULL;
        }
        qr = list_first_entry(&qobj->qobj_list, struct se_queue_req, qr_list);
        list_del(&qr->qr_list);
        atomic_dec(&qobj->queue_cnt);
        spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
        return container_of(qr, struct ft_cmd, se_req);
}

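/*
 * Release the exchange sequence, request frame, and session reference
 * held by the command, then free it.
 */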
static void ft_free_cmd(struct ft_cmd *cmd)
{
        struct fc_frame *fp;
        struct fc_lport *lport;

        if (!cmd)
                return;
        fp = cmd->req_frame;
        lport = fr_dev(fp);
        if (fr_seq(fp))
                lport->tt.seq_release(fr_seq(fp));
        fc_frame_free(fp);
        ft_sess_put(cmd->sess); /* undo get from lookup at recv */
        kfree(cmd);
}

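/*
 * Fabric callback to release the command once the target core is done with it.
 */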
void ft_release_cmd(struct se_cmd *se_cmd)
{
        struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);

        ft_free_cmd(cmd);
}

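/*
 * Fabric callback invoked at command completion; frees the command
 * via the generic transport code.
 */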
void ft_check_stop_free(struct se_cmd *se_cmd)
{
        transport_generic_free_cmd(se_cmd, 0, 1, 0);
}

/*
 * Send the FCP response with SCSI status and any sense data.
 */
int ft_queue_status(struct se_cmd *se_cmd)
{
        struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
        struct fc_frame *fp;
        struct fcp_resp_with_ext *fcp;
        struct fc_lport *lport;
        struct fc_exch *ep;
        size_t len;

        ft_dump_cmd(cmd, __func__);
        ep = fc_seq_exch(cmd->seq);
        lport = ep->lp;
        len = sizeof(*fcp) + se_cmd->scsi_sense_length;
        fp = fc_frame_alloc(lport, len);
        if (!fp) {
                /* XXX shouldn't just drop it - requeue and retry? */
                return 0;
        }
        fcp = fc_frame_payload_get(fp, len);
        memset(fcp, 0, len);
        fcp->resp.fr_status = se_cmd->scsi_status;

        len = se_cmd->scsi_sense_length;
        if (len) {
                fcp->resp.fr_flags |= FCP_SNS_LEN_VAL;
                fcp->ext.fr_sns_len = htonl(len);
                memcpy((fcp + 1), se_cmd->sense_buffer, len);
        }

        /*
         * Test underflow and overflow with one mask.  Usually both are off.
         * Bidirectional commands are not handled yet.
         */
        if (se_cmd->se_cmd_flags & (SCF_OVERFLOW_BIT | SCF_UNDERFLOW_BIT)) {
                if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT)
                        fcp->resp.fr_flags |= FCP_RESID_OVER;
                else
                        fcp->resp.fr_flags |= FCP_RESID_UNDER;
                fcp->ext.fr_resid = cpu_to_be32(se_cmd->residual_count);
        }

        /*
         * Send response.
         */
        cmd->seq = lport->tt.seq_start_next(cmd->seq);
        fc_fill_fc_hdr(fp, FC_RCTL_DD_CMD_STATUS, ep->did, ep->sid, FC_TYPE_FCP,
                       FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ, 0);

        lport->tt.seq_send(lport, cmd->seq, fp);
        lport->tt.exch_done(cmd->seq);
        return 0;
}

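/*
 * Return nonzero while a write is still pending, i.e. not all of the
 * expected data has been received yet.
 */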
int ft_write_pending_status(struct se_cmd *se_cmd)
{
        struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);

        return cmd->write_data_len != se_cmd->data_length;
}

/*
 * Send XFER_RDY (transfer ready).
 */
int ft_write_pending(struct se_cmd *se_cmd)
{
        struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
        struct fc_frame *fp;
        struct fcp_txrdy *txrdy;
        struct fc_lport *lport;
        struct fc_exch *ep;
        struct fc_frame_header *fh;
        u32 f_ctl;

        ft_dump_cmd(cmd, __func__);

        ep = fc_seq_exch(cmd->seq);
        lport = ep->lp;
        fp = fc_frame_alloc(lport, sizeof(*txrdy));
        if (!fp)
                return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;

        txrdy = fc_frame_payload_get(fp, sizeof(*txrdy));
        memset(txrdy, 0, sizeof(*txrdy));
        txrdy->ft_burst_len = htonl(se_cmd->data_length);

        cmd->seq = lport->tt.seq_start_next(cmd->seq);
        fc_fill_fc_hdr(fp, FC_RCTL_DD_DATA_DESC, ep->did, ep->sid, FC_TYPE_FCP,
                       FC_FC_EX_CTX | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);

        fh = fc_frame_header_get(fp);
        f_ctl = ntoh24(fh->fh_f_ctl);

        /* Only if we are the 'Exchange Responder' */
        if (f_ctl & FC_FC_EX_CTX) {
                /* The target is the exchange responder, sending XFER_RDY
                 * to the exchange initiator.
                 */
                if ((ep->xid <= lport->lro_xid) &&
                    (fh->fh_r_ctl == FC_RCTL_DD_DATA_DESC)) {
                        if (se_cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
                                /*
                                 * Map the se_mem list to a scatterlist so
                                 * that DDP can be set up; the DDP setup
                                 * function requires a scatterlist.  The
                                 * se_mem list is internal to the TCM/LIO
                                 * target.
                                 */
                                transport_do_task_sg_chain(se_cmd);
                                cmd->sg = T_TASK(se_cmd)->t_tasks_sg_chained;
                                cmd->sg_cnt =
                                        T_TASK(se_cmd)->t_tasks_sg_chained_no;
                        }
                        if (cmd->sg && lport->tt.ddp_setup(lport, ep->xid,
                                                    cmd->sg, cmd->sg_cnt))
                                cmd->was_ddp_setup = 1;
                }
        }
        lport->tt.seq_send(lport, cmd->seq, fp);
        return 0;
}

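/*
 * Use the exchange responder ID (RX_ID) as the task tag.
 */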
u32 ft_get_task_tag(struct se_cmd *se_cmd)
{
        struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);

        return fc_seq_exch(cmd->seq)->rxid;
}

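/*
 * Report the driver-private command state to the target core.
 */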
int ft_get_cmd_state(struct se_cmd *se_cmd)
{
        struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);

        return cmd->state;
}

int ft_is_state_remove(struct se_cmd *se_cmd)
{
        return 0;       /* XXX TBD */
}

void ft_new_cmd_failure(struct se_cmd *se_cmd)
{
        /* XXX TBD */
        printk(KERN_INFO "%s: se_cmd %p\n", __func__, se_cmd);
}

/*
 * FC sequence response handler for follow-on sequences (data) and aborts.
 */
static void ft_recv_seq(struct fc_seq *sp, struct fc_frame *fp, void *arg)
{
        struct ft_cmd *cmd = arg;
        struct fc_frame_header *fh;

        if (IS_ERR(fp)) {
                /* XXX need to find cmd if queued */
                cmd->se_cmd.t_state = TRANSPORT_REMOVE;
                cmd->seq = NULL;
                transport_generic_free_cmd(&cmd->se_cmd, 0, 1, 0);
                return;
        }

        fh = fc_frame_header_get(fp);

        switch (fh->fh_r_ctl) {
        case FC_RCTL_DD_SOL_DATA:       /* write data */
                ft_recv_write_data(cmd, fp);
                break;
        case FC_RCTL_DD_UNSOL_CTL:      /* command */
        case FC_RCTL_DD_SOL_CTL:        /* transfer ready */
        case FC_RCTL_DD_DATA_DESC:      /* transfer ready */
        default:
                printk(KERN_INFO "%s: unhandled frame r_ctl %x\n",
                       __func__, fh->fh_r_ctl);
                fc_frame_free(fp);
                transport_generic_free_cmd(&cmd->se_cmd, 0, 1, 0);
                break;
        }
}

/*
 * Send an FCP response including SCSI status and an optional FCP rsp_code.
 * The rsp_code is included only when status is SAM_STAT_GOOD (zero).
 * This is used in error cases, such as allocation failures.
 */
static void ft_send_resp_status(struct fc_lport *lport,
                                const struct fc_frame *rx_fp,
                                u32 status, enum fcp_resp_rsp_codes code)
{
        struct fc_frame *fp;
        struct fc_seq *sp;
        const struct fc_frame_header *fh;
        size_t len;
        struct fcp_resp_with_ext *fcp;
        struct fcp_resp_rsp_info *info;

        fh = fc_frame_header_get(rx_fp);
        FT_IO_DBG("FCP error response: did %x oxid %x status %x code %x\n",
                  ntoh24(fh->fh_s_id), ntohs(fh->fh_ox_id), status, code);
        len = sizeof(*fcp);
        if (status == SAM_STAT_GOOD)
                len += sizeof(*info);
        fp = fc_frame_alloc(lport, len);
        if (!fp)
                return;
        fcp = fc_frame_payload_get(fp, len);
        memset(fcp, 0, len);
        fcp->resp.fr_status = status;
        if (status == SAM_STAT_GOOD) {
                fcp->ext.fr_rsp_len = htonl(sizeof(*info));
                fcp->resp.fr_flags |= FCP_RSP_LEN_VAL;
                info = (struct fcp_resp_rsp_info *)(fcp + 1);
                info->rsp_code = code;
        }

        fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_DD_CMD_STATUS, 0);
        sp = fr_seq(fp);
        if (sp)
                lport->tt.seq_send(lport, sp, fp);
        else
                lport->tt.frame_send(lport, fp);
}

/*
 * Send error or task management response.
 * Always frees the cmd and associated state.
 */
static void ft_send_resp_code(struct ft_cmd *cmd, enum fcp_resp_rsp_codes code)
{
        ft_send_resp_status(cmd->sess->tport->lport,
                            cmd->req_frame, SAM_STAT_GOOD, code);
        ft_free_cmd(cmd);
}

/*
 * Handle a Task Management Request.
 */
static void ft_send_tm(struct ft_cmd *cmd)
{
        struct se_tmr_req *tmr;
        struct fcp_cmnd *fcp;
        struct ft_sess *sess;
        u8 tm_func;

        fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp));

        switch (fcp->fc_tm_flags) {
        case FCP_TMF_LUN_RESET:
                tm_func = TMR_LUN_RESET;
                break;
        case FCP_TMF_TGT_RESET:
                tm_func = TMR_TARGET_WARM_RESET;
                break;
        case FCP_TMF_CLR_TASK_SET:
                tm_func = TMR_CLEAR_TASK_SET;
                break;
        case FCP_TMF_ABT_TASK_SET:
                tm_func = TMR_ABORT_TASK_SET;
                break;
        case FCP_TMF_CLR_ACA:
                tm_func = TMR_CLEAR_ACA;
                break;
        default:
                /*
                 * FCP-4 r01 indicates that having a combination of
                 * tm_flags set is invalid.
                 */
                FT_TM_DBG("invalid FCP tm_flags %x\n", fcp->fc_tm_flags);
                ft_send_resp_code(cmd, FCP_CMND_FIELDS_INVALID);
                return;
        }

        FT_TM_DBG("alloc tm cmd fn %d\n", tm_func);
        tmr = core_tmr_alloc_req(&cmd->se_cmd, cmd, tm_func);
        if (!tmr) {
                FT_TM_DBG("alloc failed\n");
                ft_send_resp_code(cmd, FCP_TMF_FAILED);
                return;
        }
        cmd->se_cmd.se_tmr_req = tmr;

        switch (fcp->fc_tm_flags) {
        case FCP_TMF_LUN_RESET:
                cmd->lun = scsilun_to_int((struct scsi_lun *)fcp->fc_lun);
                if (transport_get_lun_for_tmr(&cmd->se_cmd, cmd->lun) < 0) {
                        /*
                         * Clean up the newly allocated TMR request, since
                         * we could not locate the LUN needed to handle it.
                         */
                        FT_TM_DBG("Failed to get LUN for TMR func %d, "
                                  "se_cmd %p, unpacked_lun %d\n",
                                  tm_func, &cmd->se_cmd, cmd->lun);
                        ft_dump_cmd(cmd, __func__);
                        sess = cmd->sess;
                        transport_send_check_condition_and_sense(&cmd->se_cmd,
                                cmd->se_cmd.scsi_sense_reason, 0);
                        transport_generic_free_cmd(&cmd->se_cmd, 0, 1, 0);
                        ft_sess_put(sess);
                        return;
                }
                break;
        case FCP_TMF_TGT_RESET:
        case FCP_TMF_CLR_TASK_SET:
        case FCP_TMF_ABT_TASK_SET:
        case FCP_TMF_CLR_ACA:
                break;
        default:
                return;
        }
        transport_generic_handle_tmr(&cmd->se_cmd);
}

/*
 * Send status from completed task management request.
 */
int ft_queue_tm_resp(struct se_cmd *se_cmd)
{
        struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
        struct se_tmr_req *tmr = se_cmd->se_tmr_req;
        enum fcp_resp_rsp_codes code;

        switch (tmr->response) {
        case TMR_FUNCTION_COMPLETE:
                code = FCP_TMF_CMPL;
                break;
        case TMR_LUN_DOES_NOT_EXIST:
                code = FCP_TMF_INVALID_LUN;
                break;
        case TMR_FUNCTION_REJECTED:
                code = FCP_TMF_REJECTED;
                break;
        case TMR_TASK_DOES_NOT_EXIST:
        case TMR_TASK_STILL_ALLEGIANT:
        case TMR_TASK_FAILOVER_NOT_SUPPORTED:
        case TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED:
        case TMR_FUNCTION_AUTHORIZATION_FAILED:
        default:
                code = FCP_TMF_FAILED;
                break;
        }
        FT_TM_DBG("tmr fn %d resp %d fcp code %d\n",
                  tmr->function, tmr->response, code);
        ft_send_resp_code(cmd, code);
        return 0;
}

/*
 * Handle incoming FCP command.
 */
static void ft_recv_cmd(struct ft_sess *sess, struct fc_frame *fp)
{
        struct ft_cmd *cmd;
        struct fc_lport *lport = sess->tport->lport;

        cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
        if (!cmd)
                goto busy;
        cmd->sess = sess;
        cmd->seq = lport->tt.seq_assign(lport, fp);
        if (!cmd->seq) {
                kfree(cmd);
                goto busy;
        }
        cmd->req_frame = fp;            /* hold frame during cmd */
        ft_queue_cmd(sess, cmd);
        return;

busy:
        FT_IO_DBG("cmd or seq allocation failure - sending BUSY\n");
        ft_send_resp_status(lport, fp, SAM_STAT_BUSY, 0);
        fc_frame_free(fp);
        ft_sess_put(sess);              /* undo get from lookup */
}

/*
 * Handle incoming FCP frame.
 * Caller has verified that the frame is type FCP.
 */
void ft_recv_req(struct ft_sess *sess, struct fc_frame *fp)
{
        struct fc_frame_header *fh = fc_frame_header_get(fp);

        switch (fh->fh_r_ctl) {
        case FC_RCTL_DD_UNSOL_CMD:      /* command */
                ft_recv_cmd(sess, fp);
                break;
        case FC_RCTL_DD_SOL_DATA:       /* write data */
        case FC_RCTL_DD_UNSOL_CTL:
        case FC_RCTL_DD_SOL_CTL:
        case FC_RCTL_DD_DATA_DESC:      /* transfer ready */
        case FC_RCTL_ELS4_REQ:          /* SRR, perhaps */
        default:
                printk(KERN_INFO "%s: unhandled frame r_ctl %x\n",
                       __func__, fh->fh_r_ctl);
                fc_frame_free(fp);
                ft_sess_put(sess);      /* undo get from lookup */
                break;
        }
}

/*
 * Dispatch a newly received command to the target core.
 */
static void ft_send_cmd(struct ft_cmd *cmd)
{
        struct fc_frame_header *fh = fc_frame_header_get(cmd->req_frame);
        struct se_cmd *se_cmd;
        struct fcp_cmnd *fcp;
        int data_dir;
        u32 data_len;
        int task_attr;
        int ret;

        fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp));
        if (!fcp)
                goto err;

        if (fcp->fc_flags & FCP_CFL_LEN_MASK)
                goto err;               /* not handling longer CDBs yet */

        if (fcp->fc_tm_flags) {
                task_attr = FCP_PTA_SIMPLE;
                data_dir = DMA_NONE;
                data_len = 0;
        } else {
                switch (fcp->fc_flags & (FCP_CFL_RDDATA | FCP_CFL_WRDATA)) {
                case 0:
                        data_dir = DMA_NONE;
                        break;
                case FCP_CFL_RDDATA:
                        data_dir = DMA_FROM_DEVICE;
                        break;
                case FCP_CFL_WRDATA:
                        data_dir = DMA_TO_DEVICE;
                        break;
                case FCP_CFL_WRDATA | FCP_CFL_RDDATA:
                        goto err;       /* TBD not supported by tcm_fc yet */
                }
                /*
                 * Locate the SAM Task Attr from fc_pri_ta
                 */
                switch (fcp->fc_pri_ta & FCP_PTA_MASK) {
                case FCP_PTA_HEADQ:
                        task_attr = MSG_HEAD_TAG;
                        break;
                case FCP_PTA_ORDERED:
                        task_attr = MSG_ORDERED_TAG;
                        break;
                case FCP_PTA_ACA:
                        task_attr = MSG_ACA_TAG;
                        break;
                case FCP_PTA_SIMPLE: /* Fallthrough */
                default:
                        task_attr = MSG_SIMPLE_TAG;
                }

                data_len = ntohl(fcp->fc_dl);
                cmd->cdb = fcp->fc_cdb;
        }

        se_cmd = &cmd->se_cmd;
        /*
         * Initialize struct se_cmd descriptor from target_core_mod
         * infrastructure
         */
        transport_init_se_cmd(se_cmd, &ft_configfs->tf_ops, cmd->sess->se_sess,
                              data_len, data_dir, task_attr,
                              &cmd->ft_sense_buffer[0]);
        /*
         * Check for FCP task management flags
         */
        if (fcp->fc_tm_flags) {
                ft_send_tm(cmd);
                return;
        }

        fc_seq_exch(cmd->seq)->lp->tt.seq_set_resp(cmd->seq, ft_recv_seq, cmd);

        cmd->lun = scsilun_to_int((struct scsi_lun *)fcp->fc_lun);
        ret = transport_get_lun_for_cmd(&cmd->se_cmd, NULL, cmd->lun);
        if (ret < 0) {
                ft_dump_cmd(cmd, __func__);
                transport_send_check_condition_and_sense(&cmd->se_cmd,
                        cmd->se_cmd.scsi_sense_reason, 0);
                return;
        }

        ret = transport_generic_allocate_tasks(se_cmd, cmd->cdb);

        FT_IO_DBG("r_ctl %x alloc task ret %d\n", fh->fh_r_ctl, ret);
        ft_dump_cmd(cmd, __func__);

        if (ret == -1) {
                transport_send_check_condition_and_sense(se_cmd,
                                TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
                transport_generic_free_cmd(se_cmd, 0, 1, 0);
                return;
        }
        if (ret == -2) {
                if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT)
                        ft_queue_status(se_cmd);
                else
                        transport_send_check_condition_and_sense(se_cmd,
                                        se_cmd->scsi_sense_reason, 0);
                transport_generic_free_cmd(se_cmd, 0, 1, 0);
                return;
        }
        transport_generic_handle_cdb(se_cmd);
        return;

err:
        ft_send_resp_code(cmd, FCP_CMND_FIELDS_INVALID);
}

/*
 * Handle request in the command thread.
 */
static void ft_exec_req(struct ft_cmd *cmd)
{
        FT_IO_DBG("cmd state %x\n", cmd->state);
        switch (cmd->state) {
        case FC_CMD_ST_NEW:
                ft_send_cmd(cmd);
                break;
        default:
                break;
        }
}

/*
 * Processing thread.
 * Currently one thread per tpg.
 */
int ft_thread(void *arg)
{
        struct ft_tpg *tpg = arg;
        struct se_queue_obj *qobj = &tpg->qobj;
        struct ft_cmd *cmd;
        int ret;

        set_user_nice(current, -20);

        while (!kthread_should_stop()) {
                ret = wait_event_interruptible(qobj->thread_wq,
                        atomic_read(&qobj->queue_cnt) || kthread_should_stop());
                if (ret < 0 || kthread_should_stop())
                        goto out;
                cmd = ft_dequeue_cmd(qobj);
                if (cmd)
                        ft_exec_req(cmd);
        }

out:
        return 0;
}