/*******************************************************************************
 * Filename:  target_core_transport.c
 *
 * This file contains the Generic Target Engine Core.
 *
 * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/version.h>
#include <linux/net.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/cdrom.h>
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>

#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_tmr.h>
#include <target/target_core_tpg.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>
#include <target/target_core_configfs.h>

#include "target_core_alua.h"
#include "target_core_hba.h"
#include "target_core_pr.h"
#include "target_core_scdb.h"
#include "target_core_ua.h"

static int sub_api_initialized;

static struct kmem_cache *se_cmd_cache;
static struct kmem_cache *se_sess_cache;
struct kmem_cache *se_tmr_req_cache;
struct kmem_cache *se_ua_cache;
struct kmem_cache *t10_pr_reg_cache;
struct kmem_cache *t10_alua_lu_gp_cache;
struct kmem_cache *t10_alua_lu_gp_mem_cache;
struct kmem_cache *t10_alua_tg_pt_gp_cache;
struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;

/* Used for transport_dev_get_map_*() */
typedef int (*map_func_t)(struct se_task *, u32);

static int transport_generic_write_pending(struct se_cmd *);
static int transport_processing_thread(void *param);
static int __transport_execute_tasks(struct se_device *dev);
static void transport_complete_task_attr(struct se_cmd *cmd);
static int transport_complete_qf(struct se_cmd *cmd);
static void transport_handle_queue_full(struct se_cmd *cmd,
		struct se_device *dev, int (*qf_callback)(struct se_cmd *));
static void transport_direct_request_timeout(struct se_cmd *cmd);
static void transport_free_dev_tasks(struct se_cmd *cmd);
static u32 transport_allocate_tasks(struct se_cmd *cmd,
		unsigned long long starting_lba,
		enum dma_data_direction data_direction,
		struct scatterlist *sgl, unsigned int nents);
static int transport_generic_get_mem(struct se_cmd *cmd);
static int transport_generic_remove(struct se_cmd *cmd,
		int session_reinstatement);
static void transport_release_fe_cmd(struct se_cmd *cmd);
static void transport_remove_cmd_from_queue(struct se_cmd *cmd,
		struct se_queue_obj *qobj);
static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq);
static void transport_stop_all_task_timers(struct se_cmd *cmd);

int init_se_kmem_caches(void)
{
	se_cmd_cache = kmem_cache_create("se_cmd_cache",
			sizeof(struct se_cmd), __alignof__(struct se_cmd), 0, NULL);
	if (!se_cmd_cache) {
		pr_err("kmem_cache_create for struct se_cmd failed\n");
		goto out;
	}
	se_tmr_req_cache = kmem_cache_create("se_tmr_cache",
			sizeof(struct se_tmr_req), __alignof__(struct se_tmr_req),
			0, NULL);
	if (!se_tmr_req_cache) {
		pr_err("kmem_cache_create() for struct se_tmr_req"
				" failed\n");
		goto out;
	}
	se_sess_cache = kmem_cache_create("se_sess_cache",
			sizeof(struct se_session), __alignof__(struct se_session),
			0, NULL);
	if (!se_sess_cache) {
		pr_err("kmem_cache_create() for struct se_session"
				" failed\n");
		goto out;
	}
	se_ua_cache = kmem_cache_create("se_ua_cache",
			sizeof(struct se_ua), __alignof__(struct se_ua),
			0, NULL);
	if (!se_ua_cache) {
		pr_err("kmem_cache_create() for struct se_ua failed\n");
		goto out;
	}
	t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",
			sizeof(struct t10_pr_registration),
			__alignof__(struct t10_pr_registration), 0, NULL);
	if (!t10_pr_reg_cache) {
		pr_err("kmem_cache_create() for struct t10_pr_registration"
				" failed\n");
		goto out;
	}
	t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",
			sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp),
			0, NULL);
	if (!t10_alua_lu_gp_cache) {
		pr_err("kmem_cache_create() for t10_alua_lu_gp_cache"
				" failed\n");
		goto out;
	}
	t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",
			sizeof(struct t10_alua_lu_gp_member),
			__alignof__(struct t10_alua_lu_gp_member), 0, NULL);
	if (!t10_alua_lu_gp_mem_cache) {
		pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_"
				"cache failed\n");
		goto out;
	}
	t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",
			sizeof(struct t10_alua_tg_pt_gp),
			__alignof__(struct t10_alua_tg_pt_gp), 0, NULL);
	if (!t10_alua_tg_pt_gp_cache) {
		pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
				"cache failed\n");
		goto out;
	}
	t10_alua_tg_pt_gp_mem_cache = kmem_cache_create(
			"t10_alua_tg_pt_gp_mem_cache",
			sizeof(struct t10_alua_tg_pt_gp_member),
			__alignof__(struct t10_alua_tg_pt_gp_member),
			0, NULL);
	if (!t10_alua_tg_pt_gp_mem_cache) {
		pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
				"mem_t failed\n");
		goto out;
	}

	return 0;
out:
	if (se_cmd_cache)
		kmem_cache_destroy(se_cmd_cache);
	if (se_tmr_req_cache)
		kmem_cache_destroy(se_tmr_req_cache);
	if (se_sess_cache)
		kmem_cache_destroy(se_sess_cache);
	if (se_ua_cache)
		kmem_cache_destroy(se_ua_cache);
	if (t10_pr_reg_cache)
		kmem_cache_destroy(t10_pr_reg_cache);
	if (t10_alua_lu_gp_cache)
		kmem_cache_destroy(t10_alua_lu_gp_cache);
	if (t10_alua_lu_gp_mem_cache)
		kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
	if (t10_alua_tg_pt_gp_cache)
		kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
	if (t10_alua_tg_pt_gp_mem_cache)
		kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
	return -ENOMEM;
}

void release_se_kmem_caches(void)
{
	kmem_cache_destroy(se_cmd_cache);
	kmem_cache_destroy(se_tmr_req_cache);
	kmem_cache_destroy(se_sess_cache);
	kmem_cache_destroy(se_ua_cache);
	kmem_cache_destroy(t10_pr_reg_cache);
	kmem_cache_destroy(t10_alua_lu_gp_cache);
	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
	kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
}
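
/*
 * Illustrative sketch (assumed caller, not part of this file): the two
 * cache helpers above are expected to be paired in the module init/exit
 * paths of the ConfigFS subsystem code, e.g.:
 *
 *	if (init_se_kmem_caches() < 0)
 *		return -ENOMEM;
 *	...
 *	release_se_kmem_caches();
 */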

/* This code ensures unique mib indexes are handed out. */
static DEFINE_SPINLOCK(scsi_mib_index_lock);
static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX];

/*
 * Allocate a new row index for the entry type specified
 */
u32 scsi_get_new_index(scsi_index_t type)
{
	u32 new_index;

	BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX));

	spin_lock(&scsi_mib_index_lock);
	new_index = ++scsi_mib_index[type];
	spin_unlock(&scsi_mib_index_lock);

	return new_index;
}
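
/*
 * Illustrative usage: callers pass the statistics table they need an
 * index for, as done for new devices later in this file:
 *
 *	dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
 */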

void transport_init_queue_obj(struct se_queue_obj *qobj)
{
	atomic_set(&qobj->queue_cnt, 0);
	INIT_LIST_HEAD(&qobj->qobj_list);
	init_waitqueue_head(&qobj->thread_wq);
	spin_lock_init(&qobj->cmd_queue_lock);
}
EXPORT_SYMBOL(transport_init_queue_obj);

static int transport_subsystem_reqmods(void)
{
	int ret;

	ret = request_module("target_core_iblock");
	if (ret != 0)
		pr_err("Unable to load target_core_iblock\n");

	ret = request_module("target_core_file");
	if (ret != 0)
		pr_err("Unable to load target_core_file\n");

	ret = request_module("target_core_pscsi");
	if (ret != 0)
		pr_err("Unable to load target_core_pscsi\n");

	ret = request_module("target_core_stgt");
	if (ret != 0)
		pr_err("Unable to load target_core_stgt\n");

	return 0;
}

int transport_subsystem_check_init(void)
{
	int ret;

	if (sub_api_initialized)
		return 0;
	/*
	 * Request the loading of known TCM subsystem plugins..
	 */
	ret = transport_subsystem_reqmods();
	if (ret < 0)
		return ret;

	sub_api_initialized = 1;
	return 0;
}

struct se_session *transport_init_session(void)
{
	struct se_session *se_sess;

	se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
	if (!se_sess) {
		pr_err("Unable to allocate struct se_session from"
				" se_sess_cache\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&se_sess->sess_list);
	INIT_LIST_HEAD(&se_sess->sess_acl_list);

	return se_sess;
}
EXPORT_SYMBOL(transport_init_session);
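
/*
 * Illustrative fabric module usage (assumed fabric-local variables): a
 * fabric driver typically allocates a session and then registers it
 * against its portal group:
 *
 *	struct se_session *sess = transport_init_session();
 *	if (IS_ERR(sess))
 *		return PTR_ERR(sess);
 *	transport_register_session(se_tpg, se_nacl, sess, fabric_sess_ptr);
 */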

/*
 * Called with spin_lock_bh(&struct se_portal_group->session_lock) held.
 */
void __transport_register_session(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct se_session *se_sess,
	void *fabric_sess_ptr)
{
	unsigned char buf[PR_REG_ISID_LEN];

	se_sess->se_tpg = se_tpg;
	se_sess->fabric_sess_ptr = fabric_sess_ptr;
	/*
	 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session
	 *
	 * Only set for struct se_session's that will actually be moving I/O.
	 * eg: *NOT* discovery sessions.
	 */
	if (se_nacl) {
		/*
		 * If the fabric module supports an ISID based TransportID,
		 * save this value in binary from the fabric I_T Nexus now.
		 */
		if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
			memset(&buf[0], 0, PR_REG_ISID_LEN);
			se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess,
					&buf[0], PR_REG_ISID_LEN);
			se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
		}
		spin_lock_irq(&se_nacl->nacl_sess_lock);
		/*
		 * The se_nacl->nacl_sess pointer will be set to the
		 * last active I_T Nexus for each struct se_node_acl.
		 */
		se_nacl->nacl_sess = se_sess;

		list_add_tail(&se_sess->sess_acl_list,
			      &se_nacl->acl_sess_list);
		spin_unlock_irq(&se_nacl->nacl_sess_lock);
	}
	list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);

	pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
		se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr);
}
EXPORT_SYMBOL(__transport_register_session);

void transport_register_session(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct se_session *se_sess,
	void *fabric_sess_ptr)
{
	spin_lock_bh(&se_tpg->session_lock);
	__transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
	spin_unlock_bh(&se_tpg->session_lock);
}
EXPORT_SYMBOL(transport_register_session);

void transport_deregister_session_configfs(struct se_session *se_sess)
{
	struct se_node_acl *se_nacl;
	unsigned long flags;
	/*
	 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session
	 */
	se_nacl = se_sess->se_node_acl;
	if (se_nacl) {
		spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
		list_del(&se_sess->sess_acl_list);
		/*
		 * If the session list is empty, then clear the pointer.
		 * Otherwise, set the struct se_session pointer from the tail
		 * element of the per struct se_node_acl active session list.
		 */
		if (list_empty(&se_nacl->acl_sess_list))
			se_nacl->nacl_sess = NULL;
		else {
			se_nacl->nacl_sess = container_of(
					se_nacl->acl_sess_list.prev,
					struct se_session, sess_acl_list);
		}
		spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
	}
}
EXPORT_SYMBOL(transport_deregister_session_configfs);

void transport_free_session(struct se_session *se_sess)
{
	kmem_cache_free(se_sess_cache, se_sess);
}
EXPORT_SYMBOL(transport_free_session);

void transport_deregister_session(struct se_session *se_sess)
{
	struct se_portal_group *se_tpg = se_sess->se_tpg;
	struct se_node_acl *se_nacl;
	unsigned long flags;

	if (!se_tpg) {
		transport_free_session(se_sess);
		return;
	}

	spin_lock_irqsave(&se_tpg->session_lock, flags);
	list_del(&se_sess->sess_list);
	se_sess->se_tpg = NULL;
	se_sess->fabric_sess_ptr = NULL;
	spin_unlock_irqrestore(&se_tpg->session_lock, flags);

	/*
	 * Determine if we need to do extra work for this initiator node's
	 * struct se_node_acl if it had been previously dynamically generated.
	 */
	se_nacl = se_sess->se_node_acl;
	if (se_nacl) {
		spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
		if (se_nacl->dynamic_node_acl) {
			if (!se_tpg->se_tpg_tfo->tpg_check_demo_mode_cache(
					se_tpg)) {
				list_del(&se_nacl->acl_list);
				se_tpg->num_node_acls--;
				spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);

				core_tpg_wait_for_nacl_pr_ref(se_nacl);
				core_free_device_list_for_node(se_nacl, se_tpg);
				se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg,
						se_nacl);
				spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
			}
		}
		spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);
	}

	transport_free_session(se_sess);

	pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
		se_tpg->se_tpg_tfo->get_fabric_name());
}
EXPORT_SYMBOL(transport_deregister_session);
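
/*
 * Illustrative teardown ordering (assumed fabric shutdown path): the
 * ConfigFS session linkage is dropped first, then the TPG linkage; the
 * second call frees the session via transport_free_session():
 *
 *	transport_deregister_session_configfs(sess);
 *	transport_deregister_session(sess);
 */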

/*
 * Called with cmd->t_state_lock held.
 */
static void transport_all_task_dev_remove_state(struct se_cmd *cmd)
{
	struct se_device *dev;
	struct se_task *task;
	unsigned long flags;

	list_for_each_entry(task, &cmd->t_task_list, t_list) {
		dev = task->se_dev;
		if (!dev)
			continue;

		if (atomic_read(&task->task_active))
			continue;

		if (!atomic_read(&task->task_state_active))
			continue;

		spin_lock_irqsave(&dev->execute_task_lock, flags);
		list_del(&task->t_state_list);
		pr_debug("Removed ITT: 0x%08x dev: %p task[%p]\n",
			cmd->se_tfo->get_task_tag(cmd), dev, task);
		spin_unlock_irqrestore(&dev->execute_task_lock, flags);

		atomic_set(&task->task_state_active, 0);
		atomic_dec(&cmd->t_task_cdbs_ex_left);
	}
}

/*	transport_cmd_check_stop():
 *
 *	'transport_off = 1' determines if t_transport_active should be cleared.
 *	'transport_off = 2' determines if task_dev_state should be removed.
 *
 *	A non-zero u8 t_state sets cmd->t_state.
 *	Returns 1 when command is stopped, else 0.
 */
static int transport_cmd_check_stop(
	struct se_cmd *cmd,
	int transport_off,
	u8 t_state)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	/*
	 * Determine if IOCTL context caller is requesting the stopping of this
	 * command for LUN shutdown purposes.
	 */
	if (atomic_read(&cmd->transport_lun_stop)) {
		pr_debug("%s:%d atomic_read(&cmd->transport_lun_stop)"
			" == TRUE for ITT: 0x%08x\n", __func__, __LINE__,
			cmd->se_tfo->get_task_tag(cmd));

		cmd->deferred_t_state = cmd->t_state;
		cmd->t_state = TRANSPORT_DEFERRED_CMD;
		atomic_set(&cmd->t_transport_active, 0);
		if (transport_off == 2)
			transport_all_task_dev_remove_state(cmd);
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		complete(&cmd->transport_lun_stop_comp);
		return 1;
	}
	/*
	 * Determine if frontend context caller is requesting the stopping of
	 * this command for frontend exceptions.
	 */
	if (atomic_read(&cmd->t_transport_stop)) {
		pr_debug("%s:%d atomic_read(&cmd->t_transport_stop) =="
			" TRUE for ITT: 0x%08x\n", __func__, __LINE__,
			cmd->se_tfo->get_task_tag(cmd));

		cmd->deferred_t_state = cmd->t_state;
		cmd->t_state = TRANSPORT_DEFERRED_CMD;
		if (transport_off == 2)
			transport_all_task_dev_remove_state(cmd);

		/*
		 * Clear struct se_cmd->se_lun before the transport_off == 2 handoff
		 * to FE.
		 */
		if (transport_off == 2)
			cmd->se_lun = NULL;
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		complete(&cmd->t_transport_stop_comp);
		return 1;
	}
	if (transport_off) {
		atomic_set(&cmd->t_transport_active, 0);
		if (transport_off == 2) {
			transport_all_task_dev_remove_state(cmd);
			/*
			 * Clear struct se_cmd->se_lun before the transport_off == 2
			 * handoff to fabric module.
			 */
			cmd->se_lun = NULL;
			/*
			 * Some fabric modules like tcm_loop can release
			 * their internally allocated I/O reference and
			 * struct se_cmd now.
			 */
			if (cmd->se_tfo->check_stop_free != NULL) {
				spin_unlock_irqrestore(
					&cmd->t_state_lock, flags);

				cmd->se_tfo->check_stop_free(cmd);
				return 1;
			}
		}
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		return 0;
	} else if (t_state)
		cmd->t_state = t_state;
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	return 0;
}

static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
{
	return transport_cmd_check_stop(cmd, 2, 0);
}

static void transport_lun_remove_cmd(struct se_cmd *cmd)
{
	struct se_lun *lun = cmd->se_lun;
	unsigned long flags;

	if (!lun)
		return;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (!atomic_read(&cmd->transport_dev_active)) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		goto check_lun;
	}
	atomic_set(&cmd->transport_dev_active, 0);
	transport_all_task_dev_remove_state(cmd);
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

check_lun:
	spin_lock_irqsave(&lun->lun_cmd_lock, flags);
	if (atomic_read(&cmd->transport_lun_active)) {
		list_del(&cmd->se_lun_node);
		atomic_set(&cmd->transport_lun_active, 0);
#if 0
		pr_debug("Removed ITT: 0x%08x from LUN LIST[%d]\n",
			cmd->se_tfo->get_task_tag(cmd), lun->unpacked_lun);
#endif
	}
	spin_unlock_irqrestore(&lun->lun_cmd_lock, flags);
}

void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
{
	transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj);
	transport_lun_remove_cmd(cmd);

	if (transport_cmd_check_stop_to_fabric(cmd))
		return;
	if (remove)
		transport_generic_remove(cmd, 0);
}

void transport_cmd_finish_abort_tmr(struct se_cmd *cmd)
{
	transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj);

	if (transport_cmd_check_stop_to_fabric(cmd))
		return;

	transport_generic_remove(cmd, 0);
}

static void transport_add_cmd_to_queue(
	struct se_cmd *cmd,
	int t_state)
{
	struct se_device *dev = cmd->se_dev;
	struct se_queue_obj *qobj = &dev->dev_queue_obj;
	unsigned long flags;

	INIT_LIST_HEAD(&cmd->se_queue_node);

	if (t_state) {
		spin_lock_irqsave(&cmd->t_state_lock, flags);
		cmd->t_state = t_state;
		atomic_set(&cmd->t_transport_active, 1);
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
	}

	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
	if (cmd->se_cmd_flags & SCF_EMULATE_QUEUE_FULL) {
		cmd->se_cmd_flags &= ~SCF_EMULATE_QUEUE_FULL;
		list_add(&cmd->se_queue_node, &qobj->qobj_list);
	} else
		list_add_tail(&cmd->se_queue_node, &qobj->qobj_list);
	atomic_inc(&cmd->t_transport_queue_active);
	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);

	atomic_inc(&qobj->queue_cnt);
	wake_up_interruptible(&qobj->thread_wq);
}

static struct se_cmd *
transport_get_cmd_from_queue(struct se_queue_obj *qobj)
{
	struct se_cmd *cmd;
	unsigned long flags;

	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
	if (list_empty(&qobj->qobj_list)) {
		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
		return NULL;
	}
	cmd = list_first_entry(&qobj->qobj_list, struct se_cmd, se_queue_node);

	atomic_dec(&cmd->t_transport_queue_active);

	list_del(&cmd->se_queue_node);
	atomic_dec(&qobj->queue_cnt);
	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);

	return cmd;
}

static void transport_remove_cmd_from_queue(struct se_cmd *cmd,
		struct se_queue_obj *qobj)
{
	struct se_cmd *t;
	unsigned long flags;

	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
	if (!atomic_read(&cmd->t_transport_queue_active)) {
		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
		return;
	}

	list_for_each_entry(t, &qobj->qobj_list, se_queue_node)
		if (t == cmd) {
			atomic_dec(&cmd->t_transport_queue_active);
			atomic_dec(&qobj->queue_cnt);
			list_del(&cmd->se_queue_node);
			break;
		}
	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);

	if (atomic_read(&cmd->t_transport_queue_active)) {
		pr_err("ITT: 0x%08x t_transport_queue_active: %d\n",
			cmd->se_tfo->get_task_tag(cmd),
			atomic_read(&cmd->t_transport_queue_active));
	}
}
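
/*
 * Illustrative producer/consumer pattern for the per-device queue object
 * above (a simplified sketch of what transport_processing_thread() does):
 *
 *	transport_add_cmd_to_queue(cmd, t_state);	(producer side)
 *
 *	while (!kthread_should_stop()) {		(consumer side)
 *		wait_event_interruptible(qobj->thread_wq,
 *				atomic_read(&qobj->queue_cnt));
 *		cmd = transport_get_cmd_from_queue(qobj);
 *		if (!cmd)
 *			continue;
 *		... dispatch on cmd->t_state ...
 *	}
 */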

/*
 * Completion function used by TCM subsystem plugins (such as FILEIO)
 * for queueing up response from struct se_subsystem_api->do_task()
 */
void transport_complete_sync_cache(struct se_cmd *cmd, int good)
{
	struct se_task *task = list_entry(cmd->t_task_list.next,
				struct se_task, t_list);

	if (good) {
		cmd->scsi_status = SAM_STAT_GOOD;
		task->task_scsi_status = GOOD;
	} else {
		task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
		task->task_error_status = PYX_TRANSPORT_ILLEGAL_REQUEST;
		task->task_se_cmd->transport_error_status =
					PYX_TRANSPORT_ILLEGAL_REQUEST;
	}

	transport_complete_task(task, good);
}
EXPORT_SYMBOL(transport_complete_sync_cache);
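
/*
 * Illustrative backend usage (assumed FILEIO-style plugin locals): an
 * emulated SYNCHRONIZE_CACHE completion could be reported as:
 *
 *	int ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
 *
 *	transport_complete_sync_cache(cmd, ret == 0);
 */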

/*	transport_complete_task():
 *
 *	Called from interrupt and non-interrupt context depending
 *	on the transport plugin.
 */
void transport_complete_task(struct se_task *task, int success)
{
	struct se_cmd *cmd = task->task_se_cmd;
	struct se_device *dev = task->se_dev;
	int t_state;
	unsigned long flags;
#if 0
	pr_debug("task: %p CDB: 0x%02x obj_ptr: %p\n", task,
			cmd->t_task_cdb[0], dev);
#endif
	if (dev)
		atomic_inc(&dev->depth_left);

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	atomic_set(&task->task_active, 0);

	/*
	 * See if any sense data exists, if so set the TASK_SENSE flag.
	 * Also check for any other post completion work that needs to be
	 * done by the plugins.
	 */
	if (dev && dev->transport->transport_complete) {
		if (dev->transport->transport_complete(task) != 0) {
			cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE;
			task->task_sense = 1;
			success = 1;
		}
	}

	/*
	 * See if we are waiting for outstanding struct se_task
	 * to complete for an exception condition
	 */
	if (atomic_read(&task->task_stop)) {
		/*
		 * Decrement cmd->t_se_count if this task had
		 * previously thrown its timeout exception handler.
		 */
		if (atomic_read(&task->task_timeout)) {
			atomic_dec(&cmd->t_se_count);
			atomic_set(&task->task_timeout, 0);
		}
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		complete(&task->task_stop_comp);
		return;
	}
	/*
	 * If the task's timeout handler has fired, use the t_task_cdbs_timeout
	 * left counter to determine when the struct se_cmd is ready to be queued to
	 * the processing thread.
	 */
	if (atomic_read(&task->task_timeout)) {
		if (!atomic_dec_and_test(
				&cmd->t_task_cdbs_timeout_left)) {
			spin_unlock_irqrestore(&cmd->t_state_lock,
				flags);
			return;
		}
		t_state = TRANSPORT_COMPLETE_TIMEOUT;
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		transport_add_cmd_to_queue(cmd, t_state);
		return;
	}
	atomic_dec(&cmd->t_task_cdbs_timeout_left);

	/*
	 * Decrement the outstanding t_task_cdbs_left count.  The last
	 * struct se_task from struct se_cmd will complete itself into the
	 * device queue depending upon int success.
	 */
	if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) {
		if (!success)
			cmd->t_tasks_failed = 1;

		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return;
	}

	if (!success || cmd->t_tasks_failed) {
		t_state = TRANSPORT_COMPLETE_FAILURE;
		if (!task->task_error_status) {
			task->task_error_status =
				PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
			cmd->transport_error_status =
				PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
		}
	} else {
		atomic_set(&cmd->t_transport_complete, 1);
		t_state = TRANSPORT_COMPLETE_OK;
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	transport_add_cmd_to_queue(cmd, t_state);
}
EXPORT_SYMBOL(transport_complete_task);
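
/*
 * Illustrative backend completion (assumed plugin context): a
 * struct se_subsystem_api->do_task() implementation signals per-task
 * status from its I/O completion handler roughly as:
 *
 *	task->task_scsi_status = GOOD;
 *	transport_complete_task(task, 1);	(success)
 *
 *	task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
 *	transport_complete_task(task, 0);	(failure)
 */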

/*
 * Called by transport_add_tasks_from_cmd() once a struct se_cmd's
 * struct se_task list is ready to be added to the active execution list
 * of a struct se_device
 *
 * Called with se_dev_t->execute_task_lock held.
 */
static inline int transport_add_task_check_sam_attr(
	struct se_task *task,
	struct se_task *task_prev,
	struct se_device *dev)
{
	/*
	 * No SAM Task attribute emulation enabled, add to tail of
	 * execution queue
	 */
	if (dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) {
		list_add_tail(&task->t_execute_list, &dev->execute_task_list);
		return 0;
	}
	/*
	 * HEAD_OF_QUEUE attribute for received CDB, which means
	 * the first task that is associated with a struct se_cmd goes to
	 * head of the struct se_device->execute_task_list, and task_prev
	 * after that for each subsequent task
	 */
	if (task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG) {
		list_add(&task->t_execute_list,
				(task_prev != NULL) ?
				&task_prev->t_execute_list :
				&dev->execute_task_list);

		pr_debug("Set HEAD_OF_QUEUE for task CDB: 0x%02x"
				" in execution queue\n",
				task->task_se_cmd->t_task_cdb[0]);
		return 1;
	}
	/*
	 * For ORDERED, SIMPLE or UNTAGGED attribute tasks once they have been
	 * transitioned from Dormant -> Active state, and are added to the end
	 * of the struct se_device->execute_task_list
	 */
	list_add_tail(&task->t_execute_list, &dev->execute_task_list);
	return 0;
}

/*	__transport_add_task_to_execute_queue():
 *
 *	Called with se_dev_t->execute_task_lock held.
 */
static void __transport_add_task_to_execute_queue(
	struct se_task *task,
	struct se_task *task_prev,
	struct se_device *dev)
{
	int head_of_queue;

	head_of_queue = transport_add_task_check_sam_attr(task, task_prev, dev);
	atomic_inc(&dev->execute_tasks);

	if (atomic_read(&task->task_state_active))
		return;
	/*
	 * Determine if this task needs to go to HEAD_OF_QUEUE for the
	 * state list as well.  Running with SAM Task Attribute emulation
	 * will always return head_of_queue == 0 here
	 */
	if (head_of_queue)
		list_add(&task->t_state_list, (task_prev) ?
				&task_prev->t_state_list :
				&dev->state_task_list);
	else
		list_add_tail(&task->t_state_list, &dev->state_task_list);

	atomic_set(&task->task_state_active, 1);

	pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n",
		task->task_se_cmd->se_tfo->get_task_tag(task->task_se_cmd),
		task, dev);
}

static void transport_add_tasks_to_state_queue(struct se_cmd *cmd)
{
	struct se_device *dev;
	struct se_task *task;
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	list_for_each_entry(task, &cmd->t_task_list, t_list) {
		dev = task->se_dev;

		if (atomic_read(&task->task_state_active))
			continue;

		spin_lock(&dev->execute_task_lock);
		list_add_tail(&task->t_state_list, &dev->state_task_list);
		atomic_set(&task->task_state_active, 1);

		pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n",
			task->task_se_cmd->se_tfo->get_task_tag(
			task->task_se_cmd), task, dev);

		spin_unlock(&dev->execute_task_lock);
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}

static void transport_add_tasks_from_cmd(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct se_task *task, *task_prev = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dev->execute_task_lock, flags);
	list_for_each_entry(task, &cmd->t_task_list, t_list) {
		if (atomic_read(&task->task_execute_queue))
			continue;
		/*
		 * __transport_add_task_to_execute_queue() handles the
		 * SAM Task Attribute emulation if enabled
		 */
		__transport_add_task_to_execute_queue(task, task_prev, dev);
		atomic_set(&task->task_execute_queue, 1);
		task_prev = task;
	}
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}

/*	transport_remove_task_from_execute_queue():
 *
 *
 */
void transport_remove_task_from_execute_queue(
	struct se_task *task,
	struct se_device *dev)
{
	unsigned long flags;

	if (atomic_read(&task->task_execute_queue) == 0) {
		dump_stack();
		return;
	}

	spin_lock_irqsave(&dev->execute_task_lock, flags);
	list_del(&task->t_execute_list);
	atomic_set(&task->task_execute_queue, 0);
	atomic_dec(&dev->execute_tasks);
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}

/*
 * Handle QUEUE_FULL / -EAGAIN status
 */

static void target_qf_do_work(struct work_struct *work)
{
	struct se_device *dev = container_of(work, struct se_device,
					qf_work_queue);
	struct se_cmd *cmd, *cmd_tmp;

	spin_lock_irq(&dev->qf_cmd_lock);
	list_for_each_entry_safe(cmd, cmd_tmp, &dev->qf_cmd_list, se_qf_node) {

		list_del(&cmd->se_qf_node);
		atomic_dec(&dev->dev_qf_count);
		smp_mb__after_atomic_dec();
		spin_unlock_irq(&dev->qf_cmd_lock);

		pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
			" context: %s\n", cmd->se_tfo->get_fabric_name(), cmd,
			(cmd->t_state == TRANSPORT_COMPLETE_OK) ? "COMPLETE_OK" :
			(cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING"
			: "UNKNOWN");
		/*
		 * The SCF_EMULATE_QUEUE_FULL flag will be cleared once se_cmd
		 * has been added to head of queue
		 */
		transport_add_cmd_to_queue(cmd, cmd->t_state);

		spin_lock_irq(&dev->qf_cmd_lock);
	}
	spin_unlock_irq(&dev->qf_cmd_lock);
}

unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
{
	switch (cmd->data_direction) {
	case DMA_NONE:
		return "NONE";
	case DMA_FROM_DEVICE:
		return "READ";
	case DMA_TO_DEVICE:
		return "WRITE";
	case DMA_BIDIRECTIONAL:
		return "BIDI";
	default:
		break;
	}

	return "UNKNOWN";
}

void transport_dump_dev_state(
	struct se_device *dev,
	char *b,
	int *bl)
{
	*bl += sprintf(b + *bl, "Status: ");
	switch (dev->dev_status) {
	case TRANSPORT_DEVICE_ACTIVATED:
		*bl += sprintf(b + *bl, "ACTIVATED");
		break;
	case TRANSPORT_DEVICE_DEACTIVATED:
		*bl += sprintf(b + *bl, "DEACTIVATED");
		break;
	case TRANSPORT_DEVICE_SHUTDOWN:
		*bl += sprintf(b + *bl, "SHUTDOWN");
		break;
	case TRANSPORT_DEVICE_OFFLINE_ACTIVATED:
	case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED:
		*bl += sprintf(b + *bl, "OFFLINE");
		break;
	default:
		*bl += sprintf(b + *bl, "UNKNOWN=%d", dev->dev_status);
		break;
	}

	*bl += sprintf(b + *bl, "  Execute/Left/Max Queue Depth: %d/%d/%d",
		atomic_read(&dev->execute_tasks), atomic_read(&dev->depth_left),
		dev->queue_depth);
	*bl += sprintf(b + *bl, "  SectorSize: %u  MaxSectors: %u\n",
		dev->se_sub_dev->se_dev_attrib.block_size, dev->se_sub_dev->se_dev_attrib.max_sectors);
	*bl += sprintf(b + *bl, "        ");
}
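
/*
 * Illustrative caller (assumed ConfigFS attribute show context), since
 * *bl is used both as the append offset and the returned byte count:
 *
 *	int bl = 0;
 *
 *	transport_dump_dev_state(dev, page, &bl);
 *	return bl;
 */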

/*	transport_release_all_cmds():
 *
 *
 */
static void transport_release_all_cmds(struct se_device *dev)
{
	struct se_cmd *cmd, *tcmd;
	int bug_out = 0, t_state;
	unsigned long flags;

	spin_lock_irqsave(&dev->dev_queue_obj.cmd_queue_lock, flags);
	list_for_each_entry_safe(cmd, tcmd, &dev->dev_queue_obj.qobj_list,
				se_queue_node) {
		t_state = cmd->t_state;
		list_del(&cmd->se_queue_node);
		spin_unlock_irqrestore(&dev->dev_queue_obj.cmd_queue_lock,
				flags);

		pr_err("Releasing ITT: 0x%08x, i_state: %u,"
			" t_state: %u directly\n",
			cmd->se_tfo->get_task_tag(cmd),
			cmd->se_tfo->get_cmd_state(cmd), t_state);

		transport_release_fe_cmd(cmd);
		bug_out = 1;

		spin_lock_irqsave(&dev->dev_queue_obj.cmd_queue_lock, flags);
	}
	spin_unlock_irqrestore(&dev->dev_queue_obj.cmd_queue_lock, flags);
#if 0
	if (bug_out)
		BUG();
#endif
}

void transport_dump_vpd_proto_id(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Protocol Identifier: ");

	switch (vpd->protocol_identifier) {
	case 0x00:
		sprintf(buf+len, "Fibre Channel\n");
		break;
	case 0x10:
		sprintf(buf+len, "Parallel SCSI\n");
		break;
	case 0x20:
		sprintf(buf+len, "SSA\n");
		break;
	case 0x30:
		sprintf(buf+len, "IEEE 1394\n");
		break;
	case 0x40:
		sprintf(buf+len, "SCSI Remote Direct Memory Access"
				" Protocol\n");
		break;
	case 0x50:
		sprintf(buf+len, "Internet SCSI (iSCSI)\n");
		break;
	case 0x60:
		sprintf(buf+len, "SAS Serial SCSI Protocol\n");
		break;
	case 0x70:
		sprintf(buf+len, "Automation/Drive Interface Transport"
				" Protocol\n");
		break;
	case 0x80:
		sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n");
		break;
	default:
		sprintf(buf+len, "Unknown 0x%02x\n",
				vpd->protocol_identifier);
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);
}

void
transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * Check if the Protocol Identifier Valid (PIV) bit is set..
	 *
	 * from spc3r23.pdf section 7.5.1
	 */
	if (page_83[1] & 0x80) {
		vpd->protocol_identifier = (page_83[0] & 0xf0);
		vpd->protocol_identifier_set = 1;
		transport_dump_vpd_proto_id(vpd, NULL, 0);
	}
}
EXPORT_SYMBOL(transport_set_vpd_proto_id);
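
/*
 * For reference, the designation descriptor bytes consumed above and by
 * the transport_set_vpd_*() helpers below (spc3r23.pdf Section 7.6.3.1):
 *
 *	page_83[0]: PROTOCOL IDENTIFIER (bits 7:4), CODE SET (bits 3:0)
 *	page_83[1]: PIV (bit 7), ASSOCIATION (bits 5:4), DESIGNATOR TYPE (bits 3:0)
 *	page_83[3]: DESIGNATOR LENGTH
 */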

int transport_dump_vpd_assoc(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Identifier Association: ");

	switch (vpd->association) {
	case 0x00:
		sprintf(buf+len, "addressed logical unit\n");
		break;
	case 0x10:
		sprintf(buf+len, "target port\n");
		break;
	case 0x20:
		sprintf(buf+len, "SCSI target device\n");
		break;
	default:
		sprintf(buf+len, "Unknown 0x%02x\n", vpd->association);
		ret = -EINVAL;
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);

	return ret;
}

int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * The VPD identification association..
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 297
	 */
	vpd->association = (page_83[1] & 0x30);
	return transport_dump_vpd_assoc(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_assoc);

int transport_dump_vpd_ident_type(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Identifier Type: ");

	switch (vpd->device_identifier_type) {
	case 0x00:
		sprintf(buf+len, "Vendor specific\n");
		break;
	case 0x01:
		sprintf(buf+len, "T10 Vendor ID based\n");
		break;
	case 0x02:
		sprintf(buf+len, "EUI-64 based\n");
		break;
	case 0x03:
		sprintf(buf+len, "NAA\n");
		break;
	case 0x04:
		sprintf(buf+len, "Relative target port identifier\n");
		break;
	case 0x08:
		sprintf(buf+len, "SCSI name string\n");
		break;
	default:
		sprintf(buf+len, "Unsupported: 0x%02x\n",
				vpd->device_identifier_type);
		ret = -EINVAL;
		break;
	}

	if (p_buf) {
		if (p_buf_len < strlen(buf)+1)
			return -EINVAL;
		strncpy(p_buf, buf, p_buf_len);
	} else {
		pr_debug("%s", buf);
	}

	return ret;
}

int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * The VPD identifier type..
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 298
	 */
	vpd->device_identifier_type = (page_83[1] & 0x0f);
	return transport_dump_vpd_ident_type(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_ident_type);

int transport_dump_vpd_ident(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;

	memset(buf, 0, VPD_TMP_BUF_SIZE);

	switch (vpd->device_identifier_code_set) {
	case 0x01: /* Binary */
		sprintf(buf, "T10 VPD Binary Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	case 0x02: /* ASCII */
		sprintf(buf, "T10 VPD ASCII Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	case 0x03: /* UTF-8 */
		sprintf(buf, "T10 VPD UTF-8 Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	default:
		sprintf(buf, "T10 VPD Device Identifier encoding unsupported:"
			" 0x%02x", vpd->device_identifier_code_set);
		ret = -EINVAL;
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);

	return ret;
}

int
transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
{
	static const char hex_str[] = "0123456789abcdef";
	int j = 0, i = 4; /* offset to start of the identifier */

	/*
	 * The VPD Code Set (encoding)
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 296
	 */
	vpd->device_identifier_code_set = (page_83[0] & 0x0f);
	switch (vpd->device_identifier_code_set) {
	case 0x01: /* Binary */
		vpd->device_identifier[j++] =
				hex_str[vpd->device_identifier_type];
		while (i < (4 + page_83[3])) {
			vpd->device_identifier[j++] =
				hex_str[(page_83[i] & 0xf0) >> 4];
			vpd->device_identifier[j++] =
				hex_str[page_83[i] & 0x0f];
			i++;
		}
		break;
	case 0x02: /* ASCII */
	case 0x03: /* UTF-8 */
		while (i < (4 + page_83[3]))
			vpd->device_identifier[j++] = page_83[i++];
		break;
	default:
		break;
	}

	return transport_dump_vpd_ident(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_ident);
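
/*
 * Illustrative parse sequence (assumed caller walking INQUIRY EVPD 0x83
 * designation descriptors): each descriptor is fed through the helpers
 * above in order:
 *
 *	transport_set_vpd_proto_id(vpd, page_83);
 *	transport_set_vpd_assoc(vpd, page_83);
 *	transport_set_vpd_ident_type(vpd, page_83);
 *	transport_set_vpd_ident(vpd, page_83);
 */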
1344
1345 static void core_setup_task_attr_emulation(struct se_device *dev)
1346 {
1347         /*
1348          * If this device is from Target_Core_Mod/pSCSI, disable the
1349          * SAM Task Attribute emulation.
1350          *
1351          * This is currently not available in upsream Linux/SCSI Target
1352          * mode code, and is assumed to be disabled while using TCM/pSCSI.
1353          */
1354         if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
1355                 dev->dev_task_attr_type = SAM_TASK_ATTR_PASSTHROUGH;
1356                 return;
1357         }
1358
1359         dev->dev_task_attr_type = SAM_TASK_ATTR_EMULATED;
1360         pr_debug("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x"
1361                 " device\n", dev->transport->name,
1362                 dev->transport->get_device_rev(dev));
1363 }
1364
1365 static void scsi_dump_inquiry(struct se_device *dev)
1366 {
1367         struct t10_wwn *wwn = &dev->se_sub_dev->t10_wwn;
1368         int i, device_type;
1369         /*
1370          * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
1371          */
1372         pr_debug("  Vendor: ");
1373         for (i = 0; i < 8; i++)
1374                 if (wwn->vendor[i] >= 0x20)
1375                         pr_debug("%c", wwn->vendor[i]);
1376                 else
1377                         pr_debug(" ");
1378
1379         pr_debug("  Model: ");
1380         for (i = 0; i < 16; i++)
1381                 if (wwn->model[i] >= 0x20)
1382                         pr_debug("%c", wwn->model[i]);
1383                 else
1384                         pr_debug(" ");
1385
1386         pr_debug("  Revision: ");
1387         for (i = 0; i < 4; i++)
1388                 if (wwn->revision[i] >= 0x20)
1389                         pr_debug("%c", wwn->revision[i]);
1390                 else
1391                         pr_debug(" ");
1392
1393         pr_debug("\n");
1394
1395         device_type = dev->transport->get_device_type(dev);
1396         pr_debug("  Type:   %s ", scsi_device_type(device_type));
1397         pr_debug("                 ANSI SCSI revision: %02x\n",
1398                                 dev->transport->get_device_rev(dev));
1399 }
1400
1401 struct se_device *transport_add_device_to_core_hba(
1402         struct se_hba *hba,
1403         struct se_subsystem_api *transport,
1404         struct se_subsystem_dev *se_dev,
1405         u32 device_flags,
1406         void *transport_dev,
1407         struct se_dev_limits *dev_limits,
1408         const char *inquiry_prod,
1409         const char *inquiry_rev)
1410 {
1411         int force_pt;
1412         struct se_device  *dev;
1413
1414         dev = kzalloc(sizeof(struct se_device), GFP_KERNEL);
1415         if (!dev) {
1416                 pr_err("Unable to allocate memory for se_dev_t\n");
1417                 return NULL;
1418         }
1419
1420         transport_init_queue_obj(&dev->dev_queue_obj);
1421         dev->dev_flags          = device_flags;
1422         dev->dev_status         |= TRANSPORT_DEVICE_DEACTIVATED;
1423         dev->dev_ptr            = transport_dev;
1424         dev->se_hba             = hba;
1425         dev->se_sub_dev         = se_dev;
1426         dev->transport          = transport;
1427         atomic_set(&dev->active_cmds, 0);
1428         INIT_LIST_HEAD(&dev->dev_list);
1429         INIT_LIST_HEAD(&dev->dev_sep_list);
1430         INIT_LIST_HEAD(&dev->dev_tmr_list);
1431         INIT_LIST_HEAD(&dev->execute_task_list);
1432         INIT_LIST_HEAD(&dev->delayed_cmd_list);
1433         INIT_LIST_HEAD(&dev->ordered_cmd_list);
1434         INIT_LIST_HEAD(&dev->state_task_list);
1435         INIT_LIST_HEAD(&dev->qf_cmd_list);
1436         spin_lock_init(&dev->execute_task_lock);
1437         spin_lock_init(&dev->delayed_cmd_lock);
1438         spin_lock_init(&dev->ordered_cmd_lock);
1439         spin_lock_init(&dev->state_task_lock);
1440         spin_lock_init(&dev->dev_alua_lock);
1441         spin_lock_init(&dev->dev_reservation_lock);
1442         spin_lock_init(&dev->dev_status_lock);
1443         spin_lock_init(&dev->dev_status_thr_lock);
1444         spin_lock_init(&dev->se_port_lock);
1445         spin_lock_init(&dev->se_tmr_lock);
1446         spin_lock_init(&dev->qf_cmd_lock);
1447
1448         dev->queue_depth        = dev_limits->queue_depth;
1449         atomic_set(&dev->depth_left, dev->queue_depth);
1450         atomic_set(&dev->dev_ordered_id, 0);
1451
1452         se_dev_set_default_attribs(dev, dev_limits);
1453
1454         dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
1455         dev->creation_time = get_jiffies_64();
1456         spin_lock_init(&dev->stats_lock);
1457
1458         spin_lock(&hba->device_lock);
1459         list_add_tail(&dev->dev_list, &hba->hba_dev_list);
1460         hba->dev_count++;
1461         spin_unlock(&hba->device_lock);
1462         /*
1463          * Setup the SAM Task Attribute emulation for struct se_device
1464          */
1465         core_setup_task_attr_emulation(dev);
1466         /*
1467          * Force PR and ALUA passthrough emulation with internal object use.
1468          */
1469         force_pt = (hba->hba_flags & HBA_FLAGS_INTERNAL_USE);
1470         /*
1471          * Setup the Reservations infrastructure for struct se_device
1472          */
1473         core_setup_reservations(dev, force_pt);
1474         /*
1475          * Setup the Asymmetric Logical Unit Assignment for struct se_device
1476          */
1477         if (core_setup_alua(dev, force_pt) < 0)
1478                 goto out;
1479
1480         /*
1481          * Startup the struct se_device processing thread
1482          */
1483         dev->process_thread = kthread_run(transport_processing_thread, dev,
1484                                           "LIO_%s", dev->transport->name);
1485         if (IS_ERR(dev->process_thread)) {
1486                 pr_err("Unable to create kthread: LIO_%s\n",
1487                         dev->transport->name);
1488                 goto out;
1489         }
1490         /*
1491          * Setup work_queue for QUEUE_FULL
1492          */
1493         INIT_WORK(&dev->qf_work_queue, target_qf_do_work);
1494         /*
1495          * Preload the initial INQUIRY const values if we are doing
1496          * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
1497          * passthrough because this is being provided by the backend LLD.
1498          * This is required so that transport_get_inquiry() copies these
1499          * originals once back into DEV_T10_WWN(dev) for the virtual device
1500          * setup.
1501          */
1502         if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
1503                 if (!inquiry_prod || !inquiry_rev) {
1504                         pr_err("All non TCM/pSCSI plugins require"
1505                                 " INQUIRY consts\n");
1506                         goto out;
1507                 }
1508
1509                 strncpy(&dev->se_sub_dev->t10_wwn.vendor[0], "LIO-ORG", 8);
1510                 strncpy(&dev->se_sub_dev->t10_wwn.model[0], inquiry_prod, 16);
1511                 strncpy(&dev->se_sub_dev->t10_wwn.revision[0], inquiry_rev, 4);
1512         }
1513         scsi_dump_inquiry(dev);
1514
1515         return dev;
1516 out:
1517         /* May be NULL or ERR_PTR() if we failed before or at kthread_run() */
         if (!IS_ERR_OR_NULL(dev->process_thread))
                 kthread_stop(dev->process_thread);
1518
1519         spin_lock(&hba->device_lock);
1520         list_del(&dev->dev_list);
1521         hba->dev_count--;
1522         spin_unlock(&hba->device_lock);
1523
1524         se_release_vpd_for_dev(dev);
1525
1526         kfree(dev);
1527
1528         return NULL;
1529 }
1530 EXPORT_SYMBOL(transport_add_device_to_core_hba);
1531
1532 /*      transport_generic_prepare_cdb():
1533  *
1534  *      Since the Initiator sees iSCSI devices as LUNs, the SCSI CDB will
1535  *      contain the iSCSI LUN in bits 7-5 of byte 1 as per SAM-2.
1536  *      The point of this is that, since we are mapping iSCSI LUNs to
1537  *      SCSI Target IDs, a non-zero LUN in the CDB will throw the
1538  *      devices and HBAs for a loop.
1539  */
1540 static inline void transport_generic_prepare_cdb(
1541         unsigned char *cdb)
1542 {
1543         switch (cdb[0]) {
1544         case READ_10: /* SBC - RDProtect */
1545         case READ_12: /* SBC - RDProtect */
1546         case READ_16: /* SBC - RDProtect */
1547         case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */
1548         case VERIFY: /* SBC - VRProtect */
1549         case VERIFY_16: /* SBC - VRProtect */
1550         case WRITE_VERIFY: /* SBC - VRProtect */
1551         case WRITE_VERIFY_12: /* SBC - VRProtect */
1552                 break;
1553         default:
1554                 cdb[1] &= 0x1f; /* clear logical unit number */
1555                 break;
1556         }
1557 }
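/*
 * Worked example (illustrative values only): a READ_6 CDB arriving with
 * byte 1 == 0x40 carries LUN 2 in bits 7-5 per SAM-2.  Since READ_6 is
 * not in the opcode list above, the default case masks it:
 *
 *	cdb[1] = 0x40;
 *	cdb[1] &= 0x1f;		\/\* now 0x00, LUN bits cleared \*\/
 *
 * while the RDProtect/VRProtect/SELF-TEST bits in byte 1 of the listed
 * opcodes are left untouched.
 */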
1558
1559 static struct se_task *
1560 transport_generic_get_task(struct se_cmd *cmd,
1561                 enum dma_data_direction data_direction)
1562 {
1563         struct se_task *task;
1564         struct se_device *dev = cmd->se_dev;
1565
1566         task = dev->transport->alloc_task(cmd->t_task_cdb);
1567         if (!task) {
1568                 pr_err("Unable to allocate struct se_task\n");
1569                 return NULL;
1570         }
1571
1572         INIT_LIST_HEAD(&task->t_list);
1573         INIT_LIST_HEAD(&task->t_execute_list);
1574         INIT_LIST_HEAD(&task->t_state_list);
1575         init_completion(&task->task_stop_comp);
1576         task->task_se_cmd = cmd;
1577         task->se_dev = dev;
1578         task->task_data_direction = data_direction;
1579
1580         return task;
1581 }
1582
1583 static int transport_generic_cmd_sequencer(struct se_cmd *, unsigned char *);
1584
1585 /*
1586  * Used by fabric modules containing a local struct se_cmd within their
1587  * fabric dependent per I/O descriptor.
1588  */
1589 void transport_init_se_cmd(
1590         struct se_cmd *cmd,
1591         struct target_core_fabric_ops *tfo,
1592         struct se_session *se_sess,
1593         u32 data_length,
1594         int data_direction,
1595         int task_attr,
1596         unsigned char *sense_buffer)
1597 {
1598         INIT_LIST_HEAD(&cmd->se_lun_node);
1599         INIT_LIST_HEAD(&cmd->se_delayed_node);
1600         INIT_LIST_HEAD(&cmd->se_ordered_node);
1601         INIT_LIST_HEAD(&cmd->se_qf_node);
1602
1603         INIT_LIST_HEAD(&cmd->t_task_list);
1604         init_completion(&cmd->transport_lun_fe_stop_comp);
1605         init_completion(&cmd->transport_lun_stop_comp);
1606         init_completion(&cmd->t_transport_stop_comp);
1607         spin_lock_init(&cmd->t_state_lock);
1608         atomic_set(&cmd->transport_dev_active, 1);
1609
1610         cmd->se_tfo = tfo;
1611         cmd->se_sess = se_sess;
1612         cmd->data_length = data_length;
1613         cmd->data_direction = data_direction;
1614         cmd->sam_task_attr = task_attr;
1615         cmd->sense_buffer = sense_buffer;
1616 }
1617 EXPORT_SYMBOL(transport_init_se_cmd);
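/*
 * Illustrative sketch only (my_cmd, my_fabric_ops and sense_buf are
 * hypothetical fabric-side names, not defined in this file): a fabric
 * module embedding a struct se_cmd inside its per I/O descriptor would
 * typically do:
 *
 *	transport_init_se_cmd(&my_cmd->se_cmd, &my_fabric_ops, se_sess,
 *			data_length, DMA_FROM_DEVICE, MSG_SIMPLE_TAG,
 *			&my_cmd->sense_buf[0]);
 *
 * before passing the descriptor on to transport_generic_allocate_tasks().
 */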
1618
1619 static int transport_check_alloc_task_attr(struct se_cmd *cmd)
1620 {
1621         /*
1622          * Check if SAM Task Attribute emulation is enabled for this
1623          * struct se_device storage object
1624          */
1625         if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
1626                 return 0;
1627
1628         if (cmd->sam_task_attr == MSG_ACA_TAG) {
1629                 pr_debug("SAM Task Attribute ACA"
1630                         " emulation is not supported\n");
1631                 return -EINVAL;
1632         }
1633         /*
1634          * Used to determine when ORDERED commands should go from
1635          * Dormant to Active status.
1636          */
1637         cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
1638         smp_mb__after_atomic_inc();
1639         pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
1640                         cmd->se_ordered_id, cmd->sam_task_attr,
1641                         cmd->se_dev->transport->name);
1642         return 0;
1643 }
1644
1645 void transport_free_se_cmd(
1646         struct se_cmd *se_cmd)
1647 {
1648         if (se_cmd->se_tmr_req)
1649                 core_tmr_release_req(se_cmd->se_tmr_req);
1650         /*
1651          * Check and free any extended CDB buffer that was allocated
1652          */
1653         if (se_cmd->t_task_cdb != se_cmd->__t_task_cdb)
1654                 kfree(se_cmd->t_task_cdb);
1655 }
1656 EXPORT_SYMBOL(transport_free_se_cmd);
1657
1658 static void transport_generic_wait_for_tasks(struct se_cmd *, int, int);
1659
1660 /*      transport_generic_allocate_tasks():
1661  *
1662  *      Called from fabric RX Thread.
1663  */
1664 int transport_generic_allocate_tasks(
1665         struct se_cmd *cmd,
1666         unsigned char *cdb)
1667 {
1668         int ret;
1669
1670         transport_generic_prepare_cdb(cdb);
1671
1672         /*
1673          * This is needed for early exceptions.
1674          */
1675         cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks;
1676
1677         /*
1678          * Ensure that the received CDB does not exceed the maximum
1679          * (252 + 8) bytes for VARIABLE_LENGTH_CMD
1680          */
1681         if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) {
1682                 pr_err("Received SCSI CDB with command_size: %d that"
1683                         " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
1684                         scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
1685                 return -EINVAL;
1686         }
1687         /*
1688          * If the received CDB is larger than TCM_MAX_COMMAND_SIZE,
1689          * allocate the additional extended CDB buffer now.  Otherwise
1690          * setup the pointer from __t_task_cdb to t_task_cdb.
1691          */
1692         if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) {
1693                 cmd->t_task_cdb = kzalloc(scsi_command_size(cdb),
1694                                                 GFP_KERNEL);
1695                 if (!cmd->t_task_cdb) {
1696                         pr_err("Unable to allocate cmd->t_task_cdb"
1697                                 " %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
1698                                 scsi_command_size(cdb),
1699                                 (unsigned long)sizeof(cmd->__t_task_cdb));
1700                         return -ENOMEM;
1701                 }
1702         } else
1703                 cmd->t_task_cdb = &cmd->__t_task_cdb[0];
1704         /*
1705          * Copy the original CDB into cmd->t_task_cdb
1706          */
1707         memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb));
1708         /*
1709          * Setup the received CDB based on SCSI defined opcodes and
1710          * perform unit attention, persistent reservations and ALUA
1711          * checks for virtual device backends.  The cmd->t_task_cdb
1712          * pointer is expected to be setup before we reach this point.
1713          */
1714         ret = transport_generic_cmd_sequencer(cmd, cdb);
1715         if (ret < 0)
1716                 return ret;
1717         /*
1718          * Check for SAM Task Attribute Emulation
1719          */
1720         if (transport_check_alloc_task_attr(cmd) < 0) {
1721                 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
1722                 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
1723                 return -EINVAL;
1724         }
1725         spin_lock(&cmd->se_lun->lun_sep_lock);
1726         if (cmd->se_lun->lun_sep)
1727                 cmd->se_lun->lun_sep->sep_stats.cmd_pdus++;
1728         spin_unlock(&cmd->se_lun->lun_sep_lock);
1729         return 0;
1730 }
1731 EXPORT_SYMBOL(transport_generic_allocate_tasks);
1732
1733 /*
1734  * Used by fabric module frontends not defining a TFO->new_cmd_map()
1735  * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD status
1736  */
1737 int transport_generic_handle_cdb(
1738         struct se_cmd *cmd)
1739 {
1740         if (!cmd->se_lun) {
1741                 dump_stack();
1742                 pr_err("cmd->se_lun is NULL\n");
1743                 return -EINVAL;
1744         }
1745
1746         transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD);
1747         return 0;
1748 }
1749 EXPORT_SYMBOL(transport_generic_handle_cdb);
1750
1751 static void transport_generic_request_failure(struct se_cmd *,
1752                         struct se_device *, int, int);
1753 /*
1754  * Used by fabric module frontends to queue tasks directly.
1755  * May only be used from process context.
1756  */
1757 int transport_handle_cdb_direct(
1758         struct se_cmd *cmd)
1759 {
1760         int ret;
1761
1762         if (!cmd->se_lun) {
1763                 dump_stack();
1764                 pr_err("cmd->se_lun is NULL\n");
1765                 return -EINVAL;
1766         }
1767         if (in_interrupt()) {
1768                 dump_stack();
1769                 pr_err("transport_handle_cdb_direct cannot be called"
1770                                 " from interrupt context\n");
1771                 return -EINVAL;
1772         }
1773         /*
1774          * Set TRANSPORT_NEW_CMD state and cmd->t_transport_active=1 following
1775          * transport_generic_handle_cdb*() -> transport_add_cmd_to_queue()
1776          * in existing usage to ensure that outstanding descriptors are handled
1777          * correctly during shutdown via transport_generic_wait_for_tasks()
1778          *
1779          * Also, we don't take cmd->t_state_lock here as we only expect
1780          * this to be called for initial descriptor submission.
1781          */
1782         cmd->t_state = TRANSPORT_NEW_CMD;
1783         atomic_set(&cmd->t_transport_active, 1);
1784         /*
1785          * transport_generic_new_cmd() is already handling QUEUE_FULL,
1786          * so follow TRANSPORT_NEW_CMD processing thread context usage
1787          * and call transport_generic_request_failure() if necessary..
1788          */
1789         ret = transport_generic_new_cmd(cmd);
1790         if (ret == -EAGAIN)
1791                 return 0;
1792         else if (ret < 0) {
1793                 cmd->transport_error_status = ret;
1794                 transport_generic_request_failure(cmd, NULL, 0,
1795                                 (cmd->data_direction != DMA_TO_DEVICE));
1796         }
1797         return 0;
1798 }
1799 EXPORT_SYMBOL(transport_handle_cdb_direct);
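/*
 * Sketch of the expected call flow for a fabric module without a
 * TFO->new_cmd_map() caller, running in process context with cmd->se_lun
 * already set up (my_cmd is a hypothetical fabric descriptor, error
 * handling elided):
 *
 *	transport_init_se_cmd(&my_cmd->se_cmd, ...);
 *	rc = transport_generic_allocate_tasks(&my_cmd->se_cmd, cdb);
 *	if (!rc)
 *		transport_handle_cdb_direct(&my_cmd->se_cmd);
 */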
1800
1801 /*
1802  * Used by fabric module frontends defining a TFO->new_cmd_map() caller
1803  * to  queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD_MAP in order to
1804  * complete setup in TCM process context w/ TFO->new_cmd_map().
1805  */
1806 int transport_generic_handle_cdb_map(
1807         struct se_cmd *cmd)
1808 {
1809         if (!cmd->se_lun) {
1810                 dump_stack();
1811                 pr_err("cmd->se_lun is NULL\n");
1812                 return -EINVAL;
1813         }
1814
1815         transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD_MAP);
1816         return 0;
1817 }
1818 EXPORT_SYMBOL(transport_generic_handle_cdb_map);
1819
1820 /*      transport_generic_handle_data():
1821  *
1822  *
1823  */
1824 int transport_generic_handle_data(
1825         struct se_cmd *cmd)
1826 {
1827         /*
1828          * For the software fabric case, we assume the nexus is being
1829          * failed/shutdown when signals are pending from the kthread context
1830          * caller, so we return a failure.  For the HW target mode case running
1831          * in interrupt code, the signal_pending() check is skipped.
1832          */
1833         if (!in_interrupt() && signal_pending(current))
1834                 return -EPERM;
1835         /*
1836          * If the received CDB has already been ABORTED by the generic
1837          * target engine, we now call transport_check_aborted_status()
1838          * to queue any delayed TASK_ABORTED status for the received CDB to the
1839          * fabric module as we are expecting no further incoming DATA OUT
1840          * sequences at this point.
1841          */
1842         if (transport_check_aborted_status(cmd, 1) != 0)
1843                 return 0;
1844
1845         transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_WRITE);
1846         return 0;
1847 }
1848 EXPORT_SYMBOL(transport_generic_handle_data);
1849
1850 /*      transport_generic_handle_tmr():
1851  *
1852  *
1853  */
1854 int transport_generic_handle_tmr(
1855         struct se_cmd *cmd)
1856 {
1857         /*
1858          * This is needed for early exceptions.
1859          */
1860         cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks;
1861
1862         transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR);
1863         return 0;
1864 }
1865 EXPORT_SYMBOL(transport_generic_handle_tmr);
1866
1867 void transport_generic_free_cmd_intr(
1868         struct se_cmd *cmd)
1869 {
1870         transport_add_cmd_to_queue(cmd, TRANSPORT_FREE_CMD_INTR);
1871 }
1872 EXPORT_SYMBOL(transport_generic_free_cmd_intr);
1873
1874 static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
1875 {
1876         struct se_task *task, *task_tmp;
1877         unsigned long flags;
1878         int ret = 0;
1879
1880         pr_debug("ITT[0x%08x] - Stopping tasks\n",
1881                 cmd->se_tfo->get_task_tag(cmd));
1882
1883         /*
1884          * No tasks remain in the execution queue
1885          */
1886         spin_lock_irqsave(&cmd->t_state_lock, flags);
1887         list_for_each_entry_safe(task, task_tmp,
1888                                 &cmd->t_task_list, t_list) {
1889                 pr_debug("task_no[%d] - Processing task %p\n",
1890                                 task->task_no, task);
1891                 /*
1892                  * If the struct se_task has not been sent and is not active,
1893                  * remove the struct se_task from the execution queue.
1894                  */
1895                 if (!atomic_read(&task->task_sent) &&
1896                     !atomic_read(&task->task_active)) {
1897                         spin_unlock_irqrestore(&cmd->t_state_lock,
1898                                         flags);
1899                         transport_remove_task_from_execute_queue(task,
1900                                         task->se_dev);
1901
1902                         pr_debug("task_no[%d] - Removed from execute queue\n",
1903                                 task->task_no);
1904                         spin_lock_irqsave(&cmd->t_state_lock, flags);
1905                         continue;
1906                 }
1907
1908                 /*
1909                  * If the struct se_task is active, sleep until it is returned
1910                  * from the plugin.
1911                  */
1912                 if (atomic_read(&task->task_active)) {
1913                         atomic_set(&task->task_stop, 1);
1914                         spin_unlock_irqrestore(&cmd->t_state_lock,
1915                                         flags);
1916
1917                         pr_debug("task_no[%d] - Waiting to complete\n",
1918                                 task->task_no);
1919                         wait_for_completion(&task->task_stop_comp);
1920                         pr_debug("task_no[%d] - Stopped successfully\n",
1921                                 task->task_no);
1922
1923                         spin_lock_irqsave(&cmd->t_state_lock, flags);
1924                         atomic_dec(&cmd->t_task_cdbs_left);
1925
1926                         atomic_set(&task->task_active, 0);
1927                         atomic_set(&task->task_stop, 0);
1928                 } else {
1929                         pr_debug("task_no[%d] - Did nothing\n", task->task_no);
1930                         ret++;
1931                 }
1932
1933                 __transport_stop_task_timer(task, &flags);
1934         }
1935         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
1936
1937         return ret;
1938 }
1939
1940 /*
1941  * Handle SAM-esque emulation for generic transport request failures.
1942  */
1943 static void transport_generic_request_failure(
1944         struct se_cmd *cmd,
1945         struct se_device *dev,
1946         int complete,
1947         int sc)
1948 {
1949         int ret = 0;
1950
1951         pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
1952                 " CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd),
1953                 cmd->t_task_cdb[0]);
1954         pr_debug("-----[ i_state: %d t_state/def_t_state:"
1955                 " %d/%d transport_error_status: %d\n",
1956                 cmd->se_tfo->get_cmd_state(cmd),
1957                 cmd->t_state, cmd->deferred_t_state,
1958                 cmd->transport_error_status);
1959         pr_debug("-----[ t_tasks: %d t_task_cdbs_left: %d"
1960                 " t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --"
1961                 " t_transport_active: %d t_transport_stop: %d"
1962                 " t_transport_sent: %d\n", cmd->t_task_list_num,
1963                 atomic_read(&cmd->t_task_cdbs_left),
1964                 atomic_read(&cmd->t_task_cdbs_sent),
1965                 atomic_read(&cmd->t_task_cdbs_ex_left),
1966                 atomic_read(&cmd->t_transport_active),
1967                 atomic_read(&cmd->t_transport_stop),
1968                 atomic_read(&cmd->t_transport_sent));
1969
1970         transport_stop_all_task_timers(cmd);
1971
1972         if (dev)
1973                 atomic_inc(&dev->depth_left);
1974         /*
1975          * For SAM Task Attribute emulation for failed struct se_cmd
1976          */
1977         if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
1978                 transport_complete_task_attr(cmd);
1979
1980         if (complete) {
1981                 transport_direct_request_timeout(cmd);
1982                 cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE;
1983         }
1984
1985         switch (cmd->transport_error_status) {
1986         case PYX_TRANSPORT_UNKNOWN_SAM_OPCODE:
1987                 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
1988                 break;
1989         case PYX_TRANSPORT_REQ_TOO_MANY_SECTORS:
1990                 cmd->scsi_sense_reason = TCM_SECTOR_COUNT_TOO_MANY;
1991                 break;
1992         case PYX_TRANSPORT_INVALID_CDB_FIELD:
1993                 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
1994                 break;
1995         case PYX_TRANSPORT_INVALID_PARAMETER_LIST:
1996                 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
1997                 break;
1998         case PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES:
1999                 if (!sc)
2000                         transport_new_cmd_failure(cmd);
2001                 /*
2002                  * Currently for PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES,
2003                  * we force this session to fall back to session
2004                  * recovery.
2005                  */
2006                 cmd->se_tfo->fall_back_to_erl0(cmd->se_sess);
2007                 cmd->se_tfo->stop_session(cmd->se_sess, 0, 0);
2008
2009                 goto check_stop;
2010         case PYX_TRANSPORT_LU_COMM_FAILURE:
2011         case PYX_TRANSPORT_ILLEGAL_REQUEST:
2012                 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2013                 break;
2014         case PYX_TRANSPORT_UNKNOWN_MODE_PAGE:
2015                 cmd->scsi_sense_reason = TCM_UNKNOWN_MODE_PAGE;
2016                 break;
2017         case PYX_TRANSPORT_WRITE_PROTECTED:
2018                 cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
2019                 break;
2020         case PYX_TRANSPORT_RESERVATION_CONFLICT:
2021                 /*
2022                  * No SENSE Data payload for this case, set SCSI Status
2023                  * and queue the response to $FABRIC_MOD.
2024                  *
2025                  * Uses linux/include/scsi/scsi.h SAM status codes defs
2026                  */
2027                 cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
2028                 /*
2029                  * For UA Interlock Code 11b, a RESERVATION CONFLICT will
2030                  * establish a UNIT ATTENTION with PREVIOUS RESERVATION
2031                  * CONFLICT STATUS.
2032                  *
2033                  * See spc4r17, section 7.4.6 Control Mode Page, Table 349
2034                  */
2035                 if (cmd->se_sess &&
2036                     cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2)
2037                         core_scsi3_ua_allocate(cmd->se_sess->se_node_acl,
2038                                 cmd->orig_fe_lun, 0x2C,
2039                                 ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
2040
2041                 ret = cmd->se_tfo->queue_status(cmd);
2042                 if (ret == -EAGAIN)
2043                         goto queue_full;
2044                 goto check_stop;
2045         case PYX_TRANSPORT_USE_SENSE_REASON:
2046                 /*
2047                  * struct se_cmd->scsi_sense_reason already set
2048                  */
2049                 break;
2050         default:
2051                 pr_err("Unknown transport error for CDB 0x%02x: %d\n",
2052                         cmd->t_task_cdb[0],
2053                         cmd->transport_error_status);
2054                 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
2055                 break;
2056         }
2057         /*
2058          * If a fabric does not define a cmd->se_tfo->new_cmd_map caller,
2059          * make the call to transport_send_check_condition_and_sense()
2060          * directly.  Otherwise expect the fabric to make the call to
2061          * transport_send_check_condition_and_sense() after handling
2062          * possible unsolicited write data payloads.
2063          */
2064         if (!sc && !cmd->se_tfo->new_cmd_map)
2065                 transport_new_cmd_failure(cmd);
2066         else {
2067                 ret = transport_send_check_condition_and_sense(cmd,
2068                                 cmd->scsi_sense_reason, 0);
2069                 if (ret == -EAGAIN)
2070                         goto queue_full;
2071         }
2072
2073 check_stop:
2074         transport_lun_remove_cmd(cmd);
2075         transport_cmd_check_stop_to_fabric(cmd);
2077         return;
2078
2079 queue_full:
2080         cmd->t_state = TRANSPORT_COMPLETE_OK;
2081         transport_handle_queue_full(cmd, cmd->se_dev, transport_complete_qf);
2082 }
2083
2084 static void transport_direct_request_timeout(struct se_cmd *cmd)
2085 {
2086         unsigned long flags;
2087
2088         spin_lock_irqsave(&cmd->t_state_lock, flags);
2089         if (!atomic_read(&cmd->t_transport_timeout)) {
2090                 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2091                 return;
2092         }
2093         if (atomic_read(&cmd->t_task_cdbs_timeout_left)) {
2094                 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2095                 return;
2096         }
2097
2098         atomic_sub(atomic_read(&cmd->t_transport_timeout),
2099                    &cmd->t_se_count);
2100         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2101 }
2102
2103 static void transport_generic_request_timeout(struct se_cmd *cmd)
2104 {
2105         unsigned long flags;
2106
2107         /*
2108          * Reset cmd->t_se_count so that the final call to
2109          * transport_generic_remove() can free memory resources.
2110          */
2111         spin_lock_irqsave(&cmd->t_state_lock, flags);
2112         if (atomic_read(&cmd->t_transport_timeout) > 1) {
2113                 int tmp = (atomic_read(&cmd->t_transport_timeout) - 1);
2114
2115                 atomic_sub(tmp, &cmd->t_se_count);
2116         }
2117         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2118
2119         transport_generic_remove(cmd, 0);
2120 }
2121
2122 static inline u32 transport_lba_21(unsigned char *cdb)
2123 {
2124         return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
2125 }
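/*
 * Example: a READ_6 CDB of { 0x08, 0x01, 0x02, 0x03, ... } decodes above
 * as ((0x01 & 0x1f) << 16) | (0x02 << 8) | 0x03 == LBA 0x010203.  The
 * _32/_64 helpers below assemble wider LBAs the same way, one byte at a
 * time, most significant byte first.
 */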
2126
2127 static inline u32 transport_lba_32(unsigned char *cdb)
2128 {
2129         return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
2130 }
2131
2132 static inline unsigned long long transport_lba_64(unsigned char *cdb)
2133 {
2134         unsigned int __v1, __v2;
2135
2136         __v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
2137         __v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
2138
2139         return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
2140 }
2141
2142 /*
2143  * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs
2144  */
2145 static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
2146 {
2147         unsigned int __v1, __v2;
2148
2149         __v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15];
2150         __v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19];
2151
2152         return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
2153 }
2154
2155 static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd)
2156 {
2157         unsigned long flags;
2158
2159         spin_lock_irqsave(&se_cmd->t_state_lock, flags);
2160         se_cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
2161         spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
2162 }
2163
2164 /*
2165  * Called from interrupt context.
2166  */
2167 static void transport_task_timeout_handler(unsigned long data)
2168 {
2169         struct se_task *task = (struct se_task *)data;
2170         struct se_cmd *cmd = task->task_se_cmd;
2171         unsigned long flags;
2172
2173         pr_debug("transport task timeout fired! task: %p cmd: %p\n", task, cmd);
2174
2175         spin_lock_irqsave(&cmd->t_state_lock, flags);
2176         if (task->task_flags & TF_STOP) {
2177                 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2178                 return;
2179         }
2180         task->task_flags &= ~TF_RUNNING;
2181
2182         /*
2183          * Determine if transport_complete_task() has already been called.
2184          */
2185         if (!atomic_read(&task->task_active)) {
2186                 pr_debug("transport task: %p cmd: %p timeout task_active"
2187                                 " == 0\n", task, cmd);
2188                 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2189                 return;
2190         }
2191
2192         atomic_inc(&cmd->t_se_count);
2193         atomic_inc(&cmd->t_transport_timeout);
2194         cmd->t_tasks_failed = 1;
2195
2196         atomic_set(&task->task_timeout, 1);
2197         task->task_error_status = PYX_TRANSPORT_TASK_TIMEOUT;
2198         task->task_scsi_status = 1;
2199
2200         if (atomic_read(&task->task_stop)) {
2201                 pr_debug("transport task: %p cmd: %p timeout task_stop"
2202                                 " == 1\n", task, cmd);
2203                 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2204                 complete(&task->task_stop_comp);
2205                 return;
2206         }
2207
2208         if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) {
2209                 pr_debug("transport task: %p cmd: %p timeout non zero"
2210                                 " t_task_cdbs_left\n", task, cmd);
2211                 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2212                 return;
2213         }
2214         pr_debug("transport task: %p cmd: %p timeout ZERO t_task_cdbs_left\n",
2215                         task, cmd);
2216
2217         cmd->t_state = TRANSPORT_COMPLETE_FAILURE;
2218         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2219
2220         transport_add_cmd_to_queue(cmd, TRANSPORT_COMPLETE_FAILURE);
2221 }
2222
2223 /*
2224  * Called with cmd->t_state_lock held.
2225  */
2226 static void transport_start_task_timer(struct se_task *task)
2227 {
2228         struct se_device *dev = task->se_dev;
2229         int timeout;
2230
2231         if (task->task_flags & TF_RUNNING)
2232                 return;
2233         /*
2234          * If the task_timeout is disabled, exit now.
2235          */
2236         timeout = dev->se_sub_dev->se_dev_attrib.task_timeout;
2237         if (!timeout)
2238                 return;
2239
2240         init_timer(&task->task_timer);
2241         task->task_timer.expires = (get_jiffies_64() + timeout * HZ);
2242         task->task_timer.data = (unsigned long) task;
2243         task->task_timer.function = transport_task_timeout_handler;
2244
2245         task->task_flags |= TF_RUNNING;
2246         add_timer(&task->task_timer);
2247 #if 0
2248         pr_debug("Starting task timer for cmd: %p task: %p seconds:"
2249                 " %d\n", task->task_se_cmd, task, timeout);
2250 #endif
2251 }
2252
2253 /*
2254  * Called with spin_lock_irq(&cmd->t_state_lock) held.
2255  */
2256 void __transport_stop_task_timer(struct se_task *task, unsigned long *flags)
2257 {
2258         struct se_cmd *cmd = task->task_se_cmd;
2259
2260         if (!(task->task_flags & TF_RUNNING))
2261                 return;
2262
2263         task->task_flags |= TF_STOP;
2264         spin_unlock_irqrestore(&cmd->t_state_lock, *flags);
2265
2266         del_timer_sync(&task->task_timer);
2267
2268         spin_lock_irqsave(&cmd->t_state_lock, *flags);
2269         task->task_flags &= ~TF_RUNNING;
2270         task->task_flags &= ~TF_STOP;
2271 }
2272
2273 static void transport_stop_all_task_timers(struct se_cmd *cmd)
2274 {
2275         struct se_task *task = NULL, *task_tmp;
2276         unsigned long flags;
2277
2278         spin_lock_irqsave(&cmd->t_state_lock, flags);
2279         list_for_each_entry_safe(task, task_tmp,
2280                                 &cmd->t_task_list, t_list)
2281                 __transport_stop_task_timer(task, &flags);
2282         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2283 }
2284
2285 static inline int transport_tcq_window_closed(struct se_device *dev)
2286 {
2287         if (dev->dev_tcq_window_closed++ <
2288                         PYX_TRANSPORT_WINDOW_CLOSED_THRESHOLD) {
2289                 msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_SHORT);
2290         } else
2291                 msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_LONG);
2292
2293         wake_up_interruptible(&dev->dev_queue_obj.thread_wq);
2294         return 0;
2295 }
2296
2297 /*
2298  * Called from Fabric Module context from transport_execute_tasks()
2299  *
2300  * The return of this function determines if the tasks from struct se_cmd
2301  * get added to the execution queue in transport_execute_tasks(),
2302  * or are added to the delayed or ordered lists here.
2303  */
2304 static inline int transport_execute_task_attr(struct se_cmd *cmd)
2305 {
2306         if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
2307                 return 1;
2308         /*
2309          * Check for the existence of HEAD_OF_QUEUE, and if true return 1
2310          * to add the passed struct se_cmd's tasks to the front of the execution queue.
2311          */
2312          if (cmd->sam_task_attr == MSG_HEAD_TAG) {
2313                 atomic_inc(&cmd->se_dev->dev_hoq_count);
2314                 smp_mb__after_atomic_inc();
2315                 pr_debug("Added HEAD_OF_QUEUE for CDB:"
2316                         " 0x%02x, se_ordered_id: %u\n",
2317                         cmd->t_task_cdb[0],
2318                         cmd->se_ordered_id);
2319                 return 1;
2320         } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
2321                 spin_lock(&cmd->se_dev->ordered_cmd_lock);
2322                 list_add_tail(&cmd->se_ordered_node,
2323                                 &cmd->se_dev->ordered_cmd_list);
2324                 spin_unlock(&cmd->se_dev->ordered_cmd_lock);
2325
2326                 atomic_inc(&cmd->se_dev->dev_ordered_sync);
2327                 smp_mb__after_atomic_inc();
2328
2329                 pr_debug("Added ORDERED for CDB: 0x%02x to ordered"
2330                                 " list, se_ordered_id: %u\n",
2331                                 cmd->t_task_cdb[0],
2332                                 cmd->se_ordered_id);
2333                 /*
2334                  * Add ORDERED command to tail of execution queue if
2335                  * no other older commands exist that need to be
2336                  * completed first.
2337                  */
2338                 if (!atomic_read(&cmd->se_dev->simple_cmds))
2339                         return 1;
2340         } else {
2341                 /*
2342                  * For SIMPLE and UNTAGGED Task Attribute commands
2343                  */
2344                 atomic_inc(&cmd->se_dev->simple_cmds);
2345                 smp_mb__after_atomic_inc();
2346         }
2347         /*
2348          * Otherwise, if one or more outstanding ORDERED task attributes exist,
2349          * the dormant task(s) built for the passed struct se_cmd must wait
2350          * until the older commands for this struct se_device have completed.
2351          */
2352         if (atomic_read(&cmd->se_dev->dev_ordered_sync) != 0) {
2353                 /*
2354                  * Add cmd w/ tasks to the delayed cmd queue, to be
2355                  * drained upon completion of the outstanding ORDERED commands.
2356                  */
2357                 spin_lock(&cmd->se_dev->delayed_cmd_lock);
2358                 cmd->se_cmd_flags |= SCF_DELAYED_CMD_FROM_SAM_ATTR;
2359                 list_add_tail(&cmd->se_delayed_node,
2360                                 &cmd->se_dev->delayed_cmd_list);
2361                 spin_unlock(&cmd->se_dev->delayed_cmd_lock);
2362
2363                 pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to"
2364                         " delayed CMD list, se_ordered_id: %u\n",
2365                         cmd->t_task_cdb[0], cmd->sam_task_attr,
2366                         cmd->se_ordered_id);
2367                 /*
2368                  * Return zero to let transport_execute_tasks() know
2369                  * not to add the delayed tasks to the execution list.
2370                  */
2371                 return 0;
2372         }
2373         /*
2374          * Otherwise, no ORDERED task attributes exist.
2375          */
2376         return 1;
2377 }
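/*
 * To summarize the above: transport_execute_task_attr() returns 1 when the
 * tasks for the struct se_cmd may go onto the execution queue immediately
 * (no task attribute emulation in effect, a HEAD_OF_QUEUE command, or
 * nothing older pending), and 0 when the command was parked on the delayed
 * list until the outstanding ORDERED commands drain.
 */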
2378
2379 /*
2380  * Called from fabric module context in transport_generic_new_cmd() and
2381  * transport_generic_process_write()
2382  */
2383 static int transport_execute_tasks(struct se_cmd *cmd)
2384 {
2385         int add_tasks;
2386
2387         if (se_dev_check_online(cmd->se_orig_obj_ptr) != 0) {
2388                 cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE;
2389                 transport_generic_request_failure(cmd, NULL, 0, 1);
2390                 return 0;
2391         }
2392
2393         /*
2394          * Call transport_cmd_check_stop() to see if a fabric exception
2395          * has occurred that prevents execution.
2396          */
2397         if (!transport_cmd_check_stop(cmd, 0, TRANSPORT_PROCESSING)) {
2398                 /*
2399                  * Check for SAM Task Attribute emulation and HEAD_OF_QUEUE
2400                  * attribute for the tasks of the received struct se_cmd CDB
2401                  */
2402                 add_tasks = transport_execute_task_attr(cmd);
2403                 if (!add_tasks)
2404                         goto execute_tasks;
2405                 /*
2406                  * This calls transport_add_tasks_from_cmd() to handle
2407                  * HEAD_OF_QUEUE ordering for SAM Task Attribute emulation
2408                  * (if enabled) in __transport_add_task_to_execute_queue() and
2409                  * transport_add_task_check_sam_attr().
2410                  */
2411                 transport_add_tasks_from_cmd(cmd);
2412         }
2413         /*
2414          * Kick the execution queue for the cmd associated struct se_device
2415          * storage object.
2416          */
2417 execute_tasks:
2418         __transport_execute_tasks(cmd->se_dev);
2419         return 0;
2420 }
2421
2422 /*
2423  * Called to check the struct se_device TCQ depth window, and once open,
2424  * pull struct se_task from struct se_device->execute_task_list for dispatch.
2425  *
2426  * Called from transport_processing_thread()
2427  */
2428 static int __transport_execute_tasks(struct se_device *dev)
2429 {
2430         int error;
2431         struct se_cmd *cmd = NULL;
2432         struct se_task *task = NULL;
2433         unsigned long flags;
2434
2435         /*
2436          * Check if there is enough room in the device and HBA queue to send
2437          * struct se_tasks to the selected transport.
2438          */
2439 check_depth:
2440         if (!atomic_read(&dev->depth_left))
2441                 return transport_tcq_window_closed(dev);
2442
2443         dev->dev_tcq_window_closed = 0;
2444
2445         spin_lock_irq(&dev->execute_task_lock);
2446         if (list_empty(&dev->execute_task_list)) {
2447                 spin_unlock_irq(&dev->execute_task_lock);
2448                 return 0;
2449         }
2450         task = list_first_entry(&dev->execute_task_list,
2451                                 struct se_task, t_execute_list);
2452         list_del(&task->t_execute_list);
2453         atomic_set(&task->task_execute_queue, 0);
2454         atomic_dec(&dev->execute_tasks);
2455         spin_unlock_irq(&dev->execute_task_lock);
2456
2457         atomic_dec(&dev->depth_left);
2458
2459         cmd = task->task_se_cmd;
2460
2461         spin_lock_irqsave(&cmd->t_state_lock, flags);
2462         atomic_set(&task->task_active, 1);
2463         atomic_set(&task->task_sent, 1);
2464         atomic_inc(&cmd->t_task_cdbs_sent);
2465
2466         if (atomic_read(&cmd->t_task_cdbs_sent) ==
2467             cmd->t_task_list_num)
2468                 atomic_set(&cmd->transport_sent, 1);
2469
2470         transport_start_task_timer(task);
2471         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2472         /*
2473          * The struct se_cmd->transport_emulate_cdb() function pointer is used
2474          * to grab REPORT_LUNS and other CDBs we want to handle before they hit the
2475          * struct se_subsystem_api->do_task() caller below.
2476          */
2477         if (cmd->transport_emulate_cdb) {
2478                 error = cmd->transport_emulate_cdb(cmd);
2479                 if (error != 0) {
2480                         cmd->transport_error_status = error;
2481                         atomic_set(&task->task_active, 0);
2482                         atomic_set(&cmd->transport_sent, 0);
2483                         transport_stop_tasks_for_cmd(cmd);
2484                         transport_generic_request_failure(cmd, dev, 0, 1);
2485                         goto check_depth;
2486                 }
2487                 /*
2488                  * Handle the successful completion for transport_emulate_cdb()
2489                  * for synchronous operation, following SCF_EMULATE_CDB_ASYNC
2490                  * Otherwise the caller is expected to complete the task with
2491                  * proper status.
2492                  */
2493                 if (!(cmd->se_cmd_flags & SCF_EMULATE_CDB_ASYNC)) {
2494                         cmd->scsi_status = SAM_STAT_GOOD;
2495                         task->task_scsi_status = GOOD;
2496                         transport_complete_task(task, 1);
2497                 }
2498         } else {
2499                 /*
2500                  * Currently for all virtual TCM plugins including IBLOCK, FILEIO and
2501                  * RAMDISK we use the internal transport_emulate_control_cdb() logic
2502                  * with struct se_subsystem_api callers for the primary SPC-3 TYPE_DISK
2503                  * LUN emulation code.
2504                  *
2505                  * For TCM/pSCSI and all other SCF_SCSI_DATA_SG_IO_CDB I/O tasks we
2506                  * call ->do_task() directly and let the underlying TCM subsystem plugin
2507                  * code handle the CDB emulation.
2508                  */
2509                 if ((dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) &&
2510                     (!(task->task_se_cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)))
2511                         error = transport_emulate_control_cdb(task);
2512                 else
2513                         error = dev->transport->do_task(task);
2514
2515                 if (error != 0) {
2516                         cmd->transport_error_status = error;
2517                         atomic_set(&task->task_active, 0);
2518                         atomic_set(&cmd->transport_sent, 0);
2519                         transport_stop_tasks_for_cmd(cmd);
2520                         transport_generic_request_failure(cmd, dev, 0, 1);
2521                 }
2522         }
2523
2524         goto check_depth;
2527 }
2528
2529 void transport_new_cmd_failure(struct se_cmd *se_cmd)
2530 {
2531         unsigned long flags;
2532         /*
2533          * Any unsolicited data will get dumped for failed command inside of
2534          * the fabric plugin
2535          */
2536         spin_lock_irqsave(&se_cmd->t_state_lock, flags);
2537         se_cmd->se_cmd_flags |= SCF_SE_CMD_FAILED;
2538         se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
2539         spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
2540 }
2541
2542 static void transport_nop_wait_for_tasks(struct se_cmd *, int, int);
2543
2544 static inline u32 transport_get_sectors_6(
2545         unsigned char *cdb,
2546         struct se_cmd *cmd,
2547         int *ret)
2548 {
2549         struct se_device *dev = cmd->se_dev;
2550
2551         /*
2552          * Assume TYPE_DISK for non struct se_device objects.
2553          * Use 8-bit sector value.
2554          */
2555         if (!dev)
2556                 goto type_disk;
2557
2558         /*
2559          * Use 24-bit allocation length for TYPE_TAPE.
2560          */
2561         if (dev->transport->get_device_type(dev) == TYPE_TAPE)
2562                 return (u32)(cdb[2] << 16) + (cdb[3] << 8) + cdb[4];
2563
2564         /*
2565          * Everything else assume TYPE_DISK Sector CDB location.
2566          * Use 8-bit sector value.
2567          */
2568 type_disk:
2569         return (u32)cdb[4];
2570 }
2571
2572 static inline u32 transport_get_sectors_10(
2573         unsigned char *cdb,
2574         struct se_cmd *cmd,
2575         int *ret)
2576 {
2577         struct se_device *dev = cmd->se_dev;
2578
2579         /*
2580          * Assume TYPE_DISK for non struct se_device objects.
2581          * Use 16-bit sector value.
2582          */
2583         if (!dev)
2584                 goto type_disk;
2585
2586         /*
2587          * XXX_10 is not defined in SSC, throw an exception
2588          */
2589         if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
2590                 *ret = -EINVAL;
2591                 return 0;
2592         }
2593
2594         /*
2595          * Everything else assume TYPE_DISK Sector CDB location.
2596          * Use 16-bit sector value.
2597          */
2598 type_disk:
2599         return (u32)(cdb[7] << 8) + cdb[8];
2600 }
2601
2602 static inline u32 transport_get_sectors_12(
2603         unsigned char *cdb,
2604         struct se_cmd *cmd,
2605         int *ret)
2606 {
2607         struct se_device *dev = cmd->se_dev;
2608
2609         /*
2610          * Assume TYPE_DISK for non struct se_device objects.
2611          * Use 32-bit sector value.
2612          */
2613         if (!dev)
2614                 goto type_disk;
2615
2616         /*
2617          * XXX_12 is not defined in SSC, throw an exception
2618          */
2619         if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
2620                 *ret = -EINVAL;
2621                 return 0;
2622         }
2623
2624         /*
2625          * Everything else assume TYPE_DISK Sector CDB location.
2626          * Use 32-bit sector value.
2627          */
2628 type_disk:
2629         return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9];
2630 }
2631
2632 static inline u32 transport_get_sectors_16(
2633         unsigned char *cdb,
2634         struct se_cmd *cmd,
2635         int *ret)
2636 {
2637         struct se_device *dev = cmd->se_dev;
2638
2639         /*
2640          * Assume TYPE_DISK for non struct se_device objects.
2641          * Use 32-bit sector value.
2642          */
2643         if (!dev)
2644                 goto type_disk;
2645
2646         /*
2647          * Use 24-bit allocation length for TYPE_TAPE.
2648          */
2649         if (dev->transport->get_device_type(dev) == TYPE_TAPE)
2650                 return (u32)(cdb[12] << 16) + (cdb[13] << 8) + cdb[14];
2651
2652 type_disk:
2653         return (u32)(cdb[10] << 24) + (cdb[11] << 16) +
2654                     (cdb[12] << 8) + cdb[13];
2655 }
2656
2657 /*
2658  * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants
2659  */
2660 static inline u32 transport_get_sectors_32(
2661         unsigned char *cdb,
2662         struct se_cmd *cmd,
2663         int *ret)
2664 {
2665         /*
2666          * Assume TYPE_DISK for non struct se_device objects.
2667          * Use 32-bit sector value.
2668          */
2669         return (u32)(cdb[28] << 24) + (cdb[29] << 16) +
2670                     (cdb[30] << 8) + cdb[31];
2671
2672 }
2673
2674 static inline u32 transport_get_size(
2675         u32 sectors,
2676         unsigned char *cdb,
2677         struct se_cmd *cmd)
2678 {
2679         struct se_device *dev = cmd->se_dev;
2680
2681         if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
2682                 if (cdb[1] & 1) { /* sectors */
2683                         return dev->se_sub_dev->se_dev_attrib.block_size * sectors;
2684                 } else /* bytes */
2685                         return sectors;
2686         }
2687 #if 0
2688         pr_debug("Returning block_size: %u, sectors: %u == %u for"
2689                         " %s object\n", dev->se_sub_dev->se_dev_attrib.block_size, sectors,
2690                         dev->se_sub_dev->se_dev_attrib.block_size * sectors,
2691                         dev->transport->name);
2692 #endif
2693         return dev->se_sub_dev->se_dev_attrib.block_size * sectors;
2694 }
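/*
 * Example: for a TYPE_DISK backend with a 512 byte block_size, a CDB
 * requesting 8 sectors gives transport_get_size() == 512 * 8 == 4096
 * bytes.  For TYPE_TAPE the FIXED bit (cdb[1] & 1) selects between a
 * block count and a raw byte count, as handled above.
 */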
2695
2696 static void transport_xor_callback(struct se_cmd *cmd)
2697 {
2698         unsigned char *buf, *addr;
2699         struct scatterlist *sg;
2700         unsigned int offset;
2701         int i;
2702         int count;
2703         /*
2704          * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
2705          *
2706          * 1) read the specified logical block(s);
2707          * 2) transfer logical blocks from the data-out buffer;
2708          * 3) XOR the logical blocks transferred from the data-out buffer with
2709          *    the logical blocks read, storing the resulting XOR data in a buffer;
2710          * 4) if the DISABLE WRITE bit is set to zero, then write the logical
2711          *    blocks transferred from the data-out buffer; and
2712          * 5) transfer the resulting XOR data to the data-in buffer.
2713          */
2714         buf = kmalloc(cmd->data_length, GFP_KERNEL);
2715         if (!buf) {
2716                 pr_err("Unable to allocate xor_callback buf\n");
2717                 return;
2718         }
2719         /*
2720          * Copy the scatterlist WRITE buffer located at cmd->t_data_sg
2721          * into the locally allocated *buf
2722          */
2723         sg_copy_to_buffer(cmd->t_data_sg,
2724                           cmd->t_data_nents,
2725                           buf,
2726                           cmd->data_length);
2727
2728         /*
2729          * Now perform the XOR against the BIDI read memory located at
2730          * cmd->t_mem_bidi_list
2731          */
2732
2733         offset = 0;
2734         for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
2735                 addr = kmap_atomic(sg_page(sg), KM_USER0);
2736                 if (!addr)
2737                         goto out;
2738
2739                 for (i = 0; i < sg->length; i++)
2740                         *(addr + sg->offset + i) ^= *(buf + offset + i);
2741
2742                 offset += sg->length;
2743                 kunmap_atomic(addr, KM_USER0);
2744         }
2745
2746 out:
2747         kfree(buf);
2748 }
2749
2750 /*
2751  * Used to obtain Sense Data from underlying Linux/SCSI struct scsi_cmnd
2752  */
2753 static int transport_get_sense_data(struct se_cmd *cmd)
2754 {
2755         unsigned char *buffer = cmd->sense_buffer, *sense_buffer = NULL;
2756         struct se_device *dev;
2757         struct se_task *task = NULL, *task_tmp;
2758         unsigned long flags;
2759         u32 offset = 0;
2760
2761         WARN_ON(!cmd->se_lun);
2762
2763         spin_lock_irqsave(&cmd->t_state_lock, flags);
2764         if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
2765                 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2766                 return 0;
2767         }
2768
2769         list_for_each_entry_safe(task, task_tmp,
2770                                 &cmd->t_task_list, t_list) {
2771
2772                 if (!task->task_sense)
2773                         continue;
2774
2775                 dev = task->se_dev;
2776                 if (!dev)
2777                         continue;
2778
2779                 if (!dev->transport->get_sense_buffer) {
2780                         pr_err("dev->transport->get_sense_buffer"
2781                                         " is NULL\n");
2782                         continue;
2783                 }
2784
2785                 sense_buffer = dev->transport->get_sense_buffer(task);
2786                 if (!sense_buffer) {
2787                         pr_err("ITT[0x%08x]_TASK[%d]: Unable to locate"
2788                                 " sense buffer for task with sense\n",
2789                                 cmd->se_tfo->get_task_tag(cmd), task->task_no);
2790                         continue;
2791                 }
2792                 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2793
2794                 offset = cmd->se_tfo->set_fabric_sense_len(cmd,
2795                                 TRANSPORT_SENSE_BUFFER);
2796
2797                 memcpy(&buffer[offset], sense_buffer,
2798                                 TRANSPORT_SENSE_BUFFER);
2799                 cmd->scsi_status = task->task_scsi_status;
2800                 /* Automatically padded */
2801                 cmd->scsi_sense_length =
2802                                 (TRANSPORT_SENSE_BUFFER + offset);
2803
2804                 pr_debug("HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x"
2805                                 " and sense\n",
2806                         dev->se_hba->hba_id, dev->transport->name,
2807                                 cmd->scsi_status);
2808                 return 0;
2809         }
2810         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2811
2812         return -1;
2813 }
2814
2815 static int
2816 transport_handle_reservation_conflict(struct se_cmd *cmd)
2817 {
2818         cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks;
2819         cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
2820         cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
2821         cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
2822         /*
2823          * For UA Interlock Code 11b, a RESERVATION CONFLICT will
2824          * establish a UNIT ATTENTION with PREVIOUS RESERVATION
2825          * CONFLICT STATUS.
2826          *
2827          * See spc4r17, section 7.4.6 Control Mode Page, Table 349
2828          */
2829         if (cmd->se_sess &&
2830             cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2)
2831                 core_scsi3_ua_allocate(cmd->se_sess->se_node_acl,
2832                         cmd->orig_fe_lun, 0x2C,
2833                         ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
2834         return -EINVAL;
2835 }
2836
2837 static inline long long transport_dev_end_lba(struct se_device *dev)
2838 {
2839         return dev->transport->get_blocks(dev) + 1;
2840 }
2841
2842 static int transport_cmd_get_valid_sectors(struct se_cmd *cmd)
2843 {
2844         struct se_device *dev = cmd->se_dev;
2845         u32 sectors;
2846
2847         if (dev->transport->get_device_type(dev) != TYPE_DISK)
2848                 return 0;
2849
2850         sectors = (cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size);
2851
2852         if ((cmd->t_task_lba + sectors) > transport_dev_end_lba(dev)) {
2853                 pr_err("LBA: %llu Sectors: %u exceeds"
2854                         " transport_dev_end_lba(): %llu\n",
2855                         cmd->t_task_lba, sectors,
2856                         transport_dev_end_lba(dev));
2857                 return -EINVAL;
2858         }
2859
2860         return 0;
2861 }
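/*
 * Worked example for the check above (illustrative values): a 1 MiB
 * transfer against a TYPE_DISK backend with a 512-byte block_size gives
 *
 *	sectors = 1048576 / 512 = 2048
 *
 * and the command is rejected when (t_task_lba + 2048) runs past
 * transport_dev_end_lba(), i.e. past get_blocks() + 1.
 */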
2862
2863 static int target_check_write_same_discard(unsigned char *flags, struct se_device *dev)
2864 {
2865         /*
2866          * Determine if the received WRITE_SAME is used for direct
2867          * passthrough into Linux/SCSI with struct request via TCM/pSCSI,
2868          * or if we are signaling the use of internal WRITE_SAME + UNMAP=1
2869          * emulation for Linux/BLOCK discard with TCM/IBLOCK code.
2870          */
2871         int passthrough = (dev->transport->transport_type ==
2872                                 TRANSPORT_PLUGIN_PHBA_PDEV);
2873
2874         if (!passthrough) {
2875                 if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
2876                         pr_err("WRITE_SAME PBDATA and LBDATA"
2877                                 " bits not supported for Block Discard"
2878                                 " Emulation\n");
2879                         return -ENOSYS;
2880                 }
2881                 /*
2882                  * Currently for the emulated case we only accept
2883                  * tpws with the UNMAP=1 bit set.
2884                  */
2885                 if (!(flags[0] & 0x08)) {
2886                         pr_err("WRITE_SAME w/o UNMAP bit not"
2887                                 " supported for Block Discard Emulation\n");
2888                         return -ENOSYS;
2889                 }
2890         }
2891
2892         return 0;
2893 }
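/*
 * The flags byte checked above carries the SBC WRITE_SAME control bits:
 * bit 3 (0x08) UNMAP, bit 2 (0x04) PBDATA, bit 1 (0x02) LBDATA.
 * Caller-side sketch for the emulated (non pSCSI) path, with the flags
 * byte of a WRITE_SAME (10) CDB:
 *
 *	cdb[1] = 0x08;	// UNMAP=1  -> target_check_write_same_discard() == 0
 *	cdb[1] = 0x04;	// PBDATA=1 -> target_check_write_same_discard() == -ENOSYS
 */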
2894
2895 /*      transport_generic_cmd_sequencer():
2896  *
2897  *      Generic Command Sequencer that should work for most DAS transport
2898  *      drivers.
2899  *
2900  *      Called from transport_generic_allocate_tasks() in the $FABRIC_MOD
2901  *      RX Thread.
2902  *
2903  *      FIXME: Need to support other SCSI OPCODEs as well.
2904  */
2905 static int transport_generic_cmd_sequencer(
2906         struct se_cmd *cmd,
2907         unsigned char *cdb)
2908 {
2909         struct se_device *dev = cmd->se_dev;
2910         struct se_subsystem_dev *su_dev = dev->se_sub_dev;
2911         int ret = 0, sector_ret = 0, passthrough;
2912         u32 sectors = 0, size = 0, pr_reg_type = 0;
2913         u16 service_action;
2914         u8 alua_ascq = 0;
2915         /*
2916          * Check for an existing UNIT ATTENTION condition
2917          */
2918         if (core_scsi3_ua_check(cmd, cdb) < 0) {
2919                 cmd->transport_wait_for_tasks =
2920                                 &transport_nop_wait_for_tasks;
2921                 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
2922                 cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION;
2923                 return -EINVAL;
2924         }
2925         /*
2926          * Check status of Asymmetric Logical Unit Assignment port
2927          */
2928         ret = su_dev->t10_alua.alua_state_check(cmd, cdb, &alua_ascq);
2929         if (ret != 0) {
2930                 cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks;
2931                 /*
2932                  * Set SCSI additional sense code (ASC) to 'LUN Not Accessible'.
2933                  * The ALUA additional sense code qualifier (ASCQ) is determined
2934                  * by the ALUA primary or secondary access state.
2935                  */
2936                 if (ret > 0) {
2937 #if 0
2938                         pr_debug("[%s]: ALUA TG Port not available,"
2939                                 " SenseKey: NOT_READY, ASC/ASCQ: 0x04/0x%02x\n",
2940                                 cmd->se_tfo->get_fabric_name(), alua_ascq);
2941 #endif
2942                         transport_set_sense_codes(cmd, 0x04, alua_ascq);
2943                         cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
2944                         cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY;
2945                         return -EINVAL;
2946                 }
2947                 goto out_invalid_cdb_field;
2948         }
2949         /*
2950          * Check status for SPC-3 Persistent Reservations
2951          */
2952         if (su_dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type) != 0) {
2953                 if (su_dev->t10_pr.pr_ops.t10_seq_non_holder(
2954                                         cmd, cdb, pr_reg_type) != 0)
2955                         return transport_handle_reservation_conflict(cmd);
2956                 /*
2957                  * This means the CDB is allowed for the SCSI Initiator port
2958                  * when said port is *NOT* holding the legacy SPC-2 or
2959                  * SPC-3 Persistent Reservation.
2960                  */
2961         }
2962
2963         switch (cdb[0]) {
2964         case READ_6:
2965                 sectors = transport_get_sectors_6(cdb, cmd, &sector_ret);
2966                 if (sector_ret)
2967                         goto out_unsupported_cdb;
2968                 size = transport_get_size(sectors, cdb, cmd);
2969                 cmd->transport_split_cdb = &split_cdb_XX_6;
2970                 cmd->t_task_lba = transport_lba_21(cdb);
2971                 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2972                 break;
2973         case READ_10:
2974                 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
2975                 if (sector_ret)
2976                         goto out_unsupported_cdb;
2977                 size = transport_get_size(sectors, cdb, cmd);
2978                 cmd->transport_split_cdb = &split_cdb_XX_10;
2979                 cmd->t_task_lba = transport_lba_32(cdb);
2980                 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2981                 break;
2982         case READ_12:
2983                 sectors = transport_get_sectors_12(cdb, cmd, &sector_ret);
2984                 if (sector_ret)
2985                         goto out_unsupported_cdb;
2986                 size = transport_get_size(sectors, cdb, cmd);
2987                 cmd->transport_split_cdb = &split_cdb_XX_12;
2988                 cmd->t_task_lba = transport_lba_32(cdb);
2989                 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2990                 break;
2991         case READ_16:
2992                 sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
2993                 if (sector_ret)
2994                         goto out_unsupported_cdb;
2995                 size = transport_get_size(sectors, cdb, cmd);
2996                 cmd->transport_split_cdb = &split_cdb_XX_16;
2997                 cmd->t_task_lba = transport_lba_64(cdb);
2998                 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2999                 break;
3000         case WRITE_6:
3001                 sectors = transport_get_sectors_6(cdb, cmd, &sector_ret);
3002                 if (sector_ret)
3003                         goto out_unsupported_cdb;
3004                 size = transport_get_size(sectors, cdb, cmd);
3005                 cmd->transport_split_cdb = &split_cdb_XX_6;
3006                 cmd->t_task_lba = transport_lba_21(cdb);
3007                 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
3008                 break;
3009         case WRITE_10:
3010                 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
3011                 if (sector_ret)
3012                         goto out_unsupported_cdb;
3013                 size = transport_get_size(sectors, cdb, cmd);
3014                 cmd->transport_split_cdb = &split_cdb_XX_10;
3015                 cmd->t_task_lba = transport_lba_32(cdb);
3016                 cmd->t_tasks_fua = (cdb[1] & 0x8);
3017                 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
3018                 break;
3019         case WRITE_12:
3020                 sectors = transport_get_sectors_12(cdb, cmd, &sector_ret);
3021                 if (sector_ret)
3022                         goto out_unsupported_cdb;
3023                 size = transport_get_size(sectors, cdb, cmd);
3024                 cmd->transport_split_cdb = &split_cdb_XX_12;
3025                 cmd->t_task_lba = transport_lba_32(cdb);
3026                 cmd->t_tasks_fua = (cdb[1] & 0x8);
3027                 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
3028                 break;
3029         case WRITE_16:
3030                 sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
3031                 if (sector_ret)
3032                         goto out_unsupported_cdb;
3033                 size = transport_get_size(sectors, cdb, cmd);
3034                 cmd->transport_split_cdb = &split_cdb_XX_16;
3035                 cmd->t_task_lba = transport_lba_64(cdb);
3036                 cmd->t_tasks_fua = (cdb[1] & 0x8);
3037                 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
3038                 break;
3039         case XDWRITEREAD_10:
3040                 if ((cmd->data_direction != DMA_TO_DEVICE) ||
3041                     !(cmd->t_tasks_bidi))
3042                         goto out_invalid_cdb_field;
3043                 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
3044                 if (sector_ret)
3045                         goto out_unsupported_cdb;
3046                 size = transport_get_size(sectors, cdb, cmd);
3047                 cmd->transport_split_cdb = &split_cdb_XX_10;
3048                 cmd->t_task_lba = transport_lba_32(cdb);
3049                 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
3050                 passthrough = (dev->transport->transport_type ==
3051                                 TRANSPORT_PLUGIN_PHBA_PDEV);
3052                 /*
3053                  * Skip the remaining assignments for TCM/PSCSI passthrough
3054                  */
3055                 if (passthrough)
3056                         break;
3057                 /*
3058                  * Setup BIDI XOR callback to be run during transport_generic_complete_ok()
3059                  */
3060                 cmd->transport_complete_callback = &transport_xor_callback;
3061                 cmd->t_tasks_fua = (cdb[1] & 0x8);
3062                 break;
3063         case VARIABLE_LENGTH_CMD:
3064                 service_action = get_unaligned_be16(&cdb[8]);
3065                 /*
3066                  * Determine if this is a TCM/pSCSI device and we should disable
3067                  * internal emulation for this CDB.
3068                  */
3069                 passthrough = (dev->transport->transport_type ==
3070                                         TRANSPORT_PLUGIN_PHBA_PDEV);
3071
3072                 switch (service_action) {
3073                 case XDWRITEREAD_32:
3074                         sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
3075                         if (sector_ret)
3076                                 goto out_unsupported_cdb;
3077                         size = transport_get_size(sectors, cdb, cmd);
3078                         /*
3079                          * Use WRITE_32 and READ_32 opcodes for the emulated
3080                          * XDWRITE_READ_32 logic.
3081                          */
3082                         cmd->transport_split_cdb = &split_cdb_XX_32;
3083                         cmd->t_task_lba = transport_lba_64_ext(cdb);
3084                         cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
3085
3086                         /*
3087                          * Skip the remaining assignments for TCM/PSCSI passthrough
3088                          */
3089                         if (passthrough)
3090                                 break;
3091
3092                         /*
3093                          * Setup BIDI XOR callback to be run during
3094                          * transport_generic_complete_ok()
3095                          */
3096                         cmd->transport_complete_callback = &transport_xor_callback;
3097                         cmd->t_tasks_fua = (cdb[10] & 0x8);
3098                         break;
3099                 case WRITE_SAME_32:
3100                         sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
3101                         if (sector_ret)
3102                                 goto out_unsupported_cdb;
3103
3104                         if (sectors)
3105                                 size = transport_get_size(1, cdb, cmd);
3106                         else {
3107                                 pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
3108                                        " supported\n");
3109                                 goto out_invalid_cdb_field;
3110                         }
3111
3112                         cmd->t_task_lba = get_unaligned_be64(&cdb[12]);
3113                         cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3114
3115                         if (target_check_write_same_discard(&cdb[10], dev) < 0)
3116                                 goto out_invalid_cdb_field;
3117
3118                         break;
3119                 default:
3120                         pr_err("VARIABLE_LENGTH_CMD service action"
3121                                 " 0x%04x not supported\n", service_action);
3122                         goto out_unsupported_cdb;
3123                 }
3124                 break;
3125         case MAINTENANCE_IN:
3126                 if (dev->transport->get_device_type(dev) != TYPE_ROM) {
3127                         /* MAINTENANCE_IN from SCC-2 */
3128                         /*
3129                          * Check for emulated MI_REPORT_TARGET_PGS.
3130                          */
3131                         if (cdb[1] == MI_REPORT_TARGET_PGS) {
3132                                 cmd->transport_emulate_cdb =
3133                                 (su_dev->t10_alua.alua_type ==
3134                                  SPC3_ALUA_EMULATED) ?
3135                                 core_emulate_report_target_port_groups :
3136                                 NULL;
3137                         }
3138                         size = (cdb[6] << 24) | (cdb[7] << 16) |
3139                                (cdb[8] << 8) | cdb[9];
3140                 } else {
3141                         /* GPCMD_SEND_KEY from MultiMedia Commands (MMC) */
3142                         size = (cdb[8] << 8) + cdb[9];
3143                 }
3144                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3145                 break;
3146         case MODE_SELECT:
3147                 size = cdb[4];
3148                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3149                 break;
3150         case MODE_SELECT_10:
3151                 size = (cdb[7] << 8) + cdb[8];
3152                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3153                 break;
3154         case MODE_SENSE:
3155                 size = cdb[4];
3156                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3157                 break;
3158         case MODE_SENSE_10:
3159         case GPCMD_READ_BUFFER_CAPACITY:
3160         case GPCMD_SEND_OPC:
3161         case LOG_SELECT:
3162         case LOG_SENSE:
3163                 size = (cdb[7] << 8) + cdb[8];
3164                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3165                 break;
3166         case READ_BLOCK_LIMITS:
3167                 size = READ_BLOCK_LEN;
3168                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3169                 break;
3170         case GPCMD_GET_CONFIGURATION:
3171         case GPCMD_READ_FORMAT_CAPACITIES:
3172         case GPCMD_READ_DISC_INFO:
3173         case GPCMD_READ_TRACK_RZONE_INFO:
3174                 size = (cdb[7] << 8) + cdb[8];
3175                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3176                 break;
3177         case PERSISTENT_RESERVE_IN:
3178         case PERSISTENT_RESERVE_OUT:
3179                 cmd->transport_emulate_cdb =
3180                         (su_dev->t10_pr.res_type ==
3181                          SPC3_PERSISTENT_RESERVATIONS) ?
3182                         core_scsi3_emulate_pr : NULL;
3183                 size = (cdb[7] << 8) + cdb[8];
3184                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3185                 break;
3186         case GPCMD_MECHANISM_STATUS:
3187         case GPCMD_READ_DVD_STRUCTURE:
3188                 size = (cdb[8] << 8) + cdb[9];
3189                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3190                 break;
3191         case READ_POSITION:
3192                 size = READ_POSITION_LEN;
3193                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3194                 break;
3195         case MAINTENANCE_OUT:
3196                 if (dev->transport->get_device_type(dev) != TYPE_ROM) {
3197                         /* MAINTENANCE_OUT from SCC-2
3198                          *
3199                          * Check for emulated MO_SET_TARGET_PGS.
3200                          */
3201                         if (cdb[1] == MO_SET_TARGET_PGS) {
3202                                 cmd->transport_emulate_cdb =
3203                                 (su_dev->t10_alua.alua_type ==
3204                                         SPC3_ALUA_EMULATED) ?
3205                                 core_emulate_set_target_port_groups :
3206                                 NULL;
3207                         }
3208
3209                         size = (cdb[6] << 24) | (cdb[7] << 16) |
3210                                (cdb[8] << 8) | cdb[9];
3211                 } else  {
3212                         /* GPCMD_REPORT_KEY from MultiMedia Commands (MMC) */
3213                         size = (cdb[8] << 8) + cdb[9];
3214                 }
3215                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3216                 break;
3217         case INQUIRY:
3218                 size = (cdb[3] << 8) + cdb[4];
3219                 /*
3220                  * Do implicit HEAD_OF_QUEUE processing for INQUIRY.
3221                  * See spc4r17 section 5.3
3222                  */
3223                 if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
3224                         cmd->sam_task_attr = MSG_HEAD_TAG;
3225                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3226                 break;
3227         case READ_BUFFER:
3228                 size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
3229                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3230                 break;
3231         case READ_CAPACITY:
3232                 size = READ_CAP_LEN;
3233                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3234                 break;
3235         case READ_MEDIA_SERIAL_NUMBER:
3236         case SECURITY_PROTOCOL_IN:
3237         case SECURITY_PROTOCOL_OUT:
3238                 size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
3239                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3240                 break;
3241         case SERVICE_ACTION_IN:
3242         case ACCESS_CONTROL_IN:
3243         case ACCESS_CONTROL_OUT:
3244         case EXTENDED_COPY:
3245         case READ_ATTRIBUTE:
3246         case RECEIVE_COPY_RESULTS:
3247         case WRITE_ATTRIBUTE:
3248                 size = (cdb[10] << 24) | (cdb[11] << 16) |
3249                        (cdb[12] << 8) | cdb[13];
3250                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3251                 break;
3252         case RECEIVE_DIAGNOSTIC:
3253         case SEND_DIAGNOSTIC:
3254                 size = (cdb[3] << 8) | cdb[4];
3255                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3256                 break;
3257 /* #warning FIXME: Figure out correct GPCMD_READ_CD blocksize. */
3258 #if 0
3259         case GPCMD_READ_CD:
3260                 sectors = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
3261                 size = (2336 * sectors);
3262                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3263                 break;
3264 #endif
3265         case READ_TOC:
3266                 size = cdb[8];
3267                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3268                 break;
3269         case REQUEST_SENSE:
3270                 size = cdb[4];
3271                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3272                 break;
3273         case READ_ELEMENT_STATUS:
3274                 size = 65536 * cdb[7] + 256 * cdb[8] + cdb[9];
3275                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3276                 break;
3277         case WRITE_BUFFER:
3278                 size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
3279                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3280                 break;
3281         case RESERVE:
3282         case RESERVE_10:
3283                 /*
3284                  * The SPC-2 RESERVE does not contain a size in the SCSI CDB.
3285                  * Assume the passthrough or $FABRIC_MOD will tell us about it.
3286                  */
3287                 if (cdb[0] == RESERVE_10)
3288                         size = (cdb[7] << 8) | cdb[8];
3289                 else
3290                         size = cmd->data_length;
3291
3292                 /*
3293                  * Setup the legacy emulated handler for SPC-2 and
3294                  * >= SPC-3 compatible reservation handling (CRH=1).
3295                  * Otherwise, we assume the underlying SCSI logic is
3296                  * running in SPC_PASSTHROUGH, and wants reservations
3297                  * emulation disabled.
3298                  */
3299                 cmd->transport_emulate_cdb =
3300                                 (su_dev->t10_pr.res_type !=
3301                                  SPC_PASSTHROUGH) ?
3302                                 core_scsi2_emulate_crh : NULL;
3303                 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
3304                 break;
3305         case RELEASE:
3306         case RELEASE_10:
3307                 /*
3308                  * The SPC-2 RELEASE does not contain a size in the SCSI CDB.
3309                  * Assume the passthrough or $FABRIC_MOD will tell us about it.
3310                  */
3311                 if (cdb[0] == RELEASE_10)
3312                         size = (cdb[7] << 8) | cdb[8];
3313                 else
3314                         size = cmd->data_length;
3315
3316                 cmd->transport_emulate_cdb =
3317                                 (su_dev->t10_pr.res_type !=
3318                                  SPC_PASSTHROUGH) ?
3319                                 core_scsi2_emulate_crh : NULL;
3320                 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
3321                 break;
3322         case SYNCHRONIZE_CACHE:
3323         case 0x91: /* SYNCHRONIZE_CACHE_16: */
3324                 /*
3325                  * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE
3326                  */
3327                 if (cdb[0] == SYNCHRONIZE_CACHE) {
3328                         sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
3329                         cmd->t_task_lba = transport_lba_32(cdb);
3330                 } else {
3331                         sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
3332                         cmd->t_task_lba = transport_lba_64(cdb);
3333                 }
3334                 if (sector_ret)
3335                         goto out_unsupported_cdb;
3336
3337                 size = transport_get_size(sectors, cdb, cmd);
3338                 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
3339
3340                 /*
3341                  * For TCM/pSCSI passthrough, skip cmd->transport_emulate_cdb()
3342                  */
3343                 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
3344                         break;
3345                 /*
3346                  * Set SCF_EMULATE_CDB_ASYNC to ensure asynchronous operation
3347                  * for SYNCHRONIZE_CACHE* Immed=1 case in __transport_execute_tasks()
3348                  */
3349                 cmd->se_cmd_flags |= SCF_EMULATE_CDB_ASYNC;
3350                 /*
3351                  * Check to ensure that LBA + Range does not exceed past end of
3352                  * device for IBLOCK and FILEIO ->do_sync_cache() backend calls
3353                  */
3354                 if ((cmd->t_task_lba != 0) || (sectors != 0)) {
3355                         if (transport_cmd_get_valid_sectors(cmd) < 0)
3356                                 goto out_invalid_cdb_field;
3357                 }
3358                 break;
3359         case UNMAP:
3360                 size = get_unaligned_be16(&cdb[7]);
3361                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3362                 break;
3363         case WRITE_SAME_16:
3364                 sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
3365                 if (sector_ret)
3366                         goto out_unsupported_cdb;
3367
3368                 if (sectors)
3369                         size = transport_get_size(1, cdb, cmd);
3370                 else {
3371                         pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
3372                         goto out_invalid_cdb_field;
3373                 }
3374
3375                 cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
3376                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3377
3378                 if (target_check_write_same_discard(&cdb[1], dev) < 0)
3379                         goto out_invalid_cdb_field;
3380                 break;
3381         case WRITE_SAME:
3382                 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
3383                 if (sector_ret)
3384                         goto out_unsupported_cdb;
3385
3386                 if (sectors)
3387                         size = transport_get_size(1, cdb, cmd);
3388                 else {
3389                         pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
3390                         goto out_invalid_cdb_field;
3391                 }
3392
3393                 cmd->t_task_lba = get_unaligned_be32(&cdb[2]);
3394                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3395                 /*
3396                  * Follow sbcr26 with WRITE_SAME (10) and check for the existence
3397                  * of byte 1 bit 3 UNMAP instead of original reserved field
3398                  */
3399                 if (target_check_write_same_discard(&cdb[1], dev) < 0)
3400                         goto out_invalid_cdb_field;
3401                 break;
3402         case ALLOW_MEDIUM_REMOVAL:
3403         case GPCMD_CLOSE_TRACK:
3404         case ERASE:
3405         case INITIALIZE_ELEMENT_STATUS:
3406         case GPCMD_LOAD_UNLOAD:
3407         case REZERO_UNIT:
3408         case SEEK_10:
3409         case GPCMD_SET_SPEED:
3410         case SPACE:
3411         case START_STOP:
3412         case TEST_UNIT_READY:
3413         case VERIFY:
3414         case WRITE_FILEMARKS:
3415         case MOVE_MEDIUM:
3416                 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
3417                 break;
3418         case REPORT_LUNS:
3419                 cmd->transport_emulate_cdb =
3420                                 transport_core_report_lun_response;
3421                 size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
3422                 /*
3423                  * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS.
3424                  * See spc4r17 section 5.3
3425                  */
3426                 if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
3427                         cmd->sam_task_attr = MSG_HEAD_TAG;
3428                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3429                 break;
3430         default:
3431                 pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode"
3432                         " 0x%02x, sending CHECK_CONDITION.\n",
3433                         cmd->se_tfo->get_fabric_name(), cdb[0]);
3434                 cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks;
3435                 goto out_unsupported_cdb;
3436         }
3437
3438         if (size != cmd->data_length) {
3439                 pr_warn("TARGET_CORE[%s]: Expected Transfer Length:"
3440                         " %u does not match SCSI CDB Length: %u for SAM Opcode:"
3441                         " 0x%02x\n", cmd->se_tfo->get_fabric_name(),
3442                                 cmd->data_length, size, cdb[0]);
3443
3444                 cmd->cmd_spdtl = size;
3445
3446                 if (cmd->data_direction == DMA_TO_DEVICE) {
3447                         pr_err("Rejecting underflow/overflow"
3448                                         " WRITE data\n");
3449                         goto out_invalid_cdb_field;
3450                 }
3451                 /*
3452                  * Reject READ_* or WRITE_* with overflow/underflow for
3453                  * type SCF_SCSI_DATA_SG_IO_CDB on non 512-byte block_size setups.
3454                  */
3455                 if (!ret && (dev->se_sub_dev->se_dev_attrib.block_size != 512))  {
3456                         pr_err("Failing OVERFLOW/UNDERFLOW for LBA op"
3457                                 " CDB on non 512-byte sector setup subsystem"
3458                                 " plugin: %s\n", dev->transport->name);
3459                         /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
3460                         goto out_invalid_cdb_field;
3461                 }
3462
3463                 if (size > cmd->data_length) {
3464                         cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
3465                         cmd->residual_count = (size - cmd->data_length);
3466                 } else {
3467                         cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
3468                         cmd->residual_count = (cmd->data_length - size);
3469                 }
3470                 cmd->data_length = size;
3471         }
3472
3473         /* Let's limit control cdbs to a page, for simplicity's sake. */
3474         if ((cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) &&
3475             size > PAGE_SIZE)
3476                 goto out_invalid_cdb_field;
3477
3478         transport_set_supported_SAM_opcode(cmd);
3479         return ret;
3480
3481 out_unsupported_cdb:
3482         cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
3483         cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
3484         return -EINVAL;
3485 out_invalid_cdb_field:
3486         cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
3487         cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
3488         return -EINVAL;
3489 }
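/*
 * Example CDB decode matching the sequencer above (illustrative bytes):
 * a READ_10 of 8 sectors at LBA 0x1000 arrives as
 *
 *	cdb[] = { 0x28, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x08, 0x00 }
 *
 * so transport_lba_32() yields t_task_lba = 0x1000,
 * transport_get_sectors_10() yields sectors = 8, and with a 512-byte
 * block_size transport_get_size() computes size = 8 * 512 = 4096, which
 * must match cmd->data_length; otherwise the overflow/underflow handling
 * at the end of the function adjusts residual_count and cmd->data_length.
 */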
3490
3491 /*
3492  * Called from transport_generic_complete_ok() and
3493  * transport_generic_request_failure() to determine which dormant/delayed
3494  * and ordered cmds need to have their tasks added to the execution queue.
3495  */
3496 static void transport_complete_task_attr(struct se_cmd *cmd)
3497 {
3498         struct se_device *dev = cmd->se_dev;
3499         struct se_cmd *cmd_p, *cmd_tmp;
3500         int new_active_tasks = 0;
3501
3502         if (cmd->sam_task_attr == MSG_SIMPLE_TAG) {
3503                 atomic_dec(&dev->simple_cmds);
3504                 smp_mb__after_atomic_dec();
3505                 dev->dev_cur_ordered_id++;
3506                 pr_debug("Incremented dev->dev_cur_ordered_id: %u for"
3507                         " SIMPLE: %u\n", dev->dev_cur_ordered_id,
3508                         cmd->se_ordered_id);
3509         } else if (cmd->sam_task_attr == MSG_HEAD_TAG) {
3510                 atomic_dec(&dev->dev_hoq_count);
3511                 smp_mb__after_atomic_dec();
3512                 dev->dev_cur_ordered_id++;
3513                 pr_debug("Incremented dev_cur_ordered_id: %u for"
3514                         " HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
3515                         cmd->se_ordered_id);
3516         } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
3517                 spin_lock(&dev->ordered_cmd_lock);
3518                 list_del(&cmd->se_ordered_node);
3519                 atomic_dec(&dev->dev_ordered_sync);
3520                 smp_mb__after_atomic_dec();
3521                 spin_unlock(&dev->ordered_cmd_lock);
3522
3523                 dev->dev_cur_ordered_id++;
3524                 pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:"
3525                         " %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id);
3526         }
3527         /*
3528          * Process all commands up to the last received
3529          * ORDERED task attribute which requires another blocking
3530          * boundary
3531          */
3532         spin_lock(&dev->delayed_cmd_lock);
3533         list_for_each_entry_safe(cmd_p, cmd_tmp,
3534                         &dev->delayed_cmd_list, se_delayed_node) {
3535
3536                 list_del(&cmd_p->se_delayed_node);
3537                 spin_unlock(&dev->delayed_cmd_lock);
3538
3539                 pr_debug("Calling add_tasks() for"
3540                         " cmd_p: 0x%02x Task Attr: 0x%02x"
3541                         " Dormant -> Active, se_ordered_id: %u\n",
3542                         cmd_p->t_task_cdb[0],
3543                         cmd_p->sam_task_attr, cmd_p->se_ordered_id);
3544
3545                 transport_add_tasks_from_cmd(cmd_p);
3546                 new_active_tasks++;
3547
3548                 spin_lock(&dev->delayed_cmd_lock);
3549                 if (cmd_p->sam_task_attr == MSG_ORDERED_TAG)
3550                         break;
3551         }
3552         spin_unlock(&dev->delayed_cmd_lock);
3553         /*
3554          * If new tasks have become active, wake up the transport thread
3555          * to do the processing of the Active tasks.
3556          */
3557         if (new_active_tasks != 0)
3558                 wake_up_interruptible(&dev->dev_queue_obj.thread_wq);
3559 }
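/*
 * Example of the ORDERED blocking boundary handled above: with a delayed
 * list of
 *
 *	[ SIMPLE, SIMPLE, ORDERED, SIMPLE ]
 *
 * a completion re-adds tasks for the two leading SIMPLE commands and the
 * ORDERED command, then stops at the ORDERED boundary; the trailing
 * SIMPLE command stays dormant until the ORDERED command itself completes
 * and this function runs again.
 */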
3560
3561 static int transport_complete_qf(struct se_cmd *cmd)
3562 {
3563         int ret = 0;
3564
3565         if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
3566                 return cmd->se_tfo->queue_status(cmd);
3567
3568         switch (cmd->data_direction) {
3569         case DMA_FROM_DEVICE:
3570                 ret = cmd->se_tfo->queue_data_in(cmd);
3571                 break;
3572         case DMA_TO_DEVICE:
3573                 if (cmd->t_bidi_data_sg) {
3574                         ret = cmd->se_tfo->queue_data_in(cmd);
3575                         if (ret < 0)
3576                                 return ret;
3577                 }
3578                 /* Fall through for DMA_TO_DEVICE */
3579         case DMA_NONE:
3580                 ret = cmd->se_tfo->queue_status(cmd);
3581                 break;
3582         default:
3583                 break;
3584         }
3585
3586         return ret;
3587 }
3588
3589 static void transport_handle_queue_full(
3590         struct se_cmd *cmd,
3591         struct se_device *dev,
3592         int (*qf_callback)(struct se_cmd *))
3593 {
3594         spin_lock_irq(&dev->qf_cmd_lock);
3595         cmd->se_cmd_flags |= SCF_EMULATE_QUEUE_FULL;
3596         cmd->transport_qf_callback = qf_callback;
3597         list_add_tail(&cmd->se_qf_node, &dev->qf_cmd_list);
3598         atomic_inc(&dev->dev_qf_count);
3599         smp_mb__after_atomic_inc();
3600         spin_unlock_irq(&dev->qf_cmd_lock);
3601
3602         schedule_work(&dev->qf_work_queue);
3603 }
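/*
 * Queue-full round trip sketch (assuming the qf_work_queue handler drains
 * dev->qf_cmd_list): a fabric queue_data_in()/queue_status() returning
 * -EAGAIN lands the command here, and the scheduled work later retries
 * delivery through cmd->transport_qf_callback(), e.g. the
 * transport_complete_qf() callback registered by the completion path below.
 */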
3604
3605 static void transport_generic_complete_ok(struct se_cmd *cmd)
3606 {
3607         int reason = 0, ret;
3608         /*
3609          * Check if we need to move delayed/dormant tasks from cmds on the
3610          * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task
3611          * Attribute.
3612          */
3613         if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
3614                 transport_complete_task_attr(cmd);
3615         /*
3616          * Check to schedule QUEUE_FULL work, or execute an existing
3617          * cmd->transport_qf_callback()
3618          */
3619         if (atomic_read(&cmd->se_dev->dev_qf_count) != 0)
3620                 schedule_work(&cmd->se_dev->qf_work_queue);
3621
3622         if (cmd->transport_qf_callback) {
3623                 ret = cmd->transport_qf_callback(cmd);
3624                 if (ret < 0)
3625                         goto queue_full;
3626
3627                 cmd->transport_qf_callback = NULL;
3628                 goto done;
3629         }
3630         /*
3631          * Check if we need to retrieve a sense buffer from
3632          * the struct se_cmd in question.
3633          */
3634         if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
3635                 if (transport_get_sense_data(cmd) < 0)
3636                         reason = TCM_NON_EXISTENT_LUN;
3637
3638                 /*
3639                  * Only set when a struct se_task->task_scsi_status returned
3640                  * a non-GOOD status.
3641                  */
3642                 if (cmd->scsi_status) {
3643                         ret = transport_send_check_condition_and_sense(
3644                                         cmd, reason, 1);
3645                         if (ret == -EAGAIN)
3646                                 goto queue_full;
3647
3648                         transport_lun_remove_cmd(cmd);
3649                         transport_cmd_check_stop_to_fabric(cmd);
3650                         return;
3651                 }
3652         }
3653         /*
3654          * Check for a callback, used by amongst other things
3655          * XDWRITE_READ_10 emulation.
3656          */
3657         if (cmd->transport_complete_callback)
3658                 cmd->transport_complete_callback(cmd);
3659
3660         switch (cmd->data_direction) {
3661         case DMA_FROM_DEVICE:
3662                 spin_lock(&cmd->se_lun->lun_sep_lock);
3663                 if (cmd->se_lun->lun_sep) {
3664                         cmd->se_lun->lun_sep->sep_stats.tx_data_octets +=
3665                                         cmd->data_length;
3666                 }
3667                 spin_unlock(&cmd->se_lun->lun_sep_lock);
3668
3669                 ret = cmd->se_tfo->queue_data_in(cmd);
3670                 if (ret == -EAGAIN)
3671                         goto queue_full;
3672                 break;
3673         case DMA_TO_DEVICE:
3674                 spin_lock(&cmd->se_lun->lun_sep_lock);
3675                 if (cmd->se_lun->lun_sep) {
3676                         cmd->se_lun->lun_sep->sep_stats.rx_data_octets +=
3677                                 cmd->data_length;
3678                 }
3679                 spin_unlock(&cmd->se_lun->lun_sep_lock);
3680                 /*
3681                  * Check if we need to send READ payload for BIDI-COMMAND
3682                  */
3683                 if (cmd->t_bidi_data_sg) {
3684                         spin_lock(&cmd->se_lun->lun_sep_lock);
3685                         if (cmd->se_lun->lun_sep) {
3686                                 cmd->se_lun->lun_sep->sep_stats.tx_data_octets +=
3687                                         cmd->data_length;
3688                         }
3689                         spin_unlock(&cmd->se_lun->lun_sep_lock);
3690                         ret = cmd->se_tfo->queue_data_in(cmd);
3691                         if (ret == -EAGAIN)
3692                                 goto queue_full;
3693                         break;
3694                 }
3695                 /* Fall through for DMA_TO_DEVICE */
3696         case DMA_NONE:
3697                 ret = cmd->se_tfo->queue_status(cmd);
3698                 if (ret == -EAGAIN)
3699                         goto queue_full;
3700                 break;
3701         default:
3702                 break;
3703         }
3704
3705 done:
3706         transport_lun_remove_cmd(cmd);
3707         transport_cmd_check_stop_to_fabric(cmd);
3708         return;
3709
3710 queue_full:
3711         pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p,"
3712                 " data_direction: %d\n", cmd, cmd->data_direction);
3713         transport_handle_queue_full(cmd, cmd->se_dev, transport_complete_qf);
3714 }
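/*
 * The -EAGAIN convention used by transport_generic_complete_ok() above:
 * a hypothetical fabric queue_status() sketch, where fabric_hw_queue_full()
 * stands in for a real hardware-queue check:
 *
 *	static int my_fabric_queue_status(struct se_cmd *cmd)
 *	{
 *		if (fabric_hw_queue_full())
 *			return -EAGAIN;	// requeued via transport_handle_queue_full()
 *		// ...send SAM status + sense to the initiator...
 *		return 0;
 *	}
 */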
3715
3716 static void transport_free_dev_tasks(struct se_cmd *cmd)
3717 {
3718         struct se_task *task, *task_tmp;
3719         unsigned long flags;
3720
3721         spin_lock_irqsave(&cmd->t_state_lock, flags);
3722         list_for_each_entry_safe(task, task_tmp,
3723                                 &cmd->t_task_list, t_list) {
3724                 if (atomic_read(&task->task_active))
3725                         continue;
3726
3727                 kfree(task->task_sg_bidi);
3728                 kfree(task->task_sg);
3729
3730                 list_del(&task->t_list);
3731
3732                 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3733                 if (task->se_dev)
3734                         task->se_dev->transport->free_task(task);
3735                 else
3736                         pr_err("task[%u] - task->se_dev is NULL\n",
3737                                 task->task_no);
3738                 spin_lock_irqsave(&cmd->t_state_lock, flags);
3739         }
3740         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3741 }
3742
3743 static inline void transport_free_sgl(struct scatterlist *sgl, int nents)
3744 {
3745         struct scatterlist *sg;
3746         int count;
3747
3748         for_each_sg(sgl, sg, nents, count)
3749                 __free_page(sg_page(sg));
3750
3751         kfree(sgl);
3752 }
3753
3754 static inline void transport_free_pages(struct se_cmd *cmd)
3755 {
3756         if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)
3757                 return;
3758
3759         transport_free_sgl(cmd->t_data_sg, cmd->t_data_nents);
3760         cmd->t_data_sg = NULL;
3761         cmd->t_data_nents = 0;
3762
3763         transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents);
3764         cmd->t_bidi_data_sg = NULL;
3765         cmd->t_bidi_data_nents = 0;
3766 }
3767
3768 static inline void transport_release_tasks(struct se_cmd *cmd)
3769 {
3770         transport_free_dev_tasks(cmd);
3771 }
3772
3773 static inline int transport_dec_and_check(struct se_cmd *cmd)
3774 {
3775         unsigned long flags;
3776
3777         spin_lock_irqsave(&cmd->t_state_lock, flags);
3778         if (atomic_read(&cmd->t_fe_count)) {
3779                 if (!atomic_dec_and_test(&cmd->t_fe_count)) {
3780                         spin_unlock_irqrestore(&cmd->t_state_lock,
3781                                         flags);
3782                         return 1;
3783                 }
3784         }
3785
3786         if (atomic_read(&cmd->t_se_count)) {
3787                 if (!atomic_dec_and_test(&cmd->t_se_count)) {
3788                         spin_unlock_irqrestore(&cmd->t_state_lock,
3789                                         flags);
3790                         return 1;
3791                 }
3792         }
3793         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3794
3795         return 0;
3796 }
3797
3798 static void transport_release_fe_cmd(struct se_cmd *cmd)
3799 {
3800         unsigned long flags;
3801
3802         if (transport_dec_and_check(cmd))
3803                 return;
3804
3805         spin_lock_irqsave(&cmd->t_state_lock, flags);
3806         if (!atomic_read(&cmd->transport_dev_active)) {
3807                 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3808                 goto free_pages;
3809         }
3810         atomic_set(&cmd->transport_dev_active, 0);
3811         transport_all_task_dev_remove_state(cmd);
3812         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3813
3814         transport_release_tasks(cmd);
3815 free_pages:
3816         transport_free_pages(cmd);
3817         transport_free_se_cmd(cmd);
3818         cmd->se_tfo->release_cmd(cmd);
3819 }
3820
3821 static int
3822 transport_generic_remove(struct se_cmd *cmd, int session_reinstatement)
3823 {
3824         unsigned long flags;
3825
3826         if (transport_dec_and_check(cmd)) {
3827                 if (session_reinstatement) {
3828                         spin_lock_irqsave(&cmd->t_state_lock, flags);
3829                         transport_all_task_dev_remove_state(cmd);
3830                         spin_unlock_irqrestore(&cmd->t_state_lock,
3831                                         flags);
3832                 }
3833                 return 1;
3834         }
3835
3836         spin_lock_irqsave(&cmd->t_state_lock, flags);
3837         if (!atomic_read(&cmd->transport_dev_active)) {
3838                 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3839                 goto free_pages;
3840         }
3841         atomic_set(&cmd->transport_dev_active, 0);
3842         transport_all_task_dev_remove_state(cmd);
3843         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3844
3845         transport_release_tasks(cmd);
3846
3847 free_pages:
3848         transport_free_pages(cmd);
3849         transport_release_cmd(cmd);
3850         return 0;
3851 }
3852
3853 /*
3854  * transport_generic_map_mem_to_cmd - Use fabric-alloced pages instead of
3855  * allocating in the core.
3856  * @cmd:  Associated se_cmd descriptor
3857  * @sgl:  SGL style memory for TCM WRITE / READ
3858  * @sgl_count: Number of SGL elements
3859  * @sgl_bidi: SGL style memory for TCM BIDI READ
3860  * @sgl_bidi_count: Number of BIDI READ SGL elements
3861  *
3862  * Return: nonzero if cmd was rejected due to -ENOMEM or improper usage
3863  * of parameters.
3864  */
3865 int transport_generic_map_mem_to_cmd(
3866         struct se_cmd *cmd,
3867         struct scatterlist *sgl,
3868         u32 sgl_count,
3869         struct scatterlist *sgl_bidi,
3870         u32 sgl_bidi_count)
3871 {
3872         if (!sgl || !sgl_count)
3873                 return 0;
3874
3875         if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) ||
3876             (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) {
3877
3878                 cmd->t_data_sg = sgl;
3879                 cmd->t_data_nents = sgl_count;
3880
3881                 if (sgl_bidi && sgl_bidi_count) {
3882                         cmd->t_bidi_data_sg = sgl_bidi;
3883                         cmd->t_bidi_data_nents = sgl_bidi_count;
3884                 }
3885                 cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
3886         }
3887
3888         return 0;
3889 }
3890 EXPORT_SYMBOL(transport_generic_map_mem_to_cmd);
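/*
 * Fabric-side usage sketch (names illustrative): a fabric module that
 * already owns SGLs from its hardware descriptors hands them off instead
 * of letting the core allocate pages:
 *
 *	sg_init_table(sgl, nents);
 *	// ...populate sgl[] from fabric buffers...
 *	rc = transport_generic_map_mem_to_cmd(se_cmd, sgl, nents, NULL, 0);
 *
 * after which SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC makes transport_free_pages()
 * skip freeing memory the fabric still owns.
 */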
3891
3892 static int transport_new_cmd_obj(struct se_cmd *cmd)
3893 {
3894         struct se_device *dev = cmd->se_dev;
3895         int set_counts = 1, rc, task_cdbs;
3896
3897         /*
3898          * Setup any BIDI READ tasks and memory from
3899          * cmd->t_bidi_data_sg so the READ struct se_tasks
3900          * are queued first for the non pSCSI passthrough case.
3901          */
3902         if (cmd->t_bidi_data_sg &&
3903             (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV)) {
3904                 rc = transport_allocate_tasks(cmd,
3905                                               cmd->t_task_lba,
3906                                               DMA_FROM_DEVICE,
3907                                               cmd->t_bidi_data_sg,
3908                                               cmd->t_bidi_data_nents);
3909                 if (rc <= 0) {
3910                         cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
3911                         cmd->scsi_sense_reason =
3912                                 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
3913                         return -EINVAL;
3914                 }
3915                 atomic_inc(&cmd->t_fe_count);
3916                 atomic_inc(&cmd->t_se_count);
3917                 set_counts = 0;
3918         }
3919         /*
3920          * Setup the tasks and memory from cmd->t_data_sg.
3921          * Note for BIDI transfers this will contain the WRITE payload
3922          */
3923         task_cdbs = transport_allocate_tasks(cmd,
3924                                              cmd->t_task_lba,
3925                                              cmd->data_direction,
3926                                              cmd->t_data_sg,
3927                                              cmd->t_data_nents);
3928         if (task_cdbs <= 0) {
3929                 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
3930                 cmd->scsi_sense_reason =
3931                         TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
3932                 return -EINVAL;
3933         }
3934
3935         if (set_counts) {
3936                 atomic_inc(&cmd->t_fe_count);
3937                 atomic_inc(&cmd->t_se_count);
3938         }
3939
3940         cmd->t_task_list_num = task_cdbs;
3941
3942         atomic_set(&cmd->t_task_cdbs_left, task_cdbs);
3943         atomic_set(&cmd->t_task_cdbs_ex_left, task_cdbs);
3944         atomic_set(&cmd->t_task_cdbs_timeout_left, task_cdbs);
3945         return 0;
3946 }
3947
3948 void *transport_kmap_first_data_page(struct se_cmd *cmd)
3949 {
3950         struct scatterlist *sg = cmd->t_data_sg;
3951
3952         BUG_ON(!sg);
3953         /*
3954          * We need to take into account a possible offset here for fabrics like
3955          * tcm_loop, which may be using a contiguous buffer from the SCSI
3956          * midlayer for control CDBs passed as SGLs via transport_generic_map_mem_to_cmd()
3957          */
3958         return kmap(sg_page(sg)) + sg->offset;
3959 }
3960 EXPORT_SYMBOL(transport_kmap_first_data_page);
3961
3962 void transport_kunmap_first_data_page(struct se_cmd *cmd)
3963 {
3964         kunmap(sg_page(cmd->t_data_sg));
3965 }
3966 EXPORT_SYMBOL(transport_kunmap_first_data_page);
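/*
 * Typical usage sketch for a single-page control payload (e.g. an
 * emulated INQUIRY response built in core code):
 *
 *	unsigned char *buf = transport_kmap_first_data_page(cmd);
 *	// ...fill or parse buf, staying within the first page...
 *	transport_kunmap_first_data_page(cmd);
 *
 * Note the kmap helper honors sg->offset while the kunmap side only needs
 * the page, so the pair must bracket the same first SGL entry.
 */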
3967
3968 static int
3969 transport_generic_get_mem(struct se_cmd *cmd)
3970 {
3971         u32 length = cmd->data_length;
3972         unsigned int nents;
3973         struct page *page;
3974         int i = 0;
3975
3976         nents = DIV_ROUND_UP(length, PAGE_SIZE);
3977         cmd->t_data_sg = kmalloc(sizeof(struct scatterlist) * nents, GFP_KERNEL);
3978         if (!cmd->t_data_sg)
3979                 return -ENOMEM;
3980
3981         cmd->t_data_nents = nents;
3982         sg_init_table(cmd->t_data_sg, nents);
3983
3984         while (length) {
3985                 u32 page_len = min_t(u32, length, PAGE_SIZE);
3986                 page = alloc_page(GFP_KERNEL | __GFP_ZERO);
3987                 if (!page)
3988                         goto out;
3989
3990                 sg_set_page(&cmd->t_data_sg[i], page, page_len, 0);
3991                 length -= page_len;
3992                 i++;
3993         }
3994         return 0;
3995
3996 out:
3997         while (i > 0) {
3998                 i--;
3999                 __free_page(sg_page(&cmd->t_data_sg[i]));
4000         }
4001         kfree(cmd->t_data_sg);
4002         cmd->t_data_sg = NULL;
4003         return -ENOMEM;
4004 }
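/*
 * Worked example for the allocation above: with data_length = 9000 on a
 * 4 KiB PAGE_SIZE system, nents = DIV_ROUND_UP(9000, 4096) = 3 SGL
 * entries, populated as 4096 + 4096 + 808 bytes, each backed by its own
 * zeroed page.
 */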
4005
4006 /* Reduce sectors if they are too long for the device */
4007 static inline sector_t transport_limit_task_sectors(
4008         struct se_device *dev,
4009         unsigned long long lba,
4010         sector_t sectors)
4011 {
4012         sectors = min_t(sector_t, sectors, dev->se_sub_dev->se_dev_attrib.max_sectors);
4013
4014         if (dev->transport->get_device_type(dev) == TYPE_DISK)
4015                 if ((lba + sectors) > transport_dev_end_lba(dev))
4016                         sectors = ((transport_dev_end_lba(dev) - lba) + 1);
4017
4018         return sectors;
4019 }
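/*
 * Example: with max_sectors = 1024, a request for 4096 sectors is first
 * clamped to 1024; on TYPE_DISK the result is trimmed further if
 * (lba + 1024) would run past transport_dev_end_lba(dev).
 */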
4020
4021
4022 /*
4023  * This function can be used by HW target mode drivers to create a linked
4024  * scatterlist from all contiguously allocated struct se_task->task_sg[].
4025  * This is intended to be called during the completion path by TCM Core
4026  * when struct target_core_fabric_ops->check_task_sg_chaining is enabled.
4027  */
4028 void transport_do_task_sg_chain(struct se_cmd *cmd)
4029 {
4030         struct scatterlist *sg_first = NULL;
4031         struct scatterlist *sg_prev = NULL;
4032         int sg_prev_nents = 0;
4033         struct scatterlist *sg;
4034         struct se_task *task;
4035         u32 chained_nents = 0;
4036         int i;
4037
4038         BUG_ON(!cmd->se_tfo->task_sg_chaining);
4039
4040         /*
4041          * Walk the struct se_task list and setup scatterlist chains
4042          * for each contiguously allocated struct se_task->task_sg[].
4043          */
4044         list_for_each_entry(task, &cmd->t_task_list, t_list) {
4045                 if (!task->task_sg)
4046                         continue;
4047
4048                 if (!sg_first) {
4049                         sg_first = task->task_sg;
4050                         chained_nents = task->task_sg_nents;
4051                 } else {
4052                         sg_chain(sg_prev, sg_prev_nents, task->task_sg);
4053                         chained_nents += task->task_sg_nents;
4054                 }
4055                 /*
4056                  * For the padded tasks, use the extra SGL vector allocated
4057                  * in transport_allocate_data_tasks() for the sg_prev_nents
4058                  * offset into sg_chain() above.  The last task of a
4059                  * multi-task list, or a single task, will not have
4060                  * task->task_padded_sg set.
4061                  */
4062                 if (task->task_padded_sg)
4063                         sg_prev_nents = (task->task_sg_nents + 1);
4064                 else
4065                         sg_prev_nents = task->task_sg_nents;
4066
4067                 sg_prev = task->task_sg;
4068         }
4069         /*
4070          * Setup the starting pointer and total t_tasks_sg_linked_no including
4071          * padding SGs for linking and to mark the end.
4072          */
4073         cmd->t_tasks_sg_chained = sg_first;
4074         cmd->t_tasks_sg_chained_no = chained_nents;
4075
4076         pr_debug("Setup cmd: %p cmd->t_tasks_sg_chained: %p and"
4077                 " t_tasks_sg_chained_no: %u\n", cmd, cmd->t_tasks_sg_chained,
4078                 cmd->t_tasks_sg_chained_no);
4079
4080         for_each_sg(cmd->t_tasks_sg_chained, sg,
4081                         cmd->t_tasks_sg_chained_no, i) {
4082
4083                 pr_debug("SG[%d]: %p page: %p length: %d offset: %d\n",
4084                         i, sg, sg_page(sg), sg->length, sg->offset);
4085                 if (sg_is_chain(sg))
4086                         pr_debug("SG: %p sg_is_chain=1\n", sg);
4087                 if (sg_is_last(sg))
4088                         pr_debug("SG: %p sg_is_last=1\n", sg);
4089         }
4090 }
4091 EXPORT_SYMBOL(transport_do_task_sg_chain);
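/*
 * Chaining sketch: for two tasks whose task_sg[] were padded by
 * transport_allocate_data_tasks(), the walk above effectively performs
 *
 *	sg_chain(task1->task_sg, task1->task_sg_nents + 1, task2->task_sg);
 *
 * turning the per-task SGLs into the single cmd->t_tasks_sg_chained list
 * that a HW fabric can hand to its DMA engine in one descriptor.
 */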
4092
4093 /*
4094  * Break up cmd into chunks transport can handle
4095  */
4096 static int transport_allocate_data_tasks(
4097         struct se_cmd *cmd,
4098         unsigned long long lba,
4099         enum dma_data_direction data_direction,
4100         struct scatterlist *sgl,
4101         unsigned int sgl_nents)
4102 {
4103         unsigned char *cdb = NULL;
4104         struct se_task *task;
4105         struct se_device *dev = cmd->se_dev;
4106         unsigned long flags;
4107         int task_count, i, ret;
4108         sector_t sectors, dev_max_sectors = dev->se_sub_dev->se_dev_attrib.max_sectors;
4109         u32 sector_size = dev->se_sub_dev->se_dev_attrib.block_size;
4110         struct scatterlist *sg;
4111         struct scatterlist *cmd_sg;
4112
4113         WARN_ON(cmd->data_length % sector_size);
4114         sectors = DIV_ROUND_UP(cmd->data_length, sector_size);
4115         task_count = DIV_ROUND_UP_SECTOR_T(sectors, dev_max_sectors);
4116
4117         cmd_sg = sgl;
4118         for (i = 0; i < task_count; i++) {
4119                 unsigned int task_size, task_sg_nents_padded;
4120                 int count;
4121
4122                 task = transport_generic_get_task(cmd, data_direction);
4123                 if (!task)
4124                         return -ENOMEM;
4125
4126                 task->task_lba = lba;
4127                 task->task_sectors = min(sectors, dev_max_sectors);
4128                 task->task_size = task->task_sectors * sector_size;
4129
4130                 cdb = dev->transport->get_cdb(task);
4131                 BUG_ON(!cdb);
4132
4133                 memcpy(cdb, cmd->t_task_cdb,
4134                        scsi_command_size(cmd->t_task_cdb));
4135
4136                 /* Update new cdb with updated lba/sectors */
4137                 cmd->transport_split_cdb(task->task_lba, task->task_sectors, cdb);
4138                 /*
4139                  * This now assumes that the passed sg_ents are in PAGE_SIZE chunks,
4140                  * in order to calculate the number of per-task SGL entries.
4141                  */
4142                 task->task_sg_nents = DIV_ROUND_UP(task->task_size, PAGE_SIZE);
4143                 /*
4144                  * Check if the fabric module driver is requesting that all
4145                  * struct se_task->task_sg[] be chained together.  If so,
4146                  * allocate an extra padding SG entry for linking and for
4147                  * marking the end of the chained SGL for every task except
4148                  * the last one when (task_count > 1), skipping the extra
4149                  * padding for the (task_count == 1) case.
4150                  */
4151                 if (cmd->se_tfo->task_sg_chaining && (i < (task_count - 1))) {
4152                         task_sg_nents_padded = (task->task_sg_nents + 1);
4153                         task->task_padded_sg = 1;
4154                 } else
4155                         task_sg_nents_padded = task->task_sg_nents;
4156
4157                 task->task_sg = kmalloc(sizeof(struct scatterlist) *
4158                                         task_sg_nents_padded, GFP_KERNEL);
4159                 if (!task->task_sg) {
4160                         cmd->se_dev->transport->free_task(task);
4161                         return -ENOMEM;
4162                 }
4163
4164                 sg_init_table(task->task_sg, task_sg_nents_padded);
4165
4166                 task_size = task->task_size;
4167
4168                 /* Build new sgl, only up to task_size */
4169                 for_each_sg(task->task_sg, sg, task->task_sg_nents, count) {
4170                         if (cmd_sg->length > task_size)
4171                                 break;
4172
4173                         *sg = *cmd_sg;
4174                         task_size -= cmd_sg->length;
4175                         cmd_sg = sg_next(cmd_sg);
4176                 }
4177
4178                 lba += task->task_sectors;
4179                 sectors -= task->task_sectors;
4180
4181                 spin_lock_irqsave(&cmd->t_state_lock, flags);
4182                 list_add_tail(&task->t_list, &cmd->t_task_list);
4183                 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4184         }
4185         /*
4186          * Now perform the memory map of task->task_sg[] into backend
4187          * subsystem memory.
4188          */
4189         list_for_each_entry(task, &cmd->t_task_list, t_list) {
4190                 if (atomic_read(&task->task_sent))
4191                         continue;
4192                 if (!dev->transport->map_data_SG)
4193                         continue;
4194
4195                 ret = dev->transport->map_data_SG(task);
4196                 if (ret < 0)
4197                         return 0;
4198         }
4199
4200         return task_count;
4201 }
4202
4203 static int
4204 transport_allocate_control_task(struct se_cmd *cmd)
4205 {
4206         struct se_device *dev = cmd->se_dev;
4207         unsigned char *cdb;
4208         struct se_task *task;
4209         unsigned long flags;
4210         int ret = 0;
4211
4212         task = transport_generic_get_task(cmd, cmd->data_direction);
4213         if (!task)
4214                 return -ENOMEM;
4215
4216         cdb = dev->transport->get_cdb(task);
4217         BUG_ON(!cdb);
4218         memcpy(cdb, cmd->t_task_cdb,
4219                scsi_command_size(cmd->t_task_cdb));
4220
4221         task->task_sg = kmalloc(sizeof(struct scatterlist) * cmd->t_data_nents,
4222                                 GFP_KERNEL);
4223         if (!task->task_sg) {
4224                 cmd->se_dev->transport->free_task(task);
4225                 return -ENOMEM;
4226         }
4227
4228         memcpy(task->task_sg, cmd->t_data_sg,
4229                sizeof(struct scatterlist) * cmd->t_data_nents);
4230         task->task_size = cmd->data_length;
4231         task->task_sg_nents = cmd->t_data_nents;
4232
4233         spin_lock_irqsave(&cmd->t_state_lock, flags);
4234         list_add_tail(&task->t_list, &cmd->t_task_list);
4235         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4236
4237         if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) {
4238                 if (dev->transport->map_control_SG)
4239                         ret = dev->transport->map_control_SG(task);
4240         } else if (cmd->se_cmd_flags & SCF_SCSI_NON_DATA_CDB) {
4241                 if (dev->transport->cdb_none)
4242                         ret = dev->transport->cdb_none(task);
4243         } else {
4244                 pr_err("target: Unknown control cmd type!\n");
4245                 BUG();
4246         }
4247
4248         /* Success! Return number of tasks allocated */
4249         if (ret == 0)
4250                 return 1;
4251         return ret;
4252 }
4253
4254 static u32 transport_allocate_tasks(
4255         struct se_cmd *cmd,
4256         unsigned long long lba,
4257         enum dma_data_direction data_direction,
4258         struct scatterlist *sgl,
4259         unsigned int sgl_nents)
4260 {
4261         if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
4262                 if (transport_cmd_get_valid_sectors(cmd) < 0)
4263                         return -EINVAL;
4264
4265                 return transport_allocate_data_tasks(cmd, lba, data_direction,
4266                                                      sgl, sgl_nents);
4267         } else
4268                 return transport_allocate_control_task(cmd);
4269
4270 }
4271
4273 /*       transport_generic_new_cmd(): Called from transport_processing_thread()
4274  *
4275  *       Allocate storage transport resources from a set of values predefined
4276  *       by transport_generic_cmd_sequencer() from the iSCSI Target RX process.
4277  *       Any non-zero return here is treated as an "out of resources" failure.
4278  *
4279  *       Generate struct se_task(s) and/or their payloads for this CDB.
4280  */
4282 int transport_generic_new_cmd(struct se_cmd *cmd)
4283 {
4284         int ret = 0;
4285
4286         /*
4287          * Determine if the TCM fabric module has already allocated physical
4288          * memory, and is directly calling transport_generic_map_mem_to_cmd()
4289          * beforehand.
4290          */
4291         if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) &&
4292             cmd->data_length) {
4293                 ret = transport_generic_get_mem(cmd);
4294                 if (ret < 0)
4295                         return ret;
4296         }
4297         /*
4298          * Call transport_new_cmd_obj() to invoke transport_allocate_tasks() for
4299          * control or data CDB types, and perform the map to backend subsystem
4300          * code from SGL memory allocated here by transport_generic_get_mem(), or
4301          * via pre-existing SGL memory set up explicitly by fabric module code with
4302          * transport_generic_map_mem_to_cmd().
4303          */
4304         ret = transport_new_cmd_obj(cmd);
4305         if (ret < 0)
4306                 return ret;
4307         /*
4308          * For WRITEs, let the fabric know its buffer is ready.
4309          * This WRITE struct se_cmd (and all of its associated struct se_task's)
4310          * will be added to the struct se_device execution queue after its WRITE
4311          * data has arrived. (ie: It gets handled by the transport processing
4312          * thread a second time)
4313          */
4314         if (cmd->data_direction == DMA_TO_DEVICE) {
4315                 transport_add_tasks_to_state_queue(cmd);
4316                 return transport_generic_write_pending(cmd);
4317         }
4318         /*
4319          * Everything else but a WRITE, add the struct se_cmd's struct se_task's
4320          * to the execution queue.
4321          */
4322         transport_execute_tasks(cmd);
4323         return 0;
4324 }
4325 EXPORT_SYMBOL(transport_generic_new_cmd);
4326
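/*
 * A hypothetical sketch of the fabric side of the WRITE flow described
 * above; the example_* names are illustrative only.  The core invokes the
 * fabric's write_pending() callback (via transport_generic_write_pending()
 * below) to request the WRITE payload, and the fabric hands the command
 * back via transport_generic_process_write() once all data has arrived.
 */
#if 0
static int example_fabric_write_pending(struct se_cmd *se_cmd)
{
        /* Ask the initiator for the WRITE data (R2T or equivalent). */
        if (example_fabric_send_r2t(se_cmd) < 0)
                return -EAGAIN; /* Retried later via QUEUE_FULL handling */
        return 0;
}

static void example_fabric_write_data_received(struct se_cmd *se_cmd)
{
        /* All WRITE data received: queue the command for execution. */
        transport_generic_process_write(se_cmd);
}
#endif
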
4327 /*      transport_generic_process_write():
4328  *
4329  *      Called by fabric modules once all WRITE data has arrived.
4330  */
4331 void transport_generic_process_write(struct se_cmd *cmd)
4332 {
4333         transport_execute_tasks(cmd);
4334 }
4335 EXPORT_SYMBOL(transport_generic_process_write);
4336
4337 static int transport_write_pending_qf(struct se_cmd *cmd)
4338 {
4339         return cmd->se_tfo->write_pending(cmd);
4340 }
4341
4342 /*      transport_generic_write_pending():
4343  *
4344  *      Set WRITE_PENDING state and signal the fabric that its buffers are ready.
4345  */
4346 static int transport_generic_write_pending(struct se_cmd *cmd)
4347 {
4348         unsigned long flags;
4349         int ret;
4350
4351         spin_lock_irqsave(&cmd->t_state_lock, flags);
4352         cmd->t_state = TRANSPORT_WRITE_PENDING;
4353         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4354
4355         if (cmd->transport_qf_callback) {
4356                 ret = cmd->transport_qf_callback(cmd);
4357                 if (ret == -EAGAIN)
4358                         goto queue_full;
4359                 else if (ret < 0)
4360                         return ret;
4361
4362                 cmd->transport_qf_callback = NULL;
4363                 return 0;
4364         }
4365
4366         /*
4367          * Clear the se_cmd for WRITE_PENDING status in order to set
4368          * cmd->t_transport_active=0 so that transport_generic_handle_data
4369          * can be called from HW target mode interrupt code.  This is safe
4370          * to call with transport_off=1 before cmd->se_tfo->write_pending()
4371          * because the se_cmd->se_lun pointer is not being cleared.
4372          */
4373         transport_cmd_check_stop(cmd, 1, 0);
4374
4375         /*
4376          * Call the fabric write_pending function here to let the
4377          * frontend know that WRITE buffers are ready.
4378          */
4379         ret = cmd->se_tfo->write_pending(cmd);
4380         if (ret == -EAGAIN)
4381                 goto queue_full;
4382         else if (ret < 0)
4383                 return ret;
4384
4385         return PYX_TRANSPORT_WRITE_PENDING;
4386
4387 queue_full:
4388         pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd);
4389         cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
4390         transport_handle_queue_full(cmd, cmd->se_dev,
4391                         transport_write_pending_qf);
4392         return ret;
4393 }
4394
4395 void transport_release_cmd(struct se_cmd *cmd)
4396 {
4397         BUG_ON(!cmd->se_tfo);
4398
4399         transport_free_se_cmd(cmd);
4400         cmd->se_tfo->release_cmd(cmd);
4401 }
4402 EXPORT_SYMBOL(transport_release_cmd);
4403
4404 /*      transport_generic_free_cmd():
4405  *
4406  *      Called from the processing frontend to release storage engine resources.
4407  */
4408 void transport_generic_free_cmd(
4409         struct se_cmd *cmd,
4410         int wait_for_tasks,
4411         int session_reinstatement)
4412 {
4413         if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD))
4414                 transport_release_cmd(cmd);
4415         else {
4416                 core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd);
4417
4418                 if (cmd->se_lun) {
4419 #if 0
4420                         pr_debug("cmd: %p ITT: 0x%08x contains"
4421                                 " cmd->se_lun\n", cmd,
4422                                 cmd->se_tfo->get_task_tag(cmd));
4423 #endif
4424                         transport_lun_remove_cmd(cmd);
4425                 }
4426
4427                 if (wait_for_tasks && cmd->transport_wait_for_tasks)
4428                         cmd->transport_wait_for_tasks(cmd, 0, 0);
4429
4430                 transport_free_dev_tasks(cmd);
4431
4432                 transport_generic_remove(cmd, session_reinstatement);
4433         }
4434 }
4435 EXPORT_SYMBOL(transport_generic_free_cmd);
4436
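/*
 * Illustrative fabric-side usage: release a command once the fabric is
 * done with it, waiting for any outstanding tasks but without requesting
 * session reinstatement.
 */
#if 0
        transport_generic_free_cmd(se_cmd, 1, 0);
#endif
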
4437 static void transport_nop_wait_for_tasks(
4438         struct se_cmd *cmd,
4439         int remove_cmd,
4440         int session_reinstatement)
4441 {
4442         return;
4443 }
4444
4445 /*      transport_lun_wait_for_tasks():
4446  *
4447  *      Called from ConfigFS context to stop the passed struct se_cmd to allow
4448  *      a struct se_lun to be successfully shut down.
4449  */
4450 static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
4451 {
4452         unsigned long flags;
4453         int ret;
4454         /*
4455          * If the frontend has already requested this struct se_cmd to
4456          * be stopped, we can safely ignore this struct se_cmd.
4457          */
4458         spin_lock_irqsave(&cmd->t_state_lock, flags);
4459         if (atomic_read(&cmd->t_transport_stop)) {
4460                 atomic_set(&cmd->transport_lun_stop, 0);
4461                 pr_debug("ConfigFS ITT[0x%08x] - t_transport_stop =="
4462                         " TRUE, skipping\n", cmd->se_tfo->get_task_tag(cmd));
4463                 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4464                 transport_cmd_check_stop(cmd, 1, 0);
4465                 return -EPERM;
4466         }
4467         atomic_set(&cmd->transport_lun_fe_stop, 1);
4468         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4469
4470         wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);
4471
4472         ret = transport_stop_tasks_for_cmd(cmd);
4473
4474         pr_debug("ConfigFS: cmd: %p t_tasks: %d stop tasks ret:"
4475                         " %d\n", cmd, cmd->t_task_list_num, ret);
4476         if (!ret) {
4477                 pr_debug("ConfigFS: ITT[0x%08x] - stopping cmd....\n",
4478                                 cmd->se_tfo->get_task_tag(cmd));
4479                 wait_for_completion(&cmd->transport_lun_stop_comp);
4480                 pr_debug("ConfigFS: ITT[0x%08x] - stopped cmd....\n",
4481                                 cmd->se_tfo->get_task_tag(cmd));
4482         }
4483         transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj);
4484
4485         return 0;
4486 }
4487
4488 static void __transport_clear_lun_from_sessions(struct se_lun *lun)
4489 {
4490         struct se_cmd *cmd = NULL;
4491         unsigned long lun_flags, cmd_flags;
4492         /*
4493          * Do exception processing and return CHECK_CONDITION status to the
4494          * Initiator Port.
4495          */
4496         spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
4497         while (!list_empty(&lun->lun_cmd_list)) {
4498                 cmd = list_first_entry(&lun->lun_cmd_list,
4499                        struct se_cmd, se_lun_node);
4500                 list_del(&cmd->se_lun_node);
4501
4502                 atomic_set(&cmd->transport_lun_active, 0);
4503                 /*
4504                  * This will notify iscsi_target_transport.c:
4505                  * transport_cmd_check_stop() that a LUN shutdown is in
4506                  * progress for the iscsi_cmd_t.
4507                  */
4508                 spin_lock(&cmd->t_state_lock);
4509                 pr_debug("SE_LUN[%d] - Setting cmd->transport"
4510                         "_lun_stop for  ITT: 0x%08x\n",
4511                         cmd->se_lun->unpacked_lun,
4512                         cmd->se_tfo->get_task_tag(cmd));
4513                 atomic_set(&cmd->transport_lun_stop, 1);
4514                 spin_unlock(&cmd->t_state_lock);
4515
4516                 spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
4517
4518                 if (!cmd->se_lun) {
4519                         pr_err("ITT: 0x%08x, [i,t]_state: %u/%u\n",
4520                                 cmd->se_tfo->get_task_tag(cmd),
4521                                 cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
4522                         BUG();
4523                 }
4524                 /*
4525                  * If the Storage engine still owns the iscsi_cmd_t, determine
4526                  * and/or stop its context.
4527                  */
4528                 pr_debug("SE_LUN[%d] - ITT: 0x%08x before transport"
4529                         "_lun_wait_for_tasks()\n", cmd->se_lun->unpacked_lun,
4530                         cmd->se_tfo->get_task_tag(cmd));
4531
4532                 if (transport_lun_wait_for_tasks(cmd, cmd->se_lun) < 0) {
4533                         spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
4534                         continue;
4535                 }
4536
4537                 pr_debug("SE_LUN[%d] - ITT: 0x%08x after transport_lun"
4538                         "_wait_for_tasks(): SUCCESS\n",
4539                         cmd->se_lun->unpacked_lun,
4540                         cmd->se_tfo->get_task_tag(cmd));
4541
4542                 spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
4543                 if (!atomic_read(&cmd->transport_dev_active)) {
4544                         spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
4545                         goto check_cond;
4546                 }
4547                 atomic_set(&cmd->transport_dev_active, 0);
4548                 transport_all_task_dev_remove_state(cmd);
4549                 spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
4550
4551                 transport_free_dev_tasks(cmd);
4552                 /*
4553                  * The Storage engine stopped this struct se_cmd before it was
4554                  * sent to the fabric frontend for delivery back to the
4555                  * Initiator Node.  Return this SCSI CDB back with a
4556                  * CHECK_CONDITION status.
4557                  */
4558 check_cond:
4559                 transport_send_check_condition_and_sense(cmd,
4560                                 TCM_NON_EXISTENT_LUN, 0);
4561                 /*
4562                  * If the fabric frontend is waiting for this iscsi_cmd_t to
4563                  * be released, notify the waiting thread now that the LU has
4564                  * finished accessing it.
4565                  */
4566                 spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
4567                 if (atomic_read(&cmd->transport_lun_fe_stop)) {
4568                         pr_debug("SE_LUN[%d] - Detected FE stop for"
4569                                 " struct se_cmd: %p ITT: 0x%08x\n",
4570                                 lun->unpacked_lun,
4571                                 cmd, cmd->se_tfo->get_task_tag(cmd));
4572
4573                         spin_unlock_irqrestore(&cmd->t_state_lock,
4574                                         cmd_flags);
4575                         transport_cmd_check_stop(cmd, 1, 0);
4576                         complete(&cmd->transport_lun_fe_stop_comp);
4577                         spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
4578                         continue;
4579                 }
4580                 pr_debug("SE_LUN[%d] - ITT: 0x%08x finished processing\n",
4581                         lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd));
4582
4583                 spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
4584                 spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
4585         }
4586         spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
4587 }
4588
4589 static int transport_clear_lun_thread(void *p)
4590 {
4591         struct se_lun *lun = (struct se_lun *)p;
4592
4593         __transport_clear_lun_from_sessions(lun);
4594         complete(&lun->lun_shutdown_comp);
4595
4596         return 0;
4597 }
4598
4599 int transport_clear_lun_from_sessions(struct se_lun *lun)
4600 {
4601         struct task_struct *kt;
4602
4603         kt = kthread_run(transport_clear_lun_thread, lun,
4604                         "tcm_cl_%u", lun->unpacked_lun);
4605         if (IS_ERR(kt)) {
4606                 pr_err("Unable to start clear_lun thread\n");
4607                 return PTR_ERR(kt);
4608         }
4609         wait_for_completion(&lun->lun_shutdown_comp);
4610
4611         return 0;
4612 }
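
/*
 * Note that transport_clear_lun_from_sessions() is synchronous despite
 * spawning a kthread: the caller blocks on lun->lun_shutdown_comp, which
 * transport_clear_lun_thread() completes once the LUN's command list has
 * been drained.
 */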
4613
4614 /*      transport_generic_wait_for_tasks():
4615  *
4616  *      Called from frontend or passthrough context to wait for storage engine
4617  *      to pause and/or release frontend generated struct se_cmd.
4618  */
4619 static void transport_generic_wait_for_tasks(
4620         struct se_cmd *cmd,
4621         int remove_cmd,
4622         int session_reinstatement)
4623 {
4624         unsigned long flags;
4625
4626         if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && !(cmd->se_tmr_req))
4627                 return;
4628
4629         spin_lock_irqsave(&cmd->t_state_lock, flags);
4630         /*
4631          * If we are already stopped due to an external event (ie: LUN shutdown),
4632          * sleep until the connection can have the passed struct se_cmd back.
4633          * The cmd->transport_lun_fe_stop_comp completion will be signalled by
4634          * transport_clear_lun_from_sessions() once the ConfigFS context caller
4635          * has completed its operation on the struct se_cmd.
4636          */
4637         if (atomic_read(&cmd->transport_lun_stop)) {
4638
4639                 pr_debug("wait_for_tasks: Stopping"
4640                         " wait_for_completion(&cmd->t_tasktransport_lun_fe"
4641                         "_stop_comp); for ITT: 0x%08x\n",
4642                         cmd->se_tfo->get_task_tag(cmd));
4643                 /*
4644                  * There is a special case for WRITES where a FE exception +
4645                  * LUN shutdown means ConfigFS context is still sleeping on
4646                  * transport_lun_stop_comp in transport_lun_wait_for_tasks().
4647                  * We go ahead and up transport_lun_stop_comp just to be sure
4648                  * here.
4649                  */
4650                 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4651                 complete(&cmd->transport_lun_stop_comp);
4652                 wait_for_completion(&cmd->transport_lun_fe_stop_comp);
4653                 spin_lock_irqsave(&cmd->t_state_lock, flags);
4654
4655                 transport_all_task_dev_remove_state(cmd);
4656                 /*
4657                  * At this point, the frontend that originated this
4658                  * struct se_cmd owns the structure again, and it can be released
4659                  * through the normal means below.
4660                  */
4661                 pr_debug("wait_for_tasks: Stopped"
4662                         " wait_for_completion(&cmd->t_tasktransport_lun_fe_"
4663                         "stop_comp); for ITT: 0x%08x\n",
4664                         cmd->se_tfo->get_task_tag(cmd));
4665
4666                 atomic_set(&cmd->transport_lun_stop, 0);
4667         }
4668         if (!atomic_read(&cmd->t_transport_active) ||
4669              atomic_read(&cmd->t_transport_aborted))
4670                 goto remove;
4671
4672         atomic_set(&cmd->t_transport_stop, 1);
4673
4674         pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08x"
4675                 " i_state: %d, t_state/def_t_state: %d/%d, t_transport_stop"
4676                 " = TRUE\n", cmd, cmd->se_tfo->get_task_tag(cmd),
4677                 cmd->se_tfo->get_cmd_state(cmd), cmd->t_state,
4678                 cmd->deferred_t_state);
4679
4680         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4681
4682         wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);
4683
4684         wait_for_completion(&cmd->t_transport_stop_comp);
4685
4686         spin_lock_irqsave(&cmd->t_state_lock, flags);
4687         atomic_set(&cmd->t_transport_active, 0);
4688         atomic_set(&cmd->t_transport_stop, 0);
4689
4690         pr_debug("wait_for_tasks: Stopped wait_for_compltion("
4691                 "&cmd->t_transport_stop_comp) for ITT: 0x%08x\n",
4692                 cmd->se_tfo->get_task_tag(cmd));
4693 remove:
4694         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4695         if (!remove_cmd)
4696                 return;
4697
4698         transport_generic_free_cmd(cmd, 0, session_reinstatement);
4699 }
4700
4701 static int transport_get_sense_codes(
4702         struct se_cmd *cmd,
4703         u8 *asc,
4704         u8 *ascq)
4705 {
4706         *asc = cmd->scsi_asc;
4707         *ascq = cmd->scsi_ascq;
4708
4709         return 0;
4710 }
4711
4712 static int transport_set_sense_codes(
4713         struct se_cmd *cmd,
4714         u8 asc,
4715         u8 ascq)
4716 {
4717         cmd->scsi_asc = asc;
4718         cmd->scsi_ascq = ascq;
4719
4720         return 0;
4721 }
4722
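/*
 * For reference, the fixed-format sense data layout from SPC-3 that the
 * switch below fills in, relative to the fabric-specific offset:
 *
 *   buffer[offset]      response code (0x70 == current error, fixed format)
 *   buffer[offset + 2]  sense key   (SPC_SENSE_KEY_OFFSET)
 *   buffer[offset + 12] ASC         (SPC_ASC_KEY_OFFSET)
 *   buffer[offset + 13] ASCQ        (SPC_ASCQ_KEY_OFFSET)
 */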
4723 int transport_send_check_condition_and_sense(
4724         struct se_cmd *cmd,
4725         u8 reason,
4726         int from_transport)
4727 {
4728         unsigned char *buffer = cmd->sense_buffer;
4729         unsigned long flags;
4730         int offset;
4731         u8 asc = 0, ascq = 0;
4732
4733         spin_lock_irqsave(&cmd->t_state_lock, flags);
4734         if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
4735                 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4736                 return 0;
4737         }
4738         cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
4739         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4740
4741         if (!reason && from_transport)
4742                 goto after_reason;
4743
4744         if (!from_transport)
4745                 cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
4746         /*
4747          * Data Segment and SenseLength of the fabric response PDU.
4748          *
4749          * TRANSPORT_SENSE_BUFFER is now set to SCSI_SENSE_BUFFERSIZE
4750          * from include/scsi/scsi_cmnd.h
4751          */
4752         offset = cmd->se_tfo->set_fabric_sense_len(cmd,
4753                                 TRANSPORT_SENSE_BUFFER);
4754         /*
4755          * Actual SENSE DATA, see SPC-3 7.23.2  SPC_SENSE_KEY_OFFSET uses
4756          * SENSE KEY values from include/scsi/scsi.h
4757          */
4758         switch (reason) {
4759         case TCM_NON_EXISTENT_LUN:
4760                 /* CURRENT ERROR */
4761                 buffer[offset] = 0x70;
4762                 /* ILLEGAL REQUEST */
4763                 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
4764                 /* LOGICAL UNIT NOT SUPPORTED */
4765                 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x25;
4766                 break;
4767         case TCM_UNSUPPORTED_SCSI_OPCODE:
4768         case TCM_SECTOR_COUNT_TOO_MANY:
4769                 /* CURRENT ERROR */
4770                 buffer[offset] = 0x70;
4771                 /* ILLEGAL REQUEST */
4772                 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
4773                 /* INVALID COMMAND OPERATION CODE */
4774                 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x20;
4775                 break;
4776         case TCM_UNKNOWN_MODE_PAGE:
4777                 /* CURRENT ERROR */
4778                 buffer[offset] = 0x70;
4779                 /* ILLEGAL REQUEST */
4780                 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
4781                 /* INVALID FIELD IN CDB */
4782                 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24;
4783                 break;
4784         case TCM_CHECK_CONDITION_ABORT_CMD:
4785                 /* CURRENT ERROR */
4786                 buffer[offset] = 0x70;
4787                 /* ABORTED COMMAND */
4788                 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
4789                 /* BUS DEVICE RESET FUNCTION OCCURRED */
4790                 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x29;
4791                 buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x03;
4792                 break;
4793         case TCM_INCORRECT_AMOUNT_OF_DATA:
4794                 /* CURRENT ERROR */
4795                 buffer[offset] = 0x70;
4796                 /* ABORTED COMMAND */
4797                 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
4798                 /* WRITE ERROR */
4799                 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c;
4800                 /* NOT ENOUGH UNSOLICITED DATA */
4801                 buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0d;
4802                 break;
4803         case TCM_INVALID_CDB_FIELD:
4804                 /* CURRENT ERROR */
4805                 buffer[offset] = 0x70;
4806                 /* ABORTED COMMAND */
4807                 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
4808                 /* INVALID FIELD IN CDB */
4809                 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24;
4810                 break;
4811         case TCM_INVALID_PARAMETER_LIST:
4812                 /* CURRENT ERROR */
4813                 buffer[offset] = 0x70;
4814                 /* ABORTED COMMAND */
4815                 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
4816                 /* INVALID FIELD IN PARAMETER LIST */
4817                 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x26;
4818                 break;
4819         case TCM_UNEXPECTED_UNSOLICITED_DATA:
4820                 /* CURRENT ERROR */
4821                 buffer[offset] = 0x70;
4822                 /* ABORTED COMMAND */
4823                 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
4824                 /* WRITE ERROR */
4825                 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c;
4826                 /* UNEXPECTED_UNSOLICITED_DATA */
4827                 buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0c;
4828                 break;
4829         case TCM_SERVICE_CRC_ERROR:
4830                 /* CURRENT ERROR */
4831                 buffer[offset] = 0x70;
4832                 /* ABORTED COMMAND */
4833                 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
4834                 /* PROTOCOL SERVICE CRC ERROR */
4835                 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x47;
4836                 /* N/A */
4837                 buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x05;
4838                 break;
4839         case TCM_SNACK_REJECTED:
4840                 /* CURRENT ERROR */
4841                 buffer[offset] = 0x70;
4842                 /* ABORTED COMMAND */
4843                 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
4844                 /* READ ERROR */
4845                 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x11;
4846                 /* FAILED RETRANSMISSION REQUEST */
4847                 buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x13;
4848                 break;
4849         case TCM_WRITE_PROTECTED:
4850                 /* CURRENT ERROR */
4851                 buffer[offset] = 0x70;
4852                 /* DATA PROTECT */
4853                 buffer[offset+SPC_SENSE_KEY_OFFSET] = DATA_PROTECT;
4854                 /* WRITE PROTECTED */
4855                 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x27;
4856                 break;
4857         case TCM_CHECK_CONDITION_UNIT_ATTENTION:
4858                 /* CURRENT ERROR */
4859                 buffer[offset] = 0x70;
4860                 /* UNIT ATTENTION */
4861                 buffer[offset+SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
4862                 core_scsi3_ua_for_check_condition(cmd, &asc, &ascq);
4863                 buffer[offset+SPC_ASC_KEY_OFFSET] = asc;
4864                 buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq;
4865                 break;
4866         case TCM_CHECK_CONDITION_NOT_READY:
4867                 /* CURRENT ERROR */
4868                 buffer[offset] = 0x70;
4869                 /* Not Ready */
4870                 buffer[offset+SPC_SENSE_KEY_OFFSET] = NOT_READY;
4871                 transport_get_sense_codes(cmd, &asc, &ascq);
4872                 buffer[offset+SPC_ASC_KEY_OFFSET] = asc;
4873                 buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq;
4874                 break;
4875         case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
4876         default:
4877                 /* CURRENT ERROR */
4878                 buffer[offset] = 0x70;
4879                 /* ILLEGAL REQUEST */
4880                 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
4881                 /* LOGICAL UNIT COMMUNICATION FAILURE */
4882                 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x80;
4883                 break;
4884         }
4885         /*
4886          * This code uses linux/include/scsi/scsi.h SAM status codes!
4887          */
4888         cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
4889         /*
4890          * Automatically padded, this value is encoded in the fabric's
4891          * data_length response PDU containing the SCSI defined sense data.
4892          */
4893         cmd->scsi_sense_length  = TRANSPORT_SENSE_BUFFER + offset;
4894
4895 after_reason:
4896         return cmd->se_tfo->queue_status(cmd);
4897 }
4898 EXPORT_SYMBOL(transport_send_check_condition_and_sense);
4899
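/*
 * Illustrative usage: fail a command with CHECK CONDITION status when CDB
 * validation fails; example_cdb_is_valid() is a hypothetical helper.
 * Passing from_transport=0 marks the sense data as emulated by the core
 * (SCF_EMULATED_TASK_SENSE) rather than returned by the backend device.
 */
#if 0
        if (!example_cdb_is_valid(cmd->t_task_cdb))
                return transport_send_check_condition_and_sense(cmd,
                                TCM_INVALID_CDB_FIELD, 0);
#endif
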
4900 int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
4901 {
4902         int ret = 0;
4903
4904         if (atomic_read(&cmd->t_transport_aborted) != 0) {
4905                 if (!send_status ||
4906                      (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
4907                         return 1;
4908 #if 0
4909                 pr_debug("Sending delayed SAM_STAT_TASK_ABORTED"
4910                         " status for CDB: 0x%02x ITT: 0x%08x\n",
4911                         cmd->t_task_cdb[0],
4912                         cmd->se_tfo->get_task_tag(cmd));
4913 #endif
4914                 cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS;
4915                 cmd->se_tfo->queue_status(cmd);
4916                 ret = 1;
4917         }
4918         return ret;
4919 }
4920 EXPORT_SYMBOL(transport_check_aborted_status);
4921
4922 void transport_send_task_abort(struct se_cmd *cmd)
4923 {
4924         /*
4925          * If there are still expected incoming fabric WRITEs, we wait
4926          * until they have completed before sending a TASK_ABORTED
4927          * response.  This response with TASK_ABORTED status will be
4928          * queued back to fabric module by transport_check_aborted_status().
4929          */
4930         if (cmd->data_direction == DMA_TO_DEVICE) {
4931                 if (cmd->se_tfo->write_pending_status(cmd) != 0) {
4932                         atomic_inc(&cmd->t_transport_aborted);
4933                         smp_mb__after_atomic_inc();
4934                         cmd->scsi_status = SAM_STAT_TASK_ABORTED;
4935                         transport_new_cmd_failure(cmd);
4936                         return;
4937                 }
4938         }
4939         cmd->scsi_status = SAM_STAT_TASK_ABORTED;
4940 #if 0
4941         pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x,"
4942                 " ITT: 0x%08x\n", cmd->t_task_cdb[0],
4943                 cmd->se_tfo->get_task_tag(cmd));
4944 #endif
4945         cmd->se_tfo->queue_status(cmd);
4946 }
4947
4948 /*      transport_generic_do_tmr():
4949  *
4950  *      Execute the requested task management function and queue the response.
4951  */
4952 int transport_generic_do_tmr(struct se_cmd *cmd)
4953 {
4954         struct se_device *dev = cmd->se_dev;
4955         struct se_tmr_req *tmr = cmd->se_tmr_req;
4956         int ret;
4957
4958         switch (tmr->function) {
4959         case TMR_ABORT_TASK:
4960                 tmr->response = TMR_FUNCTION_REJECTED;
4961                 break;
4962         case TMR_ABORT_TASK_SET:
4963         case TMR_CLEAR_ACA:
4964         case TMR_CLEAR_TASK_SET:
4965                 tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
4966                 break;
4967         case TMR_LUN_RESET:
4968                 ret = core_tmr_lun_reset(dev, tmr, NULL, NULL);
4969                 tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE :
4970                                          TMR_FUNCTION_REJECTED;
4971                 break;
4972         case TMR_TARGET_WARM_RESET:
4973                 tmr->response = TMR_FUNCTION_REJECTED;
4974                 break;
4975         case TMR_TARGET_COLD_RESET:
4976                 tmr->response = TMR_FUNCTION_REJECTED;
4977                 break;
4978         default:
4979                 pr_err("Uknown TMR function: 0x%02x.\n",
4980                                 tmr->function);
4981                 tmr->response = TMR_FUNCTION_REJECTED;
4982                 break;
4983         }
4984
4985         cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
4986         cmd->se_tfo->queue_tm_rsp(cmd);
4987
4988         transport_cmd_check_stop(cmd, 2, 0);
4989         return 0;
4990 }
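
/*
 * Of the TMR functions dispatched above, only TMR_LUN_RESET is actually
 * carried out (via core_tmr_lun_reset()); TMR_ABORT_TASK and the target
 * reset functions are rejected, and the task-set functions report
 * TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED.
 */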
4991
4992 /*
4993  *      Called with spin_lock_irq(&dev->execute_task_lock) held.
4994  *
4995  */
4996 static struct se_task *
4997 transport_get_task_from_state_list(struct se_device *dev)
4998 {
4999         struct se_task *task;
5000
5001         if (list_empty(&dev->state_task_list))
5002                 return NULL;
5003
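        /* Equivalent to list_first_entry(): take the first task and stop. */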
5004         list_for_each_entry(task, &dev->state_task_list, t_state_list)
5005                 break;
5006
5007         list_del(&task->t_state_list);
5008         atomic_set(&task->task_state_active, 0);
5009
5010         return task;
5011 }
5012
5013 static void transport_processing_shutdown(struct se_device *dev)
5014 {
5015         struct se_cmd *cmd;
5016         struct se_task *task;
5017         unsigned long flags;
5018         /*
5019          * Empty the struct se_device's struct se_task state list.
5020          */
5021         spin_lock_irqsave(&dev->execute_task_lock, flags);
5022         while ((task = transport_get_task_from_state_list(dev))) {
5023                 if (!task->task_se_cmd) {
5024                         pr_err("task->task_se_cmd is NULL!\n");
5025                         continue;
5026                 }
5027                 cmd = task->task_se_cmd;
5028
5029                 spin_unlock_irqrestore(&dev->execute_task_lock, flags);
5030
5031                 spin_lock_irqsave(&cmd->t_state_lock, flags);
5032
5033                 pr_debug("PT: cmd: %p task: %p ITT: 0x%08x,"
5034                         " i_state: %d, t_state/def_t_state:"
5035                         " %d/%d cdb: 0x%02x\n", cmd, task,
5036                         cmd->se_tfo->get_task_tag(cmd),
5037                         cmd->se_tfo->get_cmd_state(cmd),
5038                         cmd->t_state, cmd->deferred_t_state,
5039                         cmd->t_task_cdb[0]);
5040                 pr_debug("PT: ITT[0x%08x] - t_tasks: %d t_task_cdbs_left:"
5041                         " %d t_task_cdbs_sent: %d -- t_transport_active: %d"
5042                         " t_transport_stop: %d t_transport_sent: %d\n",
5043                         cmd->se_tfo->get_task_tag(cmd),
5044                         cmd->t_task_list_num,
5045                         atomic_read(&cmd->t_task_cdbs_left),
5046                         atomic_read(&cmd->t_task_cdbs_sent),
5047                         atomic_read(&cmd->t_transport_active),
5048                         atomic_read(&cmd->t_transport_stop),
5049                         atomic_read(&cmd->t_transport_sent));
5050
5051                 if (atomic_read(&task->task_active)) {
5052                         atomic_set(&task->task_stop, 1);
5053                         spin_unlock_irqrestore(
5054                                 &cmd->t_state_lock, flags);
5055
5056                         pr_debug("Waiting for task: %p to shutdown for dev:"
5057                                 " %p\n", task, dev);
5058                         wait_for_completion(&task->task_stop_comp);
5059                         pr_debug("Completed task: %p shutdown for dev: %p\n",
5060                                 task, dev);
5061
5062                         spin_lock_irqsave(&cmd->t_state_lock, flags);
5063                         atomic_dec(&cmd->t_task_cdbs_left);
5064
5065                         atomic_set(&task->task_active, 0);
5066                         atomic_set(&task->task_stop, 0);
5067                 } else {
5068                         if (atomic_read(&task->task_execute_queue) != 0)
5069                                 transport_remove_task_from_execute_queue(task, dev);
5070                 }
5071                 __transport_stop_task_timer(task, &flags);
5072
5073                 if (!atomic_dec_and_test(&cmd->t_task_cdbs_ex_left)) {
5074                         spin_unlock_irqrestore(
5075                                         &cmd->t_state_lock, flags);
5076
5077                         pr_debug("Skipping task: %p, dev: %p for"
5078                                 " t_task_cdbs_ex_left: %d\n", task, dev,
5079                                 atomic_read(&cmd->t_task_cdbs_ex_left));
5080
5081                         spin_lock_irqsave(&dev->execute_task_lock, flags);
5082                         continue;
5083                 }
5084
5085                 if (atomic_read(&cmd->t_transport_active)) {
5086                         pr_debug("got t_transport_active = 1 for task: %p, dev:"
5087                                         " %p\n", task, dev);
5088
5089                         if (atomic_read(&cmd->t_fe_count)) {
5090                                 spin_unlock_irqrestore(
5091                                         &cmd->t_state_lock, flags);
5092                                 transport_send_check_condition_and_sense(
5093                                         cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE,
5094                                         0);
5095                                 transport_remove_cmd_from_queue(cmd,
5096                                         &cmd->se_dev->dev_queue_obj);
5097
5098                                 transport_lun_remove_cmd(cmd);
5099                                 transport_cmd_check_stop(cmd, 1, 0);
5100                         } else {
5101                                 spin_unlock_irqrestore(
5102                                         &cmd->t_state_lock, flags);
5103
5104                                 transport_remove_cmd_from_queue(cmd,
5105                                         &cmd->se_dev->dev_queue_obj);
5106
5107                                 transport_lun_remove_cmd(cmd);
5108
5109                                 if (transport_cmd_check_stop(cmd, 1, 0))
5110                                         transport_generic_remove(cmd, 0);
5111                         }
5112
5113                         spin_lock_irqsave(&dev->execute_task_lock, flags);
5114                         continue;
5115                 }
5116                 pr_debug("Got t_transport_active = 0 for task: %p, dev: %p\n",
5117                                 task, dev);
5118
5119                 if (atomic_read(&cmd->t_fe_count)) {
5120                         spin_unlock_irqrestore(
5121                                 &cmd->t_state_lock, flags);
5122                         transport_send_check_condition_and_sense(cmd,
5123                                 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
5124                         transport_remove_cmd_from_queue(cmd,
5125                                 &cmd->se_dev->dev_queue_obj);
5126
5127                         transport_lun_remove_cmd(cmd);
5128                         transport_cmd_check_stop(cmd, 1, 0);
5129                 } else {
5130                         spin_unlock_irqrestore(
5131                                 &cmd->t_state_lock, flags);
5132
5133                         transport_remove_cmd_from_queue(cmd,
5134                                 &cmd->se_dev->dev_queue_obj);
5135                         transport_lun_remove_cmd(cmd);
5136
5137                         if (transport_cmd_check_stop(cmd, 1, 0))
5138                                 transport_generic_remove(cmd, 0);
5139                 }
5140
5141                 spin_lock_irqsave(&dev->execute_task_lock, flags);
5142         }
5143         spin_unlock_irqrestore(&dev->execute_task_lock, flags);
5144         /*
5145          * Empty the struct se_device's struct se_cmd list.
5146          */
5147         while ((cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj))) {
5148
5149                 pr_debug("From Device Queue: cmd: %p t_state: %d\n",
5150                                 cmd, cmd->t_state);
5151
5152                 if (atomic_read(&cmd->t_fe_count)) {
5153                         transport_send_check_condition_and_sense(cmd,
5154                                 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
5155
5156                         transport_lun_remove_cmd(cmd);
5157                         transport_cmd_check_stop(cmd, 1, 0);
5158                 } else {
5159                         transport_lun_remove_cmd(cmd);
5160                         if (transport_cmd_check_stop(cmd, 1, 0))
5161                                 transport_generic_remove(cmd, 0);
5162                 }
5163         }
5164 }
5165
5166 /*      transport_processing_thread():
5167  *
5168  *      Per-device kernel thread that processes commands queued on dev_queue_obj.
5169  */
5170 static int transport_processing_thread(void *param)
5171 {
5172         int ret;
5173         struct se_cmd *cmd;
5174         struct se_device *dev = (struct se_device *) param;
5175
5176         set_user_nice(current, -20);
5177
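        /*
         * Standard kthread loop: sleep until work is queued on
         * dev_queue_obj or kthread_stop() is called, then either process
         * the queue or handle device shutdown.
         */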
5178         while (!kthread_should_stop()) {
5179                 ret = wait_event_interruptible(dev->dev_queue_obj.thread_wq,
5180                                 atomic_read(&dev->dev_queue_obj.queue_cnt) ||
5181                                 kthread_should_stop());
5182                 if (ret < 0)
5183                         goto out;
5184
5185                 spin_lock_irq(&dev->dev_status_lock);
5186                 if (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN) {
5187                         spin_unlock_irq(&dev->dev_status_lock);
5188                         transport_processing_shutdown(dev);
5189                         continue;
5190                 }
5191                 spin_unlock_irq(&dev->dev_status_lock);
5192
5193 get_cmd:
5194                 __transport_execute_tasks(dev);
5195
5196                 cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj);
5197                 if (!cmd)
5198                         continue;
5199
5200                 switch (cmd->t_state) {
5201                 case TRANSPORT_NEW_CMD_MAP:
5202                         if (!cmd->se_tfo->new_cmd_map) {
5203                                 pr_err("cmd->se_tfo->new_cmd_map is"
5204                                         " NULL for TRANSPORT_NEW_CMD_MAP\n");
5205                                 BUG();
5206                         }
5207                         ret = cmd->se_tfo->new_cmd_map(cmd);
5208                         if (ret < 0) {
5209                                 cmd->transport_error_status = ret;
5210                                 transport_generic_request_failure(cmd, NULL,
5211                                                 0, (cmd->data_direction !=
5212                                                     DMA_TO_DEVICE));
5213                                 break;
5214                         }
5215                         /* Fall through */
5216                 case TRANSPORT_NEW_CMD:
5217                         ret = transport_generic_new_cmd(cmd);
5218                         if (ret == -EAGAIN)
5219                                 break;
5220                         else if (ret < 0) {
5221                                 cmd->transport_error_status = ret;
5222                                 transport_generic_request_failure(cmd, NULL,
5223                                         0, (cmd->data_direction !=
5224                                          DMA_TO_DEVICE));
5225                         }
5226                         break;
5227                 case TRANSPORT_PROCESS_WRITE:
5228                         transport_generic_process_write(cmd);
5229                         break;
5230                 case TRANSPORT_COMPLETE_OK:
5231                         transport_stop_all_task_timers(cmd);
5232                         transport_generic_complete_ok(cmd);
5233                         break;
5234                 case TRANSPORT_REMOVE:
5235                         transport_generic_remove(cmd, 0);
5236                         break;
5237                 case TRANSPORT_FREE_CMD_INTR:
5238                         transport_generic_free_cmd(cmd, 0, 0);
5239                         break;
5240                 case TRANSPORT_PROCESS_TMR:
5241                         transport_generic_do_tmr(cmd);
5242                         break;
5243                 case TRANSPORT_COMPLETE_FAILURE:
5244                         transport_generic_request_failure(cmd, NULL, 1, 1);
5245                         break;
5246                 case TRANSPORT_COMPLETE_TIMEOUT:
5247                         transport_stop_all_task_timers(cmd);
5248                         transport_generic_request_timeout(cmd);
5249                         break;
5250                 case TRANSPORT_COMPLETE_QF_WP:
5251                         transport_generic_write_pending(cmd);
5252                         break;
5253                 default:
5254                         pr_err("Unknown t_state: %d deferred_t_state:"
5255                                 " %d for ITT: 0x%08x i_state: %d on SE LUN:"
5256                                 " %u\n", cmd->t_state, cmd->deferred_t_state,
5257                                 cmd->se_tfo->get_task_tag(cmd),
5258                                 cmd->se_tfo->get_cmd_state(cmd),
5259                                 cmd->se_lun->unpacked_lun);
5260                         BUG();
5261                 }
5262
5263                 goto get_cmd;
5264         }
5265
5266 out:
5267         transport_release_all_cmds(dev);
5268         dev->process_thread = NULL;
5269         return 0;
5270 }