[pandora-kernel.git] drivers/target/iscsi/iscsi_target_util.c
1 /*******************************************************************************
2  * This file contains the iSCSI Target specific utility functions.
3  *
4  * © Copyright 2007-2011 RisingTide Systems LLC.
5  *
6  * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
7  *
8  * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
9  *
10  * This program is free software; you can redistribute it and/or modify
11  * it under the terms of the GNU General Public License as published by
12  * the Free Software Foundation; either version 2 of the License, or
13  * (at your option) any later version.
14  *
15  * This program is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18  * GNU General Public License for more details.
19  ******************************************************************************/
20
21 #include <linux/list.h>
22 #include <scsi/scsi_tcq.h>
23 #include <scsi/iscsi_proto.h>
24 #include <target/target_core_base.h>
25 #include <target/target_core_transport.h>
26 #include <target/target_core_tmr.h>
27 #include <target/target_core_fabric_ops.h>
28 #include <target/target_core_configfs.h>
29
30 #include "iscsi_target_core.h"
31 #include "iscsi_target_parameters.h"
32 #include "iscsi_target_seq_pdu_list.h"
33 #include "iscsi_target_datain_values.h"
34 #include "iscsi_target_erl0.h"
35 #include "iscsi_target_erl1.h"
36 #include "iscsi_target_erl2.h"
37 #include "iscsi_target_tpg.h"
38 #include "iscsi_target_tq.h"
39 #include "iscsi_target_util.h"
40 #include "iscsi_target.h"
41
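/*
 *      Debug helper: hex dump 'len' bytes of 'buff' via pr_debug(),
 *      16 bytes per line, each line prefixed with its starting offset.
 */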
42 #define PRINT_BUFF(buff, len)                                   \
43 {                                                               \
44         int zzz;                                                \
45                                                                 \
46         pr_debug("%d:\n", __LINE__);                            \
47         for (zzz = 0; zzz < len; zzz++) {                       \
48                 if (zzz % 16 == 0) {                            \
49                         if (zzz)                                \
50                                 pr_debug("\n");                 \
51                         pr_debug("%4i: ", zzz);                 \
52                 }                                               \
53                 pr_debug("%02x ", (unsigned char) (buff)[zzz]); \
54         }                                                       \
55         if ((len + 1) % 16)                                     \
56                 pr_debug("\n");                                 \
57 }
58
59 extern struct list_head g_tiqn_list;
60 extern spinlock_t tiqn_lock;
61
62 /*
63  *      Called with cmd->r2t_lock held.
64  */
65 int iscsit_add_r2t_to_list(
66         struct iscsi_cmd *cmd,
67         u32 offset,
68         u32 xfer_len,
69         int recovery,
70         u32 r2t_sn)
71 {
72         struct iscsi_r2t *r2t;
73
74         r2t = kmem_cache_zalloc(lio_r2t_cache, GFP_ATOMIC);
75         if (!r2t) {
76                 pr_err("Unable to allocate memory for struct iscsi_r2t.\n");
77                 return -1;
78         }
79         INIT_LIST_HEAD(&r2t->r2t_list);
80
81         r2t->recovery_r2t = recovery;
82         r2t->r2t_sn = (!r2t_sn) ? cmd->r2t_sn++ : r2t_sn;
83         r2t->offset = offset;
84         r2t->xfer_len = xfer_len;
85         list_add_tail(&r2t->r2t_list, &cmd->cmd_r2t_list);
86         spin_unlock_bh(&cmd->r2t_lock);
87
88         iscsit_add_cmd_to_immediate_queue(cmd, cmd->conn, ISTATE_SEND_R2T);
89
90         spin_lock_bh(&cmd->r2t_lock);
91         return 0;
92 }
93
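/*
 *      Locate the R2T whose offset/xfer_len range fully covers the given
 *      offset and length (the _eos suffix presumably refers to end-of-
 *      sequence recovery). Returns NULL and logs an error if none matches.
 */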
94 struct iscsi_r2t *iscsit_get_r2t_for_eos(
95         struct iscsi_cmd *cmd,
96         u32 offset,
97         u32 length)
98 {
99         struct iscsi_r2t *r2t;
100
101         spin_lock_bh(&cmd->r2t_lock);
102         list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
103                 if ((r2t->offset <= offset) &&
104                     (r2t->offset + r2t->xfer_len) >= (offset + length)) {
105                         spin_unlock_bh(&cmd->r2t_lock);
106                         return r2t;
107                 }
108         }
109         spin_unlock_bh(&cmd->r2t_lock);
110
111         pr_err("Unable to locate R2T for Offset: %u, Length:"
112                         " %u\n", offset, length);
113         return NULL;
114 }
115
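/*
 *      Return the first R2T on cmd->cmd_r2t_list that has not yet been
 *      sent, or NULL if every queued R2T has already been transmitted.
 */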
116 struct iscsi_r2t *iscsit_get_r2t_from_list(struct iscsi_cmd *cmd)
117 {
118         struct iscsi_r2t *r2t;
119
120         spin_lock_bh(&cmd->r2t_lock);
121         list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
122                 if (!r2t->sent_r2t) {
123                         spin_unlock_bh(&cmd->r2t_lock);
124                         return r2t;
125                 }
126         }
127         spin_unlock_bh(&cmd->r2t_lock);
128
129         pr_err("Unable to locate next R2T to send for ITT:"
130                         " 0x%08x.\n", cmd->init_task_tag);
131         return NULL;
132 }
133
134 /*
135  *      Called with cmd->r2t_lock held.
136  */
137 void iscsit_free_r2t(struct iscsi_r2t *r2t, struct iscsi_cmd *cmd)
138 {
139         list_del(&r2t->r2t_list);
140         kmem_cache_free(lio_r2t_cache, r2t);
141 }
142
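/*
 *      Release every R2T descriptor still queued on cmd->cmd_r2t_list,
 *      taking cmd->r2t_lock around the walk.
 */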
143 void iscsit_free_r2ts_from_list(struct iscsi_cmd *cmd)
144 {
145         struct iscsi_r2t *r2t, *r2t_tmp;
146
147         spin_lock_bh(&cmd->r2t_lock);
148         list_for_each_entry_safe(r2t, r2t_tmp, &cmd->cmd_r2t_list, r2t_list)
149                 iscsit_free_r2t(r2t, cmd);
150         spin_unlock_bh(&cmd->r2t_lock);
151 }
152
153 /*
154  * May be called from software interrupt (timer) context for allocating
155  * iSCSI NopINs.
156  */
157 struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, gfp_t gfp_mask)
158 {
159         struct iscsi_cmd *cmd;
160
161         cmd = kmem_cache_zalloc(lio_cmd_cache, gfp_mask);
162         if (!cmd) {
163                 pr_err("Unable to allocate memory for struct iscsi_cmd.\n");
164                 return NULL;
165         }
166
167         cmd->conn       = conn;
168         INIT_LIST_HEAD(&cmd->i_list);
169         INIT_LIST_HEAD(&cmd->datain_list);
170         INIT_LIST_HEAD(&cmd->cmd_r2t_list);
171         init_completion(&cmd->reject_comp);
172         spin_lock_init(&cmd->datain_lock);
173         spin_lock_init(&cmd->dataout_timeout_lock);
174         spin_lock_init(&cmd->istate_lock);
175         spin_lock_init(&cmd->error_lock);
176         spin_lock_init(&cmd->r2t_lock);
177
178         return cmd;
179 }
180
181 /*
182  * Called from iscsi_handle_scsi_cmd()
183  */
184 struct iscsi_cmd *iscsit_allocate_se_cmd(
185         struct iscsi_conn *conn,
186         u32 data_length,
187         int data_direction,
188         int iscsi_task_attr)
189 {
190         struct iscsi_cmd *cmd;
191         struct se_cmd *se_cmd;
192         int sam_task_attr;
193
194         cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
195         if (!cmd)
196                 return NULL;
197
198         cmd->data_direction = data_direction;
199         cmd->data_length = data_length;
200         /*
201          * Figure out the SAM Task Attribute for the incoming SCSI CDB
202          */
203         if ((iscsi_task_attr == ISCSI_ATTR_UNTAGGED) ||
204             (iscsi_task_attr == ISCSI_ATTR_SIMPLE))
205                 sam_task_attr = MSG_SIMPLE_TAG;
206         else if (iscsi_task_attr == ISCSI_ATTR_ORDERED)
207                 sam_task_attr = MSG_ORDERED_TAG;
208         else if (iscsi_task_attr == ISCSI_ATTR_HEAD_OF_QUEUE)
209                 sam_task_attr = MSG_HEAD_TAG;
210         else if (iscsi_task_attr == ISCSI_ATTR_ACA)
211                 sam_task_attr = MSG_ACA_TAG;
212         else {
213                 pr_debug("Unknown iSCSI Task Attribute: 0x%02x, using"
214                         " MSG_SIMPLE_TAG\n", iscsi_task_attr);
215                 sam_task_attr = MSG_SIMPLE_TAG;
216         }
217
218         se_cmd = &cmd->se_cmd;
219         /*
220          * Initialize struct se_cmd descriptor from target_core_mod infrastructure
221          */
222         transport_init_se_cmd(se_cmd, &lio_target_fabric_configfs->tf_ops,
223                         conn->sess->se_sess, data_length, data_direction,
224                         sam_task_attr, &cmd->sense_buffer[0]);
225         return cmd;
226 }
227
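/*
 *      Allocate an iscsi_cmd plus iscsi_tmr_req for an incoming Task
 *      Management Function request and map the iSCSI TMR function code
 *      onto the corresponding target core TMR_* value. TASK_REASSIGN is
 *      handled entirely inside the iSCSI fabric module, so it skips the
 *      target core se_cmd/se_tmr_req setup.
 */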
228 struct iscsi_cmd *iscsit_allocate_se_cmd_for_tmr(
229         struct iscsi_conn *conn,
230         u8 function)
231 {
232         struct iscsi_cmd *cmd;
233         struct se_cmd *se_cmd;
234         u8 tcm_function;
235
236         cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
237         if (!cmd)
238                 return NULL;
239
240         cmd->data_direction = DMA_NONE;
241
242         cmd->tmr_req = kzalloc(sizeof(struct iscsi_tmr_req), GFP_KERNEL);
243         if (!cmd->tmr_req) {
244                 pr_err("Unable to allocate memory for Task Management command!\n");
245                 iscsit_release_cmd(cmd);
246                 return NULL;
247         }
248         /*
249          * TASK_REASSIGN for ERL=2 / connection stays inside of
250          * LIO-Target $FABRIC_MOD
251          */
252         if (function == ISCSI_TM_FUNC_TASK_REASSIGN)
253                 return cmd;
254
255         se_cmd = &cmd->se_cmd;
256         /*
257          * Initialize struct se_cmd descriptor from target_core_mod infrastructure
258          */
259         transport_init_se_cmd(se_cmd, &lio_target_fabric_configfs->tf_ops,
260                                 conn->sess->se_sess, 0, DMA_NONE,
261                                 MSG_SIMPLE_TAG, &cmd->sense_buffer[0]);
262
263         switch (function) {
264         case ISCSI_TM_FUNC_ABORT_TASK:
265                 tcm_function = TMR_ABORT_TASK;
266                 break;
267         case ISCSI_TM_FUNC_ABORT_TASK_SET:
268                 tcm_function = TMR_ABORT_TASK_SET;
269                 break;
270         case ISCSI_TM_FUNC_CLEAR_ACA:
271                 tcm_function = TMR_CLEAR_ACA;
272                 break;
273         case ISCSI_TM_FUNC_CLEAR_TASK_SET:
274                 tcm_function = TMR_CLEAR_TASK_SET;
275                 break;
276         case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET:
277                 tcm_function = TMR_LUN_RESET;
278                 break;
279         case ISCSI_TM_FUNC_TARGET_WARM_RESET:
280                 tcm_function = TMR_TARGET_WARM_RESET;
281                 break;
282         case ISCSI_TM_FUNC_TARGET_COLD_RESET:
283                 tcm_function = TMR_TARGET_COLD_RESET;
284                 break;
285         default:
286                 pr_err("Unknown iSCSI TMR Function:"
287                         " 0x%02x\n", function);
288                 goto out;
289         }
290
291         se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd,
292                                 (void *)cmd->tmr_req, tcm_function);
293         if (!se_cmd->se_tmr_req)
294                 goto out;
295
296         cmd->tmr_req->se_tmr_req = se_cmd->se_tmr_req;
297
298         return cmd;
299 out:
300         iscsit_release_cmd(cmd);
301         if (se_cmd)
302                 transport_free_se_cmd(se_cmd);
303         return NULL;
304 }
305
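/*
 *      Decide whether a PDU/sequence list must be built for this command.
 *      Nothing is needed when DataSequenceInOrder=Yes and DataPDUInOrder=Yes
 *      or when there is no data phase; otherwise an iscsi_build_list is
 *      filled out (direction, randomization flags, immediate/unsolicited
 *      type) and handed to iscsit_do_build_list().
 */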
306 int iscsit_decide_list_to_build(
307         struct iscsi_cmd *cmd,
308         u32 immediate_data_length)
309 {
310         struct iscsi_build_list bl;
311         struct iscsi_conn *conn = cmd->conn;
312         struct iscsi_session *sess = conn->sess;
313         struct iscsi_node_attrib *na;
314
315         if (sess->sess_ops->DataSequenceInOrder &&
316             sess->sess_ops->DataPDUInOrder)
317                 return 0;
318
319         if (cmd->data_direction == DMA_NONE)
320                 return 0;
321
322         na = iscsit_tpg_get_node_attrib(sess);
323         memset(&bl, 0, sizeof(struct iscsi_build_list));
324
325         if (cmd->data_direction == DMA_FROM_DEVICE) {
326                 bl.data_direction = ISCSI_PDU_READ;
327                 bl.type = PDULIST_NORMAL;
328                 if (na->random_datain_pdu_offsets)
329                         bl.randomize |= RANDOM_DATAIN_PDU_OFFSETS;
330                 if (na->random_datain_seq_offsets)
331                         bl.randomize |= RANDOM_DATAIN_SEQ_OFFSETS;
332         } else {
333                 bl.data_direction = ISCSI_PDU_WRITE;
334                 bl.immediate_data_length = immediate_data_length;
335                 if (na->random_r2t_offsets)
336                         bl.randomize |= RANDOM_R2T_OFFSETS;
337
338                 if (!cmd->immediate_data && !cmd->unsolicited_data)
339                         bl.type = PDULIST_NORMAL;
340                 else if (cmd->immediate_data && !cmd->unsolicited_data)
341                         bl.type = PDULIST_IMMEDIATE;
342                 else if (!cmd->immediate_data && cmd->unsolicited_data)
343                         bl.type = PDULIST_UNSOLICITED;
344                 else if (cmd->immediate_data && cmd->unsolicited_data)
345                         bl.type = PDULIST_IMMEDIATE_AND_UNSOLICITED;
346         }
347
348         return iscsit_do_build_list(cmd, &bl);
349 }
350
351 struct iscsi_seq *iscsit_get_seq_holder_for_datain(
352         struct iscsi_cmd *cmd,
353         u32 seq_send_order)
354 {
355         u32 i;
356
357         for (i = 0; i < cmd->seq_count; i++)
358                 if (cmd->seq_list[i].seq_send_order == seq_send_order)
359                         return &cmd->seq_list[i];
360
361         return NULL;
362 }
363
364 struct iscsi_seq *iscsit_get_seq_holder_for_r2t(struct iscsi_cmd *cmd)
365 {
366         u32 i;
367
368         if (!cmd->seq_list) {
369                 pr_err("struct iscsi_cmd->seq_list is NULL!\n");
370                 return NULL;
371         }
372
373         for (i = 0; i < cmd->seq_count; i++) {
374                 if (cmd->seq_list[i].type != SEQTYPE_NORMAL)
375                         continue;
376                 if (cmd->seq_list[i].seq_send_order == cmd->seq_send_order) {
377                         cmd->seq_send_order++;
378                         return &cmd->seq_list[i];
379                 }
380         }
381
382         return NULL;
383 }
384
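/*
 *      Look up the R2T descriptor with the given R2TSN on
 *      cmd->cmd_r2t_list; returns NULL when no match exists.
 */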
385 struct iscsi_r2t *iscsit_get_holder_for_r2tsn(
386         struct iscsi_cmd *cmd,
387         u32 r2t_sn)
388 {
389         struct iscsi_r2t *r2t;
390
391         spin_lock_bh(&cmd->r2t_lock);
392         list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
393                 if (r2t->r2t_sn == r2t_sn) {
394                         spin_unlock_bh(&cmd->r2t_lock);
395                         return r2t;
396                 }
397         }
398         spin_unlock_bh(&cmd->r2t_lock);
399
400         return NULL;
401 }
402
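/*
 *      Classify a received CmdSN against the session's ExpCmdSN/MaxCmdSN
 *      window. iscsi_sna_gt() performs a serial number arithmetic
 *      comparison, so the check remains correct across 32-bit CmdSN wrap.
 */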
403 static inline int iscsit_check_received_cmdsn(struct iscsi_session *sess, u32 cmdsn)
404 {
405         int ret;
406
407         /*
408          * This is the proper method of checking received CmdSN against
409          * ExpCmdSN and MaxCmdSN values, as well as accounting for out
410          * of order CmdSNs due to multiple connection sessions and/or
411          * CRC failures.
412          */
413         if (iscsi_sna_gt(cmdsn, sess->max_cmd_sn)) {
414                 pr_err("Received CmdSN: 0x%08x is greater than"
415                        " MaxCmdSN: 0x%08x, protocol error.\n", cmdsn,
416                        sess->max_cmd_sn);
417                 ret = CMDSN_ERROR_CANNOT_RECOVER;
418
419         } else if (cmdsn == sess->exp_cmd_sn) {
420                 sess->exp_cmd_sn++;
421                 pr_debug("Received CmdSN matches ExpCmdSN,"
422                       " incremented ExpCmdSN to: 0x%08x\n",
423                       sess->exp_cmd_sn);
424                 ret = CMDSN_NORMAL_OPERATION;
425
426         } else if (iscsi_sna_gt(cmdsn, sess->exp_cmd_sn)) {
427                 pr_debug("Received CmdSN: 0x%08x is greater"
428                       " than ExpCmdSN: 0x%08x, not acknowledging.\n",
429                       cmdsn, sess->exp_cmd_sn);
430                 ret = CMDSN_HIGHER_THAN_EXP;
431
432         } else {
433                 pr_err("Received CmdSN: 0x%08x is less than"
434                        " ExpCmdSN: 0x%08x, ignoring.\n", cmdsn,
435                        sess->exp_cmd_sn);
436                 ret = CMDSN_LOWER_THAN_EXP;
437         }
438
439         return ret;
440 }
441
442 /*
443  * Commands may be received out of order if MC/S is in use.
444  * Ensure they are executed in CmdSN order.
445  */
446 int iscsit_sequence_cmd(
447         struct iscsi_conn *conn,
448         struct iscsi_cmd *cmd,
449         u32 cmdsn)
450 {
451         int ret;
452         int cmdsn_ret;
453
454         mutex_lock(&conn->sess->cmdsn_mutex);
455
456         cmdsn_ret = iscsit_check_received_cmdsn(conn->sess, cmdsn);
457         switch (cmdsn_ret) {
458         case CMDSN_NORMAL_OPERATION:
459                 ret = iscsit_execute_cmd(cmd, 0);
460                 if ((ret >= 0) && !list_empty(&conn->sess->sess_ooo_cmdsn_list))
461                         iscsit_execute_ooo_cmdsns(conn->sess);
462                 break;
463         case CMDSN_HIGHER_THAN_EXP:
464                 ret = iscsit_handle_ooo_cmdsn(conn->sess, cmd, cmdsn);
465                 break;
466         case CMDSN_LOWER_THAN_EXP:
467                 cmd->i_state = ISTATE_REMOVE;
468                 iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state);
469                 ret = cmdsn_ret;
470                 break;
471         default:
472                 ret = cmdsn_ret;
473                 break;
474         }
475         mutex_unlock(&conn->sess->cmdsn_mutex);
476
477         return ret;
478 }
479
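/*
 *      Sanity check an unsolicited DataOut burst: reject it when
 *      InitialR2T=Yes was negotiated, when the accumulated first burst
 *      would exceed FirstBurstLength, or when the final PDU leaves the
 *      burst at neither FirstBurstLength nor the expected transfer length.
 */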
480 int iscsit_check_unsolicited_dataout(struct iscsi_cmd *cmd, unsigned char *buf)
481 {
482         struct iscsi_conn *conn = cmd->conn;
483         struct se_cmd *se_cmd = &cmd->se_cmd;
484         struct iscsi_data *hdr = (struct iscsi_data *) buf;
485         u32 payload_length = ntoh24(hdr->dlength);
486
487         if (conn->sess->sess_ops->InitialR2T) {
488                 pr_err("Received unexpected unsolicited data"
489                         " while InitialR2T=Yes, protocol error.\n");
490                 transport_send_check_condition_and_sense(se_cmd,
491                                 TCM_UNEXPECTED_UNSOLICITED_DATA, 0);
492                 return -1;
493         }
494
495         if ((cmd->first_burst_len + payload_length) >
496              conn->sess->sess_ops->FirstBurstLength) {
497                 pr_err("Total %u bytes exceeds FirstBurstLength: %u"
498                         " for this Unsolicited DataOut Burst.\n",
499                         (cmd->first_burst_len + payload_length),
500                                 conn->sess->sess_ops->FirstBurstLength);
501                 transport_send_check_condition_and_sense(se_cmd,
502                                 TCM_INCORRECT_AMOUNT_OF_DATA, 0);
503                 return -1;
504         }
505
506         if (!(hdr->flags & ISCSI_FLAG_CMD_FINAL))
507                 return 0;
508
509         if (((cmd->first_burst_len + payload_length) != cmd->data_length) &&
510             ((cmd->first_burst_len + payload_length) !=
511               conn->sess->sess_ops->FirstBurstLength)) {
512                 pr_err("Unsolicited non-immediate data received %u"
513                         " does not equal FirstBurstLength: %u, and does"
514                         " not equal ExpXferLen %u.\n",
515                         (cmd->first_burst_len + payload_length),
516                         conn->sess->sess_ops->FirstBurstLength, cmd->data_length);
517                 transport_send_check_condition_and_sense(se_cmd,
518                                 TCM_INCORRECT_AMOUNT_OF_DATA, 0);
519                 return -1;
520         }
521         return 0;
522 }
523
524 struct iscsi_cmd *iscsit_find_cmd_from_itt(
525         struct iscsi_conn *conn,
526         u32 init_task_tag)
527 {
528         struct iscsi_cmd *cmd;
529
530         spin_lock_bh(&conn->cmd_lock);
531         list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) {
532                 if (cmd->init_task_tag == init_task_tag) {
533                         spin_unlock_bh(&conn->cmd_lock);
534                         return cmd;
535                 }
536         }
537         spin_unlock_bh(&conn->cmd_lock);
538
539         pr_err("Unable to locate ITT: 0x%08x on CID: %hu\n",
540                         init_task_tag, conn->cid);
541         return NULL;
542 }
543
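/*
 *      Same lookup as iscsit_find_cmd_from_itt(), except that on a miss
 *      the payload of the given length is dumped so the remainder of the
 *      PDU is consumed from the connection.
 */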
544 struct iscsi_cmd *iscsit_find_cmd_from_itt_or_dump(
545         struct iscsi_conn *conn,
546         u32 init_task_tag,
547         u32 length)
548 {
549         struct iscsi_cmd *cmd;
550
551         spin_lock_bh(&conn->cmd_lock);
552         list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) {
553                 if (cmd->init_task_tag == init_task_tag) {
554                         spin_unlock_bh(&conn->cmd_lock);
555                         return cmd;
556                 }
557         }
558         spin_unlock_bh(&conn->cmd_lock);
559
560         pr_err("Unable to locate ITT: 0x%08x on CID: %hu,"
561                         " dumping payload\n", init_task_tag, conn->cid);
562         if (length)
563                 iscsit_dump_data_payload(conn, length, 1);
564
565         return NULL;
566 }
567
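/*
 *      Look up a command on this connection by Target Transfer Tag;
 *      returns NULL and logs an error if no match is found.
 */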
568 struct iscsi_cmd *iscsit_find_cmd_from_ttt(
569         struct iscsi_conn *conn,
570         u32 targ_xfer_tag)
571 {
572         struct iscsi_cmd *cmd = NULL;
573
574         spin_lock_bh(&conn->cmd_lock);
575         list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) {
576                 if (cmd->targ_xfer_tag == targ_xfer_tag) {
577                         spin_unlock_bh(&conn->cmd_lock);
578                         return cmd;
579                 }
580         }
581         spin_unlock_bh(&conn->cmd_lock);
582
583         pr_err("Unable to locate TTT: 0x%08x on CID: %hu\n",
584                         targ_xfer_tag, conn->cid);
585         return NULL;
586 }
587
588 int iscsit_find_cmd_for_recovery(
589         struct iscsi_session *sess,
590         struct iscsi_cmd **cmd_ptr,
591         struct iscsi_conn_recovery **cr_ptr,
592         u32 init_task_tag)
593 {
594         struct iscsi_cmd *cmd = NULL;
595         struct iscsi_conn_recovery *cr;
596         /*
597          * Scan through the inactive connection recovery list's command list.
598          * If init_task_tag matches, the command is still bound to the
599          * failed (inactive) connection.
599          */
600         spin_lock(&sess->cr_i_lock);
601         list_for_each_entry(cr, &sess->cr_inactive_list, cr_list) {
602                 spin_lock(&cr->conn_recovery_cmd_lock);
603                 list_for_each_entry(cmd, &cr->conn_recovery_cmd_list, i_list) {
604                         if (cmd->init_task_tag == init_task_tag) {
605                                 spin_unlock(&cr->conn_recovery_cmd_lock);
606                                 spin_unlock(&sess->cr_i_lock);
607
608                                 *cr_ptr = cr;
609                                 *cmd_ptr = cmd;
610                                 return -2;
611                         }
612                 }
613                 spin_unlock(&cr->conn_recovery_cmd_lock);
614         }
615         spin_unlock(&sess->cr_i_lock);
616         /*
617          * Scan through the active connection recovery list's command list.
618          * If init_task_tag matches, the command is ready to be reassigned.
619          */
620         spin_lock(&sess->cr_a_lock);
621         list_for_each_entry(cr, &sess->cr_active_list, cr_list) {
622                 spin_lock(&cr->conn_recovery_cmd_lock);
623                 list_for_each_entry(cmd, &cr->conn_recovery_cmd_list, i_list) {
624                         if (cmd->init_task_tag == init_task_tag) {
625                                 spin_unlock(&cr->conn_recovery_cmd_lock);
626                                 spin_unlock(&sess->cr_a_lock);
627
628                                 *cr_ptr = cr;
629                                 *cmd_ptr = cmd;
630                                 return 0;
631                         }
632                 }
633                 spin_unlock(&cr->conn_recovery_cmd_lock);
634         }
635         spin_unlock(&sess->cr_a_lock);
636
637         return -1;
638 }
639
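/*
 *      Allocate a queue request for the given state, add it to the
 *      connection's immediate queue and wake the TX thread so the state
 *      transition is processed right away.
 */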
640 void iscsit_add_cmd_to_immediate_queue(
641         struct iscsi_cmd *cmd,
642         struct iscsi_conn *conn,
643         u8 state)
644 {
645         struct iscsi_queue_req *qr;
646
647         qr = kmem_cache_zalloc(lio_qr_cache, GFP_ATOMIC);
648         if (!qr) {
649                 pr_err("Unable to allocate memory for"
650                                 " struct iscsi_queue_req\n");
651                 return;
652         }
653         INIT_LIST_HEAD(&qr->qr_list);
654         qr->cmd = cmd;
655         qr->state = state;
656
657         spin_lock_bh(&conn->immed_queue_lock);
658         list_add_tail(&qr->qr_list, &conn->immed_queue_list);
659         atomic_inc(&cmd->immed_queue_count);
660         atomic_set(&conn->check_immediate_queue, 1);
661         spin_unlock_bh(&conn->immed_queue_lock);
662
663         wake_up_process(conn->thread_set->tx_thread);
664 }
665
666 struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_conn *conn)
667 {
668         struct iscsi_queue_req *qr;
669
670         spin_lock_bh(&conn->immed_queue_lock);
671         if (list_empty(&conn->immed_queue_list)) {
672                 spin_unlock_bh(&conn->immed_queue_lock);
673                 return NULL;
674         }
675         list_for_each_entry(qr, &conn->immed_queue_list, qr_list)
676                 break;
677
678         list_del(&qr->qr_list);
679         if (qr->cmd)
680                 atomic_dec(&qr->cmd->immed_queue_count);
681         spin_unlock_bh(&conn->immed_queue_lock);
682
683         return qr;
684 }
685
686 static void iscsit_remove_cmd_from_immediate_queue(
687         struct iscsi_cmd *cmd,
688         struct iscsi_conn *conn)
689 {
690         struct iscsi_queue_req *qr, *qr_tmp;
691
692         spin_lock_bh(&conn->immed_queue_lock);
693         if (!atomic_read(&cmd->immed_queue_count)) {
694                 spin_unlock_bh(&conn->immed_queue_lock);
695                 return;
696         }
697
698         list_for_each_entry_safe(qr, qr_tmp, &conn->immed_queue_list, qr_list) {
699                 if (qr->cmd != cmd)
700                         continue;
701
702                 atomic_dec(&qr->cmd->immed_queue_count);
703                 list_del(&qr->qr_list);
704                 kmem_cache_free(lio_qr_cache, qr);
705         }
706         spin_unlock_bh(&conn->immed_queue_lock);
707
708         if (atomic_read(&cmd->immed_queue_count)) {
709                 pr_err("ITT: 0x%08x immed_queue_count: %d\n",
710                         cmd->init_task_tag,
711                         atomic_read(&cmd->immed_queue_count));
712         }
713 }
714
715 void iscsit_add_cmd_to_response_queue(
716         struct iscsi_cmd *cmd,
717         struct iscsi_conn *conn,
718         u8 state)
719 {
720         struct iscsi_queue_req *qr;
721
722         qr = kmem_cache_zalloc(lio_qr_cache, GFP_ATOMIC);
723         if (!qr) {
724                 pr_err("Unable to allocate memory for"
725                         " struct iscsi_queue_req\n");
726                 return;
727         }
728         INIT_LIST_HEAD(&qr->qr_list);
729         qr->cmd = cmd;
730         qr->state = state;
731
732         spin_lock_bh(&conn->response_queue_lock);
733         list_add_tail(&qr->qr_list, &conn->response_queue_list);
734         atomic_inc(&cmd->response_queue_count);
735         spin_unlock_bh(&conn->response_queue_lock);
736
737         wake_up_process(conn->thread_set->tx_thread);
738 }
739
740 struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *conn)
741 {
742         struct iscsi_queue_req *qr;
743
744         spin_lock_bh(&conn->response_queue_lock);
745         if (list_empty(&conn->response_queue_list)) {
746                 spin_unlock_bh(&conn->response_queue_lock);
747                 return NULL;
748         }
749
750         list_for_each_entry(qr, &conn->response_queue_list, qr_list)
751                 break;
752
753         list_del(&qr->qr_list);
754         if (qr->cmd)
755                 atomic_dec(&qr->cmd->response_queue_count);
756         spin_unlock_bh(&conn->response_queue_lock);
757
758         return qr;
759 }
760
761 static void iscsit_remove_cmd_from_response_queue(
762         struct iscsi_cmd *cmd,
763         struct iscsi_conn *conn)
764 {
765         struct iscsi_queue_req *qr, *qr_tmp;
766
767         spin_lock_bh(&conn->response_queue_lock);
768         if (!atomic_read(&cmd->response_queue_count)) {
769                 spin_unlock_bh(&conn->response_queue_lock);
770                 return;
771         }
772
773         list_for_each_entry_safe(qr, qr_tmp, &conn->response_queue_list,
774                                 qr_list) {
775                 if (qr->cmd != cmd)
776                         continue;
777
778                 atomic_dec(&qr->cmd->response_queue_count);
779                 list_del(&qr->qr_list);
780                 kmem_cache_free(lio_qr_cache, qr);
781         }
782         spin_unlock_bh(&conn->response_queue_lock);
783
784         if (atomic_read(&cmd->response_queue_count)) {
785                 pr_err("ITT: 0x%08x response_queue_count: %d\n",
786                         cmd->init_task_tag,
787                         atomic_read(&cmd->response_queue_count));
788         }
789 }
790
791 void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *conn)
792 {
793         struct iscsi_queue_req *qr, *qr_tmp;
794
795         spin_lock_bh(&conn->immed_queue_lock);
796         list_for_each_entry_safe(qr, qr_tmp, &conn->immed_queue_list, qr_list) {
797                 list_del(&qr->qr_list);
798                 if (qr->cmd)
799                         atomic_dec(&qr->cmd->immed_queue_count);
800
801                 kmem_cache_free(lio_qr_cache, qr);
802         }
803         spin_unlock_bh(&conn->immed_queue_lock);
804
805         spin_lock_bh(&conn->response_queue_lock);
806         list_for_each_entry_safe(qr, qr_tmp, &conn->response_queue_list,
807                         qr_list) {
808                 list_del(&qr->qr_list);
809                 if (qr->cmd)
810                         atomic_dec(&qr->cmd->response_queue_count);
811
812                 kmem_cache_free(lio_qr_cache, qr);
813         }
814         spin_unlock_bh(&conn->response_queue_lock);
815 }
816
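/*
 *      Release all per-command resources: queued R2Ts and DataIN requests,
 *      the PDU/sequence lists, iovecs, any internally allocated
 *      scatterlist pages, and finally the struct iscsi_cmd itself.
 */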
817 void iscsit_release_cmd(struct iscsi_cmd *cmd)
818 {
819         struct iscsi_conn *conn = cmd->conn;
820         int i;
821
822         iscsit_free_r2ts_from_list(cmd);
823         iscsit_free_all_datain_reqs(cmd);
824
825         kfree(cmd->buf_ptr);
826         kfree(cmd->pdu_list);
827         kfree(cmd->seq_list);
828         kfree(cmd->tmr_req);
829         kfree(cmd->iov_data);
830
831         for (i = 0; i < cmd->t_mem_sg_nents; i++)
832                 __free_page(sg_page(&cmd->t_mem_sg[i]));
833
834         kfree(cmd->t_mem_sg);
835
836         if (conn) {
837                 iscsit_remove_cmd_from_immediate_queue(cmd, conn);
838                 iscsit_remove_cmd_from_response_queue(cmd, conn);
839         }
840
841         kmem_cache_free(lio_cmd_cache, cmd);
842 }
843
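/*
 *      If the session still has outstanding references, mark it as waiting
 *      and sleep until the last reference is dropped. Returns 2 when called
 *      from interrupt context (no sleeping), 1 after waiting, and 0 when
 *      there were no outstanding references.
 */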
844 int iscsit_check_session_usage_count(struct iscsi_session *sess)
845 {
846         spin_lock_bh(&sess->session_usage_lock);
847         if (sess->session_usage_count != 0) {
848                 sess->session_waiting_on_uc = 1;
849                 spin_unlock_bh(&sess->session_usage_lock);
850                 if (in_interrupt())
851                         return 2;
852
853                 wait_for_completion(&sess->session_waiting_on_uc_comp);
854                 return 1;
855         }
856         spin_unlock_bh(&sess->session_usage_lock);
857
858         return 0;
859 }
860
861 void iscsit_dec_session_usage_count(struct iscsi_session *sess)
862 {
863         spin_lock_bh(&sess->session_usage_lock);
864         sess->session_usage_count--;
865
866         if (!sess->session_usage_count && sess->session_waiting_on_uc)
867                 complete(&sess->session_waiting_on_uc_comp);
868
869         spin_unlock_bh(&sess->session_usage_lock);
870 }
871
872 void iscsit_inc_session_usage_count(struct iscsi_session *sess)
873 {
874         spin_lock_bh(&sess->session_usage_lock);
875         sess->session_usage_count++;
876         spin_unlock_bh(&sess->session_usage_lock);
877 }
878
879 /*
880  *      Used before iscsi_do[rx,tx]_data() to determine iov and [rx,tx]_marker
881  *      array counts needed for sync and steering.
882  */
883 static int iscsit_determine_sync_and_steering_counts(
884         struct iscsi_conn *conn,
885         struct iscsi_data_count *count)
886 {
887         u32 length = count->data_length;
888         u32 marker, markint;
889
890         count->sync_and_steering = 1;
891
892         marker = (count->type == ISCSI_RX_DATA) ?
893                         conn->of_marker : conn->if_marker;
894         markint = (count->type == ISCSI_RX_DATA) ?
895                         (conn->conn_ops->OFMarkInt * 4) :
896                         (conn->conn_ops->IFMarkInt * 4);
897         count->ss_iov_count = count->iov_count;
898
899         while (length > 0) {
900                 if (length >= marker) {
901                         count->ss_iov_count += 3;
902                         count->ss_marker_count += 2;
903
904                         length -= marker;
905                         marker = markint;
906                 } else
907                         length = 0;
908         }
909
910         return 0;
911 }
912
913 /*
914  *      Setup conn->if_marker and conn->of_marker values based upon
915  *      the initial marker-less interval. (see iSCSI v19 A.2)
916  */
917 int iscsit_set_sync_and_steering_values(struct iscsi_conn *conn)
918 {
919         int login_ifmarker_count = 0, login_ofmarker_count = 0, next_marker = 0;
920         /*
921          * IFMarkInt and OFMarkInt are negotiated as 32-bit words.
922          */
923         u32 IFMarkInt = (conn->conn_ops->IFMarkInt * 4);
924         u32 OFMarkInt = (conn->conn_ops->OFMarkInt * 4);
925
926         if (conn->conn_ops->OFMarker) {
927                 /*
928                  * Account for the first Login Command received not
929                  * via iscsi_recv_msg().
930                  */
931                 conn->of_marker += ISCSI_HDR_LEN;
932                 if (conn->of_marker <= OFMarkInt) {
933                         conn->of_marker = (OFMarkInt - conn->of_marker);
934                 } else {
935                         login_ofmarker_count = (conn->of_marker / OFMarkInt);
936                         next_marker = (OFMarkInt * (login_ofmarker_count + 1)) +
937                                         (login_ofmarker_count * MARKER_SIZE);
938                         conn->of_marker = (next_marker - conn->of_marker);
939                 }
940                 conn->of_marker_offset = 0;
941                 pr_debug("Setting OFMarker value to %u based on Initial"
942                         " Markerless Interval.\n", conn->of_marker);
943         }
944
945         if (conn->conn_ops->IFMarker) {
946                 if (conn->if_marker <= IFMarkInt) {
947                         conn->if_marker = (IFMarkInt - conn->if_marker);
948                 } else {
949                         login_ifmarker_count = (conn->if_marker / IFMarkInt);
950                         next_marker = (IFMarkInt * (login_ifmarker_count + 1)) +
951                                         (login_ifmarker_count * MARKER_SIZE);
952                         conn->if_marker = (next_marker - conn->if_marker);
953                 }
954                 pr_debug("Setting IFMarker value to %u based on Initial"
955                         " Markerless Interval.\n", conn->if_marker);
956         }
957
958         return 0;
959 }
960
961 struct iscsi_conn *iscsit_get_conn_from_cid(struct iscsi_session *sess, u16 cid)
962 {
963         struct iscsi_conn *conn;
964
965         spin_lock_bh(&sess->conn_lock);
966         list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
967                 if ((conn->cid == cid) &&
968                     (conn->conn_state == TARG_CONN_STATE_LOGGED_IN)) {
969                         iscsit_inc_conn_usage_count(conn);
970                         spin_unlock_bh(&sess->conn_lock);
971                         return conn;
972                 }
973         }
974         spin_unlock_bh(&sess->conn_lock);
975
976         return NULL;
977 }
978
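/*
 *      Variant of iscsit_get_conn_from_cid() used during connection
 *      recovery: the connection state is not checked and
 *      connection_wait_rcfr is flagged before the usage count is taken.
 */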
979 struct iscsi_conn *iscsit_get_conn_from_cid_rcfr(struct iscsi_session *sess, u16 cid)
980 {
981         struct iscsi_conn *conn;
982
983         spin_lock_bh(&sess->conn_lock);
984         list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
985                 if (conn->cid == cid) {
986                         iscsit_inc_conn_usage_count(conn);
987                         spin_lock(&conn->state_lock);
988                         atomic_set(&conn->connection_wait_rcfr, 1);
989                         spin_unlock(&conn->state_lock);
990                         spin_unlock_bh(&sess->conn_lock);
991                         return conn;
992                 }
993         }
994         spin_unlock_bh(&sess->conn_lock);
995
996         return NULL;
997 }
998
999 void iscsit_check_conn_usage_count(struct iscsi_conn *conn)
1000 {
1001         spin_lock_bh(&conn->conn_usage_lock);
1002         if (conn->conn_usage_count != 0) {
1003                 conn->conn_waiting_on_uc = 1;
1004                 spin_unlock_bh(&conn->conn_usage_lock);
1005
1006                 wait_for_completion(&conn->conn_waiting_on_uc_comp);
1007                 return;
1008         }
1009         spin_unlock_bh(&conn->conn_usage_lock);
1010 }
1011
1012 void iscsit_dec_conn_usage_count(struct iscsi_conn *conn)
1013 {
1014         spin_lock_bh(&conn->conn_usage_lock);
1015         conn->conn_usage_count--;
1016
1017         if (!conn->conn_usage_count && conn->conn_waiting_on_uc)
1018                 complete(&conn->conn_waiting_on_uc_comp);
1019
1020         spin_unlock_bh(&conn->conn_usage_lock);
1021 }
1022
1023 void iscsit_inc_conn_usage_count(struct iscsi_conn *conn)
1024 {
1025         spin_lock_bh(&conn->conn_usage_lock);
1026         conn->conn_usage_count++;
1027         spin_unlock_bh(&conn->conn_usage_lock);
1028 }
1029
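/*
 *      Allocate a NopIN and queue it for transmission. When a response is
 *      expected, a real Target Transfer Tag is reserved (skipping the
 *      reserved value 0xFFFFFFFF) and the NopIN response timer is started;
 *      otherwise the TTT is set to 0xFFFFFFFF.
 */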
1030 static int iscsit_add_nopin(struct iscsi_conn *conn, int want_response)
1031 {
1032         u8 state;
1033         struct iscsi_cmd *cmd;
1034
1035         cmd = iscsit_allocate_cmd(conn, GFP_ATOMIC);
1036         if (!cmd)
1037                 return -1;
1038
1039         cmd->iscsi_opcode = ISCSI_OP_NOOP_IN;
1040         state = (want_response) ? ISTATE_SEND_NOPIN_WANT_RESPONSE :
1041                                 ISTATE_SEND_NOPIN_NO_RESPONSE;
1042         cmd->init_task_tag = 0xFFFFFFFF;
1043         spin_lock_bh(&conn->sess->ttt_lock);
1044         cmd->targ_xfer_tag = (want_response) ? conn->sess->targ_xfer_tag++ :
1045                         0xFFFFFFFF;
1046         if (want_response && (cmd->targ_xfer_tag == 0xFFFFFFFF))
1047                 cmd->targ_xfer_tag = conn->sess->targ_xfer_tag++;
1048         spin_unlock_bh(&conn->sess->ttt_lock);
1049
1050         spin_lock_bh(&conn->cmd_lock);
1051         list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
1052         spin_unlock_bh(&conn->cmd_lock);
1053
1054         if (want_response)
1055                 iscsit_start_nopin_response_timer(conn);
1056         iscsit_add_cmd_to_immediate_queue(cmd, conn, state);
1057
1058         return 0;
1059 }
1060
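/*
 *      NopIN response timer expiry: the initiator did not answer a NopIN
 *      in time, so record a connection timeout in the session error stats
 *      and force connection reinstatement.
 */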
1061 static void iscsit_handle_nopin_response_timeout(unsigned long data)
1062 {
1063         struct iscsi_conn *conn = (struct iscsi_conn *) data;
1064
1065         iscsit_inc_conn_usage_count(conn);
1066
1067         spin_lock_bh(&conn->nopin_timer_lock);
1068         if (conn->nopin_response_timer_flags & ISCSI_TF_STOP) {
1069                 spin_unlock_bh(&conn->nopin_timer_lock);
1070                 iscsit_dec_conn_usage_count(conn);
1071                 return;
1072         }
1073
1074         pr_debug("Did not receive response to NOPIN on CID: %hu on"
1075                 " SID: %u, failing connection.\n", conn->cid,
1076                         conn->sess->sid);
1077         conn->nopin_response_timer_flags &= ~ISCSI_TF_RUNNING;
1078         spin_unlock_bh(&conn->nopin_timer_lock);
1079
1080         {
1081         struct iscsi_portal_group *tpg = conn->sess->tpg;
1082         struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
1083
1084         if (tiqn) {
1085                 spin_lock_bh(&tiqn->sess_err_stats.lock);
1086                 strcpy(tiqn->sess_err_stats.last_sess_fail_rem_name,
1087                                 (void *)conn->sess->sess_ops->InitiatorName);
1088                 tiqn->sess_err_stats.last_sess_failure_type =
1089                                 ISCSI_SESS_ERR_CXN_TIMEOUT;
1090                 tiqn->sess_err_stats.cxn_timeout_errors++;
1091                 conn->sess->conn_timeout_errors++;
1092                 spin_unlock_bh(&tiqn->sess_err_stats.lock);
1093         }
1094         }
1095
1096         iscsit_cause_connection_reinstatement(conn, 0);
1097         iscsit_dec_conn_usage_count(conn);
1098 }
1099
1100 void iscsit_mod_nopin_response_timer(struct iscsi_conn *conn)
1101 {
1102         struct iscsi_session *sess = conn->sess;
1103         struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
1104
1105         spin_lock_bh(&conn->nopin_timer_lock);
1106         if (!(conn->nopin_response_timer_flags & ISCSI_TF_RUNNING)) {
1107                 spin_unlock_bh(&conn->nopin_timer_lock);
1108                 return;
1109         }
1110
1111         mod_timer(&conn->nopin_response_timer,
1112                 (get_jiffies_64() + na->nopin_response_timeout * HZ));
1113         spin_unlock_bh(&conn->nopin_timer_lock);
1114 }
1115
1116 /*
1117  *      Called with conn->nopin_timer_lock held.
1118  */
1119 void iscsit_start_nopin_response_timer(struct iscsi_conn *conn)
1120 {
1121         struct iscsi_session *sess = conn->sess;
1122         struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
1123
1124         spin_lock_bh(&conn->nopin_timer_lock);
1125         if (conn->nopin_response_timer_flags & ISCSI_TF_RUNNING) {
1126                 spin_unlock_bh(&conn->nopin_timer_lock);
1127                 return;
1128         }
1129
1130         init_timer(&conn->nopin_response_timer);
1131         conn->nopin_response_timer.expires =
1132                 (get_jiffies_64() + na->nopin_response_timeout * HZ);
1133         conn->nopin_response_timer.data = (unsigned long)conn;
1134         conn->nopin_response_timer.function = iscsit_handle_nopin_response_timeout;
1135         conn->nopin_response_timer_flags &= ~ISCSI_TF_STOP;
1136         conn->nopin_response_timer_flags |= ISCSI_TF_RUNNING;
1137         add_timer(&conn->nopin_response_timer);
1138
1139         pr_debug("Started NOPIN Response Timer on CID: %d to %u"
1140                 " seconds\n", conn->cid, na->nopin_response_timeout);
1141         spin_unlock_bh(&conn->nopin_timer_lock);
1142 }
1143
1144 void iscsit_stop_nopin_response_timer(struct iscsi_conn *conn)
1145 {
1146         spin_lock_bh(&conn->nopin_timer_lock);
1147         if (!(conn->nopin_response_timer_flags & ISCSI_TF_RUNNING)) {
1148                 spin_unlock_bh(&conn->nopin_timer_lock);
1149                 return;
1150         }
1151         conn->nopin_response_timer_flags |= ISCSI_TF_STOP;
1152         spin_unlock_bh(&conn->nopin_timer_lock);
1153
1154         del_timer_sync(&conn->nopin_response_timer);
1155
1156         spin_lock_bh(&conn->nopin_timer_lock);
1157         conn->nopin_response_timer_flags &= ~ISCSI_TF_RUNNING;
1158         spin_unlock_bh(&conn->nopin_timer_lock);
1159 }
1160
1161 static void iscsit_handle_nopin_timeout(unsigned long data)
1162 {
1163         struct iscsi_conn *conn = (struct iscsi_conn *) data;
1164
1165         iscsit_inc_conn_usage_count(conn);
1166
1167         spin_lock_bh(&conn->nopin_timer_lock);
1168         if (conn->nopin_timer_flags & ISCSI_TF_STOP) {
1169                 spin_unlock_bh(&conn->nopin_timer_lock);
1170                 iscsit_dec_conn_usage_count(conn);
1171                 return;
1172         }
1173         conn->nopin_timer_flags &= ~ISCSI_TF_RUNNING;
1174         spin_unlock_bh(&conn->nopin_timer_lock);
1175
1176         iscsit_add_nopin(conn, 1);
1177         iscsit_dec_conn_usage_count(conn);
1178 }
1179
1180 /*
1181  * Called with conn->nopin_timer_lock held.
1182  */
1183 void __iscsit_start_nopin_timer(struct iscsi_conn *conn)
1184 {
1185         struct iscsi_session *sess = conn->sess;
1186         struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
1187         /*
1188          * NOPIN timeout is disabled.
1189          */
1190         if (!na->nopin_timeout)
1191                 return;
1192
1193         if (conn->nopin_timer_flags & ISCSI_TF_RUNNING)
1194                 return;
1195
1196         init_timer(&conn->nopin_timer);
1197         conn->nopin_timer.expires = (get_jiffies_64() + na->nopin_timeout * HZ);
1198         conn->nopin_timer.data = (unsigned long)conn;
1199         conn->nopin_timer.function = iscsit_handle_nopin_timeout;
1200         conn->nopin_timer_flags &= ~ISCSI_TF_STOP;
1201         conn->nopin_timer_flags |= ISCSI_TF_RUNNING;
1202         add_timer(&conn->nopin_timer);
1203
1204         pr_debug("Started NOPIN Timer on CID: %d at %u second"
1205                 " interval\n", conn->cid, na->nopin_timeout);
1206 }
1207
1208 void iscsit_start_nopin_timer(struct iscsi_conn *conn)
1209 {
1210         struct iscsi_session *sess = conn->sess;
1211         struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
1212         /*
1213          * NOPIN timeout is disabled..
1214          * NOPIN timeout is disabled.
1215         if (!na->nopin_timeout)
1216                 return;
1217
1218         spin_lock_bh(&conn->nopin_timer_lock);
1219         if (conn->nopin_timer_flags & ISCSI_TF_RUNNING) {
1220                 spin_unlock_bh(&conn->nopin_timer_lock);
1221                 return;
1222         }
1223
1224         init_timer(&conn->nopin_timer);
1225         conn->nopin_timer.expires = (get_jiffies_64() + na->nopin_timeout * HZ);
1226         conn->nopin_timer.data = (unsigned long)conn;
1227         conn->nopin_timer.function = iscsit_handle_nopin_timeout;
1228         conn->nopin_timer_flags &= ~ISCSI_TF_STOP;
1229         conn->nopin_timer_flags |= ISCSI_TF_RUNNING;
1230         add_timer(&conn->nopin_timer);
1231
1232         pr_debug("Started NOPIN Timer on CID: %d at %u second"
1233                         " interval\n", conn->cid, na->nopin_timeout);
1234         spin_unlock_bh(&conn->nopin_timer_lock);
1235 }
1236
1237 void iscsit_stop_nopin_timer(struct iscsi_conn *conn)
1238 {
1239         spin_lock_bh(&conn->nopin_timer_lock);
1240         if (!(conn->nopin_timer_flags & ISCSI_TF_RUNNING)) {
1241                 spin_unlock_bh(&conn->nopin_timer_lock);
1242                 return;
1243         }
1244         conn->nopin_timer_flags |= ISCSI_TF_STOP;
1245         spin_unlock_bh(&conn->nopin_timer_lock);
1246
1247         del_timer_sync(&conn->nopin_timer);
1248
1249         spin_lock_bh(&conn->nopin_timer_lock);
1250         conn->nopin_timer_flags &= ~ISCSI_TF_RUNNING;
1251         spin_unlock_bh(&conn->nopin_timer_lock);
1252 }
1253
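/*
 *      Transmit cmd->iov_data (or cmd->iov_misc when use_misc is set) in
 *      full, retrying on -EAGAIN. Returns 0 once cmd->tx_size bytes have
 *      been sent, -1 on any other error.
 */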
1254 int iscsit_send_tx_data(
1255         struct iscsi_cmd *cmd,
1256         struct iscsi_conn *conn,
1257         int use_misc)
1258 {
1259         int tx_sent, tx_size;
1260         u32 iov_count;
1261         struct kvec *iov;
1262
1263 send_data:
1264         tx_size = cmd->tx_size;
1265
1266         if (!use_misc) {
1267                 iov = &cmd->iov_data[0];
1268                 iov_count = cmd->iov_data_count;
1269         } else {
1270                 iov = &cmd->iov_misc[0];
1271                 iov_count = cmd->iov_misc_count;
1272         }
1273
1274         tx_sent = tx_data(conn, &iov[0], iov_count, tx_size);
1275         if (tx_size != tx_sent) {
1276                 if (tx_sent == -EAGAIN) {
1277                         pr_err("tx_data() returned -EAGAIN\n");
1278                         goto send_data;
1279                 } else
1280                         return -1;
1281         }
1282         cmd->tx_size = 0;
1283
1284         return 0;
1285 }
1286
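/*
 *      Zero-copy transmit path: send the PDU header with tx_data(), then
 *      push the payload pages straight from the command's scatterlist via
 *      sendpage(), followed by any pad bytes and the DataDigest CRC.
 */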
1287 int iscsit_fe_sendpage_sg(
1288         struct iscsi_cmd *cmd,
1289         struct iscsi_conn *conn)
1290 {
1291         struct scatterlist *sg = cmd->first_data_sg;
1292         struct kvec iov;
1293         u32 tx_hdr_size, data_len;
1294         u32 offset = cmd->first_data_sg_off;
1295         int tx_sent;
1296
1297 send_hdr:
1298         tx_hdr_size = ISCSI_HDR_LEN;
1299         if (conn->conn_ops->HeaderDigest)
1300                 tx_hdr_size += ISCSI_CRC_LEN;
1301
1302         iov.iov_base = cmd->pdu;
1303         iov.iov_len = tx_hdr_size;
1304
1305         tx_sent = tx_data(conn, &iov, 1, tx_hdr_size);
1306         if (tx_hdr_size != tx_sent) {
1307                 if (tx_sent == -EAGAIN) {
1308                         pr_err("tx_data() returned -EAGAIN\n");
1309                         goto send_hdr;
1310                 }
1311                 return -1;
1312         }
1313
1314         data_len = cmd->tx_size - tx_hdr_size - cmd->padding;
1315         if (conn->conn_ops->DataDigest)
1316                 data_len -= ISCSI_CRC_LEN;
1317
1318         /*
1319          * Perform sendpage() for each page in the scatterlist
1320          */
1321         while (data_len) {
1322                 u32 space = (sg->length - offset);
1323                 u32 sub_len = min_t(u32, data_len, space);
1324 send_pg:
1325                 tx_sent = conn->sock->ops->sendpage(conn->sock,
1326                                         sg_page(sg), sg->offset + offset, sub_len, 0);
1327                 if (tx_sent != sub_len) {
1328                         if (tx_sent == -EAGAIN) {
1329                                 pr_err("tcp_sendpage() returned"
1330                                                 " -EAGAIN\n");
1331                                 goto send_pg;
1332                         }
1333
1334                         pr_err("tcp_sendpage() failure: %d\n",
1335                                         tx_sent);
1336                         return -1;
1337                 }
1338
1339                 data_len -= sub_len;
1340                 offset = 0;
1341                 sg = sg_next(sg);
1342         }
1343
1344 send_padding:
1345         if (cmd->padding) {
1346                 struct kvec *iov_p =
1347                         &cmd->iov_data[cmd->iov_data_count-1];
1348
1349                 tx_sent = tx_data(conn, iov_p, 1, cmd->padding);
1350                 if (cmd->padding != tx_sent) {
1351                         if (tx_sent == -EAGAIN) {
1352                                 pr_err("tx_data() returned -EAGAIN\n");
1353                                 goto send_padding;
1354                         }
1355                         return -1;
1356                 }
1357         }
1358
1359 send_datacrc:
1360         if (conn->conn_ops->DataDigest) {
1361                 struct kvec *iov_d =
1362                         &cmd->iov_data[cmd->iov_data_count];
1363
1364                 tx_sent = tx_data(conn, iov_d, 1, ISCSI_CRC_LEN);
1365                 if (ISCSI_CRC_LEN != tx_sent) {
1366                         if (tx_sent == -EAGAIN) {
1367                                 pr_err("tx_data() returned -EAGAIN\n");
1368                                 goto send_datacrc;
1369                         }
1370                         return -1;
1371                 }
1372         }
1373
1374         return 0;
1375 }
1376
1377 /*
1378  *      This function is mainly used for sending an ISCSI_TARG_LOGIN_RSP PDU
1379  *      back to the Initiator when an exception condition occurs, with the
1380  *      errors set in status_class and status_detail.
1381  *
1382  *      Parameters:     iSCSI Connection, Status Class, Status Detail.
1383  *      Returns:        0 on success, -1 on error.
1384  */
1385 int iscsit_tx_login_rsp(struct iscsi_conn *conn, u8 status_class, u8 status_detail)
1386 {
1387         u8 iscsi_hdr[ISCSI_HDR_LEN];
1388         int err;
1389         struct kvec iov;
1390         struct iscsi_login_rsp *hdr;
1391
1392         iscsit_collect_login_stats(conn, status_class, status_detail);
1393
1394         memset(&iov, 0, sizeof(struct kvec));
1395         memset(&iscsi_hdr, 0x0, ISCSI_HDR_LEN);
1396
1397         hdr     = (struct iscsi_login_rsp *)&iscsi_hdr;
1398         hdr->opcode             = ISCSI_OP_LOGIN_RSP;
1399         hdr->status_class       = status_class;
1400         hdr->status_detail      = status_detail;
1401         hdr->itt                = cpu_to_be32(conn->login_itt);
1402
1403         iov.iov_base            = &iscsi_hdr;
1404         iov.iov_len             = ISCSI_HDR_LEN;
1405
1406         PRINT_BUFF(iscsi_hdr, ISCSI_HDR_LEN);
1407
1408         err = tx_data(conn, &iov, 1, ISCSI_HDR_LEN);
1409         if (err != ISCSI_HDR_LEN) {
1410                 pr_err("tx_data returned less than expected\n");
1411                 return -1;
1412         }
1413
1414         return 0;
1415 }
1416
1417 void iscsit_print_session_params(struct iscsi_session *sess)
1418 {
1419         struct iscsi_conn *conn;
1420
1421         pr_debug("-----------------------------[Session Params for"
1422                 " SID: %u]-----------------------------\n", sess->sid);
1423         spin_lock_bh(&sess->conn_lock);
1424         list_for_each_entry(conn, &sess->sess_conn_list, conn_list)
1425                 iscsi_dump_conn_ops(conn->conn_ops);
1426         spin_unlock_bh(&sess->conn_lock);
1427
1428         iscsi_dump_sess_ops(sess->sess_ops);
1429 }
1430
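/*
 *      Receive count->data_length bytes into the supplied iovecs. When
 *      sync and steering is active, a new iovec array is built on the
 *      stack so that the periodic sync-and-steering markers are received
 *      into rx_marker_val[] instead of the caller's buffers.
 */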
1431 static int iscsit_do_rx_data(
1432         struct iscsi_conn *conn,
1433         struct iscsi_data_count *count)
1434 {
1435         int data = count->data_length, rx_loop = 0, total_rx = 0, iov_len;
1436         u32 rx_marker_val[count->ss_marker_count], rx_marker_iov = 0;
1437         struct kvec iov[count->ss_iov_count], *iov_p;
1438         struct msghdr msg;
1439
1440         if (!conn || !conn->sock || !conn->conn_ops)
1441                 return -1;
1442
1443         memset(&msg, 0, sizeof(struct msghdr));
1444
1445         if (count->sync_and_steering) {
1446                 int size = 0;
1447                 u32 i, orig_iov_count = 0;
1448                 u32 orig_iov_len = 0, orig_iov_loc = 0;
1449                 u32 iov_count = 0, per_iov_bytes = 0;
1450                 u32 *rx_marker, old_rx_marker = 0;
1451                 struct kvec *iov_record;
1452
1453                 memset(&rx_marker_val, 0,
1454                                 count->ss_marker_count * sizeof(u32));
1455                 memset(&iov, 0, count->ss_iov_count * sizeof(struct kvec));
1456
1457                 iov_record = count->iov;
1458                 orig_iov_count = count->iov_count;
1459                 rx_marker = &conn->of_marker;
1460
1461                 i = 0;
1462                 size = data;
1463                 orig_iov_len = iov_record[orig_iov_loc].iov_len;
1464                 while (size > 0) {
1465                         pr_debug("rx_data: #1 orig_iov_len %u,"
1466                         " orig_iov_loc %u\n", orig_iov_len, orig_iov_loc);
1467                         pr_debug("rx_data: #2 rx_marker %u, size"
1468                                 " %u\n", *rx_marker, size);
1469
1470                         if (orig_iov_len >= *rx_marker) {
1471                                 iov[iov_count].iov_len = *rx_marker;
1472                                 iov[iov_count++].iov_base =
1473                                         (iov_record[orig_iov_loc].iov_base +
1474                                                 per_iov_bytes);
1475
1476                                 iov[iov_count].iov_len = (MARKER_SIZE / 2);
1477                                 iov[iov_count++].iov_base =
1478                                         &rx_marker_val[rx_marker_iov++];
1479                                 iov[iov_count].iov_len = (MARKER_SIZE / 2);
1480                                 iov[iov_count++].iov_base =
1481                                         &rx_marker_val[rx_marker_iov++];
1482                                 old_rx_marker = *rx_marker;
1483
1484                                 /*
1485                                  * OFMarkInt is in 32-bit words.
1486                                  */
1487                                 *rx_marker = (conn->conn_ops->OFMarkInt * 4);
1488                                 size -= old_rx_marker;
1489                                 orig_iov_len -= old_rx_marker;
1490                                 per_iov_bytes += old_rx_marker;
1491
1492                                 pr_debug("rx_data: #3 new_rx_marker"
1493                                         " %u, size %u\n", *rx_marker, size);
1494                         } else {
1495                                 iov[iov_count].iov_len = orig_iov_len;
1496                                 iov[iov_count++].iov_base =
1497                                         (iov_record[orig_iov_loc].iov_base +
1498                                                 per_iov_bytes);
1499
1500                                 per_iov_bytes = 0;
1501                                 *rx_marker -= orig_iov_len;
1502                                 size -= orig_iov_len;
1503
1504                                 if (size)
1505                                         orig_iov_len =
1506                                         iov_record[++orig_iov_loc].iov_len;
1507
1508                                 pr_debug("rx_data: #4 new_rx_marker"
1509                                         " %u, size %u\n", *rx_marker, size);
1510                         }
1511                 }
1512                 data += (rx_marker_iov * (MARKER_SIZE / 2));
1513
1514                 iov_p   = &iov[0];
1515                 iov_len = iov_count;
1516
1517                 if (iov_count > count->ss_iov_count) {
1518                         pr_err("iov_count: %d, count->ss_iov_count:"
1519                                 " %d\n", iov_count, count->ss_iov_count);
1520                         return -1;
1521                 }
1522                 if (rx_marker_iov > count->ss_marker_count) {
1523                         pr_err("rx_marker_iov: %d, count->ss_marker"
1524                                 "_count: %d\n", rx_marker_iov,
1525                                 count->ss_marker_count);
1526                         return -1;
1527                 }
1528         } else {
1529                 iov_p = count->iov;
1530                 iov_len = count->iov_count;
1531         }
1532
1533         while (total_rx < data) {
1534                 rx_loop = kernel_recvmsg(conn->sock, &msg, iov_p, iov_len,
1535                                         (data - total_rx), MSG_WAITALL);
1536                 if (rx_loop <= 0) {
1537                         pr_debug("rx_loop: %d total_rx: %d\n",
1538                                 rx_loop, total_rx);
1539                         return rx_loop;
1540                 }
1541                 total_rx += rx_loop;
1542                 pr_debug("rx_loop: %d, total_rx: %d, data: %d\n",
1543                                 rx_loop, total_rx, data);
1544         }
1545
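        /*
         * Strip the markers back out of the accounting: remember the
         * most recently received marker value in conn->of_marker_offset
         * and report only the payload bytes to the caller.
         */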
1546         if (count->sync_and_steering) {
1547                 u32 j;
1548                 for (j = 0; j < rx_marker_iov; j++) {
1549                         pr_debug("rx_data: #5 j: %d, offset: %d\n",
1550                                 j, rx_marker_val[j]);
1551                         conn->of_marker_offset = rx_marker_val[j];
1552                 }
1553                 total_rx -= (rx_marker_iov * (MARKER_SIZE / 2));
1554         }
1555
1556         return total_rx;
1557 }
1558
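/*
 * Transmit-side counterpart of iscsit_do_rx_data().  With Sync and
 * Steering active, a MARKER_SIZE marker is inserted into the outgoing
 * iovec every IFMarkInt * 4 bytes of payload, with conn->if_marker as
 * the running countdown; the marker bytes are added to the amount handed
 * to kernel_sendmsg() and subtracted again from the value returned to
 * the caller.
 */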
1559 static int iscsit_do_tx_data(
1560         struct iscsi_conn *conn,
1561         struct iscsi_data_count *count)
1562 {
1563         int data = count->data_length, total_tx = 0, tx_loop = 0, iov_len;
1564         u32 tx_marker_val[count->ss_marker_count], tx_marker_iov = 0;
1565         struct kvec iov[count->ss_iov_count], *iov_p;
1566         struct msghdr msg;
1567
1568         if (!conn || !conn->sock || !conn->conn_ops)
1569                 return -1;
1570
1571         if (data <= 0) {
1572                 pr_err("Data length is: %d\n", data);
1573                 return -1;
1574         }
1575
1576         memset(&msg, 0, sizeof(struct msghdr));
1577
1578         if (count->sync_and_steering) {
1579                 int size = 0;
1580                 u32 i, orig_iov_count = 0;
1581                 u32 orig_iov_len = 0, orig_iov_loc = 0;
1582                 u32 iov_count = 0, per_iov_bytes = 0;
1583                 u32 *tx_marker, old_tx_marker = 0;
1584                 struct kvec *iov_record;
1585
1586                 memset(&tx_marker_val, 0,
1587                         count->ss_marker_count * sizeof(u32));
1588                 memset(&iov, 0, count->ss_iov_count * sizeof(struct kvec));
1589
1590                 iov_record = count->iov;
1591                 orig_iov_count = count->iov_count;
1592                 tx_marker = &conn->if_marker;
1593
1594                 i = 0;
1595                 size = data;
1596                 orig_iov_len = iov_record[orig_iov_loc].iov_len;
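                /*
                 * Same split as the receive side, except the marker
                 * contents are generated here: the even-indexed
                 * tx_marker_val[] slot carries (size - *tx_marker), the
                 * payload bytes still to be sent past the marker point,
                 * while the odd slot keeps the zero from the memset()
                 * above.  The countdown restarts at IFMarkInt * 4 after
                 * every marker.
                 */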
1597                 while (size > 0) {
1598                         pr_debug("tx_data: #1 orig_iov_len %u,"
1599                                 " orig_iov_loc %u\n", orig_iov_len, orig_iov_loc);
1600                         pr_debug("tx_data: #2 tx_marker %u, size"
1601                                 " %u\n", *tx_marker, size);
1602
1603                         if (orig_iov_len >= *tx_marker) {
1604                                 iov[iov_count].iov_len = *tx_marker;
1605                                 iov[iov_count++].iov_base =
1606                                         (iov_record[orig_iov_loc].iov_base +
1607                                                 per_iov_bytes);
1608
1609                                 tx_marker_val[tx_marker_iov] =
1610                                                 (size - *tx_marker);
1611                                 iov[iov_count].iov_len = (MARKER_SIZE / 2);
1612                                 iov[iov_count++].iov_base =
1613                                         &tx_marker_val[tx_marker_iov++];
1614                                 iov[iov_count].iov_len = (MARKER_SIZE / 2);
1615                                 iov[iov_count++].iov_base =
1616                                         &tx_marker_val[tx_marker_iov++];
1617                                 old_tx_marker = *tx_marker;
1618
1619                                 /*
1620                                  * IFMarkInt is in 32-bit words.
1621                                  */
1622                                 *tx_marker = (conn->conn_ops->IFMarkInt * 4);
1623                                 size -= old_tx_marker;
1624                                 orig_iov_len -= old_tx_marker;
1625                                 per_iov_bytes += old_tx_marker;
1626
1627                                 pr_debug("tx_data: #3 new_tx_marker"
1628                                         " %u, size %u\n", *tx_marker, size);
1629                                 pr_debug("tx_data: #4 offset %u\n",
1630                                         tx_marker_val[tx_marker_iov-1]);
1631                         } else {
1632                                 iov[iov_count].iov_len = orig_iov_len;
1633                                 iov[iov_count++].iov_base =
1634                                         (iov_record[orig_iov_loc].iov_base +
1635                                                 per_iov_bytes);
1636
1637                                 per_iov_bytes = 0;
1638                                 *tx_marker -= orig_iov_len;
1639                                 size -= orig_iov_len;
1640
1641                                 if (size)
1642                                         orig_iov_len =
1643                                         iov_record[++orig_iov_loc].iov_len;
1644
1645                                 pr_debug("tx_data: #5 new_tx_marker"
1646                                         " %u, size %u\n", *tx_marker, size);
1647                         }
1648                 }
1649
1650                 data += (tx_marker_iov * (MARKER_SIZE / 2));
1651
1652                 iov_p = &iov[0];
1653                 iov_len = iov_count;
1654
1655                 if (iov_count > count->ss_iov_count) {
1656                         pr_err("iov_count: %d, count->ss_iov_count:"
1657                                 " %d\n", iov_count, count->ss_iov_count);
1658                         return -1;
1659                 }
1660                 if (tx_marker_iov > count->ss_marker_count) {
1661                         pr_err("tx_marker_iov: %d, count->ss_marker"
1662                                 "_count: %d\n", tx_marker_iov,
1663                                 count->ss_marker_count);
1664                         return -1;
1665                 }
1666         } else {
1667                 iov_p = count->iov;
1668                 iov_len = count->iov_count;
1669         }
1670
1671         while (total_tx < data) {
1672                 tx_loop = kernel_sendmsg(conn->sock, &msg, iov_p, iov_len,
1673                                         (data - total_tx));
1674                 if (tx_loop <= 0) {
1675                         pr_debug("tx_loop: %d total_tx: %d\n",
1676                                 tx_loop, total_tx);
1677                         return tx_loop;
1678                 }
1679                 total_tx += tx_loop;
1680                 pr_debug("tx_loop: %d, total_tx: %d, data: %d\n",
1681                                         tx_loop, total_tx, data);
1682         }
1683
1684         if (count->sync_and_steering)
1685                 total_tx -= (tx_marker_iov * (MARKER_SIZE / 2));
1686
1687         return total_tx;
1688 }
1689
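/*
 * Receive 'data' bytes from conn->sock into the supplied iovec.  If
 * OFMarker was negotiated and the connection is logged in, the sync and
 * steering counts are computed first so that iscsit_do_rx_data() can
 * make room for the inline markers.  Returns the number of payload
 * bytes received, or a value <= 0 on error.
 *
 * Typical use, sketched after the header transmit path above (buf is a
 * hypothetical ISCSI_HDR_LEN-sized buffer):
 *
 *	struct kvec iov;
 *
 *	iov.iov_base	= buf;
 *	iov.iov_len	= ISCSI_HDR_LEN;
 *
 *	if (rx_data(conn, &iov, 1, ISCSI_HDR_LEN) != ISCSI_HDR_LEN)
 *		return -1;
 */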
1690 int rx_data(
1691         struct iscsi_conn *conn,
1692         struct kvec *iov,
1693         int iov_count,
1694         int data)
1695 {
1696         struct iscsi_data_count c;
1697
1698         if (!conn || !conn->sock || !conn->conn_ops)
1699                 return -1;
1700
1701         memset(&c, 0, sizeof(struct iscsi_data_count));
1702         c.iov = iov;
1703         c.iov_count = iov_count;
1704         c.data_length = data;
1705         c.type = ISCSI_RX_DATA;
1706
1707         if (conn->conn_ops->OFMarker &&
1708            (conn->conn_state >= TARG_CONN_STATE_LOGGED_IN)) {
1709                 if (iscsit_determine_sync_and_steering_counts(conn, &c) < 0)
1710                         return -1;
1711         }
1712
1713         return iscsit_do_rx_data(conn, &c);
1714 }
1715
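/*
 * Transmit counterpart of rx_data(): wraps the iovec in an
 * iscsi_data_count of type ISCSI_TX_DATA and, when IFMarker was
 * negotiated on a logged-in connection, sizes the marker arrays before
 * handing off to iscsit_do_tx_data().
 */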
1716 int tx_data(
1717         struct iscsi_conn *conn,
1718         struct kvec *iov,
1719         int iov_count,
1720         int data)
1721 {
1722         struct iscsi_data_count c;
1723
1724         if (!conn || !conn->sock || !conn->conn_ops)
1725                 return -1;
1726
1727         memset(&c, 0, sizeof(struct iscsi_data_count));
1728         c.iov = iov;
1729         c.iov_count = iov_count;
1730         c.data_length = data;
1731         c.type = ISCSI_TX_DATA;
1732
1733         if (conn->conn_ops->IFMarker &&
1734            (conn->conn_state >= TARG_CONN_STATE_LOGGED_IN)) {
1735                 if (iscsit_determine_sync_and_steering_counts(conn, &c) < 0)
1736                         return -1;
1737         }
1738
1739         return iscsit_do_tx_data(conn, &c);
1740 }
1741
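/*
 * Update the per-tiqn login statistics for a login attempt.  A repeated
 * failure from the same initiator IP within 10 jiffies of the previous
 * one is ignored; otherwise the appropriate counter is bumped and, for
 * failures, the initiator name (when a param_list is available), IP
 * address and timestamp are recorded.  Note that the name is copied
 * with strcpy(), so last_intr_fail_name is assumed to be large enough
 * for any negotiated InitiatorName.
 */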
1742 void iscsit_collect_login_stats(
1743         struct iscsi_conn *conn,
1744         u8 status_class,
1745         u8 status_detail)
1746 {
1747         struct iscsi_param *intrname = NULL;
1748         struct iscsi_tiqn *tiqn;
1749         struct iscsi_login_stats *ls;
1750
1751         tiqn = iscsit_snmp_get_tiqn(conn);
1752         if (!tiqn)
1753                 return;
1754
1755         ls = &tiqn->login_stats;
1756
1757         spin_lock(&ls->lock);
1758         if (!strcmp(conn->login_ip, ls->last_intr_fail_ip_addr) &&
1759             ((get_jiffies_64() - ls->last_fail_time) < 10)) {
1760                 /* We already have the failure info for this login */
1761                 spin_unlock(&ls->lock);
1762                 return;
1763         }
1764
1765         if (status_class == ISCSI_STATUS_CLS_SUCCESS)
1766                 ls->accepts++;
1767         else if (status_class == ISCSI_STATUS_CLS_REDIRECT) {
1768                 ls->redirects++;
1769                 ls->last_fail_type = ISCSI_LOGIN_FAIL_REDIRECT;
1770         } else if ((status_class == ISCSI_STATUS_CLS_INITIATOR_ERR) &&
1771                  (status_detail == ISCSI_LOGIN_STATUS_AUTH_FAILED)) {
1772                 ls->authenticate_fails++;
1773                 ls->last_fail_type = ISCSI_LOGIN_FAIL_AUTHENTICATE;
1774         } else if ((status_class == ISCSI_STATUS_CLS_INITIATOR_ERR) &&
1775                  (status_detail == ISCSI_LOGIN_STATUS_TGT_FORBIDDEN)) {
1776                 ls->authorize_fails++;
1777                 ls->last_fail_type = ISCSI_LOGIN_FAIL_AUTHORIZE;
1778         } else if ((status_class == ISCSI_STATUS_CLS_INITIATOR_ERR) &&
1779                  (status_detail == ISCSI_LOGIN_STATUS_INIT_ERR)) {
1780                 ls->negotiate_fails++;
1781                 ls->last_fail_type = ISCSI_LOGIN_FAIL_NEGOTIATE;
1782         } else {
1783                 ls->other_fails++;
1784                 ls->last_fail_type = ISCSI_LOGIN_FAIL_OTHER;
1785         }
1786
1787         /* Save initiator name, ip address and time, if it is a failed login */
1788         if (status_class != ISCSI_STATUS_CLS_SUCCESS) {
1789                 if (conn->param_list)
1790                         intrname = iscsi_find_param_from_key(INITIATORNAME,
1791                                                              conn->param_list);
1792                 strcpy(ls->last_intr_fail_name,
1793                        (intrname ? intrname->value : "Unknown"));
1794
1795                 ls->last_intr_fail_ip_family = conn->sock->sk->sk_family;
1796                 snprintf(ls->last_intr_fail_ip_addr, IPV6_ADDRESS_SPACE,
1797                                 "%s", conn->login_ip);
1798                 ls->last_fail_time = get_jiffies_64();
1799         }
1800
1801         spin_unlock(&ls->lock);
1802 }
1803
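/*
 * Resolve the iscsi_tiqn that owns this connection's portal group, or
 * NULL if the session, TPG or TIQN pointer is not set; used by
 * iscsit_collect_login_stats() above to locate the right login_stats.
 */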
1804 struct iscsi_tiqn *iscsit_snmp_get_tiqn(struct iscsi_conn *conn)
1805 {
1806         struct iscsi_portal_group *tpg;
1807
1808         if (!conn || !conn->sess)
1809                 return NULL;
1810
1811         tpg = conn->sess->tpg;
1812         if (!tpg)
1813                 return NULL;
1814
1815         if (!tpg->tpg_tiqn)
1816                 return NULL;
1817
1818         return tpg->tpg_tiqn;
1819 }