drivers/scsi/ibmvscsi/ibmvfc.c
1 /*
2  * ibmvfc.c -- driver for IBM Power Virtual Fibre Channel Adapter
3  *
4  * Written By: Brian King <brking@linux.vnet.ibm.com>, IBM Corporation
5  *
6  * Copyright (C) IBM Corporation, 2008
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  *
13  * This program is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  * GNU General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; if not, write to the Free Software
20  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
21  *
22  */
23
24 #include <linux/module.h>
25 #include <linux/moduleparam.h>
26 #include <linux/dma-mapping.h>
27 #include <linux/dmapool.h>
28 #include <linux/delay.h>
29 #include <linux/interrupt.h>
30 #include <linux/kthread.h>
31 #include <linux/of.h>
32 #include <linux/stringify.h>
33 #include <asm/firmware.h>
34 #include <asm/irq.h>
35 #include <asm/vio.h>
36 #include <scsi/scsi.h>
37 #include <scsi/scsi_cmnd.h>
38 #include <scsi/scsi_host.h>
39 #include <scsi/scsi_device.h>
40 #include <scsi/scsi_tcq.h>
41 #include <scsi/scsi_transport_fc.h>
42 #include "ibmvfc.h"
43
44 static unsigned int init_timeout = IBMVFC_INIT_TIMEOUT;
45 static unsigned int default_timeout = IBMVFC_DEFAULT_TIMEOUT;
46 static unsigned int max_lun = IBMVFC_MAX_LUN;
47 static unsigned int max_targets = IBMVFC_MAX_TARGETS;
48 static unsigned int max_requests = IBMVFC_MAX_REQUESTS_DEFAULT;
49 static unsigned int disc_threads = IBMVFC_MAX_DISC_THREADS;
50 static unsigned int dev_loss_tmo = IBMVFC_DEV_LOSS_TMO;
51 static unsigned int ibmvfc_debug = IBMVFC_DEBUG;
52 static unsigned int log_level = IBMVFC_DEFAULT_LOG_LEVEL;
53 static LIST_HEAD(ibmvfc_head);
54 static DEFINE_SPINLOCK(ibmvfc_driver_lock);
55 static struct scsi_transport_template *ibmvfc_transport_template;
56
57 MODULE_DESCRIPTION("IBM Virtual Fibre Channel Driver");
58 MODULE_AUTHOR("Brian King <brking@linux.vnet.ibm.com>");
59 MODULE_LICENSE("GPL");
60 MODULE_VERSION(IBMVFC_DRIVER_VERSION);
61
62 module_param_named(init_timeout, init_timeout, uint, S_IRUGO | S_IWUSR);
63 MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds. "
64                  "[Default=" __stringify(IBMVFC_INIT_TIMEOUT) "]");
65 module_param_named(default_timeout, default_timeout, uint, S_IRUGO | S_IWUSR);
66 MODULE_PARM_DESC(default_timeout,
67                  "Default timeout in seconds for initialization and EH commands. "
68                  "[Default=" __stringify(IBMVFC_DEFAULT_TIMEOUT) "]");
69 module_param_named(max_requests, max_requests, uint, S_IRUGO);
70 MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter. "
71                  "[Default=" __stringify(IBMVFC_MAX_REQUESTS_DEFAULT) "]");
72 module_param_named(max_lun, max_lun, uint, S_IRUGO);
73 MODULE_PARM_DESC(max_lun, "Maximum allowed LUN. "
74                  "[Default=" __stringify(IBMVFC_MAX_LUN) "]");
75 module_param_named(max_targets, max_targets, uint, S_IRUGO);
76 MODULE_PARM_DESC(max_targets, "Maximum allowed targets. "
77                  "[Default=" __stringify(IBMVFC_MAX_TARGETS) "]");
78 module_param_named(disc_threads, disc_threads, uint, S_IRUGO | S_IWUSR);
79 MODULE_PARM_DESC(disc_threads, "Number of device discovery threads to use. "
80                  "[Default=" __stringify(IBMVFC_MAX_DISC_THREADS) "]");
81 module_param_named(debug, ibmvfc_debug, uint, S_IRUGO | S_IWUSR);
82 MODULE_PARM_DESC(debug, "Enable driver debug information. "
83                  "[Default=" __stringify(IBMVFC_DEBUG) "]");
84 module_param_named(dev_loss_tmo, dev_loss_tmo, uint, S_IRUGO | S_IWUSR);
85 MODULE_PARM_DESC(dev_loss_tmo, "Maximum number of seconds that the FC "
86                  "transport should insulate the loss of a remote port. Once this "
87                  "value is exceeded, the scsi target is removed. "
88                  "[Default=" __stringify(IBMVFC_DEV_LOSS_TMO) "]");
89 module_param_named(log_level, log_level, uint, 0);
90 MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver. "
91                  "[Default=" __stringify(IBMVFC_DEFAULT_LOG_LEVEL) "]");
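/*
 * Illustrative usage (not part of the driver): the parameters above can be
 * supplied at load time, e.g.
 *
 *   modprobe ibmvfc max_requests=100 debug=1 log_level=3
 *
 * Parameters registered with S_IWUSR can also be changed at runtime through
 * /sys/module/ibmvfc/parameters/<name>; the values shown here are examples
 * only.
 */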
92
93 static const struct {
94         u16 status;
95         u16 error;
96         u8 result;
97         u8 retry;
98         int log;
99         char *name;
100 } cmd_status [] = {
101         { IBMVFC_FABRIC_MAPPED, IBMVFC_UNABLE_TO_ESTABLISH, DID_ERROR, 1, 1, "unable to establish" },
102         { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_FAULT, DID_OK, 1, 0, "transport fault" },
103         { IBMVFC_FABRIC_MAPPED, IBMVFC_CMD_TIMEOUT, DID_TIME_OUT, 1, 1, "command timeout" },
104         { IBMVFC_FABRIC_MAPPED, IBMVFC_ENETDOWN, DID_NO_CONNECT, 1, 1, "network down" },
105         { IBMVFC_FABRIC_MAPPED, IBMVFC_HW_FAILURE, DID_ERROR, 1, 1, "hardware failure" },
106         { IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_DOWN_ERR, DID_REQUEUE, 0, 0, "link down" },
107         { IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_DEAD_ERR, DID_ERROR, 0, 0, "link dead" },
108         { IBMVFC_FABRIC_MAPPED, IBMVFC_UNABLE_TO_REGISTER, DID_ERROR, 1, 1, "unable to register" },
109         { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_BUSY, DID_BUS_BUSY, 1, 0, "transport busy" },
110         { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_DEAD, DID_ERROR, 0, 1, "transport dead" },
111         { IBMVFC_FABRIC_MAPPED, IBMVFC_CONFIG_ERROR, DID_ERROR, 1, 1, "configuration error" },
112         { IBMVFC_FABRIC_MAPPED, IBMVFC_NAME_SERVER_FAIL, DID_ERROR, 1, 1, "name server failure" },
113         { IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_HALTED, DID_REQUEUE, 0, 0, "link halted" },
114         { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_GENERAL, DID_OK, 1, 0, "general transport error" },
115
116         { IBMVFC_VIOS_FAILURE, IBMVFC_CRQ_FAILURE, DID_REQUEUE, 1, 1, "CRQ failure" },
117         { IBMVFC_VIOS_FAILURE, IBMVFC_SW_FAILURE, DID_ERROR, 0, 1, "software failure" },
118         { IBMVFC_VIOS_FAILURE, IBMVFC_INVALID_PARAMETER, DID_ABORT, 0, 1, "invalid parameter" },
119         { IBMVFC_VIOS_FAILURE, IBMVFC_MISSING_PARAMETER, DID_ABORT, 0, 1, "missing parameter" },
120         { IBMVFC_VIOS_FAILURE, IBMVFC_HOST_IO_BUS, DID_ERROR, 1, 1, "host I/O bus failure" },
121         { IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED, DID_ABORT, 0, 1, "transaction cancelled" },
122         { IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED_IMPLICIT, DID_ABORT, 0, 1, "transaction cancelled implicit" },
123         { IBMVFC_VIOS_FAILURE, IBMVFC_INSUFFICIENT_RESOURCE, DID_REQUEUE, 1, 1, "insufficient resources" },
124         { IBMVFC_VIOS_FAILURE, IBMVFC_COMMAND_FAILED, DID_ERROR, 1, 1, "command failed" },
125
126         { IBMVFC_FC_FAILURE, IBMVFC_INVALID_ELS_CMD_CODE, DID_ERROR, 0, 1, "invalid ELS command code" },
127         { IBMVFC_FC_FAILURE, IBMVFC_INVALID_VERSION, DID_ERROR, 0, 1, "invalid version level" },
128         { IBMVFC_FC_FAILURE, IBMVFC_LOGICAL_ERROR, DID_ERROR, 1, 1, "logical error" },
129         { IBMVFC_FC_FAILURE, IBMVFC_INVALID_CT_IU_SIZE, DID_ERROR, 0, 1, "invalid CT_IU size" },
130         { IBMVFC_FC_FAILURE, IBMVFC_LOGICAL_BUSY, DID_REQUEUE, 1, 0, "logical busy" },
131         { IBMVFC_FC_FAILURE, IBMVFC_PROTOCOL_ERROR, DID_ERROR, 1, 1, "protocol error" },
132         { IBMVFC_FC_FAILURE, IBMVFC_UNABLE_TO_PERFORM_REQ, DID_ERROR, 1, 1, "unable to perform request" },
133         { IBMVFC_FC_FAILURE, IBMVFC_CMD_NOT_SUPPORTED, DID_ERROR, 0, 0, "command not supported" },
134         { IBMVFC_FC_FAILURE, IBMVFC_SERVER_NOT_AVAIL, DID_ERROR, 0, 1, "server not available" },
135         { IBMVFC_FC_FAILURE, IBMVFC_CMD_IN_PROGRESS, DID_ERROR, 0, 1, "command already in progress" },
136         { IBMVFC_FC_FAILURE, IBMVFC_VENDOR_SPECIFIC, DID_ERROR, 1, 1, "vendor specific" },
137
138         { IBMVFC_FC_SCSI_ERROR, 0, DID_OK, 1, 0, "SCSI error" },
139 };
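/*
 * The table above is scanned linearly by ibmvfc_get_err_index() below: an
 * entry matches when all of its status bits are set in the reported status
 * and the error codes are equal.  A minimal sketch of a lookup (illustrative
 * values only):
 *
 *   int idx = ibmvfc_get_err_index(IBMVFC_FABRIC_MAPPED, IBMVFC_CMD_TIMEOUT);
 *   if (idx >= 0)
 *       pr_info("%s: retry=%d\n", cmd_status[idx].name, cmd_status[idx].retry);
 *
 * which would resolve to the "command timeout" entry (DID_TIME_OUT, retryable).
 */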
140
141 static void ibmvfc_npiv_login(struct ibmvfc_host *);
142 static void ibmvfc_tgt_send_prli(struct ibmvfc_target *);
143 static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *);
144 static void ibmvfc_tgt_query_target(struct ibmvfc_target *);
145
146 static const char *unknown_error = "unknown error";
147
148 #ifdef CONFIG_SCSI_IBMVFC_TRACE
149 /**
150  * ibmvfc_trc_start - Log a start trace entry
151  * @evt:                ibmvfc event struct
152  *
153  **/
154 static void ibmvfc_trc_start(struct ibmvfc_event *evt)
155 {
156         struct ibmvfc_host *vhost = evt->vhost;
157         struct ibmvfc_cmd *vfc_cmd = &evt->iu.cmd;
158         struct ibmvfc_mad_common *mad = &evt->iu.mad_common;
159         struct ibmvfc_trace_entry *entry;
160
161         entry = &vhost->trace[vhost->trace_index++];
162         entry->evt = evt;
163         entry->time = jiffies;
164         entry->fmt = evt->crq.format;
165         entry->type = IBMVFC_TRC_START;
166
167         switch (entry->fmt) {
168         case IBMVFC_CMD_FORMAT:
169                 entry->op_code = vfc_cmd->iu.cdb[0];
170                 entry->scsi_id = vfc_cmd->tgt_scsi_id;
171                 entry->lun = scsilun_to_int(&vfc_cmd->iu.lun);
172                 entry->tmf_flags = vfc_cmd->iu.tmf_flags;
173                 entry->u.start.xfer_len = vfc_cmd->iu.xfer_len;
174                 break;
175         case IBMVFC_MAD_FORMAT:
176                 entry->op_code = mad->opcode;
177                 break;
178         default:
179                 break;
180         }
181 }
182
183 /**
184  * ibmvfc_trc_end - Log an end trace entry
185  * @evt:                ibmvfc event struct
186  *
187  **/
188 static void ibmvfc_trc_end(struct ibmvfc_event *evt)
189 {
190         struct ibmvfc_host *vhost = evt->vhost;
191         struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
192         struct ibmvfc_mad_common *mad = &evt->xfer_iu->mad_common;
193         struct ibmvfc_trace_entry *entry = &vhost->trace[vhost->trace_index++];
194
195         entry->evt = evt;
196         entry->time = jiffies;
197         entry->fmt = evt->crq.format;
198         entry->type = IBMVFC_TRC_END;
199
200         switch (entry->fmt) {
201         case IBMVFC_CMD_FORMAT:
202                 entry->op_code = vfc_cmd->iu.cdb[0];
203                 entry->scsi_id = vfc_cmd->tgt_scsi_id;
204                 entry->lun = scsilun_to_int(&vfc_cmd->iu.lun);
205                 entry->tmf_flags = vfc_cmd->iu.tmf_flags;
206                 entry->u.end.status = vfc_cmd->status;
207                 entry->u.end.error = vfc_cmd->error;
208                 entry->u.end.fcp_rsp_flags = vfc_cmd->rsp.flags;
209                 entry->u.end.rsp_code = vfc_cmd->rsp.data.info.rsp_code;
210                 entry->u.end.scsi_status = vfc_cmd->rsp.scsi_status;
211                 break;
212         case IBMVFC_MAD_FORMAT:
213                 entry->op_code = mad->opcode;
214                 entry->u.end.status = mad->status;
215                 break;
216         default:
217                 break;
218
219         }
220 }
221
222 #else
223 #define ibmvfc_trc_start(evt) do { } while (0)
224 #define ibmvfc_trc_end(evt) do { } while (0)
225 #endif
226
227 /**
228  * ibmvfc_get_err_index - Find the index into cmd_status for the fcp response
229  * @status:             status / error class
230  * @error:              error
231  *
232  * Return value:
233  *      index into cmd_status / -EINVAL on failure
234  **/
235 static int ibmvfc_get_err_index(u16 status, u16 error)
236 {
237         int i;
238
239         for (i = 0; i < ARRAY_SIZE(cmd_status); i++)
240                 if ((cmd_status[i].status & status) == cmd_status[i].status &&
241                     cmd_status[i].error == error)
242                         return i;
243
244         return -EINVAL;
245 }
246
247 /**
248  * ibmvfc_get_cmd_error - Find the error description for the fcp response
249  * @status:             status / error class
250  * @error:              error
251  *
252  * Return value:
253  *      error description string
254  **/
255 static const char *ibmvfc_get_cmd_error(u16 status, u16 error)
256 {
257         int rc = ibmvfc_get_err_index(status, error);
258         if (rc >= 0)
259                 return cmd_status[rc].name;
260         return unknown_error;
261 }
262
263 /**
264  * ibmvfc_get_err_result - Find the scsi status to return for the fcp response
265  * @vfc_cmd:    ibmvfc command struct
266  *
267  * Return value:
268  *      SCSI result value to return for completed command
269  **/
270 static int ibmvfc_get_err_result(struct ibmvfc_cmd *vfc_cmd)
271 {
272         int err;
273         struct ibmvfc_fcp_rsp *rsp = &vfc_cmd->rsp;
274         int fc_rsp_len = rsp->fcp_rsp_len;
275
276         if ((rsp->flags & FCP_RSP_LEN_VALID) &&
277             ((fc_rsp_len && fc_rsp_len != 4 && fc_rsp_len != 8) ||
278              rsp->data.info.rsp_code))
279                 return DID_ERROR << 16;
280
281         if (!vfc_cmd->status) {
282                 if (rsp->flags & FCP_RESID_OVER)
283                         return rsp->scsi_status | (DID_ERROR << 16);
284                 else
285                         return rsp->scsi_status | (DID_OK << 16);
286         }
287
288         err = ibmvfc_get_err_index(vfc_cmd->status, vfc_cmd->error);
289         if (err >= 0)
290                 return rsp->scsi_status | (cmd_status[err].result << 16);
291         return rsp->scsi_status | (DID_ERROR << 16);
292 }
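/*
 * The SCSI midlayer expects the host byte in bits 16-23 of the result, with
 * the FCP SCSI status in the low byte.  A worked example of the composition
 * performed above (illustrative values only):
 *
 *   (DID_ERROR << 16) | SAM_STAT_CHECK_CONDITION  ==  0x00070002
 *
 * i.e. a fabric error reported by the VIOS is surfaced to the midlayer as a
 * host-level error while still carrying the target's SCSI status byte.
 */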
293
294 /**
295  * ibmvfc_retry_cmd - Determine if error status is retryable
296  * @status:             status / error class
297  * @error:              error
298  *
299  * Return value:
300  *      1 if error should be retried / 0 if it should not
301  **/
302 static int ibmvfc_retry_cmd(u16 status, u16 error)
303 {
304         int rc = ibmvfc_get_err_index(status, error);
305
306         if (rc >= 0)
307                 return cmd_status[rc].retry;
308         return 1;
309 }
310
311 static const char *unknown_fc_explain = "unknown fc explain";
312
313 static const struct {
314         u16 fc_explain;
315         char *name;
316 } ls_explain [] = {
317         { 0x00, "no additional explanation" },
318         { 0x01, "service parameter error - options" },
319         { 0x03, "service parameter error - initiator control" },
320         { 0x05, "service parameter error - recipient control" },
321         { 0x07, "service parameter error - received data field size" },
322         { 0x09, "service parameter error - concurrent seq" },
323         { 0x0B, "service parameter error - credit" },
324         { 0x0D, "invalid N_Port/F_Port_Name" },
325         { 0x0E, "invalid node/Fabric Name" },
326         { 0x0F, "invalid common service parameters" },
327         { 0x11, "invalid association header" },
328         { 0x13, "association header required" },
329         { 0x15, "invalid originator S_ID" },
330         { 0x17, "invalid OX_ID-RX-ID combination" },
331         { 0x19, "command (request) already in progress" },
332         { 0x1E, "N_Port Login requested" },
333         { 0x1F, "Invalid N_Port_ID" },
334 };
335
336 static const struct {
337         u16 fc_explain;
338         char *name;
339 } gs_explain [] = {
340         { 0x00, "no additional explanation" },
341         { 0x01, "port identifier not registered" },
342         { 0x02, "port name not registered" },
343         { 0x03, "node name not registered" },
344         { 0x04, "class of service not registered" },
345         { 0x06, "initial process associator not registered" },
346         { 0x07, "FC-4 TYPEs not registered" },
347         { 0x08, "symbolic port name not registered" },
348         { 0x09, "symbolic node name not registered" },
349         { 0x0A, "port type not registered" },
350         { 0xF0, "authorization exception" },
351         { 0xF1, "authentication exception" },
352         { 0xF2, "data base full" },
353         { 0xF3, "data base empty" },
354         { 0xF4, "processing request" },
355         { 0xF5, "unable to verify connection" },
356         { 0xF6, "devices not in a common zone" },
357 };
358
359 /**
360  * ibmvfc_get_ls_explain - Return the FC Explain description text
361  * @status:     FC Explain status
362  *
363  * Returns:
364  *      error string
365  **/
366 static const char *ibmvfc_get_ls_explain(u16 status)
367 {
368         int i;
369
370         for (i = 0; i < ARRAY_SIZE(ls_explain); i++)
371                 if (ls_explain[i].fc_explain == status)
372                         return ls_explain[i].name;
373
374         return unknown_fc_explain;
375 }
376
377 /**
378  * ibmvfc_get_gs_explain - Return the FC Explain description text
379  * @status:     FC Explain status
380  *
381  * Returns:
382  *      error string
383  **/
384 static const char *ibmvfc_get_gs_explain(u16 status)
385 {
386         int i;
387
388         for (i = 0; i < ARRAY_SIZE(gs_explain); i++)
389                 if (gs_explain[i].fc_explain == status)
390                         return gs_explain[i].name;
391
392         return unknown_fc_explain;
393 }
394
395 static const struct {
396         enum ibmvfc_fc_type fc_type;
397         char *name;
398 } fc_type [] = {
399         { IBMVFC_FABRIC_REJECT, "fabric reject" },
400         { IBMVFC_PORT_REJECT, "port reject" },
401         { IBMVFC_LS_REJECT, "ELS reject" },
402         { IBMVFC_FABRIC_BUSY, "fabric busy" },
403         { IBMVFC_PORT_BUSY, "port busy" },
404         { IBMVFC_BASIC_REJECT, "basic reject" },
405 };
406
407 static const char *unknown_fc_type = "unknown fc type";
408
409 /**
410  * ibmvfc_get_fc_type - Return the FC Type description text
411  * @status:     FC Type error status
412  *
413  * Returns:
414  *      error string
415  **/
416 static const char *ibmvfc_get_fc_type(u16 status)
417 {
418         int i;
419
420         for (i = 0; i < ARRAY_SIZE(fc_type); i++)
421                 if (fc_type[i].fc_type == status)
422                         return fc_type[i].name;
423
424         return unknown_fc_type;
425 }
426
427 /**
428  * ibmvfc_set_tgt_action - Set the next init action for the target
429  * @tgt:                ibmvfc target struct
430  * @action:             action to perform
431  *
432  **/
433 static void ibmvfc_set_tgt_action(struct ibmvfc_target *tgt,
434                                   enum ibmvfc_target_action action)
435 {
436         switch (tgt->action) {
437         case IBMVFC_TGT_ACTION_DEL_RPORT:
438                 break;
439         default:
440                 tgt->action = action;
441                 break;
442         }
443 }
444
445 /**
446  * ibmvfc_set_host_state - Set the state for the host
447  * @vhost:              ibmvfc host struct
448  * @state:              state to set host to
449  *
450  * Returns:
451  *      0 if state changed / non-zero if not changed
452  **/
453 static int ibmvfc_set_host_state(struct ibmvfc_host *vhost,
454                                   enum ibmvfc_host_state state)
455 {
456         int rc = 0;
457
458         switch (vhost->state) {
459         case IBMVFC_HOST_OFFLINE:
460                 rc = -EINVAL;
461                 break;
462         default:
463                 vhost->state = state;
464                 break;
465         }
466
467         return rc;
468 }
469
470 /**
471  * ibmvfc_set_host_action - Set the next init action for the host
472  * @vhost:              ibmvfc host struct
473  * @action:             action to perform
474  *
475  **/
476 static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
477                                    enum ibmvfc_host_action action)
478 {
479         switch (action) {
480         case IBMVFC_HOST_ACTION_ALLOC_TGTS:
481                 if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT)
482                         vhost->action = action;
483                 break;
484         case IBMVFC_HOST_ACTION_INIT_WAIT:
485                 if (vhost->action == IBMVFC_HOST_ACTION_INIT)
486                         vhost->action = action;
487                 break;
488         case IBMVFC_HOST_ACTION_QUERY:
489                 switch (vhost->action) {
490                 case IBMVFC_HOST_ACTION_INIT_WAIT:
491                 case IBMVFC_HOST_ACTION_NONE:
492                 case IBMVFC_HOST_ACTION_TGT_ADD:
493                         vhost->action = action;
494                         break;
495                 default:
496                         break;
497                 }
498                 break;
499         case IBMVFC_HOST_ACTION_TGT_INIT:
500                 if (vhost->action == IBMVFC_HOST_ACTION_ALLOC_TGTS)
501                         vhost->action = action;
502                 break;
503         case IBMVFC_HOST_ACTION_INIT:
504         case IBMVFC_HOST_ACTION_TGT_DEL:
505         case IBMVFC_HOST_ACTION_QUERY_TGTS:
506         case IBMVFC_HOST_ACTION_TGT_ADD:
507         case IBMVFC_HOST_ACTION_NONE:
508         default:
509                 vhost->action = action;
510                 break;
511         }
512 }
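/*
 * Summary of the transition rules enforced above (derived directly from the
 * switch statement): ALLOC_TGTS is only entered from INIT_WAIT, INIT_WAIT
 * only from INIT, TGT_INIT only from ALLOC_TGTS, and QUERY only from
 * INIT_WAIT, NONE or TGT_ADD; the remaining actions (INIT, TGT_DEL,
 * QUERY_TGTS, TGT_ADD, NONE) are set unconditionally.  The guarded cases
 * prevent a later stage of discovery from being rewound by a stale caller.
 */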
513
514 /**
515  * ibmvfc_reinit_host - Re-start host initialization (no NPIV Login)
516  * @vhost:              ibmvfc host struct
517  *
518  * Return value:
519  *      nothing
520  **/
521 static void ibmvfc_reinit_host(struct ibmvfc_host *vhost)
522 {
523         if (vhost->action == IBMVFC_HOST_ACTION_NONE) {
524                 scsi_block_requests(vhost->host);
525                 ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING);
526                 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
527         } else
528                 vhost->reinit = 1;
529
530         wake_up(&vhost->work_wait_q);
531 }
532
533 /**
534  * ibmvfc_link_down - Handle a link down event from the adapter
535  * @vhost:      ibmvfc host struct
536  * @state:      ibmvfc host state to enter
537  *
538  **/
539 static void ibmvfc_link_down(struct ibmvfc_host *vhost,
540                              enum ibmvfc_host_state state)
541 {
542         struct ibmvfc_target *tgt;
543
544         ENTER;
545         scsi_block_requests(vhost->host);
546         list_for_each_entry(tgt, &vhost->targets, queue)
547                 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
548         ibmvfc_set_host_state(vhost, state);
549         ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL);
550         vhost->events_to_log |= IBMVFC_AE_LINKDOWN;
551         wake_up(&vhost->work_wait_q);
552         LEAVE;
553 }
554
555 /**
556  * ibmvfc_init_host - Start host initialization
557  * @vhost:              ibmvfc host struct
558  *
559  * Return value:
560  *      nothing
561  **/
562 static void ibmvfc_init_host(struct ibmvfc_host *vhost)
563 {
564         struct ibmvfc_target *tgt;
565
566         if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) {
567                 if (++vhost->init_retries > IBMVFC_MAX_INIT_RETRIES) {
568                         dev_err(vhost->dev,
569                                 "Host initialization retries exceeded. Taking adapter offline\n");
570                         ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
571                         return;
572                 }
573         }
574
575         if (!ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
576                 list_for_each_entry(tgt, &vhost->targets, queue)
577                         tgt->need_login = 1;
578                 scsi_block_requests(vhost->host);
579                 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
580                 vhost->job_step = ibmvfc_npiv_login;
581                 wake_up(&vhost->work_wait_q);
582         }
583 }
584
585 /**
586  * ibmvfc_send_crq - Send a CRQ
587  * @vhost:      ibmvfc host struct
588  * @word1:      the first 64 bits of the data
589  * @word2:      the second 64 bits of the data
590  *
591  * Return value:
592  *      0 on success / other on failure
593  **/
594 static int ibmvfc_send_crq(struct ibmvfc_host *vhost, u64 word1, u64 word2)
595 {
596         struct vio_dev *vdev = to_vio_dev(vhost->dev);
597         return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2);
598 }
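/*
 * H_SEND_CRQ transfers one 16-byte CRQ element as two 64-bit words.  For the
 * command/MAD path the driver simply reinterprets the struct ibmvfc_crq as
 * two u64s (see ibmvfc_send_event() below); a minimal sketch, assuming that
 * layout:
 *
 *   u64 *w = (u64 *)&evt->crq;
 *   rc = ibmvfc_send_crq(vhost, w[0], w[1]);
 *
 * On this big-endian platform the most significant byte of the first word is
 * the "valid" type: 0x80 marks a command element (set on pool events in
 * ibmvfc_init_event_pool() below), while 0xC0 marks an initialization
 * message, hence the 0xC001.../0xC002... constants in the two helpers that
 * follow.
 */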
599
600 /**
601  * ibmvfc_send_crq_init - Send a CRQ init message
602  * @vhost:      ibmvfc host struct
603  *
604  * Return value:
605  *      0 on success / other on failure
606  **/
607 static int ibmvfc_send_crq_init(struct ibmvfc_host *vhost)
608 {
609         ibmvfc_dbg(vhost, "Sending CRQ init\n");
610         return ibmvfc_send_crq(vhost, 0xC001000000000000LL, 0);
611 }
612
613 /**
614  * ibmvfc_send_crq_init_complete - Send a CRQ init complete message
615  * @vhost:      ibmvfc host struct
616  *
617  * Return value:
618  *      0 on success / other on failure
619  **/
620 static int ibmvfc_send_crq_init_complete(struct ibmvfc_host *vhost)
621 {
622         ibmvfc_dbg(vhost, "Sending CRQ init complete\n");
623         return ibmvfc_send_crq(vhost, 0xC002000000000000LL, 0);
624 }
625
626 /**
627  * ibmvfc_release_crq_queue - Deallocates data and unregisters CRQ
628  * @vhost:      ibmvfc host struct
629  *
630  * Frees irq, deallocates a page for messages, unmaps dma, and unregisters
631  * the crq with the hypervisor.
632  **/
633 static void ibmvfc_release_crq_queue(struct ibmvfc_host *vhost)
634 {
635         long rc;
636         struct vio_dev *vdev = to_vio_dev(vhost->dev);
637         struct ibmvfc_crq_queue *crq = &vhost->crq;
638
639         ibmvfc_dbg(vhost, "Releasing CRQ\n");
640         free_irq(vdev->irq, vhost);
641         do {
642                 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
643         } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
644
645         vhost->state = IBMVFC_NO_CRQ;
646         dma_unmap_single(vhost->dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
647         free_page((unsigned long)crq->msgs);
648 }
649
650 /**
651  * ibmvfc_reenable_crq_queue - reenables the CRQ
652  * @vhost:      ibmvfc host struct
653  *
654  * Return value:
655  *      0 on success / other on failure
656  **/
657 static int ibmvfc_reenable_crq_queue(struct ibmvfc_host *vhost)
658 {
659         int rc;
660         struct vio_dev *vdev = to_vio_dev(vhost->dev);
661
662         /* Re-enable the CRQ */
663         do {
664                 rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
665         } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
666
667         if (rc)
668                 dev_err(vhost->dev, "Error enabling adapter (rc=%d)\n", rc);
669
670         return rc;
671 }
672
673 /**
674  * ibmvfc_reset_crq - resets a crq after a failure
675  * @vhost:      ibmvfc host struct
676  *
677  * Return value:
678  *      0 on success / other on failure
679  **/
680 static int ibmvfc_reset_crq(struct ibmvfc_host *vhost)
681 {
682         int rc;
683         struct vio_dev *vdev = to_vio_dev(vhost->dev);
684         struct ibmvfc_crq_queue *crq = &vhost->crq;
685
686         /* Close the CRQ */
687         do {
688                 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
689         } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
690
691         vhost->state = IBMVFC_NO_CRQ;
692         ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
693
694         /* Clean out the queue */
695         memset(crq->msgs, 0, PAGE_SIZE);
696         crq->cur = 0;
697
698         /* And re-open it again */
699         rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
700                                 crq->msg_token, PAGE_SIZE);
701
702         if (rc == H_CLOSED)
703                 /* Adapter is good, but other end is not ready */
704                 dev_warn(vhost->dev, "Partner adapter not ready\n");
705         else if (rc != 0)
706                 dev_warn(vhost->dev, "Couldn't register crq (rc=%d)\n", rc);
707
708         return rc;
709 }
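/*
 * Sketch of the reset sequence above: H_FREE_CRQ is retried while the
 * hypervisor reports H_BUSY or a long-busy return, the queue page is zeroed
 * and the cursor rewound, and H_REG_CRQ re-registers the same DMA-mapped
 * page with the partner.  H_CLOSED at registration is not fatal; it simply
 * means the VIOS side has not opened its end of the queue yet.
 */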
710
711 /**
712  * ibmvfc_valid_event - Determines if event is valid.
713  * @pool:       event_pool that contains the event
714  * @evt:        ibmvfc event to be checked for validity
715  *
716  * Return value:
717  *      1 if event is valid / 0 if event is not valid
718  **/
719 static int ibmvfc_valid_event(struct ibmvfc_event_pool *pool,
720                               struct ibmvfc_event *evt)
721 {
722         int index = evt - pool->events;
723         if (index < 0 || index >= pool->size)   /* outside of bounds */
724                 return 0;
725         if (evt != pool->events + index)        /* unaligned */
726                 return 0;
727         return 1;
728 }
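/*
 * Illustrative form of the check performed above: given the contiguous
 * events[] array allocated in ibmvfc_init_event_pool(), a pointer is valid
 * iff
 *
 *   0 <= (evt - pool->events) < pool->size   and
 *   evt == &pool->events[evt - pool->events]
 *
 * where the second test catches pointers that fall inside the array but are
 * not aligned on an element boundary.
 */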
729
730 /**
731  * ibmvfc_free_event - Free the specified event
732  * @evt:        ibmvfc_event to be freed
733  *
734  **/
735 static void ibmvfc_free_event(struct ibmvfc_event *evt)
736 {
737         struct ibmvfc_host *vhost = evt->vhost;
738         struct ibmvfc_event_pool *pool = &vhost->pool;
739
740         BUG_ON(!ibmvfc_valid_event(pool, evt));
741         BUG_ON(atomic_inc_return(&evt->free) != 1);
742         list_add_tail(&evt->queue, &vhost->free);
743 }
744
745 /**
746  * ibmvfc_scsi_eh_done - EH done function for queuecommand commands
747  * @evt:        ibmvfc event struct
748  *
749  * This function does not set up any error status; that must be done
750  * before this function gets called.
751  **/
752 static void ibmvfc_scsi_eh_done(struct ibmvfc_event *evt)
753 {
754         struct scsi_cmnd *cmnd = evt->cmnd;
755
756         if (cmnd) {
757                 scsi_dma_unmap(cmnd);
758                 cmnd->scsi_done(cmnd);
759         }
760
761         ibmvfc_free_event(evt);
762 }
763
764 /**
765  * ibmvfc_fail_request - Fail request with specified error code
766  * @evt:                ibmvfc event struct
767  * @error_code: error code to fail request with
768  *
769  * Return value:
770  *      none
771  **/
772 static void ibmvfc_fail_request(struct ibmvfc_event *evt, int error_code)
773 {
774         if (evt->cmnd) {
775                 evt->cmnd->result = (error_code << 16);
776                 evt->done = ibmvfc_scsi_eh_done;
777         } else
778                 evt->xfer_iu->mad_common.status = IBMVFC_MAD_DRIVER_FAILED;
779
780         list_del(&evt->queue);
781         del_timer(&evt->timer);
782         ibmvfc_trc_end(evt);
783         evt->done(evt);
784 }
785
786 /**
787  * ibmvfc_purge_requests - Our virtual adapter just shut down. Purge any sent requests
788  * @vhost:              ibmvfc host struct
789  * @error_code: error code to fail requests with
790  *
791  * Return value:
792  *      none
793  **/
794 static void ibmvfc_purge_requests(struct ibmvfc_host *vhost, int error_code)
795 {
796         struct ibmvfc_event *evt, *pos;
797
798         ibmvfc_dbg(vhost, "Purging all requests\n");
799         list_for_each_entry_safe(evt, pos, &vhost->sent, queue)
800                 ibmvfc_fail_request(evt, error_code);
801 }
802
803 /**
804  * __ibmvfc_reset_host - Reset the connection to the server (no locking)
805  * @vhost:      struct ibmvfc host to reset
806  **/
807 static void __ibmvfc_reset_host(struct ibmvfc_host *vhost)
808 {
809         int rc;
810
811         scsi_block_requests(vhost->host);
812         ibmvfc_purge_requests(vhost, DID_ERROR);
813         if ((rc = ibmvfc_reset_crq(vhost)) ||
814             (rc = ibmvfc_send_crq_init(vhost)) ||
815             (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) {
816                 dev_err(vhost->dev, "Error after reset rc=%d\n", rc);
817                 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
818         } else
819                 ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
820 }
821
822 /**
823  * ibmvfc_reset_host - Reset the connection to the server
824  * @vhost:      struct ibmvfc host to reset
825  **/
826 static void ibmvfc_reset_host(struct ibmvfc_host *vhost)
827 {
828         unsigned long flags;
829
830         spin_lock_irqsave(vhost->host->host_lock, flags);
831         __ibmvfc_reset_host(vhost);
832         spin_unlock_irqrestore(vhost->host->host_lock, flags);
833 }
834
835 /**
836  * ibmvfc_retry_host_init - Retry host initialization if allowed
837  * @vhost:      ibmvfc host struct
838  *
839  **/
840 static void ibmvfc_retry_host_init(struct ibmvfc_host *vhost)
841 {
842         if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) {
843                 if (++vhost->init_retries > IBMVFC_MAX_INIT_RETRIES) {
844                         dev_err(vhost->dev,
845                                 "Host initialization retries exceeded. Taking adapter offline\n");
846                         ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
847                 } else if (vhost->init_retries == IBMVFC_MAX_INIT_RETRIES)
848                         __ibmvfc_reset_host(vhost);
849                 else
850                         ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
851         }
852
853         wake_up(&vhost->work_wait_q);
854 }
855
856 /**
857  * __ibmvfc_find_target - Find the specified scsi_target (no locking)
858  * @starget:    scsi target struct
859  *
860  * Return value:
861  *      ibmvfc_target struct / NULL if not found
862  **/
863 static struct ibmvfc_target *__ibmvfc_find_target(struct scsi_target *starget)
864 {
865         struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
866         struct ibmvfc_host *vhost = shost_priv(shost);
867         struct ibmvfc_target *tgt;
868
869         list_for_each_entry(tgt, &vhost->targets, queue)
870                 if (tgt->target_id == starget->id)
871                         return tgt;
872         return NULL;
873 }
874
875 /**
876  * ibmvfc_find_target - Find the specified scsi_target
877  * @starget:    scsi target struct
878  *
879  * Return value:
880  *      ibmvfc_target struct / NULL if not found
881  **/
882 static struct ibmvfc_target *ibmvfc_find_target(struct scsi_target *starget)
883 {
884         struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
885         struct ibmvfc_target *tgt;
886         unsigned long flags;
887
888         spin_lock_irqsave(shost->host_lock, flags);
889         tgt = __ibmvfc_find_target(starget);
890         spin_unlock_irqrestore(shost->host_lock, flags);
891         return tgt;
892 }
893
894 /**
895  * ibmvfc_get_host_speed - Get host port speed
896  * @shost:              scsi host struct
897  *
898  * Return value:
899  *      none
900  **/
901 static void ibmvfc_get_host_speed(struct Scsi_Host *shost)
902 {
903         struct ibmvfc_host *vhost = shost_priv(shost);
904         unsigned long flags;
905
906         spin_lock_irqsave(shost->host_lock, flags);
907         if (vhost->state == IBMVFC_ACTIVE) {
908                 switch (vhost->login_buf->resp.link_speed / 100) {
909                 case 1:
910                         fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
911                         break;
912                 case 2:
913                         fc_host_speed(shost) = FC_PORTSPEED_2GBIT;
914                         break;
915                 case 4:
916                         fc_host_speed(shost) = FC_PORTSPEED_4GBIT;
917                         break;
918                 case 8:
919                         fc_host_speed(shost) = FC_PORTSPEED_8GBIT;
920                         break;
921                 case 10:
922                         fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
923                         break;
924                 case 16:
925                         fc_host_speed(shost) = FC_PORTSPEED_16GBIT;
926                         break;
927                 default:
928                         ibmvfc_log(vhost, 3, "Unknown port speed: %ld Gbit\n",
929                                    vhost->login_buf->resp.link_speed / 100);
930                         fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
931                         break;
932                 }
933         } else
934                 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
935         spin_unlock_irqrestore(shost->host_lock, flags);
936 }
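/*
 * The NPIV login response appears to report link_speed in units of
 * 100 Mbit/s (inferred from the division by 100 and the Gbit cases above);
 * e.g. a value of 400 maps to FC_PORTSPEED_4GBIT.  Anything that does not
 * divide to a known rate is logged and reported as FC_PORTSPEED_UNKNOWN.
 */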
937
938 /**
939  * ibmvfc_get_host_port_state - Get host port state
940  * @shost:              scsi host struct
941  *
942  * Return value:
943  *      none
944  **/
945 static void ibmvfc_get_host_port_state(struct Scsi_Host *shost)
946 {
947         struct ibmvfc_host *vhost = shost_priv(shost);
948         unsigned long flags;
949
950         spin_lock_irqsave(shost->host_lock, flags);
951         switch (vhost->state) {
952         case IBMVFC_INITIALIZING:
953         case IBMVFC_ACTIVE:
954                 fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
955                 break;
956         case IBMVFC_LINK_DOWN:
957                 fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
958                 break;
959         case IBMVFC_LINK_DEAD:
960         case IBMVFC_HOST_OFFLINE:
961                 fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
962                 break;
963         case IBMVFC_HALTED:
964                 fc_host_port_state(shost) = FC_PORTSTATE_BLOCKED;
965                 break;
966         default:
967                 ibmvfc_log(vhost, 3, "Unknown port state: %d\n", vhost->state);
968                 fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
969                 break;
970         }
971         spin_unlock_irqrestore(shost->host_lock, flags);
972 }
973
974 /**
975  * ibmvfc_set_rport_dev_loss_tmo - Set rport's device loss timeout
976  * @rport:              rport struct
977  * @timeout:    timeout value
978  *
979  * Return value:
980  *      none
981  **/
982 static void ibmvfc_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout)
983 {
984         if (timeout)
985                 rport->dev_loss_tmo = timeout;
986         else
987                 rport->dev_loss_tmo = 1;
988 }
989
990 /**
991  * ibmvfc_get_starget_node_name - Get SCSI target's node name
992  * @starget:    scsi target struct
993  *
994  * Return value:
995  *      none
996  **/
997 static void ibmvfc_get_starget_node_name(struct scsi_target *starget)
998 {
999         struct ibmvfc_target *tgt = ibmvfc_find_target(starget);
1000         fc_starget_node_name(starget) = tgt ? tgt->ids.node_name : 0;
1001 }
1002
1003 /**
1004  * ibmvfc_get_starget_port_name - Get SCSI target's port name
1005  * @starget:    scsi target struct
1006  *
1007  * Return value:
1008  *      none
1009  **/
1010 static void ibmvfc_get_starget_port_name(struct scsi_target *starget)
1011 {
1012         struct ibmvfc_target *tgt = ibmvfc_find_target(starget);
1013         fc_starget_port_name(starget) = tgt ? tgt->ids.port_name : 0;
1014 }
1015
1016 /**
1017  * ibmvfc_get_starget_port_id - Get SCSI target's port ID
1018  * @starget:    scsi target struct
1019  *
1020  * Return value:
1021  *      none
1022  **/
1023 static void ibmvfc_get_starget_port_id(struct scsi_target *starget)
1024 {
1025         struct ibmvfc_target *tgt = ibmvfc_find_target(starget);
1026         fc_starget_port_id(starget) = tgt ? tgt->scsi_id : -1;
1027 }
1028
1029 /**
1030  * ibmvfc_wait_while_resetting - Wait while the host resets
1031  * @vhost:              ibmvfc host struct
1032  *
1033  * Return value:
1034  *      0 on success / other on failure
1035  **/
1036 static int ibmvfc_wait_while_resetting(struct ibmvfc_host *vhost)
1037 {
1038         long timeout = wait_event_timeout(vhost->init_wait_q,
1039                                           (vhost->state == IBMVFC_ACTIVE ||
1040                                            vhost->state == IBMVFC_HOST_OFFLINE ||
1041                                            vhost->state == IBMVFC_LINK_DEAD),
1042                                           (init_timeout * HZ));
1043
1044         return timeout ? 0 : -EIO;
1045 }
1046
1047 /**
1048  * ibmvfc_issue_fc_host_lip - Re-initiate link initialization
1049  * @shost:              scsi host struct
1050  *
1051  * Return value:
1052  *      0 on success / other on failure
1053  **/
1054 static int ibmvfc_issue_fc_host_lip(struct Scsi_Host *shost)
1055 {
1056         struct ibmvfc_host *vhost = shost_priv(shost);
1057
1058         dev_err(vhost->dev, "Initiating host LIP. Resetting connection\n");
1059         ibmvfc_reset_host(vhost);
1060         return ibmvfc_wait_while_resetting(vhost);
1061 }
1062
1063 /**
1064  * ibmvfc_gather_partition_info - Gather info about the LPAR
1065  *
1066  * Return value:
1067  *      none
1068  **/
1069 static void ibmvfc_gather_partition_info(struct ibmvfc_host *vhost)
1070 {
1071         struct device_node *rootdn;
1072         const char *name;
1073         const unsigned int *num;
1074
1075         rootdn = of_find_node_by_path("/");
1076         if (!rootdn)
1077                 return;
1078
1079         name = of_get_property(rootdn, "ibm,partition-name", NULL);
1080         if (name)
1081                 strncpy(vhost->partition_name, name, sizeof(vhost->partition_name));
1082         num = of_get_property(rootdn, "ibm,partition-no", NULL);
1083         if (num)
1084                 vhost->partition_number = *num;
1085         of_node_put(rootdn);
1086 }
1087
1088 /**
1089  * ibmvfc_set_login_info - Setup info for NPIV login
1090  * @vhost:      ibmvfc host struct
1091  *
1092  * Return value:
1093  *      none
1094  **/
1095 static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
1096 {
1097         struct ibmvfc_npiv_login *login_info = &vhost->login_info;
1098         struct device_node *of_node = vhost->dev->archdata.of_node;
1099         const char *location;
1100
1101         memset(login_info, 0, sizeof(*login_info));
1102
1103         login_info->ostype = IBMVFC_OS_LINUX;
1104         login_info->max_dma_len = IBMVFC_MAX_SECTORS << 9;
1105         login_info->max_payload = sizeof(struct ibmvfc_fcp_cmd_iu);
1106         login_info->max_response = sizeof(struct ibmvfc_fcp_rsp);
1107         login_info->partition_num = vhost->partition_number;
1108         login_info->vfc_frame_version = 1;
1109         login_info->fcp_version = 3;
1110         if (vhost->client_migrated)
1111                 login_info->flags = IBMVFC_CLIENT_MIGRATED;
1112
1113         login_info->max_cmds = max_requests + IBMVFC_NUM_INTERNAL_REQ;
1114         login_info->capabilities = IBMVFC_CAN_MIGRATE;
1115         login_info->async.va = vhost->async_crq.msg_token;
1116         login_info->async.len = vhost->async_crq.size;
1117         strncpy(login_info->partition_name, vhost->partition_name, IBMVFC_MAX_NAME);
1118         strncpy(login_info->device_name,
1119                 vhost->host->shost_gendev.bus_id, IBMVFC_MAX_NAME);
1120
1121         location = of_get_property(of_node, "ibm,loc-code", NULL);
1122         location = location ? location : vhost->dev->bus_id;
1123         strncpy(login_info->drc_name, location, IBMVFC_MAX_NAME);
1124 }
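/*
 * Rough sizing example for the login buffer filled in above (values follow
 * directly from the code, not from separate firmware documentation): with
 * the default max_requests, the adapter is asked for
 *
 *   max_cmds = IBMVFC_MAX_REQUESTS_DEFAULT + IBMVFC_NUM_INTERNAL_REQ
 *
 * command slots, and max_dma_len is IBMVFC_MAX_SECTORS * 512 bytes per
 * request.  The async va/len fields advertise the already-mapped async CRQ
 * so the VIOS can post unsolicited events into it.
 */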
1125
1126 /**
1127  * ibmvfc_init_event_pool - Allocates and initializes the event pool for a host
1128  * @vhost:      ibmvfc host who owns the event pool
1129  *
1130  * Returns zero on success.
1131  **/
1132 static int ibmvfc_init_event_pool(struct ibmvfc_host *vhost)
1133 {
1134         int i;
1135         struct ibmvfc_event_pool *pool = &vhost->pool;
1136
1137         ENTER;
1138         pool->size = max_requests + IBMVFC_NUM_INTERNAL_REQ;
1139         pool->events = kcalloc(pool->size, sizeof(*pool->events), GFP_KERNEL);
1140         if (!pool->events)
1141                 return -ENOMEM;
1142
1143         pool->iu_storage = dma_alloc_coherent(vhost->dev,
1144                                               pool->size * sizeof(*pool->iu_storage),
1145                                               &pool->iu_token, 0);
1146
1147         if (!pool->iu_storage) {
1148                 kfree(pool->events);
1149                 return -ENOMEM;
1150         }
1151
1152         for (i = 0; i < pool->size; ++i) {
1153                 struct ibmvfc_event *evt = &pool->events[i];
1154                 atomic_set(&evt->free, 1);
1155                 evt->crq.valid = 0x80;
1156                 evt->crq.ioba = pool->iu_token + (sizeof(*evt->xfer_iu) * i);
1157                 evt->xfer_iu = pool->iu_storage + i;
1158                 evt->vhost = vhost;
1159                 evt->ext_list = NULL;
1160                 list_add_tail(&evt->queue, &vhost->free);
1161         }
1162
1163         LEAVE;
1164         return 0;
1165 }
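/*
 * Layout sketch of the pool built above: events[] is a kcalloc'ed array of
 * host-side bookkeeping structs, while iu_storage is one coherent DMA
 * allocation holding the transfer IUs, indexed in lock step.  For entry i:
 *
 *   evt = &pool->events[i];
 *   evt->xfer_iu  = pool->iu_storage + i;
 *   evt->crq.ioba = pool->iu_token + i * sizeof(*evt->xfer_iu);
 *
 * so the ioba handed to the VIOS always points at the DMA image of that
 * event's IU.
 */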
1166
1167 /**
1168  * ibmvfc_free_event_pool - Frees memory of the event pool of a host
1169  * @vhost:      ibmvfc host who owns the event pool
1170  *
1171  **/
1172 static void ibmvfc_free_event_pool(struct ibmvfc_host *vhost)
1173 {
1174         int i;
1175         struct ibmvfc_event_pool *pool = &vhost->pool;
1176
1177         ENTER;
1178         for (i = 0; i < pool->size; ++i) {
1179                 list_del(&pool->events[i].queue);
1180                 BUG_ON(atomic_read(&pool->events[i].free) != 1);
1181                 if (pool->events[i].ext_list)
1182                         dma_pool_free(vhost->sg_pool,
1183                                       pool->events[i].ext_list,
1184                                       pool->events[i].ext_list_token);
1185         }
1186
1187         kfree(pool->events);
1188         dma_free_coherent(vhost->dev,
1189                           pool->size * sizeof(*pool->iu_storage),
1190                           pool->iu_storage, pool->iu_token);
1191         LEAVE;
1192 }
1193
1194 /**
1195  * ibmvfc_get_event - Gets the next free event in pool
1196  * @vhost:      ibmvfc host struct
1197  *
1198  * Returns a free event from the pool.
1199  **/
1200 static struct ibmvfc_event *ibmvfc_get_event(struct ibmvfc_host *vhost)
1201 {
1202         struct ibmvfc_event *evt;
1203
1204         BUG_ON(list_empty(&vhost->free));
1205         evt = list_entry(vhost->free.next, struct ibmvfc_event, queue);
1206         atomic_set(&evt->free, 0);
1207         list_del(&evt->queue);
1208         return evt;
1209 }
1210
1211 /**
1212  * ibmvfc_init_event - Initialize fields in an event struct that are always
1213  *                              required.
1214  * @evt:        The event
1215  * @done:       Routine to call when the event is responded to
1216  * @format:     CMD or MAD format
1217  **/
1218 static void ibmvfc_init_event(struct ibmvfc_event *evt,
1219                               void (*done) (struct ibmvfc_event *), u8 format)
1220 {
1221         evt->cmnd = NULL;
1222         evt->sync_iu = NULL;
1223         evt->crq.format = format;
1224         evt->done = done;
1225 }
1226
1227 /**
1228  * ibmvfc_map_sg_list - Initialize scatterlist
1229  * @scmd:       scsi command struct
1230  * @nseg:       number of scatterlist segments
1231  * @md: memory descriptor list to initialize
1232  **/
1233 static void ibmvfc_map_sg_list(struct scsi_cmnd *scmd, int nseg,
1234                                struct srp_direct_buf *md)
1235 {
1236         int i;
1237         struct scatterlist *sg;
1238
1239         scsi_for_each_sg(scmd, sg, nseg, i) {
1240                 md[i].va = sg_dma_address(sg);
1241                 md[i].len = sg_dma_len(sg);
1242                 md[i].key = 0;
1243         }
1244 }
1245
1246 /**
1247  * ibmvfc_map_sg_data - Maps dma for a scatterlist and initializes descriptor fields
1248  * @scmd:               Scsi_Cmnd with the scatterlist
1249  * @evt:                ibmvfc event struct
1250  * @vfc_cmd:    vfc_cmd that contains the memory descriptor
1251  * @dev:                device for which to map dma memory
1252  *
1253  * Returns:
1254  *      0 on success / non-zero on failure
1255  **/
1256 static int ibmvfc_map_sg_data(struct scsi_cmnd *scmd,
1257                               struct ibmvfc_event *evt,
1258                               struct ibmvfc_cmd *vfc_cmd, struct device *dev)
1259 {
1260
1261         int sg_mapped;
1262         struct srp_direct_buf *data = &vfc_cmd->ioba;
1263         struct ibmvfc_host *vhost = dev_get_drvdata(dev);
1264
1265         sg_mapped = scsi_dma_map(scmd);
1266         if (!sg_mapped) {
1267                 vfc_cmd->flags |= IBMVFC_NO_MEM_DESC;
1268                 return 0;
1269         } else if (unlikely(sg_mapped < 0)) {
1270                 if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
1271                         scmd_printk(KERN_ERR, scmd, "Failed to map DMA buffer for command\n");
1272                 return sg_mapped;
1273         }
1274
1275         if (scmd->sc_data_direction == DMA_TO_DEVICE) {
1276                 vfc_cmd->flags |= IBMVFC_WRITE;
1277                 vfc_cmd->iu.add_cdb_len |= IBMVFC_WRDATA;
1278         } else {
1279                 vfc_cmd->flags |= IBMVFC_READ;
1280                 vfc_cmd->iu.add_cdb_len |= IBMVFC_RDDATA;
1281         }
1282
1283         if (sg_mapped == 1) {
1284                 ibmvfc_map_sg_list(scmd, sg_mapped, data);
1285                 return 0;
1286         }
1287
1288         vfc_cmd->flags |= IBMVFC_SCATTERLIST;
1289
1290         if (!evt->ext_list) {
1291                 evt->ext_list = dma_pool_alloc(vhost->sg_pool, GFP_ATOMIC,
1292                                                &evt->ext_list_token);
1293
1294                 if (!evt->ext_list) {
1295                         scmd_printk(KERN_ERR, scmd, "Can't allocate memory for scatterlist\n");
1296                         return -ENOMEM;
1297                 }
1298         }
1299
1300         ibmvfc_map_sg_list(scmd, sg_mapped, evt->ext_list);
1301
1302         data->va = evt->ext_list_token;
1303         data->len = sg_mapped * sizeof(struct srp_direct_buf);
1304         data->key = 0;
1305         return 0;
1306 }
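/*
 * Descriptor selection above, summarized: zero mapped segments sets
 * IBMVFC_NO_MEM_DESC, a single segment is described in-line in vfc_cmd->ioba
 * as one srp_direct_buf, and multiple segments switch to IBMVFC_SCATTERLIST
 * with vfc_cmd->ioba pointing at a per-event list of srp_direct_buf entries
 * allocated from vhost->sg_pool.  A two-segment mapping would therefore end
 * up roughly as:
 *
 *   data->va  = evt->ext_list_token;
 *   data->len = 2 * sizeof(struct srp_direct_buf);
 *
 * with ext_list[0] and ext_list[1] holding the DMA address/length pairs.
 */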
1307
1308 /**
1309  * ibmvfc_timeout - Internal command timeout handler
1310  * @evt:        struct ibmvfc_event that timed out
1311  *
1312  * Called when an internally generated command times out
1313  **/
1314 static void ibmvfc_timeout(struct ibmvfc_event *evt)
1315 {
1316         struct ibmvfc_host *vhost = evt->vhost;
1317         dev_err(vhost->dev, "Command timed out (%p). Resetting connection\n", evt);
1318         ibmvfc_reset_host(vhost);
1319 }
1320
1321 /**
1322  * ibmvfc_send_event - Transforms event to u64 array and calls send_crq()
1323  * @evt:                event to be sent
1324  * @vhost:              ibmvfc host struct
1325  * @timeout:    timeout in seconds - 0 means do not time command
1326  *
1327  * Returns the value returned from ibmvfc_send_crq(). (Zero for success)
1328  **/
1329 static int ibmvfc_send_event(struct ibmvfc_event *evt,
1330                              struct ibmvfc_host *vhost, unsigned long timeout)
1331 {
1332         u64 *crq_as_u64 = (u64 *) &evt->crq;
1333         int rc;
1334
1335         /* Copy the IU into the transfer area */
1336         *evt->xfer_iu = evt->iu;
1337         if (evt->crq.format == IBMVFC_CMD_FORMAT)
1338                 evt->xfer_iu->cmd.tag = (u64)evt;
1339         else if (evt->crq.format == IBMVFC_MAD_FORMAT)
1340                 evt->xfer_iu->mad_common.tag = (u64)evt;
1341         else
1342                 BUG();
1343
1344         list_add_tail(&evt->queue, &vhost->sent);
1345         init_timer(&evt->timer);
1346
1347         if (timeout) {
1348                 evt->timer.data = (unsigned long) evt;
1349                 evt->timer.expires = jiffies + (timeout * HZ);
1350                 evt->timer.function = (void (*)(unsigned long))ibmvfc_timeout;
1351                 add_timer(&evt->timer);
1352         }
1353
1354         if ((rc = ibmvfc_send_crq(vhost, crq_as_u64[0], crq_as_u64[1]))) {
1355                 list_del(&evt->queue);
1356                 del_timer(&evt->timer);
1357
1358                 /* If send_crq returns H_CLOSED, return SCSI_MLQUEUE_HOST_BUSY.
1359                  * Firmware will send a CRQ with a transport event (0xFF) to
1360                  * tell this client what has happened to the transport. This
1361                  * will be handled in ibmvfc_handle_crq()
1362                  */
1363                 if (rc == H_CLOSED) {
1364                         if (printk_ratelimit())
1365                                 dev_warn(vhost->dev, "Send warning. Receive queue closed, will retry.\n");
1366                         if (evt->cmnd)
1367                                 scsi_dma_unmap(evt->cmnd);
1368                         ibmvfc_free_event(evt);
1369                         return SCSI_MLQUEUE_HOST_BUSY;
1370                 }
1371
1372                 dev_err(vhost->dev, "Send error (rc=%d)\n", rc);
1373                 if (evt->cmnd) {
1374                         evt->cmnd->result = DID_ERROR << 16;
1375                         evt->done = ibmvfc_scsi_eh_done;
1376                 } else
1377                         evt->xfer_iu->mad_common.status = IBMVFC_MAD_CRQ_ERROR;
1378
1379                 evt->done(evt);
1380         } else
1381                 ibmvfc_trc_start(evt);
1382
1383         return 0;
1384 }
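/*
 * Tag round trip, as implemented above: the event's own kernel address is
 * stored in the outgoing IU's tag field, and the VIOS echoes it back in the
 * response CRQ, letting the interrupt path recover the event (conceptually
 * evt = (struct ibmvfc_event *)crq->ioba in the response handler, which lies
 * outside this excerpt) and sanity-check it via ibmvfc_valid_event().  The
 * optional timer gives internally generated commands a watchdog that resets
 * the whole connection on expiry.
 */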
1385
1386 /**
1387  * ibmvfc_log_error - Log an error for the failed command if appropriate
1388  * @evt:        ibmvfc event to log
1389  *
1390  **/
1391 static void ibmvfc_log_error(struct ibmvfc_event *evt)
1392 {
1393         struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
1394         struct ibmvfc_host *vhost = evt->vhost;
1395         struct ibmvfc_fcp_rsp *rsp = &vfc_cmd->rsp;
1396         struct scsi_cmnd *cmnd = evt->cmnd;
1397         const char *err = unknown_error;
1398         int index = ibmvfc_get_err_index(vfc_cmd->status, vfc_cmd->error);
1399         int logerr = 0;
1400         int rsp_code = 0;
1401
1402         if (index >= 0) {
1403                 logerr = cmd_status[index].log;
1404                 err = cmd_status[index].name;
1405         }
1406
1407         if (!logerr && (vhost->log_level <= IBMVFC_DEFAULT_LOG_LEVEL))
1408                 return;
1409
1410         if (rsp->flags & FCP_RSP_LEN_VALID)
1411                 rsp_code = rsp->data.info.rsp_code;
1412
1413         scmd_printk(KERN_ERR, cmnd, "Command (%02X) failed: %s (%x:%x) "
1414                     "flags: %x fcp_rsp: %x, resid=%d, scsi_status: %x\n",
1415                     cmnd->cmnd[0], err, vfc_cmd->status, vfc_cmd->error,
1416                     rsp->flags, rsp_code, scsi_get_resid(cmnd), rsp->scsi_status);
1417 }
1418
1419 /**
1420  * ibmvfc_scsi_done - Handle responses from commands
1421  * @evt:        ibmvfc event to be handled
1422  *
1423  * Used as a callback when sending scsi cmds.
1424  **/
1425 static void ibmvfc_scsi_done(struct ibmvfc_event *evt)
1426 {
1427         struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
1428         struct ibmvfc_fcp_rsp *rsp = &vfc_cmd->rsp;
1429         struct scsi_cmnd *cmnd = evt->cmnd;
1430         int rsp_len = 0;
1431         int sense_len = rsp->fcp_sense_len;
1432
1433         if (cmnd) {
1434                 if (vfc_cmd->response_flags & IBMVFC_ADAPTER_RESID_VALID)
1435                         scsi_set_resid(cmnd, vfc_cmd->adapter_resid);
1436                 else if (rsp->flags & FCP_RESID_UNDER)
1437                         scsi_set_resid(cmnd, rsp->fcp_resid);
1438                 else
1439                         scsi_set_resid(cmnd, 0);
1440
1441                 if (vfc_cmd->status) {
1442                         cmnd->result = ibmvfc_get_err_result(vfc_cmd);
1443
1444                         if (rsp->flags & FCP_RSP_LEN_VALID)
1445                                 rsp_len = rsp->fcp_rsp_len;
1446                         if ((sense_len + rsp_len) > SCSI_SENSE_BUFFERSIZE)
1447                                 sense_len = SCSI_SENSE_BUFFERSIZE - rsp_len;
1448                         if ((rsp->flags & FCP_SNS_LEN_VALID) && rsp->fcp_sense_len)
1449                                 memcpy(cmnd->sense_buffer, rsp->data.sense + rsp_len, sense_len);
1450
1451                         ibmvfc_log_error(evt);
1452                 }
1453
1454                 if (!cmnd->result &&
1455                     (scsi_bufflen(cmnd) - scsi_get_resid(cmnd) < cmnd->underflow))
1456                         cmnd->result = (DID_ERROR << 16);
1457
1458                 scsi_dma_unmap(cmnd);
1459                 cmnd->scsi_done(cmnd);
1460         }
1461
1462         ibmvfc_free_event(evt);
1463 }
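/*
 * Sense data handling above, illustrated: when FCP_RSP_LEN_VALID is set, the
 * FCP response information occupies the first fcp_rsp_len bytes of
 * rsp->data, so the sense bytes start at rsp->data.sense + rsp_len and are
 * truncated to fit SCSI_SENSE_BUFFERSIZE.  Residual handling prefers the
 * adapter-reported residual, then the FCP underrun residual, and finally
 * zero.
 */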
1464
1465 /**
1466  * ibmvfc_host_chkready - Check if the host can accept commands
1467  * @vhost:       struct ibmvfc host
1468  *
1469  * Returns:
1470  *      1 if host can accept command / 0 if not
1471  **/
1472 static inline int ibmvfc_host_chkready(struct ibmvfc_host *vhost)
1473 {
1474         int result = 0;
1475
1476         switch (vhost->state) {
1477         case IBMVFC_LINK_DEAD:
1478         case IBMVFC_HOST_OFFLINE:
1479                 result = DID_NO_CONNECT << 16;
1480                 break;
1481         case IBMVFC_NO_CRQ:
1482         case IBMVFC_INITIALIZING:
1483         case IBMVFC_HALTED:
1484         case IBMVFC_LINK_DOWN:
1485                 result = DID_REQUEUE << 16;
1486                 break;
1487         case IBMVFC_ACTIVE:
1488                 result = 0;
1489                 break;
1490         };
1491
1492         return result;
1493 }
1494
1495 /**
1496  * ibmvfc_queuecommand - The queuecommand function of the scsi template
1497  * @cmnd:       struct scsi_cmnd to be executed
1498  * @done:       Callback function to be called when cmnd is completed
1499  *
1500  * Returns:
1501  *      0 on success / other on failure
1502  **/
1503 static int ibmvfc_queuecommand(struct scsi_cmnd *cmnd,
1504                                void (*done) (struct scsi_cmnd *))
1505 {
1506         struct ibmvfc_host *vhost = shost_priv(cmnd->device->host);
1507         struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
1508         struct ibmvfc_cmd *vfc_cmd;
1509         struct ibmvfc_event *evt;
1510         u8 tag[2];
1511         int rc;
1512
1513         if (unlikely((rc = fc_remote_port_chkready(rport))) ||
1514             unlikely((rc = ibmvfc_host_chkready(vhost)))) {
1515                 cmnd->result = rc;
1516                 done(cmnd);
1517                 return 0;
1518         }
1519
1520         cmnd->result = (DID_OK << 16);
1521         evt = ibmvfc_get_event(vhost);
1522         ibmvfc_init_event(evt, ibmvfc_scsi_done, IBMVFC_CMD_FORMAT);
1523         evt->cmnd = cmnd;
1524         cmnd->scsi_done = done;
1525         vfc_cmd = &evt->iu.cmd;
1526         memset(vfc_cmd, 0, sizeof(*vfc_cmd));
1527         vfc_cmd->resp.va = (u64)evt->crq.ioba + offsetof(struct ibmvfc_cmd, rsp);
1528         vfc_cmd->resp.len = sizeof(vfc_cmd->rsp);
1529         vfc_cmd->frame_type = IBMVFC_SCSI_FCP_TYPE;
1530         vfc_cmd->payload_len = sizeof(vfc_cmd->iu);
1531         vfc_cmd->resp_len = sizeof(vfc_cmd->rsp);
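        /*
         * The cancel key is the per-device task set value assigned in
         * ibmvfc_slave_alloc(); ibmvfc_cancel_all() uses the same key to
         * cancel every command outstanding to this device.
         */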
1532         vfc_cmd->cancel_key = (unsigned long)cmnd->device->hostdata;
1533         vfc_cmd->tgt_scsi_id = rport->port_id;
1534         if ((rport->supported_classes & FC_COS_CLASS3) &&
1535             (fc_host_supported_classes(vhost->host) & FC_COS_CLASS3))
1536                 vfc_cmd->flags = IBMVFC_CLASS_3_ERR;
1537         vfc_cmd->iu.xfer_len = scsi_bufflen(cmnd);
1538         int_to_scsilun(cmnd->device->lun, &vfc_cmd->iu.lun);
1539         memcpy(vfc_cmd->iu.cdb, cmnd->cmnd, cmnd->cmd_len);
1540
1541         if (scsi_populate_tag_msg(cmnd, tag)) {
1542                 vfc_cmd->task_tag = tag[1];
1543                 switch (tag[0]) {
1544                 case MSG_SIMPLE_TAG:
1545                         vfc_cmd->iu.pri_task_attr = IBMVFC_SIMPLE_TASK;
1546                         break;
1547                 case MSG_HEAD_TAG:
1548                         vfc_cmd->iu.pri_task_attr = IBMVFC_HEAD_OF_QUEUE;
1549                         break;
1550                 case MSG_ORDERED_TAG:
1551                         vfc_cmd->iu.pri_task_attr = IBMVFC_ORDERED_TASK;
1552                         break;
1553                 };
1554         }
1555
1556         if (likely(!(rc = ibmvfc_map_sg_data(cmnd, evt, vfc_cmd, vhost->dev))))
1557                 return ibmvfc_send_event(evt, vhost, 0);
1558
1559         ibmvfc_free_event(evt);
1560         if (rc == -ENOMEM)
1561                 return SCSI_MLQUEUE_HOST_BUSY;
1562
1563         if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
1564                 scmd_printk(KERN_ERR, cmnd,
1565                             "Failed to map DMA buffer for command. rc=%d\n", rc);
1566
1567         cmnd->result = DID_ERROR << 16;
1568         done(cmnd);
1569         return 0;
1570 }
1571
1572 /**
1573  * ibmvfc_sync_completion - Signal that a synchronous command has completed
1574  * @evt:        ibmvfc event struct
1575  *
1576  **/
1577 static void ibmvfc_sync_completion(struct ibmvfc_event *evt)
1578 {
1579         /* copy the response back */
1580         if (evt->sync_iu)
1581                 *evt->sync_iu = *evt->xfer_iu;
1582
1583         complete(&evt->comp);
1584 }
1585
1586 /**
1587  * ibmvfc_reset_device - Reset the device with the specified reset type
1588  * @sdev:       scsi device to reset
1589  * @type:       reset type
1590  * @desc:       reset type description for log messages
1591  *
1592  * Returns:
1593  *      0 on success / other on failure
1594  **/
1595 static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc)
1596 {
1597         struct ibmvfc_host *vhost = shost_priv(sdev->host);
1598         struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
1599         struct ibmvfc_cmd *tmf;
1600         struct ibmvfc_event *evt;
1601         union ibmvfc_iu rsp_iu;
1602         struct ibmvfc_fcp_rsp *fc_rsp = &rsp_iu.cmd.rsp;
1603         int rsp_rc = -EBUSY;
1604         unsigned long flags;
1605         int rsp_code = 0;
1606
1607         spin_lock_irqsave(vhost->host->host_lock, flags);
1608         if (vhost->state == IBMVFC_ACTIVE) {
1609                 evt = ibmvfc_get_event(vhost);
1610                 ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
1611
1612                 tmf = &evt->iu.cmd;
1613                 memset(tmf, 0, sizeof(*tmf));
1614                 tmf->resp.va = (u64)evt->crq.ioba + offsetof(struct ibmvfc_cmd, rsp);
1615                 tmf->resp.len = sizeof(tmf->rsp);
1616                 tmf->frame_type = IBMVFC_SCSI_FCP_TYPE;
1617                 tmf->payload_len = sizeof(tmf->iu);
1618                 tmf->resp_len = sizeof(tmf->rsp);
1619                 tmf->cancel_key = (unsigned long)sdev->hostdata;
1620                 tmf->tgt_scsi_id = rport->port_id;
1621                 int_to_scsilun(sdev->lun, &tmf->iu.lun);
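                /*
                 * The reset is sent as a task management request: no data
                 * descriptors are supplied and tmf_flags selects the reset
                 * type (LUN or target reset) passed in by the caller.
                 */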
1622                 tmf->flags = (IBMVFC_NO_MEM_DESC | IBMVFC_TMF);
1623                 tmf->iu.tmf_flags = type;
1624                 evt->sync_iu = &rsp_iu;
1625
1626                 init_completion(&evt->comp);
1627                 rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
1628         }
1629         spin_unlock_irqrestore(vhost->host->host_lock, flags);
1630
1631         if (rsp_rc != 0) {
1632                 sdev_printk(KERN_ERR, sdev, "Failed to send %s reset event. rc=%d\n",
1633                             desc, rsp_rc);
1634                 return -EIO;
1635         }
1636
1637         sdev_printk(KERN_INFO, sdev, "Resetting %s\n", desc);
1638         wait_for_completion(&evt->comp);
1639
1640         if (rsp_iu.cmd.status) {
1641                 if (fc_rsp->flags & FCP_RSP_LEN_VALID)
1642                         rsp_code = fc_rsp->data.info.rsp_code;
1643
1644                 sdev_printk(KERN_ERR, sdev, "%s reset failed: %s (%x:%x) "
1645                             "flags: %x fcp_rsp: %x, scsi_status: %x\n",
1646                             desc, ibmvfc_get_cmd_error(rsp_iu.cmd.status, rsp_iu.cmd.error),
1647                             rsp_iu.cmd.status, rsp_iu.cmd.error, fc_rsp->flags, rsp_code,
1648                             fc_rsp->scsi_status);
1649                 rsp_rc = -EIO;
1650         } else
1651                 sdev_printk(KERN_INFO, sdev, "%s reset successful\n", desc);
1652
1653         spin_lock_irqsave(vhost->host->host_lock, flags);
1654         ibmvfc_free_event(evt);
1655         spin_unlock_irqrestore(vhost->host->host_lock, flags);
1656         return rsp_rc;
1657 }
1658
1659 /**
1660  * ibmvfc_abort_task_set - Abort outstanding commands to the device
1661  * @sdev:       scsi device to abort commands
1662  *
1663  * This sends an Abort Task Set to the VIOS for the specified device. This does
1664  * NOT send any cancel to the VIOS. That must be done separately.
1665  *
1666  * Returns:
1667  *      0 on success / other on failure
1668  **/
1669 static int ibmvfc_abort_task_set(struct scsi_device *sdev)
1670 {
1671         struct ibmvfc_host *vhost = shost_priv(sdev->host);
1672         struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
1673         struct ibmvfc_cmd *tmf;
1674         struct ibmvfc_event *evt, *found_evt;
1675         union ibmvfc_iu rsp_iu;
1676         struct ibmvfc_fcp_rsp *fc_rsp = &rsp_iu.cmd.rsp;
1677         int rsp_rc = -EBUSY;
1678         unsigned long flags;
1679         int rsp_code = 0;
1680
1681         spin_lock_irqsave(vhost->host->host_lock, flags);
1682         found_evt = NULL;
1683         list_for_each_entry(evt, &vhost->sent, queue) {
1684                 if (evt->cmnd && evt->cmnd->device == sdev) {
1685                         found_evt = evt;
1686                         break;
1687                 }
1688         }
1689
1690         if (!found_evt) {
1691                 if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
1692                         sdev_printk(KERN_INFO, sdev, "No events found to abort\n");
1693                 spin_unlock_irqrestore(vhost->host->host_lock, flags);
1694                 return 0;
1695         }
1696
1697         if (vhost->state == IBMVFC_ACTIVE) {
1698                 evt = ibmvfc_get_event(vhost);
1699                 ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
1700
1701                 tmf = &evt->iu.cmd;
1702                 memset(tmf, 0, sizeof(*tmf));
1703                 tmf->resp.va = (u64)evt->crq.ioba + offsetof(struct ibmvfc_cmd, rsp);
1704                 tmf->resp.len = sizeof(tmf->rsp);
1705                 tmf->frame_type = IBMVFC_SCSI_FCP_TYPE;
1706                 tmf->payload_len = sizeof(tmf->iu);
1707                 tmf->resp_len = sizeof(tmf->rsp);
1708                 tmf->cancel_key = (unsigned long)sdev->hostdata;
1709                 tmf->tgt_scsi_id = rport->port_id;
1710                 int_to_scsilun(sdev->lun, &tmf->iu.lun);
1711                 tmf->flags = (IBMVFC_NO_MEM_DESC | IBMVFC_TMF);
1712                 tmf->iu.tmf_flags = IBMVFC_ABORT_TASK_SET;
1713                 evt->sync_iu = &rsp_iu;
1714
1715                 init_completion(&evt->comp);
1716                 rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
1717         }
1718
1719         spin_unlock_irqrestore(vhost->host->host_lock, flags);
1720
1721         if (rsp_rc != 0) {
1722                 sdev_printk(KERN_ERR, sdev, "Failed to send abort. rc=%d\n", rsp_rc);
1723                 return -EIO;
1724         }
1725
1726         sdev_printk(KERN_INFO, sdev, "Aborting outstanding commands\n");
1727         wait_for_completion(&evt->comp);
1728
1729         if (rsp_iu.cmd.status) {
1730                 if (fc_rsp->flags & FCP_RSP_LEN_VALID)
1731                         rsp_code = fc_rsp->data.info.rsp_code;
1732
1733                 sdev_printk(KERN_ERR, sdev, "Abort failed: %s (%x:%x) "
1734                             "flags: %x fcp_rsp: %x, scsi_status: %x\n",
1735                             ibmvfc_get_cmd_error(rsp_iu.cmd.status, rsp_iu.cmd.error),
1736                             rsp_iu.cmd.status, rsp_iu.cmd.error, fc_rsp->flags, rsp_code,
1737                             fc_rsp->scsi_status);
1738                 rsp_rc = -EIO;
1739         } else
1740                 sdev_printk(KERN_INFO, sdev, "Abort successful\n");
1741
1742         spin_lock_irqsave(vhost->host->host_lock, flags);
1743         ibmvfc_free_event(evt);
1744         spin_unlock_irqrestore(vhost->host->host_lock, flags);
1745         return rsp_rc;
1746 }
1747
1748 /**
1749  * ibmvfc_cancel_all - Cancel all outstanding commands to the device
1750  * @sdev:       scsi device to cancel commands
1751  * @type:       type of error recovery being performed
1752  *
1753  * This sends a cancel to the VIOS for the specified device. This does
1754  * NOT send any abort to the actual device. That must be done separately.
1755  *
1756  * Returns:
1757  *      0 on success / other on failure
1758  **/
1759 static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
1760 {
1761         struct ibmvfc_host *vhost = shost_priv(sdev->host);
1762         struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
1763         struct ibmvfc_tmf *tmf;
1764         struct ibmvfc_event *evt, *found_evt;
1765         union ibmvfc_iu rsp;
1766         int rsp_rc = -EBUSY;
1767         unsigned long flags;
1768         u16 status;
1769
1770         ENTER;
1771         spin_lock_irqsave(vhost->host->host_lock, flags);
1772         found_evt = NULL;
1773         list_for_each_entry(evt, &vhost->sent, queue) {
1774                 if (evt->cmnd && evt->cmnd->device == sdev) {
1775                         found_evt = evt;
1776                         break;
1777                 }
1778         }
1779
1780         if (!found_evt) {
1781                 if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
1782                         sdev_printk(KERN_INFO, sdev, "No events found to cancel\n");
1783                 spin_unlock_irqrestore(vhost->host->host_lock, flags);
1784                 return 0;
1785         }
1786
1787         if (vhost->state == IBMVFC_ACTIVE) {
1788                 evt = ibmvfc_get_event(vhost);
1789                 ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
1790
1791                 tmf = &evt->iu.tmf;
1792                 memset(tmf, 0, sizeof(*tmf));
1793                 tmf->common.version = 1;
1794                 tmf->common.opcode = IBMVFC_TMF_MAD;
1795                 tmf->common.length = sizeof(*tmf);
1796                 tmf->scsi_id = rport->port_id;
1797                 int_to_scsilun(sdev->lun, &tmf->lun);
1798                 tmf->flags = (type | IBMVFC_TMF_LUA_VALID);
1799                 tmf->cancel_key = (unsigned long)sdev->hostdata;
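                /*
                 * cancel_key selects the outstanding commands to cancel (the
                 * per-device task set value); my_cancel_key appears to tag the
                 * cancel request itself so it can be identified in turn.
                 */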
1800                 tmf->my_cancel_key = (IBMVFC_TMF_CANCEL_KEY | (unsigned long)sdev->hostdata);
1801
1802                 evt->sync_iu = &rsp;
1803                 init_completion(&evt->comp);
1804                 rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
1805         }
1806
1807         spin_unlock_irqrestore(vhost->host->host_lock, flags);
1808
1809         if (rsp_rc != 0) {
1810                 sdev_printk(KERN_ERR, sdev, "Failed to send cancel event. rc=%d\n", rsp_rc);
1811                 return -EIO;
1812         }
1813
1814         sdev_printk(KERN_INFO, sdev, "Cancelling outstanding commands.\n");
1815
1816         wait_for_completion(&evt->comp);
1817         status = rsp.mad_common.status;
1818         spin_lock_irqsave(vhost->host->host_lock, flags);
1819         ibmvfc_free_event(evt);
1820         spin_unlock_irqrestore(vhost->host->host_lock, flags);
1821
1822         if (status != IBMVFC_MAD_SUCCESS) {
1823                 sdev_printk(KERN_WARNING, sdev, "Cancel failed with rc=%x\n", status);
1824                 return -EIO;
1825         }
1826
1827         sdev_printk(KERN_INFO, sdev, "Successfully cancelled outstanding commands\n");
1828         return 0;
1829 }
1830
1831 /**
1832  * ibmvfc_eh_abort_handler - Abort a command
1833  * @cmd:        scsi command to abort
1834  *
1835  * Returns:
1836  *      SUCCESS / FAILED
1837  **/
1838 static int ibmvfc_eh_abort_handler(struct scsi_cmnd *cmd)
1839 {
1840         struct ibmvfc_host *vhost = shost_priv(cmd->device->host);
1841         struct ibmvfc_event *evt, *pos;
1842         int cancel_rc, abort_rc;
1843         unsigned long flags;
1844
1845         ENTER;
1846         ibmvfc_wait_while_resetting(vhost);
1847         cancel_rc = ibmvfc_cancel_all(cmd->device, IBMVFC_TMF_ABORT_TASK_SET);
1848         abort_rc = ibmvfc_abort_task_set(cmd->device);
1849
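        /*
         * Only if both the VIOS cancel and the abort task set succeeded is it
         * safe to complete the outstanding requests with DID_ABORT.
         */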
1850         if (!cancel_rc && !abort_rc) {
1851                 spin_lock_irqsave(vhost->host->host_lock, flags);
1852                 list_for_each_entry_safe(evt, pos, &vhost->sent, queue) {
1853                         if (evt->cmnd && evt->cmnd->device == cmd->device)
1854                                 ibmvfc_fail_request(evt, DID_ABORT);
1855                 }
1856                 spin_unlock_irqrestore(vhost->host->host_lock, flags);
1857                 LEAVE;
1858                 return SUCCESS;
1859         }
1860
1861         LEAVE;
1862         return FAILED;
1863 }
1864
1865 /**
1866  * ibmvfc_eh_device_reset_handler - Reset a single LUN
1867  * @cmd:        scsi command struct
1868  *
1869  * Returns:
1870  *      SUCCESS / FAILED
1871  **/
1872 static int ibmvfc_eh_device_reset_handler(struct scsi_cmnd *cmd)
1873 {
1874         struct ibmvfc_host *vhost = shost_priv(cmd->device->host);
1875         struct ibmvfc_event *evt, *pos;
1876         int cancel_rc, reset_rc;
1877         unsigned long flags;
1878
1879         ENTER;
1880         ibmvfc_wait_while_resetting(vhost);
1881         cancel_rc = ibmvfc_cancel_all(cmd->device, IBMVFC_TMF_LUN_RESET);
1882         reset_rc = ibmvfc_reset_device(cmd->device, IBMVFC_LUN_RESET, "LUN");
1883
1884         if (!cancel_rc && !reset_rc) {
1885                 spin_lock_irqsave(vhost->host->host_lock, flags);
1886                 list_for_each_entry_safe(evt, pos, &vhost->sent, queue) {
1887                         if (evt->cmnd && evt->cmnd->device == cmd->device)
1888                                 ibmvfc_fail_request(evt, DID_ABORT);
1889                 }
1890                 spin_unlock_irqrestore(vhost->host->host_lock, flags);
1891                 LEAVE;
1892                 return SUCCESS;
1893         }
1894
1895         LEAVE;
1896         return FAILED;
1897 }
1898
1899 /**
1900  * ibmvfc_dev_cancel_all - Device iterated cancel all function
1901  * @sdev:       scsi device struct
1902  * @data:       return code
1903  *
1904  **/
1905 static void ibmvfc_dev_cancel_all(struct scsi_device *sdev, void *data)
1906 {
1907         unsigned long *rc = data;
1908         *rc |= ibmvfc_cancel_all(sdev, IBMVFC_TMF_TGT_RESET);
1909 }
1910
1911 /**
1912  * ibmvfc_dev_abort_all - Device iterated abort task set function
1913  * @sdev:       scsi device struct
1914  * @data:       return code
1915  *
1916  **/
1917 static void ibmvfc_dev_abort_all(struct scsi_device *sdev, void *data)
1918 {
1919         unsigned long *rc = data;
1920         *rc |= ibmvfc_abort_task_set(sdev);
1921 }
1922
1923 /**
1924  * ibmvfc_eh_target_reset_handler - Reset the target
1925  * @cmd:        scsi command struct
1926  *
1927  * Returns:
1928  *      SUCCESS / FAILED
1929  **/
1930 static int ibmvfc_eh_target_reset_handler(struct scsi_cmnd *cmd)
1931 {
1932         struct ibmvfc_host *vhost = shost_priv(cmd->device->host);
1933         struct scsi_target *starget = scsi_target(cmd->device);
1934         struct ibmvfc_event *evt, *pos;
1935         int reset_rc;
1936         unsigned long cancel_rc = 0;
1937         unsigned long flags;
1938
1939         ENTER;
1940         ibmvfc_wait_while_resetting(vhost);
1941         starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all);
1942         reset_rc = ibmvfc_reset_device(cmd->device, IBMVFC_TARGET_RESET, "target");
1943
1944         if (!cancel_rc && !reset_rc) {
1945                 spin_lock_irqsave(vhost->host->host_lock, flags);
1946                 list_for_each_entry_safe(evt, pos, &vhost->sent, queue) {
1947                         if (evt->cmnd && scsi_target(evt->cmnd->device) == starget)
1948                                 ibmvfc_fail_request(evt, DID_ABORT);
1949                 }
1950                 spin_unlock_irqrestore(vhost->host->host_lock, flags);
1951                 LEAVE;
1952                 return SUCCESS;
1953         }
1954
1955         LEAVE;
1956         return FAILED;
1957 }
1958
1959 /**
1960  * ibmvfc_eh_host_reset_handler - Reset the connection to the server
1961  * @cmd:        struct scsi_cmnd having problems
1962  *
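 * Returns:
 *      SUCCESS / FAILED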
1963  **/
1964 static int ibmvfc_eh_host_reset_handler(struct scsi_cmnd *cmd)
1965 {
1966         int rc;
1967         struct ibmvfc_host *vhost = shost_priv(cmd->device->host);
1968
1969         dev_err(vhost->dev, "Resetting connection due to error recovery\n");
1970         rc = ibmvfc_issue_fc_host_lip(vhost->host);
1971         return rc ? FAILED : SUCCESS;
1972 }
1973
1974 /**
1975  * ibmvfc_terminate_rport_io - Terminate all pending I/O to the rport.
1976  * @rport:              rport struct
1977  *
1978  * Return value:
1979  *      none
1980  **/
1981 static void ibmvfc_terminate_rport_io(struct fc_rport *rport)
1982 {
1983         struct scsi_target *starget = to_scsi_target(&rport->dev);
1984         struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1985         struct ibmvfc_host *vhost = shost_priv(shost);
1986         struct ibmvfc_event *evt, *pos;
1987         unsigned long cancel_rc = 0;
1988         unsigned long abort_rc = 0;
1989         unsigned long flags;
1990
1991         ENTER;
1992         starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all);
1993         starget_for_each_device(starget, &abort_rc, ibmvfc_dev_abort_all);
1994
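        /*
         * If both the cancel and the abort succeeded, fail the affected
         * requests back to the midlayer; otherwise fall back to resetting
         * the link with a host LIP.
         */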
1995         if (!cancel_rc && !abort_rc) {
1996                 spin_lock_irqsave(shost->host_lock, flags);
1997                 list_for_each_entry_safe(evt, pos, &vhost->sent, queue) {
1998                         if (evt->cmnd && scsi_target(evt->cmnd->device) == starget)
1999                                 ibmvfc_fail_request(evt, DID_ABORT);
2000                 }
2001                 spin_unlock_irqrestore(shost->host_lock, flags);
2002         } else
2003                 ibmvfc_issue_fc_host_lip(shost);
2004
2005         scsi_target_unblock(&rport->dev);
2006         LEAVE;
2007 }
2008
2009 static const struct {
2010         enum ibmvfc_async_event ae;
2011         const char *desc;
2012 } ae_desc [] = {
2013         { IBMVFC_AE_ELS_PLOGI,          "PLOGI" },
2014         { IBMVFC_AE_ELS_LOGO,           "LOGO" },
2015         { IBMVFC_AE_ELS_PRLO,           "PRLO" },
2016         { IBMVFC_AE_SCN_NPORT,          "N-Port SCN" },
2017         { IBMVFC_AE_SCN_GROUP,          "Group SCN" },
2018         { IBMVFC_AE_SCN_DOMAIN,         "Domain SCN" },
2019         { IBMVFC_AE_SCN_FABRIC,         "Fabric SCN" },
2020         { IBMVFC_AE_LINK_UP,            "Link Up" },
2021         { IBMVFC_AE_LINK_DOWN,          "Link Down" },
2022         { IBMVFC_AE_LINK_DEAD,          "Link Dead" },
2023         { IBMVFC_AE_HALT,                       "Halt" },
2024         { IBMVFC_AE_RESUME,             "Resume" },
2025         { IBMVFC_AE_ADAPTER_FAILED,     "Adapter Failed" },
2026 };
2027
2028 static const char *unknown_ae = "Unknown async";
2029
2030 /**
2031  * ibmvfc_get_ae_desc - Get text description for async event
2032  * @ae: async event
2033  *
2034  **/
2035 static const char *ibmvfc_get_ae_desc(u64 ae)
2036 {
2037         int i;
2038
2039         for (i = 0; i < ARRAY_SIZE(ae_desc); i++)
2040                 if (ae_desc[i].ae == ae)
2041                         return ae_desc[i].desc;
2042
2043         return unknown_ae;
2044 }
2045
2046 /**
2047  * ibmvfc_handle_async - Handle an async event from the adapter
2048  * @crq:        crq to process
2049  * @vhost:      ibmvfc host struct
2050  *
2051  **/
2052 static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
2053                                 struct ibmvfc_host *vhost)
2054 {
2055         const char *desc = ibmvfc_get_ae_desc(crq->event);
2056
2057         ibmvfc_log(vhost, 2, "%s event received\n", desc);
2058
2059         switch (crq->event) {
2060         case IBMVFC_AE_LINK_UP:
2061         case IBMVFC_AE_RESUME:
2062                 vhost->events_to_log |= IBMVFC_AE_LINKUP;
2063                 ibmvfc_init_host(vhost);
2064                 break;
2065         case IBMVFC_AE_SCN_FABRIC:
2066                 vhost->events_to_log |= IBMVFC_AE_RSCN;
2067                 ibmvfc_init_host(vhost);
2068                 break;
2069         case IBMVFC_AE_SCN_NPORT:
2070         case IBMVFC_AE_SCN_GROUP:
2071         case IBMVFC_AE_SCN_DOMAIN:
2072                 vhost->events_to_log |= IBMVFC_AE_RSCN;
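                /* fall through - SCN events also trigger a host reinit */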
2073         case IBMVFC_AE_ELS_LOGO:
2074         case IBMVFC_AE_ELS_PRLO:
2075         case IBMVFC_AE_ELS_PLOGI:
2076                 ibmvfc_reinit_host(vhost);
2077                 break;
2078         case IBMVFC_AE_LINK_DOWN:
2079         case IBMVFC_AE_ADAPTER_FAILED:
2080                 ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
2081                 break;
2082         case IBMVFC_AE_LINK_DEAD:
2083                 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
2084                 break;
2085         case IBMVFC_AE_HALT:
2086                 ibmvfc_link_down(vhost, IBMVFC_HALTED);
2087                 break;
2088         default:
2089                 dev_err(vhost->dev, "Unknown async event received: %ld\n", crq->event);
2090                 break;
2091         };
2092 }
2093
2094 /**
2095  * ibmvfc_handle_crq - Handles and frees received events in the CRQ
2096  * @crq:        Command/Response queue
2097  * @vhost:      ibmvfc host struct
2098  *
2099  **/
2100 static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost)
2101 {
2102         long rc;
2103         struct ibmvfc_event *evt = (struct ibmvfc_event *)crq->ioba;
2104
2105         switch (crq->valid) {
2106         case IBMVFC_CRQ_INIT_RSP:
2107                 switch (crq->format) {
2108                 case IBMVFC_CRQ_INIT:
2109                         dev_info(vhost->dev, "Partner initialized\n");
2110                         /* Send back a response */
2111                         rc = ibmvfc_send_crq_init_complete(vhost);
2112                         if (rc == 0)
2113                                 ibmvfc_init_host(vhost);
2114                         else
2115                                 dev_err(vhost->dev, "Unable to send init rsp. rc=%ld\n", rc);
2116                         break;
2117                 case IBMVFC_CRQ_INIT_COMPLETE:
2118                         dev_info(vhost->dev, "Partner initialization complete\n");
2119                         ibmvfc_init_host(vhost);
2120                         break;
2121                 default:
2122                         dev_err(vhost->dev, "Unknown crq message type: %d\n", crq->format);
2123                 }
2124                 return;
2125         case IBMVFC_CRQ_XPORT_EVENT:
2126                 vhost->state = IBMVFC_NO_CRQ;
2127                 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
2128                 if (crq->format == IBMVFC_PARTITION_MIGRATED) {
2129                         /* We need to re-setup the interpartition connection */
2130                         dev_info(vhost->dev, "Re-enabling adapter\n");
2131                         vhost->client_migrated = 1;
2132                         ibmvfc_purge_requests(vhost, DID_REQUEUE);
2133                         if ((rc = ibmvfc_reenable_crq_queue(vhost)) ||
2134                             (rc = ibmvfc_send_crq_init(vhost))) {
2135                                 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
2136                                 dev_err(vhost->dev, "Error after enable (rc=%ld)\n", rc);
2137                         } else
2138                                 ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
2139                 } else {
2140                         dev_err(vhost->dev, "Virtual adapter failed (rc=%d)\n", crq->format);
2141
2142                         ibmvfc_purge_requests(vhost, DID_ERROR);
2143                         if ((rc = ibmvfc_reset_crq(vhost)) ||
2144                             (rc = ibmvfc_send_crq_init(vhost))) {
2145                                 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
2146                                 dev_err(vhost->dev, "Error after reset (rc=%ld)\n", rc);
2147                         } else
2148                                 ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
2149                 }
2150                 return;
2151         case IBMVFC_CRQ_CMD_RSP:
2152                 break;
2153         default:
2154                 dev_err(vhost->dev, "Got an invalid message type 0x%02x\n", crq->valid);
2155                 return;
2156         }
2157
2158         if (crq->format == IBMVFC_ASYNC_EVENT)
2159                 return;
2160
2161         /* The only kind of payload CRQs we should get are responses to
2162          * things we send. Make sure this response is to something we
2163          * actually sent.
2164          */
2165         if (unlikely(!ibmvfc_valid_event(&vhost->pool, evt))) {
2166                 dev_err(vhost->dev, "Returned correlation_token 0x%08lx is invalid!\n",
2167                         crq->ioba);
2168                 return;
2169         }
2170
2171         if (unlikely(atomic_read(&evt->free))) {
2172                 dev_err(vhost->dev, "Received duplicate correlation_token 0x%08lx!\n",
2173                         crq->ioba);
2174                 return;
2175         }
2176
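        /* Stop the command's timeout timer and hand the response to its completion handler */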
2177         del_timer(&evt->timer);
2178         list_del(&evt->queue);
2179         ibmvfc_trc_end(evt);
2180         evt->done(evt);
2181 }
2182
2183 /**
2184  * ibmvfc_scan_finished - Check if the device scan is done.
2185  * @shost:      scsi host struct
2186  * @time:       current elapsed time
2187  *
2188  * Returns:
2189  *      0 if scan is not done / 1 if scan is done
2190  **/
2191 static int ibmvfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
2192 {
2193         unsigned long flags;
2194         struct ibmvfc_host *vhost = shost_priv(shost);
2195         int done = 0;
2196
2197         spin_lock_irqsave(shost->host_lock, flags);
2198         if (time >= (init_timeout * HZ)) {
2199                 dev_info(vhost->dev, "Scan taking longer than %d seconds, "
2200                          "continuing initialization\n", init_timeout);
2201                 done = 1;
2202         }
2203
2204         if (vhost->state != IBMVFC_NO_CRQ && vhost->action == IBMVFC_HOST_ACTION_NONE)
2205                 done = 1;
2206         spin_unlock_irqrestore(shost->host_lock, flags);
2207         return done;
2208 }
2209
2210 /**
2211  * ibmvfc_slave_alloc - Setup the device's task set value
2212  * @sdev:       struct scsi_device device to configure
2213  *
2214  * Set the device's task set value so that error handling works as
2215  * expected.
2216  *
2217  * Returns:
2218  *      0 on success / -ENXIO if device does not exist
2219  **/
2220 static int ibmvfc_slave_alloc(struct scsi_device *sdev)
2221 {
2222         struct Scsi_Host *shost = sdev->host;
2223         struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
2224         struct ibmvfc_host *vhost = shost_priv(shost);
2225         unsigned long flags = 0;
2226
2227         if (!rport || fc_remote_port_chkready(rport))
2228                 return -ENXIO;
2229
2230         spin_lock_irqsave(shost->host_lock, flags);
2231         sdev->hostdata = (void *)(unsigned long)vhost->task_set++;
2232         spin_unlock_irqrestore(shost->host_lock, flags);
2233         return 0;
2234 }
2235
2236 /**
2237  * ibmvfc_slave_configure - Configure the device
2238  * @sdev:       struct scsi_device device to configure
2239  *
2240  * Enable allow_restart for a device if it is a disk, set up tagged
2241  * command queuing, and set the rport's dev_loss timeout.
2242  *
2243  * Returns:
2244  *      0
2245  **/
2246 static int ibmvfc_slave_configure(struct scsi_device *sdev)
2247 {
2248         struct Scsi_Host *shost = sdev->host;
2249         struct fc_rport *rport = starget_to_rport(sdev->sdev_target);
2250         unsigned long flags = 0;
2251
2252         spin_lock_irqsave(shost->host_lock, flags);
2253         if (sdev->type == TYPE_DISK)
2254                 sdev->allow_restart = 1;
2255
2256         if (sdev->tagged_supported) {
2257                 scsi_set_tag_type(sdev, MSG_SIMPLE_TAG);
2258                 scsi_activate_tcq(sdev, sdev->queue_depth);
2259         } else
2260                 scsi_deactivate_tcq(sdev, sdev->queue_depth);
2261
2262         rport->dev_loss_tmo = dev_loss_tmo;
2263         spin_unlock_irqrestore(shost->host_lock, flags);
2264         return 0;
2265 }
2266
2267 /**
2268  * ibmvfc_change_queue_depth - Change the device's queue depth
2269  * @sdev:       scsi device struct
2270  * @qdepth:     depth to set
2271  *
2272  * Return value:
2273  *      actual depth set
2274  **/
2275 static int ibmvfc_change_queue_depth(struct scsi_device *sdev, int qdepth)
2276 {
2277         if (qdepth > IBMVFC_MAX_CMDS_PER_LUN)
2278                 qdepth = IBMVFC_MAX_CMDS_PER_LUN;
2279
2280         scsi_adjust_queue_depth(sdev, 0, qdepth);
2281         return sdev->queue_depth;
2282 }
2283
2284 /**
2285  * ibmvfc_change_queue_type - Change the device's queue type
2286  * @sdev:               scsi device struct
2287  * @tag_type:   type of tags to use
2288  *
2289  * Return value:
2290  *      actual queue type set
2291  **/
2292 static int ibmvfc_change_queue_type(struct scsi_device *sdev, int tag_type)
2293 {
2294         if (sdev->tagged_supported) {
2295                 scsi_set_tag_type(sdev, tag_type);
2296
2297                 if (tag_type)
2298                         scsi_activate_tcq(sdev, sdev->queue_depth);
2299                 else
2300                         scsi_deactivate_tcq(sdev, sdev->queue_depth);
2301         } else
2302                 tag_type = 0;
2303
2304         return tag_type;
2305 }
2306
2307 static ssize_t ibmvfc_show_host_partition_name(struct device *dev,
2308                                                  struct device_attribute *attr, char *buf)
2309 {
2310         struct Scsi_Host *shost = class_to_shost(dev);
2311         struct ibmvfc_host *vhost = shost_priv(shost);
2312
2313         return snprintf(buf, PAGE_SIZE, "%s\n",
2314                         vhost->login_buf->resp.partition_name);
2315 }
2316
2317 static struct device_attribute ibmvfc_host_partition_name = {
2318         .attr = {
2319                 .name = "partition_name",
2320                 .mode = S_IRUGO,
2321         },
2322         .show = ibmvfc_show_host_partition_name,
2323 };
2324
2325 static ssize_t ibmvfc_show_host_device_name(struct device *dev,
2326                                             struct device_attribute *attr, char *buf)
2327 {
2328         struct Scsi_Host *shost = class_to_shost(dev);
2329         struct ibmvfc_host *vhost = shost_priv(shost);
2330
2331         return snprintf(buf, PAGE_SIZE, "%s\n",
2332                         vhost->login_buf->resp.device_name);
2333 }
2334
2335 static struct device_attribute ibmvfc_host_device_name = {
2336         .attr = {
2337                 .name = "device_name",
2338                 .mode = S_IRUGO,
2339         },
2340         .show = ibmvfc_show_host_device_name,
2341 };
2342
2343 static ssize_t ibmvfc_show_host_loc_code(struct device *dev,
2344                                          struct device_attribute *attr, char *buf)
2345 {
2346         struct Scsi_Host *shost = class_to_shost(dev);
2347         struct ibmvfc_host *vhost = shost_priv(shost);
2348
2349         return snprintf(buf, PAGE_SIZE, "%s\n",
2350                         vhost->login_buf->resp.port_loc_code);
2351 }
2352
2353 static struct device_attribute ibmvfc_host_loc_code = {
2354         .attr = {
2355                 .name = "port_loc_code",
2356                 .mode = S_IRUGO,
2357         },
2358         .show = ibmvfc_show_host_loc_code,
2359 };
2360
2361 static ssize_t ibmvfc_show_host_drc_name(struct device *dev,
2362                                          struct device_attribute *attr, char *buf)
2363 {
2364         struct Scsi_Host *shost = class_to_shost(dev);
2365         struct ibmvfc_host *vhost = shost_priv(shost);
2366
2367         return snprintf(buf, PAGE_SIZE, "%s\n",
2368                         vhost->login_buf->resp.drc_name);
2369 }
2370
2371 static struct device_attribute ibmvfc_host_drc_name = {
2372         .attr = {
2373                 .name = "drc_name",
2374                 .mode = S_IRUGO,
2375         },
2376         .show = ibmvfc_show_host_drc_name,
2377 };
2378
2379 static ssize_t ibmvfc_show_host_npiv_version(struct device *dev,
2380                                              struct device_attribute *attr, char *buf)
2381 {
2382         struct Scsi_Host *shost = class_to_shost(dev);
2383         struct ibmvfc_host *vhost = shost_priv(shost);
2384         return snprintf(buf, PAGE_SIZE, "%d\n", vhost->login_buf->resp.version);
2385 }
2386
2387 static struct device_attribute ibmvfc_host_npiv_version = {
2388         .attr = {
2389                 .name = "npiv_version",
2390                 .mode = S_IRUGO,
2391         },
2392         .show = ibmvfc_show_host_npiv_version,
2393 };
2394
2395 /**
2396  * ibmvfc_show_log_level - Show the adapter's error logging level
2397  * @dev:        class device struct
2398  * @buf:        buffer
2399  *
2400  * Return value:
2401  *      number of bytes printed to buffer
2402  **/
2403 static ssize_t ibmvfc_show_log_level(struct device *dev,
2404                                      struct device_attribute *attr, char *buf)
2405 {
2406         struct Scsi_Host *shost = class_to_shost(dev);
2407         struct ibmvfc_host *vhost = shost_priv(shost);
2408         unsigned long flags = 0;
2409         int len;
2410
2411         spin_lock_irqsave(shost->host_lock, flags);
2412         len = snprintf(buf, PAGE_SIZE, "%d\n", vhost->log_level);
2413         spin_unlock_irqrestore(shost->host_lock, flags);
2414         return len;
2415 }
2416
2417 /**
2418  * ibmvfc_store_log_level - Change the adapter's error logging level
2419  * @dev:        class device struct
2420  * @buf:        buffer
2421  *
2422  * Return value:
2423  *      number of bytes printed to buffer
2424  *      number of bytes consumed from the buffer
2425 static ssize_t ibmvfc_store_log_level(struct device *dev,
2426                                       struct device_attribute *attr,
2427                                       const char *buf, size_t count)
2428 {
2429         struct Scsi_Host *shost = class_to_shost(dev);
2430         struct ibmvfc_host *vhost = shost_priv(shost);
2431         unsigned long flags = 0;
2432
2433         spin_lock_irqsave(shost->host_lock, flags);
2434         vhost->log_level = simple_strtoul(buf, NULL, 10);
2435         spin_unlock_irqrestore(shost->host_lock, flags);
2436         return strlen(buf);
2437 }
2438
2439 static struct device_attribute ibmvfc_log_level_attr = {
2440         .attr = {
2441                 .name =         "log_level",
2442                 .mode =         S_IRUGO | S_IWUSR,
2443         },
2444         .show = ibmvfc_show_log_level,
2445         .store = ibmvfc_store_log_level
2446 };
2447
2448 #ifdef CONFIG_SCSI_IBMVFC_TRACE
2449 /**
2450  * ibmvfc_read_trace - Dump the adapter trace
2451  * @kobj:               kobject struct
2452  * @bin_attr:   bin_attribute struct
2453  * @buf:                buffer
2454  * @off:                offset
2455  * @count:              buffer size
2456  *
2457  * Return value:
2458  *      number of bytes printed to buffer
2459  **/
2460 static ssize_t ibmvfc_read_trace(struct kobject *kobj,
2461                                  struct bin_attribute *bin_attr,
2462                                  char *buf, loff_t off, size_t count)
2463 {
2464         struct device *dev = container_of(kobj, struct device, kobj);
2465         struct Scsi_Host *shost = class_to_shost(dev);
2466         struct ibmvfc_host *vhost = shost_priv(shost);
2467         unsigned long flags = 0;
2468         int size = IBMVFC_TRACE_SIZE;
2469         char *src = (char *)vhost->trace;
2470
2471         if (off > size)
2472                 return 0;
2473         if (off + count > size) {
2474                 size -= off;
2475                 count = size;
2476         }
2477
2478         spin_lock_irqsave(shost->host_lock, flags);
2479         memcpy(buf, &src[off], count);
2480         spin_unlock_irqrestore(shost->host_lock, flags);
2481         return count;
2482 }
2483
2484 static struct bin_attribute ibmvfc_trace_attr = {
2485         .attr = {
2486                 .name = "trace",
2487                 .mode = S_IRUGO,
2488         },
2489         .size = 0,
2490         .read = ibmvfc_read_trace,
2491 };
2492 #endif
2493
2494 static struct device_attribute *ibmvfc_attrs[] = {
2495         &ibmvfc_host_partition_name,
2496         &ibmvfc_host_device_name,
2497         &ibmvfc_host_loc_code,
2498         &ibmvfc_host_drc_name,
2499         &ibmvfc_host_npiv_version,
2500         &ibmvfc_log_level_attr,
2501         NULL
2502 };
2503
2504 static struct scsi_host_template driver_template = {
2505         .module = THIS_MODULE,
2506         .name = "IBM POWER Virtual FC Adapter",
2507         .proc_name = IBMVFC_NAME,
2508         .queuecommand = ibmvfc_queuecommand,
2509         .eh_abort_handler = ibmvfc_eh_abort_handler,
2510         .eh_device_reset_handler = ibmvfc_eh_device_reset_handler,
2511         .eh_target_reset_handler = ibmvfc_eh_target_reset_handler,
2512         .eh_host_reset_handler = ibmvfc_eh_host_reset_handler,
2513         .slave_alloc = ibmvfc_slave_alloc,
2514         .slave_configure = ibmvfc_slave_configure,
2515         .scan_finished = ibmvfc_scan_finished,
2516         .change_queue_depth = ibmvfc_change_queue_depth,
2517         .change_queue_type = ibmvfc_change_queue_type,
2518         .cmd_per_lun = 16,
2519         .can_queue = IBMVFC_MAX_REQUESTS_DEFAULT,
2520         .this_id = -1,
2521         .sg_tablesize = SG_ALL,
2522         .max_sectors = IBMVFC_MAX_SECTORS,
2523         .use_clustering = ENABLE_CLUSTERING,
2524         .shost_attrs = ibmvfc_attrs,
2525 };
2526
2527 /**
2528  * ibmvfc_next_async_crq - Returns the next entry in async queue
2529  * @vhost:      ibmvfc host struct
2530  *
2531  * Returns:
2532  *      Pointer to next entry in queue / NULL if empty
2533  **/
2534 static struct ibmvfc_async_crq *ibmvfc_next_async_crq(struct ibmvfc_host *vhost)
2535 {
2536         struct ibmvfc_async_crq_queue *async_crq = &vhost->async_crq;
2537         struct ibmvfc_async_crq *crq;
2538
2539         crq = &async_crq->msgs[async_crq->cur];
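        /*
         * An entry is ready when the top bit of its valid byte is set; the
         * interrupt handler clears valid once the entry has been handled.
         */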
2540         if (crq->valid & 0x80) {
2541                 if (++async_crq->cur == async_crq->size)
2542                         async_crq->cur = 0;
2543         } else
2544                 crq = NULL;
2545
2546         return crq;
2547 }
2548
2549 /**
2550  * ibmvfc_next_crq - Returns the next entry in message queue
2551  * @vhost:      ibmvfc host struct
2552  *
2553  * Returns:
2554  *      Pointer to next entry in queue / NULL if empty
2555  **/
2556 static struct ibmvfc_crq *ibmvfc_next_crq(struct ibmvfc_host *vhost)
2557 {
2558         struct ibmvfc_crq_queue *queue = &vhost->crq;
2559         struct ibmvfc_crq *crq;
2560
2561         crq = &queue->msgs[queue->cur];
2562         if (crq->valid & 0x80) {
2563                 if (++queue->cur == queue->size)
2564                         queue->cur = 0;
2565         } else
2566                 crq = NULL;
2567
2568         return crq;
2569 }
2570
2571 /**
2572  * ibmvfc_interrupt - Interrupt handler
2573  * @irq:                number of irq to handle, not used
2574  * @dev_instance: ibmvfc_host that received interrupt
2575  *
2576  * Returns:
2577  *      IRQ_HANDLED
2578  **/
2579 static irqreturn_t ibmvfc_interrupt(int irq, void *dev_instance)
2580 {
2581         struct ibmvfc_host *vhost = (struct ibmvfc_host *)dev_instance;
2582         struct vio_dev *vdev = to_vio_dev(vhost->dev);
2583         struct ibmvfc_crq *crq;
2584         struct ibmvfc_async_crq *async;
2585         unsigned long flags;
2586         int done = 0;
2587
2588         spin_lock_irqsave(vhost->host->host_lock, flags);
2589         vio_disable_interrupts(to_vio_dev(vhost->dev));
2590         while (!done) {
2591                 /* Pull all the valid messages off the CRQ */
2592                 while ((crq = ibmvfc_next_crq(vhost)) != NULL) {
2593                         ibmvfc_handle_crq(crq, vhost);
2594                         crq->valid = 0;
2595                 }
2596
2597                 /* Pull all the valid messages off the async CRQ */
2598                 while ((async = ibmvfc_next_async_crq(vhost)) != NULL) {
2599                         ibmvfc_handle_async(async, vhost);
2600                         async->valid = 0;
2601                 }
2602
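                /*
                 * Re-enable interrupts, then poll both queues once more to
                 * close the window where a message arrives after the queues
                 * were drained but before interrupts were enabled.
                 */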
2603                 vio_enable_interrupts(vdev);
2604                 if ((crq = ibmvfc_next_crq(vhost)) != NULL) {
2605                         vio_disable_interrupts(vdev);
2606                         ibmvfc_handle_crq(crq, vhost);
2607                         crq->valid = 0;
2608                 } else if ((async = ibmvfc_next_async_crq(vhost)) != NULL) {
2609                         vio_disable_interrupts(vdev);
2610                         ibmvfc_handle_async(async, vhost);
2611                         async->valid = 0;
2612                 } else
2613                         done = 1;
2614         }
2615
2616         spin_unlock_irqrestore(vhost->host->host_lock, flags);
2617         return IRQ_HANDLED;
2618 }
2619
2620 /**
2621  * ibmvfc_init_tgt - Set the next init job step for the target
2622  * @tgt:                ibmvfc target struct
2623  * @job_step:   job step to perform
2624  *
2625  **/
2626 static void ibmvfc_init_tgt(struct ibmvfc_target *tgt,
2627                             void (*job_step) (struct ibmvfc_target *))
2628 {
2629         ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT);
2630         tgt->job_step = job_step;
2631         wake_up(&tgt->vhost->work_wait_q);
2632 }
2633
2634 /**
2635  * ibmvfc_retry_tgt_init - Attempt to retry a step in target initialization
2636  * @tgt:                ibmvfc target struct
2637  * @job_step:   initialization job step
2638  *
2639  **/
2640 static void ibmvfc_retry_tgt_init(struct ibmvfc_target *tgt,
2641                                   void (*job_step) (struct ibmvfc_target *))
2642 {
2643         if (++tgt->init_retries > IBMVFC_MAX_INIT_RETRIES) {
2644                 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
2645                 wake_up(&tgt->vhost->work_wait_q);
2646         } else
2647                 ibmvfc_init_tgt(tgt, job_step);
2648 }
2649
2650 /**
2651  * ibmvfc_release_tgt - Free memory allocated for a target
2652  * @kref:               kref struct
2653  *
2654  **/
2655 static void ibmvfc_release_tgt(struct kref *kref)
2656 {
2657         struct ibmvfc_target *tgt = container_of(kref, struct ibmvfc_target, kref);
2658         kfree(tgt);
2659 }
2660
2661 /**
2662  * ibmvfc_tgt_prli_done - Completion handler for Process Login
2663  * @evt:        ibmvfc event struct
2664  *
2665  **/
2666 static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
2667 {
2668         struct ibmvfc_target *tgt = evt->tgt;
2669         struct ibmvfc_host *vhost = evt->vhost;
2670         struct ibmvfc_process_login *rsp = &evt->xfer_iu->prli;
2671         u32 status = rsp->common.status;
2672
2673         vhost->discovery_threads--;
2674         ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
2675         switch (status) {
2676         case IBMVFC_MAD_SUCCESS:
2677                 tgt_dbg(tgt, "Process Login succeeded\n");
2678                 tgt->need_login = 0;
2679                 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_ADD_RPORT);
2680                 break;
2681         case IBMVFC_MAD_DRIVER_FAILED:
2682                 break;
2683         case IBMVFC_MAD_CRQ_ERROR:
2684                 ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
2685                 break;
2686         case IBMVFC_MAD_FAILED:
2687         default:
2688                 tgt_err(tgt, "Process Login failed: %s (%x:%x) rc=0x%02X\n",
2689                         ibmvfc_get_cmd_error(rsp->status, rsp->error),
2690                         rsp->status, rsp->error, status);
2691                 if (ibmvfc_retry_cmd(rsp->status, rsp->error))
2692                         ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
2693                 break;
2694         };
2695
2696         kref_put(&tgt->kref, ibmvfc_release_tgt);
2697         ibmvfc_free_event(evt);
2698         wake_up(&vhost->work_wait_q);
2699 }
2700
2701 /**
2702  * ibmvfc_tgt_send_prli - Send a process login
2703  * @tgt:        ibmvfc target struct
2704  *
2705  **/
2706 static void ibmvfc_tgt_send_prli(struct ibmvfc_target *tgt)
2707 {
2708         struct ibmvfc_process_login *prli;
2709         struct ibmvfc_host *vhost = tgt->vhost;
2710         struct ibmvfc_event *evt;
2711
2712         if (vhost->discovery_threads >= disc_threads)
2713                 return;
2714
2715         kref_get(&tgt->kref);
2716         evt = ibmvfc_get_event(vhost);
2717         vhost->discovery_threads++;
2718         ibmvfc_init_event(evt, ibmvfc_tgt_prli_done, IBMVFC_MAD_FORMAT);
2719         evt->tgt = tgt;
2720         prli = &evt->iu.prli;
2721         memset(prli, 0, sizeof(*prli));
2722         prli->common.version = 1;
2723         prli->common.opcode = IBMVFC_PROCESS_LOGIN;
2724         prli->common.length = sizeof(*prli);
2725         prli->scsi_id = tgt->scsi_id;
2726
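        /* Request an FCP image pair, registering this port as a SCSI initiator */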
2727         prli->parms.type = IBMVFC_SCSI_FCP_TYPE;
2728         prli->parms.flags = IBMVFC_PRLI_EST_IMG_PAIR;
2729         prli->parms.service_parms = IBMVFC_PRLI_INITIATOR_FUNC;
2730
2731         ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
2732         if (ibmvfc_send_event(evt, vhost, default_timeout)) {
2733                 vhost->discovery_threads--;
2734                 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
2735                 kref_put(&tgt->kref, ibmvfc_release_tgt);
2736         } else
2737                 tgt_dbg(tgt, "Sent process login\n");
2738 }
2739
2740 /**
2741  * ibmvfc_tgt_plogi_done - Completion handler for Port Login
2742  * @evt:        ibmvfc event struct
2743  *
2744  **/
2745 static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt)
2746 {
2747         struct ibmvfc_target *tgt = evt->tgt;
2748         struct ibmvfc_host *vhost = evt->vhost;
2749         struct ibmvfc_port_login *rsp = &evt->xfer_iu->plogi;
2750         u32 status = rsp->common.status;
2751
2752         vhost->discovery_threads--;
2753         ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
2754         switch (status) {
2755         case IBMVFC_MAD_SUCCESS:
2756                 tgt_dbg(tgt, "Port Login succeeded\n");
2757                 if (tgt->ids.port_name &&
2758                     tgt->ids.port_name != wwn_to_u64(rsp->service_parms.port_name)) {
2759                         vhost->reinit = 1;
2760                         tgt_dbg(tgt, "Port re-init required\n");
2761                         break;
2762                 }
2763                 tgt->ids.node_name = wwn_to_u64(rsp->service_parms.node_name);
2764                 tgt->ids.port_name = wwn_to_u64(rsp->service_parms.port_name);
2765                 tgt->ids.port_id = tgt->scsi_id;
2766                 tgt->ids.roles = FC_PORT_ROLE_FCP_TARGET;
2767                 memcpy(&tgt->service_parms, &rsp->service_parms,
2768                        sizeof(tgt->service_parms));
2769                 memcpy(&tgt->service_parms_change, &rsp->service_parms_change,
2770                        sizeof(tgt->service_parms_change));
2771                 ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_prli);
2772                 break;
2773         case IBMVFC_MAD_DRIVER_FAILED:
2774                 break;
2775         case IBMVFC_MAD_CRQ_ERROR:
2776                 ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
2777                 break;
2778         case IBMVFC_MAD_FAILED:
2779         default:
2780                 tgt_err(tgt, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
2781                         ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error,
2782                         ibmvfc_get_fc_type(rsp->fc_type), rsp->fc_type,
2783                         ibmvfc_get_ls_explain(rsp->fc_explain), rsp->fc_explain, status);
2784
2785                 if (ibmvfc_retry_cmd(rsp->status, rsp->error))
2786                         ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
2787                 break;
2788         };
2789
2790         kref_put(&tgt->kref, ibmvfc_release_tgt);
2791         ibmvfc_free_event(evt);
2792         wake_up(&vhost->work_wait_q);
2793 }
2794
2795 /**
2796  * ibmvfc_tgt_send_plogi - Send PLOGI to the specified target
2797  * @tgt:        ibmvfc target struct
2798  *
2799  **/
2800 static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *tgt)
2801 {
2802         struct ibmvfc_port_login *plogi;
2803         struct ibmvfc_host *vhost = tgt->vhost;
2804         struct ibmvfc_event *evt;
2805
2806         if (vhost->discovery_threads >= disc_threads)
2807                 return;
2808
2809         kref_get(&tgt->kref);
2810         evt = ibmvfc_get_event(vhost);
2811         vhost->discovery_threads++;
2812         ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
2813         ibmvfc_init_event(evt, ibmvfc_tgt_plogi_done, IBMVFC_MAD_FORMAT);
2814         evt->tgt = tgt;
2815         plogi = &evt->iu.plogi;
2816         memset(plogi, 0, sizeof(*plogi));
2817         plogi->common.version = 1;
2818         plogi->common.opcode = IBMVFC_PORT_LOGIN;
2819         plogi->common.length = sizeof(*plogi);
2820         plogi->scsi_id = tgt->scsi_id;
2821
2822         if (ibmvfc_send_event(evt, vhost, default_timeout)) {
2823                 vhost->discovery_threads--;
2824                 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
2825                 kref_put(&tgt->kref, ibmvfc_release_tgt);
2826         } else
2827                 tgt_dbg(tgt, "Sent port login\n");
2828 }
2829
2830 /**
2831  * ibmvfc_tgt_implicit_logout_done - Completion handler for Implicit Logout MAD
2832  * @evt:        ibmvfc event struct
2833  *
2834  **/
2835 static void ibmvfc_tgt_implicit_logout_done(struct ibmvfc_event *evt)
2836 {
2837         struct ibmvfc_target *tgt = evt->tgt;
2838         struct ibmvfc_host *vhost = evt->vhost;
2839         struct ibmvfc_implicit_logout *rsp = &evt->xfer_iu->implicit_logout;
2840         u32 status = rsp->common.status;
2841
2842         vhost->discovery_threads--;
2843         ibmvfc_free_event(evt);
2844         ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
2845
2846         switch (status) {
2847         case IBMVFC_MAD_SUCCESS:
2848                 tgt_dbg(tgt, "Implicit Logout succeeded\n");
2849                 break;
2850         case IBMVFC_MAD_DRIVER_FAILED:
2851                 kref_put(&tgt->kref, ibmvfc_release_tgt);
2852                 wake_up(&vhost->work_wait_q);
2853                 return;
2854         case IBMVFC_MAD_FAILED:
2855         default:
2856                 tgt_err(tgt, "Implicit Logout failed: rc=0x%02X\n", status);
2857                 break;
2858         };
2859
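        /*
         * During discovery, proceed to port login; during a query of existing
         * targets, a changed SCSI ID means the old rport must be deleted.
         */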
2860         if (vhost->action == IBMVFC_HOST_ACTION_TGT_INIT)
2861                 ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_plogi);
2862         else if (vhost->action == IBMVFC_HOST_ACTION_QUERY_TGTS &&
2863                  tgt->scsi_id != tgt->new_scsi_id)
2864                 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
2865         kref_put(&tgt->kref, ibmvfc_release_tgt);
2866         wake_up(&vhost->work_wait_q);
2867 }
2868
2869 /**
2870  * ibmvfc_tgt_implicit_logout - Initiate an Implicit Logout for specified target
2871  * @tgt:                ibmvfc target struct
2872  *
2873  **/
2874 static void ibmvfc_tgt_implicit_logout(struct ibmvfc_target *tgt)
2875 {
2876         struct ibmvfc_implicit_logout *mad;
2877         struct ibmvfc_host *vhost = tgt->vhost;
2878         struct ibmvfc_event *evt;
2879
2880         if (vhost->discovery_threads >= disc_threads)
2881                 return;
2882
2883         kref_get(&tgt->kref);
2884         evt = ibmvfc_get_event(vhost);
2885         vhost->discovery_threads++;
2886         ibmvfc_init_event(evt, ibmvfc_tgt_implicit_logout_done, IBMVFC_MAD_FORMAT);
2887         evt->tgt = tgt;
2888         mad = &evt->iu.implicit_logout;
2889         memset(mad, 0, sizeof(*mad));
2890         mad->common.version = 1;
2891         mad->common.opcode = IBMVFC_IMPLICIT_LOGOUT;
2892         mad->common.length = sizeof(*mad);
2893         mad->old_scsi_id = tgt->scsi_id;
2894
2895         ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
2896         if (ibmvfc_send_event(evt, vhost, default_timeout)) {
2897                 vhost->discovery_threads--;
2898                 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
2899                 kref_put(&tgt->kref, ibmvfc_release_tgt);
2900         } else
2901                 tgt_dbg(tgt, "Sent Implicit Logout\n");
2902 }
2903
2904 /**
2905  * ibmvfc_tgt_query_target_done - Completion handler for Query Target MAD
2906  * @evt:        ibmvfc event struct
2907  *
2908  **/
2909 static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
2910 {
2911         struct ibmvfc_target *tgt = evt->tgt;
2912         struct ibmvfc_host *vhost = evt->vhost;
2913         struct ibmvfc_query_tgt *rsp = &evt->xfer_iu->query_tgt;
2914         u32 status = rsp->common.status;
2915
2916         vhost->discovery_threads--;
2917         ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
2918         switch (status) {
2919         case IBMVFC_MAD_SUCCESS:
2920                 tgt_dbg(tgt, "Query Target succeeded\n");
2921                 tgt->new_scsi_id = rsp->scsi_id;
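                /* SCSI ID changed for this WWPN: re-init the target, starting with an Implicit Logout of the old ID. */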
2922                 if (rsp->scsi_id != tgt->scsi_id)
2923                         ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
2924                 break;
2925         case IBMVFC_MAD_DRIVER_FAILED:
2926                 break;
2927         case IBMVFC_MAD_CRQ_ERROR:
2928                 ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target);
2929                 break;
2930         case IBMVFC_MAD_FAILED:
2931         default:
2932                 tgt_err(tgt, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
2933                         ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error,
2934                         ibmvfc_get_fc_type(rsp->fc_type), rsp->fc_type,
2935                         ibmvfc_get_gs_explain(rsp->fc_explain), rsp->fc_explain, status);
2936
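                /* Port name no longer registered with the fabric: drop the rport; otherwise retry if the failure is retryable. */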
2937                 if ((rsp->status & IBMVFC_FABRIC_MAPPED) == IBMVFC_FABRIC_MAPPED &&
2938                     rsp->error == IBMVFC_UNABLE_TO_PERFORM_REQ &&
2939                     rsp->fc_explain == IBMVFC_PORT_NAME_NOT_REG)
2940                         ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
2941                 else if (ibmvfc_retry_cmd(rsp->status, rsp->error))
2942                         ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target);
2943                 break;
2944         }
2945
2946         kref_put(&tgt->kref, ibmvfc_release_tgt);
2947         ibmvfc_free_event(evt);
2948         wake_up(&vhost->work_wait_q);
2949 }
2950
2951 /**
2952  * ibmvfc_tgt_query_target - Initiate a Query Target for specified target
2953  * @tgt:        ibmvfc target struct
2954  *
2955  **/
2956 static void ibmvfc_tgt_query_target(struct ibmvfc_target *tgt)
2957 {
2958         struct ibmvfc_query_tgt *query_tgt;
2959         struct ibmvfc_host *vhost = tgt->vhost;
2960         struct ibmvfc_event *evt;
2961
2962         if (vhost->discovery_threads >= disc_threads)
2963                 return;
2964
2965         kref_get(&tgt->kref);
2966         evt = ibmvfc_get_event(vhost);
2967         vhost->discovery_threads++;
2968         evt->tgt = tgt;
2969         ibmvfc_init_event(evt, ibmvfc_tgt_query_target_done, IBMVFC_MAD_FORMAT);
2970         query_tgt = &evt->iu.query_tgt;
2971         memset(query_tgt, 0, sizeof(*query_tgt));
2972         query_tgt->common.version = 1;
2973         query_tgt->common.opcode = IBMVFC_QUERY_TARGET;
2974         query_tgt->common.length = sizeof(*query_tgt);
2975         query_tgt->wwpn = tgt->ids.port_name;
2976
2977         ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
2978         if (ibmvfc_send_event(evt, vhost, default_timeout)) {
2979                 vhost->discovery_threads--;
2980                 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
2981                 kref_put(&tgt->kref, ibmvfc_release_tgt);
2982         } else
2983                 tgt_dbg(tgt, "Sent Query Target\n");
2984 }
2985
2986 /**
2987  * ibmvfc_alloc_target - Allocate and initialize an ibmvfc target
2988  * @vhost:              ibmvfc host struct
2989  * @scsi_id:    SCSI ID to allocate target for
2990  *
2991  * Returns:
2992  *      0 on success / other on failure
2993  **/
2994 static int ibmvfc_alloc_target(struct ibmvfc_host *vhost, u64 scsi_id)
2995 {
2996         struct ibmvfc_target *tgt;
2997         unsigned long flags;
2998
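        /* If a target already exists for this SCSI ID, just log back in if needed. */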
2999         spin_lock_irqsave(vhost->host->host_lock, flags);
3000         list_for_each_entry(tgt, &vhost->targets, queue) {
3001                 if (tgt->scsi_id == scsi_id) {
3002                         if (tgt->need_login)
3003                                 ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
3004                         goto unlock_out;
3005                 }
3006         }
3007         spin_unlock_irqrestore(vhost->host->host_lock, flags);
3008
3009         tgt = mempool_alloc(vhost->tgt_pool, GFP_KERNEL);
3010         if (!tgt) {
3011                 dev_err(vhost->dev, "Target allocation failure for scsi id %08llx\n",
3012                         (unsigned long long)scsi_id);
3013                 return -ENOMEM;
3014         }
3015
3016         tgt->scsi_id = scsi_id;
3017         tgt->new_scsi_id = scsi_id;
3018         tgt->vhost = vhost;
3019         tgt->need_login = 1;
3020         kref_init(&tgt->kref);
3021         ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
3022         spin_lock_irqsave(vhost->host->host_lock, flags);
3023         list_add_tail(&tgt->queue, &vhost->targets);
3024
3025 unlock_out:
3026         spin_unlock_irqrestore(vhost->host->host_lock, flags);
3027         return 0;
3028 }
3029
3030 /**
3031  * ibmvfc_alloc_targets - Allocate and initialize ibmvfc targets
3032  * @vhost:              ibmvfc host struct
3033  *
3034  * Returns:
3035  *      0 on success / other on failure
3036  **/
3037 static int ibmvfc_alloc_targets(struct ibmvfc_host *vhost)
3038 {
3039         int i, rc;
3040
3041         for (i = 0, rc = 0; !rc && i < vhost->num_targets; i++)
3042                 rc = ibmvfc_alloc_target(vhost,
3043                                          vhost->disc_buf->scsi_id[i] & IBMVFC_DISC_TGT_SCSI_ID_MASK);
3044
3045         return rc;
3046 }
3047
3048 /**
3049  * ibmvfc_discover_targets_done - Completion handler for discover targets MAD
3050  * @evt:        ibmvfc event struct
3051  *
3052  **/
3053 static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt)
3054 {
3055         struct ibmvfc_host *vhost = evt->vhost;
3056         struct ibmvfc_discover_targets *rsp = &evt->xfer_iu->discover_targets;
3057         u32 mad_status = rsp->common.status;
3058
3059         switch (mad_status) {
3060         case IBMVFC_MAD_SUCCESS:
3061                 ibmvfc_dbg(vhost, "Discover Targets succeeded\n");
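                /* The response reports how many SCSI IDs were written to disc_buf; allocate targets for them next. */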
3062                 vhost->num_targets = rsp->num_written;
3063                 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_ALLOC_TGTS);
3064                 break;
3065         case IBMVFC_MAD_FAILED:
3066                 dev_err(vhost->dev, "Discover Targets failed: %s (%x:%x)\n",
3067                         ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error);
3068                 ibmvfc_retry_host_init(vhost);
3069                 break;
3070         case IBMVFC_MAD_DRIVER_FAILED:
3071                 break;
3072         default:
3073                 dev_err(vhost->dev, "Invalid Discover Targets response: 0x%x\n", mad_status);
3074                 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
3075                 break;
3076         }
3077
3078         ibmvfc_free_event(evt);
3079         wake_up(&vhost->work_wait_q);
3080 }
3081
3082 /**
3083  * ibmvfc_discover_targets - Send Discover Targets MAD
3084  * @vhost:      ibmvfc host struct
3085  *
3086  **/
3087 static void ibmvfc_discover_targets(struct ibmvfc_host *vhost)
3088 {
3089         struct ibmvfc_discover_targets *mad;
3090         struct ibmvfc_event *evt = ibmvfc_get_event(vhost);
3091
3092         ibmvfc_init_event(evt, ibmvfc_discover_targets_done, IBMVFC_MAD_FORMAT);
3093         mad = &evt->iu.discover_targets;
3094         memset(mad, 0, sizeof(*mad));
3095         mad->common.version = 1;
3096         mad->common.opcode = IBMVFC_DISC_TARGETS;
3097         mad->common.length = sizeof(*mad);
3098         mad->bufflen = vhost->disc_buf_sz;
3099         mad->buffer.va = vhost->disc_buf_dma;
3100         mad->buffer.len = vhost->disc_buf_sz;
3101         ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
3102
3103         if (!ibmvfc_send_event(evt, vhost, default_timeout))
3104                 ibmvfc_dbg(vhost, "Sent discover targets\n");
3105         else
3106                 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
3107 }
3108
3109 /**
3110  * ibmvfc_npiv_login_done - Completion handler for NPIV Login
3111  * @evt:        ibmvfc event struct
3112  *
3113  **/
3114 static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
3115 {
3116         struct ibmvfc_host *vhost = evt->vhost;
3117         u32 mad_status = evt->xfer_iu->npiv_login.common.status;
3118         struct ibmvfc_npiv_login_resp *rsp = &vhost->login_buf->resp;
3119         unsigned int npiv_max_sectors;
3120
3121         switch (mad_status) {
3122         case IBMVFC_MAD_SUCCESS:
3123                 ibmvfc_free_event(evt);
3124                 break;
3125         case IBMVFC_MAD_FAILED:
3126                 dev_err(vhost->dev, "NPIV Login failed: %s (%x:%x)\n",
3127                         ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error);
3128                 if (ibmvfc_retry_cmd(rsp->status, rsp->error))
3129                         ibmvfc_retry_host_init(vhost);
3130                 else
3131                         ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
3132                 ibmvfc_free_event(evt);
3133                 return;
3134         case IBMVFC_MAD_CRQ_ERROR:
3135                 ibmvfc_retry_host_init(vhost);
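                /* fall through: free the event and return, as in the driver-failed case */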
3136         case IBMVFC_MAD_DRIVER_FAILED:
3137                 ibmvfc_free_event(evt);
3138                 return;
3139         default:
3140                 dev_err(vhost->dev, "Invalid NPIV Login response: 0x%x\n", mad_status);
3141                 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
3142                 ibmvfc_free_event(evt);
3143                 return;
3144         }
3145
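        /* Login succeeded: clear the migration flag and sanity check the response before using it. */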
3146         vhost->client_migrated = 0;
3147
3148         if (!(rsp->flags & IBMVFC_NATIVE_FC)) {
3149                 dev_err(vhost->dev, "Virtual adapter does not support FC: 0x%x\n",
3150                         rsp->flags);
3151                 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
3152                 wake_up(&vhost->work_wait_q);
3153                 return;
3154         }
3155
3156         if (rsp->max_cmds <= IBMVFC_NUM_INTERNAL_REQ) {
3157                 dev_err(vhost->dev, "Virtual adapter supported queue depth too small: %d\n",
3158                         rsp->max_cmds);
3159                 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
3160                 wake_up(&vhost->work_wait_q);
3161                 return;
3162         }
3163
3164         npiv_max_sectors = min((uint)(rsp->max_dma_len >> 9), IBMVFC_MAX_SECTORS);
3165         dev_info(vhost->dev, "Host partition: %s, device: %s %s %s max sectors %u\n",
3166                  rsp->partition_name, rsp->device_name, rsp->port_loc_code,
3167                  rsp->drc_name, npiv_max_sectors);
3168
3169         fc_host_fabric_name(vhost->host) = rsp->node_name;
3170         fc_host_node_name(vhost->host) = rsp->node_name;
3171         fc_host_port_name(vhost->host) = rsp->port_name;
3172         fc_host_port_id(vhost->host) = rsp->scsi_id;
3173         fc_host_port_type(vhost->host) = FC_PORTTYPE_NPIV;
3174         fc_host_supported_classes(vhost->host) = 0;
3175         if (rsp->service_parms.class1_parms[0] & 0x80000000)
3176                 fc_host_supported_classes(vhost->host) |= FC_COS_CLASS1;
3177         if (rsp->service_parms.class2_parms[0] & 0x80000000)
3178                 fc_host_supported_classes(vhost->host) |= FC_COS_CLASS2;
3179         if (rsp->service_parms.class3_parms[0] & 0x80000000)
3180                 fc_host_supported_classes(vhost->host) |= FC_COS_CLASS3;
3181         fc_host_maxframe_size(vhost->host) =
3182                 rsp->service_parms.common.bb_rcv_sz & 0x0fff;
3183
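        /* Size the SCSI command queue from the adapter limit, reserving IBMVFC_NUM_INTERNAL_REQ slots for internal requests. */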
3184         vhost->host->can_queue = rsp->max_cmds - IBMVFC_NUM_INTERNAL_REQ;
3185         vhost->host->max_sectors = npiv_max_sectors;
3186         ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
3187         wake_up(&vhost->work_wait_q);
3188 }
3189
3190 /**
3191  * ibmvfc_npiv_login - Sends NPIV login
3192  * @vhost:      ibmvfc host struct
3193  *
3194  **/
3195 static void ibmvfc_npiv_login(struct ibmvfc_host *vhost)
3196 {
3197         struct ibmvfc_npiv_login_mad *mad;
3198         struct ibmvfc_event *evt = ibmvfc_get_event(vhost);
3199
3200         ibmvfc_gather_partition_info(vhost);
3201         ibmvfc_set_login_info(vhost);
3202         ibmvfc_init_event(evt, ibmvfc_npiv_login_done, IBMVFC_MAD_FORMAT);
3203
3204         memcpy(vhost->login_buf, &vhost->login_info, sizeof(vhost->login_info));
3205         mad = &evt->iu.npiv_login;
3206         memset(mad, 0, sizeof(struct ibmvfc_npiv_login_mad));
3207         mad->common.version = 1;
3208         mad->common.opcode = IBMVFC_NPIV_LOGIN;
3209         mad->common.length = sizeof(struct ibmvfc_npiv_login_mad);
3210         mad->buffer.va = vhost->login_buf_dma;
3211         mad->buffer.len = sizeof(*vhost->login_buf);
3212
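        /* Reset the async event queue before (re)issuing the NPIV login. */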
3213         memset(vhost->async_crq.msgs, 0, PAGE_SIZE);
3214         vhost->async_crq.cur = 0;
3215         ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
3216
3217         if (!ibmvfc_send_event(evt, vhost, default_timeout))
3218                 ibmvfc_dbg(vhost, "Sent NPIV login\n");
3219         else
3220                 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
3221 }
3222
3223 /**
3224  * ibmvfc_dev_init_to_do - Is there target initialization work to do?
3225  * @vhost:              ibmvfc host struct
3226  *
3227  * Returns:
3228  *      1 if work to do / 0 if not
3229  **/
3230 static int ibmvfc_dev_init_to_do(struct ibmvfc_host *vhost)
3231 {
3232         struct ibmvfc_target *tgt;
3233
3234         list_for_each_entry(tgt, &vhost->targets, queue) {
3235                 if (tgt->action == IBMVFC_TGT_ACTION_INIT ||
3236                     tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT)
3237                         return 1;
3238         }
3239
3240         return 0;
3241 }
3242
3243 /**
3244  * __ibmvfc_work_to_do - Is there task level work to do? (no locking)
3245  * @vhost:              ibmvfc host struct
3246  *
3247  * Returns:
3248  *      1 if work to do / 0 if not
3249  **/
3250 static int __ibmvfc_work_to_do(struct ibmvfc_host *vhost)
3251 {
3252         struct ibmvfc_target *tgt;
3253
3254         if (kthread_should_stop())
3255                 return 1;
3256         switch (vhost->action) {
3257         case IBMVFC_HOST_ACTION_NONE:
3258         case IBMVFC_HOST_ACTION_INIT_WAIT:
3259                 return 0;
3260         case IBMVFC_HOST_ACTION_TGT_INIT:
3261         case IBMVFC_HOST_ACTION_QUERY_TGTS:
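                /*
                 * There is work if a discovery slot is free and some target
                 * still needs its init step started, or if discovery has
                 * drained completely (nothing left in INIT or INIT_WAIT).
                 */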
3262                 if (vhost->discovery_threads == disc_threads)
3263                         return 0;
3264                 list_for_each_entry(tgt, &vhost->targets, queue)
3265                         if (tgt->action == IBMVFC_TGT_ACTION_INIT)
3266                                 return 1;
3267                 list_for_each_entry(tgt, &vhost->targets, queue)
3268                         if (tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT)
3269                                 return 0;
3270                 return 1;
3271         case IBMVFC_HOST_ACTION_INIT:
3272         case IBMVFC_HOST_ACTION_ALLOC_TGTS:
3273         case IBMVFC_HOST_ACTION_TGT_ADD:
3274         case IBMVFC_HOST_ACTION_TGT_DEL:
3275         case IBMVFC_HOST_ACTION_QUERY:
3276         default:
3277                 break;
3278         }
3279
3280         return 1;
3281 }
3282
3283 /**
3284  * ibmvfc_work_to_do - Is there task level work to do?
3285  * @vhost:              ibmvfc host struct
3286  *
3287  * Returns:
3288  *      1 if work to do / 0 if not
3289  **/
3290 static int ibmvfc_work_to_do(struct ibmvfc_host *vhost)
3291 {
3292         unsigned long flags;
3293         int rc;
3294
3295         spin_lock_irqsave(vhost->host->host_lock, flags);
3296         rc = __ibmvfc_work_to_do(vhost);
3297         spin_unlock_irqrestore(vhost->host->host_lock, flags);
3298         return rc;
3299 }
3300
3301 /**
3302  * ibmvfc_log_ae - Log async events if necessary
3303  * @vhost:              ibmvfc host struct
3304  * @events:             events to log
3305  *
3306  **/
3307 static void ibmvfc_log_ae(struct ibmvfc_host *vhost, int events)
3308 {
3309         if (events & IBMVFC_AE_RSCN)
3310                 fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_RSCN, 0);
3311         if ((events & IBMVFC_AE_LINKDOWN) &&
3312             vhost->state >= IBMVFC_HALTED)
3313                 fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_LINKDOWN, 0);
3314         if ((events & IBMVFC_AE_LINKUP) &&
3315             vhost->state == IBMVFC_INITIALIZING)
3316                 fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_LINKUP, 0);
3317 }
3318
3319 /**
3320  * ibmvfc_tgt_add_rport - Tell the FC transport about a new remote port
3321  * @tgt:                ibmvfc target struct
3322  *
3323  **/
3324 static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt)
3325 {
3326         struct ibmvfc_host *vhost = tgt->vhost;
3327         struct fc_rport *rport;
3328         unsigned long flags;
3329
3330         tgt_dbg(tgt, "Adding rport\n");
3331         rport = fc_remote_port_add(vhost->host, 0, &tgt->ids);
3332         spin_lock_irqsave(vhost->host->host_lock, flags);
3333         tgt->rport = rport;
3334         ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
3335         if (rport) {
3336                 tgt_dbg(tgt, "rport add succeeded\n");
3337                 rport->maxframe_size = tgt->service_parms.common.bb_rcv_sz & 0x0fff;
3338                 rport->supported_classes = 0;
3339                 if (tgt->service_parms.class1_parms[0] & 0x80000000)
3340                         rport->supported_classes |= FC_COS_CLASS1;
3341                 if (tgt->service_parms.class2_parms[0] & 0x80000000)
3342                         rport->supported_classes |= FC_COS_CLASS2;
3343                 if (tgt->service_parms.class3_parms[0] & 0x80000000)
3344                         rport->supported_classes |= FC_COS_CLASS3;
3345         } else
3346                 tgt_dbg(tgt, "rport add failed\n");
3347         spin_unlock_irqrestore(vhost->host->host_lock, flags);
3348 }
3349
3350 /**
3351  * ibmvfc_do_work - Do task level work
3352  * @vhost:              ibmvfc host struct
3353  *
3354  **/
3355 static void ibmvfc_do_work(struct ibmvfc_host *vhost)
3356 {
3357         struct ibmvfc_target *tgt;
3358         unsigned long flags;
3359         struct fc_rport *rport;
3360
3361         ibmvfc_log_ae(vhost, vhost->events_to_log);
3362         spin_lock_irqsave(vhost->host->host_lock, flags);
3363         vhost->events_to_log = 0;
3364         switch (vhost->action) {
3365         case IBMVFC_HOST_ACTION_NONE:
3366         case IBMVFC_HOST_ACTION_INIT_WAIT:
3367                 break;
3368         case IBMVFC_HOST_ACTION_INIT:
3369                 BUG_ON(vhost->state != IBMVFC_INITIALIZING);
3370                 vhost->job_step(vhost);
3371                 break;
3372         case IBMVFC_HOST_ACTION_QUERY:
3373                 list_for_each_entry(tgt, &vhost->targets, queue)
3374                         ibmvfc_init_tgt(tgt, ibmvfc_tgt_query_target);
3375                 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY_TGTS);
3376                 break;
3377         case IBMVFC_HOST_ACTION_QUERY_TGTS:
3378                 list_for_each_entry(tgt, &vhost->targets, queue) {
3379                         if (tgt->action == IBMVFC_TGT_ACTION_INIT) {
3380                                 tgt->job_step(tgt);
3381                                 break;
3382                         }
3383                 }
3384
3385                 if (!ibmvfc_dev_init_to_do(vhost))
3386                         ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL);
3387                 break;
3388         case IBMVFC_HOST_ACTION_TGT_DEL:
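                /*
                 * Delete rports one at a time: drop the host lock, since
                 * fc_remote_port_delete() can sleep, then return so the work
                 * loop re-evaluates the host state.
                 */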
3389                 list_for_each_entry(tgt, &vhost->targets, queue) {
3390                         if (tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) {
3391                                 tgt_dbg(tgt, "Deleting rport\n");
3392                                 rport = tgt->rport;
3393                                 tgt->rport = NULL;
3394                                 list_del(&tgt->queue);
3395                                 spin_unlock_irqrestore(vhost->host->host_lock, flags);
3396                                 if (rport)
3397                                         fc_remote_port_delete(rport);
3398                                 kref_put(&tgt->kref, ibmvfc_release_tgt);
3399                                 return;
3400                         }
3401                 }
3402
3403                 if (vhost->state == IBMVFC_INITIALIZING) {
3404                         ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
3405                         vhost->job_step = ibmvfc_discover_targets;
3406                 } else {
3407                         ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
3408                         spin_unlock_irqrestore(vhost->host->host_lock, flags);
3409                         scsi_unblock_requests(vhost->host);
3410                         wake_up(&vhost->init_wait_q);
3411                         return;
3412                 }
3413                 break;
3414         case IBMVFC_HOST_ACTION_ALLOC_TGTS:
3415                 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_INIT);
3416                 spin_unlock_irqrestore(vhost->host->host_lock, flags);
3417                 ibmvfc_alloc_targets(vhost);
3418                 spin_lock_irqsave(vhost->host->host_lock, flags);
3419                 break;
3420         case IBMVFC_HOST_ACTION_TGT_INIT:
3421                 list_for_each_entry(tgt, &vhost->targets, queue) {
3422                         if (tgt->action == IBMVFC_TGT_ACTION_INIT) {
3423                                 tgt->job_step(tgt);
3424                                 break;
3425                         }
3426                 }
3427
3428                 if (!ibmvfc_dev_init_to_do(vhost)) {
3429                         ibmvfc_set_host_state(vhost, IBMVFC_ACTIVE);
3430                         ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_ADD);
3431                         vhost->init_retries = 0;
3432                         spin_unlock_irqrestore(vhost->host->host_lock, flags);
3433                         scsi_unblock_requests(vhost->host);
3434                         return;
3435                 }
3436                 break;
3437         case IBMVFC_HOST_ACTION_TGT_ADD:
3438                 list_for_each_entry(tgt, &vhost->targets, queue) {
3439                         if (tgt->action == IBMVFC_TGT_ACTION_ADD_RPORT) {
3440                                 spin_unlock_irqrestore(vhost->host->host_lock, flags);
3441                                 ibmvfc_tgt_add_rport(tgt);
3442                                 return;
3443                         } else if (tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) {
3444                                 tgt_dbg(tgt, "Deleting rport\n");
3445                                 rport = tgt->rport;
3446                                 tgt->rport = NULL;
3447                                 list_del(&tgt->queue);
3448                                 spin_unlock_irqrestore(vhost->host->host_lock, flags);
3449                                 if (rport)
3450                                         fc_remote_port_delete(rport);
3451                                 kref_put(&tgt->kref, ibmvfc_release_tgt);
3452                                 return;
3453                         }
3454                 }
3455
3456                 if (vhost->reinit) {
3457                         vhost->reinit = 0;
3458                         ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
3459                 } else {
3460                         ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
3461                         wake_up(&vhost->init_wait_q);
3462                 }
3463                 break;
3464         default:
3465                 break;
3466         }
3467
3468         spin_unlock_irqrestore(vhost->host->host_lock, flags);
3469 }
3470
3471 /**
3472  * ibmvfc_work - Do task level work
3473  * @data:               ibmvfc host struct
3474  *
3475  * Returns:
3476  *      zero
3477  **/
3478 static int ibmvfc_work(void *data)
3479 {
3480         struct ibmvfc_host *vhost = data;
3481         int rc;
3482
3483         set_user_nice(current, -20);
3484
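                /* Sleep until ibmvfc_work_to_do() reports pending host or target work, then process it. */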
3485         while (1) {
3486                 rc = wait_event_interruptible(vhost->work_wait_q,
3487                                               ibmvfc_work_to_do(vhost));
3488
3489                 BUG_ON(rc);
3490
3491                 if (kthread_should_stop())
3492                         break;
3493
3494                 ibmvfc_do_work(vhost);
3495         }
3496
3497         ibmvfc_dbg(vhost, "ibmvfc kthread exiting...\n");
3498         return 0;
3499 }
3500
3501 /**
3502  * ibmvfc_init_crq - Initializes and registers CRQ with hypervisor
3503  * @vhost:      ibmvfc host struct
3504  *
3505  * Allocates a page for messages, maps it for dma, and registers
3506  * the crq with the hypervisor.
3507  *
3508  * Return value:
3509  *      zero on success / other on failure
3510  **/
3511 static int ibmvfc_init_crq(struct ibmvfc_host *vhost)
3512 {
3513         int rc, retrc = -ENOMEM;
3514         struct device *dev = vhost->dev;
3515         struct vio_dev *vdev = to_vio_dev(dev);
3516         struct ibmvfc_crq_queue *crq = &vhost->crq;
3517
3518         ENTER;
3519         crq->msgs = (struct ibmvfc_crq *)get_zeroed_page(GFP_KERNEL);
3520
3521         if (!crq->msgs)
3522                 return -ENOMEM;
3523
3524         crq->size = PAGE_SIZE / sizeof(*crq->msgs);
3525         crq->msg_token = dma_map_single(dev, crq->msgs,
3526                                         PAGE_SIZE, DMA_BIDIRECTIONAL);
3527
3528         if (dma_mapping_error(crq->msg_token))
3529                 goto map_failed;
3530
3531         retrc = rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
3532                                         crq->msg_token, PAGE_SIZE);
3533
3534         if (rc == H_RESOURCE)
3535                 /* maybe kexecing and resource is busy. try a reset */
3536                 retrc = rc = ibmvfc_reset_crq(vhost);
3537
3538         if (rc == H_CLOSED)
3539                 dev_warn(dev, "Partner adapter not ready\n");
3540         else if (rc) {
3541                 dev_warn(dev, "Error %d opening adapter\n", rc);
3542                 goto reg_crq_failed;
3543         }
3544
3545         retrc = 0;
3546
3547         if ((rc = request_irq(vdev->irq, ibmvfc_interrupt, 0, IBMVFC_NAME, vhost))) {
3548                 dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n", vdev->irq, rc);
3549                 retrc = rc;
                goto req_irq_failed;
3550         }
3551
3552         if ((rc = vio_enable_interrupts(vdev))) {
3553                 dev_err(dev, "Error %d enabling interrupts\n", rc);
3554                 retrc = rc;
                goto req_irq_failed;
3555         }
3556
3557         crq->cur = 0;
3558         LEAVE;
3559         return retrc;
3560
3561 req_irq_failed:
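        /* Keep retrying H_FREE_CRQ while the hypervisor reports the queue busy. */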
3562         do {
3563                 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
3564         } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
3565 reg_crq_failed:
3566         dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
3567 map_failed:
3568         free_page((unsigned long)crq->msgs);
3569         return retrc;
3570 }
3571
3572 /**
3573  * ibmvfc_free_mem - Free memory for vhost
3574  * @vhost:      ibmvfc host struct
3575  *
3576  * Return value:
3577  *      none
3578  **/
3579 static void ibmvfc_free_mem(struct ibmvfc_host *vhost)
3580 {
3581         struct ibmvfc_async_crq_queue *async_q = &vhost->async_crq;
3582
3583         ENTER;
3584         mempool_destroy(vhost->tgt_pool);
3585         kfree(vhost->trace);
3586         dma_free_coherent(vhost->dev, vhost->disc_buf_sz, vhost->disc_buf,
3587                           vhost->disc_buf_dma);
3588         dma_free_coherent(vhost->dev, sizeof(*vhost->login_buf),
3589                           vhost->login_buf, vhost->login_buf_dma);
3590         dma_pool_destroy(vhost->sg_pool);
3591         dma_unmap_single(vhost->dev, async_q->msg_token,
3592                          async_q->size * sizeof(*async_q->msgs), DMA_BIDIRECTIONAL);
3593         free_page((unsigned long)async_q->msgs);
3594         LEAVE;
3595 }
3596
3597 /**
3598  * ibmvfc_alloc_mem - Allocate memory for vhost
3599  * @vhost:      ibmvfc host struct
3600  *
3601  * Return value:
3602  *      0 on success / non-zero on failure
3603  **/
3604 static int ibmvfc_alloc_mem(struct ibmvfc_host *vhost)
3605 {
3606         struct ibmvfc_async_crq_queue *async_q = &vhost->async_crq;
3607         struct device *dev = vhost->dev;
3608
3609         ENTER;
3610         async_q->msgs = (struct ibmvfc_async_crq *)get_zeroed_page(GFP_KERNEL);
3611         if (!async_q->msgs) {
3612                 dev_err(dev, "Couldn't allocate async queue.\n");
3613                 goto nomem;
3614         }
3615
3616         async_q->size = PAGE_SIZE / sizeof(struct ibmvfc_async_crq);
3617         async_q->msg_token = dma_map_single(dev, async_q->msgs,
3618                                             async_q->size * sizeof(*async_q->msgs),
3619                                             DMA_BIDIRECTIONAL);
3620
3621         if (dma_mapping_error(async_q->msg_token)) {
3622                 dev_err(dev, "Failed to map async queue\n");
3623                 goto free_async_crq;
3624         }
3625
3626         vhost->sg_pool = dma_pool_create(IBMVFC_NAME, dev,
3627                                          SG_ALL * sizeof(struct srp_direct_buf),
3628                                          sizeof(struct srp_direct_buf), 0);
3629
3630         if (!vhost->sg_pool) {
3631                 dev_err(dev, "Failed to allocate sg pool\n");
3632                 goto unmap_async_crq;
3633         }
3634
3635         vhost->login_buf = dma_alloc_coherent(dev, sizeof(*vhost->login_buf),
3636                                               &vhost->login_buf_dma, GFP_KERNEL);
3637
3638         if (!vhost->login_buf) {
3639                 dev_err(dev, "Couldn't allocate NPIV login buffer\n");
3640                 goto free_sg_pool;
3641         }
3642
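        /* The discovery buffer holds one SCSI ID per possible target. */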
3643         vhost->disc_buf_sz = sizeof(vhost->disc_buf->scsi_id[0]) * max_targets;
3644         vhost->disc_buf = dma_alloc_coherent(dev, vhost->disc_buf_sz,
3645                                              &vhost->disc_buf_dma, GFP_KERNEL);
3646
3647         if (!vhost->disc_buf) {
3648                 dev_err(dev, "Couldn't allocate Discover Targets buffer\n");
3649                 goto free_login_buffer;
3650         }
3651
3652         vhost->trace = kcalloc(IBMVFC_NUM_TRACE_ENTRIES,
3653                                sizeof(struct ibmvfc_trace_entry), GFP_KERNEL);
3654
3655         if (!vhost->trace)
3656                 goto free_disc_buffer;
3657
3658         vhost->tgt_pool = mempool_create_kzalloc_pool(IBMVFC_TGT_MEMPOOL_SZ,
3659                                                       sizeof(struct ibmvfc_target));
3660
3661         if (!vhost->tgt_pool) {
3662                 dev_err(dev, "Couldn't allocate target memory pool\n");
3663                 goto free_trace;
3664         }
3665
3666         LEAVE;
3667         return 0;
3668
3669 free_trace:
3670         kfree(vhost->trace);
3671 free_disc_buffer:
3672         dma_free_coherent(dev, vhost->disc_buf_sz, vhost->disc_buf,
3673                           vhost->disc_buf_dma);
3674 free_login_buffer:
3675         dma_free_coherent(dev, sizeof(*vhost->login_buf),
3676                           vhost->login_buf, vhost->login_buf_dma);
3677 free_sg_pool:
3678         dma_pool_destroy(vhost->sg_pool);
3679 unmap_async_crq:
3680         dma_unmap_single(dev, async_q->msg_token,
3681                          async_q->size * sizeof(*async_q->msgs), DMA_BIDIRECTIONAL);
3682 free_async_crq:
3683         free_page((unsigned long)async_q->msgs);
3684 nomem:
3685         LEAVE;
3686         return -ENOMEM;
3687 }
3688
3689 /**
3690  * ibmvfc_probe - Adapter hot plug add entry point
3691  * @vdev:       vio device struct
3692  * @id: vio device id struct
3693  *
3694  * Return value:
3695  *      0 on success / non-zero on failure
3696  **/
3697 static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
3698 {
3699         struct ibmvfc_host *vhost;
3700         struct Scsi_Host *shost;
3701         struct device *dev = &vdev->dev;
3702         int rc = -ENOMEM;
3703
3704         ENTER;
3705         shost = scsi_host_alloc(&driver_template, sizeof(*vhost));
3706         if (!shost) {
3707                 dev_err(dev, "Couldn't allocate host data\n");
3708                 goto out;
3709         }
3710
3711         shost->transportt = ibmvfc_transport_template;
3712         shost->can_queue = max_requests;
3713         shost->max_lun = max_lun;
3714         shost->max_id = max_targets;
3715         shost->max_sectors = IBMVFC_MAX_SECTORS;
3716         shost->max_cmd_len = IBMVFC_MAX_CDB_LEN;
3717         shost->unique_id = shost->host_no;
3718
3719         vhost = shost_priv(shost);
3720         INIT_LIST_HEAD(&vhost->sent);
3721         INIT_LIST_HEAD(&vhost->free);
3722         INIT_LIST_HEAD(&vhost->targets);
3723         sprintf(vhost->name, IBMVFC_NAME);
3724         vhost->host = shost;
3725         vhost->dev = dev;
3726         vhost->partition_number = -1;
3727         vhost->log_level = log_level;
3728         strcpy(vhost->partition_name, "UNKNOWN");
3729         init_waitqueue_head(&vhost->work_wait_q);
3730         init_waitqueue_head(&vhost->init_wait_q);
3731
3732         if ((rc = ibmvfc_alloc_mem(vhost)))
3733                 goto free_scsi_host;
3734
3735         vhost->work_thread = kthread_run(ibmvfc_work, vhost, "%s_%d", IBMVFC_NAME,
3736                                          shost->host_no);
3737
3738         if (IS_ERR(vhost->work_thread)) {
3739                 dev_err(dev, "Couldn't create kernel thread: %ld\n",
3740                         PTR_ERR(vhost->work_thread));
3741                 rc = PTR_ERR(vhost->work_thread);
                goto free_host_mem;
3742         }
3743
3744         if ((rc = ibmvfc_init_crq(vhost))) {
3745                 dev_err(dev, "Couldn't initialize crq. rc=%d\n", rc);
3746                 goto kill_kthread;
3747         }
3748
3749         if ((rc = ibmvfc_init_event_pool(vhost))) {
3750                 dev_err(dev, "Couldn't initialize event pool. rc=%d\n", rc);
3751                 goto release_crq;
3752         }
3753
3754         if ((rc = scsi_add_host(shost, dev)))
3755                 goto release_event_pool;
3756
3757         if ((rc = ibmvfc_create_trace_file(&shost->shost_dev.kobj,
3758                                            &ibmvfc_trace_attr))) {
3759                 dev_err(dev, "Failed to create trace file. rc=%d\n", rc);
3760                 goto remove_shost;
3761         }
3762
3763         dev_set_drvdata(dev, vhost);
3764         spin_lock(&ibmvfc_driver_lock);
3765         list_add_tail(&vhost->queue, &ibmvfc_head);
3766         spin_unlock(&ibmvfc_driver_lock);
3767
3768         ibmvfc_send_crq_init(vhost);
3769         scsi_scan_host(shost);
3770         return 0;
3771
3772 remove_shost:
3773         scsi_remove_host(shost);
3774 release_event_pool:
3775         ibmvfc_free_event_pool(vhost);
3776 release_crq:
3777         ibmvfc_release_crq_queue(vhost);
3778 kill_kthread:
3779         kthread_stop(vhost->work_thread);
3780 free_host_mem:
3781         ibmvfc_free_mem(vhost);
3782 free_scsi_host:
3783         scsi_host_put(shost);
3784 out:
3785         LEAVE;
3786         return rc;
3787 }
3788
3789 /**
3790  * ibmvfc_remove - Adapter hot plug remove entry point
3791  * @vdev:       vio device struct
3792  *
3793  * Return value:
3794  *      0
3795  **/
3796 static int ibmvfc_remove(struct vio_dev *vdev)
3797 {
3798         struct ibmvfc_host *vhost = dev_get_drvdata(&vdev->dev);
3799         unsigned long flags;
3800
3801         ENTER;
3802         ibmvfc_remove_trace_file(&vhost->host->shost_dev.kobj, &ibmvfc_trace_attr);
3803         kthread_stop(vhost->work_thread);
3804         fc_remove_host(vhost->host);
3805         scsi_remove_host(vhost->host);
3806         ibmvfc_release_crq_queue(vhost);
3807
3808         spin_lock_irqsave(vhost->host->host_lock, flags);
3809         ibmvfc_purge_requests(vhost, DID_ERROR);
3810         ibmvfc_free_event_pool(vhost);
3811         spin_unlock_irqrestore(vhost->host->host_lock, flags);
3812
3813         ibmvfc_free_mem(vhost);
3814         spin_lock(&ibmvfc_driver_lock);
3815         list_del(&vhost->queue);
3816         spin_unlock(&ibmvfc_driver_lock);
3817         scsi_host_put(vhost->host);
3818         LEAVE;
3819         return 0;
3820 }
3821
3822 /**
3823  * ibmvfc_get_desired_dma - Calculate DMA resources needed by the driver
3824  * @vdev:       vio device struct
3825  *
3826  * Return value:
3827  *      Number of bytes the driver will need to DMA map at the same time in
3828  *      order to perform well.
3829  **/
3830 static unsigned long ibmvfc_get_desired_dma(struct vio_dev *vdev)
3831 {
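        /*
         * One IU per possible request, plus an estimated 512KB of mapped data
         * per cmd_per_lun slot. For example (hypothetical values): with
         * max_requests = 100 and cmd_per_lun = 16 this asks for
         * 100 * sizeof(union ibmvfc_iu) + 16 * 512KB.
         */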
3832         unsigned long pool_dma = max_requests * sizeof(union ibmvfc_iu);
3833         return pool_dma + ((512 * 1024) * driver_template.cmd_per_lun);
3834 }
3835
3836 static struct vio_device_id ibmvfc_device_table[] __devinitdata = {
3837         {"fcp", "IBM,vfc-client"},
3838         { "", "" }
3839 };
3840 MODULE_DEVICE_TABLE(vio, ibmvfc_device_table);
3841
3842 static struct vio_driver ibmvfc_driver = {
3843         .id_table = ibmvfc_device_table,
3844         .probe = ibmvfc_probe,
3845         .remove = ibmvfc_remove,
3846         .get_desired_dma = ibmvfc_get_desired_dma,
3847         .driver = {
3848                 .name = IBMVFC_NAME,
3849                 .owner = THIS_MODULE,
3850         }
3851 };
3852
3853 static struct fc_function_template ibmvfc_transport_functions = {
3854         .show_host_fabric_name = 1,
3855         .show_host_node_name = 1,
3856         .show_host_port_name = 1,
3857         .show_host_supported_classes = 1,
3858         .show_host_port_type = 1,
3859         .show_host_port_id = 1,
3860
3861         .get_host_port_state = ibmvfc_get_host_port_state,
3862         .show_host_port_state = 1,
3863
3864         .get_host_speed = ibmvfc_get_host_speed,
3865         .show_host_speed = 1,
3866
3867         .issue_fc_host_lip = ibmvfc_issue_fc_host_lip,
3868         .terminate_rport_io = ibmvfc_terminate_rport_io,
3869
3870         .show_rport_maxframe_size = 1,
3871         .show_rport_supported_classes = 1,
3872
3873         .set_rport_dev_loss_tmo = ibmvfc_set_rport_dev_loss_tmo,
3874         .show_rport_dev_loss_tmo = 1,
3875
3876         .get_starget_node_name = ibmvfc_get_starget_node_name,
3877         .show_starget_node_name = 1,
3878
3879         .get_starget_port_name = ibmvfc_get_starget_port_name,
3880         .show_starget_port_name = 1,
3881
3882         .get_starget_port_id = ibmvfc_get_starget_port_id,
3883         .show_starget_port_id = 1,
3884 };
3885
3886 /**
3887  * ibmvfc_module_init - Initialize the ibmvfc module
3888  *
3889  * Return value:
3890  *      0 on success / other on failure
3891  **/
3892 static int __init ibmvfc_module_init(void)
3893 {
3894         int rc;
3895
3896         if (!firmware_has_feature(FW_FEATURE_VIO))
3897                 return -ENODEV;
3898
3899         printk(KERN_INFO IBMVFC_NAME": IBM Virtual Fibre Channel Driver version: %s %s\n",
3900                IBMVFC_DRIVER_VERSION, IBMVFC_DRIVER_DATE);
3901
3902         ibmvfc_transport_template = fc_attach_transport(&ibmvfc_transport_functions);
3903         if (!ibmvfc_transport_template)
3904                 return -ENOMEM;
3905
3906         rc = vio_register_driver(&ibmvfc_driver);
3907         if (rc)
3908                 fc_release_transport(ibmvfc_transport_template);
3909         return rc;
3910 }
3911
3912 /**
3913  * ibmvfc_module_exit - Teardown the ibmvfc module
3914  *
3915  * Return value:
3916  *      nothing
3917  **/
3918 static void __exit ibmvfc_module_exit(void)
3919 {
3920         vio_unregister_driver(&ibmvfc_driver);
3921         fc_release_transport(ibmvfc_transport_template);
3922 }
3923
3924 module_init(ibmvfc_module_init);
3925 module_exit(ibmvfc_module_exit);