a13db590842692df2a02095f906a46ccdf49bbee
[pandora-kernel.git] / drivers / scsi / ibmvscsi / ibmvfc.c
1 /*
2  * ibmvfc.c -- driver for IBM Power Virtual Fibre Channel Adapter
3  *
4  * Written By: Brian King <brking@linux.vnet.ibm.com>, IBM Corporation
5  *
6  * Copyright (C) IBM Corporation, 2008
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  *
13  * This program is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  * GNU General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; if not, write to the Free Software
20  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
21  *
22  */
23
24 #include <linux/module.h>
25 #include <linux/moduleparam.h>
26 #include <linux/dma-mapping.h>
27 #include <linux/dmapool.h>
28 #include <linux/delay.h>
29 #include <linux/interrupt.h>
30 #include <linux/kthread.h>
31 #include <linux/slab.h>
32 #include <linux/of.h>
33 #include <linux/pm.h>
34 #include <linux/stringify.h>
35 #include <asm/firmware.h>
36 #include <asm/irq.h>
37 #include <asm/vio.h>
38 #include <scsi/scsi.h>
39 #include <scsi/scsi_cmnd.h>
40 #include <scsi/scsi_host.h>
41 #include <scsi/scsi_device.h>
42 #include <scsi/scsi_tcq.h>
43 #include <scsi/scsi_transport_fc.h>
44 #include <scsi/scsi_bsg_fc.h>
45 #include "ibmvfc.h"
46
/*
 * Module parameter storage. Defaults come from ibmvfc.h; parameters
 * registered with S_IWUSR may be changed at runtime via sysfs.
 */
static unsigned int init_timeout = IBMVFC_INIT_TIMEOUT;
static unsigned int default_timeout = IBMVFC_DEFAULT_TIMEOUT;
static unsigned int max_lun = IBMVFC_MAX_LUN;
static unsigned int max_targets = IBMVFC_MAX_TARGETS;
static unsigned int max_requests = IBMVFC_MAX_REQUESTS_DEFAULT;
static unsigned int disc_threads = IBMVFC_MAX_DISC_THREADS;
static unsigned int dev_loss_tmo = IBMVFC_DEV_LOSS_TMO;
static unsigned int ibmvfc_debug = IBMVFC_DEBUG;
static unsigned int log_level = IBMVFC_DEFAULT_LOG_LEVEL;
/* List of all hosts driven by this module; presumably serialized by
 * ibmvfc_driver_lock — confirm at use sites (not visible in this chunk). */
static LIST_HEAD(ibmvfc_head);
static DEFINE_SPINLOCK(ibmvfc_driver_lock);
static struct scsi_transport_template *ibmvfc_transport_template;

MODULE_DESCRIPTION("IBM Virtual Fibre Channel Driver");
MODULE_AUTHOR("Brian King <brking@linux.vnet.ibm.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVFC_DRIVER_VERSION);

module_param_named(init_timeout, init_timeout, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds. "
		 "[Default=" __stringify(IBMVFC_INIT_TIMEOUT) "]");
module_param_named(default_timeout, default_timeout, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(default_timeout,
		 "Default timeout in seconds for initialization and EH commands. "
		 "[Default=" __stringify(IBMVFC_DEFAULT_TIMEOUT) "]");
module_param_named(max_requests, max_requests, uint, S_IRUGO);
MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter. "
		 "[Default=" __stringify(IBMVFC_MAX_REQUESTS_DEFAULT) "]");
module_param_named(max_lun, max_lun, uint, S_IRUGO);
MODULE_PARM_DESC(max_lun, "Maximum allowed LUN. "
		 "[Default=" __stringify(IBMVFC_MAX_LUN) "]");
module_param_named(max_targets, max_targets, uint, S_IRUGO);
MODULE_PARM_DESC(max_targets, "Maximum allowed targets. "
		 "[Default=" __stringify(IBMVFC_MAX_TARGETS) "]");
module_param_named(disc_threads, disc_threads, uint, S_IRUGO);
MODULE_PARM_DESC(disc_threads, "Number of device discovery threads to use. "
		 "[Default=" __stringify(IBMVFC_MAX_DISC_THREADS) "]");
module_param_named(debug, ibmvfc_debug, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable driver debug information. "
		 "[Default=" __stringify(IBMVFC_DEBUG) "]");
module_param_named(dev_loss_tmo, dev_loss_tmo, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dev_loss_tmo, "Maximum number of seconds that the FC "
		 "transport should insulate the loss of a remote port. Once this "
		 "value is exceeded, the scsi target is removed. "
		 "[Default=" __stringify(IBMVFC_DEV_LOSS_TMO) "]");
module_param_named(log_level, log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver. "
		 "[Default=" __stringify(IBMVFC_DEFAULT_LOG_LEVEL) "]");
95
/*
 * Translation table for adapter command completion codes, keyed by the
 * (status class, error) pair reported in the response. Columns:
 *   status: status / error class (matched as a mask in ibmvfc_get_err_index)
 *   error:  error code within the class
 *   result: SCSI host byte (DID_*) to report to the midlayer
 *   retry:  non-zero if the command may be retried
 *   log:    non-zero if the error is worth logging
 *   name:   human readable description
 */
static const struct {
	u16 status;
	u16 error;
	u8 result;
	u8 retry;
	int log;
	char *name;
} cmd_status [] = {
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_UNABLE_TO_ESTABLISH, DID_ERROR, 1, 1, "unable to establish" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_FAULT, DID_OK, 1, 0, "transport fault" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_CMD_TIMEOUT, DID_TIME_OUT, 1, 1, "command timeout" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_ENETDOWN, DID_TRANSPORT_DISRUPTED, 1, 1, "network down" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_HW_FAILURE, DID_ERROR, 1, 1, "hardware failure" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_DOWN_ERR, DID_REQUEUE, 0, 0, "link down" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_DEAD_ERR, DID_ERROR, 0, 0, "link dead" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_UNABLE_TO_REGISTER, DID_ERROR, 1, 1, "unable to register" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_BUSY, DID_BUS_BUSY, 1, 0, "transport busy" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_DEAD, DID_ERROR, 0, 1, "transport dead" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_CONFIG_ERROR, DID_ERROR, 1, 1, "configuration error" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_NAME_SERVER_FAIL, DID_ERROR, 1, 1, "name server failure" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_HALTED, DID_REQUEUE, 1, 0, "link halted" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_GENERAL, DID_OK, 1, 0, "general transport error" },

	{ IBMVFC_VIOS_FAILURE, IBMVFC_CRQ_FAILURE, DID_REQUEUE, 1, 1, "CRQ failure" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_SW_FAILURE, DID_ERROR, 0, 1, "software failure" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_INVALID_PARAMETER, DID_ERROR, 0, 1, "invalid parameter" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_MISSING_PARAMETER, DID_ERROR, 0, 1, "missing parameter" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_HOST_IO_BUS, DID_ERROR, 1, 1, "host I/O bus failure" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED, DID_ERROR, 0, 1, "transaction cancelled" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED_IMPLICIT, DID_ERROR, 0, 1, "transaction cancelled implicit" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_INSUFFICIENT_RESOURCE, DID_REQUEUE, 1, 1, "insufficient resources" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_PLOGI_REQUIRED, DID_ERROR, 0, 1, "port login required" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_COMMAND_FAILED, DID_ERROR, 1, 1, "command failed" },

	{ IBMVFC_FC_FAILURE, IBMVFC_INVALID_ELS_CMD_CODE, DID_ERROR, 0, 1, "invalid ELS command code" },
	{ IBMVFC_FC_FAILURE, IBMVFC_INVALID_VERSION, DID_ERROR, 0, 1, "invalid version level" },
	{ IBMVFC_FC_FAILURE, IBMVFC_LOGICAL_ERROR, DID_ERROR, 1, 1, "logical error" },
	{ IBMVFC_FC_FAILURE, IBMVFC_INVALID_CT_IU_SIZE, DID_ERROR, 0, 1, "invalid CT_IU size" },
	{ IBMVFC_FC_FAILURE, IBMVFC_LOGICAL_BUSY, DID_REQUEUE, 1, 0, "logical busy" },
	{ IBMVFC_FC_FAILURE, IBMVFC_PROTOCOL_ERROR, DID_ERROR, 1, 1, "protocol error" },
	{ IBMVFC_FC_FAILURE, IBMVFC_UNABLE_TO_PERFORM_REQ, DID_ERROR, 1, 1, "unable to perform request" },
	{ IBMVFC_FC_FAILURE, IBMVFC_CMD_NOT_SUPPORTED, DID_ERROR, 0, 0, "command not supported" },
	{ IBMVFC_FC_FAILURE, IBMVFC_SERVER_NOT_AVAIL, DID_ERROR, 0, 1, "server not available" },
	{ IBMVFC_FC_FAILURE, IBMVFC_CMD_IN_PROGRESS, DID_ERROR, 0, 1, "command already in progress" },
	{ IBMVFC_FC_FAILURE, IBMVFC_VENDOR_SPECIFIC, DID_ERROR, 1, 1, "vendor specific" },

	{ IBMVFC_FC_SCSI_ERROR, 0, DID_OK, 1, 0, "SCSI error" },
};
144
/* Forward declarations for the NPIV login / target discovery state machine */
static void ibmvfc_npiv_login(struct ibmvfc_host *);
static void ibmvfc_tgt_send_prli(struct ibmvfc_target *);
static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *);
static void ibmvfc_tgt_query_target(struct ibmvfc_target *);
static void ibmvfc_npiv_logout(struct ibmvfc_host *);

/* Fallback description for (status, error) pairs missing from cmd_status */
static const char *unknown_error = "unknown error";
152
153 #ifdef CONFIG_SCSI_IBMVFC_TRACE
/**
 * ibmvfc_trc_start - Log a start trace entry
 * @evt:		ibmvfc event struct
 *
 * Records a "start" trace entry for @evt in the host's trace buffer.
 * NOTE(review): trace_index is post-incremented with no visible bounds
 * check here — presumably it wraps via its declaration in ibmvfc.h;
 * confirm against the header.
 **/
static void ibmvfc_trc_start(struct ibmvfc_event *evt)
{
	struct ibmvfc_host *vhost = evt->vhost;
	struct ibmvfc_cmd *vfc_cmd = &evt->iu.cmd;		/* valid for CMD format only */
	struct ibmvfc_mad_common *mad = &evt->iu.mad_common;	/* valid for MAD format only */
	struct ibmvfc_trace_entry *entry;

	entry = &vhost->trace[vhost->trace_index++];
	entry->evt = evt;
	entry->time = jiffies;
	entry->fmt = evt->crq.format;
	entry->type = IBMVFC_TRC_START;

	switch (entry->fmt) {
	case IBMVFC_CMD_FORMAT:
		/* SCSI command: capture opcode, target, LUN, TMF flags, length */
		entry->op_code = vfc_cmd->iu.cdb[0];
		entry->scsi_id = vfc_cmd->tgt_scsi_id;
		entry->lun = scsilun_to_int(&vfc_cmd->iu.lun);
		entry->tmf_flags = vfc_cmd->iu.tmf_flags;
		entry->u.start.xfer_len = vfc_cmd->iu.xfer_len;
		break;
	case IBMVFC_MAD_FORMAT:
		/* Management datagram: only the opcode is recorded */
		entry->op_code = mad->opcode;
		break;
	default:
		break;
	};
}
187
/**
 * ibmvfc_trc_end - Log an end trace entry
 * @evt:		ibmvfc event struct
 *
 * Records an "end" trace entry for @evt, mirroring ibmvfc_trc_start()
 * but reading from the response IU (evt->xfer_iu) and additionally
 * capturing completion status / error information.
 **/
static void ibmvfc_trc_end(struct ibmvfc_event *evt)
{
	struct ibmvfc_host *vhost = evt->vhost;
	struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;	/* valid for CMD format only */
	struct ibmvfc_mad_common *mad = &evt->xfer_iu->mad_common; /* valid for MAD format only */
	struct ibmvfc_trace_entry *entry = &vhost->trace[vhost->trace_index++];

	entry->evt = evt;
	entry->time = jiffies;
	entry->fmt = evt->crq.format;
	entry->type = IBMVFC_TRC_END;

	switch (entry->fmt) {
	case IBMVFC_CMD_FORMAT:
		entry->op_code = vfc_cmd->iu.cdb[0];
		entry->scsi_id = vfc_cmd->tgt_scsi_id;
		entry->lun = scsilun_to_int(&vfc_cmd->iu.lun);
		entry->tmf_flags = vfc_cmd->iu.tmf_flags;
		/* Completion details only exist on the end trace */
		entry->u.end.status = vfc_cmd->status;
		entry->u.end.error = vfc_cmd->error;
		entry->u.end.fcp_rsp_flags = vfc_cmd->rsp.flags;
		entry->u.end.rsp_code = vfc_cmd->rsp.data.info.rsp_code;
		entry->u.end.scsi_status = vfc_cmd->rsp.scsi_status;
		break;
	case IBMVFC_MAD_FORMAT:
		entry->op_code = mad->opcode;
		entry->u.end.status = mad->status;
		break;
	default:
		break;

	};
}
226
227 #else
228 #define ibmvfc_trc_start(evt) do { } while (0)
229 #define ibmvfc_trc_end(evt) do { } while (0)
230 #endif
231
232 /**
233  * ibmvfc_get_err_index - Find the index into cmd_status for the fcp response
234  * @status:             status / error class
235  * @error:              error
236  *
237  * Return value:
238  *      index into cmd_status / -EINVAL on failure
239  **/
240 static int ibmvfc_get_err_index(u16 status, u16 error)
241 {
242         int i;
243
244         for (i = 0; i < ARRAY_SIZE(cmd_status); i++)
245                 if ((cmd_status[i].status & status) == cmd_status[i].status &&
246                     cmd_status[i].error == error)
247                         return i;
248
249         return -EINVAL;
250 }
251
252 /**
253  * ibmvfc_get_cmd_error - Find the error description for the fcp response
254  * @status:             status / error class
255  * @error:              error
256  *
257  * Return value:
258  *      error description string
259  **/
260 static const char *ibmvfc_get_cmd_error(u16 status, u16 error)
261 {
262         int rc = ibmvfc_get_err_index(status, error);
263         if (rc >= 0)
264                 return cmd_status[rc].name;
265         return unknown_error;
266 }
267
/**
 * ibmvfc_get_err_result - Find the scsi status to return for the fcp response
 * @vfc_cmd:	ibmvfc command struct
 *
 * Return value:
 *	SCSI result value to return for completed command
 **/
static int ibmvfc_get_err_result(struct ibmvfc_cmd *vfc_cmd)
{
	int err;
	struct ibmvfc_fcp_rsp *rsp = &vfc_cmd->rsp;
	int fc_rsp_len = rsp->fcp_rsp_len;

	/*
	 * If an FCP response is present, it must be 0, 4 or 8 bytes long
	 * and carry a zero rsp_code; anything else is treated as an error.
	 */
	if ((rsp->flags & FCP_RSP_LEN_VALID) &&
	    ((fc_rsp_len && fc_rsp_len != 4 && fc_rsp_len != 8) ||
	     rsp->data.info.rsp_code))
		return DID_ERROR << 16;

	/* Map the adapter's (status, error) pair to a host byte, keeping
	 * the target's SCSI status byte; unknown pairs become DID_ERROR. */
	err = ibmvfc_get_err_index(vfc_cmd->status, vfc_cmd->error);
	if (err >= 0)
		return rsp->scsi_status | (cmd_status[err].result << 16);
	return rsp->scsi_status | (DID_ERROR << 16);
}
291
292 /**
293  * ibmvfc_retry_cmd - Determine if error status is retryable
294  * @status:             status / error class
295  * @error:              error
296  *
297  * Return value:
298  *      1 if error should be retried / 0 if it should not
299  **/
300 static int ibmvfc_retry_cmd(u16 status, u16 error)
301 {
302         int rc = ibmvfc_get_err_index(status, error);
303
304         if (rc >= 0)
305                 return cmd_status[rc].retry;
306         return 1;
307 }
308
/* Fallback description for explain codes missing from the tables below */
static const char *unknown_fc_explain = "unknown fc explain";

/* FC reject "explanation" codes for extended link services (presumably
 * the FC-LS reject explanations — confirm against the FC-LS spec) */
static const struct {
	u16 fc_explain;
	char *name;
} ls_explain [] = {
	{ 0x00, "no additional explanation" },
	{ 0x01, "service parameter error - options" },
	{ 0x03, "service parameter error - initiator control" },
	{ 0x05, "service parameter error - recipient control" },
	{ 0x07, "service parameter error - received data field size" },
	{ 0x09, "service parameter error - concurrent seq" },
	{ 0x0B, "service parameter error - credit" },
	{ 0x0D, "invalid N_Port/F_Port_Name" },
	{ 0x0E, "invalid node/Fabric Name" },
	{ 0x0F, "invalid common service parameters" },
	{ 0x11, "invalid association header" },
	{ 0x13, "association header required" },
	{ 0x15, "invalid originator S_ID" },
	{ 0x17, "invalid OX_ID-RX-ID combination" },
	{ 0x19, "command (request) already in progress" },
	{ 0x1E, "N_Port Login requested" },
	{ 0x1F, "Invalid N_Port_ID" },
};

/* FC reject "explanation" codes for generic (name server) services */
static const struct {
	u16 fc_explain;
	char *name;
} gs_explain [] = {
	{ 0x00, "no additional explanation" },
	{ 0x01, "port identifier not registered" },
	{ 0x02, "port name not registered" },
	{ 0x03, "node name not registered" },
	{ 0x04, "class of service not registered" },
	{ 0x06, "initial process associator not registered" },
	{ 0x07, "FC-4 TYPEs not registered" },
	{ 0x08, "symbolic port name not registered" },
	{ 0x09, "symbolic node name not registered" },
	{ 0x0A, "port type not registered" },
	{ 0xF0, "authorization exception" },
	{ 0xF1, "authentication exception" },
	{ 0xF2, "data base full" },
	{ 0xF3, "data base empty" },
	{ 0xF4, "processing request" },
	{ 0xF5, "unable to verify connection" },
	{ 0xF6, "devices not in a common zone" },
};
356
357 /**
358  * ibmvfc_get_ls_explain - Return the FC Explain description text
359  * @status:     FC Explain status
360  *
361  * Returns:
362  *      error string
363  **/
364 static const char *ibmvfc_get_ls_explain(u16 status)
365 {
366         int i;
367
368         for (i = 0; i < ARRAY_SIZE(ls_explain); i++)
369                 if (ls_explain[i].fc_explain == status)
370                         return ls_explain[i].name;
371
372         return unknown_fc_explain;
373 }
374
375 /**
376  * ibmvfc_get_gs_explain - Return the FC Explain description text
377  * @status:     FC Explain status
378  *
379  * Returns:
380  *      error string
381  **/
382 static const char *ibmvfc_get_gs_explain(u16 status)
383 {
384         int i;
385
386         for (i = 0; i < ARRAY_SIZE(gs_explain); i++)
387                 if (gs_explain[i].fc_explain == status)
388                         return gs_explain[i].name;
389
390         return unknown_fc_explain;
391 }
392
/* Descriptions for the ibmvfc_fc_type error classes reported by the adapter */
static const struct {
	enum ibmvfc_fc_type fc_type;
	char *name;
} fc_type [] = {
	{ IBMVFC_FABRIC_REJECT, "fabric reject" },
	{ IBMVFC_PORT_REJECT, "port reject" },
	{ IBMVFC_LS_REJECT, "ELS reject" },
	{ IBMVFC_FABRIC_BUSY, "fabric busy" },
	{ IBMVFC_PORT_BUSY, "port busy" },
	{ IBMVFC_BASIC_REJECT, "basic reject" },
};

/* Fallback description for FC types missing from the table above */
static const char *unknown_fc_type = "unknown fc type";
406
407 /**
408  * ibmvfc_get_fc_type - Return the FC Type description text
409  * @status:     FC Type error status
410  *
411  * Returns:
412  *      error string
413  **/
414 static const char *ibmvfc_get_fc_type(u16 status)
415 {
416         int i;
417
418         for (i = 0; i < ARRAY_SIZE(fc_type); i++)
419                 if (fc_type[i].fc_type == status)
420                         return fc_type[i].name;
421
422         return unknown_fc_type;
423 }
424
425 /**
426  * ibmvfc_set_tgt_action - Set the next init action for the target
427  * @tgt:                ibmvfc target struct
428  * @action:             action to perform
429  *
430  **/
431 static void ibmvfc_set_tgt_action(struct ibmvfc_target *tgt,
432                                   enum ibmvfc_target_action action)
433 {
434         switch (tgt->action) {
435         case IBMVFC_TGT_ACTION_DEL_RPORT:
436                 if (action == IBMVFC_TGT_ACTION_DELETED_RPORT)
437                         tgt->action = action;
438         case IBMVFC_TGT_ACTION_DELETED_RPORT:
439                 break;
440         default:
441                 if (action == IBMVFC_TGT_ACTION_DEL_RPORT)
442                         tgt->add_rport = 0;
443                 tgt->action = action;
444                 break;
445         }
446 }
447
448 /**
449  * ibmvfc_set_host_state - Set the state for the host
450  * @vhost:              ibmvfc host struct
451  * @state:              state to set host to
452  *
453  * Returns:
454  *      0 if state changed / non-zero if not changed
455  **/
456 static int ibmvfc_set_host_state(struct ibmvfc_host *vhost,
457                                   enum ibmvfc_host_state state)
458 {
459         int rc = 0;
460
461         switch (vhost->state) {
462         case IBMVFC_HOST_OFFLINE:
463                 rc = -EINVAL;
464                 break;
465         default:
466                 vhost->state = state;
467                 break;
468         };
469
470         return rc;
471 }
472
/**
 * ibmvfc_set_host_action - Set the next init action for the host
 * @vhost:		ibmvfc host struct
 * @action:		action to perform
 *
 * State machine for the host work thread. Most transitions are only
 * accepted from specific predecessor states (e.g. *_WAIT states only
 * follow the action they wait on); INIT and TGT_DEL are deferred while
 * a RESET or REENABLE is pending; the remaining actions are accepted
 * unconditionally.
 **/
static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
				   enum ibmvfc_host_action action)
{
	switch (action) {
	case IBMVFC_HOST_ACTION_ALLOC_TGTS:
		/* Only valid after an INIT has completed (INIT_WAIT) */
		if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT)
			vhost->action = action;
		break;
	case IBMVFC_HOST_ACTION_LOGO_WAIT:
		/* Only valid while a LOGO is outstanding */
		if (vhost->action == IBMVFC_HOST_ACTION_LOGO)
			vhost->action = action;
		break;
	case IBMVFC_HOST_ACTION_INIT_WAIT:
		/* Only valid while an INIT is outstanding */
		if (vhost->action == IBMVFC_HOST_ACTION_INIT)
			vhost->action = action;
		break;
	case IBMVFC_HOST_ACTION_QUERY:
		switch (vhost->action) {
		case IBMVFC_HOST_ACTION_INIT_WAIT:
		case IBMVFC_HOST_ACTION_NONE:
		case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
			vhost->action = action;
			break;
		default:
			break;
		};
		break;
	case IBMVFC_HOST_ACTION_TGT_INIT:
		/* Only valid after target structures have been allocated */
		if (vhost->action == IBMVFC_HOST_ACTION_ALLOC_TGTS)
			vhost->action = action;
		break;
	case IBMVFC_HOST_ACTION_INIT:
	case IBMVFC_HOST_ACTION_TGT_DEL:
		switch (vhost->action) {
		case IBMVFC_HOST_ACTION_RESET:
		case IBMVFC_HOST_ACTION_REENABLE:
			/* A pending CRQ reset/reenable takes precedence */
			break;
		default:
			vhost->action = action;
			break;
		};
		break;
	case IBMVFC_HOST_ACTION_LOGO:
	case IBMVFC_HOST_ACTION_QUERY_TGTS:
	case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
	case IBMVFC_HOST_ACTION_NONE:
	case IBMVFC_HOST_ACTION_RESET:
	case IBMVFC_HOST_ACTION_REENABLE:
	default:
		/* Unconditionally accepted actions */
		vhost->action = action;
		break;
	};
}
532
533 /**
534  * ibmvfc_reinit_host - Re-start host initialization (no NPIV Login)
535  * @vhost:              ibmvfc host struct
536  *
537  * Return value:
538  *      nothing
539  **/
540 static void ibmvfc_reinit_host(struct ibmvfc_host *vhost)
541 {
542         if (vhost->action == IBMVFC_HOST_ACTION_NONE) {
543                 if (!ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
544                         scsi_block_requests(vhost->host);
545                         ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
546                 }
547         } else
548                 vhost->reinit = 1;
549
550         wake_up(&vhost->work_wait_q);
551 }
552
553 /**
554  * ibmvfc_link_down - Handle a link down event from the adapter
555  * @vhost:      ibmvfc host struct
556  * @state:      ibmvfc host state to enter
557  *
558  **/
559 static void ibmvfc_link_down(struct ibmvfc_host *vhost,
560                              enum ibmvfc_host_state state)
561 {
562         struct ibmvfc_target *tgt;
563
564         ENTER;
565         scsi_block_requests(vhost->host);
566         list_for_each_entry(tgt, &vhost->targets, queue)
567                 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
568         ibmvfc_set_host_state(vhost, state);
569         ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL);
570         vhost->events_to_log |= IBMVFC_AE_LINKDOWN;
571         wake_up(&vhost->work_wait_q);
572         LEAVE;
573 }
574
/**
 * ibmvfc_init_host - Start host initialization
 * @vhost:		ibmvfc host struct
 *
 * Kicks off (re-)initialization: clears the async event queue, marks
 * all known targets for rport deletion, blocks scsi requests and hands
 * an NPIV login job to the work thread. If we were already waiting on
 * an init (INIT_WAIT) too many times, gives up and takes the adapter
 * offline instead.
 *
 * Return value:
 *	nothing
 **/
static void ibmvfc_init_host(struct ibmvfc_host *vhost)
{
	struct ibmvfc_target *tgt;

	if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) {
		/* Re-entering while an init was pending counts as a retry */
		if (++vhost->init_retries > IBMVFC_MAX_HOST_INIT_RETRIES) {
			dev_err(vhost->dev,
				"Host initialization retries exceeded. Taking adapter offline\n");
			ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
			return;
		}
	}

	if (!ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
		/* Start with a clean async event queue */
		memset(vhost->async_crq.msgs, 0, PAGE_SIZE);
		vhost->async_crq.cur = 0;

		list_for_each_entry(tgt, &vhost->targets, queue)
			ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
		scsi_block_requests(vhost->host);
		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
		vhost->job_step = ibmvfc_npiv_login;
		wake_up(&vhost->work_wait_q);
	}
}
607
608 /**
609  * ibmvfc_send_crq - Send a CRQ
610  * @vhost:      ibmvfc host struct
611  * @word1:      the first 64 bits of the data
612  * @word2:      the second 64 bits of the data
613  *
614  * Return value:
615  *      0 on success / other on failure
616  **/
617 static int ibmvfc_send_crq(struct ibmvfc_host *vhost, u64 word1, u64 word2)
618 {
619         struct vio_dev *vdev = to_vio_dev(vhost->dev);
620         return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2);
621 }
622
623 /**
624  * ibmvfc_send_crq_init - Send a CRQ init message
625  * @vhost:      ibmvfc host struct
626  *
627  * Return value:
628  *      0 on success / other on failure
629  **/
630 static int ibmvfc_send_crq_init(struct ibmvfc_host *vhost)
631 {
632         ibmvfc_dbg(vhost, "Sending CRQ init\n");
633         return ibmvfc_send_crq(vhost, 0xC001000000000000LL, 0);
634 }
635
636 /**
637  * ibmvfc_send_crq_init_complete - Send a CRQ init complete message
638  * @vhost:      ibmvfc host struct
639  *
640  * Return value:
641  *      0 on success / other on failure
642  **/
643 static int ibmvfc_send_crq_init_complete(struct ibmvfc_host *vhost)
644 {
645         ibmvfc_dbg(vhost, "Sending CRQ init complete\n");
646         return ibmvfc_send_crq(vhost, 0xC002000000000000LL, 0);
647 }
648
/**
 * ibmvfc_release_crq_queue - Deallocates data and unregisters CRQ
 * @vhost:	ibmvfc host struct
 *
 * Frees irq, deallocates a page for messages, unmaps dma, and unregisters
 * the crq with the hypervisor. The irq is freed and the tasklet killed
 * before H_FREE_CRQ so no interrupt can touch the queue while it is
 * being torn down; H_FREE_CRQ is retried with a 100ms backoff while the
 * hypervisor reports busy.
 **/
static void ibmvfc_release_crq_queue(struct ibmvfc_host *vhost)
{
	long rc = 0;
	struct vio_dev *vdev = to_vio_dev(vhost->dev);
	struct ibmvfc_crq_queue *crq = &vhost->crq;

	ibmvfc_dbg(vhost, "Releasing CRQ\n");
	free_irq(vdev->irq, vhost);
	tasklet_kill(&vhost->tasklet);
	do {
		if (rc)
			msleep(100);	/* back off while the hypervisor is busy */
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	vhost->state = IBMVFC_NO_CRQ;
	vhost->logged_in = 0;
	dma_unmap_single(vhost->dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
	free_page((unsigned long)crq->msgs);
}
676
677 /**
678  * ibmvfc_reenable_crq_queue - reenables the CRQ
679  * @vhost:      ibmvfc host struct
680  *
681  * Return value:
682  *      0 on success / other on failure
683  **/
684 static int ibmvfc_reenable_crq_queue(struct ibmvfc_host *vhost)
685 {
686         int rc = 0;
687         struct vio_dev *vdev = to_vio_dev(vhost->dev);
688
689         /* Re-enable the CRQ */
690         do {
691                 if (rc)
692                         msleep(100);
693                 rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
694         } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
695
696         if (rc)
697                 dev_err(vhost->dev, "Error enabling adapter (rc=%d)\n", rc);
698
699         return rc;
700 }
701
/**
 * ibmvfc_reset_crq - resets a crq after a failure
 * @vhost:	ibmvfc host struct
 *
 * Closes the CRQ with the hypervisor (retrying while busy), then under
 * the host lock resets driver CRQ state, zeroes the message page, and
 * re-registers the queue with H_REG_CRQ. H_CLOSED from registration
 * means the partner adapter is not yet ready and is only a warning.
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ibmvfc_reset_crq(struct ibmvfc_host *vhost)
{
	int rc = 0;
	unsigned long flags;
	struct vio_dev *vdev = to_vio_dev(vhost->dev);
	struct ibmvfc_crq_queue *crq = &vhost->crq;

	/* Close the CRQ */
	do {
		if (rc)
			msleep(100);	/* back off while the hypervisor is busy */
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	spin_lock_irqsave(vhost->host->host_lock, flags);
	vhost->state = IBMVFC_NO_CRQ;
	vhost->logged_in = 0;
	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);

	/* Clean out the queue */
	memset(crq->msgs, 0, PAGE_SIZE);
	crq->cur = 0;

	/* And re-open it again */
	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_CLOSED)
		/* Adapter is good, but other end is not ready */
		dev_warn(vhost->dev, "Partner adapter not ready\n");
	else if (rc != 0)
		dev_warn(vhost->dev, "Couldn't register crq (rc=%d)\n", rc);
	spin_unlock_irqrestore(vhost->host->host_lock, flags);

	return rc;
}
745
746 /**
747  * ibmvfc_valid_event - Determines if event is valid.
748  * @pool:       event_pool that contains the event
749  * @evt:        ibmvfc event to be checked for validity
750  *
751  * Return value:
752  *      1 if event is valid / 0 if event is not valid
753  **/
754 static int ibmvfc_valid_event(struct ibmvfc_event_pool *pool,
755                               struct ibmvfc_event *evt)
756 {
757         int index = evt - pool->events;
758         if (index < 0 || index >= pool->size)   /* outside of bounds */
759                 return 0;
760         if (evt != pool->events + index)        /* unaligned */
761                 return 0;
762         return 1;
763 }
764
/**
 * ibmvfc_free_event - Free the specified event
 * @evt:	ibmvfc_event to be freed
 *
 * Returns @evt to the host's free list. The BUG_ONs catch corruption:
 * the event must belong to this host's pool, and "free" must go
 * 0 -> 1 exactly once (atomic_inc_return != 1 means a double free).
 **/
static void ibmvfc_free_event(struct ibmvfc_event *evt)
{
	struct ibmvfc_host *vhost = evt->vhost;
	struct ibmvfc_event_pool *pool = &vhost->pool;

	BUG_ON(!ibmvfc_valid_event(pool, evt));
	BUG_ON(atomic_inc_return(&evt->free) != 1);
	list_add_tail(&evt->queue, &vhost->free);
}
779
780 /**
781  * ibmvfc_scsi_eh_done - EH done function for queuecommand commands
782  * @evt:        ibmvfc event struct
783  *
784  * This function does not setup any error status, that must be done
785  * before this function gets called.
786  **/
787 static void ibmvfc_scsi_eh_done(struct ibmvfc_event *evt)
788 {
789         struct scsi_cmnd *cmnd = evt->cmnd;
790
791         if (cmnd) {
792                 scsi_dma_unmap(cmnd);
793                 cmnd->scsi_done(cmnd);
794         }
795
796         if (evt->eh_comp)
797                 complete(evt->eh_comp);
798
799         ibmvfc_free_event(evt);
800 }
801
/**
 * ibmvfc_fail_request - Fail request with specified error code
 * @evt:		ibmvfc event struct
 * @error_code:	error code to fail request with
 *
 * SCSI commands get @error_code in the host byte and are rerouted
 * through the EH done path; MAD requests are marked
 * IBMVFC_MAD_DRIVER_FAILED. The event is then unlinked from its list,
 * its timeout timer cancelled, the end trace logged, and its done
 * handler invoked (which is expected to free the event).
 *
 * Return value:
 *	none
 **/
static void ibmvfc_fail_request(struct ibmvfc_event *evt, int error_code)
{
	if (evt->cmnd) {
		evt->cmnd->result = (error_code << 16);
		evt->done = ibmvfc_scsi_eh_done;
	} else
		evt->xfer_iu->mad_common.status = IBMVFC_MAD_DRIVER_FAILED;

	list_del(&evt->queue);
	del_timer(&evt->timer);
	ibmvfc_trc_end(evt);
	evt->done(evt);
}
823
824 /**
825  * ibmvfc_purge_requests - Our virtual adapter just shut down. Purge any sent requests
826  * @vhost:              ibmvfc host struct
827  * @error_code: error code to fail requests with
828  *
829  * Return value:
830  *      none
831  **/
832 static void ibmvfc_purge_requests(struct ibmvfc_host *vhost, int error_code)
833 {
834         struct ibmvfc_event *evt, *pos;
835
836         ibmvfc_dbg(vhost, "Purging all requests\n");
837         list_for_each_entry_safe(evt, pos, &vhost->sent, queue)
838                 ibmvfc_fail_request(evt, error_code);
839 }
840
841 /**
842  * ibmvfc_hard_reset_host - Reset the connection to the server by breaking the CRQ
843  * @vhost:      struct ibmvfc host to reset
844  **/
845 static void ibmvfc_hard_reset_host(struct ibmvfc_host *vhost)
846 {
847         ibmvfc_purge_requests(vhost, DID_ERROR);
848         ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
849         ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_RESET);
850 }
851
852 /**
853  * __ibmvfc_reset_host - Reset the connection to the server (no locking)
854  * @vhost:      struct ibmvfc host to reset
855  **/
856 static void __ibmvfc_reset_host(struct ibmvfc_host *vhost)
857 {
858         if (vhost->logged_in && vhost->action != IBMVFC_HOST_ACTION_LOGO_WAIT &&
859             !ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
860                 scsi_block_requests(vhost->host);
861                 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_LOGO);
862                 vhost->job_step = ibmvfc_npiv_logout;
863                 wake_up(&vhost->work_wait_q);
864         } else
865                 ibmvfc_hard_reset_host(vhost);
866 }
867
868 /**
869  * ibmvfc_reset_host - Reset the connection to the server
870  * @vhost:      ibmvfc host struct
871  **/
872 static void ibmvfc_reset_host(struct ibmvfc_host *vhost)
873 {
874         unsigned long flags;
875
876         spin_lock_irqsave(vhost->host->host_lock, flags);
877         __ibmvfc_reset_host(vhost);
878         spin_unlock_irqrestore(vhost->host->host_lock, flags);
879 }
880
881 /**
882  * ibmvfc_retry_host_init - Retry host initialization if allowed
883  * @vhost:      ibmvfc host struct
884  *
885  * Returns: 1 if init will be retried / 0 if not
886  *
887  **/
888 static int ibmvfc_retry_host_init(struct ibmvfc_host *vhost)
889 {
890         int retry = 0;
891
892         if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) {
893                 vhost->delay_init = 1;
894                 if (++vhost->init_retries > IBMVFC_MAX_HOST_INIT_RETRIES) {
895                         dev_err(vhost->dev,
896                                 "Host initialization retries exceeded. Taking adapter offline\n");
897                         ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
898                 } else if (vhost->init_retries == IBMVFC_MAX_HOST_INIT_RETRIES)
899                         __ibmvfc_reset_host(vhost);
900                 else {
901                         ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
902                         retry = 1;
903                 }
904         }
905
906         wake_up(&vhost->work_wait_q);
907         return retry;
908 }
909
910 /**
911  * __ibmvfc_get_target - Find the specified scsi_target (no locking)
912  * @starget:    scsi target struct
913  *
914  * Return value:
915  *      ibmvfc_target struct / NULL if not found
916  **/
917 static struct ibmvfc_target *__ibmvfc_get_target(struct scsi_target *starget)
918 {
919         struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
920         struct ibmvfc_host *vhost = shost_priv(shost);
921         struct ibmvfc_target *tgt;
922
923         list_for_each_entry(tgt, &vhost->targets, queue)
924                 if (tgt->target_id == starget->id) {
925                         kref_get(&tgt->kref);
926                         return tgt;
927                 }
928         return NULL;
929 }
930
931 /**
932  * ibmvfc_get_target - Find the specified scsi_target
933  * @starget:    scsi target struct
934  *
935  * Return value:
936  *      ibmvfc_target struct / NULL if not found
937  **/
938 static struct ibmvfc_target *ibmvfc_get_target(struct scsi_target *starget)
939 {
940         struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
941         struct ibmvfc_target *tgt;
942         unsigned long flags;
943
944         spin_lock_irqsave(shost->host_lock, flags);
945         tgt = __ibmvfc_get_target(starget);
946         spin_unlock_irqrestore(shost->host_lock, flags);
947         return tgt;
948 }
949
950 /**
951  * ibmvfc_get_host_speed - Get host port speed
952  * @shost:              scsi host struct
953  *
954  * Return value:
955  *      none
956  **/
957 static void ibmvfc_get_host_speed(struct Scsi_Host *shost)
958 {
959         struct ibmvfc_host *vhost = shost_priv(shost);
960         unsigned long flags;
961
962         spin_lock_irqsave(shost->host_lock, flags);
963         if (vhost->state == IBMVFC_ACTIVE) {
964                 switch (vhost->login_buf->resp.link_speed / 100) {
965                 case 1:
966                         fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
967                         break;
968                 case 2:
969                         fc_host_speed(shost) = FC_PORTSPEED_2GBIT;
970                         break;
971                 case 4:
972                         fc_host_speed(shost) = FC_PORTSPEED_4GBIT;
973                         break;
974                 case 8:
975                         fc_host_speed(shost) = FC_PORTSPEED_8GBIT;
976                         break;
977                 case 10:
978                         fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
979                         break;
980                 case 16:
981                         fc_host_speed(shost) = FC_PORTSPEED_16GBIT;
982                         break;
983                 default:
984                         ibmvfc_log(vhost, 3, "Unknown port speed: %lld Gbit\n",
985                                    vhost->login_buf->resp.link_speed / 100);
986                         fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
987                         break;
988                 }
989         } else
990                 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
991         spin_unlock_irqrestore(shost->host_lock, flags);
992 }
993
994 /**
995  * ibmvfc_get_host_port_state - Get host port state
996  * @shost:              scsi host struct
997  *
998  * Return value:
999  *      none
1000  **/
1001 static void ibmvfc_get_host_port_state(struct Scsi_Host *shost)
1002 {
1003         struct ibmvfc_host *vhost = shost_priv(shost);
1004         unsigned long flags;
1005
1006         spin_lock_irqsave(shost->host_lock, flags);
1007         switch (vhost->state) {
1008         case IBMVFC_INITIALIZING:
1009         case IBMVFC_ACTIVE:
1010                 fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
1011                 break;
1012         case IBMVFC_LINK_DOWN:
1013                 fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
1014                 break;
1015         case IBMVFC_LINK_DEAD:
1016         case IBMVFC_HOST_OFFLINE:
1017                 fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
1018                 break;
1019         case IBMVFC_HALTED:
1020                 fc_host_port_state(shost) = FC_PORTSTATE_BLOCKED;
1021                 break;
1022         case IBMVFC_NO_CRQ:
1023                 fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
1024                 break;
1025         default:
1026                 ibmvfc_log(vhost, 3, "Unknown port state: %d\n", vhost->state);
1027                 fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
1028                 break;
1029         }
1030         spin_unlock_irqrestore(shost->host_lock, flags);
1031 }
1032
1033 /**
1034  * ibmvfc_set_rport_dev_loss_tmo - Set rport's device loss timeout
1035  * @rport:              rport struct
1036  * @timeout:    timeout value
1037  *
1038  * Return value:
1039  *      none
1040  **/
1041 static void ibmvfc_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout)
1042 {
1043         if (timeout)
1044                 rport->dev_loss_tmo = timeout;
1045         else
1046                 rport->dev_loss_tmo = 1;
1047 }
1048
1049 /**
1050  * ibmvfc_release_tgt - Free memory allocated for a target
1051  * @kref:               kref struct
1052  *
1053  **/
1054 static void ibmvfc_release_tgt(struct kref *kref)
1055 {
1056         struct ibmvfc_target *tgt = container_of(kref, struct ibmvfc_target, kref);
1057         kfree(tgt);
1058 }
1059
1060 /**
1061  * ibmvfc_get_starget_node_name - Get SCSI target's node name
1062  * @starget:    scsi target struct
1063  *
1064  * Return value:
1065  *      none
1066  **/
1067 static void ibmvfc_get_starget_node_name(struct scsi_target *starget)
1068 {
1069         struct ibmvfc_target *tgt = ibmvfc_get_target(starget);
1070         fc_starget_port_name(starget) = tgt ? tgt->ids.node_name : 0;
1071         if (tgt)
1072                 kref_put(&tgt->kref, ibmvfc_release_tgt);
1073 }
1074
1075 /**
1076  * ibmvfc_get_starget_port_name - Get SCSI target's port name
1077  * @starget:    scsi target struct
1078  *
1079  * Return value:
1080  *      none
1081  **/
1082 static void ibmvfc_get_starget_port_name(struct scsi_target *starget)
1083 {
1084         struct ibmvfc_target *tgt = ibmvfc_get_target(starget);
1085         fc_starget_port_name(starget) = tgt ? tgt->ids.port_name : 0;
1086         if (tgt)
1087                 kref_put(&tgt->kref, ibmvfc_release_tgt);
1088 }
1089
1090 /**
1091  * ibmvfc_get_starget_port_id - Get SCSI target's port ID
1092  * @starget:    scsi target struct
1093  *
1094  * Return value:
1095  *      none
1096  **/
1097 static void ibmvfc_get_starget_port_id(struct scsi_target *starget)
1098 {
1099         struct ibmvfc_target *tgt = ibmvfc_get_target(starget);
1100         fc_starget_port_id(starget) = tgt ? tgt->scsi_id : -1;
1101         if (tgt)
1102                 kref_put(&tgt->kref, ibmvfc_release_tgt);
1103 }
1104
1105 /**
1106  * ibmvfc_wait_while_resetting - Wait while the host resets
1107  * @vhost:              ibmvfc host struct
1108  *
1109  * Return value:
1110  *      0 on success / other on failure
1111  **/
1112 static int ibmvfc_wait_while_resetting(struct ibmvfc_host *vhost)
1113 {
1114         long timeout = wait_event_timeout(vhost->init_wait_q,
1115                                           ((vhost->state == IBMVFC_ACTIVE ||
1116                                             vhost->state == IBMVFC_HOST_OFFLINE ||
1117                                             vhost->state == IBMVFC_LINK_DEAD) &&
1118                                            vhost->action == IBMVFC_HOST_ACTION_NONE),
1119                                           (init_timeout * HZ));
1120
1121         return timeout ? 0 : -EIO;
1122 }
1123
1124 /**
1125  * ibmvfc_issue_fc_host_lip - Re-initiate link initialization
1126  * @shost:              scsi host struct
1127  *
1128  * Return value:
1129  *      0 on success / other on failure
1130  **/
1131 static int ibmvfc_issue_fc_host_lip(struct Scsi_Host *shost)
1132 {
1133         struct ibmvfc_host *vhost = shost_priv(shost);
1134
1135         dev_err(vhost->dev, "Initiating host LIP. Resetting connection\n");
1136         ibmvfc_reset_host(vhost);
1137         return ibmvfc_wait_while_resetting(vhost);
1138 }
1139
1140 /**
1141  * ibmvfc_gather_partition_info - Gather info about the LPAR
1142  *
1143  * Return value:
1144  *      none
1145  **/
1146 static void ibmvfc_gather_partition_info(struct ibmvfc_host *vhost)
1147 {
1148         struct device_node *rootdn;
1149         const char *name;
1150         const unsigned int *num;
1151
1152         rootdn = of_find_node_by_path("/");
1153         if (!rootdn)
1154                 return;
1155
1156         name = of_get_property(rootdn, "ibm,partition-name", NULL);
1157         if (name)
1158                 strncpy(vhost->partition_name, name, sizeof(vhost->partition_name));
1159         num = of_get_property(rootdn, "ibm,partition-no", NULL);
1160         if (num)
1161                 vhost->partition_number = *num;
1162         of_node_put(rootdn);
1163 }
1164
1165 /**
1166  * ibmvfc_set_login_info - Setup info for NPIV login
1167  * @vhost:      ibmvfc host struct
1168  *
1169  * Return value:
1170  *      none
1171  **/
1172 static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
1173 {
1174         struct ibmvfc_npiv_login *login_info = &vhost->login_info;
1175         struct device_node *of_node = vhost->dev->of_node;
1176         const char *location;
1177
1178         memset(login_info, 0, sizeof(*login_info));
1179
1180         login_info->ostype = IBMVFC_OS_LINUX;
1181         login_info->max_dma_len = IBMVFC_MAX_SECTORS << 9;
1182         login_info->max_payload = sizeof(struct ibmvfc_fcp_cmd_iu);
1183         login_info->max_response = sizeof(struct ibmvfc_fcp_rsp);
1184         login_info->partition_num = vhost->partition_number;
1185         login_info->vfc_frame_version = 1;
1186         login_info->fcp_version = 3;
1187         login_info->flags = IBMVFC_FLUSH_ON_HALT;
1188         if (vhost->client_migrated)
1189                 login_info->flags |= IBMVFC_CLIENT_MIGRATED;
1190
1191         login_info->max_cmds = max_requests + IBMVFC_NUM_INTERNAL_REQ;
1192         login_info->capabilities = IBMVFC_CAN_MIGRATE;
1193         login_info->async.va = vhost->async_crq.msg_token;
1194         login_info->async.len = vhost->async_crq.size * sizeof(*vhost->async_crq.msgs);
1195         strncpy(login_info->partition_name, vhost->partition_name, IBMVFC_MAX_NAME);
1196         strncpy(login_info->device_name,
1197                 dev_name(&vhost->host->shost_gendev), IBMVFC_MAX_NAME);
1198
1199         location = of_get_property(of_node, "ibm,loc-code", NULL);
1200         location = location ? location : dev_name(vhost->dev);
1201         strncpy(login_info->drc_name, location, IBMVFC_MAX_NAME);
1202 }
1203
1204 /**
1205  * ibmvfc_init_event_pool - Allocates and initializes the event pool for a host
1206  * @vhost:      ibmvfc host who owns the event pool
1207  *
1208  * Returns zero on success.
1209  **/
1210 static int ibmvfc_init_event_pool(struct ibmvfc_host *vhost)
1211 {
1212         int i;
1213         struct ibmvfc_event_pool *pool = &vhost->pool;
1214
1215         ENTER;
1216         pool->size = max_requests + IBMVFC_NUM_INTERNAL_REQ;
1217         pool->events = kcalloc(pool->size, sizeof(*pool->events), GFP_KERNEL);
1218         if (!pool->events)
1219                 return -ENOMEM;
1220
1221         pool->iu_storage = dma_alloc_coherent(vhost->dev,
1222                                               pool->size * sizeof(*pool->iu_storage),
1223                                               &pool->iu_token, 0);
1224
1225         if (!pool->iu_storage) {
1226                 kfree(pool->events);
1227                 return -ENOMEM;
1228         }
1229
1230         for (i = 0; i < pool->size; ++i) {
1231                 struct ibmvfc_event *evt = &pool->events[i];
1232                 atomic_set(&evt->free, 1);
1233                 evt->crq.valid = 0x80;
1234                 evt->crq.ioba = pool->iu_token + (sizeof(*evt->xfer_iu) * i);
1235                 evt->xfer_iu = pool->iu_storage + i;
1236                 evt->vhost = vhost;
1237                 evt->ext_list = NULL;
1238                 list_add_tail(&evt->queue, &vhost->free);
1239         }
1240
1241         LEAVE;
1242         return 0;
1243 }
1244
1245 /**
1246  * ibmvfc_free_event_pool - Frees memory of the event pool of a host
1247  * @vhost:      ibmvfc host who owns the event pool
1248  *
1249  **/
1250 static void ibmvfc_free_event_pool(struct ibmvfc_host *vhost)
1251 {
1252         int i;
1253         struct ibmvfc_event_pool *pool = &vhost->pool;
1254
1255         ENTER;
1256         for (i = 0; i < pool->size; ++i) {
1257                 list_del(&pool->events[i].queue);
1258                 BUG_ON(atomic_read(&pool->events[i].free) != 1);
1259                 if (pool->events[i].ext_list)
1260                         dma_pool_free(vhost->sg_pool,
1261                                       pool->events[i].ext_list,
1262                                       pool->events[i].ext_list_token);
1263         }
1264
1265         kfree(pool->events);
1266         dma_free_coherent(vhost->dev,
1267                           pool->size * sizeof(*pool->iu_storage),
1268                           pool->iu_storage, pool->iu_token);
1269         LEAVE;
1270 }
1271
1272 /**
1273  * ibmvfc_get_event - Gets the next free event in pool
1274  * @vhost:      ibmvfc host struct
1275  *
1276  * Returns a free event from the pool.
1277  **/
1278 static struct ibmvfc_event *ibmvfc_get_event(struct ibmvfc_host *vhost)
1279 {
1280         struct ibmvfc_event *evt;
1281
1282         BUG_ON(list_empty(&vhost->free));
1283         evt = list_entry(vhost->free.next, struct ibmvfc_event, queue);
1284         atomic_set(&evt->free, 0);
1285         list_del(&evt->queue);
1286         return evt;
1287 }
1288
1289 /**
1290  * ibmvfc_init_event - Initialize fields in an event struct that are always
1291  *                              required.
1292  * @evt:        The event
1293  * @done:       Routine to call when the event is responded to
1294  * @format:     SRP or MAD format
1295  **/
1296 static void ibmvfc_init_event(struct ibmvfc_event *evt,
1297                               void (*done) (struct ibmvfc_event *), u8 format)
1298 {
1299         evt->cmnd = NULL;
1300         evt->sync_iu = NULL;
1301         evt->crq.format = format;
1302         evt->done = done;
1303         evt->eh_comp = NULL;
1304 }
1305
1306 /**
1307  * ibmvfc_map_sg_list - Initialize scatterlist
1308  * @scmd:       scsi command struct
1309  * @nseg:       number of scatterlist segments
1310  * @md: memory descriptor list to initialize
1311  **/
1312 static void ibmvfc_map_sg_list(struct scsi_cmnd *scmd, int nseg,
1313                                struct srp_direct_buf *md)
1314 {
1315         int i;
1316         struct scatterlist *sg;
1317
1318         scsi_for_each_sg(scmd, sg, nseg, i) {
1319                 md[i].va = sg_dma_address(sg);
1320                 md[i].len = sg_dma_len(sg);
1321                 md[i].key = 0;
1322         }
1323 }
1324
1325 /**
1326  * ibmvfc_map_sg_data - Maps dma for a scatterlist and initializes decriptor fields
1327  * @scmd:               Scsi_Cmnd with the scatterlist
1328  * @evt:                ibmvfc event struct
1329  * @vfc_cmd:    vfc_cmd that contains the memory descriptor
1330  * @dev:                device for which to map dma memory
1331  *
1332  * Returns:
1333  *      0 on success / non-zero on failure
1334  **/
1335 static int ibmvfc_map_sg_data(struct scsi_cmnd *scmd,
1336                               struct ibmvfc_event *evt,
1337                               struct ibmvfc_cmd *vfc_cmd, struct device *dev)
1338 {
1339
1340         int sg_mapped;
1341         struct srp_direct_buf *data = &vfc_cmd->ioba;
1342         struct ibmvfc_host *vhost = dev_get_drvdata(dev);
1343
1344         sg_mapped = scsi_dma_map(scmd);
1345         if (!sg_mapped) {
1346                 vfc_cmd->flags |= IBMVFC_NO_MEM_DESC;
1347                 return 0;
1348         } else if (unlikely(sg_mapped < 0)) {
1349                 if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
1350                         scmd_printk(KERN_ERR, scmd, "Failed to map DMA buffer for command\n");
1351                 return sg_mapped;
1352         }
1353
1354         if (scmd->sc_data_direction == DMA_TO_DEVICE) {
1355                 vfc_cmd->flags |= IBMVFC_WRITE;
1356                 vfc_cmd->iu.add_cdb_len |= IBMVFC_WRDATA;
1357         } else {
1358                 vfc_cmd->flags |= IBMVFC_READ;
1359                 vfc_cmd->iu.add_cdb_len |= IBMVFC_RDDATA;
1360         }
1361
1362         if (sg_mapped == 1) {
1363                 ibmvfc_map_sg_list(scmd, sg_mapped, data);
1364                 return 0;
1365         }
1366
1367         vfc_cmd->flags |= IBMVFC_SCATTERLIST;
1368
1369         if (!evt->ext_list) {
1370                 evt->ext_list = dma_pool_alloc(vhost->sg_pool, GFP_ATOMIC,
1371                                                &evt->ext_list_token);
1372
1373                 if (!evt->ext_list) {
1374                         scsi_dma_unmap(scmd);
1375                         if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
1376                                 scmd_printk(KERN_ERR, scmd, "Can't allocate memory for scatterlist\n");
1377                         return -ENOMEM;
1378                 }
1379         }
1380
1381         ibmvfc_map_sg_list(scmd, sg_mapped, evt->ext_list);
1382
1383         data->va = evt->ext_list_token;
1384         data->len = sg_mapped * sizeof(struct srp_direct_buf);
1385         data->key = 0;
1386         return 0;
1387 }
1388
1389 /**
1390  * ibmvfc_timeout - Internal command timeout handler
1391  * @evt:        struct ibmvfc_event that timed out
1392  *
1393  * Called when an internally generated command times out
1394  **/
1395 static void ibmvfc_timeout(struct ibmvfc_event *evt)
1396 {
1397         struct ibmvfc_host *vhost = evt->vhost;
1398         dev_err(vhost->dev, "Command timed out (%p). Resetting connection\n", evt);
1399         ibmvfc_reset_host(vhost);
1400 }
1401
1402 /**
1403  * ibmvfc_send_event - Transforms event to u64 array and calls send_crq()
1404  * @evt:                event to be sent
1405  * @vhost:              ibmvfc host struct
1406  * @timeout:    timeout in seconds - 0 means do not time command
1407  *
1408  * Returns the value returned from ibmvfc_send_crq(). (Zero for success)
1409  **/
1410 static int ibmvfc_send_event(struct ibmvfc_event *evt,
1411                              struct ibmvfc_host *vhost, unsigned long timeout)
1412 {
1413         u64 *crq_as_u64 = (u64 *) &evt->crq;
1414         int rc;
1415
1416         /* Copy the IU into the transfer area */
1417         *evt->xfer_iu = evt->iu;
1418         if (evt->crq.format == IBMVFC_CMD_FORMAT)
1419                 evt->xfer_iu->cmd.tag = (u64)evt;
1420         else if (evt->crq.format == IBMVFC_MAD_FORMAT)
1421                 evt->xfer_iu->mad_common.tag = (u64)evt;
1422         else
1423                 BUG();
1424
1425         list_add_tail(&evt->queue, &vhost->sent);
1426         init_timer(&evt->timer);
1427
1428         if (timeout) {
1429                 evt->timer.data = (unsigned long) evt;
1430                 evt->timer.expires = jiffies + (timeout * HZ);
1431                 evt->timer.function = (void (*)(unsigned long))ibmvfc_timeout;
1432                 add_timer(&evt->timer);
1433         }
1434
1435         mb();
1436
1437         if ((rc = ibmvfc_send_crq(vhost, crq_as_u64[0], crq_as_u64[1]))) {
1438                 list_del(&evt->queue);
1439                 del_timer(&evt->timer);
1440
1441                 /* If send_crq returns H_CLOSED, return SCSI_MLQUEUE_HOST_BUSY.
1442                  * Firmware will send a CRQ with a transport event (0xFF) to
1443                  * tell this client what has happened to the transport. This
1444                  * will be handled in ibmvfc_handle_crq()
1445                  */
1446                 if (rc == H_CLOSED) {
1447                         if (printk_ratelimit())
1448                                 dev_warn(vhost->dev, "Send warning. Receive queue closed, will retry.\n");
1449                         if (evt->cmnd)
1450                                 scsi_dma_unmap(evt->cmnd);
1451                         ibmvfc_free_event(evt);
1452                         return SCSI_MLQUEUE_HOST_BUSY;
1453                 }
1454
1455                 dev_err(vhost->dev, "Send error (rc=%d)\n", rc);
1456                 if (evt->cmnd) {
1457                         evt->cmnd->result = DID_ERROR << 16;
1458                         evt->done = ibmvfc_scsi_eh_done;
1459                 } else
1460                         evt->xfer_iu->mad_common.status = IBMVFC_MAD_CRQ_ERROR;
1461
1462                 evt->done(evt);
1463         } else
1464                 ibmvfc_trc_start(evt);
1465
1466         return 0;
1467 }
1468
1469 /**
1470  * ibmvfc_log_error - Log an error for the failed command if appropriate
1471  * @evt:        ibmvfc event to log
1472  *
1473  **/
1474 static void ibmvfc_log_error(struct ibmvfc_event *evt)
1475 {
1476         struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
1477         struct ibmvfc_host *vhost = evt->vhost;
1478         struct ibmvfc_fcp_rsp *rsp = &vfc_cmd->rsp;
1479         struct scsi_cmnd *cmnd = evt->cmnd;
1480         const char *err = unknown_error;
1481         int index = ibmvfc_get_err_index(vfc_cmd->status, vfc_cmd->error);
1482         int logerr = 0;
1483         int rsp_code = 0;
1484
1485         if (index >= 0) {
1486                 logerr = cmd_status[index].log;
1487                 err = cmd_status[index].name;
1488         }
1489
1490         if (!logerr && (vhost->log_level <= (IBMVFC_DEFAULT_LOG_LEVEL + 1)))
1491                 return;
1492
1493         if (rsp->flags & FCP_RSP_LEN_VALID)
1494                 rsp_code = rsp->data.info.rsp_code;
1495
1496         scmd_printk(KERN_ERR, cmnd, "Command (%02X) failed: %s (%x:%x) "
1497                     "flags: %x fcp_rsp: %x, resid=%d, scsi_status: %x\n",
1498                     cmnd->cmnd[0], err, vfc_cmd->status, vfc_cmd->error,
1499                     rsp->flags, rsp_code, scsi_get_resid(cmnd), rsp->scsi_status);
1500 }
1501
1502 /**
1503  * ibmvfc_relogin - Log back into the specified device
1504  * @sdev:       scsi device struct
1505  *
1506  **/
1507 static void ibmvfc_relogin(struct scsi_device *sdev)
1508 {
1509         struct ibmvfc_host *vhost = shost_priv(sdev->host);
1510         struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
1511         struct ibmvfc_target *tgt;
1512
1513         list_for_each_entry(tgt, &vhost->targets, queue) {
1514                 if (rport == tgt->rport) {
1515                         ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
1516                         break;
1517                 }
1518         }
1519
1520         ibmvfc_reinit_host(vhost);
1521 }
1522
1523 /**
1524  * ibmvfc_scsi_done - Handle responses from commands
1525  * @evt:        ibmvfc event to be handled
1526  *
1527  * Used as a callback when sending scsi cmds.
1528  **/
1529 static void ibmvfc_scsi_done(struct ibmvfc_event *evt)
1530 {
1531         struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
1532         struct ibmvfc_fcp_rsp *rsp = &vfc_cmd->rsp;
1533         struct scsi_cmnd *cmnd = evt->cmnd;
1534         u32 rsp_len = 0;
1535         u32 sense_len = rsp->fcp_sense_len;
1536
1537         if (cmnd) {
1538                 if (vfc_cmd->response_flags & IBMVFC_ADAPTER_RESID_VALID)
1539                         scsi_set_resid(cmnd, vfc_cmd->adapter_resid);
1540                 else if (rsp->flags & FCP_RESID_UNDER)
1541                         scsi_set_resid(cmnd, rsp->fcp_resid);
1542                 else
1543                         scsi_set_resid(cmnd, 0);
1544
1545                 if (vfc_cmd->status) {
1546                         cmnd->result = ibmvfc_get_err_result(vfc_cmd);
1547
1548                         if (rsp->flags & FCP_RSP_LEN_VALID)
1549                                 rsp_len = rsp->fcp_rsp_len;
1550                         if ((sense_len + rsp_len) > SCSI_SENSE_BUFFERSIZE)
1551                                 sense_len = SCSI_SENSE_BUFFERSIZE - rsp_len;
1552                         if ((rsp->flags & FCP_SNS_LEN_VALID) && rsp->fcp_sense_len && rsp_len <= 8)
1553                                 memcpy(cmnd->sense_buffer, rsp->data.sense + rsp_len, sense_len);
1554                         if ((vfc_cmd->status & IBMVFC_VIOS_FAILURE) && (vfc_cmd->error == IBMVFC_PLOGI_REQUIRED))
1555                                 ibmvfc_relogin(cmnd->device);
1556
1557                         if (!cmnd->result && (!scsi_get_resid(cmnd) || (rsp->flags & FCP_RESID_OVER)))
1558                                 cmnd->result = (DID_ERROR << 16);
1559
1560                         ibmvfc_log_error(evt);
1561                 }
1562
1563                 if (!cmnd->result &&
1564                     (scsi_bufflen(cmnd) - scsi_get_resid(cmnd) < cmnd->underflow))
1565                         cmnd->result = (DID_ERROR << 16);
1566
1567                 scsi_dma_unmap(cmnd);
1568                 cmnd->scsi_done(cmnd);
1569         }
1570
1571         if (evt->eh_comp)
1572                 complete(evt->eh_comp);
1573
1574         ibmvfc_free_event(evt);
1575 }
1576
1577 /**
1578  * ibmvfc_host_chkready - Check if the host can accept commands
1579  * @vhost:       struct ibmvfc host
1580  *
1581  * Returns:
1582  *      1 if host can accept command / 0 if not
1583  **/
1584 static inline int ibmvfc_host_chkready(struct ibmvfc_host *vhost)
1585 {
1586         int result = 0;
1587
1588         switch (vhost->state) {
1589         case IBMVFC_LINK_DEAD:
1590         case IBMVFC_HOST_OFFLINE:
1591                 result = DID_NO_CONNECT << 16;
1592                 break;
1593         case IBMVFC_NO_CRQ:
1594         case IBMVFC_INITIALIZING:
1595         case IBMVFC_HALTED:
1596         case IBMVFC_LINK_DOWN:
1597                 result = DID_REQUEUE << 16;
1598                 break;
1599         case IBMVFC_ACTIVE:
1600                 result = 0;
1601                 break;
1602         };
1603
1604         return result;
1605 }
1606
1607 /**
1608  * ibmvfc_queuecommand - The queuecommand function of the scsi template
1609  * @cmnd:       struct scsi_cmnd to be executed
1610  * @done:       Callback function to be called when cmnd is completed
1611  *
1612  * Returns:
1613  *      0 on success / other on failure
1614  **/
1615 static int ibmvfc_queuecommand(struct scsi_cmnd *cmnd,
1616                                void (*done) (struct scsi_cmnd *))
1617 {
1618         struct ibmvfc_host *vhost = shost_priv(cmnd->device->host);
1619         struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
1620         struct ibmvfc_cmd *vfc_cmd;
1621         struct ibmvfc_event *evt;
1622         u8 tag[2];
1623         int rc;
1624
1625         if (unlikely((rc = fc_remote_port_chkready(rport))) ||
1626             unlikely((rc = ibmvfc_host_chkready(vhost)))) {
1627                 cmnd->result = rc;
1628                 done(cmnd);
1629                 return 0;
1630         }
1631
1632         cmnd->result = (DID_OK << 16);
1633         evt = ibmvfc_get_event(vhost);
1634         ibmvfc_init_event(evt, ibmvfc_scsi_done, IBMVFC_CMD_FORMAT);
1635         evt->cmnd = cmnd;
1636         cmnd->scsi_done = done;
1637         vfc_cmd = &evt->iu.cmd;
1638         memset(vfc_cmd, 0, sizeof(*vfc_cmd));
1639         vfc_cmd->resp.va = (u64)evt->crq.ioba + offsetof(struct ibmvfc_cmd, rsp);
1640         vfc_cmd->resp.len = sizeof(vfc_cmd->rsp);
1641         vfc_cmd->frame_type = IBMVFC_SCSI_FCP_TYPE;
1642         vfc_cmd->payload_len = sizeof(vfc_cmd->iu);
1643         vfc_cmd->resp_len = sizeof(vfc_cmd->rsp);
1644         vfc_cmd->cancel_key = (unsigned long)cmnd->device->hostdata;
1645         vfc_cmd->tgt_scsi_id = rport->port_id;
1646         vfc_cmd->iu.xfer_len = scsi_bufflen(cmnd);
1647         int_to_scsilun(cmnd->device->lun, &vfc_cmd->iu.lun);
1648         memcpy(vfc_cmd->iu.cdb, cmnd->cmnd, cmnd->cmd_len);
1649
1650         if (scsi_populate_tag_msg(cmnd, tag)) {
1651                 vfc_cmd->task_tag = tag[1];
1652                 switch (tag[0]) {
1653                 case MSG_SIMPLE_TAG:
1654                         vfc_cmd->iu.pri_task_attr = IBMVFC_SIMPLE_TASK;
1655                         break;
1656                 case MSG_HEAD_TAG:
1657                         vfc_cmd->iu.pri_task_attr = IBMVFC_HEAD_OF_QUEUE;
1658                         break;
1659                 case MSG_ORDERED_TAG:
1660                         vfc_cmd->iu.pri_task_attr = IBMVFC_ORDERED_TASK;
1661                         break;
1662                 };
1663         }
1664
1665         if (likely(!(rc = ibmvfc_map_sg_data(cmnd, evt, vfc_cmd, vhost->dev))))
1666                 return ibmvfc_send_event(evt, vhost, 0);
1667
1668         ibmvfc_free_event(evt);
1669         if (rc == -ENOMEM)
1670                 return SCSI_MLQUEUE_HOST_BUSY;
1671
1672         if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
1673                 scmd_printk(KERN_ERR, cmnd,
1674                             "Failed to map DMA buffer for command. rc=%d\n", rc);
1675
1676         cmnd->result = DID_ERROR << 16;
1677         done(cmnd);
1678         return 0;
1679 }
1680
1681 /**
1682  * ibmvfc_sync_completion - Signal that a synchronous command has completed
1683  * @evt:        ibmvfc event struct
1684  *
1685  **/
1686 static void ibmvfc_sync_completion(struct ibmvfc_event *evt)
1687 {
1688         /* copy the response back */
1689         if (evt->sync_iu)
1690                 *evt->sync_iu = *evt->xfer_iu;
1691
1692         complete(&evt->comp);
1693 }
1694
1695 /**
1696  * ibmvfc_bsg_timeout_done - Completion handler for cancelling BSG commands
1697  * @evt:        struct ibmvfc_event
1698  *
1699  **/
1700 static void ibmvfc_bsg_timeout_done(struct ibmvfc_event *evt)
1701 {
1702         struct ibmvfc_host *vhost = evt->vhost;
1703
1704         ibmvfc_free_event(evt);
1705         vhost->aborting_passthru = 0;
1706         dev_info(vhost->dev, "Passthru command cancelled\n");
1707 }
1708
/**
 * ibmvfc_bsg_timeout - Handle a BSG timeout
 * @job:	struct fc_bsg_job that timed out
 *
 * Sends a TMF MAD to cancel the outstanding passthru command. The target
 * port id was stashed in job->dd_data by ibmvfc_bsg_request().
 *
 * Returns:
 *	0 on success / other on failure
 **/
static int ibmvfc_bsg_timeout(struct fc_bsg_job *job)
{
	struct ibmvfc_host *vhost = shost_priv(job->shost);
	unsigned long port_id = (unsigned long)job->dd_data;
	struct ibmvfc_event *evt;
	struct ibmvfc_tmf *tmf;
	unsigned long flags;
	int rc;

	ENTER;
	spin_lock_irqsave(vhost->host->host_lock, flags);
	/* If a passthru cancel is already outstanding, or the host is not
	 * active, fall back to resetting the host to clean things up. */
	if (vhost->aborting_passthru || vhost->state != IBMVFC_ACTIVE) {
		__ibmvfc_reset_host(vhost);
		spin_unlock_irqrestore(vhost->host->host_lock, flags);
		return 0;
	}

	/* Flag is cleared by ibmvfc_bsg_timeout_done when the cancel
	 * completes (or below if the send fails). */
	vhost->aborting_passthru = 1;
	evt = ibmvfc_get_event(vhost);
	ibmvfc_init_event(evt, ibmvfc_bsg_timeout_done, IBMVFC_MAD_FORMAT);

	tmf = &evt->iu.tmf;
	memset(tmf, 0, sizeof(*tmf));
	tmf->common.version = 1;
	tmf->common.opcode = IBMVFC_TMF_MAD;
	tmf->common.length = sizeof(*tmf);
	tmf->scsi_id = port_id;
	tmf->cancel_key = IBMVFC_PASSTHRU_CANCEL_KEY;
	tmf->my_cancel_key = IBMVFC_INTERNAL_CANCEL_KEY;
	rc = ibmvfc_send_event(evt, vhost, default_timeout);

	if (rc != 0) {
		vhost->aborting_passthru = 0;
		dev_err(vhost->dev, "Failed to send cancel event. rc=%d\n", rc);
		rc = -EIO;
	} else
		dev_info(vhost->dev, "Cancelling passthru command to port id 0x%lx\n",
			 port_id);

	spin_unlock_irqrestore(vhost->host->host_lock, flags);

	LEAVE;
	return rc;
}
1760
/**
 * ibmvfc_bsg_plogi - PLOGI into a target to handle a BSG command
 * @vhost:	struct ibmvfc_host to send command
 * @port_id:	port ID to send command
 *
 * Issues an explicit PLOGI MAD to the given port unless the port is
 * already known as a discovered target, then waits synchronously for it
 * to complete.
 *
 * Returns:
 *	0 on success / other on failure
 **/
static int ibmvfc_bsg_plogi(struct ibmvfc_host *vhost, unsigned int port_id)
{
	struct ibmvfc_port_login *plogi;
	struct ibmvfc_target *tgt;
	struct ibmvfc_event *evt;
	union ibmvfc_iu rsp_iu;
	unsigned long flags;
	int rc = 0, issue_login = 1;

	ENTER;
	spin_lock_irqsave(vhost->host->host_lock, flags);
	/* Skip the login if this port is already a known target */
	list_for_each_entry(tgt, &vhost->targets, queue) {
		if (tgt->scsi_id == port_id) {
			issue_login = 0;
			break;
		}
	}

	if (!issue_login)
		goto unlock_out;
	if (unlikely((rc = ibmvfc_host_chkready(vhost))))
		goto unlock_out;

	evt = ibmvfc_get_event(vhost);
	ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
	plogi = &evt->iu.plogi;
	memset(plogi, 0, sizeof(*plogi));
	plogi->common.version = 1;
	plogi->common.opcode = IBMVFC_PORT_LOGIN;
	plogi->common.length = sizeof(*plogi);
	plogi->scsi_id = port_id;
	/* ibmvfc_sync_completion copies the response into rsp_iu and
	 * completes evt->comp. */
	evt->sync_iu = &rsp_iu;
	init_completion(&evt->comp);

	rc = ibmvfc_send_event(evt, vhost, default_timeout);
	spin_unlock_irqrestore(vhost->host->host_lock, flags);

	if (rc)
		return -EIO;

	/* Must not hold the host lock while waiting for completion */
	wait_for_completion(&evt->comp);

	if (rsp_iu.plogi.common.status)
		rc = -EIO;

	/* Return the event to the pool under the host lock, matching the
	 * locking used by the other callers of ibmvfc_free_event(). */
	spin_lock_irqsave(vhost->host->host_lock, flags);
	ibmvfc_free_event(evt);
unlock_out:
	spin_unlock_irqrestore(vhost->host->host_lock, flags);
	LEAVE;
	return rc;
}
1821
/**
 * ibmvfc_bsg_request - Handle a BSG request
 * @job:	struct fc_bsg_job to be executed
 *
 * Maps the request/reply payloads for DMA, builds a passthru MAD for the
 * ELS or CT frame, sends it, and waits synchronously for the response.
 * Only one passthru command is allowed at a time (passthru_mutex).
 *
 * Returns:
 *	0 on success / other on failure
 **/
static int ibmvfc_bsg_request(struct fc_bsg_job *job)
{
	struct ibmvfc_host *vhost = shost_priv(job->shost);
	struct fc_rport *rport = job->rport;
	struct ibmvfc_passthru_mad *mad;
	struct ibmvfc_event *evt;
	union ibmvfc_iu rsp_iu;
	unsigned long flags, port_id = -1;
	unsigned int code = job->request->msgcode;
	int rc = 0, req_seg, rsp_seg, issue_login = 0;
	u32 fc_flags, rsp_len;

	ENTER;
	job->reply->reply_payload_rcv_len = 0;
	if (rport)
		port_id = rport->port_id;

	switch (code) {
	case FC_BSG_HST_ELS_NOLOGIN:
		/* Host-initiated ELS carries the destination port id in the
		 * request itself (big-endian 3-byte port id). */
		port_id = (job->request->rqst_data.h_els.port_id[0] << 16) |
			(job->request->rqst_data.h_els.port_id[1] << 8) |
			job->request->rqst_data.h_els.port_id[2];
		/* fall through - both ELS variants use the same FC flags */
	case FC_BSG_RPT_ELS:
		fc_flags = IBMVFC_FC_ELS;
		break;
	case FC_BSG_HST_CT:
		issue_login = 1;
		port_id = (job->request->rqst_data.h_ct.port_id[0] << 16) |
			(job->request->rqst_data.h_ct.port_id[1] << 8) |
			job->request->rqst_data.h_ct.port_id[2];
		/* fall through - both CT variants use the same FC flags */
	case FC_BSG_RPT_CT:
		fc_flags = IBMVFC_FC_CT_IU;
		break;
	default:
		return -ENOTSUPP;
	};

	if (port_id == -1)
		return -EINVAL;
	/* Only one passthru at a time; don't block if one is in flight */
	if (!mutex_trylock(&vhost->passthru_mutex))
		return -EBUSY;

	/* Stash the port id for ibmvfc_bsg_timeout() */
	job->dd_data = (void *)port_id;
	req_seg = dma_map_sg(vhost->dev, job->request_payload.sg_list,
			     job->request_payload.sg_cnt, DMA_TO_DEVICE);

	if (!req_seg) {
		mutex_unlock(&vhost->passthru_mutex);
		return -ENOMEM;
	}

	rsp_seg = dma_map_sg(vhost->dev, job->reply_payload.sg_list,
			     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

	if (!rsp_seg) {
		dma_unmap_sg(vhost->dev, job->request_payload.sg_list,
			     job->request_payload.sg_cnt, DMA_TO_DEVICE);
		mutex_unlock(&vhost->passthru_mutex);
		return -ENOMEM;
	}

	/* The passthru MAD describes exactly one buffer in each direction */
	if (req_seg > 1 || rsp_seg > 1) {
		rc = -EINVAL;
		goto out;
	}

	if (issue_login)
		rc = ibmvfc_bsg_plogi(vhost, port_id);

	spin_lock_irqsave(vhost->host->host_lock, flags);

	if (unlikely(rc || (rport && (rc = fc_remote_port_chkready(rport)))) ||
	    unlikely((rc = ibmvfc_host_chkready(vhost)))) {
		spin_unlock_irqrestore(vhost->host->host_lock, flags);
		goto out;
	}

	evt = ibmvfc_get_event(vhost);
	ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
	mad = &evt->iu.passthru;

	memset(mad, 0, sizeof(*mad));
	mad->common.version = 1;
	mad->common.opcode = IBMVFC_PASSTHRU;
	/* Trailing fc_iu/iu members are passed via cmd_ioba, not inline */
	mad->common.length = sizeof(*mad) - sizeof(mad->fc_iu) - sizeof(mad->iu);

	mad->cmd_ioba.va = (u64)evt->crq.ioba +
		offsetof(struct ibmvfc_passthru_mad, iu);
	mad->cmd_ioba.len = sizeof(mad->iu);

	mad->iu.cmd_len = job->request_payload.payload_len;
	mad->iu.rsp_len = job->reply_payload.payload_len;
	mad->iu.flags = fc_flags;
	mad->iu.cancel_key = IBMVFC_PASSTHRU_CANCEL_KEY;

	mad->iu.cmd.va = sg_dma_address(job->request_payload.sg_list);
	mad->iu.cmd.len = sg_dma_len(job->request_payload.sg_list);
	mad->iu.rsp.va = sg_dma_address(job->reply_payload.sg_list);
	mad->iu.rsp.len = sg_dma_len(job->reply_payload.sg_list);
	mad->iu.scsi_id = port_id;
	mad->iu.tag = (u64)evt;
	rsp_len = mad->iu.rsp.len;

	/* ibmvfc_sync_completion copies the response and completes evt->comp */
	evt->sync_iu = &rsp_iu;
	init_completion(&evt->comp);
	rc = ibmvfc_send_event(evt, vhost, 0);
	spin_unlock_irqrestore(vhost->host->host_lock, flags);

	if (rc) {
		rc = -EIO;
		goto out;
	}

	/* Must not hold the host lock while waiting */
	wait_for_completion(&evt->comp);

	if (rsp_iu.passthru.common.status)
		rc = -EIO;
	else
		job->reply->reply_payload_rcv_len = rsp_len;

	spin_lock_irqsave(vhost->host->host_lock, flags);
	ibmvfc_free_event(evt);
	spin_unlock_irqrestore(vhost->host->host_lock, flags);
	job->reply->result = rc;
	job->job_done(job);
	rc = 0;
out:
	dma_unmap_sg(vhost->dev, job->request_payload.sg_list,
		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
	dma_unmap_sg(vhost->dev, job->reply_payload.sg_list,
		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	mutex_unlock(&vhost->passthru_mutex);
	LEAVE;
	return rc;
}
1964
/**
 * ibmvfc_reset_device - Reset the device with the specified reset type
 * @sdev:	scsi device to reset
 * @type:	reset type
 * @desc:	reset type description for log messages
 *
 * Builds a TMF-flavored virtual FCP command with the requested tmf_flags,
 * sends it, and waits synchronously for the response.
 *
 * Returns:
 *	0 on success / other on failure
 **/
static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc)
{
	struct ibmvfc_host *vhost = shost_priv(sdev->host);
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
	struct ibmvfc_cmd *tmf;
	struct ibmvfc_event *evt = NULL;
	union ibmvfc_iu rsp_iu;
	struct ibmvfc_fcp_rsp *fc_rsp = &rsp_iu.cmd.rsp;
	int rsp_rc = -EBUSY;	/* reported when the host is not active */
	unsigned long flags;
	int rsp_code = 0;

	spin_lock_irqsave(vhost->host->host_lock, flags);
	if (vhost->state == IBMVFC_ACTIVE) {
		evt = ibmvfc_get_event(vhost);
		ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);

		tmf = &evt->iu.cmd;
		memset(tmf, 0, sizeof(*tmf));
		tmf->resp.va = (u64)evt->crq.ioba + offsetof(struct ibmvfc_cmd, rsp);
		tmf->resp.len = sizeof(tmf->rsp);
		tmf->frame_type = IBMVFC_SCSI_FCP_TYPE;
		tmf->payload_len = sizeof(tmf->iu);
		tmf->resp_len = sizeof(tmf->rsp);
		tmf->cancel_key = (unsigned long)sdev->hostdata;
		tmf->tgt_scsi_id = rport->port_id;
		int_to_scsilun(sdev->lun, &tmf->iu.lun);
		tmf->flags = (IBMVFC_NO_MEM_DESC | IBMVFC_TMF);
		tmf->iu.tmf_flags = type;
		/* Response is copied to rsp_iu by ibmvfc_sync_completion */
		evt->sync_iu = &rsp_iu;

		init_completion(&evt->comp);
		rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
	}
	spin_unlock_irqrestore(vhost->host->host_lock, flags);

	if (rsp_rc != 0) {
		sdev_printk(KERN_ERR, sdev, "Failed to send %s reset event. rc=%d\n",
			    desc, rsp_rc);
		return -EIO;
	}

	sdev_printk(KERN_INFO, sdev, "Resetting %s\n", desc);
	wait_for_completion(&evt->comp);

	if (rsp_iu.cmd.status)
		rsp_code = ibmvfc_get_err_result(&rsp_iu.cmd);

	if (rsp_code) {
		/* Prefer the FCP response code when the target provided one */
		if (fc_rsp->flags & FCP_RSP_LEN_VALID)
			rsp_code = fc_rsp->data.info.rsp_code;

		sdev_printk(KERN_ERR, sdev, "%s reset failed: %s (%x:%x) "
			    "flags: %x fcp_rsp: %x, scsi_status: %x\n",
			    desc, ibmvfc_get_cmd_error(rsp_iu.cmd.status, rsp_iu.cmd.error),
			    rsp_iu.cmd.status, rsp_iu.cmd.error, fc_rsp->flags, rsp_code,
			    fc_rsp->scsi_status);
		rsp_rc = -EIO;
	} else
		sdev_printk(KERN_INFO, sdev, "%s reset successful\n", desc);

	spin_lock_irqsave(vhost->host->host_lock, flags);
	ibmvfc_free_event(evt);
	spin_unlock_irqrestore(vhost->host->host_lock, flags);
	return rsp_rc;
}
2040
/**
 * ibmvfc_abort_task_set - Abort outstanding commands to the device
 * @sdev:	scsi device to abort commands
 *
 * This sends an Abort Task Set to the VIOS for the specified device. This does
 * NOT send any cancel to the VIOS. That must be done separately.
 *
 * Returns:
 *	0 on success / other on failure
 **/
static int ibmvfc_abort_task_set(struct scsi_device *sdev)
{
	struct ibmvfc_host *vhost = shost_priv(sdev->host);
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
	struct ibmvfc_cmd *tmf;
	struct ibmvfc_event *evt, *found_evt;
	union ibmvfc_iu rsp_iu;
	struct ibmvfc_fcp_rsp *fc_rsp = &rsp_iu.cmd.rsp;
	int rsp_rc = -EBUSY;	/* reported when the host is not active */
	unsigned long flags;
	int rsp_code = 0;

	spin_lock_irqsave(vhost->host->host_lock, flags);
	/* Nothing to do unless at least one sent event targets this sdev */
	found_evt = NULL;
	list_for_each_entry(evt, &vhost->sent, queue) {
		if (evt->cmnd && evt->cmnd->device == sdev) {
			found_evt = evt;
			break;
		}
	}

	if (!found_evt) {
		if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
			sdev_printk(KERN_INFO, sdev, "No events found to abort\n");
		spin_unlock_irqrestore(vhost->host->host_lock, flags);
		return 0;
	}

	if (vhost->state == IBMVFC_ACTIVE) {
		evt = ibmvfc_get_event(vhost);
		ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);

		/* Build a TMF-flavored command carrying ABORT TASK SET */
		tmf = &evt->iu.cmd;
		memset(tmf, 0, sizeof(*tmf));
		tmf->resp.va = (u64)evt->crq.ioba + offsetof(struct ibmvfc_cmd, rsp);
		tmf->resp.len = sizeof(tmf->rsp);
		tmf->frame_type = IBMVFC_SCSI_FCP_TYPE;
		tmf->payload_len = sizeof(tmf->iu);
		tmf->resp_len = sizeof(tmf->rsp);
		tmf->cancel_key = (unsigned long)sdev->hostdata;
		tmf->tgt_scsi_id = rport->port_id;
		int_to_scsilun(sdev->lun, &tmf->iu.lun);
		tmf->flags = (IBMVFC_NO_MEM_DESC | IBMVFC_TMF);
		tmf->iu.tmf_flags = IBMVFC_ABORT_TASK_SET;
		/* Response is copied to rsp_iu by ibmvfc_sync_completion */
		evt->sync_iu = &rsp_iu;

		init_completion(&evt->comp);
		rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
	}

	spin_unlock_irqrestore(vhost->host->host_lock, flags);

	if (rsp_rc != 0) {
		sdev_printk(KERN_ERR, sdev, "Failed to send abort. rc=%d\n", rsp_rc);
		return -EIO;
	}

	sdev_printk(KERN_INFO, sdev, "Aborting outstanding commands\n");
	wait_for_completion(&evt->comp);

	if (rsp_iu.cmd.status)
		rsp_code = ibmvfc_get_err_result(&rsp_iu.cmd);

	if (rsp_code) {
		/* Prefer the FCP response code when the target provided one */
		if (fc_rsp->flags & FCP_RSP_LEN_VALID)
			rsp_code = fc_rsp->data.info.rsp_code;

		sdev_printk(KERN_ERR, sdev, "Abort failed: %s (%x:%x) "
			    "flags: %x fcp_rsp: %x, scsi_status: %x\n",
			    ibmvfc_get_cmd_error(rsp_iu.cmd.status, rsp_iu.cmd.error),
			    rsp_iu.cmd.status, rsp_iu.cmd.error, fc_rsp->flags, rsp_code,
			    fc_rsp->scsi_status);
		rsp_rc = -EIO;
	} else
		sdev_printk(KERN_INFO, sdev, "Abort successful\n");

	spin_lock_irqsave(vhost->host->host_lock, flags);
	ibmvfc_free_event(evt);
	spin_unlock_irqrestore(vhost->host->host_lock, flags);
	return rsp_rc;
}
2132
/**
 * ibmvfc_cancel_all - Cancel all outstanding commands to the device
 * @sdev:	scsi device to cancel commands
 * @type:	type of error recovery being performed
 *
 * This sends a cancel to the VIOS for the specified device. This does
 * NOT send any abort to the actual device. That must be done separately.
 *
 * Returns:
 *	0 on success / other on failure
 **/
static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
{
	struct ibmvfc_host *vhost = shost_priv(sdev->host);
	struct scsi_target *starget = scsi_target(sdev);
	struct fc_rport *rport = starget_to_rport(starget);
	struct ibmvfc_tmf *tmf;
	struct ibmvfc_event *evt, *found_evt;
	union ibmvfc_iu rsp;
	int rsp_rc = -EBUSY;	/* reported when the host is not active */
	unsigned long flags;
	u16 status;

	ENTER;
	spin_lock_irqsave(vhost->host->host_lock, flags);
	/* Nothing to do unless at least one sent event targets this sdev */
	found_evt = NULL;
	list_for_each_entry(evt, &vhost->sent, queue) {
		if (evt->cmnd && evt->cmnd->device == sdev) {
			found_evt = evt;
			break;
		}
	}

	if (!found_evt) {
		if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
			sdev_printk(KERN_INFO, sdev, "No events found to cancel\n");
		spin_unlock_irqrestore(vhost->host->host_lock, flags);
		return 0;
	}

	if (vhost->state == IBMVFC_ACTIVE) {
		evt = ibmvfc_get_event(vhost);
		ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);

		/* Build the TMF MAD; cancel keys identify which outstanding
		 * commands the VIOS should cancel. */
		tmf = &evt->iu.tmf;
		memset(tmf, 0, sizeof(*tmf));
		tmf->common.version = 1;
		tmf->common.opcode = IBMVFC_TMF_MAD;
		tmf->common.length = sizeof(*tmf);
		tmf->scsi_id = rport->port_id;
		int_to_scsilun(sdev->lun, &tmf->lun);
		tmf->flags = (type | IBMVFC_TMF_LUA_VALID);
		tmf->cancel_key = (unsigned long)sdev->hostdata;
		tmf->my_cancel_key = (unsigned long)starget->hostdata;

		/* Response is copied to rsp by ibmvfc_sync_completion */
		evt->sync_iu = &rsp;
		init_completion(&evt->comp);
		rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
	}

	spin_unlock_irqrestore(vhost->host->host_lock, flags);

	if (rsp_rc != 0) {
		sdev_printk(KERN_ERR, sdev, "Failed to send cancel event. rc=%d\n", rsp_rc);
		return -EIO;
	}

	sdev_printk(KERN_INFO, sdev, "Cancelling outstanding commands.\n");

	wait_for_completion(&evt->comp);
	status = rsp.mad_common.status;
	spin_lock_irqsave(vhost->host->host_lock, flags);
	ibmvfc_free_event(evt);
	spin_unlock_irqrestore(vhost->host->host_lock, flags);

	if (status != IBMVFC_MAD_SUCCESS) {
		sdev_printk(KERN_WARNING, sdev, "Cancel failed with rc=%x\n", status);
		return -EIO;
	}

	sdev_printk(KERN_INFO, sdev, "Successfully cancelled outstanding commands\n");
	return 0;
}
2216
2217 /**
2218  * ibmvfc_match_target - Match function for specified target
2219  * @evt:        ibmvfc event struct
2220  * @device:     device to match (starget)
2221  *
2222  * Returns:
2223  *      1 if event matches starget / 0 if event does not match starget
2224  **/
2225 static int ibmvfc_match_target(struct ibmvfc_event *evt, void *device)
2226 {
2227         if (evt->cmnd && scsi_target(evt->cmnd->device) == device)
2228                 return 1;
2229         return 0;
2230 }
2231
2232 /**
2233  * ibmvfc_match_lun - Match function for specified LUN
2234  * @evt:        ibmvfc event struct
2235  * @device:     device to match (sdev)
2236  *
2237  * Returns:
2238  *      1 if event matches sdev / 0 if event does not match sdev
2239  **/
2240 static int ibmvfc_match_lun(struct ibmvfc_event *evt, void *device)
2241 {
2242         if (evt->cmnd && evt->cmnd->device == device)
2243                 return 1;
2244         return 0;
2245 }
2246
/**
 * ibmvfc_wait_for_ops - Wait for ops to complete
 * @vhost:	ibmvfc host struct
 * @device:	device to match (starget or sdev)
 * @match:	match function
 *
 * Attaches an on-stack completion to every outstanding event matching
 * @device and waits (bounded by IBMVFC_ABORT_WAIT_TIMEOUT) for them to
 * complete, re-scanning until no matches remain.
 *
 * Returns:
 *	SUCCESS / FAILED
 **/
static int ibmvfc_wait_for_ops(struct ibmvfc_host *vhost, void *device,
			       int (*match) (struct ibmvfc_event *, void *))
{
	struct ibmvfc_event *evt;
	DECLARE_COMPLETION_ONSTACK(comp);
	int wait;
	unsigned long flags;
	signed long timeout = IBMVFC_ABORT_WAIT_TIMEOUT * HZ;

	ENTER;
	do {
		wait = 0;
		spin_lock_irqsave(vhost->host->host_lock, flags);
		/* Hook our completion into every matching outstanding event */
		list_for_each_entry(evt, &vhost->sent, queue) {
			if (match(evt, device)) {
				evt->eh_comp = &comp;
				wait++;
			}
		}
		spin_unlock_irqrestore(vhost->host->host_lock, flags);

		if (wait) {
			timeout = wait_for_completion_timeout(&comp, timeout);

			if (!timeout) {
				/* Timed out: detach the on-stack completion
				 * from any events still outstanding so it is
				 * not signalled after this frame unwinds. */
				wait = 0;
				spin_lock_irqsave(vhost->host->host_lock, flags);
				list_for_each_entry(evt, &vhost->sent, queue) {
					if (match(evt, device)) {
						evt->eh_comp = NULL;
						wait++;
					}
				}
				spin_unlock_irqrestore(vhost->host->host_lock, flags);
				if (wait)
					dev_err(vhost->dev, "Timed out waiting for aborted commands\n");
				LEAVE;
				return wait ? FAILED : SUCCESS;
			}
		}
	} while (wait);

	LEAVE;
	return SUCCESS;
}
2301
2302 /**
2303  * ibmvfc_eh_abort_handler - Abort a command
2304  * @cmd:        scsi command to abort
2305  *
2306  * Returns:
2307  *      SUCCESS / FAILED
2308  **/
2309 static int ibmvfc_eh_abort_handler(struct scsi_cmnd *cmd)
2310 {
2311         struct scsi_device *sdev = cmd->device;
2312         struct ibmvfc_host *vhost = shost_priv(sdev->host);
2313         int cancel_rc, abort_rc;
2314         int rc = FAILED;
2315
2316         ENTER;
2317         fc_block_scsi_eh(cmd);
2318         ibmvfc_wait_while_resetting(vhost);
2319         cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET);
2320         abort_rc = ibmvfc_abort_task_set(sdev);
2321
2322         if (!cancel_rc && !abort_rc)
2323                 rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun);
2324
2325         LEAVE;
2326         return rc;
2327 }
2328
2329 /**
2330  * ibmvfc_eh_device_reset_handler - Reset a single LUN
2331  * @cmd:        scsi command struct
2332  *
2333  * Returns:
2334  *      SUCCESS / FAILED
2335  **/
2336 static int ibmvfc_eh_device_reset_handler(struct scsi_cmnd *cmd)
2337 {
2338         struct scsi_device *sdev = cmd->device;
2339         struct ibmvfc_host *vhost = shost_priv(sdev->host);
2340         int cancel_rc, reset_rc;
2341         int rc = FAILED;
2342
2343         ENTER;
2344         fc_block_scsi_eh(cmd);
2345         ibmvfc_wait_while_resetting(vhost);
2346         cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_LUN_RESET);
2347         reset_rc = ibmvfc_reset_device(sdev, IBMVFC_LUN_RESET, "LUN");
2348
2349         if (!cancel_rc && !reset_rc)
2350                 rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun);
2351
2352         LEAVE;
2353         return rc;
2354 }
2355
2356 /**
2357  * ibmvfc_dev_cancel_all_abts - Device iterated cancel all function
2358  * @sdev:       scsi device struct
2359  * @data:       return code
2360  *
2361  **/
2362 static void ibmvfc_dev_cancel_all_abts(struct scsi_device *sdev, void *data)
2363 {
2364         unsigned long *rc = data;
2365         *rc |= ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET);
2366 }
2367
2368 /**
2369  * ibmvfc_dev_cancel_all_reset - Device iterated cancel all function
2370  * @sdev:       scsi device struct
2371  * @data:       return code
2372  *
2373  **/
2374 static void ibmvfc_dev_cancel_all_reset(struct scsi_device *sdev, void *data)
2375 {
2376         unsigned long *rc = data;
2377         *rc |= ibmvfc_cancel_all(sdev, IBMVFC_TMF_TGT_RESET);
2378 }
2379
/**
 * ibmvfc_dev_abort_all - Device iterated abort task set function
 * @sdev:	scsi device struct
 * @data:	return code
 *
 * ORs the result of an abort task set into the accumulator so the caller
 * can detect failure across a whole target.
 **/
static void ibmvfc_dev_abort_all(struct scsi_device *sdev, void *data)
{
	unsigned long *result = data;

	*result |= ibmvfc_abort_task_set(sdev);
}
2391
2392 /**
2393  * ibmvfc_eh_target_reset_handler - Reset the target
2394  * @cmd:        scsi command struct
2395  *
2396  * Returns:
2397  *      SUCCESS / FAILED
2398  **/
2399 static int ibmvfc_eh_target_reset_handler(struct scsi_cmnd *cmd)
2400 {
2401         struct scsi_device *sdev = cmd->device;
2402         struct ibmvfc_host *vhost = shost_priv(sdev->host);
2403         struct scsi_target *starget = scsi_target(sdev);
2404         int reset_rc;
2405         int rc = FAILED;
2406         unsigned long cancel_rc = 0;
2407
2408         ENTER;
2409         fc_block_scsi_eh(cmd);
2410         ibmvfc_wait_while_resetting(vhost);
2411         starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_reset);
2412         reset_rc = ibmvfc_reset_device(sdev, IBMVFC_TARGET_RESET, "target");
2413
2414         if (!cancel_rc && !reset_rc)
2415                 rc = ibmvfc_wait_for_ops(vhost, starget, ibmvfc_match_target);
2416
2417         LEAVE;
2418         return rc;
2419 }
2420
2421 /**
2422  * ibmvfc_eh_host_reset_handler - Reset the connection to the server
2423  * @cmd:        struct scsi_cmnd having problems
2424  *
2425  **/
2426 static int ibmvfc_eh_host_reset_handler(struct scsi_cmnd *cmd)
2427 {
2428         int rc;
2429         struct ibmvfc_host *vhost = shost_priv(cmd->device->host);
2430
2431         fc_block_scsi_eh(cmd);
2432         dev_err(vhost->dev, "Resetting connection due to error recovery\n");
2433         rc = ibmvfc_issue_fc_host_lip(vhost->host);
2434         return rc ? FAILED : SUCCESS;
2435 }
2436
2437 /**
2438  * ibmvfc_terminate_rport_io - Terminate all pending I/O to the rport.
2439  * @rport:              rport struct
2440  *
2441  * Return value:
2442  *      none
2443  **/
2444 static void ibmvfc_terminate_rport_io(struct fc_rport *rport)
2445 {
2446         struct scsi_target *starget = to_scsi_target(&rport->dev);
2447         struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
2448         struct ibmvfc_host *vhost = shost_priv(shost);
2449         unsigned long cancel_rc = 0;
2450         unsigned long abort_rc = 0;
2451         int rc = FAILED;
2452
2453         ENTER;
2454         starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_abts);
2455         starget_for_each_device(starget, &abort_rc, ibmvfc_dev_abort_all);
2456
2457         if (!cancel_rc && !abort_rc)
2458                 rc = ibmvfc_wait_for_ops(vhost, starget, ibmvfc_match_target);
2459
2460         if (rc == FAILED)
2461                 ibmvfc_issue_fc_host_lip(shost);
2462         LEAVE;
2463 }
2464
/* Lookup table mapping async event codes to printable descriptions,
 * searched linearly by ibmvfc_get_ae_desc(). */
static const struct {
	enum ibmvfc_async_event ae;
	const char *desc;
} ae_desc [] = {
	{ IBMVFC_AE_ELS_PLOGI,		"PLOGI" },
	{ IBMVFC_AE_ELS_LOGO,		"LOGO" },
	{ IBMVFC_AE_ELS_PRLO,		"PRLO" },
	{ IBMVFC_AE_SCN_NPORT,		"N-Port SCN" },
	{ IBMVFC_AE_SCN_GROUP,		"Group SCN" },
	{ IBMVFC_AE_SCN_DOMAIN,		"Domain SCN" },
	{ IBMVFC_AE_SCN_FABRIC,		"Fabric SCN" },
	{ IBMVFC_AE_LINK_UP,		"Link Up" },
	{ IBMVFC_AE_LINK_DOWN,		"Link Down" },
	{ IBMVFC_AE_LINK_DEAD,		"Link Dead" },
	{ IBMVFC_AE_HALT,			"Halt" },
	{ IBMVFC_AE_RESUME,		"Resume" },
	{ IBMVFC_AE_ADAPTER_FAILED,	"Adapter Failed" },
};

/* Fallback description for event codes not present in ae_desc */
static const char *unknown_ae = "Unknown async";
2485
2486 /**
2487  * ibmvfc_get_ae_desc - Get text description for async event
2488  * @ae: async event
2489  *
2490  **/
2491 static const char *ibmvfc_get_ae_desc(u64 ae)
2492 {
2493         int i;
2494
2495         for (i = 0; i < ARRAY_SIZE(ae_desc); i++)
2496                 if (ae_desc[i].ae == ae)
2497                         return ae_desc[i].desc;
2498
2499         return unknown_ae;
2500 }
2501
2502 /**
2503  * ibmvfc_handle_async - Handle an async event from the adapter
2504  * @crq:        crq to process
2505  * @vhost:      ibmvfc host struct
2506  *
2507  **/
2508 static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
2509                                 struct ibmvfc_host *vhost)
2510 {
2511         const char *desc = ibmvfc_get_ae_desc(crq->event);
2512         struct ibmvfc_target *tgt;
2513
2514         ibmvfc_log(vhost, 3, "%s event received. scsi_id: %llx, wwpn: %llx,"
2515                    " node_name: %llx\n", desc, crq->scsi_id, crq->wwpn, crq->node_name);
2516
2517         switch (crq->event) {
2518         case IBMVFC_AE_RESUME:
2519                 switch (crq->link_state) {
2520                 case IBMVFC_AE_LS_LINK_DOWN:
2521                         ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
2522                         break;
2523                 case IBMVFC_AE_LS_LINK_DEAD:
2524                         ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
2525                         break;
2526                 case IBMVFC_AE_LS_LINK_UP:
2527                 case IBMVFC_AE_LS_LINK_BOUNCED:
2528                 default:
2529                         vhost->events_to_log |= IBMVFC_AE_LINKUP;
2530                         vhost->delay_init = 1;
2531                         __ibmvfc_reset_host(vhost);
2532                         break;
2533                 };
2534
2535                 break;
2536         case IBMVFC_AE_LINK_UP:
2537                 vhost->events_to_log |= IBMVFC_AE_LINKUP;
2538                 vhost->delay_init = 1;
2539                 __ibmvfc_reset_host(vhost);
2540                 break;
2541         case IBMVFC_AE_SCN_FABRIC:
2542         case IBMVFC_AE_SCN_DOMAIN:
2543                 vhost->events_to_log |= IBMVFC_AE_RSCN;
2544                 vhost->delay_init = 1;
2545                 __ibmvfc_reset_host(vhost);
2546                 break;
2547         case IBMVFC_AE_SCN_NPORT:
2548         case IBMVFC_AE_SCN_GROUP:
2549                 vhost->events_to_log |= IBMVFC_AE_RSCN;
2550                 ibmvfc_reinit_host(vhost);
2551                 break;
2552         case IBMVFC_AE_ELS_LOGO:
2553         case IBMVFC_AE_ELS_PRLO:
2554         case IBMVFC_AE_ELS_PLOGI:
2555                 list_for_each_entry(tgt, &vhost->targets, queue) {
2556                         if (!crq->scsi_id && !crq->wwpn && !crq->node_name)
2557                                 break;
2558                         if (crq->scsi_id && tgt->scsi_id != crq->scsi_id)
2559                                 continue;
2560                         if (crq->wwpn && tgt->ids.port_name != crq->wwpn)
2561                                 continue;
2562                         if (crq->node_name && tgt->ids.node_name != crq->node_name)
2563                                 continue;
2564                         if (tgt->need_login && crq->event == IBMVFC_AE_ELS_LOGO)
2565                                 tgt->logo_rcvd = 1;
2566                         if (!tgt->need_login || crq->event == IBMVFC_AE_ELS_PLOGI) {
2567                                 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
2568                                 ibmvfc_reinit_host(vhost);
2569                         }
2570                 }
2571                 break;
2572         case IBMVFC_AE_LINK_DOWN:
2573         case IBMVFC_AE_ADAPTER_FAILED:
2574                 ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
2575                 break;
2576         case IBMVFC_AE_LINK_DEAD:
2577                 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
2578                 break;
2579         case IBMVFC_AE_HALT:
2580                 ibmvfc_link_down(vhost, IBMVFC_HALTED);
2581                 break;
2582         default:
2583                 dev_err(vhost->dev, "Unknown async event received: %lld\n", crq->event);
2584                 break;
2585         };
2586 }
2587
/**
 * ibmvfc_handle_crq - Handles and frees received events in the CRQ
 * @crq:	Command/Response queue
 * @vhost:	ibmvfc host struct
 *
 * Processes init handshake messages and transport events directly;
 * for command responses, validates the returned correlation token and
 * runs the originating event's completion handler.
 **/
static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost)
{
	long rc;
	/* The ioba field carries back the correlation token we sent down,
	 * which is the address of the originating ibmvfc_event */
	struct ibmvfc_event *evt = (struct ibmvfc_event *)crq->ioba;

	switch (crq->valid) {
	case IBMVFC_CRQ_INIT_RSP:
		switch (crq->format) {
		case IBMVFC_CRQ_INIT:
			dev_info(vhost->dev, "Partner initialized\n");
			/* Send back a response */
			rc = ibmvfc_send_crq_init_complete(vhost);
			if (rc == 0)
				ibmvfc_init_host(vhost);
			else
				dev_err(vhost->dev, "Unable to send init rsp. rc=%ld\n", rc);
			break;
		case IBMVFC_CRQ_INIT_COMPLETE:
			dev_info(vhost->dev, "Partner initialization complete\n");
			ibmvfc_init_host(vhost);
			break;
		default:
			dev_err(vhost->dev, "Unknown crq message type: %d\n", crq->format);
		}
		return;
	case IBMVFC_CRQ_XPORT_EVENT:
		/* Transport event: the connection to the partner is gone */
		vhost->state = IBMVFC_NO_CRQ;
		vhost->logged_in = 0;
		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
		if (crq->format == IBMVFC_PARTITION_MIGRATED) {
			/* We need to re-setup the interpartition connection */
			dev_info(vhost->dev, "Re-enabling adapter\n");
			vhost->client_migrated = 1;
			/* requeue outstanding I/O - it can be retried after re-enable */
			ibmvfc_purge_requests(vhost, DID_REQUEUE);
			ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_REENABLE);
		} else {
			dev_err(vhost->dev, "Virtual adapter failed (rc=%d)\n", crq->format);
			ibmvfc_purge_requests(vhost, DID_ERROR);
			ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_RESET);
		}
		return;
	case IBMVFC_CRQ_CMD_RSP:
		break;
	default:
		dev_err(vhost->dev, "Got an invalid message type 0x%02x\n", crq->valid);
		return;
	}

	if (crq->format == IBMVFC_ASYNC_EVENT)
		return;

	/* The only kind of payload CRQs we should get are responses to
	 * things we send. Make sure this response is to something we
	 * actually sent
	 */
	if (unlikely(!ibmvfc_valid_event(&vhost->pool, evt))) {
		dev_err(vhost->dev, "Returned correlation_token 0x%08llx is invalid!\n",
			crq->ioba);
		return;
	}

	if (unlikely(atomic_read(&evt->free))) {
		dev_err(vhost->dev, "Received duplicate correlation_token 0x%08llx!\n",
			crq->ioba);
		return;
	}

	/* Valid response: stop the command timer, remove the event from the
	 * sent list, and invoke its completion handler */
	del_timer(&evt->timer);
	list_del(&evt->queue);
	ibmvfc_trc_end(evt);
	evt->done(evt);
}
2668
2669 /**
2670  * ibmvfc_scan_finished - Check if the device scan is done.
2671  * @shost:      scsi host struct
2672  * @time:       current elapsed time
2673  *
2674  * Returns:
2675  *      0 if scan is not done / 1 if scan is done
2676  **/
2677 static int ibmvfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
2678 {
2679         unsigned long flags;
2680         struct ibmvfc_host *vhost = shost_priv(shost);
2681         int done = 0;
2682
2683         spin_lock_irqsave(shost->host_lock, flags);
2684         if (time >= (init_timeout * HZ)) {
2685                 dev_info(vhost->dev, "Scan taking longer than %d seconds, "
2686                          "continuing initialization\n", init_timeout);
2687                 done = 1;
2688         }
2689
2690         if (vhost->scan_complete)
2691                 done = 1;
2692         spin_unlock_irqrestore(shost->host_lock, flags);
2693         return done;
2694 }
2695
2696 /**
2697  * ibmvfc_slave_alloc - Setup the device's task set value
2698  * @sdev:       struct scsi_device device to configure
2699  *
2700  * Set the device's task set value so that error handling works as
2701  * expected.
2702  *
2703  * Returns:
2704  *      0 on success / -ENXIO if device does not exist
2705  **/
2706 static int ibmvfc_slave_alloc(struct scsi_device *sdev)
2707 {
2708         struct Scsi_Host *shost = sdev->host;
2709         struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
2710         struct ibmvfc_host *vhost = shost_priv(shost);
2711         unsigned long flags = 0;
2712
2713         if (!rport || fc_remote_port_chkready(rport))
2714                 return -ENXIO;
2715
2716         spin_lock_irqsave(shost->host_lock, flags);
2717         sdev->hostdata = (void *)(unsigned long)vhost->task_set++;
2718         spin_unlock_irqrestore(shost->host_lock, flags);
2719         return 0;
2720 }
2721
2722 /**
2723  * ibmvfc_target_alloc - Setup the target's task set value
2724  * @starget:    struct scsi_target
2725  *
2726  * Set the target's task set value so that error handling works as
2727  * expected.
2728  *
2729  * Returns:
2730  *      0 on success / -ENXIO if device does not exist
2731  **/
2732 static int ibmvfc_target_alloc(struct scsi_target *starget)
2733 {
2734         struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
2735         struct ibmvfc_host *vhost = shost_priv(shost);
2736         unsigned long flags = 0;
2737
2738         spin_lock_irqsave(shost->host_lock, flags);
2739         starget->hostdata = (void *)(unsigned long)vhost->task_set++;
2740         spin_unlock_irqrestore(shost->host_lock, flags);
2741         return 0;
2742 }
2743
/**
 * ibmvfc_slave_configure - Configure the device
 * @sdev:	struct scsi_device device to configure
 *
 * Enable allow_restart for a device if it is a disk. Adjust the
 * queue_depth here also.
 *
 * Returns:
 *	0
 **/
static int ibmvfc_slave_configure(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	struct fc_rport *rport = starget_to_rport(sdev->sdev_target);
	unsigned long flags = 0;

	spin_lock_irqsave(shost->host_lock, flags);
	/* Allow the midlayer to issue START UNIT to spun-down disks */
	if (sdev->type == TYPE_DISK)
		sdev->allow_restart = 1;

	/* Use simple-tag command queuing when the device supports it */
	if (sdev->tagged_supported) {
		scsi_set_tag_type(sdev, MSG_SIMPLE_TAG);
		scsi_activate_tcq(sdev, sdev->queue_depth);
	} else
		scsi_deactivate_tcq(sdev, sdev->queue_depth);

	/* Propagate the driver's dev_loss_tmo setting to the rport */
	rport->dev_loss_tmo = dev_loss_tmo;
	spin_unlock_irqrestore(shost->host_lock, flags);
	return 0;
}
2774
2775 /**
2776  * ibmvfc_change_queue_depth - Change the device's queue depth
2777  * @sdev:       scsi device struct
2778  * @qdepth:     depth to set
2779  * @reason:     calling context
2780  *
2781  * Return value:
2782  *      actual depth set
2783  **/
2784 static int ibmvfc_change_queue_depth(struct scsi_device *sdev, int qdepth,
2785                                      int reason)
2786 {
2787         if (reason != SCSI_QDEPTH_DEFAULT)
2788                 return -EOPNOTSUPP;
2789
2790         if (qdepth > IBMVFC_MAX_CMDS_PER_LUN)
2791                 qdepth = IBMVFC_MAX_CMDS_PER_LUN;
2792
2793         scsi_adjust_queue_depth(sdev, 0, qdepth);
2794         return sdev->queue_depth;
2795 }
2796
2797 /**
2798  * ibmvfc_change_queue_type - Change the device's queue type
2799  * @sdev:               scsi device struct
2800  * @tag_type:   type of tags to use
2801  *
2802  * Return value:
2803  *      actual queue type set
2804  **/
2805 static int ibmvfc_change_queue_type(struct scsi_device *sdev, int tag_type)
2806 {
2807         if (sdev->tagged_supported) {
2808                 scsi_set_tag_type(sdev, tag_type);
2809
2810                 if (tag_type)
2811                         scsi_activate_tcq(sdev, sdev->queue_depth);
2812                 else
2813                         scsi_deactivate_tcq(sdev, sdev->queue_depth);
2814         } else
2815                 tag_type = 0;
2816
2817         return tag_type;
2818 }
2819
2820 static ssize_t ibmvfc_show_host_partition_name(struct device *dev,
2821                                                  struct device_attribute *attr, char *buf)
2822 {
2823         struct Scsi_Host *shost = class_to_shost(dev);
2824         struct ibmvfc_host *vhost = shost_priv(shost);
2825
2826         return snprintf(buf, PAGE_SIZE, "%s\n",
2827                         vhost->login_buf->resp.partition_name);
2828 }
2829
2830 static ssize_t ibmvfc_show_host_device_name(struct device *dev,
2831                                             struct device_attribute *attr, char *buf)
2832 {
2833         struct Scsi_Host *shost = class_to_shost(dev);
2834         struct ibmvfc_host *vhost = shost_priv(shost);
2835
2836         return snprintf(buf, PAGE_SIZE, "%s\n",
2837                         vhost->login_buf->resp.device_name);
2838 }
2839
2840 static ssize_t ibmvfc_show_host_loc_code(struct device *dev,
2841                                          struct device_attribute *attr, char *buf)
2842 {
2843         struct Scsi_Host *shost = class_to_shost(dev);
2844         struct ibmvfc_host *vhost = shost_priv(shost);
2845
2846         return snprintf(buf, PAGE_SIZE, "%s\n",
2847                         vhost->login_buf->resp.port_loc_code);
2848 }
2849
2850 static ssize_t ibmvfc_show_host_drc_name(struct device *dev,
2851                                          struct device_attribute *attr, char *buf)
2852 {
2853         struct Scsi_Host *shost = class_to_shost(dev);
2854         struct ibmvfc_host *vhost = shost_priv(shost);
2855
2856         return snprintf(buf, PAGE_SIZE, "%s\n",
2857                         vhost->login_buf->resp.drc_name);
2858 }
2859
2860 static ssize_t ibmvfc_show_host_npiv_version(struct device *dev,
2861                                              struct device_attribute *attr, char *buf)
2862 {
2863         struct Scsi_Host *shost = class_to_shost(dev);
2864         struct ibmvfc_host *vhost = shost_priv(shost);
2865         return snprintf(buf, PAGE_SIZE, "%d\n", vhost->login_buf->resp.version);
2866 }
2867
2868 static ssize_t ibmvfc_show_host_capabilities(struct device *dev,
2869                                              struct device_attribute *attr, char *buf)
2870 {
2871         struct Scsi_Host *shost = class_to_shost(dev);
2872         struct ibmvfc_host *vhost = shost_priv(shost);
2873         return snprintf(buf, PAGE_SIZE, "%llx\n", vhost->login_buf->resp.capabilities);
2874 }
2875
2876 /**
2877  * ibmvfc_show_log_level - Show the adapter's error logging level
2878  * @dev:        class device struct
2879  * @buf:        buffer
2880  *
2881  * Return value:
2882  *      number of bytes printed to buffer
2883  **/
2884 static ssize_t ibmvfc_show_log_level(struct device *dev,
2885                                      struct device_attribute *attr, char *buf)
2886 {
2887         struct Scsi_Host *shost = class_to_shost(dev);
2888         struct ibmvfc_host *vhost = shost_priv(shost);
2889         unsigned long flags = 0;
2890         int len;
2891
2892         spin_lock_irqsave(shost->host_lock, flags);
2893         len = snprintf(buf, PAGE_SIZE, "%d\n", vhost->log_level);
2894         spin_unlock_irqrestore(shost->host_lock, flags);
2895         return len;
2896 }
2897
/**
 * ibmvfc_store_log_level - Change the adapter's error logging level
 * @dev:	class device struct
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ibmvfc_store_log_level(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ibmvfc_host *vhost = shost_priv(shost);
	unsigned long flags = 0;

	spin_lock_irqsave(shost->host_lock, flags);
	/* NOTE(review): no input validation - non-numeric input silently
	 * sets the level to 0. Consider stricter parsing if that matters. */
	vhost->log_level = simple_strtoul(buf, NULL, 10);
	spin_unlock_irqrestore(shost->host_lock, flags);
	/* NOTE(review): returns strlen(buf) rather than count; these are
	 * normally equal for sysfs writes, but returning count is the more
	 * common convention - confirm before changing. */
	return strlen(buf);
}
2919
/* sysfs host attribute definitions; exported via the ibmvfc_attrs[] list.
 * All are read-only except log_level, which is also root-writable. */
static DEVICE_ATTR(partition_name, S_IRUGO, ibmvfc_show_host_partition_name, NULL);
static DEVICE_ATTR(device_name, S_IRUGO, ibmvfc_show_host_device_name, NULL);
static DEVICE_ATTR(port_loc_code, S_IRUGO, ibmvfc_show_host_loc_code, NULL);
static DEVICE_ATTR(drc_name, S_IRUGO, ibmvfc_show_host_drc_name, NULL);
static DEVICE_ATTR(npiv_version, S_IRUGO, ibmvfc_show_host_npiv_version, NULL);
static DEVICE_ATTR(capabilities, S_IRUGO, ibmvfc_show_host_capabilities, NULL);
static DEVICE_ATTR(log_level, S_IRUGO | S_IWUSR,
		   ibmvfc_show_log_level, ibmvfc_store_log_level);
2928
2929 #ifdef CONFIG_SCSI_IBMVFC_TRACE
/**
 * ibmvfc_read_trace - Dump the adapter trace
 * @filp:		open sysfs file
 * @kobj:		kobject struct
 * @bin_attr:	bin_attribute struct
 * @buf:		buffer
 * @off:		offset
 * @count:		buffer size
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ibmvfc_read_trace(struct file *filp, struct kobject *kobj,
				 struct bin_attribute *bin_attr,
				 char *buf, loff_t off, size_t count)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ibmvfc_host *vhost = shost_priv(shost);
	unsigned long flags = 0;
	int size = IBMVFC_TRACE_SIZE;
	char *src = (char *)vhost->trace;

	/* Clamp the read to the bounds of the trace buffer */
	if (off > size)
		return 0;
	if (off + count > size) {
		size -= off;
		count = size;
	}

	/* Copy under the host lock so the trace isn't updated mid-copy */
	spin_lock_irqsave(shost->host_lock, flags);
	memcpy(buf, &src[off], count);
	spin_unlock_irqrestore(shost->host_lock, flags);
	return count;
}
2965
/* Binary sysfs attribute exposing the adapter trace buffer (read-only).
 * size 0 means the file size is unbounded from sysfs' point of view;
 * ibmvfc_read_trace() bounds each read itself. */
static struct bin_attribute ibmvfc_trace_attr = {
	.attr = {
		.name = "trace",
		.mode = S_IRUGO,
	},
	.size = 0,
	.read = ibmvfc_read_trace,
};
2974 #endif
2975
/* NULL-terminated list of host sysfs attributes, hooked into
 * driver_template.shost_attrs */
static struct device_attribute *ibmvfc_attrs[] = {
	&dev_attr_partition_name,
	&dev_attr_device_name,
	&dev_attr_port_loc_code,
	&dev_attr_drc_name,
	&dev_attr_npiv_version,
	&dev_attr_capabilities,
	&dev_attr_log_level,
	NULL
};
2986
/* SCSI midlayer host template: entry points and limits for the
 * virtual FC adapter */
static struct scsi_host_template driver_template = {
	.module = THIS_MODULE,
	.name = "IBM POWER Virtual FC Adapter",
	.proc_name = IBMVFC_NAME,
	.queuecommand = ibmvfc_queuecommand,
	.eh_abort_handler = ibmvfc_eh_abort_handler,
	.eh_device_reset_handler = ibmvfc_eh_device_reset_handler,
	.eh_target_reset_handler = ibmvfc_eh_target_reset_handler,
	.eh_host_reset_handler = ibmvfc_eh_host_reset_handler,
	.slave_alloc = ibmvfc_slave_alloc,
	.slave_configure = ibmvfc_slave_configure,
	.target_alloc = ibmvfc_target_alloc,
	.scan_finished = ibmvfc_scan_finished,
	.change_queue_depth = ibmvfc_change_queue_depth,
	.change_queue_type = ibmvfc_change_queue_type,
	.cmd_per_lun = 16,
	.can_queue = IBMVFC_MAX_REQUESTS_DEFAULT,
	.this_id = -1,
	.sg_tablesize = SG_ALL,
	.max_sectors = IBMVFC_MAX_SECTORS,
	.use_clustering = ENABLE_CLUSTERING,
	.shost_attrs = ibmvfc_attrs,
};
3010
/**
 * ibmvfc_next_async_crq - Returns the next entry in async queue
 * @vhost:	ibmvfc host struct
 *
 * Returns:
 *	Pointer to next entry in queue / NULL if empty
 **/
static struct ibmvfc_async_crq *ibmvfc_next_async_crq(struct ibmvfc_host *vhost)
{
	struct ibmvfc_async_crq_queue *async_crq = &vhost->async_crq;
	struct ibmvfc_async_crq *crq;

	crq = &async_crq->msgs[async_crq->cur];
	/* The top bit of 'valid' marks an entry the partner has filled in */
	if (crq->valid & 0x80) {
		/* Consume the entry, wrapping the cursor at the ring's end.
		 * The rmb() orders the valid-bit load before the caller's
		 * reads of the rest of the entry. */
		if (++async_crq->cur == async_crq->size)
			async_crq->cur = 0;
		rmb();
	} else
		crq = NULL;

	return crq;
}
3033
/**
 * ibmvfc_next_crq - Returns the next entry in message queue
 * @vhost:	ibmvfc host struct
 *
 * Returns:
 *	Pointer to next entry in queue / NULL if empty
 **/
static struct ibmvfc_crq *ibmvfc_next_crq(struct ibmvfc_host *vhost)
{
	struct ibmvfc_crq_queue *queue = &vhost->crq;
	struct ibmvfc_crq *crq;

	crq = &queue->msgs[queue->cur];
	/* The top bit of 'valid' marks an entry the partner has filled in */
	if (crq->valid & 0x80) {
		/* Consume the entry, wrapping the cursor at the ring's end.
		 * The rmb() orders the valid-bit load before the caller's
		 * reads of the rest of the entry. */
		if (++queue->cur == queue->size)
			queue->cur = 0;
		rmb();
	} else
		crq = NULL;

	return crq;
}
3056
3057 /**
3058  * ibmvfc_interrupt - Interrupt handler
3059  * @irq:                number of irq to handle, not used
3060  * @dev_instance: ibmvfc_host that received interrupt
3061  *
3062  * Returns:
3063  *      IRQ_HANDLED
3064  **/
3065 static irqreturn_t ibmvfc_interrupt(int irq, void *dev_instance)
3066 {
3067         struct ibmvfc_host *vhost = (struct ibmvfc_host *)dev_instance;
3068         unsigned long flags;
3069
3070         spin_lock_irqsave(vhost->host->host_lock, flags);
3071         vio_disable_interrupts(to_vio_dev(vhost->dev));
3072         tasklet_schedule(&vhost->tasklet);
3073         spin_unlock_irqrestore(vhost->host->host_lock, flags);
3074         return IRQ_HANDLED;
3075 }
3076
/**
 * ibmvfc_tasklet - Interrupt handler tasklet
 * @data:		ibmvfc host struct
 *
 * Drains the async event CRQ and the command response CRQ, then
 * re-enables interrupts and re-checks both queues to close the race
 * with entries that arrive after draining but before interrupts are
 * back on.
 *
 * Returns:
 *	Nothing
 **/
static void ibmvfc_tasklet(void *data)
{
	struct ibmvfc_host *vhost = data;
	struct vio_dev *vdev = to_vio_dev(vhost->dev);
	struct ibmvfc_crq *crq;
	struct ibmvfc_async_crq *async;
	unsigned long flags;
	int done = 0;

	spin_lock_irqsave(vhost->host->host_lock, flags);
	while (!done) {
		/* Pull all the valid messages off the async CRQ */
		while ((async = ibmvfc_next_async_crq(vhost)) != NULL) {
			ibmvfc_handle_async(async, vhost);
			/* hand the slot back to the partner */
			async->valid = 0;
			wmb();
		}

		/* Pull all the valid messages off the CRQ */
		while ((crq = ibmvfc_next_crq(vhost)) != NULL) {
			ibmvfc_handle_crq(crq, vhost);
			crq->valid = 0;
			wmb();
		}

		/* Re-enable interrupts, then re-check both queues: an entry
		 * that slipped in just before enabling would otherwise be
		 * stranded until the next interrupt */
		vio_enable_interrupts(vdev);
		if ((async = ibmvfc_next_async_crq(vhost)) != NULL) {
			vio_disable_interrupts(vdev);
			ibmvfc_handle_async(async, vhost);
			async->valid = 0;
			wmb();
		} else if ((crq = ibmvfc_next_crq(vhost)) != NULL) {
			vio_disable_interrupts(vdev);
			ibmvfc_handle_crq(crq, vhost);
			crq->valid = 0;
			wmb();
		} else
			done = 1;
	}

	spin_unlock_irqrestore(vhost->host->host_lock, flags);
}
3126
/**
 * ibmvfc_init_tgt - Set the next init job step for the target
 * @tgt:		ibmvfc target struct
 * @job_step:	job step to perform
 *
 * Marks the target for initialization, records the job step to run,
 * and wakes the host work thread to execute it.
 **/
static void ibmvfc_init_tgt(struct ibmvfc_target *tgt,
			    void (*job_step) (struct ibmvfc_target *))
{
	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT);
	tgt->job_step = job_step;
	wake_up(&tgt->vhost->work_wait_q);
}
3140
3141 /**
3142  * ibmvfc_retry_tgt_init - Attempt to retry a step in target initialization
3143  * @tgt:                ibmvfc target struct
3144  * @job_step:   initialization job step
3145  *
3146  * Returns: 1 if step will be retried / 0 if not
3147  *
3148  **/
3149 static int ibmvfc_retry_tgt_init(struct ibmvfc_target *tgt,
3150                                   void (*job_step) (struct ibmvfc_target *))
3151 {
3152         if (++tgt->init_retries > IBMVFC_MAX_TGT_INIT_RETRIES) {
3153                 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
3154                 wake_up(&tgt->vhost->work_wait_q);
3155                 return 0;
3156         } else
3157                 ibmvfc_init_tgt(tgt, job_step);
3158         return 1;
3159 }
3160
/* Defined in FC-LS */
/*
 * PRLI response code table: maps the response code (bits 11:8 of the
 * PRLI service parameter flags, see ibmvfc_get_prli_rsp()) to whether
 * the login should be retried and whether it indicates we are logged in.
 */
static const struct {
	int code;
	int retry;
	int logged_in;
} prli_rsp [] = {
	{ 0, 1, 0 },
	{ 1, 0, 1 },
	{ 2, 1, 0 },
	{ 3, 1, 0 },
	{ 4, 0, 0 },
	{ 5, 0, 0 },
	{ 6, 0, 1 },
	{ 7, 0, 0 },
	{ 8, 1, 0 },
};
3177
3178 /**
3179  * ibmvfc_get_prli_rsp - Find PRLI response index
3180  * @flags:      PRLI response flags
3181  *
3182  **/
3183 static int ibmvfc_get_prli_rsp(u16 flags)
3184 {
3185         int i;
3186         int code = (flags & 0x0f00) >> 8;
3187
3188         for (i = 0; i < ARRAY_SIZE(prli_rsp); i++)
3189                 if (prli_rsp[i].code == code)
3190                         return i;
3191
3192         return 0;
3193 }
3194
/**
 * ibmvfc_tgt_prli_done - Completion handler for Process Login
 * @evt:	ibmvfc event struct
 *
 * On success, records the target's FCP roles and flags it for rport
 * addition; on failure, either retries PRLI/PLOGI or marks the target
 * for rport deletion depending on the error.
 **/
static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
{
	struct ibmvfc_target *tgt = evt->tgt;
	struct ibmvfc_host *vhost = evt->vhost;
	struct ibmvfc_process_login *rsp = &evt->xfer_iu->prli;
	struct ibmvfc_prli_svc_parms *parms = &rsp->parms;
	u32 status = rsp->common.status;
	int index, level = IBMVFC_DEFAULT_LOG_LEVEL;

	vhost->discovery_threads--;
	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
	switch (status) {
	case IBMVFC_MAD_SUCCESS:
		tgt_dbg(tgt, "Process Login succeeded: %X %02X %04X\n",
			parms->type, parms->flags, parms->service_parms);

		if (parms->type == IBMVFC_SCSI_FCP_TYPE) {
			/* Map the PRLI response code to retry/logged-in state */
			index = ibmvfc_get_prli_rsp(parms->flags);
			if (prli_rsp[index].logged_in) {
				if (parms->flags & IBMVFC_PRLI_EST_IMG_PAIR) {
					tgt->need_login = 0;
					/* Record the roles the target advertised */
					tgt->ids.roles = 0;
					if (parms->service_parms & IBMVFC_PRLI_TARGET_FUNC)
						tgt->ids.roles |= FC_PORT_ROLE_FCP_TARGET;
					if (parms->service_parms & IBMVFC_PRLI_INITIATOR_FUNC)
						tgt->ids.roles |= FC_PORT_ROLE_FCP_INITIATOR;
					tgt->add_rport = 1;
				} else
					ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
			} else if (prli_rsp[index].retry)
				ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
			else
				ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
		} else
			ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
		break;
	case IBMVFC_MAD_DRIVER_FAILED:
		break;
	case IBMVFC_MAD_CRQ_ERROR:
		ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
		break;
	case IBMVFC_MAD_FAILED:
	default:
		/* Fall back to PLOGI when the VIOS requires it or the target
		 * logged us out; otherwise retry PRLI for retryable errors */
		if ((rsp->status & IBMVFC_VIOS_FAILURE) && rsp->error == IBMVFC_PLOGI_REQUIRED)
			level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
		else if (tgt->logo_rcvd)
			level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
		else if (ibmvfc_retry_cmd(rsp->status, rsp->error))
			level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
		else
			ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);

		tgt_log(tgt, level, "Process Login failed: %s (%x:%x) rc=0x%02X\n",
			ibmvfc_get_cmd_error(rsp->status, rsp->error),
			rsp->status, rsp->error, status);
		break;
	};

	kref_put(&tgt->kref, ibmvfc_release_tgt);
	ibmvfc_free_event(evt);
	wake_up(&vhost->work_wait_q);
}
3262
/**
 * ibmvfc_tgt_send_prli - Send a process login
 * @tgt:	ibmvfc target struct
 *
 * Builds and sends a PRLI MAD for the target, bounded by the
 * disc_threads limit on concurrent discovery commands.
 **/
static void ibmvfc_tgt_send_prli(struct ibmvfc_target *tgt)
{
	struct ibmvfc_process_login *prli;
	struct ibmvfc_host *vhost = tgt->vhost;
	struct ibmvfc_event *evt;

	/* Bound the number of concurrent discovery commands */
	if (vhost->discovery_threads >= disc_threads)
		return;

	/* Hold a target reference for the duration of the command */
	kref_get(&tgt->kref);
	evt = ibmvfc_get_event(vhost);
	vhost->discovery_threads++;
	ibmvfc_init_event(evt, ibmvfc_tgt_prli_done, IBMVFC_MAD_FORMAT);
	evt->tgt = tgt;
	prli = &evt->iu.prli;
	memset(prli, 0, sizeof(*prli));
	prli->common.version = 1;
	prli->common.opcode = IBMVFC_PROCESS_LOGIN;
	prli->common.length = sizeof(*prli);
	prli->scsi_id = tgt->scsi_id;

	prli->parms.type = IBMVFC_SCSI_FCP_TYPE;
	prli->parms.flags = IBMVFC_PRLI_EST_IMG_PAIR;
	prli->parms.service_parms = IBMVFC_PRLI_INITIATOR_FUNC;

	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
	if (ibmvfc_send_event(evt, vhost, default_timeout)) {
		/* Send failed - undo the thread count and target reference */
		vhost->discovery_threads--;
		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
		kref_put(&tgt->kref, ibmvfc_release_tgt);
	} else
		tgt_dbg(tgt, "Sent process login\n");
}
3301
/**
 * ibmvfc_tgt_plogi_done - Completion handler for Port Login
 * @evt:	ibmvfc event struct
 *
 * On success, caches the target's world wide names and service
 * parameters and proceeds to PRLI. On failure, retries the login or
 * marks the rport for deletion. Always drops the target reference
 * taken by the sender and frees the event.
 **/
static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt)
{
	struct ibmvfc_target *tgt = evt->tgt;
	struct ibmvfc_host *vhost = evt->vhost;
	struct ibmvfc_port_login *rsp = &evt->xfer_iu->plogi;
	u32 status = rsp->common.status;
	int level = IBMVFC_DEFAULT_LOG_LEVEL;

	vhost->discovery_threads--;
	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
	switch (status) {
	case IBMVFC_MAD_SUCCESS:
		tgt_dbg(tgt, "Port Login succeeded\n");
		if (tgt->ids.port_name &&
		    tgt->ids.port_name != wwn_to_u64(rsp->service_parms.port_name)) {
			/* A different WWPN now answers at this SCSI ID:
			 * request a full rediscovery rather than reusing
			 * the stale target. */
			vhost->reinit = 1;
			tgt_dbg(tgt, "Port re-init required\n");
			break;
		}
		tgt->ids.node_name = wwn_to_u64(rsp->service_parms.node_name);
		tgt->ids.port_name = wwn_to_u64(rsp->service_parms.port_name);
		tgt->ids.port_id = tgt->scsi_id;
		memcpy(&tgt->service_parms, &rsp->service_parms,
		       sizeof(tgt->service_parms));
		memcpy(&tgt->service_parms_change, &rsp->service_parms_change,
		       sizeof(tgt->service_parms_change));
		ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_prli);
		break;
	case IBMVFC_MAD_DRIVER_FAILED:
		break;
	case IBMVFC_MAD_CRQ_ERROR:
		ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
		break;
	case IBMVFC_MAD_FAILED:
	default:
		/* Retry transient errors; otherwise drop the rport. A
		 * retry bumps the log level so repeated failures get louder. */
		if (ibmvfc_retry_cmd(rsp->status, rsp->error))
			level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
		else
			ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);

		tgt_log(tgt, level, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
			ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error,
			ibmvfc_get_fc_type(rsp->fc_type), rsp->fc_type,
			ibmvfc_get_ls_explain(rsp->fc_explain), rsp->fc_explain, status);
		break;
	};

	kref_put(&tgt->kref, ibmvfc_release_tgt);
	ibmvfc_free_event(evt);
	wake_up(&vhost->work_wait_q);
}
3358
3359 /**
3360  * ibmvfc_tgt_send_plogi - Send PLOGI to the specified target
3361  * @tgt:        ibmvfc target struct
3362  *
3363  **/
3364 static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *tgt)
3365 {
3366         struct ibmvfc_port_login *plogi;
3367         struct ibmvfc_host *vhost = tgt->vhost;
3368         struct ibmvfc_event *evt;
3369
3370         if (vhost->discovery_threads >= disc_threads)
3371                 return;
3372
3373         kref_get(&tgt->kref);
3374         tgt->logo_rcvd = 0;
3375         evt = ibmvfc_get_event(vhost);
3376         vhost->discovery_threads++;
3377         ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
3378         ibmvfc_init_event(evt, ibmvfc_tgt_plogi_done, IBMVFC_MAD_FORMAT);
3379         evt->tgt = tgt;
3380         plogi = &evt->iu.plogi;
3381         memset(plogi, 0, sizeof(*plogi));
3382         plogi->common.version = 1;
3383         plogi->common.opcode = IBMVFC_PORT_LOGIN;
3384         plogi->common.length = sizeof(*plogi);
3385         plogi->scsi_id = tgt->scsi_id;
3386
3387         if (ibmvfc_send_event(evt, vhost, default_timeout)) {
3388                 vhost->discovery_threads--;
3389                 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
3390                 kref_put(&tgt->kref, ibmvfc_release_tgt);
3391         } else
3392                 tgt_dbg(tgt, "Sent port login\n");
3393 }
3394
/**
 * ibmvfc_tgt_implicit_logout_done - Completion handler for Implicit Logout MAD
 * @evt:	ibmvfc event struct
 *
 * After the logout completes, either re-login to the target (during
 * target init) or, if the target's SCSI ID has moved during a query,
 * schedule the rport for deletion. Drops the sender's target
 * reference in all paths.
 **/
static void ibmvfc_tgt_implicit_logout_done(struct ibmvfc_event *evt)
{
	struct ibmvfc_target *tgt = evt->tgt;
	struct ibmvfc_host *vhost = evt->vhost;
	struct ibmvfc_implicit_logout *rsp = &evt->xfer_iu->implicit_logout;
	u32 status = rsp->common.status;

	vhost->discovery_threads--;
	/* Event freed early here; rsp/status were already copied out */
	ibmvfc_free_event(evt);
	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);

	switch (status) {
	case IBMVFC_MAD_SUCCESS:
		tgt_dbg(tgt, "Implicit Logout succeeded\n");
		break;
	case IBMVFC_MAD_DRIVER_FAILED:
		/* Driver-level failure: don't touch target state, just
		 * release the reference and wake the work thread. */
		kref_put(&tgt->kref, ibmvfc_release_tgt);
		wake_up(&vhost->work_wait_q);
		return;
	case IBMVFC_MAD_FAILED:
	default:
		tgt_err(tgt, "Implicit Logout failed: rc=0x%02X\n", status);
		break;
	};

	if (vhost->action == IBMVFC_HOST_ACTION_TGT_INIT)
		ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_plogi);
	else if (vhost->action == IBMVFC_HOST_ACTION_QUERY_TGTS &&
		 tgt->scsi_id != tgt->new_scsi_id)
		/* Target moved to a different SCSI ID: drop the old rport */
		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
	kref_put(&tgt->kref, ibmvfc_release_tgt);
	wake_up(&vhost->work_wait_q);
}
3433
3434 /**
3435  * ibmvfc_tgt_implicit_logout - Initiate an Implicit Logout for specified target
3436  * @tgt:                ibmvfc target struct
3437  *
3438  **/
3439 static void ibmvfc_tgt_implicit_logout(struct ibmvfc_target *tgt)
3440 {
3441         struct ibmvfc_implicit_logout *mad;
3442         struct ibmvfc_host *vhost = tgt->vhost;
3443         struct ibmvfc_event *evt;
3444
3445         if (vhost->discovery_threads >= disc_threads)
3446                 return;
3447
3448         kref_get(&tgt->kref);
3449         evt = ibmvfc_get_event(vhost);
3450         vhost->discovery_threads++;
3451         ibmvfc_init_event(evt, ibmvfc_tgt_implicit_logout_done, IBMVFC_MAD_FORMAT);
3452         evt->tgt = tgt;
3453         mad = &evt->iu.implicit_logout;
3454         memset(mad, 0, sizeof(*mad));
3455         mad->common.version = 1;
3456         mad->common.opcode = IBMVFC_IMPLICIT_LOGOUT;
3457         mad->common.length = sizeof(*mad);
3458         mad->old_scsi_id = tgt->scsi_id;
3459
3460         ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
3461         if (ibmvfc_send_event(evt, vhost, default_timeout)) {
3462                 vhost->discovery_threads--;
3463                 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
3464                 kref_put(&tgt->kref, ibmvfc_release_tgt);
3465         } else
3466                 tgt_dbg(tgt, "Sent Implicit Logout\n");
3467 }
3468
3469 /**
3470  * ibmvfc_adisc_needs_plogi - Does device need PLOGI?
3471  * @mad:        ibmvfc passthru mad struct
3472  * @tgt:        ibmvfc target struct
3473  *
3474  * Returns:
3475  *      1 if PLOGI needed / 0 if PLOGI not needed
3476  **/
3477 static int ibmvfc_adisc_needs_plogi(struct ibmvfc_passthru_mad *mad,
3478                                     struct ibmvfc_target *tgt)
3479 {
3480         if (memcmp(&mad->fc_iu.response[2], &tgt->ids.port_name,
3481                    sizeof(tgt->ids.port_name)))
3482                 return 1;
3483         if (memcmp(&mad->fc_iu.response[4], &tgt->ids.node_name,
3484                    sizeof(tgt->ids.node_name)))
3485                 return 1;
3486         if (mad->fc_iu.response[6] != tgt->scsi_id)
3487                 return 1;
3488         return 0;
3489 }
3490
/**
 * ibmvfc_tgt_adisc_done - Completion handler for ADISC
 * @evt:	ibmvfc event struct
 *
 * Stops the ADISC watchdog timer and, on success, verifies the
 * target's identity still matches; any mismatch or failure schedules
 * the rport for deletion. Drops the sender's target reference and
 * frees the event.
 **/
static void ibmvfc_tgt_adisc_done(struct ibmvfc_event *evt)
{
	struct ibmvfc_target *tgt = evt->tgt;
	struct ibmvfc_host *vhost = evt->vhost;
	struct ibmvfc_passthru_mad *mad = &evt->xfer_iu->passthru;
	u32 status = mad->common.status;
	u8 fc_reason, fc_explain;

	vhost->discovery_threads--;
	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
	/* ADISC completed: the cancel watchdog is no longer needed */
	del_timer(&tgt->timer);

	switch (status) {
	case IBMVFC_MAD_SUCCESS:
		tgt_dbg(tgt, "ADISC succeeded\n");
		if (ibmvfc_adisc_needs_plogi(mad, tgt))
			/* Identity changed: tear down the rport */
			ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
		break;
	case IBMVFC_MAD_DRIVER_FAILED:
		break;
	case IBMVFC_MAD_FAILED:
	default:
		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
		/* Reason/explain are packed into word 1 of the ELS response */
		fc_reason = (mad->fc_iu.response[1] & 0x00ff0000) >> 16;
		fc_explain = (mad->fc_iu.response[1] & 0x0000ff00) >> 8;
		tgt_info(tgt, "ADISC failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
			 ibmvfc_get_cmd_error(mad->iu.status, mad->iu.error),
			 mad->iu.status, mad->iu.error,
			 ibmvfc_get_fc_type(fc_reason), fc_reason,
			 ibmvfc_get_ls_explain(fc_explain), fc_explain, status);
		break;
	};

	kref_put(&tgt->kref, ibmvfc_release_tgt);
	ibmvfc_free_event(evt);
	wake_up(&vhost->work_wait_q);
}
3533
3534 /**
3535  * ibmvfc_init_passthru - Initialize an event struct for FC passthru
3536  * @evt:                ibmvfc event struct
3537  *
3538  **/
3539 static void ibmvfc_init_passthru(struct ibmvfc_event *evt)
3540 {
3541         struct ibmvfc_passthru_mad *mad = &evt->iu.passthru;
3542
3543         memset(mad, 0, sizeof(*mad));
3544         mad->common.version = 1;
3545         mad->common.opcode = IBMVFC_PASSTHRU;
3546         mad->common.length = sizeof(*mad) - sizeof(mad->fc_iu) - sizeof(mad->iu);
3547         mad->cmd_ioba.va = (u64)evt->crq.ioba +
3548                 offsetof(struct ibmvfc_passthru_mad, iu);
3549         mad->cmd_ioba.len = sizeof(mad->iu);
3550         mad->iu.cmd_len = sizeof(mad->fc_iu.payload);
3551         mad->iu.rsp_len = sizeof(mad->fc_iu.response);
3552         mad->iu.cmd.va = (u64)evt->crq.ioba +
3553                 offsetof(struct ibmvfc_passthru_mad, fc_iu) +
3554                 offsetof(struct ibmvfc_passthru_fc_iu, payload);
3555         mad->iu.cmd.len = sizeof(mad->fc_iu.payload);
3556         mad->iu.rsp.va = (u64)evt->crq.ioba +
3557                 offsetof(struct ibmvfc_passthru_mad, fc_iu) +
3558                 offsetof(struct ibmvfc_passthru_fc_iu, response);
3559         mad->iu.rsp.len = sizeof(mad->fc_iu.response);
3560 }
3561
/**
 * ibmvfc_tgt_adisc_cancel_done - Completion handler when cancelling an ADISC
 * @evt:		ibmvfc event struct
 *
 * Just cleanup this event struct. Everything else is handled by
 * the ADISC completion handler. If the ADISC never actually comes
 * back, we still have the timer running on the ADISC event struct
 * which will fire and cause the CRQ to get reset.
 *
 **/
static void ibmvfc_tgt_adisc_cancel_done(struct ibmvfc_event *evt)
{
	struct ibmvfc_host *vhost = evt->vhost;
	struct ibmvfc_target *tgt = evt->tgt;

	tgt_dbg(tgt, "ADISC cancel complete\n");
	/* Release the abort slot taken in ibmvfc_adisc_timeout() */
	vhost->abort_threads--;
	ibmvfc_free_event(evt);
	/* Drop the reference taken when the cancel was queued */
	kref_put(&tgt->kref, ibmvfc_release_tgt);
	wake_up(&vhost->work_wait_q);
}
3583
/**
 * ibmvfc_adisc_timeout - Handle an ADISC timeout
 * @tgt:		ibmvfc target struct
 *
 * If an ADISC times out, send a cancel. If the cancel times
 * out, reset the CRQ. When the ADISC comes back as cancelled,
 * log back into the target.
 **/
static void ibmvfc_adisc_timeout(struct ibmvfc_target *tgt)
{
	struct ibmvfc_host *vhost = tgt->vhost;
	struct ibmvfc_event *evt;
	struct ibmvfc_tmf *tmf;
	unsigned long flags;
	int rc;

	tgt_dbg(tgt, "ADISC timeout\n");
	spin_lock_irqsave(vhost->host->host_lock, flags);
	/* Only cancel if the ADISC is still outstanding and the host is
	 * still in the query-targets phase; also cap concurrent aborts. */
	if (vhost->abort_threads >= disc_threads ||
	    tgt->action != IBMVFC_TGT_ACTION_INIT_WAIT ||
	    vhost->state != IBMVFC_INITIALIZING ||
	    vhost->action != IBMVFC_HOST_ACTION_QUERY_TGTS) {
		spin_unlock_irqrestore(vhost->host->host_lock, flags);
		return;
	}

	vhost->abort_threads++;
	kref_get(&tgt->kref);
	evt = ibmvfc_get_event(vhost);
	ibmvfc_init_event(evt, ibmvfc_tgt_adisc_cancel_done, IBMVFC_MAD_FORMAT);

	evt->tgt = tgt;
	tmf = &evt->iu.tmf;
	memset(tmf, 0, sizeof(*tmf));
	tmf->common.version = 1;
	tmf->common.opcode = IBMVFC_TMF_MAD;
	tmf->common.length = sizeof(*tmf);
	tmf->scsi_id = tgt->scsi_id;
	/* cancel_key matches the key used on the ADISC passthru so the
	 * VIOS can identify which command to cancel */
	tmf->cancel_key = tgt->cancel_key;

	rc = ibmvfc_send_event(evt, vhost, default_timeout);

	if (rc) {
		/* Can't even send the cancel: fall back to a full host reset */
		tgt_err(tgt, "Failed to send cancel event for ADISC. rc=%d\n", rc);
		vhost->abort_threads--;
		kref_put(&tgt->kref, ibmvfc_release_tgt);
		__ibmvfc_reset_host(vhost);
	} else
		tgt_dbg(tgt, "Attempting to cancel ADISC\n");
	spin_unlock_irqrestore(vhost->host->host_lock, flags);
}
3635
/**
 * ibmvfc_tgt_adisc - Initiate an ADISC for specified target
 * @tgt:		ibmvfc target struct
 *
 * When sending an ADISC we end up with two timers running. The
 * first timer is the timer in the ibmvfc target struct. If this
 * fires, we send a cancel to the target. The second timer is the
 * timer on the ibmvfc event for the ADISC, which is longer. If that
 * fires, it means the ADISC timed out and our attempt to cancel it
 * also failed, so we need to reset the CRQ.
 **/
static void ibmvfc_tgt_adisc(struct ibmvfc_target *tgt)
{
	struct ibmvfc_passthru_mad *mad;
	struct ibmvfc_host *vhost = tgt->vhost;
	struct ibmvfc_event *evt;

	/* Bound the number of concurrent discovery commands */
	if (vhost->discovery_threads >= disc_threads)
		return;

	kref_get(&tgt->kref);
	evt = ibmvfc_get_event(vhost);
	vhost->discovery_threads++;
	ibmvfc_init_event(evt, ibmvfc_tgt_adisc_done, IBMVFC_MAD_FORMAT);
	evt->tgt = tgt;

	/* ADISC is sent as an ELS via the FC passthru MAD */
	ibmvfc_init_passthru(evt);
	mad = &evt->iu.passthru;
	mad->iu.flags = IBMVFC_FC_ELS;
	mad->iu.scsi_id = tgt->scsi_id;
	mad->iu.cancel_key = tgt->cancel_key;

	/* Build the ADISC payload: our port name, node name, and N_Port ID */
	mad->fc_iu.payload[0] = IBMVFC_ADISC;
	memcpy(&mad->fc_iu.payload[2], &vhost->login_buf->resp.port_name,
	       sizeof(vhost->login_buf->resp.port_name));
	memcpy(&mad->fc_iu.payload[4], &vhost->login_buf->resp.node_name,
	       sizeof(vhost->login_buf->resp.node_name));
	/* N_Port IDs are 24 bits wide */
	mad->fc_iu.payload[6] = vhost->login_buf->resp.scsi_id & 0x00ffffff;

	/* Arm (or re-arm) the shorter cancel watchdog on the target */
	if (timer_pending(&tgt->timer))
		mod_timer(&tgt->timer, jiffies + (IBMVFC_ADISC_TIMEOUT * HZ));
	else {
		tgt->timer.data = (unsigned long) tgt;
		tgt->timer.expires = jiffies + (IBMVFC_ADISC_TIMEOUT * HZ);
		tgt->timer.function = (void (*)(unsigned long))ibmvfc_adisc_timeout;
		add_timer(&tgt->timer);
	}

	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
	if (ibmvfc_send_event(evt, vhost, IBMVFC_ADISC_PLUS_CANCEL_TIMEOUT)) {
		/* Send failed: undo the thread count, watchdog, action, ref */
		vhost->discovery_threads--;
		del_timer(&tgt->timer);
		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
		kref_put(&tgt->kref, ibmvfc_release_tgt);
	} else
		tgt_dbg(tgt, "Sent ADISC\n");
}
3693
/**
 * ibmvfc_tgt_query_target_done - Completion handler for Query Target MAD
 * @evt:	ibmvfc event struct
 *
 * On success, records the SCSI ID reported for the target's WWPN: if
 * the ID moved, an implicit logout is scheduled; otherwise an ADISC
 * re-validates the existing login. Failures are retried or cause the
 * rport to be deleted.
 **/
static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
{
	struct ibmvfc_target *tgt = evt->tgt;
	struct ibmvfc_host *vhost = evt->vhost;
	struct ibmvfc_query_tgt *rsp = &evt->xfer_iu->query_tgt;
	u32 status = rsp->common.status;
	int level = IBMVFC_DEFAULT_LOG_LEVEL;

	vhost->discovery_threads--;
	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
	switch (status) {
	case IBMVFC_MAD_SUCCESS:
		tgt_dbg(tgt, "Query Target succeeded\n");
		tgt->new_scsi_id = rsp->scsi_id;
		if (rsp->scsi_id != tgt->scsi_id)
			/* WWPN now lives at a different SCSI ID */
			ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
		else
			ibmvfc_init_tgt(tgt, ibmvfc_tgt_adisc);
		break;
	case IBMVFC_MAD_DRIVER_FAILED:
		break;
	case IBMVFC_MAD_CRQ_ERROR:
		ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target);
		break;
	case IBMVFC_MAD_FAILED:
	default:
		/* "Port name not registered" means the target is gone;
		 * other retryable errors get another attempt. */
		if ((rsp->status & IBMVFC_FABRIC_MAPPED) == IBMVFC_FABRIC_MAPPED &&
		    rsp->error == IBMVFC_UNABLE_TO_PERFORM_REQ &&
		    rsp->fc_explain == IBMVFC_PORT_NAME_NOT_REG)
			ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
		else if (ibmvfc_retry_cmd(rsp->status, rsp->error))
			level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target);
		else
			ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);

		tgt_log(tgt, level, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
			ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error,
			ibmvfc_get_fc_type(rsp->fc_type), rsp->fc_type,
			ibmvfc_get_gs_explain(rsp->fc_explain), rsp->fc_explain, status);
		break;
	};

	kref_put(&tgt->kref, ibmvfc_release_tgt);
	ibmvfc_free_event(evt);
	wake_up(&vhost->work_wait_q);
}
3745
3746 /**
3747  * ibmvfc_tgt_query_target - Initiate a Query Target for specified target
3748  * @tgt:        ibmvfc target struct
3749  *
3750  **/
3751 static void ibmvfc_tgt_query_target(struct ibmvfc_target *tgt)
3752 {
3753         struct ibmvfc_query_tgt *query_tgt;
3754         struct ibmvfc_host *vhost = tgt->vhost;
3755         struct ibmvfc_event *evt;
3756
3757         if (vhost->discovery_threads >= disc_threads)
3758                 return;
3759
3760         kref_get(&tgt->kref);
3761         evt = ibmvfc_get_event(vhost);
3762         vhost->discovery_threads++;
3763         evt->tgt = tgt;
3764         ibmvfc_init_event(evt, ibmvfc_tgt_query_target_done, IBMVFC_MAD_FORMAT);
3765         query_tgt = &evt->iu.query_tgt;
3766         memset(query_tgt, 0, sizeof(*query_tgt));
3767         query_tgt->common.version = 1;
3768         query_tgt->common.opcode = IBMVFC_QUERY_TARGET;
3769         query_tgt->common.length = sizeof(*query_tgt);
3770         query_tgt->wwpn = tgt->ids.port_name;
3771
3772         ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
3773         if (ibmvfc_send_event(evt, vhost, default_timeout)) {
3774                 vhost->discovery_threads--;
3775                 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
3776                 kref_put(&tgt->kref, ibmvfc_release_tgt);
3777         } else
3778                 tgt_dbg(tgt, "Sent Query Target\n");
3779 }
3780
/**
 * ibmvfc_alloc_target - Allocate and initialize an ibmvfc target
 * @vhost:		ibmvfc host struct
 * @scsi_id:	SCSI ID to allocate target for
 *
 * If a target with this SCSI ID already exists it is reused (and
 * re-logged-in if needed); otherwise a new target is taken from the
 * host's mempool and queued for implicit logout + login.
 *
 * Returns:
 *	0 on success / other on failure
 **/
static int ibmvfc_alloc_target(struct ibmvfc_host *vhost, u64 scsi_id)
{
	struct ibmvfc_target *tgt;
	unsigned long flags;

	/* First look for an existing target under the host lock */
	spin_lock_irqsave(vhost->host->host_lock, flags);
	list_for_each_entry(tgt, &vhost->targets, queue) {
		if (tgt->scsi_id == scsi_id) {
			if (tgt->need_login)
				ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
			goto unlock_out;
		}
	}
	spin_unlock_irqrestore(vhost->host->host_lock, flags);

	/* Not found: allocate outside the lock (GFP_NOIO may sleep) */
	tgt = mempool_alloc(vhost->tgt_pool, GFP_NOIO);
	if (!tgt) {
		dev_err(vhost->dev, "Target allocation failure for scsi id %08llx\n",
			scsi_id);
		return -ENOMEM;
	}

	memset(tgt, 0, sizeof(*tgt));
	tgt->scsi_id = scsi_id;
	tgt->new_scsi_id = scsi_id;
	tgt->vhost = vhost;
	tgt->need_login = 1;
	/* Unique cancel key so outstanding commands can be aborted per-target */
	tgt->cancel_key = vhost->task_set++;
	init_timer(&tgt->timer);
	kref_init(&tgt->kref);
	ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
	spin_lock_irqsave(vhost->host->host_lock, flags);
	list_add_tail(&tgt->queue, &vhost->targets);

unlock_out:
	spin_unlock_irqrestore(vhost->host->host_lock, flags);
	return 0;
}
3827
3828 /**
3829  * ibmvfc_alloc_targets - Allocate and initialize ibmvfc targets
3830  * @vhost:              ibmvfc host struct
3831  *
3832  * Returns:
3833  *      0 on success / other on failure
3834  **/
3835 static int ibmvfc_alloc_targets(struct ibmvfc_host *vhost)
3836 {
3837         int i, rc;
3838
3839         for (i = 0, rc = 0; !rc && i < vhost->num_targets; i++)
3840                 rc = ibmvfc_alloc_target(vhost,
3841                                          vhost->disc_buf->scsi_id[i] & IBMVFC_DISC_TGT_SCSI_ID_MASK);
3842
3843         return rc;
3844 }
3845
/**
 * ibmvfc_discover_targets_done - Completion handler for discover targets MAD
 * @evt:	ibmvfc event struct
 *
 * Records how many targets were written to the discovery buffer and
 * advances the host state machine to target allocation, retrying or
 * taking the link down on failure.
 **/
static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt)
{
	struct ibmvfc_host *vhost = evt->vhost;
	struct ibmvfc_discover_targets *rsp = &evt->xfer_iu->discover_targets;
	u32 mad_status = rsp->common.status;
	int level = IBMVFC_DEFAULT_LOG_LEVEL;

	switch (mad_status) {
	case IBMVFC_MAD_SUCCESS:
		ibmvfc_dbg(vhost, "Discover Targets succeeded\n");
		vhost->num_targets = rsp->num_written;
		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_ALLOC_TGTS);
		break;
	case IBMVFC_MAD_FAILED:
		/* Retry host init; the retry bumps the log level */
		level += ibmvfc_retry_host_init(vhost);
		ibmvfc_log(vhost, level, "Discover Targets failed: %s (%x:%x)\n",
			   ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error);
		break;
	case IBMVFC_MAD_DRIVER_FAILED:
		break;
	default:
		/* Unrecognized status: give up on the link */
		dev_err(vhost->dev, "Invalid Discover Targets response: 0x%x\n", mad_status);
		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
		break;
	}

	ibmvfc_free_event(evt);
	wake_up(&vhost->work_wait_q);
}
3880
3881 /**
3882  * ibmvfc_discover_targets - Send Discover Targets MAD
3883  * @vhost:      ibmvfc host struct
3884  *
3885  **/
3886 static void ibmvfc_discover_targets(struct ibmvfc_host *vhost)
3887 {
3888         struct ibmvfc_discover_targets *mad;
3889         struct ibmvfc_event *evt = ibmvfc_get_event(vhost);
3890
3891         ibmvfc_init_event(evt, ibmvfc_discover_targets_done, IBMVFC_MAD_FORMAT);
3892         mad = &evt->iu.discover_targets;
3893         memset(mad, 0, sizeof(*mad));
3894         mad->common.version = 1;
3895         mad->common.opcode = IBMVFC_DISC_TARGETS;
3896         mad->common.length = sizeof(*mad);
3897         mad->bufflen = vhost->disc_buf_sz;
3898         mad->buffer.va = vhost->disc_buf_dma;
3899         mad->buffer.len = vhost->disc_buf_sz;
3900         ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
3901
3902         if (!ibmvfc_send_event(evt, vhost, default_timeout))
3903                 ibmvfc_dbg(vhost, "Sent discover targets\n");
3904         else
3905                 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
3906 }
3907
/**
 * ibmvfc_npiv_login_done - Completion handler for NPIV Login
 * @evt:	ibmvfc event struct
 *
 * Validates the login response, publishes the adapter's FC attributes
 * to the transport class, sizes the command queue, and advances the
 * host state machine to the query phase. Failures retry host init or
 * take the link down.
 **/
static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
{
	struct ibmvfc_host *vhost = evt->vhost;
	u32 mad_status = evt->xfer_iu->npiv_login.common.status;
	struct ibmvfc_npiv_login_resp *rsp = &vhost->login_buf->resp;
	unsigned int npiv_max_sectors;
	int level = IBMVFC_DEFAULT_LOG_LEVEL;

	switch (mad_status) {
	case IBMVFC_MAD_SUCCESS:
		ibmvfc_free_event(evt);
		break;
	case IBMVFC_MAD_FAILED:
		if (ibmvfc_retry_cmd(rsp->status, rsp->error))
			level += ibmvfc_retry_host_init(vhost);
		else
			ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
		ibmvfc_log(vhost, level, "NPIV Login failed: %s (%x:%x)\n",
			   ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error);
		ibmvfc_free_event(evt);
		return;
	case IBMVFC_MAD_CRQ_ERROR:
		ibmvfc_retry_host_init(vhost);
		/* fall through */
	case IBMVFC_MAD_DRIVER_FAILED:
		ibmvfc_free_event(evt);
		return;
	default:
		dev_err(vhost->dev, "Invalid NPIV Login response: 0x%x\n", mad_status);
		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
		ibmvfc_free_event(evt);
		return;
	}

	vhost->client_migrated = 0;

	/* Reject virtual adapters that don't speak native FC */
	if (!(rsp->flags & IBMVFC_NATIVE_FC)) {
		dev_err(vhost->dev, "Virtual adapter does not support FC. %x\n",
			rsp->flags);
		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
		wake_up(&vhost->work_wait_q);
		return;
	}

	/* Need room for the driver's internal requests on top of I/O */
	if (rsp->max_cmds <= IBMVFC_NUM_INTERNAL_REQ) {
		dev_err(vhost->dev, "Virtual adapter supported queue depth too small: %d\n",
			rsp->max_cmds);
		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
		wake_up(&vhost->work_wait_q);
		return;
	}

	vhost->logged_in = 1;
	/* max_dma_len is in bytes; >> 9 converts to 512-byte sectors */
	npiv_max_sectors = min((uint)(rsp->max_dma_len >> 9), IBMVFC_MAX_SECTORS);
	dev_info(vhost->dev, "Host partition: %s, device: %s %s %s max sectors %u\n",
		 rsp->partition_name, rsp->device_name, rsp->port_loc_code,
		 rsp->drc_name, npiv_max_sectors);

	/* Publish the adapter identity to the FC transport class */
	fc_host_fabric_name(vhost->host) = rsp->node_name;
	fc_host_node_name(vhost->host) = rsp->node_name;
	fc_host_port_name(vhost->host) = rsp->port_name;
	fc_host_port_id(vhost->host) = rsp->scsi_id;
	fc_host_port_type(vhost->host) = FC_PORTTYPE_NPIV;
	fc_host_supported_classes(vhost->host) = 0;
	/* Bit 31 of each class parms word indicates the class is valid */
	if (rsp->service_parms.class1_parms[0] & 0x80000000)
		fc_host_supported_classes(vhost->host) |= FC_COS_CLASS1;
	if (rsp->service_parms.class2_parms[0] & 0x80000000)
		fc_host_supported_classes(vhost->host) |= FC_COS_CLASS2;
	if (rsp->service_parms.class3_parms[0] & 0x80000000)
		fc_host_supported_classes(vhost->host) |= FC_COS_CLASS3;
	fc_host_maxframe_size(vhost->host) =
		rsp->service_parms.common.bb_rcv_sz & 0x0fff;

	vhost->host->can_queue = rsp->max_cmds - IBMVFC_NUM_INTERNAL_REQ;
	vhost->host->max_sectors = npiv_max_sectors;
	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
	wake_up(&vhost->work_wait_q);
}
3990
3991 /**
3992  * ibmvfc_npiv_login - Sends NPIV login
3993  * @vhost:      ibmvfc host struct
3994  *
3995  **/
3996 static void ibmvfc_npiv_login(struct ibmvfc_host *vhost)
3997 {
3998         struct ibmvfc_npiv_login_mad *mad;
3999         struct ibmvfc_event *evt = ibmvfc_get_event(vhost);
4000
4001         ibmvfc_gather_partition_info(vhost);
4002         ibmvfc_set_login_info(vhost);
4003         ibmvfc_init_event(evt, ibmvfc_npiv_login_done, IBMVFC_MAD_FORMAT);
4004
4005         memcpy(vhost->login_buf, &vhost->login_info, sizeof(vhost->login_info));
4006         mad = &evt->iu.npiv_login;
4007         memset(mad, 0, sizeof(struct ibmvfc_npiv_login_mad));
4008         mad->common.version = 1;
4009         mad->common.opcode = IBMVFC_NPIV_LOGIN;
4010         mad->common.length = sizeof(struct ibmvfc_npiv_login_mad);
4011         mad->buffer.va = vhost->login_buf_dma;
4012         mad->buffer.len = sizeof(*vhost->login_buf);
4013
4014         ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
4015
4016         if (!ibmvfc_send_event(evt, vhost, default_timeout))
4017                 ibmvfc_dbg(vhost, "Sent NPIV login\n");
4018         else
4019                 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
4020 };
4021
/**
 * ibmvfc_npiv_logout_done - Completion handler for NPIV Logout
 * @evt:		ibmvfc event struct
 *
 * On a clean logout with no outstanding commands, re-initializes the
 * host. Any other outcome falls through to a hard host reset.
 **/
static void ibmvfc_npiv_logout_done(struct ibmvfc_event *evt)
{
	struct ibmvfc_host *vhost = evt->vhost;
	u32 mad_status = evt->xfer_iu->npiv_logout.common.status;

	/* Status captured above; event no longer needed */
	ibmvfc_free_event(evt);

	switch (mad_status) {
	case IBMVFC_MAD_SUCCESS:
		/* Only restart cleanly if nothing is in flight and we are
		 * still waiting on this logout */
		if (list_empty(&vhost->sent) &&
		    vhost->action == IBMVFC_HOST_ACTION_LOGO_WAIT) {
			ibmvfc_init_host(vhost);
			return;
		}
		break;
	case IBMVFC_MAD_FAILED:
	case IBMVFC_MAD_NOT_SUPPORTED:
	case IBMVFC_MAD_CRQ_ERROR:
	case IBMVFC_MAD_DRIVER_FAILED:
	default:
		ibmvfc_dbg(vhost, "NPIV Logout failed. 0x%X\n", mad_status);
		break;
	}

	/* Logout didn't complete cleanly: force a full reset */
	ibmvfc_hard_reset_host(vhost);
}
4053
4054 /**
4055  * ibmvfc_npiv_logout - Issue an NPIV Logout
4056  * @vhost:              ibmvfc host struct
4057  *
4058  **/
4059 static void ibmvfc_npiv_logout(struct ibmvfc_host *vhost)
4060 {
4061         struct ibmvfc_npiv_logout_mad *mad;
4062         struct ibmvfc_event *evt;
4063
4064         evt = ibmvfc_get_event(vhost);
4065         ibmvfc_init_event(evt, ibmvfc_npiv_logout_done, IBMVFC_MAD_FORMAT);
4066
4067         mad = &evt->iu.npiv_logout;
4068         memset(mad, 0, sizeof(*mad));
4069         mad->common.version = 1;
4070         mad->common.opcode = IBMVFC_NPIV_LOGOUT;
4071         mad->common.length = sizeof(struct ibmvfc_npiv_logout_mad);
4072
4073         ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_LOGO_WAIT);
4074
4075         if (!ibmvfc_send_event(evt, vhost, default_timeout))
4076                 ibmvfc_dbg(vhost, "Sent NPIV logout\n");
4077         else
4078                 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
4079 }
4080
4081 /**
4082  * ibmvfc_dev_init_to_do - Is there target initialization work to do?
4083  * @vhost:              ibmvfc host struct
4084  *
4085  * Returns:
4086  *      1 if work to do / 0 if not
4087  **/
4088 static int ibmvfc_dev_init_to_do(struct ibmvfc_host *vhost)
4089 {
4090         struct ibmvfc_target *tgt;
4091
4092         list_for_each_entry(tgt, &vhost->targets, queue) {
4093                 if (tgt->action == IBMVFC_TGT_ACTION_INIT ||
4094                     tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT)
4095                         return 1;
4096         }
4097
4098         return 0;
4099 }
4100
4101 /**
4102  * __ibmvfc_work_to_do - Is there task level work to do? (no locking)
4103  * @vhost:              ibmvfc host struct
4104  *
4105  * Returns:
4106  *      1 if work to do / 0 if not
4107  **/
4108 static int __ibmvfc_work_to_do(struct ibmvfc_host *vhost)
4109 {
4110         struct ibmvfc_target *tgt;
4111
4112         if (kthread_should_stop())
4113                 return 1;
4114         switch (vhost->action) {
4115         case IBMVFC_HOST_ACTION_NONE:
4116         case IBMVFC_HOST_ACTION_INIT_WAIT:
4117         case IBMVFC_HOST_ACTION_LOGO_WAIT:
4118                 return 0;
4119         case IBMVFC_HOST_ACTION_TGT_INIT:
4120         case IBMVFC_HOST_ACTION_QUERY_TGTS:
4121                 if (vhost->discovery_threads == disc_threads)
4122                         return 0;
4123                 list_for_each_entry(tgt, &vhost->targets, queue)
4124                         if (tgt->action == IBMVFC_TGT_ACTION_INIT)
4125                                 return 1;
4126                 list_for_each_entry(tgt, &vhost->targets, queue)
4127                         if (tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT)
4128                                 return 0;
4129                 return 1;
4130         case IBMVFC_HOST_ACTION_LOGO:
4131         case IBMVFC_HOST_ACTION_INIT:
4132         case IBMVFC_HOST_ACTION_ALLOC_TGTS:
4133         case IBMVFC_HOST_ACTION_TGT_DEL:
4134         case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
4135         case IBMVFC_HOST_ACTION_QUERY:
4136         case IBMVFC_HOST_ACTION_RESET:
4137         case IBMVFC_HOST_ACTION_REENABLE:
4138         default:
4139                 break;
4140         };
4141
4142         return 1;
4143 }
4144
4145 /**
4146  * ibmvfc_work_to_do - Is there task level work to do?
4147  * @vhost:              ibmvfc host struct
4148  *
4149  * Returns:
4150  *      1 if work to do / 0 if not
4151  **/
4152 static int ibmvfc_work_to_do(struct ibmvfc_host *vhost)
4153 {
4154         unsigned long flags;
4155         int rc;
4156
4157         spin_lock_irqsave(vhost->host->host_lock, flags);
4158         rc = __ibmvfc_work_to_do(vhost);
4159         spin_unlock_irqrestore(vhost->host->host_lock, flags);
4160         return rc;
4161 }
4162
4163 /**
4164  * ibmvfc_log_ae - Log async events if necessary
4165  * @vhost:              ibmvfc host struct
4166  * @events:             events to log
4167  *
4168  **/
4169 static void ibmvfc_log_ae(struct ibmvfc_host *vhost, int events)
4170 {
4171         if (events & IBMVFC_AE_RSCN)
4172                 fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_RSCN, 0);
4173         if ((events & IBMVFC_AE_LINKDOWN) &&
4174             vhost->state >= IBMVFC_HALTED)
4175                 fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_LINKDOWN, 0);
4176         if ((events & IBMVFC_AE_LINKUP) &&
4177             vhost->state == IBMVFC_INITIALIZING)
4178                 fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_LINKUP, 0);
4179 }
4180
4181 /**
4182  * ibmvfc_tgt_add_rport - Tell the FC transport about a new remote port
4183  * @tgt:                ibmvfc target struct
4184  *
4185  **/
4186 static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt)
4187 {
4188         struct ibmvfc_host *vhost = tgt->vhost;
4189         struct fc_rport *rport;
4190         unsigned long flags;
4191
4192         tgt_dbg(tgt, "Adding rport\n");
4193         rport = fc_remote_port_add(vhost->host, 0, &tgt->ids);
4194         spin_lock_irqsave(vhost->host->host_lock, flags);
4195
4196         if (rport && tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) {
4197                 tgt_dbg(tgt, "Deleting rport\n");
4198                 list_del(&tgt->queue);
4199                 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DELETED_RPORT);
4200                 spin_unlock_irqrestore(vhost->host->host_lock, flags);
4201                 fc_remote_port_delete(rport);
4202                 del_timer_sync(&tgt->timer);
4203                 kref_put(&tgt->kref, ibmvfc_release_tgt);
4204                 return;
4205         } else if (rport && tgt->action == IBMVFC_TGT_ACTION_DELETED_RPORT) {
4206                 spin_unlock_irqrestore(vhost->host->host_lock, flags);
4207                 return;
4208         }
4209
4210         if (rport) {
4211                 tgt_dbg(tgt, "rport add succeeded\n");
4212                 tgt->rport = rport;
4213                 rport->maxframe_size = tgt->service_parms.common.bb_rcv_sz & 0x0fff;
4214                 rport->supported_classes = 0;
4215                 tgt->target_id = rport->scsi_target_id;
4216                 if (tgt->service_parms.class1_parms[0] & 0x80000000)
4217                         rport->supported_classes |= FC_COS_CLASS1;
4218                 if (tgt->service_parms.class2_parms[0] & 0x80000000)
4219                         rport->supported_classes |= FC_COS_CLASS2;
4220                 if (tgt->service_parms.class3_parms[0] & 0x80000000)
4221                         rport->supported_classes |= FC_COS_CLASS3;
4222                 if (rport->rqst_q)
4223                         blk_queue_max_segments(rport->rqst_q, 1);
4224         } else
4225                 tgt_dbg(tgt, "rport add failed\n");
4226         spin_unlock_irqrestore(vhost->host->host_lock, flags);
4227 }
4228
4229 /**
4230  * ibmvfc_do_work - Do task level work
4231  * @vhost:              ibmvfc host struct
4232  *
4233  **/
4234 static void ibmvfc_do_work(struct ibmvfc_host *vhost)
4235 {
4236         struct ibmvfc_target *tgt;
4237         unsigned long flags;
4238         struct fc_rport *rport;
4239         int rc;
4240
4241         ibmvfc_log_ae(vhost, vhost->events_to_log);
4242         spin_lock_irqsave(vhost->host->host_lock, flags);
4243         vhost->events_to_log = 0;
4244         switch (vhost->action) {
4245         case IBMVFC_HOST_ACTION_NONE:
4246         case IBMVFC_HOST_ACTION_LOGO_WAIT:
4247         case IBMVFC_HOST_ACTION_INIT_WAIT:
4248                 break;
4249         case IBMVFC_HOST_ACTION_RESET:
4250                 vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
4251                 spin_unlock_irqrestore(vhost->host->host_lock, flags);
4252                 rc = ibmvfc_reset_crq(vhost);
4253                 spin_lock_irqsave(vhost->host->host_lock, flags);
4254                 if (rc || (rc = ibmvfc_send_crq_init(vhost)) ||
4255                     (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) {
4256                         ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
4257                         dev_err(vhost->dev, "Error after reset (rc=%d)\n", rc);
4258                 }
4259                 break;
4260         case IBMVFC_HOST_ACTION_REENABLE:
4261                 vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
4262                 spin_unlock_irqrestore(vhost->host->host_lock, flags);
4263                 rc = ibmvfc_reenable_crq_queue(vhost);
4264                 spin_lock_irqsave(vhost->host->host_lock, flags);
4265                 if (rc || (rc = ibmvfc_send_crq_init(vhost))) {
4266                         ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
4267                         dev_err(vhost->dev, "Error after enable (rc=%d)\n", rc);
4268                 }
4269                 break;
4270         case IBMVFC_HOST_ACTION_LOGO:
4271                 vhost->job_step(vhost);
4272                 break;
4273         case IBMVFC_HOST_ACTION_INIT:
4274                 BUG_ON(vhost->state != IBMVFC_INITIALIZING);
4275                 if (vhost->delay_init) {
4276                         vhost->delay_init = 0;
4277                         spin_unlock_irqrestore(vhost->host->host_lock, flags);
4278                         ssleep(15);
4279                         return;
4280                 } else
4281                         vhost->job_step(vhost);
4282                 break;
4283         case IBMVFC_HOST_ACTION_QUERY:
4284                 list_for_each_entry(tgt, &vhost->targets, queue)
4285                         ibmvfc_init_tgt(tgt, ibmvfc_tgt_query_target);
4286                 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY_TGTS);
4287                 break;
4288         case IBMVFC_HOST_ACTION_QUERY_TGTS:
4289                 list_for_each_entry(tgt, &vhost->targets, queue) {
4290                         if (tgt->action == IBMVFC_TGT_ACTION_INIT) {
4291                                 tgt->job_step(tgt);
4292                                 break;
4293                         }
4294                 }
4295
4296                 if (!ibmvfc_dev_init_to_do(vhost))
4297                         ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL);
4298                 break;
4299         case IBMVFC_HOST_ACTION_TGT_DEL:
4300         case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
4301                 list_for_each_entry(tgt, &vhost->targets, queue) {
4302                         if (tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) {
4303                                 tgt_dbg(tgt, "Deleting rport\n");
4304                                 rport = tgt->rport;
4305                                 tgt->rport = NULL;
4306                                 list_del(&tgt->queue);
4307                                 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DELETED_RPORT);
4308                                 spin_unlock_irqrestore(vhost->host->host_lock, flags);
4309                                 if (rport)
4310                                         fc_remote_port_delete(rport);
4311                                 del_timer_sync(&tgt->timer);
4312                                 kref_put(&tgt->kref, ibmvfc_release_tgt);
4313                                 return;
4314                         }
4315                 }
4316
4317                 if (vhost->state == IBMVFC_INITIALIZING) {
4318                         if (vhost->action == IBMVFC_HOST_ACTION_TGT_DEL_FAILED) {
4319                                 if (vhost->reinit) {
4320                                         vhost->reinit = 0;
4321                                         scsi_block_requests(vhost->host);
4322                                         ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
4323                                         spin_unlock_irqrestore(vhost->host->host_lock, flags);
4324                                 } else {
4325                                         ibmvfc_set_host_state(vhost, IBMVFC_ACTIVE);
4326                                         ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
4327                                         wake_up(&vhost->init_wait_q);
4328                                         schedule_work(&vhost->rport_add_work_q);
4329                                         vhost->init_retries = 0;
4330                                         spin_unlock_irqrestore(vhost->host->host_lock, flags);
4331                                         scsi_unblock_requests(vhost->host);
4332                                 }
4333
4334                                 return;
4335                         } else {
4336                                 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
4337                                 vhost->job_step = ibmvfc_discover_targets;
4338                         }
4339                 } else {
4340                         ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
4341                         spin_unlock_irqrestore(vhost->host->host_lock, flags);
4342                         scsi_unblock_requests(vhost->host);
4343                         wake_up(&vhost->init_wait_q);
4344                         return;
4345                 }
4346                 break;
4347         case IBMVFC_HOST_ACTION_ALLOC_TGTS:
4348                 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_INIT);
4349                 spin_unlock_irqrestore(vhost->host->host_lock, flags);
4350                 ibmvfc_alloc_targets(vhost);
4351                 spin_lock_irqsave(vhost->host->host_lock, flags);
4352                 break;
4353         case IBMVFC_HOST_ACTION_TGT_INIT:
4354                 list_for_each_entry(tgt, &vhost->targets, queue) {
4355                         if (tgt->action == IBMVFC_TGT_ACTION_INIT) {
4356                                 tgt->job_step(tgt);
4357                                 break;
4358                         }
4359                 }
4360
4361                 if (!ibmvfc_dev_init_to_do(vhost))
4362                         ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL_FAILED);
4363                 break;
4364         default:
4365                 break;
4366         };
4367
4368         spin_unlock_irqrestore(vhost->host->host_lock, flags);
4369 }
4370
4371 /**
4372  * ibmvfc_work - Do task level work
4373  * @data:               ibmvfc host struct
4374  *
4375  * Returns:
4376  *      zero
4377  **/
4378 static int ibmvfc_work(void *data)
4379 {
4380         struct ibmvfc_host *vhost = data;
4381         int rc;
4382
4383         set_user_nice(current, -20);
4384
4385         while (1) {
4386                 rc = wait_event_interruptible(vhost->work_wait_q,
4387                                               ibmvfc_work_to_do(vhost));
4388
4389                 BUG_ON(rc);
4390
4391                 if (kthread_should_stop())
4392                         break;
4393
4394                 ibmvfc_do_work(vhost);
4395         }
4396
4397         ibmvfc_dbg(vhost, "ibmvfc kthread exiting...\n");
4398         return 0;
4399 }
4400
4401 /**
4402  * ibmvfc_init_crq - Initializes and registers CRQ with hypervisor
4403  * @vhost:      ibmvfc host struct
4404  *
4405  * Allocates a page for messages, maps it for dma, and registers
4406  * the crq with the hypervisor.
4407  *
4408  * Return value:
4409  *      zero on success / other on failure
4410  **/
4411 static int ibmvfc_init_crq(struct ibmvfc_host *vhost)
4412 {
4413         int rc, retrc = -ENOMEM;
4414         struct device *dev = vhost->dev;
4415         struct vio_dev *vdev = to_vio_dev(dev);
4416         struct ibmvfc_crq_queue *crq = &vhost->crq;
4417
4418         ENTER;
4419         crq->msgs = (struct ibmvfc_crq *)get_zeroed_page(GFP_KERNEL);
4420
4421         if (!crq->msgs)
4422                 return -ENOMEM;
4423
4424         crq->size = PAGE_SIZE / sizeof(*crq->msgs);
4425         crq->msg_token = dma_map_single(dev, crq->msgs,
4426                                         PAGE_SIZE, DMA_BIDIRECTIONAL);
4427
4428         if (dma_mapping_error(dev, crq->msg_token))
4429                 goto map_failed;
4430
4431         retrc = rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
4432                                         crq->msg_token, PAGE_SIZE);
4433
4434         if (rc == H_RESOURCE)
4435                 /* maybe kexecing and resource is busy. try a reset */
4436                 retrc = rc = ibmvfc_reset_crq(vhost);
4437
4438         if (rc == H_CLOSED)
4439                 dev_warn(dev, "Partner adapter not ready\n");
4440         else if (rc) {
4441                 dev_warn(dev, "Error %d opening adapter\n", rc);
4442                 goto reg_crq_failed;
4443         }
4444
4445         retrc = 0;
4446
4447         tasklet_init(&vhost->tasklet, (void *)ibmvfc_tasklet, (unsigned long)vhost);
4448
4449         if ((rc = request_irq(vdev->irq, ibmvfc_interrupt, 0, IBMVFC_NAME, vhost))) {
4450                 dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n", vdev->irq, rc);
4451                 goto req_irq_failed;
4452         }
4453
4454         if ((rc = vio_enable_interrupts(vdev))) {
4455                 dev_err(dev, "Error %d enabling interrupts\n", rc);
4456                 goto req_irq_failed;
4457         }
4458
4459         crq->cur = 0;
4460         LEAVE;
4461         return retrc;
4462
4463 req_irq_failed:
4464         tasklet_kill(&vhost->tasklet);
4465         do {
4466                 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
4467         } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
4468 reg_crq_failed:
4469         dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
4470 map_failed:
4471         free_page((unsigned long)crq->msgs);
4472         return retrc;
4473 }
4474
4475 /**
4476  * ibmvfc_free_mem - Free memory for vhost
4477  * @vhost:      ibmvfc host struct
4478  *
4479  * Return value:
4480  *      none
4481  **/
4482 static void ibmvfc_free_mem(struct ibmvfc_host *vhost)
4483 {
4484         struct ibmvfc_async_crq_queue *async_q = &vhost->async_crq;
4485
4486         ENTER;
4487         mempool_destroy(vhost->tgt_pool);
4488         kfree(vhost->trace);
4489         dma_free_coherent(vhost->dev, vhost->disc_buf_sz, vhost->disc_buf,
4490                           vhost->disc_buf_dma);
4491         dma_free_coherent(vhost->dev, sizeof(*vhost->login_buf),
4492                           vhost->login_buf, vhost->login_buf_dma);
4493         dma_pool_destroy(vhost->sg_pool);
4494         dma_unmap_single(vhost->dev, async_q->msg_token,
4495                          async_q->size * sizeof(*async_q->msgs), DMA_BIDIRECTIONAL);
4496         free_page((unsigned long)async_q->msgs);
4497         LEAVE;
4498 }
4499
4500 /**
4501  * ibmvfc_alloc_mem - Allocate memory for vhost
4502  * @vhost:      ibmvfc host struct
4503  *
4504  * Return value:
4505  *      0 on success / non-zero on failure
4506  **/
4507 static int ibmvfc_alloc_mem(struct ibmvfc_host *vhost)
4508 {
4509         struct ibmvfc_async_crq_queue *async_q = &vhost->async_crq;
4510         struct device *dev = vhost->dev;
4511
4512         ENTER;
4513         async_q->msgs = (struct ibmvfc_async_crq *)get_zeroed_page(GFP_KERNEL);
4514         if (!async_q->msgs) {
4515                 dev_err(dev, "Couldn't allocate async queue.\n");
4516                 goto nomem;
4517         }
4518
4519         async_q->size = PAGE_SIZE / sizeof(struct ibmvfc_async_crq);
4520         async_q->msg_token = dma_map_single(dev, async_q->msgs,
4521                                             async_q->size * sizeof(*async_q->msgs),
4522                                             DMA_BIDIRECTIONAL);
4523
4524         if (dma_mapping_error(dev, async_q->msg_token)) {
4525                 dev_err(dev, "Failed to map async queue\n");
4526                 goto free_async_crq;
4527         }
4528
4529         vhost->sg_pool = dma_pool_create(IBMVFC_NAME, dev,
4530                                          SG_ALL * sizeof(struct srp_direct_buf),
4531                                          sizeof(struct srp_direct_buf), 0);
4532
4533         if (!vhost->sg_pool) {
4534                 dev_err(dev, "Failed to allocate sg pool\n");
4535                 goto unmap_async_crq;
4536         }
4537
4538         vhost->login_buf = dma_alloc_coherent(dev, sizeof(*vhost->login_buf),
4539                                               &vhost->login_buf_dma, GFP_KERNEL);
4540
4541         if (!vhost->login_buf) {
4542                 dev_err(dev, "Couldn't allocate NPIV login buffer\n");
4543                 goto free_sg_pool;
4544         }
4545
4546         vhost->disc_buf_sz = sizeof(vhost->disc_buf->scsi_id[0]) * max_targets;
4547         vhost->disc_buf = dma_alloc_coherent(dev, vhost->disc_buf_sz,
4548                                              &vhost->disc_buf_dma, GFP_KERNEL);
4549
4550         if (!vhost->disc_buf) {
4551                 dev_err(dev, "Couldn't allocate Discover Targets buffer\n");
4552                 goto free_login_buffer;
4553         }
4554
4555         vhost->trace = kcalloc(IBMVFC_NUM_TRACE_ENTRIES,
4556                                sizeof(struct ibmvfc_trace_entry), GFP_KERNEL);
4557
4558         if (!vhost->trace)
4559                 goto free_disc_buffer;
4560
4561         vhost->tgt_pool = mempool_create_kmalloc_pool(IBMVFC_TGT_MEMPOOL_SZ,
4562                                                       sizeof(struct ibmvfc_target));
4563
4564         if (!vhost->tgt_pool) {
4565                 dev_err(dev, "Couldn't allocate target memory pool\n");
4566                 goto free_trace;
4567         }
4568
4569         LEAVE;
4570         return 0;
4571
4572 free_trace:
4573         kfree(vhost->trace);
4574 free_disc_buffer:
4575         dma_free_coherent(dev, vhost->disc_buf_sz, vhost->disc_buf,
4576                           vhost->disc_buf_dma);
4577 free_login_buffer:
4578         dma_free_coherent(dev, sizeof(*vhost->login_buf),
4579                           vhost->login_buf, vhost->login_buf_dma);
4580 free_sg_pool:
4581         dma_pool_destroy(vhost->sg_pool);
4582 unmap_async_crq:
4583         dma_unmap_single(dev, async_q->msg_token,
4584                          async_q->size * sizeof(*async_q->msgs), DMA_BIDIRECTIONAL);
4585 free_async_crq:
4586         free_page((unsigned long)async_q->msgs);
4587 nomem:
4588         LEAVE;
4589         return -ENOMEM;
4590 }
4591
4592 /**
4593  * ibmvfc_rport_add_thread - Worker thread for rport adds
4594  * @work:       work struct
4595  *
4596  **/
4597 static void ibmvfc_rport_add_thread(struct work_struct *work)
4598 {
4599         struct ibmvfc_host *vhost = container_of(work, struct ibmvfc_host,
4600                                                  rport_add_work_q);
4601         struct ibmvfc_target *tgt;
4602         struct fc_rport *rport;
4603         unsigned long flags;
4604         int did_work;
4605
4606         ENTER;
4607         spin_lock_irqsave(vhost->host->host_lock, flags);
4608         do {
4609                 did_work = 0;
4610                 if (vhost->state != IBMVFC_ACTIVE)
4611                         break;
4612
4613                 list_for_each_entry(tgt, &vhost->targets, queue) {
4614                         if (tgt->add_rport) {
4615                                 did_work = 1;
4616                                 tgt->add_rport = 0;
4617                                 kref_get(&tgt->kref);
4618                                 rport = tgt->rport;
4619                                 if (!rport) {
4620                                         spin_unlock_irqrestore(vhost->host->host_lock, flags);
4621                                         ibmvfc_tgt_add_rport(tgt);
4622                                 } else if (get_device(&rport->dev)) {
4623                                         spin_unlock_irqrestore(vhost->host->host_lock, flags);
4624                                         tgt_dbg(tgt, "Setting rport roles\n");
4625                                         fc_remote_port_rolechg(rport, tgt->ids.roles);
4626                                         put_device(&rport->dev);
4627                                 }
4628
4629                                 kref_put(&tgt->kref, ibmvfc_release_tgt);
4630                                 spin_lock_irqsave(vhost->host->host_lock, flags);
4631                                 break;
4632                         }
4633                 }
4634         } while(did_work);
4635
4636         if (vhost->state == IBMVFC_ACTIVE)
4637                 vhost->scan_complete = 1;
4638         spin_unlock_irqrestore(vhost->host->host_lock, flags);
4639         LEAVE;
4640 }
4641
4642 /**
4643  * ibmvfc_probe - Adapter hot plug add entry point
4644  * @vdev:       vio device struct
4645  * @id: vio device id struct
4646  *
4647  * Return value:
4648  *      0 on success / non-zero on failure
4649  **/
4650 static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
4651 {
4652         struct ibmvfc_host *vhost;
4653         struct Scsi_Host *shost;
4654         struct device *dev = &vdev->dev;
4655         int rc = -ENOMEM;
4656
4657         ENTER;
4658         shost = scsi_host_alloc(&driver_template, sizeof(*vhost));
4659         if (!shost) {
4660                 dev_err(dev, "Couldn't allocate host data\n");
4661                 goto out;
4662         }
4663
4664         shost->transportt = ibmvfc_transport_template;
4665         shost->can_queue = max_requests;
4666         shost->max_lun = max_lun;
4667         shost->max_id = max_targets;
4668         shost->max_sectors = IBMVFC_MAX_SECTORS;
4669         shost->max_cmd_len = IBMVFC_MAX_CDB_LEN;
4670         shost->unique_id = shost->host_no;
4671
4672         vhost = shost_priv(shost);
4673         INIT_LIST_HEAD(&vhost->sent);
4674         INIT_LIST_HEAD(&vhost->free);
4675         INIT_LIST_HEAD(&vhost->targets);
4676         sprintf(vhost->name, IBMVFC_NAME);
4677         vhost->host = shost;
4678         vhost->dev = dev;
4679         vhost->partition_number = -1;
4680         vhost->log_level = log_level;
4681         vhost->task_set = 1;
4682         strcpy(vhost->partition_name, "UNKNOWN");
4683         init_waitqueue_head(&vhost->work_wait_q);
4684         init_waitqueue_head(&vhost->init_wait_q);
4685         INIT_WORK(&vhost->rport_add_work_q, ibmvfc_rport_add_thread);
4686         mutex_init(&vhost->passthru_mutex);
4687
4688         if ((rc = ibmvfc_alloc_mem(vhost)))
4689                 goto free_scsi_host;
4690
4691         vhost->work_thread = kthread_run(ibmvfc_work, vhost, "%s_%d", IBMVFC_NAME,
4692                                          shost->host_no);
4693
4694         if (IS_ERR(vhost->work_thread)) {
4695                 dev_err(dev, "Couldn't create kernel thread: %ld\n",
4696                         PTR_ERR(vhost->work_thread));
4697                 goto free_host_mem;
4698         }
4699
4700         if ((rc = ibmvfc_init_crq(vhost))) {
4701                 dev_err(dev, "Couldn't initialize crq. rc=%d\n", rc);
4702                 goto kill_kthread;
4703         }
4704
4705         if ((rc = ibmvfc_init_event_pool(vhost))) {
4706                 dev_err(dev, "Couldn't initialize event pool. rc=%d\n", rc);
4707                 goto release_crq;
4708         }
4709
4710         if ((rc = scsi_add_host(shost, dev)))
4711                 goto release_event_pool;
4712
4713         if ((rc = ibmvfc_create_trace_file(&shost->shost_dev.kobj,
4714                                            &ibmvfc_trace_attr))) {
4715                 dev_err(dev, "Failed to create trace file. rc=%d\n", rc);
4716                 goto remove_shost;
4717         }
4718
4719         if (shost_to_fc_host(shost)->rqst_q)
4720                 blk_queue_max_segments(shost_to_fc_host(shost)->rqst_q, 1);
4721         dev_set_drvdata(dev, vhost);
4722         spin_lock(&ibmvfc_driver_lock);
4723         list_add_tail(&vhost->queue, &ibmvfc_head);
4724         spin_unlock(&ibmvfc_driver_lock);
4725
4726         ibmvfc_send_crq_init(vhost);
4727         scsi_scan_host(shost);
4728         return 0;
4729
4730 remove_shost:
4731         scsi_remove_host(shost);
4732 release_event_pool:
4733         ibmvfc_free_event_pool(vhost);
4734 release_crq:
4735         ibmvfc_release_crq_queue(vhost);
4736 kill_kthread:
4737         kthread_stop(vhost->work_thread);
4738 free_host_mem:
4739         ibmvfc_free_mem(vhost);
4740 free_scsi_host:
4741         scsi_host_put(shost);
4742 out:
4743         LEAVE;
4744         return rc;
4745 }
4746
4747 /**
4748  * ibmvfc_remove - Adapter hot plug remove entry point
4749  * @vdev:       vio device struct
4750  *
4751  * Return value:
4752  *      0
4753  **/
4754 static int ibmvfc_remove(struct vio_dev *vdev)
4755 {
4756         struct ibmvfc_host *vhost = dev_get_drvdata(&vdev->dev);
4757         unsigned long flags;
4758
4759         ENTER;
4760         ibmvfc_remove_trace_file(&vhost->host->shost_dev.kobj, &ibmvfc_trace_attr);
4761
4762         spin_lock_irqsave(vhost->host->host_lock, flags);
4763         ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
4764         spin_unlock_irqrestore(vhost->host->host_lock, flags);
4765
4766         ibmvfc_wait_while_resetting(vhost);
4767         ibmvfc_release_crq_queue(vhost);
4768         kthread_stop(vhost->work_thread);
4769         fc_remove_host(vhost->host);
4770         scsi_remove_host(vhost->host);
4771
4772         spin_lock_irqsave(vhost->host->host_lock, flags);
4773         ibmvfc_purge_requests(vhost, DID_ERROR);
4774         ibmvfc_free_event_pool(vhost);
4775         spin_unlock_irqrestore(vhost->host->host_lock, flags);
4776
4777         ibmvfc_free_mem(vhost);
4778         spin_lock(&ibmvfc_driver_lock);
4779         list_del(&vhost->queue);
4780         spin_unlock(&ibmvfc_driver_lock);
4781         scsi_host_put(vhost->host);
4782         LEAVE;
4783         return 0;
4784 }
4785
4786 /**
4787  * ibmvfc_resume - Resume from suspend
4788  * @dev:        device struct
4789  *
4790  * We may have lost an interrupt across suspend/resume, so kick the
4791  * interrupt handler
4792  *
4793  */
4794 static int ibmvfc_resume(struct device *dev)
4795 {
4796         unsigned long flags;
4797         struct ibmvfc_host *vhost = dev_get_drvdata(dev);
4798         struct vio_dev *vdev = to_vio_dev(dev);
4799
4800         spin_lock_irqsave(vhost->host->host_lock, flags);
4801         vio_disable_interrupts(vdev);
4802         tasklet_schedule(&vhost->tasklet);
4803         spin_unlock_irqrestore(vhost->host->host_lock, flags);
4804         return 0;
4805 }
4806
4807 /**
4808  * ibmvfc_get_desired_dma - Calculate DMA resources needed by the driver
4809  * @vdev:       vio device struct
4810  *
4811  * Return value:
4812  *      Number of bytes the driver will need to DMA map at the same time in
4813  *      order to perform well.
4814  */
4815 static unsigned long ibmvfc_get_desired_dma(struct vio_dev *vdev)
4816 {
4817         unsigned long pool_dma = max_requests * sizeof(union ibmvfc_iu);
4818         return pool_dma + ((512 * 1024) * driver_template.cmd_per_lun);
4819 }
4820
/* Device tree (type, compat) pairs this driver binds to */
static struct vio_device_id ibmvfc_device_table[] __devinitdata = {
	{"fcp", "IBM,vfc-client"},
	{ "", "" }	/* terminator */
};
MODULE_DEVICE_TABLE(vio, ibmvfc_device_table);
4826
/* Power management: only resume needs handling (see ibmvfc_resume) */
static struct dev_pm_ops ibmvfc_pm_ops = {
	.resume = ibmvfc_resume
};
4830
/*
 * VIO bus driver glue: binds the device ID table to the probe/remove
 * entry points, the per-device DMA estimate callback, and the PM ops.
 * Registered with the vio bus in ibmvfc_module_init().
 */
static struct vio_driver ibmvfc_driver = {
	.id_table = ibmvfc_device_table,
	.probe = ibmvfc_probe,
	.remove = ibmvfc_remove,
	.get_desired_dma = ibmvfc_get_desired_dma,
	.driver = {
		.name = IBMVFC_NAME,
		.owner = THIS_MODULE,
		.pm = &ibmvfc_pm_ops,
	}
};
4842
/*
 * FC transport class template registered via fc_attach_transport() in
 * ibmvfc_module_init().  The show_* flags expose the corresponding
 * attributes through sysfs; the get_*/set_* callbacks fetch or update
 * live values for the host, remote ports, and SCSI targets; the bsg_*
 * callbacks service FC passthrough (bsg) requests.
 */
static struct fc_function_template ibmvfc_transport_functions = {
	.show_host_fabric_name = 1,
	.show_host_node_name = 1,
	.show_host_port_name = 1,
	.show_host_supported_classes = 1,
	.show_host_port_type = 1,
	.show_host_port_id = 1,
	.show_host_maxframe_size = 1,

	.get_host_port_state = ibmvfc_get_host_port_state,
	.show_host_port_state = 1,

	.get_host_speed = ibmvfc_get_host_speed,
	.show_host_speed = 1,

	.issue_fc_host_lip = ibmvfc_issue_fc_host_lip,
	.terminate_rport_io = ibmvfc_terminate_rport_io,

	.show_rport_maxframe_size = 1,
	.show_rport_supported_classes = 1,

	.set_rport_dev_loss_tmo = ibmvfc_set_rport_dev_loss_tmo,
	.show_rport_dev_loss_tmo = 1,

	.get_starget_node_name = ibmvfc_get_starget_node_name,
	.show_starget_node_name = 1,

	.get_starget_port_name = ibmvfc_get_starget_port_name,
	.show_starget_port_name = 1,

	.get_starget_port_id = ibmvfc_get_starget_port_id,
	.show_starget_port_id = 1,

	.bsg_request = ibmvfc_bsg_request,
	.bsg_timeout = ibmvfc_bsg_timeout,
};
4879
4880 /**
4881  * ibmvfc_module_init - Initialize the ibmvfc module
4882  *
4883  * Return value:
4884  *      0 on success / other on failure
4885  **/
4886 static int __init ibmvfc_module_init(void)
4887 {
4888         int rc;
4889
4890         if (!firmware_has_feature(FW_FEATURE_VIO))
4891                 return -ENODEV;
4892
4893         printk(KERN_INFO IBMVFC_NAME": IBM Virtual Fibre Channel Driver version: %s %s\n",
4894                IBMVFC_DRIVER_VERSION, IBMVFC_DRIVER_DATE);
4895
4896         ibmvfc_transport_template = fc_attach_transport(&ibmvfc_transport_functions);
4897         if (!ibmvfc_transport_template)
4898                 return -ENOMEM;
4899
4900         rc = vio_register_driver(&ibmvfc_driver);
4901         if (rc)
4902                 fc_release_transport(ibmvfc_transport_template);
4903         return rc;
4904 }
4905
/**
 * ibmvfc_module_exit - Teardown the ibmvfc module
 *
 * Return value:
 *	nothing
 **/
static void __exit ibmvfc_module_exit(void)
{
	/* Unregister from the vio bus first so no further probes can
	 * reference the transport template released below. */
	vio_unregister_driver(&ibmvfc_driver);
	fc_release_transport(ibmvfc_transport_template);
}
4917
/* Standard module entry/exit hooks */
module_init(ibmvfc_module_init);
module_exit(ibmvfc_module_exit);