[SCSI] qla2xxx: Basic infrastructure for dynamic logging.
[pandora-kernel.git] drivers/scsi/qla2xxx/qla_os.c
1 /*
2  * QLogic Fibre Channel HBA Driver
3  * Copyright (c)  2003-2011 QLogic Corporation
4  *
5  * See LICENSE.qla2xxx for copyright and licensing details.
6  */
7 #include "qla_def.h"
8
9 #include <linux/moduleparam.h>
10 #include <linux/vmalloc.h>
11 #include <linux/delay.h>
12 #include <linux/kthread.h>
13 #include <linux/mutex.h>
14 #include <linux/kobject.h>
15 #include <linux/slab.h>
16
17 #include <scsi/scsi_tcq.h>
18 #include <scsi/scsicam.h>
19 #include <scsi/scsi_transport.h>
20 #include <scsi/scsi_transport_fc.h>
21
22 /*
23  * Driver version
24  */
25 char qla2x00_version_str[40];
26
27 static int apidev_major;
28
29 /*
30  * SRB allocation cache
31  */
32 static struct kmem_cache *srb_cachep;
33
34 /*
35  * CT6 CTX allocation cache
36  */
37 static struct kmem_cache *ctx_cachep;
38 /*
39  * error level for logging
40  */
41 int ql_errlev = ql_log_all;
42
43 int ql2xlogintimeout = 20;
44 module_param(ql2xlogintimeout, int, S_IRUGO);
45 MODULE_PARM_DESC(ql2xlogintimeout,
46                 "Login timeout value in seconds.");
47
48 int qlport_down_retry;
49 module_param(qlport_down_retry, int, S_IRUGO);
50 MODULE_PARM_DESC(qlport_down_retry,
51                 "Maximum number of command retries to a port that returns "
52                 "a PORT-DOWN status.");
53
54 int ql2xplogiabsentdevice;
55 module_param(ql2xplogiabsentdevice, int, S_IRUGO|S_IWUSR);
56 MODULE_PARM_DESC(ql2xplogiabsentdevice,
57                 "Option to enable PLOGI to devices that are not present after "
58                 "a Fabric scan.  This is needed for several broken switches. "
59                 "Default is 0 - no PLOGI. 1 - perfom PLOGI.");
60
61 int ql2xloginretrycount = 0;
62 module_param(ql2xloginretrycount, int, S_IRUGO);
63 MODULE_PARM_DESC(ql2xloginretrycount,
64                 "Specify an alternate value for the NVRAM login retry count.");
65
66 int ql2xallocfwdump = 1;
67 module_param(ql2xallocfwdump, int, S_IRUGO);
68 MODULE_PARM_DESC(ql2xallocfwdump,
69                 "Option to enable allocation of memory for a firmware dump "
70                 "during HBA initialization.  Memory allocation requirements "
71                 "vary by ISP type.  Default is 1 - allocate memory.");
72
73 int ql2xextended_error_logging;
74 module_param(ql2xextended_error_logging, int, S_IRUGO|S_IWUSR);
75 MODULE_PARM_DESC(ql2xextended_error_logging,
76                 "Option to enable extended error logging,\n"
77                 "\t\tDefault is 0 - no logging.  0x40000000 - Module Init & Probe.\n"
78                 "\t\t0x20000000 - Mailbox Cmnds. 0x10000000 - Device Discovery.\n"
79                 "\t\t0x08000000 - IO tracing.    0x04000000 - DPC Thread.\n"
80                 "\t\t0x02000000 - Async events.  0x01000000 - Timer routines.\n"
81                 "\t\t0x00800000 - User space.    0x00400000 - Task Management.\n"
82                 "\t\t0x00200000 - AER/EEH.       0x00100000 - Multi Q.\n"
83                 "\t\t0x00080000 - P3P Specific.  0x00040000 - Virtual Port.\n"
84                 "\t\t0x00020000 - Buffer Dump.   0x00010000 - Misc.\n"
85                 "\t\t0x7fffffff - For enabling all logs, can be too many logs.\n"
86                 "\t\tDo LOGICAL OR of the value to enable more than one level");
87
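/*
 * The levels above are bit masks, so they can be combined; for example,
 * 0x40000000 | 0x10000000 (= 0x50000000) enables both Module Init & Probe
 * and Device Discovery logging.  Since the parameter is writable (S_IWUSR),
 * it can typically also be changed at runtime via
 * /sys/module/qla2xxx/parameters/ql2xextended_error_logging (illustrative).
 */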
88 int ql2xshiftctondsd = 6;
89 module_param(ql2xshiftctondsd, int, S_IRUGO);
90 MODULE_PARM_DESC(ql2xshiftctondsd,
91                 "Set to control shifting of command type processing "
92                 "based on total number of SG elements.");
93
94 static void qla2x00_free_device(scsi_qla_host_t *);
95
96 int ql2xfdmienable = 1;
97 module_param(ql2xfdmienable, int, S_IRUGO);
98 MODULE_PARM_DESC(ql2xfdmienable,
99                 "Enables FDMI registrations. "
100                 "0 - no FDMI. Default is 1 - perform FDMI.");
101
102 #define MAX_Q_DEPTH    32
103 static int ql2xmaxqdepth = MAX_Q_DEPTH;
104 module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR);
105 MODULE_PARM_DESC(ql2xmaxqdepth,
106                 "Maximum queue depth to report for target devices.");
107
108 /* Do not change the value of this after module load */
109 int ql2xenabledif = 1;
110 module_param(ql2xenabledif, int, S_IRUGO|S_IWUSR);
111 MODULE_PARM_DESC(ql2xenabledif,
112                 " Enable T10-CRC-DIF "
113                 " Default is 0 - No DIF Support. 1 - Enable it");
114
115 int ql2xenablehba_err_chk;
116 module_param(ql2xenablehba_err_chk, int, S_IRUGO|S_IWUSR);
117 MODULE_PARM_DESC(ql2xenablehba_err_chk,
118                 " Enable T10-CRC-DIF Error isolation by HBA"
119                 " Default is 0 - Error isolation disabled, 1 - Enable it");
120
121 int ql2xiidmaenable = 1;
122 module_param(ql2xiidmaenable, int, S_IRUGO);
123 MODULE_PARM_DESC(ql2xiidmaenable,
124                 "Enables iIDMA settings "
125                 "Default is 1 - perform iIDMA. 0 - no iIDMA.");
126
127 int ql2xmaxqueues = 1;
128 module_param(ql2xmaxqueues, int, S_IRUGO);
129 MODULE_PARM_DESC(ql2xmaxqueues,
130                 "Enables MQ settings "
131                 "Default is 1 for single queue. Set it to number "
132                 "of queues in MQ mode.");
133
134 int ql2xmultique_tag;
135 module_param(ql2xmultique_tag, int, S_IRUGO);
136 MODULE_PARM_DESC(ql2xmultique_tag,
137                 "Enables CPU affinity settings for the driver "
138                 "Default is 0 for no affinity of request and response IO. "
139                 "Set it to 1 to turn on the cpu affinity.");
140
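/*
 * Example usage (illustrative, assuming multi-queue capable firmware; see
 * qla25xx_setup_mode() below): CPU-affinity mode is selected at load time,
 * e.g.
 *
 *     modprobe qla2xxx ql2xmultique_tag=1
 *
 * while ql2xmaxqueues above selects plain MQ mode with the given number of
 * queues.
 */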
141 int ql2xfwloadbin;
142 module_param(ql2xfwloadbin, int, S_IRUGO);
143 MODULE_PARM_DESC(ql2xfwloadbin,
144                 "Option to specify location from which to load ISP firmware:\n"
145                 " 2 -- load firmware via the request_firmware() (hotplug)\n"
146                 "      interface.\n"
147                 " 1 -- load firmware from flash.\n"
148                 " 0 -- use default semantics.\n");
149
150 int ql2xetsenable;
151 module_param(ql2xetsenable, int, S_IRUGO);
152 MODULE_PARM_DESC(ql2xetsenable,
153                 "Enables firmware ETS burst."
154                 "Default is 0 - skip ETS enablement.");
155
156 int ql2xdbwr = 1;
157 module_param(ql2xdbwr, int, S_IRUGO);
158 MODULE_PARM_DESC(ql2xdbwr,
159         "Option to specify scheme for request queue posting\n"
160         " 0 -- Regular doorbell.\n"
161         " 1 -- CAMRAM doorbell (faster).\n");
162
163 int ql2xtargetreset = 1;
164 module_param(ql2xtargetreset, int, S_IRUGO);
165 MODULE_PARM_DESC(ql2xtargetreset,
166                  "Enable target reset."
167                  "Default is 1 - use hw defaults.");
168
169 int ql2xgffidenable;
170 module_param(ql2xgffidenable, int, S_IRUGO);
171 MODULE_PARM_DESC(ql2xgffidenable,
172                 "Enables GFF_ID checks of port type. "
173                 "Default is 0 - Do not use GFF_ID information.");
174
175 int ql2xasynctmfenable;
176 module_param(ql2xasynctmfenable, int, S_IRUGO);
177 MODULE_PARM_DESC(ql2xasynctmfenable,
178                 "Enables issue of TM IOCBs asynchronously via IOCB mechanism"
179                 "Default is 0 - Issue TM IOCBs via mailbox mechanism.");
180
181 int ql2xdontresethba;
182 module_param(ql2xdontresethba, int, S_IRUGO);
183 MODULE_PARM_DESC(ql2xdontresethba,
184         "Option to specify reset behaviour\n"
185         " 0 (Default) -- Reset on failure.\n"
186         " 1 -- Do not reset on failure.\n");
187
188 uint ql2xmaxlun = MAX_LUNS;
189 module_param(ql2xmaxlun, uint, S_IRUGO);
190 MODULE_PARM_DESC(ql2xmaxlun,
191                 "Defines the maximum LU number to register with the SCSI "
192                 "midlayer. Default is 65535.");
193
194 /*
195  * SCSI host template entry points
196  */
197 static int qla2xxx_slave_configure(struct scsi_device * device);
198 static int qla2xxx_slave_alloc(struct scsi_device *);
199 static int qla2xxx_scan_finished(struct Scsi_Host *, unsigned long time);
200 static void qla2xxx_scan_start(struct Scsi_Host *);
201 static void qla2xxx_slave_destroy(struct scsi_device *);
202 static int qla2xxx_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmd);
203 static int qla2xxx_eh_abort(struct scsi_cmnd *);
204 static int qla2xxx_eh_device_reset(struct scsi_cmnd *);
205 static int qla2xxx_eh_target_reset(struct scsi_cmnd *);
206 static int qla2xxx_eh_bus_reset(struct scsi_cmnd *);
207 static int qla2xxx_eh_host_reset(struct scsi_cmnd *);
208
209 static int qla2x00_change_queue_depth(struct scsi_device *, int, int);
210 static int qla2x00_change_queue_type(struct scsi_device *, int);
211
212 struct scsi_host_template qla2xxx_driver_template = {
213         .module                 = THIS_MODULE,
214         .name                   = QLA2XXX_DRIVER_NAME,
215         .queuecommand           = qla2xxx_queuecommand,
216
217         .eh_abort_handler       = qla2xxx_eh_abort,
218         .eh_device_reset_handler = qla2xxx_eh_device_reset,
219         .eh_target_reset_handler = qla2xxx_eh_target_reset,
220         .eh_bus_reset_handler   = qla2xxx_eh_bus_reset,
221         .eh_host_reset_handler  = qla2xxx_eh_host_reset,
222
223         .slave_configure        = qla2xxx_slave_configure,
224
225         .slave_alloc            = qla2xxx_slave_alloc,
226         .slave_destroy          = qla2xxx_slave_destroy,
227         .scan_finished          = qla2xxx_scan_finished,
228         .scan_start             = qla2xxx_scan_start,
229         .change_queue_depth     = qla2x00_change_queue_depth,
230         .change_queue_type      = qla2x00_change_queue_type,
231         .this_id                = -1,
232         .cmd_per_lun            = 3,
233         .use_clustering         = ENABLE_CLUSTERING,
234         .sg_tablesize           = SG_ALL,
235
236         .max_sectors            = 0xFFFF,
237         .shost_attrs            = qla2x00_host_attrs,
238 };
239
240 static struct scsi_transport_template *qla2xxx_transport_template = NULL;
241 struct scsi_transport_template *qla2xxx_transport_vport_template = NULL;
242
243 /* TODO Convert to inlines
244  *
245  * Timer routines
246  */
247
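/*
 * qla2x00_start_timer() arms the per-host timer: @interval is in seconds
 * (converted to jiffies below) and @func is invoked with the vha pointer,
 * cast to unsigned long, as its argument.
 */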
248 __inline__ void
249 qla2x00_start_timer(scsi_qla_host_t *vha, void *func, unsigned long interval)
250 {
251         init_timer(&vha->timer);
252         vha->timer.expires = jiffies + interval * HZ;
253         vha->timer.data = (unsigned long)vha;
254         vha->timer.function = (void (*)(unsigned long))func;
255         add_timer(&vha->timer);
256         vha->timer_active = 1;
257 }
258
259 static inline void
260 qla2x00_restart_timer(scsi_qla_host_t *vha, unsigned long interval)
261 {
262         /* Currently used for 82XX only. */
263         if (vha->device_flags & DFLG_DEV_FAILED)
264                 return;
265
266         mod_timer(&vha->timer, jiffies + interval * HZ);
267 }
268
269 static __inline__ void
270 qla2x00_stop_timer(scsi_qla_host_t *vha)
271 {
272         del_timer_sync(&vha->timer);
273         vha->timer_active = 0;
274 }
275
276 static int qla2x00_do_dpc(void *data);
277
278 static void qla2x00_rst_aen(scsi_qla_host_t *);
279
280 static int qla2x00_mem_alloc(struct qla_hw_data *, uint16_t, uint16_t,
281         struct req_que **, struct rsp_que **);
282 static void qla2x00_free_fw_dump(struct qla_hw_data *);
283 static void qla2x00_mem_free(struct qla_hw_data *);
284 static void qla2x00_sp_free_dma(srb_t *);
285
286 /* -------------------------------------------------------------------------- */
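/*
 * Allocate the request/response queue pointer arrays.  Note the asymmetric
 * return convention: 1 on success, -ENOMEM on failure.
 */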
287 static int qla2x00_alloc_queues(struct qla_hw_data *ha)
288 {
289         ha->req_q_map = kzalloc(sizeof(struct req_que *) * ha->max_req_queues,
290                                 GFP_KERNEL);
291         if (!ha->req_q_map) {
292                 qla_printk(KERN_WARNING, ha,
293                         "Unable to allocate memory for request queue ptrs\n");
294                 goto fail_req_map;
295         }
296
297         ha->rsp_q_map = kzalloc(sizeof(struct rsp_que *) * ha->max_rsp_queues,
298                                 GFP_KERNEL);
299         if (!ha->rsp_q_map) {
300                 qla_printk(KERN_WARNING, ha,
301                         "Unable to allocate memory for response queue ptrs\n");
302                 goto fail_rsp_map;
303         }
304         set_bit(0, ha->rsp_qid_map);
305         set_bit(0, ha->req_qid_map);
306         return 1;
307
308 fail_rsp_map:
309         kfree(ha->req_q_map);
310         ha->req_q_map = NULL;
311 fail_req_map:
312         return -ENOMEM;
313 }
314
315 static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req)
316 {
317         if (req && req->ring)
318                 dma_free_coherent(&ha->pdev->dev,
319                 (req->length + 1) * sizeof(request_t),
320                 req->ring, req->dma);
321
322         kfree(req);
323         req = NULL;
324 }
325
326 static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp)
327 {
328         if (rsp && rsp->ring)
329                 dma_free_coherent(&ha->pdev->dev,
330                 (rsp->length + 1) * sizeof(response_t),
331                 rsp->ring, rsp->dma);
332
333         kfree(rsp);
334         rsp = NULL;
335 }
336
337 static void qla2x00_free_queues(struct qla_hw_data *ha)
338 {
339         struct req_que *req;
340         struct rsp_que *rsp;
341         int cnt;
342
343         for (cnt = 0; cnt < ha->max_req_queues; cnt++) {
344                 req = ha->req_q_map[cnt];
345                 qla2x00_free_req_que(ha, req);
346         }
347         kfree(ha->req_q_map);
348         ha->req_q_map = NULL;
349
350         for (cnt = 0; cnt < ha->max_rsp_queues; cnt++) {
351                 rsp = ha->rsp_q_map[cnt];
352                 qla2x00_free_rsp_que(ha, rsp);
353         }
354         kfree(ha->rsp_q_map);
355         ha->rsp_q_map = NULL;
356 }
357
358 static int qla25xx_setup_mode(struct scsi_qla_host *vha)
359 {
360         uint16_t options = 0;
361         int ques, req, ret;
362         struct qla_hw_data *ha = vha->hw;
363
364         if (!(ha->fw_attributes & BIT_6)) {
365                 qla_printk(KERN_INFO, ha,
366                         "Firmware is not multi-queue capable\n");
367                 goto fail;
368         }
369         if (ql2xmultique_tag) {
370                 /* create a request queue for IO */
371                 options |= BIT_7;
372                 req = qla25xx_create_req_que(ha, options, 0, 0, -1,
373                         QLA_DEFAULT_QUE_QOS);
374                 if (!req) {
375                         qla_printk(KERN_WARNING, ha,
376                                 "Can't create request queue\n");
377                         goto fail;
378                 }
379                 ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 1);
380                 vha->req = ha->req_q_map[req];
381                 options |= BIT_1;
382                 for (ques = 1; ques < ha->max_rsp_queues; ques++) {
383                         ret = qla25xx_create_rsp_que(ha, options, 0, 0, req);
384                         if (!ret) {
385                                 qla_printk(KERN_WARNING, ha,
386                                         "Response Queue create failed\n");
387                                 goto fail2;
388                         }
389                 }
390                 ha->flags.cpu_affinity_enabled = 1;
391
392                 DEBUG2(qla_printk(KERN_INFO, ha,
393                         "CPU affinity mode enabled, no. of response"
394                         " queues:%d, no. of request queues:%d\n",
395                         ha->max_rsp_queues, ha->max_req_queues));
396         }
397         return 0;
398 fail2:
399         qla25xx_delete_queues(vha);
400         destroy_workqueue(ha->wq);
401         ha->wq = NULL;
402 fail:
403         ha->mqenable = 0;
404         kfree(ha->req_q_map);
405         kfree(ha->rsp_q_map);
406         ha->max_req_queues = ha->max_rsp_queues = 1;
407         return 1;
408 }
409
410 static char *
411 qla2x00_pci_info_str(struct scsi_qla_host *vha, char *str)
412 {
413         struct qla_hw_data *ha = vha->hw;
414         static char *pci_bus_modes[] = {
415                 "33", "66", "100", "133",
416         };
417         uint16_t pci_bus;
418
419         strcpy(str, "PCI");
420         pci_bus = (ha->pci_attr & (BIT_9 | BIT_10)) >> 9;
421         if (pci_bus) {
422                 strcat(str, "-X (");
423                 strcat(str, pci_bus_modes[pci_bus]);
424         } else {
425                 pci_bus = (ha->pci_attr & BIT_8) >> 8;
426                 strcat(str, " (");
427                 strcat(str, pci_bus_modes[pci_bus]);
428         }
429         strcat(str, " MHz)");
430
431         return (str);
432 }
433
434 static char *
435 qla24xx_pci_info_str(struct scsi_qla_host *vha, char *str)
436 {
437         static char *pci_bus_modes[] = { "33", "66", "100", "133", };
438         struct qla_hw_data *ha = vha->hw;
439         uint32_t pci_bus;
440         int pcie_reg;
441
442         pcie_reg = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP);
443         if (pcie_reg) {
444                 char lwstr[6];
445                 uint16_t pcie_lstat, lspeed, lwidth;
446
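                /* Offset 0x12 within the PCIe capability is the Link
                 * Status register (PCI_EXP_LNKSTA). */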
447                 pcie_reg += 0x12;
448                 pci_read_config_word(ha->pdev, pcie_reg, &pcie_lstat);
449                 lspeed = pcie_lstat & (BIT_0 | BIT_1 | BIT_2 | BIT_3);
450                 lwidth = (pcie_lstat &
451                     (BIT_4 | BIT_5 | BIT_6 | BIT_7 | BIT_8 | BIT_9)) >> 4;
452
453                 strcpy(str, "PCIe (");
454                 if (lspeed == 1)
455                         strcat(str, "2.5GT/s ");
456                 else if (lspeed == 2)
457                         strcat(str, "5.0GT/s ");
458                 else
459                         strcat(str, "<unknown> ");
460                 snprintf(lwstr, sizeof(lwstr), "x%d)", lwidth);
461                 strcat(str, lwstr);
462
463                 return str;
464         }
465
466         strcpy(str, "PCI");
467         pci_bus = (ha->pci_attr & CSRX_PCIX_BUS_MODE_MASK) >> 8;
468         if (pci_bus == 0 || pci_bus == 8) {
469                 strcat(str, " (");
470                 strcat(str, pci_bus_modes[pci_bus >> 3]);
471         } else {
472                 strcat(str, "-X ");
473                 if (pci_bus & BIT_2)
474                         strcat(str, "Mode 2");
475                 else
476                         strcat(str, "Mode 1");
477                 strcat(str, " (");
478                 strcat(str, pci_bus_modes[pci_bus & ~BIT_2]);
479         }
480         strcat(str, " MHz)");
481
482         return str;
483 }
484
485 static char *
486 qla2x00_fw_version_str(struct scsi_qla_host *vha, char *str)
487 {
488         char un_str[10];
489         struct qla_hw_data *ha = vha->hw;
490
491         sprintf(str, "%d.%02d.%02d ", ha->fw_major_version,
492             ha->fw_minor_version,
493             ha->fw_subminor_version);
494
495         if (ha->fw_attributes & BIT_9) {
496                 strcat(str, "FLX");
497                 return (str);
498         }
499
500         switch (ha->fw_attributes & 0xFF) {
501         case 0x7:
502                 strcat(str, "EF");
503                 break;
504         case 0x17:
505                 strcat(str, "TP");
506                 break;
507         case 0x37:
508                 strcat(str, "IP");
509                 break;
510         case 0x77:
511                 strcat(str, "VI");
512                 break;
513         default:
514                 sprintf(un_str, "(%x)", ha->fw_attributes);
515                 strcat(str, un_str);
516                 break;
517         }
518         if (ha->fw_attributes & 0x100)
519                 strcat(str, "X");
520
521         return (str);
522 }
523
524 static char *
525 qla24xx_fw_version_str(struct scsi_qla_host *vha, char *str)
526 {
527         struct qla_hw_data *ha = vha->hw;
528
529         sprintf(str, "%d.%02d.%02d (%x)", ha->fw_major_version,
530             ha->fw_minor_version, ha->fw_subminor_version, ha->fw_attributes);
531         return str;
532 }
533
534 static inline srb_t *
535 qla2x00_get_new_sp(scsi_qla_host_t *vha, fc_port_t *fcport,
536         struct scsi_cmnd *cmd)
537 {
538         srb_t *sp;
539         struct qla_hw_data *ha = vha->hw;
540
541         sp = mempool_alloc(ha->srb_mempool, GFP_ATOMIC);
542         if (!sp)
543                 return sp;
544
545         atomic_set(&sp->ref_count, 1);
546         sp->fcport = fcport;
547         sp->cmd = cmd;
548         sp->flags = 0;
549         CMD_SP(cmd) = (void *)sp;
550         sp->ctx = NULL;
551
552         return sp;
553 }
554
555 static int
556 qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
557 {
558         scsi_qla_host_t *vha = shost_priv(host);
559         fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
560         struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device));
561         struct qla_hw_data *ha = vha->hw;
562         struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
563         srb_t *sp;
564         int rval;
565
566         if (ha->flags.eeh_busy) {
567                 if (ha->flags.pci_channel_io_perm_failure)
568                         cmd->result = DID_NO_CONNECT << 16;
569                 else
570                         cmd->result = DID_REQUEUE << 16;
571                 goto qc24_fail_command;
572         }
573
574         rval = fc_remote_port_chkready(rport);
575         if (rval) {
576                 cmd->result = rval;
577                 goto qc24_fail_command;
578         }
579
580         if (!vha->flags.difdix_supported &&
581                 scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
582                         DEBUG2(qla_printk(KERN_ERR, ha,
583                             "DIF Cap Not Reg, fail DIF capable cmd's:%x\n",
584                             cmd->cmnd[0]));
585                         cmd->result = DID_NO_CONNECT << 16;
586                         goto qc24_fail_command;
587         }
588         if (atomic_read(&fcport->state) != FCS_ONLINE) {
589                 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
590                         atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
591                         cmd->result = DID_NO_CONNECT << 16;
592                         goto qc24_fail_command;
593                 }
594                 goto qc24_target_busy;
595         }
596
597         sp = qla2x00_get_new_sp(base_vha, fcport, cmd);
598         if (!sp)
599                 goto qc24_host_busy;
600
601         rval = ha->isp_ops->start_scsi(sp);
602         if (rval != QLA_SUCCESS)
603                 goto qc24_host_busy_free_sp;
604
605         return 0;
606
607 qc24_host_busy_free_sp:
608         qla2x00_sp_free_dma(sp);
609         mempool_free(sp, ha->srb_mempool);
610
611 qc24_host_busy:
612         return SCSI_MLQUEUE_HOST_BUSY;
613
614 qc24_target_busy:
615         return SCSI_MLQUEUE_TARGET_BUSY;
616
617 qc24_fail_command:
618         cmd->scsi_done(cmd);
619
620         return 0;
621 }
622
623 /*
624  * qla2x00_eh_wait_on_command
625  *    Waits, up to a maximum time, for the command to be returned
626  *    by the firmware.
627  *
628  * Input:
629  *    cmd = Scsi Command to wait on.
630  *
631  * Return:
632  *    Not Found : 0
633  *    Found : 1
634  */
635 static int
636 qla2x00_eh_wait_on_command(struct scsi_cmnd *cmd)
637 {
638 #define ABORT_POLLING_PERIOD    1000
639 #define ABORT_WAIT_ITER         ((10 * 1000) / (ABORT_POLLING_PERIOD))
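        /* Poll every ABORT_POLLING_PERIOD ms, for at most ABORT_WAIT_ITER
         * iterations, i.e. roughly 10 seconds in total. */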
640         unsigned long wait_iter = ABORT_WAIT_ITER;
641         scsi_qla_host_t *vha = shost_priv(cmd->device->host);
642         struct qla_hw_data *ha = vha->hw;
643         int ret = QLA_SUCCESS;
644
645         if (unlikely(pci_channel_offline(ha->pdev)) || ha->flags.eeh_busy) {
646                 DEBUG17(qla_printk(KERN_WARNING, ha, "return:eh_wait\n"));
647                 return ret;
648         }
649
650         while (CMD_SP(cmd) && wait_iter--) {
651                 msleep(ABORT_POLLING_PERIOD);
652         }
653         if (CMD_SP(cmd))
654                 ret = QLA_FUNCTION_FAILED;
655
656         return ret;
657 }
658
659 /*
660  * qla2x00_wait_for_hba_online
661  *    Wait until the HBA comes back online after going through at most
662  *    MAX_RETRIES_OF_ISP_ABORT retries, or until the HBA is disabled,
663  *    i.e. marked offline.
664  *
665  * Input:
666  *     ha - pointer to host adapter structure
667  *
668  * Note:
669  *    This routine may sleep (context switch); release any held
670  *    spinlocks before calling it.
671  *
672  * Return:
673  *    Success (Adapter is online) : 0
674  *    Failed  (Adapter is offline/disabled) : 1
675  */
676 int
677 qla2x00_wait_for_hba_online(scsi_qla_host_t *vha)
678 {
679         int             return_status;
680         unsigned long   wait_online;
681         struct qla_hw_data *ha = vha->hw;
682         scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
683
684         wait_online = jiffies + (MAX_LOOP_TIMEOUT * HZ);
685         while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) ||
686             test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
687             test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
688             ha->dpc_active) && time_before(jiffies, wait_online)) {
689
690                 msleep(1000);
691         }
692         if (base_vha->flags.online)
693                 return_status = QLA_SUCCESS;
694         else
695                 return_status = QLA_FUNCTION_FAILED;
696
697         return (return_status);
698 }
699
700 /*
701  * qla2x00_wait_for_reset_ready
702  *    Wait until the HBA comes back online (after going through at most
703  *    MAX_RETRIES_OF_ISP_ABORT retries) or is disabled (i.e. marked
704  *    offline), and until any flash operations in
705  *    progress have completed.
706  *
707  * Input:
708  *     ha - pointer to host adapter structure
709  *
710  * Note:
711  *    This routine may sleep (context switch); release any held
712  *    spinlocks before calling it.
713  *
714  * Return:
715  *    Success (Adapter is online/no flash ops) : 0
716  *    Failed  (Adapter is offline/disabled/flash ops in progress) : 1
717  */
718 static int
719 qla2x00_wait_for_reset_ready(scsi_qla_host_t *vha)
720 {
721         int             return_status;
722         unsigned long   wait_online;
723         struct qla_hw_data *ha = vha->hw;
724         scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
725
726         wait_online = jiffies + (MAX_LOOP_TIMEOUT * HZ);
727         while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) ||
728             test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
729             test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
730             ha->optrom_state != QLA_SWAITING ||
731             ha->dpc_active) && time_before(jiffies, wait_online))
732                 msleep(1000);
733
734         if (base_vha->flags.online &&  ha->optrom_state == QLA_SWAITING)
735                 return_status = QLA_SUCCESS;
736         else
737                 return_status = QLA_FUNCTION_FAILED;
738
739         DEBUG2(printk("%s return_status=%d\n", __func__, return_status));
740
741         return return_status;
742 }
743
744 int
745 qla2x00_wait_for_chip_reset(scsi_qla_host_t *vha)
746 {
747         int             return_status;
748         unsigned long   wait_reset;
749         struct qla_hw_data *ha = vha->hw;
750         scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
751
752         wait_reset = jiffies + (MAX_LOOP_TIMEOUT * HZ);
753         while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) ||
754             test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
755             test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
756             ha->dpc_active) && time_before(jiffies, wait_reset)) {
757
758                 msleep(1000);
759
760                 if (!test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags) &&
761                     ha->flags.chip_reset_done)
762                         break;
763         }
764         if (ha->flags.chip_reset_done)
765                 return_status = QLA_SUCCESS;
766         else
767                 return_status = QLA_FUNCTION_FAILED;
768
769         return return_status;
770 }
771
772 /*
773  * qla2x00_wait_for_loop_ready
774  *    Wait up to MAX_LOOP_TIMEOUT (5 min) for the loop
775  *    to reach the LOOP_READY state.
776  * Input:
777  *     ha - pointer to host adapter structure
778  *
779  * Note:
780  *    This routine may sleep (context switch); release any held
781  *    spinlocks before calling it.
782  *
783  *
784  * Return:
785  *    Success (LOOP_READY) : 0
786  *    Failed  (LOOP_NOT_READY) : 1
787  */
788 static inline int
789 qla2x00_wait_for_loop_ready(scsi_qla_host_t *vha)
790 {
791         int      return_status = QLA_SUCCESS;
792         unsigned long loop_timeout ;
793         struct qla_hw_data *ha = vha->hw;
794         scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
795
796         /* wait for 5 min at the max for loop to be ready */
797         loop_timeout = jiffies + (MAX_LOOP_TIMEOUT * HZ);
798
799         while ((!atomic_read(&base_vha->loop_down_timer) &&
800             atomic_read(&base_vha->loop_state) == LOOP_DOWN) ||
801             atomic_read(&base_vha->loop_state) != LOOP_READY) {
802                 if (atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
803                         return_status = QLA_FUNCTION_FAILED;
804                         break;
805                 }
806                 msleep(1000);
807                 if (time_after_eq(jiffies, loop_timeout)) {
808                         return_status = QLA_FUNCTION_FAILED;
809                         break;
810                 }
811         }
812         return (return_status);
813 }
814
815 static void
816 sp_get(struct srb *sp)
817 {
818         atomic_inc(&sp->ref_count);
819 }
820
821 /**************************************************************************
822 * qla2xxx_eh_abort
823 *
824 * Description:
825 *    The abort function will abort the specified command.
826 *
827 * Input:
828 *    cmd = Linux SCSI command packet to be aborted.
829 *
830 * Returns:
831 *    Either SUCCESS or FAILED.
832 *
833 * Note:
834 *    Only return FAILED if command not returned by firmware.
835 **************************************************************************/
836 static int
837 qla2xxx_eh_abort(struct scsi_cmnd *cmd)
838 {
839         scsi_qla_host_t *vha = shost_priv(cmd->device->host);
840         srb_t *sp;
841         int ret;
842         unsigned int id, lun;
843         unsigned long flags;
844         int wait = 0;
845         struct qla_hw_data *ha = vha->hw;
846
847         if (!CMD_SP(cmd))
848                 return SUCCESS;
849
850         ret = fc_block_scsi_eh(cmd);
851         if (ret != 0)
852                 return ret;
853         ret = SUCCESS;
854
855         id = cmd->device->id;
856         lun = cmd->device->lun;
857
858         spin_lock_irqsave(&ha->hardware_lock, flags);
859         sp = (srb_t *) CMD_SP(cmd);
860         if (!sp) {
861                 spin_unlock_irqrestore(&ha->hardware_lock, flags);
862                 return SUCCESS;
863         }
864
865         DEBUG2(printk("%s(%ld): aborting sp %p from RISC.",
866             __func__, vha->host_no, sp));
867
868         /* Get a reference to the sp and drop the lock.*/
869         sp_get(sp);
870
871         spin_unlock_irqrestore(&ha->hardware_lock, flags);
872         if (ha->isp_ops->abort_command(sp)) {
873                 DEBUG2(printk("%s(%ld): abort_command "
874                 "mbx failed.\n", __func__, vha->host_no));
875                 ret = FAILED;
876         } else {
877                 DEBUG3(printk("%s(%ld): abort_command "
878                 "mbx success.\n", __func__, vha->host_no));
879                 wait = 1;
880         }
881         qla2x00_sp_compl(ha, sp);
882
883         /* Wait for the command to be returned. */
884         if (wait) {
885                 if (qla2x00_eh_wait_on_command(cmd) != QLA_SUCCESS) {
886                         qla_printk(KERN_ERR, ha,
887                             "scsi(%ld:%d:%d): Abort handler timed out -- %x.\n",
888                             vha->host_no, id, lun, ret);
889                         ret = FAILED;
890                 }
891         }
892
893         qla_printk(KERN_INFO, ha,
894             "scsi(%ld:%d:%d): Abort command issued -- %d %x.\n",
895             vha->host_no, id, lun, wait, ret);
896
897         return ret;
898 }
899
900 int
901 qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
902         unsigned int l, enum nexus_wait_type type)
903 {
904         int cnt, match, status;
905         unsigned long flags;
906         struct qla_hw_data *ha = vha->hw;
907         struct req_que *req;
908         srb_t *sp;
909
910         status = QLA_SUCCESS;
911
912         spin_lock_irqsave(&ha->hardware_lock, flags);
913         req = vha->req;
914         for (cnt = 1; status == QLA_SUCCESS &&
915                 cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
916                 sp = req->outstanding_cmds[cnt];
917                 if (!sp)
918                         continue;
919                 if ((sp->ctx) && !IS_PROT_IO(sp))
920                         continue;
921                 if (vha->vp_idx != sp->fcport->vha->vp_idx)
922                         continue;
923                 match = 0;
924                 switch (type) {
925                 case WAIT_HOST:
926                         match = 1;
927                         break;
928                 case WAIT_TARGET:
929                         match = sp->cmd->device->id == t;
930                         break;
931                 case WAIT_LUN:
932                         match = (sp->cmd->device->id == t &&
933                                 sp->cmd->device->lun == l);
934                         break;
935                 }
936                 if (!match)
937                         continue;
938
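                /* Drop the hardware lock across the wait below, which may
                 * sleep, and re-acquire it before examining the next slot. */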
939                 spin_unlock_irqrestore(&ha->hardware_lock, flags);
940                 status = qla2x00_eh_wait_on_command(sp->cmd);
941                 spin_lock_irqsave(&ha->hardware_lock, flags);
942         }
943         spin_unlock_irqrestore(&ha->hardware_lock, flags);
944
945         return status;
946 }
947
948 static char *reset_errors[] = {
949         "HBA not online",
950         "HBA not ready",
951         "Task management failed",
952         "Waiting for command completions",
953 };
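/*
 * The indices into reset_errors[] above correspond to the 'err' checkpoint
 * values set in __qla2xxx_eh_generic_reset() below.
 */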
954
955 static int
956 __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
957     struct scsi_cmnd *cmd, int (*do_reset)(struct fc_port *, unsigned int, int))
958 {
959         scsi_qla_host_t *vha = shost_priv(cmd->device->host);
960         fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
961         int err;
962
963         if (!fcport)
964                 return FAILED;
965
966         err = fc_block_scsi_eh(cmd);
967         if (err != 0)
968                 return err;
969
970         qla_printk(KERN_INFO, vha->hw, "scsi(%ld:%d:%d): %s RESET ISSUED.\n",
971             vha->host_no, cmd->device->id, cmd->device->lun, name);
972
973         err = 0;
974         if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS)
975                 goto eh_reset_failed;
976         err = 1;
977         if (qla2x00_wait_for_loop_ready(vha) != QLA_SUCCESS)
978                 goto eh_reset_failed;
979         err = 2;
980         if (do_reset(fcport, cmd->device->lun, cmd->request->cpu + 1)
981                 != QLA_SUCCESS)
982                 goto eh_reset_failed;
983         err = 3;
984         if (qla2x00_eh_wait_for_pending_commands(vha, cmd->device->id,
985             cmd->device->lun, type) != QLA_SUCCESS)
986                 goto eh_reset_failed;
987
988         qla_printk(KERN_INFO, vha->hw, "scsi(%ld:%d:%d): %s RESET SUCCEEDED.\n",
989             vha->host_no, cmd->device->id, cmd->device->lun, name);
990
991         return SUCCESS;
992
993 eh_reset_failed:
994         qla_printk(KERN_INFO, vha->hw, "scsi(%ld:%d:%d): %s RESET FAILED: %s.\n"
995             , vha->host_no, cmd->device->id, cmd->device->lun, name,
996             reset_errors[err]);
997         return FAILED;
998 }
999
1000 static int
1001 qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
1002 {
1003         scsi_qla_host_t *vha = shost_priv(cmd->device->host);
1004         struct qla_hw_data *ha = vha->hw;
1005
1006         return __qla2xxx_eh_generic_reset("DEVICE", WAIT_LUN, cmd,
1007             ha->isp_ops->lun_reset);
1008 }
1009
1010 static int
1011 qla2xxx_eh_target_reset(struct scsi_cmnd *cmd)
1012 {
1013         scsi_qla_host_t *vha = shost_priv(cmd->device->host);
1014         struct qla_hw_data *ha = vha->hw;
1015
1016         return __qla2xxx_eh_generic_reset("TARGET", WAIT_TARGET, cmd,
1017             ha->isp_ops->target_reset);
1018 }
1019
1020 /**************************************************************************
1021 * qla2xxx_eh_bus_reset
1022 *
1023 * Description:
1024 *    The bus reset function will reset the bus and abort any executing
1025 *    commands.
1026 *
1027 * Input:
1028 *    cmd = Linux SCSI command packet of the command that caused the
1029 *          bus reset.
1030 *
1031 * Returns:
1032 *    SUCCESS/FAILURE (defined as macro in scsi.h).
1033 *
1034 **************************************************************************/
1035 static int
1036 qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
1037 {
1038         scsi_qla_host_t *vha = shost_priv(cmd->device->host);
1039         fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
1040         int ret = FAILED;
1041         unsigned int id, lun;
1042
1043         id = cmd->device->id;
1044         lun = cmd->device->lun;
1045
1046         if (!fcport)
1047                 return ret;
1048
1049         ret = fc_block_scsi_eh(cmd);
1050         if (ret != 0)
1051                 return ret;
1052         ret = FAILED;
1053
1054         qla_printk(KERN_INFO, vha->hw,
1055             "scsi(%ld:%d:%d): BUS RESET ISSUED.\n", vha->host_no, id, lun);
1056
1057         if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
1058                 DEBUG2(printk("%s failed:board disabled\n",__func__));
1059                 goto eh_bus_reset_done;
1060         }
1061
1062         if (qla2x00_wait_for_loop_ready(vha) == QLA_SUCCESS) {
1063                 if (qla2x00_loop_reset(vha) == QLA_SUCCESS)
1064                         ret = SUCCESS;
1065         }
1066         if (ret == FAILED)
1067                 goto eh_bus_reset_done;
1068
1069         /* Flush outstanding commands. */
1070         if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST) !=
1071             QLA_SUCCESS)
1072                 ret = FAILED;
1073
1074 eh_bus_reset_done:
1075         qla_printk(KERN_INFO, vha->hw, "%s: reset %s\n", __func__,
1076             (ret == FAILED) ? "failed" : "succeeded");
1077
1078         return ret;
1079 }
1080
1081 /**************************************************************************
1082 * qla2xxx_eh_host_reset
1083 *
1084 * Description:
1085 *    The reset function will reset the Adapter.
1086 *
1087 * Input:
1088 *      cmd = Linux SCSI command packet of the command that caused the
1089 *            adapter reset.
1090 *
1091 * Returns:
1092 *      Either SUCCESS or FAILED.
1093 *
1094 * Note:
1095 **************************************************************************/
1096 static int
1097 qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
1098 {
1099         scsi_qla_host_t *vha = shost_priv(cmd->device->host);
1100         fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
1101         struct qla_hw_data *ha = vha->hw;
1102         int ret = FAILED;
1103         unsigned int id, lun;
1104         scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
1105
1106         id = cmd->device->id;
1107         lun = cmd->device->lun;
1108
1109         if (!fcport)
1110                 return ret;
1111
1112         ret = fc_block_scsi_eh(cmd);
1113         if (ret != 0)
1114                 return ret;
1115         ret = FAILED;
1116
1117         qla_printk(KERN_INFO, ha,
1118             "scsi(%ld:%d:%d): ADAPTER RESET ISSUED.\n", vha->host_no, id, lun);
1119
1120         if (qla2x00_wait_for_reset_ready(vha) != QLA_SUCCESS)
1121                 goto eh_host_reset_lock;
1122
1123         /*
1124          * FIXME: the DPC thread may be active and processing a
1125          * loop_resync, so wait a while for it to complete before
1126          * issuing the big hammer.  Otherwise the big hammer may cause
1127          * I/O failures, as it marks the devices as lost, kicking off
1128          * the port_down_timer, while the DPC thread is stuck waiting
1129          * for the mailbox command to complete.
1130          */
1131         qla2x00_wait_for_loop_ready(vha);
1132         if (vha != base_vha) {
1133                 if (qla2x00_vp_abort_isp(vha))
1134                         goto eh_host_reset_lock;
1135         } else {
1136                 if (IS_QLA82XX(vha->hw)) {
1137                         if (!qla82xx_fcoe_ctx_reset(vha)) {
1138                                 /* Ctx reset success */
1139                                 ret = SUCCESS;
1140                                 goto eh_host_reset_lock;
1141                         }
1142                         /* fall thru if ctx reset failed */
1143                 }
1144                 if (ha->wq)
1145                         flush_workqueue(ha->wq);
1146
1147                 set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
1148                 if (ha->isp_ops->abort_isp(base_vha)) {
1149                         clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
1150                         /* failed. schedule dpc to try */
1151                         set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
1152
1153                         if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS)
1154                                 goto eh_host_reset_lock;
1155                 }
1156                 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
1157         }
1158
1159         /* Waiting for command to be returned to OS.*/
1160         if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST) ==
1161                 QLA_SUCCESS)
1162                 ret = SUCCESS;
1163
1164 eh_host_reset_lock:
1165         qla_printk(KERN_INFO, ha, "%s: reset %s\n", __func__,
1166             (ret == FAILED) ? "failed" : "succeeded");
1167
1168         return ret;
1169 }
1170
1171 /*
1172 * qla2x00_loop_reset
1173 *      Issue loop reset.
1174 *
1175 * Input:
1176 *      ha = adapter block pointer.
1177 *
1178 * Returns:
1179 *      0 = success
1180 */
1181 int
1182 qla2x00_loop_reset(scsi_qla_host_t *vha)
1183 {
1184         int ret;
1185         struct fc_port *fcport;
1186         struct qla_hw_data *ha = vha->hw;
1187
1188         if (ql2xtargetreset == 1 && ha->flags.enable_target_reset) {
1189                 list_for_each_entry(fcport, &vha->vp_fcports, list) {
1190                         if (fcport->port_type != FCT_TARGET)
1191                                 continue;
1192
1193                         ret = ha->isp_ops->target_reset(fcport, 0, 0);
1194                         if (ret != QLA_SUCCESS) {
1195                                 DEBUG2_3(printk("%s(%ld): bus_reset failed: "
1196                                     "target_reset=%d d_id=%x.\n", __func__,
1197                                     vha->host_no, ret, fcport->d_id.b24));
1198                         }
1199                 }
1200         }
1201
1202         if (ha->flags.enable_lip_full_login && !IS_QLA8XXX_TYPE(ha)) {
1203                 ret = qla2x00_full_login_lip(vha);
1204                 if (ret != QLA_SUCCESS) {
1205                         DEBUG2_3(printk("%s(%ld): failed: "
1206                             "full_login_lip=%d.\n", __func__, vha->host_no,
1207                             ret));
1208                 }
1209                 atomic_set(&vha->loop_state, LOOP_DOWN);
1210                 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
1211                 qla2x00_mark_all_devices_lost(vha, 0);
1212                 qla2x00_wait_for_loop_ready(vha);
1213         }
1214
1215         if (ha->flags.enable_lip_reset) {
1216                 ret = qla2x00_lip_reset(vha);
1217                 if (ret != QLA_SUCCESS) {
1218                         DEBUG2_3(printk("%s(%ld): failed: "
1219                             "lip_reset=%d.\n", __func__, vha->host_no, ret));
1220                 } else
1221                         qla2x00_wait_for_loop_ready(vha);
1222         }
1223
1224         /* Issue marker command only when we are going to start the I/O */
1225         vha->marker_needed = 1;
1226
1227         return QLA_SUCCESS;
1228 }
1229
1230 void
1231 qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
1232 {
1233         int que, cnt;
1234         unsigned long flags;
1235         srb_t *sp;
1236         struct srb_ctx *ctx;
1237         struct qla_hw_data *ha = vha->hw;
1238         struct req_que *req;
1239
1240         spin_lock_irqsave(&ha->hardware_lock, flags);
1241         for (que = 0; que < ha->max_req_queues; que++) {
1242                 req = ha->req_q_map[que];
1243                 if (!req)
1244                         continue;
1245                 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
1246                         sp = req->outstanding_cmds[cnt];
1247                         if (sp) {
1248                                 req->outstanding_cmds[cnt] = NULL;
1249                                 if (!sp->ctx ||
1250                                         (sp->flags & SRB_FCP_CMND_DMA_VALID) ||
1251                                         IS_PROT_IO(sp)) {
1252                                         sp->cmd->result = res;
1253                                         qla2x00_sp_compl(ha, sp);
1254                                 } else {
1255                                         ctx = sp->ctx;
1256                                         if (ctx->type == SRB_LOGIN_CMD ||
1257                                             ctx->type == SRB_LOGOUT_CMD) {
1258                                                 ctx->u.iocb_cmd->free(sp);
1259                                         } else {
1260                                                 struct fc_bsg_job *bsg_job =
1261                                                     ctx->u.bsg_job;
1262                                                 if (bsg_job->request->msgcode
1263                                                     == FC_BSG_HST_CT)
1264                                                         kfree(sp->fcport);
1265                                                 bsg_job->req->errors = 0;
1266                                                 bsg_job->reply->result = res;
1267                                                 bsg_job->job_done(bsg_job);
1268                                                 kfree(sp->ctx);
1269                                                 mempool_free(sp,
1270                                                         ha->srb_mempool);
1271                                         }
1272                                 }
1273                         }
1274                 }
1275         }
1276         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1277 }
1278
1279 static int
1280 qla2xxx_slave_alloc(struct scsi_device *sdev)
1281 {
1282         struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
1283
1284         if (!rport || fc_remote_port_chkready(rport))
1285                 return -ENXIO;
1286
1287         sdev->hostdata = *(fc_port_t **)rport->dd_data;
1288
1289         return 0;
1290 }
1291
1292 static int
1293 qla2xxx_slave_configure(struct scsi_device *sdev)
1294 {
1295         scsi_qla_host_t *vha = shost_priv(sdev->host);
1296         struct req_que *req = vha->req;
1297
1298         if (sdev->tagged_supported)
1299                 scsi_activate_tcq(sdev, req->max_q_depth);
1300         else
1301                 scsi_deactivate_tcq(sdev, req->max_q_depth);
1302         return 0;
1303 }
1304
1305 static void
1306 qla2xxx_slave_destroy(struct scsi_device *sdev)
1307 {
1308         sdev->hostdata = NULL;
1309 }
1310
1311 static void qla2x00_handle_queue_full(struct scsi_device *sdev, int qdepth)
1312 {
1313         fc_port_t *fcport = (struct fc_port *) sdev->hostdata;
1314
1315         if (!scsi_track_queue_full(sdev, qdepth))
1316                 return;
1317
1318         DEBUG2(qla_printk(KERN_INFO, fcport->vha->hw,
1319                 "scsi(%ld:%d:%d:%d): Queue depth adjusted-down to %d.\n",
1320                 fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
1321                 sdev->queue_depth));
1322 }
1323
1324 static void qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, int qdepth)
1325 {
1326         fc_port_t *fcport = sdev->hostdata;
1327         struct scsi_qla_host *vha = fcport->vha;
1328         struct qla_hw_data *ha = vha->hw;
1329         struct req_que *req = NULL;
1330
1331         req = vha->req;
1332         if (!req)
1333                 return;
1334
1335         if (req->max_q_depth <= sdev->queue_depth || req->max_q_depth < qdepth)
1336                 return;
1337
1338         if (sdev->ordered_tags)
1339                 scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, qdepth);
1340         else
1341                 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, qdepth);
1342
1343         DEBUG2(qla_printk(KERN_INFO, ha,
1344                "scsi(%ld:%d:%d:%d): Queue depth adjusted-up to %d.\n",
1345                fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
1346                sdev->queue_depth));
1347 }
1348
1349 static int
1350 qla2x00_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
1351 {
1352         switch (reason) {
1353         case SCSI_QDEPTH_DEFAULT:
1354                 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
1355                 break;
1356         case SCSI_QDEPTH_QFULL:
1357                 qla2x00_handle_queue_full(sdev, qdepth);
1358                 break;
1359         case SCSI_QDEPTH_RAMP_UP:
1360                 qla2x00_adjust_sdev_qdepth_up(sdev, qdepth);
1361                 break;
1362         default:
1363                 return -EOPNOTSUPP;
1364         }
1365
1366         return sdev->queue_depth;
1367 }
1368
1369 static int
1370 qla2x00_change_queue_type(struct scsi_device *sdev, int tag_type)
1371 {
1372         if (sdev->tagged_supported) {
1373                 scsi_set_tag_type(sdev, tag_type);
1374                 if (tag_type)
1375                         scsi_activate_tcq(sdev, sdev->queue_depth);
1376                 else
1377                         scsi_deactivate_tcq(sdev, sdev->queue_depth);
1378         } else
1379                 tag_type = 0;
1380
1381         return tag_type;
1382 }
1383
1384 /**
1385  * qla2x00_config_dma_addressing() - Configure OS DMA addressing method.
1386  * @ha: HA context
1387  *
1388  * At exit, @ha's flags.enable_64bit_addressing is set to indicate the
1389  * supported addressing method.
1390  */
1391 static void
1392 qla2x00_config_dma_addressing(struct qla_hw_data *ha)
1393 {
1394         /* Assume a 32bit DMA mask. */
1395         ha->flags.enable_64bit_addressing = 0;
1396
1397         if (!dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(64))) {
1398                 /* Any upper-dword bits set? */
1399                 if (MSD(dma_get_required_mask(&ha->pdev->dev)) &&
1400                     !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
1401                         /* Ok, a 64bit DMA mask is applicable. */
1402                         ha->flags.enable_64bit_addressing = 1;
1403                         ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
1404                         ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
1405                         return;
1406                 }
1407         }
1408
1409         dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(32));
1410         pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(32));
1411 }
1412
1413 static void
1414 qla2x00_enable_intrs(struct qla_hw_data *ha)
1415 {
1416         unsigned long flags = 0;
1417         struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1418
1419         spin_lock_irqsave(&ha->hardware_lock, flags);
1420         ha->interrupts_on = 1;
1421         /* enable risc and host interrupts */
1422         WRT_REG_WORD(&reg->ictrl, ICR_EN_INT | ICR_EN_RISC);
1423         RD_REG_WORD(&reg->ictrl);
1424         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1425
1426 }
1427
1428 static void
1429 qla2x00_disable_intrs(struct qla_hw_data *ha)
1430 {
1431         unsigned long flags = 0;
1432         struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1433
1434         spin_lock_irqsave(&ha->hardware_lock, flags);
1435         ha->interrupts_on = 0;
1436         /* disable risc and host interrupts */
1437         WRT_REG_WORD(&reg->ictrl, 0);
1438         RD_REG_WORD(&reg->ictrl);
1439         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1440 }
1441
1442 static void
1443 qla24xx_enable_intrs(struct qla_hw_data *ha)
1444 {
1445         unsigned long flags = 0;
1446         struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1447
1448         spin_lock_irqsave(&ha->hardware_lock, flags);
1449         ha->interrupts_on = 1;
1450         WRT_REG_DWORD(&reg->ictrl, ICRX_EN_RISC_INT);
1451         RD_REG_DWORD(&reg->ictrl);
1452         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1453 }
1454
1455 static void
1456 qla24xx_disable_intrs(struct qla_hw_data *ha)
1457 {
1458         unsigned long flags = 0;
1459         struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1460
1461         if (IS_NOPOLLING_TYPE(ha))
1462                 return;
1463         spin_lock_irqsave(&ha->hardware_lock, flags);
1464         ha->interrupts_on = 0;
1465         WRT_REG_DWORD(&reg->ictrl, 0);
1466         RD_REG_DWORD(&reg->ictrl);
1467         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1468 }
1469
1470 static struct isp_operations qla2100_isp_ops = {
1471         .pci_config             = qla2100_pci_config,
1472         .reset_chip             = qla2x00_reset_chip,
1473         .chip_diag              = qla2x00_chip_diag,
1474         .config_rings           = qla2x00_config_rings,
1475         .reset_adapter          = qla2x00_reset_adapter,
1476         .nvram_config           = qla2x00_nvram_config,
1477         .update_fw_options      = qla2x00_update_fw_options,
1478         .load_risc              = qla2x00_load_risc,
1479         .pci_info_str           = qla2x00_pci_info_str,
1480         .fw_version_str         = qla2x00_fw_version_str,
1481         .intr_handler           = qla2100_intr_handler,
1482         .enable_intrs           = qla2x00_enable_intrs,
1483         .disable_intrs          = qla2x00_disable_intrs,
1484         .abort_command          = qla2x00_abort_command,
1485         .target_reset           = qla2x00_abort_target,
1486         .lun_reset              = qla2x00_lun_reset,
1487         .fabric_login           = qla2x00_login_fabric,
1488         .fabric_logout          = qla2x00_fabric_logout,
1489         .calc_req_entries       = qla2x00_calc_iocbs_32,
1490         .build_iocbs            = qla2x00_build_scsi_iocbs_32,
1491         .prep_ms_iocb           = qla2x00_prep_ms_iocb,
1492         .prep_ms_fdmi_iocb      = qla2x00_prep_ms_fdmi_iocb,
1493         .read_nvram             = qla2x00_read_nvram_data,
1494         .write_nvram            = qla2x00_write_nvram_data,
1495         .fw_dump                = qla2100_fw_dump,
1496         .beacon_on              = NULL,
1497         .beacon_off             = NULL,
1498         .beacon_blink           = NULL,
1499         .read_optrom            = qla2x00_read_optrom_data,
1500         .write_optrom           = qla2x00_write_optrom_data,
1501         .get_flash_version      = qla2x00_get_flash_version,
1502         .start_scsi             = qla2x00_start_scsi,
1503         .abort_isp              = qla2x00_abort_isp,
1504 };
1505
1506 static struct isp_operations qla2300_isp_ops = {
1507         .pci_config             = qla2300_pci_config,
1508         .reset_chip             = qla2x00_reset_chip,
1509         .chip_diag              = qla2x00_chip_diag,
1510         .config_rings           = qla2x00_config_rings,
1511         .reset_adapter          = qla2x00_reset_adapter,
1512         .nvram_config           = qla2x00_nvram_config,
1513         .update_fw_options      = qla2x00_update_fw_options,
1514         .load_risc              = qla2x00_load_risc,
1515         .pci_info_str           = qla2x00_pci_info_str,
1516         .fw_version_str         = qla2x00_fw_version_str,
1517         .intr_handler           = qla2300_intr_handler,
1518         .enable_intrs           = qla2x00_enable_intrs,
1519         .disable_intrs          = qla2x00_disable_intrs,
1520         .abort_command          = qla2x00_abort_command,
1521         .target_reset           = qla2x00_abort_target,
1522         .lun_reset              = qla2x00_lun_reset,
1523         .fabric_login           = qla2x00_login_fabric,
1524         .fabric_logout          = qla2x00_fabric_logout,
1525         .calc_req_entries       = qla2x00_calc_iocbs_32,
1526         .build_iocbs            = qla2x00_build_scsi_iocbs_32,
1527         .prep_ms_iocb           = qla2x00_prep_ms_iocb,
1528         .prep_ms_fdmi_iocb      = qla2x00_prep_ms_fdmi_iocb,
1529         .read_nvram             = qla2x00_read_nvram_data,
1530         .write_nvram            = qla2x00_write_nvram_data,
1531         .fw_dump                = qla2300_fw_dump,
1532         .beacon_on              = qla2x00_beacon_on,
1533         .beacon_off             = qla2x00_beacon_off,
1534         .beacon_blink           = qla2x00_beacon_blink,
1535         .read_optrom            = qla2x00_read_optrom_data,
1536         .write_optrom           = qla2x00_write_optrom_data,
1537         .get_flash_version      = qla2x00_get_flash_version,
1538         .start_scsi             = qla2x00_start_scsi,
1539         .abort_isp              = qla2x00_abort_isp,
1540 };
1541
1542 static struct isp_operations qla24xx_isp_ops = {
1543         .pci_config             = qla24xx_pci_config,
1544         .reset_chip             = qla24xx_reset_chip,
1545         .chip_diag              = qla24xx_chip_diag,
1546         .config_rings           = qla24xx_config_rings,
1547         .reset_adapter          = qla24xx_reset_adapter,
1548         .nvram_config           = qla24xx_nvram_config,
1549         .update_fw_options      = qla24xx_update_fw_options,
1550         .load_risc              = qla24xx_load_risc,
1551         .pci_info_str           = qla24xx_pci_info_str,
1552         .fw_version_str         = qla24xx_fw_version_str,
1553         .intr_handler           = qla24xx_intr_handler,
1554         .enable_intrs           = qla24xx_enable_intrs,
1555         .disable_intrs          = qla24xx_disable_intrs,
1556         .abort_command          = qla24xx_abort_command,
1557         .target_reset           = qla24xx_abort_target,
1558         .lun_reset              = qla24xx_lun_reset,
1559         .fabric_login           = qla24xx_login_fabric,
1560         .fabric_logout          = qla24xx_fabric_logout,
1561         .calc_req_entries       = NULL,
1562         .build_iocbs            = NULL,
1563         .prep_ms_iocb           = qla24xx_prep_ms_iocb,
1564         .prep_ms_fdmi_iocb      = qla24xx_prep_ms_fdmi_iocb,
1565         .read_nvram             = qla24xx_read_nvram_data,
1566         .write_nvram            = qla24xx_write_nvram_data,
1567         .fw_dump                = qla24xx_fw_dump,
1568         .beacon_on              = qla24xx_beacon_on,
1569         .beacon_off             = qla24xx_beacon_off,
1570         .beacon_blink           = qla24xx_beacon_blink,
1571         .read_optrom            = qla24xx_read_optrom_data,
1572         .write_optrom           = qla24xx_write_optrom_data,
1573         .get_flash_version      = qla24xx_get_flash_version,
1574         .start_scsi             = qla24xx_start_scsi,
1575         .abort_isp              = qla2x00_abort_isp,
1576 };
1577
1578 static struct isp_operations qla25xx_isp_ops = {
1579         .pci_config             = qla25xx_pci_config,
1580         .reset_chip             = qla24xx_reset_chip,
1581         .chip_diag              = qla24xx_chip_diag,
1582         .config_rings           = qla24xx_config_rings,
1583         .reset_adapter          = qla24xx_reset_adapter,
1584         .nvram_config           = qla24xx_nvram_config,
1585         .update_fw_options      = qla24xx_update_fw_options,
1586         .load_risc              = qla24xx_load_risc,
1587         .pci_info_str           = qla24xx_pci_info_str,
1588         .fw_version_str         = qla24xx_fw_version_str,
1589         .intr_handler           = qla24xx_intr_handler,
1590         .enable_intrs           = qla24xx_enable_intrs,
1591         .disable_intrs          = qla24xx_disable_intrs,
1592         .abort_command          = qla24xx_abort_command,
1593         .target_reset           = qla24xx_abort_target,
1594         .lun_reset              = qla24xx_lun_reset,
1595         .fabric_login           = qla24xx_login_fabric,
1596         .fabric_logout          = qla24xx_fabric_logout,
1597         .calc_req_entries       = NULL,
1598         .build_iocbs            = NULL,
1599         .prep_ms_iocb           = qla24xx_prep_ms_iocb,
1600         .prep_ms_fdmi_iocb      = qla24xx_prep_ms_fdmi_iocb,
1601         .read_nvram             = qla25xx_read_nvram_data,
1602         .write_nvram            = qla25xx_write_nvram_data,
1603         .fw_dump                = qla25xx_fw_dump,
1604         .beacon_on              = qla24xx_beacon_on,
1605         .beacon_off             = qla24xx_beacon_off,
1606         .beacon_blink           = qla24xx_beacon_blink,
1607         .read_optrom            = qla25xx_read_optrom_data,
1608         .write_optrom           = qla24xx_write_optrom_data,
1609         .get_flash_version      = qla24xx_get_flash_version,
1610         .start_scsi             = qla24xx_dif_start_scsi,
1611         .abort_isp              = qla2x00_abort_isp,
1612 };
1613
1614 static struct isp_operations qla81xx_isp_ops = {
1615         .pci_config             = qla25xx_pci_config,
1616         .reset_chip             = qla24xx_reset_chip,
1617         .chip_diag              = qla24xx_chip_diag,
1618         .config_rings           = qla24xx_config_rings,
1619         .reset_adapter          = qla24xx_reset_adapter,
1620         .nvram_config           = qla81xx_nvram_config,
1621         .update_fw_options      = qla81xx_update_fw_options,
1622         .load_risc              = qla81xx_load_risc,
1623         .pci_info_str           = qla24xx_pci_info_str,
1624         .fw_version_str         = qla24xx_fw_version_str,
1625         .intr_handler           = qla24xx_intr_handler,
1626         .enable_intrs           = qla24xx_enable_intrs,
1627         .disable_intrs          = qla24xx_disable_intrs,
1628         .abort_command          = qla24xx_abort_command,
1629         .target_reset           = qla24xx_abort_target,
1630         .lun_reset              = qla24xx_lun_reset,
1631         .fabric_login           = qla24xx_login_fabric,
1632         .fabric_logout          = qla24xx_fabric_logout,
1633         .calc_req_entries       = NULL,
1634         .build_iocbs            = NULL,
1635         .prep_ms_iocb           = qla24xx_prep_ms_iocb,
1636         .prep_ms_fdmi_iocb      = qla24xx_prep_ms_fdmi_iocb,
1637         .read_nvram             = NULL,
1638         .write_nvram            = NULL,
1639         .fw_dump                = qla81xx_fw_dump,
1640         .beacon_on              = qla24xx_beacon_on,
1641         .beacon_off             = qla24xx_beacon_off,
1642         .beacon_blink           = qla24xx_beacon_blink,
1643         .read_optrom            = qla25xx_read_optrom_data,
1644         .write_optrom           = qla24xx_write_optrom_data,
1645         .get_flash_version      = qla24xx_get_flash_version,
1646         .start_scsi             = qla24xx_dif_start_scsi,
1647         .abort_isp              = qla2x00_abort_isp,
1648 };
1649
1650 static struct isp_operations qla82xx_isp_ops = {
1651         .pci_config             = qla82xx_pci_config,
1652         .reset_chip             = qla82xx_reset_chip,
1653         .chip_diag              = qla24xx_chip_diag,
1654         .config_rings           = qla82xx_config_rings,
1655         .reset_adapter          = qla24xx_reset_adapter,
1656         .nvram_config           = qla81xx_nvram_config,
1657         .update_fw_options      = qla24xx_update_fw_options,
1658         .load_risc              = qla82xx_load_risc,
1659         .pci_info_str           = qla82xx_pci_info_str,
1660         .fw_version_str         = qla24xx_fw_version_str,
1661         .intr_handler           = qla82xx_intr_handler,
1662         .enable_intrs           = qla82xx_enable_intrs,
1663         .disable_intrs          = qla82xx_disable_intrs,
1664         .abort_command          = qla24xx_abort_command,
1665         .target_reset           = qla24xx_abort_target,
1666         .lun_reset              = qla24xx_lun_reset,
1667         .fabric_login           = qla24xx_login_fabric,
1668         .fabric_logout          = qla24xx_fabric_logout,
1669         .calc_req_entries       = NULL,
1670         .build_iocbs            = NULL,
1671         .prep_ms_iocb           = qla24xx_prep_ms_iocb,
1672         .prep_ms_fdmi_iocb      = qla24xx_prep_ms_fdmi_iocb,
1673         .read_nvram             = qla24xx_read_nvram_data,
1674         .write_nvram            = qla24xx_write_nvram_data,
1675         .fw_dump                = qla24xx_fw_dump,
1676         .beacon_on              = qla24xx_beacon_on,
1677         .beacon_off             = qla24xx_beacon_off,
1678         .beacon_blink           = qla24xx_beacon_blink,
1679         .read_optrom            = qla82xx_read_optrom_data,
1680         .write_optrom           = qla82xx_write_optrom_data,
1681         .get_flash_version      = qla24xx_get_flash_version,
1682         .start_scsi             = qla82xx_start_scsi,
1683         .abort_isp              = qla82xx_abort_isp,
1684 };
1685
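     /*
      * Derive the DT_* device-type flags and the RISC firmware start
      * address from the PCI device ID, and record the adapter's physical
      * port number.
      */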
1686 static inline void
1687 qla2x00_set_isp_flags(struct qla_hw_data *ha)
1688 {
1689         ha->device_type = DT_EXTENDED_IDS;
1690         switch (ha->pdev->device) {
1691         case PCI_DEVICE_ID_QLOGIC_ISP2100:
1692                 ha->device_type |= DT_ISP2100;
1693                 ha->device_type &= ~DT_EXTENDED_IDS;
1694                 ha->fw_srisc_address = RISC_START_ADDRESS_2100;
1695                 break;
1696         case PCI_DEVICE_ID_QLOGIC_ISP2200:
1697                 ha->device_type |= DT_ISP2200;
1698                 ha->device_type &= ~DT_EXTENDED_IDS;
1699                 ha->fw_srisc_address = RISC_START_ADDRESS_2100;
1700                 break;
1701         case PCI_DEVICE_ID_QLOGIC_ISP2300:
1702                 ha->device_type |= DT_ISP2300;
1703                 ha->device_type |= DT_ZIO_SUPPORTED;
1704                 ha->fw_srisc_address = RISC_START_ADDRESS_2300;
1705                 break;
1706         case PCI_DEVICE_ID_QLOGIC_ISP2312:
1707                 ha->device_type |= DT_ISP2312;
1708                 ha->device_type |= DT_ZIO_SUPPORTED;
1709                 ha->fw_srisc_address = RISC_START_ADDRESS_2300;
1710                 break;
1711         case PCI_DEVICE_ID_QLOGIC_ISP2322:
1712                 ha->device_type |= DT_ISP2322;
1713                 ha->device_type |= DT_ZIO_SUPPORTED;
1714                 if (ha->pdev->subsystem_vendor == 0x1028 &&
1715                     ha->pdev->subsystem_device == 0x0170)
1716                         ha->device_type |= DT_OEM_001;
1717                 ha->fw_srisc_address = RISC_START_ADDRESS_2300;
1718                 break;
1719         case PCI_DEVICE_ID_QLOGIC_ISP6312:
1720                 ha->device_type |= DT_ISP6312;
1721                 ha->fw_srisc_address = RISC_START_ADDRESS_2300;
1722                 break;
1723         case PCI_DEVICE_ID_QLOGIC_ISP6322:
1724                 ha->device_type |= DT_ISP6322;
1725                 ha->fw_srisc_address = RISC_START_ADDRESS_2300;
1726                 break;
1727         case PCI_DEVICE_ID_QLOGIC_ISP2422:
1728                 ha->device_type |= DT_ISP2422;
1729                 ha->device_type |= DT_ZIO_SUPPORTED;
1730                 ha->device_type |= DT_FWI2;
1731                 ha->device_type |= DT_IIDMA;
1732                 ha->fw_srisc_address = RISC_START_ADDRESS_2400;
1733                 break;
1734         case PCI_DEVICE_ID_QLOGIC_ISP2432:
1735                 ha->device_type |= DT_ISP2432;
1736                 ha->device_type |= DT_ZIO_SUPPORTED;
1737                 ha->device_type |= DT_FWI2;
1738                 ha->device_type |= DT_IIDMA;
1739                 ha->fw_srisc_address = RISC_START_ADDRESS_2400;
1740                 break;
1741         case PCI_DEVICE_ID_QLOGIC_ISP8432:
1742                 ha->device_type |= DT_ISP8432;
1743                 ha->device_type |= DT_ZIO_SUPPORTED;
1744                 ha->device_type |= DT_FWI2;
1745                 ha->device_type |= DT_IIDMA;
1746                 ha->fw_srisc_address = RISC_START_ADDRESS_2400;
1747                 break;
1748         case PCI_DEVICE_ID_QLOGIC_ISP5422:
1749                 ha->device_type |= DT_ISP5422;
1750                 ha->device_type |= DT_FWI2;
1751                 ha->fw_srisc_address = RISC_START_ADDRESS_2400;
1752                 break;
1753         case PCI_DEVICE_ID_QLOGIC_ISP5432:
1754                 ha->device_type |= DT_ISP5432;
1755                 ha->device_type |= DT_FWI2;
1756                 ha->fw_srisc_address = RISC_START_ADDRESS_2400;
1757                 break;
1758         case PCI_DEVICE_ID_QLOGIC_ISP2532:
1759                 ha->device_type |= DT_ISP2532;
1760                 ha->device_type |= DT_ZIO_SUPPORTED;
1761                 ha->device_type |= DT_FWI2;
1762                 ha->device_type |= DT_IIDMA;
1763                 ha->fw_srisc_address = RISC_START_ADDRESS_2400;
1764                 break;
1765         case PCI_DEVICE_ID_QLOGIC_ISP8001:
1766                 ha->device_type |= DT_ISP8001;
1767                 ha->device_type |= DT_ZIO_SUPPORTED;
1768                 ha->device_type |= DT_FWI2;
1769                 ha->device_type |= DT_IIDMA;
1770                 ha->fw_srisc_address = RISC_START_ADDRESS_2400;
1771                 break;
1772         case PCI_DEVICE_ID_QLOGIC_ISP8021:
1773                 ha->device_type |= DT_ISP8021;
1774                 ha->device_type |= DT_ZIO_SUPPORTED;
1775                 ha->device_type |= DT_FWI2;
1776                 ha->fw_srisc_address = RISC_START_ADDRESS_2400;
1777                 /* Initialize 82XX ISP flags */
1778                 qla82xx_init_flags(ha);
1779                 break;
1780         }
1781
1782         if (IS_QLA82XX(ha))
1783                 ha->port_no = !(ha->portnum & 1);
1784         else
1785                 /* Get adapter physical port no from interrupt pin register. */
1786                 pci_read_config_byte(ha->pdev, PCI_INTERRUPT_PIN, &ha->port_no);
1787
1788         if (ha->port_no & 1)
1789                 ha->flags.port0 = 1;
1790         else
1791                 ha->flags.port0 = 0;
1792 }
1793
1794 static int
1795 qla2x00_iospace_config(struct qla_hw_data *ha)
1796 {
1797         resource_size_t pio;
1798         uint16_t msix;
1799         int cpus;
1800
1801         if (IS_QLA82XX(ha))
1802                 return qla82xx_iospace_config(ha);
1803
1804         if (pci_request_selected_regions(ha->pdev, ha->bars,
1805             QLA2XXX_DRIVER_NAME)) {
1806                 qla_printk(KERN_WARNING, ha,
1807                     "Failed to reserve PIO/MMIO regions (%s)\n",
1808                     pci_name(ha->pdev));
1809
1810                 goto iospace_error_exit;
1811         }
1812         if (!(ha->bars & 1))
1813                 goto skip_pio;
1814
1815         /* We only need PIO for Flash operations on ISP2312 v2 chips. */
1816         pio = pci_resource_start(ha->pdev, 0);
1817         if (pci_resource_flags(ha->pdev, 0) & IORESOURCE_IO) {
1818                 if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) {
1819                         qla_printk(KERN_WARNING, ha,
1820                             "Invalid PCI I/O region size (%s)...\n",
1821                                 pci_name(ha->pdev));
1822                         pio = 0;
1823                 }
1824         } else {
1825                 qla_printk(KERN_WARNING, ha,
1826                     "region #0 not a PIO resource (%s)...\n",
1827                     pci_name(ha->pdev));
1828                 pio = 0;
1829         }
1830         ha->pio_address = pio;
1831
1832 skip_pio:
1833         /* Use MMIO operations for all accesses. */
1834         if (!(pci_resource_flags(ha->pdev, 1) & IORESOURCE_MEM)) {
1835                 qla_printk(KERN_ERR, ha,
1836                     "region #1 not an MMIO resource (%s), aborting\n",
1837                     pci_name(ha->pdev));
1838                 goto iospace_error_exit;
1839         }
1840         if (pci_resource_len(ha->pdev, 1) < MIN_IOBASE_LEN) {
1841                 qla_printk(KERN_ERR, ha,
1842                     "Invalid PCI mem region size (%s), aborting\n",
1843                         pci_name(ha->pdev));
1844                 goto iospace_error_exit;
1845         }
1846
1847         ha->iobase = ioremap(pci_resource_start(ha->pdev, 1), MIN_IOBASE_LEN);
1848         if (!ha->iobase) {
1849                 qla_printk(KERN_ERR, ha,
1850                     "cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev));
1851
1852                 goto iospace_error_exit;
1853         }
1854
1855         /* Determine queue resources */
1856         ha->max_req_queues = ha->max_rsp_queues = 1;
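             /*
              * The multi-queue register window (BAR 3) is mapped only on
              * ISP25xx/ISP81xx parts and only when either ql2xmultique_tag
              * is set or ql2xmaxqueues is greater than one, but not both.
              */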
1857         if ((ql2xmaxqueues <= 1 && !ql2xmultique_tag) ||
1858                 (ql2xmaxqueues > 1 && ql2xmultique_tag) ||
1859                 (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
1860                 goto mqiobase_exit;
1861
1862         ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3),
1863                         pci_resource_len(ha->pdev, 3));
1864         if (ha->mqiobase) {
1865                 /* Read MSIX vector size of the board */
1866                 pci_read_config_word(ha->pdev, QLA_PCI_MSIX_CONTROL, &msix);
1867                 ha->msix_count = msix;
1868                 /* Max queues are bounded by available msix vectors */
1869                 /* queue 0 uses two msix vectors */
1870                 if (ql2xmultique_tag) {
1871                         cpus = num_online_cpus();
1872                         ha->max_rsp_queues = (ha->msix_count - 1 > cpus) ?
1873                                 (cpus + 1) : (ha->msix_count - 1);
1874                         ha->max_req_queues = 2;
1875                 } else if (ql2xmaxqueues > 1) {
1876                         ha->max_req_queues = ql2xmaxqueues > QLA_MQ_SIZE ?
1877                                                 QLA_MQ_SIZE : ql2xmaxqueues;
1878                         DEBUG2(qla_printk(KERN_INFO, ha, "QoS mode set, max "
1879                         "number of request queues: %d\n", ha->max_req_queues));
1880                 }
1881                 qla_printk(KERN_INFO, ha,
1882                         "MSI-X vector count: %d\n", msix);
1883         } else
1884                 qla_printk(KERN_INFO, ha, "BAR 3 not enabled\n");
1885
1886 mqiobase_exit:
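             /* One MSI-X vector per response queue plus one extra (queue 0 uses two). */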
1887         ha->msix_count = ha->max_rsp_queues + 1;
1888         return (0);
1889
1890 iospace_error_exit:
1891         return (-ENOMEM);
1892 }
1893
1894 static void
1895 qla2xxx_scan_start(struct Scsi_Host *shost)
1896 {
1897         scsi_qla_host_t *vha = shost_priv(shost);
1898
1899         if (vha->hw->flags.running_gold_fw)
1900                 return;
1901
1902         set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1903         set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
1904         set_bit(RSCN_UPDATE, &vha->dpc_flags);
1905         set_bit(NPIV_CONFIG_NEEDED, &vha->dpc_flags);
1906 }
1907
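     /*
      * Report the asynchronous scan as finished once the loop reaches
      * LOOP_READY or loop_reset_delay seconds have elapsed.
      */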
1908 static int
1909 qla2xxx_scan_finished(struct Scsi_Host *shost, unsigned long time)
1910 {
1911         scsi_qla_host_t *vha = shost_priv(shost);
1912
1913         if (!vha->host)
1914                 return 1;
1915         if (time > vha->hw->loop_reset_delay * HZ)
1916                 return 1;
1917
1918         return atomic_read(&vha->loop_state) == LOOP_READY;
1919 }
1920
1921 /*
1922  * PCI driver interface
1923  */
1924 static int __devinit
1925 qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1926 {
1927         int     ret = -ENODEV;
1928         struct Scsi_Host *host;
1929         scsi_qla_host_t *base_vha = NULL;
1930         struct qla_hw_data *ha;
1931         char pci_info[30];
1932         char fw_str[30];
1933         struct scsi_host_template *sht;
1934         int bars, max_id, mem_only = 0;
1935         uint16_t req_length = 0, rsp_length = 0;
1936         struct req_que *req = NULL;
1937         struct rsp_que *rsp = NULL;
1938
1939         bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
1940         sht = &qla2xxx_driver_template;
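             /*
              * FWI2-capable ISPs (24xx and later) are driven entirely through
              * memory-mapped registers, so reserve only the MMIO BARs and
              * enable the device in memory-only mode.
              */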
1941         if (pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2422 ||
1942             pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2432 ||
1943             pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8432 ||
1944             pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5422 ||
1945             pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5432 ||
1946             pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2532 ||
1947             pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8001 ||
1948             pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8021) {
1949                 bars = pci_select_bars(pdev, IORESOURCE_MEM);
1950                 mem_only = 1;
1951         }
1952
1953         if (mem_only) {
1954                 if (pci_enable_device_mem(pdev))
1955                         goto probe_out;
1956         } else {
1957                 if (pci_enable_device(pdev))
1958                         goto probe_out;
1959         }
1960
1961         /* This may fail but that's ok */
1962         pci_enable_pcie_error_reporting(pdev);
1963
1964         ha = kzalloc(sizeof(struct qla_hw_data), GFP_KERNEL);
1965         if (!ha) {
1966                 DEBUG(printk("Unable to allocate memory for ha\n"));
1967                 goto probe_out;
1968         }
1969         ha->pdev = pdev;
1970
1971         /* Initialize hardware structure fields and locks. */
1972         ha->bars = bars;
1973         ha->mem_only = mem_only;
1974         spin_lock_init(&ha->hardware_lock);
1975         spin_lock_init(&ha->vport_slock);
1976
1977         /* Set ISP-type information. */
1978         qla2x00_set_isp_flags(ha);
1979
1980         /* Set EEH reset type to fundamental if required by hba */
1981         if (IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha)) {
1982                 pdev->needs_freset = 1;
1983         }
1984
1985         /* Configure PCI I/O space */
1986         ret = qla2x00_iospace_config(ha);
1987         if (ret)
1988                 goto probe_hw_failed;
1989
1990         qla_printk(KERN_INFO, ha,
1991             "Found an ISP%04X, irq %d, iobase 0x%p\n", pdev->device, pdev->irq,
1992             ha->iobase);
1993
1994         ha->prev_topology = 0;
1995         ha->init_cb_size = sizeof(init_cb_t);
1996         ha->link_data_rate = PORT_SPEED_UNKNOWN;
1997         ha->optrom_size = OPTROM_SIZE_2300;
1998
1999         /* Assign ISP specific operations. */
2000         max_id = MAX_TARGETS_2200;
2001         if (IS_QLA2100(ha)) {
2002                 max_id = MAX_TARGETS_2100;
2003                 ha->mbx_count = MAILBOX_REGISTER_COUNT_2100;
2004                 req_length = REQUEST_ENTRY_CNT_2100;
2005                 rsp_length = RESPONSE_ENTRY_CNT_2100;
2006                 ha->max_loop_id = SNS_LAST_LOOP_ID_2100;
2007                 ha->gid_list_info_size = 4;
2008                 ha->flash_conf_off = ~0;
2009                 ha->flash_data_off = ~0;
2010                 ha->nvram_conf_off = ~0;
2011                 ha->nvram_data_off = ~0;
2012                 ha->isp_ops = &qla2100_isp_ops;
2013         } else if (IS_QLA2200(ha)) {
2014                 ha->mbx_count = MAILBOX_REGISTER_COUNT;
2015                 req_length = REQUEST_ENTRY_CNT_2200;
2016                 rsp_length = RESPONSE_ENTRY_CNT_2100;
2017                 ha->max_loop_id = SNS_LAST_LOOP_ID_2100;
2018                 ha->gid_list_info_size = 4;
2019                 ha->flash_conf_off = ~0;
2020                 ha->flash_data_off = ~0;
2021                 ha->nvram_conf_off = ~0;
2022                 ha->nvram_data_off = ~0;
2023                 ha->isp_ops = &qla2100_isp_ops;
2024         } else if (IS_QLA23XX(ha)) {
2025                 ha->mbx_count = MAILBOX_REGISTER_COUNT;
2026                 req_length = REQUEST_ENTRY_CNT_2200;
2027                 rsp_length = RESPONSE_ENTRY_CNT_2300;
2028                 ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
2029                 ha->gid_list_info_size = 6;
2030                 if (IS_QLA2322(ha) || IS_QLA6322(ha))
2031                         ha->optrom_size = OPTROM_SIZE_2322;
2032                 ha->flash_conf_off = ~0;
2033                 ha->flash_data_off = ~0;
2034                 ha->nvram_conf_off = ~0;
2035                 ha->nvram_data_off = ~0;
2036                 ha->isp_ops = &qla2300_isp_ops;
2037         } else if (IS_QLA24XX_TYPE(ha)) {
2038                 ha->mbx_count = MAILBOX_REGISTER_COUNT;
2039                 req_length = REQUEST_ENTRY_CNT_24XX;
2040                 rsp_length = RESPONSE_ENTRY_CNT_2300;
2041                 ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
2042                 ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
2043                 ha->gid_list_info_size = 8;
2044                 ha->optrom_size = OPTROM_SIZE_24XX;
2045                 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA24XX;
2046                 ha->isp_ops = &qla24xx_isp_ops;
2047                 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
2048                 ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
2049                 ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
2050                 ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
2051         } else if (IS_QLA25XX(ha)) {
2052                 ha->mbx_count = MAILBOX_REGISTER_COUNT;
2053                 req_length = REQUEST_ENTRY_CNT_24XX;
2054                 rsp_length = RESPONSE_ENTRY_CNT_2300;
2055                 ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
2056                 ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
2057                 ha->gid_list_info_size = 8;
2058                 ha->optrom_size = OPTROM_SIZE_25XX;
2059                 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
2060                 ha->isp_ops = &qla25xx_isp_ops;
2061                 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
2062                 ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
2063                 ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
2064                 ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
2065         } else if (IS_QLA81XX(ha)) {
2066                 ha->mbx_count = MAILBOX_REGISTER_COUNT;
2067                 req_length = REQUEST_ENTRY_CNT_24XX;
2068                 rsp_length = RESPONSE_ENTRY_CNT_2300;
2069                 ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
2070                 ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
2071                 ha->gid_list_info_size = 8;
2072                 ha->optrom_size = OPTROM_SIZE_81XX;
2073                 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
2074                 ha->isp_ops = &qla81xx_isp_ops;
2075                 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX;
2076                 ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX;
2077                 ha->nvram_conf_off = ~0;
2078                 ha->nvram_data_off = ~0;
2079         } else if (IS_QLA82XX(ha)) {
2080                 ha->mbx_count = MAILBOX_REGISTER_COUNT;
2081                 req_length = REQUEST_ENTRY_CNT_82XX;
2082                 rsp_length = RESPONSE_ENTRY_CNT_82XX;
2083                 ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
2084                 ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
2085                 ha->gid_list_info_size = 8;
2086                 ha->optrom_size = OPTROM_SIZE_82XX;
2087                 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
2088                 ha->isp_ops = &qla82xx_isp_ops;
2089                 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
2090                 ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
2091                 ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
2092                 ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
2093         }
2094
2095         mutex_init(&ha->vport_lock);
2096         init_completion(&ha->mbx_cmd_comp);
2097         complete(&ha->mbx_cmd_comp);
2098         init_completion(&ha->mbx_intr_comp);
2099         init_completion(&ha->dcbx_comp);
2100
2101         set_bit(0, (unsigned long *) ha->vp_idx_map);
2102
2103         qla2x00_config_dma_addressing(ha);
2104         ret = qla2x00_mem_alloc(ha, req_length, rsp_length, &req, &rsp);
2105         if (ret) {
2106                 qla_printk(KERN_WARNING, ha,
2107                     "[ERROR] Failed to allocate memory for adapter\n");
2108
2109                 goto probe_hw_failed;
2110         }
2111
2112         req->max_q_depth = MAX_Q_DEPTH;
2113         if (ql2xmaxqdepth != 0 && ql2xmaxqdepth <= 0xffffU)
2114                 req->max_q_depth = ql2xmaxqdepth;
2115
2116
2117         base_vha = qla2x00_create_host(sht, ha);
2118         if (!base_vha) {
2119                 qla_printk(KERN_WARNING, ha,
2120                     "[ERROR] Failed to allocate memory for scsi_host\n");
2121
2122                 ret = -ENOMEM;
2123                 qla2x00_mem_free(ha);
2124                 qla2x00_free_req_que(ha, req);
2125                 qla2x00_free_rsp_que(ha, rsp);
2126                 goto probe_hw_failed;
2127         }
2128
2129         pci_set_drvdata(pdev, base_vha);
2130
2131         host = base_vha->host;
2132         base_vha->req = req;
2133         host->can_queue = req->length + 128;
2134         if (IS_QLA2XXX_MIDTYPE(ha))
2135                 base_vha->mgmt_svr_loop_id = 10 + base_vha->vp_idx;
2136         else
2137                 base_vha->mgmt_svr_loop_id = MANAGEMENT_SERVER +
2138                                                 base_vha->vp_idx;
2139
2140         /* Set the SG table size based on ISP type */
2141         if (!IS_FWI2_CAPABLE(ha)) {
2142                 if (IS_QLA2100(ha))
2143                         host->sg_tablesize = 32;
2144         } else {
2145                 if (!IS_QLA82XX(ha))
2146                         host->sg_tablesize = QLA_SG_ALL;
2147         }
2148
2149         host->max_id = max_id;
2150         host->this_id = 255;
2151         host->cmd_per_lun = 3;
2152         host->unique_id = host->host_no;
2153         if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif)
2154                 host->max_cmd_len = 32;
2155         else
2156                 host->max_cmd_len = MAX_CMDSZ;
2157         host->max_channel = MAX_BUSES - 1;
2158         host->max_lun = ql2xmaxlun;
2159         host->transportt = qla2xxx_transport_template;
2160         sht->vendor_id = (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC);
2161
2162         /* Set up the irqs */
2163         ret = qla2x00_request_irqs(ha, rsp);
2164         if (ret)
2165                 goto probe_init_failed;
2166
2167         pci_save_state(pdev);
2168
2169         /* Alloc arrays of request and response ring ptrs */
2170 que_init:
2171         if (!qla2x00_alloc_queues(ha)) {
2172                 qla_printk(KERN_WARNING, ha,
2173                 "[ERROR] Failed to allocate memory for queue"
2174                 " pointers\n");
2175                 goto probe_init_failed;
2176         }
2177
2178         ha->rsp_q_map[0] = rsp;
2179         ha->req_q_map[0] = req;
2180         rsp->req = req;
2181         req->rsp = rsp;
2182         set_bit(0, ha->req_qid_map);
2183         set_bit(0, ha->rsp_qid_map);
2184         /* FWI2-capable only. */
2185         req->req_q_in = &ha->iobase->isp24.req_q_in;
2186         req->req_q_out = &ha->iobase->isp24.req_q_out;
2187         rsp->rsp_q_in = &ha->iobase->isp24.rsp_q_in;
2188         rsp->rsp_q_out = &ha->iobase->isp24.rsp_q_out;
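             /* Multi-queue capable adapters use the MQ register window instead. */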
2189         if (ha->mqenable) {
2190                 req->req_q_in = &ha->mqiobase->isp25mq.req_q_in;
2191                 req->req_q_out = &ha->mqiobase->isp25mq.req_q_out;
2192                 rsp->rsp_q_in = &ha->mqiobase->isp25mq.rsp_q_in;
2193                 rsp->rsp_q_out =  &ha->mqiobase->isp25mq.rsp_q_out;
2194         }
2195
2196         if (IS_QLA82XX(ha)) {
2197                 req->req_q_out = &ha->iobase->isp82.req_q_out[0];
2198                 rsp->rsp_q_in = &ha->iobase->isp82.rsp_q_in[0];
2199                 rsp->rsp_q_out = &ha->iobase->isp82.rsp_q_out[0];
2200         }
2201
2202         if (qla2x00_initialize_adapter(base_vha)) {
2203                 qla_printk(KERN_WARNING, ha,
2204                     "Failed to initialize adapter\n");
2205
2206                 DEBUG2(printk("scsi(%ld): Failed to initialize adapter - "
2207                     "Adapter flags %x.\n",
2208                     base_vha->host_no, base_vha->device_flags));
2209
2210                 if (IS_QLA82XX(ha)) {
2211                         qla82xx_idc_lock(ha);
2212                         qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
2213                                 QLA82XX_DEV_FAILED);
2214                         qla82xx_idc_unlock(ha);
2215                         qla_printk(KERN_INFO, ha, "HW State: FAILED\n");
2216                 }
2217
2218                 ret = -ENODEV;
2219                 goto probe_failed;
2220         }
2221
2222         if (ha->mqenable) {
2223                 if (qla25xx_setup_mode(base_vha)) {
2224                         qla_printk(KERN_WARNING, ha,
2225                                 "Can't create queues, falling back to single"
2226                                 " queue mode\n");
2227                         goto que_init;
2228                 }
2229         }
2230
2231         if (ha->flags.running_gold_fw)
2232                 goto skip_dpc;
2233
2234         /*
2235          * Startup the kernel thread for this host adapter
2236          */
2237         ha->dpc_thread = kthread_create(qla2x00_do_dpc, ha,
2238                         "%s_dpc", base_vha->host_str);
2239         if (IS_ERR(ha->dpc_thread)) {
2240                 qla_printk(KERN_WARNING, ha,
2241                     "Unable to start DPC thread!\n");
2242                 ret = PTR_ERR(ha->dpc_thread);
2243                 goto probe_failed;
2244         }
2245
2246 skip_dpc:
2247         list_add_tail(&base_vha->list, &ha->vp_list);
2248         base_vha->host->irq = ha->pdev->irq;
2249
2250         /* Initialize the timer. */
2251         qla2x00_start_timer(base_vha, qla2x00_timer, WATCH_INTERVAL);
2252
2253         DEBUG2(printk("DEBUG: detect hba %ld at address = %p\n",
2254             base_vha->host_no, ha));
2255
2256         if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif) {
2257                 if (ha->fw_attributes & BIT_4) {
2258                         base_vha->flags.difdix_supported = 1;
2259                         DEBUG18(qla_printk(KERN_INFO, ha,
2260                             "Registering for DIF/DIX type 1, 2 and 3"
2261                             " protection.\n"));
2262                         scsi_host_set_prot(host,
2263                             SHOST_DIF_TYPE1_PROTECTION
2264                             | SHOST_DIF_TYPE2_PROTECTION
2265                             | SHOST_DIF_TYPE3_PROTECTION
2266                             | SHOST_DIX_TYPE1_PROTECTION
2267                             | SHOST_DIX_TYPE2_PROTECTION
2268                             | SHOST_DIX_TYPE3_PROTECTION);
2269                         scsi_host_set_guard(host, SHOST_DIX_GUARD_CRC);
2270                 } else
2271                         base_vha->flags.difdix_supported = 0;
2272         }
2273
2274         ha->isp_ops->enable_intrs(ha);
2275
2276         ret = scsi_add_host(host, &pdev->dev);
2277         if (ret)
2278                 goto probe_failed;
2279
2280         base_vha->flags.init_done = 1;
2281         base_vha->flags.online = 1;
2282
2283         scsi_scan_host(host);
2284
2285         qla2x00_alloc_sysfs_attr(base_vha);
2286
2287         qla2x00_init_host_attr(base_vha);
2288
2289         qla2x00_dfs_setup(base_vha);
2290
2291         qla_printk(KERN_INFO, ha, "\n"
2292             " QLogic Fibre Channel HBA Driver: %s\n"
2293             "  QLogic %s - %s\n"
2294             "  ISP%04X: %s @ %s hdma%c, host#=%ld, fw=%s\n",
2295             qla2x00_version_str, ha->model_number,
2296             ha->model_desc ? ha->model_desc : "", pdev->device,
2297             ha->isp_ops->pci_info_str(base_vha, pci_info), pci_name(pdev),
2298             ha->flags.enable_64bit_addressing ? '+' : '-', base_vha->host_no,
2299             ha->isp_ops->fw_version_str(base_vha, fw_str));
2300
2301         return 0;
2302
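             /*
              * Error unwind: each label below releases only what was set up
              * before the corresponding failure point.
              */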
2303 probe_init_failed:
2304         qla2x00_free_req_que(ha, req);
2305         qla2x00_free_rsp_que(ha, rsp);
2306         ha->max_req_queues = ha->max_rsp_queues = 0;
2307
2308 probe_failed:
2309         if (base_vha->timer_active)
2310                 qla2x00_stop_timer(base_vha);
2311         base_vha->flags.online = 0;
2312         if (ha->dpc_thread) {
2313                 struct task_struct *t = ha->dpc_thread;
2314
2315                 ha->dpc_thread = NULL;
2316                 kthread_stop(t);
2317         }
2318
2319         qla2x00_free_device(base_vha);
2320
2321         scsi_host_put(base_vha->host);
2322
2323 probe_hw_failed:
2324         if (IS_QLA82XX(ha)) {
2325                 qla82xx_idc_lock(ha);
2326                 qla82xx_clear_drv_active(ha);
2327                 qla82xx_idc_unlock(ha);
2328                 iounmap((device_reg_t __iomem *)ha->nx_pcibase);
2329                 if (!ql2xdbwr)
2330                         iounmap((device_reg_t __iomem *)ha->nxdb_wr_ptr);
2331         } else {
2332                 if (ha->iobase)
2333                         iounmap(ha->iobase);
2334         }
2335         pci_release_selected_regions(ha->pdev, ha->bars);
2336         kfree(ha);
2337         ha = NULL;
2338
2339 probe_out:
2340         pci_disable_device(pdev);
2341         return ret;
2342 }
2343
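     /*
      * Quiesce the adapter at shutdown: stop FCE/EFT tracing and the
      * firmware, take the adapter offline and release its interrupts so
      * the hardware is idle before the system powers down or reboots.
      */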
2344 static void
2345 qla2x00_shutdown(struct pci_dev *pdev)
2346 {
2347         scsi_qla_host_t *vha;
2348         struct qla_hw_data  *ha;
2349
2350         vha = pci_get_drvdata(pdev);
2351         ha = vha->hw;
2352
2353         /* Turn-off FCE trace */
2354         if (ha->flags.fce_enabled) {
2355                 qla2x00_disable_fce_trace(vha, NULL, NULL);
2356                 ha->flags.fce_enabled = 0;
2357         }
2358
2359         /* Turn-off EFT trace */
2360         if (ha->eft)
2361                 qla2x00_disable_eft_trace(vha);
2362
2363         /* Stop currently executing firmware. */
2364         qla2x00_try_to_stop_firmware(vha);
2365
2366         /* Take the adapter offline. */
2367         vha->flags.online = 0;
2368
2369         /* turn-off interrupts on the card */
2370         if (ha->interrupts_on) {
2371                 vha->flags.init_done = 0;
2372                 ha->isp_ops->disable_intrs(ha);
2373         }
2374
2375         qla2x00_free_irqs(vha);
2376
2377         qla2x00_free_fw_dump(ha);
2378 }
2379
2380 static void
2381 qla2x00_remove_one(struct pci_dev *pdev)
2382 {
2383         scsi_qla_host_t *base_vha, *vha;
2384         struct qla_hw_data  *ha;
2385         unsigned long flags;
2386
2387         base_vha = pci_get_drvdata(pdev);
2388         ha = base_vha->hw;
2389
2390         mutex_lock(&ha->vport_lock);
2391         while (ha->cur_vport_count) {
2392                 struct Scsi_Host *scsi_host;
2393
2394                 spin_lock_irqsave(&ha->vport_slock, flags);
2395
2396                 BUG_ON(base_vha->list.next == &ha->vp_list);
2397                 /* This assumes first entry in ha->vp_list is always base vha */
2398                 vha = list_first_entry(&base_vha->list, scsi_qla_host_t, list);
2399                 scsi_host = scsi_host_get(vha->host);
2400
2401                 spin_unlock_irqrestore(&ha->vport_slock, flags);
2402                 mutex_unlock(&ha->vport_lock);
2403
2404                 fc_vport_terminate(vha->fc_vport);
2405                 scsi_host_put(vha->host);
2406
2407                 mutex_lock(&ha->vport_lock);
2408         }
2409         mutex_unlock(&ha->vport_lock);
2410
2411         set_bit(UNLOADING, &base_vha->dpc_flags);
2412
2413         qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);
2414
2415         qla2x00_dfs_remove(base_vha);
2416
2417         qla84xx_put_chip(base_vha);
2418
2419         /* Disable timer */
2420         if (base_vha->timer_active)
2421                 qla2x00_stop_timer(base_vha);
2422
2423         base_vha->flags.online = 0;
2424
2425         /* Flush the work queue and remove it */
2426         if (ha->wq) {
2427                 flush_workqueue(ha->wq);
2428                 destroy_workqueue(ha->wq);
2429                 ha->wq = NULL;
2430         }
2431
2432         /* Kill the kernel thread for this host */
2433         if (ha->dpc_thread) {
2434                 struct task_struct *t = ha->dpc_thread;
2435
2436                 /*
2437                  * qla2xxx_wake_dpc checks for ->dpc_thread
2438                  * so we need to zero it out.
2439                  */
2440                 ha->dpc_thread = NULL;
2441                 kthread_stop(t);
2442         }
2443
2444         qla2x00_free_sysfs_attr(base_vha);
2445
2446         fc_remove_host(base_vha->host);
2447
2448         scsi_remove_host(base_vha->host);
2449
2450         qla2x00_free_device(base_vha);
2451
2452         scsi_host_put(base_vha->host);
2453
2454         if (IS_QLA82XX(ha)) {
2455                 qla82xx_idc_lock(ha);
2456                 qla82xx_clear_drv_active(ha);
2457                 qla82xx_idc_unlock(ha);
2458
2459                 iounmap((device_reg_t __iomem *)ha->nx_pcibase);
2460                 if (!ql2xdbwr)
2461                         iounmap((device_reg_t __iomem *)ha->nxdb_wr_ptr);
2462         } else {
2463                 if (ha->iobase)
2464                         iounmap(ha->iobase);
2465
2466                 if (ha->mqiobase)
2467                         iounmap(ha->mqiobase);
2468         }
2469
2470         pci_release_selected_regions(ha->pdev, ha->bars);
2471         kfree(ha);
2472         ha = NULL;
2473
2474         pci_disable_pcie_error_reporting(pdev);
2475
2476         pci_disable_device(pdev);
2477         pci_set_drvdata(pdev, NULL);
2478 }
2479
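     /*
      * Final per-host teardown: abort outstanding commands, stop the timer
      * and DPC thread, disable tracing and the firmware, then release
      * interrupts, fcports, adapter memory and queues.
      */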
2480 static void
2481 qla2x00_free_device(scsi_qla_host_t *vha)
2482 {
2483         struct qla_hw_data *ha = vha->hw;
2484
2485         qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
2486
2487         /* Disable timer */
2488         if (vha->timer_active)
2489                 qla2x00_stop_timer(vha);
2490
2491         /* Kill the kernel thread for this host */
2492         if (ha->dpc_thread) {
2493                 struct task_struct *t = ha->dpc_thread;
2494
2495                 /*
2496                  * qla2xxx_wake_dpc checks for ->dpc_thread
2497                  * so we need to zero it out.
2498                  */
2499                 ha->dpc_thread = NULL;
2500                 kthread_stop(t);
2501         }
2502
2503         qla25xx_delete_queues(vha);
2504
2505         if (ha->flags.fce_enabled)
2506                 qla2x00_disable_fce_trace(vha, NULL, NULL);
2507
2508         if (ha->eft)
2509                 qla2x00_disable_eft_trace(vha);
2510
2511         /* Stop currently executing firmware. */
2512         qla2x00_try_to_stop_firmware(vha);
2513
2514         vha->flags.online = 0;
2515
2516         /* turn-off interrupts on the card */
2517         if (ha->interrupts_on) {
2518                 vha->flags.init_done = 0;
2519                 ha->isp_ops->disable_intrs(ha);
2520         }
2521
2522         qla2x00_free_irqs(vha);
2523
2524         qla2x00_free_fcports(vha);
2525
2526         qla2x00_mem_free(ha);
2527
2528         qla2x00_free_queues(ha);
2529 }
2530
2531 void qla2x00_free_fcports(struct scsi_qla_host *vha)
2532 {
2533         fc_port_t *fcport, *tfcport;
2534
2535         list_for_each_entry_safe(fcport, tfcport, &vha->vp_fcports, list) {
2536                 list_del(&fcport->list);
2537                 kfree(fcport);
2538                 fcport = NULL;
2539         }
2540 }
2541
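     /*
      * Schedule removal of an fcport's rport.  With defer set, the actual
      * fc_remote_port_delete() is handed off to the DPC thread via the
      * FCPORT_UPDATE_NEEDED flag; otherwise the rport is deleted here.
      */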
2542 static inline void
2543 qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport,
2544     int defer)
2545 {
2546         struct fc_rport *rport;
2547         scsi_qla_host_t *base_vha;
2548         unsigned long flags;
2549
2550         if (!fcport->rport)
2551                 return;
2552
2553         rport = fcport->rport;
2554         if (defer) {
2555                 base_vha = pci_get_drvdata(vha->hw->pdev);
2556                 spin_lock_irqsave(vha->host->host_lock, flags);
2557                 fcport->drport = rport;
2558                 spin_unlock_irqrestore(vha->host->host_lock, flags);
2559                 set_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
2560                 qla2xxx_wake_dpc(base_vha);
2561         } else
2562                 fc_remote_port_delete(rport);
2563 }
2564
2565 /*
2566  * qla2x00_mark_device_lost - Update fcport state when a device goes offline.
2567  *
2568  * Input: vha = adapter block pointer.  fcport = port structure pointer.
2569  *        do_login = schedule a relogin when set.
2570  *        defer = defer rport removal to the DPC thread.
2571  *
2572  * Return: None.
2573  */
2574 void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport,
2575     int do_login, int defer)
2576 {
2577         if (atomic_read(&fcport->state) == FCS_ONLINE &&
2578             vha->vp_idx == fcport->vp_idx) {
2579                 qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
2580                 qla2x00_schedule_rport_del(vha, fcport, defer);
2581         }
2582         /*
2583          * We may need to retry the login, so don't mark the port dead;
2584          * leave it in the DEVICE_LOST state while the retries run.
2585          */
2586         if (atomic_read(&fcport->state) != FCS_DEVICE_DEAD)
2587                 qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
2588
2589         if (!do_login)
2590                 return;
2591
2592         if (fcport->login_retry == 0) {
2593                 fcport->login_retry = vha->hw->login_retry_count;
2594                 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
2595
2596                 DEBUG(printk("scsi(%ld): Port login retry: "
2597                     "%02x%02x%02x%02x%02x%02x%02x%02x, "
2598                     "id = 0x%04x retry cnt=%d\n",
2599                     vha->host_no,
2600                     fcport->port_name[0],
2601                     fcport->port_name[1],
2602                     fcport->port_name[2],
2603                     fcport->port_name[3],
2604                     fcport->port_name[4],
2605                     fcport->port_name[5],
2606                     fcport->port_name[6],
2607                     fcport->port_name[7],
2608                     fcport->loop_id,
2609                     fcport->login_retry));
2610         }
2611 }
2612
2613 /*
2614  * qla2x00_mark_all_devices_lost
2615  *      Marks every fcport on the host (or vport) as lost.
2616  *
2617  * Input:
2618  *      vha = adapter block pointer.
2619  *      defer = defer rport removal to the DPC thread.
2620  *
2621  * Return:
2622  *      None.
2623  *
2624  * Context:
2625  */
2626 void
2627 qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer)
2628 {
2629         fc_port_t *fcport;
2630
2631         list_for_each_entry(fcport, &vha->vp_fcports, list) {
2632                 if (vha->vp_idx != 0 && vha->vp_idx != fcport->vp_idx)
2633                         continue;
2634
2635                 /*
2636                  * No point in marking the device as lost if it is
2637                  * already DEAD.
2638                  */
2639                 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD)
2640                         continue;
2641                 if (atomic_read(&fcport->state) == FCS_ONLINE) {
2642                         qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
2643                         if (defer)
2644                                 qla2x00_schedule_rport_del(vha, fcport, defer);
2645                         else if (vha->vp_idx == fcport->vp_idx)
2646                                 qla2x00_schedule_rport_del(vha, fcport, defer);
2647                 }
2648         }
2649 }
2650
2651 /*
2652 * qla2x00_mem_alloc
2653 *      Allocates adapter memory.
2654 *
2655 * Returns:
2656 *      0  = success.
2657 *      !0  = failure.
2658 */
2659 static int
2660 qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
2661         struct req_que **req, struct rsp_que **rsp)
2662 {
2663         char    name[16];
2664
2665         ha->init_cb = dma_alloc_coherent(&ha->pdev->dev, ha->init_cb_size,
2666                 &ha->init_cb_dma, GFP_KERNEL);
2667         if (!ha->init_cb)
2668                 goto fail;
2669
2670         ha->gid_list = dma_alloc_coherent(&ha->pdev->dev, GID_LIST_SIZE,
2671                 &ha->gid_list_dma, GFP_KERNEL);
2672         if (!ha->gid_list)
2673                 goto fail_free_init_cb;
2674
2675         ha->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep);
2676         if (!ha->srb_mempool)
2677                 goto fail_free_gid_list;
2678
2679         if (IS_QLA82XX(ha)) {
2680                 /* Allocate cache for CT6 Ctx. */
2681                 if (!ctx_cachep) {
2682                         ctx_cachep = kmem_cache_create("qla2xxx_ctx",
2683                                 sizeof(struct ct6_dsd), 0,
2684                                 SLAB_HWCACHE_ALIGN, NULL);
2685                         if (!ctx_cachep)
2686                                 goto fail_free_gid_list;
2687                 }
2688                 ha->ctx_mempool = mempool_create_slab_pool(SRB_MIN_REQ,
2689                         ctx_cachep);
2690                 if (!ha->ctx_mempool)
2691                         goto fail_free_srb_mempool;
2692         }
2693
2694         /* Get memory for cached NVRAM */
2695         ha->nvram = kzalloc(MAX_NVRAM_SIZE, GFP_KERNEL);
2696         if (!ha->nvram)
2697                 goto fail_free_ctx_mempool;
2698
2699         snprintf(name, sizeof(name), "%s_%d", QLA2XXX_DRIVER_NAME,
2700                 ha->pdev->device);
2701         ha->s_dma_pool = dma_pool_create(name, &ha->pdev->dev,
2702                 DMA_POOL_SIZE, 8, 0);
2703         if (!ha->s_dma_pool)
2704                 goto fail_free_nvram;
2705
2706         if (IS_QLA82XX(ha) || ql2xenabledif) {
2707                 ha->dl_dma_pool = dma_pool_create(name, &ha->pdev->dev,
2708                         DSD_LIST_DMA_POOL_SIZE, 8, 0);
2709                 if (!ha->dl_dma_pool) {
2710                         qla_printk(KERN_WARNING, ha,
2711                             "Memory Allocation failed - dl_dma_pool\n");
2712                         goto fail_s_dma_pool;
2713                 }
2714
2715                 ha->fcp_cmnd_dma_pool = dma_pool_create(name, &ha->pdev->dev,
2716                         FCP_CMND_DMA_POOL_SIZE, 8, 0);
2717                 if (!ha->fcp_cmnd_dma_pool) {
2718                         qla_printk(KERN_WARNING, ha,
2719                             "Memory Allocation failed - fcp_cmnd_dma_pool\n");
2720                         goto fail_dl_dma_pool;
2721                 }
2722         }
2723
2724         /* Allocate memory for SNS commands */
2725         if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
2726                 /* Get consistent memory allocated for SNS commands */
2727                 ha->sns_cmd = dma_alloc_coherent(&ha->pdev->dev,
2728                 sizeof(struct sns_cmd_pkt), &ha->sns_cmd_dma, GFP_KERNEL);
2729                 if (!ha->sns_cmd)
2730                         goto fail_dma_pool;
2731         } else {
2732                 /* Get consistent memory allocated for MS IOCB */
2733                 ha->ms_iocb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
2734                         &ha->ms_iocb_dma);
2735                 if (!ha->ms_iocb)
2736                         goto fail_dma_pool;
2737                 /* Get consistent memory allocated for CT SNS commands */
2738                 ha->ct_sns = dma_alloc_coherent(&ha->pdev->dev,
2739                         sizeof(struct ct_sns_pkt), &ha->ct_sns_dma, GFP_KERNEL);
2740                 if (!ha->ct_sns)
2741                         goto fail_free_ms_iocb;
2742         }
2743
2744         /* Allocate memory for request ring */
2745         *req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
2746         if (!*req) {
2747                 DEBUG(printk("Unable to allocate memory for req\n"));
2748                 goto fail_req;
2749         }
2750         (*req)->length = req_len;
2751         (*req)->ring = dma_alloc_coherent(&ha->pdev->dev,
2752                 ((*req)->length + 1) * sizeof(request_t),
2753                 &(*req)->dma, GFP_KERNEL);
2754         if (!(*req)->ring) {
2755                 DEBUG(printk("Unable to allocate memory for req_ring\n"));
2756                 goto fail_req_ring;
2757         }
2758         /* Allocate memory for response ring */
2759         *rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
2760         if (!*rsp) {
2761                 qla_printk(KERN_WARNING, ha,
2762                         "Unable to allocate memory for rsp\n");
2763                 goto fail_rsp;
2764         }
2765         (*rsp)->hw = ha;
2766         (*rsp)->length = rsp_len;
2767         (*rsp)->ring = dma_alloc_coherent(&ha->pdev->dev,
2768                 ((*rsp)->length + 1) * sizeof(response_t),
2769                 &(*rsp)->dma, GFP_KERNEL);
2770         if (!(*rsp)->ring) {
2771                 qla_printk(KERN_WARNING, ha,
2772                         "Unable to allocate memory for rsp_ring\n");
2773                 goto fail_rsp_ring;
2774         }
2775         (*req)->rsp = *rsp;
2776         (*rsp)->req = *req;
2777         /* Allocate memory for NVRAM data for vports */
2778         if (ha->nvram_npiv_size) {
2779                 ha->npiv_info = kzalloc(sizeof(struct qla_npiv_entry) *
2780                                         ha->nvram_npiv_size, GFP_KERNEL);
2781                 if (!ha->npiv_info) {
2782                         qla_printk(KERN_WARNING, ha,
2783                                 "Unable to allocate memory for npiv info\n");
2784                         goto fail_npiv_info;
2785                 }
2786         } else
2787                 ha->npiv_info = NULL;
2788
2789         /* Get consistent memory allocated for EX-INIT-CB. */
2790         if (IS_QLA8XXX_TYPE(ha)) {
2791                 ha->ex_init_cb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
2792                     &ha->ex_init_cb_dma);
2793                 if (!ha->ex_init_cb)
2794                         goto fail_ex_init_cb;
2795         }
2796
2797         INIT_LIST_HEAD(&ha->gbl_dsd_list);
2798
2799         /* Get consistent memory allocated for Async Port-Database. */
2800         if (!IS_FWI2_CAPABLE(ha)) {
2801                 ha->async_pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
2802                         &ha->async_pd_dma);
2803                 if (!ha->async_pd)
2804                         goto fail_async_pd;
2805         }
2806
2807         INIT_LIST_HEAD(&ha->vp_list);
2808         return 0;
2809
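             /*
              * Failure unwind: a goto to any label below falls through the
              * remaining labels, releasing earlier allocations in reverse order.
              */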
2810 fail_async_pd:
2811         dma_pool_free(ha->s_dma_pool, ha->ex_init_cb, ha->ex_init_cb_dma);
2812 fail_ex_init_cb:
2813         kfree(ha->npiv_info);
2814 fail_npiv_info:
2815         dma_free_coherent(&ha->pdev->dev, ((*rsp)->length + 1) *
2816                 sizeof(response_t), (*rsp)->ring, (*rsp)->dma);
2817         (*rsp)->ring = NULL;
2818         (*rsp)->dma = 0;
2819 fail_rsp_ring:
2820         kfree(*rsp);
2821 fail_rsp:
2822         dma_free_coherent(&ha->pdev->dev, ((*req)->length + 1) *
2823                 sizeof(request_t), (*req)->ring, (*req)->dma);
2824         (*req)->ring = NULL;
2825         (*req)->dma = 0;
2826 fail_req_ring:
2827         kfree(*req);
2828 fail_req:
2829         dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt),
2830                 ha->ct_sns, ha->ct_sns_dma);
2831         ha->ct_sns = NULL;
2832         ha->ct_sns_dma = 0;
2833 fail_free_ms_iocb:
2834         dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
2835         ha->ms_iocb = NULL;
2836         ha->ms_iocb_dma = 0;
2837 fail_dma_pool:
2838         if (IS_QLA82XX(ha) || ql2xenabledif) {
2839                 dma_pool_destroy(ha->fcp_cmnd_dma_pool);
2840                 ha->fcp_cmnd_dma_pool = NULL;
2841         }
2842 fail_dl_dma_pool:
2843         if (IS_QLA82XX(ha) || ql2xenabledif) {
2844                 dma_pool_destroy(ha->dl_dma_pool);
2845                 ha->dl_dma_pool = NULL;
2846         }
2847 fail_s_dma_pool:
2848         dma_pool_destroy(ha->s_dma_pool);
2849         ha->s_dma_pool = NULL;
2850 fail_free_nvram:
2851         kfree(ha->nvram);
2852         ha->nvram = NULL;
2853 fail_free_ctx_mempool:
2854         mempool_destroy(ha->ctx_mempool);
2855         ha->ctx_mempool = NULL;
2856 fail_free_srb_mempool:
2857         mempool_destroy(ha->srb_mempool);
2858         ha->srb_mempool = NULL;
2859 fail_free_gid_list:
2860         dma_free_coherent(&ha->pdev->dev, GID_LIST_SIZE, ha->gid_list,
2861         ha->gid_list_dma);
2862         ha->gid_list = NULL;
2863         ha->gid_list_dma = 0;
2864 fail_free_init_cb:
2865         dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, ha->init_cb,
2866         ha->init_cb_dma);
2867         ha->init_cb = NULL;
2868         ha->init_cb_dma = 0;
2869 fail:
2870         DEBUG(printk("%s: Memory allocation failure\n", __func__));
2871         return -ENOMEM;
2872 }
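
     /*
      * Note on the error path above: allocations are unwound with the usual
      * goto-cascade pattern.  Each fail_* label frees only what was
      * successfully allocated before the failing step, in reverse order of
      * allocation, so a failure at any point leaves no leaked DMA buffers,
      * pools or mempools behind.
      */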
2873
2874 /*
2875 * qla2x00_free_fw_dump
2876 *       Frees the firmware dump, FCE and EFT buffers.
2877 *
2878 * Input:
2879 *       ha = adapter block pointer.
2880 */
2881 static void
2882 qla2x00_free_fw_dump(struct qla_hw_data *ha)
2883 {
2884         if (ha->fce)
2885                 dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce,
2886                     ha->fce_dma);
2887
2888         if (ha->fw_dump) {
2889                 if (ha->eft)
2890                         dma_free_coherent(&ha->pdev->dev,
2891                             ntohl(ha->fw_dump->eft_size), ha->eft, ha->eft_dma);
2892                 vfree(ha->fw_dump);
2893         }
2894         ha->fce = NULL;
2895         ha->fce_dma = 0;
2896         ha->eft = NULL;
2897         ha->eft_dma = 0;
2898         ha->fw_dump = NULL;
2899         ha->fw_dumped = 0;
2900         ha->fw_dump_reading = 0;
2901 }
2902
2903 /*
2904 * qla2x00_mem_free
2905 *      Frees all adapter allocated memory.
2906 *
2907 * Input:
2908 *      ha = adapter block pointer.
2909 */
2910 static void
2911 qla2x00_mem_free(struct qla_hw_data *ha)
2912 {
2913         qla2x00_free_fw_dump(ha);
2914
2915         if (ha->srb_mempool)
2916                 mempool_destroy(ha->srb_mempool);
2917
2918         if (ha->dcbx_tlv)
2919                 dma_free_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
2920                     ha->dcbx_tlv, ha->dcbx_tlv_dma);
2921
2922         if (ha->xgmac_data)
2923                 dma_free_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
2924                     ha->xgmac_data, ha->xgmac_data_dma);
2925
2926         if (ha->sns_cmd)
2927                 dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt),
2928                 ha->sns_cmd, ha->sns_cmd_dma);
2929
2930         if (ha->ct_sns)
2931                 dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt),
2932                 ha->ct_sns, ha->ct_sns_dma);
2933
2934         if (ha->sfp_data)
2935                 dma_pool_free(ha->s_dma_pool, ha->sfp_data, ha->sfp_data_dma);
2936
2937         if (ha->edc_data)
2938                 dma_pool_free(ha->s_dma_pool, ha->edc_data, ha->edc_data_dma);
2939
2940         if (ha->ms_iocb)
2941                 dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
2942
2943         if (ha->ex_init_cb)
2944                 dma_pool_free(ha->s_dma_pool,
2945                         ha->ex_init_cb, ha->ex_init_cb_dma);
2946
2947         if (ha->async_pd)
2948                 dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma);
2949
2950         if (ha->s_dma_pool)
2951                 dma_pool_destroy(ha->s_dma_pool);
2952
2953         if (ha->gid_list)
2954                 dma_free_coherent(&ha->pdev->dev, GID_LIST_SIZE, ha->gid_list,
2955                 ha->gid_list_dma);
2956
2957         if (IS_QLA82XX(ha)) {
2958                 if (!list_empty(&ha->gbl_dsd_list)) {
2959                         struct dsd_dma *dsd_ptr, *tdsd_ptr;
2960
2961                         /* clean up allocated prev pool */
2962                         list_for_each_entry_safe(dsd_ptr,
2963                                 tdsd_ptr, &ha->gbl_dsd_list, list) {
2964                                 dma_pool_free(ha->dl_dma_pool,
2965                                 dsd_ptr->dsd_addr, dsd_ptr->dsd_list_dma);
2966                                 list_del(&dsd_ptr->list);
2967                                 kfree(dsd_ptr);
2968                         }
2969                 }
2970         }
2971
2972         if (ha->dl_dma_pool)
2973                 dma_pool_destroy(ha->dl_dma_pool);
2974
2975         if (ha->fcp_cmnd_dma_pool)
2976                 dma_pool_destroy(ha->fcp_cmnd_dma_pool);
2977
2978         if (ha->ctx_mempool)
2979                 mempool_destroy(ha->ctx_mempool);
2980
2981         if (ha->init_cb)
2982                 dma_free_coherent(&ha->pdev->dev, ha->init_cb_size,
2983                         ha->init_cb, ha->init_cb_dma);
2984         vfree(ha->optrom_buffer);
2985         kfree(ha->nvram);
2986         kfree(ha->npiv_info);
2987
2988         ha->srb_mempool = NULL;
2989         ha->ctx_mempool = NULL;
2990         ha->sns_cmd = NULL;
2991         ha->sns_cmd_dma = 0;
2992         ha->ct_sns = NULL;
2993         ha->ct_sns_dma = 0;
2994         ha->ms_iocb = NULL;
2995         ha->ms_iocb_dma = 0;
2996         ha->init_cb = NULL;
2997         ha->init_cb_dma = 0;
2998         ha->ex_init_cb = NULL;
2999         ha->ex_init_cb_dma = 0;
3000         ha->async_pd = NULL;
3001         ha->async_pd_dma = 0;
3002
3003         ha->s_dma_pool = NULL;
3004         ha->dl_dma_pool = NULL;
3005         ha->fcp_cmnd_dma_pool = NULL;
3006
3007         ha->gid_list = NULL;
3008         ha->gid_list_dma = 0;
3009 }
3010
3011 struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
3012                                                 struct qla_hw_data *ha)
3013 {
3014         struct Scsi_Host *host;
3015         struct scsi_qla_host *vha = NULL;
3016
3017         host = scsi_host_alloc(sht, sizeof(scsi_qla_host_t));
3018         if (host == NULL) {
3019                 printk(KERN_WARNING
3020                 "qla2xxx: Couldn't allocate host from scsi layer!\n");
3021                 goto fail;
3022         }
3023
3024         /* Clear our data area */
3025         vha = shost_priv(host);
3026         memset(vha, 0, sizeof(scsi_qla_host_t));
3027
3028         vha->host = host;
3029         vha->host_no = host->host_no;
3030         vha->hw = ha;
3031
3032         INIT_LIST_HEAD(&vha->vp_fcports);
3033         INIT_LIST_HEAD(&vha->work_list);
3034         INIT_LIST_HEAD(&vha->list);
3035
3036         spin_lock_init(&vha->work_lock);
3037
3038         sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no);
3039         return vha;
3040
3041 fail:
3042         return vha;
3043 }
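
     /*
      * Usage sketch (illustrative only, not a call site in this file): the
      * probe path allocates the host through qla2x00_create_host() and, once
      * the hardware has been initialized, registers it with the SCSI
      * midlayer.  Error handling is trimmed and probe_fail is a hypothetical
      * label:
      *
      *        base_vha = qla2x00_create_host(sht, ha);
      *        if (!base_vha)
      *                goto probe_fail;
      *        ...
      *        if (scsi_add_host(base_vha->host, &ha->pdev->dev))
      *                goto probe_fail;
      */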
3044
3045 static struct qla_work_evt *
3046 qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type)
3047 {
3048         struct qla_work_evt *e;
3049         uint8_t bail;
3050
3051         QLA_VHA_MARK_BUSY(vha, bail);
3052         if (bail)
3053                 return NULL;
3054
3055         e = kzalloc(sizeof(struct qla_work_evt), GFP_ATOMIC);
3056         if (!e) {
3057                 QLA_VHA_MARK_NOT_BUSY(vha);
3058                 return NULL;
3059         }
3060
3061         INIT_LIST_HEAD(&e->list);
3062         e->type = type;
3063         e->flags = QLA_EVT_FLAG_FREE;
3064         return e;
3065 }
3066
3067 static int
3068 qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
3069 {
3070         unsigned long flags;
3071
3072         spin_lock_irqsave(&vha->work_lock, flags);
3073         list_add_tail(&e->list, &vha->work_list);
3074         spin_unlock_irqrestore(&vha->work_lock, flags);
3075         qla2xxx_wake_dpc(vha);
3076
3077         return QLA_SUCCESS;
3078 }
3079
3080 int
3081 qla2x00_post_aen_work(struct scsi_qla_host *vha, enum fc_host_event_code code,
3082     u32 data)
3083 {
3084         struct qla_work_evt *e;
3085
3086         e = qla2x00_alloc_work(vha, QLA_EVT_AEN);
3087         if (!e)
3088                 return QLA_FUNCTION_FAILED;
3089
3090         e->u.aen.code = code;
3091         e->u.aen.data = data;
3092         return qla2x00_post_work(vha, e);
3093 }
3094
3095 int
3096 qla2x00_post_idc_ack_work(struct scsi_qla_host *vha, uint16_t *mb)
3097 {
3098         struct qla_work_evt *e;
3099
3100         e = qla2x00_alloc_work(vha, QLA_EVT_IDC_ACK);
3101         if (!e)
3102                 return QLA_FUNCTION_FAILED;
3103
3104         memcpy(e->u.idc_ack.mb, mb, QLA_IDC_ACK_REGS * sizeof(uint16_t));
3105         return qla2x00_post_work(vha, e);
3106 }
3107
3108 #define qla2x00_post_async_work(name, type)     \
3109 int qla2x00_post_async_##name##_work(           \
3110     struct scsi_qla_host *vha,                  \
3111     fc_port_t *fcport, uint16_t *data)          \
3112 {                                               \
3113         struct qla_work_evt *e;                 \
3114                                                 \
3115         e = qla2x00_alloc_work(vha, type);      \
3116         if (!e)                                 \
3117                 return QLA_FUNCTION_FAILED;     \
3118                                                 \
3119         e->u.logio.fcport = fcport;             \
3120         if (data) {                             \
3121                 e->u.logio.data[0] = data[0];   \
3122                 e->u.logio.data[1] = data[1];   \
3123         }                                       \
3124         return qla2x00_post_work(vha, e);       \
3125 }
3126
3127 qla2x00_post_async_work(login, QLA_EVT_ASYNC_LOGIN);
3128 qla2x00_post_async_work(login_done, QLA_EVT_ASYNC_LOGIN_DONE);
3129 qla2x00_post_async_work(logout, QLA_EVT_ASYNC_LOGOUT);
3130 qla2x00_post_async_work(logout_done, QLA_EVT_ASYNC_LOGOUT_DONE);
3131 qla2x00_post_async_work(adisc, QLA_EVT_ASYNC_ADISC);
3132 qla2x00_post_async_work(adisc_done, QLA_EVT_ASYNC_ADISC_DONE);
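
     /*
      * Each expansion above generates a helper of the form
      * qla2x00_post_async_<name>_work(vha, fcport, data) that queues the
      * matching QLA_EVT_ASYNC_* event for the DPC thread.  The relogin path
      * below posts an asynchronous login roughly like this (sketch only; see
      * qla2x00_relogin() for the real call site):
      *
      *        uint16_t data[2] = { 0, QLA_LOGIO_LOGIN_RETRIED };
      *
      *        status = qla2x00_post_async_login_work(vha, fcport, data);
      *
      * Adapters without async-login support (!IS_ALOGIO_CAPABLE) use the
      * synchronous qla2x00_fabric_login() path instead.
      */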
3133
3134 int
3135 qla2x00_post_uevent_work(struct scsi_qla_host *vha, u32 code)
3136 {
3137         struct qla_work_evt *e;
3138
3139         e = qla2x00_alloc_work(vha, QLA_EVT_UEVENT);
3140         if (!e)
3141                 return QLA_FUNCTION_FAILED;
3142
3143         e->u.uevent.code = code;
3144         return qla2x00_post_work(vha, e);
3145 }
3146
3147 static void
3148 qla2x00_uevent_emit(struct scsi_qla_host *vha, u32 code)
3149 {
3150         char event_string[40];
3151         char *envp[] = { event_string, NULL };
3152
3153         switch (code) {
3154         case QLA_UEVENT_CODE_FW_DUMP:
3155                 snprintf(event_string, sizeof(event_string), "FW_DUMP=%ld",
3156                     vha->host_no);
3157                 break;
3158         default:
3159                 /* do nothing */
3160                 break;
3161         }
3162         kobject_uevent_env(&vha->hw->pdev->dev.kobj, KOBJ_CHANGE, envp);
3163 }
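
     /*
      * Note: for QLA_UEVENT_CODE_FW_DUMP the routine above emits a KOBJ_CHANGE
      * uevent on the PCI device with "FW_DUMP=<host_no>" in its environment,
      * which userspace (for example a udev rule matching ENV{FW_DUMP}) can use
      * to trigger retrieval of the firmware dump.
      */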
3164
3165 void
3166 qla2x00_do_work(struct scsi_qla_host *vha)
3167 {
3168         struct qla_work_evt *e, *tmp;
3169         unsigned long flags;
3170         LIST_HEAD(work);
3171
3172         spin_lock_irqsave(&vha->work_lock, flags);
3173         list_splice_init(&vha->work_list, &work);
3174         spin_unlock_irqrestore(&vha->work_lock, flags);
3175
3176         list_for_each_entry_safe(e, tmp, &work, list) {
3177                 list_del_init(&e->list);
3178
3179                 switch (e->type) {
3180                 case QLA_EVT_AEN:
3181                         fc_host_post_event(vha->host, fc_get_event_number(),
3182                             e->u.aen.code, e->u.aen.data);
3183                         break;
3184                 case QLA_EVT_IDC_ACK:
3185                         qla81xx_idc_ack(vha, e->u.idc_ack.mb);
3186                         break;
3187                 case QLA_EVT_ASYNC_LOGIN:
3188                         qla2x00_async_login(vha, e->u.logio.fcport,
3189                             e->u.logio.data);
3190                         break;
3191                 case QLA_EVT_ASYNC_LOGIN_DONE:
3192                         qla2x00_async_login_done(vha, e->u.logio.fcport,
3193                             e->u.logio.data);
3194                         break;
3195                 case QLA_EVT_ASYNC_LOGOUT:
3196                         qla2x00_async_logout(vha, e->u.logio.fcport);
3197                         break;
3198                 case QLA_EVT_ASYNC_LOGOUT_DONE:
3199                         qla2x00_async_logout_done(vha, e->u.logio.fcport,
3200                             e->u.logio.data);
3201                         break;
3202                 case QLA_EVT_ASYNC_ADISC:
3203                         qla2x00_async_adisc(vha, e->u.logio.fcport,
3204                             e->u.logio.data);
3205                         break;
3206                 case QLA_EVT_ASYNC_ADISC_DONE:
3207                         qla2x00_async_adisc_done(vha, e->u.logio.fcport,
3208                             e->u.logio.data);
3209                         break;
3210                 case QLA_EVT_UEVENT:
3211                         qla2x00_uevent_emit(vha, e->u.uevent.code);
3212                         break;
3213                 }
3214                 if (e->flags & QLA_EVT_FLAG_FREE)
3215                         kfree(e);
3216
3217                 /* For each work completed decrement vha ref count */
3218                 QLA_VHA_MARK_NOT_BUSY(vha);
3219         }
3220 }
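
     /*
      * The qla2x00_post_*_work() helpers above are the producer side of this
      * queue: they allocate a qla_work_evt, append it to vha->work_list and
      * wake the DPC thread, which drains the list here from qla2x00_do_dpc().
      * QLA_VHA_MARK_BUSY/QLA_VHA_MARK_NOT_BUSY keep the vha referenced while
      * it still has queued work.
      */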
3221
3222 /* Relogin all the fcports of a vport
3223  * Context: dpc thread
3224  */
3225 void qla2x00_relogin(struct scsi_qla_host *vha)
3226 {
3227         fc_port_t       *fcport;
3228         int status;
3229         uint16_t        next_loopid = 0;
3230         struct qla_hw_data *ha = vha->hw;
3231         uint16_t data[2];
3232
3233         list_for_each_entry(fcport, &vha->vp_fcports, list) {
3234         /*
3235          * If the port is not ONLINE then try to login
3236          * to it if we haven't run out of retries.
3237          */
3238                 if (atomic_read(&fcport->state) != FCS_ONLINE &&
3239                     fcport->login_retry && !(fcport->flags & FCF_ASYNC_SENT)) {
3240                         fcport->login_retry--;
3241                         if (fcport->flags & FCF_FABRIC_DEVICE) {
3242                                 if (fcport->flags & FCF_FCP2_DEVICE)
3243                                         ha->isp_ops->fabric_logout(vha,
3244                                                         fcport->loop_id,
3245                                                         fcport->d_id.b.domain,
3246                                                         fcport->d_id.b.area,
3247                                                         fcport->d_id.b.al_pa);
3248
3249                                 if (fcport->loop_id == FC_NO_LOOP_ID) {
3250                                         fcport->loop_id = next_loopid =
3251                                             ha->min_external_loopid;
3252                                         status = qla2x00_find_new_loop_id(
3253                                             vha, fcport);
3254                                         if (status != QLA_SUCCESS) {
3255                                                 /* Ran out of IDs to use */
3256                                                 break;
3257                                         }
3258                                 }
3259
3260                                 if (IS_ALOGIO_CAPABLE(ha)) {
3261                                         fcport->flags |= FCF_ASYNC_SENT;
3262                                         data[0] = 0;
3263                                         data[1] = QLA_LOGIO_LOGIN_RETRIED;
3264                                         status = qla2x00_post_async_login_work(
3265                                             vha, fcport, data);
3266                                         if (status == QLA_SUCCESS)
3267                                                 continue;
3268                                         /* Attempt a retry. */
3269                                         status = 1;
3270                                 } else
3271                                         status = qla2x00_fabric_login(vha,
3272                                             fcport, &next_loopid);
3273                         } else
3274                                 status = qla2x00_local_device_login(vha,
3275                                                                 fcport);
3276
3277                         if (status == QLA_SUCCESS) {
3278                                 fcport->old_loop_id = fcport->loop_id;
3279
3280                                 DEBUG(printk("scsi(%ld): port login OK: logged "
3281                                 "in ID 0x%x\n", vha->host_no, fcport->loop_id));
3282
3283                                 qla2x00_update_fcport(vha, fcport);
3284
3285                         } else if (status == 1) {
3286                                 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
3287                                 /* retry the login again */
3288                                 DEBUG(printk("scsi(%ld): Retrying"
3289                                 " %d login again loop_id 0x%x\n",
3290                                 vha->host_no, fcport->login_retry,
3291                                                 fcport->loop_id));
3292                         } else {
3293                                 fcport->login_retry = 0;
3294                         }
3295
3296                         if (fcport->login_retry == 0 && status != QLA_SUCCESS)
3297                                 fcport->loop_id = FC_NO_LOOP_ID;
3298                 }
3299                 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
3300                         break;
3301         }
3302 }
3303
3304 /**************************************************************************
3305 * qla2x00_do_dpc
3306 *   This kernel thread is a task that is scheduled by the interrupt
3307 *   handler to perform the background processing for interrupts.
3308 *
3309 * Notes:
3310 * This task always runs in the context of a kernel thread.  It
3311 * is kicked off by the driver's detect code and starts up
3312 * one instance per adapter.  It immediately goes to sleep and waits
3313 * for some fibre event.  When either the interrupt handler or
3314 * the timer routine detects an event, it sets one of the task
3315 * bits and then wakes us up.
3316 **************************************************************************/
3317 static int
3318 qla2x00_do_dpc(void *data)
3319 {
3320         int             rval;
3321         scsi_qla_host_t *base_vha;
3322         struct qla_hw_data *ha;
3323
3324         ha = (struct qla_hw_data *)data;
3325         base_vha = pci_get_drvdata(ha->pdev);
3326
3327         set_user_nice(current, -20);
3328
3329         set_current_state(TASK_INTERRUPTIBLE);
3330         while (!kthread_should_stop()) {
3331                 DEBUG3(printk("qla2x00: DPC handler sleeping\n"));
3332
3333                 schedule();
3334                 __set_current_state(TASK_RUNNING);
3335
3336                 DEBUG3(printk("qla2x00: DPC handler waking up\n"));
3337
3338                 /* Initialization not yet finished. Don't do anything yet. */
3339                 if (!base_vha->flags.init_done)
3340                         continue;
3341
3342                 if (ha->flags.eeh_busy) {
3343                         DEBUG17(qla_printk(KERN_WARNING, ha,
3344                             "qla2x00_do_dpc: dpc_flags: %lx\n",
3345                             base_vha->dpc_flags));
3346                         continue;
3347                 }
3348
3349                 DEBUG3(printk("scsi(%ld): DPC handler\n", base_vha->host_no));
3350
3351                 ha->dpc_active = 1;
3352
3353                 if (ha->flags.mbox_busy) {
3354                         ha->dpc_active = 0;
3355                         continue;
3356                 }
3357
3358                 qla2x00_do_work(base_vha);
3359
3360                 if (IS_QLA82XX(ha)) {
3361                         if (test_and_clear_bit(ISP_UNRECOVERABLE,
3362                                 &base_vha->dpc_flags)) {
3363                                 qla82xx_idc_lock(ha);
3364                                 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3365                                         QLA82XX_DEV_FAILED);
3366                                 qla82xx_idc_unlock(ha);
3367                                 qla_printk(KERN_INFO, ha,
3368                                         "HW State: FAILED\n");
3369                                 qla82xx_device_state_handler(base_vha);
3370                                 continue;
3371                         }
3372
3373                         if (test_and_clear_bit(FCOE_CTX_RESET_NEEDED,
3374                                 &base_vha->dpc_flags)) {
3375
3376                                 DEBUG(printk(KERN_INFO
3377                                         "scsi(%ld): dpc: sched "
3378                                         "qla82xx_fcoe_ctx_reset ha = %p\n",
3379                                         base_vha->host_no, ha));
3380                                 if (!(test_and_set_bit(ABORT_ISP_ACTIVE,
3381                                         &base_vha->dpc_flags))) {
3382                                         if (qla82xx_fcoe_ctx_reset(base_vha)) {
3383                                                 /* FCoE-ctx reset failed.
3384                                                  * Escalate to chip-reset
3385                                                  */
3386                                                 set_bit(ISP_ABORT_NEEDED,
3387                                                         &base_vha->dpc_flags);
3388                                         }
3389                                         clear_bit(ABORT_ISP_ACTIVE,
3390                                                 &base_vha->dpc_flags);
3391                                 }
3392
3393                                 DEBUG(printk("scsi(%ld): dpc:"
3394                                         " qla82xx_fcoe_ctx_reset end\n",
3395                                         base_vha->host_no));
3396                         }
3397                 }
3398
3399                 if (test_and_clear_bit(ISP_ABORT_NEEDED,
3400                                                 &base_vha->dpc_flags)) {
3401
3402                         DEBUG(printk("scsi(%ld): dpc: sched "
3403                             "qla2x00_abort_isp ha = %p\n",
3404                             base_vha->host_no, ha));
3405                         if (!(test_and_set_bit(ABORT_ISP_ACTIVE,
3406                             &base_vha->dpc_flags))) {
3407
3408                                 if (ha->isp_ops->abort_isp(base_vha)) {
3409                                         /* failed. retry later */
3410                                         set_bit(ISP_ABORT_NEEDED,
3411                                             &base_vha->dpc_flags);
3412                                 }
3413                                 clear_bit(ABORT_ISP_ACTIVE,
3414                                                 &base_vha->dpc_flags);
3415                         }
3416
3417                         DEBUG(printk("scsi(%ld): dpc: qla2x00_abort_isp end\n",
3418                             base_vha->host_no));
3419                 }
3420
3421                 if (test_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags)) {
3422                         qla2x00_update_fcports(base_vha);
3423                         clear_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
3424                 }
3425
3426                 if (test_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags)) {
3427                         DEBUG(printk(KERN_INFO "scsi(%ld): dpc: sched "
3428                             "qla2x00_quiesce_needed ha = %p\n",
3429                             base_vha->host_no, ha));
3430                         qla82xx_device_state_handler(base_vha);
3431                         clear_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags);
3432                         if (!ha->flags.quiesce_owner) {
3433                                 qla2x00_perform_loop_resync(base_vha);
3434
3435                                 qla82xx_idc_lock(ha);
3436                                 qla82xx_clear_qsnt_ready(base_vha);
3437                                 qla82xx_idc_unlock(ha);
3438                         }
3439                 }
3440
3441                 if (test_and_clear_bit(RESET_MARKER_NEEDED,
3442                                                         &base_vha->dpc_flags) &&
3443                     (!(test_and_set_bit(RESET_ACTIVE, &base_vha->dpc_flags)))) {
3444
3445                         DEBUG(printk("scsi(%ld): qla2x00_reset_marker()\n",
3446                             base_vha->host_no));
3447
3448                         qla2x00_rst_aen(base_vha);
3449                         clear_bit(RESET_ACTIVE, &base_vha->dpc_flags);
3450                 }
3451
3452                 /* Retry each device up to login retry count */
3453                 if ((test_and_clear_bit(RELOGIN_NEEDED,
3454                                                 &base_vha->dpc_flags)) &&
3455                     !test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags) &&
3456                     atomic_read(&base_vha->loop_state) != LOOP_DOWN) {
3457
3458                         DEBUG(printk("scsi(%ld): qla2x00_port_login()\n",
3459                                         base_vha->host_no));
3460                         qla2x00_relogin(base_vha);
3461
3462                         DEBUG(printk("scsi(%ld): qla2x00_port_login - end\n",
3463                             base_vha->host_no));
3464                 }
3465
3466                 if (test_and_clear_bit(LOOP_RESYNC_NEEDED,
3467                                                         &base_vha->dpc_flags)) {
3468
3469                         DEBUG(printk("scsi(%ld): qla2x00_loop_resync()\n",
3470                                 base_vha->host_no));
3471
3472                         if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE,
3473                             &base_vha->dpc_flags))) {
3474
3475                                 rval = qla2x00_loop_resync(base_vha);
3476
3477                                 clear_bit(LOOP_RESYNC_ACTIVE,
3478                                                 &base_vha->dpc_flags);
3479                         }
3480
3481                         DEBUG(printk("scsi(%ld): qla2x00_loop_resync - end\n",
3482                             base_vha->host_no));
3483                 }
3484
3485                 if (test_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags) &&
3486                     atomic_read(&base_vha->loop_state) == LOOP_READY) {
3487                         clear_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags);
3488                         qla2xxx_flash_npiv_conf(base_vha);
3489                 }
3490
3491                 if (!ha->interrupts_on)
3492                         ha->isp_ops->enable_intrs(ha);
3493
3494                 if (test_and_clear_bit(BEACON_BLINK_NEEDED,
3495                                         &base_vha->dpc_flags))
3496                         ha->isp_ops->beacon_blink(base_vha);
3497
3498                 qla2x00_do_dpc_all_vps(base_vha);
3499
3500                 ha->dpc_active = 0;
3501                 set_current_state(TASK_INTERRUPTIBLE);
3502         } /* End of while(1) */
3503         __set_current_state(TASK_RUNNING);
3504
3505         DEBUG(printk("scsi(%ld): DPC handler exiting\n", base_vha->host_no));
3506
3507         /*
3508          * Make sure that nobody tries to wake us up again.
3509          */
3510         ha->dpc_active = 0;
3511
3512         /* Cleanup any residual CTX SRBs. */
3513         qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);
3514
3515         return 0;
3516 }
3517
3518 void
3519 qla2xxx_wake_dpc(struct scsi_qla_host *vha)
3520 {
3521         struct qla_hw_data *ha = vha->hw;
3522         struct task_struct *t = ha->dpc_thread;
3523
3524         if (!test_bit(UNLOADING, &vha->dpc_flags) && t)
3525                 wake_up_process(t);
3526 }
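
     /*
      * Typical way work is handed to the DPC thread (sketch only; see
      * qla2x00_timer() below and the interrupt handlers for real users):
      *
      *        set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
      *        qla2xxx_wake_dpc(vha);
      *
      * The loop in qla2x00_do_dpc() then test_and_clear_bit()s the flag and
      * performs the requested action.
      */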
3527
3528 /*
3529 *  qla2x00_rst_aen
3530 *      Processes asynchronous reset.
3531 *
3532 * Input:
3533 *      vha = adapter block pointer.
3534 */
3535 static void
3536 qla2x00_rst_aen(scsi_qla_host_t *vha)
3537 {
3538         if (vha->flags.online && !vha->flags.reset_active &&
3539             !atomic_read(&vha->loop_down_timer) &&
3540             !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))) {
3541                 do {
3542                         clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
3543
3544                         /*
3545                          * Issue marker command only when we are going to start
3546                          * the I/O.
3547                          */
3548                         vha->marker_needed = 1;
3549                 } while (!atomic_read(&vha->loop_down_timer) &&
3550                     (test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags)));
3551         }
3552 }
3553
3554 static void
3555 qla2x00_sp_free_dma(srb_t *sp)
3556 {
3557         struct scsi_cmnd *cmd = sp->cmd;
3558         struct qla_hw_data *ha = sp->fcport->vha->hw;
3559
3560         if (sp->flags & SRB_DMA_VALID) {
3561                 scsi_dma_unmap(cmd);
3562                 sp->flags &= ~SRB_DMA_VALID;
3563         }
3564
3565         if (sp->flags & SRB_CRC_PROT_DMA_VALID) {
3566                 dma_unmap_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
3567                     scsi_prot_sg_count(cmd), cmd->sc_data_direction);
3568                 sp->flags &= ~SRB_CRC_PROT_DMA_VALID;
3569         }
3570
3571         if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
3572                 /* List is guaranteed to have elements */
3573                 qla2x00_clean_dsd_pool(ha, sp);
3574                 sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
3575         }
3576
3577         if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
3578                 dma_pool_free(ha->dl_dma_pool, sp->ctx,
3579                     ((struct crc_context *)sp->ctx)->crc_ctx_dma);
3580                 sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
3581         }
3582
3583         CMD_SP(cmd) = NULL;
3584 }
3585
3586 static void
3587 qla2x00_sp_final_compl(struct qla_hw_data *ha, srb_t *sp)
3588 {
3589         struct scsi_cmnd *cmd = sp->cmd;
3590
3591         qla2x00_sp_free_dma(sp);
3592
3593         if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
3594                 struct ct6_dsd *ctx = sp->ctx;
3595                 dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd,
3596                         ctx->fcp_cmnd_dma);
3597                 list_splice(&ctx->dsd_list, &ha->gbl_dsd_list);
3598                 ha->gbl_dsd_inuse -= ctx->dsd_use_cnt;
3599                 ha->gbl_dsd_avail += ctx->dsd_use_cnt;
3600                 mempool_free(sp->ctx, ha->ctx_mempool);
3601                 sp->ctx = NULL;
3602         }
3603
3604         mempool_free(sp, ha->srb_mempool);
3605         cmd->scsi_done(cmd);
3606 }
3607
3608 void
3609 qla2x00_sp_compl(struct qla_hw_data *ha, srb_t *sp)
3610 {
3611         if (atomic_read(&sp->ref_count) == 0) {
3612                 DEBUG2(qla_printk(KERN_WARNING, ha,
3613                     "SP reference-count to ZERO -- sp=%p\n", sp));
3614                 DEBUG2(BUG());
3615                 return;
3616         }
3617         if (!atomic_dec_and_test(&sp->ref_count))
3618                 return;
3619         qla2x00_sp_final_compl(ha, sp);
3620 }
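
     /*
      * sp->ref_count based completion: the normal I/O completion path and the
      * abort/error paths may both call qla2x00_sp_compl() for the same srb;
      * only the caller that drops the last reference reaches
      * qla2x00_sp_final_compl(), which tears down the DMA mappings and calls
      * scsi_done().  Illustrative pattern (not a call site in this file):
      *
      *        atomic_inc(&sp->ref_count);        // take an extra reference
      *        ...                                // hand sp to another context
      *        qla2x00_sp_compl(ha, sp);          // drop it; last drop completes
      */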
3621
3622 /**************************************************************************
3623 *   qla2x00_timer
3624 *
3625 * Description:
3626 *   One second timer
3627 *
3628 * Context: Interrupt
3629 ***************************************************************************/
3630 void
3631 qla2x00_timer(scsi_qla_host_t *vha)
3632 {
3633         unsigned long   cpu_flags = 0;
3634         int             start_dpc = 0;
3635         int             index;
3636         srb_t           *sp;
3637         uint16_t        w;
3638         struct qla_hw_data *ha = vha->hw;
3639         struct req_que *req;
3640
3641         if (ha->flags.eeh_busy) {
3642                 qla2x00_restart_timer(vha, WATCH_INTERVAL);
3643                 return;
3644         }
3645
3646         /* Hardware read to raise pending EEH errors during mailbox waits. */
3647         if (!pci_channel_offline(ha->pdev))
3648                 pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
3649
3650         /* Make sure qla82xx_watchdog is run only for physical port */
3651         if (!vha->vp_idx && IS_QLA82XX(ha)) {
3652                 if (test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags))
3653                         start_dpc++;
3654                 qla82xx_watchdog(vha);
3655         }
3656
3657         /* Loop down handler. */
3658         if (atomic_read(&vha->loop_down_timer) > 0 &&
3659             !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) &&
3660             !(test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags))
3661                 && vha->flags.online) {
3662
3663                 if (atomic_read(&vha->loop_down_timer) ==
3664                     vha->loop_down_abort_time) {
3665
3666                         DEBUG(printk("scsi(%ld): Loop Down - aborting the "
3667                             "queues before time expire\n",
3668                             vha->host_no));
3669
3670                         if (!IS_QLA2100(ha) && vha->link_down_timeout)
3671                                 atomic_set(&vha->loop_state, LOOP_DEAD);
3672
3673                         /*
3674                          * Schedule an ISP abort to return any FCP2-device
3675                          * commands.
3676                          */
3677                         /* NPIV - scan physical port only */
3678                         if (!vha->vp_idx) {
3679                                 spin_lock_irqsave(&ha->hardware_lock,
3680                                     cpu_flags);
3681                                 req = ha->req_q_map[0];
3682                                 for (index = 1;
3683                                     index < MAX_OUTSTANDING_COMMANDS;
3684                                     index++) {
3685                                         fc_port_t *sfcp;
3686
3687                                         sp = req->outstanding_cmds[index];
3688                                         if (!sp)
3689                                                 continue;
3690                                         if (sp->ctx && !IS_PROT_IO(sp))
3691                                                 continue;
3692                                         sfcp = sp->fcport;
3693                                         if (!(sfcp->flags & FCF_FCP2_DEVICE))
3694                                                 continue;
3695
3696                                         if (IS_QLA82XX(ha))
3697                                                 set_bit(FCOE_CTX_RESET_NEEDED,
3698                                                         &vha->dpc_flags);
3699                                         else
3700                                                 set_bit(ISP_ABORT_NEEDED,
3701                                                         &vha->dpc_flags);
3702                                         break;
3703                                 }
3704                                 spin_unlock_irqrestore(&ha->hardware_lock,
3705                                                                 cpu_flags);
3706                         }
3707                         start_dpc++;
3708                 }
3709
3710                 /* if the loop has been down for 4 minutes, reinit adapter */
3711                 if (atomic_dec_and_test(&vha->loop_down_timer) != 0) {
3712                         if (!(vha->device_flags & DFLG_NO_CABLE)) {
3713                                 DEBUG(printk("scsi(%ld): Loop down - "
3714                                     "aborting ISP.\n",
3715                                     vha->host_no));
3716                                 qla_printk(KERN_WARNING, ha,
3717                                     "Loop down - aborting ISP.\n");
3718
3719                                 if (IS_QLA82XX(ha))
3720                                         set_bit(FCOE_CTX_RESET_NEEDED,
3721                                                 &vha->dpc_flags);
3722                                 else
3723                                         set_bit(ISP_ABORT_NEEDED,
3724                                                 &vha->dpc_flags);
3725                         }
3726                 }
3727                 DEBUG3(printk("scsi(%ld): Loop Down - seconds remaining %d\n",
3728                     vha->host_no,
3729                     atomic_read(&vha->loop_down_timer)));
3730         }
3731
3732         /* Check if beacon LED needs to be blinked for physical host only */
3733         if (!vha->vp_idx && (ha->beacon_blink_led == 1)) {
3734                 set_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags);
3735                 start_dpc++;
3736         }
3737
3738         /* Process any deferred work. */
3739         if (!list_empty(&vha->work_list))
3740                 start_dpc++;
3741
3742         /* Schedule the DPC routine if needed */
3743         if ((test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
3744             test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) ||
3745             test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags) ||
3746             start_dpc ||
3747             test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) ||
3748             test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags) ||
3749             test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags) ||
3750             test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) ||
3751             test_bit(VP_DPC_NEEDED, &vha->dpc_flags) ||
3752             test_bit(RELOGIN_NEEDED, &vha->dpc_flags)))
3753                 qla2xxx_wake_dpc(vha);
3754
3755         qla2x00_restart_timer(vha, WATCH_INTERVAL);
3756 }
3757
3758 /* Firmware interface routines. */
3759
3760 #define FW_BLOBS        8
3761 #define FW_ISP21XX      0
3762 #define FW_ISP22XX      1
3763 #define FW_ISP2300      2
3764 #define FW_ISP2322      3
3765 #define FW_ISP24XX      4
3766 #define FW_ISP25XX      5
3767 #define FW_ISP81XX      6
3768 #define FW_ISP82XX      7
3769
3770 #define FW_FILE_ISP21XX "ql2100_fw.bin"
3771 #define FW_FILE_ISP22XX "ql2200_fw.bin"
3772 #define FW_FILE_ISP2300 "ql2300_fw.bin"
3773 #define FW_FILE_ISP2322 "ql2322_fw.bin"
3774 #define FW_FILE_ISP24XX "ql2400_fw.bin"
3775 #define FW_FILE_ISP25XX "ql2500_fw.bin"
3776 #define FW_FILE_ISP81XX "ql8100_fw.bin"
3777 #define FW_FILE_ISP82XX "ql8200_fw.bin"
3778
3779 static DEFINE_MUTEX(qla_fw_lock);
3780
3781 static struct fw_blob qla_fw_blobs[FW_BLOBS] = {
3782         { .name = FW_FILE_ISP21XX, .segs = { 0x1000, 0 }, },
3783         { .name = FW_FILE_ISP22XX, .segs = { 0x1000, 0 }, },
3784         { .name = FW_FILE_ISP2300, .segs = { 0x800, 0 }, },
3785         { .name = FW_FILE_ISP2322, .segs = { 0x800, 0x1c000, 0x1e000, 0 }, },
3786         { .name = FW_FILE_ISP24XX, },
3787         { .name = FW_FILE_ISP25XX, },
3788         { .name = FW_FILE_ISP81XX, },
3789         { .name = FW_FILE_ISP82XX, },
3790 };
3791
3792 struct fw_blob *
3793 qla2x00_request_firmware(scsi_qla_host_t *vha)
3794 {
3795         struct qla_hw_data *ha = vha->hw;
3796         struct fw_blob *blob;
3797
3798         blob = NULL;
3799         if (IS_QLA2100(ha)) {
3800                 blob = &qla_fw_blobs[FW_ISP21XX];
3801         } else if (IS_QLA2200(ha)) {
3802                 blob = &qla_fw_blobs[FW_ISP22XX];
3803         } else if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
3804                 blob = &qla_fw_blobs[FW_ISP2300];
3805         } else if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
3806                 blob = &qla_fw_blobs[FW_ISP2322];
3807         } else if (IS_QLA24XX_TYPE(ha)) {
3808                 blob = &qla_fw_blobs[FW_ISP24XX];
3809         } else if (IS_QLA25XX(ha)) {
3810                 blob = &qla_fw_blobs[FW_ISP25XX];
3811         } else if (IS_QLA81XX(ha)) {
3812                 blob = &qla_fw_blobs[FW_ISP81XX];
3813         } else if (IS_QLA82XX(ha)) {
3814                 blob = &qla_fw_blobs[FW_ISP82XX];
3815         }
3816 
             /* No firmware image registered for this ISP type. */
             if (!blob) {
                     qla_printk(KERN_WARNING, ha,
                         "Unable to locate firmware image for ISP.\n");
                     return NULL;
             }

3817         mutex_lock(&qla_fw_lock);
3818         if (blob->fw)
3819                 goto out;
3820
3821         if (request_firmware(&blob->fw, blob->name, &ha->pdev->dev)) {
3822                 DEBUG2(printk("scsi(%ld): Failed to load firmware image "
3823                     "(%s).\n", vha->host_no, blob->name));
3824                 blob->fw = NULL;
3825                 blob = NULL;
3826                 goto out;
3827         }
3828
3829 out:
3830         mutex_unlock(&qla_fw_lock);
3831         return blob;
3832 }
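
     /*
      * Usage sketch (illustrative only; the real callers live in the ISP
      * firmware-load code outside this file).  blob->fw is the struct
      * firmware handle obtained from request_firmware(), and load_image() is
      * a hypothetical stand-in for the ISP-specific loader:
      *
      *        struct fw_blob *blob = qla2x00_request_firmware(vha);
      *
      *        if (blob)
      *                load_image(vha, blob->fw->data, blob->fw->size);
      */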
3833
3834 static void
3835 qla2x00_release_firmware(void)
3836 {
3837         int idx;
3838
3839         mutex_lock(&qla_fw_lock);
3840         for (idx = 0; idx < FW_BLOBS; idx++)
3841                 if (qla_fw_blobs[idx].fw)
3842                         release_firmware(qla_fw_blobs[idx].fw);
3843         mutex_unlock(&qla_fw_lock);
3844 }
3845
3846 static pci_ers_result_t
3847 qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
3848 {
3849         scsi_qla_host_t *vha = pci_get_drvdata(pdev);
3850         struct qla_hw_data *ha = vha->hw;
3851
3852         DEBUG2(qla_printk(KERN_WARNING, ha, "error_detected:state %x\n",
3853             state));
3854
3855         switch (state) {
3856         case pci_channel_io_normal:
3857                 ha->flags.eeh_busy = 0;
3858                 return PCI_ERS_RESULT_CAN_RECOVER;
3859         case pci_channel_io_frozen:
3860                 ha->flags.eeh_busy = 1;
3861                 /* For ISP82XX complete any pending mailbox cmd */
3862                 if (IS_QLA82XX(ha)) {
3863                         ha->flags.isp82xx_fw_hung = 1;
3864                         if (ha->flags.mbox_busy) {
3865                                 ha->flags.mbox_int = 1;
3866                                 DEBUG2(qla_printk(KERN_ERR, ha,
3867                                         "Due to pci channel io frozen, doing premature "
3868                                         "completion of mbx command\n"));
3869                                 complete(&ha->mbx_intr_comp);
3870                         }
3871                 }
3872                 qla2x00_free_irqs(vha);
3873                 pci_disable_device(pdev);
3874                 /* Return back all IOs */
3875                 qla2x00_abort_all_cmds(vha, DID_RESET << 16);
3876                 return PCI_ERS_RESULT_NEED_RESET;
3877         case pci_channel_io_perm_failure:
3878                 ha->flags.pci_channel_io_perm_failure = 1;
3879                 qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
3880                 return PCI_ERS_RESULT_DISCONNECT;
3881         }
3882         return PCI_ERS_RESULT_NEED_RESET;
3883 }
3884
3885 static pci_ers_result_t
3886 qla2xxx_pci_mmio_enabled(struct pci_dev *pdev)
3887 {
3888         int risc_paused = 0;
3889         uint32_t stat;
3890         unsigned long flags;
3891         scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
3892         struct qla_hw_data *ha = base_vha->hw;
3893         struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
3894         struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
3895
3896         if (IS_QLA82XX(ha))
3897                 return PCI_ERS_RESULT_RECOVERED;
3898
3899         spin_lock_irqsave(&ha->hardware_lock, flags);
3900         if (IS_QLA2100(ha) || IS_QLA2200(ha)){
3901                 stat = RD_REG_DWORD(&reg->hccr);
3902                 if (stat & HCCR_RISC_PAUSE)
3903                         risc_paused = 1;
3904         } else if (IS_QLA23XX(ha)) {
3905                 stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
3906                 if (stat & HSR_RISC_PAUSED)
3907                         risc_paused = 1;
3908         } else if (IS_FWI2_CAPABLE(ha)) {
3909                 stat = RD_REG_DWORD(&reg24->host_status);
3910                 if (stat & HSRX_RISC_PAUSED)
3911                         risc_paused = 1;
3912         }
3913         spin_unlock_irqrestore(&ha->hardware_lock, flags);
3914
3915         if (risc_paused) {
3916                 qla_printk(KERN_INFO, ha, "RISC paused -- mmio_enabled, "
3917                     "Dumping firmware!\n");
3918                 ha->isp_ops->fw_dump(base_vha, 0);
3919
3920                 return PCI_ERS_RESULT_NEED_RESET;
3921         } else
3922                 return PCI_ERS_RESULT_RECOVERED;
3923 }
3924
3925 uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha)
3926 {
3927         uint32_t rval = QLA_FUNCTION_FAILED;
3928         uint32_t drv_active = 0;
3929         struct qla_hw_data *ha = base_vha->hw;
3930         int fn;
3931         struct pci_dev *other_pdev = NULL;
3932
3933         DEBUG17(qla_printk(KERN_INFO, ha,
3934             "scsi(%ld): In qla82xx_error_recovery\n", base_vha->host_no));
3935
3936         set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
3937
3938         if (base_vha->flags.online) {
3939                 /* Abort all outstanding commands,
3940                  * so that they can be requeued later */
3941                 qla2x00_abort_isp_cleanup(base_vha);
3942         }
3943 
3945         fn = PCI_FUNC(ha->pdev->devfn);
3946         while (fn > 0) {
3947                 fn--;
3948                 DEBUG17(qla_printk(KERN_INFO, ha,
3949                     "Finding pci device at function = 0x%x\n", fn));
3950                 other_pdev =
3951                     pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus),
3952                     ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
3953                     fn));
3954
3955                 if (!other_pdev)
3956                         continue;
3957                 if (atomic_read(&other_pdev->enable_cnt)) {
3958                         DEBUG17(qla_printk(KERN_INFO, ha,
3959                             "Found PCI func available and enabled at 0x%x\n",
3960                             fn));
3961                         pci_dev_put(other_pdev);
3962                         break;
3963                 }
3964                 pci_dev_put(other_pdev);
3965         }
3966
3967         if (!fn) {
3968                 /* Reset owner */
3969                 DEBUG17(qla_printk(KERN_INFO, ha,
3970                     "This devfn is reset owner = 0x%x\n", ha->pdev->devfn));
3971                 qla82xx_idc_lock(ha);
3972
3973                 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3974                     QLA82XX_DEV_INITIALIZING);
3975
3976                 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION,
3977                     QLA82XX_IDC_VERSION);
3978
3979                 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
3980                 DEBUG17(qla_printk(KERN_INFO, ha,
3981                     "drv_active = 0x%x\n", drv_active));
3982
3983                 qla82xx_idc_unlock(ha);
3984                 /* Reset if device is not already reset
3985                  * drv_active would be 0 if a reset has already been done
3986                  */
3987                 if (drv_active)
3988                         rval = qla82xx_start_firmware(base_vha);
3989                 else
3990                         rval = QLA_SUCCESS;
3991                 qla82xx_idc_lock(ha);
3992
3993                 if (rval != QLA_SUCCESS) {
3994                         qla_printk(KERN_INFO, ha, "HW State: FAILED\n");
3995                         qla82xx_clear_drv_active(ha);
3996                         qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3997                             QLA82XX_DEV_FAILED);
3998                 } else {
3999                         qla_printk(KERN_INFO, ha, "HW State: READY\n");
4000                         qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
4001                             QLA82XX_DEV_READY);
4002                         qla82xx_idc_unlock(ha);
4003                         ha->flags.isp82xx_fw_hung = 0;
4004                         rval = qla82xx_restart_isp(base_vha);
4005                         qla82xx_idc_lock(ha);
4006                         /* Clear driver state register */
4007                         qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, 0);
4008                         qla82xx_set_drv_active(base_vha);
4009                 }
4010                 qla82xx_idc_unlock(ha);
4011         } else {
4012                 DEBUG17(qla_printk(KERN_INFO, ha,
4013                     "This devfn is not reset owner = 0x%x\n", ha->pdev->devfn));
4014                 if ((qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE) ==
4015                     QLA82XX_DEV_READY)) {
4016                         ha->flags.isp82xx_fw_hung = 0;
4017                         rval = qla82xx_restart_isp(base_vha);
4018                         qla82xx_idc_lock(ha);
4019                         qla82xx_set_drv_active(base_vha);
4020                         qla82xx_idc_unlock(ha);
4021                 }
4022         }
4023         clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
4024
4025         return rval;
4026 }
4027
4028 static pci_ers_result_t
4029 qla2xxx_pci_slot_reset(struct pci_dev *pdev)
4030 {
4031         pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT;
4032         scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
4033         struct qla_hw_data *ha = base_vha->hw;
4034         struct rsp_que *rsp;
4035         int rc, retries = 10;
4036
4037         DEBUG17(qla_printk(KERN_WARNING, ha, "slot_reset\n"));
4038
4039         /* Workaround: the qla2xxx driver, which accesses hardware early in
4040          * the recovery path, needs the error state to be pci_channel_io_normal.
4041          * Otherwise mailbox commands time out.
4042          */
4043         pdev->error_state = pci_channel_io_normal;
4044
4045         pci_restore_state(pdev);
4046
4047         /* pci_restore_state() clears the saved_state flag of the device,
4048          * so save the state again to set the saved_state flag back.
4049          */
4050         pci_save_state(pdev);
4051
4052         if (ha->mem_only)
4053                 rc = pci_enable_device_mem(pdev);
4054         else
4055                 rc = pci_enable_device(pdev);
4056
4057         if (rc) {
4058                 qla_printk(KERN_WARNING, ha,
4059                     "Can't re-enable PCI device after reset.\n");
4060                 goto exit_slot_reset;
4061         }
4062
4063         rsp = ha->rsp_q_map[0];
4064         if (qla2x00_request_irqs(ha, rsp))
4065                 goto exit_slot_reset;
4066
4067         if (ha->isp_ops->pci_config(base_vha))
4068                 goto exit_slot_reset;
4069
4070         if (IS_QLA82XX(ha)) {
4071                 if (qla82xx_error_recovery(base_vha) == QLA_SUCCESS) {
4072                         ret = PCI_ERS_RESULT_RECOVERED;
4073                         goto exit_slot_reset;
4074                 } else
4075                         goto exit_slot_reset;
4076         }
4077
4078         while (ha->flags.mbox_busy && retries--)
4079                 msleep(1000);
4080
4081         set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
4082         if (ha->isp_ops->abort_isp(base_vha) == QLA_SUCCESS)
4083                 ret =  PCI_ERS_RESULT_RECOVERED;
4084         clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
4085 
4087 exit_slot_reset:
4088         DEBUG17(qla_printk(KERN_WARNING, ha,
4089             "slot_reset-return:ret=%x\n", ret));
4090
4091         return ret;
4092 }
4093
4094 static void
4095 qla2xxx_pci_resume(struct pci_dev *pdev)
4096 {
4097         scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
4098         struct qla_hw_data *ha = base_vha->hw;
4099         int ret;
4100
4101         DEBUG17(qla_printk(KERN_WARNING, ha, "pci_resume\n"));
4102
4103         ret = qla2x00_wait_for_hba_online(base_vha);
4104         if (ret != QLA_SUCCESS) {
4105                 qla_printk(KERN_ERR, ha,
4106                     "The device failed to resume I/O "
4107                     "from slot/link_reset.\n");
4108         }
4109
4110         pci_cleanup_aer_uncorrect_error_status(pdev);
4111
4112         ha->flags.eeh_busy = 0;
4113 }
4114
4115 static struct pci_error_handlers qla2xxx_err_handler = {
4116         .error_detected = qla2xxx_pci_error_detected,
4117         .mmio_enabled = qla2xxx_pci_mmio_enabled,
4118         .slot_reset = qla2xxx_pci_slot_reset,
4119         .resume = qla2xxx_pci_resume,
4120 };
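
     /*
      * These callbacks implement the standard PCI error recovery sequence for
      * AER/EEH: the core reports the error through .error_detected, may probe
      * the device through .mmio_enabled, resets the slot and calls
      * .slot_reset, and finally invokes .resume once traffic may restart.
      * The eeh_busy flag set in qla2xxx_pci_error_detected() is what makes
      * qla2x00_timer() and the DPC thread back off in the meantime.
      */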
4121
4122 static struct pci_device_id qla2xxx_pci_tbl[] = {
4123         { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2100) },
4124         { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2200) },
4125         { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2300) },
4126         { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2312) },
4127         { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2322) },
4128         { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP6312) },
4129         { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP6322) },
4130         { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2422) },
4131         { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2432) },
4132         { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8432) },
4133         { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5422) },
4134         { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5432) },
4135         { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2532) },
4136         { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8001) },
4137         { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8021) },
4138         { 0 },
4139 };
4140 MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl);
4141
4142 static struct pci_driver qla2xxx_pci_driver = {
4143         .name           = QLA2XXX_DRIVER_NAME,
4144         .driver         = {
4145                 .owner          = THIS_MODULE,
4146         },
4147         .id_table       = qla2xxx_pci_tbl,
4148         .probe          = qla2x00_probe_one,
4149         .remove         = qla2x00_remove_one,
4150         .shutdown       = qla2x00_shutdown,
4151         .err_handler    = &qla2xxx_err_handler,
4152 };
4153
4154 static struct file_operations apidev_fops = {
4155         .owner = THIS_MODULE,
4156         .llseek = noop_llseek,
4157 };
4158
4159 /**
4160  * qla2x00_module_init - Module initialization.
4161  **/
4162 static int __init
4163 qla2x00_module_init(void)
4164 {
4165         int ret = 0;
4166
4167         /* Allocate cache for SRBs. */
4168         srb_cachep = kmem_cache_create("qla2xxx_srbs", sizeof(srb_t), 0,
4169             SLAB_HWCACHE_ALIGN, NULL);
4170         if (srb_cachep == NULL) {
4171                 printk(KERN_ERR
4172                     "qla2xxx: Unable to allocate SRB cache...Failing load!\n");
4173                 return -ENOMEM;
4174         }
4175
4176         /* Derive version string. */
4177         strcpy(qla2x00_version_str, QLA2XXX_VERSION);
4178         if (ql2xextended_error_logging)
4179                 strcat(qla2x00_version_str, "-debug");
4180
4181         qla2xxx_transport_template =
4182             fc_attach_transport(&qla2xxx_transport_functions);
4183         if (!qla2xxx_transport_template) {
4184                 kmem_cache_destroy(srb_cachep);
4185                 return -ENODEV;
4186         }
4187
4188         apidev_major = register_chrdev(0, QLA2XXX_APIDEV, &apidev_fops);
4189         if (apidev_major < 0) {
4190                 printk(KERN_WARNING "qla2xxx: Unable to register char device "
4191                     "%s\n", QLA2XXX_APIDEV);
4192         }
4193
4194         qla2xxx_transport_vport_template =
4195             fc_attach_transport(&qla2xxx_transport_vport_functions);
4196         if (!qla2xxx_transport_vport_template) {
4197                 kmem_cache_destroy(srb_cachep);
4198                 fc_release_transport(qla2xxx_transport_template);
4199                 return -ENODEV;
4200         }
4201
4202         printk(KERN_INFO "QLogic Fibre Channel HBA Driver: %s\n",
4203             qla2x00_version_str);
4204         ret = pci_register_driver(&qla2xxx_pci_driver);
4205         if (ret) {
4206                 kmem_cache_destroy(srb_cachep);
4207                 fc_release_transport(qla2xxx_transport_template);
4208                 fc_release_transport(qla2xxx_transport_vport_template);
4209         }
4210         return ret;
4211 }
4212
4213 /**
4214  * qla2x00_module_exit - Module cleanup.
4215  **/
4216 static void __exit
4217 qla2x00_module_exit(void)
4218 {
4219         unregister_chrdev(apidev_major, QLA2XXX_APIDEV);
4220         pci_unregister_driver(&qla2xxx_pci_driver);
4221         qla2x00_release_firmware();
4222         kmem_cache_destroy(srb_cachep);
4223         if (ctx_cachep)
4224                 kmem_cache_destroy(ctx_cachep);
4225         fc_release_transport(qla2xxx_transport_template);
4226         fc_release_transport(qla2xxx_transport_vport_template);
4227 }
4228
4229 module_init(qla2x00_module_init);
4230 module_exit(qla2x00_module_exit);
4231
4232 MODULE_AUTHOR("QLogic Corporation");
4233 MODULE_DESCRIPTION("QLogic Fibre Channel HBA Driver");
4234 MODULE_LICENSE("GPL");
4235 MODULE_VERSION(QLA2XXX_VERSION);
4236 MODULE_FIRMWARE(FW_FILE_ISP21XX);
4237 MODULE_FIRMWARE(FW_FILE_ISP22XX);
4238 MODULE_FIRMWARE(FW_FILE_ISP2300);
4239 MODULE_FIRMWARE(FW_FILE_ISP2322);
4240 MODULE_FIRMWARE(FW_FILE_ISP24XX);
4241 MODULE_FIRMWARE(FW_FILE_ISP25XX);