drivers/scsi/qla2xxx/qla_isr.c
1 /*
2  * QLogic Fibre Channel HBA Driver
3  * Copyright (c)  2003-2011 QLogic Corporation
4  *
5  * See LICENSE.qla2xxx for copyright and licensing details.
6  */
7 #include "qla_def.h"
8
9 #include <linux/delay.h>
10 #include <linux/slab.h>
11 #include <scsi/scsi_tcq.h>
12 #include <scsi/scsi_bsg_fc.h>
13 #include <scsi/scsi_eh.h>
14
15 static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
16 static void qla2x00_process_completed_request(struct scsi_qla_host *,
17         struct req_que *, uint32_t);
18 static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
19 static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
20 static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
21         sts_entry_t *);
22
23 /**
24  * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
25  * @irq: interrupt number
26  * @dev_id: SCSI driver response queue context
27  *
28  * Called by system whenever the host adapter generates an interrupt.
29  *
30  * Returns handled flag.
31  */
32 irqreturn_t
33 qla2100_intr_handler(int irq, void *dev_id)
34 {
35         scsi_qla_host_t *vha;
36         struct qla_hw_data *ha;
37         struct device_reg_2xxx __iomem *reg;
38         int             status;
39         unsigned long   iter;
40         uint16_t        hccr;
41         uint16_t        mb[4];
42         struct rsp_que *rsp;
43         unsigned long   flags;
44
45         rsp = (struct rsp_que *) dev_id;
46         if (!rsp) {
47                 printk(KERN_INFO
48                     "%s(): NULL response queue pointer\n", __func__);
49                 return (IRQ_NONE);
50         }
51
52         ha = rsp->hw;
53         reg = &ha->iobase->isp;
54         status = 0;
55
56         spin_lock_irqsave(&ha->hardware_lock, flags);
57         vha = pci_get_drvdata(ha->pdev);
58         for (iter = 50; iter--; ) {
59                 hccr = RD_REG_WORD(&reg->hccr);
60                 if (hccr & HCCR_RISC_PAUSE) {
61                         if (pci_channel_offline(ha->pdev))
62                                 break;
63
64                         /*
65                          * Issue a "HARD" reset in order for the RISC interrupt
66                          * bit to be cleared.  Schedule a big hammer to get
67                          * out of the RISC PAUSED state.
68                          */
69                         WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
70                         RD_REG_WORD(&reg->hccr);
71
72                         ha->isp_ops->fw_dump(vha, 1);
73                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
74                         break;
75                 } else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0)
76                         break;
77
78                 if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
79                         WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
80                         RD_REG_WORD(&reg->hccr);
81
82                         /* Get mailbox data. */
83                         mb[0] = RD_MAILBOX_REG(ha, reg, 0);
84                         if (mb[0] > 0x3fff && mb[0] < 0x8000) {
85                                 qla2x00_mbx_completion(vha, mb[0]);
86                                 status |= MBX_INTERRUPT;
87                         } else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
88                                 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
89                                 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
90                                 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
91                                 qla2x00_async_event(vha, rsp, mb);
92                         } else {
93                                 /*EMPTY*/
94                                 DEBUG2(printk("scsi(%ld): Unrecognized "
95                                     "interrupt type (%d).\n",
96                                     vha->host_no, mb[0]));
97                         }
98                         /* Release mailbox registers. */
99                         WRT_REG_WORD(&reg->semaphore, 0);
100                         RD_REG_WORD(&reg->semaphore);
101                 } else {
102                         qla2x00_process_response_queue(rsp);
103
104                         WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
105                         RD_REG_WORD(&reg->hccr);
106                 }
107         }
108         spin_unlock_irqrestore(&ha->hardware_lock, flags);
109
110         if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
111             (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
112                 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
113                 complete(&ha->mbx_intr_comp);
114         }
115
116         return (IRQ_HANDLED);
117 }
118
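/*
 * Editor's note: a minimal sketch of how a handler with this signature is
 * typically wired up, assuming the generic request_irq() interface; the
 * real registration in this driver lives elsewhere (qla2x00_request_irqs()
 * later in this file), so the exact flags and name below are illustrative:
 *
 *	ret = request_irq(ha->pdev->irq, qla2100_intr_handler,
 *	    IRQF_SHARED, "qla2xxx", rsp);
 *	if (ret)
 *		goto fail;	(handler not installed)
 *
 * The response queue is passed as dev_id, which is why the handler above
 * first recovers rsp from dev_id and returns IRQ_NONE when it is NULL.
 */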
119 /**
120  * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
121  * @irq: interrupt number
122  * @dev_id: SCSI driver response queue context
123  *
124  * Called by system whenever the host adapter generates an interrupt.
125  *
126  * Returns handled flag.
127  */
128 irqreturn_t
129 qla2300_intr_handler(int irq, void *dev_id)
130 {
131         scsi_qla_host_t *vha;
132         struct device_reg_2xxx __iomem *reg;
133         int             status;
134         unsigned long   iter;
135         uint32_t        stat;
136         uint16_t        hccr;
137         uint16_t        mb[4];
138         struct rsp_que *rsp;
139         struct qla_hw_data *ha;
140         unsigned long   flags;
141
142         rsp = (struct rsp_que *) dev_id;
143         if (!rsp) {
144                 printk(KERN_INFO
145                     "%s(): NULL response queue pointer\n", __func__);
146                 return (IRQ_NONE);
147         }
148
149         ha = rsp->hw;
150         reg = &ha->iobase->isp;
151         status = 0;
152
153         spin_lock_irqsave(&ha->hardware_lock, flags);
154         vha = pci_get_drvdata(ha->pdev);
155         for (iter = 50; iter--; ) {
156                 stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
157                 if (stat & HSR_RISC_PAUSED) {
158                         if (unlikely(pci_channel_offline(ha->pdev)))
159                                 break;
160
161                         hccr = RD_REG_WORD(&reg->hccr);
162                         if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
163                                 qla_printk(KERN_INFO, ha, "Parity error -- "
164                                     "HCCR=%x, Dumping firmware!\n", hccr);
165                         else
166                                 qla_printk(KERN_INFO, ha, "RISC paused -- "
167                                     "HCCR=%x, Dumping firmware!\n", hccr);
168
169                         /*
170                          * Issue a "HARD" reset in order for the RISC
171                          * interrupt bit to be cleared.  Schedule a big
172                          * hammer to get out of the RISC PAUSED state.
173                          */
174                         WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
175                         RD_REG_WORD(&reg->hccr);
176
177                         ha->isp_ops->fw_dump(vha, 1);
178                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
179                         break;
180                 } else if ((stat & HSR_RISC_INT) == 0)
181                         break;
182
183                 switch (stat & 0xff) {
184                 case 0x1:
185                 case 0x2:
186                 case 0x10:
187                 case 0x11:
188                         qla2x00_mbx_completion(vha, MSW(stat));
189                         status |= MBX_INTERRUPT;
190
191                         /* Release mailbox registers. */
192                         WRT_REG_WORD(&reg->semaphore, 0);
193                         break;
194                 case 0x12:
195                         mb[0] = MSW(stat);
196                         mb[1] = RD_MAILBOX_REG(ha, reg, 1);
197                         mb[2] = RD_MAILBOX_REG(ha, reg, 2);
198                         mb[3] = RD_MAILBOX_REG(ha, reg, 3);
199                         qla2x00_async_event(vha, rsp, mb);
200                         break;
201                 case 0x13:
202                         qla2x00_process_response_queue(rsp);
203                         break;
204                 case 0x15:
205                         mb[0] = MBA_CMPLT_1_16BIT;
206                         mb[1] = MSW(stat);
207                         qla2x00_async_event(vha, rsp, mb);
208                         break;
209                 case 0x16:
210                         mb[0] = MBA_SCSI_COMPLETION;
211                         mb[1] = MSW(stat);
212                         mb[2] = RD_MAILBOX_REG(ha, reg, 2);
213                         qla2x00_async_event(vha, rsp, mb);
214                         break;
215                 default:
216                         DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
217                             "(%d).\n",
218                             vha->host_no, stat & 0xff));
219                         break;
220                 }
221                 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
222                 RD_REG_WORD_RELAXED(&reg->hccr);
223         }
224         spin_unlock_irqrestore(&ha->hardware_lock, flags);
225
226         if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
227             (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
228                 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
229                 complete(&ha->mbx_intr_comp);
230         }
231
232         return (IRQ_HANDLED);
233 }
234
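/*
 * Editor's note on the host_status decode in qla2300_intr_handler() above:
 * the low byte of "stat" selects the completion type (0x1/0x2/0x10/0x11
 * mailbox completion, 0x12 asynchronous event, 0x13 response queue entry,
 * 0x15/0x16 16-bit fast-post completions) while MSW(stat) carries mailbox
 * register 0.  A hypothetical worked example:
 *
 *	stat == 0x80100012
 *	stat & 0xff == 0x12      -> asynchronous event path
 *	MSW(stat)   == 0x8010    -> handed to qla2x00_async_event() as mb[0]
 */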
235 /**
236  * qla2x00_mbx_completion() - Process mailbox command completions.
237  * @vha: SCSI driver HA context
238  * @mb0: Mailbox0 register
239  */
240 static void
241 qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
242 {
243         uint16_t        cnt;
244         uint16_t __iomem *wptr;
245         struct qla_hw_data *ha = vha->hw;
246         struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
247
248         /* Load return mailbox registers. */
249         ha->flags.mbox_int = 1;
250         ha->mailbox_out[0] = mb0;
251         wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);
252
253         for (cnt = 1; cnt < ha->mbx_count; cnt++) {
254                 if (IS_QLA2200(ha) && cnt == 8)
255                         wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
256                 if (cnt == 4 || cnt == 5)
257                         ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
258                 else
259                         ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
260
261                 wptr++;
262         }
263
264         if (ha->mcp) {
265                 DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n",
266                     __func__, vha->host_no, ha->mcp->mb[0]));
267         } else {
268                 DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n",
269                     __func__, vha->host_no));
270         }
271 }
272
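/*
 * Editor's note: a rough sketch of the waiter side of the mailbox protocol
 * serviced above (the real code is the mailbox command path in qla_mbx.c;
 * names and timeouts here are illustrative, not authoritative):
 *
 *	set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
 *	... write outbound mailbox registers, set HCCR host interrupt ...
 *	wait_for_completion_timeout(&ha->mbx_intr_comp, mcp->tov * HZ);
 *	... on success, copy results out of ha->mailbox_out[] ...
 *
 * qla2x00_mbx_completion() fills ha->mailbox_out[] under the hardware lock
 * and sets ha->flags.mbox_int; the interrupt handlers then complete()
 * mbx_intr_comp only if a waiter announced itself via MBX_INTR_WAIT.
 */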
273 static void
274 qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
275 {
276         static char *event[] =
277                 { "Complete", "Request Notification", "Time Extension" };
278         int rval;
279         struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24;
280         uint16_t __iomem *wptr;
281         uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS];
282
283         /* Seed data -- mailbox1 -> mailbox7. */
284         wptr = (uint16_t __iomem *)&reg24->mailbox1;
285         for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
286                 mb[cnt] = RD_REG_WORD(wptr);
287
288         DEBUG2(printk("scsi(%ld): Inter-Driver Communication %s -- "
289             "%04x %04x %04x %04x %04x %04x %04x.\n", vha->host_no,
290             event[aen & 0xff],
291             mb[0], mb[1], mb[2], mb[3], mb[4], mb[5], mb[6]));
292
293         /* Acknowledgement needed? [Notify && non-zero timeout]. */
294         timeout = (descr >> 8) & 0xf;
295         if (aen != MBA_IDC_NOTIFY || !timeout)
296                 return;
297
298         DEBUG2(printk("scsi(%ld): Inter-Driver Communication %s -- "
299             "ACK timeout=%d.\n", vha->host_no, event[aen & 0xff], timeout));
300
301         rval = qla2x00_post_idc_ack_work(vha, mb);
302         if (rval != QLA_SUCCESS)
303                 qla_printk(KERN_WARNING, vha->hw,
304                     "IDC failed to post ACK.\n");
305 }
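/*
 * Editor's note: the acknowledgement check above extracts the IDC timeout
 * from bits 8-11 of descr.  For example, a hypothetical descr of 0x0508
 * gives timeout = (0x0508 >> 8) & 0xf = 5, so an ACK work item is posted;
 * a zero timeout, or any event other than MBA_IDC_NOTIFY, returns early
 * with no acknowledgement.
 */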
306
307 /**
308  * qla2x00_async_event() - Process asynchronous events.
309  * @vha: SCSI driver HA context
310  * @mb: Mailbox registers (0 - 3)
311  */
312 void
313 qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
314 {
315 #define LS_UNKNOWN      2
316         static char     *link_speeds[] = { "1", "2", "?", "4", "8", "10" };
317         char            *link_speed;
318         uint16_t        handle_cnt;
319         uint16_t        cnt, mbx;
320         uint32_t        handles[5];
321         struct qla_hw_data *ha = vha->hw;
322         struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
323         struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
324         struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
325         uint32_t        rscn_entry, host_pid;
326         uint8_t         rscn_queue_index;
327         unsigned long   flags;
328
329         /* Setup to process RIO completion. */
330         handle_cnt = 0;
331         if (IS_QLA8XXX_TYPE(ha))
332                 goto skip_rio;
333         switch (mb[0]) {
334         case MBA_SCSI_COMPLETION:
335                 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
336                 handle_cnt = 1;
337                 break;
338         case MBA_CMPLT_1_16BIT:
339                 handles[0] = mb[1];
340                 handle_cnt = 1;
341                 mb[0] = MBA_SCSI_COMPLETION;
342                 break;
343         case MBA_CMPLT_2_16BIT:
344                 handles[0] = mb[1];
345                 handles[1] = mb[2];
346                 handle_cnt = 2;
347                 mb[0] = MBA_SCSI_COMPLETION;
348                 break;
349         case MBA_CMPLT_3_16BIT:
350                 handles[0] = mb[1];
351                 handles[1] = mb[2];
352                 handles[2] = mb[3];
353                 handle_cnt = 3;
354                 mb[0] = MBA_SCSI_COMPLETION;
355                 break;
356         case MBA_CMPLT_4_16BIT:
357                 handles[0] = mb[1];
358                 handles[1] = mb[2];
359                 handles[2] = mb[3];
360                 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
361                 handle_cnt = 4;
362                 mb[0] = MBA_SCSI_COMPLETION;
363                 break;
364         case MBA_CMPLT_5_16BIT:
365                 handles[0] = mb[1];
366                 handles[1] = mb[2];
367                 handles[2] = mb[3];
368                 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
369                 handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
370                 handle_cnt = 5;
371                 mb[0] = MBA_SCSI_COMPLETION;
372                 break;
373         case MBA_CMPLT_2_32BIT:
374                 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
375                 handles[1] = le32_to_cpu(
376                     ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) |
377                     RD_MAILBOX_REG(ha, reg, 6));
378                 handle_cnt = 2;
379                 mb[0] = MBA_SCSI_COMPLETION;
380                 break;
381         default:
382                 break;
383         }
384 skip_rio:
385         switch (mb[0]) {
386         case MBA_SCSI_COMPLETION:       /* Fast Post */
387                 if (!vha->flags.online)
388                         break;
389
390                 for (cnt = 0; cnt < handle_cnt; cnt++)
391                         qla2x00_process_completed_request(vha, rsp->req,
392                                 handles[cnt]);
393                 break;
394
395         case MBA_RESET:                 /* Reset */
396                 DEBUG2(printk("scsi(%ld): Asynchronous RESET.\n",
397                         vha->host_no));
398
399                 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
400                 break;
401
402         case MBA_SYSTEM_ERR:            /* System Error */
403                 mbx = IS_QLA81XX(ha) ? RD_REG_WORD(&reg24->mailbox7) : 0;
404                 qla_printk(KERN_INFO, ha,
405                     "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh "
406                     "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx);
407
408                 ha->isp_ops->fw_dump(vha, 1);
409
410                 if (IS_FWI2_CAPABLE(ha)) {
411                         if (mb[1] == 0 && mb[2] == 0) {
412                                 qla_printk(KERN_ERR, ha,
413                                     "Unrecoverable Hardware Error: adapter "
414                                     "marked OFFLINE!\n");
415                                 vha->flags.online = 0;
416                         } else {
417                                 /* Check to see if MPI timeout occurred */
418                                 if ((mbx & MBX_3) && (ha->flags.port0))
419                                         set_bit(MPI_RESET_NEEDED,
420                                             &vha->dpc_flags);
421
422                                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
423                         }
424                 } else if (mb[1] == 0) {
425                         qla_printk(KERN_INFO, ha,
426                             "Unrecoverable Hardware Error: adapter marked "
427                             "OFFLINE!\n");
428                         vha->flags.online = 0;
429                 } else
430                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
431                 break;
432
433         case MBA_REQ_TRANSFER_ERR:      /* Request Transfer Error */
434                 DEBUG2(printk("scsi(%ld): ISP Request Transfer Error (%x).\n",
435                     vha->host_no, mb[1]));
436                 qla_printk(KERN_WARNING, ha,
437                     "ISP Request Transfer Error (%x).\n", mb[1]);
438
439                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
440                 break;
441
442         case MBA_RSP_TRANSFER_ERR:      /* Response Transfer Error */
443                 DEBUG2(printk("scsi(%ld): ISP Response Transfer Error.\n",
444                     vha->host_no));
445                 qla_printk(KERN_WARNING, ha, "ISP Response Transfer Error.\n");
446
447                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
448                 break;
449
450         case MBA_WAKEUP_THRES:          /* Request Queue Wake-up */
451                 DEBUG2(printk("scsi(%ld): Asynchronous WAKEUP_THRES.\n",
452                     vha->host_no));
453                 break;
454
455         case MBA_LIP_OCCURRED:          /* Loop Initialization Procedure */
456                 DEBUG2(printk("scsi(%ld): LIP occurred (%x).\n", vha->host_no,
457                     mb[1]));
458                 qla_printk(KERN_INFO, ha, "LIP occurred (%x).\n", mb[1]);
459
460                 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
461                         atomic_set(&vha->loop_state, LOOP_DOWN);
462                         atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
463                         qla2x00_mark_all_devices_lost(vha, 1);
464                 }
465
466                 if (vha->vp_idx) {
467                         atomic_set(&vha->vp_state, VP_FAILED);
468                         fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
469                 }
470
471                 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
472                 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
473
474                 vha->flags.management_server_logged_in = 0;
475                 qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]);
476                 break;
477
478         case MBA_LOOP_UP:               /* Loop Up Event */
479                 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
480                         link_speed = link_speeds[0];
481                         ha->link_data_rate = PORT_SPEED_1GB;
482                 } else {
483                         link_speed = link_speeds[LS_UNKNOWN];
484                         if (mb[1] < 5)
485                                 link_speed = link_speeds[mb[1]];
486                         else if (mb[1] == 0x13)
487                                 link_speed = link_speeds[5];
488                         ha->link_data_rate = mb[1];
489                 }
490
491                 DEBUG2(printk("scsi(%ld): Asynchronous LOOP UP (%s Gbps).\n",
492                     vha->host_no, link_speed));
493                 qla_printk(KERN_INFO, ha, "LOOP UP detected (%s Gbps).\n",
494                     link_speed);
495
496                 vha->flags.management_server_logged_in = 0;
497                 qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
498                 break;
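        /*
         * Editor's note on the link_speeds[] lookup above: mb[1] is the
         * firmware's link rate code and indexes the table directly for
         * values 0-4 ("1", "2", "?", "4", "8" Gbps), with 0x13 special-
         * cased to "10" Gbps; anything else reports "?" (LS_UNKNOWN).
         * E.g. mb[1] == 3 selects link_speeds[3], i.e. a 4 Gbps link.
         */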
499
500         case MBA_LOOP_DOWN:             /* Loop Down Event */
501                 mbx = IS_QLA81XX(ha) ? RD_REG_WORD(&reg24->mailbox4) : 0;
502                 mbx = IS_QLA82XX(ha) ? RD_REG_WORD(&reg82->mailbox_out[4]) : mbx;
503                 DEBUG2(printk("scsi(%ld): Asynchronous LOOP DOWN "
504                     "(%x %x %x %x).\n", vha->host_no, mb[1], mb[2], mb[3],
505                     mbx));
506                 qla_printk(KERN_INFO, ha,
507                     "LOOP DOWN detected (%x %x %x %x).\n", mb[1], mb[2], mb[3],
508                     mbx);
509
510                 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
511                         atomic_set(&vha->loop_state, LOOP_DOWN);
512                         atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
513                         vha->device_flags |= DFLG_NO_CABLE;
514                         qla2x00_mark_all_devices_lost(vha, 1);
515                 }
516
517                 if (vha->vp_idx) {
518                         atomic_set(&vha->vp_state, VP_FAILED);
519                         fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
520                 }
521
522                 vha->flags.management_server_logged_in = 0;
523                 ha->link_data_rate = PORT_SPEED_UNKNOWN;
524                 qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0);
525                 break;
526
527         case MBA_LIP_RESET:             /* LIP reset occurred */
528                 DEBUG2(printk("scsi(%ld): Asynchronous LIP RESET (%x).\n",
529                     vha->host_no, mb[1]));
530                 qla_printk(KERN_INFO, ha,
531                     "LIP reset occurred (%x).\n", mb[1]);
532
533                 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
534                         atomic_set(&vha->loop_state, LOOP_DOWN);
535                         atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
536                         qla2x00_mark_all_devices_lost(vha, 1);
537                 }
538
539                 if (vha->vp_idx) {
540                         atomic_set(&vha->vp_state, VP_FAILED);
541                         fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
542                 }
543
544                 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
545
546                 ha->operating_mode = LOOP;
547                 vha->flags.management_server_logged_in = 0;
548                 qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
549                 break;
550
551         /* case MBA_DCBX_COMPLETE: */
552         case MBA_POINT_TO_POINT:        /* Point-to-Point */
553                 if (IS_QLA2100(ha))
554                         break;
555
556                 if (IS_QLA8XXX_TYPE(ha)) {
557                         DEBUG2(printk("scsi(%ld): DCBX Completed -- %04x %04x "
558                             "%04x\n", vha->host_no, mb[1], mb[2], mb[3]));
559                         if (ha->notify_dcbx_comp)
560                                 complete(&ha->dcbx_comp);
561
562                 } else
563                         DEBUG2(printk("scsi(%ld): Asynchronous P2P MODE "
564                             "received.\n", vha->host_no));
565
566                 /*
567                  * Until there's a transition from loop down to loop up, treat
568                  * this as loop down only.
569                  */
570                 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
571                         atomic_set(&vha->loop_state, LOOP_DOWN);
572                         if (!atomic_read(&vha->loop_down_timer))
573                                 atomic_set(&vha->loop_down_timer,
574                                     LOOP_DOWN_TIME);
575                         qla2x00_mark_all_devices_lost(vha, 1);
576                 }
577
578                 if (vha->vp_idx) {
579                         atomic_set(&vha->vp_state, VP_FAILED);
580                         fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
581                 }
582
583                 if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)))
584                         set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
585
586                 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
587                 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
588
589                 ha->flags.gpsc_supported = 1;
590                 vha->flags.management_server_logged_in = 0;
591                 break;
592
593         case MBA_CHG_IN_CONNECTION:     /* Change in connection mode */
594                 if (IS_QLA2100(ha))
595                         break;
596
597                 DEBUG2(printk("scsi(%ld): Asynchronous Change In Connection "
598                     "received.\n",
599                     vha->host_no));
600                 qla_printk(KERN_INFO, ha,
601                     "Configuration change detected: value=%x.\n", mb[1]);
602
603                 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
604                         atomic_set(&vha->loop_state, LOOP_DOWN);
605                         if (!atomic_read(&vha->loop_down_timer))
606                                 atomic_set(&vha->loop_down_timer,
607                                     LOOP_DOWN_TIME);
608                         qla2x00_mark_all_devices_lost(vha, 1);
609                 }
610
611                 if (vha->vp_idx) {
612                         atomic_set(&vha->vp_state, VP_FAILED);
613                         fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
614                 }
615
616                 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
617                 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
618                 break;
619
620         case MBA_PORT_UPDATE:           /* Port database update */
621                 /*
622                  * Handle only global and vn-port update events
623                  *
624                  * Relevant inputs:
625                  * mb[1] = N_Port handle of changed port
626                  * OR 0xffff for global event
627                  * mb[2] = New login state
628                  * 7 = Port logged out
629                  * mb[3] = LSB is vp_idx, 0xff = all vps
630                  *
631                  * Skip processing if:
632                  *       Event is global, vp_idx is NOT all vps,
633                  *           vp_idx does not match
634                  *       Event is not global, vp_idx does not match
635                  */
636                 if (IS_QLA2XXX_MIDTYPE(ha) &&
637                     ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) ||
638                         (mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff))
639                         break;
640
641                 /* Global event -- port logout or port unavailable. */
642                 if (mb[1] == 0xffff && mb[2] == 0x7) {
643                         DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE.\n",
644                             vha->host_no));
645                         DEBUG(printk(KERN_INFO
646                             "scsi(%ld): Port unavailable %04x %04x %04x.\n",
647                             vha->host_no, mb[1], mb[2], mb[3]));
648
649                         if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
650                                 atomic_set(&vha->loop_state, LOOP_DOWN);
651                                 atomic_set(&vha->loop_down_timer,
652                                     LOOP_DOWN_TIME);
653                                 vha->device_flags |= DFLG_NO_CABLE;
654                                 qla2x00_mark_all_devices_lost(vha, 1);
655                         }
656
657                         if (vha->vp_idx) {
658                                 atomic_set(&vha->vp_state, VP_FAILED);
659                                 fc_vport_set_state(vha->fc_vport,
660                                     FC_VPORT_FAILED);
661                                 qla2x00_mark_all_devices_lost(vha, 1);
662                         }
663
664                         vha->flags.management_server_logged_in = 0;
665                         ha->link_data_rate = PORT_SPEED_UNKNOWN;
666                         break;
667                 }
668
669                 /*
670                  * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
671                  * event etc. earlier indicating loop is down) then process
672                  * it.  Otherwise ignore it and wait for RSCN to come in.
673                  */
674                 atomic_set(&vha->loop_down_timer, 0);
675                 if (atomic_read(&vha->loop_state) != LOOP_DOWN &&
676                     atomic_read(&vha->loop_state) != LOOP_DEAD) {
677                         DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE "
678                             "ignored %04x/%04x/%04x.\n", vha->host_no, mb[1],
679                             mb[2], mb[3]));
680                         break;
681                 }
682
683                 DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE.\n",
684                     vha->host_no));
685                 DEBUG(printk(KERN_INFO
686                     "scsi(%ld): Port database changed %04x %04x %04x.\n",
687                     vha->host_no, mb[1], mb[2], mb[3]));
688
689                 /*
690                  * Mark all devices as missing so we will login again.
691                  */
692                 atomic_set(&vha->loop_state, LOOP_UP);
693
694                 qla2x00_mark_all_devices_lost(vha, 1);
695
696                 vha->flags.rscn_queue_overflow = 1;
697
698                 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
699                 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
700                 break;
701
702         case MBA_RSCN_UPDATE:           /* State Change Registration */
703                 /* Check if the Vport has issued a SCR */
704                 if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags))
705                         break;
706                 /* Only handle SCNs for our Vport index. */
707                 if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff))
708                         break;
709
710                 DEBUG2(printk("scsi(%ld): Asynchronous RSCN UPDATE.\n",
711                     vha->host_no));
712                 DEBUG(printk(KERN_INFO
713                     "scsi(%ld): RSCN database changed -- %04x %04x %04x.\n",
714                     vha->host_no, mb[1], mb[2], mb[3]));
715
716                 rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
717                 host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
718                                 | vha->d_id.b.al_pa;
719                 if (rscn_entry == host_pid) {
720                         DEBUG(printk(KERN_INFO
721                             "scsi(%ld): Ignoring RSCN update to local host "
722                             "port ID (%06x)\n",
723                             vha->host_no, host_pid));
724                         break;
725                 }
726
727                 /* Ignore reserved bits from RSCN-payload. */
728                 rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];
729                 rscn_queue_index = vha->rscn_in_ptr + 1;
730                 if (rscn_queue_index == MAX_RSCN_COUNT)
731                         rscn_queue_index = 0;
732                 if (rscn_queue_index != vha->rscn_out_ptr) {
733                         vha->rscn_queue[vha->rscn_in_ptr] = rscn_entry;
734                         vha->rscn_in_ptr = rscn_queue_index;
735                 } else {
736                         vha->flags.rscn_queue_overflow = 1;
737                 }
738
739                 atomic_set(&vha->loop_state, LOOP_UPDATE);
740                 atomic_set(&vha->loop_down_timer, 0);
741                 vha->flags.management_server_logged_in = 0;
742
743                 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
744                 set_bit(RSCN_UPDATE, &vha->dpc_flags);
745                 qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
746                 break;
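        /*
         * Editor's note on the RSCN bookkeeping above: rscn_queue[] acts as
         * a ring buffer that sacrifices one slot to tell "full" from
         * "empty"; the entry is dropped and rscn_queue_overflow is set when
         * advancing rscn_in_ptr would land on rscn_out_ptr, letting the
         * driver fall back to a full resync later.  A worked example,
         * assuming MAX_RSCN_COUNT is 32:
         *
         *	rscn_in_ptr == 31, rscn_out_ptr == 0
         *	rscn_queue_index = 31 + 1 = 32 -> wraps to 0 == rscn_out_ptr
         *	=> queue is treated as full, flags.rscn_queue_overflow = 1
         */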
747
748         /* case MBA_RIO_RESPONSE: */
749         case MBA_ZIO_RESPONSE:
750                 DEBUG3(printk("scsi(%ld): [R|Z]IO update completion.\n",
751                     vha->host_no));
752
753                 if (IS_FWI2_CAPABLE(ha))
754                         qla24xx_process_response_queue(vha, rsp);
755                 else
756                         qla2x00_process_response_queue(rsp);
757                 break;
758
759         case MBA_DISCARD_RND_FRAME:
760                 DEBUG2(printk("scsi(%ld): Discard RND Frame -- %04x %04x "
761                     "%04x.\n", vha->host_no, mb[1], mb[2], mb[3]));
762                 break;
763
764         case MBA_TRACE_NOTIFICATION:
765                 DEBUG2(printk("scsi(%ld): Trace Notification -- %04x %04x.\n",
766                 vha->host_no, mb[1], mb[2]));
767                 break;
768
769         case MBA_ISP84XX_ALERT:
770                 DEBUG2(printk("scsi(%ld): ISP84XX Alert Notification -- "
771                     "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3]));
772
773                 spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
774                 switch (mb[1]) {
775                 case A84_PANIC_RECOVERY:
776                         qla_printk(KERN_INFO, ha, "Alert 84XX: panic recovery "
777                             "%04x %04x\n", mb[2], mb[3]);
778                         break;
779                 case A84_OP_LOGIN_COMPLETE:
780                         ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
781                         DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX: "
782                             "firmware version %x\n", ha->cs84xx->op_fw_version));
783                         break;
784                 case A84_DIAG_LOGIN_COMPLETE:
785                         ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
786                         DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX: "
787                             "diagnostic firmware version %x\n",
788                             ha->cs84xx->diag_fw_version));
789                         break;
790                 case A84_GOLD_LOGIN_COMPLETE:
791                         ha->cs84xx->gold_fw_version = mb[3] << 16 | mb[2];
792                         ha->cs84xx->fw_update = 1;
793                         DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX: gold "
794                             "firmware version %x\n",
795                             ha->cs84xx->gold_fw_version));
796                         break;
797                 default:
798                         qla_printk(KERN_ERR, ha,
799                             "Alert 84xx: Invalid Alert %04x %04x %04x\n",
800                             mb[1], mb[2], mb[3]);
801                 }
802                 spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
803                 break;
804         case MBA_DCBX_START:
805                 DEBUG2(printk("scsi(%ld): DCBX Started -- %04x %04x %04x\n",
806                     vha->host_no, mb[1], mb[2], mb[3]));
807                 break;
808         case MBA_DCBX_PARAM_UPDATE:
809                 DEBUG2(printk("scsi(%ld): DCBX Parameters Updated -- "
810                     "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3]));
811                 break;
812         case MBA_FCF_CONF_ERR:
813                 DEBUG2(printk("scsi(%ld): FCF Configuration Error -- "
814                     "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3]));
815                 break;
816         case MBA_IDC_COMPLETE:
817         case MBA_IDC_NOTIFY:
818         case MBA_IDC_TIME_EXT:
819                 qla81xx_idc_event(vha, mb[0], mb[1]);
820                 break;
821         }
822
823         if (!vha->vp_idx && ha->num_vhosts)
824                 qla2x00_alert_all_vps(rsp, mb);
825 }
826
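/*
 * Editor's note on the RIO setup at the top of qla2x00_async_event(): each
 * 32-bit command handle is reassembled from two 16-bit mailbox registers,
 * low half in mb[1] and high half in mb[2].  For example, mb[1] == 0x0004
 * with mb[2] == 0x0000 yields (mb[2] << 16) | mb[1] == 0x00000004, which is
 * then used to index req->outstanding_cmds[] in
 * qla2x00_process_completed_request() below.
 */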
827 /**
828  * qla2x00_process_completed_request() - Process a Fast Post response.
829  * @vha: SCSI driver HA context
830  * @index: SRB index
831  */
832 static void
833 qla2x00_process_completed_request(struct scsi_qla_host *vha,
834                                 struct req_que *req, uint32_t index)
835 {
836         srb_t *sp;
837         struct qla_hw_data *ha = vha->hw;
838
839         /* Validate handle. */
840         if (index >= MAX_OUTSTANDING_COMMANDS) {
841                 DEBUG2(printk("scsi(%ld): Invalid SCSI completion handle %d.\n",
842                     vha->host_no, index));
843                 qla_printk(KERN_WARNING, ha,
844                     "Invalid SCSI completion handle %d.\n", index);
845
846                 if (IS_QLA82XX(ha))
847                         set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
848                 else
849                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
850                 return;
851         }
852
853         sp = req->outstanding_cmds[index];
854         if (sp) {
855                 /* Free outstanding command slot. */
856                 req->outstanding_cmds[index] = NULL;
857
858                 /* Save ISP completion status */
859                 sp->cmd->result = DID_OK << 16;
860                 qla2x00_sp_compl(ha, sp);
861         } else {
862                 DEBUG2(printk("scsi(%ld) Req:%d: Invalid ISP SCSI completion"
863                         " handle(0x%x)\n", vha->host_no, req->id, index));
864                 qla_printk(KERN_WARNING, ha,
865                     "Invalid ISP SCSI completion handle\n");
866
867                 if (IS_QLA82XX(ha))
868                         set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
869                 else
870                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
871         }
872 }
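/*
 * Editor's note: fast-post completions handled above carry no status
 * payload, so only success can be reported; DID_OK << 16 places the host
 * byte of the SCSI result (DID_OK is 0, so the result is simply 0).
 * Anything that needs real status or sense data arrives as a full status
 * IOCB instead and is decoded in qla2x00_status_entry().
 */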
873
874 static srb_t *
875 qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
876     struct req_que *req, void *iocb)
877 {
878         struct qla_hw_data *ha = vha->hw;
879         sts_entry_t *pkt = iocb;
880         srb_t *sp = NULL;
881         uint16_t index;
882
883         index = LSW(pkt->handle);
884         if (index >= MAX_OUTSTANDING_COMMANDS) {
885                 qla_printk(KERN_WARNING, ha,
886                     "%s: Invalid completion handle (%x).\n", func, index);
887                 if (IS_QLA82XX(ha))
888                         set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
889                 else
890                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
891                 goto done;
892         }
893         sp = req->outstanding_cmds[index];
894         if (!sp) {
895                 qla_printk(KERN_WARNING, ha,
896                     "%s: Invalid completion handle (%x) -- timed-out.\n", func,
897                     index);
898                 return sp;
899         }
900         if (sp->handle != index) {
901                 qla_printk(KERN_WARNING, ha,
902                     "%s: SRB handle (%x) mismatch %x.\n", func, sp->handle,
903                     index);
904                 return NULL;
905         }
906
907         req->outstanding_cmds[index] = NULL;
908
909 done:
910         return sp;
911 }
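/*
 * Editor's note: the helper above folds three failure modes into a NULL
 * return: an out-of-range handle (firmware corruption, so an ISP or FCoE
 * context reset is scheduled), an empty outstanding_cmds[] slot (the
 * command already timed out and was reaped), and a handle/index mismatch
 * (stale handle).  On success the slot is cleared and the srb returned,
 * so callers only need the single check used throughout this file:
 *
 *	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
 *	if (!sp)
 *		return;
 */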
912
913 static void
914 qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
915     struct mbx_entry *mbx)
916 {
917         const char func[] = "MBX-IOCB";
918         const char *type;
919         fc_port_t *fcport;
920         srb_t *sp;
921         struct srb_iocb *lio;
922         struct srb_ctx *ctx;
923         uint16_t *data;
924         uint16_t status;
925
926         sp = qla2x00_get_sp_from_handle(vha, func, req, mbx);
927         if (!sp)
928                 return;
929
930         ctx = sp->ctx;
931         lio = ctx->u.iocb_cmd;
932         type = ctx->name;
933         fcport = sp->fcport;
934         data = lio->u.logio.data;
935
936         data[0] = MBS_COMMAND_ERROR;
937         data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
938             QLA_LOGIO_LOGIN_RETRIED : 0;
939         if (mbx->entry_status) {
940                 DEBUG2(printk(KERN_WARNING
941                     "scsi(%ld:%x): Async-%s error entry - portid=%02x%02x%02x "
942                     "entry-status=%x status=%x state-flag=%x "
943                     "status-flags=%x.\n",
944                     fcport->vha->host_no, sp->handle, type,
945                     fcport->d_id.b.domain, fcport->d_id.b.area,
946                     fcport->d_id.b.al_pa, mbx->entry_status,
947                     le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags),
948                     le16_to_cpu(mbx->status_flags)));
949
950                 DEBUG2(qla2x00_dump_buffer((uint8_t *)mbx, sizeof(*mbx)));
951
952                 goto logio_done;
953         }
954
955         status = le16_to_cpu(mbx->status);
956         if (status == 0x30 && ctx->type == SRB_LOGIN_CMD &&
957             le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE)
958                 status = 0;
959         if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
960                 DEBUG2(printk(KERN_DEBUG
961                     "scsi(%ld:%x): Async-%s complete - portid=%02x%02x%02x "
962                     "mbx1=%x.\n",
963                     fcport->vha->host_no, sp->handle, type,
964                     fcport->d_id.b.domain, fcport->d_id.b.area,
965                     fcport->d_id.b.al_pa, le16_to_cpu(mbx->mb1)));
966
967                 data[0] = MBS_COMMAND_COMPLETE;
968                 if (ctx->type == SRB_LOGIN_CMD) {
969                         fcport->port_type = FCT_TARGET;
970                         if (le16_to_cpu(mbx->mb1) & BIT_0)
971                                 fcport->port_type = FCT_INITIATOR;
972                         else if (le16_to_cpu(mbx->mb1) & BIT_1)
973                                 fcport->flags |= FCF_FCP2_DEVICE;
974                 }
975                 goto logio_done;
976         }
977
978         data[0] = le16_to_cpu(mbx->mb0);
979         switch (data[0]) {
980         case MBS_PORT_ID_USED:
981                 data[1] = le16_to_cpu(mbx->mb1);
982                 break;
983         case MBS_LOOP_ID_USED:
984                 break;
985         default:
986                 data[0] = MBS_COMMAND_ERROR;
987                 break;
988         }
989
990         DEBUG2(printk(KERN_WARNING
991             "scsi(%ld:%x): Async-%s failed - portid=%02x%02x%02x status=%x "
992             "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n",
993             fcport->vha->host_no, sp->handle, type, fcport->d_id.b.domain,
994             fcport->d_id.b.area, fcport->d_id.b.al_pa, status,
995             le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
996             le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
997             le16_to_cpu(mbx->mb7)));
998
999 logio_done:
1000         lio->done(sp);
1001 }
1002
1003 static void
1004 qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1005     sts_entry_t *pkt, int iocb_type)
1006 {
1007         const char func[] = "CT_IOCB";
1008         const char *type;
1009         struct qla_hw_data *ha = vha->hw;
1010         srb_t *sp;
1011         struct srb_ctx *sp_bsg;
1012         struct fc_bsg_job *bsg_job;
1013         uint16_t comp_status;
1014
1015         sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
1016         if (!sp)
1017                 return;
1018
1019         sp_bsg = sp->ctx;
1020         bsg_job = sp_bsg->u.bsg_job;
1021
1022         type = NULL;
1023         switch (sp_bsg->type) {
1024         case SRB_CT_CMD:
1025                 type = "ct pass-through";
1026                 break;
1027         default:
1028                 qla_printk(KERN_WARNING, ha,
1029                     "%s: Unrecognized SRB: (%p) type=%d.\n", func, sp,
1030                     sp_bsg->type);
1031                 return;
1032         }
1033
1034         comp_status = le16_to_cpu(pkt->comp_status);
1035
1036         /* Return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
1037          * FC payload to the caller.
1038          */
1039         bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
1040         bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1041
1042         if (comp_status != CS_COMPLETE) {
1043                 if (comp_status == CS_DATA_UNDERRUN) {
1044                         bsg_job->reply->result = DID_OK << 16;
1045                         bsg_job->reply->reply_payload_rcv_len =
1046                             le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len);
1047
1048                         DEBUG2(qla_printk(KERN_WARNING, ha,
1049                             "scsi(%ld): CT pass-through-%s error "
1050                             "comp_status-status=0x%x total_byte = 0x%x.\n",
1051                             vha->host_no, type, comp_status,
1052                             bsg_job->reply->reply_payload_rcv_len));
1053                 } else {
1054                         DEBUG2(qla_printk(KERN_WARNING, ha,
1055                             "scsi(%ld): CT pass-through-%s error "
1056                             "comp_status-status=0x%x.\n",
1057                             vha->host_no, type, comp_status));
1058                         bsg_job->reply->result = DID_ERROR << 16;
1059                         bsg_job->reply->reply_payload_rcv_len = 0;
1060                 }
1061                 DEBUG2(qla2x00_dump_buffer((uint8_t *)pkt, sizeof(*pkt)));
1062         } else {
1063                 bsg_job->reply->result =  DID_OK << 16;
1064                 bsg_job->reply->reply_payload_rcv_len =
1065                     bsg_job->reply_payload.payload_len;
1066                 bsg_job->reply_len = 0;
1067         }
1068
1069         dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
1070             bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1071
1072         dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
1073             bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1074
1075         if (sp_bsg->type == SRB_ELS_CMD_HST || sp_bsg->type == SRB_CT_CMD)
1076                 kfree(sp->fcport);
1077
1078         kfree(sp->ctx);
1079         mempool_free(sp, ha->srb_mempool);
1080         bsg_job->job_done(bsg_job);
1081 }
1082
1083 static void
1084 qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1085     struct sts_entry_24xx *pkt, int iocb_type)
1086 {
1087         const char func[] = "ELS_CT_IOCB";
1088         const char *type;
1089         struct qla_hw_data *ha = vha->hw;
1090         srb_t *sp;
1091         struct srb_ctx *sp_bsg;
1092         struct fc_bsg_job *bsg_job;
1093         uint16_t comp_status;
1094         uint32_t fw_status[3];
1095         uint8_t* fw_sts_ptr;
1096
1097         sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
1098         if (!sp)
1099                 return;
1100         sp_bsg = sp->ctx;
1101         bsg_job = sp_bsg->u.bsg_job;
1102
1103         type = NULL;
1104         switch (sp_bsg->type) {
1105         case SRB_ELS_CMD_RPT:
1106         case SRB_ELS_CMD_HST:
1107                 type = "els";
1108                 break;
1109         case SRB_CT_CMD:
1110                 type = "ct pass-through";
1111                 break;
1112         default:
1113                 qla_printk(KERN_WARNING, ha,
1114                     "%s: Unrecognized SRB: (%p) type=%d.\n", func, sp,
1115                     sp_bsg->type);
1116                 return;
1117         }
1118
1119         comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
1120         fw_status[1] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_1);
1121         fw_status[2] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_2);
1122
1123         /* Return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
1124          * FC payload to the caller.
1125          */
1126         bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
1127         bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status);
1128
1129         if (comp_status != CS_COMPLETE) {
1130                 if (comp_status == CS_DATA_UNDERRUN) {
1131                         bsg_job->reply->result = DID_OK << 16;
1132                         bsg_job->reply->reply_payload_rcv_len =
1133                                 le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->total_byte_count);
1134
1135                         DEBUG2(qla_printk(KERN_WARNING, ha,
1136                             "scsi(%ld:0x%x): ELS-CT pass-through-%s error comp_status-status=0x%x "
1137                             "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
1138                                 vha->host_no, sp->handle, type, comp_status, fw_status[1], fw_status[2],
1139                                 le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->total_byte_count)));
1140                         fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
1141                         memcpy(fw_sts_ptr, fw_status, sizeof(fw_status));
1142                 }
1143                 else {
1144                         DEBUG2(qla_printk(KERN_WARNING, ha,
1145                             "scsi(%ld:0x%x): ELS-CT pass-through-%s error comp_status-status=0x%x "
1146                             "error subcode 1=0x%x error subcode 2=0x%x.\n",
1147                                 vha->host_no, sp->handle, type, comp_status,
1148                                 le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_1),
1149                                 le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_2)));
1150                         bsg_job->reply->result = DID_ERROR << 16;
1151                         bsg_job->reply->reply_payload_rcv_len = 0;
1152                         fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
1153                         memcpy(fw_sts_ptr, fw_status, sizeof(fw_status));
1154                 }
1155                 DEBUG2(qla2x00_dump_buffer((uint8_t *)pkt, sizeof(*pkt)));
1156         }
1157         else {
1158                 bsg_job->reply->result =  DID_OK << 16;
1159                 bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
1160                 bsg_job->reply_len = 0;
1161         }
1162
1163         dma_unmap_sg(&ha->pdev->dev,
1164             bsg_job->request_payload.sg_list,
1165             bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1166         dma_unmap_sg(&ha->pdev->dev,
1167             bsg_job->reply_payload.sg_list,
1168             bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1169         if ((sp_bsg->type == SRB_ELS_CMD_HST) ||
1170             (sp_bsg->type == SRB_CT_CMD))
1171                 kfree(sp->fcport);
1172         kfree(sp->ctx);
1173         mempool_free(sp, ha->srb_mempool);
1174         bsg_job->job_done(bsg_job);
1175 }
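/*
 * Editor's note: on error the ELS/CT path above stashes the three firmware
 * status words (completion status plus the two error subcodes) right after
 * the struct fc_bsg_reply in the bsg request's sense buffer, and sizes
 * reply_len to cover them.  A sketch of reading them back, mirroring the
 * driver's own pointer math (illustrative only):
 *
 *	uint32_t *fw_sts = (uint32_t *)((uint8_t *)bsg_job->req->sense +
 *	    sizeof(struct fc_bsg_reply));
 *	... fw_sts[0] = comp_status, fw_sts[1]/fw_sts[2] = error subcodes ...
 */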
1176
1177 static void
1178 qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
1179     struct logio_entry_24xx *logio)
1180 {
1181         const char func[] = "LOGIO-IOCB";
1182         const char *type;
1183         fc_port_t *fcport;
1184         srb_t *sp;
1185         struct srb_iocb *lio;
1186         struct srb_ctx *ctx;
1187         uint16_t *data;
1188         uint32_t iop[2];
1189
1190         sp = qla2x00_get_sp_from_handle(vha, func, req, logio);
1191         if (!sp)
1192                 return;
1193
1194         ctx = sp->ctx;
1195         lio = ctx->u.iocb_cmd;
1196         type = ctx->name;
1197         fcport = sp->fcport;
1198         data = lio->u.logio.data;
1199
1200         data[0] = MBS_COMMAND_ERROR;
1201         data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
1202                 QLA_LOGIO_LOGIN_RETRIED : 0;
1203         if (logio->entry_status) {
1204                 DEBUG2(printk(KERN_WARNING
1205                     "scsi(%ld:%x): Async-%s error entry - "
1206                     "portid=%02x%02x%02x entry-status=%x.\n",
1207                     fcport->vha->host_no, sp->handle, type,
1208                     fcport->d_id.b.domain, fcport->d_id.b.area,
1209                     fcport->d_id.b.al_pa, logio->entry_status));
1210                 DEBUG2(qla2x00_dump_buffer((uint8_t *)logio, sizeof(*logio)));
1211
1212                 goto logio_done;
1213         }
1214
1215         if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
1216                 DEBUG2(printk(KERN_DEBUG
1217                     "scsi(%ld:%x): Async-%s complete - portid=%02x%02x%02x "
1218                     "iop0=%x.\n",
1219                     fcport->vha->host_no, sp->handle, type,
1220                     fcport->d_id.b.domain, fcport->d_id.b.area,
1221                     fcport->d_id.b.al_pa,
1222                     le32_to_cpu(logio->io_parameter[0])));
1223
1224                 data[0] = MBS_COMMAND_COMPLETE;
1225                 if (ctx->type != SRB_LOGIN_CMD)
1226                         goto logio_done;
1227
1228                 iop[0] = le32_to_cpu(logio->io_parameter[0]);
1229                 if (iop[0] & BIT_4) {
1230                         fcport->port_type = FCT_TARGET;
1231                         if (iop[0] & BIT_8)
1232                                 fcport->flags |= FCF_FCP2_DEVICE;
1233                 } else if (iop[0] & BIT_5)
1234                         fcport->port_type = FCT_INITIATOR;
1235
1236                 if (logio->io_parameter[7] || logio->io_parameter[8])
1237                         fcport->supported_classes |= FC_COS_CLASS2;
1238                 if (logio->io_parameter[9] || logio->io_parameter[10])
1239                         fcport->supported_classes |= FC_COS_CLASS3;
1240
1241                 goto logio_done;
1242         }
1243
1244         iop[0] = le32_to_cpu(logio->io_parameter[0]);
1245         iop[1] = le32_to_cpu(logio->io_parameter[1]);
1246         switch (iop[0]) {
1247         case LSC_SCODE_PORTID_USED:
1248                 data[0] = MBS_PORT_ID_USED;
1249                 data[1] = LSW(iop[1]);
1250                 break;
1251         case LSC_SCODE_NPORT_USED:
1252                 data[0] = MBS_LOOP_ID_USED;
1253                 break;
1254         default:
1255                 data[0] = MBS_COMMAND_ERROR;
1256                 break;
1257         }
1258
1259         DEBUG2(printk(KERN_WARNING
1260             "scsi(%ld:%x): Async-%s failed - portid=%02x%02x%02x comp=%x "
1261             "iop0=%x iop1=%x.\n",
1262             fcport->vha->host_no, sp->handle, type, fcport->d_id.b.domain,
1263             fcport->d_id.b.area, fcport->d_id.b.al_pa,
1264             le16_to_cpu(logio->comp_status),
1265             le32_to_cpu(logio->io_parameter[0]),
1266             le32_to_cpu(logio->io_parameter[1])));
1267
1268 logio_done:
1269         lio->done(sp);
1270 }
1271
1272 static void
1273 qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
1274     struct tsk_mgmt_entry *tsk)
1275 {
1276         const char func[] = "TMF-IOCB";
1277         const char *type;
1278         fc_port_t *fcport;
1279         srb_t *sp;
1280         struct srb_iocb *iocb;
1281         struct srb_ctx *ctx;
1282         struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
1283         int error = 1;
1284
1285         sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
1286         if (!sp)
1287                 return;
1288
1289         ctx = sp->ctx;
1290         iocb = ctx->u.iocb_cmd;
1291         type = ctx->name;
1292         fcport = sp->fcport;
1293
1294         if (sts->entry_status) {
1295                 DEBUG2(printk(KERN_WARNING
1296                     "scsi(%ld:%x): Async-%s error - entry-status(%x).\n",
1297                     fcport->vha->host_no, sp->handle, type,
1298                     sts->entry_status));
1299         } else if (sts->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
1300                 DEBUG2(printk(KERN_WARNING
1301                     "scsi(%ld:%x): Async-%s error - completion status(%x).\n",
1302                     fcport->vha->host_no, sp->handle, type,
1303                     sts->comp_status));
1304         } else if (!(le16_to_cpu(sts->scsi_status) &
1305             SS_RESPONSE_INFO_LEN_VALID)) {
1306                 DEBUG2(printk(KERN_WARNING
1307                     "scsi(%ld:%x): Async-%s error - no response info(%x).\n",
1308                     fcport->vha->host_no, sp->handle, type,
1309                     sts->scsi_status));
1310         } else if (le32_to_cpu(sts->rsp_data_len) < 4) {
1311                 DEBUG2(printk(KERN_WARNING
1312                     "scsi(%ld:%x): Async-%s error - not enough response(%d).\n",
1313                     fcport->vha->host_no, sp->handle, type,
1314                     sts->rsp_data_len));
1315         } else if (sts->data[3]) {
1316                 DEBUG2(printk(KERN_WARNING
1317                     "scsi(%ld:%x): Async-%s error - response(%x).\n",
1318                     fcport->vha->host_no, sp->handle, type,
1319                     sts->data[3]));
1320         } else {
1321                 error = 0;
1322         }
1323
1324         if (error) {
1325                 iocb->u.tmf.data = error;
1326                 DEBUG2(qla2x00_dump_buffer((uint8_t *)sts, sizeof(*sts)));
1327         }
1328
1329         iocb->done(sp);
1330 }
1331
1332 /**
1333  * qla2x00_process_response_queue() - Process response queue entries.
1334  * @rsp: response queue
1335  */
1336 void
1337 qla2x00_process_response_queue(struct rsp_que *rsp)
1338 {
1339         struct scsi_qla_host *vha;
1340         struct qla_hw_data *ha = rsp->hw;
1341         struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1342         sts_entry_t     *pkt;
1343         uint16_t        handle_cnt;
1344         uint16_t        cnt;
1345
1346         vha = pci_get_drvdata(ha->pdev);
1347
1348         if (!vha->flags.online)
1349                 return;
1350
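             /*
              * Walk the response ring until an entry stamped RESPONSE_PROCESSED
              * is found, wrapping ring_index/ring_ptr at the end of the ring.
              * Each consumed entry is re-stamped with the sentinel so it is not
              * handled twice.
              */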
1351         while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
1352                 pkt = (sts_entry_t *)rsp->ring_ptr;
1353
1354                 rsp->ring_index++;
1355                 if (rsp->ring_index == rsp->length) {
1356                         rsp->ring_index = 0;
1357                         rsp->ring_ptr = rsp->ring;
1358                 } else {
1359                         rsp->ring_ptr++;
1360                 }
1361
1362                 if (pkt->entry_status != 0) {
1363                         DEBUG3(printk(KERN_INFO
1364                             "scsi(%ld): Process error entry.\n", vha->host_no));
1365
1366                         qla2x00_error_entry(vha, rsp, pkt);
1367                         ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
1368                         wmb();
1369                         continue;
1370                 }
1371
1372                 switch (pkt->entry_type) {
1373                 case STATUS_TYPE:
1374                         qla2x00_status_entry(vha, rsp, pkt);
1375                         break;
1376                 case STATUS_TYPE_21:
1377                         handle_cnt = ((sts21_entry_t *)pkt)->handle_count;
1378                         for (cnt = 0; cnt < handle_cnt; cnt++) {
1379                                 qla2x00_process_completed_request(vha, rsp->req,
1380                                     ((sts21_entry_t *)pkt)->handle[cnt]);
1381                         }
1382                         break;
1383                 case STATUS_TYPE_22:
1384                         handle_cnt = ((sts22_entry_t *)pkt)->handle_count;
1385                         for (cnt = 0; cnt < handle_cnt; cnt++) {
1386                                 qla2x00_process_completed_request(vha, rsp->req,
1387                                     ((sts22_entry_t *)pkt)->handle[cnt]);
1388                         }
1389                         break;
1390                 case STATUS_CONT_TYPE:
1391                         qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
1392                         break;
1393                 case MBX_IOCB_TYPE:
1394                         qla2x00_mbx_iocb_entry(vha, rsp->req,
1395                             (struct mbx_entry *)pkt);
1396                         break;
1397                 case CT_IOCB_TYPE:
1398                         qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
1399                         break;
1400                 default:
1401                         /* Type Not Supported. */
1402                         DEBUG4(printk(KERN_WARNING
1403                             "scsi(%ld): Received unknown response pkt type %x "
1404                             "entry status=%x.\n",
1405                             vha->host_no, pkt->entry_type, pkt->entry_status));
1406                         break;
1407                 }
1408                 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
1409                 wmb();
1410         }
1411
1412         /* Adjust ring index */
1413         WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
1414 }
1415
1416 static inline void
1418 qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
1419     uint32_t sense_len, struct rsp_que *rsp)
1420 {
1421         struct scsi_cmnd *cp = sp->cmd;
1422
1423         if (sense_len >= SCSI_SENSE_BUFFERSIZE)
1424                 sense_len = SCSI_SENSE_BUFFERSIZE;
1425
1426         sp->request_sense_length = sense_len;
1427         sp->request_sense_ptr = cp->sense_buffer;
1428         if (sp->request_sense_length > par_sense_len)
1429                 sense_len = par_sense_len;
1430
1431         memcpy(cp->sense_buffer, sense_data, sense_len);
1432
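             /*
              * Sense bytes that did not fit in this IOCB arrive later in Status
              * Continuation entries; remember the SRB so that
              * qla2x00_status_cont_entry() can copy the remainder.
              */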
1433         sp->request_sense_ptr += sense_len;
1434         sp->request_sense_length -= sense_len;
1435         if (sp->request_sense_length != 0)
1436                 rsp->status_srb = sp;
1437
1438         DEBUG5(printk("%s(): Check condition Sense data, scsi(%ld:%d:%d:%d) "
1439             "cmd=%p\n", __func__, sp->fcport->vha->host_no,
1440             cp->device->channel, cp->device->id, cp->device->lun, cp));
1441         if (sense_len)
1442                 DEBUG5(qla2x00_dump_buffer(cp->sense_buffer, sense_len));
1443 }
1444
1445 struct scsi_dif_tuple {
1446         __be16 guard;       /* Checksum */
1447         __be16 app_tag;         /* APPL identifier */
1448         __be32 ref_tag;         /* Target LBA or indirect LBA */
1449 };
1450
1451 /*
1452  * Checks the guard or meta-data for the type of error
1453  * detected by the HBA. In case of errors, we set the
1454  * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST
1455  * to indicate to the kernel that the HBA detected an error.
1456  */
1457 static inline void
1458 qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
1459 {
1460         struct scsi_cmnd *cmd = sp->cmd;
1461         struct scsi_dif_tuple   *ep =
1462                         (struct scsi_dif_tuple *)&sts24->data[20];
1463         struct scsi_dif_tuple   *ap =
1464                         (struct scsi_dif_tuple *)&sts24->data[12];
1465         uint32_t        e_ref_tag, a_ref_tag;
1466         uint16_t        e_app_tag, a_app_tag;
1467         uint16_t        e_guard, a_guard;
1468
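             /*
              * The expected (ep) and actual (ap) protection tuples are returned
              * in the status IOCB data area in big-endian (wire) format; convert
              * guard, application tag and reference tag before comparing.
              */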
1469         e_ref_tag = be32_to_cpu(ep->ref_tag);
1470         a_ref_tag = be32_to_cpu(ap->ref_tag);
1471         e_app_tag = be16_to_cpu(ep->app_tag);
1472         a_app_tag = be16_to_cpu(ap->app_tag);
1473         e_guard = be16_to_cpu(ep->guard);
1474         a_guard = be16_to_cpu(ap->guard);
1475
1476         DEBUG18(printk(KERN_DEBUG
1477             "%s(): iocb(s) %p Returned STATUS\n", __func__, sts24));
1478
1479         DEBUG18(printk(KERN_ERR "DIF ERROR in cmd 0x%x lba 0x%llx act ref"
1480             " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app"
1481             " tag=0x%x, act guard=0x%x, exp guard=0x%x\n",
1482             cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag,
1483             a_app_tag, e_app_tag, a_guard, e_guard));
1484
1485
1486         /* check guard */
1487         if (e_guard != a_guard) {
1488                 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
1489                     0x10, 0x1);
1490                 set_driver_byte(cmd, DRIVER_SENSE);
1491                 set_host_byte(cmd, DID_ABORT);
1492                 cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
1493                 return;
1494         }
1495
1496         /* check appl tag */
1497         if (e_app_tag != a_app_tag) {
1498                 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
1499                     0x10, 0x2);
1500                 set_driver_byte(cmd, DRIVER_SENSE);
1501                 set_host_byte(cmd, DID_ABORT);
1502                 cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
1503                 return;
1504         }
1505
1506         /* check ref tag */
1507         if (e_ref_tag != a_ref_tag) {
1508                 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
1509                     0x10, 0x3);
1510                 set_driver_byte(cmd, DRIVER_SENSE);
1511                 set_host_byte(cmd, DID_ABORT);
1512                 cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
1513                 return;
1514         }
1515 }
1516
1517 /**
1518  * qla2x00_status_entry() - Process a Status IOCB entry.
1519  * @vha: SCSI driver HA context
1520  * @pkt: Entry pointer
1521  */
1522 static void
1523 qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1524 {
1525         srb_t           *sp;
1526         fc_port_t       *fcport;
1527         struct scsi_cmnd *cp;
1528         sts_entry_t *sts;
1529         struct sts_entry_24xx *sts24;
1530         uint16_t        comp_status;
1531         uint16_t        scsi_status;
1532         uint16_t        ox_id;
1533         uint8_t         lscsi_status;
1534         int32_t         resid;
1535         uint32_t sense_len, par_sense_len, rsp_info_len, resid_len,
1536             fw_resid_len;
1537         uint8_t         *rsp_info, *sense_data;
1538         struct qla_hw_data *ha = vha->hw;
1539         uint32_t handle;
1540         uint16_t que;
1541         struct req_que *req;
1542         int logit = 1;
1543
1544         sts = (sts_entry_t *) pkt;
1545         sts24 = (struct sts_entry_24xx *) pkt;
1546         if (IS_FWI2_CAPABLE(ha)) {
1547                 comp_status = le16_to_cpu(sts24->comp_status);
1548                 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
1549         } else {
1550                 comp_status = le16_to_cpu(sts->comp_status);
1551                 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
1552         }
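             /*
              * The low word of the IOCB handle indexes the outstanding-command
              * array; the high word selects the request queue the command was
              * issued on (e.g. handle 0x0002001f is slot 0x1f on request
              * queue 2).
              */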
1553         handle = (uint32_t) LSW(sts->handle);
1554         que = MSW(sts->handle);
1555         req = ha->req_q_map[que];
1556
1557         /* Fast path completion. */
1558         if (comp_status == CS_COMPLETE && scsi_status == 0) {
1559                 qla2x00_process_completed_request(vha, req, handle);
1560
1561                 return;
1562         }
1563
1564         /* Validate handle. */
1565         if (handle < MAX_OUTSTANDING_COMMANDS) {
1566                 sp = req->outstanding_cmds[handle];
1567                 req->outstanding_cmds[handle] = NULL;
1568         } else
1569                 sp = NULL;
1570
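             /*
              * A handle that is out of range or no longer outstanding means the
              * driver and firmware disagree about in-flight commands; request an
              * ISP abort (or an FCoE context reset on ISP82xx) via the DPC
              * thread.
              */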
1571         if (sp == NULL) {
1572                 qla_printk(KERN_WARNING, ha,
1573                     "scsi(%ld): Invalid status handle (0x%x).\n", vha->host_no,
1574                     sts->handle);
1575
1576                 if (IS_QLA82XX(ha))
1577                         set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1578                 else
1579                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1580                 qla2xxx_wake_dpc(vha);
1581                 return;
1582         }
1583         cp = sp->cmd;
1584         if (cp == NULL) {
1585                 qla_printk(KERN_WARNING, ha,
1586                     "scsi(%ld): Command already returned (0x%x/%p).\n",
1587                     vha->host_no, sts->handle, sp);
1588
1589                 return;
1590         }
1591
1592         lscsi_status = scsi_status & STATUS_MASK;
1593
1594         fcport = sp->fcport;
1595
1596         ox_id = 0;
1597         sense_len = par_sense_len = rsp_info_len = resid_len =
1598             fw_resid_len = 0;
1599         if (IS_FWI2_CAPABLE(ha)) {
1600                 if (scsi_status & SS_SENSE_LEN_VALID)
1601                         sense_len = le32_to_cpu(sts24->sense_len);
1602                 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
1603                         rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
1604                 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER))
1605                         resid_len = le32_to_cpu(sts24->rsp_residual_count);
1606                 if (comp_status == CS_DATA_UNDERRUN)
1607                         fw_resid_len = le32_to_cpu(sts24->residual_len);
1608                 rsp_info = sts24->data;
1609                 sense_data = sts24->data;
1610                 host_to_fcp_swap(sts24->data, sizeof(sts24->data));
1611                 ox_id = le16_to_cpu(sts24->ox_id);
1612                 par_sense_len = sizeof(sts24->data);
1613         } else {
1614                 if (scsi_status & SS_SENSE_LEN_VALID)
1615                         sense_len = le16_to_cpu(sts->req_sense_length);
1616                 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
1617                         rsp_info_len = le16_to_cpu(sts->rsp_info_len);
1618                 resid_len = le32_to_cpu(sts->residual_length);
1619                 rsp_info = sts->rsp_info;
1620                 sense_data = sts->req_sense_data;
1621                 par_sense_len = sizeof(sts->req_sense_data);
1622         }
1623
1624         /* Check for any FCP transport errors. */
1625         if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
1626                 /* Sense data lies beyond any FCP RESPONSE data. */
1627                 if (IS_FWI2_CAPABLE(ha)) {
1628                         sense_data += rsp_info_len;
1629                         par_sense_len -= rsp_info_len;
1630                 }
1631                 if (rsp_info_len > 3 && rsp_info[3]) {
1632                         DEBUG2(qla_printk(KERN_INFO, ha,
1633                             "scsi(%ld:%d:%d): FCP I/O protocol failure "
1634                             "(0x%x/0x%x).\n", vha->host_no, cp->device->id,
1635                             cp->device->lun, rsp_info_len, rsp_info[3]));
1636
1637                         cp->result = DID_BUS_BUSY << 16;
1638                         goto out;
1639                 }
1640         }
1641
1642         /* Check for overrun. */
1643         if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE &&
1644             scsi_status & SS_RESIDUAL_OVER)
1645                 comp_status = CS_DATA_OVERRUN;
1646
1647         /*
1648          * Based on Host and scsi status generate status code for Linux
1649          */
1650         switch (comp_status) {
1651         case CS_COMPLETE:
1652         case CS_QUEUE_FULL:
1653                 if (scsi_status == 0) {
1654                         cp->result = DID_OK << 16;
1655                         break;
1656                 }
1657                 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
1658                         resid = resid_len;
1659                         scsi_set_resid(cp, resid);
1660
1661                         if (!lscsi_status &&
1662                             ((unsigned)(scsi_bufflen(cp) - resid) <
1663                              cp->underflow)) {
1664                                 qla_printk(KERN_INFO, ha,
1665                                     "scsi(%ld:%d:%d): Mid-layer underflow "
1666                                     "detected (0x%x of 0x%x bytes).\n",
1667                                     vha->host_no, cp->device->id,
1668                                     cp->device->lun, resid, scsi_bufflen(cp));
1669
1670                                 cp->result = DID_ERROR << 16;
1671                                 break;
1672                         }
1673                 }
1674                 cp->result = DID_OK << 16 | lscsi_status;
1675
1676                 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
1677                         DEBUG2(qla_printk(KERN_INFO, ha,
1678                             "scsi(%ld:%d:%d) QUEUE FULL detected.\n",
1679                             vha->host_no, cp->device->id, cp->device->lun));
1680                         break;
1681                 }
1682                 logit = 0;
1683                 if (lscsi_status != SS_CHECK_CONDITION)
1684                         break;
1685
1686                 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1687                 if (!(scsi_status & SS_SENSE_LEN_VALID))
1688                         break;
1689
1690                 qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len,
1691                     rsp);
1692                 break;
1693
1694         case CS_DATA_UNDERRUN:
1695                 /* Use F/W calculated residual length. */
1696                 resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len;
1697                 scsi_set_resid(cp, resid);
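                     /*
                      * A target-reported underrun is trusted only when the
                      * firmware's residual agrees with it (on FWI-2 capable
                      * adapters); a mismatch, or an underrun the target never
                      * reported, indicates dropped frames and the command is
                      * failed with DID_ERROR.
                      */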
1698                 if (scsi_status & SS_RESIDUAL_UNDER) {
1699                         if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
1700                                 DEBUG2(qla_printk(KERN_INFO, ha,
1701                                     "scsi(%ld:%d:%d) Dropped frame(s) detected "
1702                                     "(0x%x of 0x%x bytes).\n", vha->host_no,
1703                                     cp->device->id, cp->device->lun, resid,
1704                                     scsi_bufflen(cp)));
1705
1706                                 cp->result = DID_ERROR << 16 | lscsi_status;
1707                                 break;
1708                         }
1709
1710                         if (!lscsi_status &&
1711                             ((unsigned)(scsi_bufflen(cp) - resid) <
1712                             cp->underflow)) {
1713                                 qla_printk(KERN_INFO, ha,
1714                                     "scsi(%ld:%d:%d): Mid-layer underflow "
1715                                     "detected (0x%x of 0x%x bytes).\n",
1716                                     vha->host_no, cp->device->id,
1717                                     cp->device->lun, resid, scsi_bufflen(cp));
1718
1719                                 cp->result = DID_ERROR << 16;
1720                                 break;
1721                         }
1722                 } else {
1723                         DEBUG2(qla_printk(KERN_INFO, ha,
1724                             "scsi(%ld:%d:%d) Dropped frame(s) detected (0x%x "
1725                             "of 0x%x bytes).\n", vha->host_no, cp->device->id,
1726                             cp->device->lun, resid, scsi_bufflen(cp)));
1727
1728                         cp->result = DID_ERROR << 16 | lscsi_status;
1729                         goto check_scsi_status;
1730                 }
1731
1732                 cp->result = DID_OK << 16 | lscsi_status;
1733                 logit = 0;
1734
1735 check_scsi_status:
1736                 /*
1737                  * Check to see if SCSI Status is non zero. If so report SCSI
1738                  * Status.
1739                  */
1740                 if (lscsi_status != 0) {
1741                         if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
1742                                 DEBUG2(qla_printk(KERN_INFO, ha,
1743                                     "scsi(%ld:%d:%d) QUEUE FULL detected.\n",
1744                                     vha->host_no, cp->device->id,
1745                                     cp->device->lun));
1746                                 logit = 1;
1747                                 break;
1748                         }
1749                         if (lscsi_status != SS_CHECK_CONDITION)
1750                                 break;
1751
1752                         memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1753                         if (!(scsi_status & SS_SENSE_LEN_VALID))
1754                                 break;
1755
1756                         qla2x00_handle_sense(sp, sense_data, par_sense_len,
1757                             sense_len, rsp);
1758                 }
1759                 break;
1760
1761         case CS_PORT_LOGGED_OUT:
1762         case CS_PORT_CONFIG_CHG:
1763         case CS_PORT_BUSY:
1764         case CS_INCOMPLETE:
1765         case CS_PORT_UNAVAILABLE:
1766         case CS_TIMEOUT:
1767         case CS_RESET:
1768
1769                 /*
1770                  * We are going to have the fc class block the rport
1771                  * while we try to recover so instruct the mid layer
1772                  * to requeue until the class decides how to handle this.
1773                  */
1774                 cp->result = DID_TRANSPORT_DISRUPTED << 16;
1775
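                     /*
                      * A bare timeout does not necessarily mean the port is gone:
                      * FWI-2 capable ISPs, and older ISPs that have not sent a
                      * LOGOUT, skip the port-down handling below.
                      */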
1776                 if (comp_status == CS_TIMEOUT) {
1777                         if (IS_FWI2_CAPABLE(ha))
1778                                 break;
1779                         else if ((le16_to_cpu(sts->status_flags) &
1780                             SF_LOGOUT_SENT) == 0)
1781                                 break;
1782                 }
1783
1784                 DEBUG2(qla_printk(KERN_INFO, ha,
1785                         "scsi(%ld:%d:%d) Port down status: port-state=0x%x\n",
1786                         vha->host_no, cp->device->id, cp->device->lun,
1787                         atomic_read(&fcport->state)));
1788
1789                 if (atomic_read(&fcport->state) == FCS_ONLINE)
1790                         qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
1791                 break;
1792
1793         case CS_ABORTED:
1794                 cp->result = DID_RESET << 16;
1795                 break;
1796
1797         case CS_DIF_ERROR:
1798                 qla2x00_handle_dif_error(sp, sts24);
1799                 break;
1800         default:
1801                 cp->result = DID_ERROR << 16;
1802                 break;
1803         }
1804
1805 out:
1806         if (logit)
1807                 DEBUG2(qla_printk(KERN_INFO, ha,
1808                     "scsi(%ld:%d:%d) FCP command status: 0x%x-0x%x (0x%x) "
1809                     "portid=%02x%02x%02x oxid=0x%x cdb=%02x%02x%02x len=0x%x "
1810                     "rsp_info=0x%x resid=0x%x fw_resid=0x%x\n", vha->host_no,
1811                     cp->device->id, cp->device->lun, comp_status, scsi_status,
1812                     cp->result, fcport->d_id.b.domain, fcport->d_id.b.area,
1813                     fcport->d_id.b.al_pa, ox_id, cp->cmnd[0], cp->cmnd[1],
1814                     cp->cmnd[2], scsi_bufflen(cp), rsp_info_len, resid_len,
1815                     fw_resid_len));
1816
1817         if (rsp->status_srb == NULL)
1818                 qla2x00_sp_compl(ha, sp);
1819 }
1820
1821 /**
1822  * qla2x00_status_cont_entry() - Process a Status Continuation entry.
1823  * @rsp: response queue
1824  * @pkt: Entry pointer
1825  *
1826  * Extended sense data.
1827  */
1828 static void
1829 qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
1830 {
1831         uint8_t         sense_sz = 0;
1832         struct qla_hw_data *ha = rsp->hw;
1833         srb_t           *sp = rsp->status_srb;
1834         struct scsi_cmnd *cp;
1835
1836         if (sp != NULL && sp->request_sense_length != 0) {
1837                 cp = sp->cmd;
1838                 if (cp == NULL) {
1839                         DEBUG2(printk("%s(): Cmd already returned back to OS "
1840                             "sp=%p.\n", __func__, sp));
1841                         qla_printk(KERN_INFO, ha,
1842                             "cmd is NULL: already returned to OS (sp=%p)\n",
1843                             sp);
1844
1845                         rsp->status_srb = NULL;
1846                         return;
1847                 }
1848
1849                 if (sp->request_sense_length > sizeof(pkt->data)) {
1850                         sense_sz = sizeof(pkt->data);
1851                 } else {
1852                         sense_sz = sp->request_sense_length;
1853                 }
1854
1855                 /* Move sense data. */
1856                 if (IS_FWI2_CAPABLE(ha))
1857                         host_to_fcp_swap(pkt->data, sizeof(pkt->data));
1858                 memcpy(sp->request_sense_ptr, pkt->data, sense_sz);
1859                 DEBUG5(qla2x00_dump_buffer(sp->request_sense_ptr, sense_sz));
1860
1861                 sp->request_sense_ptr += sense_sz;
1862                 sp->request_sense_length -= sense_sz;
1863
1864                 /* Place command on done queue. */
1865                 if (sp->request_sense_length == 0) {
1866                         rsp->status_srb = NULL;
1867                         qla2x00_sp_compl(ha, sp);
1868                 }
1869         }
1870 }
1871
1872 /**
1873  * qla2x00_error_entry() - Process an error entry.
1874  * @vha: SCSI driver HA context
1875  * @pkt: Entry pointer
1876  */
1877 static void
1878 qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
1879 {
1880         srb_t *sp;
1881         struct qla_hw_data *ha = vha->hw;
1882         uint32_t handle = LSW(pkt->handle);
1883         uint16_t que = MSW(pkt->handle);
1884         struct req_que *req = ha->req_q_map[que];
1885 #if defined(QL_DEBUG_LEVEL_2)
1886         if (pkt->entry_status & RF_INV_E_ORDER)
1887                 qla_printk(KERN_ERR, ha, "%s: Invalid Entry Order\n", __func__);
1888         else if (pkt->entry_status & RF_INV_E_COUNT)
1889                 qla_printk(KERN_ERR, ha, "%s: Invalid Entry Count\n", __func__);
1890         else if (pkt->entry_status & RF_INV_E_PARAM)
1891                 qla_printk(KERN_ERR, ha,
1892                     "%s: Invalid Entry Parameter\n", __func__);
1893         else if (pkt->entry_status & RF_INV_E_TYPE)
1894                 qla_printk(KERN_ERR, ha, "%s: Invalid Entry Type\n", __func__);
1895         else if (pkt->entry_status & RF_BUSY)
1896                 qla_printk(KERN_ERR, ha, "%s: Busy\n", __func__);
1897         else
1898                 qla_printk(KERN_ERR, ha, "%s: UNKNOWN flag error\n", __func__);
1899 #endif
1900
1901         /* Validate handle. */
1902         if (handle < MAX_OUTSTANDING_COMMANDS)
1903                 sp = req->outstanding_cmds[handle];
1904         else
1905                 sp = NULL;
1906
1907         if (sp) {
1908                 /* Free outstanding command slot. */
1909                 req->outstanding_cmds[handle] = NULL;
1910
1911                 /* Bad payload or header */
1912                 if (pkt->entry_status &
1913                     (RF_INV_E_ORDER | RF_INV_E_COUNT |
1914                      RF_INV_E_PARAM | RF_INV_E_TYPE)) {
1915                         sp->cmd->result = DID_ERROR << 16;
1916                 } else if (pkt->entry_status & RF_BUSY) {
1917                         sp->cmd->result = DID_BUS_BUSY << 16;
1918                 } else {
1919                         sp->cmd->result = DID_ERROR << 16;
1920                 }
1921                 qla2x00_sp_compl(ha, sp);
1922
1923         } else if (pkt->entry_type == COMMAND_A64_TYPE || pkt->entry_type ==
1924                 COMMAND_TYPE || pkt->entry_type == COMMAND_TYPE_7
1925                 || pkt->entry_type == COMMAND_TYPE_6) {
1926                 DEBUG2(printk("scsi(%ld): Error entry - invalid handle\n",
1927                         vha->host_no));
1928                 qla_printk(KERN_WARNING, ha,
1929                         "Error entry - invalid handle\n");
1930
1931                 if (IS_QLA82XX(ha))
1932                         set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1933                 else
1934                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1935                 qla2xxx_wake_dpc(vha);
1936         }
1937 }
1938
1939 /**
1940  * qla24xx_mbx_completion() - Process mailbox command completions.
1941  * @vha: SCSI driver HA context
1942  * @mb0: Mailbox0 register
1943  */
1944 static void
1945 qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
1946 {
1947         uint16_t        cnt;
1948         uint16_t __iomem *wptr;
1949         struct qla_hw_data *ha = vha->hw;
1950         struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1951
1952         /* Load return mailbox registers. */
1953         ha->flags.mbox_int = 1;
1954         ha->mailbox_out[0] = mb0;
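             /*
              * Mailbox 0 arrives packed in the host status word (passed in as
              * mb0); the remaining mailboxes are read directly from the
              * register block, starting at mailbox1.
              */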
1955         wptr = (uint16_t __iomem *)&reg->mailbox1;
1956
1957         for (cnt = 1; cnt < ha->mbx_count; cnt++) {
1958                 ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
1959                 wptr++;
1960         }
1961
1962         if (ha->mcp) {
1963                 DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n",
1964                     __func__, vha->host_no, ha->mcp->mb[0]));
1965         } else {
1966                 DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n",
1967                     __func__, vha->host_no));
1968         }
1969 }
1970
1971 /**
1972  * qla24xx_process_response_queue() - Process response queue entries.
1973  * @vha: SCSI driver HA context
1974  */
1975 void qla24xx_process_response_queue(struct scsi_qla_host *vha,
1976         struct rsp_que *rsp)
1977 {
1978         struct sts_entry_24xx *pkt;
1979         struct qla_hw_data *ha = vha->hw;
1980
1981         if (!vha->flags.online)
1982                 return;
1983
1984         while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
1985                 pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
1986
1987                 rsp->ring_index++;
1988                 if (rsp->ring_index == rsp->length) {
1989                         rsp->ring_index = 0;
1990                         rsp->ring_ptr = rsp->ring;
1991                 } else {
1992                         rsp->ring_ptr++;
1993                 }
1994
1995                 if (pkt->entry_status != 0) {
1996                         DEBUG3(printk(KERN_INFO
1997                             "scsi(%ld): Process error entry.\n", vha->host_no));
1998
1999                         qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);
2000                         ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
2001                         wmb();
2002                         continue;
2003                 }
2004
2005                 switch (pkt->entry_type) {
2006                 case STATUS_TYPE:
2007                         qla2x00_status_entry(vha, rsp, pkt);
2008                         break;
2009                 case STATUS_CONT_TYPE:
2010                         qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
2011                         break;
2012                 case VP_RPT_ID_IOCB_TYPE:
2013                         qla24xx_report_id_acquisition(vha,
2014                             (struct vp_rpt_id_entry_24xx *)pkt);
2015                         break;
2016                 case LOGINOUT_PORT_IOCB_TYPE:
2017                         qla24xx_logio_entry(vha, rsp->req,
2018                             (struct logio_entry_24xx *)pkt);
2019                         break;
2020                 case TSK_MGMT_IOCB_TYPE:
2021                         qla24xx_tm_iocb_entry(vha, rsp->req,
2022                             (struct tsk_mgmt_entry *)pkt);
2023                         break;
2024                 case CT_IOCB_TYPE:
2025                         qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
2026                         clear_bit(MBX_INTERRUPT, &vha->hw->mbx_cmd_flags);
2027                         break;
2028                 case ELS_IOCB_TYPE:
2029                         qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
2030                         break;
2031                 default:
2032                         /* Type Not Supported. */
2033                         DEBUG4(printk(KERN_WARNING
2034                             "scsi(%ld): Received unknown response pkt type %x "
2035                             "entry status=%x.\n",
2036                             vha->host_no, pkt->entry_type, pkt->entry_status));
2037                         break;
2038                 }
2039                 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
2040                 wmb();
2041         }
2042
2043         /* Adjust ring index */
2044         if (IS_QLA82XX(ha)) {
2045                 struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
2046                 WRT_REG_DWORD(&reg->rsp_q_out[0], rsp->ring_index);
2047         } else
2048                 WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
2049 }
2050
2051 static void
2052 qla2xxx_check_risc_status(scsi_qla_host_t *vha)
2053 {
2054         int rval;
2055         uint32_t cnt;
2056         struct qla_hw_data *ha = vha->hw;
2057         struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
2058
2059         if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))
2060                 return;
2061
2062         rval = QLA_SUCCESS;
2063         WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
2064         RD_REG_DWORD(&reg->iobase_addr);
2065         WRT_REG_DWORD(&reg->iobase_window, 0x0001);
2066         for (cnt = 10000; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
2067             rval == QLA_SUCCESS; cnt--) {
2068                 if (cnt) {
2069                         WRT_REG_DWORD(&reg->iobase_window, 0x0001);
2070                         udelay(10);
2071                 } else
2072                         rval = QLA_FUNCTION_TIMEOUT;
2073         }
2074         if (rval == QLA_SUCCESS)
2075                 goto next_test;
2076
2077         WRT_REG_DWORD(&reg->iobase_window, 0x0003);
2078         for (cnt = 100; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
2079             rval == QLA_SUCCESS; cnt--) {
2080                 if (cnt) {
2081                         WRT_REG_DWORD(&reg->iobase_window, 0x0003);
2082                         udelay(10);
2083                 } else
2084                         rval = QLA_FUNCTION_TIMEOUT;
2085         }
2086         if (rval != QLA_SUCCESS)
2087                 goto done;
2088
2089 next_test:
2090         if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3)
2091                 qla_printk(KERN_INFO, ha, "Additional code -- 0x55AA.\n");
2092
2093 done:
2094         WRT_REG_DWORD(&reg->iobase_window, 0x0000);
2095         RD_REG_DWORD(&reg->iobase_window);
2096 }
2097
2098 /**
2099  * qla24xx_intr_handler() - Process interrupts for the ISP24xx.
2100  * @irq:
2101  * @dev_id: SCSI driver HA context
2102  *
2103  * Called by system whenever the host adapter generates an interrupt.
2104  *
2105  * Returns handled flag.
2106  */
2107 irqreturn_t
2108 qla24xx_intr_handler(int irq, void *dev_id)
2109 {
2110         scsi_qla_host_t *vha;
2111         struct qla_hw_data *ha;
2112         struct device_reg_24xx __iomem *reg;
2113         int             status;
2114         unsigned long   iter;
2115         uint32_t        stat;
2116         uint32_t        hccr;
2117         uint16_t        mb[4];
2118         struct rsp_que *rsp;
2119         unsigned long   flags;
2120
2121         rsp = (struct rsp_que *) dev_id;
2122         if (!rsp) {
2123                 printk(KERN_INFO
2124                     "%s(): NULL response queue pointer\n", __func__);
2125                 return IRQ_NONE;
2126         }
2127
2128         ha = rsp->hw;
2129         reg = &ha->iobase->isp24;
2130         status = 0;
2131
2132         if (unlikely(pci_channel_offline(ha->pdev)))
2133                 return IRQ_HANDLED;
2134
2135         spin_lock_irqsave(&ha->hardware_lock, flags);
2136         vha = pci_get_drvdata(ha->pdev);
2137         for (iter = 50; iter--; ) {
2138                 stat = RD_REG_DWORD(&reg->host_status);
2139                 if (stat & HSRX_RISC_PAUSED) {
2140                         if (unlikely(pci_channel_offline(ha->pdev)))
2141                                 break;
2142
2143                         hccr = RD_REG_DWORD(&reg->hccr);
2144
2145                         qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
2146                             "Dumping firmware!\n", hccr);
2147
2148                         qla2xxx_check_risc_status(vha);
2149
2150                         ha->isp_ops->fw_dump(vha, 1);
2151                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2152                         break;
2153                 } else if ((stat & HSRX_RISC_INT) == 0)
2154                         break;
2155
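                     /*
                      * The low byte of the host status identifies the interrupt
                      * source: 0x1/0x2/0x10/0x11 mailbox completion, 0x12
                      * asynchronous event, 0x13/0x14 response queue update.
                      */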
2156                 switch (stat & 0xff) {
2157                 case 0x1:
2158                 case 0x2:
2159                 case 0x10:
2160                 case 0x11:
2161                         qla24xx_mbx_completion(vha, MSW(stat));
2162                         status |= MBX_INTERRUPT;
2163
2164                         break;
2165                 case 0x12:
2166                         mb[0] = MSW(stat);
2167                         mb[1] = RD_REG_WORD(&reg->mailbox1);
2168                         mb[2] = RD_REG_WORD(&reg->mailbox2);
2169                         mb[3] = RD_REG_WORD(&reg->mailbox3);
2170                         qla2x00_async_event(vha, rsp, mb);
2171                         break;
2172                 case 0x13:
2173                 case 0x14:
2174                         qla24xx_process_response_queue(vha, rsp);
2175                         break;
2176                 default:
2177                         DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
2178                             "(%d).\n",
2179                             vha->host_no, stat & 0xff));
2180                         break;
2181                 }
2182                 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2183                 RD_REG_DWORD_RELAXED(&reg->hccr);
2184         }
2185         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2186
2187         if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
2188             (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
2189                 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
2190                 complete(&ha->mbx_intr_comp);
2191         }
2192
2193         return IRQ_HANDLED;
2194 }
2195
2196 static irqreturn_t
2197 qla24xx_msix_rsp_q(int irq, void *dev_id)
2198 {
2199         struct qla_hw_data *ha;
2200         struct rsp_que *rsp;
2201         struct device_reg_24xx __iomem *reg;
2202         struct scsi_qla_host *vha;
2203         unsigned long flags;
2204
2205         rsp = (struct rsp_que *) dev_id;
2206         if (!rsp) {
2207                 printk(KERN_INFO
2208                 "%s(): NULL response queue pointer\n", __func__);
2209                 return IRQ_NONE;
2210         }
2211         ha = rsp->hw;
2212         reg = &ha->iobase->isp24;
2213
2214         spin_lock_irqsave(&ha->hardware_lock, flags);
2215
2216         vha = pci_get_drvdata(ha->pdev);
2217         qla24xx_process_response_queue(vha, rsp);
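             /*
              * Unless MSI-X handshaking has been disabled, acknowledge the
              * interrupt by clearing the RISC interrupt bit in HCCR.
              */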
2218         if (!ha->flags.disable_msix_handshake) {
2219                 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2220                 RD_REG_DWORD_RELAXED(&reg->hccr);
2221         }
2222         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2223
2224         return IRQ_HANDLED;
2225 }
2226
2227 static irqreturn_t
2228 qla25xx_msix_rsp_q(int irq, void *dev_id)
2229 {
2230         struct qla_hw_data *ha;
2231         struct rsp_que *rsp;
2232         struct device_reg_24xx __iomem *reg;
2233         unsigned long flags;
2234
2235         rsp = (struct rsp_que *) dev_id;
2236         if (!rsp) {
2237                 printk(KERN_INFO
2238                         "%s(): NULL response queue pointer\n", __func__);
2239                 return IRQ_NONE;
2240         }
2241         ha = rsp->hw;
2242
2243         /* Clear the interrupt, if enabled, for this response queue */
2244         if (rsp->options & ~BIT_6) {
2245                 reg = &ha->iobase->isp24;
2246                 spin_lock_irqsave(&ha->hardware_lock, flags);
2247                 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2248                 RD_REG_DWORD_RELAXED(&reg->hccr);
2249                 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2250         }
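             /*
              * Defer the actual response-queue processing to the per-queue work
              * item, scheduled on the CPU associated with this response queue
              * (rsp->id - 1).
              */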
2251         queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work);
2252
2253         return IRQ_HANDLED;
2254 }
2255
2256 static irqreturn_t
2257 qla24xx_msix_default(int irq, void *dev_id)
2258 {
2259         scsi_qla_host_t *vha;
2260         struct qla_hw_data *ha;
2261         struct rsp_que *rsp;
2262         struct device_reg_24xx __iomem *reg;
2263         int             status;
2264         uint32_t        stat;
2265         uint32_t        hccr;
2266         uint16_t        mb[4];
2267         unsigned long flags;
2268
2269         rsp = (struct rsp_que *) dev_id;
2270         if (!rsp) {
2271                 DEBUG(printk(
2272                 "%s(): NULL response queue pointer\n", __func__));
2273                 return IRQ_NONE;
2274         }
2275         ha = rsp->hw;
2276         reg = &ha->iobase->isp24;
2277         status = 0;
2278
2279         spin_lock_irqsave(&ha->hardware_lock, flags);
2280         vha = pci_get_drvdata(ha->pdev);
2281         do {
2282                 stat = RD_REG_DWORD(&reg->host_status);
2283                 if (stat & HSRX_RISC_PAUSED) {
2284                         if (unlikely(pci_channel_offline(ha->pdev)))
2285                                 break;
2286
2287                         hccr = RD_REG_DWORD(&reg->hccr);
2288
2289                         qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
2290                             "Dumping firmware!\n", hccr);
2291
2292                         qla2xxx_check_risc_status(vha);
2293
2294                         ha->isp_ops->fw_dump(vha, 1);
2295                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2296                         break;
2297                 } else if ((stat & HSRX_RISC_INT) == 0)
2298                         break;
2299
2300                 switch (stat & 0xff) {
2301                 case 0x1:
2302                 case 0x2:
2303                 case 0x10:
2304                 case 0x11:
2305                         qla24xx_mbx_completion(vha, MSW(stat));
2306                         status |= MBX_INTERRUPT;
2307
2308                         break;
2309                 case 0x12:
2310                         mb[0] = MSW(stat);
2311                         mb[1] = RD_REG_WORD(&reg->mailbox1);
2312                         mb[2] = RD_REG_WORD(&reg->mailbox2);
2313                         mb[3] = RD_REG_WORD(&reg->mailbox3);
2314                         qla2x00_async_event(vha, rsp, mb);
2315                         break;
2316                 case 0x13:
2317                 case 0x14:
2318                         qla24xx_process_response_queue(vha, rsp);
2319                         break;
2320                 default:
2321                         DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
2322                             "(%d).\n",
2323                             vha->host_no, stat & 0xff));
2324                         break;
2325                 }
2326                 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2327         } while (0);
2328         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2329
2330         if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
2331             (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
2332                 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
2333                 complete(&ha->mbx_intr_comp);
2334         }
2335         return IRQ_HANDLED;
2336 }
2337
2338 /* Interrupt handling helpers. */
2339
2340 struct qla_init_msix_entry {
2341         const char *name;
2342         irq_handler_t handler;
2343 };
2344
2345 static struct qla_init_msix_entry msix_entries[3] = {
2346         { "qla2xxx (default)", qla24xx_msix_default },
2347         { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
2348         { "qla2xxx (multiq)", qla25xx_msix_rsp_q },
2349 };
2350
2351 static struct qla_init_msix_entry qla82xx_msix_entries[2] = {
2352         { "qla2xxx (default)", qla82xx_msix_default },
2353         { "qla2xxx (rsp_q)", qla82xx_msix_rsp_q },
2354 };
2355
2356 static void
2357 qla24xx_disable_msix(struct qla_hw_data *ha)
2358 {
2359         int i;
2360         struct qla_msix_entry *qentry;
2361
2362         for (i = 0; i < ha->msix_count; i++) {
2363                 qentry = &ha->msix_entries[i];
2364                 if (qentry->have_irq)
2365                         free_irq(qentry->vector, qentry->rsp);
2366         }
2367         pci_disable_msix(ha->pdev);
2368         kfree(ha->msix_entries);
2369         ha->msix_entries = NULL;
2370         ha->flags.msix_enabled = 0;
2371 }
2372
2373 static int
2374 qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
2375 {
2376 #define MIN_MSIX_COUNT  2
2377         int i, ret;
2378         struct msix_entry *entries;
2379         struct qla_msix_entry *qentry;
2380
2381         entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count,
2382                         GFP_KERNEL);
2383         if (!entries)
2384                 return -ENOMEM;
2385
2386         for (i = 0; i < ha->msix_count; i++)
2387                 entries[i].entry = i;
2388
2389         ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
2390         if (ret) {
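                     /*
                      * A positive return from pci_enable_msix() is the number of
                      * vectors actually available; retry with that reduced count
                      * unless it is below the minimum the driver can use.
                      */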
2391                 if (ret < MIN_MSIX_COUNT)
2392                         goto msix_failed;
2393
2394                 qla_printk(KERN_WARNING, ha,
2395                         "MSI-X: Failed to enable support -- %d/%d\n"
2396                         " Retry with %d vectors\n", ha->msix_count, ret, ret);
2397                 ha->msix_count = ret;
2398                 ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
2399                 if (ret) {
2400 msix_failed:
2401                         qla_printk(KERN_WARNING, ha, "MSI-X: Failed to enable"
2402                                 " support, giving up -- %d/%d\n",
2403                                 ha->msix_count, ret);
2404                         goto msix_out;
2405                 }
2406                 ha->max_rsp_queues = ha->msix_count - 1;
2407         }
2408         ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
2409                                 ha->msix_count, GFP_KERNEL);
2410         if (!ha->msix_entries) {
2411                 ret = -ENOMEM;
2412                 goto msix_out;
2413         }
2414         ha->flags.msix_enabled = 1;
2415
2416         for (i = 0; i < ha->msix_count; i++) {
2417                 qentry = &ha->msix_entries[i];
2418                 qentry->vector = entries[i].vector;
2419                 qentry->entry = entries[i].entry;
2420                 qentry->have_irq = 0;
2421                 qentry->rsp = NULL;
2422         }
2423
2424         /* Enable MSI-X vectors for the base queue */
2425         for (i = 0; i < 2; i++) {
2426                 qentry = &ha->msix_entries[i];
2427                 if (IS_QLA82XX(ha)) {
2428                         ret = request_irq(qentry->vector,
2429                                 qla82xx_msix_entries[i].handler,
2430                                 0, qla82xx_msix_entries[i].name, rsp);
2431                 } else {
2432                         ret = request_irq(qentry->vector,
2433                                 msix_entries[i].handler,
2434                                 0, msix_entries[i].name, rsp);
2435                 }
2436                 if (ret) {
2437                         qla_printk(KERN_WARNING, ha,
2438                         "MSI-X: Unable to register handler -- %x/%d.\n",
2439                         qentry->vector, ret);
2440                         qla24xx_disable_msix(ha);
2441                         ha->mqenable = 0;
2442                         goto msix_out;
2443                 }
2444                 qentry->have_irq = 1;
2445                 qentry->rsp = rsp;
2446                 rsp->msix = qentry;
2447         }
2448
2449         /* Enable MSI-X vector for response queue update for queue 0 */
2450         if (ha->mqiobase &&  (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
2451                 ha->mqenable = 1;
2452
2453 msix_out:
2454         kfree(entries);
2455         return ret;
2456 }
2457
2458 int
2459 qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
2460 {
2461         int ret;
2462         device_reg_t __iomem *reg = ha->iobase;
2463
2464         /* If possible, enable MSI-X. */
2465         if (!IS_QLA2432(ha) && !IS_QLA2532(ha) &&
2466                 !IS_QLA8432(ha) && !IS_QLA8XXX_TYPE(ha))
2467                 goto skip_msi;
2468
2469         if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
2470                 (ha->pdev->subsystem_device == 0x7040 ||
2471                 ha->pdev->subsystem_device == 0x7041 ||
2472                 ha->pdev->subsystem_device == 0x1705)) {
2473                 DEBUG2(qla_printk(KERN_WARNING, ha,
2474                         "MSI-X: Unsupported ISP2432 SSVID/SSDID (0x%X,0x%X).\n",
2475                         ha->pdev->subsystem_vendor,
2476                         ha->pdev->subsystem_device));
2477                 goto skip_msi;
2478         }
2479
2480         if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX ||
2481                 !QLA_MSIX_FW_MODE_1(ha->fw_attributes))) {
2482                 DEBUG2(qla_printk(KERN_WARNING, ha,
2483                 "MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n",
2484                         ha->pdev->revision, ha->fw_attributes));
2485                 goto skip_msix;
2486         }
2487
2488         ret = qla24xx_enable_msix(ha, rsp);
2489         if (!ret) {
2490                 DEBUG2(qla_printk(KERN_INFO, ha,
2491                     "MSI-X: Enabled (0x%X, 0x%X).\n", ha->chip_revision,
2492                     ha->fw_attributes));
2493                 goto clear_risc_ints;
2494         }
2495         qla_printk(KERN_WARNING, ha,
2496             "MSI-X: Falling back to MSI mode -- %d.\n", ret);
2497 skip_msix:
2498
2499         if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
2500             !IS_QLA8001(ha))
2501                 goto skip_msi;
2502
2503         ret = pci_enable_msi(ha->pdev);
2504         if (!ret) {
2505                 DEBUG2(qla_printk(KERN_INFO, ha, "MSI: Enabled.\n"));
2506                 ha->flags.msi_enabled = 1;
2507         } else
2508                 qla_printk(KERN_WARNING, ha,
2509                     "MSI: Falling back to INTa mode -- %d.\n", ret);
2510 skip_msi:
2511
2512         ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
2513             ha->flags.msi_enabled ? 0 : IRQF_SHARED,
2514             QLA2XXX_DRIVER_NAME, rsp);
2515         if (ret) {
2516                 qla_printk(KERN_WARNING, ha,
2517                     "Failed to reserve interrupt %d; already in use.\n",
2518                     ha->pdev->irq);
2519                 goto fail;
2520         }
2521
2522 clear_risc_ints:
2523
2524         /*
2525          * FIXME: Noted that 8014s were being dropped during NK testing.
2526          * Timing deltas during MSI-X/INTa transitions?
2527          */
2528         if (IS_QLA81XX(ha) || IS_QLA82XX(ha))
2529                 goto fail;
2530         spin_lock_irq(&ha->hardware_lock);
2531         if (IS_FWI2_CAPABLE(ha)) {
2532                 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_HOST_INT);
2533                 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_RISC_INT);
2534         } else {
2535                 WRT_REG_WORD(&reg->isp.semaphore, 0);
2536                 WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_RISC_INT);
2537                 WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_HOST_INT);
2538         }
2539         spin_unlock_irq(&ha->hardware_lock);
2540
2541 fail:
2542         return ret;
2543 }
2544
2545 void
2546 qla2x00_free_irqs(scsi_qla_host_t *vha)
2547 {
2548         struct qla_hw_data *ha = vha->hw;
2549         struct rsp_que *rsp = ha->rsp_q_map[0];
2550
2551         if (ha->flags.msix_enabled)
2552                 qla24xx_disable_msix(ha);
2553         else if (ha->flags.msi_enabled) {
2554                 free_irq(ha->pdev->irq, rsp);
2555                 pci_disable_msi(ha->pdev);
2556         } else
2557                 free_irq(ha->pdev->irq, rsp);
2558 }
2559
2560
2561 int qla25xx_request_irq(struct rsp_que *rsp)
2562 {
2563         struct qla_hw_data *ha = rsp->hw;
2564         struct qla_init_msix_entry *intr = &msix_entries[2];
2565         struct qla_msix_entry *msix = rsp->msix;
2566         int ret;
2567
2568         ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp);
2569         if (ret) {
2570                 qla_printk(KERN_WARNING, ha,
2571                         "MSI-X: Unable to register handler -- %x/%d.\n",
2572                         msix->vector, ret);
2573                 return ret;
2574         }
2575         msix->have_irq = 1;
2576         msix->rsp = rsp;
2577         return ret;
2578 }