1 /*
2  * QLogic Fibre Channel HBA Driver
3  * Copyright (c)  2003-2010 QLogic Corporation
4  *
5  * See LICENSE.qla2xxx for copyright and licensing details.
6  */
7 #include "qla_def.h"
8
9 #include <linux/delay.h>
10 #include <linux/slab.h>
11 #include <scsi/scsi_tcq.h>
12 #include <scsi/scsi_bsg_fc.h>
13 #include <scsi/scsi_eh.h>
14
15 static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
16 static void qla2x00_process_completed_request(struct scsi_qla_host *,
17         struct req_que *, uint32_t);
18 static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
19 static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
20 static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
21         sts_entry_t *);
22
23 /**
24  * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
25  * @irq: interrupt number
26  * @dev_id: SCSI driver HA context
27  *
28  * Called by the system whenever the host adapter generates an interrupt.
29  *
30  * Returns handled flag.
31  */
32 irqreturn_t
33 qla2100_intr_handler(int irq, void *dev_id)
34 {
35         scsi_qla_host_t *vha;
36         struct qla_hw_data *ha;
37         struct device_reg_2xxx __iomem *reg;
38         int             status;
39         unsigned long   iter;
40         uint16_t        hccr;
41         uint16_t        mb[4];
42         struct rsp_que *rsp;
43         unsigned long   flags;
44
45         rsp = (struct rsp_que *) dev_id;
46         if (!rsp) {
47                 printk(KERN_INFO
48                     "%s(): NULL response queue pointer\n", __func__);
49                 return (IRQ_NONE);
50         }
51
52         ha = rsp->hw;
53         reg = &ha->iobase->isp;
54         status = 0;
55
56         spin_lock_irqsave(&ha->hardware_lock, flags);
57         vha = pci_get_drvdata(ha->pdev);
58         for (iter = 50; iter--; ) {
59                 hccr = RD_REG_WORD(&reg->hccr);
60                 if (hccr & HCCR_RISC_PAUSE) {
61                         if (pci_channel_offline(ha->pdev))
62                                 break;
63
64                         /*
65                          * Issue a "HARD" reset in order for the RISC interrupt
66                          * bit to be cleared.  Schedule a big hammer to get
67                          * out of the RISC PAUSED state.
68                          */
69                         WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
70                         RD_REG_WORD(&reg->hccr);
71
72                         ha->isp_ops->fw_dump(vha, 1);
73                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
74                         break;
75                 } else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0)
76                         break;
77
78                 if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
79                         WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
80                         RD_REG_WORD(&reg->hccr);
81
82                         /* Get mailbox data. */
83                         mb[0] = RD_MAILBOX_REG(ha, reg, 0);
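                        /*
                         * Mailbox status in the 0x4000-0x7fff range is a
                         * mailbox command completion; 0x8000-0xbfff is an
                         * asynchronous event.
                         */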
84                         if (mb[0] > 0x3fff && mb[0] < 0x8000) {
85                                 qla2x00_mbx_completion(vha, mb[0]);
86                                 status |= MBX_INTERRUPT;
87                         } else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
88                                 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
89                                 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
90                                 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
91                                 qla2x00_async_event(vha, rsp, mb);
92                         } else {
93                                 /*EMPTY*/
94                                 DEBUG2(printk("scsi(%ld): Unrecognized "
95                                     "interrupt type (%d).\n",
96                                     vha->host_no, mb[0]));
97                         }
98                         /* Release mailbox registers. */
99                         WRT_REG_WORD(&reg->semaphore, 0);
100                         RD_REG_WORD(&reg->semaphore);
101                 } else {
102                         qla2x00_process_response_queue(rsp);
103
104                         WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
105                         RD_REG_WORD(&reg->hccr);
106                 }
107         }
108         spin_unlock_irqrestore(&ha->hardware_lock, flags);
109
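        /* Wake up the mailbox command path if it is waiting on this interrupt. */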
110         if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
111             (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
112                 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
113                 complete(&ha->mbx_intr_comp);
114         }
115
116         return (IRQ_HANDLED);
117 }
118
119 /**
120  * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
121  * @irq: interrupt number
122  * @dev_id: SCSI driver HA context
123  *
124  * Called by the system whenever the host adapter generates an interrupt.
125  *
126  * Returns handled flag.
127  */
128 irqreturn_t
129 qla2300_intr_handler(int irq, void *dev_id)
130 {
131         scsi_qla_host_t *vha;
132         struct device_reg_2xxx __iomem *reg;
133         int             status;
134         unsigned long   iter;
135         uint32_t        stat;
136         uint16_t        hccr;
137         uint16_t        mb[4];
138         struct rsp_que *rsp;
139         struct qla_hw_data *ha;
140         unsigned long   flags;
141
142         rsp = (struct rsp_que *) dev_id;
143         if (!rsp) {
144                 printk(KERN_INFO
145                     "%s(): NULL response queue pointer\n", __func__);
146                 return (IRQ_NONE);
147         }
148
149         ha = rsp->hw;
150         reg = &ha->iobase->isp;
151         status = 0;
152
153         spin_lock_irqsave(&ha->hardware_lock, flags);
154         vha = pci_get_drvdata(ha->pdev);
155         for (iter = 50; iter--; ) {
156                 stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
157                 if (stat & HSR_RISC_PAUSED) {
158                         if (unlikely(pci_channel_offline(ha->pdev)))
159                                 break;
160
161                         hccr = RD_REG_WORD(&reg->hccr);
162                         if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
163                                 qla_printk(KERN_INFO, ha, "Parity error -- "
164                                     "HCCR=%x, Dumping firmware!\n", hccr);
165                         else
166                                 qla_printk(KERN_INFO, ha, "RISC paused -- "
167                                     "HCCR=%x, Dumping firmware!\n", hccr);
168
169                         /*
170                          * Issue a "HARD" reset in order for the RISC
171                          * interrupt bit to be cleared.  Schedule a big
172                          * hammer to get out of the RISC PAUSED state.
173                          */
174                         WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
175                         RD_REG_WORD(&reg->hccr);
176
177                         ha->isp_ops->fw_dump(vha, 1);
178                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
179                         break;
180                 } else if ((stat & HSR_RISC_INT) == 0)
181                         break;
182
183                 switch (stat & 0xff) {
184                 case 0x1:
185                 case 0x2:
186                 case 0x10:
187                 case 0x11:
188                         qla2x00_mbx_completion(vha, MSW(stat));
189                         status |= MBX_INTERRUPT;
190
191                         /* Release mailbox registers. */
192                         WRT_REG_WORD(&reg->semaphore, 0);
193                         break;
194                 case 0x12:
195                         mb[0] = MSW(stat);
196                         mb[1] = RD_MAILBOX_REG(ha, reg, 1);
197                         mb[2] = RD_MAILBOX_REG(ha, reg, 2);
198                         mb[3] = RD_MAILBOX_REG(ha, reg, 3);
199                         qla2x00_async_event(vha, rsp, mb);
200                         break;
201                 case 0x13:
202                         qla2x00_process_response_queue(rsp);
203                         break;
204                 case 0x15:
205                         mb[0] = MBA_CMPLT_1_16BIT;
206                         mb[1] = MSW(stat);
207                         qla2x00_async_event(vha, rsp, mb);
208                         break;
209                 case 0x16:
210                         mb[0] = MBA_SCSI_COMPLETION;
211                         mb[1] = MSW(stat);
212                         mb[2] = RD_MAILBOX_REG(ha, reg, 2);
213                         qla2x00_async_event(vha, rsp, mb);
214                         break;
215                 default:
216                         DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
217                             "(%d).\n",
218                             vha->host_no, stat & 0xff));
219                         break;
220                 }
221                 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
222                 RD_REG_WORD_RELAXED(&reg->hccr);
223         }
224         spin_unlock_irqrestore(&ha->hardware_lock, flags);
225
226         if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
227             (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
228                 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
229                 complete(&ha->mbx_intr_comp);
230         }
231
232         return (IRQ_HANDLED);
233 }
234
235 /**
236  * qla2x00_mbx_completion() - Process mailbox command completions.
237  * @vha: SCSI driver HA context
238  * @mb0: Mailbox0 register
239  */
240 static void
241 qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
242 {
243         uint16_t        cnt;
244         uint16_t __iomem *wptr;
245         struct qla_hw_data *ha = vha->hw;
246         struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
247
248         /* Load return mailbox registers. */
249         ha->flags.mbox_int = 1;
250         ha->mailbox_out[0] = mb0;
251         wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);
252
253         for (cnt = 1; cnt < ha->mbx_count; cnt++) {
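                /*
                 * On ISP2200 the pointer is re-fetched at register 8, which is
                 * not contiguous with register 7.  Registers 4 and 5 are read
                 * via qla2x00_debounce_register(), which rereads until two
                 * consecutive reads match.
                 */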
254                 if (IS_QLA2200(ha) && cnt == 8)
255                         wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
256                 if (cnt == 4 || cnt == 5)
257                         ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
258                 else
259                         ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
260
261                 wptr++;
262         }
263
264         if (ha->mcp) {
265                 DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n",
266                     __func__, vha->host_no, ha->mcp->mb[0]));
267         } else {
268                 DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n",
269                     __func__, vha->host_no));
270         }
271 }
272
273 static void
274 qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
275 {
276         static char *event[] =
277                 { "Complete", "Request Notification", "Time Extension" };
278         int rval;
279         struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24;
280         uint16_t __iomem *wptr;
281         uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS];
282
283         /* Seed data -- mailbox1 -> mailbox7. */
284         wptr = (uint16_t __iomem *)&reg24->mailbox1;
285         for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
286                 mb[cnt] = RD_REG_WORD(wptr);
287
288         DEBUG2(printk("scsi(%ld): Inter-Driver Communication %s -- "
289             "%04x %04x %04x %04x %04x %04x %04x.\n", vha->host_no,
290             event[aen & 0xff],
291             mb[0], mb[1], mb[2], mb[3], mb[4], mb[5], mb[6]));
292
293         /* Acknowledgement needed? [Notify && non-zero timeout]. */
294         timeout = (descr >> 8) & 0xf;
295         if (aen != MBA_IDC_NOTIFY || !timeout)
296                 return;
297
298         DEBUG2(printk("scsi(%ld): Inter-Driver Communication %s -- "
299             "ACK timeout=%d.\n", vha->host_no, event[aen & 0xff], timeout));
300
301         rval = qla2x00_post_idc_ack_work(vha, mb);
302         if (rval != QLA_SUCCESS)
303                 qla_printk(KERN_WARNING, vha->hw,
304                     "IDC failed to post ACK.\n");
305 }
306
307 /**
308  * qla2x00_async_event() - Process asynchronous events.
309  * @vha: SCSI driver HA context
310  * @mb: Mailbox registers (0 - 3)
311  */
312 void
313 qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
314 {
315 #define LS_UNKNOWN      2
316         static char     *link_speeds[] = { "1", "2", "?", "4", "8", "10" };
317         char            *link_speed;
318         uint16_t        handle_cnt;
319         uint16_t        cnt, mbx;
320         uint32_t        handles[5];
321         struct qla_hw_data *ha = vha->hw;
322         struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
323         struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
324         uint32_t        rscn_entry, host_pid;
325         uint8_t         rscn_queue_index;
326         unsigned long   flags;
327
328         /* Setup to process RIO completion. */
329         handle_cnt = 0;
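        /*
         * RIO (Reduced Interrupt Operation) events pack up to five completion
         * handles into the mailbox registers; unpack them below and treat the
         * event as a plain MBA_SCSI_COMPLETION.  ISP8xxx-type adapters do not
         * use this packing, so skip straight to the event switch.
         */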
330         if (IS_QLA8XXX_TYPE(ha))
331                 goto skip_rio;
332         switch (mb[0]) {
333         case MBA_SCSI_COMPLETION:
334                 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
335                 handle_cnt = 1;
336                 break;
337         case MBA_CMPLT_1_16BIT:
338                 handles[0] = mb[1];
339                 handle_cnt = 1;
340                 mb[0] = MBA_SCSI_COMPLETION;
341                 break;
342         case MBA_CMPLT_2_16BIT:
343                 handles[0] = mb[1];
344                 handles[1] = mb[2];
345                 handle_cnt = 2;
346                 mb[0] = MBA_SCSI_COMPLETION;
347                 break;
348         case MBA_CMPLT_3_16BIT:
349                 handles[0] = mb[1];
350                 handles[1] = mb[2];
351                 handles[2] = mb[3];
352                 handle_cnt = 3;
353                 mb[0] = MBA_SCSI_COMPLETION;
354                 break;
355         case MBA_CMPLT_4_16BIT:
356                 handles[0] = mb[1];
357                 handles[1] = mb[2];
358                 handles[2] = mb[3];
359                 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
360                 handle_cnt = 4;
361                 mb[0] = MBA_SCSI_COMPLETION;
362                 break;
363         case MBA_CMPLT_5_16BIT:
364                 handles[0] = mb[1];
365                 handles[1] = mb[2];
366                 handles[2] = mb[3];
367                 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
368                 handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
369                 handle_cnt = 5;
370                 mb[0] = MBA_SCSI_COMPLETION;
371                 break;
372         case MBA_CMPLT_2_32BIT:
373                 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
374                 handles[1] = le32_to_cpu(
375                     ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) |
376                     RD_MAILBOX_REG(ha, reg, 6));
377                 handle_cnt = 2;
378                 mb[0] = MBA_SCSI_COMPLETION;
379                 break;
380         default:
381                 break;
382         }
383 skip_rio:
384         switch (mb[0]) {
385         case MBA_SCSI_COMPLETION:       /* Fast Post */
386                 if (!vha->flags.online)
387                         break;
388
389                 for (cnt = 0; cnt < handle_cnt; cnt++)
390                         qla2x00_process_completed_request(vha, rsp->req,
391                                 handles[cnt]);
392                 break;
393
394         case MBA_RESET:                 /* Reset */
395                 DEBUG2(printk("scsi(%ld): Asynchronous RESET.\n",
396                         vha->host_no));
397
398                 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
399                 break;
400
401         case MBA_SYSTEM_ERR:            /* System Error */
402                 mbx = IS_QLA81XX(ha) ? RD_REG_WORD(&reg24->mailbox7) : 0;
403                 qla_printk(KERN_INFO, ha,
404                     "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh "
405                     "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx);
406
407                 ha->isp_ops->fw_dump(vha, 1);
408
409                 if (IS_FWI2_CAPABLE(ha)) {
410                         if (mb[1] == 0 && mb[2] == 0) {
411                                 qla_printk(KERN_ERR, ha,
412                                     "Unrecoverable Hardware Error: adapter "
413                                     "marked OFFLINE!\n");
414                                 vha->flags.online = 0;
415                         } else {
416                                 /* Check to see if MPI timeout occurred */
417                                 if ((mbx & MBX_3) && (ha->flags.port0))
418                                         set_bit(MPI_RESET_NEEDED,
419                                             &vha->dpc_flags);
420
421                                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
422                         }
423                 } else if (mb[1] == 0) {
424                         qla_printk(KERN_INFO, ha,
425                             "Unrecoverable Hardware Error: adapter marked "
426                             "OFFLINE!\n");
427                         vha->flags.online = 0;
428                 } else
429                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
430                 break;
431
432         case MBA_REQ_TRANSFER_ERR:      /* Request Transfer Error */
433                 DEBUG2(printk("scsi(%ld): ISP Request Transfer Error (%x).\n",
434                     vha->host_no, mb[1]));
435                 qla_printk(KERN_WARNING, ha,
436                     "ISP Request Transfer Error (%x).\n", mb[1]);
437
438                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
439                 break;
440
441         case MBA_RSP_TRANSFER_ERR:      /* Response Transfer Error */
442                 DEBUG2(printk("scsi(%ld): ISP Response Transfer Error.\n",
443                     vha->host_no));
444                 qla_printk(KERN_WARNING, ha, "ISP Response Transfer Error.\n");
445
446                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
447                 break;
448
449         case MBA_WAKEUP_THRES:          /* Request Queue Wake-up */
450                 DEBUG2(printk("scsi(%ld): Asynchronous WAKEUP_THRES.\n",
451                     vha->host_no));
452                 break;
453
454         case MBA_LIP_OCCURRED:          /* Loop Initialization Procedure */
455                 DEBUG2(printk("scsi(%ld): LIP occurred (%x).\n", vha->host_no,
456                     mb[1]));
457                 qla_printk(KERN_INFO, ha, "LIP occurred (%x).\n", mb[1]);
458
459                 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
460                         atomic_set(&vha->loop_state, LOOP_DOWN);
461                         atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
462                         qla2x00_mark_all_devices_lost(vha, 1);
463                 }
464
465                 if (vha->vp_idx) {
466                         atomic_set(&vha->vp_state, VP_FAILED);
467                         fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
468                 }
469
470                 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
471                 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
472
473                 vha->flags.management_server_logged_in = 0;
474                 qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]);
475                 break;
476
477         case MBA_LOOP_UP:               /* Loop Up Event */
478                 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
479                         link_speed = link_speeds[0];
480                         ha->link_data_rate = PORT_SPEED_1GB;
481                 } else {
482                         link_speed = link_speeds[LS_UNKNOWN];
483                         if (mb[1] < 5)
484                                 link_speed = link_speeds[mb[1]];
485                         else if (mb[1] == 0x13)
486                                 link_speed = link_speeds[5];
487                         ha->link_data_rate = mb[1];
488                 }
489
490                 DEBUG2(printk("scsi(%ld): Asynchronous LOOP UP (%s Gbps).\n",
491                     vha->host_no, link_speed));
492                 qla_printk(KERN_INFO, ha, "LOOP UP detected (%s Gbps).\n",
493                     link_speed);
494
495                 vha->flags.management_server_logged_in = 0;
496                 qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
497                 break;
498
499         case MBA_LOOP_DOWN:             /* Loop Down Event */
500                 mbx = IS_QLA81XX(ha) ? RD_REG_WORD(&reg24->mailbox4) : 0;
501                 DEBUG2(printk("scsi(%ld): Asynchronous LOOP DOWN "
502                     "(%x %x %x %x).\n", vha->host_no, mb[1], mb[2], mb[3],
503                     mbx));
504                 qla_printk(KERN_INFO, ha,
505                     "LOOP DOWN detected (%x %x %x %x).\n", mb[1], mb[2], mb[3],
506                     mbx);
507
508                 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
509                         atomic_set(&vha->loop_state, LOOP_DOWN);
510                         atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
511                         vha->device_flags |= DFLG_NO_CABLE;
512                         qla2x00_mark_all_devices_lost(vha, 1);
513                 }
514
515                 if (vha->vp_idx) {
516                         atomic_set(&vha->vp_state, VP_FAILED);
517                         fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
518                 }
519
520                 vha->flags.management_server_logged_in = 0;
521                 ha->link_data_rate = PORT_SPEED_UNKNOWN;
522                 qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0);
523                 break;
524
525         case MBA_LIP_RESET:             /* LIP reset occurred */
526                 DEBUG2(printk("scsi(%ld): Asynchronous LIP RESET (%x).\n",
527                     vha->host_no, mb[1]));
528                 qla_printk(KERN_INFO, ha,
529                     "LIP reset occurred (%x).\n", mb[1]);
530
531                 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
532                         atomic_set(&vha->loop_state, LOOP_DOWN);
533                         atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
534                         qla2x00_mark_all_devices_lost(vha, 1);
535                 }
536
537                 if (vha->vp_idx) {
538                         atomic_set(&vha->vp_state, VP_FAILED);
539                         fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
540                 }
541
542                 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
543
544                 ha->operating_mode = LOOP;
545                 vha->flags.management_server_logged_in = 0;
546                 qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
547                 break;
548
549         /* case MBA_DCBX_COMPLETE: */
550         case MBA_POINT_TO_POINT:        /* Point-to-Point */
551                 if (IS_QLA2100(ha))
552                         break;
553
554                 if (IS_QLA8XXX_TYPE(ha)) {
555                         DEBUG2(printk("scsi(%ld): DCBX Completed -- %04x %04x "
556                             "%04x\n", vha->host_no, mb[1], mb[2], mb[3]));
557                         if (ha->notify_dcbx_comp)
558                                 complete(&ha->dcbx_comp);
559
560                 } else
561                         DEBUG2(printk("scsi(%ld): Asynchronous P2P MODE "
562                             "received.\n", vha->host_no));
563
564                 /*
565                  * Until there's a transition from loop down to loop up, treat
566                  * this as loop down only.
567                  */
568                 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
569                         atomic_set(&vha->loop_state, LOOP_DOWN);
570                         if (!atomic_read(&vha->loop_down_timer))
571                                 atomic_set(&vha->loop_down_timer,
572                                     LOOP_DOWN_TIME);
573                         qla2x00_mark_all_devices_lost(vha, 1);
574                 }
575
576                 if (vha->vp_idx) {
577                         atomic_set(&vha->vp_state, VP_FAILED);
578                         fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
579                 }
580
581                 if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)))
582                         set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
583
584                 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
585                 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
586
587                 ha->flags.gpsc_supported = 1;
588                 vha->flags.management_server_logged_in = 0;
589                 break;
590
591         case MBA_CHG_IN_CONNECTION:     /* Change in connection mode */
592                 if (IS_QLA2100(ha))
593                         break;
594
595                 DEBUG2(printk("scsi(%ld): Asynchronous Change In Connection "
596                     "received.\n",
597                     vha->host_no));
598                 qla_printk(KERN_INFO, ha,
599                     "Configuration change detected: value=%x.\n", mb[1]);
600
601                 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
602                         atomic_set(&vha->loop_state, LOOP_DOWN);
603                         if (!atomic_read(&vha->loop_down_timer))
604                                 atomic_set(&vha->loop_down_timer,
605                                     LOOP_DOWN_TIME);
606                         qla2x00_mark_all_devices_lost(vha, 1);
607                 }
608
609                 if (vha->vp_idx) {
610                         atomic_set(&vha->vp_state, VP_FAILED);
611                         fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
612                 }
613
614                 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
615                 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
616                 break;
617
618         case MBA_PORT_UPDATE:           /* Port database update */
619                 /*
620                  * Handle only global and vn-port update events
621                  *
622                  * Relevant inputs:
623                  * mb[1] = N_Port handle of changed port
624                  * OR 0xffff for global event
625                  * mb[2] = New login state
626                  * 7 = Port logged out
627                  * mb[3] = LSB is vp_idx, 0xff = all vps
628                  *
629                  * Skip processing if:
630                  *       Event is global, vp_idx is NOT all vps,
631                  *           vp_idx does not match
632                  *       Event is not global, vp_idx does not match
633                  */
634                 if (IS_QLA2XXX_MIDTYPE(ha) &&
635                     ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) ||
636                         (mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff))
637                         break;
638
639                 /* Global event -- port logout or port unavailable. */
640                 if (mb[1] == 0xffff && mb[2] == 0x7) {
641                         DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE.\n",
642                             vha->host_no));
643                         DEBUG(printk(KERN_INFO
644                             "scsi(%ld): Port unavailable %04x %04x %04x.\n",
645                             vha->host_no, mb[1], mb[2], mb[3]));
646
647                         if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
648                                 atomic_set(&vha->loop_state, LOOP_DOWN);
649                                 atomic_set(&vha->loop_down_timer,
650                                     LOOP_DOWN_TIME);
651                                 vha->device_flags |= DFLG_NO_CABLE;
652                                 qla2x00_mark_all_devices_lost(vha, 1);
653                         }
654
655                         if (vha->vp_idx) {
656                                 atomic_set(&vha->vp_state, VP_FAILED);
657                                 fc_vport_set_state(vha->fc_vport,
658                                     FC_VPORT_FAILED);
659                                 qla2x00_mark_all_devices_lost(vha, 1);
660                         }
661
662                         vha->flags.management_server_logged_in = 0;
663                         ha->link_data_rate = PORT_SPEED_UNKNOWN;
664                         break;
665                 }
666
667                 /*
668                  * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
669                  * event etc. earlier indicating loop is down) then process
670                  * it.  Otherwise ignore it and wait for RSCN to come in.
671                  */
672                 atomic_set(&vha->loop_down_timer, 0);
673                 if (atomic_read(&vha->loop_state) != LOOP_DOWN &&
674                     atomic_read(&vha->loop_state) != LOOP_DEAD) {
675                         DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE "
676                             "ignored %04x/%04x/%04x.\n", vha->host_no, mb[1],
677                             mb[2], mb[3]));
678                         break;
679                 }
680
681                 DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE.\n",
682                     vha->host_no));
683                 DEBUG(printk(KERN_INFO
684                     "scsi(%ld): Port database changed %04x %04x %04x.\n",
685                     vha->host_no, mb[1], mb[2], mb[3]));
686
687                 /*
688                  * Mark all devices as missing so we will login again.
689                  */
690                 atomic_set(&vha->loop_state, LOOP_UP);
691
692                 qla2x00_mark_all_devices_lost(vha, 1);
693
694                 vha->flags.rscn_queue_overflow = 1;
695
696                 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
697                 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
698                 break;
699
700         case MBA_RSCN_UPDATE:           /* State Change Registration */
701                 /* Check if the Vport has issued a SCR */
702                 if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags))
703                         break;
704                 /* Only handle SCNs for our Vport index. */
705                 if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff))
706                         break;
707
708                 DEBUG2(printk("scsi(%ld): Asynchronous RSCN UPDATE.\n",
709                     vha->host_no));
710                 DEBUG(printk(KERN_INFO
711                     "scsi(%ld): RSCN database changed -- %04x %04x %04x.\n",
712                     vha->host_no, mb[1], mb[2], mb[3]));
713
714                 rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
715                 host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
716                                 | vha->d_id.b.al_pa;
717                 if (rscn_entry == host_pid) {
718                         DEBUG(printk(KERN_INFO
719                             "scsi(%ld): Ignoring RSCN update to local host "
720                             "port ID (%06x)\n",
721                             vha->host_no, host_pid));
722                         break;
723                 }
724
725                 /* Ignore reserved bits from RSCN-payload. */
726                 rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];
727                 rscn_queue_index = vha->rscn_in_ptr + 1;
728                 if (rscn_queue_index == MAX_RSCN_COUNT)
729                         rscn_queue_index = 0;
730                 if (rscn_queue_index != vha->rscn_out_ptr) {
731                         vha->rscn_queue[vha->rscn_in_ptr] = rscn_entry;
732                         vha->rscn_in_ptr = rscn_queue_index;
733                 } else {
734                         vha->flags.rscn_queue_overflow = 1;
735                 }
736
737                 atomic_set(&vha->loop_state, LOOP_UPDATE);
738                 atomic_set(&vha->loop_down_timer, 0);
739                 vha->flags.management_server_logged_in = 0;
740
741                 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
742                 set_bit(RSCN_UPDATE, &vha->dpc_flags);
743                 qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
744                 break;
745
746         /* case MBA_RIO_RESPONSE: */
747         case MBA_ZIO_RESPONSE:
748                 DEBUG3(printk("scsi(%ld): [R|Z]IO update completion.\n",
749                     vha->host_no));
750
751                 if (IS_FWI2_CAPABLE(ha))
752                         qla24xx_process_response_queue(vha, rsp);
753                 else
754                         qla2x00_process_response_queue(rsp);
755                 break;
756
757         case MBA_DISCARD_RND_FRAME:
758                 DEBUG2(printk("scsi(%ld): Discard RND Frame -- %04x %04x "
759                     "%04x.\n", vha->host_no, mb[1], mb[2], mb[3]));
760                 break;
761
762         case MBA_TRACE_NOTIFICATION:
763                 DEBUG2(printk("scsi(%ld): Trace Notification -- %04x %04x.\n",
764                 vha->host_no, mb[1], mb[2]));
765                 break;
766
767         case MBA_ISP84XX_ALERT:
768                 DEBUG2(printk("scsi(%ld): ISP84XX Alert Notification -- "
769                     "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3]));
770
771                 spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
772                 switch (mb[1]) {
773                 case A84_PANIC_RECOVERY:
774                         qla_printk(KERN_INFO, ha, "Alert 84XX: panic recovery "
775                             "%04x %04x\n", mb[2], mb[3]);
776                         break;
777                 case A84_OP_LOGIN_COMPLETE:
778                         ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
779                         DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX: "
780                             "firmware version %x\n", ha->cs84xx->op_fw_version));
781                         break;
782                 case A84_DIAG_LOGIN_COMPLETE:
783                         ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
784                         DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX: "
785                             "diagnostic firmware version %x\n",
786                             ha->cs84xx->diag_fw_version));
787                         break;
788                 case A84_GOLD_LOGIN_COMPLETE:
789                         ha->cs84xx->gold_fw_version = mb[3] << 16 | mb[2];
790                         ha->cs84xx->fw_update = 1;
791                         DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX: gold "
792                             "firmware version %x\n",
793                             ha->cs84xx->gold_fw_version));
794                         break;
795                 default:
796                         qla_printk(KERN_ERR, ha,
797                             "Alert 84xx: Invalid Alert %04x %04x %04x\n",
798                             mb[1], mb[2], mb[3]);
799                 }
800                 spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
801                 break;
802         case MBA_DCBX_START:
803                 DEBUG2(printk("scsi(%ld): DCBX Started -- %04x %04x %04x\n",
804                     vha->host_no, mb[1], mb[2], mb[3]));
805                 break;
806         case MBA_DCBX_PARAM_UPDATE:
807                 DEBUG2(printk("scsi(%ld): DCBX Parameters Updated -- "
808                     "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3]));
809                 break;
810         case MBA_FCF_CONF_ERR:
811                 DEBUG2(printk("scsi(%ld): FCF Configuration Error -- "
812                     "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3]));
813                 break;
814         case MBA_IDC_COMPLETE:
815         case MBA_IDC_NOTIFY:
816         case MBA_IDC_TIME_EXT:
817                 qla81xx_idc_event(vha, mb[0], mb[1]);
818                 break;
819         }
820
821         if (!vha->vp_idx && ha->num_vhosts)
822                 qla2x00_alert_all_vps(rsp, mb);
823 }
824
825 /**
826  * qla2x00_process_completed_request() - Process a Fast Post response.
827  * @vha: SCSI driver HA context
828  * @index: SRB index
829  */
830 static void
831 qla2x00_process_completed_request(struct scsi_qla_host *vha,
832                                 struct req_que *req, uint32_t index)
833 {
834         srb_t *sp;
835         struct qla_hw_data *ha = vha->hw;
836
837         /* Validate handle. */
838         if (index >= MAX_OUTSTANDING_COMMANDS) {
839                 DEBUG2(printk("scsi(%ld): Invalid SCSI completion handle %d.\n",
840                     vha->host_no, index));
841                 qla_printk(KERN_WARNING, ha,
842                     "Invalid SCSI completion handle %d.\n", index);
843
844                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
845                 return;
846         }
847
848         sp = req->outstanding_cmds[index];
849         if (sp) {
850                 /* Free outstanding command slot. */
851                 req->outstanding_cmds[index] = NULL;
852
853                 /* Save ISP completion status */
854                 sp->cmd->result = DID_OK << 16;
855                 qla2x00_sp_compl(ha, sp);
856         } else {
857                 DEBUG2(printk("scsi(%ld) Req:%d: Invalid ISP SCSI completion"
858                         " handle(0x%x)\n", vha->host_no, req->id, index));
859                 qla_printk(KERN_WARNING, ha,
860                     "Invalid ISP SCSI completion handle\n");
861
862                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
863         }
864 }
865
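/*
 * qla2x00_get_sp_from_handle() - Map an IOCB completion handle back to its
 * outstanding SRB.  Validates the handle range and the SRB's stored handle,
 * clears the outstanding-command slot on a match, and schedules an ISP abort
 * when the handle is out of range.
 */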
866 static srb_t *
867 qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
868     struct req_que *req, void *iocb)
869 {
870         struct qla_hw_data *ha = vha->hw;
871         sts_entry_t *pkt = iocb;
872         srb_t *sp = NULL;
873         uint16_t index;
874
875         index = LSW(pkt->handle);
876         if (index >= MAX_OUTSTANDING_COMMANDS) {
877                 qla_printk(KERN_WARNING, ha,
878                     "%s: Invalid completion handle (%x).\n", func, index);
879                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
880                 goto done;
881         }
882         sp = req->outstanding_cmds[index];
883         if (!sp) {
884                 qla_printk(KERN_WARNING, ha,
885                     "%s: Invalid completion handle (%x) -- timed-out.\n", func,
886                     index);
887                 return sp;
888         }
889         if (sp->handle != index) {
890                 qla_printk(KERN_WARNING, ha,
891                     "%s: SRB handle (%x) mismatch %x.\n", func, sp->handle,
892                     index);
893                 return NULL;
894         }
895
896         req->outstanding_cmds[index] = NULL;
897
898 done:
899         return sp;
900 }
901
902 static void
903 qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
904     struct mbx_entry *mbx)
905 {
906         const char func[] = "MBX-IOCB";
907         const char *type;
908         fc_port_t *fcport;
909         srb_t *sp;
910         struct srb_iocb *lio;
911         struct srb_ctx *ctx;
912         uint16_t *data;
913         uint16_t status;
914
915         sp = qla2x00_get_sp_from_handle(vha, func, req, mbx);
916         if (!sp)
917                 return;
918
919         ctx = sp->ctx;
920         lio = ctx->u.iocb_cmd;
921         type = ctx->name;
922         fcport = sp->fcport;
923         data = lio->u.logio.data;
924
925         data[0] = MBS_COMMAND_ERROR;
926         data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
927             QLA_LOGIO_LOGIN_RETRIED : 0;
928         if (mbx->entry_status) {
929                 DEBUG2(printk(KERN_WARNING
930                     "scsi(%ld:%x): Async-%s error entry - portid=%02x%02x%02x "
931                     "entry-status=%x status=%x state-flag=%x "
932                     "status-flags=%x.\n",
933                     fcport->vha->host_no, sp->handle, type,
934                     fcport->d_id.b.domain, fcport->d_id.b.area,
935                     fcport->d_id.b.al_pa, mbx->entry_status,
936                     le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags),
937                     le16_to_cpu(mbx->status_flags)));
938
939                 DEBUG2(qla2x00_dump_buffer((uint8_t *)mbx, sizeof(*mbx)));
940
941                 goto logio_done;
942         }
943
944         status = le16_to_cpu(mbx->status);
945         if (status == 0x30 && ctx->type == SRB_LOGIN_CMD &&
946             le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE)
947                 status = 0;
948         if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
949                 DEBUG2(printk(KERN_DEBUG
950                     "scsi(%ld:%x): Async-%s complete - portid=%02x%02x%02x "
951                     "mbx1=%x.\n",
952                     fcport->vha->host_no, sp->handle, type,
953                     fcport->d_id.b.domain, fcport->d_id.b.area,
954                     fcport->d_id.b.al_pa, le16_to_cpu(mbx->mb1)));
955
956                 data[0] = MBS_COMMAND_COMPLETE;
957                 if (ctx->type == SRB_LOGIN_CMD) {
958                         fcport->port_type = FCT_TARGET;
959                         if (le16_to_cpu(mbx->mb1) & BIT_0)
960                                 fcport->port_type = FCT_INITIATOR;
961                         else if (le16_to_cpu(mbx->mb1) & BIT_1)
962                                 fcport->flags |= FCF_FCP2_DEVICE;
963                 }
964                 goto logio_done;
965         }
966
967         data[0] = le16_to_cpu(mbx->mb0);
968         switch (data[0]) {
969         case MBS_PORT_ID_USED:
970                 data[1] = le16_to_cpu(mbx->mb1);
971                 break;
972         case MBS_LOOP_ID_USED:
973                 break;
974         default:
975                 data[0] = MBS_COMMAND_ERROR;
976                 break;
977         }
978
979         DEBUG2(printk(KERN_WARNING
980             "scsi(%ld:%x): Async-%s failed - portid=%02x%02x%02x status=%x "
981             "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n",
982             fcport->vha->host_no, sp->handle, type, fcport->d_id.b.domain,
983             fcport->d_id.b.area, fcport->d_id.b.al_pa, status,
984             le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
985             le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
986             le16_to_cpu(mbx->mb7)));
987
988 logio_done:
989         lio->done(sp);
990 }
991
992 static void
993 qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
994     sts_entry_t *pkt, int iocb_type)
995 {
996         const char func[] = "CT_IOCB";
997         const char *type;
998         struct qla_hw_data *ha = vha->hw;
999         srb_t *sp;
1000         struct srb_ctx *sp_bsg;
1001         struct fc_bsg_job *bsg_job;
1002         uint16_t comp_status;
1003
1004         sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
1005         if (!sp)
1006                 return;
1007
1008         sp_bsg = sp->ctx;
1009         bsg_job = sp_bsg->u.bsg_job;
1010
1011         type = NULL;
1012         switch (sp_bsg->type) {
1013         case SRB_CT_CMD:
1014                 type = "ct pass-through";
1015                 break;
1016         default:
1017                 qla_printk(KERN_WARNING, ha,
1018                     "%s: Unrecognized SRB: (%p) type=%d.\n", func, sp,
1019                     sp_bsg->type);
1020                 return;
1021         }
1022
1023         comp_status = le16_to_cpu(pkt->comp_status);
1024
1025         /* Return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
1026          * FC payload to the caller.
1027          */
1028         bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
1029         bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1030
1031         if (comp_status != CS_COMPLETE) {
1032                 if (comp_status == CS_DATA_UNDERRUN) {
1033                         bsg_job->reply->result = DID_OK << 16;
1034                         bsg_job->reply->reply_payload_rcv_len =
1035                             le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len);
1036
1037                         DEBUG2(qla_printk(KERN_WARNING, ha,
1038                             "scsi(%ld): CT pass-through-%s error "
1039                             "comp_status-status=0x%x total_byte = 0x%x.\n",
1040                             vha->host_no, type, comp_status,
1041                             bsg_job->reply->reply_payload_rcv_len));
1042                 } else {
1043                         DEBUG2(qla_printk(KERN_WARNING, ha,
1044                             "scsi(%ld): CT pass-through-%s error "
1045                             "comp_status-status=0x%x.\n",
1046                             vha->host_no, type, comp_status));
1047                         bsg_job->reply->result = DID_ERROR << 16;
1048                         bsg_job->reply->reply_payload_rcv_len = 0;
1049                 }
1050                 DEBUG2(qla2x00_dump_buffer((uint8_t *)pkt, sizeof(*pkt)));
1051         } else {
1052                 bsg_job->reply->result = DID_OK << 16;
1053                 bsg_job->reply->reply_payload_rcv_len =
1054                     bsg_job->reply_payload.payload_len;
1055                 bsg_job->reply_len = 0;
1056         }
1057
1058         dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
1059             bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1060
1061         dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
1062             bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1063
1064         if (sp_bsg->type == SRB_ELS_CMD_HST || sp_bsg->type == SRB_CT_CMD)
1065                 kfree(sp->fcport);
1066
1067         kfree(sp->ctx);
1068         mempool_free(sp, ha->srb_mempool);
1069         bsg_job->job_done(bsg_job);
1070 }
1071
1072 static void
1073 qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1074     struct sts_entry_24xx *pkt, int iocb_type)
1075 {
1076         const char func[] = "ELS_CT_IOCB";
1077         const char *type;
1078         struct qla_hw_data *ha = vha->hw;
1079         srb_t *sp;
1080         struct srb_ctx *sp_bsg;
1081         struct fc_bsg_job *bsg_job;
1082         uint16_t comp_status;
1083         uint32_t fw_status[3];
1084         uint8_t* fw_sts_ptr;
1085
1086         sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
1087         if (!sp)
1088                 return;
1089         sp_bsg = sp->ctx;
1090         bsg_job = sp_bsg->u.bsg_job;
1091
1092         type = NULL;
1093         switch (sp_bsg->type) {
1094         case SRB_ELS_CMD_RPT:
1095         case SRB_ELS_CMD_HST:
1096                 type = "els";
1097                 break;
1098         case SRB_CT_CMD:
1099                 type = "ct pass-through";
1100                 break;
1101         default:
1102                 qla_printk(KERN_WARNING, ha,
1103                     "%s: Unrecognized SRB: (%p) type=%d.\n", func, sp,
1104                     sp_bsg->type);
1105                 return;
1106         }
1107
1108         comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
1109         fw_status[1] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_1);
1110         fw_status[2] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_2);
1111
1112         /* Return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
1113          * FC payload to the caller.
1114          */
1115         bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
1116         bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status);
1117
1118         if (comp_status != CS_COMPLETE) {
1119                 if (comp_status == CS_DATA_UNDERRUN) {
1120                         bsg_job->reply->result = DID_OK << 16;
1121                         bsg_job->reply->reply_payload_rcv_len =
1122                                 le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->total_byte_count);
1123
1124                         DEBUG2(qla_printk(KERN_WARNING, ha,
1125                             "scsi(%ld:0x%x): ELS-CT pass-through-%s error comp_status-status=0x%x "
1126                             "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
1127                                 vha->host_no, sp->handle, type, comp_status, fw_status[1], fw_status[2],
1128                                 le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->total_byte_count)));
1129                         fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
1130                         memcpy( fw_sts_ptr, fw_status, sizeof(fw_status));
1131                 }
1132                 else {
1133                         DEBUG2(qla_printk(KERN_WARNING, ha,
1134                             "scsi(%ld:0x%x): ELS-CT pass-through-%s error comp_status-status=0x%x "
1135                             "error subcode 1=0x%x error subcode 2=0x%x.\n",
1136                                 vha->host_no, sp->handle, type, comp_status,
1137                                 le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_1),
1138                                 le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_2)));
1139                         bsg_job->reply->result = DID_ERROR << 16;
1140                         bsg_job->reply->reply_payload_rcv_len = 0;
1141                         fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
1142                         memcpy( fw_sts_ptr, fw_status, sizeof(fw_status));
1143                 }
1144                 DEBUG2(qla2x00_dump_buffer((uint8_t *)pkt, sizeof(*pkt)));
1145         }
1146         else {
1147                 bsg_job->reply->result = DID_OK << 16;
1148                 bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
1149                 bsg_job->reply_len = 0;
1150         }
1151
1152         dma_unmap_sg(&ha->pdev->dev,
1153             bsg_job->request_payload.sg_list,
1154             bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1155         dma_unmap_sg(&ha->pdev->dev,
1156             bsg_job->reply_payload.sg_list,
1157             bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1158         if ((sp_bsg->type == SRB_ELS_CMD_HST) ||
1159             (sp_bsg->type == SRB_CT_CMD))
1160                 kfree(sp->fcport);
1161         kfree(sp->ctx);
1162         mempool_free(sp, ha->srb_mempool);
1163         bsg_job->job_done(bsg_job);
1164 }
1165
1166 static void
1167 qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
1168     struct logio_entry_24xx *logio)
1169 {
1170         const char func[] = "LOGIO-IOCB";
1171         const char *type;
1172         fc_port_t *fcport;
1173         srb_t *sp;
1174         struct srb_iocb *lio;
1175         struct srb_ctx *ctx;
1176         uint16_t *data;
1177         uint32_t iop[2];
1178
1179         sp = qla2x00_get_sp_from_handle(vha, func, req, logio);
1180         if (!sp)
1181                 return;
1182
1183         ctx = sp->ctx;
1184         lio = ctx->u.iocb_cmd;
1185         type = ctx->name;
1186         fcport = sp->fcport;
1187         data = lio->u.logio.data;
1188
1189         data[0] = MBS_COMMAND_ERROR;
1190         data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
1191                 QLA_LOGIO_LOGIN_RETRIED : 0;
1192         if (logio->entry_status) {
1193                 DEBUG2(printk(KERN_WARNING
1194                     "scsi(%ld:%x): Async-%s error entry - "
1195                     "portid=%02x%02x%02x entry-status=%x.\n",
1196                     fcport->vha->host_no, sp->handle, type,
1197                     fcport->d_id.b.domain, fcport->d_id.b.area,
1198                     fcport->d_id.b.al_pa, logio->entry_status));
1199                 DEBUG2(qla2x00_dump_buffer((uint8_t *)logio, sizeof(*logio)));
1200
1201                 goto logio_done;
1202         }
1203
1204         if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
1205                 DEBUG2(printk(KERN_DEBUG
1206                     "scsi(%ld:%x): Async-%s complete - portid=%02x%02x%02x "
1207                     "iop0=%x.\n",
1208                     fcport->vha->host_no, sp->handle, type,
1209                     fcport->d_id.b.domain, fcport->d_id.b.area,
1210                     fcport->d_id.b.al_pa,
1211                     le32_to_cpu(logio->io_parameter[0])));
1212
1213                 data[0] = MBS_COMMAND_COMPLETE;
1214                 if (ctx->type != SRB_LOGIN_CMD)
1215                         goto logio_done;
1216
1217                 iop[0] = le32_to_cpu(logio->io_parameter[0]);
1218                 if (iop[0] & BIT_4) {
1219                         fcport->port_type = FCT_TARGET;
1220                         if (iop[0] & BIT_8)
1221                                 fcport->flags |= FCF_FCP2_DEVICE;
1222                 } else if (iop[0] & BIT_5)
1223                         fcport->port_type = FCT_INITIATOR;
1224
1225                 if (logio->io_parameter[7] || logio->io_parameter[8])
1226                         fcport->supported_classes |= FC_COS_CLASS2;
1227                 if (logio->io_parameter[9] || logio->io_parameter[10])
1228                         fcport->supported_classes |= FC_COS_CLASS3;
1229
1230                 goto logio_done;
1231         }
1232
1233         iop[0] = le32_to_cpu(logio->io_parameter[0]);
1234         iop[1] = le32_to_cpu(logio->io_parameter[1]);
1235         switch (iop[0]) {
1236         case LSC_SCODE_PORTID_USED:
1237                 data[0] = MBS_PORT_ID_USED;
1238                 data[1] = LSW(iop[1]);
1239                 break;
1240         case LSC_SCODE_NPORT_USED:
1241                 data[0] = MBS_LOOP_ID_USED;
1242                 break;
1243         case LSC_SCODE_CMD_FAILED:
1244                 if ((iop[1] & 0xff) == 0x05) {
1245                         data[0] = MBS_NOT_LOGGED_IN;
1246                         break;
1247                 }
1248                 /* Fall through. */
1249         default:
1250                 data[0] = MBS_COMMAND_ERROR;
1251                 break;
1252         }
1253
1254         DEBUG2(printk(KERN_WARNING
1255             "scsi(%ld:%x): Async-%s failed - portid=%02x%02x%02x comp=%x "
1256             "iop0=%x iop1=%x.\n",
1257             fcport->vha->host_no, sp->handle, type, fcport->d_id.b.domain,
1258             fcport->d_id.b.area, fcport->d_id.b.al_pa,
1259             le16_to_cpu(logio->comp_status),
1260             le32_to_cpu(logio->io_parameter[0]),
1261             le32_to_cpu(logio->io_parameter[1])));
1262
1263 logio_done:
1264         lio->done(sp);
1265 }
1266
1267 static void
1268 qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
1269     struct tsk_mgmt_entry *tsk)
1270 {
1271         const char func[] = "TMF-IOCB";
1272         const char *type;
1273         fc_port_t *fcport;
1274         srb_t *sp;
1275         struct srb_iocb *iocb;
1276         struct srb_ctx *ctx;
1277         struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
1278         int error = 1;
1279
1280         sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
1281         if (!sp)
1282                 return;
1283
1284         ctx = sp->ctx;
1285         iocb = ctx->u.iocb_cmd;
1286         type = ctx->name;
1287         fcport = sp->fcport;
1288
1289         if (sts->entry_status) {
1290                 DEBUG2(printk(KERN_WARNING
1291                     "scsi(%ld:%x): Async-%s error - entry-status(%x).\n",
1292                     fcport->vha->host_no, sp->handle, type,
1293                     sts->entry_status));
1294         } else if (sts->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
1295                 DEBUG2(printk(KERN_WARNING
1296                     "scsi(%ld:%x): Async-%s error - completion status(%x).\n",
1297                     fcport->vha->host_no, sp->handle, type,
1298                     sts->comp_status));
1299         } else if (!(le16_to_cpu(sts->scsi_status) &
1300             SS_RESPONSE_INFO_LEN_VALID)) {
1301                 DEBUG2(printk(KERN_WARNING
1302                     "scsi(%ld:%x): Async-%s error - no response info(%x).\n",
1303                     fcport->vha->host_no, sp->handle, type,
1304                     sts->scsi_status));
1305         } else if (le32_to_cpu(sts->rsp_data_len) < 4) {
1306                 DEBUG2(printk(KERN_WARNING
1307                     "scsi(%ld:%x): Async-%s error - not enough response(%d).\n",
1308                     fcport->vha->host_no, sp->handle, type,
1309                     sts->rsp_data_len));
1310         } else if (sts->data[3]) {
1311                 DEBUG2(printk(KERN_WARNING
1312                     "scsi(%ld:%x): Async-%s error - response(%x).\n",
1313                     fcport->vha->host_no, sp->handle, type,
1314                     sts->data[3]));
1315         } else {
1316                 error = 0;
1317         }
1318
1319         if (error) {
1320                 iocb->u.tmf.data = error;
1321                 DEBUG2(qla2x00_dump_buffer((uint8_t *)sts, sizeof(*sts)));
1322         }
1323
1324         iocb->done(sp);
1325 }
1326
1327 /**
1328  * qla2x00_process_response_queue() - Process response queue entries.
1329  * @rsp: response queue
1330  */
1331 void
1332 qla2x00_process_response_queue(struct rsp_que *rsp)
1333 {
1334         struct scsi_qla_host *vha;
1335         struct qla_hw_data *ha = rsp->hw;
1336         struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1337         sts_entry_t     *pkt;
1338         uint16_t        handle_cnt;
1339         uint16_t        cnt;
1340
1341         vha = pci_get_drvdata(ha->pdev);
1342
1343         if (!vha->flags.online)
1344                 return;
1345
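             /*
              * Consume ring entries until one already stamped
              * RESPONSE_PROCESSED is found; each entry is re-stamped after
              * handling so the next pass starts where new firmware entries
              * begin.
              */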
1346         while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
1347                 pkt = (sts_entry_t *)rsp->ring_ptr;
1348
1349                 rsp->ring_index++;
1350                 if (rsp->ring_index == rsp->length) {
1351                         rsp->ring_index = 0;
1352                         rsp->ring_ptr = rsp->ring;
1353                 } else {
1354                         rsp->ring_ptr++;
1355                 }
1356
1357                 if (pkt->entry_status != 0) {
1358                         DEBUG3(printk(KERN_INFO
1359                             "scsi(%ld): Process error entry.\n", vha->host_no));
1360
1361                         qla2x00_error_entry(vha, rsp, pkt);
1362                         ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
1363                         wmb();
1364                         continue;
1365                 }
1366
1367                 switch (pkt->entry_type) {
1368                 case STATUS_TYPE:
1369                         qla2x00_status_entry(vha, rsp, pkt);
1370                         break;
1371                 case STATUS_TYPE_21:
1372                         handle_cnt = ((sts21_entry_t *)pkt)->handle_count;
1373                         for (cnt = 0; cnt < handle_cnt; cnt++) {
1374                                 qla2x00_process_completed_request(vha, rsp->req,
1375                                     ((sts21_entry_t *)pkt)->handle[cnt]);
1376                         }
1377                         break;
1378                 case STATUS_TYPE_22:
1379                         handle_cnt = ((sts22_entry_t *)pkt)->handle_count;
1380                         for (cnt = 0; cnt < handle_cnt; cnt++) {
1381                                 qla2x00_process_completed_request(vha, rsp->req,
1382                                     ((sts22_entry_t *)pkt)->handle[cnt]);
1383                         }
1384                         break;
1385                 case STATUS_CONT_TYPE:
1386                         qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
1387                         break;
1388                 case MBX_IOCB_TYPE:
1389                         qla2x00_mbx_iocb_entry(vha, rsp->req,
1390                             (struct mbx_entry *)pkt);
1391                         break;
1392                 case CT_IOCB_TYPE:
1393                         qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
1394                         break;
1395                 default:
1396                         /* Type Not Supported. */
1397                         DEBUG4(printk(KERN_WARNING
1398                             "scsi(%ld): Received unknown response pkt type %x "
1399                             "entry status=%x.\n",
1400                             vha->host_no, pkt->entry_type, pkt->entry_status));
1401                         break;
1402                 }
1403                 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
1404                 wmb();
1405         }
1406
1407         /* Adjust ring index */
1408         WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
1409 }
1410
1411 static inline void
1413 qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
1414     uint32_t sense_len, struct rsp_que *rsp)
1415 {
1416         struct scsi_cmnd *cp = sp->cmd;
1417
1418         if (sense_len >= SCSI_SENSE_BUFFERSIZE)
1419                 sense_len = SCSI_SENSE_BUFFERSIZE;
1420
1421         sp->request_sense_length = sense_len;
1422         sp->request_sense_ptr = cp->sense_buffer;
1423         if (sp->request_sense_length > par_sense_len)
1424                 sense_len = par_sense_len;
1425
1426         memcpy(cp->sense_buffer, sense_data, sense_len);
1427
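             /*
              * If the sense data did not fit entirely in this IOCB, remember
              * the SRB so the remaining bytes can be copied from the Status
              * Continuation entries that follow (qla2x00_status_cont_entry()).
              */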
1428         sp->request_sense_ptr += sense_len;
1429         sp->request_sense_length -= sense_len;
1430         if (sp->request_sense_length != 0)
1431                 rsp->status_srb = sp;
1432
1433         DEBUG5(printk("%s(): Check condition Sense data, scsi(%ld:%d:%d:%d) "
1434             "cmd=%p pid=%ld\n", __func__, sp->fcport->vha->host_no,
1435             cp->device->channel, cp->device->id, cp->device->lun, cp,
1436             cp->serial_number));
1437         if (sense_len)
1438                 DEBUG5(qla2x00_dump_buffer(cp->sense_buffer, sense_len));
1439 }
1440
1441 struct scsi_dif_tuple {
1442         __be16 guard;       /* Checksum */
1443         __be16 app_tag;         /* APPL identifier */
1444         __be32 ref_tag;         /* Target LBA or indirect LBA */
1445 };
1446
1447 /*
1448  * Checks the guard, application tag and reference tag for the type of
1449  * DIF error detected by the HBA. On error, the ASC/ASCQ fields in the
1450  * sense buffer are set to ILLEGAL_REQUEST to indicate to the kernel
1451  * that the HBA detected the error.
1452  */
1453 static inline void
1454 qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
1455 {
1456         struct scsi_cmnd *cmd = sp->cmd;
1457         struct scsi_dif_tuple   *ep =
1458                         (struct scsi_dif_tuple *)&sts24->data[20];
1459         struct scsi_dif_tuple   *ap =
1460                         (struct scsi_dif_tuple *)&sts24->data[12];
1461         uint32_t        e_ref_tag, a_ref_tag;
1462         uint16_t        e_app_tag, a_app_tag;
1463         uint16_t        e_guard, a_guard;
1464
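             /*
              * The expected protection tuple is returned at data[20] and the
              * tuple actually received at data[12]; compare guard, application
              * tag and reference tag to classify the failure.
              */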
1465         e_ref_tag = be32_to_cpu(ep->ref_tag);
1466         a_ref_tag = be32_to_cpu(ap->ref_tag);
1467         e_app_tag = be16_to_cpu(ep->app_tag);
1468         a_app_tag = be16_to_cpu(ap->app_tag);
1469         e_guard = be16_to_cpu(ep->guard);
1470         a_guard = be16_to_cpu(ap->guard);
1471
1472         DEBUG18(printk(KERN_DEBUG
1473             "%s(): iocb(s) %p Returned STATUS\n", __func__, sts24));
1474
1475         DEBUG18(printk(KERN_ERR "DIF ERROR in cmd 0x%x lba 0x%llx act ref"
1476             " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app"
1477             " tag=0x%x, act guard=0x%x, exp guard=0x%x\n",
1478             cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag,
1479             a_app_tag, e_app_tag, a_guard, e_guard));
1480
1482         /* check guard */
1483         if (e_guard != a_guard) {
1484                 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
1485                     0x10, 0x1);
1486                 set_driver_byte(cmd, DRIVER_SENSE);
1487                 set_host_byte(cmd, DID_ABORT);
1488                 cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
1489                 return;
1490         }
1491
1492         /* check appl tag */
1493         if (e_app_tag != a_app_tag) {
1494                 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
1495                     0x10, 0x2);
1496                 set_driver_byte(cmd, DRIVER_SENSE);
1497                 set_host_byte(cmd, DID_ABORT);
1498                 cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
1499                 return;
1500         }
1501
1502         /* check ref tag */
1503         if (e_ref_tag != a_ref_tag) {
1504                 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
1505                     0x10, 0x3);
1506                 set_driver_byte(cmd, DRIVER_SENSE);
1507                 set_host_byte(cmd, DID_ABORT);
1508                 cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
1509                 return;
1510         }
1511 }
1512
1513 /**
1514  * qla2x00_status_entry() - Process a Status IOCB entry.
1515  * @vha: SCSI driver HA context
1516  * @pkt: Entry pointer
1517  */
1518 static void
1519 qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1520 {
1521         srb_t           *sp;
1522         fc_port_t       *fcport;
1523         struct scsi_cmnd *cp;
1524         sts_entry_t *sts;
1525         struct sts_entry_24xx *sts24;
1526         uint16_t        comp_status;
1527         uint16_t        scsi_status;
1528         uint16_t        ox_id;
1529         uint8_t         lscsi_status;
1530         int32_t         resid;
1531         uint32_t sense_len, par_sense_len, rsp_info_len, resid_len,
1532             fw_resid_len;
1533         uint8_t         *rsp_info, *sense_data;
1534         struct qla_hw_data *ha = vha->hw;
1535         uint32_t handle;
1536         uint16_t que;
1537         struct req_que *req;
1538         int logit = 1;
1539
1540         sts = (sts_entry_t *) pkt;
1541         sts24 = (struct sts_entry_24xx *) pkt;
1542         if (IS_FWI2_CAPABLE(ha)) {
1543                 comp_status = le16_to_cpu(sts24->comp_status);
1544                 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
1545         } else {
1546                 comp_status = le16_to_cpu(sts->comp_status);
1547                 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
1548         }
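             /*
              * The completion handle packs the request queue number in its
              * upper 16 bits and the outstanding-command index in its lower
              * 16 bits.
              */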
1549         handle = (uint32_t) LSW(sts->handle);
1550         que = MSW(sts->handle);
1551         req = ha->req_q_map[que];
1552
1553         /* Fast path completion. */
1554         if (comp_status == CS_COMPLETE && scsi_status == 0) {
1555                 qla2x00_process_completed_request(vha, req, handle);
1556
1557                 return;
1558         }
1559
1560         /* Validate handle. */
1561         if (handle < MAX_OUTSTANDING_COMMANDS) {
1562                 sp = req->outstanding_cmds[handle];
1563                 req->outstanding_cmds[handle] = NULL;
1564         } else
1565                 sp = NULL;
1566
1567         if (sp == NULL) {
1568                 qla_printk(KERN_WARNING, ha,
1569                     "scsi(%ld): Invalid status handle (0x%x).\n", vha->host_no,
1570                     sts->handle);
1571
1572                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1573                 qla2xxx_wake_dpc(vha);
1574                 return;
1575         }
1576         cp = sp->cmd;
1577         if (cp == NULL) {
1578                 qla_printk(KERN_WARNING, ha,
1579                     "scsi(%ld): Command already returned (0x%x/%p).\n",
1580                     vha->host_no, sts->handle, sp);
1581
1582                 return;
1583         }
1584
1585         lscsi_status = scsi_status & STATUS_MASK;
1586
1587         fcport = sp->fcport;
1588
1589         ox_id = 0;
1590         sense_len = par_sense_len = rsp_info_len = resid_len =
1591             fw_resid_len = 0;
1592         if (IS_FWI2_CAPABLE(ha)) {
1593                 if (scsi_status & SS_SENSE_LEN_VALID)
1594                         sense_len = le32_to_cpu(sts24->sense_len);
1595                 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
1596                         rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
1597                 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER))
1598                         resid_len = le32_to_cpu(sts24->rsp_residual_count);
1599                 if (comp_status == CS_DATA_UNDERRUN)
1600                         fw_resid_len = le32_to_cpu(sts24->residual_len);
1601                 rsp_info = sts24->data;
1602                 sense_data = sts24->data;
1603                 host_to_fcp_swap(sts24->data, sizeof(sts24->data));
1604                 ox_id = le16_to_cpu(sts24->ox_id);
1605                 par_sense_len = sizeof(sts24->data);
1606         } else {
1607                 if (scsi_status & SS_SENSE_LEN_VALID)
1608                         sense_len = le16_to_cpu(sts->req_sense_length);
1609                 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
1610                         rsp_info_len = le16_to_cpu(sts->rsp_info_len);
1611                 resid_len = le32_to_cpu(sts->residual_length);
1612                 rsp_info = sts->rsp_info;
1613                 sense_data = sts->req_sense_data;
1614                 par_sense_len = sizeof(sts->req_sense_data);
1615         }
1616
1617         /* Check for any FCP transport errors. */
1618         if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
1619                 /* Sense data lies beyond any FCP RESPONSE data. */
1620                 if (IS_FWI2_CAPABLE(ha)) {
1621                         sense_data += rsp_info_len;
1622                         par_sense_len -= rsp_info_len;
1623                 }
1624                 if (rsp_info_len > 3 && rsp_info[3]) {
1625                         DEBUG2(qla_printk(KERN_INFO, ha,
1626                             "scsi(%ld:%d:%d): FCP I/O protocol failure "
1627                             "(0x%x/0x%x).\n", vha->host_no, cp->device->id,
1628                             cp->device->lun, rsp_info_len, rsp_info[3]));
1629
1630                         cp->result = DID_BUS_BUSY << 16;
1631                         goto out;
1632                 }
1633         }
1634
1635         /* Check for overrun. */
1636         if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE &&
1637             scsi_status & SS_RESIDUAL_OVER)
1638                 comp_status = CS_DATA_OVERRUN;
1639
1640         /*
1641          * Based on the host and SCSI status, generate the Linux status code.
1642          */
1643         switch (comp_status) {
1644         case CS_COMPLETE:
1645         case CS_QUEUE_FULL:
1646                 if (scsi_status == 0) {
1647                         cp->result = DID_OK << 16;
1648                         break;
1649                 }
1650                 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
1651                         resid = resid_len;
1652                         scsi_set_resid(cp, resid);
1653
1654                         if (!lscsi_status &&
1655                             ((unsigned)(scsi_bufflen(cp) - resid) <
1656                              cp->underflow)) {
1657                                 qla_printk(KERN_INFO, ha,
1658                                     "scsi(%ld:%d:%d): Mid-layer underflow "
1659                                     "detected (0x%x of 0x%x bytes).\n",
1660                                     vha->host_no, cp->device->id,
1661                                     cp->device->lun, resid, scsi_bufflen(cp));
1662
1663                                 cp->result = DID_ERROR << 16;
1664                                 break;
1665                         }
1666                 }
1667                 cp->result = DID_OK << 16 | lscsi_status;
1668
1669                 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
1670                         DEBUG2(qla_printk(KERN_INFO, ha,
1671                             "scsi(%ld:%d:%d) QUEUE FULL detected.\n",
1672                             vha->host_no, cp->device->id, cp->device->lun));
1673                         break;
1674                 }
1675                 logit = 0;
1676                 if (lscsi_status != SS_CHECK_CONDITION)
1677                         break;
1678
1679                 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1680                 if (!(scsi_status & SS_SENSE_LEN_VALID))
1681                         break;
1682
1683                 qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len,
1684                     rsp);
1685                 break;
1686
1687         case CS_DATA_UNDERRUN:
1688                 /* Use F/W calculated residual length. */
1689                 resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len;
1690                 scsi_set_resid(cp, resid);
1691                 if (scsi_status & SS_RESIDUAL_UNDER) {
1692                         if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
1693                                 DEBUG2(qla_printk(KERN_INFO, ha,
1694                                     "scsi(%ld:%d:%d) Dropped frame(s) detected "
1695                                     "(0x%x of 0x%x bytes).\n", vha->host_no,
1696                                     cp->device->id, cp->device->lun, resid,
1697                                     scsi_bufflen(cp)));
1698
1699                                 cp->result = DID_ERROR << 16 | lscsi_status;
1700                                 break;
1701                         }
1702
1703                         if (!lscsi_status &&
1704                             ((unsigned)(scsi_bufflen(cp) - resid) <
1705                             cp->underflow)) {
1706                                 qla_printk(KERN_INFO, ha,
1707                                     "scsi(%ld:%d:%d): Mid-layer underflow "
1708                                     "detected (0x%x of 0x%x bytes).\n",
1709                                     vha->host_no, cp->device->id,
1710                                     cp->device->lun, resid, scsi_bufflen(cp));
1711
1712                                 cp->result = DID_ERROR << 16;
1713                                 break;
1714                         }
1715                 } else {
1716                         DEBUG2(qla_printk(KERN_INFO, ha,
1717                             "scsi(%ld:%d:%d) Dropped frame(s) detected (0x%x "
1718                             "of 0x%x bytes).\n", vha->host_no, cp->device->id,
1719                             cp->device->lun, resid, scsi_bufflen(cp)));
1720
1721                         cp->result = DID_ERROR << 16 | lscsi_status;
1722                         goto check_scsi_status;
1723                 }
1724
1725                 cp->result = DID_OK << 16 | lscsi_status;
1726                 logit = 0;
1727
1728 check_scsi_status:
1729                 /*
1730                  * Check to see if the SCSI status is non-zero; if so,
1731                  * report the SCSI status.
1732                  */
1733                 if (lscsi_status != 0) {
1734                         if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
1735                                 DEBUG2(qla_printk(KERN_INFO, ha,
1736                                     "scsi(%ld:%d:%d) QUEUE FULL detected.\n",
1737                                     vha->host_no, cp->device->id,
1738                                     cp->device->lun));
1739                                 logit = 1;
1740                                 break;
1741                         }
1742                         if (lscsi_status != SS_CHECK_CONDITION)
1743                                 break;
1744
1745                         memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1746                         if (!(scsi_status & SS_SENSE_LEN_VALID))
1747                                 break;
1748
1749                         qla2x00_handle_sense(sp, sense_data, par_sense_len,
1750                             sense_len, rsp);
1751                 }
1752                 break;
1753
1754         case CS_PORT_LOGGED_OUT:
1755         case CS_PORT_CONFIG_CHG:
1756         case CS_PORT_BUSY:
1757         case CS_INCOMPLETE:
1758         case CS_PORT_UNAVAILABLE:
1759         case CS_TIMEOUT:
1760                 /*
1761                  * We are going to have the fc class block the rport
1762                  * while we try to recover, so instruct the mid layer
1763                  * to requeue until the class decides how to handle this.
1764                  */
1765                 cp->result = DID_TRANSPORT_DISRUPTED << 16;
1766
1767                 if (comp_status == CS_TIMEOUT) {
1768                         if (IS_FWI2_CAPABLE(ha))
1769                                 break;
1770                         else if ((le16_to_cpu(sts->status_flags) &
1771                             SF_LOGOUT_SENT) == 0)
1772                                 break;
1773                 }
1774
1775                 DEBUG2(qla_printk(KERN_INFO, ha,
1776                         "scsi(%ld:%d:%d) Port down status: port-state=0x%x\n",
1777                         vha->host_no, cp->device->id, cp->device->lun,
1778                         atomic_read(&fcport->state)));
1779
1780                 if (atomic_read(&fcport->state) == FCS_ONLINE)
1781                         qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
1782                 break;
1783
1784         case CS_RESET:
1785                 cp->result = DID_TRANSPORT_DISRUPTED << 16;
1786                 break;
1787
1788         case CS_ABORTED:
1789                 cp->result = DID_RESET << 16;
1790                 break;
1791
1792         case CS_DIF_ERROR:
1793                 qla2x00_handle_dif_error(sp, sts24);
1794                 break;
1795         default:
1796                 cp->result = DID_ERROR << 16;
1797                 break;
1798         }
1799
1800 out:
1801         if (logit)
1802                 DEBUG2(qla_printk(KERN_INFO, ha,
1803                     "scsi(%ld:%d:%d) FCP command status: 0x%x-0x%x (0x%x) "
1804                     "oxid=0x%x ser=0x%lx cdb=%02x%02x%02x len=0x%x "
1805                     "rsp_info=0x%x resid=0x%x fw_resid=0x%x\n", vha->host_no,
1806                     cp->device->id, cp->device->lun, comp_status, scsi_status,
1807                     cp->result, ox_id, cp->serial_number, cp->cmnd[0],
1808                     cp->cmnd[1], cp->cmnd[2], scsi_bufflen(cp), rsp_info_len,
1809                     resid_len, fw_resid_len));
1810
1811         if (rsp->status_srb == NULL)
1812                 qla2x00_sp_compl(ha, sp);
1813 }
1814
1815 /**
1816  * qla2x00_status_cont_entry() - Process a Status Continuations entry.
1817  * @rsp: response queue
1818  * @pkt: Entry pointer
1819  *
1820  * Extended sense data.
1821  */
1822 static void
1823 qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
1824 {
1825         uint8_t         sense_sz = 0;
1826         struct qla_hw_data *ha = rsp->hw;
1827         srb_t           *sp = rsp->status_srb;
1828         struct scsi_cmnd *cp;
1829
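             /*
              * rsp->status_srb was set by qla2x00_handle_sense() when the
              * sense data overflowed the original status IOCB; keep appending
              * until request_sense_length is exhausted.
              */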
1830         if (sp != NULL && sp->request_sense_length != 0) {
1831                 cp = sp->cmd;
1832                 if (cp == NULL) {
1833                         DEBUG2(printk("%s(): Cmd already returned back to OS "
1834                             "sp=%p.\n", __func__, sp));
1835                         qla_printk(KERN_INFO, ha,
1836                             "cmd is NULL: already returned to OS (sp=%p)\n",
1837                             sp);
1838
1839                         rsp->status_srb = NULL;
1840                         return;
1841                 }
1842
1843                 if (sp->request_sense_length > sizeof(pkt->data)) {
1844                         sense_sz = sizeof(pkt->data);
1845                 } else {
1846                         sense_sz = sp->request_sense_length;
1847                 }
1848
1849                 /* Move sense data. */
1850                 if (IS_FWI2_CAPABLE(ha))
1851                         host_to_fcp_swap(pkt->data, sizeof(pkt->data));
1852                 memcpy(sp->request_sense_ptr, pkt->data, sense_sz);
1853                 DEBUG5(qla2x00_dump_buffer(sp->request_sense_ptr, sense_sz));
1854
1855                 sp->request_sense_ptr += sense_sz;
1856                 sp->request_sense_length -= sense_sz;
1857
1858                 /* Place command on done queue. */
1859                 if (sp->request_sense_length == 0) {
1860                         rsp->status_srb = NULL;
1861                         qla2x00_sp_compl(ha, sp);
1862                 }
1863         }
1864 }
1865
1866 /**
1867  * qla2x00_error_entry() - Process an error entry.
1868  * @vha: SCSI driver HA context
1869  * @pkt: Entry pointer
1870  */
1871 static void
1872 qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
1873 {
1874         srb_t *sp;
1875         struct qla_hw_data *ha = vha->hw;
1876         uint32_t handle = LSW(pkt->handle);
1877         uint16_t que = MSW(pkt->handle);
1878         struct req_que *req = ha->req_q_map[que];
1879 #if defined(QL_DEBUG_LEVEL_2)
1880         if (pkt->entry_status & RF_INV_E_ORDER)
1881                 qla_printk(KERN_ERR, ha, "%s: Invalid Entry Order\n", __func__);
1882         else if (pkt->entry_status & RF_INV_E_COUNT)
1883                 qla_printk(KERN_ERR, ha, "%s: Invalid Entry Count\n", __func__);
1884         else if (pkt->entry_status & RF_INV_E_PARAM)
1885                 qla_printk(KERN_ERR, ha,
1886                     "%s: Invalid Entry Parameter\n", __func__);
1887         else if (pkt->entry_status & RF_INV_E_TYPE)
1888                 qla_printk(KERN_ERR, ha, "%s: Invalid Entry Type\n", __func__);
1889         else if (pkt->entry_status & RF_BUSY)
1890                 qla_printk(KERN_ERR, ha, "%s: Busy\n", __func__);
1891         else
1892                 qla_printk(KERN_ERR, ha, "%s: UNKNOWN flag error\n", __func__);
1893 #endif
1894
1895         /* Validate handle. */
1896         if (handle < MAX_OUTSTANDING_COMMANDS)
1897                 sp = req->outstanding_cmds[handle];
1898         else
1899                 sp = NULL;
1900
1901         if (sp) {
1902                 /* Free outstanding command slot. */
1903                 req->outstanding_cmds[handle] = NULL;
1904
1905                 /* Bad payload or header */
1906                 if (pkt->entry_status &
1907                     (RF_INV_E_ORDER | RF_INV_E_COUNT |
1908                      RF_INV_E_PARAM | RF_INV_E_TYPE)) {
1909                         sp->cmd->result = DID_ERROR << 16;
1910                 } else if (pkt->entry_status & RF_BUSY) {
1911                         sp->cmd->result = DID_BUS_BUSY << 16;
1912                 } else {
1913                         sp->cmd->result = DID_ERROR << 16;
1914                 }
1915                 qla2x00_sp_compl(ha, sp);
1916
1917         } else if (pkt->entry_type == COMMAND_A64_TYPE || pkt->entry_type ==
1918             COMMAND_TYPE || pkt->entry_type == COMMAND_TYPE_7) {
1919                 DEBUG2(printk("scsi(%ld): Error entry - invalid handle\n",
1920                     vha->host_no));
1921                 qla_printk(KERN_WARNING, ha,
1922                     "Error entry - invalid handle\n");
1923
1924                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1925                 qla2xxx_wake_dpc(vha);
1926         }
1927 }
1928
1929 /**
1930  * qla24xx_mbx_completion() - Process mailbox command completions.
1931  * @vha: SCSI driver HA context
1932  * @mb0: Mailbox0 register
1933  */
1934 static void
1935 qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
1936 {
1937         uint16_t        cnt;
1938         uint16_t __iomem *wptr;
1939         struct qla_hw_data *ha = vha->hw;
1940         struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1941
1942         /* Load return mailbox registers. */
1943         ha->flags.mbox_int = 1;
1944         ha->mailbox_out[0] = mb0;
1945         wptr = (uint16_t __iomem *)&reg->mailbox1;
1946
1947         for (cnt = 1; cnt < ha->mbx_count; cnt++) {
1948                 ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
1949                 wptr++;
1950         }
1951
1952         if (ha->mcp) {
1953                 DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n",
1954                     __func__, vha->host_no, ha->mcp->mb[0]));
1955         } else {
1956                 DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n",
1957                     __func__, vha->host_no));
1958         }
1959 }
1960
1961 /**
1962  * qla24xx_process_response_queue() - Process response queue entries.
1963  * @vha: SCSI driver HA context
1964  */
1965 void qla24xx_process_response_queue(struct scsi_qla_host *vha,
1966         struct rsp_que *rsp)
1967 {
1968         struct sts_entry_24xx *pkt;
1969         struct qla_hw_data *ha = vha->hw;
1970
1971         if (!vha->flags.online)
1972                 return;
1973
1974         while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
1975                 pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
1976
1977                 rsp->ring_index++;
1978                 if (rsp->ring_index == rsp->length) {
1979                         rsp->ring_index = 0;
1980                         rsp->ring_ptr = rsp->ring;
1981                 } else {
1982                         rsp->ring_ptr++;
1983                 }
1984
1985                 if (pkt->entry_status != 0) {
1986                         DEBUG3(printk(KERN_INFO
1987                             "scsi(%ld): Process error entry.\n", vha->host_no));
1988
1989                         qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);
1990                         ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
1991                         wmb();
1992                         continue;
1993                 }
1994
1995                 switch (pkt->entry_type) {
1996                 case STATUS_TYPE:
1997                         qla2x00_status_entry(vha, rsp, pkt);
1998                         break;
1999                 case STATUS_CONT_TYPE:
2000                         qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
2001                         break;
2002                 case VP_RPT_ID_IOCB_TYPE:
2003                         qla24xx_report_id_acquisition(vha,
2004                             (struct vp_rpt_id_entry_24xx *)pkt);
2005                         break;
2006                 case LOGINOUT_PORT_IOCB_TYPE:
2007                         qla24xx_logio_entry(vha, rsp->req,
2008                             (struct logio_entry_24xx *)pkt);
2009                         break;
2010                 case TSK_MGMT_IOCB_TYPE:
2011                         qla24xx_tm_iocb_entry(vha, rsp->req,
2012                             (struct tsk_mgmt_entry *)pkt);
2013                         break;
2014                 case CT_IOCB_TYPE:
2015                         qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
2016                         clear_bit(MBX_INTERRUPT, &vha->hw->mbx_cmd_flags);
2017                         break;
2018                 case ELS_IOCB_TYPE:
2019                         qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
2020                         break;
2021                 default:
2022                         /* Type Not Supported. */
2023                         DEBUG4(printk(KERN_WARNING
2024                             "scsi(%ld): Received unknown response pkt type %x "
2025                             "entry status=%x.\n",
2026                             vha->host_no, pkt->entry_type, pkt->entry_status));
2027                         break;
2028                 }
2029                 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
2030                 wmb();
2031         }
2032
2033         /* Adjust ring index */
2034         if (IS_QLA82XX(ha)) {
2035                 struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
2036                 WRT_REG_DWORD(&reg->rsp_q_out[0], rsp->ring_index);
2037         } else
2038                 WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
2039 }
2040
2041 static void
2042 qla2xxx_check_risc_status(scsi_qla_host_t *vha)
2043 {
2044         int rval;
2045         uint32_t cnt;
2046         struct qla_hw_data *ha = vha->hw;
2047         struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
2048
2049         if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))
2050                 return;
2051
2052         rval = QLA_SUCCESS;
2053         WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
2054         RD_REG_DWORD(&reg->iobase_addr);
2055         WRT_REG_DWORD(&reg->iobase_window, 0x0001);
2056         for (cnt = 10000; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
2057             rval == QLA_SUCCESS; cnt--) {
2058                 if (cnt) {
2059                         WRT_REG_DWORD(&reg->iobase_window, 0x0001);
2060                         udelay(10);
2061                 } else
2062                         rval = QLA_FUNCTION_TIMEOUT;
2063         }
2064         if (rval == QLA_SUCCESS)
2065                 goto next_test;
2066
2067         WRT_REG_DWORD(&reg->iobase_window, 0x0003);
2068         for (cnt = 100; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
2069             rval == QLA_SUCCESS; cnt--) {
2070                 if (cnt) {
2071                         WRT_REG_DWORD(&reg->iobase_window, 0x0003);
2072                         udelay(10);
2073                 } else
2074                         rval = QLA_FUNCTION_TIMEOUT;
2075         }
2076         if (rval != QLA_SUCCESS)
2077                 goto done;
2078
2079 next_test:
2080         if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3)
2081                 qla_printk(KERN_INFO, ha, "Additional code -- 0x55AA.\n");
2082
2083 done:
2084         WRT_REG_DWORD(&reg->iobase_window, 0x0000);
2085         RD_REG_DWORD(&reg->iobase_window);
2086 }
2087
2088 /**
2089  * qla24xx_intr_handler() - Process interrupts for the ISP24xx and newer ISPs.
2090  * @irq: interrupt number
2091  * @dev_id: SCSI driver HA context
2092  *
2093  * Called by system whenever the host adapter generates an interrupt.
2094  *
2095  * Returns handled flag.
2096  */
2097 irqreturn_t
2098 qla24xx_intr_handler(int irq, void *dev_id)
2099 {
2100         scsi_qla_host_t *vha;
2101         struct qla_hw_data *ha;
2102         struct device_reg_24xx __iomem *reg;
2103         int             status;
2104         unsigned long   iter;
2105         uint32_t        stat;
2106         uint32_t        hccr;
2107         uint16_t        mb[4];
2108         struct rsp_que *rsp;
2109         unsigned long   flags;
2110
2111         rsp = (struct rsp_que *) dev_id;
2112         if (!rsp) {
2113                 printk(KERN_INFO
2114                     "%s(): NULL response queue pointer\n", __func__);
2115                 return IRQ_NONE;
2116         }
2117
2118         ha = rsp->hw;
2119         reg = &ha->iobase->isp24;
2120         status = 0;
2121
2122         if (unlikely(pci_channel_offline(ha->pdev)))
2123                 return IRQ_HANDLED;
2124
2125         spin_lock_irqsave(&ha->hardware_lock, flags);
2126         vha = pci_get_drvdata(ha->pdev);
2127         for (iter = 50; iter--; ) {
2128                 stat = RD_REG_DWORD(&reg->host_status);
2129                 if (stat & HSRX_RISC_PAUSED) {
2130                         if (unlikely(pci_channel_offline(ha->pdev)))
2131                                 break;
2132
2133                         hccr = RD_REG_DWORD(&reg->hccr);
2134
2135                         qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
2136                             "Dumping firmware!\n", hccr);
2137
2138                         qla2xxx_check_risc_status(vha);
2139
2140                         ha->isp_ops->fw_dump(vha, 1);
2141                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2142                         break;
2143                 } else if ((stat & HSRX_RISC_INT) == 0)
2144                         break;
2145
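                     /*
                      * The low byte of the host status identifies the
                      * interrupt source: 0x1/0x2/0x10/0x11 mailbox command
                      * completion, 0x12 asynchronous event, 0x13/0x14
                      * response queue update.
                      */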
2146                 switch (stat & 0xff) {
2147                 case 0x1:
2148                 case 0x2:
2149                 case 0x10:
2150                 case 0x11:
2151                         qla24xx_mbx_completion(vha, MSW(stat));
2152                         status |= MBX_INTERRUPT;
2153
2154                         break;
2155                 case 0x12:
2156                         mb[0] = MSW(stat);
2157                         mb[1] = RD_REG_WORD(&reg->mailbox1);
2158                         mb[2] = RD_REG_WORD(&reg->mailbox2);
2159                         mb[3] = RD_REG_WORD(&reg->mailbox3);
2160                         qla2x00_async_event(vha, rsp, mb);
2161                         break;
2162                 case 0x13:
2163                 case 0x14:
2164                         qla24xx_process_response_queue(vha, rsp);
2165                         break;
2166                 default:
2167                         DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
2168                             "(%d).\n",
2169                             vha->host_no, stat & 0xff));
2170                         break;
2171                 }
2172                 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2173                 RD_REG_DWORD_RELAXED(&reg->hccr);
2174         }
2175         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2176
2177         if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
2178             (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
2179                 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
2180                 complete(&ha->mbx_intr_comp);
2181         }
2182
2183         return IRQ_HANDLED;
2184 }
2185
2186 static irqreturn_t
2187 qla24xx_msix_rsp_q(int irq, void *dev_id)
2188 {
2189         struct qla_hw_data *ha;
2190         struct rsp_que *rsp;
2191         struct device_reg_24xx __iomem *reg;
2192         struct scsi_qla_host *vha;
2193         unsigned long flags;
2194
2195         rsp = (struct rsp_que *) dev_id;
2196         if (!rsp) {
2197                 printk(KERN_INFO
2198                 "%s(): NULL response queue pointer\n", __func__);
2199                 return IRQ_NONE;
2200         }
2201         ha = rsp->hw;
2202         reg = &ha->iobase->isp24;
2203
2204         spin_lock_irqsave(&ha->hardware_lock, flags);
2205
2206         vha = pci_get_drvdata(ha->pdev);
2207         qla24xx_process_response_queue(vha, rsp);
2208         if (!ha->flags.disable_msix_handshake) {
2209                 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2210                 RD_REG_DWORD_RELAXED(&reg->hccr);
2211         }
2212         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2213
2214         return IRQ_HANDLED;
2215 }
2216
2217 static irqreturn_t
2218 qla25xx_msix_rsp_q(int irq, void *dev_id)
2219 {
2220         struct qla_hw_data *ha;
2221         struct rsp_que *rsp;
2222         struct device_reg_24xx __iomem *reg;
2223         unsigned long flags;
2224
2225         rsp = (struct rsp_que *) dev_id;
2226         if (!rsp) {
2227                 printk(KERN_INFO
2228                         "%s(): NULL response queue pointer\n", __func__);
2229                 return IRQ_NONE;
2230         }
2231         ha = rsp->hw;
2232
2233         /* Clear the interrupt, if enabled, for this response queue */
2234         if (rsp->options & ~BIT_6) {
2235                 reg = &ha->iobase->isp24;
2236                 spin_lock_irqsave(&ha->hardware_lock, flags);
2237                 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2238                 RD_REG_DWORD_RELAXED(&reg->hccr);
2239                 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2240         }
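             /*
              * Defer the actual response processing to this queue's work
              * item, run on the CPU associated with the response queue.
              */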
2241         queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work);
2242
2243         return IRQ_HANDLED;
2244 }
2245
2246 static irqreturn_t
2247 qla24xx_msix_default(int irq, void *dev_id)
2248 {
2249         scsi_qla_host_t *vha;
2250         struct qla_hw_data *ha;
2251         struct rsp_que *rsp;
2252         struct device_reg_24xx __iomem *reg;
2253         int             status;
2254         uint32_t        stat;
2255         uint32_t        hccr;
2256         uint16_t        mb[4];
2257         unsigned long flags;
2258
2259         rsp = (struct rsp_que *) dev_id;
2260         if (!rsp) {
2261                 DEBUG(printk(
2262                 "%s(): NULL response queue pointer\n", __func__));
2263                 return IRQ_NONE;
2264         }
2265         ha = rsp->hw;
2266         reg = &ha->iobase->isp24;
2267         status = 0;
2268
2269         spin_lock_irqsave(&ha->hardware_lock, flags);
2270         vha = pci_get_drvdata(ha->pdev);
2271         do {
2272                 stat = RD_REG_DWORD(&reg->host_status);
2273                 if (stat & HSRX_RISC_PAUSED) {
2274                         if (unlikely(pci_channel_offline(ha->pdev)))
2275                                 break;
2276
2277                         hccr = RD_REG_DWORD(&reg->hccr);
2278
2279                         qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
2280                             "Dumping firmware!\n", hccr);
2281
2282                         qla2xxx_check_risc_status(vha);
2283
2284                         ha->isp_ops->fw_dump(vha, 1);
2285                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2286                         break;
2287                 } else if ((stat & HSRX_RISC_INT) == 0)
2288                         break;
2289
2290                 switch (stat & 0xff) {
2291                 case 0x1:
2292                 case 0x2:
2293                 case 0x10:
2294                 case 0x11:
2295                         qla24xx_mbx_completion(vha, MSW(stat));
2296                         status |= MBX_INTERRUPT;
2297
2298                         break;
2299                 case 0x12:
2300                         mb[0] = MSW(stat);
2301                         mb[1] = RD_REG_WORD(&reg->mailbox1);
2302                         mb[2] = RD_REG_WORD(&reg->mailbox2);
2303                         mb[3] = RD_REG_WORD(&reg->mailbox3);
2304                         qla2x00_async_event(vha, rsp, mb);
2305                         break;
2306                 case 0x13:
2307                 case 0x14:
2308                         qla24xx_process_response_queue(vha, rsp);
2309                         break;
2310                 default:
2311                         DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
2312                             "(%d).\n",
2313                             vha->host_no, stat & 0xff));
2314                         break;
2315                 }
2316                 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2317         } while (0);
2318         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2319
2320         if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
2321             (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
2322                 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
2323                 complete(&ha->mbx_intr_comp);
2324         }
2325         return IRQ_HANDLED;
2326 }
2327
2328 /* Interrupt handling helpers. */
2329
2330 struct qla_init_msix_entry {
2331         const char *name;
2332         irq_handler_t handler;
2333 };
2334
2335 static struct qla_init_msix_entry msix_entries[3] = {
2336         { "qla2xxx (default)", qla24xx_msix_default },
2337         { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
2338         { "qla2xxx (multiq)", qla25xx_msix_rsp_q },
2339 };
2340
2341 static struct qla_init_msix_entry qla82xx_msix_entries[2] = {
2342         { "qla2xxx (default)", qla82xx_msix_default },
2343         { "qla2xxx (rsp_q)", qla82xx_msix_rsp_q },
2344 };
2345
2346 static void
2347 qla24xx_disable_msix(struct qla_hw_data *ha)
2348 {
2349         int i;
2350         struct qla_msix_entry *qentry;
2351
2352         for (i = 0; i < ha->msix_count; i++) {
2353                 qentry = &ha->msix_entries[i];
2354                 if (qentry->have_irq)
2355                         free_irq(qentry->vector, qentry->rsp);
2356         }
2357         pci_disable_msix(ha->pdev);
2358         kfree(ha->msix_entries);
2359         ha->msix_entries = NULL;
2360         ha->flags.msix_enabled = 0;
2361 }
2362
2363 static int
2364 qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
2365 {
2366 #define MIN_MSIX_COUNT  2
2367         int i, ret;
2368         struct msix_entry *entries;
2369         struct qla_msix_entry *qentry;
2370
2371         entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count,
2372                         GFP_KERNEL);
2373         if (!entries)
2374                 return -ENOMEM;
2375
2376         for (i = 0; i < ha->msix_count; i++)
2377                 entries[i].entry = i;
2378
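             /*
              * With the legacy pci_enable_msix() interface a positive return
              * value is the number of vectors the system could provide; retry
              * with that smaller count unless it falls below MIN_MSIX_COUNT.
              */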
2379         ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
2380         if (ret) {
2381                 if (ret < MIN_MSIX_COUNT)
2382                         goto msix_failed;
2383
2384                 qla_printk(KERN_WARNING, ha,
2385                         "MSI-X: Failed to enable support -- %d/%d\n"
2386                         " Retry with %d vectors\n", ha->msix_count, ret, ret);
2387                 ha->msix_count = ret;
2388                 ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
2389                 if (ret) {
2390 msix_failed:
2391                         qla_printk(KERN_WARNING, ha, "MSI-X: Failed to enable"
2392                                 " support, giving up -- %d/%d\n",
2393                                 ha->msix_count, ret);
2394                         goto msix_out;
2395                 }
2396                 ha->max_rsp_queues = ha->msix_count - 1;
2397         }
2398         ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
2399                                 ha->msix_count, GFP_KERNEL);
2400         if (!ha->msix_entries) {
2401                 ret = -ENOMEM;
2402                 goto msix_out;
2403         }
2404         ha->flags.msix_enabled = 1;
2405
2406         for (i = 0; i < ha->msix_count; i++) {
2407                 qentry = &ha->msix_entries[i];
2408                 qentry->vector = entries[i].vector;
2409                 qentry->entry = entries[i].entry;
2410                 qentry->have_irq = 0;
2411                 qentry->rsp = NULL;
2412         }
2413
2414         /* Enable MSI-X vectors for the base queue */
2415         for (i = 0; i < 2; i++) {
2416                 qentry = &ha->msix_entries[i];
2417                 if (IS_QLA82XX(ha)) {
2418                         ret = request_irq(qentry->vector,
2419                                 qla82xx_msix_entries[i].handler,
2420                                 0, qla82xx_msix_entries[i].name, rsp);
2421                 } else {
2422                         ret = request_irq(qentry->vector,
2423                                 msix_entries[i].handler,
2424                                 0, msix_entries[i].name, rsp);
2425                 }
2426                 if (ret) {
2427                         qla_printk(KERN_WARNING, ha,
2428                         "MSI-X: Unable to register handler -- %x/%d.\n",
2429                         qentry->vector, ret);
2430                         qla24xx_disable_msix(ha);
2431                         ha->mqenable = 0;
2432                         goto msix_out;
2433                 }
2434                 qentry->have_irq = 1;
2435                 qentry->rsp = rsp;
2436                 rsp->msix = qentry;
2437         }
2438
2439         /* Enable MSI-X vector for response queue update for queue 0 */
2440         if (ha->mqiobase &&  (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
2441                 ha->mqenable = 1;
2442
2443 msix_out:
2444         kfree(entries);
2445         return ret;
2446 }
2447
2448 int
2449 qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
2450 {
2451         int ret;
2452         device_reg_t __iomem *reg = ha->iobase;
2453
2454         /* If possible, enable MSI-X. */
2455         if (!IS_QLA2432(ha) && !IS_QLA2532(ha) &&
2456                 !IS_QLA8432(ha) && !IS_QLA8XXX_TYPE(ha))
2457                 goto skip_msi;
2458
2459         if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
2460                 (ha->pdev->subsystem_device == 0x7040 ||
2461                 ha->pdev->subsystem_device == 0x7041 ||
2462                 ha->pdev->subsystem_device == 0x1705)) {
2463                 DEBUG2(qla_printk(KERN_WARNING, ha,
2464                         "MSI-X: Unsupported ISP2432 SSVID/SSDID (0x%X,0x%X).\n",
2465                         ha->pdev->subsystem_vendor,
2466                         ha->pdev->subsystem_device));
2467                 goto skip_msi;
2468         }
2469
2470         if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX ||
2471                 !QLA_MSIX_FW_MODE_1(ha->fw_attributes))) {
2472                 DEBUG2(qla_printk(KERN_WARNING, ha,
2473                 "MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n",
2474                         ha->pdev->revision, ha->fw_attributes));
2475                 goto skip_msix;
2476         }
2477
2478         ret = qla24xx_enable_msix(ha, rsp);
2479         if (!ret) {
2480                 DEBUG2(qla_printk(KERN_INFO, ha,
2481                     "MSI-X: Enabled (0x%X, 0x%X).\n", ha->chip_revision,
2482                     ha->fw_attributes));
2483                 goto clear_risc_ints;
2484         }
2485         qla_printk(KERN_WARNING, ha,
2486             "MSI-X: Falling back to MSI mode -- %d.\n", ret);
2487 skip_msix:
2488
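             /*
              * MSI-X is unavailable: try single-message MSI on chips that
              * support it, otherwise fall through to legacy INTa.
              */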
2489         if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
2490             !IS_QLA8001(ha))
2491                 goto skip_msi;
2492
2493         ret = pci_enable_msi(ha->pdev);
2494         if (!ret) {
2495                 DEBUG2(qla_printk(KERN_INFO, ha, "MSI: Enabled.\n"));
2496                 ha->flags.msi_enabled = 1;
2497         } else
2498                 qla_printk(KERN_WARNING, ha,
2499                     "MSI: Falling back to INTa mode -- %d.\n", ret);
2500 skip_msi:
2501
2502         ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
2503             IRQF_SHARED, QLA2XXX_DRIVER_NAME, rsp);
2504         if (ret) {
2505                 qla_printk(KERN_WARNING, ha,
2506                     "Failed to reserve interrupt %d; already in use.\n",
2507                     ha->pdev->irq);
2508                 goto fail;
2509         }
2510         ha->flags.inta_enabled = 1;
2511 clear_risc_ints:
2512
2513         /*
2514          * FIXME: Noted that 8014s were being dropped during NK testing.
2515          * Timing deltas during MSI-X/INTa transitions?
2516          */
2517         if (IS_QLA81XX(ha) || IS_QLA82XX(ha))
2518                 goto fail;
2519         spin_lock_irq(&ha->hardware_lock);
2520         if (IS_FWI2_CAPABLE(ha)) {
2521                 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_HOST_INT);
2522                 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_RISC_INT);
2523         } else {
2524                 WRT_REG_WORD(&reg->isp.semaphore, 0);
2525                 WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_RISC_INT);
2526                 WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_HOST_INT);
2527         }
2528         spin_unlock_irq(&ha->hardware_lock);
2529
2530 fail:
2531         return ret;
2532 }
2533
2534 void
2535 qla2x00_free_irqs(scsi_qla_host_t *vha)
2536 {
2537         struct qla_hw_data *ha = vha->hw;
2538         struct rsp_que *rsp = ha->rsp_q_map[0];
2539
2540         if (ha->flags.msix_enabled)
2541                 qla24xx_disable_msix(ha);
2542         else if (ha->flags.msi_enabled) {
2543                 free_irq(ha->pdev->irq, rsp);
2544                 pci_disable_msi(ha->pdev);
2545         } else
2546                 free_irq(ha->pdev->irq, rsp);
2547 }
2548
2550 int qla25xx_request_irq(struct rsp_que *rsp)
2551 {
2552         struct qla_hw_data *ha = rsp->hw;
2553         struct qla_init_msix_entry *intr = &msix_entries[2];
2554         struct qla_msix_entry *msix = rsp->msix;
2555         int ret;
2556
2557         ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp);
2558         if (ret) {
2559                 qla_printk(KERN_WARNING, ha,
2560                         "MSI-X: Unable to register handler -- %x/%d.\n",
2561                         msix->vector, ret);
2562                 return ret;
2563         }
2564         msix->have_irq = 1;
2565         msix->rsp = rsp;
2566         return ret;
2567 }