2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2010 QLogic Corporation
5 * See LICENSE.qla2xxx for copyright and licensing details.
10 #include <linux/delay.h>
11 #include <linux/slab.h>
12 #include <linux/vmalloc.h>
14 #include "qla_devtbl.h"
21 * QLogic ISP2x00 Hardware Support Function Prototypes.
23 static int qla2x00_isp_firmware(scsi_qla_host_t *);
24 static int qla2x00_setup_chip(scsi_qla_host_t *);
25 static int qla2x00_init_rings(scsi_qla_host_t *);
26 static int qla2x00_fw_ready(scsi_qla_host_t *);
27 static int qla2x00_configure_hba(scsi_qla_host_t *);
28 static int qla2x00_configure_loop(scsi_qla_host_t *);
29 static int qla2x00_configure_local_loop(scsi_qla_host_t *);
30 static int qla2x00_configure_fabric(scsi_qla_host_t *);
31 static int qla2x00_find_all_fabric_devs(scsi_qla_host_t *, struct list_head *);
32 static int qla2x00_device_resync(scsi_qla_host_t *);
33 static int qla2x00_fabric_dev_login(scsi_qla_host_t *, fc_port_t *,
36 static int qla2x00_restart_isp(scsi_qla_host_t *);
38 static int qla2x00_find_new_loop_id(scsi_qla_host_t *, fc_port_t *);
40 static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *);
41 static int qla84xx_init_chip(scsi_qla_host_t *);
42 static int qla25xx_init_queues(struct qla_hw_data *);
44 /* SRB Extensions ---------------------------------------------------------- */
/*
 * qla2x00_ctx_sp_timeout() - timer callback for a context SRB.
 * On timeout, clears the command's slot in the request queue's
 * outstanding_cmds[] table under the hardware lock.
 * NOTE(review): this extract is missing lines (declarations of 'req',
 * 'ctx', 'flags' and the iocb timeout dispatch) — verify against the
 * complete source file.
 */
47 qla2x00_ctx_sp_timeout(unsigned long __data)
49 srb_t *sp = (srb_t *)__data;
51 struct srb_iocb *iocb;
52 fc_port_t *fcport = sp->fcport;
53 struct qla_hw_data *ha = fcport->vha->hw;
/* Serialize against the interrupt path touching outstanding_cmds[]. */
57 spin_lock_irqsave(&ha->hardware_lock, flags);
58 req = ha->req_q_map[0];
59 req->outstanding_cmds[sp->handle] = NULL;
61 iocb = ctx->u.iocb_cmd;
64 spin_unlock_irqrestore(&ha->hardware_lock, flags);
/*
 * qla2x00_ctx_sp_free() - release a context SRB.
 * Stops the per-iocb timer synchronously, then returns the SRB to the
 * HBA's srb mempool. NOTE(review): frees of 'ctx'/'iocb' are not visible
 * in this extract — confirm against the full source.
 */
68 qla2x00_ctx_sp_free(srb_t *sp)
70 struct srb_ctx *ctx = sp->ctx;
71 struct srb_iocb *iocb = ctx->u.iocb_cmd;
/* del_timer_sync() guarantees the timeout handler is not running. */
73 del_timer_sync(&iocb->timer);
76 mempool_free(sp, sp->fcport->vha->hw->srb_mempool);
/*
 * qla2x00_get_ctx_sp() - allocate and initialize a context SRB.
 * Allocates the SRB from the mempool, a zeroed ctx of 'size' bytes, and a
 * zeroed srb_iocb; arms the iocb timer ('tmo' seconds, presumably a missing
 * parameter in this extract) with qla2x00_ctx_sp_timeout as the handler.
 * On any intermediate allocation failure the SRB is returned to the pool.
 * NOTE(review): several lines (parameter list tail, NULL checks, returns)
 * are missing from this extract — verify against the full source.
 */
80 qla2x00_get_ctx_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size,
84 struct qla_hw_data *ha = vha->hw;
86 struct srb_iocb *iocb;
88 sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL);
91 ctx = kzalloc(size, GFP_KERNEL);
/* ctx allocation failed: give the SRB back to the pool. */
93 mempool_free(sp, ha->srb_mempool);
97 iocb = kzalloc(sizeof(struct srb_iocb), GFP_KERNEL);
/* iocb allocation failed: give the SRB back to the pool. */
99 mempool_free(sp, ha->srb_mempool);
105 memset(sp, 0, sizeof(*sp));
108 ctx->u.iocb_cmd = iocb;
109 iocb->free = qla2x00_ctx_sp_free;
/* Arm the command timeout timer; fires qla2x00_ctx_sp_timeout(sp). */
111 init_timer(&iocb->timer);
114 iocb->timer.expires = jiffies + tmo * HZ;
115 iocb->timer.data = (unsigned long)sp;
116 iocb->timer.function = qla2x00_ctx_sp_timeout;
117 add_timer(&iocb->timer);
122 /* Asynchronous Login/Logout Routines -------------------------------------- */
/*
 * qla2x00_get_async_timeout() - compute the timeout for async logio IOCBs.
 * Uses twice the switch-negotiated R_A_TOV (r_a_tov is in units of 100ms,
 * hence /10 to get seconds) for FWI2-capable HBAs; earlier ISPs fall back
 * to the login timeout seeded from the init control block.
 */
124 static inline unsigned long
125 qla2x00_get_async_timeout(struct scsi_qla_host *vha)
128 struct qla_hw_data *ha = vha->hw;
130 /* Firmware should use switch negotiated r_a_tov for timeout. */
131 tmo = ha->r_a_tov / 10 * 2;
132 if (!IS_FWI2_CAPABLE(ha)) {
134 * Except for earlier ISPs where the timeout is seeded from the
135 * initialization control block.
137 tmo = ha->login_timeout;
/*
 * qla2x00_async_iocb_timeout() - per-iocb timeout handler for async
 * login/logout/adisc/tmf commands. Logs the timeout, clears the
 * async-sent flag, and for a timed-out login issues a logout and reports
 * MBS_COMMAND_ERROR to the login-done path so it can retry if flagged.
 */
143 qla2x00_async_iocb_timeout(srb_t *sp)
145 fc_port_t *fcport = sp->fcport;
146 struct srb_ctx *ctx = sp->ctx;
148 DEBUG2(printk(KERN_WARNING
149 "scsi(%ld:%x): Async-%s timeout - portid=%02x%02x%02x.\n",
150 fcport->vha->host_no, sp->handle,
151 ctx->name, fcport->d_id.b.domain,
152 fcport->d_id.b.area, fcport->d_id.b.al_pa));
154 fcport->flags &= ~FCF_ASYNC_SENT;
155 if (ctx->type == SRB_LOGIN_CMD) {
156 struct srb_iocb *lio = ctx->u.iocb_cmd;
/* Undo the half-completed login before reporting the error. */
157 qla2x00_post_async_logout_work(fcport->vha, fcport, NULL);
158 /* Retry as needed. */
159 lio->u.logio.data[0] = MBS_COMMAND_ERROR;
160 lio->u.logio.data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
161 QLA_LOGIO_LOGIN_RETRIED : 0;
162 qla2x00_post_async_login_done_work(fcport->vha, fcport,
/*
 * qla2x00_async_login_ctx_done() - completion callback for an async login
 * SRB; defers final processing to the login-done work item.
 */
168 qla2x00_async_login_ctx_done(srb_t *sp)
170 struct srb_ctx *ctx = sp->ctx;
171 struct srb_iocb *lio = ctx->u.iocb_cmd;
173 qla2x00_post_async_login_done_work(sp->fcport->vha, sp->fcport,
/*
 * qla2x00_async_login() - issue an asynchronous fabric login (PLOGI) to
 * 'fcport'. Allocates a context SRB with the async timeout (+2s slack),
 * fills in the logio iocb (conditional PLOGI; retried flag carried over
 * from data[1]) and starts it. Returns QLA_FUNCTION_FAILED when the SRB
 * cannot be allocated or started.
 */
179 qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
184 struct srb_iocb *lio;
187 rval = QLA_FUNCTION_FAILED;
188 sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx),
189 qla2x00_get_async_timeout(vha) + 2);
194 ctx->type = SRB_LOGIN_CMD;
196 lio = ctx->u.iocb_cmd;
197 lio->timeout = qla2x00_async_iocb_timeout;
198 lio->done = qla2x00_async_login_ctx_done;
199 lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI;
200 if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
201 lio->u.logio.flags |= SRB_LOGIN_RETRIED;
202 rval = qla2x00_start_sp(sp);
203 if (rval != QLA_SUCCESS)
206 DEBUG2(printk(KERN_DEBUG
207 "scsi(%ld:%x): Async-login - loop-id=%x portid=%02x%02x%02x "
208 "retries=%d.\n", fcport->vha->host_no, sp->handle, fcport->loop_id,
209 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
210 fcport->login_retry));
/*
 * qla2x00_async_logout_ctx_done() - completion callback for an async
 * logout SRB; defers final processing to the logout-done work item.
 */
220 qla2x00_async_logout_ctx_done(srb_t *sp)
222 struct srb_ctx *ctx = sp->ctx;
223 struct srb_iocb *lio = ctx->u.iocb_cmd;
225 qla2x00_post_async_logout_done_work(sp->fcport->vha, sp->fcport,
/*
 * qla2x00_async_logout() - issue an asynchronous fabric logout (LOGO) to
 * 'fcport'. Mirrors qla2x00_async_login(): allocate a context SRB with
 * the async timeout (+2s slack), wire up timeout/done callbacks, start it.
 * Returns QLA_FUNCTION_FAILED on allocation or start failure.
 */
231 qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
235 struct srb_iocb *lio;
238 rval = QLA_FUNCTION_FAILED;
239 sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx),
240 qla2x00_get_async_timeout(vha) + 2);
245 ctx->type = SRB_LOGOUT_CMD;
246 ctx->name = "logout";
247 lio = ctx->u.iocb_cmd;
248 lio->timeout = qla2x00_async_iocb_timeout;
249 lio->done = qla2x00_async_logout_ctx_done;
250 rval = qla2x00_start_sp(sp);
251 if (rval != QLA_SUCCESS)
254 DEBUG2(printk(KERN_DEBUG
255 "scsi(%ld:%x): Async-logout - loop-id=%x portid=%02x%02x%02x.\n",
256 fcport->vha->host_no, sp->handle, fcport->loop_id,
257 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa));
/*
 * qla2x00_async_adisc_ctx_done() - completion callback for an async ADISC
 * SRB; defers final processing to the adisc-done work item.
 */
267 qla2x00_async_adisc_ctx_done(srb_t *sp)
269 struct srb_ctx *ctx = sp->ctx;
270 struct srb_iocb *lio = ctx->u.iocb_cmd;
272 qla2x00_post_async_adisc_done_work(sp->fcport->vha, sp->fcport,
/*
 * qla2x00_async_adisc() - issue an asynchronous ADISC (address discovery)
 * to 'fcport'. Same SRB setup pattern as login/logout; the retried flag
 * is carried over from data[1]. Returns QLA_FUNCTION_FAILED on allocation
 * or start failure.
 */
278 qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
283 struct srb_iocb *lio;
286 rval = QLA_FUNCTION_FAILED;
287 sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx),
288 qla2x00_get_async_timeout(vha) + 2);
293 ctx->type = SRB_ADISC_CMD;
295 lio = ctx->u.iocb_cmd;
296 lio->timeout = qla2x00_async_iocb_timeout;
297 lio->done = qla2x00_async_adisc_ctx_done;
298 if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
299 lio->u.logio.flags |= SRB_LOGIN_RETRIED;
300 rval = qla2x00_start_sp(sp);
301 if (rval != QLA_SUCCESS)
304 DEBUG2(printk(KERN_DEBUG
305 "scsi(%ld:%x): Async-adisc - loop-id=%x portid=%02x%02x%02x.\n",
306 fcport->vha->host_no, sp->handle, fcport->loop_id,
307 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa));
/*
 * qla2x00_async_tm_cmd_ctx_done() - completion callback for an async task
 * management SRB; hands off to qla2x00_async_tm_cmd_done().
 */
318 qla2x00_async_tm_cmd_ctx_done(srb_t *sp)
320 struct srb_ctx *ctx = sp->ctx;
321 struct srb_iocb *iocb = (struct srb_iocb *)ctx->u.iocb_cmd;
323 qla2x00_async_tm_cmd_done(sp->fcport->vha, sp->fcport, iocb);
/*
 * qla2x00_async_tm_cmd() - issue an asynchronous task management command
 * (e.g. LUN reset) to 'fcport'. Packs TM flags, lun and tag into the tmf
 * iocb and starts the SRB. Returns QLA_FUNCTION_FAILED on allocation or
 * start failure.
 */
328 qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
331 struct scsi_qla_host *vha = fcport->vha;
334 struct srb_iocb *tcf;
337 rval = QLA_FUNCTION_FAILED;
338 sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx),
339 qla2x00_get_async_timeout(vha) + 2);
344 ctx->type = SRB_TM_CMD;
346 tcf = ctx->u.iocb_cmd;
347 tcf->u.tmf.flags = flags;
348 tcf->u.tmf.lun = lun;
349 tcf->u.tmf.data = tag;
350 tcf->timeout = qla2x00_async_iocb_timeout;
351 tcf->done = qla2x00_async_tm_cmd_ctx_done;
353 rval = qla2x00_start_sp(sp);
354 if (rval != QLA_SUCCESS)
357 DEBUG2(printk(KERN_DEBUG
358 "scsi(%ld:%x): Async-tmf - loop-id=%x portid=%02x%02x%02x.\n",
359 fcport->vha->host_no, sp->handle, fcport->loop_id,
360 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa));
/*
 * qla2x00_async_login_done() - process the mailbox status of a completed
 * async login. Success updates the fcport (FCP2 devices get an ADISC
 * first); errors mark the device lost or schedule a relogin; an in-use
 * port ID triggers logout+retry; an in-use loop ID gets a fresh loop ID
 * and a retry. NOTE(review): the switch header, 'break's and closing
 * braces are missing from this extract — verify against the full source.
 */
371 qla2x00_async_login_done(struct scsi_qla_host *vha, fc_port_t *fcport,
377 case MBS_COMMAND_COMPLETE:
378 if (fcport->flags & FCF_FCP2_DEVICE) {
/* FCP2 device: verify the session with ADISC before use. */
379 fcport->flags |= FCF_ASYNC_SENT;
380 qla2x00_post_async_adisc_work(vha, fcport, data);
383 qla2x00_update_fcport(vha, fcport);
385 case MBS_COMMAND_ERROR:
386 fcport->flags &= ~FCF_ASYNC_SENT;
387 if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
388 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
390 qla2x00_mark_device_lost(vha, fcport, 1, 1);
392 case MBS_PORT_ID_USED:
/* Firmware reports the conflicting loop ID in data[1]. */
393 fcport->loop_id = data[1];
394 qla2x00_post_async_logout_work(vha, fcport, NULL);
395 qla2x00_post_async_login_work(vha, fcport, NULL);
397 case MBS_LOOP_ID_USED:
399 rval = qla2x00_find_new_loop_id(vha, fcport);
400 if (rval != QLA_SUCCESS) {
401 fcport->flags &= ~FCF_ASYNC_SENT;
402 qla2x00_mark_device_lost(vha, fcport, 1, 1);
405 qla2x00_post_async_login_work(vha, fcport, NULL);
/*
 * qla2x00_async_logout_done() - post-logout processing: mark the device
 * lost (without triggering a relogin — final arg 0).
 */
412 qla2x00_async_logout_done(struct scsi_qla_host *vha, fc_port_t *fcport,
415 qla2x00_mark_device_lost(vha, fcport, 1, 0);
/*
 * qla2x00_async_adisc_done() - process a completed async ADISC.
 * On MBS_COMMAND_COMPLETE the fcport is brought online; otherwise the
 * async flag is cleared and the device is either queued for relogin or
 * marked lost.
 */
420 qla2x00_async_adisc_done(struct scsi_qla_host *vha, fc_port_t *fcport,
423 if (data[0] == MBS_COMMAND_COMPLETE) {
424 qla2x00_update_fcport(vha, fcport);
430 fcport->flags &= ~FCF_ASYNC_SENT;
431 if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
432 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
434 qla2x00_mark_device_lost(vha, fcport, 1, 1);
/*
 * qla2x00_async_tm_cmd_done() - finish a completed async task management
 * command: issue the required marker IOCB (LUN-scoped for LUN reset,
 * target-scoped otherwise) and log failure if the marker or the TM
 * command itself (iocb->u.tmf.data non-zero) failed.
 */
440 qla2x00_async_tm_cmd_done(struct scsi_qla_host *vha, fc_port_t *fcport,
441 struct srb_iocb *iocb)
447 flags = iocb->u.tmf.flags;
448 lun = (uint16_t)iocb->u.tmf.lun;
450 /* Issue Marker IOCB */
451 rval = qla2x00_marker(vha, vha->hw->req_q_map[0],
452 vha->hw->rsp_q_map[0], fcport->loop_id, lun,
453 flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
455 if ((rval != QLA_SUCCESS) || iocb->u.tmf.data) {
456 DEBUG2_3_11(printk(KERN_WARNING
457 "%s(%ld): TM IOCB failed (%x).\n",
458 __func__, vha->host_no, rval));
464 /****************************************************************************/
465 /* QLogic ISP2x00 Hardware Support Functions. */
466 /****************************************************************************/
469 * qla2x00_initialize_adapter
473 * ha = adapter block pointer.
/*
 * Top-level adapter bring-up: clear state flags, configure PCI, reset the
 * chip, validate flash, load NVRAM parameters, verify/load RISC firmware,
 * initialize the rings, and perform 84xx/FCP-priority extras.
 * Returns QLA_FUNCTION_FAILED on any fatal step; several error-path lines
 * are missing from this extract.
 */
479 qla2x00_initialize_adapter(scsi_qla_host_t *vha)
482 struct qla_hw_data *ha = vha->hw;
483 struct req_que *req = ha->req_q_map[0];
485 /* Clear adapter flags. */
486 vha->flags.online = 0;
487 ha->flags.chip_reset_done = 0;
488 vha->flags.reset_active = 0;
489 ha->flags.pci_channel_io_perm_failure = 0;
490 ha->flags.eeh_busy = 0;
491 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
492 atomic_set(&vha->loop_state, LOOP_DOWN);
493 vha->device_flags = DFLG_NO_CABLE;
495 vha->flags.management_server_logged_in = 0;
496 vha->marker_needed = 0;
497 ha->isp_abort_cnt = 0;
498 ha->beacon_blink_led = 0;
/* Queue 0 (the base req/rsp pair) is always in use. */
500 set_bit(0, ha->req_qid_map);
501 set_bit(0, ha->rsp_qid_map);
503 qla_printk(KERN_INFO, ha, "Configuring PCI space...\n");
504 rval = ha->isp_ops->pci_config(vha);
506 DEBUG2(printk("scsi(%ld): Unable to configure PCI space.\n",
511 ha->isp_ops->reset_chip(vha);
513 rval = qla2xxx_get_flash_info(vha);
515 DEBUG2(printk("scsi(%ld): Unable to validate FLASH data.\n",
520 ha->isp_ops->get_flash_version(vha, req->ring);
522 qla_printk(KERN_INFO, ha, "Configure NVRAM parameters...\n");
524 ha->isp_ops->nvram_config(vha);
526 if (ha->flags.disable_serdes) {
527 /* Mask HBA via NVRAM settings? */
528 qla_printk(KERN_INFO, ha, "Masking HBA WWPN "
529 "%02x%02x%02x%02x%02x%02x%02x%02x (via NVRAM).\n",
530 vha->port_name[0], vha->port_name[1],
531 vha->port_name[2], vha->port_name[3],
532 vha->port_name[4], vha->port_name[5],
533 vha->port_name[6], vha->port_name[7]);
534 return QLA_FUNCTION_FAILED;
537 qla_printk(KERN_INFO, ha, "Verifying loaded RISC code...\n");
/* Only diag + setup when firmware is not already resident. */
539 if (qla2x00_isp_firmware(vha) != QLA_SUCCESS) {
540 rval = ha->isp_ops->chip_diag(vha);
543 rval = qla2x00_setup_chip(vha);
548 if (IS_QLA84XX(ha)) {
549 ha->cs84xx = qla84xx_get_chip(vha);
551 qla_printk(KERN_ERR, ha,
552 "Unable to configure ISP84XX.\n");
553 return QLA_FUNCTION_FAILED;
556 rval = qla2x00_init_rings(vha);
557 ha->flags.chip_reset_done = 1;
559 if (rval == QLA_SUCCESS && IS_QLA84XX(ha)) {
560 /* Issue verify 84xx FW IOCB to complete 84xx initialization */
561 rval = qla84xx_init_chip(vha);
562 if (rval != QLA_SUCCESS) {
563 qla_printk(KERN_ERR, ha,
564 "Unable to initialize ISP84XX.\n");
565 qla84xx_put_chip(vha);
569 if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))
570 qla24xx_read_fcp_prio_cfg(vha);
576 * qla2100_pci_config() - Setup ISP21xx PCI configuration registers.
579 * Returns 0 on success.
/*
 * Fix(review): restored '&reg->' where the text had been mojibake'd to
 * '®->' (HTML entity corruption); 'reg' is the local register-map pointer
 * declared below. No other code change.
 */
582 qla2100_pci_config(scsi_qla_host_t *vha)
586 struct qla_hw_data *ha = vha->hw;
587 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
/* Enable bus mastering and (best-effort) memory-write-invalidate. */
589 pci_set_master(ha->pdev);
590 pci_try_set_mwi(ha->pdev);
/* Turn on parity and SERR reporting in the PCI command register. */
592 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
593 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
594 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
596 pci_disable_rom(ha->pdev);
598 /* Get PCI bus information. */
599 spin_lock_irqsave(&ha->hardware_lock, flags);
600 ha->pci_attr = RD_REG_WORD(&reg->ctrl_status);
601 spin_unlock_irqrestore(&ha->hardware_lock, flags);
607 * qla2300_pci_config() - Setup ISP23xx PCI configuration registers.
610 * Returns 0 on success.
/*
 * Fix(review): restored '&reg->' where the text had been mojibake'd to
 * '®->' (HTML entity corruption); 'reg' is the local register-map pointer
 * declared below. No other code change.
 */
613 qla2300_pci_config(scsi_qla_host_t *vha)
616 unsigned long flags = 0;
618 struct qla_hw_data *ha = vha->hw;
619 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
621 pci_set_master(ha->pdev);
622 pci_try_set_mwi(ha->pdev);
624 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
625 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
627 if (IS_QLA2322(ha) || IS_QLA6322(ha))
628 w &= ~PCI_COMMAND_INTX_DISABLE;
629 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
632 * If this is a 2300 card and not 2312, reset the
633 * COMMAND_INVALIDATE due to a bug in the 2300. Unfortunately,
634 * the 2310 also reports itself as a 2300 so we need to get the
635 * fb revision level -- a 6 indicates it really is a 2300 and
638 if (IS_QLA2300(ha)) {
639 spin_lock_irqsave(&ha->hardware_lock, flags);
/* Pause RISC so FPM registers can be read safely. */
642 WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
643 for (cnt = 0; cnt < 30000; cnt++) {
644 if ((RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) != 0)
650 /* Select FPM registers. */
651 WRT_REG_WORD(&reg->ctrl_status, 0x20);
652 RD_REG_WORD(&reg->ctrl_status);
654 /* Get the fb rev level */
655 ha->fb_rev = RD_FB_CMD_REG(ha, reg);
657 if (ha->fb_rev == FPM_2300)
658 pci_clear_mwi(ha->pdev);
660 /* Deselect FPM registers. */
661 WRT_REG_WORD(&reg->ctrl_status, 0x0);
662 RD_REG_WORD(&reg->ctrl_status);
664 /* Release RISC module. */
665 WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
666 for (cnt = 0; cnt < 30000; cnt++) {
667 if ((RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0)
673 spin_unlock_irqrestore(&ha->hardware_lock, flags);
676 pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);
678 pci_disable_rom(ha->pdev);
680 /* Get PCI bus information. */
681 spin_lock_irqsave(&ha->hardware_lock, flags);
682 ha->pci_attr = RD_REG_WORD(&reg->ctrl_status);
683 spin_unlock_irqrestore(&ha->hardware_lock, flags);
689 * qla24xx_pci_config() - Setup ISP24xx PCI configuration registers.
692 * Returns 0 on success.
/*
 * Fix(review): restored '&reg->' where the text had been mojibake'd to
 * '®->' (HTML entity corruption). No other code change.
 */
695 qla24xx_pci_config(scsi_qla_host_t *vha)
698 unsigned long flags = 0;
699 struct qla_hw_data *ha = vha->hw;
700 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
702 pci_set_master(ha->pdev);
703 pci_try_set_mwi(ha->pdev);
705 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
706 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
707 w &= ~PCI_COMMAND_INTX_DISABLE;
708 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
710 pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);
712 /* PCI-X -- adjust Maximum Memory Read Byte Count (2048). */
713 if (pci_find_capability(ha->pdev, PCI_CAP_ID_PCIX))
714 pcix_set_mmrbc(ha->pdev, 2048);
716 /* PCIe -- adjust Maximum Read Request Size (2048). */
717 if (pci_find_capability(ha->pdev, PCI_CAP_ID_EXP))
718 pcie_set_readrq(ha->pdev, 2048);
720 pci_disable_rom(ha->pdev);
722 ha->chip_revision = ha->pdev->revision;
724 /* Get PCI bus information. */
725 spin_lock_irqsave(&ha->hardware_lock, flags);
726 ha->pci_attr = RD_REG_DWORD(&reg->ctrl_status);
727 spin_unlock_irqrestore(&ha->hardware_lock, flags);
733 * qla25xx_pci_config() - Setup ISP25xx PCI configuration registers.
736 * Returns 0 on success.
/*
 * Enables bus mastering/MWI, parity+SERR reporting (INTx enabled), sets a
 * 2KB PCIe max read request, disables the expansion ROM and records the
 * chip revision.
 */
739 qla25xx_pci_config(scsi_qla_host_t *vha)
742 struct qla_hw_data *ha = vha->hw;
744 pci_set_master(ha->pdev);
745 pci_try_set_mwi(ha->pdev);
747 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
748 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
749 w &= ~PCI_COMMAND_INTX_DISABLE;
750 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
752 /* PCIe -- adjust Maximum Read Request Size (2048). */
753 if (pci_find_capability(ha->pdev, PCI_CAP_ID_EXP))
754 pcie_set_readrq(ha->pdev, 2048);
756 pci_disable_rom(ha->pdev);
758 ha->chip_revision = ha->pdev->revision;
764 * qla2x00_isp_firmware() - Choose firmware image.
767 * Returns 0 on success.
/*
 * Decides whether RISC firmware must be (re)loaded. If risc-code load is
 * disabled, verifies the checksum of the resident image and confirms the
 * chip is out of ROM code via get_adapter_id. Default assumption is that
 * firmware must be loaded (QLA_FUNCTION_FAILED).
 */
770 qla2x00_isp_firmware(scsi_qla_host_t *vha)
773 uint16_t loop_id, topo, sw_cap;
774 uint8_t domain, area, al_pa;
775 struct qla_hw_data *ha = vha->hw;
777 /* Assume loading risc code */
778 rval = QLA_FUNCTION_FAILED;
780 if (ha->flags.disable_risc_code_load) {
781 DEBUG2(printk("scsi(%ld): RISC CODE NOT loaded\n",
783 qla_printk(KERN_INFO, ha, "RISC CODE NOT loaded\n");
785 /* Verify checksum of loaded RISC code. */
786 rval = qla2x00_verify_checksum(vha, ha->fw_srisc_address);
787 if (rval == QLA_SUCCESS) {
788 /* And, verify we are not in ROM code. */
789 rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
790 &area, &domain, &topo, &sw_cap);
795 DEBUG2_3(printk("scsi(%ld): **** Load RISC code ****\n",
803 * qla2x00_reset_chip() - Reset ISP chip.
806 * Returns 0 on success.
/*
 * Fix(review): restored '&reg->' where the text had been mojibake'd to
 * '®->' (HTML entity corruption); also fixed the "to for a delay" comment
 * typo. No other code change.
 */
809 qla2x00_reset_chip(scsi_qla_host_t *vha)
811 unsigned long flags = 0;
812 struct qla_hw_data *ha = vha->hw;
813 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
817 if (unlikely(pci_channel_offline(ha->pdev)))
820 ha->isp_ops->disable_intrs(ha);
822 spin_lock_irqsave(&ha->hardware_lock, flags);
824 /* Turn off master enable */
826 pci_read_config_word(ha->pdev, PCI_COMMAND, &cmd);
827 cmd &= ~PCI_COMMAND_MASTER;
828 pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);
830 if (!IS_QLA2100(ha)) {
832 WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
833 if (IS_QLA2200(ha) || IS_QLA2300(ha)) {
834 for (cnt = 0; cnt < 30000; cnt++) {
835 if ((RD_REG_WORD(&reg->hccr) &
836 HCCR_RISC_PAUSE) != 0)
841 RD_REG_WORD(&reg->hccr); /* PCI Posting. */
845 /* Select FPM registers. */
846 WRT_REG_WORD(&reg->ctrl_status, 0x20);
847 RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
849 /* FPM Soft Reset. */
850 WRT_REG_WORD(&reg->fpm_diag_config, 0x100);
851 RD_REG_WORD(&reg->fpm_diag_config); /* PCI Posting. */
853 /* Toggle Fpm Reset. */
854 if (!IS_QLA2200(ha)) {
855 WRT_REG_WORD(&reg->fpm_diag_config, 0x0);
856 RD_REG_WORD(&reg->fpm_diag_config); /* PCI Posting. */
859 /* Select frame buffer registers. */
860 WRT_REG_WORD(&reg->ctrl_status, 0x10);
861 RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
863 /* Reset frame buffer FIFOs. */
864 if (IS_QLA2200(ha)) {
865 WRT_FB_CMD_REG(ha, reg, 0xa000);
866 RD_FB_CMD_REG(ha, reg); /* PCI Posting. */
868 WRT_FB_CMD_REG(ha, reg, 0x00fc);
870 /* Read back fb_cmd until zero or 3 seconds max */
871 for (cnt = 0; cnt < 3000; cnt++) {
872 if ((RD_FB_CMD_REG(ha, reg) & 0xff) == 0)
878 /* Select RISC module registers. */
879 WRT_REG_WORD(&reg->ctrl_status, 0);
880 RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
882 /* Reset RISC processor. */
883 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
884 RD_REG_WORD(&reg->hccr); /* PCI Posting. */
886 /* Release RISC processor. */
887 WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
888 RD_REG_WORD(&reg->hccr); /* PCI Posting. */
891 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
892 WRT_REG_WORD(&reg->hccr, HCCR_CLR_HOST_INT);
894 /* Reset ISP chip. */
895 WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
897 /* Wait for RISC to recover from reset. */
898 if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
900 * It is necessary for a delay here since the card doesn't
901 * respond to PCI reads during a reset. On some architectures
902 * this will result in an MCA.
905 for (cnt = 30000; cnt; cnt--) {
906 if ((RD_REG_WORD(&reg->ctrl_status) &
907 CSR_ISP_SOFT_RESET) == 0)
914 /* Reset RISC processor. */
915 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
917 WRT_REG_WORD(&reg->semaphore, 0);
919 /* Release RISC processor. */
920 WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
921 RD_REG_WORD(&reg->hccr); /* PCI Posting. */
923 if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
924 for (cnt = 0; cnt < 30000; cnt++) {
925 if (RD_MAILBOX_REG(ha, reg, 0) != MBS_BUSY)
933 /* Turn on master enable */
934 cmd |= PCI_COMMAND_MASTER;
935 pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);
937 /* Disable RISC pause on FPM parity error. */
938 if (!IS_QLA2100(ha)) {
939 WRT_REG_WORD(&reg->hccr, HCCR_DISABLE_PARITY_PAUSE);
940 RD_REG_WORD(&reg->hccr); /* PCI Posting. */
943 spin_unlock_irqrestore(&ha->hardware_lock, flags);
947 * qla24xx_reset_risc() - Perform full reset of ISP24xx RISC.
950 * Returns 0 on success.
/*
 * Fix(review): restored '&reg->' where the text had been mojibake'd to
 * '®->' (HTML entity corruption). No other code change.
 */
953 qla24xx_reset_risc(scsi_qla_host_t *vha)
955 unsigned long flags = 0;
956 struct qla_hw_data *ha = vha->hw;
957 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
961 spin_lock_irqsave(&ha->hardware_lock, flags);
/* Shut down DMA and wait for it to go idle before soft reset. */
964 WRT_REG_DWORD(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
965 for (cnt = 0; cnt < 30000; cnt++) {
966 if ((RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
972 WRT_REG_DWORD(&reg->ctrl_status,
973 CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
974 pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);
977 /* Wait for firmware to complete NVRAM accesses. */
978 d2 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
979 for (cnt = 10000 ; cnt && d2; cnt--) {
981 d2 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
985 /* Wait for soft-reset to complete. */
986 d2 = RD_REG_DWORD(&reg->ctrl_status);
987 for (cnt = 6000000 ; cnt && (d2 & CSRX_ISP_SOFT_RESET); cnt--) {
989 d2 = RD_REG_DWORD(&reg->ctrl_status);
/* Pulse RISC reset: set, release pause, clear. Reads flush PCI posting. */
993 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
994 RD_REG_DWORD(&reg->hccr);
996 WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
997 RD_REG_DWORD(&reg->hccr);
999 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
1000 RD_REG_DWORD(&reg->hccr);
/* Wait for mailbox0 to clear, signalling RISC is ready. */
1002 d2 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
1003 for (cnt = 6000000 ; cnt && d2; cnt--) {
1005 d2 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
1009 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1011 if (IS_NOPOLLING_TYPE(ha))
1012 ha->isp_ops->enable_intrs(ha);
1016 * qla24xx_reset_chip() - Reset ISP24xx chip.
1019 * Returns 0 on success.
/*
 * Skips the reset entirely when the PCI channel is offline with a
 * permanent I/O failure; otherwise disables interrupts and performs a
 * full RISC reset.
 */
1022 qla24xx_reset_chip(scsi_qla_host_t *vha)
1024 struct qla_hw_data *ha = vha->hw;
1026 if (pci_channel_offline(ha->pdev) &&
1027 ha->flags.pci_channel_io_perm_failure) {
1031 ha->isp_ops->disable_intrs(ha);
1033 /* Perform RISC reset. */
1034 qla24xx_reset_risc(vha);
1038 * qla2x00_chip_diag() - Test chip for proper operation.
1041 * Returns 0 on success.
/*
 * Fix(review): restored '&reg->' where the text had been mojibake'd to
 * '®->' (HTML entity corruption). No other code change.
 */
1044 qla2x00_chip_diag(scsi_qla_host_t *vha)
1047 struct qla_hw_data *ha = vha->hw;
1048 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1049 unsigned long flags = 0;
1053 struct req_que *req = ha->req_q_map[0];
1055 /* Assume a failed state */
1056 rval = QLA_FUNCTION_FAILED;
1058 DEBUG3(printk("scsi(%ld): Testing device at %lx.\n",
1059 vha->host_no, (u_long)&reg->flash_address));
1061 spin_lock_irqsave(&ha->hardware_lock, flags);
1063 /* Reset ISP chip. */
1064 WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
1067 * We need to have a delay here since the card will not respond while
1068 * in reset causing an MCA on some architectures.
1071 data = qla2x00_debounce_register(&reg->ctrl_status);
1072 for (cnt = 6000000 ; cnt && (data & CSR_ISP_SOFT_RESET); cnt--) {
1074 data = RD_REG_WORD(&reg->ctrl_status);
1079 goto chip_diag_failed;
1081 DEBUG3(printk("scsi(%ld): Reset register cleared by chip reset\n",
1084 /* Reset RISC processor. */
1085 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
1086 WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
1088 /* Workaround for QLA2312 PCI parity error */
1089 if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
1090 data = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 0));
1091 for (cnt = 6000000; cnt && (data == MBS_BUSY); cnt--) {
1093 data = RD_MAILBOX_REG(ha, reg, 0);
1100 goto chip_diag_failed;
1102 /* Check product ID of chip */
1103 DEBUG3(printk("scsi(%ld): Checking product ID of chip\n", vha->host_no));
1105 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
1106 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
1107 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
1108 mb[4] = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 4));
1109 if (mb[1] != PROD_ID_1 || (mb[2] != PROD_ID_2 && mb[2] != PROD_ID_2a) ||
1110 mb[3] != PROD_ID_3) {
1111 qla_printk(KERN_WARNING, ha,
1112 "Wrong product ID = 0x%x,0x%x,0x%x\n", mb[1], mb[2], mb[3]);
1114 goto chip_diag_failed;
1116 ha->product_id[0] = mb[1];
1117 ha->product_id[1] = mb[2];
1118 ha->product_id[2] = mb[3];
1119 ha->product_id[3] = mb[4];
1121 /* Adjust fw RISC transfer size */
1122 if (req->length > 1024)
1123 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * 1024;
1125 ha->fw_transfer_size = REQUEST_ENTRY_SIZE *
1128 if (IS_QLA2200(ha) &&
1129 RD_MAILBOX_REG(ha, reg, 7) == QLA2200A_RISC_ROM_VER) {
1130 /* Limit firmware transfer size with a 2200A */
1131 DEBUG3(printk("scsi(%ld): Found QLA2200A chip.\n",
1134 ha->device_type |= DT_ISP2200A;
1135 ha->fw_transfer_size = 128;
1138 /* Wrap Incoming Mailboxes Test. */
1139 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1141 DEBUG3(printk("scsi(%ld): Checking mailboxes.\n", vha->host_no));
1142 rval = qla2x00_mbx_reg_test(vha);
1144 DEBUG(printk("scsi(%ld): Failed mailbox send register test\n",
1146 qla_printk(KERN_WARNING, ha,
1147 "Failed mailbox send register test\n");
1150 /* Flag a successful rval */
1153 spin_lock_irqsave(&ha->hardware_lock, flags);
1157 DEBUG2_3(printk("scsi(%ld): Chip diagnostics **** FAILED "
1158 "****\n", vha->host_no));
1160 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1166 * qla24xx_chip_diag() - Test ISP24xx for proper operation.
1169 * Returns 0 on success.
/*
 * Sets the firmware transfer size from the request-queue length and runs
 * the mailbox register wrap test; logs a warning on failure.
 */
1172 qla24xx_chip_diag(scsi_qla_host_t *vha)
1175 struct qla_hw_data *ha = vha->hw;
1176 struct req_que *req = ha->req_q_map[0];
1181 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length;
1183 rval = qla2x00_mbx_reg_test(vha);
1185 DEBUG(printk("scsi(%ld): Failed mailbox send register test\n",
1187 qla_printk(KERN_WARNING, ha,
1188 "Failed mailbox send register test\n");
1190 /* Flag a successful rval */
/*
 * qla2x00_alloc_fw_dump() - size and allocate the firmware-dump buffer.
 * Computes per-chip fixed/memory/queue sizes, allocates DMA-coherent FCE
 * and EFT trace buffers where supported (enabling the traces in firmware,
 * and freeing the buffers again if enabling fails), then vmalloc's the
 * dump buffer and fills in its header. No-op if a dump was already
 * allocated. NOTE(review): several cleanup/early-return lines are missing
 * from this extract — verify against the full source.
 */
1198 qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
1201 uint32_t dump_size, fixed_size, mem_size, req_q_size, rsp_q_size,
1202 eft_size, fce_size, mq_size;
1205 struct qla_hw_data *ha = vha->hw;
1206 struct req_que *req = ha->req_q_map[0];
1207 struct rsp_que *rsp = ha->rsp_q_map[0];
1210 qla_printk(KERN_WARNING, ha,
1211 "Firmware dump previously allocated.\n");
/* Per-chip-family sizing of the fixed and memory regions. */
1216 fixed_size = mem_size = eft_size = fce_size = mq_size = 0;
1217 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
1218 fixed_size = sizeof(struct qla2100_fw_dump);
1219 } else if (IS_QLA23XX(ha)) {
1220 fixed_size = offsetof(struct qla2300_fw_dump, data_ram);
1221 mem_size = (ha->fw_memory_size - 0x11000 + 1) *
1223 } else if (IS_FWI2_CAPABLE(ha)) {
1225 fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem);
1226 else if (IS_QLA25XX(ha))
1227 fixed_size = offsetof(struct qla25xx_fw_dump, ext_mem);
1229 fixed_size = offsetof(struct qla24xx_fw_dump, ext_mem);
1230 mem_size = (ha->fw_memory_size - 0x100000 + 1) *
1233 mq_size = sizeof(struct qla2xxx_mq_chain);
1234 /* Allocate memory for Fibre Channel Event Buffer. */
1235 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))
1238 tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
1241 qla_printk(KERN_WARNING, ha, "Unable to allocate "
1242 "(%d KB) for FCE.\n", FCE_SIZE / 1024);
1246 memset(tc, 0, FCE_SIZE);
1247 rval = qla2x00_enable_fce_trace(vha, tc_dma, FCE_NUM_BUFFERS,
1248 ha->fce_mb, &ha->fce_bufs);
/* Firmware refused the FCE trace: release the buffer, disable FCE. */
1250 qla_printk(KERN_WARNING, ha, "Unable to initialize "
1251 "FCE (%d).\n", rval);
1252 dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc,
1254 ha->flags.fce_enabled = 0;
1258 qla_printk(KERN_INFO, ha, "Allocated (%d KB) for FCE...\n",
1261 fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
1262 ha->flags.fce_enabled = 1;
1263 ha->fce_dma = tc_dma;
1266 /* Allocate memory for Extended Trace Buffer. */
1267 tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
1270 qla_printk(KERN_WARNING, ha, "Unable to allocate "
1271 "(%d KB) for EFT.\n", EFT_SIZE / 1024);
1275 memset(tc, 0, EFT_SIZE);
1276 rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS);
/* Firmware refused the EFT trace: release the buffer. */
1278 qla_printk(KERN_WARNING, ha, "Unable to initialize "
1279 "EFT (%d).\n", rval);
1280 dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc,
1285 qla_printk(KERN_INFO, ha, "Allocated (%d KB) for EFT...\n",
1288 eft_size = EFT_SIZE;
1289 ha->eft_dma = tc_dma;
1293 req_q_size = req->length * sizeof(request_t);
1294 rsp_q_size = rsp->length * sizeof(response_t);
1296 dump_size = offsetof(struct qla2xxx_fw_dump, isp);
1297 dump_size += fixed_size + mem_size + req_q_size + rsp_q_size + eft_size;
1298 ha->chain_offset = dump_size;
1299 dump_size += mq_size + fce_size;
1301 ha->fw_dump = vmalloc(dump_size);
1303 qla_printk(KERN_WARNING, ha, "Unable to allocate (%d KB) for "
1304 "firmware dump!!!\n", dump_size / 1024);
/* Dump allocation failed: tear down the EFT buffer as well. */
1307 dma_free_coherent(&ha->pdev->dev, eft_size, ha->eft,
1314 qla_printk(KERN_INFO, ha, "Allocated (%d KB) for firmware dump...\n",
1317 ha->fw_dump_len = dump_size;
/* Dump header: "QLGC" signature plus region sizes/addresses. */
1318 ha->fw_dump->signature[0] = 'Q';
1319 ha->fw_dump->signature[1] = 'L';
1320 ha->fw_dump->signature[2] = 'G';
1321 ha->fw_dump->signature[3] = 'C';
1322 ha->fw_dump->version = __constant_htonl(1);
1324 ha->fw_dump->fixed_size = htonl(fixed_size);
1325 ha->fw_dump->mem_size = htonl(mem_size);
1326 ha->fw_dump->req_q_size = htonl(req_q_size);
1327 ha->fw_dump->rsp_q_size = htonl(rsp_q_size);
1329 ha->fw_dump->eft_size = htonl(eft_size);
1330 ha->fw_dump->eft_addr_l = htonl(LSD(ha->eft_dma));
1331 ha->fw_dump->eft_addr_h = htonl(MSD(ha->eft_dma));
1333 ha->fw_dump->header_size =
1334 htonl(offsetof(struct qla2xxx_fw_dump, isp));
/*
 * qla81xx_mpi_sync() - synchronize the ISP81xx MPS setting with PCI
 * config space. Acquires a firmware semaphore (RAM word 0x7c00), compares
 * the MPS bits read from PCI config offset 0x54 with firmware RAM word
 * 0x7a15, rewrites the RAM word if they differ, and releases the
 * semaphore. No-op on non-81xx HBAs.
 */
1338 qla81xx_mpi_sync(scsi_qla_host_t *vha)
1340 #define MPS_MASK 0xe0
1344 struct qla_hw_data *ha = vha->hw;
1346 if (!IS_QLA81XX(vha->hw))
1349 rval = qla2x00_write_ram_word(vha, 0x7c00, 1);
1350 if (rval != QLA_SUCCESS) {
1351 DEBUG2(qla_printk(KERN_WARNING, ha,
1352 "Sync-MPI: Unable to acquire semaphore.\n"));
1356 pci_read_config_word(vha->hw->pdev, 0x54, &dc);
1357 rval = qla2x00_read_ram_word(vha, 0x7a15, &dw);
1358 if (rval != QLA_SUCCESS) {
1359 DEBUG2(qla_printk(KERN_WARNING, ha,
1360 "Sync-MPI: Unable to read sync.\n"));
/* Already in sync: nothing to write back. */
1365 if (dc == (dw & MPS_MASK))
1370 rval = qla2x00_write_ram_word(vha, 0x7a15, dw);
1371 if (rval != QLA_SUCCESS) {
1372 DEBUG2(qla_printk(KERN_WARNING, ha,
1373 "Sync-MPI: Unable to gain sync.\n"));
1377 rval = qla2x00_write_ram_word(vha, 0x7c00, 0);
1378 if (rval != QLA_SUCCESS) {
1379 DEBUG2(qla_printk(KERN_WARNING, ha,
1380 "Sync-MPI: Unable to release semaphore.\n"));
1388 * qla2x00_setup_chip() - Load and start RISC firmware.
1391 * Returns 0 on success.
/*
 * Fix(review): restored '&reg->' where the text had been mojibake'd to
 * '®->' (HTML entity corruption); 'reg' is the local register-map pointer
 * declared below. No other code change.
 */
1394 qla2x00_setup_chip(scsi_qla_host_t *vha)
1397 uint32_t srisc_address = 0;
1398 struct qla_hw_data *ha = vha->hw;
1399 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1400 unsigned long flags;
1401 uint16_t fw_major_version;
1403 if (IS_QLA82XX(ha)) {
1404 rval = ha->isp_ops->load_risc(vha, &srisc_address);
1405 if (rval == QLA_SUCCESS) {
1406 qla2x00_stop_firmware(vha);
1407 goto enable_82xx_npiv;
1412 if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
1413 /* Disable SRAM, Instruction RAM and GP RAM parity. */
1414 spin_lock_irqsave(&ha->hardware_lock, flags);
1415 WRT_REG_WORD(&reg->hccr, (HCCR_ENABLE_PARITY + 0x0));
1416 RD_REG_WORD(&reg->hccr);
1417 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1420 qla81xx_mpi_sync(vha);
1422 /* Load firmware sequences */
1423 rval = ha->isp_ops->load_risc(vha, &srisc_address);
1424 if (rval == QLA_SUCCESS) {
1425 DEBUG(printk("scsi(%ld): Verifying Checksum of loaded RISC "
1426 "code.\n", vha->host_no));
1428 rval = qla2x00_verify_checksum(vha, srisc_address);
1429 if (rval == QLA_SUCCESS) {
1430 /* Start firmware execution. */
1431 DEBUG(printk("scsi(%ld): Checksum OK, start "
1432 "firmware.\n", vha->host_no));
1434 rval = qla2x00_execute_fw(vha, srisc_address);
1435 /* Retrieve firmware information. */
1436 if (rval == QLA_SUCCESS) {
1438 fw_major_version = ha->fw_major_version;
1439 rval = qla2x00_get_fw_version(vha,
1440 &ha->fw_major_version,
1441 &ha->fw_minor_version,
1442 &ha->fw_subminor_version,
1443 &ha->fw_attributes, &ha->fw_memory_size,
1444 ha->mpi_version, &ha->mpi_capabilities,
1446 if (rval != QLA_SUCCESS)
1448 ha->flags.npiv_supported = 0;
1449 if (IS_QLA2XXX_MIDTYPE(ha) &&
1450 (ha->fw_attributes & BIT_2)) {
1451 ha->flags.npiv_supported = 1;
1452 if ((!ha->max_npiv_vports) ||
1453 ((ha->max_npiv_vports + 1) %
1454 MIN_MULTI_ID_FABRIC))
1455 ha->max_npiv_vports =
1456 MIN_MULTI_ID_FABRIC - 1;
1458 qla2x00_get_resource_cnts(vha, NULL,
1459 &ha->fw_xcb_count, NULL, NULL,
1460 &ha->max_npiv_vports, NULL);
1462 if (!fw_major_version && ql2xallocfwdump) {
1463 if (!IS_QLA82XX(ha))
1464 qla2x00_alloc_fw_dump(vha);
1468 DEBUG2(printk(KERN_INFO
1469 "scsi(%ld): ISP Firmware failed checksum.\n",
1474 if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
1475 /* Enable proper parity. */
1476 spin_lock_irqsave(&ha->hardware_lock, flags);
1479 WRT_REG_WORD(&reg->hccr, HCCR_ENABLE_PARITY + 0x1);
1481 /* SRAM, Instruction RAM and GP RAM parity */
1482 WRT_REG_WORD(&reg->hccr, HCCR_ENABLE_PARITY + 0x7);
1483 RD_REG_WORD(&reg->hccr);
1484 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1487 if (rval == QLA_SUCCESS && IS_FAC_REQUIRED(ha)) {
1490 rval = qla81xx_fac_get_sector_size(vha, &size);
1491 if (rval == QLA_SUCCESS) {
1492 ha->flags.fac_supported = 1;
1493 ha->fdt_block_size = size << 2;
1495 qla_printk(KERN_ERR, ha,
1496 "Unsupported FAC firmware (%d.%02d.%02d).\n",
1497 ha->fw_major_version, ha->fw_minor_version,
1498 ha->fw_subminor_version);
1503 DEBUG2_3(printk("scsi(%ld): Setup chip **** FAILED ****.\n",
1511 * qla2x00_init_response_q_entries() - Initializes response queue entries.
1514 * Beginning of request ring has initialization control block already built
1515 * by nvram config routine.
1517 * Returns 0 on success.
/*
 * NOTE(review): partial listing -- the loop's pkt-advance statement and
 * closing braces are elided from this extract.
 */
1520 qla2x00_init_response_q_entries(struct rsp_que *rsp)
/* Rewind the ring to its start and clear any in-progress status SRB. */
1525 rsp->ring_ptr = rsp->ring;
1526 rsp->ring_index = 0;
1527 rsp->status_srb = NULL;
1528 pkt = rsp->ring_ptr;
/* Mark every entry as already processed so stale entries are ignored. */
1529 for (cnt = 0; cnt < rsp->length; cnt++) {
1530 pkt->signature = RESPONSE_PROCESSED;
1536 * qla2x00_update_fw_options() - Read and process firmware options.
1539 * Returns 0 on success.
/*
 * NOTE(review): partial listing -- blank lines, braces and some statements
 * are elided between the numbered lines below.
 */
1542 qla2x00_update_fw_options(scsi_qla_host_t *vha)
1544 uint16_t swing, emphasis, tx_sens, rx_sens;
1545 struct qla_hw_data *ha = vha->hw;
1547 memset(ha->fw_options, 0, sizeof(ha->fw_options));
1548 qla2x00_get_fw_options(vha, ha->fw_options);
/* ISP2100/2200 have no serial-link tuning options; bail out early. */
1550 if (IS_QLA2100(ha) || IS_QLA2200(ha))
1553 /* Serial Link options. */
1554 DEBUG3(printk("scsi(%ld): Serial link options:\n",
1556 DEBUG3(qla2x00_dump_buffer((uint8_t *)&ha->fw_seriallink_options,
1557 sizeof(ha->fw_seriallink_options)));
1559 ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING;
/* BIT_2 of seriallink_options[3] enables custom swing/emphasis settings. */
1560 if (ha->fw_seriallink_options[3] & BIT_2) {
1561 ha->fw_options[1] |= FO1_SET_EMPHASIS_SWING;
/* 1G settings: unpack swing/emphasis/sensitivity bit-fields from the
 * NVRAM-derived seriallink options into fw_options[10]. */
1564 swing = ha->fw_seriallink_options[2] & (BIT_2 | BIT_1 | BIT_0);
1565 emphasis = (ha->fw_seriallink_options[2] &
1566 (BIT_4 | BIT_3)) >> 3;
1567 tx_sens = ha->fw_seriallink_options[0] &
1568 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
1569 rx_sens = (ha->fw_seriallink_options[0] &
1570 (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4;
1571 ha->fw_options[10] = (emphasis << 14) | (swing << 8);
1572 if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
1575 ha->fw_options[10] |= (tx_sens << 4) | rx_sens;
1576 } else if (IS_QLA2322(ha) || IS_QLA6322(ha))
1577 ha->fw_options[10] |= BIT_5 |
1578 ((rx_sens & (BIT_1 | BIT_0)) << 2) |
1579 (tx_sens & (BIT_1 | BIT_0));
/* 2G settings: same unpacking, different source bytes, into fw_options[11]. */
1582 swing = (ha->fw_seriallink_options[2] &
1583 (BIT_7 | BIT_6 | BIT_5)) >> 5;
1584 emphasis = ha->fw_seriallink_options[3] & (BIT_1 | BIT_0);
1585 tx_sens = ha->fw_seriallink_options[1] &
1586 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
1587 rx_sens = (ha->fw_seriallink_options[1] &
1588 (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4;
1589 ha->fw_options[11] = (emphasis << 14) | (swing << 8);
1590 if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
1593 ha->fw_options[11] |= (tx_sens << 4) | rx_sens;
1594 } else if (IS_QLA2322(ha) || IS_QLA6322(ha))
1595 ha->fw_options[11] |= BIT_5 |
1596 ((rx_sens & (BIT_1 | BIT_0)) << 2) |
1597 (tx_sens & (BIT_1 | BIT_0));
1601 /* Return command IOCBs without waiting for an ABTS to complete. */
1602 ha->fw_options[3] |= BIT_13;
1605 if (ha->flags.enable_led_scheme)
1606 ha->fw_options[2] |= BIT_12;
1608 /* Detect ISP6312. */
1610 ha->fw_options[2] |= BIT_13;
1612 /* Update firmware options. */
1613 qla2x00_set_fw_options(vha, ha->fw_options);
/*
 * qla24xx_update_fw_options() - FWI2 variant: push serial-link (serdes)
 * parameters from NVRAM to the firmware.  Partial listing -- braces and
 * the early-return path's closing lines are elided.
 */
1617 qla24xx_update_fw_options(scsi_qla_host_t *vha)
1620 struct qla_hw_data *ha = vha->hw;
1625 /* Update Serial Link options. */
/* BIT_0 of word 0 gates whether custom serdes settings are present. */
1626 if ((le16_to_cpu(ha->fw_seriallink_options24[0]) & BIT_0) == 0)
1629 rval = qla2x00_set_serdes_params(vha,
1630 le16_to_cpu(ha->fw_seriallink_options24[1]),
1631 le16_to_cpu(ha->fw_seriallink_options24[2]),
1632 le16_to_cpu(ha->fw_seriallink_options24[3]));
/* Failure is logged but not fatal to initialization. */
1633 if (rval != QLA_SUCCESS) {
1634 qla_printk(KERN_WARNING, ha,
1635 "Unable to update Serial Link options (%x).\n", rval);
/*
 * qla2x00_config_rings() - Program request/response ring geometry into the
 * init control block and zero the hardware queue pointers (legacy ISPs).
 * Partial listing -- braces/blank lines elided.
 */
1640 qla2x00_config_rings(struct scsi_qla_host *vha)
1642 struct qla_hw_data *ha = vha->hw;
1643 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1644 struct req_que *req = ha->req_q_map[0];
1645 struct rsp_que *rsp = ha->rsp_q_map[0];
1647 /* Setup ring parameters in initialization control block. */
1648 ha->init_cb->request_q_outpointer = __constant_cpu_to_le16(0);
1649 ha->init_cb->response_q_inpointer = __constant_cpu_to_le16(0);
1650 ha->init_cb->request_q_length = cpu_to_le16(req->length);
1651 ha->init_cb->response_q_length = cpu_to_le16(rsp->length);
/* 64-bit DMA addresses are split into low/high 32-bit halves. */
1652 ha->init_cb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
1653 ha->init_cb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
1654 ha->init_cb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
1655 ha->init_cb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
/* Reset hardware in/out pointers; trailing read forces PCI posting. */
1657 WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), 0);
1658 WRT_REG_WORD(ISP_REQ_Q_OUT(ha, reg), 0);
1659 WRT_REG_WORD(ISP_RSP_Q_IN(ha, reg), 0);
1660 WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), 0);
1661 RD_REG_WORD(ISP_RSP_Q_OUT(ha, reg)); /* PCI Posting. */
/*
 * qla24xx_config_rings() - FWI2 variant of ring configuration: fills the
 * 24xx init control block, wires up MSI-X options, and zeroes the queue
 * registers (multiqueue isp25mq vs. single-queue isp24 register banks).
 * Partial listing -- braces, blank lines and some statements are elided;
 * '®' is mojibake for '&reg'.
 */
1665 qla24xx_config_rings(struct scsi_qla_host *vha)
1667 struct qla_hw_data *ha = vha->hw;
1668 device_reg_t __iomem *reg = ISP_QUE_REG(ha, 0);
1669 struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
1670 struct qla_msix_entry *msix;
1671 struct init_cb_24xx *icb;
1673 struct req_que *req = ha->req_q_map[0];
1674 struct rsp_que *rsp = ha->rsp_q_map[0];
1676 /* Setup ring parameters in initialization control block. */
1677 icb = (struct init_cb_24xx *)ha->init_cb;
1678 icb->request_q_outpointer = __constant_cpu_to_le16(0);
1679 icb->response_q_inpointer = __constant_cpu_to_le16(0);
1680 icb->request_q_length = cpu_to_le16(req->length);
1681 icb->response_q_length = cpu_to_le16(rsp->length);
1682 icb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
1683 icb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
1684 icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
1685 icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
/* Multiqueue path: QoS/RID plus MSI-X vector registration for base queue. */
1688 icb->qos = __constant_cpu_to_le16(QLA_DEFAULT_QUE_QOS);
1689 icb->rid = __constant_cpu_to_le16(rid);
1690 if (ha->flags.msix_enabled) {
1691 msix = &ha->msix_entries[1];
1692 DEBUG2_17(printk(KERN_INFO
1693 "Registering vector 0x%x for base que\n", msix->entry));
1694 icb->msix = cpu_to_le16(msix->entry);
1696 /* Use alternate PCI bus number */
1698 icb->firmware_options_2 |=
1699 __constant_cpu_to_le32(BIT_19);
1700 /* Use alternate PCI devfn */
1702 icb->firmware_options_2 |=
1703 __constant_cpu_to_le32(BIT_18);
1705 /* Use Disable MSIX Handshake mode for capable adapters */
1706 if (IS_MSIX_NACK_CAPABLE(ha)) {
1707 icb->firmware_options_2 &=
1708 __constant_cpu_to_le32(~BIT_22);
1709 ha->flags.disable_msix_handshake = 1;
1710 qla_printk(KERN_INFO, ha,
1711 "MSIX Handshake Disable Mode turned on\n");
1713 icb->firmware_options_2 |=
1714 __constant_cpu_to_le32(BIT_22);
1716 icb->firmware_options_2 |= __constant_cpu_to_le32(BIT_23);
/* Zero queue pointers: isp25mq bank (multiqueue) vs. isp24 bank. */
1718 WRT_REG_DWORD(®->isp25mq.req_q_in, 0);
1719 WRT_REG_DWORD(®->isp25mq.req_q_out, 0);
1720 WRT_REG_DWORD(®->isp25mq.rsp_q_in, 0);
1721 WRT_REG_DWORD(®->isp25mq.rsp_q_out, 0);
1723 WRT_REG_DWORD(®->isp24.req_q_in, 0);
1724 WRT_REG_DWORD(®->isp24.req_q_out, 0);
1725 WRT_REG_DWORD(®->isp24.rsp_q_in, 0);
1726 WRT_REG_DWORD(®->isp24.rsp_q_out, 0);
/* Read-back forces PCI posting of the writes above. */
1729 RD_REG_DWORD(&ioreg->hccr);
1733 * qla2x00_init_rings() - Initializes firmware.
1736 * Beginning of request ring has initialization control block already built
1737 * by nvram config routine.
1739 * Returns 0 on success.
/*
 * NOTE(review): partial listing -- blank lines, braces and some statements
 * are elided between the numbered lines below.
 */
1742 qla2x00_init_rings(scsi_qla_host_t *vha)
1745 unsigned long flags = 0;
1747 struct qla_hw_data *ha = vha->hw;
1748 struct req_que *req;
1749 struct rsp_que *rsp;
1750 struct scsi_qla_host *vp;
1751 struct mid_init_cb_24xx *mid_init_cb =
1752 (struct mid_init_cb_24xx *) ha->init_cb;
1754 spin_lock_irqsave(&ha->hardware_lock, flags);
1756 /* Clear outstanding commands array. */
1757 for (que = 0; que < ha->max_req_queues; que++) {
1758 req = ha->req_q_map[que];
/* Handle slot 0 is reserved; start at 1 for both clearing and allocation. */
1761 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++)
1762 req->outstanding_cmds[cnt] = NULL;
1764 req->current_outstanding_cmd = 1;
1766 /* Initialize firmware. */
1767 req->ring_ptr = req->ring;
1768 req->ring_index = 0;
1769 req->cnt = req->length;
1772 for (que = 0; que < ha->max_rsp_queues; que++) {
1773 rsp = ha->rsp_q_map[que];
1776 /* Initialize response queue entries */
1777 qla2x00_init_response_q_entries(rsp);
1780 /* Clear RSCN queue. */
1781 list_for_each_entry(vp, &ha->vp_list, list) {
1782 vp->rscn_in_ptr = 0;
1783 vp->rscn_out_ptr = 0;
/* Chip-specific ring register programming (see *_config_rings above). */
1785 ha->isp_ops->config_rings(vha);
1787 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1789 /* Update any ISP specific firmware options before initialization. */
1790 ha->isp_ops->update_fw_options(vha);
1792 DEBUG(printk("scsi(%ld): Issue init firmware.\n", vha->host_no));
/* NPIV: loop topology cannot host the full vport count. */
1794 if (ha->flags.npiv_supported) {
1795 if (ha->operating_mode == LOOP)
1796 ha->max_npiv_vports = MIN_MULTI_ID_FABRIC - 1;
1797 mid_init_cb->count = cpu_to_le16(ha->max_npiv_vports);
1800 if (IS_FWI2_CAPABLE(ha)) {
1801 mid_init_cb->options = __constant_cpu_to_le16(BIT_1);
1802 mid_init_cb->init_cb.execution_throttle =
1803 cpu_to_le16(ha->fw_xcb_count);
1806 rval = qla2x00_init_firmware(vha, ha->init_cb_size);
1808 DEBUG2_3(printk("scsi(%ld): Init firmware **** FAILED ****.\n",
1811 DEBUG3(printk("scsi(%ld): Init firmware -- success.\n",
1819 * qla2x00_fw_ready() - Waits for firmware ready.
1822 * Returns 0 on success.
/*
 * NOTE(review): partial listing -- blank lines, braces, the polling-loop
 * header and some statements are elided between the numbered lines below.
 */
1825 qla2x00_fw_ready(scsi_qla_host_t *vha)
1828 unsigned long wtime, mtime, cs84xx_time;
1829 uint16_t min_wait; /* Minimum wait time if loop is down */
1830 uint16_t wait_time; /* Wait time if loop is coming ready */
1832 struct qla_hw_data *ha = vha->hw;
1836 /* 20 seconds for loop down. */
1840 * Firmware should take at most one RATOV to login, plus 5 seconds for
1841 * our own processing.
1843 if ((wait_time = (ha->retry_count*ha->login_timeout) + 5) < min_wait) {
1844 wait_time = min_wait;
1847 /* Min wait time if loop down */
1848 mtime = jiffies + (min_wait * HZ);
1850 /* wait time before firmware ready */
1851 wtime = jiffies + (wait_time * HZ);
1853 /* Wait for ISP to finish LIP */
1854 if (!vha->flags.init_done)
1855 qla_printk(KERN_INFO, ha, "Waiting for LIP to complete...\n");
1857 DEBUG3(printk("scsi(%ld): Waiting for LIP to complete...\n",
/* Poll firmware state until FSTATE_READY or one of the deadlines expires. */
1861 rval = qla2x00_get_firmware_state(vha, state);
1862 if (rval == QLA_SUCCESS) {
1863 if (state[0] < FSTATE_LOSS_OF_SYNC) {
1864 vha->device_flags &= ~DFLG_NO_CABLE;
/* ISP84xx: when logged in but waiting for verify, run the 84xx init
 * (verify IOCB) and extend both deadlines by the time it took. */
1866 if (IS_QLA84XX(ha) && state[0] != FSTATE_READY) {
1867 DEBUG16(printk("scsi(%ld): fw_state=%x "
1868 "84xx=%x.\n", vha->host_no, state[0],
1870 if ((state[2] & FSTATE_LOGGED_IN) &&
1871 (state[2] & FSTATE_WAITING_FOR_VERIFY)) {
1872 DEBUG16(printk("scsi(%ld): Sending "
1873 "verify iocb.\n", vha->host_no));
1875 cs84xx_time = jiffies;
1876 rval = qla84xx_init_chip(vha);
1877 if (rval != QLA_SUCCESS)
1880 /* Add time taken to initialize. */
1881 cs84xx_time = jiffies - cs84xx_time;
1882 wtime += cs84xx_time;
1883 mtime += cs84xx_time;
1884 DEBUG16(printk("scsi(%ld): Increasing "
1885 "wait time by %ld. New time %ld\n",
1886 vha->host_no, cs84xx_time, wtime));
1888 } else if (state[0] == FSTATE_READY) {
1889 DEBUG(printk("scsi(%ld): F/W Ready - OK \n",
/* Firmware ready: refresh retry count / login timeout / R_A_TOV. */
1892 qla2x00_get_retry_cnt(vha, &ha->retry_count,
1893 &ha->login_timeout, &ha->r_a_tov);
1899 rval = QLA_FUNCTION_FAILED;
1901 if (atomic_read(&vha->loop_down_timer) &&
1902 state[0] != FSTATE_READY) {
1903 /* Loop down. Timeout on min_wait for states
1904 * other than Wait for Login.
1906 if (time_after_eq(jiffies, mtime)) {
1907 qla_printk(KERN_INFO, ha,
1908 "Cable is unplugged...\n");
1910 vha->device_flags |= DFLG_NO_CABLE;
1915 /* Mailbox cmd failed. Timeout on min_wait. */
1916 if (time_after_eq(jiffies, mtime) ||
1917 (IS_QLA82XX(ha) && ha->flags.fw_hung))
1921 if (time_after_eq(jiffies, wtime))
1924 /* Delay for a while */
1927 DEBUG3(printk("scsi(%ld): fw_state=%x curr time=%lx.\n",
1928 vha->host_no, state[0], jiffies));
1931 DEBUG(printk("scsi(%ld): fw_state=%x (%x, %x, %x, %x) curr time=%lx.\n",
1932 vha->host_no, state[0], state[1], state[2], state[3], state[4],
1936 DEBUG2_3(printk("scsi(%ld): Firmware ready **** FAILED ****.\n",
1944 * qla2x00_configure_hba
1945 * Setup adapter context.
1948 * ha = adapter state pointer.
/*
 * NOTE(review): partial listing -- the switch-on-topology header, case
 * labels, braces and several statements are elided between the numbered
 * lines below.
 */
1957 qla2x00_configure_hba(scsi_qla_host_t *vha)
1966 char connect_type[22];
1967 struct qla_hw_data *ha = vha->hw;
1969 /* Get host addresses. */
1970 rval = qla2x00_get_adapter_id(vha,
1971 &loop_id, &al_pa, &area, &domain, &topo, &sw_cap);
1972 if (rval != QLA_SUCCESS) {
/* Loop transition / loop-down / F_Port-holding-ALPA (0x7) are treated as
 * retryable; anything else forces an ISP abort. */
1973 if (LOOP_TRANSITION(vha) || atomic_read(&ha->loop_down_timer) ||
1974 (rval == QLA_COMMAND_ERROR && loop_id == 0x7)) {
1975 DEBUG2(printk("%s(%ld) Loop is in a transition state\n",
1976 __func__, vha->host_no));
1978 qla_printk(KERN_WARNING, ha,
1979 "ERROR -- Unable to get host loop ID.\n");
1980 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1986 qla_printk(KERN_INFO, ha,
1987 "Cannot get topology - retrying.\n");
1988 return (QLA_FUNCTION_FAILED);
1991 vha->loop_id = loop_id;
/* Defaults before decoding the reported topology. */
1994 ha->min_external_loopid = SNS_FIRST_LOOP_ID;
1995 ha->operating_mode = LOOP;
2000 DEBUG3(printk("scsi(%ld): HBA in NL topology.\n",
2002 ha->current_topology = ISP_CFG_NL;
2003 strcpy(connect_type, "(Loop)");
2007 DEBUG3(printk("scsi(%ld): HBA in FL topology.\n",
2009 ha->switch_cap = sw_cap;
2010 ha->current_topology = ISP_CFG_FL;
2011 strcpy(connect_type, "(FL_Port)");
2015 DEBUG3(printk("scsi(%ld): HBA in N P2P topology.\n",
2017 ha->operating_mode = P2P;
2018 ha->current_topology = ISP_CFG_N;
2019 strcpy(connect_type, "(N_Port-to-N_Port)");
2023 DEBUG3(printk("scsi(%ld): HBA in F P2P topology.\n",
2025 ha->switch_cap = sw_cap;
2026 ha->operating_mode = P2P;
2027 ha->current_topology = ISP_CFG_F;
2028 strcpy(connect_type, "(F_Port)");
/* Unknown topology value: fall back to NL/loop defaults. */
2032 DEBUG3(printk("scsi(%ld): HBA in unknown topology %x. "
2034 vha->host_no, topo));
2035 ha->current_topology = ISP_CFG_NL;
2036 strcpy(connect_type, "(Loop)");
2040 /* Save Host port and loop ID. */
2041 /* byte order - Big Endian */
2042 vha->d_id.b.domain = domain;
2043 vha->d_id.b.area = area;
2044 vha->d_id.b.al_pa = al_pa;
2046 if (!vha->flags.init_done)
2047 qla_printk(KERN_INFO, ha,
2048 "Topology - %s, Host Loop address 0x%x\n",
2049 connect_type, vha->loop_id);
2052 DEBUG2_3(printk("scsi(%ld): FAILED.\n", vha->host_no));
2054 DEBUG3(printk("scsi(%ld): exiting normally.\n", vha->host_no));
/*
 * qla2x00_set_model_info() - Populate ha->model_number / ha->model_desc from
 * the NVRAM-supplied model string, the static qla2x00_model_name[] table
 * (legacy ISPs only), a caller-supplied default, or the VPD (FWI2 chips).
 * Partial listing -- braces, the trailing-space trim loop body and some
 * condition lines are elided.
 */
2061 qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
2066 struct qla_hw_data *ha = vha->hw;
/* The device-table lookup applies only to legacy (pre-24xx) adapters. */
2067 int use_tbl = !IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) &&
2068 !IS_QLA8XXX_TYPE(ha);
2070 if (memcmp(model, BINZERO, len) != 0) {
2071 strncpy(ha->model_number, model, len);
2072 st = en = ha->model_number;
/* Trim trailing spaces/NULs from the copied model string. */
2075 if (*en != 0x20 && *en != 0x00)
/* Table index is the low byte of the PCI subsystem device ID. */
2080 index = (ha->pdev->subsystem_device & 0xff);
2082 ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
2083 index < QLA_MODEL_NAMES)
2084 strncpy(ha->model_desc,
2085 qla2x00_model_name[index * 2 + 1],
2086 sizeof(ha->model_desc) - 1);
/* NVRAM model string empty: derive number+description from the table,
 * falling back to the caller's default. */
2088 index = (ha->pdev->subsystem_device & 0xff);
2090 ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
2091 index < QLA_MODEL_NAMES) {
2092 strcpy(ha->model_number,
2093 qla2x00_model_name[index * 2]);
2094 strncpy(ha->model_desc,
2095 qla2x00_model_name[index * 2 + 1],
2096 sizeof(ha->model_desc) - 1);
2098 strcpy(ha->model_number, def);
/* FWI2: prefer the VPD product-identification field for the description. */
2101 if (IS_FWI2_CAPABLE(ha))
2102 qla2xxx_get_vpd_field(vha, "\x82", ha->model_desc,
2103 sizeof(ha->model_desc));
2106 /* On sparc systems, obtain port and node WWN from firmware
/*
 * Reads "port-wwn"/"node-wwn" OpenFirmware device-tree properties for this
 * PCI device and copies them into the in-memory NVRAM image.  Missing or
 * short properties are silently ignored.  Partial listing -- braces and
 * guard lines elided.
 */
2109 static void qla2xxx_nvram_wwn_from_ofw(scsi_qla_host_t *vha, nvram_t *nv)
2112 struct qla_hw_data *ha = vha->hw;
2113 struct pci_dev *pdev = ha->pdev;
2114 struct device_node *dp = pci_device_to_OF_node(pdev);
2118 val = of_get_property(dp, "port-wwn", &len);
2119 if (val && len >= WWN_SIZE)
2120 memcpy(nv->port_name, val, WWN_SIZE);
2122 val = of_get_property(dp, "node-wwn", &len);
2123 if (val && len >= WWN_SIZE)
2124 memcpy(nv->node_name, val, WWN_SIZE);
2129 * NVRAM configuration for ISP 2xxx
2132 * ha = adapter block pointer.
2135 * initialization control block in response_ring
2136 * host adapters parameters in host adapter block
/*
 * NOTE(review): partial listing -- blank lines, braces and a number of
 * statements are elided between the numbered lines below.  '®' is mojibake
 * for '&reg'.
 */
2142 qla2x00_nvram_config(scsi_qla_host_t *vha)
2147 uint8_t *dptr1, *dptr2;
2148 struct qla_hw_data *ha = vha->hw;
2149 init_cb_t *icb = ha->init_cb;
2150 nvram_t *nv = ha->nvram;
2151 uint8_t *ptr = ha->nvram;
2152 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
2156 /* Determine NVRAM starting address. */
2157 ha->nvram_size = sizeof(nvram_t);
/* Function 1 of dual-function adapters uses the upper NVRAM half (0x80). */
2159 if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha))
2160 if ((RD_REG_WORD(®->ctrl_status) >> 14) == 1)
2161 ha->nvram_base = 0x80;
2163 /* Get NVRAM data and calculate checksum. */
2164 ha->isp_ops->read_nvram(vha, ptr, ha->nvram_base, ha->nvram_size);
2165 for (cnt = 0, chksum = 0; cnt < ha->nvram_size; cnt++)
2168 DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", vha->host_no));
2169 DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size));
2171 /* Bad NVRAM data, set defaults parameters. */
2172 if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' ||
2173 nv->id[2] != 'P' || nv->id[3] != ' ' || nv->nvram_version < 1) {
2174 /* Reset NVRAM data. */
2175 qla_printk(KERN_WARNING, ha, "Inconsistent NVRAM detected: "
2176 "checksum=0x%x id=%c version=0x%x.\n", chksum, nv->id[0],
2178 qla_printk(KERN_WARNING, ha, "Falling back to functioning (yet "
2179 "invalid -- WWPN) defaults.\n");
2182 * Set default initialization control block.
2184 memset(nv, 0, ha->nvram_size);
2185 nv->parameter_block_version = ICB_VERSION;
/* Per-chip-family default firmware options and frame payload size. */
2187 if (IS_QLA23XX(ha)) {
2188 nv->firmware_options[0] = BIT_2 | BIT_1;
2189 nv->firmware_options[1] = BIT_7 | BIT_5;
2190 nv->add_firmware_options[0] = BIT_5;
2191 nv->add_firmware_options[1] = BIT_5 | BIT_4;
2192 nv->frame_payload_size = __constant_cpu_to_le16(2048);
2193 nv->special_options[1] = BIT_7;
2194 } else if (IS_QLA2200(ha)) {
2195 nv->firmware_options[0] = BIT_2 | BIT_1;
2196 nv->firmware_options[1] = BIT_7 | BIT_5;
2197 nv->add_firmware_options[0] = BIT_5;
2198 nv->add_firmware_options[1] = BIT_5 | BIT_4;
2199 nv->frame_payload_size = __constant_cpu_to_le16(1024);
2200 } else if (IS_QLA2100(ha)) {
2201 nv->firmware_options[0] = BIT_3 | BIT_1;
2202 nv->firmware_options[1] = BIT_5;
2203 nv->frame_payload_size = __constant_cpu_to_le16(1024);
2206 nv->max_iocb_allocation = __constant_cpu_to_le16(256);
2207 nv->execution_throttle = __constant_cpu_to_le16(16);
2208 nv->retry_count = 8;
2209 nv->retry_delay = 1;
/* Placeholder (invalid-but-functional) WWPN bytes; may be overridden by
 * OpenFirmware properties on sparc just below. */
2211 nv->port_name[0] = 33;
2212 nv->port_name[3] = 224;
2213 nv->port_name[4] = 139;
2215 qla2xxx_nvram_wwn_from_ofw(vha, nv);
2217 nv->login_timeout = 4;
2220 * Set default host adapter parameters
2222 nv->host_p[1] = BIT_2;
2223 nv->reset_delay = 5;
2224 nv->port_down_retry_count = 8;
2225 nv->max_luns_per_target = __constant_cpu_to_le16(8);
2226 nv->link_down_timeout = 60;
2231 #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
2233 * The SN2 does not provide BIOS emulation which means you can't change
2234 * potentially bogus BIOS settings. Force the use of default settings
2235 * for link rate and frame size. Hope that the rest of the settings
2238 if (ia64_platform_is("sn2")) {
2239 nv->frame_payload_size = __constant_cpu_to_le16(2048);
2241 nv->special_options[1] = BIT_7;
2245 /* Reset Initialization control block */
2246 memset(icb, 0, ha->init_cb_size);
2249 * Setup driver NVRAM options.
2251 nv->firmware_options[0] |= (BIT_6 | BIT_1);
2252 nv->firmware_options[0] &= ~(BIT_5 | BIT_4);
2253 nv->firmware_options[1] |= (BIT_5 | BIT_0);
2254 nv->firmware_options[1] &= ~BIT_4;
2256 if (IS_QLA23XX(ha)) {
2257 nv->firmware_options[0] |= BIT_2;
2258 nv->firmware_options[0] &= ~BIT_3;
2259 nv->firmware_options[0] &= ~BIT_6;
2260 nv->add_firmware_options[1] |= BIT_5 | BIT_4;
/* 2300 model number depends on the frame-buffer revision register. */
2262 if (IS_QLA2300(ha)) {
2263 if (ha->fb_rev == FPM_2310) {
2264 strcpy(ha->model_number, "QLA2310");
2266 strcpy(ha->model_number, "QLA2300");
2269 qla2x00_set_model_info(vha, nv->model_number,
2270 sizeof(nv->model_number), "QLA23xx");
2272 } else if (IS_QLA2200(ha)) {
2273 nv->firmware_options[0] |= BIT_2;
2275 * 'Point-to-point preferred, else loop' is not a safe
2276 * connection mode setting.
2278 if ((nv->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) ==
2280 /* Force 'loop preferred, else point-to-point'. */
2281 nv->add_firmware_options[0] &= ~(BIT_6 | BIT_5 | BIT_4);
2282 nv->add_firmware_options[0] |= BIT_5;
2284 strcpy(ha->model_number, "QLA22xx");
2285 } else /*if (IS_QLA2100(ha))*/ {
2286 strcpy(ha->model_number, "QLA2100");
2290 * Copy over NVRAM RISC parameter block to initialization control block.
2292 dptr1 = (uint8_t *)icb;
2293 dptr2 = (uint8_t *)&nv->parameter_block_version;
2294 cnt = (uint8_t *)&icb->request_q_outpointer - (uint8_t *)&icb->version;
2296 *dptr1++ = *dptr2++;
2298 /* Copy 2nd half. */
2299 dptr1 = (uint8_t *)icb->add_firmware_options;
2300 cnt = (uint8_t *)icb->reserved_3 - (uint8_t *)icb->add_firmware_options;
2302 *dptr1++ = *dptr2++;
2304 /* Use alternate WWN? */
2305 if (nv->host_p[1] & BIT_7) {
2306 memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
2307 memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
2310 /* Prepare nodename */
2311 if ((icb->firmware_options[1] & BIT_6) == 0) {
2313 * Firmware will apply the following mask if the nodename was
2316 memcpy(icb->node_name, icb->port_name, WWN_SIZE);
2317 icb->node_name[0] &= 0xF0;
2321 * Set host adapter parameters.
2323 if (nv->host_p[0] & BIT_7)
2324 ql2xextended_error_logging = 1;
2325 ha->flags.disable_risc_code_load = ((nv->host_p[0] & BIT_4) ? 1 : 0);
2326 /* Always load RISC code on non ISP2[12]00 chips. */
2327 if (!IS_QLA2100(ha) && !IS_QLA2200(ha))
2328 ha->flags.disable_risc_code_load = 0;
2329 ha->flags.enable_lip_reset = ((nv->host_p[1] & BIT_1) ? 1 : 0);
2330 ha->flags.enable_lip_full_login = ((nv->host_p[1] & BIT_2) ? 1 : 0);
2331 ha->flags.enable_target_reset = ((nv->host_p[1] & BIT_3) ? 1 : 0);
2332 ha->flags.enable_led_scheme = (nv->special_options[1] & BIT_4) ? 1 : 0;
2333 ha->flags.disable_serdes = 0;
2335 ha->operating_mode =
2336 (icb->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) >> 4;
2338 memcpy(ha->fw_seriallink_options, nv->seriallink_options,
2339 sizeof(ha->fw_seriallink_options));
2341 /* save HBA serial number */
2342 ha->serial0 = icb->port_name[5];
2343 ha->serial1 = icb->port_name[6];
2344 ha->serial2 = icb->port_name[7];
2345 memcpy(vha->node_name, icb->node_name, WWN_SIZE);
2346 memcpy(vha->port_name, icb->port_name, WWN_SIZE);
2348 icb->execution_throttle = __constant_cpu_to_le16(0xFFFF);
2350 ha->retry_count = nv->retry_count;
2352 /* Set minimum login_timeout to 4 seconds. */
2353 if (nv->login_timeout != ql2xlogintimeout)
2354 nv->login_timeout = ql2xlogintimeout;
2355 if (nv->login_timeout < 4)
2356 nv->login_timeout = 4;
2357 ha->login_timeout = nv->login_timeout;
2358 icb->login_timeout = nv->login_timeout;
2360 /* Set minimum RATOV to 100 tenths of a second. */
2363 ha->loop_reset_delay = nv->reset_delay;
2365 /* Link Down Timeout = 0:
2367 * When Port Down timer expires we will start returning
2368 * I/O's to OS with "DID_NO_CONNECT".
2370 * Link Down Timeout != 0:
2372 * The driver waits for the link to come up after link down
2373 * before returning I/Os to OS with "DID_NO_CONNECT".
2375 if (nv->link_down_timeout == 0) {
2376 ha->loop_down_abort_time =
2377 (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT)
2379 ha->link_down_timeout = nv->link_down_timeout;
2380 ha->loop_down_abort_time =
2381 (LOOP_DOWN_TIME - ha->link_down_timeout);
2385 * Need enough time to try and get the port back.
2387 ha->port_down_retry_count = nv->port_down_retry_count;
2388 if (qlport_down_retry)
2389 ha->port_down_retry_count = qlport_down_retry;
2390 /* Set login_retry_count */
2391 ha->login_retry_count = nv->retry_count;
2392 if (ha->port_down_retry_count == nv->port_down_retry_count &&
2393 ha->port_down_retry_count > 3)
2394 ha->login_retry_count = ha->port_down_retry_count;
2395 else if (ha->port_down_retry_count > (int)ha->login_retry_count)
2396 ha->login_retry_count = ha->port_down_retry_count;
2397 if (ql2xloginretrycount)
2398 ha->login_retry_count = ql2xloginretrycount;
2400 icb->lun_enables = __constant_cpu_to_le16(0);
2401 icb->command_resource_count = 0;
2402 icb->immediate_notify_resource_count = 0;
2403 icb->timeout = __constant_cpu_to_le16(0);
2405 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
2407 icb->firmware_options[0] &= ~BIT_3;
2408 icb->add_firmware_options[0] &=
2409 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
2410 icb->add_firmware_options[0] |= BIT_2;
2411 icb->response_accumulation_timer = 3;
2412 icb->interrupt_delay_timer = 5;
2414 vha->flags.process_response_queue = 1;
/* ZIO (zero interrupt operation) setup: capture mode/timer from the ICB on
 * first pass, then force ZIO mode 6 with the saved delay timer. */
2417 if (!vha->flags.init_done) {
2418 ha->zio_mode = icb->add_firmware_options[0] &
2419 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
2420 ha->zio_timer = icb->interrupt_delay_timer ?
2421 icb->interrupt_delay_timer: 2;
2423 icb->add_firmware_options[0] &=
2424 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
2425 vha->flags.process_response_queue = 0;
2426 if (ha->zio_mode != QLA_ZIO_DISABLED) {
2427 ha->zio_mode = QLA_ZIO_MODE_6;
2429 DEBUG2(printk("scsi(%ld): ZIO mode %d enabled; timer "
2430 "delay (%d us).\n", vha->host_no, ha->zio_mode,
2431 ha->zio_timer * 100));
2432 qla_printk(KERN_INFO, ha,
2433 "ZIO mode %d enabled; timer delay (%d us).\n",
2434 ha->zio_mode, ha->zio_timer * 100);
2436 icb->add_firmware_options[0] |= (uint8_t)ha->zio_mode;
2437 icb->interrupt_delay_timer = (uint8_t)ha->zio_timer;
2438 vha->flags.process_response_queue = 1;
2443 DEBUG2_3(printk(KERN_WARNING
2444 "scsi(%ld): NVRAM configuration failed!\n", vha->host_no));
/*
 * qla2x00_rport_del() - Detach and delete the FC transport rport bound to an
 * fcport.  The deferred rport (drport) takes precedence over the live one;
 * the pointer swap is done under the host lock, the transport delete is not.
 * Partial listing -- braces/blank lines elided.
 */
2450 qla2x00_rport_del(void *data)
2452 fc_port_t *fcport = data;
2453 struct fc_rport *rport;
2455 spin_lock_irq(fcport->vha->host->host_lock);
2456 rport = fcport->drport ? fcport->drport: fcport->rport;
2457 fcport->drport = NULL;
2458 spin_unlock_irq(fcport->vha->host->host_lock);
/* NOTE(review): no visible NULL guard here -- presumably a caller invariant
 * guarantees at least one of drport/rport is set; confirm in callers. */
2460 fc_remote_port_delete(rport);
2464 * qla2x00_alloc_fcport() - Allocate a generic fcport.
2466 * @flags: allocation flags
2468 * Returns a pointer to the allocated fcport, or NULL, if none available.
/*
 * Caller owns the returned fc_port_t and is responsible for freeing it.
 * Partial listing -- braces and the NULL-check/return lines are elided.
 */
2471 qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
2475 fcport = kzalloc(sizeof(fc_port_t), flags);
2479 /* Setup fcport template structure. */
2481 fcport->vp_idx = vha->vp_idx;
2482 fcport->port_type = FCT_UNKNOWN;
2483 fcport->loop_id = FC_NO_LOOP_ID;
2484 atomic_set(&fcport->state, FCS_UNCONFIGURED);
2485 fcport->supported_classes = FC_COS_UNSPECIFIED;
2491 * qla2x00_configure_loop
2492 * Updates Fibre Channel Device Database with what is actually on loop.
2495 * ha = adapter block pointer.
2500 * 2 = database was full and device was not configured.
/*
 * NOTE(review): partial listing -- blank lines, braces and some statements
 * are elided between the numbered lines below.
 */
2503 qla2x00_configure_loop(scsi_qla_host_t *vha)
2506 unsigned long flags, save_flags;
2507 struct qla_hw_data *ha = vha->hw;
2510 /* Get Initiator ID */
2511 if (test_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags)) {
2512 rval = qla2x00_configure_hba(vha);
2513 if (rval != QLA_SUCCESS) {
2514 DEBUG(printk("scsi(%ld): Unable to configure HBA.\n",
/* Snapshot dpc_flags; save_flags is used at the end to re-arm work that was
 * interrupted by a loop resync. */
2520 save_flags = flags = vha->dpc_flags;
2521 DEBUG(printk("scsi(%ld): Configure loop -- dpc flags =0x%lx\n",
2522 vha->host_no, flags));
2525 * If we have both an RSCN and PORT UPDATE pending then handle them
2526 * both at the same time.
2528 clear_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
2529 clear_bit(RSCN_UPDATE, &vha->dpc_flags);
2531 qla2x00_get_data_rate(vha);
2533 /* Determine what we need to do */
2534 if (ha->current_topology == ISP_CFG_FL &&
2535 (test_bit(LOCAL_LOOP_UPDATE, &flags))) {
2537 vha->flags.rscn_queue_overflow = 1;
2538 set_bit(RSCN_UPDATE, &flags);
2540 } else if (ha->current_topology == ISP_CFG_F &&
2541 (test_bit(LOCAL_LOOP_UPDATE, &flags))) {
2543 vha->flags.rscn_queue_overflow = 1;
2544 set_bit(RSCN_UPDATE, &flags);
/* Pure F_Port: no local loop to scan, only fabric. */
2545 clear_bit(LOCAL_LOOP_UPDATE, &flags);
2547 } else if (ha->current_topology == ISP_CFG_N) {
2548 clear_bit(RSCN_UPDATE, &flags);
2550 } else if (!vha->flags.online ||
2551 (test_bit(ABORT_ISP_ACTIVE, &flags))) {
/* Offline or mid-abort: force a full rescan of both loop and fabric. */
2553 vha->flags.rscn_queue_overflow = 1;
2554 set_bit(RSCN_UPDATE, &flags);
2555 set_bit(LOCAL_LOOP_UPDATE, &flags);
2558 if (test_bit(LOCAL_LOOP_UPDATE, &flags)) {
2559 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
2560 rval = QLA_FUNCTION_FAILED;
2562 rval = qla2x00_configure_local_loop(vha);
2565 if (rval == QLA_SUCCESS && test_bit(RSCN_UPDATE, &flags)) {
2566 if (LOOP_TRANSITION(vha))
2567 rval = QLA_FUNCTION_FAILED;
2569 rval = qla2x00_configure_fabric(vha);
2572 if (rval == QLA_SUCCESS) {
/* A loop-down or pending resync invalidates the scan we just did. */
2573 if (atomic_read(&vha->loop_down_timer) ||
2574 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
2575 rval = QLA_FUNCTION_FAILED;
2577 atomic_set(&vha->loop_state, LOOP_READY);
2579 DEBUG(printk("scsi(%ld): LOOP READY\n", vha->host_no));
2584 DEBUG2_3(printk("%s(%ld): *** FAILED ***\n",
2585 __func__, vha->host_no));
2587 DEBUG3(printk("%s: exiting normally\n", __func__));
2590 /* Restore state if a resync event occurred during processing */
2591 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
2592 if (test_bit(LOCAL_LOOP_UPDATE, &save_flags))
2593 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
2594 if (test_bit(RSCN_UPDATE, &save_flags)) {
2595 set_bit(RSCN_UPDATE, &vha->dpc_flags);
2596 if (!IS_ALOGIO_CAPABLE(ha))
2597 vha->flags.rscn_queue_overflow = 1;
2607 * qla2x00_configure_local_loop
2608 * Updates Fibre Channel Device Database with local loop devices.
2611 * ha = adapter block pointer.
2617 qla2x00_configure_local_loop(scsi_qla_host_t *vha)
2622 fc_port_t *fcport, *new_fcport;
2628 uint8_t domain, area, al_pa;
2629 struct qla_hw_data *ha = vha->hw;
2633 entries = MAX_FIBRE_DEVICES;
2635 DEBUG3(printk("scsi(%ld): Getting FCAL position map\n", vha->host_no));
2636 DEBUG3(qla2x00_get_fcal_position_map(vha, NULL));
2638 /* Get list of logged in devices. */
2639 memset(ha->gid_list, 0, GID_LIST_SIZE);
2640 rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma,
2642 if (rval != QLA_SUCCESS)
2643 goto cleanup_allocation;
2645 DEBUG3(printk("scsi(%ld): Entries in ID list (%d)\n",
2646 vha->host_no, entries));
2647 DEBUG3(qla2x00_dump_buffer((uint8_t *)ha->gid_list,
2648 entries * sizeof(struct gid_list_info)));
2650 /* Allocate temporary fcport for any new fcports discovered. */
2651 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2652 if (new_fcport == NULL) {
2653 rval = QLA_MEMORY_ALLOC_FAILED;
2654 goto cleanup_allocation;
2656 new_fcport->flags &= ~FCF_FABRIC_DEVICE;
2659 * Mark local devices that were present with FCF_DEVICE_LOST for now.
2661 list_for_each_entry(fcport, &vha->vp_fcports, list) {
2662 if (atomic_read(&fcport->state) == FCS_ONLINE &&
2663 fcport->port_type != FCT_BROADCAST &&
2664 (fcport->flags & FCF_FABRIC_DEVICE) == 0) {
2666 DEBUG(printk("scsi(%ld): Marking port lost, "
2668 vha->host_no, fcport->loop_id));
2670 atomic_set(&fcport->state, FCS_DEVICE_LOST);
2674 /* Add devices to port list. */
2675 id_iter = (char *)ha->gid_list;
2676 for (index = 0; index < entries; index++) {
2677 domain = ((struct gid_list_info *)id_iter)->domain;
2678 area = ((struct gid_list_info *)id_iter)->area;
2679 al_pa = ((struct gid_list_info *)id_iter)->al_pa;
2680 if (IS_QLA2100(ha) || IS_QLA2200(ha))
2681 loop_id = (uint16_t)
2682 ((struct gid_list_info *)id_iter)->loop_id_2100;
2684 loop_id = le16_to_cpu(
2685 ((struct gid_list_info *)id_iter)->loop_id);
2686 id_iter += ha->gid_list_info_size;
2688 /* Bypass reserved domain fields. */
2689 if ((domain & 0xf0) == 0xf0)
2692 /* Bypass if not same domain and area of adapter. */
2693 if (area && domain &&
2694 (area != vha->d_id.b.area || domain != vha->d_id.b.domain))
2697 /* Bypass invalid local loop ID. */
2698 if (loop_id > LAST_LOCAL_LOOP_ID)
2701 /* Fill in member data. */
2702 new_fcport->d_id.b.domain = domain;
2703 new_fcport->d_id.b.area = area;
2704 new_fcport->d_id.b.al_pa = al_pa;
2705 new_fcport->loop_id = loop_id;
2706 new_fcport->vp_idx = vha->vp_idx;
2707 rval2 = qla2x00_get_port_database(vha, new_fcport, 0);
2708 if (rval2 != QLA_SUCCESS) {
2709 DEBUG2(printk("scsi(%ld): Failed to retrieve fcport "
2710 "information -- get_port_database=%x, "
2712 vha->host_no, rval2, new_fcport->loop_id));
2713 DEBUG2(printk("scsi(%ld): Scheduling resync...\n",
2715 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
2719 /* Check for matching device in port list. */
2722 list_for_each_entry(fcport, &vha->vp_fcports, list) {
2723 if (memcmp(new_fcport->port_name, fcport->port_name,
2727 fcport->flags &= ~FCF_FABRIC_DEVICE;
2728 fcport->loop_id = new_fcport->loop_id;
2729 fcport->port_type = new_fcport->port_type;
2730 fcport->d_id.b24 = new_fcport->d_id.b24;
2731 memcpy(fcport->node_name, new_fcport->node_name,
2739 /* New device, add to fcports list. */
2741 new_fcport->vha = vha;
2742 new_fcport->vp_idx = vha->vp_idx;
2744 list_add_tail(&new_fcport->list, &vha->vp_fcports);
2746 /* Allocate a new replacement fcport. */
2747 fcport = new_fcport;
2748 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2749 if (new_fcport == NULL) {
2750 rval = QLA_MEMORY_ALLOC_FAILED;
2751 goto cleanup_allocation;
2753 new_fcport->flags &= ~FCF_FABRIC_DEVICE;
2756 /* Base iIDMA settings on HBA port speed. */
2757 fcport->fp_speed = ha->link_data_rate;
2759 qla2x00_update_fcport(vha, fcport);
2767 if (rval != QLA_SUCCESS) {
2768 DEBUG2(printk("scsi(%ld): Configure local loop error exit: "
2769 "rval=%x\n", vha->host_no, rval));
/*
 * qla2x00_iidma_fcport
 *	Tell the firmware to use the remote port's negotiated port speed
 *	(iIDMA) for this fcport, if the HBA supports it.
 *
 * Input:
 *	vha    = adapter block pointer.
 *	fcport = remote port whose fp_speed is applied.
 */
2776 qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
2778 #define LS_UNKNOWN 2
2779 static char *link_speeds[] = { "1", "2", "?", "4", "8", "10" };
2783 struct qla_hw_data *ha = vha->hw;
/* Bail out when iIDMA is unsupported. */
2785 if (!IS_IIDMA_CAPABLE(ha))
/* Skip unknown speeds, or speeds above the current link rate. */
2788 if (fcport->fp_speed == PORT_SPEED_UNKNOWN ||
2789 fcport->fp_speed > ha->link_data_rate)
2792 rval = qla2x00_set_idma_speed(vha, fcport->loop_id, fcport->fp_speed,
2794 if (rval != QLA_SUCCESS) {
2795 DEBUG2(printk("scsi(%ld): Unable to adjust iIDMA "
2796 "%02x%02x%02x%02x%02x%02x%02x%02x -- %04x %x %04x %04x.\n",
2797 vha->host_no, fcport->port_name[0], fcport->port_name[1],
2798 fcport->port_name[2], fcport->port_name[3],
2799 fcport->port_name[4], fcport->port_name[5],
2800 fcport->port_name[6], fcport->port_name[7], rval,
2801 fcport->fp_speed, mb[0], mb[1]));
/*
 * Map fp_speed to a printable rate: indices 0-4 map directly into
 * link_speeds[]; firmware value 0x13 means 10 Gb/s (index 5);
 * anything else prints as "?" (LS_UNKNOWN).
 */
2803 link_speed = link_speeds[LS_UNKNOWN];
2804 if (fcport->fp_speed < 5)
2805 link_speed = link_speeds[fcport->fp_speed];
2806 else if (fcport->fp_speed == 0x13)
2807 link_speed = link_speeds[5];
2808 DEBUG2(qla_printk(KERN_INFO, ha,
2809 "iIDMA adjusted to %s GB/s on "
2810 "%02x%02x%02x%02x%02x%02x%02x%02x.\n",
2811 link_speed, fcport->port_name[0],
2812 fcport->port_name[1], fcport->port_name[2],
2813 fcport->port_name[3], fcport->port_name[4],
2814 fcport->port_name[5], fcport->port_name[6],
2815 fcport->port_name[7]));
/*
 * qla2x00_reg_remote_port
 *	Register the fcport with the FC transport class as a remote port and
 *	set its role (initiator/target) based on port_type.
 *
 * Input:
 *	vha    = adapter block pointer.
 *	fcport = remote port to register; fcport->rport is updated.
 */
2820 qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
2822 struct fc_rport_identifiers rport_ids;
2823 struct fc_rport *rport;
2824 struct qla_hw_data *ha = vha->hw;
/* Drop any stale transport rport before re-adding. */
2826 qla2x00_rport_del(fcport);
2828 rport_ids.node_name = wwn_to_u64(fcport->node_name);
2829 rport_ids.port_name = wwn_to_u64(fcport->port_name);
2830 rport_ids.port_id = fcport->d_id.b.domain << 16 |
2831 fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
2832 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
2833 fcport->rport = rport = fc_remote_port_add(vha->host, 0, &rport_ids);
2835 qla_printk(KERN_WARNING, ha,
2836 "Unable to allocate fc remote port!\n");
/* Stash the driver fcport in the transport rport's private data. */
2839 spin_lock_irq(fcport->vha->host->host_lock);
2840 *((fc_port_t **)rport->dd_data) = fcport;
2841 spin_unlock_irq(fcport->vha->host->host_lock);
2843 rport->supported_classes = fcport->supported_classes;
2845 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
2846 if (fcport->port_type == FCT_INITIATOR)
2847 rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
2848 if (fcport->port_type == FCT_TARGET)
2849 rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
2850 fc_remote_port_rolechg(rport, rport_ids.roles);
/*
 * qla2x00_update_fcport
 *	Update an fcport that has (re)appeared: reset its retry counters,
 *	apply iIDMA speed, mark it FCS_ONLINE and register it with the FC
 *	transport.
 *
 * Input:
 *	vha    = adapter block pointer.
 *	fcport = port structure pointer.
 */
2869 qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
2871 struct qla_hw_data *ha = vha->hw;
2874 fcport->login_retry = 0;
2875 fcport->port_login_retry_count = ha->port_down_retry_count *
2877 atomic_set(&fcport->port_down_timer, ha->port_down_retry_count *
/* Login succeeded: clear the pending-login and async-in-flight flags. */
2879 fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
2881 qla2x00_iidma_fcport(vha, fcport);
2883 atomic_set(&fcport->state, FCS_ONLINE);
2885 qla2x00_reg_remote_port(vha, fcport);
/*
 * qla2x00_configure_fabric
 *	Set up fabric (SNS-discovered) devices: verify the switch is present,
 *	log into the name server, register FC-4/node names, scan the fabric,
 *	log out lost devices and log in new/updated ones, assigning loop IDs.
 *
 * Input:
 *	vha = adapter block pointer.
 *
 * Returns:
 *	QLA_SUCCESS when no switch is present, when RSCNs were fully handled,
 *	or on successful configuration; an error status otherwise.
 */
2900 qla2x00_configure_fabric(scsi_qla_host_t *vha)
2903 fc_port_t *fcport, *fcptemp;
2904 uint16_t next_loopid;
2905 uint16_t mb[MAILBOX_REGISTER_COUNT];
2907 LIST_HEAD(new_fcports);
2908 struct qla_hw_data *ha = vha->hw;
2909 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
2911 /* If FL port exists, then SNS is present */
2912 if (IS_FWI2_CAPABLE(ha))
2913 loop_id = NPH_F_PORT;
2915 loop_id = SNS_FL_PORT;
2916 rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_node_name, 1);
2917 if (rval != QLA_SUCCESS) {
2918 DEBUG2(printk("scsi(%ld): MBC_GET_PORT_NAME Failed, No FL "
2919 "Port\n", vha->host_no));
/* No switch: not an error for this routine, just note it and return. */
2921 vha->device_flags &= ~SWITCH_FOUND;
2922 return (QLA_SUCCESS);
2924 vha->device_flags |= SWITCH_FOUND;
2926 /* Mark devices that need re-synchronization. */
2927 rval2 = qla2x00_device_resync(vha);
2928 if (rval2 == QLA_RSCNS_HANDLED) {
2929 /* No point doing the scan, just continue. */
2930 return (QLA_SUCCESS);
2934 if (ql2xfdmienable &&
2935 test_and_clear_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags))
2936 qla2x00_fdmi_register(vha);
2938 /* Ensure we are logged into the SNS. */
2939 if (IS_FWI2_CAPABLE(ha))
2942 loop_id = SIMPLE_NAME_SERVER;
/* Well-known name-server address 0xFFFFFC. */
2943 ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff,
2944 0xfc, mb, BIT_1 | BIT_0);
2945 if (mb[0] != MBS_COMMAND_COMPLETE) {
2946 DEBUG2(qla_printk(KERN_INFO, ha,
2947 "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x "
2948 "mb[2]=%x mb[6]=%x mb[7]=%x\n", loop_id,
2949 mb[0], mb[1], mb[2], mb[6], mb[7]));
2950 return (QLA_SUCCESS);
/* Register FC-4 type/features and node names with the name server. */
2953 if (test_and_clear_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags)) {
2954 if (qla2x00_rft_id(vha)) {
2956 DEBUG2(printk("scsi(%ld): Register FC-4 "
2957 "TYPE failed.\n", vha->host_no));
2959 if (qla2x00_rff_id(vha)) {
2961 DEBUG2(printk("scsi(%ld): Register FC-4 "
2962 "Features failed.\n", vha->host_no));
2964 if (qla2x00_rnn_id(vha)) {
2966 DEBUG2(printk("scsi(%ld): Register Node Name "
2967 "failed.\n", vha->host_no));
2968 } else if (qla2x00_rsnn_nn(vha)) {
2970 DEBUG2(printk("scsi(%ld): Register Symbolic "
2971 "Node Name failed.\n", vha->host_no));
2975 rval = qla2x00_find_all_fabric_devs(vha, &new_fcports);
2976 if (rval != QLA_SUCCESS)
/*
 * Logout all previous fabric devices marked lost, except
 * those kept logged in (e.g. FCP-2 devices per the flags test below).
 */
2983 list_for_each_entry(fcport, &vha->vp_fcports, list) {
/* Abort the walk early if a resync has been requested meanwhile. */
2984 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
2987 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0)
2990 if (atomic_read(&fcport->state) == FCS_DEVICE_LOST) {
2991 qla2x00_mark_device_lost(vha, fcport,
2992 ql2xplogiabsentdevice, 0);
2993 if (fcport->loop_id != FC_NO_LOOP_ID &&
2994 (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
2995 fcport->port_type != FCT_INITIATOR &&
2996 fcport->port_type != FCT_BROADCAST) {
2997 ha->isp_ops->fabric_logout(vha,
2999 fcport->d_id.b.domain,
3000 fcport->d_id.b.area,
3001 fcport->d_id.b.al_pa);
3002 fcport->loop_id = FC_NO_LOOP_ID;
3007 /* Starting free loop ID. */
3008 next_loopid = ha->min_external_loopid;
/*
 * Scan through our port list and login entries that need to be
 * logged in (FCF_LOGIN_NEEDED), allocating loop IDs as required.
 */
3014 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3015 if (atomic_read(&vha->loop_down_timer) ||
3016 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
3019 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 ||
3020 (fcport->flags & FCF_LOGIN_NEEDED) == 0)
3023 if (fcport->loop_id == FC_NO_LOOP_ID) {
3024 fcport->loop_id = next_loopid;
3025 rval = qla2x00_find_new_loop_id(
3027 if (rval != QLA_SUCCESS) {
3028 /* Ran out of IDs to use */
3032 /* Login and update database */
3033 qla2x00_fabric_dev_login(vha, fcport, &next_loopid);
3036 /* Exit if out of loop IDs. */
3037 if (rval != QLA_SUCCESS) {
/*
 * Login and add the new devices to our port list.
 */
3044 list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) {
3045 if (atomic_read(&vha->loop_down_timer) ||
3046 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
3049 /* Find a new loop ID to use. */
3050 fcport->loop_id = next_loopid;
3051 rval = qla2x00_find_new_loop_id(base_vha, fcport);
3052 if (rval != QLA_SUCCESS) {
3053 /* Ran out of IDs to use */
3057 /* Login and update database */
3058 qla2x00_fabric_dev_login(vha, fcport, &next_loopid);
3062 fcport->vp_idx = vha->vp_idx;
3064 list_move_tail(&fcport->list, &vha->vp_fcports);
3068 /* Free all new device structures not processed. */
3069 list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) {
3070 list_del(&fcport->list);
3075 DEBUG2(printk("scsi(%ld): Configure fabric error exit: "
3076 "rval=%d\n", vha->host_no, rval));
/*
 * qla2x00_find_all_fabric_devs
 *	Walk the fabric name server and build the list of remote ports,
 *	preferring a bulk GID_PT/GPN_ID/GNN_ID query set with a per-port
 *	GA_NXT fallback. Newly discovered ports are appended to new_fcports;
 *	existing database entries are refreshed in place.
 *
 * Input:
 *	vha         = adapter block pointer.
 *	new_fcports = list head receiving newly discovered fcports.
 *
 * Returns:
 *	QLA_SUCCESS or QLA_MEMORY_ALLOC_FAILED (on fcport allocation failure).
 */
3096 qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
3097 struct list_head *new_fcports)
3101 fc_port_t *fcport, *new_fcport, *fcptemp;
3106 int first_dev, last_dev;
3107 port_id_t wrap = {}, nxt_d_id;
3108 struct qla_hw_data *ha = vha->hw;
3109 struct scsi_qla_host *vp, *base_vha = pci_get_drvdata(ha->pdev);
3110 struct scsi_qla_host *tvp;
3114 /* Try GID_PT to get device list, else GAN. */
3115 swl = kcalloc(MAX_FIBRE_DEVICES, sizeof(sw_info_t), GFP_KERNEL);
3118 DEBUG2(printk("scsi(%ld): GID_PT allocations failed, fallback "
3119 "on GA_NXT\n", vha->host_no));
3121 if (qla2x00_gid_pt(vha, swl) != QLA_SUCCESS) {
3124 } else if (qla2x00_gpn_id(vha, swl) != QLA_SUCCESS) {
3127 } else if (qla2x00_gnn_id(vha, swl) != QLA_SUCCESS) {
3130 } else if (ql2xiidmaenable &&
3131 qla2x00_gfpn_id(vha, swl) == QLA_SUCCESS) {
3132 qla2x00_gpsc(vha, swl);
3135 /* If other queries succeeded probe for FC-4 type */
3137 qla2x00_gff_id(vha, swl);
3141 /* Allocate temporary fcport for any new fcports discovered. */
3142 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
3143 if (new_fcport == NULL) {
3145 return (QLA_MEMORY_ALLOC_FAILED);
3147 new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
3148 /* Set start port ID scan at adapter ID. */
3152 /* Starting free loop ID. */
3153 loop_id = ha->min_external_loopid;
3154 for (; loop_id <= ha->max_loop_id; loop_id++) {
3155 if (qla2x00_is_reserved_id(vha, loop_id))
/* On FL topologies, a loop transition forces a full resync. */
3158 if (ha->current_topology == ISP_CFG_FL &&
3159 (atomic_read(&vha->loop_down_timer) ||
3160 LOOP_TRANSITION(vha))) {
3161 atomic_set(&vha->loop_down_timer, 0);
3162 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
3163 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
/* Bulk-query path: copy the next switch-list entry into new_fcport. */
3169 wrap.b24 = new_fcport->d_id.b24;
3171 new_fcport->d_id.b24 = swl[swl_idx].d_id.b24;
3172 memcpy(new_fcport->node_name,
3173 swl[swl_idx].node_name, WWN_SIZE);
3174 memcpy(new_fcport->port_name,
3175 swl[swl_idx].port_name, WWN_SIZE);
3176 memcpy(new_fcport->fabric_port_name,
3177 swl[swl_idx].fabric_port_name, WWN_SIZE);
3178 new_fcport->fp_speed = swl[swl_idx].fp_speed;
3179 new_fcport->fc4_type = swl[swl_idx].fc4_type;
3181 if (swl[swl_idx].d_id.b.rsvd_1 != 0) {
3187 /* Send GA_NXT to the switch */
3188 rval = qla2x00_ga_nxt(vha, new_fcport);
3189 if (rval != QLA_SUCCESS) {
3190 qla_printk(KERN_WARNING, ha,
3191 "SNS scan failed -- assuming zero-entry "
/* Scan failed: discard everything discovered so far. */
3193 list_for_each_entry_safe(fcport, fcptemp,
3194 new_fcports, list) {
3195 list_del(&fcport->list);
3203 /* If wrap on switch device list, exit. */
3205 wrap.b24 = new_fcport->d_id.b24;
3207 } else if (new_fcport->d_id.b24 == wrap.b24) {
3208 DEBUG2(printk("scsi(%ld): device wrap (%02x%02x%02x)\n",
3209 vha->host_no, new_fcport->d_id.b.domain,
3210 new_fcport->d_id.b.area, new_fcport->d_id.b.al_pa));
3214 /* Bypass if same physical adapter. */
3215 if (new_fcport->d_id.b24 == base_vha->d_id.b24)
3218 /* Bypass virtual ports of the same host. */
3220 if (ha->num_vhosts) {
3221 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
3222 if (new_fcport->d_id.b24 == vp->d_id.b24) {
3231 /* Bypass if same domain and area of adapter. */
3232 if (((new_fcport->d_id.b24 & 0xffff00) ==
3233 (vha->d_id.b24 & 0xffff00)) && ha->current_topology ==
3237 /* Bypass reserved domain fields. */
3238 if ((new_fcport->d_id.b.domain & 0xf0) == 0xf0)
3241 /* Bypass ports whose FCP-4 type is not FCP_SCSI */
3242 if (new_fcport->fc4_type != FC4_TYPE_FCP_SCSI &&
3243 new_fcport->fc4_type != FC4_TYPE_UNKNOWN)
3246 /* Locate matching device in database. */
3248 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3249 if (memcmp(new_fcport->port_name, fcport->port_name,
3255 /* Update port state. */
3256 memcpy(fcport->fabric_port_name,
3257 new_fcport->fabric_port_name, WWN_SIZE);
3258 fcport->fp_speed = new_fcport->fp_speed;
/*
 * If address the same and state FCS_ONLINE, nothing
 * further to do for this entry.
 */
3264 if (fcport->d_id.b24 == new_fcport->d_id.b24 &&
3265 atomic_read(&fcport->state) == FCS_ONLINE) {
/*
 * If device was not a fabric device before.
 */
3272 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
3273 fcport->d_id.b24 = new_fcport->d_id.b24;
3274 fcport->loop_id = FC_NO_LOOP_ID;
3275 fcport->flags |= (FCF_FABRIC_DEVICE |
/*
 * Port ID changed or device was marked to be updated;
 * Log it out if still logged in and mark it for relogin.
 */
3285 fcport->d_id.b24 = new_fcport->d_id.b24;
3286 fcport->flags |= FCF_LOGIN_NEEDED;
3287 if (fcport->loop_id != FC_NO_LOOP_ID &&
3288 (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
3289 fcport->port_type != FCT_INITIATOR &&
3290 fcport->port_type != FCT_BROADCAST) {
3291 ha->isp_ops->fabric_logout(vha, fcport->loop_id,
3292 fcport->d_id.b.domain, fcport->d_id.b.area,
3293 fcport->d_id.b.al_pa);
3294 fcport->loop_id = FC_NO_LOOP_ID;
3302 /* If device was not in our fcports list, then add it. */
3303 list_add_tail(&new_fcport->list, new_fcports);
3305 /* Allocate a new replacement fcport. */
3306 nxt_d_id.b24 = new_fcport->d_id.b24;
3307 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
3308 if (new_fcport == NULL) {
3310 return (QLA_MEMORY_ALLOC_FAILED);
3312 new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
3313 new_fcport->d_id.b24 = nxt_d_id.b24;
/*
 * qla2x00_find_new_loop_id
 *	Scan through the port lists of all vports on this HBA and find a new
 *	usable loop ID for 'dev', starting at dev->loop_id and wrapping at
 *	ha->max_loop_id.
 *
 * Input:
 *	vha = adapter state pointer.
 *	dev = port structure pointer; dev->loop_id is updated in place
 *	      (set to FC_NO_LOOP_ID on failure).
 *
 * Returns:
 *	qla2x00 local function return status code
 *	(QLA_FUNCTION_FAILED when the search wraps with no free ID).
 */
3337 qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
3342 uint16_t first_loop_id;
3343 struct qla_hw_data *ha = vha->hw;
3344 struct scsi_qla_host *vp;
3345 struct scsi_qla_host *tvp;
3349 /* Save starting loop ID. */
3350 first_loop_id = dev->loop_id;
3353 /* Skip loop ID if already used by adapter. */
3354 if (dev->loop_id == vha->loop_id)
3357 /* Skip reserved loop IDs. */
3358 while (qla2x00_is_reserved_id(vha, dev->loop_id))
3361 /* Reset loop ID if passed the end. */
3362 if (dev->loop_id > ha->max_loop_id) {
3363 /* first loop ID. */
3364 dev->loop_id = ha->min_external_loopid;
/* Check for loop ID being already in use on any vport of this HBA. */
3370 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
3371 list_for_each_entry(fcport, &vp->vp_fcports, list) {
3372 if (fcport->loop_id == dev->loop_id &&
3374 /* ID possibly in use */
3383 /* If not in use then it is free to use. */
3388 /* ID in use. Try next value. */
3391 /* If wrap around. No free ID to use. */
3392 if (dev->loop_id == first_loop_id) {
3393 dev->loop_id = FC_NO_LOOP_ID;
3394 rval = QLA_FUNCTION_FAILED;
/*
 * qla2x00_device_resync
 *	Drain the RSCN queue and mark affected devices in the database as
 *	needing resynchronization (lost), skipping duplicate queue entries.
 *
 * Input:
 *	vha = adapter block pointer.
 *
 * Returns:
 *	QLA_RSCNS_HANDLED (or another local status set by code not visible
 *	in this extraction).
 */
3413 qla2x00_device_resync(scsi_qla_host_t *vha)
3418 uint32_t rscn_entry;
3419 uint8_t rscn_out_iter;
3421 port_id_t d_id = {};
3423 rval = QLA_RSCNS_HANDLED;
/* Process entries until the queue is empty, or handle overflow. */
3425 while (vha->rscn_out_ptr != vha->rscn_in_ptr ||
3426 vha->flags.rscn_queue_overflow) {
/* Unpack format byte and 24-bit port ID from the queued entry. */
3428 rscn_entry = vha->rscn_queue[vha->rscn_out_ptr];
3429 format = MSB(MSW(rscn_entry));
3430 d_id.b.domain = LSB(MSW(rscn_entry));
3431 d_id.b.area = MSB(LSW(rscn_entry));
3432 d_id.b.al_pa = LSB(LSW(rscn_entry));
3434 DEBUG(printk("scsi(%ld): RSCN queue entry[%d] = "
3435 "[%02x/%02x%02x%02x].\n",
3436 vha->host_no, vha->rscn_out_ptr, format, d_id.b.domain,
3437 d_id.b.area, d_id.b.al_pa));
3439 vha->rscn_out_ptr++;
3440 if (vha->rscn_out_ptr == MAX_RSCN_COUNT)
3441 vha->rscn_out_ptr = 0;
3443 /* Skip duplicate entries. */
3444 for (rscn_out_iter = vha->rscn_out_ptr;
3445 !vha->flags.rscn_queue_overflow &&
3446 rscn_out_iter != vha->rscn_in_ptr;
3447 rscn_out_iter = (rscn_out_iter ==
3448 (MAX_RSCN_COUNT - 1)) ? 0: rscn_out_iter + 1) {
3450 if (rscn_entry != vha->rscn_queue[rscn_out_iter])
3453 DEBUG(printk("scsi(%ld): Skipping duplicate RSCN queue "
3454 "entry found at [%d].\n", vha->host_no,
3457 vha->rscn_out_ptr = rscn_out_iter;
3460 /* Queue overflow, set switch default case. */
3461 if (vha->flags.rscn_queue_overflow) {
3462 DEBUG(printk("scsi(%ld): device_resync: rscn "
3463 "overflow.\n", vha->host_no));
3466 vha->flags.rscn_queue_overflow = 0;
/* On overflow the queue is drained wholesale. */
3482 vha->rscn_out_ptr = vha->rscn_in_ptr;
/* Mark fabric devices matching the RSCN address (under 'mask') lost. */
3488 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3489 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 ||
3490 (fcport->d_id.b24 & mask) != d_id.b24 ||
3491 fcport->port_type == FCT_BROADCAST)
3494 if (atomic_read(&fcport->state) == FCS_ONLINE) {
3496 fcport->port_type != FCT_INITIATOR) {
3497 qla2x00_mark_device_lost(vha, fcport,
/*
 * qla2x00_fabric_dev_login
 *	Login fabric target device and update the FC port database. Uses the
 *	asynchronous login path when the HBA supports it, otherwise performs a
 *	synchronous fabric login followed by a port-database refresh.
 *
 * Input:
 *	vha         = adapter state pointer.
 *	fcport      = port structure list pointer.
 *	next_loopid = contains value of a new loop ID that can be used
 *		      by the next login attempt.
 *
 * Returns:
 *	qla2x00 local function return status code.
 */
3523 qla2x00_fabric_dev_login(scsi_qla_host_t *vha, fc_port_t *fcport,
3524 uint16_t *next_loopid)
3529 struct qla_hw_data *ha = vha->hw;
3534 if (IS_ALOGIO_CAPABLE(ha)) {
/* Don't queue a second async login while one is in flight. */
3535 if (fcport->flags & FCF_ASYNC_SENT)
3537 fcport->flags |= FCF_ASYNC_SENT;
3538 rval = qla2x00_post_async_login_work(vha, fcport, NULL);
3543 fcport->flags &= ~FCF_ASYNC_SENT;
3544 rval = qla2x00_fabric_login(vha, fcport, next_loopid);
3545 if (rval == QLA_SUCCESS) {
3546 /* Send an ADISC to FCP2 devices.*/
3548 if (fcport->flags & FCF_FCP2_DEVICE)
3550 rval = qla2x00_get_port_database(vha, fcport, opts);
/* Login succeeded but the database refresh failed: drop the port. */
3551 if (rval != QLA_SUCCESS) {
3552 ha->isp_ops->fabric_logout(vha, fcport->loop_id,
3553 fcport->d_id.b.domain, fcport->d_id.b.area,
3554 fcport->d_id.b.al_pa);
3555 qla2x00_mark_device_lost(vha, fcport, 1, 0);
3557 qla2x00_update_fcport(vha, fcport);
/*
 * qla2x00_fabric_login
 *	Issue fabric login command and interpret the mailbox status,
 *	handling port-ID-in-use, loop-ID-in-use and command-error retries.
 *
 * Input:
 *	vha         = adapter block pointer.
 *	fcport      = pointer to FC device type structure.
 *	next_loopid = updated with the loop ID for the next login attempt.
 *
 * Returns:
 *	0 - Login successful
 *	2 - Initiator device
 *	(other non-zero local status codes on failure)
 */
3579 qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
3580 uint16_t *next_loopid)
3584 uint16_t tmp_loopid;
3585 uint16_t mb[MAILBOX_REGISTER_COUNT];
3586 struct qla_hw_data *ha = vha->hw;
3592 DEBUG(printk("scsi(%ld): Trying Fabric Login w/loop id 0x%04x "
3593 "for port %02x%02x%02x.\n",
3594 vha->host_no, fcport->loop_id, fcport->d_id.b.domain,
3595 fcport->d_id.b.area, fcport->d_id.b.al_pa));
3597 /* Login fcport on switch. */
3598 ha->isp_ops->fabric_login(vha, fcport->loop_id,
3599 fcport->d_id.b.domain, fcport->d_id.b.area,
3600 fcport->d_id.b.al_pa, mb, BIT_0);
3601 if (mb[0] == MBS_PORT_ID_USED) {
/*
 * Device has another loop ID. The firmware team
 * recommends the driver perform an implicit login with
 * the specified ID again. The ID we just used is saved
 * here so we return with an ID that can be tried by
 * the next login attempt.
 */
3610 tmp_loopid = fcport->loop_id;
3611 fcport->loop_id = mb[1];
3613 DEBUG(printk("Fabric Login: port in use - next "
3614 "loop id=0x%04x, port Id=%02x%02x%02x.\n",
3615 fcport->loop_id, fcport->d_id.b.domain,
3616 fcport->d_id.b.area, fcport->d_id.b.al_pa));
3618 } else if (mb[0] == MBS_COMMAND_COMPLETE) {
3623 /* A retry occurred before. */
3624 *next_loopid = tmp_loopid;
/*
 * No retry occurred before. Just increment the
 * ID value for next login.
 */
3630 *next_loopid = (fcport->loop_id + 1);
/* mb[1] BIT_0: remote port is an initiator; BIT_1: FCP-2 target. */
3633 if (mb[1] & BIT_0) {
3634 fcport->port_type = FCT_INITIATOR;
3636 fcport->port_type = FCT_TARGET;
3637 if (mb[1] & BIT_1) {
3638 fcport->flags |= FCF_FCP2_DEVICE;
3643 fcport->supported_classes |= FC_COS_CLASS2;
3645 fcport->supported_classes |= FC_COS_CLASS3;
3649 } else if (mb[0] == MBS_LOOP_ID_USED) {
/*
 * Loop ID already used, try next loop ID.
 */
3654 rval = qla2x00_find_new_loop_id(vha, fcport);
3655 if (rval != QLA_SUCCESS) {
3656 /* Ran out of loop IDs to use */
3659 } else if (mb[0] == MBS_COMMAND_ERROR) {
/*
 * Firmware possibly timed out during login. If NO
 * retries are left to do then the device is declared
 * dead.
 */
3665 *next_loopid = fcport->loop_id;
3666 ha->isp_ops->fabric_logout(vha, fcport->loop_id,
3667 fcport->d_id.b.domain, fcport->d_id.b.area,
3668 fcport->d_id.b.al_pa);
3669 qla2x00_mark_device_lost(vha, fcport, 1, 0);
/*
 * Unrecoverable / not handled mailbox status.
 */
3677 DEBUG2(printk("%s(%ld): failed=%x port_id=%02x%02x%02x "
3678 "loop_id=%x jiffies=%lx.\n",
3679 __func__, vha->host_no, mb[0],
3680 fcport->d_id.b.domain, fcport->d_id.b.area,
3681 fcport->d_id.b.al_pa, fcport->loop_id, jiffies));
3683 *next_loopid = fcport->loop_id;
3684 ha->isp_ops->fabric_logout(vha, fcport->loop_id,
3685 fcport->d_id.b.domain, fcport->d_id.b.area,
3686 fcport->d_id.b.al_pa);
3687 fcport->loop_id = FC_NO_LOOP_ID;
3688 fcport->login_retry = 0;
/*
 * qla2x00_local_device_login
 *	Issue local device login command.
 *
 * Input:
 *	vha    = adapter block pointer.
 *	fcport = port to log in to (supplies the loop id of device).
 *
 * Returns (raw integer status, no named #define in the original):
 *	0 - Login successful; non-zero mailbox-derived codes otherwise.
 */
3712 qla2x00_local_device_login(scsi_qla_host_t *vha, fc_port_t *fcport)
3715 uint16_t mb[MAILBOX_REGISTER_COUNT];
3717 memset(mb, 0, sizeof(mb));
3718 rval = qla2x00_login_local_device(vha, fcport, mb, BIT_0);
3719 if (rval == QLA_SUCCESS) {
3720 /* Interrogate mailbox registers for any errors */
3721 if (mb[0] == MBS_COMMAND_ERROR)
3723 else if (mb[0] == MBS_COMMAND_PARAMETER_ERROR)
3724 /* device not in PCB table */
/*
 * qla2x00_loop_resync
 *	Resync with fibre channel devices: wait for firmware ready, issue a
 *	sync marker, then reconfigure the loop, retrying while RSCNs keep
 *	arriving and no abort/loop-down condition is pending.
 *
 * Input:
 *	vha = adapter block pointer.
 *
 * Returns:
 *	QLA_SUCCESS or QLA_FUNCTION_FAILED (e.g. when ISP_ABORT_NEEDED
 *	is raised during the resync).
 */
3742 qla2x00_loop_resync(scsi_qla_host_t *vha)
3744 int rval = QLA_SUCCESS;
3746 struct req_que *req;
3747 struct rsp_que *rsp;
/* With CPU affinity the base request queue is used for the marker. */
3749 if (vha->hw->flags.cpu_affinity_enabled)
3750 req = vha->hw->req_q_map[0];
3755 atomic_set(&vha->loop_state, LOOP_UPDATE);
3756 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
3757 if (vha->flags.online) {
3758 if (!(rval = qla2x00_fw_ready(vha))) {
3759 /* Wait at most MAX_TARGET RSCNs for a stable link. */
3762 atomic_set(&vha->loop_state, LOOP_UPDATE);
3764 /* Issue a marker after FW becomes ready. */
3765 qla2x00_marker(vha, req, rsp, 0, 0,
3767 vha->marker_needed = 0;
3769 /* Remap devices on Loop. */
3770 clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
3772 qla2x00_configure_loop(vha);
/* Retry while resync keeps being requested and conditions allow. */
3774 } while (!atomic_read(&vha->loop_down_timer) &&
3775 !(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
3776 && wait_time && (test_bit(LOOP_RESYNC_NEEDED,
3781 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
3782 return (QLA_FUNCTION_FAILED);
3785 DEBUG2_3(printk("%s(): **** FAILED ****\n", __func__));
/*
 * qla2x00_update_fcports
 *	Walk every vport on this HBA and perform the deferred removal of
 *	transport rport references (fcport->drport) for configured ports.
 *
 * Input:
 *	base_vha = base adapter; its hw->vp_list is traversed.
 */
3791 qla2x00_update_fcports(scsi_qla_host_t *base_vha)
3794 struct scsi_qla_host *tvp, *vha;
3796 /* Go with deferred removal of rport references. */
3797 list_for_each_entry_safe(vha, tvp, &base_vha->hw->vp_list, list)
3798 list_for_each_entry(fcport, &vha->vp_fcports, list)
3799 if (fcport && fcport->drport &&
3800 atomic_read(&fcport->state) != FCS_UNCONFIGURED)
3801 qla2x00_rport_del(fcport);
/*
 * qla2x00_abort_isp_cleanup
 *	Pre-reset cleanup for ISP error recovery: take the adapter offline,
 *	reset the chip (non-82XX), mark all devices lost on every vport, wait
 *	for 82XX DMA completion, and fail back all outstanding commands.
 *
 * Input:
 *	vha = adapter block pointer.
 */
3805 qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
3807 struct qla_hw_data *ha = vha->hw;
3808 struct scsi_qla_host *vp, *base_vha = pci_get_drvdata(ha->pdev);
3809 struct scsi_qla_host *tvp;
3811 vha->flags.online = 0;
3812 ha->flags.chip_reset_done = 0;
3813 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3814 ha->qla_stats.total_isp_aborts++;
3816 qla_printk(KERN_INFO, ha,
3817 "Performing ISP error recovery - ha= %p.\n", ha);
3819 /* Chip reset does not apply to 82XX */
3820 if (!IS_QLA82XX(ha))
3821 ha->isp_ops->reset_chip(vha);
3823 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
3824 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
3825 atomic_set(&vha->loop_state, LOOP_DOWN);
3826 qla2x00_mark_all_devices_lost(vha, 0);
3827 list_for_each_entry_safe(vp, tvp, &base_vha->hw->vp_list, list)
3828 qla2x00_mark_all_devices_lost(vp, 0);
/* Loop was already down: just re-arm the loop-down timer. */
3830 if (!atomic_read(&vha->loop_down_timer))
3831 atomic_set(&vha->loop_down_timer,
3835 /* Make sure for ISP 82XX IO DMA is complete */
3836 if (IS_QLA82XX(ha)) {
3837 if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0,
3838 WAIT_HOST) == QLA_SUCCESS) {
3839 DEBUG2(qla_printk(KERN_INFO, ha,
3840 "Done wait for pending commands\n"));
3844 /* Requeue all commands in outstanding command list. */
3845 qla2x00_abort_all_cmds(vha, DID_RESET << 16);
/*
 * qla2x00_abort_isp
 *	Resets the ISP and aborts all outstanding commands, then restarts the
 *	firmware; re-enables trace buffers (FCE/EFT) on success and manages
 *	the ISP abort retry counter on failure. Finally aborts each vport.
 *
 * Input:
 *	vha = adapter block pointer.
 *
 * Returns:
 *	status from the restart attempt (0 on success).
 */
3859 qla2x00_abort_isp(scsi_qla_host_t *vha)
3863 struct qla_hw_data *ha = vha->hw;
3864 struct scsi_qla_host *vp;
3865 struct scsi_qla_host *tvp;
3866 struct req_que *req = ha->req_q_map[0];
3868 if (vha->flags.online) {
3869 qla2x00_abort_isp_cleanup(vha);
/* A permanently failed PCI channel cannot be recovered here. */
3871 if (unlikely(pci_channel_offline(ha->pdev) &&
3872 ha->flags.pci_channel_io_perm_failure)) {
3873 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
3878 ha->isp_ops->get_flash_version(vha, req->ring);
3880 ha->isp_ops->nvram_config(vha);
3882 if (!qla2x00_restart_isp(vha)) {
3883 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
3885 if (!atomic_read(&vha->loop_down_timer)) {
/*
 * Issue marker command only when we are going
 * to start the I/O.
 */
3890 vha->marker_needed = 1;
3893 vha->flags.online = 1;
3895 ha->isp_ops->enable_intrs(ha);
3897 ha->isp_abort_cnt = 0;
3898 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
3901 qla2x00_get_fw_version(vha,
3902 &ha->fw_major_version,
3903 &ha->fw_minor_version,
3904 &ha->fw_subminor_version,
3905 &ha->fw_attributes, &ha->fw_memory_size,
3906 ha->mpi_version, &ha->mpi_capabilities,
/* Re-enable the Fibre Channel Event (FCE) trace buffer. */
3910 ha->flags.fce_enabled = 1;
3912 fce_calc_size(ha->fce_bufs));
3913 rval = qla2x00_enable_fce_trace(vha,
3914 ha->fce_dma, ha->fce_bufs, ha->fce_mb,
3917 qla_printk(KERN_WARNING, ha,
3918 "Unable to reinitialize FCE "
3920 ha->flags.fce_enabled = 0;
/* Re-enable the Extended Firmware Trace (EFT) buffer. */
3925 memset(ha->eft, 0, EFT_SIZE);
3926 rval = qla2x00_enable_eft_trace(vha,
3927 ha->eft_dma, EFT_NUM_BUFFERS);
3929 qla_printk(KERN_WARNING, ha,
3930 "Unable to reinitialize EFT "
3934 } else { /* failed the ISP abort */
3935 vha->flags.online = 1;
3936 if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
3937 if (ha->isp_abort_cnt == 0) {
3938 qla_printk(KERN_WARNING, ha,
3939 "ISP error recovery failed - "
3940 "board disabled\n");
/*
 * The next call disables the board completely.
 */
3945 ha->isp_ops->reset_adapter(vha);
3946 vha->flags.online = 0;
3947 clear_bit(ISP_ABORT_RETRY,
3950 } else { /* schedule another ISP abort */
3951 ha->isp_abort_cnt--;
3952 DEBUG(printk("qla%ld: ISP abort - "
3953 "retry remaining %d\n",
3954 vha->host_no, ha->isp_abort_cnt));
/* First failure: arm the retry counter. */
3958 ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT;
3959 DEBUG(printk("qla2x00(%ld): ISP error recovery "
3960 "- retrying (%d) more times\n",
3961 vha->host_no, ha->isp_abort_cnt));
3962 set_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
3970 DEBUG(printk(KERN_INFO
3971 "qla2x00_abort_isp(%ld): succeeded.\n",
/* Propagate the recovery to every virtual port. */
3973 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
3975 qla2x00_vp_abort_isp(vp);
3978 qla_printk(KERN_INFO, ha,
3979 "qla2x00_abort_isp: **** FAILED ****\n");
/*
 * qla2x00_restart_isp
 *	Restarts the ISP after a reset: reload firmware if needed, init rings
 *	and queues, wait for firmware ready, issue a sync marker, and rerun
 *	loop configuration until the loop is stable.
 *
 * Input:
 *	vha = adapter block pointer.
 *
 * Returns:
 *	0 on success, non-zero status from the failing step otherwise.
 */
3996 qla2x00_restart_isp(scsi_qla_host_t *vha)
4000 struct qla_hw_data *ha = vha->hw;
4001 struct req_que *req = ha->req_q_map[0];
4002 struct rsp_que *rsp = ha->rsp_q_map[0];
4004 /* If firmware needs to be loaded */
4005 if (qla2x00_isp_firmware(vha)) {
4006 vha->flags.online = 0;
4007 status = ha->isp_ops->chip_diag(vha);
4009 status = qla2x00_setup_chip(vha);
4012 if (!status && !(status = qla2x00_init_rings(vha))) {
4013 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
4014 ha->flags.chip_reset_done = 1;
4015 /* Initialize the queues in use */
4016 qla25xx_init_queues(ha);
4018 status = qla2x00_fw_ready(vha);
4020 DEBUG(printk("%s(): Start configure loop, "
4021 "status = %d\n", __func__, status));
4023 /* Issue a marker after FW becomes ready. */
4024 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
4026 vha->flags.online = 1;
4027 /* Wait at most MAX_TARGET RSCNs for a stable link. */
4030 clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
4031 qla2x00_configure_loop(vha);
/* Repeat while further resyncs are requested and no abort pending. */
4033 } while (!atomic_read(&vha->loop_down_timer) &&
4034 !(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
4035 && wait_time && (test_bit(LOOP_RESYNC_NEEDED,
4039 /* if no cable then assume it's good */
4040 if ((vha->device_flags & DFLG_NO_CABLE))
4043 DEBUG(printk("%s(): Configure loop done, status = 0x%x\n",
/*
 * qla25xx_init_queues
 *	Re-initialize all auxiliary (non-base, index >= 1) response and
 *	request queues after a chip reset, logging per-queue status.
 *
 * Input:
 *	ha = HBA hardware data pointer.
 */
4051 qla25xx_init_queues(struct qla_hw_data *ha)
4053 struct rsp_que *rsp = NULL;
4054 struct req_que *req = NULL;
4055 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
4059 for (i = 1; i < ha->max_rsp_queues; i++) {
4060 rsp = ha->rsp_q_map[i];
4062 rsp->options &= ~BIT_0;
4063 ret = qla25xx_init_rsp_que(base_vha, rsp);
4064 if (ret != QLA_SUCCESS)
4065 DEBUG2_17(printk(KERN_WARNING
4066 "%s Rsp que:%d init failed\n", __func__,
4069 DEBUG2_17(printk(KERN_INFO
4070 "%s Rsp que:%d inited\n", __func__,
4074 for (i = 1; i < ha->max_req_queues; i++) {
4075 req = ha->req_q_map[i];
4077 /* Clear outstanding commands array. */
4078 req->options &= ~BIT_0;
4079 ret = qla25xx_init_req_que(base_vha, req);
4080 if (ret != QLA_SUCCESS)
4081 DEBUG2_17(printk(KERN_WARNING
4082 "%s Req que:%d init failed\n", __func__,
4085 DEBUG2_17(printk(KERN_WARNING
4086 "%s Req que:%d inited\n", __func__,
/*
 * qla2x00_reset_adapter
 *	Take the (ISP2xxx) adapter offline: disable interrupts, then reset
 *	and release the RISC via the HCCR register under the hardware lock.
 *
 * Input:
 *	vha = adapter block pointer.
 */
4101 qla2x00_reset_adapter(scsi_qla_host_t *vha)
4103 unsigned long flags = 0;
4104 struct qla_hw_data *ha = vha->hw;
4105 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
4107 vha->flags.online = 0;
4108 ha->isp_ops->disable_intrs(ha);
4110 spin_lock_irqsave(&ha->hardware_lock, flags);
4111 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
4112 RD_REG_WORD(&reg->hccr); /* PCI Posting. */
4113 WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
4114 RD_REG_WORD(&reg->hccr); /* PCI Posting. */
4115 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4119 qla24xx_reset_adapter(scsi_qla_host_t *vha)
4121 unsigned long flags = 0;
4122 struct qla_hw_data *ha = vha->hw;
4123 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
4128 vha->flags.online = 0;
4129 ha->isp_ops->disable_intrs(ha);
4131 spin_lock_irqsave(&ha->hardware_lock, flags);
4132 WRT_REG_DWORD(®->hccr, HCCRX_SET_RISC_RESET);
4133 RD_REG_DWORD(®->hccr);
4134 WRT_REG_DWORD(®->hccr, HCCRX_REL_RISC_PAUSE);
4135 RD_REG_DWORD(®->hccr);
4136 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4138 if (IS_NOPOLLING_TYPE(ha))
4139 ha->isp_ops->enable_intrs(ha);
4142 /* On sparc systems, obtain port and node WWN from firmware
4145 static void qla24xx_nvram_wwn_from_ofw(scsi_qla_host_t *vha,
4146 struct nvram_24xx *nv)
4149 struct qla_hw_data *ha = vha->hw;
4150 struct pci_dev *pdev = ha->pdev;
4151 struct device_node *dp = pci_device_to_OF_node(pdev);
4155 val = of_get_property(dp, "port-wwn", &len);
4156 if (val && len >= WWN_SIZE)
4157 memcpy(nv->port_name, val, WWN_SIZE);
4159 val = of_get_property(dp, "node-wwn", &len);
4160 if (val && len >= WWN_SIZE)
4161 memcpy(nv->node_name, val, WWN_SIZE);
/*
 * qla24xx_nvram_config
 *	Read the ISP24xx NVRAM, validate its checksum/signature, fall back to
 *	sane defaults when invalid, and populate the init control block (ICB)
 *	plus driver tunables (timeouts, retry counts, ZIO mode).
 *
 * NOTE(review): many lines are elided from this listing (embedded numbering
 * jumps); declarations (rval, cnt, chksum, dptr), else-branches, closing
 * braces and the final return are missing.  Comments below describe only
 * what the visible lines show.
 */
4166 qla24xx_nvram_config(scsi_qla_host_t *vha)
4169 struct init_cb_24xx *icb;
4170 struct nvram_24xx *nv;
4172 uint8_t *dptr1, *dptr2;
4175 struct qla_hw_data *ha = vha->hw;
4178 icb = (struct init_cb_24xx *)ha->init_cb;
4181 /* Determine NVRAM starting address. */
/* PCI function 0 and 1 have distinct NVRAM/VPD regions in flash. */
4182 if (ha->flags.port0) {
4183 ha->nvram_base = FA_NVRAM_FUNC0_ADDR;
4184 ha->vpd_base = FA_NVRAM_VPD0_ADDR;
4186 ha->nvram_base = FA_NVRAM_FUNC1_ADDR;
4187 ha->vpd_base = FA_NVRAM_VPD1_ADDR;
4189 ha->nvram_size = sizeof(struct nvram_24xx);
4190 ha->vpd_size = FA_NVRAM_VPD_SIZE;
4192 ha->vpd_size = FA_VPD_SIZE_82XX;
4194 /* Get VPD data into cache */
4195 ha->vpd = ha->nvram + VPD_OFFSET;
4196 ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd,
4197 ha->nvram_base - FA_NVRAM_FUNC0_ADDR, FA_NVRAM_VPD_SIZE * 4);
4199 /* Get NVRAM data into cache and calculate checksum. */
/* Sum of all 32-bit words must be zero for a valid image. */
4200 dptr = (uint32_t *)nv;
4201 ha->isp_ops->read_nvram(vha, (uint8_t *)dptr, ha->nvram_base,
4203 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++)
4204 chksum += le32_to_cpu(*dptr++);
4206 DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", vha->host_no));
4207 DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size));
4209 /* Bad NVRAM data, set defaults parameters. */
/* Valid images carry the literal "ISP " id and a new-enough version. */
4210 if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P'
4211 || nv->id[3] != ' ' ||
4212 nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) {
4213 /* Reset NVRAM data. */
4214 qla_printk(KERN_WARNING, ha, "Inconsistent NVRAM detected: "
4215 "checksum=0x%x id=%c version=0x%x.\n", chksum, nv->id[0],
4216 le16_to_cpu(nv->nvram_version));
4217 qla_printk(KERN_WARNING, ha, "Falling back to functioning (yet "
4218 "invalid -- WWPN) defaults.\n");
4221 * Set default initialization control block.
4223 memset(nv, 0, ha->nvram_size);
4224 nv->nvram_version = __constant_cpu_to_le16(ICB_VERSION);
4225 nv->version = __constant_cpu_to_le16(ICB_VERSION);
4226 nv->frame_payload_size = __constant_cpu_to_le16(2048);
4227 nv->execution_throttle = __constant_cpu_to_le16(0xFFFF);
4228 nv->exchange_count = __constant_cpu_to_le16(0);
4229 nv->hard_address = __constant_cpu_to_le16(124);
/* Default WWPN 21:00:00:e0:8b:1c:55:86 -- byte 1 offset by PCI port. */
4230 nv->port_name[0] = 0x21;
4231 nv->port_name[1] = 0x00 + ha->port_no;
4232 nv->port_name[2] = 0x00;
4233 nv->port_name[3] = 0xe0;
4234 nv->port_name[4] = 0x8b;
4235 nv->port_name[5] = 0x1c;
4236 nv->port_name[6] = 0x55;
4237 nv->port_name[7] = 0x86;
4238 nv->node_name[0] = 0x20;
4239 nv->node_name[1] = 0x00;
4240 nv->node_name[2] = 0x00;
4241 nv->node_name[3] = 0xe0;
4242 nv->node_name[4] = 0x8b;
4243 nv->node_name[5] = 0x1c;
4244 nv->node_name[6] = 0x55;
4245 nv->node_name[7] = 0x86;
/* On sparc, let OpenFirmware override the default WWNs. */
4246 qla24xx_nvram_wwn_from_ofw(vha, nv);
4247 nv->login_retry_count = __constant_cpu_to_le16(8);
4248 nv->interrupt_delay_timer = __constant_cpu_to_le16(0);
4249 nv->login_timeout = __constant_cpu_to_le16(0);
4250 nv->firmware_options_1 =
4251 __constant_cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
4252 nv->firmware_options_2 = __constant_cpu_to_le32(2 << 4);
4253 nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12);
4254 nv->firmware_options_3 = __constant_cpu_to_le32(2 << 13);
4255 nv->host_p = __constant_cpu_to_le32(BIT_11|BIT_10);
4256 nv->efi_parameters = __constant_cpu_to_le32(0);
4257 nv->reset_delay = 5;
4258 nv->max_luns_per_target = __constant_cpu_to_le16(128);
4259 nv->port_down_retry_count = __constant_cpu_to_le16(30);
4260 nv->link_down_timeout = __constant_cpu_to_le16(30);
4265 /* Reset Initialization control block */
4266 memset(icb, 0, ha->init_cb_size);
/* The ICB is filled from NVRAM in two raw byte-copy segments. */
4268 /* Copy 1st segment. */
4269 dptr1 = (uint8_t *)icb;
4270 dptr2 = (uint8_t *)&nv->version;
4271 cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version;
4273 *dptr1++ = *dptr2++;
4275 icb->login_retry_count = nv->login_retry_count;
4276 icb->link_down_on_nos = nv->link_down_on_nos;
4278 /* Copy 2nd segment. */
4279 dptr1 = (uint8_t *)&icb->interrupt_delay_timer;
4280 dptr2 = (uint8_t *)&nv->interrupt_delay_timer;
4281 cnt = (uint8_t *)&icb->reserved_3 -
4282 (uint8_t *)&icb->interrupt_delay_timer;
4284 *dptr1++ = *dptr2++;
4287 * Setup driver NVRAM options.
4289 qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
4292 /* Use alternate WWN? */
4293 if (nv->host_p & __constant_cpu_to_le32(BIT_15)) {
4294 memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
4295 memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
4298 /* Prepare nodename */
4299 if ((icb->firmware_options_1 & __constant_cpu_to_le32(BIT_14)) == 0) {
4301 * Firmware will apply the following mask if the nodename was
/* Derive node name from port name; high nibble of byte 0 masked. */
4304 memcpy(icb->node_name, icb->port_name, WWN_SIZE);
4305 icb->node_name[0] &= 0xF0;
4308 /* Set host adapter parameters. */
4309 ha->flags.disable_risc_code_load = 0;
4310 ha->flags.enable_lip_reset = 0;
4311 ha->flags.enable_lip_full_login =
4312 le32_to_cpu(nv->host_p) & BIT_10 ? 1: 0;
4313 ha->flags.enable_target_reset =
4314 le32_to_cpu(nv->host_p) & BIT_11 ? 1: 0;
4315 ha->flags.enable_led_scheme = 0;
4316 ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1: 0;
4318 ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
4319 (BIT_6 | BIT_5 | BIT_4)) >> 4;
4321 memcpy(ha->fw_seriallink_options24, nv->seriallink_options,
4322 sizeof(ha->fw_seriallink_options24));
4324 /* save HBA serial number */
4325 ha->serial0 = icb->port_name[5];
4326 ha->serial1 = icb->port_name[6];
4327 ha->serial2 = icb->port_name[7];
4328 memcpy(vha->node_name, icb->node_name, WWN_SIZE);
4329 memcpy(vha->port_name, icb->port_name, WWN_SIZE);
4331 icb->execution_throttle = __constant_cpu_to_le16(0xFFFF);
4333 ha->retry_count = le16_to_cpu(nv->login_retry_count);
4335 /* Set minimum login_timeout to 4 seconds. */
/* Module parameter ql2xlogintimeout raises the NVRAM value; floor is 4s. */
4336 if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout)
4337 nv->login_timeout = cpu_to_le16(ql2xlogintimeout);
4338 if (le16_to_cpu(nv->login_timeout) < 4)
4339 nv->login_timeout = __constant_cpu_to_le16(4);
4340 ha->login_timeout = le16_to_cpu(nv->login_timeout);
4341 icb->login_timeout = nv->login_timeout;
4343 /* Set minimum RATOV to 100 tenths of a second. */
4346 ha->loop_reset_delay = nv->reset_delay;
4348 /* Link Down Timeout = 0:
4350 * When Port Down timer expires we will start returning
4351 * I/O's to OS with "DID_NO_CONNECT".
4353 * Link Down Timeout != 0:
4355 * The driver waits for the link to come up after link down
4356 * before returning I/Os to OS with "DID_NO_CONNECT".
4358 if (le16_to_cpu(nv->link_down_timeout) == 0) {
4359 ha->loop_down_abort_time =
4360 (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
4362 ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout);
4363 ha->loop_down_abort_time =
4364 (LOOP_DOWN_TIME - ha->link_down_timeout);
4367 /* Need enough time to try and get the port back. */
4368 ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count);
4369 if (qlport_down_retry)
4370 ha->port_down_retry_count = qlport_down_retry;
4372 /* Set login_retry_count */
4373 ha->login_retry_count = le16_to_cpu(nv->login_retry_count);
4374 if (ha->port_down_retry_count ==
4375 le16_to_cpu(nv->port_down_retry_count) &&
4376 ha->port_down_retry_count > 3)
4377 ha->login_retry_count = ha->port_down_retry_count;
4378 else if (ha->port_down_retry_count > (int)ha->login_retry_count)
4379 ha->login_retry_count = ha->port_down_retry_count;
4380 if (ql2xloginretrycount)
4381 ha->login_retry_count = ql2xloginretrycount;
/* ZIO (interrupt coalescing) is configured only on the first init. */
4384 if (!vha->flags.init_done) {
4385 ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
4386 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
4387 ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
4388 le16_to_cpu(icb->interrupt_delay_timer): 2;
4390 icb->firmware_options_2 &= __constant_cpu_to_le32(
4391 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
4392 vha->flags.process_response_queue = 0;
4393 if (ha->zio_mode != QLA_ZIO_DISABLED) {
4394 ha->zio_mode = QLA_ZIO_MODE_6;
4396 DEBUG2(printk("scsi(%ld): ZIO mode %d enabled; timer delay "
4397 "(%d us).\n", vha->host_no, ha->zio_mode,
4398 ha->zio_timer * 100));
4399 qla_printk(KERN_INFO, ha,
4400 "ZIO mode %d enabled; timer delay (%d us).\n",
4401 ha->zio_mode, ha->zio_timer * 100);
4403 icb->firmware_options_2 |= cpu_to_le32(
4404 (uint32_t)ha->zio_mode);
4405 icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
4406 vha->flags.process_response_queue = 1;
4410 DEBUG2_3(printk(KERN_WARNING
4411 "scsi(%ld): NVRAM configuration failed!\n", vha->host_no));
/*
 * qla24xx_load_risc_flash
 *	Load the RISC firmware segments from on-board flash at 'faddr' into
 *	adapter RAM, validating the image header first.  Sets *srisc_addr to
 *	the first segment's load address.
 *
 * NOTE(review): lines are elided from this listing (declarations of i,
 *	risc_addr/risc_size, the faddr/segment bookkeeping at the loop
 *	bottoms, closing braces and the final return).  Comments describe
 *	only the visible lines.
 */
4417 qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
4420 int rval = QLA_SUCCESS;
4421 int segments, fragment;
4422 uint32_t *dcode, dlen;
4426 struct qla_hw_data *ha = vha->hw;
4427 struct req_que *req = ha->req_q_map[0];
4429 qla_printk(KERN_INFO, ha,
4430 "FW: Loading from flash (%x)...\n", faddr);
4434 segments = FA_RISC_CODE_SEGMENTS;
/* The base request ring doubles as a DMA-able staging buffer. */
4435 dcode = (uint32_t *)req->ring;
4438 /* Validate firmware image by checking version. */
/* All-ones or all-zeros version words mean blank/erased flash. */
4439 qla24xx_read_flash_data(vha, dcode, faddr + 4, 4);
4440 for (i = 0; i < 4; i++)
4441 dcode[i] = be32_to_cpu(dcode[i]);
4442 if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff &&
4443 dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
4444 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
4446 qla_printk(KERN_WARNING, ha,
4447 "Unable to verify integrity of flash firmware image!\n");
4448 qla_printk(KERN_WARNING, ha,
4449 "Firmware data: %08x %08x %08x %08x!\n", dcode[0],
4450 dcode[1], dcode[2], dcode[3]);
4452 return QLA_FUNCTION_FAILED;
4455 while (segments && rval == QLA_SUCCESS) {
4456 /* Read segment's load information. */
4457 qla24xx_read_flash_data(vha, dcode, faddr, 4);
4459 risc_addr = be32_to_cpu(dcode[2]);
/* Record the first segment's address as the firmware entry point. */
4460 *srisc_addr = *srisc_addr == 0 ? risc_addr : *srisc_addr;
4461 risc_size = be32_to_cpu(dcode[3]);
/* Transfer the segment in fw_transfer_size-bounded fragments. */
4464 while (risc_size > 0 && rval == QLA_SUCCESS) {
4465 dlen = (uint32_t)(ha->fw_transfer_size >> 2);
4466 if (dlen > risc_size)
4469 DEBUG7(printk("scsi(%ld): Loading risc segment@ risc "
4470 "addr %x, number of dwords 0x%x, offset 0x%x.\n",
4471 vha->host_no, risc_addr, dlen, faddr));
4473 qla24xx_read_flash_data(vha, dcode, faddr, dlen);
4474 for (i = 0; i < dlen; i++)
4475 dcode[i] = swab32(dcode[i]);
4477 rval = qla2x00_load_ram(vha, req->dma, risc_addr,
4480 DEBUG(printk("scsi(%ld):[ERROR] Failed to load "
4481 "segment %d of firmware\n", vha->host_no,
4483 qla_printk(KERN_WARNING, ha,
4484 "[ERROR] Failed to load segment %d of "
4485 "firmware\n", fragment);
/* Where users can fetch firmware images referenced by the error messages. */
4502 #define QLA_FW_URL "ftp://ftp.qlogic.com/outgoing/linux/firmware/"
/*
 * qla2x00_load_risc
 *	Load 16-bit-word RISC firmware obtained via request_firmware() into
 *	adapter RAM (pre-FWI2 ISPs).  Sets *srisc_addr to the entry address.
 *
 * NOTE(review): lines are elided from this listing (rval/i declarations,
 *	the seg-table setup, fragment/segment advance logic, closing braces
 *	and the success return).  Comments describe only the visible lines.
 */
4505 qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4509 uint16_t *wcode, *fwcode;
4510 uint32_t risc_addr, risc_size, fwclen, wlen, *seg;
4511 struct fw_blob *blob;
4512 struct qla_hw_data *ha = vha->hw;
4513 struct req_que *req = ha->req_q_map[0];
4515 /* Load firmware blob. */
4516 blob = qla2x00_request_firmware(vha);
4518 qla_printk(KERN_ERR, ha, "Firmware image unavailable.\n");
4519 qla_printk(KERN_ERR, ha, "Firmware images can be retrieved "
4520 "from: " QLA_FW_URL ".\n");
4521 return QLA_FUNCTION_FAILED;
/* Base request ring doubles as the DMA staging buffer. */
4526 wcode = (uint16_t *)req->ring;
4528 fwcode = (uint16_t *)blob->fw->data;
4531 /* Validate firmware image by checking version. */
/* Image must hold at least the 8-word header before we read words 4..7. */
4532 if (blob->fw->size < 8 * sizeof(uint16_t)) {
4533 qla_printk(KERN_WARNING, ha,
4534 "Unable to verify integrity of firmware image (%Zd)!\n",
4536 goto fail_fw_integrity;
4538 for (i = 0; i < 4; i++)
4539 wcode[i] = be16_to_cpu(fwcode[i + 4]);
/* All-ones or all-zeros version words indicate a corrupt blob. */
4540 if ((wcode[0] == 0xffff && wcode[1] == 0xffff && wcode[2] == 0xffff &&
4541 wcode[3] == 0xffff) || (wcode[0] == 0 && wcode[1] == 0 &&
4542 wcode[2] == 0 && wcode[3] == 0)) {
4543 qla_printk(KERN_WARNING, ha,
4544 "Unable to verify integrity of firmware image!\n");
4545 qla_printk(KERN_WARNING, ha,
4546 "Firmware data: %04x %04x %04x %04x!\n", wcode[0],
4547 wcode[1], wcode[2], wcode[3]);
4548 goto fail_fw_integrity;
/* Iterate the segment table until a zero terminator. */
4552 while (*seg && rval == QLA_SUCCESS) {
4554 *srisc_addr = *srisc_addr == 0 ? *seg : *srisc_addr;
4555 risc_size = be16_to_cpu(fwcode[3]);
4557 /* Validate firmware image size. */
/* Running total must never exceed the blob -- guards truncated images. */
4558 fwclen += risc_size * sizeof(uint16_t);
4559 if (blob->fw->size < fwclen) {
4560 qla_printk(KERN_WARNING, ha,
4561 "Unable to verify integrity of firmware image "
4562 "(%Zd)!\n", blob->fw->size);
4563 goto fail_fw_integrity;
/* Transfer the segment in fw_transfer_size-bounded fragments. */
4567 while (risc_size > 0 && rval == QLA_SUCCESS) {
4568 wlen = (uint16_t)(ha->fw_transfer_size >> 1);
4569 if (wlen > risc_size)
4572 DEBUG7(printk("scsi(%ld): Loading risc segment@ risc "
4573 "addr %x, number of words 0x%x.\n", vha->host_no,
4576 for (i = 0; i < wlen; i++)
4577 wcode[i] = swab16(fwcode[i]);
4579 rval = qla2x00_load_ram(vha, req->dma, risc_addr,
4582 DEBUG(printk("scsi(%ld):[ERROR] Failed to load "
4583 "segment %d of firmware\n", vha->host_no,
4585 qla_printk(KERN_WARNING, ha,
4586 "[ERROR] Failed to load segment %d of "
4587 "firmware\n", fragment);
4603 return QLA_FUNCTION_FAILED;
/*
 * qla24xx_load_risc_blob
 *	Load 32-bit-dword RISC firmware obtained via request_firmware() into
 *	adapter RAM (FWI2-capable ISPs).  Sets *srisc_addr to the first
 *	segment's load address.
 *
 * NOTE(review): lines are elided from this listing (rval/i/segments
 *	declarations, blob NULL check, fragment/segment advance logic,
 *	closing braces and the success return).  Comments describe only the
 *	visible lines.
 */
4607 qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4610 int segments, fragment;
4611 uint32_t *dcode, dlen;
4615 struct fw_blob *blob;
4616 uint32_t *fwcode, fwclen;
4617 struct qla_hw_data *ha = vha->hw;
4618 struct req_que *req = ha->req_q_map[0];
4620 /* Load firmware blob. */
4621 blob = qla2x00_request_firmware(vha);
4623 qla_printk(KERN_ERR, ha, "Firmware image unavailable.\n");
4624 qla_printk(KERN_ERR, ha, "Firmware images can be retrieved "
4625 "from: " QLA_FW_URL ".\n");
4627 return QLA_FUNCTION_FAILED;
4630 qla_printk(KERN_INFO, ha,
4631 "FW: Loading via request-firmware...\n");
4635 segments = FA_RISC_CODE_SEGMENTS;
/* Base request ring doubles as the DMA staging buffer. */
4636 dcode = (uint32_t *)req->ring;
4638 fwcode = (uint32_t *)blob->fw->data;
4641 /* Validate firmware image by checking version. */
4642 if (blob->fw->size < 8 * sizeof(uint32_t)) {
4643 qla_printk(KERN_WARNING, ha,
4644 "Unable to verify integrity of firmware image (%Zd)!\n",
4646 goto fail_fw_integrity;
4648 for (i = 0; i < 4; i++)
4649 dcode[i] = be32_to_cpu(fwcode[i + 4]);
/* All-ones or all-zeros version dwords indicate a corrupt blob. */
4650 if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff &&
4651 dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
4652 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
4654 qla_printk(KERN_WARNING, ha,
4655 "Unable to verify integrity of firmware image!\n");
4656 qla_printk(KERN_WARNING, ha,
4657 "Firmware data: %08x %08x %08x %08x!\n", dcode[0],
4658 dcode[1], dcode[2], dcode[3]);
4659 goto fail_fw_integrity;
4662 while (segments && rval == QLA_SUCCESS) {
4663 risc_addr = be32_to_cpu(fwcode[2]);
/* Record the first segment's address as the firmware entry point. */
4664 *srisc_addr = *srisc_addr == 0 ? risc_addr : *srisc_addr;
4665 risc_size = be32_to_cpu(fwcode[3]);
4667 /* Validate firmware image size. */
/* Running total must never exceed the blob -- guards truncated images. */
4668 fwclen += risc_size * sizeof(uint32_t);
4669 if (blob->fw->size < fwclen) {
4670 qla_printk(KERN_WARNING, ha,
4671 "Unable to verify integrity of firmware image "
4672 "(%Zd)!\n", blob->fw->size);
4674 goto fail_fw_integrity;
/* Transfer the segment in fw_transfer_size-bounded fragments. */
4678 while (risc_size > 0 && rval == QLA_SUCCESS) {
4679 dlen = (uint32_t)(ha->fw_transfer_size >> 2);
4680 if (dlen > risc_size)
4683 DEBUG7(printk("scsi(%ld): Loading risc segment@ risc "
4684 "addr %x, number of dwords 0x%x.\n", vha->host_no,
4687 for (i = 0; i < dlen; i++)
4688 dcode[i] = swab32(fwcode[i]);
4690 rval = qla2x00_load_ram(vha, req->dma, risc_addr,
4693 DEBUG(printk("scsi(%ld):[ERROR] Failed to load "
4694 "segment %d of firmware\n", vha->host_no,
4696 qla_printk(KERN_WARNING, ha,
4697 "[ERROR] Failed to load segment %d of "
4698 "firmware\n", fragment);
4714 return QLA_FUNCTION_FAILED;
4718 qla24xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4722 if (ql2xfwloadbin == 1)
4723 return qla81xx_load_risc(vha, srisc_addr);
4727 * 1) Firmware via request-firmware interface (.bin file).
4728 * 2) Firmware residing in flash.
4730 rval = qla24xx_load_risc_blob(vha, srisc_addr);
4731 if (rval == QLA_SUCCESS)
4734 return qla24xx_load_risc_flash(vha, srisc_addr,
4735 vha->hw->flt_region_fw);
4739 qla81xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4742 struct qla_hw_data *ha = vha->hw;
4744 if (ql2xfwloadbin == 2)
4749 * 1) Firmware residing in flash.
4750 * 2) Firmware via request-firmware interface (.bin file).
4751 * 3) Golden-Firmware residing in flash -- limited operation.
4753 rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw);
4754 if (rval == QLA_SUCCESS)
4758 rval = qla24xx_load_risc_blob(vha, srisc_addr);
4759 if (rval == QLA_SUCCESS || !ha->flt_region_gold_fw)
4762 qla_printk(KERN_ERR, ha,
4763 "FW: Attempting to fallback to golden firmware...\n");
4764 rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_gold_fw);
4765 if (rval != QLA_SUCCESS)
4768 qla_printk(KERN_ERR, ha,
4769 "FW: Please update operational firmware...\n");
4770 ha->flags.running_gold_fw = 1;
4776 qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
4779 struct qla_hw_data *ha = vha->hw;
4781 if (ha->flags.pci_channel_io_perm_failure)
4783 if (!IS_FWI2_CAPABLE(ha))
4785 if (!ha->fw_major_version)
4788 ret = qla2x00_stop_firmware(vha);
4789 for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT &&
4790 ret != QLA_INVALID_COMMAND && retries ; retries--) {
4791 ha->isp_ops->reset_chip(vha);
4792 if (ha->isp_ops->chip_diag(vha) != QLA_SUCCESS)
4794 if (qla2x00_setup_chip(vha) != QLA_SUCCESS)
4796 qla_printk(KERN_INFO, ha,
4797 "Attempting retry of stop-firmware command...\n");
4798 ret = qla2x00_stop_firmware(vha);
/*
 * qla24xx_configure_vhba
 *	Bring an NPIV virtual host online: wait for firmware readiness on
 *	the physical (base) host, issue a sync marker, log into the SNS,
 *	and kick off a loop resync.
 *
 * NOTE(review): lines are elided from this listing (req/rsp selection
 *	else-branch, rval guards, closing braces and final return).
 *	Comments describe only the visible lines.
 */
4803 qla24xx_configure_vhba(scsi_qla_host_t *vha)
4805 int rval = QLA_SUCCESS;
4806 uint16_t mb[MAILBOX_REGISTER_COUNT];
4807 struct qla_hw_data *ha = vha->hw;
/* Firmware readiness is tracked on the physical host, not the vport. */
4808 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
4809 struct req_que *req;
4810 struct rsp_que *rsp;
4815 rval = qla2x00_fw_ready(base_vha);
4816 if (ha->flags.cpu_affinity_enabled)
4817 req = ha->req_q_map[0];
4822 if (rval == QLA_SUCCESS) {
4823 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
4824 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
4827 vha->flags.management_server_logged_in = 0;
4829 /* Login to SNS first */
/* NPH_SNS/0xfc: well-known name-server address; BIT_1 = use explicit mb. */
4830 ha->isp_ops->fabric_login(vha, NPH_SNS, 0xff, 0xff, 0xfc, mb, BIT_1);
4831 if (mb[0] != MBS_COMMAND_COMPLETE) {
4832 DEBUG15(qla_printk(KERN_INFO, ha,
4833 "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x "
4834 "mb[2]=%x mb[6]=%x mb[7]=%x\n", NPH_SNS,
4835 mb[0], mb[1], mb[2], mb[6], mb[7]));
4836 return (QLA_FUNCTION_FAILED);
/* Mark the loop up and request a full resync on the base host. */
4839 atomic_set(&vha->loop_down_timer, 0);
4840 atomic_set(&vha->loop_state, LOOP_UP);
4841 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
4842 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
4843 rval = qla2x00_loop_resync(base_vha);
4848 /* 84XX Support **************************************************************/
/* Global registry of shared 84xx chip-state objects, keyed by PCI bus;
 * qla_cs84xx_mutex serializes all list traversal and mutation. */
4850 static LIST_HEAD(qla_cs84xx_list);
4851 static DEFINE_MUTEX(qla_cs84xx_mutex);
4853 static struct qla_chip_state_84xx *
4854 qla84xx_get_chip(struct scsi_qla_host *vha)
4856 struct qla_chip_state_84xx *cs84xx;
4857 struct qla_hw_data *ha = vha->hw;
4859 mutex_lock(&qla_cs84xx_mutex);
4861 /* Find any shared 84xx chip. */
4862 list_for_each_entry(cs84xx, &qla_cs84xx_list, list) {
4863 if (cs84xx->bus == ha->pdev->bus) {
4864 kref_get(&cs84xx->kref);
4869 cs84xx = kzalloc(sizeof(*cs84xx), GFP_KERNEL);
4873 kref_init(&cs84xx->kref);
4874 spin_lock_init(&cs84xx->access_lock);
4875 mutex_init(&cs84xx->fw_update_mutex);
4876 cs84xx->bus = ha->pdev->bus;
4878 list_add_tail(&cs84xx->list, &qla_cs84xx_list);
4880 mutex_unlock(&qla_cs84xx_mutex);
4885 __qla84xx_chip_release(struct kref *kref)
4887 struct qla_chip_state_84xx *cs84xx =
4888 container_of(kref, struct qla_chip_state_84xx, kref);
4890 mutex_lock(&qla_cs84xx_mutex);
4891 list_del(&cs84xx->list);
4892 mutex_unlock(&qla_cs84xx_mutex);
4897 qla84xx_put_chip(struct scsi_qla_host *vha)
4899 struct qla_hw_data *ha = vha->hw;
4901 kref_put(&ha->cs84xx->kref, __qla84xx_chip_release);
4905 qla84xx_init_chip(scsi_qla_host_t *vha)
4909 struct qla_hw_data *ha = vha->hw;
4911 mutex_lock(&ha->cs84xx->fw_update_mutex);
4913 rval = qla84xx_verify_chip(vha, status);
4915 mutex_unlock(&ha->cs84xx->fw_update_mutex);
4917 return rval != QLA_SUCCESS || status[0] ? QLA_FUNCTION_FAILED:
4921 /* 81XX Support **************************************************************/
/*
 * qla81xx_nvram_config
 *	Read the ISP81xx NVRAM from flash (via read_optrom), validate it,
 *	fall back to defaults when invalid, and populate the 81xx init
 *	control block plus driver tunables.  Largely parallels
 *	qla24xx_nvram_config() but adds FCoE enode-MAC handling and the
 *	extended init control block.
 *
 * NOTE(review): many lines are elided from this listing (rval/cnt/chksum/
 * dptr declarations, closing braces, the final return).  Comments below
 * describe only what the visible lines show.
 */
4924 qla81xx_nvram_config(scsi_qla_host_t *vha)
4927 struct init_cb_81xx *icb;
4928 struct nvram_81xx *nv;
4930 uint8_t *dptr1, *dptr2;
4933 struct qla_hw_data *ha = vha->hw;
4936 icb = (struct init_cb_81xx *)ha->init_cb;
4939 /* Determine NVRAM starting address. */
4940 ha->nvram_size = sizeof(struct nvram_81xx);
4941 ha->vpd_size = FA_NVRAM_VPD_SIZE;
4943 /* Get VPD data into cache */
/* 81xx reads NVRAM/VPD through the option-ROM path; flt offsets are in
 * 32-bit words, hence the << 2 conversion to bytes. */
4944 ha->vpd = ha->nvram + VPD_OFFSET;
4945 ha->isp_ops->read_optrom(vha, ha->vpd, ha->flt_region_vpd << 2,
4948 /* Get NVRAM data into cache and calculate checksum. */
4949 ha->isp_ops->read_optrom(vha, ha->nvram, ha->flt_region_nvram << 2,
4951 dptr = (uint32_t *)nv;
4952 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++)
4953 chksum += le32_to_cpu(*dptr++);
4955 DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", vha->host_no));
4956 DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size));
4958 /* Bad NVRAM data, set defaults parameters. */
4959 if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P'
4960 || nv->id[3] != ' ' ||
4961 nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) {
4962 /* Reset NVRAM data. */
4963 qla_printk(KERN_WARNING, ha, "Inconsistent NVRAM detected: "
4964 "checksum=0x%x id=%c version=0x%x.\n", chksum, nv->id[0],
4965 le16_to_cpu(nv->nvram_version));
4966 qla_printk(KERN_WARNING, ha, "Falling back to functioning (yet "
4967 "invalid -- WWPN) defaults.\n");
4970 * Set default initialization control block.
4972 memset(nv, 0, ha->nvram_size);
4973 nv->nvram_version = __constant_cpu_to_le16(ICB_VERSION);
4974 nv->version = __constant_cpu_to_le16(ICB_VERSION);
4975 nv->frame_payload_size = __constant_cpu_to_le16(2048);
4976 nv->execution_throttle = __constant_cpu_to_le16(0xFFFF);
4977 nv->exchange_count = __constant_cpu_to_le16(0);
/* Default WWPN 21:00:00:e0:8b:1c:55:86 -- byte 1 offset by PCI port. */
4978 nv->port_name[0] = 0x21;
4979 nv->port_name[1] = 0x00 + ha->port_no;
4980 nv->port_name[2] = 0x00;
4981 nv->port_name[3] = 0xe0;
4982 nv->port_name[4] = 0x8b;
4983 nv->port_name[5] = 0x1c;
4984 nv->port_name[6] = 0x55;
4985 nv->port_name[7] = 0x86;
4986 nv->node_name[0] = 0x20;
4987 nv->node_name[1] = 0x00;
4988 nv->node_name[2] = 0x00;
4989 nv->node_name[3] = 0xe0;
4990 nv->node_name[4] = 0x8b;
4991 nv->node_name[5] = 0x1c;
4992 nv->node_name[6] = 0x55;
4993 nv->node_name[7] = 0x86;
4994 nv->login_retry_count = __constant_cpu_to_le16(8);
4995 nv->interrupt_delay_timer = __constant_cpu_to_le16(0);
4996 nv->login_timeout = __constant_cpu_to_le16(0);
4997 nv->firmware_options_1 =
4998 __constant_cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
4999 nv->firmware_options_2 = __constant_cpu_to_le32(2 << 4);
5000 nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12);
5001 nv->firmware_options_3 = __constant_cpu_to_le32(2 << 13);
5002 nv->host_p = __constant_cpu_to_le32(BIT_11|BIT_10);
5003 nv->efi_parameters = __constant_cpu_to_le32(0);
5004 nv->reset_delay = 5;
5005 nv->max_luns_per_target = __constant_cpu_to_le16(128);
5006 nv->port_down_retry_count = __constant_cpu_to_le16(30);
5007 nv->link_down_timeout = __constant_cpu_to_le16(30);
/* Default FCoE enode MAC; last byte offset by PCI port. */
5008 nv->enode_mac[0] = 0x00;
5009 nv->enode_mac[1] = 0x02;
5010 nv->enode_mac[2] = 0x03;
5011 nv->enode_mac[3] = 0x04;
5012 nv->enode_mac[4] = 0x05;
5013 nv->enode_mac[5] = 0x06 + ha->port_no;
5018 /* Reset Initialization control block */
5019 memset(icb, 0, sizeof(struct init_cb_81xx));
/* The ICB is filled from NVRAM in two raw byte-copy segments. */
5021 /* Copy 1st segment. */
5022 dptr1 = (uint8_t *)icb;
5023 dptr2 = (uint8_t *)&nv->version;
5024 cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version;
5026 *dptr1++ = *dptr2++;
5028 icb->login_retry_count = nv->login_retry_count;
5030 /* Copy 2nd segment. */
5031 dptr1 = (uint8_t *)&icb->interrupt_delay_timer;
5032 dptr2 = (uint8_t *)&nv->interrupt_delay_timer;
5033 cnt = (uint8_t *)&icb->reserved_5 -
5034 (uint8_t *)&icb->interrupt_delay_timer;
5036 *dptr1++ = *dptr2++;
5038 memcpy(icb->enode_mac, nv->enode_mac, sizeof(icb->enode_mac));
5039 /* Some boards (with valid NVRAMs) still have NULL enode_mac!! */
5040 if (!memcmp(icb->enode_mac, "\0\0\0\0\0\0", sizeof(icb->enode_mac))) {
5041 icb->enode_mac[0] = 0x01;
5042 icb->enode_mac[1] = 0x02;
5043 icb->enode_mac[2] = 0x03;
5044 icb->enode_mac[3] = 0x04;
5045 icb->enode_mac[4] = 0x05;
5046 icb->enode_mac[5] = 0x06 + ha->port_no;
5049 /* Use extended-initialization control block. */
5050 memcpy(ha->ex_init_cb, &nv->ex_version, sizeof(*ha->ex_init_cb));
5053 * Setup driver NVRAM options.
5055 qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
5058 /* Use alternate WWN? */
5059 if (nv->host_p & __constant_cpu_to_le32(BIT_15)) {
5060 memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
5061 memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
5064 /* Prepare nodename */
5065 if ((icb->firmware_options_1 & __constant_cpu_to_le32(BIT_14)) == 0) {
5067 * Firmware will apply the following mask if the nodename was
/* Derive node name from port name; high nibble of byte 0 masked. */
5070 memcpy(icb->node_name, icb->port_name, WWN_SIZE);
5071 icb->node_name[0] &= 0xF0;
5074 /* Set host adapter parameters. */
5075 ha->flags.disable_risc_code_load = 0;
5076 ha->flags.enable_lip_reset = 0;
5077 ha->flags.enable_lip_full_login =
5078 le32_to_cpu(nv->host_p) & BIT_10 ? 1: 0;
5079 ha->flags.enable_target_reset =
5080 le32_to_cpu(nv->host_p) & BIT_11 ? 1: 0;
5081 ha->flags.enable_led_scheme = 0;
5082 ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1: 0;
5084 ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
5085 (BIT_6 | BIT_5 | BIT_4)) >> 4;
5087 /* save HBA serial number */
5088 ha->serial0 = icb->port_name[5];
5089 ha->serial1 = icb->port_name[6];
5090 ha->serial2 = icb->port_name[7];
5091 memcpy(vha->node_name, icb->node_name, WWN_SIZE);
5092 memcpy(vha->port_name, icb->port_name, WWN_SIZE);
5094 icb->execution_throttle = __constant_cpu_to_le16(0xFFFF);
5096 ha->retry_count = le16_to_cpu(nv->login_retry_count);
5098 /* Set minimum login_timeout to 4 seconds. */
5099 if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout)
5100 nv->login_timeout = cpu_to_le16(ql2xlogintimeout);
5101 if (le16_to_cpu(nv->login_timeout) < 4)
5102 nv->login_timeout = __constant_cpu_to_le16(4);
5103 ha->login_timeout = le16_to_cpu(nv->login_timeout);
5104 icb->login_timeout = nv->login_timeout;
5106 /* Set minimum RATOV to 100 tenths of a second. */
5109 ha->loop_reset_delay = nv->reset_delay;
5111 /* Link Down Timeout = 0:
5113 * When Port Down timer expires we will start returning
5114 * I/O's to OS with "DID_NO_CONNECT".
5116 * Link Down Timeout != 0:
5118 * The driver waits for the link to come up after link down
5119 * before returning I/Os to OS with "DID_NO_CONNECT".
5121 if (le16_to_cpu(nv->link_down_timeout) == 0) {
5122 ha->loop_down_abort_time =
5123 (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
5125 ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout);
5126 ha->loop_down_abort_time =
5127 (LOOP_DOWN_TIME - ha->link_down_timeout);
5130 /* Need enough time to try and get the port back. */
5131 ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count);
5132 if (qlport_down_retry)
5133 ha->port_down_retry_count = qlport_down_retry;
5135 /* Set login_retry_count */
5136 ha->login_retry_count = le16_to_cpu(nv->login_retry_count);
5137 if (ha->port_down_retry_count ==
5138 le16_to_cpu(nv->port_down_retry_count) &&
5139 ha->port_down_retry_count > 3)
5140 ha->login_retry_count = ha->port_down_retry_count;
5141 else if (ha->port_down_retry_count > (int)ha->login_retry_count)
5142 ha->login_retry_count = ha->port_down_retry_count;
5143 if (ql2xloginretrycount)
5144 ha->login_retry_count = ql2xloginretrycount;
/* ZIO (interrupt coalescing) is configured only on the first init. */
5147 if (!vha->flags.init_done) {
5148 ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
5149 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
5150 ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
5151 le16_to_cpu(icb->interrupt_delay_timer): 2;
5153 icb->firmware_options_2 &= __constant_cpu_to_le32(
5154 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
5155 vha->flags.process_response_queue = 0;
5156 if (ha->zio_mode != QLA_ZIO_DISABLED) {
5157 ha->zio_mode = QLA_ZIO_MODE_6;
5159 DEBUG2(printk("scsi(%ld): ZIO mode %d enabled; timer delay "
5160 "(%d us).\n", vha->host_no, ha->zio_mode,
5161 ha->zio_timer * 100));
5162 qla_printk(KERN_INFO, ha,
5163 "ZIO mode %d enabled; timer delay (%d us).\n",
5164 ha->zio_mode, ha->zio_timer * 100);
5166 icb->firmware_options_2 |= cpu_to_le32(
5167 (uint32_t)ha->zio_mode);
5168 icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
5169 vha->flags.process_response_queue = 1;
5173 DEBUG2_3(printk(KERN_WARNING
5174 "scsi(%ld): NVRAM configuration failed!\n", vha->host_no));
/*
 * qla82xx_restart_isp
 *	Restart the ISP82xx after an abort: re-init rings, wait for firmware
 *	readiness, reconfigure the loop, re-enable FCE/EFT tracing, and
 *	propagate the abort to all vports.
 *
 * NOTE(review): lines are elided from this listing (status/rval guards,
 * closing braces, the final return).  Comments describe only the visible
 * lines.
 */
5180 qla82xx_restart_isp(scsi_qla_host_t *vha)
5184 struct qla_hw_data *ha = vha->hw;
5185 struct req_que *req = ha->req_q_map[0];
5186 struct rsp_que *rsp = ha->rsp_q_map[0];
5187 struct scsi_qla_host *vp;
5188 struct scsi_qla_host *tvp;
5190 status = qla2x00_init_rings(vha);
5192 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
5193 ha->flags.chip_reset_done = 1;
5195 status = qla2x00_fw_ready(vha);
5197 qla_printk(KERN_INFO, ha,
5198 "%s(): Start configure loop, "
5199 "status = %d\n", __func__, status);
5201 /* Issue a marker after FW becomes ready. */
5202 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
5204 vha->flags.online = 1;
5205 /* Wait at most MAX_TARGET RSCNs for a stable link. */
/* Retry configure_loop while the loop is up, no abort is pending,
 * time remains, and a resync is still being requested. */
5208 clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
5209 qla2x00_configure_loop(vha);
5211 } while (!atomic_read(&vha->loop_down_timer) &&
5212 !(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) &&
5214 (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)));
5217 /* if no cable then assume it's good */
5218 if ((vha->device_flags & DFLG_NO_CABLE))
5221 qla_printk(KERN_INFO, ha,
5222 "%s(): Configure loop done, status = 0x%x\n",
5227 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
5229 if (!atomic_read(&vha->loop_down_timer)) {
5231 * Issue marker command only when we are going
5232 * to start the I/O .
5234 vha->marker_needed = 1;
5237 vha->flags.online = 1;
5239 ha->isp_ops->enable_intrs(ha);
5241 ha->isp_abort_cnt = 0;
5242 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
/* Re-arm the Fibre Channel Event trace buffer if it was configured. */
5245 ha->flags.fce_enabled = 1;
5247 fce_calc_size(ha->fce_bufs));
5248 rval = qla2x00_enable_fce_trace(vha,
5249 ha->fce_dma, ha->fce_bufs, ha->fce_mb,
5252 qla_printk(KERN_WARNING, ha,
5253 "Unable to reinitialize FCE "
5255 ha->flags.fce_enabled = 0;
/* Re-arm the Extended Firmware Trace buffer likewise. */
5260 memset(ha->eft, 0, EFT_SIZE);
5261 rval = qla2x00_enable_eft_trace(vha,
5262 ha->eft_dma, EFT_NUM_BUFFERS);
5264 qla_printk(KERN_WARNING, ha,
5265 "Unable to reinitialize EFT "
5272 DEBUG(printk(KERN_INFO
5273 "qla82xx_restart_isp(%ld): succeeded.\n",
/* Propagate the abort to every virtual port (safe-iteration: vports
 * may be removed while aborting). */
5275 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
5277 qla2x00_vp_abort_isp(vp);
5280 qla_printk(KERN_INFO, ha,
5281 "qla82xx_restart_isp: **** FAILED ****\n");
5288 qla81xx_update_fw_options(scsi_qla_host_t *vha)
5290 struct qla_hw_data *ha = vha->hw;
5295 /* Enable ETS Burst. */
5296 memset(ha->fw_options, 0, sizeof(ha->fw_options));
5297 ha->fw_options[2] |= BIT_9;
5298 qla2x00_set_fw_options(vha, ha->fw_options);
5302 * qla24xx_get_fcp_prio
5303 * Gets the fcp cmd priority value for the logged in port.
5304 * Looks for a match of the port descriptors within
5305 * each of the fcp prio config entries. If a match is found,
5306 * the tag (priority) value is returned.
5309 * ha = adapter block po
5310 * fcport = port structure pointer.
5313 * non-zero (if found)
5320 qla24xx_get_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
5323 uint8_t pid_match, wwn_match;
5325 uint32_t pid1, pid2;
5326 uint64_t wwn1, wwn2;
5327 struct qla_fcp_prio_entry *pri_entry;
5328 struct qla_hw_data *ha = vha->hw;
5330 if (!ha->fcp_prio_cfg || !ha->flags.fcp_prio_enabled)
5334 entries = ha->fcp_prio_cfg->num_entries;
5335 pri_entry = &ha->fcp_prio_cfg->entry[0];
5337 for (i = 0; i < entries; i++) {
5338 pid_match = wwn_match = 0;
5340 if (!(pri_entry->flags & FCP_PRIO_ENTRY_VALID)) {
5345 /* check source pid for a match */
5346 if (pri_entry->flags & FCP_PRIO_ENTRY_SPID_VALID) {
5347 pid1 = pri_entry->src_pid & INVALID_PORT_ID;
5348 pid2 = vha->d_id.b24 & INVALID_PORT_ID;
5349 if (pid1 == INVALID_PORT_ID)
5351 else if (pid1 == pid2)
5355 /* check destination pid for a match */
5356 if (pri_entry->flags & FCP_PRIO_ENTRY_DPID_VALID) {
5357 pid1 = pri_entry->dst_pid & INVALID_PORT_ID;
5358 pid2 = fcport->d_id.b24 & INVALID_PORT_ID;
5359 if (pid1 == INVALID_PORT_ID)
5361 else if (pid1 == pid2)
5365 /* check source WWN for a match */
5366 if (pri_entry->flags & FCP_PRIO_ENTRY_SWWN_VALID) {
5367 wwn1 = wwn_to_u64(vha->port_name);
5368 wwn2 = wwn_to_u64(pri_entry->src_wwpn);
5369 if (wwn2 == (uint64_t)-1)
5371 else if (wwn1 == wwn2)
5375 /* check destination WWN for a match */
5376 if (pri_entry->flags & FCP_PRIO_ENTRY_DWWN_VALID) {
5377 wwn1 = wwn_to_u64(fcport->port_name);
5378 wwn2 = wwn_to_u64(pri_entry->dst_wwpn);
5379 if (wwn2 == (uint64_t)-1)
5381 else if (wwn1 == wwn2)
5385 if (pid_match == 2 || wwn_match == 2) {
5386 /* Found a matching entry */
5387 if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
5388 priority = pri_entry->tag;
5399 * qla24xx_update_fcport_fcp_prio
5400 * Activates fcp priority for the logged in fc port
5403 * ha = adapter block pointer.
5404 * fcp = port structure pointer.
5407 * QLA_SUCCESS or QLA_FUNCTION_FAILED
5413 qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *ha, fc_port_t *fcport)
5419 if (atomic_read(&fcport->state) == FCS_UNCONFIGURED ||
5420 fcport->port_type != FCT_TARGET ||
5421 fcport->loop_id == FC_NO_LOOP_ID)
5422 return QLA_FUNCTION_FAILED;
5424 priority = qla24xx_get_fcp_prio(ha, fcport);
5425 ret = qla24xx_set_fcp_prio(ha, fcport->loop_id, priority, mb);
5426 if (ret == QLA_SUCCESS)
5427 fcport->fcp_prio = priority;
5429 DEBUG2(printk(KERN_WARNING
5430 "scsi(%ld): Unable to activate fcp priority, "
5431 " ret=0x%x\n", ha->host_no, ret));
5437 * qla24xx_update_all_fcp_prio
5438 * Activates fcp priority for all the logged in ports
5441 * ha = adapter block pointer.
5444 * QLA_SUCCESS or QLA_FUNCTION_FAILED
5450 qla24xx_update_all_fcp_prio(scsi_qla_host_t *vha)
5455 ret = QLA_FUNCTION_FAILED;
5456 /* We need to set priority for all logged in ports */
5457 list_for_each_entry(fcport, &vha->vp_fcports, list)
5458 ret = qla24xx_update_fcport_fcp_prio(vha, fcport);