/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/delay.h>
#include <linux/vmalloc.h>

#include "qla_devtbl.h"
/*
 * QLogic ISP2x00 Hardware Support Function Prototypes.
 */
static int qla2x00_isp_firmware(scsi_qla_host_t *);
static int qla2x00_setup_chip(scsi_qla_host_t *);
static int qla2x00_init_rings(scsi_qla_host_t *);
static int qla2x00_fw_ready(scsi_qla_host_t *);
static int qla2x00_configure_hba(scsi_qla_host_t *);
static int qla2x00_configure_loop(scsi_qla_host_t *);
static int qla2x00_configure_local_loop(scsi_qla_host_t *);
static int qla2x00_configure_fabric(scsi_qla_host_t *);
static int qla2x00_find_all_fabric_devs(scsi_qla_host_t *, struct list_head *);
static int qla2x00_device_resync(scsi_qla_host_t *);
static int qla2x00_fabric_dev_login(scsi_qla_host_t *, fc_port_t *,
    uint16_t *);
static int qla2x00_restart_isp(scsi_qla_host_t *);
static int qla2x00_find_new_loop_id(scsi_qla_host_t *, fc_port_t *);
static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *);
static int qla84xx_init_chip(scsi_qla_host_t *);
static int qla25xx_init_queues(struct qla_hw_data *);
/****************************************************************************/
/* QLogic ISP2x00 Hardware Support Functions. */
/****************************************************************************/

/*
* qla2x00_initialize_adapter
*
* Input:
*      ha = adapter block pointer.
*/
58 qla2x00_initialize_adapter(scsi_qla_host_t *vha)
61 struct qla_hw_data *ha = vha->hw;
62 struct req_que *req = ha->req_q_map[0];
64 /* Clear adapter flags. */
65 vha->flags.online = 0;
66 ha->flags.chip_reset_done = 0;
67 vha->flags.reset_active = 0;
68 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
69 atomic_set(&vha->loop_state, LOOP_DOWN);
70 vha->device_flags = DFLG_NO_CABLE;
72 vha->flags.management_server_logged_in = 0;
73 vha->marker_needed = 0;
74 ha->isp_abort_cnt = 0;
75 ha->beacon_blink_led = 0;
76 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
78 set_bit(0, ha->req_qid_map);
79 set_bit(0, ha->rsp_qid_map);
81 qla_printk(KERN_INFO, ha, "Configuring PCI space...\n");
82 rval = ha->isp_ops->pci_config(vha);
84 DEBUG2(printk("scsi(%ld): Unable to configure PCI space.\n",
89 ha->isp_ops->reset_chip(vha);
91 rval = qla2xxx_get_flash_info(vha);
93 DEBUG2(printk("scsi(%ld): Unable to validate FLASH data.\n",
98 ha->isp_ops->get_flash_version(vha, req->ring);
100 qla_printk(KERN_INFO, ha, "Configure NVRAM parameters...\n");
102 ha->isp_ops->nvram_config(vha);
104 if (ha->flags.disable_serdes) {
105 /* Mask HBA via NVRAM settings? */
106 qla_printk(KERN_INFO, ha, "Masking HBA WWPN "
107 "%02x%02x%02x%02x%02x%02x%02x%02x (via NVRAM).\n",
108 vha->port_name[0], vha->port_name[1],
109 vha->port_name[2], vha->port_name[3],
110 vha->port_name[4], vha->port_name[5],
111 vha->port_name[6], vha->port_name[7]);
112 return QLA_FUNCTION_FAILED;
115 qla_printk(KERN_INFO, ha, "Verifying loaded RISC code...\n");
117 if (qla2x00_isp_firmware(vha) != QLA_SUCCESS) {
118 rval = ha->isp_ops->chip_diag(vha);
121 rval = qla2x00_setup_chip(vha);
125 if (IS_QLA84XX(ha)) {
126 ha->cs84xx = qla84xx_get_chip(vha);
128 qla_printk(KERN_ERR, ha,
129 "Unable to configure ISP84XX.\n");
130 return QLA_FUNCTION_FAILED;
133 rval = qla2x00_init_rings(vha);
134 ha->flags.chip_reset_done = 1;
/**
 * qla2100_pci_config() - Setup ISP21xx PCI configuration registers.
 *
 * Returns 0 on success.
 */
qla2100_pci_config(scsi_qla_host_t *vha)
struct qla_hw_data *ha = vha->hw;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
pci_set_master(ha->pdev);
pci_try_set_mwi(ha->pdev);
pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
pci_write_config_word(ha->pdev, PCI_COMMAND, w);
pci_disable_rom(ha->pdev);
/* Get PCI bus information. */
spin_lock_irqsave(&ha->hardware_lock, flags);
ha->pci_attr = RD_REG_WORD(&reg->ctrl_status);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
/**
 * qla2300_pci_config() - Setup ISP23xx PCI configuration registers.
 *
 * Returns 0 on success.
 */
qla2300_pci_config(scsi_qla_host_t *vha)
unsigned long flags = 0;
struct qla_hw_data *ha = vha->hw;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
pci_set_master(ha->pdev);
pci_try_set_mwi(ha->pdev);
pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
if (IS_QLA2322(ha) || IS_QLA6322(ha))
w &= ~PCI_COMMAND_INTX_DISABLE;
pci_write_config_word(ha->pdev, PCI_COMMAND, w);
/*
 * If this is a 2300 card and not 2312, reset the
 * COMMAND_INVALIDATE due to a bug in the 2300. Unfortunately,
 * the 2310 also reports itself as a 2300 so we need to get the
 * fb revision level -- a 6 indicates it really is a 2300 and
 * not a 2310.
 */
if (IS_QLA2300(ha)) {
spin_lock_irqsave(&ha->hardware_lock, flags);
WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
for (cnt = 0; cnt < 30000; cnt++) {
if ((RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) != 0)
/* Select FPM registers. */
WRT_REG_WORD(&reg->ctrl_status, 0x20);
RD_REG_WORD(&reg->ctrl_status);
/* Get the fb rev level */
ha->fb_rev = RD_FB_CMD_REG(ha, reg);
if (ha->fb_rev == FPM_2300)
pci_clear_mwi(ha->pdev);
/* Deselect FPM registers. */
WRT_REG_WORD(&reg->ctrl_status, 0x0);
RD_REG_WORD(&reg->ctrl_status);
/* Release RISC module. */
WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
for (cnt = 0; cnt < 30000; cnt++) {
if ((RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0)
spin_unlock_irqrestore(&ha->hardware_lock, flags);
pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);
pci_disable_rom(ha->pdev);
/* Get PCI bus information. */
spin_lock_irqsave(&ha->hardware_lock, flags);
ha->pci_attr = RD_REG_WORD(&reg->ctrl_status);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
/**
 * qla24xx_pci_config() - Setup ISP24xx PCI configuration registers.
 *
 * Returns 0 on success.
 */
qla24xx_pci_config(scsi_qla_host_t *vha)
unsigned long flags = 0;
struct qla_hw_data *ha = vha->hw;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
pci_set_master(ha->pdev);
pci_try_set_mwi(ha->pdev);
pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
w &= ~PCI_COMMAND_INTX_DISABLE;
pci_write_config_word(ha->pdev, PCI_COMMAND, w);
pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);
/* PCI-X -- adjust Maximum Memory Read Byte Count (2048). */
if (pci_find_capability(ha->pdev, PCI_CAP_ID_PCIX))
pcix_set_mmrbc(ha->pdev, 2048);
/* PCIe -- adjust Maximum Read Request Size (2048). */
if (pci_find_capability(ha->pdev, PCI_CAP_ID_EXP))
pcie_set_readrq(ha->pdev, 2048);
pci_disable_rom(ha->pdev);
ha->chip_revision = ha->pdev->revision;
/* Get PCI bus information. */
spin_lock_irqsave(&ha->hardware_lock, flags);
ha->pci_attr = RD_REG_DWORD(&reg->ctrl_status);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
/**
 * qla25xx_pci_config() - Setup ISP25xx PCI configuration registers.
 *
 * Returns 0 on success.
 */
qla25xx_pci_config(scsi_qla_host_t *vha)
struct qla_hw_data *ha = vha->hw;
pci_set_master(ha->pdev);
pci_try_set_mwi(ha->pdev);
pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
w &= ~PCI_COMMAND_INTX_DISABLE;
pci_write_config_word(ha->pdev, PCI_COMMAND, w);
/* PCIe -- adjust Maximum Read Request Size (2048). */
if (pci_find_capability(ha->pdev, PCI_CAP_ID_EXP))
pcie_set_readrq(ha->pdev, 2048);
pci_disable_rom(ha->pdev);
ha->chip_revision = ha->pdev->revision;
/**
 * qla2x00_isp_firmware() - Choose firmware image.
 *
 * Returns 0 on success.
 */
334 qla2x00_isp_firmware(scsi_qla_host_t *vha)
337 uint16_t loop_id, topo, sw_cap;
338 uint8_t domain, area, al_pa;
339 struct qla_hw_data *ha = vha->hw;
341 /* Assume loading risc code */
342 rval = QLA_FUNCTION_FAILED;
344 if (ha->flags.disable_risc_code_load) {
345 DEBUG2(printk("scsi(%ld): RISC CODE NOT loaded\n",
347 qla_printk(KERN_INFO, ha, "RISC CODE NOT loaded\n");
349 /* Verify checksum of loaded RISC code. */
350 rval = qla2x00_verify_checksum(vha, ha->fw_srisc_address);
351 if (rval == QLA_SUCCESS) {
352 /* And, verify we are not in ROM code. */
353 rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
354 &area, &domain, &topo, &sw_cap);
359 DEBUG2_3(printk("scsi(%ld): **** Load RISC code ****\n",
/**
 * qla2x00_reset_chip() - Reset ISP chip.
 *
 * Returns 0 on success.
 */
qla2x00_reset_chip(scsi_qla_host_t *vha)
unsigned long flags = 0;
struct qla_hw_data *ha = vha->hw;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
ha->isp_ops->disable_intrs(ha);
spin_lock_irqsave(&ha->hardware_lock, flags);
/* Turn off master enable */
pci_read_config_word(ha->pdev, PCI_COMMAND, &cmd);
cmd &= ~PCI_COMMAND_MASTER;
pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);
if (!IS_QLA2100(ha)) {
WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
if (IS_QLA2200(ha) || IS_QLA2300(ha)) {
for (cnt = 0; cnt < 30000; cnt++) {
if ((RD_REG_WORD(&reg->hccr) &
    HCCR_RISC_PAUSE) != 0)
RD_REG_WORD(&reg->hccr); /* PCI Posting. */
/* Select FPM registers. */
WRT_REG_WORD(&reg->ctrl_status, 0x20);
RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
/* FPM Soft Reset. */
WRT_REG_WORD(&reg->fpm_diag_config, 0x100);
RD_REG_WORD(&reg->fpm_diag_config); /* PCI Posting. */
/* Toggle Fpm Reset. */
if (!IS_QLA2200(ha)) {
WRT_REG_WORD(&reg->fpm_diag_config, 0x0);
RD_REG_WORD(&reg->fpm_diag_config); /* PCI Posting. */
/* Select frame buffer registers. */
WRT_REG_WORD(&reg->ctrl_status, 0x10);
RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
/* Reset frame buffer FIFOs. */
if (IS_QLA2200(ha)) {
WRT_FB_CMD_REG(ha, reg, 0xa000);
RD_FB_CMD_REG(ha, reg); /* PCI Posting. */
WRT_FB_CMD_REG(ha, reg, 0x00fc);
/* Read back fb_cmd until zero or 3 seconds max */
for (cnt = 0; cnt < 3000; cnt++) {
if ((RD_FB_CMD_REG(ha, reg) & 0xff) == 0)
/* Select RISC module registers. */
WRT_REG_WORD(&reg->ctrl_status, 0);
RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
/* Reset RISC processor. */
WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
RD_REG_WORD(&reg->hccr); /* PCI Posting. */
/* Release RISC processor. */
WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
RD_REG_WORD(&reg->hccr); /* PCI Posting. */
WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
WRT_REG_WORD(&reg->hccr, HCCR_CLR_HOST_INT);
/* Reset ISP chip. */
WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
/* Wait for RISC to recover from reset. */
if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
/*
 * It is necessary to delay here since the card doesn't
 * respond to PCI reads during a reset. On some architectures
 * this will result in an MCA.
 */
for (cnt = 30000; cnt; cnt--) {
if ((RD_REG_WORD(&reg->ctrl_status) &
    CSR_ISP_SOFT_RESET) == 0)
/* Reset RISC processor. */
WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
WRT_REG_WORD(&reg->semaphore, 0);
/* Release RISC processor. */
WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
RD_REG_WORD(&reg->hccr); /* PCI Posting. */
if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
for (cnt = 0; cnt < 30000; cnt++) {
if (RD_MAILBOX_REG(ha, reg, 0) != MBS_BUSY)
/* Turn on master enable */
cmd |= PCI_COMMAND_MASTER;
pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);
/* Disable RISC pause on FPM parity error. */
if (!IS_QLA2100(ha)) {
WRT_REG_WORD(&reg->hccr, HCCR_DISABLE_PARITY_PAUSE);
RD_REG_WORD(&reg->hccr); /* PCI Posting. */
spin_unlock_irqrestore(&ha->hardware_lock, flags);
/**
 * qla24xx_reset_risc() - Perform full reset of ISP24xx RISC.
 *
 * Returns 0 on success.
 */
qla24xx_reset_risc(scsi_qla_host_t *vha)
unsigned long flags = 0;
struct qla_hw_data *ha = vha->hw;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
spin_lock_irqsave(&ha->hardware_lock, flags);
WRT_REG_DWORD(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
for (cnt = 0; cnt < 30000; cnt++) {
if ((RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
WRT_REG_DWORD(&reg->ctrl_status,
    CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);
/* Wait for firmware to complete NVRAM accesses. */
d2 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
for (cnt = 10000; cnt && d2; cnt--) {
d2 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
/* Wait for soft-reset to complete. */
d2 = RD_REG_DWORD(&reg->ctrl_status);
for (cnt = 6000000; cnt && (d2 & CSRX_ISP_SOFT_RESET); cnt--) {
d2 = RD_REG_DWORD(&reg->ctrl_status);
WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
RD_REG_DWORD(&reg->hccr);
WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
RD_REG_DWORD(&reg->hccr);
WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
RD_REG_DWORD(&reg->hccr);
d2 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
for (cnt = 6000000; cnt && d2; cnt--) {
d2 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (IS_NOPOLLING_TYPE(ha))
ha->isp_ops->enable_intrs(ha);
/**
 * qla24xx_reset_chip() - Reset ISP24xx chip.
 *
 * Returns 0 on success.
 */
qla24xx_reset_chip(scsi_qla_host_t *vha)
struct qla_hw_data *ha = vha->hw;
ha->isp_ops->disable_intrs(ha);
/* Perform RISC reset. */
qla24xx_reset_risc(vha);
/**
 * qla2x00_chip_diag() - Test chip for proper operation.
 *
 * Returns 0 on success.
 */
qla2x00_chip_diag(scsi_qla_host_t *vha)
struct qla_hw_data *ha = vha->hw;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
unsigned long flags = 0;
struct req_que *req = ha->req_q_map[0];
/* Assume a failed state */
rval = QLA_FUNCTION_FAILED;
DEBUG3(printk("scsi(%ld): Testing device at %lx.\n",
    vha->host_no, (u_long)&reg->flash_address));
spin_lock_irqsave(&ha->hardware_lock, flags);
/* Reset ISP chip. */
WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
/*
 * We need to have a delay here since the card will not respond while
 * in reset causing an MCA on some architectures.
 */
data = qla2x00_debounce_register(&reg->ctrl_status);
for (cnt = 6000000; cnt && (data & CSR_ISP_SOFT_RESET); cnt--) {
data = RD_REG_WORD(&reg->ctrl_status);
goto chip_diag_failed;
DEBUG3(printk("scsi(%ld): Reset register cleared by chip reset\n",
/* Reset RISC processor. */
WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
643 /* Workaround for QLA2312 PCI parity error */
644 if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
645 data = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 0));
646 for (cnt = 6000000; cnt && (data == MBS_BUSY); cnt--) {
648 data = RD_MAILBOX_REG(ha, reg, 0);
655 goto chip_diag_failed;
657 /* Check product ID of chip */
658 DEBUG3(printk("scsi(%ld): Checking product ID of chip\n", vha->host_no));
660 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
661 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
662 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
663 mb[4] = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 4));
664 if (mb[1] != PROD_ID_1 || (mb[2] != PROD_ID_2 && mb[2] != PROD_ID_2a) ||
665 mb[3] != PROD_ID_3) {
666 qla_printk(KERN_WARNING, ha,
667 "Wrong product ID = 0x%x,0x%x,0x%x\n", mb[1], mb[2], mb[3]);
669 goto chip_diag_failed;
671 ha->product_id[0] = mb[1];
672 ha->product_id[1] = mb[2];
673 ha->product_id[2] = mb[3];
674 ha->product_id[3] = mb[4];
676 /* Adjust fw RISC transfer size */
677 if (req->length > 1024)
678 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * 1024;
680 ha->fw_transfer_size = REQUEST_ENTRY_SIZE *
683 if (IS_QLA2200(ha) &&
684 RD_MAILBOX_REG(ha, reg, 7) == QLA2200A_RISC_ROM_VER) {
685 /* Limit firmware transfer size with a 2200A */
686 DEBUG3(printk("scsi(%ld): Found QLA2200A chip.\n",
689 ha->device_type |= DT_ISP2200A;
690 ha->fw_transfer_size = 128;
693 /* Wrap Incoming Mailboxes Test. */
694 spin_unlock_irqrestore(&ha->hardware_lock, flags);
696 DEBUG3(printk("scsi(%ld): Checking mailboxes.\n", vha->host_no));
697 rval = qla2x00_mbx_reg_test(vha);
699 DEBUG(printk("scsi(%ld): Failed mailbox send register test\n",
701 qla_printk(KERN_WARNING, ha,
702 "Failed mailbox send register test\n");
705 /* Flag a successful rval */
708 spin_lock_irqsave(&ha->hardware_lock, flags);
712 DEBUG2_3(printk("scsi(%ld): Chip diagnostics **** FAILED "
713 "****\n", vha->host_no));
715 spin_unlock_irqrestore(&ha->hardware_lock, flags);
/**
 * qla24xx_chip_diag() - Test ISP24xx for proper operation.
 *
 * Returns 0 on success.
 */
727 qla24xx_chip_diag(scsi_qla_host_t *vha)
730 struct qla_hw_data *ha = vha->hw;
731 struct req_que *req = ha->req_q_map[0];
733 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length;
735 rval = qla2x00_mbx_reg_test(vha);
737 DEBUG(printk("scsi(%ld): Failed mailbox send register test\n",
739 qla_printk(KERN_WARNING, ha,
740 "Failed mailbox send register test\n");
742 /* Flag a successful rval */
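/*
 * qla2x00_alloc_fw_dump() - Allocate firmware-dump and trace buffers.
 *
 * Sizes the dump area from the ISP type (fixed registers, external memory,
 * request/response queues) and, on FWI2-capable adapters, also allocates
 * and enables the FCE and EFT DMA trace buffers before vmalloc'ing the
 * dump buffer itself.
 */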
750 qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
753 uint32_t dump_size, fixed_size, mem_size, req_q_size, rsp_q_size,
754 eft_size, fce_size, mq_size;
757 struct qla_hw_data *ha = vha->hw;
758 struct req_que *req = ha->req_q_map[0];
759 struct rsp_que *rsp = ha->rsp_q_map[0];
762 qla_printk(KERN_WARNING, ha,
763 "Firmware dump previously allocated.\n");
768 fixed_size = mem_size = eft_size = fce_size = mq_size = 0;
769 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
770 fixed_size = sizeof(struct qla2100_fw_dump);
771 } else if (IS_QLA23XX(ha)) {
772 fixed_size = offsetof(struct qla2300_fw_dump, data_ram);
773 mem_size = (ha->fw_memory_size - 0x11000 + 1) *
775 } else if (IS_FWI2_CAPABLE(ha)) {
777 fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem);
778 else if (IS_QLA25XX(ha))
779 fixed_size = offsetof(struct qla25xx_fw_dump, ext_mem);
781 fixed_size = offsetof(struct qla24xx_fw_dump, ext_mem);
782 mem_size = (ha->fw_memory_size - 0x100000 + 1) *
785 mq_size = sizeof(struct qla2xxx_mq_chain);
786 /* Allocate memory for Fibre Channel Event Buffer. */
787 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))
790 tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
793 qla_printk(KERN_WARNING, ha, "Unable to allocate "
794 "(%d KB) for FCE.\n", FCE_SIZE / 1024);
798 memset(tc, 0, FCE_SIZE);
799 rval = qla2x00_enable_fce_trace(vha, tc_dma, FCE_NUM_BUFFERS,
800 ha->fce_mb, &ha->fce_bufs);
802 qla_printk(KERN_WARNING, ha, "Unable to initialize "
803 "FCE (%d).\n", rval);
804 dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc,
806 ha->flags.fce_enabled = 0;
810 qla_printk(KERN_INFO, ha, "Allocated (%d KB) for FCE...\n",
813 fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
814 ha->flags.fce_enabled = 1;
815 ha->fce_dma = tc_dma;
818 /* Allocate memory for Extended Trace Buffer. */
819 tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
822 qla_printk(KERN_WARNING, ha, "Unable to allocate "
823 "(%d KB) for EFT.\n", EFT_SIZE / 1024);
827 memset(tc, 0, EFT_SIZE);
828 rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS);
830 qla_printk(KERN_WARNING, ha, "Unable to initialize "
831 "EFT (%d).\n", rval);
832 dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc,
837 qla_printk(KERN_INFO, ha, "Allocated (%d KB) for EFT...\n",
841 ha->eft_dma = tc_dma;
845 req_q_size = req->length * sizeof(request_t);
846 rsp_q_size = rsp->length * sizeof(response_t);
848 dump_size = offsetof(struct qla2xxx_fw_dump, isp);
849 dump_size += fixed_size + mem_size + req_q_size + rsp_q_size + eft_size;
850 ha->chain_offset = dump_size;
851 dump_size += mq_size + fce_size;
853 ha->fw_dump = vmalloc(dump_size);
855 qla_printk(KERN_WARNING, ha, "Unable to allocate (%d KB) for "
856 "firmware dump!!!\n", dump_size / 1024);
859 dma_free_coherent(&ha->pdev->dev, eft_size, ha->eft,
866 qla_printk(KERN_INFO, ha, "Allocated (%d KB) for firmware dump...\n",
869 ha->fw_dump_len = dump_size;
870 ha->fw_dump->signature[0] = 'Q';
871 ha->fw_dump->signature[1] = 'L';
872 ha->fw_dump->signature[2] = 'G';
873 ha->fw_dump->signature[3] = 'C';
874 ha->fw_dump->version = __constant_htonl(1);
876 ha->fw_dump->fixed_size = htonl(fixed_size);
877 ha->fw_dump->mem_size = htonl(mem_size);
878 ha->fw_dump->req_q_size = htonl(req_q_size);
879 ha->fw_dump->rsp_q_size = htonl(rsp_q_size);
881 ha->fw_dump->eft_size = htonl(eft_size);
882 ha->fw_dump->eft_addr_l = htonl(LSD(ha->eft_dma));
883 ha->fw_dump->eft_addr_h = htonl(MSD(ha->eft_dma));
885 ha->fw_dump->header_size =
886 htonl(offsetof(struct qla2xxx_fw_dump, isp));
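/*
 * qla81xx_mpi_sync() - Synchronize the MPS setting on ISP81xx parts.
 *
 * Grabs the RISC semaphore (RAM word 0x7c00), compares the MPS bits from
 * PCI config offset 0x54 with RISC RAM word 0x7a15, rewrites the RAM word
 * if they differ, and releases the semaphore.  No-op on non-81xx adapters.
 */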
890 qla81xx_mpi_sync(scsi_qla_host_t *vha)
892 #define MPS_MASK 0xe0
896 struct qla_hw_data *ha = vha->hw;
898 if (!IS_QLA81XX(vha->hw))
901 rval = qla2x00_write_ram_word(vha, 0x7c00, 1);
902 if (rval != QLA_SUCCESS) {
903 DEBUG2(qla_printk(KERN_WARNING, ha,
904 "Sync-MPI: Unable to acquire semaphore.\n"));
908 pci_read_config_word(vha->hw->pdev, 0x54, &dc);
909 rval = qla2x00_read_ram_word(vha, 0x7a15, &dw);
910 if (rval != QLA_SUCCESS) {
911 DEBUG2(qla_printk(KERN_WARNING, ha,
912 "Sync-MPI: Unable to read sync.\n"));
917 if (dc == (dw & MPS_MASK))
922 rval = qla2x00_write_ram_word(vha, 0x7a15, dw);
923 if (rval != QLA_SUCCESS) {
924 DEBUG2(qla_printk(KERN_WARNING, ha,
925 "Sync-MPI: Unable to gain sync.\n"));
929 rval = qla2x00_write_ram_word(vha, 0x7c00, 0);
930 if (rval != QLA_SUCCESS) {
931 DEBUG2(qla_printk(KERN_WARNING, ha,
932 "Sync-MPI: Unable to release semaphore.\n"));
/**
 * qla2x00_setup_chip() - Load and start RISC firmware.
 *
 * Returns 0 on success.
 */
946 qla2x00_setup_chip(scsi_qla_host_t *vha)
949 uint32_t srisc_address = 0;
950 struct qla_hw_data *ha = vha->hw;
951 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
953 uint16_t fw_major_version;
if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
/* Disable SRAM, Instruction RAM and GP RAM parity. */
spin_lock_irqsave(&ha->hardware_lock, flags);
WRT_REG_WORD(&reg->hccr, (HCCR_ENABLE_PARITY + 0x0));
RD_REG_WORD(&reg->hccr);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
963 qla81xx_mpi_sync(vha);
965 /* Load firmware sequences */
966 rval = ha->isp_ops->load_risc(vha, &srisc_address);
967 if (rval == QLA_SUCCESS) {
968 DEBUG(printk("scsi(%ld): Verifying Checksum of loaded RISC "
969 "code.\n", vha->host_no));
971 rval = qla2x00_verify_checksum(vha, srisc_address);
972 if (rval == QLA_SUCCESS) {
973 /* Start firmware execution. */
974 DEBUG(printk("scsi(%ld): Checksum OK, start "
975 "firmware.\n", vha->host_no));
977 rval = qla2x00_execute_fw(vha, srisc_address);
978 /* Retrieve firmware information. */
979 if (rval == QLA_SUCCESS) {
980 fw_major_version = ha->fw_major_version;
981 rval = qla2x00_get_fw_version(vha,
982 &ha->fw_major_version,
983 &ha->fw_minor_version,
984 &ha->fw_subminor_version,
985 &ha->fw_attributes, &ha->fw_memory_size,
986 ha->mpi_version, &ha->mpi_capabilities,
988 if (rval != QLA_SUCCESS)
991 ha->flags.npiv_supported = 0;
992 if (IS_QLA2XXX_MIDTYPE(ha) &&
993 (ha->fw_attributes & BIT_2)) {
994 ha->flags.npiv_supported = 1;
995 if ((!ha->max_npiv_vports) ||
996 ((ha->max_npiv_vports + 1) %
997 MIN_MULTI_ID_FABRIC))
998 ha->max_npiv_vports =
999 MIN_MULTI_ID_FABRIC - 1;
1001 qla2x00_get_resource_cnts(vha, NULL,
1002 &ha->fw_xcb_count, NULL, NULL,
1003 &ha->max_npiv_vports);
1005 if (!fw_major_version && ql2xallocfwdump)
1006 qla2x00_alloc_fw_dump(vha);
1009 DEBUG2(printk(KERN_INFO
1010 "scsi(%ld): ISP Firmware failed checksum.\n",
if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
/* Enable proper parity. */
spin_lock_irqsave(&ha->hardware_lock, flags);
WRT_REG_WORD(&reg->hccr, HCCR_ENABLE_PARITY + 0x1);
/* SRAM, Instruction RAM and GP RAM parity */
WRT_REG_WORD(&reg->hccr, HCCR_ENABLE_PARITY + 0x7);
RD_REG_WORD(&reg->hccr);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
1028 if (rval == QLA_SUCCESS && IS_FAC_REQUIRED(ha)) {
1031 rval = qla81xx_fac_get_sector_size(vha, &size);
1032 if (rval == QLA_SUCCESS) {
1033 ha->flags.fac_supported = 1;
1034 ha->fdt_block_size = size << 2;
1036 qla_printk(KERN_ERR, ha,
1037 "Unsupported FAC firmware (%d.%02d.%02d).\n",
1038 ha->fw_major_version, ha->fw_minor_version,
1039 ha->fw_subminor_version);
1044 DEBUG2_3(printk("scsi(%ld): Setup chip **** FAILED ****.\n",
/**
 * qla2x00_init_response_q_entries() - Initializes response queue entries.
 *
 * Beginning of request ring has initialization control block already built
 * by nvram config routine.
 *
 * Returns 0 on success.
 */
1061 qla2x00_init_response_q_entries(struct rsp_que *rsp)
1066 rsp->ring_ptr = rsp->ring;
1067 rsp->ring_index = 0;
1068 rsp->status_srb = NULL;
1069 pkt = rsp->ring_ptr;
1070 for (cnt = 0; cnt < rsp->length; cnt++) {
1071 pkt->signature = RESPONSE_PROCESSED;
/**
 * qla2x00_update_fw_options() - Read and process firmware options.
 *
 * Returns 0 on success.
 */
1083 qla2x00_update_fw_options(scsi_qla_host_t *vha)
1085 uint16_t swing, emphasis, tx_sens, rx_sens;
1086 struct qla_hw_data *ha = vha->hw;
1088 memset(ha->fw_options, 0, sizeof(ha->fw_options));
1089 qla2x00_get_fw_options(vha, ha->fw_options);
1091 if (IS_QLA2100(ha) || IS_QLA2200(ha))
1094 /* Serial Link options. */
1095 DEBUG3(printk("scsi(%ld): Serial link options:\n",
1097 DEBUG3(qla2x00_dump_buffer((uint8_t *)&ha->fw_seriallink_options,
1098 sizeof(ha->fw_seriallink_options)));
1100 ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING;
1101 if (ha->fw_seriallink_options[3] & BIT_2) {
1102 ha->fw_options[1] |= FO1_SET_EMPHASIS_SWING;
1105 swing = ha->fw_seriallink_options[2] & (BIT_2 | BIT_1 | BIT_0);
1106 emphasis = (ha->fw_seriallink_options[2] &
1107 (BIT_4 | BIT_3)) >> 3;
1108 tx_sens = ha->fw_seriallink_options[0] &
1109 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
1110 rx_sens = (ha->fw_seriallink_options[0] &
1111 (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4;
1112 ha->fw_options[10] = (emphasis << 14) | (swing << 8);
1113 if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
1116 ha->fw_options[10] |= (tx_sens << 4) | rx_sens;
1117 } else if (IS_QLA2322(ha) || IS_QLA6322(ha))
1118 ha->fw_options[10] |= BIT_5 |
1119 ((rx_sens & (BIT_1 | BIT_0)) << 2) |
1120 (tx_sens & (BIT_1 | BIT_0));
1123 swing = (ha->fw_seriallink_options[2] &
1124 (BIT_7 | BIT_6 | BIT_5)) >> 5;
1125 emphasis = ha->fw_seriallink_options[3] & (BIT_1 | BIT_0);
1126 tx_sens = ha->fw_seriallink_options[1] &
1127 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
1128 rx_sens = (ha->fw_seriallink_options[1] &
1129 (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4;
1130 ha->fw_options[11] = (emphasis << 14) | (swing << 8);
1131 if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
1134 ha->fw_options[11] |= (tx_sens << 4) | rx_sens;
1135 } else if (IS_QLA2322(ha) || IS_QLA6322(ha))
1136 ha->fw_options[11] |= BIT_5 |
1137 ((rx_sens & (BIT_1 | BIT_0)) << 2) |
1138 (tx_sens & (BIT_1 | BIT_0));
1142 /* Return command IOCBs without waiting for an ABTS to complete. */
1143 ha->fw_options[3] |= BIT_13;
1146 if (ha->flags.enable_led_scheme)
1147 ha->fw_options[2] |= BIT_12;
1149 /* Detect ISP6312. */
1151 ha->fw_options[2] |= BIT_13;
1153 /* Update firmware options. */
1154 qla2x00_set_fw_options(vha, ha->fw_options);
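/*
 * qla24xx_update_fw_options() - Update ISP24xx serial-link firmware options.
 *
 * If enabled in NVRAM, hands the fw_seriallink_options24[] values to the
 * firmware via qla2x00_set_serdes_params().
 */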
1158 qla24xx_update_fw_options(scsi_qla_host_t *vha)
1161 struct qla_hw_data *ha = vha->hw;
1163 /* Update Serial Link options. */
1164 if ((le16_to_cpu(ha->fw_seriallink_options24[0]) & BIT_0) == 0)
1167 rval = qla2x00_set_serdes_params(vha,
1168 le16_to_cpu(ha->fw_seriallink_options24[1]),
1169 le16_to_cpu(ha->fw_seriallink_options24[2]),
1170 le16_to_cpu(ha->fw_seriallink_options24[3]));
1171 if (rval != QLA_SUCCESS) {
1172 qla_printk(KERN_WARNING, ha,
1173 "Unable to update Serial Link options (%x).\n", rval);
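/*
 * qla2x00_config_rings() - Program request/response ring parameters.
 *
 * Copies the ring lengths and DMA addresses into the initialization
 * control block and zeroes the ring in/out pointer registers.
 */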
1178 qla2x00_config_rings(struct scsi_qla_host *vha)
1180 struct qla_hw_data *ha = vha->hw;
1181 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1182 struct req_que *req = ha->req_q_map[0];
1183 struct rsp_que *rsp = ha->rsp_q_map[0];
1185 /* Setup ring parameters in initialization control block. */
1186 ha->init_cb->request_q_outpointer = __constant_cpu_to_le16(0);
1187 ha->init_cb->response_q_inpointer = __constant_cpu_to_le16(0);
1188 ha->init_cb->request_q_length = cpu_to_le16(req->length);
1189 ha->init_cb->response_q_length = cpu_to_le16(rsp->length);
1190 ha->init_cb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
1191 ha->init_cb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
1192 ha->init_cb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
1193 ha->init_cb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
1195 WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), 0);
1196 WRT_REG_WORD(ISP_REQ_Q_OUT(ha, reg), 0);
1197 WRT_REG_WORD(ISP_RSP_Q_IN(ha, reg), 0);
1198 WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), 0);
1199 RD_REG_WORD(ISP_RSP_Q_OUT(ha, reg)); /* PCI Posting. */
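/*
 * qla24xx_config_rings() - Program ring parameters on FWI2-capable ISPs.
 *
 * Fills the 24xx initialization control block (MSI-X vector, QoS and
 * multiqueue options where applicable) and clears the queue pointer
 * registers for either the ISP25xx multiqueue or ISP24xx register set.
 */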
1203 qla24xx_config_rings(struct scsi_qla_host *vha)
1205 struct qla_hw_data *ha = vha->hw;
1206 device_reg_t __iomem *reg = ISP_QUE_REG(ha, 0);
1207 struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
1208 struct qla_msix_entry *msix;
1209 struct init_cb_24xx *icb;
1211 struct req_que *req = ha->req_q_map[0];
1212 struct rsp_que *rsp = ha->rsp_q_map[0];
1214 /* Setup ring parameters in initialization control block. */
1215 icb = (struct init_cb_24xx *)ha->init_cb;
1216 icb->request_q_outpointer = __constant_cpu_to_le16(0);
1217 icb->response_q_inpointer = __constant_cpu_to_le16(0);
1218 icb->request_q_length = cpu_to_le16(req->length);
1219 icb->response_q_length = cpu_to_le16(rsp->length);
1220 icb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
1221 icb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
1222 icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
1223 icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
1226 icb->qos = __constant_cpu_to_le16(QLA_DEFAULT_QUE_QOS);
1227 icb->rid = __constant_cpu_to_le16(rid);
1228 if (ha->flags.msix_enabled) {
1229 msix = &ha->msix_entries[1];
1230 DEBUG2_17(printk(KERN_INFO
1231 "Registering vector 0x%x for base que\n", msix->entry));
1232 icb->msix = cpu_to_le16(msix->entry);
1234 /* Use alternate PCI bus number */
1236 icb->firmware_options_2 |=
1237 __constant_cpu_to_le32(BIT_19);
1238 /* Use alternate PCI devfn */
1240 icb->firmware_options_2 |=
1241 __constant_cpu_to_le32(BIT_18);
1243 icb->firmware_options_2 &= __constant_cpu_to_le32(~BIT_22);
1244 icb->firmware_options_2 |= __constant_cpu_to_le32(BIT_23);
WRT_REG_DWORD(&reg->isp25mq.req_q_in, 0);
WRT_REG_DWORD(&reg->isp25mq.req_q_out, 0);
WRT_REG_DWORD(&reg->isp25mq.rsp_q_in, 0);
WRT_REG_DWORD(&reg->isp25mq.rsp_q_out, 0);
WRT_REG_DWORD(&reg->isp24.req_q_in, 0);
WRT_REG_DWORD(&reg->isp24.req_q_out, 0);
WRT_REG_DWORD(&reg->isp24.rsp_q_in, 0);
WRT_REG_DWORD(&reg->isp24.rsp_q_out, 0);
1257 RD_REG_DWORD(&ioreg->hccr);
/**
 * qla2x00_init_rings() - Initializes firmware.
 *
 * Beginning of request ring has initialization control block already built
 * by nvram config routine.
 *
 * Returns 0 on success.
 */
1270 qla2x00_init_rings(scsi_qla_host_t *vha)
1273 unsigned long flags = 0;
1275 struct qla_hw_data *ha = vha->hw;
1276 struct req_que *req;
1277 struct rsp_que *rsp;
1278 struct scsi_qla_host *vp;
1279 struct mid_init_cb_24xx *mid_init_cb =
1280 (struct mid_init_cb_24xx *) ha->init_cb;
1282 spin_lock_irqsave(&ha->hardware_lock, flags);
1284 /* Clear outstanding commands array. */
1285 for (que = 0; que < ha->max_req_queues; que++) {
1286 req = ha->req_q_map[que];
1289 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++)
1290 req->outstanding_cmds[cnt] = NULL;
1292 req->current_outstanding_cmd = 1;
1294 /* Initialize firmware. */
1295 req->ring_ptr = req->ring;
1296 req->ring_index = 0;
1297 req->cnt = req->length;
1300 for (que = 0; que < ha->max_rsp_queues; que++) {
1301 rsp = ha->rsp_q_map[que];
1304 /* Initialize response queue entries */
1305 qla2x00_init_response_q_entries(rsp);
1308 /* Clear RSCN queue. */
1309 list_for_each_entry(vp, &ha->vp_list, list) {
1310 vp->rscn_in_ptr = 0;
1311 vp->rscn_out_ptr = 0;
1313 ha->isp_ops->config_rings(vha);
1315 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1317 /* Update any ISP specific firmware options before initialization. */
1318 ha->isp_ops->update_fw_options(vha);
1320 DEBUG(printk("scsi(%ld): Issue init firmware.\n", vha->host_no));
1322 if (ha->flags.npiv_supported) {
1323 if (ha->operating_mode == LOOP)
1324 ha->max_npiv_vports = MIN_MULTI_ID_FABRIC - 1;
1325 mid_init_cb->count = cpu_to_le16(ha->max_npiv_vports);
1328 if (IS_FWI2_CAPABLE(ha)) {
1329 mid_init_cb->options = __constant_cpu_to_le16(BIT_1);
1330 mid_init_cb->init_cb.execution_throttle =
1331 cpu_to_le16(ha->fw_xcb_count);
1334 rval = qla2x00_init_firmware(vha, ha->init_cb_size);
1336 DEBUG2_3(printk("scsi(%ld): Init firmware **** FAILED ****.\n",
1339 DEBUG3(printk("scsi(%ld): Init firmware -- success.\n",
/**
 * qla2x00_fw_ready() - Waits for firmware ready.
 *
 * Returns 0 on success.
 */
1353 qla2x00_fw_ready(scsi_qla_host_t *vha)
1356 unsigned long wtime, mtime, cs84xx_time;
1357 uint16_t min_wait; /* Minimum wait time if loop is down */
1358 uint16_t wait_time; /* Wait time if loop is coming ready */
1360 struct qla_hw_data *ha = vha->hw;
1364 /* 20 seconds for loop down. */
/*
 * Firmware should take at most one RATOV to login, plus 5 seconds for
 * our own processing.
 */
1371 if ((wait_time = (ha->retry_count*ha->login_timeout) + 5) < min_wait) {
1372 wait_time = min_wait;
1375 /* Min wait time if loop down */
1376 mtime = jiffies + (min_wait * HZ);
1378 /* wait time before firmware ready */
1379 wtime = jiffies + (wait_time * HZ);
1381 /* Wait for ISP to finish LIP */
1382 if (!vha->flags.init_done)
1383 qla_printk(KERN_INFO, ha, "Waiting for LIP to complete...\n");
1385 DEBUG3(printk("scsi(%ld): Waiting for LIP to complete...\n",
1389 rval = qla2x00_get_firmware_state(vha, state);
1390 if (rval == QLA_SUCCESS) {
1391 if (state[0] < FSTATE_LOSS_OF_SYNC) {
1392 vha->device_flags &= ~DFLG_NO_CABLE;
1394 if (IS_QLA84XX(ha) && state[0] != FSTATE_READY) {
1395 DEBUG16(printk("scsi(%ld): fw_state=%x "
1396 "84xx=%x.\n", vha->host_no, state[0],
1398 if ((state[2] & FSTATE_LOGGED_IN) &&
1399 (state[2] & FSTATE_WAITING_FOR_VERIFY)) {
1400 DEBUG16(printk("scsi(%ld): Sending "
1401 "verify iocb.\n", vha->host_no));
1403 cs84xx_time = jiffies;
1404 rval = qla84xx_init_chip(vha);
1405 if (rval != QLA_SUCCESS)
1408 /* Add time taken to initialize. */
1409 cs84xx_time = jiffies - cs84xx_time;
1410 wtime += cs84xx_time;
1411 mtime += cs84xx_time;
1412 DEBUG16(printk("scsi(%ld): Increasing "
1413 "wait time by %ld. New time %ld\n",
1414 vha->host_no, cs84xx_time, wtime));
1416 } else if (state[0] == FSTATE_READY) {
1417 DEBUG(printk("scsi(%ld): F/W Ready - OK \n",
1420 qla2x00_get_retry_cnt(vha, &ha->retry_count,
1421 &ha->login_timeout, &ha->r_a_tov);
1427 rval = QLA_FUNCTION_FAILED;
1429 if (atomic_read(&vha->loop_down_timer) &&
1430 state[0] != FSTATE_READY) {
/* Loop down. Timeout on min_wait for states
 * other than Wait for Login.
 */
1434 if (time_after_eq(jiffies, mtime)) {
1435 qla_printk(KERN_INFO, ha,
1436 "Cable is unplugged...\n");
1438 vha->device_flags |= DFLG_NO_CABLE;
1443 /* Mailbox cmd failed. Timeout on min_wait. */
1444 if (time_after_eq(jiffies, mtime))
1448 if (time_after_eq(jiffies, wtime))
1451 /* Delay for a while */
1454 DEBUG3(printk("scsi(%ld): fw_state=%x curr time=%lx.\n",
1455 vha->host_no, state[0], jiffies));
1458 DEBUG(printk("scsi(%ld): fw_state=%x (%x, %x, %x, %x) curr time=%lx.\n",
1459 vha->host_no, state[0], state[1], state[2], state[3], state[4],
1463 DEBUG2_3(printk("scsi(%ld): Firmware ready **** FAILED ****.\n",
/*
* qla2x00_configure_hba
*      Setup adapter context.
*
* Input:
*      ha = adapter state pointer.
*/
1484 qla2x00_configure_hba(scsi_qla_host_t *vha)
1493 char connect_type[22];
1494 struct qla_hw_data *ha = vha->hw;
1496 /* Get host addresses. */
1497 rval = qla2x00_get_adapter_id(vha,
1498 &loop_id, &al_pa, &area, &domain, &topo, &sw_cap);
1499 if (rval != QLA_SUCCESS) {
1500 if (LOOP_TRANSITION(vha) || atomic_read(&ha->loop_down_timer) ||
1501 (rval == QLA_COMMAND_ERROR && loop_id == 0x7)) {
1502 DEBUG2(printk("%s(%ld) Loop is in a transition state\n",
1503 __func__, vha->host_no));
1505 qla_printk(KERN_WARNING, ha,
1506 "ERROR -- Unable to get host loop ID.\n");
1507 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1513 qla_printk(KERN_INFO, ha,
1514 "Cannot get topology - retrying.\n");
1515 return (QLA_FUNCTION_FAILED);
1518 vha->loop_id = loop_id;
1521 ha->min_external_loopid = SNS_FIRST_LOOP_ID;
1522 ha->operating_mode = LOOP;
1527 DEBUG3(printk("scsi(%ld): HBA in NL topology.\n",
1529 ha->current_topology = ISP_CFG_NL;
1530 strcpy(connect_type, "(Loop)");
1534 DEBUG3(printk("scsi(%ld): HBA in FL topology.\n",
1536 ha->switch_cap = sw_cap;
1537 ha->current_topology = ISP_CFG_FL;
1538 strcpy(connect_type, "(FL_Port)");
1542 DEBUG3(printk("scsi(%ld): HBA in N P2P topology.\n",
1544 ha->operating_mode = P2P;
1545 ha->current_topology = ISP_CFG_N;
1546 strcpy(connect_type, "(N_Port-to-N_Port)");
1550 DEBUG3(printk("scsi(%ld): HBA in F P2P topology.\n",
1552 ha->switch_cap = sw_cap;
1553 ha->operating_mode = P2P;
1554 ha->current_topology = ISP_CFG_F;
1555 strcpy(connect_type, "(F_Port)");
1559 DEBUG3(printk("scsi(%ld): HBA in unknown topology %x. "
1561 vha->host_no, topo));
1562 ha->current_topology = ISP_CFG_NL;
1563 strcpy(connect_type, "(Loop)");
1567 /* Save Host port and loop ID. */
1568 /* byte order - Big Endian */
1569 vha->d_id.b.domain = domain;
1570 vha->d_id.b.area = area;
1571 vha->d_id.b.al_pa = al_pa;
1573 if (!vha->flags.init_done)
1574 qla_printk(KERN_INFO, ha,
1575 "Topology - %s, Host Loop address 0x%x\n",
1576 connect_type, vha->loop_id);
1579 DEBUG2_3(printk("scsi(%ld): FAILED.\n", vha->host_no));
1581 DEBUG3(printk("scsi(%ld): exiting normally.\n", vha->host_no));
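/*
 * qla2x00_set_model_info() - Derive the adapter model number/description.
 *
 * Uses the model string from NVRAM when present, otherwise falls back to
 * the PCI subsystem-ID lookup table (or the supplied default), and on
 * FWI2-capable adapters pulls the description from the VPD.
 */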
1588 qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
1593 struct qla_hw_data *ha = vha->hw;
1594 int use_tbl = !IS_QLA25XX(ha) && !IS_QLA81XX(ha);
1596 if (memcmp(model, BINZERO, len) != 0) {
1597 strncpy(ha->model_number, model, len);
1598 st = en = ha->model_number;
1601 if (*en != 0x20 && *en != 0x00)
1606 index = (ha->pdev->subsystem_device & 0xff);
1608 ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
1609 index < QLA_MODEL_NAMES)
1610 strncpy(ha->model_desc,
1611 qla2x00_model_name[index * 2 + 1],
1612 sizeof(ha->model_desc) - 1);
1614 index = (ha->pdev->subsystem_device & 0xff);
1616 ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
1617 index < QLA_MODEL_NAMES) {
1618 strcpy(ha->model_number,
1619 qla2x00_model_name[index * 2]);
1620 strncpy(ha->model_desc,
1621 qla2x00_model_name[index * 2 + 1],
1622 sizeof(ha->model_desc) - 1);
1624 strcpy(ha->model_number, def);
1627 if (IS_FWI2_CAPABLE(ha))
1628 qla2xxx_get_vpd_field(vha, "\x82", ha->model_desc,
1629 sizeof(ha->model_desc));
/* On sparc systems, obtain port and node WWN from firmware
 * properties.
 */
1635 static void qla2xxx_nvram_wwn_from_ofw(scsi_qla_host_t *vha, nvram_t *nv)
1638 struct qla_hw_data *ha = vha->hw;
1639 struct pci_dev *pdev = ha->pdev;
1640 struct device_node *dp = pci_device_to_OF_node(pdev);
1644 val = of_get_property(dp, "port-wwn", &len);
1645 if (val && len >= WWN_SIZE)
1646 memcpy(nv->port_name, val, WWN_SIZE);
1648 val = of_get_property(dp, "node-wwn", &len);
1649 if (val && len >= WWN_SIZE)
1650 memcpy(nv->node_name, val, WWN_SIZE);
/*
* NVRAM configuration for ISP 2xxx
*
* Input:
*      ha = adapter block pointer.
*
* Output:
*      initialization control block in response_ring
*      host adapter parameters in host adapter block
*/
1668 qla2x00_nvram_config(scsi_qla_host_t *vha)
1673 uint8_t *dptr1, *dptr2;
1674 struct qla_hw_data *ha = vha->hw;
1675 init_cb_t *icb = ha->init_cb;
1676 nvram_t *nv = ha->nvram;
1677 uint8_t *ptr = ha->nvram;
1678 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1682 /* Determine NVRAM starting address. */
1683 ha->nvram_size = sizeof(nvram_t);
1685 if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha))
if ((RD_REG_WORD(&reg->ctrl_status) >> 14) == 1)
1687 ha->nvram_base = 0x80;
1689 /* Get NVRAM data and calculate checksum. */
1690 ha->isp_ops->read_nvram(vha, ptr, ha->nvram_base, ha->nvram_size);
1691 for (cnt = 0, chksum = 0; cnt < ha->nvram_size; cnt++)
1694 DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", vha->host_no));
1695 DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size));
1697 /* Bad NVRAM data, set defaults parameters. */
1698 if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' ||
1699 nv->id[2] != 'P' || nv->id[3] != ' ' || nv->nvram_version < 1) {
1700 /* Reset NVRAM data. */
1701 qla_printk(KERN_WARNING, ha, "Inconsistent NVRAM detected: "
1702 "checksum=0x%x id=%c version=0x%x.\n", chksum, nv->id[0],
1704 qla_printk(KERN_WARNING, ha, "Falling back to functioning (yet "
1705 "invalid -- WWPN) defaults.\n");
/*
 * Set default initialization control block.
 */
1710 memset(nv, 0, ha->nvram_size);
1711 nv->parameter_block_version = ICB_VERSION;
1713 if (IS_QLA23XX(ha)) {
1714 nv->firmware_options[0] = BIT_2 | BIT_1;
1715 nv->firmware_options[1] = BIT_7 | BIT_5;
1716 nv->add_firmware_options[0] = BIT_5;
1717 nv->add_firmware_options[1] = BIT_5 | BIT_4;
1718 nv->frame_payload_size = __constant_cpu_to_le16(2048);
1719 nv->special_options[1] = BIT_7;
1720 } else if (IS_QLA2200(ha)) {
1721 nv->firmware_options[0] = BIT_2 | BIT_1;
1722 nv->firmware_options[1] = BIT_7 | BIT_5;
1723 nv->add_firmware_options[0] = BIT_5;
1724 nv->add_firmware_options[1] = BIT_5 | BIT_4;
1725 nv->frame_payload_size = __constant_cpu_to_le16(1024);
1726 } else if (IS_QLA2100(ha)) {
1727 nv->firmware_options[0] = BIT_3 | BIT_1;
1728 nv->firmware_options[1] = BIT_5;
1729 nv->frame_payload_size = __constant_cpu_to_le16(1024);
1732 nv->max_iocb_allocation = __constant_cpu_to_le16(256);
1733 nv->execution_throttle = __constant_cpu_to_le16(16);
1734 nv->retry_count = 8;
1735 nv->retry_delay = 1;
1737 nv->port_name[0] = 33;
1738 nv->port_name[3] = 224;
1739 nv->port_name[4] = 139;
1741 qla2xxx_nvram_wwn_from_ofw(vha, nv);
1743 nv->login_timeout = 4;
/*
 * Set default host adapter parameters
 */
1748 nv->host_p[1] = BIT_2;
1749 nv->reset_delay = 5;
1750 nv->port_down_retry_count = 8;
1751 nv->max_luns_per_target = __constant_cpu_to_le16(8);
1752 nv->link_down_timeout = 60;
1757 #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
/*
 * The SN2 does not provide BIOS emulation which means you can't change
 * potentially bogus BIOS settings. Force the use of default settings
 * for link rate and frame size. Hope that the rest of the settings
 * are sane.
 */
1764 if (ia64_platform_is("sn2")) {
1765 nv->frame_payload_size = __constant_cpu_to_le16(2048);
1767 nv->special_options[1] = BIT_7;
1771 /* Reset Initialization control block */
1772 memset(icb, 0, ha->init_cb_size);
/*
 * Setup driver NVRAM options.
 */
1777 nv->firmware_options[0] |= (BIT_6 | BIT_1);
1778 nv->firmware_options[0] &= ~(BIT_5 | BIT_4);
1779 nv->firmware_options[1] |= (BIT_5 | BIT_0);
1780 nv->firmware_options[1] &= ~BIT_4;
1782 if (IS_QLA23XX(ha)) {
1783 nv->firmware_options[0] |= BIT_2;
1784 nv->firmware_options[0] &= ~BIT_3;
1785 nv->add_firmware_options[1] |= BIT_5 | BIT_4;
1787 if (IS_QLA2300(ha)) {
1788 if (ha->fb_rev == FPM_2310) {
1789 strcpy(ha->model_number, "QLA2310");
1791 strcpy(ha->model_number, "QLA2300");
1794 qla2x00_set_model_info(vha, nv->model_number,
1795 sizeof(nv->model_number), "QLA23xx");
1797 } else if (IS_QLA2200(ha)) {
1798 nv->firmware_options[0] |= BIT_2;
1800 * 'Point-to-point preferred, else loop' is not a safe
1801 * connection mode setting.
1803 if ((nv->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) ==
1805 /* Force 'loop preferred, else point-to-point'. */
1806 nv->add_firmware_options[0] &= ~(BIT_6 | BIT_5 | BIT_4);
1807 nv->add_firmware_options[0] |= BIT_5;
1809 strcpy(ha->model_number, "QLA22xx");
1810 } else /*if (IS_QLA2100(ha))*/ {
1811 strcpy(ha->model_number, "QLA2100");
/*
 * Copy over NVRAM RISC parameter block to initialization control block.
 */
1817 dptr1 = (uint8_t *)icb;
1818 dptr2 = (uint8_t *)&nv->parameter_block_version;
1819 cnt = (uint8_t *)&icb->request_q_outpointer - (uint8_t *)&icb->version;
1821 *dptr1++ = *dptr2++;
1823 /* Copy 2nd half. */
1824 dptr1 = (uint8_t *)icb->add_firmware_options;
1825 cnt = (uint8_t *)icb->reserved_3 - (uint8_t *)icb->add_firmware_options;
1827 *dptr1++ = *dptr2++;
1829 /* Use alternate WWN? */
1830 if (nv->host_p[1] & BIT_7) {
1831 memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
1832 memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
1835 /* Prepare nodename */
1836 if ((icb->firmware_options[1] & BIT_6) == 0) {
/*
 * Firmware will apply the following mask if the nodename was
 * not provided.
 */
1841 memcpy(icb->node_name, icb->port_name, WWN_SIZE);
1842 icb->node_name[0] &= 0xF0;
/*
 * Set host adapter parameters.
 */
1848 if (nv->host_p[0] & BIT_7)
1849 ql2xextended_error_logging = 1;
1850 ha->flags.disable_risc_code_load = ((nv->host_p[0] & BIT_4) ? 1 : 0);
1851 /* Always load RISC code on non ISP2[12]00 chips. */
1852 if (!IS_QLA2100(ha) && !IS_QLA2200(ha))
1853 ha->flags.disable_risc_code_load = 0;
1854 ha->flags.enable_lip_reset = ((nv->host_p[1] & BIT_1) ? 1 : 0);
1855 ha->flags.enable_lip_full_login = ((nv->host_p[1] & BIT_2) ? 1 : 0);
1856 ha->flags.enable_target_reset = ((nv->host_p[1] & BIT_3) ? 1 : 0);
1857 ha->flags.enable_led_scheme = (nv->special_options[1] & BIT_4) ? 1 : 0;
1858 ha->flags.disable_serdes = 0;
1860 ha->operating_mode =
1861 (icb->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) >> 4;
1863 memcpy(ha->fw_seriallink_options, nv->seriallink_options,
1864 sizeof(ha->fw_seriallink_options));
1866 /* save HBA serial number */
1867 ha->serial0 = icb->port_name[5];
1868 ha->serial1 = icb->port_name[6];
1869 ha->serial2 = icb->port_name[7];
1870 memcpy(vha->node_name, icb->node_name, WWN_SIZE);
1871 memcpy(vha->port_name, icb->port_name, WWN_SIZE);
1873 icb->execution_throttle = __constant_cpu_to_le16(0xFFFF);
1875 ha->retry_count = nv->retry_count;
1877 /* Set minimum login_timeout to 4 seconds. */
1878 if (nv->login_timeout < ql2xlogintimeout)
1879 nv->login_timeout = ql2xlogintimeout;
1880 if (nv->login_timeout < 4)
1881 nv->login_timeout = 4;
1882 ha->login_timeout = nv->login_timeout;
1883 icb->login_timeout = nv->login_timeout;
1885 /* Set minimum RATOV to 100 tenths of a second. */
1888 ha->loop_reset_delay = nv->reset_delay;
/* Link Down Timeout = 0:
 *
 *  When Port Down timer expires we will start returning
 *  I/O's to OS with "DID_NO_CONNECT".
 *
 * Link Down Timeout != 0:
 *
 *  The driver waits for the link to come up after link down
 *  before returning I/Os to OS with "DID_NO_CONNECT".
 */
1900 if (nv->link_down_timeout == 0) {
1901 ha->loop_down_abort_time =
1902 (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
1904 ha->link_down_timeout = nv->link_down_timeout;
1905 ha->loop_down_abort_time =
1906 (LOOP_DOWN_TIME - ha->link_down_timeout);
/*
 * Need enough time to try and get the port back.
 */
1912 ha->port_down_retry_count = nv->port_down_retry_count;
1913 if (qlport_down_retry)
1914 ha->port_down_retry_count = qlport_down_retry;
1915 /* Set login_retry_count */
1916 ha->login_retry_count = nv->retry_count;
1917 if (ha->port_down_retry_count == nv->port_down_retry_count &&
1918 ha->port_down_retry_count > 3)
1919 ha->login_retry_count = ha->port_down_retry_count;
1920 else if (ha->port_down_retry_count > (int)ha->login_retry_count)
1921 ha->login_retry_count = ha->port_down_retry_count;
1922 if (ql2xloginretrycount)
1923 ha->login_retry_count = ql2xloginretrycount;
1925 icb->lun_enables = __constant_cpu_to_le16(0);
1926 icb->command_resource_count = 0;
1927 icb->immediate_notify_resource_count = 0;
1928 icb->timeout = __constant_cpu_to_le16(0);
1930 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
1932 icb->firmware_options[0] &= ~BIT_3;
1933 icb->add_firmware_options[0] &=
1934 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
1935 icb->add_firmware_options[0] |= BIT_2;
1936 icb->response_accumulation_timer = 3;
1937 icb->interrupt_delay_timer = 5;
1939 vha->flags.process_response_queue = 1;
1942 if (!vha->flags.init_done) {
1943 ha->zio_mode = icb->add_firmware_options[0] &
1944 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
1945 ha->zio_timer = icb->interrupt_delay_timer ?
1946 icb->interrupt_delay_timer: 2;
1948 icb->add_firmware_options[0] &=
1949 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
1950 vha->flags.process_response_queue = 0;
1951 if (ha->zio_mode != QLA_ZIO_DISABLED) {
1952 ha->zio_mode = QLA_ZIO_MODE_6;
1954 DEBUG2(printk("scsi(%ld): ZIO mode %d enabled; timer "
1955 "delay (%d us).\n", vha->host_no, ha->zio_mode,
1956 ha->zio_timer * 100));
1957 qla_printk(KERN_INFO, ha,
1958 "ZIO mode %d enabled; timer delay (%d us).\n",
1959 ha->zio_mode, ha->zio_timer * 100);
1961 icb->add_firmware_options[0] |= (uint8_t)ha->zio_mode;
1962 icb->interrupt_delay_timer = (uint8_t)ha->zio_timer;
1963 vha->flags.process_response_queue = 1;
1968 DEBUG2_3(printk(KERN_WARNING
1969 "scsi(%ld): NVRAM configuration failed!\n", vha->host_no));
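/*
 * qla2x00_rport_del() - Delete a deferred fc_rport.
 *
 * Detaches fcport->drport under the host lock and returns the remote port
 * to the FC transport via fc_remote_port_delete().
 */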
1975 qla2x00_rport_del(void *data)
1977 fc_port_t *fcport = data;
1978 struct fc_rport *rport;
1980 spin_lock_irq(fcport->vha->host->host_lock);
1981 rport = fcport->drport;
1982 fcport->drport = NULL;
1983 spin_unlock_irq(fcport->vha->host->host_lock);
1985 fc_remote_port_delete(rport);
/**
 * qla2x00_alloc_fcport() - Allocate a generic fcport.
 * @flags: allocation flags
 *
 * Returns a pointer to the allocated fcport, or NULL, if none available.
 */
1996 qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
2000 fcport = kzalloc(sizeof(fc_port_t), flags);
2004 /* Setup fcport template structure. */
2006 fcport->vp_idx = vha->vp_idx;
2007 fcport->port_type = FCT_UNKNOWN;
2008 fcport->loop_id = FC_NO_LOOP_ID;
2009 atomic_set(&fcport->state, FCS_UNCONFIGURED);
2010 fcport->supported_classes = FC_COS_UNSPECIFIED;
/*
* qla2x00_configure_loop
*      Updates Fibre Channel Device Database with what is actually on loop.
*
* Input:
*      ha = adapter block pointer.
*
* Returns:
*      0 = success.
*      1 = error.
*      2 = database was full and device was not configured.
*/
2028 qla2x00_configure_loop(scsi_qla_host_t *vha)
2031 unsigned long flags, save_flags;
2032 struct qla_hw_data *ha = vha->hw;
2035 /* Get Initiator ID */
2036 if (test_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags)) {
2037 rval = qla2x00_configure_hba(vha);
2038 if (rval != QLA_SUCCESS) {
2039 DEBUG(printk("scsi(%ld): Unable to configure HBA.\n",
2045 save_flags = flags = vha->dpc_flags;
2046 DEBUG(printk("scsi(%ld): Configure loop -- dpc flags =0x%lx\n",
2047 vha->host_no, flags));
/*
 * If we have both an RSCN and PORT UPDATE pending then handle them
 * both at the same time.
 */
2053 clear_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
2054 clear_bit(RSCN_UPDATE, &vha->dpc_flags);
2056 /* Determine what we need to do */
2057 if (ha->current_topology == ISP_CFG_FL &&
2058 (test_bit(LOCAL_LOOP_UPDATE, &flags))) {
2060 vha->flags.rscn_queue_overflow = 1;
2061 set_bit(RSCN_UPDATE, &flags);
2063 } else if (ha->current_topology == ISP_CFG_F &&
2064 (test_bit(LOCAL_LOOP_UPDATE, &flags))) {
2066 vha->flags.rscn_queue_overflow = 1;
2067 set_bit(RSCN_UPDATE, &flags);
2068 clear_bit(LOCAL_LOOP_UPDATE, &flags);
2070 } else if (ha->current_topology == ISP_CFG_N) {
2071 clear_bit(RSCN_UPDATE, &flags);
2073 } else if (!vha->flags.online ||
2074 (test_bit(ABORT_ISP_ACTIVE, &flags))) {
2076 vha->flags.rscn_queue_overflow = 1;
2077 set_bit(RSCN_UPDATE, &flags);
2078 set_bit(LOCAL_LOOP_UPDATE, &flags);
2081 if (test_bit(LOCAL_LOOP_UPDATE, &flags)) {
2082 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
2083 rval = QLA_FUNCTION_FAILED;
2085 rval = qla2x00_configure_local_loop(vha);
2088 if (rval == QLA_SUCCESS && test_bit(RSCN_UPDATE, &flags)) {
2089 if (LOOP_TRANSITION(vha))
2090 rval = QLA_FUNCTION_FAILED;
2092 rval = qla2x00_configure_fabric(vha);
2095 if (rval == QLA_SUCCESS) {
2096 if (atomic_read(&vha->loop_down_timer) ||
2097 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
2098 rval = QLA_FUNCTION_FAILED;
2100 atomic_set(&vha->loop_state, LOOP_READY);
2102 DEBUG(printk("scsi(%ld): LOOP READY\n", vha->host_no));
2107 DEBUG2_3(printk("%s(%ld): *** FAILED ***\n",
2108 __func__, vha->host_no));
2110 DEBUG3(printk("%s: exiting normally\n", __func__));
2113 /* Restore state if a resync event occurred during processing */
2114 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
2115 if (test_bit(LOCAL_LOOP_UPDATE, &save_flags))
2116 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
2117 if (test_bit(RSCN_UPDATE, &save_flags)) {
2118 set_bit(RSCN_UPDATE, &vha->dpc_flags);
2119 vha->flags.rscn_queue_overflow = 1;
/*
* qla2x00_configure_local_loop
*      Updates Fibre Channel Device Database with local loop devices.
*
* Input:
*      ha = adapter block pointer.
*/
2139 qla2x00_configure_local_loop(scsi_qla_host_t *vha)
2144 fc_port_t *fcport, *new_fcport;
2150 uint8_t domain, area, al_pa;
2151 struct qla_hw_data *ha = vha->hw;
2155 entries = MAX_FIBRE_DEVICES;
2157 DEBUG3(printk("scsi(%ld): Getting FCAL position map\n", vha->host_no));
2158 DEBUG3(qla2x00_get_fcal_position_map(vha, NULL));
2160 /* Get list of logged in devices. */
2161 memset(ha->gid_list, 0, GID_LIST_SIZE);
2162 rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma,
2164 if (rval != QLA_SUCCESS)
2165 goto cleanup_allocation;
2167 DEBUG3(printk("scsi(%ld): Entries in ID list (%d)\n",
2168 vha->host_no, entries));
2169 DEBUG3(qla2x00_dump_buffer((uint8_t *)ha->gid_list,
2170 entries * sizeof(struct gid_list_info)));
2172 /* Allocate temporary fcport for any new fcports discovered. */
2173 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2174 if (new_fcport == NULL) {
2175 rval = QLA_MEMORY_ALLOC_FAILED;
2176 goto cleanup_allocation;
2178 new_fcport->flags &= ~FCF_FABRIC_DEVICE;
/*
 * Mark local devices that were present with FCF_DEVICE_LOST for now.
 */
2183 list_for_each_entry(fcport, &vha->vp_fcports, list) {
2184 if (atomic_read(&fcport->state) == FCS_ONLINE &&
2185 fcport->port_type != FCT_BROADCAST &&
2186 (fcport->flags & FCF_FABRIC_DEVICE) == 0) {
2188 DEBUG(printk("scsi(%ld): Marking port lost, "
2190 vha->host_no, fcport->loop_id));
2192 atomic_set(&fcport->state, FCS_DEVICE_LOST);
2196 /* Add devices to port list. */
2197 id_iter = (char *)ha->gid_list;
2198 for (index = 0; index < entries; index++) {
2199 domain = ((struct gid_list_info *)id_iter)->domain;
2200 area = ((struct gid_list_info *)id_iter)->area;
2201 al_pa = ((struct gid_list_info *)id_iter)->al_pa;
2202 if (IS_QLA2100(ha) || IS_QLA2200(ha))
2203 loop_id = (uint16_t)
2204 ((struct gid_list_info *)id_iter)->loop_id_2100;
2206 loop_id = le16_to_cpu(
2207 ((struct gid_list_info *)id_iter)->loop_id);
2208 id_iter += ha->gid_list_info_size;
2210 /* Bypass reserved domain fields. */
2211 if ((domain & 0xf0) == 0xf0)
2214 /* Bypass if not same domain and area of adapter. */
2215 if (area && domain &&
2216 (area != vha->d_id.b.area || domain != vha->d_id.b.domain))
2219 /* Bypass invalid local loop ID. */
2220 if (loop_id > LAST_LOCAL_LOOP_ID)
2223 /* Fill in member data. */
2224 new_fcport->d_id.b.domain = domain;
2225 new_fcport->d_id.b.area = area;
2226 new_fcport->d_id.b.al_pa = al_pa;
2227 new_fcport->loop_id = loop_id;
2228 new_fcport->vp_idx = vha->vp_idx;
2229 rval2 = qla2x00_get_port_database(vha, new_fcport, 0);
2230 if (rval2 != QLA_SUCCESS) {
2231 DEBUG2(printk("scsi(%ld): Failed to retrieve fcport "
2232 "information -- get_port_database=%x, "
2234 vha->host_no, rval2, new_fcport->loop_id));
2235 DEBUG2(printk("scsi(%ld): Scheduling resync...\n",
2237 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
2241 /* Check for matching device in port list. */
2244 list_for_each_entry(fcport, &vha->vp_fcports, list) {
2245 if (memcmp(new_fcport->port_name, fcport->port_name,
2249 fcport->flags &= ~FCF_FABRIC_DEVICE;
2250 fcport->loop_id = new_fcport->loop_id;
2251 fcport->port_type = new_fcport->port_type;
2252 fcport->d_id.b24 = new_fcport->d_id.b24;
2253 memcpy(fcport->node_name, new_fcport->node_name,
2261 /* New device, add to fcports list. */
2263 new_fcport->vha = vha;
2264 new_fcport->vp_idx = vha->vp_idx;
2266 list_add_tail(&new_fcport->list, &vha->vp_fcports);
2268 /* Allocate a new replacement fcport. */
2269 fcport = new_fcport;
2270 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2271 if (new_fcport == NULL) {
2272 rval = QLA_MEMORY_ALLOC_FAILED;
2273 goto cleanup_allocation;
2275 new_fcport->flags &= ~FCF_FABRIC_DEVICE;
2278 /* Base iIDMA settings on HBA port speed. */
2279 fcport->fp_speed = ha->link_data_rate;
2281 qla2x00_update_fcport(vha, fcport);
2289 if (rval != QLA_SUCCESS) {
2290 DEBUG2(printk("scsi(%ld): Configure local loop error exit: "
2291 "rval=%x\n", vha->host_no, rval));
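/*
 * qla2x00_iidma_fcport() - Apply iIDMA speed settings for an fcport.
 *
 * On iIDMA-capable adapters, programs the remote port's login speed into
 * the firmware via qla2x00_set_idma_speed() when it does not exceed the
 * adapter's current link rate, and logs the result.
 */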
2298 qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
2300 #define LS_UNKNOWN 2
2301 static char *link_speeds[] = { "1", "2", "?", "4", "8", "10" };
2305 struct qla_hw_data *ha = vha->hw;
2307 if (!IS_IIDMA_CAPABLE(ha))
2310 if (fcport->fp_speed == PORT_SPEED_UNKNOWN ||
2311 fcport->fp_speed > ha->link_data_rate)
2314 rval = qla2x00_set_idma_speed(vha, fcport->loop_id, fcport->fp_speed,
2316 if (rval != QLA_SUCCESS) {
2317 DEBUG2(printk("scsi(%ld): Unable to adjust iIDMA "
2318 "%02x%02x%02x%02x%02x%02x%02x%02x -- %04x %x %04x %04x.\n",
2319 vha->host_no, fcport->port_name[0], fcport->port_name[1],
2320 fcport->port_name[2], fcport->port_name[3],
2321 fcport->port_name[4], fcport->port_name[5],
2322 fcport->port_name[6], fcport->port_name[7], rval,
2323 fcport->fp_speed, mb[0], mb[1]));
2325 link_speed = link_speeds[LS_UNKNOWN];
2326 if (fcport->fp_speed < 5)
2327 link_speed = link_speeds[fcport->fp_speed];
2328 else if (fcport->fp_speed == 0x13)
2329 link_speed = link_speeds[5];
2330 DEBUG2(qla_printk(KERN_INFO, ha,
2331 "iIDMA adjusted to %s GB/s on "
2332 "%02x%02x%02x%02x%02x%02x%02x%02x.\n",
2333 link_speed, fcport->port_name[0],
2334 fcport->port_name[1], fcport->port_name[2],
2335 fcport->port_name[3], fcport->port_name[4],
2336 fcport->port_name[5], fcport->port_name[6],
2337 fcport->port_name[7]));
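/*
 * Illustrative note (not driver code): the firmware reports fp_speed as an
 * index into the link_speeds[] table above -- 0 => "1", 1 => "2", 3 => "4",
 * 4 => "8" Gb/s -- with the out-of-band value 0x13 meaning 10 Gb/s
 * (link_speeds[5]); anything else at or above 5 falls back to
 * link_speeds[LS_UNKNOWN]. A sketch of the mapping used for the message above:
 *
 *	fp_speed == 1    ->  link_speeds[1]  ("2" Gb/s)
 *	fp_speed == 4    ->  link_speeds[4]  ("8" Gb/s)
 *	fp_speed == 0x13 ->  link_speeds[5]  ("10" Gb/s)
 *	fp_speed == 7    ->  link_speeds[LS_UNKNOWN]  ("?")
 */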
2342 qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
2344 struct fc_rport_identifiers rport_ids;
2345 struct fc_rport *rport;
2346 struct qla_hw_data *ha = vha->hw;
2349 qla2x00_rport_del(fcport);
2351 rport_ids.node_name = wwn_to_u64(fcport->node_name);
2352 rport_ids.port_name = wwn_to_u64(fcport->port_name);
2353 rport_ids.port_id = fcport->d_id.b.domain << 16 |
2354 fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
2355 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
2356 fcport->rport = rport = fc_remote_port_add(vha->host, 0, &rport_ids);
2358 qla_printk(KERN_WARNING, ha,
2359 "Unable to allocate fc remote port!\n");
2362 spin_lock_irq(fcport->vha->host->host_lock);
2363 *((fc_port_t **)rport->dd_data) = fcport;
2364 spin_unlock_irq(fcport->vha->host->host_lock);
2366 rport->supported_classes = fcport->supported_classes;
2368 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
2369 if (fcport->port_type == FCT_INITIATOR)
2370 rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
2371 if (fcport->port_type == FCT_TARGET)
2372 rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
2373 fc_remote_port_rolechg(rport, rport_ids.roles);
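/*
 * Illustrative sketch (not driver code) of the FC transport registration
 * sequence performed above: the 24-bit port ID is packed from
 * domain/area/AL_PA, the rport is added with unknown roles, the driver's
 * fc_port_t is stashed in the rport's dd_data under the host lock, and the
 * actual roles are reported afterwards via fc_remote_port_rolechg().
 *
 *	d_id = { .domain = 0x01, .area = 0x02, .al_pa = 0xef }
 *	port_id = 0x01 << 16 | 0x02 << 8 | 0xef   ->   0x0102ef
 *
 *	rport = fc_remote_port_add(shost, 0, &ids);     ids.roles = UNKNOWN
 *	*((fc_port_t **)rport->dd_data) = fcport;       under host_lock
 *	fc_remote_port_rolechg(rport, actual_roles);    FCP target/initiator
 */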
2377 * qla2x00_update_fcport
2378 * Updates device on list.
2381 * ha = adapter block pointer.
2382 * fcport = port structure pointer.
2392 qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
2394 struct qla_hw_data *ha = vha->hw;
2397 fcport->login_retry = 0;
2398 fcport->port_login_retry_count = ha->port_down_retry_count *
2400 atomic_set(&fcport->port_down_timer, ha->port_down_retry_count *
2402 fcport->flags &= ~FCF_LOGIN_NEEDED;
2404 qla2x00_iidma_fcport(vha, fcport);
2406 atomic_set(&fcport->state, FCS_ONLINE);
2408 qla2x00_reg_remote_port(vha, fcport);
2412 * qla2x00_configure_fabric
2413 * Set up SNS devices with loop IDs.
2416 * ha = adapter block pointer.
2423 qla2x00_configure_fabric(scsi_qla_host_t *vha)
2426 fc_port_t *fcport, *fcptemp;
2427 uint16_t next_loopid;
2428 uint16_t mb[MAILBOX_REGISTER_COUNT];
2430 LIST_HEAD(new_fcports);
2431 struct qla_hw_data *ha = vha->hw;
2432 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
2434 /* If FL port exists, then SNS is present */
2435 if (IS_FWI2_CAPABLE(ha))
2436 loop_id = NPH_F_PORT;
2438 loop_id = SNS_FL_PORT;
2439 rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_node_name, 1);
2440 if (rval != QLA_SUCCESS) {
2441 DEBUG2(printk("scsi(%ld): MBC_GET_PORT_NAME Failed, No FL "
2442 "Port\n", vha->host_no));
2444 vha->device_flags &= ~SWITCH_FOUND;
2445 return (QLA_SUCCESS);
2447 vha->device_flags |= SWITCH_FOUND;
2449 /* Mark devices that need re-synchronization. */
2450 rval2 = qla2x00_device_resync(vha);
2451 if (rval2 == QLA_RSCNS_HANDLED) {
2452 /* No point doing the scan, just continue. */
2453 return (QLA_SUCCESS);
2457 if (ql2xfdmienable &&
2458 test_and_clear_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags))
2459 qla2x00_fdmi_register(vha);
2461 /* Ensure we are logged into the SNS. */
2462 if (IS_FWI2_CAPABLE(ha))
2465 loop_id = SIMPLE_NAME_SERVER;
2466 ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff,
2467 0xfc, mb, BIT_1 | BIT_0);
2468 if (mb[0] != MBS_COMMAND_COMPLETE) {
2469 DEBUG2(qla_printk(KERN_INFO, ha,
2470 "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x "
2471 "mb[2]=%x mb[6]=%x mb[7]=%x\n", loop_id,
2472 mb[0], mb[1], mb[2], mb[6], mb[7]));
2473 return (QLA_SUCCESS);
2476 if (test_and_clear_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags)) {
2477 if (qla2x00_rft_id(vha)) {
2479 DEBUG2(printk("scsi(%ld): Register FC-4 "
2480 "TYPE failed.\n", vha->host_no));
2482 if (qla2x00_rff_id(vha)) {
2484 DEBUG2(printk("scsi(%ld): Register FC-4 "
2485 "Features failed.\n", vha->host_no));
2487 if (qla2x00_rnn_id(vha)) {
2489 DEBUG2(printk("scsi(%ld): Register Node Name "
2490 "failed.\n", vha->host_no));
2491 } else if (qla2x00_rsnn_nn(vha)) {
2493 DEBUG2(printk("scsi(%ld): Register Symbolic "
2494 "Node Name failed.\n", vha->host_no));
2498 rval = qla2x00_find_all_fabric_devs(vha, &new_fcports);
2499 if (rval != QLA_SUCCESS)
2503 * Logout all previous fabric devices marked lost, except
2506 list_for_each_entry(fcport, &vha->vp_fcports, list) {
2507 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
2510 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0)
2513 if (atomic_read(&fcport->state) == FCS_DEVICE_LOST) {
2514 qla2x00_mark_device_lost(vha, fcport,
2515 ql2xplogiabsentdevice, 0);
2516 if (fcport->loop_id != FC_NO_LOOP_ID &&
2517 (fcport->flags & FCF_TAPE_PRESENT) == 0 &&
2518 fcport->port_type != FCT_INITIATOR &&
2519 fcport->port_type != FCT_BROADCAST) {
2520 ha->isp_ops->fabric_logout(vha,
2522 fcport->d_id.b.domain,
2523 fcport->d_id.b.area,
2524 fcport->d_id.b.al_pa);
2525 fcport->loop_id = FC_NO_LOOP_ID;
2530 /* Starting free loop ID. */
2531 next_loopid = ha->min_external_loopid;
2534 * Scan through our port list and login entries that need to be logged in.
2537 list_for_each_entry(fcport, &vha->vp_fcports, list) {
2538 if (atomic_read(&vha->loop_down_timer) ||
2539 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
2542 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 ||
2543 (fcport->flags & FCF_LOGIN_NEEDED) == 0)
2546 if (fcport->loop_id == FC_NO_LOOP_ID) {
2547 fcport->loop_id = next_loopid;
2548 rval = qla2x00_find_new_loop_id(
2550 if (rval != QLA_SUCCESS) {
2551 /* Ran out of IDs to use */
2555 /* Login and update database */
2556 qla2x00_fabric_dev_login(vha, fcport, &next_loopid);
2559 /* Exit if out of loop IDs. */
2560 if (rval != QLA_SUCCESS) {
2565 * Login and add the new devices to our port list.
2567 list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) {
2568 if (atomic_read(&vha->loop_down_timer) ||
2569 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
2572 /* Find a new loop ID to use. */
2573 fcport->loop_id = next_loopid;
2574 rval = qla2x00_find_new_loop_id(base_vha, fcport);
2575 if (rval != QLA_SUCCESS) {
2576 /* Ran out of IDs to use */
2580 /* Login and update database */
2581 qla2x00_fabric_dev_login(vha, fcport, &next_loopid);
2585 fcport->vp_idx = vha->vp_idx;
2587 list_move_tail(&fcport->list, &vha->vp_fcports);
2591 /* Free all new device structures not processed. */
2592 list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) {
2593 list_del(&fcport->list);
2598 DEBUG2(printk("scsi(%ld): Configure fabric error exit: "
2599 "rval=%d\n", vha->host_no, rval));
2607 * qla2x00_find_all_fabric_devs
2610 * ha = adapter block pointer.
2611 * new_fcports = list head to collect newly discovered fabric devices.
2620 qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
2621 struct list_head *new_fcports)
2625 fc_port_t *fcport, *new_fcport, *fcptemp;
2630 int first_dev, last_dev;
2631 port_id_t wrap, nxt_d_id;
2632 struct qla_hw_data *ha = vha->hw;
2633 struct scsi_qla_host *vp, *base_vha = pci_get_drvdata(ha->pdev);
2634 struct scsi_qla_host *tvp;
2638 /* Try GID_PT to get device list, else GAN. */
2639 swl = kcalloc(MAX_FIBRE_DEVICES, sizeof(sw_info_t), GFP_KERNEL);
2642 DEBUG2(printk("scsi(%ld): GID_PT allocations failed, fallback "
2643 "on GA_NXT\n", vha->host_no));
2645 if (qla2x00_gid_pt(vha, swl) != QLA_SUCCESS) {
2648 } else if (qla2x00_gpn_id(vha, swl) != QLA_SUCCESS) {
2651 } else if (qla2x00_gnn_id(vha, swl) != QLA_SUCCESS) {
2654 } else if (ql2xiidmaenable &&
2655 qla2x00_gfpn_id(vha, swl) == QLA_SUCCESS) {
2656 qla2x00_gpsc(vha, swl);
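/*
 * Sketch of the switch-scan strategy above (illustrative, not driver code):
 * a single GID_PT query fills the MAX_FIBRE_DEVICES-sized swl[] array with
 * the port IDs registered with the fabric name server, GPN_ID and GNN_ID
 * then resolve port and node names for each entry, and when iIDMA is enabled
 * GFPN_ID and GPSC additionally fetch the fabric port name and port speed.
 * If any step fails, swl is abandoned and the code falls back to walking the
 * fabric one device at a time with GA_NXT further below.
 *
 *	swl = kcalloc(MAX_FIBRE_DEVICES, sizeof(sw_info_t), GFP_KERNEL);
 *	if (!swl || qla2x00_gid_pt(vha, swl) != QLA_SUCCESS)
 *		fall back to the per-device GA_NXT scan
 */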
2661 /* Allocate temporary fcport for any new fcports discovered. */
2662 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2663 if (new_fcport == NULL) {
2665 return (QLA_MEMORY_ALLOC_FAILED);
2667 new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
2668 /* Set start port ID scan at adapter ID. */
2672 /* Starting free loop ID. */
2673 loop_id = ha->min_external_loopid;
2674 for (; loop_id <= ha->max_loop_id; loop_id++) {
2675 if (qla2x00_is_reserved_id(vha, loop_id))
2678 if (atomic_read(&vha->loop_down_timer) || LOOP_TRANSITION(vha))
2683 wrap.b24 = new_fcport->d_id.b24;
2685 new_fcport->d_id.b24 = swl[swl_idx].d_id.b24;
2686 memcpy(new_fcport->node_name,
2687 swl[swl_idx].node_name, WWN_SIZE);
2688 memcpy(new_fcport->port_name,
2689 swl[swl_idx].port_name, WWN_SIZE);
2690 memcpy(new_fcport->fabric_port_name,
2691 swl[swl_idx].fabric_port_name, WWN_SIZE);
2692 new_fcport->fp_speed = swl[swl_idx].fp_speed;
2694 if (swl[swl_idx].d_id.b.rsvd_1 != 0) {
2700 /* Send GA_NXT to the switch */
2701 rval = qla2x00_ga_nxt(vha, new_fcport);
2702 if (rval != QLA_SUCCESS) {
2703 qla_printk(KERN_WARNING, ha,
2704 "SNS scan failed -- assuming zero-entry "
2706 list_for_each_entry_safe(fcport, fcptemp,
2707 new_fcports, list) {
2708 list_del(&fcport->list);
2716 /* If wrap on switch device list, exit. */
2718 wrap.b24 = new_fcport->d_id.b24;
2720 } else if (new_fcport->d_id.b24 == wrap.b24) {
2721 DEBUG2(printk("scsi(%ld): device wrap (%02x%02x%02x)\n",
2722 vha->host_no, new_fcport->d_id.b.domain,
2723 new_fcport->d_id.b.area, new_fcport->d_id.b.al_pa));
2727 /* Bypass if same physical adapter. */
2728 if (new_fcport->d_id.b24 == base_vha->d_id.b24)
2731 /* Bypass virtual ports of the same host. */
2733 if (ha->num_vhosts) {
2734 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
2735 if (new_fcport->d_id.b24 == vp->d_id.b24) {
2744 /* Bypass if same domain and area of adapter. */
2745 if (((new_fcport->d_id.b24 & 0xffff00) ==
2746 (vha->d_id.b24 & 0xffff00)) && ha->current_topology ==
2750 /* Bypass reserved domain fields. */
2751 if ((new_fcport->d_id.b.domain & 0xf0) == 0xf0)
2754 /* Locate matching device in database. */
2756 list_for_each_entry(fcport, &vha->vp_fcports, list) {
2757 if (memcmp(new_fcport->port_name, fcport->port_name,
2763 /* Update port state. */
2764 memcpy(fcport->fabric_port_name,
2765 new_fcport->fabric_port_name, WWN_SIZE);
2766 fcport->fp_speed = new_fcport->fp_speed;
2769 * If address the same and state FCS_ONLINE, nothing
2772 if (fcport->d_id.b24 == new_fcport->d_id.b24 &&
2773 atomic_read(&fcport->state) == FCS_ONLINE) {
2778 * If device was not a fabric device before.
2780 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
2781 fcport->d_id.b24 = new_fcport->d_id.b24;
2782 fcport->loop_id = FC_NO_LOOP_ID;
2783 fcport->flags |= (FCF_FABRIC_DEVICE |
2789 * Port ID changed or device was marked to be updated;
2790 * Log it out if still logged in and mark it for
2793 fcport->d_id.b24 = new_fcport->d_id.b24;
2794 fcport->flags |= FCF_LOGIN_NEEDED;
2795 if (fcport->loop_id != FC_NO_LOOP_ID &&
2796 (fcport->flags & FCF_TAPE_PRESENT) == 0 &&
2797 fcport->port_type != FCT_INITIATOR &&
2798 fcport->port_type != FCT_BROADCAST) {
2799 ha->isp_ops->fabric_logout(vha, fcport->loop_id,
2800 fcport->d_id.b.domain, fcport->d_id.b.area,
2801 fcport->d_id.b.al_pa);
2802 fcport->loop_id = FC_NO_LOOP_ID;
2810 /* If device was not in our fcports list, then add it. */
2811 list_add_tail(&new_fcport->list, new_fcports);
2813 /* Allocate a new replacement fcport. */
2814 nxt_d_id.b24 = new_fcport->d_id.b24;
2815 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2816 if (new_fcport == NULL) {
2818 return (QLA_MEMORY_ALLOC_FAILED);
2820 new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
2821 new_fcport->d_id.b24 = nxt_d_id.b24;
2831 * qla2x00_find_new_loop_id
2832 * Scan through our port list and find a new usable loop ID.
2835 * ha: adapter state pointer.
2836 * dev: port structure pointer.
2839 * qla2x00 local function return status code.
2845 qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
2850 uint16_t first_loop_id;
2851 struct qla_hw_data *ha = vha->hw;
2852 struct scsi_qla_host *vp;
2853 struct scsi_qla_host *tvp;
2857 /* Save starting loop ID. */
2858 first_loop_id = dev->loop_id;
2861 /* Skip loop ID if already used by adapter. */
2862 if (dev->loop_id == vha->loop_id)
2865 /* Skip reserved loop IDs. */
2866 while (qla2x00_is_reserved_id(vha, dev->loop_id))
2869 /* Reset loop ID if passed the end. */
2870 if (dev->loop_id > ha->max_loop_id) {
2871 /* first loop ID. */
2872 dev->loop_id = ha->min_external_loopid;
2875 /* Check for loop ID being already in use. */
2878 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
2879 list_for_each_entry(fcport, &vp->vp_fcports, list) {
2880 if (fcport->loop_id == dev->loop_id &&
2882 /* ID possibly in use */
2891 /* If not in use then it is free to use. */
2896 /* ID in use. Try next value. */
2899 /* If wrap around. No free ID to use. */
2900 if (dev->loop_id == first_loop_id) {
2901 dev->loop_id = FC_NO_LOOP_ID;
2902 rval = QLA_FUNCTION_FAILED;
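/*
 * Illustrative sketch (not driver code) of the loop-ID search above: starting
 * from the candidate in dev->loop_id, the ID is advanced past the adapter's
 * own ID and any reserved IDs, wraps from ha->max_loop_id back to
 * ha->min_external_loopid, and is checked against every fcport on every
 * vhost; coming back around to the starting ID without a free slot fails
 * the lookup.
 *
 *	dev->loop_id = next_candidate;
 *	if (qla2x00_find_new_loop_id(vha, dev) != QLA_SUCCESS)
 *		dev->loop_id == FC_NO_LOOP_ID   (no free ID available)
 */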
2911 * qla2x00_device_resync
2912 * Marks devices in the database that need resynchronization.
2915 * ha = adapter block pointer.
2921 qla2x00_device_resync(scsi_qla_host_t *vha)
2926 uint32_t rscn_entry;
2927 uint8_t rscn_out_iter;
2931 rval = QLA_RSCNS_HANDLED;
2933 while (vha->rscn_out_ptr != vha->rscn_in_ptr ||
2934 vha->flags.rscn_queue_overflow) {
2936 rscn_entry = vha->rscn_queue[vha->rscn_out_ptr];
2937 format = MSB(MSW(rscn_entry));
2938 d_id.b.domain = LSB(MSW(rscn_entry));
2939 d_id.b.area = MSB(LSW(rscn_entry));
2940 d_id.b.al_pa = LSB(LSW(rscn_entry));
2942 DEBUG(printk("scsi(%ld): RSCN queue entry[%d] = "
2943 "[%02x/%02x%02x%02x].\n",
2944 vha->host_no, vha->rscn_out_ptr, format, d_id.b.domain,
2945 d_id.b.area, d_id.b.al_pa));
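/*
 * Illustrative note (not driver code): each queued RSCN entry is a packed
 * 32-bit word -- the address format in the top byte and the affected
 * domain/area/AL_PA in the lower three -- which the MSB/LSB/MSW/LSW macros
 * above unpack. For example:
 *
 *	rscn_entry = 0x01aabbcc
 *	format        = MSB(MSW(rscn_entry)) = 0x01
 *	d_id.b.domain = LSB(MSW(rscn_entry)) = 0xaa
 *	d_id.b.area   = MSB(LSW(rscn_entry)) = 0xbb
 *	d_id.b.al_pa  = LSB(LSW(rscn_entry)) = 0xcc
 */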
2947 vha->rscn_out_ptr++;
2948 if (vha->rscn_out_ptr == MAX_RSCN_COUNT)
2949 vha->rscn_out_ptr = 0;
2951 /* Skip duplicate entries. */
2952 for (rscn_out_iter = vha->rscn_out_ptr;
2953 !vha->flags.rscn_queue_overflow &&
2954 rscn_out_iter != vha->rscn_in_ptr;
2955 rscn_out_iter = (rscn_out_iter ==
2956 (MAX_RSCN_COUNT - 1)) ? 0: rscn_out_iter + 1) {
2958 if (rscn_entry != vha->rscn_queue[rscn_out_iter])
2961 DEBUG(printk("scsi(%ld): Skipping duplicate RSCN queue "
2962 "entry found at [%d].\n", vha->host_no,
2965 vha->rscn_out_ptr = rscn_out_iter;
2968 /* Queue overflow, set switch default case. */
2969 if (vha->flags.rscn_queue_overflow) {
2970 DEBUG(printk("scsi(%ld): device_resync: rscn "
2971 "overflow.\n", vha->host_no));
2974 vha->flags.rscn_queue_overflow = 0;
2990 vha->rscn_out_ptr = vha->rscn_in_ptr;
2996 list_for_each_entry(fcport, &vha->vp_fcports, list) {
2997 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 ||
2998 (fcport->d_id.b24 & mask) != d_id.b24 ||
2999 fcport->port_type == FCT_BROADCAST)
3002 if (atomic_read(&fcport->state) == FCS_ONLINE) {
3004 fcport->port_type != FCT_INITIATOR) {
3005 qla2x00_mark_device_lost(vha, fcport,
3015 * qla2x00_fabric_dev_login
3016 * Login fabric target device and update FC port database.
3019 * ha: adapter state pointer.
3020 * fcport: port structure list pointer.
3021 * next_loopid: contains value of a new loop ID that can be used
3022 * by the next login attempt.
3025 * qla2x00 local function return status code.
3031 qla2x00_fabric_dev_login(scsi_qla_host_t *vha, fc_port_t *fcport,
3032 uint16_t *next_loopid)
3037 struct qla_hw_data *ha = vha->hw;
3042 rval = qla2x00_fabric_login(vha, fcport, next_loopid);
3043 if (rval == QLA_SUCCESS) {
3044 /* Send an ADISC to tape devices.*/
3046 if (fcport->flags & FCF_TAPE_PRESENT)
3048 rval = qla2x00_get_port_database(vha, fcport, opts);
3049 if (rval != QLA_SUCCESS) {
3050 ha->isp_ops->fabric_logout(vha, fcport->loop_id,
3051 fcport->d_id.b.domain, fcport->d_id.b.area,
3052 fcport->d_id.b.al_pa);
3053 qla2x00_mark_device_lost(vha, fcport, 1, 0);
3055 qla2x00_update_fcport(vha, fcport);
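/*
 * Illustrative sketch (not driver code) of the per-device login flow above:
 * a fabric PLOGI is attempted first; on success the port database is read
 * back (requesting an ADISC for FCF_TAPE_PRESENT devices so an existing
 * login is revalidated) and the fcport is promoted to FCS_ONLINE through
 * qla2x00_update_fcport(); if the database read fails, the port is logged
 * out again and marked lost.
 *
 *	if (qla2x00_fabric_login(vha, fcport, &next_loopid) == QLA_SUCCESS) {
 *		if (qla2x00_get_port_database(vha, fcport, opts) == QLA_SUCCESS)
 *			qla2x00_update_fcport(vha, fcport);
 *		else
 *			fabric_logout + qla2x00_mark_device_lost()
 *	}
 */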
3063 * qla2x00_fabric_login
3064 * Issue fabric login command.
3067 * ha = adapter block pointer.
3068 * device = pointer to FC device type structure.
3071 * 0 - Login successful
3073 * 2 - Initiator device
3077 qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
3078 uint16_t *next_loopid)
3082 uint16_t tmp_loopid;
3083 uint16_t mb[MAILBOX_REGISTER_COUNT];
3084 struct qla_hw_data *ha = vha->hw;
3090 DEBUG(printk("scsi(%ld): Trying Fabric Login w/loop id 0x%04x "
3091 "for port %02x%02x%02x.\n",
3092 vha->host_no, fcport->loop_id, fcport->d_id.b.domain,
3093 fcport->d_id.b.area, fcport->d_id.b.al_pa));
3095 /* Login fcport on switch. */
3096 ha->isp_ops->fabric_login(vha, fcport->loop_id,
3097 fcport->d_id.b.domain, fcport->d_id.b.area,
3098 fcport->d_id.b.al_pa, mb, BIT_0);
3099 if (mb[0] == MBS_PORT_ID_USED) {
3101 * Device has another loop ID. The firmware team
3102 * recommends the driver perform an implicit login with
3103 * the specified ID again. The ID we just used is saved
3104 * here so we return with an ID that can be tried by the next login.
3108 tmp_loopid = fcport->loop_id;
3109 fcport->loop_id = mb[1];
3111 DEBUG(printk("Fabric Login: port in use - next "
3112 "loop id=0x%04x, port Id=%02x%02x%02x.\n",
3113 fcport->loop_id, fcport->d_id.b.domain,
3114 fcport->d_id.b.area, fcport->d_id.b.al_pa));
3116 } else if (mb[0] == MBS_COMMAND_COMPLETE) {
3121 /* A retry occurred before. */
3122 *next_loopid = tmp_loopid;
3125 * No retry occurred before. Just increment the
3126 * ID value for next login.
3128 *next_loopid = (fcport->loop_id + 1);
3131 if (mb[1] & BIT_0) {
3132 fcport->port_type = FCT_INITIATOR;
3134 fcport->port_type = FCT_TARGET;
3135 if (mb[1] & BIT_1) {
3136 fcport->flags |= FCF_TAPE_PRESENT;
3141 fcport->supported_classes |= FC_COS_CLASS2;
3143 fcport->supported_classes |= FC_COS_CLASS3;
3147 } else if (mb[0] == MBS_LOOP_ID_USED) {
3149 * Loop ID already used, try next loop ID.
3152 rval = qla2x00_find_new_loop_id(vha, fcport);
3153 if (rval != QLA_SUCCESS) {
3154 /* Ran out of loop IDs to use */
3157 } else if (mb[0] == MBS_COMMAND_ERROR) {
3159 * Firmware possibly timed out during login. If NO
3160 * retries are left to do then the device is declared dead.
3163 *next_loopid = fcport->loop_id;
3164 ha->isp_ops->fabric_logout(vha, fcport->loop_id,
3165 fcport->d_id.b.domain, fcport->d_id.b.area,
3166 fcport->d_id.b.al_pa);
3167 qla2x00_mark_device_lost(vha, fcport, 1, 0);
3173 * unrecoverable / not handled error
3175 DEBUG2(printk("%s(%ld): failed=%x port_id=%02x%02x%02x "
3176 "loop_id=%x jiffies=%lx.\n",
3177 __func__, vha->host_no, mb[0],
3178 fcport->d_id.b.domain, fcport->d_id.b.area,
3179 fcport->d_id.b.al_pa, fcport->loop_id, jiffies));
3181 *next_loopid = fcport->loop_id;
3182 ha->isp_ops->fabric_logout(vha, fcport->loop_id,
3183 fcport->d_id.b.domain, fcport->d_id.b.area,
3184 fcport->d_id.b.al_pa);
3185 fcport->loop_id = FC_NO_LOOP_ID;
3186 fcport->login_retry = 0;
3197 * qla2x00_local_device_login
3198 * Issue local device login command.
3201 * ha = adapter block pointer.
3202 * loop_id = loop id of device to login to.
3204 * Returns (Where's the #define!!!!):
3205 * 0 - Login successful
3210 qla2x00_local_device_login(scsi_qla_host_t *vha, fc_port_t *fcport)
3213 uint16_t mb[MAILBOX_REGISTER_COUNT];
3215 memset(mb, 0, sizeof(mb));
3216 rval = qla2x00_login_local_device(vha, fcport, mb, BIT_0);
3217 if (rval == QLA_SUCCESS) {
3218 /* Interrogate mailbox registers for any errors */
3219 if (mb[0] == MBS_COMMAND_ERROR)
3221 else if (mb[0] == MBS_COMMAND_PARAMETER_ERROR)
3222 /* device not in PCB table */
3230 * qla2x00_loop_resync
3231 * Resync with fibre channel devices.
3234 * ha = adapter block pointer.
3240 qla2x00_loop_resync(scsi_qla_host_t *vha)
3242 int rval = QLA_SUCCESS;
3244 struct req_que *req;
3245 struct rsp_que *rsp;
3247 if (ql2xmultique_tag)
3248 req = vha->hw->req_q_map[0];
3253 atomic_set(&vha->loop_state, LOOP_UPDATE);
3254 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
3255 if (vha->flags.online) {
3256 if (!(rval = qla2x00_fw_ready(vha))) {
3257 /* Wait at most MAX_TARGET RSCNs for a stable link. */
3260 atomic_set(&vha->loop_state, LOOP_UPDATE);
3262 /* Issue a marker after FW becomes ready. */
3263 qla2x00_marker(vha, req, rsp, 0, 0,
3265 vha->marker_needed = 0;
3267 /* Remap devices on Loop. */
3268 clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
3270 qla2x00_configure_loop(vha);
3272 } while (!atomic_read(&vha->loop_down_timer) &&
3273 !(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
3274 && wait_time && (test_bit(LOOP_RESYNC_NEEDED,
3279 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
3280 return (QLA_FUNCTION_FAILED);
3283 DEBUG2_3(printk("%s(): **** FAILED ****\n", __func__));
3289 qla2x00_update_fcports(scsi_qla_host_t *vha)
3293 /* Go with deferred removal of rport references. */
3294 list_for_each_entry(fcport, &vha->vp_fcports, list)
3295 if (fcport && fcport->drport &&
3296 atomic_read(&fcport->state) != FCS_UNCONFIGURED)
3297 qla2x00_rport_del(fcport);
3302 * Resets ISP and aborts all outstanding commands.
3305 * ha = adapter block pointer.
3311 qla2x00_abort_isp(scsi_qla_host_t *vha)
3315 struct qla_hw_data *ha = vha->hw;
3316 struct scsi_qla_host *vp;
3317 struct scsi_qla_host *tvp;
3318 struct req_que *req = ha->req_q_map[0];
3320 if (vha->flags.online) {
3321 vha->flags.online = 0;
3322 ha->flags.chip_reset_done = 0;
3323 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3324 ha->qla_stats.total_isp_aborts++;
3326 qla_printk(KERN_INFO, ha,
3327 "Performing ISP error recovery - ha= %p.\n", ha);
3328 ha->isp_ops->reset_chip(vha);
3330 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
3331 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
3332 atomic_set(&vha->loop_state, LOOP_DOWN);
3333 qla2x00_mark_all_devices_lost(vha, 0);
3334 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list)
3335 qla2x00_mark_all_devices_lost(vp, 0);
3337 if (!atomic_read(&vha->loop_down_timer))
3338 atomic_set(&vha->loop_down_timer,
3342 /* Requeue all commands in outstanding command list. */
3343 qla2x00_abort_all_cmds(vha, DID_RESET << 16);
3345 ha->isp_ops->get_flash_version(vha, req->ring);
3347 ha->isp_ops->nvram_config(vha);
3349 if (!qla2x00_restart_isp(vha)) {
3350 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
3352 if (!atomic_read(&vha->loop_down_timer)) {
3354 * Issue marker command only when we are going
3355 * to start the I/O.
3357 vha->marker_needed = 1;
3360 vha->flags.online = 1;
3362 ha->isp_ops->enable_intrs(ha);
3364 ha->isp_abort_cnt = 0;
3365 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
3368 ha->flags.fce_enabled = 1;
3370 fce_calc_size(ha->fce_bufs));
3371 rval = qla2x00_enable_fce_trace(vha,
3372 ha->fce_dma, ha->fce_bufs, ha->fce_mb,
3375 qla_printk(KERN_WARNING, ha,
3376 "Unable to reinitialize FCE "
3378 ha->flags.fce_enabled = 0;
3383 memset(ha->eft, 0, EFT_SIZE);
3384 rval = qla2x00_enable_eft_trace(vha,
3385 ha->eft_dma, EFT_NUM_BUFFERS);
3387 qla_printk(KERN_WARNING, ha,
3388 "Unable to reinitialize EFT "
3392 } else { /* failed the ISP abort */
3393 vha->flags.online = 1;
3394 if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
3395 if (ha->isp_abort_cnt == 0) {
3396 qla_printk(KERN_WARNING, ha,
3397 "ISP error recovery failed - "
3398 "board disabled\n");
3400 * The next call disables the board
3403 ha->isp_ops->reset_adapter(vha);
3404 vha->flags.online = 0;
3405 clear_bit(ISP_ABORT_RETRY,
3408 } else { /* schedule another ISP abort */
3409 ha->isp_abort_cnt--;
3410 DEBUG(printk("qla%ld: ISP abort - "
3411 "retry remaining %d\n",
3412 vha->host_no, ha->isp_abort_cnt));
3416 ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT;
3417 DEBUG(printk("qla2x00(%ld): ISP error recovery "
3418 "- retrying (%d) more times\n",
3419 vha->host_no, ha->isp_abort_cnt));
3420 set_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
3428 DEBUG(printk(KERN_INFO
3429 "qla2x00_abort_isp(%ld): succeeded.\n",
3431 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
3433 qla2x00_vp_abort_isp(vp);
3436 qla_printk(KERN_INFO, ha,
3437 "qla2x00_abort_isp: **** FAILED ****\n");
3444 * qla2x00_restart_isp
3445 * restarts the ISP after a reset
3448 * ha = adapter block pointer.
3454 qla2x00_restart_isp(scsi_qla_host_t *vha)
3458 struct qla_hw_data *ha = vha->hw;
3459 struct req_que *req = ha->req_q_map[0];
3460 struct rsp_que *rsp = ha->rsp_q_map[0];
3462 /* If firmware needs to be loaded */
3463 if (qla2x00_isp_firmware(vha)) {
3464 vha->flags.online = 0;
3465 status = ha->isp_ops->chip_diag(vha);
3467 status = qla2x00_setup_chip(vha);
3470 if (!status && !(status = qla2x00_init_rings(vha))) {
3471 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
3472 ha->flags.chip_reset_done = 1;
3473 /* Initialize the queues in use */
3474 qla25xx_init_queues(ha);
3476 status = qla2x00_fw_ready(vha);
3478 DEBUG(printk("%s(): Start configure loop, "
3479 "status = %d\n", __func__, status));
3481 /* Issue a marker after FW becomes ready. */
3482 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
3484 vha->flags.online = 1;
3485 /* Wait at most MAX_TARGET RSCNs for a stable link. */
3488 clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
3489 qla2x00_configure_loop(vha);
3491 } while (!atomic_read(&vha->loop_down_timer) &&
3492 !(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
3493 && wait_time && (test_bit(LOOP_RESYNC_NEEDED,
3497 /* if no cable then assume it's good */
3498 if ((vha->device_flags & DFLG_NO_CABLE))
3501 DEBUG(printk("%s(): Configure loop done, status = 0x%x\n",
3509 qla25xx_init_queues(struct qla_hw_data *ha)
3511 struct rsp_que *rsp = NULL;
3512 struct req_que *req = NULL;
3513 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
3517 for (i = 1; i < ha->max_rsp_queues; i++) {
3518 rsp = ha->rsp_q_map[i];
3520 rsp->options &= ~BIT_0;
3521 ret = qla25xx_init_rsp_que(base_vha, rsp);
3522 if (ret != QLA_SUCCESS)
3523 DEBUG2_17(printk(KERN_WARNING
3524 "%s Rsp que:%d init failed\n", __func__,
3527 DEBUG2_17(printk(KERN_INFO
3528 "%s Rsp que:%d inited\n", __func__,
3532 for (i = 1; i < ha->max_req_queues; i++) {
3533 req = ha->req_q_map[i];
3535 /* Clear outstanding commands array. */
3536 req->options &= ~BIT_0;
3537 ret = qla25xx_init_req_que(base_vha, req);
3538 if (ret != QLA_SUCCESS)
3539 DEBUG2_17(printk(KERN_WARNING
3540 "%s Req que:%d init failed\n", __func__,
3543 DEBUG2_17(printk(KERN_INFO
3544 "%s Req que:%d inited\n", __func__,
3552 * qla2x00_reset_adapter
3556 * ha = adapter block pointer.
3559 qla2x00_reset_adapter(scsi_qla_host_t *vha)
3561 unsigned long flags = 0;
3562 struct qla_hw_data *ha = vha->hw;
3563 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
3565 vha->flags.online = 0;
3566 ha->isp_ops->disable_intrs(ha);
3568 spin_lock_irqsave(&ha->hardware_lock, flags);
3569 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
3570 RD_REG_WORD(&reg->hccr); /* PCI Posting. */
3571 WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
3572 RD_REG_WORD(&reg->hccr); /* PCI Posting. */
3573 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3577 qla24xx_reset_adapter(scsi_qla_host_t *vha)
3579 unsigned long flags = 0;
3580 struct qla_hw_data *ha = vha->hw;
3581 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
3583 vha->flags.online = 0;
3584 ha->isp_ops->disable_intrs(ha);
3586 spin_lock_irqsave(&ha->hardware_lock, flags);
3587 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
3588 RD_REG_DWORD(&reg->hccr);
3589 WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
3590 RD_REG_DWORD(&reg->hccr);
3591 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3593 if (IS_NOPOLLING_TYPE(ha))
3594 ha->isp_ops->enable_intrs(ha);
3597 /* On sparc systems, obtain port and node WWN from firmware properties. */
3600 static void qla24xx_nvram_wwn_from_ofw(scsi_qla_host_t *vha,
3601 struct nvram_24xx *nv)
3604 struct qla_hw_data *ha = vha->hw;
3605 struct pci_dev *pdev = ha->pdev;
3606 struct device_node *dp = pci_device_to_OF_node(pdev);
3610 val = of_get_property(dp, "port-wwn", &len);
3611 if (val && len >= WWN_SIZE)
3612 memcpy(nv->port_name, val, WWN_SIZE);
3614 val = of_get_property(dp, "node-wwn", &len);
3615 if (val && len >= WWN_SIZE)
3616 memcpy(nv->node_name, val, WWN_SIZE);
3621 qla24xx_nvram_config(scsi_qla_host_t *vha)
3624 struct init_cb_24xx *icb;
3625 struct nvram_24xx *nv;
3627 uint8_t *dptr1, *dptr2;
3630 struct qla_hw_data *ha = vha->hw;
3633 icb = (struct init_cb_24xx *)ha->init_cb;
3636 /* Determine NVRAM starting address. */
3637 if (ha->flags.port0) {
3638 ha->nvram_base = FA_NVRAM_FUNC0_ADDR;
3639 ha->vpd_base = FA_NVRAM_VPD0_ADDR;
3641 ha->nvram_base = FA_NVRAM_FUNC1_ADDR;
3642 ha->vpd_base = FA_NVRAM_VPD1_ADDR;
3644 ha->nvram_size = sizeof(struct nvram_24xx);
3645 ha->vpd_size = FA_NVRAM_VPD_SIZE;
3647 /* Get VPD data into cache */
3648 ha->vpd = ha->nvram + VPD_OFFSET;
3649 ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd,
3650 ha->nvram_base - FA_NVRAM_FUNC0_ADDR, FA_NVRAM_VPD_SIZE * 4);
3652 /* Get NVRAM data into cache and calculate checksum. */
3653 dptr = (uint32_t *)nv;
3654 ha->isp_ops->read_nvram(vha, (uint8_t *)dptr, ha->nvram_base,
3656 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++)
3657 chksum += le32_to_cpu(*dptr++);
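/*
 * Illustrative note (not driver code): the NVRAM image carries its own
 * checksum such that the 32-bit little-endian words of the whole region sum
 * to zero. A sketch of the check performed above, assuming nv_words points
 * at the cached image (the same region dptr walks):
 *
 *	uint32_t sum = 0;
 *	for (cnt = 0; cnt < ha->nvram_size >> 2; cnt++)
 *		sum += le32_to_cpu(nv_words[cnt]);
 *	sum == 0  ->  image accepted;  sum != 0  ->  defaults are used below
 */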
3659 DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", vha->host_no));
3660 DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size));
3662 /* Bad NVRAM data, set default parameters. */
3663 if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P'
3664 || nv->id[3] != ' ' ||
3665 nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) {
3666 /* Reset NVRAM data. */
3667 qla_printk(KERN_WARNING, ha, "Inconsistent NVRAM detected: "
3668 "checksum=0x%x id=%c version=0x%x.\n", chksum, nv->id[0],
3669 le16_to_cpu(nv->nvram_version));
3670 qla_printk(KERN_WARNING, ha, "Falling back to functioning (yet "
3671 "invalid -- WWPN) defaults.\n");
3674 * Set default initialization control block.
3676 memset(nv, 0, ha->nvram_size);
3677 nv->nvram_version = __constant_cpu_to_le16(ICB_VERSION);
3678 nv->version = __constant_cpu_to_le16(ICB_VERSION);
3679 nv->frame_payload_size = __constant_cpu_to_le16(2048);
3680 nv->execution_throttle = __constant_cpu_to_le16(0xFFFF);
3681 nv->exchange_count = __constant_cpu_to_le16(0);
3682 nv->hard_address = __constant_cpu_to_le16(124);
3683 nv->port_name[0] = 0x21;
3684 nv->port_name[1] = 0x00 + ha->port_no;
3685 nv->port_name[2] = 0x00;
3686 nv->port_name[3] = 0xe0;
3687 nv->port_name[4] = 0x8b;
3688 nv->port_name[5] = 0x1c;
3689 nv->port_name[6] = 0x55;
3690 nv->port_name[7] = 0x86;
3691 nv->node_name[0] = 0x20;
3692 nv->node_name[1] = 0x00;
3693 nv->node_name[2] = 0x00;
3694 nv->node_name[3] = 0xe0;
3695 nv->node_name[4] = 0x8b;
3696 nv->node_name[5] = 0x1c;
3697 nv->node_name[6] = 0x55;
3698 nv->node_name[7] = 0x86;
3699 qla24xx_nvram_wwn_from_ofw(vha, nv);
3700 nv->login_retry_count = __constant_cpu_to_le16(8);
3701 nv->interrupt_delay_timer = __constant_cpu_to_le16(0);
3702 nv->login_timeout = __constant_cpu_to_le16(0);
3703 nv->firmware_options_1 =
3704 __constant_cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
3705 nv->firmware_options_2 = __constant_cpu_to_le32(2 << 4);
3706 nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12);
3707 nv->firmware_options_3 = __constant_cpu_to_le32(2 << 13);
3708 nv->host_p = __constant_cpu_to_le32(BIT_11|BIT_10);
3709 nv->efi_parameters = __constant_cpu_to_le32(0);
3710 nv->reset_delay = 5;
3711 nv->max_luns_per_target = __constant_cpu_to_le16(128);
3712 nv->port_down_retry_count = __constant_cpu_to_le16(30);
3713 nv->link_down_timeout = __constant_cpu_to_le16(30);
3718 /* Reset Initialization control block */
3719 memset(icb, 0, ha->init_cb_size);
3721 /* Copy 1st segment. */
3722 dptr1 = (uint8_t *)icb;
3723 dptr2 = (uint8_t *)&nv->version;
3724 cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version;
3726 *dptr1++ = *dptr2++;
3728 icb->login_retry_count = nv->login_retry_count;
3729 icb->link_down_on_nos = nv->link_down_on_nos;
3731 /* Copy 2nd segment. */
3732 dptr1 = (uint8_t *)&icb->interrupt_delay_timer;
3733 dptr2 = (uint8_t *)&nv->interrupt_delay_timer;
3734 cnt = (uint8_t *)&icb->reserved_3 -
3735 (uint8_t *)&icb->interrupt_delay_timer;
3737 *dptr1++ = *dptr2++;
3740 * Setup driver NVRAM options.
3742 qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
3745 /* Use alternate WWN? */
3746 if (nv->host_p & __constant_cpu_to_le32(BIT_15)) {
3747 memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
3748 memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
3751 /* Prepare nodename */
3752 if ((icb->firmware_options_1 & __constant_cpu_to_le32(BIT_14)) == 0) {
3754 * Firmware will apply the following mask if the nodename was not supplied.
3757 memcpy(icb->node_name, icb->port_name, WWN_SIZE);
3758 icb->node_name[0] &= 0xF0;
3761 /* Set host adapter parameters. */
3762 ha->flags.disable_risc_code_load = 0;
3763 ha->flags.enable_lip_reset = 0;
3764 ha->flags.enable_lip_full_login =
3765 le32_to_cpu(nv->host_p) & BIT_10 ? 1: 0;
3766 ha->flags.enable_target_reset =
3767 le32_to_cpu(nv->host_p) & BIT_11 ? 1: 0;
3768 ha->flags.enable_led_scheme = 0;
3769 ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1: 0;
3771 ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
3772 (BIT_6 | BIT_5 | BIT_4)) >> 4;
3774 memcpy(ha->fw_seriallink_options24, nv->seriallink_options,
3775 sizeof(ha->fw_seriallink_options24));
3777 /* save HBA serial number */
3778 ha->serial0 = icb->port_name[5];
3779 ha->serial1 = icb->port_name[6];
3780 ha->serial2 = icb->port_name[7];
3781 memcpy(vha->node_name, icb->node_name, WWN_SIZE);
3782 memcpy(vha->port_name, icb->port_name, WWN_SIZE);
3784 icb->execution_throttle = __constant_cpu_to_le16(0xFFFF);
3786 ha->retry_count = le16_to_cpu(nv->login_retry_count);
3788 /* Set minimum login_timeout to 4 seconds. */
3789 if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout)
3790 nv->login_timeout = cpu_to_le16(ql2xlogintimeout);
3791 if (le16_to_cpu(nv->login_timeout) < 4)
3792 nv->login_timeout = __constant_cpu_to_le16(4);
3793 ha->login_timeout = le16_to_cpu(nv->login_timeout);
3794 icb->login_timeout = nv->login_timeout;
3796 /* Set minimum RATOV to 100 tenths of a second. */
3799 ha->loop_reset_delay = nv->reset_delay;
3801 /* Link Down Timeout = 0:
3803 * When Port Down timer expires we will start returning
3804 * I/Os to OS with "DID_NO_CONNECT".
3806 * Link Down Timeout != 0:
3808 * The driver waits for the link to come up after link down
3809 * before returning I/Os to OS with "DID_NO_CONNECT".
3811 if (le16_to_cpu(nv->link_down_timeout) == 0) {
3812 ha->loop_down_abort_time =
3813 (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
3815 ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout);
3816 ha->loop_down_abort_time =
3817 (LOOP_DOWN_TIME - ha->link_down_timeout);
3820 /* Need enough time to try and get the port back. */
3821 ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count);
3822 if (qlport_down_retry)
3823 ha->port_down_retry_count = qlport_down_retry;
3825 /* Set login_retry_count */
3826 ha->login_retry_count = le16_to_cpu(nv->login_retry_count);
3827 if (ha->port_down_retry_count ==
3828 le16_to_cpu(nv->port_down_retry_count) &&
3829 ha->port_down_retry_count > 3)
3830 ha->login_retry_count = ha->port_down_retry_count;
3831 else if (ha->port_down_retry_count > (int)ha->login_retry_count)
3832 ha->login_retry_count = ha->port_down_retry_count;
3833 if (ql2xloginretrycount)
3834 ha->login_retry_count = ql2xloginretrycount;
3837 if (!vha->flags.init_done) {
3838 ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
3839 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
3840 ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
3841 le16_to_cpu(icb->interrupt_delay_timer): 2;
3843 icb->firmware_options_2 &= __constant_cpu_to_le32(
3844 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
3845 vha->flags.process_response_queue = 0;
3846 if (ha->zio_mode != QLA_ZIO_DISABLED) {
3847 ha->zio_mode = QLA_ZIO_MODE_6;
3849 DEBUG2(printk("scsi(%ld): ZIO mode %d enabled; timer delay "
3850 "(%d us).\n", vha->host_no, ha->zio_mode,
3851 ha->zio_timer * 100));
3852 qla_printk(KERN_INFO, ha,
3853 "ZIO mode %d enabled; timer delay (%d us).\n",
3854 ha->zio_mode, ha->zio_timer * 100);
3856 icb->firmware_options_2 |= cpu_to_le32(
3857 (uint32_t)ha->zio_mode);
3858 icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
3859 vha->flags.process_response_queue = 1;
3863 DEBUG2_3(printk(KERN_WARNING
3864 "scsi(%ld): NVRAM configuration failed!\n", vha->host_no));
3870 qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
3873 int rval = QLA_SUCCESS;
3874 int segments, fragment;
3875 uint32_t *dcode, dlen;
3879 struct qla_hw_data *ha = vha->hw;
3880 struct req_que *req = ha->req_q_map[0];
3882 qla_printk(KERN_INFO, ha,
3883 "FW: Loading from flash (%x)...\n", faddr);
3887 segments = FA_RISC_CODE_SEGMENTS;
3888 dcode = (uint32_t *)req->ring;
3891 /* Validate firmware image by checking version. */
3892 qla24xx_read_flash_data(vha, dcode, faddr + 4, 4);
3893 for (i = 0; i < 4; i++)
3894 dcode[i] = be32_to_cpu(dcode[i]);
3895 if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff &&
3896 dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
3897 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
3899 qla_printk(KERN_WARNING, ha,
3900 "Unable to verify integrity of flash firmware image!\n");
3901 qla_printk(KERN_WARNING, ha,
3902 "Firmware data: %08x %08x %08x %08x!\n", dcode[0],
3903 dcode[1], dcode[2], dcode[3]);
3905 return QLA_FUNCTION_FAILED;
3908 while (segments && rval == QLA_SUCCESS) {
3909 /* Read segment's load information. */
3910 qla24xx_read_flash_data(vha, dcode, faddr, 4);
3912 risc_addr = be32_to_cpu(dcode[2]);
3913 *srisc_addr = *srisc_addr == 0 ? risc_addr : *srisc_addr;
3914 risc_size = be32_to_cpu(dcode[3]);
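/*
 * Illustrative note (not driver code): each firmware segment in flash starts
 * with a small big-endian header; per the reads above, dcode[2] carries the
 * RISC load address and dcode[3] the segment length in 32-bit words, and the
 * segment body is then streamed to the chip in ha->fw_transfer_size chunks:
 *
 *	qla24xx_read_flash_data(vha, dcode, faddr, 4);
 *	risc_addr = be32_to_cpu(dcode[2]);    load address for this segment
 *	risc_size = be32_to_cpu(dcode[3]);    dwords remaining in the segment
 */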
3917 while (risc_size > 0 && rval == QLA_SUCCESS) {
3918 dlen = (uint32_t)(ha->fw_transfer_size >> 2);
3919 if (dlen > risc_size)
3922 DEBUG7(printk("scsi(%ld): Loading risc segment@ risc "
3923 "addr %x, number of dwords 0x%x, offset 0x%x.\n",
3924 vha->host_no, risc_addr, dlen, faddr));
3926 qla24xx_read_flash_data(vha, dcode, faddr, dlen);
3927 for (i = 0; i < dlen; i++)
3928 dcode[i] = swab32(dcode[i]);
3930 rval = qla2x00_load_ram(vha, req->dma, risc_addr,
3933 DEBUG(printk("scsi(%ld):[ERROR] Failed to load "
3934 "segment %d of firmware\n", vha->host_no,
3936 qla_printk(KERN_WARNING, ha,
3937 "[ERROR] Failed to load segment %d of "
3938 "firmware\n", fragment);
3955 #define QLA_FW_URL "ftp://ftp.qlogic.com/outgoing/linux/firmware/"
3958 qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
3962 uint16_t *wcode, *fwcode;
3963 uint32_t risc_addr, risc_size, fwclen, wlen, *seg;
3964 struct fw_blob *blob;
3965 struct qla_hw_data *ha = vha->hw;
3966 struct req_que *req = ha->req_q_map[0];
3968 /* Load firmware blob. */
3969 blob = qla2x00_request_firmware(vha);
3971 qla_printk(KERN_ERR, ha, "Firmware image unavailable.\n");
3972 qla_printk(KERN_ERR, ha, "Firmware images can be retrieved "
3973 "from: " QLA_FW_URL ".\n");
3974 return QLA_FUNCTION_FAILED;
3979 wcode = (uint16_t *)req->ring;
3981 fwcode = (uint16_t *)blob->fw->data;
3984 /* Validate firmware image by checking version. */
3985 if (blob->fw->size < 8 * sizeof(uint16_t)) {
3986 qla_printk(KERN_WARNING, ha,
3987 "Unable to verify integrity of firmware image (%Zd)!\n",
3989 goto fail_fw_integrity;
3991 for (i = 0; i < 4; i++)
3992 wcode[i] = be16_to_cpu(fwcode[i + 4]);
3993 if ((wcode[0] == 0xffff && wcode[1] == 0xffff && wcode[2] == 0xffff &&
3994 wcode[3] == 0xffff) || (wcode[0] == 0 && wcode[1] == 0 &&
3995 wcode[2] == 0 && wcode[3] == 0)) {
3996 qla_printk(KERN_WARNING, ha,
3997 "Unable to verify integrity of firmware image!\n");
3998 qla_printk(KERN_WARNING, ha,
3999 "Firmware data: %04x %04x %04x %04x!\n", wcode[0],
4000 wcode[1], wcode[2], wcode[3]);
4001 goto fail_fw_integrity;
4005 while (*seg && rval == QLA_SUCCESS) {
4007 *srisc_addr = *srisc_addr == 0 ? *seg : *srisc_addr;
4008 risc_size = be16_to_cpu(fwcode[3]);
4010 /* Validate firmware image size. */
4011 fwclen += risc_size * sizeof(uint16_t);
4012 if (blob->fw->size < fwclen) {
4013 qla_printk(KERN_WARNING, ha,
4014 "Unable to verify integrity of firmware image "
4015 "(%Zd)!\n", blob->fw->size);
4016 goto fail_fw_integrity;
4020 while (risc_size > 0 && rval == QLA_SUCCESS) {
4021 wlen = (uint16_t)(ha->fw_transfer_size >> 1);
4022 if (wlen > risc_size)
4025 DEBUG7(printk("scsi(%ld): Loading risc segment@ risc "
4026 "addr %x, number of words 0x%x.\n", vha->host_no,
4029 for (i = 0; i < wlen; i++)
4030 wcode[i] = swab16(fwcode[i]);
4032 rval = qla2x00_load_ram(vha, req->dma, risc_addr,
4035 DEBUG(printk("scsi(%ld):[ERROR] Failed to load "
4036 "segment %d of firmware\n", vha->host_no,
4038 qla_printk(KERN_WARNING, ha,
4039 "[ERROR] Failed to load segment %d of "
4040 "firmware\n", fragment);
4056 return QLA_FUNCTION_FAILED;
4060 qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4063 int segments, fragment;
4064 uint32_t *dcode, dlen;
4068 struct fw_blob *blob;
4069 uint32_t *fwcode, fwclen;
4070 struct qla_hw_data *ha = vha->hw;
4071 struct req_que *req = ha->req_q_map[0];
4073 /* Load firmware blob. */
4074 blob = qla2x00_request_firmware(vha);
4076 qla_printk(KERN_ERR, ha, "Firmware image unavailable.\n");
4077 qla_printk(KERN_ERR, ha, "Firmware images can be retrieved "
4078 "from: " QLA_FW_URL ".\n");
4080 return QLA_FUNCTION_FAILED;
4083 qla_printk(KERN_INFO, ha,
4084 "FW: Loading via request-firmware...\n");
4088 segments = FA_RISC_CODE_SEGMENTS;
4089 dcode = (uint32_t *)req->ring;
4091 fwcode = (uint32_t *)blob->fw->data;
4094 /* Validate firmware image by checking version. */
4095 if (blob->fw->size < 8 * sizeof(uint32_t)) {
4096 qla_printk(KERN_WARNING, ha,
4097 "Unable to verify integrity of firmware image (%Zd)!\n",
4099 goto fail_fw_integrity;
4101 for (i = 0; i < 4; i++)
4102 dcode[i] = be32_to_cpu(fwcode[i + 4]);
4103 if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff &&
4104 dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
4105 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
4107 qla_printk(KERN_WARNING, ha,
4108 "Unable to verify integrity of firmware image!\n");
4109 qla_printk(KERN_WARNING, ha,
4110 "Firmware data: %08x %08x %08x %08x!\n", dcode[0],
4111 dcode[1], dcode[2], dcode[3]);
4112 goto fail_fw_integrity;
4115 while (segments && rval == QLA_SUCCESS) {
4116 risc_addr = be32_to_cpu(fwcode[2]);
4117 *srisc_addr = *srisc_addr == 0 ? risc_addr : *srisc_addr;
4118 risc_size = be32_to_cpu(fwcode[3]);
4120 /* Validate firmware image size. */
4121 fwclen += risc_size * sizeof(uint32_t);
4122 if (blob->fw->size < fwclen) {
4123 qla_printk(KERN_WARNING, ha,
4124 "Unable to verify integrity of firmware image "
4125 "(%Zd)!\n", blob->fw->size);
4127 goto fail_fw_integrity;
4131 while (risc_size > 0 && rval == QLA_SUCCESS) {
4132 dlen = (uint32_t)(ha->fw_transfer_size >> 2);
4133 if (dlen > risc_size)
4136 DEBUG7(printk("scsi(%ld): Loading risc segment@ risc "
4137 "addr %x, number of dwords 0x%x.\n", vha->host_no,
4140 for (i = 0; i < dlen; i++)
4141 dcode[i] = swab32(fwcode[i]);
4143 rval = qla2x00_load_ram(vha, req->dma, risc_addr,
4146 DEBUG(printk("scsi(%ld):[ERROR] Failed to load "
4147 "segment %d of firmware\n", vha->host_no,
4149 qla_printk(KERN_WARNING, ha,
4150 "[ERROR] Failed to load segment %d of "
4151 "firmware\n", fragment);
4167 return QLA_FUNCTION_FAILED;
4171 qla24xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4175 if (ql2xfwloadbin == 1)
4176 return qla81xx_load_risc(vha, srisc_addr);
4180 * 1) Firmware via request-firmware interface (.bin file).
4181 * 2) Firmware residing in flash.
4183 rval = qla24xx_load_risc_blob(vha, srisc_addr);
4184 if (rval == QLA_SUCCESS)
4187 return qla24xx_load_risc_flash(vha, srisc_addr,
4188 vha->hw->flt_region_fw);
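/*
 * Summary (illustrative, drawn from the comments and checks above and
 * below): the ql2xfwloadbin module parameter can override the default
 * firmware-source ordering; otherwise each chip family keeps its own
 * preference.
 *
 *	ql2xfwloadbin == 1  ->  ISP24xx takes the qla81xx (flash-first) path
 *	ql2xfwloadbin == 2  ->  ISP81xx goes straight to the .bin blob attempt
 *	default, ISP24xx    ->  1) request-firmware .bin blob   2) flash
 *	default, ISP81xx    ->  1) flash   2) .bin blob   3) golden firmware
 */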
4192 qla81xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4195 struct qla_hw_data *ha = vha->hw;
4197 if (ql2xfwloadbin == 2)
4202 * 1) Firmware residing in flash.
4203 * 2) Firmware via request-firmware interface (.bin file).
4204 * 3) Golden-Firmware residing in flash -- limited operation.
4206 rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw);
4207 if (rval == QLA_SUCCESS)
4211 rval = qla24xx_load_risc_blob(vha, srisc_addr);
4212 if (rval == QLA_SUCCESS || !ha->flt_region_gold_fw)
4215 qla_printk(KERN_ERR, ha,
4216 "FW: Attempting to fallback to golden firmware...\n");
4217 rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_gold_fw);
4218 if (rval != QLA_SUCCESS)
4221 qla_printk(KERN_ERR, ha,
4222 "FW: Please update operational firmware...\n");
4223 ha->flags.running_gold_fw = 1;
4229 qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
4232 struct qla_hw_data *ha = vha->hw;
4234 if (!IS_FWI2_CAPABLE(ha))
4236 if (!ha->fw_major_version)
4239 ret = qla2x00_stop_firmware(vha);
4240 for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT &&
4241 ret != QLA_INVALID_COMMAND && retries ; retries--) {
4242 ha->isp_ops->reset_chip(vha);
4243 if (ha->isp_ops->chip_diag(vha) != QLA_SUCCESS)
4245 if (qla2x00_setup_chip(vha) != QLA_SUCCESS)
4247 qla_printk(KERN_INFO, ha,
4248 "Attempting retry of stop-firmware command...\n");
4249 ret = qla2x00_stop_firmware(vha);
4254 qla24xx_configure_vhba(scsi_qla_host_t *vha)
4256 int rval = QLA_SUCCESS;
4257 uint16_t mb[MAILBOX_REGISTER_COUNT];
4258 struct qla_hw_data *ha = vha->hw;
4259 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
4260 struct req_que *req;
4261 struct rsp_que *rsp;
4266 rval = qla2x00_fw_ready(base_vha);
4267 if (ql2xmultique_tag)
4268 req = ha->req_q_map[0];
4273 if (rval == QLA_SUCCESS) {
4274 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
4275 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
4278 vha->flags.management_server_logged_in = 0;
4280 /* Login to SNS first */
4281 ha->isp_ops->fabric_login(vha, NPH_SNS, 0xff, 0xff, 0xfc, mb, BIT_1);
4282 if (mb[0] != MBS_COMMAND_COMPLETE) {
4283 DEBUG15(qla_printk(KERN_INFO, ha,
4284 "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x "
4285 "mb[2]=%x mb[6]=%x mb[7]=%x\n", NPH_SNS,
4286 mb[0], mb[1], mb[2], mb[6], mb[7]));
4287 return (QLA_FUNCTION_FAILED);
4290 atomic_set(&vha->loop_down_timer, 0);
4291 atomic_set(&vha->loop_state, LOOP_UP);
4292 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
4293 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
4294 rval = qla2x00_loop_resync(base_vha);
4299 /* 84XX Support **************************************************************/
4301 static LIST_HEAD(qla_cs84xx_list);
4302 static DEFINE_MUTEX(qla_cs84xx_mutex);
4304 static struct qla_chip_state_84xx *
4305 qla84xx_get_chip(struct scsi_qla_host *vha)
4307 struct qla_chip_state_84xx *cs84xx;
4308 struct qla_hw_data *ha = vha->hw;
4310 mutex_lock(&qla_cs84xx_mutex);
4312 /* Find any shared 84xx chip. */
4313 list_for_each_entry(cs84xx, &qla_cs84xx_list, list) {
4314 if (cs84xx->bus == ha->pdev->bus) {
4315 kref_get(&cs84xx->kref);
4320 cs84xx = kzalloc(sizeof(*cs84xx), GFP_KERNEL);
4324 kref_init(&cs84xx->kref);
4325 spin_lock_init(&cs84xx->access_lock);
4326 mutex_init(&cs84xx->fw_update_mutex);
4327 cs84xx->bus = ha->pdev->bus;
4329 list_add_tail(&cs84xx->list, &qla_cs84xx_list);
4331 mutex_unlock(&qla_cs84xx_mutex);
4336 __qla84xx_chip_release(struct kref *kref)
4338 struct qla_chip_state_84xx *cs84xx =
4339 container_of(kref, struct qla_chip_state_84xx, kref);
4341 mutex_lock(&qla_cs84xx_mutex);
4342 list_del(&cs84xx->list);
4343 mutex_unlock(&qla_cs84xx_mutex);
4348 qla84xx_put_chip(struct scsi_qla_host *vha)
4350 struct qla_hw_data *ha = vha->hw;
4352 kref_put(&ha->cs84xx->kref, __qla84xx_chip_release);
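/*
 * Illustrative note (not driver code): ISP84xx chip state is shared between
 * host instances on the same PCI bus, so the state object is reference
 * counted with a kref and kept on the global qla_cs84xx_list keyed by
 * pdev->bus. A holder obtains it once and releases it symmetrically:
 *
 *	cs = qla84xx_get_chip(vha);    takes a kref (or allocates the state)
 *	... serialize via cs->fw_update_mutex / cs->access_lock ...
 *	qla84xx_put_chip(vha);         drops the kref; the final put unlinks
 *	                               the state from qla_cs84xx_list
 */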
4356 qla84xx_init_chip(scsi_qla_host_t *vha)
4360 struct qla_hw_data *ha = vha->hw;
4362 mutex_lock(&ha->cs84xx->fw_update_mutex);
4364 rval = qla84xx_verify_chip(vha, status);
4366 mutex_unlock(&ha->cs84xx->fw_update_mutex);
4368 return rval != QLA_SUCCESS || status[0] ? QLA_FUNCTION_FAILED:
4372 /* 81XX Support **************************************************************/
4375 qla81xx_nvram_config(scsi_qla_host_t *vha)
4378 struct init_cb_81xx *icb;
4379 struct nvram_81xx *nv;
4381 uint8_t *dptr1, *dptr2;
4384 struct qla_hw_data *ha = vha->hw;
4387 icb = (struct init_cb_81xx *)ha->init_cb;
4390 /* Determine NVRAM starting address. */
4391 ha->nvram_size = sizeof(struct nvram_81xx);
4392 ha->vpd_size = FA_NVRAM_VPD_SIZE;
4394 /* Get VPD data into cache */
4395 ha->vpd = ha->nvram + VPD_OFFSET;
4396 ha->isp_ops->read_optrom(vha, ha->vpd, ha->flt_region_vpd << 2,
4399 /* Get NVRAM data into cache and calculate checksum. */
4400 ha->isp_ops->read_optrom(vha, ha->nvram, ha->flt_region_nvram << 2,
4402 dptr = (uint32_t *)nv;
4403 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++)
4404 chksum += le32_to_cpu(*dptr++);
4406 DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", vha->host_no));
4407 DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size));
4409 /* Bad NVRAM data, set default parameters. */
4410 if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P'
4411 || nv->id[3] != ' ' ||
4412 nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) {
4413 /* Reset NVRAM data. */
4414 qla_printk(KERN_WARNING, ha, "Inconsistent NVRAM detected: "
4415 "checksum=0x%x id=%c version=0x%x.\n", chksum, nv->id[0],
4416 le16_to_cpu(nv->nvram_version));
4417 qla_printk(KERN_WARNING, ha, "Falling back to functioning (yet "
4418 "invalid -- WWPN) defaults.\n");
4421 * Set default initialization control block.
4423 memset(nv, 0, ha->nvram_size);
4424 nv->nvram_version = __constant_cpu_to_le16(ICB_VERSION);
4425 nv->version = __constant_cpu_to_le16(ICB_VERSION);
4426 nv->frame_payload_size = __constant_cpu_to_le16(2048);
4427 nv->execution_throttle = __constant_cpu_to_le16(0xFFFF);
4428 nv->exchange_count = __constant_cpu_to_le16(0);
4429 nv->port_name[0] = 0x21;
4430 nv->port_name[1] = 0x00 + ha->port_no;
4431 nv->port_name[2] = 0x00;
4432 nv->port_name[3] = 0xe0;
4433 nv->port_name[4] = 0x8b;
4434 nv->port_name[5] = 0x1c;
4435 nv->port_name[6] = 0x55;
4436 nv->port_name[7] = 0x86;
4437 nv->node_name[0] = 0x20;
4438 nv->node_name[1] = 0x00;
4439 nv->node_name[2] = 0x00;
4440 nv->node_name[3] = 0xe0;
4441 nv->node_name[4] = 0x8b;
4442 nv->node_name[5] = 0x1c;
4443 nv->node_name[6] = 0x55;
4444 nv->node_name[7] = 0x86;
4445 nv->login_retry_count = __constant_cpu_to_le16(8);
4446 nv->interrupt_delay_timer = __constant_cpu_to_le16(0);
4447 nv->login_timeout = __constant_cpu_to_le16(0);
4448 nv->firmware_options_1 =
4449 __constant_cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
4450 nv->firmware_options_2 = __constant_cpu_to_le32(2 << 4);
4451 nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12);
4452 nv->firmware_options_3 = __constant_cpu_to_le32(2 << 13);
4453 nv->host_p = __constant_cpu_to_le32(BIT_11|BIT_10);
4454 nv->efi_parameters = __constant_cpu_to_le32(0);
4455 nv->reset_delay = 5;
4456 nv->max_luns_per_target = __constant_cpu_to_le16(128);
4457 nv->port_down_retry_count = __constant_cpu_to_le16(30);
4458 nv->link_down_timeout = __constant_cpu_to_le16(30);
4459 nv->enode_mac[0] = 0x00;
4460 nv->enode_mac[1] = 0x02;
4461 nv->enode_mac[2] = 0x03;
4462 nv->enode_mac[3] = 0x04;
4463 nv->enode_mac[4] = 0x05;
4464 nv->enode_mac[5] = 0x06 + ha->port_no;
4469 /* Reset Initialization control block */
4470 memset(icb, 0, sizeof(struct init_cb_81xx));
4472 /* Copy 1st segment. */
4473 dptr1 = (uint8_t *)icb;
4474 dptr2 = (uint8_t *)&nv->version;
4475 cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version;
4477 *dptr1++ = *dptr2++;
4479 icb->login_retry_count = nv->login_retry_count;
4481 /* Copy 2nd segment. */
4482 dptr1 = (uint8_t *)&icb->interrupt_delay_timer;
4483 dptr2 = (uint8_t *)&nv->interrupt_delay_timer;
4484 cnt = (uint8_t *)&icb->reserved_5 -
4485 (uint8_t *)&icb->interrupt_delay_timer;
4487 *dptr1++ = *dptr2++;
4489 memcpy(icb->enode_mac, nv->enode_mac, sizeof(icb->enode_mac));
4490 /* Some boards (with valid NVRAMs) still have NULL enode_mac!! */
4491 if (!memcmp(icb->enode_mac, "\0\0\0\0\0\0", sizeof(icb->enode_mac))) {
4492 icb->enode_mac[0] = 0x01;
4493 icb->enode_mac[1] = 0x02;
4494 icb->enode_mac[2] = 0x03;
4495 icb->enode_mac[3] = 0x04;
4496 icb->enode_mac[4] = 0x05;
4497 icb->enode_mac[5] = 0x06 + ha->port_no;
4500 /* Use extended-initialization control block. */
4501 memcpy(ha->ex_init_cb, &nv->ex_version, sizeof(*ha->ex_init_cb));
4504 * Setup driver NVRAM options.
4506 qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
4509 /* Use alternate WWN? */
4510 if (nv->host_p & __constant_cpu_to_le32(BIT_15)) {
4511 memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
4512 memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
4515 /* Prepare nodename */
4516 if ((icb->firmware_options_1 & __constant_cpu_to_le32(BIT_14)) == 0) {
4518 * Firmware will apply the following mask if the nodename was not supplied.
4521 memcpy(icb->node_name, icb->port_name, WWN_SIZE);
4522 icb->node_name[0] &= 0xF0;
4525 /* Set host adapter parameters. */
4526 ha->flags.disable_risc_code_load = 0;
4527 ha->flags.enable_lip_reset = 0;
4528 ha->flags.enable_lip_full_login =
4529 le32_to_cpu(nv->host_p) & BIT_10 ? 1: 0;
4530 ha->flags.enable_target_reset =
4531 le32_to_cpu(nv->host_p) & BIT_11 ? 1: 0;
4532 ha->flags.enable_led_scheme = 0;
4533 ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1: 0;
4535 ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
4536 (BIT_6 | BIT_5 | BIT_4)) >> 4;
4538 /* save HBA serial number */
4539 ha->serial0 = icb->port_name[5];
4540 ha->serial1 = icb->port_name[6];
4541 ha->serial2 = icb->port_name[7];
4542 memcpy(vha->node_name, icb->node_name, WWN_SIZE);
4543 memcpy(vha->port_name, icb->port_name, WWN_SIZE);
4545 icb->execution_throttle = __constant_cpu_to_le16(0xFFFF);
4547 ha->retry_count = le16_to_cpu(nv->login_retry_count);
4549 /* Set minimum login_timeout to 4 seconds. */
4550 if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout)
4551 nv->login_timeout = cpu_to_le16(ql2xlogintimeout);
4552 if (le16_to_cpu(nv->login_timeout) < 4)
4553 nv->login_timeout = __constant_cpu_to_le16(4);
4554 ha->login_timeout = le16_to_cpu(nv->login_timeout);
4555 icb->login_timeout = nv->login_timeout;
4557 /* Set minimum RATOV to 100 tenths of a second. */
4560 ha->loop_reset_delay = nv->reset_delay;
4562 /* Link Down Timeout = 0:
4564 * When Port Down timer expires we will start returning
4565 * I/Os to OS with "DID_NO_CONNECT".
4567 * Link Down Timeout != 0:
4569 * The driver waits for the link to come up after link down
4570 * before returning I/Os to OS with "DID_NO_CONNECT".
4572 if (le16_to_cpu(nv->link_down_timeout) == 0) {
4573 ha->loop_down_abort_time =
4574 (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
4576 ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout);
4577 ha->loop_down_abort_time =
4578 (LOOP_DOWN_TIME - ha->link_down_timeout);
4581 /* Need enough time to try and get the port back. */
4582 ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count);
4583 if (qlport_down_retry)
4584 ha->port_down_retry_count = qlport_down_retry;
4586 /* Set login_retry_count */
4587 ha->login_retry_count = le16_to_cpu(nv->login_retry_count);
4588 if (ha->port_down_retry_count ==
4589 le16_to_cpu(nv->port_down_retry_count) &&
4590 ha->port_down_retry_count > 3)
4591 ha->login_retry_count = ha->port_down_retry_count;
4592 else if (ha->port_down_retry_count > (int)ha->login_retry_count)
4593 ha->login_retry_count = ha->port_down_retry_count;
4594 if (ql2xloginretrycount)
4595 ha->login_retry_count = ql2xloginretrycount;
4598 if (!vha->flags.init_done) {
4599 ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
4600 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
4601 ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
4602 le16_to_cpu(icb->interrupt_delay_timer): 2;
4604 icb->firmware_options_2 &= __constant_cpu_to_le32(
4605 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
4606 vha->flags.process_response_queue = 0;
4607 if (ha->zio_mode != QLA_ZIO_DISABLED) {
4608 ha->zio_mode = QLA_ZIO_MODE_6;
4610 DEBUG2(printk("scsi(%ld): ZIO mode %d enabled; timer delay "
4611 "(%d us).\n", vha->host_no, ha->zio_mode,
4612 ha->zio_timer * 100));
4613 qla_printk(KERN_INFO, ha,
4614 "ZIO mode %d enabled; timer delay (%d us).\n",
4615 ha->zio_mode, ha->zio_timer * 100);
4617 icb->firmware_options_2 |= cpu_to_le32(
4618 (uint32_t)ha->zio_mode);
4619 icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
4620 vha->flags.process_response_queue = 1;
4624 DEBUG2_3(printk(KERN_WARNING
4625 "scsi(%ld): NVRAM configuration failed!\n", vha->host_no));
4631 qla81xx_update_fw_options(scsi_qla_host_t *ha)