/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2011 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/ctype.h>
#include <linux/aer.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"
char *_dump_buf_data;
unsigned long _dump_buf_data_order;
char *_dump_buf_dif;
unsigned long _dump_buf_dif_order;
spinlock_t _dump_buf_lock;
static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
static int lpfc_sli4_queue_verify(struct lpfc_hba *);
static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
static int lpfc_setup_endian_order(struct lpfc_hba *);
static int lpfc_sli4_read_config(struct lpfc_hba *);
static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
static void lpfc_free_sgl_list(struct lpfc_hba *);
static int lpfc_init_sgl_list(struct lpfc_hba *);
static int lpfc_init_active_sgl_array(struct lpfc_hba *);
static void lpfc_free_active_sgl(struct lpfc_hba *);
static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
static DEFINE_IDR(lpfc_hba_index);
/**
 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
 * mailbox command. It retrieves the revision information from the HBA and
 * collects the Vital Product Data (VPD) about the HBA for preparing the
 * configuration of the HBA.
 *
 * Return codes:
 *   0 - success.
 *   -ERESTART - requests the SLI layer to reset the HBA and try again.
 *   Any other value - indicates an error.
 **/
int
lpfc_config_port_prep(struct lpfc_hba *phba)
{
	lpfc_vpd_t *vp = &phba->vpd;
	int i = 0, rc;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	char *lpfc_vpd_data = NULL;
	uint16_t offset = 0;
	static char licensed[56] =
		    "key unlock for use with gnu public licensed code only\0";
	static int init_key = 1;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	mb = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;

	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
		if (init_key) {
			uint32_t *ptext = (uint32_t *) licensed;

			for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
				*ptext = cpu_to_be32(*ptext);
			init_key = 0;
		}

		lpfc_read_nv(phba, pmb);
		memset((char *)mb->un.varRDnvp.rsvd3, 0,
			sizeof(mb->un.varRDnvp.rsvd3));
		memcpy((char *)mb->un.varRDnvp.rsvd3, licensed,
			 sizeof(licensed));

		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0324 Config Port initialization "
					"error, mbxCmd x%x READ_NVPARM, "
					"mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -ERESTART;
		}
		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
		       sizeof(phba->wwnn));
		memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
		       sizeof(phba->wwpn));
	}

	phba->sli3_options = 0x0;

	/* Setup and issue mailbox READ REV command */
	lpfc_read_rev(phba, pmb);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0439 Adapter failed to init, mbxCmd x%x "
				"READ_REV, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	/*
	 * The value of rr must be 1 since the driver set the cv field to 1.
	 * This setting requires the FW to set all revision fields.
	 */
	if (mb->un.varRdRev.rr == 0) {
		vp->rev.rBit = 0;
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0440 Adapter failed to init, READ_REV has "
				"missing revision information.\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EINVAL;
	}

	/* Save information as VPD data */
	vp->rev.rBit = 1;
	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
	memcpy(vp->rev.sli1FwName, (char *) mb->un.varRdRev.sli1FwName, 16);
	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
	memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
	vp->rev.biuRev = mb->un.varRdRev.biuRev;
	vp->rev.smRev = mb->un.varRdRev.smRev;
	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
	vp->rev.endecRev = mb->un.varRdRev.endecRev;
	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

	/* If the sli feature level is less than 9, we must
	 * tear down all RPIs and VPIs on link down if NPIV
	 * is enabled.
	 */
	if (vp->rev.feaLevelHigh < 9)
		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

	if (lpfc_is_LC_HBA(phba->pcidev->device))
		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
						sizeof (phba->RandomData));

	/* Get adapter VPD information */
	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
	if (!lpfc_vpd_data)
		goto out_free_mbox;
	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0441 VPD not present on adapter, "
					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/* dump mem may return a zero when finished or we got a
		 * mailbox error, either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;
		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      lpfc_vpd_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

	kfree(lpfc_vpd_data);
out_free_mbox:
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}
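
/*
 * For illustration: the polled-mailbox pattern used above recurs throughout
 * this file. A minimal sketch, with error handling elided:
 *
 *	LPFC_MBOXQ_t *pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	lpfc_read_rev(phba, pmb);                      // build the command
 *	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); // spin until done
 *	if (rc != MBX_SUCCESS)                         // also check mbxStatus
 *		goto fail;
 *	mempool_free(pmb, phba->mbox_mem_pool);        // caller frees in poll mode
 */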
/**
 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the driver's configure-asynchronous-event
 * mailbox command to the device. If the mailbox command returns successfully,
 * it will set the internal async event support flag to 1; otherwise, it will
 * set the internal async event support flag to 0.
 **/
static void
lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
		phba->temp_sensor_support = 1;
	else
		phba->temp_sensor_support = 0;
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}
/**
 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the dump mailbox command for getting
 * wake up parameters. When this command completes, the response contains
 * the option ROM version of the HBA. This function translates the version
 * number into a human readable string and stores it in OptionROMVersion.
 **/
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct prog_id *prg;
	uint32_t prog_id_word;
	char dist = ' ';
	/* character array used for decoding dist type. */
	char dist_char[] = "nabx";

	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return;
	}

	prg = (struct prog_id *) &prog_id_word;

	/* word 7 contain option rom version */
	prog_id_word = pmboxq->u.mb.un.varWords[7];

	/* Decode the Option rom version word to a readable string */
	if (prg->dist < 4)
		dist = dist_char[prg->dist];

	if ((prg->dist == 3) && (prg->num == 0))
		sprintf(phba->OptionROMVersion, "%d.%d%d",
			prg->ver, prg->rev, prg->lev);
	else
		sprintf(phba->OptionROMVersion, "%d.%d%d%c%d",
			prg->ver, prg->rev, prg->lev,
			dist, prg->num);
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}
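
/*
 * Worked example of the decode above, using hypothetical register contents:
 * prg->ver = 10, prg->rev = 2, prg->lev = 8, prg->dist = 1 and prg->num = 3
 * select dist = 'a' and yield an OptionROMVersion of "10.28a3"; the special
 * case dist == 3 && num == 0 drops the trailing "<dist><num>" suffix.
 */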
/**
 * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname,
 *	cfg_soft_wwnn, cfg_soft_wwpn
 * @vport: pointer to lpfc vport data structure.
 **/
void
lpfc_update_vport_wwn(struct lpfc_vport *vport)
{
	/* If the soft name exists then update it using the service params */
	if (vport->phba->cfg_soft_wwnn)
		u64_to_wwn(vport->phba->cfg_soft_wwnn,
			   vport->fc_sparam.nodeName.u.wwn);
	if (vport->phba->cfg_soft_wwpn)
		u64_to_wwn(vport->phba->cfg_soft_wwpn,
			   vport->fc_sparam.portName.u.wwn);

	/*
	 * If the name is empty or there exists a soft name
	 * then copy the service params name, otherwise use the fc name
	 */
	if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn)
		memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
			sizeof(struct lpfc_name));
	else
		memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
			sizeof(struct lpfc_name));

	if (vport->fc_portname.u.wwn[0] == 0 || vport->phba->cfg_soft_wwpn)
		memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
			sizeof(struct lpfc_name));
	else
		memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
			sizeof(struct lpfc_name));
}
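
/*
 * Byte-order note (assuming the standard u64_to_wwn()/wwn_to_u64() helpers,
 * which store the name most-significant byte first): a soft WWPN of
 * 0x10000000c9abcdefULL is written into fc_sparam.portName.u.wwn as
 * { 0x10, 0x00, 0x00, 0x00, 0xc9, 0xab, 0xcd, 0xef }.
 */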
/**
 * lpfc_config_port_post - Perform lpfc initialization after config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization after the CONFIG_PORT mailbox
 * command call. It performs all internal resource and state setups on the
 * port: post IOCB buffers, enable appropriate host interrupt attentions,
 * ELS ring timers, etc.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_config_port_post(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, timeout;
	int i, j;
	int rc;

	spin_lock_irq(&phba->hbalock);
	/*
	 * If the Config port completed correctly the HBA is not
	 * over heated any more.
	 */
	if (phba->over_temp_state == HBA_OVER_TEMP)
		phba->over_temp_state = HBA_NORMAL_TEMP;
	spin_unlock_irq(&phba->hbalock);

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;

	/* Get login parameters for NID.  */
	rc = lpfc_read_sparam(phba, pmb, 0);
	if (rc) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ENOMEM;
	}

	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0448 Adapter failed init, mbxCmd x%x "
				"READ_SPARM mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mp = (struct lpfc_dmabuf *) pmb->context1;
		mempool_free(pmb, phba->mbox_mem_pool);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		return -EIO;
	}

	mp = (struct lpfc_dmabuf *) pmb->context1;

	memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	pmb->context1 = NULL;
	lpfc_update_vport_wwn(vport);

	/* Update the fc_host data structures with new wwn. */
	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	fc_host_max_npiv_vports(shost) = phba->max_vpi;

	/* If no serial number in VPD data, use low 6 bytes of WWNN */
	/* This should be consolidated into parse_vpd ? - mr */
	if (phba->SerialNumber[0] == 0) {
		uint8_t *outptr;

		outptr = &vport->fc_nodename.u.s.IEEE[0];
		for (i = 0; i < 12; i++) {
			status = *outptr++;
			j = ((status & 0xf0) >> 4);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
			i++;
			j = (status & 0xf);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
		}
	}

	lpfc_read_config(phba, pmb);
	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0453 Adapter failed to init, mbxCmd x%x "
				"READ_CONFIG, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}

	/* Check if the port is disabled */
	lpfc_sli_read_link_ste(phba);

	/* Reset the DFT_HBA_Q_DEPTH to the max xri  */
	if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1))
		phba->cfg_hba_queue_depth =
			(mb->un.varRdConfig.max_xri + 1) -
					lpfc_sli4_get_els_iocb_cnt(phba);

	phba->lmt = mb->un.varRdConfig.lmt;

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_16G)
	    || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G)
		&& !(phba->lmt & LMT_1Gb))
	    || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G)
		&& !(phba->lmt & LMT_2Gb))
	    || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G)
		&& !(phba->lmt & LMT_4Gb))
	    || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G)
		&& !(phba->lmt & LMT_8Gb))
	    || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G)
		&& !(phba->lmt & LMT_10Gb))
	    || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G)
		&& !(phba->lmt & LMT_16Gb))) {
		/* Reset link speed to auto */
		lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
			"1302 Invalid speed for this board: "
			"Reset link speed to auto: x%x\n",
			phba->cfg_link_speed);
		phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
	}

	phba->link_state = LPFC_LINK_DOWN;

	/* Only process IOCBs on ELS ring till hba_state is READY */
	if (psli->ring[psli->extra_ring].cmdringaddr)
		psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->ring[psli->fcp_ring].cmdringaddr)
		psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->ring[psli->next_ring].cmdringaddr)
		psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT;

	/* Post receive buffers for desired rings */
	if (phba->sli_rev != 3)
		lpfc_post_rcv_buf(phba);

	/*
	 * Configure HBA MSI-X attention conditions to messages if MSI-X mode
	 */
	if (phba->intr_type == MSIX) {
		rc = lpfc_config_msi(phba, pmb);
		if (rc) {
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0352 Config MSI mailbox command "
					"failed, mbxCmd x%x, mbxStatus x%x\n",
					pmb->u.mb.mbxCommand,
					pmb->u.mb.mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	}

	spin_lock_irq(&phba->hbalock);
	/* Initialize ERATT handling flag */
	phba->hba_flag &= ~HBA_ERATT_HANDLED;

	/* Enable appropriate host interrupts */
	if (lpfc_readl(phba->HCregaddr, &status)) {
		spin_unlock_irq(&phba->hbalock);
		return -EIO;
	}
	status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
	if (psli->num_rings > 0)
		status |= HC_R0INT_ENA;
	if (psli->num_rings > 1)
		status |= HC_R1INT_ENA;
	if (psli->num_rings > 2)
		status |= HC_R2INT_ENA;
	if (psli->num_rings > 3)
		status |= HC_R3INT_ENA;

	if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
	    (phba->cfg_poll & DISABLE_FCP_RING_INT))
		status &= ~(HC_R0INT_ENA);

	writel(status, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	/* Set up ring-0 (ELS) timer */
	timeout = phba->fc_ratov * 2;
	mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
	/* Set up heart beat (HB) timer */
	mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
	phba->hb_outstanding = 0;
	phba->last_completion_time = jiffies;
	/* Set up error attention (ERATT) polling timer */
	mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);

	if (phba->hba_flag & LINK_DISABLED) {
		lpfc_printf_log(phba,
			KERN_ERR, LOG_INIT,
			"2598 Adapter Link is disabled.\n");
		lpfc_down_link(phba, pmb);
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
			lpfc_printf_log(phba,
			KERN_ERR, LOG_INIT,
			"2599 Adapter failed to issue DOWN_LINK"
			" mbox command rc 0x%x\n", rc);

			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
		lpfc_init_link(phba, pmb, phba->cfg_topology,
			phba->cfg_link_speed);
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		lpfc_set_loopback_flag(phba);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0454 Adapter failed to init, mbxCmd x%x "
				"INIT_LINK, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);

			/* Clear all interrupt enable conditions */
			writel(0, phba->HCregaddr);
			readl(phba->HCregaddr); /* flush */
			/* Clear all pending interrupts */
			writel(0xffffffff, phba->HAregaddr);
			readl(phba->HAregaddr); /* flush */
			phba->link_state = LPFC_HBA_ERROR;

			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	}
	/* MBOX buffer will be freed in mbox compl */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_config_async(phba, pmb, LPFC_ELS_RING);
	pmb->mbox_cmpl = lpfc_config_async_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_INIT,
				"0456 Adapter failed to issue "
				"ASYNCEVT_ENABLE mbox status x%x\n",
				rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	/* Get Option rom version */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_dump_wakeup_param(phba, pmb);
	pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
				"to get Option ROM version status x%x\n", rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	return 0;
}
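
/*
 * Timer arithmetic used above: mod_timer() takes an absolute expiry in
 * jiffies, so "jiffies + HZ * LPFC_HB_MBOX_INTERVAL" arms the heart-beat
 * timer LPFC_HB_MBOX_INTERVAL seconds (currently 5) from now; the ELS and
 * ERATT-poll timers are computed the same way.
 */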
/**
 * lpfc_hba_init_link - Initialize the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return codes
 *   0 - success
 *   Any other value - error
 **/
int
lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
{
	struct lpfc_vport *vport = phba->pport;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;
	pmb->vport = vport;

	lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	lpfc_set_loopback_flag(phba);
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0498 Adapter failed to init, mbxCmd x%x "
			"INIT_LINK, mbxStatus x%x\n",
			mb->mbxCommand, mb->mbxStatus);
		if (phba->sli_rev <= LPFC_SLI_REV3) {
			/* Clear all interrupt enable conditions */
			writel(0, phba->HCregaddr);
			readl(phba->HCregaddr); /* flush */
			/* Clear all pending interrupts */
			writel(0xffffffff, phba->HAregaddr);
			readl(phba->HAregaddr); /* flush */
		}
		phba->link_state = LPFC_HBA_ERROR;
		if (rc != MBX_BUSY || flag == MBX_POLL)
			mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}
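
/*
 * Usage sketch: with the lpfc_suppress_link_up module parameter set, port
 * bring-up leaves the link down and a later request can call
 * lpfc_hba_init_link(phba, MBX_NOWAIT) to raise it asynchronously; with
 * MBX_POLL the call blocks until the mailbox completes and pmb is freed
 * here instead of in the completion handler.
 */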
/**
 * lpfc_hba_down_link - this routine downs the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the DOWN_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use to stop the link.
 *
 * Return codes
 *   0 - success
 *   Any other value - error
 **/
int
lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
{
	LPFC_MBOXQ_t *pmb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_printf_log(phba,
		KERN_ERR, LOG_INIT,
		"0491 Adapter Link is disabled.\n");
	lpfc_down_link(phba, pmb);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
		lpfc_printf_log(phba,
		KERN_ERR, LOG_INIT,
		"2522 Adapter failed to issue DOWN_LINK"
		" mbox command rc 0x%x\n", rc);

		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}
/**
 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do LPFC uninitialization before the HBA is reset when
 * bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_prep(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	if (phba->sli_rev <= LPFC_SLI_REV3) {
		/* Disable interrupts */
		writel(0, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	if (phba->pport->load_flag & FC_UNLOADING)
		lpfc_cleanup_discovery_resources(phba->pport);
	else {
		vports = lpfc_create_vport_work_array(phba);
		if (vports != NULL)
			for (i = 0; i <= phba->max_vports &&
				vports[i] != NULL; i++)
				lpfc_cleanup_discovery_resources(vports[i]);
		lpfc_destroy_vport_work_array(phba, vports);
	}
	return 0;
}
/**
 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *mp, *next_mp;
	LIST_HEAD(completions);
	int i;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		lpfc_sli_hbqbuf_free_all(phba);
	else {
		/* Cleanup preposted buffers on the ELS ring */
		pring = &psli->ring[LPFC_ELS_RING];
		list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
			list_del(&mp->list);
			pring->postbufq_cnt--;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
	}

	spin_lock_irq(&phba->hbalock);
	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];

		/* At this point in time the HBA is either reset or DOA. Either
		 * way, nothing should be on txcmplq as it will NEVER complete.
		 */
		list_splice_init(&pring->txcmplq, &completions);
		pring->txcmplq_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		/* Cancel all the IOCBs from the completions list */
		lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
				      IOERR_SLI_ABORTED);

		lpfc_sli_abort_iocb_ring(phba, pring);
		spin_lock_irq(&phba->hbalock);
	}
	spin_unlock_irq(&phba->hbalock);

	return 0;
}
/**
 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
	struct lpfc_scsi_buf *psb, *psb_next;
	LIST_HEAD(aborts);
	int ret;
	unsigned long iflag = 0;
	struct lpfc_sglq *sglq_entry = NULL;

	ret = lpfc_hba_down_post_s3(phba);
	if (ret)
		return ret;
	/* At this point in time the HBA is either reset or DOA. Either
	 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
	 * on the lpfc_sgl_list so that it can either be freed if the
	 * driver is unloading or reposted if the driver is restarting
	 * the port.
	 */
	spin_lock_irq(&phba->hbalock);  /* required for lpfc_sgl_list and */
					/* scsi_buf_list */
	/* abts_sgl_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
	list_for_each_entry(sglq_entry,
		&phba->sli4_hba.lpfc_abts_els_sgl_list, list)
		sglq_entry->state = SGL_FREED;

	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
			&phba->sli4_hba.lpfc_sgl_list);
	spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
	/* abts_scsi_buf_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list,
			&aborts);
	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	spin_unlock_irq(&phba->hbalock);

	list_for_each_entry_safe(psb, psb_next, &aborts, list) {
		psb->pCmd = NULL;
		psb->status = IOSTAT_SUCCESS;
	}
	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	list_splice(&aborts, &phba->lpfc_scsi_buf_list);
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
	return 0;
}
/**
 * lpfc_hba_down_post - Wrapper func for hba down post routine
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 routine for performing
 * uninitialization after the HBA is reset when bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_post(struct lpfc_hba *phba)
{
	return (*phba->lpfc_hba_down_post)(phba);
}
/**
 * lpfc_hb_timeout - The HBA-timer timeout handler
 * @ptr: unsigned long holds the pointer to lpfc hba data structure.
 *
 * This is the HBA-timer timeout handler registered to the lpfc driver. When
 * this timer fires, a HBA timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_hb_timeout_handler. Any periodical operations will
 * be performed in the timeout handler and the HBA timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_hb_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba;
	uint32_t tmo_posted;
	unsigned long iflag;

	phba = (struct lpfc_hba *)ptr;

	/* Check for heart beat timeout conditions */
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
	if (!tmo_posted)
		phba->pport->work_port_events |= WORKER_HB_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	/* Tell the worker thread there is work to do */
	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}
/**
 * lpfc_rrq_timeout - The RRQ-timer timeout handler
 * @ptr: unsigned long holds the pointer to lpfc hba data structure.
 *
 * This is the RRQ-timer timeout handler registered to the lpfc driver. When
 * this timer fires, a RRQ timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_rrq_handler. Any periodical operations will
 * be performed in the timeout handler and the RRQ timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_rrq_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba;
	unsigned long iflag;

	phba = (struct lpfc_hba *)ptr;
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	phba->hba_flag |= HBA_RRQ_ACTIVE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
	lpfc_worker_wake_up(phba);
}
/**
 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the callback function to the lpfc heart-beat mailbox command.
 * If configured, the lpfc driver issues the heart-beat mailbox command to
 * the HBA every LPFC_HB_MBOX_INTERVAL (current 5) seconds. At the time the
 * heart-beat mailbox command is issued, the driver shall set up heart-beat
 * timeout timer to LPFC_HB_MBOX_TIMEOUT (current 30) seconds and marks
 * heart-beat outstanding state. Once the mailbox command comes back and
 * no error conditions detected, the heart-beat mailbox command timer is
 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding
 * state is cleared for the next heart-beat. If the timer expired with the
 * heart-beat outstanding state set, the driver will put the HBA offline.
 **/
static void
lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
	unsigned long drvr_flag;

	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	phba->hb_outstanding = 0;
	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

	/* Check and reset heart-beat timer if necessary */
	mempool_free(pmboxq, phba->mbox_mem_pool);
	if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
		!(phba->link_state == LPFC_HBA_ERROR) &&
		!(phba->pport->load_flag & FC_UNLOADING))
		mod_timer(&phba->hb_tmofunc,
			jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
	return;
}
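
/*
 * Heart-beat timing implied by the handlers above and below: a heart-beat
 * mailbox is issued at most every LPFC_HB_MBOX_INTERVAL (5) seconds; while
 * one is outstanding the timer is re-armed to LPFC_HB_MBOX_TIMEOUT (30)
 * seconds, and only an expiry with hb_outstanding still set is treated as
 * a dead adapter.
 */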
/**
 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This is the actual HBA-timer timeout handler to be invoked by the worker
 * thread whenever the HBA timer fired and HBA-timeout event posted. This
 * handler performs any periodic operations needed for the device. If such
 * periodic event has already been attended to either in the interrupt handler
 * or by processing slow-ring or fast-ring events within the HBA-timer
 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler just simply resets
 * the timer for the next timeout period. If lpfc heart-beat mailbox command
 * is configured and there is no heart-beat mailbox command outstanding, a
 * heart-beat mailbox is issued and timer set properly. Otherwise, if there
 * has been a heart-beat mailbox command outstanding, the HBA shall be put
 * offline.
 **/
void
lpfc_hb_timeout_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	LPFC_MBOXQ_t *pmboxq;
	struct lpfc_dmabuf *buf_ptr;
	int retval, i;
	struct lpfc_sli *psli = &phba->sli;
	LIST_HEAD(completions);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_rcv_seq_check_edtov(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);

	if ((phba->link_state == LPFC_HBA_ERROR) ||
		(phba->pport->load_flag & FC_UNLOADING) ||
		(phba->pport->fc_flag & FC_OFFLINE_MODE))
		return;

	spin_lock_irq(&phba->pport->work_port_lock);

	if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ,
		jiffies)) {
		spin_unlock_irq(&phba->pport->work_port_lock);
		if (!phba->hb_outstanding)
			mod_timer(&phba->hb_tmofunc,
				jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
		else
			mod_timer(&phba->hb_tmofunc,
				jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
		return;
	}
	spin_unlock_irq(&phba->pport->work_port_lock);

	if (phba->elsbuf_cnt &&
		(phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&phba->elsbuf, &completions);
		phba->elsbuf_cnt = 0;
		phba->elsbuf_prev_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		while (!list_empty(&completions)) {
			list_remove_head(&completions, buf_ptr,
				struct lpfc_dmabuf, list);
			lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
			kfree(buf_ptr);
		}
	}
	phba->elsbuf_prev_cnt = phba->elsbuf_cnt;

	/* If there is no heart beat outstanding, issue a heartbeat command */
	if (phba->cfg_enable_hba_heartbeat) {
		if (!phba->hb_outstanding) {
			if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
				(list_empty(&psli->mboxq))) {
				pmboxq = mempool_alloc(phba->mbox_mem_pool,
							GFP_KERNEL);
				if (!pmboxq) {
					mod_timer(&phba->hb_tmofunc,
						 jiffies +
						 HZ * LPFC_HB_MBOX_INTERVAL);
					return;
				}

				lpfc_heart_beat(phba, pmboxq);
				pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
				pmboxq->vport = phba->pport;
				retval = lpfc_sli_issue_mbox(phba, pmboxq,
						MBX_NOWAIT);

				if (retval != MBX_BUSY &&
					retval != MBX_SUCCESS) {
					mempool_free(pmboxq,
							phba->mbox_mem_pool);
					mod_timer(&phba->hb_tmofunc,
						jiffies +
						HZ * LPFC_HB_MBOX_INTERVAL);
					return;
				}
				phba->skipped_hb = 0;
				phba->hb_outstanding = 1;
			} else if (time_before_eq(phba->last_completion_time,
					phba->skipped_hb)) {
				lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2857 Last completion time not "
					"updated in %d ms\n",
					jiffies_to_msecs(jiffies
						 - phba->last_completion_time));
			} else
				phba->skipped_hb = jiffies;

			mod_timer(&phba->hb_tmofunc,
				  jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
			return;
		} else {
			/*
			 * If heart beat timeout called with hb_outstanding set
			 * we need to give the hb mailbox cmd a chance to
			 * complete or TMO.
			 */
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"0459 Adapter heartbeat still out"
					"standing:last compl time was %d ms.\n",
					jiffies_to_msecs(jiffies
						 - phba->last_completion_time));
			mod_timer(&phba->hb_tmofunc,
				  jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
		}
	}
}
/**
 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring the HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
static void
lpfc_offline_eratt(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);
	lpfc_offline_prep(phba);

	lpfc_offline(phba);
	lpfc_reset_barrier(phba);
	spin_lock_irq(&phba->hbalock);
	lpfc_sli_brdreset(phba);
	spin_unlock_irq(&phba->hbalock);
	lpfc_hba_down_post(phba);
	lpfc_sli_brdready(phba, HS_MBRDY);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
	return;
}
/**
 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring a SLI4 HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
static void
lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
{
	lpfc_offline_prep(phba);
	lpfc_offline(phba);
	lpfc_sli4_brdreset(phba);
	lpfc_hba_down_post(phba);
	lpfc_sli4_post_status_check(phba);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
}
/**
 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the deferred HBA hardware error
 * conditions. This type of error is indicated by HBA by setting ER1
 * and another ER bit in the host status register. The driver will
 * wait until the ER1 bit clears before handling the error condition.
 **/
static void
lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
{
	uint32_t old_host_status = phba->work_hs;
	struct lpfc_sli_ring *pring;
	struct lpfc_sli *psli = &phba->sli;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
		"0479 Deferred Adapter Hardware Error "
		"Data: x%x x%x x%x\n",
		phba->work_hs,
		phba->work_status[0], phba->work_status[1]);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/*
	 * Firmware stops when it triggered erratt. That could cause the I/Os
	 * dropped by the firmware. Error iocb (I/O) on txcmplq and let the
	 * SCSI layer retry it after re-establishing link.
	 */
	pring = &psli->ring[psli->fcp_ring];
	lpfc_sli_abort_iocb_ring(phba, pring);

	/*
	 * There was a firmware error. Take the hba offline and then
	 * attempt to restart it.
	 */
	lpfc_offline_prep(phba);
	lpfc_offline(phba);

	/* Wait for the ER1 bit to clear.*/
	while (phba->work_hs & HS_FFER1) {
		msleep(100);
		if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
			phba->work_hs = UNPLUG_ERR;
			break;
		}
		/* If driver is unloading let the worker thread continue */
		if (phba->pport->load_flag & FC_UNLOADING) {
			phba->work_hs = 0;
			break;
		}
	}

	/*
	 * This is to protect against a race condition in which
	 * first write to the host attention register clear the
	 * host status register.
	 */
	if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
		phba->work_hs = old_host_status & ~HS_FFER1;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~DEFER_ERATT;
	spin_unlock_irq(&phba->hbalock);
	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
}
static void
lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
{
	struct lpfc_board_event_header board_event;
	struct Scsi_Host *shost;

	board_event.event_type = FC_REG_BOARD_EVENT;
	board_event.subcategory = LPFC_EVENT_PORTINTERR;
	shost = lpfc_shost_from_vport(phba->pport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(board_event),
				  (char *) &board_event,
				  LPFC_NL_VENDOR_ID);
}
/**
 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the following HBA hardware error
 * conditions:
 * 1 - HBA error attention interrupt
 * 2 - DMA ring index out of range
 * 3 - Mailbox command came back as unknown
 **/
static void
lpfc_handle_eratt_s3(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	uint32_t event_data;
	unsigned long temperature;
	struct temp_event temp_event_data;
	struct Scsi_Host *shost;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	if (phba->hba_flag & DEFER_ERATT)
		lpfc_handle_deferred_eratt(phba);

	if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
		if (phba->work_hs & HS_FFER6)
			/* Re-establishing Link */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"1301 Re-establishing Link "
					"Data: x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);
		if (phba->work_hs & HS_FFER8)
			/* Device Zeroization */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"2861 Host Authentication device "
					"zeroization Data:x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);

		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ACTIVE;
		spin_unlock_irq(&phba->hbalock);

		/*
		 * Firmware stops when it triggered erratt with HS_FFER6.
		 * That could cause the I/Os dropped by the firmware.
		 * Error iocb (I/O) on txcmplq and let the SCSI layer
		 * retry it after re-establishing link.
		 */
		pring = &psli->ring[psli->fcp_ring];
		lpfc_sli_abort_iocb_ring(phba, pring);

		/*
		 * There was a firmware error. Take the hba offline and then
		 * attempt to restart it.
		 */
		lpfc_offline_prep(phba);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
			lpfc_unblock_mgmt_io(phba);
			return;
		}
		lpfc_unblock_mgmt_io(phba);
	} else if (phba->work_hs & HS_CRIT_TEMP) {
		temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_CRIT_TEMP;
		temp_event_data.data = (uint32_t)temperature;

		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0406 Adapter maximum temperature exceeded "
				"(%ld), taking this port offline "
				"Data: x%x x%x x%x\n",
				temperature, phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *) &temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);

		spin_lock_irq(&phba->hbalock);
		phba->over_temp_state = HBA_OVER_TEMP;
		spin_unlock_irq(&phba->hbalock);
		lpfc_offline_eratt(phba);

	} else {
		/* The if clause above forces this code path when the status
		 * failure is a value other than FFER6. Do not call the offline
		 * twice. This is the adapter hardware error path.
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0457 Adapter Hardware Error "
				"Data: x%x x%x x%x\n",
				phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		event_data = FC_REG_DUMP_EVENT;
		shost = lpfc_shost_from_vport(vport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
				sizeof(event_data), (char *) &event_data,
				SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);

		lpfc_offline_eratt(phba);
	}
	return;
}
/**
 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the SLI4 HBA hardware error attention
 * conditions.
 **/
static void
lpfc_handle_eratt_s4(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	uint32_t event_data;
	struct Scsi_Host *shost;
	uint32_t if_type;
	struct lpfc_register portstat_reg;
	int rc;

	/* If the pci channel is offline, ignore possible errors, since
	 * we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev))
		return;
	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	/* For now, the actual action for SLI4 device handling is not
	 * specified yet, so just treat it as an adapter hardware failure
	 */
	event_data = FC_REG_DUMP_EVENT;
	shost = lpfc_shost_from_vport(vport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(event_data), (char *) &event_data,
				  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);

	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		lpfc_sli4_offline_eratt(phba);
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
		portstat_reg.word0 =
			readl(phba->sli4_hba.u.if_type2.STATUSregaddr);

		if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
			/* TODO: Register for Overtemp async events. */
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2889 Port Overtemperature event, "
				"taking port offline\n");
			spin_lock_irq(&phba->hbalock);
			phba->over_temp_state = HBA_OVER_TEMP;
			spin_unlock_irq(&phba->hbalock);
			lpfc_sli4_offline_eratt(phba);
			return;
		}
		/*
		 * On error status condition, driver need to wait for port
		 * ready before performing reset.
		 */
		rc = lpfc_sli4_pdev_status_reg_wait(phba);
		if (!rc) {
			/* need reset: attempt for port recovery */
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2887 Port Error: Attempting "
					"Port Recovery\n");
			lpfc_offline_prep(phba);
			lpfc_offline(phba);
			lpfc_sli_brdrestart(phba);
			if (lpfc_online(phba) == 0) {
				lpfc_unblock_mgmt_io(phba);
				return;
			}
			/* fall through for not able to recover */
		}
		lpfc_sli4_offline_eratt(phba);
		break;
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		break;
	}
}
/**
 * lpfc_handle_eratt - Wrapper func for handling hba error attention
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 hba error attention handling
 * routine from the API jump table function pointer from the lpfc_hba struct.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
void
lpfc_handle_eratt(struct lpfc_hba *phba)
{
	(*phba->lpfc_handle_eratt)(phba);
}
/**
 * lpfc_handle_latt - The HBA link event handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked from the worker thread to handle a HBA host
 * attention link event.
 **/
void
lpfc_handle_latt(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	volatile uint32_t control;
	struct lpfc_dmabuf *mp;
	int rc = 0;

	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		rc = 1;
		goto lpfc_handle_latt_err_exit;
	}

	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp) {
		rc = 2;
		goto lpfc_handle_latt_free_pmb;
	}

	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt) {
		rc = 3;
		goto lpfc_handle_latt_free_mp;
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	psli->slistat.link_event++;
	lpfc_read_topology(phba, pmb, mp);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
	pmb->vport = vport;
	/* Block ELS IOCBs until we have processed this mbox command */
	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
	rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		rc = 4;
		goto lpfc_handle_latt_free_mbuf;
	}

	/* Clear Link Attention in HA REG */
	spin_lock_irq(&phba->hbalock);
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	return;

lpfc_handle_latt_free_mbuf:
	phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
lpfc_handle_latt_free_mp:
	kfree(mp);
lpfc_handle_latt_free_pmb:
	mempool_free(pmb, phba->mbox_mem_pool);
lpfc_handle_latt_err_exit:
	/* Enable Link attention interrupts */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* Clear Link Attention in HA REG */
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
	lpfc_linkdown(phba);
	phba->link_state = LPFC_HBA_ERROR;

	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
		     "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);

	return;
}
/**
 * lpfc_parse_vpd - Parse VPD (Vital Product Data)
 * @phba: pointer to lpfc hba data structure.
 * @vpd: pointer to the vital product data.
 * @len: length of the vital product data in bytes.
 *
 * This routine parses the Vital Product Data (VPD). The VPD is treated as
 * an array of characters. In this routine, the ModelName, ProgramType, and
 * ModelDesc, etc. fields of the phba data structure will be populated.
 *
 * Return codes
 *   0 - pointer to the VPD passed in is NULL
 *   1 - success
 **/
int
lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
{
	uint8_t lenlo, lenhi;
	int Length;
	int i, j;
	int finished = 0;
	int index = 0;

	if (!vpd)
		return 0;

	/* Vital Product */
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0455 Vital Product Data: x%x x%x x%x x%x\n",
			(uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
			(uint32_t) vpd[3]);
	while (!finished && (index < (len - 4))) {
		switch (vpd[index]) {
		case 0x82:
		case 0x91:
			index += 1;
			lenlo = vpd[index];
			index += 1;
			lenhi = vpd[index];
			index += 1;
			i = ((((unsigned short)lenhi) << 8) + lenlo);
			index += i;
			break;
		case 0x90:
			index += 1;
			lenlo = vpd[index];
			index += 1;
			lenhi = vpd[index];
			index += 1;
			Length = ((((unsigned short)lenhi) << 8) + lenlo);
			if (Length > len - index)
				Length = len - index;
			while (Length > 0) {
			/* Look for Serial Number */
			if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while (i--) {
					phba->SerialNumber[j++] = vpd[index++];
					if (j == 31)
						break;
				}
				phba->SerialNumber[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
				phba->vpd_flag |= VPD_MODEL_DESC;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while (i--) {
					phba->ModelDesc[j++] = vpd[index++];
					if (j == 255)
						break;
				}
				phba->ModelDesc[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
				phba->vpd_flag |= VPD_MODEL_NAME;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while (i--) {
					phba->ModelName[j++] = vpd[index++];
					if (j == 79)
						break;
				}
				phba->ModelName[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
				phba->vpd_flag |= VPD_PROGRAM_TYPE;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while (i--) {
					phba->ProgramType[j++] = vpd[index++];
					if (j == 255)
						break;
				}
				phba->ProgramType[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
				phba->vpd_flag |= VPD_PORT;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while (i--) {
					if ((phba->sli_rev == LPFC_SLI_REV4) &&
					    (phba->sli4_hba.pport_name_sta ==
					     LPFC_SLI4_PPNAME_GET)) {
						j++;
						index++;
					} else
						phba->Port[j++] = vpd[index++];
					if (j == 19)
						break;
				}
				if ((phba->sli_rev != LPFC_SLI_REV4) ||
				    (phba->sli4_hba.pport_name_sta ==
				     LPFC_SLI4_PPNAME_NON))
					phba->Port[j] = 0;
				continue;
			}
			else {
				index += 2;
				i = vpd[index];
				index += 1;
				index += i;
				Length -= (3 + i);
			}
			}
			finished = 0;
			break;
		case 0x78:
			finished = 1;
			break;
		default:
			index++;
			break;
		}
	}

	return(1);
}
/**
 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
 * @phba: pointer to lpfc hba data structure.
 * @mdp: pointer to the data structure to hold the derived model name.
 * @descp: pointer to the data structure to hold the derived description.
 *
 * This routine retrieves HBA's description based on its registered PCI device
 * ID. The @descp passed into this function points to an array of 256 chars. It
 * shall be returned with the model name, maximum speed, and the host bus type.
 * The @mdp passed into this function points to an array of 80 chars. When the
 * function returns, the @mdp will be filled with the model name.
 **/
static void
lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
{
	lpfc_vpd_t *vp;
	uint16_t dev_id = phba->pcidev->device;
	int max_speed;
	int GE = 0;
	int oneConnect = 0; /* default is not a oneConnect */
	struct {
		char *name;
		char *bus;
		char *function;
	} m = {"<Unknown>", "", ""};

	if (mdp && mdp[0] != '\0'
		&& descp && descp[0] != '\0')
		return;

	if (phba->lmt & LMT_16Gb)
		max_speed = 16;
	else if (phba->lmt & LMT_10Gb)
		max_speed = 10;
	else if (phba->lmt & LMT_8Gb)
		max_speed = 8;
	else if (phba->lmt & LMT_4Gb)
		max_speed = 4;
	else if (phba->lmt & LMT_2Gb)
		max_speed = 2;
	else
		max_speed = 1;

	vp = &phba->vpd;

	switch (dev_id) {
	case PCI_DEVICE_ID_FIREFLY:
		m = (typeof(m)){"LP6000", "PCI", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SUPERFLY:
		if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
			m = (typeof(m)){"LP7000", "PCI",
					"Fibre Channel Adapter"};
		else
			m = (typeof(m)){"LP7000E", "PCI",
					"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_DRAGONFLY:
		m = (typeof(m)){"LP8000", "PCI",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_CENTAUR:
		if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
			m = (typeof(m)){"LP9002", "PCI",
					"Fibre Channel Adapter"};
		else
			m = (typeof(m)){"LP9000", "PCI",
					"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_RFLY:
		m = (typeof(m)){"LP952", "PCI",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PEGASUS:
		m = (typeof(m)){"LP9802", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_THOR:
		m = (typeof(m)){"LP10000", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_VIPER:
		m = (typeof(m)){"LPX1000", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PFLY:
		m = (typeof(m)){"LP982", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_TFLY:
		m = (typeof(m)){"LP1050", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS:
		m = (typeof(m)){"LP11000", "PCI-X2",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS_SCSP:
		m = (typeof(m)){"LP11000-SP", "PCI-X2",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS_DCSP:
		m = (typeof(m)){"LP11002-SP", "PCI-X2",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE:
		m = (typeof(m)){"LPe1000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE_SCSP:
		m = (typeof(m)){"LPe1000-SP", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE_DCSP:
		m = (typeof(m)){"LPe1002-SP", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_BMID:
		m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_BSMB:
		m = (typeof(m)){"LP111", "PCI-X2", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZEPHYR:
		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZEPHYR_SCSP:
		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZEPHYR_DCSP:
		m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
		GE = 1;
		break;
	case PCI_DEVICE_ID_ZMID:
		m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZSMB:
		m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LP101:
		m = (typeof(m)){"LP101", "PCI-X", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LP10000S:
		m = (typeof(m)){"LP10000-S", "PCI", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LP11000S:
		m = (typeof(m)){"LP11000-S", "PCI-X2", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LPE11000S:
		m = (typeof(m)){"LPe11000-S", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT:
		m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_MID:
		m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_SMB:
		m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_DCSP:
		m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_SCSP:
		m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_S:
		m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HORNET:
		m = (typeof(m)){"LP21000", "PCIe", "FCoE Adapter"};
		GE = 1;
		break;
	case PCI_DEVICE_ID_PROTEUS_VF:
		m = (typeof(m)){"LPev12000", "PCIe IOV",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PROTEUS_PF:
		m = (typeof(m)){"LPev12000", "PCIe IOV",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PROTEUS_S:
		m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_TIGERSHARK:
		oneConnect = 1;
		m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
		break;
	case PCI_DEVICE_ID_TOMCAT:
		oneConnect = 1;
		m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
		break;
	case PCI_DEVICE_ID_FALCON:
		m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
				"EmulexSecure Fibre"};
		break;
	case PCI_DEVICE_ID_BALIUS:
		m = (typeof(m)){"LPVe12002", "PCIe Shared I/O",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LANCER_FC:
	case PCI_DEVICE_ID_LANCER_FC_VF:
		m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LANCER_FCOE:
	case PCI_DEVICE_ID_LANCER_FCOE_VF:
		oneConnect = 1;
		m = (typeof(m)){"OCe15100", "PCIe", "FCoE"};
		break;
	default:
		m = (typeof(m)){"Unknown", "", ""};
		break;
	}

	if (mdp && mdp[0] == '\0')
		snprintf(mdp, 79,"%s", m.name);
	/*
	 * oneConnect hba requires special processing, they are all initiators
	 * and we put the port number on the end
	 */
	if (descp && descp[0] == '\0') {
		if (oneConnect)
			snprintf(descp, 255,
				"Emulex OneConnect %s, %s Initiator, Port %s",
				m.name, m.function,
				phba->Port);
		else
			snprintf(descp, 255,
				"Emulex %s %d%s %s %s",
				m.name, max_speed, (GE) ? "GE" : "Gb",
				m.bus, m.function);
	}
}
/**
 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to a IOCB ring
 * @phba: pointer to lpfc hba data structure.
 * @pring: pointer to a IOCB ring.
 * @cnt: the number of IOCBs to be posted to the IOCB ring.
 *
 * This routine posts a given number of IOCBs with the associated DMA buffer
 * descriptors specified by the cnt argument to the given IOCB ring.
 *
 * Return codes
 *   The number of IOCBs NOT able to be posted to the IOCB ring.
 **/
int
lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
{
	IOCB_t *icmd;
	struct lpfc_iocbq *iocb;
	struct lpfc_dmabuf *mp1, *mp2;

	cnt += pring->missbufcnt;

	/* While there are buffers to post */
	while (cnt > 0) {
		/* Allocate buffer for command iocb */
		iocb = lpfc_sli_get_iocbq(phba);
		if (iocb == NULL) {
			pring->missbufcnt = cnt;
			return cnt;
		}
		icmd = &iocb->iocb;

		/* 2 buffers can be posted per command */
		/* Allocate buffer to post */
		mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
		if (mp1)
			mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
		if (!mp1 || !mp1->virt) {
			kfree(mp1);
			lpfc_sli_release_iocbq(phba, iocb);
			pring->missbufcnt = cnt;
			return cnt;
		}

		INIT_LIST_HEAD(&mp1->list);
		/* Allocate buffer to post */
		if (cnt > 1) {
			mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
			if (mp2)
				mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
							    &mp2->phys);
			if (!mp2 || !mp2->virt) {
				kfree(mp2);
				lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
				kfree(mp1);
				lpfc_sli_release_iocbq(phba, iocb);
				pring->missbufcnt = cnt;
				return cnt;
			}

			INIT_LIST_HEAD(&mp2->list);
		} else {
			mp2 = NULL;
		}

		icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
		icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
		icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
		icmd->ulpBdeCount = 1;
		cnt--;
		if (mp2) {
			icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
			icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
			icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
			cnt--;
			icmd->ulpBdeCount = 2;
		}

		icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
		icmd->ulpLe = 1;

		if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
		    IOCB_ERROR) {
			lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
			kfree(mp1);
			cnt++;
			if (mp2) {
				lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
				kfree(mp2);
				cnt++;
			}
			lpfc_sli_release_iocbq(phba, iocb);
			pring->missbufcnt = cnt;
			return cnt;
		}
		lpfc_sli_ringpostbuf_put(phba, pring, mp1);
		if (mp2)
			lpfc_sli_ringpostbuf_put(phba, pring, mp2);
	}
	pring->missbufcnt = 0;
	return 0;
}
/**
 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine posts the initial receive IOCB buffers to the ELS ring. The
 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is
 * posted.
 *
 * Return codes
 *   0 - success (currently always success)
 **/
static int
lpfc_post_rcv_buf(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	/* Ring 0, ELS / CT buffers */
	lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0);
	/* Ring 2 - FCP no buffers needed */

	return 0;
}
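/*
 * S(N, V) rotates the 32-bit value V left by N bits; it is the rotate
 * primitive used by the SHA-1 helpers below.
 */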
#define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
/**
 * lpfc_sha_init - Set up initial array of hash table entries
 * @HashResultPointer: pointer to an array as hash table.
 *
 * This routine sets up the initial values to the array of hash table entries
 * for the LC HBA.
 **/
static void
lpfc_sha_init(uint32_t * HashResultPointer)
{
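	/*
	 * Standard SHA-1 initial hash values (H0-H4 from FIPS 180-1); the
	 * challenge hash computed below is plain SHA-1 over the working
	 * array seeded in lpfc_hba_init().
	 */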
	HashResultPointer[0] = 0x67452301;
	HashResultPointer[1] = 0xEFCDAB89;
	HashResultPointer[2] = 0x98BADCFE;
	HashResultPointer[3] = 0x10325476;
	HashResultPointer[4] = 0xC3D2E1F0;
}
/**
 * lpfc_sha_iterate - Iterate initial hash table with the working hash table
 * @HashResultPointer: pointer to an initial/result hash table.
 * @HashWorkingPointer: pointer to a working hash table.
 *
 * This routine iterates an initial hash table pointed by @HashResultPointer
 * with the values from the working hash table pointed by @HashWorkingPointer.
 * The results are put back into the initial hash table, returned through
 * the @HashResultPointer as the result hash table.
 **/
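/*
 * Implementation note: this is the SHA-1 compression function of FIPS 180-1
 * -- an 80-word message schedule expansion followed by 80 rounds in four
 * groups of 20, each group with its own round constant.
 */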
static void
lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
{
	int t;
	uint32_t TEMP;
	uint32_t A, B, C, D, E;
	t = 16;
	do {
		HashWorkingPointer[t] =
		    S(1, HashWorkingPointer[t - 3] ^ HashWorkingPointer[t - 8] ^
			 HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
	} while (++t <= 79);
	t = 0;
	A = HashResultPointer[0];
	B = HashResultPointer[1];
	C = HashResultPointer[2];
	D = HashResultPointer[3];
	E = HashResultPointer[4];

	do {
		if (t < 20) {
			TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
		} else if (t < 40) {
			TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
		} else if (t < 60) {
			TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
		} else {
			TEMP = (B ^ C ^ D) + 0xCA62C1D6;
		}
		TEMP += S(5, A) + E + HashWorkingPointer[t];
		E = D;
		D = C;
		C = B;
		B = S(30, B);
		A = TEMP;
	} while (++t <= 79);

	HashResultPointer[0] += A;
	HashResultPointer[1] += B;
	HashResultPointer[2] += C;
	HashResultPointer[3] += D;
	HashResultPointer[4] += E;
}
/**
 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
 * @RandomChallenge: pointer to the entry of host challenge random number array.
 * @HashWorking: pointer to the entry of the working hash array.
 *
 * This routine calculates the working hash array referred by @HashWorking
 * from the challenge random numbers associated with the host, referred by
 * @RandomChallenge. The result is put into the entry of the working hash
 * array and returned by reference through @HashWorking.
 **/
static void
lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
{
	*HashWorking = (*RandomChallenge ^ *HashWorking);
}
/**
 * lpfc_hba_init - Perform special handling for LC HBA initialization
 * @phba: pointer to lpfc hba data structure.
 * @hbainit: pointer to an array of unsigned 32-bit integers.
 *
 * This routine performs the special handling for LC HBA initialization.
 **/
void
lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
{
	int t;
	uint32_t *HashWorking;
	uint32_t *pwwnn = (uint32_t *) phba->wwnn;

	HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
	if (!HashWorking)
		return;
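	/*
	 * Seed the working array from the adapter WWNN: the two WWNN words
	 * double as both the head and the tail of the 80-word schedule.
	 */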
	HashWorking[0] = HashWorking[78] = *pwwnn++;
	HashWorking[1] = HashWorking[79] = *pwwnn;

	for (t = 0; t < 7; t++)
		lpfc_challenge_key(phba->RandomData + t, HashWorking + t);

	lpfc_sha_init(hbainit);
	lpfc_sha_iterate(hbainit, HashWorking);
	kfree(HashWorking);
}

/**
 * lpfc_cleanup - Performs vport cleanups before deleting a vport
 * @vport: pointer to a virtual N_Port data structure.
 *
 * This routine performs the necessary cleanups before deleting the @vport.
 * It invokes the discovery state machine to perform necessary state
 * transitions and to release the ndlps associated with the @vport. Note,
 * the physical port is treated as @vport 0.
 **/
void
lpfc_cleanup(struct lpfc_vport *vport)
{
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_nodelist *ndlp, *next_ndlp;
	int i = 0;

	if (phba->link_state > LPFC_LINK_DOWN)
		lpfc_port_link_failure(vport);

	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
		if (!NLP_CHK_NODE_ACT(ndlp)) {
			ndlp = lpfc_enable_node(vport, ndlp,
						NLP_STE_UNUSED_NODE);
			if (!ndlp)
				continue;
			spin_lock_irq(&phba->ndlp_lock);
			NLP_SET_FREE_REQ(ndlp);
			spin_unlock_irq(&phba->ndlp_lock);
			/* Trigger the release of the ndlp memory */
			lpfc_nlp_put(ndlp);
			continue;
		}
		spin_lock_irq(&phba->ndlp_lock);
		if (NLP_CHK_FREE_REQ(ndlp)) {
			/* The ndlp should not be in memory free mode already */
			spin_unlock_irq(&phba->ndlp_lock);
			continue;
		} else
			/* Indicate request for freeing ndlp memory */
			NLP_SET_FREE_REQ(ndlp);
		spin_unlock_irq(&phba->ndlp_lock);

		if (vport->port_type != LPFC_PHYSICAL_PORT &&
		    ndlp->nlp_DID == Fabric_DID) {
			/* Just free up ndlp with Fabric_DID for vports */
			lpfc_nlp_put(ndlp);
			continue;
		}

		if (ndlp->nlp_type & NLP_FABRIC)
			lpfc_disc_state_machine(vport, ndlp, NULL,
					NLP_EVT_DEVICE_RECOVERY);

		lpfc_disc_state_machine(vport, ndlp, NULL,
					NLP_EVT_DEVICE_RM);
	}

	/* At this point, ALL ndlp's should be gone
	 * because of the previous NLP_EVT_DEVICE_RM.
	 * Let's wait for this to happen, if needed.
	 */
	while (!list_empty(&vport->fc_nodes)) {
		if (i++ > 3000) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
					 "0233 Nodelist not empty\n");
			list_for_each_entry_safe(ndlp, next_ndlp,
						 &vport->fc_nodes, nlp_listp) {
				lpfc_printf_vlog(ndlp->vport, KERN_ERR,
						 LOG_NODE,
						 "0282 did:x%x ndlp:x%p "
						 "usgmap:x%x refcnt:%d\n",
						 ndlp->nlp_DID, (void *)ndlp,
						 ndlp->nlp_usg_map,
						 atomic_read(
							&ndlp->kref.refcount));
			}
			break;
		}

		/* Wait for any activity on ndlps to settle */
		msleep(10);
	}
	lpfc_cleanup_vports_rrqs(vport, NULL);
}
/**
 * lpfc_stop_vport_timers - Stop all the timers associated with a vport
 * @vport: pointer to a virtual N_Port data structure.
 *
 * This routine stops all the timers associated with a @vport. This function
 * is invoked before disabling or deleting a @vport. Note that the physical
 * port is treated as @vport 0.
 **/
void
lpfc_stop_vport_timers(struct lpfc_vport *vport)
{
	del_timer_sync(&vport->els_tmofunc);
	del_timer_sync(&vport->fc_fdmitmo);
	del_timer_sync(&vport->delayed_disc_tmo);
	lpfc_can_disctmo(vport);
	return;
}
/**
 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine stops the SLI4 FCF rediscover wait timer if it's on. The
 * caller of this routine should already hold the host lock.
 **/
void
__lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
{
	/* Clear pending FCF rediscovery wait flag */
	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;

	/* Now, try to stop the timer */
	del_timer(&phba->fcf.redisc_wait);
}
/**
 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine stops the SLI4 FCF rediscover wait timer if it's on. It
 * checks whether the FCF rediscovery wait timer is pending with the host
 * lock held before proceeding with disabling the timer and clearing the
 * wait timer pending flag.
 **/
void
lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
{
	spin_lock_irq(&phba->hbalock);
	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
		/* FCF rediscovery timer already fired or stopped */
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	__lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
	/* Clear failover in progress flags */
	phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
	spin_unlock_irq(&phba->hbalock);
}
/**
 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine stops all the timers associated with an HBA. This function is
 * invoked before either putting an HBA offline or unloading the driver.
 **/
void
lpfc_stop_hba_timers(struct lpfc_hba *phba)
{
	lpfc_stop_vport_timers(phba->pport);
	del_timer_sync(&phba->sli.mbox_tmo);
	del_timer_sync(&phba->fabric_block_timer);
	del_timer_sync(&phba->eratt_poll);
	del_timer_sync(&phba->hb_tmofunc);
	if (phba->sli_rev == LPFC_SLI_REV4) {
		del_timer_sync(&phba->rrq_tmr);
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	}
	phba->hb_outstanding = 0;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		/* Stop any LightPulse device specific driver timers */
		del_timer_sync(&phba->fcp_poll_timer);
		break;
	case LPFC_PCI_DEV_OC:
		/* Stop any OneConnect device specific driver timers */
		lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0297 Invalid device group (x%x)\n",
				phba->pci_dev_grp);
		break;
	}
	return;
}
/**
 * lpfc_block_mgmt_io - Mark an HBA's management interface as blocked
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine marks an HBA's management interface as blocked. Once the
 * HBA's management interface is marked as blocked, all user space access to
 * the HBA, whether from the sysfs interface or the libdfc interface, will be
 * blocked. The HBA is set to block the management interface when the driver
 * prepares the HBA interface for online or offline.
 **/
static void
lpfc_block_mgmt_io(struct lpfc_hba * phba)
{
	unsigned long iflag;
	uint8_t actcmd = MBX_HEARTBEAT;
	unsigned long timeout;

	timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
	spin_lock_irqsave(&phba->hbalock, iflag);
	phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
	if (phba->sli.mbox_active) {
		actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
		/* Determine how long we might wait for the active mailbox
		 * command to be gracefully completed by firmware.
		 */
		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
				phba->sli.mbox_active) * 1000) + jiffies;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	/* Wait for the outstanding mailbox command to complete */
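	/*
	 * Poll in 2 ms steps rather than sleeping on a waitqueue; the loop
	 * is bounded by the mailbox timeout computed above, so a hung
	 * command cannot block this path forever.
	 */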
	while (phba->sli.mbox_active) {
		/* Check active mailbox complete status every 2ms */
		msleep(2);
		if (time_after(jiffies, timeout)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2813 Mgmt IO is Blocked %x "
					"- mbox cmd %x still active\n",
					phba->sli.sli_flag, actcmd);
			break;
		}
	}
}
/**
 * lpfc_online - Initialize and bring an HBA online
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine initializes the HBA and brings it online. During this
 * process, the management interface is blocked to prevent user space access
 * to the HBA from interfering with the driver initialization.
 *
 * Return codes
 *   0 - successful
 *   1 - failed
 **/
int
lpfc_online(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport;
	struct lpfc_vport **vports;
	int i;

	if (!phba)
		return 0;
	vport = phba->pport;

	if (!(vport->fc_flag & FC_OFFLINE_MODE))
		return 0;

	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
			"0458 Bring Adapter online\n");

	lpfc_block_mgmt_io(phba);

	if (!lpfc_sli_queue_setup(phba)) {
		lpfc_unblock_mgmt_io(phba);
		return 1;
	}

	if (phba->sli_rev == LPFC_SLI_REV4) {
		if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
			lpfc_unblock_mgmt_io(phba);
			return 1;
		}
	} else {
		if (lpfc_sli_hba_setup(phba)) {	/* Initialize SLI2/SLI3 HBA */
			lpfc_unblock_mgmt_io(phba);
			return 1;
		}
	}

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL) {
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			struct Scsi_Host *shost;
			shost = lpfc_shost_from_vport(vports[i]);
			spin_lock_irq(shost->host_lock);
			vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
				vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
			if (phba->sli_rev == LPFC_SLI_REV4)
				vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
			spin_unlock_irq(shost->host_lock);
		}
	}
	lpfc_destroy_vport_work_array(phba, vports);

	lpfc_unblock_mgmt_io(phba);
	return 0;
}
/**
 * lpfc_unblock_mgmt_io - Mark an HBA's management interface to be not blocked
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine marks an HBA's management interface as not blocked. Once the
 * HBA's management interface is marked as not blocked, all user space access
 * to the HBA, whether from the sysfs interface or the libdfc interface, will
 * be allowed. The HBA is set to block the management interface when the
 * driver prepares the HBA interface for online or offline and then set to
 * unblock the management interface afterwards.
 **/
void
lpfc_unblock_mgmt_io(struct lpfc_hba * phba)
{
	unsigned long iflag;

	spin_lock_irqsave(&phba->hbalock, iflag);
	phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
}
/**
 * lpfc_offline_prep - Prepare an HBA to be brought offline
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to prepare an HBA to be brought offline. It
 * unregisters the login to all the nodes on all vports and flushes the
 * mailbox queue to make it ready to be brought offline.
 **/
void
lpfc_offline_prep(struct lpfc_hba * phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_nodelist  *ndlp, *next_ndlp;
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	int i;

	if (vport->fc_flag & FC_OFFLINE_MODE)
		return;

	lpfc_block_mgmt_io(phba);

	lpfc_linkdown(phba);
	/* Issue an unreg_login to all nodes on all vports */
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL) {
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			if (vports[i]->load_flag & FC_UNLOADING)
				continue;
			shost = lpfc_shost_from_vport(vports[i]);
			spin_lock_irq(shost->host_lock);
			vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
			vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
			vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
			spin_unlock_irq(shost->host_lock);

			shost =	lpfc_shost_from_vport(vports[i]);
			list_for_each_entry_safe(ndlp, next_ndlp,
						 &vports[i]->fc_nodes,
						 nlp_listp) {
				if (!NLP_CHK_NODE_ACT(ndlp))
					continue;
				if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
					continue;
				if (ndlp->nlp_type & NLP_FABRIC) {
					lpfc_disc_state_machine(vports[i], ndlp,
						NULL, NLP_EVT_DEVICE_RECOVERY);
					lpfc_disc_state_machine(vports[i], ndlp,
						NULL, NLP_EVT_DEVICE_RM);
				}
				spin_lock_irq(shost->host_lock);
				ndlp->nlp_flag &= ~NLP_NPR_ADISC;
				spin_unlock_irq(shost->host_lock);
				lpfc_unreg_rpi(vports[i], ndlp);
			}
		}
	}
	lpfc_destroy_vport_work_array(phba, vports);

	lpfc_sli_mbox_sys_shutdown(phba);
}
/**
 * lpfc_offline - Bring an HBA offline
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine actually brings an HBA offline. It stops all the timers
 * associated with the HBA, brings down the SLI layer, and eventually
 * marks the HBA as in offline state for the upper layer protocol.
 **/
void
lpfc_offline(struct lpfc_hba *phba)
{
	struct Scsi_Host  *shost;
	struct lpfc_vport **vports;
	int i;

	if (phba->pport->fc_flag & FC_OFFLINE_MODE)
		return;

	/* stop port and all timers associated with this hba */
	lpfc_stop_port(phba);
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_stop_vport_timers(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);
	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
			"0460 Bring Adapter offline\n");
	/* Bring down the SLI Layer and cleanup.  The HBA is offline
	   now.  */
	lpfc_sli_hba_down(phba);
	spin_lock_irq(&phba->hbalock);
	phba->work_ha = 0;
	spin_unlock_irq(&phba->hbalock);
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			spin_lock_irq(shost->host_lock);
			vports[i]->work_port_events = 0;
			vports[i]->fc_flag |= FC_OFFLINE_MODE;
			spin_unlock_irq(shost->host_lock);
		}
	lpfc_destroy_vport_work_array(phba, vports);
}
/**
 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is to free all the SCSI buffers and IOCBs from the driver
 * list back to the kernel. It is called from lpfc_pci_remove_one to free
 * the internal resources before the device is removed from the system.
 *
 * Return codes
 *   0 - successful (for now, it always returns 0)
 **/
static int
lpfc_scsi_free(struct lpfc_hba *phba)
{
	struct lpfc_scsi_buf *sb, *sb_next;
	struct lpfc_iocbq *io, *io_next;

	spin_lock_irq(&phba->hbalock);
	/* Release all the lpfc_scsi_bufs maintained by this host. */
	spin_lock(&phba->scsi_buf_list_lock);
	list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) {
		list_del(&sb->list);
		pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
			      sb->dma_handle);
		kfree(sb);
		phba->total_scsi_bufs--;
	}
	spin_unlock(&phba->scsi_buf_list_lock);

	/* Release all the lpfc_iocbq entries maintained by this host. */
	list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) {
		list_del(&io->list);
		kfree(io);
		phba->total_iocbq_bufs--;
	}

	spin_unlock_irq(&phba->hbalock);
	return 0;
}
/**
 * lpfc_create_port - Create an FC port
 * @phba: pointer to lpfc hba data structure.
 * @instance: a unique integer ID to this FC port.
 * @dev: pointer to the device data structure.
 *
 * This routine creates an FC port for the upper layer protocol. The FC port
 * can be created on top of either a physical port or a virtual port provided
 * by the HBA. This routine also allocates a SCSI host data structure (shost)
 * and associates the FC port created before adding the shost into the SCSI
 * layer.
 *
 * Return codes
 *   @vport - pointer to the virtual N_Port data structure.
 *   NULL - port create failed.
 **/
struct lpfc_vport *
lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
{
	struct lpfc_vport *vport;
	struct Scsi_Host  *shost;
	int error = 0;

	if (dev != &phba->pcidev->dev)
		shost = scsi_host_alloc(&lpfc_vport_template,
					sizeof(struct lpfc_vport));
	else
		shost = scsi_host_alloc(&lpfc_template,
					sizeof(struct lpfc_vport));
	if (!shost)
		goto out;

	vport = (struct lpfc_vport *) shost->hostdata;
	vport->phba = phba;
	vport->load_flag |= FC_LOADING;
	vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
	vport->fc_rscn_flush = 0;

	lpfc_get_vport_cfgparam(vport);
	shost->unique_id = instance;
	shost->max_id = LPFC_MAX_TARGET;
	shost->max_lun = vport->cfg_max_luns;
	shost->this_id = -1;
	shost->max_cmd_len = 16;
	if (phba->sli_rev == LPFC_SLI_REV4) {
		shost->dma_boundary =
			phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
		shost->sg_tablesize = phba->cfg_sg_seg_cnt;
	}

	/*
	 * Set initial can_queue value since 0 is no longer supported and
	 * scsi_add_host will fail. This will be adjusted later based on the
	 * max xri value determined in hba setup.
	 */
	shost->can_queue = phba->cfg_hba_queue_depth - 10;
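	/*
	 * NPIV vports are given a separate transport template so that the
	 * FC transport class exposes the vport-specific attribute set.
	 */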
	if (dev != &phba->pcidev->dev) {
		shost->transportt = lpfc_vport_transport_template;
		vport->port_type = LPFC_NPIV_PORT;
	} else {
		shost->transportt = lpfc_transport_template;
		vport->port_type = LPFC_PHYSICAL_PORT;
	}

	/* Initialize all internally managed lists. */
	INIT_LIST_HEAD(&vport->fc_nodes);
	INIT_LIST_HEAD(&vport->rcv_buffer_list);
	spin_lock_init(&vport->work_port_lock);

	init_timer(&vport->fc_disctmo);
	vport->fc_disctmo.function = lpfc_disc_timeout;
	vport->fc_disctmo.data = (unsigned long)vport;

	init_timer(&vport->fc_fdmitmo);
	vport->fc_fdmitmo.function = lpfc_fdmi_tmo;
	vport->fc_fdmitmo.data = (unsigned long)vport;

	init_timer(&vport->els_tmofunc);
	vport->els_tmofunc.function = lpfc_els_timeout;
	vport->els_tmofunc.data = (unsigned long)vport;

	init_timer(&vport->delayed_disc_tmo);
	vport->delayed_disc_tmo.function = lpfc_delayed_disc_tmo;
	vport->delayed_disc_tmo.data = (unsigned long)vport;
	error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
	if (error)
		goto out_put_shost;

	spin_lock_irq(&phba->hbalock);
	list_add_tail(&vport->listentry, &phba->port_list);
	spin_unlock_irq(&phba->hbalock);
	return vport;

out_put_shost:
	scsi_host_put(shost);
out:
	return NULL;
}
/**
 * destroy_port -  destroy an FC port
 * @vport: pointer to an lpfc virtual N_Port data structure.
 *
 * This routine destroys an FC port from the upper layer protocol. All the
 * resources associated with the port are released.
 **/
static void
destroy_port(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;

	lpfc_debugfs_terminate(vport);
	fc_remove_host(shost);
	scsi_remove_host(shost);

	spin_lock_irq(&phba->hbalock);
	list_del_init(&vport->listentry);
	spin_unlock_irq(&phba->hbalock);

	lpfc_cleanup(vport);
	return;
}
/**
 * lpfc_get_instance - Get a unique integer ID
 *
 * This routine allocates a unique integer ID from lpfc_hba_index pool. It
 * uses the kernel idr facility to perform the task.
 *
 * Return codes
 *   instance - a unique integer ID allocated as the new instance.
 *   -1 - lpfc get instance failed.
 **/
int
lpfc_get_instance(void)
{
	int instance = -1;

	/* Assign an unused number */
	if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL))
		return -1;
	if (idr_get_new(&lpfc_hba_index, NULL, &instance))
		return -1;
	return instance;
}
/**
 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
 * @shost: pointer to SCSI host data structure.
 * @time: elapsed time of the scan in jiffies.
 *
 * This routine is called by the SCSI layer with a SCSI host to determine
 * whether the scan host is finished.
 *
 * Note: there is no scan_start function as adapter initialization will have
 * asynchronously kicked off the link initialization.
 *
 * Return codes
 *   0 - SCSI host scan is not over yet.
 *   1 - SCSI host scan is over.
 **/
int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	int stat = 0;

	spin_lock_irq(shost->host_lock);

	if (vport->load_flag & FC_UNLOADING) {
		stat = 1;
		goto finished;
	}
	if (time >= 30 * HZ) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0461 Scanning longer than 30 "
				"seconds.  Continuing initialization\n");
		stat = 1;
		goto finished;
	}
	if (time >= 15 * HZ && phba->link_state <= LPFC_LINK_DOWN) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0465 Link down longer than 15 "
				"seconds.  Continuing initialization\n");
		stat = 1;
		goto finished;
	}

	if (vport->port_state != LPFC_VPORT_READY)
		goto finished;
	if (vport->num_disc_nodes || vport->fc_prli_sent)
		goto finished;
	if (vport->fc_map_cnt == 0 && time < 2 * HZ)
		goto finished;
	if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
		goto finished;

	stat = 1;

finished:
	spin_unlock_irq(shost->host_lock);
	return stat;
}
/**
 * lpfc_host_attrib_init - Initialize SCSI host attributes on an FC port
 * @shost: pointer to SCSI host data structure.
 *
 * This routine initializes a given SCSI host's attributes on an FC port. The
 * SCSI host can be either on top of a physical port or a virtual port.
 **/
void lpfc_host_attrib_init(struct Scsi_Host *shost)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	/*
	 * Set fixed host attributes.  Must be done after lpfc_sli_hba_setup().
	 */
	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	fc_host_supported_classes(shost) = FC_COS_CLASS3;

	memset(fc_host_supported_fc4s(shost), 0,
	       sizeof(fc_host_supported_fc4s(shost)));
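	/*
	 * Per the FC-GS FC-4 TYPES bit layout (a big-endian 256-bit map),
	 * byte 2 bit 0 is TYPE 0x08 (SCSI-FCP) and byte 7 bit 0 is TYPE
	 * 0x20 (CT), the two FC-4 protocols reported by this driver.
	 */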
	fc_host_supported_fc4s(shost)[2] = 1;
	fc_host_supported_fc4s(shost)[7] = 1;

	lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
				 sizeof fc_host_symbolic_name(shost));

	fc_host_supported_speeds(shost) = 0;
	if (phba->lmt & LMT_16Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT;
	if (phba->lmt & LMT_10Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
	if (phba->lmt & LMT_8Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
	if (phba->lmt & LMT_4Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
	if (phba->lmt & LMT_2Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
	if (phba->lmt & LMT_1Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;

	fc_host_maxframe_size(shost) =
		(((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
		(uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;

	fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo;

	/* This value is also unchanging */
	memset(fc_host_active_fc4s(shost), 0,
	       sizeof(fc_host_active_fc4s(shost)));
	fc_host_active_fc4s(shost)[2] = 1;
	fc_host_active_fc4s(shost)[7] = 1;

	fc_host_max_npiv_vports(shost) = phba->max_vpi;
	spin_lock_irq(shost->host_lock);
	vport->load_flag &= ~FC_LOADING;
	spin_unlock_irq(shost->host_lock);
}
/**
 * lpfc_stop_port_s3 - Stop SLI3 device port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to stop an SLI3 device port, it stops the device
 * from generating interrupts and stops the device driver's timers for the
 * device.
 **/
static void
lpfc_stop_port_s3(struct lpfc_hba *phba)
{
	/* Clear all interrupt enable conditions */
	writel(0, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	/* Clear all pending interrupts */
	writel(0xffffffff, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */

	/* Reset some HBA SLI setup states */
	lpfc_stop_hba_timers(phba);
	phba->pport->work_port_events = 0;
}
/**
 * lpfc_stop_port_s4 - Stop SLI4 device port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to stop an SLI4 device port, it stops the device
 * from generating interrupts and stops the device driver's timers for the
 * device.
 **/
static void
lpfc_stop_port_s4(struct lpfc_hba *phba)
{
	/* Reset some HBA SLI4 setup states */
	lpfc_stop_hba_timers(phba);
	phba->pport->work_port_events = 0;
	phba->sli4_hba.intr_enable = 0;
}
/**
 * lpfc_stop_port - Wrapper function for stopping hba port
 * @phba: Pointer to HBA context object.
 *
 * This routine wraps the actual SLI3 or SLI4 hba stop port routine from
 * the API jump table function pointer from the lpfc_hba struct.
 **/
void
lpfc_stop_port(struct lpfc_hba *phba)
{
	phba->lpfc_stop_port(phba);
}
/**
 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer
 * @phba: Pointer to hba for which this call is being executed.
 *
 * This routine starts the timer waiting for the FCF rediscovery to complete.
 **/
void
lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
{
	unsigned long fcf_redisc_wait_tmo =
		(jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
	/* Start fcf rediscovery wait period timer */
	mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
	spin_lock_irq(&phba->hbalock);
	/* Allow action to new fcf asynchronous event */
	phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
	/* Mark the FCF rediscovery pending state */
	phba->fcf.fcf_flag |= FCF_REDISC_PEND;
	spin_unlock_irq(&phba->hbalock);
}
/**
 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
 * @ptr: Map to lpfc_hba data structure pointer.
 *
 * This routine is invoked when the wait for FCF table rediscovery has timed
 * out. If new FCF record(s) have been discovered during the wait period, a
 * new FCF event shall be added to the FCOE async event list, and then the
 * worker thread shall be woken up for processing from the worker thread
 * context.
 **/
static void
lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr)
{
	struct lpfc_hba *phba = (struct lpfc_hba *)ptr;

	/* Don't send FCF rediscovery event if timer cancelled */
	spin_lock_irq(&phba->hbalock);
	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	/* Clear FCF rediscovery timer pending flag */
	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
	/* FCF rediscovery event to worker thread */
	phba->fcf.fcf_flag |= FCF_REDISC_EVT;
	spin_unlock_irq(&phba->hbalock);
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2776 FCF rediscover quiescent timer expired\n");
	/* wake up worker thread */
	lpfc_worker_wake_up(phba);
}
/**
 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
 * @phba: pointer to lpfc hba data structure.
 * @acqe_link: pointer to the async link completion queue entry.
 *
 * This routine is to parse the SLI4 link-attention link fault code and
 * translate it into the base driver's read link attention mailbox command
 * status.
 *
 * Return: Link-attention status in terms of base driver's coding.
 **/
static uint16_t
lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
			   struct lpfc_acqe_link *acqe_link)
{
	uint16_t latt_fault;

	switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
	case LPFC_ASYNC_LINK_FAULT_NONE:
	case LPFC_ASYNC_LINK_FAULT_LOCAL:
	case LPFC_ASYNC_LINK_FAULT_REMOTE:
		latt_fault = 0;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0398 Invalid link fault code: x%x\n",
				bf_get(lpfc_acqe_link_fault, acqe_link));
		latt_fault = MBXERR_ERROR;
		break;
	}
	return latt_fault;
}
/**
 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
 * @phba: pointer to lpfc hba data structure.
 * @acqe_link: pointer to the async link completion queue entry.
 *
 * This routine is to parse the SLI4 link attention type and translate it
 * into the base driver's link attention type coding.
 *
 * Return: Link attention type in terms of base driver's coding.
 **/
static uint8_t
lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
			  struct lpfc_acqe_link *acqe_link)
{
	uint8_t att_type;

	switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
	case LPFC_ASYNC_LINK_STATUS_DOWN:
	case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
		att_type = LPFC_ATT_LINK_DOWN;
		break;
	case LPFC_ASYNC_LINK_STATUS_UP:
		/* Ignore physical link up events - wait for logical link up */
		att_type = LPFC_ATT_RESERVED;
		break;
	case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
		att_type = LPFC_ATT_LINK_UP;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0399 Invalid link attention type: x%x\n",
				bf_get(lpfc_acqe_link_status, acqe_link));
		att_type = LPFC_ATT_RESERVED;
		break;
	}
	return att_type;
}
/**
 * lpfc_sli4_parse_latt_link_speed - Parse sli4 link-attention link speed
 * @phba: pointer to lpfc hba data structure.
 * @acqe_link: pointer to the async link completion queue entry.
 *
 * This routine is to parse the SLI4 link-attention link speed and translate
 * it into the base driver's link-attention link speed coding.
 *
 * Return: Link-attention link speed in terms of base driver's coding.
 **/
static uint8_t
lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba,
				struct lpfc_acqe_link *acqe_link)
{
	uint8_t link_speed;

	switch (bf_get(lpfc_acqe_link_speed, acqe_link)) {
	case LPFC_ASYNC_LINK_SPEED_ZERO:
	case LPFC_ASYNC_LINK_SPEED_10MBPS:
	case LPFC_ASYNC_LINK_SPEED_100MBPS:
		link_speed = LPFC_LINK_SPEED_UNKNOWN;
		break;
	case LPFC_ASYNC_LINK_SPEED_1GBPS:
		link_speed = LPFC_LINK_SPEED_1GHZ;
		break;
	case LPFC_ASYNC_LINK_SPEED_10GBPS:
		link_speed = LPFC_LINK_SPEED_10GHZ;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0483 Invalid link-attention link speed: x%x\n",
				bf_get(lpfc_acqe_link_speed, acqe_link));
		link_speed = LPFC_LINK_SPEED_UNKNOWN;
		break;
	}
	return link_speed;
}
/**
 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_link: pointer to the async link completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous FCoE link event.
 **/
static void
lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
			 struct lpfc_acqe_link *acqe_link)
{
	struct lpfc_dmabuf *mp;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_mbx_read_top *la;
	uint8_t att_type;
	int rc;

	att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
	if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP)
		return;
	phba->fcoe_eventtag = acqe_link->event_tag;
	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0395 The mboxq allocation failed\n");
		return;
	}
	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0396 The lpfc_dmabuf allocation failed\n");
		goto out_free_pmb;
	}
	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0397 The mbuf allocation failed\n");
		goto out_free_dmabuf;
	}
	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	/* Block ELS IOCBs until we have done process link event */
	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;

	/* Update link event statistics */
	phba->sli.slistat.link_event++;

	/* Create lpfc_handle_latt mailbox command from link ACQE */
	lpfc_read_topology(phba, pmb, mp);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
	pmb->vport = phba->pport;

	/* Keep the link status for extra SLI4 state machine reference */
	phba->sli4_hba.link_state.speed =
				bf_get(lpfc_acqe_link_speed, acqe_link);
	phba->sli4_hba.link_state.duplex =
				bf_get(lpfc_acqe_link_duplex, acqe_link);
	phba->sli4_hba.link_state.status =
				bf_get(lpfc_acqe_link_status, acqe_link);
	phba->sli4_hba.link_state.type =
				bf_get(lpfc_acqe_link_type, acqe_link);
	phba->sli4_hba.link_state.number =
				bf_get(lpfc_acqe_link_number, acqe_link);
	phba->sli4_hba.link_state.fault =
				bf_get(lpfc_acqe_link_fault, acqe_link);
	phba->sli4_hba.link_state.logical_speed =
				bf_get(lpfc_acqe_logical_link_speed, acqe_link);
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2900 Async FC/FCoE Link event - Speed:%dGBit "
			"duplex:x%x LA Type:x%x Port Type:%d Port Number:%d "
			"Logical speed:%dMbps Fault:%d\n",
			phba->sli4_hba.link_state.speed,
			phba->sli4_hba.link_state.topology,
			phba->sli4_hba.link_state.status,
			phba->sli4_hba.link_state.type,
			phba->sli4_hba.link_state.number,
			phba->sli4_hba.link_state.logical_speed * 10,
			phba->sli4_hba.link_state.fault);
	/*
	 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch
	 * topology info. Note: Optional for non FC-AL ports.
	 */
	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED)
			goto out_free_dmabuf;
		return;
	}
	/*
	 * For FCoE Mode: fill in all the topology information we need and call
	 * the READ_TOPOLOGY completion routine to continue without actually
	 * sending the READ_TOPOLOGY mailbox command to the port.
	 */
	/* Parse and translate status field */
	mb = &pmb->u.mb;
	mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link);
	/* Parse and translate link attention fields */
	la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
	la->eventTag = acqe_link->event_tag;
	bf_set(lpfc_mbx_read_top_att_type, la, att_type);
	bf_set(lpfc_mbx_read_top_link_spd, la,
	       lpfc_sli4_parse_latt_link_speed(phba, acqe_link));

	/* Fake the following irrelevant fields */
	bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT);
	bf_set(lpfc_mbx_read_top_alpa_granted, la, 0);
	bf_set(lpfc_mbx_read_top_il, la, 0);
	bf_set(lpfc_mbx_read_top_pb, la, 0);
	bf_set(lpfc_mbx_read_top_fa, la, 0);
	bf_set(lpfc_mbx_read_top_mm, la, 0);

	/* Invoke the lpfc_handle_latt mailbox command callback function */
	lpfc_mbx_cmpl_read_topology(phba, pmb);

	return;

out_free_dmabuf:
	kfree(mp);
out_free_pmb:
	mempool_free(pmb, phba->mbox_mem_pool);
}
/**
 * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_fc: pointer to the async fc completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous FC event. It will simply log
 * that the event was received and then issue a read_topology mailbox command so
 * that the rest of the driver will treat it the same as SLI3.
 **/
static void
lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
{
	struct lpfc_dmabuf *mp;
	LPFC_MBOXQ_t *pmb;
	int rc;

	if (bf_get(lpfc_trailer_type, acqe_fc) !=
	    LPFC_FC_LA_EVENT_TYPE_FC_LINK) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2895 Non FC link Event detected.(%d)\n",
				bf_get(lpfc_trailer_type, acqe_fc));
		return;
	}
	/* Keep the link status for extra SLI4 state machine reference */
	phba->sli4_hba.link_state.speed =
				bf_get(lpfc_acqe_fc_la_speed, acqe_fc);
	phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL;
	phba->sli4_hba.link_state.topology =
				bf_get(lpfc_acqe_fc_la_topology, acqe_fc);
	phba->sli4_hba.link_state.status =
				bf_get(lpfc_acqe_fc_la_att_type, acqe_fc);
	phba->sli4_hba.link_state.type =
				bf_get(lpfc_acqe_fc_la_port_type, acqe_fc);
	phba->sli4_hba.link_state.number =
				bf_get(lpfc_acqe_fc_la_port_number, acqe_fc);
	phba->sli4_hba.link_state.fault =
				bf_get(lpfc_acqe_link_fault, acqe_fc);
	phba->sli4_hba.link_state.logical_speed =
				bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc);
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2896 Async FC event - Speed:%dGBaud Topology:x%x "
			"LA Type:x%x Port Type:%d Port Number:%d Logical speed:"
			"%dMbps Fault:%d\n",
			phba->sli4_hba.link_state.speed,
			phba->sli4_hba.link_state.topology,
			phba->sli4_hba.link_state.status,
			phba->sli4_hba.link_state.type,
			phba->sli4_hba.link_state.number,
			phba->sli4_hba.link_state.logical_speed * 10,
			phba->sli4_hba.link_state.fault);
	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2897 The mboxq allocation failed\n");
		return;
	}
	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2898 The lpfc_dmabuf allocation failed\n");
		goto out_free_pmb;
	}
	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2899 The mbuf allocation failed\n");
		goto out_free_dmabuf;
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	/* Block ELS IOCBs until we have done process link event */
	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;

	/* Update link event statistics */
	phba->sli.slistat.link_event++;

	/* Create lpfc_handle_latt mailbox command from link ACQE */
	lpfc_read_topology(phba, pmb, mp);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
	pmb->vport = phba->pport;

	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED)
		goto out_free_dmabuf;
	return;

out_free_dmabuf:
	kfree(mp);
out_free_pmb:
	mempool_free(pmb, phba->mbox_mem_pool);
}
/**
 * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_sli: pointer to the async SLI completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous SLI events.
 **/
static void
lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
{
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2901 Async SLI event - Event Data1:x%08x Event Data2:"
			"x%08x SLI Event Type:%d",
			acqe_sli->event_data1, acqe_sli->event_data2,
			bf_get(lpfc_trailer_type, acqe_sli));
}
/**
 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
 * @vport: pointer to vport data structure.
 *
 * This routine is to perform Clear Virtual Link (CVL) on a vport in
 * response to a CVL event.
 *
 * Return the pointer to the ndlp with the vport if successful, otherwise
 * return NULL.
 **/
static struct lpfc_nodelist *
lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;
	struct lpfc_hba *phba;

	if (!vport)
		return NULL;
	phba = vport->phba;
	if (!phba)
		return NULL;
	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp) {
		/* Cannot find existing Fabric ndlp, so allocate a new one */
		ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
		if (!ndlp)
			return 0;
		lpfc_nlp_init(vport, ndlp, Fabric_DID);
		/* Set the node type */
		ndlp->nlp_type |= NLP_FABRIC;
		/* Put ndlp onto node list */
		lpfc_enqueue_node(vport, ndlp);
	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
		/* re-setup ndlp without removing from node list */
		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
		if (!ndlp)
			return 0;
	}
	if ((phba->pport->port_state < LPFC_FLOGI) &&
		(phba->pport->port_state != LPFC_VPORT_FAILED))
		return NULL;
	/* If virtual link is not yet instantiated ignore CVL */
	if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC)
		&& (vport->port_state != LPFC_VPORT_FAILED))
		return NULL;
	shost = lpfc_shost_from_vport(vport);
	if (!shost)
		return NULL;
	lpfc_linkdown_port(vport);
	lpfc_cleanup_pending_mbox(vport);
	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_VPORT_CVL_RCVD;
	spin_unlock_irq(shost->host_lock);

	return ndlp;
}
/**
 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is to perform Clear Virtual Link (CVL) on all vports in
 * response to a FCF dead event.
 **/
static void
lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_sli4_perform_vport_cvl(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);
}
/**
 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_fip: pointer to the async FCoE FIP completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous FCoE FIP event.
 **/
static void
lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
			struct lpfc_acqe_fip *acqe_fip)
{
	uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip);
	int rc;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host  *shost;
	int active_vlink_present;
	struct lpfc_vport **vports;
	int i;
	phba->fc_eventTag = acqe_fip->event_tag;
	phba->fcoe_eventtag = acqe_fip->event_tag;
	switch (event_type) {
	case LPFC_FIP_EVENT_TYPE_NEW_FCF:
	case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD:
		if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF)
			lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
					LOG_DISCOVERY,
					"2546 New FCF event, evt_tag:x%x, "
					"index:x%x\n",
					acqe_fip->event_tag,
					acqe_fip->index);
		else
			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
					LOG_DISCOVERY,
					"2788 FCF param modified event, "
					"evt_tag:x%x, index:x%x\n",
					acqe_fip->event_tag,
					acqe_fip->index);
		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
			/*
			 * During period of FCF discovery, read the FCF
			 * table record indexed by the event to update
			 * FCF roundrobin failover eligible FCF bmask.
			 */
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
					LOG_DISCOVERY,
					"2779 Read FCF (x%x) for updating "
					"roundrobin FCF failover bmask\n",
					acqe_fip->index);
			rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index);
		}
		/* If the FCF discovery is in progress, do nothing. */
		spin_lock_irq(&phba->hbalock);
		if (phba->hba_flag & FCF_TS_INPROG) {
			spin_unlock_irq(&phba->hbalock);
			break;
		}
		/* If fast FCF failover rescan event is pending, do nothing */
		if (phba->fcf.fcf_flag & FCF_REDISC_EVT) {
			spin_unlock_irq(&phba->hbalock);
			break;
		}

		/* If the FCF has been in discovered state, do nothing. */
		if (phba->fcf.fcf_flag & FCF_SCAN_DONE) {
			spin_unlock_irq(&phba->hbalock);
			break;
		}
		spin_unlock_irq(&phba->hbalock);

		/* Otherwise, scan the entire FCF table and re-discover SAN */
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2770 Start FCF table scan per async FCF "
				"event, evt_tag:x%x, index:x%x\n",
				acqe_fip->event_tag, acqe_fip->index);
		rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
						     LPFC_FCOE_FCF_GET_FIRST);
		if (rc)
			lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
					LOG_DISCOVERY,
					"2547 Issue FCF scan read FCF mailbox "
					"command failed (x%x)\n", rc);
		break;
	case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"2548 FCF Table full count 0x%x tag 0x%x\n",
			bf_get(lpfc_acqe_fip_fcf_count, acqe_fip),
			acqe_fip->event_tag);
		break;
	case LPFC_FIP_EVENT_TYPE_FCF_DEAD:
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
			"2549 FCF (x%x) disconnected from network, "
			"tag:x%x\n", acqe_fip->index, acqe_fip->event_tag);
		/*
		 * If we are in the middle of FCF failover process, clear
		 * the corresponding FCF bit in the roundrobin bitmap.
		 */
		spin_lock_irq(&phba->hbalock);
		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
			spin_unlock_irq(&phba->hbalock);
			/* Update FLOGI FCF failover eligible FCF bmask */
			lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index);
			break;
		}
		spin_unlock_irq(&phba->hbalock);

		/* If the event is not for currently used fcf do nothing */
		if (phba->fcf.current_rec.fcf_indx != acqe_fip->index)
			break;

		/*
		 * Otherwise, request the port to rediscover the entire FCF
		 * table for a fast recovery from case that the current FCF
		 * is no longer valid as we are not in the middle of FCF
		 * failover process already.
		 */
		spin_lock_irq(&phba->hbalock);
		/* Mark the fast failover process in progress */
		phba->fcf.fcf_flag |= FCF_DEAD_DISC;
		spin_unlock_irq(&phba->hbalock);

		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2771 Start FCF fast failover process due to "
				"FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
				"\n", acqe_fip->event_tag, acqe_fip->index);
		rc = lpfc_sli4_redisc_fcf_table(phba);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
					LOG_DISCOVERY,
					"2772 Issue FCF rediscover mailbox "
					"command failed, fail through to FCF "
					"dead event\n");
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
			spin_unlock_irq(&phba->hbalock);
			/*
			 * Last resort will fail over by treating this
			 * as a link down to FCF registration.
			 */
			lpfc_sli4_fcf_dead_failthrough(phba);
		} else {
			/* Reset FCF roundrobin bmask for new discovery */
			lpfc_sli4_clear_fcf_rr_bmask(phba);
			/*
			 * Handling fast FCF failover to a DEAD FCF event is
			 * considered equivalent to receiving CVL to all vports.
			 */
			lpfc_sli4_perform_all_vport_cvl(phba);
		}
		break;
	case LPFC_FIP_EVENT_TYPE_CVL:
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
			"2718 Clear Virtual Link Received for VPI 0x%x"
			" tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);

		vport = lpfc_find_vport_by_vpid(phba,
				acqe_fip->index - phba->vpi_base);
		ndlp = lpfc_sli4_perform_vport_cvl(vport);
		if (!ndlp)
			break;
		active_vlink_present = 0;

		vports = lpfc_create_vport_work_array(phba);
		if (vports) {
			for (i = 0; i <= phba->max_vports && vports[i] != NULL;
					i++) {
				if ((!(vports[i]->fc_flag &
					FC_VPORT_CVL_RCVD)) &&
					(vports[i]->port_state > LPFC_FDISC)) {
					active_vlink_present = 1;
					break;
				}
			}
			lpfc_destroy_vport_work_array(phba, vports);
		}
		if (active_vlink_present) {
			/*
			 * If there are other active VLinks present,
			 * re-instantiate the Vlink using FDISC.
			 */
			mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
			shost = lpfc_shost_from_vport(vport);
			spin_lock_irq(shost->host_lock);
			ndlp->nlp_flag |= NLP_DELAY_TMO;
			spin_unlock_irq(shost->host_lock);
			ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
			vport->port_state = LPFC_FDISC;
		} else {
			/*
			 * Otherwise, we request port to rediscover
			 * the entire FCF table for a fast recovery
			 * from possible case that the current FCF
			 * is no longer valid if we are not already
			 * in the FCF failover process.
			 */
			spin_lock_irq(&phba->hbalock);
			if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
				spin_unlock_irq(&phba->hbalock);
				break;
			}
			/* Mark the fast failover process in progress */
			phba->fcf.fcf_flag |= FCF_ACVL_DISC;
			spin_unlock_irq(&phba->hbalock);
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
					LOG_DISCOVERY,
					"2773 Start FCF failover per CVL, "
					"evt_tag:x%x\n", acqe_fip->event_tag);
			rc = lpfc_sli4_redisc_fcf_table(phba);
			if (rc) {
				lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
						LOG_DISCOVERY,
						"2774 Issue FCF rediscover "
						"mailbox command failed, "
						"fail through to CVL event\n");
				spin_lock_irq(&phba->hbalock);
				phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
				spin_unlock_irq(&phba->hbalock);
				/*
				 * Last resort will be re-try on the
				 * current registered FCF entry.
				 */
				lpfc_retry_pport_discovery(phba);
			} else
				/*
				 * Reset FCF roundrobin bmask for new
				 * discovery.
				 */
				lpfc_sli4_clear_fcf_rr_bmask(phba);
		}
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0288 Unknown FCoE event type 0x%x event tag "
			"0x%x\n", event_type, acqe_fip->event_tag);
		break;
	}
}
/**
 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_dcbx: pointer to the async dcbx completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous dcbx event.
 **/
static void
lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
			 struct lpfc_acqe_dcbx *acqe_dcbx)
{
	phba->fc_eventTag = acqe_dcbx->event_tag;
	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0290 The SLI4 DCBX asynchronous event is not "
			"handled yet\n");
}
/**
 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_grp5: pointer to the async grp5 completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event
 * is an asynchronous notification of a logical link speed change. The Port
 * reports the logical link speed in units of 10Mbps.
 **/
static void
lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
			 struct lpfc_acqe_grp5 *acqe_grp5)
{
	uint16_t prev_ll_spd;

	phba->fc_eventTag = acqe_grp5->event_tag;
	phba->fcoe_eventtag = acqe_grp5->event_tag;
	prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
	phba->sli4_hba.link_state.logical_speed =
		(bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5));
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2789 GRP5 Async Event: Updating logical link speed "
			"from %dMbps to %dMbps\n", (prev_ll_spd * 10),
			(phba->sli4_hba.link_state.logical_speed*10));
}
/**
 * lpfc_sli4_async_event_proc - Process all the pending asynchronous events
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked by the worker thread to process all the pending
 * SLI4 asynchronous events.
 **/
void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;

	/* First, declare the async event has been handled */
	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~ASYNC_EVENT;
	spin_unlock_irq(&phba->hbalock);
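	/*
	 * ASYNC_EVENT is cleared before draining the queue so that an event
	 * posted while draining re-sets the flag and re-arms the worker.
	 */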
	/* Now, handle all the async events */
	while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
		/* Get the first event from the head of the event queue */
		spin_lock_irq(&phba->hbalock);
		list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irq(&phba->hbalock);
		/* Process the asynchronous event */
		switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
		case LPFC_TRAILER_CODE_LINK:
			lpfc_sli4_async_link_evt(phba,
						 &cq_event->cqe.acqe_link);
			break;
		case LPFC_TRAILER_CODE_FCOE:
			lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip);
			break;
		case LPFC_TRAILER_CODE_DCBX:
			lpfc_sli4_async_dcbx_evt(phba,
						 &cq_event->cqe.acqe_dcbx);
			break;
		case LPFC_TRAILER_CODE_GRP5:
			lpfc_sli4_async_grp5_evt(phba,
						 &cq_event->cqe.acqe_grp5);
			break;
		case LPFC_TRAILER_CODE_FC:
			lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc);
			break;
		case LPFC_TRAILER_CODE_SLI:
			lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
			break;
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"1804 Invalid asynchronous event code: "
					"x%x\n", bf_get(lpfc_trailer_code,
					&cq_event->cqe.mcqe_cmpl));
			break;
		}
		/* Free the completion event processed to the free pool */
		lpfc_sli4_cq_event_release(phba, cq_event);
	}
}
/**
 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked by the worker thread to process the FCF table
 * rediscovery pending completion event.
 **/
void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
{
	int rc;

	spin_lock_irq(&phba->hbalock);
	/* Clear FCF rediscovery timeout event */
	phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
	/* Clear driver fast failover FCF record flag */
	phba->fcf.failover_rec.flag = 0;
	/* Set state for FCF fast failover */
	phba->fcf.fcf_flag |= FCF_REDISC_FOV;
	spin_unlock_irq(&phba->hbalock);

	/* Scan FCF table from the first entry to re-discover SAN */
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
			"2777 Start post-quiescent FCF table scan\n");
	rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
	if (rc)
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
				"2747 Issue FCF scan read FCF mailbox "
				"command failed 0x%x\n", rc);
}
/**
 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
 * @phba: pointer to lpfc hba data structure.
 * @dev_grp: The HBA PCI-Device group number.
 *
 * This routine is invoked to set up the per HBA PCI-Device group function
 * API jump table entries.
 *
 * Return: 0 if success, otherwise -ENODEV
 **/
int
lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
{
	int rc;

	/* Set up lpfc PCI-device group */
	phba->pci_dev_grp = dev_grp;

	/* The LPFC_PCI_DEV_OC uses SLI4 */
	if (dev_grp == LPFC_PCI_DEV_OC)
		phba->sli_rev = LPFC_SLI_REV4;

	/* Set up device INIT API function jump table */
	rc = lpfc_init_api_table_setup(phba, dev_grp);
	if (rc)
		return -ENODEV;
	/* Set up SCSI API function jump table */
	rc = lpfc_scsi_api_table_setup(phba, dev_grp);
	if (rc)
		return -ENODEV;
	/* Set up SLI API function jump table */
	rc = lpfc_sli_api_table_setup(phba, dev_grp);
	if (rc)
		return -ENODEV;
	/* Set up MBOX API function jump table */
	rc = lpfc_mbox_api_table_setup(phba, dev_grp);
	if (rc)
		return -ENODEV;

	return 0;
}
/**
 * lpfc_log_intr_mode - Log the active interrupt mode
 * @phba: pointer to lpfc hba data structure.
 * @intr_mode: active interrupt mode adopted.
 *
 * This routine is invoked to log the currently used active interrupt mode
 * to the device.
 **/
static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
{
	switch (intr_mode) {
	case 0:
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0470 Enable INTx interrupt mode.\n");
		break;
	case 1:
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0481 Enabled MSI interrupt mode.\n");
		break;
	case 2:
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0480 Enabled MSI-X interrupt mode.\n");
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0482 Illegal interrupt mode.\n");
		break;
	}
	return;
}
/**
 * lpfc_enable_pci_dev - Enable a generic PCI device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the PCI device that is common to all
 * PCI devices.
 *
 * Return codes
 * 	0 - successful
 * 	other values - error
 **/
static int
lpfc_enable_pci_dev(struct lpfc_hba *phba)
{
	struct pci_dev *pdev;
	int bars = 0;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		goto out_error;
	else
		pdev = phba->pcidev;
	/* Select PCI BARs */
	bars = pci_select_bars(pdev, IORESOURCE_MEM);
	/* Enable PCI device */
	if (pci_enable_device_mem(pdev))
		goto out_error;
	/* Request PCI resource for the device */
	if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME))
		goto out_disable_device;
	/* Set up device as PCI master and save state for EEH */
	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
	pci_save_state(pdev);

	/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
	if (pci_find_capability(pdev, PCI_CAP_ID_EXP))
		pdev->needs_freset = 1;

	return 0;

out_disable_device:
	pci_disable_device(pdev);
out_error:
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"1401 Failed to enable pci device, bars:x%x\n", bars);
	return -ENODEV;
}
/**
 * lpfc_disable_pci_dev - Disable a generic PCI device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to disable the PCI device that is common to all
 * PCI devices.
 **/
static void
lpfc_disable_pci_dev(struct lpfc_hba *phba)
{
	struct pci_dev *pdev;
	int bars;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		return;
	else
		pdev = phba->pcidev;
	/* Select PCI BARs */
	bars = pci_select_bars(pdev, IORESOURCE_MEM);
	/* Release PCI resource and disable PCI device */
	pci_release_selected_regions(pdev, bars);
	pci_disable_device(pdev);
	/* Null out PCI private reference to driver */
	pci_set_drvdata(pdev, NULL);

	return;
}
4037 * lpfc_reset_hba - Reset a hba
4038 * @phba: pointer to lpfc hba data structure.
4040 * This routine is invoked to reset a hba device. It brings the HBA
4041 * offline, performs a board restart, and then brings the board back
4042 * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up
4043 * any outstanding mailbox commands.
4046 lpfc_reset_hba(struct lpfc_hba *phba)
4048 /* If resets are disabled then set error state and return. */
4049 if (!phba->cfg_enable_hba_reset) {
4050 phba->link_state = LPFC_HBA_ERROR;
4053 lpfc_offline_prep(phba);
4055 lpfc_sli_brdrestart(phba);
4057 lpfc_unblock_mgmt_io(phba);
4061 * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions
4062 * @phba: pointer to lpfc hba data structure.
4064 * This function reads the PCI SR-IOV extended capability of the physical
4065 * function to determine how many virtual functions the device supports.
4066 * If the device does not implement the SR-IOV capability, zero virtual
4067 * functions are reported; the absence of SR-IOV support is not treated
4068 * as an error condition.
4071 lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba)
4073 struct pci_dev *pdev = phba->pcidev;
4077 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
4081 pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn);
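/*
 * Usage note (illustrative, not from the source): when the function lacks
 * the SR-IOV extended capability, pci_find_ext_capability() returns 0 and
 * this helper reports 0 virtual functions; otherwise the TotalVFs field
 * read above is returned directly, e.g. 16 for a port provisioned with
 * 16 VFs.
 */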
4086 * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions
4087 * @phba: pointer to lpfc hba data structure.
4088 * @nr_vfn: number of virtual functions to be enabled.
4090 * This function enables the PCI SR-IOV virtual functions to a physical
4091 * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to
4092 * enable the requested number of virtual functions on the physical
4093 * function. As not all devices support SR-IOV, a failure return from the
4094 * pci_enable_sriov() API call is not treated as an error for most devices.
4097 lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
4099 struct pci_dev *pdev = phba->pcidev;
4100 uint16_t max_nr_vfn;
4103 max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba);
4104 if (nr_vfn > max_nr_vfn) {
4105 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4106 "3057 Requested vfs (%d) greater than "
4107 "supported vfs (%d)", nr_vfn, max_nr_vfn);
4111 rc = pci_enable_sriov(pdev, nr_vfn);
4113 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4114 "2806 Failed to enable sriov on this device "
4115 "with vfn number nr_vf:%d, rc:%d\n",
4118 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4119 "2807 Successful enable sriov on this device "
4120 "with vfn number nr_vf:%d\n", nr_vfn);
4125 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev.
4126 * @phba: pointer to lpfc hba data structure.
4128 * This routine is invoked to set up the driver internal resources specific to
4129 * support the SLI-3 HBA device it is attached to.
4133 * other values - error
4136 lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
4138 struct lpfc_sli *psli;
4142 * Initialize timers used by driver
4145 /* Heartbeat timer */
4146 init_timer(&phba->hb_tmofunc);
4147 phba->hb_tmofunc.function = lpfc_hb_timeout;
4148 phba->hb_tmofunc.data = (unsigned long)phba;
4151 /* MBOX heartbeat timer */
4152 init_timer(&psli->mbox_tmo);
4153 psli->mbox_tmo.function = lpfc_mbox_timeout;
4154 psli->mbox_tmo.data = (unsigned long) phba;
4155 /* FCP polling mode timer */
4156 init_timer(&phba->fcp_poll_timer);
4157 phba->fcp_poll_timer.function = lpfc_poll_timeout;
4158 phba->fcp_poll_timer.data = (unsigned long) phba;
4159 /* Fabric block timer */
4160 init_timer(&phba->fabric_block_timer);
4161 phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
4162 phba->fabric_block_timer.data = (unsigned long) phba;
4163 /* EA polling mode timer */
4164 init_timer(&phba->eratt_poll);
4165 phba->eratt_poll.function = lpfc_poll_eratt;
4166 phba->eratt_poll.data = (unsigned long) phba;
4168 /* Host attention work mask setup */
4169 phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
4170 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
4172 /* Get all the module params for configuring this host */
4173 lpfc_get_cfgparam(phba);
4174 if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
4175 phba->menlo_flag |= HBA_MENLO_SUPPORT;
4176 /* check for menlo minimum sg count */
4177 if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT)
4178 phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
4182 * Since the sg_tablesize is a module parameter, the sg_dma_buf_size
4183 * used to create the sg_dma_buf_pool must be dynamically calculated.
4184 * 2 segments are added since the IOCB needs a command and response bde.
4186 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
4187 sizeof(struct fcp_rsp) +
4188 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
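	/*
	 * Worked example (figures for illustration only): with a
	 * cfg_sg_seg_cnt of 64, each pool element must hold the FCP
	 * command, the FCP response, and 66 BDEs -- the 64 data segments
	 * plus the 2 command/response entries accounted for above.
	 */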
4190 if (phba->cfg_enable_bg) {
4191 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT;
4192 phba->cfg_sg_dma_buf_size +=
4193 phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64);
4196 /* Also reinitialize the host templates with new values. */
4197 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
4198 lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
4200 phba->max_vpi = LPFC_MAX_VPI;
4201 /* This will be set to the correct value after the config_port mbox */
4202 phba->max_vports = 0;
4205 * Initialize the SLI Layer to run with lpfc HBAs.
4207 lpfc_sli_setup(phba);
4208 lpfc_sli_queue_setup(phba);
4210 /* Allocate device driver memory */
4211 if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
4215 * Enable sr-iov virtual functions if supported and configured
4216 * through the module parameter.
4218 if (phba->cfg_sriov_nr_virtfn > 0) {
4219 rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
4220 phba->cfg_sriov_nr_virtfn);
4222 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4223 "2808 Requested number of SR-IOV "
4224 "virtual functions (%d) is not "
4226 phba->cfg_sriov_nr_virtfn);
4227 phba->cfg_sriov_nr_virtfn = 0;
4235 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
4236 * @phba: pointer to lpfc hba data structure.
4238 * This routine is invoked to unset the driver internal resources set up
4239 * specific for supporting the SLI-3 HBA device it is attached to.
4242 lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
4244 /* Free device driver memory allocated */
4245 lpfc_mem_free_all(phba);
4251 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
4252 * @phba: pointer to lpfc hba data structure.
4254 * This routine is invoked to set up the driver internal resources specific to
4255 * support the SLI-4 HBA device it is attached to.
4259 * other values - error
4262 lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4264 struct lpfc_sli *psli;
4265 LPFC_MBOXQ_t *mboxq;
4266 int rc, i, hbq_count, buf_size, dma_buf_size, max_buf_size;
4267 uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
4268 struct lpfc_mqe *mqe;
4269 int longs, sli_family;
4271 /* Before proceeding, wait for POST done and device ready */
4272 rc = lpfc_sli4_post_status_check(phba);
4277 * Initialize timers used by driver
4280 /* Heartbeat timer */
4281 init_timer(&phba->hb_tmofunc);
4282 phba->hb_tmofunc.function = lpfc_hb_timeout;
4283 phba->hb_tmofunc.data = (unsigned long)phba;
4284 init_timer(&phba->rrq_tmr);
4285 phba->rrq_tmr.function = lpfc_rrq_timeout;
4286 phba->rrq_tmr.data = (unsigned long)phba;
4289 /* MBOX heartbeat timer */
4290 init_timer(&psli->mbox_tmo);
4291 psli->mbox_tmo.function = lpfc_mbox_timeout;
4292 psli->mbox_tmo.data = (unsigned long) phba;
4293 /* Fabric block timer */
4294 init_timer(&phba->fabric_block_timer);
4295 phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
4296 phba->fabric_block_timer.data = (unsigned long) phba;
4297 /* EA polling mode timer */
4298 init_timer(&phba->eratt_poll);
4299 phba->eratt_poll.function = lpfc_poll_eratt;
4300 phba->eratt_poll.data = (unsigned long) phba;
4301 /* FCF rediscover timer */
4302 init_timer(&phba->fcf.redisc_wait);
4303 phba->fcf.redisc_wait.function = lpfc_sli4_fcf_redisc_wait_tmo;
4304 phba->fcf.redisc_wait.data = (unsigned long)phba;
4307 * Control structure for handling external multi-buffer mailbox
4308 * command pass-through.
4310 memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0,
4311 sizeof(struct lpfc_mbox_ext_buf_ctx));
4312 INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
4315 * We need to do a READ_CONFIG mailbox command here before
4316 * calling lpfc_get_cfgparam. For VFs this will report the
4317 * MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings.
4318 * All of the resources allocated
4319 * for this Port are tied to these values.
4321 /* Get all the module params for configuring this host */
4322 lpfc_get_cfgparam(phba);
4323 phba->max_vpi = LPFC_MAX_VPI;
4324 /* This will be set to the correct value after the read_config mbox */
4325 phba->max_vports = 0;
4327 /* Program the default value of vlan_id and fc_map */
4328 phba->valid_vlan = 0;
4329 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
4330 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
4331 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
4334 * Since the sg_tablesize is a module parameter, the sg_dma_buf_size
4335 * used to create the sg_dma_buf_pool must be dynamically calculated.
4336 * 2 segments are added since the IOCB needs a command and response bde.
4337 * To ensure that the scsi sgl does not cross a 4k page boundary, only
4338 * sgl sizes that are a power of 2 are used.
4340 buf_size = (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) +
4341 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge)));
4343 sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf);
4344 max_buf_size = LPFC_SLI4_MAX_BUF_SIZE;
4345 switch (sli_family) {
4346 case LPFC_SLI_INTF_FAMILY_BE2:
4347 case LPFC_SLI_INTF_FAMILY_BE3:
4348 /* There is a single hint for BE - 2 pages per BPL. */
4349 if (bf_get(lpfc_sli_intf_sli_hint1, &phba->sli4_hba.sli_intf) ==
4350 LPFC_SLI_INTF_SLI_HINT1_1)
4351 max_buf_size = LPFC_SLI4_FL1_MAX_BUF_SIZE;
4353 case LPFC_SLI_INTF_FAMILY_LNCR_A0:
4354 case LPFC_SLI_INTF_FAMILY_LNCR_B0:
4358 for (dma_buf_size = LPFC_SLI4_MIN_BUF_SIZE;
4359 dma_buf_size < max_buf_size && buf_size > dma_buf_size;
4360 dma_buf_size = dma_buf_size << 1)
4362 if (dma_buf_size == max_buf_size)
4363 phba->cfg_sg_seg_cnt = (dma_buf_size -
4364 sizeof(struct fcp_cmnd) - sizeof(struct fcp_rsp) -
4365 (2 * sizeof(struct sli4_sge))) /
4366 sizeof(struct sli4_sge);
4367 phba->cfg_sg_dma_buf_size = dma_buf_size;
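	/*
	 * Worked example (LPFC_SLI4_MIN_BUF_SIZE of 1KB is an assumption
	 * for illustration): if buf_size computes to ~1.6KB, the loop
	 * above doubles dma_buf_size 1KB -> 2KB and stops, so every DMA
	 * buffer gets the first power-of-two size that fits buf_size and
	 * therefore never straddles a 4KB page boundary.
	 */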
4369 /* Initialize buffer queue management fields */
4370 hbq_count = lpfc_sli_hbq_count();
4371 for (i = 0; i < hbq_count; ++i)
4372 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
4373 INIT_LIST_HEAD(&phba->rb_pend_list);
4374 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
4375 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
4378 * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
4380 /* Initialize the Abort scsi buffer list used by driver */
4381 spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock);
4382 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
4383 /* This abort list is used by the worker thread */
4384 spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock);
4387 * Initialize driver internal slow-path work queues
4390 /* Driver internal slow-path CQ Event pool */
4391 INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
4392 /* Response IOCB work queue list */
4393 INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
4394 /* Asynchronous event CQ Event work queue list */
4395 INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
4396 /* Fast-path XRI aborted CQ Event work queue list */
4397 INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
4398 /* Slow-path XRI aborted CQ Event work queue list */
4399 INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
4400 /* Receive queue CQ Event work queue list */
4401 INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
4403 /* Initialize extent block lists. */
4404 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list);
4405 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list);
4406 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list);
4407 INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list);
4409 /* Initialize the driver internal SLI layer lists. */
4410 lpfc_sli_setup(phba);
4411 lpfc_sli_queue_setup(phba);
4413 /* Allocate device driver memory */
4414 rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
4418 /* IF Type 2 ports get initialized now. */
4419 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
4420 LPFC_SLI_INTF_IF_TYPE_2) {
4421 rc = lpfc_pci_function_reset(phba);
4426 /* Create the bootstrap mailbox command */
4427 rc = lpfc_create_bootstrap_mbox(phba);
4431 /* Set up the host's endian order with the device. */
4432 rc = lpfc_setup_endian_order(phba);
4434 goto out_free_bsmbx;
4436 /* Set up the hba's configuration parameters. */
4437 rc = lpfc_sli4_read_config(phba);
4439 goto out_free_bsmbx;
4441 /* IF Type 0 ports get initialized now. */
4442 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
4443 LPFC_SLI_INTF_IF_TYPE_0) {
4444 rc = lpfc_pci_function_reset(phba);
4446 goto out_free_bsmbx;
4449 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
4453 goto out_free_bsmbx;
4456 /* Get the Supported Pages if PORT_CAPABILITIES is supported by port. */
4457 lpfc_supported_pages(mboxq);
4458 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4460 mqe = &mboxq->u.mqe;
4461 memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
4462 LPFC_MAX_SUPPORTED_PAGES);
4463 for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
4464 switch (pn_page[i]) {
4465 case LPFC_SLI4_PARAMETERS:
4466 phba->sli4_hba.pc_sli4_params.supported = 1;
4472 /* Read the port's SLI4 Parameters capabilities if supported. */
4473 if (phba->sli4_hba.pc_sli4_params.supported)
4474 rc = lpfc_pc_sli4_params_get(phba, mboxq);
4476 mempool_free(mboxq, phba->mbox_mem_pool);
4478 goto out_free_bsmbx;
4482 * Get sli4 parameters that override parameters from Port capabilities.
4483 * If this call fails, it isn't critical unless the SLI4 parameters come back in conflict.
4486 rc = lpfc_get_sli4_parameters(phba, mboxq);
4488 if (phba->sli4_hba.extents_in_use &&
4489 phba->sli4_hba.rpi_hdrs_in_use) {
4490 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4491 "2999 Unsupported SLI4 Parameters "
4492 "Extents and RPI headers enabled.\n");
4493 goto out_free_bsmbx;
4496 mempool_free(mboxq, phba->mbox_mem_pool);
4497 /* Verify all the SLI4 queues */
4498 rc = lpfc_sli4_queue_verify(phba);
4500 goto out_free_bsmbx;
4502 /* Create driver internal CQE event pool */
4503 rc = lpfc_sli4_cq_event_pool_create(phba);
4505 goto out_free_bsmbx;
4507 /* Initialize and populate the sgl list per host */
4508 rc = lpfc_init_sgl_list(phba);
4510 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4511 "1400 Failed to initialize sgl list.\n");
4512 goto out_destroy_cq_event_pool;
4514 rc = lpfc_init_active_sgl_array(phba);
4516 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4517 "1430 Failed to initialize sgl list.\n");
4518 goto out_free_sgl_list;
4520 rc = lpfc_sli4_init_rpi_hdrs(phba);
4522 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4523 "1432 Failed to initialize rpi headers.\n");
4524 goto out_free_active_sgl;
4527 /* Allocate eligible FCF bmask memory for FCF roundrobin failover */
4528 longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
4529 phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long),
4531 if (!phba->fcf.fcf_rr_bmask) {
4532 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4533 "2759 Failed allocate memory for FCF round "
4534 "robin failover bmask\n");
4536 goto out_remove_rpi_hdrs;
4540 * The cfg_fcp_eq_count can be zero whenever there is exactly one
4541 * interrupt vector. This is not an error.
4543 if (phba->cfg_fcp_eq_count) {
4544 phba->sli4_hba.fcp_eq_hdl =
4545 kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
4546 phba->cfg_fcp_eq_count), GFP_KERNEL);
4547 if (!phba->sli4_hba.fcp_eq_hdl) {
4548 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4549 "2572 Failed allocate memory for "
4550 "fast-path per-EQ handle array\n");
4552 goto out_free_fcf_rr_bmask;
4556 phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
4557 phba->sli4_hba.cfg_eqn), GFP_KERNEL);
4558 if (!phba->sli4_hba.msix_entries) {
4559 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4560 "2573 Failed allocate memory for msi-x "
4561 "interrupt vector entries\n");
4563 goto out_free_fcp_eq_hdl;
4567 * Enable sr-iov virtual functions if supported and configured
4568 * through the module parameter.
4570 if (phba->cfg_sriov_nr_virtfn > 0) {
4571 rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
4572 phba->cfg_sriov_nr_virtfn);
4574 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4575 "3020 Requested number of SR-IOV "
4576 "virtual functions (%d) is not "
4578 phba->cfg_sriov_nr_virtfn);
4579 phba->cfg_sriov_nr_virtfn = 0;
4585 out_free_fcp_eq_hdl:
4586 kfree(phba->sli4_hba.fcp_eq_hdl);
4587 out_free_fcf_rr_bmask:
4588 kfree(phba->fcf.fcf_rr_bmask);
4589 out_remove_rpi_hdrs:
4590 lpfc_sli4_remove_rpi_hdrs(phba);
4591 out_free_active_sgl:
4592 lpfc_free_active_sgl(phba);
4594 lpfc_free_sgl_list(phba);
4595 out_destroy_cq_event_pool:
4596 lpfc_sli4_cq_event_pool_destroy(phba);
4598 lpfc_destroy_bootstrap_mbox(phba);
4600 lpfc_mem_free(phba);
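/*
 * The error ladder above is the standard kernel goto-unwind idiom:
 * resources are released in strict reverse order of acquisition. A
 * hedged, minimal sketch of the same shape, reusing two of the driver
 * calls seen above (the label name is hypothetical):
 */
static int example_setup_with_unwind(struct lpfc_hba *phba)
{
	if (lpfc_mem_alloc(phba, SGL_ALIGN_SZ))		/* acquire #1 */
		return -ENOMEM;
	if (lpfc_create_bootstrap_mbox(phba))		/* acquire #2 */
		goto out_free_mem;
	return 0;

out_free_mem:
	lpfc_mem_free(phba);				/* release #1 */
	return -ENOMEM;
}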
4605 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
4606 * @phba: pointer to lpfc hba data structure.
4608 * This routine is invoked to unset the driver internal resources set up
4609 * specific for supporting the SLI-4 HBA device it is attached to.
4612 lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
4614 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
4616 /* Free memory allocated for msi-x interrupt vector entries */
4617 kfree(phba->sli4_hba.msix_entries);
4619 /* Free memory allocated for fast-path work queue handles */
4620 kfree(phba->sli4_hba.fcp_eq_hdl);
4622 /* Free the allocated rpi headers. */
4623 lpfc_sli4_remove_rpi_hdrs(phba);
4624 lpfc_sli4_remove_rpis(phba);
4626 /* Free eligible FCF index bmask */
4627 kfree(phba->fcf.fcf_rr_bmask);
4629 /* Free the ELS sgl list */
4630 lpfc_free_active_sgl(phba);
4631 lpfc_free_sgl_list(phba);
4633 /* Free the SCSI sgl management array */
4634 kfree(phba->sli4_hba.lpfc_scsi_psb_array);
4636 /* Free the completion queue EQ event pool */
4637 lpfc_sli4_cq_event_release_all(phba);
4638 lpfc_sli4_cq_event_pool_destroy(phba);
4640 /* Release resource identifiers. */
4641 lpfc_sli4_dealloc_resource_identifiers(phba);
4643 /* Free the bsmbx region. */
4644 lpfc_destroy_bootstrap_mbox(phba);
4646 /* Free the SLI Layer memory with SLI4 HBAs */
4647 lpfc_mem_free_all(phba);
4649 /* Free the current connect table */
4650 list_for_each_entry_safe(conn_entry, next_conn_entry,
4651 &phba->fcf_conn_rec_list, list) {
4652 list_del_init(&conn_entry->list);
4660 * lpfc_init_api_table_setup - Set up init api function jump table
4661 * @phba: The hba struct for which this call is being executed.
4662 * @dev_grp: The HBA PCI-Device group number.
4664 * This routine sets up the device INIT interface API function jump table.
4667 * Returns: 0 - success, -ENODEV - failure.
4670 lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
4672 phba->lpfc_hba_init_link = lpfc_hba_init_link;
4673 phba->lpfc_hba_down_link = lpfc_hba_down_link;
4674 phba->lpfc_selective_reset = lpfc_selective_reset;
4676 case LPFC_PCI_DEV_LP:
4677 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
4678 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
4679 phba->lpfc_stop_port = lpfc_stop_port_s3;
4681 case LPFC_PCI_DEV_OC:
4682 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
4683 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
4684 phba->lpfc_stop_port = lpfc_stop_port_s4;
4687 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4688 "1431 Invalid HBA PCI-device group: 0x%x\n",
4697 * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources.
4698 * @phba: pointer to lpfc hba data structure.
4700 * This routine is invoked to set up the driver internal resources before the
4701 * device specific resource setup to support the HBA device it is attached to.
4705 * other values - error
4708 lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
4711 * Driver resources common to all SLI revisions
4713 atomic_set(&phba->fast_event_count, 0);
4714 spin_lock_init(&phba->hbalock);
4716 /* Initialize ndlp management spinlock */
4717 spin_lock_init(&phba->ndlp_lock);
4719 INIT_LIST_HEAD(&phba->port_list);
4720 INIT_LIST_HEAD(&phba->work_list);
4721 init_waitqueue_head(&phba->wait_4_mlo_m_q);
4723 /* Initialize the wait queue head for the kernel thread */
4724 init_waitqueue_head(&phba->work_waitq);
4726 /* Initialize the scsi buffer list used by driver for scsi IO */
4727 spin_lock_init(&phba->scsi_buf_list_lock);
4728 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);
4730 /* Initialize the fabric iocb list */
4731 INIT_LIST_HEAD(&phba->fabric_iocb_list);
4733 /* Initialize list to save ELS buffers */
4734 INIT_LIST_HEAD(&phba->elsbuf);
4736 /* Initialize FCF connection rec list */
4737 INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
4743 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
4744 * @phba: pointer to lpfc hba data structure.
4746 * This routine is invoked to set up the driver internal resources after the
4747 * device specific resource setup to support the HBA device it is attached to.
4751 * other values - error
4754 lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
4758 /* Startup the kernel thread for this host adapter. */
4759 phba->worker_thread = kthread_run(lpfc_do_work, phba,
4760 "lpfc_worker_%d", phba->brd_no);
4761 if (IS_ERR(phba->worker_thread)) {
4762 error = PTR_ERR(phba->worker_thread);
4770 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
4771 * @phba: pointer to lpfc hba data structure.
4773 * This routine is invoked to unset the driver internal resources set up after
4774 * the device specific resource setup for supporting the HBA device it is attached to.
4778 lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
4780 /* Stop kernel worker thread */
4781 kthread_stop(phba->worker_thread);
4785 * lpfc_free_iocb_list - Free iocb list.
4786 * @phba: pointer to lpfc hba data structure.
4788 * This routine is invoked to free the driver's IOCB list and memory.
4791 lpfc_free_iocb_list(struct lpfc_hba *phba)
4793 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
4795 spin_lock_irq(&phba->hbalock);
4796 list_for_each_entry_safe(iocbq_entry, iocbq_next,
4797 &phba->lpfc_iocb_list, list) {
4798 list_del(&iocbq_entry->list);
4800 phba->total_iocbq_bufs--;
4802 spin_unlock_irq(&phba->hbalock);
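/*
 * Note on the idiom above: list_for_each_entry_safe() caches the next
 * node in iocbq_next before the loop body runs, so the body may unlink
 * (list_del) and free the current entry without corrupting the walk.
 * A plain list_for_each_entry() would be unsafe for this teardown.
 */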
4808 * lpfc_init_iocb_list - Allocate and initialize iocb list.
4809 * @phba: pointer to lpfc hba data structure.
4811 * This routine is invoked to allocate and initialize the driver's IOCB
4812 * list and set up the IOCB tag array accordingly.
4816 * other values - error
4819 lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
4821 struct lpfc_iocbq *iocbq_entry = NULL;
4825 /* Initialize and populate the iocb list per host. */
4826 INIT_LIST_HEAD(&phba->lpfc_iocb_list);
4827 for (i = 0; i < iocb_count; i++) {
4828 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
4829 if (iocbq_entry == NULL) {
4830 printk(KERN_ERR "%s: only allocated %d iocbs of "
4831 "expected %d count. Unloading driver.\n",
4832 __func__, i, LPFC_IOCB_LIST_CNT);
4833 goto out_free_iocbq;
4836 iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
4839 printk(KERN_ERR "%s: failed to allocate IOTAG. "
4840 "Unloading driver.\n", __func__);
4841 goto out_free_iocbq;
4843 iocbq_entry->sli4_lxritag = NO_XRI;
4844 iocbq_entry->sli4_xritag = NO_XRI;
4846 spin_lock_irq(&phba->hbalock);
4847 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
4848 phba->total_iocbq_bufs++;
4849 spin_unlock_irq(&phba->hbalock);
4855 lpfc_free_iocb_list(phba);
4861 * lpfc_free_sgl_list - Free sgl list.
4862 * @phba: pointer to lpfc hba data structure.
4864 * This routine is invoked to free the driver's sgl list and memory.
4867 lpfc_free_sgl_list(struct lpfc_hba *phba)
4869 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
4870 LIST_HEAD(sglq_list);
4872 spin_lock_irq(&phba->hbalock);
4873 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list);
4874 spin_unlock_irq(&phba->hbalock);
4876 list_for_each_entry_safe(sglq_entry, sglq_next,
4878 list_del(&sglq_entry->list);
4879 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
4881 phba->sli4_hba.total_sglq_bufs--;
4883 kfree(phba->sli4_hba.lpfc_els_sgl_array);
4887 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
4888 * @phba: pointer to lpfc hba data structure.
4890 * This routine is invoked to allocate the driver's active sgl memory.
4891 * This array will hold the sglq_entry's for active IOs.
4894 lpfc_init_active_sgl_array(struct lpfc_hba *phba)
4897 size = sizeof(struct lpfc_sglq *);
4898 size *= phba->sli4_hba.max_cfg_param.max_xri;
4900 phba->sli4_hba.lpfc_sglq_active_list =
4901 kzalloc(size, GFP_KERNEL);
4902 if (!phba->sli4_hba.lpfc_sglq_active_list)
4908 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
4909 * @phba: pointer to lpfc hba data structure.
4911 * This routine is invoked to walk through the array of active sglq entries
4912 * and free all of the resources.
4913 * This is just a placeholder for now.
4916 lpfc_free_active_sgl(struct lpfc_hba *phba)
4918 kfree(phba->sli4_hba.lpfc_sglq_active_list);
4922 * lpfc_init_sgl_list - Allocate and initialize sgl list.
4923 * @phba: pointer to lpfc hba data structure.
4925 * This routine is invoked to allocate and initialize the driver's sgl
4926 * list and set up the sgl xritag tag array accordingly.
4930 * other values - error
4933 lpfc_init_sgl_list(struct lpfc_hba *phba)
4935 struct lpfc_sglq *sglq_entry = NULL;
4939 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
4940 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4941 "2400 ELS XRI count %d.\n",
4943 /* Initialize and populate the sglq list per host/VF. */
4944 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list);
4945 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
4947 /* Sanity check on XRI management */
4948 if (phba->sli4_hba.max_cfg_param.max_xri <= els_xri_cnt) {
4949 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4950 "2562 No room left for SCSI XRI allocation: "
4951 "max_xri=%d, els_xri=%d\n",
4952 phba->sli4_hba.max_cfg_param.max_xri,
4957 /* Allocate memory for the ELS XRI management array */
4958 phba->sli4_hba.lpfc_els_sgl_array =
4959 kzalloc((sizeof(struct lpfc_sglq *) * els_xri_cnt),
4962 if (!phba->sli4_hba.lpfc_els_sgl_array) {
4963 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4964 "2401 Failed to allocate memory for ELS "
4965 "XRI management array of size %d.\n",
4970 /* Reserve the remaining XRIs for SCSI in the XRI management array */
4971 phba->sli4_hba.scsi_xri_max =
4972 phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
4973 phba->sli4_hba.scsi_xri_cnt = 0;
4974 phba->sli4_hba.lpfc_scsi_psb_array =
4975 kzalloc((sizeof(struct lpfc_scsi_buf *) *
4976 phba->sli4_hba.scsi_xri_max), GFP_KERNEL);
4978 if (!phba->sli4_hba.lpfc_scsi_psb_array) {
4979 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4980 "2563 Failed to allocate memory for SCSI "
4981 "XRI management array of size %d.\n",
4982 phba->sli4_hba.scsi_xri_max);
4983 kfree(phba->sli4_hba.lpfc_els_sgl_array);
4987 for (i = 0; i < els_xri_cnt; i++) {
4988 sglq_entry = kzalloc(sizeof(struct lpfc_sglq), GFP_KERNEL);
4989 if (sglq_entry == NULL) {
4990 printk(KERN_ERR "%s: only allocated %d sgls of "
4991 "expected %d count. Unloading driver.\n",
4992 __func__, i, els_xri_cnt);
4996 sglq_entry->buff_type = GEN_BUFF_TYPE;
4997 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, &sglq_entry->phys);
4998 if (sglq_entry->virt == NULL) {
5000 printk(KERN_ERR "%s: failed to allocate mbuf.\n"
5001 "Unloading driver.\n", __func__);
5004 sglq_entry->sgl = sglq_entry->virt;
5005 memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
5007 /* The list order is used by later block SGL registration */
5008 spin_lock_irq(&phba->hbalock);
5009 sglq_entry->state = SGL_FREED;
5010 list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list);
5011 phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry;
5012 phba->sli4_hba.total_sglq_bufs++;
5013 spin_unlock_irq(&phba->hbalock);
5018 kfree(phba->sli4_hba.lpfc_scsi_psb_array);
5019 lpfc_free_sgl_list(phba);
5024 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
5025 * @phba: pointer to lpfc hba data structure.
5027 * This routine is invoked to post rpi header templates to the
5028 * port for those SLI4 ports that do not support extents. This routine
5029 * posts a PAGE_SIZE memory region to the port to hold up to
5030 * PAGE_SIZE / 64 rpi context headers. This is an initialization routine
5031 * and should be called only when interrupts are disabled.
5035 * -ERROR - otherwise.
5038 lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
5041 struct lpfc_rpi_hdr *rpi_hdr;
5043 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
5045 * If the SLI4 port supports extents, posting the rpi header isn't
5046 * required. Set the expected maximum count and let the actual value
5047 * get set when extents are fully allocated.
5049 if (!phba->sli4_hba.rpi_hdrs_in_use) {
5050 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
5053 if (phba->sli4_hba.extents_in_use)
5056 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
5058 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5059 "0391 Error during rpi post operation\n");
5060 lpfc_sli4_remove_rpis(phba);
5068 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
5069 * @phba: pointer to lpfc hba data structure.
5071 * This routine is invoked to allocate a single 4KB memory region to
5072 * support rpis and stores them in the phba. This single region
5073 * provides support for up to 64 rpis. The region is used globally by the driver.
5077 * A valid rpi hdr on success.
5078 * A NULL pointer on any failure.
5080 struct lpfc_rpi_hdr *
5081 lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
5083 uint16_t rpi_limit, curr_rpi_range;
5084 struct lpfc_dmabuf *dmabuf;
5085 struct lpfc_rpi_hdr *rpi_hdr;
5089 * If the SLI4 port supports extents, posting the rpi header isn't
5090 * required. Set the expected maximum count and let the actual value
5091 * get set when extents are fully allocated.
5093 if (!phba->sli4_hba.rpi_hdrs_in_use)
5095 if (phba->sli4_hba.extents_in_use)
5098 /* The limit on the logical index is just the max_rpi count. */
5099 rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base +
5100 phba->sli4_hba.max_cfg_param.max_rpi - 1;
5102 spin_lock_irq(&phba->hbalock);
5104 * Establish the starting RPI in this header block. The starting
5105 * rpi is normalized to a zero base because the physical rpi is port based.
5108 curr_rpi_range = phba->sli4_hba.next_rpi -
5109 phba->sli4_hba.max_cfg_param.rpi_base;
5110 spin_unlock_irq(&phba->hbalock);
5113 * The port has a limited number of rpis. The increment here
5114 * is LPFC_RPI_HDR_COUNT - 1 to account for the starting value
5115 * and to allow the full max_rpi range per port.
5117 if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit)
5118 rpi_count = rpi_limit - curr_rpi_range;
5120 rpi_count = LPFC_RPI_HDR_COUNT;
5125 * First allocate the protocol header region for the port. The
5126 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
5128 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5132 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
5133 LPFC_HDR_TEMPLATE_SIZE,
5136 if (!dmabuf->virt) {
5138 goto err_free_dmabuf;
5141 memset(dmabuf->virt, 0, LPFC_HDR_TEMPLATE_SIZE);
5142 if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
5144 goto err_free_coherent;
5147 /* Save the rpi header data for cleanup later. */
5148 rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
5150 goto err_free_coherent;
5152 rpi_hdr->dmabuf = dmabuf;
5153 rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
5154 rpi_hdr->page_count = 1;
5155 spin_lock_irq(&phba->hbalock);
5157 /* The rpi_hdr stores the logical index only. */
5158 rpi_hdr->start_rpi = curr_rpi_range;
5159 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
5162 * The next_rpi stores the next logical modulo-64 rpi value used
5163 * to post physical rpis in subsequent rpi postings.
5165 phba->sli4_hba.next_rpi += rpi_count;
5166 spin_unlock_irq(&phba->hbalock);
5170 dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
5171 dmabuf->virt, dmabuf->phys);
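/*
 * Note on the alignment check above (a defensive verification, not an
 * expected failure path): the port requires the 4KB header region to
 * start on a 4KB boundary, and dma_alloc_coherent() already returns
 * page-aligned memory on common architectures, so IS_ALIGNED() should
 * normally succeed.
 */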
5178 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
5179 * @phba: pointer to lpfc hba data structure.
5181 * This routine is invoked to remove all memory resources allocated
5182 * to support rpis for SLI4 ports not supporting extents. This routine
5183 * presumes the caller has released all rpis consumed by fabric or port
5184 * logins and is prepared to have the header pages removed.
5187 lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
5189 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
5191 if (!phba->sli4_hba.rpi_hdrs_in_use)
5194 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
5195 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
5196 list_del(&rpi_hdr->list);
5197 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
5198 rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
5199 kfree(rpi_hdr->dmabuf);
5203 /* There are no rpis available to the port now. */
5204 phba->sli4_hba.next_rpi = 0;
5208 * lpfc_hba_alloc - Allocate driver hba data structure for a device.
5209 * @pdev: pointer to pci device data structure.
5211 * This routine is invoked to allocate the driver hba data structure for an
5212 * HBA device. If the allocation is successful, the phba reference to the
5213 * PCI device data structure is set.
5216 * pointer to @phba - successful
5219 static struct lpfc_hba *
5220 lpfc_hba_alloc(struct pci_dev *pdev)
5222 struct lpfc_hba *phba;
5224 /* Allocate memory for HBA structure */
5225 phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
5227 dev_err(&pdev->dev, "failed to allocate hba struct\n");
5231 /* Set reference to PCI device in HBA structure */
5232 phba->pcidev = pdev;
5234 /* Assign an unused board number */
5235 phba->brd_no = lpfc_get_instance();
5236 if (phba->brd_no < 0) {
5241 spin_lock_init(&phba->ct_ev_lock);
5242 INIT_LIST_HEAD(&phba->ct_ev_waiters);
5248 * lpfc_hba_free - Free driver hba data structure with a device.
5249 * @phba: pointer to lpfc hba data structure.
5251 * This routine is invoked to free the driver hba data structure with an
5255 lpfc_hba_free(struct lpfc_hba *phba)
5257 /* Release the driver assigned board number */
5258 idr_remove(&lpfc_hba_index, phba->brd_no);
5265 * lpfc_create_shost - Create hba physical port with associated scsi host.
5266 * @phba: pointer to lpfc hba data structure.
5268 * This routine is invoked to create HBA physical port and associate a SCSI host with it.
5273 * other values - error
5276 lpfc_create_shost(struct lpfc_hba *phba)
5278 struct lpfc_vport *vport;
5279 struct Scsi_Host *shost;
5281 /* Initialize HBA FC structure */
5282 phba->fc_edtov = FF_DEF_EDTOV;
5283 phba->fc_ratov = FF_DEF_RATOV;
5284 phba->fc_altov = FF_DEF_ALTOV;
5285 phba->fc_arbtov = FF_DEF_ARBTOV;
5287 atomic_set(&phba->sdev_cnt, 0);
5288 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
5292 shost = lpfc_shost_from_vport(vport);
5293 phba->pport = vport;
5294 lpfc_debugfs_initialize(vport);
5295 /* Save the SCSI host reference in the driver's device private data */
5296 pci_set_drvdata(phba->pcidev, shost);
5302 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
5303 * @phba: pointer to lpfc hba data structure.
5305 * This routine is invoked to destroy HBA physical port and the associated SCSI host.
5309 lpfc_destroy_shost(struct lpfc_hba *phba)
5311 struct lpfc_vport *vport = phba->pport;
5313 /* Destroy the physical port associated with the SCSI host */
5314 destroy_port(vport);
5320 * lpfc_setup_bg - Setup Block guard structures and debug areas.
5321 * @phba: pointer to lpfc hba data structure.
5322 * @shost: the shost to be used to detect Block guard settings.
5324 * This routine sets up the local Block guard protocol settings for @shost.
5325 * This routine also allocates memory for debugging bg buffers.
5328 lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
5331 if (lpfc_prot_mask && lpfc_prot_guard) {
5332 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5333 "1478 Registering BlockGuard with the "
5335 scsi_host_set_prot(shost, lpfc_prot_mask);
5336 scsi_host_set_guard(shost, lpfc_prot_guard);
5338 if (!_dump_buf_data) {
5340 spin_lock_init(&_dump_buf_lock);
5341 _dump_buf_data =
5342 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
5343 if (_dump_buf_data) {
5344 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5345 "9043 BLKGRD: allocated %d pages for "
5346 "_dump_buf_data at 0x%p\n",
5347 (1 << pagecnt), _dump_buf_data);
5348 _dump_buf_data_order = pagecnt;
5349 memset(_dump_buf_data, 0,
5350 ((1 << PAGE_SHIFT) << pagecnt));
5355 if (!_dump_buf_data_order)
5356 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5357 "9044 BLKGRD: ERROR unable to allocate "
5358 "memory for hexdump\n");
5360 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5361 "9045 BLKGRD: already allocated _dump_buf_data=0x%p"
5362 "\n", _dump_buf_data);
5363 if (!_dump_buf_dif) {
5365 _dump_buf_dif =
5366 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
5367 if (_dump_buf_dif) {
5368 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5369 "9046 BLKGRD: allocated %d pages for "
5370 "_dump_buf_dif at 0x%p\n",
5371 (1 << pagecnt), _dump_buf_dif);
5372 _dump_buf_dif_order = pagecnt;
5373 memset(_dump_buf_dif, 0,
5374 ((1 << PAGE_SHIFT) << pagecnt));
5379 if (!_dump_buf_dif_order)
5380 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5381 "9047 BLKGRD: ERROR unable to allocate "
5382 "memory for hexdump\n");
5384 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5385 "9048 BLKGRD: already allocated _dump_buf_dif=0x%p\n",
5390 * lpfc_post_init_setup - Perform necessary device post initialization setup.
5391 * @phba: pointer to lpfc hba data structure.
5393 * This routine is invoked to perform all the necessary post initialization
5394 * setup for the device.
5397 lpfc_post_init_setup(struct lpfc_hba *phba)
5399 struct Scsi_Host *shost;
5400 struct lpfc_adapter_event_header adapter_event;
5402 /* Get the default values for Model Name and Description */
5403 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
5406 * hba setup may have changed the hba_queue_depth so we need to
5407 * adjust the value of can_queue.
5409 shost = pci_get_drvdata(phba->pcidev);
5410 shost->can_queue = phba->cfg_hba_queue_depth - 10;
5411 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
5412 lpfc_setup_bg(phba, shost);
5414 lpfc_host_attrib_init(shost);
5416 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
5417 spin_lock_irq(shost->host_lock);
5418 lpfc_poll_start_timer(phba);
5419 spin_unlock_irq(shost->host_lock);
5422 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5423 "0428 Perform SCSI scan\n");
5424 /* Send board arrival event to upper layer */
5425 adapter_event.event_type = FC_REG_ADAPTER_EVENT;
5426 adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
5427 fc_host_post_vendor_event(shost, fc_get_event_number(),
5428 sizeof(adapter_event),
5429 (char *) &adapter_event,
5435 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
5436 * @phba: pointer to lpfc hba data structure.
5438 * This routine is invoked to set up the PCI device memory space for device
5439 * with SLI-3 interface spec.
5443 * other values - error
5446 lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
5448 struct pci_dev *pdev;
5449 unsigned long bar0map_len, bar2map_len;
5452 int error = -ENODEV;
5454 /* Obtain PCI device reference */
5458 pdev = phba->pcidev;
5460 /* Set the device DMA mask size */
5461 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
5462 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
5463 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
5464 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
5469 /* Get the bus address of Bar0 and Bar2 and the number of bytes
5470 * required by each mapping.
5472 phba->pci_bar0_map = pci_resource_start(pdev, 0);
5473 bar0map_len = pci_resource_len(pdev, 0);
5475 phba->pci_bar2_map = pci_resource_start(pdev, 2);
5476 bar2map_len = pci_resource_len(pdev, 2);
5478 /* Map HBA SLIM to a kernel virtual address. */
5479 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
5480 if (!phba->slim_memmap_p) {
5481 dev_printk(KERN_ERR, &pdev->dev,
5482 "ioremap failed for SLIM memory.\n");
5486 /* Map HBA Control Registers to a kernel virtual address. */
5487 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
5488 if (!phba->ctrl_regs_memmap_p) {
5489 dev_printk(KERN_ERR, &pdev->dev,
5490 "ioremap failed for HBA control registers.\n");
5491 goto out_iounmap_slim;
5494 /* Allocate memory for SLI-2 structures */
5495 phba->slim2p.virt = dma_alloc_coherent(&pdev->dev,
5499 if (!phba->slim2p.virt)
5502 memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE);
5503 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
5504 phba->mbox_ext = (phba->slim2p.virt +
5505 offsetof(struct lpfc_sli2_slim, mbx_ext_words));
5506 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
5507 phba->IOCBs = (phba->slim2p.virt +
5508 offsetof(struct lpfc_sli2_slim, IOCBs));
5510 phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
5511 lpfc_sli_hbq_size(),
5512 &phba->hbqslimp.phys,
5514 if (!phba->hbqslimp.virt)
5517 hbq_count = lpfc_sli_hbq_count();
5518 ptr = phba->hbqslimp.virt;
5519 for (i = 0; i < hbq_count; ++i) {
5520 phba->hbqs[i].hbq_virt = ptr;
5521 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
5522 ptr += (lpfc_hbq_defs[i]->entry_count *
5523 sizeof(struct lpfc_hbq_entry));
5525 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
5526 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;
5528 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
5530 INIT_LIST_HEAD(&phba->rb_pend_list);
5532 phba->MBslimaddr = phba->slim_memmap_p;
5533 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
5534 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
5535 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
5536 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
5541 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
5542 phba->slim2p.virt, phba->slim2p.phys);
5544 iounmap(phba->ctrl_regs_memmap_p);
5546 iounmap(phba->slim_memmap_p);
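/*
 * Hedged sketch of the DMA-mask negotiation used in
 * lpfc_sli_pci_mem_setup() above: prefer 64-bit addressing, fall back
 * to 32-bit, and fail the probe only when neither can be set.
 * Illustrative only.
 */
static int example_set_dma_masks(struct pci_dev *pdev)
{
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
	    !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
		return 0;	/* full 64-bit DMA available */
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) &&
	    !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
		return 0;	/* fall back to 32-bit DMA */
	return -EIO;
}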
5552 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
5553 * @phba: pointer to lpfc hba data structure.
5555 * This routine is invoked to unset the PCI device memory space for device
5556 * with SLI-3 interface spec.
5559 lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
5561 struct pci_dev *pdev;
5563 /* Obtain PCI device reference */
5567 pdev = phba->pcidev;
5569 /* Free coherent DMA memory allocated */
5570 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
5571 phba->hbqslimp.virt, phba->hbqslimp.phys);
5572 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
5573 phba->slim2p.virt, phba->slim2p.phys);
5575 /* I/O memory unmap */
5576 iounmap(phba->ctrl_regs_memmap_p);
5577 iounmap(phba->slim_memmap_p);
5583 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
5584 * @phba: pointer to lpfc hba data structure.
5586 * This routine is invoked to wait for SLI4 device Power On Self Test (POST)
5587 * done and check status.
5589 * Return 0 if successful, otherwise -ENODEV.
5592 lpfc_sli4_post_status_check(struct lpfc_hba *phba)
5594 struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg;
5595 struct lpfc_register reg_data;
5596 int i, port_error = 0;
5599 memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
5600 memset(®_data, 0, sizeof(reg_data));
5601 if (!phba->sli4_hba.PSMPHRregaddr)
5604 /* Wait up to 30 seconds for the SLI Port POST done and ready */
5605 for (i = 0; i < 3000; i++) {
5606 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
5607 &portsmphr_reg.word0) ||
5608 (bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) {
5609 /* Port has a fatal POST error, break out */
5610 port_error = -ENODEV;
5613 if (LPFC_POST_STAGE_PORT_READY ==
5614 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg))
5620 * If there was a port error during POST, then don't proceed with
5621 * other register reads as the data may not be valid. Just exit.
5624 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5625 "1408 Port Failed POST - portsmphr=0x%x, "
5626 "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, "
5627 "scr2=x%x, hscratch=x%x, pstatus=x%x\n",
5628 portsmphr_reg.word0,
5629 bf_get(lpfc_port_smphr_perr, &portsmphr_reg),
5630 bf_get(lpfc_port_smphr_sfi, &portsmphr_reg),
5631 bf_get(lpfc_port_smphr_nip, &portsmphr_reg),
5632 bf_get(lpfc_port_smphr_ipc, &portsmphr_reg),
5633 bf_get(lpfc_port_smphr_scr1, &portsmphr_reg),
5634 bf_get(lpfc_port_smphr_scr2, &portsmphr_reg),
5635 bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg),
5636 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg));
5638 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5639 "2534 Device Info: SLIFamily=0x%x, "
5640 "SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, "
5641 "SLIHint_2=0x%x, FT=0x%x\n",
5642 bf_get(lpfc_sli_intf_sli_family,
5643 &phba->sli4_hba.sli_intf),
5644 bf_get(lpfc_sli_intf_slirev,
5645 &phba->sli4_hba.sli_intf),
5646 bf_get(lpfc_sli_intf_if_type,
5647 &phba->sli4_hba.sli_intf),
5648 bf_get(lpfc_sli_intf_sli_hint1,
5649 &phba->sli4_hba.sli_intf),
5650 bf_get(lpfc_sli_intf_sli_hint2,
5651 &phba->sli4_hba.sli_intf),
5652 bf_get(lpfc_sli_intf_func_type,
5653 &phba->sli4_hba.sli_intf));
5655 * Check for other Port errors during the initialization
5656 * process. Fail the load if the port did not come up correctly.
5659 if_type = bf_get(lpfc_sli_intf_if_type,
5660 &phba->sli4_hba.sli_intf);
5662 case LPFC_SLI_INTF_IF_TYPE_0:
5663 phba->sli4_hba.ue_mask_lo =
5664 readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr);
5665 phba->sli4_hba.ue_mask_hi =
5666 readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr);
5668 readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
5670 readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
5671 if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
5672 (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
5673 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5674 "1422 Unrecoverable Error "
5675 "Detected during POST "
5676 "uerr_lo_reg=0x%x, "
5677 "uerr_hi_reg=0x%x, "
5678 "ue_mask_lo_reg=0x%x, "
5679 "ue_mask_hi_reg=0x%x\n",
5682 phba->sli4_hba.ue_mask_lo,
5683 phba->sli4_hba.ue_mask_hi);
5684 port_error = -ENODEV;
5687 case LPFC_SLI_INTF_IF_TYPE_2:
5688 /* Final checks. The port status should be clean. */
5689 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
5691 (bf_get(lpfc_sliport_status_err, &reg_data) &&
5692 !bf_get(lpfc_sliport_status_rn, &reg_data))) {
5693 phba->work_status[0] =
5694 readl(phba->sli4_hba.u.if_type2.
5696 phba->work_status[1] =
5697 readl(phba->sli4_hba.u.if_type2.
5699 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5700 "2888 Port Error Detected "
5702 "port status reg 0x%x, "
5703 "port_smphr reg 0x%x, "
5704 "error 1=0x%x, error 2=0x%x\n",
5706 portsmphr_reg.word0,
5707 phba->work_status[0],
5708 phba->work_status[1]);
5709 port_error = -ENODEV;
5712 case LPFC_SLI_INTF_IF_TYPE_1:
5721 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
5722 * @phba: pointer to lpfc hba data structure.
5723 * @if_type: The SLI4 interface type getting configured.
5725 * This routine is invoked to set up the SLI4 BAR0 PCI config space register memory map.
5729 lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
5732 case LPFC_SLI_INTF_IF_TYPE_0:
5733 phba->sli4_hba.u.if_type0.UERRLOregaddr =
5734 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO;
5735 phba->sli4_hba.u.if_type0.UERRHIregaddr =
5736 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI;
5737 phba->sli4_hba.u.if_type0.UEMASKLOregaddr =
5738 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO;
5739 phba->sli4_hba.u.if_type0.UEMASKHIregaddr =
5740 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI;
5741 phba->sli4_hba.SLIINTFregaddr =
5742 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
5744 case LPFC_SLI_INTF_IF_TYPE_2:
5745 phba->sli4_hba.u.if_type2.ERR1regaddr =
5746 phba->sli4_hba.conf_regs_memmap_p +
5747 LPFC_CTL_PORT_ER1_OFFSET;
5748 phba->sli4_hba.u.if_type2.ERR2regaddr =
5749 phba->sli4_hba.conf_regs_memmap_p +
5750 LPFC_CTL_PORT_ER2_OFFSET;
5751 phba->sli4_hba.u.if_type2.CTRLregaddr =
5752 phba->sli4_hba.conf_regs_memmap_p +
5753 LPFC_CTL_PORT_CTL_OFFSET;
5754 phba->sli4_hba.u.if_type2.STATUSregaddr =
5755 phba->sli4_hba.conf_regs_memmap_p +
5756 LPFC_CTL_PORT_STA_OFFSET;
5757 phba->sli4_hba.SLIINTFregaddr =
5758 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
5759 phba->sli4_hba.PSMPHRregaddr =
5760 phba->sli4_hba.conf_regs_memmap_p +
5761 LPFC_CTL_PORT_SEM_OFFSET;
5762 phba->sli4_hba.RQDBregaddr =
5763 phba->sli4_hba.conf_regs_memmap_p + LPFC_RQ_DOORBELL;
5764 phba->sli4_hba.WQDBregaddr =
5765 phba->sli4_hba.conf_regs_memmap_p + LPFC_WQ_DOORBELL;
5766 phba->sli4_hba.EQCQDBregaddr =
5767 phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
5768 phba->sli4_hba.MQDBregaddr =
5769 phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL;
5770 phba->sli4_hba.BMBXregaddr =
5771 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
5773 case LPFC_SLI_INTF_IF_TYPE_1:
5775 dev_printk(KERN_ERR, &phba->pcidev->dev,
5776 "FATAL - unsupported SLI4 interface type - %d\n",
5783 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
5784 * @phba: pointer to lpfc hba data structure.
5786 * This routine is invoked to set up the SLI4 BAR1 control status register (CSR) memory map.
5790 lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba)
5792 phba->sli4_hba.PSMPHRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5793 LPFC_SLIPORT_IF0_SMPHR;
5794 phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5796 phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5798 phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5803 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
5804 * @phba: pointer to lpfc hba data structure.
5805 * @vf: virtual function number
5807 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
5808 * based on the given virtual function number, @vf.
5810 * Return 0 if successful, otherwise -ENODEV.
5813 lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
5815 if (vf > LPFC_VIR_FUNC_MAX)
5818 phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5819 vf * LPFC_VFR_PAGE_SIZE + LPFC_RQ_DOORBELL);
5820 phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5821 vf * LPFC_VFR_PAGE_SIZE + LPFC_WQ_DOORBELL);
5822 phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5823 vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL);
5824 phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5825 vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
5826 phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5827 vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
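/*
 * Worked example (the 4KB value of LPFC_VFR_PAGE_SIZE is an assumption
 * for illustration): for vf == 2, every doorbell above lands in the
 * third 4KB page of the BAR2 mapping, e.g. the RQ doorbell at
 * drbl_regs_memmap_p + 0x2000 + LPFC_RQ_DOORBELL.
 */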
5832 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
5833 * @phba: pointer to lpfc hba data structure.
5835 * This routine is invoked to create the bootstrap mailbox
5836 * region consistent with the SLI-4 interface spec. This
5837 * routine allocates all memory necessary to communicate
5838 * mailbox commands to the port and sets up all alignment
5839 * needs. No locks are expected to be held when calling this routine.
5844 * -ENOMEM - could not allocate memory.
5847 lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
5850 struct lpfc_dmabuf *dmabuf;
5851 struct dma_address *dma_address;
5855 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5860 * The bootstrap mailbox region is comprised of 2 parts
5861 * plus an alignment restriction of 16 bytes.
5863 bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
5864 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
5868 if (!dmabuf->virt) {
5872 memset(dmabuf->virt, 0, bmbx_size);
5875 * Initialize the bootstrap mailbox pointers now so that the register
5876 * operations are simple later. The mailbox dma address is required
5877 * to be 16-byte aligned. Also align the virtual memory as each
5878 * mailbox is copied into the bmbx mailbox region before issuing the
5879 * command to the port.
5881 phba->sli4_hba.bmbx.dmabuf = dmabuf;
5882 phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
5884 phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
5885 LPFC_ALIGN_16_BYTE);
5886 phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
5887 LPFC_ALIGN_16_BYTE);
5890 * Set the high and low physical addresses now. The SLI4 alignment
5891 * requirement is 16 bytes and the mailbox is posted to the port
5892 * as two 30-bit addresses. The other data is a bit marking whether
5893 * the 30-bit address is the high or low address.
5894 * Upcast bmbx aphys to 64bits so shift instruction compiles
5895 * clean on 32 bit machines.
5897 dma_address = &phba->sli4_hba.bmbx.dma_address;
5898 phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
5899 pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
5900 dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
5901 LPFC_BMBX_BIT1_ADDR_HI);
5903 pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
5904 dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
5905 LPFC_BMBX_BIT1_ADDR_LO);
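	/*
	 * Worked example of the split above: addr_hi carries physical
	 * address bits 63:34 and addr_lo carries bits 33:4; the low 4
	 * bits are guaranteed zero by the 16-byte alignment. Each half
	 * is shifted left by 2 and tagged with its BIT1 marker so the
	 * port can tell which 30-bit half a register write carries.
	 */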
5910 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
5911 * @phba: pointer to lpfc hba data structure.
5913 * This routine is invoked to teardown the bootstrap mailbox
5914 * region and release all host resources. This routine requires
5915 * the caller to ensure all outstanding mailbox commands have completed, no
5916 * additional mailbox commands are sent, and interrupts are disabled
5917 * before calling this routine.
5921 lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
5923 dma_free_coherent(&phba->pcidev->dev,
5924 phba->sli4_hba.bmbx.bmbx_size,
5925 phba->sli4_hba.bmbx.dmabuf->virt,
5926 phba->sli4_hba.bmbx.dmabuf->phys);
5928 kfree(phba->sli4_hba.bmbx.dmabuf);
5929 memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
5933 * lpfc_sli4_read_config - Get the config parameters.
5934 * @phba: pointer to lpfc hba data structure.
5936 * This routine is invoked to read the configuration parameters from the HBA.
5937 * The configuration parameters are used to set the base and maximum values
5938 * for RPIs, XRIs, VPIs, VFIs, and FCFIs. These values also affect the resource
5939 * allocation for the port.
5943 * -ENOMEM - No available memory
5944 * -EIO - The mailbox failed to complete successfully.
5947 lpfc_sli4_read_config(struct lpfc_hba *phba)
5950 struct lpfc_mbx_read_config *rd_config;
5951 union lpfc_sli4_cfg_shdr *shdr;
5952 uint32_t shdr_status, shdr_add_status;
5953 struct lpfc_mbx_get_func_cfg *get_func_cfg;
5954 struct lpfc_rsrc_desc_fcfcoe *desc;
5955 uint32_t desc_count;
5956 int length, i, rc = 0;
5958 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5960 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5961 "2011 Unable to allocate memory for issuing "
5962 "SLI_CONFIG_SPECIAL mailbox command\n");
5966 lpfc_read_config(phba, pmb);
5968 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
5969 if (rc != MBX_SUCCESS) {
5970 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5971 "2012 Mailbox failed , mbxCmd x%x "
5972 "READ_CONFIG, mbxStatus x%x\n",
5973 bf_get(lpfc_mqe_command, &pmb->u.mqe),
5974 bf_get(lpfc_mqe_status, &pmb->u.mqe));
5977 rd_config = &pmb->u.mqe.un.rd_config;
5978 phba->sli4_hba.extents_in_use =
5979 bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
5980 phba->sli4_hba.max_cfg_param.max_xri =
5981 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
5982 phba->sli4_hba.max_cfg_param.xri_base =
5983 bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
5984 phba->sli4_hba.max_cfg_param.max_vpi =
5985 bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
5986 phba->sli4_hba.max_cfg_param.vpi_base =
5987 bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
5988 phba->sli4_hba.max_cfg_param.max_rpi =
5989 bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
5990 phba->sli4_hba.max_cfg_param.rpi_base =
5991 bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
5992 phba->sli4_hba.max_cfg_param.max_vfi =
5993 bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
5994 phba->sli4_hba.max_cfg_param.vfi_base =
5995 bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
5996 phba->sli4_hba.max_cfg_param.max_fcfi =
5997 bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
5998 phba->sli4_hba.max_cfg_param.max_eq =
5999 bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
6000 phba->sli4_hba.max_cfg_param.max_rq =
6001 bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
6002 phba->sli4_hba.max_cfg_param.max_wq =
6003 bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
6004 phba->sli4_hba.max_cfg_param.max_cq =
6005 bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
6006 phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
6007 phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
6008 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
6009 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
6010 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
6011 phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
6012 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
6013 phba->max_vports = phba->max_vpi;
6014 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6015 "2003 cfg params Extents? %d "
6016 "XRI(B:%d M:%d), "
6017 "VPI(B:%d M:%d) "
6018 "VFI(B:%d M:%d) "
6019 "RPI(B:%d M:%d) "
6020 "FCFI(Count:%d)\n",
6021 phba->sli4_hba.extents_in_use,
6022 phba->sli4_hba.max_cfg_param.xri_base,
6023 phba->sli4_hba.max_cfg_param.max_xri,
6024 phba->sli4_hba.max_cfg_param.vpi_base,
6025 phba->sli4_hba.max_cfg_param.max_vpi,
6026 phba->sli4_hba.max_cfg_param.vfi_base,
6027 phba->sli4_hba.max_cfg_param.max_vfi,
6028 phba->sli4_hba.max_cfg_param.rpi_base,
6029 phba->sli4_hba.max_cfg_param.max_rpi,
6030 phba->sli4_hba.max_cfg_param.max_fcfi);
6036 /* Reset the DFT_HBA_Q_DEPTH to the max xri */
6037 if (phba->cfg_hba_queue_depth >
6038 (phba->sli4_hba.max_cfg_param.max_xri -
6039 lpfc_sli4_get_els_iocb_cnt(phba)))
6040 phba->cfg_hba_queue_depth =
6041 phba->sli4_hba.max_cfg_param.max_xri -
6042 lpfc_sli4_get_els_iocb_cnt(phba);
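/*
 * Worked example of the clamp above, with assumed numbers: if the port
 * reports max_xri = 1024 and lpfc_sli4_get_els_iocb_cnt() reserves 64
 * XRIs for ELS, any configured hba_queue_depth above 960 is cut back to
 * 960 so that every outstanding command can be backed by an XRI.
 */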
6044 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
6045 LPFC_SLI_INTF_IF_TYPE_2)
6048 /* get the pf# and vf# for SLI4 if_type 2 port */
6049 length = (sizeof(struct lpfc_mbx_get_func_cfg) -
6050 sizeof(struct lpfc_sli4_cfg_mhdr));
6051 lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON,
6052 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG,
6053 length, LPFC_SLI4_MBX_EMBED);
6055 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
6056 shdr = (union lpfc_sli4_cfg_shdr *)
6057 &pmb->u.mqe.un.sli4_config.header.cfg_shdr;
6058 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6059 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
6060 if (rc || shdr_status || shdr_add_status) {
6061 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6062 "3026 Mailbox failed, mbxCmd x%x "
6063 "GET_FUNCTION_CONFIG, mbxStatus x%x\n",
6064 bf_get(lpfc_mqe_command, &pmb->u.mqe),
6065 bf_get(lpfc_mqe_status, &pmb->u.mqe));
6070 /* search for fc_fcoe resource descriptor */
6071 get_func_cfg = &pmb->u.mqe.un.get_func_cfg;
6072 desc_count = get_func_cfg->func_cfg.rsrc_desc_count;
6074 for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
6075 desc = (struct lpfc_rsrc_desc_fcfcoe *)
6076 &get_func_cfg->func_cfg.desc[i];
6077 if (LPFC_RSRC_DESC_TYPE_FCFCOE ==
6078 bf_get(lpfc_rsrc_desc_pcie_type, desc)) {
6079 phba->sli4_hba.iov.pf_number =
6080 bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc);
6081 phba->sli4_hba.iov.vf_number =
6082 bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc);
6087 if (i < LPFC_RSRC_DESC_MAX_NUM)
6088 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6089 "3027 GET_FUNCTION_CONFIG: pf_number:%d, "
6090 "vf_number:%d\n", phba->sli4_hba.iov.pf_number,
6091 phba->sli4_hba.iov.vf_number);
6093 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6094 "3028 GET_FUNCTION_CONFIG: failed to find "
6095 "Resource Descriptor:x%x\n",
6096 LPFC_RSRC_DESC_TYPE_FCFCOE);
6101 mempool_free(pmb, phba->mbox_mem_pool);
6106 * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port.
6107 * @phba: pointer to lpfc hba data structure.
6109 * This routine is invoked to set up the port-side endian order when
6110 * the port if_type is 0. This routine has no function for other
6111 * if_types.
6115 * -ENOMEM - No available memory
6116 * -EIO - The mailbox failed to complete successfully.
6119 lpfc_setup_endian_order(struct lpfc_hba *phba)
6121 LPFC_MBOXQ_t *mboxq;
6122 uint32_t if_type, rc = 0;
6123 uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
6124 HOST_ENDIAN_HIGH_WORD1};
6126 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
6128 case LPFC_SLI_INTF_IF_TYPE_0:
6129 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
6132 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6133 "0492 Unable to allocate memory for "
6134 "issuing SLI_CONFIG_SPECIAL mailbox "
6140 * The SLI4_CONFIG_SPECIAL mailbox command requires the first
6141 * two words to contain special data values and no other data.
6143 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
6144 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
6145 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6146 if (rc != MBX_SUCCESS) {
6147 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6148 "0493 SLI_CONFIG_SPECIAL mailbox "
6149 "failed with status x%x\n",
6153 mempool_free(mboxq, phba->mbox_mem_pool);
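/*
 * Informal note (an assumption, not taken from the SLI4 spec text): the
 * two HOST_ENDIAN_* words are fixed byte patterns, so the port can compare
 * the bytes it actually receives against the expected pattern and infer
 * the host's byte order from any swap it observes.
 */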
6155 case LPFC_SLI_INTF_IF_TYPE_2:
6156 case LPFC_SLI_INTF_IF_TYPE_1:
6164 * lpfc_sli4_queue_verify - Verify and update EQ and CQ counts
6165 * @phba: pointer to lpfc hba data structure.
6167 * This routine is invoked to check the user-settable queue counts for EQs and
6168 * CQs. After this routine is called, the counts will be set to valid values that
6169 * adhere to the constraints of the system's interrupt vectors and the port's
6174 * -ENOMEM - No available memory
6177 lpfc_sli4_queue_verify(struct lpfc_hba *phba)
6179 int cfg_fcp_wq_count;
6180 int cfg_fcp_eq_count;
6183 * Sanity check for configured queue parameters against the run-time
6184 * device parameters
6187 /* Sanity check on FCP fast-path WQ parameters */
6188 cfg_fcp_wq_count = phba->cfg_fcp_wq_count;
6189 if (cfg_fcp_wq_count >
6190 (phba->sli4_hba.max_cfg_param.max_wq - LPFC_SP_WQN_DEF)) {
6191 cfg_fcp_wq_count = phba->sli4_hba.max_cfg_param.max_wq -
6193 if (cfg_fcp_wq_count < LPFC_FP_WQN_MIN) {
6194 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6195 "2581 Not enough WQs (%d) from "
6196 "the pci function for supporting "
6198 phba->sli4_hba.max_cfg_param.max_wq,
6199 phba->cfg_fcp_wq_count);
6202 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6203 "2582 Not enough WQs (%d) from the pci "
6204 "function for supporting the requested "
6205 "FCP WQs (%d), the actual FCP WQs can "
6206 "be supported: %d\n",
6207 phba->sli4_hba.max_cfg_param.max_wq,
6208 phba->cfg_fcp_wq_count, cfg_fcp_wq_count);
6210 /* The actual number of FCP work queues adopted */
6211 phba->cfg_fcp_wq_count = cfg_fcp_wq_count;
6213 /* Sanity check on FCP fast-path EQ parameters */
6214 cfg_fcp_eq_count = phba->cfg_fcp_eq_count;
6215 if (cfg_fcp_eq_count >
6216 (phba->sli4_hba.max_cfg_param.max_eq - LPFC_SP_EQN_DEF)) {
6217 cfg_fcp_eq_count = phba->sli4_hba.max_cfg_param.max_eq -
6219 if (cfg_fcp_eq_count < LPFC_FP_EQN_MIN) {
6220 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6221 "2574 Not enough EQs (%d) from the "
6222 "pci function for supporting FCP "
6224 phba->sli4_hba.max_cfg_param.max_eq,
6225 phba->cfg_fcp_eq_count);
6228 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6229 "2575 Not enough EQs (%d) from the pci "
6230 "function for supporting the requested "
6231 "FCP EQs (%d), the actual FCP EQs can "
6232 "be supported: %d\n",
6233 phba->sli4_hba.max_cfg_param.max_eq,
6234 phba->cfg_fcp_eq_count, cfg_fcp_eq_count);
6236 /* It does not make sense to have more EQs than WQs */
6237 if (cfg_fcp_eq_count > phba->cfg_fcp_wq_count) {
6238 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6239 "2593 The FCP EQ count(%d) cannot be greater "
6240 "than the FCP WQ count(%d), limiting the "
6241 "FCP EQ count to %d\n", cfg_fcp_eq_count,
6242 phba->cfg_fcp_wq_count,
6243 phba->cfg_fcp_wq_count);
6244 cfg_fcp_eq_count = phba->cfg_fcp_wq_count;
6246 /* The actual number of FCP event queues adopted */
6247 phba->cfg_fcp_eq_count = cfg_fcp_eq_count;
6248 /* The overall number of event queues used */
6249 phba->sli4_hba.cfg_eqn = phba->cfg_fcp_eq_count + LPFC_SP_EQN_DEF;
6251 /* Get EQ depth from module parameter, fake the default for now */
6252 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
6253 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
6255 /* Get CQ depth from module parameter, fake the default for now */
6256 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
6257 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
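/*
 * Worked example of the verification above, with assumed limits: a port
 * reporting max_wq = 32 and max_eq = 8, with single slow-path WQ/EQ
 * reservations (LPFC_SP_WQN_DEF = LPFC_SP_EQN_DEF = 1 assumed), would
 * clamp a request of 40 FCP WQs to 31 and 12 FCP EQs to 7; cfg_eqn then
 * becomes 7 + 1 to cover the slow-path EQ.
 */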
6265 * lpfc_sli4_queue_create - Create all the SLI4 queues
6266 * @phba: pointer to lpfc hba data structure.
6268 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
6269 * operation. For each SLI4 queue type, the parameters such as queue entry
6270 * count (queue depth) shall be taken from the module parameter. For now,
6271 * we just use some constant number as a placeholder.
6275 * -ENOMEM - No available memory
6276 * -EIO - The mailbox failed to complete successfully.
6279 lpfc_sli4_queue_create(struct lpfc_hba *phba)
6281 struct lpfc_queue *qdesc;
6282 int fcp_eqidx, fcp_cqidx, fcp_wqidx;
6285 * Create Event Queues (EQs)
6288 /* Create slow path event queue */
6289 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
6290 phba->sli4_hba.eq_ecount);
6292 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6293 "0496 Failed allocate slow-path EQ\n");
6296 phba->sli4_hba.sp_eq = qdesc;
6299 * Create fast-path FCP Event Queue(s). The cfg_fcp_eq_count can be
6300 * zero whenever there is exactly one interrupt vector. This is not
6301 * an error.
6303 if (phba->cfg_fcp_eq_count) {
6304 phba->sli4_hba.fp_eq = kzalloc((sizeof(struct lpfc_queue *) *
6305 phba->cfg_fcp_eq_count), GFP_KERNEL);
6306 if (!phba->sli4_hba.fp_eq) {
6307 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6308 "2576 Failed allocate memory for "
6309 "fast-path EQ record array\n");
6310 goto out_free_sp_eq;
6313 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
6314 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
6315 phba->sli4_hba.eq_ecount);
6317 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6318 "0497 Failed allocate fast-path EQ\n");
6319 goto out_free_fp_eq;
6321 phba->sli4_hba.fp_eq[fcp_eqidx] = qdesc;
6325 * Create Complete Queues (CQs)
6328 /* Create slow-path Mailbox Command Complete Queue */
6329 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
6330 phba->sli4_hba.cq_ecount);
6332 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6333 "0500 Failed allocate slow-path mailbox CQ\n");
6334 goto out_free_fp_eq;
6336 phba->sli4_hba.mbx_cq = qdesc;
6338 /* Create slow-path ELS Complete Queue */
6339 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
6340 phba->sli4_hba.cq_ecount);
6342 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6343 "0501 Failed allocate slow-path ELS CQ\n");
6344 goto out_free_mbx_cq;
6346 phba->sli4_hba.els_cq = qdesc;
6350 * Create fast-path FCP Completion Queue(s), one-to-one with FCP EQs.
6351 * If there are no FCP EQs then create exactly one FCP CQ.
6353 if (phba->cfg_fcp_eq_count)
6354 phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
6355 phba->cfg_fcp_eq_count),
6358 phba->sli4_hba.fcp_cq = kzalloc(sizeof(struct lpfc_queue *),
6360 if (!phba->sli4_hba.fcp_cq) {
6361 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6362 "2577 Failed allocate memory for fast-path "
6363 "CQ record array\n");
6364 goto out_free_els_cq;
6368 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
6369 phba->sli4_hba.cq_ecount);
6371 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6372 "0499 Failed allocate fast-path FCP "
6373 "CQ (%d)\n", fcp_cqidx);
6374 goto out_free_fcp_cq;
6376 phba->sli4_hba.fcp_cq[fcp_cqidx] = qdesc;
6377 } while (++fcp_cqidx < phba->cfg_fcp_eq_count);
6379 /* Create Mailbox Command Queue */
6380 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
6381 phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
6383 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize,
6384 phba->sli4_hba.mq_ecount);
6386 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6387 "0505 Failed allocate slow-path MQ\n");
6388 goto out_free_fcp_cq;
6390 phba->sli4_hba.mbx_wq = qdesc;
6393 * Create all the Work Queues (WQs)
6395 phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
6396 phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
6398 /* Create slow-path ELS Work Queue */
6399 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
6400 phba->sli4_hba.wq_ecount);
6402 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6403 "0504 Failed allocate slow-path ELS WQ\n");
6404 goto out_free_mbx_wq;
6406 phba->sli4_hba.els_wq = qdesc;
6408 /* Create fast-path FCP Work Queue(s) */
6409 phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) *
6410 phba->cfg_fcp_wq_count), GFP_KERNEL);
6411 if (!phba->sli4_hba.fcp_wq) {
6412 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6413 "2578 Failed allocate memory for fast-path "
6414 "WQ record array\n");
6415 goto out_free_els_wq;
6417 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
6418 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
6419 phba->sli4_hba.wq_ecount);
6421 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6422 "0503 Failed allocate fast-path FCP "
6423 "WQ (%d)\n", fcp_wqidx);
6424 goto out_free_fcp_wq;
6426 phba->sli4_hba.fcp_wq[fcp_wqidx] = qdesc;
6430 * Create Receive Queue (RQ)
6432 phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
6433 phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
6435 /* Create Receive Queue for header */
6436 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
6437 phba->sli4_hba.rq_ecount);
6439 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6440 "0506 Failed allocate receive HRQ\n");
6441 goto out_free_fcp_wq;
6443 phba->sli4_hba.hdr_rq = qdesc;
6445 /* Create Receive Queue for data */
6446 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
6447 phba->sli4_hba.rq_ecount);
6449 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6450 "0507 Failed allocate receive DRQ\n");
6451 goto out_free_hdr_rq;
6453 phba->sli4_hba.dat_rq = qdesc;
6458 lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
6459 phba->sli4_hba.hdr_rq = NULL;
6461 for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) {
6462 lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_wqidx]);
6463 phba->sli4_hba.fcp_wq[fcp_wqidx] = NULL;
6465 kfree(phba->sli4_hba.fcp_wq);
6467 lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
6468 phba->sli4_hba.els_wq = NULL;
6470 lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
6471 phba->sli4_hba.mbx_wq = NULL;
6473 for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) {
6474 lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_cqidx]);
6475 phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL;
6477 kfree(phba->sli4_hba.fcp_cq);
6479 lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
6480 phba->sli4_hba.els_cq = NULL;
6482 lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
6483 phba->sli4_hba.mbx_cq = NULL;
6485 for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) {
6486 lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_eqidx]);
6487 phba->sli4_hba.fp_eq[fcp_eqidx] = NULL;
6489 kfree(phba->sli4_hba.fp_eq);
6491 lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
6492 phba->sli4_hba.sp_eq = NULL;
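/*
 * Illustrative sketch, not part of the driver: how the queue helpers in
 * this file pair up over a port's lifetime. Error handling beyond the
 * destroy call is elided.
 */
static int lpfc_example_queue_lifecycle(struct lpfc_hba *phba)
{
	int rc;

	rc = lpfc_sli4_queue_verify(phba);	/* clamp user queue counts */
	if (rc)
		return rc;
	rc = lpfc_sli4_queue_create(phba);	/* allocate host-side queues */
	if (rc)
		return rc;
	rc = lpfc_sli4_queue_setup(phba);	/* post the queues to the port */
	if (rc)
		lpfc_sli4_queue_destroy(phba);	/* free what create allocated */
	return rc;
}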
6498 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
6499 * @phba: pointer to lpfc hba data structure.
6501 * This routine is invoked to release all the SLI4 queues with the FCoE HBA
6506 * -ENOMEM - No available memory
6507 * -EIO - The mailbox failed to complete successfully.
6510 lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
6514 /* Release mailbox command work queue */
6515 lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
6516 phba->sli4_hba.mbx_wq = NULL;
6518 /* Release ELS work queue */
6519 lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
6520 phba->sli4_hba.els_wq = NULL;
6522 /* Release FCP work queue */
6523 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
6524 lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]);
6525 kfree(phba->sli4_hba.fcp_wq);
6526 phba->sli4_hba.fcp_wq = NULL;
6528 /* Release unsolicited receive queue */
6529 lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
6530 phba->sli4_hba.hdr_rq = NULL;
6531 lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
6532 phba->sli4_hba.dat_rq = NULL;
6534 /* Release ELS complete queue */
6535 lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
6536 phba->sli4_hba.els_cq = NULL;
6538 /* Release mailbox command complete queue */
6539 lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
6540 phba->sli4_hba.mbx_cq = NULL;
6542 /* Release FCP response complete queue */
6545 lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
6546 while (++fcp_qidx < phba->cfg_fcp_eq_count);
6547 kfree(phba->sli4_hba.fcp_cq);
6548 phba->sli4_hba.fcp_cq = NULL;
6550 /* Release fast-path event queue */
6551 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
6552 lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
6553 kfree(phba->sli4_hba.fp_eq);
6554 phba->sli4_hba.fp_eq = NULL;
6556 /* Release slow-path event queue */
6557 lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
6558 phba->sli4_hba.sp_eq = NULL;
6564 * lpfc_sli4_queue_setup - Set up all the SLI4 queues
6565 * @phba: pointer to lpfc hba data structure.
6567 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
6572 * -ENOMEM - No available memory
6573 * -EIO - The mailbox failed to complete successfully.
6576 lpfc_sli4_queue_setup(struct lpfc_hba *phba)
6579 int fcp_eqidx, fcp_cqidx, fcp_wqidx;
6580 int fcp_cq_index = 0;
6583 * Set up Event Queues (EQs)
6586 /* Set up slow-path event queue */
6587 if (!phba->sli4_hba.sp_eq) {
6588 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6589 "0520 Slow-path EQ not allocated\n");
6592 rc = lpfc_eq_create(phba, phba->sli4_hba.sp_eq,
6595 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6596 "0521 Failed setup of slow-path EQ: "
6600 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6601 "2583 Slow-path EQ setup: queue-id=%d\n",
6602 phba->sli4_hba.sp_eq->queue_id);
6604 /* Set up fast-path event queue */
6605 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
6606 if (!phba->sli4_hba.fp_eq[fcp_eqidx]) {
6607 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6608 "0522 Fast-path EQ (%d) not "
6609 "allocated\n", fcp_eqidx);
6610 goto out_destroy_fp_eq;
6612 rc = lpfc_eq_create(phba, phba->sli4_hba.fp_eq[fcp_eqidx],
6613 phba->cfg_fcp_imax);
6615 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6616 "0523 Failed setup of fast-path EQ "
6617 "(%d), rc = 0x%x\n", fcp_eqidx, rc);
6618 goto out_destroy_fp_eq;
6620 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6621 "2584 Fast-path EQ setup: "
6622 "queue[%d]-id=%d\n", fcp_eqidx,
6623 phba->sli4_hba.fp_eq[fcp_eqidx]->queue_id);
6627 * Set up Complete Queues (CQs)
6630 /* Set up slow-path MBOX Complete Queue as the first CQ */
6631 if (!phba->sli4_hba.mbx_cq) {
6632 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6633 "0528 Mailbox CQ not allocated\n");
6634 goto out_destroy_fp_eq;
6636 rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, phba->sli4_hba.sp_eq,
6637 LPFC_MCQ, LPFC_MBOX);
6639 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6640 "0529 Failed setup of slow-path mailbox CQ: "
6642 goto out_destroy_fp_eq;
6644 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6645 "2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n",
6646 phba->sli4_hba.mbx_cq->queue_id,
6647 phba->sli4_hba.sp_eq->queue_id);
6649 /* Set up slow-path ELS Complete Queue */
6650 if (!phba->sli4_hba.els_cq) {
6651 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6652 "0530 ELS CQ not allocated\n");
6653 goto out_destroy_mbx_cq;
6655 rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, phba->sli4_hba.sp_eq,
6656 LPFC_WCQ, LPFC_ELS);
6658 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6659 "0531 Failed setup of slow-path ELS CQ: "
6661 goto out_destroy_mbx_cq;
6663 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6664 "2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n",
6665 phba->sli4_hba.els_cq->queue_id,
6666 phba->sli4_hba.sp_eq->queue_id);
6668 /* Set up fast-path FCP Response Complete Queue */
6671 if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
6672 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6673 "0526 Fast-path FCP CQ (%d) not "
6674 "allocated\n", fcp_cqidx);
6675 goto out_destroy_fcp_cq;
6677 if (phba->cfg_fcp_eq_count)
6678 rc = lpfc_cq_create(phba,
6679 phba->sli4_hba.fcp_cq[fcp_cqidx],
6680 phba->sli4_hba.fp_eq[fcp_cqidx],
6681 LPFC_WCQ, LPFC_FCP);
6683 rc = lpfc_cq_create(phba,
6684 phba->sli4_hba.fcp_cq[fcp_cqidx],
6685 phba->sli4_hba.sp_eq,
6686 LPFC_WCQ, LPFC_FCP);
6688 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6689 "0527 Failed setup of fast-path FCP "
6690 "CQ (%d), rc = 0x%x\n", fcp_cqidx, rc);
6691 goto out_destroy_fcp_cq;
6693 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6694 "2588 FCP CQ setup: cq[%d]-id=%d, "
6695 "parent %seq[%d]-id=%d\n",
6697 phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id,
6698 (phba->cfg_fcp_eq_count) ? "" : "sp_",
6700 (phba->cfg_fcp_eq_count) ?
6701 phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id :
6702 phba->sli4_hba.sp_eq->queue_id);
6703 } while (++fcp_cqidx < phba->cfg_fcp_eq_count);
6706 * Set up all the Work Queues (WQs)
6709 /* Set up Mailbox Command Queue */
6710 if (!phba->sli4_hba.mbx_wq) {
6711 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6712 "0538 Slow-path MQ not allocated\n");
6713 goto out_destroy_fcp_cq;
6715 rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq,
6716 phba->sli4_hba.mbx_cq, LPFC_MBOX);
6718 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6719 "0539 Failed setup of slow-path MQ: "
6721 goto out_destroy_fcp_cq;
6723 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6724 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
6725 phba->sli4_hba.mbx_wq->queue_id,
6726 phba->sli4_hba.mbx_cq->queue_id);
6728 /* Set up slow-path ELS Work Queue */
6729 if (!phba->sli4_hba.els_wq) {
6730 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6731 "0536 Slow-path ELS WQ not allocated\n");
6732 goto out_destroy_mbx_wq;
6734 rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq,
6735 phba->sli4_hba.els_cq, LPFC_ELS);
6737 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6738 "0537 Failed setup of slow-path ELS WQ: "
6740 goto out_destroy_mbx_wq;
6742 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6743 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
6744 phba->sli4_hba.els_wq->queue_id,
6745 phba->sli4_hba.els_cq->queue_id);
6747 /* Set up fast-path FCP Work Queue */
6748 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
6749 if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) {
6750 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6751 "0534 Fast-path FCP WQ (%d) not "
6752 "allocated\n", fcp_wqidx);
6753 goto out_destroy_fcp_wq;
6755 rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx],
6756 phba->sli4_hba.fcp_cq[fcp_cq_index],
6759 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6760 "0535 Failed setup of fast-path FCP "
6761 "WQ (%d), rc = 0x%x\n", fcp_wqidx, rc);
6762 goto out_destroy_fcp_wq;
6764 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6765 "2591 FCP WQ setup: wq[%d]-id=%d, "
6766 "parent cq[%d]-id=%d\n",
6768 phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id,
6770 phba->sli4_hba.fcp_cq[fcp_cq_index]->queue_id);
6771 /* Round robin FCP Work Queue's Completion Queue assignment */
6772 if (phba->cfg_fcp_eq_count)
6773 fcp_cq_index = ((fcp_cq_index + 1) %
6774 phba->cfg_fcp_eq_count);
6778 * Create Receive Queue (RQ)
6780 if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
6781 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6782 "0540 Receive Queue not allocated\n");
6783 goto out_destroy_fcp_wq;
6786 lpfc_rq_adjust_repost(phba, phba->sli4_hba.hdr_rq, LPFC_ELS_HBQ);
6787 lpfc_rq_adjust_repost(phba, phba->sli4_hba.dat_rq, LPFC_ELS_HBQ);
6789 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
6790 phba->sli4_hba.els_cq, LPFC_USOL);
6792 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6793 "0541 Failed setup of Receive Queue: "
6795 goto out_destroy_fcp_wq;
6798 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6799 "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
6800 "parent cq-id=%d\n",
6801 phba->sli4_hba.hdr_rq->queue_id,
6802 phba->sli4_hba.dat_rq->queue_id,
6803 phba->sli4_hba.els_cq->queue_id);
6807 for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--)
6808 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]);
6809 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
6811 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
6813 for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
6814 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
6815 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
6817 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
6819 for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--)
6820 lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_eqidx]);
6821 lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
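/*
 * Worked example of the WQ-to-CQ round robin above, with assumed counts
 * cfg_fcp_wq_count = 4 and cfg_fcp_eq_count = 2: FCP WQ0 and WQ2 post
 * completions to FCP CQ0, while WQ1 and WQ3 post to CQ1, spreading the
 * completion load across both EQ/CQ pairs.
 */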
6827 * lpfc_sli4_queue_unset - Unset all the SLI4 queues
6828 * @phba: pointer to lpfc hba data structure.
6830 * This routine is invoked to unset all the SLI4 queues with the FCoE HBA
6835 * -ENOMEM - No available memory
6836 * -EIO - The mailbox failed to complete successfully.
6839 lpfc_sli4_queue_unset(struct lpfc_hba *phba)
6843 /* Unset mailbox command work queue */
6844 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
6845 /* Unset ELS work queue */
6846 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
6847 /* Unset unsolicited receive queue */
6848 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
6849 /* Unset FCP work queue */
6850 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
6851 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]);
6852 /* Unset mailbox command complete queue */
6853 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
6854 /* Unset ELS complete queue */
6855 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
6856 /* Unset FCP response complete queue */
6859 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
6860 } while (++fcp_qidx < phba->cfg_fcp_eq_count);
6861 /* Unset fast-path event queue */
6862 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
6863 lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]);
6864 /* Unset slow-path event queue */
6865 lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
6869 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
6870 * @phba: pointer to lpfc hba data structure.
6872 * This routine is invoked to allocate and set up a pool of completion queue
6873 * events. The body of the completion queue event is a completion queue
6874 * entry (CQE). For now, this pool is used for the interrupt service routine to queue
6875 * the following HBA completion queue events for the worker thread to process:
6876 * - Mailbox asynchronous events
6877 * - Receive queue completion unsolicited events
6878 * Later, this can be used for all the slow-path events.
6882 * -ENOMEM - No available memory
6885 lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
6887 struct lpfc_cq_event *cq_event;
6890 for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
6891 cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
6893 goto out_pool_create_fail;
6894 list_add_tail(&cq_event->list,
6895 &phba->sli4_hba.sp_cqe_event_pool);
6899 out_pool_create_fail:
6900 lpfc_sli4_cq_event_pool_destroy(phba);
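/*
 * Sizing note for the loop above: the pool holds four events per CQ
 * entry (4 * cq_ecount), so with an assumed cq_ecount of 256 the driver
 * would pre-allocate 1024 slow-path events.
 */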
6905 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
6906 * @phba: pointer to lpfc hba data structure.
6908 * This routine is invoked to free the pool of completion queue events at
6909 * driver unload time. Note that it is the responsibility of the driver
6910 * cleanup routine to free all the outstanding completion-queue events
6911 * allocated from this pool back into the pool before invoking this routine
6912 * to destroy the pool.
6915 lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
6917 struct lpfc_cq_event *cq_event, *next_cq_event;
6919 list_for_each_entry_safe(cq_event, next_cq_event,
6920 &phba->sli4_hba.sp_cqe_event_pool, list) {
6921 list_del(&cq_event->list);
6927 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
6928 * @phba: pointer to lpfc hba data structure.
6930 * This routine is the lock-free version of the API invoked to allocate a
6931 * completion-queue event from the free pool.
6933 * Return: Pointer to the newly allocated completion-queue event if successful
6936 struct lpfc_cq_event *
6937 __lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
6939 struct lpfc_cq_event *cq_event = NULL;
6941 list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
6942 struct lpfc_cq_event, list);
6947 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
6948 * @phba: pointer to lpfc hba data structure.
6950 * This routine is the locking version of the API invoked to allocate a
6951 * completion-queue event from the free pool.
6953 * Return: Pointer to the newly allocated completion-queue event if successful
6956 struct lpfc_cq_event *
6957 lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
6959 struct lpfc_cq_event *cq_event;
6960 unsigned long iflags;
6962 spin_lock_irqsave(&phba->hbalock, iflags);
6963 cq_event = __lpfc_sli4_cq_event_alloc(phba);
6964 spin_unlock_irqrestore(&phba->hbalock, iflags);
6969 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
6970 * @phba: pointer to lpfc hba data structure.
6971 * @cq_event: pointer to the completion queue event to be freed.
6973 * This routine is the lock-free version of the API invoked to release a
6974 * completion-queue event back into the free pool.
6977 __lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
6978 struct lpfc_cq_event *cq_event)
6980 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
6984 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
6985 * @phba: pointer to lpfc hba data structure.
6986 * @cq_event: pointer to the completion queue event to be freed.
6988 * This routine is the locking version of the API invoked to release a
6989 * completion-queue event back into the free pool.
6992 lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
6993 struct lpfc_cq_event *cq_event)
6995 unsigned long iflags;
6996 spin_lock_irqsave(&phba->hbalock, iflags);
6997 __lpfc_sli4_cq_event_release(phba, cq_event);
6998 spin_unlock_irqrestore(&phba->hbalock, iflags);
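/*
 * Illustrative sketch, not part of the driver: the producer side of the
 * pool above as an interrupt-context caller might use it. The CQE copy
 * mirrors how the SLI layer captures events; "cqe" here is assumed to be
 * a valid entry taken from a completion queue.
 */
static void lpfc_example_post_cq_event(struct lpfc_hba *phba,
				       struct lpfc_cqe *cqe)
{
	struct lpfc_cq_event *cq_event;
	unsigned long iflags;

	cq_event = lpfc_sli4_cq_event_alloc(phba);
	if (!cq_event)
		return;		/* pool exhausted; the event is dropped */
	memcpy(&cq_event->cqe, cqe, sizeof(struct lpfc_cqe));
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	/* the worker thread processes the event and releases it later */
}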
7002 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
7003 * @phba: pointer to lpfc hba data structure.
7005 * This routine is invoked to release all the pending completion-queue
7006 * events back into the free pool for a device reset.
7009 lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
7012 struct lpfc_cq_event *cqe;
7013 unsigned long iflags;
7015 /* Retrieve all the pending WCQEs from pending WCQE lists */
7016 spin_lock_irqsave(&phba->hbalock, iflags);
7017 /* Pending FCP XRI abort events */
7018 list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
7020 /* Pending ELS XRI abort events */
7021 list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
7023 /* Pending async events */
7024 list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
7026 spin_unlock_irqrestore(&phba->hbalock, iflags);
7028 while (!list_empty(&cqelist)) {
7029 list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
7030 lpfc_sli4_cq_event_release(phba, cqe);
7035 * lpfc_pci_function_reset - Reset pci function.
7036 * @phba: pointer to lpfc hba data structure.
7038 * This routine is invoked to request a PCI function reset. It destroys
7039 * all resources assigned to the PCI function which originates this request.
7043 * -ENOMEM - No available memory
7044 * -EIO - The mailbox failed to complete successfully.
7047 lpfc_pci_function_reset(struct lpfc_hba *phba)
7049 LPFC_MBOXQ_t *mboxq;
7050 uint32_t rc = 0, if_type;
7051 uint32_t shdr_status, shdr_add_status;
7052 uint32_t rdy_chk, num_resets = 0, reset_again = 0;
7053 union lpfc_sli4_cfg_shdr *shdr;
7054 struct lpfc_register reg_data;
7056 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
7058 case LPFC_SLI_INTF_IF_TYPE_0:
7059 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
7062 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7063 "0494 Unable to allocate memory for "
7064 "issuing SLI_FUNCTION_RESET mailbox "
7069 /* Setup PCI function reset mailbox-ioctl command */
7070 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
7071 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
7072 LPFC_SLI4_MBX_EMBED);
7073 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7074 shdr = (union lpfc_sli4_cfg_shdr *)
7075 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
7076 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
7077 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
7079 if (rc != MBX_TIMEOUT)
7080 mempool_free(mboxq, phba->mbox_mem_pool);
7081 if (shdr_status || shdr_add_status || rc) {
7082 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7083 "0495 SLI_FUNCTION_RESET mailbox "
7084 "failed with status x%x add_status x%x,"
7085 " mbx status x%x\n",
7086 shdr_status, shdr_add_status, rc);
7090 case LPFC_SLI_INTF_IF_TYPE_2:
7091 for (num_resets = 0;
7092 num_resets < MAX_IF_TYPE_2_RESETS;
7095 bf_set(lpfc_sliport_ctrl_end, ®_data,
7096 LPFC_SLIPORT_LITTLE_ENDIAN);
7097 bf_set(lpfc_sliport_ctrl_ip, ®_data,
7098 LPFC_SLIPORT_INIT_PORT);
7099 writel(reg_data.word0, phba->sli4_hba.u.if_type2.
7103 * Poll the Port Status Register and wait for RDY for
7104 * up to 10 seconds. If the port doesn't respond, treat
7105 * it as an error. If the port responds with RN, start
7106 * the loop again.
7108 for (rdy_chk = 0; rdy_chk < 1000; rdy_chk++) {
7110 if (lpfc_readl(phba->sli4_hba.u.if_type2.
7111 STATUSregaddr, ®_data.word0)) {
7115 if (bf_get(lpfc_sliport_status_rdy, ®_data))
7117 if (bf_get(lpfc_sliport_status_rn, ®_data)) {
7124 * If the port responds to the init request with
7125 * reset needed, delay for a bit and restart the loop.
7133 /* Detect any port errors. */
7134 if ((bf_get(lpfc_sliport_status_err, ®_data)) ||
7135 (rdy_chk >= 1000)) {
7136 phba->work_status[0] = readl(
7137 phba->sli4_hba.u.if_type2.ERR1regaddr);
7138 phba->work_status[1] = readl(
7139 phba->sli4_hba.u.if_type2.ERR2regaddr);
7140 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7141 "2890 Port Error Detected "
7142 "during Port Reset: "
7143 "port status reg 0x%x, "
7144 "error 1=0x%x, error 2=0x%x\n",
7146 phba->work_status[0],
7147 phba->work_status[1]);
7152 * Terminate the outer loop provided the Port indicated
7153 * ready within 10 seconds.
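/*
 * Timing note (assuming a 10 ms delay per poll pass): 1000 iterations of
 * the rdy_chk loop give the roughly 10 second RDY window described above.
 */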
7158 /* delay driver action following IF_TYPE_2 function reset */
7161 case LPFC_SLI_INTF_IF_TYPE_1:
7167 /* Catch the not-ready port failure after a port reset. */
7168 if (num_resets >= MAX_IF_TYPE_2_RESETS)
7175 * lpfc_sli4_send_nop_mbox_cmds - Send sli-4 nop mailbox commands
7176 * @phba: pointer to lpfc hba data structure.
7177 * @cnt: number of nop mailbox commands to send.
7179 * This routine is invoked to send a number @cnt of NOP mailbox commands and
7180 * wait for each command to complete.
7182 * Return: the number of NOP mailbox commands completed.
7185 lpfc_sli4_send_nop_mbox_cmds(struct lpfc_hba *phba, uint32_t cnt)
7187 LPFC_MBOXQ_t *mboxq;
7188 int length, cmdsent;
7191 uint32_t shdr_status, shdr_add_status;
7192 union lpfc_sli4_cfg_shdr *shdr;
7195 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7196 "2518 Requested to send 0 NOP mailbox cmd\n");
7200 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7202 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7203 "2519 Unable to allocate memory for issuing "
7204 "NOP mailbox command\n");
7208 /* Set up NOP SLI4_CONFIG mailbox-ioctl command */
7209 length = (sizeof(struct lpfc_mbx_nop) -
7210 sizeof(struct lpfc_sli4_cfg_mhdr));
7211 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
7212 LPFC_MBOX_OPCODE_NOP, length, LPFC_SLI4_MBX_EMBED);
7214 for (cmdsent = 0; cmdsent < cnt; cmdsent++) {
7215 if (!phba->sli4_hba.intr_enable)
7216 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7218 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
7219 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
7221 if (rc == MBX_TIMEOUT)
7223 /* Check return status */
7224 shdr = (union lpfc_sli4_cfg_shdr *)
7225 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
7226 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
7227 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
7229 if (shdr_status || shdr_add_status || rc) {
7230 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7231 "2520 NOP mailbox command failed "
7232 "status x%x add_status x%x mbx "
7233 "status x%x\n", shdr_status,
7234 shdr_add_status, rc);
7239 if (rc != MBX_TIMEOUT)
7240 mempool_free(mboxq, phba->mbox_mem_pool);
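/*
 * Illustrative usage, not part of the driver: exercising the mailbox
 * path by sending a few NOPs and treating any shortfall as a failure.
 */
static int lpfc_example_check_mbox_path(struct lpfc_hba *phba)
{
	uint32_t cnt = 5;	/* arbitrary sample size */

	if (lpfc_sli4_send_nop_mbox_cmds(phba, cnt) != cnt)
		return -EIO;	/* one or more NOPs did not complete */
	return 0;
}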
7246 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
7247 * @phba: pointer to lpfc hba data structure.
7249 * This routine is invoked to set up the PCI device memory space for device
7250 * with SLI-4 interface spec.
7254 * other values - error
7257 lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
7259 struct pci_dev *pdev;
7260 unsigned long bar0map_len, bar1map_len, bar2map_len;
7261 int error = -ENODEV;
7264 /* Obtain PCI device reference */
7268 pdev = phba->pcidev;
7270 /* Set the device DMA mask size */
7271 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
7272 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) {
7273 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
7274 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) {
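/*
 * Both masks were rejected: the standard PCI pattern above prefers a
 * 64-bit DMA mask and settles for 32-bit; when even 32-bit fails, the
 * device cannot DMA on this platform and setup has to bail out with an
 * error.
 */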
7280 * The BARs and register set definitions and offset locations are
7281 * dependent on the if_type.
7283 if (pci_read_config_dword(pdev, LPFC_SLI_INTF,
7284 &phba->sli4_hba.sli_intf.word0)) {
7288 /* There is no SLI3 fallback for SLI4 devices. */
7289 if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) !=
7290 LPFC_SLI_INTF_VALID) {
7291 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7292 "2894 SLI_INTF reg contents invalid "
7293 "sli_intf reg 0x%x\n",
7294 phba->sli4_hba.sli_intf.word0);
7298 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
7300 * Get the bus address of the SLI4 device BAR regions and the
7301 * number of bytes required by each mapping. The mapping of the
7302 * particular PCI BAR regions is dependent on the type of
7303 * SLI4 device.
7305 if (pci_resource_start(pdev, 0)) {
7306 phba->pci_bar0_map = pci_resource_start(pdev, 0);
7307 bar0map_len = pci_resource_len(pdev, 0);
7310 * Map SLI4 PCI Config Space Register base to a kernel virtual
7311 * address.
7313 phba->sli4_hba.conf_regs_memmap_p =
7314 ioremap(phba->pci_bar0_map, bar0map_len);
7315 if (!phba->sli4_hba.conf_regs_memmap_p) {
7316 dev_printk(KERN_ERR, &pdev->dev,
7317 "ioremap failed for SLI4 PCI config "
7321 /* Set up BAR0 PCI config space register memory map */
7322 lpfc_sli4_bar0_register_memmap(phba, if_type);
7324 phba->pci_bar0_map = pci_resource_start(pdev, 1);
7325 bar0map_len = pci_resource_len(pdev, 1);
7326 if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
7327 dev_printk(KERN_ERR, &pdev->dev,
7328 "FATAL - No BAR0 mapping for SLI4, if_type 2\n");
7331 phba->sli4_hba.conf_regs_memmap_p =
7332 ioremap(phba->pci_bar0_map, bar0map_len);
7333 if (!phba->sli4_hba.conf_regs_memmap_p) {
7334 dev_printk(KERN_ERR, &pdev->dev,
7335 "ioremap failed for SLI4 PCI config "
7339 lpfc_sli4_bar0_register_memmap(phba, if_type);
7342 if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
7343 (pci_resource_start(pdev, 2))) {
7345 * Map SLI4 if type 0 HBA Control Register base to a kernel
7346 * virtual address and setup the registers.
7348 phba->pci_bar1_map = pci_resource_start(pdev, 2);
7349 bar1map_len = pci_resource_len(pdev, 2);
7350 phba->sli4_hba.ctrl_regs_memmap_p =
7351 ioremap(phba->pci_bar1_map, bar1map_len);
7352 if (!phba->sli4_hba.ctrl_regs_memmap_p) {
7353 dev_printk(KERN_ERR, &pdev->dev,
7354 "ioremap failed for SLI4 HBA control registers.\n");
7355 goto out_iounmap_conf;
7357 lpfc_sli4_bar1_register_memmap(phba);
7360 if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
7361 (pci_resource_start(pdev, 4))) {
7363 * Map SLI4 if type 0 HBA Doorbell Register base to a kernel
7364 * virtual address and setup the registers.
7366 phba->pci_bar2_map = pci_resource_start(pdev, 4);
7367 bar2map_len = pci_resource_len(pdev, 4);
7368 phba->sli4_hba.drbl_regs_memmap_p =
7369 ioremap(phba->pci_bar2_map, bar2map_len);
7370 if (!phba->sli4_hba.drbl_regs_memmap_p) {
7371 dev_printk(KERN_ERR, &pdev->dev,
7372 "ioremap failed for SLI4 HBA doorbell registers.\n");
7373 goto out_iounmap_ctrl;
7375 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
7377 goto out_iounmap_all;
7383 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
7385 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
7387 iounmap(phba->sli4_hba.conf_regs_memmap_p);
7393 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
7394 * @phba: pointer to lpfc hba data structure.
7396 * This routine is invoked to unset the PCI device memory space for device
7397 * with SLI-4 interface spec.
7400 lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
7402 struct pci_dev *pdev;
7404 /* Obtain PCI device reference */
7408 pdev = phba->pcidev;
7410 /* Free coherent DMA memory allocated */
7412 /* Unmap I/O memory space */
7413 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
7414 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
7415 iounmap(phba->sli4_hba.conf_regs_memmap_p);
7421 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
7422 * @phba: pointer to lpfc hba data structure.
7424 * This routine is invoked to enable the MSI-X interrupt vectors to device
7425 * with SLI-3 interface specs. The kernel function pci_enable_msix() is
7426 * called to enable the MSI-X vectors. Note that pci_enable_msix(), once
7427 * invoked, enables either all or nothing, depending on the current
7428 * availability of PCI vector resources. The device driver is responsible
7429 * for calling the individual request_irq() to register each MSI-X vector
7430 * with an interrupt handler, which is done in this function. Note that
7431 * later when device is unloading, the driver should always call free_irq()
7432 * on all MSI-X vectors it has done request_irq() on before calling
7433 * pci_disable_msix(). Failure to do so results in a BUG_ON(), and the device
7434 * will be left with MSI-X enabled and its vectors leaked.
7438 * other values - error
7441 lpfc_sli_enable_msix(struct lpfc_hba *phba)
7446 /* Set up MSI-X multi-message vectors */
7447 for (i = 0; i < LPFC_MSIX_VECTORS; i++)
7448 phba->msix_entries[i].entry = i;
7450 /* Configure MSI-X capability structure */
7451 rc = pci_enable_msix(phba->pcidev, phba->msix_entries,
7452 ARRAY_SIZE(phba->msix_entries));
7454 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7455 "0420 PCI enable MSI-X failed (%d)\n", rc);
7458 for (i = 0; i < LPFC_MSIX_VECTORS; i++)
7459 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7460 "0477 MSI-X entry[%d]: vector=x%x "
7462 phba->msix_entries[i].vector,
7463 phba->msix_entries[i].entry);
7465 * Assign MSI-X vectors to interrupt handlers
7468 /* vector-0 is associated to slow-path handler */
7469 rc = request_irq(phba->msix_entries[0].vector,
7470 &lpfc_sli_sp_intr_handler, IRQF_SHARED,
7471 LPFC_SP_DRIVER_HANDLER_NAME, phba);
7473 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7474 "0421 MSI-X slow-path request_irq failed "
7479 /* vector-1 is associated to fast-path handler */
7480 rc = request_irq(phba->msix_entries[1].vector,
7481 &lpfc_sli_fp_intr_handler, IRQF_SHARED,
7482 LPFC_FP_DRIVER_HANDLER_NAME, phba);
7485 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7486 "0429 MSI-X fast-path request_irq failed "
7492 * Configure HBA MSI-X attention conditions to messages
7494 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7498 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7499 "0474 Unable to allocate memory for issuing "
7500 "MBOX_CONFIG_MSI command\n");
7503 rc = lpfc_config_msi(phba, pmb);
7506 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
7507 if (rc != MBX_SUCCESS) {
7508 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
7509 "0351 Config MSI mailbox command failed, "
7510 "mbxCmd x%x, mbxStatus x%x\n",
7511 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
7515 /* Free memory allocated for mailbox command */
7516 mempool_free(pmb, phba->mbox_mem_pool);
7520 /* Free memory allocated for mailbox command */
7521 mempool_free(pmb, phba->mbox_mem_pool);
7524 /* free the irq already requested */
7525 free_irq(phba->msix_entries[1].vector, phba);
7528 /* free the irq already requested */
7529 free_irq(phba->msix_entries[0].vector, phba);
7532 /* Unconfigure MSI-X capability structure */
7533 pci_disable_msix(phba->pcidev);
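/*
 * Note on the unwind ladder above: it releases resources in the reverse
 * order they were acquired (fast-path vector, then slow-path vector,
 * then the MSI-X capability itself), which is exactly the free_irq()
 * before pci_disable_msix() ordering the kernel-doc warns about.
 */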
7538 * lpfc_sli_disable_msix - Disable MSI-X interrupt mode on SLI-3 device.
7539 * @phba: pointer to lpfc hba data structure.
7541 * This routine is invoked to release the MSI-X vectors and then disable the
7542 * MSI-X interrupt mode to device with SLI-3 interface spec.
7545 lpfc_sli_disable_msix(struct lpfc_hba *phba)
7549 /* Free up MSI-X multi-message vectors */
7550 for (i = 0; i < LPFC_MSIX_VECTORS; i++)
7551 free_irq(phba->msix_entries[i].vector, phba);
7553 pci_disable_msix(phba->pcidev);
7559 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
7560 * @phba: pointer to lpfc hba data structure.
7562 * This routine is invoked to enable the MSI interrupt mode to device with
7563 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
7564 * enable the MSI vector. The device driver is responsible for calling the
7565 * request_irq() to register MSI vector with a interrupt the handler, which
7566 * is done in this function.
7570 * other values - error
7573 lpfc_sli_enable_msi(struct lpfc_hba *phba)
7577 rc = pci_enable_msi(phba->pcidev);
7579 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7580 "0462 PCI enable MSI mode success.\n");
7582 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7583 "0471 PCI enable MSI mode failed (%d)\n", rc);
7587 rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
7588 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
7590 pci_disable_msi(phba->pcidev);
7591 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7592 "0478 MSI request_irq failed (%d)\n", rc);
7598 * lpfc_sli_disable_msi - Disable MSI interrupt mode to SLI-3 device.
7599 * @phba: pointer to lpfc hba data structure.
7601 * This routine is invoked to disable the MSI interrupt mode to device with
7602 * SLI-3 interface spec. The driver calls free_irq() on MSI vector it has
7603 * done request_irq() on before calling pci_disable_msi(). Failure to do so
7604 * results in a BUG_ON() and a device will be left with MSI enabled and leaks
7608 lpfc_sli_disable_msi(struct lpfc_hba *phba)
7610 free_irq(phba->pcidev->irq, phba);
7611 pci_disable_msi(phba->pcidev);
7616 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
7617 * @phba: pointer to lpfc hba data structure.
7619 * This routine is invoked to enable device interrupt and associate driver's
7620 * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface
7621 * spec. Depending on the interrupt mode configured for the driver, the driver
7622 * will try to fall back from the configured interrupt mode to an interrupt
7623 * mode which is supported by the platform, kernel, and device, in the order:
7625 * MSI-X -> MSI -> IRQ.
7629 * other values - error
7632 lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
7634 uint32_t intr_mode = LPFC_INTR_ERROR;
7637 if (cfg_mode == 2) {
7638 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
7639 retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
7641 /* Now, try to enable MSI-X interrupt mode */
7642 retval = lpfc_sli_enable_msix(phba);
7644 /* Indicate initialization to MSI-X mode */
7645 phba->intr_type = MSIX;
7651 /* Fallback to MSI if MSI-X initialization failed */
7652 if (cfg_mode >= 1 && phba->intr_type == NONE) {
7653 retval = lpfc_sli_enable_msi(phba);
7655 /* Indicate initialization to MSI mode */
7656 phba->intr_type = MSI;
7661 /* Fallback to INTx if both MSI-X/MSI initialization failed */
7662 if (phba->intr_type == NONE) {
7663 retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
7664 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
7666 /* Indicate initialization to INTx mode */
7667 phba->intr_type = INTx;
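/*
 * Illustrative caller pattern, not part of the driver: consuming the
 * return value of lpfc_sli_enable_intr(). LPFC_INTR_ERROR means every
 * mode in the MSI-X -> MSI -> IRQ chain failed.
 */
static int lpfc_example_bring_up_intr(struct lpfc_hba *phba,
				      uint32_t cfg_mode)
{
	uint32_t intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);

	if (intr_mode == LPFC_INTR_ERROR)
		return -ENODEV;		/* no usable interrupt mode */
	phba->intr_mode = intr_mode;	/* record what was actually enabled */
	return 0;
}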
7675 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
7676 * @phba: pointer to lpfc hba data structure.
7678 * This routine is invoked to disable device interrupt and disassociate the
7679 * driver's interrupt handler(s) from interrupt vector(s) to device with
7680 * SLI-3 interface spec. Depending on the interrupt mode, the driver will
7681 * release the interrupt vector(s) for the message signaled interrupt.
7684 lpfc_sli_disable_intr(struct lpfc_hba *phba)
7686 /* Disable the currently initialized interrupt mode */
7687 if (phba->intr_type == MSIX)
7688 lpfc_sli_disable_msix(phba);
7689 else if (phba->intr_type == MSI)
7690 lpfc_sli_disable_msi(phba);
7691 else if (phba->intr_type == INTx)
7692 free_irq(phba->pcidev->irq, phba);
7694 /* Reset interrupt management states */
7695 phba->intr_type = NONE;
7696 phba->sli.slistat.sli_intr = 0;
7702 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
7703 * @phba: pointer to lpfc hba data structure.
7705 * This routine is invoked to enable the MSI-X interrupt vectors to device
7706 * with SLI-4 interface spec. The kernel function pci_enable_msix() is called
7707 * to enable the MSI-X vectors. Note that pci_enable_msix(), once invoked,
7708 * enables either all or nothing, depending on the current availability of
7709 * PCI vector resources. The device driver is responsible for calling the
7710 * individual request_irq() to register each MSI-X vector with an interrupt
7711 * handler, which is done in this function. Note that later when device is
7712 * unloading, the driver should always call free_irq() on all MSI-X vectors
7713 * it has done request_irq() on before calling pci_disable_msix(). Failure
7714 * to do so results in a BUG_ON(), and the device will be left with MSI-X
7715 * enabled and its vectors leaked.
7719 * other values - error
7722 lpfc_sli4_enable_msix(struct lpfc_hba *phba)
7724 int vectors, rc, index;
7726 /* Set up MSI-X multi-message vectors */
7727 for (index = 0; index < phba->sli4_hba.cfg_eqn; index++)
7728 phba->sli4_hba.msix_entries[index].entry = index;
7730 /* Configure MSI-X capability structure */
7731 vectors = phba->sli4_hba.cfg_eqn;
7732 enable_msix_vectors:
7733 rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries,
7737 goto enable_msix_vectors;
7739 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7740 "0484 PCI enable MSI-X failed (%d)\n", rc);
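/*
 * The enable_msix_vectors retry above relies on pci_enable_msix()'s
 * contract: a positive return value is the number of vectors the
 * platform can actually provide, so the driver lowers its request to
 * that count and tries again; a negative return is a hard failure.
 */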
7744 /* Log MSI-X vector assignment */
7745 for (index = 0; index < vectors; index++)
7746 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7747 "0489 MSI-X entry[%d]: vector=x%x "
7748 "message=%d\n", index,
7749 phba->sli4_hba.msix_entries[index].vector,
7750 phba->sli4_hba.msix_entries[index].entry);
7752 * Assign MSI-X vectors to interrupt handlers
7755 rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
7756 &lpfc_sli4_sp_intr_handler, IRQF_SHARED,
7757 LPFC_SP_DRIVER_HANDLER_NAME, phba);
7759 /* All Interrupts need to be handled by one EQ */
7760 rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
7761 &lpfc_sli4_intr_handler, IRQF_SHARED,
7762 LPFC_DRIVER_NAME, phba);
7764 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7765 "0485 MSI-X slow-path request_irq failed "
7770 /* The rest of the vector(s) are associated to fast-path handler(s) */
7771 for (index = 1; index < vectors; index++) {
7772 phba->sli4_hba.fcp_eq_hdl[index - 1].idx = index - 1;
7773 phba->sli4_hba.fcp_eq_hdl[index - 1].phba = phba;
7774 rc = request_irq(phba->sli4_hba.msix_entries[index].vector,
7775 &lpfc_sli4_fp_intr_handler, IRQF_SHARED,
7776 LPFC_FP_DRIVER_HANDLER_NAME,
7777 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
7779 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7780 "0486 MSI-X fast-path (%d) "
7781 "request_irq failed (%d)\n", index, rc);
7785 phba->sli4_hba.msix_vec_nr = vectors;
7790 /* free the irq already requested */
7791 for (--index; index >= 1; index--)
7792 free_irq(phba->sli4_hba.msix_entries[index - 1].vector,
7793 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
7795 /* free the irq already requested */
7796 free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
7799 /* Unconfigure MSI-X capability structure */
7800 pci_disable_msix(phba->pcidev);
7805 * lpfc_sli4_disable_msix - Disable MSI-X interrupt mode to SLI-4 device
7806 * @phba: pointer to lpfc hba data structure.
7808 * This routine is invoked to release the MSI-X vectors and then disable the
7809 * MSI-X interrupt mode to device with SLI-4 interface spec.
7812 lpfc_sli4_disable_msix(struct lpfc_hba *phba)
7816 /* Free up MSI-X multi-message vectors */
7817 free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
7819 for (index = 1; index < phba->sli4_hba.msix_vec_nr; index++)
7820 free_irq(phba->sli4_hba.msix_entries[index].vector,
7821 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
7824 pci_disable_msix(phba->pcidev);
7830 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
7831 * @phba: pointer to lpfc hba data structure.
7833 * This routine is invoked to enable the MSI interrupt mode to device with
7834 * SLI-4 interface spec. The kernel function pci_enable_msi() is called
7835 * to enable the MSI vector. The device driver is responsible for calling
7836 * request_irq() to register the MSI vector with an interrupt handler,
7837 * which is done in this function.
7841 * other values - error
7844 lpfc_sli4_enable_msi(struct lpfc_hba *phba)
7848 rc = pci_enable_msi(phba->pcidev);
7850 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7851 "0487 PCI enable MSI mode success.\n");
7853 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7854 "0488 PCI enable MSI mode failed (%d)\n", rc);
7858 rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
7859 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
7861 pci_disable_msi(phba->pcidev);
7862 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7863 "0490 MSI request_irq failed (%d)\n", rc);
7867 for (index = 0; index < phba->cfg_fcp_eq_count; index++) {
7868 phba->sli4_hba.fcp_eq_hdl[index].idx = index;
7869 phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
7876 * lpfc_sli4_disable_msi - Disable MSI interrupt mode to SLI-4 device
7877 * @phba: pointer to lpfc hba data structure.
7879 * This routine is invoked to disable the MSI interrupt mode to device with
7880 * SLI-4 interface spec. The driver calls free_irq() on the MSI vector it has
7881 * done request_irq() on before calling pci_disable_msi(). Failure to do so
7882 * results in a BUG_ON(), and the device will be left with MSI enabled and
7883 * its vector leaked.
7886 lpfc_sli4_disable_msi(struct lpfc_hba *phba)
7888 free_irq(phba->pcidev->irq, phba);
7889 pci_disable_msi(phba->pcidev);
7894 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
7895 * @phba: pointer to lpfc hba data structure.
7897 * This routine is invoked to enable device interrupt and associate driver's
7898 * interrupt handler(s) to interrupt vector(s) to device with SLI-4
7899 * interface spec. Depends on the interrupt mode configured to the driver,
7900 * the driver will try to fallback from the configured interrupt mode to an
7901 * interrupt mode which is supported by the platform, kernel, and device in
7903 * MSI-X -> MSI -> IRQ.
7907 * other values - error
7910 lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
7912 uint32_t intr_mode = LPFC_INTR_ERROR;
7915 if (cfg_mode == 2) {
7916 /* Preparation before conf_msi mbox cmd */
7919 /* Now, try to enable MSI-X interrupt mode */
7920 retval = lpfc_sli4_enable_msix(phba);
7922 /* Indicate initialization to MSI-X mode */
7923 phba->intr_type = MSIX;
7929 /* Fallback to MSI if MSI-X initialization failed */
7930 if (cfg_mode >= 1 && phba->intr_type == NONE) {
7931 retval = lpfc_sli4_enable_msi(phba);
7933 /* Indicate initialization to MSI mode */
7934 phba->intr_type = MSI;
7939 /* Fallback to INTx if both MSI-X/MSI initialization failed */
7940 if (phba->intr_type == NONE) {
7941 retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
7942 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
7944 /* Indicate initialization to INTx mode */
7945 phba->intr_type = INTx;
7947 for (index = 0; index < phba->cfg_fcp_eq_count;
7949 phba->sli4_hba.fcp_eq_hdl[index].idx = index;
7950 phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
7958 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
7959 * @phba: pointer to lpfc hba data structure.
7961 * This routine is invoked to disable device interrupt and disassociate
7962 * the driver's interrupt handler(s) from interrupt vector(s) to device
7963 * with SLI-4 interface spec. Depending on the interrupt mode, the driver
7964 * will release the interrupt vector(s) for the message signaled interrupt.
7967 lpfc_sli4_disable_intr(struct lpfc_hba *phba)
7969 /* Disable the currently initialized interrupt mode */
7970 if (phba->intr_type == MSIX)
7971 lpfc_sli4_disable_msix(phba);
7972 else if (phba->intr_type == MSI)
7973 lpfc_sli4_disable_msi(phba);
7974 else if (phba->intr_type == INTx)
7975 free_irq(phba->pcidev->irq, phba);
7977 /* Reset interrupt management states */
7978 phba->intr_type = NONE;
7979 phba->sli.slistat.sli_intr = 0;
7985 * lpfc_unset_hba - Unset SLI3 hba device initialization
7986 * @phba: pointer to lpfc hba data structure.
7988 * This routine is invoked to unset the HBA device initialization steps for
7989 * a device with SLI-3 interface spec.
7992 lpfc_unset_hba(struct lpfc_hba *phba)
7994 struct lpfc_vport *vport = phba->pport;
7995 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
7997 spin_lock_irq(shost->host_lock);
7998 vport->load_flag |= FC_UNLOADING;
7999 spin_unlock_irq(shost->host_lock);
8001 lpfc_stop_hba_timers(phba);
8003 phba->pport->work_port_events = 0;
8005 lpfc_sli_hba_down(phba);
8007 lpfc_sli_brdrestart(phba);
8009 lpfc_sli_disable_intr(phba);
8015 * lpfc_sli4_unset_hba - Unset SLI4 hba device initialization.
8016 * @phba: pointer to lpfc hba data structure.
8018 * This routine is invoked to undo the HBA device initialization steps for
8019 * a device with SLI-4 interface spec.
8022 lpfc_sli4_unset_hba(struct lpfc_hba *phba)
8024 struct lpfc_vport *vport = phba->pport;
8025 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
8027 spin_lock_irq(shost->host_lock);
8028 vport->load_flag |= FC_UNLOADING;
8029 spin_unlock_irq(shost->host_lock);
8031 phba->pport->work_port_events = 0;
8033 /* Stop the SLI4 device port */
8034 lpfc_stop_port(phba);
8036 lpfc_sli4_disable_intr(phba);
8038 /* Reset SLI4 HBA FCoE function */
8039 lpfc_pci_function_reset(phba);
8040 lpfc_sli4_queue_destroy(phba);
8046 * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy
8047 * @phba: Pointer to HBA context object.
8049 * This function is called in the SLI4 code path to wait for completion
8050 * of the device's outstanding XRI exchanges. It checks for XRI exchange
8051 * busy on outstanding FCP and ELS I/Os every 10ms for up to 10 seconds;
8052 * after that, it checks every 30 seconds, logging an error message on
8053 * each pass, and waits indefinitely. Only when all outstanding XRI
8054 * exchanges have completed does the driver unload proceed with invoking
8055 * the function reset ioctl mailbox command to the CNA and releasing the
8056 * rest of the driver unload resources.
8059 lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
8062 int fcp_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
8063 int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
8065 while (!fcp_xri_cmpl || !els_xri_cmpl) {
8066 if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
8068 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8069 "2877 FCP XRI exchange busy "
8070 "wait time: %d seconds.\n",
8073 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8074 "2878 ELS XRI exchange busy "
8075 "wait time: %d seconds.\n",
8077 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
8078 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
8080 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
8081 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
8084 list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
8086 list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
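
/*
 * A minimal sketch of the two-phase wait implemented above: poll fast
 * (T1 interval) until the tolerance window expires, then poll slowly
 * (T2 interval) with a warning on each pass, never giving up.  The
 * example_done callback stands in for the list_empty() checks on the
 * aborted-XRI lists.
 */
#if 0
static void example_xri_busy_wait(struct lpfc_hba *phba,
				  bool (*example_done)(struct lpfc_hba *))
{
	int wait_time = 0;

	while (!example_done(phba)) {
		if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
			/* Past the tolerance window: warn and poll slowly */
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"example: XRI exchange still busy, "
					"waited %d ms\n", wait_time);
			msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
			wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
		} else {
			/* Inside the tolerance window: poll quickly */
			msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
			wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
		}
	}
}
#endif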
8091 * lpfc_sli4_hba_unset - Unset the fcoe hba
8092 * @phba: Pointer to HBA context object.
8094 * This function is called in the SLI4 code path to reset the HBA's FCoE
8095 * function. The caller is not required to hold any lock. This routine
8096 * issues PCI function reset mailbox command to reset the FCoE function.
8097 * At the end of the function, it calls lpfc_hba_down_post function to
8098 * free any pending commands.
8101 lpfc_sli4_hba_unset(struct lpfc_hba *phba)
8104 LPFC_MBOXQ_t *mboxq;
8105 struct pci_dev *pdev = phba->pcidev;
8107 lpfc_stop_hba_timers(phba);
8108 phba->sli4_hba.intr_enable = 0;
8111 * Gracefully wait out the potential current outstanding asynchronous
8115 /* First, block any pending async mailbox command from being posted */
8116 spin_lock_irq(&phba->hbalock);
8117 phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
8118 spin_unlock_irq(&phba->hbalock);
8119 /* Now, try to wait it out if we can */
8120 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8122 if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
8125 /* Forcefully release the outstanding mailbox command if timed out */
8126 if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8127 spin_lock_irq(&phba->hbalock);
8128 mboxq = phba->sli.mbox_active;
8129 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
8130 __lpfc_mbox_cmpl_put(phba, mboxq);
8131 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8132 phba->sli.mbox_active = NULL;
8133 spin_unlock_irq(&phba->hbalock);
8136 /* Abort all iocbs associated with the hba */
8137 lpfc_sli_hba_iocb_abort(phba);
8139 /* Wait for completion of device XRI exchange busy */
8140 lpfc_sli4_xri_exchange_busy_wait(phba);
8142 /* Disable PCI subsystem interrupt */
8143 lpfc_sli4_disable_intr(phba);
8145 /* Disable SR-IOV if enabled */
8146 if (phba->cfg_sriov_nr_virtfn)
8147 pci_disable_sriov(pdev);
8149 /* Stopping the kthread will trigger work_done one more time */
8150 kthread_stop(phba->worker_thread);
8152 /* Reset SLI4 HBA FCoE function */
8153 lpfc_pci_function_reset(phba);
8154 lpfc_sli4_queue_destroy(phba);
8156 /* Stop the SLI4 device port */
8157 phba->pport->work_port_events = 0;
8161 * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities.
8162 * @phba: Pointer to HBA context object.
8163 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
8165 * This function is called in the SLI4 code path to read the port's
8166 * sli4 capabilities.
8168 * This function may be called from any context that can block-wait
8169 * for the completion. The expectation is that this routine is called
8170 * typically from probe_one or from the online routine.
8173 lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
8176 struct lpfc_mqe *mqe;
8177 struct lpfc_pc_sli4_params *sli4_params;
8181 mqe = &mboxq->u.mqe;
8183 /* Read the port's SLI4 Parameters port capabilities */
8184 lpfc_pc_sli4_params(mboxq);
8185 if (!phba->sli4_hba.intr_enable)
8186 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8188 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
8189 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
8195 sli4_params = &phba->sli4_hba.pc_sli4_params;
8196 sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params);
8197 sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params);
8198 sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params);
8199 sli4_params->featurelevel_1 = bf_get(featurelevel_1,
8200 &mqe->un.sli4_params);
8201 sli4_params->featurelevel_2 = bf_get(featurelevel_2,
8202 &mqe->un.sli4_params);
8203 sli4_params->proto_types = mqe->un.sli4_params.word3;
8204 sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len;
8205 sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params);
8206 sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params);
8207 sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params);
8208 sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params);
8209 sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params);
8210 sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params);
8211 sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params);
8212 sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params);
8213 sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params);
8214 sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params);
8215 sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params);
8216 sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params);
8217 sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params);
8218 sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params);
8219 sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params);
8220 sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params);
8221 sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
8222 sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
8223 sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);
8225 /* Make sure that sge_supp_len can be handled by the driver */
8226 if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
8227 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
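
/*
 * A minimal sketch of the mailbox-issue pattern used above (and again in
 * lpfc_get_sli4_parameters() below): before interrupts are enabled the
 * command has to be polled to completion; once they are, the driver can
 * block-wait with the per-command timeout.
 */
#if 0
static int example_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	int mbox_tmo;

	if (!phba->sli4_hba.intr_enable)
		return lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
	return lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
}
#endif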
8233 * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
8234 * @phba: Pointer to HBA context object.
8235 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
8237 * This function is called in the SLI4 code path to read the port's
8238 * sli4 capabilities.
8240 * This function may be called from any context that can block-wait
8241 * for the completion. The expectation is that this routine is called
8242 * typically from probe_one or from the online routine.
8245 lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
8248 struct lpfc_mqe *mqe = &mboxq->u.mqe;
8249 struct lpfc_pc_sli4_params *sli4_params;
8252 struct lpfc_sli4_parameters *mbx_sli4_parameters;
8255 * By default, the driver assumes the SLI4 port requires RPI
8256 * header postings. The SLI4_PARAM response will correct this
8259 phba->sli4_hba.rpi_hdrs_in_use = 1;
8261 /* Read the port's SLI4 Config Parameters */
8262 length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
8263 sizeof(struct lpfc_sli4_cfg_mhdr));
8264 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
8265 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
8266 length, LPFC_SLI4_MBX_EMBED);
8267 if (!phba->sli4_hba.intr_enable)
8268 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8270 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
8271 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
8275 sli4_params = &phba->sli4_hba.pc_sli4_params;
8276 mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
8277 sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters);
8278 sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters);
8279 sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters);
8280 sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1,
8281 mbx_sli4_parameters);
8282 sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2,
8283 mbx_sli4_parameters);
8284 if (bf_get(cfg_phwq, mbx_sli4_parameters))
8285 phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED;
8287 phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
8288 sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
8289 sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters);
8290 sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
8291 sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
8292 sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
8293 sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters);
8294 sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
8295 mbx_sli4_parameters);
8296 sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
8297 mbx_sli4_parameters);
8298 phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
8299 phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);
8301 /* Make sure that sge_supp_len can be handled by the driver */
8302 if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
8303 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
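
/*
 * A minimal sketch of the embedded SLI4 config mailbox set-up used above:
 * the payload length excludes the common config header, and the request
 * is embedded in the mailbox itself rather than carried in external DMA
 * buffers.
 */
#if 0
static void example_build_get_params(struct lpfc_hba *phba,
				     LPFC_MBOXQ_t *mboxq)
{
	uint32_t length = sizeof(struct lpfc_mbx_get_sli4_parameters) -
			  sizeof(struct lpfc_sli4_cfg_mhdr);

	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
			 length, LPFC_SLI4_MBX_EMBED);
}
#endif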
8309 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
8310 * @pdev: pointer to PCI device
8311 * @pid: pointer to PCI device identifier
8313 * This routine is to be called to attach a device with SLI-3 interface spec
8314 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
8315 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
8316 * information of the device and driver to see if the driver can
8317 * support this kind of device. If the match is successful, the driver core
8318 * invokes this routine. If this routine determines it can claim the HBA, it
8319 * does all the initialization that it needs to do to handle the HBA properly.
8322 * 0 - driver can claim the device
8323 * negative value - driver can not claim the device
8325 static int __devinit
8326 lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
8328 struct lpfc_hba *phba;
8329 struct lpfc_vport *vport = NULL;
8330 struct Scsi_Host *shost = NULL;
8332 uint32_t cfg_mode, intr_mode;
8334 /* Allocate memory for HBA structure */
8335 phba = lpfc_hba_alloc(pdev);
8339 /* Perform generic PCI device enabling operation */
8340 error = lpfc_enable_pci_dev(phba);
8344 /* Set up SLI API function jump table for PCI-device group-0 HBAs */
8345 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
8347 goto out_disable_pci_dev;
8349 /* Set up SLI-3 specific device PCI memory space */
8350 error = lpfc_sli_pci_mem_setup(phba);
8352 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8353 "1402 Failed to set up pci memory space.\n");
8354 goto out_disable_pci_dev;
8357 /* Set up phase-1 common device driver resources */
8358 error = lpfc_setup_driver_resource_phase1(phba);
8360 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8361 "1403 Failed to set up driver resource.\n");
8362 goto out_unset_pci_mem_s3;
8365 /* Set up SLI-3 specific device driver resources */
8366 error = lpfc_sli_driver_resource_setup(phba);
8368 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8369 "1404 Failed to set up driver resource.\n");
8370 goto out_unset_pci_mem_s3;
8373 /* Initialize and populate the iocb list per host */
8374 error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
8376 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8377 "1405 Failed to initialize iocb list.\n");
8378 goto out_unset_driver_resource_s3;
8381 /* Set up common device driver resources */
8382 error = lpfc_setup_driver_resource_phase2(phba);
8384 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8385 "1406 Failed to set up driver resource.\n");
8386 goto out_free_iocb_list;
8389 /* Get the default values for Model Name and Description */
8390 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
8392 /* Create SCSI host to the physical port */
8393 error = lpfc_create_shost(phba);
8395 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8396 "1407 Failed to create scsi host.\n");
8397 goto out_unset_driver_resource;
8400 /* Configure sysfs attributes */
8401 vport = phba->pport;
8402 error = lpfc_alloc_sysfs_attr(vport);
8404 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8405 "1476 Failed to allocate sysfs attr\n");
8406 goto out_destroy_shost;
8409 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
8410 /* Now, try to enable interrupt and bring up the device */
8411 cfg_mode = phba->cfg_use_msi;
8413 /* Put device to a known state before enabling interrupt */
8414 lpfc_stop_port(phba);
8415 /* Configure and enable interrupt */
8416 intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
8417 if (intr_mode == LPFC_INTR_ERROR) {
8418 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8419 "0431 Failed to enable interrupt.\n");
8421 goto out_free_sysfs_attr;
8423 /* SLI-3 HBA setup */
8424 if (lpfc_sli_hba_setup(phba)) {
8425 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8426 "1477 Failed to set up hba\n");
8428 goto out_remove_device;
8431 /* Wait 50ms for the interrupts of previous mailbox commands */
8433 /* Check active interrupts on message signaled interrupts */
8434 if (intr_mode == 0 ||
8435 phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
8436 /* Log the current active interrupt mode */
8437 phba->intr_mode = intr_mode;
8438 lpfc_log_intr_mode(phba, intr_mode);
8441 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8442 "0447 Configure interrupt mode (%d) "
8443 "failed active interrupt test.\n",
8445 /* Disable the current interrupt mode */
8446 lpfc_sli_disable_intr(phba);
8447 /* Try next level of interrupt mode */
8448 cfg_mode = --intr_mode;
8452 /* Perform post initialization setup */
8453 lpfc_post_init_setup(phba);
8455 /* Check if there are static vports to be created. */
8456 lpfc_create_static_vport(phba);
8461 lpfc_unset_hba(phba);
8462 out_free_sysfs_attr:
8463 lpfc_free_sysfs_attr(vport);
8465 lpfc_destroy_shost(phba);
8466 out_unset_driver_resource:
8467 lpfc_unset_driver_resource_phase2(phba);
8469 lpfc_free_iocb_list(phba);
8470 out_unset_driver_resource_s3:
8471 lpfc_sli_driver_resource_unset(phba);
8472 out_unset_pci_mem_s3:
8473 lpfc_sli_pci_mem_unset(phba);
8474 out_disable_pci_dev:
8475 lpfc_disable_pci_dev(phba);
8477 scsi_host_put(shost);
8479 lpfc_hba_free(phba);
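
/*
 * A minimal sketch of the goto-based unwind used by the probe path above:
 * each setup step that fails jumps to the label that releases everything
 * acquired so far, so teardown runs in exact reverse order of setup.
 * example_register() is a placeholder for the driver-specific steps.
 */
#if 0
static int example_register(struct pci_dev *pdev);	/* placeholder */

static int example_probe(struct pci_dev *pdev)
{
	int error;

	error = pci_enable_device(pdev);
	if (error)
		return error;
	error = pci_request_regions(pdev, "example");
	if (error)
		goto out_disable_device;
	error = example_register(pdev);
	if (error)
		goto out_release_regions;
	return 0;

out_release_regions:
	pci_release_regions(pdev);
out_disable_device:
	pci_disable_device(pdev);
	return error;
}
#endif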
8484 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
8485 * @pdev: pointer to PCI device
8487 * This routine is to be called to detach a device with SLI-3 interface
8488 * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
8489 * removed from PCI bus, it performs all the necessary cleanup for the HBA
8490 * device to be removed from the PCI subsystem properly.
8492 static void __devexit
8493 lpfc_pci_remove_one_s3(struct pci_dev *pdev)
8495 struct Scsi_Host *shost = pci_get_drvdata(pdev);
8496 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
8497 struct lpfc_vport **vports;
8498 struct lpfc_hba *phba = vport->phba;
8500 int bars = pci_select_bars(pdev, IORESOURCE_MEM);
8502 spin_lock_irq(&phba->hbalock);
8503 vport->load_flag |= FC_UNLOADING;
8504 spin_unlock_irq(&phba->hbalock);
8506 lpfc_free_sysfs_attr(vport);
8508 /* Release all the vports against this physical port */
8509 vports = lpfc_create_vport_work_array(phba);
8511 for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
8512 fc_vport_terminate(vports[i]->fc_vport);
8513 lpfc_destroy_vport_work_array(phba, vports);
8515 /* Remove FC host and then SCSI host with the physical port */
8516 fc_remove_host(shost);
8517 scsi_remove_host(shost);
8518 lpfc_cleanup(vport);
8521 * Bring down the SLI Layer. This step disables all interrupts,
8522 * clears the rings, discards all mailbox commands, and resets
8526 /* HBA interrupt will be disabled after this call */
8527 lpfc_sli_hba_down(phba);
8528 /* Stopping the kthread will trigger work_done one more time */
8529 kthread_stop(phba->worker_thread);
8530 /* Final cleanup of txcmplq and reset the HBA */
8531 lpfc_sli_brdrestart(phba);
8533 lpfc_stop_hba_timers(phba);
8534 spin_lock_irq(&phba->hbalock);
8535 list_del_init(&vport->listentry);
8536 spin_unlock_irq(&phba->hbalock);
8538 lpfc_debugfs_terminate(vport);
8540 /* Disable SR-IOV if enabled */
8541 if (phba->cfg_sriov_nr_virtfn)
8542 pci_disable_sriov(pdev);
8544 /* Disable interrupt */
8545 lpfc_sli_disable_intr(phba);
8547 pci_set_drvdata(pdev, NULL);
8548 scsi_host_put(shost);
8551 * Call scsi_free before mem_free since scsi bufs are released to their
8552 * corresponding pools here.
8554 lpfc_scsi_free(phba);
8555 lpfc_mem_free_all(phba);
8557 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
8558 phba->hbqslimp.virt, phba->hbqslimp.phys);
8560 /* Free resources associated with SLI2 interface */
8561 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
8562 phba->slim2p.virt, phba->slim2p.phys);
8564 /* unmap adapter SLIM and Control Registers */
8565 iounmap(phba->ctrl_regs_memmap_p);
8566 iounmap(phba->slim_memmap_p);
8568 lpfc_hba_free(phba);
8570 pci_release_selected_regions(pdev, bars);
8571 pci_disable_device(pdev);
8575 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
8576 * @pdev: pointer to PCI device
8577 * @msg: power management message
8579 * This routine is to be called from the kernel's PCI subsystem to support
8580 * system Power Management (PM) for a device with SLI-3 interface spec. When
8581 * PM invokes this method, it quiesces the device by stopping the driver's
8582 * worker thread for the device, turning off the device's interrupts and DMA,
8583 * and bringing the device offline. Note that the driver implements only the
8584 * minimum PM requirements of a power-aware driver for suspend/resume: all
8585 * possible PM messages (SUSPEND, HIBERNATE, FREEZE) passed to the suspend()
8586 * method call are treated as SUSPEND, and the driver fully reinitializes
8587 * its device during the resume() method call. Accordingly, the driver sets
8588 * the device to the PCI_D3hot state in PCI config space instead of setting
8589 * it according to the @msg provided by the PM.
8592 * 0 - driver suspended the device
8596 lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
8598 struct Scsi_Host *shost = pci_get_drvdata(pdev);
8599 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8601 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8602 "0473 PCI device Power Management suspend.\n");
8604 /* Bring down the device */
8605 lpfc_offline_prep(phba);
8607 kthread_stop(phba->worker_thread);
8609 /* Disable interrupt from device */
8610 lpfc_sli_disable_intr(phba);
8612 /* Save device state to PCI config space */
8613 pci_save_state(pdev);
8614 pci_set_power_state(pdev, PCI_D3hot);
8620 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
8621 * @pdev: pointer to PCI device
8623 * This routine is to be called from the kernel's PCI subsystem to support
8624 * system Power Management (PM) for a device with SLI-3 interface spec. When PM
8625 * invokes this method, it restores the device's PCI config space state and
8626 * fully reinitializes the device and brings it online. Note that the
8627 * driver implements only the minimum PM requirements of a power-aware
8628 * driver for suspend/resume: all possible PM messages (SUSPEND, HIBERNATE,
8629 * FREEZE) passed to the suspend() method call are treated as SUSPEND, and
8630 * the driver fully reinitializes its device during the resume() method call.
8631 * Accordingly, the device is set to PCI_D0 directly in PCI config space
8632 * before restoring the state.
8635 * 0 - driver resumed the device
8639 lpfc_pci_resume_one_s3(struct pci_dev *pdev)
8641 struct Scsi_Host *shost = pci_get_drvdata(pdev);
8642 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8646 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8647 "0452 PCI device Power Management resume.\n");
8649 /* Restore device state from PCI config space */
8650 pci_set_power_state(pdev, PCI_D0);
8651 pci_restore_state(pdev);
8654 * The new kernel behavior of the pci_restore_state() API call clears the
8655 * device's saved_state flag, so the restored state needs to be saved again.
8657 pci_save_state(pdev);
8659 if (pdev->is_busmaster)
8660 pci_set_master(pdev);
8662 /* Startup the kernel thread for this host adapter. */
8663 phba->worker_thread = kthread_run(lpfc_do_work, phba,
8664 "lpfc_worker_%d", phba->brd_no);
8665 if (IS_ERR(phba->worker_thread)) {
8666 error = PTR_ERR(phba->worker_thread);
8667 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8668 "0434 PM resume failed to start worker "
8669 "thread: error=x%x.\n", error);
8673 /* Configure and enable interrupt */
8674 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
8675 if (intr_mode == LPFC_INTR_ERROR) {
8676 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8677 "0430 PM resume Failed to enable interrupt\n");
8680 phba->intr_mode = intr_mode;
8682 /* Restart HBA and bring it online */
8683 lpfc_sli_brdrestart(phba);
8686 /* Log the current active interrupt mode */
8687 lpfc_log_intr_mode(phba, phba->intr_mode);
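
/*
 * A minimal sketch of the resume-side config-space sequence used above:
 * power the function to D0, restore config space, then save it again
 * because pci_restore_state() clears the saved_state flag.
 */
#if 0
static void example_pm_resume_config(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);	/* full power first */
	pci_restore_state(pdev);		/* replay saved config space */
	pci_save_state(pdev);			/* re-arm the saved_state flag */
	if (pdev->is_busmaster)
		pci_set_master(pdev);		/* re-enable bus mastering */
}
#endif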
8693 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover
8694 * @phba: pointer to lpfc hba data structure.
8696 * This routine is called to prepare the SLI3 device for PCI slot recover. It
8697 * aborts all the outstanding SCSI I/Os to the pci device.
8700 lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
8702 struct lpfc_sli *psli = &phba->sli;
8703 struct lpfc_sli_ring *pring;
8705 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8706 "2723 PCI channel I/O abort preparing for recovery\n");
8709 * There may be errored I/Os through the HBA; abort all I/Os on the
8710 * txcmplq and let the SCSI mid-layer retry them to recover.
8712 pring = &psli->ring[psli->fcp_ring];
8713 lpfc_sli_abort_iocb_ring(phba, pring);
8717 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset
8718 * @phba: pointer to lpfc hba data structure.
8720 * This routine is called to prepare the SLI3 device for PCI slot reset. It
8721 * disables the device interrupt and pci device, and aborts the internal FCP
8725 lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
8727 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8728 "2710 PCI channel disable preparing for reset\n");
8730 /* Block any management I/Os to the device */
8731 lpfc_block_mgmt_io(phba);
8733 /* Block all SCSI devices' I/Os on the host */
8734 lpfc_scsi_dev_block(phba);
8736 /* stop all timers */
8737 lpfc_stop_hba_timers(phba);
8739 /* Disable interrupt and pci device */
8740 lpfc_sli_disable_intr(phba);
8741 pci_disable_device(phba->pcidev);
8743 /* Flush all driver's outstanding SCSI I/Os as we are to reset */
8744 lpfc_sli_flush_fcp_rings(phba);
8748 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable
8749 * @phba: pointer to lpfc hba data structure.
8751 * This routine is called to prepare the SLI3 device for PCI slot permanently
8752 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
8756 lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
8758 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8759 "2711 PCI channel permanent disable for failure\n");
8760 /* Block all SCSI devices' I/Os on the host */
8761 lpfc_scsi_dev_block(phba);
8763 /* stop all timers */
8764 lpfc_stop_hba_timers(phba);
8766 /* Clean up all driver's outstanding SCSI I/Os */
8767 lpfc_sli_flush_fcp_rings(phba);
8771 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
8772 * @pdev: pointer to PCI device.
8773 * @state: the current PCI connection state.
8775 * This routine is called from the PCI subsystem for I/O error handling to
8776 * device with SLI-3 interface spec. This function is called by the PCI
8777 * subsystem after a PCI bus error affecting this device has been detected.
8778 * When this function is invoked, it will need to stop all the I/Os and
8779 * interrupt(s) to the device. Once that is done, it will return
8780 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
8784 * PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
8785 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
8786 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
8788 static pci_ers_result_t
8789 lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
8791 struct Scsi_Host *shost = pci_get_drvdata(pdev);
8792 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8795 case pci_channel_io_normal:
8796 /* Non-fatal error, prepare for recovery */
8797 lpfc_sli_prep_dev_for_recover(phba);
8798 return PCI_ERS_RESULT_CAN_RECOVER;
8799 case pci_channel_io_frozen:
8800 /* Fatal error, prepare for slot reset */
8801 lpfc_sli_prep_dev_for_reset(phba);
8802 return PCI_ERS_RESULT_NEED_RESET;
8803 case pci_channel_io_perm_failure:
8804 /* Permanent failure, prepare for device down */
8805 lpfc_sli_prep_dev_for_perm_failure(phba);
8806 return PCI_ERS_RESULT_DISCONNECT;
8808 /* Unknown state, prepare and request slot reset */
8809 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8810 "0472 Unknown PCI error state: x%x\n", state);
8811 lpfc_sli_prep_dev_for_reset(phba);
8812 return PCI_ERS_RESULT_NEED_RESET;
8817 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
8818 * @pdev: pointer to PCI device.
8820 * This routine is called from the PCI subsystem for error handling to
8821 * device with SLI-3 interface spec. This is called after PCI bus has been
8822 * reset to restart the PCI card from scratch, as if from a cold-boot.
8823 * During the PCI subsystem error recovery, after driver returns
8824 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
8825 * recovery and then call this routine before calling the .resume method
8826 * to recover the device. This function will initialize the HBA device,
8827 * enable the interrupt, and put the HBA into an offline state
8828 * without passing any I/O traffic.
8831 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
8832 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
8834 static pci_ers_result_t
8835 lpfc_io_slot_reset_s3(struct pci_dev *pdev)
8837 struct Scsi_Host *shost = pci_get_drvdata(pdev);
8838 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8839 struct lpfc_sli *psli = &phba->sli;
8842 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
8843 if (pci_enable_device_mem(pdev)) {
8844 printk(KERN_ERR "lpfc: Cannot re-enable "
8845 "PCI device after reset.\n");
8846 return PCI_ERS_RESULT_DISCONNECT;
8849 pci_restore_state(pdev);
8852 * The new kernel behavior of the pci_restore_state() API call clears the
8853 * device's saved_state flag, so the restored state needs to be saved again.
8855 pci_save_state(pdev);
8857 if (pdev->is_busmaster)
8858 pci_set_master(pdev);
8860 spin_lock_irq(&phba->hbalock);
8861 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
8862 spin_unlock_irq(&phba->hbalock);
8864 /* Configure and enable interrupt */
8865 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
8866 if (intr_mode == LPFC_INTR_ERROR) {
8867 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8868 "0427 Cannot re-enable interrupt after "
8870 return PCI_ERS_RESULT_DISCONNECT;
8872 phba->intr_mode = intr_mode;
8874 /* Take device offline, it will perform cleanup */
8875 lpfc_offline_prep(phba);
8877 lpfc_sli_brdrestart(phba);
8879 /* Log the current active interrupt mode */
8880 lpfc_log_intr_mode(phba, phba->intr_mode);
8882 return PCI_ERS_RESULT_RECOVERED;
8886 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
8887 * @pdev: pointer to PCI device
8889 * This routine is called from the PCI subsystem for error handling to device
8890 * with SLI-3 interface spec. It is called when kernel error recovery tells
8891 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
8892 * error recovery. After this call, traffic can start to flow from this device
8896 lpfc_io_resume_s3(struct pci_dev *pdev)
8898 struct Scsi_Host *shost = pci_get_drvdata(pdev);
8899 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8901 /* Bring device online, it will be no-op for non-fatal error resume */
8904 /* Clean up Advanced Error Reporting (AER) if needed */
8905 if (phba->hba_flag & HBA_AER_ENABLED)
8906 pci_cleanup_aer_uncorrect_error_status(pdev);
8910 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
8911 * @phba: pointer to lpfc hba data structure.
8913 * returns the number of ELS/CT IOCBs to reserve
8916 lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
8918 int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
8920 if (phba->sli_rev == LPFC_SLI_REV4) {
8923 else if (max_xri <= 256)
8925 else if (max_xri <= 512)
8927 else if (max_xri <= 1024)
8936 * lpfc_write_firmware - attempt to write a firmware image to the port
8937 * @phba: pointer to lpfc hba data structure.
8938 * @fw: pointer to firmware image returned from request_firmware.
8940 * returns the number of bytes written if write is successful.
8941 * returns a negative error value if there were errors.
8942 * returns 0 if firmware matches currently active firmware on port.
8945 lpfc_write_firmware(struct lpfc_hba *phba, const struct firmware *fw)
8948 struct lpfc_grp_hdr *image = (struct lpfc_grp_hdr *)fw->data;
8949 struct list_head dma_buffer_list;
8951 struct lpfc_dmabuf *dmabuf, *next;
8952 uint32_t offset = 0, temp_offset = 0;
8954 INIT_LIST_HEAD(&dma_buffer_list);
8955 if ((be32_to_cpu(image->magic_number) != LPFC_GROUP_OJECT_MAGIC_NUM) ||
8956 (bf_get_be32(lpfc_grp_hdr_file_type, image) !=
8957 LPFC_FILE_TYPE_GROUP) ||
8958 (bf_get_be32(lpfc_grp_hdr_id, image) != LPFC_FILE_ID_GROUP) ||
8959 (be32_to_cpu(image->size) != fw->size)) {
8960 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8961 "3022 Invalid FW image found. "
8962 "Magic:%x Type:%x ID:%x\n",
8963 be32_to_cpu(image->magic_number),
8964 bf_get_be32(lpfc_grp_hdr_file_type, image),
8965 bf_get_be32(lpfc_grp_hdr_id, image));
8968 lpfc_decode_firmware_rev(phba, fwrev, 1);
8969 if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) {
8970 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8971 "3023 Updating Firmware. Current Version:%s "
8973 fwrev, image->revision);
8974 for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
8975 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
8981 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
8985 if (!dmabuf->virt) {
8990 list_add_tail(&dmabuf->list, &dma_buffer_list);
8992 while (offset < fw->size) {
8993 temp_offset = offset;
8994 list_for_each_entry(dmabuf, &dma_buffer_list, list) {
8995 if (temp_offset + SLI4_PAGE_SIZE > fw->size) {
8996 memcpy(dmabuf->virt,
8997 fw->data + temp_offset,
8998 fw->size - temp_offset);
8999 temp_offset = fw->size;
9002 memcpy(dmabuf->virt, fw->data + temp_offset,
9004 temp_offset += SLI4_PAGE_SIZE;
9006 rc = lpfc_wr_object(phba, &dma_buffer_list,
9007 (fw->size - offset), &offset);
9009 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9010 "3024 Firmware update failed. "
9018 list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) {
9019 list_del(&dmabuf->list);
9020 dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
9021 dmabuf->virt, dmabuf->phys);
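
/*
 * A minimal sketch of the chunked-copy pattern above: a firmware image of
 * arbitrary size is streamed through a short list of SLI4_PAGE_SIZE DMA
 * buffers, copying one full page per buffer and clamping the final,
 * partial page.  example_bufs/example_cnt are illustrative placeholders
 * for the dma_buffer_list entries.
 */
#if 0
static size_t example_fill_pages(void **example_bufs, int example_cnt,
				 const u8 *data, size_t size, size_t offset)
{
	int i;

	for (i = 0; i < example_cnt && offset < size; i++) {
		size_t chunk = min_t(size_t, SLI4_PAGE_SIZE, size - offset);

		memcpy(example_bufs[i], data + offset, chunk);
		offset += chunk;
	}
	return offset;	/* caller writes this pass, then calls again */
}
#endif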
9028 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
9029 * @pdev: pointer to PCI device
9030 * @pid: pointer to PCI device identifier
9032 * This routine is called from the kernel's PCI subsystem to attach a device
9033 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
9034 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
9035 * information of the device and driver to see if the driver
9036 * can support this kind of device. If the match is successful, the driver
9037 * core invokes this routine. If this routine determines it can claim the HBA,
9038 * it does all the initialization that it needs to do to handle the HBA
9042 * 0 - driver can claim the device
9043 * negative value - driver can not claim the device
9045 static int __devinit
9046 lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
9048 struct lpfc_hba *phba;
9049 struct lpfc_vport *vport = NULL;
9050 struct Scsi_Host *shost = NULL;
9052 uint32_t cfg_mode, intr_mode;
9054 int adjusted_fcp_eq_count;
9055 const struct firmware *fw;
9056 uint8_t file_name[16];
9058 /* Allocate memory for HBA structure */
9059 phba = lpfc_hba_alloc(pdev);
9063 /* Perform generic PCI device enabling operation */
9064 error = lpfc_enable_pci_dev(phba);
9068 /* Set up SLI API function jump table for PCI-device group-1 HBAs */
9069 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
9071 goto out_disable_pci_dev;
9073 /* Set up SLI-4 specific device PCI memory space */
9074 error = lpfc_sli4_pci_mem_setup(phba);
9076 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9077 "1410 Failed to set up pci memory space.\n");
9078 goto out_disable_pci_dev;
9081 /* Set up phase-1 common device driver resources */
9082 error = lpfc_setup_driver_resource_phase1(phba);
9084 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9085 "1411 Failed to set up driver resource.\n");
9086 goto out_unset_pci_mem_s4;
9089 /* Set up SLI-4 Specific device driver resources */
9090 error = lpfc_sli4_driver_resource_setup(phba);
9092 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9093 "1412 Failed to set up driver resource.\n");
9094 goto out_unset_pci_mem_s4;
9097 /* Initialize and populate the iocb list per host */
9099 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9100 "2821 initialize iocb list %d.\n",
9101 phba->cfg_iocb_cnt*1024);
9102 error = lpfc_init_iocb_list(phba, phba->cfg_iocb_cnt*1024);
9105 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9106 "1413 Failed to initialize iocb list.\n");
9107 goto out_unset_driver_resource_s4;
9110 INIT_LIST_HEAD(&phba->active_rrq_list);
9111 INIT_LIST_HEAD(&phba->fcf.fcf_pri_list);
9113 /* Set up common device driver resources */
9114 error = lpfc_setup_driver_resource_phase2(phba);
9116 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9117 "1414 Failed to set up driver resource.\n");
9118 goto out_free_iocb_list;
9121 /* Get the default values for Model Name and Description */
9122 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
9124 /* Create SCSI host to the physical port */
9125 error = lpfc_create_shost(phba);
9127 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9128 "1415 Failed to create scsi host.\n");
9129 goto out_unset_driver_resource;
9132 /* Configure sysfs attributes */
9133 vport = phba->pport;
9134 error = lpfc_alloc_sysfs_attr(vport);
9136 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9137 "1416 Failed to allocate sysfs attr\n");
9138 goto out_destroy_shost;
9141 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
9142 /* Now, try to enable interrupt and bring up the device */
9143 cfg_mode = phba->cfg_use_msi;
9145 /* Put device to a known state before enabling interrupt */
9146 lpfc_stop_port(phba);
9147 /* Configure and enable interrupt */
9148 intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
9149 if (intr_mode == LPFC_INTR_ERROR) {
9150 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9151 "0426 Failed to enable interrupt.\n");
9153 goto out_free_sysfs_attr;
9155 /* Default to single EQ for non-MSI-X */
9156 if (phba->intr_type != MSIX)
9157 adjusted_fcp_eq_count = 0;
9158 else if (phba->sli4_hba.msix_vec_nr <
9159 phba->cfg_fcp_eq_count + 1)
9160 adjusted_fcp_eq_count = phba->sli4_hba.msix_vec_nr - 1;
9162 adjusted_fcp_eq_count = phba->cfg_fcp_eq_count;
9163 phba->cfg_fcp_eq_count = adjusted_fcp_eq_count;
9164 /* Set up SLI-4 HBA */
9165 if (lpfc_sli4_hba_setup(phba)) {
9166 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9167 "1421 Failed to set up hba\n");
9169 goto out_disable_intr;
9172 /* Send NOP mbx cmds for non-INTx mode active interrupt test */
9174 mcnt = lpfc_sli4_send_nop_mbox_cmds(phba,
9177 /* Check active interrupts received only for MSI/MSI-X */
9178 if (intr_mode == 0 ||
9179 phba->sli.slistat.sli_intr >= LPFC_ACT_INTR_CNT) {
9180 /* Log the current active interrupt mode */
9181 phba->intr_mode = intr_mode;
9182 lpfc_log_intr_mode(phba, intr_mode);
9185 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9186 "0451 Configure interrupt mode (%d) "
9187 "failed active interrupt test.\n",
9189 /* Unset the previous SLI-4 HBA setup. */
9191 * TODO: Is this operation compatible with IF TYPE 2
9192 * devices? All port state is deleted and cleared.
9194 lpfc_sli4_unset_hba(phba);
9195 /* Try next level of interrupt mode */
9196 cfg_mode = --intr_mode;
9199 /* Perform post initialization setup */
9200 lpfc_post_init_setup(phba);
9202 /* check for firmware upgrade or downgrade */
9203 snprintf(file_name, 16, "%s.grp", phba->ModelName);
9204 error = request_firmware(&fw, file_name, &phba->pcidev->dev);
9206 lpfc_write_firmware(phba, fw);
9207 release_firmware(fw);
9210 /* Check if there are static vports to be created. */
9211 lpfc_create_static_vport(phba);
9215 lpfc_sli4_disable_intr(phba);
9216 out_free_sysfs_attr:
9217 lpfc_free_sysfs_attr(vport);
9219 lpfc_destroy_shost(phba);
9220 out_unset_driver_resource:
9221 lpfc_unset_driver_resource_phase2(phba);
9223 lpfc_free_iocb_list(phba);
9224 out_unset_driver_resource_s4:
9225 lpfc_sli4_driver_resource_unset(phba);
9226 out_unset_pci_mem_s4:
9227 lpfc_sli4_pci_mem_unset(phba);
9228 out_disable_pci_dev:
9229 lpfc_disable_pci_dev(phba);
9231 scsi_host_put(shost);
9233 lpfc_hba_free(phba);
9238 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
9239 * @pdev: pointer to PCI device
9241 * This routine is called from the kernel's PCI subsystem to detach a device
9242 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
9243 * removed from PCI bus, it performs all the necessary cleanup for the HBA
9244 * device to be removed from the PCI subsystem properly.
9246 static void __devexit
9247 lpfc_pci_remove_one_s4(struct pci_dev *pdev)
9249 struct Scsi_Host *shost = pci_get_drvdata(pdev);
9250 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
9251 struct lpfc_vport **vports;
9252 struct lpfc_hba *phba = vport->phba;
9255 /* Mark the device unloading flag */
9256 spin_lock_irq(&phba->hbalock);
9257 vport->load_flag |= FC_UNLOADING;
9258 spin_unlock_irq(&phba->hbalock);
9260 /* Free the HBA sysfs attributes */
9261 lpfc_free_sysfs_attr(vport);
9263 /* Release all the vports against this physical port */
9264 vports = lpfc_create_vport_work_array(phba);
9266 for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
9267 fc_vport_terminate(vports[i]->fc_vport);
9268 lpfc_destroy_vport_work_array(phba, vports);
9270 /* Remove FC host and then SCSI host with the physical port */
9271 fc_remove_host(shost);
9272 scsi_remove_host(shost);
9274 /* Perform cleanup on the physical port */
9275 lpfc_cleanup(vport);
9278 * Bring down the SLI Layer. This step disables all interrupts,
9279 * clears the rings, discards all mailbox commands, and resets
9280 * the HBA FCoE function.
9282 lpfc_debugfs_terminate(vport);
9283 lpfc_sli4_hba_unset(phba);
9285 spin_lock_irq(&phba->hbalock);
9286 list_del_init(&vport->listentry);
9287 spin_unlock_irq(&phba->hbalock);
9289 /* Perform scsi free before driver resource_unset since scsi
9290 * buffers are released to their corresponding pools here.
9292 lpfc_scsi_free(phba);
9293 lpfc_sli4_driver_resource_unset(phba);
9295 /* Unmap adapter Control and Doorbell registers */
9296 lpfc_sli4_pci_mem_unset(phba);
9298 /* Release PCI resources and disable device's PCI function */
9299 scsi_host_put(shost);
9300 lpfc_disable_pci_dev(phba);
9302 /* Finally, free the driver's device data structure */
9303 lpfc_hba_free(phba);
9309 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
9310 * @pdev: pointer to PCI device
9311 * @msg: power management message
9313 * This routine is called from the kernel's PCI subsystem to support system
9314 * Power Management (PM) for a device with SLI-4 interface spec. When PM invokes
9315 * this method, it quiesces the device by stopping the driver's worker
9316 * thread for the device, turning off the device's interrupts and DMA, and
9317 * bringing the device offline. Note that the driver implements only the
9318 * minimum PM requirements of a power-aware driver for suspend/resume: all
9319 * possible PM messages (SUSPEND, HIBERNATE, FREEZE) passed to the suspend()
9320 * method call are treated as SUSPEND, and the driver fully
9321 * reinitializes its device during the resume() method call. Accordingly,
9322 * the driver sets the device to the PCI_D3hot state in PCI config space
9323 * instead of setting it according to the @msg provided by the PM.
9326 * 0 - driver suspended the device
9330 lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
9332 struct Scsi_Host *shost = pci_get_drvdata(pdev);
9333 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9335 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9336 "2843 PCI device Power Management suspend.\n");
9338 /* Bring down the device */
9339 lpfc_offline_prep(phba);
9341 kthread_stop(phba->worker_thread);
9343 /* Disable interrupt from device */
9344 lpfc_sli4_disable_intr(phba);
9345 lpfc_sli4_queue_destroy(phba);
9347 /* Save device state to PCI config space */
9348 pci_save_state(pdev);
9349 pci_set_power_state(pdev, PCI_D3hot);
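
/*
 * A minimal sketch of the suspend-side counterpart to the resume sequence
 * shown earlier: quiesce the device first, then snapshot config space and
 * drop to D3hot.  example_quiesce() is a placeholder for the offline,
 * worker-thread, and interrupt teardown steps.
 */
#if 0
static void example_quiesce(struct pci_dev *pdev);	/* placeholder */

static int example_pm_suspend_config(struct pci_dev *pdev)
{
	example_quiesce(pdev);			/* offline, stop worker, irqs off */
	pci_save_state(pdev);			/* snapshot config space */
	pci_set_power_state(pdev, PCI_D3hot);	/* lowest wake-capable state */
	return 0;
}
#endif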
9355 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
9356 * @pdev: pointer to PCI device
9358 * This routine is called from the kernel's PCI subsystem to support system
9359 * Power Management (PM) for a device with SLI-4 interface spec. When PM invokes
9360 * this method, it restores the device's PCI config space state and fully
9361 * reinitializes the device and brings it online. Note that the driver
9362 * implements only the minimum PM requirements of a power-aware driver for
9363 * suspend/resume: all possible PM messages (SUSPEND, HIBERNATE, FREEZE)
9364 * passed to the suspend() method call are treated as SUSPEND, and the driver
9365 * fully reinitializes its device during the resume() method call. The device
9366 * will be set to PCI_D0 directly in PCI config space before restoring the
9367 * state.
9370 * 0 - driver resumed the device
9374 lpfc_pci_resume_one_s4(struct pci_dev *pdev)
9376 struct Scsi_Host *shost = pci_get_drvdata(pdev);
9377 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9381 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9382 "0292 PCI device Power Management resume.\n");
9384 /* Restore device state from PCI config space */
9385 pci_set_power_state(pdev, PCI_D0);
9386 pci_restore_state(pdev);
9389 * The new kernel behavior of the pci_restore_state() API call clears the
9390 * device's saved_state flag, so the restored state needs to be saved again.
9392 pci_save_state(pdev);
9394 if (pdev->is_busmaster)
9395 pci_set_master(pdev);
9397 /* Startup the kernel thread for this host adapter. */
9398 phba->worker_thread = kthread_run(lpfc_do_work, phba,
9399 "lpfc_worker_%d", phba->brd_no);
9400 if (IS_ERR(phba->worker_thread)) {
9401 error = PTR_ERR(phba->worker_thread);
9402 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9403 "0293 PM resume failed to start worker "
9404 "thread: error=x%x.\n", error);
9408 /* Configure and enable interrupt */
9409 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
9410 if (intr_mode == LPFC_INTR_ERROR) {
9411 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9412 "0294 PM resume Failed to enable interrupt\n");
9415 phba->intr_mode = intr_mode;
9417 /* Restart HBA and bring it online */
9418 lpfc_sli_brdrestart(phba);
9421 /* Log the current active interrupt mode */
9422 lpfc_log_intr_mode(phba, phba->intr_mode);
9428 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover
9429 * @phba: pointer to lpfc hba data structure.
9431 * This routine is called to prepare the SLI4 device for PCI slot recover. It
9432 * aborts all the outstanding SCSI I/Os to the pci device.
9435 lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
9437 struct lpfc_sli *psli = &phba->sli;
9438 struct lpfc_sli_ring *pring;
9440 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9441 "2828 PCI channel I/O abort preparing for recovery\n");
9443 * There may be errored I/Os through the HBA; abort all I/Os on the
9444 * txcmplq and let the SCSI mid-layer retry them to recover.
9446 pring = &psli->ring[psli->fcp_ring];
9447 lpfc_sli_abort_iocb_ring(phba, pring);
9451 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset
9452 * @phba: pointer to lpfc hba data structure.
9454 * This routine is called to prepare the SLI4 device for PCI slot reset. It
9455 * disables the device interrupt and pci device, and aborts the internal FCP
9459 lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
9461 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9462 "2826 PCI channel disable preparing for reset\n");
9464 /* Block any management I/Os to the device */
9465 lpfc_block_mgmt_io(phba);
9467 /* Block all SCSI devices' I/Os on the host */
9468 lpfc_scsi_dev_block(phba);
9470 /* stop all timers */
9471 lpfc_stop_hba_timers(phba);
9473 /* Disable interrupt and pci device */
9474 lpfc_sli4_disable_intr(phba);
9475 lpfc_sli4_queue_destroy(phba);
9476 pci_disable_device(phba->pcidev);
9478 /* Flush all driver's outstanding SCSI I/Os as we are to reset */
9479 lpfc_sli_flush_fcp_rings(phba);
9483 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable
9484 * @phba: pointer to lpfc hba data structure.
9486 * This routine is called to prepare the SLI4 device for PCI slot permanently
9487 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
9491 lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
9493 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9494 "2827 PCI channel permanent disable for failure\n");
9496 /* Block all SCSI devices' I/Os on the host */
9497 lpfc_scsi_dev_block(phba);
9499 /* stop all timers */
9500 lpfc_stop_hba_timers(phba);
9502 /* Clean up all driver's outstanding SCSI I/Os */
9503 lpfc_sli_flush_fcp_rings(phba);
9507 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
9508 * @pdev: pointer to PCI device.
9509 * @state: the current PCI connection state.
9511 * This routine is called from the PCI subsystem for error handling to device
9512 * with SLI-4 interface spec. This function is called by the PCI subsystem
9513 * after a PCI bus error affecting this device has been detected. When this
9514 * function is invoked, it will need to stop all the I/Os and interrupt(s)
9515 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET
9516 * for the PCI subsystem to perform proper recovery as desired.
9519 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
9520 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
9522 static pci_ers_result_t
9523 lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
9525 struct Scsi_Host *shost = pci_get_drvdata(pdev);
9526 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9529 case pci_channel_io_normal:
9530 /* Non-fatal error, prepare for recovery */
9531 lpfc_sli4_prep_dev_for_recover(phba);
9532 return PCI_ERS_RESULT_CAN_RECOVER;
9533 case pci_channel_io_frozen:
9534 /* Fatal error, prepare for slot reset */
9535 lpfc_sli4_prep_dev_for_reset(phba);
9536 return PCI_ERS_RESULT_NEED_RESET;
9537 case pci_channel_io_perm_failure:
9538 /* Permanent failure, prepare for device down */
9539 lpfc_sli4_prep_dev_for_perm_failure(phba);
9540 return PCI_ERS_RESULT_DISCONNECT;
9542 /* Unknown state, prepare and request slot reset */
9543 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9544 "2825 Unknown PCI error state: x%x\n", state);
9545 lpfc_sli4_prep_dev_for_reset(phba);
9546 return PCI_ERS_RESULT_NEED_RESET;
9551 * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch
9552 * @pdev: pointer to PCI device.
9554 * This routine is called from the PCI subsystem for error handling to device
9555 * with SLI-4 interface spec. It is called after PCI bus has been reset to
9556 * restart the PCI card from scratch, as if from a cold-boot. During the
9557 * PCI subsystem error recovery, after the driver returns
9558 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
9559 * recovery and then call this routine before calling the .resume method to
9560 * recover the device. This function will initialize the HBA device, enable
9561 * the interrupt, and put the HBA into an offline state without
9562 * passing any I/O traffic.
9565 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
9566 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
9568 static pci_ers_result_t
9569 lpfc_io_slot_reset_s4(struct pci_dev *pdev)
9571 struct Scsi_Host *shost = pci_get_drvdata(pdev);
9572 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9573 struct lpfc_sli *psli = &phba->sli;
9576 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
9577 if (pci_enable_device_mem(pdev)) {
9578 printk(KERN_ERR "lpfc: Cannot re-enable "
9579 "PCI device after reset.\n");
9580 return PCI_ERS_RESULT_DISCONNECT;
9583 pci_restore_state(pdev);
9586 * The new kernel behavior of the pci_restore_state() API call clears the
9587 * device's saved_state flag, so the restored state needs to be saved again.
9589 pci_save_state(pdev);
9591 if (pdev->is_busmaster)
9592 pci_set_master(pdev);
9594 spin_lock_irq(&phba->hbalock);
9595 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
9596 spin_unlock_irq(&phba->hbalock);
9598 /* Configure and enable interrupt */
9599 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
9600 if (intr_mode == LPFC_INTR_ERROR) {
9601 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9602 "2824 Cannot re-enable interrupt after "
9604 return PCI_ERS_RESULT_DISCONNECT;
9606 phba->intr_mode = intr_mode;
9608 /* Log the current active interrupt mode */
9609 lpfc_log_intr_mode(phba, phba->intr_mode);
9611 return PCI_ERS_RESULT_RECOVERED;
9615 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
9616 * @pdev: pointer to PCI device
9618 * This routine is called from the PCI subsystem for error handling to device
9619 * with SLI-4 interface spec. It is called when kernel error recovery tells
9620 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
9621 * error recovery. After this call, traffic can start to flow from this device
9625 lpfc_io_resume_s4(struct pci_dev *pdev)
9627 struct Scsi_Host *shost = pci_get_drvdata(pdev);
9628 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9631 * In case of slot reset, since the function reset is performed through a
9632 * mailbox command, which needs DMA to be enabled, this operation
9633 * has to be moved to the io resume phase. Taking the device offline
9634 * will perform the necessary cleanup.
9636 if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
9637 /* Perform device reset */
9638 lpfc_offline_prep(phba);
9640 lpfc_sli_brdrestart(phba);
9641 /* Bring the device back online */
9645 /* Clean up Advanced Error Reporting (AER) if needed */
9646 if (phba->hba_flag & HBA_AER_ENABLED)
9647 pci_cleanup_aer_uncorrect_error_status(pdev);
9651 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
9652 * @pdev: pointer to PCI device
9653 * @pid: pointer to PCI device identifier
9655 * This routine is to be registered to the kernel's PCI subsystem. When an
9656 * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks
9657 * at PCI device-specific information of the device and driver to see if the
9658 * driver can support this kind of device. If the match is
9659 * successful, the driver core invokes this routine. This routine dispatches
9660 * the action to the proper SLI-3 or SLI-4 device probing routine, which will
9661 * do all the initialization that it needs to do to handle the HBA device
9665 * 0 - driver can claim the device
9666 * negative value - driver can not claim the device
9668 static int __devinit
9669 lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
9672 struct lpfc_sli_intf intf;
9674 if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
9677 if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
9678 (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
9679 rc = lpfc_pci_probe_one_s4(pdev, pid);
9681 rc = lpfc_pci_probe_one_s3(pdev, pid);
9687 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
9688 * @pdev: pointer to PCI device
9690 * This routine is to be registered to the kernel's PCI subsystem. When an
9691 * Emulex HBA is removed from PCI bus, the driver core invokes this routine.
9692 * This routine dispatches the action to the proper SLI-3 or SLI-4 device
9693 * remove routine, which will perform all the necessary cleanup for the
9694 * device to be removed from the PCI subsystem properly.
9696 static void __devexit
9697 lpfc_pci_remove_one(struct pci_dev *pdev)
9699 struct Scsi_Host *shost = pci_get_drvdata(pdev);
9700 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9702 switch (phba->pci_dev_grp) {
9703 case LPFC_PCI_DEV_LP:
9704 lpfc_pci_remove_one_s3(pdev);
9706 case LPFC_PCI_DEV_OC:
9707 lpfc_pci_remove_one_s4(pdev);
9710 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9711 "1424 Invalid PCI device group: 0x%x\n",
9719 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
9720 * @pdev: pointer to PCI device
9721 * @msg: power management message
9723 * This routine is to be registered to the kernel's PCI subsystem to support
9724 * system Power Management (PM). When PM invokes this method, it dispatches
9725 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
9726 * suspend the device.
9729 * 0 - driver suspended the device
9733 lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
9735 struct Scsi_Host *shost = pci_get_drvdata(pdev);
9736 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9739 switch (phba->pci_dev_grp) {
9740 case LPFC_PCI_DEV_LP:
9741 rc = lpfc_pci_suspend_one_s3(pdev, msg);
9743 case LPFC_PCI_DEV_OC:
9744 rc = lpfc_pci_suspend_one_s4(pdev, msg);
9747 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9748 "1425 Invalid PCI device group: 0x%x\n",
9756 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
9757 * @pdev: pointer to PCI device
9759 * This routine is to be registered to the kernel's PCI subsystem to support
9760 * system Power Management (PM). When PM invokes this method, it dispatches
9761 * the action to the proper SLI-3 or SLI-4 device resume routine, which will
9762 * resume the device.
9765 * 0 - driver resumed the device
9769 lpfc_pci_resume_one(struct pci_dev *pdev)
9771 struct Scsi_Host *shost = pci_get_drvdata(pdev);
9772 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9775 switch (phba->pci_dev_grp) {
9776 case LPFC_PCI_DEV_LP:
9777 rc = lpfc_pci_resume_one_s3(pdev);
9779 case LPFC_PCI_DEV_OC:
9780 rc = lpfc_pci_resume_one_s4(pdev);
9783 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9784 "1426 Invalid PCI device group: 0x%x\n",
/**
 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is registered to the PCI subsystem for error handling. This
 * function is called by the PCI subsystem after a PCI bus error affecting
 * this device has been detected. When this routine is invoked, it dispatches
 * the action to the proper SLI-3 or SLI-4 device error detected handling
 * routine, which will perform the proper error detected operation.
 *
 * Return codes
 *	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_io_error_detected_s3(pdev, state);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_io_error_detected_s4(pdev, state);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1427 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}
/**
 * lpfc_io_slot_reset - lpfc method for restarting PCI dev from scratch
 * @pdev: pointer to PCI device.
 *
 * This routine is registered to the PCI subsystem for error handling. This
 * function is called after the PCI bus has been reset to restart the PCI
 * card from scratch, as if from a cold-boot. When this routine is invoked,
 * it dispatches the action to the proper SLI-3 or SLI-4 device reset
 * handling routine, which will perform the proper device reset.
 *
 * Return codes
 *	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_slot_reset(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_io_slot_reset_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_io_slot_reset_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1428 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}
/**
 * lpfc_io_resume - lpfc method for resuming PCI I/O operation
 * @pdev: pointer to PCI device
 *
 * This routine is registered to the PCI subsystem for error handling. It
 * is called when kernel error recovery tells the lpfc driver that it is
 * OK to resume normal PCI operation after PCI bus error recovery. When
 * this routine is invoked, it dispatches the action to the proper SLI-3
 * or SLI-4 device io_resume routine, which will resume the device operation.
 **/
static void
lpfc_io_resume(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		lpfc_io_resume_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		lpfc_io_resume_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1429 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
}
static struct pci_device_id lpfc_id_table[] = {
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FIREFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HORNET,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP11000S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_MID,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SMB,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_VF,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_PF,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TOMCAT,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FALCON,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BALIUS,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC_VF,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE_VF,
		PCI_ANY_ID, PCI_ANY_ID, },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, lpfc_id_table);
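/*
 * MODULE_DEVICE_TABLE() exports lpfc_id_table in the module image so that
 * depmod can generate modalias entries for automatic module loading. The
 * PCI_ANY_ID wildcards in the subvendor/subdevice slots mean any board
 * with a matching vendor/device pair is claimed regardless of subsystem
 * IDs.
 */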
static struct pci_error_handlers lpfc_err_handler = {
	.error_detected = lpfc_io_error_detected,
	.slot_reset = lpfc_io_slot_reset,
	.resume = lpfc_io_resume,
};
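/*
 * The three callbacks above follow the kernel's PCI error recovery
 * sequence (see Documentation/PCI/pci-error-recovery.txt): error_detected
 * runs first and may request a reset, slot_reset runs after the bus has
 * been reset, and resume runs once recovery has succeeded.
 */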
static struct pci_driver lpfc_driver = {
	.name		= LPFC_DRIVER_NAME,
	.id_table	= lpfc_id_table,
	.probe		= lpfc_pci_probe_one,
	.remove		= __devexit_p(lpfc_pci_remove_one),
	.shutdown	= lpfc_pci_remove_one,
	.suspend	= lpfc_pci_suspend_one,
	.resume		= lpfc_pci_resume_one,
	.err_handler	= &lpfc_err_handler,
};
/**
 * lpfc_init - lpfc module initialization routine
 *
 * This routine is to be invoked when the lpfc module is loaded into the
 * kernel. The special kernel macro module_init() is used to indicate the
 * role of this routine to the kernel as lpfc module entry point.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - FC attach transport failed
 *	all others - failed
 **/
static int __init
lpfc_init(void)
{
	int error = 0;

	printk(LPFC_MODULE_DESC "\n");
	printk(LPFC_COPYRIGHT "\n");

	if (lpfc_enable_npiv) {
		lpfc_transport_functions.vport_create = lpfc_vport_create;
		lpfc_transport_functions.vport_delete = lpfc_vport_delete;
	}
	lpfc_transport_template =
				fc_attach_transport(&lpfc_transport_functions);
	if (lpfc_transport_template == NULL)
		return -ENOMEM;
	if (lpfc_enable_npiv) {
		lpfc_vport_transport_template =
			fc_attach_transport(&lpfc_vport_transport_functions);
		if (lpfc_vport_transport_template == NULL) {
			fc_release_transport(lpfc_transport_template);
			return -ENOMEM;
		}
	}
	error = pci_register_driver(&lpfc_driver);
	if (error) {
		fc_release_transport(lpfc_transport_template);
		if (lpfc_enable_npiv)
			fc_release_transport(lpfc_vport_transport_template);
	}

	return error;
}
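/*
 * Illustrative fragment (kept out of the build by the #if 0 guard): the
 * error unwind at the end of lpfc_init() could equivalently be written
 * with the kernel's goto-based error-path idiom; the label name below is
 * hypothetical, shown only to highlight that every transport attached
 * before a failed pci_register_driver() is released again.
 */
#if 0
	error = pci_register_driver(&lpfc_driver);
	if (error)
		goto out_release_transports;
	return 0;
out_release_transports:
	fc_release_transport(lpfc_transport_template);
	if (lpfc_enable_npiv)
		fc_release_transport(lpfc_vport_transport_template);
	return error;
#endif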
/**
 * lpfc_exit - lpfc module removal routine
 *
 * This routine is invoked when the lpfc module is removed from the kernel.
 * The special kernel macro module_exit() is used to indicate the role of
 * this routine to the kernel as lpfc module exit point.
 **/
static void __exit
lpfc_exit(void)
{
	pci_unregister_driver(&lpfc_driver);
	fc_release_transport(lpfc_transport_template);
	if (lpfc_enable_npiv)
		fc_release_transport(lpfc_vport_transport_template);
	if (_dump_buf_data) {
		printk(KERN_ERR "9062 BLKGRD: freeing %lu pages for "
		       "_dump_buf_data at 0x%p\n",
		       (1L << _dump_buf_data_order), _dump_buf_data);
		free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order);
	}

	if (_dump_buf_dif) {
		printk(KERN_ERR "9049 BLKGRD: freeing %lu pages for "
		       "_dump_buf_dif at 0x%p\n",
		       (1L << _dump_buf_dif_order), _dump_buf_dif);
		free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order);
	}
}
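/*
 * _dump_buf_data and _dump_buf_dif (with their _order page-allocation
 * sizes, declared near the top of this file) are BlockGuard debug buffers
 * used to capture data and DIF protection bytes for diagnosis; they are
 * only allocated when that debugging path is exercised, hence the
 * conditional frees above.
 */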
module_init(lpfc_init);
module_exit(lpfc_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(LPFC_MODULE_DESC);
MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com");
MODULE_VERSION("0:" LPFC_DRIVER_VERSION);