/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2012 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/ctype.h>
#include <linux/aer.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/miscdevice.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"
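
/*
 * Note (added): these globals back the driver's optional BlockGuard
 * (T10 DIF) debug dump buffers; they are shared across all HBA
 * instances, which is why access is serialized through _dump_buf_lock.
 */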
char *_dump_buf_data;
unsigned long _dump_buf_data_order;
char *_dump_buf_dif;
unsigned long _dump_buf_dif_order;
spinlock_t _dump_buf_lock;

static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
static int lpfc_sli4_queue_verify(struct lpfc_hba *);
static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
static int lpfc_setup_endian_order(struct lpfc_hba *);
static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
static void lpfc_free_els_sgl_list(struct lpfc_hba *);
static void lpfc_init_sgl_list(struct lpfc_hba *);
static int lpfc_init_active_sgl_array(struct lpfc_hba *);
static void lpfc_free_active_sgl(struct lpfc_hba *);
static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
static void lpfc_sli4_disable_intr(struct lpfc_hba *);
static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t);

static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
static DEFINE_IDR(lpfc_hba_index);

/**
 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
 * mailbox command. It retrieves the revision information from the HBA and
 * collects the Vital Product Data (VPD) about the HBA for preparing the
 * configuration of the HBA.
 *
 * Return codes
 *   0 - success.
 *   -ERESTART - requests the SLI layer to reset the HBA and try again.
 *   Any other value - indicates an error.
 **/
int
lpfc_config_port_prep(struct lpfc_hba *phba)
{
	lpfc_vpd_t *vp = &phba->vpd;
	int i = 0, rc;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	char *lpfc_vpd_data = NULL;
	uint16_t offset = 0;
	static char licensed[56] =
		    "key unlock for use with gnu public licensed code only\0";
	static int init_key = 1;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	mb = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;
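
	/*
	 * Note (added): legacy LightPulse ("LC") HBAs expect a GPL license
	 * key string; it is byte-swapped once into big-endian words and
	 * handed to the adapter through the reserved words of the
	 * READ_NVPARM mailbox command below.
	 */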
	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
		if (init_key) {
			uint32_t *ptext = (uint32_t *) licensed;

			for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
				*ptext = cpu_to_be32(*ptext);
			init_key = 0;
		}

		lpfc_read_nv(phba, pmb);
		memset((char*)mb->un.varRDnvp.rsvd3, 0,
			sizeof (mb->un.varRDnvp.rsvd3));
		memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
			sizeof (licensed));

		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0324 Config Port initialization "
					"error, mbxCmd x%x READ_NVPARM, "
					"mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -ERESTART;
		}
		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
		       sizeof(phba->wwnn));
		memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
		       sizeof(phba->wwpn));
	}

	phba->sli3_options = 0x0;

	/* Setup and issue mailbox READ REV command */
	lpfc_read_rev(phba, pmb);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0439 Adapter failed to init, mbxCmd x%x "
				"READ_REV, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	/*
	 * The value of rr must be 1 since the driver set the cv field to 1.
	 * This setting requires the FW to set all revision fields.
	 */
	if (mb->un.varRdRev.rr == 0) {
		vp->rev.rBit = 0;
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0440 Adapter failed to init, READ_REV has "
				"missing revision information.\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EINVAL;
	}

	/* Save information as VPD data */
	vp->rev.rBit = 1;
	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
	memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
	memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
	vp->rev.biuRev = mb->un.varRdRev.biuRev;
	vp->rev.smRev = mb->un.varRdRev.smRev;
	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
	vp->rev.endecRev = mb->un.varRdRev.endecRev;
	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

	/* If the sli feature level is less than 9, we must
	 * tear down all RPIs and VPIs on link down if NPIV
	 * is enabled.
	 */
	if (vp->rev.feaLevelHigh < 9)
		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

	if (lpfc_is_LC_HBA(phba->pcidev->device))
		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
			sizeof (phba->RandomData));

	/* Get adapter VPD information */
	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
	if (!lpfc_vpd_data)
		goto out_free_mbox;
	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0441 VPD not present on adapter, "
					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/* dump mem may return a zero when finished or we got a
		 * mailbox error, either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;
		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      lpfc_vpd_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

	kfree(lpfc_vpd_data);
out_free_mbox:
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}

/**
 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the driver's configuring asynchronous
 * event mailbox command to the device. If the mailbox command returns
 * successfully, it will set the internal async event support flag to 1;
 * otherwise, it will set the internal async event support flag to 0.
 **/
static void
lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
		phba->temp_sensor_support = 1;
	else
		phba->temp_sensor_support = 0;
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the dump mailbox command for getting
 * wake up parameters. When this command completes, the response contains
 * the Option ROM version of the HBA. This function translates the version
 * number into a human-readable string and stores it in OptionROMVersion.
 **/
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct prog_id *prg;
	uint32_t prog_id_word;
	char dist = ' ';
	/* character array used for decoding dist type. */
	char dist_char[] = "nabx";

	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return;
	}

	prg = (struct prog_id *) &prog_id_word;

	/* word 7 contains the option rom version */
	prog_id_word = pmboxq->u.mb.un.varWords[7];

	/* Decode the Option rom version word to a readable string */
	if (prg->dist < 4)
		dist = dist_char[prg->dist];

	if ((prg->dist == 3) && (prg->num == 0))
		sprintf(phba->OptionROMVersion, "%d.%d%d",
			prg->ver, prg->rev, prg->lev);
	else
		sprintf(phba->OptionROMVersion, "%d.%d%d%c%d",
			prg->ver, prg->rev, prg->lev,
			dist, prg->num);
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname,
 *	cfg_soft_wwnn, cfg_soft_wwpn
 * @vport: pointer to lpfc vport data structure.
 **/
void
lpfc_update_vport_wwn(struct lpfc_vport *vport)
{
	/* If the soft name exists then update it using the service params */
	if (vport->phba->cfg_soft_wwnn)
		u64_to_wwn(vport->phba->cfg_soft_wwnn,
			   vport->fc_sparam.nodeName.u.wwn);
	if (vport->phba->cfg_soft_wwpn)
		u64_to_wwn(vport->phba->cfg_soft_wwpn,
			   vport->fc_sparam.portName.u.wwn);

	/*
	 * If the name is empty or there exists a soft name
	 * then copy the service params name, otherwise use the fc name
	 */
	if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn)
		memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
			sizeof(struct lpfc_name));
	else
		memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
			sizeof(struct lpfc_name));

	if (vport->fc_portname.u.wwn[0] == 0 || vport->phba->cfg_soft_wwpn)
		memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
			sizeof(struct lpfc_name));
	else
		memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
			sizeof(struct lpfc_name));
}

/**
 * lpfc_config_port_post - Perform lpfc initialization after config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization after the CONFIG_PORT mailbox
 * command call. It performs all internal resource and state setups on the
 * port: post IOCB buffers, enable appropriate host interrupt attentions,
 * ELS ring timers, etc.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_config_port_post(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, timeout;
	int i, j;
	int rc;

	spin_lock_irq(&phba->hbalock);
	/*
	 * If the Config port completed correctly the HBA is not
	 * over heated any more.
	 */
	if (phba->over_temp_state == HBA_OVER_TEMP)
		phba->over_temp_state = HBA_NORMAL_TEMP;
	spin_unlock_irq(&phba->hbalock);

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;

	/* Get login parameters for NID. */
	rc = lpfc_read_sparam(phba, pmb, 0);
	if (rc) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ENOMEM;
	}

	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0448 Adapter failed init, mbxCmd x%x "
				"READ_SPARM mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mp = (struct lpfc_dmabuf *) pmb->context1;
		mempool_free(pmb, phba->mbox_mem_pool);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		return -EIO;
	}

	mp = (struct lpfc_dmabuf *) pmb->context1;

	memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	pmb->context1 = NULL;
	lpfc_update_vport_wwn(vport);

	/* Update the fc_host data structures with new wwn. */
	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	fc_host_max_npiv_vports(shost) = phba->max_vpi;

	/* If no serial number in VPD data, use low 6 bytes of WWNN */
	/* This should be consolidated into parse_vpd ? - mr */
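	/*
	 * Note (added): each of the first 6 IEEE bytes of the WWNN yields
	 * two serial number characters: a nibble of 0-9 maps to '0'-'9'
	 * (0x30 + j) and a nibble of 10-15 maps to 'a'-'f' (0x61 + j - 10).
	 */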
	if (phba->SerialNumber[0] == 0) {
		uint8_t *outptr;

		outptr = &vport->fc_nodename.u.s.IEEE[0];
		for (i = 0; i < 12; i++) {
			status = *outptr++;
			j = ((status & 0xf0) >> 4);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
			i++;
			j = (status & 0xf);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
		}
	}

	lpfc_read_config(phba, pmb);
	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0453 Adapter failed to init, mbxCmd x%x "
				"READ_CONFIG, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}

	/* Check if the port is disabled */
	lpfc_sli_read_link_ste(phba);

	/* Reset the DFT_HBA_Q_DEPTH to the max xri */
	if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1))
		phba->cfg_hba_queue_depth =
			(mb->un.varRdConfig.max_xri + 1) -
			lpfc_sli4_get_els_iocb_cnt(phba);

	phba->lmt = mb->un.varRdConfig.lmt;

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	phba->link_state = LPFC_LINK_DOWN;

	/* Only process IOCBs on ELS ring till hba_state is READY */
	if (psli->ring[psli->extra_ring].sli.sli3.cmdringaddr)
		psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->ring[psli->fcp_ring].sli.sli3.cmdringaddr)
		psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->ring[psli->next_ring].sli.sli3.cmdringaddr)
		psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT;

	/* Post receive buffers for desired rings */
	if (phba->sli_rev != 3)
		lpfc_post_rcv_buf(phba);

	/*
	 * Configure HBA MSI-X attention conditions to messages if MSI-X mode
	 */
	if (phba->intr_type == MSIX) {
		rc = lpfc_config_msi(phba, pmb);
		if (rc) {
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0352 Config MSI mailbox command "
					"failed, mbxCmd x%x, mbxStatus x%x\n",
					pmb->u.mb.mbxCommand,
					pmb->u.mb.mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	}

	spin_lock_irq(&phba->hbalock);
	/* Initialize ERATT handling flag */
	phba->hba_flag &= ~HBA_ERATT_HANDLED;

	/* Enable appropriate host interrupts */
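	/*
	 * Note (added): the Host Control register carries one
	 * attention-enable bit per SLI ring (HC_R0INT_ENA..HC_R3INT_ENA);
	 * ring-0 (FCP) attentions are left masked below when the driver
	 * is configured to poll that ring.
	 */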
	if (lpfc_readl(phba->HCregaddr, &status)) {
		spin_unlock_irq(&phba->hbalock);
		return -EIO;
	}
	status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
	if (psli->num_rings > 0)
		status |= HC_R0INT_ENA;
	if (psli->num_rings > 1)
		status |= HC_R1INT_ENA;
	if (psli->num_rings > 2)
		status |= HC_R2INT_ENA;
	if (psli->num_rings > 3)
		status |= HC_R3INT_ENA;

	if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
	    (phba->cfg_poll & DISABLE_FCP_RING_INT))
		status &= ~(HC_R0INT_ENA);

	writel(status, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	/* Set up ring-0 (ELS) timer */
	timeout = phba->fc_ratov * 2;
	mod_timer(&vport->els_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * timeout));
	/* Set up heart beat (HB) timer */
	mod_timer(&phba->hb_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	phba->hb_outstanding = 0;
	phba->last_completion_time = jiffies;
	/* Set up error attention (ERATT) polling timer */
	mod_timer(&phba->eratt_poll,
		  jiffies + msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL));

	if (phba->hba_flag & LINK_DISABLED) {
		lpfc_printf_log(phba,
			KERN_ERR, LOG_INIT,
			"2598 Adapter Link is disabled.\n");
		lpfc_down_link(phba, pmb);
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
			lpfc_printf_log(phba,
			KERN_ERR, LOG_INIT,
			"2599 Adapter failed to issue DOWN_LINK"
			" mbox command rc 0x%x\n", rc);

			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
		mempool_free(pmb, phba->mbox_mem_pool);
		rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
		if (rc)
			return rc;
	}
	/* MBOX buffer will be freed in mbox compl */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_config_async(phba, pmb, LPFC_ELS_RING);
	pmb->mbox_cmpl = lpfc_config_async_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_INIT,
				"0456 Adapter failed to issue "
				"ASYNCEVT_ENABLE mbox status x%x\n",
				rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	/* Get Option rom version */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_dump_wakeup_param(phba, pmb);
	pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
				"to get Option ROM version status x%x\n", rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	return 0;
}

/**
 * lpfc_hba_init_link - Initialize the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *   0 - success
 *   Any other value - error
 **/
int
lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
{
	return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag);
}

/**
 * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology
 * @phba: pointer to lpfc hba data structure.
 * @fc_topology: desired fc topology.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *   0 - success
 *   Any other value - error
 **/
int
lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology,
			       uint32_t flag)
{
	struct lpfc_vport *vport = phba->pport;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;
	pmb->vport = vport;

	if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) &&
	     !(phba->lmt & LMT_1Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) &&
	     !(phba->lmt & LMT_2Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) &&
	     !(phba->lmt & LMT_4Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) &&
	     !(phba->lmt & LMT_8Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) &&
	     !(phba->lmt & LMT_10Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) &&
	     !(phba->lmt & LMT_16Gb))) {
		/* Reset link speed to auto */
		lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
			"1302 Invalid speed for this board:%d "
			"Reset link speed to auto.\n",
			phba->cfg_link_speed);
		phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
	}
	lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	if (phba->sli_rev < LPFC_SLI_REV4)
		lpfc_set_loopback_flag(phba);
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0498 Adapter failed to init, mbxCmd x%x "
			"INIT_LINK, mbxStatus x%x\n",
			mb->mbxCommand, mb->mbxStatus);
		if (phba->sli_rev <= LPFC_SLI_REV3) {
			/* Clear all interrupt enable conditions */
			writel(0, phba->HCregaddr);
			readl(phba->HCregaddr); /* flush */
			/* Clear all pending interrupts */
			writel(0xffffffff, phba->HAregaddr);
			readl(phba->HAregaddr); /* flush */
		}
		phba->link_state = LPFC_HBA_ERROR;
		if (rc != MBX_BUSY || flag == MBX_POLL)
			mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
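	/*
	 * Note (added): with MBX_POLL no completion handler runs, so the
	 * mailbox must be freed here; with MBX_NOWAIT, ownership has passed
	 * to the default completion handler (lpfc_sli_def_mbox_cmpl),
	 * which frees it.
	 */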
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}

/**
 * lpfc_hba_down_link - this routine downs the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the DOWN_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use to stop the link.
 *
 * Return code
 *   0 - success
 *   Any other value - error
 **/
static int
lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
{
	LPFC_MBOXQ_t *pmb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_printf_log(phba,
		KERN_ERR, LOG_INIT,
		"0491 Adapter Link is disabled.\n");
	lpfc_down_link(phba, pmb);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
		lpfc_printf_log(phba,
		KERN_ERR, LOG_INIT,
		"2522 Adapter failed to issue DOWN_LINK"
		" mbox command rc 0x%x\n", rc);

		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}

/**
 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do LPFC uninitialization before the HBA is reset when
 * bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_prep(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	if (phba->sli_rev <= LPFC_SLI_REV3) {
		/* Disable interrupts */
		writel(0, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	if (phba->pport->load_flag & FC_UNLOADING)
		lpfc_cleanup_discovery_resources(phba->pport);
	else {
		vports = lpfc_create_vport_work_array(phba);
		if (vports != NULL)
			for (i = 0; i <= phba->max_vports &&
				vports[i] != NULL; i++)
				lpfc_cleanup_discovery_resources(vports[i]);
		lpfc_destroy_vport_work_array(phba, vports);
	}
	return 0;
}

/**
 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *mp, *next_mp;
	LIST_HEAD(completions);
	int i;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		lpfc_sli_hbqbuf_free_all(phba);
	else {
		/* Cleanup preposted buffers on the ELS ring */
		pring = &psli->ring[LPFC_ELS_RING];
		list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
			list_del(&mp->list);
			pring->postbufq_cnt--;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
	}

	spin_lock_irq(&phba->hbalock);
	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];

		/* At this point in time the HBA is either reset or DOA. Either
		 * way, nothing should be on txcmplq as it will NEVER complete.
		 */
		list_splice_init(&pring->txcmplq, &completions);
		spin_unlock_irq(&phba->hbalock);

		/* Cancel all the IOCBs from the completions list */
		lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
				      IOERR_SLI_ABORTED);

		lpfc_sli_abort_iocb_ring(phba, pring);
		spin_lock_irq(&phba->hbalock);
	}
	spin_unlock_irq(&phba->hbalock);

	return 0;
}

/**
 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
	struct lpfc_scsi_buf *psb, *psb_next;
	LIST_HEAD(aborts);
	int ret;
	unsigned long iflag = 0;
	struct lpfc_sglq *sglq_entry = NULL;

	ret = lpfc_hba_down_post_s3(phba);
	if (ret)
		return ret;
	/* At this point in time the HBA is either reset or DOA. Either
	 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
	 * on the lpfc_sgl_list so that it can either be freed if the
	 * driver is unloading or reposted if the driver is restarting
	 * the port.
	 */
	spin_lock_irq(&phba->hbalock);  /* required for lpfc_sgl_list and */
					/* scsi_buf_list */
	/* abts_sgl_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
	list_for_each_entry(sglq_entry,
		&phba->sli4_hba.lpfc_abts_els_sgl_list, list)
		sglq_entry->state = SGL_FREED;

	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
			&phba->sli4_hba.lpfc_sgl_list);
	spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
	/* abts_scsi_buf_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list,
			&aborts);
	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	spin_unlock_irq(&phba->hbalock);

	list_for_each_entry_safe(psb, psb_next, &aborts, list) {
		psb->pCmd = NULL;
		psb->status = IOSTAT_SUCCESS;
	}
	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	list_splice(&aborts, &phba->lpfc_scsi_buf_list);
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
	return 0;
}

/**
 * lpfc_hba_down_post - Wrapper func for hba down post routine
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 routine for performing
 * uninitialization after the HBA is reset when bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_post(struct lpfc_hba *phba)
{
	return (*phba->lpfc_hba_down_post)(phba);
}

/**
 * lpfc_hb_timeout - The HBA-timer timeout handler
 * @ptr: unsigned long holds the pointer to lpfc hba data structure.
 *
 * This is the HBA-timer timeout handler registered to the lpfc driver. When
 * this timer fires, an HBA timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_hb_timeout_handler. Any periodical operations will
 * be performed in the timeout handler and the HBA timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_hb_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba;
	uint32_t tmo_posted;
	unsigned long iflag;

	phba = (struct lpfc_hba *)ptr;

	/* Check for heart beat timeout conditions */
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
	if (!tmo_posted)
		phba->pport->work_port_events |= WORKER_HB_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	/* Tell the worker thread there is work to do */
	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_rrq_timeout - The RRQ-timer timeout handler
 * @ptr: unsigned long holds the pointer to lpfc hba data structure.
 *
 * This is the RRQ-timer timeout handler registered to the lpfc driver. When
 * this timer fires, an RRQ timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_rrq_handler. Any periodical operations will
 * be performed in the timeout handler and the RRQ timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_rrq_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba;
	unsigned long iflag;

	phba = (struct lpfc_hba *)ptr;
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	phba->hba_flag |= HBA_RRQ_ACTIVE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
	lpfc_worker_wake_up(phba);
}

/**
 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the callback function to the lpfc heart-beat mailbox command.
 * If configured, the lpfc driver issues the heart-beat mailbox command to
 * the HBA every LPFC_HB_MBOX_INTERVAL (currently 5) seconds. At the time the
 * heart-beat mailbox command is issued, the driver shall set up the heart-beat
 * timeout timer to LPFC_HB_MBOX_TIMEOUT (currently 30) seconds and mark the
 * heart-beat outstanding state. Once the mailbox command comes back and
 * no error conditions are detected, the heart-beat mailbox command timer is
 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding
 * state is cleared for the next heart-beat. If the timer expired with the
 * heart-beat outstanding state set, the driver will put the HBA offline.
 **/
static void
lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
	unsigned long drvr_flag;

	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	phba->hb_outstanding = 0;
	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

	/* Check and reset heart-beat timer if necessary */
	mempool_free(pmboxq, phba->mbox_mem_pool);
	if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
		!(phba->link_state == LPFC_HBA_ERROR) &&
		!(phba->pport->load_flag & FC_UNLOADING))
		mod_timer(&phba->hb_tmofunc,
			  jiffies +
			  msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	return;
}

/**
 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This is the actual HBA-timer timeout handler to be invoked by the worker
 * thread whenever the HBA timer fired and HBA-timeout event posted. This
 * handler performs any periodic operations needed for the device. If such
 * a periodic event has already been attended to either in the interrupt
 * handler or by processing slow-ring or fast-ring events within the HBA-timer
 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler just simply resets
 * the timer for the next timeout period. If the lpfc heart-beat mailbox
 * command is configured and there is no heart-beat mailbox command
 * outstanding, a heart-beat mailbox is issued and the timer set properly.
 * Otherwise, if there has been a heart-beat mailbox command outstanding,
 * the HBA shall be put offline.
 **/
void
lpfc_hb_timeout_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	LPFC_MBOXQ_t *pmboxq;
	struct lpfc_dmabuf *buf_ptr;
	int retval, i;
	struct lpfc_sli *psli = &phba->sli;
	LIST_HEAD(completions);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_rcv_seq_check_edtov(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);

	if ((phba->link_state == LPFC_HBA_ERROR) ||
		(phba->pport->load_flag & FC_UNLOADING) ||
		(phba->pport->fc_flag & FC_OFFLINE_MODE))
		return;

	spin_lock_irq(&phba->pport->work_port_lock);

	if (time_after(phba->last_completion_time +
			msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL),
			jiffies)) {
		spin_unlock_irq(&phba->pport->work_port_lock);
		if (!phba->hb_outstanding)
			mod_timer(&phba->hb_tmofunc,
				jiffies +
				msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
		else
			mod_timer(&phba->hb_tmofunc,
				jiffies +
				msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
		return;
	}
	spin_unlock_irq(&phba->pport->work_port_lock);
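
	/*
	 * Note (added): if no ELS buffers were consumed since the previous
	 * pass (the count is unchanged), the accumulated buffers are
	 * treated as idle and are released back to the mbuf pool here.
	 */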
	if (phba->elsbuf_cnt &&
		(phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&phba->elsbuf, &completions);
		phba->elsbuf_cnt = 0;
		phba->elsbuf_prev_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		while (!list_empty(&completions)) {
			list_remove_head(&completions, buf_ptr,
				struct lpfc_dmabuf, list);
			lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
			kfree(buf_ptr);
		}
	}
	phba->elsbuf_prev_cnt = phba->elsbuf_cnt;

	/* If there is no heart beat outstanding, issue a heartbeat command */
	if (phba->cfg_enable_hba_heartbeat) {
		if (!phba->hb_outstanding) {
			if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
				(list_empty(&psli->mboxq))) {
				pmboxq = mempool_alloc(phba->mbox_mem_pool,
							GFP_KERNEL);
				if (!pmboxq) {
					mod_timer(&phba->hb_tmofunc,
						 jiffies +
						 msecs_to_jiffies(1000 *
						 LPFC_HB_MBOX_INTERVAL));
					return;
				}

				lpfc_heart_beat(phba, pmboxq);
				pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
				pmboxq->vport = phba->pport;
				retval = lpfc_sli_issue_mbox(phba, pmboxq,
						MBX_NOWAIT);

				if (retval != MBX_BUSY &&
					retval != MBX_SUCCESS) {
					mempool_free(pmboxq,
							phba->mbox_mem_pool);
					mod_timer(&phba->hb_tmofunc,
						jiffies +
						msecs_to_jiffies(1000 *
						LPFC_HB_MBOX_INTERVAL));
					return;
				}
				phba->skipped_hb = 0;
				phba->hb_outstanding = 1;
			} else if (time_before_eq(phba->last_completion_time,
					phba->skipped_hb)) {
				lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2857 Last completion time not "
					" updated in %d ms\n",
					jiffies_to_msecs(jiffies
						 - phba->last_completion_time));
			} else
				phba->skipped_hb = jiffies;

			mod_timer(&phba->hb_tmofunc,
				 jiffies +
				 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
			return;
		} else {
			/*
			 * If heart beat timeout called with hb_outstanding set
			 * we need to give the hb mailbox cmd a chance to
			 * complete or TMO.
			 */
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"0459 Adapter heartbeat still out"
					"standing:last compl time was %d ms.\n",
					jiffies_to_msecs(jiffies
						 - phba->last_completion_time));
			mod_timer(&phba->hb_tmofunc,
				jiffies +
				msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
		}
	}
}

/**
 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring the HBA offline when an HBA hardware error
 * other than Port Error 6 has been detected.
 **/
static void
lpfc_offline_eratt(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);
	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);

	lpfc_offline(phba);
	lpfc_reset_barrier(phba);
	spin_lock_irq(&phba->hbalock);
	lpfc_sli_brdreset(phba);
	spin_unlock_irq(&phba->hbalock);
	lpfc_hba_down_post(phba);
	lpfc_sli_brdready(phba, HS_MBRDY);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
	return;
}

/**
 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring a SLI4 HBA offline when an HBA hardware
 * error other than Port Error 6 has been detected.
 **/
static void
lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
{
	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
	lpfc_offline(phba);
	lpfc_sli4_brdreset(phba);
	lpfc_hba_down_post(phba);
	lpfc_sli4_post_status_check(phba);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
}

/**
 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the deferred HBA hardware error
 * conditions. This type of error is indicated by the HBA setting ER1
 * and another ER bit in the host status register. The driver will
 * wait until the ER1 bit clears before handling the error condition.
 **/
static void
lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
{
	uint32_t old_host_status = phba->work_hs;
	struct lpfc_sli_ring *pring;
	struct lpfc_sli *psli = &phba->sli;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
		"0479 Deferred Adapter Hardware Error "
		"Data: x%x x%x x%x\n",
		phba->work_hs,
		phba->work_status[0], phba->work_status[1]);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/*
	 * Firmware stops when it triggers erratt. That could cause I/Os to
	 * be dropped by the firmware. Error out the iocbs (I/O) on the
	 * txcmplq and let the SCSI layer retry them after re-establishing
	 * the link.
	 */
	pring = &psli->ring[psli->fcp_ring];
	lpfc_sli_abort_iocb_ring(phba, pring);

	/*
	 * There was a firmware error. Take the hba offline and then
	 * attempt to restart it.
	 */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);

	/* Wait for the ER1 bit to clear. */
	while (phba->work_hs & HS_FFER1) {
		msleep(100);
		if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
			phba->work_hs = UNPLUG_ERR;
			break;
		}
		/* If driver is unloading let the worker thread continue */
		if (phba->pport->load_flag & FC_UNLOADING) {
			phba->work_hs = 0;
			break;
		}
	}

	/*
	 * This is to protect against a race condition in which
	 * the first write to the host attention register clears
	 * the host status register.
	 */
	if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
		phba->work_hs = old_host_status & ~HS_FFER1;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~DEFER_ERATT;
	spin_unlock_irq(&phba->hbalock);
	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
}

/**
 * lpfc_board_errevt_to_mgmt - Send board error event to mgmt application
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to post the board error event to the management
 * application.
 **/
static void
lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
{
	struct lpfc_board_event_header board_event;
	struct Scsi_Host *shost;

	board_event.event_type = FC_REG_BOARD_EVENT;
	board_event.subcategory = LPFC_EVENT_PORTINTERR;
	shost = lpfc_shost_from_vport(phba->pport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(board_event),
				  (char *) &board_event,
				  LPFC_NL_VENDOR_ID);
}

/**
 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the following HBA hardware error
 * conditions:
 * 1 - HBA error attention interrupt
 * 2 - DMA ring index out of range
 * 3 - Mailbox command came back as unknown
 **/
static void
lpfc_handle_eratt_s3(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	uint32_t event_data;
	unsigned long temperature;
	struct temp_event temp_event_data;
	struct Scsi_Host *shost;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	if (phba->hba_flag & DEFER_ERATT)
		lpfc_handle_deferred_eratt(phba);

	if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
		if (phba->work_hs & HS_FFER6)
			/* Re-establishing Link */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"1301 Re-establishing Link "
					"Data: x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);
		if (phba->work_hs & HS_FFER8)
			/* Device Zeroization */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"2861 Host Authentication device "
					"zeroization Data:x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);

		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ACTIVE;
		spin_unlock_irq(&phba->hbalock);

		/*
		 * Firmware stops when it triggers erratt with HS_FFER6.
		 * That could cause the I/Os to be dropped by the firmware.
		 * Error out the iocbs (I/O) on the txcmplq and let the
		 * SCSI layer retry them after re-establishing the link.
		 */
		pring = &psli->ring[psli->fcp_ring];
		lpfc_sli_abort_iocb_ring(phba, pring);

		/*
		 * There was a firmware error. Take the hba offline and then
		 * attempt to restart it.
		 */
		lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
			lpfc_unblock_mgmt_io(phba);
			return;
		}
		lpfc_unblock_mgmt_io(phba);
	} else if (phba->work_hs & HS_CRIT_TEMP) {
		temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_CRIT_TEMP;
		temp_event_data.data = (uint32_t)temperature;

		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0406 Adapter maximum temperature exceeded "
				"(%ld), taking this port offline "
				"Data: x%x x%x x%x\n",
				temperature, phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *) &temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);

		spin_lock_irq(&phba->hbalock);
		phba->over_temp_state = HBA_OVER_TEMP;
		spin_unlock_irq(&phba->hbalock);
		lpfc_offline_eratt(phba);

	} else {
		/* The if clause above forces this code path when the status
		 * failure is a value other than FFER6. Do not call the offline
		 * twice. This is the adapter hardware error path.
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0457 Adapter Hardware Error "
				"Data: x%x x%x x%x\n",
				phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		event_data = FC_REG_DUMP_EVENT;
		shost = lpfc_shost_from_vport(vport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
				sizeof(event_data), (char *) &event_data,
				SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);

		lpfc_offline_eratt(phba);
	}
	return;
}

/**
 * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg
 * @phba: pointer to lpfc hba data structure.
 * @mbx_action: flag for mailbox shutdown action.
 *
 * This routine is invoked to perform an SLI4 port PCI function reset in
 * response to port status register polling attention. It waits for the port
 * status register (ERR, RDY, RN) bits before proceeding with the function
 * reset. During this process, interrupt vectors are freed and later requested
 * for handling possible port resource change.
 **/
static int
lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action)
{
	int rc;
	uint32_t intr_mode;

	/*
	 * On error status condition, the driver needs to wait for the port
	 * to become ready before performing the reset.
	 */
	rc = lpfc_sli4_pdev_status_reg_wait(phba);
	if (!rc) {
		/* need reset: attempt for port recovery */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2887 Reset Needed: Attempting Port "
				"Recovery...\n");
		lpfc_offline_prep(phba, mbx_action);
		lpfc_offline(phba);
		/* release interrupt for possible resource change */
		lpfc_sli4_disable_intr(phba);
		lpfc_sli_brdrestart(phba);
		/* request and enable interrupt */
		intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
		if (intr_mode == LPFC_INTR_ERROR) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3175 Failed to enable interrupt\n");
			return -EIO;
		} else {
			phba->intr_mode = intr_mode;
		}
		rc = lpfc_online(phba);
		if (rc == 0)
			lpfc_unblock_mgmt_io(phba);
	}
	return rc;
}

/**
 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the SLI4 HBA hardware error attention
 * conditions.
 **/
static void
lpfc_handle_eratt_s4(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	uint32_t event_data;
	struct Scsi_Host *shost;
	uint32_t if_type;
	struct lpfc_register portstat_reg = {0};
	uint32_t reg_err1, reg_err2;
	uint32_t uerrlo_reg, uemasklo_reg;
	uint32_t pci_rd_rc1, pci_rd_rc2;
	int rc;

	/* If the pci channel is offline, ignore possible errors, since
	 * we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev))
		return;
	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		pci_rd_rc1 = lpfc_readl(
				phba->sli4_hba.u.if_type0.UERRLOregaddr,
				&uerrlo_reg);
		pci_rd_rc2 = lpfc_readl(
				phba->sli4_hba.u.if_type0.UEMASKLOregaddr,
				&uemasklo_reg);
		/* consider PCI bus read error as pci_channel_offline */
		if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO)
			return;
		lpfc_sli4_offline_eratt(phba);
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
		pci_rd_rc1 = lpfc_readl(
				phba->sli4_hba.u.if_type2.STATUSregaddr,
				&portstat_reg.word0);
		/* consider PCI bus read error as pci_channel_offline */
		if (pci_rd_rc1 == -EIO) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3151 PCI bus read access failure: x%x\n",
				readl(phba->sli4_hba.u.if_type2.STATUSregaddr));
			return;
		}
		reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
		reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
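		/*
		 * Note (added): ERR1/ERR2 hold the port-error reason code
		 * reported by the firmware; they are decoded below to
		 * distinguish a firmware restart, a forced debug dump,
		 * and function provisioning.
		 */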
		if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
			/* TODO: Register for Overtemp async events. */
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2889 Port Overtemperature event, "
				"taking port offline\n");
			spin_lock_irq(&phba->hbalock);
			phba->over_temp_state = HBA_OVER_TEMP;
			spin_unlock_irq(&phba->hbalock);
			lpfc_sli4_offline_eratt(phba);
			return;
		}
		if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
		    reg_err2 == SLIPORT_ERR2_REG_FW_RESTART)
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3143 Port Down: Firmware Restarted\n");
		else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
			 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3144 Port Down: Debug Dump\n");
		else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
			 reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3145 Port Down: Provisioning\n");

		/* Check port status register for function reset */
		rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT);
		if (rc == 0) {
			/* don't report event on forced debug dump */
			if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
			    reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
				return;
			else
				break;
		}
		/* fall through for not able to recover */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3152 Unrecoverable error, bring the port "
				"offline\n");
		lpfc_sli4_offline_eratt(phba);
		break;
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		break;
	}
	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
			"3123 Report dump event to upper layer\n");
	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	event_data = FC_REG_DUMP_EVENT;
	shost = lpfc_shost_from_vport(vport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(event_data), (char *) &event_data,
				  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
}

/**
 * lpfc_handle_eratt - Wrapper func for handling hba error attention
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 hba error attention handling
 * routine from the API jump table function pointer in the lpfc_hba struct.
 **/
void
lpfc_handle_eratt(struct lpfc_hba *phba)
{
	(*phba->lpfc_handle_eratt)(phba);
}

/**
 * lpfc_handle_latt - The HBA link event handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked from the worker thread to handle a HBA host
 * attention link event.
 **/
void
lpfc_handle_latt(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	volatile uint32_t control;
	struct lpfc_dmabuf *mp;
	int rc = 0;

	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		rc = 1;
		goto lpfc_handle_latt_err_exit;
	}

	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp) {
		rc = 2;
		goto lpfc_handle_latt_free_pmb;
	}

	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt) {
		rc = 3;
		goto lpfc_handle_latt_free_mp;
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	psli->slistat.link_event++;
	lpfc_read_topology(phba, pmb, mp);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
	pmb->vport = vport;
	/* Block ELS IOCBs until we have processed this mbox command */
	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		rc = 4;
		goto lpfc_handle_latt_free_mbuf;
	}

	/* Clear Link Attention in HA REG */
	spin_lock_irq(&phba->hbalock);
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	return;

lpfc_handle_latt_free_mbuf:
	phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
lpfc_handle_latt_free_mp:
	kfree(mp);
lpfc_handle_latt_free_pmb:
	mempool_free(pmb, phba->mbox_mem_pool);
lpfc_handle_latt_err_exit:
	/* Enable Link attention interrupts */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* Clear Link Attention in HA REG */
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
	lpfc_linkdown(phba);
	phba->link_state = LPFC_HBA_ERROR;

	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
		"0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);

	return;
}

/**
 * lpfc_parse_vpd - Parse VPD (Vital Product Data)
 * @phba: pointer to lpfc hba data structure.
 * @vpd: pointer to the vital product data.
 * @len: length of the vital product data in bytes.
 *
 * This routine parses the Vital Product Data (VPD). The VPD is treated as
 * an array of characters. In this routine, the ModelName, ProgramType, and
 * ModelDesc, etc. fields of the phba data structure will be populated.
 *
 * Return codes
 *   0 - pointer to the VPD passed in is NULL
 *   1 - success
 **/
int
lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
{
	uint8_t lenlo, lenhi;
	int Length;
	int i, j;
	int finished = 0;
	int index = 0;

	if (!vpd)
		return 0;

	/* Vital Product */
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0455 Vital Product Data: x%x x%x x%x x%x\n",
			(uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
			(uint32_t) vpd[3]);
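	/*
	 * Note (added): VPD is a sequence of tagged records: tags 0x82/0x91
	 * (identifier strings) and 0x90 (read-only area) are each followed
	 * by a 16-bit little-endian length.  Inside the 0x90 area,
	 * two-character keywords (SN, V1..V4) each carry their own
	 * one-byte length.
	 */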
	while (!finished && (index < (len - 4))) {
		switch (vpd[index]) {
		case 0x82:
		case 0x91:
			index += 1;
			lenlo = vpd[index];
			index += 1;
			lenhi = vpd[index];
			index += 1;
			i = ((((unsigned short)lenhi) << 8) + lenlo);
			index += i;
			break;
		case 0x90:
			index += 1;
			lenlo = vpd[index];
			index += 1;
			lenhi = vpd[index];
			index += 1;
			Length = ((((unsigned short)lenhi) << 8) + lenlo);
			if (Length > len - index)
				Length = len - index;
			while (Length > 0) {
			/* Look for Serial Number */
			if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while(i--) {
					phba->SerialNumber[j++] = vpd[index++];
					if (j == 31)
						break;
				}
				phba->SerialNumber[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
				phba->vpd_flag |= VPD_MODEL_DESC;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while(i--) {
					phba->ModelDesc[j++] = vpd[index++];
					if (j == 255)
						break;
				}
				phba->ModelDesc[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
				phba->vpd_flag |= VPD_MODEL_NAME;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while(i--) {
					phba->ModelName[j++] = vpd[index++];
					if (j == 79)
						break;
				}
				phba->ModelName[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
				phba->vpd_flag |= VPD_PROGRAM_TYPE;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while(i--) {
					phba->ProgramType[j++] = vpd[index++];
					if (j == 255)
						break;
				}
				phba->ProgramType[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
				phba->vpd_flag |= VPD_PORT;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while(i--) {
					if ((phba->sli_rev == LPFC_SLI_REV4) &&
					    (phba->sli4_hba.pport_name_sta ==
					     LPFC_SLI4_PPNAME_GET)) {
						j++;
						index++;
					} else
						phba->Port[j++] = vpd[index++];
					if (j == 19)
						break;
				}
				if ((phba->sli_rev != LPFC_SLI_REV4) ||
				    (phba->sli4_hba.pport_name_sta ==
				     LPFC_SLI4_PPNAME_NON))
					phba->Port[j] = 0;
				continue;
			}
			else {
				index += 2;
				i = vpd[index];
				index += 1;
				index += i;
				Length -= (3 + i);
			}
		}
		finished = 0;
		break;
		case 0x78:
			finished = 1;
			break;
		default:
			index ++;
			break;
		}
	}

	return(1);
}

/**
 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
 * @phba: pointer to lpfc hba data structure.
 * @mdp: pointer to the data structure to hold the derived model name.
 * @descp: pointer to the data structure to hold the derived description.
 *
 * This routine retrieves the HBA's description based on its registered PCI
 * device ID. The @descp passed into this function points to an array of 256
 * chars. It shall be returned with the model name, maximum speed, and the
 * host bus type. The @mdp passed into this function points to an array of
 * 80 chars. When the function returns, the @mdp will be filled with the
 * model name.
 **/
static void
lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
{
	lpfc_vpd_t *vp;
	uint16_t dev_id = phba->pcidev->device;
	int max_speed;
	int GE = 0;
	int oneConnect = 0; /* default is not a oneConnect */
	struct {
		char *name;
		char *bus;
		char *function;
	} m = {"<Unknown>", "", ""};

	if (mdp && mdp[0] != '\0'
		&& descp && descp[0] != '\0')
		return;

	if (phba->lmt & LMT_16Gb)
		max_speed = 16;
	else if (phba->lmt & LMT_10Gb)
		max_speed = 10;
	else if (phba->lmt & LMT_8Gb)
		max_speed = 8;
	else if (phba->lmt & LMT_4Gb)
		max_speed = 4;
	else if (phba->lmt & LMT_2Gb)
		max_speed = 2;
	else if (phba->lmt & LMT_1Gb)
		max_speed = 1;
	else
		max_speed = 0;

	vp = &phba->vpd;

	switch (dev_id) {
	case PCI_DEVICE_ID_FIREFLY:
		m = (typeof(m)){"LP6000", "PCI", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SUPERFLY:
		if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
			m = (typeof(m)){"LP7000", "PCI",
					"Fibre Channel Adapter"};
		else
			m = (typeof(m)){"LP7000E", "PCI",
					"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_DRAGONFLY:
		m = (typeof(m)){"LP8000", "PCI",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_CENTAUR:
		if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
			m = (typeof(m)){"LP9002", "PCI",
					"Fibre Channel Adapter"};
		else
			m = (typeof(m)){"LP9000", "PCI",
					"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_RFLY:
		m = (typeof(m)){"LP952", "PCI",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PEGASUS:
		m = (typeof(m)){"LP9802", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_THOR:
		m = (typeof(m)){"LP10000", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_VIPER:
		m = (typeof(m)){"LPX1000", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PFLY:
		m = (typeof(m)){"LP982", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_TFLY:
		m = (typeof(m)){"LP1050", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS:
		m = (typeof(m)){"LP11000", "PCI-X2",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS_SCSP:
		m = (typeof(m)){"LP11000-SP", "PCI-X2",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS_DCSP:
		m = (typeof(m)){"LP11002-SP", "PCI-X2",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE:
		m = (typeof(m)){"LPe1000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE_SCSP:
		m = (typeof(m)){"LPe1000-SP", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE_DCSP:
		m = (typeof(m)){"LPe1002-SP", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_BMID:
		m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_BSMB:
		m = (typeof(m)){"LP111", "PCI-X2", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZEPHYR:
		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZEPHYR_SCSP:
		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZEPHYR_DCSP:
		m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
		GE = 1;
		break;
	case PCI_DEVICE_ID_ZMID:
		m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZSMB:
		m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LP101:
		m = (typeof(m)){"LP101", "PCI-X", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LP10000S:
		m = (typeof(m)){"LP10000-S", "PCI", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LP11000S:
		m = (typeof(m)){"LP11000-S", "PCI-X2", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LPE11000S:
		m = (typeof(m)){"LPe11000-S", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT:
		m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_MID:
		m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_SMB:
		m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_DCSP:
		m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_SCSP:
		m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_S:
		m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HORNET:
		m = (typeof(m)){"LP21000", "PCIe", "FCoE Adapter"};
		GE = 1;
		break;
	case PCI_DEVICE_ID_PROTEUS_VF:
		m = (typeof(m)){"LPev12000", "PCIe IOV",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PROTEUS_PF:
		m = (typeof(m)){"LPev12000", "PCIe IOV",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PROTEUS_S:
		m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_TIGERSHARK:
		oneConnect = 1;
		m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
		break;
	case PCI_DEVICE_ID_TOMCAT:
		oneConnect = 1;
		m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
		break;
	case PCI_DEVICE_ID_FALCON:
		m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
				"EmulexSecure Fibre"};
		break;
	case PCI_DEVICE_ID_BALIUS:
		m = (typeof(m)){"LPVe12002", "PCIe Shared I/O",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LANCER_FC:
	case PCI_DEVICE_ID_LANCER_FC_VF:
		m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LANCER_FCOE:
	case PCI_DEVICE_ID_LANCER_FCOE_VF:
		oneConnect = 1;
		m = (typeof(m)){"OCe15100", "PCIe", "FCoE"};
		break;
	case PCI_DEVICE_ID_SKYHAWK:
	case PCI_DEVICE_ID_SKYHAWK_VF:
		oneConnect = 1;
		m = (typeof(m)){"OCe14000", "PCIe", "FCoE"};
		break;
	default:
		m = (typeof(m)){"Unknown", "", ""};
		break;
	}
2084 if (mdp && mdp[0] == '\0')
2085 snprintf(mdp, 79,"%s", m.name);
2087 * OneConnect HBAs require special processing; they are all initiators
2088 * and we put the port number on the end.
2090 if (descp && descp[0] == '\0') {
2092 snprintf(descp, 255,
2093 "Emulex OneConnect %s, %s Initiator %s",
2096 else if (max_speed == 0)
2097 snprintf(descp, 255,
2099 m.name, m.bus, m.function);
2101 snprintf(descp, 255,
2102 "Emulex %s %d%s %s %s",
2103 m.name, max_speed, (GE) ? "GE" : "Gb",
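/*
 * Illustrative output (a sketch; the values are hypothetical): for an
 * LPe12000 with max_speed = 8 and GE not set, descp would read
 * "Emulex LPe12000 8Gb PCIe Fibre Channel Adapter".
 */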
2109 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to an IOCB ring
2110 * @phba: pointer to lpfc hba data structure.
2111 * @pring: pointer to an IOCB ring.
2112 * @cnt: the number of IOCBs to be posted to the IOCB ring.
2114 * This routine posts a given number of IOCBs with the associated DMA buffer
2115 * descriptors specified by the cnt argument to the given IOCB ring.
2118 * The number of IOCBs NOT able to be posted to the IOCB ring.
2121 lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
2124 struct lpfc_iocbq *iocb;
2125 struct lpfc_dmabuf *mp1, *mp2;
2127 cnt += pring->missbufcnt;
2129 /* While there are buffers to post */
2131 /* Allocate buffer for command iocb */
2132 iocb = lpfc_sli_get_iocbq(phba);
2134 pring->missbufcnt = cnt;
2139 /* 2 buffers can be posted per command */
2140 /* Allocate buffer to post */
2141 mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
2143 mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
2144 if (!mp1 || !mp1->virt) {
2146 lpfc_sli_release_iocbq(phba, iocb);
2147 pring->missbufcnt = cnt;
2151 INIT_LIST_HEAD(&mp1->list);
2152 /* Allocate buffer to post */
2154 mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
2156 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
2158 if (!mp2 || !mp2->virt) {
2160 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2162 lpfc_sli_release_iocbq(phba, iocb);
2163 pring->missbufcnt = cnt;
2167 INIT_LIST_HEAD(&mp2->list);
2172 icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
2173 icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
2174 icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
2175 icmd->ulpBdeCount = 1;
2178 icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
2179 icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
2180 icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
2182 icmd->ulpBdeCount = 2;
2185 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
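/*
 * At this point the IOCB carries one or two 64-bit buffer descriptor
 * entries (BDEs): each holds a DMA address split into addrHigh/addrLow
 * plus an FCELSSIZE-byte length, and ulpBdeCount tells the port how
 * many BDEs this CMD_QUE_RING_BUF64_CN command actually posts.
 */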
2188 if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
2190 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2194 lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
2198 lpfc_sli_release_iocbq(phba, iocb);
2199 pring->missbufcnt = cnt;
2202 lpfc_sli_ringpostbuf_put(phba, pring, mp1);
2204 lpfc_sli_ringpostbuf_put(phba, pring, mp2);
2206 pring->missbufcnt = 0;
2211 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring
2212 * @phba: pointer to lpfc hba data structure.
2214 * This routine posts initial receive IOCB buffers to the ELS ring. The
2215 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is posted.
2219 * 0 - success (currently always success)
2222 lpfc_post_rcv_buf(struct lpfc_hba *phba)
2224 struct lpfc_sli *psli = &phba->sli;
2226 /* Ring 0, ELS / CT buffers */
2227 lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0);
2228 /* Ring 2 - FCP no buffers needed */
2233 #define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
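/*
 * S(N, V) is the 32-bit circular left shift (rotate) used by SHA-1,
 * e.g. S(1, 0x80000000) == 0x00000001: the bit shifted out on the
 * left re-enters on the right.
 */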
2236 * lpfc_sha_init - Set up initial array of hash table entries
2237 * @HashResultPointer: pointer to an array as hash table.
2239 * This routine sets up the initial values in the array of hash table entries
2243 lpfc_sha_init(uint32_t * HashResultPointer)
2245 HashResultPointer[0] = 0x67452301;
2246 HashResultPointer[1] = 0xEFCDAB89;
2247 HashResultPointer[2] = 0x98BADCFE;
2248 HashResultPointer[3] = 0x10325476;
2249 HashResultPointer[4] = 0xC3D2E1F0;
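/* These are the five standard SHA-1 initial hash values (H0..H4)
 * defined by FIPS 180-1.
 */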
2253 * lpfc_sha_iterate - Iterate initial hash table with the working hash table
2254 * @HashResultPointer: pointer to an initial/result hash table.
2255 * @HashWorkingPointer: pointer to a working hash table.
2257 * This routine iterates an initial hash table pointed to by @HashResultPointer
2258 * with the values from the working hash table pointed to by @HashWorkingPointer.
2259 * The results are put back into the initial hash table, returned through
2260 * the @HashResultPointer as the result hash table.
2263 lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
2267 uint32_t A, B, C, D, E;
2270 HashWorkingPointer[t] =
2271     S(1, HashWorkingPointer[t - 3] ^ HashWorkingPointer[t - 8] ^
2274     HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
2275 } while (++t <= 79);
2277 A = HashResultPointer[0];
2278 B = HashResultPointer[1];
2279 C = HashResultPointer[2];
2280 D = HashResultPointer[3];
2281 E = HashResultPointer[4];
2285 TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
2286 } else if (t < 40) {
2287 TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
2288 } else if (t < 60) {
2289 TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
2291 TEMP = (B ^ C ^ D) + 0xCA62C1D6;
2293 TEMP += S(5, A) + E + HashWorkingPointer[t];
2299 } while (++t <= 79);
2301 HashResultPointer[0] += A;
2302 HashResultPointer[1] += B;
2303 HashResultPointer[2] += C;
2304 HashResultPointer[3] += D;
2305 HashResultPointer[4] += E;
2310 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
2311 * @RandomChallenge: pointer to the entry of host challenge random number array.
2312 * @HashWorking: pointer to the entry of the working hash array.
2314 * This routine calculates the working hash array referred by @HashWorking
2315 * from the challenge random numbers associated with the host, referred by
2316 * @RandomChallenge. The result is put into the entry of the working hash
2317 * array and returned by reference through @HashWorking.
2320 lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
2322 *HashWorking = (*RandomChallenge ^ *HashWorking);
2326 * lpfc_hba_init - Perform special handling for LC HBA initialization
2327 * @phba: pointer to lpfc hba data structure.
2328 * @hbainit: pointer to an array of unsigned 32-bit integers.
2330 * This routine performs the special handling for LC HBA initialization.
2333 lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
2336 uint32_t *HashWorking;
2337 uint32_t *pwwnn = (uint32_t *) phba->wwnn;
2339 HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
2343 HashWorking[0] = HashWorking[78] = *pwwnn++;
2344 HashWorking[1] = HashWorking[79] = *pwwnn;
2346 for (t = 0; t < 7; t++)
2347 lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
2349 lpfc_sha_init(hbainit);
2350 lpfc_sha_iterate(hbainit, HashWorking);
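/*
 * Sketch of the digest computed above (lpfc_sha_iterate implements the
 * standard SHA-1 compression function):
 *
 *   HashWorking[0] = HashWorking[78] = WWNN word 0
 *   HashWorking[1] = HashWorking[79] = WWNN word 1
 *   HashWorking[t] ^= phba->RandomData[t]      for t = 0..6
 *   hbainit[0..4]   = SHA-1(H0..H4, HashWorking)
 */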
2355 * lpfc_cleanup - Performs vport cleanups before deleting a vport
2356 * @vport: pointer to a virtual N_Port data structure.
2358 * This routine performs the necessary cleanups before deleting the @vport.
2359 * It invokes the discovery state machine to perform necessary state
2360 * transitions and to release the ndlps associated with the @vport. Note,
2361 * the physical port is treated as @vport 0.
2364 lpfc_cleanup(struct lpfc_vport *vport)
2366 struct lpfc_hba *phba = vport->phba;
2367 struct lpfc_nodelist *ndlp, *next_ndlp;
2370 if (phba->link_state > LPFC_LINK_DOWN)
2371 lpfc_port_link_failure(vport);
2373 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
2374 if (!NLP_CHK_NODE_ACT(ndlp)) {
2375 ndlp = lpfc_enable_node(vport, ndlp,
2376 NLP_STE_UNUSED_NODE);
2379 spin_lock_irq(&phba->ndlp_lock);
2380 NLP_SET_FREE_REQ(ndlp);
2381 spin_unlock_irq(&phba->ndlp_lock);
2382 /* Trigger the release of the ndlp memory */
2386 spin_lock_irq(&phba->ndlp_lock);
2387 if (NLP_CHK_FREE_REQ(ndlp)) {
2388 /* The ndlp should not be in memory free mode already */
2389 spin_unlock_irq(&phba->ndlp_lock);
2392 /* Indicate request for freeing ndlp memory */
2393 NLP_SET_FREE_REQ(ndlp);
2394 spin_unlock_irq(&phba->ndlp_lock);
2396 if (vport->port_type != LPFC_PHYSICAL_PORT &&
2397 ndlp->nlp_DID == Fabric_DID) {
2398 /* Just free up ndlp with Fabric_DID for vports */
2403 /* Take care of nodes in the unused state before the state
2404 * machine takes action.
2406 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
2411 if (ndlp->nlp_type & NLP_FABRIC)
2412 lpfc_disc_state_machine(vport, ndlp, NULL,
2413 NLP_EVT_DEVICE_RECOVERY);
2415 lpfc_disc_state_machine(vport, ndlp, NULL,
2419 /* At this point, ALL ndlps should be gone
2420 * because of the previous NLP_EVT_DEVICE_RM.
2421 * Let's wait for this to happen, if needed.
2423 while (!list_empty(&vport->fc_nodes)) {
2425 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
2426 "0233 Nodelist not empty\n");
2427 list_for_each_entry_safe(ndlp, next_ndlp,
2428 &vport->fc_nodes, nlp_listp) {
2429 lpfc_printf_vlog(ndlp->vport, KERN_ERR,
2431 "0282 did:x%x ndlp:x%p "
2432 "usgmap:x%x refcnt:%d\n",
2433 ndlp->nlp_DID, (void *)ndlp,
2436 &ndlp->kref.refcount));
2441 /* Wait for any activity on ndlps to settle */
2444 lpfc_cleanup_vports_rrqs(vport, NULL);
2448 * lpfc_stop_vport_timers - Stop all the timers associated with a vport
2449 * @vport: pointer to a virtual N_Port data structure.
2451 * This routine stops all the timers associated with a @vport. This function
2452 * is invoked before disabling or deleting a @vport. Note that the physical
2453 * port is treated as @vport 0.
2456 lpfc_stop_vport_timers(struct lpfc_vport *vport)
2458 del_timer_sync(&vport->els_tmofunc);
2459 del_timer_sync(&vport->fc_fdmitmo);
2460 del_timer_sync(&vport->delayed_disc_tmo);
2461 lpfc_can_disctmo(vport);
2466 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2467 * @phba: pointer to lpfc hba data structure.
2469 * This routine stops the SLI4 FCF rediscover wait timer if it's on. The
2470 * caller of this routine should already hold the host lock.
2473 __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2475 /* Clear pending FCF rediscovery wait flag */
2476 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
2478 /* Now, try to stop the timer */
2479 del_timer(&phba->fcf.redisc_wait);
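/*
 * Note: plain del_timer() rather than del_timer_sync() is used here,
 * since the caller holds phba->hbalock, which the timer handler
 * (lpfc_sli4_fcf_redisc_wait_tmo) also acquires; waiting synchronously
 * for the handler to finish could deadlock.
 */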
2483 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2484 * @phba: pointer to lpfc hba data structure.
2486 * This routine stops the SLI4 FCF rediscover wait timer if it's on. It
2487 * checks whether the FCF rediscovery wait timer is pending with the host
2488 * lock held before proceeding with disabling the timer and clearing the
2489 * wait timer pending flag.
2492 lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2494 spin_lock_irq(&phba->hbalock);
2495 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
2496 /* FCF rediscovery timer already fired or stopped */
2497 spin_unlock_irq(&phba->hbalock);
2500 __lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
2501 /* Clear failover in progress flags */
2502 phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
2503 spin_unlock_irq(&phba->hbalock);
2507 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
2508 * @phba: pointer to lpfc hba data structure.
2510 * This routine stops all the timers associated with an HBA. This function is
2511 * invoked before either putting an HBA offline or unloading the driver.
2514 lpfc_stop_hba_timers(struct lpfc_hba *phba)
2516 lpfc_stop_vport_timers(phba->pport);
2517 del_timer_sync(&phba->sli.mbox_tmo);
2518 del_timer_sync(&phba->fabric_block_timer);
2519 del_timer_sync(&phba->eratt_poll);
2520 del_timer_sync(&phba->hb_tmofunc);
2521 if (phba->sli_rev == LPFC_SLI_REV4) {
2522 del_timer_sync(&phba->rrq_tmr);
2523 phba->hba_flag &= ~HBA_RRQ_ACTIVE;
2525 phba->hb_outstanding = 0;
2527 switch (phba->pci_dev_grp) {
2528 case LPFC_PCI_DEV_LP:
2529 /* Stop any LightPulse device specific driver timers */
2530 del_timer_sync(&phba->fcp_poll_timer);
2532 case LPFC_PCI_DEV_OC:
2533 /* Stop any OneConnect device-specific driver timers */
2534 lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
2537 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2538 "0297 Invalid device group (x%x)\n",
2546 * lpfc_block_mgmt_io - Mark an HBA's management interface as blocked
2547 * @phba: pointer to lpfc hba data structure.
2549 * This routine marks an HBA's management interface as blocked. Once the HBA's
2550 * management interface is marked as blocked, all user space access to
2551 * the HBA, whether from the sysfs interface or the libdfc interface, will
2552 * be blocked. The HBA is set to block the management interface when the
2553 * driver prepares the HBA interface for online or offline.
2556 lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action)
2558 unsigned long iflag;
2559 uint8_t actcmd = MBX_HEARTBEAT;
2560 unsigned long timeout;
2562 spin_lock_irqsave(&phba->hbalock, iflag);
2563 phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
2564 spin_unlock_irqrestore(&phba->hbalock, iflag);
2565 if (mbx_action == LPFC_MBX_NO_WAIT)
2567 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
2568 spin_lock_irqsave(&phba->hbalock, iflag);
2569 if (phba->sli.mbox_active) {
2570 actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
2571 /* Determine how long we might wait for the active mailbox
2572 * command to be gracefully completed by firmware.
2574 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
2575 phba->sli.mbox_active) * 1000) + jiffies;
2577 spin_unlock_irqrestore(&phba->hbalock, iflag);
2579 /* Wait for the outstanding mailbox command to complete */
2580 while (phba->sli.mbox_active) {
2581 /* Check active mailbox complete status every 2ms */
2583 if (time_after(jiffies, timeout)) {
2584 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2585 "2813 Mgmt IO is Blocked %x "
2586 "- mbox cmd %x still active\n",
2587 phba->sli.sli_flag, actcmd);
2594 * lpfc_sli4_node_prep - Assign RPIs for active nodes.
2595 * @phba: pointer to lpfc hba data structure.
2597 * Allocate RPIs for all active remote nodes. This is needed whenever
2598 * an SLI4 adapter is reset and the driver is not unloading. Its purpose
2599 * is to fix up the temporary RPI assignments.
2602 lpfc_sli4_node_prep(struct lpfc_hba *phba)
2604 struct lpfc_nodelist *ndlp, *next_ndlp;
2605 struct lpfc_vport **vports;
2608 if (phba->sli_rev != LPFC_SLI_REV4)
2611 vports = lpfc_create_vport_work_array(phba);
2612 if (vports != NULL) {
2613 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2614 if (vports[i]->load_flag & FC_UNLOADING)
2617 list_for_each_entry_safe(ndlp, next_ndlp,
2618 &vports[i]->fc_nodes,
2620 if (NLP_CHK_NODE_ACT(ndlp))
2622 lpfc_sli4_alloc_rpi(phba);
2626 lpfc_destroy_vport_work_array(phba, vports);
2630 * lpfc_online - Initialize and bring an HBA online
2631 * @phba: pointer to lpfc hba data structure.
2633 * This routine initializes the HBA and brings it online. During this
2634 * process, the management interface is blocked to prevent user space access
2635 * to the HBA interfering with the driver initialization.
2642 lpfc_online(struct lpfc_hba *phba)
2644 struct lpfc_vport *vport;
2645 struct lpfc_vport **vports;
2647 bool vpis_cleared = false;
2651 vport = phba->pport;
2653 if (!(vport->fc_flag & FC_OFFLINE_MODE))
2656 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2657 "0458 Bring Adapter online\n");
2659 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
2661 if (!lpfc_sli_queue_setup(phba)) {
2662 lpfc_unblock_mgmt_io(phba);
2666 if (phba->sli_rev == LPFC_SLI_REV4) {
2667 if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
2668 lpfc_unblock_mgmt_io(phba);
2671 spin_lock_irq(&phba->hbalock);
2672 if (!phba->sli4_hba.max_cfg_param.vpi_used)
2673 vpis_cleared = true;
2674 spin_unlock_irq(&phba->hbalock);
2676 if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */
2677 lpfc_unblock_mgmt_io(phba);
2682 vports = lpfc_create_vport_work_array(phba);
2684 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2685 struct Scsi_Host *shost;
2686 shost = lpfc_shost_from_vport(vports[i]);
2687 spin_lock_irq(shost->host_lock);
2688 vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
2689 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
2690 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2691 if (phba->sli_rev == LPFC_SLI_REV4) {
2692 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
2693 if ((vpis_cleared) &&
2694 (vports[i]->port_type !=
2695 LPFC_PHYSICAL_PORT))
2698 spin_unlock_irq(shost->host_lock);
2700 lpfc_destroy_vport_work_array(phba, vports);
2702 lpfc_unblock_mgmt_io(phba);
2707 * lpfc_unblock_mgmt_io - Mark an HBA's management interface as not blocked
2708 * @phba: pointer to lpfc hba data structure.
2710 * This routine marks an HBA's management interface as not blocked. Once the
2711 * HBA's management interface is marked as not blocked, all user space
2712 * access to the HBA, whether from the sysfs interface or the libdfc
2713 * interface, will be allowed. The HBA is set to block the management interface
2714 * when the driver prepares the HBA interface for online or offline, and then
2715 * set to unblock the management interface afterwards.
2718 lpfc_unblock_mgmt_io(struct lpfc_hba * phba)
2720 unsigned long iflag;
2722 spin_lock_irqsave(&phba->hbalock, iflag);
2723 phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
2724 spin_unlock_irqrestore(&phba->hbalock, iflag);
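/*
 * Typical pairing (a sketch mirroring lpfc_online()):
 *
 *   lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
 *   ... reinitialize the HBA ...
 *   lpfc_unblock_mgmt_io(phba);
 */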
2728 * lpfc_offline_prep - Prepare an HBA to be brought offline
2729 * @phba: pointer to lpfc hba data structure.
2731 * This routine is invoked to prepare an HBA to be brought offline. It performs
2732 * unregistration login to all the nodes on all vports and flushes the mailbox
2733 * queue to make it ready to be brought offline.
2736 lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
2738 struct lpfc_vport *vport = phba->pport;
2739 struct lpfc_nodelist *ndlp, *next_ndlp;
2740 struct lpfc_vport **vports;
2741 struct Scsi_Host *shost;
2744 if (vport->fc_flag & FC_OFFLINE_MODE)
2747 lpfc_block_mgmt_io(phba, mbx_action);
2749 lpfc_linkdown(phba);
2751 /* Issue an unreg_login to all nodes on all vports */
2752 vports = lpfc_create_vport_work_array(phba);
2753 if (vports != NULL) {
2754 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2755 if (vports[i]->load_flag & FC_UNLOADING)
2757 shost = lpfc_shost_from_vport(vports[i]);
2758 spin_lock_irq(shost->host_lock);
2759 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
2760 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2761 vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
2762 spin_unlock_irq(shost->host_lock);
2764 shost = lpfc_shost_from_vport(vports[i]);
2765 list_for_each_entry_safe(ndlp, next_ndlp,
2766 &vports[i]->fc_nodes,
2768 if (!NLP_CHK_NODE_ACT(ndlp))
2770 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
2772 if (ndlp->nlp_type & NLP_FABRIC) {
2773 lpfc_disc_state_machine(vports[i], ndlp,
2774 NULL, NLP_EVT_DEVICE_RECOVERY);
2775 lpfc_disc_state_machine(vports[i], ndlp,
2776 NULL, NLP_EVT_DEVICE_RM);
2778 spin_lock_irq(shost->host_lock);
2779 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
2780 spin_unlock_irq(shost->host_lock);
2782 * Whenever an SLI4 port goes offline, free the
2783 * RPI. Get a new RPI when the adapter port
2784 * comes back online.
2786 if (phba->sli_rev == LPFC_SLI_REV4)
2787 lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
2788 lpfc_unreg_rpi(vports[i], ndlp);
2792 lpfc_destroy_vport_work_array(phba, vports);
2794 lpfc_sli_mbox_sys_shutdown(phba, mbx_action);
2798 * lpfc_offline - Bring an HBA offline
2799 * @phba: pointer to lpfc hba data structure.
2801 * This routine actually brings an HBA offline. It stops all the timers
2802 * associated with the HBA, brings down the SLI layer, and eventually
2803 * marks the HBA as in offline state for the upper layer protocol.
2806 lpfc_offline(struct lpfc_hba *phba)
2808 struct Scsi_Host *shost;
2809 struct lpfc_vport **vports;
2812 if (phba->pport->fc_flag & FC_OFFLINE_MODE)
2815 /* stop port and all timers associated with this hba */
2816 lpfc_stop_port(phba);
2817 vports = lpfc_create_vport_work_array(phba);
2819 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
2820 lpfc_stop_vport_timers(vports[i]);
2821 lpfc_destroy_vport_work_array(phba, vports);
2822 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2823 "0460 Bring Adapter offline\n");
2824 /* Bring down the SLI Layer and cleanup. The HBA is offline now. */
2826 lpfc_sli_hba_down(phba);
2827 spin_lock_irq(&phba->hbalock);
2829 spin_unlock_irq(&phba->hbalock);
2830 vports = lpfc_create_vport_work_array(phba);
2832 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2833 shost = lpfc_shost_from_vport(vports[i]);
2834 spin_lock_irq(shost->host_lock);
2835 vports[i]->work_port_events = 0;
2836 vports[i]->fc_flag |= FC_OFFLINE_MODE;
2837 spin_unlock_irq(shost->host_lock);
2839 lpfc_destroy_vport_work_array(phba, vports);
2843 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
2844 * @phba: pointer to lpfc hba data structure.
2846 * This routine is to free all the SCSI buffers and IOCBs from the driver
2847 * list back to the kernel. It is called from lpfc_pci_remove_one to free
2848 * the internal resources before the device is removed from the system.
2851 lpfc_scsi_free(struct lpfc_hba *phba)
2853 struct lpfc_scsi_buf *sb, *sb_next;
2854 struct lpfc_iocbq *io, *io_next;
2856 spin_lock_irq(&phba->hbalock);
2857 /* Release all the lpfc_scsi_bufs maintained by this host. */
2858 spin_lock(&phba->scsi_buf_list_lock);
2859 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) {
2860 list_del(&sb->list);
2861 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
2864 phba->total_scsi_bufs--;
2866 spin_unlock(&phba->scsi_buf_list_lock);
2868 /* Release all the lpfc_iocbq entries maintained by this host. */
2869 list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) {
2870 list_del(&io->list);
2872 phba->total_iocbq_bufs--;
2875 spin_unlock_irq(&phba->hbalock);
2879 * lpfc_sli4_xri_sgl_update - update xri-sgl sizing and mapping
2880 * @phba: pointer to lpfc hba data structure.
2882 * This routine first calculates the sizes of the current els and allocated
2883 * scsi sgl lists, and then goes through all sgls to update the physical
2884 * XRIs assigned due to port function reset. During port initialization, the
2885 * current els and allocated scsi sgl list sizes are 0.
2888 * 0 - successful (for now, it always returns 0)
2891 lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
2893 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
2894 struct lpfc_scsi_buf *psb = NULL, *psb_next = NULL;
2895 uint16_t i, lxri, xri_cnt, els_xri_cnt, scsi_xri_cnt;
2896 LIST_HEAD(els_sgl_list);
2897 LIST_HEAD(scsi_sgl_list);
2901 * update on pci function's els xri-sgl list
2903 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
2904 if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) {
2905 /* els xri-sgl expanded */
2906 xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt;
2907 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
2908 "3157 ELS xri-sgl count increased from "
2909 "%d to %d\n", phba->sli4_hba.els_xri_cnt,
2911 /* allocate the additional els sgls */
2912 for (i = 0; i < xri_cnt; i++) {
2913 sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
2915 if (sglq_entry == NULL) {
2916 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2917 "2562 Failure to allocate an "
2918 "ELS sgl entry:%d\n", i);
2922 sglq_entry->buff_type = GEN_BUFF_TYPE;
2923 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0,
2925 if (sglq_entry->virt == NULL) {
2927 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2928 "2563 Failure to allocate an "
2929 "ELS mbuf:%d\n", i);
2933 sglq_entry->sgl = sglq_entry->virt;
2934 memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
2935 sglq_entry->state = SGL_FREED;
2936 list_add_tail(&sglq_entry->list, &els_sgl_list);
2938 spin_lock_irq(&phba->hbalock);
2939 list_splice_init(&els_sgl_list, &phba->sli4_hba.lpfc_sgl_list);
2940 spin_unlock_irq(&phba->hbalock);
2941 } else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) {
2942 /* els xri-sgl shrunk */
2943 xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt;
2944 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
2945 "3158 ELS xri-sgl count decreased from "
2946 "%d to %d\n", phba->sli4_hba.els_xri_cnt,
2948 spin_lock_irq(&phba->hbalock);
2949 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &els_sgl_list);
2950 spin_unlock_irq(&phba->hbalock);
2951 /* release extra els sgls from list */
2952 for (i = 0; i < xri_cnt; i++) {
2953 list_remove_head(&els_sgl_list,
2954 sglq_entry, struct lpfc_sglq, list);
2956 lpfc_mbuf_free(phba, sglq_entry->virt,
2961 spin_lock_irq(&phba->hbalock);
2962 list_splice_init(&els_sgl_list, &phba->sli4_hba.lpfc_sgl_list);
2963 spin_unlock_irq(&phba->hbalock);
2965 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
2966 "3163 ELS xri-sgl count unchanged: %d\n",
2968 phba->sli4_hba.els_xri_cnt = els_xri_cnt;
2970 /* update xris to els sgls on the list */
2972 sglq_entry_next = NULL;
2973 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
2974 &phba->sli4_hba.lpfc_sgl_list, list) {
2975 lxri = lpfc_sli4_next_xritag(phba);
2976 if (lxri == NO_XRI) {
2977 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2978 "2400 Failed to allocate xri for "
2983 sglq_entry->sli4_lxritag = lxri;
2984 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
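/*
 * sli4_lxritag is the driver's logical XRI index, while sli4_xritag is
 * the physical XRI the port expects; going through the xri_ids[] map
 * lets a port function reset renumber physical XRIs without disturbing
 * the logical indexing.
 */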
2988 * update on pci function's allocated scsi xri-sgl list
2990 phba->total_scsi_bufs = 0;
2992 /* maximum number of xris available for scsi buffers */
2993 phba->sli4_hba.scsi_xri_max = phba->sli4_hba.max_cfg_param.max_xri -
2996 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
2997 "2401 Current allocated SCSI xri-sgl count:%d, "
2998 "maximum SCSI xri count:%d\n",
2999 phba->sli4_hba.scsi_xri_cnt,
3000 phba->sli4_hba.scsi_xri_max);
3002 spin_lock_irq(&phba->scsi_buf_list_lock);
3003 list_splice_init(&phba->lpfc_scsi_buf_list, &scsi_sgl_list);
3004 spin_unlock_irq(&phba->scsi_buf_list_lock);
3006 if (phba->sli4_hba.scsi_xri_cnt > phba->sli4_hba.scsi_xri_max) {
3007 /* max scsi xri shrunk below the allocated scsi buffers */
3008 scsi_xri_cnt = phba->sli4_hba.scsi_xri_cnt -
3009 phba->sli4_hba.scsi_xri_max;
3010 /* release the extra allocated scsi buffers */
3011 for (i = 0; i < scsi_xri_cnt; i++) {
3012 list_remove_head(&scsi_sgl_list, psb,
3013 struct lpfc_scsi_buf, list);
3014 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, psb->data,
3018 spin_lock_irq(&phba->scsi_buf_list_lock);
3019 phba->sli4_hba.scsi_xri_cnt -= scsi_xri_cnt;
3020 spin_unlock_irq(&phba->scsi_buf_list_lock);
3023 /* update xris associated to remaining allocated scsi buffers */
3026 list_for_each_entry_safe(psb, psb_next, &scsi_sgl_list, list) {
3027 lxri = lpfc_sli4_next_xritag(phba);
3028 if (lxri == NO_XRI) {
3029 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3030 "2560 Failed to allocate xri for "
3035 psb->cur_iocbq.sli4_lxritag = lxri;
3036 psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
3038 spin_lock_irq(&phba->scsi_buf_list_lock);
3039 list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list);
3040 spin_unlock_irq(&phba->scsi_buf_list_lock);
3045 lpfc_free_els_sgl_list(phba);
3046 lpfc_scsi_free(phba);
3051 * lpfc_create_port - Create an FC port
3052 * @phba: pointer to lpfc hba data structure.
3053 * @instance: a unique integer ID to this FC port.
3054 * @dev: pointer to the device data structure.
3056 * This routine creates an FC port for the upper layer protocol. The FC port
3057 * can be created on top of either a physical port or a virtual port provided
3058 * by the HBA. This routine also allocates a SCSI host data structure (shost)
3059 * and associates it with the FC port created before adding the shost into the SCSI layer.
3063 * @vport - pointer to the virtual N_Port data structure.
3064 * NULL - port create failed.
3067 lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
3069 struct lpfc_vport *vport;
3070 struct Scsi_Host *shost;
3073 if (dev != &phba->pcidev->dev)
3074 shost = scsi_host_alloc(&lpfc_vport_template,
3075 sizeof(struct lpfc_vport));
3077 shost = scsi_host_alloc(&lpfc_template,
3078 sizeof(struct lpfc_vport));
3082 vport = (struct lpfc_vport *) shost->hostdata;
3084 vport->load_flag |= FC_LOADING;
3085 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
3086 vport->fc_rscn_flush = 0;
3088 lpfc_get_vport_cfgparam(vport);
3089 shost->unique_id = instance;
3090 shost->max_id = LPFC_MAX_TARGET;
3091 shost->max_lun = vport->cfg_max_luns;
3092 shost->this_id = -1;
3093 shost->max_cmd_len = 16;
3094 if (phba->sli_rev == LPFC_SLI_REV4) {
3095 shost->dma_boundary =
3096 phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
3097 shost->sg_tablesize = phba->cfg_sg_seg_cnt;
3101 * Set initial can_queue value since 0 is no longer supported and
3102 * scsi_add_host will fail. This will be adjusted later based on the
3103 * max xri value determined in hba setup.
3105 shost->can_queue = phba->cfg_hba_queue_depth - 10;
3106 if (dev != &phba->pcidev->dev) {
3107 shost->transportt = lpfc_vport_transport_template;
3108 vport->port_type = LPFC_NPIV_PORT;
3110 shost->transportt = lpfc_transport_template;
3111 vport->port_type = LPFC_PHYSICAL_PORT;
3114 /* Initialize all internally managed lists. */
3115 INIT_LIST_HEAD(&vport->fc_nodes);
3116 INIT_LIST_HEAD(&vport->rcv_buffer_list);
3117 spin_lock_init(&vport->work_port_lock);
3119 init_timer(&vport->fc_disctmo);
3120 vport->fc_disctmo.function = lpfc_disc_timeout;
3121 vport->fc_disctmo.data = (unsigned long)vport;
3123 init_timer(&vport->fc_fdmitmo);
3124 vport->fc_fdmitmo.function = lpfc_fdmi_tmo;
3125 vport->fc_fdmitmo.data = (unsigned long)vport;
3127 init_timer(&vport->els_tmofunc);
3128 vport->els_tmofunc.function = lpfc_els_timeout;
3129 vport->els_tmofunc.data = (unsigned long)vport;
3131 init_timer(&vport->delayed_disc_tmo);
3132 vport->delayed_disc_tmo.function = lpfc_delayed_disc_tmo;
3133 vport->delayed_disc_tmo.data = (unsigned long)vport;
3135 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
3139 spin_lock_irq(&phba->hbalock);
3140 list_add_tail(&vport->listentry, &phba->port_list);
3141 spin_unlock_irq(&phba->hbalock);
3145 scsi_host_put(shost);
3151 * destroy_port - destroy an FC port
3152 * @vport: pointer to an lpfc virtual N_Port data structure.
3154 * This routine destroys an FC port from the upper layer protocol. All the
3155 * resources associated with the port are released.
3158 destroy_port(struct lpfc_vport *vport)
3160 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3161 struct lpfc_hba *phba = vport->phba;
3163 lpfc_debugfs_terminate(vport);
3164 fc_remove_host(shost);
3165 scsi_remove_host(shost);
3167 spin_lock_irq(&phba->hbalock);
3168 list_del_init(&vport->listentry);
3169 spin_unlock_irq(&phba->hbalock);
3171 lpfc_cleanup(vport);
3176 * lpfc_get_instance - Get a unique integer ID
3178 * This routine allocates a unique integer ID from lpfc_hba_index pool. It
3179 * uses the kernel idr facility to perform the task.
3182 * instance - a unique integer ID allocated as the new instance.
3183 * -1 - lpfc get instance failed.
3186 lpfc_get_instance(void)
3190 ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL);
3191 return ret < 0 ? -1 : ret;
3195 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
3196 * @shost: pointer to SCSI host data structure.
3197 * @time: elapsed time of the scan in jiffies.
3199 * This routine is called by the SCSI layer with a SCSI host to determine
3200 * whether the scan host is finished.
3202 * Note: there is no scan_start function as adapter initialization will have
3203 * asynchronously kicked off the link initialization.
3206 * 0 - SCSI host scan is not over yet.
3207 * 1 - SCSI host scan is over.
3209 int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
3211 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
3212 struct lpfc_hba *phba = vport->phba;
3215 spin_lock_irq(shost->host_lock);
3217 if (vport->load_flag & FC_UNLOADING) {
3221 if (time >= msecs_to_jiffies(30 * 1000)) {
3222 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3223 "0461 Scanning longer than 30 "
3224 "seconds. Continuing initialization\n");
3228 if (time >= msecs_to_jiffies(15 * 1000) &&
3229 phba->link_state <= LPFC_LINK_DOWN) {
3230 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3231 "0465 Link down longer than 15 "
3232 "seconds. Continuing initialization\n");
3237 if (vport->port_state != LPFC_VPORT_READY)
3239 if (vport->num_disc_nodes || vport->fc_prli_sent)
3241 if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000))
3243 if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
3249 spin_unlock_irq(shost->host_lock);
3254 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port
3255 * @shost: pointer to SCSI host data structure.
3257 * This routine initializes the SCSI host attributes of a given FC port. The
3258 * SCSI host can be either on top of a physical port or a virtual port.
3260 void lpfc_host_attrib_init(struct Scsi_Host *shost)
3262 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
3263 struct lpfc_hba *phba = vport->phba;
3265 * Set fixed host attributes. Must be done after lpfc_sli_hba_setup().
3268 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
3269 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
3270 fc_host_supported_classes(shost) = FC_COS_CLASS3;
3272 memset(fc_host_supported_fc4s(shost), 0,
3273 sizeof(fc_host_supported_fc4s(shost)));
3274 fc_host_supported_fc4s(shost)[2] = 1;
3275 fc_host_supported_fc4s(shost)[7] = 1;
3277 lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
3278 sizeof fc_host_symbolic_name(shost));
3280 fc_host_supported_speeds(shost) = 0;
3281 if (phba->lmt & LMT_16Gb)
3282 fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT;
3283 if (phba->lmt & LMT_10Gb)
3284 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
3285 if (phba->lmt & LMT_8Gb)
3286 fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
3287 if (phba->lmt & LMT_4Gb)
3288 fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
3289 if (phba->lmt & LMT_2Gb)
3290 fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
3291 if (phba->lmt & LMT_1Gb)
3292 fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
3294 fc_host_maxframe_size(shost) =
3295 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
3296 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;
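/*
 * Worked example: service parameters of bbRcvSizeMsb = 0x08 and
 * bbRcvSizeLsb = 0x40 yield ((0x08 & 0x0F) << 8) | 0x40 = 2112 bytes,
 * the usual Fibre Channel maximum frame size.
 */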
3298 fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo;
3300 /* This value is also unchanging */
3301 memset(fc_host_active_fc4s(shost), 0,
3302 sizeof(fc_host_active_fc4s(shost)));
3303 fc_host_active_fc4s(shost)[2] = 1;
3304 fc_host_active_fc4s(shost)[7] = 1;
3306 fc_host_max_npiv_vports(shost) = phba->max_vpi;
3307 spin_lock_irq(shost->host_lock);
3308 vport->load_flag &= ~FC_LOADING;
3309 spin_unlock_irq(shost->host_lock);
3313 * lpfc_stop_port_s3 - Stop SLI3 device port
3314 * @phba: pointer to lpfc hba data structure.
3316 * This routine is invoked to stop an SLI3 device port. It stops the device
3317 * from generating interrupts and stops the device driver's timers for the device.
3321 lpfc_stop_port_s3(struct lpfc_hba *phba)
3323 /* Clear all interrupt enable conditions */
3324 writel(0, phba->HCregaddr);
3325 readl(phba->HCregaddr); /* flush */
3326 /* Clear all pending interrupts */
3327 writel(0xffffffff, phba->HAregaddr);
3328 readl(phba->HAregaddr); /* flush */
3330 /* Reset some HBA SLI setup states */
3331 lpfc_stop_hba_timers(phba);
3332 phba->pport->work_port_events = 0;
3336 * lpfc_stop_port_s4 - Stop SLI4 device port
3337 * @phba: pointer to lpfc hba data structure.
3339 * This routine is invoked to stop an SLI4 device port. It stops the device
3340 * from generating interrupts and stops the device driver's timers for the device.
3344 lpfc_stop_port_s4(struct lpfc_hba *phba)
3346 /* Reset some HBA SLI4 setup states */
3347 lpfc_stop_hba_timers(phba);
3348 phba->pport->work_port_events = 0;
3349 phba->sli4_hba.intr_enable = 0;
3353 * lpfc_stop_port - Wrapper function for stopping hba port
3354 * @phba: Pointer to HBA context object.
3356 * This routine wraps the actual SLI3 or SLI4 HBA stop-port routine via
3357 * the API jump table function pointer in the lpfc_hba struct.
3360 lpfc_stop_port(struct lpfc_hba *phba)
3362 phba->lpfc_stop_port(phba);
3366 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer
3367 * @phba: Pointer to hba for which this call is being executed.
3369 * This routine starts the timer waiting for the FCF rediscovery to complete.
3372 lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
3374 unsigned long fcf_redisc_wait_tmo =
3375 (jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
3376 /* Start fcf rediscovery wait period timer */
3377 mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
3378 spin_lock_irq(&phba->hbalock);
3379 /* Allow action to new fcf asynchronous event */
3380 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
3381 /* Mark the FCF rediscovery pending state */
3382 phba->fcf.fcf_flag |= FCF_REDISC_PEND;
3383 spin_unlock_irq(&phba->hbalock);
3387 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
3388 * @ptr: Map to lpfc_hba data structure pointer.
3390 * This routine is invoked when waiting for FCF table rediscover has been
3391 * timed out. If new FCF record(s) has (have) been discovered during the
3392 * wait period, a new FCF event shall be added to the FCOE async event
3393 * list, and then the worker thread shall be woken up for processing from the
3394 * worker thread context.
3397 lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr)
3399 struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
3401 /* Don't send FCF rediscovery event if timer cancelled */
3402 spin_lock_irq(&phba->hbalock);
3403 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
3404 spin_unlock_irq(&phba->hbalock);
3407 /* Clear FCF rediscovery timer pending flag */
3408 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
3409 /* FCF rediscovery event to worker thread */
3410 phba->fcf.fcf_flag |= FCF_REDISC_EVT;
3411 spin_unlock_irq(&phba->hbalock);
3412 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
3413 "2776 FCF rediscover quiescent timer expired\n");
3414 /* wake up worker thread */
3415 lpfc_worker_wake_up(phba);
3419 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
3420 * @phba: pointer to lpfc hba data structure.
3421 * @acqe_link: pointer to the async link completion queue entry.
3423 * This routine is to parse the SLI4 link-attention link fault code and
3424 * translate it into the base driver's read link attention mailbox command status.
3427 * Return: Link-attention status in terms of base driver's coding.
3430 lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
3431 struct lpfc_acqe_link *acqe_link)
3433 uint16_t latt_fault;
3435 switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
3436 case LPFC_ASYNC_LINK_FAULT_NONE:
3437 case LPFC_ASYNC_LINK_FAULT_LOCAL:
3438 case LPFC_ASYNC_LINK_FAULT_REMOTE:
3442 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3443 "0398 Invalid link fault code: x%x\n",
3444 bf_get(lpfc_acqe_link_fault, acqe_link));
3445 latt_fault = MBXERR_ERROR;
3452 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
3453 * @phba: pointer to lpfc hba data structure.
3454 * @acqe_link: pointer to the async link completion queue entry.
3456 * This routine is to parse the SLI4 link attention type and translate it
3457 * into the base driver's link attention type coding.
3459 * Return: Link attention type in terms of base driver's coding.
3462 lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
3463 struct lpfc_acqe_link *acqe_link)
3467 switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
3468 case LPFC_ASYNC_LINK_STATUS_DOWN:
3469 case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
3470 att_type = LPFC_ATT_LINK_DOWN;
3472 case LPFC_ASYNC_LINK_STATUS_UP:
3473 /* Ignore physical link up events - wait for logical link up */
3474 att_type = LPFC_ATT_RESERVED;
3476 case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
3477 att_type = LPFC_ATT_LINK_UP;
3480 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3481 "0399 Invalid link attention type: x%x\n",
3482 bf_get(lpfc_acqe_link_status, acqe_link));
3483 att_type = LPFC_ATT_RESERVED;
3490 * lpfc_sli4_parse_latt_link_speed - Parse sli4 link-attention link speed
3491 * @phba: pointer to lpfc hba data structure.
3492 * @acqe_link: pointer to the async link completion queue entry.
3494 * This routine is to parse the SLI4 link-attention link speed and translate
3495 * it into the base driver's link-attention link speed coding.
3497 * Return: Link-attention link speed in terms of base driver's coding.
3500 lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba,
3501 struct lpfc_acqe_link *acqe_link)
3505 switch (bf_get(lpfc_acqe_link_speed, acqe_link)) {
3506 case LPFC_ASYNC_LINK_SPEED_ZERO:
3507 case LPFC_ASYNC_LINK_SPEED_10MBPS:
3508 case LPFC_ASYNC_LINK_SPEED_100MBPS:
3509 link_speed = LPFC_LINK_SPEED_UNKNOWN;
3511 case LPFC_ASYNC_LINK_SPEED_1GBPS:
3512 link_speed = LPFC_LINK_SPEED_1GHZ;
3514 case LPFC_ASYNC_LINK_SPEED_10GBPS:
3515 link_speed = LPFC_LINK_SPEED_10GHZ;
3518 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3519 "0483 Invalid link-attention link speed: x%x\n",
3520 bf_get(lpfc_acqe_link_speed, acqe_link));
3521 link_speed = LPFC_LINK_SPEED_UNKNOWN;
3528 * lpfc_sli_port_speed_get - Translate the SLI3 link speed code to link speed
3529 * @phba: pointer to lpfc hba data structure.
3531 * This routine is to get an SLI3 FC port's link speed in Mbps.
3533 * Return: link speed in terms of Mbps.
3536 lpfc_sli_port_speed_get(struct lpfc_hba *phba)
3538 uint32_t link_speed;
3540 if (!lpfc_is_link_up(phba))
3543 switch (phba->fc_linkspeed) {
3544 case LPFC_LINK_SPEED_1GHZ:
3547 case LPFC_LINK_SPEED_2GHZ:
3550 case LPFC_LINK_SPEED_4GHZ:
3553 case LPFC_LINK_SPEED_8GHZ:
3556 case LPFC_LINK_SPEED_10GHZ:
3559 case LPFC_LINK_SPEED_16GHZ:
3569 * lpfc_sli4_port_speed_parse - Parse async evt link speed code to link speed
3570 * @phba: pointer to lpfc hba data structure.
3571 * @evt_code: asynchronous event code.
3572 * @speed_code: asynchronous event link speed code.
3574 * This routine is to parse the given SLI4 async event link speed code into
3575 * value of Mbps for the link speed.
3577 * Return: link speed in terms of Mbps.
3580 lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code,
3583 uint32_t port_speed;
3586 case LPFC_TRAILER_CODE_LINK:
3587 switch (speed_code) {
3588 case LPFC_EVT_CODE_LINK_NO_LINK:
3591 case LPFC_EVT_CODE_LINK_10_MBIT:
3594 case LPFC_EVT_CODE_LINK_100_MBIT:
3597 case LPFC_EVT_CODE_LINK_1_GBIT:
3600 case LPFC_EVT_CODE_LINK_10_GBIT:
3607 case LPFC_TRAILER_CODE_FC:
3608 switch (speed_code) {
3609 case LPFC_EVT_CODE_FC_NO_LINK:
3612 case LPFC_EVT_CODE_FC_1_GBAUD:
3615 case LPFC_EVT_CODE_FC_2_GBAUD:
3618 case LPFC_EVT_CODE_FC_4_GBAUD:
3621 case LPFC_EVT_CODE_FC_8_GBAUD:
3624 case LPFC_EVT_CODE_FC_10_GBAUD:
3627 case LPFC_EVT_CODE_FC_16_GBAUD:
3641 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event
3642 * @phba: pointer to lpfc hba data structure.
3643 * @acqe_link: pointer to the async link completion queue entry.
3645 * This routine is to handle the SLI4 asynchronous FCoE link event.
3648 lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
3649 struct lpfc_acqe_link *acqe_link)
3651 struct lpfc_dmabuf *mp;
3654 struct lpfc_mbx_read_top *la;
3658 att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
3659 if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP)
3661 phba->fcoe_eventtag = acqe_link->event_tag;
3662 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3664 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3665 "0395 The mboxq allocation failed\n");
3668 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
3670 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3671 "0396 The lpfc_dmabuf allocation failed\n");
3674 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
3676 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3677 "0397 The mbuf allocation failed\n");
3678 goto out_free_dmabuf;
3681 /* Cleanup any outstanding ELS commands */
3682 lpfc_els_flush_all_cmd(phba);
3684 /* Block ELS IOCBs until we have done process link event */
3685 phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
3687 /* Update link event statistics */
3688 phba->sli.slistat.link_event++;
3690 /* Create lpfc_handle_latt mailbox command from link ACQE */
3691 lpfc_read_topology(phba, pmb, mp);
3692 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
3693 pmb->vport = phba->pport;
3695 /* Keep the link status for extra SLI4 state machine reference */
3696 phba->sli4_hba.link_state.speed =
3697 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK,
3698 bf_get(lpfc_acqe_link_speed, acqe_link));
3699 phba->sli4_hba.link_state.duplex =
3700 bf_get(lpfc_acqe_link_duplex, acqe_link);
3701 phba->sli4_hba.link_state.status =
3702 bf_get(lpfc_acqe_link_status, acqe_link);
3703 phba->sli4_hba.link_state.type =
3704 bf_get(lpfc_acqe_link_type, acqe_link);
3705 phba->sli4_hba.link_state.number =
3706 bf_get(lpfc_acqe_link_number, acqe_link);
3707 phba->sli4_hba.link_state.fault =
3708 bf_get(lpfc_acqe_link_fault, acqe_link);
3709 phba->sli4_hba.link_state.logical_speed =
3710 bf_get(lpfc_acqe_logical_link_speed, acqe_link) * 10;
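/*
 * The multiply by 10 suggests the ACQE reports logical link speed in
 * units of 10 Mbps; the stored value is then plain Mbps, matching the
 * "%dMbps" format used in the log message below.
 */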
3712 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3713 "2900 Async FC/FCoE Link event - Speed:%dGBit "
3714 "duplex:x%x LA Type:x%x Port Type:%d Port Number:%d "
3715 "Logical speed:%dMbps Fault:%d\n",
3716 phba->sli4_hba.link_state.speed,
3717 phba->sli4_hba.link_state.topology,
3718 phba->sli4_hba.link_state.status,
3719 phba->sli4_hba.link_state.type,
3720 phba->sli4_hba.link_state.number,
3721 phba->sli4_hba.link_state.logical_speed,
3722 phba->sli4_hba.link_state.fault);
3724 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch
3725 * topology info. Note: Optional for non FC-AL ports.
3727 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
3728 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
3729 if (rc == MBX_NOT_FINISHED)
3730 goto out_free_dmabuf;
3734 * For FCoE Mode: fill in all the topology information we need and call
3735 * the READ_TOPOLOGY completion routine to continue without actually
3736 * sending the READ_TOPOLOGY mailbox command to the port.
3738 /* Parse and translate status field */
3740 mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link);
3742 /* Parse and translate link attention fields */
3743 la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
3744 la->eventTag = acqe_link->event_tag;
3745 bf_set(lpfc_mbx_read_top_att_type, la, att_type);
3746 bf_set(lpfc_mbx_read_top_link_spd, la,
3747 lpfc_sli4_parse_latt_link_speed(phba, acqe_link));
3749 /* Fake the following irrelevant fields */
3750 bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT);
3751 bf_set(lpfc_mbx_read_top_alpa_granted, la, 0);
3752 bf_set(lpfc_mbx_read_top_il, la, 0);
3753 bf_set(lpfc_mbx_read_top_pb, la, 0);
3754 bf_set(lpfc_mbx_read_top_fa, la, 0);
3755 bf_set(lpfc_mbx_read_top_mm, la, 0);
3757 /* Invoke the lpfc_handle_latt mailbox command callback function */
3758 lpfc_mbx_cmpl_read_topology(phba, pmb);
3765 mempool_free(pmb, phba->mbox_mem_pool);
3769 * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event
3770 * @phba: pointer to lpfc hba data structure.
3771 * @acqe_fc: pointer to the async fc completion queue entry.
3773 * This routine is to handle the SLI4 asynchronous FC event. It will simply log
3774 * that the event was received and then issue a read_topology mailbox command so
3775 * that the rest of the driver will treat it the same as SLI3.
3778 lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
3780 struct lpfc_dmabuf *mp;
3784 if (bf_get(lpfc_trailer_type, acqe_fc) !=
3785 LPFC_FC_LA_EVENT_TYPE_FC_LINK) {
3786 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3787 "2895 Non FC link Event detected.(%d)\n",
3788 bf_get(lpfc_trailer_type, acqe_fc));
3791 /* Keep the link status for extra SLI4 state machine reference */
3792 phba->sli4_hba.link_state.speed =
3793 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
3794 bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
3795 phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL;
3796 phba->sli4_hba.link_state.topology =
3797 bf_get(lpfc_acqe_fc_la_topology, acqe_fc);
3798 phba->sli4_hba.link_state.status =
3799 bf_get(lpfc_acqe_fc_la_att_type, acqe_fc);
3800 phba->sli4_hba.link_state.type =
3801 bf_get(lpfc_acqe_fc_la_port_type, acqe_fc);
3802 phba->sli4_hba.link_state.number =
3803 bf_get(lpfc_acqe_fc_la_port_number, acqe_fc);
3804 phba->sli4_hba.link_state.fault =
3805 bf_get(lpfc_acqe_link_fault, acqe_fc);
3806 phba->sli4_hba.link_state.logical_speed =
3807 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
3808 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3809 "2896 Async FC event - Speed:%dGBaud Topology:x%x "
3810 "LA Type:x%x Port Type:%d Port Number:%d Logical speed:"
3811 "%dMbps Fault:%d\n",
3812 phba->sli4_hba.link_state.speed,
3813 phba->sli4_hba.link_state.topology,
3814 phba->sli4_hba.link_state.status,
3815 phba->sli4_hba.link_state.type,
3816 phba->sli4_hba.link_state.number,
3817 phba->sli4_hba.link_state.logical_speed,
3818 phba->sli4_hba.link_state.fault);
3819 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3821 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3822 "2897 The mboxq allocation failed\n");
3825 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
3827 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3828 "2898 The lpfc_dmabuf allocation failed\n");
3831 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
3833 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3834 "2899 The mbuf allocation failed\n");
3835 goto out_free_dmabuf;
3838 /* Cleanup any outstanding ELS commands */
3839 lpfc_els_flush_all_cmd(phba);
3841 /* Block ELS IOCBs until we have done process link event */
3842 phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
3844 /* Update link event statistics */
3845 phba->sli.slistat.link_event++;
3847 /* Create lpfc_handle_latt mailbox command from link ACQE */
3848 lpfc_read_topology(phba, pmb, mp);
3849 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
3850 pmb->vport = phba->pport;
3852 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
3853 if (rc == MBX_NOT_FINISHED)
3854 goto out_free_dmabuf;
3860 mempool_free(pmb, phba->mbox_mem_pool);
3864 * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event
3865 * @phba: pointer to lpfc hba data structure.
3866 * @acqe_sli: pointer to the async SLI completion queue entry.
3868 * This routine is to handle the SLI4 asynchronous SLI events.
3871 lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
3876 struct lpfc_acqe_misconfigured_event *misconfigured;
3878 /* special case misconfigured event as it contains data for all ports */
3879 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
3880 LPFC_SLI_INTF_IF_TYPE_2) ||
3881 (bf_get(lpfc_trailer_type, acqe_sli) !=
3882 LPFC_SLI_EVENT_TYPE_MISCONFIGURED)) {
3883 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3884 "2901 Async SLI event - Event Data1:x%08x Event Data2:"
3885 "x%08x SLI Event Type:%d\n",
3886 acqe_sli->event_data1, acqe_sli->event_data2,
3887 bf_get(lpfc_trailer_type, acqe_sli));
3891 port_name = phba->Port[0];
3892 if (port_name == 0x00)
3893 port_name = '?'; /* port name is empty */
3895 misconfigured = (struct lpfc_acqe_misconfigured_event *)
3896 &acqe_sli->event_data1;
3898 /* fetch the status for this port */
3899 switch (phba->sli4_hba.lnk_info.lnk_no) {
3900 case LPFC_LINK_NUMBER_0:
3901 status = bf_get(lpfc_sli_misconfigured_port0,
3902 &misconfigured->theEvent);
3904 case LPFC_LINK_NUMBER_1:
3905 status = bf_get(lpfc_sli_misconfigured_port1,
3906 &misconfigured->theEvent);
3908 case LPFC_LINK_NUMBER_2:
3909 status = bf_get(lpfc_sli_misconfigured_port2,
3910 &misconfigured->theEvent);
3912 case LPFC_LINK_NUMBER_3:
3913 status = bf_get(lpfc_sli_misconfigured_port3,
3914 &misconfigured->theEvent);
3917 status = ~LPFC_SLI_EVENT_STATUS_VALID;
3922 case LPFC_SLI_EVENT_STATUS_VALID:
3923 return; /* no message if the sfp is okay */
3924 case LPFC_SLI_EVENT_STATUS_NOT_PRESENT:
3925 sprintf(message, "Optics faulted/incorrectly installed/not " \
3926 "installed - Reseat optics, if issue not "
3927 "resolved, replace.");
3929 case LPFC_SLI_EVENT_STATUS_WRONG_TYPE:
3931 "Optics of two types installed - Remove one optic or " \
3932 "install matching pair of optics.");
3934 case LPFC_SLI_EVENT_STATUS_UNSUPPORTED:
3935 sprintf(message, "Incompatible optics - Replace with " \
3936 "compatible optics for card to function.");
3939 /* firmware is reporting a status we don't know about */
3940 sprintf(message, "Unknown event status x%02x", status);
3944 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3945 "3176 Misconfigured Physical Port - "
3946 "Port Name %c %s\n", port_name, message);
3950 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
3951 * @vport: pointer to vport data structure.
3953 * This routine is to perform Clear Virtual Link (CVL) on a vport in
3954 * response to a CVL event.
3956 * Return the pointer to the ndlp with the vport if successful, otherwise NULL.
3959 static struct lpfc_nodelist *
3960 lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
3962 struct lpfc_nodelist *ndlp;
3963 struct Scsi_Host *shost;
3964 struct lpfc_hba *phba;
3971 ndlp = lpfc_findnode_did(vport, Fabric_DID);
3973 /* Cannot find existing Fabric ndlp, so allocate a new one */
3974 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
3977 lpfc_nlp_init(vport, ndlp, Fabric_DID);
3978 /* Set the node type */
3979 ndlp->nlp_type |= NLP_FABRIC;
3980 /* Put ndlp onto node list */
3981 lpfc_enqueue_node(vport, ndlp);
3982 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
3983 /* re-setup ndlp without removing from node list */
3984 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
3988 if ((phba->pport->port_state < LPFC_FLOGI) &&
3989 (phba->pport->port_state != LPFC_VPORT_FAILED))
3991 /* If virtual link is not yet instantiated ignore CVL */
3992 if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC)
3993 && (vport->port_state != LPFC_VPORT_FAILED))
3995 shost = lpfc_shost_from_vport(vport);
3998 lpfc_linkdown_port(vport);
3999 lpfc_cleanup_pending_mbox(vport);
4000 spin_lock_irq(shost->host_lock);
4001 vport->fc_flag |= FC_VPORT_CVL_RCVD;
4002 spin_unlock_irq(shost->host_lock);
4008 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
4009 * @phba: pointer to lpfc hba data structure.
4011 * This routine is to perform Clear Virtual Link (CVL) on all vports in
4012 * response to a FCF dead event.
4015 lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
4017 struct lpfc_vport **vports;
4020 vports = lpfc_create_vport_work_array(phba);
4022 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
4023 lpfc_sli4_perform_vport_cvl(vports[i]);
4024 lpfc_destroy_vport_work_array(phba, vports);
4028 * lpfc_sli4_perform_inuse_fcf_recovery - Perform inuse fcf recovery
4029 * @phba: pointer to lpfc hba data structure.
4031 * This routine is to perform FCF recovery when the in-use FCF is either dead or has been modified.
4035 lpfc_sli4_perform_inuse_fcf_recovery(struct lpfc_hba *phba,
4036 struct lpfc_acqe_fip *acqe_fip)
4040 spin_lock_irq(&phba->hbalock);
4041 /* Mark the fast failover process in progress */
4042 phba->fcf.fcf_flag |= FCF_DEAD_DISC;
4043 spin_unlock_irq(&phba->hbalock);
4045 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
4046 "2771 Start FCF fast failover process due to in-use "
4047 "FCF DEAD/MODIFIED event: evt_tag:x%x, index:x%x\n",
4048 acqe_fip->event_tag, acqe_fip->index);
4049 rc = lpfc_sli4_redisc_fcf_table(phba);
4051 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
4052 "2772 Issue FCF rediscover mabilbox command "
4053 "failed, fail through to FCF dead event\n");
4054 spin_lock_irq(&phba->hbalock);
4055 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
4056 spin_unlock_irq(&phba->hbalock);
4058 * As a last resort, fail over by treating this as a link
4059 * down to FCF registration.
4061 lpfc_sli4_fcf_dead_failthrough(phba);
4063 /* Reset FCF roundrobin bmask for new discovery */
4064 lpfc_sli4_clear_fcf_rr_bmask(phba);
4066 * Handling fast FCF failover to a DEAD FCF event is
4067 * considered equivalent to receiving CVL on all vports.
4069 lpfc_sli4_perform_all_vport_cvl(phba);
4074 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
4075 * @phba: pointer to lpfc hba data structure.
4076 * @acqe_fip: pointer to the async FIP completion queue entry.
4078 * This routine is to handle the SLI4 asynchronous FCoE FIP event.
4081 lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
4082 struct lpfc_acqe_fip *acqe_fip)
4084 uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip);
4086 struct lpfc_vport *vport;
4087 struct lpfc_nodelist *ndlp;
4088 struct Scsi_Host *shost;
4089 int active_vlink_present;
4090 struct lpfc_vport **vports;
4093 phba->fc_eventTag = acqe_fip->event_tag;
4094 phba->fcoe_eventtag = acqe_fip->event_tag;
4095 switch (event_type) {
4096 case LPFC_FIP_EVENT_TYPE_NEW_FCF:
4097 case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD:
4098 if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF)
4099 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
4101 "2546 New FCF event, evt_tag:x%x, "
4103 acqe_fip->event_tag,
4106 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
4108 "2788 FCF param modified event, "
4109 "evt_tag:x%x, index:x%x\n",
4110 acqe_fip->event_tag,
4112 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
4114 * During the period of FCF discovery, read the FCF
4115 * table record indexed by the event to update
4116 * FCF roundrobin failover eligible FCF bmask.
4118 lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
4120 "2779 Read FCF (x%x) for updating "
4121 "roundrobin FCF failover bmask\n",
4123 rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index);
4126 /* If the FCF discovery is in progress, do nothing. */
4127 spin_lock_irq(&phba->hbalock);
4128 if (phba->hba_flag & FCF_TS_INPROG) {
4129 spin_unlock_irq(&phba->hbalock);
4132 /* If fast FCF failover rescan event is pending, do nothing */
4133 if (phba->fcf.fcf_flag & FCF_REDISC_EVT) {
4134 spin_unlock_irq(&phba->hbalock);
4138 /* If the FCF has already been discovered, perform rediscovery
4139 * only if the FCF with the same index as the in-use FCF was
4140 * modified during normal operation. Otherwise, do nothing.
4142 if (phba->pport->port_state > LPFC_FLOGI) {
4143 spin_unlock_irq(&phba->hbalock);
4144 if (phba->fcf.current_rec.fcf_indx ==
4146 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
4147 "3300 In-use FCF (%d) "
4148 "modified, perform FCF "
4151 lpfc_sli4_perform_inuse_fcf_recovery(phba,
4156 spin_unlock_irq(&phba->hbalock);
4158 /* Otherwise, scan the entire FCF table and re-discover SAN */
4159 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
4160 "2770 Start FCF table scan per async FCF "
4161 "event, evt_tag:x%x, index:x%x\n",
4162 acqe_fip->event_tag, acqe_fip->index);
4163 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
4164 LPFC_FCOE_FCF_GET_FIRST);
4166 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
4167 "2547 Issue FCF scan read FCF mailbox "
4168 "command failed (x%x)\n", rc);
4171 case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL:
4172 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4173 "2548 FCF Table full count 0x%x tag 0x%x\n",
4174 bf_get(lpfc_acqe_fip_fcf_count, acqe_fip),
4175 acqe_fip->event_tag);
4178 case LPFC_FIP_EVENT_TYPE_FCF_DEAD:
4179 phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
4180 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
4181 "2549 FCF (x%x) disconnected from network, "
4182 "tag:x%x\n", acqe_fip->index, acqe_fip->event_tag);
4184 * If we are in the middle of FCF failover process, clear
4185 * the corresponding FCF bit in the roundrobin bitmap.
4187 spin_lock_irq(&phba->hbalock);
4188 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
4189 spin_unlock_irq(&phba->hbalock);
4190 /* Update FLOGI FCF failover eligible FCF bmask */
4191 lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index);
4194 spin_unlock_irq(&phba->hbalock);
4196 /* If the event is not for currently used fcf do nothing */
4197 if (phba->fcf.current_rec.fcf_indx != acqe_fip->index)
4201 * Otherwise, request the port to rediscover the entire FCF
4202 * table for a fast recovery from the case that the current
4203 * FCF is no longer valid, as we are not already in the
4204 * middle of the FCF failover process.
4206 lpfc_sli4_perform_inuse_fcf_recovery(phba, acqe_fip);
4208 case LPFC_FIP_EVENT_TYPE_CVL:
4209 phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
4210 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
4211 "2718 Clear Virtual Link Received for VPI 0x%x"
4212 " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);
4214 vport = lpfc_find_vport_by_vpid(phba,
4216 ndlp = lpfc_sli4_perform_vport_cvl(vport);
4219 active_vlink_present = 0;
4221 vports = lpfc_create_vport_work_array(phba);
4223 for (i = 0; i <= phba->max_vports && vports[i] != NULL;
4225 if ((!(vports[i]->fc_flag &
4226 FC_VPORT_CVL_RCVD)) &&
4227 (vports[i]->port_state > LPFC_FDISC)) {
4228 active_vlink_present = 1;
4232 lpfc_destroy_vport_work_array(phba, vports);
4235 if (active_vlink_present) {
4237 * If there are other active VLinks present,
4238 * re-instantiate the Vlink using FDISC.
4240 mod_timer(&ndlp->nlp_delayfunc,
4241 jiffies + msecs_to_jiffies(1000));
4242 shost = lpfc_shost_from_vport(vport);
4243 spin_lock_irq(shost->host_lock);
4244 ndlp->nlp_flag |= NLP_DELAY_TMO;
4245 spin_unlock_irq(shost->host_lock);
4246 ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
4247 vport->port_state = LPFC_FDISC;
4250 * Otherwise, request the port to rediscover
4251 * the entire FCF table for a fast recovery
4252 * from the possible case that the current FCF
4253 * is no longer valid, if we are not already
4254 * in the FCF failover process.
4256 spin_lock_irq(&phba->hbalock);
4257 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
4258 spin_unlock_irq(&phba->hbalock);
4261 /* Mark the fast failover process in progress */
4262 phba->fcf.fcf_flag |= FCF_ACVL_DISC;
4263 spin_unlock_irq(&phba->hbalock);
4264 lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
4266 "2773 Start FCF failover per CVL, "
4267 "evt_tag:x%x\n", acqe_fip->event_tag);
4268 rc = lpfc_sli4_redisc_fcf_table(phba);
4270 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
4272 "2774 Issue FCF rediscover "
4273 "mabilbox command failed, "
4274 "through to CVL event\n");
4275 spin_lock_irq(&phba->hbalock);
4276 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
4277 spin_unlock_irq(&phba->hbalock);
4279 * Last resort will be a retry on the
4280 * currently registered FCF entry.
4282 lpfc_retry_pport_discovery(phba);
4285 * Reset FCF roundrobin bmask for new discovery.
4288 lpfc_sli4_clear_fcf_rr_bmask(phba);
4292 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4293 "0288 Unknown FCoE event type 0x%x event tag "
4294 "0x%x\n", event_type, acqe_fip->event_tag);
4300 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
4301 * @phba: pointer to lpfc hba data structure.
4302 * @acqe_dcbx: pointer to the async DCBX completion queue entry.
4304 * This routine is to handle the SLI4 asynchronous DCBX event.
4307 lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
4308 struct lpfc_acqe_dcbx *acqe_dcbx)
4310 phba->fc_eventTag = acqe_dcbx->event_tag;
4311 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4312 "0290 The SLI4 DCBX asynchronous event is not "
4317 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
4318 * @phba: pointer to lpfc hba data structure.
4319 * @acqe_grp5: pointer to the async grp5 completion queue entry.
4321 * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event
4322 * is an asynchronous notification of a logical link speed change. The Port
4323 * reports the logical link speed in units of 10Mbps.
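 * For example, a CQE field value of 100 is stored by this handler as a
 * logical link speed of 1000 Mbps.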
4326 lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
4327 struct lpfc_acqe_grp5 *acqe_grp5)
4329 uint16_t prev_ll_spd;
4331 phba->fc_eventTag = acqe_grp5->event_tag;
4332 phba->fcoe_eventtag = acqe_grp5->event_tag;
4333 prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
4334 phba->sli4_hba.link_state.logical_speed =
4335 (bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)) * 10;
4336 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4337 "2789 GRP5 Async Event: Updating logical link speed "
4338 "from %dMbps to %dMbps\n", prev_ll_spd,
4339 phba->sli4_hba.link_state.logical_speed);
4343 * lpfc_sli4_async_event_proc - Process all the pending asynchronous events
4344 * @phba: pointer to lpfc hba data structure.
4346 * This routine is invoked by the worker thread to process all the pending
4347 * SLI4 asynchronous events.
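 *
 * Async CQEs are queued onto sp_asynce_work_queue (typically from the
 * interrupt path); this routine drains that list under hbalock and
 * dispatches each entry by its completion trailer code.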
4349 void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
4351 struct lpfc_cq_event *cq_event;
4353 /* First, declare the async event has been handled */
4354 spin_lock_irq(&phba->hbalock);
4355 phba->hba_flag &= ~ASYNC_EVENT;
4356 spin_unlock_irq(&phba->hbalock);
4357 /* Now, handle all the async events */
4358 while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
4359 /* Get the first event from the head of the event queue */
4360 spin_lock_irq(&phba->hbalock);
4361 list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
4362 cq_event, struct lpfc_cq_event, list);
4363 spin_unlock_irq(&phba->hbalock);
4364 /* Process the asynchronous event */
4365 switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
4366 case LPFC_TRAILER_CODE_LINK:
4367 lpfc_sli4_async_link_evt(phba,
4368 &cq_event->cqe.acqe_link);
4370 case LPFC_TRAILER_CODE_FCOE:
4371 lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip);
4373 case LPFC_TRAILER_CODE_DCBX:
4374 lpfc_sli4_async_dcbx_evt(phba,
4375 &cq_event->cqe.acqe_dcbx);
4377 case LPFC_TRAILER_CODE_GRP5:
4378 lpfc_sli4_async_grp5_evt(phba,
4379 &cq_event->cqe.acqe_grp5);
4381 case LPFC_TRAILER_CODE_FC:
4382 lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc);
4384 case LPFC_TRAILER_CODE_SLI:
4385 lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
4388 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4389 "1804 Invalid asynchrous event code: "
4390 "x%x\n", bf_get(lpfc_trailer_code,
4391 &cq_event->cqe.mcqe_cmpl));
4394 /* Free the completion event processed to the free pool */
4395 lpfc_sli4_cq_event_release(phba, cq_event);
4400 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
4401 * @phba: pointer to lpfc hba data structure.
4403 * This routine is invoked by the worker thread to process FCF table
4404 * rediscovery pending completion event.
4406 void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
4410 spin_lock_irq(&phba->hbalock);
4411 /* Clear FCF rediscovery timeout event */
4412 phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
4413 /* Clear driver fast failover FCF record flag */
4414 phba->fcf.failover_rec.flag = 0;
4415 /* Set state for FCF fast failover */
4416 phba->fcf.fcf_flag |= FCF_REDISC_FOV;
4417 spin_unlock_irq(&phba->hbalock);
4419 /* Scan FCF table from the first entry to re-discover SAN */
4420 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
4421 "2777 Start post-quiescent FCF table scan\n");
4422 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
4424 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
4425 "2747 Issue FCF scan read FCF mailbox "
4426 "command failed 0x%x\n", rc);
4430 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
4431 * @phba: pointer to lpfc hba data structure.
4432 * @dev_grp: The HBA PCI-Device group number.
4434 * This routine is invoked to set up the per HBA PCI-Device group function
4435 * API jump table entries.
4437 * Return: 0 if success, otherwise -ENODEV
4440 lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
4444 /* Set up lpfc PCI-device group */
4445 phba->pci_dev_grp = dev_grp;
4447 /* The LPFC_PCI_DEV_OC uses SLI4 */
4448 if (dev_grp == LPFC_PCI_DEV_OC)
4449 phba->sli_rev = LPFC_SLI_REV4;
4451 /* Set up device INIT API function jump table */
4452 rc = lpfc_init_api_table_setup(phba, dev_grp);
4455 /* Set up SCSI API function jump table */
4456 rc = lpfc_scsi_api_table_setup(phba, dev_grp);
4459 /* Set up SLI API function jump table */
4460 rc = lpfc_sli_api_table_setup(phba, dev_grp);
4463 /* Set up MBOX API function jump table */
4464 rc = lpfc_mbox_api_table_setup(phba, dev_grp);
4472 * lpfc_log_intr_mode - Log the active interrupt mode
4473 * @phba: pointer to lpfc hba data structure.
4474 * @intr_mode: active interrupt mode adopted.
4476 * This routine is invoked to log the currently used active interrupt mode
4479 static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
4481 switch (intr_mode) {
4483 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4484 "0470 Enable INTx interrupt mode.\n");
4487 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4488 "0481 Enabled MSI interrupt mode.\n");
4491 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4492 "0480 Enabled MSI-X interrupt mode.\n");
4495 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4496 "0482 Illegal interrupt mode.\n");
4503 * lpfc_enable_pci_dev - Enable a generic PCI device.
4504 * @phba: pointer to lpfc hba data structure.
4506 * This routine is invoked to enable the PCI device that is common to all
4507 * PCI devices.
4508 *
4509 * Return codes
4510 * 0 - successful
4511 * other values - error
4514 lpfc_enable_pci_dev(struct lpfc_hba *phba)
4516 struct pci_dev *pdev;
4519 /* Obtain PCI device reference */
4523 pdev = phba->pcidev;
4524 /* Select PCI BARs */
4525 bars = pci_select_bars(pdev, IORESOURCE_MEM);
4526 /* Enable PCI device */
4527 if (pci_enable_device_mem(pdev))
4529 /* Request PCI resource for the device */
4530 if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME))
4531 goto out_disable_device;
4532 /* Set up device as PCI master and save state for EEH */
4533 pci_set_master(pdev);
4534 pci_try_set_mwi(pdev);
4535 pci_save_state(pdev);
4537 /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
4538 if (pci_find_capability(pdev, PCI_CAP_ID_EXP))
4539 pdev->needs_freset = 1;
4544 pci_disable_device(pdev);
4546 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4547 "1401 Failed to enable pci device, bars:x%x\n", bars);
4552 * lpfc_disable_pci_dev - Disable a generic PCI device.
4553 * @phba: pointer to lpfc hba data structure.
4555 * This routine is invoked to disable the PCI device that is common to all
4556 * PCI devices.
4559 lpfc_disable_pci_dev(struct lpfc_hba *phba)
4561 struct pci_dev *pdev;
4564 /* Obtain PCI device reference */
4568 pdev = phba->pcidev;
4569 /* Select PCI BARs */
4570 bars = pci_select_bars(pdev, IORESOURCE_MEM);
4571 /* Release PCI resource and disable PCI device */
4572 pci_release_selected_regions(pdev, bars);
4573 pci_disable_device(pdev);
4574 /* Null out PCI private reference to driver */
4575 pci_set_drvdata(pdev, NULL);
4581 * lpfc_reset_hba - Reset a hba
4582 * @phba: pointer to lpfc hba data structure.
4584 * This routine is invoked to reset a hba device. It brings the HBA
4585 * offline, performs a board restart, and then brings the board back
4586 * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up
4587 * on outstanding mailbox commands.
4590 lpfc_reset_hba(struct lpfc_hba *phba)
4592 /* If resets are disabled then set error state and return. */
4593 if (!phba->cfg_enable_hba_reset) {
4594 phba->link_state = LPFC_HBA_ERROR;
4597 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
4599 lpfc_sli_brdrestart(phba);
4601 lpfc_unblock_mgmt_io(phba);
4605 * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions
4606 * @phba: pointer to lpfc hba data structure.
4608 * This function reads the PCI SR-IOV extended capability of the physical
4609 * function and reports the total number of virtual functions (VFs) the
4610 * device supports. A device without the SR-IOV capability simply reports
4611 * zero supported VFs, which is not treated as an error condition.
4615 lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba)
4617 struct pci_dev *pdev = phba->pcidev;
4621 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
4625 pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn);
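/*
 * TOTAL_VF is the device's advertised ceiling; a (hypothetical)
 * function reporting 16 here caps any later request made through
 * lpfc_sli_probe_sriov_nr_virtfn() at 16 virtual functions.
 */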
4630 * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions
4631 * @phba: pointer to lpfc hba data structure.
4632 * @nr_vfn: number of virtual functions to be enabled.
4634 * This function enables the PCI SR-IOV virtual functions to a physical
4635 * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to
4636 * enable that many virtual functions on the physical function. As
4637 * not all devices support SR-IOV, the return code from the pci_enable_sriov()
4638 * API call is not treated as an error condition for most devices.
4641 lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
4643 struct pci_dev *pdev = phba->pcidev;
4644 uint16_t max_nr_vfn;
4647 max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba);
4648 if (nr_vfn > max_nr_vfn) {
4649 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4650 "3057 Requested vfs (%d) greater than "
4651 "supported vfs (%d)", nr_vfn, max_nr_vfn);
4655 rc = pci_enable_sriov(pdev, nr_vfn);
4657 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4658 "2806 Failed to enable sriov on this device "
4659 "with vfn number nr_vf:%d, rc:%d\n",
4662 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4663 "2807 Successful enable sriov on this device "
4664 "with vfn number nr_vf:%d\n", nr_vfn);
4669 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev.
4670 * @phba: pointer to lpfc hba data structure.
4672 * This routine is invoked to set up the driver internal resources specific to
4673 * support the SLI-3 HBA device it is attached to.
4677 * other values - error
4680 lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
4682 struct lpfc_sli *psli;
4686 * Initialize timers used by driver
4689 /* Heartbeat timer */
4690 init_timer(&phba->hb_tmofunc);
4691 phba->hb_tmofunc.function = lpfc_hb_timeout;
4692 phba->hb_tmofunc.data = (unsigned long)phba;
4694 psli = &phba->sli;
4695 /* MBOX heartbeat timer */
4696 init_timer(&psli->mbox_tmo);
4697 psli->mbox_tmo.function = lpfc_mbox_timeout;
4698 psli->mbox_tmo.data = (unsigned long) phba;
4699 /* FCP polling mode timer */
4700 init_timer(&phba->fcp_poll_timer);
4701 phba->fcp_poll_timer.function = lpfc_poll_timeout;
4702 phba->fcp_poll_timer.data = (unsigned long) phba;
4703 /* Fabric block timer */
4704 init_timer(&phba->fabric_block_timer);
4705 phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
4706 phba->fabric_block_timer.data = (unsigned long) phba;
4707 /* EA polling mode timer */
4708 init_timer(&phba->eratt_poll);
4709 phba->eratt_poll.function = lpfc_poll_eratt;
4710 phba->eratt_poll.data = (unsigned long) phba;
4712 /* Host attention work mask setup */
4713 phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
4714 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
4716 /* Get all the module params for configuring this host */
4717 lpfc_get_cfgparam(phba);
4718 if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
4719 phba->menlo_flag |= HBA_MENLO_SUPPORT;
4720 /* check for menlo minimum sg count */
4721 if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT)
4722 phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
4725 if (!phba->sli.ring)
4726 phba->sli.ring = (struct lpfc_sli_ring *)
4727 kzalloc(LPFC_SLI3_MAX_RING *
4728 sizeof(struct lpfc_sli_ring), GFP_KERNEL);
4729 if (!phba->sli.ring)
4733 * Since the sg_tablesize is a module parameter, the sg_dma_buf_size
4734 * used to create the sg_dma_buf_pool must be dynamically calculated.
4735 * 2 segments are added since the IOCB needs a command and response bde.
4737 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
4738 sizeof(struct fcp_rsp) +
4739 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
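/*
 * Illustrative sizing only (struct sizes are build dependent): with
 * cfg_sg_seg_cnt = 64 and a 12-byte ulp_bde64, the BDE portion of
 * each pool element is (64 + 2) * 12 = 792 bytes on top of the FCP
 * command and response.
 */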
4741 if (phba->cfg_enable_bg) {
4742 phba->cfg_sg_seg_cnt = LPFC_MAX_BPL_SEG_CNT;
4743 phba->cfg_sg_dma_buf_size +=
4744 phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64);
4747 /* Also reinitialize the host templates with new values. */
4748 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
4749 lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
4751 phba->max_vpi = LPFC_MAX_VPI;
4752 /* This will be set to correct value after config_port mbox */
4753 phba->max_vports = 0;
4756 * Initialize the SLI Layer to run with lpfc HBAs.
4758 lpfc_sli_setup(phba);
4759 lpfc_sli_queue_setup(phba);
4761 /* Allocate device driver memory */
4762 if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
4766 * Enable sr-iov virtual functions if supported and configured
4767 * through the module parameter.
4769 if (phba->cfg_sriov_nr_virtfn > 0) {
4770 rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
4771 phba->cfg_sriov_nr_virtfn);
4773 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4774 "2808 Requested number of SR-IOV "
4775 "virtual functions (%d) is not "
4777 phba->cfg_sriov_nr_virtfn);
4778 phba->cfg_sriov_nr_virtfn = 0;
4786 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
4787 * @phba: pointer to lpfc hba data structure.
4789 * This routine is invoked to unset the driver internal resources set up
4790 * specific for supporting the SLI-3 HBA device it is attached to.
4793 lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
4795 /* Free device driver memory allocated */
4796 lpfc_mem_free_all(phba);
4802 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
4803 * @phba: pointer to lpfc hba data structure.
4805 * This routine is invoked to set up the driver internal resources specific to
4806 * support the SLI-4 HBA device it is attached to.
4810 * other values - error
4813 lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4815 struct lpfc_sli *psli;
4816 LPFC_MBOXQ_t *mboxq;
4817 int rc, i, hbq_count, buf_size, dma_buf_size, max_buf_size;
4818 uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
4819 struct lpfc_mqe *mqe;
4821 int sges_per_segment;
4823 /* Before proceeding, wait for POST done and device ready */
4824 rc = lpfc_sli4_post_status_check(phba);
4829 * Initialize timers used by driver
4832 /* Heartbeat timer */
4833 init_timer(&phba->hb_tmofunc);
4834 phba->hb_tmofunc.function = lpfc_hb_timeout;
4835 phba->hb_tmofunc.data = (unsigned long)phba;
4836 init_timer(&phba->rrq_tmr);
4837 phba->rrq_tmr.function = lpfc_rrq_timeout;
4838 phba->rrq_tmr.data = (unsigned long)phba;
4840 psli = &phba->sli;
4841 /* MBOX heartbeat timer */
4842 init_timer(&psli->mbox_tmo);
4843 psli->mbox_tmo.function = lpfc_mbox_timeout;
4844 psli->mbox_tmo.data = (unsigned long) phba;
4845 /* Fabric block timer */
4846 init_timer(&phba->fabric_block_timer);
4847 phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
4848 phba->fabric_block_timer.data = (unsigned long) phba;
4849 /* EA polling mode timer */
4850 init_timer(&phba->eratt_poll);
4851 phba->eratt_poll.function = lpfc_poll_eratt;
4852 phba->eratt_poll.data = (unsigned long) phba;
4853 /* FCF rediscover timer */
4854 init_timer(&phba->fcf.redisc_wait);
4855 phba->fcf.redisc_wait.function = lpfc_sli4_fcf_redisc_wait_tmo;
4856 phba->fcf.redisc_wait.data = (unsigned long)phba;
4859 * Control structure for handling external multi-buffer mailbox
4860 * command pass-through.
4862 memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0,
4863 sizeof(struct lpfc_mbox_ext_buf_ctx));
4864 INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
4867 * We need to do a READ_CONFIG mailbox command here before
4868 * calling lpfc_get_cfgparam. For VFs this will report the
4869 * MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings.
4870 * All of the resources allocated
4871 * for this Port are tied to these values.
4873 /* Get all the module params for configuring this host */
4874 lpfc_get_cfgparam(phba);
4875 phba->max_vpi = LPFC_MAX_VPI;
4877 /* Eventually cfg_fcp_eq_count / cfg_fcp_wq_count will be deprecated */
4878 phba->cfg_fcp_io_channel = phba->cfg_fcp_eq_count;
4880 /* This will be set to correct value after the read_config mbox */
4881 phba->max_vports = 0;
4883 /* Program the default value of vlan_id and fc_map */
4884 phba->valid_vlan = 0;
4885 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
4886 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
4887 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
4889 /* With BlockGuard we can have multiple SGEs per Data Segment */
4890 sges_per_segment = 1;
4891 if (phba->cfg_enable_bg)
4892 sges_per_segment = 2;
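/*
 * When BlockGuard is enabled each data segment may need a protection
 * (DIF) SGE alongside its data SGE, hence two SGEs per segment in the
 * sizing below.
 */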
4895 * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands
4896 * we will associate a new ring, for each FCP fastpath EQ/CQ/WQ tuple.
4898 if (!phba->sli.ring)
4899 phba->sli.ring = kzalloc(
4900 (LPFC_SLI3_MAX_RING + phba->cfg_fcp_io_channel) *
4901 sizeof(struct lpfc_sli_ring), GFP_KERNEL);
4902 if (!phba->sli.ring)
4906 * It doesn't matter what family our adapter is in, we are
4907 * limited to 2 Pages, 512 SGEs, for our SGL.
4908 * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
4910 max_buf_size = (2 * SLI4_PAGE_SIZE);
4911 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SGL_SEG_CNT - 2)
4912 phba->cfg_sg_seg_cnt = LPFC_MAX_SGL_SEG_CNT - 2;
4913 max_buf_size += (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
4916 * Since the sg_tablesize is a module parameter, the sg_dma_buf_size
4917 * used to create the sg_dma_buf_pool must be dynamically calculated.
4918 * 2 segments are added since the IOCB needs a command and response bde.
4919 * To ensure that the scsi sgl does not cross a 4k page boundary, only
4920 * sgl sizes that are a power of 2 are used.
4922 buf_size = (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) +
4923 (((phba->cfg_sg_seg_cnt * sges_per_segment) + 2) *
4924 sizeof(struct sli4_sge)));
4926 for (dma_buf_size = LPFC_SLI4_MIN_BUF_SIZE;
4927 dma_buf_size < max_buf_size && buf_size > dma_buf_size;
4928 dma_buf_size = dma_buf_size << 1)
4929 ;
4930 if (dma_buf_size == max_buf_size)
4931 phba->cfg_sg_seg_cnt = (dma_buf_size -
4932 sizeof(struct fcp_cmnd) - sizeof(struct fcp_rsp) -
4933 (2 * sizeof(struct sli4_sge))) /
4934 sizeof(struct sli4_sge);
4935 phba->cfg_sg_dma_buf_size = dma_buf_size;
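/*
 * Worked example, assuming LPFC_SLI4_MIN_BUF_SIZE is a small power of
 * two such as 512: a computed buf_size of 1300 bytes doubles to a
 * 2048-byte pool element; only if the doubling hits the two-page
 * max_buf_size ceiling is cfg_sg_seg_cnt trimmed back to the SGE
 * count that still fits.
 */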
4937 /* Initialize buffer queue management fields */
4938 hbq_count = lpfc_sli_hbq_count();
4939 for (i = 0; i < hbq_count; ++i)
4940 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
4941 INIT_LIST_HEAD(&phba->rb_pend_list);
4942 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
4943 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
4946 * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
4948 /* Initialize the Abort scsi buffer list used by driver */
4949 spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock);
4950 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
4951 /* This abort list used by worker thread */
4952 spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock);
4955 * Initialize driver internal slow-path work queues
4958 /* Driver internal slow-path CQ Event pool */
4959 INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
4960 /* Response IOCB work queue list */
4961 INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
4962 /* Asynchronous event CQ Event work queue list */
4963 INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
4964 /* Fast-path XRI aborted CQ Event work queue list */
4965 INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
4966 /* Slow-path XRI aborted CQ Event work queue list */
4967 INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
4968 /* Receive queue CQ Event work queue list */
4969 INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
4971 /* Initialize extent block lists. */
4972 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list);
4973 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list);
4974 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list);
4975 INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list);
4977 /* Initialize the driver internal SLI layer lists. */
4978 lpfc_sli_setup(phba);
4979 lpfc_sli_queue_setup(phba);
4981 /* Allocate device driver memory */
4982 rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
4986 /* IF Type 2 ports get initialized now. */
4987 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
4988 LPFC_SLI_INTF_IF_TYPE_2) {
4989 rc = lpfc_pci_function_reset(phba);
4994 /* Create the bootstrap mailbox command */
4995 rc = lpfc_create_bootstrap_mbox(phba);
4999 /* Set up the host's endian order with the device. */
5000 rc = lpfc_setup_endian_order(phba);
5002 goto out_free_bsmbx;
5004 /* Set up the hba's configuration parameters. */
5005 rc = lpfc_sli4_read_config(phba);
5007 goto out_free_bsmbx;
5009 /* IF Type 0 ports get initialized now. */
5010 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
5011 LPFC_SLI_INTF_IF_TYPE_0) {
5012 rc = lpfc_pci_function_reset(phba);
5014 goto out_free_bsmbx;
5017 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
5021 goto out_free_bsmbx;
5024 /* Get the Supported Pages if PORT_CAPABILITIES is supported by port. */
5025 lpfc_supported_pages(mboxq);
5026 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5028 mqe = &mboxq->u.mqe;
5029 memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
5030 LPFC_MAX_SUPPORTED_PAGES);
5031 for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
5032 switch (pn_page[i]) {
5033 case LPFC_SLI4_PARAMETERS:
5034 phba->sli4_hba.pc_sli4_params.supported = 1;
5040 /* Read the port's SLI4 Parameters capabilities if supported. */
5041 if (phba->sli4_hba.pc_sli4_params.supported)
5042 rc = lpfc_pc_sli4_params_get(phba, mboxq);
5044 mempool_free(mboxq, phba->mbox_mem_pool);
5046 goto out_free_bsmbx;
5050 * Get sli4 parameters that override parameters from Port capabilities.
5051 * If this call fails, it isn't critical unless the SLI4 parameters come
5052 * back in conflict.
5054 rc = lpfc_get_sli4_parameters(phba, mboxq);
5056 if (phba->sli4_hba.extents_in_use &&
5057 phba->sli4_hba.rpi_hdrs_in_use) {
5058 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5059 "2999 Unsupported SLI4 Parameters "
5060 "Extents and RPI headers enabled.\n");
5061 goto out_free_bsmbx;
5064 mempool_free(mboxq, phba->mbox_mem_pool);
5065 /* Verify all the SLI4 queues */
5066 rc = lpfc_sli4_queue_verify(phba);
5068 goto out_free_bsmbx;
5070 /* Create driver internal CQE event pool */
5071 rc = lpfc_sli4_cq_event_pool_create(phba);
5073 goto out_free_bsmbx;
5075 /* Initialize sgl lists per host */
5076 lpfc_init_sgl_list(phba);
5078 /* Allocate and initialize active sgl array */
5079 rc = lpfc_init_active_sgl_array(phba);
5081 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5082 "1430 Failed to initialize sgl list.\n");
5083 goto out_destroy_cq_event_pool;
5085 rc = lpfc_sli4_init_rpi_hdrs(phba);
5087 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5088 "1432 Failed to initialize rpi headers.\n");
5089 goto out_free_active_sgl;
5092 /* Allocate eligible FCF bmask memory for FCF roundrobin failover */
5093 longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
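/*
 * Standard bitmap sizing: e.g. a table index max of 64 on a 64-bit
 * kernel gives longs = (64 + 63) / 64 = 1, i.e. one unsigned long
 * backs the roundrobin failover bitmap.
 */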
5094 phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long),
5096 if (!phba->fcf.fcf_rr_bmask) {
5097 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5098 "2759 Failed allocate memory for FCF round "
5099 "robin failover bmask\n");
5101 goto out_remove_rpi_hdrs;
5104 phba->sli4_hba.fcp_eq_hdl =
5105 kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
5106 phba->cfg_fcp_io_channel), GFP_KERNEL);
5107 if (!phba->sli4_hba.fcp_eq_hdl) {
5108 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5109 "2572 Failed allocate memory for "
5110 "fast-path per-EQ handle array\n");
5112 goto out_free_fcf_rr_bmask;
5115 phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
5116 phba->cfg_fcp_io_channel), GFP_KERNEL);
5117 if (!phba->sli4_hba.msix_entries) {
5118 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5119 "2573 Failed allocate memory for msi-x "
5120 "interrupt vector entries\n");
5122 goto out_free_fcp_eq_hdl;
5126 * Enable sr-iov virtual functions if supported and configured
5127 * through the module parameter.
5129 if (phba->cfg_sriov_nr_virtfn > 0) {
5130 rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
5131 phba->cfg_sriov_nr_virtfn);
5133 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5134 "3020 Requested number of SR-IOV "
5135 "virtual functions (%d) is not "
5137 phba->cfg_sriov_nr_virtfn);
5138 phba->cfg_sriov_nr_virtfn = 0;
5144 out_free_fcp_eq_hdl:
5145 kfree(phba->sli4_hba.fcp_eq_hdl);
5146 out_free_fcf_rr_bmask:
5147 kfree(phba->fcf.fcf_rr_bmask);
5148 out_remove_rpi_hdrs:
5149 lpfc_sli4_remove_rpi_hdrs(phba);
5150 out_free_active_sgl:
5151 lpfc_free_active_sgl(phba);
5152 out_destroy_cq_event_pool:
5153 lpfc_sli4_cq_event_pool_destroy(phba);
5155 lpfc_destroy_bootstrap_mbox(phba);
5157 lpfc_mem_free(phba);
5162 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
5163 * @phba: pointer to lpfc hba data structure.
5165 * This routine is invoked to unset the driver internal resources set up
5166 * specific for supporting the SLI-4 HBA device it is attached to.
5169 lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
5171 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
5173 /* Free memory allocated for msi-x interrupt vector entries */
5174 kfree(phba->sli4_hba.msix_entries);
5176 /* Free memory allocated for fast-path work queue handles */
5177 kfree(phba->sli4_hba.fcp_eq_hdl);
5179 /* Free the allocated rpi headers. */
5180 lpfc_sli4_remove_rpi_hdrs(phba);
5181 lpfc_sli4_remove_rpis(phba);
5183 /* Free eligible FCF index bmask */
5184 kfree(phba->fcf.fcf_rr_bmask);
5186 /* Free the ELS sgl list */
5187 lpfc_free_active_sgl(phba);
5188 lpfc_free_els_sgl_list(phba);
5190 /* Free the completion queue EQ event pool */
5191 lpfc_sli4_cq_event_release_all(phba);
5192 lpfc_sli4_cq_event_pool_destroy(phba);
5194 /* Release resource identifiers. */
5195 lpfc_sli4_dealloc_resource_identifiers(phba);
5197 /* Free the bsmbx region. */
5198 lpfc_destroy_bootstrap_mbox(phba);
5200 /* Free the SLI Layer memory with SLI4 HBAs */
5201 lpfc_mem_free_all(phba);
5203 /* Free the current connect table */
5204 list_for_each_entry_safe(conn_entry, next_conn_entry,
5205 &phba->fcf_conn_rec_list, list) {
5206 list_del_init(&conn_entry->list);
5214 * lpfc_init_api_table_setup - Set up init api function jump table
5215 * @phba: The hba struct for which this call is being executed.
5216 * @dev_grp: The HBA PCI-Device group number.
5218 * This routine sets up the device INIT interface API function jump table
5221 * Returns: 0 - success, -ENODEV - failure.
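 *
 * Once set up, the rest of the driver calls through these pointers;
 * for example phba->lpfc_hba_down_post(phba) resolves to the _s3 or
 * _s4 variant selected below.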
5224 lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
5226 phba->lpfc_hba_init_link = lpfc_hba_init_link;
5227 phba->lpfc_hba_down_link = lpfc_hba_down_link;
5228 phba->lpfc_selective_reset = lpfc_selective_reset;
5230 case LPFC_PCI_DEV_LP:
5231 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
5232 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
5233 phba->lpfc_stop_port = lpfc_stop_port_s3;
5235 case LPFC_PCI_DEV_OC:
5236 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
5237 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
5238 phba->lpfc_stop_port = lpfc_stop_port_s4;
5241 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5242 "1431 Invalid HBA PCI-device group: 0x%x\n",
5251 * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources.
5252 * @phba: pointer to lpfc hba data structure.
5254 * This routine is invoked to set up the driver internal resources before the
5255 * device specific resource setup to support the HBA device it is attached to.
5259 * other values - error
5262 lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
5265 * Driver resources common to all SLI revisions
5267 atomic_set(&phba->fast_event_count, 0);
5268 spin_lock_init(&phba->hbalock);
5270 /* Initialize ndlp management spinlock */
5271 spin_lock_init(&phba->ndlp_lock);
5273 INIT_LIST_HEAD(&phba->port_list);
5274 INIT_LIST_HEAD(&phba->work_list);
5275 init_waitqueue_head(&phba->wait_4_mlo_m_q);
5277 /* Initialize the wait queue head for the kernel thread */
5278 init_waitqueue_head(&phba->work_waitq);
5280 /* Initialize the scsi buffer list used by driver for scsi IO */
5281 spin_lock_init(&phba->scsi_buf_list_lock);
5282 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);
5284 /* Initialize the fabric iocb list */
5285 INIT_LIST_HEAD(&phba->fabric_iocb_list);
5287 /* Initialize list to save ELS buffers */
5288 INIT_LIST_HEAD(&phba->elsbuf);
5290 /* Initialize FCF connection rec list */
5291 INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
5297 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
5298 * @phba: pointer to lpfc hba data structure.
5300 * This routine is invoked to set up the driver internal resources after the
5301 * device specific resource setup to support the HBA device it is attached to.
5305 * other values - error
5308 lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
5312 /* Startup the kernel thread for this host adapter. */
5313 phba->worker_thread = kthread_run(lpfc_do_work, phba,
5314 "lpfc_worker_%d", phba->brd_no);
5315 if (IS_ERR(phba->worker_thread)) {
5316 error = PTR_ERR(phba->worker_thread);
5324 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
5325 * @phba: pointer to lpfc hba data structure.
5327 * This routine is invoked to unset the driver internal resources set up after
5328 * the device specific resource setup for supporting the HBA device it is
5329 * attached to.
5332 lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
5334 /* Stop kernel worker thread */
5335 kthread_stop(phba->worker_thread);
5339 * lpfc_free_iocb_list - Free iocb list.
5340 * @phba: pointer to lpfc hba data structure.
5342 * This routine is invoked to free the driver's IOCB list and memory.
5345 lpfc_free_iocb_list(struct lpfc_hba *phba)
5347 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
5349 spin_lock_irq(&phba->hbalock);
5350 list_for_each_entry_safe(iocbq_entry, iocbq_next,
5351 &phba->lpfc_iocb_list, list) {
5352 list_del(&iocbq_entry->list);
5353 kfree(iocbq_entry);
5354 phba->total_iocbq_bufs--;
5356 spin_unlock_irq(&phba->hbalock);
5362 * lpfc_init_iocb_list - Allocate and initialize iocb list.
5363 * @phba: pointer to lpfc hba data structure.
5365 * This routine is invoked to allocate and initialize the driver's IOCB
5366 * list and set up the IOCB tag array accordingly.
5370 * other values - error
5373 lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
5375 struct lpfc_iocbq *iocbq_entry = NULL;
5379 /* Initialize and populate the iocb list per host. */
5380 INIT_LIST_HEAD(&phba->lpfc_iocb_list);
5381 for (i = 0; i < iocb_count; i++) {
5382 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
5383 if (iocbq_entry == NULL) {
5384 printk(KERN_ERR "%s: only allocated %d iocbs of "
5385 "expected %d count. Unloading driver.\n",
5386 __func__, i, LPFC_IOCB_LIST_CNT);
5387 goto out_free_iocbq;
5390 iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
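/*
 * lpfc_sli_next_iotag() also records this iocbq in the driver's
 * iotag lookup table so a completion can be matched back to its
 * request by tag.
 */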
5391 if (iotag == 0) {
5392 kfree(iocbq_entry);
5393 printk(KERN_ERR "%s: failed to allocate IOTAG. "
5394 "Unloading driver.\n", __func__);
5395 goto out_free_iocbq;
5397 iocbq_entry->sli4_lxritag = NO_XRI;
5398 iocbq_entry->sli4_xritag = NO_XRI;
5400 spin_lock_irq(&phba->hbalock);
5401 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
5402 phba->total_iocbq_bufs++;
5403 spin_unlock_irq(&phba->hbalock);
5409 lpfc_free_iocb_list(phba);
5415 * lpfc_free_sgl_list - Free a given sgl list.
5416 * @phba: pointer to lpfc hba data structure.
5417 * @sglq_list: pointer to the head of sgl list.
5419 * This routine is invoked to free a given sgl list and memory.
5422 lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list)
5424 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
5426 list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) {
5427 list_del(&sglq_entry->list);
5428 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
5429 kfree(sglq_entry);
5434 * lpfc_free_els_sgl_list - Free els sgl list.
5435 * @phba: pointer to lpfc hba data structure.
5437 * This routine is invoked to free the driver's els sgl list and memory.
5440 lpfc_free_els_sgl_list(struct lpfc_hba *phba)
5442 LIST_HEAD(sglq_list);
5444 /* Retrieve all els sgls from driver list */
5445 spin_lock_irq(&phba->hbalock);
5446 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list);
5447 spin_unlock_irq(&phba->hbalock);
5449 /* Now free the sgl list */
5450 lpfc_free_sgl_list(phba, &sglq_list);
5454 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
5455 * @phba: pointer to lpfc hba data structure.
5457 * This routine is invoked to allocate the driver's active sgl memory.
5458 * This array will hold the sglq_entry's for active IOs.
5461 lpfc_init_active_sgl_array(struct lpfc_hba *phba)
5464 size = sizeof(struct lpfc_sglq *);
5465 size *= phba->sli4_hba.max_cfg_param.max_xri;
5467 phba->sli4_hba.lpfc_sglq_active_list =
5468 kzalloc(size, GFP_KERNEL);
5469 if (!phba->sli4_hba.lpfc_sglq_active_list)
5475 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
5476 * @phba: pointer to lpfc hba data structure.
5478 * This routine is invoked to walk through the array of active sglq entries
5479 * and free all of the resources.
5480 * This is just a place holder for now.
5483 lpfc_free_active_sgl(struct lpfc_hba *phba)
5485 kfree(phba->sli4_hba.lpfc_sglq_active_list);
5489 * lpfc_init_sgl_list - Allocate and initialize sgl list.
5490 * @phba: pointer to lpfc hba data structure.
5492 * This routine is invoked to allocate and initialize the driver's sgl
5493 * list and set up the sgl xritag tag array accordingly.
5497 lpfc_init_sgl_list(struct lpfc_hba *phba)
5499 /* Initialize and populate the sglq list per host/VF. */
5500 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list);
5501 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
5503 /* els xri-sgl book keeping */
5504 phba->sli4_hba.els_xri_cnt = 0;
5506 /* scsi xri-buffer book keeping */
5507 phba->sli4_hba.scsi_xri_cnt = 0;
5511 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
5512 * @phba: pointer to lpfc hba data structure.
5514 * This routine is invoked to post rpi header templates to the
5515 * port for those SLI4 ports that do not support extents. This routine
5516 * posts a PAGE_SIZE memory region to the port to hold up to
5517 * PAGE_SIZE / 64 rpi context headers. This is an initialization routine
5518 * and should be called only when interrupts are disabled.
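 * With a 4KB region and 64-byte rpi contexts that is 4096 / 64 = 64
 * headers per posting, matching the LPFC_RPI_HDR_COUNT stride used by
 * lpfc_sli4_create_rpi_hdr().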
5522 * -ERROR - otherwise.
5525 lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
5528 struct lpfc_rpi_hdr *rpi_hdr;
5530 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
5531 if (!phba->sli4_hba.rpi_hdrs_in_use)
5533 if (phba->sli4_hba.extents_in_use)
5536 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
5538 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5539 "0391 Error during rpi post operation\n");
5540 lpfc_sli4_remove_rpis(phba);
5548 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
5549 * @phba: pointer to lpfc hba data structure.
5551 * This routine is invoked to allocate a single 4KB memory region to
5552 * support rpis and stores them in the phba. This single region
5553 * provides support for up to 64 rpis. The region is used globally
5554 * by the device.
5555 *
5556 * Returns:
5557 * A valid rpi hdr on success.
5558 * A NULL pointer on any failure.
5560 struct lpfc_rpi_hdr *
5561 lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
5563 uint16_t rpi_limit, curr_rpi_range;
5564 struct lpfc_dmabuf *dmabuf;
5565 struct lpfc_rpi_hdr *rpi_hdr;
5569 * If the SLI4 port supports extents, posting the rpi header isn't
5570 * required. Set the expected maximum count and let the actual value
5571 * get set when extents are fully allocated.
5573 if (!phba->sli4_hba.rpi_hdrs_in_use)
5575 if (phba->sli4_hba.extents_in_use)
5578 /* The limit on the logical index is just the max_rpi count. */
5579 rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base +
5580 phba->sli4_hba.max_cfg_param.max_rpi - 1;
5582 spin_lock_irq(&phba->hbalock);
5584 * Establish the starting RPI in this header block. The starting
5585 * rpi is normalized to a zero base because the physical rpi is
5586 * port based.
5588 curr_rpi_range = phba->sli4_hba.next_rpi;
5589 spin_unlock_irq(&phba->hbalock);
5592 * The port has a limited number of rpis. The increment here
5593 * is LPFC_RPI_HDR_COUNT - 1 to account for the starting value
5594 * and to allow the full max_rpi range per port.
5596 if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit)
5597 rpi_count = rpi_limit - curr_rpi_range;
5599 rpi_count = LPFC_RPI_HDR_COUNT;
5604 * First allocate the protocol header region for the port. The
5605 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
5607 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5611 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
5612 LPFC_HDR_TEMPLATE_SIZE,
5615 if (!dmabuf->virt) {
5617 goto err_free_dmabuf;
5620 memset(dmabuf->virt, 0, LPFC_HDR_TEMPLATE_SIZE);
5621 if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
5623 goto err_free_coherent;
5626 /* Save the rpi header data for cleanup later. */
5627 rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
5629 goto err_free_coherent;
5631 rpi_hdr->dmabuf = dmabuf;
5632 rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
5633 rpi_hdr->page_count = 1;
5634 spin_lock_irq(&phba->hbalock);
5636 /* The rpi_hdr stores the logical index only. */
5637 rpi_hdr->start_rpi = curr_rpi_range;
5638 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
5641 * The next_rpi stores the next logical modulo-64 rpi value used
5642 * to post physical rpis in subsequent rpi postings.
5644 phba->sli4_hba.next_rpi += rpi_count;
5645 spin_unlock_irq(&phba->hbalock);
5649 dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
5650 dmabuf->virt, dmabuf->phys);
5657 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
5658 * @phba: pointer to lpfc hba data structure.
5660 * This routine is invoked to remove all memory resources allocated
5661 * to support rpis for SLI4 ports not supporting extents. This routine
5662 * presumes the caller has released all rpis consumed by fabric or port
5663 * logins and is prepared to have the header pages removed.
5666 lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
5668 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
5670 if (!phba->sli4_hba.rpi_hdrs_in_use)
5673 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
5674 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
5675 list_del(&rpi_hdr->list);
5676 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
5677 rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
5678 kfree(rpi_hdr->dmabuf);
5682 /* There are no rpis available to the port now. */
5683 phba->sli4_hba.next_rpi = 0;
5687 * lpfc_hba_alloc - Allocate driver hba data structure for a device.
5688 * @pdev: pointer to pci device data structure.
5690 * This routine is invoked to allocate the driver hba data structure for an
5691 * HBA device. If the allocation is successful, the phba reference to the
5692 * PCI device data structure is set.
5695 * pointer to @phba - successful
5698 static struct lpfc_hba *
5699 lpfc_hba_alloc(struct pci_dev *pdev)
5701 struct lpfc_hba *phba;
5703 /* Allocate memory for HBA structure */
5704 phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
5706 dev_err(&pdev->dev, "failed to allocate hba struct\n");
5710 /* Set reference to PCI device in HBA structure */
5711 phba->pcidev = pdev;
5713 /* Assign an unused board number */
5714 phba->brd_no = lpfc_get_instance();
5715 if (phba->brd_no < 0) {
5720 spin_lock_init(&phba->ct_ev_lock);
5721 INIT_LIST_HEAD(&phba->ct_ev_waiters);
5727 * lpfc_hba_free - Free driver hba data structure with a device.
5728 * @phba: pointer to lpfc hba data structure.
5730 * This routine is invoked to free the driver hba data structure with an
5734 lpfc_hba_free(struct lpfc_hba *phba)
5736 /* Release the driver assigned board number */
5737 idr_remove(&lpfc_hba_index, phba->brd_no);
5739 /* Free memory allocated with sli rings */
5740 kfree(phba->sli.ring);
5741 phba->sli.ring = NULL;
5748 * lpfc_create_shost - Create hba physical port with associated scsi host.
5749 * @phba: pointer to lpfc hba data structure.
5751 * This routine is invoked to create HBA physical port and associate a SCSI
5756 * other values - error
5759 lpfc_create_shost(struct lpfc_hba *phba)
5761 struct lpfc_vport *vport;
5762 struct Scsi_Host *shost;
5764 /* Initialize HBA FC structure */
5765 phba->fc_edtov = FF_DEF_EDTOV;
5766 phba->fc_ratov = FF_DEF_RATOV;
5767 phba->fc_altov = FF_DEF_ALTOV;
5768 phba->fc_arbtov = FF_DEF_ARBTOV;
5770 atomic_set(&phba->sdev_cnt, 0);
5771 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
5775 shost = lpfc_shost_from_vport(vport);
5776 phba->pport = vport;
5777 lpfc_debugfs_initialize(vport);
5778 /* Put reference to SCSI host to driver's device private data */
5779 pci_set_drvdata(phba->pcidev, shost);
5785 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
5786 * @phba: pointer to lpfc hba data structure.
5788 * This routine is invoked to destroy HBA physical port and the associated
5792 lpfc_destroy_shost(struct lpfc_hba *phba)
5794 struct lpfc_vport *vport = phba->pport;
5796 /* Destroy physical port that associated with the SCSI host */
5797 destroy_port(vport);
5803 * lpfc_setup_bg - Setup Block guard structures and debug areas.
5804 * @phba: pointer to lpfc hba data structure.
5805 * @shost: the shost to be used to detect Block guard settings.
5807 * This routine sets up the local Block guard protocol settings for @shost.
5808 * This routine also allocates memory for debugging bg buffers.
5811 lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
5817 if (lpfc_prot_mask && lpfc_prot_guard) {
5818 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5819 "1478 Registering BlockGuard with the "
5822 old_mask = lpfc_prot_mask;
5823 old_guard = lpfc_prot_guard;
5825 /* Only allow supported values */
5826 lpfc_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION |
5827 SHOST_DIX_TYPE0_PROTECTION |
5828 SHOST_DIX_TYPE1_PROTECTION);
5829 lpfc_prot_guard &= (SHOST_DIX_GUARD_IP | SHOST_DIX_GUARD_CRC);
5831 /* DIF Type 1 protection for profiles AST1/C1 is end to end */
5832 if (lpfc_prot_mask == SHOST_DIX_TYPE1_PROTECTION)
5833 lpfc_prot_mask |= SHOST_DIF_TYPE1_PROTECTION;
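/*
 * Example: a user mask of DIX Type 1 alone is widened here to
 * DIF + DIX Type 1, so the AST1/C1 profiles protect the full path
 * from host memory to media.
 */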
5835 if (lpfc_prot_mask && lpfc_prot_guard) {
5836 if ((old_mask != lpfc_prot_mask) ||
5837 (old_guard != lpfc_prot_guard))
5838 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5839 "1475 Registering BlockGuard with the "
5840 "SCSI layer: mask %d guard %d\n",
5841 lpfc_prot_mask, lpfc_prot_guard);
5843 scsi_host_set_prot(shost, lpfc_prot_mask);
5844 scsi_host_set_guard(shost, lpfc_prot_guard);
5846 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5847 "1479 Not Registering BlockGuard with the SCSI "
5848 "layer, Bad protection parameters: %d %d\n",
5849 old_mask, old_guard);
5852 if (!_dump_buf_data) {
5854 spin_lock_init(&_dump_buf_lock);
5855 _dump_buf_data =
5856 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
5857 if (_dump_buf_data) {
5858 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5859 "9043 BLKGRD: allocated %d pages for "
5860 "_dump_buf_data at 0x%p\n",
5861 (1 << pagecnt), _dump_buf_data);
5862 _dump_buf_data_order = pagecnt;
5863 memset(_dump_buf_data, 0,
5864 ((1 << PAGE_SHIFT) << pagecnt));
5869 if (!_dump_buf_data_order)
5870 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5871 "9044 BLKGRD: ERROR unable to allocate "
5872 "memory for hexdump\n");
5874 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5875 "9045 BLKGRD: already allocated _dump_buf_data=0x%p"
5876 "\n", _dump_buf_data);
5877 if (!_dump_buf_dif) {
5879 _dump_buf_dif =
5880 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
5881 if (_dump_buf_dif) {
5882 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5883 "9046 BLKGRD: allocated %d pages for "
5884 "_dump_buf_dif at 0x%p\n",
5885 (1 << pagecnt), _dump_buf_dif);
5886 _dump_buf_dif_order = pagecnt;
5887 memset(_dump_buf_dif, 0,
5888 ((1 << PAGE_SHIFT) << pagecnt));
5893 if (!_dump_buf_dif_order)
5894 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5895 "9047 BLKGRD: ERROR unable to allocate "
5896 "memory for hexdump\n");
5898 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5899 "9048 BLKGRD: already allocated _dump_buf_dif=0x%p\n",
5904 * lpfc_post_init_setup - Perform necessary device post initialization setup.
5905 * @phba: pointer to lpfc hba data structure.
5907 * This routine is invoked to perform all the necessary post initialization
5908 * setup for the device.
5911 lpfc_post_init_setup(struct lpfc_hba *phba)
5913 struct Scsi_Host *shost;
5914 struct lpfc_adapter_event_header adapter_event;
5916 /* Get the default values for Model Name and Description */
5917 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
5920 * hba setup may have changed the hba_queue_depth so we need to
5921 * adjust the value of can_queue.
5923 shost = pci_get_drvdata(phba->pcidev);
5924 shost->can_queue = phba->cfg_hba_queue_depth - 10;
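/*
 * can_queue is kept 10 below the HBA queue depth to leave headroom
 * for driver-internal commands (assumption; the offset is a fixed
 * driver constant, not a firmware-reported value).
 */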
5925 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
5926 lpfc_setup_bg(phba, shost);
5928 lpfc_host_attrib_init(shost);
5930 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
5931 spin_lock_irq(shost->host_lock);
5932 lpfc_poll_start_timer(phba);
5933 spin_unlock_irq(shost->host_lock);
5936 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5937 "0428 Perform SCSI scan\n");
5938 /* Send board arrival event to upper layer */
5939 adapter_event.event_type = FC_REG_ADAPTER_EVENT;
5940 adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
5941 fc_host_post_vendor_event(shost, fc_get_event_number(),
5942 sizeof(adapter_event),
5943 (char *) &adapter_event,
5949 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
5950 * @phba: pointer to lpfc hba data structure.
5952 * This routine is invoked to set up the PCI device memory space for device
5953 * with SLI-3 interface spec.
5957 * other values - error
5960 lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
5962 struct pci_dev *pdev;
5963 unsigned long bar0map_len, bar2map_len;
5966 int error = -ENODEV;
5968 /* Obtain PCI device reference */
5972 pdev = phba->pcidev;
5974 /* Set the device DMA mask size */
5975 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
5976 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
5977 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
5978 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
5983 /* Get the bus address of Bar0 and Bar2 and the number of bytes
5984 * required by each mapping.
5986 phba->pci_bar0_map = pci_resource_start(pdev, 0);
5987 bar0map_len = pci_resource_len(pdev, 0);
5989 phba->pci_bar2_map = pci_resource_start(pdev, 2);
5990 bar2map_len = pci_resource_len(pdev, 2);
5992 /* Map HBA SLIM to a kernel virtual address. */
5993 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
5994 if (!phba->slim_memmap_p) {
5995 dev_printk(KERN_ERR, &pdev->dev,
5996 "ioremap failed for SLIM memory.\n");
6000 /* Map HBA Control Registers to a kernel virtual address. */
6001 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
6002 if (!phba->ctrl_regs_memmap_p) {
6003 dev_printk(KERN_ERR, &pdev->dev,
6004 "ioremap failed for HBA control registers.\n");
6005 goto out_iounmap_slim;
6008 /* Allocate memory for SLI-2 structures */
6009 phba->slim2p.virt = dma_alloc_coherent(&pdev->dev,
6010 SLI2_SLIM_SIZE,
6011 &phba->slim2p.phys,
6012 GFP_KERNEL);
6013 if (!phba->slim2p.virt)
6016 memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE);
6017 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
6018 phba->mbox_ext = (phba->slim2p.virt +
6019 offsetof(struct lpfc_sli2_slim, mbx_ext_words));
6020 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
6021 phba->IOCBs = (phba->slim2p.virt +
6022 offsetof(struct lpfc_sli2_slim, IOCBs));
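/*
 * The mailbox, extended mailbox words, PCB and IOCBs above are all
 * carved from the single coherent slim2p allocation via offsetof(),
 * so one dma_alloc_coherent() backs every SLI-2 host-memory
 * structure.
 */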
6024 phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
6025 lpfc_sli_hbq_size(),
6026 &phba->hbqslimp.phys,
6028 if (!phba->hbqslimp.virt)
6031 hbq_count = lpfc_sli_hbq_count();
6032 ptr = phba->hbqslimp.virt;
6033 for (i = 0; i < hbq_count; ++i) {
6034 phba->hbqs[i].hbq_virt = ptr;
6035 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
6036 ptr += (lpfc_hbq_defs[i]->entry_count *
6037 sizeof(struct lpfc_hbq_entry));
6039 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
6040 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;
6042 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
6044 INIT_LIST_HEAD(&phba->rb_pend_list);
6046 phba->MBslimaddr = phba->slim_memmap_p;
6047 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
6048 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
6049 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
6050 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
6055 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
6056 phba->slim2p.virt, phba->slim2p.phys);
6058 iounmap(phba->ctrl_regs_memmap_p);
6060 iounmap(phba->slim_memmap_p);
6066 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
6067 * @phba: pointer to lpfc hba data structure.
6069 * This routine is invoked to unset the PCI device memory space for device
6070 * with SLI-3 interface spec.
6073 lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
6075 struct pci_dev *pdev;
6077 /* Obtain PCI device reference */
6081 pdev = phba->pcidev;
6083 /* Free coherent DMA memory allocated */
6084 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
6085 phba->hbqslimp.virt, phba->hbqslimp.phys);
6086 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
6087 phba->slim2p.virt, phba->slim2p.phys);
6089 /* I/O memory unmap */
6090 iounmap(phba->ctrl_regs_memmap_p);
6091 iounmap(phba->slim_memmap_p);
6097 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
6098 * @phba: pointer to lpfc hba data structure.
6100 * This routine is invoked to wait for the SLI4 device Power On Self Test
6101 * (POST) to complete and to check the status.
6103 * Return 0 if successful, otherwise -ENODEV.
6106 lpfc_sli4_post_status_check(struct lpfc_hba *phba)
6108 struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg;
6109 struct lpfc_register reg_data;
6110 int i, port_error = 0;
6111 uint32_t if_type;
6113 memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
6114 memset(&reg_data, 0, sizeof(reg_data));
6115 if (!phba->sli4_hba.PSMPHRregaddr)
6116 return -ENODEV;
6118 /* Wait up to 30 seconds for the SLI Port POST done and ready */
6119 for (i = 0; i < 3000; i++) {
6120 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
6121 &portsmphr_reg.word0) ||
6122 (bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) {
6123 /* Port has a fatal POST error, break out */
6124 port_error = -ENODEV;
6127 if (LPFC_POST_STAGE_PORT_READY ==
6128 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg))
6129 break;
6130 msleep(10);
6131 }
6134 * If there was a port error during POST, then don't proceed with
6135 * other register reads as the data may not be valid. Just exit.
6138 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6139 "1408 Port Failed POST - portsmphr=0x%x, "
6140 "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, "
6141 "scr2=x%x, hscratch=x%x, pstatus=x%x\n",
6142 portsmphr_reg.word0,
6143 bf_get(lpfc_port_smphr_perr, &portsmphr_reg),
6144 bf_get(lpfc_port_smphr_sfi, &portsmphr_reg),
6145 bf_get(lpfc_port_smphr_nip, &portsmphr_reg),
6146 bf_get(lpfc_port_smphr_ipc, &portsmphr_reg),
6147 bf_get(lpfc_port_smphr_scr1, &portsmphr_reg),
6148 bf_get(lpfc_port_smphr_scr2, &portsmphr_reg),
6149 bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg),
6150 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg));
6152 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6153 "2534 Device Info: SLIFamily=0x%x, "
6154 "SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, "
6155 "SLIHint_2=0x%x, FT=0x%x\n",
6156 bf_get(lpfc_sli_intf_sli_family,
6157 &phba->sli4_hba.sli_intf),
6158 bf_get(lpfc_sli_intf_slirev,
6159 &phba->sli4_hba.sli_intf),
6160 bf_get(lpfc_sli_intf_if_type,
6161 &phba->sli4_hba.sli_intf),
6162 bf_get(lpfc_sli_intf_sli_hint1,
6163 &phba->sli4_hba.sli_intf),
6164 bf_get(lpfc_sli_intf_sli_hint2,
6165 &phba->sli4_hba.sli_intf),
6166 bf_get(lpfc_sli_intf_func_type,
6167 &phba->sli4_hba.sli_intf));
6169 * Check for other Port errors during the initialization
6170 * process. Fail the load if the port did not come up
6171 * correctly.
6172 */
6173 if_type = bf_get(lpfc_sli_intf_if_type,
6174 &phba->sli4_hba.sli_intf);
6175 switch (if_type) {
6176 case LPFC_SLI_INTF_IF_TYPE_0:
6177 phba->sli4_hba.ue_mask_lo =
6178 readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr);
6179 phba->sli4_hba.ue_mask_hi =
6180 readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr);
6181 uerrlo_reg.word0 =
6182 readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
6183 uerrhi_reg.word0 =
6184 readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
6185 if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
6186 (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
6187 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6188 "1422 Unrecoverable Error "
6189 "Detected during POST "
6190 "uerr_lo_reg=0x%x, "
6191 "uerr_hi_reg=0x%x, "
6192 "ue_mask_lo_reg=0x%x, "
6193 "ue_mask_hi_reg=0x%x\n",
6196 phba->sli4_hba.ue_mask_lo,
6197 phba->sli4_hba.ue_mask_hi);
6198 port_error = -ENODEV;
6201 case LPFC_SLI_INTF_IF_TYPE_2:
6202 /* Final checks. The port status should be clean. */
6203 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
6204 &reg_data.word0) ||
6205 (bf_get(lpfc_sliport_status_err, &reg_data) &&
6206 !bf_get(lpfc_sliport_status_rn, &reg_data))) {
6207 phba->work_status[0] =
6208 readl(phba->sli4_hba.u.if_type2.
6209 ERR1regaddr);
6210 phba->work_status[1] =
6211 readl(phba->sli4_hba.u.if_type2.
6212 ERR2regaddr);
6213 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6214 "2888 Unrecoverable port error "
6215 "following POST: port status reg "
6216 "0x%x, port_smphr reg 0x%x, "
6217 "error 1=0x%x, error 2=0x%x\n",
6219 portsmphr_reg.word0,
6220 phba->work_status[0],
6221 phba->work_status[1]);
6222 port_error = -ENODEV;
6225 case LPFC_SLI_INTF_IF_TYPE_1:
6226 default:
6227 break;
6228 }
6230 return port_error;
6234 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
6235 * @phba: pointer to lpfc hba data structure.
6236 * @if_type: The SLI4 interface type getting configured.
6238 * This routine is invoked to set up SLI4 BAR0 PCI config space register
6239 * memory map.
6242 lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
6243 {
6244 switch (if_type) {
6245 case LPFC_SLI_INTF_IF_TYPE_0:
6246 phba->sli4_hba.u.if_type0.UERRLOregaddr =
6247 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO;
6248 phba->sli4_hba.u.if_type0.UERRHIregaddr =
6249 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI;
6250 phba->sli4_hba.u.if_type0.UEMASKLOregaddr =
6251 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO;
6252 phba->sli4_hba.u.if_type0.UEMASKHIregaddr =
6253 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI;
6254 phba->sli4_hba.SLIINTFregaddr =
6255 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
6257 case LPFC_SLI_INTF_IF_TYPE_2:
6258 phba->sli4_hba.u.if_type2.ERR1regaddr =
6259 phba->sli4_hba.conf_regs_memmap_p +
6260 LPFC_CTL_PORT_ER1_OFFSET;
6261 phba->sli4_hba.u.if_type2.ERR2regaddr =
6262 phba->sli4_hba.conf_regs_memmap_p +
6263 LPFC_CTL_PORT_ER2_OFFSET;
6264 phba->sli4_hba.u.if_type2.CTRLregaddr =
6265 phba->sli4_hba.conf_regs_memmap_p +
6266 LPFC_CTL_PORT_CTL_OFFSET;
6267 phba->sli4_hba.u.if_type2.STATUSregaddr =
6268 phba->sli4_hba.conf_regs_memmap_p +
6269 LPFC_CTL_PORT_STA_OFFSET;
6270 phba->sli4_hba.SLIINTFregaddr =
6271 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
6272 phba->sli4_hba.PSMPHRregaddr =
6273 phba->sli4_hba.conf_regs_memmap_p +
6274 LPFC_CTL_PORT_SEM_OFFSET;
6275 phba->sli4_hba.RQDBregaddr =
6276 phba->sli4_hba.conf_regs_memmap_p +
6277 LPFC_ULP0_RQ_DOORBELL;
6278 phba->sli4_hba.WQDBregaddr =
6279 phba->sli4_hba.conf_regs_memmap_p +
6280 LPFC_ULP0_WQ_DOORBELL;
6281 phba->sli4_hba.EQCQDBregaddr =
6282 phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
6283 phba->sli4_hba.MQDBregaddr =
6284 phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL;
6285 phba->sli4_hba.BMBXregaddr =
6286 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
6288 case LPFC_SLI_INTF_IF_TYPE_1:
6289 default:
6290 dev_printk(KERN_ERR, &phba->pcidev->dev,
6291 "FATAL - unsupported SLI4 interface type - %d\n",
6298 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
6299 * @phba: pointer to lpfc hba data structure.
6301 * This routine is invoked to set up SLI4 BAR1 control status register (CSR)
6302 * memory map.
6305 lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba)
6307 phba->sli4_hba.PSMPHRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
6308 LPFC_SLIPORT_IF0_SMPHR;
6309 phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
6310 LPFC_HST_ISR0;
6311 phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
6312 LPFC_HST_IMR0;
6313 phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
6314 LPFC_HST_ISCR0;
6318 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
6319 * @phba: pointer to lpfc hba data structure.
6320 * @vf: virtual function number
6322 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
6323 * based on the given virtual function number, @vf.
6325 * Return 0 if successful, otherwise -ENODEV.
6328 lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
6330 if (vf > LPFC_VIR_FUNC_MAX)
6331 return -ENODEV;
6333 phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
6334 vf * LPFC_VFR_PAGE_SIZE +
6335 LPFC_ULP0_RQ_DOORBELL);
6336 phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
6337 vf * LPFC_VFR_PAGE_SIZE +
6338 LPFC_ULP0_WQ_DOORBELL);
6339 phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
6340 vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL);
6341 phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
6342 vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
6343 phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
6344 vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
6345 return 0;
6346 }
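/* Illustration (hypothetical numbers, not taken from the SLI-4 spec):
 * if LPFC_VFR_PAGE_SIZE were 4 KB, vf = 2 would place this function's
 * RQ doorbell at drbl_regs_memmap_p + 2 * 4096 + LPFC_ULP0_RQ_DOORBELL;
 * that is, each virtual function owns one page-sized doorbell window.
 */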
6349 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
6350 * @phba: pointer to lpfc hba data structure.
6352 * This routine is invoked to create the bootstrap mailbox
6353 * region consistent with the SLI-4 interface spec. This
6354 * routine allocates all memory necessary to communicate
6355 * mailbox commands to the port and sets up all alignment
6356 * needs. No locks are expected to be held when calling
6357 * this routine.
6359 * Return codes
6360 * 0 - successful
6361 * -ENOMEM - could not allocate memory.
6364 lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
6367 struct lpfc_dmabuf *dmabuf;
6368 struct dma_address *dma_address;
6372 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
6377 * The bootstrap mailbox region is comprised of 2 parts
6378 * plus an alignment restriction of 16 bytes.
6380 bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
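/* In other words, bmbx_size is sizeof(struct lpfc_bmbx_create) plus 15
 * slack bytes, which guarantees that a fully 16-byte-aligned window of
 * the required size can be carved out of the allocation made below.
 */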
6381 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
6382 bmbx_size,
6383 &dmabuf->phys,
6384 GFP_KERNEL);
6385 if (!dmabuf->virt) {
6386 kfree(dmabuf);
6387 return -ENOMEM;
6388 }
6389 memset(dmabuf->virt, 0, bmbx_size);
6392 * Initialize the bootstrap mailbox pointers now so that the register
6393 * operations are simple later. The mailbox dma address is required
6394 * to be 16-byte aligned. Also align the virtual memory as each
6395 * mailbox is copied into the bmbx mailbox region before issuing the
6396 * command to the port.
6398 phba->sli4_hba.bmbx.dmabuf = dmabuf;
6399 phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
6401 phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
6402 LPFC_ALIGN_16_BYTE);
6403 phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
6404 LPFC_ALIGN_16_BYTE);
6407 * Set the high and low physical addresses now. The SLI4 alignment
6408 * requirement is 16 bytes and the mailbox is posted to the port
6409 * as two 30-bit addresses. The other data is a bit marking whether
6410 * the 30-bit address is the high or low address.
6411 * Upcast bmbx aphys to 64bits so shift instruction compiles
6412 * clean on 32 bit machines.
6414 dma_address = &phba->sli4_hba.bmbx.dma_address;
6415 phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
6416 pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
6417 dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
6418 LPFC_BMBX_BIT1_ADDR_HI);
6420 pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
6421 dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
6422 LPFC_BMBX_BIT1_ADDR_LO);
6423 return 0;
6424 }
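/* Worked example (illustrative address only): if aphys were
 * 0x400000100, then aphys >> 34 = 0x1 gives the upper 30 bits, so
 * addr_hi = (0x1 << 2) | LPFC_BMBX_BIT1_ADDR_HI, while
 * (aphys >> 4) & 0x3fffffff = 0x10 gives the lower 30 bits, so
 * addr_lo = (0x10 << 2) | LPFC_BMBX_BIT1_ADDR_LO; the marker bit in
 * each word tells the port which half of the address it is receiving.
 */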
6427 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
6428 * @phba: pointer to lpfc hba data structure.
6430 * This routine is invoked to teardown the bootstrap mailbox
6431 * region and release all host resources. This routine requires
6432 * the caller to ensure that all mailbox commands have been recovered, that
6433 * no additional mailbox commands are sent, and that interrupts are disabled
6434 * before calling this routine.
6438 lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
6440 dma_free_coherent(&phba->pcidev->dev,
6441 phba->sli4_hba.bmbx.bmbx_size,
6442 phba->sli4_hba.bmbx.dmabuf->virt,
6443 phba->sli4_hba.bmbx.dmabuf->phys);
6445 kfree(phba->sli4_hba.bmbx.dmabuf);
6446 memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
6450 * lpfc_sli4_read_config - Get the config parameters.
6451 * @phba: pointer to lpfc hba data structure.
6453 * This routine is invoked to read the configuration parameters from the HBA.
6454 * The configuration parameters are used to set the base and maximum values
6455 * for RPIs, XRIs, VPIs, VFIs and FCFIs. These values also affect the resource
6456 * allocation for the port.
6460 * -ENOMEM - No available memory
6461 * -EIO - The mailbox failed to complete successfully.
6464 lpfc_sli4_read_config(struct lpfc_hba *phba)
6467 struct lpfc_mbx_read_config *rd_config;
6468 union lpfc_sli4_cfg_shdr *shdr;
6469 uint32_t shdr_status, shdr_add_status;
6470 struct lpfc_mbx_get_func_cfg *get_func_cfg;
6471 struct lpfc_rsrc_desc_fcfcoe *desc;
6472 char *pdesc_0;
6473 uint32_t desc_count;
6474 int length, i, rc = 0, rc2;
6476 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6477 if (!pmb) {
6478 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6479 "2011 Unable to allocate memory for issuing "
6480 "SLI_CONFIG_SPECIAL mailbox command\n");
6484 lpfc_read_config(phba, pmb);
6486 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
6487 if (rc != MBX_SUCCESS) {
6488 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6489 "2012 Mailbox failed , mbxCmd x%x "
6490 "READ_CONFIG, mbxStatus x%x\n",
6491 bf_get(lpfc_mqe_command, &pmb->u.mqe),
6492 bf_get(lpfc_mqe_status, &pmb->u.mqe));
6495 rd_config = &pmb->u.mqe.un.rd_config;
6496 if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) {
6497 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
6498 phba->sli4_hba.lnk_info.lnk_tp =
6499 bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config);
6500 phba->sli4_hba.lnk_info.lnk_no =
6501 bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config);
6502 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6503 "3081 lnk_type:%d, lnk_numb:%d\n",
6504 phba->sli4_hba.lnk_info.lnk_tp,
6505 phba->sli4_hba.lnk_info.lnk_no);
6506 } else
6507 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
6508 "3082 Mailbox (x%x) returned ldv:x0\n",
6509 bf_get(lpfc_mqe_command, &pmb->u.mqe));
6510 phba->sli4_hba.extents_in_use =
6511 bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
6512 phba->sli4_hba.max_cfg_param.max_xri =
6513 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
6514 phba->sli4_hba.max_cfg_param.xri_base =
6515 bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
6516 phba->sli4_hba.max_cfg_param.max_vpi =
6517 bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
6518 phba->sli4_hba.max_cfg_param.vpi_base =
6519 bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
6520 phba->sli4_hba.max_cfg_param.max_rpi =
6521 bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
6522 phba->sli4_hba.max_cfg_param.rpi_base =
6523 bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
6524 phba->sli4_hba.max_cfg_param.max_vfi =
6525 bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
6526 phba->sli4_hba.max_cfg_param.vfi_base =
6527 bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
6528 phba->sli4_hba.max_cfg_param.max_fcfi =
6529 bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
6530 phba->sli4_hba.max_cfg_param.max_eq =
6531 bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
6532 phba->sli4_hba.max_cfg_param.max_rq =
6533 bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
6534 phba->sli4_hba.max_cfg_param.max_wq =
6535 bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
6536 phba->sli4_hba.max_cfg_param.max_cq =
6537 bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
6538 phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
6539 phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
6540 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
6541 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
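/* VPI 0 is consumed by the physical port itself, which is why the
 * number of creatable virtual ports is one less than the VPI count
 * reported by READ_CONFIG.
 */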
6542 phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
6543 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
6544 phba->max_vports = phba->max_vpi;
6545 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6546 "2003 cfg params Extents? %d "
6552 phba->sli4_hba.extents_in_use,
6553 phba->sli4_hba.max_cfg_param.xri_base,
6554 phba->sli4_hba.max_cfg_param.max_xri,
6555 phba->sli4_hba.max_cfg_param.vpi_base,
6556 phba->sli4_hba.max_cfg_param.max_vpi,
6557 phba->sli4_hba.max_cfg_param.vfi_base,
6558 phba->sli4_hba.max_cfg_param.max_vfi,
6559 phba->sli4_hba.max_cfg_param.rpi_base,
6560 phba->sli4_hba.max_cfg_param.max_rpi,
6561 phba->sli4_hba.max_cfg_param.max_fcfi);
6567 /* Reset the DFT_HBA_Q_DEPTH to the max xri */
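/* Every outstanding I/O consumes an XRI, and the ELS traffic sized by
 * lpfc_sli4_get_els_iocb_cnt() needs XRIs of its own, so the HBA queue
 * depth is capped at whatever remains of max_xri after that reserve.
 */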
6568 if (phba->cfg_hba_queue_depth >
6569 (phba->sli4_hba.max_cfg_param.max_xri -
6570 lpfc_sli4_get_els_iocb_cnt(phba)))
6571 phba->cfg_hba_queue_depth =
6572 phba->sli4_hba.max_cfg_param.max_xri -
6573 lpfc_sli4_get_els_iocb_cnt(phba);
6575 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
6576 LPFC_SLI_INTF_IF_TYPE_2)
6577 goto read_cfg_out;
6579 /* get the pf# and vf# for SLI4 if_type 2 port */
6580 length = (sizeof(struct lpfc_mbx_get_func_cfg) -
6581 sizeof(struct lpfc_sli4_cfg_mhdr));
6582 lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON,
6583 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG,
6584 length, LPFC_SLI4_MBX_EMBED);
6586 rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
6587 shdr = (union lpfc_sli4_cfg_shdr *)
6588 &pmb->u.mqe.un.sli4_config.header.cfg_shdr;
6589 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6590 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
6591 if (rc2 || shdr_status || shdr_add_status) {
6592 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6593 "3026 Mailbox failed , mbxCmd x%x "
6594 "GET_FUNCTION_CONFIG, mbxStatus x%x\n",
6595 bf_get(lpfc_mqe_command, &pmb->u.mqe),
6596 bf_get(lpfc_mqe_status, &pmb->u.mqe));
6597 goto read_cfg_out;
6598 }
6600 /* search for fc_fcoe resource descriptor */
6601 get_func_cfg = &pmb->u.mqe.un.get_func_cfg;
6602 desc_count = get_func_cfg->func_cfg.rsrc_desc_count;
6604 pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0];
6605 desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0;
6606 length = bf_get(lpfc_rsrc_desc_fcfcoe_length, desc);
6607 if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD)
6608 length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH;
6609 else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH)
6610 goto read_cfg_out;
6612 for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
6613 desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i);
6614 if (LPFC_RSRC_DESC_TYPE_FCFCOE ==
6615 bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) {
6616 phba->sli4_hba.iov.pf_number =
6617 bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc);
6618 phba->sli4_hba.iov.vf_number =
6619 bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc);
6624 if (i < LPFC_RSRC_DESC_MAX_NUM)
6625 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6626 "3027 GET_FUNCTION_CONFIG: pf_number:%d, "
6627 "vf_number:%d\n", phba->sli4_hba.iov.pf_number,
6628 phba->sli4_hba.iov.vf_number);
6629 else
6630 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6631 "3028 GET_FUNCTION_CONFIG: failed to find "
6632 "Resrouce Descriptor:x%x\n",
6633 LPFC_RSRC_DESC_TYPE_FCFCOE);
6635 read_cfg_out:
6636 mempool_free(pmb, phba->mbox_mem_pool);
6637 return rc;
6641 * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port.
6642 * @phba: pointer to lpfc hba data structure.
6644 * This routine is invoked to setup the port-side endian order when
6645 * the port if_type is 0. This routine has no function for other
6646 * if_types.
6650 * -ENOMEM - No available memory
6651 * -EIO - The mailbox failed to complete successfully.
6654 lpfc_setup_endian_order(struct lpfc_hba *phba)
6656 LPFC_MBOXQ_t *mboxq;
6657 uint32_t if_type, rc = 0;
6658 uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
6659 HOST_ENDIAN_HIGH_WORD1};
6661 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
6662 switch (if_type) {
6663 case LPFC_SLI_INTF_IF_TYPE_0:
6664 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
6665 GFP_KERNEL);
6666 if (!mboxq) {
6667 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6668 "0492 Unable to allocate memory for "
6669 "issuing SLI_CONFIG_SPECIAL mailbox "
6670 "command\n");
6671 rc = -ENOMEM;
6672 break;
6673 }
6675 * The SLI4_CONFIG_SPECIAL mailbox command requires the first
6676 * two words to contain special data values and no other data.
6678 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
6679 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
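/* The port examines the byte order in which these two well-known
 * constants arrive and from that deduces the host's endianness, so
 * that subsequent mailbox traffic can be byte-swapped to match.
 */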
6680 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6681 if (rc != MBX_SUCCESS) {
6682 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6683 "0493 SLI_CONFIG_SPECIAL mailbox "
6684 "failed with status x%x\n",
6688 mempool_free(mboxq, phba->mbox_mem_pool);
6690 case LPFC_SLI_INTF_IF_TYPE_2:
6691 case LPFC_SLI_INTF_IF_TYPE_1:
6692 default:
6693 break;
6694 }
6695 return rc;
6699 * lpfc_sli4_queue_verify - Verify and update EQ and CQ counts
6700 * @phba: pointer to lpfc hba data structure.
6702 * This routine is invoked to check the user-settable queue counts for EQs and
6703 * CQs. After this routine is called the counts will be set to valid values that
6704 * adhere to the constraints of the system's interrupt vectors and the port's
6709 * -ENOMEM - No available memory
6712 lpfc_sli4_queue_verify(struct lpfc_hba *phba)
6713 {
6714 int cfg_fcp_io_channel;
6715 uint32_t cpu;
6716 uint32_t i = 0;
6720 * Sanity check for configured queue parameters against the run-time
6724 /* Sanity check on HBA EQ parameters */
6725 cfg_fcp_io_channel = phba->cfg_fcp_io_channel;
6727 /* It doesn't make sense to have more IO channels than CPUs */
6728 for_each_online_cpu(cpu) {
6729 i++;
6730 }
6731 if (i < cfg_fcp_io_channel) {
6732 lpfc_printf_log(phba,
6734 "3188 Reducing IO channels to match number of "
6735 "CPUs: from %d to %d\n", cfg_fcp_io_channel, i);
6736 cfg_fcp_io_channel = i;
6739 if (cfg_fcp_io_channel >
6740 phba->sli4_hba.max_cfg_param.max_eq) {
6741 if (phba->sli4_hba.max_cfg_param.max_eq <
6742 LPFC_FCP_IO_CHAN_MIN) {
6743 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6744 "2574 Not enough EQs (%d) from the "
6745 "pci function for supporting FCP "
6747 phba->sli4_hba.max_cfg_param.max_eq,
6748 phba->cfg_fcp_io_channel);
6751 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6752 "2575 Reducing IO channels to match number of "
6753 "available EQs: from %d to %d\n",
6755 phba->sli4_hba.max_cfg_param.max_eq);
6756 cfg_fcp_io_channel = phba->sli4_hba.max_cfg_param.max_eq;
6759 /* Eventually cfg_fcp_eq_count / cfg_fcp_wq_count will be deprecated */
6761 /* The actual number of FCP event queues adopted */
6762 phba->cfg_fcp_eq_count = cfg_fcp_io_channel;
6763 phba->cfg_fcp_wq_count = cfg_fcp_io_channel;
6764 phba->cfg_fcp_io_channel = cfg_fcp_io_channel;
6766 /* Get EQ depth from module parameter, fake the default for now */
6767 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
6768 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
6770 /* Get CQ depth from module parameter, fake the default for now */
6771 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
6772 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
6780 * lpfc_sli4_queue_create - Create all the SLI4 queues
6781 * @phba: pointer to lpfc hba data structure.
6783 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
6784 * operation. For each SLI4 queue type, the parameters such as queue entry
6785 * count (queue depth) shall be taken from the module parameter. For now,
6786 * we just use some constant number as place holder.
6788 * Return codes
6789 * 0 - successful
6790 * -ENOMEM - No available memory
6791 * -EIO - The mailbox failed to complete successfully.
6794 lpfc_sli4_queue_create(struct lpfc_hba *phba)
6796 struct lpfc_queue *qdesc;
6797 int idx;
6800 * Create HBA Record arrays.
6802 if (!phba->cfg_fcp_io_channel)
6803 return -ERANGE;
6805 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
6806 phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
6807 phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
6808 phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
6809 phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
6810 phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
6812 phba->sli4_hba.hba_eq = kzalloc((sizeof(struct lpfc_queue *) *
6813 phba->cfg_fcp_io_channel), GFP_KERNEL);
6814 if (!phba->sli4_hba.hba_eq) {
6815 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6816 "2576 Failed allocate memory for "
6817 "fast-path EQ record array\n");
6821 phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
6822 phba->cfg_fcp_io_channel), GFP_KERNEL);
6823 if (!phba->sli4_hba.fcp_cq) {
6824 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6825 "2577 Failed allocate memory for fast-path "
6826 "CQ record array\n");
6830 phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) *
6831 phba->cfg_fcp_io_channel), GFP_KERNEL);
6832 if (!phba->sli4_hba.fcp_wq) {
6833 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6834 "2578 Failed allocate memory for fast-path "
6835 "WQ record array\n");
6840 * Since the first EQ can have multiple CQs associated with it,
6841 * this array is used to quickly see if we have a FCP fast-path
6844 phba->sli4_hba.fcp_cq_map = kzalloc((sizeof(uint16_t) *
6845 phba->cfg_fcp_io_channel), GFP_KERNEL);
6846 if (!phba->sli4_hba.fcp_cq_map) {
6847 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6848 "2545 Failed allocate memory for fast-path "
6854 * Create HBA Event Queues (EQs). The cfg_fcp_io_channel specifies
6855 * how many EQs to create.
6857 for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
6860 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
6861 phba->sli4_hba.eq_ecount);
6862 if (!qdesc) {
6863 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6864 "0497 Failed allocate EQ (%d)\n", idx);
6865 goto out_error;
6866 }
6867 phba->sli4_hba.hba_eq[idx] = qdesc;
6869 /* Create Fast Path FCP CQs */
6870 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
6871 phba->sli4_hba.cq_ecount);
6872 if (!qdesc) {
6873 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6874 "0499 Failed allocate fast-path FCP "
6875 "CQ (%d)\n", idx);
6876 goto out_error;
6877 }
6878 phba->sli4_hba.fcp_cq[idx] = qdesc;
6880 /* Create Fast Path FCP WQs */
6881 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
6882 phba->sli4_hba.wq_ecount);
6883 if (!qdesc) {
6884 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6885 "0503 Failed allocate fast-path FCP "
6886 "WQ (%d)\n", idx);
6887 goto out_error;
6888 }
6889 phba->sli4_hba.fcp_wq[idx] = qdesc;
6894 * Create Slow Path Completion Queues (CQs)
6897 /* Create slow-path Mailbox Command Complete Queue */
6898 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
6899 phba->sli4_hba.cq_ecount);
6900 if (!qdesc) {
6901 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6902 "0500 Failed allocate slow-path mailbox CQ\n");
6903 goto out_error;
6904 }
6905 phba->sli4_hba.mbx_cq = qdesc;
6907 /* Create slow-path ELS Complete Queue */
6908 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
6909 phba->sli4_hba.cq_ecount);
6910 if (!qdesc) {
6911 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6912 "0501 Failed allocate slow-path ELS CQ\n");
6913 goto out_error;
6914 }
6915 phba->sli4_hba.els_cq = qdesc;
6919 * Create Slow Path Work Queues (WQs)
6922 /* Create Mailbox Command Queue */
6924 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize,
6925 phba->sli4_hba.mq_ecount);
6926 if (!qdesc) {
6927 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6928 "0505 Failed allocate slow-path MQ\n");
6929 goto out_error;
6930 }
6931 phba->sli4_hba.mbx_wq = qdesc;
6934 * Create ELS Work Queues
6937 /* Create slow-path ELS Work Queue */
6938 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
6939 phba->sli4_hba.wq_ecount);
6940 if (!qdesc) {
6941 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6942 "0504 Failed allocate slow-path ELS WQ\n");
6943 goto out_error;
6944 }
6945 phba->sli4_hba.els_wq = qdesc;
6948 * Create Receive Queue (RQ)
6951 /* Create Receive Queue for header */
6952 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
6953 phba->sli4_hba.rq_ecount);
6954 if (!qdesc) {
6955 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6956 "0506 Failed allocate receive HRQ\n");
6957 goto out_error;
6958 }
6959 phba->sli4_hba.hdr_rq = qdesc;
6961 /* Create Receive Queue for data */
6962 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
6963 phba->sli4_hba.rq_ecount);
6964 if (!qdesc) {
6965 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6966 "0507 Failed allocate receive DRQ\n");
6967 goto out_error;
6968 }
6969 phba->sli4_hba.dat_rq = qdesc;
6971 return 0;
6973 out_error:
6974 lpfc_sli4_queue_destroy(phba);
6975 return -ENOMEM;
6979 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
6980 * @phba: pointer to lpfc hba data structure.
6982 * This routine is invoked to release all the SLI4 queues with the FCoE HBA
6983 * operation.
6987 * -ENOMEM - No available memory
6988 * -EIO - The mailbox failed to complete successfully.
6991 lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
6995 if (phba->sli4_hba.hba_eq != NULL) {
6996 /* Release HBA event queue */
6997 for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
6998 if (phba->sli4_hba.hba_eq[idx] != NULL) {
6999 lpfc_sli4_queue_free(
7000 phba->sli4_hba.hba_eq[idx]);
7001 phba->sli4_hba.hba_eq[idx] = NULL;
7004 kfree(phba->sli4_hba.hba_eq);
7005 phba->sli4_hba.hba_eq = NULL;
7008 if (phba->sli4_hba.fcp_cq != NULL) {
7009 /* Release FCP completion queue */
7010 for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
7011 if (phba->sli4_hba.fcp_cq[idx] != NULL) {
7012 lpfc_sli4_queue_free(
7013 phba->sli4_hba.fcp_cq[idx]);
7014 phba->sli4_hba.fcp_cq[idx] = NULL;
7017 kfree(phba->sli4_hba.fcp_cq);
7018 phba->sli4_hba.fcp_cq = NULL;
7021 if (phba->sli4_hba.fcp_wq != NULL) {
7022 /* Release FCP work queue */
7023 for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
7024 if (phba->sli4_hba.fcp_wq[idx] != NULL) {
7025 lpfc_sli4_queue_free(
7026 phba->sli4_hba.fcp_wq[idx]);
7027 phba->sli4_hba.fcp_wq[idx] = NULL;
7030 kfree(phba->sli4_hba.fcp_wq);
7031 phba->sli4_hba.fcp_wq = NULL;
7034 if (phba->pci_bar0_memmap_p) {
7035 iounmap(phba->pci_bar0_memmap_p);
7036 phba->pci_bar0_memmap_p = NULL;
7038 if (phba->pci_bar2_memmap_p) {
7039 iounmap(phba->pci_bar2_memmap_p);
7040 phba->pci_bar2_memmap_p = NULL;
7042 if (phba->pci_bar4_memmap_p) {
7043 iounmap(phba->pci_bar4_memmap_p);
7044 phba->pci_bar4_memmap_p = NULL;
7047 /* Release FCP CQ mapping array */
7048 if (phba->sli4_hba.fcp_cq_map != NULL) {
7049 kfree(phba->sli4_hba.fcp_cq_map);
7050 phba->sli4_hba.fcp_cq_map = NULL;
7053 /* Release mailbox command work queue */
7054 if (phba->sli4_hba.mbx_wq != NULL) {
7055 lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
7056 phba->sli4_hba.mbx_wq = NULL;
7059 /* Release ELS work queue */
7060 if (phba->sli4_hba.els_wq != NULL) {
7061 lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
7062 phba->sli4_hba.els_wq = NULL;
7065 /* Release unsolicited receive queue */
7066 if (phba->sli4_hba.hdr_rq != NULL) {
7067 lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
7068 phba->sli4_hba.hdr_rq = NULL;
7070 if (phba->sli4_hba.dat_rq != NULL) {
7071 lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
7072 phba->sli4_hba.dat_rq = NULL;
7075 /* Release ELS complete queue */
7076 if (phba->sli4_hba.els_cq != NULL) {
7077 lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
7078 phba->sli4_hba.els_cq = NULL;
7081 /* Release mailbox command complete queue */
7082 if (phba->sli4_hba.mbx_cq != NULL) {
7083 lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
7084 phba->sli4_hba.mbx_cq = NULL;
7091 * lpfc_sli4_queue_setup - Set up all the SLI4 queues
7092 * @phba: pointer to lpfc hba data structure.
7094 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
7095 * operation.
7099 * -ENOMEM - No available memory
7100 * -EIO - The mailbox failed to complete successfully.
7103 lpfc_sli4_queue_setup(struct lpfc_hba *phba)
7105 struct lpfc_sli *psli = &phba->sli;
7106 struct lpfc_sli_ring *pring;
7108 int fcp_eqidx, fcp_cqidx, fcp_wqidx;
7109 int fcp_cq_index = 0;
7110 uint32_t shdr_status, shdr_add_status;
7111 union lpfc_sli4_cfg_shdr *shdr;
7112 LPFC_MBOXQ_t *mboxq;
7113 uint32_t length;
7115 /* Check for dual-ULP support */
7116 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7118 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7119 "3249 Unable to allocate memory for "
7120 "QUERY_FW_CFG mailbox command\n");
7123 length = (sizeof(struct lpfc_mbx_query_fw_config) -
7124 sizeof(struct lpfc_sli4_cfg_mhdr));
7125 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
7126 LPFC_MBOX_OPCODE_QUERY_FW_CFG,
7127 length, LPFC_SLI4_MBX_EMBED);
7129 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7131 shdr = (union lpfc_sli4_cfg_shdr *)
7132 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
7133 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
7134 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
7135 if (shdr_status || shdr_add_status || rc) {
7136 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7137 "3250 QUERY_FW_CFG mailbox failed with status "
7138 "x%x add_status x%x, mbx status x%x\n",
7139 shdr_status, shdr_add_status, rc);
7140 if (rc != MBX_TIMEOUT)
7141 mempool_free(mboxq, phba->mbox_mem_pool);
7142 rc = -ENXIO;
7143 goto out_error;
7144 }
7146 phba->sli4_hba.fw_func_mode =
7147 mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode;
7148 phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode;
7149 phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode;
7150 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7151 "3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, "
7152 "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode,
7153 phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode);
7155 if (rc != MBX_TIMEOUT)
7156 mempool_free(mboxq, phba->mbox_mem_pool);
7159 * Set up HBA Event Queues (EQs)
7162 /* Set up HBA event queue */
7163 if (phba->cfg_fcp_io_channel && !phba->sli4_hba.hba_eq) {
7164 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7165 "3147 Fast-path EQs not allocated\n");
7169 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel; fcp_eqidx++) {
7170 if (!phba->sli4_hba.hba_eq[fcp_eqidx]) {
7171 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7172 "0522 Fast-path EQ (%d) not "
7173 "allocated\n", fcp_eqidx);
7175 goto out_destroy_hba_eq;
7177 rc = lpfc_eq_create(phba, phba->sli4_hba.hba_eq[fcp_eqidx],
7178 (phba->cfg_fcp_imax / phba->cfg_fcp_io_channel));
7179 if (rc) {
7180 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7181 "0523 Failed setup of fast-path EQ "
7182 "(%d), rc = 0x%x\n", fcp_eqidx, rc);
7183 goto out_destroy_hba_eq;
7185 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7186 "2584 HBA EQ setup: "
7187 "queue[%d]-id=%d\n", fcp_eqidx,
7188 phba->sli4_hba.hba_eq[fcp_eqidx]->queue_id);
7191 /* Set up fast-path FCP Response Complete Queue */
7192 if (!phba->sli4_hba.fcp_cq) {
7193 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7194 "3148 Fast-path FCP CQ array not "
7197 goto out_destroy_hba_eq;
7200 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_io_channel; fcp_cqidx++) {
7201 if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
7202 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7203 "0526 Fast-path FCP CQ (%d) not "
7204 "allocated\n", fcp_cqidx);
7206 goto out_destroy_fcp_cq;
7208 rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx],
7209 phba->sli4_hba.hba_eq[fcp_cqidx], LPFC_WCQ, LPFC_FCP);
7210 if (rc) {
7211 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7212 "0527 Failed setup of fast-path FCP "
7213 "CQ (%d), rc = 0x%x\n", fcp_cqidx, rc);
7214 goto out_destroy_fcp_cq;
7217 /* Setup fcp_cq_map for fast lookup */
7218 phba->sli4_hba.fcp_cq_map[fcp_cqidx] =
7219 phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id;
7221 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7222 "2588 FCP CQ setup: cq[%d]-id=%d, "
7223 "parent seq[%d]-id=%d\n",
7225 phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id,
7227 phba->sli4_hba.hba_eq[fcp_cqidx]->queue_id);
7230 /* Set up fast-path FCP Work Queue */
7231 if (!phba->sli4_hba.fcp_wq) {
7232 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7233 "3149 Fast-path FCP WQ array not "
7236 goto out_destroy_fcp_cq;
7239 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_io_channel; fcp_wqidx++) {
7240 if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) {
7241 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7242 "0534 Fast-path FCP WQ (%d) not "
7243 "allocated\n", fcp_wqidx);
7245 goto out_destroy_fcp_wq;
7247 rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx],
7248 phba->sli4_hba.fcp_cq[fcp_wqidx],
7249 LPFC_FCP);
7250 if (rc) {
7251 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7252 "0535 Failed setup of fast-path FCP "
7253 "WQ (%d), rc = 0x%x\n", fcp_wqidx, rc);
7254 goto out_destroy_fcp_wq;
7257 /* Bind this WQ to the next FCP ring */
7258 pring = &psli->ring[MAX_SLI3_CONFIGURED_RINGS + fcp_wqidx];
7259 pring->sli.sli4.wqp = (void *)phba->sli4_hba.fcp_wq[fcp_wqidx];
7260 phba->sli4_hba.fcp_cq[fcp_wqidx]->pring = pring;
7262 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7263 "2591 FCP WQ setup: wq[%d]-id=%d, "
7264 "parent cq[%d]-id=%d\n",
7266 phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id,
7268 phba->sli4_hba.fcp_cq[fcp_wqidx]->queue_id);
7271 * Set up Complete Queues (CQs)
7274 /* Set up slow-path MBOX Complete Queue as the first CQ */
7275 if (!phba->sli4_hba.mbx_cq) {
7276 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7277 "0528 Mailbox CQ not allocated\n");
7279 goto out_destroy_fcp_wq;
7281 rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq,
7282 phba->sli4_hba.hba_eq[0], LPFC_MCQ, LPFC_MBOX);
7283 if (rc) {
7284 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7285 "0529 Failed setup of slow-path mailbox CQ: "
7287 goto out_destroy_fcp_wq;
7289 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7290 "2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n",
7291 phba->sli4_hba.mbx_cq->queue_id,
7292 phba->sli4_hba.hba_eq[0]->queue_id);
7294 /* Set up slow-path ELS Complete Queue */
7295 if (!phba->sli4_hba.els_cq) {
7296 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7297 "0530 ELS CQ not allocated\n");
7299 goto out_destroy_mbx_cq;
7301 rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq,
7302 phba->sli4_hba.hba_eq[0], LPFC_WCQ, LPFC_ELS);
7303 if (rc) {
7304 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7305 "0531 Failed setup of slow-path ELS CQ: "
7307 goto out_destroy_mbx_cq;
7309 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7310 "2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n",
7311 phba->sli4_hba.els_cq->queue_id,
7312 phba->sli4_hba.hba_eq[0]->queue_id);
7315 * Set up all the Work Queues (WQs)
7318 /* Set up Mailbox Command Queue */
7319 if (!phba->sli4_hba.mbx_wq) {
7320 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7321 "0538 Slow-path MQ not allocated\n");
7323 goto out_destroy_els_cq;
7325 rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq,
7326 phba->sli4_hba.mbx_cq, LPFC_MBOX);
7327 if (rc) {
7328 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7329 "0539 Failed setup of slow-path MQ: "
7331 goto out_destroy_els_cq;
7333 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7334 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
7335 phba->sli4_hba.mbx_wq->queue_id,
7336 phba->sli4_hba.mbx_cq->queue_id);
7338 /* Set up slow-path ELS Work Queue */
7339 if (!phba->sli4_hba.els_wq) {
7340 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7341 "0536 Slow-path ELS WQ not allocated\n");
7343 goto out_destroy_mbx_wq;
7345 rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq,
7346 phba->sli4_hba.els_cq, LPFC_ELS);
7347 if (rc) {
7348 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7349 "0537 Failed setup of slow-path ELS WQ: "
7351 goto out_destroy_mbx_wq;
7354 /* Bind this WQ to the ELS ring */
7355 pring = &psli->ring[LPFC_ELS_RING];
7356 pring->sli.sli4.wqp = (void *)phba->sli4_hba.els_wq;
7357 phba->sli4_hba.els_cq->pring = pring;
7359 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7360 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
7361 phba->sli4_hba.els_wq->queue_id,
7362 phba->sli4_hba.els_cq->queue_id);
7365 * Create Receive Queue (RQ)
7367 if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
7368 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7369 "0540 Receive Queue not allocated\n");
7371 goto out_destroy_els_wq;
7374 lpfc_rq_adjust_repost(phba, phba->sli4_hba.hdr_rq, LPFC_ELS_HBQ);
7375 lpfc_rq_adjust_repost(phba, phba->sli4_hba.dat_rq, LPFC_ELS_HBQ);
7377 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
7378 phba->sli4_hba.els_cq, LPFC_USOL);
7379 if (rc) {
7380 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7381 "0541 Failed setup of Receive Queue: "
7382 "rc = 0x%x\n", rc);
7383 goto out_destroy_fcp_wq;
7386 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7387 "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
7388 "parent cq-id=%d\n",
7389 phba->sli4_hba.hdr_rq->queue_id,
7390 phba->sli4_hba.dat_rq->queue_id,
7391 phba->sli4_hba.els_cq->queue_id);
7393 return 0;
7394 out_destroy_els_wq:
7395 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
7396 out_destroy_mbx_wq:
7397 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
7398 out_destroy_els_cq:
7399 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
7400 out_destroy_mbx_cq:
7401 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
7402 out_destroy_fcp_wq:
7403 for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--)
7404 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]);
7405 out_destroy_fcp_cq:
7406 for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
7407 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
7408 out_destroy_hba_eq:
7409 for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--)
7410 lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[fcp_eqidx]);
7411 out_error:
7412 return rc;
7416 * lpfc_sli4_queue_unset - Unset all the SLI4 queues
7417 * @phba: pointer to lpfc hba data structure.
7419 * This routine is invoked to unset all the SLI4 queues with the FCoE HBA
7420 * operation.
7424 * -ENOMEM - No available memory
7425 * -EIO - The mailbox failed to complete successfully.
7428 lpfc_sli4_queue_unset(struct lpfc_hba *phba)
7429 {
7430 int fcp_qidx;
7432 /* Unset mailbox command work queue */
7433 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
7434 /* Unset ELS work queue */
7435 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
7436 /* Unset unsolicited receive queue */
7437 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
7438 /* Unset FCP work queue */
7439 if (phba->sli4_hba.fcp_wq) {
7440 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel;
7441 fcp_qidx++)
7442 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]);
7444 /* Unset mailbox command complete queue */
7445 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
7446 /* Unset ELS complete queue */
7447 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
7448 /* Unset FCP response complete queue */
7449 if (phba->sli4_hba.fcp_cq) {
7450 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel;
7451 fcp_qidx++)
7452 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
7454 /* Unset fast-path event queue */
7455 if (phba->sli4_hba.hba_eq) {
7456 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel;
7457 fcp_qidx++)
7458 lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[fcp_qidx]);
7463 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
7464 * @phba: pointer to lpfc hba data structure.
7466 * This routine is invoked to allocate and set up a pool of completion queue
7467 * events. The body of the completion queue event is a completion queue entry
7468 * CQE. For now, this pool is used for the interrupt service routine to queue
7469 * the following HBA completion queue events for the worker thread to process:
7470 * - Mailbox asynchronous events
7471 * - Receive queue completion unsolicited events
7472 * Later, this can be used for all the slow-path events.
7476 * -ENOMEM - No available memory
7479 lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
7481 struct lpfc_cq_event *cq_event;
7482 int i;
7484 for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
7485 cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
7486 if (!cq_event)
7487 goto out_pool_create_fail;
7488 list_add_tail(&cq_event->list,
7489 &phba->sli4_hba.sp_cqe_event_pool);
7490 }
7491 return 0;
7493 out_pool_create_fail:
7494 lpfc_sli4_cq_event_pool_destroy(phba);
7495 return -ENOMEM;
7499 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
7500 * @phba: pointer to lpfc hba data structure.
7502 * This routine is invoked to free the pool of completion queue events at
7503 * driver unload time. Note that it is the responsibility of the driver
7504 * cleanup routine to free all the outstanding completion-queue events
7505 * allocated from this pool back into the pool before invoking this routine
7506 * to destroy the pool.
7509 lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
7511 struct lpfc_cq_event *cq_event, *next_cq_event;
7513 list_for_each_entry_safe(cq_event, next_cq_event,
7514 &phba->sli4_hba.sp_cqe_event_pool, list) {
7515 list_del(&cq_event->list);
7516 kfree(cq_event);
7517 }
7521 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
7522 * @phba: pointer to lpfc hba data structure.
7524 * This routine is the lock free version of the API invoked to allocate a
7525 * completion-queue event from the free pool.
7527 * Return: Pointer to the newly allocated completion-queue event if successful
7530 struct lpfc_cq_event *
7531 __lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
7533 struct lpfc_cq_event *cq_event = NULL;
7535 list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
7536 struct lpfc_cq_event, list);
7537 return cq_event;
7541 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
7542 * @phba: pointer to lpfc hba data structure.
7544 * This routine is the lock version of the API invoked to allocate a
7545 * completion-queue event from the free pool.
7547 * Return: Pointer to the newly allocated completion-queue event if successful
7550 struct lpfc_cq_event *
7551 lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
7553 struct lpfc_cq_event *cq_event;
7554 unsigned long iflags;
7556 spin_lock_irqsave(&phba->hbalock, iflags);
7557 cq_event = __lpfc_sli4_cq_event_alloc(phba);
7558 spin_unlock_irqrestore(&phba->hbalock, iflags);
7559 return cq_event;
7563 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
7564 * @phba: pointer to lpfc hba data structure.
7565 * @cq_event: pointer to the completion queue event to be freed.
7567 * This routine is the lock free version of the API invoked to release a
7568 * completion-queue event back into the free pool.
7571 __lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
7572 struct lpfc_cq_event *cq_event)
7574 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
7578 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
7579 * @phba: pointer to lpfc hba data structure.
7580 * @cq_event: pointer to the completion queue event to be freed.
7582 * This routine is the lock version of the API invoked to release a
7583 * completion-queue event back into the free pool.
7586 lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
7587 struct lpfc_cq_event *cq_event)
7589 unsigned long iflags;
7590 spin_lock_irqsave(&phba->hbalock, iflags);
7591 __lpfc_sli4_cq_event_release(phba, cq_event);
7592 spin_unlock_irqrestore(&phba->hbalock, iflags);
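/* A minimal usage sketch (simplified, not lifted verbatim from the
 * driver): the interrupt handler borrows an event from the pool and
 * the worker thread returns it once the CQE has been processed.
 *
 *	struct lpfc_cq_event *evt = lpfc_sli4_cq_event_alloc(phba);
 *	if (evt) {
 *		// stash the CQE payload in evt, hand it to the worker
 *		// thread, and when processing completes:
 *		lpfc_sli4_cq_event_release(phba, evt);
 *	}
 */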
7596 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
7597 * @phba: pointer to lpfc hba data structure.
7599 * This routine is invoked to release all the pending completion-queue
7600 * events back into the free pool for device reset.
7603 lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
7605 LIST_HEAD(cqelist);
7606 struct lpfc_cq_event *cqe;
7607 unsigned long iflags;
7609 /* Retrieve all the pending WCQEs from pending WCQE lists */
7610 spin_lock_irqsave(&phba->hbalock, iflags);
7611 /* Pending FCP XRI abort events */
7612 list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
7613 &cqelist);
7614 /* Pending ELS XRI abort events */
7615 list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
7616 &cqelist);
7617 /* Pending async events */
7618 list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
7619 &cqelist);
7620 spin_unlock_irqrestore(&phba->hbalock, iflags);
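/* Splicing under hbalock empties every pending work queue atomically;
 * the events themselves are returned to the free pool below, outside
 * that single short critical section.
 */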
7622 while (!list_empty(&cqelist)) {
7623 list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
7624 lpfc_sli4_cq_event_release(phba, cqe);
7629 * lpfc_pci_function_reset - Reset pci function.
7630 * @phba: pointer to lpfc hba data structure.
7632 * This routine is invoked to request a PCI function reset. It destroys
7633 * all resources assigned to the PCI function which originates this request.
7637 * -ENOMEM - No available memory
7638 * -EIO - The mailbox failed to complete successfully.
7641 lpfc_pci_function_reset(struct lpfc_hba *phba)
7643 LPFC_MBOXQ_t *mboxq;
7644 uint32_t rc = 0, if_type;
7645 uint32_t shdr_status, shdr_add_status;
7646 uint32_t rdy_chk, num_resets = 0, reset_again = 0;
7647 union lpfc_sli4_cfg_shdr *shdr;
7648 struct lpfc_register reg_data;
7649 uint16_t devid;
7651 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
7652 switch (if_type) {
7653 case LPFC_SLI_INTF_IF_TYPE_0:
7654 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
7657 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7658 "0494 Unable to allocate memory for "
7659 "issuing SLI_FUNCTION_RESET mailbox "
7664 /* Setup PCI function reset mailbox-ioctl command */
7665 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
7666 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
7667 LPFC_SLI4_MBX_EMBED);
7668 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7669 shdr = (union lpfc_sli4_cfg_shdr *)
7670 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
7671 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
7672 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
7673 &shdr->response);
7674 if (rc != MBX_TIMEOUT)
7675 mempool_free(mboxq, phba->mbox_mem_pool);
7676 if (shdr_status || shdr_add_status || rc) {
7677 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7678 "0495 SLI_FUNCTION_RESET mailbox "
7679 "failed with status x%x add_status x%x,"
7680 " mbx status x%x\n",
7681 shdr_status, shdr_add_status, rc);
7682 rc = -ENXIO;
7683 }
7684 break;
7685 case LPFC_SLI_INTF_IF_TYPE_2:
7686 for (num_resets = 0;
7687 num_resets < MAX_IF_TYPE_2_RESETS;
7688 num_resets++) {
7689 reg_data.word0 = 0;
7690 bf_set(lpfc_sliport_ctrl_end, &reg_data,
7691 LPFC_SLIPORT_LITTLE_ENDIAN);
7692 bf_set(lpfc_sliport_ctrl_ip, &reg_data,
7693 LPFC_SLIPORT_INIT_PORT);
7694 writel(reg_data.word0, phba->sli4_hba.u.if_type2.
7695 CTRLregaddr);
7696 /* flush */
7697 pci_read_config_word(phba->pcidev,
7698 PCI_DEVICE_ID, &devid);
7700 * Poll the Port Status Register and wait for RDY for
7701 * up to 10 seconds. If the port doesn't respond, treat
7702 * it as an error. If the port responds with RN, start
7703 * the loop again.
7704 */
7705 for (rdy_chk = 0; rdy_chk < 1000; rdy_chk++) {
7706 msleep(10);
7707 if (lpfc_readl(phba->sli4_hba.u.if_type2.
7708 STATUSregaddr, &reg_data.word0)) {
7709 rc = -ENODEV;
7710 goto out;
7711 }
7712 if (bf_get(lpfc_sliport_status_rn, &reg_data))
7713 reset_again++;
7714 if (bf_get(lpfc_sliport_status_rdy, &reg_data))
7715 break;
7716 }
7719 * If the port responds to the init request with
7720 * reset needed, delay for a bit and restart the loop.
7722 if (reset_again && (rdy_chk < 1000)) {
7723 msleep(10);
7724 reset_again = 0;
7725 continue;
7726 }
7728 /* Detect any port errors. */
7729 if ((bf_get(lpfc_sliport_status_err, &reg_data)) ||
7730 (rdy_chk >= 1000)) {
7731 phba->work_status[0] = readl(
7732 phba->sli4_hba.u.if_type2.ERR1regaddr);
7733 phba->work_status[1] = readl(
7734 phba->sli4_hba.u.if_type2.ERR2regaddr);
7735 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7736 "2890 Port error detected during port "
7737 "reset(%d): wait_tmo:%d ms, "
7738 "port status reg 0x%x, "
7739 "error 1=0x%x, error 2=0x%x\n",
7740 num_resets, rdy_chk*10,
7741 reg_data.word0,
7742 phba->work_status[0],
7743 phba->work_status[1]);
7744 rc = -ENODEV;
7745 }
7748 * Terminate the outer loop provided the Port indicated
7749 * ready within 10 seconds.
7750 */
7751 if (rdy_chk < 1000)
7752 break;
7753 }
7754 /* delay driver action following IF_TYPE_2 function reset */
7755 msleep(100);
7756 break;
7757 case LPFC_SLI_INTF_IF_TYPE_1:
7758 default:
7759 break;
7760 }
7762 out:
7763 /* Catch the not-ready port failure after a port reset. */
7764 if (num_resets >= MAX_IF_TYPE_2_RESETS)
7765 rc = -ENODEV;
7767 return rc;
7771 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
7772 * @phba: pointer to lpfc hba data structure.
7774 * This routine is invoked to set up the PCI device memory space for device
7775 * with SLI-4 interface spec.
7777 * Return codes
7778 * 0 - successful
7779 * other values - error
7782 lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
7784 struct pci_dev *pdev;
7785 unsigned long bar0map_len, bar1map_len, bar2map_len;
7786 int error = -ENODEV;
7787 uint32_t if_type;
7789 /* Obtain PCI device reference */
7793 pdev = phba->pcidev;
7795 /* Set the device DMA mask size */
7796 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
7797 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) {
7798 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
7799 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) {
7800 return error;
7801 }
7802 }
7805 * The BARs and register set definitions and offset locations are
7806 * dependent on the if_type.
7808 if (pci_read_config_dword(pdev, LPFC_SLI_INTF,
7809 &phba->sli4_hba.sli_intf.word0)) {
7810 return error;
7811 }
7813 /* There is no SLI3 failback for SLI4 devices. */
7814 if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) !=
7815 LPFC_SLI_INTF_VALID) {
7816 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7817 "2894 SLI_INTF reg contents invalid "
7818 "sli_intf reg 0x%x\n",
7819 phba->sli4_hba.sli_intf.word0);
7820 return error;
7821 }
7823 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
7825 * Get the bus address of SLI4 device Bar regions and the
7826 * number of bytes required by each mapping. The mapping of the
7827 * particular PCI BARs regions is dependent on the type of
7830 if (pci_resource_start(pdev, 0)) {
7831 phba->pci_bar0_map = pci_resource_start(pdev, 0);
7832 bar0map_len = pci_resource_len(pdev, 0);
7835 * Map SLI4 PCI Config Space Register base to a kernel virtual
7838 phba->sli4_hba.conf_regs_memmap_p =
7839 ioremap(phba->pci_bar0_map, bar0map_len);
7840 if (!phba->sli4_hba.conf_regs_memmap_p) {
7841 dev_printk(KERN_ERR, &pdev->dev,
7842 "ioremap failed for SLI4 PCI config "
7846 /* Set up BAR0 PCI config space register memory map */
7847 lpfc_sli4_bar0_register_memmap(phba, if_type);
7848 } else {
7849 phba->pci_bar0_map = pci_resource_start(pdev, 1);
7850 bar0map_len = pci_resource_len(pdev, 1);
7851 if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
7852 dev_printk(KERN_ERR, &pdev->dev,
7853 "FATAL - No BAR0 mapping for SLI4, if_type 2\n");
7856 phba->sli4_hba.conf_regs_memmap_p =
7857 ioremap(phba->pci_bar0_map, bar0map_len);
7858 if (!phba->sli4_hba.conf_regs_memmap_p) {
7859 dev_printk(KERN_ERR, &pdev->dev,
7860 "ioremap failed for SLI4 PCI config "
7864 lpfc_sli4_bar0_register_memmap(phba, if_type);
7867 if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
7868 (pci_resource_start(pdev, 2))) {
7870 * Map SLI4 if type 0 HBA Control Register base to a kernel
7871 * virtual address and setup the registers.
7873 phba->pci_bar1_map = pci_resource_start(pdev, 2);
7874 bar1map_len = pci_resource_len(pdev, 2);
7875 phba->sli4_hba.ctrl_regs_memmap_p =
7876 ioremap(phba->pci_bar1_map, bar1map_len);
7877 if (!phba->sli4_hba.ctrl_regs_memmap_p) {
7878 dev_printk(KERN_ERR, &pdev->dev,
7879 "ioremap failed for SLI4 HBA control registers.\n");
7880 goto out_iounmap_conf;
7882 lpfc_sli4_bar1_register_memmap(phba);
7885 if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
7886 (pci_resource_start(pdev, 4))) {
7888 * Map SLI4 if type 0 HBA Doorbell Register base to a kernel
7889 * virtual address and setup the registers.
7891 phba->pci_bar2_map = pci_resource_start(pdev, 4);
7892 bar2map_len = pci_resource_len(pdev, 4);
7893 phba->sli4_hba.drbl_regs_memmap_p =
7894 ioremap(phba->pci_bar2_map, bar2map_len);
7895 if (!phba->sli4_hba.drbl_regs_memmap_p) {
7896 dev_printk(KERN_ERR, &pdev->dev,
7897 "ioremap failed for SLI4 HBA doorbell registers.\n");
7898 goto out_iounmap_ctrl;
7900 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
7902 goto out_iounmap_all;
7905 return 0;
7907 out_iounmap_all:
7908 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
7909 out_iounmap_ctrl:
7910 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
7911 out_iounmap_conf:
7912 iounmap(phba->sli4_hba.conf_regs_memmap_p);
7913 out:
7914 return error;
7918 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
7919 * @phba: pointer to lpfc hba data structure.
7921 * This routine is invoked to unset the PCI device memory space for device
7922 * with SLI-4 interface spec.
7925 lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
7926 {
7927 uint32_t if_type;
7928 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
7930 switch (if_type) {
7931 case LPFC_SLI_INTF_IF_TYPE_0:
7932 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
7933 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
7934 iounmap(phba->sli4_hba.conf_regs_memmap_p);
7936 case LPFC_SLI_INTF_IF_TYPE_2:
7937 iounmap(phba->sli4_hba.conf_regs_memmap_p);
7939 case LPFC_SLI_INTF_IF_TYPE_1:
7941 dev_printk(KERN_ERR, &phba->pcidev->dev,
7942 "FATAL - unsupported SLI4 interface type - %d\n",
7949 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
7950 * @phba: pointer to lpfc hba data structure.
7952 * This routine is invoked to enable the MSI-X interrupt vectors to device
7953 * with SLI-3 interface specs. The kernel function pci_enable_msix() is
7954 * called to enable the MSI-X vectors. Note that pci_enable_msix(), once
7955 * invoked, enables either all or nothing, depending on the current
7956 * availability of PCI vector resources. The device driver is responsible
7957 * for calling the individual request_irq() to register each MSI-X vector
7958 * with an interrupt handler, which is done in this function. Note that
7959 * later when device is unloading, the driver should always call free_irq()
7960 * on all MSI-X vectors it has done request_irq() on before calling
7961 * pci_disable_msix(). Failure to do so results in a BUG_ON() and the device
7962 * will be left with MSI-X enabled and its vectors leaked.
7966 * other values - error
7969 lpfc_sli_enable_msix(struct lpfc_hba *phba)
7970 {
7971 int rc, i;
7972 LPFC_MBOXQ_t *pmb;
7974 /* Set up MSI-X multi-message vectors */
7975 for (i = 0; i < LPFC_MSIX_VECTORS; i++)
7976 phba->msix_entries[i].entry = i;
7978 /* Configure MSI-X capability structure */
7979 rc = pci_enable_msix(phba->pcidev, phba->msix_entries,
7980 ARRAY_SIZE(phba->msix_entries));
7981 if (rc) {
7982 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7983 "0420 PCI enable MSI-X failed (%d)\n", rc);
7984 goto msi_fail_out;
7985 }
7986 for (i = 0; i < LPFC_MSIX_VECTORS; i++)
7987 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7988 "0477 MSI-X entry[%d]: vector=x%x "
7990 phba->msix_entries[i].vector,
7991 phba->msix_entries[i].entry);
7993 * Assign MSI-X vectors to interrupt handlers
7996 /* vector-0 is associated to slow-path handler */
7997 rc = request_irq(phba->msix_entries[0].vector,
7998 &lpfc_sli_sp_intr_handler, IRQF_SHARED,
7999 LPFC_SP_DRIVER_HANDLER_NAME, phba);
8000 if (rc) {
8001 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
8002 "0421 MSI-X slow-path request_irq failed "
8003 "(%d)\n", rc);
8004 goto msi_fail_out;
8005 }
8007 /* vector-1 is associated to fast-path handler */
8008 rc = request_irq(phba->msix_entries[1].vector,
8009 &lpfc_sli_fp_intr_handler, IRQF_SHARED,
8010 LPFC_FP_DRIVER_HANDLER_NAME, phba);
8012 if (rc) {
8013 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
8014 "0429 MSI-X fast-path request_irq failed "
8015 "(%d)\n", rc);
8016 goto irq_fail_out;
8017 }
8020 * Configure HBA MSI-X attention conditions to messages
8022 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
8023 if (!pmb) {
8024 rc = -ENOMEM;
8026 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8027 "0474 Unable to allocate memory for issuing "
8028 "MBOX_CONFIG_MSI command\n");
8029 goto mem_fail_out;
8030 }
8031 rc = lpfc_config_msi(phba, pmb);
8032 if (rc)
8033 goto mbx_fail_out;
8034 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
8035 if (rc != MBX_SUCCESS) {
8036 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
8037 "0351 Config MSI mailbox command failed, "
8038 "mbxCmd x%x, mbxStatus x%x\n",
8039 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
8040 goto mbx_fail_out;
8041 }
8043 /* Free memory allocated for mailbox command */
8044 mempool_free(pmb, phba->mbox_mem_pool);
8048 /* Free memory allocated for mailbox command */
8049 mempool_free(pmb, phba->mbox_mem_pool);
8052 /* free the irq already requested */
8053 free_irq(phba->msix_entries[1].vector, phba);
8056 /* free the irq already requested */
8057 free_irq(phba->msix_entries[0].vector, phba);
8060 /* Unconfigure MSI-X capability structure */
8061 pci_disable_msix(phba->pcidev);
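/*
 * A minimal sketch (not part of the driver, guarded out of the build) of
 * the MSI-X setup/teardown contract the comment above describes:
 * pci_enable_msix() is all-or-nothing, and every vector successfully
 * request_irq()'d must be freed with free_irq() before pci_disable_msix().
 * All names below are hypothetical.
 */
#if 0
static int example_setup_msix(struct pci_dev *pdev, struct msix_entry *ent,
			      int nvec, irq_handler_t handler, void *dev_id)
{
	int i, rc;

	for (i = 0; i < nvec; i++)
		ent[i].entry = i;

	rc = pci_enable_msix(pdev, ent, nvec);	/* 0 only if all granted */
	if (rc)
		return rc;

	for (i = 0; i < nvec; i++) {
		rc = request_irq(ent[i].vector, handler, IRQF_SHARED,
				 "example-msix", dev_id);
		if (rc)
			goto unwind;
	}
	return 0;

unwind:
	while (--i >= 0)			/* free only what we took */
		free_irq(ent[i].vector, dev_id);
	pci_disable_msix(pdev);
	return rc;
}
#endif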
8066 * lpfc_sli_disable_msix - Disable MSI-X interrupt mode on SLI-3 device.
8067 * @phba: pointer to lpfc hba data structure.
8069 * This routine is invoked to release the MSI-X vectors and then disable the
8070 * MSI-X interrupt mode to device with SLI-3 interface spec.
8073 lpfc_sli_disable_msix(struct lpfc_hba *phba)
8077 /* Free up MSI-X multi-message vectors */
8078 for (i = 0; i < LPFC_MSIX_VECTORS; i++)
8079 free_irq(phba->msix_entries[i].vector, phba);
8081 pci_disable_msix(phba->pcidev);
8087 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
8088 * @phba: pointer to lpfc hba data structure.
8090 * This routine is invoked to enable MSI interrupt mode for a device with
8091 * the SLI-3 interface spec. The kernel function pci_enable_msi() is called
8092 * to enable the MSI vector. The device driver is responsible for calling
8093 * request_irq() to register the MSI vector with an interrupt handler,
8094 * which is done in this function.
8098 * other values - error
8101 lpfc_sli_enable_msi(struct lpfc_hba *phba)
8105 rc = pci_enable_msi(phba->pcidev);
8107 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8108 "0462 PCI enable MSI mode success.\n");
8110 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8111 "0471 PCI enable MSI mode failed (%d)\n", rc);
8115 rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
8116 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
8118 pci_disable_msi(phba->pcidev);
8119 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
8120 "0478 MSI request_irq failed (%d)\n", rc);
8126 * lpfc_sli_disable_msi - Disable MSI interrupt mode to SLI-3 device.
8127 * @phba: pointer to lpfc hba data structure.
8129 * This routine is invoked to disable MSI interrupt mode for a device with
8130 * the SLI-3 interface spec. The driver calls free_irq() on the MSI vector
8131 * it has done request_irq() on before calling pci_disable_msi(); failure
8132 * to do so results in a BUG_ON() and leaves the device leaking its vector.
8136 lpfc_sli_disable_msi(struct lpfc_hba *phba)
8138 free_irq(phba->pcidev->irq, phba);
8139 pci_disable_msi(phba->pcidev);
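/*
 * A small sketch of the strict MSI enable/disable pairing noted in the
 * comments above: request_irq() only after pci_enable_msi() succeeds, and
 * free_irq() strictly before pci_disable_msi(). Hypothetical names
 * throughout; not compiled into the driver.
 */
#if 0
static int example_setup_msi(struct pci_dev *pdev, irq_handler_t handler,
			     void *dev_id)
{
	int rc = pci_enable_msi(pdev);

	if (rc)
		return rc;
	rc = request_irq(pdev->irq, handler, IRQF_SHARED, "example-msi",
			 dev_id);
	if (rc)
		pci_disable_msi(pdev);	/* undo the enable on failure */
	return rc;
}

static void example_teardown_msi(struct pci_dev *pdev, void *dev_id)
{
	free_irq(pdev->irq, dev_id);	/* must precede pci_disable_msi() */
	pci_disable_msi(pdev);
}
#endif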
8144 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
8145 * @phba: pointer to lpfc hba data structure.
8147 * This routine is invoked to enable the device interrupt and associate the
8148 * driver's interrupt handler(s) with the interrupt vector(s) of a device
8149 * with the SLI-3 interface spec. Depending on the interrupt mode configured
8150 * for the driver, the driver will try to fall back from the configured
8151 * interrupt mode to an interrupt mode supported by the platform, kernel,
8153 * and device, in the order: MSI-X -> MSI -> IRQ.
8157 * other values - error
8160 lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
8162 uint32_t intr_mode = LPFC_INTR_ERROR;
8165 if (cfg_mode == 2) {
8166 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
8167 retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
8169 /* Now, try to enable MSI-X interrupt mode */
8170 retval = lpfc_sli_enable_msix(phba);
8172 /* Indicate initialization to MSI-X mode */
8173 phba->intr_type = MSIX;
8179 /* Fallback to MSI if MSI-X initialization failed */
8180 if (cfg_mode >= 1 && phba->intr_type == NONE) {
8181 retval = lpfc_sli_enable_msi(phba);
8183 /* Indicate initialization to MSI mode */
8184 phba->intr_type = MSI;
8189 /* Fall back to INTx if both MSI-X and MSI initialization failed */
8190 if (phba->intr_type == NONE) {
8191 retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
8192 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
8194 /* Indicate initialization to INTx mode */
8195 phba->intr_type = INTx;
8203 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
8204 * @phba: pointer to lpfc hba data structure.
8206 * This routine is invoked to disable the device interrupt and disassociate
8207 * the driver's interrupt handler(s) from the interrupt vector(s) of a
8208 * device with the SLI-3 interface spec. Depending on the interrupt mode,
8209 * the driver releases the interrupt vector(s) of the message signaled interrupt.
8212 lpfc_sli_disable_intr(struct lpfc_hba *phba)
8214 /* Disable the currently initialized interrupt mode */
8215 if (phba->intr_type == MSIX)
8216 lpfc_sli_disable_msix(phba);
8217 else if (phba->intr_type == MSI)
8218 lpfc_sli_disable_msi(phba);
8219 else if (phba->intr_type == INTx)
8220 free_irq(phba->pcidev->irq, phba);
8222 /* Reset interrupt management states */
8223 phba->intr_type = NONE;
8224 phba->sli.slistat.sli_intr = 0;
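/*
 * Control-flow sketch of the MSI-X -> MSI -> INTx fallback ladder
 * implemented above: cfg_mode 2 requests MSI-X, 1 requests MSI, 0
 * requests INTx, and each failed level falls through to the next. The
 * example_try_*() helpers are hypothetical stand-ins for the real enable
 * routines; the block is guarded out of the build.
 */
#if 0
static int example_try_msix(struct pci_dev *pdev);	/* hypothetical */
static int example_try_msi(struct pci_dev *pdev);	/* hypothetical */

static uint32_t example_enable_intr(struct pci_dev *pdev, uint32_t cfg_mode,
				    irq_handler_t handler, void *dev_id)
{
	if (cfg_mode == 2 && !example_try_msix(pdev))
		return 2;			/* running in MSI-X mode */
	if (cfg_mode >= 1 && !example_try_msi(pdev))
		return 1;			/* fell back to MSI */
	if (!request_irq(pdev->irq, handler, IRQF_SHARED, "example", dev_id))
		return 0;			/* fell back to INTx */
	return LPFC_INTR_ERROR;			/* nothing worked */
}
#endif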
8230 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
8231 * @phba: pointer to lpfc hba data structure.
8233 * This routine is invoked to enable the MSI-X interrupt vectors for a
8234 * device with the SLI-4 interface spec. The kernel function
8235 * pci_enable_msix() is called to enable the MSI-X vectors. Note that
8236 * pci_enable_msix(), once invoked, enables either all or nothing, depending
8237 * on the current availability of PCI vector resources. The device driver is
8238 * responsible for calling request_irq() to register each MSI-X vector with
8239 * an interrupt handler, which is done in this function. Note that later,
8240 * when the device is unloading, the driver must call free_irq() on every
8241 * MSI-X vector it has done request_irq() on before calling
8242 * pci_disable_msix(). Failure to do so results in a BUG_ON(), and the
8243 * device is left with MSI-X enabled, leaking its vectors.
8247 * other values - error
8250 lpfc_sli4_enable_msix(struct lpfc_hba *phba)
8252 int vectors, rc, index;
8254 /* Set up MSI-X multi-message vectors */
8255 for (index = 0; index < phba->cfg_fcp_io_channel; index++)
8256 phba->sli4_hba.msix_entries[index].entry = index;
8258 /* Configure MSI-X capability structure */
8259 vectors = phba->cfg_fcp_io_channel;
8260 enable_msix_vectors:
8261 rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries,
8265 goto enable_msix_vectors;
8267 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8268 "0484 PCI enable MSI-X failed (%d)\n", rc);
8272 /* Log MSI-X vector assignment */
8273 for (index = 0; index < vectors; index++)
8274 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8275 "0489 MSI-X entry[%d]: vector=x%x "
8276 "message=%d\n", index,
8277 phba->sli4_hba.msix_entries[index].vector,
8278 phba->sli4_hba.msix_entries[index].entry);
8281 * Assign MSI-X vectors to interrupt handlers
8283 for (index = 0; index < vectors; index++) {
8284 memset(&phba->sli4_hba.handler_name[index], 0, 16);
8285 sprintf((char *)&phba->sli4_hba.handler_name[index],
8286 LPFC_DRIVER_HANDLER_NAME"%d", index);
8288 phba->sli4_hba.fcp_eq_hdl[index].idx = index;
8289 phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
8290 atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].fcp_eq_in_use, 1);
8291 rc = request_irq(phba->sli4_hba.msix_entries[index].vector,
8292 &lpfc_sli4_hba_intr_handler, IRQF_SHARED,
8293 (char *)&phba->sli4_hba.handler_name[index],
8294 &phba->sli4_hba.fcp_eq_hdl[index]);
8296 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
8297 "0486 MSI-X fast-path (%d) "
8298 "request_irq failed (%d)\n", index, rc);
8303 if (vectors != phba->cfg_fcp_io_channel) {
8304 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8305 "3238 Reducing IO channels to match number of "
8306 "MSI-X vectors, requested %d got %d\n",
8307 phba->cfg_fcp_io_channel, vectors);
8308 phba->cfg_fcp_io_channel = vectors;
8313 /* free the irq already requested */
8314 for (--index; index >= 0; index--)
8315 free_irq(phba->sli4_hba.msix_entries[index].vector,
8316 &phba->sli4_hba.fcp_eq_hdl[index]);
8319 /* Unconfigure MSI-X capability structure */
8320 pci_disable_msix(phba->pcidev);
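/*
 * Sketch of the reduce-and-retry convention used in the routine above: in
 * this kernel generation pci_enable_msix() returns a positive count when
 * fewer vectors are available than requested, so the caller shrinks the
 * request to that count and retries. Hypothetical helper, for
 * illustration only.
 */
#if 0
static int example_enable_msix_best_effort(struct pci_dev *pdev,
					   struct msix_entry *ent, int want)
{
	int i, rc;

	for (i = 0; i < want; i++)
		ent[i].entry = i;
retry:
	rc = pci_enable_msix(pdev, ent, want);
	if (rc > 0) {			/* only rc vectors available */
		want = rc;
		goto retry;
	}
	return rc ? rc : want;		/* <0 error, else vectors granted */
}
#endif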
8325 * lpfc_sli4_disable_msix - Disable MSI-X interrupt mode to SLI-4 device
8326 * @phba: pointer to lpfc hba data structure.
8328 * This routine is invoked to release the MSI-X vectors and then disable the
8329 * MSI-X interrupt mode to device with SLI-4 interface spec.
8332 lpfc_sli4_disable_msix(struct lpfc_hba *phba)
8336 /* Free up MSI-X multi-message vectors */
8337 for (index = 0; index < phba->cfg_fcp_io_channel; index++)
8338 free_irq(phba->sli4_hba.msix_entries[index].vector,
8339 &phba->sli4_hba.fcp_eq_hdl[index]);
8342 pci_disable_msix(phba->pcidev);
8348 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
8349 * @phba: pointer to lpfc hba data structure.
8351 * This routine is invoked to enable MSI interrupt mode for a device with
8352 * the SLI-4 interface spec. The kernel function pci_enable_msi() is called
8353 * to enable the MSI vector. The device driver is responsible for calling
8354 * request_irq() to register the MSI vector with an interrupt handler,
8355 * which is done in this function.
8359 * other values - error
8362 lpfc_sli4_enable_msi(struct lpfc_hba *phba)
8366 rc = pci_enable_msi(phba->pcidev);
8368 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8369 "0487 PCI enable MSI mode success.\n");
8371 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8372 "0488 PCI enable MSI mode failed (%d)\n", rc);
8376 rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
8377 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
8379 pci_disable_msi(phba->pcidev);
8380 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
8381 "0490 MSI request_irq failed (%d)\n", rc);
8385 for (index = 0; index < phba->cfg_fcp_io_channel; index++) {
8386 phba->sli4_hba.fcp_eq_hdl[index].idx = index;
8387 phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
8394 * lpfc_sli4_disable_msi - Disable MSI interrupt mode to SLI-4 device
8395 * @phba: pointer to lpfc hba data structure.
8397 * This routine is invoked to disable MSI interrupt mode for a device with
8398 * the SLI-4 interface spec. The driver calls free_irq() on the MSI vector
8399 * it has done request_irq() on before calling pci_disable_msi(); failure
8400 * to do so results in a BUG_ON() and leaves the device leaking its vector.
8404 lpfc_sli4_disable_msi(struct lpfc_hba *phba)
8406 free_irq(phba->pcidev->irq, phba);
8407 pci_disable_msi(phba->pcidev);
8412 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
8413 * @phba: pointer to lpfc hba data structure.
8415 * This routine is invoked to enable the device interrupt and associate the
8416 * driver's interrupt handler(s) with the interrupt vector(s) of a device
8417 * with the SLI-4 interface spec. Depending on the interrupt mode configured
8418 * for the driver, the driver will try to fall back from the configured
8419 * interrupt mode to an interrupt mode supported by the platform, kernel,
8421 * and device, in the order: MSI-X -> MSI -> IRQ.
8425 * other values - error
8428 lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
8430 uint32_t intr_mode = LPFC_INTR_ERROR;
8433 if (cfg_mode == 2) {
8434 /* Preparation before conf_msi mbox cmd */
8437 /* Now, try to enable MSI-X interrupt mode */
8438 retval = lpfc_sli4_enable_msix(phba);
8440 /* Indicate initialization to MSI-X mode */
8441 phba->intr_type = MSIX;
8447 /* Fallback to MSI if MSI-X initialization failed */
8448 if (cfg_mode >= 1 && phba->intr_type == NONE) {
8449 retval = lpfc_sli4_enable_msi(phba);
8451 /* Indicate initialization to MSI mode */
8452 phba->intr_type = MSI;
8457 /* Fall back to INTx if both MSI-X and MSI initialization failed */
8458 if (phba->intr_type == NONE) {
8459 retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
8460 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
8462 /* Indicate initialization to INTx mode */
8463 phba->intr_type = INTx;
8465 for (index = 0; index < phba->cfg_fcp_io_channel;
8467 phba->sli4_hba.fcp_eq_hdl[index].idx = index;
8468 phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
8469 atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].
8478 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
8479 * @phba: pointer to lpfc hba data structure.
8481 * This routine is invoked to disable the device interrupt and disassociate
8482 * the driver's interrupt handler(s) from the interrupt vector(s) of a
8483 * device with the SLI-4 interface spec. Depending on the interrupt mode,
8484 * the driver releases the interrupt vector(s) of the message signaled interrupt.
8487 lpfc_sli4_disable_intr(struct lpfc_hba *phba)
8489 /* Disable the currently initialized interrupt mode */
8490 if (phba->intr_type == MSIX)
8491 lpfc_sli4_disable_msix(phba);
8492 else if (phba->intr_type == MSI)
8493 lpfc_sli4_disable_msi(phba);
8494 else if (phba->intr_type == INTx)
8495 free_irq(phba->pcidev->irq, phba);
8497 /* Reset interrupt management states */
8498 phba->intr_type = NONE;
8499 phba->sli.slistat.sli_intr = 0;
8505 * lpfc_unset_hba - Unset SLI3 hba device initialization
8506 * @phba: pointer to lpfc hba data structure.
8508 * This routine is invoked to unset the HBA device initialization steps to
8509 * a device with SLI-3 interface spec.
8512 lpfc_unset_hba(struct lpfc_hba *phba)
8514 struct lpfc_vport *vport = phba->pport;
8515 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
8517 spin_lock_irq(shost->host_lock);
8518 vport->load_flag |= FC_UNLOADING;
8519 spin_unlock_irq(shost->host_lock);
8521 kfree(phba->vpi_bmask);
8522 kfree(phba->vpi_ids);
8524 lpfc_stop_hba_timers(phba);
8526 phba->pport->work_port_events = 0;
8528 lpfc_sli_hba_down(phba);
8530 lpfc_sli_brdrestart(phba);
8532 lpfc_sli_disable_intr(phba);
8538 * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy
8539 * @phba: Pointer to HBA context object.
8541 * This function is called in the SLI4 code path to wait for the device's
8542 * busy XRI exchanges to complete. It checks for XRI exchange busy on
8543 * outstanding FCP and ELS I/Os every 10 ms for up to 10 seconds; after
8544 * that, it checks every 30 seconds, logging an error message each time,
8545 * and waits forever. Only when all busy XRI exchanges have completed does
8546 * the driver unload proceed with issuing the function reset ioctl mailbox
8547 * command to the CNA and releasing the rest of the driver unload
8548 * resources.
8551 lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
8554 int fcp_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
8555 int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
8557 while (!fcp_xri_cmpl || !els_xri_cmpl) {
8558 if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
8560 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8561 "2877 FCP XRI exchange busy "
8562 "wait time: %d seconds.\n",
8565 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8566 "2878 ELS XRI exchange busy "
8567 "wait time: %d seconds.\n",
8569 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
8570 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
8572 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
8573 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
8576 list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
8578 list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
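/*
 * Reduced sketch of the two-phase wait above: poll at the fast period
 * (T1) until the timeout threshold, then log and keep polling at the
 * slow period (T2) without ever giving up. The done() predicate and the
 * "xxxx" message number are hypothetical stand-ins for the list_empty()
 * checks and the driver's real message IDs.
 */
#if 0
static void example_two_phase_wait(struct lpfc_hba *phba, bool (*done)(void))
{
	int wait_time = 0;

	while (!done()) {
		if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"xxxx still busy after %d ms\n",
					wait_time);
			msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
			wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
		} else {
			msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
			wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
		}
	}
}
#endif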
8583 * lpfc_sli4_hba_unset - Unset the fcoe hba
8584 * @phba: Pointer to HBA context object.
8586 * This function is called in the SLI4 code path to reset the HBA's FCoE
8587 * function. The caller is not required to hold any lock. This routine
8588 * issues a PCI function reset mailbox command to reset the FCoE function.
8589 * At the end of the function, it calls lpfc_hba_down_post function to
8590 * free any pending commands.
8593 lpfc_sli4_hba_unset(struct lpfc_hba *phba)
8596 LPFC_MBOXQ_t *mboxq;
8597 struct pci_dev *pdev = phba->pcidev;
8599 lpfc_stop_hba_timers(phba);
8600 phba->sli4_hba.intr_enable = 0;
8603 * Gracefully wait out any potentially outstanding asynchronous mailbox command.
8607 /* First, block any pending async mailbox command from being posted */
8608 spin_lock_irq(&phba->hbalock);
8609 phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
8610 spin_unlock_irq(&phba->hbalock);
8611 /* Now, try to wait it out if we can */
8612 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8614 if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
8617 /* Forcefully release the outstanding mailbox command if timed out */
8618 if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8619 spin_lock_irq(&phba->hbalock);
8620 mboxq = phba->sli.mbox_active;
8621 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
8622 __lpfc_mbox_cmpl_put(phba, mboxq);
8623 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8624 phba->sli.mbox_active = NULL;
8625 spin_unlock_irq(&phba->hbalock);
8628 /* Abort all iocbs associated with the hba */
8629 lpfc_sli_hba_iocb_abort(phba);
8631 /* Wait for completion of device XRI exchange busy */
8632 lpfc_sli4_xri_exchange_busy_wait(phba);
8634 /* Disable PCI subsystem interrupt */
8635 lpfc_sli4_disable_intr(phba);
8637 /* Disable SR-IOV if enabled */
8638 if (phba->cfg_sriov_nr_virtfn)
8639 pci_disable_sriov(pdev);
8641 /* Stopping the kthread will trigger work_done one more time */
8642 kthread_stop(phba->worker_thread);
8644 /* Reset SLI4 HBA FCoE function */
8645 lpfc_pci_function_reset(phba);
8646 lpfc_sli4_queue_destroy(phba);
8648 /* Stop the SLI4 device port */
8649 phba->pport->work_port_events = 0;
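/*
 * Sketch of the bounded-wait-then-force pattern used above for the
 * active mailbox command: block new posts, poll for completion up to a
 * count, then let the caller force-complete under the lock. Illustrative
 * only; the 10 ms poll interval is an assumption, not taken from the
 * listing.
 */
#if 0
static void example_drain_mbox(struct lpfc_hba *phba)
{
	int wait_cnt = 0;

	spin_lock_irq(&phba->hbalock);
	phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;	/* block new posts */
	spin_unlock_irq(&phba->hbalock);

	while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		msleep(10);
		if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
			break;		/* give up; force-release follows */
	}
	/* caller force-completes the command under hbalock if still active */
}
#endif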
8653 * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities.
8654 * @phba: Pointer to HBA context object.
8655 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
8657 * This function is called in the SLI4 code path to read the port's
8658 * sli4 capabilities.
8660 * This function may be called from any context that can block-wait
8661 * for the completion. The expectation is that this routine is called
8662 * typically from probe_one or from the online routine.
8665 lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
8668 struct lpfc_mqe *mqe;
8669 struct lpfc_pc_sli4_params *sli4_params;
8673 mqe = &mboxq->u.mqe;
8675 /* Read the port's SLI4 Parameters port capabilities */
8676 lpfc_pc_sli4_params(mboxq);
8677 if (!phba->sli4_hba.intr_enable)
8678 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8680 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
8681 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
8687 sli4_params = &phba->sli4_hba.pc_sli4_params;
8688 sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params);
8689 sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params);
8690 sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params);
8691 sli4_params->featurelevel_1 = bf_get(featurelevel_1,
8692 &mqe->un.sli4_params);
8693 sli4_params->featurelevel_2 = bf_get(featurelevel_2,
8694 &mqe->un.sli4_params);
8695 sli4_params->proto_types = mqe->un.sli4_params.word3;
8696 sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len;
8697 sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params);
8698 sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params);
8699 sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params);
8700 sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params);
8701 sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params);
8702 sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params);
8703 sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params);
8704 sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params);
8705 sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params);
8706 sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params);
8707 sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params);
8708 sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params);
8709 sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params);
8710 sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params);
8711 sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params);
8712 sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params);
8713 sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
8714 sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
8715 sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);
8717 /* Make sure that sge_supp_len can be handled by the driver */
8718 if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
8719 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
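/*
 * Sketch of the issue-then-decode pattern both SLI4 parameter readers
 * follow: poll the mailbox when interrupts are not yet enabled,
 * otherwise block with the command-specific timeout. Illustrative
 * wrapper only; the real routines then decode the MQE with bf_get().
 */
#if 0
static int example_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	if (!phba->sli4_hba.intr_enable)
		return lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	return lpfc_sli_issue_mbox_wait(phba, mboxq,
					lpfc_mbox_tmo_val(phba, mboxq));
}
#endif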
8725 * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
8726 * @phba: Pointer to HBA context object.
8727 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
8729 * This function is called in the SLI4 code path to read the port's
8730 * sli4 capabilities.
8732 * This function may be called from any context that can block-wait
8733 * for the completion. The expectation is that this routine is called
8734 * typically from probe_one or from the online routine.
8737 lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
8740 struct lpfc_mqe *mqe = &mboxq->u.mqe;
8741 struct lpfc_pc_sli4_params *sli4_params;
8744 struct lpfc_sli4_parameters *mbx_sli4_parameters;
8747 * By default, the driver assumes the SLI4 port requires RPI
8748 * header postings. The SLI4_PARAM response will correct this assumption.
8751 phba->sli4_hba.rpi_hdrs_in_use = 1;
8753 /* Read the port's SLI4 Config Parameters */
8754 length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
8755 sizeof(struct lpfc_sli4_cfg_mhdr));
8756 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
8757 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
8758 length, LPFC_SLI4_MBX_EMBED);
8759 if (!phba->sli4_hba.intr_enable)
8760 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8762 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
8763 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
8767 sli4_params = &phba->sli4_hba.pc_sli4_params;
8768 mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
8769 sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters);
8770 sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters);
8771 sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters);
8772 sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1,
8773 mbx_sli4_parameters);
8774 sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2,
8775 mbx_sli4_parameters);
8776 if (bf_get(cfg_phwq, mbx_sli4_parameters))
8777 phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED;
8779 phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
8780 sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
8781 sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters);
8782 sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
8783 sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
8784 sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
8785 sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters);
8786 sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
8787 mbx_sli4_parameters);
8788 sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
8789 mbx_sli4_parameters);
8790 phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
8791 phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);
8793 /* Make sure that sge_supp_len can be handled by the driver */
8794 if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
8795 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
8801 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
8802 * @pdev: pointer to PCI device
8803 * @pid: pointer to PCI device identifier
8805 * This routine is to be called to attach a device with the SLI-3 interface
8806 * spec to the PCI subsystem. When an Emulex HBA with the SLI-3 interface
8807 * spec is presented on the PCI bus, the kernel PCI subsystem looks at the
8808 * PCI device-specific information of the device and driver to see if the
8809 * driver states that it can support this kind of device. If the match is
8810 * successful, the driver core invokes this routine. If this routine
8811 * determines it can claim the HBA, it does all the initialization needed to handle the HBA properly.
8814 * 0 - driver can claim the device
8815 * negative value - driver can not claim the device
8818 lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
8820 struct lpfc_hba *phba;
8821 struct lpfc_vport *vport = NULL;
8822 struct Scsi_Host *shost = NULL;
8824 uint32_t cfg_mode, intr_mode;
8826 /* Allocate memory for HBA structure */
8827 phba = lpfc_hba_alloc(pdev);
8831 /* Perform generic PCI device enabling operation */
8832 error = lpfc_enable_pci_dev(phba);
8836 /* Set up SLI API function jump table for PCI-device group-0 HBAs */
8837 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
8839 goto out_disable_pci_dev;
8841 /* Set up SLI-3 specific device PCI memory space */
8842 error = lpfc_sli_pci_mem_setup(phba);
8844 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8845 "1402 Failed to set up pci memory space.\n");
8846 goto out_disable_pci_dev;
8849 /* Set up phase-1 common device driver resources */
8850 error = lpfc_setup_driver_resource_phase1(phba);
8852 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8853 "1403 Failed to set up driver resource.\n");
8854 goto out_unset_pci_mem_s3;
8857 /* Set up SLI-3 specific device driver resources */
8858 error = lpfc_sli_driver_resource_setup(phba);
8860 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8861 "1404 Failed to set up driver resource.\n");
8862 goto out_unset_pci_mem_s3;
8865 /* Initialize and populate the iocb list per host */
8866 error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
8868 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8869 "1405 Failed to initialize iocb list.\n");
8870 goto out_unset_driver_resource_s3;
8873 /* Set up common device driver resources */
8874 error = lpfc_setup_driver_resource_phase2(phba);
8876 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8877 "1406 Failed to set up driver resource.\n");
8878 goto out_free_iocb_list;
8881 /* Get the default values for Model Name and Description */
8882 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
8884 /* Create SCSI host to the physical port */
8885 error = lpfc_create_shost(phba);
8887 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8888 "1407 Failed to create scsi host.\n");
8889 goto out_unset_driver_resource;
8892 /* Configure sysfs attributes */
8893 vport = phba->pport;
8894 error = lpfc_alloc_sysfs_attr(vport);
8896 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8897 "1476 Failed to allocate sysfs attr\n");
8898 goto out_destroy_shost;
8901 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
8902 /* Now, try to enable the interrupt and bring up the device */
8903 cfg_mode = phba->cfg_use_msi;
8905 /* Put device to a known state before enabling interrupt */
8906 lpfc_stop_port(phba);
8907 /* Configure and enable interrupt */
8908 intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
8909 if (intr_mode == LPFC_INTR_ERROR) {
8910 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8911 "0431 Failed to enable interrupt.\n");
8913 goto out_free_sysfs_attr;
8915 /* SLI-3 HBA setup */
8916 if (lpfc_sli_hba_setup(phba)) {
8917 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8918 "1477 Failed to set up hba\n");
8920 goto out_remove_device;
8923 /* Wait 50ms for the interrupts of previous mailbox commands */
8925 /* Check active interrupts on message signaled interrupts */
8926 if (intr_mode == 0 ||
8927 phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
8928 /* Log the current active interrupt mode */
8929 phba->intr_mode = intr_mode;
8930 lpfc_log_intr_mode(phba, intr_mode);
8933 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8934 "0447 Configure interrupt mode (%d) "
8935 "failed active interrupt test.\n",
8937 /* Disable the current interrupt mode */
8938 lpfc_sli_disable_intr(phba);
8939 /* Try next level of interrupt mode */
8940 cfg_mode = --intr_mode;
8944 /* Perform post initialization setup */
8945 lpfc_post_init_setup(phba);
8947 /* Check if there are static vports to be created. */
8948 lpfc_create_static_vport(phba);
8953 lpfc_unset_hba(phba);
8954 out_free_sysfs_attr:
8955 lpfc_free_sysfs_attr(vport);
8957 lpfc_destroy_shost(phba);
8958 out_unset_driver_resource:
8959 lpfc_unset_driver_resource_phase2(phba);
8961 lpfc_free_iocb_list(phba);
8962 out_unset_driver_resource_s3:
8963 lpfc_sli_driver_resource_unset(phba);
8964 out_unset_pci_mem_s3:
8965 lpfc_sli_pci_mem_unset(phba);
8966 out_disable_pci_dev:
8967 lpfc_disable_pci_dev(phba);
8969 scsi_host_put(shost);
8971 lpfc_hba_free(phba);
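/*
 * Skeleton of the goto-based unwind used by the probe path above: each
 * setup step that succeeds gains a matching label in the error ladder,
 * and a failure jumps to the label that tears down everything acquired
 * so far, in reverse order. All names are hypothetical.
 */
#if 0
static int example_probe(struct pci_dev *pdev)
{
	int error;

	error = example_step_a(pdev);		/* e.g. enable PCI device */
	if (error)
		return error;
	error = example_step_b(pdev);		/* e.g. map registers */
	if (error)
		goto undo_a;
	error = example_step_c(pdev);		/* e.g. create SCSI host */
	if (error)
		goto undo_b;
	return 0;

undo_b:
	example_undo_b(pdev);
undo_a:
	example_undo_a(pdev);
	return error;
}
#endif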
8976 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
8977 * @pdev: pointer to PCI device
8979 * This routine is to be called to detach a device with the SLI-3 interface
8980 * spec from the PCI subsystem. When an Emulex HBA with the SLI-3 interface
8981 * spec is removed from the PCI bus, it performs all the necessary cleanup
8982 * for the HBA device to be removed from the PCI subsystem properly.
8985 lpfc_pci_remove_one_s3(struct pci_dev *pdev)
8987 struct Scsi_Host *shost = pci_get_drvdata(pdev);
8988 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
8989 struct lpfc_vport **vports;
8990 struct lpfc_hba *phba = vport->phba;
8992 int bars = pci_select_bars(pdev, IORESOURCE_MEM);
8994 spin_lock_irq(&phba->hbalock);
8995 vport->load_flag |= FC_UNLOADING;
8996 spin_unlock_irq(&phba->hbalock);
8998 lpfc_free_sysfs_attr(vport);
9000 /* Release all the vports against this physical port */
9001 vports = lpfc_create_vport_work_array(phba);
9003 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
9004 if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
9006 fc_vport_terminate(vports[i]->fc_vport);
9008 lpfc_destroy_vport_work_array(phba, vports);
9010 /* Remove FC host and then SCSI host with the physical port */
9011 fc_remove_host(shost);
9012 scsi_remove_host(shost);
9013 lpfc_cleanup(vport);
9016 * Bring down the SLI Layer. This step disables all interrupts,
9017 * clears the rings, discards all mailbox commands, and resets
9021 /* HBA interrupt will be disabled after this call */
9022 lpfc_sli_hba_down(phba);
9023 /* Stopping the kthread will trigger work_done one more time */
9024 kthread_stop(phba->worker_thread);
9025 /* Final cleanup of txcmplq and reset the HBA */
9026 lpfc_sli_brdrestart(phba);
9028 kfree(phba->vpi_bmask);
9029 kfree(phba->vpi_ids);
9031 lpfc_stop_hba_timers(phba);
9032 spin_lock_irq(&phba->hbalock);
9033 list_del_init(&vport->listentry);
9034 spin_unlock_irq(&phba->hbalock);
9036 lpfc_debugfs_terminate(vport);
9038 /* Disable SR-IOV if enabled */
9039 if (phba->cfg_sriov_nr_virtfn)
9040 pci_disable_sriov(pdev);
9042 /* Disable interrupt */
9043 lpfc_sli_disable_intr(phba);
9045 pci_set_drvdata(pdev, NULL);
9046 scsi_host_put(shost);
9049 * Call scsi_free before mem_free since scsi bufs are released to their
9050 * corresponding pools here.
9052 lpfc_scsi_free(phba);
9053 lpfc_mem_free_all(phba);
9055 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
9056 phba->hbqslimp.virt, phba->hbqslimp.phys);
9058 /* Free resources associated with SLI2 interface */
9059 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
9060 phba->slim2p.virt, phba->slim2p.phys);
9062 /* unmap adapter SLIM and Control Registers */
9063 iounmap(phba->ctrl_regs_memmap_p);
9064 iounmap(phba->slim_memmap_p);
9066 lpfc_hba_free(phba);
9068 pci_release_selected_regions(pdev, bars);
9069 pci_disable_device(pdev);
9073 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
9074 * @pdev: pointer to PCI device
9075 * @msg: power management message
9077 * This routine is to be called from the kernel's PCI subsystem to support
9078 * system Power Management (PM) to device with SLI-3 interface spec. When
9079 * PM invokes this method, it quiesces the device by stopping the driver's
9080 * worker thread for the device, turning off the device's interrupt and
9081 * DMA, and bringing the device offline. Note that because the driver
9082 * implements only the minimum PM requirements for a power-aware driver's
9083 * suspend/resume support -- all possible PM messages (SUSPEND, HIBERNATE,
9084 * FREEZE) passed to the suspend() method are treated as SUSPEND, and the
9085 * driver fully reinitializes its device during the resume() method call --
9086 * the driver sets the device to the PCI_D3hot state in PCI config space
9087 * instead of setting it according to the @msg provided by the PM.
9090 * 0 - driver suspended the device
9094 lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
9096 struct Scsi_Host *shost = pci_get_drvdata(pdev);
9097 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9099 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9100 "0473 PCI device Power Management suspend.\n");
9102 /* Bring down the device */
9103 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
9105 kthread_stop(phba->worker_thread);
9107 /* Disable interrupt from device */
9108 lpfc_sli_disable_intr(phba);
9110 /* Save device state to PCI config space */
9111 pci_save_state(pdev);
9112 pci_set_power_state(pdev, PCI_D3hot);
9118 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
9119 * @pdev: pointer to PCI device
9121 * This routine is to be called from the kernel's PCI subsystem to support
9122 * system Power Management (PM) to device with SLI-3 interface spec. When PM
9123 * invokes this method, it restores the device's PCI config space state and
9124 * fully reinitializes the device and brings it online. Note that because
9125 * the driver implements only the minimum PM requirements for a power-aware
9126 * driver's suspend/resume support -- all possible PM messages (SUSPEND,
9127 * HIBERNATE, FREEZE) passed to the suspend() method are treated as SUSPEND,
9128 * and the driver fully reinitializes its device during the resume() method
9129 * call -- the device is set to PCI_D0 directly in PCI config space before
9130 * restoring the state.
9133 * 0 - driver resumed the device
9137 lpfc_pci_resume_one_s3(struct pci_dev *pdev)
9139 struct Scsi_Host *shost = pci_get_drvdata(pdev);
9140 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9144 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9145 "0452 PCI device Power Management resume.\n");
9147 /* Restore device state from PCI config space */
9148 pci_set_power_state(pdev, PCI_D0);
9149 pci_restore_state(pdev);
9152 * As the new kernel behavior of pci_restore_state() API call clears
9153 * device saved_state flag, need to save the restored state again.
9155 pci_save_state(pdev);
9157 if (pdev->is_busmaster)
9158 pci_set_master(pdev);
9160 /* Startup the kernel thread for this host adapter. */
9161 phba->worker_thread = kthread_run(lpfc_do_work, phba,
9162 "lpfc_worker_%d", phba->brd_no);
9163 if (IS_ERR(phba->worker_thread)) {
9164 error = PTR_ERR(phba->worker_thread);
9165 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9166 "0434 PM resume failed to start worker "
9167 "thread: error=x%x.\n", error);
9171 /* Configure and enable interrupt */
9172 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
9173 if (intr_mode == LPFC_INTR_ERROR) {
9174 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9175 "0430 PM resume Failed to enable interrupt\n");
9178 phba->intr_mode = intr_mode;
9180 /* Restart HBA and bring it online */
9181 lpfc_sli_brdrestart(phba);
9184 /* Log the current active interrupt mode */
9185 lpfc_log_intr_mode(phba, phba->intr_mode);
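/*
 * Sketch of the resume-path PCI state dance called out above: power the
 * function up, restore config space, then save it again because
 * pci_restore_state() clears the device's saved_state flag. Error
 * handling elided; illustrative only.
 */
#if 0
static void example_pci_resume_state(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_save_state(pdev);		/* re-arm saved_state for next cycle */
	if (pdev->is_busmaster)
		pci_set_master(pdev);
}
#endif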
9191 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover
9192 * @phba: pointer to lpfc hba data structure.
9194 * This routine is called to prepare the SLI3 device for PCI slot recover. It
9195 * aborts all the outstanding SCSI I/Os to the pci device.
9198 lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
9200 struct lpfc_sli *psli = &phba->sli;
9201 struct lpfc_sli_ring *pring;
9203 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9204 "2723 PCI channel I/O abort preparing for recovery\n");
9207 * There may be errored I/Os through the HBA; abort all I/Os on the txcmplq
9208 * and let the SCSI mid-layer retry them to recover.
9210 pring = &psli->ring[psli->fcp_ring];
9211 lpfc_sli_abort_iocb_ring(phba, pring);
9215 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset
9216 * @phba: pointer to lpfc hba data structure.
9218 * This routine is called to prepare the SLI3 device for PCI slot reset. It
9219 * disables the device interrupt and pci device, and aborts the internal FCP ring.
9223 lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
9225 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9226 "2710 PCI channel disable preparing for reset\n");
9228 /* Block any management I/Os to the device */
9229 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
9231 /* Block all SCSI devices' I/Os on the host */
9232 lpfc_scsi_dev_block(phba);
9234 /* stop all timers */
9235 lpfc_stop_hba_timers(phba);
9237 /* Disable interrupt and pci device */
9238 lpfc_sli_disable_intr(phba);
9239 pci_disable_device(phba->pcidev);
9241 /* Flush all driver's outstanding SCSI I/Os as we are to reset */
9242 lpfc_sli_flush_fcp_rings(phba);
9246 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable
9247 * @phba: pointer to lpfc hba data structure.
9249 * This routine is called to prepare the SLI3 device for permanent PCI slot
9250 * disabling. It blocks SCSI transport layer traffic and flushes the FCP rings.
9254 lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
9256 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9257 "2711 PCI channel permanent disable for failure\n");
9258 /* Block all SCSI devices' I/Os on the host */
9259 lpfc_scsi_dev_block(phba);
9261 /* stop all timers */
9262 lpfc_stop_hba_timers(phba);
9264 /* Clean up all driver's outstanding SCSI I/Os */
9265 lpfc_sli_flush_fcp_rings(phba);
9269 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
9270 * @pdev: pointer to PCI device.
9271 * @state: the current PCI connection state.
9273 * This routine is called from the PCI subsystem for I/O error handling on
9274 * a device with the SLI-3 interface spec. It is called by the PCI
9275 * subsystem after a PCI bus error affecting this device has been detected.
9276 * When this function is invoked, it needs to stop all the I/Os and
9277 * interrupt(s) to the device. Once that is done, it returns
9278 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery.
9282 * PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
9283 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
9284 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
9286 static pci_ers_result_t
9287 lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
9289 struct Scsi_Host *shost = pci_get_drvdata(pdev);
9290 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9293 case pci_channel_io_normal:
9294 /* Non-fatal error, prepare for recovery */
9295 lpfc_sli_prep_dev_for_recover(phba);
9296 return PCI_ERS_RESULT_CAN_RECOVER;
9297 case pci_channel_io_frozen:
9298 /* Fatal error, prepare for slot reset */
9299 lpfc_sli_prep_dev_for_reset(phba);
9300 return PCI_ERS_RESULT_NEED_RESET;
9301 case pci_channel_io_perm_failure:
9302 /* Permanent failure, prepare for device down */
9303 lpfc_sli_prep_dev_for_perm_failure(phba);
9304 return PCI_ERS_RESULT_DISCONNECT;
9306 /* Unknown state, prepare and request slot reset */
9307 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9308 "0472 Unknown PCI error state: x%x\n", state);
9309 lpfc_sli_prep_dev_for_reset(phba);
9310 return PCI_ERS_RESULT_NEED_RESET;
9315 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
9316 * @pdev: pointer to PCI device.
9318 * This routine is called from the PCI subsystem for error handling on a
9319 * device with the SLI-3 interface spec. It is called after the PCI bus has been
9320 * reset to restart the PCI card from scratch, as if from a cold-boot.
9321 * During the PCI subsystem error recovery, after driver returns
9322 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
9323 * recovery and then call this routine before calling the .resume method
9324 * to recover the device. This function will initialize the HBA device,
9325 * enable the interrupt, but it will just put the HBA into an offline state
9326 * without passing any I/O traffic.
9329 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
9330 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
9332 static pci_ers_result_t
9333 lpfc_io_slot_reset_s3(struct pci_dev *pdev)
9335 struct Scsi_Host *shost = pci_get_drvdata(pdev);
9336 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9337 struct lpfc_sli *psli = &phba->sli;
9340 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
9341 if (pci_enable_device_mem(pdev)) {
9342 printk(KERN_ERR "lpfc: Cannot re-enable "
9343 "PCI device after reset.\n");
9344 return PCI_ERS_RESULT_DISCONNECT;
9347 pci_restore_state(pdev);
9350 * As the new kernel behavior of pci_restore_state() API call clears
9351 * device saved_state flag, need to save the restored state again.
9353 pci_save_state(pdev);
9355 if (pdev->is_busmaster)
9356 pci_set_master(pdev);
9358 spin_lock_irq(&phba->hbalock);
9359 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
9360 spin_unlock_irq(&phba->hbalock);
9362 /* Configure and enable interrupt */
9363 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
9364 if (intr_mode == LPFC_INTR_ERROR) {
9365 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9366 "0427 Cannot re-enable interrupt after "
9368 return PCI_ERS_RESULT_DISCONNECT;
9370 phba->intr_mode = intr_mode;
9372 /* Take device offline, it will perform cleanup */
9373 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
9375 lpfc_sli_brdrestart(phba);
9377 /* Log the current active interrupt mode */
9378 lpfc_log_intr_mode(phba, phba->intr_mode);
9380 return PCI_ERS_RESULT_RECOVERED;
9384 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
9385 * @pdev: pointer to PCI device
9387 * This routine is called from the PCI subsystem for error handling on a
9388 * device with the SLI-3 interface spec. It is called when kernel error
9389 * recovery tells the lpfc driver that it is ok to resume normal PCI
9390 * operation after PCI bus error recovery. After this call, traffic can start to flow from this device again.
9394 lpfc_io_resume_s3(struct pci_dev *pdev)
9396 struct Scsi_Host *shost = pci_get_drvdata(pdev);
9397 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9399 /* Bring device online, it will be no-op for non-fatal error resume */
9402 /* Clean up Advanced Error Reporting (AER) if needed */
9403 if (phba->hba_flag & HBA_AER_ENABLED)
9404 pci_cleanup_aer_uncorrect_error_status(pdev);
9408 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
9409 * @phba: pointer to lpfc hba data structure.
9411 * Returns the number of ELS/CT IOCBs to reserve.
9414 lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
9416 int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
9418 if (phba->sli_rev == LPFC_SLI_REV4) {
9421 else if (max_xri <= 256)
9423 else if (max_xri <= 512)
9425 else if (max_xri <= 1024)
9427 else if (max_xri <= 1536)
9429 else if (max_xri <= 2048)
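/*
 * Shape of the tiered reservation above, with made-up tier values (the
 * listing elides the driver's actual return values): the ELS/CT IOCB
 * reserve steps up with the configured max_xri.
 */
#if 0
static int example_els_iocb_cnt(int max_xri)
{
	if (max_xri <= 256)
		return 25;		/* illustrative value */
	else if (max_xri <= 1024)
		return 100;		/* illustrative value */
	else
		return 250;		/* illustrative value */
}
#endif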
9438 * lpfc_write_firmware - attempt to write a firmware image to the port
9439 * @fw: pointer to firmware image returned from request_firmware.
9440 * @phba: pointer to lpfc hba data structure.
9444 lpfc_write_firmware(const struct firmware *fw, void *context)
9446 struct lpfc_hba *phba = (struct lpfc_hba *)context;
9447 char fwrev[FW_REV_STR_SIZE];
9448 struct lpfc_grp_hdr *image;
9449 struct list_head dma_buffer_list;
9451 struct lpfc_dmabuf *dmabuf, *next;
9452 uint32_t offset = 0, temp_offset = 0;
9454 /* The firmware can be NULL in no-wait mode; sanity check it */
9459 image = (struct lpfc_grp_hdr *)fw->data;
9461 INIT_LIST_HEAD(&dma_buffer_list);
9462 if ((be32_to_cpu(image->magic_number) != LPFC_GROUP_OJECT_MAGIC_NUM) ||
9463 (bf_get_be32(lpfc_grp_hdr_file_type, image) !=
9464 LPFC_FILE_TYPE_GROUP) ||
9465 (bf_get_be32(lpfc_grp_hdr_id, image) != LPFC_FILE_ID_GROUP) ||
9466 (be32_to_cpu(image->size) != fw->size)) {
9467 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9468 "3022 Invalid FW image found. "
9469 "Magic:%x Type:%x ID:%x\n",
9470 be32_to_cpu(image->magic_number),
9471 bf_get_be32(lpfc_grp_hdr_file_type, image),
9472 bf_get_be32(lpfc_grp_hdr_id, image));
9476 lpfc_decode_firmware_rev(phba, fwrev, 1);
9477 if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) {
9478 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9479 "3023 Updating Firmware, Current Version:%s "
9481 fwrev, image->revision);
9482 for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
9483 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
9489 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
9493 if (!dmabuf->virt) {
9498 list_add_tail(&dmabuf->list, &dma_buffer_list);
9500 while (offset < fw->size) {
9501 temp_offset = offset;
9502 list_for_each_entry(dmabuf, &dma_buffer_list, list) {
9503 if (temp_offset + SLI4_PAGE_SIZE > fw->size) {
9504 memcpy(dmabuf->virt,
9505 fw->data + temp_offset,
9506 fw->size - temp_offset);
9507 temp_offset = fw->size;
9510 memcpy(dmabuf->virt, fw->data + temp_offset,
9512 temp_offset += SLI4_PAGE_SIZE;
9514 rc = lpfc_wr_object(phba, &dma_buffer_list,
9515 (fw->size - offset), &offset);
9523 list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) {
9524 list_del(&dmabuf->list);
9525 dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
9526 dmabuf->virt, dmabuf->phys);
9529 release_firmware(fw);
9531 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9532 "3024 Firmware update done: %d.\n", rc);
9537 * lpfc_sli4_request_firmware_update - Request linux generic firmware upgrade
9538 * @phba: pointer to lpfc hba data structure.
9540 * This routine is called to perform a Linux generic firmware upgrade on a
9541 * device that supports this feature.
9544 lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade)
9546 uint8_t file_name[ELX_MODEL_NAME_SIZE];
9548 const struct firmware *fw;
9550 /* Only supported on SLI4 interface type 2 for now */
9551 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
9552 LPFC_SLI_INTF_IF_TYPE_2)
9555 snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", phba->ModelName);
9557 if (fw_upgrade == INT_FW_UPGRADE) {
9558 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
9559 file_name, &phba->pcidev->dev,
9560 GFP_KERNEL, (void *)phba,
9561 lpfc_write_firmware);
9562 } else if (fw_upgrade == RUN_FW_UPGRADE) {
9563 ret = request_firmware(&fw, file_name, &phba->pcidev->dev);
9565 lpfc_write_firmware(fw, (void *)phba);
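/*
 * Sketch of the two firmware-request flavors used above: the nowait
 * variant schedules lpfc_write_firmware() as a completion callback
 * (INT_FW_UPGRADE), while the synchronous variant blocks and then
 * invokes it directly (RUN_FW_UPGRADE). Illustrative wrapper only;
 * lpfc_write_firmware() releases the firmware when it finishes.
 */
#if 0
static int example_request_fw(struct lpfc_hba *phba, const char *name,
			      bool nowait)
{
	const struct firmware *fw;
	int ret;

	if (nowait)
		return request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
					       name, &phba->pcidev->dev,
					       GFP_KERNEL, (void *)phba,
					       lpfc_write_firmware);

	ret = request_firmware(&fw, name, &phba->pcidev->dev);
	if (!ret)
		lpfc_write_firmware(fw, (void *)phba);	/* releases fw */
	return ret;
}
#endif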
9574 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
9575 * @pdev: pointer to PCI device
9576 * @pid: pointer to PCI device identifier
9578 * This routine is called from the kernel's PCI subsystem to attach a
9579 * device with the SLI-4 interface spec. When an Emulex HBA with the SLI-4
9580 * interface spec is presented on the PCI bus, the kernel PCI subsystem
9581 * looks at the PCI device-specific information of the device and driver to
9582 * see if the driver states that it can support this kind of device. If the
9583 * match is successful, the driver core invokes this routine. If this
9584 * routine determines it can claim the HBA, it does all the initialization needed to handle the HBA properly.
9588 * 0 - driver can claim the device
9589 * negative value - driver can not claim the device
9592 lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
9594 struct lpfc_hba *phba;
9595 struct lpfc_vport *vport = NULL;
9596 struct Scsi_Host *shost = NULL;
9598 uint32_t cfg_mode, intr_mode;
9599 int adjusted_fcp_io_channel;
9601 /* Allocate memory for HBA structure */
9602 phba = lpfc_hba_alloc(pdev);
9606 /* Perform generic PCI device enabling operation */
9607 error = lpfc_enable_pci_dev(phba);
9611 /* Set up SLI API function jump table for PCI-device group-1 HBAs */
9612 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
9614 goto out_disable_pci_dev;
9616 /* Set up SLI-4 specific device PCI memory space */
9617 error = lpfc_sli4_pci_mem_setup(phba);
9619 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9620 "1410 Failed to set up pci memory space.\n");
9621 goto out_disable_pci_dev;
9624 /* Set up phase-1 common device driver resources */
9625 error = lpfc_setup_driver_resource_phase1(phba);
9627 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9628 "1411 Failed to set up driver resource.\n");
9629 goto out_unset_pci_mem_s4;
9632 /* Set up SLI-4 Specific device driver resources */
9633 error = lpfc_sli4_driver_resource_setup(phba);
9635 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9636 "1412 Failed to set up driver resource.\n");
9637 goto out_unset_pci_mem_s4;
9640 /* Initialize and populate the iocb list per host */
9642 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9643 "2821 initialize iocb list %d.\n",
9644 phba->cfg_iocb_cnt*1024);
9645 error = lpfc_init_iocb_list(phba, phba->cfg_iocb_cnt*1024);
9648 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9649 "1413 Failed to initialize iocb list.\n");
9650 goto out_unset_driver_resource_s4;
9653 INIT_LIST_HEAD(&phba->active_rrq_list);
9654 INIT_LIST_HEAD(&phba->fcf.fcf_pri_list);
9656 /* Set up common device driver resources */
9657 error = lpfc_setup_driver_resource_phase2(phba);
9659 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9660 "1414 Failed to set up driver resource.\n");
9661 goto out_free_iocb_list;
9664 /* Get the default values for Model Name and Description */
9665 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
9667 /* Create SCSI host to the physical port */
9668 error = lpfc_create_shost(phba);
9670 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9671 "1415 Failed to create scsi host.\n");
9672 goto out_unset_driver_resource;
9675 /* Configure sysfs attributes */
9676 vport = phba->pport;
9677 error = lpfc_alloc_sysfs_attr(vport);
9679 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9680 "1416 Failed to allocate sysfs attr\n");
9681 goto out_destroy_shost;
9684 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
9685 /* Now, try to enable the interrupt and bring up the device */
9686 cfg_mode = phba->cfg_use_msi;
9688 /* Put device to a known state before enabling interrupt */
9689 lpfc_stop_port(phba);
9690 /* Configure and enable interrupt */
9691 intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
9692 if (intr_mode == LPFC_INTR_ERROR) {
9693 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9694 "0426 Failed to enable interrupt.\n");
9696 goto out_free_sysfs_attr;
9698 /* Default to single EQ for non-MSI-X */
9699 if (phba->intr_type != MSIX)
9700 adjusted_fcp_io_channel = 1;
9702 adjusted_fcp_io_channel = phba->cfg_fcp_io_channel;
9703 phba->cfg_fcp_io_channel = adjusted_fcp_io_channel;
9704 /* Set up SLI-4 HBA */
9705 if (lpfc_sli4_hba_setup(phba)) {
9706 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9707 "1421 Failed to set up hba\n");
9709 goto out_disable_intr;
9712 /* Log the current active interrupt mode */
9713 phba->intr_mode = intr_mode;
9714 lpfc_log_intr_mode(phba, intr_mode);
9716 /* Perform post initialization setup */
9717 lpfc_post_init_setup(phba);
9719 /* check for firmware upgrade or downgrade */
9720 if (phba->cfg_request_firmware_upgrade)
9721 ret = lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE);
9723 /* Check if there are static vports to be created. */
9724 lpfc_create_static_vport(phba);
9728 lpfc_sli4_disable_intr(phba);
9729 out_free_sysfs_attr:
9730 lpfc_free_sysfs_attr(vport);
9732 lpfc_destroy_shost(phba);
9733 out_unset_driver_resource:
9734 lpfc_unset_driver_resource_phase2(phba);
9736 lpfc_free_iocb_list(phba);
9737 out_unset_driver_resource_s4:
9738 lpfc_sli4_driver_resource_unset(phba);
9739 out_unset_pci_mem_s4:
9740 lpfc_sli4_pci_mem_unset(phba);
9741 out_disable_pci_dev:
9742 lpfc_disable_pci_dev(phba);
9744 scsi_host_put(shost);
9746 lpfc_hba_free(phba);
9751 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
9752 * @pdev: pointer to PCI device
9754 * This routine is called from the kernel's PCI subsystem to detach a
9755 * device with the SLI-4 interface spec. When an Emulex HBA with the SLI-4
9756 * interface spec is removed from the PCI bus, it performs all the necessary
9757 * cleanup for the HBA device to be removed from the PCI subsystem properly.
9760 lpfc_pci_remove_one_s4(struct pci_dev *pdev)
9762 struct Scsi_Host *shost = pci_get_drvdata(pdev);
9763 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
9764 struct lpfc_vport **vports;
9765 struct lpfc_hba *phba = vport->phba;
9768 /* Mark the device unloading flag */
9769 spin_lock_irq(&phba->hbalock);
9770 vport->load_flag |= FC_UNLOADING;
9771 spin_unlock_irq(&phba->hbalock);
9773 /* Free the HBA sysfs attributes */
9774 lpfc_free_sysfs_attr(vport);
9776 /* Release all the vports against this physical port */
9777 vports = lpfc_create_vport_work_array(phba);
9779 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
9780 if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
9782 fc_vport_terminate(vports[i]->fc_vport);
9784 lpfc_destroy_vport_work_array(phba, vports);
9786 /* Remove FC host and then SCSI host with the physical port */
9787 fc_remove_host(shost);
9788 scsi_remove_host(shost);
9790 /* Perform cleanup on the physical port */
9791 lpfc_cleanup(vport);
9794 * Bring down the SLI Layer. This step disables all interrupts,
9795 * clears the rings, discards all mailbox commands, and resets
9796 * the HBA FCoE function.
9798 lpfc_debugfs_terminate(vport);
9799 lpfc_sli4_hba_unset(phba);
9801 spin_lock_irq(&phba->hbalock);
9802 list_del_init(&vport->listentry);
9803 spin_unlock_irq(&phba->hbalock);
9805 /* Perform scsi free before driver resource_unset since scsi
9806 * buffers are released to their corresponding pools here.
9808 lpfc_scsi_free(phba);
9810 lpfc_sli4_driver_resource_unset(phba);
9812 /* Unmap adapter Control and Doorbell registers */
9813 lpfc_sli4_pci_mem_unset(phba);
9815 /* Release PCI resources and disable device's PCI function */
9816 scsi_host_put(shost);
9817 lpfc_disable_pci_dev(phba);
9819 /* Finally, free the driver's device data structure */
9820 lpfc_hba_free(phba);
9826 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
9827 * @pdev: pointer to PCI device
9828 * @msg: power management message
9830 * This routine is called from the kernel's PCI subsystem to support system
9831 * Power Management (PM) for a device with the SLI-4 interface spec. When PM
9832 * invokes this method, it quiesces the device by stopping the driver's
9833 * worker thread for the device, turning off the device's interrupt and DMA,
9834 * and bringing the device offline. Note that because the driver implements
9835 * only the minimum PM requirements for a power-aware driver's suspend/resume
9836 * support -- all possible PM messages (SUSPEND, HIBERNATE, FREEZE) passed to
9837 * the suspend() method are treated as SUSPEND, and the driver fully
9838 * reinitializes its device during the resume() method call -- the driver
9839 * sets the device to the PCI_D3hot state in PCI config space instead of
9840 * setting it according to the @msg provided by the PM.
9843 * 0 - driver suspended the device
9847 lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
9849 struct Scsi_Host *shost = pci_get_drvdata(pdev);
9850 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9852 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9853 "2843 PCI device Power Management suspend.\n");
9855 /* Bring down the device */
9856 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
9858 kthread_stop(phba->worker_thread);
9860 /* Disable interrupt from device */
9861 lpfc_sli4_disable_intr(phba);
9862 lpfc_sli4_queue_destroy(phba);
9864 /* Save device state to PCI config space */
9865 pci_save_state(pdev);
9866 pci_set_power_state(pdev, PCI_D3hot);
9872 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
9873 * @pdev: pointer to PCI device
9875 * This routine is called from the kernel's PCI subsystem to support system
9876 * Power Management (PM) for a device with the SLI-4 interface spec. When PM
9877 * invokes this method, it restores the device's PCI config space state and
9878 * fully reinitializes the device and brings it online. Note that because
9879 * the driver implements only the minimum PM requirements for a power-aware
9880 * driver's suspend/resume support -- all possible PM messages (SUSPEND,
9881 * HIBERNATE, FREEZE) passed to the suspend() method are treated as SUSPEND,
9882 * and the driver fully reinitializes its device during the resume() method
9883 * call -- the device is set to PCI_D0 directly in PCI config space before restoring the state.
9887 * 0 - driver resumed the device
9891 lpfc_pci_resume_one_s4(struct pci_dev *pdev)
9893 struct Scsi_Host *shost = pci_get_drvdata(pdev);
9894 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9898 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9899 "0292 PCI device Power Management resume.\n");
9901 /* Restore device state from PCI config space */
9902 pci_set_power_state(pdev, PCI_D0);
9903 pci_restore_state(pdev);
9906 * As the new kernel behavior of pci_restore_state() API call clears
9907 * device saved_state flag, need to save the restored state again.
9909 pci_save_state(pdev);
9911 if (pdev->is_busmaster)
9912 pci_set_master(pdev);
9914 /* Startup the kernel thread for this host adapter. */
9915 phba->worker_thread = kthread_run(lpfc_do_work, phba,
9916 "lpfc_worker_%d", phba->brd_no);
9917 if (IS_ERR(phba->worker_thread)) {
9918 error = PTR_ERR(phba->worker_thread);
9919 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9920 "0293 PM resume failed to start worker "
9921 "thread: error=x%x.\n", error);
9925 /* Configure and enable interrupt */
9926 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
9927 if (intr_mode == LPFC_INTR_ERROR) {
9928 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9929 "0294 PM resume Failed to enable interrupt\n");
9932 phba->intr_mode = intr_mode;
9934 /* Restart HBA and bring it online */
9935 lpfc_sli_brdrestart(phba);
9938 /* Log the current active interrupt mode */
9939 lpfc_log_intr_mode(phba, phba->intr_mode);
/**
 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for PCI slot recover. It
 * aborts all the outstanding SCSI I/Os to the pci device.
 **/
static void
lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2828 PCI channel I/O abort preparing for recovery\n");
	/*
	 * There may be errored I/Os through HBA, abort all I/Os on txcmplq
	 * and let the SCSI mid-layer retry them to recover.
	 */
	pring = &psli->ring[psli->fcp_ring];
	lpfc_sli_abort_iocb_ring(phba, pring);
}
/**
 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for PCI slot reset. It
 * disables the device interrupt and pci device, and aborts the internal FCP
 * pending I/Os.
 **/
static void
lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2826 PCI channel disable preparing for reset\n");

	/* Block any management I/Os to the device */
	lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT);

	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Disable interrupt and pci device */
	lpfc_sli4_disable_intr(phba);
	lpfc_sli4_queue_destroy(phba);
	pci_disable_device(phba->pcidev);

	/* Flush all driver's outstanding SCSI I/Os as we are to reset */
	lpfc_sli_flush_fcp_rings(phba);
}
/**
 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for PCI slot permanently
 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
 * pending I/Os.
 **/
static void
lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2827 PCI channel permanent disable for failure\n");

	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Clean up all driver's outstanding SCSI I/Os */
	lpfc_sli_flush_fcp_rings(phba);
}
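/*
 * The three prep helpers above map one-to-one onto the pci_channel_state_t
 * values handled by lpfc_io_error_detected_s4() below: io_normal ->
 * recover, io_frozen -> reset, io_perm_failure -> permanent disable.
 */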
/**
 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is called from the PCI subsystem for error handling to device
 * with SLI-4 interface spec. This function is called by the PCI subsystem
 * after a PCI bus error affecting this device has been detected. When this
 * function is invoked, it will need to stop all the I/Os and interrupt(s)
 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET
 * for the PCI subsystem to perform proper recovery as desired.
 *
 * Return codes
 *	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (state) {
	case pci_channel_io_normal:
		/* Non-fatal error, prepare for recovery */
		lpfc_sli4_prep_dev_for_recover(phba);
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		/* Fatal error, prepare for slot reset */
		lpfc_sli4_prep_dev_for_reset(phba);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		/* Permanent failure, prepare for device down */
		lpfc_sli4_prep_dev_for_perm_failure(phba);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		/* Unknown state, prepare and request slot reset */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2825 Unknown PCI error state: x%x\n", state);
		lpfc_sli4_prep_dev_for_reset(phba);
		return PCI_ERS_RESULT_NEED_RESET;
	}
}
/**
 * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch
 * @pdev: pointer to PCI device.
 *
 * This routine is called from the PCI subsystem for error handling to device
 * with SLI-4 interface spec. It is called after PCI bus has been reset to
 * restart the PCI card from scratch, as if from a cold-boot. During the
 * PCI subsystem error recovery, after the driver returns
 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
 * recovery and then call this routine before calling the .resume method to
 * recover the device. This function will initialize the HBA device and
 * enable the interrupt, but it will just put the HBA to offline state
 * without passing any I/O traffic.
 *
 * Return codes
 *	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_slot_reset_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t intr_mode;

	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
	if (pci_enable_device_mem(pdev)) {
		printk(KERN_ERR "lpfc: Cannot re-enable "
		       "PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2824 Cannot re-enable interrupt after "
				"slot reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	} else
		phba->intr_mode = intr_mode;

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return PCI_ERS_RESULT_RECOVERED;
}
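/*
 * Note that the HBA is deliberately left offline at this point:
 * LPFC_SLI_ACTIVE stays cleared so that lpfc_io_resume_s4() below performs
 * the actual device reset and online transition once DMA is usable again.
 */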
/**
 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
 * @pdev: pointer to PCI device
 *
 * This routine is called from the PCI subsystem for error handling to device
 * with SLI-4 interface spec. It is called when kernel error recovery tells
 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
 * error recovery. After this call, traffic can start to flow from this
 * device again.
 **/
static void
lpfc_io_resume_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	/*
	 * In case of slot reset, as function reset is performed through
	 * mailbox command which needs DMA to be enabled, this operation
	 * has to be moved to the io resume phase. Taking device offline
	 * will perform the necessary cleanup.
	 */
	if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
		/* Perform device reset */
		lpfc_offline_prep(phba, LPFC_MBX_WAIT);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		/* Bring the device back online */
		lpfc_online(phba);
	}

	/* Clean up Advanced Error Reporting (AER) if needed */
	if (phba->hba_flag & HBA_AER_ENABLED)
		pci_cleanup_aer_uncorrect_error_status(pdev);
}
/**
 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is to be registered to the kernel's PCI subsystem. When an
 * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks
 * at PCI device-specific information of the device and driver to determine
 * whether the driver can support this kind of device. If the match is
 * successful, the driver core invokes this routine. This routine dispatches
 * the action to the proper SLI-3 or SLI-4 device probing routine, which will
 * do all the initialization that it needs to do to handle the HBA device
 * properly.
 *
 * Return code
 *	0 - driver can claim the device
 *	negative value - driver can not claim the device
 **/
static int
lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	int rc;
	struct lpfc_sli_intf intf;

	if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
		return -ENODEV;

	if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
	    (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
		rc = lpfc_pci_probe_one_s4(pdev, pid);
	else
		rc = lpfc_pci_probe_one_s3(pdev, pid);

	return rc;
}
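/*
 * The remaining PCI entry points all dispatch on phba->pci_dev_grp, which
 * the probe path records per HBA: LPFC_PCI_DEV_LP for SLI-3 parts and
 * LPFC_PCI_DEV_OC for SLI-4 parts.
 */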
/**
 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
 * @pdev: pointer to PCI device
 *
 * This routine is to be registered to the kernel's PCI subsystem. When an
 * Emulex HBA is removed from PCI bus, the driver core invokes this routine.
 * This routine dispatches the action to the proper SLI-3 or SLI-4 device
 * remove routine, which will perform all the necessary cleanup for the
 * device to be removed from the PCI subsystem properly.
 **/
static void
lpfc_pci_remove_one(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		lpfc_pci_remove_one_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		lpfc_pci_remove_one_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1424 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return;
}
/**
 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
 * @pdev: pointer to PCI device
 * @msg: power management message
 *
 * This routine is to be registered to the kernel's PCI subsystem to support
 * system Power Management (PM). When PM invokes this method, it dispatches
 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
 * suspend the device.
 *
 * Return code
 *	0 - driver suspended the device
 *	Error otherwise
 **/
static int
lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	int rc = -ENODEV;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_pci_suspend_one_s3(pdev, msg);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_pci_suspend_one_s4(pdev, msg);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1425 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}
/**
 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
 * @pdev: pointer to PCI device
 *
 * This routine is to be registered to the kernel's PCI subsystem to support
 * system Power Management (PM). When PM invokes this method, it dispatches
 * the action to the proper SLI-3 or SLI-4 device resume routine, which will
 * resume the device.
 *
 * Return code
 *	0 - driver resumed the device
 *	Error otherwise
 **/
static int
lpfc_pci_resume_one(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	int rc = -ENODEV;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_pci_resume_one_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_pci_resume_one_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1426 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}
/**
 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is registered to the PCI subsystem for error handling. This
 * function is called by the PCI subsystem after a PCI bus error affecting
 * this device has been detected. When this routine is invoked, it dispatches
 * the action to the proper SLI-3 or SLI-4 device error detected handling
 * routine, which will perform the proper error detected operation.
 *
 * Return codes
 *	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_io_error_detected_s3(pdev, state);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_io_error_detected_s4(pdev, state);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1427 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}
/**
 * lpfc_io_slot_reset - lpfc method for restart PCI dev from scratch
 * @pdev: pointer to PCI device.
 *
 * This routine is registered to the PCI subsystem for error handling. This
 * function is called after PCI bus has been reset to restart the PCI card
 * from scratch, as if from a cold-boot. When this routine is invoked, it
 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling
 * routine, which will perform the proper device reset.
 *
 * Return codes
 *	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_slot_reset(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_io_slot_reset_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_io_slot_reset_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1428 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}
/**
 * lpfc_io_resume - lpfc method for resuming PCI I/O operation
 * @pdev: pointer to PCI device
 *
 * This routine is registered to the PCI subsystem for error handling. It
 * is called when kernel error recovery tells the lpfc driver that it is
 * OK to resume normal PCI operation after PCI bus error recovery. When
 * this routine is invoked, it dispatches the action to the proper SLI-3
 * or SLI-4 device io_resume routine, which will resume the device operation.
 **/
static void
lpfc_io_resume(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		lpfc_io_resume_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		lpfc_io_resume_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1429 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return;
}
/**
 * lpfc_mgmt_open - method called when 'lpfcmgmt' is opened from userspace
 * @inode: pointer to the inode representing the lpfcmgmt device
 * @filep: pointer to the file representing the open lpfcmgmt device
 *
 * This routine puts a reference count on the lpfc module whenever the
 * character device is opened.
 **/
static int
lpfc_mgmt_open(struct inode *inode, struct file *filep)
{
	try_module_get(THIS_MODULE);
	return 0;
}
/**
 * lpfc_mgmt_release - method called when 'lpfcmgmt' is closed in userspace
 * @inode: pointer to the inode representing the lpfcmgmt device
 * @filep: pointer to the file representing the open lpfcmgmt device
 *
 * This routine removes a reference count from the lpfc module when the
 * character device is closed.
 **/
static int
lpfc_mgmt_release(struct inode *inode, struct file *filep)
{
	module_put(THIS_MODULE);
	return 0;
}
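/*
 * PCI IDs claimed by this driver: each entry matches a specific Emulex
 * (or ServerEngines) device ID with subvendor/subdevice wildcarded, and
 * the table ends with the mandatory all-zero sentinel.
 */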
static struct pci_device_id lpfc_id_table[] = {
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FIREFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HORNET,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP11000S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_MID,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SMB,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_VF,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_PF,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TOMCAT,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FALCON,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BALIUS,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC_VF,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE_VF,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SKYHAWK,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SKYHAWK_VF,
		PCI_ANY_ID, PCI_ANY_ID, },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, lpfc_id_table);
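/*
 * PCI error-recovery (EEH/AER) callbacks: the PCI core calls
 * .error_detected when a channel error is reported, .slot_reset after the
 * slot has been reset, and .resume once normal I/O may restart.
 */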
static const struct pci_error_handlers lpfc_err_handler = {
	.error_detected = lpfc_io_error_detected,
	.slot_reset = lpfc_io_slot_reset,
	.resume = lpfc_io_resume,
};
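/*
 * Top-level PCI driver definition. Every callback below lands in one of
 * the SLI-3/SLI-4 dispatchers defined above.
 */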
static struct pci_driver lpfc_driver = {
	.name		= LPFC_DRIVER_NAME,
	.id_table	= lpfc_id_table,
	.probe		= lpfc_pci_probe_one,
	.remove		= lpfc_pci_remove_one,
	.suspend	= lpfc_pci_suspend_one,
	.resume		= lpfc_pci_resume_one,
	.err_handler	= &lpfc_err_handler,
};
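/*
 * "lpfcmgmt" control node: a dynamically-numbered misc character device
 * whose only role in this file is to pin the module while management
 * tools hold it open. Illustrative userspace use (device node name as
 * typically created by udev):
 *
 *	int fd = open("/dev/lpfcmgmt", O_RDONLY);
 *	...
 *	close(fd);
 *
 * No read/write/ioctl handlers are defined here.
 */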
static const struct file_operations lpfc_mgmt_fop = {
	.open = lpfc_mgmt_open,
	.release = lpfc_mgmt_release,
};
static struct miscdevice lpfc_mgmt_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "lpfcmgmt",
	.fops = &lpfc_mgmt_fop,
};
/**
 * lpfc_init - lpfc module initialization routine
 *
 * This routine is to be invoked when the lpfc module is loaded into the
 * kernel. The special kernel macro module_init() is used to indicate the
 * role of this routine to the kernel as lpfc module entry point.
 *
 * Return code
 *	0 - successful
 *	-ENOMEM - FC attach transport failed
 *	all others - failed
 **/
static int __init
lpfc_init(void)
{
	int error = 0;

	printk(LPFC_MODULE_DESC "\n");
	printk(LPFC_COPYRIGHT "\n");

	error = misc_register(&lpfc_mgmt_dev);
	if (error)
		printk(KERN_ERR "Could not register lpfcmgmt device, "
			"misc_register returned with status %d", error);

	if (lpfc_enable_npiv) {
		lpfc_transport_functions.vport_create = lpfc_vport_create;
		lpfc_transport_functions.vport_delete = lpfc_vport_delete;
	}
	lpfc_transport_template =
				fc_attach_transport(&lpfc_transport_functions);
	if (lpfc_transport_template == NULL)
		return -ENOMEM;
	if (lpfc_enable_npiv) {
		lpfc_vport_transport_template =
			fc_attach_transport(&lpfc_vport_transport_functions);
		if (lpfc_vport_transport_template == NULL) {
			fc_release_transport(lpfc_transport_template);
			return -ENOMEM;
		}
	}
	error = pci_register_driver(&lpfc_driver);
	if (error) {
		fc_release_transport(lpfc_transport_template);
		if (lpfc_enable_npiv)
			fc_release_transport(lpfc_vport_transport_template);
	}

	return error;
}
/**
 * lpfc_exit - lpfc module removal routine
 *
 * This routine is invoked when the lpfc module is removed from the kernel.
 * The special kernel macro module_exit() is used to indicate the role of
 * this routine to the kernel as lpfc module exit point.
 **/
static void __exit
lpfc_exit(void)
{
	misc_deregister(&lpfc_mgmt_dev);
	pci_unregister_driver(&lpfc_driver);
	fc_release_transport(lpfc_transport_template);
	if (lpfc_enable_npiv)
		fc_release_transport(lpfc_vport_transport_template);
	if (_dump_buf_data) {
		printk(KERN_ERR "9062 BLKGRD: freeing %lu pages for "
		       "_dump_buf_data at 0x%p\n",
		       (1L << _dump_buf_data_order), _dump_buf_data);
		free_pages((unsigned long)_dump_buf_data,
			   _dump_buf_data_order);
	}

	if (_dump_buf_dif) {
		printk(KERN_ERR "9049 BLKGRD: freeing %lu pages for "
		       "_dump_buf_dif at 0x%p\n",
		       (1L << _dump_buf_dif_order), _dump_buf_dif);
		free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order);
	}
}
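/*
 * Wire lpfc_init()/lpfc_exit() up as the module entry and exit points and
 * publish the license, description, author, and version strings reported
 * by modinfo. Loading is the usual "modprobe lpfc".
 */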
module_init(lpfc_init);
module_exit(lpfc_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(LPFC_MODULE_DESC);
MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com");
MODULE_VERSION("0:" LPFC_DRIVER_VERSION);