/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include "bfad_drv.h"
#include "bfa_modules.h"
#include "bfi_reg.h"

BFA_TRC_FILE(HAL, CORE);
/*
 * BFA module list terminated by NULL
 */
static struct bfa_module_s *hal_mods[] = {
	&hal_mod_sgpg,
	&hal_mod_fcport,
	&hal_mod_fcxp,
	&hal_mod_lps,
	&hal_mod_uf,
	&hal_mod_rport,
	&hal_mod_fcp,
	NULL
};
/*
 * Message handlers for various modules.
 */
static bfa_isr_func_t bfa_isrs[BFI_MC_MAX] = {
	bfa_isr_unhandled,	/* NONE */
	bfa_isr_unhandled,	/* BFI_MC_IOC */
	bfa_isr_unhandled,	/* BFI_MC_DIAG */
	bfa_isr_unhandled,	/* BFI_MC_FLASH */
	bfa_isr_unhandled,	/* BFI_MC_CEE */
	bfa_fcport_isr,		/* BFI_MC_FCPORT */
	bfa_isr_unhandled,	/* BFI_MC_IOCFC */
	bfa_isr_unhandled,	/* BFI_MC_LL */
	bfa_uf_isr,		/* BFI_MC_UF */
	bfa_fcxp_isr,		/* BFI_MC_FCXP */
	bfa_lps_isr,		/* BFI_MC_LPS */
	bfa_rport_isr,		/* BFI_MC_RPORT */
	bfa_itn_isr,		/* BFI_MC_ITN */
	bfa_isr_unhandled,	/* BFI_MC_IOIM_READ */
	bfa_isr_unhandled,	/* BFI_MC_IOIM_WRITE */
	bfa_isr_unhandled,	/* BFI_MC_IOIM_IO */
	bfa_ioim_isr,		/* BFI_MC_IOIM */
	bfa_ioim_good_comp_isr,	/* BFI_MC_IOIM_IOCOM */
	bfa_tskim_isr,		/* BFI_MC_TSKIM */
	bfa_isr_unhandled,	/* BFI_MC_SBOOT */
	bfa_isr_unhandled,	/* BFI_MC_IPFC */
	bfa_isr_unhandled,	/* BFI_MC_PORT */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
};
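/*
 * Dispatch sketch: bfa_isr_rspq() below indexes this table directly with
 * the message class of each completion, so every BFI_MC_* value needs a
 * slot, with bfa_isr_unhandled covering classes this path does not
 * service:
 *
 *	bfa_isrs[m->mhdr.msg_class](bfa, m);
 */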
/*
 * Message handlers for mailbox command classes
 */
static bfa_ioc_mbox_mcfunc_t bfa_mbox_isrs[BFI_MC_MAX] = {
	NULL,			/* NONE */
	NULL,			/* BFI_MC_IOC */
	NULL,			/* BFI_MC_DIAG */
	NULL,			/* BFI_MC_FLASH */
	NULL,			/* BFI_MC_CEE */
	NULL,			/* BFI_MC_PORT */
	bfa_iocfc_isr,		/* BFI_MC_IOCFC */
};
static void
bfa_com_port_attach(struct bfa_s *bfa, struct bfa_meminfo_s *mi)
{
	struct bfa_port_s	*port = &bfa->modules.port;
	u32			dm_len;
	u8			*dm_kva;
	u64			dm_pa;

	dm_len = bfa_port_meminfo();
	dm_kva = bfa_meminfo_dma_virt(mi);
	dm_pa  = bfa_meminfo_dma_phys(mi);

	memset(port, 0, sizeof(struct bfa_port_s));
	bfa_port_attach(port, &bfa->ioc, bfa, bfa->trcmod);
	bfa_port_mem_claim(port, dm_kva, dm_pa);

	bfa_meminfo_dma_virt(mi) = dm_kva + dm_len;
	bfa_meminfo_dma_phys(mi) = dm_pa + dm_len;
}
static void
bfa_com_ablk_attach(struct bfa_s *bfa, struct bfa_meminfo_s *mi)
{
	struct bfa_ablk_s	*ablk = &bfa->modules.ablk;
	u32			dm_len;
	u8			*dm_kva;
	u64			dm_pa;

	dm_len = bfa_ablk_meminfo();
	dm_kva = bfa_meminfo_dma_virt(mi);
	dm_pa  = bfa_meminfo_dma_phys(mi);

	memset(ablk, 0, sizeof(struct bfa_ablk_s));
	bfa_ablk_attach(ablk, &bfa->ioc);
	bfa_ablk_memclaim(ablk, dm_kva, dm_pa);

	bfa_meminfo_dma_virt(mi) = dm_kva + dm_len;
	bfa_meminfo_dma_phys(mi) = dm_pa + dm_len;
}
/*
 * BFA IOC FC related definitions
 */

/*
 * IOC local definitions
 */
#define BFA_IOCFC_TOV		5000	/* msecs */
enum {
	BFA_IOCFC_ACT_NONE	= 0,
	BFA_IOCFC_ACT_INIT	= 1,
	BFA_IOCFC_ACT_STOP	= 2,
	BFA_IOCFC_ACT_DISABLE	= 3,
};
#define DEF_CFG_NUM_FABRICS		1
#define DEF_CFG_NUM_LPORTS		256
#define DEF_CFG_NUM_CQS			4
#define DEF_CFG_NUM_IOIM_REQS		(BFA_IOIM_MAX)
#define DEF_CFG_NUM_TSKIM_REQS		128
#define DEF_CFG_NUM_FCXP_REQS		64
#define DEF_CFG_NUM_UF_BUFS		64
#define DEF_CFG_NUM_RPORTS		1024
#define DEF_CFG_NUM_ITNIMS		(DEF_CFG_NUM_RPORTS)
#define DEF_CFG_NUM_TINS		256

#define DEF_CFG_NUM_SGPGS		2048
#define DEF_CFG_NUM_REQQ_ELEMS		256
#define DEF_CFG_NUM_RSPQ_ELEMS		64
#define DEF_CFG_NUM_SBOOT_TGTS		16
#define DEF_CFG_NUM_SBOOT_LUNS		16
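/*
 * These DEF_CFG_* values are the compiled-in defaults that
 * bfa_cfg_get_default() (near the bottom of this file) copies into a
 * struct bfa_iocfc_cfg_s; drivers typically fetch them first and then
 * override individual fields from user or module parameters.
 */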
/*
 * forward declaration for IOC FC functions
 */
static void bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status);
static void bfa_iocfc_disable_cbfn(void *bfa_arg);
static void bfa_iocfc_hbfail_cbfn(void *bfa_arg);
static void bfa_iocfc_reset_cbfn(void *bfa_arg);
static struct bfa_ioc_cbfn_s bfa_iocfc_cbfn;
/*
 * BFA Interrupt handling functions
 */
static void
bfa_reqq_resume(struct bfa_s *bfa, int qid)
{
	struct list_head *waitq, *qe, *qen;
	struct bfa_reqq_wait_s *wqe;

	waitq = bfa_reqq(bfa, qid);
	list_for_each_safe(qe, qen, waitq) {
		/*
		 * Callback only as long as there is room in request queue
		 */
		if (bfa_reqq_full(bfa, qid))
			break;

		list_del(qe);
		wqe = (struct bfa_reqq_wait_s *) qe;
		wqe->qresume(wqe->cbarg);
	}
}
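/*
 * Usage sketch (hedged; the names below are illustrative): a submodule
 * that finds its request queue full parks a bfa_reqq_wait_s element on
 * the per-queue wait list and is called back through ->qresume once
 * bfa_isr_rspq()/bfa_isr_reqq() observe free space:
 *
 *	wqe->qresume = my_qresume_fn;	// hypothetical callback
 *	wqe->cbarg   = my_ctx;
 *	list_add_tail(&wqe->qe, bfa_reqq(bfa, qid));
 */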
static void
bfa_isr_rspq(struct bfa_s *bfa, int qid)
{
	struct bfi_msg_s *m;
	u32 pi, ci;
	struct list_head *waitq;

	bfa_isr_rspq_ack(bfa, qid);

	ci = bfa_rspq_ci(bfa, qid);
	pi = bfa_rspq_pi(bfa, qid);

	while (ci != pi) {
		m = bfa_rspq_elem(bfa, qid, ci);
		WARN_ON(m->mhdr.msg_class >= BFI_MC_MAX);

		bfa_isrs[m->mhdr.msg_class] (bfa, m);
		CQ_INCR(ci, bfa->iocfc.cfg.drvcfg.num_rspq_elems);
	}

	/*
	 * update CI
	 */
	bfa_rspq_ci(bfa, qid) = pi;
	writel(pi, bfa->iocfc.bfa_regs.rme_q_ci[qid]);
	mmiowb();

	/*
	 * Resume any pending requests in the corresponding reqq.
	 */
	waitq = bfa_reqq(bfa, qid);
	if (!list_empty(waitq))
		bfa_reqq_resume(bfa, qid);
}
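/*
 * Ring bookkeeping above: the firmware advances the producer index (PI)
 * as it posts completions; the driver consumes entries from its shadow
 * consumer index (CI) until CI == PI, wrapping modulo num_rspq_elems via
 * CQ_INCR, and then writes the final CI back to the rme_q_ci register so
 * the firmware can reuse those slots.
 */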
static void
bfa_isr_reqq(struct bfa_s *bfa, int qid)
{
	struct list_head *waitq;

	bfa_isr_reqq_ack(bfa, qid);

	/*
	 * Resume any pending requests in the corresponding reqq.
	 */
	waitq = bfa_reqq(bfa, qid);
	if (!list_empty(waitq))
		bfa_reqq_resume(bfa, qid);
}
void
bfa_msix_all(struct bfa_s *bfa, int vec)
{
	u32	intr, qintr;
	int	queue;

	intr = readl(bfa->iocfc.bfa_regs.intr_status);
	if (!intr)
		return;

	/*
	 * RME completion queue interrupt
	 */
	qintr = intr & __HFN_INT_RME_MASK;
	if (qintr && bfa->queue_process) {
		for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
			bfa_isr_rspq(bfa, queue);
	}

	intr &= ~qintr;
	if (!intr)
		return;

	/*
	 * CPE completion queue interrupt
	 */
	qintr = intr & __HFN_INT_CPE_MASK;
	if (qintr && bfa->queue_process) {
		for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
			bfa_isr_reqq(bfa, queue);
	}

	intr &= ~qintr;
	if (!intr)
		return;

	bfa_msix_lpu_err(bfa, intr);
}
bfa_boolean_t
bfa_intx(struct bfa_s *bfa)
{
	u32 intr, qintr;
	int queue;

	intr = readl(bfa->iocfc.bfa_regs.intr_status);
	if (!intr)
		return BFA_FALSE;

	qintr = intr & (__HFN_INT_RME_MASK | __HFN_INT_CPE_MASK);
	if (qintr)
		writel(qintr, bfa->iocfc.bfa_regs.intr_status);

	/*
	 * RME completion queue interrupt
	 */
	qintr = intr & __HFN_INT_RME_MASK;
	if (qintr && bfa->queue_process) {
		for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
			bfa_isr_rspq(bfa, queue);
	}

	intr &= ~qintr;
	if (!intr)
		return BFA_TRUE;

	/*
	 * CPE completion queue interrupt
	 */
	qintr = intr & __HFN_INT_CPE_MASK;
	if (qintr && bfa->queue_process) {
		for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
			bfa_isr_reqq(bfa, queue);
	}

	intr &= ~qintr;
	if (!intr)
		return BFA_TRUE;

	bfa_msix_lpu_err(bfa, intr);

	return BFA_TRUE;
}
void
bfa_isr_enable(struct bfa_s *bfa)
{
	u32 umsk;
	int pci_func = bfa_ioc_pcifn(&bfa->ioc);

	bfa_trc(bfa, pci_func);

	bfa_msix_ctrl_install(bfa);

	if (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)) {
		umsk = __HFN_INT_ERR_MASK_CT2;
		umsk |= pci_func == 0 ?
			__HFN_INT_FN0_MASK_CT2 : __HFN_INT_FN1_MASK_CT2;
	} else {
		umsk = __HFN_INT_ERR_MASK;
		umsk |= pci_func == 0 ?
			__HFN_INT_FN0_MASK : __HFN_INT_FN1_MASK;
	}

	writel(umsk, bfa->iocfc.bfa_regs.intr_status);
	writel(~umsk, bfa->iocfc.bfa_regs.intr_mask);
	bfa->iocfc.intr_mask = ~umsk;
	bfa_isr_mode_set(bfa, bfa->msix.nvecs != 0);
}
void
bfa_isr_disable(struct bfa_s *bfa)
{
	bfa_isr_mode_set(bfa, BFA_FALSE);
	writel(-1L, bfa->iocfc.bfa_regs.intr_mask);
	bfa_msix_uninstall(bfa);
}
void
bfa_msix_reqq(struct bfa_s *bfa, int vec)
{
	bfa_isr_reqq(bfa, vec - bfa->iocfc.hwif.cpe_vec_q0);
}
void
bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	bfa_trc(bfa, m->mhdr.msg_class);
	bfa_trc(bfa, m->mhdr.msg_id);
	bfa_trc(bfa, m->mhdr.mtag.i2htok);
	WARN_ON(1);
	bfa_trc_stop(bfa->trcmod);
}
void
bfa_msix_rspq(struct bfa_s *bfa, int vec)
{
	bfa_isr_rspq(bfa, vec - bfa->iocfc.hwif.rme_vec_q0);
}
void
bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
{
	u32 intr, curr_value;
	bfa_boolean_t lpu_isr, halt_isr, pss_isr;

	intr = readl(bfa->iocfc.bfa_regs.intr_status);

	if (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)) {
		halt_isr = intr & __HFN_INT_CPQ_HALT_CT2;
		pss_isr  = intr & __HFN_INT_ERR_PSS_CT2;
		lpu_isr  = intr & (__HFN_INT_MBOX_LPU0_CT2 |
				   __HFN_INT_MBOX_LPU1_CT2);
		intr    &= __HFN_INT_ERR_MASK_CT2;
	} else {
		halt_isr = intr & __HFN_INT_LL_HALT;
		pss_isr  = intr & __HFN_INT_ERR_PSS;
		lpu_isr  = intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1);
		intr    &= __HFN_INT_ERR_MASK;
	}

	if (lpu_isr)
		bfa_ioc_mbox_isr(&bfa->ioc);

	if (intr) {
		if (halt_isr) {
			/*
			 * If the LL_HALT bit is set, then the FW Init Halt
			 * LL Port Register needs to be cleared as well so
			 * the Interrupt Status Register will be cleared.
			 */
			curr_value = readl(bfa->ioc.ioc_regs.ll_halt);
			curr_value &= ~__FW_INIT_HALT_P;
			writel(curr_value, bfa->ioc.ioc_regs.ll_halt);
		}

		if (pss_isr) {
			/*
			 * The ERR_PSS bit needs to be cleared as well in
			 * case interrupts are shared, so the driver's
			 * interrupt handler is still called even though it
			 * is already masked out.
			 */
			curr_value = readl(
					bfa->ioc.ioc_regs.pss_err_status_reg);
			writel(curr_value,
					bfa->ioc.ioc_regs.pss_err_status_reg);
		}

		writel(intr, bfa->iocfc.bfa_regs.intr_status);
		bfa_ioc_error_isr(&bfa->ioc);
	}
}
/*
 * BFA IOC FC related functions
 */

/*
 * BFA IOC private functions
 */
static void
bfa_iocfc_cqs_sz(struct bfa_iocfc_cfg_s *cfg, u32 *dm_len)
{
	int i, per_reqq_sz, per_rspq_sz;

	per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
				  BFA_DMA_ALIGN_SZ);
	per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
				  BFA_DMA_ALIGN_SZ);

	/*
	 * Calculate CQ size
	 */
	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		*dm_len = *dm_len + per_reqq_sz;
		*dm_len = *dm_len + per_rspq_sz;
	}

	/*
	 * Calculate Shadow CI/PI size
	 */
	for (i = 0; i < cfg->fwcfg.num_cqs; i++)
		*dm_len += (2 * BFA_CACHELINE_SZ);
}
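/*
 * Worked example (assuming BFI_LMSG_SZ is 128 bytes): with the default
 * 256 request and 64 response elements, each request queue needs
 * 256 * 128 = 32768 bytes and each response queue 64 * 128 = 8192 bytes
 * (both already BFA_DMA_ALIGN_SZ multiples), plus 2 * BFA_CACHELINE_SZ
 * per CQ pair for the shadow CI/PI words.
 */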
static void
bfa_iocfc_fw_cfg_sz(struct bfa_iocfc_cfg_s *cfg, u32 *dm_len)
{
	*dm_len +=
		BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
	*dm_len +=
		BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
			    BFA_CACHELINE_SZ);
}
/*
 * Use the Mailbox interface to send BFI_IOCFC_H2I_CFG_REQ
 */
static void
bfa_iocfc_send_cfg(void *bfa_arg)
{
	struct bfa_s *bfa = bfa_arg;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfg_req_s cfg_req;
	struct bfi_iocfc_cfg_s *cfg_info = iocfc->cfginfo;
	struct bfa_iocfc_cfg_s	*cfg = &iocfc->cfg;
	int		i;

	WARN_ON(cfg->fwcfg.num_cqs > BFI_IOC_MAX_CQS);
	bfa_trc(bfa, cfg->fwcfg.num_cqs);

	bfa_iocfc_reset_queues(bfa);

	/*
	 * initialize IOC configuration info
	 */
	cfg_info->single_msix_vec = 0;
	if (bfa->msix.nvecs == 1)
		cfg_info->single_msix_vec = 1;
	cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG;
	cfg_info->num_cqs = cfg->fwcfg.num_cqs;
	cfg_info->num_ioim_reqs = cpu_to_be16(cfg->fwcfg.num_ioim_reqs);
	cfg_info->num_fwtio_reqs = cpu_to_be16(cfg->fwcfg.num_fwtio_reqs);

	bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa);
	/*
	 * dma map REQ and RSP circular queues and shadow pointers
	 */
	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		bfa_dma_be_addr_set(cfg_info->req_cq_ba[i],
				    iocfc->req_cq_ba[i].pa);
		bfa_dma_be_addr_set(cfg_info->req_shadow_ci[i],
				    iocfc->req_cq_shadow_ci[i].pa);
		cfg_info->req_cq_elems[i] =
			cpu_to_be16(cfg->drvcfg.num_reqq_elems);

		bfa_dma_be_addr_set(cfg_info->rsp_cq_ba[i],
				    iocfc->rsp_cq_ba[i].pa);
		bfa_dma_be_addr_set(cfg_info->rsp_shadow_pi[i],
				    iocfc->rsp_cq_shadow_pi[i].pa);
		cfg_info->rsp_cq_elems[i] =
			cpu_to_be16(cfg->drvcfg.num_rspq_elems);
	}

	/*
	 * Enable interrupt coalescing if it is driver init path
	 * and not ioc disable/enable path.
	 */
	if (!iocfc->cfgdone)
		cfg_info->intr_attr.coalesce = BFA_TRUE;

	iocfc->cfgdone = BFA_FALSE;

	/*
	 * dma map IOC configuration itself
	 */
	bfi_h2i_set(cfg_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CFG_REQ,
		    bfa_fn_lpu(bfa));
	bfa_dma_be_addr_set(cfg_req.ioc_cfg_dma_addr, iocfc->cfg_info.pa);

	bfa_ioc_mbox_send(&bfa->ioc, &cfg_req,
			  sizeof(struct bfi_iocfc_cfg_req_s));
}
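/*
 * Endianness note: every address handed to the firmware above goes
 * through bfa_dma_be_addr_set(), which (per its use here) stores the
 * 64-bit DMA address in the big-endian byte order the firmware expects,
 * and the 16-bit element counts are swapped with cpu_to_be16(). Host-
 * endian values must never be placed in cfg_info directly.
 */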
static void
bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		   struct bfa_pcidev_s *pcidev)
{
	struct bfa_iocfc_s	*iocfc = &bfa->iocfc;

	bfa->bfad = bfad;
	iocfc->bfa = bfa;
	iocfc->action = BFA_IOCFC_ACT_NONE;

	iocfc->cfg = *cfg;

	/*
	 * Initialize chip specific handlers.
	 */
	if (bfa_asic_id_ctc(bfa_ioc_devid(&bfa->ioc))) {
		iocfc->hwif.hw_reginit = bfa_hwct_reginit;
		iocfc->hwif.hw_reqq_ack = bfa_hwct_reqq_ack;
		iocfc->hwif.hw_rspq_ack = bfa_hwct_rspq_ack;
		iocfc->hwif.hw_msix_init = bfa_hwct_msix_init;
		iocfc->hwif.hw_msix_ctrl_install = bfa_hwct_msix_ctrl_install;
		iocfc->hwif.hw_msix_queue_install = bfa_hwct_msix_queue_install;
		iocfc->hwif.hw_msix_uninstall = bfa_hwct_msix_uninstall;
		iocfc->hwif.hw_isr_mode_set = bfa_hwct_isr_mode_set;
		iocfc->hwif.hw_msix_getvecs = bfa_hwct_msix_getvecs;
		iocfc->hwif.hw_msix_get_rme_range = bfa_hwct_msix_get_rme_range;
		iocfc->hwif.rme_vec_q0 = BFI_MSIX_RME_QMIN_CT;
		iocfc->hwif.cpe_vec_q0 = BFI_MSIX_CPE_QMIN_CT;
	} else {
		iocfc->hwif.hw_reginit = bfa_hwcb_reginit;
		iocfc->hwif.hw_reqq_ack = NULL;
		iocfc->hwif.hw_rspq_ack = NULL;
		iocfc->hwif.hw_msix_init = bfa_hwcb_msix_init;
		iocfc->hwif.hw_msix_ctrl_install = bfa_hwcb_msix_ctrl_install;
		iocfc->hwif.hw_msix_queue_install = bfa_hwcb_msix_queue_install;
		iocfc->hwif.hw_msix_uninstall = bfa_hwcb_msix_uninstall;
		iocfc->hwif.hw_isr_mode_set = bfa_hwcb_isr_mode_set;
		iocfc->hwif.hw_msix_getvecs = bfa_hwcb_msix_getvecs;
		iocfc->hwif.hw_msix_get_rme_range = bfa_hwcb_msix_get_rme_range;
		iocfc->hwif.rme_vec_q0 = BFI_MSIX_RME_QMIN_CB +
			bfa_ioc_pcifn(&bfa->ioc) * BFI_IOC_MAX_CQS;
		iocfc->hwif.cpe_vec_q0 = BFI_MSIX_CPE_QMIN_CB +
			bfa_ioc_pcifn(&bfa->ioc) * BFI_IOC_MAX_CQS;
	}

	if (bfa_asic_id_ct2(bfa_ioc_devid(&bfa->ioc))) {
		iocfc->hwif.hw_reginit = bfa_hwct2_reginit;
		iocfc->hwif.hw_isr_mode_set = NULL;
		iocfc->hwif.hw_rspq_ack = NULL;
	}

	iocfc->hwif.hw_reginit(bfa);
	bfa->msix.nvecs = 0;
}
static void
bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg,
		    struct bfa_meminfo_s *meminfo)
{
	u8	*dm_kva;
	u64	dm_pa;
	int	i, per_reqq_sz, per_rspq_sz;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	int	dbgsz;

	dm_kva = bfa_meminfo_dma_virt(meminfo);
	dm_pa = bfa_meminfo_dma_phys(meminfo);

	/*
	 * First allocate dma memory for IOC.
	 */
	bfa_ioc_mem_claim(&bfa->ioc, dm_kva, dm_pa);
	dm_kva += BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ);
	dm_pa += BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ);

	/*
	 * Claim DMA-able memory for the request/response queues and for shadow
	 * ci/pi registers
	 */
	per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
				  BFA_DMA_ALIGN_SZ);
	per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
				  BFA_DMA_ALIGN_SZ);

	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		iocfc->req_cq_ba[i].kva = dm_kva;
		iocfc->req_cq_ba[i].pa = dm_pa;
		memset(dm_kva, 0, per_reqq_sz);
		dm_kva += per_reqq_sz;
		dm_pa += per_reqq_sz;

		iocfc->rsp_cq_ba[i].kva = dm_kva;
		iocfc->rsp_cq_ba[i].pa = dm_pa;
		memset(dm_kva, 0, per_rspq_sz);
		dm_kva += per_rspq_sz;
		dm_pa += per_rspq_sz;
	}

	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		iocfc->req_cq_shadow_ci[i].kva = dm_kva;
		iocfc->req_cq_shadow_ci[i].pa = dm_pa;
		dm_kva += BFA_CACHELINE_SZ;
		dm_pa += BFA_CACHELINE_SZ;

		iocfc->rsp_cq_shadow_pi[i].kva = dm_kva;
		iocfc->rsp_cq_shadow_pi[i].pa = dm_pa;
		dm_kva += BFA_CACHELINE_SZ;
		dm_pa += BFA_CACHELINE_SZ;
	}

	/*
	 * Claim DMA-able memory for the config info page
	 */
	bfa->iocfc.cfg_info.kva = dm_kva;
	bfa->iocfc.cfg_info.pa = dm_pa;
	bfa->iocfc.cfginfo = (struct bfi_iocfc_cfg_s *) dm_kva;
	dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
	dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);

	/*
	 * Claim DMA-able memory for the config response
	 */
	bfa->iocfc.cfgrsp_dma.kva = dm_kva;
	bfa->iocfc.cfgrsp_dma.pa = dm_pa;
	bfa->iocfc.cfgrsp = (struct bfi_iocfc_cfgrsp_s *) dm_kva;
	dm_kva +=
		BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
			    BFA_CACHELINE_SZ);
	dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
			     BFA_CACHELINE_SZ);

	bfa_meminfo_dma_virt(meminfo) = dm_kva;
	bfa_meminfo_dma_phys(meminfo) = dm_pa;

	dbgsz = (bfa_auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
	if (dbgsz > 0) {
		bfa_ioc_debug_memclaim(&bfa->ioc, bfa_meminfo_kva(meminfo));
		bfa_meminfo_kva(meminfo) += dbgsz;
	}
}
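/*
 * Resulting DMA carve-out, in claim order:
 *
 *	[ IOC attributes       ]  BFA_ROUNDUP(sizeof(bfi_ioc_attr_s), ...)
 *	[ req/rsp rings x CQs  ]  per_reqq_sz + per_rspq_sz each
 *	[ shadow CI/PI x CQs   ]  2 * BFA_CACHELINE_SZ each
 *	[ config info page     ]  rounded up to BFA_CACHELINE_SZ
 *	[ config response page ]  rounded up to BFA_CACHELINE_SZ
 *
 * which matches the total reported by bfa_iocfc_meminfo(), so the cursor
 * handed back through bfa_meminfo_dma_virt/phys stays in bounds.
 */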
/*
 * Start BFA submodules.
 */
static void
bfa_iocfc_start_submod(struct bfa_s *bfa)
{
	int		i;

	bfa->queue_process = BFA_TRUE;
	for (i = 0; i < BFI_IOC_MAX_CQS; i++)
		bfa_isr_rspq_ack(bfa, i);

	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->start(bfa);
}
/*
 * Disable BFA submodules.
 */
static void
bfa_iocfc_disable_submod(struct bfa_s *bfa)
{
	int		i;

	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->iocdisable(bfa);
}
static void
bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete)
{
	struct bfa_s	*bfa = bfa_arg;

	if (complete) {
		if (bfa->iocfc.cfgdone)
			bfa_cb_init(bfa->bfad, BFA_STATUS_OK);
		else
			bfa_cb_init(bfa->bfad, BFA_STATUS_FAILED);
	} else {
		if (bfa->iocfc.cfgdone)
			bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
	}
}
static void
bfa_iocfc_stop_cb(void *bfa_arg, bfa_boolean_t compl)
{
	struct bfa_s	*bfa = bfa_arg;
	struct bfad_s	*bfad = bfa->bfad;

	if (compl)
		complete(&bfad->comp);
	else
		bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
}
static void
bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl)
{
	struct bfa_s	*bfa = bfa_arg;
	struct bfad_s	*bfad = bfa->bfad;

	if (compl)
		complete(&bfad->disable_comp);
}
/*
 * configure queue registers from firmware response
 */
static void
bfa_iocfc_qreg(struct bfa_s *bfa, struct bfi_iocfc_qreg_s *qreg)
{
	int	i;
	struct bfa_iocfc_regs_s	*r = &bfa->iocfc.bfa_regs;
	void __iomem *kva = bfa_ioc_bar0(&bfa->ioc);

	for (i = 0; i < BFI_IOC_MAX_CQS; i++) {
		bfa->iocfc.hw_qid[i] = qreg->hw_qid[i];
		r->cpe_q_ci[i] = kva + be32_to_cpu(qreg->cpe_q_ci_off[i]);
		r->cpe_q_pi[i] = kva + be32_to_cpu(qreg->cpe_q_pi_off[i]);
		r->cpe_q_ctrl[i] = kva + be32_to_cpu(qreg->cpe_qctl_off[i]);
		r->rme_q_ci[i] = kva + be32_to_cpu(qreg->rme_q_ci_off[i]);
		r->rme_q_pi[i] = kva + be32_to_cpu(qreg->rme_q_pi_off[i]);
		r->rme_q_ctrl[i] = kva + be32_to_cpu(qreg->rme_qctl_off[i]);
	}
}
static void
bfa_iocfc_res_recfg(struct bfa_s *bfa, struct bfa_iocfc_fwcfg_s *fwcfg)
{
	bfa_fcxp_res_recfg(bfa, fwcfg->num_fcxp_reqs);
	bfa_uf_res_recfg(bfa, fwcfg->num_uf_bufs);
	bfa_rport_res_recfg(bfa, fwcfg->num_rports);
	bfa_fcp_res_recfg(bfa, fwcfg->num_ioim_reqs);
	bfa_tskim_res_recfg(bfa, fwcfg->num_tskim_reqs);
}
/*
 * Update BFA configuration from firmware configuration.
 */
static void
bfa_iocfc_cfgrsp(struct bfa_s *bfa)
{
	struct bfa_iocfc_s		*iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfgrsp_s	*cfgrsp = iocfc->cfgrsp;
	struct bfa_iocfc_fwcfg_s	*fwcfg = &cfgrsp->fwcfg;

	fwcfg->num_cqs = fwcfg->num_cqs;	/* single-byte field; no swap */
	fwcfg->num_ioim_reqs = be16_to_cpu(fwcfg->num_ioim_reqs);
	fwcfg->num_fwtio_reqs = be16_to_cpu(fwcfg->num_fwtio_reqs);
	fwcfg->num_tskim_reqs = be16_to_cpu(fwcfg->num_tskim_reqs);
	fwcfg->num_fcxp_reqs = be16_to_cpu(fwcfg->num_fcxp_reqs);
	fwcfg->num_uf_bufs = be16_to_cpu(fwcfg->num_uf_bufs);
	fwcfg->num_rports = be16_to_cpu(fwcfg->num_rports);

	iocfc->cfgdone = BFA_TRUE;

	/*
	 * configure queue register offsets as learnt from firmware
	 */
	bfa_iocfc_qreg(bfa, &cfgrsp->qreg);

	/*
	 * Re-configure resources as learnt from Firmware
	 */
	bfa_iocfc_res_recfg(bfa, fwcfg);

	/*
	 * Install MSIX queue handlers
	 */
	bfa_msix_queue_install(bfa);

	/*
	 * Configuration is complete - initialize/start submodules
	 */
	bfa_fcport_init(bfa);

	if (iocfc->action == BFA_IOCFC_ACT_INIT)
		bfa_cb_queue(bfa, &iocfc->init_hcb_qe, bfa_iocfc_init_cb, bfa);
	else
		bfa_iocfc_start_submod(bfa);
}
void
bfa_iocfc_reset_queues(struct bfa_s *bfa)
{
	int		q;

	for (q = 0; q < BFI_IOC_MAX_CQS; q++) {
		bfa_reqq_ci(bfa, q) = 0;
		bfa_reqq_pi(bfa, q) = 0;
		bfa_rspq_ci(bfa, q) = 0;
		bfa_rspq_pi(bfa, q) = 0;
	}
}
/* Fabric Assigned Address specific functions */

/*
 * Check whether IOC is ready before sending command down
 */
static bfa_status_t
bfa_faa_validate_request(struct bfa_s *bfa)
{
	enum bfa_ioc_type_e	ioc_type = bfa_get_type(bfa);
	u32	card_type = bfa->ioc.attr->card_type;

	if (bfa_ioc_is_operational(&bfa->ioc)) {
		if ((ioc_type != BFA_IOC_TYPE_FC) || bfa_mfg_is_mezz(card_type))
			return BFA_STATUS_FEATURE_NOT_SUPPORTED;
	} else {
		if (!bfa_ioc_is_acq_addr(&bfa->ioc))
			return BFA_STATUS_IOC_NON_OP;
	}

	return BFA_STATUS_OK;
}
bfa_status_t
bfa_faa_enable(struct bfa_s *bfa, bfa_cb_iocfc_t cbfn, void *cbarg)
{
	struct bfi_faa_en_dis_s faa_enable_req;
	struct bfa_iocfc_s	*iocfc = &bfa->iocfc;
	bfa_status_t		status;

	iocfc->faa_args.faa_cb.faa_cbfn = cbfn;
	iocfc->faa_args.faa_cb.faa_cbarg = cbarg;

	status = bfa_faa_validate_request(bfa);
	if (status != BFA_STATUS_OK)
		return status;

	if (iocfc->faa_args.busy == BFA_TRUE)
		return BFA_STATUS_DEVBUSY;

	if (iocfc->faa_args.faa_state == BFA_FAA_ENABLED)
		return BFA_STATUS_FAA_ENABLED;

	if (bfa_fcport_is_trunk_enabled(bfa))
		return BFA_STATUS_ERROR_TRUNK_ENABLED;

	bfa_fcport_cfg_faa(bfa, BFA_FAA_ENABLED);
	iocfc->faa_args.busy = BFA_TRUE;

	memset(&faa_enable_req, 0, sizeof(struct bfi_faa_en_dis_s));
	bfi_h2i_set(faa_enable_req.mh, BFI_MC_IOCFC,
		    BFI_IOCFC_H2I_FAA_ENABLE_REQ, bfa_fn_lpu(bfa));

	bfa_ioc_mbox_send(&bfa->ioc, &faa_enable_req,
			  sizeof(struct bfi_faa_en_dis_s));

	return BFA_STATUS_OK;
}
bfa_status_t
bfa_faa_disable(struct bfa_s *bfa, bfa_cb_iocfc_t cbfn,
		void *cbarg)
{
	struct bfi_faa_en_dis_s faa_disable_req;
	struct bfa_iocfc_s	*iocfc = &bfa->iocfc;
	bfa_status_t		status;

	iocfc->faa_args.faa_cb.faa_cbfn = cbfn;
	iocfc->faa_args.faa_cb.faa_cbarg = cbarg;

	status = bfa_faa_validate_request(bfa);
	if (status != BFA_STATUS_OK)
		return status;

	if (iocfc->faa_args.busy == BFA_TRUE)
		return BFA_STATUS_DEVBUSY;

	if (iocfc->faa_args.faa_state == BFA_FAA_DISABLED)
		return BFA_STATUS_FAA_DISABLED;

	bfa_fcport_cfg_faa(bfa, BFA_FAA_DISABLED);
	iocfc->faa_args.busy = BFA_TRUE;

	memset(&faa_disable_req, 0, sizeof(struct bfi_faa_en_dis_s));
	bfi_h2i_set(faa_disable_req.mh, BFI_MC_IOCFC,
		    BFI_IOCFC_H2I_FAA_DISABLE_REQ, bfa_fn_lpu(bfa));

	bfa_ioc_mbox_send(&bfa->ioc, &faa_disable_req,
			  sizeof(struct bfi_faa_en_dis_s));

	return BFA_STATUS_OK;
}
bfa_status_t
bfa_faa_query(struct bfa_s *bfa, struct bfa_faa_attr_s *attr,
	      bfa_cb_iocfc_t cbfn, void *cbarg)
{
	struct bfi_faa_query_s  faa_attr_req;
	struct bfa_iocfc_s	*iocfc = &bfa->iocfc;
	bfa_status_t		status;

	iocfc->faa_args.faa_attr = attr;
	iocfc->faa_args.faa_cb.faa_cbfn = cbfn;
	iocfc->faa_args.faa_cb.faa_cbarg = cbarg;

	status = bfa_faa_validate_request(bfa);
	if (status != BFA_STATUS_OK)
		return status;

	if (iocfc->faa_args.busy == BFA_TRUE)
		return BFA_STATUS_DEVBUSY;

	iocfc->faa_args.busy = BFA_TRUE;
	memset(&faa_attr_req, 0, sizeof(struct bfi_faa_query_s));
	bfi_h2i_set(faa_attr_req.mh, BFI_MC_IOCFC,
		    BFI_IOCFC_H2I_FAA_QUERY_REQ, bfa_fn_lpu(bfa));

	bfa_ioc_mbox_send(&bfa->ioc, &faa_attr_req,
			  sizeof(struct bfi_faa_query_s));

	return BFA_STATUS_OK;
}
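/*
 * Caller sketch (hedged; names are illustrative): FAA requests are
 * strictly serialized by faa_args.busy, so a driver would issue
 *
 *	struct bfa_faa_attr_s attr;
 *	if (bfa_faa_query(bfa, &attr, my_faa_cb, my_ctx) != BFA_STATUS_OK)
 *		...handle busy/unsupported...
 *
 * and read the results in my_faa_cb() once the mailbox response arrives
 * through bfa_faa_query_reply() below.
 */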
/*
 * FAA enable response
 */
static void
bfa_faa_enable_reply(struct bfa_iocfc_s *iocfc,
		     struct bfi_faa_en_dis_rsp_s *rsp)
{
	void		*cbarg = iocfc->faa_args.faa_cb.faa_cbarg;
	bfa_status_t	status = rsp->status;

	WARN_ON(!iocfc->faa_args.faa_cb.faa_cbfn);

	iocfc->faa_args.faa_cb.faa_cbfn(cbarg, status);
	iocfc->faa_args.busy = BFA_FALSE;
}
/*
 * FAA disable response
 */
static void
bfa_faa_disable_reply(struct bfa_iocfc_s *iocfc,
		      struct bfi_faa_en_dis_rsp_s *rsp)
{
	void		*cbarg = iocfc->faa_args.faa_cb.faa_cbarg;
	bfa_status_t	status = rsp->status;

	WARN_ON(!iocfc->faa_args.faa_cb.faa_cbfn);

	iocfc->faa_args.faa_cb.faa_cbfn(cbarg, status);
	iocfc->faa_args.busy = BFA_FALSE;
}
/*
 * FAA query response
 */
static void
bfa_faa_query_reply(struct bfa_iocfc_s *iocfc,
		    bfi_faa_query_rsp_t *rsp)
{
	void	*cbarg = iocfc->faa_args.faa_cb.faa_cbarg;

	if (iocfc->faa_args.faa_attr) {
		iocfc->faa_args.faa_attr->faa = rsp->faa;
		iocfc->faa_args.faa_attr->faa_state = rsp->faa_status;
		iocfc->faa_args.faa_attr->pwwn_source = rsp->addr_source;
	}

	WARN_ON(!iocfc->faa_args.faa_cb.faa_cbfn);

	iocfc->faa_args.faa_cb.faa_cbfn(cbarg, BFA_STATUS_OK);
	iocfc->faa_args.busy = BFA_FALSE;
}
/*
 * IOC enable request is complete
 */
static void
bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status)
{
	struct bfa_s	*bfa = bfa_arg;

	if (status == BFA_STATUS_FAA_ACQ_ADDR) {
		bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe,
			     bfa_iocfc_init_cb, bfa);
		return;
	}

	if (status != BFA_STATUS_OK) {
		bfa_isr_disable(bfa);
		if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
			bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe,
				     bfa_iocfc_init_cb, bfa);
		return;
	}

	bfa_iocfc_send_cfg(bfa);
}
/*
 * IOC disable request is complete
 */
static void
bfa_iocfc_disable_cbfn(void *bfa_arg)
{
	struct bfa_s	*bfa = bfa_arg;

	bfa_isr_disable(bfa);
	bfa_iocfc_disable_submod(bfa);

	if (bfa->iocfc.action == BFA_IOCFC_ACT_STOP)
		bfa_cb_queue(bfa, &bfa->iocfc.stop_hcb_qe, bfa_iocfc_stop_cb,
			     bfa);
	else {
		WARN_ON(bfa->iocfc.action != BFA_IOCFC_ACT_DISABLE);
		bfa_cb_queue(bfa, &bfa->iocfc.dis_hcb_qe, bfa_iocfc_disable_cb,
			     bfa);
	}
}
/*
 * Notify sub-modules of hardware failure.
 */
static void
bfa_iocfc_hbfail_cbfn(void *bfa_arg)
{
	struct bfa_s	*bfa = bfa_arg;

	bfa->queue_process = BFA_FALSE;

	bfa_isr_disable(bfa);
	bfa_iocfc_disable_submod(bfa);

	if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
		bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe, bfa_iocfc_init_cb,
			     bfa);
}
/*
 * Actions on chip-reset completion.
 */
static void
bfa_iocfc_reset_cbfn(void *bfa_arg)
{
	struct bfa_s	*bfa = bfa_arg;

	bfa_iocfc_reset_queues(bfa);
	bfa_isr_enable(bfa);
}
/*
 * Query IOC memory requirement information.
 */
void
bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
		  u32 *dm_len)
{
	/* dma memory for IOC */
	*dm_len += BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ);

	bfa_iocfc_fw_cfg_sz(cfg, dm_len);
	bfa_iocfc_cqs_sz(cfg, dm_len);
	*km_len += (bfa_auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
}
/*
 * Attach the IOC FC sub-module: register callbacks, initialize the IOC
 * and mailbox, and claim the memory carved out above.
 */
void
bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
	int		i;
	struct bfa_ioc_s *ioc = &bfa->ioc;

	bfa_iocfc_cbfn.enable_cbfn = bfa_iocfc_enable_cbfn;
	bfa_iocfc_cbfn.disable_cbfn = bfa_iocfc_disable_cbfn;
	bfa_iocfc_cbfn.hbfail_cbfn = bfa_iocfc_hbfail_cbfn;
	bfa_iocfc_cbfn.reset_cbfn = bfa_iocfc_reset_cbfn;

	ioc->trcmod = bfa->trcmod;
	bfa_ioc_attach(&bfa->ioc, bfa, &bfa_iocfc_cbfn, &bfa->timer_mod);

	bfa_ioc_pci_init(&bfa->ioc, pcidev, BFI_PCIFN_CLASS_FC);
	bfa_ioc_mbox_register(&bfa->ioc, bfa_mbox_isrs);

	bfa_iocfc_init_mem(bfa, bfad, cfg, pcidev);
	bfa_iocfc_mem_claim(bfa, cfg, meminfo);
	INIT_LIST_HEAD(&bfa->timer_mod.timer_q);

	INIT_LIST_HEAD(&bfa->comp_q);
	for (i = 0; i < BFI_IOC_MAX_CQS; i++)
		INIT_LIST_HEAD(&bfa->reqq_waitq[i]);
}
/*
 * Kick off IOC FC initialization by enabling the IOC.
 */
void
bfa_iocfc_init(struct bfa_s *bfa)
{
	bfa->iocfc.action = BFA_IOCFC_ACT_INIT;
	bfa_ioc_enable(&bfa->ioc);
}
/*
 * IOC start called from bfa_start(). Called to start IOC operations
 * at driver instantiation for this instance.
 */
void
bfa_iocfc_start(struct bfa_s *bfa)
{
	if (bfa->iocfc.cfgdone)
		bfa_iocfc_start_submod(bfa);
}
/*
 * IOC stop called from bfa_stop(). Called only when driver is unloaded
 * for this instance.
 */
void
bfa_iocfc_stop(struct bfa_s *bfa)
{
	bfa->iocfc.action = BFA_IOCFC_ACT_STOP;

	bfa->queue_process = BFA_FALSE;
	bfa_ioc_disable(&bfa->ioc);
}
void
bfa_iocfc_isr(void *bfaarg, struct bfi_mbmsg_s *m)
{
	struct bfa_s		*bfa = bfaarg;
	struct bfa_iocfc_s	*iocfc = &bfa->iocfc;
	union bfi_iocfc_i2h_msg_u	*msg;

	msg = (union bfi_iocfc_i2h_msg_u *) m;
	bfa_trc(bfa, msg->mh.msg_id);

	switch (msg->mh.msg_id) {
	case BFI_IOCFC_I2H_CFG_REPLY:
		bfa_iocfc_cfgrsp(bfa);
		break;
	case BFI_IOCFC_I2H_UPDATEQ_RSP:
		iocfc->updateq_cbfn(iocfc->updateq_cbarg, BFA_STATUS_OK);
		break;
	case BFI_IOCFC_I2H_FAA_ENABLE_RSP:
		bfa_faa_enable_reply(iocfc,
			(struct bfi_faa_en_dis_rsp_s *)msg);
		break;
	case BFI_IOCFC_I2H_FAA_DISABLE_RSP:
		bfa_faa_disable_reply(iocfc,
			(struct bfi_faa_en_dis_rsp_s *)msg);
		break;
	case BFI_IOCFC_I2H_FAA_QUERY_RSP:
		bfa_faa_query_reply(iocfc, (bfi_faa_query_rsp_t *)msg);
		break;
	default:
		WARN_ON(1);
	}
}
void
bfa_iocfc_get_attr(struct bfa_s *bfa, struct bfa_iocfc_attr_s *attr)
{
	struct bfa_iocfc_s	*iocfc = &bfa->iocfc;

	attr->intr_attr.coalesce = iocfc->cfginfo->intr_attr.coalesce;

	attr->intr_attr.delay = iocfc->cfginfo->intr_attr.delay ?
				be16_to_cpu(iocfc->cfginfo->intr_attr.delay) :
				be16_to_cpu(iocfc->cfgrsp->intr_attr.delay);

	attr->intr_attr.latency = iocfc->cfginfo->intr_attr.latency ?
			be16_to_cpu(iocfc->cfginfo->intr_attr.latency) :
			be16_to_cpu(iocfc->cfgrsp->intr_attr.latency);

	attr->config = iocfc->cfg;
}
bfa_status_t
bfa_iocfc_israttr_set(struct bfa_s *bfa, struct bfa_iocfc_intr_attr_s *attr)
{
	struct bfa_iocfc_s		*iocfc = &bfa->iocfc;
	struct bfi_iocfc_set_intr_req_s *m;

	iocfc->cfginfo->intr_attr.coalesce = attr->coalesce;
	iocfc->cfginfo->intr_attr.delay = cpu_to_be16(attr->delay);
	iocfc->cfginfo->intr_attr.latency = cpu_to_be16(attr->latency);

	if (!bfa_iocfc_is_operational(bfa))
		return BFA_STATUS_OK;

	m = bfa_reqq_next(bfa, BFA_REQQ_IOC);
	if (!m)
		return BFA_STATUS_DEVBUSY;

	bfi_h2i_set(m->mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_SET_INTR_REQ,
		    bfa_fn_lpu(bfa));
	m->coalesce = iocfc->cfginfo->intr_attr.coalesce;
	m->delay = iocfc->cfginfo->intr_attr.delay;
	m->latency = iocfc->cfginfo->intr_attr.latency;

	bfa_trc(bfa, attr->delay);
	bfa_trc(bfa, attr->latency);

	bfa_reqq_produce(bfa, BFA_REQQ_IOC, m->mh);
	return BFA_STATUS_OK;
}
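/*
 * Example (hedged; the values are arbitrary and their units follow the
 * firmware contract): the caller fills a host-endian attribute struct
 * and this function performs the be16 conversion before sending:
 *
 *	struct bfa_iocfc_intr_attr_s attr = {
 *		.coalesce = BFA_TRUE,
 *		.delay    = 200,
 *		.latency  = 50,
 *	};
 *	bfa_iocfc_israttr_set(bfa, &attr);
 */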
void
bfa_iocfc_set_snsbase(struct bfa_s *bfa, u64 snsbase_pa)
{
	struct bfa_iocfc_s	*iocfc = &bfa->iocfc;

	iocfc->cfginfo->sense_buf_len = (BFI_IOIM_SNSLEN - 1);
	bfa_dma_be_addr_set(iocfc->cfginfo->ioim_snsbase, snsbase_pa);
}
/*
 * Enable IOC after it is disabled.
 */
void
bfa_iocfc_enable(struct bfa_s *bfa)
{
	bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
		     "IOC Enable");
	bfa_ioc_enable(&bfa->ioc);
}
void
bfa_iocfc_disable(struct bfa_s *bfa)
{
	bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
		     "IOC Disable");
	bfa->iocfc.action = BFA_IOCFC_ACT_DISABLE;

	bfa->queue_process = BFA_FALSE;
	bfa_ioc_disable(&bfa->ioc);
}
bfa_boolean_t
bfa_iocfc_is_operational(struct bfa_s *bfa)
{
	return bfa_ioc_is_operational(&bfa->ioc) && bfa->iocfc.cfgdone;
}
/*
 * Return boot target port wwns -- read from boot information in flash.
 */
void
bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t *wwns)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
	int i;

	if (cfgrsp->pbc_cfg.boot_enabled && cfgrsp->pbc_cfg.nbluns) {
		bfa_trc(bfa, cfgrsp->pbc_cfg.nbluns);
		*nwwns = cfgrsp->pbc_cfg.nbluns;
		for (i = 0; i < cfgrsp->pbc_cfg.nbluns; i++)
			wwns[i] = cfgrsp->pbc_cfg.blun[i].tgt_pwwn;

		return;
	}

	*nwwns = cfgrsp->bootwwns.nwwns;
	memcpy(wwns, cfgrsp->bootwwns.wwn, sizeof(cfgrsp->bootwwns.wwn));
}
int
bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, struct bfi_pbc_vport_s *pbc_vport)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;

	memcpy(pbc_vport, cfgrsp->pbc_cfg.vport, sizeof(cfgrsp->pbc_cfg.vport));
	return cfgrsp->pbc_cfg.nvports;
}
/*
 * Use this function to query the memory requirement of the BFA library.
 * This function needs to be called before bfa_attach() to get the
 * memory required of the BFA layer for a given driver configuration.
 *
 * This call will fail if the cap is out of range compared to pre-defined
 * values within the BFA library.
 *
 * @param[in] cfg -	pointer to bfa_ioc_cfg_t. Driver layer should indicate
 *			its configuration in this structure.
 *			The default values for struct bfa_iocfc_cfg_s can be
 *			fetched using the bfa_cfg_get_default() API.
 *
 *			If cap's boundary check fails, the library will use
 *			the default bfa_cap_t values (and log a warning msg).
 *
 * @param[out] meminfo - pointer to bfa_meminfo_t. This content
 *			indicates the memory type (see bfa_mem_type_t) and
 *			amount of memory required.
 *
 *			Driver should allocate the memory, populate the
 *			starting address for each block and provide the same
 *			structure as input parameter to bfa_attach() call.
 *
 * @return void
 *
 * Special Considerations: none
 */
void
bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo)
{
	int		i;
	u32	km_len = 0, dm_len = 0;

	WARN_ON((cfg == NULL) || (meminfo == NULL));

	memset((void *)meminfo, 0, sizeof(struct bfa_meminfo_s));
	meminfo->meminfo[BFA_MEM_TYPE_KVA - 1].mem_type =
		BFA_MEM_TYPE_KVA;
	meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_type =
		BFA_MEM_TYPE_DMA;

	bfa_iocfc_meminfo(cfg, &km_len, &dm_len);

	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->meminfo(cfg, &km_len, &dm_len);

	dm_len += bfa_port_meminfo();
	dm_len += bfa_ablk_meminfo();

	meminfo->meminfo[BFA_MEM_TYPE_KVA - 1].mem_len = km_len;
	meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_len = dm_len;
}
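/*
 * Bring-up sketch per the contract above (the allocation step is the
 * driver's own; no helper for it exists in this file):
 *
 *	struct bfa_iocfc_cfg_s cfg;
 *	struct bfa_meminfo_s meminfo;
 *
 *	bfa_cfg_get_default(&cfg);
 *	bfa_cfg_get_meminfo(&cfg, &meminfo);
 *	// allocate meminfo.meminfo[i].mem_len bytes per block and
 *	// fill in the kva/dma starting addresses
 *	bfa_attach(bfa, bfad, &cfg, &meminfo, pcidev);
 */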
/*
 * Use this function to attach a driver instance to the BFA
 * library. This function will not trigger any HW initialization
 * process (which will be done in the bfa_init() call).
 *
 * This call will fail if the cap is out of range compared to
 * pre-defined values within the BFA library.
 *
 * @param[out] bfa	Pointer to bfa_t.
 * @param[in] bfad	Opaque handle back to the driver's IOC structure
 * @param[in] cfg	Pointer to bfa_ioc_cfg_t. Should be same structure
 *			that was used in bfa_cfg_get_meminfo().
 * @param[in] meminfo	Pointer to bfa_meminfo_t. The driver should
 *			use the bfa_cfg_get_meminfo() call to
 *			find the memory blocks required, allocate the
 *			required memory and provide the starting addresses.
 * @param[in] pcidev	pointer to struct bfa_pcidev_s
 *
 * @return void
 *
 * Special Considerations: none
 */
void
bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
	   struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
	int			i;
	struct bfa_mem_elem_s	*melem;

	bfa->fcs = BFA_FALSE;

	WARN_ON((cfg == NULL) || (meminfo == NULL));

	/*
	 * initialize all memory pointers for iterative allocation
	 */
	for (i = 0; i < BFA_MEM_TYPE_MAX; i++) {
		melem = meminfo->meminfo + i;
		melem->kva_curp = melem->kva;
		melem->dma_curp = melem->dma;
	}

	bfa_iocfc_attach(bfa, bfad, cfg, meminfo, pcidev);

	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->attach(bfa, bfad, cfg, meminfo, pcidev);

	bfa_com_port_attach(bfa, meminfo);
	bfa_com_ablk_attach(bfa, meminfo);
}
/*
 * Use this function to delete a BFA IOC. The IOC should be stopped (by
 * calling bfa_stop()) before this function call.
 *
 * @param[in] bfa - pointer to bfa_t.
 *
 * @return void
 *
 * Special Considerations: none
 */
void
bfa_detach(struct bfa_s *bfa)
{
	int	i;

	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->detach(bfa);
	bfa_ioc_detach(&bfa->ioc);
}
void
bfa_comp_deq(struct bfa_s *bfa, struct list_head *comp_q)
{
	INIT_LIST_HEAD(comp_q);
	list_splice_tail_init(&bfa->comp_q, comp_q);
}
void
bfa_comp_process(struct bfa_s *bfa, struct list_head *comp_q)
{
	struct list_head	*qe;
	struct list_head	*qen;
	struct bfa_cb_qe_s	*hcb_qe;

	list_for_each_safe(qe, qen, comp_q) {
		hcb_qe = (struct bfa_cb_qe_s *) qe;
		hcb_qe->cbfn(hcb_qe->cbarg, BFA_TRUE);
	}
}
void
bfa_comp_free(struct bfa_s *bfa, struct list_head *comp_q)
{
	struct list_head	*qe;
	struct bfa_cb_qe_s	*hcb_qe;

	while (!list_empty(comp_q)) {
		bfa_q_deq(comp_q, &qe);
		hcb_qe = (struct bfa_cb_qe_s *) qe;
		hcb_qe->cbfn(hcb_qe->cbarg, BFA_FALSE);
	}
}
/*
 * Return the list of PCI vendor/device id lists supported by this
 * BFA instance.
 */
void
bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids)
{
	static struct bfa_pciid_s __pciids[] = {
		{BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G2P},
		{BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G1P},
		{BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT},
		{BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT_FC},
	};

	*npciids = sizeof(__pciids) / sizeof(__pciids[0]);
	*pciids = __pciids;
}
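/*
 * Consumer sketch (hedged; add_supported_device() is a hypothetical
 * driver-side helper): the OS driver can walk this table to build its
 * own PCI id list at registration time:
 *
 *	struct bfa_pciid_s *ids;
 *	int n, i;
 *
 *	bfa_get_pciids(&ids, &n);
 *	for (i = 0; i < n; i++)
 *		add_supported_device(&ids[i]);
 */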
/*
 * Use this function to query the default struct bfa_iocfc_cfg_s value
 * (compiled into the BFA layer). The OS driver can then turn back and
 * overwrite entries that have been configured by the user.
 *
 * @param[in] cfg - pointer to bfa_ioc_cfg_t
 *
 * @return void
 *
 * Special Considerations: none
 */
void
bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg)
{
	cfg->fwcfg.num_fabrics = DEF_CFG_NUM_FABRICS;
	cfg->fwcfg.num_lports = DEF_CFG_NUM_LPORTS;
	cfg->fwcfg.num_rports = DEF_CFG_NUM_RPORTS;
	cfg->fwcfg.num_ioim_reqs = DEF_CFG_NUM_IOIM_REQS;
	cfg->fwcfg.num_tskim_reqs = DEF_CFG_NUM_TSKIM_REQS;
	cfg->fwcfg.num_fcxp_reqs = DEF_CFG_NUM_FCXP_REQS;
	cfg->fwcfg.num_uf_bufs = DEF_CFG_NUM_UF_BUFS;
	cfg->fwcfg.num_cqs = DEF_CFG_NUM_CQS;
	cfg->fwcfg.num_fwtio_reqs = 0;

	cfg->drvcfg.num_reqq_elems = DEF_CFG_NUM_REQQ_ELEMS;
	cfg->drvcfg.num_rspq_elems = DEF_CFG_NUM_RSPQ_ELEMS;
	cfg->drvcfg.num_sgpgs = DEF_CFG_NUM_SGPGS;
	cfg->drvcfg.num_sboot_tgts = DEF_CFG_NUM_SBOOT_TGTS;
	cfg->drvcfg.num_sboot_luns = DEF_CFG_NUM_SBOOT_LUNS;
	cfg->drvcfg.path_tov = BFA_FCPIM_PATHTOV_DEF;
	cfg->drvcfg.ioc_recover = BFA_FALSE;
	cfg->drvcfg.delay_comp = BFA_FALSE;
}
void
bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg)
{
	bfa_cfg_get_default(cfg);
	cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MIN;
	cfg->fwcfg.num_tskim_reqs = BFA_TSKIM_MIN;
	cfg->fwcfg.num_fcxp_reqs = BFA_FCXP_MIN;
	cfg->fwcfg.num_uf_bufs = BFA_UF_MIN;
	cfg->fwcfg.num_rports = BFA_RPORT_MIN;
	cfg->fwcfg.num_fwtio_reqs = 0;

	cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN;
	cfg->drvcfg.num_reqq_elems = BFA_REQQ_NELEMS_MIN;
	cfg->drvcfg.num_rspq_elems = BFA_RSPQ_NELEMS_MIN;
	cfg->drvcfg.min_cfg = BFA_TRUE;
}