/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
19 #include "bfi_ctreg.h"
22 BFA_TRC_FILE(CNA, IOC_CT);
25 * forward declarations
27 static bfa_boolean_t bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc);
28 static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc);
29 static void bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc);
30 static void bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc);
31 static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix);
32 static void bfa_ioc_ct_notify_hbfail(struct bfa_ioc_s *ioc);
33 static void bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc);
35 struct bfa_ioc_hwif_s hwif_ct;
38 * Called from bfa_ioc_attach() to map asic specific calls.
41 bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc)
43 hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
44 hwif_ct.ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
45 hwif_ct.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
46 hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
47 hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
48 hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
49 hwif_ct.ioc_notify_hbfail = bfa_ioc_ct_notify_hbfail;
50 hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
52 ioc->ioc_hwif = &hwif_ct;
56 * Return true if firmware of current driver matches the running firmware.
59 bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc)
61 enum bfi_ioc_state ioc_fwstate;
63 struct bfi_ioc_image_hdr_s fwhdr;
66 * Firmware match check is relevant only for CNA.
72 * If bios boot (flash based) -- do not increment usage count
74 if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) <
78 bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
79 usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
82 * If usage count is 0, always return TRUE.
85 writel(1, ioc->ioc_regs.ioc_usage_reg);
86 bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
91 ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
92 bfa_trc(ioc, ioc_fwstate);
95 * Use count cannot be non-zero and chip in uninitialized state.
97 bfa_assert(ioc_fwstate != BFI_IOC_UNINIT);
100 * Check if another driver with a different firmware is active
102 bfa_ioc_fwver_get(ioc, &fwhdr);
103 if (!bfa_ioc_fwver_cmp(ioc, &fwhdr)) {
104 bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
105 bfa_trc(ioc, usecnt);
110 * Same firmware version. Increment the reference count.
113 writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
114 bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
115 bfa_trc(ioc, usecnt);
120 bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc)
125 * Firmware lock is relevant only for CNA.
131 * If bios boot (flash based) -- do not decrement usage count
133 if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) <
138 * decrement usage count
140 bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
141 usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
142 bfa_assert(usecnt > 0);
145 writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
146 bfa_trc(ioc, usecnt);
148 bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
152 * Notify other functions on HB failure.
155 bfa_ioc_ct_notify_hbfail(struct bfa_ioc_s *ioc)
158 writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt);
159 /* Wait for halt to take effect */
160 readl(ioc->ioc_regs.ll_halt);
162 writel(__PSS_ERR_STATUS_SET, ioc->ioc_regs.err_set);
163 readl(ioc->ioc_regs.err_set);
168 * Host to LPU mailbox message addresses
170 static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = {
171 { HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 },
172 { HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 },
173 { HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2 },
174 { HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 }
178 * Host <-> LPU mailbox command/status registers - port 0
180 static struct { u32 hfn, lpu; } iocreg_mbcmd_p0[] = {
181 { HOSTFN0_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN0_MBOX0_CMD_STAT },
182 { HOSTFN1_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN1_MBOX0_CMD_STAT },
183 { HOSTFN2_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN2_MBOX0_CMD_STAT },
184 { HOSTFN3_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN3_MBOX0_CMD_STAT }
188 * Host <-> LPU mailbox command/status registers - port 1
190 static struct { u32 hfn, lpu; } iocreg_mbcmd_p1[] = {
191 { HOSTFN0_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN0_MBOX0_CMD_STAT },
192 { HOSTFN1_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN1_MBOX0_CMD_STAT },
193 { HOSTFN2_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN2_MBOX0_CMD_STAT },
194 { HOSTFN3_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN3_MBOX0_CMD_STAT }
198 bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc)
201 int pcifn = bfa_ioc_pcifn(ioc);
203 rb = bfa_ioc_bar0(ioc);
205 ioc->ioc_regs.hfn_mbox = rb + iocreg_fnreg[pcifn].hfn_mbox;
206 ioc->ioc_regs.lpu_mbox = rb + iocreg_fnreg[pcifn].lpu_mbox;
207 ioc->ioc_regs.host_page_num_fn = rb + iocreg_fnreg[pcifn].hfn_pgn;
209 if (ioc->port_id == 0) {
210 ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
211 ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
212 ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].hfn;
213 ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].lpu;
214 ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
216 ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG);
217 ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
218 ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].hfn;
219 ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].lpu;
220 ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
224 * PSS control registers
226 ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
227 ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG);
228 ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_425_CTL_REG);
229 ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_312_CTL_REG);
232 * IOC semaphore registers and serialization
234 ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG);
235 ioc->ioc_regs.ioc_usage_sem_reg = (rb + HOST_SEM1_REG);
236 ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG);
237 ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT);
242 ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
243 ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;
246 * err set reg : for notification of hb failure in fcmode
248 ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
252 * Initialize IOC to port mapping.
255 #define FNC_PERS_FN_SHIFT(__fn) ((__fn) * 8)
257 bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc)
259 void __iomem *rb = ioc->pcidev.pci_bar_kva;
263 * For catapult, base port id on personality register and IOC type
265 r32 = readl(rb + FNC_PERS_REG);
266 r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc));
267 ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH;
269 bfa_trc(ioc, bfa_ioc_pcifn(ioc));
270 bfa_trc(ioc, ioc->port_id);
274 * Set interrupt mode for a function: INTX or MSIX
277 bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
279 void __iomem *rb = ioc->pcidev.pci_bar_kva;
282 r32 = readl(rb + FNC_PERS_REG);
285 mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) &
289 * If already in desired mode, do not change anything
295 mode = __F0_INTX_STATUS_MSIX;
297 mode = __F0_INTX_STATUS_INTA;
299 r32 &= ~(__F0_INTX_STATUS << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
300 r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
303 writel(r32, rb + FNC_PERS_REG);
307 * Cleanup hw semaphore and usecnt registers
310 bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc)
314 bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
315 writel(0, ioc->ioc_regs.ioc_usage_reg);
316 bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
320 * Read the hw sem reg to make sure that it is locked
321 * before we clear it. If it is not locked, writing 1
322 * will lock it instead of clearing it.
324 readl(ioc->ioc_regs.ioc_sem_reg);
325 bfa_ioc_hw_sem_release(ioc);
331 * Check the firmware state to know if pll_init has been completed already
334 bfa_ioc_ct_pll_init_complete(void __iomem *rb)
336 if ((readl(rb + BFA_IOC0_STATE_REG) == BFI_IOC_OP) ||
337 (readl(rb + BFA_IOC1_STATE_REG) == BFI_IOC_OP))
344 bfa_ioc_ct_pll_init(void __iomem *rb, bfa_boolean_t fcmode)
346 u32 pll_sclk, pll_fclk, r32;
348 pll_sclk = __APP_PLL_312_LRESETN | __APP_PLL_312_ENARST |
349 __APP_PLL_312_RSEL200500 | __APP_PLL_312_P0_1(3U) |
350 __APP_PLL_312_JITLMT0_1(3U) |
351 __APP_PLL_312_CNTLMT0_1(1U);
352 pll_fclk = __APP_PLL_425_LRESETN | __APP_PLL_425_ENARST |
353 __APP_PLL_425_RSEL200500 | __APP_PLL_425_P0_1(3U) |
354 __APP_PLL_425_JITLMT0_1(3U) |
355 __APP_PLL_425_CNTLMT0_1(1U);
357 writel(0, (rb + OP_MODE));
358 writel(__APP_EMS_CMLCKSEL | __APP_EMS_REFCKBUFEN2 |
359 __APP_EMS_CHANNEL_SEL, (rb + ETH_MAC_SER_REG));
361 writel(__GLOBAL_FCOE_MODE, (rb + OP_MODE));
362 writel(__APP_EMS_REFCKBUFEN1, (rb + ETH_MAC_SER_REG));
364 writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG));
365 writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG));
366 writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
367 writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
368 writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
369 writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
370 writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
371 writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
372 writel(pll_sclk | __APP_PLL_312_LOGIC_SOFT_RESET,
373 rb + APP_PLL_312_CTL_REG);
374 writel(pll_fclk | __APP_PLL_425_LOGIC_SOFT_RESET,
375 rb + APP_PLL_425_CTL_REG);
376 writel(pll_sclk | __APP_PLL_312_LOGIC_SOFT_RESET | __APP_PLL_312_ENABLE,
377 rb + APP_PLL_312_CTL_REG);
378 writel(pll_fclk | __APP_PLL_425_LOGIC_SOFT_RESET | __APP_PLL_425_ENABLE,
379 rb + APP_PLL_425_CTL_REG);
380 readl(rb + HOSTFN0_INT_MSK);
382 writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
383 writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
384 writel(pll_sclk | __APP_PLL_312_ENABLE, rb + APP_PLL_312_CTL_REG);
385 writel(pll_fclk | __APP_PLL_425_ENABLE, rb + APP_PLL_425_CTL_REG);
387 writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P0));
388 writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P1));
390 r32 = readl((rb + PSS_CTL_REG));
391 r32 &= ~__PSS_LMEM_RESET;
392 writel(r32, (rb + PSS_CTL_REG));
395 writel(0, (rb + PMM_1T_RESET_REG_P0));
396 writel(0, (rb + PMM_1T_RESET_REG_P1));
399 writel(__EDRAM_BISTR_START, (rb + MBIST_CTL_REG));
401 r32 = readl((rb + MBIST_STAT_REG));
402 writel(0, (rb + MBIST_CTL_REG));
403 return BFA_STATUS_OK;