2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
20 #include <bfa_fwimg_priv.h>
21 #include <cna/bfa_cna_trcmod.h>
22 #include <cs/bfa_debug.h>
23 #include <bfi/bfi_ioc.h>
24 #include <bfi/bfi_ctreg.h>
25 #include <aen/bfa_aen_ioc.h>
26 #include <aen/bfa_aen.h>
27 #include <log/bfa_log_hal.h>
28 #include <defs/bfa_defs_pci.h>
30 BFA_TRC_FILE(CNA, IOC);
33 * IOC local definitions
35 #define BFA_IOC_TOV 2000 /* msecs */
36 #define BFA_IOC_HWSEM_TOV 500 /* msecs */
37 #define BFA_IOC_HB_TOV 500 /* msecs */
38 #define BFA_IOC_HWINIT_MAX 2
39 #define BFA_IOC_FWIMG_MINSZ (16 * 1024)
40 #define BFA_IOC_TOV_RECOVER BFA_IOC_HB_TOV
42 #define bfa_ioc_timer_start(__ioc) \
43 bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \
44 bfa_ioc_timeout, (__ioc), BFA_IOC_TOV)
45 #define bfa_ioc_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->ioc_timer)
47 #define BFA_DBG_FWTRC_ENTS (BFI_IOC_TRC_ENTS)
48 #define BFA_DBG_FWTRC_LEN \
49 (BFA_DBG_FWTRC_ENTS * sizeof(struct bfa_trc_s) + \
50 (sizeof(struct bfa_trc_mod_s) - \
51 BFA_TRC_MAX * sizeof(struct bfa_trc_s)))
52 #define BFA_DBG_FWTRC_OFF(_fn) (BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn))
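/*
 * Each PCI function has its own firmware trace region in IOC smem: the
 * region for function _fn starts BFA_DBG_FWTRC_LEN * (_fn) bytes past
 * BFI_IOC_TRC_OFF and holds BFA_DBG_FWTRC_ENTS trace entries.
 */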
55 * ASIC specific macros: see bfa_hw_cb.c and bfa_hw_ct.c for details.
58 #define bfa_ioc_firmware_lock(__ioc) \
59 ((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
60 #define bfa_ioc_firmware_unlock(__ioc) \
61 ((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
62 #define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
63 #define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
64 #define bfa_ioc_notify_hbfail(__ioc) \
65 ((__ioc)->ioc_hwif->ioc_notify_hbfail(__ioc))
66 #define bfa_ioc_is_optrom(__ioc) \
67 (bfi_image_get_size(BFA_IOC_FWIMG_TYPE(__ioc)) < BFA_IOC_FWIMG_MINSZ)
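/*
 * A firmware image smaller than BFA_IOC_FWIMG_MINSZ means the real image
 * lives in the adapter option ROM (flash); in that case the version check
 * is skipped and the IOC is booted with BFI_BOOT_TYPE_FLASH.
 */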
69 bfa_boolean_t bfa_auto_recover = BFA_TRUE;
72 * forward declarations
74 static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc);
75 static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc);
76 static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force);
77 static void bfa_ioc_timeout(void *ioc);
78 static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc);
79 static void bfa_ioc_send_disable(struct bfa_ioc_s *ioc);
80 static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc);
81 static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc);
82 static void bfa_ioc_hb_stop(struct bfa_ioc_s *ioc);
83 static void bfa_ioc_reset(struct bfa_ioc_s *ioc, bfa_boolean_t force);
84 static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc);
85 static void bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc);
86 static void bfa_ioc_recover(struct bfa_ioc_s *ioc);
87 static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
88 static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc);
95 * IOC state machine events
98 IOC_E_ENABLE = 1, /* IOC enable request */
99 IOC_E_DISABLE = 2, /* IOC disable request */
100 IOC_E_TIMEOUT = 3, /* f/w response timeout */
101 IOC_E_FWREADY = 4, /* f/w initialization done */
102 IOC_E_FWRSP_GETATTR = 5, /* IOC get attribute response */
103 IOC_E_FWRSP_ENABLE = 6, /* enable f/w response */
104 IOC_E_FWRSP_DISABLE = 7, /* disable f/w response */
105 IOC_E_HBFAIL = 8, /* heartbeat failure */
106 IOC_E_HWERROR = 9, /* hardware error interrupt */
107 IOC_E_SEMLOCKED = 10, /* h/w semaphore is locked */
108 IOC_E_DETACH = 11, /* driver detach cleanup */
111 bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc_s, enum ioc_event);
112 bfa_fsm_state_decl(bfa_ioc, fwcheck, struct bfa_ioc_s, enum ioc_event);
113 bfa_fsm_state_decl(bfa_ioc, mismatch, struct bfa_ioc_s, enum ioc_event);
114 bfa_fsm_state_decl(bfa_ioc, semwait, struct bfa_ioc_s, enum ioc_event);
115 bfa_fsm_state_decl(bfa_ioc, hwinit, struct bfa_ioc_s, enum ioc_event);
116 bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc_s, enum ioc_event);
117 bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc_s, enum ioc_event);
118 bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc_s, enum ioc_event);
119 bfa_fsm_state_decl(bfa_ioc, initfail, struct bfa_ioc_s, enum ioc_event);
120 bfa_fsm_state_decl(bfa_ioc, hbfail, struct bfa_ioc_s, enum ioc_event);
121 bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event);
122 bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event);
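/*
 * Mapping from state machine handlers to externally visible IOC states,
 * used by bfa_ioc_get_state() to report the current state.
 */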
124 static struct bfa_sm_table_s ioc_sm_table[] = {
125 {BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
126 {BFA_SM(bfa_ioc_sm_fwcheck), BFA_IOC_FWMISMATCH},
127 {BFA_SM(bfa_ioc_sm_mismatch), BFA_IOC_FWMISMATCH},
128 {BFA_SM(bfa_ioc_sm_semwait), BFA_IOC_SEMWAIT},
129 {BFA_SM(bfa_ioc_sm_hwinit), BFA_IOC_HWINIT},
130 {BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_HWINIT},
131 {BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
132 {BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
133 {BFA_SM(bfa_ioc_sm_initfail), BFA_IOC_INITFAIL},
134 {BFA_SM(bfa_ioc_sm_hbfail), BFA_IOC_HBFAIL},
135 {BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
136 {BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
140 * Reset entry actions -- initialize state machine
143 bfa_ioc_sm_reset_entry(struct bfa_ioc_s *ioc)
145 ioc->retry_count = 0;
146 ioc->auto_recover = bfa_auto_recover;
150 * Beginning state. IOC is in reset state.
153 bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event)
159 bfa_fsm_set_state(ioc, bfa_ioc_sm_fwcheck);
163 bfa_ioc_disable_comp(ioc);
170 bfa_sm_fault(ioc, event);
175 * Semaphore should be acquired for version check.
178 bfa_ioc_sm_fwcheck_entry(struct bfa_ioc_s *ioc)
180 bfa_ioc_hw_sem_get(ioc);
184 * Awaiting h/w semaphore to continue with version check.
187 bfa_ioc_sm_fwcheck(struct bfa_ioc_s *ioc, enum ioc_event event)
192 case IOC_E_SEMLOCKED:
193 if (bfa_ioc_firmware_lock(ioc)) {
194 ioc->retry_count = 0;
195 bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
197 bfa_ioc_hw_sem_release(ioc);
198 bfa_fsm_set_state(ioc, bfa_ioc_sm_mismatch);
203 bfa_ioc_disable_comp(ioc);
209 bfa_ioc_hw_sem_get_cancel(ioc);
210 bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
217 bfa_sm_fault(ioc, event);
222 * Notify enable completion callback and generate mismatch AEN.
225 bfa_ioc_sm_mismatch_entry(struct bfa_ioc_s *ioc)
228 * Provide enable completion callback and AEN notification only once.
230 if (ioc->retry_count == 0) {
231 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
232 bfa_ioc_aen_post(ioc, BFA_IOC_AEN_FWMISMATCH);
235 bfa_ioc_timer_start(ioc);
239 * Awaiting firmware version match.
242 bfa_ioc_sm_mismatch(struct bfa_ioc_s *ioc, enum ioc_event event)
248 bfa_fsm_set_state(ioc, bfa_ioc_sm_fwcheck);
252 bfa_ioc_disable_comp(ioc);
258 bfa_ioc_timer_stop(ioc);
259 bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
266 bfa_sm_fault(ioc, event);
271 * Request for semaphore.
274 bfa_ioc_sm_semwait_entry(struct bfa_ioc_s *ioc)
276 bfa_ioc_hw_sem_get(ioc);
280 * Awaiting semaphore for h/w initialization.
283 bfa_ioc_sm_semwait(struct bfa_ioc_s *ioc, enum ioc_event event)
288 case IOC_E_SEMLOCKED:
289 ioc->retry_count = 0;
290 bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
294 bfa_ioc_hw_sem_get_cancel(ioc);
295 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
299 bfa_sm_fault(ioc, event);
305 bfa_ioc_sm_hwinit_entry(struct bfa_ioc_s *ioc)
307 bfa_ioc_timer_start(ioc);
308 bfa_ioc_reset(ioc, BFA_FALSE);
312 * Hardware is being initialized. Interrupts are enabled.
313 * Holding hardware semaphore lock.
316 bfa_ioc_sm_hwinit(struct bfa_ioc_s *ioc, enum ioc_event event)
322 bfa_ioc_timer_stop(ioc);
323 bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
327 bfa_ioc_timer_stop(ioc);
334 if (ioc->retry_count < BFA_IOC_HWINIT_MAX) {
335 bfa_ioc_timer_start(ioc);
336 bfa_ioc_reset(ioc, BFA_TRUE);
340 bfa_ioc_hw_sem_release(ioc);
341 bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
345 bfa_ioc_hw_sem_release(ioc);
346 bfa_ioc_timer_stop(ioc);
347 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
351 bfa_sm_fault(ioc, event);
357 bfa_ioc_sm_enabling_entry(struct bfa_ioc_s *ioc)
359 bfa_ioc_timer_start(ioc);
360 bfa_ioc_send_enable(ioc);
364 * Host IOC function is being enabled, awaiting response from firmware.
365 * Semaphore is acquired.
368 bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event)
373 case IOC_E_FWRSP_ENABLE:
374 bfa_ioc_timer_stop(ioc);
375 bfa_ioc_hw_sem_release(ioc);
376 bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
380 bfa_ioc_timer_stop(ioc);
387 if (ioc->retry_count < BFA_IOC_HWINIT_MAX) {
388 bfa_reg_write(ioc->ioc_regs.ioc_fwstate,
390 bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
394 bfa_ioc_hw_sem_release(ioc);
395 bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
399 bfa_ioc_timer_stop(ioc);
400 bfa_ioc_hw_sem_release(ioc);
401 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
405 bfa_ioc_send_enable(ioc);
409 bfa_sm_fault(ioc, event);
415 bfa_ioc_sm_getattr_entry(struct bfa_ioc_s *ioc)
417 bfa_ioc_timer_start(ioc);
418 bfa_ioc_send_getattr(ioc);
422 * IOC configuration in progress. Timer is active.
425 bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
430 case IOC_E_FWRSP_GETATTR:
431 bfa_ioc_timer_stop(ioc);
432 bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
436 bfa_ioc_timer_stop(ioc);
442 bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
446 bfa_ioc_timer_stop(ioc);
447 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
451 bfa_sm_fault(ioc, event);
457 bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
459 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
460 bfa_ioc_hb_monitor(ioc);
461 bfa_ioc_aen_post(ioc, BFA_IOC_AEN_ENABLE);
465 bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event)
474 bfa_ioc_hb_stop(ioc);
475 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
481 * Hard error or IOC recovery by another function.
482 * Treat it the same as a heartbeat failure.
484 bfa_ioc_hb_stop(ioc);
486 * !!! fall through !!!
490 bfa_fsm_set_state(ioc, bfa_ioc_sm_hbfail);
494 bfa_sm_fault(ioc, event);
500 bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc)
502 bfa_ioc_aen_post(ioc, BFA_IOC_AEN_DISABLE);
503 bfa_ioc_timer_start(ioc);
504 bfa_ioc_send_disable(ioc);
508 * IOC is being disabled
511 bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event)
516 case IOC_E_FWRSP_DISABLE:
517 bfa_ioc_timer_stop(ioc);
518 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
522 bfa_ioc_timer_stop(ioc);
524 * !!! fall through !!!
528 bfa_reg_write(ioc->ioc_regs.ioc_fwstate, BFI_IOC_FAIL);
529 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
533 bfa_sm_fault(ioc, event);
538 * IOC disable completion entry.
541 bfa_ioc_sm_disabled_entry(struct bfa_ioc_s *ioc)
543 bfa_ioc_disable_comp(ioc);
547 bfa_ioc_sm_disabled(struct bfa_ioc_s *ioc, enum ioc_event event)
553 bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
557 ioc->cbfn->disable_cbfn(ioc->bfa);
564 bfa_ioc_firmware_unlock(ioc);
565 bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
569 bfa_sm_fault(ioc, event);
575 bfa_ioc_sm_initfail_entry(struct bfa_ioc_s *ioc)
577 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
578 bfa_ioc_timer_start(ioc);
582 * Hardware initialization failed.
585 bfa_ioc_sm_initfail(struct bfa_ioc_s *ioc, enum ioc_event event)
591 bfa_ioc_timer_stop(ioc);
592 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
596 bfa_ioc_timer_stop(ioc);
597 bfa_ioc_firmware_unlock(ioc);
598 bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
602 bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
606 bfa_sm_fault(ioc, event);
612 bfa_ioc_sm_hbfail_entry(struct bfa_ioc_s *ioc)
614 struct list_head *qe;
615 struct bfa_ioc_hbfail_notify_s *notify;
618 * Mark IOC as failed in hardware and stop firmware.
620 bfa_ioc_lpu_stop(ioc);
621 bfa_reg_write(ioc->ioc_regs.ioc_fwstate, BFI_IOC_FAIL);
624 * Notify other functions on HB failure.
626 bfa_ioc_notify_hbfail(ioc);
629 * Notify driver and common modules registered for notification.
631 ioc->cbfn->hbfail_cbfn(ioc->bfa);
632 list_for_each(qe, &ioc->hb_notify_q) {
633 notify = (struct bfa_ioc_hbfail_notify_s *)qe;
634 notify->cbfn(notify->cbarg);
638 * Flush any queued up mailbox requests.
640 bfa_ioc_mbox_hbfail(ioc);
641 bfa_ioc_aen_post(ioc, BFA_IOC_AEN_HBFAIL);
644 * Trigger auto-recovery after a delay.
646 if (ioc->auto_recover) {
647 bfa_timer_begin(ioc->timer_mod, &ioc->ioc_timer,
648 bfa_ioc_timeout, ioc, BFA_IOC_TOV_RECOVER);
653 * IOC heartbeat failure.
656 bfa_ioc_sm_hbfail(struct bfa_ioc_s *ioc, enum ioc_event event)
663 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
667 if (ioc->auto_recover)
668 bfa_ioc_timer_stop(ioc);
669 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
673 bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
678 * Recovery has already been initiated by another function.
684 * HB failure notification, ignore.
689 bfa_sm_fault(ioc, event);
696 * bfa_ioc_pvt BFA IOC private functions
700 bfa_ioc_disable_comp(struct bfa_ioc_s *ioc)
702 struct list_head *qe;
703 struct bfa_ioc_hbfail_notify_s *notify;
705 ioc->cbfn->disable_cbfn(ioc->bfa);
708 * Notify common modules registered for notification.
710 list_for_each(qe, &ioc->hb_notify_q) {
711 notify = (struct bfa_ioc_hbfail_notify_s *)qe;
712 notify->cbfn(notify->cbarg);
717 bfa_ioc_sem_timeout(void *ioc_arg)
719 struct bfa_ioc_s *ioc = (struct bfa_ioc_s *)ioc_arg;
721 bfa_ioc_hw_sem_get(ioc);
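/**
 * Busy-wait acquisition of a h/w semaphore register. A read of 0 means the
 * semaphore was acquired; a non-zero value means it is held elsewhere, in
 * which case the read is retried up to BFA_SEM_SPINCNT times.
 */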
725 bfa_ioc_sem_get(bfa_os_addr_t sem_reg)
729 #define BFA_SEM_SPINCNT 3000
731 r32 = bfa_reg_read(sem_reg);
733 while (r32 && (cnt < BFA_SEM_SPINCNT)) {
736 r32 = bfa_reg_read(sem_reg);
742 bfa_assert(cnt < BFA_SEM_SPINCNT);
747 bfa_ioc_sem_release(bfa_os_addr_t sem_reg)
749 bfa_reg_write(sem_reg, 1);
753 bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc)
758 * The first read of the semaphore register returns 0; subsequent reads
759 * return 1. The semaphore is released by writing 1 back to the register.
761 r32 = bfa_reg_read(ioc->ioc_regs.ioc_sem_reg);
763 bfa_fsm_send_event(ioc, IOC_E_SEMLOCKED);
767 bfa_timer_begin(ioc->timer_mod, &ioc->sem_timer, bfa_ioc_sem_timeout,
768 ioc, BFA_IOC_HWSEM_TOV);
772 bfa_ioc_hw_sem_release(struct bfa_ioc_s *ioc)
774 bfa_reg_write(ioc->ioc_regs.ioc_sem_reg, 1);
778 bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc)
780 bfa_timer_stop(&ioc->sem_timer);
784 * Initialize LPU local memory (aka secondary memory / SRAM)
787 bfa_ioc_lmem_init(struct bfa_ioc_s *ioc)
791 #define PSS_LMEM_INIT_TIME 10000
793 pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg);
794 pss_ctl &= ~__PSS_LMEM_RESET;
795 pss_ctl |= __PSS_LMEM_INIT_EN;
796 pss_ctl |= __PSS_I2C_CLK_DIV(3UL); /* i2c workaround 12.5khz clock */
797 bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl);
800 * wait for memory initialization to be complete
804 pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg);
806 } while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));
809 * If memory initialization is not successful, the IOC timeout will catch such failures.
812 bfa_assert(pss_ctl & __PSS_LMEM_INIT_DONE);
813 bfa_trc(ioc, pss_ctl);
815 pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
816 bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl);
820 bfa_ioc_lpu_start(struct bfa_ioc_s *ioc)
825 * Take processor out of reset.
827 pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg);
828 pss_ctl &= ~__PSS_LPU0_RESET;
830 bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl);
834 bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc)
839 * Put processors in reset.
841 pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg);
842 pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);
844 bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl);
848 * Get driver and firmware versions.
851 bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
856 u32 *fwsig = (u32 *) fwhdr;
858 pgnum = bfa_ioc_smem_pgnum(ioc, loff);
859 pgoff = bfa_ioc_smem_pgoff(ioc, loff);
860 bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);
862 for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32));
864 fwsig[i] = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
870 * Returns TRUE if the firmware md5 checksums are the same.
873 bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
875 struct bfi_ioc_image_hdr_s *drv_fwhdr;
878 drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
879 bfi_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);
881 for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
882 if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i]) {
884 bfa_trc(ioc, fwhdr->md5sum[i]);
885 bfa_trc(ioc, drv_fwhdr->md5sum[i]);
890 bfa_trc(ioc, fwhdr->md5sum[0]);
895 * Return true if current running version is valid. Firmware signature and
896 * execution context (driver/bios) must match.
899 bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc)
901 struct bfi_ioc_image_hdr_s fwhdr, *drv_fwhdr;
904 * If bios/efi boot (flash based) -- return true
906 if (bfa_ioc_is_optrom(ioc))
909 bfa_ioc_fwver_get(ioc, &fwhdr);
910 drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
911 bfi_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);
914 if (fwhdr.signature != drv_fwhdr->signature) {
915 bfa_trc(ioc, fwhdr.signature);
916 bfa_trc(ioc, drv_fwhdr->signature);
920 if (fwhdr.exec != drv_fwhdr->exec) {
921 bfa_trc(ioc, fwhdr.exec);
922 bfa_trc(ioc, drv_fwhdr->exec);
926 return bfa_ioc_fwver_cmp(ioc, &fwhdr);
930 * Conditionally flush any pending message from firmware at start.
933 bfa_ioc_msgflush(struct bfa_ioc_s *ioc)
937 r32 = bfa_reg_read(ioc->ioc_regs.lpu_mbox_cmd);
939 bfa_reg_write(ioc->ioc_regs.lpu_mbox_cmd, 1);
944 bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
946 enum bfi_ioc_state ioc_fwstate;
947 bfa_boolean_t fwvalid;
949 ioc_fwstate = bfa_reg_read(ioc->ioc_regs.ioc_fwstate);
952 ioc_fwstate = BFI_IOC_UNINIT;
954 bfa_trc(ioc, ioc_fwstate);
957 * check if firmware is valid
959 fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
960 BFA_FALSE : bfa_ioc_fwver_valid(ioc);
963 bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id);
968 * If hardware initialization is in progress (initiated by the other IOC),
969 * just wait for an initialization completion interrupt.
971 if (ioc_fwstate == BFI_IOC_INITING) {
972 bfa_trc(ioc, ioc_fwstate);
973 ioc->cbfn->reset_cbfn(ioc->bfa);
978 * If the IOC function is disabled and the firmware version is the same,
979 * just re-enable the IOC.
981 if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) {
982 bfa_trc(ioc, ioc_fwstate);
985 * When using MSI-X, any pending firmware ready event should
986 * be flushed; otherwise MSI-X interrupts are not delivered.
988 bfa_ioc_msgflush(ioc);
989 ioc->cbfn->reset_cbfn(ioc->bfa);
990 bfa_fsm_send_event(ioc, IOC_E_FWREADY);
995 * Initialize the h/w for any other states.
997 bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id);
1001 bfa_ioc_timeout(void *ioc_arg)
1003 struct bfa_ioc_s *ioc = (struct bfa_ioc_s *)ioc_arg;
1006 bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
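/**
 * Copy a message into the host-to-LPU mailbox registers (zero-padding up to
 * BFI_IOC_MSGLEN_MAX) and write the mailbox CMD register to notify the LPU.
 */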
1010 bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len)
1012 u32 *msgp = (u32 *) ioc_msg;
1015 bfa_trc(ioc, msgp[0]);
1018 bfa_assert(len <= BFI_IOC_MSGLEN_MAX);
1021 * first write msg to mailbox registers
1023 for (i = 0; i < len / sizeof(u32); i++)
1024 bfa_reg_write(ioc->ioc_regs.hfn_mbox + i * sizeof(u32),
1025 bfa_os_wtole(msgp[i]));
1027 for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
1028 bfa_reg_write(ioc->ioc_regs.hfn_mbox + i * sizeof(u32), 0);
1031 * write 1 to mailbox CMD to trigger LPU event
1033 bfa_reg_write(ioc->ioc_regs.hfn_mbox_cmd, 1);
1034 (void)bfa_reg_read(ioc->ioc_regs.hfn_mbox_cmd);
1038 bfa_ioc_send_enable(struct bfa_ioc_s *ioc)
1040 struct bfi_ioc_ctrl_req_s enable_req;
1042 bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
1043 bfa_ioc_portid(ioc));
1044 enable_req.ioc_class = ioc->ioc_mc;
1045 bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s));
1049 bfa_ioc_send_disable(struct bfa_ioc_s *ioc)
1051 struct bfi_ioc_ctrl_req_s disable_req;
1053 bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
1054 bfa_ioc_portid(ioc));
1055 bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req_s));
1059 bfa_ioc_send_getattr(struct bfa_ioc_s *ioc)
1061 struct bfi_ioc_getattr_req_s attr_req;
1063 bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
1064 bfa_ioc_portid(ioc));
1065 bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
1066 bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
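/**
 * Heartbeat timer callback. If the firmware heartbeat counter has not
 * advanced since the previous check, start IOC recovery; otherwise poll
 * the mailbox for pending commands and rearm the heartbeat timer.
 */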
1070 bfa_ioc_hb_check(void *cbarg)
1072 struct bfa_ioc_s *ioc = cbarg;
1075 hb_count = bfa_reg_read(ioc->ioc_regs.heartbeat);
1076 if (ioc->hb_count == hb_count) {
1077 bfa_log(ioc->logm, BFA_LOG_HAL_HEARTBEAT_FAILURE,
1079 bfa_ioc_recover(ioc);
1082 ioc->hb_count = hb_count;
1085 bfa_ioc_mbox_poll(ioc);
1086 bfa_timer_begin(ioc->timer_mod, &ioc->ioc_timer, bfa_ioc_hb_check,
1087 ioc, BFA_IOC_HB_TOV);
1091 bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc)
1093 ioc->hb_count = bfa_reg_read(ioc->ioc_regs.heartbeat);
1094 bfa_timer_begin(ioc->timer_mod, &ioc->ioc_timer, bfa_ioc_hb_check, ioc,
1099 bfa_ioc_hb_stop(struct bfa_ioc_s *ioc)
1101 bfa_timer_stop(&ioc->ioc_timer);
1105 * Initiate a full firmware download.
1108 bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
1118 * Initialize LMEM before code download
1120 bfa_ioc_lmem_init(ioc);
1123 * Flash based firmware boot
1125 bfa_trc(ioc, bfi_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)));
1126 if (bfa_ioc_is_optrom(ioc))
1127 boot_type = BFI_BOOT_TYPE_FLASH;
1128 fwimg = bfi_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), chunkno);
1131 pgnum = bfa_ioc_smem_pgnum(ioc, loff);
1132 pgoff = bfa_ioc_smem_pgoff(ioc, loff);
1134 bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);
1136 for (i = 0; i < bfi_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)); i++) {
1138 if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
1139 chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
1140 fwimg = bfi_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc),
1141 BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
1147 bfa_mem_write(ioc->ioc_regs.smem_page_start, loff,
1148 fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]);
1150 loff += sizeof(u32);
1153 * handle page offset wrap around
1155 loff = PSS_SMEM_PGOFF(loff);
1158 bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);
1162 bfa_reg_write(ioc->ioc_regs.host_page_num_fn,
1163 bfa_ioc_smem_pgnum(ioc, 0));
1166 * Set boot type and boot param at the end.
1168 bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_TYPE_OFF,
1169 bfa_os_swap32(boot_type));
1170 bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_PARAM_OFF,
1171 bfa_os_swap32(boot_param));
1175 bfa_ioc_reset(struct bfa_ioc_s *ioc, bfa_boolean_t force)
1177 bfa_ioc_hwinit(ioc, force);
1181 * Update BFA configuration from firmware configuration.
1184 bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc)
1186 struct bfi_ioc_attr_s *attr = ioc->attr;
1188 attr->adapter_prop = bfa_os_ntohl(attr->adapter_prop);
1189 attr->card_type = bfa_os_ntohl(attr->card_type);
1190 attr->maxfrsize = bfa_os_ntohs(attr->maxfrsize);
1192 bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
1196 * Attach time initialization of mbox logic.
1199 bfa_ioc_mbox_attach(struct bfa_ioc_s *ioc)
1201 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
1204 INIT_LIST_HEAD(&mod->cmd_q);
1205 for (mc = 0; mc < BFI_MC_MAX; mc++) {
1206 mod->mbhdlr[mc].cbfn = NULL;
1207 mod->mbhdlr[mc].cbarg = ioc->bfa;
1212 * Mbox poll timer -- restarts any pending mailbox requests.
1215 bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc)
1217 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
1218 struct bfa_mbox_cmd_s *cmd;
1222 * If no command pending, do nothing
1224 if (list_empty(&mod->cmd_q))
1228 * If previous command is not yet fetched by firmware, do nothing
1230 stat = bfa_reg_read(ioc->ioc_regs.hfn_mbox_cmd);
1235 * Enqueue command to firmware.
1237 bfa_q_deq(&mod->cmd_q, &cmd);
1238 bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
1242 * Cleanup any pending requests.
1245 bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc)
1247 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
1248 struct bfa_mbox_cmd_s *cmd;
1250 while (!list_empty(&mod->cmd_q))
1251 bfa_q_deq(&mod->cmd_q, &cmd);
1259 * Interface used by diag module to do firmware boot with memory test
1260 * as the entry vector.
1263 bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_param)
1267 bfa_ioc_stats(ioc, ioc_boots);
1269 if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
1273 * Initialize IOC state of all functions on a chip reset.
1275 rb = ioc->pcidev.pci_bar_kva;
1276 if (boot_param == BFI_BOOT_TYPE_MEMTEST) {
1277 bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_MEMTEST);
1278 bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_MEMTEST);
1280 bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_INITING);
1281 bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_INITING);
1284 bfa_ioc_download_fw(ioc, boot_type, boot_param);
1287 * Enable interrupts just before starting LPU
1289 ioc->cbfn->reset_cbfn(ioc->bfa);
1290 bfa_ioc_lpu_start(ioc);
1294 * Enable/disable IOC failure auto recovery.
1297 bfa_ioc_auto_recover(bfa_boolean_t auto_recover)
1299 bfa_auto_recover = auto_recover;
1304 bfa_ioc_is_operational(struct bfa_ioc_s *ioc)
1306 return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
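/**
 * Read one inbound message, word by word, from the LPU-to-host mailbox
 * registers into the caller supplied buffer.
 */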
1310 bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg)
1319 for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
1321 r32 = bfa_reg_read(ioc->ioc_regs.lpu_mbox +
1323 msgp[i] = bfa_os_htonl(r32);
1327 * turn off mailbox interrupt by clearing mailbox status
1329 bfa_reg_write(ioc->ioc_regs.lpu_mbox_cmd, 1);
1330 bfa_reg_read(ioc->ioc_regs.lpu_mbox_cmd);
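/**
 * Handler for IOC class (BFI_MC_IOC) mailbox messages: heartbeats, firmware
 * ready events and enable/disable/getattr responses are translated into
 * state machine events.
 */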
1334 bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
1336 union bfi_ioc_i2h_msg_u *msg;
1338 msg = (union bfi_ioc_i2h_msg_u *)m;
1340 bfa_ioc_stats(ioc, ioc_isrs);
1342 switch (msg->mh.msg_id) {
1343 case BFI_IOC_I2H_HBEAT:
1346 case BFI_IOC_I2H_READY_EVENT:
1347 bfa_fsm_send_event(ioc, IOC_E_FWREADY);
1350 case BFI_IOC_I2H_ENABLE_REPLY:
1351 bfa_fsm_send_event(ioc, IOC_E_FWRSP_ENABLE);
1354 case BFI_IOC_I2H_DISABLE_REPLY:
1355 bfa_fsm_send_event(ioc, IOC_E_FWRSP_DISABLE);
1358 case BFI_IOC_I2H_GETATTR_REPLY:
1359 bfa_ioc_getattr_reply(ioc);
1363 bfa_trc(ioc, msg->mh.msg_id);
1369 * IOC attach time initialization and setup.
1371 * @param[in] ioc memory for IOC
1372 * @param[in] bfa driver instance structure
1373 * @param[in] trcmod kernel trace module
1374 * @param[in] aen kernel aen event module
1375 * @param[in] logm kernel logging module
1378 bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn,
1379 struct bfa_timer_mod_s *timer_mod, struct bfa_trc_mod_s *trcmod,
1380 struct bfa_aen_s *aen, struct bfa_log_mod_s *logm)
1384 ioc->timer_mod = timer_mod;
1385 ioc->trcmod = trcmod;
1388 ioc->fcmode = BFA_FALSE;
1389 ioc->pllinit = BFA_FALSE;
1390 ioc->dbg_fwsave_once = BFA_TRUE;
1392 bfa_ioc_mbox_attach(ioc);
1393 INIT_LIST_HEAD(&ioc->hb_notify_q);
1395 bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
1399 * Driver detach time IOC cleanup.
1402 bfa_ioc_detach(struct bfa_ioc_s *ioc)
1404 bfa_fsm_send_event(ioc, IOC_E_DETACH);
1408 * Setup IOC PCI properties.
1410 * @param[in] pcidev PCI device information for this IOC
1413 bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
1417 ioc->pcidev = *pcidev;
1418 ioc->ctdev = bfa_asic_id_ct(ioc->pcidev.device_id);
1419 ioc->cna = ioc->ctdev && !ioc->fcmode;
1422 * Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c
1425 bfa_ioc_set_ct_hwif(ioc);
1427 bfa_ioc_set_cb_hwif(ioc);
1429 bfa_ioc_map_port(ioc);
1430 bfa_ioc_reg_init(ioc);
1434 * Initialize IOC dma memory
1436 * @param[in] dm_kva kernel virtual address of IOC dma memory
1437 * @param[in] dm_pa physical address of IOC dma memory
1440 bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa)
1443 * dma memory for firmware attribute
1445 ioc->attr_dma.kva = dm_kva;
1446 ioc->attr_dma.pa = dm_pa;
1447 ioc->attr = (struct bfi_ioc_attr_s *)dm_kva;
1451 * Return size of dma memory required.
1454 bfa_ioc_meminfo(void)
1456 return BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ);
1460 bfa_ioc_enable(struct bfa_ioc_s *ioc)
1462 bfa_ioc_stats(ioc, ioc_enables);
1463 ioc->dbg_fwsave_once = BFA_TRUE;
1465 bfa_fsm_send_event(ioc, IOC_E_ENABLE);
1469 bfa_ioc_disable(struct bfa_ioc_s *ioc)
1471 bfa_ioc_stats(ioc, ioc_disables);
1472 bfa_fsm_send_event(ioc, IOC_E_DISABLE);
1476 * Returns the memory size required for saving the firmware trace in case of a crash.
1477 * The driver must call this interface to allocate the memory required for
1478 * automatic saving of the firmware trace. The driver should call
1479 * bfa_ioc_debug_memclaim() right after bfa_ioc_attach() to set up this memory.
1483 bfa_ioc_debug_trcsz(bfa_boolean_t auto_recover)
1485 return (auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
1489 * Initialize memory for saving the firmware trace. The driver must initialize
1490 * trace memory before calling bfa_ioc_enable().
1493 bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave)
1495 ioc->dbg_fwsave = dbg_fwsave;
1496 ioc->dbg_fwsave_len = bfa_ioc_debug_trcsz(ioc->auto_recover);
1500 bfa_ioc_smem_pgnum(struct bfa_ioc_s *ioc, u32 fmaddr)
1502 return PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, fmaddr);
1506 bfa_ioc_smem_pgoff(struct bfa_ioc_s *ioc, u32 fmaddr)
1508 return PSS_SMEM_PGOFF(fmaddr);
1512 * Register mailbox message handler functions
1514 * @param[in] ioc IOC instance
1515 * @param[in] mcfuncs message class handler functions
1518 bfa_ioc_mbox_register(struct bfa_ioc_s *ioc, bfa_ioc_mbox_mcfunc_t *mcfuncs)
1520 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
1523 for (mc = 0; mc < BFI_MC_MAX; mc++)
1524 mod->mbhdlr[mc].cbfn = mcfuncs[mc];
1528 * Register mailbox message handler function, to be called by common modules
1531 bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
1532 bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
1534 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
1536 mod->mbhdlr[mc].cbfn = cbfn;
1537 mod->mbhdlr[mc].cbarg = cbarg;
1541 * Queue a mailbox command request to firmware. Waits if the mailbox is busy.
1542 * It is the caller's responsibility to serialize requests.
1544 * @param[in] ioc IOC instance
1545 * @param[in] cmd Mailbox command
1548 bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd)
1550 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
1554 * If a previous command is pending, queue new command
1556 if (!list_empty(&mod->cmd_q)) {
1557 list_add_tail(&cmd->qe, &mod->cmd_q);
1562 * If mailbox is busy, queue command for poll timer
1564 stat = bfa_reg_read(ioc->ioc_regs.hfn_mbox_cmd);
1566 list_add_tail(&cmd->qe, &mod->cmd_q);
1571 * mailbox is free -- queue command to firmware
1573 bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
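/*
 * Illustrative use (a sketch, not taken from this file): a common module
 * typically registers its receive handler once and then queues outgoing
 * requests, roughly:
 *
 *	bfa_ioc_mbox_regisr(ioc, BFI_MC_PORT, port_isr, port);
 *	...
 *	bfa_ioc_mbox_queue(ioc, &cmd);
 */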
1577 * Handle mailbox interrupts
1580 bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc)
1582 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
1583 struct bfi_mbmsg_s m;
1586 bfa_ioc_msgget(ioc, &m);
1589 * Treat IOC message class as special.
1591 mc = m.mh.msg_class;
1592 if (mc == BFI_MC_IOC) {
1593 bfa_ioc_isr(ioc, &m);
1597 if ((mc > BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
1600 mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
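/**
 * Hardware error interrupt handler -- forward the error to the state machine.
 */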
1604 bfa_ioc_error_isr(struct bfa_ioc_s *ioc)
1606 bfa_fsm_send_event(ioc, IOC_E_HWERROR);
1610 bfa_ioc_set_fcmode(struct bfa_ioc_s *ioc)
1612 ioc->fcmode = BFA_TRUE;
1613 ioc->port_id = bfa_ioc_pcifn(ioc);
1616 #ifndef BFA_BIOS_BUILD
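/*
 * The helpers below are compiled out for BIOS/EFI builds.
 */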
1619 * return true if IOC is disabled
1622 bfa_ioc_is_disabled(struct bfa_ioc_s *ioc)
1624 return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling)
1625 || bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
1629 * return true if IOC firmware is different.
1632 bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc)
1634 return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset)
1635 || bfa_fsm_cmp_state(ioc, bfa_ioc_sm_fwcheck)
1636 || bfa_fsm_cmp_state(ioc, bfa_ioc_sm_mismatch);
1639 #define bfa_ioc_state_disabled(__sm) \
1640 (((__sm) == BFI_IOC_UNINIT) || \
1641 ((__sm) == BFI_IOC_INITING) || \
1642 ((__sm) == BFI_IOC_HWINIT) || \
1643 ((__sm) == BFI_IOC_DISABLED) || \
1644 ((__sm) == BFI_IOC_FAIL) || \
1645 ((__sm) == BFI_IOC_CFG_DISABLED))
1648 * Check if the adapter is disabled -- both IOCs should be in a disabled state.
1652 bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc)
1655 bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva;
1657 if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled))
1660 ioc_state = bfa_reg_read(rb + BFA_IOC0_STATE_REG);
1661 if (!bfa_ioc_state_disabled(ioc_state))
1664 ioc_state = bfa_reg_read(rb + BFA_IOC1_STATE_REG);
1665 if (!bfa_ioc_state_disabled(ioc_state))
1672 * Add to the IOC heartbeat failure notification queue. To be used by common modules.
1676 bfa_ioc_hbfail_register(struct bfa_ioc_s *ioc,
1677 struct bfa_ioc_hbfail_notify_s *notify)
1679 list_add_tail(¬ify->qe, &ioc->hb_notify_q);
1682 #define BFA_MFG_NAME "Brocade"
1684 bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
1685 struct bfa_adapter_attr_s *ad_attr)
1687 struct bfi_ioc_attr_s *ioc_attr;
1689 ioc_attr = ioc->attr;
1691 bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
1692 bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
1693 bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
1694 bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
1695 bfa_os_memcpy(&ad_attr->vpd, &ioc_attr->vpd,
1696 sizeof(struct bfa_mfg_vpd_s));
1698 ad_attr->nports = bfa_ioc_get_nports(ioc);
1699 ad_attr->max_speed = bfa_ioc_speed_sup(ioc);
1701 bfa_ioc_get_adapter_model(ioc, ad_attr->model);
1702 /* For now, model descr uses the same model string */
1703 bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);
1705 ad_attr->card_type = ioc_attr->card_type;
1706 ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);
1708 if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
1709 ad_attr->prototype = 1;
1711 ad_attr->prototype = 0;
1713 ad_attr->pwwn = bfa_ioc_get_pwwn(ioc);
1714 ad_attr->mac = bfa_ioc_get_mac(ioc);
1716 ad_attr->pcie_gen = ioc_attr->pcie_gen;
1717 ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
1718 ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
1719 ad_attr->asic_rev = ioc_attr->asic_rev;
1721 bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);
1723 ad_attr->cna_capable = ioc->cna;
1727 bfa_ioc_get_type(struct bfa_ioc_s *ioc)
1729 if (!ioc->ctdev || ioc->fcmode)
1730 return BFA_IOC_TYPE_FC;
1731 else if (ioc->ioc_mc == BFI_MC_IOCFC)
1732 return BFA_IOC_TYPE_FCoE;
1733 else if (ioc->ioc_mc == BFI_MC_LL)
1734 return BFA_IOC_TYPE_LL;
1736 bfa_assert(ioc->ioc_mc == BFI_MC_LL);
1737 return BFA_IOC_TYPE_LL;
1742 bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num)
1744 bfa_os_memset((void *)serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
1745 bfa_os_memcpy((void *)serial_num,
1746 (void *)ioc->attr->brcd_serialnum,
1747 BFA_ADAPTER_SERIAL_NUM_LEN);
1751 bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver)
1753 bfa_os_memset((void *)fw_ver, 0, BFA_VERSION_LEN);
1754 bfa_os_memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
1758 bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev)
1760 bfa_assert(chip_rev);
1762 bfa_os_memset((void *)chip_rev, 0, BFA_IOC_CHIP_REV_LEN);
1768 chip_rev[4] = ioc->attr->asic_rev;
1773 bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver)
1775 bfa_os_memset((void *)optrom_ver, 0, BFA_VERSION_LEN);
1776 bfa_os_memcpy(optrom_ver, ioc->attr->optrom_version,
1781 bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer)
1783 bfa_os_memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
1784 bfa_os_memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
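/**
 * Build the adapter model name from the port count and the maximum supported
 * speed reported by the IOC.
 */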
1788 bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model)
1790 struct bfi_ioc_attr_s *ioc_attr;
1795 bfa_os_memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN);
1797 ioc_attr = ioc->attr;
1799 nports = bfa_ioc_get_nports(ioc);
1800 max_speed = bfa_ioc_speed_sup(ioc);
1805 if (max_speed == 10) {
1806 strcpy(model, "BR-10?0");
1807 model[5] = '0' + nports;
1809 strcpy(model, "Brocade-??5");
1810 model[8] = '0' + max_speed;
1811 model[9] = '0' + nports;
1816 bfa_ioc_get_state(struct bfa_ioc_s *ioc)
1818 return bfa_sm_to_state(ioc_sm_table, ioc->fsm);
1822 bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr)
1824 bfa_os_memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr_s));
1826 ioc_attr->state = bfa_ioc_get_state(ioc);
1827 ioc_attr->port_id = ioc->port_id;
1829 ioc_attr->ioc_type = bfa_ioc_get_type(ioc);
1831 bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);
1833 ioc_attr->pci_attr.device_id = ioc->pcidev.device_id;
1834 ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func;
1835 bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
1842 bfa_ioc_get_pwwn(struct bfa_ioc_s *ioc)
1844 return ioc->attr->pwwn;
1848 bfa_ioc_get_nwwn(struct bfa_ioc_s *ioc)
1850 return ioc->attr->nwwn;
1854 bfa_ioc_get_adid(struct bfa_ioc_s *ioc)
1856 return ioc->attr->mfg_pwwn;
1860 bfa_ioc_get_mac(struct bfa_ioc_s *ioc)
1863 * Currently mfg mac is used as FCoE enode mac (not configured by PBC)
1865 if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_FCoE)
1866 return bfa_ioc_get_mfg_mac(ioc);
1868 return ioc->attr->mac;
1872 bfa_ioc_get_mfg_pwwn(struct bfa_ioc_s *ioc)
1874 return ioc->attr->mfg_pwwn;
1878 bfa_ioc_get_mfg_nwwn(struct bfa_ioc_s *ioc)
1880 return ioc->attr->mfg_nwwn;
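/**
 * Return the manufacturing MAC address, offset by the PCI function number so
 * that each function gets a distinct address.
 */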
1884 bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc)
1888 mac = ioc->attr->mfg_mac;
1889 mac.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc);
1895 bfa_ioc_get_fcmode(struct bfa_ioc_s *ioc)
1897 return ioc->fcmode || !bfa_asic_id_ct(ioc->pcidev.device_id);
1901 * Send AEN notification
1904 bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event)
1906 union bfa_aen_data_u aen_data;
1907 struct bfa_log_mod_s *logmod = ioc->logm;
1909 enum bfa_ioc_type_e ioc_type;
1911 bfa_log(logmod, BFA_LOG_CREATE_ID(BFA_AEN_CAT_IOC, event), inst_num);
1913 memset(&aen_data.ioc.pwwn, 0, sizeof(aen_data.ioc.pwwn));
1914 memset(&aen_data.ioc.mac, 0, sizeof(aen_data.ioc.mac));
1915 ioc_type = bfa_ioc_get_type(ioc);
1917 case BFA_IOC_TYPE_FC:
1918 aen_data.ioc.pwwn = bfa_ioc_get_pwwn(ioc);
1920 case BFA_IOC_TYPE_FCoE:
1921 aen_data.ioc.pwwn = bfa_ioc_get_pwwn(ioc);
1922 aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
1924 case BFA_IOC_TYPE_LL:
1925 aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
1928 bfa_assert(ioc_type == BFA_IOC_TYPE_FC);
1931 aen_data.ioc.ioc_type = ioc_type;
1935 * Retrieve saved firmware trace from a prior IOC failure.
1938 bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
1942 if (ioc->dbg_fwsave_len == 0)
1943 return BFA_STATUS_ENOFSAVE;
1946 if (tlen > ioc->dbg_fwsave_len)
1947 tlen = ioc->dbg_fwsave_len;
1949 bfa_os_memcpy(trcdata, ioc->dbg_fwsave, tlen);
1951 return BFA_STATUS_OK;
1955 * Clear saved firmware trace
1958 bfa_ioc_debug_fwsave_clear(struct bfa_ioc_s *ioc)
1960 ioc->dbg_fwsave_once = BFA_TRUE;
1964 * Retrieve saved firmware trace from a prior IOC failure.
1967 bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
1970 u32 loff = BFA_DBG_FWTRC_OFF(bfa_ioc_portid(ioc));
1972 u32 *tbuf = trcdata, r32;
1974 bfa_trc(ioc, *trclen);
1976 pgnum = bfa_ioc_smem_pgnum(ioc, loff);
1977 loff = bfa_ioc_smem_pgoff(ioc, loff);
1980 * Hold semaphore to serialize pll init and fwtrc.
1982 if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg))
1983 return BFA_STATUS_FAILED;
1985 bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);
1988 if (tlen > BFA_DBG_FWTRC_LEN)
1989 tlen = BFA_DBG_FWTRC_LEN;
1990 tlen /= sizeof(u32);
1994 for (i = 0; i < tlen; i++) {
1995 r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
1996 tbuf[i] = bfa_os_ntohl(r32);
1997 loff += sizeof(u32);
2000 * handle page offset wrap around
2002 loff = PSS_SMEM_PGOFF(loff);
2005 bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);
2008 bfa_reg_write(ioc->ioc_regs.host_page_num_fn,
2009 bfa_ioc_smem_pgnum(ioc, 0));
2012 * release semaphore.
2014 bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);
2016 bfa_trc(ioc, pgnum);
2018 *trclen = tlen * sizeof(u32);
2019 return BFA_STATUS_OK;
2023 * Save firmware trace if configured.
2026 bfa_ioc_debug_save(struct bfa_ioc_s *ioc)
2030 if (ioc->dbg_fwsave_len) {
2031 tlen = ioc->dbg_fwsave_len;
2032 bfa_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
2037 * Firmware failure detected. Start recovery actions.
2040 bfa_ioc_recover(struct bfa_ioc_s *ioc)
2042 if (ioc->dbg_fwsave_once) {
2043 ioc->dbg_fwsave_once = BFA_FALSE;
2044 bfa_ioc_debug_save(ioc);
2047 bfa_ioc_stats(ioc, ioc_hbfails);
2048 bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
2054 bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event)
2059 bfa_ioc_recover(struct bfa_ioc_s *ioc)