 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
#include "bfa_defs_svc.h"
BFA_TRC_FILE(CNA, IOC);
 * IOC local definitions
#define BFA_IOC_TOV 3000 /* msecs */
#define BFA_IOC_HWSEM_TOV 500 /* msecs */
#define BFA_IOC_HB_TOV 500 /* msecs */
#define BFA_IOC_TOV_RECOVER BFA_IOC_HB_TOV
#define BFA_IOC_POLL_TOV BFA_TIMER_FREQ
#define bfa_ioc_timer_start(__ioc) \
bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \
bfa_ioc_timeout, (__ioc), BFA_IOC_TOV)
#define bfa_ioc_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->ioc_timer)
#define bfa_hb_timer_start(__ioc) \
bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->hb_timer, \
bfa_ioc_hb_check, (__ioc), BFA_IOC_HB_TOV)
#define bfa_hb_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->hb_timer)
#define BFA_DBG_FWTRC_OFF(_fn) (BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn))
 * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details.
#define bfa_ioc_firmware_lock(__ioc) \
((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
#define bfa_ioc_firmware_unlock(__ioc) \
((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
#define bfa_ioc_notify_fail(__ioc) \
((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
#define bfa_ioc_sync_start(__ioc) \
((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
#define bfa_ioc_sync_join(__ioc) \
((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
#define bfa_ioc_sync_leave(__ioc) \
((__ioc)->ioc_hwif->ioc_sync_leave(__ioc))
#define bfa_ioc_sync_ack(__ioc) \
((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
#define bfa_ioc_sync_complete(__ioc) \
((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))
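/*
 * The macro below treats a mailbox command as pending if either the
 * driver-side command queue is non-empty or the previous command written
 * to the mailbox CMD register has not yet been consumed by firmware
 * (the register still reads non-zero).
 */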
#define bfa_ioc_mbox_cmd_pending(__ioc) \
(!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
readl((__ioc)->ioc_regs.hfn_mbox_cmd))
bfa_boolean_t bfa_auto_recover = BFA_TRUE;
 * forward declarations
static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc);
static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force);
static void bfa_ioc_timeout(void *ioc);
static void bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_disable(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc);
static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc);
static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc);
static void bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc);
static void bfa_ioc_recover(struct bfa_ioc_s *ioc);
static void bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc);
static void bfa_ioc_event_notify(struct bfa_ioc_s *ioc,
enum bfa_ioc_event_e event);
static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc);
static void bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc);
static void bfa_ioc_fail_notify(struct bfa_ioc_s *ioc);
static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc);
 * IOC state machine definitions/declarations
enum ioc_event {
IOC_E_RESET = 1, /* IOC reset request */
IOC_E_ENABLE = 2, /* IOC enable request */
IOC_E_DISABLE = 3, /* IOC disable request */
IOC_E_DETACH = 4, /* driver detach cleanup */
IOC_E_ENABLED = 5, /* f/w enabled */
IOC_E_FWRSP_GETATTR = 6, /* IOC get attribute response */
IOC_E_DISABLED = 7, /* f/w disabled */
IOC_E_PFFAILED = 8, /* failure notice by iocpf sm */
IOC_E_HBFAIL = 9, /* heartbeat failure */
IOC_E_HWERROR = 10, /* hardware error interrupt */
IOC_E_TIMEOUT = 11, /* timeout */
IOC_E_HWFAILED = 12, /* PCI mapping failure notice */
IOC_E_FWRSP_ACQ_ADDR = 13, /* Acquiring address */
};
bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, acq_addr, struct bfa_ioc_s, enum ioc_event);
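/*
 * Each bfa_fsm_state_decl() above expands (see bfa_cs.h) into forward
 * declarations for the state's entry action and its event handler,
 * e.g. bfa_ioc_sm_uninit_entry() and bfa_ioc_sm_uninit().
 */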
static struct bfa_sm_table_s ioc_sm_table[] = {
{BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
{BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
{BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
{BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
{BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
{BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL},
{BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
{BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
{BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
{BFA_SM(bfa_ioc_sm_hwfail), BFA_IOC_HWFAIL},
{BFA_SM(bfa_ioc_sm_acq_addr), BFA_IOC_ACQ_ADDR},
};
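/*
 * For reference, roughly how this table is consumed: bfa_sm_to_state()
 * (bfa_cs.h) walks the entries until the current state-handler pointer
 * matches and returns the associated state enum. A minimal sketch,
 * assuming the bfa_cs.h definitions:
 *
 *	static int
 *	sm_to_state(struct bfa_sm_table_s *smt, bfa_sm_t sm)
 *	{
 *		int i = 0;
 *
 *		while (smt[i].sm && smt[i].sm != sm)
 *			i++;
 *		return smt[i].state;
 *	}
 */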
 * IOCPF state machine definitions/declarations
#define bfa_iocpf_timer_start(__ioc) \
bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \
bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV)
#define bfa_iocpf_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->ioc_timer)
#define bfa_iocpf_poll_timer_start(__ioc) \
bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \
bfa_iocpf_poll_timeout, (__ioc), BFA_IOC_POLL_TOV)
#define bfa_sem_timer_start(__ioc) \
bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->sem_timer, \
bfa_iocpf_sem_timeout, (__ioc), BFA_IOC_HWSEM_TOV)
#define bfa_sem_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->sem_timer)
 * Forward declarations for iocpf state machine
static void bfa_iocpf_timeout(void *ioc_arg);
static void bfa_iocpf_sem_timeout(void *ioc_arg);
static void bfa_iocpf_poll_timeout(void *ioc_arg);
 * IOCPF state machine events
enum iocpf_event {
IOCPF_E_ENABLE = 1, /* IOCPF enable request */
IOCPF_E_DISABLE = 2, /* IOCPF disable request */
IOCPF_E_STOP = 3, /* stop on driver detach */
IOCPF_E_FWREADY = 4, /* f/w initialization done */
IOCPF_E_FWRSP_ENABLE = 5, /* enable f/w response */
IOCPF_E_FWRSP_DISABLE = 6, /* disable f/w response */
IOCPF_E_FAIL = 7, /* failure notice by ioc sm */
IOCPF_E_INITFAIL = 8, /* init fail notice by ioc sm */
IOCPF_E_GETATTRFAIL = 9, /* init fail notice by ioc sm */
IOCPF_E_SEMLOCKED = 10, /* h/w semaphore is locked */
IOCPF_E_TIMEOUT = 11, /* f/w response timeout */
IOCPF_E_SEM_ERROR = 12, /* h/w sem mapping error */
};
enum bfa_iocpf_state {
BFA_IOCPF_RESET = 1, /* IOC is in reset state */
BFA_IOCPF_SEMWAIT = 2, /* Waiting for IOC h/w semaphore */
BFA_IOCPF_HWINIT = 3, /* IOC h/w is being initialized */
BFA_IOCPF_READY = 4, /* IOCPF is initialized */
BFA_IOCPF_INITFAIL = 5, /* IOCPF failed */
BFA_IOCPF_FAIL = 6, /* IOCPF failed */
BFA_IOCPF_DISABLING = 7, /* IOCPF is being disabled */
BFA_IOCPF_DISABLED = 8, /* IOCPF is disabled */
BFA_IOCPF_FWMISMATCH = 9, /* IOC f/w different from drivers */
};
bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf_s,
enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf_s,
enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf_s, enum iocpf_event);
static struct bfa_sm_table_s iocpf_sm_table[] = {
{BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
{BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
{BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
{BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT},
{BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
{BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
{BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
{BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL},
{BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
{BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL},
{BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
{BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
{BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING},
{BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
};
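/*
 * As with ioc_sm_table above, bfa_sm_to_state() maps the current iocpf
 * state handler to its bfa_iocpf_state value through this table.
 */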
 * Beginning state. IOC uninit state.
bfa_ioc_sm_uninit_entry(struct bfa_ioc_s *ioc)
 * IOC is in uninit state.
bfa_ioc_sm_uninit(struct bfa_ioc_s *ioc, enum ioc_event event)
bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
bfa_sm_fault(ioc, event);
 * Reset entry actions -- initialize state machine
bfa_ioc_sm_reset_entry(struct bfa_ioc_s *ioc)
bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
 * IOC is in reset state.
bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event)
bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
bfa_ioc_disable_comp(ioc);
bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
bfa_sm_fault(ioc, event);
bfa_ioc_sm_enabling_entry(struct bfa_ioc_s *ioc)
bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event)
bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
/* !!! fall through !!! */
ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
if (event != IOC_E_PFFAILED)
bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
bfa_sm_fault(ioc, event);
bfa_ioc_sm_getattr_entry(struct bfa_ioc_s *ioc)
bfa_ioc_timer_start(ioc);
bfa_ioc_send_getattr(ioc);
 * IOC configuration in progress. Timer is active.
bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
case IOC_E_FWRSP_GETATTR:
bfa_ioc_timer_stop(ioc);
bfa_ioc_check_attr_wwns(ioc);
bfa_ioc_hb_monitor(ioc);
bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
case IOC_E_FWRSP_ACQ_ADDR:
bfa_ioc_timer_stop(ioc);
bfa_ioc_hb_monitor(ioc);
bfa_fsm_set_state(ioc, bfa_ioc_sm_acq_addr);
bfa_ioc_timer_stop(ioc);
/* !!! fall through !!! */
ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
if (event != IOC_E_PFFAILED)
bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
bfa_ioc_timer_stop(ioc);
bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
bfa_sm_fault(ioc, event);
 * Acquiring address from fabric (entry function)
bfa_ioc_sm_acq_addr_entry(struct bfa_ioc_s *ioc)
 * Acquiring address from the fabric
bfa_ioc_sm_acq_addr(struct bfa_ioc_s *ioc, enum ioc_event event)
case IOC_E_FWRSP_GETATTR:
bfa_ioc_check_attr_wwns(ioc);
bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
bfa_hb_timer_stop(ioc);
ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
if (event != IOC_E_PFFAILED)
bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
bfa_hb_timer_stop(ioc);
bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
bfa_sm_fault(ioc, event);
bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED);
BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC enabled\n");
bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event)
bfa_hb_timer_stop(ioc);
bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
bfa_hb_timer_stop(ioc);
/* !!! fall through !!! */
if (ioc->iocpf.auto_recover)
bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
bfa_ioc_fail_notify(ioc);
if (event != IOC_E_PFFAILED)
bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
bfa_sm_fault(ioc, event);
bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc)
struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC disabled\n");
 * IOC is being disabled
bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event)
bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
 * No state change. Will move to disabled state
 * after iocpf sm completes failure processing and
 * moves to disabled state.
bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
bfa_ioc_disable_comp(ioc);
bfa_sm_fault(ioc, event);
 * IOC disable completion entry.
bfa_ioc_sm_disabled_entry(struct bfa_ioc_s *ioc)
bfa_ioc_disable_comp(ioc);
bfa_ioc_sm_disabled(struct bfa_ioc_s *ioc, enum ioc_event event)
bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
ioc->cbfn->disable_cbfn(ioc->bfa);
bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
bfa_sm_fault(ioc, event);
bfa_ioc_sm_fail_retry_entry(struct bfa_ioc_s *ioc)
 * Hardware initialization retry.
bfa_ioc_sm_fail_retry(struct bfa_ioc_s *ioc, enum ioc_event event)
bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
 * Initialization retry failed.
ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
if (event != IOC_E_PFFAILED)
bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
bfa_sm_fault(ioc, event);
bfa_ioc_sm_fail_entry(struct bfa_ioc_s *ioc)
bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event)
ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
 * HB failure notification, ignore.
bfa_sm_fault(ioc, event);
bfa_ioc_sm_hwfail_entry(struct bfa_ioc_s *ioc)
bfa_ioc_sm_hwfail(struct bfa_ioc_s *ioc, enum ioc_event event)
ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
ioc->cbfn->disable_cbfn(ioc->bfa);
bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
bfa_sm_fault(ioc, event);
 * IOCPF State Machine
 * Reset entry actions -- initialize state machine
bfa_iocpf_sm_reset_entry(struct bfa_iocpf_s *iocpf)
iocpf->fw_mismatch_notified = BFA_FALSE;
iocpf->auto_recover = bfa_auto_recover;
 * Beginning state. IOC is in reset state.
bfa_iocpf_sm_reset(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
struct bfa_ioc_s *ioc = iocpf->ioc;
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
bfa_sm_fault(ioc, event);
 * Semaphore should be acquired for version check.
bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf)
struct bfi_ioc_image_hdr_s fwhdr;
u32 fwstate = readl(iocpf->ioc->ioc_regs.ioc_fwstate);
if (fwstate == BFI_IOC_UNINIT)
bfa_ioc_fwver_get(iocpf->ioc, &fwhdr);
if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL)
bfa_trc(iocpf->ioc, fwstate);
bfa_trc(iocpf->ioc, fwhdr.exec);
writel(BFI_IOC_UNINIT, iocpf->ioc->ioc_regs.ioc_fwstate);
 * Try to lock and then unlock the semaphore.
readl(iocpf->ioc->ioc_regs.ioc_sem_reg);
writel(1, iocpf->ioc->ioc_regs.ioc_sem_reg);
bfa_ioc_hw_sem_get(iocpf->ioc);
 * Awaiting h/w semaphore to continue with version check.
bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
struct bfa_ioc_s *ioc = iocpf->ioc;
case IOCPF_E_SEMLOCKED:
if (bfa_ioc_firmware_lock(ioc)) {
if (bfa_ioc_sync_start(ioc)) {
bfa_ioc_sync_join(ioc);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
bfa_ioc_firmware_unlock(ioc);
writel(1, ioc->ioc_regs.ioc_sem_reg);
bfa_sem_timer_start(ioc);
writel(1, ioc->ioc_regs.ioc_sem_reg);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);
case IOCPF_E_SEM_ERROR:
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
case IOCPF_E_DISABLE:
bfa_sem_timer_stop(ioc);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
bfa_fsm_send_event(ioc, IOC_E_DISABLED);
bfa_sem_timer_stop(ioc);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
bfa_sm_fault(ioc, event);
 * Notify enable completion callback.
bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf_s *iocpf)
 * Call only the first time sm enters fwmismatch state.
if (iocpf->fw_mismatch_notified == BFA_FALSE)
bfa_ioc_pf_fwmismatch(iocpf->ioc);
iocpf->fw_mismatch_notified = BFA_TRUE;
bfa_iocpf_timer_start(iocpf->ioc);
 * Awaiting firmware version match.
bfa_iocpf_sm_mismatch(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
struct bfa_ioc_s *ioc = iocpf->ioc;
case IOCPF_E_TIMEOUT:
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
case IOCPF_E_DISABLE:
bfa_iocpf_timer_stop(ioc);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
bfa_fsm_send_event(ioc, IOC_E_DISABLED);
bfa_iocpf_timer_stop(ioc);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
bfa_sm_fault(ioc, event);
 * Request for semaphore.
bfa_iocpf_sm_semwait_entry(struct bfa_iocpf_s *iocpf)
bfa_ioc_hw_sem_get(iocpf->ioc);
 * Awaiting semaphore for h/w initialization.
bfa_iocpf_sm_semwait(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
struct bfa_ioc_s *ioc = iocpf->ioc;
case IOCPF_E_SEMLOCKED:
if (bfa_ioc_sync_complete(ioc)) {
bfa_ioc_sync_join(ioc);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
writel(1, ioc->ioc_regs.ioc_sem_reg);
bfa_sem_timer_start(ioc);
case IOCPF_E_SEM_ERROR:
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
case IOCPF_E_DISABLE:
bfa_sem_timer_stop(ioc);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
bfa_sm_fault(ioc, event);
bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf_s *iocpf)
iocpf->poll_time = 0;
bfa_ioc_hwinit(iocpf->ioc, BFA_FALSE);
 * Hardware is being initialized. Interrupts are enabled.
 * Holding hardware semaphore lock.
bfa_iocpf_sm_hwinit(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
struct bfa_ioc_s *ioc = iocpf->ioc;
case IOCPF_E_FWREADY:
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
case IOCPF_E_TIMEOUT:
writel(1, ioc->ioc_regs.ioc_sem_reg);
bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
case IOCPF_E_DISABLE:
bfa_iocpf_timer_stop(ioc);
bfa_ioc_sync_leave(ioc);
writel(1, ioc->ioc_regs.ioc_sem_reg);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
bfa_sm_fault(ioc, event);
bfa_iocpf_sm_enabling_entry(struct bfa_iocpf_s *iocpf)
bfa_iocpf_timer_start(iocpf->ioc);
 * Enable Interrupts before sending fw IOC ENABLE cmd.
iocpf->ioc->cbfn->reset_cbfn(iocpf->ioc->bfa);
bfa_ioc_send_enable(iocpf->ioc);
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
bfa_iocpf_sm_enabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
struct bfa_ioc_s *ioc = iocpf->ioc;
case IOCPF_E_FWRSP_ENABLE:
bfa_iocpf_timer_stop(ioc);
writel(1, ioc->ioc_regs.ioc_sem_reg);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
case IOCPF_E_INITFAIL:
bfa_iocpf_timer_stop(ioc);
 * !!! fall through !!!
case IOCPF_E_TIMEOUT:
writel(1, ioc->ioc_regs.ioc_sem_reg);
if (event == IOCPF_E_TIMEOUT)
bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
case IOCPF_E_DISABLE:
bfa_iocpf_timer_stop(ioc);
writel(1, ioc->ioc_regs.ioc_sem_reg);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
bfa_sm_fault(ioc, event);
bfa_iocpf_sm_ready_entry(struct bfa_iocpf_s *iocpf)
bfa_fsm_send_event(iocpf->ioc, IOC_E_ENABLED);
bfa_iocpf_sm_ready(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
struct bfa_ioc_s *ioc = iocpf->ioc;
bfa_trc(ioc, event);
case IOCPF_E_DISABLE:
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
case IOCPF_E_GETATTRFAIL:
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
bfa_sm_fault(ioc, event);
bfa_iocpf_sm_disabling_entry(struct bfa_iocpf_s *iocpf)
bfa_iocpf_timer_start(iocpf->ioc);
bfa_ioc_send_disable(iocpf->ioc);
 * IOC is being disabled
bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
struct bfa_ioc_s *ioc = iocpf->ioc;
bfa_trc(ioc, event);
case IOCPF_E_FWRSP_DISABLE:
bfa_iocpf_timer_stop(ioc);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
bfa_iocpf_timer_stop(ioc);
 * !!! fall through !!!
case IOCPF_E_TIMEOUT:
writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
case IOCPF_E_FWRSP_ENABLE:
bfa_sm_fault(ioc, event);
bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf_s *iocpf)
bfa_ioc_hw_sem_get(iocpf->ioc);
 * IOC hb ack request is being removed.
bfa_iocpf_sm_disabling_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
struct bfa_ioc_s *ioc = iocpf->ioc;
bfa_trc(ioc, event);
case IOCPF_E_SEMLOCKED:
bfa_ioc_sync_leave(ioc);
writel(1, ioc->ioc_regs.ioc_sem_reg);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
case IOCPF_E_SEM_ERROR:
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
bfa_sm_fault(ioc, event);
 * IOC disable completion entry.
bfa_iocpf_sm_disabled_entry(struct bfa_iocpf_s *iocpf)
bfa_ioc_mbox_flush(iocpf->ioc);
bfa_fsm_send_event(iocpf->ioc, IOC_E_DISABLED);
bfa_iocpf_sm_disabled(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
struct bfa_ioc_s *ioc = iocpf->ioc;
bfa_trc(ioc, event);
case IOCPF_E_ENABLE:
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
bfa_ioc_firmware_unlock(ioc);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
bfa_sm_fault(ioc, event);
bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf_s *iocpf)
bfa_ioc_debug_save_ftrc(iocpf->ioc);
bfa_ioc_hw_sem_get(iocpf->ioc);
 * Hardware initialization failed.
bfa_iocpf_sm_initfail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
struct bfa_ioc_s *ioc = iocpf->ioc;
bfa_trc(ioc, event);
case IOCPF_E_SEMLOCKED:
bfa_ioc_notify_fail(ioc);
bfa_ioc_sync_leave(ioc);
writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
writel(1, ioc->ioc_regs.ioc_sem_reg);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
case IOCPF_E_SEM_ERROR:
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
case IOCPF_E_DISABLE:
bfa_sem_timer_stop(ioc);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
bfa_sem_timer_stop(ioc);
bfa_ioc_firmware_unlock(ioc);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
bfa_sm_fault(ioc, event);
bfa_iocpf_sm_initfail_entry(struct bfa_iocpf_s *iocpf)
bfa_trc(iocpf->ioc, 0);
 * Hardware initialization failed.
bfa_iocpf_sm_initfail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
struct bfa_ioc_s *ioc = iocpf->ioc;
bfa_trc(ioc, event);
case IOCPF_E_DISABLE:
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
bfa_ioc_firmware_unlock(ioc);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
bfa_sm_fault(ioc, event);
bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf_s *iocpf)
 * Mark IOC as failed in hardware and stop firmware.
bfa_ioc_lpu_stop(iocpf->ioc);
 * Flush any queued up mailbox requests.
bfa_ioc_mbox_flush(iocpf->ioc);
bfa_ioc_hw_sem_get(iocpf->ioc);
bfa_iocpf_sm_fail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
struct bfa_ioc_s *ioc = iocpf->ioc;
bfa_trc(ioc, event);
case IOCPF_E_SEMLOCKED:
bfa_ioc_sync_ack(ioc);
bfa_ioc_notify_fail(ioc);
if (!iocpf->auto_recover) {
bfa_ioc_sync_leave(ioc);
writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
writel(1, ioc->ioc_regs.ioc_sem_reg);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
if (bfa_ioc_sync_complete(ioc))
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
writel(1, ioc->ioc_regs.ioc_sem_reg);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
case IOCPF_E_SEM_ERROR:
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
case IOCPF_E_DISABLE:
bfa_sem_timer_stop(ioc);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
bfa_sm_fault(ioc, event);
bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf)
bfa_trc(iocpf->ioc, 0);
 * IOC is in failed state.
bfa_iocpf_sm_fail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
struct bfa_ioc_s *ioc = iocpf->ioc;
bfa_trc(ioc, event);
case IOCPF_E_DISABLE:
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
bfa_sm_fault(ioc, event);
 * BFA IOC private functions
 * Notify common modules registered for notification.
bfa_ioc_event_notify(struct bfa_ioc_s *ioc, enum bfa_ioc_event_e event)
struct bfa_ioc_notify_s *notify;
struct list_head *qe;
list_for_each(qe, &ioc->notify_q) {
notify = (struct bfa_ioc_notify_s *)qe;
notify->cbfn(notify->cbarg, event);
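/*
 * A hedged usage sketch of the notification hook above: a client module
 * fills in a struct bfa_ioc_notify_s and chains it on ioc->notify_q
 * (field names per that structure; my_ioc_event_cbfn and my_module are
 * hypothetical, for illustration only):
 *
 *	static void
 *	my_ioc_event_cbfn(void *cbarg, enum bfa_ioc_event_e event)
 *	{
 *		if (event == BFA_IOC_E_FAILED)
 *			;	// clean up module state here
 *	}
 *
 *	// at attach time:
 *	notify->cbfn = my_ioc_event_cbfn;
 *	notify->cbarg = my_module;
 *	list_add_tail(&notify->qe, &ioc->notify_q);
 */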
bfa_ioc_disable_comp(struct bfa_ioc_s *ioc)
ioc->cbfn->disable_cbfn(ioc->bfa);
bfa_ioc_event_notify(ioc, BFA_IOC_E_DISABLED);
bfa_ioc_sem_get(void __iomem *sem_reg)
#define BFA_SEM_SPINCNT 3000
r32 = readl(sem_reg);
while ((r32 & 1) && (cnt < BFA_SEM_SPINCNT)) {
r32 = readl(sem_reg);
bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc)
 * First read to the semaphore register will return 0, subsequent reads
 * will return 1. Semaphore is released by writing 1 to the register
r32 = readl(ioc->ioc_regs.ioc_sem_reg);
bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEM_ERROR);
bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
bfa_sem_timer_start(ioc);
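/*
 * A minimal sketch of the semaphore protocol described above, assuming
 * only the read-to-acquire / write-1-to-release register semantics
 * (sem_try_lock and sem_unlock are illustrative names):
 *
 *	static bfa_boolean_t
 *	sem_try_lock(void __iomem *sem)
 *	{
 *		// bit 0 clear on read => lock acquired; set => already held
 *		return (readl(sem) & 1) ? BFA_FALSE : BFA_TRUE;
 *	}
 *
 *	static void
 *	sem_unlock(void __iomem *sem)
 *	{
 *		writel(1, sem);	// writing 1 releases the semaphore
 *	}
 */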
 * Initialize LPU local memory (aka secondary memory / SRAM)
bfa_ioc_lmem_init(struct bfa_ioc_s *ioc)
#define PSS_LMEM_INIT_TIME 10000
pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
pss_ctl &= ~__PSS_LMEM_RESET;
pss_ctl |= __PSS_LMEM_INIT_EN;
 * i2c workaround 12.5khz clock
pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
 * wait for memory initialization to be complete
pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
} while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));
 * If memory initialization is not successful, IOC timeout will catch
WARN_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE));
bfa_trc(ioc, pss_ctl);
pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
bfa_ioc_lpu_start(struct bfa_ioc_s *ioc)
 * Take processor out of reset.
pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
pss_ctl &= ~__PSS_LPU0_RESET;
writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc)
 * Put processors in reset.
pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);
writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
 * Get driver and firmware versions.
bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
u32 *fwsig = (u32 *) fwhdr;
pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
pgoff = PSS_SMEM_PGOFF(loff);
writel(pgnum, ioc->ioc_regs.host_page_num_fn);
for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32));
bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
loff += sizeof(u32);
 * Returns TRUE if same.
bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
struct bfi_ioc_image_hdr_s *drv_fwhdr;
drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i]) {
bfa_trc(ioc, fwhdr->md5sum[i]);
bfa_trc(ioc, drv_fwhdr->md5sum[i]);
bfa_trc(ioc, fwhdr->md5sum[0]);
 * Return true if current running version is valid. Firmware signature and
 * execution context (driver/bios) must match.
static bfa_boolean_t
bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env)
struct bfi_ioc_image_hdr_s fwhdr, *drv_fwhdr;
bfa_ioc_fwver_get(ioc, &fwhdr);
drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
if (fwhdr.signature != drv_fwhdr->signature) {
bfa_trc(ioc, fwhdr.signature);
bfa_trc(ioc, drv_fwhdr->signature);
if (swab32(fwhdr.bootenv) != boot_env) {
bfa_trc(ioc, fwhdr.bootenv);
bfa_trc(ioc, boot_env);
return bfa_ioc_fwver_cmp(ioc, &fwhdr);
 * Conditionally flush any pending message from firmware at start.
bfa_ioc_msgflush(struct bfa_ioc_s *ioc)
r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
writel(1, ioc->ioc_regs.lpu_mbox_cmd);
bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
enum bfi_ioc_state ioc_fwstate;
bfa_boolean_t fwvalid;
ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
ioc_fwstate = BFI_IOC_UNINIT;
bfa_trc(ioc, ioc_fwstate);
boot_type = BFI_FWBOOT_TYPE_NORMAL;
boot_env = BFI_FWBOOT_ENV_OS;
 * check if firmware is valid
fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
BFA_FALSE : bfa_ioc_fwver_valid(ioc, boot_env);
bfa_ioc_boot(ioc, boot_type, boot_env);
bfa_ioc_poll_fwinit(ioc);
 * If hardware initialization is in progress (initialized by other IOC),
 * just wait for an initialization completion interrupt.
if (ioc_fwstate == BFI_IOC_INITING) {
bfa_ioc_poll_fwinit(ioc);
 * If IOC function is disabled and firmware version is same,
 * just re-enable IOC.
 * If option rom, IOC must not be in operational state. With
 * convergence, IOC will be in operational state when 2nd driver
if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) {
 * When using MSI-X any pending firmware ready event should
 * be flushed. Otherwise MSI-X interrupts are not delivered.
bfa_ioc_msgflush(ioc);
bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
 * Initialize the h/w for any other states.
bfa_ioc_boot(ioc, boot_type, boot_env);
bfa_ioc_poll_fwinit(ioc);
bfa_ioc_timeout(void *ioc_arg)
struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len)
u32 *msgp = (u32 *) ioc_msg;
bfa_trc(ioc, msgp[0]);
WARN_ON(len > BFI_IOC_MSGLEN_MAX);
 * first write msg to mailbox registers
for (i = 0; i < len / sizeof(u32); i++)
writel(cpu_to_le32(msgp[i]),
ioc->ioc_regs.hfn_mbox + i * sizeof(u32));
for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));
 * write 1 to mailbox CMD to trigger LPU event
writel(1, ioc->ioc_regs.hfn_mbox_cmd);
(void) readl(ioc->ioc_regs.hfn_mbox_cmd);
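/*
 * The dummy readl() above flushes the posted PCI write, so the doorbell
 * is guaranteed to have reached the adapter before this function returns.
 */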
bfa_ioc_send_enable(struct bfa_ioc_s *ioc)
struct bfi_ioc_ctrl_req_s enable_req;
bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
bfa_ioc_portid(ioc));
enable_req.clscode = cpu_to_be16(ioc->clscode);
do_gettimeofday(&tv);
enable_req.tv_sec = be32_to_cpu(tv.tv_sec);
bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s));
bfa_ioc_send_disable(struct bfa_ioc_s *ioc)
struct bfi_ioc_ctrl_req_s disable_req;
bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
bfa_ioc_portid(ioc));
bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req_s));
bfa_ioc_send_getattr(struct bfa_ioc_s *ioc)
struct bfi_ioc_getattr_req_s attr_req;
bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
bfa_ioc_portid(ioc));
bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
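/*
 * Heartbeat check (below): firmware increments the heartbeat register on
 * every beat. If the count has not moved since the previous poll the
 * firmware is presumed dead and recovery is started; otherwise the same
 * tick also retries any pending mailbox commands.
 */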
bfa_ioc_hb_check(void *cbarg)
struct bfa_ioc_s *ioc = cbarg;
hb_count = readl(ioc->ioc_regs.heartbeat);
if (ioc->hb_count == hb_count) {
bfa_ioc_recover(ioc);
ioc->hb_count = hb_count;
bfa_ioc_mbox_poll(ioc);
bfa_hb_timer_start(ioc);
bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc)
ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
bfa_hb_timer_start(ioc);
 * Initiate a full firmware download.
bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
 * Initialize LMEM first before code download
bfa_ioc_lmem_init(ioc);
bfa_trc(ioc, bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)));
fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), chunkno);
pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
pgoff = PSS_SMEM_PGOFF(loff);
writel(pgnum, ioc->ioc_regs.host_page_num_fn);
for (i = 0; i < bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)); i++) {
if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc),
BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
bfa_mem_write(ioc->ioc_regs.smem_page_start, loff,
fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]);
loff += sizeof(u32);
 * handle page offset wrap around
loff = PSS_SMEM_PGOFF(loff);
writel(pgnum, ioc->ioc_regs.host_page_num_fn);
writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
ioc->ioc_regs.host_page_num_fn);
 * Set boot type and device mode at the end.
asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode,
ioc->port0_mode, ioc->port1_mode);
bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_DEVMODE_OFF,
bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_TYPE_OFF,
bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_ENV_OFF,
 * Update BFA configuration from firmware configuration.
bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc)
struct bfi_ioc_attr_s *attr = ioc->attr;
attr->adapter_prop = be32_to_cpu(attr->adapter_prop);
attr->card_type = be32_to_cpu(attr->card_type);
attr->maxfrsize = be16_to_cpu(attr->maxfrsize);
ioc->fcmode = (attr->port_mode == BFI_PORT_MODE_FC);
bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
 * Attach time initialization of mbox logic.
bfa_ioc_mbox_attach(struct bfa_ioc_s *ioc)
struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
INIT_LIST_HEAD(&mod->cmd_q);
for (mc = 0; mc < BFI_MC_MAX; mc++) {
mod->mbhdlr[mc].cbfn = NULL;
mod->mbhdlr[mc].cbarg = ioc->bfa;
 * Mbox poll timer -- restarts any pending mailbox requests.
bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc)
struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
struct bfa_mbox_cmd_s *cmd;
 * If no command pending, do nothing
if (list_empty(&mod->cmd_q))
 * If previous command is not yet fetched by firmware, do nothing
stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
 * Enqueue command to firmware.
bfa_q_deq(&mod->cmd_q, &cmd);
bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
 * Cleanup any pending requests.
bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc)
struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
struct bfa_mbox_cmd_s *cmd;
while (!list_empty(&mod->cmd_q))
bfa_q_deq(&mod->cmd_q, &cmd);
 * Read data from SMEM to host through PCI memmap
 * @param[in] ioc memory for IOC
 * @param[in] tbuf app memory to store data from smem
 * @param[in] soff smem offset
 * @param[in] sz size of smem in bytes
bfa_ioc_smem_read(struct bfa_ioc_s *ioc, void *tbuf, u32 soff, u32 sz)
pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
loff = PSS_SMEM_PGOFF(soff);
bfa_trc(ioc, pgnum);
 * Hold semaphore to serialize pll init and fwtrc.
if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
return BFA_STATUS_FAILED;
writel(pgnum, ioc->ioc_regs.host_page_num_fn);
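/*
 * SMEM is accessed through a sliding page window: host_page_num_fn
 * selects the page and the data is copied through smem_page_start.
 * Whenever the in-page offset wraps back to zero, the page number is
 * advanced and rewritten in the copy loop below.
 */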
len = sz/sizeof(u32);
for (i = 0; i < len; i++) {
r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
buf[i] = be32_to_cpu(r32);
loff += sizeof(u32);
 * handle page offset wrap around
loff = PSS_SMEM_PGOFF(loff);
writel(pgnum, ioc->ioc_regs.host_page_num_fn);
writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
ioc->ioc_regs.host_page_num_fn);
 * release semaphore.
readl(ioc->ioc_regs.ioc_init_sem_reg);
writel(1, ioc->ioc_regs.ioc_init_sem_reg);
bfa_trc(ioc, pgnum);
return BFA_STATUS_OK;
 * Clear SMEM data from host through PCI memmap
 * @param[in] ioc memory for IOC
 * @param[in] soff smem offset
 * @param[in] sz size of smem in bytes
bfa_ioc_smem_clr(struct bfa_ioc_s *ioc, u32 soff, u32 sz)
pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
loff = PSS_SMEM_PGOFF(soff);
bfa_trc(ioc, pgnum);
 * Hold semaphore to serialize pll init and fwtrc.
if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
return BFA_STATUS_FAILED;
writel(pgnum, ioc->ioc_regs.host_page_num_fn);
len = sz/sizeof(u32); /* len in words */
for (i = 0; i < len; i++) {
bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, 0);
loff += sizeof(u32);
 * handle page offset wrap around
loff = PSS_SMEM_PGOFF(loff);
writel(pgnum, ioc->ioc_regs.host_page_num_fn);
writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
ioc->ioc_regs.host_page_num_fn);
 * release semaphore.
readl(ioc->ioc_regs.ioc_init_sem_reg);
writel(1, ioc->ioc_regs.ioc_init_sem_reg);
bfa_trc(ioc, pgnum);
return BFA_STATUS_OK;
bfa_ioc_fail_notify(struct bfa_ioc_s *ioc)
struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
 * Notify driver and common modules registered for notification.
ioc->cbfn->hbfail_cbfn(ioc->bfa);
bfa_ioc_event_notify(ioc, BFA_IOC_E_FAILED);
bfa_ioc_debug_save_ftrc(ioc);
BFA_LOG(KERN_CRIT, bfad, bfa_log_level,
"Heart Beat of IOC has failed\n");
bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc)
struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
 * Provide enable completion callback.
ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
BFA_LOG(KERN_WARNING, bfad, bfa_log_level,
"Running firmware version is incompatible "
"with the driver version\n");
bfa_ioc_pll_init(struct bfa_ioc_s *ioc)
 * Hold semaphore so that nobody can access the chip during init.
bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);
bfa_ioc_pll_init_asic(ioc);
ioc->pllinit = BFA_TRUE;
 * release semaphore.
readl(ioc->ioc_regs.ioc_init_sem_reg);
writel(1, ioc->ioc_regs.ioc_init_sem_reg);
return BFA_STATUS_OK;
 * Interface used by diag module to do firmware boot with memory test
 * as the entry vector.
bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env)
bfa_ioc_stats(ioc, ioc_boots);
if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
 * Initialize IOC state of all functions on a chip reset.
if (boot_type == BFI_FWBOOT_TYPE_MEMTEST) {
writel(BFI_IOC_MEMTEST, ioc->ioc_regs.ioc_fwstate);
writel(BFI_IOC_MEMTEST, ioc->ioc_regs.alt_ioc_fwstate);
writel(BFI_IOC_INITING, ioc->ioc_regs.ioc_fwstate);
writel(BFI_IOC_INITING, ioc->ioc_regs.alt_ioc_fwstate);
bfa_ioc_msgflush(ioc);
bfa_ioc_download_fw(ioc, boot_type, boot_env);
bfa_ioc_lpu_start(ioc);
 * Enable/disable IOC failure auto recovery.
bfa_ioc_auto_recover(bfa_boolean_t auto_recover)
bfa_auto_recover = auto_recover;
bfa_ioc_is_operational(struct bfa_ioc_s *ioc)
return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
bfa_ioc_is_initialized(struct bfa_ioc_s *ioc)
u32 r32 = readl(ioc->ioc_regs.ioc_fwstate);
return ((r32 != BFI_IOC_UNINIT) &&
(r32 != BFI_IOC_INITING) &&
(r32 != BFI_IOC_MEMTEST));
bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg)
__be32 *msgp = mbmsg;
r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
r32 = readl(ioc->ioc_regs.lpu_mbox +
msgp[i] = cpu_to_be32(r32);
 * turn off mailbox interrupt by clearing mailbox status
writel(1, ioc->ioc_regs.lpu_mbox_cmd);
readl(ioc->ioc_regs.lpu_mbox_cmd);
bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
union bfi_ioc_i2h_msg_u *msg;
struct bfa_iocpf_s *iocpf = &ioc->iocpf;
msg = (union bfi_ioc_i2h_msg_u *) m;
bfa_ioc_stats(ioc, ioc_isrs);
switch (msg->mh.msg_id) {
case BFI_IOC_I2H_HBEAT:
case BFI_IOC_I2H_ENABLE_REPLY:
ioc->port_mode = ioc->port_mode_cfg =
(enum bfa_mode_s)msg->fw_event.port_mode;
ioc->ad_cap_bm = msg->fw_event.cap_bm;
bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
case BFI_IOC_I2H_DISABLE_REPLY:
bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE);
case BFI_IOC_I2H_GETATTR_REPLY:
bfa_ioc_getattr_reply(ioc);
case BFI_IOC_I2H_ACQ_ADDR_REPLY:
bfa_fsm_send_event(ioc, IOC_E_FWRSP_ACQ_ADDR);
bfa_trc(ioc, msg->mh.msg_id);
 * IOC attach time initialization and setup.
 * @param[in] ioc memory for IOC
 * @param[in] bfa driver instance structure
bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn,
struct bfa_timer_mod_s *timer_mod)
ioc->timer_mod = timer_mod;
ioc->fcmode = BFA_FALSE;
ioc->pllinit = BFA_FALSE;
ioc->dbg_fwsave_once = BFA_TRUE;
ioc->iocpf.ioc = ioc;
bfa_ioc_mbox_attach(ioc);
INIT_LIST_HEAD(&ioc->notify_q);
bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
bfa_fsm_send_event(ioc, IOC_E_RESET);
 * Driver detach time IOC cleanup.
bfa_ioc_detach(struct bfa_ioc_s *ioc)
bfa_fsm_send_event(ioc, IOC_E_DETACH);
INIT_LIST_HEAD(&ioc->notify_q);
 * Setup IOC PCI properties.
 * @param[in] pcidev PCI device information for this IOC
bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
enum bfi_pcifn_class clscode)
ioc->clscode = clscode;
ioc->pcidev = *pcidev;
 * Initialize IOC and device personality
ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_FC;
ioc->asic_mode = BFI_ASIC_MODE_FC;
switch (pcidev->device_id) {
case BFA_PCI_DEVICE_ID_FC_8G1P:
case BFA_PCI_DEVICE_ID_FC_8G2P:
ioc->asic_gen = BFI_ASIC_GEN_CB;
ioc->fcmode = BFA_TRUE;
ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
ioc->ad_cap_bm = BFA_CM_HBA;
case BFA_PCI_DEVICE_ID_CT:
ioc->asic_gen = BFI_ASIC_GEN_CT;
ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
ioc->asic_mode = BFI_ASIC_MODE_ETH;
ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_CNA;
ioc->ad_cap_bm = BFA_CM_CNA;
case BFA_PCI_DEVICE_ID_CT_FC:
ioc->asic_gen = BFI_ASIC_GEN_CT;
ioc->fcmode = BFA_TRUE;
ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
ioc->ad_cap_bm = BFA_CM_HBA;
case BFA_PCI_DEVICE_ID_CT2:
ioc->asic_gen = BFI_ASIC_GEN_CT2;
if (clscode == BFI_PCIFN_CLASS_FC &&
pcidev->ssid == BFA_PCI_CT2_SSID_FC) {
ioc->asic_mode = BFI_ASIC_MODE_FC16;
ioc->fcmode = BFA_TRUE;
ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
ioc->ad_cap_bm = BFA_CM_HBA;
ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
ioc->asic_mode = BFI_ASIC_MODE_ETH;
if (pcidev->ssid == BFA_PCI_CT2_SSID_FCoE) {
ioc->port_mode_cfg = BFA_MODE_CNA;
ioc->ad_cap_bm = BFA_CM_CNA;
ioc->port_mode_cfg = BFA_MODE_NIC;
ioc->ad_cap_bm = BFA_CM_NIC;
 * Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c
if (ioc->asic_gen == BFI_ASIC_GEN_CB)
bfa_ioc_set_cb_hwif(ioc);
else if (ioc->asic_gen == BFI_ASIC_GEN_CT)
bfa_ioc_set_ct_hwif(ioc);
WARN_ON(ioc->asic_gen != BFI_ASIC_GEN_CT2);
bfa_ioc_set_ct2_hwif(ioc);
bfa_ioc_ct2_poweron(ioc);
bfa_ioc_map_port(ioc);
bfa_ioc_reg_init(ioc);
 * Initialize IOC dma memory
 * @param[in] dm_kva kernel virtual address of IOC dma memory
 * @param[in] dm_pa physical address of IOC dma memory
bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa)
 * dma memory for firmware attribute
ioc->attr_dma.kva = dm_kva;
ioc->attr_dma.pa = dm_pa;
ioc->attr = (struct bfi_ioc_attr_s *) dm_kva;
bfa_ioc_enable(struct bfa_ioc_s *ioc)
bfa_ioc_stats(ioc, ioc_enables);
ioc->dbg_fwsave_once = BFA_TRUE;
bfa_fsm_send_event(ioc, IOC_E_ENABLE);
bfa_ioc_disable(struct bfa_ioc_s *ioc)
bfa_ioc_stats(ioc, ioc_disables);
bfa_fsm_send_event(ioc, IOC_E_DISABLE);
 * Initialize memory for saving firmware trace. Driver must initialize
 * trace memory before calling bfa_ioc_enable().
bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave)
ioc->dbg_fwsave = dbg_fwsave;
ioc->dbg_fwsave_len = (ioc->iocpf.auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
 * Register mailbox message handler functions
 * @param[in] ioc IOC instance
 * @param[in] mcfuncs message class handler functions
bfa_ioc_mbox_register(struct bfa_ioc_s *ioc, bfa_ioc_mbox_mcfunc_t *mcfuncs)
struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
for (mc = 0; mc < BFI_MC_MAX; mc++)
mod->mbhdlr[mc].cbfn = mcfuncs[mc];
 * Register mailbox message handler function, to be called by common modules
bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
mod->mbhdlr[mc].cbfn = cbfn;
mod->mbhdlr[mc].cbarg = cbarg;
 * Queue a mailbox command request to firmware. The command is queued for
 * later delivery if the mailbox is busy. It is the caller's responsibility
 * to serialize requests.
 * @param[in] ioc IOC instance
 * @param[in] cmd Mailbox command
bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd)
struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
 * If a previous command is pending, queue new command
if (!list_empty(&mod->cmd_q)) {
list_add_tail(&cmd->qe, &mod->cmd_q);
 * If mailbox is busy, queue command for poll timer
stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
list_add_tail(&cmd->qe, &mod->cmd_q);
 * mailbox is free -- queue command to firmware
bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
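/*
 * bfa_ioc_send_fwsync() further below is a typical caller: it builds the
 * request directly in cmd.msg and relies on this routine to either ring
 * the mailbox immediately or park the command on cmd_q for the poll
 * timer.
 */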
 * Handle mailbox interrupts
bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc)
struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
struct bfi_mbmsg_s m;
if (bfa_ioc_msgget(ioc, &m)) {
 * Treat IOC message class as special.
mc = m.mh.msg_class;
if (mc == BFI_MC_IOC) {
bfa_ioc_isr(ioc, &m);
if ((mc > BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
bfa_ioc_lpu_read_stat(ioc);
 * Try to send pending mailbox commands
bfa_ioc_mbox_poll(ioc);
bfa_ioc_error_isr(struct bfa_ioc_s *ioc)
bfa_ioc_stats(ioc, ioc_hbfails);
ioc->stats.hb_count = ioc->hb_count;
bfa_fsm_send_event(ioc, IOC_E_HWERROR);
 * return true if IOC is disabled
bfa_ioc_is_disabled(struct bfa_ioc_s *ioc)
return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) ||
bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
 * Return TRUE if IOC is in acquiring address state
bfa_ioc_is_acq_addr(struct bfa_ioc_s *ioc)
return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_acq_addr);
 * return true if IOC firmware is different.
bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc)
return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset) ||
bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_fwcheck) ||
bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_mismatch);
#define bfa_ioc_state_disabled(__sm) \
(((__sm) == BFI_IOC_UNINIT) || \
((__sm) == BFI_IOC_INITING) || \
((__sm) == BFI_IOC_HWINIT) || \
((__sm) == BFI_IOC_DISABLED) || \
((__sm) == BFI_IOC_FAIL) || \
((__sm) == BFI_IOC_CFG_DISABLED))
 * Check if adapter is disabled -- both IOCs should be in a disabled
bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc)
if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled))
ioc_state = readl(ioc->ioc_regs.ioc_fwstate);
if (!bfa_ioc_state_disabled(ioc_state))
if (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_FC_8G1P) {
ioc_state = readl(ioc->ioc_regs.alt_ioc_fwstate);
if (!bfa_ioc_state_disabled(ioc_state))
 * Reset IOC fwstate registers.
bfa_ioc_reset_fwstate(struct bfa_ioc_s *ioc)
writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
#define BFA_MFG_NAME "Brocade"
bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
struct bfa_adapter_attr_s *ad_attr)
struct bfi_ioc_attr_s *ioc_attr;
ioc_attr = ioc->attr;
bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
memcpy(&ad_attr->vpd, &ioc_attr->vpd,
sizeof(struct bfa_mfg_vpd_s));
ad_attr->nports = bfa_ioc_get_nports(ioc);
ad_attr->max_speed = bfa_ioc_speed_sup(ioc);
bfa_ioc_get_adapter_model(ioc, ad_attr->model);
/* For now, model descr uses same model string */
bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);
ad_attr->card_type = ioc_attr->card_type;
ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);
if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
ad_attr->prototype = 1;
ad_attr->prototype = 0;
ad_attr->pwwn = ioc->attr->pwwn;
ad_attr->mac = bfa_ioc_get_mac(ioc);
ad_attr->pcie_gen = ioc_attr->pcie_gen;
ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
ad_attr->asic_rev = ioc_attr->asic_rev;
bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);
ad_attr->cna_capable = bfa_ioc_is_cna(ioc);
ad_attr->trunk_capable = (ad_attr->nports > 1) &&
!bfa_ioc_is_cna(ioc) && !ad_attr->is_mezz;
bfa_ioc_get_type(struct bfa_ioc_s *ioc)
if (ioc->clscode == BFI_PCIFN_CLASS_ETH)
return BFA_IOC_TYPE_LL;
WARN_ON(ioc->clscode != BFI_PCIFN_CLASS_FC);
return (ioc->attr->port_mode == BFI_PORT_MODE_FC)
? BFA_IOC_TYPE_FC : BFA_IOC_TYPE_FCoE;
bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num)
memset((void *)serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
memcpy((void *)serial_num,
(void *)ioc->attr->brcd_serialnum,
BFA_ADAPTER_SERIAL_NUM_LEN);
bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver)
memset((void *)fw_ver, 0, BFA_VERSION_LEN);
memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev)
memset((void *)chip_rev, 0, BFA_IOC_CHIP_REV_LEN);
chip_rev[4] = ioc->attr->asic_rev;
bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver)
memset((void *)optrom_ver, 0, BFA_VERSION_LEN);
memcpy(optrom_ver, ioc->attr->optrom_version,
bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer)
memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model)
struct bfi_ioc_attr_s *ioc_attr;
memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN);
ioc_attr = ioc->attr;
snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
BFA_MFG_NAME, ioc_attr->card_type);
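/*
 * For example, a card_type of 825 would yield the model string
 * "Brocade-825" (the card type value here is illustrative).
 */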
bfa_ioc_get_state(struct bfa_ioc_s *ioc)
enum bfa_iocpf_state iocpf_st;
enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);
if (ioc_st == BFA_IOC_ENABLING ||
ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {
iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
case BFA_IOCPF_SEMWAIT:
ioc_st = BFA_IOC_SEMWAIT;
case BFA_IOCPF_HWINIT:
ioc_st = BFA_IOC_HWINIT;
case BFA_IOCPF_FWMISMATCH:
ioc_st = BFA_IOC_FWMISMATCH;
case BFA_IOCPF_FAIL:
ioc_st = BFA_IOC_FAIL;
case BFA_IOCPF_INITFAIL:
ioc_st = BFA_IOC_INITFAIL;
bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr)
memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr_s));
ioc_attr->state = bfa_ioc_get_state(ioc);
ioc_attr->port_id = ioc->port_id;
ioc_attr->port_mode = ioc->port_mode;
ioc_attr->port_mode_cfg = ioc->port_mode_cfg;
ioc_attr->cap_bm = ioc->ad_cap_bm;
ioc_attr->ioc_type = bfa_ioc_get_type(ioc);
bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);
ioc_attr->pci_attr.device_id = ioc->pcidev.device_id;
ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func;
bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
bfa_ioc_get_mac(struct bfa_ioc_s *ioc)
 * Check the IOC type and return the appropriate MAC
if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_FCoE)
return ioc->attr->fcoe_mac;
return ioc->attr->mac;
bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc)
m = ioc->attr->mfg_mac;
if (bfa_mfg_is_old_wwn_mac_model(ioc->attr->card_type))
m.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc);
bfa_mfg_increment_wwn_mac(&(m.mac[MAC_ADDRLEN-3]),
bfa_ioc_pcifn(ioc));
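/*
 * Legacy WWN/MAC models derive the per-function MAC by bumping the last
 * byte of the manufactured MAC; newer models increment the 24-bit
 * NIC-specific portion instead.
 */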
 * Retrieve saved firmware trace from a prior IOC failure.
bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
if (ioc->dbg_fwsave_len == 0)
return BFA_STATUS_ENOFSAVE;
if (tlen > ioc->dbg_fwsave_len)
tlen = ioc->dbg_fwsave_len;
memcpy(trcdata, ioc->dbg_fwsave, tlen);
return BFA_STATUS_OK;
 * Retrieve saved firmware trace from a prior IOC failure.
bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
u32 loff = BFA_DBG_FWTRC_OFF(bfa_ioc_portid(ioc));
bfa_status_t status;
bfa_trc(ioc, *trclen);
if (tlen > BFA_DBG_FWTRC_LEN)
tlen = BFA_DBG_FWTRC_LEN;
status = bfa_ioc_smem_read(ioc, trcdata, loff, tlen);
bfa_ioc_send_fwsync(struct bfa_ioc_s *ioc)
struct bfa_mbox_cmd_s cmd;
struct bfi_ioc_ctrl_req_s *req = (struct bfi_ioc_ctrl_req_s *) cmd.msg;
bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_DBG_SYNC,
bfa_ioc_portid(ioc));
req->clscode = cpu_to_be16(ioc->clscode);
bfa_ioc_mbox_queue(ioc, &cmd);
bfa_ioc_fwsync(struct bfa_ioc_s *ioc)
u32 fwsync_iter = 1000;
bfa_ioc_send_fwsync(ioc);
 * After sending a fw sync mbox command wait for it to
 * take effect. We will not wait for a response because
 * 1. fw_sync mbox cmd doesn't have a response.
 * 2. Even if we implement that, interrupts might not
 * be enabled when we call this function.
 * So, just keep checking if any mbox cmd is pending, and
 * after waiting for a reasonable amount of time, go ahead.
 * It is possible that fw has crashed and the mbox command
 * is never acknowledged.
while (bfa_ioc_mbox_cmd_pending(ioc) && fwsync_iter > 0)
 * Dump firmware smem
bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf,
u32 *offset, int *buflen)
bfa_status_t status;
u32 smem_len = BFA_IOC_FW_SMEM_SIZE(ioc);
if (*offset >= smem_len) {
*offset = *buflen = 0;
return BFA_STATUS_EINVAL;
 * First smem read, sync smem before proceeding
 * No need to sync before reading every chunk.
bfa_ioc_fwsync(ioc);
if ((loff + dlen) >= smem_len)
dlen = smem_len - loff;
status = bfa_ioc_smem_read(ioc, buf, loff, dlen);
if (status != BFA_STATUS_OK) {
*offset = *buflen = 0;
if (*offset >= smem_len)
 * Firmware statistics
bfa_ioc_fw_stats_get(struct bfa_ioc_s *ioc, void *stats)
u32 loff = BFI_IOC_FWSTATS_OFF + \
BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
bfa_status_t status;
if (ioc->stats_busy) {
bfa_trc(ioc, ioc->stats_busy);
return BFA_STATUS_DEVBUSY;
ioc->stats_busy = BFA_TRUE;
tlen = sizeof(struct bfa_fw_stats_s);
status = bfa_ioc_smem_read(ioc, stats, loff, tlen);
ioc->stats_busy = BFA_FALSE;
bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc)
u32 loff = BFI_IOC_FWSTATS_OFF + \
BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
bfa_status_t status;
if (ioc->stats_busy) {
bfa_trc(ioc, ioc->stats_busy);
return BFA_STATUS_DEVBUSY;
ioc->stats_busy = BFA_TRUE;
tlen = sizeof(struct bfa_fw_stats_s);
status = bfa_ioc_smem_clr(ioc, loff, tlen);
ioc->stats_busy = BFA_FALSE;
 * Save firmware trace if configured.
bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc)
if (ioc->dbg_fwsave_once) {
ioc->dbg_fwsave_once = BFA_FALSE;
if (ioc->dbg_fwsave_len) {
tlen = ioc->dbg_fwsave_len;
bfa_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
 * Firmware failure detected. Start recovery actions.
bfa_ioc_recover(struct bfa_ioc_s *ioc)
bfa_ioc_stats(ioc, ioc_hbfails);
ioc->stats.hb_count = ioc->hb_count;
bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc)
if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL)
 * BFA IOC PF private functions
bfa_iocpf_timeout(void *ioc_arg)
struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
bfa_iocpf_sem_timeout(void *ioc_arg)
struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
bfa_ioc_hw_sem_get(ioc);
bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc)
u32 fwstate = readl(ioc->ioc_regs.ioc_fwstate);
bfa_trc(ioc, fwstate);
if (fwstate == BFI_IOC_DISABLED) {
bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
if (ioc->iocpf.poll_time >= BFA_IOC_TOV)
bfa_iocpf_timeout(ioc);
ioc->iocpf.poll_time += BFA_IOC_POLL_TOV;
bfa_iocpf_poll_timer_start(ioc);
2925 bfa_iocpf_poll_timeout(void *ioc_arg)
2927 struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
2929 bfa_ioc_poll_fwinit(ioc);
2933 * bfa timer function
2936 bfa_timer_beat(struct bfa_timer_mod_s *mod)
2938 struct list_head *qh = &mod->timer_q;
2939 struct list_head *qe, *qe_next;
2940 struct bfa_timer_s *elem;
2941 struct list_head timedout_q;
2943 INIT_LIST_HEAD(&timedout_q);
2945 qe = bfa_q_next(qh);
2948 qe_next = bfa_q_next(qe);
2950 elem = (struct bfa_timer_s *) qe;
2951 if (elem->timeout <= BFA_TIMER_FREQ) {
2953 list_del(&elem->qe);
2954 list_add_tail(&elem->qe, &timedout_q);
2956 elem->timeout -= BFA_TIMER_FREQ;
2959 qe = qe_next; /* go to next elem */
2963 * Pop all the timeout entries
2965 while (!list_empty(&timedout_q)) {
2966 bfa_q_deq(&timedout_q, &elem);
2967 elem->timercb(elem->arg);
2972 * Should be called with lock protection
2975 bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer,
2976 void (*timercb) (void *), void *arg, unsigned int timeout)
2979 WARN_ON(timercb == NULL);
2980 WARN_ON(bfa_q_is_on_q(&mod->timer_q, timer));
2982 timer->timeout = timeout;
2983 timer->timercb = timercb;
2986 list_add_tail(&timer->qe, &mod->timer_q);
2990 * Should be called with lock protection
2993 bfa_timer_stop(struct bfa_timer_s *timer)
2995 WARN_ON(list_empty(&timer->qe));
2997 list_del(&timer->qe);
3001 * ASIC block related
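/*
 * The ASIC block configuration is DMAed from firmware in big-endian
 * byte order; swap every multi-byte PF field in place before the host
 * consumes the structure.
 */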
3004 bfa_ablk_config_swap(struct bfa_ablk_cfg_s *cfg)
3006 struct bfa_ablk_cfg_inst_s *cfg_inst;
3011 for (i = 0; i < BFA_ABLK_MAX; i++) {
3012 cfg_inst = &cfg->inst[i];
3013 for (j = 0; j < BFA_ABLK_MAX_PFS; j++) {
3014 be16 = cfg_inst->pf_cfg[j].pers;
3015 cfg_inst->pf_cfg[j].pers = be16_to_cpu(be16);
3016 be16 = cfg_inst->pf_cfg[j].num_qpairs;
3017 cfg_inst->pf_cfg[j].num_qpairs = be16_to_cpu(be16);
3018 be16 = cfg_inst->pf_cfg[j].num_vectors;
3019 cfg_inst->pf_cfg[j].num_vectors = be16_to_cpu(be16);
3020 be32 = cfg_inst->pf_cfg[j].bw;
3021 cfg_inst->pf_cfg[j].bw = be32_to_cpu(be32);
3027 bfa_ablk_isr(void *cbarg, struct bfi_mbmsg_s *msg)
3029 struct bfa_ablk_s *ablk = (struct bfa_ablk_s *)cbarg;
3030 struct bfi_ablk_i2h_rsp_s *rsp = (struct bfi_ablk_i2h_rsp_s *)msg;
3031 bfa_ablk_cbfn_t cbfn;
3033 WARN_ON(msg->mh.msg_class != BFI_MC_ABLK);
3034 bfa_trc(ablk->ioc, msg->mh.msg_id);
3036 switch (msg->mh.msg_id) {
3037 case BFI_ABLK_I2H_QUERY:
3038 if (rsp->status == BFA_STATUS_OK) {
3039 memcpy(ablk->cfg, ablk->dma_addr.kva,
3040 sizeof(struct bfa_ablk_cfg_s));
3041 bfa_ablk_config_swap(ablk->cfg);
3046 case BFI_ABLK_I2H_ADPT_CONFIG:
3047 case BFI_ABLK_I2H_PORT_CONFIG:
3048 /* update config port mode */
3049 ablk->ioc->port_mode_cfg = rsp->port_mode;
3051 case BFI_ABLK_I2H_PF_DELETE:
3052 case BFI_ABLK_I2H_PF_UPDATE:
3053 case BFI_ABLK_I2H_OPTROM_ENABLE:
3054 case BFI_ABLK_I2H_OPTROM_DISABLE:
3058 case BFI_ABLK_I2H_PF_CREATE:
3059 *(ablk->pcifn) = rsp->pcifn;
3067 ablk->busy = BFA_FALSE;
3071 cbfn(ablk->cbarg, rsp->status);
3076 bfa_ablk_notify(void *cbarg, enum bfa_ioc_event_e event)
3078 struct bfa_ablk_s *ablk = (struct bfa_ablk_s *)cbarg;
3080 bfa_trc(ablk->ioc, event);
3083 case BFA_IOC_E_ENABLED:
3084 WARN_ON(ablk->busy != BFA_FALSE);
3087 case BFA_IOC_E_DISABLED:
3088 case BFA_IOC_E_FAILED:
3089 /* Fail any pending requests */
3093 ablk->cbfn(ablk->cbarg, BFA_STATUS_FAILED);
3095 ablk->busy = BFA_FALSE;
3106 bfa_ablk_meminfo(void)
3108 return BFA_ROUNDUP(sizeof(struct bfa_ablk_cfg_s), BFA_DMA_ALIGN_SZ);
3112 bfa_ablk_memclaim(struct bfa_ablk_s *ablk, u8 *dma_kva, u64 dma_pa)
3114 ablk->dma_addr.kva = dma_kva;
3115 ablk->dma_addr.pa = dma_pa;
3119 bfa_ablk_attach(struct bfa_ablk_s *ablk, struct bfa_ioc_s *ioc)
3123 bfa_ioc_mbox_regisr(ablk->ioc, BFI_MC_ABLK, bfa_ablk_isr, ablk);
3124 bfa_q_qe_init(&ablk->ioc_notify);
3125 bfa_ioc_notify_init(&ablk->ioc_notify, bfa_ablk_notify, ablk);
3126 list_add_tail(&ablk->ioc_notify.qe, &ablk->ioc->notify_q);
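/*
 * Every bfa_ablk_* request routine below follows the same pattern:
 * fail fast if the IOC is not operational or a request is already in
 * flight, latch the caller's callback, mark the block busy, then build
 * and queue a single BFI mailbox message.  Completion is delivered
 * asynchronously through bfa_ablk_isr().  Caller sketch (illustrative
 * only; my_done is hypothetical):
 *
 *	static void my_done(void *arg, enum bfa_status status) { ... }
 *	...
 *	if (bfa_ablk_query(ablk, &cfg, my_done, NULL) != BFA_STATUS_OK)
 *		... request was rejected synchronously (busy/IOC down) ...
 */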
3130 bfa_ablk_query(struct bfa_ablk_s *ablk, struct bfa_ablk_cfg_s *ablk_cfg,
3131 bfa_ablk_cbfn_t cbfn, void *cbarg)
3133 struct bfi_ablk_h2i_query_s *m;
3137 if (!bfa_ioc_is_operational(ablk->ioc)) {
3138 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3139 return BFA_STATUS_IOC_FAILURE;
3143 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3144 return BFA_STATUS_DEVBUSY;
3147 ablk->cfg = ablk_cfg;
3149 ablk->cbarg = cbarg;
3150 ablk->busy = BFA_TRUE;
3152 m = (struct bfi_ablk_h2i_query_s *)ablk->mb.msg;
3153 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_QUERY,
3154 bfa_ioc_portid(ablk->ioc));
3155 bfa_dma_be_addr_set(m->addr, ablk->dma_addr.pa);
3156 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3158 return BFA_STATUS_OK;
3162 bfa_ablk_pf_create(struct bfa_ablk_s *ablk, u16 *pcifn,
3163 u8 port, enum bfi_pcifn_class personality, int bw,
3164 bfa_ablk_cbfn_t cbfn, void *cbarg)
3166 struct bfi_ablk_h2i_pf_req_s *m;
3168 if (!bfa_ioc_is_operational(ablk->ioc)) {
3169 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3170 return BFA_STATUS_IOC_FAILURE;
3174 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3175 return BFA_STATUS_DEVBUSY;
3178 ablk->pcifn = pcifn;
3180 ablk->cbarg = cbarg;
3181 ablk->busy = BFA_TRUE;
3183 m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
3184 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_CREATE,
3185 bfa_ioc_portid(ablk->ioc));
3186 m->pers = cpu_to_be16((u16)personality);
3187 m->bw = cpu_to_be32(bw);
3189 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3191 return BFA_STATUS_OK;
3195 bfa_ablk_pf_delete(struct bfa_ablk_s *ablk, int pcifn,
3196 bfa_ablk_cbfn_t cbfn, void *cbarg)
3198 struct bfi_ablk_h2i_pf_req_s *m;
3200 if (!bfa_ioc_is_operational(ablk->ioc)) {
3201 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3202 return BFA_STATUS_IOC_FAILURE;
3206 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3207 return BFA_STATUS_DEVBUSY;
3211 ablk->cbarg = cbarg;
3212 ablk->busy = BFA_TRUE;
3214 m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
3215 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_DELETE,
3216 bfa_ioc_portid(ablk->ioc));
3217 m->pcifn = (u8)pcifn;
3218 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3220 return BFA_STATUS_OK;
3224 bfa_ablk_adapter_config(struct bfa_ablk_s *ablk, enum bfa_mode_s mode,
3225 int max_pf, int max_vf, bfa_ablk_cbfn_t cbfn, void *cbarg)
3227 struct bfi_ablk_h2i_cfg_req_s *m;
3229 if (!bfa_ioc_is_operational(ablk->ioc)) {
3230 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3231 return BFA_STATUS_IOC_FAILURE;
3235 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3236 return BFA_STATUS_DEVBUSY;
3240 ablk->cbarg = cbarg;
3241 ablk->busy = BFA_TRUE;
3243 m = (struct bfi_ablk_h2i_cfg_req_s *)ablk->mb.msg;
3244 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_ADPT_CONFIG,
3245 bfa_ioc_portid(ablk->ioc));
3247 m->max_pf = (u8)max_pf;
3248 m->max_vf = (u8)max_vf;
3249 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3251 return BFA_STATUS_OK;
3255 bfa_ablk_port_config(struct bfa_ablk_s *ablk, int port, enum bfa_mode_s mode,
3256 int max_pf, int max_vf, bfa_ablk_cbfn_t cbfn, void *cbarg)
3258 struct bfi_ablk_h2i_cfg_req_s *m;
3260 if (!bfa_ioc_is_operational(ablk->ioc)) {
3261 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3262 return BFA_STATUS_IOC_FAILURE;
3266 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3267 return BFA_STATUS_DEVBUSY;
3271 ablk->cbarg = cbarg;
3272 ablk->busy = BFA_TRUE;
3274 m = (struct bfi_ablk_h2i_cfg_req_s *)ablk->mb.msg;
3275 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PORT_CONFIG,
3276 bfa_ioc_portid(ablk->ioc));
3279 m->max_pf = (u8)max_pf;
3280 m->max_vf = (u8)max_vf;
3281 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3283 return BFA_STATUS_OK;
3287 bfa_ablk_pf_update(struct bfa_ablk_s *ablk, int pcifn, int bw,
3288 bfa_ablk_cbfn_t cbfn, void *cbarg)
3290 struct bfi_ablk_h2i_pf_req_s *m;
3292 if (!bfa_ioc_is_operational(ablk->ioc)) {
3293 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3294 return BFA_STATUS_IOC_FAILURE;
3298 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3299 return BFA_STATUS_DEVBUSY;
3303 ablk->cbarg = cbarg;
3304 ablk->busy = BFA_TRUE;
3306 m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
3307 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_UPDATE,
3308 bfa_ioc_portid(ablk->ioc));
3309 m->pcifn = (u8)pcifn;
3310 m->bw = cpu_to_be32(bw);
3311 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3313 return BFA_STATUS_OK;
3317 bfa_ablk_optrom_en(struct bfa_ablk_s *ablk, bfa_ablk_cbfn_t cbfn, void *cbarg)
3319 struct bfi_ablk_h2i_optrom_s *m;
3321 if (!bfa_ioc_is_operational(ablk->ioc)) {
3322 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3323 return BFA_STATUS_IOC_FAILURE;
3327 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3328 return BFA_STATUS_DEVBUSY;
3332 ablk->cbarg = cbarg;
3333 ablk->busy = BFA_TRUE;
3335 m = (struct bfi_ablk_h2i_optrom_s *)ablk->mb.msg;
3336 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_OPTROM_ENABLE,
3337 bfa_ioc_portid(ablk->ioc));
3338 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3340 return BFA_STATUS_OK;
3344 bfa_ablk_optrom_dis(struct bfa_ablk_s *ablk, bfa_ablk_cbfn_t cbfn, void *cbarg)
3346 struct bfi_ablk_h2i_optrom_s *m;
3348 if (!bfa_ioc_is_operational(ablk->ioc)) {
3349 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3350 return BFA_STATUS_IOC_FAILURE;
3354 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3355 return BFA_STATUS_DEVBUSY;
3359 ablk->cbarg = cbarg;
3360 ablk->busy = BFA_TRUE;
3362 m = (struct bfi_ablk_h2i_optrom_s *)ablk->mb.msg;
3363 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_OPTROM_DISABLE,
3364 bfa_ioc_portid(ablk->ioc));
3365 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3367 return BFA_STATUS_OK;
3371 * SFP module specific
3374 /* forward declarations */
3375 static void bfa_sfp_getdata_send(struct bfa_sfp_s *sfp);
3376 static void bfa_sfp_media_get(struct bfa_sfp_s *sfp);
3377 static bfa_status_t bfa_sfp_speed_valid(struct bfa_sfp_s *sfp,
3378 enum bfa_port_speed portspeed);
3381 bfa_cb_sfp_show(struct bfa_sfp_s *sfp)
3383 bfa_trc(sfp, sfp->lock);
3385 sfp->cbfn(sfp->cbarg, sfp->status);
3391 bfa_cb_sfp_state_query(struct bfa_sfp_s *sfp)
3393 bfa_trc(sfp, sfp->portspeed);
3395 bfa_sfp_media_get(sfp);
3396 if (sfp->state_query_cbfn)
3397 sfp->state_query_cbfn(sfp->state_query_cbarg,
3402 if (sfp->portspeed) {
3403 sfp->status = bfa_sfp_speed_valid(sfp, sfp->portspeed);
3404 if (sfp->state_query_cbfn)
3405 sfp->state_query_cbfn(sfp->state_query_cbarg,
3407 sfp->portspeed = BFA_PORT_SPEED_UNKNOWN;
3410 sfp->state_query_lock = 0;
3411 sfp->state_query_cbfn = NULL;
3415 * IOC event handler.
3418 bfa_sfp_notify(void *sfp_arg, enum bfa_ioc_event_e event)
3420 struct bfa_sfp_s *sfp = sfp_arg;
3422 bfa_trc(sfp, event);
3423 bfa_trc(sfp, sfp->lock);
3424 bfa_trc(sfp, sfp->state_query_lock);
3427 case BFA_IOC_E_DISABLED:
3428 case BFA_IOC_E_FAILED:
3430 sfp->status = BFA_STATUS_IOC_FAILURE;
3431 bfa_cb_sfp_show(sfp);
3434 if (sfp->state_query_lock) {
3435 sfp->status = BFA_STATUS_IOC_FAILURE;
3436 bfa_cb_sfp_state_query(sfp);
3449 bfa_sfp_getdata_send(struct bfa_sfp_s *sfp)
3451 struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
3453 bfa_trc(sfp, req->memtype);
3455 /* build host command */
3456 bfi_h2i_set(req->mh, BFI_MC_SFP, BFI_SFP_H2I_SHOW,
3457 bfa_ioc_portid(sfp->ioc));
3460 bfa_ioc_mbox_queue(sfp->ioc, &sfp->mbcmd);
3464 * SFP is valid, read sfp data
3467 bfa_sfp_getdata(struct bfa_sfp_s *sfp, enum bfi_sfp_mem_e memtype)
3469 struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
3471 WARN_ON(sfp->lock != 0);
3472 bfa_trc(sfp, sfp->state);
3475 sfp->memtype = memtype;
3476 req->memtype = memtype;
3479 bfa_alen_set(&req->alen, sizeof(struct sfp_mem_s), sfp->dbuf_pa);
3481 bfa_sfp_getdata_send(sfp);
3488 bfa_sfp_show_comp(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg)
3490 struct bfi_sfp_rsp_s *rsp = (struct bfi_sfp_rsp_s *) msg;
3494 * receiving response after ioc failure
3496 bfa_trc(sfp, sfp->lock);
3500 bfa_trc(sfp, rsp->status);
3501 if (rsp->status == BFA_STATUS_OK) {
3502 sfp->data_valid = 1;
3503 if (sfp->state == BFA_SFP_STATE_VALID)
3504 sfp->status = BFA_STATUS_OK;
3505 else if (sfp->state == BFA_SFP_STATE_UNSUPPORT)
3506 sfp->status = BFA_STATUS_SFP_UNSUPP;
3508 bfa_trc(sfp, sfp->state);
3510 sfp->data_valid = 0;
3511 sfp->status = rsp->status;
3512 /* sfpshow shouldn't change sfp state */
3515 bfa_trc(sfp, sfp->memtype);
3516 if (sfp->memtype == BFI_SFP_MEM_DIAGEXT) {
3517 bfa_trc(sfp, sfp->data_valid);
3518 if (sfp->data_valid) {
3519 u32 size = sizeof(struct sfp_mem_s);
3520 u8 *des = (u8 *) &(sfp->sfpmem->srlid_base);
3521 memcpy(des, sfp->dbuf_kva, size);
3524 * Queue completion callback.
3526 bfa_cb_sfp_show(sfp);
3530 bfa_trc(sfp, sfp->state_query_lock);
3531 if (sfp->state_query_lock) {
3532 sfp->state = rsp->state;
3533 /* Complete callback */
3534 bfa_cb_sfp_state_query(sfp);
3539 * SFP query fw sfp state
3542 bfa_sfp_state_query(struct bfa_sfp_s *sfp)
3544 struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
3546 /* Should not be doing query if not in _INIT state */
3547 WARN_ON(sfp->state != BFA_SFP_STATE_INIT);
3548 WARN_ON(sfp->state_query_lock != 0);
3549 bfa_trc(sfp, sfp->state);
3551 sfp->state_query_lock = 1;
3555 bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
3559 bfa_sfp_media_get(struct bfa_sfp_s *sfp)
3561 enum bfa_defs_sfp_media_e *media = sfp->media;
3563 *media = BFA_SFP_MEDIA_UNKNOWN;
3565 if (sfp->state == BFA_SFP_STATE_UNSUPPORT)
3566 *media = BFA_SFP_MEDIA_UNSUPPORT;
3567 else if (sfp->state == BFA_SFP_STATE_VALID) {
3568 union sfp_xcvr_e10g_code_u e10g;
3569 struct sfp_mem_s *sfpmem = (struct sfp_mem_s *)sfp->dbuf_kva;
3570 u16 xmtr_tech = (sfpmem->srlid_base.xcvr[4] & 0x3) << 7 |
3571 (sfpmem->srlid_base.xcvr[5] >> 1);
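/* The transmitter technology field straddles two serial-ID bytes
 * (xcvr[4]/xcvr[5] above); it is reassembled into a single 9-bit
 * value so the SFP_XMTR_TECH_* flags below can be tested directly.
 */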
3573 e10g.b = sfpmem->srlid_base.xcvr[0];
3574 bfa_trc(sfp, e10g.b);
3575 bfa_trc(sfp, xmtr_tech);
3576 /* check fc transmitter tech */
3577 if ((xmtr_tech & SFP_XMTR_TECH_CU) ||
3578 (xmtr_tech & SFP_XMTR_TECH_CP) ||
3579 (xmtr_tech & SFP_XMTR_TECH_CA))
3580 *media = BFA_SFP_MEDIA_CU;
3581 else if ((xmtr_tech & SFP_XMTR_TECH_EL_INTRA) ||
3582 (xmtr_tech & SFP_XMTR_TECH_EL_INTER))
3583 *media = BFA_SFP_MEDIA_EL;
3584 else if ((xmtr_tech & SFP_XMTR_TECH_LL) ||
3585 (xmtr_tech & SFP_XMTR_TECH_LC))
3586 *media = BFA_SFP_MEDIA_LW;
3587 else if ((xmtr_tech & SFP_XMTR_TECH_SL) ||
3588 (xmtr_tech & SFP_XMTR_TECH_SN) ||
3589 (xmtr_tech & SFP_XMTR_TECH_SA))
3590 *media = BFA_SFP_MEDIA_SW;
3591 /* Check 10G Ethernet Compliance code */
3592 else if (e10g.b & 0x10)
3593 *media = BFA_SFP_MEDIA_SW;
3594 else if (e10g.b & 0x60)
3595 *media = BFA_SFP_MEDIA_LW;
3596 else if (e10g.r.e10g_unall & 0x80)
3597 *media = BFA_SFP_MEDIA_UNKNOWN;
3601 bfa_trc(sfp, sfp->state);
3605 bfa_sfp_speed_valid(struct bfa_sfp_s *sfp, enum bfa_port_speed portspeed)
3607 struct sfp_mem_s *sfpmem = (struct sfp_mem_s *)sfp->dbuf_kva;
3608 struct sfp_xcvr_s *xcvr = (struct sfp_xcvr_s *) sfpmem->srlid_base.xcvr;
3609 union sfp_xcvr_fc3_code_u fc3 = xcvr->fc3;
3610 union sfp_xcvr_e10g_code_u e10g = xcvr->e10g;
3612 if (portspeed == BFA_PORT_SPEED_10GBPS) {
3613 if (e10g.r.e10g_sr || e10g.r.e10g_lr)
3614 return BFA_STATUS_OK;
3616 bfa_trc(sfp, e10g.b);
3617 return BFA_STATUS_UNSUPP_SPEED;
3620 if (((portspeed & BFA_PORT_SPEED_16GBPS) && fc3.r.mb1600) ||
3621 ((portspeed & BFA_PORT_SPEED_8GBPS) && fc3.r.mb800) ||
3622 ((portspeed & BFA_PORT_SPEED_4GBPS) && fc3.r.mb400) ||
3623 ((portspeed & BFA_PORT_SPEED_2GBPS) && fc3.r.mb200) ||
3624 ((portspeed & BFA_PORT_SPEED_1GBPS) && fc3.r.mb100))
3625 return BFA_STATUS_OK;
3627 bfa_trc(sfp, portspeed);
3628 bfa_trc(sfp, fc3.b);
3629 bfa_trc(sfp, e10g.b);
3630 return BFA_STATUS_UNSUPP_SPEED;
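/*
 * Example: a request for BFA_PORT_SPEED_8GBPS succeeds only if the
 * transceiver advertises the 800 MB/s FC speed bit (fc3.r.mb800),
 * while a 10G request is validated against the 10G Ethernet SR/LR
 * compliance bits instead.
 */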
3638 bfa_sfp_intr(void *sfparg, struct bfi_mbmsg_s *msg)
3640 struct bfa_sfp_s *sfp = sfparg;
3642 switch (msg->mh.msg_id) {
3643 case BFI_SFP_I2H_SHOW:
3644 bfa_sfp_show_comp(sfp, msg);
3647 case BFI_SFP_I2H_SCN:
3648 bfa_trc(sfp, msg->mh.msg_id);
3652 bfa_trc(sfp, msg->mh.msg_id);
3658 * Return DMA memory needed by sfp module.
3661 bfa_sfp_meminfo(void)
3663 return BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
3667 * Attach virtual and physical memory for SFP.
3670 bfa_sfp_attach(struct bfa_sfp_s *sfp, struct bfa_ioc_s *ioc, void *dev,
3671 struct bfa_trc_mod_s *trcmod)
3675 sfp->trcmod = trcmod;
3681 sfp->data_valid = 0;
3682 sfp->state = BFA_SFP_STATE_INIT;
3683 sfp->state_query_lock = 0;
3684 sfp->state_query_cbfn = NULL;
3685 sfp->state_query_cbarg = NULL;
3687 sfp->portspeed = BFA_PORT_SPEED_UNKNOWN;
3688 sfp->is_elb = BFA_FALSE;
3690 bfa_ioc_mbox_regisr(sfp->ioc, BFI_MC_SFP, bfa_sfp_intr, sfp);
3691 bfa_q_qe_init(&sfp->ioc_notify);
3692 bfa_ioc_notify_init(&sfp->ioc_notify, bfa_sfp_notify, sfp);
3693 list_add_tail(&sfp->ioc_notify.qe, &sfp->ioc->notify_q);
3697 * Claim Memory for SFP
3700 bfa_sfp_memclaim(struct bfa_sfp_s *sfp, u8 *dm_kva, u64 dm_pa)
3702 sfp->dbuf_kva = dm_kva;
3703 sfp->dbuf_pa = dm_pa;
3704 memset(sfp->dbuf_kva, 0, sizeof(struct sfp_mem_s));
3706 dm_kva += BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
3707 dm_pa += BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
3711 * Show SFP eeprom content
3713 * @param[in] sfp - bfa sfp module
3715 * @param[out] sfpmem - sfp eeprom data
3719 bfa_sfp_show(struct bfa_sfp_s *sfp, struct sfp_mem_s *sfpmem,
3720 bfa_cb_sfp_t cbfn, void *cbarg)
3723 if (!bfa_ioc_is_operational(sfp->ioc)) {
3725 return BFA_STATUS_IOC_NON_OP;
3730 return BFA_STATUS_DEVBUSY;
3735 sfp->sfpmem = sfpmem;
3737 bfa_sfp_getdata(sfp, BFI_SFP_MEM_DIAGEXT);
3738 return BFA_STATUS_OK;
3742 * Return SFP Media type
3744 * @param[in] sfp - bfa sfp module
3746 * @param[out] media - sfp media type
3750 bfa_sfp_media(struct bfa_sfp_s *sfp, enum bfa_defs_sfp_media_e *media,
3751 bfa_cb_sfp_t cbfn, void *cbarg)
3753 if (!bfa_ioc_is_operational(sfp->ioc)) {
3755 return BFA_STATUS_IOC_NON_OP;
3759 if (sfp->state == BFA_SFP_STATE_INIT) {
3760 if (sfp->state_query_lock) {
3762 return BFA_STATUS_DEVBUSY;
3764 sfp->state_query_cbfn = cbfn;
3765 sfp->state_query_cbarg = cbarg;
3766 bfa_sfp_state_query(sfp);
3767 return BFA_STATUS_SFP_NOT_READY;
3771 bfa_sfp_media_get(sfp);
3772 return BFA_STATUS_OK;
3776 * Check if user set port speed is allowed by the SFP
3778 * @param[in] sfp - bfa sfp module
3779 * @param[in] portspeed - port speed from user
3783 bfa_sfp_speed(struct bfa_sfp_s *sfp, enum bfa_port_speed portspeed,
3784 bfa_cb_sfp_t cbfn, void *cbarg)
3786 WARN_ON(portspeed == BFA_PORT_SPEED_UNKNOWN);
3788 if (!bfa_ioc_is_operational(sfp->ioc))
3789 return BFA_STATUS_IOC_NON_OP;
3791 /* For Mezz card, all speeds are allowed */
3792 if (bfa_mfg_is_mezz(sfp->ioc->attr->card_type))
3793 return BFA_STATUS_OK;
3795 /* Check SFP state */
3796 sfp->portspeed = portspeed;
3797 if (sfp->state == BFA_SFP_STATE_INIT) {
3798 if (sfp->state_query_lock) {
3800 return BFA_STATUS_DEVBUSY;
3802 sfp->state_query_cbfn = cbfn;
3803 sfp->state_query_cbarg = cbarg;
3804 bfa_sfp_state_query(sfp);
3805 return BFA_STATUS_SFP_NOT_READY;
3809 if (sfp->state == BFA_SFP_STATE_REMOVED ||
3810 sfp->state == BFA_SFP_STATE_FAILED) {
3811 bfa_trc(sfp, sfp->state);
3812 return BFA_STATUS_NO_SFP_DEV;
3815 if (sfp->state == BFA_SFP_STATE_INSERTED) {
3816 bfa_trc(sfp, sfp->state);
3817 return BFA_STATUS_DEVBUSY; /* sfp is reading data */
3820 /* For eloopback, all speeds are allowed */
3822 return BFA_STATUS_OK;
3824 return bfa_sfp_speed_valid(sfp, portspeed);
3828 * Flash module specific
3832 * The FLASH DMA buffer should be big enough to hold both the MFG block
3833 * and the asic block (64k) at the same time, and should also be 2k
3834 * aligned so that a write segment does not cross a sector boundary.
3836 #define BFA_FLASH_SEG_SZ 2048
3837 #define BFA_FLASH_DMA_BUF_SZ \
3838 BFA_ROUNDUP(0x010000 + sizeof(struct bfa_mfg_block_s), BFA_FLASH_SEG_SZ)
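/*
 * e.g. the macro rounds 0x10000 + sizeof(struct bfa_mfg_block_s) up to
 * the next BFA_FLASH_SEG_SZ (2048-byte) multiple, keeping every write
 * segment staged from this buffer sector aligned.
 */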
3841 bfa_flash_cb(struct bfa_flash_s *flash)
3845 flash->cbfn(flash->cbarg, flash->status);
3849 bfa_flash_notify(void *cbarg, enum bfa_ioc_event_e event)
3851 struct bfa_flash_s *flash = cbarg;
3853 bfa_trc(flash, event);
3855 case BFA_IOC_E_DISABLED:
3856 case BFA_IOC_E_FAILED:
3857 if (flash->op_busy) {
3858 flash->status = BFA_STATUS_IOC_FAILURE;
3859 flash->cbfn(flash->cbarg, flash->status);
3870 * Send flash attribute query request.
3872 * @param[in] cbarg - callback argument
3875 bfa_flash_query_send(void *cbarg)
3877 struct bfa_flash_s *flash = cbarg;
3878 struct bfi_flash_query_req_s *msg =
3879 (struct bfi_flash_query_req_s *) flash->mb.msg;
3881 bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_QUERY_REQ,
3882 bfa_ioc_portid(flash->ioc));
3883 bfa_alen_set(&msg->alen, sizeof(struct bfa_flash_attr_s),
3885 bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
3889 * Send flash write request.
3891 * @param[in] cbarg - callback argument
3894 bfa_flash_write_send(struct bfa_flash_s *flash)
3896 struct bfi_flash_write_req_s *msg =
3897 (struct bfi_flash_write_req_s *) flash->mb.msg;
3900 msg->type = cpu_to_be32(flash->type);
3901 msg->instance = flash->instance;
3902 msg->offset = cpu_to_be32(flash->addr_off + flash->offset);
3903 len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
3904 flash->residue : BFA_FLASH_DMA_BUF_SZ;
3905 msg->length = cpu_to_be32(len);
3907 /* indicate if it's the last msg of the whole write operation */
3908 msg->last = (len == flash->residue) ? 1 : 0;
3910 bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_WRITE_REQ,
3911 bfa_ioc_portid(flash->ioc));
3912 bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
3913 memcpy(flash->dbuf_kva, flash->ubuf + flash->offset, len);
3914 bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
3916 flash->residue -= len;
3917 flash->offset += len;
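/*
 * Writes larger than BFA_FLASH_DMA_BUF_SZ are streamed in chunks:
 * residue/offset track progress here, and the write-response handler
 * in bfa_flash_intr() re-invokes this routine until residue reaches
 * zero, with msg->last flagging the final chunk to the firmware.
 */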
3921 * Send flash read request.
3923 * @param[in] cbarg - callback argument
3926 bfa_flash_read_send(void *cbarg)
3928 struct bfa_flash_s *flash = cbarg;
3929 struct bfi_flash_read_req_s *msg =
3930 (struct bfi_flash_read_req_s *) flash->mb.msg;
3933 msg->type = cpu_to_be32(flash->type);
3934 msg->instance = flash->instance;
3935 msg->offset = cpu_to_be32(flash->addr_off + flash->offset);
3936 len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
3937 flash->residue : BFA_FLASH_DMA_BUF_SZ;
3938 msg->length = cpu_to_be32(len);
3939 bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_READ_REQ,
3940 bfa_ioc_portid(flash->ioc));
3941 bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
3942 bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
3946 * Send flash erase request.
3948 * @param[in] cbarg - callback argument
3951 bfa_flash_erase_send(void *cbarg)
3953 struct bfa_flash_s *flash = cbarg;
3954 struct bfi_flash_erase_req_s *msg =
3955 (struct bfi_flash_erase_req_s *) flash->mb.msg;
3957 msg->type = cpu_to_be32(flash->type);
3958 msg->instance = flash->instance;
3959 bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_ERASE_REQ,
3960 bfa_ioc_portid(flash->ioc));
3961 bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
3965 * Process flash response messages upon receiving interrupts.
3967 * @param[in] flasharg - flash structure
3968 * @param[in] msg - message structure
3971 bfa_flash_intr(void *flasharg, struct bfi_mbmsg_s *msg)
3973 struct bfa_flash_s *flash = flasharg;
3977 struct bfi_flash_query_rsp_s *query;
3978 struct bfi_flash_erase_rsp_s *erase;
3979 struct bfi_flash_write_rsp_s *write;
3980 struct bfi_flash_read_rsp_s *read;
3981 struct bfi_mbmsg_s *msg;
3985 bfa_trc(flash, msg->mh.msg_id);
3987 if (!flash->op_busy && msg->mh.msg_id != BFI_FLASH_I2H_EVENT) {
3988 /* receiving response after ioc failure */
3989 bfa_trc(flash, 0x9999);
3993 switch (msg->mh.msg_id) {
3994 case BFI_FLASH_I2H_QUERY_RSP:
3995 status = be32_to_cpu(m.query->status);
3996 bfa_trc(flash, status);
3997 if (status == BFA_STATUS_OK) {
3999 struct bfa_flash_attr_s *attr, *f;
4001 attr = (struct bfa_flash_attr_s *) flash->ubuf;
4002 f = (struct bfa_flash_attr_s *) flash->dbuf_kva;
4003 attr->status = be32_to_cpu(f->status);
4004 attr->npart = be32_to_cpu(f->npart);
4005 bfa_trc(flash, attr->status);
4006 bfa_trc(flash, attr->npart);
4007 for (i = 0; i < attr->npart; i++) {
4008 attr->part[i].part_type =
4009 be32_to_cpu(f->part[i].part_type);
4010 attr->part[i].part_instance =
4011 be32_to_cpu(f->part[i].part_instance);
4012 attr->part[i].part_off =
4013 be32_to_cpu(f->part[i].part_off);
4014 attr->part[i].part_size =
4015 be32_to_cpu(f->part[i].part_size);
4016 attr->part[i].part_len =
4017 be32_to_cpu(f->part[i].part_len);
4018 attr->part[i].part_status =
4019 be32_to_cpu(f->part[i].part_status);
4022 flash->status = status;
4023 bfa_flash_cb(flash);
4025 case BFI_FLASH_I2H_ERASE_RSP:
4026 status = be32_to_cpu(m.erase->status);
4027 bfa_trc(flash, status);
4028 flash->status = status;
4029 bfa_flash_cb(flash);
4031 case BFI_FLASH_I2H_WRITE_RSP:
4032 status = be32_to_cpu(m.write->status);
4033 bfa_trc(flash, status);
4034 if (status != BFA_STATUS_OK || flash->residue == 0) {
4035 flash->status = status;
4036 bfa_flash_cb(flash);
4038 bfa_trc(flash, flash->offset);
4039 bfa_flash_write_send(flash);
4042 case BFI_FLASH_I2H_READ_RSP:
4043 status = be32_to_cpu(m.read->status);
4044 bfa_trc(flash, status);
4045 if (status != BFA_STATUS_OK) {
4046 flash->status = status;
4047 bfa_flash_cb(flash);
4049 u32 len = be32_to_cpu(m.read->length);
4050 bfa_trc(flash, flash->offset);
4051 bfa_trc(flash, len);
4052 memcpy(flash->ubuf + flash->offset,
4053 flash->dbuf_kva, len);
4054 flash->residue -= len;
4055 flash->offset += len;
4056 if (flash->residue == 0) {
4057 flash->status = status;
4058 bfa_flash_cb(flash);
4060 bfa_flash_read_send(flash);
4063 case BFI_FLASH_I2H_BOOT_VER_RSP:
4064 case BFI_FLASH_I2H_EVENT:
4065 bfa_trc(flash, msg->mh.msg_id);
4074 * Flash memory info API.
4076 * @param[in] mincfg - minimal cfg variable
4079 bfa_flash_meminfo(bfa_boolean_t mincfg)
4081 /* min driver doesn't need flash */
4084 return BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
4090 * @param[in] flash - flash structure
4091 * @param[in] ioc - ioc structure
4092 * @param[in] dev - device structure
4093 * @param[in] trcmod - trace module
4094 * @param[in] mincfg - minimal cfg variable
4097 bfa_flash_attach(struct bfa_flash_s *flash, struct bfa_ioc_s *ioc, void *dev,
4098 struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
4101 flash->trcmod = trcmod;
4103 flash->cbarg = NULL;
4106 bfa_ioc_mbox_regisr(flash->ioc, BFI_MC_FLASH, bfa_flash_intr, flash);
4107 bfa_q_qe_init(&flash->ioc_notify);
4108 bfa_ioc_notify_init(&flash->ioc_notify, bfa_flash_notify, flash);
4109 list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q);
4111 /* min driver doesn't need flash */
4113 flash->dbuf_kva = NULL;
4119 * Claim memory for flash
4121 * @param[in] flash - flash structure
4122 * @param[in] dm_kva - pointer to virtual memory address
4123 * @param[in] dm_pa - physical memory address
4124 * @param[in] mincfg - minimal cfg variable
4127 bfa_flash_memclaim(struct bfa_flash_s *flash, u8 *dm_kva, u64 dm_pa,
4128 bfa_boolean_t mincfg)
4133 flash->dbuf_kva = dm_kva;
4134 flash->dbuf_pa = dm_pa;
4135 memset(flash->dbuf_kva, 0, BFA_FLASH_DMA_BUF_SZ);
4136 dm_kva += BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
4137 dm_pa += BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
4141 * Get flash attribute.
4143 * @param[in] flash - flash structure
4144 * @param[in] attr - flash attribute structure
4145 * @param[in] cbfn - callback function
4146 * @param[in] cbarg - callback argument
4151 bfa_flash_get_attr(struct bfa_flash_s *flash, struct bfa_flash_attr_s *attr,
4152 bfa_cb_flash_t cbfn, void *cbarg)
4154 bfa_trc(flash, BFI_FLASH_H2I_QUERY_REQ);
4156 if (!bfa_ioc_is_operational(flash->ioc))
4157 return BFA_STATUS_IOC_NON_OP;
4159 if (flash->op_busy) {
4160 bfa_trc(flash, flash->op_busy);
4161 return BFA_STATUS_DEVBUSY;
4166 flash->cbarg = cbarg;
4167 flash->ubuf = (u8 *) attr;
4168 bfa_flash_query_send(flash);
4170 return BFA_STATUS_OK;
4174 * Erase flash partition.
4176 * @param[in] flash - flash structure
4177 * @param[in] type - flash partition type
4178 * @param[in] instance - flash partition instance
4179 * @param[in] cbfn - callback function
4180 * @param[in] cbarg - callback argument
4185 bfa_flash_erase_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
4186 u8 instance, bfa_cb_flash_t cbfn, void *cbarg)
4188 bfa_trc(flash, BFI_FLASH_H2I_ERASE_REQ);
4189 bfa_trc(flash, type);
4190 bfa_trc(flash, instance);
4192 if (!bfa_ioc_is_operational(flash->ioc))
4193 return BFA_STATUS_IOC_NON_OP;
4195 if (flash->op_busy) {
4196 bfa_trc(flash, flash->op_busy);
4197 return BFA_STATUS_DEVBUSY;
4202 flash->cbarg = cbarg;
4204 flash->instance = instance;
4206 bfa_flash_erase_send(flash);
4207 return BFA_STATUS_OK;
4211 * Update flash partition.
4213 * @param[in] flash - flash structure
4214 * @param[in] type - flash partition type
4215 * @param[in] instance - flash partition instance
4216 * @param[in] buf - update data buffer
4217 * @param[in] len - data buffer length
4218 * @param[in] offset - offset relative to the partition starting address
4219 * @param[in] cbfn - callback function
4220 * @param[in] cbarg - callback argument
4225 bfa_flash_update_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
4226 u8 instance, void *buf, u32 len, u32 offset,
4227 bfa_cb_flash_t cbfn, void *cbarg)
4229 bfa_trc(flash, BFI_FLASH_H2I_WRITE_REQ);
4230 bfa_trc(flash, type);
4231 bfa_trc(flash, instance);
4232 bfa_trc(flash, len);
4233 bfa_trc(flash, offset);
4235 if (!bfa_ioc_is_operational(flash->ioc))
4236 return BFA_STATUS_IOC_NON_OP;
4239 * 'len' must be on a word (4-byte) boundary
4240 * 'offset' must be on a sector (16kb) boundary
4242 if (!len || (len & 0x03) || (offset & 0x00003FFF))
4243 return BFA_STATUS_FLASH_BAD_LEN;
4245 if (type == BFA_FLASH_PART_MFG)
4246 return BFA_STATUS_EINVAL;
4248 if (flash->op_busy) {
4249 bfa_trc(flash, flash->op_busy);
4250 return BFA_STATUS_DEVBUSY;
4255 flash->cbarg = cbarg;
4257 flash->instance = instance;
4258 flash->residue = len;
4260 flash->addr_off = offset;
4263 bfa_flash_write_send(flash);
4264 return BFA_STATUS_OK;
4268 * Read flash partition.
4270 * @param[in] flash - flash structure
4271 * @param[in] type - flash partition type
4272 * @param[in] instance - flash partition instance
4273 * @param[in] buf - read data buffer
4274 * @param[in] len - data buffer length
4275 * @param[in] offset - offset relative to the partition starting address
4276 * @param[in] cbfn - callback function
4277 * @param[in] cbarg - callback argument
4282 bfa_flash_read_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
4283 u8 instance, void *buf, u32 len, u32 offset,
4284 bfa_cb_flash_t cbfn, void *cbarg)
4286 bfa_trc(flash, BFI_FLASH_H2I_READ_REQ);
4287 bfa_trc(flash, type);
4288 bfa_trc(flash, instance);
4289 bfa_trc(flash, len);
4290 bfa_trc(flash, offset);
4292 if (!bfa_ioc_is_operational(flash->ioc))
4293 return BFA_STATUS_IOC_NON_OP;
4296 * 'len' must be on a word (4-byte) boundary
4297 * 'offset' must be on a sector (16kb) boundary
4299 if (!len || (len & 0x03) || (offset & 0x00003FFF))
4300 return BFA_STATUS_FLASH_BAD_LEN;
4302 if (flash->op_busy) {
4303 bfa_trc(flash, flash->op_busy);
4304 return BFA_STATUS_DEVBUSY;
4309 flash->cbarg = cbarg;
4311 flash->instance = instance;
4312 flash->residue = len;
4314 flash->addr_off = offset;
4316 bfa_flash_read_send(flash);
4318 return BFA_STATUS_OK;
4322 * DIAG module specific
4325 #define BFA_DIAG_MEMTEST_TOV 50000 /* memtest timeout in msec */
4326 #define BFA_DIAG_FWPING_TOV 1000 /* msec */
4328 /* IOC event handler */
4330 bfa_diag_notify(void *diag_arg, enum bfa_ioc_event_e event)
4332 struct bfa_diag_s *diag = diag_arg;
4334 bfa_trc(diag, event);
4335 bfa_trc(diag, diag->block);
4336 bfa_trc(diag, diag->fwping.lock);
4337 bfa_trc(diag, diag->tsensor.lock);
4340 case BFA_IOC_E_DISABLED:
4341 case BFA_IOC_E_FAILED:
4342 if (diag->fwping.lock) {
4343 diag->fwping.status = BFA_STATUS_IOC_FAILURE;
4344 diag->fwping.cbfn(diag->fwping.cbarg,
4345 diag->fwping.status);
4346 diag->fwping.lock = 0;
4349 if (diag->tsensor.lock) {
4350 diag->tsensor.status = BFA_STATUS_IOC_FAILURE;
4351 diag->tsensor.cbfn(diag->tsensor.cbarg,
4352 diag->tsensor.status);
4353 diag->tsensor.lock = 0;
4357 if (diag->timer_active) {
4358 bfa_timer_stop(&diag->timer);
4359 diag->timer_active = 0;
4362 diag->status = BFA_STATUS_IOC_FAILURE;
4363 diag->cbfn(diag->cbarg, diag->status);
4374 bfa_diag_memtest_done(void *cbarg)
4376 struct bfa_diag_s *diag = cbarg;
4377 struct bfa_ioc_s *ioc = diag->ioc;
4378 struct bfa_diag_memtest_result *res = diag->result;
4379 u32 loff = BFI_BOOT_MEMTEST_RES_ADDR;
4380 u32 pgnum, pgoff, i;
4382 pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
4383 pgoff = PSS_SMEM_PGOFF(loff);
4385 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
4387 for (i = 0; i < (sizeof(struct bfa_diag_memtest_result) /
4388 sizeof(u32)); i++) {
4389 /* read test result from smem */
4390 *((u32 *) res + i) =
4391 bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
4392 loff += sizeof(u32);
4395 /* Reset IOC fwstates to BFI_IOC_UNINIT */
4396 bfa_ioc_reset_fwstate(ioc);
4398 res->status = swab32(res->status);
4399 bfa_trc(diag, res->status);
4401 if (res->status == BFI_BOOT_MEMTEST_RES_SIG)
4402 diag->status = BFA_STATUS_OK;
4404 diag->status = BFA_STATUS_MEMTEST_FAILED;
4405 res->addr = swab32(res->addr);
4406 res->exp = swab32(res->exp);
4407 res->act = swab32(res->act);
4408 res->err_status = swab32(res->err_status);
4409 res->err_status1 = swab32(res->err_status1);
4410 res->err_addr = swab32(res->err_addr);
4411 bfa_trc(diag, res->addr);
4412 bfa_trc(diag, res->exp);
4413 bfa_trc(diag, res->act);
4414 bfa_trc(diag, res->err_status);
4415 bfa_trc(diag, res->err_status1);
4416 bfa_trc(diag, res->err_addr);
4418 diag->timer_active = 0;
4419 diag->cbfn(diag->cbarg, diag->status);
4428 * Perform DMA test directly
4431 diag_fwping_send(struct bfa_diag_s *diag)
4433 struct bfi_diag_fwping_req_s *fwping_req;
4436 bfa_trc(diag, diag->fwping.dbuf_pa);
4438 /* fill DMA area with pattern */
4439 for (i = 0; i < (BFI_DIAG_DMA_BUF_SZ >> 2); i++)
4440 *((u32 *)diag->fwping.dbuf_kva + i) = diag->fwping.data;
4443 fwping_req = (struct bfi_diag_fwping_req_s *)diag->fwping.mbcmd.msg;
4446 bfa_alen_set(&fwping_req->alen, BFI_DIAG_DMA_BUF_SZ,
4447 diag->fwping.dbuf_pa);
4448 /* Set up dma count */
4449 fwping_req->count = cpu_to_be32(diag->fwping.count);
4450 /* Set up data pattern */
4451 fwping_req->data = diag->fwping.data;
4453 /* build host command */
4454 bfi_h2i_set(fwping_req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_FWPING,
4455 bfa_ioc_portid(diag->ioc));
4458 bfa_ioc_mbox_queue(diag->ioc, &diag->fwping.mbcmd);
4462 diag_fwping_comp(struct bfa_diag_s *diag,
4463 struct bfi_diag_fwping_rsp_s *diag_rsp)
4465 u32 rsp_data = diag_rsp->data;
4466 u8 rsp_dma_status = diag_rsp->dma_status;
4468 bfa_trc(diag, rsp_data);
4469 bfa_trc(diag, rsp_dma_status);
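/*
 * The expected DMA pattern alternates with the ping count parity; the
 * firmware appears to complement the buffer on every other ping
 * (inferred from the pat selection below).
 */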
4471 if (rsp_dma_status == BFA_STATUS_OK) {
4473 pat = (diag->fwping.count & 0x1) ? ~(diag->fwping.data) :
4475 /* Check mbox data */
4476 if (diag->fwping.data != rsp_data) {
4477 bfa_trc(diag, rsp_data);
4478 diag->fwping.result->dmastatus =
4479 BFA_STATUS_DATACORRUPTED;
4480 diag->fwping.status = BFA_STATUS_DATACORRUPTED;
4481 diag->fwping.cbfn(diag->fwping.cbarg,
4482 diag->fwping.status);
4483 diag->fwping.lock = 0;
4486 /* Check dma pattern */
4487 for (i = 0; i < (BFI_DIAG_DMA_BUF_SZ >> 2); i++) {
4488 if (*((u32 *)diag->fwping.dbuf_kva + i) != pat) {
4492 *((u32 *)diag->fwping.dbuf_kva + i));
4493 diag->fwping.result->dmastatus =
4494 BFA_STATUS_DATACORRUPTED;
4495 diag->fwping.status = BFA_STATUS_DATACORRUPTED;
4496 diag->fwping.cbfn(diag->fwping.cbarg,
4497 diag->fwping.status);
4498 diag->fwping.lock = 0;
4502 diag->fwping.result->dmastatus = BFA_STATUS_OK;
4503 diag->fwping.status = BFA_STATUS_OK;
4504 diag->fwping.cbfn(diag->fwping.cbarg, diag->fwping.status);
4505 diag->fwping.lock = 0;
4507 diag->fwping.status = BFA_STATUS_HDMA_FAILED;
4508 diag->fwping.cbfn(diag->fwping.cbarg, diag->fwping.status);
4509 diag->fwping.lock = 0;
4514 * Temperature Sensor
4518 diag_tempsensor_send(struct bfa_diag_s *diag)
4520 struct bfi_diag_ts_req_s *msg;
4522 msg = (struct bfi_diag_ts_req_s *)diag->tsensor.mbcmd.msg;
4523 bfa_trc(diag, msg->temp);
4524 /* build host command */
4525 bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_TEMPSENSOR,
4526 bfa_ioc_portid(diag->ioc));
4528 bfa_ioc_mbox_queue(diag->ioc, &diag->tsensor.mbcmd);
4532 diag_tempsensor_comp(struct bfa_diag_s *diag, bfi_diag_ts_rsp_t *rsp)
4534 if (!diag->tsensor.lock) {
4535 /* receiving response after ioc failure */
4536 bfa_trc(diag, diag->tsensor.lock);
4541 * The ASIC junction tempsensor is a reg read operation;
4542 * it will always return OK
4544 diag->tsensor.temp->temp = be16_to_cpu(rsp->temp);
4545 diag->tsensor.temp->ts_junc = rsp->ts_junc;
4546 diag->tsensor.temp->ts_brd = rsp->ts_brd;
4547 diag->tsensor.temp->status = BFA_STATUS_OK;
4550 if (rsp->status == BFA_STATUS_OK) {
4551 diag->tsensor.temp->brd_temp =
4552 be16_to_cpu(rsp->brd_temp);
4554 bfa_trc(diag, rsp->status);
4555 diag->tsensor.temp->brd_temp = 0;
4556 diag->tsensor.temp->status = BFA_STATUS_DEVBUSY;
4559 bfa_trc(diag, rsp->ts_junc);
4560 bfa_trc(diag, rsp->temp);
4561 bfa_trc(diag, rsp->ts_brd);
4562 bfa_trc(diag, rsp->brd_temp);
4563 diag->tsensor.cbfn(diag->tsensor.cbarg, diag->tsensor.status);
4564 diag->tsensor.lock = 0;
4571 diag_ledtest_send(struct bfa_diag_s *diag, struct bfa_diag_ledtest_s *ledtest)
4573 struct bfi_diag_ledtest_req_s *msg;
4575 msg = (struct bfi_diag_ledtest_req_s *)diag->ledtest.mbcmd.msg;
4576 /* build host command */
4577 bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_LEDTEST,
4578 bfa_ioc_portid(diag->ioc));
4581 * convert the freq from N blinks per 10 sec to
4582 * crossbow ontime value. We do it here because division is needed
4585 ledtest->freq = 500 / ledtest->freq;
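/*
 * e.g. a request of 10 blinks per 10 sec becomes 500 / 10 = 50 ontime
 * units; an input above 500 truncates to 0, which the check below
 * guards against.
 */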
4587 if (ledtest->freq == 0)
4590 bfa_trc(diag, ledtest->freq);
4591 /* mcpy(&ledtest_req->req, ledtest, sizeof(bfa_diag_ledtest_t)); */
4592 msg->cmd = (u8) ledtest->cmd;
4593 msg->color = (u8) ledtest->color;
4594 msg->portid = bfa_ioc_portid(diag->ioc);
4595 msg->led = ledtest->led;
4596 msg->freq = cpu_to_be16(ledtest->freq);
4599 bfa_ioc_mbox_queue(diag->ioc, &diag->ledtest.mbcmd);
4603 diag_ledtest_comp(struct bfa_diag_s *diag, struct bfi_diag_ledtest_rsp_s * msg)
4605 bfa_trc(diag, diag->ledtest.lock);
4606 diag->ledtest.lock = BFA_FALSE;
4607 /* no bfa_cb_queue is needed because driver is not waiting */
4614 diag_portbeacon_send(struct bfa_diag_s *diag, bfa_boolean_t beacon, u32 sec)
4616 struct bfi_diag_portbeacon_req_s *msg;
4618 msg = (struct bfi_diag_portbeacon_req_s *)diag->beacon.mbcmd.msg;
4619 /* build host command */
4620 bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_PORTBEACON,
4621 bfa_ioc_portid(diag->ioc));
4622 msg->beacon = beacon;
4623 msg->period = cpu_to_be32(sec);
4625 bfa_ioc_mbox_queue(diag->ioc, &diag->beacon.mbcmd);
4629 diag_portbeacon_comp(struct bfa_diag_s *diag)
4631 bfa_trc(diag, diag->beacon.state);
4632 diag->beacon.state = BFA_FALSE;
4633 if (diag->cbfn_beacon)
4634 diag->cbfn_beacon(diag->dev, BFA_FALSE, diag->beacon.link_e2e);
4638 * Diag hmbox handler
4641 bfa_diag_intr(void *diagarg, struct bfi_mbmsg_s *msg)
4643 struct bfa_diag_s *diag = diagarg;
4645 switch (msg->mh.msg_id) {
4646 case BFI_DIAG_I2H_PORTBEACON:
4647 diag_portbeacon_comp(diag);
4649 case BFI_DIAG_I2H_FWPING:
4650 diag_fwping_comp(diag, (struct bfi_diag_fwping_rsp_s *) msg);
4652 case BFI_DIAG_I2H_TEMPSENSOR:
4653 diag_tempsensor_comp(diag, (bfi_diag_ts_rsp_t *) msg);
4655 case BFI_DIAG_I2H_LEDTEST:
4656 diag_ledtest_comp(diag, (struct bfi_diag_ledtest_rsp_s *) msg);
4659 bfa_trc(diag, msg->mh.msg_id);
4667 * @param[in] *diag - diag data struct
4668 * @param[in] *memtest - mem test params input from upper layer,
4669 * @param[in] pattern - mem test pattern
4670 * @param[in] *result - mem test result
4671 * @param[in] cbfn - mem test callback function
4672 * @param[in] cbarg - callback function arg
4677 bfa_diag_memtest(struct bfa_diag_s *diag, struct bfa_diag_memtest_s *memtest,
4678 u32 pattern, struct bfa_diag_memtest_result *result,
4679 bfa_cb_diag_t cbfn, void *cbarg)
4681 bfa_trc(diag, pattern);
4683 if (!bfa_ioc_adapter_is_disabled(diag->ioc))
4684 return BFA_STATUS_ADAPTER_ENABLED;
4686 /* check to see if there is another destructive diag cmd running */
4688 bfa_trc(diag, diag->block);
4689 return BFA_STATUS_DEVBUSY;
4693 diag->result = result;
4695 diag->cbarg = cbarg;
4697 /* download memtest code and take LPU0 out of reset */
4698 bfa_ioc_boot(diag->ioc, BFI_FWBOOT_TYPE_MEMTEST, BFI_FWBOOT_ENV_OS);
4700 bfa_timer_begin(diag->ioc->timer_mod, &diag->timer,
4701 bfa_diag_memtest_done, diag, BFA_DIAG_MEMTEST_TOV);
4702 diag->timer_active = 1;
4703 return BFA_STATUS_OK;
4707 * DIAG firmware ping command
4709 * @param[in] *diag - diag data struct
4710 * @param[in] cnt - dma loop count for testing PCIE
4711 * @param[in] data - data pattern to pass in fw
4712 * @param[in] *result - pt to bfa_diag_fwping_result_t data struct
4713 * @param[in] cbfn - callback function
4714 * @param[in] *cbarg - callback function arg
4719 bfa_diag_fwping(struct bfa_diag_s *diag, u32 cnt, u32 data,
4720 struct bfa_diag_results_fwping *result, bfa_cb_diag_t cbfn,
4724 bfa_trc(diag, data);
4726 if (!bfa_ioc_is_operational(diag->ioc))
4727 return BFA_STATUS_IOC_NON_OP;
4729 if (bfa_asic_id_ct2(bfa_ioc_devid((diag->ioc))) &&
4730 ((diag->ioc)->clscode == BFI_PCIFN_CLASS_ETH))
4731 return BFA_STATUS_CMD_NOTSUPP;
4733 /* check to see if there is another destructive diag cmd running */
4734 if (diag->block || diag->fwping.lock) {
4735 bfa_trc(diag, diag->block);
4736 bfa_trc(diag, diag->fwping.lock);
4737 return BFA_STATUS_DEVBUSY;
4740 /* Initialization */
4741 diag->fwping.lock = 1;
4742 diag->fwping.cbfn = cbfn;
4743 diag->fwping.cbarg = cbarg;
4744 diag->fwping.result = result;
4745 diag->fwping.data = data;
4746 diag->fwping.count = cnt;
4748 /* Init test results */
4749 diag->fwping.result->data = 0;
4750 diag->fwping.result->status = BFA_STATUS_OK;
4752 /* kick off the first ping */
4753 diag_fwping_send(diag);
4754 return BFA_STATUS_OK;
4758 * Read Temperature Sensor
4760 * @param[in] *diag - diag data struct
4761 * @param[in] *result - pt to bfa_diag_temp_t data struct
4762 * @param[in] cbfn - callback function
4763 * @param[in] *cbarg - callback function arg
4768 bfa_diag_tsensor_query(struct bfa_diag_s *diag,
4769 struct bfa_diag_results_tempsensor_s *result,
4770 bfa_cb_diag_t cbfn, void *cbarg)
4772 /* check to see if there is a destructive diag cmd running */
4773 if (diag->block || diag->tsensor.lock) {
4774 bfa_trc(diag, diag->block);
4775 bfa_trc(diag, diag->tsensor.lock);
4776 return BFA_STATUS_DEVBUSY;
4779 if (!bfa_ioc_is_operational(diag->ioc))
4780 return BFA_STATUS_IOC_NON_OP;
4782 /* Init diag mod params */
4783 diag->tsensor.lock = 1;
4784 diag->tsensor.temp = result;
4785 diag->tsensor.cbfn = cbfn;
4786 diag->tsensor.cbarg = cbarg;
4788 /* Send msg to fw */
4789 diag_tempsensor_send(diag);
4791 return BFA_STATUS_OK;
4797 * @param[in] *diag - diag data struct
4798 * @param[in] *ledtest - pt to ledtest data structure
4803 bfa_diag_ledtest(struct bfa_diag_s *diag, struct bfa_diag_ledtest_s *ledtest)
4805 bfa_trc(diag, ledtest->cmd);
4807 if (!bfa_ioc_is_operational(diag->ioc))
4808 return BFA_STATUS_IOC_NON_OP;
4810 if (diag->beacon.state)
4811 return BFA_STATUS_BEACON_ON;
4813 if (diag->ledtest.lock)
4814 return BFA_STATUS_LEDTEST_OP;
4816 /* Send msg to fw */
4817 diag->ledtest.lock = BFA_TRUE;
4818 diag_ledtest_send(diag, ledtest);
4820 return BFA_STATUS_OK;
4824 * Port beaconing command
4826 * @param[in] *diag - diag data struct
4827 * @param[in] beacon - port beaconing 1:ON 0:OFF
4828 * @param[in] link_e2e_beacon - link beaconing 1:ON 0:OFF
4829 * @param[in] sec - beaconing duration in seconds
4834 bfa_diag_beacon_port(struct bfa_diag_s *diag, bfa_boolean_t beacon,
4835 bfa_boolean_t link_e2e_beacon, uint32_t sec)
4837 bfa_trc(diag, beacon);
4838 bfa_trc(diag, link_e2e_beacon);
4841 if (!bfa_ioc_is_operational(diag->ioc))
4842 return BFA_STATUS_IOC_NON_OP;
4844 if (diag->ledtest.lock)
4845 return BFA_STATUS_LEDTEST_OP;
4847 if (diag->beacon.state && beacon) /* beacon already on */
4848 return BFA_STATUS_BEACON_ON;
4850 diag->beacon.state = beacon;
4851 diag->beacon.link_e2e = link_e2e_beacon;
4852 if (diag->cbfn_beacon)
4853 diag->cbfn_beacon(diag->dev, beacon, link_e2e_beacon);
4855 /* Send msg to fw */
4856 diag_portbeacon_send(diag, beacon, sec);
4858 return BFA_STATUS_OK;
4862 * Return DMA memory needed by diag module.
4865 bfa_diag_meminfo(void)
4867 return BFA_ROUNDUP(BFI_DIAG_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
4871 * Attach virtual and physical memory for Diag.
4874 bfa_diag_attach(struct bfa_diag_s *diag, struct bfa_ioc_s *ioc, void *dev,
4875 bfa_cb_diag_beacon_t cbfn_beacon, struct bfa_trc_mod_s *trcmod)
4879 diag->trcmod = trcmod;
4884 diag->result = NULL;
4885 diag->cbfn_beacon = cbfn_beacon;
4887 bfa_ioc_mbox_regisr(diag->ioc, BFI_MC_DIAG, bfa_diag_intr, diag);
4888 bfa_q_qe_init(&diag->ioc_notify);
4889 bfa_ioc_notify_init(&diag->ioc_notify, bfa_diag_notify, diag);
4890 list_add_tail(&diag->ioc_notify.qe, &diag->ioc->notify_q);
4894 bfa_diag_memclaim(struct bfa_diag_s *diag, u8 *dm_kva, u64 dm_pa)
4896 diag->fwping.dbuf_kva = dm_kva;
4897 diag->fwping.dbuf_pa = dm_pa;
4898 memset(diag->fwping.dbuf_kva, 0, BFI_DIAG_DMA_BUF_SZ);
4902 * PHY module specific
4904 #define BFA_PHY_DMA_BUF_SZ 0x02000 /* 8k dma buffer */
4905 #define BFA_PHY_LOCK_STATUS 0x018878 /* phy semaphore status reg */
4908 bfa_phy_ntoh32(u32 *obuf, u32 *ibuf, int sz)
4912 for (i = 0; i < m; i++)
4913 obuf[i] = be32_to_cpu(ibuf[i]);
4916 static bfa_boolean_t
4917 bfa_phy_present(struct bfa_phy_s *phy)
4919 return (phy->ioc->attr->card_type == BFA_MFG_TYPE_LIGHTNING);
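/*
 * PHY access is supported only on LIGHTNING-type cards; every public
 * entry point below checks bfa_phy_present() first and fails with
 * BFA_STATUS_PHY_NOT_PRESENT on other adapters.
 */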
4923 bfa_phy_notify(void *cbarg, enum bfa_ioc_event_e event)
4925 struct bfa_phy_s *phy = cbarg;
4927 bfa_trc(phy, event);
4930 case BFA_IOC_E_DISABLED:
4931 case BFA_IOC_E_FAILED:
4933 phy->status = BFA_STATUS_IOC_FAILURE;
4934 phy->cbfn(phy->cbarg, phy->status);
4945 * Send phy attribute query request.
4947 * @param[in] cbarg - callback argument
4950 bfa_phy_query_send(void *cbarg)
4952 struct bfa_phy_s *phy = cbarg;
4953 struct bfi_phy_query_req_s *msg =
4954 (struct bfi_phy_query_req_s *) phy->mb.msg;
4956 msg->instance = phy->instance;
4957 bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_QUERY_REQ,
4958 bfa_ioc_portid(phy->ioc));
4959 bfa_alen_set(&msg->alen, sizeof(struct bfa_phy_attr_s), phy->dbuf_pa);
4960 bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
4964 * Send phy write request.
4966 * @param[in] cbarg - callback argument
4969 bfa_phy_write_send(void *cbarg)
4971 struct bfa_phy_s *phy = cbarg;
4972 struct bfi_phy_write_req_s *msg =
4973 (struct bfi_phy_write_req_s *) phy->mb.msg;
4978 msg->instance = phy->instance;
4979 msg->offset = cpu_to_be32(phy->addr_off + phy->offset);
4980 len = (phy->residue < BFA_PHY_DMA_BUF_SZ) ?
4981 phy->residue : BFA_PHY_DMA_BUF_SZ;
4982 msg->length = cpu_to_be32(len);
4984 /* indicate if it's the last msg of the whole write operation */
4985 msg->last = (len == phy->residue) ? 1 : 0;
4987 bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_WRITE_REQ,
4988 bfa_ioc_portid(phy->ioc));
4989 bfa_alen_set(&msg->alen, len, phy->dbuf_pa);
4991 buf = (u16 *) (phy->ubuf + phy->offset);
4992 dbuf = (u16 *)phy->dbuf_kva;
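/* Stage the caller's buffer into the DMA area, converting each 16-bit
 * word to big-endian; the read completion path applies the matching
 * be16_to_cpu, so the phy flash format appears to be big-endian
 * 16-bit words.
 */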
4994 for (i = 0; i < sz; i++)
4995 dbuf[i] = cpu_to_be16(buf[i]);
4997 bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
4999 phy->residue -= len;
5004 * Send phy read request.
5006 * @param[in] cbarg - callback argument
5009 bfa_phy_read_send(void *cbarg)
5011 struct bfa_phy_s *phy = cbarg;
5012 struct bfi_phy_read_req_s *msg =
5013 (struct bfi_phy_read_req_s *) phy->mb.msg;
5016 msg->instance = phy->instance;
5017 msg->offset = cpu_to_be32(phy->addr_off + phy->offset);
5018 len = (phy->residue < BFA_PHY_DMA_BUF_SZ) ?
5019 phy->residue : BFA_PHY_DMA_BUF_SZ;
5020 msg->length = cpu_to_be32(len);
5021 bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_READ_REQ,
5022 bfa_ioc_portid(phy->ioc));
5023 bfa_alen_set(&msg->alen, len, phy->dbuf_pa);
5024 bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
5028 * Send phy stats request.
5030 * @param[in] cbarg - callback argument
5033 bfa_phy_stats_send(void *cbarg)
5035 struct bfa_phy_s *phy = cbarg;
5036 struct bfi_phy_stats_req_s *msg =
5037 (struct bfi_phy_stats_req_s *) phy->mb.msg;
5039 msg->instance = phy->instance;
5040 bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_STATS_REQ,
5041 bfa_ioc_portid(phy->ioc));
5042 bfa_alen_set(&msg->alen, sizeof(struct bfa_phy_stats_s), phy->dbuf_pa);
5043 bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
5047 * Phy memory info API.
5049 * @param[in] mincfg - minimal cfg variable
5052 bfa_phy_meminfo(bfa_boolean_t mincfg)
5054 /* min driver doesn't need phy */
5058 return BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
5064 * @param[in] phy - phy structure
5065 * @param[in] ioc - ioc structure
5066 * @param[in] dev - device structure
5067 * @param[in] trcmod - trace module
5068 * @param[in] mincfg - minimal cfg variable
5071 bfa_phy_attach(struct bfa_phy_s *phy, struct bfa_ioc_s *ioc, void *dev,
5072 struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
5075 phy->trcmod = trcmod;
5080 bfa_ioc_mbox_regisr(phy->ioc, BFI_MC_PHY, bfa_phy_intr, phy);
5081 bfa_q_qe_init(&phy->ioc_notify);
5082 bfa_ioc_notify_init(&phy->ioc_notify, bfa_phy_notify, phy);
5083 list_add_tail(&phy->ioc_notify.qe, &phy->ioc->notify_q);
5085 /* min driver doesn't need phy */
5087 phy->dbuf_kva = NULL;
5093 * Claim memory for phy
5095 * @param[in] phy - phy structure
5096 * @param[in] dm_kva - pointer to virtual memory address
5097 * @param[in] dm_pa - physical memory address
5098 * @param[in] mincfg - minimal cfg variable
5101 bfa_phy_memclaim(struct bfa_phy_s *phy, u8 *dm_kva, u64 dm_pa,
5102 bfa_boolean_t mincfg)
5107 phy->dbuf_kva = dm_kva;
5108 phy->dbuf_pa = dm_pa;
5109 memset(phy->dbuf_kva, 0, BFA_PHY_DMA_BUF_SZ);
5110 dm_kva += BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
5111 dm_pa += BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
5115 bfa_phy_busy(struct bfa_ioc_s *ioc)
5119 rb = bfa_ioc_bar0(ioc);
5120 return readl(rb + BFA_PHY_LOCK_STATUS);
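/*
 * A nonzero phy semaphore status means another function currently owns
 * the phy, so request routines treat it as busy alongside the driver's
 * own op_busy flag.
 */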
5124 * Get phy attribute.
5126 * @param[in] phy - phy structure
5127 * @param[in] attr - phy attribute structure
5128 * @param[in] cbfn - callback function
5129 * @param[in] cbarg - callback argument
5134 bfa_phy_get_attr(struct bfa_phy_s *phy, u8 instance,
5135 struct bfa_phy_attr_s *attr, bfa_cb_phy_t cbfn, void *cbarg)
5137 bfa_trc(phy, BFI_PHY_H2I_QUERY_REQ);
5138 bfa_trc(phy, instance);
5140 if (!bfa_phy_present(phy))
5141 return BFA_STATUS_PHY_NOT_PRESENT;
5143 if (!bfa_ioc_is_operational(phy->ioc))
5144 return BFA_STATUS_IOC_NON_OP;
5146 if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
5147 bfa_trc(phy, phy->op_busy);
5148 return BFA_STATUS_DEVBUSY;
5154 phy->instance = instance;
5155 phy->ubuf = (uint8_t *) attr;
5156 bfa_phy_query_send(phy);
5158 return BFA_STATUS_OK;
5164 * @param[in] phy - phy structure
5165 * @param[in] instance - phy image instance
5166 * @param[in] stats - pointer to phy stats
5167 * @param[in] cbfn - callback function
5168 * @param[in] cbarg - callback argument
5173 bfa_phy_get_stats(struct bfa_phy_s *phy, u8 instance,
5174 struct bfa_phy_stats_s *stats,
5175 bfa_cb_phy_t cbfn, void *cbarg)
5177 bfa_trc(phy, BFI_PHY_H2I_STATS_REQ);
5178 bfa_trc(phy, instance);
5180 if (!bfa_phy_present(phy))
5181 return BFA_STATUS_PHY_NOT_PRESENT;
5183 if (!bfa_ioc_is_operational(phy->ioc))
5184 return BFA_STATUS_IOC_NON_OP;
5186 if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
5187 bfa_trc(phy, phy->op_busy);
5188 return BFA_STATUS_DEVBUSY;
5194 phy->instance = instance;
5195 phy->ubuf = (u8 *) stats;
5196 bfa_phy_stats_send(phy);
5198 return BFA_STATUS_OK;
5204 * @param[in] phy - phy structure
5205 * @param[in] instance - phy image instance
5206 * @param[in] buf - update data buffer
5207 * @param[in] len - data buffer length
5208 * @param[in] offset - offset relative to starting address
5209 * @param[in] cbfn - callback function
5210 * @param[in] cbarg - callback argument
5215 bfa_phy_update(struct bfa_phy_s *phy, u8 instance,
5216 void *buf, u32 len, u32 offset,
5217 bfa_cb_phy_t cbfn, void *cbarg)
5219 bfa_trc(phy, BFI_PHY_H2I_WRITE_REQ);
5220 bfa_trc(phy, instance);
5222 bfa_trc(phy, offset);
5224 if (!bfa_phy_present(phy))
5225 return BFA_STATUS_PHY_NOT_PRESENT;
5227 if (!bfa_ioc_is_operational(phy->ioc))
5228 return BFA_STATUS_IOC_NON_OP;
5230 /* 'len' must be on a word (4-byte) boundary */
5231 if (!len || (len & 0x03))
5232 return BFA_STATUS_FAILED;
5234 if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
5235 bfa_trc(phy, phy->op_busy);
5236 return BFA_STATUS_DEVBUSY;
5242 phy->instance = instance;
5245 phy->addr_off = offset;
5248 bfa_phy_write_send(phy);
5249 return BFA_STATUS_OK;
5255 * @param[in] phy - phy structure
5256 * @param[in] instance - phy image instance
5257 * @param[in] buf - read data buffer
5258 * @param[in] len - data buffer length
5259 * @param[in] offset - offset relative to starting address
5260 * @param[in] cbfn - callback function
5261 * @param[in] cbarg - callback argument
5266 bfa_phy_read(struct bfa_phy_s *phy, u8 instance,
5267 void *buf, u32 len, u32 offset,
5268 bfa_cb_phy_t cbfn, void *cbarg)
5270 bfa_trc(phy, BFI_PHY_H2I_READ_REQ);
5271 bfa_trc(phy, instance);
5273 bfa_trc(phy, offset);
5275 if (!bfa_phy_present(phy))
5276 return BFA_STATUS_PHY_NOT_PRESENT;
5278 if (!bfa_ioc_is_operational(phy->ioc))
5279 return BFA_STATUS_IOC_NON_OP;
5281 /* 'len' must be on a word (4-byte) boundary */
5282 if (!len || (len & 0x03))
5283 return BFA_STATUS_FAILED;
5285 if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
5286 bfa_trc(phy, phy->op_busy);
5287 return BFA_STATUS_DEVBUSY;
5293 phy->instance = instance;
5296 phy->addr_off = offset;
5298 bfa_phy_read_send(phy);
5300 return BFA_STATUS_OK;
5304 * Process phy response messages upon receiving interrupts.
5306 * @param[in] phyarg - phy structure
5307 * @param[in] msg - message structure
5310 bfa_phy_intr(void *phyarg, struct bfi_mbmsg_s *msg)
5312 struct bfa_phy_s *phy = phyarg;
5316 struct bfi_phy_query_rsp_s *query;
5317 struct bfi_phy_stats_rsp_s *stats;
5318 struct bfi_phy_write_rsp_s *write;
5319 struct bfi_phy_read_rsp_s *read;
5320 struct bfi_mbmsg_s *msg;
5324 bfa_trc(phy, msg->mh.msg_id);
5326 if (!phy->op_busy) {
5327 /* receiving response after ioc failure */
5328 bfa_trc(phy, 0x9999);
5332 switch (msg->mh.msg_id) {
5333 case BFI_PHY_I2H_QUERY_RSP:
5334 status = be32_to_cpu(m.query->status);
5335 bfa_trc(phy, status);
5337 if (status == BFA_STATUS_OK) {
5338 struct bfa_phy_attr_s *attr =
5339 (struct bfa_phy_attr_s *) phy->ubuf;
5340 bfa_phy_ntoh32((u32 *)attr, (u32 *)phy->dbuf_kva,
5341 sizeof(struct bfa_phy_attr_s));
5342 bfa_trc(phy, attr->status);
5343 bfa_trc(phy, attr->length);
5346 phy->status = status;
5349 phy->cbfn(phy->cbarg, phy->status);
5351 case BFI_PHY_I2H_STATS_RSP:
5352 status = be32_to_cpu(m.stats->status);
5353 bfa_trc(phy, status);
5355 if (status == BFA_STATUS_OK) {
5356 struct bfa_phy_stats_s *stats =
5357 (struct bfa_phy_stats_s *) phy->ubuf;
5358 bfa_phy_ntoh32((u32 *)stats, (u32 *)phy->dbuf_kva,
5359 sizeof(struct bfa_phy_stats_s));
5360 bfa_trc(phy, stats->status);
5363 phy->status = status;
5366 phy->cbfn(phy->cbarg, phy->status);
5368 case BFI_PHY_I2H_WRITE_RSP:
5369 status = be32_to_cpu(m.write->status);
5370 bfa_trc(phy, status);
5372 if (status != BFA_STATUS_OK || phy->residue == 0) {
5373 phy->status = status;
5376 phy->cbfn(phy->cbarg, phy->status);
5378 bfa_trc(phy, phy->offset);
5379 bfa_phy_write_send(phy);
5382 case BFI_PHY_I2H_READ_RSP:
5383 status = be32_to_cpu(m.read->status);
5384 bfa_trc(phy, status);
5386 if (status != BFA_STATUS_OK) {
5387 phy->status = status;
5390 phy->cbfn(phy->cbarg, phy->status);
5392 u32 len = be32_to_cpu(m.read->length);
5393 u16 *buf = (u16 *)(phy->ubuf + phy->offset);
5394 u16 *dbuf = (u16 *)phy->dbuf_kva;
5395 int i, sz = len >> 1;
5397 bfa_trc(phy, phy->offset);
5400 for (i = 0; i < sz; i++)
5401 buf[i] = be16_to_cpu(dbuf[i]);
5403 phy->residue -= len;
5406 if (phy->residue == 0) {
5407 phy->status = status;
5410 phy->cbfn(phy->cbarg, phy->status);
5412 bfa_phy_read_send(phy);