/* Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
 * All rights reserved.
 */
21 ethport_can_be_up(struct bna_ethport *ethport)
24 if (ethport->bna->enet.type == BNA_ENET_T_REGULAR)
25 ready = ((ethport->flags & BNA_ETHPORT_F_ADMIN_UP) &&
26 (ethport->flags & BNA_ETHPORT_F_RX_STARTED) &&
27 (ethport->flags & BNA_ETHPORT_F_PORT_ENABLED));
29 ready = ((ethport->flags & BNA_ETHPORT_F_ADMIN_UP) &&
30 (ethport->flags & BNA_ETHPORT_F_RX_STARTED) &&
31 !(ethport->flags & BNA_ETHPORT_F_PORT_ENABLED));
35 #define ethport_is_up ethport_can_be_up
37 enum bna_ethport_event {
43 ETHPORT_E_FWRESP_UP_OK = 6,
44 ETHPORT_E_FWRESP_DOWN = 7,
45 ETHPORT_E_FWRESP_UP_FAIL = 8,
54 ENET_E_FWRESP_PAUSE = 6,
55 ENET_E_CHLD_STOPPED = 7,
58 enum bna_ioceth_event {
61 IOCETH_E_IOC_RESET = 3,
62 IOCETH_E_IOC_FAILED = 4,
63 IOCETH_E_IOC_READY = 5,
64 IOCETH_E_ENET_ATTR_RESP = 6,
65 IOCETH_E_ENET_STOPPED = 7,
66 IOCETH_E_IOC_DISABLED = 8,
69 #define bna_stats_copy(_name, _type) \
71 count = sizeof(struct bfi_enet_stats_ ## _type) / sizeof(u64); \
72 stats_src = (u64 *)&bna->stats.hw_stats_kva->_name ## _stats; \
73 stats_dst = (u64 *)&bna->stats.hw_stats._name ## _stats; \
74 for (i = 0; i < count; i++) \
75 stats_dst[i] = be64_to_cpu(stats_src[i]); \
79 * FW response handlers
83 bna_bfi_ethport_enable_aen(struct bna_ethport *ethport,
84 struct bfi_msgq_mhdr *msghdr)
86 ethport->flags |= BNA_ETHPORT_F_PORT_ENABLED;
88 if (ethport_can_be_up(ethport))
89 bfa_fsm_send_event(ethport, ETHPORT_E_UP);
93 bna_bfi_ethport_disable_aen(struct bna_ethport *ethport,
94 struct bfi_msgq_mhdr *msghdr)
96 int ethport_up = ethport_is_up(ethport);
98 ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED;
101 bfa_fsm_send_event(ethport, ETHPORT_E_DOWN);
105 bna_bfi_ethport_admin_rsp(struct bna_ethport *ethport,
106 struct bfi_msgq_mhdr *msghdr)
108 struct bfi_enet_enable_req *admin_req =
109 ðport->bfi_enet_cmd.admin_req;
110 struct bfi_enet_rsp *rsp = (struct bfi_enet_rsp *)msghdr;
112 switch (admin_req->enable) {
113 case BNA_STATUS_T_ENABLED:
114 if (rsp->error == BFI_ENET_CMD_OK)
115 bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_OK);
117 ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED;
118 bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_FAIL);
122 case BNA_STATUS_T_DISABLED:
123 bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_DOWN);
124 ethport->link_status = BNA_LINK_DOWN;
125 ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN);
131 bna_bfi_ethport_lpbk_rsp(struct bna_ethport *ethport,
132 struct bfi_msgq_mhdr *msghdr)
134 struct bfi_enet_diag_lb_req *diag_lb_req =
135 ðport->bfi_enet_cmd.lpbk_req;
136 struct bfi_enet_rsp *rsp = (struct bfi_enet_rsp *)msghdr;
138 switch (diag_lb_req->enable) {
139 case BNA_STATUS_T_ENABLED:
140 if (rsp->error == BFI_ENET_CMD_OK)
141 bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_OK);
143 ethport->flags &= ~BNA_ETHPORT_F_ADMIN_UP;
144 bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_FAIL);
148 case BNA_STATUS_T_DISABLED:
149 bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_DOWN);
155 bna_bfi_pause_set_rsp(struct bna_enet *enet, struct bfi_msgq_mhdr *msghdr)
157 bfa_fsm_send_event(enet, ENET_E_FWRESP_PAUSE);
161 bna_bfi_attr_get_rsp(struct bna_ioceth *ioceth,
162 struct bfi_msgq_mhdr *msghdr)
164 struct bfi_enet_attr_rsp *rsp = (struct bfi_enet_attr_rsp *)msghdr;
167 * Store only if not set earlier, since BNAD can override the HW
170 if (!ioceth->attr.fw_query_complete) {
171 ioceth->attr.num_txq = ntohl(rsp->max_cfg);
172 ioceth->attr.num_rxp = ntohl(rsp->max_cfg);
173 ioceth->attr.num_ucmac = ntohl(rsp->max_ucmac);
174 ioceth->attr.num_mcmac = BFI_ENET_MAX_MCAM;
175 ioceth->attr.max_rit_size = ntohl(rsp->rit_size);
176 ioceth->attr.fw_query_complete = true;
179 bfa_fsm_send_event(ioceth, IOCETH_E_ENET_ATTR_RESP);
183 bna_bfi_stats_get_rsp(struct bna *bna, struct bfi_msgq_mhdr *msghdr)
185 struct bfi_enet_stats_req *stats_req = &bna->stats_mod.stats_get;
188 u32 tx_enet_mask = ntohl(stats_req->tx_enet_mask);
189 u32 rx_enet_mask = ntohl(stats_req->rx_enet_mask);
193 bna_stats_copy(mac, mac);
194 bna_stats_copy(bpc, bpc);
195 bna_stats_copy(rad, rad);
196 bna_stats_copy(rlb, rad);
197 bna_stats_copy(fc_rx, fc_rx);
198 bna_stats_copy(fc_tx, fc_tx);
200 stats_src = (u64 *)&(bna->stats.hw_stats_kva->rxf_stats[0]);
202 /* Copy Rxf stats to SW area, scatter them while copying */
203 for (i = 0; i < BFI_ENET_CFG_MAX; i++) {
204 stats_dst = (u64 *)&(bna->stats.hw_stats.rxf_stats[i]);
205 memset(stats_dst, 0, sizeof(struct bfi_enet_stats_rxf));
206 if (rx_enet_mask & ((u32)(1 << i))) {
208 count = sizeof(struct bfi_enet_stats_rxf) /
210 for (k = 0; k < count; k++) {
211 stats_dst[k] = be64_to_cpu(*stats_src);
217 /* Copy Txf stats to SW area, scatter them while copying */
218 for (i = 0; i < BFI_ENET_CFG_MAX; i++) {
219 stats_dst = (u64 *)&(bna->stats.hw_stats.txf_stats[i]);
220 memset(stats_dst, 0, sizeof(struct bfi_enet_stats_txf));
221 if (tx_enet_mask & ((u32)(1 << i))) {
223 count = sizeof(struct bfi_enet_stats_txf) /
225 for (k = 0; k < count; k++) {
226 stats_dst[k] = be64_to_cpu(*stats_src);
232 bna->stats_mod.stats_get_busy = false;
233 bnad_cb_stats_get(bna->bnad, BNA_CB_SUCCESS, &bna->stats);
237 bna_bfi_ethport_linkup_aen(struct bna_ethport *ethport,
238 struct bfi_msgq_mhdr *msghdr)
240 ethport->link_status = BNA_LINK_UP;
242 /* Dispatch events */
243 ethport->link_cbfn(ethport->bna->bnad, ethport->link_status);
247 bna_bfi_ethport_linkdown_aen(struct bna_ethport *ethport,
248 struct bfi_msgq_mhdr *msghdr)
250 ethport->link_status = BNA_LINK_DOWN;
252 /* Dispatch events */
253 ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN);
257 bna_err_handler(struct bna *bna, u32 intr_status)
259 if (BNA_IS_HALT_INTR(bna, intr_status))
262 bfa_nw_ioc_error_isr(&bna->ioceth.ioc);
266 bna_mbox_handler(struct bna *bna, u32 intr_status)
268 if (BNA_IS_ERR_INTR(bna, intr_status)) {
269 bna_err_handler(bna, intr_status);
272 if (BNA_IS_MBOX_INTR(bna, intr_status))
273 bfa_nw_ioc_mbox_isr(&bna->ioceth.ioc);
277 bna_msgq_rsp_handler(void *arg, struct bfi_msgq_mhdr *msghdr)
279 struct bna *bna = (struct bna *)arg;
283 switch (msghdr->msg_id) {
284 case BFI_ENET_I2H_RX_CFG_SET_RSP:
285 bna_rx_from_rid(bna, msghdr->enet_id, rx);
287 bna_bfi_rx_enet_start_rsp(rx, msghdr);
290 case BFI_ENET_I2H_RX_CFG_CLR_RSP:
291 bna_rx_from_rid(bna, msghdr->enet_id, rx);
293 bna_bfi_rx_enet_stop_rsp(rx, msghdr);
296 case BFI_ENET_I2H_RIT_CFG_RSP:
297 case BFI_ENET_I2H_RSS_CFG_RSP:
298 case BFI_ENET_I2H_RSS_ENABLE_RSP:
299 case BFI_ENET_I2H_RX_PROMISCUOUS_RSP:
300 case BFI_ENET_I2H_RX_DEFAULT_RSP:
301 case BFI_ENET_I2H_MAC_UCAST_SET_RSP:
302 case BFI_ENET_I2H_MAC_UCAST_CLR_RSP:
303 case BFI_ENET_I2H_MAC_UCAST_ADD_RSP:
304 case BFI_ENET_I2H_MAC_UCAST_DEL_RSP:
305 case BFI_ENET_I2H_MAC_MCAST_DEL_RSP:
306 case BFI_ENET_I2H_MAC_MCAST_FILTER_RSP:
307 case BFI_ENET_I2H_RX_VLAN_SET_RSP:
308 case BFI_ENET_I2H_RX_VLAN_STRIP_ENABLE_RSP:
309 bna_rx_from_rid(bna, msghdr->enet_id, rx);
311 bna_bfi_rxf_cfg_rsp(&rx->rxf, msghdr);
314 case BFI_ENET_I2H_MAC_MCAST_ADD_RSP:
315 bna_rx_from_rid(bna, msghdr->enet_id, rx);
317 bna_bfi_rxf_mcast_add_rsp(&rx->rxf, msghdr);
320 case BFI_ENET_I2H_TX_CFG_SET_RSP:
321 bna_tx_from_rid(bna, msghdr->enet_id, tx);
323 bna_bfi_tx_enet_start_rsp(tx, msghdr);
326 case BFI_ENET_I2H_TX_CFG_CLR_RSP:
327 bna_tx_from_rid(bna, msghdr->enet_id, tx);
329 bna_bfi_tx_enet_stop_rsp(tx, msghdr);
332 case BFI_ENET_I2H_PORT_ADMIN_RSP:
333 bna_bfi_ethport_admin_rsp(&bna->ethport, msghdr);
336 case BFI_ENET_I2H_DIAG_LOOPBACK_RSP:
337 bna_bfi_ethport_lpbk_rsp(&bna->ethport, msghdr);
340 case BFI_ENET_I2H_SET_PAUSE_RSP:
341 bna_bfi_pause_set_rsp(&bna->enet, msghdr);
344 case BFI_ENET_I2H_GET_ATTR_RSP:
345 bna_bfi_attr_get_rsp(&bna->ioceth, msghdr);
348 case BFI_ENET_I2H_STATS_GET_RSP:
349 bna_bfi_stats_get_rsp(bna, msghdr);
352 case BFI_ENET_I2H_STATS_CLR_RSP:
356 case BFI_ENET_I2H_LINK_UP_AEN:
357 bna_bfi_ethport_linkup_aen(&bna->ethport, msghdr);
360 case BFI_ENET_I2H_LINK_DOWN_AEN:
361 bna_bfi_ethport_linkdown_aen(&bna->ethport, msghdr);
364 case BFI_ENET_I2H_PORT_ENABLE_AEN:
365 bna_bfi_ethport_enable_aen(&bna->ethport, msghdr);
368 case BFI_ENET_I2H_PORT_DISABLE_AEN:
369 bna_bfi_ethport_disable_aen(&bna->ethport, msghdr);
372 case BFI_ENET_I2H_BW_UPDATE_AEN:
373 bna_bfi_bw_update_aen(&bna->tx_mod);
383 #define call_ethport_stop_cbfn(_ethport) \
385 if ((_ethport)->stop_cbfn) { \
386 void (*cbfn)(struct bna_enet *); \
387 cbfn = (_ethport)->stop_cbfn; \
388 (_ethport)->stop_cbfn = NULL; \
389 cbfn(&(_ethport)->bna->enet); \
393 #define call_ethport_adminup_cbfn(ethport, status) \
395 if ((ethport)->adminup_cbfn) { \
396 void (*cbfn)(struct bnad *, enum bna_cb_status); \
397 cbfn = (ethport)->adminup_cbfn; \
398 (ethport)->adminup_cbfn = NULL; \
399 cbfn((ethport)->bna->bnad, status); \
404 bna_bfi_ethport_admin_up(struct bna_ethport *ethport)
406 struct bfi_enet_enable_req *admin_up_req =
407 ðport->bfi_enet_cmd.admin_req;
409 bfi_msgq_mhdr_set(admin_up_req->mh, BFI_MC_ENET,
410 BFI_ENET_H2I_PORT_ADMIN_UP_REQ, 0, 0);
411 admin_up_req->mh.num_entries = htons(
412 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
413 admin_up_req->enable = BNA_STATUS_T_ENABLED;
415 bfa_msgq_cmd_set(ðport->msgq_cmd, NULL, NULL,
416 sizeof(struct bfi_enet_enable_req), &admin_up_req->mh);
417 bfa_msgq_cmd_post(ðport->bna->msgq, ðport->msgq_cmd);
421 bna_bfi_ethport_admin_down(struct bna_ethport *ethport)
423 struct bfi_enet_enable_req *admin_down_req =
424 ðport->bfi_enet_cmd.admin_req;
426 bfi_msgq_mhdr_set(admin_down_req->mh, BFI_MC_ENET,
427 BFI_ENET_H2I_PORT_ADMIN_UP_REQ, 0, 0);
428 admin_down_req->mh.num_entries = htons(
429 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
430 admin_down_req->enable = BNA_STATUS_T_DISABLED;
432 bfa_msgq_cmd_set(ðport->msgq_cmd, NULL, NULL,
433 sizeof(struct bfi_enet_enable_req), &admin_down_req->mh);
434 bfa_msgq_cmd_post(ðport->bna->msgq, ðport->msgq_cmd);
438 bna_bfi_ethport_lpbk_up(struct bna_ethport *ethport)
440 struct bfi_enet_diag_lb_req *lpbk_up_req =
441 ðport->bfi_enet_cmd.lpbk_req;
443 bfi_msgq_mhdr_set(lpbk_up_req->mh, BFI_MC_ENET,
444 BFI_ENET_H2I_DIAG_LOOPBACK_REQ, 0, 0);
445 lpbk_up_req->mh.num_entries = htons(
446 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_diag_lb_req)));
447 lpbk_up_req->mode = (ethport->bna->enet.type ==
448 BNA_ENET_T_LOOPBACK_INTERNAL) ?
449 BFI_ENET_DIAG_LB_OPMODE_EXT :
450 BFI_ENET_DIAG_LB_OPMODE_CBL;
451 lpbk_up_req->enable = BNA_STATUS_T_ENABLED;
453 bfa_msgq_cmd_set(ðport->msgq_cmd, NULL, NULL,
454 sizeof(struct bfi_enet_diag_lb_req), &lpbk_up_req->mh);
455 bfa_msgq_cmd_post(ðport->bna->msgq, ðport->msgq_cmd);
459 bna_bfi_ethport_lpbk_down(struct bna_ethport *ethport)
461 struct bfi_enet_diag_lb_req *lpbk_down_req =
462 ðport->bfi_enet_cmd.lpbk_req;
464 bfi_msgq_mhdr_set(lpbk_down_req->mh, BFI_MC_ENET,
465 BFI_ENET_H2I_DIAG_LOOPBACK_REQ, 0, 0);
466 lpbk_down_req->mh.num_entries = htons(
467 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_diag_lb_req)));
468 lpbk_down_req->enable = BNA_STATUS_T_DISABLED;
470 bfa_msgq_cmd_set(ðport->msgq_cmd, NULL, NULL,
471 sizeof(struct bfi_enet_diag_lb_req), &lpbk_down_req->mh);
472 bfa_msgq_cmd_post(ðport->bna->msgq, ðport->msgq_cmd);
476 bna_bfi_ethport_up(struct bna_ethport *ethport)
478 if (ethport->bna->enet.type == BNA_ENET_T_REGULAR)
479 bna_bfi_ethport_admin_up(ethport);
481 bna_bfi_ethport_lpbk_up(ethport);
485 bna_bfi_ethport_down(struct bna_ethport *ethport)
487 if (ethport->bna->enet.type == BNA_ENET_T_REGULAR)
488 bna_bfi_ethport_admin_down(ethport);
490 bna_bfi_ethport_lpbk_down(ethport);
493 bfa_fsm_state_decl(bna_ethport, stopped, struct bna_ethport,
494 enum bna_ethport_event);
495 bfa_fsm_state_decl(bna_ethport, down, struct bna_ethport,
496 enum bna_ethport_event);
497 bfa_fsm_state_decl(bna_ethport, up_resp_wait, struct bna_ethport,
498 enum bna_ethport_event);
499 bfa_fsm_state_decl(bna_ethport, down_resp_wait, struct bna_ethport,
500 enum bna_ethport_event);
501 bfa_fsm_state_decl(bna_ethport, up, struct bna_ethport,
502 enum bna_ethport_event);
503 bfa_fsm_state_decl(bna_ethport, last_resp_wait, struct bna_ethport,
504 enum bna_ethport_event);
507 bna_ethport_sm_stopped_entry(struct bna_ethport *ethport)
509 call_ethport_stop_cbfn(ethport);
513 bna_ethport_sm_stopped(struct bna_ethport *ethport,
514 enum bna_ethport_event event)
517 case ETHPORT_E_START:
518 bfa_fsm_set_state(ethport, bna_ethport_sm_down);
522 call_ethport_stop_cbfn(ethport);
530 /* This event is received due to Rx objects failing */
540 bna_ethport_sm_down_entry(struct bna_ethport *ethport)
545 bna_ethport_sm_down(struct bna_ethport *ethport,
546 enum bna_ethport_event event)
550 bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
554 bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
558 bfa_fsm_set_state(ethport, bna_ethport_sm_up_resp_wait);
559 bna_bfi_ethport_up(ethport);
568 bna_ethport_sm_up_resp_wait_entry(struct bna_ethport *ethport)
573 bna_ethport_sm_up_resp_wait(struct bna_ethport *ethport,
574 enum bna_ethport_event event)
578 bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait);
582 call_ethport_adminup_cbfn(ethport, BNA_CB_FAIL);
583 bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
587 call_ethport_adminup_cbfn(ethport, BNA_CB_INTERRUPT);
588 bfa_fsm_set_state(ethport, bna_ethport_sm_down_resp_wait);
591 case ETHPORT_E_FWRESP_UP_OK:
592 call_ethport_adminup_cbfn(ethport, BNA_CB_SUCCESS);
593 bfa_fsm_set_state(ethport, bna_ethport_sm_up);
596 case ETHPORT_E_FWRESP_UP_FAIL:
597 call_ethport_adminup_cbfn(ethport, BNA_CB_FAIL);
598 bfa_fsm_set_state(ethport, bna_ethport_sm_down);
601 case ETHPORT_E_FWRESP_DOWN:
602 /* down_resp_wait -> up_resp_wait transition on ETHPORT_E_UP */
603 bna_bfi_ethport_up(ethport);
612 bna_ethport_sm_down_resp_wait_entry(struct bna_ethport *ethport)
615 * NOTE: Do not call bna_bfi_ethport_down() here. That will over step
616 * mbox due to up_resp_wait -> down_resp_wait transition on event
622 bna_ethport_sm_down_resp_wait(struct bna_ethport *ethport,
623 enum bna_ethport_event event)
627 bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait);
631 bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
635 bfa_fsm_set_state(ethport, bna_ethport_sm_up_resp_wait);
638 case ETHPORT_E_FWRESP_UP_OK:
639 /* up_resp_wait->down_resp_wait transition on ETHPORT_E_DOWN */
640 bna_bfi_ethport_down(ethport);
643 case ETHPORT_E_FWRESP_UP_FAIL:
644 case ETHPORT_E_FWRESP_DOWN:
645 bfa_fsm_set_state(ethport, bna_ethport_sm_down);
654 bna_ethport_sm_up_entry(struct bna_ethport *ethport)
659 bna_ethport_sm_up(struct bna_ethport *ethport,
660 enum bna_ethport_event event)
664 bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait);
665 bna_bfi_ethport_down(ethport);
669 bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
673 bfa_fsm_set_state(ethport, bna_ethport_sm_down_resp_wait);
674 bna_bfi_ethport_down(ethport);
683 bna_ethport_sm_last_resp_wait_entry(struct bna_ethport *ethport)
688 bna_ethport_sm_last_resp_wait(struct bna_ethport *ethport,
689 enum bna_ethport_event event)
693 bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
698 * This event is received due to Rx objects stopping in
699 * parallel to ethport
704 case ETHPORT_E_FWRESP_UP_OK:
705 /* up_resp_wait->last_resp_wait transition on ETHPORT_T_STOP */
706 bna_bfi_ethport_down(ethport);
709 case ETHPORT_E_FWRESP_UP_FAIL:
710 case ETHPORT_E_FWRESP_DOWN:
711 bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
720 bna_ethport_init(struct bna_ethport *ethport, struct bna *bna)
722 ethport->flags |= (BNA_ETHPORT_F_ADMIN_UP | BNA_ETHPORT_F_PORT_ENABLED);
725 ethport->link_status = BNA_LINK_DOWN;
726 ethport->link_cbfn = bnad_cb_ethport_link_status;
728 ethport->rx_started_count = 0;
730 ethport->stop_cbfn = NULL;
731 ethport->adminup_cbfn = NULL;
733 bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
737 bna_ethport_uninit(struct bna_ethport *ethport)
739 ethport->flags &= ~BNA_ETHPORT_F_ADMIN_UP;
740 ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED;
746 bna_ethport_start(struct bna_ethport *ethport)
748 bfa_fsm_send_event(ethport, ETHPORT_E_START);
752 bna_enet_cb_ethport_stopped(struct bna_enet *enet)
754 bfa_wc_down(&enet->chld_stop_wc);
758 bna_ethport_stop(struct bna_ethport *ethport)
760 ethport->stop_cbfn = bna_enet_cb_ethport_stopped;
761 bfa_fsm_send_event(ethport, ETHPORT_E_STOP);
765 bna_ethport_fail(struct bna_ethport *ethport)
767 /* Reset the physical port status to enabled */
768 ethport->flags |= BNA_ETHPORT_F_PORT_ENABLED;
770 if (ethport->link_status != BNA_LINK_DOWN) {
771 ethport->link_status = BNA_LINK_DOWN;
772 ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN);
774 bfa_fsm_send_event(ethport, ETHPORT_E_FAIL);
777 /* Should be called only when ethport is disabled */
779 bna_ethport_cb_rx_started(struct bna_ethport *ethport)
781 ethport->rx_started_count++;
783 if (ethport->rx_started_count == 1) {
784 ethport->flags |= BNA_ETHPORT_F_RX_STARTED;
786 if (ethport_can_be_up(ethport))
787 bfa_fsm_send_event(ethport, ETHPORT_E_UP);
792 bna_ethport_cb_rx_stopped(struct bna_ethport *ethport)
794 int ethport_up = ethport_is_up(ethport);
796 ethport->rx_started_count--;
798 if (ethport->rx_started_count == 0) {
799 ethport->flags &= ~BNA_ETHPORT_F_RX_STARTED;
802 bfa_fsm_send_event(ethport, ETHPORT_E_DOWN);
808 #define bna_enet_chld_start(enet) \
810 enum bna_tx_type tx_type = \
811 ((enet)->type == BNA_ENET_T_REGULAR) ? \
812 BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK; \
813 enum bna_rx_type rx_type = \
814 ((enet)->type == BNA_ENET_T_REGULAR) ? \
815 BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK; \
816 bna_ethport_start(&(enet)->bna->ethport); \
817 bna_tx_mod_start(&(enet)->bna->tx_mod, tx_type); \
818 bna_rx_mod_start(&(enet)->bna->rx_mod, rx_type); \
821 #define bna_enet_chld_stop(enet) \
823 enum bna_tx_type tx_type = \
824 ((enet)->type == BNA_ENET_T_REGULAR) ? \
825 BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK; \
826 enum bna_rx_type rx_type = \
827 ((enet)->type == BNA_ENET_T_REGULAR) ? \
828 BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK; \
829 bfa_wc_init(&(enet)->chld_stop_wc, bna_enet_cb_chld_stopped, (enet));\
830 bfa_wc_up(&(enet)->chld_stop_wc); \
831 bna_ethport_stop(&(enet)->bna->ethport); \
832 bfa_wc_up(&(enet)->chld_stop_wc); \
833 bna_tx_mod_stop(&(enet)->bna->tx_mod, tx_type); \
834 bfa_wc_up(&(enet)->chld_stop_wc); \
835 bna_rx_mod_stop(&(enet)->bna->rx_mod, rx_type); \
836 bfa_wc_wait(&(enet)->chld_stop_wc); \
839 #define bna_enet_chld_fail(enet) \
841 bna_ethport_fail(&(enet)->bna->ethport); \
842 bna_tx_mod_fail(&(enet)->bna->tx_mod); \
843 bna_rx_mod_fail(&(enet)->bna->rx_mod); \
846 #define bna_enet_rx_start(enet) \
848 enum bna_rx_type rx_type = \
849 ((enet)->type == BNA_ENET_T_REGULAR) ? \
850 BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK; \
851 bna_rx_mod_start(&(enet)->bna->rx_mod, rx_type); \
854 #define bna_enet_rx_stop(enet) \
856 enum bna_rx_type rx_type = \
857 ((enet)->type == BNA_ENET_T_REGULAR) ? \
858 BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK; \
859 bfa_wc_init(&(enet)->chld_stop_wc, bna_enet_cb_chld_stopped, (enet));\
860 bfa_wc_up(&(enet)->chld_stop_wc); \
861 bna_rx_mod_stop(&(enet)->bna->rx_mod, rx_type); \
862 bfa_wc_wait(&(enet)->chld_stop_wc); \
865 #define call_enet_stop_cbfn(enet) \
867 if ((enet)->stop_cbfn) { \
868 void (*cbfn)(void *); \
870 cbfn = (enet)->stop_cbfn; \
871 cbarg = (enet)->stop_cbarg; \
872 (enet)->stop_cbfn = NULL; \
873 (enet)->stop_cbarg = NULL; \
878 #define call_enet_pause_cbfn(enet) \
880 if ((enet)->pause_cbfn) { \
881 void (*cbfn)(struct bnad *); \
882 cbfn = (enet)->pause_cbfn; \
883 (enet)->pause_cbfn = NULL; \
884 cbfn((enet)->bna->bnad); \
888 #define call_enet_mtu_cbfn(enet) \
890 if ((enet)->mtu_cbfn) { \
891 void (*cbfn)(struct bnad *); \
892 cbfn = (enet)->mtu_cbfn; \
893 (enet)->mtu_cbfn = NULL; \
894 cbfn((enet)->bna->bnad); \
898 static void bna_enet_cb_chld_stopped(void *arg);
899 static void bna_bfi_pause_set(struct bna_enet *enet);
901 bfa_fsm_state_decl(bna_enet, stopped, struct bna_enet,
902 enum bna_enet_event);
903 bfa_fsm_state_decl(bna_enet, pause_init_wait, struct bna_enet,
904 enum bna_enet_event);
905 bfa_fsm_state_decl(bna_enet, last_resp_wait, struct bna_enet,
906 enum bna_enet_event);
907 bfa_fsm_state_decl(bna_enet, started, struct bna_enet,
908 enum bna_enet_event);
909 bfa_fsm_state_decl(bna_enet, cfg_wait, struct bna_enet,
910 enum bna_enet_event);
911 bfa_fsm_state_decl(bna_enet, cfg_stop_wait, struct bna_enet,
912 enum bna_enet_event);
913 bfa_fsm_state_decl(bna_enet, chld_stop_wait, struct bna_enet,
914 enum bna_enet_event);
917 bna_enet_sm_stopped_entry(struct bna_enet *enet)
919 call_enet_pause_cbfn(enet);
920 call_enet_mtu_cbfn(enet);
921 call_enet_stop_cbfn(enet);
925 bna_enet_sm_stopped(struct bna_enet *enet, enum bna_enet_event event)
929 bfa_fsm_set_state(enet, bna_enet_sm_pause_init_wait);
933 call_enet_stop_cbfn(enet);
940 case ENET_E_PAUSE_CFG:
941 call_enet_pause_cbfn(enet);
945 call_enet_mtu_cbfn(enet);
948 case ENET_E_CHLD_STOPPED:
950 * This event is received due to Ethport, Tx and Rx objects
962 bna_enet_sm_pause_init_wait_entry(struct bna_enet *enet)
964 bna_bfi_pause_set(enet);
968 bna_enet_sm_pause_init_wait(struct bna_enet *enet,
969 enum bna_enet_event event)
973 enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
974 bfa_fsm_set_state(enet, bna_enet_sm_last_resp_wait);
978 enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
979 bfa_fsm_set_state(enet, bna_enet_sm_stopped);
982 case ENET_E_PAUSE_CFG:
983 enet->flags |= BNA_ENET_F_PAUSE_CHANGED;
990 case ENET_E_FWRESP_PAUSE:
991 if (enet->flags & BNA_ENET_F_PAUSE_CHANGED) {
992 enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
993 bna_bfi_pause_set(enet);
995 bfa_fsm_set_state(enet, bna_enet_sm_started);
996 bna_enet_chld_start(enet);
1001 bfa_sm_fault(event);
1006 bna_enet_sm_last_resp_wait_entry(struct bna_enet *enet)
1008 enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
1012 bna_enet_sm_last_resp_wait(struct bna_enet *enet,
1013 enum bna_enet_event event)
1017 case ENET_E_FWRESP_PAUSE:
1018 bfa_fsm_set_state(enet, bna_enet_sm_stopped);
1022 bfa_sm_fault(event);
1027 bna_enet_sm_started_entry(struct bna_enet *enet)
1030 * NOTE: Do not call bna_enet_chld_start() here, since it will be
1031 * inadvertently called during cfg_wait->started transition as well
1033 call_enet_pause_cbfn(enet);
1034 call_enet_mtu_cbfn(enet);
1038 bna_enet_sm_started(struct bna_enet *enet,
1039 enum bna_enet_event event)
1043 bfa_fsm_set_state(enet, bna_enet_sm_chld_stop_wait);
1047 bfa_fsm_set_state(enet, bna_enet_sm_stopped);
1048 bna_enet_chld_fail(enet);
1051 case ENET_E_PAUSE_CFG:
1052 bfa_fsm_set_state(enet, bna_enet_sm_cfg_wait);
1053 bna_bfi_pause_set(enet);
1056 case ENET_E_MTU_CFG:
1057 bfa_fsm_set_state(enet, bna_enet_sm_cfg_wait);
1058 bna_enet_rx_stop(enet);
1062 bfa_sm_fault(event);
1067 bna_enet_sm_cfg_wait_entry(struct bna_enet *enet)
1072 bna_enet_sm_cfg_wait(struct bna_enet *enet,
1073 enum bna_enet_event event)
1077 enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
1078 enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
1079 bfa_fsm_set_state(enet, bna_enet_sm_cfg_stop_wait);
1083 enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
1084 enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
1085 bfa_fsm_set_state(enet, bna_enet_sm_stopped);
1086 bna_enet_chld_fail(enet);
1089 case ENET_E_PAUSE_CFG:
1090 enet->flags |= BNA_ENET_F_PAUSE_CHANGED;
1093 case ENET_E_MTU_CFG:
1094 enet->flags |= BNA_ENET_F_MTU_CHANGED;
1097 case ENET_E_CHLD_STOPPED:
1098 bna_enet_rx_start(enet);
1100 case ENET_E_FWRESP_PAUSE:
1101 if (enet->flags & BNA_ENET_F_PAUSE_CHANGED) {
1102 enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
1103 bna_bfi_pause_set(enet);
1104 } else if (enet->flags & BNA_ENET_F_MTU_CHANGED) {
1105 enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
1106 bna_enet_rx_stop(enet);
1108 bfa_fsm_set_state(enet, bna_enet_sm_started);
1113 bfa_sm_fault(event);
1118 bna_enet_sm_cfg_stop_wait_entry(struct bna_enet *enet)
1120 enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
1121 enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
1125 bna_enet_sm_cfg_stop_wait(struct bna_enet *enet,
1126 enum bna_enet_event event)
1130 bfa_fsm_set_state(enet, bna_enet_sm_stopped);
1131 bna_enet_chld_fail(enet);
1134 case ENET_E_FWRESP_PAUSE:
1135 case ENET_E_CHLD_STOPPED:
1136 bfa_fsm_set_state(enet, bna_enet_sm_chld_stop_wait);
1140 bfa_sm_fault(event);
1145 bna_enet_sm_chld_stop_wait_entry(struct bna_enet *enet)
1147 bna_enet_chld_stop(enet);
1151 bna_enet_sm_chld_stop_wait(struct bna_enet *enet,
1152 enum bna_enet_event event)
1156 bfa_fsm_set_state(enet, bna_enet_sm_stopped);
1157 bna_enet_chld_fail(enet);
1160 case ENET_E_CHLD_STOPPED:
1161 bfa_fsm_set_state(enet, bna_enet_sm_stopped);
1165 bfa_sm_fault(event);
1170 bna_bfi_pause_set(struct bna_enet *enet)
1172 struct bfi_enet_set_pause_req *pause_req = &enet->pause_req;
1174 bfi_msgq_mhdr_set(pause_req->mh, BFI_MC_ENET,
1175 BFI_ENET_H2I_SET_PAUSE_REQ, 0, 0);
1176 pause_req->mh.num_entries = htons(
1177 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_set_pause_req)));
1178 pause_req->tx_pause = enet->pause_config.tx_pause;
1179 pause_req->rx_pause = enet->pause_config.rx_pause;
1181 bfa_msgq_cmd_set(&enet->msgq_cmd, NULL, NULL,
1182 sizeof(struct bfi_enet_set_pause_req), &pause_req->mh);
1183 bfa_msgq_cmd_post(&enet->bna->msgq, &enet->msgq_cmd);
1187 bna_enet_cb_chld_stopped(void *arg)
1189 struct bna_enet *enet = (struct bna_enet *)arg;
1191 bfa_fsm_send_event(enet, ENET_E_CHLD_STOPPED);
1195 bna_enet_init(struct bna_enet *enet, struct bna *bna)
1200 enet->type = BNA_ENET_T_REGULAR;
1202 enet->stop_cbfn = NULL;
1203 enet->stop_cbarg = NULL;
1205 enet->pause_cbfn = NULL;
1207 enet->mtu_cbfn = NULL;
1209 bfa_fsm_set_state(enet, bna_enet_sm_stopped);
1213 bna_enet_uninit(struct bna_enet *enet)
1221 bna_enet_start(struct bna_enet *enet)
1223 enet->flags |= BNA_ENET_F_IOCETH_READY;
1224 if (enet->flags & BNA_ENET_F_ENABLED)
1225 bfa_fsm_send_event(enet, ENET_E_START);
1229 bna_ioceth_cb_enet_stopped(void *arg)
1231 struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;
1233 bfa_fsm_send_event(ioceth, IOCETH_E_ENET_STOPPED);
1237 bna_enet_stop(struct bna_enet *enet)
1239 enet->stop_cbfn = bna_ioceth_cb_enet_stopped;
1240 enet->stop_cbarg = &enet->bna->ioceth;
1242 enet->flags &= ~BNA_ENET_F_IOCETH_READY;
1243 bfa_fsm_send_event(enet, ENET_E_STOP);
1247 bna_enet_fail(struct bna_enet *enet)
1249 enet->flags &= ~BNA_ENET_F_IOCETH_READY;
1250 bfa_fsm_send_event(enet, ENET_E_FAIL);
1254 bna_enet_cb_tx_stopped(struct bna_enet *enet)
1256 bfa_wc_down(&enet->chld_stop_wc);
1260 bna_enet_cb_rx_stopped(struct bna_enet *enet)
1262 bfa_wc_down(&enet->chld_stop_wc);
1266 bna_enet_mtu_get(struct bna_enet *enet)
1272 bna_enet_enable(struct bna_enet *enet)
1274 if (enet->fsm != (bfa_sm_t)bna_enet_sm_stopped)
1277 enet->flags |= BNA_ENET_F_ENABLED;
1279 if (enet->flags & BNA_ENET_F_IOCETH_READY)
1280 bfa_fsm_send_event(enet, ENET_E_START);
1284 bna_enet_disable(struct bna_enet *enet, enum bna_cleanup_type type,
1285 void (*cbfn)(void *))
1287 if (type == BNA_SOFT_CLEANUP) {
1288 (*cbfn)(enet->bna->bnad);
1292 enet->stop_cbfn = cbfn;
1293 enet->stop_cbarg = enet->bna->bnad;
1295 enet->flags &= ~BNA_ENET_F_ENABLED;
1297 bfa_fsm_send_event(enet, ENET_E_STOP);
1301 bna_enet_pause_config(struct bna_enet *enet,
1302 struct bna_pause_config *pause_config,
1303 void (*cbfn)(struct bnad *))
1305 enet->pause_config = *pause_config;
1307 enet->pause_cbfn = cbfn;
1309 bfa_fsm_send_event(enet, ENET_E_PAUSE_CFG);
1313 bna_enet_mtu_set(struct bna_enet *enet, int mtu,
1314 void (*cbfn)(struct bnad *))
1318 enet->mtu_cbfn = cbfn;
1320 bfa_fsm_send_event(enet, ENET_E_MTU_CFG);
1324 bna_enet_perm_mac_get(struct bna_enet *enet, mac_t *mac)
1326 *mac = bfa_nw_ioc_get_mac(&enet->bna->ioceth.ioc);
1331 #define enable_mbox_intr(_ioceth) \
1334 bna_intr_status_get((_ioceth)->bna, intr_status); \
1335 bnad_cb_mbox_intr_enable((_ioceth)->bna->bnad); \
1336 bna_mbox_intr_enable((_ioceth)->bna); \
1339 #define disable_mbox_intr(_ioceth) \
1341 bna_mbox_intr_disable((_ioceth)->bna); \
1342 bnad_cb_mbox_intr_disable((_ioceth)->bna->bnad); \
1345 #define call_ioceth_stop_cbfn(_ioceth) \
1347 if ((_ioceth)->stop_cbfn) { \
1348 void (*cbfn)(struct bnad *); \
1349 struct bnad *cbarg; \
1350 cbfn = (_ioceth)->stop_cbfn; \
1351 cbarg = (_ioceth)->stop_cbarg; \
1352 (_ioceth)->stop_cbfn = NULL; \
1353 (_ioceth)->stop_cbarg = NULL; \
1358 #define bna_stats_mod_uninit(_stats_mod) \
1362 #define bna_stats_mod_start(_stats_mod) \
1364 (_stats_mod)->ioc_ready = true; \
1367 #define bna_stats_mod_stop(_stats_mod) \
1369 (_stats_mod)->ioc_ready = false; \
1372 #define bna_stats_mod_fail(_stats_mod) \
1374 (_stats_mod)->ioc_ready = false; \
1375 (_stats_mod)->stats_get_busy = false; \
1376 (_stats_mod)->stats_clr_busy = false; \
1379 static void bna_bfi_attr_get(struct bna_ioceth *ioceth);
1381 bfa_fsm_state_decl(bna_ioceth, stopped, struct bna_ioceth,
1382 enum bna_ioceth_event);
1383 bfa_fsm_state_decl(bna_ioceth, ioc_ready_wait, struct bna_ioceth,
1384 enum bna_ioceth_event);
1385 bfa_fsm_state_decl(bna_ioceth, enet_attr_wait, struct bna_ioceth,
1386 enum bna_ioceth_event);
1387 bfa_fsm_state_decl(bna_ioceth, ready, struct bna_ioceth,
1388 enum bna_ioceth_event);
1389 bfa_fsm_state_decl(bna_ioceth, last_resp_wait, struct bna_ioceth,
1390 enum bna_ioceth_event);
1391 bfa_fsm_state_decl(bna_ioceth, enet_stop_wait, struct bna_ioceth,
1392 enum bna_ioceth_event);
1393 bfa_fsm_state_decl(bna_ioceth, ioc_disable_wait, struct bna_ioceth,
1394 enum bna_ioceth_event);
1395 bfa_fsm_state_decl(bna_ioceth, failed, struct bna_ioceth,
1396 enum bna_ioceth_event);
1399 bna_ioceth_sm_stopped_entry(struct bna_ioceth *ioceth)
1401 call_ioceth_stop_cbfn(ioceth);
1405 bna_ioceth_sm_stopped(struct bna_ioceth *ioceth,
1406 enum bna_ioceth_event event)
1409 case IOCETH_E_ENABLE:
1410 bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_ready_wait);
1411 bfa_nw_ioc_enable(&ioceth->ioc);
1414 case IOCETH_E_DISABLE:
1415 bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped);
1418 case IOCETH_E_IOC_RESET:
1419 enable_mbox_intr(ioceth);
1422 case IOCETH_E_IOC_FAILED:
1423 disable_mbox_intr(ioceth);
1424 bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
1428 bfa_sm_fault(event);
1433 bna_ioceth_sm_ioc_ready_wait_entry(struct bna_ioceth *ioceth)
1436 * Do not call bfa_nw_ioc_enable() here. It must be called in the
1437 * previous state due to failed -> ioc_ready_wait transition.
1442 bna_ioceth_sm_ioc_ready_wait(struct bna_ioceth *ioceth,
1443 enum bna_ioceth_event event)
1446 case IOCETH_E_DISABLE:
1447 bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
1448 bfa_nw_ioc_disable(&ioceth->ioc);
1451 case IOCETH_E_IOC_RESET:
1452 enable_mbox_intr(ioceth);
1455 case IOCETH_E_IOC_FAILED:
1456 disable_mbox_intr(ioceth);
1457 bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
1460 case IOCETH_E_IOC_READY:
1461 bfa_fsm_set_state(ioceth, bna_ioceth_sm_enet_attr_wait);
1465 bfa_sm_fault(event);
1470 bna_ioceth_sm_enet_attr_wait_entry(struct bna_ioceth *ioceth)
1472 bna_bfi_attr_get(ioceth);
1476 bna_ioceth_sm_enet_attr_wait(struct bna_ioceth *ioceth,
1477 enum bna_ioceth_event event)
1480 case IOCETH_E_DISABLE:
1481 bfa_fsm_set_state(ioceth, bna_ioceth_sm_last_resp_wait);
1484 case IOCETH_E_IOC_FAILED:
1485 disable_mbox_intr(ioceth);
1486 bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
1489 case IOCETH_E_ENET_ATTR_RESP:
1490 bfa_fsm_set_state(ioceth, bna_ioceth_sm_ready);
1494 bfa_sm_fault(event);
1499 bna_ioceth_sm_ready_entry(struct bna_ioceth *ioceth)
/* Fully operational: start enet + stats modules, then notify bnad. */
1501 bna_enet_start(&ioceth->bna->enet);
1502 bna_stats_mod_start(&ioceth->bna->stats_mod);
1503 bnad_cb_ioceth_ready(ioceth->bna->bnad);
/*
 * ready: normal running state.  DISABLE first stops the child enet
 * (enet_stop_wait); IOC_FAILED fails the children and goes to failed.
 */
1507 bna_ioceth_sm_ready(struct bna_ioceth *ioceth, enum bna_ioceth_event event)
1510 case IOCETH_E_DISABLE:
1511 bfa_fsm_set_state(ioceth, bna_ioceth_sm_enet_stop_wait);
1514 case IOCETH_E_IOC_FAILED:
1515 disable_mbox_intr(ioceth);
/* Propagate the failure to child modules before changing state. */
1516 bna_enet_fail(&ioceth->bna->enet);
1517 bna_stats_mod_fail(&ioceth->bna->stats_mod);
1518 bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
1522 bfa_sm_fault(event);
/* Entering last_resp_wait: nothing to do; just wait for the FW response. */
1527 bna_ioceth_sm_last_resp_wait_entry(struct bna_ioceth *ioceth)
/*
 * last_resp_wait: a disable was requested while an attribute-get was
 * outstanding.  Wait for either the FW response or an IOC failure, then
 * proceed with IOC disable.
 */
1532 bna_ioceth_sm_last_resp_wait(struct bna_ioceth *ioceth,
1533 enum bna_ioceth_event event)
1536 case IOCETH_E_IOC_FAILED:
1537 bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
1538 disable_mbox_intr(ioceth);
1539 bfa_nw_ioc_disable(&ioceth->ioc);
1542 case IOCETH_E_ENET_ATTR_RESP:
/* The outstanding response arrived; safe to disable the IOC now. */
1543 bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
1544 bfa_nw_ioc_disable(&ioceth->ioc);
1548 bfa_sm_fault(event);
1553 bna_ioceth_sm_enet_stop_wait_entry(struct bna_ioceth *ioceth)
/* Entering enet_stop_wait: stop child modules; wait for ENET_STOPPED. */
1555 bna_stats_mod_stop(&ioceth->bna->stats_mod);
1556 bna_enet_stop(&ioceth->bna->enet);
/*
 * enet_stop_wait: waiting for the child enet to stop.  On IOC failure
 * the children are failed instead; either way the IOC is disabled next.
 */
1560 bna_ioceth_sm_enet_stop_wait(struct bna_ioceth *ioceth,
1561 enum bna_ioceth_event event)
1564 case IOCETH_E_IOC_FAILED:
1565 bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
1566 disable_mbox_intr(ioceth);
1567 bna_enet_fail(&ioceth->bna->enet);
1568 bna_stats_mod_fail(&ioceth->bna->stats_mod);
1569 bfa_nw_ioc_disable(&ioceth->ioc);
1572 case IOCETH_E_ENET_STOPPED:
1573 bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
1574 bfa_nw_ioc_disable(&ioceth->ioc);
1578 bfa_sm_fault(event);
/* Entering ioc_disable_wait: nothing to do; wait for IOC_DISABLED. */
1583 bna_ioceth_sm_ioc_disable_wait_entry(struct bna_ioceth *ioceth)
/*
 * ioc_disable_wait: IOC disable is in flight.  IOC_DISABLED completes
 * the shutdown; a late ENET_STOPPED (from a failed enet) is ignored.
 */
1588 bna_ioceth_sm_ioc_disable_wait(struct bna_ioceth *ioceth,
1589 enum bna_ioceth_event event)
1592 case IOCETH_E_IOC_DISABLED:
1593 disable_mbox_intr(ioceth);
1594 bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped);
1597 case IOCETH_E_ENET_STOPPED:
1598 /* This event is received due to enet failing */
1603 bfa_sm_fault(event);
1608 bna_ioceth_sm_failed_entry(struct bna_ioceth *ioceth)
/* Entering failed: inform the bnad layer that the IOC has failed. */
1610 bnad_cb_ioceth_failed(ioceth->bna->bnad);
/*
 * failed: IOC has failed.  DISABLE tears the IOC down; IOC_RESET means
 * the IOC recovered, so retry the ready-wait path (note: bfa_nw_ioc_enable()
 * is NOT re-issued here - see bna_ioceth_sm_ioc_ready_wait_entry()).
 * A repeated IOC_FAILED is ignored.
 */
1614 bna_ioceth_sm_failed(struct bna_ioceth *ioceth,
1615 enum bna_ioceth_event event)
1618 case IOCETH_E_DISABLE:
1619 bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
1620 bfa_nw_ioc_disable(&ioceth->ioc);
1623 case IOCETH_E_IOC_RESET:
1624 enable_mbox_intr(ioceth);
1625 bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_ready_wait);
1628 case IOCETH_E_IOC_FAILED:
1632 bfa_sm_fault(event);
/*
 * Build and post a BFI_ENET_H2I_GET_ATTR_REQ message to firmware.
 * The response is handled asynchronously via the msgq response path
 * and results in an IOCETH_E_ENET_ATTR_RESP event.
 */
1637 bna_bfi_attr_get(struct bna_ioceth *ioceth)
1639 struct bfi_enet_attr_req *attr_req = &ioceth->attr_req;
1641 bfi_msgq_mhdr_set(attr_req->mh, BFI_MC_ENET,
1642 BFI_ENET_H2I_GET_ATTR_REQ, 0, 0);
/* num_entries is wire-format big-endian. */
1643 attr_req->mh.num_entries = htons(
1644 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_attr_req)));
1645 bfa_msgq_cmd_set(&ioceth->msgq_cmd, NULL, NULL,
1646 sizeof(struct bfi_enet_attr_req), &attr_req->mh);
1647 bfa_msgq_cmd_post(&ioceth->bna->msgq, &ioceth->msgq_cmd);
1650 /* IOC callback functions */
/* IOC enable-completion callback: translate status into an FSM event. */
1653 bna_cb_ioceth_enable(void *arg, enum bfa_status error)
1655 struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;
1658 bfa_fsm_send_event(ioceth, IOCETH_E_IOC_FAILED);
1660 bfa_fsm_send_event(ioceth, IOCETH_E_IOC_READY);
/* IOC disable-completion callback. */
1664 bna_cb_ioceth_disable(void *arg)
1666 struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;
1668 bfa_fsm_send_event(ioceth, IOCETH_E_IOC_DISABLED);
/* IOC heartbeat-failure callback: treated as an IOC failure. */
1672 bna_cb_ioceth_hbfail(void *arg)
1674 struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;
1676 bfa_fsm_send_event(ioceth, IOCETH_E_IOC_FAILED);
/* IOC reset callback: IOC came back from reset. */
1680 bna_cb_ioceth_reset(void *arg)
1682 struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;
1684 bfa_fsm_send_event(ioceth, IOCETH_E_IOC_RESET);
/*
 * IOC callback vector registered via bfa_nw_ioc_attach().
 * NOTE(review): entries are positional (enable, disable, hbfail, ...);
 * designated initializers would be safer against field reordering.
 */
1687 static struct bfa_ioc_cbfn bna_ioceth_cbfn = {
1688 bna_cb_ioceth_enable,
1689 bna_cb_ioceth_disable,
1690 bna_cb_ioceth_hbfail,
/*
 * Seed ioceth attributes with compile-time defaults; they are replaced
 * by firmware-reported values once the attribute query completes
 * (fw_query_complete starts out false).
 */
1694 static void bna_attr_init(struct bna_ioceth *ioceth)
1696 ioceth->attr.num_txq = BFI_ENET_DEF_TXQ;
1697 ioceth->attr.num_rxp = BFI_ENET_DEF_RXP;
1698 ioceth->attr.num_ucmac = BFI_ENET_DEF_UCAM;
1699 ioceth->attr.num_mcmac = BFI_ENET_MAX_MCAM;
1700 ioceth->attr.max_rit_size = BFI_ENET_DEF_RITSZ;
1701 ioceth->attr.fw_query_complete = false;
/*
 * Initialize the ioceth object: attach the IOC, claim the DMA/KVA
 * resources carved out by bna_res_req(), attach the common sub-modules
 * (CEE, flash, msgq) and place the FSM in the stopped state.
 */
1705 bna_ioceth_init(struct bna_ioceth *ioceth, struct bna *bna,
1706 struct bna_res_info *res_info)
/*
1714 * Attach IOC and claim:
1715 * 1. DMA memory for IOC attributes
1716 * 2. Kernel memory for FW trace
 */
1718 bfa_nw_ioc_attach(&ioceth->ioc, ioceth, &bna_ioceth_cbfn);
1719 bfa_nw_ioc_pci_init(&ioceth->ioc, &bna->pcidev, BFI_PCIFN_CLASS_ETH);
1722 &res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].dma, dma);
1723 kva = res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].kva;
1724 bfa_nw_ioc_mem_claim(&ioceth->ioc, kva, dma);
1726 kva = res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mdl[0].kva;
1727 bfa_nw_ioc_debug_memclaim(&ioceth->ioc, kva);
/*
1730 * Attach common modules (Diag, SFP, CEE, Port) and claim respective
 * portions of the shared BNA_RES_MEM_T_COM block; kva/dma are advanced
 * past each module's share.
 */
1734 &res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].dma, dma);
1735 kva = res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].kva;
1736 bfa_nw_cee_attach(&bna->cee, &ioceth->ioc, bna);
1737 bfa_nw_cee_mem_claim(&bna->cee, kva, dma);
1738 kva += bfa_nw_cee_meminfo();
1739 dma += bfa_nw_cee_meminfo();
1741 bfa_nw_flash_attach(&bna->flash, &ioceth->ioc, bna);
1742 bfa_nw_flash_memclaim(&bna->flash, kva, dma);
1743 kva += bfa_nw_flash_meminfo();
1744 dma += bfa_nw_flash_meminfo();
1746 bfa_msgq_attach(&bna->msgq, &ioceth->ioc);
1747 bfa_msgq_memclaim(&bna->msgq, kva, dma);
/* Route ENET-class msgq responses to bna_msgq_rsp_handler. */
1748 bfa_msgq_regisr(&bna->msgq, BFI_MC_ENET, bna_msgq_rsp_handler, bna);
1749 kva += bfa_msgq_meminfo();
1750 dma += bfa_msgq_meminfo();
1752 ioceth->stop_cbfn = NULL;
1753 ioceth->stop_cbarg = NULL;
1755 bna_attr_init(ioceth);
1757 bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped);
/* Tear down the ioceth object: detach the underlying IOC. */
1761 bna_ioceth_uninit(struct bna_ioceth *ioceth)
1763 bfa_nw_ioc_detach(&ioceth->ioc);
/*
 * Enable the ioceth.  If already in the ready state just re-issue the
 * ready callback; only a stopped ioceth actually gets the ENABLE event.
 */
1769 bna_ioceth_enable(struct bna_ioceth *ioceth)
1771 if (ioceth->fsm == (bfa_fsm_t)bna_ioceth_sm_ready) {
1772 bnad_cb_ioceth_ready(ioceth->bna->bnad);
1776 if (ioceth->fsm == (bfa_fsm_t)bna_ioceth_sm_stopped)
1777 bfa_fsm_send_event(ioceth, IOCETH_E_ENABLE);
/*
 * Disable the ioceth.  BNA_SOFT_CLEANUP skips the FSM and reports
 * disabled immediately; otherwise record the completion callback and
 * drive the FSM with DISABLE.
 */
1781 bna_ioceth_disable(struct bna_ioceth *ioceth, enum bna_cleanup_type type)
1783 if (type == BNA_SOFT_CLEANUP) {
1784 bnad_cb_ioceth_disabled(ioceth->bna->bnad);
1788 ioceth->stop_cbfn = bnad_cb_ioceth_disabled;
1789 ioceth->stop_cbarg = ioceth->bna->bnad;
1791 bfa_fsm_send_event(ioceth, IOCETH_E_DISABLE);
/*
 * Initialize the unicast CAM module: take the MAC array allocated via
 * res_info and thread all num_ucmac entries onto the free queue.
 */
1795 bna_ucam_mod_init(struct bna_ucam_mod *ucam_mod, struct bna *bna,
1796 struct bna_res_info *res_info)
1800 ucam_mod->ucmac = (struct bna_mac *)
1801 res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mdl[0].kva;
1803 INIT_LIST_HEAD(&ucam_mod->free_q)
1804 for (i = 0; i < bna->ioceth.attr.num_ucmac; i++) {
1805 bfa_q_qe_init(&ucam_mod->ucmac[i].qe);
1806 list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->free_q);
1809 ucam_mod->bna = bna;
/* Uninit the unicast CAM module; walks free_q (count/sanity only). */
1813 bna_ucam_mod_uninit(struct bna_ucam_mod *ucam_mod)
1815 struct list_head *qe;
1818 list_for_each(qe, &ucam_mod->free_q)
1821 ucam_mod->bna = NULL;
/*
 * Initialize the multicast CAM module: build the free queues for both
 * the MAC entries and the MC handles (num_mcmac of each).
 */
1825 bna_mcam_mod_init(struct bna_mcam_mod *mcam_mod, struct bna *bna,
1826 struct bna_res_info *res_info)
1830 mcam_mod->mcmac = (struct bna_mac *)
1831 res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mdl[0].kva;
1833 INIT_LIST_HEAD(&mcam_mod->free_q);
1834 for (i = 0; i < bna->ioceth.attr.num_mcmac; i++) {
1835 bfa_q_qe_init(&mcam_mod->mcmac[i].qe);
1836 list_add_tail(&mcam_mod->mcmac[i].qe, &mcam_mod->free_q);
1839 mcam_mod->mchandle = (struct bna_mcam_handle *)
1840 res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.mdl[0].kva;
1842 INIT_LIST_HEAD(&mcam_mod->free_handle_q);
1843 for (i = 0; i < bna->ioceth.attr.num_mcmac; i++) {
1844 bfa_q_qe_init(&mcam_mod->mchandle[i].qe);
1845 list_add_tail(&mcam_mod->mchandle[i].qe,
1846 &mcam_mod->free_handle_q);
1849 mcam_mod->bna = bna;
/* Uninit the multicast CAM module; walks both free queues (count only). */
1853 bna_mcam_mod_uninit(struct bna_mcam_mod *mcam_mod)
1855 struct list_head *qe;
1859 list_for_each(qe, &mcam_mod->free_q) i++;
1862 list_for_each(qe, &mcam_mod->free_handle_q) i++;
1864 mcam_mod->bna = NULL;
/*
 * Post a BFI_ENET_H2I_STATS_GET_REQ to firmware asking for all stats of
 * every active Tx/Rx enet, DMA'd into the pre-allocated hw_stats buffer.
 * Marks stats_get_busy until the response arrives.
 */
1868 bna_bfi_stats_get(struct bna *bna)
1870 struct bfi_enet_stats_req *stats_req = &bna->stats_mod.stats_get;
1872 bna->stats_mod.stats_get_busy = true;
1874 bfi_msgq_mhdr_set(stats_req->mh, BFI_MC_ENET,
1875 BFI_ENET_H2I_STATS_GET_REQ, 0, 0);
1876 stats_req->mh.num_entries = htons(
1877 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_stats_req)));
/* Request every stat class for the currently-active Tx/Rx rid sets. */
1878 stats_req->stats_mask = htons(BFI_ENET_STATS_ALL);
1879 stats_req->tx_enet_mask = htonl(bna->tx_mod.rid_mask);
1880 stats_req->rx_enet_mask = htonl(bna->rx_mod.rid_mask);
/* DMA target where FW writes the stats block. */
1881 stats_req->host_buffer.a32.addr_hi = bna->stats.hw_stats_dma.msb;
1882 stats_req->host_buffer.a32.addr_lo = bna->stats.hw_stats_dma.lsb;
1884 bfa_msgq_cmd_set(&bna->stats_mod.stats_get_cmd, NULL, NULL,
1885 sizeof(struct bfi_enet_stats_req), &stats_req->mh);
1886 bfa_msgq_cmd_post(&bna->msgq, &bna->stats_mod.stats_get_cmd);
/*
 * Describe the fixed (attribute-independent) memory resources the BNA
 * needs: DMA for the common modules and IOC attributes, KVA for the FW
 * trace, DMA for the stats block.  The caller performs the allocation.
 */
1890 bna_res_req(struct bna_res_info *res_info)
1892 /* DMA memory for COMMON_MODULE */
1893 res_info[BNA_RES_MEM_T_COM].res_type = BNA_RES_T_MEM;
1894 res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
1895 res_info[BNA_RES_MEM_T_COM].res_u.mem_info.num = 1;
1896 res_info[BNA_RES_MEM_T_COM].res_u.mem_info.len = ALIGN(
1897 (bfa_nw_cee_meminfo() +
1898 bfa_nw_flash_meminfo() +
1899 bfa_msgq_meminfo()), PAGE_SIZE);
1901 /* DMA memory for retrieving IOC attributes */
1902 res_info[BNA_RES_MEM_T_ATTR].res_type = BNA_RES_T_MEM;
1903 res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
1904 res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.num = 1;
1905 res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.len =
1906 ALIGN(bfa_nw_ioc_meminfo(), PAGE_SIZE);
1908 /* Virtual memory for retrieving fw_trc */
1909 res_info[BNA_RES_MEM_T_FWTRC].res_type = BNA_RES_T_MEM;
1910 res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mem_type = BNA_MEM_T_KVA;
1911 res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.num = 1;
1912 res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.len = BNA_DBG_FWTRC_LEN;
1914 /* DMA memory for retrieving stats */
1915 res_info[BNA_RES_MEM_T_STATS].res_type = BNA_RES_T_MEM;
1916 res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
1917 res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.num = 1;
1918 res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.len =
1919 ALIGN(sizeof(struct bfi_enet_stats),
/*
 * Describe the attribute-dependent (per-queue / per-CAM-entry) memory
 * resources, sized from the IOC attributes obtained from firmware.
 * All of these are plain kernel virtual memory arrays.
 */
1924 bna_mod_res_req(struct bna *bna, struct bna_res_info *res_info)
1926 struct bna_attr *attr = &bna->ioceth.attr;
1928 /* Virtual memory for Tx objects - stored by Tx module */
1929 res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_type = BNA_RES_T_MEM;
1930 res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.mem_type =
1932 res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.num = 1;
1933 res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.len =
1934 attr->num_txq * sizeof(struct bna_tx);
1936 /* Virtual memory for TxQ - stored by Tx module */
1937 res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_type = BNA_RES_T_MEM;
1938 res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mem_type =
1940 res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.num = 1;
1941 res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.len =
1942 attr->num_txq * sizeof(struct bna_txq);
1944 /* Virtual memory for Rx objects - stored by Rx module */
1945 res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_type = BNA_RES_T_MEM;
1946 res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.mem_type =
1948 res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.num = 1;
1949 res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.len =
1950 attr->num_rxp * sizeof(struct bna_rx);
1952 /* Virtual memory for RxPath - stored by Rx module */
1953 res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_type = BNA_RES_T_MEM;
1954 res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mem_type =
1956 res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.num = 1;
1957 res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.len =
1958 attr->num_rxp * sizeof(struct bna_rxp);
1960 /* Virtual memory for RxQ - stored by Rx module */
1961 res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_type = BNA_RES_T_MEM;
1962 res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mem_type =
1964 res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.num = 1;
1965 res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.len =
/* Two RxQs per Rx path. */
1966 (attr->num_rxp * 2) * sizeof(struct bna_rxq);
1968 /* Virtual memory for Unicast MAC address - stored by ucam module */
1969 res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_type = BNA_RES_T_MEM;
1970 res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mem_type =
1972 res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.num = 1;
1973 res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.len =
1974 attr->num_ucmac * sizeof(struct bna_mac);
1976 /* Virtual memory for Multicast MAC address - stored by mcam module */
1977 res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_type = BNA_RES_T_MEM;
1978 res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mem_type =
1980 res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.num = 1;
1981 res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.len =
1982 attr->num_mcmac * sizeof(struct bna_mac);
1984 /* Virtual memory for Multicast handle - stored by mcam module */
1985 res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_type = BNA_RES_T_MEM;
1986 res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.mem_type =
1988 res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.num = 1;
1989 res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.len =
1990 attr->num_mcmac * sizeof(struct bna_mcam_handle);
/*
 * First-stage BNA init (before FW attributes are known): wire up the
 * stats DMA buffer, register addresses, ioceth, enet and ethport.
 * Module (Tx/Rx/CAM) init happens later in bna_mod_init().
 */
1994 bna_init(struct bna *bna, struct bnad *bnad,
1995 struct bfa_pcidev *pcidev, struct bna_res_info *res_info)
1998 bna->pcidev = *pcidev;
2000 bna->stats.hw_stats_kva = (struct bfi_enet_stats *)
2001 res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].kva;
2002 bna->stats.hw_stats_dma.msb =
2003 res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.msb;
2004 bna->stats.hw_stats_dma.lsb =
2005 res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.lsb;
2007 bna_reg_addr_init(bna, &bna->pcidev);
2009 /* Also initializes diag, cee, sfp, phy_port, msgq */
2010 bna_ioceth_init(&bna->ioceth, bna, res_info);
2012 bna_enet_init(&bna->enet, bna);
2013 bna_ethport_init(&bna->ethport, bna);
/*
 * Second-stage BNA init, run after FW attributes are available:
 * initialize Tx/Rx and CAM modules from the attribute-sized resources
 * and mark initialization done.
 */
2017 bna_mod_init(struct bna *bna, struct bna_res_info *res_info)
2019 bna_tx_mod_init(&bna->tx_mod, bna, res_info);
2021 bna_rx_mod_init(&bna->rx_mod, bna, res_info);
2023 bna_ucam_mod_init(&bna->ucam_mod, bna, res_info);
2025 bna_mcam_mod_init(&bna->mcam_mod, bna, res_info);
/* No default-mode / promiscuous owner yet. */
2027 bna->default_mode_rid = BFI_INVALID_RID;
2028 bna->promisc_rid = BFI_INVALID_RID;
2030 bna->mod_flags |= BNA_MOD_F_INIT_DONE;
/*
 * Tear down the BNA in reverse init order; the module-level uninit is
 * skipped if bna_mod_init() never completed.
 */
2034 bna_uninit(struct bna *bna)
2036 if (bna->mod_flags & BNA_MOD_F_INIT_DONE) {
2037 bna_mcam_mod_uninit(&bna->mcam_mod);
2038 bna_ucam_mod_uninit(&bna->ucam_mod);
2039 bna_rx_mod_uninit(&bna->rx_mod);
2040 bna_tx_mod_uninit(&bna->tx_mod);
2041 bna->mod_flags &= ~BNA_MOD_F_INIT_DONE;
2044 bna_stats_mod_uninit(&bna->stats_mod);
2045 bna_ethport_uninit(&bna->ethport);
2046 bna_enet_uninit(&bna->enet);
2048 bna_ioceth_uninit(&bna->ioceth);
/*
 * Clamp the configured Tx queue count.  Succeeds only after the FW
 * attribute query has completed and num_txq does not exceed the
 * FW-reported maximum.
 */
2054 bna_num_txq_set(struct bna *bna, int num_txq)
2056 if (bna->ioceth.attr.fw_query_complete &&
2057 (num_txq <= bna->ioceth.attr.num_txq)) {
2058 bna->ioceth.attr.num_txq = num_txq;
2059 return BNA_CB_SUCCESS;
/*
 * Clamp the configured Rx path count; same precondition as
 * bna_num_txq_set() (FW query complete, within FW-reported maximum).
 */
2066 bna_num_rxp_set(struct bna *bna, int num_rxp)
2068 if (bna->ioceth.attr.fw_query_complete &&
2069 (num_rxp <= bna->ioceth.attr.num_rxp)) {
2070 bna->ioceth.attr.num_rxp = num_rxp;
2071 return BNA_CB_SUCCESS;
/* Pop a free unicast MAC entry, or NULL-equivalent when free_q is empty. */
2078 bna_ucam_mod_mac_get(struct bna_ucam_mod *ucam_mod)
2080 struct list_head *qe;
2082 if (list_empty(&ucam_mod->free_q))
2085 bfa_q_deq(&ucam_mod->free_q, &qe);
/* qe is the first member of struct bna_mac, so the cast is valid. */
2087 return (struct bna_mac *)qe;
/* Return a unicast MAC entry to the free queue. */
2091 bna_ucam_mod_mac_put(struct bna_ucam_mod *ucam_mod, struct bna_mac *mac)
2093 list_add_tail(&mac->qe, &ucam_mod->free_q);
/* Pop a free multicast MAC entry, or NULL-equivalent when free_q is empty. */
2097 bna_mcam_mod_mac_get(struct bna_mcam_mod *mcam_mod)
2099 struct list_head *qe;
2101 if (list_empty(&mcam_mod->free_q))
2104 bfa_q_deq(&mcam_mod->free_q, &qe);
2106 return (struct bna_mac *)qe;
/* Return a multicast MAC entry to the free queue. */
2110 bna_mcam_mod_mac_put(struct bna_mcam_mod *mcam_mod, struct bna_mac *mac)
2112 list_add_tail(&mac->qe, &mcam_mod->free_q);
/* Pop a free multicast handle, or NULL-equivalent when the queue is empty. */
2115 struct bna_mcam_handle *
2116 bna_mcam_mod_handle_get(struct bna_mcam_mod *mcam_mod)
2118 struct list_head *qe;
2120 if (list_empty(&mcam_mod->free_handle_q))
2123 bfa_q_deq(&mcam_mod->free_handle_q, &qe);
2125 return (struct bna_mcam_handle *)qe;
/* Return a multicast handle to the free queue. */
2129 bna_mcam_mod_handle_put(struct bna_mcam_mod *mcam_mod,
2130 struct bna_mcam_handle *handle)
2132 list_add_tail(&handle->qe, &mcam_mod->free_handle_q);
2136 bna_hw_stats_get(struct bna *bna)
2138 if (!bna->stats_mod.ioc_ready) {
2139 bnad_cb_stats_get(bna->bnad, BNA_CB_FAIL, &bna->stats);
2142 if (bna->stats_mod.stats_get_busy) {
2143 bnad_cb_stats_get(bna->bnad, BNA_CB_BUSY, &bna->stats);
2147 bna_bfi_stats_get(bna);