2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
20 #include <bfi/bfi_pport.h>
21 #include <cs/bfa_debug.h>
22 #include <aen/bfa_aen.h>
23 #include <cs/bfa_plog.h>
24 #include <aen/bfa_aen_port.h>
/*
 * NOTE(review): this file is an elided excerpt — original line numbers are
 * embedded at the start of each line and several statements (braces, breaks,
 * switch headers, #ifdefs) are not visible here. Comments below describe
 * only what the visible code shows.
 */
26 BFA_TRC_FILE(HAL, FCPORT);
/*
 * A port is treated as disabled when either the FC port itself or the
 * owning IOC reports disabled.
 */
30 * The port is considered disabled if corresponding physical port or IOC are
33 #define BFA_PORT_IS_DISABLED(bfa) \
34 ((bfa_fcport_is_disabled(bfa) == BFA_TRUE) || \
35 (bfa_ioc_is_disabled(&bfa->ioc) == BFA_TRUE))
/*
 * Forward declarations for module-local helpers defined later in this file.
 */
38 * forward declarations
40 static bfa_boolean_t bfa_fcport_send_enable(struct bfa_fcport_s *fcport);
41 static bfa_boolean_t bfa_fcport_send_disable(struct bfa_fcport_s *fcport);
42 static void bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport);
43 static void bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport);
44 static void bfa_fcport_set_wwns(struct bfa_fcport_s *fcport);
45 static void __bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete);
46 static void bfa_fcport_callback(struct bfa_fcport_s *fcport,
47 enum bfa_pport_linkstate event);
48 static void bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln,
49 enum bfa_pport_linkstate event);
50 static void __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete);
51 static void bfa_fcport_stats_get_timeout(void *cbarg);
52 static void bfa_fcport_stats_clr_timeout(void *cbarg);
59 * BFA port state machine events
61 enum bfa_fcport_sm_event {
62 BFA_FCPORT_SM_START = 1, /* start port state machine */
63 BFA_FCPORT_SM_STOP = 2, /* stop port state machine */
64 BFA_FCPORT_SM_ENABLE = 3, /* enable port */
65 BFA_FCPORT_SM_DISABLE = 4, /* disable port state machine */
66 BFA_FCPORT_SM_FWRSP = 5, /* firmware enable/disable rsp */
67 BFA_FCPORT_SM_LINKUP = 6, /* firmware linkup event */
68 BFA_FCPORT_SM_LINKDOWN = 7, /* firmware linkdown event */
69 BFA_FCPORT_SM_QRESUME = 8, /* CQ space available */
70 BFA_FCPORT_SM_HWFAIL = 9, /* IOC h/w failure */
74 * BFA port link notification state machine events
77 enum bfa_fcport_ln_sm_event {
78 BFA_FCPORT_LN_SM_LINKUP = 1, /* linkup event */
79 BFA_FCPORT_LN_SM_LINKDOWN = 2, /* linkdown event */
80 BFA_FCPORT_LN_SM_NOTIFICATION = 3 /* done notification */
/*
 * Prototypes for the port state machine handlers (one per state) and the
 * link-notification (ln) state machine handlers.
 */
83 static void bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
84 enum bfa_fcport_sm_event event);
85 static void bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
86 enum bfa_fcport_sm_event event);
87 static void bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
88 enum bfa_fcport_sm_event event);
89 static void bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
90 enum bfa_fcport_sm_event event);
91 static void bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
92 enum bfa_fcport_sm_event event);
93 static void bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
94 enum bfa_fcport_sm_event event);
95 static void bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
96 enum bfa_fcport_sm_event event);
97 static void bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
98 enum bfa_fcport_sm_event event);
99 static void bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
100 enum bfa_fcport_sm_event event);
101 static void bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
102 enum bfa_fcport_sm_event event);
103 static void bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
104 enum bfa_fcport_sm_event event);
106 static void bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
107 enum bfa_fcport_ln_sm_event event);
108 static void bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
109 enum bfa_fcport_ln_sm_event event);
110 static void bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
111 enum bfa_fcport_ln_sm_event event);
112 static void bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
113 enum bfa_fcport_ln_sm_event event);
114 static void bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
115 enum bfa_fcport_ln_sm_event event);
116 static void bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
117 enum bfa_fcport_ln_sm_event event);
118 static void bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
119 enum bfa_fcport_ln_sm_event event);
/*
 * Maps each internal SM handler to the externally visible BFA_PPORT_ST_*
 * state. Note both iocdown and iocfail intentionally report IOCDOWN.
 */
121 static struct bfa_sm_table_s hal_pport_sm_table[] = {
122 {BFA_SM(bfa_fcport_sm_uninit), BFA_PPORT_ST_UNINIT},
123 {BFA_SM(bfa_fcport_sm_enabling_qwait), BFA_PPORT_ST_ENABLING_QWAIT},
124 {BFA_SM(bfa_fcport_sm_enabling), BFA_PPORT_ST_ENABLING},
125 {BFA_SM(bfa_fcport_sm_linkdown), BFA_PPORT_ST_LINKDOWN},
126 {BFA_SM(bfa_fcport_sm_linkup), BFA_PPORT_ST_LINKUP},
127 {BFA_SM(bfa_fcport_sm_disabling_qwait), BFA_PPORT_ST_DISABLING_QWAIT},
128 {BFA_SM(bfa_fcport_sm_disabling), BFA_PPORT_ST_DISABLING},
129 {BFA_SM(bfa_fcport_sm_disabled), BFA_PPORT_ST_DISABLED},
130 {BFA_SM(bfa_fcport_sm_stopped), BFA_PPORT_ST_STOPPED},
131 {BFA_SM(bfa_fcport_sm_iocdown), BFA_PPORT_ST_IOCDOWN},
132 {BFA_SM(bfa_fcport_sm_iocfail), BFA_PPORT_ST_IOCDOWN},
/*
 * Log a port asynchronous event (AEN) and populate the event payload
 * (IOC type and port WWN). NOTE(review): the bfa_aen_post() call itself
 * is presumably in elided lines after the visible assignments — confirm.
 */
136 bfa_fcport_aen_post(struct bfa_fcport_s *fcport, enum bfa_port_aen_event event)
138 union bfa_aen_data_u aen_data;
139 struct bfa_log_mod_s *logmod = fcport->bfa->logm;
140 wwn_t pwwn = fcport->pwwn;
141 char pwwn_ptr[BFA_STRING_32];
143 memset(&aen_data, 0, sizeof(aen_data));
144 wwn2str(pwwn_ptr, pwwn);
/* Log id is composed from the PORT category and the specific event code. */
145 bfa_log(logmod, BFA_LOG_CREATE_ID(BFA_AEN_CAT_PORT, event), pwwn_ptr);
147 aen_data.port.ioc_type = bfa_get_type(fcport->bfa);
148 aen_data.port.pwwn = pwwn;
/*
 * Uninitialized state: port waits for the initial START. ENABLE is a
 * no-op (enabling happens on START); DISABLE parks the port disabled.
 */
152 bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
153 enum bfa_fcport_sm_event event)
155 bfa_trc(fcport->bfa, event);
158 case BFA_FCPORT_SM_START:
160 * Start event after IOC is configured and BFA is started.
/* Go to enabling if the request was queued, else wait for queue space. */
162 if (bfa_fcport_send_enable(fcport))
163 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
165 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling_qwait);
168 case BFA_FCPORT_SM_ENABLE:
170 * Port is persistently configured to be in enabled state. Do
171 * not change state. Port enabling is done when START event is
176 case BFA_FCPORT_SM_DISABLE:
178 * If a port is persistently configured to be disabled, the
179 * first event will be a port disable request.
181 bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
184 case BFA_FCPORT_SM_HWFAIL:
185 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
189 bfa_sm_fault(fcport->bfa, event);
/*
 * Waiting for request-queue space to send the enable request. QRESUME
 * retries the send; STOP/DISABLE/HWFAIL cancel the queue wait first.
 */
194 bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
195 enum bfa_fcport_sm_event event)
197 bfa_trc(fcport->bfa, event);
200 case BFA_FCPORT_SM_QRESUME:
201 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
202 bfa_fcport_send_enable(fcport);
205 case BFA_FCPORT_SM_STOP:
206 bfa_reqq_wcancel(&fcport->reqq_wait);
207 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
210 case BFA_FCPORT_SM_ENABLE:
212 * Already enable is in progress.
216 case BFA_FCPORT_SM_DISABLE:
218 * Just send disable request to firmware when room becomes
219 * available in request queue.
/*
 * The enable was never sent, so no firmware disable is needed:
 * cancel the wait and go straight to disabled.
 */
221 bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
222 bfa_reqq_wcancel(&fcport->reqq_wait);
223 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
224 BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
225 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
228 case BFA_FCPORT_SM_LINKUP:
229 case BFA_FCPORT_SM_LINKDOWN:
231 * Possible to get link events when doing back-to-back
236 case BFA_FCPORT_SM_HWFAIL:
237 bfa_reqq_wcancel(&fcport->reqq_wait);
238 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
242 bfa_sm_fault(fcport->bfa, event);
/*
 * Enable request sent to firmware; waiting for the response. A FWRSP or
 * an early LINKDOWN both move to linkdown; LINKUP takes the port online.
 */
247 bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
248 enum bfa_fcport_sm_event event)
250 bfa_trc(fcport->bfa, event);
253 case BFA_FCPORT_SM_FWRSP:
254 case BFA_FCPORT_SM_LINKDOWN:
255 bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
258 case BFA_FCPORT_SM_LINKUP:
259 bfa_fcport_update_linkinfo(fcport);
260 bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);
/* Caller must have registered an event callback before link events. */
262 bfa_assert(fcport->event_cbfn);
263 bfa_fcport_callback(fcport, BFA_PPORT_LINKUP);
266 case BFA_FCPORT_SM_ENABLE:
268 * Already being enabled.
272 case BFA_FCPORT_SM_DISABLE:
273 if (bfa_fcport_send_disable(fcport))
274 bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
276 bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait);
278 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
279 BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
280 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
283 case BFA_FCPORT_SM_STOP:
284 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
287 case BFA_FCPORT_SM_HWFAIL:
288 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
292 bfa_sm_fault(fcport->bfa, event);
/*
 * Port is enabled but the link is down. On LINKUP, publish link info,
 * log FIP FCF discovery status in FCoE mode, notify registered callback
 * and post ONLINE (plus QOS_NEG if QoS is enabled but not yet online).
 */
297 bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
298 enum bfa_fcport_sm_event event)
/* Firmware event payload saved by the ISR before the SM event is sent. */
300 struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
301 bfa_trc(fcport->bfa, event);
304 case BFA_FCPORT_SM_LINKUP:
305 bfa_fcport_update_linkinfo(fcport);
306 bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);
307 bfa_assert(fcport->event_cbfn);
308 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
309 BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkup");
/* Non-FC mode (i.e. FCoE): record FIP FCF discovery outcome. */
311 if (!bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {
313 bfa_trc(fcport->bfa, pevent->link_state.fcf.fipenabled);
314 bfa_trc(fcport->bfa, pevent->link_state.fcf.fipfailed);
316 if (pevent->link_state.fcf.fipfailed)
317 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
318 BFA_PL_EID_FIP_FCF_DISC, 0,
319 "FIP FCF Discovery Failed");
321 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
322 BFA_PL_EID_FIP_FCF_DISC, 0,
323 "FIP FCF Discovered");
326 bfa_fcport_callback(fcport, BFA_PPORT_LINKUP);
327 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ONLINE);
329 * If QoS is enabled and it is not online,
330 * Send a separate event.
/* qos_attr.state is stored in network byte order, hence the ntohl. */
332 if ((fcport->cfg.qos_enabled)
333 && (bfa_os_ntohl(fcport->qos_attr.state) != BFA_QOS_ONLINE))
334 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_QOS_NEG);
338 case BFA_FCPORT_SM_LINKDOWN:
340 * Possible to get link down event.
344 case BFA_FCPORT_SM_ENABLE:
350 case BFA_FCPORT_SM_DISABLE:
351 if (bfa_fcport_send_disable(fcport))
352 bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
354 bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait);
356 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
357 BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
358 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
361 case BFA_FCPORT_SM_STOP:
362 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
365 case BFA_FCPORT_SM_HWFAIL:
366 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
370 bfa_sm_fault(fcport->bfa, event);
/*
 * Port is online. Any transition away from linkup resets link info and
 * posts OFFLINE when the port/IOC is administratively disabled, or
 * DISCONNECT when the link dropped while still enabled.
 */
375 bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
376 enum bfa_fcport_sm_event event)
378 bfa_trc(fcport->bfa, event);
381 case BFA_FCPORT_SM_ENABLE:
387 case BFA_FCPORT_SM_DISABLE:
388 if (bfa_fcport_send_disable(fcport))
389 bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
391 bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait);
393 bfa_fcport_reset_linkinfo(fcport);
394 bfa_fcport_callback(fcport, BFA_PPORT_LINKDOWN);
395 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
396 BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
/* Explicit disable: both OFFLINE and DISABLE AENs are raised. */
397 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
398 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
401 case BFA_FCPORT_SM_LINKDOWN:
402 bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
403 bfa_fcport_reset_linkinfo(fcport);
404 bfa_fcport_callback(fcport, BFA_PPORT_LINKDOWN);
405 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
406 BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkdown");
407 if (BFA_PORT_IS_DISABLED(fcport->bfa))
408 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
410 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
413 case BFA_FCPORT_SM_STOP:
414 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
415 bfa_fcport_reset_linkinfo(fcport);
416 if (BFA_PORT_IS_DISABLED(fcport->bfa))
417 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
419 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
422 case BFA_FCPORT_SM_HWFAIL:
423 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
424 bfa_fcport_reset_linkinfo(fcport);
425 bfa_fcport_callback(fcport, BFA_PPORT_LINKDOWN);
426 if (BFA_PORT_IS_DISABLED(fcport->bfa))
427 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
429 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
433 bfa_sm_fault(fcport->bfa, event);
/*
 * Waiting for request-queue space to send the disable request. Note
 * HWFAIL here goes to iocfail (port remains disabled), unlike the
 * enabling paths which go to iocdown.
 */
438 bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
439 enum bfa_fcport_sm_event event)
441 bfa_trc(fcport->bfa, event);
444 case BFA_FCPORT_SM_QRESUME:
445 bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
446 bfa_fcport_send_disable(fcport);
449 case BFA_FCPORT_SM_STOP:
450 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
451 bfa_reqq_wcancel(&fcport->reqq_wait);
454 case BFA_FCPORT_SM_DISABLE:
456 * Already being disabled.
460 case BFA_FCPORT_SM_LINKUP:
461 case BFA_FCPORT_SM_LINKDOWN:
463 * Possible to get link events when doing back-to-back
468 case BFA_FCPORT_SM_HWFAIL:
469 bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
470 bfa_reqq_wcancel(&fcport->reqq_wait);
474 bfa_sm_fault(fcport->bfa, event);
/*
 * Disable request sent to firmware; waiting for the response. An ENABLE
 * here reverses direction without waiting for the disable response.
 */
479 bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
480 enum bfa_fcport_sm_event event)
482 bfa_trc(fcport->bfa, event);
485 case BFA_FCPORT_SM_FWRSP:
486 bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
489 case BFA_FCPORT_SM_DISABLE:
491 * Already being disabled.
495 case BFA_FCPORT_SM_ENABLE:
496 if (bfa_fcport_send_enable(fcport))
497 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
499 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling_qwait);
501 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
502 BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
503 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE);
506 case BFA_FCPORT_SM_STOP:
507 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
510 case BFA_FCPORT_SM_LINKUP:
511 case BFA_FCPORT_SM_LINKDOWN:
513 * Possible to get link events when doing back-to-back
518 case BFA_FCPORT_SM_HWFAIL:
519 bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
523 bfa_sm_fault(fcport->bfa, event);
/*
 * Port is administratively disabled. Only ENABLE (re-enable), STOP and
 * HWFAIL cause transitions; START is ignored while disabled.
 */
528 bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
529 enum bfa_fcport_sm_event event)
531 bfa_trc(fcport->bfa, event);
534 case BFA_FCPORT_SM_START:
536 * Ignore start event for a port that is disabled.
540 case BFA_FCPORT_SM_STOP:
541 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
544 case BFA_FCPORT_SM_ENABLE:
545 if (bfa_fcport_send_enable(fcport))
546 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
548 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling_qwait);
550 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
551 BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
552 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE);
555 case BFA_FCPORT_SM_DISABLE:
561 case BFA_FCPORT_SM_HWFAIL:
562 bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
566 bfa_sm_fault(fcport->bfa, event);
/*
 * Stopped state: a fresh START re-enables the port; all other events
 * are ignored (per the visible comment below).
 */
571 bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
572 enum bfa_fcport_sm_event event)
574 bfa_trc(fcport->bfa, event);
577 case BFA_FCPORT_SM_START:
578 if (bfa_fcport_send_enable(fcport))
579 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
581 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling_qwait);
586 * Ignore all other events.
593 * Port is enabled. IOC is down/failed.
596 bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
597 enum bfa_fcport_sm_event event)
599 bfa_trc(fcport->bfa, event);
602 case BFA_FCPORT_SM_START:
/* IOC recovered and BFA restarted: kick off the enable again. */
603 if (bfa_fcport_send_enable(fcport))
604 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
606 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling_qwait);
618 * Port is disabled. IOC is down/failed.
621 bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
622 enum bfa_fcport_sm_event event)
624 bfa_trc(fcport->bfa, event);
627 case BFA_FCPORT_SM_START:
/* On restart, a disabled port stays disabled. */
628 bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
631 case BFA_FCPORT_SM_ENABLE:
/*
 * ENABLE while the IOC is down: remember the port should be
 * enabled by moving to iocdown (which re-enables on START).
 */
632 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
/*
 * Link-notification (ln) state machine. It serializes LINKUP/LINKDOWN
 * callbacks to the upper layer: a "_nf" state means a notification is
 * in flight and its completion (NOTIFICATION) must arrive before the
 * next queued callback is issued.
 *
 * ln_sm_dn: link reported down, no notification pending.
 */
647 bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
648 enum bfa_fcport_ln_sm_event event)
650 bfa_trc(ln->fcport->bfa, event);
653 case BFA_FCPORT_LN_SM_LINKUP:
654 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
655 bfa_fcport_queue_cb(ln, BFA_PPORT_LINKUP);
659 bfa_sm_fault(ln->fcport->bfa, event);
664 * Link state is waiting for down notification
667 bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
668 enum bfa_fcport_ln_sm_event event)
670 bfa_trc(ln->fcport->bfa, event);
673 case BFA_FCPORT_LN_SM_LINKUP:
/* Remember the pending up; deliver it after the down completes. */
674 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
677 case BFA_FCPORT_LN_SM_NOTIFICATION:
678 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
682 bfa_sm_fault(ln->fcport->bfa, event);
687 * Link state is waiting for down notification and there is a pending up
690 bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
691 enum bfa_fcport_ln_sm_event event)
693 bfa_trc(ln->fcport->bfa, event);
696 case BFA_FCPORT_LN_SM_LINKDOWN:
/* The pending up is cancelled by a newer down. */
697 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
700 case BFA_FCPORT_LN_SM_NOTIFICATION:
701 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
702 bfa_fcport_queue_cb(ln, BFA_PPORT_LINKUP);
706 bfa_sm_fault(ln->fcport->bfa, event);
/*
 * ln_sm_up: link reported up, no notification pending.
 */
714 bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
715 enum bfa_fcport_ln_sm_event event)
717 bfa_trc(ln->fcport->bfa, event);
720 case BFA_FCPORT_LN_SM_LINKDOWN:
721 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
722 bfa_fcport_queue_cb(ln, BFA_PPORT_LINKDOWN);
726 bfa_sm_fault(ln->fcport->bfa, event);
731 * Link state is waiting for up notification
734 bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
735 enum bfa_fcport_ln_sm_event event)
737 bfa_trc(ln->fcport->bfa, event);
740 case BFA_FCPORT_LN_SM_LINKDOWN:
741 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
744 case BFA_FCPORT_LN_SM_NOTIFICATION:
745 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up);
749 bfa_sm_fault(ln->fcport->bfa, event);
754 * Link state is waiting for up notification and there is a pending down
757 bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
758 enum bfa_fcport_ln_sm_event event)
760 bfa_trc(ln->fcport->bfa, event);
763 case BFA_FCPORT_LN_SM_LINKUP:
764 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_up_nf);
767 case BFA_FCPORT_LN_SM_NOTIFICATION:
768 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
769 bfa_fcport_queue_cb(ln, BFA_PPORT_LINKDOWN);
773 bfa_sm_fault(ln->fcport->bfa, event);
778 * Link state is waiting for up notification and there are pending down and up
781 bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
782 enum bfa_fcport_ln_sm_event event)
784 bfa_trc(ln->fcport->bfa, event);
787 case BFA_FCPORT_LN_SM_LINKDOWN:
/* Newest down collapses the pending down+up back to a single down. */
788 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
791 case BFA_FCPORT_LN_SM_NOTIFICATION:
792 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
793 bfa_fcport_queue_cb(ln, BFA_PPORT_LINKDOWN);
797 bfa_sm_fault(ln->fcport->bfa, event);
/*
 * Deferred-callback trampoline: invokes the registered port-event
 * callback, then feeds NOTIFICATION back into the ln state machine so
 * the next queued link event (if any) can be delivered.
 */
806 __bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete)
808 struct bfa_fcport_ln_s *ln = cbarg;
811 ln->fcport->event_cbfn(ln->fcport->event_cbarg, ln->ln_event);
813 bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
/*
 * Deliver a link-state change to the upper layer. When FCS is attached
 * the callback is made synchronously; otherwise the event is routed
 * through the ln state machine for serialized, deferred delivery.
 */
817 bfa_fcport_callback(struct bfa_fcport_s *fcport, enum bfa_pport_linkstate event)
819 if (fcport->bfa->fcs) {
820 fcport->event_cbfn(fcport->event_cbarg, event);
825 case BFA_PPORT_LINKUP:
826 bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKUP);
828 case BFA_PPORT_LINKDOWN:
829 bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKDOWN);
/*
 * Record the event and queue the deferred callback trampoline.
 */
837 bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln, enum bfa_pport_linkstate event)
839 ln->ln_event = event;
840 bfa_cb_queue(ln->fcport->bfa, &ln->ln_qe, __bfa_cb_fcport_event, ln);
/*
 * DMA size for the firmware port-statistics block. NOTE(review): the
 * macro continuation (alignment argument and closing paren) is in
 * elided lines.
 */
843 #define FCPORT_STATS_DMA_SZ (BFA_ROUNDUP(sizeof(union bfa_fcport_stats_u), \
/*
 * Report this module's DMA memory requirement to the IOC framework.
 */
847 bfa_fcport_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
850 *dm_len += FCPORT_STATS_DMA_SZ;
/*
 * Request-queue space became available: resume the pending SM action.
 */
854 bfa_fcport_qresume(void *cbarg)
856 struct bfa_fcport_s *fcport = cbarg;
858 bfa_sm_send_event(fcport, BFA_FCPORT_SM_QRESUME);
/*
 * Carve the stats DMA area out of the meminfo pool and advance the
 * pool's virtual/physical cursors past the claimed region.
 */
862 bfa_fcport_mem_claim(struct bfa_fcport_s *fcport, struct bfa_meminfo_s *meminfo)
867 dm_kva = bfa_meminfo_dma_virt(meminfo);
868 dm_pa = bfa_meminfo_dma_phys(meminfo);
870 fcport->stats_kva = dm_kva;
871 fcport->stats_pa = dm_pa;
872 fcport->stats = (union bfa_fcport_stats_u *)dm_kva;
874 dm_kva += FCPORT_STATS_DMA_SZ;
875 dm_pa += FCPORT_STATS_DMA_SZ;
877 bfa_meminfo_dma_virt(meminfo) = dm_kva;
878 bfa_meminfo_dma_phys(meminfo) = dm_pa;
882 * Memory initialization.
/*
 * Module attach: zero the port structure, claim DMA memory, place both
 * state machines in their initial states and install default port
 * configuration.
 */
885 bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
886 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
888 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
889 struct bfa_pport_cfg_s *port_cfg = &fcport->cfg;
890 struct bfa_fcport_ln_s *ln = &fcport->ln;
892 bfa_os_memset(fcport, 0, sizeof(struct bfa_fcport_s));
896 bfa_fcport_mem_claim(fcport, meminfo);
898 bfa_sm_set_state(fcport, bfa_fcport_sm_uninit);
899 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
902 * initialize and set default configuration
904 port_cfg->topology = BFA_PPORT_TOPOLOGY_P2P;
905 port_cfg->speed = BFA_PPORT_SPEED_AUTO;
906 port_cfg->trunked = BFA_FALSE;
/* 0 here means "take the IOC's max frame size" in bfa_fcport_init(). */
907 port_cfg->maxfrsize = 0;
909 port_cfg->trl_def_speed = BFA_PPORT_SPEED_1GBPS;
911 bfa_reqq_winit(&fcport->reqq_wait, bfa_fcport_qresume, fcport);
/* Module detach: no per-port teardown is visible in this excerpt. */
915 bfa_fcport_detach(struct bfa_s *bfa)
920 * Called when IOC is ready.
923 bfa_fcport_start(struct bfa_s *bfa)
925 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_START);
929 * Called before IOC is stopped.
932 bfa_fcport_stop(struct bfa_s *bfa)
934 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_STOP);
938 * Called when IOC failure is detected.
941 bfa_fcport_iocdisable(struct bfa_s *bfa)
943 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_HWFAIL);
/*
 * Cache negotiated link parameters from the firmware link-state event:
 * speed, topology, loop ALPA (loop topology only) and QoS attributes.
 */
947 bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport)
949 struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
951 fcport->speed = pevent->link_state.speed;
952 fcport->topology = pevent->link_state.topology;
954 if (fcport->topology == BFA_PPORT_TOPOLOGY_LOOP)
/* NOTE(review): assignment target (fcport->myalpa) is on an elided line. */
956 pevent->link_state.tl.loop_info.myalpa;
961 bfa_os_assign(fcport->qos_attr, pevent->link_state.qos_attr);
962 bfa_os_assign(fcport->qos_vc_attr, pevent->link_state.qos_vc_attr);
964 bfa_trc(fcport->bfa, fcport->speed);
965 bfa_trc(fcport->bfa, fcport->topology);
/*
 * Invalidate cached link parameters when the link goes away.
 */
969 bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport)
971 fcport->speed = BFA_PPORT_SPEED_UNKNOWN;
972 fcport->topology = BFA_PPORT_TOPOLOGY_NONE;
976 * Send port enable message to firmware.
/*
 * Returns BFA_TRUE when the request was queued to firmware; otherwise
 * arms a request-queue wait and (presumably, on an elided line) returns
 * BFA_FALSE so the SM enters the qwait state.
 */
979 bfa_fcport_send_enable(struct bfa_fcport_s *fcport)
981 struct bfi_fcport_enable_req_s *m;
984 * Increment message tag before queue check, so that responses to old
985 * requests are discarded.
990 * check for room in queue to send request now
992 m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
994 bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
999 bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_ENABLE_REQ,
1000 bfa_lpuid(fcport->bfa));
1001 m->nwwn = fcport->nwwn;
1002 m->pwwn = fcport->pwwn;
1003 m->port_cfg = fcport->cfg;
1004 m->msgtag = fcport->msgtag;
/* maxfrsize goes to the wire in big-endian order. */
1005 m->port_cfg.maxfrsize = bfa_os_htons(fcport->cfg.maxfrsize);
1006 bfa_dma_be_addr_set(m->stats_dma_addr, fcport->stats_pa);
1007 bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_lo);
1008 bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_hi);
1011 * queue I/O message to firmware
1013 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
1018 * Send port disable message to firmware.
/*
 * Returns BFA_TRUE when the request was queued; otherwise arms a
 * request-queue wait (return of BFA_FALSE is presumably on an elided
 * line, mirroring bfa_fcport_send_enable()).
 */
1020 static bfa_boolean_t
1021 bfa_fcport_send_disable(struct bfa_fcport_s *fcport)
1023 struct bfi_fcport_req_s *m;
1026 * Increment message tag before queue check, so that responses to old
1027 * requests are discarded.
1032 * check for room in queue to send request now
1034 m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
1036 bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
1037 &fcport->reqq_wait);
1041 bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_DISABLE_REQ,
1042 bfa_lpuid(fcport->bfa));
1043 m->msgtag = fcport->msgtag;
1046 * queue I/O message to firmware
1048 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
/*
 * Read the port and node WWNs from the IOC into the port structure.
 */
1054 bfa_fcport_set_wwns(struct bfa_fcport_s *fcport)
1056 fcport->pwwn = bfa_ioc_get_pwwn(&fcport->bfa->ioc);
1057 fcport->nwwn = bfa_ioc_get_nwwn(&fcport->bfa->ioc);
1059 bfa_trc(fcport->bfa, fcport->pwwn);
1060 bfa_trc(fcport->bfa, fcport->nwwn);
/*
 * Push the configured transmit BB-credit to firmware via a
 * set-service-params request. NOTE(review): unlike the enable/disable
 * senders, no queue-full wait is visible here — the !m handling is
 * presumably on elided lines.
 */
1064 bfa_fcport_send_txcredit(void *port_cbarg)
1067 struct bfa_fcport_s *fcport = port_cbarg;
1068 struct bfi_fcport_set_svc_params_req_s *m;
1071 * check for room in queue to send request now
1073 m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
1075 bfa_trc(fcport->bfa, fcport->cfg.tx_bbcredit);
1079 bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ,
1080 bfa_lpuid(fcport->bfa));
1081 m->tx_bbcredit = bfa_os_htons((u16) fcport->cfg.tx_bbcredit);
1084 * queue I/O message to firmware
1086 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
/*
 * Byte-swap a QoS stats block: every field is treated as a 32-bit
 * network-order word and converted to host order.
 */
1090 bfa_fcport_qos_stats_swap(struct bfa_qos_stats_s *d,
1091 struct bfa_qos_stats_s *s)
1093 u32 *dip = (u32 *) d;
1094 u32 *sip = (u32 *) s;
1097 /* Now swap the 32 bit fields */
1098 for (i = 0; i < (sizeof(struct bfa_qos_stats_s)/sizeof(u32)); ++i)
1099 dip[i] = bfa_os_ntohl(sip[i]);
/*
 * Byte-swap an FCoE stats block. Fields are 64-bit counters handled as
 * word pairs; the two visible branches either keep or exchange the word
 * order — presumably selected by an elided host-endianness #ifdef.
 */
1103 bfa_fcport_fcoe_stats_swap(struct bfa_fcoe_stats_s *d,
1104 struct bfa_fcoe_stats_s *s)
1106 u32 *dip = (u32 *) d;
1107 u32 *sip = (u32 *) s;
1110 for (i = 0; i < ((sizeof(struct bfa_fcoe_stats_s))/sizeof(u32));
1113 dip[i] = bfa_os_ntohl(sip[i]);
1114 dip[i + 1] = bfa_os_ntohl(sip[i + 1]);
1116 dip[i] = bfa_os_ntohl(sip[i + 1]);
1117 dip[i + 1] = bfa_os_ntohl(sip[i]);
/*
 * Deferred completion for a stats-get request: on success, byte-swap
 * the DMA'd stats (FC QoS block in FC mode, FCoE block otherwise) into
 * the caller's buffer, then invoke the caller's callback and clear the
 * busy flag.
 */
1123 __bfa_cb_fcport_stats_get(void *cbarg, bfa_boolean_t complete)
1125 struct bfa_fcport_s *fcport = cbarg;
1128 if (fcport->stats_status == BFA_STATUS_OK) {
1130 /* Swap FC QoS or FCoE stats */
1131 if (bfa_ioc_get_fcmode(&fcport->bfa->ioc))
1132 bfa_fcport_qos_stats_swap(
1133 &fcport->stats_ret->fcqos,
1134 &fcport->stats->fcqos);
1136 bfa_fcport_fcoe_stats_swap(
1137 &fcport->stats_ret->fcoe,
1138 &fcport->stats->fcoe);
1140 fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status);
1142 fcport->stats_busy = BFA_FALSE;
1143 fcport->stats_status = BFA_STATUS_OK;
/*
 * Stats-get timer popped before the firmware responded: cancel any
 * pending queue wait, mark ETIMER and complete via the deferred
 * callback path.
 */
1148 bfa_fcport_stats_get_timeout(void *cbarg)
1150 struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
1152 bfa_trc(fcport->bfa, fcport->stats_qfull);
1154 if (fcport->stats_qfull) {
1155 bfa_reqq_wcancel(&fcport->stats_reqq_wait);
1156 fcport->stats_qfull = BFA_FALSE;
1159 fcport->stats_status = BFA_STATUS_ETIMER;
1160 bfa_cb_queue(fcport->bfa, &fcport->hcb_qe, __bfa_cb_fcport_stats_get,
/*
 * Queue a stats-get request to firmware. If the request queue is full,
 * arm a wait that re-invokes this function when space frees up.
 */
1165 bfa_fcport_send_stats_get(void *cbarg)
1167 struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
1168 struct bfi_fcport_req_s *msg;
1170 msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
1173 fcport->stats_qfull = BFA_TRUE;
1174 bfa_reqq_winit(&fcport->stats_reqq_wait,
1175 bfa_fcport_send_stats_get, fcport);
1176 bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
1177 &fcport->stats_reqq_wait);
1180 fcport->stats_qfull = BFA_FALSE;
1182 bfa_os_memset(msg, 0, sizeof(struct bfi_fcport_req_s));
1183 bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_GET_REQ,
1184 bfa_lpuid(fcport->bfa));
1185 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
/*
 * Deferred completion for a stats-clear request: report status to the
 * caller and release the busy flag.
 */
1189 __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete)
1191 struct bfa_fcport_s *fcport = cbarg;
1194 fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status);
1196 fcport->stats_busy = BFA_FALSE;
1197 fcport->stats_status = BFA_STATUS_OK;
/*
 * Stats-clear timer popped before the firmware responded: cancel any
 * pending queue wait, mark ETIMER and complete via the deferred path.
 */
1202 bfa_fcport_stats_clr_timeout(void *cbarg)
1204 struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
1206 bfa_trc(fcport->bfa, fcport->stats_qfull);
1208 if (fcport->stats_qfull) {
1209 bfa_reqq_wcancel(&fcport->stats_reqq_wait);
1210 fcport->stats_qfull = BFA_FALSE;
1213 fcport->stats_status = BFA_STATUS_ETIMER;
1214 bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
1215 __bfa_cb_fcport_stats_clr, fcport);
/*
 * Queue a stats-clear request to firmware. Mirrors
 * bfa_fcport_send_stats_get(): a full queue arms a re-invoking wait.
 */
1219 bfa_fcport_send_stats_clear(void *cbarg)
1221 struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
1222 struct bfi_fcport_req_s *msg;
1224 msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
1227 fcport->stats_qfull = BFA_TRUE;
1228 bfa_reqq_winit(&fcport->stats_reqq_wait,
1229 bfa_fcport_send_stats_clear, fcport);
1230 bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
1231 &fcport->stats_reqq_wait);
1234 fcport->stats_qfull = BFA_FALSE;
1236 bfa_os_memset(msg, 0, sizeof(struct bfi_fcport_req_s));
1237 bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_CLEAR_REQ,
1238 bfa_lpuid(fcport->bfa));
1239 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
1247 * Called to initialize port attributes
/*
 * Populate port attributes from IOC hardware data: WWNs, max frame size
 * (only if not already configured — see the 0 default set in attach),
 * receive BB-credit and supported speed. The asserts guard against the
 * IOC reporting zeroed hardware data.
 */
1250 bfa_fcport_init(struct bfa_s *bfa)
1252 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1255 * Initialize port attributes from IOC hardware data.
1257 bfa_fcport_set_wwns(fcport);
1258 if (fcport->cfg.maxfrsize == 0)
1259 fcport->cfg.maxfrsize = bfa_ioc_maxfrsize(&bfa->ioc);
1260 fcport->cfg.rx_bbcredit = bfa_ioc_rx_bbcredit(&bfa->ioc);
1261 fcport->speed_sup = bfa_ioc_speed_sup(&bfa->ioc);
1263 bfa_assert(fcport->cfg.maxfrsize);
1264 bfa_assert(fcport->cfg.rx_bbcredit);
1265 bfa_assert(fcport->speed_sup);
1270 * Firmware message handler.
/*
 * Dispatch firmware-to-host FCPORT messages: enable/disable responses
 * (matched against the current msgtag so stale responses are dropped),
 * link-state events, and stats get/clear responses.
 */
1273 bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
1275 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1276 union bfi_fcport_i2h_msg_u i2hmsg;
1279 fcport->event_arg.i2hmsg = i2hmsg;
1281 switch (msg->mhdr.msg_id) {
1282 case BFI_FCPORT_I2H_ENABLE_RSP:
1283 if (fcport->msgtag == i2hmsg.penable_rsp->msgtag)
1284 bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
1287 case BFI_FCPORT_I2H_DISABLE_RSP:
/*
 * NOTE(review): penable_rsp is reused here for the disable
 * response — presumably msgtag sits at the same offset in both
 * message layouts; confirm against the bfi_fcport definitions.
 */
1288 if (fcport->msgtag == i2hmsg.penable_rsp->msgtag)
1289 bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP)
1292 case BFI_FCPORT_I2H_EVENT:
1293 switch (i2hmsg.event->link_state.linkstate) {
1294 case BFA_PPORT_LINKUP:
1295 bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKUP);
1297 case BFA_PPORT_LINKDOWN:
1298 bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKDOWN);
1300 case BFA_PPORT_TRUNK_LINKDOWN:
1301 /** todo: event notification */
1306 case BFI_FCPORT_I2H_STATS_GET_RSP:
1308 * check for timer pop before processing the rsp
/* If no request is outstanding or it already timed out, drop the rsp. */
1310 if (fcport->stats_busy == BFA_FALSE ||
1311 fcport->stats_status == BFA_STATUS_ETIMER)
1314 bfa_timer_stop(&fcport->timer);
1315 fcport->stats_status = i2hmsg.pstatsget_rsp->status;
1316 bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
1317 __bfa_cb_fcport_stats_get, fcport);
1320 case BFI_FCPORT_I2H_STATS_CLEAR_RSP:
1322 * check for timer pop before processing the rsp
1324 if (fcport->stats_busy == BFA_FALSE ||
1325 fcport->stats_status == BFA_STATUS_ETIMER)
1328 bfa_timer_stop(&fcport->timer);
1329 fcport->stats_status = BFA_STATUS_OK;
1330 bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
1331 __bfa_cb_fcport_stats_clr, fcport);
1345 * Registered callback for port events.
1348 bfa_fcport_event_register(struct bfa_s *bfa,
1349 void (*cbfn) (void *cbarg, bfa_pport_event_t event),
1352 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1354 fcport->event_cbfn = cbfn;
1355 fcport->event_cbarg = cbarg;
/*
 * Administratively enable the port. Rejected while diagnostics hold the
 * port, or while a disable is still waiting for request-queue space.
 */
1359 bfa_fcport_enable(struct bfa_s *bfa)
1361 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1363 if (fcport->diag_busy)
1364 return BFA_STATUS_DIAG_BUSY;
1365 else if (bfa_sm_cmp_state
1366 (BFA_FCPORT_MOD(bfa), bfa_fcport_sm_disabling_qwait))
1367 return BFA_STATUS_DEVBUSY;
1369 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_ENABLE);
1370 return BFA_STATUS_OK;
/*
 * Administratively disable the port; the state machine handles any
 * in-progress transition.
 */
1374 bfa_fcport_disable(struct bfa_s *bfa)
1376 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DISABLE);
1377 return BFA_STATUS_OK;
1381 * Configure port speed.
/*
 * Reject fixed speeds above the hardware-supported maximum; AUTO is
 * always accepted.
 */
1384 bfa_fcport_cfg_speed(struct bfa_s *bfa, enum bfa_pport_speed speed)
1386 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1388 bfa_trc(bfa, speed);
1390 if ((speed != BFA_PPORT_SPEED_AUTO) && (speed > fcport->speed_sup)) {
1391 bfa_trc(bfa, fcport->speed_sup);
1392 return BFA_STATUS_UNSUPP_SPEED;
1395 fcport->cfg.speed = speed;
1397 return BFA_STATUS_OK;
1401 * Get current speed.
1403 enum bfa_pport_speed
1404 bfa_fcport_get_speed(struct bfa_s *bfa)
1406 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
/* Returns the negotiated speed cached by bfa_fcport_update_linkinfo(). */
1408 return fcport->speed;
1412 * Configure port topology.
/*
 * Only P2P, LOOP and AUTO are accepted; anything else is EINVAL.
 */
1415 bfa_fcport_cfg_topology(struct bfa_s *bfa, enum bfa_pport_topology topology)
1417 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1419 bfa_trc(bfa, topology);
1420 bfa_trc(bfa, fcport->cfg.topology);
1423 case BFA_PPORT_TOPOLOGY_P2P:
1424 case BFA_PPORT_TOPOLOGY_LOOP:
1425 case BFA_PPORT_TOPOLOGY_AUTO:
1429 return BFA_STATUS_EINVAL;
1432 fcport->cfg.topology = topology;
1433 return BFA_STATUS_OK;
1437 * Get current topology.
1439 enum bfa_pport_topology
1440 bfa_fcport_get_topology(struct bfa_s *bfa)
1442 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
/* Returns the negotiated topology, not the configured one. */
1444 return fcport->topology;
1448 bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa)
1450 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1453 bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
1454 bfa_trc(bfa, fcport->cfg.hardalpa);
1456 fcport->cfg.cfg_hardalpa = BFA_TRUE;
1457 fcport->cfg.hardalpa = alpa;
1459 return BFA_STATUS_OK;
1463 bfa_fcport_clr_hardalpa(struct bfa_s *bfa)
1465 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1467 bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
1468 bfa_trc(bfa, fcport->cfg.hardalpa);
1470 fcport->cfg.cfg_hardalpa = BFA_FALSE;
1471 return BFA_STATUS_OK;
1475 bfa_fcport_get_hardalpa(struct bfa_s *bfa, u8 *alpa)
1477 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1479 *alpa = fcport->cfg.hardalpa;
1480 return fcport->cfg.cfg_hardalpa;
1484 bfa_fcport_get_myalpa(struct bfa_s *bfa)
1486 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1488 return fcport->myalpa;
1492 bfa_fcport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxfrsize)
1494 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1496 bfa_trc(bfa, maxfrsize);
1497 bfa_trc(bfa, fcport->cfg.maxfrsize);
1502 if ((maxfrsize > FC_MAX_PDUSZ) || (maxfrsize < FC_MIN_PDUSZ))
1503 return BFA_STATUS_INVLD_DFSZ;
1506 * power of 2, if not the max frame size of 2112
1508 if ((maxfrsize != FC_MAX_PDUSZ) && (maxfrsize & (maxfrsize - 1)))
1509 return BFA_STATUS_INVLD_DFSZ;
1511 fcport->cfg.maxfrsize = maxfrsize;
1512 return BFA_STATUS_OK;
1516 bfa_fcport_get_maxfrsize(struct bfa_s *bfa)
1518 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1520 return fcport->cfg.maxfrsize;
1524 bfa_fcport_mypid(struct bfa_s *bfa)
1526 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1528 return fcport->mypid;
1532 bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa)
1534 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1536 return fcport->cfg.rx_bbcredit;
1540 bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit)
1542 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1544 fcport->cfg.tx_bbcredit = (u8) tx_bbcredit;
1545 bfa_fcport_send_txcredit(fcport);
1549 * Get port attributes.
1553 bfa_fcport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node)
1555 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1557 return fcport->nwwn;
1559 return fcport->pwwn;
1563 bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_pport_attr_s *attr)
1565 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1567 bfa_os_memset(attr, 0, sizeof(struct bfa_pport_attr_s));
1569 attr->nwwn = fcport->nwwn;
1570 attr->pwwn = fcport->pwwn;
1572 attr->factorypwwn = bfa_ioc_get_mfg_pwwn(&bfa->ioc);
1573 attr->factorynwwn = bfa_ioc_get_mfg_nwwn(&bfa->ioc);
1575 bfa_os_memcpy(&attr->pport_cfg, &fcport->cfg,
1576 sizeof(struct bfa_pport_cfg_s));
1580 attr->pport_cfg.speed = fcport->cfg.speed;
1581 attr->speed_supported = fcport->speed_sup;
1582 attr->speed = fcport->speed;
1583 attr->cos_supported = FC_CLASS_3;
1586 * topology attributes
1588 attr->pport_cfg.topology = fcport->cfg.topology;
1589 attr->topology = fcport->topology;
1594 attr->beacon = fcport->beacon;
1595 attr->link_e2e_beacon = fcport->link_e2e_beacon;
1596 attr->plog_enabled = bfa_plog_get_setting(fcport->bfa->plog);
1598 attr->pport_cfg.path_tov = bfa_fcpim_path_tov_get(bfa);
1599 attr->pport_cfg.q_depth = bfa_fcpim_qdepth_get(bfa);
1600 attr->port_state = bfa_sm_to_state(hal_pport_sm_table, fcport->sm);
1601 if (bfa_ioc_is_disabled(&fcport->bfa->ioc))
1602 attr->port_state = BFA_PPORT_ST_IOCDIS;
1603 else if (bfa_ioc_fw_mismatch(&fcport->bfa->ioc))
1604 attr->port_state = BFA_PPORT_ST_FWMISMATCH;
1607 #define BFA_FCPORT_STATS_TOV 1000
1610 * Fetch port attributes (FCQoS or FCoE).
1613 bfa_fcport_get_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
1614 bfa_cb_pport_t cbfn, void *cbarg)
1616 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1618 if (fcport->stats_busy) {
1619 bfa_trc(bfa, fcport->stats_busy);
1620 return BFA_STATUS_DEVBUSY;
1623 fcport->stats_busy = BFA_TRUE;
1624 fcport->stats_ret = stats;
1625 fcport->stats_cbfn = cbfn;
1626 fcport->stats_cbarg = cbarg;
1628 bfa_fcport_send_stats_get(fcport);
1630 bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_get_timeout,
1631 fcport, BFA_FCPORT_STATS_TOV);
1632 return BFA_STATUS_OK;
1636 * Reset port statistics (FCQoS or FCoE).
1639 bfa_fcport_clear_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, void *cbarg)
1641 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1643 if (fcport->stats_busy) {
1644 bfa_trc(bfa, fcport->stats_busy);
1645 return BFA_STATUS_DEVBUSY;
1648 fcport->stats_busy = BFA_TRUE;
1649 fcport->stats_cbfn = cbfn;
1650 fcport->stats_cbarg = cbarg;
1652 bfa_fcport_send_stats_clear(fcport);
1654 bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_clr_timeout,
1655 fcport, BFA_FCPORT_STATS_TOV);
1656 return BFA_STATUS_OK;
1660 * Fetch FCQoS port statistics
1663 bfa_fcport_get_qos_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
1664 bfa_cb_pport_t cbfn, void *cbarg)
1666 /* Meaningful only for FC mode */
1667 bfa_assert(bfa_ioc_get_fcmode(&bfa->ioc));
1669 return bfa_fcport_get_stats(bfa, stats, cbfn, cbarg);
1673 * Reset FCoE port statistics
1676 bfa_fcport_clear_qos_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, void *cbarg)
1678 /* Meaningful only for FC mode */
1679 bfa_assert(bfa_ioc_get_fcmode(&bfa->ioc));
1681 return bfa_fcport_clear_stats(bfa, cbfn, cbarg);
1685 * Fetch FCQoS port statistics
1688 bfa_fcport_get_fcoe_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
1689 bfa_cb_pport_t cbfn, void *cbarg)
1691 /* Meaningful only for FCoE mode */
1692 bfa_assert(!bfa_ioc_get_fcmode(&bfa->ioc));
1694 return bfa_fcport_get_stats(bfa, stats, cbfn, cbarg);
1698 * Reset FCoE port statistics
1701 bfa_fcport_clear_fcoe_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, void *cbarg)
1703 /* Meaningful only for FCoE mode */
1704 bfa_assert(!bfa_ioc_get_fcmode(&bfa->ioc));
1706 return bfa_fcport_clear_stats(bfa, cbfn, cbarg);
1710 bfa_fcport_trunk_enable(struct bfa_s *bfa, u8 bitmap)
1712 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1714 bfa_trc(bfa, bitmap);
1715 bfa_trc(bfa, fcport->cfg.trunked);
1716 bfa_trc(bfa, fcport->cfg.trunk_ports);
1718 if (!bitmap || (bitmap & (bitmap - 1)))
1719 return BFA_STATUS_EINVAL;
1721 fcport->cfg.trunked = BFA_TRUE;
1722 fcport->cfg.trunk_ports = bitmap;
1724 return BFA_STATUS_OK;
1728 bfa_fcport_qos_get_attr(struct bfa_s *bfa, struct bfa_qos_attr_s *qos_attr)
1730 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1732 qos_attr->state = bfa_os_ntohl(fcport->qos_attr.state);
1733 qos_attr->total_bb_cr = bfa_os_ntohl(fcport->qos_attr.total_bb_cr);
1737 bfa_fcport_qos_get_vc_attr(struct bfa_s *bfa,
1738 struct bfa_qos_vc_attr_s *qos_vc_attr)
1740 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1741 struct bfa_qos_vc_attr_s *bfa_vc_attr = &fcport->qos_vc_attr;
1744 qos_vc_attr->total_vc_count = bfa_os_ntohs(bfa_vc_attr->total_vc_count);
1745 qos_vc_attr->shared_credit = bfa_os_ntohs(bfa_vc_attr->shared_credit);
1746 qos_vc_attr->elp_opmode_flags =
1747 bfa_os_ntohl(bfa_vc_attr->elp_opmode_flags);
1750 * Individual VC info
1752 while (i < qos_vc_attr->total_vc_count) {
1753 qos_vc_attr->vc_info[i].vc_credit =
1754 bfa_vc_attr->vc_info[i].vc_credit;
1755 qos_vc_attr->vc_info[i].borrow_credit =
1756 bfa_vc_attr->vc_info[i].borrow_credit;
1757 qos_vc_attr->vc_info[i].priority =
1758 bfa_vc_attr->vc_info[i].priority;
1764 * Fetch port attributes.
1767 bfa_fcport_trunk_disable(struct bfa_s *bfa)
1769 return BFA_STATUS_OK;
1773 bfa_fcport_trunk_query(struct bfa_s *bfa, u32 *bitmap)
1775 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1777 *bitmap = fcport->cfg.trunk_ports;
1778 return fcport->cfg.trunked;
1782 bfa_fcport_is_disabled(struct bfa_s *bfa)
1784 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1786 return bfa_sm_to_state(hal_pport_sm_table, fcport->sm) ==
1787 BFA_PPORT_ST_DISABLED;
1792 bfa_fcport_is_ratelim(struct bfa_s *bfa)
1794 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1796 return fcport->cfg.ratelimit ? BFA_TRUE : BFA_FALSE;
1801 bfa_fcport_cfg_qos(struct bfa_s *bfa, bfa_boolean_t on_off)
1803 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1804 enum bfa_ioc_type_e ioc_type = bfa_get_type(bfa);
1806 bfa_trc(bfa, on_off);
1807 bfa_trc(bfa, fcport->cfg.qos_enabled);
1809 bfa_trc(bfa, ioc_type);
1811 if (ioc_type == BFA_IOC_TYPE_FC)
1812 fcport->cfg.qos_enabled = on_off;
1816 bfa_fcport_cfg_ratelim(struct bfa_s *bfa, bfa_boolean_t on_off)
1818 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1820 bfa_trc(bfa, on_off);
1821 bfa_trc(bfa, fcport->cfg.ratelimit);
1823 fcport->cfg.ratelimit = on_off;
1824 if (fcport->cfg.trl_def_speed == BFA_PPORT_SPEED_UNKNOWN)
1825 fcport->cfg.trl_def_speed = BFA_PPORT_SPEED_1GBPS;
1829 * Configure default minimum ratelim speed
1832 bfa_fcport_cfg_ratelim_speed(struct bfa_s *bfa, enum bfa_pport_speed speed)
1834 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1836 bfa_trc(bfa, speed);
1839 * Auto and speeds greater than the supported speed, are invalid
1841 if ((speed == BFA_PPORT_SPEED_AUTO) || (speed > fcport->speed_sup)) {
1842 bfa_trc(bfa, fcport->speed_sup);
1843 return BFA_STATUS_UNSUPP_SPEED;
1846 fcport->cfg.trl_def_speed = speed;
1848 return BFA_STATUS_OK;
1852 * Get default minimum ratelim speed
1854 enum bfa_pport_speed
1855 bfa_fcport_get_ratelim_speed(struct bfa_s *bfa)
1857 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1859 bfa_trc(bfa, fcport->cfg.trl_def_speed);
1860 return fcport->cfg.trl_def_speed;
1865 bfa_fcport_busy(struct bfa_s *bfa, bfa_boolean_t status)
1867 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1869 bfa_trc(bfa, status);
1870 bfa_trc(bfa, fcport->diag_busy);
1872 fcport->diag_busy = status;
1876 bfa_fcport_beacon(struct bfa_s *bfa, bfa_boolean_t beacon,
1877 bfa_boolean_t link_e2e_beacon)
1879 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1881 bfa_trc(bfa, beacon);
1882 bfa_trc(bfa, link_e2e_beacon);
1883 bfa_trc(bfa, fcport->beacon);
1884 bfa_trc(bfa, fcport->link_e2e_beacon);
1886 fcport->beacon = beacon;
1887 fcport->link_e2e_beacon = link_e2e_beacon;
1891 bfa_fcport_is_linkup(struct bfa_s *bfa)
1893 return bfa_sm_cmp_state(BFA_FCPORT_MOD(bfa), bfa_fcport_sm_linkup);