/* bnx2x_sriov.c: Broadcom Everest network driver.
 *
 * Copyright 2009-2013 Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2, available
 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a
 * license other than the GPL, without Broadcom's express prior written
 * consent.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Shmulik Ravid <shmulikr@broadcom.com>
 *	       Ariel Elior <ariele@broadcom.com>
 */
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_cmn.h"
#include "bnx2x_sp.h"
#include <linux/crc32.h>
#include <linux/if_vlan.h>
/* General service functions */
static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
				  u8 pf_id)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
}

static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
				 u8 enable)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
}
int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
{
	int idx;

	for_each_vf(bp, idx)
		if (bnx2x_vf(bp, idx, abs_vfid) == abs_vfid)
			break;
	return idx;
}

static
struct bnx2x_virtf *bnx2x_vf_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
{
	u16 idx = (u16)bnx2x_vf_idx_by_abs_fid(bp, abs_vfid);
	return (idx < BNX2X_NR_VIRTFN(bp)) ? BP_VF(bp, idx) : NULL;
}
static void bnx2x_vf_igu_ack_sb(struct bnx2x *bp, struct bnx2x_virtf *vf,
				u8 igu_sb_id, u8 segment, u16 index, u8 op,
				u8 update)
{
	/* acking a VF sb through the PF - use the GRC */
	u32 ctl;
	u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
	u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
	u32 func_encode = vf->abs_vfid;
	u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + igu_sb_id;
	struct igu_regular cmd_data = {0};

	cmd_data.sb_id_and_flags =
			((index << IGU_REGULAR_SB_INDEX_SHIFT) |
			 (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
			 (update << IGU_REGULAR_BUPDATE_SHIFT) |
			 (op << IGU_REGULAR_ENABLE_INT_SHIFT));

	ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT |
	      func_encode << IGU_CTRL_REG_FID_SHIFT |
	      IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;

	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
	   cmd_data.sb_id_and_flags, igu_addr_data);
	REG_WR(bp, igu_addr_data, cmd_data.sb_id_and_flags);
	mmiowb();
	barrier();

	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
	   ctl, igu_addr_ctl);
	REG_WR(bp, igu_addr_ctl, ctl);
	mmiowb();
	barrier();
}
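
/* Usage sketch (illustration only - mirrors the call made from
 * bnx2x_vf_queue_create() below): to enable interrupts on a VF's
 * status block the PF issues
 *
 *	bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, sb_idx),
 *			    USTORM_ID, 0, IGU_INT_ENABLE, 0);
 *
 * i.e. segment USTORM_ID, consumer index 0, op IGU_INT_ENABLE and no
 * index update, all routed through the GRC on the VF's behalf.
 */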
static bool bnx2x_validate_vf_sp_objs(struct bnx2x *bp,
				      struct bnx2x_virtf *vf,
				      bool print_err)
{
	if (!bnx2x_leading_vfq(vf, sp_initialized)) {
		if (print_err)
			BNX2X_ERR("Slowpath objects not yet initialized!\n");
		else
			DP(BNX2X_MSG_IOV, "Slowpath objects not yet initialized!\n");
		return false;
	}
	return true;
}
/* VFOP operations states */
void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
			      struct bnx2x_queue_init_params *init_params,
			      struct bnx2x_queue_setup_params *setup_params,
			      u16 q_idx, u16 sb_idx)
{
	DP(BNX2X_MSG_IOV,
	   "VF[%d] Q_SETUP: txq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, flags=0x%lx, traffic-type=%d",
	   vf->abs_vfid,
	   q_idx,
	   sb_idx,
	   init_params->tx.sb_cq_index,
	   init_params->tx.hc_rate,
	   setup_params->flags,
	   setup_params->txq_params.traffic_type);
}
void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf,
			      struct bnx2x_queue_init_params *init_params,
			      struct bnx2x_queue_setup_params *setup_params,
			      u16 q_idx, u16 sb_idx)
{
	struct bnx2x_rxq_setup_params *rxq_params = &setup_params->rxq_params;

	DP(BNX2X_MSG_IOV, "VF[%d] Q_SETUP: rxq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, mtu=%d, buf-size=%d\n"
	   "sge-size=%d, max_sge_pkt=%d, tpa-agg-size=%d, flags=0x%lx, drop-flags=0x%x, cache-log=%d\n",
	   vf->abs_vfid,
	   q_idx,
	   sb_idx,
	   init_params->rx.sb_cq_index,
	   init_params->rx.hc_rate,
	   setup_params->gen_params.mtu,
	   rxq_params->buf_sz,
	   rxq_params->sge_buf_sz,
	   rxq_params->max_sges_pkt,
	   rxq_params->tpa_agg_sz,
	   setup_params->flags,
	   rxq_params->drop_flags,
	   rxq_params->cache_line_log);
}
void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
			   struct bnx2x_virtf *vf,
			   struct bnx2x_vf_queue *q,
			   struct bnx2x_vf_queue_construct_params *p,
			   unsigned long q_type)
{
	struct bnx2x_queue_init_params *init_p = &p->qstate.params.init;
	struct bnx2x_queue_setup_params *setup_p = &p->prep_qsetup;

	/* INIT */

	/* Enable host coalescing in the transition to INIT state */
	if (test_bit(BNX2X_Q_FLG_HC, &init_p->rx.flags))
		__set_bit(BNX2X_Q_FLG_HC_EN, &init_p->rx.flags);

	if (test_bit(BNX2X_Q_FLG_HC, &init_p->tx.flags))
		__set_bit(BNX2X_Q_FLG_HC_EN, &init_p->tx.flags);

	/* FW SB ID */
	init_p->rx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
	init_p->tx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);

	/* context */
	init_p->cxts[0] = q->cxt;

	/* SETUP */

	/* Setup-op general parameters */
	setup_p->gen_params.spcl_id = vf->sp_cl_id;
	setup_p->gen_params.stat_id = vfq_stat_id(vf, q);

	/* Setup-op pause params:
	 * Nothing to do, the pause thresholds are set by default to 0 which
	 * effectively turns off the feature for this queue. We don't want
	 * one queue (VF) to interfere with another queue (another VF)
	 */
	if (vf->cfg_flags & VF_CFG_FW_FC)
		BNX2X_ERR("No support for pause to VFs (abs_vfid: %d)\n",
			  vf->abs_vfid);
	/* Setup-op flags:
	 * collect statistics, zero statistics, local-switching, security,
	 * OV for Flex10, RSS and MCAST for leading
	 */
	if (test_bit(BNX2X_Q_FLG_STATS, &setup_p->flags))
		__set_bit(BNX2X_Q_FLG_ZERO_STATS, &setup_p->flags);

	/* for VFs, enable tx switching, bd coherency, and mac address
	 * anti-spoofing
	 */
	__set_bit(BNX2X_Q_FLG_TX_SWITCH, &setup_p->flags);
	__set_bit(BNX2X_Q_FLG_TX_SEC, &setup_p->flags);
	__set_bit(BNX2X_Q_FLG_ANTI_SPOOF, &setup_p->flags);

	/* Setup-op rx parameters */
	if (test_bit(BNX2X_Q_TYPE_HAS_RX, &q_type)) {
		struct bnx2x_rxq_setup_params *rxq_p = &setup_p->rxq_params;

		rxq_p->cl_qzone_id = vfq_qzone_id(vf, q);
		rxq_p->fw_sb_id = vf_igu_sb(vf, q->sb_idx);
		rxq_p->rss_engine_id = FW_VF_HANDLE(vf->abs_vfid);

		if (test_bit(BNX2X_Q_FLG_TPA, &setup_p->flags))
			rxq_p->max_tpa_queues = BNX2X_VF_MAX_TPA_AGG_QUEUES;
	}

	/* Setup-op tx parameters */
	if (test_bit(BNX2X_Q_TYPE_HAS_TX, &q_type)) {
		setup_p->txq_params.tss_leading_cl_id = vf->leading_rss;
		setup_p->txq_params.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
	}
}
static int bnx2x_vf_queue_create(struct bnx2x *bp,
				 struct bnx2x_virtf *vf, int qid,
				 struct bnx2x_vf_queue_construct_params *qctor)
{
	struct bnx2x_queue_state_params *q_params;
	int rc = 0;

	DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);

	/* Prepare ramrod information */
	q_params = &qctor->qstate;
	q_params->q_obj = &bnx2x_vfq(vf, qid, sp_obj);
	set_bit(RAMROD_COMP_WAIT, &q_params->ramrod_flags);

	if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
	    BNX2X_Q_LOGICAL_STATE_ACTIVE) {
		DP(BNX2X_MSG_IOV, "queue was already up. Aborting gracefully\n");
		goto out;
	}

	/* Run Queue 'construction' ramrods */
	q_params->cmd = BNX2X_Q_CMD_INIT;
	rc = bnx2x_queue_state_change(bp, q_params);
	if (rc)
		goto out;

	memcpy(&q_params->params.setup, &qctor->prep_qsetup,
	       sizeof(struct bnx2x_queue_setup_params));
	q_params->cmd = BNX2X_Q_CMD_SETUP;
	rc = bnx2x_queue_state_change(bp, q_params);
	if (rc)
		goto out;

	/* enable interrupts */
	bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, bnx2x_vfq(vf, qid, sb_idx)),
			    USTORM_ID, 0, IGU_INT_ENABLE, 0);
out:
	return rc;
}
static int bnx2x_vf_queue_destroy(struct bnx2x *bp, struct bnx2x_virtf *vf,
				  int qid)
{
	enum bnx2x_queue_cmd cmds[] = {BNX2X_Q_CMD_HALT,
				       BNX2X_Q_CMD_TERMINATE,
				       BNX2X_Q_CMD_CFC_DEL};
	struct bnx2x_queue_state_params q_params;
	int rc, i;

	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);

	/* Prepare ramrod information */
	memset(&q_params, 0, sizeof(struct bnx2x_queue_state_params));
	q_params.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
	set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);

	if (bnx2x_get_q_logical_state(bp, q_params.q_obj) ==
	    BNX2X_Q_LOGICAL_STATE_STOPPED) {
		DP(BNX2X_MSG_IOV, "queue was already stopped. Aborting gracefully\n");
		goto out;
	}

	/* Run Queue 'destruction' ramrods */
	for (i = 0; i < ARRAY_SIZE(cmds); i++) {
		q_params.cmd = cmds[i];
		rc = bnx2x_queue_state_change(bp, &q_params);
		if (rc) {
			BNX2X_ERR("Failed to run Queue command %d\n", cmds[i]);
			return rc;
		}
	}
out:
	/* Clean Context */
	if (bnx2x_vfq(vf, qid, cxt)) {
		bnx2x_vfq(vf, qid, cxt)->ustorm_ag_context.cdu_usage = 0;
		bnx2x_vfq(vf, qid, cxt)->xstorm_ag_context.cdu_reserved = 0;
	}

	return 0;
}
static void
bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid)
{
	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);

	if (vf) {
		/* the first igu entry belonging to VFs of this PF */
		if (!BP_VFDB(bp)->first_vf_igu_entry)
			BP_VFDB(bp)->first_vf_igu_entry = igu_sb_id;

		/* the first igu entry belonging to this VF */
		if (!vf_sb_count(vf))
			vf->igu_base_id = igu_sb_id;

		++vf_sb_count(vf);
	}
	BP_VFDB(bp)->vf_sbs_pool++;
}
static inline void bnx2x_vf_vlan_credit(struct bnx2x *bp,
					struct bnx2x_vlan_mac_obj *obj,
					atomic_t *counter)
{
	struct list_head *pos;
	int read_lock;
	int cnt = 0;

	read_lock = bnx2x_vlan_mac_h_read_lock(bp, obj);
	if (read_lock)
		DP(BNX2X_MSG_SP, "Failed to take vlan mac read head; continuing anyway\n");

	list_for_each(pos, &obj->head)
		cnt++;

	if (!read_lock)
		bnx2x_vlan_mac_h_read_unlock(bp, obj);

	atomic_set(counter, cnt);
}
static int bnx2x_vf_vlan_mac_clear(struct bnx2x *bp, struct bnx2x_virtf *vf,
				   int qid, bool drv_only, bool mac)
{
	struct bnx2x_vlan_mac_ramrod_params ramrod;
	int rc;

	DP(BNX2X_MSG_IOV, "vf[%d] - deleting all %s\n", vf->abs_vfid,
	   mac ? "MACs" : "VLANs");

	/* Prepare ramrod params */
	memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params));
	if (mac) {
		set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
		ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
	} else {
		set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
			&ramrod.user_req.vlan_mac_flags);
		ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
	}
	ramrod.user_req.cmd = BNX2X_VLAN_MAC_DEL;

	set_bit(RAMROD_EXEC, &ramrod.ramrod_flags);
	if (drv_only)
		set_bit(RAMROD_DRV_CLR_ONLY, &ramrod.ramrod_flags);
	else
		set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags);

	/* Start deleting */
	rc = ramrod.vlan_mac_obj->delete_all(bp,
					     ramrod.vlan_mac_obj,
					     &ramrod.user_req.vlan_mac_flags,
					     &ramrod.ramrod_flags);
	if (rc) {
		BNX2X_ERR("Failed to delete all %s\n",
			  mac ? "MACs" : "VLANs");
		return rc;
	}

	/* Clear the vlan counters */
	if (!mac)
		atomic_set(&bnx2x_vfq(vf, qid, vlan_count), 0);

	return 0;
}
static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp,
				    struct bnx2x_virtf *vf, int qid,
				    struct bnx2x_vf_mac_vlan_filter *filter,
				    bool drv_only)
{
	struct bnx2x_vlan_mac_ramrod_params ramrod;
	int rc;

	DP(BNX2X_MSG_IOV, "vf[%d] - %s a %s filter\n",
	   vf->abs_vfid, filter->add ? "Adding" : "Deleting",
	   filter->type == BNX2X_VF_FILTER_MAC ? "MAC" : "VLAN");

	/* Prepare ramrod params */
	memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params));
	if (filter->type == BNX2X_VF_FILTER_VLAN) {
		set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
			&ramrod.user_req.vlan_mac_flags);
		ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
		ramrod.user_req.u.vlan.vlan = filter->vid;
	} else {
		set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
		ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
		memcpy(&ramrod.user_req.u.mac.mac, filter->mac, ETH_ALEN);
	}
	ramrod.user_req.cmd = filter->add ? BNX2X_VLAN_MAC_ADD :
					    BNX2X_VLAN_MAC_DEL;

	/* Verify there are available vlan credits */
	if (filter->add && filter->type == BNX2X_VF_FILTER_VLAN &&
	    (atomic_read(&bnx2x_vfq(vf, qid, vlan_count)) >=
	     vf_vlan_rules_cnt(vf))) {
		BNX2X_ERR("No credits for vlan [%d >= %d]\n",
			  atomic_read(&bnx2x_vfq(vf, qid, vlan_count)),
			  vf_vlan_rules_cnt(vf));
		return -ENOMEM;
	}

	set_bit(RAMROD_EXEC, &ramrod.ramrod_flags);
	if (drv_only)
		set_bit(RAMROD_DRV_CLR_ONLY, &ramrod.ramrod_flags);
	else
		set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags);

	/* Add/Remove the filter */
	rc = bnx2x_config_vlan_mac(bp, &ramrod);
	if (rc && rc != -EEXIST) {
		BNX2X_ERR("Failed to %s %s\n",
			  filter->add ? "add" : "delete",
			  filter->type == BNX2X_VF_FILTER_MAC ? "MAC" :
								"VLAN");
		return rc;
	}

	/* Update the vlan counters */
	if (filter->type == BNX2X_VF_FILTER_VLAN)
		bnx2x_vf_vlan_credit(bp, ramrod.vlan_mac_obj,
				     &bnx2x_vfq(vf, qid, vlan_count));

	return 0;
}
int bnx2x_vf_mac_vlan_config_list(struct bnx2x *bp, struct bnx2x_virtf *vf,
				  struct bnx2x_vf_mac_vlan_filters *filters,
				  int qid, bool drv_only)
{
	int rc = 0, i;

	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);

	if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
		return -EINVAL;

	/* Prepare ramrod params */
	for (i = 0; i < filters->count; i++) {
		rc = bnx2x_vf_mac_vlan_config(bp, vf, qid,
					      &filters->filters[i], drv_only);
		if (rc)
			break;
	}

	/* Rollback if needed */
	if (i != filters->count) {
		BNX2X_ERR("Managed only %d/%d filters - rolling back\n",
			  i, filters->count);
		while (--i >= 0) {
			filters->filters[i].add = !filters->filters[i].add;
			bnx2x_vf_mac_vlan_config(bp, vf, qid,
						 &filters->filters[i],
						 drv_only);
		}
	}

	/* It's our responsibility to free the filters */
	kfree(filters);

	return rc;
}
int bnx2x_vf_queue_setup(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid,
			 struct bnx2x_vf_queue_construct_params *qctor)
{
	int rc;

	DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);

	rc = bnx2x_vf_queue_create(bp, vf, qid, qctor);
	if (rc)
		goto op_err;

	/* Configure vlan0 for leading queue */
	if (!qid) {
		struct bnx2x_vf_mac_vlan_filter filter;

		memset(&filter, 0, sizeof(struct bnx2x_vf_mac_vlan_filter));
		filter.type = BNX2X_VF_FILTER_VLAN;
		filter.add = true;
		filter.vid = 0;
		rc = bnx2x_vf_mac_vlan_config(bp, vf, qid, &filter, false);
		if (rc)
			goto op_err;
	}

	/* Schedule the configuration of any pending vlan filters */
	vf->cfg_flags |= VF_CFG_VLAN;
	bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_HYPERVISOR_VLAN,
			       BNX2X_MSG_IOV);
	return 0;
op_err:
	BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, rc);
	return rc;
}
static int bnx2x_vf_queue_flr(struct bnx2x *bp, struct bnx2x_virtf *vf,
			      int qid)
{
	int rc;

	DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);

	/* If needed, clean the filtering data base */
	if ((qid == LEADING_IDX) &&
	    bnx2x_validate_vf_sp_objs(bp, vf, false)) {
		rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, false);
		if (rc)
			goto op_err;
		rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, true);
		if (rc)
			goto op_err;
	}

	/* Terminate queue */
	if (bnx2x_vfq(vf, qid, sp_obj).state != BNX2X_Q_STATE_RESET) {
		struct bnx2x_queue_state_params qstate;

		memset(&qstate, 0, sizeof(struct bnx2x_queue_state_params));
		qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
		qstate.q_obj->state = BNX2X_Q_STATE_STOPPED;
		qstate.cmd = BNX2X_Q_CMD_TERMINATE;
		set_bit(RAMROD_COMP_WAIT, &qstate.ramrod_flags);
		rc = bnx2x_queue_state_change(bp, &qstate);
		if (rc)
			goto op_err;
	}

	return 0;
op_err:
	BNX2X_ERR("vf[%d:%d] error: rc %d\n", vf->abs_vfid, qid, rc);
	return rc;
}
int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf,
		   bnx2x_mac_addr_t *mcasts, int mc_num, bool drv_only)
{
	struct bnx2x_mcast_list_elem *mc = NULL;
	struct bnx2x_mcast_ramrod_params mcast;
	int rc, i;

	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);

	/* Prepare Multicast command */
	memset(&mcast, 0, sizeof(struct bnx2x_mcast_ramrod_params));
	mcast.mcast_obj = &vf->mcast_obj;
	if (drv_only)
		set_bit(RAMROD_DRV_CLR_ONLY, &mcast.ramrod_flags);
	else
		set_bit(RAMROD_COMP_WAIT, &mcast.ramrod_flags);
	if (mc_num) {
		mc = kzalloc(mc_num * sizeof(struct bnx2x_mcast_list_elem),
			     GFP_KERNEL);
		if (!mc) {
			BNX2X_ERR("Cannot configure multicasts due to lack of memory\n");
			return -ENOMEM;
		}
	}

	/* clear existing mcasts */
	mcast.mcast_list_len = vf->mcast_list_len;
	vf->mcast_list_len = mc_num;
	rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_DEL);
	if (rc) {
		BNX2X_ERR("Failed to remove multicasts\n");
		kfree(mc);
		return rc;
	}

	/* update mcast list on the ramrod params */
	if (mc_num) {
		INIT_LIST_HEAD(&mcast.mcast_list);
		for (i = 0; i < mc_num; i++) {
			mc[i].mac = mcasts[i];
			list_add_tail(&mc[i].link,
				      &mcast.mcast_list);
		}

		/* add new mcasts */
		mcast.mcast_list_len = mc_num;
		rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_ADD);
		if (rc)
			BNX2X_ERR("Failed to add multicasts\n");
	}

	kfree(mc);

	return rc;
}
static void bnx2x_vf_prep_rx_mode(struct bnx2x *bp, u8 qid,
				  struct bnx2x_rx_mode_ramrod_params *ramrod,
				  struct bnx2x_virtf *vf,
				  unsigned long accept_flags)
{
	struct bnx2x_vf_queue *vfq = vfq_get(vf, qid);

	memset(ramrod, 0, sizeof(*ramrod));
	ramrod->cid = vfq->cid;
	ramrod->cl_id = vfq_cl_id(vf, vfq);
	ramrod->rx_mode_obj = &bp->rx_mode_obj;
	ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid);
	ramrod->rx_accept_flags = accept_flags;
	ramrod->tx_accept_flags = accept_flags;
	ramrod->pstate = &vf->filter_state;
	ramrod->state = BNX2X_FILTER_RX_MODE_PENDING;

	set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
	set_bit(RAMROD_RX, &ramrod->ramrod_flags);
	set_bit(RAMROD_TX, &ramrod->ramrod_flags);

	ramrod->rdata = bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2);
	ramrod->rdata_mapping = bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2);
}
int bnx2x_vf_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf,
		    int qid, unsigned long accept_flags)
{
	struct bnx2x_rx_mode_ramrod_params ramrod;

	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);

	bnx2x_vf_prep_rx_mode(bp, qid, &ramrod, vf, accept_flags);
	set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags);
	vfq_get(vf, qid)->accept_flags = ramrod.rx_accept_flags;
	return bnx2x_config_rx_mode(bp, &ramrod);
}
int bnx2x_vf_queue_teardown(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid)
{
	int rc;

	DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);

	/* Remove all classification configuration for leading queue */
	if (qid == LEADING_IDX) {
		rc = bnx2x_vf_rxmode(bp, vf, qid, 0);
		if (rc)
			goto op_err;

		/* Remove filtering if feasible */
		if (bnx2x_validate_vf_sp_objs(bp, vf, true)) {
			rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
						     false, false);
			if (rc)
				goto op_err;
			rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
						     false, true);
			if (rc)
				goto op_err;
			rc = bnx2x_vf_mcast(bp, vf, NULL, 0, false);
			if (rc)
				goto op_err;
		}
	}

	/* Destroy queue */
	rc = bnx2x_vf_queue_destroy(bp, vf, qid);
	if (rc)
		goto op_err;
	return rc;
op_err:
	BNX2X_ERR("vf[%d:%d] error: rc %d\n",
		  vf->abs_vfid, qid, rc);
	return rc;
}
/* VF enable primitives
 * when pretend is required the caller is responsible
 * for calling pretend prior to calling these routines
 */

/* internal vf enable - until vf is enabled internally all transactions
 * are blocked. This routine should always be called last with pretend.
 */
static void bnx2x_vf_enable_internal(struct bnx2x *bp, u8 enable)
{
	REG_WR(bp, PGLUE_B_REG_INTERNAL_VFID_ENABLE, enable ? 1 : 0);
}

/* clears vf error in all semi blocks */
static void bnx2x_vf_semi_clear_err(struct bnx2x *bp, u8 abs_vfid)
{
	REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, USEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, abs_vfid);
}
static void bnx2x_vf_pglue_clear_err(struct bnx2x *bp, u8 abs_vfid)
{
	u32 was_err_group = (2 * BP_PATH(bp) + abs_vfid) >> 5;
	u32 was_err_reg = 0;

	switch (was_err_group) {
	case 0:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR;
		break;
	case 1:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR;
		break;
	case 2:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_95_64_CLR;
		break;
	case 3:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_127_96_CLR;
		break;
	}
	REG_WR(bp, was_err_reg, 1 << (abs_vfid & 0x1f));
}
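
/* Worked example (illustration only): on path 1, abs_vfid 40 yields
 * was_err_group = (2 * 1 + 40) >> 5 = 1, so the clear is written to
 * PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR with bit (40 & 0x1f) = 8 set.
 */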
static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int i;
	u32 val;

	/* Set VF masks and configuration - pretend */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));

	REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
	REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
	REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
	REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
	REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
	REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);

	val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
	val |= (IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_MSI_MSIX_EN);
	if (vf->cfg_flags & VF_CFG_INT_SIMD)
		val |= IGU_VF_CONF_SINGLE_ISR_EN;
	val &= ~IGU_VF_CONF_PARENT_MASK;
	val |= (BP_ABS_FUNC(bp) >> 1) << IGU_VF_CONF_PARENT_SHIFT;
	REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);

	DP(BNX2X_MSG_IOV,
	   "value in IGU_REG_VF_CONFIGURATION of vf %d after write is 0x%08x\n",
	   vf->abs_vfid, val);

	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));

	/* iterate over all queues, clear sb consumer */
	for (i = 0; i < vf_sb_count(vf); i++) {
		u8 igu_sb_id = vf_igu_sb(vf, i);

		/* zero prod memory */
		REG_WR(bp, IGU_REG_PROD_CONS_MEMORY + igu_sb_id * 4, 0);

		/* clear sb state machine */
		bnx2x_igu_clear_sb_gen(bp, vf->abs_vfid, igu_sb_id,
				       false /* VF */);

		/* disable + update */
		bnx2x_vf_igu_ack_sb(bp, vf, igu_sb_id, USTORM_ID, 0,
				    IGU_INT_DISABLE, 1);
	}
}
void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid)
{
	/* set the VF-PF association in the FW */
	storm_memset_vf_to_pf(bp, FW_VF_HANDLE(abs_vfid), BP_FUNC(bp));
	storm_memset_func_en(bp, FW_VF_HANDLE(abs_vfid), 1);

	/* clear vf errors */
	bnx2x_vf_semi_clear_err(bp, abs_vfid);
	bnx2x_vf_pglue_clear_err(bp, abs_vfid);

	/* internal vf-enable - pretend */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, abs_vfid));
	DP(BNX2X_MSG_IOV, "enabling internal access for vf %x\n", abs_vfid);
	bnx2x_vf_enable_internal(bp, true);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
}
static void bnx2x_vf_enable_traffic(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	/* Reset vf in IGU - interrupts are still disabled */
	bnx2x_vf_igu_reset(bp, vf);

	/* pretend to enable the vf with the PBF */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
	REG_WR(bp, PBF_REG_DISABLE_VF, 0);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
}
static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid)
{
	struct pci_dev *dev;
	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);

	if (!vf)
		return false;

	dev = pci_get_bus_and_slot(vf->bus, vf->devfn);
	if (dev)
		return bnx2x_is_pcie_pending(dev);
	return false;
}
int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)
{
	/* Verify no pending pci transactions */
	if (bnx2x_vf_is_pcie_pending(bp, abs_vfid))
		BNX2X_ERR("PCIE Transactions still pending\n");

	return 0;
}
static void bnx2x_iov_re_set_vlan_filters(struct bnx2x *bp,
					  struct bnx2x_virtf *vf,
					  int new)
{
	int num = vf_vlan_rules_cnt(vf);
	int diff = new - num;
	bool rc = true;

	DP(BNX2X_MSG_IOV, "vf[%d] - %d vlan filter credits [previously %d]\n",
	   vf->abs_vfid, new, num);

	if (diff > 0)
		rc = bp->vlans_pool.get(&bp->vlans_pool, diff);
	else if (diff < 0)
		rc = bp->vlans_pool.put(&bp->vlans_pool, -diff);

	if (rc)
		vf_vlan_rules_cnt(vf) = new;
	else
		DP(BNX2X_MSG_IOV, "vf[%d] - Failed to configure vlan filter credits change\n",
		   vf->abs_vfid);
}
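
/* Example (illustration only): if the VF currently holds 6 vlan rule
 * credits and new == 4, diff == -2 and two credits are returned to the
 * pool via bp->vlans_pool.put(); a positive diff draws the additional
 * credits with bp->vlans_pool.get().
 */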
/* must be called after the number of PF queues and the number of VFs are
 * both known
 */
static void
bnx2x_iov_static_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct vf_pf_resc_request *resc = &vf->alloc_resc;
	u16 vlan_count = 0;

	/* will be set only during VF-ACQUIRE */
	resc->num_rxqs = 0;
	resc->num_txqs = 0;

	/* no credit calculations for macs (just yet) */
	resc->num_mac_filters = 1;

	/* divvy up vlan rules */
	bnx2x_iov_re_set_vlan_filters(bp, vf, 0);
	vlan_count = bp->vlans_pool.check(&bp->vlans_pool);
	vlan_count = 1 << ilog2(vlan_count);
	bnx2x_iov_re_set_vlan_filters(bp, vf,
				      vlan_count / BNX2X_NR_VIRTFN(bp));

	/* no real limitation */
	resc->num_mc_filters = 0;

	/* num_sbs already set */
	resc->num_sbs = vf->sb_count;
}
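
/* Worked example (illustration only): if the vlans pool reports 100
 * credits, 1 << ilog2(100) == 64, so with 8 VFs each VF is provisioned
 * 64 / 8 == 8 vlan rule credits by the code above.
 */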
/* FLR routines: */
static void bnx2x_vf_free_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	/* reset the state variables */
	bnx2x_iov_static_resc(bp, vf);
	vf->state = VF_FREE;
}
static void bnx2x_vf_flr_clnup_hw(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);

	/* DQ usage counter */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
	bnx2x_flr_clnup_poll_hw_counter(bp, DORQ_REG_VF_USAGE_CNT,
					"DQ VF usage counter timed out",
					poll_cnt);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));

	/* FW cleanup command - poll for the results */
	if (bnx2x_send_final_clnup(bp, (u8)FW_VF_HANDLE(vf->abs_vfid),
				   poll_cnt))
		BNX2X_ERR("VF[%d] Final cleanup timed-out\n", vf->abs_vfid);

	/* verify TX hw is flushed */
	bnx2x_tx_hw_flushed(bp, poll_cnt);
}
static void bnx2x_vf_flr(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int rc, i;

	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);

	/* the cleanup operations are valid if and only if the VF
	 * was first acquired.
	 */
	for (i = 0; i < vf_rxq_count(vf); i++) {
		rc = bnx2x_vf_queue_flr(bp, vf, i);
		if (rc)
			goto out;
	}

	/* remove multicasts */
	bnx2x_vf_mcast(bp, vf, NULL, 0, true);

	/* dispatch final cleanup and wait for HW queues to flush */
	bnx2x_vf_flr_clnup_hw(bp, vf);

	/* release VF resources */
	bnx2x_vf_free_resc(bp, vf);

	/* re-open the mailbox */
	bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
	return;
out:
	BNX2X_ERR("vf[%d:%d] failed flr: rc %d\n",
		  vf->abs_vfid, i, rc);
}
static void bnx2x_vf_flr_clnup(struct bnx2x *bp)
{
	struct bnx2x_virtf *vf;
	int i;

	for (i = 0; i < BNX2X_NR_VIRTFN(bp); i++) {
		/* VF should be RESET & in FLR cleanup states */
		if (bnx2x_vf(bp, i, state) != VF_RESET ||
		    !bnx2x_vf(bp, i, flr_clnup_stage))
			continue;

		DP(BNX2X_MSG_IOV, "next vf to cleanup: %d. Num of vfs: %d\n",
		   i, BNX2X_NR_VIRTFN(bp));

		vf = BP_VF(bp, i);

		/* lock the vf pf channel */
		bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);

		/* invoke the VF FLR SM */
		bnx2x_vf_flr(bp, vf);

		/* mark the VF to be ACKED and continue */
		vf->flr_clnup_stage = false;
		bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);
	}

	/* Acknowledge the handled VFs.
	 * We acknowledge all the VFs for which an FLR was requested, even
	 * those we never opened, since the MCP will interrupt us immediately
	 * again if we only ack some of the bits, resulting in an endless
	 * loop. This can happen, for example, in KVM where an 'all ones' FLR
	 * request is sometimes issued by the hypervisor.
	 */
	DP(BNX2X_MSG_MCP, "DRV_STATUS_VF_DISABLED ACK for vfs 0x%x 0x%x\n",
	   bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);
	for (i = 0; i < FLRD_VFS_DWORDS; i++)
		SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i],
			  bp->vfdb->flrd_vfs[i]);

	bnx2x_fw_command(bp, DRV_MSG_CODE_VF_DISABLED_DONE, 0);

	/* clear the acked bits - better yet if the MCP implemented
	 * write to clear semantics
	 */
	for (i = 0; i < FLRD_VFS_DWORDS; i++)
		SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i], 0);
}
void bnx2x_vf_handle_flr_event(struct bnx2x *bp)
{
	int i;

	/* Read FLR'd VFs */
	for (i = 0; i < FLRD_VFS_DWORDS; i++)
		bp->vfdb->flrd_vfs[i] = SHMEM2_RD(bp, mcp_vf_disabled[i]);

	DP(BNX2X_MSG_MCP,
	   "DRV_STATUS_VF_DISABLED received for vfs 0x%x 0x%x\n",
	   bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);

	for_each_vf(bp, i) {
		struct bnx2x_virtf *vf = BP_VF(bp, i);
		u32 reset = 0;

		if (vf->abs_vfid < 32)
			reset = bp->vfdb->flrd_vfs[0] & (1 << vf->abs_vfid);
		else
			reset = bp->vfdb->flrd_vfs[1] &
				(1 << (vf->abs_vfid - 32));

		if (reset) {
			/* set as reset and ready for cleanup */
			vf->state = VF_RESET;
			vf->flr_clnup_stage = true;

			DP(BNX2X_MSG_IOV,
			   "Initiating Final cleanup for VF %d\n",
			   vf->abs_vfid);
		}
	}

	/* do the FLR cleanup for all marked VFs */
	bnx2x_vf_flr_clnup(bp);
}
/* IOV global initialization routines */
void bnx2x_iov_init_dq(struct bnx2x *bp)
{
	if (!IS_SRIOV(bp))
		return;

	/* Set the DQ such that the CID reflect the abs_vfid */
	REG_WR(bp, DORQ_REG_VF_NORM_VF_BASE, 0);
	REG_WR(bp, DORQ_REG_MAX_RVFID_SIZE, ilog2(BNX2X_MAX_NUM_OF_VFS));

	/* Set VFs starting CID. If it's > 0 the preceding CIDs belong to
	 * the PF L2 queues
	 */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_BASE, BNX2X_FIRST_VF_CID);

	/* The VF window size is the log2 of the max number of CIDs per VF */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_WND_SIZE, BNX2X_VF_CID_WND);

	/* The VF doorbell size 0 - *B, 4 - 128B. We set it here to match
	 * the PF doorbell size although the 2 are independent.
	 */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST, 3);

	/* No security checks for now -
	 * configure single rule (out of 16) mask = 0x1, value = 0x0,
	 * CID range 0 - 0x1ffff
	 */
	REG_WR(bp, DORQ_REG_VF_TYPE_MASK_0, 1);
	REG_WR(bp, DORQ_REG_VF_TYPE_VALUE_0, 0);
	REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0);
	REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff);

	/* set the VF doorbell threshold */
	REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 4);
}
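
/* Illustration (assumed window size): with the windowing configured
 * above, VF n ringing the doorbell of its local queue q lands on
 *
 *	(n << BNX2X_VF_CID_WND) + q
 *
 * within the VF CID range - e.g. if BNX2X_VF_CID_WND were 2 (4 CIDs per
 * VF), VF 3 / queue 1 would map to offset 13. The inverse decode is
 * shown in bnx2x_iov_eq_sp_event() below; the actual window size is
 * whatever BNX2X_VF_CID_WND is defined to in bnx2x_sriov.h.
 */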
void bnx2x_iov_init_dmae(struct bnx2x *bp)
{
	if (pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV))
		REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0);
}
static int bnx2x_vf_bus(struct bnx2x *bp, int vfid)
{
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

	return dev->bus->number + ((dev->devfn + iov->offset +
				    iov->stride * vfid) >> 8);
}
static int bnx2x_vf_devfn(struct bnx2x *bp, int vfid)
{
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

	return (dev->devfn + iov->offset + iov->stride * vfid) & 0xff;
}
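
/* Example (assumed values, per the SR-IOV routing-ID formula used in the
 * two helpers above): with the PF at bus 3, devfn 0x0, offset 4 and
 * stride 2, VF #5 sits at devfn (0x0 + 4 + 2 * 5) & 0xff = 0x0e on bus
 * 3 + ((0x0 + 4 + 2 * 5) >> 8) = 3, i.e. 03:01.6 in B:D.F notation.
 */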
static void bnx2x_vf_set_bars(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int i, n;
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

	for (i = 0, n = 0; i < PCI_SRIOV_NUM_BARS; i += 2, n++) {
		u64 start = pci_resource_start(dev, PCI_IOV_RESOURCES + i);
		u32 size = pci_resource_len(dev, PCI_IOV_RESOURCES + i);

		size /= iov->total;
		vf->bars[n].bar = start + size * vf->abs_vfid;
		vf->bars[n].size = size;
	}
}
static int bnx2x_ari_enabled(struct pci_dev *dev)
{
	return dev->bus->self && dev->bus->self->ari_enabled;
}
static void
bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
{
	int sb_id;
	u32 val;
	u8 fid, current_pf = 0;

	/* IGU in normal mode - read CAM */
	for (sb_id = 0; sb_id < IGU_REG_MAPPING_MEMORY_SIZE; sb_id++) {
		val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + sb_id * 4);
		if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
			continue;
		fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID);
		if (fid & IGU_FID_ENCODE_IS_PF)
			current_pf = fid & IGU_FID_PF_NUM_MASK;
		else if (current_pf == BP_FUNC(bp))
			bnx2x_vf_set_igu_info(bp, sb_id,
					      (fid & IGU_FID_VF_NUM_MASK));
		DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n",
		   ((fid & IGU_FID_ENCODE_IS_PF) ? "PF" : "VF"),
		   ((fid & IGU_FID_ENCODE_IS_PF) ? (fid & IGU_FID_PF_NUM_MASK) :
		   (fid & IGU_FID_VF_NUM_MASK)), sb_id,
		   GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR));
	}
	DP(BNX2X_MSG_IOV, "vf_sbs_pool is %d\n", BP_VFDB(bp)->vf_sbs_pool);
}
static void __bnx2x_iov_free_vfdb(struct bnx2x *bp)
{
	if (bp->vfdb) {
		kfree(bp->vfdb->vfqs);
		kfree(bp->vfdb->vfs);
		kfree(bp->vfdb);
	}
	bp->vfdb = NULL;
}
static int bnx2x_sriov_pci_cfg_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
{
	int pos;
	struct pci_dev *dev = bp->pdev;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos) {
		BNX2X_ERR("failed to find SRIOV capability in device\n");
		return -ENODEV;
	}

	iov->pos = pos;
	DP(BNX2X_MSG_IOV, "sriov ext pos %d\n", pos);
	pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
	pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &iov->total);
	pci_read_config_word(dev, pos + PCI_SRIOV_INITIAL_VF, &iov->initial);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
	pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
	pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
	pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);

	return 0;
}
static int bnx2x_sriov_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
{
	u32 val;

	/* read the SRIOV capability structure
	 * The fields can be read via configuration read or
	 * directly from the device (starting at offset PCICFG_OFFSET)
	 */
	if (bnx2x_sriov_pci_cfg_info(bp, iov))
		return -ENODEV;

	/* get the number of SRIOV bars */
	iov->nres = 0;

	/* read the first_vfid */
	val = REG_RD(bp, PCICFG_OFFSET + GRC_CONFIG_REG_PF_INIT_VF);
	iov->first_vf_in_pf = ((val & GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK)
			       * 8) - (BNX2X_MAX_NUM_OF_VFS * BP_PATH(bp));

	DP(BNX2X_MSG_IOV,
	   "IOV info[%d]: first vf %d, nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
	   BP_FUNC(bp),
	   iov->first_vf_in_pf, iov->nres, iov->cap, iov->ctrl, iov->total,
	   iov->initial, iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);

	return 0;
}
/* must be called after PF bars are mapped */
int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
		       int num_vfs_param)
{
	int err, i;
	struct bnx2x_sriov *iov;
	struct pci_dev *dev = bp->pdev;

	bp->vfdb = NULL;

	/* verify is pf */
	if (IS_VF(bp))
		return 0;

	/* verify sriov capability is present in configuration space */
	if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV))
		return 0;

	/* verify chip revision */
	if (CHIP_IS_E1x(bp))
		return 0;

	/* check if SRIOV support is turned off */
	if (!num_vfs_param)
		return 0;

	/* SRIOV assumes that num of PF CIDs < BNX2X_FIRST_VF_CID */
	if (BNX2X_L2_MAX_CID(bp) >= BNX2X_FIRST_VF_CID) {
		BNX2X_ERR("PF cids %d are overspilling into vf space (starts at %d). Abort SRIOV\n",
			  BNX2X_L2_MAX_CID(bp), BNX2X_FIRST_VF_CID);
		return 0;
	}

	/* SRIOV can be enabled only with MSIX */
	if (int_mode_param == BNX2X_INT_MODE_MSI ||
	    int_mode_param == BNX2X_INT_MODE_INTX) {
		BNX2X_ERR("Forced MSI/INTx mode is incompatible with SRIOV\n");
		return 0;
	}

	err = -EIO;
	/* verify ari is enabled */
	if (!bnx2x_ari_enabled(bp->pdev)) {
		BNX2X_ERR("ARI not supported (check pci bridge ARI forwarding), SRIOV can not be enabled\n");
		return 0;
	}

	/* verify igu is in normal mode */
	if (CHIP_INT_MODE_IS_BC(bp)) {
		BNX2X_ERR("IGU not normal mode, SRIOV can not be enabled\n");
		return 0;
	}

	/* allocate the vfs database */
	bp->vfdb = kzalloc(sizeof(*(bp->vfdb)), GFP_KERNEL);
	if (!bp->vfdb) {
		BNX2X_ERR("failed to allocate vf database\n");
		err = -ENOMEM;
		goto failed;
	}

	/* get the sriov info - Linux already collected all the pertinent
	 * information, however the sriov structure is for the private use
	 * of the pci module. Also we want this information regardless
	 * of the hyper-visor.
	 */
	iov = &(bp->vfdb->sriov);
	err = bnx2x_sriov_info(bp, iov);
	if (err)
		goto failed;

	/* SR-IOV capability was enabled but there are no VFs */
	if (iov->total == 0)
		goto failed;

	iov->nr_virtfn = min_t(u16, iov->total, num_vfs_param);

	DP(BNX2X_MSG_IOV, "num_vfs_param was %d, nr_virtfn was %d\n",
	   num_vfs_param, iov->nr_virtfn);

	/* allocate the vf array */
	bp->vfdb->vfs = kzalloc(sizeof(struct bnx2x_virtf) *
				BNX2X_NR_VIRTFN(bp), GFP_KERNEL);
	if (!bp->vfdb->vfs) {
		BNX2X_ERR("failed to allocate vf array\n");
		err = -ENOMEM;
		goto failed;
	}

	/* Initial VF init - index and abs_vfid - nr_virtfn must be set */
	for_each_vf(bp, i) {
		bnx2x_vf(bp, i, index) = i;
		bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i;
		bnx2x_vf(bp, i, state) = VF_FREE;
		mutex_init(&bnx2x_vf(bp, i, op_mutex));
		bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE;
	}

	/* re-read the IGU CAM for VFs - index and abs_vfid must be set */
	bnx2x_get_vf_igu_cam_info(bp);

	/* allocate the queue arrays for all VFs */
	bp->vfdb->vfqs = kzalloc(
		BNX2X_MAX_NUM_VF_QUEUES * sizeof(struct bnx2x_vf_queue),
		GFP_KERNEL);

	DP(BNX2X_MSG_IOV, "bp->vfdb->vfqs was %p\n", bp->vfdb->vfqs);

	if (!bp->vfdb->vfqs) {
		BNX2X_ERR("failed to allocate vf queue array\n");
		err = -ENOMEM;
		goto failed;
	}

	/* Prepare the VFs event synchronization mechanism */
	mutex_init(&bp->vfdb->event_mutex);

	return 0;
failed:
	DP(BNX2X_MSG_IOV, "Failed err=%d\n", err);
	__bnx2x_iov_free_vfdb(bp);
	return err;
}
void bnx2x_iov_remove_one(struct bnx2x *bp)
{
	int vf_idx;

	/* if SRIOV is not enabled there's nothing to do */
	if (!IS_SRIOV(bp))
		return;

	DP(BNX2X_MSG_IOV, "about to call disable sriov\n");
	pci_disable_sriov(bp->pdev);
	DP(BNX2X_MSG_IOV, "sriov disabled\n");

	/* disable access to all VFs */
	for (vf_idx = 0; vf_idx < bp->vfdb->sriov.total; vf_idx++) {
		bnx2x_pretend_func(bp,
				   HW_VF_HANDLE(bp,
						bp->vfdb->sriov.first_vf_in_pf +
						vf_idx));
		DP(BNX2X_MSG_IOV, "disabling internal access for vf %d\n",
		   bp->vfdb->sriov.first_vf_in_pf + vf_idx);
		bnx2x_vf_enable_internal(bp, 0);
		bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
	}

	/* free vf database */
	__bnx2x_iov_free_vfdb(bp);
}
void bnx2x_iov_free_mem(struct bnx2x *bp)
{
	int i;

	if (!IS_SRIOV(bp))
		return;

	/* free vfs hw contexts */
	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
		struct hw_dma *cxt = &bp->vfdb->context[i];
		BNX2X_PCI_FREE(cxt->addr, cxt->mapping, cxt->size);
	}

	BNX2X_PCI_FREE(BP_VFDB(bp)->sp_dma.addr,
		       BP_VFDB(bp)->sp_dma.mapping,
		       BP_VFDB(bp)->sp_dma.size);

	BNX2X_PCI_FREE(BP_VF_MBX_DMA(bp)->addr,
		       BP_VF_MBX_DMA(bp)->mapping,
		       BP_VF_MBX_DMA(bp)->size);

	BNX2X_PCI_FREE(BP_VF_BULLETIN_DMA(bp)->addr,
		       BP_VF_BULLETIN_DMA(bp)->mapping,
		       BP_VF_BULLETIN_DMA(bp)->size);
}
int bnx2x_iov_alloc_mem(struct bnx2x *bp)
{
	size_t tot_size;
	int i, rc = 0;

	if (!IS_SRIOV(bp))
		return rc;

	/* allocate vfs hw contexts */
	tot_size = (BP_VFDB(bp)->sriov.first_vf_in_pf + BNX2X_NR_VIRTFN(bp)) *
		BNX2X_CIDS_PER_VF * sizeof(union cdu_context);

	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
		struct hw_dma *cxt = BP_VF_CXT_PAGE(bp, i);
		cxt->size = min_t(size_t, tot_size, CDU_ILT_PAGE_SZ);

		if (cxt->size) {
			cxt->addr = BNX2X_PCI_ALLOC(&cxt->mapping, cxt->size);
			if (!cxt->addr)
				goto alloc_mem_err;
		} else {
			cxt->addr = NULL;
			cxt->mapping = 0;
		}
		tot_size -= cxt->size;
	}

	/* allocate vfs ramrods dma memory - client_init and set_mac */
	tot_size = BNX2X_NR_VIRTFN(bp) * sizeof(struct bnx2x_vf_sp);
	BP_VFDB(bp)->sp_dma.addr = BNX2X_PCI_ALLOC(&BP_VFDB(bp)->sp_dma.mapping,
						   tot_size);
	if (!BP_VFDB(bp)->sp_dma.addr)
		goto alloc_mem_err;
	BP_VFDB(bp)->sp_dma.size = tot_size;

	/* allocate mailboxes */
	tot_size = BNX2X_NR_VIRTFN(bp) * MBX_MSG_ALIGNED_SIZE;
	BP_VF_MBX_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_MBX_DMA(bp)->mapping,
						  tot_size);
	if (!BP_VF_MBX_DMA(bp)->addr)
		goto alloc_mem_err;

	BP_VF_MBX_DMA(bp)->size = tot_size;

	/* allocate local bulletin boards */
	tot_size = BNX2X_NR_VIRTFN(bp) * BULLETIN_CONTENT_SIZE;
	BP_VF_BULLETIN_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_BULLETIN_DMA(bp)->mapping,
						       tot_size);
	if (!BP_VF_BULLETIN_DMA(bp)->addr)
		goto alloc_mem_err;

	BP_VF_BULLETIN_DMA(bp)->size = tot_size;

	return 0;

alloc_mem_err:
	return -ENOMEM;
}
static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
			   struct bnx2x_vf_queue *q)
{
	u8 cl_id = vfq_cl_id(vf, q);
	u8 func_id = FW_VF_HANDLE(vf->abs_vfid);
	unsigned long q_type = 0;

	set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
	set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);

	/* Queue State object */
	bnx2x_init_queue_obj(bp, &q->sp_obj,
			     cl_id, &q->cid, 1, func_id,
			     bnx2x_vf_sp(bp, vf, q_data),
			     bnx2x_vf_sp_map(bp, vf, q_data),
			     q_type);

	/* sp indication is set only when vlan/mac/etc. are initialized */
	q->sp_initialized = false;

	DP(BNX2X_MSG_IOV,
	   "initialized vf %d's queue object. func id set to %d. cid set to 0x%x\n",
	   vf->abs_vfid, q->sp_obj.func_id, q->cid);
}
/* called by bnx2x_nic_load */
int bnx2x_iov_nic_init(struct bnx2x *bp)
{
	int vfid;

	if (!IS_SRIOV(bp)) {
		DP(BNX2X_MSG_IOV, "vfdb was not allocated\n");
		return 0;
	}

	DP(BNX2X_MSG_IOV, "num of vfs: %d\n", (bp)->vfdb->sriov.nr_virtfn);

	/* let FLR complete ... */
	msleep(100);

	/* initialize vf database */
	for_each_vf(bp, vfid) {
		struct bnx2x_virtf *vf = BP_VF(bp, vfid);

		int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vfid) *
			BNX2X_CIDS_PER_VF;

		union cdu_context *base_cxt = (union cdu_context *)
			BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
			(base_vf_cid & (ILT_PAGE_CIDS-1));

		DP(BNX2X_MSG_IOV,
		   "VF[%d] Max IGU SBs: %d, base vf cid 0x%x, base cid 0x%x, base cxt %p\n",
		   vf->abs_vfid, vf_sb_count(vf), base_vf_cid,
		   BNX2X_FIRST_VF_CID + base_vf_cid, base_cxt);

		/* init statically provisioned resources */
		bnx2x_iov_static_resc(bp, vf);

		/* queues are initialized during VF-ACQUIRE */
		vf->filter_state = 0;
		vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id);

		/* init mcast object - This object will be re-initialized
		 * during VF-ACQUIRE with the proper cl_id and cid.
		 * It needs to be initialized here so that it can be safely
		 * handled by a subsequent FLR flow.
		 */
		vf->mcast_list_len = 0;
		bnx2x_init_mcast_obj(bp, &vf->mcast_obj, 0xFF,
				     0xFF, 0xFF, 0xFF,
				     bnx2x_vf_sp(bp, vf, mcast_rdata),
				     bnx2x_vf_sp_map(bp, vf, mcast_rdata),
				     BNX2X_FILTER_MCAST_PENDING,
				     &vf->filter_state,
				     BNX2X_OBJ_TYPE_RX_TX);

		/* set the mailbox message addresses */
		BP_VF_MBX(bp, vfid)->msg = (struct bnx2x_vf_mbx_msg *)
			(((u8 *)BP_VF_MBX_DMA(bp)->addr) + vfid *
			 MBX_MSG_ALIGNED_SIZE);

		BP_VF_MBX(bp, vfid)->msg_mapping = BP_VF_MBX_DMA(bp)->mapping +
			vfid * MBX_MSG_ALIGNED_SIZE;

		/* Enable vf mailbox */
		bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
	}

	/* Final VF init */
	for_each_vf(bp, vfid) {
		struct bnx2x_virtf *vf = BP_VF(bp, vfid);

		/* fill in the BDF and bars */
		vf->bus = bnx2x_vf_bus(bp, vfid);
		vf->devfn = bnx2x_vf_devfn(bp, vfid);
		bnx2x_vf_set_bars(bp, vf);

		DP(BNX2X_MSG_IOV,
		   "VF info[%d]: bus 0x%x, devfn 0x%x, bar0 [0x%x, %d], bar1 [0x%x, %d], bar2 [0x%x, %d]\n",
		   vf->abs_vfid, vf->bus, vf->devfn,
		   (unsigned)vf->bars[0].bar, vf->bars[0].size,
		   (unsigned)vf->bars[1].bar, vf->bars[1].size,
		   (unsigned)vf->bars[2].bar, vf->bars[2].size);
	}

	return 0;
}
/* called by bnx2x_chip_cleanup */
int bnx2x_iov_chip_cleanup(struct bnx2x *bp)
{
	int i;

	if (!IS_SRIOV(bp))
		return 0;

	/* release all the VFs */
	for_each_vf(bp, i)
		bnx2x_vf_release(bp, BP_VF(bp, i));

	return 0;
}
/* called by bnx2x_init_hw_func, returns the next ilt line */
int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line)
{
	int i;
	struct bnx2x_ilt *ilt = BP_ILT(bp);

	if (!IS_SRIOV(bp))
		return line;

	/* set vfs ilt lines */
	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
		struct hw_dma *hw_cxt = BP_VF_CXT_PAGE(bp, i);

		ilt->lines[line+i].page = hw_cxt->addr;
		ilt->lines[line+i].page_mapping = hw_cxt->mapping;
		ilt->lines[line+i].size = hw_cxt->size; /* doesn't matter */
	}
	return line + i;
}
static u8 bnx2x_iov_is_vf_cid(struct bnx2x *bp, u16 cid)
{
	return ((cid >= BNX2X_FIRST_VF_CID) &&
		((cid - BNX2X_FIRST_VF_CID) < BNX2X_VF_CIDS));
}
static
void bnx2x_vf_handle_classification_eqe(struct bnx2x *bp,
					struct bnx2x_vf_queue *vfq,
					union event_ring_elem *elem)
{
	unsigned long ramrod_flags = 0;
	int rc = 0;

	/* Always push next commands out, don't wait here */
	set_bit(RAMROD_CONT, &ramrod_flags);

	switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) {
	case BNX2X_FILTER_MAC_PENDING:
		rc = vfq->mac_obj.complete(bp, &vfq->mac_obj, elem,
					   &ramrod_flags);
		break;
	case BNX2X_FILTER_VLAN_PENDING:
		rc = vfq->vlan_obj.complete(bp, &vfq->vlan_obj, elem,
					    &ramrod_flags);
		break;
	default:
		BNX2X_ERR("Unsupported classification command: %d\n",
			  elem->message.data.eth_event.echo);
		return;
	}
	if (rc < 0)
		BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
	else if (rc > 0)
		DP(BNX2X_MSG_IOV, "Scheduled next pending commands...\n");
}
static
void bnx2x_vf_handle_mcast_eqe(struct bnx2x *bp,
			       struct bnx2x_virtf *vf)
{
	struct bnx2x_mcast_ramrod_params rparam = {NULL};
	int rc;

	rparam.mcast_obj = &vf->mcast_obj;
	vf->mcast_obj.raw.clear_pending(&vf->mcast_obj.raw);

	/* If there are pending mcast commands - send them */
	if (vf->mcast_obj.check_pending(&vf->mcast_obj)) {
		rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
		if (rc < 0)
			BNX2X_ERR("Failed to send pending mcast commands: %d\n",
				  rc);
	}
}
static
void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp,
				 struct bnx2x_virtf *vf)
{
	smp_mb__before_clear_bit();
	clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
	smp_mb__after_clear_bit();
}
static void bnx2x_vf_handle_rss_update_eqe(struct bnx2x *bp,
					   struct bnx2x_virtf *vf)
{
	vf->rss_conf_obj.raw.clear_pending(&vf->rss_conf_obj.raw);
}
int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)
{
	struct bnx2x_virtf *vf;
	int qidx = 0, abs_vfid;
	u8 opcode;
	u16 cid = 0xffff;

	if (!IS_SRIOV(bp))
		return 1;

	/* first get the cid - the only events we handle here are cfc-delete
	 * and set-mac completion
	 */
	opcode = elem->message.opcode;

	switch (opcode) {
	case EVENT_RING_OPCODE_CFC_DEL:
		cid = SW_CID((__force __le32)
			     elem->message.data.cfc_del_event.cid);
		DP(BNX2X_MSG_IOV, "checking cfc-del comp cid=%d\n", cid);
		break;
	case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
	case EVENT_RING_OPCODE_MULTICAST_RULES:
	case EVENT_RING_OPCODE_FILTERS_RULES:
	case EVENT_RING_OPCODE_RSS_UPDATE_RULES:
		cid = (elem->message.data.eth_event.echo &
		       BNX2X_SWCID_MASK);
		DP(BNX2X_MSG_IOV, "checking filtering comp cid=%d\n", cid);
		break;
	case EVENT_RING_OPCODE_VF_FLR:
		abs_vfid = elem->message.data.vf_flr_event.vf_id;
		DP(BNX2X_MSG_IOV, "Got VF FLR notification abs_vfid=%d\n",
		   abs_vfid);
		goto get_vf;
	case EVENT_RING_OPCODE_MALICIOUS_VF:
		abs_vfid = elem->message.data.malicious_vf_event.vf_id;
		BNX2X_ERR("Got VF MALICIOUS notification abs_vfid=%d err_id=0x%x\n",
			  abs_vfid,
			  elem->message.data.malicious_vf_event.err_id);
		goto get_vf;
	default:
		return 1;
	}

	/* check if the cid is the VF range */
	if (!bnx2x_iov_is_vf_cid(bp, cid)) {
		DP(BNX2X_MSG_IOV, "cid is outside vf range: %d\n", cid);
		return 1;
	}

	/* extract vf and rxq index from vf_cid - relies on the following:
	 * 1. vfid on cid reflects the true abs_vfid
	 * 2. The max number of VFs (per path) is 64
	 */
	qidx = cid & ((1 << BNX2X_VF_CID_WND)-1);
	abs_vfid = (cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
get_vf:
	vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);

	if (!vf) {
		BNX2X_ERR("EQ completion for unknown VF, cid %d, abs_vfid %d\n",
			  cid, abs_vfid);
		return 0;
	}

	switch (opcode) {
	case EVENT_RING_OPCODE_CFC_DEL:
		DP(BNX2X_MSG_IOV, "got VF [%d:%d] cfc delete ramrod\n",
		   vf->abs_vfid, qidx);
		vfq_get(vf, qidx)->sp_obj.complete_cmd(bp,
						       &vfq_get(vf,
								qidx)->sp_obj,
						       BNX2X_Q_CMD_CFC_DEL);
		break;
	case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mac/vlan ramrod\n",
		   vf->abs_vfid, qidx);
		bnx2x_vf_handle_classification_eqe(bp, vfq_get(vf, qidx), elem);
		break;
	case EVENT_RING_OPCODE_MULTICAST_RULES:
		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mcast ramrod\n",
		   vf->abs_vfid, qidx);
		bnx2x_vf_handle_mcast_eqe(bp, vf);
		break;
	case EVENT_RING_OPCODE_FILTERS_RULES:
		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set rx-mode ramrod\n",
		   vf->abs_vfid, qidx);
		bnx2x_vf_handle_filters_eqe(bp, vf);
		break;
	case EVENT_RING_OPCODE_RSS_UPDATE_RULES:
		DP(BNX2X_MSG_IOV, "got VF [%d:%d] RSS update ramrod\n",
		   vf->abs_vfid, qidx);
		bnx2x_vf_handle_rss_update_eqe(bp, vf);
	case EVENT_RING_OPCODE_VF_FLR:
	case EVENT_RING_OPCODE_MALICIOUS_VF:
		/* Do nothing for now */
		return 0;
	}

	return 0;
}
static struct bnx2x_virtf *bnx2x_vf_by_cid(struct bnx2x *bp, int vf_cid)
{
	/* extract the vf from vf_cid - relies on the following:
	 * 1. vfid on cid reflects the true abs_vfid
	 * 2. The max number of VFs (per path) is 64
	 */
	int abs_vfid = (vf_cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);

	return bnx2x_vf_by_abs_fid(bp, abs_vfid);
}
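
/* Worked example (illustration, assuming BNX2X_VF_CID_WND == 2): a cid
 * whose low bits are 13 decodes to q_index = 13 & 3 = 1 and
 * abs_vfid = (13 >> 2) & 63 = 3 - the inverse of the doorbell CID
 * mapping sketched above bnx2x_iov_init_dq().
 */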
void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
				struct bnx2x_queue_sp_obj **q_obj)
{
	struct bnx2x_virtf *vf;

	if (!IS_SRIOV(bp))
		return;

	vf = bnx2x_vf_by_cid(bp, vf_cid);

	if (vf) {
		/* extract queue index from vf_cid - relies on the following:
		 * 1. vfid on cid reflects the true abs_vfid
		 * 2. The max number of VFs (per path) is 64
		 */
		int q_index = vf_cid & ((1 << BNX2X_VF_CID_WND)-1);
		*q_obj = &bnx2x_vfq(vf, q_index, sp_obj);
	} else {
		BNX2X_ERR("No vf matching cid %d\n", vf_cid);
	}
}
void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
{
	int i;
	int first_queue_query_index, num_queues_req;
	dma_addr_t cur_data_offset;
	struct stats_query_entry *cur_query_entry;
	u8 stats_count = 0;
	bool is_fcoe = false;

	if (!IS_SRIOV(bp))
		return;

	if (!NO_FCOE(bp))
		is_fcoe = true;

	/* fcoe adds one global request and one queue request */
	num_queues_req = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe;
	first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX -
		(is_fcoe ? 0 : 1);

	DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
	       "BNX2X_NUM_ETH_QUEUES %d, is_fcoe %d, first_queue_query_index %d => determined the last non virtual statistics query index is %d. Will add queries on top of that\n",
	       BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index,
	       first_queue_query_index + num_queues_req);

	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, queue_stats) +
		num_queues_req * sizeof(struct per_queue_stats);

	cur_query_entry = &bp->fw_stats_req->
		query[first_queue_query_index + num_queues_req];

	for_each_vf(bp, i) {
		int j;
		struct bnx2x_virtf *vf = BP_VF(bp, i);

		if (vf->state != VF_ENABLED) {
			DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
			       "vf %d not enabled so no stats for it\n",
			       vf->abs_vfid);
			continue;
		}

		DP(BNX2X_MSG_IOV, "add addresses for vf %d\n", vf->abs_vfid);
		for_each_vfq(vf, j) {
			struct bnx2x_vf_queue *rxq = vfq_get(vf, j);

			dma_addr_t q_stats_addr =
				vf->fw_stat_map + j * vf->stats_stride;

			/* collect stats for active queues only */
			if (bnx2x_get_q_logical_state(bp, &rxq->sp_obj) ==
			    BNX2X_Q_LOGICAL_STATE_STOPPED)
				continue;

			/* create stats query entry for this queue */
			cur_query_entry->kind = STATS_TYPE_QUEUE;
			cur_query_entry->index = vfq_stat_id(vf, rxq);
			cur_query_entry->funcID =
				cpu_to_le16(FW_VF_HANDLE(vf->abs_vfid));
			cur_query_entry->address.hi =
				cpu_to_le32(U64_HI(q_stats_addr));
			cur_query_entry->address.lo =
				cpu_to_le32(U64_LO(q_stats_addr));
			DP(BNX2X_MSG_IOV,
			   "added address %x %x for vf %d queue %d client %d\n",
			   cur_query_entry->address.hi,
			   cur_query_entry->address.lo, cur_query_entry->funcID,
			   j, cur_query_entry->index);
			cur_query_entry++;
			cur_data_offset += sizeof(struct per_queue_stats);
			stats_count++;

			/* all stats are coalesced to the leading queue */
			if (vf->cfg_flags & VF_CFG_STATS_COALESCE)
				break;
		}
	}
	bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count;
}
static inline
struct bnx2x_virtf *__vf_from_stat_id(struct bnx2x *bp, u8 stat_id)
{
	int i;
	struct bnx2x_virtf *vf = NULL;

	for_each_vf(bp, i) {
		vf = BP_VF(bp, i);
		if (stat_id >= vf->igu_base_id &&
		    stat_id < vf->igu_base_id + vf_sb_count(vf))
			break;
	}
	return vf;
}
/* VF API helpers */
static void bnx2x_vf_qtbl_set_q(struct bnx2x *bp, u8 abs_vfid, u8 qid,
				u8 enable)
{
	u32 reg = PXP_REG_HST_ZONE_PERMISSION_TABLE + qid * 4;
	u32 val = enable ? (abs_vfid | (1 << 6)) : 0;

	REG_WR(bp, reg, val);
}
static void bnx2x_vf_clr_qtbl(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int i;

	for_each_vfq(vf, i)
		bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid,
				    vfq_qzone_id(vf, vfq_get(vf, i)), false);
}
static void bnx2x_vf_igu_disable(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	u32 val;

	/* clear the VF configuration - pretend */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
	val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
	val &= ~(IGU_VF_CONF_MSI_MSIX_EN | IGU_VF_CONF_SINGLE_ISR_EN |
		 IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_PARENT_MASK);
	REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
}
u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	return min_t(u8, min_t(u8, vf_sb_count(vf), BNX2X_CIDS_PER_VF),
		     BNX2X_VF_MAX_QUEUES);
}
static
int bnx2x_vf_chk_avail_resc(struct bnx2x *bp, struct bnx2x_virtf *vf,
			    struct vf_pf_resc_request *req_resc)
{
	u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
	u8 txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);

	/* Save a vlan filter for the Hypervisor */
	return ((req_resc->num_rxqs <= rxq_cnt) &&
		(req_resc->num_txqs <= txq_cnt) &&
		(req_resc->num_sbs <= vf_sb_count(vf)) &&
		(req_resc->num_mac_filters <= vf_mac_rules_cnt(vf)) &&
		(req_resc->num_vlan_filters <= vf_vlan_rules_visible_cnt(vf)));
}
/* CORE VF API */
int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
		     struct vf_pf_resc_request *resc)
{
	int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vf->index) *
		BNX2X_CIDS_PER_VF;

	union cdu_context *base_cxt = (union cdu_context *)
		BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
		(base_vf_cid & (ILT_PAGE_CIDS-1));
	int i;

	/* if state is 'acquired' the VF was not released or FLR'd, in
	 * this case the returned resources match the already acquired
	 * resources. Verify that the requested numbers do not exceed
	 * the already acquired numbers.
	 */
	if (vf->state == VF_ACQUIRED) {
		DP(BNX2X_MSG_IOV, "VF[%d] Trying to re-acquire resources (VF was not released or FLR'd)\n",
		   vf->abs_vfid);

		if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
			BNX2X_ERR("VF[%d] When re-acquiring resources, requested numbers must be <= the previously acquired numbers\n",
				  vf->abs_vfid);
			return -EINVAL;
		}
		return 0;
	}

	/* Otherwise vf state must be 'free' or 'reset' */
	if (vf->state != VF_FREE && vf->state != VF_RESET) {
		BNX2X_ERR("VF[%d] Can not acquire a VF with state %d\n",
			  vf->abs_vfid, vf->state);
		return -EINVAL;
	}

	/* static allocation:
	 * the global maximum number are fixed per VF. Fail the request if
	 * requested number exceed these globals
	 */
	if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
		DP(BNX2X_MSG_IOV,
		   "cannot fulfill vf resource request. Placing maximal available values in response\n");
		/* set the max resource in the vf */
		return -ENOMEM;
	}

	/* Set resources counters - 0 request means max available */
	vf_sb_count(vf) = resc->num_sbs;
	vf_rxq_count(vf) = resc->num_rxqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
	vf_txq_count(vf) = resc->num_txqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
	if (resc->num_mac_filters)
		vf_mac_rules_cnt(vf) = resc->num_mac_filters;
	/* Add an additional vlan filter credit for the hypervisor */
	bnx2x_iov_re_set_vlan_filters(bp, vf, resc->num_vlan_filters + 1);

	DP(BNX2X_MSG_IOV,
	   "Fulfilling vf request: sb count %d, tx_count %d, rx_count %d, mac_rules_count %d, vlan_rules_count %d\n",
	   vf_sb_count(vf), vf_rxq_count(vf),
	   vf_txq_count(vf), vf_mac_rules_cnt(vf),
	   vf_vlan_rules_visible_cnt(vf));

	/* Initialize the queues */
	if (!vf->vfqs) {
		DP(BNX2X_MSG_IOV, "vf->vfqs was not allocated\n");
		return -EINVAL;
	}

	for_each_vfq(vf, i) {
		struct bnx2x_vf_queue *q = vfq_get(vf, i);

		if (!q) {
			BNX2X_ERR("q number %d was not allocated\n", i);
			return -EINVAL;
		}

		q->index = i;
		q->cxt = &((base_cxt + i)->eth);
		q->cid = BNX2X_FIRST_VF_CID + base_vf_cid + i;

		DP(BNX2X_MSG_IOV, "VFQ[%d:%d]: index %d, cid 0x%x, cxt %p\n",
		   vf->abs_vfid, i, q->index, q->cid, q->cxt);

		/* init SP objects */
		bnx2x_vfq_init(bp, vf, q);
	}
	vf->state = VF_ACQUIRED;
	return 0;
}
int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map)
{
	struct bnx2x_func_init_params func_init = {0};
	u16 flags = 0;
	int i;

	/* the sb resources are initialized at this point, do the
	 * FW/HW initializations
	 */
	for_each_vf_sb(vf, i)
		bnx2x_init_sb(bp, (dma_addr_t)sb_map[i], vf->abs_vfid, true,
			      vf_igu_sb(vf, i), vf_igu_sb(vf, i));

	/* Sanity checks */
	if (vf->state != VF_ACQUIRED) {
		DP(BNX2X_MSG_IOV, "VF[%d] is not in VF_ACQUIRED, but %d\n",
		   vf->abs_vfid, vf->state);
		return -EINVAL;
	}

	/* let FLR complete ... */
	msleep(100);

	/* FLR cleanup epilogue */
	if (bnx2x_vf_flr_clnup_epilog(bp, vf->abs_vfid))
		return -EBUSY;

	/* reset IGU VF statistics: MSIX */
	REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + vf->abs_vfid * 4, 0);

	/* vf init */
	if (vf->cfg_flags & VF_CFG_STATS)
		flags |= (FUNC_FLG_STATS | FUNC_FLG_SPQ);

	if (vf->cfg_flags & VF_CFG_TPA)
		flags |= FUNC_FLG_TPA;

	if (is_vf_multi(vf))
		flags |= FUNC_FLG_RSS;

	/* function setup */
	func_init.func_flgs = flags;
	func_init.pf_id = BP_FUNC(bp);
	func_init.func_id = FW_VF_HANDLE(vf->abs_vfid);
	func_init.fw_stat_map = vf->fw_stat_map;
	func_init.spq_map = vf->spq_map;
	func_init.spq_prod = 0;
	bnx2x_func_init(bp, &func_init);

	/* Enable the vf */
	bnx2x_vf_enable_access(bp, vf->abs_vfid);
	bnx2x_vf_enable_traffic(bp, vf);

	/* queue protection table */
	for_each_vfq(vf, i)
		bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid,
				    vfq_qzone_id(vf, vfq_get(vf, i)), true);

	vf->state = VF_ENABLED;

	/* update vf bulletin board */
	bnx2x_post_vf_bulletin(bp, vf->index);

	return 0;
}
struct set_vf_state_cookie {
	struct bnx2x_virtf *vf;
	u8 state;
};

static void bnx2x_set_vf_state(void *cookie)
{
	struct set_vf_state_cookie *p = (struct set_vf_state_cookie *)cookie;

	p->vf->state = p->state;
}
int bnx2x_vf_close(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int rc = 0, i;

	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);

	/* Close all queues */
	for (i = 0; i < vf_rxq_count(vf); i++) {
		rc = bnx2x_vf_queue_teardown(bp, vf, i);
		if (rc)
			goto op_err;
	}

	/* disable the interrupts */
	DP(BNX2X_MSG_IOV, "disabling igu\n");
	bnx2x_vf_igu_disable(bp, vf);

	/* disable the VF */
	DP(BNX2X_MSG_IOV, "clearing qtbl\n");
	bnx2x_vf_clr_qtbl(bp, vf);

	/* need to make sure there are no outstanding stats ramrods which may
	 * cause the device to access the VF's stats buffer which it will free
	 * as soon as we return from the close flow.
	 */
	{
		struct set_vf_state_cookie cookie;

		cookie.vf = vf;
		cookie.state = VF_ACQUIRED;
		bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie);
	}

	DP(BNX2X_MSG_IOV, "set state to acquired\n");

	return 0;
op_err:
	BNX2X_ERR("vf[%d] CLOSE error: rc %d\n", vf->abs_vfid, rc);
	return rc;
}
/* VF release can be called either: 1. The VF was acquired but
 * not enabled 2. the vf was enabled or in the process of being
 * enabled
 */
int bnx2x_vf_free(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int rc;

	DP(BNX2X_MSG_IOV, "VF[%d] STATE: %s\n", vf->abs_vfid,
	   vf->state == VF_FREE ? "Free" :
	   vf->state == VF_ACQUIRED ? "Acquired" :
	   vf->state == VF_ENABLED ? "Enabled" :
	   vf->state == VF_RESET ? "Reset" :
	   "Unknown");

	switch (vf->state) {
	case VF_ENABLED:
		rc = bnx2x_vf_close(bp, vf);
		if (rc)
			goto op_err;
		/* Fallthrough to release resources */
	case VF_ACQUIRED:
		DP(BNX2X_MSG_IOV, "about to free resources\n");
		bnx2x_vf_free_resc(bp, vf);
		break;

	case VF_FREE:
	case VF_RESET:
	default:
		break;
	}
	return 0;
op_err:
	BNX2X_ERR("VF[%d] RELEASE error: rc %d\n", vf->abs_vfid, rc);
	return rc;
}
int bnx2x_vf_rss_update(struct bnx2x *bp, struct bnx2x_virtf *vf,
			struct bnx2x_config_rss_params *rss)
{
	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
	set_bit(RAMROD_COMP_WAIT, &rss->ramrod_flags);
	return bnx2x_config_rss(bp, rss);
}
int bnx2x_vf_tpa_update(struct bnx2x *bp, struct bnx2x_virtf *vf,
			struct vfpf_tpa_tlv *tlv,
			struct bnx2x_queue_update_tpa_params *params)
{
	aligned_u64 *sge_addr = tlv->tpa_client_info.sge_addr;
	struct bnx2x_queue_state_params qstate;
	int qid, rc = 0;

	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);

	/* Set ramrod params */
	memset(&qstate, 0, sizeof(struct bnx2x_queue_state_params));
	memcpy(&qstate.params.update_tpa, params,
	       sizeof(struct bnx2x_queue_update_tpa_params));
	qstate.cmd = BNX2X_Q_CMD_UPDATE_TPA;
	set_bit(RAMROD_COMP_WAIT, &qstate.ramrod_flags);

	for (qid = 0; qid < vf_rxq_count(vf); qid++) {
		qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
		qstate.params.update_tpa.sge_map = sge_addr[qid];
		DP(BNX2X_MSG_IOV, "sge_addr[%d:%d] %08x:%08x\n",
		   vf->abs_vfid, qid, U64_HI(sge_addr[qid]),
		   U64_LO(sge_addr[qid]));
		rc = bnx2x_queue_state_change(bp, &qstate);
		if (rc) {
			BNX2X_ERR("Failed to configure sge_addr %08x:%08x for [%d:%d]\n",
				  U64_HI(sge_addr[qid]), U64_LO(sge_addr[qid]),
				  vf->abs_vfid, qid);
			return rc;
		}
	}

	return rc;
}
/* VF release ~ VF close + VF release-resources
 *
 * Release is the ultimate SW shutdown and is called whenever an
 * irrecoverable error is encountered.
 */
int bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int rc;

	DP(BNX2X_MSG_IOV, "PF releasing vf %d\n", vf->abs_vfid);
	bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);

	rc = bnx2x_vf_free(bp, vf);
	if (rc)
		WARN(rc,
		     "VF[%d] Failed to release resources, rc=%d\n",
		     vf->abs_vfid, rc);
	bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);
	return rc;
}

static inline void bnx2x_vf_get_sbdf(struct bnx2x *bp,
				     struct bnx2x_virtf *vf, u32 *sbdf)
{
	*sbdf = vf->devfn | (vf->bus << 8);
}

void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
			      enum channel_tlvs tlv)
{
	/* we don't lock the channel for unsupported tlvs */
	if (!bnx2x_tlv_supported(tlv)) {
		BNX2X_ERR("attempting to lock with unsupported tlv. Aborting\n");
		return;
	}

	/* lock the channel */
	mutex_lock(&vf->op_mutex);

	/* record the locking op */
	vf->op_current = tlv;

	/* log the lock */
	DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel locked by %d\n",
	   vf->abs_vfid, tlv);
}

void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
				enum channel_tlvs expected_tlv)
{
	enum channel_tlvs current_tlv;

	if (!vf) {
		BNX2X_ERR("VF was %p\n", vf);
		return;
	}

	current_tlv = vf->op_current;

	/* we don't unlock the channel for unsupported tlvs */
	if (!bnx2x_tlv_supported(expected_tlv))
		return;

	WARN(expected_tlv != vf->op_current,
	     "lock mismatch: expected %d found %d", expected_tlv,
	     vf->op_current);

	/* clear the locking op */
	vf->op_current = CHANNEL_TLV_NONE;

	/* unlock the channel */
	mutex_unlock(&vf->op_mutex);

	/* log the unlock; use the tlv sampled before it was cleared */
	DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel unlocked by %d\n",
	   vf->abs_vfid, current_tlv);
}

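/* Usage sketch (illustrative only, not compiled): every PF-side flow that
 * touches a VF's configuration brackets its work with the channel lock,
 * passing the same TLV to lock and unlock so that mismatched pairs trip the
 * WARN above. CHANNEL_TLV_PF_SET_MAC is the value the set-mac ndo below
 * really uses; the body here is a placeholder.
 */
#if 0
	bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
	/* ... configure the VF while its mailbox flows are held off ... */
	bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
#endif
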
static int bnx2x_set_pf_tx_switching(struct bnx2x *bp, bool enable)
{
	struct bnx2x_queue_state_params q_params;
	u32 prev_flags;
	int i, rc;

	/* Verify changes are needed and record current Tx switching state */
	prev_flags = bp->flags;
	if (enable)
		bp->flags |= TX_SWITCHING;
	else
		bp->flags &= ~TX_SWITCHING;
	if (prev_flags == bp->flags)
		return 0;

	/* Verify state enables the sending of queue ramrods */
	if ((bp->state != BNX2X_STATE_OPEN) ||
	    (bnx2x_get_q_logical_state(bp,
				       &bnx2x_sp_obj(bp, &bp->fp[0]).q_obj) !=
	     BNX2X_Q_LOGICAL_STATE_ACTIVE))
		return 0;

	/* send q. update ramrod to configure Tx switching */
	memset(&q_params, 0, sizeof(q_params));
	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
	q_params.cmd = BNX2X_Q_CMD_UPDATE;
	__set_bit(BNX2X_Q_UPDATE_TX_SWITCHING_CHNG,
		  &q_params.params.update.update_flags);
	if (enable)
		__set_bit(BNX2X_Q_UPDATE_TX_SWITCHING,
			  &q_params.params.update.update_flags);
	else
		__clear_bit(BNX2X_Q_UPDATE_TX_SWITCHING,
			    &q_params.params.update.update_flags);

	/* send the ramrod on all the queues of the PF */
	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		/* Set the appropriate Queue object */
		q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;

		/* Update the Queue state */
		rc = bnx2x_queue_state_change(bp, &q_params);
		if (rc) {
			BNX2X_ERR("Failed to configure Tx switching\n");
			return rc;
		}
	}

	DP(BNX2X_MSG_IOV, "%s Tx Switching\n", enable ? "Enabled" : "Disabled");
	return 0;
}

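/* Illustrative note (not compiled): the prev_flags comparison above makes
 * the helper idempotent, so repeated calls with the same value cost
 * nothing. A hypothetical call site:
 */
#if 0
	rc = bnx2x_set_pf_tx_switching(bp, true);	/* sends queue-update ramrods */
	rc = bnx2x_set_pf_tx_switching(bp, true);	/* no-op, flags unchanged */
#endif
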
int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param)
{
	struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev));

	if (!IS_SRIOV(bp)) {
		BNX2X_ERR("failed to configure SR-IOV since vfdb was not allocated. Check dmesg for errors in probe stage\n");
		return -EINVAL;
	}

	DP(BNX2X_MSG_IOV, "bnx2x_sriov_configure called with %d, BNX2X_NR_VIRTFN(bp) was %d\n",
	   num_vfs_param, BNX2X_NR_VIRTFN(bp));

	/* HW channel is only operational when PF is up */
	if (bp->state != BNX2X_STATE_OPEN) {
		BNX2X_ERR("VF num configuration via sysfs not supported while PF is down\n");
		return -EINVAL;
	}

	/* we are always bound by the total_vfs in the configuration space */
	if (num_vfs_param > BNX2X_NR_VIRTFN(bp)) {
		BNX2X_ERR("truncating requested number of VFs (%d) down to maximum allowed (%d)\n",
			  num_vfs_param, BNX2X_NR_VIRTFN(bp));
		num_vfs_param = BNX2X_NR_VIRTFN(bp);
	}

	bp->requested_nr_virtfn = num_vfs_param;
	if (num_vfs_param == 0) {
		bnx2x_set_pf_tx_switching(bp, false);
		pci_disable_sriov(dev);
		return 0;
	} else {
		return bnx2x_enable_sriov(bp);
	}
}

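/* Illustrative wiring sketch (not compiled): this entry point is reached
 * through the PCI core's sriov_configure callback, which the driver hooks
 * in its struct pci_driver (done for real in bnx2x_main.c), so that writing
 * N to /sys/bus/pci/devices/<bdf>/sriov_numvfs lands here:
 */
#if 0
static struct pci_driver bnx2x_pci_driver = {
	/* ... */
	.sriov_configure = bnx2x_sriov_configure,
};
#endif
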
#define IGU_ENTRY_SIZE 4

int bnx2x_enable_sriov(struct bnx2x *bp)
{
	int rc = 0, req_vfs = bp->requested_nr_virtfn;
	int vf_idx, sb_idx, vfq_idx, qcount, first_vf;
	u32 igu_entry, address;
	u16 num_vf_queues;

	first_vf = bp->vfdb->sriov.first_vf_in_pf;

	/* statically distribute vf sb pool between VFs */
	num_vf_queues = min_t(u16, BNX2X_VF_MAX_QUEUES,
			      BP_VFDB(bp)->vf_sbs_pool / req_vfs);

	/* zero previous values learned from igu cam */
	for (vf_idx = 0; vf_idx < req_vfs; vf_idx++) {
		struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);

		vf_sb_count(vf) = 0;
	}
	bp->vfdb->vf_sbs_pool = 0;

	/* prepare IGU cam */
	sb_idx = BP_VFDB(bp)->first_vf_igu_entry;
	address = IGU_REG_MAPPING_MEMORY + sb_idx * IGU_ENTRY_SIZE;
	for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) {
		for (vfq_idx = 0; vfq_idx < num_vf_queues; vfq_idx++) {
			igu_entry = vf_idx << IGU_REG_MAPPING_MEMORY_FID_SHIFT |
				vfq_idx << IGU_REG_MAPPING_MEMORY_VECTOR_SHIFT |
				IGU_REG_MAPPING_MEMORY_VALID;
			DP(BNX2X_MSG_IOV, "assigning sb %d to vf %d\n",
			   sb_idx, vf_idx);
			REG_WR(bp, address, igu_entry);
			sb_idx++;
			address += IGU_ENTRY_SIZE;
		}
	}

	/* Reinitialize vf database according to igu cam */
	bnx2x_get_vf_igu_cam_info(bp);

	DP(BNX2X_MSG_IOV, "vf_sbs_pool %d, num_vf_queues %d\n",
	   BP_VFDB(bp)->vf_sbs_pool, num_vf_queues);

	qcount = 0;
	for_each_vf(bp, vf_idx) {
		struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);

		/* set local queue arrays */
		vf->vfqs = &bp->vfdb->vfqs[qcount];
		qcount += vf_sb_count(vf);
		bnx2x_iov_static_resc(bp, vf);
	}

	/* prepare msix vectors in VF configuration space - the value in the
	 * PCI configuration space should be the index of the last entry,
	 * namely one less than the actual size of the table
	 */
	for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) {
		bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx));
		REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL,
		       num_vf_queues - 1);
		DP(BNX2X_MSG_IOV, "set msix vec num in VF %d cfg space to %d\n",
		   vf_idx, num_vf_queues - 1);
	}
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));

	/* enable sriov. This will probe all the VFs, and consequentially cause
	 * the "acquire" messages to appear on the VF PF channel.
	 */
	DP(BNX2X_MSG_IOV, "about to call enable sriov\n");
	bnx2x_disable_sriov(bp);

	rc = bnx2x_set_pf_tx_switching(bp, true);
	if (rc)
		return rc;

	rc = pci_enable_sriov(bp->pdev, req_vfs);
	if (rc) {
		BNX2X_ERR("pci_enable_sriov failed with %d\n", rc);
		return rc;
	}
	DP(BNX2X_MSG_IOV, "sriov enabled (%d vfs)\n", req_vfs);
	return req_vfs;
}

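/* Illustrative sketch (not compiled): each 32-bit IGU CAM entry written in
 * the loop above packs the owning function id, the vector within that
 * function, and a valid bit. A hypothetical decode of one entry, using the
 * IGU_FID()/IGU_VEC() field accessors from bnx2x.h, would look like this:
 */
#if 0
	u32 entry = REG_RD(bp, IGU_REG_MAPPING_MEMORY + sb_idx * IGU_ENTRY_SIZE);
	u8 fid = IGU_FID(entry);	/* owning (VF) function id */
	u8 vector = IGU_VEC(entry);	/* MSI-X vector within that function */
	bool valid = entry & IGU_REG_MAPPING_MEMORY_VALID;
#endif
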
void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp)
{
	int vfidx;
	struct pf_vf_bulletin_content *bulletin;

	DP(BNX2X_MSG_IOV, "configuring vlan for VFs from sp-task\n");
	for_each_vf(bp, vfidx) {
		bulletin = BP_VF_BULLETIN(bp, vfidx);
		if (BP_VF(bp, vfidx)->cfg_flags & VF_CFG_VLAN)
			bnx2x_set_vf_vlan(bp->dev, vfidx, bulletin->vlan, 0);
	}
}

void bnx2x_disable_sriov(struct bnx2x *bp)
{
	pci_disable_sriov(bp->pdev);
}

static int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx,
			     struct bnx2x_virtf **vf,
			     struct pf_vf_bulletin_content **bulletin)
{
	if (bp->state != BNX2X_STATE_OPEN) {
		BNX2X_ERR("vf ndo called while PF is down\n");
		return -EINVAL;
	}

	if (!IS_SRIOV(bp)) {
		BNX2X_ERR("vf ndo called while sriov is disabled\n");
		return -EINVAL;
	}

	if (vfidx >= BNX2X_NR_VIRTFN(bp)) {
		BNX2X_ERR("vf ndo called for uninitialized VF. vfidx was %d BNX2X_NR_VIRTFN was %d\n",
			  vfidx, BNX2X_NR_VIRTFN(bp));
		return -EINVAL;
	}

	/* init members */
	*vf = BP_VF(bp, vfidx);
	*bulletin = BP_VF_BULLETIN(bp, vfidx);

	if (!*vf) {
		BNX2X_ERR("vf ndo called but vf struct is null. vfidx was %d\n",
			  vfidx);
		return -EINVAL;
	}

	if (!(*vf)->vfqs) {
		BNX2X_ERR("vf ndo called but vfqs struct is null. Was ndo invoked before dynamically enabling SR-IOV? vfidx was %d\n",
			  vfidx);
		return -EINVAL;
	}

	if (!*bulletin) {
		BNX2X_ERR("vf ndo called but Bulletin Board struct is null. vfidx was %d\n",
			  vfidx);
		return -EINVAL;
	}

	return 0;
}

int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
			struct ifla_vf_info *ivi)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct bnx2x_virtf *vf = NULL;
	struct pf_vf_bulletin_content *bulletin = NULL;
	struct bnx2x_vlan_mac_obj *mac_obj;
	struct bnx2x_vlan_mac_obj *vlan_obj;
	int rc;

	/* sanity and init */
	rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
	if (rc)
		return rc;

	mac_obj = &bnx2x_leading_vfq(vf, mac_obj);
	vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
	if (!mac_obj || !vlan_obj) {
		BNX2X_ERR("VF partially initialized\n");
		return -EINVAL;
	}

	ivi->vf = vfidx;
	ivi->qos = 0;
	ivi->tx_rate = 10000; /* always 10G. TBA take from link struct */
	ivi->spoofchk = 1; /* always enabled */
	if (vf->state == VF_ENABLED) {
		/* mac and vlan are in vlan_mac objects */
		if (bnx2x_validate_vf_sp_objs(bp, vf, false)) {
			mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac,
						0, ETH_ALEN);
			vlan_obj->get_n_elements(bp, vlan_obj, 1,
						 (u8 *)&ivi->vlan, 0,
						 VLAN_HLEN);
		}
	} else {
		/* mac */
		if (bulletin->valid_bitmap & (1 << MAC_ADDR_VALID))
			/* mac configured by ndo so it's in the bulletin board */
			memcpy(&ivi->mac, bulletin->mac, ETH_ALEN);
		else
			/* function has not been loaded yet. Show mac as 0s */
			memset(&ivi->mac, 0, ETH_ALEN);

		/* vlan */
		if (bulletin->valid_bitmap & (1 << VLAN_VALID))
			/* vlan configured by ndo so it's in the bulletin board */
			memcpy(&ivi->vlan, &bulletin->vlan, VLAN_HLEN);
		else
			/* function has not been loaded yet. Show vlans as 0s */
			memset(&ivi->vlan, 0, VLAN_HLEN);
	}
	return 0;
}

/* New mac for VF. Consider these cases:
 * 1. VF hasn't been acquired yet - save the mac in the local bulletin board
 *    and supply it at acquire.
 * 2. VF has already been acquired but has not yet initialized - store in the
 *    local bulletin board. The mac will be posted on the VF bulletin board
 *    after VF init. The VF will configure this mac when it is ready.
 * 3. VF has already initialized but has not yet set up a queue - post the new
 *    mac on the VF's bulletin board right now. The VF will configure this mac
 *    when it is ready.
 * 4. VF has already set a queue - delete any macs already configured for this
 *    queue and manually configure the new mac.
 * In any event, once this function has been called, refuse any attempts by
 * the VF to configure any mac for itself except for this mac. In case of a
 * race where the VF fails to see the new post on its bulletin board before
 * sending a mac configuration request, the PF will simply fail the request
 * and the VF can try again after consulting its bulletin board.
 */
int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc, q_logical_state;
	struct bnx2x_virtf *vf = NULL;
	struct pf_vf_bulletin_content *bulletin = NULL;

	/* sanity and init */
	rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
	if (rc)
		return rc;
	if (!is_valid_ether_addr(mac)) {
		BNX2X_ERR("mac address invalid\n");
		return -EINVAL;
	}

	/* update PF's copy of the VF's bulletin. Will no longer accept mac
	 * configuration requests from the vf unless they match this mac
	 */
	bulletin->valid_bitmap |= 1 << MAC_ADDR_VALID;
	memcpy(bulletin->mac, mac, ETH_ALEN);

	/* Post update on VF's bulletin board */
	rc = bnx2x_post_vf_bulletin(bp, vfidx);
	if (rc) {
		BNX2X_ERR("failed to update VF[%d] bulletin\n", vfidx);
		return rc;
	}

	q_logical_state =
		bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj));
	if (vf->state == VF_ENABLED &&
	    q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
		/* configure the mac in device on this vf's queue */
		unsigned long ramrod_flags = 0;
		struct bnx2x_vlan_mac_obj *mac_obj;

		/* User should be able to see failure reason in system logs */
		if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
			return -EINVAL;

		/* must lock vfpf channel to protect against vf flows */
		bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);

		/* remove existing eth macs */
		mac_obj = &bnx2x_leading_vfq(vf, mac_obj);
		rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true);
		if (rc) {
			BNX2X_ERR("failed to delete eth macs\n");
			rc = -EINVAL;
			goto out;
		}

		/* remove existing uc list macs */
		rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, true);
		if (rc) {
			BNX2X_ERR("failed to delete uc_list macs\n");
			rc = -EINVAL;
			goto out;
		}

		/* configure the new mac to device */
		__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
		bnx2x_set_mac_one(bp, (u8 *)&bulletin->mac, mac_obj, true,
				  BNX2X_ETH_MAC, &ramrod_flags);

out:
		bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
	}

	return rc;
}

int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
{
	struct bnx2x_queue_state_params q_params = {NULL};
	struct bnx2x_vlan_mac_ramrod_params ramrod_param;
	struct bnx2x_queue_update_params *update_params;
	struct pf_vf_bulletin_content *bulletin = NULL;
	struct bnx2x_rx_mode_ramrod_params rx_ramrod;
	struct bnx2x *bp = netdev_priv(dev);
	struct bnx2x_vlan_mac_obj *vlan_obj;
	unsigned long vlan_mac_flags = 0;
	unsigned long ramrod_flags = 0;
	struct bnx2x_virtf *vf = NULL;
	unsigned long accept_flags;
	int rc;

	/* sanity and init */
	rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
	if (rc)
		return rc;

	if (vlan > 4095) {
		BNX2X_ERR("illegal vlan value %d\n", vlan);
		return -EINVAL;
	}

	DP(BNX2X_MSG_IOV, "configuring VF %d with VLAN %d qos %d\n",
	   vfidx, vlan, 0);

	/* update PF's copy of the VF's bulletin. No point in posting the vlan
	 * to the VF since it doesn't have anything to do with it. But it is
	 * useful to store it here in case the VF is not up yet and we can
	 * only configure the vlan later when it is. Treat vlan id 0 as
	 * remove the Host tag.
	 */
	if (vlan > 0)
		bulletin->valid_bitmap |= 1 << VLAN_VALID;
	else
		bulletin->valid_bitmap &= ~(1 << VLAN_VALID);
	bulletin->vlan = vlan;

	/* is vf initialized and queue set up? */
	if (vf->state != VF_ENABLED ||
	    bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)) !=
	    BNX2X_Q_LOGICAL_STATE_ACTIVE)
		return rc;

	/* User should be able to see error in system logs */
	if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
		return -EINVAL;

	/* must lock vfpf channel to protect against vf flows */
	bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);

	/* remove existing vlans */
	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
	vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
	rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags,
				  &ramrod_flags);
	if (rc) {
		BNX2X_ERR("failed to delete vlans\n");
		rc = -EINVAL;
		goto out;
	}

	/* need to remove/add the VF's accept_any_vlan bit */
	accept_flags = bnx2x_leading_vfq(vf, accept_flags);
	if (vlan)
		clear_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
	else
		set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);

	bnx2x_vf_prep_rx_mode(bp, LEADING_IDX, &rx_ramrod, vf,
			      accept_flags);
	bnx2x_leading_vfq(vf, accept_flags) = accept_flags;
	bnx2x_config_rx_mode(bp, &rx_ramrod);

	/* configure the new vlan to device */
	memset(&ramrod_param, 0, sizeof(ramrod_param));
	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
	ramrod_param.vlan_mac_obj = vlan_obj;
	ramrod_param.ramrod_flags = ramrod_flags;
	set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
		&ramrod_param.user_req.vlan_mac_flags);
	ramrod_param.user_req.u.vlan.vlan = vlan;
	ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
	rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
	if (rc) {
		BNX2X_ERR("failed to configure vlan\n");
		rc = -EINVAL;
		goto out;
	}

	/* send queue update ramrod to configure default vlan and silent
	 * vlan removal
	 */
	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
	q_params.cmd = BNX2X_Q_CMD_UPDATE;
	q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj);
	update_params = &q_params.params.update;
	__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
		  &update_params->update_flags);
	__set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
		  &update_params->update_flags);
	if (vlan == 0) {
		/* if vlan is 0 then we want to leave the VF traffic
		 * untagged, and leave the incoming traffic untouched
		 * (i.e. do not remove any vlan tags).
		 */
		__clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
			    &update_params->update_flags);
		__clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
			    &update_params->update_flags);
	} else {
		/* configure default vlan to vf queue and set silent
		 * vlan removal (the vf remains unaware of this vlan).
		 */
		__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
			  &update_params->update_flags);
		__set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
			  &update_params->update_flags);
		update_params->def_vlan = vlan;
		update_params->silent_removal_value =
			vlan & VLAN_VID_MASK;
		update_params->silent_removal_mask = VLAN_VID_MASK;
	}

	/* Update the Queue state */
	rc = bnx2x_queue_state_change(bp, &q_params);
	if (rc) {
		BNX2X_ERR("Failed to configure default VLAN\n");
		goto out;
	}

	/* clear the flag indicating that this VF needs its vlan
	 * (will only be set if the HV configured the Vlan before vf was
	 * up and we were called because the VF came up later)
	 */
out:
	vf->cfg_flags &= ~VF_CFG_VLAN;
	bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);

	return rc;
}

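/* Illustrative wiring sketch (not compiled): the three VF ndos above are
 * exported to the networking core through net_device_ops; the real table
 * lives in bnx2x_main.c and is what lets "ip link set <dev> vf N mac/vlan"
 * and "ip link show <dev>" reach this file:
 */
#if 0
static const struct net_device_ops bnx2x_netdev_ops = {
	/* ... */
	.ndo_set_vf_mac		= bnx2x_set_vf_mac,
	.ndo_set_vf_vlan	= bnx2x_set_vf_vlan,
	.ndo_get_vf_config	= bnx2x_get_vf_config,
};
#endif
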
/* crc is the first field in the bulletin board. Compute the crc over the
 * entire bulletin board excluding the crc field itself. Use the length field
 * as the Bulletin Board was posted by a PF with possibly a different version
 * from the vf which will sample it. Therefore, the length is computed by the
 * PF and then used blindly by the VF.
 */
u32 bnx2x_crc_vf_bulletin(struct bnx2x *bp,
			  struct pf_vf_bulletin_content *bulletin)
{
	return crc32(BULLETIN_CRC_SEED,
		     ((u8 *)bulletin) + sizeof(bulletin->crc),
		     bulletin->length - sizeof(bulletin->crc));
}

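/* Usage sketch (illustrative only, not compiled): a consumer validates a
 * snapshot of the shared bulletin by recomputing the crc over the same
 * length the PF wrote. A mismatch means the sample tore a post in progress
 * and should simply be retried, as bnx2x_sample_bulletin() below does:
 */
#if 0
	struct pf_vf_bulletin_content snap = bp->pf2vf_bulletin->content;

	if (snap.crc != bnx2x_crc_vf_bulletin(bp, &snap))
		/* torn read - resample */;
#endif
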
/* Check for new posts on the bulletin board */
enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp)
{
	struct pf_vf_bulletin_content bulletin = bp->pf2vf_bulletin->content;
	int attempts;

	/* bulletin board hasn't changed since last sample */
	if (bp->old_bulletin.version == bulletin.version)
		return PFVF_BULLETIN_UNCHANGED;

	/* validate crc of new bulletin board */
	if (bp->old_bulletin.version != bp->pf2vf_bulletin->content.version) {
		/* sampling structure in mid post may result with corrupted data
		 * validate crc to ensure coherency.
		 */
		for (attempts = 0; attempts < BULLETIN_ATTEMPTS; attempts++) {
			bulletin = bp->pf2vf_bulletin->content;
			if (bulletin.crc == bnx2x_crc_vf_bulletin(bp,
								  &bulletin))
				break;
			BNX2X_ERR("bad crc on bulletin board. Contained %x computed %x\n",
				  bulletin.crc,
				  bnx2x_crc_vf_bulletin(bp, &bulletin));
		}
		if (attempts >= BULLETIN_ATTEMPTS) {
			BNX2X_ERR("pf to vf bulletin board crc was wrong %d consecutive times. Aborting\n",
				  attempts);
			return PFVF_BULLETIN_CRC_ERR;
		}
	}

	/* the mac address in bulletin board is valid and is new */
	if (bulletin.valid_bitmap & 1 << MAC_ADDR_VALID &&
	    !ether_addr_equal(bulletin.mac, bp->old_bulletin.mac)) {
		/* update new mac to net device */
		memcpy(bp->dev->dev_addr, bulletin.mac, ETH_ALEN);
	}

	/* the vlan in bulletin board is valid and is new */
	if (bulletin.valid_bitmap & 1 << VLAN_VALID)
		memcpy(&bulletin.vlan, &bp->old_bulletin.vlan, VLAN_HLEN);

	/* copy new bulletin board to bp */
	bp->old_bulletin = bulletin;

	return PFVF_BULLETIN_UPDATED;
}

void bnx2x_timer_sriov(struct bnx2x *bp)
{
	bnx2x_sample_bulletin(bp);

	/* if channel is down we need to self destruct */
	if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN)
		bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
				       BNX2X_MSG_IOV);
}

void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
{
	/* vf doorbells are embedded within the regview */
	return bp->regview + PXP_VF_ADDR_DB_START;
}

void bnx2x_vf_pci_dealloc(struct bnx2x *bp)
{
	BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping,
		       sizeof(struct bnx2x_vf_mbx_msg));
	BNX2X_PCI_FREE(bp->pf2vf_bulletin, bp->pf2vf_bulletin_mapping,
		       sizeof(union pf_vf_bulletin));
}

int bnx2x_vf_pci_alloc(struct bnx2x *bp)
{
	mutex_init(&bp->vf2pf_mutex);

	/* allocate vf2pf mailbox for vf to pf channel */
	bp->vf2pf_mbox = BNX2X_PCI_ALLOC(&bp->vf2pf_mbox_mapping,
					 sizeof(struct bnx2x_vf_mbx_msg));
	if (!bp->vf2pf_mbox)
		goto alloc_mem_err;

	/* allocate pf 2 vf bulletin board */
	bp->pf2vf_bulletin = BNX2X_PCI_ALLOC(&bp->pf2vf_bulletin_mapping,
					     sizeof(union pf_vf_bulletin));
	if (!bp->pf2vf_bulletin)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	bnx2x_vf_pci_dealloc(bp);
	return -ENOMEM;
}

void bnx2x_iov_channel_down(struct bnx2x *bp)
{
	int vf_idx;
	struct pf_vf_bulletin_content *bulletin;

	if (!IS_SRIOV(bp))
		return;

	for_each_vf(bp, vf_idx) {
		/* locate this VFs bulletin board and update the channel down
		 * flag
		 */
		bulletin = BP_VF_BULLETIN(bp, vf_idx);
		bulletin->valid_bitmap |= 1 << CHANNEL_DOWN;

		/* update vf bulletin board */
		bnx2x_post_vf_bulletin(bp, vf_idx);
	}
}

void bnx2x_iov_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, iov_task.work);

	if (!netif_running(bp->dev))
		return;

	if (test_and_clear_bit(BNX2X_IOV_HANDLE_FLR,
			       &bp->iov_task_state))
		bnx2x_vf_handle_flr_event(bp);

	if (test_and_clear_bit(BNX2X_IOV_HANDLE_VF_MSG,
			       &bp->iov_task_state))
		bnx2x_vf_mbx(bp);
}

void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag)
{
	smp_mb__before_clear_bit();
	set_bit(flag, &bp->iov_task_state);
	smp_mb__after_clear_bit();
	DP(BNX2X_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag);
	queue_delayed_work(bnx2x_iov_wq, &bp->iov_task, 0);
}
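
/* Illustrative note (not compiled): the barrier pair above orders the
 * set_bit() against the queue_delayed_work(), so that once the worker runs,
 * the test_and_clear_bit() in bnx2x_iov_task() is guaranteed to observe the
 * flag. A hypothetical producer therefore only ever needs:
 */
#if 0
	bnx2x_schedule_iov_task(bp, BNX2X_IOV_HANDLE_VF_MSG);
	/* bnx2x_iov_task() will pick up and clear the flag exactly once */
#endif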