/* bnx2x_sriov.c: Broadcom Everest network driver.
 *
 * Copyright 2009-2012 Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2, available
 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a
 * license other than the GPL, without Broadcom's express prior written
 * consent.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Shmulik Ravid <shmulikr@broadcom.com>
 *	       Ariel Elior <ariele@broadcom.com>
 */
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_cmn.h"
#include "bnx2x_sriov.h"
/* General service functions */
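/* The FW keeps an independent copy of per-function state in each of the
 * four STORM processors (XSTORM, CSTORM, TSTORM and USTORM), so the helpers
 * below mirror every write across all four INTMEM regions.
 */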
static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
				  u8 pf_id)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
}
static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
				 u8 enable)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
}
int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
{
	int idx;

	for_each_vf(bp, idx)
		if (bnx2x_vf(bp, idx, abs_vfid) == abs_vfid)
			break;
	return idx;
}
struct bnx2x_virtf *bnx2x_vf_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
{
	u16 idx = (u16)bnx2x_vf_idx_by_abs_fid(bp, abs_vfid);
	return (idx < BNX2X_NR_VIRTFN(bp)) ? BP_VF(bp, idx) : NULL;
}
static void bnx2x_vf_igu_ack_sb(struct bnx2x *bp, struct bnx2x_virtf *vf,
				u8 igu_sb_id, u8 segment, u16 index, u8 op,
				u8 update)
{
	/* acking a VF sb through the PF - use the GRC */
	u32 ctl;
	u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
	u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
	u32 func_encode = vf->abs_vfid;
	u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + igu_sb_id;
	struct igu_regular cmd_data = {0};

	cmd_data.sb_id_and_flags =
			((index << IGU_REGULAR_SB_INDEX_SHIFT) |
			 (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
			 (update << IGU_REGULAR_BUPDATE_SHIFT) |
			 (op << IGU_REGULAR_ENABLE_INT_SHIFT));
	ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT |
	      func_encode << IGU_CTRL_REG_FID_SHIFT |
	      IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;
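	/* The data word built above carries the producer-update payload
	 * (SB index, segment, update flag and interrupt op); the control
	 * word routes it: the address selects the producer-update command
	 * slot of this SB, the FID selects the VF on whose behalf the PF
	 * is acting, and the type marks the access as a write.
	 */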
	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
	   cmd_data.sb_id_and_flags, igu_addr_data);
	REG_WR(bp, igu_addr_data, cmd_data.sb_id_and_flags);
	mmiowb();
	barrier();
	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
	   ctl, igu_addr_ctl);
	REG_WR(bp, igu_addr_ctl, ctl);
	mmiowb();
	barrier();
}
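/* With SR-IOV the VFs appear at device/function numbers beyond the PF's
 * own, which is only routable when the bridge above the adapter forwards
 * ARI. The check below is therefore a hard prerequisite for enabling SRIOV.
 */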
static int bnx2x_ari_enabled(struct pci_dev *dev)
{
	return dev->bus->self && dev->bus->self->ari_enabled;
}
static void
bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid)
{
	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
	if (vf) {
		if (!vf_sb_count(vf))
			vf->igu_base_id = igu_sb_id;
		++vf_sb_count(vf);
	}
}
static void
bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
{
	int sb_id;
	u32 val;
	u8 fid;

	/* IGU in normal mode - read CAM */
	for (sb_id = 0; sb_id < IGU_REG_MAPPING_MEMORY_SIZE; sb_id++) {
		val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + sb_id * 4);
		if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
			continue;
		fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID);
		if (!(fid & IGU_FID_ENCODE_IS_PF))
			bnx2x_vf_set_igu_info(bp, sb_id,
					      (fid & IGU_FID_VF_NUM_MASK));

		DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n",
		   ((fid & IGU_FID_ENCODE_IS_PF) ? "PF" : "VF"),
		   ((fid & IGU_FID_ENCODE_IS_PF) ? (fid & IGU_FID_PF_NUM_MASK)
						 : (fid & IGU_FID_VF_NUM_MASK)),
		   sb_id, GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR));
	}
}
static void __bnx2x_iov_free_vfdb(struct bnx2x *bp)
{
	if (bp->vfdb) {
		kfree(bp->vfdb->vfqs);
		kfree(bp->vfdb->vfs);
		kfree(bp->vfdb);
	}
	bp->vfdb = NULL;
}
static int bnx2x_sriov_pci_cfg_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
{
	int pos;
	struct pci_dev *dev = bp->pdev;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos) {
		BNX2X_ERR("failed to find SRIOV capability in device\n");
		return -ENODEV;
	}

	iov->pos = pos;
	DP(BNX2X_MSG_IOV, "sriov ext pos %d\n", pos);
	pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
	pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &iov->total);
	pci_read_config_word(dev, pos + PCI_SRIOV_INITIAL_VF, &iov->initial);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
	pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
	pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
	pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);

	return 0;
}
static int bnx2x_sriov_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
{
	u32 val;

	/* read the SRIOV capability structure
	 * The fields can be read via configuration read or
	 * directly from the device (starting at offset PCICFG_OFFSET)
	 */
	if (bnx2x_sriov_pci_cfg_info(bp, iov))
		return -ENODEV;

	/* get the number of SRIOV bars */
	/* read the first_vfid */
	val = REG_RD(bp, PCICFG_OFFSET + GRC_CONFIG_REG_PF_INIT_VF);
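	/* the GRC value counts the first VF in units of 8 functions and is
	 * absolute (per chip); subtracting this path's VF block rebases it
	 * to a path-local VF number
	 */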
	iov->first_vf_in_pf = ((val & GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK)
			       * 8) - (BNX2X_MAX_NUM_OF_VFS * BP_PATH(bp));
	DP(BNX2X_MSG_IOV,
	   "IOV info[%d]: first vf %d, nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
	   BP_FUNC(bp),
	   iov->first_vf_in_pf, iov->nres, iov->cap, iov->ctrl, iov->total,
	   iov->initial, iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);

	return 0;
}
static u8 bnx2x_iov_get_max_queue_count(struct bnx2x *bp)
{
	int i;
	u8 queue_count = 0;

	if (IS_SRIOV(bp))
		for_each_vf(bp, i)
			queue_count += bnx2x_vf(bp, i, alloc_resc.num_sbs);

	return queue_count;
}
/* must be called after PF bars are mapped */
int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
		       int num_vfs_param)
{
	int err, i, qcount;
	struct bnx2x_sriov *iov;
	struct pci_dev *dev = bp->pdev;

	/* verify sriov capability is present in configuration space */
	if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV)) {
		DP(BNX2X_MSG_IOV, "no sriov - capability not found\n");
		return 0;
	}
	/* verify chip revision */
	if (CHIP_IS_E1x(bp))
		return 0;
	/* check if SRIOV support is turned off */
	if (!num_vfs_param)
		return 0;
	/* SRIOV assumes that num of PF CIDs < BNX2X_FIRST_VF_CID */
	if (BNX2X_L2_MAX_CID(bp) >= BNX2X_FIRST_VF_CID) {
		BNX2X_ERR("PF cids %d are overspilling into vf space (starts at %d). Abort SRIOV\n",
			  BNX2X_L2_MAX_CID(bp), BNX2X_FIRST_VF_CID);
		return 0;
	}
	/* SRIOV can be enabled only with MSIX */
	if (int_mode_param == BNX2X_INT_MODE_MSI ||
	    int_mode_param == BNX2X_INT_MODE_INTX) {
		BNX2X_ERR("Forced MSI/INTx mode is incompatible with SRIOV\n");
		return 0;
	}
	/* verify ari is enabled */
	if (!bnx2x_ari_enabled(bp->pdev)) {
		BNX2X_ERR("ARI not supported, SRIOV can not be enabled\n");
		return 0;
	}
	/* verify igu is in normal mode */
	if (CHIP_INT_MODE_IS_BC(bp)) {
		BNX2X_ERR("IGU not normal mode, SRIOV can not be enabled\n");
		return 0;
	}
	/* allocate the vfs database */
	bp->vfdb = kzalloc(sizeof(*(bp->vfdb)), GFP_KERNEL);
	if (!bp->vfdb) {
		BNX2X_ERR("failed to allocate vf database\n");
		err = -ENOMEM;
		goto failed;
	}
	/* get the sriov info - Linux already collected all the pertinent
	 * information, however the sriov structure is for the private use
	 * of the pci module. Also we want this information regardless
	 * of the hypervisor.
	 */
	iov = &(bp->vfdb->sriov);
	err = bnx2x_sriov_info(bp, iov);
	if (err)
		goto failed;
	/* SR-IOV capability was enabled but there are no VFs */
	if (iov->total == 0)
		goto failed;

	/* calculate the actual number of VFs */
	iov->nr_virtfn = min_t(u16, iov->total, (u16)num_vfs_param);
	/* allocate the vf array */
	bp->vfdb->vfs = kzalloc(sizeof(struct bnx2x_virtf) *
				BNX2X_NR_VIRTFN(bp), GFP_KERNEL);
	if (!bp->vfdb->vfs) {
		BNX2X_ERR("failed to allocate vf array\n");
		err = -ENOMEM;
		goto failed;
	}
	/* Initial VF init - index and abs_vfid - nr_virtfn must be set */
	for_each_vf(bp, i) {
		bnx2x_vf(bp, i, index) = i;
		bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i;
		bnx2x_vf(bp, i, state) = VF_FREE;
		INIT_LIST_HEAD(&bnx2x_vf(bp, i, op_list_head));
		mutex_init(&bnx2x_vf(bp, i, op_mutex));
		bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE;
	}
	/* re-read the IGU CAM for VFs - index and abs_vfid must be set */
	bnx2x_get_vf_igu_cam_info(bp);

	/* get the total queue count and allocate the global queue arrays */
	qcount = bnx2x_iov_get_max_queue_count(bp);
	/* allocate the queue arrays for all VFs */
	bp->vfdb->vfqs = kzalloc(qcount * sizeof(struct bnx2x_vf_queue),
				 GFP_KERNEL);
	if (!bp->vfdb->vfqs) {
		BNX2X_ERR("failed to allocate vf queue array\n");
		err = -ENOMEM;
		goto failed;
	}

	return 0;
failed:
	DP(BNX2X_MSG_IOV, "Failed err=%d\n", err);
	__bnx2x_iov_free_vfdb(bp);
	return err;
}
/* VF enable primitives
 * when pretend is required the caller is responsible
 * for calling pretend prior to calling these routines
 */
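/* The canonical pretend sequence used throughout this file is:
 *
 *	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
 *	... GRC accesses now execute with the VF's identity ...
 *	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
 *
 * i.e. the caller always restores its own function id when done.
 */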
/* called only on E1H or E2.
 * When pretending to be PF, the pretend value is the function number 0...7
 * When pretending to be VF, the pretend val is the PF-num:VF-valid:ABS-VFID
 * combination
 */
int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val)
{
	u32 pretend_reg;

	if (CHIP_IS_E1H(bp) && pretend_func_val > E1H_FUNC_MAX)
		return -1;

	/* get my own pretend register */
	pretend_reg = bnx2x_get_pretend_reg(bp);
	REG_WR(bp, pretend_reg, pretend_func_val);
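	/* reading the register back flushes the posted write, so the new
	 * identity is in effect before the caller issues further accesses
	 */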
	REG_RD(bp, pretend_reg);
	return 0;
}
/* internal vf enable - until vf is enabled internally all transactions
 * are blocked. This routine should always be called last with pretend.
 */
static void bnx2x_vf_enable_internal(struct bnx2x *bp, u8 enable)
{
	REG_WR(bp, PGLUE_B_REG_INTERNAL_VFID_ENABLE, enable ? 1 : 0);
}
/* clears vf error in all semi blocks */
static void bnx2x_vf_semi_clear_err(struct bnx2x *bp, u8 abs_vfid)
{
	REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, USEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, abs_vfid);
}
static void bnx2x_vf_pglue_clear_err(struct bnx2x *bp, u8 abs_vfid)
{
	u32 was_err_group = (2 * BP_PATH(bp) + abs_vfid) >> 5;
	u32 was_err_reg = 0;

	switch (was_err_group) {
	case 0:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR;
		break;
	case 1:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR;
		break;
	case 2:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_95_64_CLR;
		break;
	case 3:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_127_96_CLR;
		break;
	}
	REG_WR(bp, was_err_reg, 1 << (abs_vfid & 0x1f));
}
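/* PGLUE keeps a sticky "was error" bit per VF; the group arithmetic above
 * picks the 32-bit CLR register holding this VF's bit. The bit must be
 * cleared before the VF is re-enabled, otherwise the block keeps treating
 * the VF as faulted.
 */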
static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int i;
	u32 val;

	/* Set VF masks and configuration - pretend */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));

	REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
	REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
	REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
	REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
	REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
	REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
	val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
	val |= (IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_MSI_MSIX_EN);
	if (vf->cfg_flags & VF_CFG_INT_SIMD)
		val |= IGU_VF_CONF_SINGLE_ISR_EN;
	val &= ~IGU_VF_CONF_PARENT_MASK;
	val |= BP_FUNC(bp) << IGU_VF_CONF_PARENT_SHIFT;	/* parent PF */
	REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);

	DP(BNX2X_MSG_IOV,
	   "value in IGU_REG_VF_CONFIGURATION of vf %d after write %x\n",
	   vf->abs_vfid, REG_RD(bp, IGU_REG_VF_CONFIGURATION));

	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
	/* iterate over all queues, clear sb consumer */
	for (i = 0; i < vf_sb_count(vf); i++) {
		u8 igu_sb_id = vf_igu_sb(vf, i);

		/* zero prod memory */
		REG_WR(bp, IGU_REG_PROD_CONS_MEMORY + igu_sb_id * 4, 0);

		/* clear sb state machine */
		bnx2x_igu_clear_sb_gen(bp, vf->abs_vfid, igu_sb_id,
				       false /* VF */);

		/* disable + update */
		bnx2x_vf_igu_ack_sb(bp, vf, igu_sb_id, USTORM_ID, 0,
				    IGU_INT_DISABLE, 1);
	}
}
void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid)
{
	/* set the VF-PF association in the FW */
	storm_memset_vf_to_pf(bp, FW_VF_HANDLE(abs_vfid), BP_FUNC(bp));
	storm_memset_func_en(bp, FW_VF_HANDLE(abs_vfid), 1);

	/* clear vf errors */
	bnx2x_vf_semi_clear_err(bp, abs_vfid);
	bnx2x_vf_pglue_clear_err(bp, abs_vfid);

	/* internal vf-enable - pretend */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, abs_vfid));
	DP(BNX2X_MSG_IOV, "enabling internal access for vf %x\n", abs_vfid);
	bnx2x_vf_enable_internal(bp, true);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
}
static void bnx2x_vf_enable_traffic(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	/* Reset vf in IGU - interrupts are still disabled */
	bnx2x_vf_igu_reset(bp, vf);

	/* pretend to enable the vf with the PBF */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
	REG_WR(bp, PBF_REG_DISABLE_VF, 0);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
}
static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid)
{
	struct pci_dev *dev;
	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);

	if (!vf)
		goto unknown_dev;

	dev = pci_get_bus_and_slot(vf->bus, vf->devfn);
	if (dev)
		return bnx2x_is_pcie_pending(dev);

unknown_dev:
	BNX2X_ERR("Unknown device\n");
	return false;
}
int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)
{
	/* Verify no pending pci transactions */
	if (bnx2x_vf_is_pcie_pending(bp, abs_vfid))
		BNX2X_ERR("PCIE Transactions still pending\n");

	return 0;
}
/* must be called after the number of PF queues and the number of VFs are
 * both known
 */
static void
bnx2x_iov_static_resc(struct bnx2x *bp, struct vf_pf_resc_request *resc)
{
	u16 vlan_count = 0;

	/* will be set only during VF-ACQUIRE */
	resc->num_rxqs = 0;
	resc->num_txqs = 0;

	/* no credit calculations for macs (just yet) */
	resc->num_mac_filters = 1;
	/* divvy up vlan rules */
	vlan_count = bp->vlans_pool.check(&bp->vlans_pool);
	vlan_count = 1 << ilog2(vlan_count);
	resc->num_vlan_filters = vlan_count / BNX2X_NR_VIRTFN(bp);

	/* no real limitation */
	resc->num_mc_filters = 0;

	/* num_sbs already set */
}
/* IOV global initialization routines */
void bnx2x_iov_init_dq(struct bnx2x *bp)
{
	if (!IS_SRIOV(bp))
		return;

	/* Set the DQ such that the CID reflect the abs_vfid */
	REG_WR(bp, DORQ_REG_VF_NORM_VF_BASE, 0);
	REG_WR(bp, DORQ_REG_MAX_RVFID_SIZE, ilog2(BNX2X_MAX_NUM_OF_VFS));
	/* Set VFs starting CID. If it's > 0, the preceding CIDs belong to
	 * the PF L2 queues
	 */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_BASE, BNX2X_FIRST_VF_CID);
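	/* this pairs with the BNX2X_L2_MAX_CID() check in
	 * bnx2x_iov_init_one(): every CID below BNX2X_FIRST_VF_CID belongs
	 * to the PF, everything from there up is VF territory
	 */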
	/* The VF window size is the log2 of the max number of CIDs per VF */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_WND_SIZE, BNX2X_VF_CID_WND);
	/* The VF doorbell size  0 - *B, 4 - 128B. We set it here to match
	 * the PF doorbell size although the two are independent.
	 */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST,
	       BNX2X_DB_SHIFT - BNX2X_DB_MIN_SHIFT);
	/* No security checks for now -
	 * configure single rule (out of 16) mask = 0x1, value = 0x0,
	 * CID range 0 - 0x1ffff
	 */
	REG_WR(bp, DORQ_REG_VF_TYPE_MASK_0, 1);
	REG_WR(bp, DORQ_REG_VF_TYPE_VALUE_0, 0);
	REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0);
	REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff);
	/* set the number of VF allowed doorbells to the full DQ range */
	REG_WR(bp, DORQ_REG_VF_NORM_MAX_CID_COUNT, 0x20000);

	/* set the VF doorbell threshold */
	REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 4);
}
void bnx2x_iov_init_dmae(struct bnx2x *bp)
{
	DP(BNX2X_MSG_IOV, "SRIOV is %s\n", IS_SRIOV(bp) ? "ON" : "OFF");
	if (!IS_SRIOV(bp))
		return;

	REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0);
}
static int bnx2x_vf_bus(struct bnx2x *bp, int vfid)
{
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

	return dev->bus->number + ((dev->devfn + iov->offset +
				    iov->stride * vfid) >> 8);
}
static int bnx2x_vf_devfn(struct bnx2x *bp, int vfid)
{
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

	return (dev->devfn + iov->offset + iov->stride * vfid) & 0xff;
}
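/* The two helpers above implement the SR-IOV routing-ID rule: VF n lives
 * at the PF's devfn + VF offset + n * VF stride, with any carry out of the
 * 8-bit devfn spilling over into the bus number.
 */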
static void bnx2x_vf_set_bars(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int i, n;
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;
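	/* each SR-IOV BAR resource of the PF covers all VFs back to back:
	 * an equal slice per VF, located abs_vfid slices from the base
	 */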
	for (i = 0, n = 0; i < PCI_SRIOV_NUM_BARS; i += 2, n++) {
		u64 start = pci_resource_start(dev, PCI_IOV_RESOURCES + i);
		u32 size = pci_resource_len(dev, PCI_IOV_RESOURCES + i);

		do_div(size, iov->total);
		vf->bars[n].bar = start + size * vf->abs_vfid;
		vf->bars[n].size = size;
	}
}
void bnx2x_iov_remove_one(struct bnx2x *bp)
{
	/* if SRIOV is not enabled there's nothing to do */
	if (!IS_SRIOV(bp))
		return;

	/* free vf database */
	__bnx2x_iov_free_vfdb(bp);
}
void bnx2x_iov_free_mem(struct bnx2x *bp)
{
	int i;

	if (!IS_SRIOV(bp))
		return;

	/* free vfs hw contexts */
	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
		struct hw_dma *cxt = &bp->vfdb->context[i];
		BNX2X_PCI_FREE(cxt->addr, cxt->mapping, cxt->size);
	}
	BNX2X_PCI_FREE(BP_VFDB(bp)->sp_dma.addr,
		       BP_VFDB(bp)->sp_dma.mapping,
		       BP_VFDB(bp)->sp_dma.size);

	BNX2X_PCI_FREE(BP_VF_MBX_DMA(bp)->addr,
		       BP_VF_MBX_DMA(bp)->mapping,
		       BP_VF_MBX_DMA(bp)->size);
}
int bnx2x_iov_alloc_mem(struct bnx2x *bp)
{
	int i;
	size_t tot_size;

	if (!IS_SRIOV(bp))
		return 0;

	/* allocate vfs hw contexts */
	tot_size = (BP_VFDB(bp)->sriov.first_vf_in_pf + BNX2X_NR_VIRTFN(bp)) *
		BNX2X_CIDS_PER_VF * sizeof(union cdu_context);

	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
		struct hw_dma *cxt = BP_VF_CXT_PAGE(bp, i);
		cxt->size = min_t(size_t, tot_size, CDU_ILT_PAGE_SZ);

		if (cxt->size) {
			BNX2X_PCI_ALLOC(cxt->addr, &cxt->mapping, cxt->size);
		} else {
			cxt->addr = NULL;
			cxt->mapping = 0;
		}
		tot_size -= cxt->size;
	}
	/* allocate vfs ramrods dma memory - client_init and set_mac */
	tot_size = BNX2X_NR_VIRTFN(bp) * sizeof(struct bnx2x_vf_sp);
	BNX2X_PCI_ALLOC(BP_VFDB(bp)->sp_dma.addr, &BP_VFDB(bp)->sp_dma.mapping,
			tot_size);
	BP_VFDB(bp)->sp_dma.size = tot_size;
	/* allocate mailboxes */
	tot_size = BNX2X_NR_VIRTFN(bp) * MBX_MSG_ALIGNED_SIZE;
	BNX2X_PCI_ALLOC(BP_VF_MBX_DMA(bp)->addr, &BP_VF_MBX_DMA(bp)->mapping,
			tot_size);
	BP_VF_MBX_DMA(bp)->size = tot_size;
	return 0;

alloc_mem_err:
	return -ENOMEM;
}
static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
			   struct bnx2x_vf_queue *q)
{
	u8 cl_id = vfq_cl_id(vf, q);
	u8 func_id = FW_VF_HANDLE(vf->abs_vfid);
	unsigned long q_type = 0;

	set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
	set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);

	/* Queue State object */
	bnx2x_init_queue_obj(bp, &q->sp_obj,
			     cl_id, &q->cid, 1, func_id,
			     bnx2x_vf_sp(bp, vf, q_data),
			     bnx2x_vf_sp_map(bp, vf, q_data),
			     q_type);

	DP(BNX2X_MSG_IOV,
	   "initialized vf %d's queue object. func id set to %d\n",
	   vf->abs_vfid, q->sp_obj.func_id);
	/* mac/vlan objects are per queue, but only those
	 * that belong to the leading queue are initialized
	 */
	if (vfq_is_leading(q)) {
		/* mac */
		bnx2x_init_mac_obj(bp, &q->mac_obj,
				   cl_id, q->cid, func_id,
				   bnx2x_vf_sp(bp, vf, mac_rdata),
				   bnx2x_vf_sp_map(bp, vf, mac_rdata),
				   BNX2X_FILTER_MAC_PENDING,
				   &vf->filter_state,
				   BNX2X_OBJ_TYPE_RX_TX,
				   &bp->macs_pool);
		/* vlan */
		bnx2x_init_vlan_obj(bp, &q->vlan_obj,
				    cl_id, q->cid, func_id,
				    bnx2x_vf_sp(bp, vf, vlan_rdata),
				    bnx2x_vf_sp_map(bp, vf, vlan_rdata),
				    BNX2X_FILTER_VLAN_PENDING,
				    &vf->filter_state,
				    BNX2X_OBJ_TYPE_RX_TX,
				    &bp->vlans_pool);

		/* mcast */
		bnx2x_init_mcast_obj(bp, &vf->mcast_obj, cl_id,
				     q->cid, func_id, func_id,
				     bnx2x_vf_sp(bp, vf, mcast_rdata),
				     bnx2x_vf_sp_map(bp, vf, mcast_rdata),
				     BNX2X_FILTER_MCAST_PENDING,
				     &vf->filter_state,
				     BNX2X_OBJ_TYPE_RX_TX);

		vf->leading_rss = cl_id;
	}
}
/* called by bnx2x_nic_load */
int bnx2x_iov_nic_init(struct bnx2x *bp)
{
	int vfid, qcount, i;

	if (!IS_SRIOV(bp)) {
		DP(BNX2X_MSG_IOV, "vfdb was not allocated\n");
		return 0;
	}

	DP(BNX2X_MSG_IOV, "num of vfs: %d\n", (bp)->vfdb->sriov.nr_virtfn);
	/* initialize vf database */
	for_each_vf(bp, vfid) {
		struct bnx2x_virtf *vf = BP_VF(bp, vfid);

		int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vfid) *
			BNX2X_CIDS_PER_VF;

		union cdu_context *base_cxt = (union cdu_context *)
			BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
			(base_vf_cid & (ILT_PAGE_CIDS-1));

		DP(BNX2X_MSG_IOV,
		   "VF[%d] Max IGU SBs: %d, base vf cid 0x%x, base cid 0x%x, base cxt %p\n",
		   vf->abs_vfid, vf_sb_count(vf), base_vf_cid,
		   BNX2X_FIRST_VF_CID + base_vf_cid, base_cxt);
		/* init statically provisioned resources */
		bnx2x_iov_static_resc(bp, &vf->alloc_resc);

		/* queues are initialized during VF-ACQUIRE */

		/* reserve the vf vlan credit */
		bp->vlans_pool.get(&bp->vlans_pool, vf_vlan_rules_cnt(vf));

		vf->filter_state = 0;
		vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id);
		/* init mcast object - This object will be re-initialized
		 * during VF-ACQUIRE with the proper cl_id and cid.
		 * It needs to be initialized here so that it can be safely
		 * handled by a subsequent FLR flow.
		 */
		bnx2x_init_mcast_obj(bp, &vf->mcast_obj, 0xFF,
				     0xFF, 0xFF, 0xFF,
				     bnx2x_vf_sp(bp, vf, mcast_rdata),
				     bnx2x_vf_sp_map(bp, vf, mcast_rdata),
				     BNX2X_FILTER_MCAST_PENDING,
				     &vf->filter_state,
				     BNX2X_OBJ_TYPE_RX_TX);
		/* set the mailbox message addresses */
		BP_VF_MBX(bp, vfid)->msg = (struct bnx2x_vf_mbx_msg *)
			(((u8 *)BP_VF_MBX_DMA(bp)->addr) + vfid *
			 MBX_MSG_ALIGNED_SIZE);

		BP_VF_MBX(bp, vfid)->msg_mapping = BP_VF_MBX_DMA(bp)->mapping +
			vfid * MBX_MSG_ALIGNED_SIZE;
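		/* note that msg and msg_mapping above use the same
		 * MBX_MSG_ALIGNED_SIZE offset into the single mailbox DMA
		 * block allocated in bnx2x_iov_alloc_mem(), so the CPU and
		 * bus addresses always refer to the same slot
		 */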
		/* Enable vf mailbox */
		bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
	}

	/* Final VF init */
	qcount = 0;
	for_each_vf(bp, i) {
		struct bnx2x_virtf *vf = BP_VF(bp, i);

		/* fill in the BDF and bars */
		vf->bus = bnx2x_vf_bus(bp, i);
		vf->devfn = bnx2x_vf_devfn(bp, i);
		bnx2x_vf_set_bars(bp, vf);

		DP(BNX2X_MSG_IOV,
		   "VF info[%d]: bus 0x%x, devfn 0x%x, bar0 [0x%x, %d], bar1 [0x%x, %d], bar2 [0x%x, %d]\n",
		   vf->abs_vfid, vf->bus, vf->devfn,
		   (unsigned)vf->bars[0].bar, vf->bars[0].size,
		   (unsigned)vf->bars[1].bar, vf->bars[1].size,
		   (unsigned)vf->bars[2].bar, vf->bars[2].size);
		/* set local queue arrays */
		vf->vfqs = &bp->vfdb->vfqs[qcount];
		qcount += bnx2x_vf(bp, i, alloc_resc.num_sbs);
	}

	return 0;
}
/* called by bnx2x_init_hw_func, returns the next ilt line */
int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line)
{
	int i;
	struct bnx2x_ilt *ilt = BP_ILT(bp);

	if (!IS_SRIOV(bp))
		return line;

	/* set vfs ilt lines */
	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
		struct hw_dma *hw_cxt = BP_VF_CXT_PAGE(bp, i);

		ilt->lines[line+i].page = hw_cxt->addr;
		ilt->lines[line+i].page_mapping = hw_cxt->mapping;
		ilt->lines[line+i].size = hw_cxt->size; /* doesn't matter */
	}
	return line + i;
}
static u8 bnx2x_iov_is_vf_cid(struct bnx2x *bp, u16 cid)
{
	return ((cid >= BNX2X_FIRST_VF_CID) &&
		((cid - BNX2X_FIRST_VF_CID) < BNX2X_VF_CIDS));
}
static void bnx2x_vf_handle_classification_eqe(struct bnx2x *bp,
					       struct bnx2x_vf_queue *vfq,
					       union event_ring_elem *elem)
{
	unsigned long ramrod_flags = 0;
	int rc = 0;
	/* Always push next commands out, don't wait here */
	set_bit(RAMROD_CONT, &ramrod_flags);

	switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) {
	case BNX2X_FILTER_MAC_PENDING:
		rc = vfq->mac_obj.complete(bp, &vfq->mac_obj, elem,
					   &ramrod_flags);
		break;
	case BNX2X_FILTER_VLAN_PENDING:
		rc = vfq->vlan_obj.complete(bp, &vfq->vlan_obj, elem,
					    &ramrod_flags);
		break;
	default:
		BNX2X_ERR("Unsupported classification command: %d\n",
			  elem->message.data.eth_event.echo);
		return;
	}
	if (rc < 0)
		BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
	else if (rc > 0)
		DP(BNX2X_MSG_IOV, "Scheduled next pending commands...\n");
}
static void bnx2x_vf_handle_mcast_eqe(struct bnx2x *bp,
				      struct bnx2x_virtf *vf)
{
	struct bnx2x_mcast_ramrod_params rparam = {NULL};
	int rc;

	rparam.mcast_obj = &vf->mcast_obj;
	vf->mcast_obj.raw.clear_pending(&vf->mcast_obj.raw);

	/* If there are pending mcast commands - send them */
	if (vf->mcast_obj.check_pending(&vf->mcast_obj)) {
		rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
		if (rc < 0)
			BNX2X_ERR("Failed to send pending mcast commands: %d\n",
				  rc);
	}
}
static void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp,
					struct bnx2x_virtf *vf)
{
	smp_mb__before_clear_bit();
	clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
	smp_mb__after_clear_bit();
}
int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)
{
	struct bnx2x_virtf *vf;
	int qidx = 0, abs_vfid;
	u8 opcode;
	u16 cid = 0xffff;

	if (!IS_SRIOV(bp))
		return 1;

	/* first get the cid - the only events we handle here are cfc-delete
	 * and set-mac completion
	 */
	opcode = elem->message.opcode;

	switch (opcode) {
	case EVENT_RING_OPCODE_CFC_DEL:
		cid = SW_CID((__force __le32)
			     elem->message.data.cfc_del_event.cid);
		DP(BNX2X_MSG_IOV, "checking cfc-del comp cid=%d\n", cid);
		break;
	case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
	case EVENT_RING_OPCODE_MULTICAST_RULES:
	case EVENT_RING_OPCODE_FILTERS_RULES:
		cid = (elem->message.data.eth_event.echo &
		       BNX2X_SWCID_MASK);
		DP(BNX2X_MSG_IOV, "checking filtering comp cid=%d\n", cid);
		break;
	case EVENT_RING_OPCODE_VF_FLR:
		abs_vfid = elem->message.data.vf_flr_event.vf_id;
		DP(BNX2X_MSG_IOV, "Got VF FLR notification abs_vfid=%d\n",
		   abs_vfid);
		goto get_vf;
	case EVENT_RING_OPCODE_MALICIOUS_VF:
		abs_vfid = elem->message.data.malicious_vf_event.vf_id;
		DP(BNX2X_MSG_IOV, "Got VF MALICIOUS notification abs_vfid=%d\n",
		   abs_vfid);
		goto get_vf;
	default:
		return 1;
	}
	/* check if the cid is the VF range */
	if (!bnx2x_iov_is_vf_cid(bp, cid)) {
		DP(BNX2X_MSG_IOV, "cid is outside vf range: %d\n", cid);
		return 1;
	}
	/* extract vf and rxq index from vf_cid - relies on the following:
	 * 1. vfid on cid reflects the true abs_vfid
	 * 2. the max number of VFs (per path) is 64
	 */
	qidx = cid & ((1 << BNX2X_VF_CID_WND)-1);
	abs_vfid = (cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
get_vf:
	vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);

	if (!vf) {
		BNX2X_ERR("EQ completion for unknown VF, cid %d, abs_vfid %d\n",
			  cid, abs_vfid);
		return 0;
	}

	switch (opcode) {
	case EVENT_RING_OPCODE_CFC_DEL:
		DP(BNX2X_MSG_IOV, "got VF [%d:%d] cfc delete ramrod\n",
		   vf->abs_vfid, qidx);
		vfq_get(vf, qidx)->sp_obj.complete_cmd(bp,
						       &vfq_get(vf,
								qidx)->sp_obj,
						       BNX2X_Q_CMD_CFC_DEL);
		break;
	case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mac/vlan ramrod\n",
		   vf->abs_vfid, qidx);
		bnx2x_vf_handle_classification_eqe(bp, vfq_get(vf, qidx), elem);
		break;
	case EVENT_RING_OPCODE_MULTICAST_RULES:
		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mcast ramrod\n",
		   vf->abs_vfid, qidx);
		bnx2x_vf_handle_mcast_eqe(bp, vf);
		break;
	case EVENT_RING_OPCODE_FILTERS_RULES:
		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set rx-mode ramrod\n",
		   vf->abs_vfid, qidx);
		bnx2x_vf_handle_filters_eqe(bp, vf);
		break;
	case EVENT_RING_OPCODE_VF_FLR:
		DP(BNX2X_MSG_IOV, "got VF [%d] FLR notification\n",
		   vf->abs_vfid);
		/* Do nothing for now */
		break;
	case EVENT_RING_OPCODE_MALICIOUS_VF:
		DP(BNX2X_MSG_IOV, "got VF [%d] MALICIOUS notification\n",
		   vf->abs_vfid);
		/* Do nothing for now */
		break;
	}
	/* SRIOV: reschedule any 'in_progress' operations */
	bnx2x_iov_sp_event(bp, cid, false);

	return 0;
}
static struct bnx2x_virtf *bnx2x_vf_by_cid(struct bnx2x *bp, int vf_cid)
{
	/* extract the vf from vf_cid - relies on the following:
	 * 1. vfid on cid reflects the true abs_vfid
	 * 2. the max number of VFs (per path) is 64
	 */
	int abs_vfid = (vf_cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
	return bnx2x_vf_by_abs_fid(bp, abs_vfid);
}
void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
				struct bnx2x_queue_sp_obj **q_obj)
{
	struct bnx2x_virtf *vf;

	if (!IS_SRIOV(bp))
		return;

	vf = bnx2x_vf_by_cid(bp, vf_cid);
	if (vf) {
		/* extract queue index from vf_cid - relies on the following:
		 * 1. vfid on cid reflects the true abs_vfid
		 * 2. the max number of VFs (per path) is 64
		 */
		int q_index = vf_cid & ((1 << BNX2X_VF_CID_WND)-1);
		*q_obj = &bnx2x_vfq(vf, q_index, sp_obj);
	} else {
		BNX2X_ERR("No vf matching cid %d\n", vf_cid);
	}
}
void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid, bool queue_work)
{
	struct bnx2x_virtf *vf;

	/* check if the cid is the VF range */
	if (!IS_SRIOV(bp) || !bnx2x_iov_is_vf_cid(bp, vf_cid))
		return;

	vf = bnx2x_vf_by_cid(bp, vf_cid);
	if (vf) {
		/* set in_progress flag */
		atomic_set(&vf->op_in_progress, 1);
		if (queue_work)
			queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
	}
}
void bnx2x_iov_sp_task(struct bnx2x *bp)
{
	int i;

	if (!IS_SRIOV(bp))
		return;

	/* Iterate over all VFs and invoke state transition for VFs with
	 * 'in-progress' slow-path operations
	 */
	DP(BNX2X_MSG_IOV, "searching for pending vf operations\n");
	for_each_vf(bp, i) {
		struct bnx2x_virtf *vf = BP_VF(bp, i);

		if (!list_empty(&vf->op_list_head) &&
		    atomic_read(&vf->op_in_progress)) {
			DP(BNX2X_MSG_IOV, "running pending op for vf %d\n", i);
			bnx2x_vfop_cur(bp, vf)->transition(bp, vf);
		}
	}
}
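/* The PXP host-zone permission table gates doorbell access: each queue-zone
 * entry names the function allowed to use it, with bit 6 serving here as
 * the enable flag (writing 0 revokes the zone).
 */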
static void bnx2x_vf_qtbl_set_q(struct bnx2x *bp, u8 abs_vfid, u8 qid,
				u8 enable)
{
	u32 reg = PXP_REG_HST_ZONE_PERMISSION_TABLE + qid * 4;
	u32 val = enable ? (abs_vfid | (1 << 6)) : 0;

	REG_WR(bp, reg, val);
}
u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	return min_t(u8, min_t(u8, vf_sb_count(vf), BNX2X_CIDS_PER_VF),
		     BNX2X_VF_MAX_QUEUES);
}
static
int bnx2x_vf_chk_avail_resc(struct bnx2x *bp, struct bnx2x_virtf *vf,
			    struct vf_pf_resc_request *req_resc)
{
	u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
	u8 txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
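	/* a zero count means the VF has not been through ACQUIRE yet, so
	 * fall back to the static per-VF maximum when validating a request
	 */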
	return ((req_resc->num_rxqs <= rxq_cnt) &&
		(req_resc->num_txqs <= txq_cnt) &&
		(req_resc->num_sbs <= vf_sb_count(vf)) &&
		(req_resc->num_mac_filters <= vf_mac_rules_cnt(vf)) &&
		(req_resc->num_vlan_filters <= vf_vlan_rules_cnt(vf)));
}
int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
		     struct vf_pf_resc_request *resc)
{
	int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vf->index) *
		BNX2X_CIDS_PER_VF;

	union cdu_context *base_cxt = (union cdu_context *)
		BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
		(base_vf_cid & (ILT_PAGE_CIDS-1));
	int i;
	/* if state is 'acquired' the VF was not released or FLR'd, in
	 * this case the returned resources match the already acquired
	 * resources. Verify that the requested numbers do not exceed the
	 * already acquired numbers.
	 */
	if (vf->state == VF_ACQUIRED) {
		DP(BNX2X_MSG_IOV, "VF[%d] Trying to re-acquire resources (VF was not released or FLR'd)\n",
		   vf->abs_vfid);
		if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
			BNX2X_ERR("VF[%d] When re-acquiring resources, requested numbers must be <= than previously acquired numbers\n",
				  vf->abs_vfid);
			return -EINVAL;
		}
		return 0;
	}
	/* Otherwise vf state must be 'free' or 'reset' */
	if (vf->state != VF_FREE && vf->state != VF_RESET) {
		BNX2X_ERR("VF[%d] Can not acquire a VF with state %d\n",
			  vf->abs_vfid, vf->state);
		return -EINVAL;
	}
	/* static allocation:
	 * the global maximum numbers are fixed per VF. Fail the request if
	 * the requested numbers exceed these globals
	 */
	if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
		DP(BNX2X_MSG_IOV,
		   "cannot fulfill vf resource request. Placing maximal available values in response\n");
		/* set the max resource in the vf */
		return -ENOMEM;
	}
	/* Set resources counters - 0 request means max available */
	vf_sb_count(vf) = resc->num_sbs;
	vf_rxq_count(vf) = resc->num_rxqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
	vf_txq_count(vf) = resc->num_txqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
	if (resc->num_mac_filters)
		vf_mac_rules_cnt(vf) = resc->num_mac_filters;
	if (resc->num_vlan_filters)
		vf_vlan_rules_cnt(vf) = resc->num_vlan_filters;
	DP(BNX2X_MSG_IOV,
	   "Fulfilling vf request: sb count %d, rx_count %d, tx_count %d, mac_rules_count %d, vlan_rules_count %d\n",
	   vf_sb_count(vf), vf_rxq_count(vf),
	   vf_txq_count(vf), vf_mac_rules_cnt(vf),
	   vf_vlan_rules_cnt(vf));
	/* Initialize the queues */
	if (!vf->vfqs) {
		DP(BNX2X_MSG_IOV, "vf->vfqs was not allocated\n");
		return -EINVAL;
	}

	for_each_vfq(vf, i) {
		struct bnx2x_vf_queue *q = vfq_get(vf, i);

		if (!q) {
			DP(BNX2X_MSG_IOV, "q number %d was not allocated\n", i);
			return -EINVAL;
		}
		q->index = i;
		q->cxt = &((base_cxt + i)->eth);
		q->cid = BNX2X_FIRST_VF_CID + base_vf_cid + i;

		DP(BNX2X_MSG_IOV, "VFQ[%d:%d]: index %d, cid 0x%x, cxt %p\n",
		   vf->abs_vfid, i, q->index, q->cid, q->cxt);

		/* init SP objects */
		bnx2x_vfq_init(bp, vf, q);
	}
	vf->state = VF_ACQUIRED;
	return 0;
}
int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map)
{
	struct bnx2x_func_init_params func_init = {0};
	u16 flags = 0;
	int i;

	/* the sb resources are initialized at this point, do the
	 * FW/HW initializations
	 */
	for_each_vf_sb(vf, i)
		bnx2x_init_sb(bp, (dma_addr_t)sb_map[i], vf->abs_vfid, true,
			      vf_igu_sb(vf, i), vf_igu_sb(vf, i));
	if (vf->state != VF_ACQUIRED) {
		DP(BNX2X_MSG_IOV, "VF[%d] is not in VF_ACQUIRED, but %d\n",
		   vf->abs_vfid, vf->state);
		return -EINVAL;
	}
	/* FLR cleanup epilogue */
	if (bnx2x_vf_flr_clnup_epilog(bp, vf->abs_vfid))
		return -EBUSY;
	/* reset IGU VF statistics: MSIX */
	REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + vf->abs_vfid * 4, 0);
	if (vf->cfg_flags & VF_CFG_STATS)
		flags |= (FUNC_FLG_STATS | FUNC_FLG_SPQ);

	if (vf->cfg_flags & VF_CFG_TPA)
		flags |= FUNC_FLG_TPA;

	if (is_vf_multi(vf))
		flags |= FUNC_FLG_RSS;
	/* function setup */
	func_init.func_flgs = flags;
	func_init.pf_id = BP_FUNC(bp);
	func_init.func_id = FW_VF_HANDLE(vf->abs_vfid);
	func_init.fw_stat_map = vf->fw_stat_map;
	func_init.spq_map = vf->spq_map;
	func_init.spq_prod = 0;
	bnx2x_func_init(bp, &func_init);
	/* Enable the vf */
	bnx2x_vf_enable_access(bp, vf->abs_vfid);
	bnx2x_vf_enable_traffic(bp, vf);
	/* queue protection table */
	for_each_vfq(vf, i)
		bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid,
				    vfq_qzone_id(vf, vfq_get(vf, i)), true);

	vf->state = VF_ENABLED;

	return 0;
}
void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
			      enum channel_tlvs tlv)
{
	/* lock the channel */
	mutex_lock(&vf->op_mutex);

	/* record the locking op */
	vf->op_current = tlv;

	/* log the lock */
	DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel locked by %d\n",
	   vf->abs_vfid, tlv);
}
void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
				enum channel_tlvs expected_tlv)
{
	WARN(expected_tlv != vf->op_current,
	     "lock mismatch: expected %d found %d", expected_tlv,
	     vf->op_current);

	/* unlock the channel */
	mutex_unlock(&vf->op_mutex);

	/* log the unlock */
	DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel unlocked by %d\n",
	   vf->abs_vfid, vf->op_current);

	/* clear the locking op */
	vf->op_current = CHANNEL_TLV_NONE;
}