/* bnx2x_sriov.c: Broadcom Everest network driver.
 *
 * Copyright 2009-2012 Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2, available
 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a
 * license other than the GPL, without Broadcom's express prior written
 * consent.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Shmulik Ravid <shmulikr@broadcom.com>
 *             Ariel Elior <ariele@broadcom.com>
 */
21 #include "bnx2x_init.h"
22 #include "bnx2x_sriov.h"
/* Map an absolute VF id to the VF's index in this PF's VF database.
 * NOTE(review): fragment — this extraction drops the enclosing loop,
 * braces and return statements; only the match test is visible below.
 * Presumably iterates idx over all VFs and returns the matching index,
 * or an out-of-range value when no VF matches — confirm against the
 * full source (the caller below treats idx >= BNX2X_NR_VIRTFN as "not
 * found"). */
23 int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
		/* match: this VF slot's recorded abs_vfid equals the one requested */
28 if (bnx2x_vf(bp, idx, abs_vfid) == abs_vfid)
/* Look up a VF's state structure by its absolute VF id.
 * Returns the bnx2x_virtf pointer, or NULL when abs_vfid does not
 * belong to any VF of this PF (idx falls outside the VF array).
 * NOTE(review): fragment — surrounding braces are missing from this
 * extraction; the two visible statements form the whole body. */
34 struct bnx2x_virtf *bnx2x_vf_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
	/* resolve the index; an out-of-range index signals "no such VF" */
36 u16 idx = (u16)bnx2x_vf_idx_by_abs_fid(bp, abs_vfid);
37 return (idx < BNX2X_NR_VIRTFN(bp)) ? BP_VF(bp, idx) : NULL;
/* Report whether ARI (Alternative Routing-ID Interpretation) is enabled
 * on the upstream bridge of this device — a PCIe prerequisite for SR-IOV.
 * Returns non-zero when the parent bridge exists and has ARI enabled.
 * NOTE(review): fragment — braces dropped by this extraction. */
40 static int bnx2x_ari_enabled(struct pci_dev *dev)
42 return dev->bus->self && dev->bus->self->ari_enabled;
/* Record an IGU status-block id against the VF that owns it.
 * NOTE(review): fragment — the return type line, braces and (judging by
 * the line-number gap before the assignment) a guard/conditional around
 * the store are missing from this extraction; presumably the base id is
 * only set for the first SB found per VF — confirm in full source. */
46 bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid)
	/* find the owning VF; lookup may yield NULL for an unknown abs_vfid */
48 struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
51 vf->igu_base_id = igu_sb_id;
/* Walk the IGU CAM (interrupt mapping memory) and attribute every valid
 * status block belonging to a VF to that VF via bnx2x_vf_set_igu_info().
 * NOTE(review): fragment — return type, local declarations, braces and a
 * probable `continue` after the VALID test are missing from this
 * extraction; read alongside the full source before modifying. */
57 bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
63 /* IGU in normal mode - read CAM */
64 for (sb_id = 0; sb_id < IGU_REG_MAPPING_MEMORY_SIZE; sb_id++) {
		/* one CAM entry per status block: valid bit + encoded function id */
65 val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + sb_id * 4);
66 if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
68 fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID);
		/* entries not marked as PF-owned belong to a VF — record them */
69 if (!(fid & IGU_FID_ENCODE_IS_PF))
70 bnx2x_vf_set_igu_info(bp, sb_id,
71 (fid & IGU_FID_VF_NUM_MASK));
		/* debug trace of every valid CAM entry, PF- or VF-owned */
73 DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n",
74 ((fid & IGU_FID_ENCODE_IS_PF) ? "PF" : "VF"),
75 ((fid & IGU_FID_ENCODE_IS_PF) ? (fid & IGU_FID_PF_NUM_MASK) :
76 (fid & IGU_FID_VF_NUM_MASK)), sb_id,
77 GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR))
/* Free the VF database allocations (inverse of bnx2x_iov_init_one()'s
 * setup). kfree(NULL) is a no-op, so partially-initialized databases are
 * safe to pass here — used on both the error path and device removal.
 * NOTE(review): fragment — the frees of bp->vfdb->vfs and bp->vfdb
 * itself (and braces) are missing from this extraction; only the queue
 * array free is visible. */
81 static void __bnx2x_iov_free_vfdb(struct bnx2x *bp)
84 kfree(bp->vfdb->vfqs);
/* Read the device's SR-IOV extended capability out of PCI config space
 * into the driver-private bnx2x_sriov structure (ctrl/total/initial VF
 * counts, VF routing-id offset and stride, page size, cap and link).
 * Returns 0 on success; the missing lines presumably return an error
 * when the capability is absent — confirm in full source.
 * NOTE(review): fragment — locals, braces and returns dropped by this
 * extraction. */
91 static int bnx2x_sriov_pci_cfg_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
94 struct pci_dev *dev = bp->pdev;
	/* locate the SR-IOV extended capability; pos == 0 means not present */
96 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
98 BNX2X_ERR("failed to find SRIOV capability in device\n");
	/* capability found — snapshot every SR-IOV register we care about */
103 DP(BNX2X_MSG_IOV, "sriov ext pos %d\n", pos);
104 pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
105 pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &iov->total);
106 pci_read_config_word(dev, pos + PCI_SRIOV_INITIAL_VF, &iov->initial);
107 pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
108 pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
109 pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
110 pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
111 pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
/* Gather all SR-IOV information for this PF: the PCI capability fields
 * (via bnx2x_sriov_pci_cfg_info()) plus the chip's notion of the first
 * VF id belonging to this PF, read from the GRC through the PCICFG
 * window. Returns 0 on success, non-zero when the capability read
 * fails.
 * NOTE(review): fragment — locals, braces, the bar-count logic hinted
 * at by the "/* get the number of SRIOV bars *​/" comment, and the
 * returns are missing from this extraction. */
116 static int bnx2x_sriov_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
120 /* read the SRIOV capability structure
121 * The fields can be read via configuration read or
122 * directly from the device (starting at offset PCICFG_OFFSET)
124 if (bnx2x_sriov_pci_cfg_info(bp, iov))
127 /* get the number of SRIOV bars */
130 /* read the first_vfid */
131 val = REG_RD(bp, PCICFG_OFFSET + GRC_CONFIG_REG_PF_INIT_VF);
	/* hardware encodes first-VF-number / 8 per path; rebase it so the
	 * result is relative to this path's VF numbering */
132 iov->first_vf_in_pf = ((val & GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK)
133 * 8) - (BNX2X_MAX_NUM_OF_VFS * BP_PATH(bp));
	/* dump everything we learned for debugging */
136 "IOV info[%d]: first vf %d, nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
138 iov->first_vf_in_pf, iov->nres, iov->cap, iov->ctrl, iov->total,
139 iov->initial, iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);
/* Sum the per-VF status-block allocations to get the total queue count
 * needed across all VFs (used to size the global VF queue array).
 * NOTE(review): fragment — the accumulator declaration, the loop over
 * VF indices i, and the return are missing from this extraction. */
144 static u8 bnx2x_iov_get_max_queue_count(struct bnx2x *bp)
151 queue_count += bnx2x_vf(bp, i, alloc_resc.num_sbs);
156 /* must be called after PF bars are mapped */
/* One-time SR-IOV initialization for this PF:
 *   1. verify prerequisites (SR-IOV capability, chip rev, module param,
 *      CID layout, MSI-X interrupt mode, ARI, IGU normal mode);
 *   2. allocate the VF database (bp->vfdb) and read SR-IOV info into it;
 *   3. clamp nr_virtfn to min(hw total, num_vfs_param) and allocate the
 *      VF array;
 *   4. seed each VF's index/abs_vfid/state and op channel, re-read the
 *      IGU CAM, then allocate the global VF queue array.
 * On any failure the trailing `failed` path frees the database.
 * NOTE(review): fragment — returns, `goto failed` statements, closing
 * braces, the per-VF loop header and several error branches are missing
 * from this extraction; treat the visible lines as the skeleton only. */
157 int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
161 struct bnx2x_sriov *iov;
162 struct pci_dev *dev = bp->pdev;
166 /* verify sriov capability is present in configuration space */
167 if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV)) {
168 DP(BNX2X_MSG_IOV, "no sriov - capability not found\n");
176 /* verify chip revision */
180 /* check if SRIOV support is turned off */
184 /* SRIOV assumes that num of PF CIDs < BNX2X_FIRST_VF_CID */
185 if (BNX2X_L2_MAX_CID(bp) >= BNX2X_FIRST_VF_CID) {
186 BNX2X_ERR("PF cids %d are overspilling into vf space (starts at %d). Abort SRIOV\n",
187 BNX2X_L2_MAX_CID(bp), BNX2X_FIRST_VF_CID);
191 /* SRIOV can be enabled only with MSIX */
192 if (int_mode_param == BNX2X_INT_MODE_MSI ||
193 int_mode_param == BNX2X_INT_MODE_INTX) {
194 BNX2X_ERR("Forced MSI/INTx mode is incompatible with SRIOV\n");
198 /* verify ari is enabled */
199 if (!bnx2x_ari_enabled(bp->pdev)) {
200 BNX2X_ERR("ARI not supported, SRIOV can not be enabled\n");
204 /* verify igu is in normal mode */
205 if (CHIP_INT_MODE_IS_BC(bp)) {
206 BNX2X_ERR("IGU not normal mode, SRIOV can not be enabled\n");
210 /* allocate the vfs database */
211 bp->vfdb = kzalloc(sizeof(*(bp->vfdb)), GFP_KERNEL);
213 BNX2X_ERR("failed to allocate vf database\n");
218 /* get the sriov info - Linux already collected all the pertinent
219 * information, however the sriov structure is for the private use
220 * of the pci module. Also we want this information regardless
221 * of the hypervisor.
223 iov = &(bp->vfdb->sriov);
224 err = bnx2x_sriov_info(bp, iov);
228 /* SR-IOV capability was enabled but there are no VFs */
232 /* calculate the actual number of VFs */
233 iov->nr_virtfn = min_t(u16, iov->total, (u16)num_vfs_param);
235 /* allocate the vf array */
236 bp->vfdb->vfs = kzalloc(sizeof(struct bnx2x_virtf) *
237 BNX2X_NR_VIRTFN(bp), GFP_KERNEL);
238 if (!bp->vfdb->vfs) {
239 BNX2X_ERR("failed to allocate vf array\n");
244 /* Initial VF init - index and abs_vfid - nr_virtfn must be set */
246 bnx2x_vf(bp, i, index) = i;
247 bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i;
248 bnx2x_vf(bp, i, state) = VF_FREE;
249 INIT_LIST_HEAD(&bnx2x_vf(bp, i, op_list_head));
	/* serialize VF<->PF channel operations per VF */
250 mutex_init(&bnx2x_vf(bp, i, op_mutex));
251 bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE;
254 /* re-read the IGU CAM for VFs - index and abs_vfid must be set */
255 bnx2x_get_vf_igu_cam_info(bp);
257 /* get the total queue count and allocate the global queue arrays */
258 qcount = bnx2x_iov_get_max_queue_count(bp);
260 /* allocate the queue arrays for all VFs */
261 bp->vfdb->vfqs = kzalloc(qcount * sizeof(struct bnx2x_vf_queue),
263 if (!bp->vfdb->vfqs) {
264 BNX2X_ERR("failed to allocate vf queue array\n");
	/* common error exit: log and undo every allocation made above */
271 DP(BNX2X_MSG_IOV, "Failed err=%d\n", err);
272 __bnx2x_iov_free_vfdb(bp);
276 /* called by bnx2x_init_hw_func, returns the next ilt line */
/* Populate the ILT (internal lookup table) lines that back the VF
 * context (CXT) pages, starting at `line`. Each iteration maps one
 * pre-allocated DMA page of VF contexts into consecutive ILT lines.
 * Presumably returns line + the number of lines consumed — the return
 * statement is outside this extraction; confirm in full source.
 * NOTE(review): fragment — closing braces and the return are missing. */
277 int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line)
280 struct bnx2x_ilt *ilt = BP_ILT(bp);
285 /* set vfs ilt lines */
286 for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
		/* each ILT line points at one VF context DMA page */
287 struct hw_dma *hw_cxt = BP_VF_CXT_PAGE(bp, i);
289 ilt->lines[line+i].page = hw_cxt->addr;
290 ilt->lines[line+i].page_mapping = hw_cxt->mapping;
291 ilt->lines[line+i].size = hw_cxt->size; /* doesn't matter */
/* Teardown counterpart of bnx2x_iov_init_one(): release the VF database
 * on device removal. Safe to call when SR-IOV was never enabled (the
 * missing guard presumably returns early in that case — confirm).
 * NOTE(review): fragment — the early-return guard and braces are
 * missing from this extraction. */
296 void bnx2x_iov_remove_one(struct bnx2x *bp)
298 /* if SRIOV is not enabled there's nothing to do */
302 /* free vf database */
303 __bnx2x_iov_free_vfdb(bp);