+static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
+{
+ union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
+ struct bnx2x_fastpath *fp = &bp->fp[fp_index];
+
+ /* Common */
+#ifdef BCM_CNIC
+ if (IS_FCOE_IDX(fp_index)) {
+ memset(sb, 0, sizeof(union host_hc_status_block));
+ fp->status_blk_mapping = 0;
+ } else {
+#endif
+ /* status blocks */
+ if (CHIP_IS_E2(bp))
+ BNX2X_PCI_FREE(sb->e2_sb,
+ bnx2x_fp(bp, fp_index,
+ status_blk_mapping),
+ sizeof(struct host_hc_status_block_e2));
+ else
+ BNX2X_PCI_FREE(sb->e1x_sb,
+ bnx2x_fp(bp, fp_index,
+ status_blk_mapping),
+ sizeof(struct host_hc_status_block_e1x));
+#ifdef BCM_CNIC
+ }
+#endif
+ /* Rx */
+ if (!skip_rx_queue(bp, fp_index)) {
+ bnx2x_free_rx_bds(fp);
+
+ /* fastpath rx rings: rx_buf rx_desc rx_comp */
+ BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
+ BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
+ bnx2x_fp(bp, fp_index, rx_desc_mapping),
+ sizeof(struct eth_rx_bd) * NUM_RX_BD);
+
+ BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
+ bnx2x_fp(bp, fp_index, rx_comp_mapping),
+ sizeof(struct eth_fast_path_rx_cqe) *
+ NUM_RCQ_BD);
+
+ /* SGE ring */
+ BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
+ BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
+ bnx2x_fp(bp, fp_index, rx_sge_mapping),
+ BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
+ }
+
+ /* Tx */
+ if (!skip_tx_queue(bp, fp_index)) {
+ /* fastpath tx rings: tx_buf tx_desc */
+ BNX2X_FREE(bnx2x_fp(bp, fp_index, tx_buf_ring));
+ BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, tx_desc_ring),
+ bnx2x_fp(bp, fp_index, tx_desc_mapping),
+ sizeof(union eth_tx_bd_types) * NUM_TX_BD);
+ }
+ /* end of fastpath */
+}
+
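+/* Free the fastpath memory of all queues */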
+void bnx2x_free_fp_mem(struct bnx2x *bp)
+{
+ int i;
+ for_each_queue(bp, i)
+ bnx2x_free_fp_mem_at(bp, i);
+}
+
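+/* Cache pointers to the status block index arrays in the fastpath so the
+ * chip type does not need to be re-checked on every fast path access.
+ */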
+static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
+{
+ union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
+ if (CHIP_IS_E2(bp)) {
+ bnx2x_fp(bp, index, sb_index_values) =
+ (__le16 *)status_blk.e2_sb->sb.index_values;
+ bnx2x_fp(bp, index, sb_running_index) =
+ (__le16 *)status_blk.e2_sb->sb.running_index;
+ } else {
+ bnx2x_fp(bp, index, sb_index_values) =
+ (__le16 *)status_blk.e1x_sb->sb.index_values;
+ bnx2x_fp(bp, index, sb_running_index) =
+ (__le16 *)status_blk.e1x_sb->sb.running_index;
+ }
+}
+
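+/* Allocate memory for a single fastpath queue. On low memory the Rx ring may
+ * end up smaller than requested; the queue is released and -ENOMEM returned
+ * only if even the FW-required minimum cannot be met.
+ */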
+static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
+{
+ union host_hc_status_block *sb;
+ struct bnx2x_fastpath *fp = &bp->fp[index];
+ int ring_size = 0;
+
+ /* if rx_ring_size specified - use it */
+ int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size :
+ MAX_RX_AVAIL / bp->num_queues;
+
+ /* allocate at least the number of buffers required by the FW */
+ rx_ring_size = max_t(int, fp->disable_tpa ? MIN_RX_SIZE_NONTPA :
+ MIN_RX_SIZE_TPA,
+ rx_ring_size);
+
+ bnx2x_fp(bp, index, bp) = bp;
+ bnx2x_fp(bp, index, index) = index;
+
+ /* Common */
+ sb = &bnx2x_fp(bp, index, status_blk);
+#ifdef BCM_CNIC
+ if (!IS_FCOE_IDX(index)) {
+#endif
+ /* status blocks */
+ if (CHIP_IS_E2(bp))
+ BNX2X_PCI_ALLOC(sb->e2_sb,
+ &bnx2x_fp(bp, index, status_blk_mapping),
+ sizeof(struct host_hc_status_block_e2));
+ else
+ BNX2X_PCI_ALLOC(sb->e1x_sb,
+ &bnx2x_fp(bp, index, status_blk_mapping),
+ sizeof(struct host_hc_status_block_e1x));
+#ifdef BCM_CNIC
+ }
+#endif
+ set_sb_shortcuts(bp, index);
+
+ /* Tx */
+ if (!skip_tx_queue(bp, index)) {
+ /* fastpath tx rings: tx_buf tx_desc */
+ BNX2X_ALLOC(bnx2x_fp(bp, index, tx_buf_ring),
+ sizeof(struct sw_tx_bd) * NUM_TX_BD);
+ BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, tx_desc_ring),
+ &bnx2x_fp(bp, index, tx_desc_mapping),
+ sizeof(union eth_tx_bd_types) * NUM_TX_BD);
+ }
+
+ /* Rx */
+ if (!skip_rx_queue(bp, index)) {
+ /* fastpath rx rings: rx_buf rx_desc rx_comp */
+ BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
+ sizeof(struct sw_rx_bd) * NUM_RX_BD);
+ BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
+ &bnx2x_fp(bp, index, rx_desc_mapping),
+ sizeof(struct eth_rx_bd) * NUM_RX_BD);
+
+ BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
+ &bnx2x_fp(bp, index, rx_comp_mapping),
+ sizeof(struct eth_fast_path_rx_cqe) *
+ NUM_RCQ_BD);
+
+ /* SGE ring */
+ BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
+ sizeof(struct sw_rx_page) * NUM_RX_SGE);
+ BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
+ &bnx2x_fp(bp, index, rx_sge_mapping),
+ BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
+ /* RX BD ring */
+ bnx2x_set_next_page_rx_bd(fp);
+
+ /* CQ ring */
+ bnx2x_set_next_page_rx_cq(fp);
+
+ /* BDs */
+ ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
+ if (ring_size < rx_ring_size)
+ goto alloc_mem_err;
+ }
+
+ return 0;
+
+/* handles low memory cases */
+alloc_mem_err:
+ BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
+ index, ring_size);
+ /* The FW will drop all packets if the queue is not big enough.
+ * In that case we disable the queue.
+ * The minimum size differs for TPA and non-TPA queues.
+ */
+ if (ring_size < (fp->disable_tpa ?
+ MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
+ /* release memory allocated for this queue */
+ bnx2x_free_fp_mem_at(bp, index);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
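+/* Allocate fastpath memory for all queues; shrink the RSS queue count if
+ * some of the non-leading queues cannot be fully allocated.
+ */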
+int bnx2x_alloc_fp_mem(struct bnx2x *bp)
+{
+ int i;
+
+ /*
+ * 1. Allocate the leading FP - fatal on error
+ * 2. {CNIC} Allocate the FCoE FP - fatal on error
+ * 3. Allocate the RSS FPs - reduce the number of queues on error
+ */
+
+ /* leading */
+ if (bnx2x_alloc_fp_mem_at(bp, 0))
+ return -ENOMEM;
+#ifdef BCM_CNIC
+ /* FCoE */
+ if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX))
+ return -ENOMEM;
+#endif
+ /* RSS */
+ for_each_nondefault_eth_queue(bp, i)
+ if (bnx2x_alloc_fp_mem_at(bp, i))
+ break;
+
+ /* handle memory failures */
+ if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
+ int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
+
+ WARN_ON(delta < 0);
+#ifdef BCM_CNIC
+ /*
+ * Move the non-eth FPs next to the last eth FP.
+ * This must be done in this order:
+ * FCOE_IDX < FWD_IDX < OOO_IDX
+ */
+
+ /* move FCoE fp */
+ bnx2x_move_fp(bp, FCOE_IDX, FCOE_IDX - delta);
+#endif
+ bp->num_queues -= delta;
+ BNX2X_ERR("Adjusted num of queues from %d to %d\n",
+ bp->num_queues + delta, bp->num_queues);
+ }
+
+ return 0;
+}