/* bnx2x_cmn.h: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 */
#ifndef BNX2X_CMN_H
#define BNX2X_CMN_H

#include <linux/types.h>
#include <linux/netdevice.h>

#include "bnx2x.h"

/*********************** Interfaces ****************************
 *  Functions that need to be implemented by each driver version
 */

/**
 * Initialize link parameters structure variables.
 *
 * @param bp		driver handle
 * @param load_mode	current mode
 *
 * @return u8
 */
u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode);

/**
 * Configure hw according to link parameters structure.
 *
 * @param bp	driver handle
 */
void bnx2x_link_set(struct bnx2x *bp);

/**
 * Query link status.
 *
 * @param bp	driver handle
 * @param is_serdes
 *
 * @return 0 - link is UP
 */
u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes);

/**
 * Handles link status change.
 *
 * @param bp	driver handle
 */
void bnx2x__link_status_update(struct bnx2x *bp);

/**
 * MSI-X slowpath interrupt handler.
 *
 * @param irq
 * @param dev_instance
 */
irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance);

/**
 * Non-MSI-X interrupt handler.
 *
 * @param irq
 * @param dev_instance
 */
irqreturn_t bnx2x_interrupt(int irq, void *dev_instance);

/**
 * Send command to cnic driver.
 *
 * @param bp	driver handle
 * @param cmd	command
 */
int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);

/**
 * Provides cnic information for proper interrupt handling.
 *
 * @param bp	driver handle
 */
void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);

/**
 * Enable HW interrupts.
 *
 * @param bp	driver handle
 */
void bnx2x_int_enable(struct bnx2x *bp);

/**
 * Disable HW interrupts.
 *
 * @param bp	driver handle
 */
void bnx2x_int_disable(struct bnx2x *bp);

/**
 * Disable interrupts. This function ensures that no ISRs or
 * SP DPCs (sp_task) are running after it returns.
 *
 * @param bp		driver handle
 * @param disable_hw	if true, disable HW interrupts.
 */
void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw);

/**
 * Loads device firmware.
 *
 * @param bp	driver handle
 *
 * @return int
 */
int bnx2x_init_firmware(struct bnx2x *bp);

/**
 * Init HW blocks according to current initialization stage:
 * COMMON, PORT or FUNCTION.
 *
 * @param bp		driver handle
 * @param load_code	COMMON, PORT or FUNCTION
 *
 * @return int
 */
int bnx2x_init_hw(struct bnx2x *bp, u32 load_code);

/**
 * Init driver internals.
 *
 * @param bp		driver handle
 * @param load_code	COMMON, PORT or FUNCTION
 */
void bnx2x_nic_init(struct bnx2x *bp, u32 load_code);

/**
 * Allocate driver's memory.
 *
 * @param bp	driver handle
 *
 * @return int
 */
int bnx2x_alloc_mem(struct bnx2x *bp);

/**
 * Release driver's memory.
 *
 * @param bp	driver handle
 */
void bnx2x_free_mem(struct bnx2x *bp);

/**
 * Bring up an eth client.
 *
 * @param bp		driver handle
 * @param fp		fastpath handle
 * @param is_leading	set if this is the leading client
 *
 * @return int
 */
int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
		       int is_leading);

/**
 * Bring down an eth client.
 *
 * @param bp	driver handle
 * @param p	ramrod parameters
 *
 * @return int
 */
int bnx2x_stop_fw_client(struct bnx2x *bp,
			 struct bnx2x_client_ramrod_params *p);

/**
 * Set number of queues according to mode.
 *
 * @param bp	driver handle
 */
void bnx2x_set_num_queues_msix(struct bnx2x *bp);

/**
 * Cleanup chip internals:
 * - Cleanup MAC configuration.
 *
 * @param bp		driver handle
 * @param unload_mode	unload mode
 */
void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode);

/**
 * Acquire HW lock.
 *
 * @param bp		driver handle
 * @param resource	resource bit which was locked
 *
 * @return int
 */
int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource);

/**
 * Release HW lock.
 *
 * @param bp		driver handle
 * @param resource	resource bit which was locked
 *
 * @return int
 */
int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource);

/**
 * Configure eth MAC address in the HW according to the value in
 * netdev->dev_addr for 57711.
 *
 * @param bp	driver handle
 * @param set	set or clear the MAC address
 */
void bnx2x_set_eth_mac(struct bnx2x *bp, int set);

/**
 * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
 * MAC(s). The function will wait until the ramrod completion
 * returns.
 *
 * @param bp	driver handle
 * @param set	set or clear the CAM entry
 *
 * @return 0 if success, -ENODEV if ramrod doesn't return.
 */
int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set);

/**
 * Initialize status block in FW and HW.
 *
 * @param bp		driver handle
 * @param mapping	DMA address of the status block
 * @param vfid		VF id
 * @param vf_valid	is the status block for a VF
 * @param fw_sb_id	FW status block id
 * @param igu_sb_id	IGU status block id
 */
void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
		   u8 vf_valid, int fw_sb_id, int igu_sb_id);

/**
 * Reconfigure FW/HW according to dev->flags rx mode.
 *
 * @param dev	net_device
 */
void bnx2x_set_rx_mode(struct net_device *dev);

/**
 * Configure MAC filtering rules in the FW.
 *
 * @param bp	driver handle
 */
void bnx2x_set_storm_rx_mode(struct bnx2x *bp);

/* Parity errors related */
void bnx2x_inc_load_cnt(struct bnx2x *bp);
u32 bnx2x_dec_load_cnt(struct bnx2x *bp);
bool bnx2x_chk_parity_attn(struct bnx2x *bp);
bool bnx2x_reset_is_done(struct bnx2x *bp);
void bnx2x_disable_close_the_gate(struct bnx2x *bp);

/**
 * Perform statistics handling according to event.
 *
 * @param bp	driver handle
 * @param event	bnx2x_stats_event
 */
void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);

/**
 * Handle a slowpath event.
 *
 * @param fp		fastpath handle for the event
 * @param rr_cqe	eth_rx_cqe
 */
void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe);

/**
 * Init/halt function before/after sending
 * CLIENT_SETUP/CFC_DEL for the first/last client.
 *
 * @param bp	driver handle
 *
 * @return int
 */
int bnx2x_func_start(struct bnx2x *bp);
int bnx2x_func_stop(struct bnx2x *bp);

/**
 * Prepare ILT configurations according to current driver
 * parameters.
 *
 * @param bp	driver handle
 */
void bnx2x_ilt_set_info(struct bnx2x *bp);

static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	barrier(); /* status block is written to by the chip */
	fp->fp_hc_idx = fp->sb_running_index[SM_RX_ID];
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since the FW
	 * assumes BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp,
		       BAR_USTRORM_INTMEM + fp->ustorm_rx_prods_offset + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}

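/*
 * A minimal usage sketch (hypothetical caller): once the RX rings have
 * been refilled in the poll loop, all three producers are pushed to the
 * chip in one shot using the software copies of the ring indices:
 *
 *	bnx2x_update_rx_prod(bp, fp, bd_prod, sw_comp_prod, sge_prod);
 */
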
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

	/* Make sure that ACK is written */
	mmiowb();
	barrier();
}

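/*
 * Typical use (a sketch, mirroring the end of a NAPI poll): ack the last
 * status block index the driver has seen and re-enable the IGU line:
 *
 *	bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
 *		     le16_to_cpu(fp->fp_hc_idx), IGU_INT_ENABLE, 1);
 */
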
static inline u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}

/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return fp->tx_pkt_prod != fp->tx_pkt_cons;
}

static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}

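/*
 * Worked example (illustrative numbers only): with 16-bit indices that
 * have wrapped, e.g. prod = 0x0005 and cons = 0xfffb, SUB_S16() yields
 * 10, so ten BDs are outstanding; NUM_TX_RINGS is added on top because
 * the "next-page" BDs can never carry packet data.
 */
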
static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
	u16 hw_cons;

	/* Tell compiler that status block fields can change */
	barrier();
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	return hw_cons != fp->tx_pkt_cons;
}

static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
{
	u16 rx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);

	/* Skip over the "next page" element at the end of a CQE page */
	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		rx_cons_sb++;
	return (fp->rx_comp_cons != rx_cons_sb);
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = dma_map_page(&bp->pdev->dev, page, 0,
			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	dma_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

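/*
 * Expected refill pattern (hypothetical caller; the variable names are
 * illustrative only):
 *
 *	while (refilled < needed) {
 *		if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0)
 *			break;
 *		ring_prod = NEXT_SGE_IDX(ring_prod);
 *		refilled++;
 *	}
 */
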
static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_size,
				 DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* Note that we are not allocating a new skb; we are just moving one
 * from cons to prod. We are not creating a new mapping, so there is no
 * need to check for dma_mapping_error().
 */
static inline void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
				      struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	dma_sync_single_for_device(&bp->pdev->dev,
				   dma_unmap_addr(cons_rx_buf, mapping),
				   RX_COPY_THRESH, DMA_FROM_DEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	dma_unmap_addr_set(prod_rx_buf, mapping,
			   dma_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
				       struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
		struct sk_buff *skb = rx_buf->skb;

		if (skb == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}

		if (fp->tpa_state[i] == BNX2X_TPA_START)
			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size, DMA_FROM_DEVICE);

		dev_kfree_skb(skb);
		rx_buf->skb = NULL;
	}
}

static inline void bnx2x_init_tx_rings(struct bnx2x *bp)
{
	int i, j;

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 1; i <= NUM_TX_RINGS; i++) {
			struct eth_tx_next_bd *tx_next_bd =
				&fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;

			tx_next_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
			tx_next_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
		}

		SET_FLAG(fp->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1);
		fp->tx_db.data.zero_fill1 = 0;
		fp->tx_db.data.prod = 0;
	}
}

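/*
 * Each ring page ends in one or two "next page" entries rather than real
 * descriptors. The helpers below chain every page to the one after it,
 * wrapping back to page 0 at the end (hence the "i % NUM_*" arithmetic).
 */
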
static inline void bnx2x_set_next_page_rx_bd(struct bnx2x_fastpath *fp)
{
	int i;

	for (i = 1; i <= NUM_RX_RINGS; i++) {
		struct eth_rx_bd *rx_bd;

		rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
		rx_bd->addr_hi =
			cpu_to_le32(U64_HI(fp->rx_desc_mapping +
				    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
		rx_bd->addr_lo =
			cpu_to_le32(U64_LO(fp->rx_desc_mapping +
				    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
	}
}

static inline void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
{
	int i;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		struct eth_rx_sge *sge;

		sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
		sge->addr_hi =
			cpu_to_le32(U64_HI(fp->rx_sge_mapping +
				    BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
		sge->addr_lo =
			cpu_to_le32(U64_LO(fp->rx_sge_mapping +
				    BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
	}
}

static inline void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
{
	int i;

	for (i = 1; i <= NUM_RCQ_RINGS; i++) {
		struct eth_rx_cqe_next_page *nextpg;

		nextpg = (struct eth_rx_cqe_next_page *)
			&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
		nextpg->addr_hi =
			cpu_to_le32(U64_HI(fp->rx_comp_mapping +
				    BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
		nextpg->addr_lo =
			cpu_to_le32(U64_LO(fp->rx_comp_mapping +
				    BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
	}
}

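/*
 * Write a structure into the chip's storm memory as a sequence of 32-bit
 * register writes; the size is assumed to be a multiple of 4 bytes.
 */
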
static inline void __storm_memset_struct(struct bnx2x *bp,
					 u32 addr, size_t size, u32 *data)
{
	int i;

	for (i = 0; i < size/4; i++)
		REG_WR(bp, addr + (i * 4), data[i]);
}

static inline void storm_memset_mac_filters(struct bnx2x *bp,
			struct tstorm_eth_mac_filter_config *mac_filters,
			u16 abs_fid)
{
	size_t size = sizeof(struct tstorm_eth_mac_filter_config);

	u32 addr = BAR_TSTRORM_INTMEM +
			TSTORM_MAC_FILTER_CONFIG_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)mac_filters);
}

static inline void storm_memset_cmng(struct bnx2x *bp,
				     struct cmng_struct_per_port *cmng,
				     u8 port)
{
	size_t size = sizeof(struct cmng_struct_per_port);

	u32 addr = BAR_XSTRORM_INTMEM +
			XSTORM_CMNG_PER_PORT_VARS_OFFSET(port);

	__storm_memset_struct(bp, addr, size, (u32 *)cmng);
}

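/*
 * Usage sketch (illustrative): dropping all MAC filters for a function
 * amounts to pushing a zeroed config to the TSTORM, e.g.:
 *
 *	struct tstorm_eth_mac_filter_config mac_filters = {0};
 *
 *	storm_memset_mac_filters(bp, &mac_filters, BP_FUNC(bp));
 */
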
/* HW Lock for shared dual port PHYs */
void bnx2x_acquire_phy_lock(struct bnx2x *bp);
void bnx2x_release_phy_lock(struct bnx2x *bp);

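/* Fast path, netdev and power management entry points */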
void bnx2x_link_report(struct bnx2x *bp);
int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget);
int bnx2x_tx_int(struct bnx2x_fastpath *fp);
void bnx2x_init_rx_rings(struct bnx2x *bp);
netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev);
int bnx2x_change_mac_addr(struct net_device *dev, void *p);
void bnx2x_tx_timeout(struct net_device *dev);
void bnx2x_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp);
void bnx2x_netif_start(struct bnx2x *bp);
void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw);
void bnx2x_free_irq(struct bnx2x *bp, bool disable_only);
int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state);
int bnx2x_resume(struct pci_dev *pdev);
void bnx2x_free_skbs(struct bnx2x *bp);
int bnx2x_change_mtu(struct net_device *dev, int new_mtu);
int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);
int bnx2x_nic_load(struct bnx2x *bp, int load_mode);
int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);

/**
 * Allocate/release memories outside the main driver structure.
 *
 * @param bp	driver handle
 *
 * @return int
 */
int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp);
void bnx2x_free_mem_bp(struct bnx2x *bp);

#define BNX2X_FW_IP_HDR_ALIGN_PAD	2 /* FW places hdr with this padding */

#endif /* BNX2X_CMN_H */