Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net
author David S. Miller <davem@davemloft.net>
Sat, 20 Aug 2011 17:39:12 +0000 (10:39 -0700)
committer David S. Miller <davem@davemloft.net>
Sat, 20 Aug 2011 17:39:12 +0000 (10:39 -0700)
22 files changed:
MAINTAINERS
drivers/net/bonding/bond_main.c
drivers/net/ethernet/amd/pcnet32.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
drivers/net/ethernet/freescale/gianfar.c
drivers/net/ethernet/freescale/gianfar_ethtool.c
drivers/net/ethernet/intel/e1000e/82571.c
drivers/net/ethernet/intel/e1000e/e1000.h
drivers/net/ethernet/intel/e1000e/ethtool.c
drivers/net/ethernet/intel/e1000e/ich8lan.c
drivers/net/ethernet/intel/e1000e/lib.c
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/nvidia/forcedeth.c
drivers/net/ethernet/renesas/sh_eth.c
drivers/net/ethernet/via/via-velocity.c
drivers/net/vmxnet3/vmxnet3_drv.c
net/ipv4/route.c
net/ipv6/sit.c

diff --cc MAINTAINERS
Simple merge
diff --cc drivers/net/bonding/bond_main.c
Simple merge
diff --cc drivers/net/ethernet/amd/pcnet32.c
index e19c1a7,0000000..c90fe91
mode 100644,000000..100644
--- /dev/null
@@@ -1,2937 -1,0 +1,2937 @@@
 +/* pcnet32.c: An AMD PCnet32 ethernet driver for linux. */
 +/*
 + *    Copyright 1996-1999 Thomas Bogendoerfer
 + *
 + *    Derived from the lance driver written 1993,1994,1995 by Donald Becker.
 + *
 + *    Copyright 1993 United States Government as represented by the
 + *    Director, National Security Agency.
 + *
 + *    This software may be used and distributed according to the terms
 + *    of the GNU General Public License, incorporated herein by reference.
 + *
 + *    This driver is for PCnet32 and PCnetPCI based ethercards
 + */
 +/**************************************************************************
 + *  23 Oct, 2000.
 + *  Fixed a few bugs, related to running the controller in 32bit mode.
 + *
 + *  Carsten Langgaard, carstenl@mips.com
 + *  Copyright (C) 2000 MIPS Technologies, Inc.  All rights reserved.
 + *
 + *************************************************************************/
 +
 +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 +
 +#define DRV_NAME      "pcnet32"
 +#define DRV_VERSION   "1.35"
 +#define DRV_RELDATE   "21.Apr.2008"
 +#define PFX           DRV_NAME ": "
 +
 +static const char *const version =
 +    DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " tsbogend@alpha.franken.de\n";
 +
 +#include <linux/module.h>
 +#include <linux/kernel.h>
 +#include <linux/sched.h>
 +#include <linux/string.h>
 +#include <linux/errno.h>
 +#include <linux/ioport.h>
 +#include <linux/slab.h>
 +#include <linux/interrupt.h>
 +#include <linux/pci.h>
 +#include <linux/delay.h>
 +#include <linux/init.h>
 +#include <linux/ethtool.h>
 +#include <linux/mii.h>
 +#include <linux/crc32.h>
 +#include <linux/netdevice.h>
 +#include <linux/etherdevice.h>
 +#include <linux/if_ether.h>
 +#include <linux/skbuff.h>
 +#include <linux/spinlock.h>
 +#include <linux/moduleparam.h>
 +#include <linux/bitops.h>
 +#include <linux/io.h>
 +#include <linux/uaccess.h>
 +
 +#include <asm/dma.h>
 +#include <asm/irq.h>
 +
 +/*
 + * PCI device identifiers for "new style" Linux PCI Device Drivers
 + */
 +static DEFINE_PCI_DEVICE_TABLE(pcnet32_pci_tbl) = {
 +      { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE_HOME), },
 +      { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE), },
 +
 +      /*
 +       * Adapters that were sold with IBM's RS/6000 or pSeries hardware have
 +       * the incorrect vendor id.
 +       */
 +      { PCI_DEVICE(PCI_VENDOR_ID_TRIDENT, PCI_DEVICE_ID_AMD_LANCE),
 +        .class = (PCI_CLASS_NETWORK_ETHERNET << 8), .class_mask = 0xffff00, },
 +
 +      { }     /* terminate list */
 +};
 +
 +MODULE_DEVICE_TABLE(pci, pcnet32_pci_tbl);
 +
 +static int cards_found;
 +
 +/*
 + * VLB I/O addresses
 + */
- static unsigned int pcnet32_portlist[] __initdata =
++static unsigned int pcnet32_portlist[] =
 +    { 0x300, 0x320, 0x340, 0x360, 0 };
 +
 +static int pcnet32_debug;
 +static int tx_start = 1;      /* Mapping -- 0:20, 1:64, 2:128, 3:~220 (depends on chip vers) */
 +static int pcnet32vlb;                /* check for VLB cards ? */
 +
 +static struct net_device *pcnet32_dev;
 +
 +static int max_interrupt_work = 2;
 +static int rx_copybreak = 200;
 +
 +#define PCNET32_PORT_AUI      0x00
 +#define PCNET32_PORT_10BT     0x01
 +#define PCNET32_PORT_GPSI     0x02
 +#define PCNET32_PORT_MII      0x03
 +
 +#define PCNET32_PORT_PORTSEL  0x03
 +#define PCNET32_PORT_ASEL     0x04
 +#define PCNET32_PORT_100      0x40
 +#define PCNET32_PORT_FD             0x80
 +
 +#define PCNET32_DMA_MASK 0xffffffff
 +
 +#define PCNET32_WATCHDOG_TIMEOUT (jiffies + (2 * HZ))
 +#define PCNET32_BLINK_TIMEOUT (jiffies + (HZ/4))
 +
 +/*
 + * table to translate option values from tulip
 + * to internal options
 + */
 +static const unsigned char options_mapping[] = {
 +      PCNET32_PORT_ASEL,                      /*  0 Auto-select      */
 +      PCNET32_PORT_AUI,                       /*  1 BNC/AUI          */
 +      PCNET32_PORT_AUI,                       /*  2 AUI/BNC          */
 +      PCNET32_PORT_ASEL,                      /*  3 not supported    */
 +      PCNET32_PORT_10BT | PCNET32_PORT_FD,    /*  4 10baseT-FD       */
 +      PCNET32_PORT_ASEL,                      /*  5 not supported    */
 +      PCNET32_PORT_ASEL,                      /*  6 not supported    */
 +      PCNET32_PORT_ASEL,                      /*  7 not supported    */
 +      PCNET32_PORT_ASEL,                      /*  8 not supported    */
 +      PCNET32_PORT_MII,                       /*  9 MII 10baseT      */
 +      PCNET32_PORT_MII | PCNET32_PORT_FD,     /* 10 MII 10baseT-FD   */
 +      PCNET32_PORT_MII,                       /* 11 MII (autosel)    */
 +      PCNET32_PORT_10BT,                      /* 12 10BaseT          */
 +      PCNET32_PORT_MII | PCNET32_PORT_100,    /* 13 MII 100BaseTx    */
 +                                              /* 14 MII 100BaseTx-FD */
 +      PCNET32_PORT_MII | PCNET32_PORT_100 | PCNET32_PORT_FD,
 +      PCNET32_PORT_ASEL                       /* 15 not supported    */
 +};
 +
 +static const char pcnet32_gstrings_test[][ETH_GSTRING_LEN] = {
 +      "Loopback test  (offline)"
 +};
 +
 +#define PCNET32_TEST_LEN      ARRAY_SIZE(pcnet32_gstrings_test)
 +
 +#define PCNET32_NUM_REGS 136
 +
 +#define MAX_UNITS 8           /* More are supported, limit only on options */
 +static int options[MAX_UNITS];
 +static int full_duplex[MAX_UNITS];
 +static int homepna[MAX_UNITS];
 +
 +/*
 + *                            Theory of Operation
 + *
 + * This driver uses the same software structure as the normal lance
 + * driver. So look for a verbose description in lance.c. The difference
 + * from the normal lance driver is the use of the 32bit mode of PCnet32
 + * and PCnetPCI chips. Because these chips are 32bit chips, there is no
 + * 16MB limitation and we don't need bounce buffers.
 + */
 +
 +/*
 + * Set the number of Tx and Rx buffers, using Log_2(# buffers).
 + * Reasonable values are 4 Tx buffers (log2 == 2) and 16 Rx buffers
 + * (log2 == 4); the defaults below use 16 Tx (log2 == 4) and 32 Rx
 + * (log2 == 5) buffers.
 + */
 +#ifndef PCNET32_LOG_TX_BUFFERS
 +#define PCNET32_LOG_TX_BUFFERS                4
 +#define PCNET32_LOG_RX_BUFFERS                5
 +#define PCNET32_LOG_MAX_TX_BUFFERS    9       /* 2^9 == 512 */
 +#define PCNET32_LOG_MAX_RX_BUFFERS    9
 +#endif
 +
 +#define TX_RING_SIZE          (1 << (PCNET32_LOG_TX_BUFFERS))
 +#define TX_MAX_RING_SIZE      (1 << (PCNET32_LOG_MAX_TX_BUFFERS))
 +
 +#define RX_RING_SIZE          (1 << (PCNET32_LOG_RX_BUFFERS))
 +#define RX_MAX_RING_SIZE      (1 << (PCNET32_LOG_MAX_RX_BUFFERS))
 +
 +#define PKT_BUF_SKB           1544
 +/* actual buffer length after being aligned */
 +#define PKT_BUF_SIZE          (PKT_BUF_SKB - NET_IP_ALIGN)
 +/* chip wants twos complement of the (aligned) buffer length */
 +#define NEG_BUF_SIZE          (NET_IP_ALIGN - PKT_BUF_SKB)
 +
 +/* Offsets from base I/O address. */
 +#define PCNET32_WIO_RDP               0x10
 +#define PCNET32_WIO_RAP               0x12
 +#define PCNET32_WIO_RESET     0x14
 +#define PCNET32_WIO_BDP               0x16
 +
 +#define PCNET32_DWIO_RDP      0x10
 +#define PCNET32_DWIO_RAP      0x14
 +#define PCNET32_DWIO_RESET    0x18
 +#define PCNET32_DWIO_BDP      0x1C
 +
 +#define PCNET32_TOTAL_SIZE    0x20
 +
 +#define CSR0          0
 +#define CSR0_INIT     0x1
 +#define CSR0_START    0x2
 +#define CSR0_STOP     0x4
 +#define CSR0_TXPOLL   0x8
 +#define CSR0_INTEN    0x40
 +#define CSR0_IDON     0x0100
 +#define CSR0_NORMAL   (CSR0_START | CSR0_INTEN)
 +#define PCNET32_INIT_LOW      1
 +#define PCNET32_INIT_HIGH     2
 +#define CSR3          3
 +#define CSR4          4
 +#define CSR5          5
 +#define CSR5_SUSPEND  0x0001
 +#define CSR15         15
 +#define PCNET32_MC_FILTER     8
 +
 +#define PCNET32_79C970A       0x2621
 +
 +/* The PCNET32 Rx and Tx ring descriptors. */
 +struct pcnet32_rx_head {
 +      __le32  base;
 +      __le16  buf_length;     /* two's complement of length */
 +      __le16  status;
 +      __le32  msg_length;
 +      __le32  reserved;
 +};
 +
 +struct pcnet32_tx_head {
 +      __le32  base;
 +      __le16  length;         /* two's complement of length */
 +      __le16  status;
 +      __le32  misc;
 +      __le32  reserved;
 +};
 +
 +/* The PCNET32 32-Bit initialization block, described in databook. */
 +struct pcnet32_init_block {
 +      __le16  mode;
 +      __le16  tlen_rlen;
 +      u8      phys_addr[6];
 +      __le16  reserved;
 +      __le32  filter[2];
 +      /* Receive and transmit ring base, along with extra bits. */
 +      __le32  rx_ring;
 +      __le32  tx_ring;
 +};
 +
 +/* PCnet32 access functions */
 +struct pcnet32_access {
 +      u16     (*read_csr) (unsigned long, int);
 +      void    (*write_csr) (unsigned long, int, u16);
 +      u16     (*read_bcr) (unsigned long, int);
 +      void    (*write_bcr) (unsigned long, int, u16);
 +      u16     (*read_rap) (unsigned long);
 +      void    (*write_rap) (unsigned long, u16);
 +      void    (*reset) (unsigned long);
 +};
 +
 +/*
 + * The first field of pcnet32_private is read by the ethernet device
 + * so the structure should be allocated using pci_alloc_consistent().
 + */
 +struct pcnet32_private {
 +      struct pcnet32_init_block *init_block;
 +      /* The Tx and Rx ring entries must be aligned on 16-byte boundaries in 32bit mode. */
 +      struct pcnet32_rx_head  *rx_ring;
 +      struct pcnet32_tx_head  *tx_ring;
 +      dma_addr_t              init_dma_addr;/* DMA address of beginning of the init block,
 +                                 returned by pci_alloc_consistent */
 +      struct pci_dev          *pci_dev;
 +      const char              *name;
 +      /* The saved address of a sent-in-place packet/buffer, for later dev_kfree_skb(). */
 +      struct sk_buff          **tx_skbuff;
 +      struct sk_buff          **rx_skbuff;
 +      dma_addr_t              *tx_dma_addr;
 +      dma_addr_t              *rx_dma_addr;
 +      struct pcnet32_access   a;
 +      spinlock_t              lock;           /* Guard lock */
 +      unsigned int            cur_rx, cur_tx; /* The next free ring entry */
 +      unsigned int            rx_ring_size;   /* current rx ring size */
 +      unsigned int            tx_ring_size;   /* current tx ring size */
 +      unsigned int            rx_mod_mask;    /* rx ring modular mask */
 +      unsigned int            tx_mod_mask;    /* tx ring modular mask */
 +      unsigned short          rx_len_bits;
 +      unsigned short          tx_len_bits;
 +      dma_addr_t              rx_ring_dma_addr;
 +      dma_addr_t              tx_ring_dma_addr;
 +      unsigned int            dirty_rx,       /* ring entries to be freed. */
 +                              dirty_tx;
 +
 +      struct net_device       *dev;
 +      struct napi_struct      napi;
 +      char                    tx_full;
 +      char                    phycount;       /* number of phys found */
 +      int                     options;
 +      unsigned int            shared_irq:1,   /* shared irq possible */
 +                              dxsuflo:1,   /* disable transmit stop on uflo */
 +                              mii:1;          /* mii port available */
 +      struct net_device       *next;
 +      struct mii_if_info      mii_if;
 +      struct timer_list       watchdog_timer;
 +      u32                     msg_enable;     /* debug message level */
 +
 +      /* each bit indicates an available PHY */
 +      u32                     phymask;
 +      unsigned short          chip_version;   /* which variant this is */
 +
 +      /* saved registers during ethtool blink */
 +      u16                     save_regs[4];
 +};
 +
 +static int pcnet32_probe_pci(struct pci_dev *, const struct pci_device_id *);
 +static int pcnet32_probe1(unsigned long, int, struct pci_dev *);
 +static int pcnet32_open(struct net_device *);
 +static int pcnet32_init_ring(struct net_device *);
 +static netdev_tx_t pcnet32_start_xmit(struct sk_buff *,
 +                                    struct net_device *);
 +static void pcnet32_tx_timeout(struct net_device *dev);
 +static irqreturn_t pcnet32_interrupt(int, void *);
 +static int pcnet32_close(struct net_device *);
 +static struct net_device_stats *pcnet32_get_stats(struct net_device *);
 +static void pcnet32_load_multicast(struct net_device *dev);
 +static void pcnet32_set_multicast_list(struct net_device *);
 +static int pcnet32_ioctl(struct net_device *, struct ifreq *, int);
 +static void pcnet32_watchdog(struct net_device *);
 +static int mdio_read(struct net_device *dev, int phy_id, int reg_num);
 +static void mdio_write(struct net_device *dev, int phy_id, int reg_num,
 +                     int val);
 +static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits);
 +static void pcnet32_ethtool_test(struct net_device *dev,
 +                               struct ethtool_test *eth_test, u64 * data);
 +static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1);
 +static int pcnet32_get_regs_len(struct net_device *dev);
 +static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
 +                           void *ptr);
 +static void pcnet32_purge_tx_ring(struct net_device *dev);
 +static int pcnet32_alloc_ring(struct net_device *dev, const char *name);
 +static void pcnet32_free_ring(struct net_device *dev);
 +static void pcnet32_check_media(struct net_device *dev, int verbose);
 +
 +static u16 pcnet32_wio_read_csr(unsigned long addr, int index)
 +{
 +      outw(index, addr + PCNET32_WIO_RAP);
 +      return inw(addr + PCNET32_WIO_RDP);
 +}
 +
 +static void pcnet32_wio_write_csr(unsigned long addr, int index, u16 val)
 +{
 +      outw(index, addr + PCNET32_WIO_RAP);
 +      outw(val, addr + PCNET32_WIO_RDP);
 +}
 +
 +static u16 pcnet32_wio_read_bcr(unsigned long addr, int index)
 +{
 +      outw(index, addr + PCNET32_WIO_RAP);
 +      return inw(addr + PCNET32_WIO_BDP);
 +}
 +
 +static void pcnet32_wio_write_bcr(unsigned long addr, int index, u16 val)
 +{
 +      outw(index, addr + PCNET32_WIO_RAP);
 +      outw(val, addr + PCNET32_WIO_BDP);
 +}
 +
 +static u16 pcnet32_wio_read_rap(unsigned long addr)
 +{
 +      return inw(addr + PCNET32_WIO_RAP);
 +}
 +
 +static void pcnet32_wio_write_rap(unsigned long addr, u16 val)
 +{
 +      outw(val, addr + PCNET32_WIO_RAP);
 +}
 +
 +static void pcnet32_wio_reset(unsigned long addr)
 +{
 +      inw(addr + PCNET32_WIO_RESET);
 +}
 +
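 +/*
 + * Sanity check for word I/O mode: write a known register number to RAP
 + * and see whether it reads back through the 16-bit register window.
 + */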
 +static int pcnet32_wio_check(unsigned long addr)
 +{
 +      outw(88, addr + PCNET32_WIO_RAP);
 +      return inw(addr + PCNET32_WIO_RAP) == 88;
 +}
 +
 +static struct pcnet32_access pcnet32_wio = {
 +      .read_csr = pcnet32_wio_read_csr,
 +      .write_csr = pcnet32_wio_write_csr,
 +      .read_bcr = pcnet32_wio_read_bcr,
 +      .write_bcr = pcnet32_wio_write_bcr,
 +      .read_rap = pcnet32_wio_read_rap,
 +      .write_rap = pcnet32_wio_write_rap,
 +      .reset = pcnet32_wio_reset
 +};
 +
 +static u16 pcnet32_dwio_read_csr(unsigned long addr, int index)
 +{
 +      outl(index, addr + PCNET32_DWIO_RAP);
 +      return inl(addr + PCNET32_DWIO_RDP) & 0xffff;
 +}
 +
 +static void pcnet32_dwio_write_csr(unsigned long addr, int index, u16 val)
 +{
 +      outl(index, addr + PCNET32_DWIO_RAP);
 +      outl(val, addr + PCNET32_DWIO_RDP);
 +}
 +
 +static u16 pcnet32_dwio_read_bcr(unsigned long addr, int index)
 +{
 +      outl(index, addr + PCNET32_DWIO_RAP);
 +      return inl(addr + PCNET32_DWIO_BDP) & 0xffff;
 +}
 +
 +static void pcnet32_dwio_write_bcr(unsigned long addr, int index, u16 val)
 +{
 +      outl(index, addr + PCNET32_DWIO_RAP);
 +      outl(val, addr + PCNET32_DWIO_BDP);
 +}
 +
 +static u16 pcnet32_dwio_read_rap(unsigned long addr)
 +{
 +      return inl(addr + PCNET32_DWIO_RAP) & 0xffff;
 +}
 +
 +static void pcnet32_dwio_write_rap(unsigned long addr, u16 val)
 +{
 +      outl(val, addr + PCNET32_DWIO_RAP);
 +}
 +
 +static void pcnet32_dwio_reset(unsigned long addr)
 +{
 +      inl(addr + PCNET32_DWIO_RESET);
 +}
 +
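 +/*
 + * Same sanity check for double-word I/O mode, through the 32-bit
 + * register window.
 + */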
 +static int pcnet32_dwio_check(unsigned long addr)
 +{
 +      outl(88, addr + PCNET32_DWIO_RAP);
 +      return (inl(addr + PCNET32_DWIO_RAP) & 0xffff) == 88;
 +}
 +
 +static struct pcnet32_access pcnet32_dwio = {
 +      .read_csr = pcnet32_dwio_read_csr,
 +      .write_csr = pcnet32_dwio_write_csr,
 +      .read_bcr = pcnet32_dwio_read_bcr,
 +      .write_bcr = pcnet32_dwio_write_bcr,
 +      .read_rap = pcnet32_dwio_read_rap,
 +      .write_rap = pcnet32_dwio_write_rap,
 +      .reset = pcnet32_dwio_reset
 +};
 +
 +static void pcnet32_netif_stop(struct net_device *dev)
 +{
 +      struct pcnet32_private *lp = netdev_priv(dev);
 +
 +      dev->trans_start = jiffies; /* prevent tx timeout */
 +      napi_disable(&lp->napi);
 +      netif_tx_disable(dev);
 +}
 +
 +static void pcnet32_netif_start(struct net_device *dev)
 +{
 +      struct pcnet32_private *lp = netdev_priv(dev);
 +      ulong ioaddr = dev->base_addr;
 +      u16 val;
 +
 +      netif_wake_queue(dev);
 +      val = lp->a.read_csr(ioaddr, CSR3);
 +      val &= 0x00ff;
 +      lp->a.write_csr(ioaddr, CSR3, val);
 +      napi_enable(&lp->napi);
 +}
 +
 +/*
 + * Allocate space for the new sized tx ring.
 + * Free old resources
 + * Save new resources.
 + * Any failure keeps old resources.
 + * Must be called with lp->lock held.
 + */
 +static void pcnet32_realloc_tx_ring(struct net_device *dev,
 +                                  struct pcnet32_private *lp,
 +                                  unsigned int size)
 +{
 +      dma_addr_t new_ring_dma_addr;
 +      dma_addr_t *new_dma_addr_list;
 +      struct pcnet32_tx_head *new_tx_ring;
 +      struct sk_buff **new_skb_list;
 +
 +      pcnet32_purge_tx_ring(dev);
 +
 +      new_tx_ring = pci_alloc_consistent(lp->pci_dev,
 +                                         sizeof(struct pcnet32_tx_head) *
 +                                         (1 << size),
 +                                         &new_ring_dma_addr);
 +      if (new_tx_ring == NULL) {
 +              netif_err(lp, drv, dev, "Consistent memory allocation failed\n");
 +              return;
 +      }
 +      memset(new_tx_ring, 0, sizeof(struct pcnet32_tx_head) * (1 << size));
 +
 +      new_dma_addr_list = kcalloc((1 << size), sizeof(dma_addr_t),
 +                              GFP_ATOMIC);
 +      if (!new_dma_addr_list) {
 +              netif_err(lp, drv, dev, "Memory allocation failed\n");
 +              goto free_new_tx_ring;
 +      }
 +
 +      new_skb_list = kcalloc((1 << size), sizeof(struct sk_buff *),
 +                              GFP_ATOMIC);
 +      if (!new_skb_list) {
 +              netif_err(lp, drv, dev, "Memory allocation failed\n");
 +              goto free_new_lists;
 +      }
 +
 +      kfree(lp->tx_skbuff);
 +      kfree(lp->tx_dma_addr);
 +      pci_free_consistent(lp->pci_dev,
 +                          sizeof(struct pcnet32_tx_head) *
 +                          lp->tx_ring_size, lp->tx_ring,
 +                          lp->tx_ring_dma_addr);
 +
 +      lp->tx_ring_size = (1 << size);
 +      lp->tx_mod_mask = lp->tx_ring_size - 1;
 +      lp->tx_len_bits = (size << 12);
 +      lp->tx_ring = new_tx_ring;
 +      lp->tx_ring_dma_addr = new_ring_dma_addr;
 +      lp->tx_dma_addr = new_dma_addr_list;
 +      lp->tx_skbuff = new_skb_list;
 +      return;
 +
 +free_new_lists:
 +      kfree(new_dma_addr_list);
 +free_new_tx_ring:
 +      pci_free_consistent(lp->pci_dev,
 +                          sizeof(struct pcnet32_tx_head) *
 +                          (1 << size),
 +                          new_tx_ring,
 +                          new_ring_dma_addr);
 +}
 +
 +/*
 + * Allocate space for the new sized rx ring.
 + * Re-use old receive buffers.
 + *   alloc extra buffers
 + *   free unneeded buffers
 + * Save new resources.
 + * Any failure keeps old resources.
 + * Must be called with lp->lock held.
 + */
 +static void pcnet32_realloc_rx_ring(struct net_device *dev,
 +                                  struct pcnet32_private *lp,
 +                                  unsigned int size)
 +{
 +      dma_addr_t new_ring_dma_addr;
 +      dma_addr_t *new_dma_addr_list;
 +      struct pcnet32_rx_head *new_rx_ring;
 +      struct sk_buff **new_skb_list;
 +      int new, overlap;
 +
 +      new_rx_ring = pci_alloc_consistent(lp->pci_dev,
 +                                         sizeof(struct pcnet32_rx_head) *
 +                                         (1 << size),
 +                                         &new_ring_dma_addr);
 +      if (new_rx_ring == NULL) {
 +              netif_err(lp, drv, dev, "Consistent memory allocation failed\n");
 +              return;
 +      }
 +      memset(new_rx_ring, 0, sizeof(struct pcnet32_rx_head) * (1 << size));
 +
 +      new_dma_addr_list = kcalloc((1 << size), sizeof(dma_addr_t),
 +                              GFP_ATOMIC);
 +      if (!new_dma_addr_list) {
 +              netif_err(lp, drv, dev, "Memory allocation failed\n");
 +              goto free_new_rx_ring;
 +      }
 +
 +      new_skb_list = kcalloc((1 << size), sizeof(struct sk_buff *),
 +                              GFP_ATOMIC);
 +      if (!new_skb_list) {
 +              netif_err(lp, drv, dev, "Memory allocation failed\n");
 +              goto free_new_lists;
 +      }
 +
 +      /* first copy the current receive buffers */
 +      overlap = min(size, lp->rx_ring_size);
 +      for (new = 0; new < overlap; new++) {
 +              new_rx_ring[new] = lp->rx_ring[new];
 +              new_dma_addr_list[new] = lp->rx_dma_addr[new];
 +              new_skb_list[new] = lp->rx_skbuff[new];
 +      }
 +      /* now allocate any new buffers needed */
 +      for (; new < size; new++) {
 +              struct sk_buff *rx_skbuff;
 +              new_skb_list[new] = dev_alloc_skb(PKT_BUF_SKB);
 +              rx_skbuff = new_skb_list[new];
 +              if (!rx_skbuff) {
 +                      /* keep the original lists and buffers */
 +                      netif_err(lp, drv, dev, "%s dev_alloc_skb failed\n",
 +                                __func__);
 +                      goto free_all_new;
 +              }
 +              skb_reserve(rx_skbuff, NET_IP_ALIGN);
 +
 +              new_dma_addr_list[new] =
 +                          pci_map_single(lp->pci_dev, rx_skbuff->data,
 +                                         PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
 +              new_rx_ring[new].base = cpu_to_le32(new_dma_addr_list[new]);
 +              new_rx_ring[new].buf_length = cpu_to_le16(NEG_BUF_SIZE);
 +              new_rx_ring[new].status = cpu_to_le16(0x8000);
 +      }
 +      /* and free any unneeded buffers */
 +      for (; new < lp->rx_ring_size; new++) {
 +              if (lp->rx_skbuff[new]) {
 +                      pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[new],
 +                                       PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
 +                      dev_kfree_skb(lp->rx_skbuff[new]);
 +              }
 +      }
 +
 +      kfree(lp->rx_skbuff);
 +      kfree(lp->rx_dma_addr);
 +      pci_free_consistent(lp->pci_dev,
 +                          sizeof(struct pcnet32_rx_head) *
 +                          lp->rx_ring_size, lp->rx_ring,
 +                          lp->rx_ring_dma_addr);
 +
 +      lp->rx_ring_size = (1 << size);
 +      lp->rx_mod_mask = lp->rx_ring_size - 1;
 +      lp->rx_len_bits = (size << 4);
 +      lp->rx_ring = new_rx_ring;
 +      lp->rx_ring_dma_addr = new_ring_dma_addr;
 +      lp->rx_dma_addr = new_dma_addr_list;
 +      lp->rx_skbuff = new_skb_list;
 +      return;
 +
 +free_all_new:
 +      while (--new >= lp->rx_ring_size) {
 +              if (new_skb_list[new]) {
 +                      pci_unmap_single(lp->pci_dev, new_dma_addr_list[new],
 +                                       PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
 +                      dev_kfree_skb(new_skb_list[new]);
 +              }
 +      }
 +      kfree(new_skb_list);
 +free_new_lists:
 +      kfree(new_dma_addr_list);
 +free_new_rx_ring:
 +      pci_free_consistent(lp->pci_dev,
 +                          sizeof(struct pcnet32_rx_head) *
 +                          (1 << size),
 +                          new_rx_ring,
 +                          new_ring_dma_addr);
 +}
 +
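 +/* Return every receive descriptor to the CPU and free the attached skbs. */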
 +static void pcnet32_purge_rx_ring(struct net_device *dev)
 +{
 +      struct pcnet32_private *lp = netdev_priv(dev);
 +      int i;
 +
 +      /* free all allocated skbuffs */
 +      for (i = 0; i < lp->rx_ring_size; i++) {
 +              lp->rx_ring[i].status = 0;      /* CPU owns buffer */
 +              wmb();          /* Make sure adapter sees owner change */
 +              if (lp->rx_skbuff[i]) {
 +                      pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i],
 +                                       PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
 +                      dev_kfree_skb_any(lp->rx_skbuff[i]);
 +              }
 +              lp->rx_skbuff[i] = NULL;
 +              lp->rx_dma_addr[i] = 0;
 +      }
 +}
 +
 +#ifdef CONFIG_NET_POLL_CONTROLLER
 +static void pcnet32_poll_controller(struct net_device *dev)
 +{
 +      disable_irq(dev->irq);
 +      pcnet32_interrupt(0, dev);
 +      enable_irq(dev->irq);
 +}
 +#endif
 +
 +static int pcnet32_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 +{
 +      struct pcnet32_private *lp = netdev_priv(dev);
 +      unsigned long flags;
 +      int r = -EOPNOTSUPP;
 +
 +      if (lp->mii) {
 +              spin_lock_irqsave(&lp->lock, flags);
 +              mii_ethtool_gset(&lp->mii_if, cmd);
 +              spin_unlock_irqrestore(&lp->lock, flags);
 +              r = 0;
 +      }
 +      return r;
 +}
 +
 +static int pcnet32_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 +{
 +      struct pcnet32_private *lp = netdev_priv(dev);
 +      unsigned long flags;
 +      int r = -EOPNOTSUPP;
 +
 +      if (lp->mii) {
 +              spin_lock_irqsave(&lp->lock, flags);
 +              r = mii_ethtool_sset(&lp->mii_if, cmd);
 +              spin_unlock_irqrestore(&lp->lock, flags);
 +      }
 +      return r;
 +}
 +
 +static void pcnet32_get_drvinfo(struct net_device *dev,
 +                              struct ethtool_drvinfo *info)
 +{
 +      struct pcnet32_private *lp = netdev_priv(dev);
 +
 +      strcpy(info->driver, DRV_NAME);
 +      strcpy(info->version, DRV_VERSION);
 +      if (lp->pci_dev)
 +              strcpy(info->bus_info, pci_name(lp->pci_dev));
 +      else
 +              sprintf(info->bus_info, "VLB 0x%lx", dev->base_addr);
 +}
 +
 +static u32 pcnet32_get_link(struct net_device *dev)
 +{
 +      struct pcnet32_private *lp = netdev_priv(dev);
 +      unsigned long flags;
 +      int r;
 +
 +      spin_lock_irqsave(&lp->lock, flags);
 +      if (lp->mii) {
 +              r = mii_link_ok(&lp->mii_if);
 +      } else if (lp->chip_version >= PCNET32_79C970A) {
 +              ulong ioaddr = dev->base_addr;  /* card base I/O address */
 +              r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
 +      } else {        /* can not detect link on really old chips */
 +              r = 1;
 +      }
 +      spin_unlock_irqrestore(&lp->lock, flags);
 +
 +      return r;
 +}
 +
 +static u32 pcnet32_get_msglevel(struct net_device *dev)
 +{
 +      struct pcnet32_private *lp = netdev_priv(dev);
 +      return lp->msg_enable;
 +}
 +
 +static void pcnet32_set_msglevel(struct net_device *dev, u32 value)
 +{
 +      struct pcnet32_private *lp = netdev_priv(dev);
 +      lp->msg_enable = value;
 +}
 +
 +static int pcnet32_nway_reset(struct net_device *dev)
 +{
 +      struct pcnet32_private *lp = netdev_priv(dev);
 +      unsigned long flags;
 +      int r = -EOPNOTSUPP;
 +
 +      if (lp->mii) {
 +              spin_lock_irqsave(&lp->lock, flags);
 +              r = mii_nway_restart(&lp->mii_if);
 +              spin_unlock_irqrestore(&lp->lock, flags);
 +      }
 +      return r;
 +}
 +
 +static void pcnet32_get_ringparam(struct net_device *dev,
 +                                struct ethtool_ringparam *ering)
 +{
 +      struct pcnet32_private *lp = netdev_priv(dev);
 +
 +      ering->tx_max_pending = TX_MAX_RING_SIZE;
 +      ering->tx_pending = lp->tx_ring_size;
 +      ering->rx_max_pending = RX_MAX_RING_SIZE;
 +      ering->rx_pending = lp->rx_ring_size;
 +}
 +
 +static int pcnet32_set_ringparam(struct net_device *dev,
 +                               struct ethtool_ringparam *ering)
 +{
 +      struct pcnet32_private *lp = netdev_priv(dev);
 +      unsigned long flags;
 +      unsigned int size;
 +      ulong ioaddr = dev->base_addr;
 +      int i;
 +
 +      if (ering->rx_mini_pending || ering->rx_jumbo_pending)
 +              return -EINVAL;
 +
 +      if (netif_running(dev))
 +              pcnet32_netif_stop(dev);
 +
 +      spin_lock_irqsave(&lp->lock, flags);
 +      lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);       /* stop the chip */
 +
 +      size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
 +
 +      /* set the minimum ring size to 4, to allow the loopback test to work
 +       * unchanged.
 +       */
 +      for (i = 2; i <= PCNET32_LOG_MAX_TX_BUFFERS; i++) {
 +              if (size <= (1 << i))
 +                      break;
 +      }
 +      if ((1 << i) != lp->tx_ring_size)
 +              pcnet32_realloc_tx_ring(dev, lp, i);
 +
 +      size = min(ering->rx_pending, (unsigned int)RX_MAX_RING_SIZE);
 +      for (i = 2; i <= PCNET32_LOG_MAX_RX_BUFFERS; i++) {
 +              if (size <= (1 << i))
 +                      break;
 +      }
 +      if ((1 << i) != lp->rx_ring_size)
 +              pcnet32_realloc_rx_ring(dev, lp, i);
 +
 +      lp->napi.weight = lp->rx_ring_size / 2;
 +
 +      if (netif_running(dev)) {
 +              pcnet32_netif_start(dev);
 +              pcnet32_restart(dev, CSR0_NORMAL);
 +      }
 +
 +      spin_unlock_irqrestore(&lp->lock, flags);
 +
 +      netif_info(lp, drv, dev, "Ring Param Settings: RX: %d, TX: %d\n",
 +                 lp->rx_ring_size, lp->tx_ring_size);
 +
 +      return 0;
 +}
 +
 +static void pcnet32_get_strings(struct net_device *dev, u32 stringset,
 +                              u8 *data)
 +{
 +      memcpy(data, pcnet32_gstrings_test, sizeof(pcnet32_gstrings_test));
 +}
 +
 +static int pcnet32_get_sset_count(struct net_device *dev, int sset)
 +{
 +      switch (sset) {
 +      case ETH_SS_TEST:
 +              return PCNET32_TEST_LEN;
 +      default:
 +              return -EOPNOTSUPP;
 +      }
 +}
 +
 +static void pcnet32_ethtool_test(struct net_device *dev,
 +                               struct ethtool_test *test, u64 * data)
 +{
 +      struct pcnet32_private *lp = netdev_priv(dev);
 +      int rc;
 +
 +      if (test->flags == ETH_TEST_FL_OFFLINE) {
 +              rc = pcnet32_loopback_test(dev, data);
 +              if (rc) {
 +                      netif_printk(lp, hw, KERN_DEBUG, dev,
 +                                   "Loopback test failed\n");
 +                      test->flags |= ETH_TEST_FL_FAILED;
 +              } else
 +                      netif_printk(lp, hw, KERN_DEBUG, dev,
 +                                   "Loopback test passed\n");
 +      } else
 +              netif_printk(lp, hw, KERN_DEBUG, dev,
 +                           "No tests to run (specify 'Offline' on ethtool)\n");
 +}                             /* end pcnet32_ethtool_test */
 +
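 +/*
 + * Internal loopback self-test: build a few test frames, transmit them
 + * with the chip looped back on itself, and compare what arrives in the
 + * receive ring with what was sent.
 + */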
 +static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
 +{
 +      struct pcnet32_private *lp = netdev_priv(dev);
 +      struct pcnet32_access *a = &lp->a;      /* access to registers */
 +      ulong ioaddr = dev->base_addr;  /* card base I/O address */
 +      struct sk_buff *skb;    /* sk buff */
 +      int x, i;               /* counters */
 +      int numbuffs = 4;       /* number of TX/RX buffers and descs */
 +      u16 status = 0x8300;    /* TX ring status */
 +      __le16 teststatus;      /* test of ring status */
 +      int rc;                 /* return code */
 +      int size;               /* size of packets */
 +      unsigned char *packet;  /* source packet data */
 +      static const int data_len = 60; /* length of source packets */
 +      unsigned long flags;
 +      unsigned long ticks;
 +
 +      rc = 1;                 /* default to fail */
 +
 +      if (netif_running(dev))
 +              pcnet32_netif_stop(dev);
 +
 +      spin_lock_irqsave(&lp->lock, flags);
 +      lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);       /* stop the chip */
 +
 +      numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
 +
 +      /* Reset the PCNET32 */
 +      lp->a.reset(ioaddr);
 +      lp->a.write_csr(ioaddr, CSR4, 0x0915);  /* auto tx pad */
 +
 +      /* switch pcnet32 to 32bit mode */
 +      lp->a.write_bcr(ioaddr, 20, 2);
 +
 +      /* purge & init rings but don't actually restart */
 +      pcnet32_restart(dev, 0x0000);
 +
 +      lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);       /* Set STOP bit */
 +
 +      /* Initialize Transmit buffers. */
 +      size = data_len + 15;
 +      for (x = 0; x < numbuffs; x++) {
 +              skb = dev_alloc_skb(size);
 +              if (!skb) {
 +                      netif_printk(lp, hw, KERN_DEBUG, dev,
 +                                   "Cannot allocate skb at line: %d!\n",
 +                                   __LINE__);
 +                      goto clean_up;
 +              }
 +              packet = skb->data;
 +              skb_put(skb, size);     /* create space for data */
 +              lp->tx_skbuff[x] = skb;
 +              lp->tx_ring[x].length = cpu_to_le16(-skb->len);
 +              lp->tx_ring[x].misc = 0;
 +
 +              /* put DA and SA into the skb */
 +              for (i = 0; i < 6; i++)
 +                      *packet++ = dev->dev_addr[i];
 +              for (i = 0; i < 6; i++)
 +                      *packet++ = dev->dev_addr[i];
 +              /* type */
 +              *packet++ = 0x08;
 +              *packet++ = 0x06;
 +              /* packet number */
 +              *packet++ = x;
 +              /* fill packet with data */
 +              for (i = 0; i < data_len; i++)
 +                      *packet++ = i;
 +
 +              lp->tx_dma_addr[x] =
 +                      pci_map_single(lp->pci_dev, skb->data, skb->len,
 +                                     PCI_DMA_TODEVICE);
 +              lp->tx_ring[x].base = cpu_to_le32(lp->tx_dma_addr[x]);
 +              wmb();  /* Make sure owner changes after all others are visible */
 +              lp->tx_ring[x].status = cpu_to_le16(status);
 +      }
 +
 +      x = a->read_bcr(ioaddr, 32);    /* set internal loopback in BCR32 */
 +      a->write_bcr(ioaddr, 32, x | 0x0002);
 +
 +      /* set int loopback in CSR15 */
 +      x = a->read_csr(ioaddr, CSR15) & 0xfffc;
 +      lp->a.write_csr(ioaddr, CSR15, x | 0x0044);
 +
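 +      /*
 +       * Start the chip; with internal loopback enabled the transmitted
 +       * frames land straight in the receive ring, where the OWN bit
 +       * (0x8000) is cleared once a frame has been received.
 +       */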
 +      teststatus = cpu_to_le16(0x8000);
 +      lp->a.write_csr(ioaddr, CSR0, CSR0_START);      /* Set STRT bit */
 +
 +      /* Check status of descriptors */
 +      for (x = 0; x < numbuffs; x++) {
 +              ticks = 0;
 +              rmb();
 +              while ((lp->rx_ring[x].status & teststatus) && (ticks < 200)) {
 +                      spin_unlock_irqrestore(&lp->lock, flags);
 +                      msleep(1);
 +                      spin_lock_irqsave(&lp->lock, flags);
 +                      rmb();
 +                      ticks++;
 +              }
 +              if (ticks == 200) {
 +                      netif_err(lp, hw, dev, "Desc %d failed to reset!\n", x);
 +                      break;
 +              }
 +      }
 +
 +      lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);       /* Set STOP bit */
 +      wmb();
 +      if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
 +              netdev_printk(KERN_DEBUG, dev, "RX loopback packets:\n");
 +
 +              for (x = 0; x < numbuffs; x++) {
 +                      netdev_printk(KERN_DEBUG, dev, "Packet %d: ", x);
 +                      skb = lp->rx_skbuff[x];
 +                      for (i = 0; i < size; i++)
 +                              pr_cont(" %02x", *(skb->data + i));
 +                      pr_cont("\n");
 +              }
 +      }
 +
 +      x = 0;
 +      rc = 0;
 +      while (x < numbuffs && !rc) {
 +              skb = lp->rx_skbuff[x];
 +              packet = lp->tx_skbuff[x]->data;
 +              for (i = 0; i < size; i++) {
 +                      if (*(skb->data + i) != packet[i]) {
 +                              netif_printk(lp, hw, KERN_DEBUG, dev,
 +                                           "Error in compare! %2x - %02x %02x\n",
 +                                           i, *(skb->data + i), packet[i]);
 +                              rc = 1;
 +                              break;
 +                      }
 +              }
 +              x++;
 +      }
 +
 +clean_up:
 +      *data1 = rc;
 +      pcnet32_purge_tx_ring(dev);
 +
 +      x = a->read_csr(ioaddr, CSR15);
 +      a->write_csr(ioaddr, CSR15, (x & ~0x0044));     /* reset bits 6 and 2 */
 +
 +      x = a->read_bcr(ioaddr, 32);    /* reset internal loopback */
 +      a->write_bcr(ioaddr, 32, (x & ~0x0002));
 +
 +      if (netif_running(dev)) {
 +              pcnet32_netif_start(dev);
 +              pcnet32_restart(dev, CSR0_NORMAL);
 +      } else {
 +              pcnet32_purge_rx_ring(dev);
 +              lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
 +      }
 +      spin_unlock_irqrestore(&lp->lock, flags);
 +
 +      return rc;
 +}                             /* end pcnet32_loopback_test  */
 +
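 +/*
 + * ethtool "identify adapter" support: blink the LEDs by toggling bit 14
 + * (0x4000) in BCR4-BCR7 and restore the saved values once identification
 + * stops.
 + */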
 +static int pcnet32_set_phys_id(struct net_device *dev,
 +                             enum ethtool_phys_id_state state)
 +{
 +      struct pcnet32_private *lp = netdev_priv(dev);
 +      struct pcnet32_access *a = &lp->a;
 +      ulong ioaddr = dev->base_addr;
 +      unsigned long flags;
 +      int i;
 +
 +      switch (state) {
 +      case ETHTOOL_ID_ACTIVE:
 +              /* Save the current value of the bcrs */
 +              spin_lock_irqsave(&lp->lock, flags);
 +              for (i = 4; i < 8; i++)
 +                      lp->save_regs[i - 4] = a->read_bcr(ioaddr, i);
 +              spin_unlock_irqrestore(&lp->lock, flags);
 +              return 2;       /* cycle on/off twice per second */
 +
 +      case ETHTOOL_ID_ON:
 +      case ETHTOOL_ID_OFF:
 +              /* Blink the led */
 +              spin_lock_irqsave(&lp->lock, flags);
 +              for (i = 4; i < 8; i++)
 +                      a->write_bcr(ioaddr, i, a->read_bcr(ioaddr, i) ^ 0x4000);
 +              spin_unlock_irqrestore(&lp->lock, flags);
 +              break;
 +
 +      case ETHTOOL_ID_INACTIVE:
 +              /* Restore the original value of the bcrs */
 +              spin_lock_irqsave(&lp->lock, flags);
 +              for (i = 4; i < 8; i++)
 +                      a->write_bcr(ioaddr, i, lp->save_regs[i - 4]);
 +              spin_unlock_irqrestore(&lp->lock, flags);
 +      }
 +      return 0;
 +}
 +
 +/*
 + * lp->lock must be held.
 + */
 +static int pcnet32_suspend(struct net_device *dev, unsigned long *flags,
 +              int can_sleep)
 +{
 +      int csr5;
 +      struct pcnet32_private *lp = netdev_priv(dev);
 +      struct pcnet32_access *a = &lp->a;
 +      ulong ioaddr = dev->base_addr;
 +      int ticks;
 +
 +      /* really old chips have to be stopped. */
 +      if (lp->chip_version < PCNET32_79C970A)
 +              return 0;
 +
 +      /* set SUSPEND (SPND) - CSR5 bit 0 */
 +      csr5 = a->read_csr(ioaddr, CSR5);
 +      a->write_csr(ioaddr, CSR5, csr5 | CSR5_SUSPEND);
 +
 +      /* poll waiting for bit to be set */
 +      ticks = 0;
 +      while (!(a->read_csr(ioaddr, CSR5) & CSR5_SUSPEND)) {
 +              spin_unlock_irqrestore(&lp->lock, *flags);
 +              if (can_sleep)
 +                      msleep(1);
 +              else
 +                      mdelay(1);
 +              spin_lock_irqsave(&lp->lock, *flags);
 +              ticks++;
 +              if (ticks > 200) {
 +                      netif_printk(lp, hw, KERN_DEBUG, dev,
 +                                   "Error getting into suspend!\n");
 +                      return 0;
 +              }
 +      }
 +      return 1;
 +}
 +
 +/*
 + * process one receive descriptor entry
 + */
 +
 +static void pcnet32_rx_entry(struct net_device *dev,
 +                           struct pcnet32_private *lp,
 +                           struct pcnet32_rx_head *rxp,
 +                           int entry)
 +{
 +      int status = (short)le16_to_cpu(rxp->status) >> 8;
 +      int rx_in_place = 0;
 +      struct sk_buff *skb;
 +      short pkt_len;
 +
 +      if (status != 0x03) {   /* There was an error. */
 +              /*
 +               * There is a tricky error noted by John Murphy,
 +               * <murf@perftech.com> to Russ Nelson: Even with full-sized
 +               * buffers it's possible for a jabber packet to use two
 +               * buffers, with only the last correctly noting the error.
 +               */
 +              if (status & 0x01)      /* Only count a general error at the */
 +                      dev->stats.rx_errors++; /* end of a packet. */
 +              if (status & 0x20)
 +                      dev->stats.rx_frame_errors++;
 +              if (status & 0x10)
 +                      dev->stats.rx_over_errors++;
 +              if (status & 0x08)
 +                      dev->stats.rx_crc_errors++;
 +              if (status & 0x04)
 +                      dev->stats.rx_fifo_errors++;
 +              return;
 +      }
 +
 +      pkt_len = (le32_to_cpu(rxp->msg_length) & 0xfff) - 4;
 +
 +      /* Discard oversize frames. */
 +      if (unlikely(pkt_len > PKT_BUF_SIZE)) {
 +              netif_err(lp, drv, dev, "Impossible packet size %d!\n",
 +                        pkt_len);
 +              dev->stats.rx_errors++;
 +              return;
 +      }
 +      if (pkt_len < 60) {
 +              netif_err(lp, rx_err, dev, "Runt packet!\n");
 +              dev->stats.rx_errors++;
 +              return;
 +      }
 +
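 +      /*
 +       * Large packets: pass the ring buffer itself up the stack and map
 +       * a freshly allocated skb in its place.  Packets of rx_copybreak
 +       * bytes or less are copied into a new skb so the DMA buffer can
 +       * stay in the ring.
 +       */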
 +      if (pkt_len > rx_copybreak) {
 +              struct sk_buff *newskb;
 +
 +              newskb = dev_alloc_skb(PKT_BUF_SKB);
 +              if (newskb) {
 +                      skb_reserve(newskb, NET_IP_ALIGN);
 +                      skb = lp->rx_skbuff[entry];
 +                      pci_unmap_single(lp->pci_dev,
 +                                       lp->rx_dma_addr[entry],
 +                                       PKT_BUF_SIZE,
 +                                       PCI_DMA_FROMDEVICE);
 +                      skb_put(skb, pkt_len);
 +                      lp->rx_skbuff[entry] = newskb;
 +                      lp->rx_dma_addr[entry] =
 +                                          pci_map_single(lp->pci_dev,
 +                                                         newskb->data,
 +                                                         PKT_BUF_SIZE,
 +                                                         PCI_DMA_FROMDEVICE);
 +                      rxp->base = cpu_to_le32(lp->rx_dma_addr[entry]);
 +                      rx_in_place = 1;
 +              } else
 +                      skb = NULL;
 +      } else
 +              skb = dev_alloc_skb(pkt_len + NET_IP_ALIGN);
 +
 +      if (skb == NULL) {
 +              netif_err(lp, drv, dev, "Memory squeeze, dropping packet\n");
 +              dev->stats.rx_dropped++;
 +              return;
 +      }
 +      if (!rx_in_place) {
 +              skb_reserve(skb, NET_IP_ALIGN);
 +              skb_put(skb, pkt_len);  /* Make room */
 +              pci_dma_sync_single_for_cpu(lp->pci_dev,
 +                                          lp->rx_dma_addr[entry],
 +                                          pkt_len,
 +                                          PCI_DMA_FROMDEVICE);
 +              skb_copy_to_linear_data(skb,
 +                               (unsigned char *)(lp->rx_skbuff[entry]->data),
 +                               pkt_len);
 +              pci_dma_sync_single_for_device(lp->pci_dev,
 +                                             lp->rx_dma_addr[entry],
 +                                             pkt_len,
 +                                             PCI_DMA_FROMDEVICE);
 +      }
 +      dev->stats.rx_bytes += skb->len;
 +      skb->protocol = eth_type_trans(skb, dev);
 +      netif_receive_skb(skb);
 +      dev->stats.rx_packets++;
 +}
 +
 +static int pcnet32_rx(struct net_device *dev, int budget)
 +{
 +      struct pcnet32_private *lp = netdev_priv(dev);
 +      int entry = lp->cur_rx & lp->rx_mod_mask;
 +      struct pcnet32_rx_head *rxp = &lp->rx_ring[entry];
 +      int npackets = 0;
 +
 +      /* If we own the next entry, it's a new packet. Send it up. */
 +      while (npackets < budget && (short)le16_to_cpu(rxp->status) >= 0) {
 +              pcnet32_rx_entry(dev, lp, rxp, entry);
 +              npackets += 1;
 +              /*
 +               * The docs say that the buffer length isn't touched, but Andrew
 +               * Boyd of QNX reports that some revs of the 79C965 clear it.
 +               */
 +              rxp->buf_length = cpu_to_le16(NEG_BUF_SIZE);
 +              wmb();  /* Make sure owner changes after others are visible */
 +              rxp->status = cpu_to_le16(0x8000);
 +              entry = (++lp->cur_rx) & lp->rx_mod_mask;
 +              rxp = &lp->rx_ring[entry];
 +      }
 +
 +      return npackets;
 +}
 +
 +static int pcnet32_tx(struct net_device *dev)
 +{
 +      struct pcnet32_private *lp = netdev_priv(dev);
 +      unsigned int dirty_tx = lp->dirty_tx;
 +      int delta;
 +      int must_restart = 0;
 +
 +      while (dirty_tx != lp->cur_tx) {
 +              int entry = dirty_tx & lp->tx_mod_mask;
 +              int status = (short)le16_to_cpu(lp->tx_ring[entry].status);
 +
 +              if (status < 0)
 +                      break;  /* It still hasn't been Txed */
 +
 +              lp->tx_ring[entry].base = 0;
 +
 +              if (status & 0x4000) {
 +                      /* There was a major error, log it. */
 +                      int err_status = le32_to_cpu(lp->tx_ring[entry].misc);
 +                      dev->stats.tx_errors++;
 +                      netif_err(lp, tx_err, dev,
 +                                "Tx error status=%04x err_status=%08x\n",
 +                                status, err_status);
 +                      if (err_status & 0x04000000)
 +                              dev->stats.tx_aborted_errors++;
 +                      if (err_status & 0x08000000)
 +                              dev->stats.tx_carrier_errors++;
 +                      if (err_status & 0x10000000)
 +                              dev->stats.tx_window_errors++;
 +#ifndef DO_DXSUFLO
 +                      if (err_status & 0x40000000) {
 +                              dev->stats.tx_fifo_errors++;
 +                              /* Ackk!  On FIFO errors the Tx unit is turned off! */
 +                              /* Remove this verbosity later! */
 +                              netif_err(lp, tx_err, dev, "Tx FIFO error!\n");
 +                              must_restart = 1;
 +                      }
 +#else
 +                      if (err_status & 0x40000000) {
 +                              dev->stats.tx_fifo_errors++;
 +                              if (!lp->dxsuflo) {     /* If controller doesn't recover ... */
 +                                      /* Ackk!  On FIFO errors the Tx unit is turned off! */
 +                                      /* Remove this verbosity later! */
 +                                      netif_err(lp, tx_err, dev, "Tx FIFO error!\n");
 +                                      must_restart = 1;
 +                              }
 +                      }
 +#endif
 +              } else {
 +                      if (status & 0x1800)
 +                              dev->stats.collisions++;
 +                      dev->stats.tx_packets++;
 +              }
 +
 +              /* We must free the original skb */
 +              if (lp->tx_skbuff[entry]) {
 +                      pci_unmap_single(lp->pci_dev,
 +                                       lp->tx_dma_addr[entry],
 +                                       lp->tx_skbuff[entry]->
 +                                       len, PCI_DMA_TODEVICE);
 +                      dev_kfree_skb_any(lp->tx_skbuff[entry]);
 +                      lp->tx_skbuff[entry] = NULL;
 +                      lp->tx_dma_addr[entry] = 0;
 +              }
 +              dirty_tx++;
 +      }
 +
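 +      /*
 +       * cur_tx and dirty_tx are free-running, so take the difference
 +       * modulo twice the ring size (tx_mod_mask + tx_ring_size is
 +       * 2 * ring size - 1) to get the number of not-yet-reaped entries.
 +       */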
 +      delta = (lp->cur_tx - dirty_tx) & (lp->tx_mod_mask + lp->tx_ring_size);
 +      if (delta > lp->tx_ring_size) {
 +              netif_err(lp, drv, dev, "out-of-sync dirty pointer, %d vs. %d, full=%d\n",
 +                        dirty_tx, lp->cur_tx, lp->tx_full);
 +              dirty_tx += lp->tx_ring_size;
 +              delta -= lp->tx_ring_size;
 +      }
 +
 +      if (lp->tx_full &&
 +          netif_queue_stopped(dev) &&
 +          delta < lp->tx_ring_size - 2) {
 +              /* The ring is no longer full, clear tbusy. */
 +              lp->tx_full = 0;
 +              netif_wake_queue(dev);
 +      }
 +      lp->dirty_tx = dirty_tx;
 +
 +      return must_restart;
 +}
 +
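 +/*
 + * NAPI poll handler: receive up to "budget" packets, reap completed
 + * transmit descriptors (restarting the chip after a fatal Tx error), and
 + * re-enable chip interrupts once less than the full budget was used.
 + */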
 +static int pcnet32_poll(struct napi_struct *napi, int budget)
 +{
 +      struct pcnet32_private *lp = container_of(napi, struct pcnet32_private, napi);
 +      struct net_device *dev = lp->dev;
 +      unsigned long ioaddr = dev->base_addr;
 +      unsigned long flags;
 +      int work_done;
 +      u16 val;
 +
 +      work_done = pcnet32_rx(dev, budget);
 +
 +      spin_lock_irqsave(&lp->lock, flags);
 +      if (pcnet32_tx(dev)) {
 +              /* reset the chip to clear the error condition, then restart */
 +              lp->a.reset(ioaddr);
 +              lp->a.write_csr(ioaddr, CSR4, 0x0915);  /* auto tx pad */
 +              pcnet32_restart(dev, CSR0_START);
 +              netif_wake_queue(dev);
 +      }
 +      spin_unlock_irqrestore(&lp->lock, flags);
 +
 +      if (work_done < budget) {
 +              spin_lock_irqsave(&lp->lock, flags);
 +
 +              __napi_complete(napi);
 +
 +              /* clear interrupt masks */
 +              val = lp->a.read_csr(ioaddr, CSR3);
 +              val &= 0x00ff;
 +              lp->a.write_csr(ioaddr, CSR3, val);
 +
 +              /* Set interrupt enable. */
 +              lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN);
 +
 +              spin_unlock_irqrestore(&lp->lock, flags);
 +      }
 +      return work_done;
 +}
 +
 +#define PCNET32_REGS_PER_PHY  32
 +#define PCNET32_MAX_PHYS      32
 +static int pcnet32_get_regs_len(struct net_device *dev)
 +{
 +      struct pcnet32_private *lp = netdev_priv(dev);
 +      int j = lp->phycount * PCNET32_REGS_PER_PHY;
 +
 +      return (PCNET32_NUM_REGS + j) * sizeof(u16);
 +}
 +
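 +/*
 + * Register dump for ethtool: the address PROM, CSR0-89 plus CSR112 and
 + * CSR114, BCR0-35 (with a zero placeholder for BCR30), and the registers
 + * of every PHY found.  The chip is suspended around the reads if it is
 + * currently running.
 + */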
 +static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
 +                           void *ptr)
 +{
 +      int i, csr0;
 +      u16 *buff = ptr;
 +      struct pcnet32_private *lp = netdev_priv(dev);
 +      struct pcnet32_access *a = &lp->a;
 +      ulong ioaddr = dev->base_addr;
 +      unsigned long flags;
 +
 +      spin_lock_irqsave(&lp->lock, flags);
 +
 +      csr0 = a->read_csr(ioaddr, CSR0);
 +      if (!(csr0 & CSR0_STOP))        /* If not stopped */
 +              pcnet32_suspend(dev, &flags, 1);
 +
 +      /* read address PROM */
 +      for (i = 0; i < 16; i += 2)
 +              *buff++ = inw(ioaddr + i);
 +
 +      /* read control and status registers */
 +      for (i = 0; i < 90; i++)
 +              *buff++ = a->read_csr(ioaddr, i);
 +
 +      *buff++ = a->read_csr(ioaddr, 112);
 +      *buff++ = a->read_csr(ioaddr, 114);
 +
 +      /* read bus configuration registers */
 +      for (i = 0; i < 30; i++)
 +              *buff++ = a->read_bcr(ioaddr, i);
 +
 +      *buff++ = 0;            /* skip bcr30 so as not to hang 79C976 */
 +
 +      for (i = 31; i < 36; i++)
 +              *buff++ = a->read_bcr(ioaddr, i);
 +
 +      /* read mii phy registers */
 +      if (lp->mii) {
 +              int j;
 +              for (j = 0; j < PCNET32_MAX_PHYS; j++) {
 +                      if (lp->phymask & (1 << j)) {
 +                              for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
 +                                      lp->a.write_bcr(ioaddr, 33,
 +                                                      (j << 5) | i);
 +                                      *buff++ = lp->a.read_bcr(ioaddr, 34);
 +                              }
 +                      }
 +              }
 +      }
 +
 +      if (!(csr0 & CSR0_STOP)) {      /* If not stopped */
 +              int csr5;
 +
 +              /* clear SUSPEND (SPND) - CSR5 bit 0 */
 +              csr5 = a->read_csr(ioaddr, CSR5);
 +              a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
 +      }
 +
 +      spin_unlock_irqrestore(&lp->lock, flags);
 +}
 +
 +static const struct ethtool_ops pcnet32_ethtool_ops = {
 +      .get_settings           = pcnet32_get_settings,
 +      .set_settings           = pcnet32_set_settings,
 +      .get_drvinfo            = pcnet32_get_drvinfo,
 +      .get_msglevel           = pcnet32_get_msglevel,
 +      .set_msglevel           = pcnet32_set_msglevel,
 +      .nway_reset             = pcnet32_nway_reset,
 +      .get_link               = pcnet32_get_link,
 +      .get_ringparam          = pcnet32_get_ringparam,
 +      .set_ringparam          = pcnet32_set_ringparam,
 +      .get_strings            = pcnet32_get_strings,
 +      .self_test              = pcnet32_ethtool_test,
 +      .set_phys_id            = pcnet32_set_phys_id,
 +      .get_regs_len           = pcnet32_get_regs_len,
 +      .get_regs               = pcnet32_get_regs,
 +      .get_sset_count         = pcnet32_get_sset_count,
 +};
 +
 +/* only probes for non-PCI devices, the rest are handled by
 + * pci_register_driver via pcnet32_probe_pci */
 +
 +static void __devinit pcnet32_probe_vlbus(unsigned int *pcnet32_portlist)
 +{
 +      unsigned int *port, ioaddr;
 +
 +      /* search for PCnet32 VLB cards at known addresses */
 +      for (port = pcnet32_portlist; (ioaddr = *port); port++) {
 +              if (request_region
 +                  (ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_vlbus")) {
 +                      /* check if there is really a pcnet chip on that ioaddr */
 +                      if ((inb(ioaddr + 14) == 0x57) &&
 +                          (inb(ioaddr + 15) == 0x57)) {
 +                              pcnet32_probe1(ioaddr, 0, NULL);
 +                      } else {
 +                              release_region(ioaddr, PCNET32_TOTAL_SIZE);
 +                      }
 +              }
 +      }
 +}
 +
 +static int __devinit
 +pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent)
 +{
 +      unsigned long ioaddr;
 +      int err;
 +
 +      err = pci_enable_device(pdev);
 +      if (err < 0) {
 +              if (pcnet32_debug & NETIF_MSG_PROBE)
 +                      pr_err("failed to enable device -- err=%d\n", err);
 +              return err;
 +      }
 +      pci_set_master(pdev);
 +
 +      ioaddr = pci_resource_start(pdev, 0);
 +      if (!ioaddr) {
 +              if (pcnet32_debug & NETIF_MSG_PROBE)
 +                      pr_err("card has no PCI IO resources, aborting\n");
 +              return -ENODEV;
 +      }
 +
 +      if (!pci_dma_supported(pdev, PCNET32_DMA_MASK)) {
 +              if (pcnet32_debug & NETIF_MSG_PROBE)
 +                      pr_err("architecture does not support 32bit PCI busmaster DMA\n");
 +              return -ENODEV;
 +      }
 +      if (!request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci")) {
 +              if (pcnet32_debug & NETIF_MSG_PROBE)
 +                      pr_err("io address range already allocated\n");
 +              return -EBUSY;
 +      }
 +
 +      err = pcnet32_probe1(ioaddr, 1, pdev);
 +      if (err < 0)
 +              pci_disable_device(pdev);
 +
 +      return err;
 +}
 +
 +static const struct net_device_ops pcnet32_netdev_ops = {
 +      .ndo_open               = pcnet32_open,
 +      .ndo_stop               = pcnet32_close,
 +      .ndo_start_xmit         = pcnet32_start_xmit,
 +      .ndo_tx_timeout         = pcnet32_tx_timeout,
 +      .ndo_get_stats          = pcnet32_get_stats,
 +      .ndo_set_rx_mode        = pcnet32_set_multicast_list,
 +      .ndo_do_ioctl           = pcnet32_ioctl,
 +      .ndo_change_mtu         = eth_change_mtu,
 +      .ndo_set_mac_address    = eth_mac_addr,
 +      .ndo_validate_addr      = eth_validate_addr,
 +#ifdef CONFIG_NET_POLL_CONTROLLER
 +      .ndo_poll_controller    = pcnet32_poll_controller,
 +#endif
 +};
 +
 +/* pcnet32_probe1
 + *  Called from both pcnet32_probe_vlbus and pcnet32_probe_pci.
 + *  pdev will be NULL when called from pcnet32_probe_vlbus.
 + */
 +static int __devinit
 +pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
 +{
 +      struct pcnet32_private *lp;
 +      int i, media;
 +      int fdx, mii, fset, dxsuflo;
 +      int chip_version;
 +      char *chipname;
 +      struct net_device *dev;
 +      struct pcnet32_access *a = NULL;
 +      u8 promaddr[6];
 +      int ret = -ENODEV;
 +
 +      /* reset the chip */
 +      pcnet32_wio_reset(ioaddr);
 +
 +      /* NOTE: 16-bit check is first, otherwise some older PCnet chips fail */
 +      if (pcnet32_wio_read_csr(ioaddr, 0) == 4 && pcnet32_wio_check(ioaddr)) {
 +              a = &pcnet32_wio;
 +      } else {
 +              pcnet32_dwio_reset(ioaddr);
 +              if (pcnet32_dwio_read_csr(ioaddr, 0) == 4 &&
 +                  pcnet32_dwio_check(ioaddr)) {
 +                      a = &pcnet32_dwio;
 +              } else {
 +                      if (pcnet32_debug & NETIF_MSG_PROBE)
 +                              pr_err("No access methods\n");
 +                      goto err_release_region;
 +              }
 +      }
 +
 +      chip_version =
 +          a->read_csr(ioaddr, 88) | (a->read_csr(ioaddr, 89) << 16);
 +      if ((pcnet32_debug & NETIF_MSG_PROBE) && (pcnet32_debug & NETIF_MSG_HW))
 +              pr_info("  PCnet chip version is %#x\n", chip_version);
 +      if ((chip_version & 0xfff) != 0x003) {
 +              if (pcnet32_debug & NETIF_MSG_PROBE)
 +                      pr_info("Unsupported chip version\n");
 +              goto err_release_region;
 +      }
 +
 +      /* initialize variables */
 +      fdx = mii = fset = dxsuflo = 0;
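 +      /* keep only the part ID, which sits in bits 12-27 of the CSR88/89 version word */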
 +      chip_version = (chip_version >> 12) & 0xffff;
 +
 +      switch (chip_version) {
 +      case 0x2420:
 +              chipname = "PCnet/PCI 79C970";  /* PCI */
 +              break;
 +      case 0x2430:
 +              if (shared)
 +                      chipname = "PCnet/PCI 79C970";  /* 970 gives the wrong chip id back */
 +              else
 +                      chipname = "PCnet/32 79C965";   /* 486/VL bus */
 +              break;
 +      case 0x2621:
 +              chipname = "PCnet/PCI II 79C970A";      /* PCI */
 +              fdx = 1;
 +              break;
 +      case 0x2623:
 +              chipname = "PCnet/FAST 79C971"; /* PCI */
 +              fdx = 1;
 +              mii = 1;
 +              fset = 1;
 +              break;
 +      case 0x2624:
 +              chipname = "PCnet/FAST+ 79C972";        /* PCI */
 +              fdx = 1;
 +              mii = 1;
 +              fset = 1;
 +              break;
 +      case 0x2625:
 +              chipname = "PCnet/FAST III 79C973";     /* PCI */
 +              fdx = 1;
 +              mii = 1;
 +              break;
 +      case 0x2626:
 +              chipname = "PCnet/Home 79C978"; /* PCI */
 +              fdx = 1;
 +              /*
 +               * This is based on specs published at www.amd.com.  This section
 +               * assumes that a card with a 79C978 wants to go into standard
 +               * ethernet mode.  The 79C978 can also go into 1Mb HomePNA mode,
 +               * and the module option homepna=1 can select this instead.
 +               */
 +              media = a->read_bcr(ioaddr, 49);
 +              media &= ~3;    /* default to 10Mb ethernet */
 +              if (cards_found < MAX_UNITS && homepna[cards_found])
 +                      media |= 1;     /* switch to home wiring mode */
 +              if (pcnet32_debug & NETIF_MSG_PROBE)
 +                      printk(KERN_DEBUG PFX "media set to %sMbit mode\n",
 +                             (media & 1) ? "1" : "10");
 +              a->write_bcr(ioaddr, 49, media);
 +              break;
 +      case 0x2627:
 +              chipname = "PCnet/FAST III 79C975";     /* PCI */
 +              fdx = 1;
 +              mii = 1;
 +              break;
 +      case 0x2628:
 +              chipname = "PCnet/PRO 79C976";
 +              fdx = 1;
 +              mii = 1;
 +              break;
 +      default:
 +              if (pcnet32_debug & NETIF_MSG_PROBE)
 +                      pr_info("PCnet version %#x, no PCnet32 chip\n",
 +                              chip_version);
 +              goto err_release_region;
 +      }
 +
 +      /*
 +       *  On selected chips turn on the BCR18:NOUFLO bit. This stops transmit
 +       *  from starting until the whole packet is loaded. Strike one for
 +       *  reliability, lose one for latency - although on PCI this isn't a
 +       *  big loss. Older chips have FIFOs smaller than a packet, so you
 +       *  can't do this.
 +       *  Turn on BCR18:BurstRdEn and BCR18:BurstWrEn.
 +       */
 +
 +      if (fset) {
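 +              /* 0x0860 sets BurstWrEn (bit 5), BurstRdEn (bit 6) and NOUFLO (bit 11) in BCR18 */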
 +              a->write_bcr(ioaddr, 18, (a->read_bcr(ioaddr, 18) | 0x0860));
 +              a->write_csr(ioaddr, 80,
 +                           (a->read_csr(ioaddr, 80) & ~0x0C00) | 0x0c00);
 +              dxsuflo = 1;
 +      }
 +
 +      dev = alloc_etherdev(sizeof(*lp));
 +      if (!dev) {
 +              if (pcnet32_debug & NETIF_MSG_PROBE)
 +                      pr_err("Memory allocation failed\n");
 +              ret = -ENOMEM;
 +              goto err_release_region;
 +      }
 +
 +      if (pdev)
 +              SET_NETDEV_DEV(dev, &pdev->dev);
 +
 +      if (pcnet32_debug & NETIF_MSG_PROBE)
 +              pr_info("%s at %#3lx,", chipname, ioaddr);
 +
 +      /* In most chips, after a chip reset, the ethernet address is read from the
 +       * station address PROM at the base address and programmed into the
 +       * "Physical Address Registers" CSR12-14.
 +       * As a precautionary measure, we read the PROM values and complain if
 +       * they disagree with the CSRs.  If they miscompare, and the PROM addr
 +       * is valid, then the PROM addr is used.
 +       */
 +      for (i = 0; i < 3; i++) {
 +              unsigned int val;
 +              val = a->read_csr(ioaddr, i + 12) & 0x0ffff;
 +              /* There may be endianness issues here. */
 +              dev->dev_addr[2 * i] = val & 0x0ff;
 +              dev->dev_addr[2 * i + 1] = (val >> 8) & 0x0ff;
 +      }
 +
 +      /* read PROM address and compare with CSR address */
 +      for (i = 0; i < 6; i++)
 +              promaddr[i] = inb(ioaddr + i);
 +
 +      if (memcmp(promaddr, dev->dev_addr, 6) ||
 +          !is_valid_ether_addr(dev->dev_addr)) {
 +              if (is_valid_ether_addr(promaddr)) {
 +                      if (pcnet32_debug & NETIF_MSG_PROBE) {
 +                              pr_cont(" warning: CSR address invalid,\n");
 +                              pr_info("    using instead PROM address of");
 +                      }
 +                      memcpy(dev->dev_addr, promaddr, 6);
 +              }
 +      }
 +      memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
 +
 +      /* if the ethernet address is not valid, force to 00:00:00:00:00:00 */
 +      if (!is_valid_ether_addr(dev->perm_addr))
 +              memset(dev->dev_addr, 0, ETH_ALEN);
 +
 +      if (pcnet32_debug & NETIF_MSG_PROBE) {
 +              pr_cont(" %pM", dev->dev_addr);
 +
 +              /* Version 0x2623 and 0x2624 */
 +              if (((chip_version + 1) & 0xfffe) == 0x2624) {
 +                      i = a->read_csr(ioaddr, 80) & 0x0C00;   /* Check tx_start_pt */
 +                      pr_info("    tx_start_pt(0x%04x):", i);
 +                      switch (i >> 10) {
 +                      case 0:
 +                              pr_cont("  20 bytes,");
 +                              break;
 +                      case 1:
 +                              pr_cont("  64 bytes,");
 +                              break;
 +                      case 2:
 +                              pr_cont(" 128 bytes,");
 +                              break;
 +                      case 3:
 +                              pr_cont("~220 bytes,");
 +                              break;
 +                      }
 +                      i = a->read_bcr(ioaddr, 18);    /* Check Burst/Bus control */
 +                      pr_cont(" BCR18(%x):", i & 0xffff);
 +                      if (i & (1 << 5))
 +                              pr_cont("BurstWrEn ");
 +                      if (i & (1 << 6))
 +                              pr_cont("BurstRdEn ");
 +                      if (i & (1 << 7))
 +                              pr_cont("DWordIO ");
 +                      if (i & (1 << 11))
 +                              pr_cont("NoUFlow ");
 +                      i = a->read_bcr(ioaddr, 25);
 +                      pr_info("    SRAMSIZE=0x%04x,", i << 8);
 +                      i = a->read_bcr(ioaddr, 26);
 +                      pr_cont(" SRAM_BND=0x%04x,", i << 8);
 +                      i = a->read_bcr(ioaddr, 27);
 +                      if (i & (1 << 14))
 +                              pr_cont("LowLatRx");
 +              }
 +      }
 +
 +      dev->base_addr = ioaddr;
 +      lp = netdev_priv(dev);
 +      /* pci_alloc_consistent returns page-aligned memory, so we do not have to check the alignment */
 +      lp->init_block = pci_alloc_consistent(pdev, sizeof(*lp->init_block),
 +                                            &lp->init_dma_addr);
 +      if (!lp->init_block) {
 +              if (pcnet32_debug & NETIF_MSG_PROBE)
 +                      pr_err("Consistent memory allocation failed\n");
 +              ret = -ENOMEM;
 +              goto err_free_netdev;
 +      }
 +      lp->pci_dev = pdev;
 +
 +      lp->dev = dev;
 +
 +      spin_lock_init(&lp->lock);
 +
 +      lp->name = chipname;
 +      lp->shared_irq = shared;
 +      lp->tx_ring_size = TX_RING_SIZE;        /* default tx ring size */
 +      lp->rx_ring_size = RX_RING_SIZE;        /* default rx ring size */
 +      lp->tx_mod_mask = lp->tx_ring_size - 1;
 +      lp->rx_mod_mask = lp->rx_ring_size - 1;
 +      lp->tx_len_bits = (PCNET32_LOG_TX_BUFFERS << 12);
 +      lp->rx_len_bits = (PCNET32_LOG_RX_BUFFERS << 4);
 +      lp->mii_if.full_duplex = fdx;
 +      lp->mii_if.phy_id_mask = 0x1f;
 +      lp->mii_if.reg_num_mask = 0x1f;
 +      lp->dxsuflo = dxsuflo;
 +      lp->mii = mii;
 +      lp->chip_version = chip_version;
 +      lp->msg_enable = pcnet32_debug;
 +      if ((cards_found >= MAX_UNITS) ||
 +          (options[cards_found] >= sizeof(options_mapping)))
 +              lp->options = PCNET32_PORT_ASEL;
 +      else
 +              lp->options = options_mapping[options[cards_found]];
 +      lp->mii_if.dev = dev;
 +      lp->mii_if.mdio_read = mdio_read;
 +      lp->mii_if.mdio_write = mdio_write;
 +
 +      /* napi.weight is used in both the napi and non-napi cases */
 +      lp->napi.weight = lp->rx_ring_size / 2;
 +
 +      netif_napi_add(dev, &lp->napi, pcnet32_poll, lp->rx_ring_size / 2);
 +
 +      if (fdx && !(lp->options & PCNET32_PORT_ASEL) &&
 +          ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
 +              lp->options |= PCNET32_PORT_FD;
 +
 +      lp->a = *a;
 +
 +      /* prior to register_netdev, dev->name is not yet correct */
 +      if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
 +              ret = -ENOMEM;
 +              goto err_free_ring;
 +      }
 +      /* detect special T1/E1 WAN card by checking for MAC address */
 +      if (dev->dev_addr[0] == 0x00 && dev->dev_addr[1] == 0xe0 &&
 +          dev->dev_addr[2] == 0x75)
 +              lp->options = PCNET32_PORT_FD | PCNET32_PORT_GPSI;
 +
 +      lp->init_block->mode = cpu_to_le16(0x0003);     /* Disable Rx and Tx. */
 +      lp->init_block->tlen_rlen =
 +          cpu_to_le16(lp->tx_len_bits | lp->rx_len_bits);
 +      for (i = 0; i < 6; i++)
 +              lp->init_block->phys_addr[i] = dev->dev_addr[i];
 +      lp->init_block->filter[0] = 0x00000000;
 +      lp->init_block->filter[1] = 0x00000000;
 +      lp->init_block->rx_ring = cpu_to_le32(lp->rx_ring_dma_addr);
 +      lp->init_block->tx_ring = cpu_to_le32(lp->tx_ring_dma_addr);
 +
 +      /* switch pcnet32 to 32bit mode */
 +      a->write_bcr(ioaddr, 20, 2);
 +
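 +      /* CSR1/CSR2 take the low/high 16 bits of the init block DMA address */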
 +      a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
 +      a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
 +
 +      if (pdev) {             /* use the IRQ provided by PCI */
 +              dev->irq = pdev->irq;
 +              if (pcnet32_debug & NETIF_MSG_PROBE)
 +                      pr_cont(" assigned IRQ %d\n", dev->irq);
 +      } else {
 +              unsigned long irq_mask = probe_irq_on();
 +
 +              /*
 +               * To auto-IRQ we enable the initialization-done and DMA error
 +               * interrupts. For ISA boards we get a DMA error, but VLB and PCI
 +               * boards will work.
 +               */
 +              /* Trigger an initialization just for the interrupt. */
 +              a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_INIT);
 +              mdelay(1);
 +
 +              dev->irq = probe_irq_off(irq_mask);
 +              if (!dev->irq) {
 +                      if (pcnet32_debug & NETIF_MSG_PROBE)
 +                              pr_cont(", failed to detect IRQ line\n");
 +                      ret = -ENODEV;
 +                      goto err_free_ring;
 +              }
 +              if (pcnet32_debug & NETIF_MSG_PROBE)
 +                      pr_cont(", probed IRQ %d\n", dev->irq);
 +      }
 +
 +      /* Set the mii phy_id so that we can query the link state */
 +      if (lp->mii) {
 +              /* lp->phycount and lp->phymask are set to 0 by memset above */
 +
 +              lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f;
 +              /* scan for PHYs */
 +              for (i = 0; i < PCNET32_MAX_PHYS; i++) {
 +                      unsigned short id1, id2;
 +
 +                      id1 = mdio_read(dev, i, MII_PHYSID1);
 +                      if (id1 == 0xffff)
 +                              continue;
 +                      id2 = mdio_read(dev, i, MII_PHYSID2);
 +                      if (id2 == 0xffff)
 +                              continue;
 +                      if (i == 31 && ((chip_version + 1) & 0xfffe) == 0x2624)
 +                              continue;       /* 79C971 & 79C972 have phantom phy at id 31 */
 +                      lp->phycount++;
 +                      lp->phymask |= (1 << i);
 +                      lp->mii_if.phy_id = i;
 +                      if (pcnet32_debug & NETIF_MSG_PROBE)
 +                              pr_info("Found PHY %04x:%04x at address %d\n",
 +                                      id1, id2, i);
 +              }
 +              lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
 +              if (lp->phycount > 1)
 +                      lp->options |= PCNET32_PORT_MII;
 +      }
 +
 +      init_timer(&lp->watchdog_timer);
 +      lp->watchdog_timer.data = (unsigned long)dev;
 +      lp->watchdog_timer.function = (void *)&pcnet32_watchdog;
 +
 +      /* The PCNET32-specific entries in the device structure. */
 +      dev->netdev_ops = &pcnet32_netdev_ops;
 +      dev->ethtool_ops = &pcnet32_ethtool_ops;
 +      dev->watchdog_timeo = (5 * HZ);
 +
 +      /* Fill in the generic fields of the device structure. */
 +      if (register_netdev(dev))
 +              goto err_free_ring;
 +
 +      if (pdev) {
 +              pci_set_drvdata(pdev, dev);
 +      } else {
 +              lp->next = pcnet32_dev;
 +              pcnet32_dev = dev;
 +      }
 +
 +      if (pcnet32_debug & NETIF_MSG_PROBE)
 +              pr_info("%s: registered as %s\n", dev->name, lp->name);
 +      cards_found++;
 +
 +      /* enable LED writes */
 +      a->write_bcr(ioaddr, 2, a->read_bcr(ioaddr, 2) | 0x1000);
 +
 +      return 0;
 +
 +err_free_ring:
 +      pcnet32_free_ring(dev);
 +      pci_free_consistent(lp->pci_dev, sizeof(*lp->init_block),
 +                          lp->init_block, lp->init_dma_addr);
 +err_free_netdev:
 +      free_netdev(dev);
 +err_release_region:
 +      release_region(ioaddr, PCNET32_TOTAL_SIZE);
 +      return ret;
 +}
 +
 +/* if any allocation fails, caller must also call pcnet32_free_ring */
 +static int pcnet32_alloc_ring(struct net_device *dev, const char *name)
 +{
 +      struct pcnet32_private *lp = netdev_priv(dev);
 +
 +      lp->tx_ring = pci_alloc_consistent(lp->pci_dev,
 +                                         sizeof(struct pcnet32_tx_head) *
 +                                         lp->tx_ring_size,
 +                                         &lp->tx_ring_dma_addr);
 +      if (lp->tx_ring == NULL) {
 +              netif_err(lp, drv, dev, "Consistent memory allocation failed\n");
 +              return -ENOMEM;
 +      }
 +
 +      lp->rx_ring = pci_alloc_consistent(lp->pci_dev,
 +                                         sizeof(struct pcnet32_rx_head) *
 +                                         lp->rx_ring_size,
 +                                         &lp->rx_ring_dma_addr);
 +      if (lp->rx_ring == NULL) {
 +              netif_err(lp, drv, dev, "Consistent memory allocation failed\n");
 +              return -ENOMEM;
 +      }
 +
 +      lp->tx_dma_addr = kcalloc(lp->tx_ring_size, sizeof(dma_addr_t),
 +                                GFP_ATOMIC);
 +      if (!lp->tx_dma_addr) {
 +              netif_err(lp, drv, dev, "Memory allocation failed\n");
 +              return -ENOMEM;
 +      }
 +
 +      lp->rx_dma_addr = kcalloc(lp->rx_ring_size, sizeof(dma_addr_t),
 +                                GFP_ATOMIC);
 +      if (!lp->rx_dma_addr) {
 +              netif_err(lp, drv, dev, "Memory allocation failed\n");
 +              return -ENOMEM;
 +      }
 +
 +      lp->tx_skbuff = kcalloc(lp->tx_ring_size, sizeof(struct sk_buff *),
 +                              GFP_ATOMIC);
 +      if (!lp->tx_skbuff) {
 +              netif_err(lp, drv, dev, "Memory allocation failed\n");
 +              return -ENOMEM;
 +      }
 +
 +      lp->rx_skbuff = kcalloc(lp->rx_ring_size, sizeof(struct sk_buff *),
 +                              GFP_ATOMIC);
 +      if (!lp->rx_skbuff) {
 +              netif_err(lp, drv, dev, "Memory allocation failed\n");
 +              return -ENOMEM;
 +      }
 +
 +      return 0;
 +}
 +
 +static void pcnet32_free_ring(struct net_device *dev)
 +{
 +      struct pcnet32_private *lp = netdev_priv(dev);
 +
 +      kfree(lp->tx_skbuff);
 +      lp->tx_skbuff = NULL;
 +
 +      kfree(lp->rx_skbuff);
 +      lp->rx_skbuff = NULL;
 +
 +      kfree(lp->tx_dma_addr);
 +      lp->tx_dma_addr = NULL;
 +
 +      kfree(lp->rx_dma_addr);
 +      lp->rx_dma_addr = NULL;
 +
 +      if (lp->tx_ring) {
 +              pci_free_consistent(lp->pci_dev,
 +                                  sizeof(struct pcnet32_tx_head) *
 +                                  lp->tx_ring_size, lp->tx_ring,
 +                                  lp->tx_ring_dma_addr);
 +              lp->tx_ring = NULL;
 +      }
 +
 +      if (lp->rx_ring) {
 +              pci_free_consistent(lp->pci_dev,
 +                                  sizeof(struct pcnet32_rx_head) *
 +                                  lp->rx_ring_size, lp->rx_ring,
 +                                  lp->rx_ring_dma_addr);
 +              lp->rx_ring = NULL;
 +      }
 +}
 +
 +static int pcnet32_open(struct net_device *dev)
 +{
 +      struct pcnet32_private *lp = netdev_priv(dev);
 +      struct pci_dev *pdev = lp->pci_dev;
 +      unsigned long ioaddr = dev->base_addr;
 +      u16 val;
 +      int i;
 +      int rc;
 +      unsigned long flags;
 +
 +      if (request_irq(dev->irq, pcnet32_interrupt,
 +                      lp->shared_irq ? IRQF_SHARED : 0, dev->name,
 +                      (void *)dev)) {
 +              return -EAGAIN;
 +      }
 +
 +      spin_lock_irqsave(&lp->lock, flags);
 +      /* Check for a valid station address */
 +      if (!is_valid_ether_addr(dev->dev_addr)) {
 +              rc = -EINVAL;
 +              goto err_free_irq;
 +      }
 +
 +      /* Reset the PCNET32 */
 +      lp->a.reset(ioaddr);
 +
 +      /* switch pcnet32 to 32bit mode */
 +      lp->a.write_bcr(ioaddr, 20, 2);
 +
 +      netif_printk(lp, ifup, KERN_DEBUG, dev,
 +                   "%s() irq %d tx/rx rings %#x/%#x init %#x\n",
 +                   __func__, dev->irq, (u32) (lp->tx_ring_dma_addr),
 +                   (u32) (lp->rx_ring_dma_addr),
 +                   (u32) (lp->init_dma_addr));
 +
 +      /* set/reset autoselect bit */
 +      val = lp->a.read_bcr(ioaddr, 2) & ~2;
 +      if (lp->options & PCNET32_PORT_ASEL)
 +              val |= 2;
 +      lp->a.write_bcr(ioaddr, 2, val);
 +
 +      /* handle full duplex setting */
 +      if (lp->mii_if.full_duplex) {
 +              val = lp->a.read_bcr(ioaddr, 9) & ~3;
 +              if (lp->options & PCNET32_PORT_FD) {
 +                      val |= 1;
 +                      if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
 +                              val |= 2;
 +              } else if (lp->options & PCNET32_PORT_ASEL) {
 +                      /* workaround of xSeries250, turn on for 79C975 only */
 +                      if (lp->chip_version == 0x2627)
 +                              val |= 3;
 +              }
 +              lp->a.write_bcr(ioaddr, 9, val);
 +      }
 +
 +      /* set/reset GPSI bit in test register */
 +      val = lp->a.read_csr(ioaddr, 124) & ~0x10;
 +      if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
 +              val |= 0x10;
 +      lp->a.write_csr(ioaddr, 124, val);
 +
 +      /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
 +      if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT &&
 +          (pdev->subsystem_device == PCI_SUBDEVICE_ID_AT_2700FX ||
 +           pdev->subsystem_device == PCI_SUBDEVICE_ID_AT_2701FX)) {
 +              if (lp->options & PCNET32_PORT_ASEL) {
 +                      lp->options = PCNET32_PORT_FD | PCNET32_PORT_100;
 +                      netif_printk(lp, link, KERN_DEBUG, dev,
 +                                   "Setting 100Mb-Full Duplex\n");
 +              }
 +      }
 +      if (lp->phycount < 2) {
 +              /*
 +               * 24 Jun 2004: according to AMD, in order to change the PHY,
 +               * DANAS (or DISPM for 79C976) must be set; then select the speed,
 +               * duplex, and/or enable auto negotiation, and clear DANAS
 +               */
 +              if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
 +                      lp->a.write_bcr(ioaddr, 32,
 +                                      lp->a.read_bcr(ioaddr, 32) | 0x0080);
 +                      /* disable Auto Negotiation, set 10Mbps, HD */
 +                      val = lp->a.read_bcr(ioaddr, 32) & ~0xb8;
 +                      if (lp->options & PCNET32_PORT_FD)
 +                              val |= 0x10;
 +                      if (lp->options & PCNET32_PORT_100)
 +                              val |= 0x08;
 +                      lp->a.write_bcr(ioaddr, 32, val);
 +              } else {
 +                      if (lp->options & PCNET32_PORT_ASEL) {
 +                              lp->a.write_bcr(ioaddr, 32,
 +                                              lp->a.read_bcr(ioaddr,
 +                                                             32) | 0x0080);
 +                              /* enable auto negotiate, setup, disable fd */
 +                              val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
 +                              val |= 0x20;
 +                              lp->a.write_bcr(ioaddr, 32, val);
 +                      }
 +              }
 +      } else {
 +              int first_phy = -1;
 +              u16 bmcr;
 +              u32 bcr9;
 +              struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
 +
 +              /*
 +               * There is really no good way to handle multiple PHYs
 +               * other than turning off all automatics.
 +               */
 +              val = lp->a.read_bcr(ioaddr, 2);
 +              lp->a.write_bcr(ioaddr, 2, val & ~2);
 +              val = lp->a.read_bcr(ioaddr, 32);
 +              lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7));   /* stop MII manager */
 +
 +              if (!(lp->options & PCNET32_PORT_ASEL)) {
 +                      /* setup ecmd */
 +                      ecmd.port = PORT_MII;
 +                      ecmd.transceiver = XCVR_INTERNAL;
 +                      ecmd.autoneg = AUTONEG_DISABLE;
 +                      ethtool_cmd_speed_set(&ecmd,
 +                                            (lp->options & PCNET32_PORT_100) ?
 +                                            SPEED_100 : SPEED_10);
 +                      bcr9 = lp->a.read_bcr(ioaddr, 9);
 +
 +                      if (lp->options & PCNET32_PORT_FD) {
 +                              ecmd.duplex = DUPLEX_FULL;
 +                              bcr9 |= (1 << 0);
 +                      } else {
 +                              ecmd.duplex = DUPLEX_HALF;
 +                              bcr9 &= ~(1 << 0);      /* clear full-duplex enable for half duplex */
 +                      }
 +                      lp->a.write_bcr(ioaddr, 9, bcr9);
 +              }
 +
 +              for (i = 0; i < PCNET32_MAX_PHYS; i++) {
 +                      if (lp->phymask & (1 << i)) {
 +                              /* isolate all but the first PHY */
 +                              bmcr = mdio_read(dev, i, MII_BMCR);
 +                              if (first_phy == -1) {
 +                                      first_phy = i;
 +                                      mdio_write(dev, i, MII_BMCR,
 +                                                 bmcr & ~BMCR_ISOLATE);
 +                              } else {
 +                                      mdio_write(dev, i, MII_BMCR,
 +                                                 bmcr | BMCR_ISOLATE);
 +                              }
 +                              /* use mii_ethtool_sset to setup PHY */
 +                              lp->mii_if.phy_id = i;
 +                              ecmd.phy_address = i;
 +                              if (lp->options & PCNET32_PORT_ASEL) {
 +                                      mii_ethtool_gset(&lp->mii_if, &ecmd);
 +                                      ecmd.autoneg = AUTONEG_ENABLE;
 +                              }
 +                              mii_ethtool_sset(&lp->mii_if, &ecmd);
 +                      }
 +              }
 +              lp->mii_if.phy_id = first_phy;
 +              netif_info(lp, link, dev, "Using PHY number %d\n", first_phy);
 +      }
 +
 +#ifdef DO_DXSUFLO
 +      if (lp->dxsuflo) {      /* Disable transmit stop on underflow */
 +              val = lp->a.read_csr(ioaddr, CSR3);
 +              val |= 0x40;
 +              lp->a.write_csr(ioaddr, CSR3, val);
 +      }
 +#endif
 +
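 +      /* the port-select bits occupy bits 7-8 of the init block mode word, hence the shift by 7 */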
 +      lp->init_block->mode =
 +          cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
 +      pcnet32_load_multicast(dev);
 +
 +      if (pcnet32_init_ring(dev)) {
 +              rc = -ENOMEM;
 +              goto err_free_ring;
 +      }
 +
 +      napi_enable(&lp->napi);
 +
 +      /* Re-initialize the PCNET32, and start it when done. */
 +      lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
 +      lp->a.write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
 +
 +      lp->a.write_csr(ioaddr, CSR4, 0x0915);  /* auto tx pad */
 +      lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
 +
 +      netif_start_queue(dev);
 +
 +      if (lp->chip_version >= PCNET32_79C970A) {
 +              /* Print the link status and start the watchdog */
 +              pcnet32_check_media(dev, 1);
 +              mod_timer(&lp->watchdog_timer, PCNET32_WATCHDOG_TIMEOUT);
 +      }
 +
 +      i = 0;
 +      while (i++ < 100)
 +              if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
 +                      break;
 +      /*
 +       * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
 +       * reports that doing so triggers a bug in the '974.
 +       */
 +      lp->a.write_csr(ioaddr, CSR0, CSR0_NORMAL);
 +
 +      netif_printk(lp, ifup, KERN_DEBUG, dev,
 +                   "pcnet32 open after %d ticks, init block %#x csr0 %4.4x\n",
 +                   i,
 +                   (u32) (lp->init_dma_addr),
 +                   lp->a.read_csr(ioaddr, CSR0));
 +
 +      spin_unlock_irqrestore(&lp->lock, flags);
 +
 +      return 0;               /* Always succeed */
 +
 +err_free_ring:
 +      /* free any allocated skbuffs */
 +      pcnet32_purge_rx_ring(dev);
 +
 +      /*
 +       * Switch back to 16bit mode to avoid problems with dumb
 +       * DOS packet driver after a warm reboot
 +       */
 +      lp->a.write_bcr(ioaddr, 20, 4);
 +
 +err_free_irq:
 +      spin_unlock_irqrestore(&lp->lock, flags);
 +      free_irq(dev->irq, dev);
 +      return rc;
 +}
 +
 +/*
 + * The LANCE has been halted for one reason or another (busmaster memory
 + * arbitration error, Tx FIFO underflow, driver stopped it to reconfigure,
 + * etc.).  Modern LANCE variants always reload their ring-buffer
 + * configuration when restarted, so we must reinitialize our ring
 + * context before restarting.  As part of this reinitialization,
 + * find all packets still on the Tx ring and pretend that they had been
 + * sent (in effect, drop the packets on the floor) - the higher-level
 + * protocols will time out and retransmit.  It'd be better to shuffle
 + * these skbs to a temp list and then actually re-Tx them after
 + * restarting the chip, but I'm too lazy to do so right now.  dplatt@3do.com
 + */
 +
 +static void pcnet32_purge_tx_ring(struct net_device *dev)
 +{
 +      struct pcnet32_private *lp = netdev_priv(dev);
 +      int i;
 +
 +      for (i = 0; i < lp->tx_ring_size; i++) {
 +              lp->tx_ring[i].status = 0;      /* CPU owns buffer */
 +              wmb();          /* Make sure adapter sees owner change */
 +              if (lp->tx_skbuff[i]) {
 +                      pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[i],
 +                                       lp->tx_skbuff[i]->len,
 +                                       PCI_DMA_TODEVICE);
 +                      dev_kfree_skb_any(lp->tx_skbuff[i]);
 +              }
 +              lp->tx_skbuff[i] = NULL;
 +              lp->tx_dma_addr[i] = 0;
 +      }
 +}
 +
 +/* Initialize the PCNET32 Rx and Tx rings. */
 +static int pcnet32_init_ring(struct net_device *dev)
 +{
 +      struct pcnet32_private *lp = netdev_priv(dev);
 +      int i;
 +
 +      lp->tx_full = 0;
 +      lp->cur_rx = lp->cur_tx = 0;
 +      lp->dirty_rx = lp->dirty_tx = 0;
 +
 +      for (i = 0; i < lp->rx_ring_size; i++) {
 +              struct sk_buff *rx_skbuff = lp->rx_skbuff[i];
 +              if (rx_skbuff == NULL) {
 +                      lp->rx_skbuff[i] = dev_alloc_skb(PKT_BUF_SKB);
 +                      rx_skbuff = lp->rx_skbuff[i];
 +                      if (!rx_skbuff) {
 +                              /* there is not much we can do at this point */
 +                              netif_err(lp, drv, dev, "%s dev_alloc_skb failed\n",
 +                                        __func__);
 +                              return -1;
 +                      }
 +                      skb_reserve(rx_skbuff, NET_IP_ALIGN);
 +              }
 +
 +              rmb();
 +              if (lp->rx_dma_addr[i] == 0)
 +                      lp->rx_dma_addr[i] =
 +                          pci_map_single(lp->pci_dev, rx_skbuff->data,
 +                                         PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
 +              lp->rx_ring[i].base = cpu_to_le32(lp->rx_dma_addr[i]);
 +              lp->rx_ring[i].buf_length = cpu_to_le16(NEG_BUF_SIZE);
 +              wmb();          /* Make sure owner changes after all others are visible */
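 +              /* 0x8000 sets the OWN bit, handing the descriptor to the chip */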
 +              lp->rx_ring[i].status = cpu_to_le16(0x8000);
 +      }
 +      /* The Tx buffer address is filled in as needed, but we do need to clear
 +       * the upper ownership bit. */
 +      for (i = 0; i < lp->tx_ring_size; i++) {
 +              lp->tx_ring[i].status = 0;      /* CPU owns buffer */
 +              wmb();          /* Make sure adapter sees owner change */
 +              lp->tx_ring[i].base = 0;
 +              lp->tx_dma_addr[i] = 0;
 +      }
 +
 +      lp->init_block->tlen_rlen =
 +          cpu_to_le16(lp->tx_len_bits | lp->rx_len_bits);
 +      for (i = 0; i < 6; i++)
 +              lp->init_block->phys_addr[i] = dev->dev_addr[i];
 +      lp->init_block->rx_ring = cpu_to_le32(lp->rx_ring_dma_addr);
 +      lp->init_block->tx_ring = cpu_to_le32(lp->tx_ring_dma_addr);
 +      wmb();                  /* Make sure all changes are visible */
 +      return 0;
 +}
 +
 +/* the pcnet32 has been issued a stop or reset.  Wait for the stop bit
 + * then flush the pending transmit operations, re-initialize the ring,
 + * and tell the chip to initialize.
 + */
 +static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
 +{
 +      struct pcnet32_private *lp = netdev_priv(dev);
 +      unsigned long ioaddr = dev->base_addr;
 +      int i;
 +
 +      /* wait for stop */
 +      for (i = 0; i < 100; i++)
 +              if (lp->a.read_csr(ioaddr, CSR0) & CSR0_STOP)
 +                      break;
 +
 +      if (i >= 100)
 +              netif_err(lp, drv, dev, "%s timed out waiting for stop\n",
 +                        __func__);
 +
 +      pcnet32_purge_tx_ring(dev);
 +      if (pcnet32_init_ring(dev))
 +              return;
 +
 +      /* ReInit Ring */
 +      lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
 +      i = 0;
 +      while (i++ < 1000)
 +              if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
 +                      break;
 +
 +      lp->a.write_csr(ioaddr, CSR0, csr0_bits);
 +}
 +
 +static void pcnet32_tx_timeout(struct net_device *dev)
 +{
 +      struct pcnet32_private *lp = netdev_priv(dev);
 +      unsigned long ioaddr = dev->base_addr, flags;
 +
 +      spin_lock_irqsave(&lp->lock, flags);
 +      /* Transmitter timeout, serious problems. */
 +      if (pcnet32_debug & NETIF_MSG_DRV)
 +              pr_err("%s: transmit timed out, status %4.4x, resetting\n",
 +                     dev->name, lp->a.read_csr(ioaddr, CSR0));
 +      lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
 +      dev->stats.tx_errors++;
 +      if (netif_msg_tx_err(lp)) {
 +              int i;
 +              printk(KERN_DEBUG
 +                     " Ring data dump: dirty_tx %d cur_tx %d%s cur_rx %d.",
 +                     lp->dirty_tx, lp->cur_tx, lp->tx_full ? " (full)" : "",
 +                     lp->cur_rx);
 +              for (i = 0; i < lp->rx_ring_size; i++)
 +                      printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ",
 +                             le32_to_cpu(lp->rx_ring[i].base),
 +                             (-le16_to_cpu(lp->rx_ring[i].buf_length)) &
 +                             0xffff, le32_to_cpu(lp->rx_ring[i].msg_length),
 +                             le16_to_cpu(lp->rx_ring[i].status));
 +              for (i = 0; i < lp->tx_ring_size; i++)
 +                      printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ",
 +                             le32_to_cpu(lp->tx_ring[i].base),
 +                             (-le16_to_cpu(lp->tx_ring[i].length)) & 0xffff,
 +                             le32_to_cpu(lp->tx_ring[i].misc),
 +                             le16_to_cpu(lp->tx_ring[i].status));
 +              printk("\n");
 +      }
 +      pcnet32_restart(dev, CSR0_NORMAL);
 +
 +      dev->trans_start = jiffies; /* prevent tx timeout */
 +      netif_wake_queue(dev);
 +
 +      spin_unlock_irqrestore(&lp->lock, flags);
 +}
 +
 +static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
 +                                    struct net_device *dev)
 +{
 +      struct pcnet32_private *lp = netdev_priv(dev);
 +      unsigned long ioaddr = dev->base_addr;
 +      u16 status;
 +      int entry;
 +      unsigned long flags;
 +
 +      spin_lock_irqsave(&lp->lock, flags);
 +
 +      netif_printk(lp, tx_queued, KERN_DEBUG, dev,
 +                   "%s() called, csr0 %4.4x\n",
 +                   __func__, lp->a.read_csr(ioaddr, CSR0));
 +
 +      /* Default status -- will not enable Successful-TxDone
 +       * interrupt when that option is available to us.
 +       */
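 +      /* 0x8300 = OWN | STP | ENP: give the descriptor to the chip with start- and end-of-packet set */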
 +      status = 0x8300;
 +
 +      /* Fill in a Tx ring entry */
 +
 +      /* Mask to ring buffer boundary. */
 +      entry = lp->cur_tx & lp->tx_mod_mask;
 +
 +      /* Caution: the write order is important here, set the status
 +       * with the "ownership" bits last. */
 +
 +      lp->tx_ring[entry].length = cpu_to_le16(-skb->len);
 +
 +      lp->tx_ring[entry].misc = 0x00000000;
 +
 +      lp->tx_skbuff[entry] = skb;
 +      lp->tx_dma_addr[entry] =
 +          pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
 +      lp->tx_ring[entry].base = cpu_to_le32(lp->tx_dma_addr[entry]);
 +      wmb();                  /* Make sure owner changes after all others are visible */
 +      lp->tx_ring[entry].status = cpu_to_le16(status);
 +
 +      lp->cur_tx++;
 +      dev->stats.tx_bytes += skb->len;
 +
 +      /* Trigger an immediate send poll. */
 +      lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
 +
 +      if (lp->tx_ring[(entry + 1) & lp->tx_mod_mask].base != 0) {
 +              lp->tx_full = 1;
 +              netif_stop_queue(dev);
 +      }
 +      spin_unlock_irqrestore(&lp->lock, flags);
 +      return NETDEV_TX_OK;
 +}
 +
 +/* The PCNET32 interrupt handler. */
 +static irqreturn_t
 +pcnet32_interrupt(int irq, void *dev_id)
 +{
 +      struct net_device *dev = dev_id;
 +      struct pcnet32_private *lp;
 +      unsigned long ioaddr;
 +      u16 csr0;
 +      int boguscnt = max_interrupt_work;
 +
 +      ioaddr = dev->base_addr;
 +      lp = netdev_priv(dev);
 +
 +      spin_lock(&lp->lock);
 +
 +      csr0 = lp->a.read_csr(ioaddr, CSR0);
 +      while ((csr0 & 0x8f00) && --boguscnt >= 0) {
 +              if (csr0 == 0xffff)
 +                      break;  /* PCMCIA remove happened */
 +              /* Acknowledge all of the current interrupt sources ASAP. */
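 +              /* writing the set status bits back clears them; masking with ~0x004f
 +               * keeps the control bits (INIT/STRT/STOP/TDMD/INEA) from being re-asserted */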
 +              lp->a.write_csr(ioaddr, CSR0, csr0 & ~0x004f);
 +
 +              netif_printk(lp, intr, KERN_DEBUG, dev,
 +                           "interrupt  csr0=%#2.2x new csr=%#2.2x\n",
 +                           csr0, lp->a.read_csr(ioaddr, CSR0));
 +
 +              /* Log misc errors. */
 +              if (csr0 & 0x4000)
 +                      dev->stats.tx_errors++; /* Tx babble. */
 +              if (csr0 & 0x1000) {
 +                      /*
 +                       * This happens when our receive ring is full. This
 +                       * shouldn't be a problem as we will see normal rx
 +                       * interrupts for the frames in the receive ring.  But
 +                       * there are some PCI chipsets (I can reproduce this
 +                       * on SP3G with Intel saturn chipset) which sometimes
 +                       * have problems and will fill up the receive
 +                       * ring with error descriptors.  In this situation we
 +                       * don't get a rx interrupt, but a missed frame
 +                       * interrupt sooner or later.
 +                       */
 +                      dev->stats.rx_errors++; /* Missed a Rx frame. */
 +              }
 +              if (csr0 & 0x0800) {
 +                      netif_err(lp, drv, dev, "Bus master arbitration failure, status %4.4x\n",
 +                                csr0);
 +                      /* unlike for the lance, there is no restart needed */
 +              }
 +              if (napi_schedule_prep(&lp->napi)) {
 +                      u16 val;
 +                      /* set interrupt masks */
 +                      val = lp->a.read_csr(ioaddr, CSR3);
 +                      val |= 0x5f00;
 +                      lp->a.write_csr(ioaddr, CSR3, val);
 +
 +                      __napi_schedule(&lp->napi);
 +                      break;
 +              }
 +              csr0 = lp->a.read_csr(ioaddr, CSR0);
 +      }
 +
 +      netif_printk(lp, intr, KERN_DEBUG, dev,
 +                   "exiting interrupt, csr0=%#4.4x\n",
 +                   lp->a.read_csr(ioaddr, CSR0));
 +
 +      spin_unlock(&lp->lock);
 +
 +      return IRQ_HANDLED;
 +}
 +
 +static int pcnet32_close(struct net_device *dev)
 +{
 +      unsigned long ioaddr = dev->base_addr;
 +      struct pcnet32_private *lp = netdev_priv(dev);
 +      unsigned long flags;
 +
 +      del_timer_sync(&lp->watchdog_timer);
 +
 +      netif_stop_queue(dev);
 +      napi_disable(&lp->napi);
 +
 +      spin_lock_irqsave(&lp->lock, flags);
 +
 +      dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
 +
 +      netif_printk(lp, ifdown, KERN_DEBUG, dev,
 +                   "Shutting down ethercard, status was %2.2x\n",
 +                   lp->a.read_csr(ioaddr, CSR0));
 +
 +      /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
 +      lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
 +
 +      /*
 +       * Switch back to 16bit mode to avoid problems with dumb
 +       * DOS packet driver after a warm reboot
 +       */
 +      lp->a.write_bcr(ioaddr, 20, 4);
 +
 +      spin_unlock_irqrestore(&lp->lock, flags);
 +
 +      free_irq(dev->irq, dev);
 +
 +      spin_lock_irqsave(&lp->lock, flags);
 +
 +      pcnet32_purge_rx_ring(dev);
 +      pcnet32_purge_tx_ring(dev);
 +
 +      spin_unlock_irqrestore(&lp->lock, flags);
 +
 +      return 0;
 +}
 +
 +static struct net_device_stats *pcnet32_get_stats(struct net_device *dev)
 +{
 +      struct pcnet32_private *lp = netdev_priv(dev);
 +      unsigned long ioaddr = dev->base_addr;
 +      unsigned long flags;
 +
 +      spin_lock_irqsave(&lp->lock, flags);
 +      dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
 +      spin_unlock_irqrestore(&lp->lock, flags);
 +
 +      return &dev->stats;
 +}
 +
 +/* taken from the sunlance driver, which it took from the depca driver */
 +static void pcnet32_load_multicast(struct net_device *dev)
 +{
 +      struct pcnet32_private *lp = netdev_priv(dev);
 +      volatile struct pcnet32_init_block *ib = lp->init_block;
 +      volatile __le16 *mcast_table = (__le16 *)ib->filter;
 +      struct netdev_hw_addr *ha;
 +      unsigned long ioaddr = dev->base_addr;
 +      int i;
 +      u32 crc;
 +
 +      /* set all multicast bits */
 +      if (dev->flags & IFF_ALLMULTI) {
 +              ib->filter[0] = cpu_to_le32(~0U);
 +              ib->filter[1] = cpu_to_le32(~0U);
 +              lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
 +              lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
 +              lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
 +              lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
 +              return;
 +      }
 +      /* clear the multicast filter */
 +      ib->filter[0] = 0;
 +      ib->filter[1] = 0;
 +
 +      /* Add addresses */
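 +      /* the top 6 bits of the little-endian CRC select one of the 64 logical address filter bits */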
 +      netdev_for_each_mc_addr(ha, dev) {
 +              crc = ether_crc_le(6, ha->addr);
 +              crc = crc >> 26;
 +              mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf));
 +      }
 +      for (i = 0; i < 4; i++)
 +              lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i,
 +                              le16_to_cpu(mcast_table[i]));
 +}
 +
 +/*
 + * Set or clear the multicast filter for this adaptor.
 + */
 +static void pcnet32_set_multicast_list(struct net_device *dev)
 +{
 +      unsigned long ioaddr = dev->base_addr, flags;
 +      struct pcnet32_private *lp = netdev_priv(dev);
 +      int csr15, suspended;
 +
 +      spin_lock_irqsave(&lp->lock, flags);
 +      suspended = pcnet32_suspend(dev, &flags, 0);
 +      csr15 = lp->a.read_csr(ioaddr, CSR15);
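 +      /* CSR15 bit 15 (PROM) enables promiscuous reception; the 0x8000 mode bit below mirrors it */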
 +      if (dev->flags & IFF_PROMISC) {
 +              /* Log any net taps. */
 +              netif_info(lp, hw, dev, "Promiscuous mode enabled\n");
 +              lp->init_block->mode =
 +                  cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
 +                              7);
 +              lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000);
 +      } else {
 +              lp->init_block->mode =
 +                  cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
 +              lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff);
 +              pcnet32_load_multicast(dev);
 +      }
 +
 +      if (suspended) {
 +              int csr5;
 +              /* clear SUSPEND (SPND) - CSR5 bit 0 */
 +              csr5 = lp->a.read_csr(ioaddr, CSR5);
 +              lp->a.write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
 +      } else {
 +              lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
 +              pcnet32_restart(dev, CSR0_NORMAL);
 +              netif_wake_queue(dev);
 +      }
 +
 +      spin_unlock_irqrestore(&lp->lock, flags);
 +}
 +
 +/* This routine assumes that the lp->lock is held */
 +static int mdio_read(struct net_device *dev, int phy_id, int reg_num)
 +{
 +      struct pcnet32_private *lp = netdev_priv(dev);
 +      unsigned long ioaddr = dev->base_addr;
 +      u16 val_out;
 +
 +      if (!lp->mii)
 +              return 0;
 +
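 +      /* BCR33 selects the PHY and register (phy_id in bits 5-9, reg in bits 0-4); BCR34 is the MDIO data port */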
 +      lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
 +      val_out = lp->a.read_bcr(ioaddr, 34);
 +
 +      return val_out;
 +}
 +
 +/* This routine assumes that the lp->lock is held */
 +static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val)
 +{
 +      struct pcnet32_private *lp = netdev_priv(dev);
 +      unsigned long ioaddr = dev->base_addr;
 +
 +      if (!lp->mii)
 +              return;
 +
 +      lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
 +      lp->a.write_bcr(ioaddr, 34, val);
 +}
 +
 +static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 +{
 +      struct pcnet32_private *lp = netdev_priv(dev);
 +      int rc;
 +      unsigned long flags;
 +
 +      /* SIOC[GS]MIIxxx ioctls */
 +      if (lp->mii) {
 +              spin_lock_irqsave(&lp->lock, flags);
 +              rc = generic_mii_ioctl(&lp->mii_if, if_mii(rq), cmd, NULL);
 +              spin_unlock_irqrestore(&lp->lock, flags);
 +      } else {
 +              rc = -EOPNOTSUPP;
 +      }
 +
 +      return rc;
 +}
 +
 +static int pcnet32_check_otherphy(struct net_device *dev)
 +{
 +      struct pcnet32_private *lp = netdev_priv(dev);
 +      struct mii_if_info mii = lp->mii_if;
 +      u16 bmcr;
 +      int i;
 +
 +      for (i = 0; i < PCNET32_MAX_PHYS; i++) {
 +              if (i == lp->mii_if.phy_id)
 +                      continue;       /* skip active phy */
 +              if (lp->phymask & (1 << i)) {
 +                      mii.phy_id = i;
 +                      if (mii_link_ok(&mii)) {
 +                              /* found PHY with active link */
 +                              netif_info(lp, link, dev, "Using PHY number %d\n",
 +                                         i);
 +
 +                              /* isolate inactive phy */
 +                              bmcr =
 +                                  mdio_read(dev, lp->mii_if.phy_id, MII_BMCR);
 +                              mdio_write(dev, lp->mii_if.phy_id, MII_BMCR,
 +                                         bmcr | BMCR_ISOLATE);
 +
 +                              /* de-isolate new phy */
 +                              bmcr = mdio_read(dev, i, MII_BMCR);
 +                              mdio_write(dev, i, MII_BMCR,
 +                                         bmcr & ~BMCR_ISOLATE);
 +
 +                              /* set new phy address */
 +                              lp->mii_if.phy_id = i;
 +                              return 1;
 +                      }
 +              }
 +      }
 +      return 0;
 +}
 +
 +/*
 + * Show the status of the media.  Similar to mii_check_media, but it
 + * correctly shows the link speed for all (tested) pcnet32 variants.
 + * Devices with no mii just report link state without speed.
 + *
 + * Caller is assumed to hold and release the lp->lock.
 + */
 +
 +static void pcnet32_check_media(struct net_device *dev, int verbose)
 +{
 +      struct pcnet32_private *lp = netdev_priv(dev);
 +      int curr_link;
 +      int prev_link = netif_carrier_ok(dev) ? 1 : 0;
 +      u32 bcr9;
 +
 +      if (lp->mii) {
 +              curr_link = mii_link_ok(&lp->mii_if);
 +      } else {
 +              ulong ioaddr = dev->base_addr;  /* card base I/O address */
 +              curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
 +      }
 +      if (!curr_link) {
 +              if (prev_link || verbose) {
 +                      netif_carrier_off(dev);
 +                      netif_info(lp, link, dev, "link down\n");
 +              }
 +              if (lp->phycount > 1) {
 +                      curr_link = pcnet32_check_otherphy(dev);
 +                      prev_link = 0;
 +              }
 +      } else if (verbose || !prev_link) {
 +              netif_carrier_on(dev);
 +              if (lp->mii) {
 +                      if (netif_msg_link(lp)) {
 +                              struct ethtool_cmd ecmd = {
 +                                      .cmd = ETHTOOL_GSET };
 +                              mii_ethtool_gset(&lp->mii_if, &ecmd);
 +                              netdev_info(dev, "link up, %uMbps, %s-duplex\n",
 +                                          ethtool_cmd_speed(&ecmd),
 +                                          (ecmd.duplex == DUPLEX_FULL)
 +                                          ? "full" : "half");
 +                      }
 +                      bcr9 = lp->a.read_bcr(dev->base_addr, 9);
 +                      if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
 +                              if (lp->mii_if.full_duplex)
 +                                      bcr9 |= (1 << 0);
 +                              else
 +                                      bcr9 &= ~(1 << 0);
 +                              lp->a.write_bcr(dev->base_addr, 9, bcr9);
 +                      }
 +              } else {
 +                      netif_info(lp, link, dev, "link up\n");
 +              }
 +      }
 +}
 +
 +/*
 + * Check for loss of link and link establishment.
 + * Cannot use mii_check_media because it does nothing if mode is forced.
 + */
 +
 +static void pcnet32_watchdog(struct net_device *dev)
 +{
 +      struct pcnet32_private *lp = netdev_priv(dev);
 +      unsigned long flags;
 +
 +      /* Print the link status if it has changed */
 +      spin_lock_irqsave(&lp->lock, flags);
 +      pcnet32_check_media(dev, 0);
 +      spin_unlock_irqrestore(&lp->lock, flags);
 +
 +      mod_timer(&lp->watchdog_timer, round_jiffies(PCNET32_WATCHDOG_TIMEOUT));
 +}
 +
 +static int pcnet32_pm_suspend(struct pci_dev *pdev, pm_message_t state)
 +{
 +      struct net_device *dev = pci_get_drvdata(pdev);
 +
 +      if (netif_running(dev)) {
 +              netif_device_detach(dev);
 +              pcnet32_close(dev);
 +      }
 +      pci_save_state(pdev);
 +      pci_set_power_state(pdev, pci_choose_state(pdev, state));
 +      return 0;
 +}
 +
 +static int pcnet32_pm_resume(struct pci_dev *pdev)
 +{
 +      struct net_device *dev = pci_get_drvdata(pdev);
 +
 +      pci_set_power_state(pdev, PCI_D0);
 +      pci_restore_state(pdev);
 +
 +      if (netif_running(dev)) {
 +              pcnet32_open(dev);
 +              netif_device_attach(dev);
 +      }
 +      return 0;
 +}
 +
 +static void __devexit pcnet32_remove_one(struct pci_dev *pdev)
 +{
 +      struct net_device *dev = pci_get_drvdata(pdev);
 +
 +      if (dev) {
 +              struct pcnet32_private *lp = netdev_priv(dev);
 +
 +              unregister_netdev(dev);
 +              pcnet32_free_ring(dev);
 +              release_region(dev->base_addr, PCNET32_TOTAL_SIZE);
 +              pci_free_consistent(lp->pci_dev, sizeof(*lp->init_block),
 +                                  lp->init_block, lp->init_dma_addr);
 +              free_netdev(dev);
 +              pci_disable_device(pdev);
 +              pci_set_drvdata(pdev, NULL);
 +      }
 +}
 +
 +static struct pci_driver pcnet32_driver = {
 +      .name = DRV_NAME,
 +      .probe = pcnet32_probe_pci,
 +      .remove = __devexit_p(pcnet32_remove_one),
 +      .id_table = pcnet32_pci_tbl,
 +      .suspend = pcnet32_pm_suspend,
 +      .resume = pcnet32_pm_resume,
 +};
 +
 +/* An additional parameter that may be passed in... */
 +static int debug = -1;
 +static int tx_start_pt = -1;
 +static int pcnet32_have_pci;
 +
 +module_param(debug, int, 0);
 +MODULE_PARM_DESC(debug, DRV_NAME " debug level");
 +module_param(max_interrupt_work, int, 0);
 +MODULE_PARM_DESC(max_interrupt_work,
 +               DRV_NAME " maximum events handled per interrupt");
 +module_param(rx_copybreak, int, 0);
 +MODULE_PARM_DESC(rx_copybreak,
 +               DRV_NAME " copy breakpoint for copy-only-tiny-frames");
 +module_param(tx_start_pt, int, 0);
 +MODULE_PARM_DESC(tx_start_pt, DRV_NAME " transmit start point (0-3)");
 +module_param(pcnet32vlb, int, 0);
 +MODULE_PARM_DESC(pcnet32vlb, DRV_NAME " Vesa local bus (VLB) support (0/1)");
 +module_param_array(options, int, NULL, 0);
 +MODULE_PARM_DESC(options, DRV_NAME " initial option setting(s) (0-15)");
 +module_param_array(full_duplex, int, NULL, 0);
 +MODULE_PARM_DESC(full_duplex, DRV_NAME " full duplex setting(s) (1)");
 +/* Module Parameter for HomePNA cards added by Patrick Simmons, 2004 */
 +module_param_array(homepna, int, NULL, 0);
 +MODULE_PARM_DESC(homepna,
 +               DRV_NAME
 +               " mode for 79C978 cards (1 for HomePNA, 0 for Ethernet, default Ethernet)");
 +
 +MODULE_AUTHOR("Thomas Bogendoerfer");
 +MODULE_DESCRIPTION("Driver for PCnet32 and PCnetPCI based ethercards");
 +MODULE_LICENSE("GPL");
 +
 +#define PCNET32_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
 +
 +static int __init pcnet32_init_module(void)
 +{
 +      pr_info("%s", version);
 +
 +      pcnet32_debug = netif_msg_init(debug, PCNET32_MSG_DEFAULT);
 +
 +      if ((tx_start_pt >= 0) && (tx_start_pt <= 3))
 +              tx_start = tx_start_pt;
 +
 +      /* find the PCI devices */
 +      if (!pci_register_driver(&pcnet32_driver))
 +              pcnet32_have_pci = 1;
 +
 +      /* should we find any remaining VLbus devices ? */
 +      if (pcnet32vlb)
 +              pcnet32_probe_vlbus(pcnet32_portlist);
 +
 +      if (cards_found && (pcnet32_debug & NETIF_MSG_PROBE))
 +              pr_info("%d cards_found\n", cards_found);
 +
 +      return (pcnet32_have_pci + cards_found) ? 0 : -ENODEV;
 +}
 +
 +static void __exit pcnet32_cleanup_module(void)
 +{
 +      struct net_device *next_dev;
 +
 +      while (pcnet32_dev) {
 +              struct pcnet32_private *lp = netdev_priv(pcnet32_dev);
 +              next_dev = lp->next;
 +              unregister_netdev(pcnet32_dev);
 +              pcnet32_free_ring(pcnet32_dev);
 +              release_region(pcnet32_dev->base_addr, PCNET32_TOTAL_SIZE);
 +              pci_free_consistent(lp->pci_dev, sizeof(*lp->init_block),
 +                                  lp->init_block, lp->init_dma_addr);
 +              free_netdev(pcnet32_dev);
 +              pcnet32_dev = next_dev;
 +      }
 +
 +      if (pcnet32_have_pci)
 +              pci_unregister_driver(&pcnet32_driver);
 +}
 +
 +module_init(pcnet32_init_module);
 +module_exit(pcnet32_cleanup_module);
 +
 +/*
 + * Local variables:
 + *  c-indent-level: 4
 + *  tab-width: 8
 + * End:
 + */
index 23b37dd,0000000..93bff08
mode 100644,000000..100644
--- /dev/null
@@@ -1,3577 -1,0 +1,3598 @@@
-       /* We don't want TPA on FCoE, FWD and OOO L2 rings */
-       bnx2x_fcoe(bp, disable_tpa) = 1;
 +/* bnx2x_cmn.c: Broadcom Everest network driver.
 + *
 + * Copyright (c) 2007-2011 Broadcom Corporation
 + *
 + * This program is free software; you can redistribute it and/or modify
 + * it under the terms of the GNU General Public License as published by
 + * the Free Software Foundation.
 + *
 + * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 + * Written by: Eliezer Tamir
 + * Based on code from Michael Chan's bnx2 driver
 + * UDP CSUM errata workaround by Arik Gendelman
 + * Slowpath and fastpath rework by Vladislav Zolotarov
 + * Statistics and Link management by Yitchak Gertner
 + *
 + */
 +
 +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 +
 +#include <linux/etherdevice.h>
 +#include <linux/if_vlan.h>
 +#include <linux/interrupt.h>
 +#include <linux/ip.h>
 +#include <net/ipv6.h>
 +#include <net/ip6_checksum.h>
 +#include <linux/firmware.h>
 +#include <linux/prefetch.h>
 +#include "bnx2x_cmn.h"
 +#include "bnx2x_init.h"
 +#include "bnx2x_sp.h"
 +
 +
 +
 +/**
 + * bnx2x_bz_fp - zero content of the fastpath structure.
 + *
 + * @bp:               driver handle
 + * @index:    fastpath index to be zeroed
 + *
 + * Makes sure the contents of bp->fp[index].napi are kept
 + * intact.
 + */
 +static inline void bnx2x_bz_fp(struct bnx2x *bp, int index)
 +{
 +      struct bnx2x_fastpath *fp = &bp->fp[index];
 +      struct napi_struct orig_napi = fp->napi;
 +      /* bzero bnx2x_fastpath contents */
 +      memset(fp, 0, sizeof(*fp));
 +
 +      /* Restore the NAPI object as it has been already initialized */
 +      fp->napi = orig_napi;
 +
 +      fp->bp = bp;
 +      fp->index = index;
 +      if (IS_ETH_FP(fp))
 +              fp->max_cos = bp->max_cos;
 +      else
 +              /* Special queues support only one CoS */
 +              fp->max_cos = 1;
 +
 +      /*
 +       * set the tpa flag for each queue. The tpa flag determines the queue
 +       * minimal size so it must be set prior to queue memory allocation
 +       */
 +      fp->disable_tpa = ((bp->flags & TPA_ENABLE_FLAG) == 0);
 +
 +#ifdef BCM_CNIC
-       if (NO_FCOE(bp))
-               return skb_tx_hash(dev, skb);
-       else {
++      /* We don't want TPA on an FCoE L2 ring */
++      if (IS_FCOE_FP(fp))
++              fp->disable_tpa = 1;
 +#endif
 +}
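
Not driver code: a minimal, self-contained C sketch of the save/zero/restore pattern bnx2x_bz_fp() relies on, keeping one already-initialized member while wiping the rest of the structure (the struct and field names below are hypothetical stand-ins, not bnx2x types).

	#include <stdio.h>
	#include <string.h>

	struct demo_fp {
		int napi_state;		/* stands in for the preserved napi object */
		int index;
		int stats;
	};

	static void demo_bz_fp(struct demo_fp *fp, int index)
	{
		int saved = fp->napi_state;	/* save the member that must survive */

		memset(fp, 0, sizeof(*fp));	/* wipe everything else */
		fp->napi_state = saved;		/* restore the preserved member */
		fp->index = index;
	}

	int main(void)
	{
		struct demo_fp fp = { .napi_state = 42, .stats = 7 };

		demo_bz_fp(&fp, 3);
		printf("napi=%d index=%d stats=%d\n", fp.napi_state, fp.index, fp.stats);
		return 0;
	}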
 +
 +/**
 + * bnx2x_move_fp - move content of the fastpath structure.
 + *
 + * @bp:               driver handle
 + * @from:     source FP index
 + * @to:               destination FP index
 + *
 + * Makes sure the contents of bp->fp[to].napi are kept
 + * intact.
 + */
 +static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
 +{
 +      struct bnx2x_fastpath *from_fp = &bp->fp[from];
 +      struct bnx2x_fastpath *to_fp = &bp->fp[to];
 +      struct napi_struct orig_napi = to_fp->napi;
 +      /* Move bnx2x_fastpath contents */
 +      memcpy(to_fp, from_fp, sizeof(*to_fp));
 +      to_fp->index = to;
 +
 +      /* Restore the NAPI object as it has been already initialized */
 +      to_fp->napi = orig_napi;
 +}
 +
 +int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
 +
 +/* free skb in the packet ring at pos idx
 + * return idx of last bd freed
 + */
 +static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
 +                           u16 idx)
 +{
 +      struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
 +      struct eth_tx_start_bd *tx_start_bd;
 +      struct eth_tx_bd *tx_data_bd;
 +      struct sk_buff *skb = tx_buf->skb;
 +      u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
 +      int nbd;
 +
 +      /* prefetch skb end pointer to speedup dev_kfree_skb() */
 +      prefetch(&skb->end);
 +
 +      DP(BNX2X_MSG_FP, "fp[%d]: pkt_idx %d  buff @(%p)->skb %p\n",
 +         txdata->txq_index, idx, tx_buf, skb);
 +
 +      /* unmap first bd */
 +      DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
 +      tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
 +      dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
 +                       BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
 +
 +
 +      nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
 +#ifdef BNX2X_STOP_ON_ERROR
 +      if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
 +              BNX2X_ERR("BAD nbd!\n");
 +              bnx2x_panic();
 +      }
 +#endif
 +      new_cons = nbd + tx_buf->first_bd;
 +
 +      /* Get the next bd */
 +      bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
 +
 +      /* Skip a parse bd... */
 +      --nbd;
 +      bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
 +
 +      /* ...and the TSO split header bd since they have no mapping */
 +      if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
 +              --nbd;
 +              bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
 +      }
 +
 +      /* now free frags */
 +      while (nbd > 0) {
 +
 +              DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
 +              tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
 +              dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
 +                             BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
 +              if (--nbd)
 +                      bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
 +      }
 +
 +      /* release skb */
 +      WARN_ON(!skb);
 +      dev_kfree_skb_any(skb);
 +      tx_buf->first_bd = 0;
 +      tx_buf->skb = NULL;
 +
 +      return new_cons;
 +}
 +
 +int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
 +{
 +      struct netdev_queue *txq;
 +      u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
 +
 +#ifdef BNX2X_STOP_ON_ERROR
 +      if (unlikely(bp->panic))
 +              return -1;
 +#endif
 +
 +      txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
 +      hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
 +      sw_cons = txdata->tx_pkt_cons;
 +
 +      while (sw_cons != hw_cons) {
 +              u16 pkt_cons;
 +
 +              pkt_cons = TX_BD(sw_cons);
 +
 +              DP(NETIF_MSG_TX_DONE, "queue[%d]: hw_cons %u  sw_cons %u "
 +                                    " pkt_cons %u\n",
 +                 txdata->txq_index, hw_cons, sw_cons, pkt_cons);
 +
 +              bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons);
 +              sw_cons++;
 +      }
 +
 +      txdata->tx_pkt_cons = sw_cons;
 +      txdata->tx_bd_cons = bd_cons;
 +
 +      /* Need to make the tx_bd_cons update visible to start_xmit()
 +       * before checking for netif_tx_queue_stopped().  Without the
 +       * memory barrier, there is a small possibility that
 +       * start_xmit() will miss it and cause the queue to be stopped
 +       * forever.
 +       * On the other hand we need an rmb() here to ensure the proper
 +       * ordering of bit testing in the following
 +       * netif_tx_queue_stopped(txq) call.
 +       */
 +      smp_mb();
 +
 +      if (unlikely(netif_tx_queue_stopped(txq))) {
 +              /* Taking tx_lock() is needed to prevent reenabling the queue
 +               * while it's empty. This could have happened if rx_action() gets
 +               * suspended in bnx2x_tx_int() after the condition before
 +               * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
 +               *
 +               * stops the queue->sees fresh tx_bd_cons->releases the queue->
 +               * sends some packets consuming the whole queue again->
 +               * stops the queue
 +               */
 +
 +              __netif_tx_lock(txq, smp_processor_id());
 +
 +              if ((netif_tx_queue_stopped(txq)) &&
 +                  (bp->state == BNX2X_STATE_OPEN) &&
 +                  (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3))
 +                      netif_tx_wake_queue(txq);
 +
 +              __netif_tx_unlock(txq);
 +      }
 +      return 0;
 +}
 +
 +static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
 +                                           u16 idx)
 +{
 +      u16 last_max = fp->last_max_sge;
 +
 +      if (SUB_S16(idx, last_max) > 0)
 +              fp->last_max_sge = idx;
 +}
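
For illustration only, a standalone sketch of the wrap-safe "newer index" test that SUB_S16() is assumed to provide above: interpreting the 16-bit difference as a signed value keeps the comparison correct across the 0xffff -> 0 wrap (assumes the usual two's-complement narrowing).

	#include <stdint.h>
	#include <stdio.h>

	/* Non-zero when idx is "ahead of" last, even across a u16 wrap. */
	static int idx_is_newer(uint16_t idx, uint16_t last)
	{
		return (int16_t)(idx - last) > 0;
	}

	int main(void)
	{
		printf("%d\n", idx_is_newer(10, 5));		/* 1: plain case     */
		printf("%d\n", idx_is_newer(3, 0xfffd));	/* 1: wrapped around */
		printf("%d\n", idx_is_newer(0xfffd, 3));	/* 0: older index    */
		return 0;
	}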
 +
 +static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
 +                                struct eth_fast_path_rx_cqe *fp_cqe)
 +{
 +      struct bnx2x *bp = fp->bp;
 +      u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
 +                                   le16_to_cpu(fp_cqe->len_on_bd)) >>
 +                    SGE_PAGE_SHIFT;
 +      u16 last_max, last_elem, first_elem;
 +      u16 delta = 0;
 +      u16 i;
 +
 +      if (!sge_len)
 +              return;
 +
 +      /* First mark all used pages */
 +      for (i = 0; i < sge_len; i++)
 +              BIT_VEC64_CLEAR_BIT(fp->sge_mask,
 +                      RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[i])));
 +
 +      DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
 +         sge_len - 1, le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
 +
 +      /* Here we assume that the last SGE index is the biggest */
 +      prefetch((void *)(fp->sge_mask));
 +      bnx2x_update_last_max_sge(fp,
 +              le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
 +
 +      last_max = RX_SGE(fp->last_max_sge);
 +      last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
 +      first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
 +
 +      /* If ring is not full */
 +      if (last_elem + 1 != first_elem)
 +              last_elem++;
 +
 +      /* Now update the prod */
 +      for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
 +              if (likely(fp->sge_mask[i]))
 +                      break;
 +
 +              fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
 +              delta += BIT_VEC64_ELEM_SZ;
 +      }
 +
 +      if (delta > 0) {
 +              fp->rx_sge_prod += delta;
 +              /* clear page-end entries */
 +              bnx2x_clear_sge_mask_next_elems(fp);
 +      }
 +
 +      DP(NETIF_MSG_RX_STATUS,
 +         "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
 +         fp->last_max_sge, fp->rx_sge_prod);
 +}
 +
 +static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
 +                          struct sk_buff *skb, u16 cons, u16 prod,
 +                          struct eth_fast_path_rx_cqe *cqe)
 +{
 +      struct bnx2x *bp = fp->bp;
 +      struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
 +      struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
 +      struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
 +      dma_addr_t mapping;
 +      struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
 +      struct sw_rx_bd *first_buf = &tpa_info->first_buf;
 +
 +      /* print error if current state != stop */
 +      if (tpa_info->tpa_state != BNX2X_TPA_STOP)
 +              BNX2X_ERR("start of bin not in stop [%d]\n", queue);
 +
 +      /* Try to map an empty skb from the aggregation info  */
 +      mapping = dma_map_single(&bp->pdev->dev,
 +                               first_buf->skb->data,
 +                               fp->rx_buf_size, DMA_FROM_DEVICE);
 +      /*
 +       *  ...if it fails - move the skb from the consumer to the producer
 +       *  and set the current aggregation state as ERROR to drop it
 +       *  when TPA_STOP arrives.
 +       */
 +
 +      if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
 +              /* Move the BD from the consumer to the producer */
 +              bnx2x_reuse_rx_skb(fp, cons, prod);
 +              tpa_info->tpa_state = BNX2X_TPA_ERROR;
 +              return;
 +      }
 +
 +      /* move empty skb from pool to prod */
 +      prod_rx_buf->skb = first_buf->skb;
 +      dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
 +      /* point prod_bd to new skb */
 +      prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
 +      prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
 +
 +      /* move partial skb from cons to pool (don't unmap yet) */
 +      *first_buf = *cons_rx_buf;
 +
 +      /* mark bin state as START */
 +      tpa_info->parsing_flags =
 +              le16_to_cpu(cqe->pars_flags.flags);
 +      tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
 +      tpa_info->tpa_state = BNX2X_TPA_START;
 +      tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
 +      tpa_info->placement_offset = cqe->placement_offset;
 +
 +#ifdef BNX2X_STOP_ON_ERROR
 +      fp->tpa_queue_used |= (1 << queue);
 +#ifdef _ASM_GENERIC_INT_L64_H
 +      DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
 +#else
 +      DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
 +#endif
 +         fp->tpa_queue_used);
 +#endif
 +}
 +
 +/* Timestamp option length allowed for TPA aggregation:
 + *
 + *            nop nop kind length echo val
 + */
 +#define TPA_TSTAMP_OPT_LEN    12
 +/**
 + * bnx2x_set_lro_mss - calculate the approximate value of the MSS
 + *
 + * @bp:                       driver handle
 + * @parsing_flags:    parsing flags from the START CQE
 + * @len_on_bd:                total length of the first packet for the
 + *                    aggregation.
 + *
 + * Approximate value of the MSS for this aggregation calculated using
 + * the first packet of it.
 + */
 +static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
 +                                  u16 len_on_bd)
 +{
 +      /*
 +       * A TPA aggregation won't have IP options, TCP options other
 +       * than the timestamp, or IPv6 extension headers.
 +       */
 +      u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
 +
 +      if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
 +          PRS_FLAG_OVERETH_IPV6)
 +              hdrs_len += sizeof(struct ipv6hdr);
 +      else /* IPv4 */
 +              hdrs_len += sizeof(struct iphdr);
 +
 +
 +      /* Check if there was a TCP timestamp; if so, it will always be
 +       * 12 bytes long: nop nop kind length echo val.
 +       *
 +       * Otherwise the FW would close the aggregation.
 +       */
 +      if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
 +              hdrs_len += TPA_TSTAMP_OPT_LEN;
 +
 +      return len_on_bd - hdrs_len;
 +}
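
As a worked example of the header arithmetic above (a standalone sketch, not driver code; the 1514-byte first-packet length is hypothetical): for IPv4 with a TCP timestamp option the headers add up to 14 + 20 + 20 + 12 = 66 bytes.

	#include <stdio.h>

	int main(void)
	{
		/* Header sizes the approximation assumes: no IP/TCP options
		 * other than the 12-byte timestamp appear in a TPA run.
		 */
		unsigned int eth_hlen = 14, iphdr_len = 20, tcphdr_len = 20;
		unsigned int tstamp_opt = 12;	/* nop nop kind length echo val */
		unsigned int len_on_bd = 1514;	/* hypothetical first-packet length */
		unsigned int hdrs_len = eth_hlen + iphdr_len + tcphdr_len + tstamp_opt;

		printf("approx mss = %u\n", len_on_bd - hdrs_len);	/* 1448 */
		return 0;
	}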
 +
 +static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 +                             u16 queue, struct sk_buff *skb,
 +                             struct eth_end_agg_rx_cqe *cqe,
 +                             u16 cqe_idx)
 +{
 +      struct sw_rx_page *rx_pg, old_rx_pg;
 +      u32 i, frag_len, frag_size, pages;
 +      int err;
 +      int j;
 +      struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
 +      u16 len_on_bd = tpa_info->len_on_bd;
 +
 +      frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
 +      pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
 +
 +      /* This is needed in order to enable forwarding support */
 +      if (frag_size)
 +              skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp,
 +                                      tpa_info->parsing_flags, len_on_bd);
 +
 +#ifdef BNX2X_STOP_ON_ERROR
 +      if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
 +              BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
 +                        pages, cqe_idx);
 +              BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
 +              bnx2x_panic();
 +              return -EINVAL;
 +      }
 +#endif
 +
 +      /* Run through the SGL and compose the fragmented skb */
 +      for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
 +              u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
 +
 +              /* FW gives the indices of the SGE as if the ring is an array
 +                 (meaning that "next" element will consume 2 indices) */
 +              frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
 +              rx_pg = &fp->rx_page_ring[sge_idx];
 +              old_rx_pg = *rx_pg;
 +
 +              /* If we fail to allocate a substitute page, we simply stop
 +                 where we are and drop the whole packet */
 +              err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
 +              if (unlikely(err)) {
 +                      fp->eth_q_stats.rx_skb_alloc_failed++;
 +                      return err;
 +              }
 +
 +              /* Unmap the page as we are going to pass it to the stack */
 +              dma_unmap_page(&bp->pdev->dev,
 +                             dma_unmap_addr(&old_rx_pg, mapping),
 +                             SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
 +
 +              /* Add one frag and update the appropriate fields in the skb */
 +              skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
 +
 +              skb->data_len += frag_len;
 +              skb->truesize += frag_len;
 +              skb->len += frag_len;
 +
 +              frag_size -= frag_len;
 +      }
 +
 +      return 0;
 +}
 +
 +static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 +                         u16 queue, struct eth_end_agg_rx_cqe *cqe,
 +                         u16 cqe_idx)
 +{
 +      struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
 +      struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
 +      u8 pad = tpa_info->placement_offset;
 +      u16 len = tpa_info->len_on_bd;
 +      struct sk_buff *skb = rx_buf->skb;
 +      /* alloc new skb */
 +      struct sk_buff *new_skb;
 +      u8 old_tpa_state = tpa_info->tpa_state;
 +
 +      tpa_info->tpa_state = BNX2X_TPA_STOP;
 +
 +      /* If there was an error during the handling of the TPA_START -
 +       * drop this aggregation.
 +       */
 +      if (old_tpa_state == BNX2X_TPA_ERROR)
 +              goto drop;
 +
 +      /* Try to allocate the new skb */
 +      new_skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);
 +
 +      /* Unmap skb in the pool anyway, as we are going to change
 +         pool entry status to BNX2X_TPA_STOP even if new skb allocation
 +         fails. */
 +      dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
 +                       fp->rx_buf_size, DMA_FROM_DEVICE);
 +
 +      if (likely(new_skb)) {
 +              prefetch(skb);
 +              prefetch(((char *)(skb)) + L1_CACHE_BYTES);
 +
 +#ifdef BNX2X_STOP_ON_ERROR
 +              if (pad + len > fp->rx_buf_size) {
 +                      BNX2X_ERR("skb_put is about to fail...  "
 +                                "pad %d  len %d  rx_buf_size %d\n",
 +                                pad, len, fp->rx_buf_size);
 +                      bnx2x_panic();
 +                      return;
 +              }
 +#endif
 +
 +              skb_reserve(skb, pad);
 +              skb_put(skb, len);
 +
 +              skb->protocol = eth_type_trans(skb, bp->dev);
 +              skb->ip_summed = CHECKSUM_UNNECESSARY;
 +
 +              if (!bnx2x_fill_frag_skb(bp, fp, queue, skb, cqe, cqe_idx)) {
 +                      if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
 +                              __vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
 +                      napi_gro_receive(&fp->napi, skb);
 +              } else {
 +                      DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
 +                         " - dropping packet!\n");
 +                      dev_kfree_skb_any(skb);
 +              }
 +
 +
 +              /* put new skb in bin */
 +              rx_buf->skb = new_skb;
 +
 +              return;
 +      }
 +
 +drop:
 +      /* drop the packet and keep the buffer in the bin */
 +      DP(NETIF_MSG_RX_STATUS,
 +         "Failed to allocate or map a new skb - dropping packet!\n");
 +      fp->eth_q_stats.rx_skb_alloc_failed++;
 +}
 +
 +/* Set Toeplitz hash value in the skb using the value from the
 + * CQE (calculated by HW).
 + */
 +static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
 +                                      struct sk_buff *skb)
 +{
 +      /* Set Toeplitz hash from CQE */
 +      if ((bp->dev->features & NETIF_F_RXHASH) &&
 +          (cqe->fast_path_cqe.status_flags &
 +           ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
 +              skb->rxhash =
 +              le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
 +}
 +
 +int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 +{
 +      struct bnx2x *bp = fp->bp;
 +      u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
 +      u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
 +      int rx_pkt = 0;
 +
 +#ifdef BNX2X_STOP_ON_ERROR
 +      if (unlikely(bp->panic))
 +              return 0;
 +#endif
 +
 +      /* CQ "next element" is of the size of the regular element,
 +         that's why it's ok here */
 +      hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
 +      if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
 +              hw_comp_cons++;
 +
 +      bd_cons = fp->rx_bd_cons;
 +      bd_prod = fp->rx_bd_prod;
 +      bd_prod_fw = bd_prod;
 +      sw_comp_cons = fp->rx_comp_cons;
 +      sw_comp_prod = fp->rx_comp_prod;
 +
 +      /* Memory barrier necessary as speculative reads of the rx
 +       * buffer can be ahead of the index in the status block
 +       */
 +      rmb();
 +
 +      DP(NETIF_MSG_RX_STATUS,
 +         "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
 +         fp->index, hw_comp_cons, sw_comp_cons);
 +
 +      while (sw_comp_cons != hw_comp_cons) {
 +              struct sw_rx_bd *rx_buf = NULL;
 +              struct sk_buff *skb;
 +              union eth_rx_cqe *cqe;
 +              struct eth_fast_path_rx_cqe *cqe_fp;
 +              u8 cqe_fp_flags;
 +              enum eth_rx_cqe_type cqe_fp_type;
 +              u16 len, pad;
 +
 +#ifdef BNX2X_STOP_ON_ERROR
 +              if (unlikely(bp->panic))
 +                      return 0;
 +#endif
 +
 +              comp_ring_cons = RCQ_BD(sw_comp_cons);
 +              bd_prod = RX_BD(bd_prod);
 +              bd_cons = RX_BD(bd_cons);
 +
 +              /* Prefetch the page containing the BD descriptor
 +                 at producer's index. It will be needed when new skb is
 +                 allocated */
 +              prefetch((void *)(PAGE_ALIGN((unsigned long)
 +                                           (&fp->rx_desc_ring[bd_prod])) -
 +                                PAGE_SIZE + 1));
 +
 +              cqe = &fp->rx_comp_ring[comp_ring_cons];
 +              cqe_fp = &cqe->fast_path_cqe;
 +              cqe_fp_flags = cqe_fp->type_error_flags;
 +              cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
 +
 +              DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
 +                 "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
 +                 cqe_fp_flags, cqe_fp->status_flags,
 +                 le32_to_cpu(cqe_fp->rss_hash_result),
 +                 le16_to_cpu(cqe_fp->vlan_tag), le16_to_cpu(cqe_fp->pkt_len));
 +
 +              /* is this a slowpath msg? */
 +              if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
 +                      bnx2x_sp_event(fp, cqe);
 +                      goto next_cqe;
 +
 +              /* this is an rx packet */
 +              } else {
 +                      rx_buf = &fp->rx_buf_ring[bd_cons];
 +                      skb = rx_buf->skb;
 +                      prefetch(skb);
 +
 +                      if (!CQE_TYPE_FAST(cqe_fp_type)) {
 +#ifdef BNX2X_STOP_ON_ERROR
 +                              /* sanity check */
 +                              if (fp->disable_tpa &&
 +                                  (CQE_TYPE_START(cqe_fp_type) ||
 +                                   CQE_TYPE_STOP(cqe_fp_type)))
 +                                      BNX2X_ERR("START/STOP packet while "
 +                                                "disable_tpa type %x\n",
 +                                                CQE_TYPE(cqe_fp_type));
 +#endif
 +
 +                              if (CQE_TYPE_START(cqe_fp_type)) {
 +                                      u16 queue = cqe_fp->queue_index;
 +                                      DP(NETIF_MSG_RX_STATUS,
 +                                         "calling tpa_start on queue %d\n",
 +                                         queue);
 +
 +                                      bnx2x_tpa_start(fp, queue, skb,
 +                                                      bd_cons, bd_prod,
 +                                                      cqe_fp);
 +
 +                                      /* Set Toeplitz hash for LRO skb */
 +                                      bnx2x_set_skb_rxhash(bp, cqe, skb);
 +
 +                                      goto next_rx;
 +
 +                              } else {
 +                                      u16 queue =
 +                                              cqe->end_agg_cqe.queue_index;
 +                                      DP(NETIF_MSG_RX_STATUS,
 +                                         "calling tpa_stop on queue %d\n",
 +                                         queue);
 +
 +                                      bnx2x_tpa_stop(bp, fp, queue,
 +                                                     &cqe->end_agg_cqe,
 +                                                     comp_ring_cons);
 +#ifdef BNX2X_STOP_ON_ERROR
 +                                      if (bp->panic)
 +                                              return 0;
 +#endif
 +
 +                                      bnx2x_update_sge_prod(fp, cqe_fp);
 +                                      goto next_cqe;
 +                              }
 +                      }
 +                      /* non TPA */
 +                      len = le16_to_cpu(cqe_fp->pkt_len);
 +                      pad = cqe_fp->placement_offset;
 +                      dma_sync_single_for_cpu(&bp->pdev->dev,
 +                                      dma_unmap_addr(rx_buf, mapping),
 +                                                     pad + RX_COPY_THRESH,
 +                                                     DMA_FROM_DEVICE);
 +                      prefetch(((char *)(skb)) + L1_CACHE_BYTES);
 +
 +                      /* is this an error packet? */
 +                      if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
 +                              DP(NETIF_MSG_RX_ERR,
 +                                 "ERROR  flags %x  rx packet %u\n",
 +                                 cqe_fp_flags, sw_comp_cons);
 +                              fp->eth_q_stats.rx_err_discard_pkt++;
 +                              goto reuse_rx;
 +                      }
 +
 +                      /* Since we don't have a jumbo ring,
 +                       * copy small packets if mtu > 1500
 +                       */
 +                      if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
 +                          (len <= RX_COPY_THRESH)) {
 +                              struct sk_buff *new_skb;
 +
 +                              new_skb = netdev_alloc_skb(bp->dev, len + pad);
 +                              if (new_skb == NULL) {
 +                                      DP(NETIF_MSG_RX_ERR,
 +                                         "ERROR  packet dropped "
 +                                         "because of alloc failure\n");
 +                                      fp->eth_q_stats.rx_skb_alloc_failed++;
 +                                      goto reuse_rx;
 +                              }
 +
 +                              /* aligned copy */
 +                              skb_copy_from_linear_data_offset(skb, pad,
 +                                                  new_skb->data + pad, len);
 +                              skb_reserve(new_skb, pad);
 +                              skb_put(new_skb, len);
 +
 +                              bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
 +
 +                              skb = new_skb;
 +
 +                      } else
 +                      if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
 +                              dma_unmap_single(&bp->pdev->dev,
 +                                      dma_unmap_addr(rx_buf, mapping),
 +                                               fp->rx_buf_size,
 +                                               DMA_FROM_DEVICE);
 +                              skb_reserve(skb, pad);
 +                              skb_put(skb, len);
 +
 +                      } else {
 +                              DP(NETIF_MSG_RX_ERR,
 +                                 "ERROR  packet dropped because "
 +                                 "of alloc failure\n");
 +                              fp->eth_q_stats.rx_skb_alloc_failed++;
 +reuse_rx:
 +                              bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
 +                              goto next_rx;
 +                      }
 +
 +                      skb->protocol = eth_type_trans(skb, bp->dev);
 +
 +                      /* Set Toeplitz hash for a non-LRO skb */
 +                      bnx2x_set_skb_rxhash(bp, cqe, skb);
 +
 +                      skb_checksum_none_assert(skb);
 +
 +                      if (bp->dev->features & NETIF_F_RXCSUM) {
 +
 +                              if (likely(BNX2X_RX_CSUM_OK(cqe)))
 +                                      skb->ip_summed = CHECKSUM_UNNECESSARY;
 +                              else
 +                                      fp->eth_q_stats.hw_csum_err++;
 +                      }
 +              }
 +
 +              skb_record_rx_queue(skb, fp->index);
 +
 +              if (le16_to_cpu(cqe_fp->pars_flags.flags) &
 +                  PARSING_FLAGS_VLAN)
 +                      __vlan_hwaccel_put_tag(skb,
 +                                             le16_to_cpu(cqe_fp->vlan_tag));
 +              napi_gro_receive(&fp->napi, skb);
 +
 +
 +next_rx:
 +              rx_buf->skb = NULL;
 +
 +              bd_cons = NEXT_RX_IDX(bd_cons);
 +              bd_prod = NEXT_RX_IDX(bd_prod);
 +              bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
 +              rx_pkt++;
 +next_cqe:
 +              sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
 +              sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
 +
 +              if (rx_pkt == budget)
 +                      break;
 +      } /* while */
 +
 +      fp->rx_bd_cons = bd_cons;
 +      fp->rx_bd_prod = bd_prod_fw;
 +      fp->rx_comp_cons = sw_comp_cons;
 +      fp->rx_comp_prod = sw_comp_prod;
 +
 +      /* Update producers */
 +      bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
 +                           fp->rx_sge_prod);
 +
 +      fp->rx_pkt += rx_pkt;
 +      fp->rx_calls++;
 +
 +      return rx_pkt;
 +}
 +
 +static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
 +{
 +      struct bnx2x_fastpath *fp = fp_cookie;
 +      struct bnx2x *bp = fp->bp;
 +      u8 cos;
 +
 +      DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB "
 +                       "[fp %d fw_sd %d igusb %d]\n",
 +         fp->index, fp->fw_sb_id, fp->igu_sb_id);
 +      bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
 +
 +#ifdef BNX2X_STOP_ON_ERROR
 +      if (unlikely(bp->panic))
 +              return IRQ_HANDLED;
 +#endif
 +
 +      /* Handle Rx and Tx according to MSI-X vector */
 +      prefetch(fp->rx_cons_sb);
 +
 +      for_each_cos_in_tx_queue(fp, cos)
 +              prefetch(fp->txdata[cos].tx_cons_sb);
 +
 +      prefetch(&fp->sb_running_index[SM_RX_ID]);
 +      napi_schedule(&bnx2x_fp(bp, fp->index, napi));
 +
 +      return IRQ_HANDLED;
 +}
 +
 +/* HW Lock for shared dual port PHYs */
 +void bnx2x_acquire_phy_lock(struct bnx2x *bp)
 +{
 +      mutex_lock(&bp->port.phy_mutex);
 +
 +      if (bp->port.need_hw_lock)
 +              bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
 +}
 +
 +void bnx2x_release_phy_lock(struct bnx2x *bp)
 +{
 +      if (bp->port.need_hw_lock)
 +              bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
 +
 +      mutex_unlock(&bp->port.phy_mutex);
 +}
 +
 +/* calculates MF speed according to current linespeed and MF configuration */
 +u16 bnx2x_get_mf_speed(struct bnx2x *bp)
 +{
 +      u16 line_speed = bp->link_vars.line_speed;
 +      if (IS_MF(bp)) {
 +              u16 maxCfg = bnx2x_extract_max_cfg(bp,
 +                                                 bp->mf_config[BP_VN(bp)]);
 +
 +              /* Calculate the current MAX line speed limit for the MF
 +               * devices
 +               */
 +              if (IS_MF_SI(bp))
 +                      line_speed = (line_speed * maxCfg) / 100;
 +              else { /* SD mode */
 +                      u16 vn_max_rate = maxCfg * 100;
 +
 +                      if (vn_max_rate < line_speed)
 +                              line_speed = vn_max_rate;
 +              }
 +      }
 +
 +      return line_speed;
 +}
 +
 +/**
 + * bnx2x_fill_report_data - fill link report data to report
 + *
 + * @bp:               driver handle
 + * @data:     link state to update
 + *
 + * It uses non-atomic bit operations because it is called under the mutex.
 + */
 +static inline void bnx2x_fill_report_data(struct bnx2x *bp,
 +                                        struct bnx2x_link_report_data *data)
 +{
 +      u16 line_speed = bnx2x_get_mf_speed(bp);
 +
 +      memset(data, 0, sizeof(*data));
 +
 +      /* Fill the report data: effective line speed */
 +      data->line_speed = line_speed;
 +
 +      /* Link is down */
 +      if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
 +              __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
 +                        &data->link_report_flags);
 +
 +      /* Full DUPLEX */
 +      if (bp->link_vars.duplex == DUPLEX_FULL)
 +              __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
 +
 +      /* Rx Flow Control is ON */
 +      if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
 +              __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
 +
 +      /* Tx Flow Control is ON */
 +      if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
 +              __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
 +}
 +
 +/**
 + * bnx2x_link_report - report link status to OS.
 + *
 + * @bp:               driver handle
 + *
 + * Calls the __bnx2x_link_report() under the same locking scheme
 + * as the link/PHY state managing code to ensure consistent link
 + * reporting.
 + */
 +
 +void bnx2x_link_report(struct bnx2x *bp)
 +{
 +      bnx2x_acquire_phy_lock(bp);
 +      __bnx2x_link_report(bp);
 +      bnx2x_release_phy_lock(bp);
 +}
 +
 +/**
 + * __bnx2x_link_report - report link status to OS.
 + *
 + * @bp:               driver handle
 + *
 + * Non-atomic implementation.
 + * Should be called under the phy_lock.
 + */
 +void __bnx2x_link_report(struct bnx2x *bp)
 +{
 +      struct bnx2x_link_report_data cur_data;
 +
 +      /* reread mf_cfg */
 +      if (!CHIP_IS_E1(bp))
 +              bnx2x_read_mf_cfg(bp);
 +
 +      /* Read the current link report info */
 +      bnx2x_fill_report_data(bp, &cur_data);
 +
 +      /* Don't report link down or exactly the same link status twice */
 +      if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
 +          (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
 +                    &bp->last_reported_link.link_report_flags) &&
 +           test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
 +                    &cur_data.link_report_flags)))
 +              return;
 +
 +      bp->link_cnt++;
 +
 +      /* We are going to report new link parameters now -
 +       * remember the current data for the next time.
 +       */
 +      memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
 +
 +      if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
 +                   &cur_data.link_report_flags)) {
 +              netif_carrier_off(bp->dev);
 +              netdev_err(bp->dev, "NIC Link is Down\n");
 +              return;
 +      } else {
 +              const char *duplex;
 +              const char *flow;
 +
 +              netif_carrier_on(bp->dev);
 +
 +              if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
 +                                     &cur_data.link_report_flags))
 +                      duplex = "full";
 +              else
 +                      duplex = "half";
 +
 +              /* Handle the FC at the end so that only these flags would be
 +               * possibly set. This way we may easily check if there is no FC
 +               * enabled.
 +               */
 +              if (cur_data.link_report_flags) {
 +                      if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
 +                                   &cur_data.link_report_flags)) {
 +                              if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
 +                                   &cur_data.link_report_flags))
 +                                      flow = "ON - receive & transmit";
 +                              else
 +                                      flow = "ON - receive";
 +                      } else {
 +                              flow = "ON - transmit";
 +                      }
 +              } else {
 +                      flow = "none";
 +              }
 +              netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
 +                          cur_data.line_speed, duplex, flow);
 +      }
 +}
 +
 +void bnx2x_init_rx_rings(struct bnx2x *bp)
 +{
 +      int func = BP_FUNC(bp);
 +      int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
 +                                            ETH_MAX_AGGREGATION_QUEUES_E1H_E2;
 +      u16 ring_prod;
 +      int i, j;
 +
 +      /* Allocate TPA resources */
 +      for_each_rx_queue(bp, j) {
 +              struct bnx2x_fastpath *fp = &bp->fp[j];
 +
 +              DP(NETIF_MSG_IFUP,
 +                 "mtu %d  rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
 +
 +              if (!fp->disable_tpa) {
 +                      /* Fill the per-aggregation pool */
 +                      for (i = 0; i < max_agg_queues; i++) {
 +                              struct bnx2x_agg_info *tpa_info =
 +                                      &fp->tpa_info[i];
 +                              struct sw_rx_bd *first_buf =
 +                                      &tpa_info->first_buf;
 +
 +                              first_buf->skb = netdev_alloc_skb(bp->dev,
 +                                                     fp->rx_buf_size);
 +                              if (!first_buf->skb) {
 +                                      BNX2X_ERR("Failed to allocate TPA "
 +                                                "skb pool for queue[%d] - "
 +                                                "disabling TPA on this "
 +                                                "queue!\n", j);
 +                                      bnx2x_free_tpa_pool(bp, fp, i);
 +                                      fp->disable_tpa = 1;
 +                                      break;
 +                              }
 +                              dma_unmap_addr_set(first_buf, mapping, 0);
 +                              tpa_info->tpa_state = BNX2X_TPA_STOP;
 +                      }
 +
 +                      /* "next page" elements initialization */
 +                      bnx2x_set_next_page_sgl(fp);
 +
 +                      /* set SGEs bit mask */
 +                      bnx2x_init_sge_ring_bit_mask(fp);
 +
 +                      /* Allocate SGEs and initialize the ring elements */
 +                      for (i = 0, ring_prod = 0;
 +                           i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
 +
 +                              if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
 +                                      BNX2X_ERR("was only able to allocate "
 +                                                "%d rx sges\n", i);
 +                                      BNX2X_ERR("disabling TPA for "
 +                                                "queue[%d]\n", j);
 +                                      /* Cleanup already allocated elements */
 +                                      bnx2x_free_rx_sge_range(bp, fp,
 +                                                              ring_prod);
 +                                      bnx2x_free_tpa_pool(bp, fp,
 +                                                          max_agg_queues);
 +                                      fp->disable_tpa = 1;
 +                                      ring_prod = 0;
 +                                      break;
 +                              }
 +                              ring_prod = NEXT_SGE_IDX(ring_prod);
 +                      }
 +
 +                      fp->rx_sge_prod = ring_prod;
 +              }
 +      }
 +
 +      for_each_rx_queue(bp, j) {
 +              struct bnx2x_fastpath *fp = &bp->fp[j];
 +
 +              fp->rx_bd_cons = 0;
 +
 +              /* Activate BD ring */
 +              /* Warning!
 +               * this will generate an interrupt (to the TSTORM)
 +               * must only be done after chip is initialized
 +               */
 +              bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
 +                                   fp->rx_sge_prod);
 +
 +              if (j != 0)
 +                      continue;
 +
 +              if (CHIP_IS_E1(bp)) {
 +                      REG_WR(bp, BAR_USTRORM_INTMEM +
 +                             USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
 +                             U64_LO(fp->rx_comp_mapping));
 +                      REG_WR(bp, BAR_USTRORM_INTMEM +
 +                             USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
 +                             U64_HI(fp->rx_comp_mapping));
 +              }
 +      }
 +}
 +
 +static void bnx2x_free_tx_skbs(struct bnx2x *bp)
 +{
 +      int i;
 +      u8 cos;
 +
 +      for_each_tx_queue(bp, i) {
 +              struct bnx2x_fastpath *fp = &bp->fp[i];
 +              for_each_cos_in_tx_queue(fp, cos) {
 +                      struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
 +
 +                      u16 bd_cons = txdata->tx_bd_cons;
 +                      u16 sw_prod = txdata->tx_pkt_prod;
 +                      u16 sw_cons = txdata->tx_pkt_cons;
 +
 +                      while (sw_cons != sw_prod) {
 +                              bd_cons = bnx2x_free_tx_pkt(bp, txdata,
 +                                                          TX_BD(sw_cons));
 +                              sw_cons++;
 +                      }
 +              }
 +      }
 +}
 +
 +static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
 +{
 +      struct bnx2x *bp = fp->bp;
 +      int i;
 +
 +      /* ring wasn't allocated */
 +      if (fp->rx_buf_ring == NULL)
 +              return;
 +
 +      for (i = 0; i < NUM_RX_BD; i++) {
 +              struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
 +              struct sk_buff *skb = rx_buf->skb;
 +
 +              if (skb == NULL)
 +                      continue;
 +              dma_unmap_single(&bp->pdev->dev,
 +                               dma_unmap_addr(rx_buf, mapping),
 +                               fp->rx_buf_size, DMA_FROM_DEVICE);
 +
 +              rx_buf->skb = NULL;
 +              dev_kfree_skb(skb);
 +      }
 +}
 +
 +static void bnx2x_free_rx_skbs(struct bnx2x *bp)
 +{
 +      int j;
 +
 +      for_each_rx_queue(bp, j) {
 +              struct bnx2x_fastpath *fp = &bp->fp[j];
 +
 +              bnx2x_free_rx_bds(fp);
 +
 +              if (!fp->disable_tpa)
 +                      bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
 +                                          ETH_MAX_AGGREGATION_QUEUES_E1 :
 +                                          ETH_MAX_AGGREGATION_QUEUES_E1H_E2);
 +      }
 +}
 +
 +void bnx2x_free_skbs(struct bnx2x *bp)
 +{
 +      bnx2x_free_tx_skbs(bp);
 +      bnx2x_free_rx_skbs(bp);
 +}
 +
 +void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
 +{
 +      /* load old values */
 +      u32 mf_cfg = bp->mf_config[BP_VN(bp)];
 +
 +      if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
 +              /* leave all but MAX value */
 +              mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
 +
 +              /* set new MAX value */
 +              mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
 +                              & FUNC_MF_CFG_MAX_BW_MASK;
 +
 +              bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
 +      }
 +}
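
A standalone sketch of the mask/shift read-modify-write used above; the 8-bit field at bit 16 is a hypothetical layout, not the real FUNC_MF_CFG_MAX_BW_* definition.

	#include <stdio.h>

	#define DEMO_MAX_BW_SHIFT	16
	#define DEMO_MAX_BW_MASK	(0xffu << DEMO_MAX_BW_SHIFT)

	static unsigned int set_max_bw(unsigned int cfg, unsigned int value)
	{
		cfg &= ~DEMO_MAX_BW_MASK;	/* leave all but the MAX value */
		cfg |= (value << DEMO_MAX_BW_SHIFT) & DEMO_MAX_BW_MASK;
		return cfg;
	}

	int main(void)
	{
		printf("0x%08x\n", set_max_bw(0x12345678u, 0x64));	/* 0x12645678 */
		return 0;
	}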
 +
 +/**
 + * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
 + *
 + * @bp:               driver handle
 + * @nvecs:    number of vectors to be released
 + */
 +static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
 +{
 +      int i, offset = 0;
 +
 +      if (nvecs == offset)
 +              return;
 +      free_irq(bp->msix_table[offset].vector, bp->dev);
 +      DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
 +         bp->msix_table[offset].vector);
 +      offset++;
 +#ifdef BCM_CNIC
 +      if (nvecs == offset)
 +              return;
 +      offset++;
 +#endif
 +
 +      for_each_eth_queue(bp, i) {
 +              if (nvecs == offset)
 +                      return;
 +              DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d "
 +                 "irq\n", i, bp->msix_table[offset].vector);
 +
 +              free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
 +      }
 +}
 +
 +void bnx2x_free_irq(struct bnx2x *bp)
 +{
 +      if (bp->flags & USING_MSIX_FLAG)
 +              bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) +
 +                                   CNIC_PRESENT + 1);
 +      else if (bp->flags & USING_MSI_FLAG)
 +              free_irq(bp->pdev->irq, bp->dev);
 +      else
 +              free_irq(bp->pdev->irq, bp->dev);
 +}
 +
 +int bnx2x_enable_msix(struct bnx2x *bp)
 +{
 +      int msix_vec = 0, i, rc, req_cnt;
 +
 +      bp->msix_table[msix_vec].entry = msix_vec;
 +      DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n",
 +         bp->msix_table[0].entry);
 +      msix_vec++;
 +
 +#ifdef BCM_CNIC
 +      bp->msix_table[msix_vec].entry = msix_vec;
 +      DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d (CNIC)\n",
 +         bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
 +      msix_vec++;
 +#endif
 +      /* We need separate vectors for ETH queues only (not FCoE) */
 +      for_each_eth_queue(bp, i) {
 +              bp->msix_table[msix_vec].entry = msix_vec;
 +              DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
 +                 "(fastpath #%u)\n", msix_vec, msix_vec, i);
 +              msix_vec++;
 +      }
 +
 +      req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_PRESENT + 1;
 +
 +      rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
 +
 +      /*
 +       * reconfigure number of tx/rx queues according to available
 +       * MSI-X vectors
 +       */
 +      if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
 +              /* how many fewer vectors will we have? */
 +              int diff = req_cnt - rc;
 +
 +              DP(NETIF_MSG_IFUP,
 +                 "Trying to use less MSI-X vectors: %d\n", rc);
 +
 +              rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
 +
 +              if (rc) {
 +                      DP(NETIF_MSG_IFUP,
 +                         "MSI-X is not attainable  rc %d\n", rc);
 +                      return rc;
 +              }
 +              /*
 +               * decrease number of queues by number of unallocated entries
 +               */
 +              bp->num_queues -= diff;
 +
 +              DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
 +                                bp->num_queues);
 +      } else if (rc) {
 +              /* fall to INTx if not enough memory */
 +              if (rc == -ENOMEM)
 +                      bp->flags |= DISABLE_MSI_FLAG;
 +              DP(NETIF_MSG_IFUP, "MSI-X is not attainable  rc %d\n", rc);
 +              return rc;
 +      }
 +
 +      bp->flags |= USING_MSIX_FLAG;
 +
 +      return 0;
 +}
 +
 +static int bnx2x_req_msix_irqs(struct bnx2x *bp)
 +{
 +      int i, rc, offset = 0;
 +
 +      rc = request_irq(bp->msix_table[offset++].vector,
 +                       bnx2x_msix_sp_int, 0,
 +                       bp->dev->name, bp->dev);
 +      if (rc) {
 +              BNX2X_ERR("request sp irq failed\n");
 +              return -EBUSY;
 +      }
 +
 +#ifdef BCM_CNIC
 +      offset++;
 +#endif
 +      for_each_eth_queue(bp, i) {
 +              struct bnx2x_fastpath *fp = &bp->fp[i];
 +              snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
 +                       bp->dev->name, i);
 +
 +              rc = request_irq(bp->msix_table[offset].vector,
 +                               bnx2x_msix_fp_int, 0, fp->name, fp);
 +              if (rc) {
 +                      BNX2X_ERR("request fp #%d irq (%d) failed  rc %d\n", i,
 +                            bp->msix_table[offset].vector, rc);
 +                      bnx2x_free_msix_irqs(bp, offset);
 +                      return -EBUSY;
 +              }
 +
 +              offset++;
 +      }
 +
 +      i = BNX2X_NUM_ETH_QUEUES(bp);
 +      offset = 1 + CNIC_PRESENT;
 +      netdev_info(bp->dev, "using MSI-X  IRQs: sp %d  fp[%d] %d"
 +             " ... fp[%d] %d\n",
 +             bp->msix_table[0].vector,
 +             0, bp->msix_table[offset].vector,
 +             i - 1, bp->msix_table[offset + i - 1].vector);
 +
 +      return 0;
 +}
 +
 +int bnx2x_enable_msi(struct bnx2x *bp)
 +{
 +      int rc;
 +
 +      rc = pci_enable_msi(bp->pdev);
 +      if (rc) {
 +              DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
 +              return -1;
 +      }
 +      bp->flags |= USING_MSI_FLAG;
 +
 +      return 0;
 +}
 +
 +static int bnx2x_req_irq(struct bnx2x *bp)
 +{
 +      unsigned long flags;
 +      int rc;
 +
 +      if (bp->flags & USING_MSI_FLAG)
 +              flags = 0;
 +      else
 +              flags = IRQF_SHARED;
 +
 +      rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
 +                       bp->dev->name, bp->dev);
 +      return rc;
 +}
 +
 +static inline int bnx2x_setup_irqs(struct bnx2x *bp)
 +{
 +      int rc = 0;
 +      if (bp->flags & USING_MSIX_FLAG) {
 +              rc = bnx2x_req_msix_irqs(bp);
 +              if (rc)
 +                      return rc;
 +      } else {
 +              bnx2x_ack_int(bp);
 +              rc = bnx2x_req_irq(bp);
 +              if (rc) {
 +                      BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
 +                      return rc;
 +              }
 +              if (bp->flags & USING_MSI_FLAG) {
 +                      bp->dev->irq = bp->pdev->irq;
 +                      netdev_info(bp->dev, "using MSI  IRQ %d\n",
 +                             bp->pdev->irq);
 +              }
 +      }
 +
 +      return 0;
 +}
 +
 +static inline void bnx2x_napi_enable(struct bnx2x *bp)
 +{
 +      int i;
 +
 +      for_each_rx_queue(bp, i)
 +              napi_enable(&bnx2x_fp(bp, i, napi));
 +}
 +
 +static inline void bnx2x_napi_disable(struct bnx2x *bp)
 +{
 +      int i;
 +
 +      for_each_rx_queue(bp, i)
 +              napi_disable(&bnx2x_fp(bp, i, napi));
 +}
 +
 +void bnx2x_netif_start(struct bnx2x *bp)
 +{
 +      if (netif_running(bp->dev)) {
 +              bnx2x_napi_enable(bp);
 +              bnx2x_int_enable(bp);
 +              if (bp->state == BNX2X_STATE_OPEN)
 +                      netif_tx_wake_all_queues(bp->dev);
 +      }
 +}
 +
 +void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
 +{
 +      bnx2x_int_disable_sync(bp, disable_hw);
 +      bnx2x_napi_disable(bp);
 +}
 +
 +u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
 +{
 +      struct bnx2x *bp = netdev_priv(dev);
++
 +#ifdef BCM_CNIC
-       /* Select a none-FCoE queue:  if FCoE is enabled, exclude FCoE L2 ring
-        */
++      if (!NO_FCOE(bp)) {
 +              struct ethhdr *hdr = (struct ethhdr *)skb->data;
 +              u16 ether_type = ntohs(hdr->h_proto);
 +
 +              /* Skip VLAN tag if present */
 +              if (ether_type == ETH_P_8021Q) {
 +                      struct vlan_ethhdr *vhdr =
 +                              (struct vlan_ethhdr *)skb->data;
 +
 +                      ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
 +              }
 +
 +              /* If ethertype is FCoE or FIP - use FCoE ring */
 +              if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
 +                      return bnx2x_fcoe_tx(bp, txq_index);
 +      }
 +#endif
++      /* select a non-FCoE queue */
 +      return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
 +}
 +
 +void bnx2x_set_num_queues(struct bnx2x *bp)
 +{
 +      switch (bp->multi_mode) {
 +      case ETH_RSS_MODE_DISABLED:
 +              bp->num_queues = 1;
 +              break;
 +      case ETH_RSS_MODE_REGULAR:
 +              bp->num_queues = bnx2x_calc_num_queues(bp);
 +              break;
 +
 +      default:
 +              bp->num_queues = 1;
 +              break;
 +      }
 +
 +      /* Add special queues */
 +      bp->num_queues += NON_ETH_CONTEXT_USE;
 +}
 +
++/**
++ * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
++ *
++ * @bp:               Driver handle
++ *
++ * We currently support at most 16 Tx queues for each CoS, thus we will
++ * allocate a multiple of 16 for ETH L2 rings, according to the value of
++ * bp->max_cos.
++ *
++ * If there is an FCoE L2 queue the appropriate Tx queue will have the next
++ * index after all ETH L2 indices.
++ *
++ * If the actual number of Tx queues (for each CoS) is less than 16 then there
++ * will be holes at the end of each group of 16 ETH L2 indices (0..15,
++ * 16..31,...) with indices that are not coupled with any real Tx queue.
++ *
++ * The proper configuration of skb->queue_mapping is handled by
++ * bnx2x_select_queue() and __skb_tx_hash().
++ *
++ * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
++ * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
++ */
 +static inline int bnx2x_set_real_num_queues(struct bnx2x *bp)
 +{
 +      int rc, tx, rx;
 +
 +      tx = MAX_TXQS_PER_COS * bp->max_cos;
 +      rx = BNX2X_NUM_ETH_QUEUES(bp);
 +
 +/* account for fcoe queue */
 +#ifdef BCM_CNIC
 +      if (!NO_FCOE(bp)) {
 +              rx += FCOE_PRESENT;
 +              tx += FCOE_PRESENT;
 +      }
 +#endif
 +
 +      rc = netif_set_real_num_tx_queues(bp->dev, tx);
 +      if (rc) {
 +              BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
 +              return rc;
 +      }
 +      rc = netif_set_real_num_rx_queues(bp->dev, rx);
 +      if (rc) {
 +              BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
 +              return rc;
 +      }
 +
 +      DP(NETIF_MSG_DRV, "Setting real num queues to (tx, rx) (%d, %d)\n",
 +                        tx, rx);
 +
 +      return rc;
 +}
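
Purely illustrative arithmetic for the queue layout described in the comment above, using hypothetical values (a 16-queue group per CoS as the comment implies, three CoS, eight ETH RSS queues, FCoE present):

	#include <stdio.h>

	int main(void)
	{
		unsigned int txqs_per_cos = 16;	/* per-CoS group size from the comment */
		unsigned int max_cos = 3;	/* hypothetical number of CoS */
		unsigned int eth_queues = 8;	/* hypothetical ETH L2 RSS queues */
		unsigned int fcoe_present = 1;	/* one extra L2 ring for FCoE */

		unsigned int tx = txqs_per_cos * max_cos + fcoe_present;	/* 49 */
		unsigned int rx = eth_queues + fcoe_present;			/* 9 */

		printf("real_num_tx_queues=%u real_num_rx_queues=%u\n", tx, rx);
		return 0;
	}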
 +
 +static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp)
 +{
 +      int i;
 +
 +      for_each_queue(bp, i) {
 +              struct bnx2x_fastpath *fp = &bp->fp[i];
 +
 +              /* Always use a mini-jumbo MTU for the FCoE L2 ring */
 +              if (IS_FCOE_IDX(i))
 +                      /*
 +                       * Although there are no IP frames expected to arrive on
 +                       * this ring, we still want to add an
 +                       * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
 +                       * overrun attack.
 +                       */
 +                      fp->rx_buf_size =
 +                              BNX2X_FCOE_MINI_JUMBO_MTU + ETH_OVREHEAD +
 +                              BNX2X_FW_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
 +              else
 +                      fp->rx_buf_size =
 +                              bp->dev->mtu + ETH_OVREHEAD +
 +                              BNX2X_FW_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
 +      }
 +}
 +
 +static inline int bnx2x_init_rss_pf(struct bnx2x *bp)
 +{
 +      int i;
 +      u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
 +      u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
 +
 +      /*
 +       * Prepare the initial contents of the indirection table if RSS is
 +       * enabled
 +       */
 +      if (bp->multi_mode != ETH_RSS_MODE_DISABLED) {
 +              for (i = 0; i < sizeof(ind_table); i++)
 +                      ind_table[i] =
 +                              bp->fp->cl_id + (i % num_eth_queues);
 +      }
 +
 +      /*
 +       * For 57710 and 57711 SEARCHER configuration (rss_keys) is
 +       * per-port, so if explicit configuration is needed, do it only
 +       * for a PMF.
 +       *
 +       * For 57712 and newer on the other hand it's a per-function
 +       * configuration.
 +       */
 +      return bnx2x_config_rss_pf(bp, ind_table,
 +                                 bp->port.pmf || !CHIP_IS_E1x(bp));
 +}
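
A small standalone sketch of the round-robin indirection-table fill above; the table size and client-ID base are hypothetical, not the real T_ETH_INDIRECTION_TABLE_SIZE or cl_id values.

	#include <stdio.h>

	#define DEMO_IND_TABLE_SIZE	16	/* hypothetical; the real table is larger */

	int main(void)
	{
		unsigned char ind_table[DEMO_IND_TABLE_SIZE];
		unsigned int base_cl_id = 5;	/* hypothetical first ETH client ID */
		unsigned int num_eth_queues = 4;
		unsigned int i;

		for (i = 0; i < sizeof(ind_table); i++)
			ind_table[i] = base_cl_id + (i % num_eth_queues);

		for (i = 0; i < sizeof(ind_table); i++)
			printf("%u ", (unsigned int)ind_table[i]);	/* 5 6 7 8 5 6 7 8 ... */
		printf("\n");
		return 0;
	}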
 +
 +int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash)
 +{
 +      struct bnx2x_config_rss_params params = {0};
 +      int i;
 +
 +      /* Although RSS is meaningless when there is a single HW queue we
 +       * still need it enabled in order to have HW Rx hash generated.
 +       *
 +       * if (!is_eth_multi(bp))
 +       *      bp->multi_mode = ETH_RSS_MODE_DISABLED;
 +       */
 +
 +      params.rss_obj = &bp->rss_conf_obj;
 +
 +      __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
 +
 +      /* RSS mode */
 +      switch (bp->multi_mode) {
 +      case ETH_RSS_MODE_DISABLED:
 +              __set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
 +              break;
 +      case ETH_RSS_MODE_REGULAR:
 +              __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
 +              break;
 +      case ETH_RSS_MODE_VLAN_PRI:
 +              __set_bit(BNX2X_RSS_MODE_VLAN_PRI, &params.rss_flags);
 +              break;
 +      case ETH_RSS_MODE_E1HOV_PRI:
 +              __set_bit(BNX2X_RSS_MODE_E1HOV_PRI, &params.rss_flags);
 +              break;
 +      case ETH_RSS_MODE_IP_DSCP:
 +              __set_bit(BNX2X_RSS_MODE_IP_DSCP, &params.rss_flags);
 +              break;
 +      default:
 +              BNX2X_ERR("Unknown multi_mode: %d\n", bp->multi_mode);
 +              return -EINVAL;
 +      }
 +
 +      /* If RSS is enabled */
 +      if (bp->multi_mode != ETH_RSS_MODE_DISABLED) {
 +              /* RSS configuration */
 +              __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
 +              __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
 +              __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
 +              __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
 +
 +              /* Hash bits */
 +              params.rss_result_mask = MULTI_MASK;
 +
 +              memcpy(params.ind_table, ind_table, sizeof(params.ind_table));
 +
 +              if (config_hash) {
 +                      /* RSS keys */
 +                      for (i = 0; i < sizeof(params.rss_key) / 4; i++)
 +                              params.rss_key[i] = random32();
 +
 +                      __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
 +              }
 +      }
 +
 +      return bnx2x_config_rss(bp, &params);
 +}
 +
 +static inline int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
 +{
 +      struct bnx2x_func_state_params func_params = {0};
 +
 +      /* Prepare parameters for function state transitions */
 +      __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
 +
 +      func_params.f_obj = &bp->func_obj;
 +      func_params.cmd = BNX2X_F_CMD_HW_INIT;
 +
 +      func_params.params.hw_init.load_phase = load_code;
 +
 +      return bnx2x_func_state_change(bp, &func_params);
 +}
 +
 +/*
 + * Cleans the objects that have internal lists without sending
 + * ramrods. Should be run when interrupts are disabled.
 + */
 +static void bnx2x_squeeze_objects(struct bnx2x *bp)
 +{
 +      int rc;
 +      unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
 +      struct bnx2x_mcast_ramrod_params rparam = {0};
 +      struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj;
 +
 +      /***************** Cleanup MACs' object first *************************/
 +
 +      /* Wait for completion of the requested commands */
 +      __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
 +      /* Perform a dry cleanup */
 +      __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
 +
 +      /* Clean ETH primary MAC */
 +      __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
 +      rc = mac_obj->delete_all(bp, &bp->fp->mac_obj, &vlan_mac_flags,
 +                               &ramrod_flags);
 +      if (rc != 0)
 +              BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
 +
 +      /* Cleanup UC list */
 +      vlan_mac_flags = 0;
 +      __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
 +      rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
 +                               &ramrod_flags);
 +      if (rc != 0)
 +              BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
 +
 +      /***************** Now clean mcast object *****************************/
 +      rparam.mcast_obj = &bp->mcast_obj;
 +      __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
 +
 +      /* Add a DEL command... */
 +      rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
 +      if (rc < 0)
 +              BNX2X_ERR("Failed to add a new DEL command to a multi-cast "
 +                        "object: %d\n", rc);
 +
 +      /* ...and wait until all pending commands are cleared */
 +      rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
 +      while (rc != 0) {
 +              if (rc < 0) {
 +                      BNX2X_ERR("Failed to clean multi-cast object: %d\n",
 +                                rc);
 +                      return;
 +              }
 +
 +              rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
 +      }
 +}
 +
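 +/*
 + * LOAD_ERROR_EXIT() funnels bnx2x_nic_load() failures to the load_error*
 + * labels below; when BNX2X_STOP_ON_ERROR is defined it instead marks the
 + * driver as panicked and returns -EBUSY without unwinding.
 + */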
 +#ifndef BNX2X_STOP_ON_ERROR
 +#define LOAD_ERROR_EXIT(bp, label) \
 +      do { \
 +              (bp)->state = BNX2X_STATE_ERROR; \
 +              goto label; \
 +      } while (0)
 +#else
 +#define LOAD_ERROR_EXIT(bp, label) \
 +      do { \
 +              (bp)->state = BNX2X_STATE_ERROR; \
 +              (bp)->panic = 1; \
 +              return -EBUSY; \
 +      } while (0)
 +#endif
 +
 +/* must be called with rtnl_lock */
 +int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 +{
 +      int port = BP_PORT(bp);
 +      u32 load_code;
 +      int i, rc;
 +
 +#ifdef BNX2X_STOP_ON_ERROR
 +      if (unlikely(bp->panic))
 +              return -EPERM;
 +#endif
 +
 +      bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
 +
 +      /* Set the initial link reported state to link down */
 +      bnx2x_acquire_phy_lock(bp);
 +      memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
 +      __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
 +              &bp->last_reported_link.link_report_flags);
 +      bnx2x_release_phy_lock(bp);
 +
 +      /* must be called before memory allocation and HW init */
 +      bnx2x_ilt_set_info(bp);
 +
 +      /*
 +       * Zero fastpath structures while preserving invariants such as napi
 +       * (allocated only once), the fp index, max_cos and the bp pointer.
 +       * Also set fp->disable_tpa.
 +       */
 +      for_each_queue(bp, i)
 +              bnx2x_bz_fp(bp, i);
 +
 +
 +      /* Set the receive queues buffer size */
 +      bnx2x_set_rx_buf_size(bp);
 +
 +      if (bnx2x_alloc_mem(bp))
 +              return -ENOMEM;
 +
 +      /* Since bnx2x_alloc_mem() may possibly update bp->num_queues,
 +       * bnx2x_set_real_num_queues() must always come after it.
 +       */
 +      rc = bnx2x_set_real_num_queues(bp);
 +      if (rc) {
 +              BNX2X_ERR("Unable to set real_num_queues\n");
 +              LOAD_ERROR_EXIT(bp, load_error0);
 +      }
 +
 +      /* configure multi cos mappings in kernel.
 +       * this configuration may be overridden by a multi class queue discipline
 +       * or by a dcbx negotiation result.
 +       */
 +      bnx2x_setup_tc(bp->dev, bp->max_cos);
 +
 +      bnx2x_napi_enable(bp);
 +
 +      /* Send the LOAD_REQUEST command to the MCP.
 +       * It returns the type of LOAD command: if this is the first port to be
 +       * initialized, the common blocks should be initialized as well;
 +       * otherwise they should not.
 +       */
 +      if (!BP_NOMCP(bp)) {
 +              load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
 +              if (!load_code) {
 +                      BNX2X_ERR("MCP response failure, aborting\n");
 +                      rc = -EBUSY;
 +                      LOAD_ERROR_EXIT(bp, load_error1);
 +              }
 +              if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
 +                      rc = -EBUSY; /* other port in diagnostic mode */
 +                      LOAD_ERROR_EXIT(bp, load_error1);
 +              }
 +
 +      } else {
 +              int path = BP_PATH(bp);
 +
 +              DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d]      %d, %d, %d\n",
 +                 path, load_count[path][0], load_count[path][1],
 +                 load_count[path][2]);
 +              load_count[path][0]++;
 +              load_count[path][1 + port]++;
 +              DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d]  %d, %d, %d\n",
 +                 path, load_count[path][0], load_count[path][1],
 +                 load_count[path][2]);
 +              if (load_count[path][0] == 1)
 +                      load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
 +              else if (load_count[path][1 + port] == 1)
 +                      load_code = FW_MSG_CODE_DRV_LOAD_PORT;
 +              else
 +                      load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
 +      }
 +
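 +      /*
 +       * The function that received a COMMON, COMMON_CHIP or PORT load
 +       * response becomes the PMF (port management function) for this port.
 +       */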
 +      if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
 +          (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
 +          (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
 +              bp->port.pmf = 1;
 +              /*
 +               * We need the barrier to ensure the ordering between the
 +               * writing to bp->port.pmf here and reading it from the
 +               * bnx2x_periodic_task().
 +               */
 +              smp_mb();
 +              queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
 +      } else
 +              bp->port.pmf = 0;
 +
 +      DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
 +
 +      /* Init Function state controlling object */
 +      bnx2x__init_func_obj(bp);
 +
 +      /* Initialize HW */
 +      rc = bnx2x_init_hw(bp, load_code);
 +      if (rc) {
 +              BNX2X_ERR("HW init failed, aborting\n");
 +              bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
 +              LOAD_ERROR_EXIT(bp, load_error2);
 +      }
 +
 +      /* Connect to IRQs */
 +      rc = bnx2x_setup_irqs(bp);
 +      if (rc) {
 +              bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
 +              LOAD_ERROR_EXIT(bp, load_error2);
 +      }
 +
 +      /* Setup NIC internals and enable interrupts */
 +      bnx2x_nic_init(bp, load_code);
 +
 +      /* Init per-function objects */
 +      bnx2x_init_bp_objs(bp);
 +
 +      if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
 +          (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
 +          (bp->common.shmem2_base)) {
 +              if (SHMEM2_HAS(bp, dcc_support))
 +                      SHMEM2_WR(bp, dcc_support,
 +                                (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
 +                                 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
 +      }
 +
 +      bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
 +      rc = bnx2x_func_start(bp);
 +      if (rc) {
 +              BNX2X_ERR("Function start failed!\n");
 +              bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
 +              LOAD_ERROR_EXIT(bp, load_error3);
 +      }
 +
 +      /* Send LOAD_DONE command to MCP */
 +      if (!BP_NOMCP(bp)) {
 +              load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
 +              if (!load_code) {
 +                      BNX2X_ERR("MCP response failure, aborting\n");
 +                      rc = -EBUSY;
 +                      LOAD_ERROR_EXIT(bp, load_error3);
 +              }
 +      }
 +
 +      rc = bnx2x_setup_leading(bp);
 +      if (rc) {
 +              BNX2X_ERR("Setup leading failed!\n");
 +              LOAD_ERROR_EXIT(bp, load_error3);
 +      }
 +
 +#ifdef BCM_CNIC
 +      /* Enable Timer scan */
 +      REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
 +#endif
 +
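 +      /* Set up the remaining (non-leading) client queues */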
 +      for_each_nondefault_queue(bp, i) {
 +              rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
 +              if (rc)
 +                      LOAD_ERROR_EXIT(bp, load_error4);
 +      }
 +
 +      rc = bnx2x_init_rss_pf(bp);
 +      if (rc)
 +              LOAD_ERROR_EXIT(bp, load_error4);
 +
 +      /* Now when Clients are configured we are ready to work */
 +      bp->state = BNX2X_STATE_OPEN;
 +
 +      /* Configure a ucast MAC */
 +      rc = bnx2x_set_eth_mac(bp, true);
 +      if (rc)
 +              LOAD_ERROR_EXIT(bp, load_error4);
 +
 +      if (bp->pending_max) {
 +              bnx2x_update_max_mf_config(bp, bp->pending_max);
 +              bp->pending_max = 0;
 +      }
 +
 +      if (bp->port.pmf)
 +              bnx2x_initial_phy_init(bp, load_mode);
 +
 +      /* Start fast path */
 +
 +      /* Initialize Rx filter. */
 +      netif_addr_lock_bh(bp->dev);
 +      bnx2x_set_rx_mode(bp->dev);
 +      netif_addr_unlock_bh(bp->dev);
 +
 +      /* Start the Tx */
 +      switch (load_mode) {
 +      case LOAD_NORMAL:
 +              /* Tx queue should be only reenabled */
 +              netif_tx_wake_all_queues(bp->dev);
 +              break;
 +
 +      case LOAD_OPEN:
 +              netif_tx_start_all_queues(bp->dev);
 +              smp_mb__after_clear_bit();
 +              break;
 +
 +      case LOAD_DIAG:
 +              bp->state = BNX2X_STATE_DIAG;
 +              break;
 +
 +      default:
 +              break;
 +      }
 +
 +      if (!bp->port.pmf)
 +              bnx2x__link_status_update(bp);
 +
 +      /* start the timer */
 +      mod_timer(&bp->timer, jiffies + bp->current_interval);
 +
 +#ifdef BCM_CNIC
 +      bnx2x_setup_cnic_irq_info(bp);
 +      if (bp->state == BNX2X_STATE_OPEN)
 +              bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
 +#endif
 +      bnx2x_inc_load_cnt(bp);
 +
 +      /* Wait for all pending SP commands to complete */
 +      if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) {
 +              BNX2X_ERR("Timeout waiting for SP elements to complete\n");
 +              bnx2x_nic_unload(bp, UNLOAD_CLOSE);
 +              return -EBUSY;
 +      }
 +
 +      bnx2x_dcbx_init(bp);
 +      return 0;
 +
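 +/*
 + * Error handling: each load_error* label below releases the resources that
 + * were acquired before the corresponding failure, in reverse order.
 + */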
 +#ifndef BNX2X_STOP_ON_ERROR
 +load_error4:
 +#ifdef BCM_CNIC
 +      /* Disable Timer scan */
 +      REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
 +#endif
 +load_error3:
 +      bnx2x_int_disable_sync(bp, 1);
 +
 +      /* Clean queueable objects */
 +      bnx2x_squeeze_objects(bp);
 +
 +      /* Free SKBs, SGEs, TPA pool and driver internals */
 +      bnx2x_free_skbs(bp);
 +      for_each_rx_queue(bp, i)
 +              bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
 +
 +      /* Release IRQs */
 +      bnx2x_free_irq(bp);
 +load_error2:
 +      if (!BP_NOMCP(bp)) {
 +              bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
 +              bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
 +      }
 +
 +      bp->port.pmf = 0;
 +load_error1:
 +      bnx2x_napi_disable(bp);
 +load_error0:
 +      bnx2x_free_mem(bp);
 +
 +      return rc;
 +#endif /* ! BNX2X_STOP_ON_ERROR */
 +}
 +
 +/* must be called with rtnl_lock */
 +int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
 +{
 +      int i;
 +      bool global = false;
 +
 +      if ((bp->state == BNX2X_STATE_CLOSED) ||
 +          (bp->state == BNX2X_STATE_ERROR)) {
 +              /* We can get here if the driver has been unloaded
 +               * during parity error recovery and is either waiting for a
 +               * leader to complete or for other functions to unload and
 +               * then ifdown has been issued. In this case we want to
 +               * unload and let the other functions complete the recovery
 +               * process.
 +               */
 +              bp->recovery_state = BNX2X_RECOVERY_DONE;
 +              bp->is_leader = 0;
 +              bnx2x_release_leader_lock(bp);
 +              smp_mb();
 +
 +              DP(NETIF_MSG_HW, "Releasing a leadership...\n");
 +
 +              return -EINVAL;
 +      }
 +
 +      /*
 +       * It's important to set bp->state to a value different from
 +       * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
 +       * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
 +       */
 +      bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
 +      smp_mb();
 +
 +      /* Stop Tx */
 +      bnx2x_tx_disable(bp);
 +
 +#ifdef BCM_CNIC
 +      bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
 +#endif
 +
 +      bp->rx_mode = BNX2X_RX_MODE_NONE;
 +
 +      del_timer_sync(&bp->timer);
 +
 +      /* Set ALWAYS_ALIVE bit in shmem */
 +      bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
 +
 +      bnx2x_drv_pulse(bp);
 +
 +      bnx2x_stats_handle(bp, STATS_EVENT_STOP);
 +
 +      /* Cleanup the chip if needed */
 +      if (unload_mode != UNLOAD_RECOVERY)
 +              bnx2x_chip_cleanup(bp, unload_mode);
 +      else {
 +              /* Send the UNLOAD_REQUEST to the MCP */
 +              bnx2x_send_unload_req(bp, unload_mode);
 +
 +              /*
 +               * Prevent transactions to the host from the functions on the
 +               * engine that doesn't reset global blocks in case of a global
 +               * attention, once the global blocks are reset and the gates are
 +               * opened (the engine whose leader will perform the recovery
 +               * last).
 +               */
 +              if (!CHIP_IS_E1x(bp))
 +                      bnx2x_pf_disable(bp);
 +
 +              /* Disable HW interrupts, NAPI */
 +              bnx2x_netif_stop(bp, 1);
 +
 +              /* Release IRQs */
 +              bnx2x_free_irq(bp);
 +
 +              /* Report UNLOAD_DONE to MCP */
 +              bnx2x_send_unload_done(bp);
 +      }
 +
 +      /*
 +       * At this stage no more interrupts will arrive, so we may safely clean
 +       * the queueable objects here in case they failed to get cleaned so far.
 +       */
 +      bnx2x_squeeze_objects(bp);
 +
 +      /* There should be no more pending SP commands at this stage */
 +      bp->sp_state = 0;
 +
 +      bp->port.pmf = 0;
 +
 +      /* Free SKBs, SGEs, TPA pool and driver internals */
 +      bnx2x_free_skbs(bp);
 +      for_each_rx_queue(bp, i)
 +              bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
 +
 +      bnx2x_free_mem(bp);
 +
 +      bp->state = BNX2X_STATE_CLOSED;
 +
 +      /* Check if there are pending parity attentions. If there are - set
 +       * RECOVERY_IN_PROGRESS.
 +       */
 +      if (bnx2x_chk_parity_attn(bp, &global, false)) {
 +              bnx2x_set_reset_in_progress(bp);
 +
 +              /* Set RESET_IS_GLOBAL if needed */
 +              if (global)
 +                      bnx2x_set_reset_global(bp);
 +      }
 +
 +
 +      /* The last driver must disable a "close the gate" if there is no
 +       * parity attention or "process kill" pending.
 +       */
 +      if (!bnx2x_dec_load_cnt(bp) && bnx2x_reset_is_done(bp, BP_PATH(bp)))
 +              bnx2x_disable_close_the_gate(bp);
 +
 +      return 0;
 +}
 +
 +int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
 +{
 +      u16 pmcsr;
 +
 +      /* If there is no power capability, silently succeed */
 +      if (!bp->pm_cap) {
 +              DP(NETIF_MSG_HW, "No power capability. Breaking.\n");
 +              return 0;
 +      }
 +
 +      pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
 +
 +      switch (state) {
 +      case PCI_D0:
 +              pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
 +                                    ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
 +                                     PCI_PM_CTRL_PME_STATUS));
 +
 +              if (pmcsr & PCI_PM_CTRL_STATE_MASK)
 +                      /* delay required during transition out of D3hot */
 +                      msleep(20);
 +              break;
 +
 +      case PCI_D3hot:
 +              /* If there are other clients above, don't
 +                 shut down the power */
 +              if (atomic_read(&bp->pdev->enable_cnt) != 1)
 +                      return 0;
 +              /* Don't shut down the power for emulation and FPGA */
 +              if (CHIP_REV_IS_SLOW(bp))
 +                      return 0;
 +
 +              pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
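 +              /* 3 = D3hot in the PM control/status PowerState field */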
 +              pmcsr |= 3;
 +
 +              if (bp->wol)
 +                      pmcsr |= PCI_PM_CTRL_PME_ENABLE;
 +
 +              pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
 +                                    pmcsr);
 +
 +              /* No more memory access after this point until
 +               * device is brought back to D0.
 +               */
 +              break;
 +
 +      default:
 +              return -EINVAL;
 +      }
 +      return 0;
 +}
 +
 +/*
 + * net_device service functions
 + */
 +int bnx2x_poll(struct napi_struct *napi, int budget)
 +{
 +      int work_done = 0;
 +      u8 cos;
 +      struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
 +                                               napi);
 +      struct bnx2x *bp = fp->bp;
 +
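 +      /* Service the Tx rings of every COS and then the Rx ring, looping
 +       * until either the budget is exhausted or no work is left.
 +       */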
 +      while (1) {
 +#ifdef BNX2X_STOP_ON_ERROR
 +              if (unlikely(bp->panic)) {
 +                      napi_complete(napi);
 +                      return 0;
 +              }
 +#endif
 +
 +              for_each_cos_in_tx_queue(fp, cos)
 +                      if (bnx2x_tx_queue_has_work(&fp->txdata[cos]))
 +                              bnx2x_tx_int(bp, &fp->txdata[cos]);
 +
 +
 +              if (bnx2x_has_rx_work(fp)) {
 +                      work_done += bnx2x_rx_int(fp, budget - work_done);
 +
 +                      /* must not complete if we consumed full budget */
 +                      if (work_done >= budget)
 +                              break;
 +              }
 +
 +              /* Fall out from the NAPI loop if needed */
 +              if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
 +#ifdef BCM_CNIC
 +                      /* No need to update SB for FCoE L2 ring as long as
 +                       * it's connected to the default SB and the SB
 +                       * has been updated when NAPI was scheduled.
 +                       */
 +                      if (IS_FCOE_FP(fp)) {
 +                              napi_complete(napi);
 +                              break;
 +                      }
 +#endif
 +
 +                      bnx2x_update_fpsb_idx(fp);
 +                      /* bnx2x_has_rx_work() reads the status block,
 +                       * thus we need to ensure that status block indices
 +                       * have been actually read (bnx2x_update_fpsb_idx)
 +                       * prior to this check (bnx2x_has_rx_work) so that
 +                       * we won't write the "newer" value of the status block
 +                       * to IGU (if there was a DMA right after
 +                       * bnx2x_has_rx_work and if there is no rmb, the memory
 +                       * reading (bnx2x_update_fpsb_idx) may be postponed
 +                       * to right before bnx2x_ack_sb). In this case there
 +                       * will never be another interrupt until there is
 +                       * another update of the status block, while there
 +                       * is still unhandled work.
 +                       */
 +                      rmb();
 +
 +                      if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
 +                              napi_complete(napi);
 +                              /* Re-enable interrupts */
 +                              DP(NETIF_MSG_HW,
 +                                 "Update index to %d\n", fp->fp_hc_idx);
 +                              bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
 +                                           le16_to_cpu(fp->fp_hc_idx),
 +                                           IGU_INT_ENABLE, 1);
 +                              break;
 +                      }
 +              }
 +      }
 +
 +      return work_done;
 +}
 +
 +/* We split the first BD into header and data BDs
 + * to ease the pain of our fellow microcode engineers;
 + * we use one mapping for both BDs.
 + * So far this has only been observed to happen
 + * in Other Operating Systems(TM).
 + */
 +static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
 +                                 struct bnx2x_fp_txdata *txdata,
 +                                 struct sw_tx_bd *tx_buf,
 +                                 struct eth_tx_start_bd **tx_bd, u16 hlen,
 +                                 u16 bd_prod, int nbd)
 +{
 +      struct eth_tx_start_bd *h_tx_bd = *tx_bd;
 +      struct eth_tx_bd *d_tx_bd;
 +      dma_addr_t mapping;
 +      int old_len = le16_to_cpu(h_tx_bd->nbytes);
 +
 +      /* first fix first BD */
 +      h_tx_bd->nbd = cpu_to_le16(nbd);
 +      h_tx_bd->nbytes = cpu_to_le16(hlen);
 +
 +      DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
 +         "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
 +         h_tx_bd->addr_lo, h_tx_bd->nbd);
 +
 +      /* now get a new data BD
 +       * (after the pbd) and fill it */
 +      bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
 +      d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
 +
 +      mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
 +                         le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
 +
 +      d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
 +      d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
 +      d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
 +
 +      /* this marks the BD as one that has no individual mapping */
 +      tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
 +
 +      DP(NETIF_MSG_TX_QUEUED,
 +         "TSO split data size is %d (%x:%x)\n",
 +         d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
 +
 +      /* update tx_bd */
 +      *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
 +
 +      return bd_prod;
 +}
 +
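 +/*
 + * Fix up a 16-bit checksum when the range the HW summed starts 'fix' bytes
 + * before (fix > 0) or after (fix < 0) the transport header; the result is
 + * returned byte-swapped, as it is stored in the parsing BD.
 + */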
 +static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
 +{
 +      if (fix > 0)
 +              csum = (u16) ~csum_fold(csum_sub(csum,
 +                              csum_partial(t_header - fix, fix, 0)));
 +
 +      else if (fix < 0)
 +              csum = (u16) ~csum_fold(csum_add(csum,
 +                              csum_partial(t_header, -fix, 0)));
 +
 +      return swab16(csum);
 +}
 +
 +static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
 +{
 +      u32 rc;
 +
 +      if (skb->ip_summed != CHECKSUM_PARTIAL)
 +              rc = XMIT_PLAIN;
 +
 +      else {
 +              if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
 +                      rc = XMIT_CSUM_V6;
 +                      if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
 +                              rc |= XMIT_CSUM_TCP;
 +
 +              } else {
 +                      rc = XMIT_CSUM_V4;
 +                      if (ip_hdr(skb)->protocol == IPPROTO_TCP)
 +                              rc |= XMIT_CSUM_TCP;
 +              }
 +      }
 +
 +      if (skb_is_gso_v6(skb))
 +              rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
 +      else if (skb_is_gso(skb))
 +              rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
 +
 +      return rc;
 +}
 +
 +#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
 +/* Check if the packet requires linearization (i.e. it is too fragmented).
 +   There is no need to check fragmentation if the page size > 8K (there will
 +   be no violation of FW restrictions). */
 +static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
 +                           u32 xmit_type)
 +{
 +      int to_copy = 0;
 +      int hlen = 0;
 +      int first_bd_sz = 0;
 +
 +      /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
 +      if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
 +
 +              if (xmit_type & XMIT_GSO) {
 +                      unsigned short lso_mss = skb_shinfo(skb)->gso_size;
 +                      /* Check if LSO packet needs to be copied:
 +                         3 = 1 (for headers BD) + 2 (for PBD and last BD) */
 +                      int wnd_size = MAX_FETCH_BD - 3;
 +                      /* Number of windows to check */
 +                      int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
 +                      int wnd_idx = 0;
 +                      int frag_idx = 0;
 +                      u32 wnd_sum = 0;
 +
 +                      /* Headers length */
 +                      hlen = (int)(skb_transport_header(skb) - skb->data) +
 +                              tcp_hdrlen(skb);
 +
 +                      /* Amount of data (w/o headers) on linear part of SKB */
 +                      first_bd_sz = skb_headlen(skb) - hlen;
 +
 +                      wnd_sum  = first_bd_sz;
 +
 +                      /* Calculate the first sum - it's special */
 +                      for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
 +                              wnd_sum +=
 +                                      skb_shinfo(skb)->frags[frag_idx].size;
 +
 +                      /* If there was data on linear skb data - check it */
 +                      if (first_bd_sz > 0) {
 +                              if (unlikely(wnd_sum < lso_mss)) {
 +                                      to_copy = 1;
 +                                      goto exit_lbl;
 +                              }
 +
 +                              wnd_sum -= first_bd_sz;
 +                      }
 +
 +                      /* Others are easier: run through the frag list and
 +                         check all windows */
 +                      for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
 +                              wnd_sum +=
 +                        skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
 +
 +                              if (unlikely(wnd_sum < lso_mss)) {
 +                                      to_copy = 1;
 +                                      break;
 +                              }
 +                              wnd_sum -=
 +                                      skb_shinfo(skb)->frags[wnd_idx].size;
 +                      }
 +              } else {
 +                      /* in the non-LSO case, a too-fragmented packet should
 +                         always be linearized */
 +                      to_copy = 1;
 +              }
 +      }
 +
 +exit_lbl:
 +      if (unlikely(to_copy))
 +              DP(NETIF_MSG_TX_QUEUED,
 +                 "Linearization IS REQUIRED for %s packet. "
 +                 "num_frags %d  hlen %d  first_bd_sz %d\n",
 +                 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
 +                 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
 +
 +      return to_copy;
 +}
 +#endif
 +
 +static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
 +                                      u32 xmit_type)
 +{
 +      *parsing_data |= (skb_shinfo(skb)->gso_size <<
 +                            ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
 +                            ETH_TX_PARSE_BD_E2_LSO_MSS;
 +      if ((xmit_type & XMIT_GSO_V6) &&
 +          (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
 +              *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
 +}
 +
 +/**
 + * bnx2x_set_pbd_gso - update PBD in GSO case.
 + *
 + * @skb:      packet skb
 + * @pbd:      parse BD
 + * @xmit_type:        xmit flags
 + */
 +static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
 +                                   struct eth_tx_parse_bd_e1x *pbd,
 +                                   u32 xmit_type)
 +{
 +      pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
 +      pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
 +      pbd->tcp_flags = pbd_tcp_flags(skb);
 +
 +      if (xmit_type & XMIT_GSO_V4) {
 +              pbd->ip_id = swab16(ip_hdr(skb)->id);
 +              pbd->tcp_pseudo_csum =
 +                      swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
 +                                                ip_hdr(skb)->daddr,
 +                                                0, IPPROTO_TCP, 0));
 +
 +      } else
 +              pbd->tcp_pseudo_csum =
 +                      swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
 +                                              &ipv6_hdr(skb)->daddr,
 +                                              0, IPPROTO_TCP, 0));
 +
 +      pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
 +}
 +
 +/**
 + * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
 + *
 + * @bp:                       driver handle
 + * @skb:              packet skb
 + * @parsing_data:     data to be updated
 + * @xmit_type:                xmit flags
 + *
 + * 57712 related
 + */
 +static inline  u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
 +      u32 *parsing_data, u32 xmit_type)
 +{
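 +      /* Note: the TCP header start offset below is in 16-bit words (_W) and
 +       * the TCP header length is in 32-bit dwords (_DW), hence >> 1 and / 4.
 +       */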
 +      *parsing_data |=
 +                      ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
 +                      ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
 +                      ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
 +
 +      if (xmit_type & XMIT_CSUM_TCP) {
 +              *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
 +                      ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
 +                      ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
 +
 +              return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
 +      } else
 +              /* We support checksum offload for TCP and UDP only.
 +               * No need to pass the UDP header length - it's a constant.
 +               */
 +              return skb_transport_header(skb) +
 +                              sizeof(struct udphdr) - skb->data;
 +}
 +
 +static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
 +      struct eth_tx_start_bd *tx_start_bd, u32 xmit_type)
 +{
 +      tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
 +
 +      if (xmit_type & XMIT_CSUM_V4)
 +              tx_start_bd->bd_flags.as_bitfield |=
 +                                      ETH_TX_BD_FLAGS_IP_CSUM;
 +      else
 +              tx_start_bd->bd_flags.as_bitfield |=
 +                                      ETH_TX_BD_FLAGS_IPV6;
 +
 +      if (!(xmit_type & XMIT_CSUM_TCP))
 +              tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
 +}
 +
 +/**
 + * bnx2x_set_pbd_csum - update PBD with checksum and return header length
 + *
 + * @bp:               driver handle
 + * @skb:      packet skb
 + * @pbd:      parse BD to be updated
 + * @xmit_type:        xmit flags
 + */
 +static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
 +      struct eth_tx_parse_bd_e1x *pbd,
 +      u32 xmit_type)
 +{
 +      u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
 +
 +      /* for now NS flag is not used in Linux */
 +      pbd->global_data =
 +              (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
 +                       ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
 +
 +      pbd->ip_hlen_w = (skb_transport_header(skb) -
 +                      skb_network_header(skb)) >> 1;
 +
 +      hlen += pbd->ip_hlen_w;
 +
 +      /* We support checksum offload for TCP and UDP only */
 +      if (xmit_type & XMIT_CSUM_TCP)
 +              hlen += tcp_hdrlen(skb) / 2;
 +      else
 +              hlen += sizeof(struct udphdr) / 2;
 +
 +      pbd->total_hlen_w = cpu_to_le16(hlen);
 +      hlen = hlen*2;
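 +      /* total_hlen_w above is in 16-bit words; hlen is returned in bytes */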
 +
 +      if (xmit_type & XMIT_CSUM_TCP) {
 +              pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
 +
 +      } else {
 +              s8 fix = SKB_CS_OFF(skb); /* signed! */
 +
 +              DP(NETIF_MSG_TX_QUEUED,
 +                 "hlen %d  fix %d  csum before fix %x\n",
 +                 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
 +
 +              /* HW bug: fixup the CSUM */
 +              pbd->tcp_pseudo_csum =
 +                      bnx2x_csum_fix(skb_transport_header(skb),
 +                                     SKB_CS(skb), fix);
 +
 +              DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
 +                 pbd->tcp_pseudo_csum);
 +      }
 +
 +      return hlen;
 +}
 +
 +/* called with netif_tx_lock
 + * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
 + * netif_wake_queue()
 + */
 +netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 +{
 +      struct bnx2x *bp = netdev_priv(dev);
 +
 +      struct bnx2x_fastpath *fp;
 +      struct netdev_queue *txq;
 +      struct bnx2x_fp_txdata *txdata;
 +      struct sw_tx_bd *tx_buf;
 +      struct eth_tx_start_bd *tx_start_bd, *first_bd;
 +      struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
 +      struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
 +      struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
 +      u32 pbd_e2_parsing_data = 0;
 +      u16 pkt_prod, bd_prod;
 +      int nbd, txq_index, fp_index, txdata_index;
 +      dma_addr_t mapping;
 +      u32 xmit_type = bnx2x_xmit_type(bp, skb);
 +      int i;
 +      u8 hlen = 0;
 +      __le16 pkt_size = 0;
 +      struct ethhdr *eth;
 +      u8 mac_type = UNICAST_ADDRESS;
 +
 +#ifdef BNX2X_STOP_ON_ERROR
 +      if (unlikely(bp->panic))
 +              return NETDEV_TX_BUSY;
 +#endif
 +
 +      txq_index = skb_get_queue_mapping(skb);
 +      txq = netdev_get_tx_queue(dev, txq_index);
 +
 +      BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + FCOE_PRESENT);
 +
 +      /* decode the fastpath index and the cos index from the txq */
 +      fp_index = TXQ_TO_FP(txq_index);
 +      txdata_index = TXQ_TO_COS(txq_index);
 +
 +#ifdef BCM_CNIC
 +      /*
 +       * Override the above for the FCoE queue:
 +       *   - FCoE fp entry is right after the ETH entries.
 +       *   - FCoE L2 queue uses bp->txdata[0] only.
 +       */
 +      if (unlikely(!NO_FCOE(bp) && (txq_index ==
 +                                    bnx2x_fcoe_tx(bp, txq_index)))) {
 +              fp_index = FCOE_IDX;
 +              txdata_index = 0;
 +      }
 +#endif
 +
 +      /* enable this debug print to view the transmission queue being used
 +      DP(BNX2X_MSG_FP, "indices: txq %d, fp %d, txdata %d\n",
 +         txq_index, fp_index, txdata_index); */
 +
 +      /* locate the fastpath and the txdata */
 +      fp = &bp->fp[fp_index];
 +      txdata = &fp->txdata[txdata_index];
 +
 +      /* enable this debug print to view the transmission details
 +      DP(BNX2X_MSG_FP,"transmitting packet cid %d fp index %d txdata_index %d"
 +                      " tx_data ptr %p fp pointer %p\n",
 +         txdata->cid, fp_index, txdata_index, txdata, fp); */
 +
 +      if (unlikely(bnx2x_tx_avail(bp, txdata) <
 +                   (skb_shinfo(skb)->nr_frags + 3))) {
 +              fp->eth_q_stats.driver_xoff++;
 +              netif_tx_stop_queue(txq);
 +              BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
 +              return NETDEV_TX_BUSY;
 +      }
 +
 +      DP(NETIF_MSG_TX_QUEUED, "queue[%d]: SKB: summed %x  protocol %x  "
 +                              "protocol(%x,%x) gso type %x  xmit_type %x\n",
 +         txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
 +         ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
 +
 +      eth = (struct ethhdr *)skb->data;
 +
 +      /* set flag according to packet type (UNICAST_ADDRESS is default)*/
 +      if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
 +              if (is_broadcast_ether_addr(eth->h_dest))
 +                      mac_type = BROADCAST_ADDRESS;
 +              else
 +                      mac_type = MULTICAST_ADDRESS;
 +      }
 +
 +#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
 +      /* First, check if we need to linearize the skb (due to FW
 +         restrictions). No need to check fragmentation if page size > 8K
 +         (there will be no violation of FW restrictions) */
 +      if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
 +              /* Statistics of linearization */
 +              bp->lin_cnt++;
 +              if (skb_linearize(skb) != 0) {
 +                      DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
 +                         "silently dropping this SKB\n");
 +                      dev_kfree_skb_any(skb);
 +                      return NETDEV_TX_OK;
 +              }
 +      }
 +#endif
 +      /* Map skb linear data for DMA */
 +      mapping = dma_map_single(&bp->pdev->dev, skb->data,
 +                               skb_headlen(skb), DMA_TO_DEVICE);
 +      if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
 +              DP(NETIF_MSG_TX_QUEUED, "SKB mapping failed - "
 +                 "silently dropping this SKB\n");
 +              dev_kfree_skb_any(skb);
 +              return NETDEV_TX_OK;
 +      }
 +      /*
 +      Please read carefully. First we use one BD which we mark as start,
 +      then we have a parsing info BD (used for TSO or xsum),
 +      and only then we have the rest of the TSO BDs.
 +      (don't forget to mark the last one as last,
 +      and to unmap only AFTER you write to the BD ...)
 +      And above all, all pbd sizes are in words - NOT DWORDS!
 +      */
 +
 +      /* get current pkt produced now - advance it just before sending packet
 +       * since mapping of pages may fail and cause packet to be dropped
 +       */
 +      pkt_prod = txdata->tx_pkt_prod;
 +      bd_prod = TX_BD(txdata->tx_bd_prod);
 +
 +      /* get a tx_buf and first BD
 +       * tx_start_bd may be changed during SPLIT,
 +       * but first_bd will always stay first
 +       */
 +      tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
 +      tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
 +      first_bd = tx_start_bd;
 +
 +      tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
 +      SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE,
 +               mac_type);
 +
 +      /* header nbd */
 +      SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
 +
 +      /* remember the first BD of the packet */
 +      tx_buf->first_bd = txdata->tx_bd_prod;
 +      tx_buf->skb = skb;
 +      tx_buf->flags = 0;
 +
 +      DP(NETIF_MSG_TX_QUEUED,
 +         "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
 +         pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
 +
 +      if (vlan_tx_tag_present(skb)) {
 +              tx_start_bd->vlan_or_ethertype =
 +                  cpu_to_le16(vlan_tx_tag_get(skb));
 +              tx_start_bd->bd_flags.as_bitfield |=
 +                  (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
 +      } else
 +              tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
 +
 +      /* turn on parsing and get a BD */
 +      bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
 +
 +      if (xmit_type & XMIT_CSUM)
 +              bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
 +
 +      if (!CHIP_IS_E1x(bp)) {
 +              pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
 +              memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
 +              /* Set PBD in checksum offload case */
 +              if (xmit_type & XMIT_CSUM)
 +                      hlen = bnx2x_set_pbd_csum_e2(bp, skb,
 +                                                   &pbd_e2_parsing_data,
 +                                                   xmit_type);
 +              if (IS_MF_SI(bp)) {
 +                      /*
 +                       * fill in the MAC addresses in the PBD - for local
 +                       * switching
 +                       */
 +                      bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi,
 +                                            &pbd_e2->src_mac_addr_mid,
 +                                            &pbd_e2->src_mac_addr_lo,
 +                                            eth->h_source);
 +                      bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi,
 +                                            &pbd_e2->dst_mac_addr_mid,
 +                                            &pbd_e2->dst_mac_addr_lo,
 +                                            eth->h_dest);
 +              }
 +      } else {
 +              pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
 +              memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
 +              /* Set PBD in checksum offload case */
 +              if (xmit_type & XMIT_CSUM)
 +                      hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
 +
 +      }
 +
 +      /* Setup the data pointer of the first BD of the packet */
 +      tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
 +      tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
 +      nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
 +      tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
 +      pkt_size = tx_start_bd->nbytes;
 +
 +      DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
 +         "  nbytes %d  flags %x  vlan %x\n",
 +         tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
 +         le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
 +         tx_start_bd->bd_flags.as_bitfield,
 +         le16_to_cpu(tx_start_bd->vlan_or_ethertype));
 +
 +      if (xmit_type & XMIT_GSO) {
 +
 +              DP(NETIF_MSG_TX_QUEUED,
 +                 "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
 +                 skb->len, hlen, skb_headlen(skb),
 +                 skb_shinfo(skb)->gso_size);
 +
 +              tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
 +
 +              if (unlikely(skb_headlen(skb) > hlen))
 +                      bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
 +                                               &tx_start_bd, hlen,
 +                                               bd_prod, ++nbd);
 +              if (!CHIP_IS_E1x(bp))
 +                      bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
 +                                           xmit_type);
 +              else
 +                      bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
 +      }
 +
 +      /* Set the PBD's parsing_data field if not zero
 +       * (for the chips newer than 57711).
 +       */
 +      if (pbd_e2_parsing_data)
 +              pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
 +
 +      tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
 +
 +      /* Handle fragmented skb */
 +      for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 +              skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 +
 +              mapping = dma_map_page(&bp->pdev->dev, frag->page,
 +                                     frag->page_offset, frag->size,
 +                                     DMA_TO_DEVICE);
 +              if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
 +
 +                      DP(NETIF_MSG_TX_QUEUED, "Unable to map page - "
 +                                              "dropping packet...\n");
 +
 +                      /* we need to unmap all buffers already mapped
 +                       * for this SKB;
 +                       * first_bd->nbd needs to be properly updated
 +                       * before the call to bnx2x_free_tx_pkt
 +                       */
 +                      first_bd->nbd = cpu_to_le16(nbd);
 +                      bnx2x_free_tx_pkt(bp, txdata,
 +                                        TX_BD(txdata->tx_pkt_prod));
 +                      return NETDEV_TX_OK;
 +              }
 +
 +              bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
 +              tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
 +              if (total_pkt_bd == NULL)
 +                      total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
 +
 +              tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
 +              tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
 +              tx_data_bd->nbytes = cpu_to_le16(frag->size);
 +              le16_add_cpu(&pkt_size, frag->size);
 +              nbd++;
 +
 +              DP(NETIF_MSG_TX_QUEUED,
 +                 "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
 +                 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
 +                 le16_to_cpu(tx_data_bd->nbytes));
 +      }
 +
 +      DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
 +
 +      /* update with actual num BDs */
 +      first_bd->nbd = cpu_to_le16(nbd);
 +
 +      bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
 +
 +      /* now send a tx doorbell, counting the next BD
 +       * if the packet contains or ends with it
 +       */
 +      if (TX_BD_POFF(bd_prod) < nbd)
 +              nbd++;
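 +      /* (presumably accounting for the 'next page' link BD at the end of
 +       * each BD page when the packet's BDs wrap past a page boundary)
 +       */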
 +
 +      /* total_pkt_bytes should be set on the first data BD if
 +       * it's not an LSO packet and there is more than one
 +       * data BD. In this case pkt_size is limited by an MTU value.
 +       * However we prefer to set it for an LSO packet (while we don't
 +       * have to) in order to save some CPU cycles in the non-LSO
 +       * case, which we care about much more.
 +       */
 +      if (total_pkt_bd != NULL)
 +              total_pkt_bd->total_pkt_bytes = pkt_size;
 +
 +      if (pbd_e1x)
 +              DP(NETIF_MSG_TX_QUEUED,
 +                 "PBD (E1X) @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
 +                 "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
 +                 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
 +                 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
 +                 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
 +                  le16_to_cpu(pbd_e1x->total_hlen_w));
 +      if (pbd_e2)
 +              DP(NETIF_MSG_TX_QUEUED,
 +                 "PBD (E2) @%p  dst %x %x %x src %x %x %x parsing_data %x\n",
 +                 pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
 +                 pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
 +                 pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
 +                 pbd_e2->parsing_data);
 +      DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
 +
 +      txdata->tx_pkt_prod++;
 +      /*
 +       * Make sure that the BD data is updated before updating the producer
 +       * since FW might read the BD right after the producer is updated.
 +       * This is only applicable for weak-ordered memory model archs such
 +       * as IA-64. The following barrier is also mandatory since the FW
 +       * assumes packets must have BDs.
 +       */
 +      wmb();
 +
 +      txdata->tx_db.data.prod += nbd;
 +      barrier();
 +
 +      DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
 +
 +      mmiowb();
 +
 +      txdata->tx_bd_prod += nbd;
 +
 +      if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_SKB_FRAGS + 3)) {
 +              netif_tx_stop_queue(txq);
 +
 +              /* paired memory barrier is in bnx2x_tx_int(), we have to keep
 +               * ordering of set_bit() in netif_tx_stop_queue() and read of
 +               * fp->bd_tx_cons */
 +              smp_mb();
 +
 +              fp->eth_q_stats.driver_xoff++;
 +              if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3)
 +                      netif_tx_wake_queue(txq);
 +      }
 +      txdata->tx_pkt++;
 +
 +      return NETDEV_TX_OK;
 +}
 +
 +/**
 + * bnx2x_setup_tc - routine to configure net_device for multi tc
 + *
 + * @dev: net device to configure
 + * @num_tc: number of traffic classes to enable
 + *
 + * callback connected to the ndo_setup_tc function pointer
 + */
 +int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
 +{
 +      int cos, prio, count, offset;
 +      struct bnx2x *bp = netdev_priv(dev);
 +
 +      /* setup tc must be called under rtnl lock */
 +      ASSERT_RTNL();
 +
 +      /* no traffic classes requested. aborting */
 +      if (!num_tc) {
 +              netdev_reset_tc(dev);
 +              return 0;
 +      }
 +
 +      /* requested to support too many traffic classes */
 +      if (num_tc > bp->max_cos) {
 +              DP(NETIF_MSG_TX_ERR, "support for too many traffic classes"
 +                                   " requested: %d. max supported is %d\n",
 +                                   num_tc, bp->max_cos);
 +              return -EINVAL;
 +      }
 +
 +      /* declare amount of supported traffic classes */
 +      if (netdev_set_num_tc(dev, num_tc)) {
 +              DP(NETIF_MSG_TX_ERR, "failed to declare %d traffic classes\n",
 +                                   num_tc);
 +              return -EINVAL;
 +      }
 +
 +      /* configure priority to traffic class mapping */
 +      for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
 +              netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
 +              DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n",
 +                 prio, bp->prio_to_cos[prio]);
 +      }
 +
 +
 +      /* Use this configuration to differentiate tc0 from the other COSes.
 +         This can be used for ETS or PFC, and saves the effort of setting
 +         up a multi class queue disc or negotiating DCBX with a switch
 +      netdev_set_prio_tc_map(dev, 0, 0);
 +      DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
 +      for (prio = 1; prio < 16; prio++) {
 +              netdev_set_prio_tc_map(dev, prio, 1);
 +              DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
 +      } */
 +
 +      /* configure traffic class to transmission queue mapping */
 +      for (cos = 0; cos < bp->max_cos; cos++) {
 +              count = BNX2X_NUM_ETH_QUEUES(bp);
 +              offset = cos * MAX_TXQS_PER_COS;
 +              netdev_set_tc_queue(dev, cos, count, offset);
 +              DP(BNX2X_MSG_SP, "mapping tc %d to offset %d count %d\n",
 +                 cos, offset, count);
 +      }
 +
 +      return 0;
 +}
 +
 +/* called with rtnl_lock */
 +int bnx2x_change_mac_addr(struct net_device *dev, void *p)
 +{
 +      struct sockaddr *addr = p;
 +      struct bnx2x *bp = netdev_priv(dev);
 +      int rc = 0;
 +
 +      if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
 +              return -EINVAL;
 +
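 +      /* If the interface is running, clear the current MAC configuration
 +       * first; the new MAC is applied after dev_addr is updated below.
 +       */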
 +      if (netif_running(dev))  {
 +              rc = bnx2x_set_eth_mac(bp, false);
 +              if (rc)
 +                      return rc;
 +      }
 +
 +      memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
 +
 +      if (netif_running(dev))
 +              rc = bnx2x_set_eth_mac(bp, true);
 +
 +      return rc;
 +}
 +
 +static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
 +{
 +      union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
 +      struct bnx2x_fastpath *fp = &bp->fp[fp_index];
 +      u8 cos;
 +
 +      /* Common */
 +#ifdef BCM_CNIC
 +      if (IS_FCOE_IDX(fp_index)) {
 +              memset(sb, 0, sizeof(union host_hc_status_block));
 +              fp->status_blk_mapping = 0;
 +
 +      } else {
 +#endif
 +              /* status blocks */
 +              if (!CHIP_IS_E1x(bp))
 +                      BNX2X_PCI_FREE(sb->e2_sb,
 +                                     bnx2x_fp(bp, fp_index,
 +                                              status_blk_mapping),
 +                                     sizeof(struct host_hc_status_block_e2));
 +              else
 +                      BNX2X_PCI_FREE(sb->e1x_sb,
 +                                     bnx2x_fp(bp, fp_index,
 +                                              status_blk_mapping),
 +                                     sizeof(struct host_hc_status_block_e1x));
 +#ifdef BCM_CNIC
 +      }
 +#endif
 +      /* Rx */
 +      if (!skip_rx_queue(bp, fp_index)) {
 +              bnx2x_free_rx_bds(fp);
 +
 +              /* fastpath rx rings: rx_buf rx_desc rx_comp */
 +              BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
 +              BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
 +                             bnx2x_fp(bp, fp_index, rx_desc_mapping),
 +                             sizeof(struct eth_rx_bd) * NUM_RX_BD);
 +
 +              BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
 +                             bnx2x_fp(bp, fp_index, rx_comp_mapping),
 +                             sizeof(struct eth_fast_path_rx_cqe) *
 +                             NUM_RCQ_BD);
 +
 +              /* SGE ring */
 +              BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
 +              BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
 +                             bnx2x_fp(bp, fp_index, rx_sge_mapping),
 +                             BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
 +      }
 +
 +      /* Tx */
 +      if (!skip_tx_queue(bp, fp_index)) {
 +              /* fastpath tx rings: tx_buf tx_desc */
 +              for_each_cos_in_tx_queue(fp, cos) {
 +                      struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
 +
 +                      DP(BNX2X_MSG_SP,
 +                         "freeing tx memory of fp %d cos %d cid %d\n",
 +                         fp_index, cos, txdata->cid);
 +
 +                      BNX2X_FREE(txdata->tx_buf_ring);
 +                      BNX2X_PCI_FREE(txdata->tx_desc_ring,
 +                              txdata->tx_desc_mapping,
 +                              sizeof(union eth_tx_bd_types) * NUM_TX_BD);
 +              }
 +      }
 +      /* end of fastpath */
 +}
 +
 +void bnx2x_free_fp_mem(struct bnx2x *bp)
 +{
 +      int i;
 +      for_each_queue(bp, i)
 +              bnx2x_free_fp_mem_at(bp, i);
 +}
 +
 +static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
 +{
 +      union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
 +      if (!CHIP_IS_E1x(bp)) {
 +              bnx2x_fp(bp, index, sb_index_values) =
 +                      (__le16 *)status_blk.e2_sb->sb.index_values;
 +              bnx2x_fp(bp, index, sb_running_index) =
 +                      (__le16 *)status_blk.e2_sb->sb.running_index;
 +      } else {
 +              bnx2x_fp(bp, index, sb_index_values) =
 +                      (__le16 *)status_blk.e1x_sb->sb.index_values;
 +              bnx2x_fp(bp, index, sb_running_index) =
 +                      (__le16 *)status_blk.e1x_sb->sb.running_index;
 +      }
 +}
 +
 +static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
 +{
 +      union host_hc_status_block *sb;
 +      struct bnx2x_fastpath *fp = &bp->fp[index];
 +      int ring_size = 0;
 +      u8 cos;
 +
 +      /* if rx_ring_size specified - use it */
 +      int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size :
 +                         MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
 +
 +      /* allocate at least number of buffers required by FW */
 +      rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
 +                                                  MIN_RX_SIZE_TPA,
 +                                rx_ring_size);
 +
 +      /* Common */
 +      sb = &bnx2x_fp(bp, index, status_blk);
 +#ifdef BCM_CNIC
 +      if (!IS_FCOE_IDX(index)) {
 +#endif
 +              /* status blocks */
 +              if (!CHIP_IS_E1x(bp))
 +                      BNX2X_PCI_ALLOC(sb->e2_sb,
 +                              &bnx2x_fp(bp, index, status_blk_mapping),
 +                              sizeof(struct host_hc_status_block_e2));
 +              else
 +                      BNX2X_PCI_ALLOC(sb->e1x_sb,
 +                              &bnx2x_fp(bp, index, status_blk_mapping),
 +                              sizeof(struct host_hc_status_block_e1x));
 +#ifdef BCM_CNIC
 +      }
 +#endif
 +
 +      /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
 +       * set shortcuts for it.
 +       */
 +      if (!IS_FCOE_IDX(index))
 +              set_sb_shortcuts(bp, index);
 +
 +      /* Tx */
 +      if (!skip_tx_queue(bp, index)) {
 +              /* fastpath tx rings: tx_buf tx_desc */
 +              for_each_cos_in_tx_queue(fp, cos) {
 +                      struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
 +
 +                      DP(BNX2X_MSG_SP,
 +                         "allocating tx memory of fp %d cos %d\n",
 +                         index, cos);
 +
 +                      BNX2X_ALLOC(txdata->tx_buf_ring,
 +                              sizeof(struct sw_tx_bd) * NUM_TX_BD);
 +                      BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
 +                              &txdata->tx_desc_mapping,
 +                              sizeof(union eth_tx_bd_types) * NUM_TX_BD);
 +              }
 +      }
 +
 +      /* Rx */
 +      if (!skip_rx_queue(bp, index)) {
 +              /* fastpath rx rings: rx_buf rx_desc rx_comp */
 +              BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
 +                              sizeof(struct sw_rx_bd) * NUM_RX_BD);
 +              BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
 +                              &bnx2x_fp(bp, index, rx_desc_mapping),
 +                              sizeof(struct eth_rx_bd) * NUM_RX_BD);
 +
 +              BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
 +                              &bnx2x_fp(bp, index, rx_comp_mapping),
 +                              sizeof(struct eth_fast_path_rx_cqe) *
 +                              NUM_RCQ_BD);
 +
 +              /* SGE ring */
 +              BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
 +                              sizeof(struct sw_rx_page) * NUM_RX_SGE);
 +              BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
 +                              &bnx2x_fp(bp, index, rx_sge_mapping),
 +                              BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
 +              /* RX BD ring */
 +              bnx2x_set_next_page_rx_bd(fp);
 +
 +              /* CQ ring */
 +              bnx2x_set_next_page_rx_cq(fp);
 +
 +              /* BDs */
 +              ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
 +              if (ring_size < rx_ring_size)
 +                      goto alloc_mem_err;
 +      }
 +
 +      return 0;
 +
 +/* handles low memory cases */
 +alloc_mem_err:
 +      BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
 +                                              index, ring_size);
 +      /* FW will drop all packets if the queue is not big enough;
 +       * in that case we disable the queue.
 +       * The minimum size differs for OOO, TPA and non-TPA queues.
 +       */
 +      if (ring_size < (fp->disable_tpa ?
 +                              MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
 +                      /* release memory allocated for this queue */
 +                      bnx2x_free_fp_mem_at(bp, index);
 +                      return -ENOMEM;
 +      }
 +      return 0;
 +}
 +
 +int bnx2x_alloc_fp_mem(struct bnx2x *bp)
 +{
 +      int i;
 +
 +      /*
 +       * 1. Allocate FP for leading - fatal if error
 +       * 2. {CNIC} Allocate FCoE FP - fatal if error
 +       * 3. {CNIC} Allocate OOO + FWD - disable OOO if error
 +       * 4. Allocate RSS - fix number of queues if error
 +       */
 +
 +      /* leading */
 +      if (bnx2x_alloc_fp_mem_at(bp, 0))
 +              return -ENOMEM;
 +
 +#ifdef BCM_CNIC
 +      if (!NO_FCOE(bp))
 +              /* FCoE */
 +              if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX))
 +                      /* we will fail the load process instead of
 +                       * marking NO_FCOE_FLAG
 +                       */
 +                      return -ENOMEM;
 +#endif
 +
 +      /* RSS */
 +      for_each_nondefault_eth_queue(bp, i)
 +              if (bnx2x_alloc_fp_mem_at(bp, i))
 +                      break;
 +
 +      /* handle memory failures */
 +      if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
 +              int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
 +
 +              WARN_ON(delta < 0);
 +#ifdef BCM_CNIC
 +              /*
 +               * Move the non-eth FPs next to the last eth FP.
 +               * This must be done in this order:
 +               * FCOE_IDX < FWD_IDX < OOO_IDX
 +               */
 +
 +              /* move the FCoE fp even if NO_FCOE_FLAG is on */
 +              bnx2x_move_fp(bp, FCOE_IDX, FCOE_IDX - delta);
 +#endif
 +              bp->num_queues -= delta;
 +              BNX2X_ERR("Adjusted num of queues from %d to %d\n",
 +                        bp->num_queues + delta, bp->num_queues);
 +      }
 +
 +      return 0;
 +}
 +
 +void bnx2x_free_mem_bp(struct bnx2x *bp)
 +{
 +      kfree(bp->fp);
 +      kfree(bp->msix_table);
 +      kfree(bp->ilt);
 +}
 +
 +int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
 +{
 +      struct bnx2x_fastpath *fp;
 +      struct msix_entry *tbl;
 +      struct bnx2x_ilt *ilt;
 +      int msix_table_size = 0;
 +
 +      /*
 +       * The biggest MSI-X table we might need is the maximum number of
 +       * fast-path IGU SBs plus the default SB (for the PF).
 +       */
 +      msix_table_size = bp->igu_sb_cnt + 1;
 +
 +      /* fp array: RSS plus CNIC related L2 queues */
 +      fp = kzalloc((BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE) *
 +                   sizeof(*fp), GFP_KERNEL);
 +      if (!fp)
 +              goto alloc_err;
 +      bp->fp = fp;
 +
 +      /* msix table */
 +      tbl = kzalloc(msix_table_size * sizeof(*tbl), GFP_KERNEL);
 +      if (!tbl)
 +              goto alloc_err;
 +      bp->msix_table = tbl;
 +
 +      /* ilt */
 +      ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
 +      if (!ilt)
 +              goto alloc_err;
 +      bp->ilt = ilt;
 +
 +      return 0;
 +alloc_err:
 +      bnx2x_free_mem_bp(bp);
 +      return -ENOMEM;
 +}
 +
 +int bnx2x_reload_if_running(struct net_device *dev)
 +{
 +      struct bnx2x *bp = netdev_priv(dev);
 +
 +      if (unlikely(!netif_running(dev)))
 +              return 0;
 +
 +      bnx2x_nic_unload(bp, UNLOAD_NORMAL);
 +      return bnx2x_nic_load(bp, LOAD_NORMAL);
 +}
 +
 +int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
 +{
 +      u32 sel_phy_idx = 0;
 +      if (bp->link_params.num_phys <= 1)
 +              return INT_PHY;
 +
 +      if (bp->link_vars.link_up) {
 +              sel_phy_idx = EXT_PHY1;
 +              /* If the link is SERDES, check whether EXT_PHY2 is the one */
 +              if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
 +                  (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
 +                      sel_phy_idx = EXT_PHY2;
 +      } else {
 +              switch (bnx2x_phy_selection(&bp->link_params)) {
 +              case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
 +              case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
 +              case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
 +                     sel_phy_idx = EXT_PHY1;
 +                     break;
 +              case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
 +              case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
 +                     sel_phy_idx = EXT_PHY2;
 +                     break;
 +              }
 +      }
 +
 +      return sel_phy_idx;
 +}
 +
 +int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
 +{
 +      u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
 +      /*
 +       * The selected active PHY index is always the one after swapping
 +       * (in case PHY swapping is enabled), so when swapping is enabled we
 +       * need to reverse the configuration.
 +       */
 +
 +      if (bp->link_params.multi_phy_config &
 +          PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
 +              if (sel_phy_idx == EXT_PHY1)
 +                      sel_phy_idx = EXT_PHY2;
 +              else if (sel_phy_idx == EXT_PHY2)
 +                      sel_phy_idx = EXT_PHY1;
 +      }
 +      return LINK_CONFIG_IDX(sel_phy_idx);
 +}
 +
 +#if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC)
 +int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
 +{
 +      struct bnx2x *bp = netdev_priv(dev);
 +      struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
 +
 +      switch (type) {
 +      case NETDEV_FCOE_WWNN:
 +              *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
 +                              cp->fcoe_wwn_node_name_lo);
 +              break;
 +      case NETDEV_FCOE_WWPN:
 +              *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
 +                              cp->fcoe_wwn_port_name_lo);
 +              break;
 +      default:
 +              return -EINVAL;
 +      }
 +
 +      return 0;
 +}
 +#endif
 +
 +/* called with rtnl_lock */
 +int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
 +{
 +      struct bnx2x *bp = netdev_priv(dev);
 +
 +      if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
 +              pr_err("Handling parity error recovery. Try again later\n");
 +              return -EAGAIN;
 +      }
 +
 +      if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
 +          ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
 +              return -EINVAL;
 +
 +      /* This does not race with packet allocation
 +       * because the actual alloc size is
 +       * only updated as part of load
 +       */
 +      dev->mtu = new_mtu;
 +
 +      return bnx2x_reload_if_running(dev);
 +}
 +
 +u32 bnx2x_fix_features(struct net_device *dev, u32 features)
 +{
 +      struct bnx2x *bp = netdev_priv(dev);
 +
 +      /* TPA requires Rx CSUM offloading */
 +      if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa)
 +              features &= ~NETIF_F_LRO;
 +
 +      return features;
 +}
 +
 +int bnx2x_set_features(struct net_device *dev, u32 features)
 +{
 +      struct bnx2x *bp = netdev_priv(dev);
 +      u32 flags = bp->flags;
 +      bool bnx2x_reload = false;
 +
 +      if (features & NETIF_F_LRO)
 +              flags |= TPA_ENABLE_FLAG;
 +      else
 +              flags &= ~TPA_ENABLE_FLAG;
 +
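 +      /* NETIF_F_LOOPBACK toggles internal (BMAC) loopback; changing it
 +       * requires reloading the nic for the new mode to take effect
 +       */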
 +      if (features & NETIF_F_LOOPBACK) {
 +              if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
 +                      bp->link_params.loopback_mode = LOOPBACK_BMAC;
 +                      bnx2x_reload = true;
 +              }
 +      } else {
 +              if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
 +                      bp->link_params.loopback_mode = LOOPBACK_NONE;
 +                      bnx2x_reload = true;
 +              }
 +      }
 +
 +      if (flags ^ bp->flags) {
 +              bp->flags = flags;
 +              bnx2x_reload = true;
 +      }
 +
 +      if (bnx2x_reload) {
 +              if (bp->recovery_state == BNX2X_RECOVERY_DONE)
 +                      return bnx2x_reload_if_running(dev);
 +              /* else: bnx2x_nic_load() will be called at end of recovery */
 +      }
 +
 +      return 0;
 +}
 +
 +void bnx2x_tx_timeout(struct net_device *dev)
 +{
 +      struct bnx2x *bp = netdev_priv(dev);
 +
 +#ifdef BNX2X_STOP_ON_ERROR
 +      if (!bp->panic)
 +              bnx2x_panic();
 +#endif
 +
 +      smp_mb__before_clear_bit();
 +      set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
 +      smp_mb__after_clear_bit();
 +
 +      /* This allows the netif to be shut down gracefully before resetting */
 +      schedule_delayed_work(&bp->sp_rtnl_task, 0);
 +}
 +
 +int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
 +{
 +      struct net_device *dev = pci_get_drvdata(pdev);
 +      struct bnx2x *bp;
 +
 +      if (!dev) {
 +              dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
 +              return -ENODEV;
 +      }
 +      bp = netdev_priv(dev);
 +
 +      rtnl_lock();
 +
 +      pci_save_state(pdev);
 +
 +      if (!netif_running(dev)) {
 +              rtnl_unlock();
 +              return 0;
 +      }
 +
 +      netif_device_detach(dev);
 +
 +      bnx2x_nic_unload(bp, UNLOAD_CLOSE);
 +
 +      bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
 +
 +      rtnl_unlock();
 +
 +      return 0;
 +}
 +
 +int bnx2x_resume(struct pci_dev *pdev)
 +{
 +      struct net_device *dev = pci_get_drvdata(pdev);
 +      struct bnx2x *bp;
 +      int rc;
 +
 +      if (!dev) {
 +              dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
 +              return -ENODEV;
 +      }
 +      bp = netdev_priv(dev);
 +
 +      if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
 +              pr_err("Handling parity error recovery. Try again later\n");
 +              return -EAGAIN;
 +      }
 +
 +      rtnl_lock();
 +
 +      pci_restore_state(pdev);
 +
 +      if (!netif_running(dev)) {
 +              rtnl_unlock();
 +              return 0;
 +      }
 +
 +      bnx2x_set_power_state(bp, PCI_D0);
 +      netif_device_attach(dev);
 +
 +      /* Since the chip was reset, clear the FW sequence number */
 +      bp->fw_seq = 0;
 +      rc = bnx2x_nic_load(bp, LOAD_OPEN);
 +
 +      rtnl_unlock();
 +
 +      return rc;
 +}
 +
 +void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
 +                            u32 cid)
 +{
 +      /* ustorm cxt validation */
 +      cxt->ustorm_ag_context.cdu_usage =
 +              CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
 +                      CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
 +      /* xstorm cxt validation */
 +      cxt->xstorm_ag_context.cdu_reserved =
 +              CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
 +                      CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
 +}
 +
 +static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
 +                                           u8 fw_sb_id, u8 sb_index,
 +                                           u8 ticks)
 +{
 +      u32 addr = BAR_CSTRORM_INTMEM +
 +                 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
 +      REG_WR8(bp, addr, ticks);
 +      DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
 +                        port, fw_sb_id, sb_index, ticks);
 +}
 +
 +static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
 +                                           u16 fw_sb_id, u8 sb_index,
 +                                           u8 disable)
 +{
 +      u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
 +      u32 addr = BAR_CSTRORM_INTMEM +
 +                 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
 +      u16 flags = REG_RD16(bp, addr);
 +      /* clear and set */
 +      flags &= ~HC_INDEX_DATA_HC_ENABLED;
 +      flags |= enable_flag;
 +      REG_WR16(bp, addr, flags);
 +      DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
 +                        port, fw_sb_id, sb_index, disable);
 +}
 +
 +void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
 +                                  u8 sb_index, u8 disable, u16 usec)
 +{
 +      int port = BP_PORT(bp);
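 +      /* one HC timer tick is BNX2X_BTR usec */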
 +      u8 ticks = usec / BNX2X_BTR;
 +
 +      storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
 +
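 +      /* a requested interval of zero also disables coalescing on this index */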
 +      disable = disable ? 1 : (usec ? 0 : 1);
 +      storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
 +}
index 9525b93,0000000..0b9bd55
mode 100644,000000..100644
--- /dev/null
@@@ -1,2510 -1,0 +1,2510 @@@
-       if (!CHIP_IS_E1x(bp)) {
 +/* bnx2x_dcb.c: Broadcom Everest network driver.
 + *
 + * Copyright 2009-2011 Broadcom Corporation
 + *
 + * Unless you and Broadcom execute a separate written software license
 + * agreement governing use of this software, this software is licensed to you
 + * under the terms of the GNU General Public License version 2, available
 + * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
 + *
 + * Notwithstanding the above, under no circumstances may you combine this
 + * software in any way with any other Broadcom software provided under a
 + * license other than the GPL, without Broadcom's express prior written
 + * consent.
 + *
 + * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 + * Written by: Dmitry Kravkov
 + *
 + */
 +
 +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 +
 +#include <linux/netdevice.h>
 +#include <linux/types.h>
 +#include <linux/errno.h>
 +#include <linux/rtnetlink.h>
 +#include <net/dcbnl.h>
 +
 +#include "bnx2x.h"
 +#include "bnx2x_cmn.h"
 +#include "bnx2x_dcb.h"
 +
 +/* forward declarations of dcbx related functions */
 +static int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp);
 +static void bnx2x_pfc_set_pfc(struct bnx2x *bp);
 +static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp);
 +static int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp);
 +static void bnx2x_dcbx_get_ets_pri_pg_tbl(struct bnx2x *bp,
 +                                        u32 *set_configuration_ets_pg,
 +                                        u32 *pri_pg_tbl);
 +static void bnx2x_dcbx_get_num_pg_traf_type(struct bnx2x *bp,
 +                                          u32 *pg_pri_orginal_spread,
 +                                          struct pg_help_data *help_data);
 +static void bnx2x_dcbx_fill_cos_params(struct bnx2x *bp,
 +                                     struct pg_help_data *help_data,
 +                                     struct dcbx_ets_feature *ets,
 +                                     u32 *pg_pri_orginal_spread);
 +static void bnx2x_dcbx_separate_pauseable_from_non(struct bnx2x *bp,
 +                              struct cos_help_data *cos_data,
 +                              u32 *pg_pri_orginal_spread,
 +                              struct dcbx_ets_feature *ets);
 +static void bnx2x_dcbx_fw_struct(struct bnx2x *bp,
 +                               struct bnx2x_func_tx_start_params*);
 +
 +/* helpers: read/write len bytes from addr into buff by REG_RD/REG_WR */
 +static void bnx2x_read_data(struct bnx2x *bp, u32 *buff,
 +                                 u32 addr, u32 len)
 +{
 +      int i;
 +      for (i = 0; i < len; i += 4, buff++)
 +              *buff = REG_RD(bp, addr + i);
 +}
 +
 +static void bnx2x_write_data(struct bnx2x *bp, u32 *buff,
 +                                  u32 addr, u32 len)
 +{
 +      int i;
 +      for (i = 0; i < len; i += 4, buff++)
 +              REG_WR(bp, addr + i, *buff);
 +}
 +
 +static void bnx2x_pfc_set(struct bnx2x *bp)
 +{
 +      struct bnx2x_nig_brb_pfc_port_params pfc_params = {0};
 +      u32 pri_bit, val = 0;
 +      int i;
 +
 +      pfc_params.num_of_rx_cos_priority_mask =
 +                                      bp->dcbx_port_params.ets.num_of_cos;
 +
 +      /* Tx COS configuration */
 +      for (i = 0; i < bp->dcbx_port_params.ets.num_of_cos; i++)
 +              /*
 +               * We configure only the pauseable bits (non-pauseable bits
 +               * aren't configured at all); this is done to avoid false
 +               * pauses from the network.
 +               */
 +              pfc_params.rx_cos_priority_mask[i] =
 +                      bp->dcbx_port_params.ets.cos_params[i].pri_bitmask
 +                              & DCBX_PFC_PRI_PAUSE_MASK(bp);
 +
 +      /*
 +       * Rx COS configuration: change the PFC RX configuration.
 +       * In RX, COS0 is always configured as lossy and COS1 as lossless.
 +       */
 +      for (i = 0; i < MAX_PFC_PRIORITIES; i++) {
 +              pri_bit = 1 << i;
 +
 +              if (pri_bit & DCBX_PFC_PRI_PAUSE_MASK(bp))
 +                      val |= 1 << (i * 4);
 +      }
 +
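 +      /* each priority occupies a 4-bit field in pkt_priority_to_cos;
 +       * pauseable priorities were mapped to COS1 above, the rest stay on COS0
 +       */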
 +      pfc_params.pkt_priority_to_cos = val;
 +
 +      /* RX COS0 */
 +      pfc_params.llfc_low_priority_classes = 0;
 +      /* RX COS1 */
 +      pfc_params.llfc_high_priority_classes = DCBX_PFC_PRI_PAUSE_MASK(bp);
 +
 +      /* BRB configuration */
 +      pfc_params.cos0_pauseable = false;
 +      pfc_params.cos1_pauseable = true;
 +
 +      bnx2x_acquire_phy_lock(bp);
 +      bp->link_params.feature_config_flags |= FEATURE_CONFIG_PFC_ENABLED;
 +      bnx2x_update_pfc(&bp->link_params, &bp->link_vars, &pfc_params);
 +      bnx2x_release_phy_lock(bp);
 +}
 +
 +static void bnx2x_pfc_clear(struct bnx2x *bp)
 +{
 +      struct bnx2x_nig_brb_pfc_port_params nig_params = {0};
 +      nig_params.pause_enable = 1;
 +#ifdef BNX2X_SAFC
 +      if (bp->flags & SAFC_TX_FLAG) {
 +              u32 high = 0, low = 0;
 +              int i;
 +
 +              for (i = 0; i < BNX2X_MAX_PRIORITY; i++) {
 +                      if (bp->pri_map[i] == 1)
 +                              high |= (1 << i);
 +                      if (bp->pri_map[i] == 0)
 +                              low |= (1 << i);
 +              }
 +
 +              nig_params.llfc_high_priority_classes = high;
 +              nig_params.llfc_low_priority_classes = low;
 +
 +              nig_params.pause_enable = 0;
 +              nig_params.llfc_enable = 1;
 +              nig_params.llfc_out_en = 1;
 +      }
 +#endif /* BNX2X_SAFC */
 +      bnx2x_acquire_phy_lock(bp);
 +      bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_PFC_ENABLED;
 +      bnx2x_update_pfc(&bp->link_params, &bp->link_vars, &nig_params);
 +      bnx2x_release_phy_lock(bp);
 +}
 +
 +static void  bnx2x_dump_dcbx_drv_param(struct bnx2x *bp,
 +                                     struct dcbx_features *features,
 +                                     u32 error)
 +{
 +      u8 i = 0;
 +      DP(NETIF_MSG_LINK, "local_mib.error %x\n", error);
 +
 +      /* PG */
 +      DP(NETIF_MSG_LINK,
 +         "local_mib.features.ets.enabled %x\n", features->ets.enabled);
 +      for (i = 0; i < DCBX_MAX_NUM_PG_BW_ENTRIES; i++)
 +              DP(NETIF_MSG_LINK,
 +                 "local_mib.features.ets.pg_bw_tbl[%d] %d\n", i,
 +                 DCBX_PG_BW_GET(features->ets.pg_bw_tbl, i));
 +      for (i = 0; i < DCBX_MAX_NUM_PRI_PG_ENTRIES; i++)
 +              DP(NETIF_MSG_LINK,
 +                 "local_mib.features.ets.pri_pg_tbl[%d] %d\n", i,
 +                 DCBX_PRI_PG_GET(features->ets.pri_pg_tbl, i));
 +
 +      /* pfc */
 +      DP(NETIF_MSG_LINK, "dcbx_features.pfc.pri_en_bitmap %x\n",
 +                                      features->pfc.pri_en_bitmap);
 +      DP(NETIF_MSG_LINK, "dcbx_features.pfc.pfc_caps %x\n",
 +                                      features->pfc.pfc_caps);
 +      DP(NETIF_MSG_LINK, "dcbx_features.pfc.enabled %x\n",
 +                                      features->pfc.enabled);
 +
 +      DP(NETIF_MSG_LINK, "dcbx_features.app.default_pri %x\n",
 +                                      features->app.default_pri);
 +      DP(NETIF_MSG_LINK, "dcbx_features.app.tc_supported %x\n",
 +                                      features->app.tc_supported);
 +      DP(NETIF_MSG_LINK, "dcbx_features.app.enabled %x\n",
 +                                      features->app.enabled);
 +      for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) {
 +              DP(NETIF_MSG_LINK,
 +                 "dcbx_features.app.app_pri_tbl[%x].app_id %x\n",
 +                 i, features->app.app_pri_tbl[i].app_id);
 +              DP(NETIF_MSG_LINK,
 +                 "dcbx_features.app.app_pri_tbl[%x].pri_bitmap %x\n",
 +                 i, features->app.app_pri_tbl[i].pri_bitmap);
 +              DP(NETIF_MSG_LINK,
 +                 "dcbx_features.app.app_pri_tbl[%x].appBitfield %x\n",
 +                 i, features->app.app_pri_tbl[i].appBitfield);
 +      }
 +}
 +
 +static void bnx2x_dcbx_get_ap_priority(struct bnx2x *bp,
 +                                     u8 pri_bitmap,
 +                                     u8 llfc_traf_type)
 +{
 +      u32 pri = MAX_PFC_PRIORITIES;
 +      u32 index = MAX_PFC_PRIORITIES - 1;
 +      u32 pri_mask;
 +      u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority;
 +
 +      /* Choose the highest priority */
 +      while ((MAX_PFC_PRIORITIES == pri) && (0 != index)) {
 +              pri_mask = 1 << index;
 +              if (GET_FLAGS(pri_bitmap, pri_mask))
 +                      pri = index;
 +              index--;
 +      }
 +
 +      if (pri < MAX_PFC_PRIORITIES)
 +              ttp[llfc_traf_type] = max_t(u32, ttp[llfc_traf_type], pri);
 +}
 +
 +static void bnx2x_dcbx_get_ap_feature(struct bnx2x *bp,
 +                                 struct dcbx_app_priority_feature *app,
 +                                 u32 error) {
 +      u8 index;
 +      u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority;
 +
 +      if (GET_FLAGS(error, DCBX_LOCAL_APP_ERROR))
 +              DP(NETIF_MSG_LINK, "DCBX_LOCAL_APP_ERROR\n");
 +
 +      if (GET_FLAGS(error, DCBX_LOCAL_APP_MISMATCH))
 +              DP(NETIF_MSG_LINK, "DCBX_LOCAL_APP_MISMATCH\n");
 +
 +      if (app->enabled &&
 +          !GET_FLAGS(error, DCBX_LOCAL_APP_ERROR | DCBX_LOCAL_APP_MISMATCH)) {
 +
 +              bp->dcbx_port_params.app.enabled = true;
 +
 +              for (index = 0 ; index < LLFC_DRIVER_TRAFFIC_TYPE_MAX; index++)
 +                      ttp[index] = 0;
 +
 +              if (app->default_pri < MAX_PFC_PRIORITIES)
 +                      ttp[LLFC_TRAFFIC_TYPE_NW] = app->default_pri;
 +
 +              for (index = 0 ; index < DCBX_MAX_APP_PROTOCOL; index++) {
 +                      struct dcbx_app_priority_entry *entry =
 +                                                      app->app_pri_tbl;
 +
 +                      if (GET_FLAGS(entry[index].appBitfield,
 +                                   DCBX_APP_SF_ETH_TYPE) &&
 +                         ETH_TYPE_FCOE == entry[index].app_id)
 +                              bnx2x_dcbx_get_ap_priority(bp,
 +                                              entry[index].pri_bitmap,
 +                                              LLFC_TRAFFIC_TYPE_FCOE);
 +
 +                      if (GET_FLAGS(entry[index].appBitfield,
 +                                   DCBX_APP_SF_PORT) &&
 +                         TCP_PORT_ISCSI == entry[index].app_id)
 +                              bnx2x_dcbx_get_ap_priority(bp,
 +                                              entry[index].pri_bitmap,
 +                                              LLFC_TRAFFIC_TYPE_ISCSI);
 +              }
 +      } else {
 +              DP(NETIF_MSG_LINK, "DCBX_LOCAL_APP_DISABLED\n");
 +              bp->dcbx_port_params.app.enabled = false;
 +              for (index = 0 ; index < LLFC_DRIVER_TRAFFIC_TYPE_MAX; index++)
 +                      ttp[index] = INVALID_TRAFFIC_TYPE_PRIORITY;
 +      }
 +}
 +
 +static void bnx2x_dcbx_get_ets_feature(struct bnx2x *bp,
 +                                     struct dcbx_ets_feature *ets,
 +                                     u32 error) {
 +      int i = 0;
 +      u32 pg_pri_orginal_spread[DCBX_MAX_NUM_PG_BW_ENTRIES] = {0};
 +      struct pg_help_data pg_help_data;
 +      struct bnx2x_dcbx_cos_params *cos_params =
 +                      bp->dcbx_port_params.ets.cos_params;
 +
 +      memset(&pg_help_data, 0, sizeof(struct pg_help_data));
 +
 +      if (GET_FLAGS(error, DCBX_LOCAL_ETS_ERROR))
 +              DP(NETIF_MSG_LINK, "DCBX_LOCAL_ETS_ERROR\n");
 +
 +      /* Clean up old settings of ets on COS */
 +      for (i = 0; i < ARRAY_SIZE(bp->dcbx_port_params.ets.cos_params); i++) {
 +              cos_params[i].pauseable = false;
 +              cos_params[i].strict = BNX2X_DCBX_STRICT_INVALID;
 +              cos_params[i].bw_tbl = DCBX_INVALID_COS_BW;
 +              cos_params[i].pri_bitmask = 0;
 +      }
 +
 +      if (bp->dcbx_port_params.app.enabled &&
 +         !GET_FLAGS(error, DCBX_LOCAL_ETS_ERROR) &&
 +         ets->enabled) {
 +              DP(NETIF_MSG_LINK, "DCBX_LOCAL_ETS_ENABLE\n");
 +              bp->dcbx_port_params.ets.enabled = true;
 +
 +              bnx2x_dcbx_get_ets_pri_pg_tbl(bp,
 +                                            pg_pri_orginal_spread,
 +                                            ets->pri_pg_tbl);
 +
 +              bnx2x_dcbx_get_num_pg_traf_type(bp,
 +                                              pg_pri_orginal_spread,
 +                                              &pg_help_data);
 +
 +              bnx2x_dcbx_fill_cos_params(bp, &pg_help_data,
 +                                         ets, pg_pri_orginal_spread);
 +
 +      } else {
 +              DP(NETIF_MSG_LINK, "DCBX_LOCAL_ETS_DISABLED\n");
 +              bp->dcbx_port_params.ets.enabled = false;
 +              ets->pri_pg_tbl[0] = 0;
 +
 +              for (i = 0; i < DCBX_MAX_NUM_PRI_PG_ENTRIES; i++)
 +                      DCBX_PG_BW_SET(ets->pg_bw_tbl, i, 1);
 +      }
 +}
 +
 +static void  bnx2x_dcbx_get_pfc_feature(struct bnx2x *bp,
 +                                      struct dcbx_pfc_feature *pfc, u32 error)
 +{
 +      if (GET_FLAGS(error, DCBX_LOCAL_PFC_ERROR))
 +              DP(NETIF_MSG_LINK, "DCBX_LOCAL_PFC_ERROR\n");
 +
 +      if (bp->dcbx_port_params.app.enabled &&
 +         !GET_FLAGS(error, DCBX_LOCAL_PFC_ERROR | DCBX_LOCAL_PFC_MISMATCH) &&
 +         pfc->enabled) {
 +              bp->dcbx_port_params.pfc.enabled = true;
 +              bp->dcbx_port_params.pfc.priority_non_pauseable_mask =
 +                      ~(pfc->pri_en_bitmap);
 +      } else {
 +              DP(NETIF_MSG_LINK, "DCBX_LOCAL_PFC_DISABLED\n");
 +              bp->dcbx_port_params.pfc.enabled = false;
 +              bp->dcbx_port_params.pfc.priority_non_pauseable_mask = 0;
 +      }
 +}
 +
 +/* maps unmapped priorities to the same COS as L2 */
 +static void bnx2x_dcbx_map_nw(struct bnx2x *bp)
 +{
 +      int i;
 +      u32 unmapped = (1 << MAX_PFC_PRIORITIES) - 1; /* all ones */
 +      u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority;
 +      u32 nw_prio = 1 << ttp[LLFC_TRAFFIC_TYPE_NW];
 +      struct bnx2x_dcbx_cos_params *cos_params =
 +                      bp->dcbx_port_params.ets.cos_params;
 +
 +      /* get unmapped priorities by clearing mapped bits */
 +      for (i = 0; i < LLFC_DRIVER_TRAFFIC_TYPE_MAX; i++)
 +              unmapped &= ~(1 << ttp[i]);
 +
 +      /* find cos for nw prio and extend it with unmapped */
 +      for (i = 0; i < ARRAY_SIZE(bp->dcbx_port_params.ets.cos_params); i++) {
 +              if (cos_params[i].pri_bitmask & nw_prio) {
 +                      /* extend the bitmask with unmapped */
 +                      DP(NETIF_MSG_LINK,
 +                         "cos %d extended with 0x%08x\n", i, unmapped);
 +                      cos_params[i].pri_bitmask |= unmapped;
 +                      break;
 +              }
 +      }
 +}
 +
 +static void bnx2x_get_dcbx_drv_param(struct bnx2x *bp,
 +                                   struct dcbx_features *features,
 +                                   u32 error)
 +{
 +      bnx2x_dcbx_get_ap_feature(bp, &features->app, error);
 +
 +      bnx2x_dcbx_get_pfc_feature(bp, &features->pfc, error);
 +
 +      bnx2x_dcbx_get_ets_feature(bp, &features->ets, error);
 +
 +      bnx2x_dcbx_map_nw(bp);
 +}
 +
 +#define DCBX_LOCAL_MIB_MAX_TRY_READ           (100)
 +static int bnx2x_dcbx_read_mib(struct bnx2x *bp,
 +                             u32 *base_mib_addr,
 +                             u32 offset,
 +                             int read_mib_type)
 +{
 +      int max_try_read = 0;
 +      u32 mib_size, prefix_seq_num, suffix_seq_num;
 +      struct lldp_remote_mib *remote_mib;
 +      struct lldp_local_mib *local_mib;
 +
 +      switch (read_mib_type) {
 +      case DCBX_READ_LOCAL_MIB:
 +              mib_size = sizeof(struct lldp_local_mib);
 +              break;
 +      case DCBX_READ_REMOTE_MIB:
 +              mib_size = sizeof(struct lldp_remote_mib);
 +              break;
 +      default:
 +              return 1; /*error*/
 +      }
 +
 +      offset += BP_PORT(bp) * mib_size;
 +
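 +      /* the MIB is consistent only when the prefix and suffix sequence
 +       * numbers match; retry while the FW may still be updating it
 +       */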
 +      do {
 +              bnx2x_read_data(bp, base_mib_addr, offset, mib_size);
 +
 +              max_try_read++;
 +
 +              switch (read_mib_type) {
 +              case DCBX_READ_LOCAL_MIB:
 +                      local_mib = (struct lldp_local_mib *) base_mib_addr;
 +                      prefix_seq_num = local_mib->prefix_seq_num;
 +                      suffix_seq_num = local_mib->suffix_seq_num;
 +                      break;
 +              case DCBX_READ_REMOTE_MIB:
 +                      remote_mib = (struct lldp_remote_mib *) base_mib_addr;
 +                      prefix_seq_num = remote_mib->prefix_seq_num;
 +                      suffix_seq_num = remote_mib->suffix_seq_num;
 +                      break;
 +              default:
 +                      return 1; /*error*/
 +              }
 +      } while ((prefix_seq_num != suffix_seq_num) &&
 +             (max_try_read < DCBX_LOCAL_MIB_MAX_TRY_READ));
 +
 +      if (max_try_read >= DCBX_LOCAL_MIB_MAX_TRY_READ) {
 +              BNX2X_ERR("MIB could not be read\n");
 +              return 1;
 +      }
 +
 +      return 0;
 +}
 +
 +static void bnx2x_pfc_set_pfc(struct bnx2x *bp)
 +{
 +      if (bp->dcbx_port_params.pfc.enabled &&
 +          !(bp->dcbx_error & DCBX_REMOTE_MIB_ERROR))
 +              /*
 +               * 1. Fills up common PFC structures if required
 +               * 2. Configure NIG, MAC and BRB via the elink
 +               */
 +              bnx2x_pfc_set(bp);
 +      else
 +              bnx2x_pfc_clear(bp);
 +}
 +
 +static int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp)
 +{
 +      struct bnx2x_func_state_params func_params = {0};
 +
 +      func_params.f_obj = &bp->func_obj;
 +      func_params.cmd = BNX2X_F_CMD_TX_STOP;
 +
 +      DP(NETIF_MSG_LINK, "STOP TRAFFIC\n");
 +      return bnx2x_func_state_change(bp, &func_params);
 +}
 +
 +static int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp)
 +{
 +      struct bnx2x_func_state_params func_params = {0};
 +      struct bnx2x_func_tx_start_params *tx_params =
 +              &func_params.params.tx_start;
 +
 +      func_params.f_obj = &bp->func_obj;
 +      func_params.cmd = BNX2X_F_CMD_TX_START;
 +
 +      bnx2x_dcbx_fw_struct(bp, tx_params);
 +
 +      DP(NETIF_MSG_LINK, "START TRAFFIC\n");
 +      return bnx2x_func_state_change(bp, &func_params);
 +}
 +
 +static void bnx2x_dcbx_2cos_limit_update_ets_config(struct bnx2x *bp)
 +{
 +      struct bnx2x_dcbx_pg_params *ets = &(bp->dcbx_port_params.ets);
 +      int rc = 0;
 +
 +      if (ets->num_of_cos == 0 || ets->num_of_cos > DCBX_COS_MAX_NUM_E2) {
 +              BNX2X_ERR("Illegal number of COSes %d\n", ets->num_of_cos);
 +              return;
 +      }
 +
 +      /* valid COS entries */
 +      if (ets->num_of_cos == 1)   /* no ETS */
 +              return;
 +
 +      /* sanity */
 +      if (((BNX2X_DCBX_STRICT_INVALID == ets->cos_params[0].strict) &&
 +           (DCBX_INVALID_COS_BW == ets->cos_params[0].bw_tbl)) ||
 +          ((BNX2X_DCBX_STRICT_INVALID == ets->cos_params[1].strict) &&
 +           (DCBX_INVALID_COS_BW == ets->cos_params[1].bw_tbl))) {
 +              BNX2X_ERR("all COS should have at least bw_limit or strict "
 +                          "ets->cos_params[0].strict= %x "
 +                          "ets->cos_params[0].bw_tbl= %x "
 +                          "ets->cos_params[1].strict= %x "
 +                          "ets->cos_params[1].bw_tbl= %x\n",
 +                        ets->cos_params[0].strict,
 +                        ets->cos_params[0].bw_tbl,
 +                        ets->cos_params[1].strict,
 +                        ets->cos_params[1].bw_tbl);
 +              return;
 +      }
 +      /* If both COSes have a valid bw_tbl then BW configuration takes effect */
 +      if ((DCBX_INVALID_COS_BW != ets->cos_params[0].bw_tbl) &&
 +          (DCBX_INVALID_COS_BW != ets->cos_params[1].bw_tbl)) {
 +              u32 bw_tbl_0 = ets->cos_params[0].bw_tbl;
 +              u32 bw_tbl_1 = ets->cos_params[1].bw_tbl;
 +              /* Do not allow a 0-100 configuration since the PBF does not
 +               * support it; force 1-99 instead.
 +               */
 +              if (bw_tbl_0 == 0) {
 +                      bw_tbl_0 = 1;
 +                      bw_tbl_1 = 99;
 +              } else if (bw_tbl_1 == 0) {
 +                      bw_tbl_1 = 1;
 +                      bw_tbl_0 = 99;
 +              }
 +
 +              bnx2x_ets_bw_limit(&bp->link_params, bw_tbl_0, bw_tbl_1);
 +      } else {
 +              if (ets->cos_params[0].strict == BNX2X_DCBX_STRICT_COS_HIGHEST)
 +                      rc = bnx2x_ets_strict(&bp->link_params, 0);
 +              else if (ets->cos_params[1].strict
 +                                      == BNX2X_DCBX_STRICT_COS_HIGHEST)
 +                      rc = bnx2x_ets_strict(&bp->link_params, 1);
 +              if (rc)
 +                      BNX2X_ERR("update_ets_params failed\n");
 +      }
 +}
 +
 +/*
 + * In E3B0 the configuration may have more than 2 COS.
 + */
 +void bnx2x_dcbx_update_ets_config(struct bnx2x *bp)
 +{
 +      struct bnx2x_dcbx_pg_params *ets = &(bp->dcbx_port_params.ets);
 +      struct bnx2x_ets_params ets_params = { 0 };
 +      u8 i;
 +
 +      ets_params.num_of_cos = ets->num_of_cos;
 +
 +      for (i = 0; i < ets->num_of_cos; i++) {
 +              /* COS is SP */
 +              if (ets->cos_params[i].strict != BNX2X_DCBX_STRICT_INVALID) {
 +                      if (ets->cos_params[i].bw_tbl != DCBX_INVALID_COS_BW) {
 +                              BNX2X_ERR("COS can't be both SP and BW\n");
 +                              return;
 +                      }
 +
 +                      ets_params.cos[i].state = bnx2x_cos_state_strict;
 +                      ets_params.cos[i].params.sp_params.pri =
 +                                              ets->cos_params[i].strict;
 +              } else { /* COS is BW */
 +                      if (ets->cos_params[i].bw_tbl == DCBX_INVALID_COS_BW) {
 +                              BNX2X_ERR("COS must be either SP or BW\n");
 +                              return;
 +                      }
 +                      ets_params.cos[i].state = bnx2x_cos_state_bw;
 +                      ets_params.cos[i].params.bw_params.bw =
 +                                              (u8)ets->cos_params[i].bw_tbl;
 +              }
 +      }
 +
 +      /* Configure the ETS in HW */
 +      if (bnx2x_ets_e3b0_config(&bp->link_params, &bp->link_vars,
 +                                &ets_params)) {
 +              BNX2X_ERR("bnx2x_ets_e3b0_config failed\n");
 +              bnx2x_ets_disabled(&bp->link_params, &bp->link_vars);
 +      }
 +}
 +
 +static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp)
 +{
 +      bnx2x_ets_disabled(&bp->link_params, &bp->link_vars);
 +
 +      if (!bp->dcbx_port_params.ets.enabled ||
 +          (bp->dcbx_error & DCBX_REMOTE_MIB_ERROR))
 +              return;
 +
 +      if (CHIP_IS_E3B0(bp))
 +              bnx2x_dcbx_update_ets_config(bp);
 +      else
 +              bnx2x_dcbx_2cos_limit_update_ets_config(bp);
 +}
 +
 +#ifdef BCM_DCBNL
 +static int bnx2x_dcbx_read_shmem_remote_mib(struct bnx2x *bp)
 +{
 +      struct lldp_remote_mib remote_mib = {0};
 +      u32 dcbx_remote_mib_offset = SHMEM2_RD(bp, dcbx_remote_mib_offset);
 +      int rc;
 +
 +      DP(NETIF_MSG_LINK, "dcbx_remote_mib_offset 0x%x\n",
 +         dcbx_remote_mib_offset);
 +
 +      if (SHMEM_DCBX_REMOTE_MIB_NONE == dcbx_remote_mib_offset) {
 +              BNX2X_ERR("FW doesn't support dcbx_remote_mib_offset\n");
 +              return -EINVAL;
 +      }
 +
 +      rc = bnx2x_dcbx_read_mib(bp, (u32 *)&remote_mib, dcbx_remote_mib_offset,
 +                               DCBX_READ_REMOTE_MIB);
 +
 +      if (rc) {
 +              BNX2X_ERR("Failed to read remote mib from FW\n");
 +              return rc;
 +      }
 +
 +      /* save features and flags */
 +      bp->dcbx_remote_feat = remote_mib.features;
 +      bp->dcbx_remote_flags = remote_mib.flags;
 +      return 0;
 +}
 +#endif
 +
 +static int bnx2x_dcbx_read_shmem_neg_results(struct bnx2x *bp)
 +{
 +      struct lldp_local_mib local_mib = {0};
 +      u32 dcbx_neg_res_offset = SHMEM2_RD(bp, dcbx_neg_res_offset);
 +      int rc;
 +
 +      DP(NETIF_MSG_LINK, "dcbx_neg_res_offset 0x%x\n", dcbx_neg_res_offset);
 +
 +      if (SHMEM_DCBX_NEG_RES_NONE == dcbx_neg_res_offset) {
 +              BNX2X_ERR("FW doesn't support dcbx_neg_res_offset\n");
 +              return -EINVAL;
 +      }
 +
 +      rc = bnx2x_dcbx_read_mib(bp, (u32 *)&local_mib, dcbx_neg_res_offset,
 +                               DCBX_READ_LOCAL_MIB);
 +
 +      if (rc) {
 +              BNX2X_ERR("Failed to read local mib from FW\n");
 +              return rc;
 +      }
 +
 +      /* save features and error */
 +      bp->dcbx_local_feat = local_mib.features;
 +      bp->dcbx_error = local_mib.error;
 +      return 0;
 +}
 +
 +#ifdef BCM_DCBNL
 +static inline
 +u8 bnx2x_dcbx_dcbnl_app_up(struct dcbx_app_priority_entry *ent)
 +{
 +      u8 pri;
 +
 +      /* Choose the highest priority */
 +      for (pri = MAX_PFC_PRIORITIES - 1; pri > 0; pri--)
 +              if (ent->pri_bitmap & (1 << pri))
 +                      break;
 +      return pri;
 +}
 +
 +static inline
 +u8 bnx2x_dcbx_dcbnl_app_idtype(struct dcbx_app_priority_entry *ent)
 +{
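 +      /* a port-based entry maps to DCB_APP_IDTYPE_PORTNUM, anything else
 +       * is treated as an ethertype entry
 +       */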
 +      return ((ent->appBitfield & DCBX_APP_ENTRY_SF_MASK) ==
 +              DCBX_APP_SF_PORT) ? DCB_APP_IDTYPE_PORTNUM :
 +              DCB_APP_IDTYPE_ETHTYPE;
 +}
 +
 +int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall)
 +{
 +      int i, err = 0;
 +
 +      for (i = 0; i < DCBX_MAX_APP_PROTOCOL && err == 0; i++) {
 +              struct dcbx_app_priority_entry *ent =
 +                      &bp->dcbx_local_feat.app.app_pri_tbl[i];
 +
 +              if (ent->appBitfield & DCBX_APP_ENTRY_VALID) {
 +                      u8 up = bnx2x_dcbx_dcbnl_app_up(ent);
 +
 +                      /* avoid invalid user-priority */
 +                      if (up) {
 +                              struct dcb_app app;
 +                              app.selector = bnx2x_dcbx_dcbnl_app_idtype(ent);
 +                              app.protocol = ent->app_id;
 +                              app.priority = delall ? 0 : up;
 +                              err = dcb_setapp(bp->dev, &app);
 +                      }
 +              }
 +      }
 +      return err;
 +}
 +#endif
 +
 +static inline void bnx2x_update_drv_flags(struct bnx2x *bp, u32 flags, u32 set)
 +{
 +      if (SHMEM2_HAS(bp, drv_flags)) {
 +              u32 drv_flags;
 +              bnx2x_acquire_hw_lock(bp, HW_LOCK_DRV_FLAGS);
 +              drv_flags = SHMEM2_RD(bp, drv_flags);
 +
 +              if (set)
 +                      SET_FLAGS(drv_flags, flags);
 +              else
 +                      RESET_FLAGS(drv_flags, flags);
 +
 +              SHMEM2_WR(bp, drv_flags, drv_flags);
 +              DP(NETIF_MSG_HW, "drv_flags 0x%08x\n", drv_flags);
 +              bnx2x_release_hw_lock(bp, HW_LOCK_DRV_FLAGS);
 +      }
 +}
 +
 +static inline void bnx2x_dcbx_update_tc_mapping(struct bnx2x *bp)
 +{
 +      u8 prio, cos;
 +      for (cos = 0; cos < bp->dcbx_port_params.ets.num_of_cos; cos++) {
 +              for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
 +                      if (bp->dcbx_port_params.ets.cos_params[cos].pri_bitmask
 +                          & (1 << prio)) {
 +                              bp->prio_to_cos[prio] = cos;
 +                              DP(NETIF_MSG_LINK,
 +                                 "tx_mapping %d --> %d\n", prio, cos);
 +                      }
 +              }
 +      }
 +
 +      /* setup tc must be called under rtnl lock, but we can't take it here
 +       * as we are handling an attention on a work queue which must be
 +       * flushed in some rtnl-locked contexts (e.g. if down)
 +       */
 +      if (!test_and_set_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state))
 +              schedule_delayed_work(&bp->sp_rtnl_task, 0);
 +}
 +
 +void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
 +{
 +      switch (state) {
 +      case BNX2X_DCBX_STATE_NEG_RECEIVED:
 +              {
 +                      DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_NEG_RECEIVED\n");
 +#ifdef BCM_DCBNL
 +                      /*
 +                       * Delete app tlvs from dcbnl before reading new
 +                       * negotiation results
 +                       */
 +                      bnx2x_dcbnl_update_applist(bp, true);
 +
 +                      /* Read remote mib if dcbx is in the FW */
 +                      if (bnx2x_dcbx_read_shmem_remote_mib(bp))
 +                              return;
 +#endif
 +                      /* Read neg results if dcbx is in the FW */
 +                      if (bnx2x_dcbx_read_shmem_neg_results(bp))
 +                              return;
 +
 +                      bnx2x_dump_dcbx_drv_param(bp, &bp->dcbx_local_feat,
 +                                                bp->dcbx_error);
 +
 +                      bnx2x_get_dcbx_drv_param(bp, &bp->dcbx_local_feat,
 +                                               bp->dcbx_error);
 +
 +                      /* mark DCBX result for PMF migration */
 +                      bnx2x_update_drv_flags(bp, DRV_FLAGS_DCB_CONFIGURED, 1);
 +#ifdef BCM_DCBNL
 +                      /* Add new app tlvs to dcbnl */
 +                      bnx2x_dcbnl_update_applist(bp, false);
 +#endif
 +                      bnx2x_dcbx_stop_hw_tx(bp);
 +
 +                      /* reconfigure the netdevice with the results of the new
 +                       * dcbx negotiation.
 +                       */
 +                      bnx2x_dcbx_update_tc_mapping(bp);
 +
 +                      return;
 +              }
 +      case BNX2X_DCBX_STATE_TX_PAUSED:
 +              DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_TX_PAUSED\n");
 +              bnx2x_pfc_set_pfc(bp);
 +
 +              bnx2x_dcbx_update_ets_params(bp);
 +              bnx2x_dcbx_resume_hw_tx(bp);
 +              return;
 +      case BNX2X_DCBX_STATE_TX_RELEASED:
 +              DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_TX_RELEASED\n");
 +              bnx2x_fw_command(bp, DRV_MSG_CODE_DCBX_PMF_DRV_OK, 0);
 +#ifdef BCM_DCBNL
 +              /*
 +               * Send a notification for the new negotiated parameters
 +               */
 +              dcbnl_cee_notify(bp->dev, RTM_GETDCB, DCB_CMD_CEE_GET, 0, 0);
 +#endif
 +              return;
 +      default:
 +              BNX2X_ERR("Unknown DCBX_STATE\n");
 +      }
 +}
 +
 +#define LLDP_ADMIN_MIB_OFFSET(bp)     (PORT_MAX*sizeof(struct lldp_params) + \
 +                                    BP_PORT(bp)*sizeof(struct lldp_admin_mib))
 +
 +static void bnx2x_dcbx_admin_mib_updated_params(struct bnx2x *bp,
 +                              u32 dcbx_lldp_params_offset)
 +{
 +      struct lldp_admin_mib admin_mib;
 +      u32 i, other_traf_type = PREDEFINED_APP_IDX_MAX, traf_type = 0;
 +      u32 offset = dcbx_lldp_params_offset + LLDP_ADMIN_MIB_OFFSET(bp);
 +
 +      /* shortcuts */
 +      struct dcbx_features *af = &admin_mib.features;
 +      struct bnx2x_config_dcbx_params *dp = &bp->dcbx_config_params;
 +
 +      memset(&admin_mib, 0, sizeof(struct lldp_admin_mib));
 +
 +      /* Read the data first */
 +      bnx2x_read_data(bp, (u32 *)&admin_mib, offset,
 +                      sizeof(struct lldp_admin_mib));
 +
 +      if (bp->dcbx_enabled == BNX2X_DCBX_ENABLED_ON_NEG_ON)
 +              SET_FLAGS(admin_mib.ver_cfg_flags, DCBX_DCBX_ENABLED);
 +      else
 +              RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_DCBX_ENABLED);
 +
 +      if (dp->overwrite_settings == BNX2X_DCBX_OVERWRITE_SETTINGS_ENABLE) {
 +
 +              RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_CEE_VERSION_MASK);
 +              admin_mib.ver_cfg_flags |=
 +                      (dp->admin_dcbx_version << DCBX_CEE_VERSION_SHIFT) &
 +                       DCBX_CEE_VERSION_MASK;
 +
 +              af->ets.enabled = (u8)dp->admin_ets_enable;
 +
 +              af->pfc.enabled = (u8)dp->admin_pfc_enable;
 +
 +              /* For IEEE dp->admin_tc_supported_tx_enable */
 +              if (dp->admin_ets_configuration_tx_enable)
 +                      SET_FLAGS(admin_mib.ver_cfg_flags,
 +                                DCBX_ETS_CONFIG_TX_ENABLED);
 +              else
 +                      RESET_FLAGS(admin_mib.ver_cfg_flags,
 +                                  DCBX_ETS_CONFIG_TX_ENABLED);
 +              /* For IEEE admin_ets_recommendation_tx_enable */
 +              if (dp->admin_pfc_tx_enable)
 +                      SET_FLAGS(admin_mib.ver_cfg_flags,
 +                                DCBX_PFC_CONFIG_TX_ENABLED);
 +              else
 +                      RESET_FLAGS(admin_mib.ver_cfg_flags,
 +                                DCBX_PFC_CONFIG_TX_ENABLED);
 +
 +              if (dp->admin_application_priority_tx_enable)
 +                      SET_FLAGS(admin_mib.ver_cfg_flags,
 +                                DCBX_APP_CONFIG_TX_ENABLED);
 +              else
 +                      RESET_FLAGS(admin_mib.ver_cfg_flags,
 +                                DCBX_APP_CONFIG_TX_ENABLED);
 +
 +              if (dp->admin_ets_willing)
 +                      SET_FLAGS(admin_mib.ver_cfg_flags, DCBX_ETS_WILLING);
 +              else
 +                      RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_ETS_WILLING);
 +              /* For IEEE admin_ets_reco_valid */
 +              if (dp->admin_pfc_willing)
 +                      SET_FLAGS(admin_mib.ver_cfg_flags, DCBX_PFC_WILLING);
 +              else
 +                      RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_PFC_WILLING);
 +
 +              if (dp->admin_app_priority_willing)
 +                      SET_FLAGS(admin_mib.ver_cfg_flags, DCBX_APP_WILLING);
 +              else
 +                      RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_APP_WILLING);
 +
 +              for (i = 0 ; i < DCBX_MAX_NUM_PG_BW_ENTRIES; i++) {
 +                      DCBX_PG_BW_SET(af->ets.pg_bw_tbl, i,
 +                              (u8)dp->admin_configuration_bw_precentage[i]);
 +
 +                      DP(NETIF_MSG_LINK, "pg_bw_tbl[%d] = %02x\n",
 +                         i, DCBX_PG_BW_GET(af->ets.pg_bw_tbl, i));
 +              }
 +
 +              for (i = 0; i < DCBX_MAX_NUM_PRI_PG_ENTRIES; i++) {
 +                      DCBX_PRI_PG_SET(af->ets.pri_pg_tbl, i,
 +                                      (u8)dp->admin_configuration_ets_pg[i]);
 +
 +                      DP(NETIF_MSG_LINK, "pri_pg_tbl[%d] = %02x\n",
 +                         i, DCBX_PRI_PG_GET(af->ets.pri_pg_tbl, i));
 +              }
 +
 +              /* For IEEE admin_recommendation_bw_precentage
 +               * For IEEE admin_recommendation_ets_pg
 +               */
 +              af->pfc.pri_en_bitmap = (u8)dp->admin_pfc_bitmap;
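 +              /* fill the APP table: FCoE and iSCSI use their predefined
 +               * slots, any other valid entry takes the next free slot
 +               */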
 +              for (i = 0; i < 4; i++) {
 +                      if (dp->admin_priority_app_table[i].valid) {
 +                              struct bnx2x_admin_priority_app_table *table =
 +                                      dp->admin_priority_app_table;
 +                              if ((ETH_TYPE_FCOE == table[i].app_id) &&
 +                                 (TRAFFIC_TYPE_ETH == table[i].traffic_type))
 +                                      traf_type = FCOE_APP_IDX;
 +                              else if ((TCP_PORT_ISCSI == table[i].app_id) &&
 +                                 (TRAFFIC_TYPE_PORT == table[i].traffic_type))
 +                                      traf_type = ISCSI_APP_IDX;
 +                              else
 +                                      traf_type = other_traf_type++;
 +
 +                              af->app.app_pri_tbl[traf_type].app_id =
 +                                      table[i].app_id;
 +
 +                              af->app.app_pri_tbl[traf_type].pri_bitmap =
 +                                      (u8)(1 << table[i].priority);
 +
 +                              af->app.app_pri_tbl[traf_type].appBitfield =
 +                                  (DCBX_APP_ENTRY_VALID);
 +
 +                              af->app.app_pri_tbl[traf_type].appBitfield |=
 +                                 (TRAFFIC_TYPE_ETH == table[i].traffic_type) ?
 +                                      DCBX_APP_SF_ETH_TYPE : DCBX_APP_SF_PORT;
 +                      }
 +              }
 +
 +              af->app.default_pri = (u8)dp->admin_default_priority;
 +
 +      }
 +
 +      /* Write the data. */
 +      bnx2x_write_data(bp, (u32 *)&admin_mib, offset,
 +                       sizeof(struct lldp_admin_mib));
 +
 +}
 +
 +void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled)
 +{
++      if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3(bp)) {
 +              bp->dcb_state = dcb_on;
 +              bp->dcbx_enabled = dcbx_enabled;
 +      } else {
 +              bp->dcb_state = false;
 +              bp->dcbx_enabled = BNX2X_DCBX_ENABLED_INVALID;
 +      }
 +      DP(NETIF_MSG_LINK, "DCB state [%s:%s]\n",
 +         dcb_on ? "ON" : "OFF",
 +         dcbx_enabled == BNX2X_DCBX_ENABLED_OFF ? "user-mode" :
 +         dcbx_enabled == BNX2X_DCBX_ENABLED_ON_NEG_OFF ? "on-chip static" :
 +         dcbx_enabled == BNX2X_DCBX_ENABLED_ON_NEG_ON ?
 +         "on-chip with negotiation" : "invalid");
 +}
 +
 +void bnx2x_dcbx_init_params(struct bnx2x *bp)
 +{
 +      bp->dcbx_config_params.admin_dcbx_version = 0x0; /* 0 - CEE; 1 - IEEE */
 +      bp->dcbx_config_params.admin_ets_willing = 1;
 +      bp->dcbx_config_params.admin_pfc_willing = 1;
 +      bp->dcbx_config_params.overwrite_settings = 1;
 +      bp->dcbx_config_params.admin_ets_enable = 1;
 +      bp->dcbx_config_params.admin_pfc_enable = 1;
 +      bp->dcbx_config_params.admin_tc_supported_tx_enable = 1;
 +      bp->dcbx_config_params.admin_ets_configuration_tx_enable = 1;
 +      bp->dcbx_config_params.admin_pfc_tx_enable = 1;
 +      bp->dcbx_config_params.admin_application_priority_tx_enable = 1;
 +      bp->dcbx_config_params.admin_ets_reco_valid = 1;
 +      bp->dcbx_config_params.admin_app_priority_willing = 1;
 +      bp->dcbx_config_params.admin_configuration_bw_precentage[0] = 0;
 +      bp->dcbx_config_params.admin_configuration_bw_precentage[1] = 50;
 +      bp->dcbx_config_params.admin_configuration_bw_precentage[2] = 50;
 +      bp->dcbx_config_params.admin_configuration_bw_precentage[3] = 0;
 +      bp->dcbx_config_params.admin_configuration_bw_precentage[4] = 0;
 +      bp->dcbx_config_params.admin_configuration_bw_precentage[5] = 0;
 +      bp->dcbx_config_params.admin_configuration_bw_precentage[6] = 0;
 +      bp->dcbx_config_params.admin_configuration_bw_precentage[7] = 0;
 +      bp->dcbx_config_params.admin_configuration_ets_pg[0] = 1;
 +      bp->dcbx_config_params.admin_configuration_ets_pg[1] = 0;
 +      bp->dcbx_config_params.admin_configuration_ets_pg[2] = 0;
 +      bp->dcbx_config_params.admin_configuration_ets_pg[3] = 2;
 +      bp->dcbx_config_params.admin_configuration_ets_pg[4] = 0;
 +      bp->dcbx_config_params.admin_configuration_ets_pg[5] = 0;
 +      bp->dcbx_config_params.admin_configuration_ets_pg[6] = 0;
 +      bp->dcbx_config_params.admin_configuration_ets_pg[7] = 0;
 +      bp->dcbx_config_params.admin_recommendation_bw_precentage[0] = 0;
 +      bp->dcbx_config_params.admin_recommendation_bw_precentage[1] = 1;
 +      bp->dcbx_config_params.admin_recommendation_bw_precentage[2] = 2;
 +      bp->dcbx_config_params.admin_recommendation_bw_precentage[3] = 0;
 +      bp->dcbx_config_params.admin_recommendation_bw_precentage[4] = 7;
 +      bp->dcbx_config_params.admin_recommendation_bw_precentage[5] = 5;
 +      bp->dcbx_config_params.admin_recommendation_bw_precentage[6] = 6;
 +      bp->dcbx_config_params.admin_recommendation_bw_precentage[7] = 7;
 +      bp->dcbx_config_params.admin_recommendation_ets_pg[0] = 0;
 +      bp->dcbx_config_params.admin_recommendation_ets_pg[1] = 1;
 +      bp->dcbx_config_params.admin_recommendation_ets_pg[2] = 2;
 +      bp->dcbx_config_params.admin_recommendation_ets_pg[3] = 3;
 +      bp->dcbx_config_params.admin_recommendation_ets_pg[4] = 4;
 +      bp->dcbx_config_params.admin_recommendation_ets_pg[5] = 5;
 +      bp->dcbx_config_params.admin_recommendation_ets_pg[6] = 6;
 +      bp->dcbx_config_params.admin_recommendation_ets_pg[7] = 7;
 +      bp->dcbx_config_params.admin_pfc_bitmap = 0x8; /* FCoE(3) enable */
 +      bp->dcbx_config_params.admin_priority_app_table[0].valid = 1;
 +      bp->dcbx_config_params.admin_priority_app_table[1].valid = 1;
 +      bp->dcbx_config_params.admin_priority_app_table[2].valid = 0;
 +      bp->dcbx_config_params.admin_priority_app_table[3].valid = 0;
 +      bp->dcbx_config_params.admin_priority_app_table[0].priority = 3;
 +      bp->dcbx_config_params.admin_priority_app_table[1].priority = 0;
 +      bp->dcbx_config_params.admin_priority_app_table[2].priority = 0;
 +      bp->dcbx_config_params.admin_priority_app_table[3].priority = 0;
 +      bp->dcbx_config_params.admin_priority_app_table[0].traffic_type = 0;
 +      bp->dcbx_config_params.admin_priority_app_table[1].traffic_type = 1;
 +      bp->dcbx_config_params.admin_priority_app_table[2].traffic_type = 0;
 +      bp->dcbx_config_params.admin_priority_app_table[3].traffic_type = 0;
 +      bp->dcbx_config_params.admin_priority_app_table[0].app_id = 0x8906;
 +      bp->dcbx_config_params.admin_priority_app_table[1].app_id = 3260;
 +      bp->dcbx_config_params.admin_priority_app_table[2].app_id = 0;
 +      bp->dcbx_config_params.admin_priority_app_table[3].app_id = 0;
 +      bp->dcbx_config_params.admin_default_priority =
 +              bp->dcbx_config_params.admin_priority_app_table[1].priority;
 +}
 +
 +void bnx2x_dcbx_init(struct bnx2x *bp)
 +{
 +      u32 dcbx_lldp_params_offset = SHMEM_LLDP_DCBX_PARAMS_NONE;
 +
 +      if (bp->dcbx_enabled <= 0)
 +              return;
 +
 +      /* validate:
 +       * the chip is good for the dcbx version,
 +       * dcb is wanted,
 +       * the function is the pmf,
 +       * shmem2 contains the DCBX support fields
 +       */
 +      DP(NETIF_MSG_LINK, "dcb_state %d bp->port.pmf %d\n",
 +         bp->dcb_state, bp->port.pmf);
 +
 +      if (bp->dcb_state == BNX2X_DCB_STATE_ON && bp->port.pmf &&