Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
authorDavid S. Miller <davem@davemloft.net>
Sun, 4 Aug 2013 04:36:46 +0000 (21:36 -0700)
committerDavid S. Miller <davem@davemloft.net>
Sun, 4 Aug 2013 04:36:46 +0000 (21:36 -0700)
Merge net into net-next to setup some infrastructure Eric
Dumazet needs for usbnet changes.

Signed-off-by: David S. Miller <davem@davemloft.net>
38 files changed:
1  2 
MAINTAINERS
drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe.h
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/marvell/mvneta.c
drivers/net/ethernet/mellanox/mlx4/fw.c
drivers/net/ethernet/mellanox/mlx4/main.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
drivers/net/ethernet/ti/cpsw.c
drivers/net/macvlan.c
drivers/net/usb/ax88179_178a.c
drivers/net/vxlan.c
include/linux/netdevice.h
include/linux/skbuff.h
include/net/sock.h
net/Kconfig
net/bridge/br_device.c
net/bridge/br_private.h
net/core/neighbour.c
net/core/sock.c
net/ipv4/devinet.c
net/ipv4/sysctl_net_ipv4.c
net/ipv6/addrconf.c
net/ipv6/ip6_fib.c
net/ipv6/ip6mr.c
net/ipv6/route.c
net/sunrpc/svcsock.c

diff --combined MAINTAINERS
@@@ -1406,7 -1406,7 +1406,7 @@@ ATHEROS ATH6KL WIRELESS DRIVE
  M:    Kalle Valo <kvalo@qca.qualcomm.com>
  L:    linux-wireless@vger.kernel.org
  W:    http://wireless.kernel.org/en/users/Drivers/ath6kl
- T:    git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath6kl.git
+ T:    git git://github.com/kvalo/ath.git
  S:    Supported
  F:    drivers/net/wireless/ath/ath6kl/
  
@@@ -1642,7 -1642,7 +1642,7 @@@ S:      Maintaine
  F:    drivers/net/hamradio/baycom*
  
  BCACHE (BLOCK LAYER CACHE)
- M:    Kent Overstreet <koverstreet@google.com>
+ M:    Kent Overstreet <kmo@daterainc.com>
  L:    linux-bcache@vger.kernel.org
  W:    http://bcache.evilpiepirate.org
  S:    Maintained
@@@ -2871,7 -2871,7 +2871,7 @@@ F:      drivers/media/usb/dvb-usb-v2/dvb_usb
  F:    drivers/media/usb/dvb-usb-v2/usb_urb.c
  
  DYNAMIC DEBUG
- M:    Jason Baron <jbaron@redhat.com>
+ M:    Jason Baron <jbaron@akamai.com>
  S:    Maintained
  F:    lib/dynamic_debug.c
  F:    include/linux/dynamic_debug.h
@@@ -3346,7 -3346,7 +3346,7 @@@ F:      Documentation/firmware_class
  F:    drivers/base/firmware*.c
  F:    include/linux/firmware.h
  
- FLASHSYSTEM DRIVER (IBM FlashSystem 70/80 PCI SSD Flash Card)
+ FLASH ADAPTER DRIVER (IBM Flash Adapter 900GB Full Height PCI Flash Card)
  M:    Joshua Morris <josh.h.morris@us.ibm.com>
  M:    Philip Kelleher <pjk1939@linux.vnet.ibm.com>
  S:    Maintained
@@@ -3622,11 -3622,9 +3622,9 @@@ F:     drivers/isdn/gigaset
  F:    include/uapi/linux/gigaset_dev.h
  
  GPIO SUBSYSTEM
- M:    Grant Likely <grant.likely@linaro.org>
  M:    Linus Walleij <linus.walleij@linaro.org>
  S:    Maintained
  L:    linux-gpio@vger.kernel.org
- T:    git git://git.secretlab.ca/git/linux-2.6.git
  F:    Documentation/gpio.txt
  F:    drivers/gpio/
  F:    include/linux/gpio*
@@@ -4472,8 -4470,6 +4470,6 @@@ F:      drivers/irqchip
  
  IRQ DOMAINS (IRQ NUMBER MAPPING LIBRARY)
  M:    Benjamin Herrenschmidt <benh@kernel.crashing.org>
- M:    Grant Likely <grant.likely@linaro.org>
- T:    git git://git.secretlab.ca/git/linux-2.6.git irqdomain/next
  S:    Maintained
  F:    Documentation/IRQ-domain.txt
  F:    include/linux/irqdomain.h
@@@ -4990,7 -4986,7 +4986,7 @@@ F:      arch/powerpc/platforms/44x
  
  LINUX FOR POWERPC EMBEDDED XILINX VIRTEX
  L:    linuxppc-dev@lists.ozlabs.org
- S:    Unmaintained
+ S:    Orphan
  F:    arch/powerpc/*/*virtex*
  F:    arch/powerpc/*/*/*virtex*
  
@@@ -5886,7 -5882,7 +5882,7 @@@ OMAP DEVICE TREE SUPPOR
  M:    Benoît Cousson <b-cousson@ti.com>
  M:    Tony Lindgren <tony@atomide.com>
  L:    linux-omap@vger.kernel.org
- L:    devicetree-discuss@lists.ozlabs.org (moderated for non-subscribers)
+ L:    devicetree@vger.kernel.org
  S:    Maintained
  F:    arch/arm/boot/dts/*omap*
  F:    arch/arm/boot/dts/*am3*
@@@ -6050,17 -6046,28 +6046,28 @@@ F:   drivers/i2c/busses/i2c-ocores.
  OPEN FIRMWARE AND FLATTENED DEVICE TREE
  M:    Grant Likely <grant.likely@linaro.org>
  M:    Rob Herring <rob.herring@calxeda.com>
- L:    devicetree-discuss@lists.ozlabs.org (moderated for non-subscribers)
+ L:    devicetree@vger.kernel.org
  W:    http://fdt.secretlab.ca
  T:    git git://git.secretlab.ca/git/linux-2.6.git
  S:    Maintained
- F:    Documentation/devicetree
- F:    drivers/of
+ F:    drivers/of/
  F:    include/linux/of*.h
- F:    scripts/dtc
+ F:    scripts/dtc/
  K:    of_get_property
  K:    of_match_table
  
+ OPEN FIRMWARE AND FLATTENED DEVICE TREE BINDINGS
+ M:    Rob Herring <rob.herring@calxeda.com>
+ M:    Pawel Moll <pawel.moll@arm.com>
+ M:    Mark Rutland <mark.rutland@arm.com>
+ M:    Stephen Warren <swarren@wwwdotorg.org>
+ M:    Ian Campbell <ian.campbell@citrix.com>
+ L:    devicetree@vger.kernel.org
+ S:    Maintained
+ F:    Documentation/devicetree/
+ F:    arch/*/boot/dts/
+ F:    include/dt-bindings/
  OPENRISC ARCHITECTURE
  M:    Jonas Bonn <jonas@southpole.se>
  W:    http://openrisc.net
@@@ -6719,6 -6726,14 +6726,14 @@@ T:    git git://linuxtv.org/anttip/media_t
  S:    Maintained
  F:    drivers/media/tuners/qt1010*
  
+ QUALCOMM ATHEROS ATH10K WIRELESS DRIVER
+ M:    Kalle Valo <kvalo@qca.qualcomm.com>
+ L:    ath10k@lists.infradead.org
+ W:    http://wireless.kernel.org/en/users/Drivers/ath10k
+ T:    git git://github.com/kvalo/ath.git
+ S:    Supported
+ F:    drivers/net/wireless/ath/ath10k/
  QUALCOMM HEXAGON ARCHITECTURE
  M:    Richard Kuo <rkuo@codeaurora.org>
  L:    linux-hexagon@vger.kernel.org
@@@ -7216,7 -7231,6 +7231,7 @@@ W:      http://lksctp.sourceforge.ne
  S:    Maintained
  F:    Documentation/networking/sctp.txt
  F:    include/linux/sctp.h
 +F:    include/uapi/linux/sctp.h
  F:    include/net/sctp/
  F:    net/sctp/
  
@@@ -7747,7 -7761,6 +7762,6 @@@ F:      drivers/clk/spear
  
  SPI SUBSYSTEM
  M:    Mark Brown <broonie@kernel.org>
- M:    Grant Likely <grant.likely@linaro.org>
  L:    linux-spi@vger.kernel.org
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/spi.git
  Q:    http://patchwork.kernel.org/project/spi-devel-general/list/
@@@ -7813,7 -7826,7 +7827,7 @@@ F:      drivers/staging/asus_oled
  
  STAGING - COMEDI
  M:    Ian Abbott <abbotti@mev.co.uk>
- M:    Mori Hess <fmhess@users.sourceforge.net>
+ M:    H Hartley Sweeten <hsweeten@visionengravers.com>
  S:    Odd Fixes
  F:    drivers/staging/comedi/
  
@@@ -8265,7 -8278,7 +8279,7 @@@ S:      Maintaine
  F:    sound/soc/codecs/twl4030*
  
  TI WILINK WIRELESS DRIVERS
- M:    Luciano Coelho <coelho@ti.com>
+ M:    Luciano Coelho <luca@coelho.fi>
  L:    linux-wireless@vger.kernel.org
  W:    http://wireless.kernel.org/en/users/Drivers/wl12xx
  W:    http://wireless.kernel.org/en/users/Drivers/wl1251
@@@ -9289,7 -9302,7 +9303,7 @@@ S:      Maintaine
  F:    drivers/net/ethernet/xilinx/xilinx_axienet*
  
  XILINX SYSTEMACE DRIVER
- S:    Unmaintained
+ S:    Orphan
  F:    drivers/block/xsysace.c
  
  XILINX UARTLITE SERIAL DRIVER
@@@ -486,7 -486,7 +486,7 @@@ struct bnx2x_fastpath 
  
        struct napi_struct      napi;
  
- #ifdef CONFIG_NET_LL_RX_POLL
+ #ifdef CONFIG_NET_RX_BUSY_POLL
        unsigned int state;
  #define BNX2X_FP_STATE_IDLE                 0
  #define BNX2X_FP_STATE_NAPI           (1 << 0)    /* NAPI owns this FP */
  #define BNX2X_FP_USER_PEND (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_POLL_YIELD)
        /* protect state */
        spinlock_t lock;
- #endif /* CONFIG_NET_LL_RX_POLL */
+ #endif /* CONFIG_NET_RX_BUSY_POLL */
  
        union host_hc_status_block      status_blk;
        /* chip independent shortcuts into sb structure */
  #define bnx2x_fp_stats(bp, fp)        (&((bp)->fp_stats[(fp)->index]))
  #define bnx2x_fp_qstats(bp, fp)       (&((bp)->fp_stats[(fp)->index].eth_q_stats))
  
- #ifdef CONFIG_NET_LL_RX_POLL
+ #ifdef CONFIG_NET_RX_BUSY_POLL
  static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp)
  {
        spin_lock_init(&fp->lock);
@@@ -680,7 -680,7 +680,7 @@@ static inline bool bnx2x_fp_ll_polling(
  {
        return false;
  }
- #endif /* CONFIG_NET_LL_RX_POLL */
+ #endif /* CONFIG_NET_RX_BUSY_POLL */
  
  /* Use 2500 as a mini-jumbo MTU for FCoE */
  #define BNX2X_FCOE_MINI_JUMBO_MTU     2500
@@@ -1331,7 -1331,7 +1331,7 @@@ enum 
        BNX2X_SP_RTNL_ENABLE_SRIOV,
        BNX2X_SP_RTNL_VFPF_MCAST,
        BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
 -      BNX2X_SP_RTNL_VFPF_STORM_RX_MODE,
 +      BNX2X_SP_RTNL_RX_MODE,
        BNX2X_SP_RTNL_HYPERVISOR_VLAN,
  };
  
@@@ -2060,11 -2060,7 +2060,11 @@@ void bnx2x_squeeze_objects(struct bnx2
        rparam.mcast_obj = &bp->mcast_obj;
        __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
  
 -      /* Add a DEL command... */
 +      /* Add a DEL command... - Since we're doing a driver cleanup only,
 +       * we take a lock surrounding both the initial send and the CONTs,
 +       * as we don't want a true completion to disrupt us in the middle.
 +       */
 +      netif_addr_lock_bh(bp->dev);
        rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
        if (rc < 0)
                BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
                if (rc < 0) {
                        BNX2X_ERR("Failed to clean multi-cast object: %d\n",
                                  rc);
 +                      netif_addr_unlock_bh(bp->dev);
                        return;
                }
  
                rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
        }
 +      netif_addr_unlock_bh(bp->dev);
  }
  
  #ifndef BNX2X_STOP_ON_ERROR
@@@ -2438,7 -2432,9 +2438,7 @@@ int bnx2x_load_cnic(struct bnx2x *bp
        }
  
        /* Initialize Rx filter. */
 -      netif_addr_lock_bh(bp->dev);
 -      bnx2x_set_rx_mode(bp->dev);
 -      netif_addr_unlock_bh(bp->dev);
 +      bnx2x_set_rx_mode_inner(bp);
  
        /* re-read iscsi info */
        bnx2x_get_iscsi_info(bp);
@@@ -2708,7 -2704,9 +2708,7 @@@ int bnx2x_nic_load(struct bnx2x *bp, in
        /* Start fast path */
  
        /* Initialize Rx filter. */
 -      netif_addr_lock_bh(bp->dev);
 -      bnx2x_set_rx_mode(bp->dev);
 -      netif_addr_unlock_bh(bp->dev);
 +      bnx2x_set_rx_mode_inner(bp);
  
        /* Start the Tx */
        switch (load_mode) {
@@@ -3119,7 -3117,7 +3119,7 @@@ int bnx2x_poll(struct napi_struct *napi
        return work_done;
  }
  
- #ifdef CONFIG_NET_LL_RX_POLL
+ #ifdef CONFIG_NET_RX_BUSY_POLL
  /* must be called with local_bh_disable()d */
  int bnx2x_low_latency_recv(struct napi_struct *napi)
  {
@@@ -9628,9 -9628,11 +9628,9 @@@ sp_rtnl_not_reset
                }
        }
  
 -      if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_STORM_RX_MODE,
 -                             &bp->sp_rtnl_state)) {
 -              DP(BNX2X_MSG_SP,
 -                 "sending set storm rx mode vf pf channel message from rtnl sp-task\n");
 -              bnx2x_vfpf_storm_rx_mode(bp);
 +      if (test_and_clear_bit(BNX2X_SP_RTNL_RX_MODE, &bp->sp_rtnl_state)) {
 +              DP(BNX2X_MSG_SP, "Handling Rx Mode setting\n");
 +              bnx2x_set_rx_mode_inner(bp);
        }
  
        if (test_and_clear_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN,
@@@ -11847,48 -11849,34 +11847,48 @@@ static int bnx2x_set_mc_list(struct bnx
  void bnx2x_set_rx_mode(struct net_device *dev)
  {
        struct bnx2x *bp = netdev_priv(dev);
 -      u32 rx_mode = BNX2X_RX_MODE_NORMAL;
  
        if (bp->state != BNX2X_STATE_OPEN) {
                DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
                return;
 +      } else {
 +              /* Schedule an SP task to handle rest of change */
 +              DP(NETIF_MSG_IFUP, "Scheduling an Rx mode change\n");
 +              smp_mb__before_clear_bit();
 +              set_bit(BNX2X_SP_RTNL_RX_MODE, &bp->sp_rtnl_state);
 +              smp_mb__after_clear_bit();
 +              schedule_delayed_work(&bp->sp_rtnl_task, 0);
        }
 +}
 +
 +void bnx2x_set_rx_mode_inner(struct bnx2x *bp)
 +{
 +      u32 rx_mode = BNX2X_RX_MODE_NORMAL;
  
        DP(NETIF_MSG_IFUP, "dev->flags = %x\n", bp->dev->flags);
  
 -      if (dev->flags & IFF_PROMISC)
 +      netif_addr_lock_bh(bp->dev);
 +
 +      if (bp->dev->flags & IFF_PROMISC) {
                rx_mode = BNX2X_RX_MODE_PROMISC;
 -      else if ((dev->flags & IFF_ALLMULTI) ||
 -               ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
 -                CHIP_IS_E1(bp)))
 +      } else if ((bp->dev->flags & IFF_ALLMULTI) ||
 +                 ((netdev_mc_count(bp->dev) > BNX2X_MAX_MULTICAST) &&
 +                  CHIP_IS_E1(bp))) {
                rx_mode = BNX2X_RX_MODE_ALLMULTI;
 -      else {
 +      } else {
                if (IS_PF(bp)) {
                        /* some multicasts */
                        if (bnx2x_set_mc_list(bp) < 0)
                                rx_mode = BNX2X_RX_MODE_ALLMULTI;
  
 +                      /* release bh lock, as bnx2x_set_uc_list might sleep */
 +                      netif_addr_unlock_bh(bp->dev);
                        if (bnx2x_set_uc_list(bp) < 0)
                                rx_mode = BNX2X_RX_MODE_PROMISC;
 +                      netif_addr_lock_bh(bp->dev);
                } else {
                        /* configuring mcast to a vf involves sleeping (when we
 -                       * wait for the pf's response). Since this function is
 -                       * called from non sleepable context we must schedule
 -                       * a work item for this purpose
 +                       * wait for the pf's response).
                         */
                        smp_mb__before_clear_bit();
                        set_bit(BNX2X_SP_RTNL_VFPF_MCAST,
        /* Schedule the rx_mode command */
        if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) {
                set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
 +              netif_addr_unlock_bh(bp->dev);
                return;
        }
  
        if (IS_PF(bp)) {
                bnx2x_set_storm_rx_mode(bp);
 +              netif_addr_unlock_bh(bp->dev);
        } else {
 -              /* configuring rx mode to storms in a vf involves sleeping (when
 -               * we wait for the pf's response). Since this function is
 -               * called from non sleepable context we must schedule
 -               * a work item for this purpose
 +              /* VF will need to request the PF to make this change, and so
 +               * the VF needs to release the bottom-half lock prior to the
 +               * request (as it will likely require sleep on the VF side)
                 */
 -              smp_mb__before_clear_bit();
 -              set_bit(BNX2X_SP_RTNL_VFPF_STORM_RX_MODE,
 -                      &bp->sp_rtnl_state);
 -              smp_mb__after_clear_bit();
 -              schedule_delayed_work(&bp->sp_rtnl_task, 0);
 +              netif_addr_unlock_bh(bp->dev);
 +              bnx2x_vfpf_storm_rx_mode(bp);
        }
  }
  
@@@ -12036,7 -12026,7 +12036,7 @@@ static const struct net_device_ops bnx2
        .ndo_fcoe_get_wwn       = bnx2x_fcoe_get_wwn,
  #endif
  
- #ifdef CONFIG_NET_LL_RX_POLL
+ #ifdef CONFIG_NET_RX_BUSY_POLL
        .ndo_busy_poll          = bnx2x_low_latency_recv,
  #endif
  };
@@@ -94,10 -94,10 +94,10 @@@ static inline void _tg3_flag_clear(enu
  
  #define DRV_MODULE_NAME               "tg3"
  #define TG3_MAJ_NUM                   3
 -#define TG3_MIN_NUM                   132
 +#define TG3_MIN_NUM                   133
  #define DRV_MODULE_VERSION    \
        __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
 -#define DRV_MODULE_RELDATE    "May 21, 2013"
 +#define DRV_MODULE_RELDATE    "Jul 29, 2013"
  
  #define RESET_KIND_SHUTDOWN   0
  #define RESET_KIND_INIT               1
@@@ -4226,6 -4226,8 +4226,6 @@@ static int tg3_power_down_prepare(struc
  
  static void tg3_power_down(struct tg3 *tp)
  {
 -      tg3_power_down_prepare(tp);
 -
        pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
        pci_set_power_state(tp->pdev, PCI_D3hot);
  }
@@@ -6093,12 -6095,10 +6093,12 @@@ static u64 tg3_refclk_read(struct tg3 *
  /* tp->lock must be held */
  static void tg3_refclk_write(struct tg3 *tp, u64 newval)
  {
 -      tw32(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_STOP);
 +      u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
 +
 +      tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
        tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
        tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
 -      tw32_f(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_RESUME);
 +      tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
  }
  
  static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
@@@ -6214,59 -6214,6 +6214,59 @@@ static int tg3_ptp_settime(struct ptp_c
  static int tg3_ptp_enable(struct ptp_clock_info *ptp,
                          struct ptp_clock_request *rq, int on)
  {
 +      struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
 +      u32 clock_ctl;
 +      int rval = 0;
 +
 +      switch (rq->type) {
 +      case PTP_CLK_REQ_PEROUT:
 +              if (rq->perout.index != 0)
 +                      return -EINVAL;
 +
 +              tg3_full_lock(tp, 0);
 +              clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
 +              clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
 +
 +              if (on) {
 +                      u64 nsec;
 +
 +                      nsec = rq->perout.start.sec * 1000000000ULL +
 +                             rq->perout.start.nsec;
 +
 +                      if (rq->perout.period.sec || rq->perout.period.nsec) {
 +                              netdev_warn(tp->dev,
 +                                          "Device supports only a one-shot timesync output, period must be 0\n");
 +                              rval = -EINVAL;
 +                              goto err_out;
 +                      }
 +
 +                      if (nsec & (1ULL << 63)) {
 +                              netdev_warn(tp->dev,
 +                                          "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
 +                              rval = -EINVAL;
 +                              goto err_out;
 +                      }
 +
 +                      tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
 +                      tw32(TG3_EAV_WATCHDOG0_MSB,
 +                           TG3_EAV_WATCHDOG0_EN |
 +                           ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
 +
 +                      tw32(TG3_EAV_REF_CLCK_CTL,
 +                           clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
 +              } else {
 +                      tw32(TG3_EAV_WATCHDOG0_MSB, 0);
 +                      tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
 +              }
 +
 +err_out:
 +              tg3_full_unlock(tp);
 +              return rval;
 +
 +      default:
 +              break;
 +      }
 +
        return -EOPNOTSUPP;
  }
  
@@@ -6276,7 -6223,7 +6276,7 @@@ static const struct ptp_clock_info tg3_
        .max_adj        = 250000000,
        .n_alarm        = 0,
        .n_ext_ts       = 0,
 -      .n_per_out      = 0,
 +      .n_per_out      = 1,
        .pps            = 0,
        .adjfreq        = tg3_ptp_adjfreq,
        .adjtime        = tg3_ptp_adjtime,
@@@ -10420,9 -10367,6 +10420,9 @@@ static int tg3_reset_hw(struct tg3 *tp
        if (tg3_flag(tp, 5755_PLUS))
                tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
  
 +      if (tg3_asic_rev(tp) == ASIC_REV_5762)
 +              tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
 +
        if (tg3_flag(tp, ENABLE_RSS))
                tp->rx_mode |= RX_MODE_RSS_ENABLE |
                               RX_MODE_RSS_ITBL_HASH_BITS_7 |
@@@ -11558,7 -11502,7 +11558,7 @@@ static int tg3_close(struct net_device 
        memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
        memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
  
 -      tg3_power_down(tp);
 +      tg3_power_down_prepare(tp);
  
        tg3_carrier_off(tp);
  
@@@ -11780,6 -11724,9 +11780,6 @@@ static int tg3_get_eeprom(struct net_de
        if (tg3_flag(tp, NO_NVRAM))
                return -EINVAL;
  
 -      if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
 -              return -EAGAIN;
 -
        offset = eeprom->offset;
        len = eeprom->len;
        eeprom->len = 0;
@@@ -11837,6 -11784,9 +11837,6 @@@ static int tg3_set_eeprom(struct net_de
        u8 *buf;
        __be32 start, end;
  
 -      if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
 -              return -EAGAIN;
 -
        if (tg3_flag(tp, NO_NVRAM) ||
            eeprom->magic != TG3_EEPROM_MAGIC)
                return -EINVAL;
@@@ -13565,7 -13515,7 +13565,7 @@@ static void tg3_self_test(struct net_de
                        tg3_phy_start(tp);
        }
        if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
 -              tg3_power_down(tp);
 +              tg3_power_down_prepare(tp);
  
  }
  
@@@ -17597,6 -17547,11 +17597,6 @@@ static int tg3_init_one(struct pci_dev 
            tg3_asic_rev(tp) == ASIC_REV_5762)
                tg3_flag_set(tp, PTP_CAPABLE);
  
 -      if (tg3_flag(tp, 5717_PLUS)) {
 -              /* Resume a low-power mode */
 -              tg3_frob_aux_power(tp, false);
 -      }
 -
        tg3_timer_init(tp);
  
        tg3_carrier_off(tp);
@@@ -17670,7 -17625,8 +17670,8 @@@ err_out_free_res
        pci_release_regions(pdev);
  
  err_out_disable_pdev:
-       pci_disable_device(pdev);
+       if (pci_is_enabled(pdev))
+               pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
        return err;
  }
  
  static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
  
 +static void tg3_shutdown(struct pci_dev *pdev)
 +{
 +      struct net_device *dev = pci_get_drvdata(pdev);
 +      struct tg3 *tp = netdev_priv(dev);
 +
 +      rtnl_lock();
 +      netif_device_detach(dev);
 +
 +      if (netif_running(dev))
 +              dev_close(dev);
 +
 +      if (system_state == SYSTEM_POWER_OFF)
 +              tg3_power_down(tp);
 +
 +      rtnl_unlock();
 +}
 +
  /**
   * tg3_io_error_detected - called when PCI error is detected
   * @pdev: Pointer to PCI device
@@@ -17835,7 -17774,8 +17836,8 @@@ static pci_ers_result_t tg3_io_error_de
  
        rtnl_lock();
  
-       if (!netif_running(netdev))
+       /* We probably don't have netdev yet */
+       if (!netdev || !netif_running(netdev))
                goto done;
  
        tg3_phy_stop(tp);
@@@ -17971,7 -17911,6 +17973,7 @@@ static struct pci_driver tg3_driver = 
        .remove         = tg3_remove_one,
        .err_handler    = &tg3_err_handler,
        .driver.pm      = &tg3_pm_ops,
 +      .shutdown       = tg3_shutdown,
  };
  
  module_pci_driver(tg3_driver);
@@@ -93,6 -93,20 +93,20 @@@ static void set_multicast_list(struct n
  #define FEC_QUIRK_HAS_CSUM            (1 << 5)
  /* Controller has hardware vlan support */
  #define FEC_QUIRK_HAS_VLAN            (1 << 6)
+ /* ENET IP errata ERR006358
+  *
+  * If the ready bit in the transmit buffer descriptor (TxBD[R]) is previously
+  * detected as not set during a prior frame transmission, then the
+  * ENET_TDAR[TDAR] bit is cleared at a later time, even if additional TxBDs
+  * were added to the ring and the ENET_TDAR[TDAR] bit is set. This results in
+  * frames not being transmitted until there is a 0-to-1 transition on
+  * ENET_TDAR[TDAR].
+  */
+ #define FEC_QUIRK_ERR006358            (1 << 7)
  
  static struct platform_device_id fec_devtype[] = {
        {
                .name = "imx6q-fec",
                .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
                                FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
-                               FEC_QUIRK_HAS_VLAN,
+                               FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358,
        }, {
                .name = "mvf600-fec",
                .driver_data = FEC_QUIRK_ENET_MAC,
@@@ -275,16 -289,11 +289,11 @@@ fec_enet_start_xmit(struct sk_buff *skb
        struct fec_enet_private *fep = netdev_priv(ndev);
        const struct platform_device_id *id_entry =
                                platform_get_device_id(fep->pdev);
-       struct bufdesc *bdp;
+       struct bufdesc *bdp, *bdp_pre;
        void *bufaddr;
        unsigned short  status;
        unsigned int index;
  
-       if (!fep->link) {
-               /* Link is down or auto-negotiation is in progress. */
-               return NETDEV_TX_BUSY;
-       }
        /* Fill in a Tx ring entry */
        bdp = fep->cur_tx;
  
                                ebdp->cbd_esc |= BD_ENET_TX_PINS;
                }
        }
+       bdp_pre = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
+       if ((id_entry->driver_data & FEC_QUIRK_ERR006358) &&
+           !(bdp_pre->cbd_sc & BD_ENET_TX_READY)) {
+               fep->delay_work.trig_tx = true;
+               schedule_delayed_work(&(fep->delay_work.delay_work),
+                                       msecs_to_jiffies(1));
+       }
        /* If this was the last BD in the ring, start at the beginning again. */
        if (status & BD_ENET_TX_WRAP)
                bdp = fep->tx_bd_base;
@@@ -689,6 -707,11 +707,11 @@@ static void fec_enet_work(struct work_s
                fec_restart(fep->netdev, fep->full_duplex);
                netif_wake_queue(fep->netdev);
        }
+       if (fep->delay_work.trig_tx) {
+               fep->delay_work.trig_tx = false;
+               writel(0, fep->hwp + FEC_X_DES_ACTIVE);
+       }
  }
  
  static void
@@@ -2033,6 -2056,10 +2056,6 @@@ fec_probe(struct platform_device *pdev
        if (of_id)
                pdev->id_entry = of_id->data;
  
 -      r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 -      if (!r)
 -              return -ENXIO;
 -
        /* Init network device */
        ndev = alloc_etherdev(sizeof(struct fec_enet_private));
        if (!ndev)
                fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG;
  #endif
  
 +      r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        fep->hwp = devm_ioremap_resource(&pdev->dev, r);
        if (IS_ERR(fep->hwp)) {
                ret = PTR_ERR(fep->hwp);
                fep->bufdesc_ex = 0;
        }
  
 -      clk_prepare_enable(fep->clk_ahb);
 -      clk_prepare_enable(fep->clk_ipg);
 -      clk_prepare_enable(fep->clk_enet_out);
 -      clk_prepare_enable(fep->clk_ptp);
 +      ret = clk_prepare_enable(fep->clk_ahb);
 +      if (ret)
 +              goto failed_clk;
 +
 +      ret = clk_prepare_enable(fep->clk_ipg);
 +      if (ret)
 +              goto failed_clk_ipg;
 +
 +      if (fep->clk_enet_out) {
 +              ret = clk_prepare_enable(fep->clk_enet_out);
 +              if (ret)
 +                      goto failed_clk_enet_out;
 +      }
 +
 +      if (fep->clk_ptp) {
 +              ret = clk_prepare_enable(fep->clk_ptp);
 +              if (ret)
 +                      goto failed_clk_ptp;
 +      }
  
        fep->reg_phy = devm_regulator_get(&pdev->dev, "phy");
        if (!IS_ERR(fep->reg_phy)) {
                        ret = irq;
                        goto failed_irq;
                }
 -              ret = request_irq(irq, fec_enet_interrupt, IRQF_DISABLED, pdev->name, ndev);
 -              if (ret) {
 -                      while (--i >= 0) {
 -                              irq = platform_get_irq(pdev, i);
 -                              free_irq(irq, ndev);
 -                      }
 +              ret = devm_request_irq(&pdev->dev, irq, fec_enet_interrupt,
 +                                     IRQF_DISABLED, pdev->name, ndev);
 +              if (ret)
                        goto failed_irq;
 -              }
        }
  
        ret = fec_enet_mii_init(pdev);
@@@ -2176,19 -2191,19 +2199,19 @@@ failed_register
        fec_enet_mii_remove(fep);
  failed_mii_init:
  failed_irq:
 -      for (i = 0; i < FEC_IRQ_NUM; i++) {
 -              irq = platform_get_irq(pdev, i);
 -              if (irq > 0)
 -                      free_irq(irq, ndev);
 -      }
  failed_init:
        if (fep->reg_phy)
                regulator_disable(fep->reg_phy);
  failed_regulator:
 -      clk_disable_unprepare(fep->clk_ahb);
 +      if (fep->clk_ptp)
 +              clk_disable_unprepare(fep->clk_ptp);
 +failed_clk_ptp:
 +      if (fep->clk_enet_out)
 +              clk_disable_unprepare(fep->clk_enet_out);
 +failed_clk_enet_out:
        clk_disable_unprepare(fep->clk_ipg);
 -      clk_disable_unprepare(fep->clk_enet_out);
 -      clk_disable_unprepare(fep->clk_ptp);
 +failed_clk_ipg:
 +      clk_disable_unprepare(fep->clk_ahb);
  failed_clk:
  failed_ioremap:
        free_netdev(ndev);
@@@ -2201,21 -2216,25 +2224,21 @@@ fec_drv_remove(struct platform_device *
  {
        struct net_device *ndev = platform_get_drvdata(pdev);
        struct fec_enet_private *fep = netdev_priv(ndev);
 -      int i;
  
        cancel_delayed_work_sync(&(fep->delay_work.delay_work));
        unregister_netdev(ndev);
        fec_enet_mii_remove(fep);
        del_timer_sync(&fep->time_keep);
 -      for (i = 0; i < FEC_IRQ_NUM; i++) {
 -              int irq = platform_get_irq(pdev, i);
 -              if (irq > 0)
 -                      free_irq(irq, ndev);
 -      }
        if (fep->reg_phy)
                regulator_disable(fep->reg_phy);
 -      clk_disable_unprepare(fep->clk_ptp);
 +      if (fep->clk_ptp)
 +              clk_disable_unprepare(fep->clk_ptp);
        if (fep->ptp_clock)
                ptp_clock_unregister(fep->ptp_clock);
 -      clk_disable_unprepare(fep->clk_enet_out);
 -      clk_disable_unprepare(fep->clk_ahb);
 +      if (fep->clk_enet_out)
 +              clk_disable_unprepare(fep->clk_enet_out);
        clk_disable_unprepare(fep->clk_ipg);
 +      clk_disable_unprepare(fep->clk_ahb);
        free_netdev(ndev);
  
        return 0;
@@@ -2232,12 -2251,9 +2255,12 @@@ fec_suspend(struct device *dev
                fec_stop(ndev);
                netif_device_detach(ndev);
        }
 -      clk_disable_unprepare(fep->clk_enet_out);
 -      clk_disable_unprepare(fep->clk_ahb);
 +      if (fep->clk_ptp)
 +              clk_disable_unprepare(fep->clk_ptp);
 +      if (fep->clk_enet_out)
 +              clk_disable_unprepare(fep->clk_enet_out);
        clk_disable_unprepare(fep->clk_ipg);
 +      clk_disable_unprepare(fep->clk_ahb);
  
        if (fep->reg_phy)
                regulator_disable(fep->reg_phy);
@@@ -2258,44 -2274,15 +2281,44 @@@ fec_resume(struct device *dev
                        return ret;
        }
  
 -      clk_prepare_enable(fep->clk_enet_out);
 -      clk_prepare_enable(fep->clk_ahb);
 -      clk_prepare_enable(fep->clk_ipg);
 +      ret = clk_prepare_enable(fep->clk_ahb);
 +      if (ret)
 +              goto failed_clk_ahb;
 +
 +      ret = clk_prepare_enable(fep->clk_ipg);
 +      if (ret)
 +              goto failed_clk_ipg;
 +
 +      if (fep->clk_enet_out) {
 +              ret = clk_prepare_enable(fep->clk_enet_out);
 +              if (ret)
 +                      goto failed_clk_enet_out;
 +      }
 +
 +      if (fep->clk_ptp) {
 +              ret = clk_prepare_enable(fep->clk_ptp);
 +              if (ret)
 +                      goto failed_clk_ptp;
 +      }
 +
        if (netif_running(ndev)) {
                fec_restart(ndev, fep->full_duplex);
                netif_device_attach(ndev);
        }
  
        return 0;
 +
 +failed_clk_ptp:
 +      if (fep->clk_enet_out)
 +              clk_disable_unprepare(fep->clk_enet_out);
 +failed_clk_enet_out:
 +      clk_disable_unprepare(fep->clk_ipg);
 +failed_clk_ipg:
 +      clk_disable_unprepare(fep->clk_ahb);
 +failed_clk_ahb:
 +      if (fep->reg_phy)
 +              regulator_disable(fep->reg_phy);
 +      return ret;
  }
  #endif /* CONFIG_PM_SLEEP */
  
@@@ -2315,4 -2302,5 +2338,5 @@@ static struct platform_driver fec_drive
  
  module_platform_driver(fec_driver);
  
+ MODULE_ALIAS("platform:"DRIVER_NAME);
  MODULE_LICENSE("GPL");
@@@ -54,7 -54,7 +54,7 @@@
  
  #include <net/busy_poll.h>
  
- #ifdef CONFIG_NET_LL_RX_POLL
+ #ifdef CONFIG_NET_RX_BUSY_POLL
  #define LL_EXTENDED_STATS
  #endif
  /* common prefix used by pr_<> macros */
@@@ -366,7 -366,7 +366,7 @@@ struct ixgbe_q_vector 
        struct rcu_head rcu;    /* to avoid race with update stats on free */
        char name[IFNAMSIZ + 9];
  
- #ifdef CONFIG_NET_LL_RX_POLL
+ #ifdef CONFIG_NET_RX_BUSY_POLL
        unsigned int state;
  #define IXGBE_QV_STATE_IDLE        0
  #define IXGBE_QV_STATE_NAPI      1    /* NAPI owns this QV */
  #define IXGBE_QV_YIELD (IXGBE_QV_STATE_NAPI_YIELD | IXGBE_QV_STATE_POLL_YIELD)
  #define IXGBE_QV_USER_PEND (IXGBE_QV_STATE_POLL | IXGBE_QV_STATE_POLL_YIELD)
        spinlock_t lock;
- #endif  /* CONFIG_NET_LL_RX_POLL */
+ #endif  /* CONFIG_NET_RX_BUSY_POLL */
  
        /* for dynamic allocation of rings associated with this q_vector */
        struct ixgbe_ring ring[0] ____cacheline_internodealigned_in_smp;
  };
- #ifdef CONFIG_NET_LL_RX_POLL
+ #ifdef CONFIG_NET_RX_BUSY_POLL
  static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector)
  {
  
@@@ -462,7 -462,7 +462,7 @@@ static inline bool ixgbe_qv_ll_polling(
        WARN_ON(!(q_vector->state & IXGBE_QV_LOCKED));
        return q_vector->state & IXGBE_QV_USER_PEND;
  }
- #else /* CONFIG_NET_LL_RX_POLL */
+ #else /* CONFIG_NET_RX_BUSY_POLL */
  static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector)
  {
  }
@@@ -491,7 -491,7 +491,7 @@@ static inline bool ixgbe_qv_ll_polling(
  {
        return false;
  }
- #endif /* CONFIG_NET_LL_RX_POLL */
+ #endif /* CONFIG_NET_RX_BUSY_POLL */
  
  #ifdef CONFIG_IXGBE_HWMON
  
@@@ -618,8 -618,9 +618,8 @@@ struct ixgbe_adapter 
  #define IXGBE_FLAG2_FDIR_REQUIRES_REINIT        (u32)(1 << 7)
  #define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP                (u32)(1 << 8)
  #define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP                (u32)(1 << 9)
 -#define IXGBE_FLAG2_PTP_ENABLED                       (u32)(1 << 10)
 -#define IXGBE_FLAG2_PTP_PPS_ENABLED           (u32)(1 << 11)
 -#define IXGBE_FLAG2_BRIDGE_MODE_VEB           (u32)(1 << 12)
 +#define IXGBE_FLAG2_PTP_PPS_ENABLED           (u32)(1 << 10)
 +#define IXGBE_FLAG2_BRIDGE_MODE_VEB           (u32)(1 << 11)
  
        /* Tx fast path data */
        int num_tx_queues;
@@@ -753,7 -754,7 +753,7 @@@ enum ixgbe_state_t 
        __IXGBE_DOWN,
        __IXGBE_SERVICE_SCHED,
        __IXGBE_IN_SFP_INIT,
 -      __IXGBE_READ_I2C,
 +      __IXGBE_PTP_RUNNING,
  };
  
  struct ixgbe_cb {
@@@ -63,7 -63,7 +63,7 @@@ char ixgbe_default_device_descr[] 
  static char ixgbe_default_device_descr[] =
                              "Intel(R) 10 Gigabit Network Connection";
  #endif
 -#define DRV_VERSION "3.13.10-k"
 +#define DRV_VERSION "3.15.1-k"
  const char ixgbe_driver_version[] = DRV_VERSION;
  static const char ixgbe_copyright[] =
                                "Copyright (c) 1999-2013 Intel Corporation.";
@@@ -109,7 -109,6 +109,7 @@@ static DEFINE_PCI_DEVICE_TABLE(ixgbe_pc
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T), board_X540 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2), board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS), board_82599 },
 +      {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_QSFP_SF_QP), board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP), board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T1), board_X540 },
@@@ -196,86 -195,6 +196,86 @@@ static s32 ixgbe_get_parent_bus_info(st
        return 0;
  }
  
 +/**
 + * ixgbe_pcie_from_parent - Determine whether PCIe info should come from parent
 + * @hw: hw specific details
 + *
 + * This function is used by probe to determine whether a device's PCI-Express
 + * bandwidth details should be gathered from the parent bus instead of from the
 + * device. Used to ensure that various locations all have the correct device ID
 + * checks.
 + */
 +static inline bool ixgbe_pcie_from_parent(struct ixgbe_hw *hw)
 +{
 +      switch (hw->device_id) {
 +      case IXGBE_DEV_ID_82599_SFP_SF_QP:
 +      case IXGBE_DEV_ID_82599_QSFP_SF_QP:
 +              return true;
 +      default:
 +              return false;
 +      }
 +}
 +
 +static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter,
 +                                   int expected_gts)
 +{
 +      int max_gts = 0;
 +      enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;
 +      enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
 +      struct pci_dev *pdev;
 +
 +      /* determine whether to use the parent device
 +       */
 +      if (ixgbe_pcie_from_parent(&adapter->hw))
 +              pdev = adapter->pdev->bus->parent->self;
 +      else
 +              pdev = adapter->pdev;
 +
 +      if (pcie_get_minimum_link(pdev, &speed, &width) ||
 +          speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) {
 +              e_dev_warn("Unable to determine PCI Express bandwidth.\n");
 +              return;
 +      }
 +
 +      switch (speed) {
 +      case PCIE_SPEED_2_5GT:
 +              /* 8b/10b encoding reduces max throughput by 20% */
 +              max_gts = 2 * width;
 +              break;
 +      case PCIE_SPEED_5_0GT:
 +              /* 8b/10b encoding reduces max throughput by 20% */
 +              max_gts = 4 * width;
 +              break;
 +      case PCIE_SPEED_8_0GT:
 +              /* 128b/130b encoding only reduces throughput by 1% */
 +              max_gts = 8 * width;
 +              break;
 +      default:
 +              e_dev_warn("Unable to determine PCI Express bandwidth.\n");
 +              return;
 +      }
 +
 +      e_dev_info("PCI Express bandwidth of %dGT/s available\n",
 +                 max_gts);
 +      e_dev_info("(Speed:%s, Width: x%d, Encoding Loss:%s)\n",
 +                 (speed == PCIE_SPEED_8_0GT ? "8.0GT/s" :
 +                  speed == PCIE_SPEED_5_0GT ? "5.0GT/s" :
 +                  speed == PCIE_SPEED_2_5GT ? "2.5GT/s" :
 +                  "Unknown"),
 +                 width,
 +                 (speed == PCIE_SPEED_2_5GT ? "20%" :
 +                  speed == PCIE_SPEED_5_0GT ? "20%" :
 +                  speed == PCIE_SPEED_8_0GT ? "N/a" :
 +                  "Unknown"));
 +
 +      if (max_gts < expected_gts) {
 +              e_dev_warn("This is not sufficient for optimal performance of this card.\n");
 +              e_dev_warn("For optimal performance, at least %dGT/s of bandwidth is required.\n",
 +                      expected_gts);
 +              e_dev_warn("A slot with more lanes and/or higher speed is suggested.\n");
 +      }
 +}
 +
  static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter)
  {
        if (!test_bit(__IXGBE_DOWN, &adapter->state) &&
@@@ -2079,7 -1998,7 +2079,7 @@@ static int ixgbe_clean_rx_irq(struct ix
        return total_rx_packets;
  }
  
- #ifdef CONFIG_NET_LL_RX_POLL
+ #ifdef CONFIG_NET_RX_BUSY_POLL
  /* must be called with local_bh_disable()d */
  static int ixgbe_low_latency_recv(struct napi_struct *napi)
  {
  
        return found;
  }
- #endif        /* CONFIG_NET_LL_RX_POLL */
+ #endif        /* CONFIG_NET_RX_BUSY_POLL */
  
  /**
   * ixgbe_configure_msix - Configure MSI-X hardware
@@@ -3805,15 -3724,8 +3805,15 @@@ void ixgbe_set_rx_mode(struct net_devic
                hw->addr_ctrl.user_set_promisc = true;
                fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
                vmolr |= (IXGBE_VMOLR_ROPE | IXGBE_VMOLR_MPE);
 -              /* don't hardware filter vlans in promisc mode */
 -              ixgbe_vlan_filter_disable(adapter);
 +              /* Only disable hardware filter vlans in promiscuous mode
 +               * if SR-IOV and VMDQ are disabled - otherwise ensure
 +               * that hardware VLAN filters remain enabled.
 +               */
 +              if (!(adapter->flags & (IXGBE_FLAG_VMDQ_ENABLED |
 +                                      IXGBE_FLAG_SRIOV_ENABLED)))
 +                      ixgbe_vlan_filter_disable(adapter);
 +              else
 +                      ixgbe_vlan_filter_enable(adapter);
        } else {
                if (netdev->flags & IFF_ALLMULTI) {
                        fctrl |= IXGBE_FCTRL_MPE;
@@@ -4440,7 -4352,7 +4440,7 @@@ void ixgbe_reset(struct ixgbe_adapter *
        if (hw->mac.san_mac_rar_index)
                hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
  
 -      if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED)
 +      if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
                ixgbe_ptp_reset(adapter);
  }
  
@@@ -4802,7 -4714,8 +4802,7 @@@ static int ixgbe_sw_init(struct ixgbe_a
        ixgbe_pbthresh_setup(adapter);
        hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
        hw->fc.send_xon = true;
 -      hw->fc.disable_fc_autoneg =
 -              (ixgbe_device_supports_autoneg_fc(hw) == 0) ? false : true;
 +      hw->fc.disable_fc_autoneg = ixgbe_device_supports_autoneg_fc(hw);
  
  #ifdef CONFIG_PCI_IOV
        /* assign number of SR-IOV VFs */
@@@ -5768,7 -5681,7 +5768,7 @@@ static void ixgbe_watchdog_link_is_up(s
  
        adapter->last_rx_ptp_check = jiffies;
  
 -      if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED)
 +      if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
                ixgbe_ptp_start_cyclecounter(adapter);
  
        e_info(drv, "NIC Link is Up %s, Flow Control: %s\n",
@@@ -5814,7 -5727,7 +5814,7 @@@ static void ixgbe_watchdog_link_is_down
        if (ixgbe_is_sfp(hw) && hw->mac.type == ixgbe_mac_82598EB)
                adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
  
 -      if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED)
 +      if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
                ixgbe_ptp_start_cyclecounter(adapter);
  
        e_info(drv, "NIC Link is Down\n");
@@@ -5913,6 -5826,10 +5913,6 @@@ static void ixgbe_sfp_detection_subtask
            !(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
                return;
  
 -      /* concurent i2c reads are not supported */
 -      if (test_bit(__IXGBE_READ_I2C, &adapter->state))
 -              return;
 -
        /* someone else is in init, wait until next service event */
        if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
                return;
@@@ -6121,7 -6038,7 +6121,7 @@@ static void ixgbe_service_task(struct w
        ixgbe_fdir_reinit_subtask(adapter);
        ixgbe_check_hang_subtask(adapter);
  
 -      if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED) {
 +      if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) {
                ixgbe_ptp_overflow_check(adapter);
                ixgbe_ptp_rx_hang(adapter);
        }
@@@ -7310,7 -7227,7 +7310,7 @@@ static const struct net_device_ops ixgb
  #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = ixgbe_netpoll,
  #endif
- #ifdef CONFIG_NET_LL_RX_POLL
+ #ifdef CONFIG_NET_RX_BUSY_POLL
        .ndo_busy_poll          = ixgbe_low_latency_recv,
  #endif
  #ifdef IXGBE_FCOE
        .ndo_bridge_getlink     = ixgbe_ndo_bridge_getlink,
  };
  
 +/**
 + * ixgbe_enumerate_functions - Get the number of ports this device has
 + * @adapter: adapter structure
 + *
 + * This function enumerates the physical functions co-located on a single slot,
 + * in order to determine how many ports a device has. This is most useful in
 + * determining the required GT/s of PCIe bandwidth necessary for optimal
 + * performance.
 + **/
 +static inline int ixgbe_enumerate_functions(struct ixgbe_adapter *adapter)
 +{
 +      struct ixgbe_hw *hw = &adapter->hw;
 +      struct list_head *entry;
 +      int physfns = 0;
 +
 +      /* Some cards can not use the generic count PCIe functions method, and
 +       * so must be hardcoded to the correct value.
 +       */
 +      switch (hw->device_id) {
 +      case IXGBE_DEV_ID_82599_SFP_SF_QP:
 +      case IXGBE_DEV_ID_82599_QSFP_SF_QP:
 +              physfns = 4;
 +              break;
 +      default:
 +              list_for_each(entry, &adapter->pdev->bus_list) {
 +                      struct pci_dev *pdev =
 +                              list_entry(entry, struct pci_dev, bus_list);
 +                      /* don't count virtual functions */
 +                      if (!pdev->is_virtfn)
 +                              physfns++;
 +              }
 +      }
 +
 +      return physfns;
 +}
 +
  /**
   * ixgbe_wol_supported - Check whether device supports WoL
   * @hw: hw specific details
@@@ -7447,7 -7328,7 +7447,7 @@@ static int ixgbe_probe(struct pci_dev *
        struct ixgbe_hw *hw;
        const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
        static int cards_found;
 -      int i, err, pci_using_dac;
 +      int i, err, pci_using_dac, expected_gts;
        unsigned int indices = MAX_TX_QUEUES;
        u8 part_str[IXGBE_PBANUM_LENGTH];
  #ifdef IXGBE_FCOE
@@@ -7736,7 -7617,7 +7736,7 @@@ skip_sriov
  
        /* pick up the PCI bus settings for reporting later */
        hw->mac.ops.get_bus_info(hw);
 -      if (hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP)
 +      if (ixgbe_pcie_from_parent(hw))
                ixgbe_get_parent_bus_info(adapter);
  
        /* print bus type/speed/width info */
                e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n",
                           hw->mac.type, hw->phy.type, part_str);
  
 -      if (hw->bus.width <= ixgbe_bus_width_pcie_x4) {
 -              e_dev_warn("PCI-Express bandwidth available for this card is "
 -                         "not sufficient for optimal performance.\n");
 -              e_dev_warn("For optimal performance a x8 PCI-Express slot "
 -                         "is required.\n");
 +      /* calculate the expected PCIe bandwidth required for optimal
 +       * performance. Note that some older parts will never have enough
 +       * bandwidth due to being older generation PCIe parts. We clamp these
 +       * parts to ensure no warning is displayed if it can't be fixed.
 +       */
 +      switch (hw->mac.type) {
 +      case ixgbe_mac_82598EB:
 +              expected_gts = min(ixgbe_enumerate_functions(adapter) * 10, 16);
 +              break;
 +      default:
 +              expected_gts = ixgbe_enumerate_functions(adapter) * 10;
 +              break;
        }
 +      ixgbe_check_minimum_link(adapter, expected_gts);
  
        /* reset the hardware with the new settings */
        err = hw->mac.ops.start_hw(hw);
  #define MVNETA_MAC_ADDR_HIGH                     0x2418
  #define MVNETA_SDMA_CONFIG                       0x241c
  #define      MVNETA_SDMA_BRST_SIZE_16            4
 -#define      MVNETA_NO_DESC_SWAP                 0x0
  #define      MVNETA_RX_BRST_SZ_MASK(burst)       ((burst) << 1)
  #define      MVNETA_RX_NO_DATA_SWAP              BIT(4)
  #define      MVNETA_TX_NO_DATA_SWAP              BIT(5)
 +#define      MVNETA_DESC_SWAP                    BIT(6)
  #define      MVNETA_TX_BRST_SZ_MASK(burst)       ((burst) << 22)
  #define MVNETA_PORT_STATUS                       0x2444
  #define      MVNETA_TX_IN_PRGRS                  BIT(1)
  #define      MVNETA_TX_FIFO_EMPTY                BIT(8)
  #define MVNETA_RX_MIN_FRAME_SIZE                 0x247c
+ #define MVNETA_SGMII_SERDES_CFG                        0x24A0
+ #define      MVNETA_SGMII_SERDES_PROTO                 0x0cc7
  #define MVNETA_TYPE_PRIO                         0x24bc
  #define      MVNETA_FORCE_UNI                    BIT(21)
  #define MVNETA_TXQ_CMD_1                         0x24e4
@@@ -262,7 -264,8 +264,7 @@@ struct mvneta_port 
   * layout of the transmit and reception DMA descriptors, and their
   * layout is therefore defined by the hardware design
   */
 -struct mvneta_tx_desc {
 -      u32  command;           /* Options used by HW for packet transmitting.*/
 +
  #define MVNETA_TX_L3_OFF_SHIFT        0
  #define MVNETA_TX_IP_HLEN_SHIFT       8
  #define MVNETA_TX_L4_UDP      BIT(16)
  #define MVNETA_TX_L4_CSUM_FULL        BIT(30)
  #define MVNETA_TX_L4_CSUM_NOT BIT(31)
  
 -      u16  reserverd1;        /* csum_l4 (for future use)             */
 -      u16  data_size;         /* Data size of transmitted packet in bytes */
 -      u32  buf_phys_addr;     /* Physical addr of transmitted buffer  */
 -      u32  reserved2;         /* hw_cmd - (for future use, PMT)       */
 -      u32  reserved3[4];      /* Reserved - (for future use)          */
 -};
 -
 -struct mvneta_rx_desc {
 -      u32  status;            /* Info about received packet           */
  #define MVNETA_RXD_ERR_CRC            0x0
  #define MVNETA_RXD_ERR_SUMMARY                BIT(16)
  #define MVNETA_RXD_ERR_OVERRUN                BIT(17)
  #define MVNETA_RXD_FIRST_LAST_DESC    (BIT(26) | BIT(27))
  #define MVNETA_RXD_L4_CSUM_OK         BIT(30)
  
 +#if defined(__LITTLE_ENDIAN)
 +struct mvneta_tx_desc {
 +      u32  command;           /* Options used by HW for packet transmitting.*/
 +      u16  reserverd1;        /* csum_l4 (for future use)             */
 +      u16  data_size;         /* Data size of transmitted packet in bytes */
 +      u32  buf_phys_addr;     /* Physical addr of transmitted buffer  */
 +      u32  reserved2;         /* hw_cmd - (for future use, PMT)       */
 +      u32  reserved3[4];      /* Reserved - (for future use)          */
 +};
 +
 +struct mvneta_rx_desc {
 +      u32  status;            /* Info about received packet           */
        u16  reserved1;         /* pnc_info - (for future use, PnC)     */
        u16  data_size;         /* Size of received packet in bytes     */
 +
        u32  buf_phys_addr;     /* Physical address of the buffer       */
        u32  reserved2;         /* pnc_flow_id  (for future use, PnC)   */
 +
        u32  buf_cookie;        /* cookie for access to RX buffer in rx path */
        u16  reserved3;         /* prefetch_cmd, for future use         */
        u16  reserved4;         /* csum_l4 - (for future use, PnC)      */
 +
 +      u32  reserved5;         /* pnc_extra PnC (for future use, PnC)  */
 +      u32  reserved6;         /* hw_cmd (for future use, PnC and HWF) */
 +};
 +#else
 +struct mvneta_tx_desc {
 +      u16  data_size;         /* Data size of transmitted packet in bytes */
 +      u16  reserverd1;        /* csum_l4 (for future use)             */
 +      u32  command;           /* Options used by HW for packet transmitting.*/
 +      u32  reserved2;         /* hw_cmd - (for future use, PMT)       */
 +      u32  buf_phys_addr;     /* Physical addr of transmitted buffer  */
 +      u32  reserved3[4];      /* Reserved - (for future use)          */
 +};
 +
 +struct mvneta_rx_desc {
 +      u16  data_size;         /* Size of received packet in bytes     */
 +      u16  reserved1;         /* pnc_info - (for future use, PnC)     */
 +      u32  status;            /* Info about received packet           */
 +
 +      u32  reserved2;         /* pnc_flow_id  (for future use, PnC)   */
 +      u32  buf_phys_addr;     /* Physical address of the buffer       */
 +
 +      u16  reserved4;         /* csum_l4 - (for future use, PnC)      */
 +      u16  reserved3;         /* prefetch_cmd, for future use         */
 +      u32  buf_cookie;        /* cookie for access to RX buffer in rx path */
 +
        u32  reserved5;         /* pnc_extra PnC (for future use, PnC)  */
        u32  reserved6;         /* hw_cmd (for future use, PnC and HWF) */
  };
 +#endif
  
  struct mvneta_tx_queue {
        /* Number of this TX queue, in the range 0-7 */
@@@ -686,6 -657,8 +688,8 @@@ static void mvneta_port_sgmii_config(st
        val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
        val |= MVNETA_GMAC2_PSC_ENABLE;
        mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
+       mvreg_write(pp, MVNETA_SGMII_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
  }
  
  /* Start the Ethernet port RX and TX activity */
@@@ -935,11 -908,9 +939,11 @@@ static void mvneta_defaults_set(struct 
        /* Default burst size */
        val |= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
        val |= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
 +      val |= MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP;
  
 -      val |= (MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP |
 -              MVNETA_NO_DESC_SWAP);
 +#if defined(__BIG_ENDIAN)
 +      val |= MVNETA_DESC_SWAP;
 +#endif
  
        /* Assign port SDMA configuration */
        mvreg_write(pp, MVNETA_SDMA_CONFIG, val);
@@@ -2761,28 -2732,24 +2765,24 @@@ static int mvneta_probe(struct platform
  
        pp = netdev_priv(dev);
  
-       pp->tx_done_timer.function = mvneta_tx_done_timer_callback;
-       init_timer(&pp->tx_done_timer);
-       clear_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags);
        pp->weight = MVNETA_RX_POLL_WEIGHT;
        pp->phy_node = phy_node;
        pp->phy_interface = phy_mode;
  
-       pp->base = of_iomap(dn, 0);
-       if (pp->base == NULL) {
-               err = -ENOMEM;
-               goto err_free_irq;
-       }
        pp->clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(pp->clk)) {
                err = PTR_ERR(pp->clk);
-               goto err_unmap;
+               goto err_free_irq;
        }
  
        clk_prepare_enable(pp->clk);
  
+       pp->base = of_iomap(dn, 0);
+       if (pp->base == NULL) {
+               err = -ENOMEM;
+               goto err_clk;
+       }
        dt_mac_addr = of_get_mac_address(dn);
        if (dt_mac_addr && is_valid_ether_addr(dt_mac_addr)) {
                mac_from = "device tree";
        }
  
        pp->tx_done_timer.data = (unsigned long)dev;
+       pp->tx_done_timer.function = mvneta_tx_done_timer_callback;
+       init_timer(&pp->tx_done_timer);
+       clear_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags);
  
        pp->tx_ring_size = MVNETA_MAX_TXD;
        pp->rx_ring_size = MVNETA_MAX_RXD;
        err = mvneta_init(pp, phy_addr);
        if (err < 0) {
                dev_err(&pdev->dev, "can't init eth hal\n");
-               goto err_clk;
+               goto err_unmap;
        }
        mvneta_port_power_up(pp, phy_mode);
  
  
  err_deinit:
        mvneta_deinit(pp);
- err_clk:
-       clk_disable_unprepare(pp->clk);
  err_unmap:
        iounmap(pp->base);
+ err_clk:
+       clk_disable_unprepare(pp->clk);
  err_free_irq:
        irq_dispose_mapping(dev->irq);
  err_free_netdev:
@@@ -845,16 -845,7 +845,7 @@@ int mlx4_QUERY_PORT_wrapper(struct mlx4
                           MLX4_CMD_NATIVE);
  
        if (!err && dev->caps.function != slave) {
-               /* if config MAC in DB use it */
-               if (priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.mac)
-                       def_mac = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.mac;
-               else {
-                       /* set slave default_mac address */
-                       MLX4_GET(def_mac, outbox->buf, QUERY_PORT_MAC_OFFSET);
-                       def_mac += slave << 8;
-                       priv->mfunc.master.vf_admin[slave].vport[vhcr->in_modifier].mac = def_mac;
-               }
+               def_mac = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.mac;
                MLX4_PUT(outbox->buf, def_mac, QUERY_PORT_MAC_OFFSET);
  
                /* get port type - currently only eth is enabled */
@@@ -1705,107 -1696,3 +1696,107 @@@ int mlx4_wol_write(struct mlx4_dev *dev
                        MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
  }
  EXPORT_SYMBOL_GPL(mlx4_wol_write);
 +
 +enum {
 +      ADD_TO_MCG = 0x26,
 +};
 +
 +
 +void mlx4_opreq_action(struct work_struct *work)
 +{
 +      struct mlx4_priv *priv = container_of(work, struct mlx4_priv,
 +                                            opreq_task);
 +      struct mlx4_dev *dev = &priv->dev;
 +      int num_tasks = atomic_read(&priv->opreq_count);
 +      struct mlx4_cmd_mailbox *mailbox;
 +      struct mlx4_mgm *mgm;
 +      u32 *outbox;
 +      u32 modifier;
 +      u16 token;
 +      u16 type_m;
 +      u16 type;
 +      int err;
 +      u32 num_qps;
 +      struct mlx4_qp qp;
 +      int i;
 +      u8 rem_mcg;
 +      u8 prot;
 +
 +#define GET_OP_REQ_MODIFIER_OFFSET    0x08
 +#define GET_OP_REQ_TOKEN_OFFSET               0x14
 +#define GET_OP_REQ_TYPE_OFFSET                0x1a
 +#define GET_OP_REQ_DATA_OFFSET                0x20
 +
 +      mailbox = mlx4_alloc_cmd_mailbox(dev);
 +      if (IS_ERR(mailbox)) {
 +              mlx4_err(dev, "Failed to allocate mailbox for GET_OP_REQ\n");
 +              return;
 +      }
 +      outbox = mailbox->buf;
 +
 +      while (num_tasks) {
 +              err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0,
 +                                 MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A,
 +                                 MLX4_CMD_NATIVE);
 +              if (err) {
 +                      mlx4_err(dev, "Failed to retreive required operation: %d\n",
 +                               err);
 +                      return;
 +              }
 +              MLX4_GET(modifier, outbox, GET_OP_REQ_MODIFIER_OFFSET);
 +              MLX4_GET(token, outbox, GET_OP_REQ_TOKEN_OFFSET);
 +              MLX4_GET(type, outbox, GET_OP_REQ_TYPE_OFFSET);
 +              type_m = type >> 12;
 +              type &= 0xfff;
 +
 +              switch (type) {
 +              case ADD_TO_MCG:
 +                      if (dev->caps.steering_mode ==
 +                          MLX4_STEERING_MODE_DEVICE_MANAGED) {
 +                              mlx4_warn(dev, "ADD MCG operation is not supported in DEVICE_MANAGED steering mode\n");
 +                              err = EPERM;
 +                              break;
 +                      }
 +                      mgm = (struct mlx4_mgm *)((u8 *)(outbox) +
 +                                                GET_OP_REQ_DATA_OFFSET);
 +                      num_qps = be32_to_cpu(mgm->members_count) &
 +                                MGM_QPN_MASK;
 +                      rem_mcg = ((u8 *)(&mgm->members_count))[0] & 1;
 +                      prot = ((u8 *)(&mgm->members_count))[0] >> 6;
 +
 +                      for (i = 0; i < num_qps; i++) {
 +                              qp.qpn = be32_to_cpu(mgm->qp[i]);
 +                              if (rem_mcg)
 +                                      err = mlx4_multicast_detach(dev, &qp,
 +                                                                  mgm->gid,
 +                                                                  prot, 0);
 +                              else
 +                                      err = mlx4_multicast_attach(dev, &qp,
 +                                                                  mgm->gid,
 +                                                                  mgm->gid[5]
 +                                                                  , 0, prot,
 +                                                                  NULL);
 +                              if (err)
 +                                      break;
 +                      }
 +                      break;
 +              default:
 +                      mlx4_warn(dev, "Bad type for required operation\n");
 +                      err = EINVAL;
 +                      break;
 +              }
 +              err = mlx4_cmd(dev, 0, ((u32) err | cpu_to_be32(token) << 16),
 +                             1, MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A,
 +                             MLX4_CMD_NATIVE);
 +              if (err) {
 +                      mlx4_err(dev, "Failed to acknowledge required request: %d\n",
 +                               err);
 +                      goto out;
 +              }
 +              memset(outbox, 0, 0xffc);
 +              num_tasks = atomic_dec_return(&priv->opreq_count);
 +      }
 +
 +out:
 +      mlx4_free_cmd_mailbox(dev, mailbox);
 +}
@@@ -371,7 -371,7 +371,7 @@@ static int mlx4_dev_cap(struct mlx4_de
  
        dev->caps.sqp_demux = (mlx4_is_master(dev)) ? MLX4_MAX_NUM_SLAVES : 0;
  
-       if (!enable_64b_cqe_eqe) {
+       if (!enable_64b_cqe_eqe && !mlx4_is_slave(dev)) {
                if (dev_cap->flags &
                    (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) {
                        mlx4_warn(dev, "64B EQEs/CQEs supported by the device but not enabled\n");
@@@ -1692,19 -1692,11 +1692,19 @@@ static int mlx4_setup_hca(struct mlx4_d
                goto err_xrcd_table_free;
        }
  
 +      if (!mlx4_is_slave(dev)) {
 +              err = mlx4_init_mcg_table(dev);
 +              if (err) {
 +                      mlx4_err(dev, "Failed to initialize multicast group table, aborting.\n");
 +                      goto err_mr_table_free;
 +              }
 +      }
 +
        err = mlx4_init_eq_table(dev);
        if (err) {
                mlx4_err(dev, "Failed to initialize "
                         "event queue table, aborting.\n");
 -              goto err_mr_table_free;
 +              goto err_mcg_table_free;
        }
  
        err = mlx4_cmd_use_events(dev);
                goto err_srq_table_free;
        }
  
 -      if (!mlx4_is_slave(dev)) {
 -              err = mlx4_init_mcg_table(dev);
 -              if (err) {
 -                      mlx4_err(dev, "Failed to initialize "
 -                               "multicast group table, aborting.\n");
 -                      goto err_qp_table_free;
 -              }
 -      }
 -
        err = mlx4_init_counters_table(dev);
        if (err && err != -ENOENT) {
                mlx4_err(dev, "Failed to initialize counters table, aborting.\n");
 -              goto err_mcg_table_free;
 +              goto err_qp_table_free;
        }
  
        if (!mlx4_is_slave(dev)) {
  err_counters_table_free:
        mlx4_cleanup_counters_table(dev);
  
 -err_mcg_table_free:
 -      mlx4_cleanup_mcg_table(dev);
 -
  err_qp_table_free:
        mlx4_cleanup_qp_table(dev);
  
@@@ -1817,10 -1821,6 +1817,10 @@@ err_cmd_poll
  err_eq_table_free:
        mlx4_cleanup_eq_table(dev);
  
 +err_mcg_table_free:
 +      if (!mlx4_is_slave(dev))
 +              mlx4_cleanup_mcg_table(dev);
 +
  err_mr_table_free:
        mlx4_cleanup_mr_table(dev);
  
@@@ -2197,9 -2197,6 +2197,9 @@@ static int __mlx4_init_one(struct pci_d
                        }
                }
  
 +              atomic_set(&priv->opreq_count, 0);
 +              INIT_WORK(&priv->opreq_task, mlx4_opreq_action);
 +
                /*
                 * Now reset the HCA before we touch the PCI capabilities or
                 * attempt a firmware command, since a boot ROM may have left
@@@ -2318,12 -2315,12 +2318,12 @@@ err_port
                mlx4_cleanup_port_info(&priv->port[port]);
  
        mlx4_cleanup_counters_table(dev);
 -      mlx4_cleanup_mcg_table(dev);
        mlx4_cleanup_qp_table(dev);
        mlx4_cleanup_srq_table(dev);
        mlx4_cleanup_cq_table(dev);
        mlx4_cmd_use_polling(dev);
        mlx4_cleanup_eq_table(dev);
 +      mlx4_cleanup_mcg_table(dev);
        mlx4_cleanup_mr_table(dev);
        mlx4_cleanup_xrcd_table(dev);
        mlx4_cleanup_pd_table(dev);
@@@ -2406,12 -2403,12 +2406,12 @@@ static void mlx4_remove_one(struct pci_
                                                   RES_TR_FREE_SLAVES_ONLY);
  
                mlx4_cleanup_counters_table(dev);
 -              mlx4_cleanup_mcg_table(dev);
                mlx4_cleanup_qp_table(dev);
                mlx4_cleanup_srq_table(dev);
                mlx4_cleanup_cq_table(dev);
                mlx4_cmd_use_polling(dev);
                mlx4_cleanup_eq_table(dev);
 +              mlx4_cleanup_mcg_table(dev);
                mlx4_cleanup_mr_table(dev);
                mlx4_cleanup_xrcd_table(dev);
                mlx4_cleanup_pd_table(dev);
@@@ -20,6 -20,7 +20,6 @@@
  #include <linux/tcp.h>
  #include <linux/skbuff.h>
  #include <linux/firmware.h>
 -
  #include <linux/ethtool.h>
  #include <linux/mii.h>
  #include <linux/timer.h>
@@@ -37,8 -38,8 +37,8 @@@
  
  #define _QLCNIC_LINUX_MAJOR 5
  #define _QLCNIC_LINUX_MINOR 2
 -#define _QLCNIC_LINUX_SUBVERSION 44
 -#define QLCNIC_LINUX_VERSIONID  "5.2.44"
 +#define _QLCNIC_LINUX_SUBVERSION 45
 +#define QLCNIC_LINUX_VERSIONID  "5.2.45"
  #define QLCNIC_DRV_IDC_VER  0x01
  #define QLCNIC_DRIVER_VERSION  ((_QLCNIC_LINUX_MAJOR << 16) |\
                 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@@ -466,7 -467,7 +466,7 @@@ struct qlcnic_hardware_context 
        u32 *ext_reg_tbl;
        u32 mbox_aen[QLC_83XX_MBX_AEN_CNT];
        u32 mbox_reg[4];
 -      spinlock_t mbx_lock;
 +      struct qlcnic_mailbox *mailbox;
  };
  
  struct qlcnic_adapter_stats {
@@@ -949,6 -950,12 +949,6 @@@ struct qlcnic_ipaddr 
  #define QLCNIC_READD_AGE      20
  #define QLCNIC_LB_MAX_FILTERS 64
  #define QLCNIC_LB_BUCKET_SIZE 32
 -
 -/* QLCNIC Driver Error Code */
 -#define QLCNIC_FW_NOT_RESPOND         51
 -#define QLCNIC_TEST_IN_PROGRESS               52
 -#define QLCNIC_UNDEFINED_ERROR                53
 -#define QLCNIC_LB_CABLE_NOT_CONN      54
  #define QLCNIC_ILB_MAX_RCV_LOOP       10
  
  struct qlcnic_filter {
@@@ -965,21 -972,6 +965,21 @@@ struct qlcnic_filter_hash 
        u16 fbucket_size;
  };
  
 +/* Mailbox specific data structures */
 +struct qlcnic_mailbox {
 +      struct workqueue_struct *work_q;
 +      struct qlcnic_adapter   *adapter;
 +      struct qlcnic_mbx_ops   *ops;
 +      struct work_struct      work;
 +      struct completion       completion;
 +      struct list_head        cmd_q;
 +      unsigned long           status;
 +      spinlock_t              queue_lock;     /* Mailbox queue lock */
 +      spinlock_t              aen_lock;       /* Mailbox response/AEN lock */
 +      atomic_t                rsp_status;
 +      u32                     num_cmds;
 +};
 +
  struct qlcnic_adapter {
        struct qlcnic_hardware_context *ahw;
        struct qlcnic_recv_context *recv_ctx;
@@@ -1393,20 -1385,9 +1393,20 @@@ struct _cdrp_cmd 
  };
  
  struct qlcnic_cmd_args {
 -      struct _cdrp_cmd req;
 -      struct _cdrp_cmd rsp;
 -      int op_type;
 +      struct completion       completion;
 +      struct list_head        list;
 +      struct _cdrp_cmd        req;
 +      struct _cdrp_cmd        rsp;
 +      atomic_t                rsp_status;
 +      int                     pay_size;
 +      u32                     rsp_opcode;
 +      u32                     total_cmds;
 +      u32                     op_type;
 +      u32                     type;
 +      u32                     cmd_op;
 +      u32                     *hdr;   /* Back channel message header */
 +      u32                     *pay;   /* Back channel message payload */
 +      u8                      func_num;
  };
  
  int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter);
@@@ -1419,8 -1400,8 +1419,8 @@@ void qlcnic_pci_camqm_write_2M(struct q
  #define ADDR_IN_RANGE(addr, low, high)        \
        (((addr) < (high)) && ((addr) >= (low)))
  
- #define QLCRD32(adapter, off) \
-       (adapter->ahw->hw_ops->read_reg)(adapter, off)
+ #define QLCRD32(adapter, off, err) \
+       (adapter->ahw->hw_ops->read_reg)(adapter, off, err)
  
  #define QLCWR32(adapter, off, val) \
        adapter->ahw->hw_ops->write_reg(adapter, off, val)
@@@ -1619,25 -1600,11 +1619,25 @@@ struct qlcnic_nic_template 
        int (*resume)(struct qlcnic_adapter *);
  };
  
 +struct qlcnic_mbx_ops {
 +      int (*enqueue_cmd) (struct qlcnic_adapter *,
 +                          struct qlcnic_cmd_args *, unsigned long *);
 +      void (*dequeue_cmd) (struct qlcnic_adapter *, struct qlcnic_cmd_args *);
 +      void (*decode_resp) (struct qlcnic_adapter *, struct qlcnic_cmd_args *);
 +      void (*encode_cmd) (struct qlcnic_adapter *, struct qlcnic_cmd_args *);
 +      void (*nofity_fw) (struct qlcnic_adapter *, u8);
 +};
 +
 +int qlcnic_83xx_init_mailbox_work(struct qlcnic_adapter *);
 +void qlcnic_83xx_detach_mailbox_work(struct qlcnic_adapter *);
 +void qlcnic_83xx_reinit_mbx_work(struct qlcnic_mailbox *mbx);
 +void qlcnic_83xx_free_mailbox(struct qlcnic_mailbox *mbx);
 +
  /* Adapter hardware abstraction */
  struct qlcnic_hardware_ops {
        void (*read_crb) (struct qlcnic_adapter *, char *, loff_t, size_t);
        void (*write_crb) (struct qlcnic_adapter *, char *, loff_t, size_t);
-       int (*read_reg) (struct qlcnic_adapter *, ulong);
+       int (*read_reg) (struct qlcnic_adapter *, ulong, int *);
        int (*write_reg) (struct qlcnic_adapter *, ulong, u32);
        void (*get_ocm_win) (struct qlcnic_hardware_context *);
        int (*get_mac_address) (struct qlcnic_adapter *, u8 *);
@@@ -1695,12 -1662,6 +1695,6 @@@ static inline void qlcnic_write_crb(str
        adapter->ahw->hw_ops->write_crb(adapter, buf, offset, size);
  }
  
- static inline int qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter,
-                                      ulong off)
- {
-       return adapter->ahw->hw_ops->read_reg(adapter, off);
- }
  static inline int qlcnic_hw_write_wx_2M(struct qlcnic_adapter *adapter,
                                        ulong off, u32 data)
  {
@@@ -1902,7 -1863,8 +1896,8 @@@ static inline void qlcnic_free_mac_list
  
  static inline void qlcnic_set_mac_filter_count(struct qlcnic_adapter *adapter)
  {
-       adapter->ahw->hw_ops->set_mac_filter_count(adapter);
+       if (adapter->ahw->hw_ops->set_mac_filter_count)
+               adapter->ahw->hw_ops->set_mac_filter_count(adapter);
  }
  
  static inline void qlcnic_dev_request_reset(struct qlcnic_adapter *adapter,
@@@ -149,7 -149,7 +149,7 @@@ static struct qlcnic_hardware_ops qlcni
        .get_mac_address                = qlcnic_83xx_get_mac_address,
        .setup_intr                     = qlcnic_83xx_setup_intr,
        .alloc_mbx_args                 = qlcnic_83xx_alloc_mbx_args,
 -      .mbx_cmd                        = qlcnic_83xx_mbx_op,
 +      .mbx_cmd                        = qlcnic_83xx_issue_cmd,
        .get_func_no                    = qlcnic_83xx_get_func_no,
        .api_lock                       = qlcnic_83xx_cam_lock,
        .api_unlock                     = qlcnic_83xx_cam_unlock,
@@@ -228,17 -228,17 +228,17 @@@ static int __qlcnic_set_win_base(struc
        return 0;
  }
  
- int qlcnic_83xx_rd_reg_indirect(struct qlcnic_adapter *adapter, ulong addr)
+ int qlcnic_83xx_rd_reg_indirect(struct qlcnic_adapter *adapter, ulong addr,
+                               int *err)
  {
-       int ret;
        struct qlcnic_hardware_context *ahw = adapter->ahw;
  
-       ret = __qlcnic_set_win_base(adapter, (u32) addr);
-       if (!ret) {
+       *err = __qlcnic_set_win_base(adapter, (u32) addr);
+       if (!*err) {
                return QLCRDX(ahw, QLCNIC_WILDCARD);
        } else {
                dev_err(&adapter->pdev->dev,
-                       "%s failed, addr = 0x%x\n", __func__, (int)addr);
+                       "%s failed, addr = 0x%lx\n", __func__, addr);
                return -EIO;
        }
  }
@@@ -362,10 -362,6 +362,10 @@@ static inline void qlcnic_83xx_get_mbx_
                                     struct qlcnic_cmd_args *cmd)
  {
        int i;
 +
 +      if (cmd->op_type == QLC_83XX_MBX_POST_BC_OP)
 +              return;
 +
        for (i = 0; i < cmd->rsp.num; i++)
                cmd->rsp.arg[i] = readl(QLCNIC_MBX_FW(adapter->ahw, i));
  }
@@@ -402,33 -398,24 +402,33 @@@ irqreturn_t qlcnic_83xx_clear_legacy_in
        return IRQ_HANDLED;
  }
  
 +static inline void qlcnic_83xx_notify_mbx_response(struct qlcnic_mailbox *mbx)
 +{
 +      atomic_set(&mbx->rsp_status, QLC_83XX_MBX_RESPONSE_ARRIVED);
 +      complete(&mbx->completion);
 +}
 +
  static void qlcnic_83xx_poll_process_aen(struct qlcnic_adapter *adapter)
  {
 -      u32 resp, event;
 +      u32 resp, event, rsp_status = QLC_83XX_MBX_RESPONSE_ARRIVED;
 +      struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
        unsigned long flags;
  
 -      spin_lock_irqsave(&adapter->ahw->mbx_lock, flags);
 -
 +      spin_lock_irqsave(&mbx->aen_lock, flags);
        resp = QLCRDX(adapter->ahw, QLCNIC_FW_MBX_CTRL);
        if (!(resp & QLCNIC_SET_OWNER))
                goto out;
  
        event = readl(QLCNIC_MBX_FW(adapter->ahw, 0));
 -      if (event &  QLCNIC_MBX_ASYNC_EVENT)
 +      if (event &  QLCNIC_MBX_ASYNC_EVENT) {
                __qlcnic_83xx_process_aen(adapter);
 -
 +      } else {
 +              if (atomic_read(&mbx->rsp_status) != rsp_status)
 +                      qlcnic_83xx_notify_mbx_response(mbx);
 +      }
  out:
        qlcnic_83xx_enable_legacy_msix_mbx_intr(adapter);
 -      spin_unlock_irqrestore(&adapter->ahw->mbx_lock, flags);
 +      spin_unlock_irqrestore(&mbx->aen_lock, flags);
  }
  
  irqreturn_t qlcnic_83xx_intr(int irq, void *data)
@@@ -528,7 -515,7 +528,7 @@@ int qlcnic_83xx_setup_mbx_intr(struct q
        }
  
        /* Enable mailbox interrupt */
 -      qlcnic_83xx_enable_mbx_intrpt(adapter);
 +      qlcnic_83xx_enable_mbx_interrupt(adapter);
  
        return err;
  }
@@@ -574,7 -561,7 +574,7 @@@ void qlcnic_83xx_cam_unlock(struct qlcn
  void qlcnic_83xx_read_crb(struct qlcnic_adapter *adapter, char *buf,
                          loff_t offset, size_t size)
  {
-       int ret;
+       int ret = 0;
        u32 data;
  
        if (qlcnic_api_lock(adapter)) {
                return;
        }
  
-       ret = qlcnic_83xx_rd_reg_indirect(adapter, (u32) offset);
+       data = QLCRD32(adapter, (u32) offset, &ret);
        qlcnic_api_unlock(adapter);
  
        if (ret == -EIO) {
                        __func__, (u32)offset);
                return;
        }
-       data = ret;
        memcpy(buf, &data, size);
  }
  
@@@ -642,7 -628,7 +641,7 @@@ void qlcnic_83xx_set_mac_filter_count(s
        ahw->max_uc_count = count;
  }
  
 -void qlcnic_83xx_enable_mbx_intrpt(struct qlcnic_adapter *adapter)
 +void qlcnic_83xx_enable_mbx_interrupt(struct qlcnic_adapter *adapter)
  {
        u32 val;
  
@@@ -701,9 -687,6 +700,9 @@@ static void qlcnic_dump_mbx(struct qlcn
  {
        int i;
  
 +      if (cmd->op_type == QLC_83XX_MBX_POST_BC_OP)
 +              return;
 +
        dev_info(&adapter->pdev->dev,
                 "Host MBX regs(%d)\n", cmd->req.num);
        for (i = 0; i < cmd->req.num; i++) {
        pr_info("\n");
  }
  
 -/* Mailbox response for mac rcode */
 -u32 qlcnic_83xx_mac_rcode(struct qlcnic_adapter *adapter)
 +static inline void
 +qlcnic_83xx_poll_for_mbx_completion(struct qlcnic_adapter *adapter,
 +                                  struct qlcnic_cmd_args *cmd)
  {
 -      u32 fw_data;
 -      u8 mac_cmd_rcode;
 +      struct qlcnic_hardware_context *ahw = adapter->ahw;
 +      int opcode = LSW(cmd->req.arg[0]);
 +      unsigned long max_loops;
  
 -      fw_data = readl(QLCNIC_MBX_FW(adapter->ahw, 2));
 -      mac_cmd_rcode = (u8)fw_data;
 -      if (mac_cmd_rcode == QLC_83XX_NO_NIC_RESOURCE ||
 -          mac_cmd_rcode == QLC_83XX_MAC_PRESENT ||
 -          mac_cmd_rcode == QLC_83XX_MAC_ABSENT)
 -              return QLCNIC_RCODE_SUCCESS;
 -      return 1;
 -}
 +      max_loops = cmd->total_cmds * QLC_83XX_MBX_CMD_LOOP;
  
 -u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *adapter, u32 *wait_time)
 -{
 -      u32 data;
 -      struct qlcnic_hardware_context *ahw = adapter->ahw;
 -      /* wait for mailbox completion */
 -      do {
 -              data = QLCRDX(ahw, QLCNIC_FW_MBX_CTRL);
 -              if (++(*wait_time) > QLCNIC_MBX_TIMEOUT) {
 -                      data = QLCNIC_RCODE_TIMEOUT;
 -                      break;
 -              }
 -              mdelay(1);
 -      } while (!data);
 -      return data;
 +      for (; max_loops; max_loops--) {
 +              if (atomic_read(&cmd->rsp_status) ==
 +                  QLC_83XX_MBX_RESPONSE_ARRIVED)
 +                      return;
 +
 +              udelay(1);
 +      }
 +
 +      dev_err(&adapter->pdev->dev,
 +              "%s: Mailbox command timed out, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
 +              __func__, opcode, cmd->type, ahw->pci_func, ahw->op_mode);
 +      flush_workqueue(ahw->mailbox->work_q);
 +      return;
  }
  
 -int qlcnic_83xx_mbx_op(struct qlcnic_adapter *adapter,
 -                     struct qlcnic_cmd_args *cmd)
 +int qlcnic_83xx_issue_cmd(struct qlcnic_adapter *adapter,
 +                        struct qlcnic_cmd_args *cmd)
  {
 -      int i;
 -      u16 opcode;
 -      u8 mbx_err_code;
 -      unsigned long flags;
 +      struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
        struct qlcnic_hardware_context *ahw = adapter->ahw;
 -      u32 rsp, mbx_val, fw_data, rsp_num, mbx_cmd, wait_time = 0;
 +      int cmd_type, err, opcode;
 +      unsigned long timeout;
  
        opcode = LSW(cmd->req.arg[0]);
 -      if (!test_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status)) {
 -              dev_info(&adapter->pdev->dev,
 -                       "Mailbox cmd attempted, 0x%x\n", opcode);
 -              dev_info(&adapter->pdev->dev, "Mailbox detached\n");
 -              return 0;
 +      cmd_type = cmd->type;
 +      err = mbx->ops->enqueue_cmd(adapter, cmd, &timeout);
 +      if (err) {
 +              dev_err(&adapter->pdev->dev,
 +                      "%s: Mailbox not available, cmd_op=0x%x, cmd_context=0x%x, pci_func=0x%x, op_mode=0x%x\n",
 +                      __func__, opcode, cmd->type, ahw->pci_func,
 +                      ahw->op_mode);
 +              return err;
        }
  
 -      spin_lock_irqsave(&adapter->ahw->mbx_lock, flags);
 -      mbx_val = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL);
 -
 -      if (mbx_val) {
 -              QLCDB(adapter, DRV,
 -                    "Mailbox cmd attempted, 0x%x\n", opcode);
 -              QLCDB(adapter, DRV,
 -                    "Mailbox not available, 0x%x, collect FW dump\n",
 -                    mbx_val);
 -              cmd->rsp.arg[0] = QLCNIC_RCODE_TIMEOUT;
 -              spin_unlock_irqrestore(&adapter->ahw->mbx_lock, flags);
 -              return cmd->rsp.arg[0];
 -      }
 -
 -      /* Fill in mailbox registers */
 -      mbx_cmd = cmd->req.arg[0];
 -      writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 0));
 -      for (i = 1; i < cmd->req.num; i++)
 -              writel(cmd->req.arg[i], QLCNIC_MBX_HOST(ahw, i));
 -
 -      /* Signal FW about the impending command */
 -      QLCWRX(ahw, QLCNIC_HOST_MBX_CTRL, QLCNIC_SET_OWNER);
 -poll:
 -      rsp = qlcnic_83xx_mbx_poll(adapter, &wait_time);
 -      if (rsp != QLCNIC_RCODE_TIMEOUT) {
 -              /* Get the FW response data */
 -              fw_data = readl(QLCNIC_MBX_FW(ahw, 0));
 -              if (fw_data &  QLCNIC_MBX_ASYNC_EVENT) {
 -                      __qlcnic_83xx_process_aen(adapter);
 -                      goto poll;
 -              }
 -              mbx_err_code = QLCNIC_MBX_STATUS(fw_data);
 -              rsp_num = QLCNIC_MBX_NUM_REGS(fw_data);
 -              opcode = QLCNIC_MBX_RSP(fw_data);
 -              qlcnic_83xx_get_mbx_data(adapter, cmd);
 -
 -              switch (mbx_err_code) {
 -              case QLCNIC_MBX_RSP_OK:
 -              case QLCNIC_MBX_PORT_RSP_OK:
 -                      rsp = QLCNIC_RCODE_SUCCESS;
 -                      break;
 -              default:
 -                      if (opcode == QLCNIC_CMD_CONFIG_MAC_VLAN) {
 -                              rsp = qlcnic_83xx_mac_rcode(adapter);
 -                              if (!rsp)
 -                                      goto out;
 -                      }
 +      switch (cmd_type) {
 +      case QLC_83XX_MBX_CMD_WAIT:
 +              if (!wait_for_completion_timeout(&cmd->completion, timeout)) {
                        dev_err(&adapter->pdev->dev,
 -                              "MBX command 0x%x failed with err:0x%x\n",
 -                              opcode, mbx_err_code);
 -                      rsp = mbx_err_code;
 -                      qlcnic_dump_mbx(adapter, cmd);
 -                      break;
 +                              "%s: Mailbox command timed out, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
 +                              __func__, opcode, cmd_type, ahw->pci_func,
 +                              ahw->op_mode);
 +                      flush_workqueue(mbx->work_q);
                }
 -              goto out;
 +              break;
 +      case QLC_83XX_MBX_CMD_NO_WAIT:
 +              return 0;
 +      case QLC_83XX_MBX_CMD_BUSY_WAIT:
 +              qlcnic_83xx_poll_for_mbx_completion(adapter, cmd);
 +              break;
 +      default:
 +              dev_err(&adapter->pdev->dev,
 +                      "%s: Invalid mailbox command, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
 +                      __func__, opcode, cmd_type, ahw->pci_func,
 +                      ahw->op_mode);
 +              qlcnic_83xx_detach_mailbox_work(adapter);
        }
  
 -      dev_err(&adapter->pdev->dev, "MBX command 0x%x timed out\n",
 -              QLCNIC_MBX_RSP(mbx_cmd));
 -      rsp = QLCNIC_RCODE_TIMEOUT;
 -out:
 -      /* clear fw mbx control register */
 -      QLCWRX(ahw, QLCNIC_FW_MBX_CTRL, QLCNIC_CLR_OWNER);
 -      spin_unlock_irqrestore(&adapter->ahw->mbx_lock, flags);
 -      return rsp;
 +      return cmd->rsp_opcode;
  }
  
  int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
        u32 temp;
        const struct qlcnic_mailbox_metadata *mbx_tbl;
  
 +      memset(mbx, 0, sizeof(struct qlcnic_cmd_args));
        mbx_tbl = qlcnic_83xx_mbx_tbl;
        size = ARRAY_SIZE(qlcnic_83xx_mbx_tbl);
        for (i = 0; i < size; i++) {
                        memset(mbx->rsp.arg, 0, sizeof(u32) * mbx->rsp.num);
                        temp = adapter->ahw->fw_hal_version << 29;
                        mbx->req.arg[0] = (type | (mbx->req.num << 16) | temp);
 +                      mbx->cmd_op = type;
                        return 0;
                }
        }
@@@ -906,23 -933,20 +905,23 @@@ void __qlcnic_83xx_process_aen(struct q
  
  static void qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
  {
 +      u32 resp, event, rsp_status = QLC_83XX_MBX_RESPONSE_ARRIVED;
        struct qlcnic_hardware_context *ahw = adapter->ahw;
 -      u32 resp, event;
 +      struct qlcnic_mailbox *mbx = ahw->mailbox;
        unsigned long flags;
  
 -      spin_lock_irqsave(&ahw->mbx_lock, flags);
 -
 +      spin_lock_irqsave(&mbx->aen_lock, flags);
        resp = QLCRDX(ahw, QLCNIC_FW_MBX_CTRL);
        if (resp & QLCNIC_SET_OWNER) {
                event = readl(QLCNIC_MBX_FW(ahw, 0));
 -              if (event &  QLCNIC_MBX_ASYNC_EVENT)
 +              if (event &  QLCNIC_MBX_ASYNC_EVENT) {
                        __qlcnic_83xx_process_aen(adapter);
 +              } else {
 +                      if (atomic_read(&mbx->rsp_status) != rsp_status)
 +                              qlcnic_83xx_notify_mbx_response(mbx);
 +              }
        }
 -
 -      spin_unlock_irqrestore(&ahw->mbx_lock, flags);
 +      spin_unlock_irqrestore(&mbx->aen_lock, flags);
  }
  
  static void qlcnic_83xx_mbx_poll_work(struct work_struct *work)
@@@ -945,7 -969,6 +944,7 @@@ void qlcnic_83xx_enable_mbx_poll(struc
                return;
  
        INIT_DELAYED_WORK(&adapter->mbx_poll_work, qlcnic_83xx_mbx_poll_work);
 +      queue_delayed_work(adapter->qlcnic_wq, &adapter->mbx_poll_work, 0);
  }
  
  void qlcnic_83xx_disable_mbx_poll(struct qlcnic_adapter *adapter)
@@@ -1332,10 -1355,8 +1331,10 @@@ static int qlcnic_83xx_diag_alloc_res(s
  
        if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) {
                /* disable and free mailbox interrupt */
 -              if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
 +              if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
 +                      qlcnic_83xx_enable_mbx_poll(adapter);
                        qlcnic_83xx_free_mbx_intr(adapter);
 +              }
                adapter->ahw->loopback_state = 0;
                adapter->ahw->hw_ops->setup_link_event(adapter, 1);
        }
@@@ -1356,8 -1377,6 +1355,8 @@@ static void qlcnic_83xx_diag_free_res(s
                for (ring = 0; ring < adapter->max_sds_rings; ring++) {
                        sds_ring = &adapter->recv_ctx->sds_rings[ring];
                        qlcnic_83xx_disable_intr(adapter, sds_ring);
 +                      if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
 +                              qlcnic_83xx_enable_mbx_poll(adapter);
                }
        }
  
        if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) {
                if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
                        err = qlcnic_83xx_setup_mbx_intr(adapter);
 +                      qlcnic_83xx_disable_mbx_poll(adapter);
                        if (err) {
                                dev_err(&adapter->pdev->dev,
                                        "%s: failed to setup mbx interrupt\n",
  
        if (netif_running(netdev))
                __qlcnic_up(adapter, netdev);
 +
 +      if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST &&
 +          !(adapter->flags & QLCNIC_MSIX_ENABLED))
 +              qlcnic_83xx_disable_mbx_poll(adapter);
  out:
        netif_device_attach(netdev);
  }
@@@ -1605,33 -1619,26 +1604,33 @@@ static void qlcnic_83xx_set_interface_i
  
  int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
  {
 -      int err;
 +      struct qlcnic_cmd_args *cmd = NULL;
        u32 temp = 0;
 -      struct qlcnic_cmd_args cmd;
 +      int err;
  
        if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED)
                return -EIO;
  
 -      err = qlcnic_alloc_mbx_args(&cmd, adapter,
 +      cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
 +      if (!cmd)
 +              return -ENOMEM;
 +
 +      err = qlcnic_alloc_mbx_args(cmd, adapter,
                                    QLCNIC_CMD_CONFIGURE_MAC_RX_MODE);
        if (err)
 -              return err;
 +              goto out;
  
 +      cmd->type = QLC_83XX_MBX_CMD_NO_WAIT;
        qlcnic_83xx_set_interface_id_promisc(adapter, &temp);
 -      cmd.req.arg[1] = (mode ? 1 : 0) | temp;
 -      err = qlcnic_issue_cmd(adapter, &cmd);
 -      if (err)
 -              dev_info(&adapter->pdev->dev,
 -                       "Promiscous mode config failed\n");
 +      cmd->req.arg[1] = (mode ? 1 : 0) | temp;
 +      err = qlcnic_issue_cmd(adapter, cmd);
 +      if (!err)
 +              return err;
  
 -      qlcnic_free_mbx_args(&cmd);
 +      qlcnic_free_mbx_args(cmd);
 +
 +out:
 +      kfree(cmd);
        return err;
  }
  
@@@ -1644,7 -1651,7 +1643,7 @@@ int qlcnic_83xx_loopback_test(struct ne
        if (ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
                netdev_warn(netdev,
                            "Loopback test not supported in non privileged mode\n");
 -              return ret;
 +              return -ENOTSUPP;
        }
  
        if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
        /* Poll for link up event before running traffic */
        do {
                msleep(QLC_83XX_LB_MSLEEP_COUNT);
 -              if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
 -                      qlcnic_83xx_process_aen(adapter);
  
                if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
                        netdev_info(netdev,
                                    "Device is resetting, free LB test resources\n");
 -                      ret = -EIO;
 +                      ret = -EBUSY;
                        goto free_diag_res;
                }
                if (loop++ > QLC_83XX_LB_WAIT_COUNT) {
                        netdev_info(netdev,
                                    "Firmware didn't sent link up event to loopback request\n");
 -                      ret = -QLCNIC_FW_NOT_RESPOND;
 +                      ret = -ETIMEDOUT;
                        qlcnic_83xx_clear_lb_mode(adapter, mode);
                        goto free_diag_res;
                }
@@@ -1719,15 -1728,6 +1718,15 @@@ int qlcnic_83xx_set_lb_mode(struct qlcn
                return status;
  
        config = ahw->port_config;
 +
 +      /* Check if port is already in loopback mode */
 +      if ((config & QLC_83XX_CFG_LOOPBACK_HSS) ||
 +          (config & QLC_83XX_CFG_LOOPBACK_EXT)) {
 +              netdev_err(netdev,
 +                         "Port already in Loopback mode.\n");
 +              return -EINPROGRESS;
 +      }
 +
        set_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
  
        if (mode == QLCNIC_ILB_MODE)
        /* Wait for Link and IDC Completion AEN */
        do {
                msleep(QLC_83XX_LB_MSLEEP_COUNT);
 -              if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
 -                      qlcnic_83xx_process_aen(adapter);
  
                if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
                        netdev_info(netdev,
                                    "Device is resetting, free LB test resources\n");
                        clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
 -                      return -EIO;
 +                      return -EBUSY;
                }
                if (loop++ > QLC_83XX_LB_WAIT_COUNT) {
                        netdev_err(netdev,
                                   "Did not receive IDC completion AEN\n");
                        clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
                        qlcnic_83xx_clear_lb_mode(adapter, mode);
 -                      return -EIO;
 +                      return -ETIMEDOUT;
                }
        } while (test_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status));
  
@@@ -1795,19 -1797,21 +1794,19 @@@ int qlcnic_83xx_clear_lb_mode(struct ql
        /* Wait for Link and IDC Completion AEN */
        do {
                msleep(QLC_83XX_LB_MSLEEP_COUNT);
 -              if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
 -                      qlcnic_83xx_process_aen(adapter);
  
                if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
                        netdev_info(netdev,
                                    "Device is resetting, free LB test resources\n");
                        clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
 -                      return -EIO;
 +                      return -EBUSY;
                }
  
                if (loop++ > QLC_83XX_LB_WAIT_COUNT) {
                        netdev_err(netdev,
                                   "Did not receive IDC completion AEN\n");
                        clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
 -                      return -EIO;
 +                      return -ETIMEDOUT;
                }
        } while (test_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status));
  
@@@ -1946,31 -1950,25 +1945,31 @@@ static void qlcnic_83xx_set_interface_i
  int qlcnic_83xx_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
                                   u16 vlan_id, u8 op)
  {
 -      int err;
 -      u32 *buf, temp = 0;
 -      struct qlcnic_cmd_args cmd;
 +      struct qlcnic_cmd_args *cmd = NULL;
        struct qlcnic_macvlan_mbx mv;
 +      u32 *buf, temp = 0;
 +      int err;
  
        if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED)
                return -EIO;
  
 -      err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_MAC_VLAN);
 +      cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
 +      if (!cmd)
 +              return -ENOMEM;
 +
 +      err = qlcnic_alloc_mbx_args(cmd, adapter, QLCNIC_CMD_CONFIG_MAC_VLAN);
        if (err)
 -              return err;
 +              goto out;
 +
 +      cmd->type = QLC_83XX_MBX_CMD_NO_WAIT;
  
        if (vlan_id)
                op = (op == QLCNIC_MAC_ADD || op == QLCNIC_MAC_VLAN_ADD) ?
                     QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_VLAN_DEL;
  
 -      cmd.req.arg[1] = op | (1 << 8);
 +      cmd->req.arg[1] = op | (1 << 8);
        qlcnic_83xx_set_interface_id_macaddr(adapter, &temp);
 -      cmd.req.arg[1] |= temp;
 +      cmd->req.arg[1] |= temp;
        mv.vlan = vlan_id;
        mv.mac_addr0 = addr[0];
        mv.mac_addr1 = addr[1];
        mv.mac_addr3 = addr[3];
        mv.mac_addr4 = addr[4];
        mv.mac_addr5 = addr[5];
 -      buf = &cmd.req.arg[2];
 +      buf = &cmd->req.arg[2];
        memcpy(buf, &mv, sizeof(struct qlcnic_macvlan_mbx));
 -      err = qlcnic_issue_cmd(adapter, &cmd);
 -      if (err)
 -              dev_err(&adapter->pdev->dev,
 -                      "MAC-VLAN %s to CAM failed, err=%d.\n",
 -                      ((op == 1) ? "add " : "delete "), err);
 -      qlcnic_free_mbx_args(&cmd);
 +      err = qlcnic_issue_cmd(adapter, cmd);
 +      if (!err)
 +              return err;
 +
 +      qlcnic_free_mbx_args(cmd);
 +out:
 +      kfree(cmd);
        return err;
  }
  
@@@ -2077,30 -2074,35 +2076,37 @@@ void qlcnic_83xx_config_intr_coal(struc
  static void qlcnic_83xx_handle_link_aen(struct qlcnic_adapter *adapter,
                                        u32 data[])
  {
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
        u8 link_status, duplex;
        /* link speed */
        link_status = LSB(data[3]) & 1;
-       adapter->ahw->link_speed = MSW(data[2]);
-       adapter->ahw->link_autoneg = MSB(MSW(data[3]));
-       adapter->ahw->module_type = MSB(LSW(data[3]));
-       duplex = LSB(MSW(data[3]));
-       if (duplex)
-               adapter->ahw->link_duplex = DUPLEX_FULL;
-       else
-               adapter->ahw->link_duplex = DUPLEX_HALF;
-       adapter->ahw->has_link_events = 1;
+       if (link_status) {
+               ahw->link_speed = MSW(data[2]);
+               duplex = LSB(MSW(data[3]));
+               if (duplex)
+                       ahw->link_duplex = DUPLEX_FULL;
+               else
+                       ahw->link_duplex = DUPLEX_HALF;
+       } else {
+               ahw->link_speed = SPEED_UNKNOWN;
+               ahw->link_duplex = DUPLEX_UNKNOWN;
+       }
+       ahw->link_autoneg = MSB(MSW(data[3]));
+       ahw->module_type = MSB(LSW(data[3]));
+       ahw->has_link_events = 1;
        qlcnic_advert_link_change(adapter, link_status);
  }
  
  irqreturn_t qlcnic_83xx_handle_aen(int irq, void *data)
  {
        struct qlcnic_adapter *adapter = data;
 -      unsigned long flags;
 +      struct qlcnic_mailbox *mbx;
        u32 mask, resp, event;
 +      unsigned long flags;
  
 -      spin_lock_irqsave(&adapter->ahw->mbx_lock, flags);
 +      mbx = adapter->ahw->mailbox;
 +      spin_lock_irqsave(&mbx->aen_lock, flags);
        resp = QLCRDX(adapter->ahw, QLCNIC_FW_MBX_CTRL);
        if (!(resp & QLCNIC_SET_OWNER))
                goto out;
        event = readl(QLCNIC_MBX_FW(adapter->ahw, 0));
        if (event &  QLCNIC_MBX_ASYNC_EVENT)
                __qlcnic_83xx_process_aen(adapter);
 +      else
 +              qlcnic_83xx_notify_mbx_response(mbx);
 +
  out:
        mask = QLCRDX(adapter->ahw, QLCNIC_DEF_INT_MASK);
        writel(0, adapter->ahw->pci_base0 + mask);
 -      spin_unlock_irqrestore(&adapter->ahw->mbx_lock, flags);
 -
 +      spin_unlock_irqrestore(&mbx->aen_lock, flags);
        return IRQ_HANDLED;
  }
  
@@@ -2390,9 -2390,9 +2396,9 @@@ int qlcnic_83xx_lockless_flash_read32(s
                                      u32 flash_addr, u8 *p_data,
                                      int count)
  {
-       int i, ret;
-       u32 word, range, flash_offset, addr = flash_addr;
+       u32 word, range, flash_offset, addr = flash_addr, ret;
        ulong indirect_add, direct_window;
+       int i, err = 0;
  
        flash_offset = addr & (QLCNIC_FLASH_SECTOR_SIZE - 1);
        if (addr & 0x3) {
                /* Multi sector read */
                for (i = 0; i < count; i++) {
                        indirect_add = QLC_83XX_FLASH_DIRECT_DATA(addr);
-                       ret = qlcnic_83xx_rd_reg_indirect(adapter,
-                                                         indirect_add);
-                       if (ret == -EIO)
-                               return -EIO;
+                       ret = QLCRD32(adapter, indirect_add, &err);
+                       if (err == -EIO)
+                               return err;
  
                        word = ret;
                        *(u32 *)p_data  = word;
                /* Single sector read */
                for (i = 0; i < count; i++) {
                        indirect_add = QLC_83XX_FLASH_DIRECT_DATA(addr);
-                       ret = qlcnic_83xx_rd_reg_indirect(adapter,
-                                                         indirect_add);
-                       if (ret == -EIO)
-                               return -EIO;
+                       ret = QLCRD32(adapter, indirect_add, &err);
+                       if (err == -EIO)
+                               return err;
  
                        word = ret;
                        *(u32 *)p_data  = word;
@@@ -2453,10 -2451,13 +2457,13 @@@ static int qlcnic_83xx_poll_flash_statu
  {
        u32 status;
        int retries = QLC_83XX_FLASH_READ_RETRY_COUNT;
+       int err = 0;
  
        do {
-               status = qlcnic_83xx_rd_reg_indirect(adapter,
-                                                    QLC_83XX_FLASH_STATUS);
+               status = QLCRD32(adapter, QLC_83XX_FLASH_STATUS, &err);
+               if (err == -EIO)
+                       return err;
                if ((status & QLC_83XX_FLASH_STATUS_READY) ==
                    QLC_83XX_FLASH_STATUS_READY)
                        break;
@@@ -2508,7 -2509,8 +2515,8 @@@ int qlcnic_83xx_disable_flash_write(str
  
  int qlcnic_83xx_read_flash_mfg_id(struct qlcnic_adapter *adapter)
  {
-       int ret, mfg_id;
+       int ret, err = 0;
+       u32 mfg_id;
  
        if (qlcnic_83xx_lock_flash(adapter))
                return -EIO;
                return -EIO;
        }
  
-       mfg_id = qlcnic_83xx_rd_reg_indirect(adapter, QLC_83XX_FLASH_RDDATA);
-       if (mfg_id == -EIO)
-               return -EIO;
+       mfg_id = QLCRD32(adapter, QLC_83XX_FLASH_RDDATA, &err);
+       if (err == -EIO) {
+               qlcnic_83xx_unlock_flash(adapter);
+               return err;
+       }
  
        adapter->flash_mfg_id = (mfg_id & 0xFF);
        qlcnic_83xx_unlock_flash(adapter);
@@@ -2642,7 -2646,7 +2652,7 @@@ int qlcnic_83xx_flash_bulk_write(struc
                                 u32 *p_data, int count)
  {
        u32 temp;
-       int ret = -EIO;
+       int ret = -EIO, err = 0;
  
        if ((count < QLC_83XX_FLASH_WRITE_MIN) ||
            (count > QLC_83XX_FLASH_WRITE_MAX)) {
                return -EIO;
        }
  
-       temp = qlcnic_83xx_rd_reg_indirect(adapter,
-                                          QLC_83XX_FLASH_SPI_CONTROL);
+       temp = QLCRD32(adapter, QLC_83XX_FLASH_SPI_CONTROL, &err);
+       if (err == -EIO)
+               return err;
        qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_SPI_CONTROL,
                                     (temp | QLC_83XX_FLASH_SPI_CTRL));
        qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR,
                return -EIO;
        }
  
-       ret = qlcnic_83xx_rd_reg_indirect(adapter, QLC_83XX_FLASH_SPI_STATUS);
+       ret = QLCRD32(adapter, QLC_83XX_FLASH_SPI_STATUS, &err);
+       if (err == -EIO)
+               return err;
        if ((ret & QLC_83XX_FLASH_SPI_CTRL) == QLC_83XX_FLASH_SPI_CTRL) {
                dev_err(&adapter->pdev->dev, "%s: failed at %d\n",
                        __func__, __LINE__);
                /* Operation failed, clear error bit */
-               temp = qlcnic_83xx_rd_reg_indirect(adapter,
-                                                  QLC_83XX_FLASH_SPI_CONTROL);
+               temp = QLCRD32(adapter, QLC_83XX_FLASH_SPI_CONTROL, &err);
+               if (err == -EIO)
+                       return err;
                qlcnic_83xx_wrt_reg_indirect(adapter,
                                             QLC_83XX_FLASH_SPI_CONTROL,
                                             (temp | QLC_83XX_FLASH_SPI_CTRL));
@@@ -2829,6 -2840,7 +2846,7 @@@ int qlcnic_83xx_ms_mem_write128(struct 
  {
        int i, j, ret = 0;
        u32 temp;
+       int err = 0;
  
        /* Check alignment */
        if (addr & 0xF)
                                             QLCNIC_TA_WRITE_START);
  
                for (j = 0; j < MAX_CTL_CHECK; j++) {
-                       temp = qlcnic_83xx_rd_reg_indirect(adapter,
-                                                          QLCNIC_MS_CTRL);
+                       temp = QLCRD32(adapter, QLCNIC_MS_CTRL, &err);
+                       if (err == -EIO) {
+                               mutex_unlock(&adapter->ahw->mem_lock);
+                               return err;
+                       }
                        if ((temp & TA_CTL_BUSY) == 0)
                                break;
                }
  int qlcnic_83xx_flash_read32(struct qlcnic_adapter *adapter, u32 flash_addr,
                             u8 *p_data, int count)
  {
-       int i, ret;
-       u32 word, addr = flash_addr;
+       u32 word, addr = flash_addr, ret;
        ulong  indirect_addr;
+       int i, err = 0;
  
        if (qlcnic_83xx_lock_flash(adapter) != 0)
                return -EIO;
                }
  
                indirect_addr = QLC_83XX_FLASH_DIRECT_DATA(addr);
-               ret = qlcnic_83xx_rd_reg_indirect(adapter,
-                                                 indirect_addr);
-               if (ret == -EIO)
-                       return -EIO;
+               ret = QLCRD32(adapter, indirect_addr, &err);
+               if (err == -EIO)
+                       return err;
                word = ret;
                *(u32 *)p_data  = word;
                p_data = p_data + 4;
@@@ -3020,8 -3036,8 +3042,8 @@@ int qlcnic_83xx_get_settings(struct qlc
        }
  
        if (ahw->port_type == QLCNIC_XGBE) {
-               ecmd->supported = SUPPORTED_1000baseT_Full;
-               ecmd->advertising = ADVERTISED_1000baseT_Full;
+               ecmd->supported = SUPPORTED_10000baseT_Full;
+               ecmd->advertising = ADVERTISED_10000baseT_Full;
        } else {
                ecmd->supported = (SUPPORTED_10baseT_Half |
                                   SUPPORTED_10baseT_Full |
@@@ -3375,7 -3391,8 +3397,8 @@@ int qlcnic_83xx_set_pauseparam(struct q
  
  static int qlcnic_83xx_read_flash_status_reg(struct qlcnic_adapter *adapter)
  {
-       int ret;
+       int ret, err = 0;
+       u32 temp;
  
        qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR,
                                     QLC_83XX_FLASH_OEM_READ_SIG);
        if (ret)
                return -EIO;
  
-       ret = qlcnic_83xx_rd_reg_indirect(adapter, QLC_83XX_FLASH_RDDATA);
-       return ret & 0xFF;
+       temp = QLCRD32(adapter, QLC_83XX_FLASH_RDDATA, &err);
+       if (err == -EIO)
+               return err;
+       return temp & 0xFF;
  }
  
  int qlcnic_83xx_flash_test(struct qlcnic_adapter *adapter)
@@@ -3452,300 -3472,3 +3478,300 @@@ int qlcnic_83xx_resume(struct qlcnic_ad
                             idc->delay);
        return err;
  }
 +
 +void qlcnic_83xx_reinit_mbx_work(struct qlcnic_mailbox *mbx)
 +{
 +      INIT_COMPLETION(mbx->completion);
 +      set_bit(QLC_83XX_MBX_READY, &mbx->status);
 +}
 +
 +void qlcnic_83xx_free_mailbox(struct qlcnic_mailbox *mbx)
 +{
 +      destroy_workqueue(mbx->work_q);
 +      kfree(mbx);
 +}
 +
 +static inline void
 +qlcnic_83xx_notify_cmd_completion(struct qlcnic_adapter *adapter,
 +                                struct qlcnic_cmd_args *cmd)
 +{
 +      atomic_set(&cmd->rsp_status, QLC_83XX_MBX_RESPONSE_ARRIVED);
 +
 +      if (cmd->type == QLC_83XX_MBX_CMD_NO_WAIT) {
 +              qlcnic_free_mbx_args(cmd);
 +              kfree(cmd);
 +              return;
 +      }
 +      complete(&cmd->completion);
 +}
 +
 +static inline void qlcnic_83xx_flush_mbx_queue(struct qlcnic_adapter *adapter)
 +{
 +      struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
 +      struct list_head *head = &mbx->cmd_q;
 +      struct qlcnic_cmd_args *cmd = NULL;
 +
 +      spin_lock(&mbx->queue_lock);
 +
 +      while (!list_empty(head)) {
 +              cmd = list_entry(head->next, struct qlcnic_cmd_args, list);
 +              list_del(&cmd->list);
 +              mbx->num_cmds--;
 +              qlcnic_83xx_notify_cmd_completion(adapter, cmd);
 +      }
 +
 +      spin_unlock(&mbx->queue_lock);
 +}
 +
 +static inline int qlcnic_83xx_check_mbx_status(struct qlcnic_adapter *adapter)
 +{
 +      struct qlcnic_hardware_context *ahw = adapter->ahw;
 +      struct qlcnic_mailbox *mbx = ahw->mailbox;
 +      u32 host_mbx_ctrl;
 +
 +      if (!test_bit(QLC_83XX_MBX_READY, &mbx->status))
 +              return -EBUSY;
 +
 +      host_mbx_ctrl = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL);
 +      if (host_mbx_ctrl) {
 +              ahw->idc.collect_dump = 1;
 +              return -EIO;
 +      }
 +
 +      return 0;
 +}
 +
 +static inline void qlcnic_83xx_signal_mbx_cmd(struct qlcnic_adapter *adapter,
 +                                            u8 issue_cmd)
 +{
 +      if (issue_cmd)
 +              QLCWRX(adapter->ahw, QLCNIC_HOST_MBX_CTRL, QLCNIC_SET_OWNER);
 +      else
 +              QLCWRX(adapter->ahw, QLCNIC_FW_MBX_CTRL, QLCNIC_CLR_OWNER);
 +}
 +
 +static inline void qlcnic_83xx_dequeue_mbx_cmd(struct qlcnic_adapter *adapter,
 +                                             struct qlcnic_cmd_args *cmd)
 +{
 +      struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
 +
 +      spin_lock(&mbx->queue_lock);
 +
 +      list_del(&cmd->list);
 +      mbx->num_cmds--;
 +
 +      spin_unlock(&mbx->queue_lock);
 +
 +      qlcnic_83xx_notify_cmd_completion(adapter, cmd);
 +}
 +
 +static void qlcnic_83xx_encode_mbx_cmd(struct qlcnic_adapter *adapter,
 +                                     struct qlcnic_cmd_args *cmd)
 +{
 +      u32 mbx_cmd, fw_hal_version, hdr_size, total_size, tmp;
 +      struct qlcnic_hardware_context *ahw = adapter->ahw;
 +      int i, j;
 +
 +      if (cmd->op_type != QLC_83XX_MBX_POST_BC_OP) {
 +              mbx_cmd = cmd->req.arg[0];
 +              writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 0));
 +              for (i = 1; i < cmd->req.num; i++)
 +                      writel(cmd->req.arg[i], QLCNIC_MBX_HOST(ahw, i));
 +      } else {
 +              fw_hal_version = ahw->fw_hal_version;
 +              hdr_size = sizeof(struct qlcnic_bc_hdr) / sizeof(u32);
 +              total_size = cmd->pay_size + hdr_size;
 +              tmp = QLCNIC_CMD_BC_EVENT_SETUP | total_size << 16;
 +              mbx_cmd = tmp | fw_hal_version << 29;
 +              writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 0));
 +
 +              /* Back channel specific operations bits */
 +              mbx_cmd = 0x1 | 1 << 4;
 +
 +              if (qlcnic_sriov_pf_check(adapter))
 +                      mbx_cmd |= cmd->func_num << 5;
 +
 +              writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 1));
 +
 +              for (i = 2, j = 0; j < hdr_size; i++, j++)
 +                      writel(*(cmd->hdr++), QLCNIC_MBX_HOST(ahw, i));
 +              for (j = 0; j < cmd->pay_size; j++, i++)
 +                      writel(*(cmd->pay++), QLCNIC_MBX_HOST(ahw, i));
 +      }
 +}
 +
 +void qlcnic_83xx_detach_mailbox_work(struct qlcnic_adapter *adapter)
 +{
 +      struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
 +
 +      clear_bit(QLC_83XX_MBX_READY, &mbx->status);
 +      complete(&mbx->completion);
 +      cancel_work_sync(&mbx->work);
 +      flush_workqueue(mbx->work_q);
 +      qlcnic_83xx_flush_mbx_queue(adapter);
 +}
 +
 +static inline int qlcnic_83xx_enqueue_mbx_cmd(struct qlcnic_adapter *adapter,
 +                                            struct qlcnic_cmd_args *cmd,
 +                                            unsigned long *timeout)
 +{
 +      struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
 +
 +      if (test_bit(QLC_83XX_MBX_READY, &mbx->status)) {
 +              atomic_set(&cmd->rsp_status, QLC_83XX_MBX_RESPONSE_WAIT);
 +              init_completion(&cmd->completion);
 +              cmd->rsp_opcode = QLC_83XX_MBX_RESPONSE_UNKNOWN;
 +
 +              spin_lock(&mbx->queue_lock);
 +
 +              list_add_tail(&cmd->list, &mbx->cmd_q);
 +              mbx->num_cmds++;
 +              cmd->total_cmds = mbx->num_cmds;
 +              *timeout = cmd->total_cmds * QLC_83XX_MBX_TIMEOUT;
 +              queue_work(mbx->work_q, &mbx->work);
 +
 +              spin_unlock(&mbx->queue_lock);
 +
 +              return 0;
 +      }
 +
 +      return -EBUSY;
 +}
 +
 +static inline int qlcnic_83xx_check_mac_rcode(struct qlcnic_adapter *adapter,
 +                                            struct qlcnic_cmd_args *cmd)
 +{
 +      u8 mac_cmd_rcode;
 +      u32 fw_data;
 +
 +      if (cmd->cmd_op == QLCNIC_CMD_CONFIG_MAC_VLAN) {
 +              fw_data = readl(QLCNIC_MBX_FW(adapter->ahw, 2));
 +              mac_cmd_rcode = (u8)fw_data;
 +              if (mac_cmd_rcode == QLC_83XX_NO_NIC_RESOURCE ||
 +                  mac_cmd_rcode == QLC_83XX_MAC_PRESENT ||
 +                  mac_cmd_rcode == QLC_83XX_MAC_ABSENT) {
 +                      cmd->rsp_opcode = QLCNIC_RCODE_SUCCESS;
 +                      return QLCNIC_RCODE_SUCCESS;
 +              }
 +      }
 +
 +      return -EINVAL;
 +}
 +
 +static void qlcnic_83xx_decode_mbx_rsp(struct qlcnic_adapter *adapter,
 +                                     struct qlcnic_cmd_args *cmd)
 +{
 +      struct qlcnic_hardware_context *ahw = adapter->ahw;
 +      struct device *dev = &adapter->pdev->dev;
 +      u8 mbx_err_code;
 +      u32 fw_data;
 +
 +      fw_data = readl(QLCNIC_MBX_FW(ahw, 0));
 +      mbx_err_code = QLCNIC_MBX_STATUS(fw_data);
 +      qlcnic_83xx_get_mbx_data(adapter, cmd);
 +
 +      switch (mbx_err_code) {
 +      case QLCNIC_MBX_RSP_OK:
 +      case QLCNIC_MBX_PORT_RSP_OK:
 +              cmd->rsp_opcode = QLCNIC_RCODE_SUCCESS;
 +              break;
 +      default:
 +              if (!qlcnic_83xx_check_mac_rcode(adapter, cmd))
 +                      break;
 +
 +              dev_err(dev, "%s: Mailbox command failed, opcode=0x%x, cmd_type=0x%x, func=0x%x, op_mode=0x%x, error=0x%x\n",
 +                      __func__, cmd->cmd_op, cmd->type, ahw->pci_func,
 +                      ahw->op_mode, mbx_err_code);
 +              cmd->rsp_opcode = QLC_83XX_MBX_RESPONSE_FAILED;
 +              qlcnic_dump_mbx(adapter, cmd);
 +      }
 +
 +      return;
 +}
 +
 +static void qlcnic_83xx_mailbox_worker(struct work_struct *work)
 +{
 +      struct qlcnic_mailbox *mbx = container_of(work, struct qlcnic_mailbox,
 +                                                work);
 +      struct qlcnic_adapter *adapter = mbx->adapter;
 +      struct qlcnic_mbx_ops *mbx_ops = mbx->ops;
 +      struct device *dev = &adapter->pdev->dev;
 +      atomic_t *rsp_status = &mbx->rsp_status;
 +      struct list_head *head = &mbx->cmd_q;
 +      struct qlcnic_hardware_context *ahw;
 +      struct qlcnic_cmd_args *cmd = NULL;
 +
 +      ahw = adapter->ahw;
 +
 +      while (true) {
 +              if (qlcnic_83xx_check_mbx_status(adapter))
 +                      return;
 +
 +              atomic_set(rsp_status, QLC_83XX_MBX_RESPONSE_WAIT);
 +
 +              spin_lock(&mbx->queue_lock);
 +
 +              if (list_empty(head)) {
 +                      spin_unlock(&mbx->queue_lock);
 +                      return;
 +              }
 +              cmd = list_entry(head->next, struct qlcnic_cmd_args, list);
 +
 +              spin_unlock(&mbx->queue_lock);
 +
 +              mbx_ops->encode_cmd(adapter, cmd);
 +              mbx_ops->nofity_fw(adapter, QLC_83XX_MBX_REQUEST);
 +
 +              if (wait_for_completion_timeout(&mbx->completion,
 +                                              QLC_83XX_MBX_TIMEOUT)) {
 +                      mbx_ops->decode_resp(adapter, cmd);
 +                      mbx_ops->nofity_fw(adapter, QLC_83XX_MBX_COMPLETION);
 +              } else {
 +                      dev_err(dev, "%s: Mailbox command timeout, opcode=0x%x, cmd_type=0x%x, func=0x%x, op_mode=0x%x\n",
 +                              __func__, cmd->cmd_op, cmd->type, ahw->pci_func,
 +                              ahw->op_mode);
 +                      clear_bit(QLC_83XX_MBX_READY, &mbx->status);
 +                      qlcnic_83xx_idc_request_reset(adapter,
 +                                                    QLCNIC_FORCE_FW_DUMP_KEY);
 +                      cmd->rsp_opcode = QLCNIC_RCODE_TIMEOUT;
 +              }
 +              mbx_ops->dequeue_cmd(adapter, cmd);
 +      }
 +}
 +
 +static struct qlcnic_mbx_ops qlcnic_83xx_mbx_ops = {
 +      .enqueue_cmd    = qlcnic_83xx_enqueue_mbx_cmd,
 +      .dequeue_cmd    = qlcnic_83xx_dequeue_mbx_cmd,
 +      .decode_resp    = qlcnic_83xx_decode_mbx_rsp,
 +      .encode_cmd     = qlcnic_83xx_encode_mbx_cmd,
 +      .nofity_fw      = qlcnic_83xx_signal_mbx_cmd,
 +};
 +
 +int qlcnic_83xx_init_mailbox_work(struct qlcnic_adapter *adapter)
 +{
 +      struct qlcnic_hardware_context *ahw = adapter->ahw;
 +      struct qlcnic_mailbox *mbx;
 +
 +      ahw->mailbox = kzalloc(sizeof(*mbx), GFP_KERNEL);
 +      if (!ahw->mailbox)
 +              return -ENOMEM;
 +
 +      mbx = ahw->mailbox;
 +      mbx->ops = &qlcnic_83xx_mbx_ops;
 +      mbx->adapter = adapter;
 +
 +      spin_lock_init(&mbx->queue_lock);
 +      spin_lock_init(&mbx->aen_lock);
 +      INIT_LIST_HEAD(&mbx->cmd_q);
 +      init_completion(&mbx->completion);
 +
 +      mbx->work_q = create_singlethread_workqueue("qlcnic_mailbox");
 +      if (mbx->work_q == NULL) {
 +              kfree(mbx);
 +              return -ENOMEM;
 +      }
 +
 +      INIT_WORK(&mbx->work, qlcnic_83xx_mailbox_worker);
 +      set_bit(QLC_83XX_MBX_READY, &mbx->status);
 +      return 0;
 +}
  
  #define QLC_83XX_MAX_RESET_SEQ_ENTRIES        16
  
 +#define QLC_83XX_MBX_POST_BC_OP               0x1
 +#define QLC_83XX_MBX_COMPLETION               0x0
 +#define QLC_83XX_MBX_REQUEST          0x1
 +
 +#define QLC_83XX_MBX_TIMEOUT          (5 * HZ)
 +#define QLC_83XX_MBX_CMD_LOOP         5000000
 +
  /* status descriptor mailbox data
   * @phy_addr_{low|high}: physical address of buffer
   * @sds_ring_size: buffer size
@@@ -456,20 -449,6 +456,20 @@@ enum qlcnic_83xx_states 
  #define QLC_83xx_FLASH_MAX_WAIT_USEC          100
  #define QLC_83XX_FLASH_LOCK_TIMEOUT           10000
  
 +enum qlc_83xx_mbx_cmd_type {
 +      QLC_83XX_MBX_CMD_WAIT = 0,
 +      QLC_83XX_MBX_CMD_NO_WAIT,
 +      QLC_83XX_MBX_CMD_BUSY_WAIT,
 +};
 +
 +enum qlc_83xx_mbx_response_states {
 +      QLC_83XX_MBX_RESPONSE_WAIT = 0,
 +      QLC_83XX_MBX_RESPONSE_ARRIVED,
 +};
 +
 +#define QLC_83XX_MBX_RESPONSE_FAILED  0x2
 +#define QLC_83XX_MBX_RESPONSE_UNKNOWN 0x3
 +
  /* Additional registers in 83xx */
  enum qlc_83xx_ext_regs {
        QLCNIC_GLOBAL_RESET = 0,
  
  /* 83xx functions */
  int qlcnic_83xx_get_fw_version(struct qlcnic_adapter *);
 -int qlcnic_83xx_mbx_op(struct qlcnic_adapter *, struct qlcnic_cmd_args *);
 +int qlcnic_83xx_issue_cmd(struct qlcnic_adapter *, struct qlcnic_cmd_args *);
  int qlcnic_83xx_setup_intr(struct qlcnic_adapter *, u8);
  void qlcnic_83xx_get_func_no(struct qlcnic_adapter *);
  int qlcnic_83xx_cam_lock(struct qlcnic_adapter *);
@@@ -529,7 -508,7 +529,7 @@@ void qlcnic_83xx_add_sysfs(struct qlcni
  void qlcnic_83xx_remove_sysfs(struct qlcnic_adapter *);
  void qlcnic_83xx_write_crb(struct qlcnic_adapter *, char *, loff_t, size_t);
  void qlcnic_83xx_read_crb(struct qlcnic_adapter *, char *, loff_t, size_t);
- int qlcnic_83xx_rd_reg_indirect(struct qlcnic_adapter *, ulong);
+ int qlcnic_83xx_rd_reg_indirect(struct qlcnic_adapter *, ulong, int *);
  int qlcnic_83xx_wrt_reg_indirect(struct qlcnic_adapter *, ulong, u32);
  void qlcnic_83xx_process_rcv_diag(struct qlcnic_adapter *, int, u64 []);
  int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *, u32);
@@@ -572,7 -551,7 +572,7 @@@ void qlcnic_set_npar_data(struct qlcnic
  void qlcnic_83xx_config_intr_coal(struct qlcnic_adapter *);
  irqreturn_t qlcnic_83xx_handle_aen(int, void *);
  int qlcnic_83xx_get_port_info(struct qlcnic_adapter *);
 -void qlcnic_83xx_enable_mbx_intrpt(struct qlcnic_adapter *);
 +void qlcnic_83xx_enable_mbx_interrupt(struct qlcnic_adapter *);
  void qlcnic_83xx_disable_mbx_intr(struct qlcnic_adapter *);
  irqreturn_t qlcnic_83xx_clear_legacy_intr(struct qlcnic_adapter *);
  irqreturn_t qlcnic_83xx_intr(int, void *);
@@@ -644,6 -623,8 +644,6 @@@ int qlcnic_83xx_set_led(struct net_devi
  int qlcnic_83xx_flash_test(struct qlcnic_adapter *);
  int qlcnic_83xx_enable_flash_write(struct qlcnic_adapter *);
  int qlcnic_83xx_disable_flash_write(struct qlcnic_adapter *);
 -u32 qlcnic_83xx_mac_rcode(struct qlcnic_adapter *);
 -u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *, u32 *);
  void qlcnic_83xx_enable_mbx_poll(struct qlcnic_adapter *);
  void qlcnic_83xx_disable_mbx_poll(struct qlcnic_adapter *);
  void qlcnic_83xx_set_mac_filter_count(struct qlcnic_adapter *);
@@@ -399,7 -399,6 +399,7 @@@ static void qlcnic_83xx_idc_detach_driv
        struct net_device *netdev = adapter->netdev;
  
        netif_device_detach(netdev);
 +      qlcnic_83xx_detach_mailbox_work(adapter);
  
        /* Disable mailbox interrupt */
        qlcnic_83xx_disable_mbx_intr(adapter);
@@@ -611,9 -610,6 +611,9 @@@ int qlcnic_83xx_idc_reattach_driver(str
  {
        int err;
  
 +      qlcnic_83xx_reinit_mbx_work(adapter->ahw->mailbox);
 +      qlcnic_83xx_enable_mbx_interrupt(adapter);
 +
        /* register for NIC IDC AEN Events */
        qlcnic_83xx_register_nic_idc_func(adapter, 1);
  
        if (err)
                return err;
  
 -      qlcnic_83xx_enable_mbx_intrpt(adapter);
 +      qlcnic_83xx_enable_mbx_interrupt(adapter);
  
        if (qlcnic_83xx_configure_opmode(adapter)) {
                qlcnic_83xx_idc_enter_failed_state(adapter, 1);
@@@ -644,6 -640,7 +644,6 @@@ static void qlcnic_83xx_idc_update_idc_
        struct qlcnic_hardware_context *ahw = adapter->ahw;
  
        qlcnic_83xx_idc_update_drv_presence_reg(adapter, 1, 1);
 -      set_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status);
        qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1);
        set_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status);
  
@@@ -813,10 -810,9 +813,10 @@@ static int qlcnic_83xx_idc_init_state(s
   **/
  static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter)
  {
 -      u32 val;
        struct qlcnic_hardware_context *ahw = adapter->ahw;
 +      struct qlcnic_mailbox *mbx = ahw->mailbox;
        int ret = 0;
 +      u32 val;
  
        /* Perform NIC configuration based ready state entry actions */
        if (ahw->idc.state_entry(adapter))
                        dev_err(&adapter->pdev->dev,
                                "Error: device temperature %d above limits\n",
                                adapter->ahw->temp);
 -                      clear_bit(QLC_83XX_MBX_READY, &ahw->idc.status);
 +                      clear_bit(QLC_83XX_MBX_READY, &mbx->status);
                        set_bit(__QLCNIC_RESETTING, &adapter->state);
                        qlcnic_83xx_idc_detach_driver(adapter);
                        qlcnic_83xx_idc_enter_failed_state(adapter, 1);
        if (ret) {
                adapter->flags |= QLCNIC_FW_HANG;
                if (!(val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY)) {
 -                      clear_bit(QLC_83XX_MBX_READY, &ahw->idc.status);
 +                      clear_bit(QLC_83XX_MBX_READY, &mbx->status);
                        set_bit(__QLCNIC_RESETTING, &adapter->state);
                        qlcnic_83xx_idc_enter_need_reset_state(adapter, 1);
                }
        }
  
        if ((val & QLC_83XX_IDC_GRACEFULL_RESET) || ahw->idc.collect_dump) {
 +              clear_bit(QLC_83XX_MBX_READY, &mbx->status);
 +
                /* Move to need reset state and prepare for reset */
                qlcnic_83xx_idc_enter_need_reset_state(adapter, 1);
                return ret;
   **/
  static int qlcnic_83xx_idc_need_reset_state(struct qlcnic_adapter *adapter)
  {
 +      struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
        int ret = 0;
  
        if (adapter->ahw->idc.prev_state != QLC_83XX_IDC_DEV_NEED_RESET) {
                qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1);
                set_bit(__QLCNIC_RESETTING, &adapter->state);
 -              clear_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status);
 +              clear_bit(QLC_83XX_MBX_READY, &mbx->status);
                if (adapter->ahw->nic_mode == QLC_83XX_VIRTUAL_NIC_MODE)
                        qlcnic_83xx_disable_vnic_mode(adapter, 1);
  
@@@ -1086,6 -1079,7 +1086,6 @@@ static void qlcnic_83xx_setup_idc_param
        adapter->ahw->idc.name = (char **)qlc_83xx_idc_states;
  
        clear_bit(__QLCNIC_RESETTING, &adapter->state);
 -      set_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status);
        set_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status);
  
        /* Check if reset recovery is disabled */
@@@ -1196,9 -1190,6 +1196,9 @@@ void qlcnic_83xx_idc_request_reset(stru
  {
        u32 val;
  
 +      if (qlcnic_sriov_vf_check(adapter))
 +              return;
 +
        if (qlcnic_83xx_lock_driver(adapter)) {
                dev_err(&adapter->pdev->dev,
                        "%s:failed, please retry\n", __func__);
@@@ -1312,8 -1303,11 +1312,11 @@@ static void qlcnic_83xx_dump_pause_cont
  {
        int i, j;
        u32 val = 0, val1 = 0, reg = 0;
+       int err = 0;
  
-       val = QLCRD32(adapter, QLC_83XX_SRE_SHIM_REG);
+       val = QLCRD32(adapter, QLC_83XX_SRE_SHIM_REG, &err);
+       if (err == -EIO)
+               return;
        dev_info(&adapter->pdev->dev, "SRE-Shim Ctrl:0x%x\n", val);
  
        for (j = 0; j < 2; j++) {
                        reg = QLC_83XX_PORT1_THRESHOLD;
                }
                for (i = 0; i < 8; i++) {
-                       val = QLCRD32(adapter, reg + (i * 0x4));
+                       val = QLCRD32(adapter, reg + (i * 0x4), &err);
+                       if (err == -EIO)
+                               return;
                        dev_info(&adapter->pdev->dev, "0x%x  ", val);
                }
                dev_info(&adapter->pdev->dev, "\n");
                        reg = QLC_83XX_PORT1_TC_MC_REG;
                }
                for (i = 0; i < 4; i++) {
-                       val = QLCRD32(adapter, reg + (i * 0x4));
-                        dev_info(&adapter->pdev->dev, "0x%x  ", val);
+                       val = QLCRD32(adapter, reg + (i * 0x4), &err);
+                       if (err == -EIO)
+                               return;
+                       dev_info(&adapter->pdev->dev, "0x%x  ", val);
                }
                dev_info(&adapter->pdev->dev, "\n");
        }
                        reg = QLC_83XX_PORT1_TC_STATS;
                }
                for (i = 7; i >= 0; i--) {
-                       val = QLCRD32(adapter, reg);
+                       val = QLCRD32(adapter, reg, &err);
+                       if (err == -EIO)
+                               return;
                        val &= ~(0x7 << 29);    /* Reset bits 29 to 31 */
                        QLCWR32(adapter, reg, (val | (i << 29)));
-                       val = QLCRD32(adapter, reg);
+                       val = QLCRD32(adapter, reg, &err);
+                       if (err == -EIO)
+                               return;
                        dev_info(&adapter->pdev->dev, "0x%x  ", val);
                }
                dev_info(&adapter->pdev->dev, "\n");
        }
  
-       val = QLCRD32(adapter, QLC_83XX_PORT2_IFB_THRESHOLD);
-       val1 = QLCRD32(adapter, QLC_83XX_PORT3_IFB_THRESHOLD);
+       val = QLCRD32(adapter, QLC_83XX_PORT2_IFB_THRESHOLD, &err);
+       if (err == -EIO)
+               return;
+       val1 = QLCRD32(adapter, QLC_83XX_PORT3_IFB_THRESHOLD, &err);
+       if (err == -EIO)
+               return;
        dev_info(&adapter->pdev->dev,
                 "IFB-Pause Thresholds: Port 2:0x%x, Port 3:0x%x\n",
                 val, val1);
@@@ -1434,7 -1440,7 +1449,7 @@@ static void qlcnic_83xx_take_eport_out_
  static int qlcnic_83xx_check_heartbeat(struct qlcnic_adapter *p_dev)
  {
        u32 heartbeat, peg_status;
-       int retries, ret = -EIO;
+       int retries, ret = -EIO, err = 0;
  
        retries = QLCNIC_HEARTBEAT_CHECK_RETRY_COUNT;
        p_dev->heartbeat = QLC_SHARED_REG_RD32(p_dev,
                         "PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,\n"
                         "PEG_NET_4_PC: 0x%x\n", peg_status,
                         QLC_SHARED_REG_RD32(p_dev, QLCNIC_PEG_HALT_STATUS2),
-                        QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_0),
-                        QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_1),
-                        QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_2),
-                        QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_3),
-                        QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_4));
+                        QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_0, &err),
+                        QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_1, &err),
+                        QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_2, &err),
+                        QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_3, &err),
+                        QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_4, &err));
  
                if (QLCNIC_FWERROR_CODE(peg_status) == 0x67)
                        dev_err(&p_dev->pdev->dev,
@@@ -1510,18 -1516,22 +1525,22 @@@ int qlcnic_83xx_check_hw_status(struct 
  static int qlcnic_83xx_poll_reg(struct qlcnic_adapter *p_dev, u32 addr,
                                int duration, u32 mask, u32 status)
  {
+       int timeout_error, err = 0;
        u32 value;
-       int timeout_error;
        u8 retries;
  
-       value = qlcnic_83xx_rd_reg_indirect(p_dev, addr);
+       value = QLCRD32(p_dev, addr, &err);
+       if (err == -EIO)
+               return err;
        retries = duration / 10;
  
        do {
                if ((value & mask) != status) {
                        timeout_error = 1;
                        msleep(duration / 10);
-                       value = qlcnic_83xx_rd_reg_indirect(p_dev, addr);
+                       value = QLCRD32(p_dev, addr, &err);
+                       if (err == -EIO)
+                               return err;
                } else {
                        timeout_error = 0;
                        break;
@@@ -1615,9 -1625,12 +1634,12 @@@ int qlcnic_83xx_get_reset_instruction_t
  static void qlcnic_83xx_read_write_crb_reg(struct qlcnic_adapter *p_dev,
                                           u32 raddr, u32 waddr)
  {
-       int value;
+       int err = 0;
+       u32 value;
  
-       value = qlcnic_83xx_rd_reg_indirect(p_dev, raddr);
+       value = QLCRD32(p_dev, raddr, &err);
+       if (err == -EIO)
+               return;
        qlcnic_83xx_wrt_reg_indirect(p_dev, waddr, value);
  }
  
@@@ -1626,12 -1639,16 +1648,16 @@@ static void qlcnic_83xx_rmw_crb_reg(str
                                    u32 raddr, u32 waddr,
                                    struct qlc_83xx_rmw *p_rmw_hdr)
  {
-       int value;
+       int err = 0;
+       u32 value;
  
-       if (p_rmw_hdr->index_a)
+       if (p_rmw_hdr->index_a) {
                value = p_dev->ahw->reset.array[p_rmw_hdr->index_a];
-       else
-               value = qlcnic_83xx_rd_reg_indirect(p_dev, raddr);
+       } else {
+               value = QLCRD32(p_dev, raddr, &err);
+               if (err == -EIO)
+                       return;
+       }
  
        value &= p_rmw_hdr->mask;
        value <<= p_rmw_hdr->shl;
@@@ -1684,7 -1701,7 +1710,7 @@@ static void qlcnic_83xx_poll_list(struc
        long delay;
        struct qlc_83xx_entry *entry;
        struct qlc_83xx_poll *poll;
-       int i;
+       int i, err = 0;
        unsigned long arg1, arg2;
  
        poll = (struct qlc_83xx_poll *)((char *)p_hdr +
                                                         arg1, delay,
                                                         poll->mask,
                                                         poll->status)){
-                                       qlcnic_83xx_rd_reg_indirect(p_dev,
-                                                                   arg1);
-                                       qlcnic_83xx_rd_reg_indirect(p_dev,
-                                                                   arg2);
+                                       QLCRD32(p_dev, arg1, &err);
+                                       if (err == -EIO)
+                                               return;
+                                       QLCRD32(p_dev, arg2, &err);
+                                       if (err == -EIO)
+                                               return;
                                }
                        }
                }
@@@ -1777,7 -1796,7 +1805,7 @@@ static void qlcnic_83xx_poll_read_list(
                                       struct qlc_83xx_entry_hdr *p_hdr)
  {
        long delay;
-       int index, i, j;
+       int index, i, j, err;
        struct qlc_83xx_quad_entry *entry;
        struct qlc_83xx_poll *poll;
        unsigned long addr;
                                                  poll->mask, poll->status)){
                                index = p_dev->ahw->reset.array_index;
                                addr = entry->dr_addr;
-                               j = qlcnic_83xx_rd_reg_indirect(p_dev, addr);
+                               j = QLCRD32(p_dev, addr, &err);
+                               if (err == -EIO)
+                                       return;
                                p_dev->ahw->reset.array[index++] = j;
  
                                if (index == QLC_83XX_MAX_RESET_SEQ_ENTRIES)
@@@ -2119,72 -2141,40 +2150,72 @@@ static void qlcnic_83xx_clear_function_
  int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac)
  {
        struct qlcnic_hardware_context *ahw = adapter->ahw;
 +      int err = 0;
  
 -      if (qlcnic_sriov_vf_check(adapter))
 -              return qlcnic_sriov_vf_init(adapter, pci_using_dac);
 +      ahw->msix_supported = !!qlcnic_use_msi_x;
 +      err = qlcnic_83xx_init_mailbox_work(adapter);
 +      if (err)
 +              goto exit;
  
 -      if (qlcnic_83xx_check_hw_status(adapter))
 -              return -EIO;
 +      if (qlcnic_sriov_vf_check(adapter)) {
 +              err = qlcnic_sriov_vf_init(adapter, pci_using_dac);
 +              if (err)
 +                      goto detach_mbx;
 +              else
 +                      return err;
 +      }
  
 -      /* Initilaize 83xx mailbox spinlock */
 -      spin_lock_init(&ahw->mbx_lock);
 +      err = qlcnic_83xx_check_hw_status(adapter);
 +      if (err)
 +              goto detach_mbx;
 +
 +      err = qlcnic_setup_intr(adapter, 0);
 +      if (err) {
 +              dev_err(&adapter->pdev->dev, "Failed to setup interrupt\n");
 +              goto disable_intr;
 +      }
 +
 +      err = qlcnic_83xx_setup_mbx_intr(adapter);
 +      if (err)
 +              goto disable_mbx_intr;
  
 -      set_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status);
        qlcnic_83xx_clear_function_resources(adapter);
  
+       INIT_DELAYED_WORK(&adapter->idc_aen_work, qlcnic_83xx_idc_aen_work);
        /* register for NIC IDC AEN Events */
        qlcnic_83xx_register_nic_idc_func(adapter, 1);
  
        if (!qlcnic_83xx_read_flash_descriptor_table(adapter))
                qlcnic_83xx_read_flash_mfg_id(adapter);
  
 -      if (qlcnic_83xx_idc_init(adapter))
 -              return -EIO;
 +      err = qlcnic_83xx_idc_init(adapter);
 +      if (err)
 +              goto disable_mbx_intr;
  
        /* Configure default, SR-IOV or Virtual NIC mode of operation */
 -      if (qlcnic_83xx_configure_opmode(adapter))
 -              return -EIO;
 +      err = qlcnic_83xx_configure_opmode(adapter);
 +      if (err)
 +              goto disable_mbx_intr;
  
        /* Perform operating mode specific initialization */
 -      if (adapter->nic_ops->init_driver(adapter))
 -              return -EIO;
 +      err = adapter->nic_ops->init_driver(adapter);
 +      if (err)
 +              goto disable_mbx_intr;
  
-       INIT_DELAYED_WORK(&adapter->idc_aen_work, qlcnic_83xx_idc_aen_work);
        /* Periodically monitor device status */
        qlcnic_83xx_idc_poll_dev_state(&adapter->fw_work.work);
 +      return 0;
  
 -      return adapter->ahw->idc.err_code;
 +disable_mbx_intr:
 +      qlcnic_83xx_free_mbx_intr(adapter);
 +
 +disable_intr:
 +      qlcnic_teardown_intr(adapter);
 +
 +detach_mbx:
 +      qlcnic_83xx_detach_mailbox_work(adapter);
 +      qlcnic_83xx_free_mailbox(ahw->mailbox);
 +exit:
 +      return err;
  }
@@@ -150,6 -150,7 +150,7 @@@ static const char qlcnic_gstrings_test[
        "Link_Test_on_offline",
        "Interrupt_Test_offline",
        "Internal_Loopback_offline",
+       "External_Loopback_offline",
        "EEPROM_Test_offline"
  };
  
@@@ -266,7 -267,7 +267,7 @@@ int qlcnic_82xx_get_settings(struct qlc
  {
        struct qlcnic_hardware_context *ahw = adapter->ahw;
        u32 speed, reg;
-       int check_sfp_module = 0;
+       int check_sfp_module = 0, err = 0;
        u16 pcifn = ahw->pci_func;
  
        /* read which mode */
  
        } else if (adapter->ahw->port_type == QLCNIC_XGBE) {
                u32 val = 0;
-               val = QLCRD32(adapter, QLCNIC_PORT_MODE_ADDR);
+               val = QLCRD32(adapter, QLCNIC_PORT_MODE_ADDR, &err);
  
                if (val == QLCNIC_PORT_MODE_802_3_AP) {
                        ecmd->supported = SUPPORTED_1000baseT_Full;
                }
  
                if (netif_running(adapter->netdev) && ahw->has_link_events) {
-                       reg = QLCRD32(adapter, P3P_LINK_SPEED_REG(pcifn));
-                       speed = P3P_LINK_SPEED_VAL(pcifn, reg);
-                       ahw->link_speed = speed * P3P_LINK_SPEED_MHZ;
+                       if (ahw->linkup) {
+                               reg = QLCRD32(adapter,
+                                             P3P_LINK_SPEED_REG(pcifn), &err);
+                               speed = P3P_LINK_SPEED_VAL(pcifn, reg);
+                               ahw->link_speed = speed * P3P_LINK_SPEED_MHZ;
+                       }
                        ethtool_cmd_speed_set(ecmd, ahw->link_speed);
                        ecmd->autoneg = ahw->link_autoneg;
                        ecmd->duplex = ahw->link_duplex;
@@@ -463,13 -468,14 +468,14 @@@ static int qlcnic_set_settings(struct n
  static int qlcnic_82xx_get_registers(struct qlcnic_adapter *adapter,
                                     u32 *regs_buff)
  {
-       int i, j = 0;
+       int i, j = 0, err = 0;
  
        for (i = QLCNIC_DEV_INFO_SIZE + 1; diag_registers[j] != -1; j++, i++)
                regs_buff[i] = QLC_SHARED_REG_RD32(adapter, diag_registers[j]);
        j = 0;
        while (ext_diag_registers[j] != -1)
-               regs_buff[i++] = QLCRD32(adapter, ext_diag_registers[j++]);
+               regs_buff[i++] = QLCRD32(adapter, ext_diag_registers[j++],
+                                        &err);
        return i;
  }
  
@@@ -519,13 -525,16 +525,16 @@@ qlcnic_get_regs(struct net_device *dev
  static u32 qlcnic_test_link(struct net_device *dev)
  {
        struct qlcnic_adapter *adapter = netdev_priv(dev);
+       int err = 0;
        u32 val;
  
        if (qlcnic_83xx_check(adapter)) {
                val = qlcnic_83xx_test_link(adapter);
                return (val & 1) ? 0 : 1;
        }
-       val = QLCRD32(adapter, CRB_XG_STATE_P3P);
+       val = QLCRD32(adapter, CRB_XG_STATE_P3P, &err);
+       if (err == -EIO)
+               return err;
        val = XG_LINK_STATE_P3P(adapter->ahw->pci_func, val);
        return (val == XG_LINK_UP_P3P) ? 0 : 1;
  }
@@@ -658,6 -667,7 +667,7 @@@ qlcnic_get_pauseparam(struct net_devic
  {
        struct qlcnic_adapter *adapter = netdev_priv(netdev);
        int port = adapter->ahw->physical_port;
+       int err = 0;
        __u32 val;
  
        if (qlcnic_83xx_check(adapter)) {
                if ((port < 0) || (port > QLCNIC_NIU_MAX_GBE_PORTS))
                        return;
                /* get flow control settings */
-               val = QLCRD32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port));
+               val = QLCRD32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port), &err);
+               if (err == -EIO)
+                       return;
                pause->rx_pause = qlcnic_gb_get_rx_flowctl(val);
-               val = QLCRD32(adapter, QLCNIC_NIU_GB_PAUSE_CTL);
+               val = QLCRD32(adapter, QLCNIC_NIU_GB_PAUSE_CTL, &err);
+               if (err == -EIO)
+                       return;
                switch (port) {
                case 0:
                        pause->tx_pause = !(qlcnic_gb_get_gb0_mask(val));
                if ((port < 0) || (port > QLCNIC_NIU_MAX_XG_PORTS))
                        return;
                pause->rx_pause = 1;
-               val = QLCRD32(adapter, QLCNIC_NIU_XG_PAUSE_CTL);
+               val = QLCRD32(adapter, QLCNIC_NIU_XG_PAUSE_CTL, &err);
+               if (err == -EIO)
+                       return;
                if (port == 0)
                        pause->tx_pause = !(qlcnic_xg_get_xg0_mask(val));
                else
@@@ -707,6 -723,7 +723,7 @@@ qlcnic_set_pauseparam(struct net_devic
  {
        struct qlcnic_adapter *adapter = netdev_priv(netdev);
        int port = adapter->ahw->physical_port;
+       int err = 0;
        __u32 val;
  
        if (qlcnic_83xx_check(adapter))
                if ((port < 0) || (port > QLCNIC_NIU_MAX_GBE_PORTS))
                        return -EIO;
                /* set flow control */
-               val = QLCRD32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port));
+               val = QLCRD32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port), &err);
+               if (err == -EIO)
+                       return err;
  
                if (pause->rx_pause)
                        qlcnic_gb_rx_flowctl(val);
                                val);
                QLCWR32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port), val);
                /* set autoneg */
-               val = QLCRD32(adapter, QLCNIC_NIU_GB_PAUSE_CTL);
+               val = QLCRD32(adapter, QLCNIC_NIU_GB_PAUSE_CTL, &err);
+               if (err == -EIO)
+                       return err;
                switch (port) {
                case 0:
                        if (pause->tx_pause)
                if ((port < 0) || (port > QLCNIC_NIU_MAX_XG_PORTS))
                        return -EIO;
  
-               val = QLCRD32(adapter, QLCNIC_NIU_XG_PAUSE_CTL);
+               val = QLCRD32(adapter, QLCNIC_NIU_XG_PAUSE_CTL, &err);
+               if (err == -EIO)
+                       return err;
                if (port == 0) {
                        if (pause->tx_pause)
                                qlcnic_xg_unset_xg0_mask(val);
@@@ -788,11 -811,14 +811,14 @@@ static int qlcnic_reg_test(struct net_d
  {
        struct qlcnic_adapter *adapter = netdev_priv(dev);
        u32 data_read;
+       int err = 0;
  
        if (qlcnic_83xx_check(adapter))
                return qlcnic_83xx_reg_test(adapter);
  
-       data_read = QLCRD32(adapter, QLCNIC_PCIX_PH_REG(0));
+       data_read = QLCRD32(adapter, QLCNIC_PCIX_PH_REG(0), &err);
+       if (err == -EIO)
+               return err;
        if ((data_read & 0xffff) != adapter->pdev->vendor)
                return 1;
  
@@@ -980,9 -1006,9 +1006,9 @@@ int qlcnic_loopback_test(struct net_dev
                msleep(500);
                qlcnic_process_rcv_ring_diag(sds_ring);
                if (loop++ > QLCNIC_ILB_MAX_RCV_LOOP) {
 -                      netdev_info(netdev, "firmware didnt respond to loopback"
 -                              " configure request\n");
 -                      ret = -QLCNIC_FW_NOT_RESPOND;
 +                      netdev_info(netdev,
 +                                  "Firmware didn't sent link up event to loopback request\n");
 +                      ret = -ETIMEDOUT;
                        goto free_res;
                } else if (adapter->ahw->diag_cnt) {
                        ret = adapter->ahw->diag_cnt;
@@@ -1026,8 -1052,15 +1052,15 @@@ qlcnic_diag_test(struct net_device *dev
                if (data[3])
                        eth_test->flags |= ETH_TEST_FL_FAILED;
  
-               data[4] = qlcnic_eeprom_test(dev);
-               if (data[4])
+               if (eth_test->flags & ETH_TEST_FL_EXTERNAL_LB) {
+                       data[4] = qlcnic_loopback_test(dev, QLCNIC_ELB_MODE);
+                       if (data[4])
+                               eth_test->flags |= ETH_TEST_FL_FAILED;
+                       eth_test->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
+               }
+               data[5] = qlcnic_eeprom_test(dev);
+               if (data[5])
                        eth_test->flags |= ETH_TEST_FL_FAILED;
        }
  }
@@@ -1257,17 -1290,20 +1290,20 @@@ qlcnic_get_wol(struct net_device *dev, 
  {
        struct qlcnic_adapter *adapter = netdev_priv(dev);
        u32 wol_cfg;
+       int err = 0;
  
        if (qlcnic_83xx_check(adapter))
                return;
        wol->supported = 0;
        wol->wolopts = 0;
  
-       wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV);
+       wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV, &err);
+       if (err == -EIO)
+               return;
        if (wol_cfg & (1UL << adapter->portnum))
                wol->supported |= WAKE_MAGIC;
  
-       wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG);
+       wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG, &err);
        if (wol_cfg & (1UL << adapter->portnum))
                wol->wolopts |= WAKE_MAGIC;
  }
@@@ -1277,17 -1313,22 +1313,22 @@@ qlcnic_set_wol(struct net_device *dev, 
  {
        struct qlcnic_adapter *adapter = netdev_priv(dev);
        u32 wol_cfg;
+       int err = 0;
  
        if (qlcnic_83xx_check(adapter))
                return -EOPNOTSUPP;
        if (wol->wolopts & ~WAKE_MAGIC)
                return -EINVAL;
  
-       wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV);
+       wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV, &err);
+       if (err == -EIO)
+               return err;
        if (!(wol_cfg & (1 << adapter->portnum)))
                return -EOPNOTSUPP;
  
-       wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG);
+       wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG, &err);
+       if (err == -EIO)
+               return err;
        if (wol->wolopts & WAKE_MAGIC)
                wol_cfg |= 1UL << adapter->portnum;
        else
@@@ -1540,7 -1581,7 +1581,7 @@@ qlcnic_set_dump(struct net_device *netd
                return 0;
        case QLCNIC_SET_QUIESCENT:
        case QLCNIC_RESET_QUIESCENT:
-               state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
+               state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
                if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD))
                        netdev_info(netdev, "Device in FAILED state\n");
                return 0;
@@@ -161,36 -161,68 +161,68 @@@ static inline int qlcnic_82xx_is_lb_pkt
        return (qlcnic_get_sts_status(sts_data) == STATUS_CKSUM_LOOP) ? 1 : 0;
  }
  
+ static void qlcnic_delete_rx_list_mac(struct qlcnic_adapter *adapter,
+                                     struct qlcnic_filter *fil,
+                                     void *addr, u16 vlan_id)
+ {
+       int ret;
+       u8 op;
+       op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
+       ret = qlcnic_sre_macaddr_change(adapter, addr, vlan_id, op);
+       if (ret)
+               return;
+       op = vlan_id ? QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL;
+       ret = qlcnic_sre_macaddr_change(adapter, addr, vlan_id, op);
+       if (!ret) {
+               hlist_del(&fil->fnode);
+               adapter->rx_fhash.fnum--;
+       }
+ }
+ static struct qlcnic_filter *qlcnic_find_mac_filter(struct hlist_head *head,
+                                                   void *addr, u16 vlan_id)
+ {
+       struct qlcnic_filter *tmp_fil = NULL;
+       struct hlist_node *n;
+       hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
+               if (!memcmp(tmp_fil->faddr, addr, ETH_ALEN) &&
+                   tmp_fil->vlan_id == vlan_id)
+                       return tmp_fil;
+       }
+       return NULL;
+ }
  void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter, struct sk_buff *skb,
                          int loopback_pkt, u16 vlan_id)
  {
        struct ethhdr *phdr = (struct ethhdr *)(skb->data);
        struct qlcnic_filter *fil, *tmp_fil;
-       struct hlist_node *n;
        struct hlist_head *head;
        unsigned long time;
        u64 src_addr = 0;
-       u8 hindex, found = 0, op;
+       u8 hindex, op;
        int ret;
  
        memcpy(&src_addr, phdr->h_source, ETH_ALEN);
+       hindex = qlcnic_mac_hash(src_addr) &
+                (adapter->fhash.fbucket_size - 1);
  
        if (loopback_pkt) {
                if (adapter->rx_fhash.fnum >= adapter->rx_fhash.fmax)
                        return;
  
-               hindex = qlcnic_mac_hash(src_addr) &
-                        (adapter->fhash.fbucket_size - 1);
                head = &(adapter->rx_fhash.fhead[hindex]);
  
-               hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
-                       if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
-                           tmp_fil->vlan_id == vlan_id) {
-                               time = tmp_fil->ftime;
-                               if (jiffies > (QLCNIC_READD_AGE * HZ + time))
-                                       tmp_fil->ftime = jiffies;
-                               return;
-                       }
+               tmp_fil = qlcnic_find_mac_filter(head, &src_addr, vlan_id);
+               if (tmp_fil) {
+                       time = tmp_fil->ftime;
+                       if (time_after(jiffies, QLCNIC_READD_AGE * HZ + time))
+                               tmp_fil->ftime = jiffies;
+                       return;
                }
  
                fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
                adapter->rx_fhash.fnum++;
                spin_unlock(&adapter->rx_mac_learn_lock);
        } else {
-               hindex = qlcnic_mac_hash(src_addr) &
-                        (adapter->fhash.fbucket_size - 1);
-               head = &(adapter->rx_fhash.fhead[hindex]);
-               spin_lock(&adapter->rx_mac_learn_lock);
-               hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
-                       if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
-                           tmp_fil->vlan_id == vlan_id) {
-                               found = 1;
-                               break;
-                       }
-               }
+               head = &adapter->fhash.fhead[hindex];
  
-               if (!found) {
-                       spin_unlock(&adapter->rx_mac_learn_lock);
-                       return;
-               }
+               spin_lock(&adapter->mac_learn_lock);
  
-               op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
-               ret = qlcnic_sre_macaddr_change(adapter, (u8 *)&src_addr,
-                                               vlan_id, op);
-               if (!ret) {
+               tmp_fil = qlcnic_find_mac_filter(head, &src_addr, vlan_id);
+               if (tmp_fil) {
                        op = vlan_id ? QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL;
                        ret = qlcnic_sre_macaddr_change(adapter,
                                                        (u8 *)&src_addr,
                                                        vlan_id, op);
                        if (!ret) {
-                               hlist_del(&(tmp_fil->fnode));
-                               adapter->rx_fhash.fnum--;
+                               hlist_del(&tmp_fil->fnode);
+                               adapter->fhash.fnum--;
                        }
+                       spin_unlock(&adapter->mac_learn_lock);
+                       return;
                }
+               spin_unlock(&adapter->mac_learn_lock);
+               head = &adapter->rx_fhash.fhead[hindex];
+               spin_lock(&adapter->rx_mac_learn_lock);
+               tmp_fil = qlcnic_find_mac_filter(head, &src_addr, vlan_id);
+               if (tmp_fil)
+                       qlcnic_delete_rx_list_mac(adapter, tmp_fil, &src_addr,
+                                                 vlan_id);
                spin_unlock(&adapter->rx_mac_learn_lock);
        }
  }
@@@ -262,7 -295,7 +295,7 @@@ void qlcnic_82xx_change_filter(struct q
  
        mac_req = (struct qlcnic_mac_req *)&(req->words[0]);
        mac_req->op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
-       memcpy(mac_req->mac_addr, &uaddr, ETH_ALEN);
+       memcpy(mac_req->mac_addr, uaddr, ETH_ALEN);
  
        vlan_req = (struct qlcnic_vlan_req *)&req->words[1];
        vlan_req->vlan_id = cpu_to_le16(vlan_id);
@@@ -919,17 -952,17 +952,17 @@@ static void qlcnic_handle_fw_message(in
                        break;
                case 1:
                        dev_info(dev, "loopback already in progress\n");
 -                      adapter->ahw->diag_cnt = -QLCNIC_TEST_IN_PROGRESS;
 +                      adapter->ahw->diag_cnt = -EINPROGRESS;
                        break;
                case 2:
                        dev_info(dev, "loopback cable is not connected\n");
 -                      adapter->ahw->diag_cnt = -QLCNIC_LB_CABLE_NOT_CONN;
 +                      adapter->ahw->diag_cnt = -ENODEV;
                        break;
                default:
                        dev_info(dev,
                                 "loopback configure request failed, err %x\n",
                                 ret);
 -                      adapter->ahw->diag_cnt = -QLCNIC_UNDEFINED_ERROR;
 +                      adapter->ahw->diag_cnt = -EIO;
                        break;
                }
                break;
@@@ -977,8 -977,8 +977,8 @@@ qlcnic_check_options(struct qlcnic_adap
  static int
  qlcnic_initialize_nic(struct qlcnic_adapter *adapter)
  {
-       int err;
        struct qlcnic_info nic_info;
+       int err = 0;
  
        memset(&nic_info, 0, sizeof(struct qlcnic_info));
        err = qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw->pci_func);
  
        if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_MORE_CAPS) {
                u32 temp;
-               temp = QLCRD32(adapter, CRB_FW_CAPABILITIES_2);
+               temp = QLCRD32(adapter, CRB_FW_CAPABILITIES_2, &err);
+               if (err == -EIO)
+                       return err;
                adapter->ahw->extra_capability[0] = temp;
        }
        adapter->ahw->max_mac_filters = nic_info.max_mac_filters;
@@@ -1383,6 -1385,8 +1385,8 @@@ qlcnic_request_irq(struct qlcnic_adapte
        if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
                if (qlcnic_82xx_check(adapter))
                        handler = qlcnic_tmp_intr;
+               else
+                       handler = qlcnic_83xx_tmp_intr;
                if (!QLCNIC_IS_MSI_FAMILY(adapter))
                        flags |= IRQF_SHARED;
  
@@@ -1531,12 -1535,12 +1535,12 @@@ int __qlcnic_up(struct qlcnic_adapter *
        if (netdev->features & NETIF_F_LRO)
                qlcnic_config_hw_lro(adapter, QLCNIC_LRO_ENABLED);
  
+       set_bit(__QLCNIC_DEV_UP, &adapter->state);
        qlcnic_napi_enable(adapter);
  
        qlcnic_linkevent_request(adapter, 1);
  
        adapter->ahw->reset_context = 0;
-       set_bit(__QLCNIC_DEV_UP, &adapter->state);
        return 0;
  }
  
@@@ -2139,14 -2143,18 +2143,14 @@@ qlcnic_probe(struct pci_dev *pdev, cons
        if (qlcnic_83xx_check(adapter) && !qlcnic_use_msi_x &&
            !!qlcnic_use_msi)
                dev_warn(&pdev->dev,
-                        "83xx adapter do not support MSI interrupts\n");
+                        "Device does not support MSI interrupts\n");
  
 -      err = qlcnic_setup_intr(adapter, 0);
 -      if (err) {
 -              dev_err(&pdev->dev, "Failed to setup interrupt\n");
 -              goto err_out_disable_msi;
 -      }
 -
 -      if (qlcnic_83xx_check(adapter)) {
 -              err = qlcnic_83xx_setup_mbx_intr(adapter);
 -              if (err)
 +      if (qlcnic_82xx_check(adapter)) {
 +              err = qlcnic_setup_intr(adapter, 0);
 +              if (err) {
 +                      dev_err(&pdev->dev, "Failed to setup interrupt\n");
                        goto err_out_disable_msi;
 +              }
        }
  
        err = qlcnic_get_act_pci_func(adapter);
@@@ -2233,11 -2241,9 +2237,11 @@@ static void qlcnic_remove(struct pci_de
        qlcnic_sriov_cleanup(adapter);
  
        if (qlcnic_83xx_check(adapter)) {
 -              qlcnic_83xx_free_mbx_intr(adapter);
                qlcnic_83xx_register_nic_idc_func(adapter, 0);
                cancel_delayed_work_sync(&adapter->idc_aen_work);
 +              qlcnic_83xx_free_mbx_intr(adapter);
 +              qlcnic_83xx_detach_mailbox_work(adapter);
 +              qlcnic_83xx_free_mailbox(ahw->mailbox);
        }
  
        qlcnic_detach(adapter);
@@@ -3091,6 -3097,7 +3095,7 @@@ qlcnic_check_health(struct qlcnic_adapt
  {
        u32 state = 0, heartbeat;
        u32 peg_status;
+       int err = 0;
  
        if (qlcnic_check_temp(adapter))
                goto detach;
                        "PEG_NET_4_PC: 0x%x\n",
                        peg_status,
                        QLC_SHARED_REG_RD32(adapter, QLCNIC_PEG_HALT_STATUS2),
-                       QLCRD32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x3c),
-                       QLCRD32(adapter, QLCNIC_CRB_PEG_NET_1 + 0x3c),
-                       QLCRD32(adapter, QLCNIC_CRB_PEG_NET_2 + 0x3c),
-                       QLCRD32(adapter, QLCNIC_CRB_PEG_NET_3 + 0x3c),
-                       QLCRD32(adapter, QLCNIC_CRB_PEG_NET_4 + 0x3c));
+                       QLCRD32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x3c, &err),
+                       QLCRD32(adapter, QLCNIC_CRB_PEG_NET_1 + 0x3c, &err),
+                       QLCRD32(adapter, QLCNIC_CRB_PEG_NET_2 + 0x3c, &err),
+                       QLCRD32(adapter, QLCNIC_CRB_PEG_NET_3 + 0x3c, &err),
+                       QLCRD32(adapter, QLCNIC_CRB_PEG_NET_4 + 0x3c, &err));
        if (QLCNIC_FWERROR_CODE(peg_status) == 0x67)
                dev_err(&adapter->pdev->dev,
                        "Firmware aborted with error code 0x00006700. "
@@@ -33,7 -33,7 +33,7 @@@ static int qlcnic_sriov_alloc_bc_mbx_ar
  static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *);
  static void qlcnic_sriov_vf_cancel_fw_work(struct qlcnic_adapter *);
  static void qlcnic_sriov_cleanup_transaction(struct qlcnic_bc_trans *);
 -static int qlcnic_sriov_vf_mbx_op(struct qlcnic_adapter *,
 +static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *,
                                  struct qlcnic_cmd_args *);
  static void qlcnic_sriov_process_bc_cmd(struct work_struct *);
  
@@@ -45,7 -45,7 +45,7 @@@ static struct qlcnic_hardware_ops qlcni
        .get_mac_address                = qlcnic_83xx_get_mac_address,
        .setup_intr                     = qlcnic_83xx_setup_intr,
        .alloc_mbx_args                 = qlcnic_83xx_alloc_mbx_args,
 -      .mbx_cmd                        = qlcnic_sriov_vf_mbx_op,
 +      .mbx_cmd                        = qlcnic_sriov_issue_cmd,
        .get_func_no                    = qlcnic_83xx_get_func_no,
        .api_lock                       = qlcnic_83xx_cam_lock,
        .api_unlock                     = qlcnic_83xx_cam_unlock,
@@@ -286,38 -286,96 +286,38 @@@ void qlcnic_sriov_cleanup(struct qlcnic
  static int qlcnic_sriov_post_bc_msg(struct qlcnic_adapter *adapter, u32 *hdr,
                                    u32 *pay, u8 pci_func, u8 size)
  {
 -      u32 rsp, mbx_val, fw_data, rsp_num, mbx_cmd, val, wait_time = 0;
        struct qlcnic_hardware_context *ahw = adapter->ahw;
 -      unsigned long flags;
 -      u16 opcode;
 -      u8 mbx_err_code;
 -      int i, j;
 -
 -      opcode = ((struct qlcnic_bc_hdr *)hdr)->cmd_op;
 -
 -      if (!test_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status)) {
 -              dev_info(&adapter->pdev->dev,
 -                       "Mailbox cmd attempted, 0x%x\n", opcode);
 -              dev_info(&adapter->pdev->dev, "Mailbox detached\n");
 -              return 0;
 -      }
 -
 -      spin_lock_irqsave(&ahw->mbx_lock, flags);
 -
 -      mbx_val = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL);
 -      if (mbx_val) {
 -              QLCDB(adapter, DRV, "Mailbox cmd attempted, 0x%x\n", opcode);
 -              spin_unlock_irqrestore(&ahw->mbx_lock, flags);
 -              return QLCNIC_RCODE_TIMEOUT;
 -      }
 -      /* Fill in mailbox registers */
 -      val = size + (sizeof(struct qlcnic_bc_hdr) / sizeof(u32));
 -      mbx_cmd = 0x31 | (val << 16) | (adapter->ahw->fw_hal_version << 29);
 -
 -      writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 0));
 -      mbx_cmd = 0x1 | (1 << 4);
 +      struct qlcnic_mailbox *mbx = ahw->mailbox;
 +      struct qlcnic_cmd_args cmd;
 +      unsigned long timeout;
 +      int err;
  
 -      if (qlcnic_sriov_pf_check(adapter))
 -              mbx_cmd |= (pci_func << 5);
 +      memset(&cmd, 0, sizeof(struct qlcnic_cmd_args));
 +      cmd.hdr = hdr;
 +      cmd.pay = pay;
 +      cmd.pay_size = size;
 +      cmd.func_num = pci_func;
 +      cmd.op_type = QLC_83XX_MBX_POST_BC_OP;
 +      cmd.cmd_op = ((struct qlcnic_bc_hdr *)hdr)->cmd_op;
  
 -      writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 1));
 -      for (i = 2, j = 0; j < (sizeof(struct qlcnic_bc_hdr) / sizeof(u32));
 -                      i++, j++) {
 -              writel(*(hdr++), QLCNIC_MBX_HOST(ahw, i));
 +      err = mbx->ops->enqueue_cmd(adapter, &cmd, &timeout);
 +      if (err) {
 +              dev_err(&adapter->pdev->dev,
 +                      "%s: Mailbox not available, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
 +                      __func__, cmd.cmd_op, cmd.type, ahw->pci_func,
 +                      ahw->op_mode);
 +              return err;
        }
 -      for (j = 0; j < size; j++, i++)
 -              writel(*(pay++), QLCNIC_MBX_HOST(ahw, i));
 -
 -      /* Signal FW about the impending command */
 -      QLCWRX(ahw, QLCNIC_HOST_MBX_CTRL, QLCNIC_SET_OWNER);
  
 -      /* Waiting for the mailbox cmd to complete and while waiting here
 -       * some AEN might arrive. If more than 5 seconds expire we can
 -       * assume something is wrong.
 -       */
 -poll:
 -      rsp = qlcnic_83xx_mbx_poll(adapter, &wait_time);
 -      if (rsp != QLCNIC_RCODE_TIMEOUT) {
 -              /* Get the FW response data */
 -              fw_data = readl(QLCNIC_MBX_FW(ahw, 0));
 -              if (fw_data &  QLCNIC_MBX_ASYNC_EVENT) {
 -                      __qlcnic_83xx_process_aen(adapter);
 -                      goto poll;
 -              }
 -              mbx_err_code = QLCNIC_MBX_STATUS(fw_data);
 -              rsp_num = QLCNIC_MBX_NUM_REGS(fw_data);
 -              opcode = QLCNIC_MBX_RSP(fw_data);
 -
 -              switch (mbx_err_code) {
 -              case QLCNIC_MBX_RSP_OK:
 -              case QLCNIC_MBX_PORT_RSP_OK:
 -                      rsp = QLCNIC_RCODE_SUCCESS;
 -                      break;
 -              default:
 -                      if (opcode == QLCNIC_CMD_CONFIG_MAC_VLAN) {
 -                              rsp = qlcnic_83xx_mac_rcode(adapter);
 -                              if (!rsp)
 -                                      goto out;
 -                      }
 -                      dev_err(&adapter->pdev->dev,
 -                              "MBX command 0x%x failed with err:0x%x\n",
 -                              opcode, mbx_err_code);
 -                      rsp = mbx_err_code;
 -                      break;
 -              }
 -              goto out;
 +      if (!wait_for_completion_timeout(&cmd.completion, timeout)) {
 +              dev_err(&adapter->pdev->dev,
 +                      "%s: Mailbox command timed out, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
 +                      __func__, cmd.cmd_op, cmd.type, ahw->pci_func,
 +                      ahw->op_mode);
 +              flush_workqueue(mbx->work_q);
        }
  
 -      dev_err(&adapter->pdev->dev, "MBX command 0x%x timed out\n",
 -              QLCNIC_MBX_RSP(mbx_cmd));
 -      rsp = QLCNIC_RCODE_TIMEOUT;
 -out:
 -      /* clear fw mbx control register */
 -      QLCWRX(ahw, QLCNIC_FW_MBX_CTRL, QLCNIC_CLR_OWNER);
 -      spin_unlock_irqrestore(&adapter->ahw->mbx_lock, flags);
 -      return rsp;
 +      return cmd.rsp_opcode;
  }
  
  static void qlcnic_sriov_vf_cfg_buff_desc(struct qlcnic_adapter *adapter)
@@@ -464,8 -522,8 +464,8 @@@ static int qlcnic_sriov_get_vf_acl(stru
  
  static int qlcnic_sriov_vf_init_driver(struct qlcnic_adapter *adapter)
  {
 -      struct qlcnic_info nic_info;
        struct qlcnic_hardware_context *ahw = adapter->ahw;
 +      struct qlcnic_info nic_info;
        int err;
  
        err = qlcnic_sriov_get_vf_vport_info(adapter, &nic_info, 0);
@@@ -504,7 -562,7 +504,7 @@@ static int qlcnic_sriov_setup_vf(struc
        INIT_LIST_HEAD(&adapter->vf_mc_list);
        if (!qlcnic_use_msi_x && !!qlcnic_use_msi)
                dev_warn(&adapter->pdev->dev,
-                        "83xx adapter do not support MSI interrupts\n");
+                        "Device does not support MSI interrupts\n");
  
        err = qlcnic_setup_intr(adapter, 1);
        if (err) {
@@@ -579,6 -637,8 +579,6 @@@ int qlcnic_sriov_vf_init(struct qlcnic_
        struct qlcnic_hardware_context *ahw = adapter->ahw;
        int err;
  
 -      spin_lock_init(&ahw->mbx_lock);
 -      set_bit(QLC_83XX_MBX_READY, &ahw->idc.status);
        set_bit(QLC_83XX_MODULE_LOADED, &ahw->idc.status);
        ahw->idc.delay = QLC_83XX_IDC_FW_POLL_DELAY;
        ahw->reset_context = 0;
@@@ -702,6 -762,7 +702,7 @@@ static int qlcnic_sriov_alloc_bc_mbx_ar
                        memset(mbx->rsp.arg, 0, sizeof(u32) * mbx->rsp.num);
                        mbx->req.arg[0] = (type | (mbx->req.num << 16) |
                                           (3 << 29));
+                       mbx->rsp.arg[0] = (type & 0xffff) | mbx->rsp.num << 16;
                        return 0;
                }
        }
@@@ -753,6 -814,7 +754,7 @@@ static int qlcnic_sriov_prepare_bc_hdr(
                cmd->req.num = trans->req_pay_size / 4;
                cmd->rsp.num = trans->rsp_pay_size / 4;
                hdr = trans->rsp_hdr;
+               cmd->op_type = trans->req_hdr->op_type;
        }
  
        trans->trans_id = seq;
@@@ -1023,7 -1085,6 +1025,7 @@@ static void qlcnic_sriov_process_bc_cmd
        if (test_bit(QLC_BC_VF_FLR, &vf->state))
                return;
  
 +      memset(&cmd, 0, sizeof(struct qlcnic_cmd_args));
        trans = list_first_entry(&vf->rcv_act.wait_list,
                                 struct qlcnic_bc_trans, list);
        adapter = vf->adapter;
@@@ -1173,7 -1234,6 +1175,7 @@@ static void qlcnic_sriov_handle_bc_cmd(
                return;
        }
  
 +      memset(&cmd, 0, sizeof(struct qlcnic_cmd_args));
        cmd_op = hdr->cmd_op;
        if (qlcnic_sriov_alloc_bc_trans(&trans))
                return;
@@@ -1299,7 -1359,7 +1301,7 @@@ int qlcnic_sriov_cfg_bc_intr(struct qlc
        if (enable)
                cmd.req.arg[1] = (1 << 4) | (1 << 5) | (1 << 6) | (1 << 7);
  
 -      err = qlcnic_83xx_mbx_op(adapter, &cmd);
 +      err = qlcnic_83xx_issue_cmd(adapter, &cmd);
  
        if (err != QLCNIC_RCODE_SUCCESS) {
                dev_err(&adapter->pdev->dev,
@@@ -1331,11 -1391,10 +1333,11 @@@ static int qlcnic_sriov_retry_bc_cmd(st
        return -EIO;
  }
  
 -static int qlcnic_sriov_vf_mbx_op(struct qlcnic_adapter *adapter,
 +static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *adapter,
                                  struct qlcnic_cmd_args *cmd)
  {
        struct qlcnic_hardware_context *ahw = adapter->ahw;
 +      struct qlcnic_mailbox *mbx = ahw->mailbox;
        struct device *dev = &adapter->pdev->dev;
        struct qlcnic_bc_trans *trans;
        int err;
                goto cleanup_transaction;
  
  retry:
 -      if (!test_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status)) {
 +      if (!test_bit(QLC_83XX_MBX_READY, &mbx->status)) {
                rsp = -EIO;
                QLCDB(adapter, DRV, "MBX not Ready!(cmd 0x%x) for VF 0x%x\n",
                      QLCNIC_MBX_RSP(cmd->req.arg[0]), func);
@@@ -1395,7 -1454,7 +1397,7 @@@ err_out
        if (rsp == QLCNIC_RCODE_TIMEOUT) {
                ahw->reset_context = 1;
                adapter->need_fw_reset = 1;
 -              clear_bit(QLC_83XX_MBX_READY, &ahw->idc.status);
 +              clear_bit(QLC_83XX_MBX_READY, &mbx->status);
        }
  
  cleanup_transaction:
@@@ -1555,7 -1614,7 +1557,7 @@@ static int qlcnic_sriov_vf_reinit_drive
        int err;
  
        set_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status);
 -      qlcnic_83xx_enable_mbx_intrpt(adapter);
 +      qlcnic_83xx_enable_mbx_interrupt(adapter);
  
        err = qlcnic_sriov_cfg_bc_intr(adapter, 1);
        if (err)
@@@ -1598,10 -1657,8 +1600,10 @@@ static void qlcnic_sriov_vf_detach(stru
        struct net_device *netdev = adapter->netdev;
        u8 i, max_ints = ahw->num_msix - 1;
  
 -      qlcnic_83xx_disable_mbx_intr(adapter);
        netif_device_detach(netdev);
 +      qlcnic_83xx_detach_mailbox_work(adapter);
 +      qlcnic_83xx_disable_mbx_intr(adapter);
 +
        if (netif_running(netdev))
                qlcnic_down(adapter, netdev);
  
@@@ -1645,7 -1702,6 +1647,7 @@@ static int qlcnic_sriov_vf_handle_dev_r
  static int qlcnic_sriov_vf_handle_context_reset(struct qlcnic_adapter *adapter)
  {
        struct qlcnic_hardware_context *ahw = adapter->ahw;
 +      struct qlcnic_mailbox *mbx = ahw->mailbox;
        struct device *dev = &adapter->pdev->dev;
        struct qlc_83xx_idc *idc = &ahw->idc;
        u8 func = ahw->pci_func;
        /* Skip the context reset and check if FW is hung */
        if (adapter->reset_ctx_cnt < 3) {
                adapter->need_fw_reset = 1;
 -              clear_bit(QLC_83XX_MBX_READY, &idc->status);
 +              clear_bit(QLC_83XX_MBX_READY, &mbx->status);
                dev_info(dev,
                         "Resetting context, wait here to check if FW is in failed state\n");
                return 0;
                 __func__, adapter->reset_ctx_cnt, func);
        set_bit(__QLCNIC_RESETTING, &adapter->state);
        adapter->need_fw_reset = 1;
 -      clear_bit(QLC_83XX_MBX_READY, &idc->status);
 +      clear_bit(QLC_83XX_MBX_READY, &mbx->status);
        qlcnic_sriov_vf_detach(adapter);
        adapter->need_fw_reset = 0;
  
@@@ -1731,7 -1787,6 +1733,7 @@@ static int qlcnic_sriov_vf_idc_failed_s
  static int
  qlcnic_sriov_vf_idc_need_quiescent_state(struct qlcnic_adapter *adapter)
  {
 +      struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
        struct qlc_83xx_idc *idc = &adapter->ahw->idc;
  
        dev_info(&adapter->pdev->dev, "Device is in quiescent state\n");
                set_bit(__QLCNIC_RESETTING, &adapter->state);
                adapter->tx_timeo_cnt = 0;
                adapter->reset_ctx_cnt = 0;
 -              clear_bit(QLC_83XX_MBX_READY, &idc->status);
 +              clear_bit(QLC_83XX_MBX_READY, &mbx->status);
                qlcnic_sriov_vf_detach(adapter);
        }
  
  
  static int qlcnic_sriov_vf_idc_init_reset_state(struct qlcnic_adapter *adapter)
  {
 +      struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
        struct qlc_83xx_idc *idc = &adapter->ahw->idc;
        u8 func = adapter->ahw->pci_func;
  
                set_bit(__QLCNIC_RESETTING, &adapter->state);
                adapter->tx_timeo_cnt = 0;
                adapter->reset_ctx_cnt = 0;
 -              clear_bit(QLC_83XX_MBX_READY, &idc->status);
 +              clear_bit(QLC_83XX_MBX_READY, &mbx->status);
                qlcnic_sriov_vf_detach(adapter);
        }
        return 0;
@@@ -1936,7 -1990,7 +1938,7 @@@ int qlcnic_sriov_vf_resume(struct qlcni
        int err;
  
        set_bit(QLC_83XX_MODULE_LOADED, &idc->status);
 -      qlcnic_83xx_enable_mbx_intrpt(adapter);
 +      qlcnic_83xx_enable_mbx_interrupt(adapter);
        err = qlcnic_sriov_cfg_bc_intr(adapter, 1);
        if (err)
                return err;
@@@ -91,7 -91,6 +91,7 @@@ do {                                                          
  #define CPSW1_SLAVE_SIZE      0x040
  #define CPSW1_CPDMA_OFFSET    0x100
  #define CPSW1_STATERAM_OFFSET 0x200
 +#define CPSW1_HW_STATS                0x400
  #define CPSW1_CPTS_OFFSET     0x500
  #define CPSW1_ALE_OFFSET      0x600
  #define CPSW1_SLIVER_OFFSET   0x700
  #define CPSW2_SLAVE_OFFSET    0x200
  #define CPSW2_SLAVE_SIZE      0x100
  #define CPSW2_CPDMA_OFFSET    0x800
 +#define CPSW2_HW_STATS                0x900
  #define CPSW2_STATERAM_OFFSET 0xa00
  #define CPSW2_CPTS_OFFSET     0xc00
  #define CPSW2_ALE_OFFSET      0xd00
@@@ -301,44 -299,6 +301,44 @@@ struct cpsw_sliver_regs 
        u32     rx_pri_map;
  };
  
 +struct cpsw_hw_stats {
 +      u32     rxgoodframes;
 +      u32     rxbroadcastframes;
 +      u32     rxmulticastframes;
 +      u32     rxpauseframes;
 +      u32     rxcrcerrors;
 +      u32     rxaligncodeerrors;
 +      u32     rxoversizedframes;
 +      u32     rxjabberframes;
 +      u32     rxundersizedframes;
 +      u32     rxfragments;
 +      u32     __pad_0[2];
 +      u32     rxoctets;
 +      u32     txgoodframes;
 +      u32     txbroadcastframes;
 +      u32     txmulticastframes;
 +      u32     txpauseframes;
 +      u32     txdeferredframes;
 +      u32     txcollisionframes;
 +      u32     txsinglecollframes;
 +      u32     txmultcollframes;
 +      u32     txexcessivecollisions;
 +      u32     txlatecollisions;
 +      u32     txunderrun;
 +      u32     txcarriersenseerrors;
 +      u32     txoctets;
 +      u32     octetframes64;
 +      u32     octetframes65t127;
 +      u32     octetframes128t255;
 +      u32     octetframes256t511;
 +      u32     octetframes512t1023;
 +      u32     octetframes1024tup;
 +      u32     netoctets;
 +      u32     rxsofoverruns;
 +      u32     rxmofoverruns;
 +      u32     rxdmaoverruns;
 +};
 +
  struct cpsw_slave {
        void __iomem                    *regs;
        struct cpsw_sliver_regs __iomem *sliver;
@@@ -372,7 -332,6 +372,7 @@@ struct cpsw_priv 
        struct cpsw_platform_data       data;
        struct cpsw_ss_regs __iomem     *regs;
        struct cpsw_wr_regs __iomem     *wr_regs;
 +      u8 __iomem                      *hw_stats;
        struct cpsw_host_regs __iomem   *host_port_regs;
        u32                             msg_enable;
        u32                             version;
        u32 emac_port;
  };
  
 +struct cpsw_stats {
 +      char stat_string[ETH_GSTRING_LEN];
 +      int type;
 +      int sizeof_stat;
 +      int stat_offset;
 +};
 +
 +enum {
 +      CPSW_STATS,
 +      CPDMA_RX_STATS,
 +      CPDMA_TX_STATS,
 +};
 +
 +#define CPSW_STAT(m)          CPSW_STATS,                             \
 +                              sizeof(((struct cpsw_hw_stats *)0)->m), \
 +                              offsetof(struct cpsw_hw_stats, m)
 +#define CPDMA_RX_STAT(m)      CPDMA_RX_STATS,                            \
 +                              sizeof(((struct cpdma_chan_stats *)0)->m), \
 +                              offsetof(struct cpdma_chan_stats, m)
 +#define CPDMA_TX_STAT(m)      CPDMA_TX_STATS,                            \
 +                              sizeof(((struct cpdma_chan_stats *)0)->m), \
 +                              offsetof(struct cpdma_chan_stats, m)
 +
 +static const struct cpsw_stats cpsw_gstrings_stats[] = {
 +      { "Good Rx Frames", CPSW_STAT(rxgoodframes) },
 +      { "Broadcast Rx Frames", CPSW_STAT(rxbroadcastframes) },
 +      { "Multicast Rx Frames", CPSW_STAT(rxmulticastframes) },
 +      { "Pause Rx Frames", CPSW_STAT(rxpauseframes) },
 +      { "Rx CRC Errors", CPSW_STAT(rxcrcerrors) },
 +      { "Rx Align/Code Errors", CPSW_STAT(rxaligncodeerrors) },
 +      { "Oversize Rx Frames", CPSW_STAT(rxoversizedframes) },
 +      { "Rx Jabbers", CPSW_STAT(rxjabberframes) },
 +      { "Undersize (Short) Rx Frames", CPSW_STAT(rxundersizedframes) },
 +      { "Rx Fragments", CPSW_STAT(rxfragments) },
 +      { "Rx Octets", CPSW_STAT(rxoctets) },
 +      { "Good Tx Frames", CPSW_STAT(txgoodframes) },
 +      { "Broadcast Tx Frames", CPSW_STAT(txbroadcastframes) },
 +      { "Multicast Tx Frames", CPSW_STAT(txmulticastframes) },
 +      { "Pause Tx Frames", CPSW_STAT(txpauseframes) },
 +      { "Deferred Tx Frames", CPSW_STAT(txdeferredframes) },
 +      { "Collisions", CPSW_STAT(txcollisionframes) },
 +      { "Single Collision Tx Frames", CPSW_STAT(txsinglecollframes) },
 +      { "Multiple Collision Tx Frames", CPSW_STAT(txmultcollframes) },
 +      { "Excessive Collisions", CPSW_STAT(txexcessivecollisions) },
 +      { "Late Collisions", CPSW_STAT(txlatecollisions) },
 +      { "Tx Underrun", CPSW_STAT(txunderrun) },
 +      { "Carrier Sense Errors", CPSW_STAT(txcarriersenseerrors) },
 +      { "Tx Octets", CPSW_STAT(txoctets) },
 +      { "Rx + Tx 64 Octet Frames", CPSW_STAT(octetframes64) },
 +      { "Rx + Tx 65-127 Octet Frames", CPSW_STAT(octetframes65t127) },
 +      { "Rx + Tx 128-255 Octet Frames", CPSW_STAT(octetframes128t255) },
 +      { "Rx + Tx 256-511 Octet Frames", CPSW_STAT(octetframes256t511) },
 +      { "Rx + Tx 512-1023 Octet Frames", CPSW_STAT(octetframes512t1023) },
 +      { "Rx + Tx 1024-Up Octet Frames", CPSW_STAT(octetframes1024tup) },
 +      { "Net Octets", CPSW_STAT(netoctets) },
 +      { "Rx Start of Frame Overruns", CPSW_STAT(rxsofoverruns) },
 +      { "Rx Middle of Frame Overruns", CPSW_STAT(rxmofoverruns) },
 +      { "Rx DMA Overruns", CPSW_STAT(rxdmaoverruns) },
 +      { "Rx DMA chan: head_enqueue", CPDMA_RX_STAT(head_enqueue) },
 +      { "Rx DMA chan: tail_enqueue", CPDMA_RX_STAT(tail_enqueue) },
 +      { "Rx DMA chan: pad_enqueue", CPDMA_RX_STAT(pad_enqueue) },
 +      { "Rx DMA chan: misqueued", CPDMA_RX_STAT(misqueued) },
 +      { "Rx DMA chan: desc_alloc_fail", CPDMA_RX_STAT(desc_alloc_fail) },
 +      { "Rx DMA chan: pad_alloc_fail", CPDMA_RX_STAT(pad_alloc_fail) },
 +      { "Rx DMA chan: runt_receive_buf", CPDMA_RX_STAT(runt_receive_buff) },
 +      { "Rx DMA chan: runt_transmit_buf", CPDMA_RX_STAT(runt_transmit_buff) },
 +      { "Rx DMA chan: empty_dequeue", CPDMA_RX_STAT(empty_dequeue) },
 +      { "Rx DMA chan: busy_dequeue", CPDMA_RX_STAT(busy_dequeue) },
 +      { "Rx DMA chan: good_dequeue", CPDMA_RX_STAT(good_dequeue) },
 +      { "Rx DMA chan: requeue", CPDMA_RX_STAT(requeue) },
 +      { "Rx DMA chan: teardown_dequeue", CPDMA_RX_STAT(teardown_dequeue) },
 +      { "Tx DMA chan: head_enqueue", CPDMA_TX_STAT(head_enqueue) },
 +      { "Tx DMA chan: tail_enqueue", CPDMA_TX_STAT(tail_enqueue) },
 +      { "Tx DMA chan: pad_enqueue", CPDMA_TX_STAT(pad_enqueue) },
 +      { "Tx DMA chan: misqueued", CPDMA_TX_STAT(misqueued) },
 +      { "Tx DMA chan: desc_alloc_fail", CPDMA_TX_STAT(desc_alloc_fail) },
 +      { "Tx DMA chan: pad_alloc_fail", CPDMA_TX_STAT(pad_alloc_fail) },
 +      { "Tx DMA chan: runt_receive_buf", CPDMA_TX_STAT(runt_receive_buff) },
 +      { "Tx DMA chan: runt_transmit_buf", CPDMA_TX_STAT(runt_transmit_buff) },
 +      { "Tx DMA chan: empty_dequeue", CPDMA_TX_STAT(empty_dequeue) },
 +      { "Tx DMA chan: busy_dequeue", CPDMA_TX_STAT(busy_dequeue) },
 +      { "Tx DMA chan: good_dequeue", CPDMA_TX_STAT(good_dequeue) },
 +      { "Tx DMA chan: requeue", CPDMA_TX_STAT(requeue) },
 +      { "Tx DMA chan: teardown_dequeue", CPDMA_TX_STAT(teardown_dequeue) },
 +};
 +
 +#define CPSW_STATS_LEN        ARRAY_SIZE(cpsw_gstrings_stats)
 +
  #define napi_to_priv(napi)    container_of(napi, struct cpsw_priv, napi)
  #define for_each_slave(priv, func, arg...)                            \
        do {                                                            \
@@@ -852,69 -723,6 +852,69 @@@ static int cpsw_set_coalesce(struct net
        return 0;
  }
  
 +static int cpsw_get_sset_count(struct net_device *ndev, int sset)
 +{
 +      switch (sset) {
 +      case ETH_SS_STATS:
 +              return CPSW_STATS_LEN;
 +      default:
 +              return -EOPNOTSUPP;
 +      }
 +}
 +
 +static void cpsw_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
 +{
 +      u8 *p = data;
 +      int i;
 +
 +      switch (stringset) {
 +      case ETH_SS_STATS:
 +              for (i = 0; i < CPSW_STATS_LEN; i++) {
 +                      memcpy(p, cpsw_gstrings_stats[i].stat_string,
 +                             ETH_GSTRING_LEN);
 +                      p += ETH_GSTRING_LEN;
 +              }
 +              break;
 +      }
 +}
 +
 +static void cpsw_get_ethtool_stats(struct net_device *ndev,
 +                                  struct ethtool_stats *stats, u64 *data)
 +{
 +      struct cpsw_priv *priv = netdev_priv(ndev);
 +      struct cpdma_chan_stats rx_stats;
 +      struct cpdma_chan_stats tx_stats;
 +      u32 val;
 +      u8 *p;
 +      int i;
 +
 +      /* Collect Davinci CPDMA stats for Rx and Tx Channel */
 +      cpdma_chan_get_stats(priv->rxch, &rx_stats);
 +      cpdma_chan_get_stats(priv->txch, &tx_stats);
 +
 +      for (i = 0; i < CPSW_STATS_LEN; i++) {
 +              switch (cpsw_gstrings_stats[i].type) {
 +              case CPSW_STATS:
 +                      val = readl(priv->hw_stats +
 +                                  cpsw_gstrings_stats[i].stat_offset);
 +                      data[i] = val;
 +                      break;
 +
 +              case CPDMA_RX_STATS:
 +                      p = (u8 *)&rx_stats +
 +                              cpsw_gstrings_stats[i].stat_offset;
 +                      data[i] = *(u32 *)p;
 +                      break;
 +
 +              case CPDMA_TX_STATS:
 +                      p = (u8 *)&tx_stats +
 +                              cpsw_gstrings_stats[i].stat_offset;
 +                      data[i] = *(u32 *)p;
 +                      break;
 +              }
 +      }
 +}
 +
  static inline int __show_stat(char *buf, int maxlen, const char *name, u32 val)
  {
        static char *leader = "........................................";
@@@ -1424,33 -1232,6 +1424,33 @@@ static void cpsw_ndo_tx_timeout(struct 
  
  }
  
 +static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
 +{
 +      struct cpsw_priv *priv = netdev_priv(ndev);
 +      struct sockaddr *addr = (struct sockaddr *)p;
 +      int flags = 0;
 +      u16 vid = 0;
 +
 +      if (!is_valid_ether_addr(addr->sa_data))
 +              return -EADDRNOTAVAIL;
 +
 +      if (priv->data.dual_emac) {
 +              vid = priv->slaves[priv->emac_port].port_vlan;
 +              flags = ALE_VLAN;
 +      }
 +
 +      cpsw_ale_del_ucast(priv->ale, priv->mac_addr, priv->host_port,
 +                         flags, vid);
 +      cpsw_ale_add_ucast(priv->ale, addr->sa_data, priv->host_port,
 +                         flags, vid);
 +
 +      memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
 +      memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);
 +      for_each_slave(priv, cpsw_set_slave_mac, priv);
 +
 +      return 0;
 +}
 +
  static struct net_device_stats *cpsw_ndo_get_stats(struct net_device *ndev)
  {
        struct cpsw_priv *priv = netdev_priv(ndev);
@@@ -1545,7 -1326,6 +1545,7 @@@ static const struct net_device_ops cpsw
        .ndo_stop               = cpsw_ndo_stop,
        .ndo_start_xmit         = cpsw_ndo_start_xmit,
        .ndo_change_rx_flags    = cpsw_ndo_change_rx_flags,
 +      .ndo_set_mac_address    = cpsw_ndo_set_mac_address,
        .ndo_do_ioctl           = cpsw_ndo_ioctl,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_change_mtu         = eth_change_mtu,
@@@ -1646,9 -1426,6 +1646,9 @@@ static const struct ethtool_ops cpsw_et
        .set_settings   = cpsw_set_settings,
        .get_coalesce   = cpsw_get_coalesce,
        .set_coalesce   = cpsw_set_coalesce,
 +      .get_sset_count         = cpsw_get_sset_count,
 +      .get_strings            = cpsw_get_strings,
 +      .get_ethtool_stats      = cpsw_get_ethtool_stats,
  };
  
  static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv,
@@@ -1846,7 -1623,6 +1846,7 @@@ static int cpsw_probe_dual_emac(struct 
        priv_sl2->host_port = priv->host_port;
        priv_sl2->host_port_regs = priv->host_port_regs;
        priv_sl2->wr_regs = priv->wr_regs;
 +      priv_sl2->hw_stats = priv->hw_stats;
        priv_sl2->dma = priv->dma;
        priv_sl2->txch = priv->txch;
        priv_sl2->rxch = priv->rxch;
@@@ -2004,8 -1780,7 +2004,8 @@@ static int cpsw_probe(struct platform_d
        switch (priv->version) {
        case CPSW_VERSION_1:
                priv->host_port_regs = ss_regs + CPSW1_HOST_PORT_OFFSET;
 -              priv->cpts->reg       = ss_regs + CPSW1_CPTS_OFFSET;
 +              priv->cpts->reg      = ss_regs + CPSW1_CPTS_OFFSET;
 +              priv->hw_stats       = ss_regs + CPSW1_HW_STATS;
                dma_params.dmaregs   = ss_regs + CPSW1_CPDMA_OFFSET;
                dma_params.txhdp     = ss_regs + CPSW1_STATERAM_OFFSET;
                ale_params.ale_regs  = ss_regs + CPSW1_ALE_OFFSET;
                break;
        case CPSW_VERSION_2:
                priv->host_port_regs = ss_regs + CPSW2_HOST_PORT_OFFSET;
 -              priv->cpts->reg       = ss_regs + CPSW2_CPTS_OFFSET;
 +              priv->cpts->reg      = ss_regs + CPSW2_CPTS_OFFSET;
 +              priv->hw_stats       = ss_regs + CPSW2_HW_STATS;
                dma_params.dmaregs   = ss_regs + CPSW2_CPDMA_OFFSET;
                dma_params.txhdp     = ss_regs + CPSW2_STATERAM_OFFSET;
                ale_params.ale_regs  = ss_regs + CPSW2_ALE_OFFSET;
  
        while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) {
                for (i = res->start; i <= res->end; i++) {
-                       if (request_irq(i, cpsw_interrupt, IRQF_DISABLED,
+                       if (request_irq(i, cpsw_interrupt, 0,
                                        dev_name(&pdev->dev), priv)) {
                                dev_err(priv->dev, "error attaching irq\n");
                                goto clean_ale_ret;
diff --combined drivers/net/macvlan.c
@@@ -337,8 -337,11 +337,11 @@@ static int macvlan_open(struct net_devi
        int err;
  
        if (vlan->port->passthru) {
-               if (!(vlan->flags & MACVLAN_FLAG_NOPROMISC))
-                       dev_set_promiscuity(lowerdev, 1);
+               if (!(vlan->flags & MACVLAN_FLAG_NOPROMISC)) {
+                       err = dev_set_promiscuity(lowerdev, 1);
+                       if (err < 0)
+                               goto out;
+               }
                goto hash_add;
        }
  
@@@ -597,9 -600,6 +600,9 @@@ static int macvlan_fdb_add(struct ndms
        if (!vlan->port->passthru)
                return -EOPNOTSUPP;
  
 +      if (flags & NLM_F_REPLACE)
 +              return -EOPNOTSUPP;
 +
        if (is_unicast_ether_addr(addr))
                err = dev_uc_add_excl(dev, addr);
        else if (is_multicast_ether_addr(addr))
@@@ -866,6 -866,18 +869,18 @@@ static int macvlan_changelink(struct ne
                struct nlattr *tb[], struct nlattr *data[])
  {
        struct macvlan_dev *vlan = netdev_priv(dev);
+       enum macvlan_mode mode;
+       bool set_mode = false;
+       /* Validate mode, but don't set yet: setting flags may fail. */
+       if (data && data[IFLA_MACVLAN_MODE]) {
+               set_mode = true;
+               mode = nla_get_u32(data[IFLA_MACVLAN_MODE]);
+               /* Passthrough mode can't be set or cleared dynamically */
+               if ((mode == MACVLAN_MODE_PASSTHRU) !=
+                   (vlan->mode == MACVLAN_MODE_PASSTHRU))
+                       return -EINVAL;
+       }
  
        if (data && data[IFLA_MACVLAN_FLAGS]) {
                __u16 flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]);
                }
                vlan->flags = flags;
        }
-       if (data && data[IFLA_MACVLAN_MODE])
-               vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]);
+       if (set_mode)
+               vlan->mode = mode;
        return 0;
  }
  
@@@ -688,9 -688,6 +688,9 @@@ static int ax88179_change_mtu(struct ne
                                  2, 2, &tmp16);
        }
  
 +      /* max qlen depend on hard_mtu and rx_urb_size */
 +      usbnet_update_max_qlen(dev);
 +
        return 0;
  }
  
@@@ -1032,10 -1029,10 +1032,10 @@@ static int ax88179_bind(struct usbnet *
        dev->mii.supports_gmii = 1;
  
        dev->net->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
-                             NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO;
+                             NETIF_F_RXCSUM;
  
        dev->net->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
-                                NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO;
+                                NETIF_F_RXCSUM;
  
        /* Enable checksum offload */
        *tmp = AX_RXCOE_IP | AX_RXCOE_TCP | AX_RXCOE_UDP |
@@@ -1176,7 -1173,6 +1176,6 @@@ ax88179_tx_fixup(struct usbnet *dev, st
        if (((skb->len + 8) % frame_size) == 0)
                tx_hdr2 |= 0x80008000;  /* Enable padding */
  
-       skb_linearize(skb);
        headroom = skb_headroom(skb);
        tailroom = skb_tailroom(skb);
  
@@@ -1320,10 -1316,10 +1319,10 @@@ static int ax88179_reset(struct usbnet 
                          1, 1, tmp);
  
        dev->net->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
-                             NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO;
+                             NETIF_F_RXCSUM;
  
        dev->net->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
-                                NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO;
+                                NETIF_F_RXCSUM;
  
        /* Enable checksum offload */
        *tmp = AX_RXCOE_IP | AX_RXCOE_TCP | AX_RXCOE_UDP |
diff --combined drivers/net/vxlan.c
@@@ -136,7 -136,8 +136,8 @@@ struct vxlan_dev 
        u32               flags;        /* VXLAN_F_* below */
  
        struct work_struct sock_work;
-       struct work_struct igmp_work;
+       struct work_struct igmp_join;
+       struct work_struct igmp_leave;
  
        unsigned long     age_interval;
        struct timer_list age_timer;
@@@ -407,26 -408,6 +408,26 @@@ static struct vxlan_rdst *vxlan_fdb_fin
        return NULL;
  }
  
 +/* Replace destination of unicast mac */
 +static int vxlan_fdb_replace(struct vxlan_fdb *f,
 +                          __be32 ip, __be16 port, __u32 vni, __u32 ifindex)
 +{
 +      struct vxlan_rdst *rd;
 +
 +      rd = vxlan_fdb_find_rdst(f, ip, port, vni, ifindex);
 +      if (rd)
 +              return 0;
 +
 +      rd = list_first_entry_or_null(&f->remotes, struct vxlan_rdst, list);
 +      if (!rd)
 +              return 0;
 +      rd->remote_ip = ip;
 +      rd->remote_port = port;
 +      rd->remote_vni = vni;
 +      rd->remote_ifindex = ifindex;
 +      return 1;
 +}
 +
  /* Add/update destinations for multicast */
  static int vxlan_fdb_append(struct vxlan_fdb *f,
                            __be32 ip, __be16 port, __u32 vni, __u32 ifindex)
@@@ -477,19 -458,6 +478,19 @@@ static int vxlan_fdb_create(struct vxla
                        f->updated = jiffies;
                        notify = 1;
                }
 +              if ((flags & NLM_F_REPLACE)) {
 +                      /* Only change unicasts */
 +                      if (!(is_multicast_ether_addr(f->eth_addr) ||
 +                           is_zero_ether_addr(f->eth_addr))) {
 +                              int rc = vxlan_fdb_replace(f, ip, port, vni,
 +                                                         ifindex);
 +
 +                              if (rc < 0)
 +                                      return rc;
 +                              notify |= rc;
 +                      } else
 +                              return -EOPNOTSUPP;
 +              }
                if ((flags & NLM_F_APPEND) &&
                    (is_multicast_ether_addr(f->eth_addr) ||
                     is_zero_ether_addr(f->eth_addr))) {
                if (vxlan->addrmax && vxlan->addrcnt >= vxlan->addrmax)
                        return -ENOSPC;
  
 +              /* Disallow replace to add a multicast entry */
 +              if ((flags & NLM_F_REPLACE) &&
 +                  (is_multicast_ether_addr(mac) || is_zero_ether_addr(mac)))
 +                      return -EOPNOTSUPP;
 +
                netdev_dbg(vxlan->dev, "add %pM -> %pI4\n", mac, &ip);
                f = kmalloc(sizeof(*f), GFP_ATOMIC);
                if (!f)
@@@ -774,7 -737,6 +775,6 @@@ static bool vxlan_snoop(struct net_devi
        return false;
  }
  
  /* See if multicast group is already in use by other ID */
  static bool vxlan_group_used(struct vxlan_net *vn, __be32 remote_ip)
  {
@@@ -808,12 -770,13 +808,13 @@@ static void vxlan_sock_release(struct v
        queue_work(vxlan_wq, &vs->del_work);
  }
  
- /* Callback to update multicast group membership.
-  * Scheduled when vxlan goes up/down.
+ /* Callback to update multicast group membership when first VNI on
+  * multicast address is brought up
+  * Done as workqueue because ip_mc_join_group acquires RTNL.
   */
- static void vxlan_igmp_work(struct work_struct *work)
+ static void vxlan_igmp_join(struct work_struct *work)
  {
-       struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, igmp_work);
+       struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, igmp_join);
        struct vxlan_net *vn = net_generic(dev_net(vxlan->dev), vxlan_net_id);
        struct vxlan_sock *vs = vxlan->vn_sock;
        struct sock *sk = vs->sock->sk;
        };
  
        lock_sock(sk);
-       if (vxlan_group_used(vn, vxlan->default_dst.remote_ip))
-               ip_mc_join_group(sk, &mreq);
-       else
-               ip_mc_leave_group(sk, &mreq);
+       ip_mc_join_group(sk, &mreq);
+       release_sock(sk);
+       vxlan_sock_release(vn, vs);
+       dev_put(vxlan->dev);
+ }
+ /* Inverse of vxlan_igmp_join when last VNI is brought down */
+ static void vxlan_igmp_leave(struct work_struct *work)
+ {
+       struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, igmp_leave);
+       struct vxlan_net *vn = net_generic(dev_net(vxlan->dev), vxlan_net_id);
+       struct vxlan_sock *vs = vxlan->vn_sock;
+       struct sock *sk = vs->sock->sk;
+       struct ip_mreqn mreq = {
+               .imr_multiaddr.s_addr   = vxlan->default_dst.remote_ip,
+               .imr_ifindex            = vxlan->default_dst.remote_ifindex,
+       };
+       lock_sock(sk);
+       ip_mc_leave_group(sk, &mreq);
        release_sock(sk);
  
        vxlan_sock_release(vn, vs);
@@@ -1397,6 -1377,7 +1415,7 @@@ static void vxlan_uninit(struct net_dev
  /* Start ageing timer and join group when device is brought up */
  static int vxlan_open(struct net_device *dev)
  {
+       struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
        struct vxlan_dev *vxlan = netdev_priv(dev);
        struct vxlan_sock *vs = vxlan->vn_sock;
  
        if (!vs)
                return -ENOTCONN;
  
-       if (IN_MULTICAST(ntohl(vxlan->default_dst.remote_ip))) {
+       if (IN_MULTICAST(ntohl(vxlan->default_dst.remote_ip)) &&
+           ! vxlan_group_used(vn, vxlan->default_dst.remote_ip)) {
                vxlan_sock_hold(vs);
                dev_hold(dev);
-               queue_work(vxlan_wq, &vxlan->igmp_work);
+               queue_work(vxlan_wq, &vxlan->igmp_join);
        }
  
        if (vxlan->age_interval)
@@@ -1438,13 -1420,15 +1458,15 @@@ static void vxlan_flush(struct vxlan_de
  /* Cleanup timer and forwarding table on shutdown */
  static int vxlan_stop(struct net_device *dev)
  {
+       struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
        struct vxlan_dev *vxlan = netdev_priv(dev);
        struct vxlan_sock *vs = vxlan->vn_sock;
  
-       if (vs && IN_MULTICAST(ntohl(vxlan->default_dst.remote_ip))) {
+       if (vs && IN_MULTICAST(ntohl(vxlan->default_dst.remote_ip)) &&
+           ! vxlan_group_used(vn, vxlan->default_dst.remote_ip)) {
                vxlan_sock_hold(vs);
                dev_hold(dev);
-               queue_work(vxlan_wq, &vxlan->igmp_work);
+               queue_work(vxlan_wq, &vxlan->igmp_leave);
        }
  
        del_timer_sync(&vxlan->age_timer);
@@@ -1509,7 -1493,8 +1531,8 @@@ static void vxlan_setup(struct net_devi
  
        INIT_LIST_HEAD(&vxlan->next);
        spin_lock_init(&vxlan->hash_lock);
-       INIT_WORK(&vxlan->igmp_work, vxlan_igmp_work);
+       INIT_WORK(&vxlan->igmp_join, vxlan_igmp_join);
+       INIT_WORK(&vxlan->igmp_leave, vxlan_igmp_leave);
        INIT_WORK(&vxlan->sock_work, vxlan_sock_work);
  
        init_timer_deferrable(&vxlan->age_timer);
@@@ -1916,10 -1901,12 +1939,12 @@@ static __net_exit void vxlan_exit_net(s
  {
        struct vxlan_net *vn = net_generic(net, vxlan_net_id);
        struct vxlan_dev *vxlan;
+       LIST_HEAD(list);
  
        rtnl_lock();
        list_for_each_entry(vxlan, &vn->vxlan_list, next)
-               dev_close(vxlan->dev);
+               unregister_netdevice_queue(vxlan->dev, &list);
+       unregister_netdevice_many(&list);
        rtnl_unlock();
  }
  
@@@ -728,16 -728,6 +728,16 @@@ struct netdev_fcoe_hbainfo 
  };
  #endif
  
 +#define MAX_PHYS_PORT_ID_LEN 32
 +
 +/* This structure holds a unique identifier to identify the
 + * physical port used by a netdevice.
 + */
 +struct netdev_phys_port_id {
 +      unsigned char id[MAX_PHYS_PORT_ID_LEN];
 +      unsigned char id_len;
 +};
 +
  /*
   * This structure defines the management hooks for network devices.
   * The following hooks can be defined; unless noted otherwise, they are
   *    that determine carrier state from physical hardware properties (eg
   *    network cables) or protocol-dependent mechanisms (eg
   *    USB_CDC_NOTIFY_NETWORK_CONNECTION) should NOT implement this function.
 + *
 + * int (*ndo_get_phys_port_id)(struct net_device *dev,
 + *                           struct netdev_phys_port_id *ppid);
 + *    Called to get ID of physical port of this device. If driver does
 + *    not implement this, it is assumed that the hw is not able to have
 + *    multiple net devices on single physical port.
   */
  struct net_device_ops {
        int                     (*ndo_init)(struct net_device *dev);
                                                     gfp_t gfp);
        void                    (*ndo_netpoll_cleanup)(struct net_device *dev);
  #endif
- #ifdef CONFIG_NET_LL_RX_POLL
+ #ifdef CONFIG_NET_RX_BUSY_POLL
        int                     (*ndo_busy_poll)(struct napi_struct *dev);
  #endif
        int                     (*ndo_set_vf_mac)(struct net_device *dev,
                                                      struct nlmsghdr *nlh);
        int                     (*ndo_change_carrier)(struct net_device *dev,
                                                      bool new_carrier);
 +      int                     (*ndo_get_phys_port_id)(struct net_device *dev,
 +                                                      struct netdev_phys_port_id *ppid);
  };
  
  /*
@@@ -1651,7 -1633,6 +1651,7 @@@ struct packet_offload 
  #define NETDEV_NOTIFY_PEERS   0x0013
  #define NETDEV_JOIN           0x0014
  #define NETDEV_CHANGEUPPER    0x0015
 +#define NETDEV_RESEND_IGMP    0x0016
  
  extern int register_netdevice_notifier(struct notifier_block *nb);
  extern int unregister_netdevice_notifier(struct notifier_block *nb);
@@@ -1684,6 -1665,9 +1684,6 @@@ extern int call_netdevice_notifiers(uns
  
  extern rwlock_t                               dev_base_lock;          /* Device list lock */
  
 -extern seqcount_t     devnet_rename_seq;      /* Device rename seq */
 -
 -
  #define for_each_netdev(net, d)               \
                list_for_each_entry(d, &(net)->dev_base_head, dev_list)
  #define for_each_netdev_reverse(net, d)       \
@@@ -2333,8 -2317,6 +2333,8 @@@ extern int              dev_set_mac_address(struct 
                                            struct sockaddr *);
  extern int            dev_change_carrier(struct net_device *,
                                           bool new_carrier);
 +extern int            dev_get_phys_port_id(struct net_device *dev,
 +                                           struct netdev_phys_port_id *ppid);
  extern int            dev_hard_start_xmit(struct sk_buff *skb,
                                            struct net_device *dev,
                                            struct netdev_queue *txq);
diff --combined include/linux/skbuff.h
@@@ -501,7 -501,7 +501,7 @@@ struct sk_buff 
        /* 7/9 bit hole (depending on ndisc_nodetype presence) */
        kmemcheck_bitfield_end(flags2);
  
- #if defined CONFIG_NET_DMA || defined CONFIG_NET_LL_RX_POLL
+ #if defined CONFIG_NET_DMA || defined CONFIG_NET_RX_BUSY_POLL
        union {
                unsigned int    napi_id;
                dma_cookie_t    dma_cookie;
@@@ -1805,13 -1805,10 +1805,13 @@@ static inline void pskb_trim_unique(str
   */
  static inline void skb_orphan(struct sk_buff *skb)
  {
 -      if (skb->destructor)
 +      if (skb->destructor) {
                skb->destructor(skb);
 -      skb->destructor = NULL;
 -      skb->sk         = NULL;
 +              skb->destructor = NULL;
 +              skb->sk         = NULL;
 +      } else {
 +              BUG_ON(skb->sk);
 +      }
  }
  
  /**
diff --combined include/net/sock.h
@@@ -327,7 -327,7 +327,7 @@@ struct sock 
  #ifdef CONFIG_RPS
        __u32                   sk_rxhash;
  #endif
- #ifdef CONFIG_NET_LL_RX_POLL
+ #ifdef CONFIG_NET_RX_BUSY_POLL
        unsigned int            sk_napi_id;
        unsigned int            sk_ll_usec;
  #endif
@@@ -746,6 -746,11 +746,6 @@@ static inline int sk_stream_wspace(cons
  
  extern void sk_stream_write_space(struct sock *sk);
  
 -static inline bool sk_stream_memory_free(const struct sock *sk)
 -{
 -      return sk->sk_wmem_queued < sk->sk_sndbuf;
 -}
 -
  /* OOB backlog add */
  static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
  {
@@@ -945,7 -950,6 +945,7 @@@ struct proto 
        unsigned int            inuse_idx;
  #endif
  
 +      bool                    (*stream_memory_free)(const struct sock *sk);
        /* Memory pressure */
        void                    (*enter_memory_pressure)(struct sock *sk);
        atomic_long_t           *memory_allocated;      /* Current allocated memory. */
@@@ -1084,21 -1088,6 +1084,21 @@@ static inline struct cg_proto *parent_c
  }
  #endif
  
 +static inline bool sk_stream_memory_free(const struct sock *sk)
 +{
 +      if (sk->sk_wmem_queued >= sk->sk_sndbuf)
 +              return false;
 +
 +      return sk->sk_prot->stream_memory_free ?
 +              sk->sk_prot->stream_memory_free(sk) : true;
 +}
 +
 +static inline bool sk_stream_is_writeable(const struct sock *sk)
 +{
 +      return sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) &&
 +             sk_stream_memory_free(sk);
 +}
 +
  
  static inline bool sk_has_memory_pressure(const struct sock *sk)
  {
@@@ -1520,7 -1509,6 +1520,7 @@@ extern struct sk_buff           *sock_rmalloc(st
                                              unsigned long size, int force,
                                              gfp_t priority);
  extern void                   sock_wfree(struct sk_buff *skb);
 +extern void                   skb_orphan_partial(struct sk_buff *skb);
  extern void                   sock_rfree(struct sk_buff *skb);
  extern void                   sock_edemux(struct sk_buff *skb);
  
@@@ -2261,8 -2249,6 +2261,8 @@@ static inline struct sock *skb_steal_so
  extern void sock_enable_timestamp(struct sock *sk, int flag);
  extern int sock_get_timestamp(struct sock *, struct timeval __user *);
  extern int sock_get_timestampns(struct sock *, struct timespec __user *);
 +extern int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
 +                            int level, int type);
  
  /*
   *    Enable debug/info messages
diff --combined net/Kconfig
@@@ -244,7 -244,7 +244,7 @@@ config NETPRIO_CGROU
          Cgroup subsystem for use in assigning processes to network priorities on
          a per-interface basis
  
- config NET_LL_RX_POLL
+ config NET_RX_BUSY_POLL
        boolean
        default y
  
@@@ -281,7 -281,7 +281,7 @@@ menu "Network testing
  
  config NET_PKTGEN
        tristate "Packet Generator (USE WITH CAUTION)"
 -      depends on PROC_FS
 +      depends on INET && PROC_FS
        ---help---
          This module will inject preconfigured packets, at a configurable
          rate, out of a given interface.  It is used for network interface
diff --combined net/bridge/br_device.c
@@@ -70,7 -70,8 +70,8 @@@ netdev_tx_t br_dev_xmit(struct sk_buff 
                }
  
                mdst = br_mdb_get(br, skb, vid);
-               if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb))
+               if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) &&
+                   br_multicast_querier_exists(br))
                        br_multicast_deliver(mdst, skb);
                else
                        br_flood_deliver(br, skb, false);
@@@ -244,22 -245,22 +245,22 @@@ fail
  int br_netpoll_enable(struct net_bridge_port *p, gfp_t gfp)
  {
        struct netpoll *np;
 -      int err = 0;
 +      int err;
 +
 +      if (!p->br->dev->npinfo)
 +              return 0;
  
        np = kzalloc(sizeof(*p->np), gfp);
 -      err = -ENOMEM;
        if (!np)
 -              goto out;
 +              return -ENOMEM;
  
        err = __netpoll_setup(np, p->dev, gfp);
        if (err) {
                kfree(np);
 -              goto out;
 +              return err;
        }
  
        p->np = np;
 -
 -out:
        return err;
  }
  
diff --combined net/bridge/br_private.h
@@@ -267,6 -267,7 +267,7 @@@ struct net_bridg
        unsigned long                   multicast_query_interval;
        unsigned long                   multicast_query_response_interval;
        unsigned long                   multicast_startup_query_interval;
+       unsigned long                   multicast_querier_delay_time;
  
        spinlock_t                      multicast_lock;
        struct net_bridge_mdb_htable __rcu *mdb;
@@@ -333,6 -334,11 +334,6 @@@ extern void br_dev_delete(struct net_de
  extern netdev_tx_t br_dev_xmit(struct sk_buff *skb,
                               struct net_device *dev);
  #ifdef CONFIG_NET_POLL_CONTROLLER
 -static inline struct netpoll_info *br_netpoll_info(struct net_bridge *br)
 -{
 -      return br->dev->npinfo;
 -}
 -
  static inline void br_netpoll_send_skb(const struct net_bridge_port *p,
                                       struct sk_buff *skb)
  {
  extern int br_netpoll_enable(struct net_bridge_port *p, gfp_t gfp);
  extern void br_netpoll_disable(struct net_bridge_port *p);
  #else
 -static inline struct netpoll_info *br_netpoll_info(struct net_bridge *br)
 -{
 -      return NULL;
 -}
 -
  static inline void br_netpoll_send_skb(const struct net_bridge_port *p,
                                       struct sk_buff *skb)
  {
@@@ -491,6 -502,13 +492,13 @@@ static inline bool br_multicast_is_rout
               (br->multicast_router == 1 &&
                timer_pending(&br->multicast_router_timer));
  }
+ static inline bool br_multicast_querier_exists(struct net_bridge *br)
+ {
+       return time_is_before_jiffies(br->multicast_querier_delay_time) &&
+              (br->multicast_querier ||
+               timer_pending(&br->multicast_querier_timer));
+ }
  #else
  static inline int br_multicast_rcv(struct net_bridge *br,
                                   struct net_bridge_port *port,
@@@ -547,6 -565,10 +555,10 @@@ static inline bool br_multicast_is_rout
  {
        return 0;
  }
+ static inline bool br_multicast_querier_exists(struct net_bridge *br)
+ {
+       return false;
+ }
  static inline void br_mdb_init(void)
  {
  }
diff --combined net/core/neighbour.c
@@@ -1441,18 -1441,16 +1441,18 @@@ struct neigh_parms *neigh_parms_alloc(s
                atomic_set(&p->refcnt, 1);
                p->reachable_time =
                                neigh_rand_reach_time(p->base_reachable_time);
 +              dev_hold(dev);
 +              p->dev = dev;
 +              write_pnet(&p->net, hold_net(net));
 +              p->sysctl_table = NULL;
  
                if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
 +                      release_net(net);
 +                      dev_put(dev);
                        kfree(p);
                        return NULL;
                }
  
 -              dev_hold(dev);
 -              p->dev = dev;
 -              write_pnet(&p->net, hold_net(net));
 -              p->sysctl_table = NULL;
                write_lock_bh(&tbl->lock);
                p->next         = tbl->parms.next;
                tbl->parms.next = p;
@@@ -2769,6 -2767,7 +2769,7 @@@ EXPORT_SYMBOL(neigh_app_ns)
  
  #ifdef CONFIG_SYSCTL
  static int zero;
+ static int int_max = INT_MAX;
  static int unres_qlen_max = INT_MAX / SKB_TRUESIZE(ETH_FRAME_LEN);
  
  static int proc_unres_qlen(struct ctl_table *ctl, int write,
@@@ -2821,19 -2820,25 +2822,25 @@@ static struct neigh_sysctl_table 
                        .procname       = "mcast_solicit",
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
-                       .proc_handler   = proc_dointvec,
+                       .extra1         = &zero,
+                       .extra2         = &int_max,
+                       .proc_handler   = proc_dointvec_minmax,
                },
                [NEIGH_VAR_UCAST_PROBE] = {
                        .procname       = "ucast_solicit",
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
-                       .proc_handler   = proc_dointvec,
+                       .extra1         = &zero,
+                       .extra2         = &int_max,
+                       .proc_handler   = proc_dointvec_minmax,
                },
                [NEIGH_VAR_APP_PROBE] = {
                        .procname       = "app_solicit",
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
-                       .proc_handler   = proc_dointvec,
+                       .extra1         = &zero,
+                       .extra2         = &int_max,
+                       .proc_handler   = proc_dointvec_minmax,
                },
                [NEIGH_VAR_RETRANS_TIME] = {
                        .procname       = "retrans_time",
                        .procname       = "proxy_qlen",
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
-                       .proc_handler   = proc_dointvec,
+                       .extra1         = &zero,
+                       .extra2         = &int_max,
+                       .proc_handler   = proc_dointvec_minmax,
                },
                [NEIGH_VAR_ANYCAST_DELAY] = {
                        .procname       = "anycast_delay",
                        .procname       = "gc_thresh1",
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
-                       .proc_handler   = proc_dointvec,
+                       .extra1         = &zero,
+                       .extra2         = &int_max,
+                       .proc_handler   = proc_dointvec_minmax,
                },
                [NEIGH_VAR_GC_THRESH2] = {
                        .procname       = "gc_thresh2",
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
-                       .proc_handler   = proc_dointvec,
+                       .extra1         = &zero,
+                       .extra2         = &int_max,
+                       .proc_handler   = proc_dointvec_minmax,
                },
                [NEIGH_VAR_GC_THRESH3] = {
                        .procname       = "gc_thresh3",
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
-                       .proc_handler   = proc_dointvec,
+                       .extra1         = &zero,
+                       .extra2         = &int_max,
+                       .proc_handler   = proc_dointvec_minmax,
                },
                {},
        },
diff --combined net/core/sock.c
@@@ -93,7 -93,6 +93,7 @@@
  
  #include <linux/capability.h>
  #include <linux/errno.h>
 +#include <linux/errqueue.h>
  #include <linux/types.h>
  #include <linux/socket.h>
  #include <linux/in.h>
@@@ -901,7 -900,7 +901,7 @@@ set_rcvbuf
                sock_valbool_flag(sk, SOCK_SELECT_ERR_QUEUE, valbool);
                break;
  
- #ifdef CONFIG_NET_LL_RX_POLL
+ #ifdef CONFIG_NET_RX_BUSY_POLL
        case SO_BUSY_POLL:
                /* allow unprivileged users to decrease the value */
                if ((val > sk->sk_ll_usec) && !capable(CAP_NET_ADMIN))
@@@ -1171,7 -1170,7 +1171,7 @@@ int sock_getsockopt(struct socket *sock
                v.val = sock_flag(sk, SOCK_SELECT_ERR_QUEUE);
                break;
  
- #ifdef CONFIG_NET_LL_RX_POLL
+ #ifdef CONFIG_NET_RX_BUSY_POLL
        case SO_BUSY_POLL:
                v.val = sk->sk_ll_usec;
                break;
@@@ -1576,25 -1575,6 +1576,25 @@@ void sock_wfree(struct sk_buff *skb
  }
  EXPORT_SYMBOL(sock_wfree);
  
 +void skb_orphan_partial(struct sk_buff *skb)
 +{
 +      /* TCP stack sets skb->ooo_okay based on sk_wmem_alloc,
 +       * so we do not completely orphan skb, but transfert all
 +       * accounted bytes but one, to avoid unexpected reorders.
 +       */
 +      if (skb->destructor == sock_wfree
 +#ifdef CONFIG_INET
 +          || skb->destructor == tcp_wfree
 +#endif
 +              ) {
 +              atomic_sub(skb->truesize - 1, &skb->sk->sk_wmem_alloc);
 +              skb->truesize = 1;
 +      } else {
 +              skb_orphan(skb);
 +      }
 +}
 +EXPORT_SYMBOL(skb_orphan_partial);
 +
  /*
   * Read buffer destructor automatically called from kfree_skb.
   */
@@@ -2312,7 -2292,7 +2312,7 @@@ void sock_init_data(struct socket *sock
  
        sk->sk_stamp = ktime_set(-1L, 0);
  
- #ifdef CONFIG_NET_LL_RX_POLL
+ #ifdef CONFIG_NET_RX_BUSY_POLL
        sk->sk_napi_id          =       0;
        sk->sk_ll_usec          =       sysctl_net_busy_read;
  #endif
@@@ -2445,52 -2425,6 +2445,52 @@@ void sock_enable_timestamp(struct sock 
        }
  }
  
 +int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
 +                     int level, int type)
 +{
 +      struct sock_exterr_skb *serr;
 +      struct sk_buff *skb, *skb2;
 +      int copied, err;
 +
 +      err = -EAGAIN;
 +      skb = skb_dequeue(&sk->sk_error_queue);
 +      if (skb == NULL)
 +              goto out;
 +
 +      copied = skb->len;
 +      if (copied > len) {
 +              msg->msg_flags |= MSG_TRUNC;
 +              copied = len;
 +      }
 +      err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
 +      if (err)
 +              goto out_free_skb;
 +
 +      sock_recv_timestamp(msg, sk, skb);
 +
 +      serr = SKB_EXT_ERR(skb);
 +      put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee);
 +
 +      msg->msg_flags |= MSG_ERRQUEUE;
 +      err = copied;
 +
 +      /* Reset and regenerate socket error */
 +      spin_lock_bh(&sk->sk_error_queue.lock);
 +      sk->sk_err = 0;
 +      if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) {
 +              sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno;
 +              spin_unlock_bh(&sk->sk_error_queue.lock);
 +              sk->sk_error_report(sk);
 +      } else
 +              spin_unlock_bh(&sk->sk_error_queue.lock);
 +
 +out_free_skb:
 +      kfree_skb(skb);
 +out:
 +      return err;
 +}
 +EXPORT_SYMBOL(sock_recv_errqueue);
 +
  /*
   *    Get a socket option on an socket.
   *
diff --combined net/ipv4/devinet.c
@@@ -772,7 -772,7 +772,7 @@@ static struct in_ifaddr *rtm_to_ifaddr(
                ci = nla_data(tb[IFA_CACHEINFO]);
                if (!ci->ifa_valid || ci->ifa_prefered > ci->ifa_valid) {
                        err = -EINVAL;
-                       goto errout;
+                       goto errout_free;
                }
                *pvalid_lft = ci->ifa_valid;
                *pprefered_lft = ci->ifa_prefered;
  
        return ifa;
  
+ errout_free:
+       inet_free_ifa(ifa);
  errout:
        return ERR_PTR(err);
  }
@@@ -1124,7 -1126,10 +1126,7 @@@ static int inet_gifconf(struct net_devi
                if (len < (int) sizeof(ifr))
                        break;
                memset(&ifr, 0, sizeof(struct ifreq));
 -              if (ifa->ifa_label)
 -                      strcpy(ifr.ifr_name, ifa->ifa_label);
 -              else
 -                      strcpy(ifr.ifr_name, dev->name);
 +              strcpy(ifr.ifr_name, ifa->ifa_label);
  
                (*(struct sockaddr_in *)&ifr.ifr_addr).sin_family = AF_INET;
                (*(struct sockaddr_in *)&ifr.ifr_addr).sin_addr.s_addr =
@@@ -36,6 -36,8 +36,8 @@@ static int tcp_adv_win_scale_min = -31
  static int tcp_adv_win_scale_max = 31;
  static int ip_ttl_min = 1;
  static int ip_ttl_max = 255;
+ static int tcp_syn_retries_min = 1;
+ static int tcp_syn_retries_max = MAX_TCP_SYNCNT;
  static int ip_ping_group_range_min[] = { 0, 0 };
  static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
  
@@@ -332,7 -334,9 +334,9 @@@ static struct ctl_table ipv4_table[] = 
                .data           = &sysctl_tcp_syn_retries,
                .maxlen         = sizeof(int),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec
+               .proc_handler   = proc_dointvec_minmax,
+               .extra1         = &tcp_syn_retries_min,
+               .extra2         = &tcp_syn_retries_max
        },
        {
                .procname       = "tcp_synack_retries",
                .proc_handler   = proc_dointvec_minmax,
                .extra1         = &one,
        },
 +      {
 +              .procname       = "tcp_notsent_lowat",
 +              .data           = &sysctl_tcp_notsent_lowat,
 +              .maxlen         = sizeof(sysctl_tcp_notsent_lowat),
 +              .mode           = 0644,
 +              .proc_handler   = proc_dointvec,
 +      },
        {
                .procname       = "tcp_rmem",
                .data           = &sysctl_tcp_rmem,
diff --combined net/ipv6/addrconf.c
@@@ -813,8 -813,9 +813,9 @@@ static u32 inet6_addr_hash(const struc
  /* On success it returns ifp with increased reference count */
  
  static struct inet6_ifaddr *
- ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen,
-             int scope, u32 flags)
+ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr,
+             const struct in6_addr *peer_addr, int pfxlen,
+             int scope, u32 flags, u32 valid_lft, u32 prefered_lft)
  {
        struct inet6_ifaddr *ifa = NULL;
        struct rt6_info *rt;
        }
  
        ifa->addr = *addr;
+       if (peer_addr)
+               ifa->peer_addr = *peer_addr;
  
        spin_lock_init(&ifa->lock);
        spin_lock_init(&ifa->state_lock);
        ifa->scope = scope;
        ifa->prefix_len = pfxlen;
        ifa->flags = flags | IFA_F_TENTATIVE;
+       ifa->valid_lft = valid_lft;
+       ifa->prefered_lft = prefered_lft;
        ifa->cstamp = ifa->tstamp = jiffies;
        ifa->tokenized = false;
  
@@@ -1123,8 -1128,9 +1128,9 @@@ retry
  
        ift = !max_addresses ||
              ipv6_count_addresses(idev) < max_addresses ?
-               ipv6_add_addr(idev, &addr, tmp_plen, ipv6_addr_scope(&addr),
-                             addr_flags) : NULL;
+               ipv6_add_addr(idev, &addr, NULL, tmp_plen,
+                             ipv6_addr_scope(&addr), addr_flags,
+                             tmp_valid_lft, tmp_prefered_lft) : NULL;
        if (IS_ERR_OR_NULL(ift)) {
                in6_ifa_put(ifp);
                in6_dev_put(idev);
  
        spin_lock_bh(&ift->lock);
        ift->ifpub = ifp;
-       ift->valid_lft = tmp_valid_lft;
-       ift->prefered_lft = tmp_prefered_lft;
        ift->cstamp = now;
        ift->tstamp = tmp_tstamp;
        spin_unlock_bh(&ift->lock);
                         */
                        if (!max_addresses ||
                            ipv6_count_addresses(in6_dev) < max_addresses)
-                               ifp = ipv6_add_addr(in6_dev, &addr, pinfo->prefix_len,
+                               ifp = ipv6_add_addr(in6_dev, &addr, NULL,
+                                                   pinfo->prefix_len,
                                                    addr_type&IPV6_ADDR_SCOPE_MASK,
-                                                   addr_flags);
+                                                   addr_flags, valid_lft,
+                                                   prefered_lft);
  
                        if (IS_ERR_OR_NULL(ifp)) {
                                in6_dev_put(in6_dev);
                                return;
                        }
  
-                       update_lft = create = 1;
+                       update_lft = 0;
+                       create = 1;
                        ifp->cstamp = jiffies;
                        ifp->tokenized = tokenized;
                        addrconf_dad_start(ifp);
                                stored_lft = ifp->valid_lft - (now - ifp->tstamp) / HZ;
                        else
                                stored_lft = 0;
-                       if (!update_lft && stored_lft) {
+                       if (!update_lft && !create && stored_lft) {
                                if (valid_lft > MIN_VALID_LIFETIME ||
                                    valid_lft > stored_lft)
                                        update_lft = 1;
@@@ -2455,17 -2462,10 +2462,10 @@@ static int inet6_addr_add(struct net *n
                prefered_lft = timeout;
        }
  
-       ifp = ipv6_add_addr(idev, pfx, plen, scope, ifa_flags);
+       ifp = ipv6_add_addr(idev, pfx, peer_pfx, plen, scope, ifa_flags,
+                           valid_lft, prefered_lft);
  
        if (!IS_ERR(ifp)) {
-               spin_lock_bh(&ifp->lock);
-               ifp->valid_lft = valid_lft;
-               ifp->prefered_lft = prefered_lft;
-               ifp->tstamp = jiffies;
-               if (peer_pfx)
-                       ifp->peer_addr = *peer_pfx;
-               spin_unlock_bh(&ifp->lock);
                addrconf_prefix_route(&ifp->addr, ifp->prefix_len, dev,
                                      expires, flags);
                /*
@@@ -2557,7 -2557,8 +2557,8 @@@ static void add_addr(struct inet6_dev *
  {
        struct inet6_ifaddr *ifp;
  
-       ifp = ipv6_add_addr(idev, addr, plen, scope, IFA_F_PERMANENT);
+       ifp = ipv6_add_addr(idev, addr, NULL, plen,
+                           scope, IFA_F_PERMANENT, 0, 0);
        if (!IS_ERR(ifp)) {
                spin_lock_bh(&ifp->lock);
                ifp->flags &= ~IFA_F_TENTATIVE;
@@@ -2683,7 -2684,7 +2684,7 @@@ static void addrconf_add_linklocal(stru
  #endif
  
  
-       ifp = ipv6_add_addr(idev, addr, 64, IFA_LINK, addr_flags);
+       ifp = ipv6_add_addr(idev, addr, NULL, 64, IFA_LINK, addr_flags, 0, 0);
        if (!IS_ERR(ifp)) {
                addrconf_prefix_route(&ifp->addr, ifp->prefix_len, idev->dev, 0, 0);
                addrconf_dad_start(ifp);
@@@ -4653,7 -4654,6 +4654,7 @@@ static void __ipv6_ifa_notify(int event
                break;
        }
        atomic_inc(&net->ipv6.dev_addr_genid);
 +      rt_genid_bump_ipv6(net);
  }
  
  static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
diff --combined net/ipv6/ip6_fib.c
@@@ -425,8 -425,8 +425,8 @@@ out
   *    node.
   */
  
 -static struct fib6_node * fib6_add_1(struct fib6_node *root, void *addr,
 -                                   int addrlen, int plen,
 +static struct fib6_node *fib6_add_1(struct fib6_node *root,
 +                                   struct in6_addr *addr, int plen,
                                     int offset, int allow_create,
                                     int replace_required)
  {
@@@ -543,7 -543,7 +543,7 @@@ insert_above
           but if it is >= plen, the value is ignored in any case.
         */
  
 -      bit = __ipv6_addr_diff(addr, &key->addr, addrlen);
 +      bit = __ipv6_addr_diff(addr, &key->addr, sizeof(*addr));
  
        /*
         *              (intermediate)[in]
@@@ -822,9 -822,9 +822,9 @@@ int fib6_add(struct fib6_node *root, st
        if (!allow_create && !replace_required)
                pr_warn("RTM_NEWROUTE with no NLM_F_CREATE or NLM_F_REPLACE\n");
  
 -      fn = fib6_add_1(root, &rt->rt6i_dst.addr, sizeof(struct in6_addr),
 -                      rt->rt6i_dst.plen, offsetof(struct rt6_info, rt6i_dst),
 -                      allow_create, replace_required);
 +      fn = fib6_add_1(root, &rt->rt6i_dst.addr, rt->rt6i_dst.plen,
 +                      offsetof(struct rt6_info, rt6i_dst), allow_create,
 +                      replace_required);
  
        if (IS_ERR(fn)) {
                err = PTR_ERR(fn);
                        /* Now add the first leaf node to new subtree */
  
                        sn = fib6_add_1(sfn, &rt->rt6i_src.addr,
 -                                      sizeof(struct in6_addr), rt->rt6i_src.plen,
 +                                      rt->rt6i_src.plen,
                                        offsetof(struct rt6_info, rt6i_src),
                                        allow_create, replace_required);
  
                        fn->subtree = sfn;
                } else {
                        sn = fib6_add_1(fn->subtree, &rt->rt6i_src.addr,
 -                                      sizeof(struct in6_addr), rt->rt6i_src.plen,
 +                                      rt->rt6i_src.plen,
                                        offsetof(struct rt6_info, rt6i_src),
                                        allow_create, replace_required);
  
@@@ -1632,27 -1632,28 +1632,28 @@@ static int fib6_age(struct rt6_info *rt
  
  static DEFINE_SPINLOCK(fib6_gc_lock);
  
- void fib6_run_gc(unsigned long expires, struct net *net)
+ void fib6_run_gc(unsigned long expires, struct net *net, bool force)
  {
-       if (expires != ~0UL) {
+       unsigned long now;
+       if (force) {
                spin_lock_bh(&fib6_gc_lock);
-               gc_args.timeout = expires ? (int)expires :
-                       net->ipv6.sysctl.ip6_rt_gc_interval;
-       } else {
-               if (!spin_trylock_bh(&fib6_gc_lock)) {
-                       mod_timer(&net->ipv6.ip6_fib_timer, jiffies + HZ);
-                       return;
-               }
-               gc_args.timeout = net->ipv6.sysctl.ip6_rt_gc_interval;
+       } else if (!spin_trylock_bh(&fib6_gc_lock)) {
+               mod_timer(&net->ipv6.ip6_fib_timer, jiffies + HZ);
+               return;
        }
+       gc_args.timeout = expires ? (int)expires :
+                         net->ipv6.sysctl.ip6_rt_gc_interval;
  
        gc_args.more = icmp6_dst_gc();
  
        fib6_clean_all(net, fib6_age, 0, NULL);
+       now = jiffies;
+       net->ipv6.ip6_rt_last_gc = now;
  
        if (gc_args.more)
                mod_timer(&net->ipv6.ip6_fib_timer,
-                         round_jiffies(jiffies
+                         round_jiffies(now
                                        + net->ipv6.sysctl.ip6_rt_gc_interval));
        else
                del_timer(&net->ipv6.ip6_fib_timer);
  
  static void fib6_gc_timer_cb(unsigned long arg)
  {
-       fib6_run_gc(0, (struct net *)arg);
+       fib6_run_gc(0, (struct net *)arg, true);
  }
  
  static int __net_init fib6_net_init(struct net *net)
diff --combined net/ipv6/ip6mr.c
@@@ -110,8 -110,8 +110,8 @@@ static struct kmem_cache *mrt_cachep __
  static struct mr6_table *ip6mr_new_table(struct net *net, u32 id);
  static void ip6mr_free_table(struct mr6_table *mrt);
  
 -static int ip6_mr_forward(struct net *net, struct mr6_table *mrt,
 -                        struct sk_buff *skb, struct mfc6_cache *cache);
 +static void ip6_mr_forward(struct net *net, struct mr6_table *mrt,
 +                         struct sk_buff *skb, struct mfc6_cache *cache);
  static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt,
                              mifi_t mifi, int assert);
  static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
@@@ -259,10 -259,12 +259,12 @@@ static void __net_exit ip6mr_rules_exit
  {
        struct mr6_table *mrt, *next;
  
+       rtnl_lock();
        list_for_each_entry_safe(mrt, next, &net->ipv6.mr6_tables, list) {
                list_del(&mrt->list);
                ip6mr_free_table(mrt);
        }
+       rtnl_unlock();
        fib_rules_unregister(net->ipv6.mr6_rules_ops);
  }
  #else
@@@ -289,7 -291,10 +291,10 @@@ static int __net_init ip6mr_rules_init(
  
  static void __net_exit ip6mr_rules_exit(struct net *net)
  {
+       rtnl_lock();
        ip6mr_free_table(net->ipv6.mrt6);
+       net->ipv6.mrt6 = NULL;
+       rtnl_unlock();
  }
  #endif
  
@@@ -2069,8 -2074,8 +2074,8 @@@ static int ip6mr_find_vif(struct mr6_ta
        return ct;
  }
  
 -static int ip6_mr_forward(struct net *net, struct mr6_table *mrt,
 -                        struct sk_buff *skb, struct mfc6_cache *cache)
 +static void ip6_mr_forward(struct net *net, struct mr6_table *mrt,
 +                         struct sk_buff *skb, struct mfc6_cache *cache)
  {
        int psend = -1;
        int vif, ct;
@@@ -2151,11 -2156,12 +2156,11 @@@ forward
  last_forward:
        if (psend != -1) {
                ip6mr_forward2(net, mrt, skb, cache, psend);
 -              return 0;
 +              return;
        }
  
  dont_forward:
        kfree_skb(skb);
 -      return 0;
  }
  
  
diff --combined net/ipv6/route.c
@@@ -283,8 -283,9 +283,8 @@@ static inline struct rt6_info *ip6_dst_
  
                memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst));
                rt6_init_peer(rt, table ? &table->tb6_peers : net->ipv6.peers);
 -              rt->rt6i_genid = rt_genid(net);
 +              rt->rt6i_genid = rt_genid_ipv6(net);
                INIT_LIST_HEAD(&rt->rt6i_siblings);
 -              rt->rt6i_nsiblings = 0;
        }
        return rt;
  }
@@@ -1061,7 -1062,7 +1061,7 @@@ static struct dst_entry *ip6_dst_check(
         * DST_OBSOLETE_FORCE_CHK which forces validation calls down
         * into this function always.
         */
 -      if (rt->rt6i_genid != rt_genid(dev_net(rt->dst.dev)))
 +      if (rt->rt6i_genid != rt_genid_ipv6(dev_net(rt->dst.dev)))
                return NULL;
  
        if (rt->rt6i_node && (rt->rt6i_node->fn_sernum == cookie))
@@@ -1310,7 -1311,6 +1310,6 @@@ static void icmp6_clean_all(int (*func)
  
  static int ip6_dst_gc(struct dst_ops *ops)
  {
-       unsigned long now = jiffies;
        struct net *net = container_of(ops, struct net, ipv6.ip6_dst_ops);
        int rt_min_interval = net->ipv6.sysctl.ip6_rt_gc_min_interval;
        int rt_max_size = net->ipv6.sysctl.ip6_rt_max_size;
        int entries;
  
        entries = dst_entries_get_fast(ops);
-       if (time_after(rt_last_gc + rt_min_interval, now) &&
+       if (time_after(rt_last_gc + rt_min_interval, jiffies) &&
            entries <= rt_max_size)
                goto out;
  
        net->ipv6.ip6_rt_gc_expire++;
-       fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net);
-       net->ipv6.ip6_rt_last_gc = now;
+       fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net, entries > rt_max_size);
        entries = dst_entries_get_slow(ops);
        if (entries < ops->gc_thresh)
                net->ipv6.ip6_rt_gc_expire = rt_gc_timeout>>1;
@@@ -2826,7 -2825,7 +2824,7 @@@ int ipv6_sysctl_rtcache_flush(struct ct
        net = (struct net *)ctl->extra1;
        delay = net->ipv6.sysctl.flush_delay;
        proc_dointvec(ctl, write, buffer, lenp, ppos);
-       fib6_run_gc(delay <= 0 ? ~0UL : (unsigned long)delay, net);
+       fib6_run_gc(delay <= 0 ? 0 : (unsigned long)delay, net, delay > 0);
        return 0;
  }
  
diff --combined net/sunrpc/svcsock.c
@@@ -442,7 -442,7 +442,7 @@@ static void svc_tcp_write_space(struct 
  {
        struct socket *sock = sk->sk_socket;
  
 -      if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) && sock)
 +      if (sk_stream_is_writeable(sk) && sock)
                clear_bit(SOCK_NOSPACE, &sock->flags);
        svc_write_space(sk);
  }
@@@ -1193,7 -1193,9 +1193,9 @@@ static int svc_tcp_has_wspace(struct sv
        if (test_bit(XPT_LISTENER, &xprt->xpt_flags))
                return 1;
        required = atomic_read(&xprt->xpt_reserved) + serv->sv_max_mesg;
-       if (sk_stream_wspace(svsk->sk_sk) >= required)
+       if (sk_stream_wspace(svsk->sk_sk) >= required ||
+           (sk_stream_min_wspace(svsk->sk_sk) == 0 &&
+            atomic_read(&xprt->xpt_reserved) == 0))
                return 1;
        set_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
        return 0;