Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
authorDavid S. Miller <davem@davemloft.net>
Wed, 8 Dec 2010 21:15:38 +0000 (13:15 -0800)
committerDavid S. Miller <davem@davemloft.net>
Wed, 8 Dec 2010 21:47:38 +0000 (13:47 -0800)
Conflicts:
drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
net/llc/af_llc.c

46 files changed:
1  2 
Documentation/networking/ip-sysctl.txt
drivers/net/Kconfig
drivers/net/benet/be_cmds.c
drivers/net/bonding/bond_main.c
drivers/net/cxgb4vf/cxgb4vf_main.c
drivers/net/cxgb4vf/t4vf_hw.c
drivers/net/e1000/e1000_main.c
drivers/net/ehea/ehea_main.c
drivers/net/ixgbe/ixgbe_main.c
drivers/net/pch_gbe/pch_gbe_main.c
drivers/net/ppp_generic.c
drivers/net/qlge/qlge_main.c
drivers/net/sfc/efx.c
drivers/net/sfc/net_driver.h
drivers/net/sfc/nic.c
drivers/net/stmmac/stmmac_main.c
drivers/net/usb/hso.c
drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
drivers/net/wireless/ath/ath9k/ath9k.h
drivers/net/wireless/ath/ath9k/eeprom.h
drivers/net/wireless/ath/ath9k/eeprom_def.c
drivers/net/wireless/ath/ath9k/hw.c
drivers/net/wireless/ath/ath9k/init.c
drivers/net/wireless/ath/ath9k/main.c
drivers/net/wireless/ath/ath9k/reg.h
drivers/net/wireless/ath/carl9170/main.c
drivers/net/wireless/ath/carl9170/tx.c
drivers/net/wireless/libertas/main.c
include/net/sock.h
net/ceph/Makefile
net/core/filter.c
net/core/request_sock.c
net/dccp/input.c
net/decnet/af_decnet.c
net/ipv4/tcp.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_minisocks.c
net/ipv4/tcp_output.c
net/ipv6/ip6_tunnel.c
net/ipv6/sit.c
net/l2tp/l2tp_ip.c
net/mac80211/rx.c
net/mac80211/tx.c
net/unix/af_unix.c
net/x25/x25_link.c

@@@ -144,6 -144,7 +144,7 @@@ tcp_adv_win_scale - INTEGE
        Count buffering overhead as bytes/2^tcp_adv_win_scale
        (if tcp_adv_win_scale > 0) or bytes-bytes/2^(-tcp_adv_win_scale),
        if it is <= 0.
+       Possible values are [-31, 31], inclusive.
        Default: 2
  
  tcp_allowed_congestion_control - STRING
@@@ -707,28 -708,10 +708,28 @@@ igmp_max_memberships - INTEGE
        Change the maximum number of multicast groups we can subscribe to.
        Default: 20
  
 -conf/interface/*  changes special settings per interface (where "interface" is
 -                the name of your network interface)
 -conf/all/*      is special, changes the settings for all interfaces
 +      Theoretical maximum value is bounded by having to send a membership
 +      report in a single datagram (i.e. the report can't span multiple
 +      datagrams, or risk confusing the switch and leaving groups you don't
 +      intend to).
  
 +      The number of supported groups 'M' is bounded by the number of group
 +      report entries you can fit into a single datagram of 65535 bytes.
 +
 +      M = 65536-sizeof (ip header)/(sizeof(Group record))
 +
 +      Group records are variable length, with a minimum of 12 bytes.
 +      So net.ipv4.igmp_max_memberships should not be set higher than:
 +
 +      (65536-24) / 12 = 5459
 +
 +      The value 5459 assumes no IP header options, so in practice
 +      this number may be lower.
 +
 +      conf/interface/*  changes special settings per interface (where
 +      "interface" is the name of your network interface)
 +
 +      conf/all/*        is special, changes the settings for all interfaces
  
  log_martians - BOOLEAN
        Log packets with impossible addresses to kernel log.
diff --combined drivers/net/Kconfig
@@@ -1533,7 -1533,7 +1533,7 @@@ config E10
  
          <http://support.intel.com/support/network/adapter/pro100/21397.htm>
  
 -          to identify the adapter.
 +        to identify the adapter.
  
          For the latest Intel PRO/100 network driver for Linux, see:
  
@@@ -1786,17 -1786,17 +1786,17 @@@ config KS884
        tristate "Micrel KSZ8841/42 with generic bus interface"
        depends on HAS_IOMEM && DMA_ENGINE
        help
 -       This platform driver is for KSZ8841(1-port) / KS8842(2-port)
 -       ethernet switch chip (managed, VLAN, QoS) from Micrel or
 -       Timberdale(FPGA).
 +        This platform driver is for KSZ8841(1-port) / KS8842(2-port)
 +        ethernet switch chip (managed, VLAN, QoS) from Micrel or
 +        Timberdale(FPGA).
  
  config KS8851
 -       tristate "Micrel KS8851 SPI"
 -       depends on SPI
 -       select MII
 +      tristate "Micrel KS8851 SPI"
 +      depends on SPI
 +      select MII
        select CRC32
 -       help
 -         SPI driver for Micrel KS8851 SPI attached network chip.
 +      help
 +        SPI driver for Micrel KS8851 SPI attached network chip.
  
  config KS8851_MLL
        tristate "Micrel KS8851 MLL"
@@@ -2133,25 -2133,25 +2133,25 @@@ config IP100
          will be called ipg.  This is recommended.
  
  config IGB
 -       tristate "Intel(R) 82575/82576 PCI-Express Gigabit Ethernet support"
 -       depends on PCI
 -       ---help---
 -         This driver supports Intel(R) 82575/82576 gigabit ethernet family of
 -         adapters.  For more information on how to identify your adapter, go
 -         to the Adapter & Driver ID Guide at:
 +      tristate "Intel(R) 82575/82576 PCI-Express Gigabit Ethernet support"
 +      depends on PCI
 +      ---help---
 +        This driver supports Intel(R) 82575/82576 gigabit ethernet family of
 +        adapters.  For more information on how to identify your adapter, go
 +        to the Adapter & Driver ID Guide at:
  
 -         <http://support.intel.com/support/network/adapter/pro100/21397.htm>
 +        <http://support.intel.com/support/network/adapter/pro100/21397.htm>
  
 -         For general information and support, go to the Intel support
 -         website at:
 +        For general information and support, go to the Intel support
 +        website at:
  
 -         <http://support.intel.com>
 +        <http://support.intel.com>
  
 -         More specific information on configuring the driver is in
 -         <file:Documentation/networking/e1000.txt>.
 +        More specific information on configuring the driver is in
 +        <file:Documentation/networking/e1000.txt>.
  
 -         To compile this driver as a module, choose M here. The module
 -         will be called igb.
 +        To compile this driver as a module, choose M here. The module
 +        will be called igb.
  
  config IGB_DCA
        bool "Direct Cache Access (DCA) Support"
          is used, with the intent of lessening the impact of cache misses.
  
  config IGBVF
 -       tristate "Intel(R) 82576 Virtual Function Ethernet support"
 -       depends on PCI
 -       ---help---
 -         This driver supports Intel(R) 82576 virtual functions.  For more
 -         information on how to identify your adapter, go to the Adapter &
 -         Driver ID Guide at:
 +      tristate "Intel(R) 82576 Virtual Function Ethernet support"
 +      depends on PCI
 +      ---help---
 +        This driver supports Intel(R) 82576 virtual functions.  For more
 +        information on how to identify your adapter, go to the Adapter &
 +        Driver ID Guide at:
  
 -         <http://support.intel.com/support/network/adapter/pro100/21397.htm>
 +        <http://support.intel.com/support/network/adapter/pro100/21397.htm>
  
 -         For general information and support, go to the Intel support
 -         website at:
 +        For general information and support, go to the Intel support
 +        website at:
  
 -         <http://support.intel.com>
 +        <http://support.intel.com>
  
 -         More specific information on configuring the driver is in
 -         <file:Documentation/networking/e1000.txt>.
 +        More specific information on configuring the driver is in
 +        <file:Documentation/networking/e1000.txt>.
  
 -         To compile this driver as a module, choose M here. The module
 -         will be called igbvf.
 +        To compile this driver as a module, choose M here. The module
 +        will be called igbvf.
  
  source "drivers/net/ixp2000/Kconfig"
  
@@@ -2300,14 -2300,14 +2300,14 @@@ config SKG
          will be called skge.  This is recommended.
  
  config SKGE_DEBUG
 -       bool "Debugging interface"
 -       depends on SKGE && DEBUG_FS
 -       help
 -       This option adds the ability to dump driver state for debugging.
 -       The file /sys/kernel/debug/skge/ethX displays the state of the internal
 -       transmit and receive rings.
 +      bool "Debugging interface"
 +      depends on SKGE && DEBUG_FS
 +      help
 +        This option adds the ability to dump driver state for debugging.
 +        The file /sys/kernel/debug/skge/ethX displays the state of the internal
 +        transmit and receive rings.
  
 -       If unsure, say N.
 +        If unsure, say N.
  
  config SKY2
        tristate "SysKonnect Yukon2 support"
          will be called sky2.  This is recommended.
  
  config SKY2_DEBUG
 -       bool "Debugging interface"
 -       depends on SKY2 && DEBUG_FS
 -       help
 -       This option adds the ability to dump driver state for debugging.
 -       The file /sys/kernel/debug/sky2/ethX displays the state of the internal
 -       transmit and receive rings.
 +      bool "Debugging interface"
 +      depends on SKY2 && DEBUG_FS
 +      help
 +        This option adds the ability to dump driver state for debugging.
 +        The file /sys/kernel/debug/sky2/ethX displays the state of the internal
 +        transmit and receive rings.
  
 -       If unsure, say N.
 +        If unsure, say N.
  
  config VIA_VELOCITY
        tristate "VIA Velocity support"
@@@ -2389,12 -2389,12 +2389,12 @@@ config SPIDER_NE
          Cell Processor-Based Blades from IBM.
  
  config TSI108_ETH
 -         tristate "Tundra TSI108 gigabit Ethernet support"
 -         depends on TSI108_BRIDGE
 -         help
 -           This driver supports Tundra TSI108 gigabit Ethernet ports.
 -           To compile this driver as a module, choose M here: the module
 -           will be called tsi108_eth.
 +      tristate "Tundra TSI108 gigabit Ethernet support"
 +      depends on TSI108_BRIDGE
 +      help
 +        This driver supports Tundra TSI108 gigabit Ethernet ports.
 +        To compile this driver as a module, choose M here: the module
 +        will be called tsi108_eth.
  
  config GELIC_NET
        tristate "PS3 Gigabit Ethernet driver"
@@@ -2543,10 -2543,10 +2543,10 @@@ config PCH_GB
        depends on PCI
        select MII
        ---help---
-         This is a gigabit ethernet driver for Topcliff PCH.
-         Topcliff PCH is the platform controller hub that is used in Intel's
+         This is a gigabit ethernet driver for EG20T PCH.
+         EG20T PCH is the platform controller hub that is used in Intel's
          general embedded platform.
-         Topcliff PCH has Gigabit Ethernet interface.
+         EG20T PCH has Gigabit Ethernet interface.
          Using this interface, it is able to access system devices connected
          to Gigabit Ethernet.
          This driver enables Gigabit Ethernet function.
@@@ -2573,32 -2573,32 +2573,32 @@@ config MDI
        tristate
  
  config CHELSIO_T1
 -        tristate "Chelsio 10Gb Ethernet support"
 -        depends on PCI
 +      tristate "Chelsio 10Gb Ethernet support"
 +      depends on PCI
        select CRC32
        select MDIO
 -        help
 -          This driver supports Chelsio gigabit and 10-gigabit
 -          Ethernet cards. More information about adapter features and
 +      help
 +        This driver supports Chelsio gigabit and 10-gigabit
 +        Ethernet cards. More information about adapter features and
          performance tuning is in <file:Documentation/networking/cxgb.txt>.
  
 -          For general information about Chelsio and our products, visit
 -          our website at <http://www.chelsio.com>.
 +        For general information about Chelsio and our products, visit
 +        our website at <http://www.chelsio.com>.
  
 -          For customer support, please visit our customer support page at
 -          <http://www.chelsio.com/support.html>.
 +        For customer support, please visit our customer support page at
 +        <http://www.chelsio.com/support.html>.
  
 -          Please send feedback to <linux-bugs@chelsio.com>.
 +        Please send feedback to <linux-bugs@chelsio.com>.
  
 -          To compile this driver as a module, choose M here: the module
 -          will be called cxgb.
 +        To compile this driver as a module, choose M here: the module
 +        will be called cxgb.
  
  config CHELSIO_T1_1G
 -        bool "Chelsio gigabit Ethernet support"
 -        depends on CHELSIO_T1
 -        help
 -          Enables support for Chelsio's gigabit Ethernet PCI cards.  If you
 -          are using only 10G cards say 'N' here.
 +      bool "Chelsio gigabit Ethernet support"
 +      depends on CHELSIO_T1
 +      help
 +        Enables support for Chelsio's gigabit Ethernet PCI cards.  If you
 +        are using only 10G cards say 'N' here.
  
  config CHELSIO_T3_DEPENDS
        tristate
@@@ -2728,26 -2728,26 +2728,26 @@@ config IXGBE_DC
          If unsure, say N.
  
  config IXGBEVF
 -       tristate "Intel(R) 82599 Virtual Function Ethernet support"
 -       depends on PCI_MSI
 -       ---help---
 -         This driver supports Intel(R) 82599 virtual functions.  For more
 -         information on how to identify your adapter, go to the Adapter &
 -         Driver ID Guide at:
 +      tristate "Intel(R) 82599 Virtual Function Ethernet support"
 +      depends on PCI_MSI
 +      ---help---
 +        This driver supports Intel(R) 82599 virtual functions.  For more
 +        information on how to identify your adapter, go to the Adapter &
 +        Driver ID Guide at:
  
 -         <http://support.intel.com/support/network/sb/CS-008441.htm>
 +        <http://support.intel.com/support/network/sb/CS-008441.htm>
  
 -         For general information and support, go to the Intel support
 -         website at:
 +        For general information and support, go to the Intel support
 +        website at:
  
 -         <http://support.intel.com>
 +        <http://support.intel.com>
  
 -         More specific information on configuring the driver is in
 -         <file:Documentation/networking/ixgbevf.txt>.
 +        More specific information on configuring the driver is in
 +        <file:Documentation/networking/ixgbevf.txt>.
  
 -         To compile this driver as a module, choose M here. The module
 -         will be called ixgbevf.  MSI-X interrupt support is required
 -         for this driver to work correctly.
 +        To compile this driver as a module, choose M here. The module
 +        will be called ixgbevf.  MSI-X interrupt support is required
 +        for this driver to work correctly.
  
  config IXGB
        tristate "Intel(R) PRO/10GbE support"
          will be called ixgb.
  
  config S2IO
 -      tristate "S2IO 10Gbe XFrame NIC"
 +      tristate "Exar Xframe 10Gb Ethernet Adapter"
        depends on PCI
        ---help---
 -        This driver supports the 10Gbe XFrame NIC of S2IO. 
 +        This driver supports Exar Corp's Xframe Series 10Gb Ethernet Adapters.
 +
          More specific information on configuring the driver is in 
          <file:Documentation/networking/s2io.txt>.
  
 +        To compile this driver as a module, choose M here. The module
 +        will be called s2io.
 +
  config VXGE
 -      tristate "Neterion X3100 Series 10GbE PCIe Server Adapter"
 +      tristate "Exar X3100 Series 10GbE PCIe Server Adapter"
        depends on PCI && INET
        ---help---
 -        This driver supports Neterion Inc's X3100 Series 10 GbE PCIe
 +        This driver supports Exar Corp's X3100 Series 10 GbE PCIe
          I/O Virtualized Server Adapter.
 +
          More specific information on configuring the driver is in
          <file:Documentation/networking/vxge.txt>.
  
 +        To compile this driver as a module, choose M here. The module
 +        will be called vxge.
 +
  config VXGE_DEBUG_TRACE_ALL
        bool "Enabling All Debug trace statments in driver"
        default n
        depends on VXGE
        ---help---
          Say Y here if you want to enabling all the debug trace statements in
 -        driver. By  default only few debug trace statements are enabled.
 +        the vxge driver. By default only few debug trace statements are
 +        enabled.
  
  config MYRI10GE
        tristate "Myricom Myri-10G Ethernet support"
@@@ -2915,18 -2906,18 +2915,18 @@@ config QLG
          will be called qlge.
  
  config BNA
 -        tristate "Brocade 1010/1020 10Gb Ethernet Driver support"
 -        depends on PCI
 -        ---help---
 -          This driver supports Brocade 1010/1020 10Gb CEE capable Ethernet
 -          cards.
 -          To compile this driver as a module, choose M here: the module
 -          will be called bna.
 +      tristate "Brocade 1010/1020 10Gb Ethernet Driver support"
 +      depends on PCI
 +      ---help---
 +        This driver supports Brocade 1010/1020 10Gb CEE capable Ethernet
 +        cards.
 +        To compile this driver as a module, choose M here: the module
 +        will be called bna.
  
 -          For general information and support, go to the Brocade support
 -          website at:
 +        For general information and support, go to the Brocade support
 +        website at:
  
 -          <http://support.brocade.com>
 +        <http://support.brocade.com>
  
  source "drivers/net/sfc/Kconfig"
  
@@@ -3236,18 -3227,18 +3236,18 @@@ config PPP_BSDCOM
          modules once you have said "make modules". If unsure, say N.
  
  config PPP_MPPE
 -       tristate "PPP MPPE compression (encryption) (EXPERIMENTAL)"
 -       depends on PPP && EXPERIMENTAL
 -       select CRYPTO
 -       select CRYPTO_SHA1
 -       select CRYPTO_ARC4
 -       select CRYPTO_ECB
 -       ---help---
 -         Support for the MPPE Encryption protocol, as employed by the
 -       Microsoft Point-to-Point Tunneling Protocol.
 -
 -       See http://pptpclient.sourceforge.net/ for information on
 -       configuring PPTP clients and servers to utilize this method.
 +      tristate "PPP MPPE compression (encryption) (EXPERIMENTAL)"
 +      depends on PPP && EXPERIMENTAL
 +      select CRYPTO
 +      select CRYPTO_SHA1
 +      select CRYPTO_ARC4
 +      select CRYPTO_ECB
 +      ---help---
 +        Support for the MPPE Encryption protocol, as employed by the
 +        Microsoft Point-to-Point Tunneling Protocol.
 +
 +        See http://pptpclient.sourceforge.net/ for information on
 +        configuring PPTP clients and servers to utilize this method.
  
  config PPPOE
        tristate "PPP over Ethernet (EXPERIMENTAL)"
@@@ -3406,14 -3397,14 +3406,14 @@@ config VIRTIO_NE
        depends on EXPERIMENTAL && VIRTIO
        ---help---
          This is the virtual network driver for virtio.  It can be used with
 -          lguest or QEMU based VMMs (like KVM or Xen).  Say Y or M.
 +        lguest or QEMU based VMMs (like KVM or Xen).  Say Y or M.
  
  config VMXNET3
 -       tristate "VMware VMXNET3 ethernet driver"
 -       depends on PCI && INET
 -       help
 -         This driver supports VMware's vmxnet3 virtual ethernet NIC.
 -         To compile this driver as a module, choose M here: the
 -         module will be called vmxnet3.
 +      tristate "VMware VMXNET3 ethernet driver"
 +      depends on PCI && INET
 +      help
 +        This driver supports VMware's vmxnet3 virtual ethernet NIC.
 +        To compile this driver as a module, choose M here: the
 +        module will be called vmxnet3.
  
  endif # NETDEVICES
@@@ -323,12 -323,7 +323,12 @@@ static int be_mbox_notify_wait(struct b
  
  static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
  {
 -      u32 sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET);
 +      u32 sem;
 +
 +      if (lancer_chip(adapter))
 +              sem  = ioread32(adapter->db + MPU_EP_SEMAPHORE_IF_TYPE2_OFFSET);
 +      else
 +              sem  = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET);
  
        *stage = sem & EP_SEMAPHORE_POST_STAGE_MASK;
        if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK)
@@@ -685,36 -680,16 +685,36 @@@ int be_cmd_cq_create(struct be_adapter 
                OPCODE_COMMON_CQ_CREATE, sizeof(*req));
  
        req->num_pages =  cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
 +      if (lancer_chip(adapter)) {
 +              req->hdr.version = 1;
 +              req->page_size = 1; /* 1 for 4K */
 +              AMAP_SET_BITS(struct amap_cq_context_lancer, coalescwm, ctxt,
 +                                                              coalesce_wm);
 +              AMAP_SET_BITS(struct amap_cq_context_lancer, nodelay, ctxt,
 +                                                              no_delay);
 +              AMAP_SET_BITS(struct amap_cq_context_lancer, count, ctxt,
 +                                              __ilog2_u32(cq->len/256));
 +              AMAP_SET_BITS(struct amap_cq_context_lancer, valid, ctxt, 1);
 +              AMAP_SET_BITS(struct amap_cq_context_lancer, eventable,
 +                                                              ctxt, 1);
 +              AMAP_SET_BITS(struct amap_cq_context_lancer, eqid,
 +                                                              ctxt, eq->id);
 +              AMAP_SET_BITS(struct amap_cq_context_lancer, armed, ctxt, 1);
 +      } else {
 +              AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
 +                                                              coalesce_wm);
 +              AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
 +                                                              ctxt, no_delay);
 +              AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
 +                                              __ilog2_u32(cq->len/256));
 +              AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
 +              AMAP_SET_BITS(struct amap_cq_context_be, solevent,
 +                                                              ctxt, sol_evts);
 +              AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
 +              AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
 +              AMAP_SET_BITS(struct amap_cq_context_be, armed, ctxt, 1);
 +      }
  
 -      AMAP_SET_BITS(struct amap_cq_context, coalescwm, ctxt, coalesce_wm);
 -      AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
 -      AMAP_SET_BITS(struct amap_cq_context, count, ctxt,
 -                      __ilog2_u32(cq->len/256));
 -      AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1);
 -      AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
 -      AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
 -      AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
 -      AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
        be_dws_cpu_to_le(ctxt, sizeof(req->context));
  
        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
@@@ -762,27 -737,13 +762,27 @@@ int be_cmd_mccq_create(struct be_adapte
                        OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req));
  
        req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
 +      if (lancer_chip(adapter)) {
 +              req->hdr.version = 1;
 +              req->cq_id = cpu_to_le16(cq->id);
 +
 +              AMAP_SET_BITS(struct amap_mcc_context_lancer, ring_size, ctxt,
 +                                              be_encoded_q_len(mccq->len));
 +              AMAP_SET_BITS(struct amap_mcc_context_lancer, valid, ctxt, 1);
 +              AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_id,
 +                                                              ctxt, cq->id);
 +              AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_valid,
 +                                                               ctxt, 1);
 +
 +      } else {
 +              AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
 +              AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
 +                                              be_encoded_q_len(mccq->len));
 +              AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
 +      }
  
 -      AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1);
 -      AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt,
 -              be_encoded_q_len(mccq->len));
 -      AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id);
        /* Subscribe to Link State and Group 5 Events(bits 1 and 5 set) */
 -      req->async_event_bitmap[0] |= 0x00000022;
 +      req->async_event_bitmap[0] = cpu_to_le32(0x00000022);
        be_dws_cpu_to_le(ctxt, sizeof(req->context));
  
        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
@@@ -1274,7 -1235,7 +1274,7 @@@ int be_cmd_multicast_set(struct be_adap
  
                i = 0;
                netdev_for_each_mc_addr(ha, netdev)
-                       memcpy(req->mac[i].byte, ha->addr, ETH_ALEN);
+                       memcpy(req->mac[i++].byte, ha->addr, ETH_ALEN);
        } else {
                req->promiscuous = 1;
        }
@@@ -873,11 -873,17 +873,11 @@@ static void bond_mc_del(struct bonding 
  static void __bond_resend_igmp_join_requests(struct net_device *dev)
  {
        struct in_device *in_dev;
 -      struct ip_mc_list *im;
  
        rcu_read_lock();
        in_dev = __in_dev_get_rcu(dev);
 -      if (in_dev) {
 -              read_lock(&in_dev->mc_list_lock);
 -              for (im = in_dev->mc_list; im; im = im->next)
 -                      ip_mc_rejoin_group(im);
 -              read_unlock(&in_dev->mc_list_lock);
 -      }
 -
 +      if (in_dev)
 +              ip_mc_rejoin_groups(in_dev);
        rcu_read_unlock();
  }
  
@@@ -1570,7 -1576,7 +1570,7 @@@ int bond_enslave(struct net_device *bon
  
        /* If this is the first slave, then we need to set the master's hardware
         * address to be the same as the slave's. */
-       if (bond->slave_cnt == 0)
+       if (is_zero_ether_addr(bond->dev->dev_addr))
                memcpy(bond->dev->dev_addr, slave_dev->dev_addr,
                       slave_dev->addr_len);
  
@@@ -3205,7 -3211,7 +3205,7 @@@ out
  #ifdef CONFIG_PROC_FS
  
  static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
 -      __acquires(&dev_base_lock)
 +      __acquires(RCU)
        __acquires(&bond->lock)
  {
        struct bonding *bond = seq->private;
        int i;
  
        /* make sure the bond won't be taken away */
 -      read_lock(&dev_base_lock);
 +      rcu_read_lock();
        read_lock(&bond->lock);
  
        if (*pos == 0)
@@@ -3244,12 -3250,12 +3244,12 @@@ static void *bond_info_seq_next(struct 
  
  static void bond_info_seq_stop(struct seq_file *seq, void *v)
        __releases(&bond->lock)
 -      __releases(&dev_base_lock)
 +      __releases(RCU)
  {
        struct bonding *bond = seq->private;
  
        read_unlock(&bond->lock);
 -      read_unlock(&dev_base_lock);
 +      rcu_read_unlock();
  }
  
  static void bond_info_show_master(struct seq_file *seq)
@@@ -280,7 -280,9 +280,7 @@@ static void name_msix_vecs(struct adapt
                const struct port_info *pi = netdev_priv(dev);
                int qs, msi;
  
 -              for (qs = 0, msi = MSIX_NIQFLINT;
 -                   qs < pi->nqsets;
 -                   qs++, msi++) {
 +              for (qs = 0, msi = MSIX_IQFLINT; qs < pi->nqsets; qs++, msi++) {
                        snprintf(adapter->msix_info[msi].desc, namelen,
                                 "%s-%d", dev->name, qs);
                        adapter->msix_info[msi].desc[namelen] = 0;
@@@ -307,7 -309,7 +307,7 @@@ static int request_msix_queue_irqs(stru
        /*
         * Ethernet queues.
         */
 -      msi = MSIX_NIQFLINT;
 +      msi = MSIX_IQFLINT;
        for_each_ethrxq(s, rxq) {
                err = request_irq(adapter->msix_info[msi].vec,
                                  t4vf_sge_intr_msix, 0,
@@@ -335,7 -337,7 +335,7 @@@ static void free_msix_queue_irqs(struc
        int rxq, msi;
  
        free_irq(adapter->msix_info[MSIX_FW].vec, &s->fw_evtq);
 -      msi = MSIX_NIQFLINT;
 +      msi = MSIX_IQFLINT;
        for_each_ethrxq(s, rxq)
                free_irq(adapter->msix_info[msi++].vec,
                         &s->ethrxq[rxq].rspq);
@@@ -525,7 -527,7 +525,7 @@@ static int setup_sge_queues(struct adap
         * brought up at which point lots of things get nailed down
         * permanently ...
         */
 -      msix = MSIX_NIQFLINT;
 +      msix = MSIX_IQFLINT;
        for_each_port(adapter, pidx) {
                struct net_device *dev = adapter->port[pidx];
                struct port_info *pi = netdev_priv(dev);
@@@ -814,40 -816,48 +814,48 @@@ static struct net_device_stats *cxgb4vf
  }
  
  /*
-  * Collect up to maxaddrs worth of a netdevice's unicast addresses into an
-  * array of addrss pointers and return the number collected.
+  * Collect up to maxaddrs worth of a netdevice's unicast addresses, starting
+  * at a specified offset within the list, into an array of addrss pointers and
+  * return the number collected.
   */
- static inline int collect_netdev_uc_list_addrs(const struct net_device *dev,
-                                              const u8 **addr,
-                                              unsigned int maxaddrs)
+ static inline unsigned int collect_netdev_uc_list_addrs(const struct net_device *dev,
+                                                       const u8 **addr,
+                                                       unsigned int offset,
+                                                       unsigned int maxaddrs)
  {
+       unsigned int index = 0;
        unsigned int naddr = 0;
        const struct netdev_hw_addr *ha;
  
-       for_each_dev_addr(dev, ha) {
-               addr[naddr++] = ha->addr;
-               if (naddr >= maxaddrs)
-                       break;
-       }
+       for_each_dev_addr(dev, ha)
+               if (index++ >= offset) {
+                       addr[naddr++] = ha->addr;
+                       if (naddr >= maxaddrs)
+                               break;
+               }
        return naddr;
  }
  
  /*
-  * Collect up to maxaddrs worth of a netdevice's multicast addresses into an
-  * array of addrss pointers and return the number collected.
+  * Collect up to maxaddrs worth of a netdevice's multicast addresses, starting
+  * at a specified offset within the list, into an array of addrss pointers and
+  * return the number collected.
   */
- static inline int collect_netdev_mc_list_addrs(const struct net_device *dev,
-                                              const u8 **addr,
-                                              unsigned int maxaddrs)
+ static inline unsigned int collect_netdev_mc_list_addrs(const struct net_device *dev,
+                                                       const u8 **addr,
+                                                       unsigned int offset,
+                                                       unsigned int maxaddrs)
  {
+       unsigned int index = 0;
        unsigned int naddr = 0;
        const struct netdev_hw_addr *ha;
  
-       netdev_for_each_mc_addr(ha, dev) {
-               addr[naddr++] = ha->addr;
-               if (naddr >= maxaddrs)
-                       break;
-       }
+       netdev_for_each_mc_addr(ha, dev)
+               if (index++ >= offset) {
+                       addr[naddr++] = ha->addr;
+                       if (naddr >= maxaddrs)
+                               break;
+               }
        return naddr;
  }
  
@@@ -860,16 -870,20 +868,20 @@@ static int set_addr_filters(const struc
        u64 mhash = 0;
        u64 uhash = 0;
        bool free = true;
-       u16 filt_idx[7];
+       unsigned int offset, naddr;
        const u8 *addr[7];
-       int ret, naddr = 0;
+       int ret;
        const struct port_info *pi = netdev_priv(dev);
  
        /* first do the secondary unicast addresses */
-       naddr = collect_netdev_uc_list_addrs(dev, addr, ARRAY_SIZE(addr));
-       if (naddr > 0) {
+       for (offset = 0; ; offset += naddr) {
+               naddr = collect_netdev_uc_list_addrs(dev, addr, offset,
+                                                    ARRAY_SIZE(addr));
+               if (naddr == 0)
+                       break;
                ret = t4vf_alloc_mac_filt(pi->adapter, pi->viid, free,
-                                         naddr, addr, filt_idx, &uhash, sleep);
+                                         naddr, addr, NULL, &uhash, sleep);
                if (ret < 0)
                        return ret;
  
        }
  
        /* next set up the multicast addresses */
-       naddr = collect_netdev_mc_list_addrs(dev, addr, ARRAY_SIZE(addr));
-       if (naddr > 0) {
+       for (offset = 0; ; offset += naddr) {
+               naddr = collect_netdev_mc_list_addrs(dev, addr, offset,
+                                                    ARRAY_SIZE(addr));
+               if (naddr == 0)
+                       break;
                ret = t4vf_alloc_mac_filt(pi->adapter, pi->viid, free,
-                                         naddr, addr, filt_idx, &mhash, sleep);
+                                         naddr, addr, NULL, &mhash, sleep);
                if (ret < 0)
                        return ret;
+               free = false;
        }
  
        return t4vf_set_addr_hash(pi->adapter, pi->viid, uhash != 0,
@@@ -1346,8 -1365,6 +1363,8 @@@ struct queue_port_stats 
        u64 rx_csum;
        u64 vlan_ex;
        u64 vlan_ins;
 +      u64 lro_pkts;
 +      u64 lro_merged;
  };
  
  /*
@@@ -1385,8 -1402,6 +1402,8 @@@ static const char stats_strings[][ETH_G
        "RxCsumGood        ",
        "VLANextractions   ",
        "VLANinsertions    ",
 +      "GROPackets        ",
 +      "GROMerged         ",
  };
  
  /*
@@@ -1436,8 -1451,6 +1453,8 @@@ static void collect_sge_port_stats(cons
                stats->rx_csum += rxq->stats.rx_cso;
                stats->vlan_ex += rxq->stats.vlan_ex;
                stats->vlan_ins += txq->vlan_ins;
 +              stats->lro_pkts += rxq->stats.lro_pkts;
 +              stats->lro_merged += rxq->stats.lro_merged;
        }
  }
  
@@@ -1533,20 -1546,15 +1550,20 @@@ static void cxgb4vf_get_wol(struct net_
        memset(&wol->sopass, 0, sizeof(wol->sopass));
  }
  
 +/*
 + * TCP Segmentation Offload flags which we support.
 + */
 +#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
 +
  /*
   * Set TCP Segmentation Offloading feature capabilities.
   */
  static int cxgb4vf_set_tso(struct net_device *dev, u32 tso)
  {
        if (tso)
 -              dev->features |= NETIF_F_TSO | NETIF_F_TSO6;
 +              dev->features |= TSO_FLAGS;
        else
 -              dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
 +              dev->features &= ~TSO_FLAGS;
        return 0;
  }
  
@@@ -2037,7 -2045,7 +2054,7 @@@ static int __devinit setup_debugfs(stru
   * Tear down the /sys/kernel/debug/cxgb4vf sub-nodes created above.  We leave
   * it to our caller to tear down the directory (debugfs_root).
   */
 -static void __devexit cleanup_debugfs(struct adapter *adapter)
 +static void cleanup_debugfs(struct adapter *adapter)
  {
        BUG_ON(adapter->debugfs_root == NULL);
  
   * adapter parameters we're going to be using and initialize basic adapter
   * hardware support.
   */
 -static int adap_init0(struct adapter *adapter)
 +static int __devinit adap_init0(struct adapter *adapter)
  {
        struct vf_resources *vfres = &adapter->params.vfres;
        struct sge_params *sge_params = &adapter->params.sge;
@@@ -2479,6 -2487,7 +2496,6 @@@ static int __devinit cxgb4vf_pci_probe(
                version_printed = 1;
        }
  
 -
        /*
         * Initialize generic PCI device state.
         */
                netif_carrier_off(netdev);
                netdev->irq = pdev->irq;
  
 -              netdev->features = (NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
 +              netdev->features = (NETIF_F_SG | TSO_FLAGS |
                                    NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                                    NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
                                    NETIF_F_GRO);
@@@ -1014,48 -1014,72 +1014,72 @@@ int t4vf_alloc_mac_filt(struct adapter 
                        unsigned int naddr, const u8 **addr, u16 *idx,
                        u64 *hash, bool sleep_ok)
  {
-       int i, ret;
+       int offset, ret = 0;
+       unsigned nfilters = 0;
+       unsigned int rem = naddr;
        struct fw_vi_mac_cmd cmd, rpl;
-       struct fw_vi_mac_exact *p;
-       size_t len16;
  
-       if (naddr > ARRAY_SIZE(cmd.u.exact))
+       if (naddr > FW_CLS_TCAM_NUM_ENTRIES)
                return -EINVAL;
-       len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
-                                     u.exact[naddr]), 16);
  
-       memset(&cmd, 0, sizeof(cmd));
-       cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_MAC_CMD) |
-                                    FW_CMD_REQUEST |
-                                    FW_CMD_WRITE |
-                                    (free ? FW_CMD_EXEC : 0) |
-                                    FW_VI_MAC_CMD_VIID(viid));
-       cmd.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_FREEMACS(free) |
-                                           FW_CMD_LEN16(len16));
-       for (i = 0, p = cmd.u.exact; i < naddr; i++, p++) {
-               p->valid_to_idx =
-                       cpu_to_be16(FW_VI_MAC_CMD_VALID |
-                                   FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
-               memcpy(p->macaddr, addr[i], sizeof(p->macaddr));
-       }
+       for (offset = 0; offset < naddr; /**/) {
+               unsigned int fw_naddr = (rem < ARRAY_SIZE(cmd.u.exact)
+                                        ? rem
+                                        : ARRAY_SIZE(cmd.u.exact));
+               size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
+                                                    u.exact[fw_naddr]), 16);
+               struct fw_vi_mac_exact *p;
+               int i;
+               memset(&cmd, 0, sizeof(cmd));
+               cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_MAC_CMD) |
+                                            FW_CMD_REQUEST |
+                                            FW_CMD_WRITE |
+                                            (free ? FW_CMD_EXEC : 0) |
+                                            FW_VI_MAC_CMD_VIID(viid));
+               cmd.freemacs_to_len16 =
+                       cpu_to_be32(FW_VI_MAC_CMD_FREEMACS(free) |
+                                   FW_CMD_LEN16(len16));
+               for (i = 0, p = cmd.u.exact; i < fw_naddr; i++, p++) {
+                       p->valid_to_idx = cpu_to_be16(
+                               FW_VI_MAC_CMD_VALID |
+                               FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
+                       memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
+               }
  
-       ret = t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), &rpl, sleep_ok);
-       if (ret)
-               return ret;
-       for (i = 0, p = rpl.u.exact; i < naddr; i++, p++) {
-               u16 index = FW_VI_MAC_CMD_IDX_GET(be16_to_cpu(p->valid_to_idx));
-               if (idx)
-                       idx[i] = (index >= FW_CLS_TCAM_NUM_ENTRIES
-                                 ? 0xffff
-                                 : index);
-               if (index < FW_CLS_TCAM_NUM_ENTRIES)
-                       ret++;
-               else if (hash)
-                       *hash |= (1 << hash_mac_addr(addr[i]));
+               ret = t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), &rpl,
+                                       sleep_ok);
+               if (ret && ret != -ENOMEM)
+                       break;
+               for (i = 0, p = rpl.u.exact; i < fw_naddr; i++, p++) {
+                       u16 index = FW_VI_MAC_CMD_IDX_GET(
+                               be16_to_cpu(p->valid_to_idx));
+                       if (idx)
+                               idx[offset+i] =
+                                       (index >= FW_CLS_TCAM_NUM_ENTRIES
+                                        ? 0xffff
+                                        : index);
+                       if (index < FW_CLS_TCAM_NUM_ENTRIES)
+                               nfilters++;
+                       else if (hash)
+                               *hash |= (1ULL << hash_mac_addr(addr[offset+i]));
+               }
+               free = false;
+               offset += fw_naddr;
+               rem -= fw_naddr;
        }
+       /*
+        * If there were no errors or we merely ran out of room in our MAC
+        * address arena, return the number of filters actually written.
+        */
+       if (ret == 0 || ret == -ENOMEM)
+               ret = nfilters;
        return ret;
  }
  
@@@ -1276,7 -1300,7 +1300,7 @@@ int t4vf_eth_eq_free(struct adapter *ad
   */
  int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl)
  {
 -      struct fw_cmd_hdr *cmd_hdr = (struct fw_cmd_hdr *)rpl;
 +      const struct fw_cmd_hdr *cmd_hdr = (const struct fw_cmd_hdr *)rpl;
        u8 opcode = FW_CMD_OP_GET(be32_to_cpu(cmd_hdr->hi));
  
        switch (opcode) {
                /*
                 * Link/module state change message.
                 */
 -              const struct fw_port_cmd *port_cmd = (void *)rpl;
 +              const struct fw_port_cmd *port_cmd =
 +                      (const struct fw_port_cmd *)rpl;
                u32 word;
                int action, port_id, link_ok, speed, fc, pidx;
  
@@@ -31,7 -31,7 +31,7 @@@
  
  char e1000_driver_name[] = "e1000";
  static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
- #define DRV_VERSION "7.3.21-k6-NAPI"
+ #define DRV_VERSION "7.3.21-k8-NAPI"
  const char e1000_driver_version[] = DRV_VERSION;
  static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
  
@@@ -485,9 -485,6 +485,6 @@@ void e1000_down(struct e1000_adapter *a
        struct net_device *netdev = adapter->netdev;
        u32 rctl, tctl;
  
-       /* signal that we're down so the interrupt handler does not
-        * reschedule our watchdog timer */
-       set_bit(__E1000_DOWN, &adapter->flags);
  
        /* disable receives in the hardware */
        rctl = er32(RCTL);
  
        e1000_irq_disable(adapter);
  
+       /*
+        * Setting DOWN must be after irq_disable to prevent
+        * a screaming interrupt.  Setting DOWN also prevents
+        * timers and tasks from rescheduling.
+        */
+       set_bit(__E1000_DOWN, &adapter->flags);
        del_timer_sync(&adapter->tx_fifo_stall_timer);
        del_timer_sync(&adapter->watchdog_timer);
        del_timer_sync(&adapter->phy_info_timer);
@@@ -1425,12 -1429,13 +1429,12 @@@ static int e1000_setup_tx_resources(str
        int size;
  
        size = sizeof(struct e1000_buffer) * txdr->count;
 -      txdr->buffer_info = vmalloc(size);
 +      txdr->buffer_info = vzalloc(size);
        if (!txdr->buffer_info) {
                e_err(probe, "Unable to allocate memory for the Tx descriptor "
                      "ring\n");
                return -ENOMEM;
        }
 -      memset(txdr->buffer_info, 0, size);
  
        /* round up to nearest 4K */
  
@@@ -1620,12 -1625,13 +1624,12 @@@ static int e1000_setup_rx_resources(str
        int size, desc_len;
  
        size = sizeof(struct e1000_buffer) * rxdr->count;
 -      rxdr->buffer_info = vmalloc(size);
 +      rxdr->buffer_info = vzalloc(size);
        if (!rxdr->buffer_info) {
                e_err(probe, "Unable to allocate memory for the Rx descriptor "
                      "ring\n");
                return -ENOMEM;
        }
 -      memset(rxdr->buffer_info, 0, size);
  
        desc_len = sizeof(struct e1000_rx_desc);
  
@@@ -400,6 -400,7 +400,7 @@@ static void ehea_refill_rq1(struct ehea
                        skb_arr_rq1[index] = netdev_alloc_skb(dev,
                                                              EHEA_L_PKT_SIZE);
                        if (!skb_arr_rq1[index]) {
+                               ehea_info("Unable to allocate enough skb in the array\n");
                                pr->rq1_skba.os_skbs = fill_wqes - i;
                                break;
                        }
@@@ -422,13 -423,20 +423,20 @@@ static void ehea_init_fill_rq1(struct e
        struct net_device *dev = pr->port->netdev;
        int i;
  
-       for (i = 0; i < pr->rq1_skba.len; i++) {
+       if (nr_rq1a > pr->rq1_skba.len) {
+               ehea_error("NR_RQ1A bigger than skb array len\n");
+               return;
+       }
+       for (i = 0; i < nr_rq1a; i++) {
                skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE);
-               if (!skb_arr_rq1[i])
+               if (!skb_arr_rq1[i]) {
+                       ehea_info("No enough memory to allocate skb array\n");
                        break;
+               }
        }
        /* Ring doorbell */
-       ehea_update_rq1a(pr->qp, nr_rq1a);
+       ehea_update_rq1a(pr->qp, i);
  }
  
  static int ehea_refill_rq_def(struct ehea_port_res *pr,
@@@ -675,7 -683,7 +683,7 @@@ static void ehea_proc_skb(struct ehea_p
        int vlan_extracted = ((cqe->status & EHEA_CQE_VLAN_TAG_XTRACT) &&
                              pr->port->vgrp);
  
-       if (use_lro) {
+       if (skb->dev->features & NETIF_F_LRO) {
                if (vlan_extracted)
                        lro_vlan_hwaccel_receive_skb(&pr->lro_mgr, skb,
                                                     pr->port->vgrp,
@@@ -735,8 -743,10 +743,10 @@@ static int ehea_proc_rwqes(struct net_d
  
                                        skb = netdev_alloc_skb(dev,
                                                               EHEA_L_PKT_SIZE);
-                                       if (!skb)
+                                       if (!skb) {
+                                               ehea_info("Not enough memory to allocate skb\n");
                                                break;
+                                       }
                                }
                                skb_copy_to_linear_data(skb, ((char *)cqe) + 64,
                                                 cqe->num_bytes_transfered - 4);
                }
                cqe = ehea_poll_rq1(qp, &wqe_index);
        }
-       if (use_lro)
+       if (dev->features & NETIF_F_LRO)
                lro_flush_all(&pr->lro_mgr);
  
        pr->rx_packets += processed;
@@@ -1496,10 -1506,12 +1506,10 @@@ static int ehea_init_q_skba(struct ehea
  {
        int arr_size = sizeof(void *) * max_q_entries;
  
 -      q_skba->arr = vmalloc(arr_size);
 +      q_skba->arr = vzalloc(arr_size);
        if (!q_skba->arr)
                return -ENOMEM;
  
 -      memset(q_skba->arr, 0, arr_size);
 -
        q_skba->len = max_q_entries;
        q_skba->index = 0;
        q_skba->os_skbs = 0;
@@@ -3266,6 -3278,9 +3276,9 @@@ struct ehea_port *ehea_setup_single_por
                      | NETIF_F_LLTX;
        dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;
  
+       if (use_lro)
+               dev->features |= NETIF_F_LRO;
        INIT_WORK(&port->reset_task, ehea_reset_port);
  
        ret = register_netdev(dev);
@@@ -52,14 -52,13 +52,14 @@@ char ixgbe_driver_name[] = "ixgbe"
  static const char ixgbe_driver_string[] =
                              "Intel(R) 10 Gigabit PCI Express Network Driver";
  
 -#define DRV_VERSION "2.0.84-k2"
 +#define DRV_VERSION "3.0.12-k2"
  const char ixgbe_driver_version[] = DRV_VERSION;
  static char ixgbe_copyright[] = "Copyright (c) 1999-2010 Intel Corporation.";
  
  static const struct ixgbe_info *ixgbe_info_tbl[] = {
        [board_82598] = &ixgbe_82598_info,
        [board_82599] = &ixgbe_82599_info,
 +      [board_X540] = &ixgbe_X540_info,
  };
  
  /* ixgbe_pci_tbl - PCI Device ID Table
@@@ -109,16 -108,10 +109,16 @@@ static DEFINE_PCI_DEVICE_TABLE(ixgbe_pc
         board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4),
         board_82599 },
 +      {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_BACKPLANE_FCOE),
 +       board_82599 },
 +      {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_FCOE),
 +       board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM),
         board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE),
         board_82599 },
 +      {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T),
 +       board_82599 },
  
        /* required last entry */
        {0, }
@@@ -567,7 -560,6 +567,7 @@@ static void ixgbe_set_ivar(struct ixgbe
                IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
                break;
        case ixgbe_mac_82599EB:
 +      case ixgbe_mac_X540:
                if (direction == -1) {
                        /* other causes */
                        msix_vector |= IXGBE_IVAR_ALLOC_VAL;
@@@ -597,34 -589,29 +597,34 @@@ static inline void ixgbe_irq_rearm_queu
  {
        u32 mask;
  
 -      if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
 +      switch (adapter->hw.mac.type) {
 +      case ixgbe_mac_82598EB:
                mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
 -      } else {
 +              break;
 +      case ixgbe_mac_82599EB:
 +      case ixgbe_mac_X540:
                mask = (qmask & 0xFFFFFFFF);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
                mask = (qmask >> 32);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
 +              break;
 +      default:
 +              break;
        }
  }
  
 -void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
 -                                    struct ixgbe_tx_buffer
 -                                    *tx_buffer_info)
 +void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *tx_ring,
 +                                    struct ixgbe_tx_buffer *tx_buffer_info)
  {
        if (tx_buffer_info->dma) {
                if (tx_buffer_info->mapped_as_page)
 -                      dma_unmap_page(&adapter->pdev->dev,
 +                      dma_unmap_page(tx_ring->dev,
                                       tx_buffer_info->dma,
                                       tx_buffer_info->length,
                                       DMA_TO_DEVICE);
                else
 -                      dma_unmap_single(&adapter->pdev->dev,
 +                      dma_unmap_single(tx_ring->dev,
                                         tx_buffer_info->dma,
                                         tx_buffer_info->length,
                                         DMA_TO_DEVICE);
  }
  
  /**
 - * ixgbe_tx_xon_state - check the tx ring xon state
 - * @adapter: the ixgbe adapter
 - * @tx_ring: the corresponding tx_ring
 + * ixgbe_dcb_txq_to_tc - convert a reg index to a traffic class
 + * @adapter: driver private struct
 + * @index: reg idx of queue to query (0-127)
   *
 - * If not in DCB mode, checks TFCS.TXOFF, otherwise, find out the
 - * corresponding TC of this tx_ring when checking TFCS.
 + * Helper function to determine the traffic index for a paticular
 + * register index.
   *
 - * Returns : true if in xon state (currently not paused)
 + * Returns : a tc index for use in range 0-7, or 0-3
   */
 -static inline bool ixgbe_tx_xon_state(struct ixgbe_adapter *adapter,
 -                                    struct ixgbe_ring *tx_ring)
 +u8 ixgbe_dcb_txq_to_tc(struct ixgbe_adapter *adapter, u8 reg_idx)
  {
 -      u32 txoff = IXGBE_TFCS_TXOFF;
 +      int tc = -1;
 +      int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
  
 -#ifdef CONFIG_IXGBE_DCB
 -      if (adapter->dcb_cfg.pfc_mode_enable) {
 -              int tc;
 -              int reg_idx = tx_ring->reg_idx;
 -              int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
 +      /* if DCB is not enabled the queues have no TC */
 +      if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
 +              return tc;
 +
 +      /* check valid range */
 +      if (reg_idx >= adapter->hw.mac.max_tx_queues)
 +              return tc;
 +
 +      switch (adapter->hw.mac.type) {
 +      case ixgbe_mac_82598EB:
 +              tc = reg_idx >> 2;
 +              break;
 +      default:
 +              if (dcb_i != 4 && dcb_i != 8)
 +                      break;
 +
 +              /* if VMDq is enabled the lowest order bits determine TC */
 +              if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED |
 +                                    IXGBE_FLAG_VMDQ_ENABLED)) {
 +                      tc = reg_idx & (dcb_i - 1);
 +                      break;
 +              }
 +
 +              /*
 +               * Convert the reg_idx into the correct TC. This bitmask
 +               * targets the last full 32 ring traffic class and assigns
 +               * it a value of 1. From there the rest of the rings are
 +               * based on shifting the mask further up to include the
 +               * reg_idx / 16 and then reg_idx / 8. It assumes dcB_i
 +               * will only ever be 8 or 4 and that reg_idx will never
 +               * be greater then 128. The code without the power of 2
 +               * optimizations would be:
 +               * (((reg_idx % 32) + 32) * dcb_i) >> (9 - reg_idx / 32)
 +               */
 +              tc = ((reg_idx & 0X1F) + 0x20) * dcb_i;
 +              tc >>= 9 - (reg_idx >> 5);
 +      }
 +
 +      return tc;
 +}
 +
 +static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
 +{
 +      struct ixgbe_hw *hw = &adapter->hw;
 +      struct ixgbe_hw_stats *hwstats = &adapter->stats;
 +      u32 data = 0;
 +      u32 xoff[8] = {0};
 +      int i;
  
 -              switch (adapter->hw.mac.type) {
 +      if ((hw->fc.current_mode == ixgbe_fc_full) ||
 +          (hw->fc.current_mode == ixgbe_fc_rx_pause)) {
 +              switch (hw->mac.type) {
                case ixgbe_mac_82598EB:
 -                      tc = reg_idx >> 2;
 -                      txoff = IXGBE_TFCS_TXOFF0;
 +                      data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
                        break;
 -              case ixgbe_mac_82599EB:
 -                      tc = 0;
 -                      txoff = IXGBE_TFCS_TXOFF;
 -                      if (dcb_i == 8) {
 -                              /* TC0, TC1 */
 -                              tc = reg_idx >> 5;
 -                              if (tc == 2) /* TC2, TC3 */
 -                                      tc += (reg_idx - 64) >> 4;
 -                              else if (tc == 3) /* TC4, TC5, TC6, TC7 */
 -                                      tc += 1 + ((reg_idx - 96) >> 3);
 -                      } else if (dcb_i == 4) {
 -                              /* TC0, TC1 */
 -                              tc = reg_idx >> 6;
 -                              if (tc == 1) {
 -                                      tc += (reg_idx - 64) >> 5;
 -                                      if (tc == 2) /* TC2, TC3 */
 -                                              tc += (reg_idx - 96) >> 4;
 -                              }
 -                      }
 +              default:
 +                      data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
 +              }
 +              hwstats->lxoffrxc += data;
 +
 +              /* refill credits (no tx hang) if we received xoff */
 +              if (!data)
 +                      return;
 +
 +              for (i = 0; i < adapter->num_tx_queues; i++)
 +                      clear_bit(__IXGBE_HANG_CHECK_ARMED,
 +                                &adapter->tx_ring[i]->state);
 +              return;
 +      } else if (!(adapter->dcb_cfg.pfc_mode_enable))
 +              return;
 +
 +      /* update stats for each tc, only valid with PFC enabled */
 +      for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) {
 +              switch (hw->mac.type) {
 +              case ixgbe_mac_82598EB:
 +                      xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
                        break;
                default:
 -                      tc = 0;
 +                      xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
                }
 -              txoff <<= tc;
 +              hwstats->pxoffrxc[i] += xoff[i];
 +      }
 +
 +      /* disarm tx queues that have received xoff frames */
 +      for (i = 0; i < adapter->num_tx_queues; i++) {
 +              struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
 +              u32 tc = ixgbe_dcb_txq_to_tc(adapter, tx_ring->reg_idx);
 +
 +              if (xoff[tc])
 +                      clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
        }
 -#endif
 -      return IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & txoff;
  }
  
 -static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
 -                                     struct ixgbe_ring *tx_ring,
 -                                     unsigned int eop)
 +static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring)
  {
 +      return ring->tx_stats.completed;
 +}
 +
 +static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring)
 +{
 +      struct ixgbe_adapter *adapter = netdev_priv(ring->netdev);
        struct ixgbe_hw *hw = &adapter->hw;
  
 -      /* Detect a transmit hang in hardware, this serializes the
 -       * check with the clearing of time_stamp and movement of eop */
 -      adapter->detect_tx_hung = false;
 -      if (tx_ring->tx_buffer_info[eop].time_stamp &&
 -          time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) &&
 -          ixgbe_tx_xon_state(adapter, tx_ring)) {
 -              /* detected Tx unit hang */
 -              union ixgbe_adv_tx_desc *tx_desc;
 -              tx_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
 -              e_err(drv, "Detected Tx Unit Hang\n"
 -                    "  Tx Queue             <%d>\n"
 -                    "  TDH, TDT             <%x>, <%x>\n"
 -                    "  next_to_use          <%x>\n"
 -                    "  next_to_clean        <%x>\n"
 -                    "tx_buffer_info[next_to_clean]\n"
 -                    "  time_stamp           <%lx>\n"
 -                    "  jiffies              <%lx>\n",
 -                    tx_ring->queue_index,
 -                    IXGBE_READ_REG(hw, tx_ring->head),
 -                    IXGBE_READ_REG(hw, tx_ring->tail),
 -                    tx_ring->next_to_use, eop,
 -                    tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
 -              return true;
 +      u32 head = IXGBE_READ_REG(hw, IXGBE_TDH(ring->reg_idx));
 +      u32 tail = IXGBE_READ_REG(hw, IXGBE_TDT(ring->reg_idx));
 +
 +      if (head != tail)
 +              return (head < tail) ?
 +                      tail - head : (tail + ring->count - head);
 +
 +      return 0;
 +}
 +
 +static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
 +{
 +      u32 tx_done = ixgbe_get_tx_completed(tx_ring);
 +      u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
 +      u32 tx_pending = ixgbe_get_tx_pending(tx_ring);
 +      bool ret = false;
 +
 +      clear_check_for_tx_hang(tx_ring);
 +
 +      /*
 +       * Check for a hung queue, but be thorough. This verifies
 +       * that a transmit has been completed since the previous
 +       * check AND there is at least one packet pending. The
 +       * ARMED bit is set to indicate a potential hang. The
 +       * bit is cleared if a pause frame is received to remove
 +       * false hang detection due to PFC or 802.3x frames. By
 +       * requiring this to fail twice we avoid races with
 +       * pfc clearing the ARMED bit and conditions where we
 +       * run the check_tx_hang logic with a transmit completion
 +       * pending but without time to complete it yet.
 +       */
 +      if ((tx_done_old == tx_done) && tx_pending) {
 +              /* make sure it is true for two checks in a row */
 +              ret = test_and_set_bit(__IXGBE_HANG_CHECK_ARMED,
 +                                     &tx_ring->state);
 +      } else {
 +              /* update completed stats and continue */
 +              tx_ring->tx_stats.tx_done_old = tx_done;
 +              /* reset the countdown */
 +              clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
        }
  
 -      return false;
 +      return ret;
  }
  
  #define IXGBE_MAX_TXD_PWR       14
@@@ -821,10 -734,11 +821,10 @@@ static bool ixgbe_clean_tx_irq(struct i
                               struct ixgbe_ring *tx_ring)
  {
        struct ixgbe_adapter *adapter = q_vector->adapter;
 -      struct net_device *netdev = adapter->netdev;
        union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
        struct ixgbe_tx_buffer *tx_buffer_info;
 -      unsigned int i, eop, count = 0;
        unsigned int total_bytes = 0, total_packets = 0;
 +      u16 i, eop, count = 0;
  
        i = tx_ring->next_to_clean;
        eop = tx_ring->tx_buffer_info[i].next_to_watch;
                bool cleaned = false;
                rmb(); /* read buffer_info after eop_desc */
                for ( ; !cleaned; count++) {
 -                      struct sk_buff *skb;
                        tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
                        tx_buffer_info = &tx_ring->tx_buffer_info[i];
 -                      cleaned = (i == eop);
 -                      skb = tx_buffer_info->skb;
 -
 -                      if (cleaned && skb) {
 -                              unsigned int segs, bytecount;
 -                              unsigned int hlen = skb_headlen(skb);
 -
 -                              /* gso_segs is currently only valid for tcp */
 -                              segs = skb_shinfo(skb)->gso_segs ?: 1;
 -#ifdef IXGBE_FCOE
 -                              /* adjust for FCoE Sequence Offload */
 -                              if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
 -                                  && skb_is_gso(skb)
 -                                  && vlan_get_protocol(skb) ==
 -                                  htons(ETH_P_FCOE)) {
 -                                      hlen = skb_transport_offset(skb) +
 -                                              sizeof(struct fc_frame_header) +
 -                                              sizeof(struct fcoe_crc_eof);
 -                                      segs = DIV_ROUND_UP(skb->len - hlen,
 -                                              skb_shinfo(skb)->gso_size);
 -                              }
 -#endif /* IXGBE_FCOE */
 -                              /* multiply data chunks by size of headers */
 -                              bytecount = ((segs - 1) * hlen) + skb->len;
 -                              total_packets += segs;
 -                              total_bytes += bytecount;
 -                      }
 -
 -                      ixgbe_unmap_and_free_tx_resource(adapter,
 -                                                       tx_buffer_info);
  
                        tx_desc->wb.status = 0;
 +                      cleaned = (i == eop);
  
                        i++;
                        if (i == tx_ring->count)
                                i = 0;
 +
 +                      if (cleaned && tx_buffer_info->skb) {
 +                              total_bytes += tx_buffer_info->bytecount;
 +                              total_packets += tx_buffer_info->gso_segs;
 +                      }
 +
 +                      ixgbe_unmap_and_free_tx_resource(tx_ring,
 +                                                       tx_buffer_info);
                }
  
 +              tx_ring->tx_stats.completed++;
                eop = tx_ring->tx_buffer_info[i].next_to_watch;
                eop_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
        }
  
        tx_ring->next_to_clean = i;
 +      tx_ring->total_bytes += total_bytes;
 +      tx_ring->total_packets += total_packets;
 +      u64_stats_update_begin(&tx_ring->syncp);
 +      tx_ring->stats.packets += total_packets;
 +      tx_ring->stats.bytes += total_bytes;
 +      u64_stats_update_end(&tx_ring->syncp);
 +
 +      if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
 +              /* schedule immediate reset if we believe we hung */
 +              struct ixgbe_hw *hw = &adapter->hw;
 +              tx_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
 +              e_err(drv, "Detected Tx Unit Hang\n"
 +                      "  Tx Queue             <%d>\n"
 +                      "  TDH, TDT             <%x>, <%x>\n"
 +                      "  next_to_use          <%x>\n"
 +                      "  next_to_clean        <%x>\n"
 +                      "tx_buffer_info[next_to_clean]\n"
 +                      "  time_stamp           <%lx>\n"
 +                      "  jiffies              <%lx>\n",
 +                      tx_ring->queue_index,
 +                      IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)),
 +                      IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)),
 +                      tx_ring->next_to_use, eop,
 +                      tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
 +
 +              netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
 +
 +              e_info(probe,
 +                     "tx hang %d detected on queue %d, resetting adapter\n",
 +                      adapter->tx_timeout_count + 1, tx_ring->queue_index);
 +
 +              /* schedule immediate reset if we believe we hung */
 +              ixgbe_tx_timeout(adapter->netdev);
 +
 +              /* the adapter is about to reset, no point in enabling stuff */
 +              return true;
 +      }
  
  #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
 -      if (unlikely(count && netif_carrier_ok(netdev) &&
 +      if (unlikely(count && netif_carrier_ok(tx_ring->netdev) &&
                     (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
                /* Make sure that anybody stopping the queue after this
                 * sees the new next_to_clean.
                 */
                smp_mb();
 -              if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
 +              if (__netif_subqueue_stopped(tx_ring->netdev, tx_ring->queue_index) &&
                    !test_bit(__IXGBE_DOWN, &adapter->state)) {
 -                      netif_wake_subqueue(netdev, tx_ring->queue_index);
 -                      ++tx_ring->restart_queue;
 -              }
 -      }
 -
 -      if (adapter->detect_tx_hung) {
 -              if (ixgbe_check_tx_hang(adapter, tx_ring, i)) {
 -                      /* schedule immediate reset if we believe we hung */
 -                      e_info(probe, "tx hang %d detected, resetting "
 -                             "adapter\n", adapter->tx_timeout_count + 1);
 -                      ixgbe_tx_timeout(adapter->netdev);
 +                      netif_wake_subqueue(tx_ring->netdev, tx_ring->queue_index);
 +                      ++tx_ring->tx_stats.restart_queue;
                }
        }
  
 -      /* re-arm the interrupt */
 -      if (count >= tx_ring->work_limit)
 -              ixgbe_irq_rearm_queues(adapter, ((u64)1 << q_vector->v_idx));
 -
 -      tx_ring->total_bytes += total_bytes;
 -      tx_ring->total_packets += total_packets;
 -      u64_stats_update_begin(&tx_ring->syncp);
 -      tx_ring->stats.packets += total_packets;
 -      tx_ring->stats.bytes += total_bytes;
 -      u64_stats_update_end(&tx_ring->syncp);
        return count < tx_ring->work_limit;
  }
  
  #ifdef CONFIG_IXGBE_DCA
  static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
 -                              struct ixgbe_ring *rx_ring)
 +                              struct ixgbe_ring *rx_ring,
 +                              int cpu)
  {
 +      struct ixgbe_hw *hw = &adapter->hw;
        u32 rxctrl;
 -      int cpu = get_cpu();
 -      int q = rx_ring->reg_idx;
 -
 -      if (rx_ring->cpu != cpu) {
 -              rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q));
 -              if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
 -                      rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK;
 -                      rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
 -              } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
 -                      rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK_82599;
 -                      rxctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
 -                                 IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599);
 -              }
 -              rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
 -              rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
 -              rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN);
 -              rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
 -                          IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
 -              IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q), rxctrl);
 -              rx_ring->cpu = cpu;
 +      u8 reg_idx = rx_ring->reg_idx;
 +
 +      rxctrl = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(reg_idx));
 +      switch (hw->mac.type) {
 +      case ixgbe_mac_82598EB:
 +              rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK;
 +              rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
 +              break;
 +      case ixgbe_mac_82599EB:
 +      case ixgbe_mac_X540:
 +              rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK_82599;
 +              rxctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
 +                         IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599);
 +              break;
 +      default:
 +              break;
        }
 -      put_cpu();
 +      rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
 +      rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
 +      rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN);
 +      rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
 +                  IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
 +      IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl);
  }
  
  static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
 -                              struct ixgbe_ring *tx_ring)
 +                              struct ixgbe_ring *tx_ring,
 +                              int cpu)
  {
 +      struct ixgbe_hw *hw = &adapter->hw;
        u32 txctrl;
 +      u8 reg_idx = tx_ring->reg_idx;
 +
 +      switch (hw->mac.type) {
 +      case ixgbe_mac_82598EB:
 +              txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(reg_idx));
 +              txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
 +              txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
 +              txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
 +              txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
 +              IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(reg_idx), txctrl);
 +              break;
 +      case ixgbe_mac_82599EB:
 +      case ixgbe_mac_X540:
 +              txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(reg_idx));
 +              txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599;
 +              txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
 +                         IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599);
 +              txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
 +              txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
 +              IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(reg_idx), txctrl);
 +              break;
 +      default:
 +              break;
 +      }
 +}
 +
 +static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector)
 +{
 +      struct ixgbe_adapter *adapter = q_vector->adapter;
        int cpu = get_cpu();
 -      int q = tx_ring->reg_idx;
 -      struct ixgbe_hw *hw = &adapter->hw;
 +      long r_idx;
 +      int i;
  
 -      if (tx_ring->cpu != cpu) {
 -              if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
 -                      txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(q));
 -                      txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
 -                      txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
 -                      txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
 -                      IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(q), txctrl);
 -              } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
 -                      txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(q));
 -                      txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599;
 -                      txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
 -                                IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599);
 -                      txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
 -                      IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(q), txctrl);
 -              }
 -              tx_ring->cpu = cpu;
 +      if (q_vector->cpu == cpu)
 +              goto out_no_update;
 +
 +      r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
 +      for (i = 0; i < q_vector->txr_count; i++) {
 +              ixgbe_update_tx_dca(adapter, adapter->tx_ring[r_idx], cpu);
 +              r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
 +                                    r_idx + 1);
        }
 +
 +      r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
 +      for (i = 0; i < q_vector->rxr_count; i++) {
 +              ixgbe_update_rx_dca(adapter, adapter->rx_ring[r_idx], cpu);
 +              r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
 +                                    r_idx + 1);
 +      }
 +
 +      q_vector->cpu = cpu;
 +out_no_update:
        put_cpu();
  }
  
  static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
  {
 +      int num_q_vectors;
        int i;
  
        if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
        /* always use CB2 mode, difference is masked in the CB driver */
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);
  
 -      for (i = 0; i < adapter->num_tx_queues; i++) {
 -              adapter->tx_ring[i]->cpu = -1;
 -              ixgbe_update_tx_dca(adapter, adapter->tx_ring[i]);
 -      }
 -      for (i = 0; i < adapter->num_rx_queues; i++) {
 -              adapter->rx_ring[i]->cpu = -1;
 -              ixgbe_update_rx_dca(adapter, adapter->rx_ring[i]);
 +      if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
 +              num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
 +      else
 +              num_q_vectors = 1;
 +
 +      for (i = 0; i < num_q_vectors; i++) {
 +              adapter->q_vector[i]->cpu = -1;
 +              ixgbe_update_dca(adapter->q_vector[i]);
        }
  }
  
  static int __ixgbe_notify_dca(struct device *dev, void *data)
  {
 -      struct net_device *netdev = dev_get_drvdata(dev);
 -      struct ixgbe_adapter *adapter = netdev_priv(netdev);
 +      struct ixgbe_adapter *adapter = dev_get_drvdata(dev);
        unsigned long event = *(unsigned long *)data;
  
 +      if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
 +              return 0;
 +
        switch (event) {
        case DCA_PROVIDER_ADD:
                /* if we're already enabled, don't do it again */
@@@ -1136,7 -1013,8 +1136,7 @@@ static inline void ixgbe_rx_checksum(st
        skb->ip_summed = CHECKSUM_UNNECESSARY;
  }
  
 -static inline void ixgbe_release_rx_desc(struct ixgbe_hw *hw,
 -                                       struct ixgbe_ring *rx_ring, u32 val)
 +static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val)
  {
        /*
         * Force memory writes to complete before letting h/w
         * such as IA-64).
         */
        wmb();
 -      IXGBE_WRITE_REG(hw, IXGBE_RDT(rx_ring->reg_idx), val);
 +      writel(val, rx_ring->tail);
  }
  
  /**
   * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split
 - * @adapter: address of board private structure
 + * @rx_ring: ring to place buffers on
 + * @cleaned_count: number of buffers to replace
   **/
 -void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
 -                          struct ixgbe_ring *rx_ring,
 -                          int cleaned_count)
 +void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
  {
 -      struct net_device *netdev = adapter->netdev;
 -      struct pci_dev *pdev = adapter->pdev;
        union ixgbe_adv_rx_desc *rx_desc;
        struct ixgbe_rx_buffer *bi;
 -      unsigned int i;
 -      unsigned int bufsz = rx_ring->rx_buf_len;
 +      struct sk_buff *skb;
 +      u16 i = rx_ring->next_to_use;
  
 -      i = rx_ring->next_to_use;
 -      bi = &rx_ring->rx_buffer_info[i];
 +      /* do nothing if no valid netdev defined */
 +      if (!rx_ring->netdev)
 +              return;
  
        while (cleaned_count--) {
                rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
 +              bi = &rx_ring->rx_buffer_info[i];
 +              skb = bi->skb;
  
 -              if (!bi->page_dma &&
 -                  (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)) {
 -                      if (!bi->page) {
 -                              bi->page = netdev_alloc_page(netdev);
 -                              if (!bi->page) {
 -                                      adapter->alloc_rx_page_failed++;
 -                                      goto no_buffers;
 -                              }
 -                              bi->page_offset = 0;
 -                      } else {
 -                              /* use a half page if we're re-using */
 -                              bi->page_offset ^= (PAGE_SIZE / 2);
 -                      }
 -
 -                      bi->page_dma = dma_map_page(&pdev->dev, bi->page,
 -                                                  bi->page_offset,
 -                                                  (PAGE_SIZE / 2),
 -                                                  DMA_FROM_DEVICE);
 -              }
 -
 -              if (!bi->skb) {
 -                      struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev,
 -                                                                      bufsz);
 -                      bi->skb = skb;
 -
 +              if (!skb) {
 +                      skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
 +                                                      rx_ring->rx_buf_len);
                        if (!skb) {
 -                              adapter->alloc_rx_buff_failed++;
 +                              rx_ring->rx_stats.alloc_rx_buff_failed++;
                                goto no_buffers;
                        }
                        /* initialize queue mapping */
                        skb_record_rx_queue(skb, rx_ring->queue_index);
 +                      bi->skb = skb;
                }
  
                if (!bi->dma) {
 -                      bi->dma = dma_map_single(&pdev->dev,
 -                                               bi->skb->data,
 +                      bi->dma = dma_map_single(rx_ring->dev,
 +                                               skb->data,
                                                 rx_ring->rx_buf_len,
                                                 DMA_FROM_DEVICE);
 +                      if (dma_mapping_error(rx_ring->dev, bi->dma)) {
 +                              rx_ring->rx_stats.alloc_rx_buff_failed++;
 +                              bi->dma = 0;
 +                              goto no_buffers;
 +                      }
                }
 -              /* Refresh the desc even if buffer_addrs didn't change because
 -               * each write-back erases this info. */
 -              if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
 +
 +              if (ring_is_ps_enabled(rx_ring)) {
 +                      if (!bi->page) {
 +                              bi->page = netdev_alloc_page(rx_ring->netdev);
 +                              if (!bi->page) {
 +                                      rx_ring->rx_stats.alloc_rx_page_failed++;
 +                                      goto no_buffers;
 +                              }
 +                      }
 +
 +                      if (!bi->page_dma) {
 +                              /* use a half page if we're re-using */
 +                              bi->page_offset ^= PAGE_SIZE / 2;
 +                              bi->page_dma = dma_map_page(rx_ring->dev,
 +                                                          bi->page,
 +                                                          bi->page_offset,
 +                                                          PAGE_SIZE / 2,
 +                                                          DMA_FROM_DEVICE);
 +                              if (dma_mapping_error(rx_ring->dev,
 +                                                    bi->page_dma)) {
 +                                      rx_ring->rx_stats.alloc_rx_page_failed++;
 +                                      bi->page_dma = 0;
 +                                      goto no_buffers;
 +                              }
 +                      }
 +
 +                      /* Refresh the desc even if buffer_addrs didn't change
 +                       * because each write-back erases this info. */
                        rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
                        rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
                } else {
                i++;
                if (i == rx_ring->count)
                        i = 0;
 -              bi = &rx_ring->rx_buffer_info[i];
        }
  
  no_buffers:
        if (rx_ring->next_to_use != i) {
                rx_ring->next_to_use = i;
 -              if (i-- == 0)
 -                      i = (rx_ring->count - 1);
 -
 -              ixgbe_release_rx_desc(&adapter->hw, rx_ring, i);
 +              ixgbe_release_rx_desc(rx_ring, i);
        }
  }
  
 -static inline u16 ixgbe_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc)
 +static inline u16 ixgbe_get_hlen(union ixgbe_adv_rx_desc *rx_desc)
  {
 -      return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
 -}
 -
 -static inline u16 ixgbe_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
 -{
 -      return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
 -}
 -
 -static inline u32 ixgbe_get_rsc_count(union ixgbe_adv_rx_desc *rx_desc)
 -{
 -      return (le32_to_cpu(rx_desc->wb.lower.lo_dword.data) &
 -              IXGBE_RXDADV_RSCCNT_MASK) >>
 -              IXGBE_RXDADV_RSCCNT_SHIFT;
 +      /* HW will not DMA in data larger than the given buffer, even if it
 +       * parses the (NFS, of course) header to be larger.  In that case, it
 +       * fills the header buffer and spills the rest into the page.
 +       */
 +      u16 hdr_info = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info);
 +      u16 hlen = (hdr_info &  IXGBE_RXDADV_HDRBUFLEN_MASK) >>
 +                  IXGBE_RXDADV_HDRBUFLEN_SHIFT;
 +      if (hlen > IXGBE_RX_HDR_SIZE)
 +              hlen = IXGBE_RX_HDR_SIZE;
 +      return hlen;
  }
  
  /**
   * ixgbe_transform_rsc_queue - change rsc queue into a full packet
   * @skb: pointer to the last skb in the rsc queue
 - * @count: pointer to number of packets coalesced in this context
   *
   * This function changes a queue full of hw rsc buffers into a completed
   * packet.  It uses the ->prev pointers to find the first packet and then
   * turns it into the frag list owner.
   **/
 -static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb,
 -                                                      u64 *count)
 +static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb)
  {
        unsigned int frag_list_size = 0;
 +      unsigned int skb_cnt = 1;
  
        while (skb->prev) {
                struct sk_buff *prev = skb->prev;
                frag_list_size += skb->len;
                skb->prev = NULL;
                skb = prev;
 -              *count += 1;
 +              skb_cnt++;
        }
  
        skb_shinfo(skb)->frag_list = skb->next;
        skb->len += frag_list_size;
        skb->data_len += frag_list_size;
        skb->truesize += frag_list_size;
 +      IXGBE_RSC_CB(skb)->skb_cnt = skb_cnt;
 +
        return skb;
  }
  
 -struct ixgbe_rsc_cb {
 -      dma_addr_t dma;
 -      bool delay_unmap;
 -};
 -
 -#define IXGBE_RSC_CB(skb) ((struct ixgbe_rsc_cb *)(skb)->cb)
 +static inline bool ixgbe_get_rsc_state(union ixgbe_adv_rx_desc *rx_desc)
 +{
 +      return !!(le32_to_cpu(rx_desc->wb.lower.lo_dword.data) &
 +              IXGBE_RXDADV_RSCCNT_MASK);
 +}
  
 -static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 +static void ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                               struct ixgbe_ring *rx_ring,
                               int *work_done, int work_to_do)
  {
        struct ixgbe_adapter *adapter = q_vector->adapter;
 -      struct pci_dev *pdev = adapter->pdev;
        union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
        struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
        struct sk_buff *skb;
 -      unsigned int i, rsc_count = 0;
 -      u32 len, staterr;
 -      u16 hdr_info;
 -      bool cleaned = false;
 -      int cleaned_count = 0;
        unsigned int total_rx_bytes = 0, total_rx_packets = 0;
 +      const int current_node = numa_node_id();
  #ifdef IXGBE_FCOE
        int ddp_bytes = 0;
  #endif /* IXGBE_FCOE */
 +      u32 staterr;
 +      u16 i;
 +      u16 cleaned_count = 0;
 +      bool pkt_is_rsc = false;
  
        i = rx_ring->next_to_clean;
        rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
        staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
 -      rx_buffer_info = &rx_ring->rx_buffer_info[i];
  
        while (staterr & IXGBE_RXD_STAT_DD) {
                u32 upper_len = 0;
 -              if (*work_done >= work_to_do)
 -                      break;
 -              (*work_done)++;
  
                rmb(); /* read descriptor and rx_buffer_info after status DD */
 -              if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
 -                      hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc));
 -                      len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
 -                             IXGBE_RXDADV_HDRBUFLEN_SHIFT;
 -                      upper_len = le16_to_cpu(rx_desc->wb.upper.length);
 -                      if ((len > IXGBE_RX_HDR_SIZE) ||
 -                          (upper_len && !(hdr_info & IXGBE_RXDADV_SPH)))
 -                              len = IXGBE_RX_HDR_SIZE;
 -              } else {
 -                      len = le16_to_cpu(rx_desc->wb.upper.length);
 -              }
  
 -              cleaned = true;
 +              rx_buffer_info = &rx_ring->rx_buffer_info[i];
 +
                skb = rx_buffer_info->skb;
 -              prefetch(skb->data);
                rx_buffer_info->skb = NULL;
 +              prefetch(skb->data);
  
 +              if (ring_is_rsc_enabled(rx_ring))
 +                      pkt_is_rsc = ixgbe_get_rsc_state(rx_desc);
 +
 +              /* if this is a skb from previous receive DMA will be 0 */
                if (rx_buffer_info->dma) {
 -                      if ((adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
 -                          (!(staterr & IXGBE_RXD_STAT_EOP)) &&
 -                               (!(skb->prev))) {
 +                      u16 hlen;
 +                      if (pkt_is_rsc &&
 +                          !(staterr & IXGBE_RXD_STAT_EOP) &&
 +                          !skb->prev) {
                                /*
                                 * When HWRSC is enabled, delay unmapping
                                 * of the first packet. It carries the
                                IXGBE_RSC_CB(skb)->delay_unmap = true;
                                IXGBE_RSC_CB(skb)->dma = rx_buffer_info->dma;
                        } else {
 -                              dma_unmap_single(&pdev->dev,
 +                              dma_unmap_single(rx_ring->dev,
                                                 rx_buffer_info->dma,
                                                 rx_ring->rx_buf_len,
                                                 DMA_FROM_DEVICE);
                        }
                        rx_buffer_info->dma = 0;
 -                      skb_put(skb, len);
 +
 +                      if (ring_is_ps_enabled(rx_ring)) {
 +                              hlen = ixgbe_get_hlen(rx_desc);
 +                              upper_len = le16_to_cpu(rx_desc->wb.upper.length);
 +                      } else {
 +                              hlen = le16_to_cpu(rx_desc->wb.upper.length);
 +                      }
 +
 +                      skb_put(skb, hlen);
 +              } else {
 +                      /* assume packet split since header is unmapped */
 +                      upper_len = le16_to_cpu(rx_desc->wb.upper.length);
                }
  
                if (upper_len) {
 -                      dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
 -                                     PAGE_SIZE / 2, DMA_FROM_DEVICE);
 +                      dma_unmap_page(rx_ring->dev,
 +                                     rx_buffer_info->page_dma,
 +                                     PAGE_SIZE / 2,
 +                                     DMA_FROM_DEVICE);
                        rx_buffer_info->page_dma = 0;
                        skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
                                           rx_buffer_info->page,
                                           rx_buffer_info->page_offset,
                                           upper_len);
  
 -                      if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) ||
 -                          (page_count(rx_buffer_info->page) != 1))
 -                              rx_buffer_info->page = NULL;
 -                      else
 +                      if ((page_count(rx_buffer_info->page) == 1) &&
 +                          (page_to_nid(rx_buffer_info->page) == current_node))
                                get_page(rx_buffer_info->page);
 +                      else
 +                              rx_buffer_info->page = NULL;
  
                        skb->len += upper_len;
                        skb->data_len += upper_len;
                prefetch(next_rxd);
                cleaned_count++;
  
 -              if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
 -                      rsc_count = ixgbe_get_rsc_count(rx_desc);
 -
 -              if (rsc_count) {
 +              if (pkt_is_rsc) {
                        u32 nextp = (staterr & IXGBE_RXDADV_NEXTP_MASK) >>
                                     IXGBE_RXDADV_NEXTP_SHIFT;
                        next_buffer = &rx_ring->rx_buffer_info[nextp];
                        next_buffer = &rx_ring->rx_buffer_info[i];
                }
  
 -              if (staterr & IXGBE_RXD_STAT_EOP) {
 -                      if (skb->prev)
 -                              skb = ixgbe_transform_rsc_queue(skb,
 -                                                              &(rx_ring->rsc_count));
 -                      if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
 -                              if (IXGBE_RSC_CB(skb)->delay_unmap) {
 -                                      dma_unmap_single(&pdev->dev,
 -                                                       IXGBE_RSC_CB(skb)->dma,
 -                                                       rx_ring->rx_buf_len,
 -                                                       DMA_FROM_DEVICE);
 -                                      IXGBE_RSC_CB(skb)->dma = 0;
 -                                      IXGBE_RSC_CB(skb)->delay_unmap = false;
 -                              }
 -                              if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)
 -                                      rx_ring->rsc_count +=
 -                                              skb_shinfo(skb)->nr_frags;
 -                              else
 -                                      rx_ring->rsc_count++;
 -                              rx_ring->rsc_flush++;
 -                      }
 -                      u64_stats_update_begin(&rx_ring->syncp);
 -                      rx_ring->stats.packets++;
 -                      rx_ring->stats.bytes += skb->len;
 -                      u64_stats_update_end(&rx_ring->syncp);
 -              } else {
 -                      if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
 +              if (!(staterr & IXGBE_RXD_STAT_EOP)) {
 +                      if (ring_is_ps_enabled(rx_ring)) {
                                rx_buffer_info->skb = next_buffer->skb;
                                rx_buffer_info->dma = next_buffer->dma;
                                next_buffer->skb = skb;
                                skb->next = next_buffer->skb;
                                skb->next->prev = skb;
                        }
 -                      rx_ring->non_eop_descs++;
 +                      rx_ring->rx_stats.non_eop_descs++;
                        goto next_desc;
                }
  
 +              if (skb->prev) {
 +                      skb = ixgbe_transform_rsc_queue(skb);
 +                      /* if we got here without RSC the packet is invalid */
 +                      if (!pkt_is_rsc) {
 +                              __pskb_trim(skb, 0);
 +                              rx_buffer_info->skb = skb;
 +                              goto next_desc;
 +                      }
 +              }
 +
 +              if (ring_is_rsc_enabled(rx_ring)) {
 +                      if (IXGBE_RSC_CB(skb)->delay_unmap) {
 +                              dma_unmap_single(rx_ring->dev,
 +                                               IXGBE_RSC_CB(skb)->dma,
 +                                               rx_ring->rx_buf_len,
 +                                               DMA_FROM_DEVICE);
 +                              IXGBE_RSC_CB(skb)->dma = 0;
 +                              IXGBE_RSC_CB(skb)->delay_unmap = false;
 +                      }
 +              }
 +              if (pkt_is_rsc) {
 +                      if (ring_is_ps_enabled(rx_ring))
 +                              rx_ring->rx_stats.rsc_count +=
 +                                      skb_shinfo(skb)->nr_frags;
 +                      else
 +                              rx_ring->rx_stats.rsc_count +=
 +                                      IXGBE_RSC_CB(skb)->skb_cnt;
 +                      rx_ring->rx_stats.rsc_flush++;
 +              }
 +
 +              /* ERR_MASK will only have valid bits if EOP set */
                if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) {
 -                      dev_kfree_skb_irq(skb);
 +                      /* trim packet back to size 0 and recycle it */
 +                      __pskb_trim(skb, 0);
 +                      rx_buffer_info->skb = skb;
                        goto next_desc;
                }
  
                total_rx_bytes += skb->len;
                total_rx_packets++;
  
 -              skb->protocol = eth_type_trans(skb, adapter->netdev);
 +              skb->protocol = eth_type_trans(skb, rx_ring->netdev);
  #ifdef IXGBE_FCOE
                /* if ddp, not passing to ULD unless for FCP_RSP or error */
                if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
  next_desc:
                rx_desc->wb.upper.status_error = 0;
  
 +              (*work_done)++;
 +              if (*work_done >= work_to_do)
 +                      break;
 +
                /* return some buffers to hardware, one at a time is too slow */
                if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
 -                      ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
 +                      ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
                        cleaned_count = 0;
                }
  
                /* use prefetched values */
                rx_desc = next_rxd;
 -              rx_buffer_info = &rx_ring->rx_buffer_info[i];
 -
                staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
        }
  
        cleaned_count = IXGBE_DESC_UNUSED(rx_ring);
  
        if (cleaned_count)
 -              ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
 +              ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
  
  #ifdef IXGBE_FCOE
        /* include DDPed FCoE data */
        if (ddp_bytes > 0) {
                unsigned int mss;
  
 -              mss = adapter->netdev->mtu - sizeof(struct fcoe_hdr) -
 +              mss = rx_ring->netdev->mtu - sizeof(struct fcoe_hdr) -
                        sizeof(struct fc_frame_header) -
                        sizeof(struct fcoe_crc_eof);
                if (mss > 512)
  
        rx_ring->total_packets += total_rx_packets;
        rx_ring->total_bytes += total_rx_bytes;
 -
 -      return cleaned;
 +      u64_stats_update_begin(&rx_ring->syncp);
 +      rx_ring->stats.packets += total_rx_packets;
 +      rx_ring->stats.bytes += total_rx_bytes;
 +      u64_stats_update_end(&rx_ring->syncp);
  }
  
  static int ixgbe_clean_rxonly(struct napi_struct *, int);
  static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
  {
        struct ixgbe_q_vector *q_vector;
 -      int i, j, q_vectors, v_idx, r_idx;
 +      int i, q_vectors, v_idx, r_idx;
        u32 mask;
  
        q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
                                       adapter->num_rx_queues);
  
                for (i = 0; i < q_vector->rxr_count; i++) {
 -                      j = adapter->rx_ring[r_idx]->reg_idx;
 -                      ixgbe_set_ivar(adapter, 0, j, v_idx);
 +                      u8 reg_idx = adapter->rx_ring[r_idx]->reg_idx;
 +                      ixgbe_set_ivar(adapter, 0, reg_idx, v_idx);
                        r_idx = find_next_bit(q_vector->rxr_idx,
                                              adapter->num_rx_queues,
                                              r_idx + 1);
                                       adapter->num_tx_queues);
  
                for (i = 0; i < q_vector->txr_count; i++) {
 -                      j = adapter->tx_ring[r_idx]->reg_idx;
 -                      ixgbe_set_ivar(adapter, 1, j, v_idx);
 +                      u8 reg_idx = adapter->tx_ring[r_idx]->reg_idx;
 +                      ixgbe_set_ivar(adapter, 1, reg_idx, v_idx);
                        r_idx = find_next_bit(q_vector->txr_idx,
                                              adapter->num_tx_queues,
                                              r_idx + 1);
                }
        }
  
 -      if (adapter->hw.mac.type == ixgbe_mac_82598EB)
 +      switch (adapter->hw.mac.type) {
 +      case ixgbe_mac_82598EB:
                ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
                               v_idx);
 -      else if (adapter->hw.mac.type == ixgbe_mac_82599EB)
 +              break;
 +      case ixgbe_mac_82599EB:
 +      case ixgbe_mac_X540:
                ixgbe_set_ivar(adapter, -1, 1, v_idx);
 +              break;
 +
 +      default:
 +              break;
 +      }
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);
  
        /* set up to autoclear timer, and the vectors */
@@@ -1693,15 -1548,12 +1693,15 @@@ void ixgbe_write_eitr(struct ixgbe_q_ve
        int v_idx = q_vector->v_idx;
        u32 itr_reg = EITR_INTS_PER_SEC_TO_REG(q_vector->eitr);
  
 -      if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
 +      switch (adapter->hw.mac.type) {
 +      case ixgbe_mac_82598EB:
                /* must write high and low 16 bits to reset counter */
                itr_reg |= (itr_reg << 16);
 -      } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
 +              break;
 +      case ixgbe_mac_82599EB:
 +      case ixgbe_mac_X540:
                /*
 -               * 82599 can support a value of zero, so allow it for
 +               * 82599 and X540 can support a value of zero, so allow it for
                 * max interrupt rate, but there is an errata where it can
                 * not be zero with RSC
                 */
                 * immediate assertion of the interrupt
                 */
                itr_reg |= IXGBE_EITR_CNT_WDIS;
 +              break;
 +      default:
 +              break;
        }
        IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg);
  }
  static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
  {
        struct ixgbe_adapter *adapter = q_vector->adapter;
 +      int i, r_idx;
        u32 new_itr;
        u8 current_itr, ret_itr;
 -      int i, r_idx;
 -      struct ixgbe_ring *rx_ring, *tx_ring;
  
        r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
        for (i = 0; i < q_vector->txr_count; i++) {
 -              tx_ring = adapter->tx_ring[r_idx];
 +              struct ixgbe_ring *tx_ring = adapter->tx_ring[r_idx];
                ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
                                           q_vector->tx_itr,
                                           tx_ring->total_packets,
  
        r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
        for (i = 0; i < q_vector->rxr_count; i++) {
 -              rx_ring = adapter->rx_ring[r_idx];
 +              struct ixgbe_ring *rx_ring = adapter->rx_ring[r_idx];
                ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
                                           q_vector->rx_itr,
                                           rx_ring->total_packets,
  
        if (new_itr != q_vector->eitr) {
                /* do an exponential smoothing */
 -              new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
 +              new_itr = ((q_vector->eitr * 9) + new_itr)/10;
  
                /* save the algorithm value here, not the smoothed one */
                q_vector->eitr = new_itr;
@@@ -1844,18 -1694,17 +1844,18 @@@ static void ixgbe_check_sfp_event(struc
  {
        struct ixgbe_hw *hw = &adapter->hw;
  
 +      if (eicr & IXGBE_EICR_GPI_SDP2) {
 +              /* Clear the interrupt */
 +              IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
 +              if (!test_bit(__IXGBE_DOWN, &adapter->state))
 +                      schedule_work(&adapter->sfp_config_module_task);
 +      }
 +
        if (eicr & IXGBE_EICR_GPI_SDP1) {
                /* Clear the interrupt */
                IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
 -              schedule_work(&adapter->multispeed_fiber_task);
 -      } else if (eicr & IXGBE_EICR_GPI_SDP2) {
 -              /* Clear the interrupt */
 -              IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
 -              schedule_work(&adapter->sfp_config_module_task);
 -      } else {
 -              /* Interrupt isn't for us... */
 -              return;
 +              if (!test_bit(__IXGBE_DOWN, &adapter->state))
 +                      schedule_work(&adapter->multispeed_fiber_task);
        }
  }
  
@@@ -1895,9 -1744,16 +1895,9 @@@ static irqreturn_t ixgbe_msix_lsc(int i
        if (eicr & IXGBE_EICR_MAILBOX)
                ixgbe_msg_task(adapter);
  
 -      if (hw->mac.type == ixgbe_mac_82598EB)
 -              ixgbe_check_fan_failure(adapter, eicr);
 -
 -      if (hw->mac.type == ixgbe_mac_82599EB) {
 -              ixgbe_check_sfp_event(adapter, eicr);
 -              adapter->interrupt_event = eicr;
 -              if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
 -                  ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC)))
 -                      schedule_work(&adapter->check_overtemp_task);
 -
 +      switch (hw->mac.type) {
 +      case ixgbe_mac_82599EB:
 +      case ixgbe_mac_X540:
                /* Handle Flow Director Full threshold interrupt */
                if (eicr & IXGBE_EICR_FLOW_DIR) {
                        int i;
                        for (i = 0; i < adapter->num_tx_queues; i++) {
                                struct ixgbe_ring *tx_ring =
                                                            adapter->tx_ring[i];
 -                              if (test_and_clear_bit(__IXGBE_FDIR_INIT_DONE,
 -                                                     &tx_ring->reinit_state))
 +                              if (test_and_clear_bit(__IXGBE_TX_FDIR_INIT_DONE,
 +                                                     &tx_ring->state))
                                        schedule_work(&adapter->fdir_reinit_task);
                        }
                }
 +              ixgbe_check_sfp_event(adapter, eicr);
 +              if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
 +                  ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) {
 +                      adapter->interrupt_event = eicr;
 +                      schedule_work(&adapter->check_overtemp_task);
 +              }
 +              break;
 +      default:
 +              break;
        }
 +
 +      ixgbe_check_fan_failure(adapter, eicr);
 +
        if (!test_bit(__IXGBE_DOWN, &adapter->state))
                IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
  
@@@ -1935,24 -1779,15 +1935,24 @@@ static inline void ixgbe_irq_enable_que
                                           u64 qmask)
  {
        u32 mask;
 +      struct ixgbe_hw *hw = &adapter->hw;
  
 -      if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
 +      switch (hw->mac.type) {
 +      case ixgbe_mac_82598EB:
                mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
 -              IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
 -      } else {
 +              IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
 +              break;
 +      case ixgbe_mac_82599EB:
 +      case ixgbe_mac_X540:
                mask = (qmask & 0xFFFFFFFF);
 -              IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(0), mask);
 +              if (mask)
 +                      IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
                mask = (qmask >> 32);
 -              IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(1), mask);
 +              if (mask)
 +                      IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
 +              break;
 +      default:
 +              break;
        }
        /* skip the flush */
  }
@@@ -1961,24 -1796,15 +1961,24 @@@ static inline void ixgbe_irq_disable_qu
                                            u64 qmask)
  {
        u32 mask;
 +      struct ixgbe_hw *hw = &adapter->hw;
  
 -      if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
 +      switch (hw->mac.type) {
 +      case ixgbe_mac_82598EB:
                mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
 -              IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, mask);
 -      } else {
 +              IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
 +              break;
 +      case ixgbe_mac_82599EB:
 +      case ixgbe_mac_X540:
                mask = (qmask & 0xFFFFFFFF);
 -              IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), mask);
 +              if (mask)
 +                      IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
                mask = (qmask >> 32);
 -              IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), mask);
 +              if (mask)
 +                      IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
 +              break;
 +      default:
 +              break;
        }
        /* skip the flush */
  }
@@@ -2021,13 -1847,8 +2021,13 @@@ static irqreturn_t ixgbe_msix_clean_rx(
        int r_idx;
        int i;
  
 +#ifdef CONFIG_IXGBE_DCA
 +      if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
 +              ixgbe_update_dca(q_vector);
 +#endif
 +
        r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
 -      for (i = 0;  i < q_vector->rxr_count; i++) {
 +      for (i = 0; i < q_vector->rxr_count; i++) {
                rx_ring = adapter->rx_ring[r_idx];
                rx_ring->total_bytes = 0;
                rx_ring->total_packets = 0;
        if (!q_vector->rxr_count)
                return IRQ_HANDLED;
  
 -      /* disable interrupts on this vector only */
        /* EIAM disabled interrupts (on this vector) for us */
        napi_schedule(&q_vector->napi);
  
@@@ -2096,14 -1918,13 +2096,14 @@@ static int ixgbe_clean_rxonly(struct na
        int work_done = 0;
        long r_idx;
  
 -      r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
 -      rx_ring = adapter->rx_ring[r_idx];
  #ifdef CONFIG_IXGBE_DCA
        if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
 -              ixgbe_update_rx_dca(adapter, rx_ring);
 +              ixgbe_update_dca(q_vector);
  #endif
  
 +      r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
 +      rx_ring = adapter->rx_ring[r_idx];
 +
        ixgbe_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
  
        /* If all Rx work done, exit the polling mode */
@@@ -2137,14 -1958,13 +2137,14 @@@ static int ixgbe_clean_rxtx_many(struc
        long r_idx;
        bool tx_clean_complete = true;
  
 +#ifdef CONFIG_IXGBE_DCA
 +      if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
 +              ixgbe_update_dca(q_vector);
 +#endif
 +
        r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
        for (i = 0; i < q_vector->txr_count; i++) {
                ring = adapter->tx_ring[r_idx];
 -#ifdef CONFIG_IXGBE_DCA
 -              if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
 -                      ixgbe_update_tx_dca(adapter, ring);
 -#endif
                tx_clean_complete &= ixgbe_clean_tx_irq(q_vector, ring);
                r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
                                      r_idx + 1);
        r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
        for (i = 0; i < q_vector->rxr_count; i++) {
                ring = adapter->rx_ring[r_idx];
 -#ifdef CONFIG_IXGBE_DCA
 -              if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
 -                      ixgbe_update_rx_dca(adapter, ring);
 -#endif
                ixgbe_clean_rx_irq(q_vector, ring, &work_done, budget);
                r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
                                      r_idx + 1);
@@@ -2195,14 -2019,13 +2195,14 @@@ static int ixgbe_clean_txonly(struct na
        int work_done = 0;
        long r_idx;
  
 -      r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
 -      tx_ring = adapter->tx_ring[r_idx];
  #ifdef CONFIG_IXGBE_DCA
        if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
 -              ixgbe_update_tx_dca(adapter, tx_ring);
 +              ixgbe_update_dca(q_vector);
  #endif
  
 +      r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
 +      tx_ring = adapter->tx_ring[r_idx];
 +
        if (!ixgbe_clean_tx_irq(q_vector, tx_ring))
                work_done = budget;
  
@@@ -2223,27 -2046,24 +2223,27 @@@ static inline void map_vector_to_rxq(st
                                     int r_idx)
  {
        struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
 +      struct ixgbe_ring *rx_ring = a->rx_ring[r_idx];
  
        set_bit(r_idx, q_vector->rxr_idx);
        q_vector->rxr_count++;
 +      rx_ring->q_vector = q_vector;
  }
  
  static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
                                     int t_idx)
  {
        struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
 +      struct ixgbe_ring *tx_ring = a->tx_ring[t_idx];
  
        set_bit(t_idx, q_vector->txr_idx);
        q_vector->txr_count++;
 +      tx_ring->q_vector = q_vector;
  }
  
  /**
   * ixgbe_map_rings_to_vectors - Maps descriptor rings to vectors
   * @adapter: board private structure to initialize
 - * @vectors: allotted vector count for descriptor rings
   *
   * This function maps descriptor rings to the queue-specific vectors
   * we were allotted through the MSI-X enabling code.  Ideally, we'd have
   * group the rings as "efficiently" as possible.  You would add new
   * mapping configurations in here.
   **/
 -static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter,
 -                                    int vectors)
 +static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter)
  {
 +      int q_vectors;
        int v_start = 0;
        int rxr_idx = 0, txr_idx = 0;
        int rxr_remaining = adapter->num_rx_queues;
        if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
                goto out;
  
 +      q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
 +
        /*
         * The ideal configuration...
         * We have enough vectors to map one per queue.
         */
 -      if (vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
 +      if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
                for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
                        map_vector_to_rxq(adapter, v_start, rxr_idx);
  
         * multiple queues per vector.
         */
        /* Re-adjusting *qpv takes care of the remainder. */
 -      for (i = v_start; i < vectors; i++) {
 -              rqpv = DIV_ROUND_UP(rxr_remaining, vectors - i);
 +      for (i = v_start; i < q_vectors; i++) {
 +              rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
                for (j = 0; j < rqpv; j++) {
                        map_vector_to_rxq(adapter, i, rxr_idx);
                        rxr_idx++;
                        rxr_remaining--;
                }
 -      }
 -      for (i = v_start; i < vectors; i++) {
 -              tqpv = DIV_ROUND_UP(txr_remaining, vectors - i);
 +              tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
                for (j = 0; j < tqpv; j++) {
                        map_vector_to_txq(adapter, i, txr_idx);
                        txr_idx++;
                        txr_remaining--;
                }
        }
 -
  out:
        return err;
  }
@@@ -2323,36 -2144,30 +2323,36 @@@ static int ixgbe_request_msix_irqs(stru
        /* Decrement for Other and TCP Timer vectors */
        q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
  
 -      /* Map the Tx/Rx rings to the vectors we were allotted. */
 -      err = ixgbe_map_rings_to_vectors(adapter, q_vectors);
 +      err = ixgbe_map_rings_to_vectors(adapter);
        if (err)
 -              goto out;
 +              return err;
  
 -#define SET_HANDLER(_v) ((!(_v)->rxr_count) ? &ixgbe_msix_clean_tx : \
 -                       (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \
 -                       &ixgbe_msix_clean_many)
 +#define SET_HANDLER(_v) (((_v)->rxr_count && (_v)->txr_count)        \
 +                                        ? &ixgbe_msix_clean_many : \
 +                        (_v)->rxr_count ? &ixgbe_msix_clean_rx   : \
 +                        (_v)->txr_count ? &ixgbe_msix_clean_tx   : \
 +                        NULL)
        for (vector = 0; vector < q_vectors; vector++) {
 -              handler = SET_HANDLER(adapter->q_vector[vector]);
 +              struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
 +              handler = SET_HANDLER(q_vector);
  
                if (handler == &ixgbe_msix_clean_rx) {
 -                      sprintf(adapter->name[vector], "%s-%s-%d",
 +                      sprintf(q_vector->name, "%s-%s-%d",
                                netdev->name, "rx", ri++);
                } else if (handler == &ixgbe_msix_clean_tx) {
 -                      sprintf(adapter->name[vector], "%s-%s-%d",
 +                      sprintf(q_vector->name, "%s-%s-%d",
                                netdev->name, "tx", ti++);
 -              } else
 -                      sprintf(adapter->name[vector], "%s-%s-%d",
 -                              netdev->name, "TxRx", vector);
 -
 +              } else if (handler == &ixgbe_msix_clean_many) {
 +                      sprintf(q_vector->name, "%s-%s-%d",
 +                              netdev->name, "TxRx", ri++);
 +                      ti++;
 +              } else {
 +                      /* skip this unused q_vector */
 +                      continue;
 +              }
                err = request_irq(adapter->msix_entries[vector].vector,
 -                                handler, 0, adapter->name[vector],
 -                                adapter->q_vector[vector]);
 +                                handler, 0, q_vector->name,
 +                                q_vector);
                if (err) {
                        e_err(probe, "request_irq failed for MSIX interrupt "
                              "Error: %d\n", err);
                }
        }
  
 -      sprintf(adapter->name[vector], "%s:lsc", netdev->name);
 +      sprintf(adapter->lsc_int_name, "%s:lsc", netdev->name);
        err = request_irq(adapter->msix_entries[vector].vector,
 -                        ixgbe_msix_lsc, 0, adapter->name[vector], netdev);
 +                        ixgbe_msix_lsc, 0, adapter->lsc_int_name, netdev);
        if (err) {
                e_err(probe, "request_irq for msix_lsc failed: %d\n", err);
                goto free_queue_irqs;
@@@ -2378,16 -2193,17 +2378,16 @@@ free_queue_irqs
        pci_disable_msix(adapter->pdev);
        kfree(adapter->msix_entries);
        adapter->msix_entries = NULL;
 -out:
        return err;
  }
  
  static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
  {
        struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
 -      u8 current_itr;
 -      u32 new_itr = q_vector->eitr;
        struct ixgbe_ring *rx_ring = adapter->rx_ring[0];
        struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
 +      u32 new_itr = q_vector->eitr;
 +      u8 current_itr;
  
        q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr,
                                            q_vector->tx_itr,
  
        if (new_itr != q_vector->eitr) {
                /* do an exponential smoothing */
 -              new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
 +              new_itr = ((q_vector->eitr * 9) + new_itr)/10;
  
 -              /* save the algorithm value here, not the smoothed one */
 +              /* save the algorithm value here */
                q_vector->eitr = new_itr;
  
                ixgbe_write_eitr(q_vector);
@@@ -2440,17 -2256,12 +2440,17 @@@ static inline void ixgbe_irq_enable(str
                mask |= IXGBE_EIMS_GPI_SDP0;
        if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
                mask |= IXGBE_EIMS_GPI_SDP1;
 -      if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
 +      switch (adapter->hw.mac.type) {
 +      case ixgbe_mac_82599EB:
 +      case ixgbe_mac_X540:
                mask |= IXGBE_EIMS_ECC;
                mask |= IXGBE_EIMS_GPI_SDP1;
                mask |= IXGBE_EIMS_GPI_SDP2;
                if (adapter->num_vfs)
                        mask |= IXGBE_EIMS_MAILBOX;
 +              break;
 +      default:
 +              break;
        }
        if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
            adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
@@@ -2506,21 -2317,13 +2506,21 @@@ static irqreturn_t ixgbe_intr(int irq, 
        if (eicr & IXGBE_EICR_LSC)
                ixgbe_check_lsc(adapter);
  
 -      if (hw->mac.type == ixgbe_mac_82599EB)
 +      switch (hw->mac.type) {
 +      case ixgbe_mac_82599EB:
 +      case ixgbe_mac_X540:
                ixgbe_check_sfp_event(adapter, eicr);
 +              if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
 +                  ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) {
 +                      adapter->interrupt_event = eicr;
 +                      schedule_work(&adapter->check_overtemp_task);
 +              }
 +              break;
 +      default:
 +              break;
 +      }
  
        ixgbe_check_fan_failure(adapter, eicr);
 -      if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
 -          ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC)))
 -              schedule_work(&adapter->check_overtemp_task);
  
        if (napi_schedule_prep(&(q_vector->napi))) {
                adapter->tx_ring[0]->total_packets = 0;
@@@ -2613,20 -2416,14 +2613,20 @@@ static void ixgbe_free_irq(struct ixgbe
   **/
  static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
  {
 -      if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
 +      switch (adapter->hw.mac.type) {
 +      case ixgbe_mac_82598EB:
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
 -      } else {
 +              break;
 +      case ixgbe_mac_82599EB:
 +      case ixgbe_mac_X540:
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
                if (adapter->num_vfs > 32)
                        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0);
 +              break;
 +      default:
 +              break;
        }
        IXGBE_WRITE_FLUSH(&adapter->hw);
        if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
@@@ -2672,7 -2469,7 +2672,7 @@@ void ixgbe_configure_tx_ring(struct ixg
        u64 tdba = ring->dma;
        int wait_loop = 10;
        u32 txdctl;
 -      u16 reg_idx = ring->reg_idx;
 +      u8 reg_idx = ring->reg_idx;
  
        /* disable queue to avoid issues while updating state */
        txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
                        ring->count * sizeof(union ixgbe_adv_tx_desc));
        IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0);
        IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0);
 -      ring->head = IXGBE_TDH(reg_idx);
 -      ring->tail = IXGBE_TDT(reg_idx);
 +      ring->tail = hw->hw_addr + IXGBE_TDT(reg_idx);
  
        /* configure fetching thresholds */
        if (adapter->rx_itr_setting == 0) {
        }
  
        /* reinitialize flowdirector state */
 -      set_bit(__IXGBE_FDIR_INIT_DONE, &ring->reinit_state);
 +      if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) &&
 +          adapter->atr_sample_rate) {
 +              ring->atr_sample_rate = adapter->atr_sample_rate;
 +              ring->atr_count = 0;
 +              set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state);
 +      } else {
 +              ring->atr_sample_rate = 0;
 +      }
 +
 +      clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state);
  
        /* enable queue */
        txdctl |= IXGBE_TXDCTL_ENABLE;
@@@ -2803,22 -2592,16 +2803,22 @@@ static void ixgbe_configure_srrctl(stru
                                   struct ixgbe_ring *rx_ring)
  {
        u32 srrctl;
 -      int index;
 -      struct ixgbe_ring_feature *feature = adapter->ring_feature;
 +      u8 reg_idx = rx_ring->reg_idx;
  
 -      index = rx_ring->reg_idx;
 -      if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
 -              unsigned long mask;
 -              mask = (unsigned long) feature[RING_F_RSS].mask;
 -              index = index & mask;
 +      switch (adapter->hw.mac.type) {
 +      case ixgbe_mac_82598EB: {
 +              struct ixgbe_ring_feature *feature = adapter->ring_feature;
 +              const int mask = feature[RING_F_RSS].mask;
 +              reg_idx = reg_idx & mask;
        }
 -      srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(index));
 +              break;
 +      case ixgbe_mac_82599EB:
 +      case ixgbe_mac_X540:
 +      default:
 +              break;
 +      }
 +
 +      srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx));
  
        srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
        srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
        srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
                  IXGBE_SRRCTL_BSIZEHDR_MASK;
  
 -      if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
 +      if (ring_is_ps_enabled(rx_ring)) {
  #if (PAGE_SIZE / 2) > IXGBE_MAX_RXBUFFER
                srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
  #else
                srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
        }
  
 -      IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(index), srrctl);
 +      IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx), srrctl);
  }
  
  static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
        IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
  }
  
 +/**
 + * ixgbe_clear_rscctl - disable RSC for the indicated ring
 + * @adapter: address of board private structure
 + * @ring: structure containing ring specific data
 + **/
 +void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter,
 +                        struct ixgbe_ring *ring)
 +{
 +      struct ixgbe_hw *hw = &adapter->hw;
 +      u32 rscctrl;
 +      u8 reg_idx = ring->reg_idx;
 +
 +      rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx));
 +      rscctrl &= ~IXGBE_RSCCTL_RSCEN;
 +      IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl);
 +}
 +
  /**
   * ixgbe_configure_rscctl - enable RSC for the indicated ring
   * @adapter:    address of board private structure
   * @index:      index of ring to set
   **/
 -static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
 +void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
                                   struct ixgbe_ring *ring)
  {
        struct ixgbe_hw *hw = &adapter->hw;
        u32 rscctrl;
        int rx_buf_len;
 -      u16 reg_idx = ring->reg_idx;
 +      u8 reg_idx = ring->reg_idx;
  
 -      if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))
 +      if (!ring_is_rsc_enabled(ring))
                return;
  
        rx_buf_len = ring->rx_buf_len;
         * total size of max desc * buf_len is not greater
         * than 65535
         */
 -      if (ring->flags & IXGBE_RING_RX_PS_ENABLED) {
 +      if (ring_is_ps_enabled(ring)) {
  #if (MAX_SKB_FRAGS > 16)
                rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
  #elif (MAX_SKB_FRAGS > 8)
@@@ -3004,9 -2770,9 +3004,9 @@@ static void ixgbe_rx_desc_queue_enable(
                                       struct ixgbe_ring *ring)
  {
        struct ixgbe_hw *hw = &adapter->hw;
 -      int reg_idx = ring->reg_idx;
        int wait_loop = IXGBE_MAX_RX_DESC_POLL;
        u32 rxdctl;
 +      u8 reg_idx = ring->reg_idx;
  
        /* RXDCTL.EN will return 0 on 82598 if link is down, so skip it */
        if (hw->mac.type == ixgbe_mac_82598EB &&
@@@ -3030,7 -2796,7 +3030,7 @@@ void ixgbe_configure_rx_ring(struct ixg
        struct ixgbe_hw *hw = &adapter->hw;
        u64 rdba = ring->dma;
        u32 rxdctl;
 -      u16 reg_idx = ring->reg_idx;
 +      u8 reg_idx = ring->reg_idx;
  
        /* disable queue to avoid issues while updating state */
        rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
                        ring->count * sizeof(union ixgbe_adv_rx_desc));
        IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0);
        IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0);
 -      ring->head = IXGBE_RDH(reg_idx);
 -      ring->tail = IXGBE_RDT(reg_idx);
 +      ring->tail = hw->hw_addr + IXGBE_RDT(reg_idx);
  
        ixgbe_configure_srrctl(adapter, ring);
        ixgbe_configure_rscctl(adapter, ring);
        IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
  
        ixgbe_rx_desc_queue_enable(adapter, ring);
 -      ixgbe_alloc_rx_buffers(adapter, ring, IXGBE_DESC_UNUSED(ring));
 +      ixgbe_alloc_rx_buffers(ring, IXGBE_DESC_UNUSED(ring));
  }
  
  static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
@@@ -3189,32 -2956,24 +3189,32 @@@ static void ixgbe_set_rx_buffer_len(str
                rx_ring->rx_buf_len = rx_buf_len;
  
                if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)
 -                      rx_ring->flags |= IXGBE_RING_RX_PS_ENABLED;
 +                      set_ring_ps_enabled(rx_ring);
                else
 -                      rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED;
 +                      clear_ring_ps_enabled(rx_ring);
 +
 +              if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
 +                      set_ring_rsc_enabled(rx_ring);
 +              else
 +                      clear_ring_rsc_enabled(rx_ring);
  
  #ifdef IXGBE_FCOE
                if (netdev->features & NETIF_F_FCOE_MTU) {
                        struct ixgbe_ring_feature *f;
                        f = &adapter->ring_feature[RING_F_FCOE];
                        if ((i >= f->mask) && (i < f->mask + f->indices)) {
 -                              rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED;
 +                              clear_ring_ps_enabled(rx_ring);
                                if (rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE)
                                        rx_ring->rx_buf_len =
                                                IXGBE_FCOE_JUMBO_FRAME_SIZE;
 +                      } else if (!ring_is_rsc_enabled(rx_ring) &&
 +                                 !ring_is_ps_enabled(rx_ring)) {
 +                              rx_ring->rx_buf_len =
 +                                              IXGBE_FCOE_JUMBO_FRAME_SIZE;
                        }
                }
  #endif /* IXGBE_FCOE */
        }
 -
  }
  
  static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
                rdrxctl |= IXGBE_RDRXCTL_MVMEN;
                break;
        case ixgbe_mac_82599EB:
 +      case ixgbe_mac_X540:
                /* Disable RSC for ACK packets */
                IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
                   (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
@@@ -3365,7 -3123,6 +3365,7 @@@ static void ixgbe_vlan_strip_disable(st
                IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
                break;
        case ixgbe_mac_82599EB:
 +      case ixgbe_mac_X540:
                for (i = 0; i < adapter->num_rx_queues; i++) {
                        j = adapter->rx_ring[i]->reg_idx;
                        vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
@@@ -3395,7 -3152,6 +3395,7 @@@ static void ixgbe_vlan_strip_enable(str
                IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
                break;
        case ixgbe_mac_82599EB:
 +      case ixgbe_mac_X540:
                for (i = 0; i < adapter->num_rx_queues; i++) {
                        j = adapter->rx_ring[i]->reg_idx;
                        vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
@@@ -3593,6 -3349,8 +3593,6 @@@ static void ixgbe_configure_dcb(struct 
  {
        struct ixgbe_hw *hw = &adapter->hw;
        int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
 -      u32 txdctl;
 -      int i, j;
  
        if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) {
                if (hw->mac.type == ixgbe_mac_82598EB)
                max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
  #endif
  
 -      ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, max_frame,
 +      ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
                                        DCB_TX_CONFIG);
 -      ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, max_frame,
 +      ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
                                        DCB_RX_CONFIG);
  
 -      /* reconfigure the hardware */
 -      ixgbe_dcb_hw_config(&adapter->hw, &adapter->dcb_cfg);
 -
 -      for (i = 0; i < adapter->num_tx_queues; i++) {
 -              j = adapter->tx_ring[i]->reg_idx;
 -              txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
 -              /* PThresh workaround for Tx hang with DFP enabled. */
 -              txdctl |= 32;
 -              IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
 -      }
        /* Enable VLAN tag insert/strip */
        adapter->netdev->features |= NETIF_F_HW_VLAN_RX;
  
        hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true);
 +
 +      /* reconfigure the hardware */
 +      ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg);
  }
  
  #endif
@@@ -3751,9 -3516,8 +3751,9 @@@ static void ixgbe_setup_gpie(struct ixg
                case ixgbe_mac_82598EB:
                        IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
                        break;
 -              default:
                case ixgbe_mac_82599EB:
 +              case ixgbe_mac_X540:
 +              default:
                        IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
                        IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
                        break;
@@@ -3797,24 -3561,13 +3797,24 @@@ static int ixgbe_up_complete(struct ixg
        else
                ixgbe_configure_msi_and_legacy(adapter);
  
 -      /* enable the optics */
 -      if (hw->phy.multispeed_fiber)
 +      /* enable the optics for both multi-speed fiber and 82599 SFP+ fiber */
 +      if (hw->mac.ops.enable_tx_laser &&
 +          ((hw->phy.multispeed_fiber) ||
 +           ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
 +            (hw->mac.type == ixgbe_mac_82599EB))))
                hw->mac.ops.enable_tx_laser(hw);
  
        clear_bit(__IXGBE_DOWN, &adapter->state);
        ixgbe_napi_enable_all(adapter);
  
 +      if (ixgbe_is_sfp(hw)) {
 +              ixgbe_sfp_link_config(adapter);
 +      } else {
 +              err = ixgbe_non_sfp_link_config(hw);
 +              if (err)
 +                      e_err(probe, "link_config FAILED %d\n", err);
 +      }
 +
        /* clear any pending interrupts, may auto mask */
        IXGBE_READ_REG(hw, IXGBE_EICR);
        ixgbe_irq_enable(adapter, true, true);
         * If we're not hot-pluggable SFP+, we just need to configure link
         * and bring it up.
         */
 -      if (hw->phy.type == ixgbe_phy_unknown) {
 -              err = hw->phy.ops.identify(hw);
 -              if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
 -                      /*
 -                       * Take the device down and schedule the sfp tasklet
 -                       * which will unregister_netdev and log it.
 -                       */
 -                      ixgbe_down(adapter);
 -                      schedule_work(&adapter->sfp_config_module_task);
 -                      return err;
 -              }
 -      }
 -
 -      if (ixgbe_is_sfp(hw)) {
 -              ixgbe_sfp_link_config(adapter);
 -      } else {
 -              err = ixgbe_non_sfp_link_config(hw);
 -              if (err)
 -                      e_err(probe, "link_config FAILED %d\n", err);
 -      }
 +      if (hw->phy.type == ixgbe_phy_unknown)
 +              schedule_work(&adapter->sfp_config_module_task);
  
        /* enable transmits */
        netif_tx_start_all_queues(adapter->netdev);
@@@ -3916,13 -3687,15 +3916,13 @@@ void ixgbe_reset(struct ixgbe_adapter *
  
  /**
   * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
 - * @adapter: board private structure
   * @rx_ring: ring to free buffers from
   **/
 -static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
 -                              struct ixgbe_ring *rx_ring)
 +static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
  {
 -      struct pci_dev *pdev = adapter->pdev;
 +      struct device *dev = rx_ring->dev;
        unsigned long size;
 -      unsigned int i;
 +      u16 i;
  
        /* ring already cleared, nothing to do */
        if (!rx_ring->rx_buffer_info)
  
                rx_buffer_info = &rx_ring->rx_buffer_info[i];
                if (rx_buffer_info->dma) {
 -                      dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
 +                      dma_unmap_single(rx_ring->dev, rx_buffer_info->dma,
                                         rx_ring->rx_buf_len,
                                         DMA_FROM_DEVICE);
                        rx_buffer_info->dma = 0;
                        do {
                                struct sk_buff *this = skb;
                                if (IXGBE_RSC_CB(this)->delay_unmap) {
 -                                      dma_unmap_single(&pdev->dev,
 +                                      dma_unmap_single(dev,
                                                         IXGBE_RSC_CB(this)->dma,
                                                         rx_ring->rx_buf_len,
                                                         DMA_FROM_DEVICE);
                if (!rx_buffer_info->page)
                        continue;
                if (rx_buffer_info->page_dma) {
 -                      dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
 +                      dma_unmap_page(dev, rx_buffer_info->page_dma,
                                       PAGE_SIZE / 2, DMA_FROM_DEVICE);
                        rx_buffer_info->page_dma = 0;
                }
  
        rx_ring->next_to_clean = 0;
        rx_ring->next_to_use = 0;
 -
 -      if (rx_ring->head)
 -              writel(0, adapter->hw.hw_addr + rx_ring->head);
 -      if (rx_ring->tail)
 -              writel(0, adapter->hw.hw_addr + rx_ring->tail);
  }
  
  /**
   * ixgbe_clean_tx_ring - Free Tx Buffers
 - * @adapter: board private structure
   * @tx_ring: ring to be cleaned
   **/
 -static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter,
 -                              struct ixgbe_ring *tx_ring)
 +static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
  {
        struct ixgbe_tx_buffer *tx_buffer_info;
        unsigned long size;
 -      unsigned int i;
 +      u16 i;
  
        /* ring already cleared, nothing to do */
        if (!tx_ring->tx_buffer_info)
        /* Free all the Tx ring sk_buffs */
        for (i = 0; i < tx_ring->count; i++) {
                tx_buffer_info = &tx_ring->tx_buffer_info[i];
 -              ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
 +              ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
        }
  
        size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
  
        tx_ring->next_to_use = 0;
        tx_ring->next_to_clean = 0;
 -
 -      if (tx_ring->head)
 -              writel(0, adapter->hw.hw_addr + tx_ring->head);
 -      if (tx_ring->tail)
 -              writel(0, adapter->hw.hw_addr + tx_ring->tail);
  }
  
  /**
@@@ -4017,7 -3802,7 +4017,7 @@@ static void ixgbe_clean_all_rx_rings(st
        int i;
  
        for (i = 0; i < adapter->num_rx_queues; i++)
 -              ixgbe_clean_rx_ring(adapter, adapter->rx_ring[i]);
 +              ixgbe_clean_rx_ring(adapter->rx_ring[i]);
  }
  
  /**
@@@ -4029,7 -3814,7 +4029,7 @@@ static void ixgbe_clean_all_tx_rings(st
        int i;
  
        for (i = 0; i < adapter->num_tx_queues; i++)
 -              ixgbe_clean_tx_ring(adapter, adapter->tx_ring[i]);
 +              ixgbe_clean_tx_ring(adapter->tx_ring[i]);
  }
  
  void ixgbe_down(struct ixgbe_adapter *adapter)
        struct ixgbe_hw *hw = &adapter->hw;
        u32 rxctrl;
        u32 txdctl;
 -      int i, j;
 +      int i;
        int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
  
        /* signal that we are down to the interrupt handler */
  
        /* disable transmits in the hardware now that interrupts are off */
        for (i = 0; i < adapter->num_tx_queues; i++) {
 -              j = adapter->tx_ring[i]->reg_idx;
 -              txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
 -              IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j),
 +              u8 reg_idx = adapter->tx_ring[i]->reg_idx;
 +              txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
 +              IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx),
                                (txdctl & ~IXGBE_TXDCTL_ENABLE));
        }
        /* Disable the Tx DMA engine on 82599 */
 -      if (hw->mac.type == ixgbe_mac_82599EB)
 +      switch (hw->mac.type) {
 +      case ixgbe_mac_82599EB:
 +      case ixgbe_mac_X540:
                IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
                                (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
                                 ~IXGBE_DMATXCTL_TE));
 -
 -      /* power down the optics */
 -      if (hw->phy.multispeed_fiber)
 -              hw->mac.ops.disable_tx_laser(hw);
 +              break;
 +      default:
 +              break;
 +      }
  
        /* clear n-tuple filters that are cached */
        ethtool_ntuple_flush(netdev);
  
        if (!pci_channel_offline(adapter->pdev))
                ixgbe_reset(adapter);
 +
 +      /* power down the optics for multispeed fiber and 82599 SFP+ fiber */
 +      if (hw->mac.ops.disable_tx_laser &&
 +          ((hw->phy.multispeed_fiber) ||
 +           ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
 +            (hw->mac.type == ixgbe_mac_82599EB))))
 +              hw->mac.ops.disable_tx_laser(hw);
 +
        ixgbe_clean_all_tx_rings(adapter);
        ixgbe_clean_all_rx_rings(adapter);
  
@@@ -4150,8 -3925,10 +4150,8 @@@ static int ixgbe_poll(struct napi_struc
        int tx_clean_complete, work_done = 0;
  
  #ifdef CONFIG_IXGBE_DCA
 -      if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
 -              ixgbe_update_tx_dca(adapter, adapter->tx_ring[0]);
 -              ixgbe_update_rx_dca(adapter, adapter->rx_ring[0]);
 -      }
 +      if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
 +              ixgbe_update_dca(q_vector);
  #endif
  
        tx_clean_complete = ixgbe_clean_tx_irq(q_vector, adapter->tx_ring[0]);
@@@ -4179,8 -3956,6 +4179,8 @@@ static void ixgbe_tx_timeout(struct net
  {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
  
 +      adapter->tx_timeout_count++;
 +
        /* Do the reset outside of interrupt context */
        schedule_work(&adapter->reset_task);
  }
@@@ -4195,6 -3970,8 +4195,6 @@@ static void ixgbe_reset_task(struct wor
            test_bit(__IXGBE_RESETTING, &adapter->state))
                return;
  
 -      adapter->tx_timeout_count++;
 -
        ixgbe_dump(adapter);
        netdev_err(adapter->netdev, "Reset adapter\n");
        ixgbe_reinit_locked(adapter);
@@@ -4444,16 -4221,19 +4444,16 @@@ static void ixgbe_acquire_msix_vectors(
  static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
  {
        int i;
 -      bool ret = false;
  
 -      if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
 -              for (i = 0; i < adapter->num_rx_queues; i++)
 -                      adapter->rx_ring[i]->reg_idx = i;
 -              for (i = 0; i < adapter->num_tx_queues; i++)
 -                      adapter->tx_ring[i]->reg_idx = i;
 -              ret = true;
 -      } else {
 -              ret = false;
 -      }
 +      if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
 +              return false;
  
 -      return ret;
 +      for (i = 0; i < adapter->num_rx_queues; i++)
 +              adapter->rx_ring[i]->reg_idx = i;
 +      for (i = 0; i < adapter->num_tx_queues; i++)
 +              adapter->tx_ring[i]->reg_idx = i;
 +
 +      return true;
  }
  
  #ifdef CONFIG_IXGBE_DCB
@@@ -4470,67 -4250,71 +4470,67 @@@ static inline bool ixgbe_cache_ring_dcb
        bool ret = false;
        int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
  
 -      if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
 -              if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
 -                      /* the number of queues is assumed to be symmetric */
 -                      for (i = 0; i < dcb_i; i++) {
 -                              adapter->rx_ring[i]->reg_idx = i << 3;
 -                              adapter->tx_ring[i]->reg_idx = i << 2;
 -                      }
 -                      ret = true;
 -              } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
 -                      if (dcb_i == 8) {
 -                              /*
 -                               * Tx TC0 starts at: descriptor queue 0
 -                               * Tx TC1 starts at: descriptor queue 32
 -                               * Tx TC2 starts at: descriptor queue 64
 -                               * Tx TC3 starts at: descriptor queue 80
 -                               * Tx TC4 starts at: descriptor queue 96
 -                               * Tx TC5 starts at: descriptor queue 104
 -                               * Tx TC6 starts at: descriptor queue 112
 -                               * Tx TC7 starts at: descriptor queue 120
 -                               *
 -                               * Rx TC0-TC7 are offset by 16 queues each
 -                               */
 -                              for (i = 0; i < 3; i++) {
 -                                      adapter->tx_ring[i]->reg_idx = i << 5;
 -                                      adapter->rx_ring[i]->reg_idx = i << 4;
 -                              }
 -                              for ( ; i < 5; i++) {
 -                                      adapter->tx_ring[i]->reg_idx =
 -                                                               ((i + 2) << 4);
 -                                      adapter->rx_ring[i]->reg_idx = i << 4;
 -                              }
 -                              for ( ; i < dcb_i; i++) {
 -                                      adapter->tx_ring[i]->reg_idx =
 -                                                               ((i + 8) << 3);
 -                                      adapter->rx_ring[i]->reg_idx = i << 4;
 -                              }
 +      if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
 +              return false;
  
 -                              ret = true;
 -                      } else if (dcb_i == 4) {
 -                              /*
 -                               * Tx TC0 starts at: descriptor queue 0
 -                               * Tx TC1 starts at: descriptor queue 64
 -                               * Tx TC2 starts at: descriptor queue 96
 -                               * Tx TC3 starts at: descriptor queue 112
 -                               *
 -                               * Rx TC0-TC3 are offset by 32 queues each
 -                               */
 -                              adapter->tx_ring[0]->reg_idx = 0;
 -                              adapter->tx_ring[1]->reg_idx = 64;
 -                              adapter->tx_ring[2]->reg_idx = 96;
 -                              adapter->tx_ring[3]->reg_idx = 112;
 -                              for (i = 0 ; i < dcb_i; i++)
 -                                      adapter->rx_ring[i]->reg_idx = i << 5;
 -
 -                              ret = true;
 -                      } else {
 -                              ret = false;
 +      /* the number of queues is assumed to be symmetric */
 +      switch (adapter->hw.mac.type) {
 +      case ixgbe_mac_82598EB:
 +              for (i = 0; i < dcb_i; i++) {
 +                      adapter->rx_ring[i]->reg_idx = i << 3;
 +                      adapter->tx_ring[i]->reg_idx = i << 2;
 +              }
 +              ret = true;
 +              break;
 +      case ixgbe_mac_82599EB:
 +      case ixgbe_mac_X540:
 +              if (dcb_i == 8) {
 +                      /*
 +                       * Tx TC0 starts at: descriptor queue 0
 +                       * Tx TC1 starts at: descriptor queue 32
 +                       * Tx TC2 starts at: descriptor queue 64
 +                       * Tx TC3 starts at: descriptor queue 80
 +                       * Tx TC4 starts at: descriptor queue 96
 +                       * Tx TC5 starts at: descriptor queue 104
 +                       * Tx TC6 starts at: descriptor queue 112
 +                       * Tx TC7 starts at: descriptor queue 120
 +                       *
 +                       * Rx TC0-TC7 are offset by 16 queues each
 +                       */
 +                      for (i = 0; i < 3; i++) {
 +                              adapter->tx_ring[i]->reg_idx = i << 5;
 +                              adapter->rx_ring[i]->reg_idx = i << 4;
                        }
 -              } else {
 -                      ret = false;
 +                      for ( ; i < 5; i++) {
 +                              adapter->tx_ring[i]->reg_idx = ((i + 2) << 4);
 +                              adapter->rx_ring[i]->reg_idx = i << 4;
 +                      }
 +                      for ( ; i < dcb_i; i++) {
 +                              adapter->tx_ring[i]->reg_idx = ((i + 8) << 3);
 +                              adapter->rx_ring[i]->reg_idx = i << 4;
 +                      }
 +                      ret = true;
 +              } else if (dcb_i == 4) {
 +                      /*
 +                       * Tx TC0 starts at: descriptor queue 0
 +                       * Tx TC1 starts at: descriptor queue 64
 +                       * Tx TC2 starts at: descriptor queue 96
 +                       * Tx TC3 starts at: descriptor queue 112
 +                       *
 +                       * Rx TC0-TC3 are offset by 32 queues each
 +                       */
 +                      adapter->tx_ring[0]->reg_idx = 0;
 +                      adapter->tx_ring[1]->reg_idx = 64;
 +                      adapter->tx_ring[2]->reg_idx = 96;
 +                      adapter->tx_ring[3]->reg_idx = 112;
 +                      for (i = 0 ; i < dcb_i; i++)
 +                              adapter->rx_ring[i]->reg_idx = i << 5;
 +                      ret = true;
                }
 -      } else {
 -              ret = false;
 +              break;
 +      default:
 +              break;
        }
 -
        return ret;
  }
  #endif
@@@ -4570,55 -4354,55 +4570,55 @@@ static inline bool ixgbe_cache_ring_fdi
   */
  static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
  {
 -      int i, fcoe_rx_i = 0, fcoe_tx_i = 0;
 -      bool ret = false;
        struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
 +      int i;
 +      u8 fcoe_rx_i = 0, fcoe_tx_i = 0;
 +
 +      if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
 +              return false;
  
 -      if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
  #ifdef CONFIG_IXGBE_DCB
 -              if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
 -                      struct ixgbe_fcoe *fcoe = &adapter->fcoe;
 +      if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
 +              struct ixgbe_fcoe *fcoe = &adapter->fcoe;
  
 -                      ixgbe_cache_ring_dcb(adapter);
 -                      /* find out queues in TC for FCoE */
 -                      fcoe_rx_i = adapter->rx_ring[fcoe->tc]->reg_idx + 1;
 -                      fcoe_tx_i = adapter->tx_ring[fcoe->tc]->reg_idx + 1;
 -                      /*
 -                       * In 82599, the number of Tx queues for each traffic
 -                       * class for both 8-TC and 4-TC modes are:
 -                       * TCs  : TC0 TC1 TC2 TC3 TC4 TC5 TC6 TC7
 -                       * 8 TCs:  32  32  16  16   8   8   8   8
 -                       * 4 TCs:  64  64  32  32
 -                       * We have max 8 queues for FCoE, where 8 the is
 -                       * FCoE redirection table size. If TC for FCoE is
 -                       * less than or equal to TC3, we have enough queues
 -                       * to add max of 8 queues for FCoE, so we start FCoE
 -                       * tx descriptor from the next one, i.e., reg_idx + 1.
 -                       * If TC for FCoE is above TC3, implying 8 TC mode,
 -                       * and we need 8 for FCoE, we have to take all queues
 -                       * in that traffic class for FCoE.
 -                       */
 -                      if ((f->indices == IXGBE_FCRETA_SIZE) && (fcoe->tc > 3))
 -                              fcoe_tx_i--;
 -              }
 +              ixgbe_cache_ring_dcb(adapter);
 +              /* find out queues in TC for FCoE */
 +              fcoe_rx_i = adapter->rx_ring[fcoe->tc]->reg_idx + 1;
 +              fcoe_tx_i = adapter->tx_ring[fcoe->tc]->reg_idx + 1;
 +              /*
 +               * In 82599, the number of Tx queues for each traffic
 +               * class for both 8-TC and 4-TC modes are:
 +               * TCs  : TC0 TC1 TC2 TC3 TC4 TC5 TC6 TC7
 +               * 8 TCs:  32  32  16  16   8   8   8   8
 +               * 4 TCs:  64  64  32  32
 +               * We have max 8 queues for FCoE, where 8 is the
 +               * FCoE redirection table size. If TC for FCoE is
 +               * less than or equal to TC3, we have enough queues
 +               * to add max of 8 queues for FCoE, so we start FCoE
 +               * Tx queue from the next one, i.e., reg_idx + 1.
 +               * If TC for FCoE is above TC3, implying 8 TC mode,
 +               * and we need 8 for FCoE, we have to take all queues
 +               * in that traffic class for FCoE.
 +               */
 +              if ((f->indices == IXGBE_FCRETA_SIZE) && (fcoe->tc > 3))
 +                      fcoe_tx_i--;
 +      }
  #endif /* CONFIG_IXGBE_DCB */
 -              if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
 -                      if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
 -                          (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
 -                              ixgbe_cache_ring_fdir(adapter);
 -                      else
 -                              ixgbe_cache_ring_rss(adapter);
 +      if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
 +              if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
 +                  (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
 +                      ixgbe_cache_ring_fdir(adapter);
 +              else
 +                      ixgbe_cache_ring_rss(adapter);
  
 -                      fcoe_rx_i = f->mask;
 -                      fcoe_tx_i = f->mask;
 -              }
 -              for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) {
 -                      adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i;
 -                      adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i;
 -              }
 -              ret = true;
 +              fcoe_rx_i = f->mask;
 +              fcoe_tx_i = f->mask;
        }
 -      return ret;
 +      for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) {
 +              adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i;
 +              adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i;
 +      }
 +      return true;
  }
  
  #endif /* IXGBE_FCOE */
@@@ -4687,55 -4471,65 +4687,55 @@@ static void ixgbe_cache_ring_register(s
   **/
  static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
  {
 -      int i;
 -      int orig_node = adapter->node;
 +      int rx = 0, tx = 0, nid = adapter->node;
  
 -      for (i = 0; i < adapter->num_tx_queues; i++) {
 -              struct ixgbe_ring *ring = adapter->tx_ring[i];
 -              if (orig_node == -1) {
 -                      int cur_node = next_online_node(adapter->node);
 -                      if (cur_node == MAX_NUMNODES)
 -                              cur_node = first_online_node;
 -                      adapter->node = cur_node;
 -              }
 -              ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL,
 -                                  adapter->node);
 +      if (nid < 0 || !node_online(nid))
 +              nid = first_online_node;
 +
 +      for (; tx < adapter->num_tx_queues; tx++) {
 +              struct ixgbe_ring *ring;
 +
 +              ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, nid);
                if (!ring)
 -                      ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL);
 +                      ring = kzalloc(sizeof(*ring), GFP_KERNEL);
                if (!ring)
 -                      goto err_tx_ring_allocation;
 +                      goto err_allocation;
                ring->count = adapter->tx_ring_count;
 -              ring->queue_index = i;
 -              ring->numa_node = adapter->node;
 +              ring->queue_index = tx;
 +              ring->numa_node = nid;
 +              ring->dev = &adapter->pdev->dev;
 +              ring->netdev = adapter->netdev;
  
 -              adapter->tx_ring[i] = ring;
 +              adapter->tx_ring[tx] = ring;
        }
  
 -      /* Restore the adapter's original node */
 -      adapter->node = orig_node;
 +      for (; rx < adapter->num_rx_queues; rx++) {
 +              struct ixgbe_ring *ring;
  
 -      for (i = 0; i < adapter->num_rx_queues; i++) {
 -              struct ixgbe_ring *ring = adapter->rx_ring[i];
 -              if (orig_node == -1) {
 -                      int cur_node = next_online_node(adapter->node);
 -                      if (cur_node == MAX_NUMNODES)
 -                              cur_node = first_online_node;
 -                      adapter->node = cur_node;
 -              }
 -              ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL,
 -                                  adapter->node);
 +              ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, nid);
                if (!ring)
 -                      ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL);
 +                      ring = kzalloc(sizeof(*ring), GFP_KERNEL);
                if (!ring)
 -                      goto err_rx_ring_allocation;
 +                      goto err_allocation;
                ring->count = adapter->rx_ring_count;
 -              ring->queue_index = i;
 -              ring->numa_node = adapter->node;
 +              ring->queue_index = rx;
 +              ring->numa_node = nid;
 +              ring->dev = &adapter->pdev->dev;
 +              ring->netdev = adapter->netdev;
  
 -              adapter->rx_ring[i] = ring;
 +              adapter->rx_ring[rx] = ring;
        }
  
 -      /* Restore the adapter's original node */
 -      adapter->node = orig_node;
 -
        ixgbe_cache_ring_register(adapter);
  
        return 0;
  
 -err_rx_ring_allocation:
 -      for (i = 0; i < adapter->num_tx_queues; i++)
 -              kfree(adapter->tx_ring[i]);
 -err_tx_ring_allocation:
 +err_allocation:
 +      while (tx)
 +              kfree(adapter->tx_ring[--tx]);
 +
 +      while (rx)
 +              kfree(adapter->rx_ring[--rx]);
        return -ENOMEM;
  }
  
@@@ -4957,11 -4751,6 +4957,11 @@@ err_set_interrupt
        return err;
  }
  
 +static void ring_free_rcu(struct rcu_head *head)
 +{
 +      kfree(container_of(head, struct ixgbe_ring, rcu));
 +}
 +
  /**
   * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
   * @adapter: board private structure to clear interrupt scheme on
@@@ -4978,15 -4767,13 +4978,18 @@@ void ixgbe_clear_interrupt_scheme(struc
                adapter->tx_ring[i] = NULL;
        }
        for (i = 0; i < adapter->num_rx_queues; i++) {
 -              kfree(adapter->rx_ring[i]);
 +              struct ixgbe_ring *ring = adapter->rx_ring[i];
 +
 +              /* ixgbe_get_stats64() might access this ring, we must wait
 +               * a grace period before freeing it.
 +               */
 +              call_rcu(&ring->rcu, ring_free_rcu);
                adapter->rx_ring[i] = NULL;
        }
  
+       adapter->num_tx_queues = 0;
+       adapter->num_rx_queues = 0;
        ixgbe_free_q_vectors(adapter);
        ixgbe_reset_interrupt_capability(adapter);
  }
@@@ -5060,7 -4847,6 +5063,7 @@@ static int __devinit ixgbe_sw_init(stru
        int j;
        struct tc_configuration *tc;
  #endif
 +      int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
  
        /* PCI config space info */
  
        adapter->ring_feature[RING_F_RSS].indices = rss;
        adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
        adapter->ring_feature[RING_F_DCB].indices = IXGBE_MAX_DCB_INDICES;
 -      if (hw->mac.type == ixgbe_mac_82598EB) {
 +      switch (hw->mac.type) {
 +      case ixgbe_mac_82598EB:
                if (hw->device_id == IXGBE_DEV_ID_82598AT)
                        adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
                adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598;
 -      } else if (hw->mac.type == ixgbe_mac_82599EB) {
 +              break;
 +      case ixgbe_mac_82599EB:
 +      case ixgbe_mac_X540:
                adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599;
                adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
                adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
                adapter->fcoe.up = IXGBE_FCOE_DEFTC;
  #endif
  #endif /* IXGBE_FCOE */
 +              break;
 +      default:
 +              break;
        }
  
  #ifdef CONFIG_IXGBE_DCB
  #ifdef CONFIG_DCB
        adapter->last_lfc_mode = hw->fc.current_mode;
  #endif
 -      hw->fc.high_water = IXGBE_DEFAULT_FCRTH;
 -      hw->fc.low_water = IXGBE_DEFAULT_FCRTL;
 +      hw->fc.high_water = FC_HIGH_WATER(max_frame);
 +      hw->fc.low_water = FC_LOW_WATER(max_frame);
        hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
        hw->fc.send_xon = true;
        hw->fc.disable_fc_autoneg = false;
  
  /**
   * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
 - * @adapter: board private structure
   * @tx_ring:    tx descriptor ring (for a specific queue) to setup
   *
   * Return 0 on success, negative on failure
   **/
 -int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
 -                           struct ixgbe_ring *tx_ring)
 +int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
  {
 -      struct pci_dev *pdev = adapter->pdev;
 +      struct device *dev = tx_ring->dev;
        int size;
  
        size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
 -      tx_ring->tx_buffer_info = vmalloc_node(size, tx_ring->numa_node);
 +      tx_ring->tx_buffer_info = vzalloc_node(size, tx_ring->numa_node);
        if (!tx_ring->tx_buffer_info)
 -              tx_ring->tx_buffer_info = vmalloc(size);
 +              tx_ring->tx_buffer_info = vzalloc(size);
        if (!tx_ring->tx_buffer_info)
                goto err;
 -      memset(tx_ring->tx_buffer_info, 0, size);
  
        /* round up to nearest 4K */
        tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
        tx_ring->size = ALIGN(tx_ring->size, 4096);
  
 -      tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
 +      tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
                                           &tx_ring->dma, GFP_KERNEL);
        if (!tx_ring->desc)
                goto err;
  err:
        vfree(tx_ring->tx_buffer_info);
        tx_ring->tx_buffer_info = NULL;
 -      e_err(probe, "Unable to allocate memory for the Tx descriptor ring\n");
 +      dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
        return -ENOMEM;
  }
  
@@@ -5234,7 -5017,7 +5237,7 @@@ static int ixgbe_setup_all_tx_resources
        int i, err = 0;
  
        for (i = 0; i < adapter->num_tx_queues; i++) {
 -              err = ixgbe_setup_tx_resources(adapter, adapter->tx_ring[i]);
 +              err = ixgbe_setup_tx_resources(adapter->tx_ring[i]);
                if (!err)
                        continue;
                e_err(probe, "Allocation for Tx Queue %u failed\n", i);
  
  /**
   * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
 - * @adapter: board private structure
   * @rx_ring:    rx descriptor ring (for a specific queue) to setup
   *
   * Returns 0 on success, negative on failure
   **/
 -int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
 -                           struct ixgbe_ring *rx_ring)
 +int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring)
  {
 -      struct pci_dev *pdev = adapter->pdev;
 +      struct device *dev = rx_ring->dev;
        int size;
  
        size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
 -      rx_ring->rx_buffer_info = vmalloc_node(size, adapter->node);
 +      rx_ring->rx_buffer_info = vzalloc_node(size, rx_ring->numa_node);
        if (!rx_ring->rx_buffer_info)
 -              rx_ring->rx_buffer_info = vmalloc(size);
 -      if (!rx_ring->rx_buffer_info) {
 -              e_err(probe, "vmalloc allocation failed for the Rx "
 -                    "descriptor ring\n");
 -              goto alloc_failed;
 -      }
 -      memset(rx_ring->rx_buffer_info, 0, size);
 +              rx_ring->rx_buffer_info = vzalloc(size);
 +      if (!rx_ring->rx_buffer_info)
 +              goto err;
  
        /* Round up to nearest 4K */
        rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
        rx_ring->size = ALIGN(rx_ring->size, 4096);
  
 -      rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
 +      rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
                                           &rx_ring->dma, GFP_KERNEL);
  
 -      if (!rx_ring->desc) {
 -              e_err(probe, "Memory allocation failed for the Rx "
 -                    "descriptor ring\n");
 -              vfree(rx_ring->rx_buffer_info);
 -              goto alloc_failed;
 -      }
 +      if (!rx_ring->desc)
 +              goto err;
  
        rx_ring->next_to_clean = 0;
        rx_ring->next_to_use = 0;
  
        return 0;
 -
 -alloc_failed:
 +err:
 +      vfree(rx_ring->rx_buffer_info);
 +      rx_ring->rx_buffer_info = NULL;
 +      dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
        return -ENOMEM;
  }
  
   *
   * Return 0 on success, negative on failure
   **/
 -
  static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
  {
        int i, err = 0;
  
        for (i = 0; i < adapter->num_rx_queues; i++) {
 -              err = ixgbe_setup_rx_resources(adapter, adapter->rx_ring[i]);
 +              err = ixgbe_setup_rx_resources(adapter->rx_ring[i]);
                if (!err)
                        continue;
                e_err(probe, "Allocation for Rx Queue %u failed\n", i);
  
  /**
   * ixgbe_free_tx_resources - Free Tx Resources per Queue
 - * @adapter: board private structure
   * @tx_ring: Tx descriptor ring for a specific queue
   *
   * Free all transmit software resources
   **/
 -void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter,
 -                           struct ixgbe_ring *tx_ring)
 +void ixgbe_free_tx_resources(struct ixgbe_ring *tx_ring)
  {
 -      struct pci_dev *pdev = adapter->pdev;
 -
 -      ixgbe_clean_tx_ring(adapter, tx_ring);
 +      ixgbe_clean_tx_ring(tx_ring);
  
        vfree(tx_ring->tx_buffer_info);
        tx_ring->tx_buffer_info = NULL;
  
 -      dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
 -                        tx_ring->dma);
 +      /* if not set, then don't free */
 +      if (!tx_ring->desc)
 +              return;
 +
 +      dma_free_coherent(tx_ring->dev, tx_ring->size,
 +                        tx_ring->desc, tx_ring->dma);
  
        tx_ring->desc = NULL;
  }
@@@ -5343,28 -5135,28 +5346,28 @@@ static void ixgbe_free_all_tx_resources
  
        for (i = 0; i < adapter->num_tx_queues; i++)
                if (adapter->tx_ring[i]->desc)
 -                      ixgbe_free_tx_resources(adapter, adapter->tx_ring[i]);
 +                      ixgbe_free_tx_resources(adapter->tx_ring[i]);
  }
  
  /**
   * ixgbe_free_rx_resources - Free Rx Resources
 - * @adapter: board private structure
   * @rx_ring: ring to clean the resources from
   *
   * Free all receive software resources
   **/
 -void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter,
 -                           struct ixgbe_ring *rx_ring)
 +void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring)
  {
 -      struct pci_dev *pdev = adapter->pdev;
 -
 -      ixgbe_clean_rx_ring(adapter, rx_ring);
 +      ixgbe_clean_rx_ring(rx_ring);
  
        vfree(rx_ring->rx_buffer_info);
        rx_ring->rx_buffer_info = NULL;
  
 -      dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
 -                        rx_ring->dma);
 +      /* if not set, then don't free */
 +      if (!rx_ring->desc)
 +              return;
 +
 +      dma_free_coherent(rx_ring->dev, rx_ring->size,
 +                        rx_ring->desc, rx_ring->dma);
  
        rx_ring->desc = NULL;
  }
@@@ -5381,7 -5173,7 +5384,7 @@@ static void ixgbe_free_all_rx_resources
  
        for (i = 0; i < adapter->num_rx_queues; i++)
                if (adapter->rx_ring[i]->desc)
 -                      ixgbe_free_rx_resources(adapter, adapter->rx_ring[i]);
 +                      ixgbe_free_rx_resources(adapter->rx_ring[i]);
  }
  
  /**
  static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
  {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
 +      struct ixgbe_hw *hw = &adapter->hw;
        int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
  
        /* MTU < 68 is an error and causes problems on some kernels */
        /* must set new MTU before calling down or up */
        netdev->mtu = new_mtu;
  
 +      hw->fc.high_water = FC_HIGH_WATER(max_frame);
 +      hw->fc.low_water = FC_LOW_WATER(max_frame);
 +
        if (netif_running(netdev))
                ixgbe_reinit_locked(adapter);
  
@@@ -5503,8 -5291,8 +5506,8 @@@ static int ixgbe_close(struct net_devic
  #ifdef CONFIG_PM
  static int ixgbe_resume(struct pci_dev *pdev)
  {
 -      struct net_device *netdev = pci_get_drvdata(pdev);
 -      struct ixgbe_adapter *adapter = netdev_priv(netdev);
 +      struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
 +      struct net_device *netdev = adapter->netdev;
        u32 err;
  
        pci_set_power_state(pdev, PCI_D0);
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
  
        if (netif_running(netdev)) {
 -              err = ixgbe_open(adapter->netdev);
 +              err = ixgbe_open(netdev);
                if (err)
                        return err;
        }
  
  static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
  {
 -      struct net_device *netdev = pci_get_drvdata(pdev);
 -      struct ixgbe_adapter *adapter = netdev_priv(netdev);
 +      struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
 +      struct net_device *netdev = adapter->netdev;
        struct ixgbe_hw *hw = &adapter->hw;
        u32 ctrl, fctrl;
        u32 wufc = adapter->wol;
                ixgbe_free_all_rx_resources(adapter);
        }
  
 +      ixgbe_clear_interrupt_scheme(adapter);
 +
  #ifdef CONFIG_PM
        retval = pci_save_state(pdev);
        if (retval)
                IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
        }
  
 -      if (wufc && hw->mac.type == ixgbe_mac_82599EB)
 -              pci_wake_from_d3(pdev, true);
 -      else
 +      switch (hw->mac.type) {
 +      case ixgbe_mac_82598EB:
                pci_wake_from_d3(pdev, false);
 +              break;
 +      case ixgbe_mac_82599EB:
 +      case ixgbe_mac_X540:
 +              pci_wake_from_d3(pdev, !!wufc);
 +              break;
 +      default:
 +              break;
 +      }
  
        *enable_wake = !!wufc;
  
 -      ixgbe_clear_interrupt_scheme(adapter);
 -
        ixgbe_release_hw_control(adapter);
  
        pci_disable_device(pdev);
@@@ -5656,12 -5437,10 +5659,12 @@@ void ixgbe_update_stats(struct ixgbe_ad
  {
        struct net_device *netdev = adapter->netdev;
        struct ixgbe_hw *hw = &adapter->hw;
 +      struct ixgbe_hw_stats *hwstats = &adapter->stats;
        u64 total_mpc = 0;
        u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
 -      u64 non_eop_descs = 0, restart_queue = 0;
 -      struct ixgbe_hw_stats *hwstats = &adapter->stats;
 +      u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0;
 +      u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
 +      u64 bytes = 0, packets = 0;
  
        if (test_bit(__IXGBE_DOWN, &adapter->state) ||
            test_bit(__IXGBE_RESETTING, &adapter->state))
                        adapter->hw_rx_no_dma_resources +=
                                IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
                for (i = 0; i < adapter->num_rx_queues; i++) {
 -                      rsc_count += adapter->rx_ring[i]->rsc_count;
 -                      rsc_flush += adapter->rx_ring[i]->rsc_flush;
 +                      rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count;
 +                      rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush;
                }
                adapter->rsc_total_count = rsc_count;
                adapter->rsc_total_flush = rsc_flush;
        }
  
 +      for (i = 0; i < adapter->num_rx_queues; i++) {
 +              struct ixgbe_ring *rx_ring = adapter->rx_ring[i];
 +              non_eop_descs += rx_ring->rx_stats.non_eop_descs;
 +              alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
 +              alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
 +              bytes += rx_ring->stats.bytes;
 +              packets += rx_ring->stats.packets;
 +      }
 +      adapter->non_eop_descs = non_eop_descs;
 +      adapter->alloc_rx_page_failed = alloc_rx_page_failed;
 +      adapter->alloc_rx_buff_failed = alloc_rx_buff_failed;
 +      netdev->stats.rx_bytes = bytes;
 +      netdev->stats.rx_packets = packets;
 +
 +      bytes = 0;
 +      packets = 0;
        /* gather some stats to the adapter struct that are per queue */
 -      for (i = 0; i < adapter->num_tx_queues; i++)
 -              restart_queue += adapter->tx_ring[i]->restart_queue;
 +      for (i = 0; i < adapter->num_tx_queues; i++) {
 +              struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
 +              restart_queue += tx_ring->tx_stats.restart_queue;
 +              tx_busy += tx_ring->tx_stats.tx_busy;
 +              bytes += tx_ring->stats.bytes;
 +              packets += tx_ring->stats.packets;
 +      }
        adapter->restart_queue = restart_queue;
 -
 -      for (i = 0; i < adapter->num_rx_queues; i++)
 -              non_eop_descs += adapter->rx_ring[i]->non_eop_descs;
 -      adapter->non_eop_descs = non_eop_descs;
 +      adapter->tx_busy = tx_busy;
 +      netdev->stats.tx_bytes = bytes;
 +      netdev->stats.tx_packets = packets;
  
        hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
        for (i = 0; i < 8; i++) {
                hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
                hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
                hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
 -              if (hw->mac.type == ixgbe_mac_82599EB) {
 -                      hwstats->pxonrxc[i] +=
 -                              IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
 -                      hwstats->pxoffrxc[i] +=
 -                              IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
 -                      hwstats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
 -              } else {
 +              switch (hw->mac.type) {
 +              case ixgbe_mac_82598EB:
                        hwstats->pxonrxc[i] +=
                                IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
 -                      hwstats->pxoffrxc[i] +=
 -                              IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
 +                      break;
 +              case ixgbe_mac_82599EB:
 +              case ixgbe_mac_X540:
 +                      hwstats->pxonrxc[i] +=
 +                              IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
 +                      break;
 +              default:
 +                      break;
                }
                hwstats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
                hwstats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
        /* work around hardware counting issue */
        hwstats->gprc -= missed_rx;
  
 +      ixgbe_update_xoff_received(adapter);
 +
        /* 82598 hardware only has a 32 bit counter in the high register */
 -      if (hw->mac.type == ixgbe_mac_82599EB) {
 -              u64 tmp;
 +      switch (hw->mac.type) {
 +      case ixgbe_mac_82598EB:
 +              hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
 +              hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
 +              hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
 +              hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
 +              break;
 +      case ixgbe_mac_82599EB:
 +      case ixgbe_mac_X540:
                hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
 -              tmp = IXGBE_READ_REG(hw, IXGBE_GORCH) & 0xF;
 -                                              /* 4 high bits of GORC */
 -              hwstats->gorc += (tmp << 32);
 +              IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */
                hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
 -              tmp = IXGBE_READ_REG(hw, IXGBE_GOTCH) & 0xF;
 -                                              /* 4 high bits of GOTC */
 -              hwstats->gotc += (tmp << 32);
 +              IXGBE_READ_REG(hw, IXGBE_GOTCH); /* to clear */
                hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
 -              IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
 +              IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
                hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
 -              hwstats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
                hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
                hwstats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
  #ifdef IXGBE_FCOE
                hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
                hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
  #endif /* IXGBE_FCOE */
 -      } else {
 -              hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
 -              hwstats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
 -              hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
 -              hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
 -              hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
 +              break;
 +      default:
 +              break;
        }
        bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
        hwstats->bprc += bprc;
@@@ -5947,8 -5704,8 +5950,8 @@@ static void ixgbe_fdir_reinit_task(stru
  
        if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
                for (i = 0; i < adapter->num_tx_queues; i++)
 -                      set_bit(__IXGBE_FDIR_INIT_DONE,
 -                              &(adapter->tx_ring[i]->reinit_state));
 +                      set_bit(__IXGBE_TX_FDIR_INIT_DONE,
 +                              &(adapter->tx_ring[i]->state));
        } else {
                e_err(probe, "failed to finish FDIR re-initialization, "
                      "ignored adding FDIR ATR filters\n");
@@@ -6010,27 -5767,17 +6013,27 @@@ static void ixgbe_watchdog_task(struct 
                if (!netif_carrier_ok(netdev)) {
                        bool flow_rx, flow_tx;
  
 -                      if (hw->mac.type == ixgbe_mac_82599EB) {
 -                              u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
 -                              u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
 -                              flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE);
 -                              flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X);
 -                      } else {
 +                      switch (hw->mac.type) {
 +                      case ixgbe_mac_82598EB: {
                                u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
                                u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
                                flow_rx = !!(frctl & IXGBE_FCTRL_RFCE);
                                flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X);
                        }
 +                              break;
 +                      case ixgbe_mac_82599EB:
 +                      case ixgbe_mac_X540: {
 +                              u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
 +                              u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
 +                              flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE);
 +                              flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X);
 +                      }
 +                              break;
 +                      default:
 +                              flow_tx = false;
 +                              flow_rx = false;
 +                              break;
 +                      }
  
                        e_info(drv, "NIC Link is Up %s, Flow Control: %s\n",
                               (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
                        netif_carrier_on(netdev);
                } else {
                        /* Force detection of hung controller */
 -                      adapter->detect_tx_hung = true;
 +                      for (i = 0; i < adapter->num_tx_queues; i++) {
 +                              tx_ring = adapter->tx_ring[i];
 +                              set_check_for_tx_hang(tx_ring);
 +                      }
                }
        } else {
                adapter->link_up = false;
@@@ -6259,17 -6003,15 +6262,17 @@@ static bool ixgbe_tx_csum(struct ixgbe_
  static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
                        struct ixgbe_ring *tx_ring,
                        struct sk_buff *skb, u32 tx_flags,
 -                      unsigned int first)
 +                      unsigned int first, const u8 hdr_len)
  {
 -      struct pci_dev *pdev = adapter->pdev;
 +      struct device *dev = tx_ring->dev;
        struct ixgbe_tx_buffer *tx_buffer_info;
        unsigned int len;
        unsigned int total = skb->len;
        unsigned int offset = 0, size, count = 0, i;
        unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
        unsigned int f;
 +      unsigned int bytecount = skb->len;
 +      u16 gso_segs = 1;
  
        i = tx_ring->next_to_use;
  
  
                tx_buffer_info->length = size;
                tx_buffer_info->mapped_as_page = false;
 -              tx_buffer_info->dma = dma_map_single(&pdev->dev,
 +              tx_buffer_info->dma = dma_map_single(dev,
                                                     skb->data + offset,
                                                     size, DMA_TO_DEVICE);
 -              if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
 +              if (dma_mapping_error(dev, tx_buffer_info->dma))
                        goto dma_error;
                tx_buffer_info->time_stamp = jiffies;
                tx_buffer_info->next_to_watch = i;
                        size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
  
                        tx_buffer_info->length = size;
 -                      tx_buffer_info->dma = dma_map_page(&adapter->pdev->dev,
 +                      tx_buffer_info->dma = dma_map_page(dev,
                                                           frag->page,
                                                           offset, size,
                                                           DMA_TO_DEVICE);
                        tx_buffer_info->mapped_as_page = true;
 -                      if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
 +                      if (dma_mapping_error(dev, tx_buffer_info->dma))
                                goto dma_error;
                        tx_buffer_info->time_stamp = jiffies;
                        tx_buffer_info->next_to_watch = i;
                        break;
        }
  
 +      if (tx_flags & IXGBE_TX_FLAGS_TSO)
 +              gso_segs = skb_shinfo(skb)->gso_segs;
 +#ifdef IXGBE_FCOE
 +      /* adjust for FCoE Sequence Offload */
 +      else if (tx_flags & IXGBE_TX_FLAGS_FSO)
 +              gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
 +                                      skb_shinfo(skb)->gso_size);
 +#endif /* IXGBE_FCOE */
 +      bytecount += (gso_segs - 1) * hdr_len;
 +
 +      /* multiply data chunks by size of headers */
 +      tx_ring->tx_buffer_info[i].bytecount = bytecount;
 +      tx_ring->tx_buffer_info[i].gso_segs = gso_segs;
        tx_ring->tx_buffer_info[i].skb = skb;
        tx_ring->tx_buffer_info[first].next_to_watch = i;
  
@@@ -6373,13 -6102,14 +6376,13 @@@ dma_error
                        i += tx_ring->count;
                i--;
                tx_buffer_info = &tx_ring->tx_buffer_info[i];
 -              ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
 +              ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
        }
  
        return 0;
  }
  
 -static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
 -                         struct ixgbe_ring *tx_ring,
 +static void ixgbe_tx_queue(struct ixgbe_ring *tx_ring,
                           int tx_flags, int count, u32 paylen, u8 hdr_len)
  {
        union ixgbe_adv_tx_desc *tx_desc = NULL;
        wmb();
  
        tx_ring->next_to_use = i;
 -      writel(i, adapter->hw.hw_addr + tx_ring->tail);
 +      writel(i, tx_ring->tail);
  }
  
  static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
 -                    int queue, u32 tx_flags, __be16 protocol)
 +                    u8 queue, u32 tx_flags, __be16 protocol)
  {
        struct ixgbe_atr_input atr_input;
 -      struct tcphdr *th;
        struct iphdr *iph = ip_hdr(skb);
        struct ethhdr *eth = (struct ethhdr *)skb->data;
 -      u16 vlan_id, src_port, dst_port, flex_bytes;
 -      u32 src_ipv4_addr, dst_ipv4_addr;
 -      u8 l4type = 0;
 +      struct tcphdr *th;
 +      u16 vlan_id;
  
 -      /* Right now, we support IPv4 only */
 -      if (protocol != htons(ETH_P_IP))
 -              return;
 -      /* check if we're UDP or TCP */
 -      if (iph->protocol == IPPROTO_TCP) {
 -              th = tcp_hdr(skb);
 -              src_port = th->source;
 -              dst_port = th->dest;
 -              l4type |= IXGBE_ATR_L4TYPE_TCP;
 -              /* l4type IPv4 type is 0, no need to assign */
 -      } else {
 -              /* Unsupported L4 header, just bail here */
 +      /* Right now, we support IPv4 w/ TCP only */
 +      if (protocol != htons(ETH_P_IP) ||
 +          iph->protocol != IPPROTO_TCP)
                return;
 -      }
  
        memset(&atr_input, 0, sizeof(struct ixgbe_atr_input));
  
        vlan_id = (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK) >>
                   IXGBE_TX_FLAGS_VLAN_SHIFT;
 -      src_ipv4_addr = iph->saddr;
 -      dst_ipv4_addr = iph->daddr;
 -      flex_bytes = eth->h_proto;
 +
 +      th = tcp_hdr(skb);
  
        ixgbe_atr_set_vlan_id_82599(&atr_input, vlan_id);
 -      ixgbe_atr_set_src_port_82599(&atr_input, dst_port);
 -      ixgbe_atr_set_dst_port_82599(&atr_input, src_port);
 -      ixgbe_atr_set_flex_byte_82599(&atr_input, flex_bytes);
 -      ixgbe_atr_set_l4type_82599(&atr_input, l4type);
 +      ixgbe_atr_set_src_port_82599(&atr_input, th->dest);
 +      ixgbe_atr_set_dst_port_82599(&atr_input, th->source);
 +      ixgbe_atr_set_flex_byte_82599(&atr_input, eth->h_proto);
 +      ixgbe_atr_set_l4type_82599(&atr_input, IXGBE_ATR_L4TYPE_TCP);
        /* src and dst are inverted, think how the receiver sees them */
 -      ixgbe_atr_set_src_ipv4_82599(&atr_input, dst_ipv4_addr);
 -      ixgbe_atr_set_dst_ipv4_82599(&atr_input, src_ipv4_addr);
 +      ixgbe_atr_set_src_ipv4_82599(&atr_input, iph->daddr);
 +      ixgbe_atr_set_dst_ipv4_82599(&atr_input, iph->saddr);
  
        /* This assumes the Rx queue and Tx queue are bound to the same CPU */
        ixgbe_fdir_add_signature_filter_82599(&adapter->hw, &atr_input, queue);
  }
  
 -static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
 -                               struct ixgbe_ring *tx_ring, int size)
 +static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size)
  {
 -      netif_stop_subqueue(netdev, tx_ring->queue_index);
 +      netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
        /* Herbert's original patch had:
         *  smp_mb__after_netif_stop_queue();
         * but since that doesn't exist yet, just open code it. */
                return -EBUSY;
  
        /* A reprieve! - use start_queue because it doesn't call schedule */
 -      netif_start_subqueue(netdev, tx_ring->queue_index);
 -      ++tx_ring->restart_queue;
 +      netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
 +      ++tx_ring->tx_stats.restart_queue;
        return 0;
  }
  
 -static int ixgbe_maybe_stop_tx(struct net_device *netdev,
 -                            struct ixgbe_ring *tx_ring, int size)
 +static int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size)
  {
        if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
                return 0;
 -      return __ixgbe_maybe_stop_tx(netdev, tx_ring, size);
 +      return __ixgbe_maybe_stop_tx(tx_ring, size);
  }
  
  static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
        return skb_tx_hash(dev, skb);
  }
  
 -netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev,
 +netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
                          struct ixgbe_adapter *adapter,
                          struct ixgbe_ring *tx_ring)
  {
 +      struct net_device *netdev = tx_ring->netdev;
        struct netdev_queue *txq;
        unsigned int first;
        unsigned int tx_flags = 0;
        for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
                count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
  
 -      if (ixgbe_maybe_stop_tx(netdev, tx_ring, count)) {
 -              adapter->tx_busy++;
 +      if (ixgbe_maybe_stop_tx(tx_ring, count)) {
 +              tx_ring->tx_stats.tx_busy++;
                return NETDEV_TX_BUSY;
        }
  
                        tx_flags |= IXGBE_TX_FLAGS_CSUM;
        }
  
 -      count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first);
 +      count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first, hdr_len);
        if (count) {
                /* add the ATR filter if ATR is on */
                if (tx_ring->atr_sample_rate) {
                        ++tx_ring->atr_count;
                        if ((tx_ring->atr_count >= tx_ring->atr_sample_rate) &&
 -                           test_bit(__IXGBE_FDIR_INIT_DONE,
 -                                    &tx_ring->reinit_state)) {
 +                           test_bit(__IXGBE_TX_FDIR_INIT_DONE,
 +                                    &tx_ring->state)) {
                                ixgbe_atr(adapter, skb, tx_ring->queue_index,
                                          tx_flags, protocol);
                                tx_ring->atr_count = 0;
                txq = netdev_get_tx_queue(netdev, tx_ring->queue_index);
                txq->tx_bytes += skb->len;
                txq->tx_packets++;
 -              ixgbe_tx_queue(adapter, tx_ring, tx_flags, count, skb->len,
 -                             hdr_len);
 -              ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
 +              ixgbe_tx_queue(tx_ring, tx_flags, count, skb->len, hdr_len);
 +              ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
  
        } else {
                dev_kfree_skb_any(skb);
@@@ -6680,7 -6425,7 +6683,7 @@@ static netdev_tx_t ixgbe_xmit_frame(str
        struct ixgbe_ring *tx_ring;
  
        tx_ring = adapter->tx_ring[skb->queue_mapping];
 -      return ixgbe_xmit_frame_ring(skb, netdev, adapter, tx_ring);
 +      return ixgbe_xmit_frame_ring(skb, adapter, tx_ring);
  }
  
  /**
@@@ -6821,23 -6566,20 +6824,23 @@@ static struct rtnl_link_stats64 *ixgbe_
  
        /* accurate rx/tx bytes/packets stats */
        dev_txq_stats_fold(netdev, stats);
 +      rcu_read_lock();
        for (i = 0; i < adapter->num_rx_queues; i++) {
 -              struct ixgbe_ring *ring = adapter->rx_ring[i];
 +              struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]);
                u64 bytes, packets;
                unsigned int start;
  
 -              do {
 -                      start = u64_stats_fetch_begin_bh(&ring->syncp);
 -                      packets = ring->stats.packets;
 -                      bytes   = ring->stats.bytes;
 -              } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
 -              stats->rx_packets += packets;
 -              stats->rx_bytes   += bytes;
 +              if (ring) {
 +                      do {
 +                              start = u64_stats_fetch_begin_bh(&ring->syncp);
 +                              packets = ring->stats.packets;
 +                              bytes   = ring->stats.bytes;
 +                      } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
 +                      stats->rx_packets += packets;
 +                      stats->rx_bytes   += bytes;
 +              }
        }
 -
 +      rcu_read_unlock();
        /* following stats updated by ixgbe_watchdog_task() */
        stats->multicast        = netdev->stats.multicast;
        stats->rx_errors        = netdev->stats.rx_errors;
@@@ -6952,12 -6694,11 +6955,12 @@@ static int __devinit ixgbe_probe(struc
        const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
        static int cards_found;
        int i, err, pci_using_dac;
 +      u8 part_str[IXGBE_PBANUM_LENGTH];
        unsigned int indices = num_possible_cpus();
  #ifdef IXGBE_FCOE
        u16 device_caps;
  #endif
 -      u32 part_num, eec;
 +      u32 eec;
  
        /* Catch broken hardware that put the wrong VF device ID in
         * the PCIe SR-IOV capability.
  
        SET_NETDEV_DEV(netdev, &pdev->dev);
  
 -      pci_set_drvdata(pdev, netdev);
        adapter = netdev_priv(netdev);
 +      pci_set_drvdata(pdev, adapter);
  
        adapter->netdev = netdev;
        adapter->pdev = pdev;
                goto err_sw_init;
  
        /* Make it possible the adapter to be woken up via WOL */
 -      if (adapter->hw.mac.type == ixgbe_mac_82599EB)
 +      switch (adapter->hw.mac.type) {
 +      case ixgbe_mac_82599EB:
 +      case ixgbe_mac_X540:
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
 +              break;
 +      default:
 +              break;
 +      }
  
        /*
         * If there is a fan on this device and it has failed log the
                goto err_eeprom;
        }
  
 -      /* power down the optics */
 -      if (hw->phy.multispeed_fiber)
 +      /* power down the optics for multispeed fiber and 82599 SFP+ fiber */
 +      if (hw->mac.ops.disable_tx_laser &&
 +          ((hw->phy.multispeed_fiber) ||
 +           ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
 +            (hw->mac.type == ixgbe_mac_82599EB))))
                hw->mac.ops.disable_tx_laser(hw);
  
        init_timer(&adapter->watchdog_timer);
                goto err_sw_init;
  
        switch (pdev->device) {
 +      case IXGBE_DEV_ID_82599_SFP:
 +              /* Only this subdevice supports WOL */
 +              if (pdev->subsystem_device == IXGBE_SUBDEV_ID_82599_SFP)
 +                      adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX |
 +                                      IXGBE_WUFC_MC | IXGBE_WUFC_BC);
 +              break;
 +      case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
 +              /* All except this subdevice support WOL */
 +              if (pdev->subsystem_device != IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ)
 +                      adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX |
 +                                      IXGBE_WUFC_MC | IXGBE_WUFC_BC);
 +              break;
        case IXGBE_DEV_ID_82599_KX4:
                adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX |
                                IXGBE_WUFC_MC | IXGBE_WUFC_BC);
                    hw->bus.width == ixgbe_bus_width_pcie_x1 ? "Width x1" :
                    "Unknown"),
                   netdev->dev_addr);
 -      ixgbe_read_pba_num_generic(hw, &part_num);
 +
 +      err = ixgbe_read_pba_string_generic(hw, part_str, IXGBE_PBANUM_LENGTH);
 +      if (err)
 +              strcpy(part_str, "Unknown");
        if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
 -              e_dev_info("MAC: %d, PHY: %d, SFP+: %d, "
 -                         "PBA No: %06x-%03x\n",
 +              e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n",
                           hw->mac.type, hw->phy.type, hw->phy.sfp_type,
 -                         (part_num >> 8), (part_num & 0xff));
 +                         part_str);
        else
 -              e_dev_info("MAC: %d, PHY: %d, PBA No: %06x-%03x\n",
 -                         hw->mac.type, hw->phy.type,
 -                         (part_num >> 8), (part_num & 0xff));
 +              e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n",
 +                         hw->mac.type, hw->phy.type, part_str);
  
        if (hw->bus.width <= ixgbe_bus_width_pcie_x4) {
                e_dev_warn("PCI-Express bandwidth available for this card is "
@@@ -7366,8 -7085,8 +7369,8 @@@ err_dma
   **/
  static void __devexit ixgbe_remove(struct pci_dev *pdev)
  {
 -      struct net_device *netdev = pci_get_drvdata(pdev);
 -      struct ixgbe_adapter *adapter = netdev_priv(netdev);
 +      struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
 +      struct net_device *netdev = adapter->netdev;
  
        set_bit(__IXGBE_DOWN, &adapter->state);
        /* clear the module not found bit to make sure the worker won't
  static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
                                                pci_channel_state_t state)
  {
 -      struct net_device *netdev = pci_get_drvdata(pdev);
 -      struct ixgbe_adapter *adapter = netdev_priv(netdev);
 +      struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
 +      struct net_device *netdev = adapter->netdev;
  
        netif_device_detach(netdev);
  
   */
  static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
  {
 -      struct net_device *netdev = pci_get_drvdata(pdev);
 -      struct ixgbe_adapter *adapter = netdev_priv(netdev);
 +      struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
        pci_ers_result_t result;
        int err;
  
   */
  static void ixgbe_io_resume(struct pci_dev *pdev)
  {
 -      struct net_device *netdev = pci_get_drvdata(pdev);
 -      struct ixgbe_adapter *adapter = netdev_priv(netdev);
 +      struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
 +      struct net_device *netdev = adapter->netdev;
  
        if (netif_running(netdev)) {
                if (ixgbe_up(adapter)) {
@@@ -7565,7 -7285,6 +7568,7 @@@ static void __exit ixgbe_exit_module(vo
        dca_unregister_notify(&dca_notifier);
  #endif
        pci_unregister_driver(&ixgbe_driver);
 +      rcu_barrier(); /* Wait for completion of call_rcu()'s */
  }
  
  #ifdef CONFIG_IXGBE_DCA
@@@ -1,6 -1,6 +1,6 @@@
  /*
   * Copyright (C) 1999 - 2010 Intel Corporation.
-  * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD.
+  * Copyright (C) 2010 OKI SEMICONDUCTOR CO., LTD.
   *
   * This code was derived from the Intel e1000e Linux driver.
   *
@@@ -1523,11 -1523,12 +1523,11 @@@ int pch_gbe_setup_tx_resources(struct p
        int desNo;
  
        size = (int)sizeof(struct pch_gbe_buffer) * tx_ring->count;
 -      tx_ring->buffer_info = vmalloc(size);
 +      tx_ring->buffer_info = vzalloc(size);
        if (!tx_ring->buffer_info) {
                pr_err("Unable to allocate memory for the buffer infomation\n");
                return -ENOMEM;
        }
 -      memset(tx_ring->buffer_info, 0, size);
  
        tx_ring->size = tx_ring->count * (int)sizeof(struct pch_gbe_tx_desc);
  
@@@ -1572,11 -1573,12 +1572,11 @@@ int pch_gbe_setup_rx_resources(struct p
        int desNo;
  
        size = (int)sizeof(struct pch_gbe_buffer) * rx_ring->count;
 -      rx_ring->buffer_info = vmalloc(size);
 +      rx_ring->buffer_info = vzalloc(size);
        if (!rx_ring->buffer_info) {
                pr_err("Unable to allocate memory for the receive descriptor ring\n");
                return -ENOMEM;
        }
 -      memset(rx_ring->buffer_info, 0, size);
        rx_ring->size = rx_ring->count * (int)sizeof(struct pch_gbe_rx_desc);
        rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
                                           &rx_ring->dma, GFP_KERNEL);
@@@ -2319,7 -2321,7 +2319,7 @@@ static int pch_gbe_probe(struct pci_de
        netdev->watchdog_timeo = PCH_GBE_WATCHDOG_PERIOD;
        netif_napi_add(netdev, &adapter->napi,
                       pch_gbe_napi_poll, PCH_GBE_RX_WEIGHT);
 -      netdev->features = NETIF_F_HW_CSUM | NETIF_F_GRO;
 +      netdev->features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_GRO;
        pch_gbe_set_ethtool_ops(netdev);
  
        pch_gbe_mac_reset_hw(&adapter->hw);
        pch_gbe_check_options(adapter);
  
        if (adapter->tx_csum)
 -              netdev->features |= NETIF_F_HW_CSUM;
 +              netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
        else
 -              netdev->features &= ~NETIF_F_HW_CSUM;
 +              netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
  
        /* initialize the wol settings based on the eeprom settings */
        adapter->wake_up_evt = PCH_GBE_WL_INIT_SETTING;
@@@ -2462,8 -2464,8 +2462,8 @@@ static void __exit pch_gbe_exit_module(
  module_init(pch_gbe_init_module);
  module_exit(pch_gbe_exit_module);
  
- MODULE_DESCRIPTION("OKI semiconductor PCH Gigabit ethernet Driver");
- MODULE_AUTHOR("OKI semiconductor, <masa-korg@dsn.okisemi.com>");
+ MODULE_DESCRIPTION("EG20T PCH Gigabit ethernet Driver");
+ MODULE_AUTHOR("OKI SEMICONDUCTOR, <toshiharu-linux@dsn.okisemi.com>");
  MODULE_LICENSE("GPL");
  MODULE_VERSION(DRV_VERSION);
  MODULE_DEVICE_TABLE(pci, pch_gbe_pcidev_id);
@@@ -1136,7 -1136,8 +1136,7 @@@ ppp_send_frame(struct ppp *ppp, struct 
                   a four-byte PPP header on each packet */
                *skb_push(skb, 2) = 1;
                if (ppp->pass_filter &&
 -                  sk_run_filter(skb, ppp->pass_filter,
 -                                ppp->pass_len) == 0) {
 +                  sk_run_filter(skb, ppp->pass_filter) == 0) {
                        if (ppp->debug & 1)
                                printk(KERN_DEBUG "PPP: outbound frame not passed\n");
                        kfree_skb(skb);
                }
                /* if this packet passes the active filter, record the time */
                if (!(ppp->active_filter &&
 -                    sk_run_filter(skb, ppp->active_filter,
 -                                  ppp->active_len) == 0))
 +                    sk_run_filter(skb, ppp->active_filter) == 0))
                        ppp->last_xmit = jiffies;
                skb_pull(skb, 2);
  #else
@@@ -1756,7 -1758,8 +1756,7 @@@ ppp_receive_nonmp_frame(struct ppp *ppp
  
                        *skb_push(skb, 2) = 0;
                        if (ppp->pass_filter &&
 -                          sk_run_filter(skb, ppp->pass_filter,
 -                                        ppp->pass_len) == 0) {
 +                          sk_run_filter(skb, ppp->pass_filter) == 0) {
                                if (ppp->debug & 1)
                                        printk(KERN_DEBUG "PPP: inbound frame "
                                               "not passed\n");
                                return;
                        }
                        if (!(ppp->active_filter &&
 -                            sk_run_filter(skb, ppp->active_filter,
 -                                          ppp->active_len) == 0))
 +                            sk_run_filter(skb, ppp->active_filter) == 0))
                                ppp->last_recv = jiffies;
                        __skb_pull(skb, 2);
                } else
@@@ -2580,16 -2584,16 +2580,16 @@@ ppp_create_interface(struct net *net, i
         */
        dev_net_set(dev, net);
  
-       ret = -EEXIST;
        mutex_lock(&pn->all_ppp_mutex);
  
        if (unit < 0) {
                unit = unit_get(&pn->units_idr, ppp);
                if (unit < 0) {
-                       *retp = unit;
+                       ret = unit;
                        goto out2;
                }
        } else {
+               ret = -EEXIST;
                if (unit_find(&pn->units_idr, unit))
                        goto out2; /* unit already exists */
                /*
@@@ -2664,10 -2668,10 +2664,10 @@@ static void ppp_shutdown_interface(stru
                ppp->closing = 1;
                ppp_unlock(ppp);
                unregister_netdev(ppp->dev);
+               unit_put(&pn->units_idr, ppp->file.index);
        } else
                ppp_unlock(ppp);
  
-       unit_put(&pn->units_idr, ppp->file.index);
        ppp->file.dead = 1;
        ppp->owner = NULL;
        wake_up_interruptible(&ppp->file.rwait);
@@@ -2855,8 -2859,7 +2855,7 @@@ static void __exit ppp_cleanup(void
   * by holding all_ppp_mutex
   */
  
- /* associate pointer with specified number */
- static int unit_set(struct idr *p, void *ptr, int n)
+ static int __unit_alloc(struct idr *p, void *ptr, int n)
  {
        int unit, err;
  
@@@ -2867,10 -2870,24 +2866,24 @@@ again
        }
  
        err = idr_get_new_above(p, ptr, n, &unit);
-       if (err == -EAGAIN)
-               goto again;
+       if (err < 0) {
+               if (err == -EAGAIN)
+                       goto again;
+               return err;
+       }
+       return unit;
+ }
+ /* associate pointer with specified number */
+ static int unit_set(struct idr *p, void *ptr, int n)
+ {
+       int unit;
  
-       if (unit != n) {
+       unit = __unit_alloc(p, ptr, n);
+       if (unit < 0)
+               return unit;
+       else if (unit != n) {
                idr_remove(p, unit);
                return -EINVAL;
        }
  /* get new free unit number and associate pointer with it */
  static int unit_get(struct idr *p, void *ptr)
  {
-       int unit, err;
- again:
-       if (!idr_pre_get(p, GFP_KERNEL)) {
-               printk(KERN_ERR "PPP: No free memory for idr\n");
-               return -ENOMEM;
-       }
-       err = idr_get_new_above(p, ptr, 0, &unit);
-       if (err == -EAGAIN)
-               goto again;
-       return unit;
+       return __unit_alloc(p, ptr, 0);
  }
  
  /* put unit number back to a pool */
@@@ -62,15 -62,15 +62,15 @@@ static const u32 default_msg 
  /* NETIF_MSG_PKTDATA | */
      NETIF_MSG_HW | NETIF_MSG_WOL | 0;
  
- static int debug = 0x00007fff;        /* defaults above */
- module_param(debug, int, 0);
+ static int debug = -1;        /* defaults above */
+ module_param(debug, int, 0664);
  MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
  
  #define MSIX_IRQ 0
  #define MSI_IRQ 1
  #define LEG_IRQ 2
  static int qlge_irq_type = MSIX_IRQ;
- module_param(qlge_irq_type, int, MSIX_IRQ);
+ module_param(qlge_irq_type, int, 0664);
  MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
  
  static int qlge_mpi_coredump;
@@@ -3844,7 -3844,7 +3844,7 @@@ static int ql_adapter_reset(struct ql_a
  
  static void ql_display_dev_info(struct net_device *ndev)
  {
 -      struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
 +      struct ql_adapter *qdev = netdev_priv(ndev);
  
        netif_info(qdev, probe, qdev->ndev,
                   "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
@@@ -4264,7 -4264,7 +4264,7 @@@ static struct net_device_stats *qlge_ge
  
  static void qlge_set_multicast_list(struct net_device *ndev)
  {
 -      struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
 +      struct ql_adapter *qdev = netdev_priv(ndev);
        struct netdev_hw_addr *ha;
        int i, status;
  
@@@ -4354,7 -4354,7 +4354,7 @@@ exit
  
  static int qlge_set_mac_address(struct net_device *ndev, void *p)
  {
 -      struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
 +      struct ql_adapter *qdev = netdev_priv(ndev);
        struct sockaddr *addr = p;
        int status;
  
  
  static void qlge_tx_timeout(struct net_device *ndev)
  {
 -      struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
 +      struct ql_adapter *qdev = netdev_priv(ndev);
        ql_queue_asic_error(qdev);
  }
  
diff --combined drivers/net/sfc/efx.c
@@@ -23,6 -23,7 +23,6 @@@
  #include <linux/gfp.h>
  #include "net_driver.h"
  #include "efx.h"
 -#include "mdio_10g.h"
  #include "nic.h"
  
  #include "mcdi.h"
@@@ -196,7 -197,9 +196,9 @@@ MODULE_PARM_DESC(debug, "Bitmapped debu
  
  static void efx_remove_channels(struct efx_nic *efx);
  static void efx_remove_port(struct efx_nic *efx);
+ static void efx_init_napi(struct efx_nic *efx);
  static void efx_fini_napi(struct efx_nic *efx);
+ static void efx_fini_napi_channel(struct efx_channel *channel);
  static void efx_fini_struct(struct efx_nic *efx);
  static void efx_start_all(struct efx_nic *efx);
  static void efx_stop_all(struct efx_nic *efx);
@@@ -334,8 -337,10 +336,10 @@@ void efx_process_channel_now(struct efx
  
        /* Disable interrupts and wait for ISRs to complete */
        efx_nic_disable_interrupts(efx);
-       if (efx->legacy_irq)
+       if (efx->legacy_irq) {
                synchronize_irq(efx->legacy_irq);
+               efx->legacy_irq_enabled = false;
+       }
        if (channel->irq)
                synchronize_irq(channel->irq);
  
        efx_channel_processed(channel);
  
        napi_enable(&channel->napi_str);
+       if (efx->legacy_irq)
+               efx->legacy_irq_enabled = true;
        efx_nic_enable_interrupts(efx);
  }
  
@@@ -425,6 -432,7 +431,7 @@@ efx_alloc_channel(struct efx_nic *efx, 
  
                *channel = *old_channel;
  
+               channel->napi_dev = NULL;
                memset(&channel->eventq, 0, sizeof(channel->eventq));
  
                rx_queue = &channel->rx_queue;
@@@ -735,9 -743,13 +742,13 @@@ efx_realloc_channels(struct efx_nic *ef
        if (rc)
                goto rollback;
  
+       efx_init_napi(efx);
        /* Destroy old channels */
-       for (i = 0; i < efx->n_channels; i++)
+       for (i = 0; i < efx->n_channels; i++) {
+               efx_fini_napi_channel(other_channel[i]);
                efx_remove_channel(other_channel[i]);
+       }
  out:
        /* Free unused channel structures */
        for (i = 0; i < efx->n_channels; i++)
@@@ -909,7 -921,6 +920,7 @@@ static void efx_mac_work(struct work_st
  
  static int efx_probe_port(struct efx_nic *efx)
  {
 +      unsigned char *perm_addr;
        int rc;
  
        netif_dbg(efx, probe, efx->net_dev, "create port\n");
                return rc;
  
        /* Sanity check MAC address */
 -      if (is_valid_ether_addr(efx->mac_address)) {
 -              memcpy(efx->net_dev->dev_addr, efx->mac_address, ETH_ALEN);
 +      perm_addr = efx->net_dev->perm_addr;
 +      if (is_valid_ether_addr(perm_addr)) {
 +              memcpy(efx->net_dev->dev_addr, perm_addr, ETH_ALEN);
        } else {
                netif_err(efx, probe, efx->net_dev, "invalid MAC address %pM\n",
 -                        efx->mac_address);
 +                        perm_addr);
                if (!allow_bad_hwaddr) {
                        rc = -EINVAL;
                        goto err;
@@@ -1401,6 -1411,8 +1412,8 @@@ static void efx_start_all(struct efx_ni
                efx_start_channel(channel);
        }
  
+       if (efx->legacy_irq)
+               efx->legacy_irq_enabled = true;
        efx_nic_enable_interrupts(efx);
  
        /* Switch to event based MCDI completions after enabling interrupts.
@@@ -1461,8 -1473,10 +1474,10 @@@ static void efx_stop_all(struct efx_ni
  
        /* Disable interrupts and wait for ISR to complete */
        efx_nic_disable_interrupts(efx);
-       if (efx->legacy_irq)
+       if (efx->legacy_irq) {
                synchronize_irq(efx->legacy_irq);
+               efx->legacy_irq_enabled = false;
+       }
        efx_for_each_channel(channel, efx) {
                if (channel->irq)
                        synchronize_irq(channel->irq);
@@@ -1594,7 -1608,7 +1609,7 @@@ static int efx_ioctl(struct net_device 
   *
   **************************************************************************/
  
- static int efx_init_napi(struct efx_nic *efx)
+ static void efx_init_napi(struct efx_nic *efx)
  {
        struct efx_channel *channel;
  
                netif_napi_add(channel->napi_dev, &channel->napi_str,
                               efx_poll, napi_weight);
        }
-       return 0;
+ }
+ static void efx_fini_napi_channel(struct efx_channel *channel)
+ {
+       if (channel->napi_dev)
+               netif_napi_del(&channel->napi_str);
+       channel->napi_dev = NULL;
  }
  
  static void efx_fini_napi(struct efx_nic *efx)
  {
        struct efx_channel *channel;
  
-       efx_for_each_channel(channel, efx) {
-               if (channel->napi_dev)
-                       netif_napi_del(&channel->napi_str);
-               channel->napi_dev = NULL;
-       }
+       efx_for_each_channel(channel, efx)
+               efx_fini_napi_channel(channel);
  }
  
  /**************************************************************************
@@@ -1963,6 -1980,7 +1981,6 @@@ void efx_reset_down(struct efx_nic *efx
  
        efx_stop_all(efx);
        mutex_lock(&efx->mac_lock);
 -      mutex_lock(&efx->spi_lock);
  
        efx_fini_channels(efx);
        if (efx->port_initialized && method != RESET_TYPE_INVISIBLE)
@@@ -2004,6 -2022,7 +2022,6 @@@ int efx_reset_up(struct efx_nic *efx, e
        efx_init_channels(efx);
        efx_restore_filters(efx);
  
 -      mutex_unlock(&efx->spi_lock);
        mutex_unlock(&efx->mac_lock);
  
        efx_start_all(efx);
  fail:
        efx->port_initialized = false;
  
 -      mutex_unlock(&efx->spi_lock);
        mutex_unlock(&efx->mac_lock);
  
        return rc;
@@@ -2200,6 -2220,8 +2218,6 @@@ static int efx_init_struct(struct efx_n
        /* Initialise common structures */
        memset(efx, 0, sizeof(*efx));
        spin_lock_init(&efx->biu_lock);
 -      mutex_init(&efx->mdio_lock);
 -      mutex_init(&efx->spi_lock);
  #ifdef CONFIG_SFC_MTD
        INIT_LIST_HEAD(&efx->mtd_list);
  #endif
@@@ -2331,9 -2353,7 +2349,7 @@@ static int efx_pci_probe_main(struct ef
        if (rc)
                goto fail1;
  
-       rc = efx_init_napi(efx);
-       if (rc)
-               goto fail2;
+       efx_init_napi(efx);
  
        rc = efx->type->init(efx);
        if (rc) {
        efx->type->fini(efx);
   fail3:
        efx_fini_napi(efx);
-  fail2:
        efx_remove_all(efx);
   fail1:
        return rc;
@@@ -621,6 -621,7 +621,7 @@@ struct efx_filter_state
   * @pci_dev: The PCI device
   * @type: Controller type attributes
   * @legacy_irq: IRQ number
+  * @legacy_irq_enabled: Are IRQs enabled on NIC (INT_EN_KER register)?
   * @workqueue: Workqueue for port reconfigures and the HW monitor.
   *    Work items do not hold and must not acquire RTNL.
   * @workqueue_name: Name of workqueue
   * @n_tx_channels: Number of channels used for TX
   * @rx_buffer_len: RX buffer length
   * @rx_buffer_order: Order (log2) of number of pages for each RX buffer
 + * @rx_hash_key: Toeplitz hash key for RSS
   * @rx_indir_table: Indirection table for RSS
   * @int_error_count: Number of internal errors seen recently
   * @int_error_expire: Time at which error count will be expired
   *    to verify that an interrupt has occurred.
   * @irq_zero_count: Number of legacy IRQs seen with queue flags == 0
   * @fatal_irq_level: IRQ level (bit number) used for serious errors
 - * @spi_flash: SPI flash device
 - *    This field will be %NULL if no flash device is present (or for Siena).
 - * @spi_eeprom: SPI EEPROM device
 - *    This field will be %NULL if no EEPROM device is present (or for Siena).
 - * @spi_lock: SPI bus lock
   * @mtd_list: List of MTDs attached to the NIC
   * @n_rx_nodesc_drop_cnt: RX no descriptor drop count
   * @nic_data: Hardware dependant state
   * @stats_buffer: DMA buffer for statistics
   * @stats_lock: Statistics update lock. Serialises statistics fetches
   * @mac_op: MAC interface
 - * @mac_address: Permanent MAC address
   * @phy_type: PHY type
 - * @mdio_lock: MDIO lock
   * @phy_op: PHY interface
   * @phy_data: PHY private data (including PHY-specific stats)
   * @mdio: PHY MDIO interface
   * @mdio_bus: PHY MDIO bus ID (only used by Siena)
   * @phy_mode: PHY operating mode. Serialised by @mac_lock.
 - * @xmac_poll_required: XMAC link state needs polling
   * @link_advertising: Autonegotiation advertising flags
   * @link_state: Current state of the link
   * @n_link_state_changes: Number of times the link has changed state
@@@ -702,6 -710,7 +703,7 @@@ struct efx_nic 
        struct pci_dev *pci_dev;
        const struct efx_nic_type *type;
        int legacy_irq;
+       bool legacy_irq_enabled;
        struct workqueue_struct *workqueue;
        char workqueue_name[16];
        struct work_struct reset_work;
        unsigned irq_zero_count;
        unsigned fatal_irq_level;
  
 -      struct efx_spi_device *spi_flash;
 -      struct efx_spi_device *spi_eeprom;
 -      struct mutex spi_lock;
  #ifdef CONFIG_SFC_MTD
        struct list_head mtd_list;
  #endif
        spinlock_t stats_lock;
  
        struct efx_mac_operations *mac_op;
 -      unsigned char mac_address[ETH_ALEN];
  
        unsigned int phy_type;
 -      struct mutex mdio_lock;
        struct efx_phy_operations *phy_op;
        void *phy_data;
        struct mdio_if_info mdio;
        unsigned int mdio_bus;
        enum efx_phy_mode phy_mode;
  
 -      bool xmac_poll_required;
        u32 link_advertising;
        struct efx_link_state link_state;
        unsigned int n_link_state_changes;
@@@ -816,7 -831,6 +818,7 @@@ static inline unsigned int efx_port_num
   *    be called while the controller is uninitialised.
   * @probe_port: Probe the MAC and PHY
   * @remove_port: Free resources allocated by probe_port()
 + * @handle_global_event: Handle a "global" event (may be %NULL)
   * @prepare_flush: Prepare the hardware for flushing the DMA queues
   * @update_stats: Update statistics not provided by event handling
   * @start_stats: Start the regular fetching of statistics
@@@ -861,7 -875,6 +863,7 @@@ struct efx_nic_type 
        int (*reset)(struct efx_nic *efx, enum reset_type method);
        int (*probe_port)(struct efx_nic *efx);
        void (*remove_port)(struct efx_nic *efx);
 +      bool (*handle_global_event)(struct efx_channel *channel, efx_qword_t *);
        void (*prepare_flush)(struct efx_nic *efx);
        void (*update_stats)(struct efx_nic *efx);
        void (*start_stats)(struct efx_nic *efx);
diff --combined drivers/net/sfc/nic.c
@@@ -894,6 -894,46 +894,6 @@@ efx_handle_generated_event(struct efx_c
                          channel->channel, EFX_QWORD_VAL(*event));
  }
  
 -/* Global events are basically PHY events */
 -static void
 -efx_handle_global_event(struct efx_channel *channel, efx_qword_t *event)
 -{
 -      struct efx_nic *efx = channel->efx;
 -      bool handled = false;
 -
 -      if (EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_G_PHY0_INTR) ||
 -          EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XG_PHY0_INTR) ||
 -          EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XFP_PHY0_INTR)) {
 -              /* Ignored */
 -              handled = true;
 -      }
 -
 -      if ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) &&
 -          EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_XG_MGT_INTR)) {
 -              efx->xmac_poll_required = true;
 -              handled = true;
 -      }
 -
 -      if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1 ?
 -          EFX_QWORD_FIELD(*event, FSF_AA_GLB_EV_RX_RECOVERY) :
 -          EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_RX_RECOVERY)) {
 -              netif_err(efx, rx_err, efx->net_dev,
 -                        "channel %d seen global RX_RESET event. Resetting.\n",
 -                        channel->channel);
 -
 -              atomic_inc(&efx->rx_reset);
 -              efx_schedule_reset(efx, EFX_WORKAROUND_6555(efx) ?
 -                                 RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
 -              handled = true;
 -      }
 -
 -      if (!handled)
 -              netif_err(efx, hw, efx->net_dev,
 -                        "channel %d unknown global event "
 -                        EFX_QWORD_FMT "\n", channel->channel,
 -                        EFX_QWORD_VAL(*event));
 -}
 -
  static void
  efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
  {
@@@ -1010,17 -1050,15 +1010,17 @@@ int efx_nic_process_eventq(struct efx_c
                case FSE_AZ_EV_CODE_DRV_GEN_EV:
                        efx_handle_generated_event(channel, &event);
                        break;
 -              case FSE_AZ_EV_CODE_GLOBAL_EV:
 -                      efx_handle_global_event(channel, &event);
 -                      break;
                case FSE_AZ_EV_CODE_DRIVER_EV:
                        efx_handle_driver_event(channel, &event);
                        break;
                case FSE_CZ_EV_CODE_MCDI_EV:
                        efx_mcdi_process_event(channel, &event);
                        break;
 +              case FSE_AZ_EV_CODE_GLOBAL_EV:
 +                      if (efx->type->handle_global_event &&
 +                          efx->type->handle_global_event(channel, &event))
 +                              break;
 +                      /* else fall through */
                default:
                        netif_err(channel->efx, hw, channel->efx->net_dev,
                                  "channel %d unknown event type %d (data "
@@@ -1380,6 -1418,12 +1380,12 @@@ static irqreturn_t efx_legacy_interrupt
        u32 queues;
        int syserr;
  
+       /* Could this be ours?  If interrupts are disabled then the
+        * channel state may not be valid.
+        */
+       if (!efx->legacy_irq_enabled)
+               return result;
        /* Read the ISR which also ACKs the interrupts */
        efx_readd(efx, &reg, FR_BZ_INT_ISR0);
        queues = EFX_EXTRACT_DWORD(reg, 0, 31);
@@@ -186,18 -186,6 +186,18 @@@ static inline u32 stmmac_tx_avail(struc
        return priv->dirty_tx + priv->dma_tx_size - priv->cur_tx - 1;
  }
  
 +/* On some ST platforms, some HW system configuraton registers have to be
 + * set according to the link speed negotiated.
 + */
 +static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
 +{
 +      struct phy_device *phydev = priv->phydev;
 +
 +      if (likely(priv->plat->fix_mac_speed))
 +              priv->plat->fix_mac_speed(priv->plat->bsp_priv,
 +                                        phydev->speed);
 +}
 +
  /**
   * stmmac_adjust_link
   * @dev: net device structure
@@@ -240,13 -228,15 +240,13 @@@ static void stmmac_adjust_link(struct n
                        new_state = 1;
                        switch (phydev->speed) {
                        case 1000:
 -                              if (likely(priv->is_gmac))
 +                              if (likely(priv->plat->has_gmac))
                                        ctrl &= ~priv->hw->link.port;
 -                              if (likely(priv->fix_mac_speed))
 -                                      priv->fix_mac_speed(priv->bsp_priv,
 -                                                          phydev->speed);
 +                              stmmac_hw_fix_mac_speed(priv);
                                break;
                        case 100:
                        case 10:
 -                              if (priv->is_gmac) {
 +                              if (priv->plat->has_gmac) {
                                        ctrl |= priv->hw->link.port;
                                        if (phydev->speed == SPEED_100) {
                                                ctrl |= priv->hw->link.speed;
                                } else {
                                        ctrl &= ~priv->hw->link.port;
                                }
 -                              if (likely(priv->fix_mac_speed))
 -                                      priv->fix_mac_speed(priv->bsp_priv,
 -                                                          phydev->speed);
 +                              stmmac_hw_fix_mac_speed(priv);
                                break;
                        default:
                                if (netif_msg_link(priv))
@@@ -313,7 -305,7 +313,7 @@@ static int stmmac_init_phy(struct net_d
                return 0;
        }
  
 -      snprintf(bus_id, MII_BUS_ID_SIZE, "%x", priv->bus_id);
 +      snprintf(bus_id, MII_BUS_ID_SIZE, "%x", priv->plat->bus_id);
        snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
                 priv->phy_addr);
        pr_debug("stmmac_init_phy:  trying to attach to %s\n", phy_id);
@@@ -560,7 -552,7 +560,7 @@@ static void free_dma_desc_resources(str
   */
  static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
  {
 -      if (likely((priv->tx_coe) && (!priv->no_csum_insertion))) {
 +      if (likely((priv->plat->tx_coe) && (!priv->no_csum_insertion))) {
                /* In case of GMAC, SF mode has to be enabled
                 * to perform the TX COE. This depends on:
                 * 1) TX COE if actually supported
@@@ -822,7 -814,7 +822,7 @@@ static int stmmac_open(struct net_devic
        init_dma_desc_rings(dev);
  
        /* DMA initialization and SW reset */
 -      if (unlikely(priv->hw->dma->init(priv->ioaddr, priv->pbl,
 +      if (unlikely(priv->hw->dma->init(priv->ioaddr, priv->plat->pbl,
                                         priv->dma_tx_phy,
                                         priv->dma_rx_phy) < 0)) {
  
        /* Copy the MAC addr into the HW  */
        priv->hw->mac->set_umac_addr(priv->ioaddr, dev->dev_addr, 0);
        /* If required, perform hw setup of the bus. */
 -      if (priv->bus_setup)
 -              priv->bus_setup(priv->ioaddr);
 +      if (priv->plat->bus_setup)
 +              priv->plat->bus_setup(priv->ioaddr);
        /* Initialize the MAC Core */
        priv->hw->mac->core_init(priv->ioaddr);
  
        priv->rx_coe = priv->hw->mac->rx_coe(priv->ioaddr);
        if (priv->rx_coe)
                pr_info("stmmac: Rx Checksum Offload Engine supported\n");
 -      if (priv->tx_coe)
 +      if (priv->plat->tx_coe)
                pr_info("\tTX Checksum insertion supported\n");
  
 -      priv->shutdown = 0;
 -
        /* Initialise the MMC (if present) to disable all interrupts. */
        writel(0xffffffff, priv->ioaddr + MMC_HIGH_INTR_MASK);
        writel(0xffffffff, priv->ioaddr + MMC_LOW_INTR_MASK);
@@@ -1048,8 -1042,7 +1048,8 @@@ static netdev_tx_t stmmac_xmit(struct s
                return stmmac_sw_tso(priv, skb);
  
        if (likely((skb->ip_summed == CHECKSUM_PARTIAL))) {
 -              if (unlikely((!priv->tx_coe) || (priv->no_csum_insertion)))
 +              if (unlikely((!priv->plat->tx_coe) ||
 +                           (priv->no_csum_insertion)))
                        skb_checksum_help(skb);
                else
                        csum_insertion = 1;
@@@ -1153,7 -1146,7 +1153,7 @@@ static inline void stmmac_rx_refill(str
                                           DMA_FROM_DEVICE);
  
                        (p + entry)->des2 = priv->rx_skbuff_dma[entry];
 -                      if (unlikely(priv->is_gmac)) {
 +                      if (unlikely(priv->plat->has_gmac)) {
                                if (bfsize >= BUF_SIZE_8KiB)
                                        (p + entry)->des3 =
                                            (p + entry)->des2 + BUF_SIZE_8KiB;
@@@ -1363,7 -1356,7 +1363,7 @@@ static int stmmac_change_mtu(struct net
                return -EBUSY;
        }
  
 -      if (priv->is_gmac)
 +      if (priv->plat->has_gmac)
                max_mtu = JUMBO_LEN;
        else
                max_mtu = ETH_DATA_LEN;
         * needs to have the Tx COE disabled for oversized frames
         * (due to limited buffer sizes). In this case we disable
         * the TX csum insertionin the TDES and not use SF. */
 -      if ((priv->bugged_jumbo) && (priv->dev->mtu > ETH_DATA_LEN))
 +      if ((priv->plat->bugged_jumbo) && (priv->dev->mtu > ETH_DATA_LEN))
                priv->no_csum_insertion = 1;
        else
                priv->no_csum_insertion = 0;
@@@ -1397,7 -1390,7 +1397,7 @@@ static irqreturn_t stmmac_interrupt(in
                return IRQ_NONE;
        }
  
 -      if (priv->is_gmac)
 +      if (priv->plat->has_gmac)
                /* To handle GMAC own interrupts */
                priv->hw->mac->host_irq_status((void __iomem *) dev->base_addr);
  
@@@ -1494,8 -1487,7 +1494,8 @@@ static int stmmac_probe(struct net_devi
        dev->netdev_ops = &stmmac_netdev_ops;
        stmmac_set_ethtool_ops(dev);
  
 -      dev->features |= (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA);
 +      dev->features |= NETIF_F_SG | NETIF_F_HIGHDMA |
 +              NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
        dev->watchdog_timeo = msecs_to_jiffies(watchdog);
  #ifdef STMMAC_VLAN_TAG_USED
        /* Both mac100 and gmac support receive VLAN tag detection */
                pr_warning("\tno valid MAC address;"
                        "please, use ifconfig or nwhwconfig!\n");
  
+       spin_lock_init(&priv->lock);
        ret = register_netdev(dev);
        if (ret) {
                pr_err("%s: ERROR %i registering the device\n",
  
        DBG(probe, DEBUG, "%s: Scatter/Gather: %s - HW checksums: %s\n",
            dev->name, (dev->features & NETIF_F_SG) ? "on" : "off",
 -          (dev->features & NETIF_F_HW_CSUM) ? "on" : "off");
 +          (dev->features & NETIF_F_IP_CSUM) ? "on" : "off");
  
-       spin_lock_init(&priv->lock);
        return ret;
  }
  
@@@ -1544,7 -1536,7 +1544,7 @@@ static int stmmac_mac_device_setup(stru
  
        struct mac_device_info *device;
  
 -      if (priv->is_gmac)
 +      if (priv->plat->has_gmac)
                device = dwmac1000_setup(priv->ioaddr);
        else
                device = dwmac100_setup(priv->ioaddr);
        if (!device)
                return -ENOMEM;
  
 -      if (priv->enh_desc) {
 +      if (priv->plat->enh_desc) {
                device->desc = &enh_desc_ops;
                pr_info("\tEnhanced descriptor structure\n");
        } else
@@@ -1606,7 -1598,7 +1606,7 @@@ static int stmmac_associate_phy(struct 
                plat_dat->bus_id);
  
        /* Check that this phy is for the MAC being initialised */
 -      if (priv->bus_id != plat_dat->bus_id)
 +      if (priv->plat->bus_id != plat_dat->bus_id)
                return 0;
  
        /* OK, this PHY is connected to the MAC.
@@@ -1642,7 -1634,7 +1642,7 @@@ static int stmmac_dvr_probe(struct plat
        struct resource *res;
        void __iomem *addr = NULL;
        struct net_device *ndev = NULL;
 -      struct stmmac_priv *priv;
 +      struct stmmac_priv *priv = NULL;
        struct plat_stmmacenet_data *plat_dat;
  
        pr_info("STMMAC driver:\n\tplatform registration... ");
        priv->device = &(pdev->dev);
        priv->dev = ndev;
        plat_dat = pdev->dev.platform_data;
 -      priv->bus_id = plat_dat->bus_id;
 -      priv->pbl = plat_dat->pbl;      /* TLI */
 -      priv->mii_clk_csr = plat_dat->clk_csr;
 -      priv->tx_coe = plat_dat->tx_coe;
 -      priv->bugged_jumbo = plat_dat->bugged_jumbo;
 -      priv->is_gmac = plat_dat->has_gmac;     /* GMAC is on board */
 -      priv->enh_desc = plat_dat->enh_desc;
 +
 +      priv->plat = plat_dat;
 +
        priv->ioaddr = addr;
  
        /* PMT module is not integrated in all the MAC devices. */
        /* Set the I/O base addr */
        ndev->base_addr = (unsigned long)addr;
  
 -      /* Verify embedded resource for the platform */
 -      ret = stmmac_claim_resource(pdev);
 -      if (ret < 0)
 -              goto out;
 +      /* Custom initialisation */
 +      if (priv->plat->init) {
 +              ret = priv->plat->init(pdev);
 +              if (unlikely(ret))
 +                      goto out;
 +      }
  
        /* MAC HW revice detection */
        ret = stmmac_mac_device_setup(ndev);
                goto out;
        }
  
 -      priv->fix_mac_speed = plat_dat->fix_mac_speed;
 -      priv->bus_setup = plat_dat->bus_setup;
 -      priv->bsp_priv = plat_dat->bsp_priv;
 -
        pr_info("\t%s - (dev. name: %s - id: %d, IRQ #%d\n"
               "\tIO base addr: 0x%p)\n", ndev->name, pdev->name,
               pdev->id, ndev->irq, addr);
  
        /* MDIO bus Registration */
 -      pr_debug("\tMDIO bus (id: %d)...", priv->bus_id);
 +      pr_debug("\tMDIO bus (id: %d)...", priv->plat->bus_id);
        ret = stmmac_mdio_register(ndev);
        if (ret < 0)
                goto out;
  
  out:
        if (ret < 0) {
 +              if (priv->plat->exit)
 +                      priv->plat->exit(pdev);
 +
                platform_set_drvdata(pdev, NULL);
                release_mem_region(res->start, resource_size(res));
                if (addr != NULL)
@@@ -1782,9 -1777,6 +1782,9 @@@ static int stmmac_dvr_remove(struct pla
  
        stmmac_mdio_unregister(ndev);
  
 +      if (priv->plat->exit)
 +              priv->plat->exit(pdev);
 +
        platform_set_drvdata(pdev, NULL);
        unregister_netdev(ndev);
  
  }
  
  #ifdef CONFIG_PM
 -static int stmmac_suspend(struct platform_device *pdev, pm_message_t state)
 +static int stmmac_suspend(struct device *dev)
  {
 -      struct net_device *dev = platform_get_drvdata(pdev);
 -      struct stmmac_priv *priv = netdev_priv(dev);
 +      struct net_device *ndev = dev_get_drvdata(dev);
 +      struct stmmac_priv *priv = netdev_priv(ndev);
        int dis_ic = 0;
  
 -      if (!dev || !netif_running(dev))
 +      if (!ndev || !netif_running(ndev))
                return 0;
  
        spin_lock(&priv->lock);
  
 -      if (state.event == PM_EVENT_SUSPEND) {
 -              netif_device_detach(dev);
 -              netif_stop_queue(dev);
 -              if (priv->phydev)
 -                      phy_stop(priv->phydev);
 +      netif_device_detach(ndev);
 +      netif_stop_queue(ndev);
 +      if (priv->phydev)
 +              phy_stop(priv->phydev);
  
  #ifdef CONFIG_STMMAC_TIMER
 -              priv->tm->timer_stop();
 -              if (likely(priv->tm->enable))
 -                      dis_ic = 1;
 +      priv->tm->timer_stop();
 +      if (likely(priv->tm->enable))
 +              dis_ic = 1;
  #endif
 -              napi_disable(&priv->napi);
 -
 -              /* Stop TX/RX DMA */
 -              priv->hw->dma->stop_tx(priv->ioaddr);
 -              priv->hw->dma->stop_rx(priv->ioaddr);
 -              /* Clear the Rx/Tx descriptors */
 -              priv->hw->desc->init_rx_desc(priv->dma_rx, priv->dma_rx_size,
 -                                           dis_ic);
 -              priv->hw->desc->init_tx_desc(priv->dma_tx, priv->dma_tx_size);
 -
 -              /* Enable Power down mode by programming the PMT regs */
 -              if (device_can_wakeup(priv->device))
 -                      priv->hw->mac->pmt(priv->ioaddr, priv->wolopts);
 -              else
 -                      stmmac_disable_mac(priv->ioaddr);
 -      } else {
 -              priv->shutdown = 1;
 -              /* Although this can appear slightly redundant it actually
 -               * makes fast the standby operation and guarantees the driver
 -               * working if hibernation is on media. */
 -              stmmac_release(dev);
 -      }
 +      napi_disable(&priv->napi);
 +
 +      /* Stop TX/RX DMA */
 +      priv->hw->dma->stop_tx(priv->ioaddr);
 +      priv->hw->dma->stop_rx(priv->ioaddr);
 +      /* Clear the Rx/Tx descriptors */
 +      priv->hw->desc->init_rx_desc(priv->dma_rx, priv->dma_rx_size,
 +                                   dis_ic);
 +      priv->hw->desc->init_tx_desc(priv->dma_tx, priv->dma_tx_size);
 +
 +      /* Enable Power down mode by programming the PMT regs */
 +      if (device_may_wakeup(priv->device))
 +              priv->hw->mac->pmt(priv->ioaddr, priv->wolopts);
 +      else
 +              stmmac_disable_mac(priv->ioaddr);
  
        spin_unlock(&priv->lock);
        return 0;
  }
  
 -static int stmmac_resume(struct platform_device *pdev)
 +static int stmmac_resume(struct device *dev)
  {
 -      struct net_device *dev = platform_get_drvdata(pdev);
 -      struct stmmac_priv *priv = netdev_priv(dev);
 -
 -      if (!netif_running(dev))
 -              return 0;
 +      struct net_device *ndev = dev_get_drvdata(dev);
 +      struct stmmac_priv *priv = netdev_priv(ndev);
  
 -      if (priv->shutdown) {
 -              /* Re-open the interface and re-init the MAC/DMA
 -                 and the rings (i.e. on hibernation stage) */
 -              stmmac_open(dev);
 +      if (!netif_running(ndev))
                return 0;
 -      }
  
        spin_lock(&priv->lock);
  
         * is received. Anyway, it's better to manually clear
         * this bit because it can generate problems while resuming
         * from another devices (e.g. serial console). */
 -      if (device_can_wakeup(priv->device))
 +      if (device_may_wakeup(priv->device))
                priv->hw->mac->pmt(priv->ioaddr, 0);
  
 -      netif_device_attach(dev);
 +      netif_device_attach(ndev);
  
        /* Enable the MAC and DMA */
        stmmac_enable_mac(priv->ioaddr);
        priv->hw->dma->start_rx(priv->ioaddr);
  
  #ifdef CONFIG_STMMAC_TIMER
 -      priv->tm->timer_start(tmrate);
 +      if (likely(priv->tm->enable))
 +              priv->tm->timer_start(tmrate);
  #endif
        napi_enable(&priv->napi);
  
        if (priv->phydev)
                phy_start(priv->phydev);
  
 -      netif_start_queue(dev);
 +      netif_start_queue(ndev);
  
        spin_unlock(&priv->lock);
        return 0;
  }
 -#endif
  
 -static struct platform_driver stmmac_driver = {
 -      .driver = {
 -                 .name = STMMAC_RESOURCE_NAME,
 -                 },
 -      .probe = stmmac_dvr_probe,
 -      .remove = stmmac_dvr_remove,
 -#ifdef CONFIG_PM
 +static int stmmac_freeze(struct device *dev)
 +{
 +      struct net_device *ndev = dev_get_drvdata(dev);
 +
 +      if (!ndev || !netif_running(ndev))
 +              return 0;
 +
 +      return stmmac_release(ndev);
 +}
 +
 +static int stmmac_restore(struct device *dev)
 +{
 +      struct net_device *ndev = dev_get_drvdata(dev);
 +
 +      if (!ndev || !netif_running(ndev))
 +              return 0;
 +
 +      return stmmac_open(ndev);
 +}
 +
 +static const struct dev_pm_ops stmmac_pm_ops = {
        .suspend = stmmac_suspend,
        .resume = stmmac_resume,
 -#endif
 +      .freeze = stmmac_freeze,
 +      .thaw = stmmac_restore,
 +      .restore = stmmac_restore,
 +};
 +#else
 +static const struct dev_pm_ops stmmac_pm_ops;
 +#endif /* CONFIG_PM */
  
 +static struct platform_driver stmmac_driver = {
 +      .probe = stmmac_dvr_probe,
 +      .remove = stmmac_dvr_remove,
 +      .driver = {
 +              .name = STMMAC_RESOURCE_NAME,
 +              .owner = THIS_MODULE,
 +              .pm = &stmmac_pm_ops,
 +      },
  };
  
  /**
diff --combined drivers/net/usb/hso.c
@@@ -1745,6 -1745,7 +1745,6 @@@ static int hso_serial_ioctl(struct tty_
                            unsigned int cmd, unsigned long arg)
  {
        struct hso_serial *serial =  get_serial_by_tty(tty);
 -      void __user *uarg = (void __user *)arg;
        int ret = 0;
        D4("IOCTL cmd: %d, arg: %ld", cmd, arg);
  
@@@ -2993,12 -2994,14 +2993,14 @@@ static int hso_probe(struct usb_interfa
  
        case HSO_INTF_BULK:
                /* It's a regular bulk interface */
-               if (((port_spec & HSO_PORT_MASK) == HSO_PORT_NETWORK) &&
-                   !disable_net)
-                       hso_dev = hso_create_net_device(interface, port_spec);
-               else
+               if ((port_spec & HSO_PORT_MASK) == HSO_PORT_NETWORK) {
+                       if (!disable_net)
+                               hso_dev =
+                                   hso_create_net_device(interface, port_spec);
+               } else {
                        hso_dev =
                            hso_create_bulk_serial_device(interface, port_spec);
+               }
                if (!hso_dev)
                        goto exit;
                break;
  #define COMP_CKSUM_LEN 2
  
  #define AR_CH0_TOP (0x00016288)
 -#define AR_CH0_TOP_XPABIASLVL (0x3)
 +#define AR_CH0_TOP_XPABIASLVL (0x300)
  #define AR_CH0_TOP_XPABIASLVL_S (8)
  
  #define AR_CH0_THERM (0x00016290)
 -#define AR_CH0_THERM_SPARE (0x3f)
 -#define AR_CH0_THERM_SPARE_S (0)
 +#define AR_CH0_THERM_XPABIASLVL_MSB 0x3
 +#define AR_CH0_THERM_XPABIASLVL_MSB_S 0
 +#define AR_CH0_THERM_XPASHORT2GND 0x4
 +#define AR_CH0_THERM_XPASHORT2GND_S 2
  
  #define AR_SWITCH_TABLE_COM_ALL (0xffff)
  #define AR_SWITCH_TABLE_COM_ALL_S (0)
  #define SUB_NUM_CTL_MODES_AT_5G_40 2    /* excluding HT40, EXT-OFDM */
  #define SUB_NUM_CTL_MODES_AT_2G_40 3    /* excluding HT40, EXT-OFDM, EXT-CCK */
  
 -static const struct ar9300_eeprom ar9300_default = {
 +static int ar9003_hw_power_interpolate(int32_t x,
 +                                     int32_t *px, int32_t *py, u_int16_t np);
++
+ #define CTL(_tpower, _flag) ((_tpower) | ((_flag) << 6))
-                { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
-                { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
-                { { {60, 1}, {60, 0}, {60, 0}, {60, 1} } },
 +static const struct ar9300_eeprom ar9300_default = {
 +      .eepromVersion = 2,
 +      .templateVersion = 2,
 +      .macAddr = {1, 2, 3, 4, 5, 6},
 +      .custData = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 +                   0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
 +      .baseEepHeader = {
 +              .regDmn = { LE16(0), LE16(0x1f) },
 +              .txrxMask =  0x77, /* 4 bits tx and 4 bits rx */
 +              .opCapFlags = {
 +                      .opFlags = AR9300_OPFLAGS_11G | AR9300_OPFLAGS_11A,
 +                      .eepMisc = 0,
 +              },
 +              .rfSilent = 0,
 +              .blueToothOptions = 0,
 +              .deviceCap = 0,
 +              .deviceType = 5, /* takes lower byte in eeprom location */
 +              .pwrTableOffset = AR9300_PWR_TABLE_OFFSET,
 +              .params_for_tuning_caps = {0, 0},
 +              .featureEnable = 0x0c,
 +               /*
 +                * bit0 - enable tx temp comp - disabled
 +                * bit1 - enable tx volt comp - disabled
 +                * bit2 - enable fastClock - enabled
 +                * bit3 - enable doubling - enabled
 +                * bit4 - enable internal regulator - disabled
 +                * bit5 - enable pa predistortion - disabled
 +                */
 +              .miscConfiguration = 0, /* bit0 - turn down drivestrength */
 +              .eepromWriteEnableGpio = 3,
 +              .wlanDisableGpio = 0,
 +              .wlanLedGpio = 8,
 +              .rxBandSelectGpio = 0xff,
 +              .txrxgain = 0,
 +              .swreg = 0,
 +       },
 +      .modalHeader2G = {
 +      /* ar9300_modal_eep_header  2g */
 +              /* 4 idle,t1,t2,b(4 bits per setting) */
 +              .antCtrlCommon = LE32(0x110),
 +              /* 4 ra1l1, ra2l1, ra1l2, ra2l2, ra12 */
 +              .antCtrlCommon2 = LE32(0x22222),
 +
 +              /*
 +               * antCtrlChain[AR9300_MAX_CHAINS]; 6 idle, t, r,
 +               * rx1, rx12, b (2 bits each)
 +               */
 +              .antCtrlChain = { LE16(0x150), LE16(0x150), LE16(0x150) },
 +
 +              /*
 +               * xatten1DB[AR9300_MAX_CHAINS];  3 xatten1_db
 +               * for ar9280 (0xa20c/b20c 5:0)
 +               */
 +              .xatten1DB = {0, 0, 0},
 +
 +              /*
 +               * xatten1Margin[AR9300_MAX_CHAINS]; 3 xatten1_margin
 +               * for ar9280 (0xa20c/b20c 16:12
 +               */
 +              .xatten1Margin = {0, 0, 0},
 +              .tempSlope = 36,
 +              .voltSlope = 0,
 +
 +              /*
 +               * spurChans[OSPREY_EEPROM_MODAL_SPURS]; spur
 +               * channels in usual fbin coding format
 +               */
 +              .spurChans = {0, 0, 0, 0, 0},
 +
 +              /*
 +               * noiseFloorThreshCh[AR9300_MAX_CHAINS]; 3 Check
 +               * if the register is per chain
 +               */
 +              .noiseFloorThreshCh = {-1, 0, 0},
 +              .ob = {1, 1, 1},/* 3 chain */
 +              .db_stage2 = {1, 1, 1}, /* 3 chain  */
 +              .db_stage3 = {0, 0, 0},
 +              .db_stage4 = {0, 0, 0},
 +              .xpaBiasLvl = 0,
 +              .txFrameToDataStart = 0x0e,
 +              .txFrameToPaOn = 0x0e,
 +              .txClip = 3, /* 4 bits tx_clip, 4 bits dac_scale_cck */
 +              .antennaGain = 0,
 +              .switchSettling = 0x2c,
 +              .adcDesiredSize = -30,
 +              .txEndToXpaOff = 0,
 +              .txEndToRxOn = 0x2,
 +              .txFrameToXpaOn = 0xe,
 +              .thresh62 = 28,
 +              .papdRateMaskHt20 = LE32(0x0cf0e0e0),
 +              .papdRateMaskHt40 = LE32(0x6cf0e0e0),
 +              .futureModal = {
 +                      0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 +              },
 +       },
 +      .base_ext1 = {
 +              .ant_div_control = 0,
 +              .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
 +      },
 +      .calFreqPier2G = {
 +              FREQ2FBIN(2412, 1),
 +              FREQ2FBIN(2437, 1),
 +              FREQ2FBIN(2472, 1),
 +       },
 +      /* ar9300_cal_data_per_freq_op_loop 2g */
 +      .calPierData2G = {
 +              { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} },
 +              { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} },
 +              { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} },
 +       },
 +      .calTarget_freqbin_Cck = {
 +              FREQ2FBIN(2412, 1),
 +              FREQ2FBIN(2484, 1),
 +       },
 +      .calTarget_freqbin_2G = {
 +              FREQ2FBIN(2412, 1),
 +              FREQ2FBIN(2437, 1),
 +              FREQ2FBIN(2472, 1)
 +       },
 +      .calTarget_freqbin_2GHT20 = {
 +              FREQ2FBIN(2412, 1),
 +              FREQ2FBIN(2437, 1),
 +              FREQ2FBIN(2472, 1)
 +       },
 +      .calTarget_freqbin_2GHT40 = {
 +              FREQ2FBIN(2412, 1),
 +              FREQ2FBIN(2437, 1),
 +              FREQ2FBIN(2472, 1)
 +       },
 +      .calTargetPowerCck = {
 +               /* 1L-5L,5S,11L,11S */
 +               { {36, 36, 36, 36} },
 +               { {36, 36, 36, 36} },
 +      },
 +      .calTargetPower2G = {
 +               /* 6-24,36,48,54 */
 +               { {32, 32, 28, 24} },
 +               { {32, 32, 28, 24} },
 +               { {32, 32, 28, 24} },
 +      },
 +      .calTargetPower2GHT20 = {
 +              { {32, 32, 32, 32, 28, 20, 32, 32, 28, 20, 32, 32, 28, 20} },
 +              { {32, 32, 32, 32, 28, 20, 32, 32, 28, 20, 32, 32, 28, 20} },
 +              { {32, 32, 32, 32, 28, 20, 32, 32, 28, 20, 32, 32, 28, 20} },
 +      },
 +      .calTargetPower2GHT40 = {
 +              { {32, 32, 32, 32, 28, 20, 32, 32, 28, 20, 32, 32, 28, 20} },
 +              { {32, 32, 32, 32, 28, 20, 32, 32, 28, 20, 32, 32, 28, 20} },
 +              { {32, 32, 32, 32, 28, 20, 32, 32, 28, 20, 32, 32, 28, 20} },
 +      },
 +      .ctlIndex_2G =  {
 +              0x11, 0x12, 0x15, 0x17, 0x41, 0x42,
 +              0x45, 0x47, 0x31, 0x32, 0x35, 0x37,
 +      },
 +      .ctl_freqbin_2G = {
 +              {
 +                      FREQ2FBIN(2412, 1),
 +                      FREQ2FBIN(2417, 1),
 +                      FREQ2FBIN(2457, 1),
 +                      FREQ2FBIN(2462, 1)
 +              },
 +              {
 +                      FREQ2FBIN(2412, 1),
 +                      FREQ2FBIN(2417, 1),
 +                      FREQ2FBIN(2462, 1),
 +                      0xFF,
 +              },
 +
 +              {
 +                      FREQ2FBIN(2412, 1),
 +                      FREQ2FBIN(2417, 1),
 +                      FREQ2FBIN(2462, 1),
 +                      0xFF,
 +              },
 +              {
 +                      FREQ2FBIN(2422, 1),
 +                      FREQ2FBIN(2427, 1),
 +                      FREQ2FBIN(2447, 1),
 +                      FREQ2FBIN(2452, 1)
 +              },
 +
 +              {
 +                      /* Data[4].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
 +                      /* Data[4].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
 +                      /* Data[4].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
 +                      /* Data[4].ctlEdges[3].bChannel */ FREQ2FBIN(2484, 1),
 +              },
 +
 +              {
 +                      /* Data[5].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
 +                      /* Data[5].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
 +                      /* Data[5].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
 +                      0,
 +              },
 +
 +              {
 +                      /* Data[6].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
 +                      /* Data[6].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
 +                      FREQ2FBIN(2472, 1),
 +                      0,
 +              },
 +
 +              {
 +                      /* Data[7].ctlEdges[0].bChannel */ FREQ2FBIN(2422, 1),
 +                      /* Data[7].ctlEdges[1].bChannel */ FREQ2FBIN(2427, 1),
 +                      /* Data[7].ctlEdges[2].bChannel */ FREQ2FBIN(2447, 1),
 +                      /* Data[7].ctlEdges[3].bChannel */ FREQ2FBIN(2462, 1),
 +              },
 +
 +              {
 +                      /* Data[8].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
 +                      /* Data[8].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
 +                      /* Data[8].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
 +              },
 +
 +              {
 +                      /* Data[9].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
 +                      /* Data[9].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
 +                      /* Data[9].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
 +                      0
 +              },
 +
 +              {
 +                      /* Data[10].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
 +                      /* Data[10].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
 +                      /* Data[10].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
 +                      0
 +              },
 +
 +              {
 +                      /* Data[11].ctlEdges[0].bChannel */ FREQ2FBIN(2422, 1),
 +                      /* Data[11].ctlEdges[1].bChannel */ FREQ2FBIN(2427, 1),
 +                      /* Data[11].ctlEdges[2].bChannel */ FREQ2FBIN(2447, 1),
 +                      /* Data[11].ctlEdges[3].bChannel */ FREQ2FBIN(2462, 1),
 +              }
 +       },
 +      .ctlPowerData_2G = {
-                { { {60, 1}, {60, 0}, {0, 0}, {0, 0} } },
-                { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
-                { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
++               { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
++               { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
++               { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 1) } },
 +
-                { { {60, 0}, {60, 1}, {60, 1}, {60, 0} } },
-                { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
-                { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
++               { { CTL(60, 1), CTL(60, 0), CTL(0, 0), CTL(0, 0) } },
++               { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
++               { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
 +
-                { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
-                { { {60, 0}, {60, 1}, {60, 1}, {60, 1} } },
-                { { {60, 0}, {60, 1}, {60, 1}, {60, 1} } },
++               { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 0) } },
++               { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
++               { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
 +
-                               {60, 1}, {60, 1}, {60, 1}, {60, 1},
-                               {60, 1}, {60, 1}, {60, 1}, {60, 0},
++               { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
++               { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1) } },
++               { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1) } },
 +       },
 +      .modalHeader5G = {
 +              /* 4 idle,t1,t2,b (4 bits per setting) */
 +              .antCtrlCommon = LE32(0x110),
 +              /* 4 ra1l1, ra2l1, ra1l2,ra2l2,ra12 */
 +              .antCtrlCommon2 = LE32(0x22222),
 +               /* antCtrlChain 6 idle, t,r,rx1,rx12,b (2 bits each) */
 +              .antCtrlChain = {
 +                      LE16(0x000), LE16(0x000), LE16(0x000),
 +              },
 +               /* xatten1DB 3 xatten1_db for AR9280 (0xa20c/b20c 5:0) */
 +              .xatten1DB = {0, 0, 0},
 +
 +              /*
 +               * xatten1Margin[AR9300_MAX_CHAINS]; 3 xatten1_margin
 +               * for merlin (0xa20c/b20c 16:12
 +               */
 +              .xatten1Margin = {0, 0, 0},
 +              .tempSlope = 68,
 +              .voltSlope = 0,
 +              /* spurChans spur channels in usual fbin coding format */
 +              .spurChans = {0, 0, 0, 0, 0},
 +              /* noiseFloorThreshCh Check if the register is per chain */
 +              .noiseFloorThreshCh = {-1, 0, 0},
 +              .ob = {3, 3, 3}, /* 3 chain */
 +              .db_stage2 = {3, 3, 3}, /* 3 chain */
 +              .db_stage3 = {3, 3, 3}, /* doesn't exist for 2G */
 +              .db_stage4 = {3, 3, 3},  /* don't exist for 2G */
 +              .xpaBiasLvl = 0,
 +              .txFrameToDataStart = 0x0e,
 +              .txFrameToPaOn = 0x0e,
 +              .txClip = 3, /* 4 bits tx_clip, 4 bits dac_scale_cck */
 +              .antennaGain = 0,
 +              .switchSettling = 0x2d,
 +              .adcDesiredSize = -30,
 +              .txEndToXpaOff = 0,
 +              .txEndToRxOn = 0x2,
 +              .txFrameToXpaOn = 0xe,
 +              .thresh62 = 28,
 +              .papdRateMaskHt20 = LE32(0x0c80c080),
 +              .papdRateMaskHt40 = LE32(0x0080c080),
 +              .futureModal = {
 +                      0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 +              },
 +       },
 +      .base_ext2 = {
 +              .tempSlopeLow = 0,
 +              .tempSlopeHigh = 0,
 +              .xatten1DBLow = {0, 0, 0},
 +              .xatten1MarginLow = {0, 0, 0},
 +              .xatten1DBHigh = {0, 0, 0},
 +              .xatten1MarginHigh = {0, 0, 0}
 +      },
 +      .calFreqPier5G = {
 +              FREQ2FBIN(5180, 0),
 +              FREQ2FBIN(5220, 0),
 +              FREQ2FBIN(5320, 0),
 +              FREQ2FBIN(5400, 0),
 +              FREQ2FBIN(5500, 0),
 +              FREQ2FBIN(5600, 0),
 +              FREQ2FBIN(5725, 0),
 +              FREQ2FBIN(5825, 0)
 +      },
 +      .calPierData5G = {
 +                      {
 +                              {0, 0, 0, 0, 0},
 +                              {0, 0, 0, 0, 0},
 +                              {0, 0, 0, 0, 0},
 +                              {0, 0, 0, 0, 0},
 +                              {0, 0, 0, 0, 0},
 +                              {0, 0, 0, 0, 0},
 +                              {0, 0, 0, 0, 0},
 +                              {0, 0, 0, 0, 0},
 +                      },
 +                      {
 +                              {0, 0, 0, 0, 0},
 +                              {0, 0, 0, 0, 0},
 +                              {0, 0, 0, 0, 0},
 +                              {0, 0, 0, 0, 0},
 +                              {0, 0, 0, 0, 0},
 +                              {0, 0, 0, 0, 0},
 +                              {0, 0, 0, 0, 0},
 +                              {0, 0, 0, 0, 0},
 +                      },
 +                      {
 +                              {0, 0, 0, 0, 0},
 +                              {0, 0, 0, 0, 0},
 +                              {0, 0, 0, 0, 0},
 +                              {0, 0, 0, 0, 0},
 +                              {0, 0, 0, 0, 0},
 +                              {0, 0, 0, 0, 0},
 +                              {0, 0, 0, 0, 0},
 +                              {0, 0, 0, 0, 0},
 +                      },
 +
 +      },
 +      .calTarget_freqbin_5G = {
 +              FREQ2FBIN(5180, 0),
 +              FREQ2FBIN(5220, 0),
 +              FREQ2FBIN(5320, 0),
 +              FREQ2FBIN(5400, 0),
 +              FREQ2FBIN(5500, 0),
 +              FREQ2FBIN(5600, 0),
 +              FREQ2FBIN(5725, 0),
 +              FREQ2FBIN(5825, 0)
 +      },
 +      .calTarget_freqbin_5GHT20 = {
 +              FREQ2FBIN(5180, 0),
 +              FREQ2FBIN(5240, 0),
 +              FREQ2FBIN(5320, 0),
 +              FREQ2FBIN(5500, 0),
 +              FREQ2FBIN(5700, 0),
 +              FREQ2FBIN(5745, 0),
 +              FREQ2FBIN(5725, 0),
 +              FREQ2FBIN(5825, 0)
 +      },
 +      .calTarget_freqbin_5GHT40 = {
 +              FREQ2FBIN(5180, 0),
 +              FREQ2FBIN(5240, 0),
 +              FREQ2FBIN(5320, 0),
 +              FREQ2FBIN(5500, 0),
 +              FREQ2FBIN(5700, 0),
 +              FREQ2FBIN(5745, 0),
 +              FREQ2FBIN(5725, 0),
 +              FREQ2FBIN(5825, 0)
 +       },
 +      .calTargetPower5G = {
 +              /* 6-24,36,48,54 */
 +              { {20, 20, 20, 10} },
 +              { {20, 20, 20, 10} },
 +              { {20, 20, 20, 10} },
 +              { {20, 20, 20, 10} },
 +              { {20, 20, 20, 10} },
 +              { {20, 20, 20, 10} },
 +              { {20, 20, 20, 10} },
 +              { {20, 20, 20, 10} },
 +       },
 +      .calTargetPower5GHT20 = {
 +              /*
 +               * 0_8_16,1-3_9-11_17-19,
 +               * 4,5,6,7,12,13,14,15,20,21,22,23
 +               */
 +              { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
 +              { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
 +              { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
 +              { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
 +              { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
 +              { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
 +              { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
 +              { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
 +       },
 +      .calTargetPower5GHT40 =  {
 +              /*
 +               * 0_8_16,1-3_9-11_17-19,
 +               * 4,5,6,7,12,13,14,15,20,21,22,23
 +               */
 +              { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
 +              { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
 +              { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
 +              { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
 +              { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
 +              { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
 +              { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
 +              { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
 +       },
 +      .ctlIndex_5G =  {
 +              0x10, 0x16, 0x18, 0x40, 0x46,
 +              0x48, 0x30, 0x36, 0x38
 +      },
 +      .ctl_freqbin_5G =  {
 +              {
 +                      /* Data[0].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
 +                      /* Data[0].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
 +                      /* Data[0].ctlEdges[2].bChannel */ FREQ2FBIN(5280, 0),
 +                      /* Data[0].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0),
 +                      /* Data[0].ctlEdges[4].bChannel */ FREQ2FBIN(5600, 0),
 +                      /* Data[0].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
 +                      /* Data[0].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0),
 +                      /* Data[0].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0)
 +              },
 +              {
 +                      /* Data[1].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
 +                      /* Data[1].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
 +                      /* Data[1].ctlEdges[2].bChannel */ FREQ2FBIN(5280, 0),
 +                      /* Data[1].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0),
 +                      /* Data[1].ctlEdges[4].bChannel */ FREQ2FBIN(5520, 0),
 +                      /* Data[1].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
 +                      /* Data[1].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0),
 +                      /* Data[1].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0)
 +              },
 +
 +              {
 +                      /* Data[2].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0),
 +                      /* Data[2].ctlEdges[1].bChannel */ FREQ2FBIN(5230, 0),
 +                      /* Data[2].ctlEdges[2].bChannel */ FREQ2FBIN(5270, 0),
 +                      /* Data[2].ctlEdges[3].bChannel */ FREQ2FBIN(5310, 0),
 +                      /* Data[2].ctlEdges[4].bChannel */ FREQ2FBIN(5510, 0),
 +                      /* Data[2].ctlEdges[5].bChannel */ FREQ2FBIN(5550, 0),
 +                      /* Data[2].ctlEdges[6].bChannel */ FREQ2FBIN(5670, 0),
 +                      /* Data[2].ctlEdges[7].bChannel */ FREQ2FBIN(5755, 0)
 +              },
 +
 +              {
 +                      /* Data[3].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
 +                      /* Data[3].ctlEdges[1].bChannel */ FREQ2FBIN(5200, 0),
 +                      /* Data[3].ctlEdges[2].bChannel */ FREQ2FBIN(5260, 0),
 +                      /* Data[3].ctlEdges[3].bChannel */ FREQ2FBIN(5320, 0),
 +                      /* Data[3].ctlEdges[4].bChannel */ FREQ2FBIN(5500, 0),
 +                      /* Data[3].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
 +                      /* Data[3].ctlEdges[6].bChannel */ 0xFF,
 +                      /* Data[3].ctlEdges[7].bChannel */ 0xFF,
 +              },
 +
 +              {
 +                      /* Data[4].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
 +                      /* Data[4].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
 +                      /* Data[4].ctlEdges[2].bChannel */ FREQ2FBIN(5500, 0),
 +                      /* Data[4].ctlEdges[3].bChannel */ FREQ2FBIN(5700, 0),
 +                      /* Data[4].ctlEdges[4].bChannel */ 0xFF,
 +                      /* Data[4].ctlEdges[5].bChannel */ 0xFF,
 +                      /* Data[4].ctlEdges[6].bChannel */ 0xFF,
 +                      /* Data[4].ctlEdges[7].bChannel */ 0xFF,
 +              },
 +
 +              {
 +                      /* Data[5].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0),
 +                      /* Data[5].ctlEdges[1].bChannel */ FREQ2FBIN(5270, 0),
 +                      /* Data[5].ctlEdges[2].bChannel */ FREQ2FBIN(5310, 0),
 +                      /* Data[5].ctlEdges[3].bChannel */ FREQ2FBIN(5510, 0),
 +                      /* Data[5].ctlEdges[4].bChannel */ FREQ2FBIN(5590, 0),
 +                      /* Data[5].ctlEdges[5].bChannel */ FREQ2FBIN(5670, 0),
 +                      /* Data[5].ctlEdges[6].bChannel */ 0xFF,
 +                      /* Data[5].ctlEdges[7].bChannel */ 0xFF
 +              },
 +
 +              {
 +                      /* Data[6].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
 +                      /* Data[6].ctlEdges[1].bChannel */ FREQ2FBIN(5200, 0),
 +                      /* Data[6].ctlEdges[2].bChannel */ FREQ2FBIN(5220, 0),
 +                      /* Data[6].ctlEdges[3].bChannel */ FREQ2FBIN(5260, 0),
 +                      /* Data[6].ctlEdges[4].bChannel */ FREQ2FBIN(5500, 0),
 +                      /* Data[6].ctlEdges[5].bChannel */ FREQ2FBIN(5600, 0),
 +                      /* Data[6].ctlEdges[6].bChannel */ FREQ2FBIN(5700, 0),
 +                      /* Data[6].ctlEdges[7].bChannel */ FREQ2FBIN(5745, 0)
 +              },
 +
 +              {
 +                      /* Data[7].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
 +                      /* Data[7].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
 +                      /* Data[7].ctlEdges[2].bChannel */ FREQ2FBIN(5320, 0),
 +                      /* Data[7].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0),
 +                      /* Data[7].ctlEdges[4].bChannel */ FREQ2FBIN(5560, 0),
 +                      /* Data[7].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
 +                      /* Data[7].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0),
 +                      /* Data[7].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0)
 +              },
 +
 +              {
 +                      /* Data[8].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0),
 +                      /* Data[8].ctlEdges[1].bChannel */ FREQ2FBIN(5230, 0),
 +                      /* Data[8].ctlEdges[2].bChannel */ FREQ2FBIN(5270, 0),
 +                      /* Data[8].ctlEdges[3].bChannel */ FREQ2FBIN(5510, 0),
 +                      /* Data[8].ctlEdges[4].bChannel */ FREQ2FBIN(5550, 0),
 +                      /* Data[8].ctlEdges[5].bChannel */ FREQ2FBIN(5670, 0),
 +                      /* Data[8].ctlEdges[6].bChannel */ FREQ2FBIN(5755, 0),
 +                      /* Data[8].ctlEdges[7].bChannel */ FREQ2FBIN(5795, 0)
 +              }
 +       },
 +      .ctlPowerData_5G = {
 +              {
 +                      {
-                               {60, 1}, {60, 1}, {60, 1}, {60, 1},
-                               {60, 1}, {60, 1}, {60, 1}, {60, 0},
++                              CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
++                              CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
 +                      }
 +              },
 +              {
 +                      {
-                               {60, 0}, {60, 1}, {60, 0}, {60, 1},
-                               {60, 1}, {60, 1}, {60, 1}, {60, 1},
++                              CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
++                              CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
 +                      }
 +              },
 +              {
 +                      {
-                               {60, 0}, {60, 1}, {60, 1}, {60, 0},
-                               {60, 1}, {60, 0}, {60, 0}, {60, 0},
++                              CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 1),
++                              CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
 +                      }
 +              },
 +              {
 +                      {
-                               {60, 1}, {60, 1}, {60, 1}, {60, 0},
-                               {60, 0}, {60, 0}, {60, 0}, {60, 0},
++                              CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 0),
++                              CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0),
 +                      }
 +              },
 +              {
 +                      {
-                               {60, 1}, {60, 1}, {60, 1}, {60, 1},
-                               {60, 1}, {60, 0}, {60, 0}, {60, 0},
++                              CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
++                              CTL(60, 0), CTL(60, 0), CTL(60, 0), CTL(60, 0),
 +                      }
 +              },
 +              {
 +                      {
-                               {60, 1}, {60, 1}, {60, 1}, {60, 1},
-                               {60, 1}, {60, 1}, {60, 1}, {60, 1},
++                              CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
++                              CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0),
 +                      }
 +              },
 +              {
 +                      {
-                               {60, 1}, {60, 1}, {60, 0}, {60, 1},
-                               {60, 1}, {60, 1}, {60, 1}, {60, 0},
++                              CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
++                              CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
 +                      }
 +              },
 +              {
 +                      {
-                               {60, 1}, {60, 0}, {60, 1}, {60, 1},
-                               {60, 1}, {60, 1}, {60, 0}, {60, 1},
++                              CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1),
++                              CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
 +                      }
 +              },
 +              {
 +                      {
-                { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
-                { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
-                { { {60, 1}, {60, 0}, {60, 0}, {60, 1} } },
++                              CTL(60, 1), CTL(60, 0), CTL(60, 1), CTL(60, 1),
++                              CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1),
 +                      }
 +              },
 +       }
 +};
 +
 +static const struct ar9300_eeprom ar9300_x113 = {
 +      .eepromVersion = 2,
 +      .templateVersion = 6,
 +      .macAddr = {0x00, 0x03, 0x7f, 0x0, 0x0, 0x0},
 +      .custData = {"x113-023-f0000"},
 +      .baseEepHeader = {
 +              .regDmn = { LE16(0), LE16(0x1f) },
 +              .txrxMask =  0x77, /* 4 bits tx and 4 bits rx */
 +              .opCapFlags = {
 +                      .opFlags = AR9300_OPFLAGS_11G | AR9300_OPFLAGS_11A,
 +                      .eepMisc = 0,
 +              },
 +              .rfSilent = 0,
 +              .blueToothOptions = 0,
 +              .deviceCap = 0,
 +              .deviceType = 5, /* takes lower byte in eeprom location */
 +              .pwrTableOffset = AR9300_PWR_TABLE_OFFSET,
 +              .params_for_tuning_caps = {0, 0},
 +              .featureEnable = 0x0d,
 +               /*
 +                * bit0 - enable tx temp comp - disabled
 +                * bit1 - enable tx volt comp - disabled
 +                * bit2 - enable fastClock - enabled
 +                * bit3 - enable doubling - enabled
 +                * bit4 - enable internal regulator - disabled
 +                * bit5 - enable pa predistortion - disabled
 +                */
 +              .miscConfiguration = 0, /* bit0 - turn down drivestrength */
 +              .eepromWriteEnableGpio = 6,
 +              .wlanDisableGpio = 0,
 +              .wlanLedGpio = 8,
 +              .rxBandSelectGpio = 0xff,
 +              .txrxgain = 0x21,
 +              .swreg = 0,
 +       },
 +      .modalHeader2G = {
 +      /* ar9300_modal_eep_header  2g */
 +              /* 4 idle,t1,t2,b(4 bits per setting) */
 +              .antCtrlCommon = LE32(0x110),
 +              /* 4 ra1l1, ra2l1, ra1l2, ra2l2, ra12 */
 +              .antCtrlCommon2 = LE32(0x44444),
 +
 +              /*
 +               * antCtrlChain[AR9300_MAX_CHAINS]; 6 idle, t, r,
 +               * rx1, rx12, b (2 bits each)
 +               */
 +              .antCtrlChain = { LE16(0x150), LE16(0x150), LE16(0x150) },
 +
 +              /*
 +               * xatten1DB[AR9300_MAX_CHAINS];  3 xatten1_db
 +               * for ar9280 (0xa20c/b20c 5:0)
 +               */
 +              .xatten1DB = {0, 0, 0},
 +
 +              /*
 +               * xatten1Margin[AR9300_MAX_CHAINS]; 3 xatten1_margin
 +               * for ar9280 (0xa20c/b20c 16:12
 +               */
 +              .xatten1Margin = {0, 0, 0},
 +              .tempSlope = 25,
 +              .voltSlope = 0,
 +
 +              /*
 +               * spurChans[OSPREY_EEPROM_MODAL_SPURS]; spur
 +               * channels in usual fbin coding format
 +               */
 +              .spurChans = {FREQ2FBIN(2464, 1), 0, 0, 0, 0},
 +
 +              /*
 +               * noiseFloorThreshCh[AR9300_MAX_CHAINS]; 3 Check
 +               * if the register is per chain
 +               */
 +              .noiseFloorThreshCh = {-1, 0, 0},
 +              .ob = {1, 1, 1},/* 3 chain */
 +              .db_stage2 = {1, 1, 1}, /* 3 chain  */
 +              .db_stage3 = {0, 0, 0},
 +              .db_stage4 = {0, 0, 0},
 +              .xpaBiasLvl = 0,
 +              .txFrameToDataStart = 0x0e,
 +              .txFrameToPaOn = 0x0e,
 +              .txClip = 3, /* 4 bits tx_clip, 4 bits dac_scale_cck */
 +              .antennaGain = 0,
 +              .switchSettling = 0x2c,
 +              .adcDesiredSize = -30,
 +              .txEndToXpaOff = 0,
 +              .txEndToRxOn = 0x2,
 +              .txFrameToXpaOn = 0xe,
 +              .thresh62 = 28,
 +              .papdRateMaskHt20 = LE32(0x0c80c080),
 +              .papdRateMaskHt40 = LE32(0x0080c080),
 +              .futureModal = {
 +                      0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 +              },
 +       },
 +       .base_ext1 = {
 +              .ant_div_control = 0,
 +              .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
 +       },
 +      .calFreqPier2G = {
 +              FREQ2FBIN(2412, 1),
 +              FREQ2FBIN(2437, 1),
 +              FREQ2FBIN(2472, 1),
 +       },
 +      /* ar9300_cal_data_per_freq_op_loop 2g */
 +      .calPierData2G = {
 +              { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} },
 +              { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} },
 +              { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} },
 +       },
 +      .calTarget_freqbin_Cck = {
 +              FREQ2FBIN(2412, 1),
 +              FREQ2FBIN(2472, 1),
 +       },
 +      .calTarget_freqbin_2G = {
 +              FREQ2FBIN(2412, 1),
 +              FREQ2FBIN(2437, 1),
 +              FREQ2FBIN(2472, 1)
 +       },
 +      .calTarget_freqbin_2GHT20 = {
 +              FREQ2FBIN(2412, 1),
 +              FREQ2FBIN(2437, 1),
 +              FREQ2FBIN(2472, 1)
 +       },
 +      .calTarget_freqbin_2GHT40 = {
 +              FREQ2FBIN(2412, 1),
 +              FREQ2FBIN(2437, 1),
 +              FREQ2FBIN(2472, 1)
 +       },
 +      .calTargetPowerCck = {
 +               /* 1L-5L,5S,11L,11S */
 +               { {34, 34, 34, 34} },
 +               { {34, 34, 34, 34} },
 +      },
 +      .calTargetPower2G = {
 +               /* 6-24,36,48,54 */
 +               { {34, 34, 32, 32} },
 +               { {34, 34, 32, 32} },
 +               { {34, 34, 32, 32} },
 +      },
 +      .calTargetPower2GHT20 = {
 +              { {32, 32, 32, 32, 32, 28, 32, 32, 30, 28, 0, 0, 0, 0} },
 +              { {32, 32, 32, 32, 32, 28, 32, 32, 30, 28, 0, 0, 0, 0} },
 +              { {32, 32, 32, 32, 32, 28, 32, 32, 30, 28, 0, 0, 0, 0} },
 +      },
 +      .calTargetPower2GHT40 = {
 +              { {30, 30, 30, 30, 30, 28, 30, 30, 28, 26, 0, 0, 0, 0} },
 +              { {30, 30, 30, 30, 30, 28, 30, 30, 28, 26, 0, 0, 0, 0} },
 +              { {30, 30, 30, 30, 30, 28, 30, 30, 28, 26, 0, 0, 0, 0} },
 +      },
 +      .ctlIndex_2G =  {
 +              0x11, 0x12, 0x15, 0x17, 0x41, 0x42,
 +              0x45, 0x47, 0x31, 0x32, 0x35, 0x37,
 +      },
 +      .ctl_freqbin_2G = {
 +              {
 +                      FREQ2FBIN(2412, 1),
 +                      FREQ2FBIN(2417, 1),
 +                      FREQ2FBIN(2457, 1),
 +                      FREQ2FBIN(2462, 1)
 +              },
 +              {
 +                      FREQ2FBIN(2412, 1),
 +                      FREQ2FBIN(2417, 1),
 +                      FREQ2FBIN(2462, 1),
 +                      0xFF,
 +              },
 +
 +              {
 +                      FREQ2FBIN(2412, 1),
 +                      FREQ2FBIN(2417, 1),
 +                      FREQ2FBIN(2462, 1),
 +                      0xFF,
 +              },
 +              {
 +                      FREQ2FBIN(2422, 1),
 +                      FREQ2FBIN(2427, 1),
 +                      FREQ2FBIN(2447, 1),
 +                      FREQ2FBIN(2452, 1)
 +              },
 +
 +              {
 +                      /* Data[4].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
 +                      /* Data[4].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
 +                      /* Data[4].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
 +                      /* Data[4].ctlEdges[3].bChannel */ FREQ2FBIN(2484, 1),
 +              },
 +
 +              {
 +                      /* Data[5].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
 +                      /* Data[5].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
 +                      /* Data[5].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
 +                      0,
 +              },
 +
 +              {
 +                      /* Data[6].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
 +                      /* Data[6].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
 +                      FREQ2FBIN(2472, 1),
 +                      0,
 +              },
 +
 +              {
 +                      /* Data[7].ctlEdges[0].bChannel */ FREQ2FBIN(2422, 1),
 +                      /* Data[7].ctlEdges[1].bChannel */ FREQ2FBIN(2427, 1),
 +                      /* Data[7].ctlEdges[2].bChannel */ FREQ2FBIN(2447, 1),
 +                      /* Data[7].ctlEdges[3].bChannel */ FREQ2FBIN(2462, 1),
 +              },
 +
 +              {
 +                      /* Data[8].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
 +                      /* Data[8].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
 +                      /* Data[8].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
 +              },
 +
 +              {
 +                      /* Data[9].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
 +                      /* Data[9].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
 +                      /* Data[9].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
 +                      0
 +              },
 +
 +              {
 +                      /* Data[10].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
 +                      /* Data[10].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
 +                      /* Data[10].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
 +                      0
 +              },
 +
 +              {
 +                      /* Data[11].ctlEdges[0].bChannel */ FREQ2FBIN(2422, 1),
 +                      /* Data[11].ctlEdges[1].bChannel */ FREQ2FBIN(2427, 1),
 +                      /* Data[11].ctlEdges[2].bChannel */ FREQ2FBIN(2447, 1),
 +                      /* Data[11].ctlEdges[3].bChannel */ FREQ2FBIN(2462, 1),
 +              }
 +       },
 +      .ctlPowerData_2G = {
-                { { {60, 1}, {60, 0}, {0, 0}, {0, 0} } },
-                { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
-                { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
++               { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
++               { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
++               { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 1) } },
 +
-                { { {60, 0}, {60, 1}, {60, 1}, {60, 0} } },
-                { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
-                { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
++               { { CTL(60, 1), CTL(60, 0), CTL(0, 0), CTL(0, 0) } },
++               { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
++               { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
 +
-                { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
-                { { {60, 0}, {60, 1}, {60, 1}, {60, 1} } },
-                { { {60, 0}, {60, 1}, {60, 1}, {60, 1} } },
++               { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 0) } },
++               { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
++               { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
 +
-                               {60, 1}, {60, 1}, {60, 1}, {60, 1},
-                               {60, 1}, {60, 1}, {60, 1}, {60, 0},
++               { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
++               { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1) } },
++               { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1) } },
 +       },
 +      .modalHeader5G = {
 +              /* 4 idle,t1,t2,b (4 bits per setting) */
 +              .antCtrlCommon = LE32(0x220),
 +              /* 4 ra1l1, ra2l1, ra1l2,ra2l2,ra12 */
 +              .antCtrlCommon2 = LE32(0x11111),
 +               /* antCtrlChain 6 idle, t,r,rx1,rx12,b (2 bits each) */
 +              .antCtrlChain = {
 +                      LE16(0x150), LE16(0x150), LE16(0x150),
 +              },
 +               /* xatten1DB 3 xatten1_db for AR9280 (0xa20c/b20c 5:0) */
 +              .xatten1DB = {0, 0, 0},
 +
 +              /*
 +               * xatten1Margin[AR9300_MAX_CHAINS]; 3 xatten1_margin
 +               * for merlin (0xa20c/b20c 16:12
 +               */
 +              .xatten1Margin = {0, 0, 0},
 +              .tempSlope = 68,
 +              .voltSlope = 0,
 +              /* spurChans spur channels in usual fbin coding format */
 +              .spurChans = {FREQ2FBIN(5500, 0), 0, 0, 0, 0},
 +              /* noiseFloorThreshCh Check if the register is per chain */
 +              .noiseFloorThreshCh = {-1, 0, 0},
 +              .ob = {3, 3, 3}, /* 3 chain */
 +              .db_stage2 = {3, 3, 3}, /* 3 chain */
 +              .db_stage3 = {3, 3, 3}, /* doesn't exist for 2G */
 +              .db_stage4 = {3, 3, 3},  /* don't exist for 2G */
 +              .xpaBiasLvl = 0,
 +              .txFrameToDataStart = 0x0e,
 +              .txFrameToPaOn = 0x0e,
 +              .txClip = 3, /* 4 bits tx_clip, 4 bits dac_scale_cck */
 +              .antennaGain = 0,
 +              .switchSettling = 0x2d,
 +              .adcDesiredSize = -30,
 +              .txEndToXpaOff = 0,
 +              .txEndToRxOn = 0x2,
 +              .txFrameToXpaOn = 0xe,
 +              .thresh62 = 28,
 +              .papdRateMaskHt20 = LE32(0x0cf0e0e0),
 +              .papdRateMaskHt40 = LE32(0x6cf0e0e0),
 +              .futureModal = {
 +                      0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 +              },
 +       },
 +      .base_ext2 = {
 +              .tempSlopeLow = 72,
 +              .tempSlopeHigh = 105,
 +              .xatten1DBLow = {0, 0, 0},
 +              .xatten1MarginLow = {0, 0, 0},
 +              .xatten1DBHigh = {0, 0, 0},
 +              .xatten1MarginHigh = {0, 0, 0}
 +       },
 +      .calFreqPier5G = {
 +              FREQ2FBIN(5180, 0),
 +              FREQ2FBIN(5240, 0),
 +              FREQ2FBIN(5320, 0),
 +              FREQ2FBIN(5400, 0),
 +              FREQ2FBIN(5500, 0),
 +              FREQ2FBIN(5600, 0),
 +              FREQ2FBIN(5745, 0),
 +              FREQ2FBIN(5785, 0)
 +      },
 +      .calPierData5G = {
 +                      {
 +                              {0, 0, 0, 0, 0},
 +                              {0, 0, 0, 0, 0},
 +                              {0, 0, 0, 0, 0},
 +                              {0, 0, 0, 0, 0},
 +                              {0, 0, 0, 0, 0},
 +                              {0, 0, 0, 0, 0},
 +                              {0, 0, 0, 0, 0},
 +                              {0, 0, 0, 0, 0},
 +                      },
 +                      {
 +                              {0, 0, 0, 0, 0},
 +                              {0, 0, 0, 0, 0},
 +                              {0, 0, 0, 0, 0},
 +                              {0, 0, 0, 0, 0},
 +                              {0, 0, 0, 0, 0},
 +                              {0, 0, 0, 0, 0},
 +                              {0, 0, 0, 0, 0},
 +                              {0, 0, 0, 0, 0},
 +                      },
 +                      {
 +                              {0, 0, 0, 0, 0},
 +                              {0, 0, 0, 0, 0},
 +                              {0, 0, 0, 0, 0},
 +                              {0, 0, 0, 0, 0},
 +                              {0, 0, 0, 0, 0},
 +                              {0, 0, 0, 0, 0},
 +                              {0, 0, 0, 0, 0},
 +                              {0, 0, 0, 0, 0},
 +                      },
 +
 +      },
 +      .calTarget_freqbin_5G = {
 +              FREQ2FBIN(5180, 0),
 +              FREQ2FBIN(5220, 0),
 +              FREQ2FBIN(5320, 0),
 +              FREQ2FBIN(5400, 0),
 +              FREQ2FBIN(5500, 0),
 +              FREQ2FBIN(5600, 0),
 +              FREQ2FBIN(5745, 0),
 +              FREQ2FBIN(5785, 0)
 +      },
 +      .calTarget_freqbin_5GHT20 = {
 +              FREQ2FBIN(5180, 0),
 +              FREQ2FBIN(5240, 0),
 +              FREQ2FBIN(5320, 0),
 +              FREQ2FBIN(5400, 0),
 +              FREQ2FBIN(5500, 0),
 +              FREQ2FBIN(5700, 0),
 +              FREQ2FBIN(5745, 0),
 +              FREQ2FBIN(5825, 0)
 +      },
 +      .calTarget_freqbin_5GHT40 = {
 +              FREQ2FBIN(5190, 0),
 +              FREQ2FBIN(5230, 0),
 +              FREQ2FBIN(5320, 0),
 +              FREQ2FBIN(5410, 0),
 +              FREQ2FBIN(5510, 0),
 +              FREQ2FBIN(5670, 0),
 +              FREQ2FBIN(5755, 0),
 +              FREQ2FBIN(5825, 0)
 +       },
 +      .calTargetPower5G = {
 +              /* 6-24,36,48,54 */
 +              { {42, 40, 40, 34} },
 +              { {42, 40, 40, 34} },
 +              { {42, 40, 40, 34} },
 +              { {42, 40, 40, 34} },
 +              { {42, 40, 40, 34} },
 +              { {42, 40, 40, 34} },
 +              { {42, 40, 40, 34} },
 +              { {42, 40, 40, 34} },
 +       },
 +      .calTargetPower5GHT20 = {
 +              /*
 +               * 0_8_16,1-3_9-11_17-19,
 +               * 4,5,6,7,12,13,14,15,20,21,22,23
 +               */
 +              { {40, 40, 40, 40, 32, 28, 40, 40, 32, 28, 40, 40, 32, 20} },
 +              { {40, 40, 40, 40, 32, 28, 40, 40, 32, 28, 40, 40, 32, 20} },
 +              { {40, 40, 40, 40, 32, 28, 40, 40, 32, 28, 40, 40, 32, 20} },
 +              { {40, 40, 40, 40, 32, 28, 40, 40, 32, 28, 40, 40, 32, 20} },
 +              { {40, 40, 40, 40, 32, 28, 40, 40, 32, 28, 40, 40, 32, 20} },
 +              { {40, 40, 40, 40, 32, 28, 40, 40, 32, 28, 40, 40, 32, 20} },
 +              { {38, 38, 38, 38, 32, 28, 38, 38, 32, 28, 38, 38, 32, 26} },
 +              { {36, 36, 36, 36, 32, 28, 36, 36, 32, 28, 36, 36, 32, 26} },
 +       },
 +      .calTargetPower5GHT40 =  {
 +              /*
 +               * 0_8_16,1-3_9-11_17-19,
 +               * 4,5,6,7,12,13,14,15,20,21,22,23
 +               */
 +              { {40, 40, 40, 38, 30, 26, 40, 40, 30, 26, 40, 40, 30, 24} },
 +              { {40, 40, 40, 38, 30, 26, 40, 40, 30, 26, 40, 40, 30, 24} },
 +              { {40, 40, 40, 38, 30, 26, 40, 40, 30, 26, 40, 40, 30, 24} },
 +              { {40, 40, 40, 38, 30, 26, 40, 40, 30, 26, 40, 40, 30, 24} },
 +              { {40, 40, 40, 38, 30, 26, 40, 40, 30, 26, 40, 40, 30, 24} },
 +              { {40, 40, 40, 38, 30, 26, 40, 40, 30, 26, 40, 40, 30, 24} },
 +              { {36, 36, 36, 36, 30, 26, 36, 36, 30, 26, 36, 36, 30, 24} },
 +              { {34, 34, 34, 34, 30, 26, 34, 34, 30, 26, 34, 34, 30, 24} },
 +       },
 +      .ctlIndex_5G =  {
 +              0x10, 0x16, 0x18, 0x40, 0x46,
 +              0x48, 0x30, 0x36, 0x38
 +      },
 +      .ctl_freqbin_5G =  {
 +              {
 +                      /* Data[0].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
 +                      /* Data[0].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
 +                      /* Data[0].ctlEdges[2].bChannel */ FREQ2FBIN(5280, 0),
 +                      /* Data[0].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0),
 +                      /* Data[0].ctlEdges[4].bChannel */ FREQ2FBIN(5600, 0),
 +                      /* Data[0].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
 +                      /* Data[0].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0),
 +                      /* Data[0].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0)
 +              },
 +              {
 +                      /* Data[1].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
 +                      /* Data[1].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
 +                      /* Data[1].ctlEdges[2].bChannel */ FREQ2FBIN(5280, 0),
 +                      /* Data[1].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0),
 +                      /* Data[1].ctlEdges[4].bChannel */ FREQ2FBIN(5520, 0),
 +                      /* Data[1].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
 +                      /* Data[1].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0),
 +                      /* Data[1].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0)
 +              },
 +
 +              {
 +                      /* Data[2].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0),
 +                      /* Data[2].ctlEdges[1].bChannel */ FREQ2FBIN(5230, 0),
 +                      /* Data[2].ctlEdges[2].bChannel */ FREQ2FBIN(5270, 0),
 +                      /* Data[2].ctlEdges[3].bChannel */ FREQ2FBIN(5310, 0),
 +                      /* Data[2].ctlEdges[4].bChannel */ FREQ2FBIN(5510, 0),
 +                      /* Data[2].ctlEdges[5].bChannel */ FREQ2FBIN(5550, 0),
 +                      /* Data[2].ctlEdges[6].bChannel */ FREQ2FBIN(5670, 0),
 +                      /* Data[2].ctlEdges[7].bChannel */ FREQ2FBIN(5755, 0)
 +              },
 +
 +              {
 +                      /* Data[3].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
 +                      /* Data[3].ctlEdges[1].bChannel */ FREQ2FBIN(5200, 0),
 +                      /* Data[3].ctlEdges[2].bChannel */ FREQ2FBIN(5260, 0),
 +                      /* Data[3].ctlEdges[3].bChannel */ FREQ2FBIN(5320, 0),
 +                      /* Data[3].ctlEdges[4].bChannel */ FREQ2FBIN(5500, 0),
 +                      /* Data[3].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
 +                      /* Data[3].ctlEdges[6].bChannel */ 0xFF,
 +                      /* Data[3].ctlEdges[7].bChannel */ 0xFF,
 +              },
 +
 +              {
 +                      /* Data[4].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
 +                      /* Data[4].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
 +                      /* Data[4].ctlEdges[2].bChannel */ FREQ2FBIN(5500, 0),
 +                      /* Data[4].ctlEdges[3].bChannel */ FREQ2FBIN(5700, 0),
 +                      /* Data[4].ctlEdges[4].bChannel */ 0xFF,
 +                      /* Data[4].ctlEdges[5].bChannel */ 0xFF,
 +                      /* Data[4].ctlEdges[6].bChannel */ 0xFF,
 +                      /* Data[4].ctlEdges[7].bChannel */ 0xFF,
 +              },
 +
 +              {
 +                      /* Data[5].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0),
 +                      /* Data[5].ctlEdges[1].bChannel */ FREQ2FBIN(5270, 0),
 +                      /* Data[5].ctlEdges[2].bChannel */ FREQ2FBIN(5310, 0),
 +                      /* Data[5].ctlEdges[3].bChannel */ FREQ2FBIN(5510, 0),
 +                      /* Data[5].ctlEdges[4].bChannel */ FREQ2FBIN(5590, 0),
 +                      /* Data[5].ctlEdges[5].bChannel */ FREQ2FBIN(5670, 0),
 +                      /* Data[5].ctlEdges[6].bChannel */ 0xFF,
 +                      /* Data[5].ctlEdges[7].bChannel */ 0xFF
 +              },
 +
 +              {
 +                      /* Data[6].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
 +                      /* Data[6].ctlEdges[1].bChannel */ FREQ2FBIN(5200, 0),
 +                      /* Data[6].ctlEdges[2].bChannel */ FREQ2FBIN(5220, 0),
 +                      /* Data[6].ctlEdges[3].bChannel */ FREQ2FBIN(5260, 0),
 +                      /* Data[6].ctlEdges[4].bChannel */ FREQ2FBIN(5500, 0),
 +                      /* Data[6].ctlEdges[5].bChannel */ FREQ2FBIN(5600, 0),
 +                      /* Data[6].ctlEdges[6].bChannel */ FREQ2FBIN(5700, 0),
 +                      /* Data[6].ctlEdges[7].bChannel */ FREQ2FBIN(5745, 0)
 +              },
 +
 +              {
 +                      /* Data[7].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
 +                      /* Data[7].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
 +                      /* Data[7].ctlEdges[2].bChannel */ FREQ2FBIN(5320, 0),
 +                      /* Data[7].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0),
 +                      /* Data[7].ctlEdges[4].bChannel */ FREQ2FBIN(5560, 0),
 +                      /* Data[7].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
 +                      /* Data[7].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0),
 +                      /* Data[7].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0)
 +              },
 +
 +              {
 +                      /* Data[8].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0),
 +                      /* Data[8].ctlEdges[1].bChannel */ FREQ2FBIN(5230, 0),
 +                      /* Data[8].ctlEdges[2].bChannel */ FREQ2FBIN(5270, 0),
 +                      /* Data[8].ctlEdges[3].bChannel */ FREQ2FBIN(5510, 0),
 +                      /* Data[8].ctlEdges[4].bChannel */ FREQ2FBIN(5550, 0),
 +                      /* Data[8].ctlEdges[5].bChannel */ FREQ2FBIN(5670, 0),
 +                      /* Data[8].ctlEdges[6].bChannel */ FREQ2FBIN(5755, 0),
 +                      /* Data[8].ctlEdges[7].bChannel */ FREQ2FBIN(5795, 0)
 +              }
 +       },
 +      .ctlPowerData_5G = {
 +              {
 +                      {
-                               {60, 1}, {60, 1}, {60, 1}, {60, 1},
-                               {60, 1}, {60, 1}, {60, 1}, {60, 0},
++                              CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
++                              CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
 +                      }
 +              },
 +              {
 +                      {
-                               {60, 0}, {60, 1}, {60, 0}, {60, 1},
-                               {60, 1}, {60, 1}, {60, 1}, {60, 1},
++                              CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
++                              CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
 +                      }
 +              },
 +              {
 +                      {
-                               {60, 0}, {60, 1}, {60, 1}, {60, 0},
-                               {60, 1}, {60, 0}, {60, 0}, {60, 0},
++                              CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 1),
++                              CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
 +                      }
 +              },
 +              {
 +                      {
-                               {60, 1}, {60, 1}, {60, 1}, {60, 0},
-                               {60, 0}, {60, 0}, {60, 0}, {60, 0},
++                              CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 0),
++                              CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0),
 +                      }
 +              },
 +              {
 +                      {
-                               {60, 1}, {60, 1}, {60, 1}, {60, 1},
-                               {60, 1}, {60, 0}, {60, 0}, {60, 0},
++                              CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
++                              CTL(60, 0), CTL(60, 0), CTL(60, 0), CTL(60, 0),
 +                      }
 +              },
 +              {
 +                      {
-                               {60, 1}, {60, 1}, {60, 1}, {60, 1},
-                               {60, 1}, {60, 1}, {60, 1}, {60, 1},
++                              CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
++                              CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0),
 +                      }
 +              },
 +              {
 +                      {
-                               {60, 1}, {60, 1}, {60, 0}, {60, 1},
-                               {60, 1}, {60, 1}, {60, 1}, {60, 0},
++                              CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
++                              CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
 +                      }
 +              },
 +              {
 +                      {
-                               {60, 1}, {60, 0}, {60, 1}, {60, 1},
-                               {60, 1}, {60, 1}, {60, 0}, {60, 1},
++                              CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1),
++                              CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
 +                      }
 +              },
 +              {
 +                      {
-               { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
-               { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
-               { { {60, 1}, {60, 0}, {60, 0}, {60, 1} } },
++                              CTL(60, 1), CTL(60, 0), CTL(60, 1), CTL(60, 1),
++                              CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1),
 +                      }
 +              },
 +       }
 +};
 +
 +
 +static const struct ar9300_eeprom ar9300_h112 = {
 +      .eepromVersion = 2,
 +      .templateVersion = 3,
 +      .macAddr = {0x00, 0x03, 0x7f, 0x0, 0x0, 0x0},
 +      .custData = {"h112-241-f0000"},
 +      .baseEepHeader = {
 +              .regDmn = { LE16(0), LE16(0x1f) },
 +              .txrxMask =  0x77, /* 4 bits tx and 4 bits rx */
 +              .opCapFlags = {
 +                      .opFlags = AR9300_OPFLAGS_11G | AR9300_OPFLAGS_11A,
 +                      .eepMisc = 0,
 +              },
 +              .rfSilent = 0,
 +              .blueToothOptions = 0,
 +              .deviceCap = 0,
 +              .deviceType = 5, /* takes lower byte in eeprom location */
 +              .pwrTableOffset = AR9300_PWR_TABLE_OFFSET,
 +              .params_for_tuning_caps = {0, 0},
 +              .featureEnable = 0x0d,
 +              /*
 +               * bit0 - enable tx temp comp - disabled
 +               * bit1 - enable tx volt comp - disabled
 +               * bit2 - enable fastClock - enabled
 +               * bit3 - enable doubling - enabled
 +               * bit4 - enable internal regulator - disabled
 +               * bit5 - enable pa predistortion - disabled
 +               */
 +              .miscConfiguration = 0, /* bit0 - turn down drivestrength */
 +              .eepromWriteEnableGpio = 6,
 +              .wlanDisableGpio = 0,
 +              .wlanLedGpio = 8,
 +              .rxBandSelectGpio = 0xff,
 +              .txrxgain = 0x10,
 +              .swreg = 0,
 +      },
 +      .modalHeader2G = {
 +              /* ar9300_modal_eep_header  2g */
 +              /* 4 idle,t1,t2,b(4 bits per setting) */
 +              .antCtrlCommon = LE32(0x110),
 +              /* 4 ra1l1, ra2l1, ra1l2, ra2l2, ra12 */
 +              .antCtrlCommon2 = LE32(0x44444),
 +
 +              /*
 +               * antCtrlChain[AR9300_MAX_CHAINS]; 6 idle, t, r,
 +               * rx1, rx12, b (2 bits each)
 +               */
 +              .antCtrlChain = { LE16(0x150), LE16(0x150), LE16(0x150) },
 +
 +              /*
 +               * xatten1DB[AR9300_MAX_CHAINS];  3 xatten1_db
 +               * for ar9280 (0xa20c/b20c 5:0)
 +               */
 +              .xatten1DB = {0, 0, 0},
 +
 +              /*
 +               * xatten1Margin[AR9300_MAX_CHAINS]; 3 xatten1_margin
 +               * for ar9280 (0xa20c/b20c 16:12
 +               */
 +              .xatten1Margin = {0, 0, 0},
 +              .tempSlope = 25,
 +              .voltSlope = 0,
 +
 +              /*
 +               * spurChans[OSPREY_EEPROM_MODAL_SPURS]; spur
 +               * channels in usual fbin coding format
 +               */
 +              .spurChans = {FREQ2FBIN(2464, 1), 0, 0, 0, 0},
 +
 +              /*
 +               * noiseFloorThreshCh[AR9300_MAX_CHAINS]; 3 Check
 +               * if the register is per chain
 +               */
 +              .noiseFloorThreshCh = {-1, 0, 0},
 +              .ob = {1, 1, 1},/* 3 chain */
 +              .db_stage2 = {1, 1, 1}, /* 3 chain  */
 +              .db_stage3 = {0, 0, 0},
 +              .db_stage4 = {0, 0, 0},
 +              .xpaBiasLvl = 0,
 +              .txFrameToDataStart = 0x0e,
 +              .txFrameToPaOn = 0x0e,
 +              .txClip = 3, /* 4 bits tx_clip, 4 bits dac_scale_cck */
 +              .antennaGain = 0,
 +              .switchSettling = 0x2c,
 +              .adcDesiredSize = -30,
 +              .txEndToXpaOff = 0,
 +              .txEndToRxOn = 0x2,
 +              .txFrameToXpaOn = 0xe,
 +              .thresh62 = 28,
 +              .papdRateMaskHt20 = LE32(0x80c080),
 +              .papdRateMaskHt40 = LE32(0x80c080),
 +              .futureModal = {
 +                      0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 +              },
 +      },
 +      .base_ext1 = {
 +              .ant_div_control = 0,
 +              .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
 +      },
 +      .calFreqPier2G = {
 +              FREQ2FBIN(2412, 1),
 +              FREQ2FBIN(2437, 1),
 +              FREQ2FBIN(2472, 1),
 +      },
 +      /* ar9300_cal_data_per_freq_op_loop 2g */
 +      .calPierData2G = {
 +              { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} },
 +              { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} },
 +              { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} },
 +      },
 +      .calTarget_freqbin_Cck = {
 +              FREQ2FBIN(2412, 1),
 +              FREQ2FBIN(2484, 1),
 +      },
 +      .calTarget_freqbin_2G = {
 +              FREQ2FBIN(2412, 1),
 +              FREQ2FBIN(2437, 1),
 +              FREQ2FBIN(2472, 1)
 +      },
 +      .calTarget_freqbin_2GHT20 = {
 +              FREQ2FBIN(2412, 1),
 +              FREQ2FBIN(2437, 1),
 +              FREQ2FBIN(2472, 1)
 +      },
 +      .calTarget_freqbin_2GHT40 = {
 +              FREQ2FBIN(2412, 1),
 +              FREQ2FBIN(2437, 1),
 +              FREQ2FBIN(2472, 1)
 +      },
 +      .calTargetPowerCck = {
 +              /* 1L-5L,5S,11L,11S */
 +              { {34, 34, 34, 34} },
 +              { {34, 34, 34, 34} },
 +      },
 +      .calTargetPower2G = {
 +              /* 6-24,36,48,54 */
 +              { {34, 34, 32, 32} },
 +              { {34, 34, 32, 32} },
 +              { {34, 34, 32, 32} },
 +      },
 +      .calTargetPower2GHT20 = {
 +              { {32, 32, 32, 32, 32, 30, 32, 32, 30, 28, 28, 28, 28, 24} },
 +              { {32, 32, 32, 32, 32, 30, 32, 32, 30, 28, 28, 28, 28, 24} },
 +              { {32, 32, 32, 32, 32, 30, 32, 32, 30, 28, 28, 28, 28, 24} },
 +      },
 +      .calTargetPower2GHT40 = {
 +              { {30, 30, 30, 30, 30, 28, 30, 30, 28, 26, 26, 26, 26, 22} },
 +              { {30, 30, 30, 30, 30, 28, 30, 30, 28, 26, 26, 26, 26, 22} },
 +              { {30, 30, 30, 30, 30, 28, 30, 30, 28, 26, 26, 26, 26, 22} },
 +      },
 +      .ctlIndex_2G =  {
 +              0x11, 0x12, 0x15, 0x17, 0x41, 0x42,
 +              0x45, 0x47, 0x31, 0x32, 0x35, 0x37,
 +      },
 +      .ctl_freqbin_2G = {
 +              {
 +                      FREQ2FBIN(2412, 1),
 +                      FREQ2FBIN(2417, 1),
 +                      FREQ2FBIN(2457, 1),
 +                      FREQ2FBIN(2462, 1)
 +              },
 +              {
 +                      FREQ2FBIN(2412, 1),
 +                      FREQ2FBIN(2417, 1),
 +                      FREQ2FBIN(2462, 1),
 +                      0xFF,
 +              },
 +
 +              {
 +                      FREQ2FBIN(2412, 1),
 +                      FREQ2FBIN(2417, 1),
 +                      FREQ2FBIN(2462, 1),
 +                      0xFF,
 +              },
 +              {
 +                      FREQ2FBIN(2422, 1),
 +                      FREQ2FBIN(2427, 1),
 +                      FREQ2FBIN(2447, 1),
 +                      FREQ2FBIN(2452, 1)
 +              },
 +
 +              {
 +                      /* Data[4].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
 +                      /* Data[4].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
 +                      /* Data[4].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
 +                      /* Data[4].ctlEdges[3].bChannel */ FREQ2FBIN(2484, 1),
 +              },
 +
 +              {
 +                      /* Data[5].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
 +                      /* Data[5].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
 +                      /* Data[5].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
 +                      0,
 +              },
 +
 +              {
 +                      /* Data[6].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
 +                      /* Data[6].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
 +                      FREQ2FBIN(2472, 1),
 +                      0,
 +              },
 +
 +              {
 +                      /* Data[7].ctlEdges[0].bChannel */ FREQ2FBIN(2422, 1),
 +                      /* Data[7].ctlEdges[1].bChannel */ FREQ2FBIN(2427, 1),
 +                      /* Data[7].ctlEdges[2].bChannel */ FREQ2FBIN(2447, 1),
 +                      /* Data[7].ctlEdges[3].bChannel */ FREQ2FBIN(2462, 1),
 +              },
 +
 +              {
 +                      /* Data[8].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
 +                      /* Data[8].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
 +                      /* Data[8].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
 +              },
 +
 +              {
 +                      /* Data[9].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
 +                      /* Data[9].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
 +                      /* Data[9].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
 +                      0
 +              },
 +
 +              {
 +                      /* Data[10].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
 +                      /* Data[10].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
 +                      /* Data[10].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
 +                      0
 +              },
 +
 +              {
 +                      /* Data[11].ctlEdges[0].bChannel */ FREQ2FBIN(2422, 1),
 +                      /* Data[11].ctlEdges[1].bChannel */ FREQ2FBIN(2427, 1),
 +                      /* Data[11].ctlEdges[2].bChannel */ FREQ2FBIN(2447, 1),
 +                      /* Data[11].ctlEdges[3].bChannel */ FREQ2FBIN(2462, 1),
 +              }
 +      },
 +      .ctlPowerData_2G = {
-               { { {60, 1}, {60, 0}, {0, 0}, {0, 0} } },
-               { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
-               { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
++              { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
++              { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
++              { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 1) } },
 +
-               { { {60, 0}, {60, 1}, {60, 1}, {60, 0} } },
-               { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
-               { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
++              { { CTL(60, 1), CTL(60, 0), CTL(0, 0), CTL(0, 0) } },
++              { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
++              { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
 +
-               { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
-               { { {60, 0}, {60, 1}, {60, 1}, {60, 1} } },
-               { { {60, 0}, {60, 1}, {60, 1}, {60, 1} } },
++              { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 0) } },
++              { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
++              { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
 +
-                               {60, 1}, {60, 1}, {60, 1}, {60, 1},
-                               {60, 1}, {60, 1}, {60, 1}, {60, 0},
++              { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
++              { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1) } },
++              { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1) } },
 +      },
 +      .modalHeader5G = {
 +              /* 4 idle,t1,t2,b (4 bits per setting) */
 +              .antCtrlCommon = LE32(0x220),
 +              /* 4 ra1l1, ra2l1, ra1l2,ra2l2,ra12 */
 +              .antCtrlCommon2 = LE32(0x44444),
 +              /* antCtrlChain 6 idle, t,r,rx1,rx12,b (2 bits each) */
 +              .antCtrlChain = {
 +                      LE16(0x150), LE16(0x150), LE16(0x150),
 +              },
 +              /* xatten1DB 3 xatten1_db for AR9280 (0xa20c/b20c 5:0) */
 +              .xatten1DB = {0, 0, 0},
 +
 +              /*
 +               * xatten1Margin[AR9300_MAX_CHAINS]; 3 xatten1_margin
 +               * for merlin (0xa20c/b20c 16:12
 +               */
 +              .xatten1Margin = {0, 0, 0},
 +              .tempSlope = 45,
 +              .voltSlope = 0,
 +              /* spurChans spur channels in usual fbin coding format */
 +              .spurChans = {0, 0, 0, 0, 0},
 +              /* noiseFloorThreshCh Check if the register is per chain */
 +              .noiseFloorThreshCh = {-1, 0, 0},
 +              .ob = {3, 3, 3}, /* 3 chain */
 +              .db_stage2 = {3, 3, 3}, /* 3 chain */
 +              .db_stage3 = {3, 3, 3}, /* doesn't exist for 2G */
 +              .db_stage4 = {3, 3, 3},  /* don't exist for 2G */
 +              .xpaBiasLvl = 0,
 +              .txFrameToDataStart = 0x0e,
 +              .txFrameToPaOn = 0x0e,
 +              .txClip = 3, /* 4 bits tx_clip, 4 bits dac_scale_cck */
 +              .antennaGain = 0,
 +              .switchSettling = 0x2d,
 +              .adcDesiredSize = -30,
 +              .txEndToXpaOff = 0,
 +              .txEndToRxOn = 0x2,
 +              .txFrameToXpaOn = 0xe,
 +              .thresh62 = 28,
 +              .papdRateMaskHt20 = LE32(0x0cf0e0e0),
 +              .papdRateMaskHt40 = LE32(0x6cf0e0e0),
 +              .futureModal = {
 +                      0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 +              },
 +      },
 +      .base_ext2 = {
 +              .tempSlopeLow = 40,
 +              .tempSlopeHigh = 50,
 +              .xatten1DBLow = {0, 0, 0},
 +              .xatten1MarginLow = {0, 0, 0},
 +              .xatten1DBHigh = {0, 0, 0},
 +              .xatten1MarginHigh = {0, 0, 0}
 +      },
 +      .calFreqPier5G = {
 +              FREQ2FBIN(5180, 0),
 +              FREQ2FBIN(5220, 0),
 +              FREQ2FBIN(5320, 0),
 +              FREQ2FBIN(5400, 0),
 +              FREQ2FBIN(5500, 0),
 +              FREQ2FBIN(5600, 0),
 +              FREQ2FBIN(5700, 0),
 +              FREQ2FBIN(5825, 0)
 +      },
 +      .calPierData5G = {
 +              {
 +                      {0, 0, 0, 0, 0},
 +                      {0, 0, 0, 0, 0},
 +                      {0, 0, 0, 0, 0},
 +                      {0, 0, 0, 0, 0},
 +                      {0, 0, 0, 0, 0},
 +                      {0, 0, 0, 0, 0},
 +                      {0, 0, 0, 0, 0},
 +                      {0, 0, 0, 0, 0},
 +              },
 +              {
 +                      {0, 0, 0, 0, 0},
 +                      {0, 0, 0, 0, 0},
 +                      {0, 0, 0, 0, 0},
 +                      {0, 0, 0, 0, 0},
 +                      {0, 0, 0, 0, 0},
 +                      {0, 0, 0, 0, 0},
 +                      {0, 0, 0, 0, 0},
 +                      {0, 0, 0, 0, 0},
 +              },
 +              {
 +                      {0, 0, 0, 0, 0},
 +                      {0, 0, 0, 0, 0},
 +                      {0, 0, 0, 0, 0},
 +                      {0, 0, 0, 0, 0},
 +                      {0, 0, 0, 0, 0},
 +                      {0, 0, 0, 0, 0},
 +                      {0, 0, 0, 0, 0},
 +                      {0, 0, 0, 0, 0},
 +              },
 +
 +      },
 +      .calTarget_freqbin_5G = {
 +              FREQ2FBIN(5180, 0),
 +              FREQ2FBIN(5240, 0),
 +              FREQ2FBIN(5320, 0),
 +              FREQ2FBIN(5400, 0),
 +              FREQ2FBIN(5500, 0),
 +              FREQ2FBIN(5600, 0),
 +              FREQ2FBIN(5700, 0),
 +              FREQ2FBIN(5825, 0)
 +      },
 +      .calTarget_freqbin_5GHT20 = {
 +              FREQ2FBIN(5180, 0),
 +              FREQ2FBIN(5240, 0),
 +              FREQ2FBIN(5320, 0),
 +              FREQ2FBIN(5400, 0),
 +              FREQ2FBIN(5500, 0),
 +              FREQ2FBIN(5700, 0),
 +              FREQ2FBIN(5745, 0),
 +              FREQ2FBIN(5825, 0)
 +      },
 +      .calTarget_freqbin_5GHT40 = {
 +              FREQ2FBIN(5180, 0),
 +              FREQ2FBIN(5240, 0),
 +              FREQ2FBIN(5320, 0),
 +              FREQ2FBIN(5400, 0),
 +              FREQ2FBIN(5500, 0),
 +              FREQ2FBIN(5700, 0),
 +              FREQ2FBIN(5745, 0),
 +              FREQ2FBIN(5825, 0)
 +      },
 +      .calTargetPower5G = {
 +              /* 6-24,36,48,54 */
 +              { {30, 30, 28, 24} },
 +              { {30, 30, 28, 24} },
 +              { {30, 30, 28, 24} },
 +              { {30, 30, 28, 24} },
 +              { {30, 30, 28, 24} },
 +              { {30, 30, 28, 24} },
 +              { {30, 30, 28, 24} },
 +              { {30, 30, 28, 24} },
 +      },
 +      .calTargetPower5GHT20 = {
 +              /*
 +               * 0_8_16,1-3_9-11_17-19,
 +               * 4,5,6,7,12,13,14,15,20,21,22,23
 +               */
 +              { {30, 30, 30, 28, 24, 20, 30, 28, 24, 20, 20, 20, 20, 16} },
 +              { {30, 30, 30, 28, 24, 20, 30, 28, 24, 20, 20, 20, 20, 16} },
 +              { {30, 30, 30, 26, 22, 18, 30, 26, 22, 18, 18, 18, 18, 16} },
 +              { {30, 30, 30, 26, 22, 18, 30, 26, 22, 18, 18, 18, 18, 16} },
 +              { {30, 30, 30, 24, 20, 16, 30, 24, 20, 16, 16, 16, 16, 14} },
 +              { {30, 30, 30, 24, 20, 16, 30, 24, 20, 16, 16, 16, 16, 14} },
 +              { {30, 30, 30, 22, 18, 14, 30, 22, 18, 14, 14, 14, 14, 12} },
 +              { {30, 30, 30, 22, 18, 14, 30, 22, 18, 14, 14, 14, 14, 12} },
 +      },
 +      .calTargetPower5GHT40 =  {
 +              /*
 +               * 0_8_16,1-3_9-11_17-19,
 +               * 4,5,6,7,12,13,14,15,20,21,22,23
 +               */
 +              { {28, 28, 28, 26, 22, 18, 28, 26, 22, 18, 18, 18, 18, 14} },
 +              { {28, 28, 28, 26, 22, 18, 28, 26, 22, 18, 18, 18, 18, 14} },
 +              { {28, 28, 28, 24, 20, 16, 28, 24, 20, 16, 16, 16, 16, 12} },
 +              { {28, 28, 28, 24, 20, 16, 28, 24, 20, 16, 16, 16, 16, 12} },
 +              { {28, 28, 28, 22, 18, 14, 28, 22, 18, 14, 14, 14, 14, 10} },
 +              { {28, 28, 28, 22, 18, 14, 28, 22, 18, 14, 14, 14, 14, 10} },
 +              { {28, 28, 28, 20, 16, 12, 28, 20, 16, 12, 12, 12, 12, 8} },
 +              { {28, 28, 28, 20, 16, 12, 28, 20, 16, 12, 12, 12, 12, 8} },
 +      },
 +      .ctlIndex_5G =  {
 +              0x10, 0x16, 0x18, 0x40, 0x46,
 +              0x48, 0x30, 0x36, 0x38
 +      },
 +      .ctl_freqbin_5G =  {
 +              {
 +                      /* Data[0].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
 +                      /* Data[0].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
 +                      /* Data[0].ctlEdges[2].bChannel */ FREQ2FBIN(5280, 0),
 +                      /* Data[0].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0),
 +                      /* Data[0].ctlEdges[4].bChannel */ FREQ2FBIN(5600, 0),
 +                      /* Data[0].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
 +                      /* Data[0].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0),
 +                      /* Data[0].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0)
 +              },
 +              {
 +                      /* Data[1].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
 +                      /* Data[1].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
 +                      /* Data[1].ctlEdges[2].bChannel */ FREQ2FBIN(5280, 0),
 +                      /* Data[1].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0),
 +                      /* Data[1].ctlEdges[4].bChannel */ FREQ2FBIN(5520, 0),
 +                      /* Data[1].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
 +                      /* Data[1].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0),
 +                      /* Data[1].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0)
 +              },
 +
 +              {
 +                      /* Data[2].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0),
 +                      /* Data[2].ctlEdges[1].bChannel */ FREQ2FBIN(5230, 0),
 +                      /* Data[2].ctlEdges[2].bChannel */ FREQ2FBIN(5270, 0),
 +                      /* Data[2].ctlEdges[3].bChannel */ FREQ2FBIN(5310, 0),
 +                      /* Data[2].ctlEdges[4].bChannel */ FREQ2FBIN(5510, 0),
 +                      /* Data[2].ctlEdges[5].bChannel */ FREQ2FBIN(5550, 0),
 +                      /* Data[2].ctlEdges[6].bChannel */ FREQ2FBIN(5670, 0),
 +                      /* Data[2].ctlEdges[7].bChannel */ FREQ2FBIN(5755, 0)
 +              },
 +
 +              {
 +                      /* Data[3].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
 +                      /* Data[3].ctlEdges[1].bChannel */ FREQ2FBIN(5200, 0),
 +                      /* Data[3].ctlEdges[2].bChannel */ FREQ2FBIN(5260, 0),
 +                      /* Data[3].ctlEdges[3].bChannel */ FREQ2FBIN(5320, 0),
 +                      /* Data[3].ctlEdges[4].bChannel */ FREQ2FBIN(5500, 0),
 +                      /* Data[3].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
 +                      /* Data[3].ctlEdges[6].bChannel */ 0xFF,
 +                      /* Data[3].ctlEdges[7].bChannel */ 0xFF,
 +              },
 +
 +              {
 +                      /* Data[4].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
 +                      /* Data[4].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
 +                      /* Data[4].ctlEdges[2].bChannel */ FREQ2FBIN(5500, 0),
 +                      /* Data[4].ctlEdges[3].bChannel */ FREQ2FBIN(5700, 0),
 +                      /* Data[4].ctlEdges[4].bChannel */ 0xFF,
 +                      /* Data[4].ctlEdges[5].bChannel */ 0xFF,
 +                      /* Data[4].ctlEdges[6].bChannel */ 0xFF,
 +                      /* Data[4].ctlEdges[7].bChannel */ 0xFF,
 +              },
 +
 +              {
 +                      /* Data[5].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0),
 +                      /* Data[5].ctlEdges[1].bChannel */ FREQ2FBIN(5270, 0),
 +                      /* Data[5].ctlEdges[2].bChannel */ FREQ2FBIN(5310, 0),
 +                      /* Data[5].ctlEdges[3].bChannel */ FREQ2FBIN(5510, 0),
 +                      /* Data[5].ctlEdges[4].bChannel */ FREQ2FBIN(5590, 0),
 +                      /* Data[5].ctlEdges[5].bChannel */ FREQ2FBIN(5670, 0),
 +                      /* Data[5].ctlEdges[6].bChannel */ 0xFF,
 +                      /* Data[5].ctlEdges[7].bChannel */ 0xFF
 +              },
 +
 +              {
 +                      /* Data[6].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
 +                      /* Data[6].ctlEdges[1].bChannel */ FREQ2FBIN(5200, 0),
 +                      /* Data[6].ctlEdges[2].bChannel */ FREQ2FBIN(5220, 0),
 +                      /* Data[6].ctlEdges[3].bChannel */ FREQ2FBIN(5260, 0),
 +                      /* Data[6].ctlEdges[4].bChannel */ FREQ2FBIN(5500, 0),
 +                      /* Data[6].ctlEdges[5].bChannel */ FREQ2FBIN(5600, 0),
 +                      /* Data[6].ctlEdges[6].bChannel */ FREQ2FBIN(5700, 0),
 +                      /* Data[6].ctlEdges[7].bChannel */ FREQ2FBIN(5745, 0)
 +              },
 +
 +              {
 +                      /* Data[7].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
 +                      /* Data[7].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
 +                      /* Data[7].ctlEdges[2].bChannel */ FREQ2FBIN(5320, 0),
 +                      /* Data[7].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0),
 +                      /* Data[7].ctlEdges[4].bChannel */ FREQ2FBIN(5560, 0),
 +                      /* Data[7].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
 +                      /* Data[7].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0),
 +                      /* Data[7].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0)
 +              },
 +
 +              {
 +                      /* Data[8].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0),
 +                      /* Data[8].ctlEdges[1].bChannel */ FREQ2FBIN(5230, 0),
 +                      /* Data[8].ctlEdges[2].bChannel */ FREQ2FBIN(5270, 0),
 +                      /* Data[8].ctlEdges[3].bChannel */ FREQ2FBIN(5510, 0),
 +                      /* Data[8].ctlEdges[4].bChannel */ FREQ2FBIN(5550, 0),
 +                      /* Data[8].ctlEdges[5].bChannel */ FREQ2FBIN(5670, 0),
 +                      /* Data[8].ctlEdges[6].bChannel */ FREQ2FBIN(5755, 0),
 +                      /* Data[8].ctlEdges[7].bChannel */ FREQ2FBIN(5795, 0)
 +              }
 +      },
 +      .ctlPowerData_5G = {
 +              {
 +                      {
-                               {60, 1}, {60, 1}, {60, 1}, {60, 1},
-                               {60, 1}, {60, 1}, {60, 1}, {60, 0},
++                              CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
++                              CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
 +                      }
 +              },
 +              {
 +                      {
-                               {60, 0}, {60, 1}, {60, 0}, {60, 1},
-                               {60, 1}, {60, 1}, {60, 1}, {60, 1},
++                              CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
++                              CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
 +                      }
 +              },
 +              {
 +                      {
-                               {60, 0}, {60, 1}, {60, 1}, {60, 0},
-                               {60, 1}, {60, 0}, {60, 0}, {60, 0},
++                              CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 1),
++                              CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
 +                      }
 +              },
 +              {
 +                      {
-                               {60, 1}, {60, 1}, {60, 1}, {60, 0},
-                               {60, 0}, {60, 0}, {60, 0}, {60, 0},
++                              CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 0),
++                              CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0),
 +                      }
 +              },
 +              {
 +                      {
-                               {60, 1}, {60, 1}, {60, 1}, {60, 1},
-                               {60, 1}, {60, 0}, {60, 0}, {60, 0},
++                              CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
++                              CTL(60, 0), CTL(60, 0), CTL(60, 0), CTL(60, 0),
 +                      }
 +              },
 +              {
 +                      {
-                               {60, 1}, {60, 1}, {60, 1}, {60, 1},
-                               {60, 1}, {60, 1}, {60, 1}, {60, 1},
++                              CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
++                              CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0),
 +                      }
 +              },
 +              {
 +                      {
-                               {60, 1}, {60, 1}, {60, 0}, {60, 1},
-                               {60, 1}, {60, 1}, {60, 1}, {60, 0},
++                              CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
++                              CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
 +                      }
 +              },
 +              {
 +                      {
-                               {60, 1}, {60, 0}, {60, 1}, {60, 1},
-                               {60, 1}, {60, 1}, {60, 0}, {60, 1},
++                              CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1),
++                              CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
 +                      }
 +              },
 +              {
 +                      {
-               { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
-               { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
-               { { {60, 1}, {60, 0}, {60, 0}, {60, 1} } },
++                              CTL(60, 1), CTL(60, 0), CTL(60, 1), CTL(60, 1),
++                              CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1),
 +                      }
 +              },
 +      }
 +};
 +
 +
 +static const struct ar9300_eeprom ar9300_x112 = {
 +      .eepromVersion = 2,
 +      .templateVersion = 5,
 +      .macAddr = {0x00, 0x03, 0x7f, 0x0, 0x0, 0x0},
 +      .custData = {"x112-041-f0000"},
 +      .baseEepHeader = {
 +              .regDmn = { LE16(0), LE16(0x1f) },
 +              .txrxMask =  0x77, /* 4 bits tx and 4 bits rx */
 +              .opCapFlags = {
 +                      .opFlags = AR9300_OPFLAGS_11G | AR9300_OPFLAGS_11A,
 +                      .eepMisc = 0,
 +              },
 +              .rfSilent = 0,
 +              .blueToothOptions = 0,
 +              .deviceCap = 0,
 +              .deviceType = 5, /* takes lower byte in eeprom location */
 +              .pwrTableOffset = AR9300_PWR_TABLE_OFFSET,
 +              .params_for_tuning_caps = {0, 0},
 +              .featureEnable = 0x0d,
 +              /*
 +               * bit0 - enable tx temp comp - disabled
 +               * bit1 - enable tx volt comp - disabled
 +               * bit2 - enable fastclock - enabled
 +               * bit3 - enable doubling - enabled
 +               * bit4 - enable internal regulator - disabled
 +               * bit5 - enable pa predistortion - disabled
 +               */
 +              .miscConfiguration = 0, /* bit0 - turn down drivestrength */
 +              .eepromWriteEnableGpio = 6,
 +              .wlanDisableGpio = 0,
 +              .wlanLedGpio = 8,
 +              .rxBandSelectGpio = 0xff,
 +              .txrxgain = 0x0,
 +              .swreg = 0,
 +      },
 +      .modalHeader2G = {
 +              /* ar9300_modal_eep_header  2g */
 +              /* 4 idle,t1,t2,b(4 bits per setting) */
 +              .antCtrlCommon = LE32(0x110),
 +              /* 4 ra1l1, ra2l1, ra1l2, ra2l2, ra12 */
 +              .antCtrlCommon2 = LE32(0x22222),
 +
 +              /*
 +               * antCtrlChain[ar9300_max_chains]; 6 idle, t, r,
 +               * rx1, rx12, b (2 bits each)
 +               */
 +              .antCtrlChain = { LE16(0x10), LE16(0x10), LE16(0x10) },
 +
 +              /*
 +               * xatten1DB[AR9300_max_chains];  3 xatten1_db
 +               * for ar9280 (0xa20c/b20c 5:0)
 +               */
 +              .xatten1DB = {0x1b, 0x1b, 0x1b},
 +
 +              /*
 +               * xatten1Margin[ar9300_max_chains]; 3 xatten1_margin
 +               * for ar9280 (0xa20c/b20c 16:12
 +               */
 +              .xatten1Margin = {0x15, 0x15, 0x15},
 +              .tempSlope = 50,
 +              .voltSlope = 0,
 +
 +              /*
 +               * spurChans[OSPrey_eeprom_modal_sPURS]; spur
 +               * channels in usual fbin coding format
 +               */
 +              .spurChans = {FREQ2FBIN(2464, 1), 0, 0, 0, 0},
 +
 +              /*
 +               * noiseFloorThreshch[ar9300_max_cHAINS]; 3 Check
 +               * if the register is per chain
 +               */
 +              .noiseFloorThreshCh = {-1, 0, 0},
 +              .ob = {1, 1, 1},/* 3 chain */
 +              .db_stage2 = {1, 1, 1}, /* 3 chain  */
 +              .db_stage3 = {0, 0, 0},
 +              .db_stage4 = {0, 0, 0},
 +              .xpaBiasLvl = 0,
 +              .txFrameToDataStart = 0x0e,
 +              .txFrameToPaOn = 0x0e,
 +              .txClip = 3, /* 4 bits tx_clip, 4 bits dac_scale_cck */
 +              .antennaGain = 0,
 +              .switchSettling = 0x2c,
 +              .adcDesiredSize = -30,
 +              .txEndToXpaOff = 0,
 +              .txEndToRxOn = 0x2,
 +              .txFrameToXpaOn = 0xe,
 +              .thresh62 = 28,
 +              .papdRateMaskHt20 = LE32(0x0c80c080),
 +              .papdRateMaskHt40 = LE32(0x0080c080),
 +              .futureModal = {
 +                      0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 +              },
 +      },
 +      .base_ext1 = {
 +              .ant_div_control = 0,
 +              .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
 +      },
 +      .calFreqPier2G = {
 +              FREQ2FBIN(2412, 1),
 +              FREQ2FBIN(2437, 1),
 +              FREQ2FBIN(2472, 1),
 +      },
 +      /* ar9300_cal_data_per_freq_op_loop 2g */
 +      .calPierData2G = {
 +              { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} },
 +              { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} },
 +              { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} },
 +      },
 +      .calTarget_freqbin_Cck = {
 +              FREQ2FBIN(2412, 1),
 +              FREQ2FBIN(2472, 1),
 +      },
 +      .calTarget_freqbin_2G = {
 +              FREQ2FBIN(2412, 1),
 +              FREQ2FBIN(2437, 1),
 +              FREQ2FBIN(2472, 1)
 +      },
 +      .calTarget_freqbin_2GHT20 = {
 +              FREQ2FBIN(2412, 1),
 +              FREQ2FBIN(2437, 1),
 +              FREQ2FBIN(2472, 1)
 +      },
 +      .calTarget_freqbin_2GHT40 = {
 +              FREQ2FBIN(2412, 1),
 +              FREQ2FBIN(2437, 1),
 +              FREQ2FBIN(2472, 1)
 +      },
 +      .calTargetPowerCck = {
 +              /* 1L-5L,5S,11L,11s */
 +              { {38, 38, 38, 38} },
 +              { {38, 38, 38, 38} },
 +      },
 +      .calTargetPower2G = {
 +              /* 6-24,36,48,54 */
 +              { {38, 38, 36, 34} },
 +              { {38, 38, 36, 34} },
 +              { {38, 38, 34, 32} },
 +      },
 +      .calTargetPower2GHT20 = {
 +              { {36, 36, 36, 36, 36, 34, 34, 32, 30, 28, 28, 28, 28, 26} },
 +              { {36, 36, 36, 36, 36, 34, 36, 34, 32, 30, 30, 30, 28, 26} },
 +              { {36, 36, 36, 36, 36, 34, 34, 32, 30, 28, 28, 28, 28, 26} },
 +      },
 +      .calTargetPower2GHT40 = {
 +              { {36, 36, 36, 36, 34, 32, 32, 30, 28, 26, 26, 26, 26, 24} },
 +              { {36, 36, 36, 36, 34, 32, 34, 32, 30, 28, 28, 28, 28, 24} },
 +              { {36, 36, 36, 36, 34, 32, 32, 30, 28, 26, 26, 26, 26, 24} },
 +      },
 +      .ctlIndex_2G =  {
 +              0x11, 0x12, 0x15, 0x17, 0x41, 0x42,
 +              0x45, 0x47, 0x31, 0x32, 0x35, 0x37,
 +      },
 +      .ctl_freqbin_2G = {
 +              {
 +                      FREQ2FBIN(2412, 1),
 +                      FREQ2FBIN(2417, 1),
 +                      FREQ2FBIN(2457, 1),
 +                      FREQ2FBIN(2462, 1)
 +              },
 +              {
 +                      FREQ2FBIN(2412, 1),
 +                      FREQ2FBIN(2417, 1),
 +                      FREQ2FBIN(2462, 1),
 +                      0xFF,
 +              },
 +
 +              {
 +                      FREQ2FBIN(2412, 1),
 +                      FREQ2FBIN(2417, 1),
 +                      FREQ2FBIN(2462, 1),
 +                      0xFF,
 +              },
 +              {
 +                      FREQ2FBIN(2422, 1),
 +                      FREQ2FBIN(2427, 1),
 +                      FREQ2FBIN(2447, 1),
 +                      FREQ2FBIN(2452, 1)
 +              },
 +
 +              {
 +                      /* Data[4].ctledges[0].bchannel */ FREQ2FBIN(2412, 1),
 +                      /* Data[4].ctledges[1].bchannel */ FREQ2FBIN(2417, 1),
 +                      /* Data[4].ctledges[2].bchannel */ FREQ2FBIN(2472, 1),
 +                      /* Data[4].ctledges[3].bchannel */ FREQ2FBIN(2484, 1),
 +              },
 +
 +              {
 +                      /* Data[5].ctledges[0].bchannel */ FREQ2FBIN(2412, 1),
 +                      /* Data[5].ctledges[1].bchannel */ FREQ2FBIN(2417, 1),
 +                      /* Data[5].ctledges[2].bchannel */ FREQ2FBIN(2472, 1),
 +                      0,
 +              },
 +
 +              {
 +                      /* Data[6].ctledges[0].bchannel */ FREQ2FBIN(2412, 1),
 +                      /* Data[6].ctledges[1].bchannel */ FREQ2FBIN(2417, 1),
 +                      FREQ2FBIN(2472, 1),
 +                      0,
 +              },
 +
 +              {
 +                      /* Data[7].ctledges[0].bchannel */ FREQ2FBIN(2422, 1),
 +                      /* Data[7].ctledges[1].bchannel */ FREQ2FBIN(2427, 1),
 +                      /* Data[7].ctledges[2].bchannel */ FREQ2FBIN(2447, 1),
 +                      /* Data[7].ctledges[3].bchannel */ FREQ2FBIN(2462, 1),
 +              },
 +
 +              {
 +                      /* Data[8].ctledges[0].bchannel */ FREQ2FBIN(2412, 1),
 +                      /* Data[8].ctledges[1].bchannel */ FREQ2FBIN(2417, 1),
 +                      /* Data[8].ctledges[2].bchannel */ FREQ2FBIN(2472, 1),
 +              },
 +
 +              {
 +                      /* Data[9].ctledges[0].bchannel */ FREQ2FBIN(2412, 1),
 +                      /* Data[9].ctledges[1].bchannel */ FREQ2FBIN(2417, 1),
 +                      /* Data[9].ctledges[2].bchannel */ FREQ2FBIN(2472, 1),
 +                      0
 +              },
 +
 +              {
 +                      /* Data[10].ctledges[0].bchannel */ FREQ2FBIN(2412, 1),
 +                      /* Data[10].ctledges[1].bchannel */ FREQ2FBIN(2417, 1),
 +                      /* Data[10].ctledges[2].bchannel */ FREQ2FBIN(2472, 1),
 +                      0
 +              },
 +
 +              {
 +                      /* Data[11].ctledges[0].bchannel */ FREQ2FBIN(2422, 1),
 +                      /* Data[11].ctledges[1].bchannel */ FREQ2FBIN(2427, 1),
 +                      /* Data[11].ctledges[2].bchannel */ FREQ2FBIN(2447, 1),
 +                      /* Data[11].ctledges[3].bchannel */ FREQ2FBIN(2462, 1),
 +              }
 +      },
 +      .ctlPowerData_2G = {
-               { { {60, 1}, {60, 0}, {0, 0}, {0, 0} } },
-               { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
-               { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
++              { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
++              { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
++              { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 1) } },
 +
-               { { {60, 0}, {60, 1}, {60, 1}, {60, 0} } },
-               { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
-               { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
++              { { CTL(60, 1), CTL(60, 0), CTL(0, 0), CTL(0, 0) } },
++              { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
++              { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
 +
-               { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
-               { { {60, 0}, {60, 1}, {60, 1}, {60, 1} } },
-               { { {60, 0}, {60, 1}, {60, 1}, {60, 1} } },
++              { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 0) } },
++              { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
++              { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
 +
-                               {60, 1}, {60, 1}, {60, 1}, {60, 1},
-                               {60, 1}, {60, 1}, {60, 1}, {60, 0},
++              { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
++              { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1) } },
++              { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1) } },
 +      },
 +      .modalHeader5G = {
 +              /* 4 idle,t1,t2,b (4 bits per setting) */
 +              .antCtrlCommon = LE32(0x110),
 +              /* 4 ra1l1, ra2l1, ra1l2,ra2l2,ra12 */
 +              .antCtrlCommon2 = LE32(0x22222),
 +              /* antCtrlChain 6 idle, t,r,rx1,rx12,b (2 bits each) */
 +              .antCtrlChain = {
 +                      LE16(0x0), LE16(0x0), LE16(0x0),
 +              },
 +              /* xatten1DB 3 xatten1_db for ar9280 (0xa20c/b20c 5:0) */
 +              .xatten1DB = {0x13, 0x19, 0x17},
 +
 +              /*
 +               * xatten1Margin[ar9300_max_chains]; 3 xatten1_margin
 +               * for merlin (0xa20c/b20c 16:12
 +               */
 +              .xatten1Margin = {0x19, 0x19, 0x19},
 +              .tempSlope = 70,
 +              .voltSlope = 15,
 +              /* spurChans spur channels in usual fbin coding format */
 +              .spurChans = {0, 0, 0, 0, 0},
 +              /* noiseFloorThreshch check if the register is per chain */
 +              .noiseFloorThreshCh = {-1, 0, 0},
 +              .ob = {3, 3, 3}, /* 3 chain */
 +              .db_stage2 = {3, 3, 3}, /* 3 chain */
 +              .db_stage3 = {3, 3, 3}, /* doesn't exist for 2G */
 +              .db_stage4 = {3, 3, 3},  /* don't exist for 2G */
 +              .xpaBiasLvl = 0,
 +              .txFrameToDataStart = 0x0e,
 +              .txFrameToPaOn = 0x0e,
 +              .txClip = 3, /* 4 bits tx_clip, 4 bits dac_scale_cck */
 +              .antennaGain = 0,
 +              .switchSettling = 0x2d,
 +              .adcDesiredSize = -30,
 +              .txEndToXpaOff = 0,
 +              .txEndToRxOn = 0x2,
 +              .txFrameToXpaOn = 0xe,
 +              .thresh62 = 28,
 +              .papdRateMaskHt20 = LE32(0x0cf0e0e0),
 +              .papdRateMaskHt40 = LE32(0x6cf0e0e0),
 +              .futureModal = {
 +                      0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 +              },
 +      },
 +      .base_ext2 = {
 +              .tempSlopeLow = 72,
 +              .tempSlopeHigh = 105,
 +              .xatten1DBLow = {0x10, 0x14, 0x10},
 +              .xatten1MarginLow = {0x19, 0x19 , 0x19},
 +              .xatten1DBHigh = {0x1d, 0x20, 0x24},
 +              .xatten1MarginHigh = {0x10, 0x10, 0x10}
 +      },
 +      .calFreqPier5G = {
 +              FREQ2FBIN(5180, 0),
 +              FREQ2FBIN(5220, 0),
 +              FREQ2FBIN(5320, 0),
 +              FREQ2FBIN(5400, 0),
 +              FREQ2FBIN(5500, 0),
 +              FREQ2FBIN(5600, 0),
 +              FREQ2FBIN(5700, 0),
 +              FREQ2FBIN(5785, 0)
 +      },
 +      .calPierData5G = {
 +              {
 +                      {0, 0, 0, 0, 0},
 +                      {0, 0, 0, 0, 0},
 +                      {0, 0, 0, 0, 0},
 +                      {0, 0, 0, 0, 0},
 +                      {0, 0, 0, 0, 0},
 +                      {0, 0, 0, 0, 0},
 +                      {0, 0, 0, 0, 0},
 +                      {0, 0, 0, 0, 0},
 +              },
 +              {
 +                      {0, 0, 0, 0, 0},
 +                      {0, 0, 0, 0, 0},
 +                      {0, 0, 0, 0, 0},
 +                      {0, 0, 0, 0, 0},
 +                      {0, 0, 0, 0, 0},
 +                      {0, 0, 0, 0, 0},
 +                      {0, 0, 0, 0, 0},
 +                      {0, 0, 0, 0, 0},
 +              },
 +              {
 +                      {0, 0, 0, 0, 0},
 +                      {0, 0, 0, 0, 0},
 +                      {0, 0, 0, 0, 0},
 +                      {0, 0, 0, 0, 0},
 +                      {0, 0, 0, 0, 0},
 +                      {0, 0, 0, 0, 0},
 +                      {0, 0, 0, 0, 0},
 +                      {0, 0, 0, 0, 0},
 +              },
 +
 +      },
 +      .calTarget_freqbin_5G = {
 +              FREQ2FBIN(5180, 0),
 +              FREQ2FBIN(5220, 0),
 +              FREQ2FBIN(5320, 0),
 +              FREQ2FBIN(5400, 0),
 +              FREQ2FBIN(5500, 0),
 +              FREQ2FBIN(5600, 0),
 +              FREQ2FBIN(5725, 0),
 +              FREQ2FBIN(5825, 0)
 +      },
 +      .calTarget_freqbin_5GHT20 = {
 +              FREQ2FBIN(5180, 0),
 +              FREQ2FBIN(5220, 0),
 +              FREQ2FBIN(5320, 0),
 +              FREQ2FBIN(5400, 0),
 +              FREQ2FBIN(5500, 0),
 +              FREQ2FBIN(5600, 0),
 +              FREQ2FBIN(5725, 0),
 +              FREQ2FBIN(5825, 0)
 +      },
 +      .calTarget_freqbin_5GHT40 = {
 +              FREQ2FBIN(5180, 0),
 +              FREQ2FBIN(5220, 0),
 +              FREQ2FBIN(5320, 0),
 +              FREQ2FBIN(5400, 0),
 +              FREQ2FBIN(5500, 0),
 +              FREQ2FBIN(5600, 0),
 +              FREQ2FBIN(5725, 0),
 +              FREQ2FBIN(5825, 0)
 +      },
 +      .calTargetPower5G = {
 +              /* 6-24,36,48,54 */
 +              { {32, 32, 28, 26} },
 +              { {32, 32, 28, 26} },
 +              { {32, 32, 28, 26} },
 +              { {32, 32, 26, 24} },
 +              { {32, 32, 26, 24} },
 +              { {32, 32, 24, 22} },
 +              { {30, 30, 24, 22} },
 +              { {30, 30, 24, 22} },
 +      },
 +      .calTargetPower5GHT20 = {
 +              /*
 +               * 0_8_16,1-3_9-11_17-19,
 +               * 4,5,6,7,12,13,14,15,20,21,22,23
 +               */
 +              { {32, 32, 32, 32, 28, 26, 32, 28, 26, 24, 24, 24, 22, 22} },
 +              { {32, 32, 32, 32, 28, 26, 32, 28, 26, 24, 24, 24, 22, 22} },
 +              { {32, 32, 32, 32, 28, 26, 32, 28, 26, 24, 24, 24, 22, 22} },
 +              { {32, 32, 32, 32, 28, 26, 32, 26, 24, 22, 22, 22, 20, 20} },
 +              { {32, 32, 32, 32, 28, 26, 32, 26, 24, 22, 20, 18, 16, 16} },
 +              { {32, 32, 32, 32, 28, 26, 32, 24, 20, 16, 18, 16, 14, 14} },
 +              { {30, 30, 30, 30, 28, 26, 30, 24, 20, 16, 18, 16, 14, 14} },
 +              { {30, 30, 30, 30, 28, 26, 30, 24, 20, 16, 18, 16, 14, 14} },
 +      },
 +      .calTargetPower5GHT40 =  {
 +              /*
 +               * 0_8_16,1-3_9-11_17-19,
 +               * 4,5,6,7,12,13,14,15,20,21,22,23
 +               */
 +              { {32, 32, 32, 30, 28, 26, 30, 28, 26, 24, 24, 24, 22, 22} },
 +              { {32, 32, 32, 30, 28, 26, 30, 28, 26, 24, 24, 24, 22, 22} },
 +              { {32, 32, 32, 30, 28, 26, 30, 28, 26, 24, 24, 24, 22, 22} },
 +              { {32, 32, 32, 30, 28, 26, 30, 26, 24, 22, 22, 22, 20, 20} },
 +              { {32, 32, 32, 30, 28, 26, 30, 26, 24, 22, 20, 18, 16, 16} },
 +              { {32, 32, 32, 30, 28, 26, 30, 22, 20, 16, 18, 16, 14, 14} },
 +              { {30, 30, 30, 30, 28, 26, 30, 22, 20, 16, 18, 16, 14, 14} },
 +              { {30, 30, 30, 30, 28, 26, 30, 22, 20, 16, 18, 16, 14, 14} },
 +      },
 +      .ctlIndex_5G =  {
 +              0x10, 0x16, 0x18, 0x40, 0x46,
 +              0x48, 0x30, 0x36, 0x38
 +      },
 +      .ctl_freqbin_5G =  {
 +              {
 +                      /* Data[0].ctledges[0].bchannel */ FREQ2FBIN(5180, 0),
 +                      /* Data[0].ctledges[1].bchannel */ FREQ2FBIN(5260, 0),
 +                      /* Data[0].ctledges[2].bchannel */ FREQ2FBIN(5280, 0),
 +                      /* Data[0].ctledges[3].bchannel */ FREQ2FBIN(5500, 0),
 +                      /* Data[0].ctledges[4].bchannel */ FREQ2FBIN(5600, 0),
 +                      /* Data[0].ctledges[5].bchannel */ FREQ2FBIN(5700, 0),
 +                      /* Data[0].ctledges[6].bchannel */ FREQ2FBIN(5745, 0),
 +                      /* Data[0].ctledges[7].bchannel */ FREQ2FBIN(5825, 0)
 +              },
 +              {
 +                      /* Data[1].ctledges[0].bchannel */ FREQ2FBIN(5180, 0),
 +                      /* Data[1].ctledges[1].bchannel */ FREQ2FBIN(5260, 0),
 +                      /* Data[1].ctledges[2].bchannel */ FREQ2FBIN(5280, 0),
 +                      /* Data[1].ctledges[3].bchannel */ FREQ2FBIN(5500, 0),
 +                      /* Data[1].ctledges[4].bchannel */ FREQ2FBIN(5520, 0),
 +                      /* Data[1].ctledges[5].bchannel */ FREQ2FBIN(5700, 0),
 +                      /* Data[1].ctledges[6].bchannel */ FREQ2FBIN(5745, 0),
 +                      /* Data[1].ctledges[7].bchannel */ FREQ2FBIN(5825, 0)
 +              },
 +
 +              {
 +                      /* Data[2].ctledges[0].bchannel */ FREQ2FBIN(5190, 0),
 +                      /* Data[2].ctledges[1].bchannel */ FREQ2FBIN(5230, 0),
 +                      /* Data[2].ctledges[2].bchannel */ FREQ2FBIN(5270, 0),
 +                      /* Data[2].ctledges[3].bchannel */ FREQ2FBIN(5310, 0),
 +                      /* Data[2].ctledges[4].bchannel */ FREQ2FBIN(5510, 0),
 +                      /* Data[2].ctledges[5].bchannel */ FREQ2FBIN(5550, 0),
 +                      /* Data[2].ctledges[6].bchannel */ FREQ2FBIN(5670, 0),
 +                      /* Data[2].ctledges[7].bchannel */ FREQ2FBIN(5755, 0)
 +              },
 +
 +              {
 +                      /* Data[3].ctledges[0].bchannel */ FREQ2FBIN(5180, 0),
 +                      /* Data[3].ctledges[1].bchannel */ FREQ2FBIN(5200, 0),
 +                      /* Data[3].ctledges[2].bchannel */ FREQ2FBIN(5260, 0),
 +                      /* Data[3].ctledges[3].bchannel */ FREQ2FBIN(5320, 0),
 +                      /* Data[3].ctledges[4].bchannel */ FREQ2FBIN(5500, 0),
 +                      /* Data[3].ctledges[5].bchannel */ FREQ2FBIN(5700, 0),
 +                      /* Data[3].ctledges[6].bchannel */ 0xFF,
 +                      /* Data[3].ctledges[7].bchannel */ 0xFF,
 +              },
 +
 +              {
 +                      /* Data[4].ctledges[0].bchannel */ FREQ2FBIN(5180, 0),
 +                      /* Data[4].ctledges[1].bchannel */ FREQ2FBIN(5260, 0),
 +                      /* Data[4].ctledges[2].bchannel */ FREQ2FBIN(5500, 0),
 +                      /* Data[4].ctledges[3].bchannel */ FREQ2FBIN(5700, 0),
 +                      /* Data[4].ctledges[4].bchannel */ 0xFF,
 +                      /* Data[4].ctledges[5].bchannel */ 0xFF,
 +                      /* Data[4].ctledges[6].bchannel */ 0xFF,
 +                      /* Data[4].ctledges[7].bchannel */ 0xFF,
 +              },
 +
 +              {
 +                      /* Data[5].ctledges[0].bchannel */ FREQ2FBIN(5190, 0),
 +                      /* Data[5].ctledges[1].bchannel */ FREQ2FBIN(5270, 0),
 +                      /* Data[5].ctledges[2].bchannel */ FREQ2FBIN(5310, 0),
 +                      /* Data[5].ctledges[3].bchannel */ FREQ2FBIN(5510, 0),
 +                      /* Data[5].ctledges[4].bchannel */ FREQ2FBIN(5590, 0),
 +                      /* Data[5].ctledges[5].bchannel */ FREQ2FBIN(5670, 0),
 +                      /* Data[5].ctledges[6].bchannel */ 0xFF,
 +                      /* Data[5].ctledges[7].bchannel */ 0xFF
 +              },
 +
 +              {
 +                      /* Data[6].ctledges[0].bchannel */ FREQ2FBIN(5180, 0),
 +                      /* Data[6].ctledges[1].bchannel */ FREQ2FBIN(5200, 0),
 +                      /* Data[6].ctledges[2].bchannel */ FREQ2FBIN(5220, 0),
 +                      /* Data[6].ctledges[3].bchannel */ FREQ2FBIN(5260, 0),
 +                      /* Data[6].ctledges[4].bchannel */ FREQ2FBIN(5500, 0),
 +                      /* Data[6].ctledges[5].bchannel */ FREQ2FBIN(5600, 0),
 +                      /* Data[6].ctledges[6].bchannel */ FREQ2FBIN(5700, 0),
 +                      /* Data[6].ctledges[7].bchannel */ FREQ2FBIN(5745, 0)
 +              },
 +
 +              {
 +                      /* Data[7].ctledges[0].bchannel */ FREQ2FBIN(5180, 0),
 +                      /* Data[7].ctledges[1].bchannel */ FREQ2FBIN(5260, 0),
 +                      /* Data[7].ctledges[2].bchannel */ FREQ2FBIN(5320, 0),
 +                      /* Data[7].ctledges[3].bchannel */ FREQ2FBIN(5500, 0),
 +                      /* Data[7].ctledges[4].bchannel */ FREQ2FBIN(5560, 0),
 +                      /* Data[7].ctledges[5].bchannel */ FREQ2FBIN(5700, 0),
 +                      /* Data[7].ctledges[6].bchannel */ FREQ2FBIN(5745, 0),
 +                      /* Data[7].ctledges[7].bchannel */ FREQ2FBIN(5825, 0)
 +              },
 +
 +              {
 +                      /* Data[8].ctledges[0].bchannel */ FREQ2FBIN(5190, 0),
 +                      /* Data[8].ctledges[1].bchannel */ FREQ2FBIN(5230, 0),
 +                      /* Data[8].ctledges[2].bchannel */ FREQ2FBIN(5270, 0),
 +                      /* Data[8].ctledges[3].bchannel */ FREQ2FBIN(5510, 0),
 +                      /* Data[8].ctledges[4].bchannel */ FREQ2FBIN(5550, 0),
 +                      /* Data[8].ctledges[5].bchannel */ FREQ2FBIN(5670, 0),
 +                      /* Data[8].ctledges[6].bchannel */ FREQ2FBIN(5755, 0),
 +                      /* Data[8].ctledges[7].bchannel */ FREQ2FBIN(5795, 0)
 +              }
 +      },
 +      .ctlPowerData_5G = {
 +              {
 +                      {
-                               {60, 1}, {60, 1}, {60, 1}, {60, 1},
-                               {60, 1}, {60, 1}, {60, 1}, {60, 0},
++                              CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
++                              CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
 +                      }
 +              },
 +              {
 +                      {
-                               {60, 0}, {60, 1}, {60, 0}, {60, 1},
-                               {60, 1}, {60, 1}, {60, 1}, {60, 1},
++                              CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
++                              CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
 +                      }
 +              },
 +              {
 +                      {
-                               {60, 0}, {60, 1}, {60, 1}, {60, 0},
-                               {60, 1}, {60, 0}, {60, 0}, {60, 0},
++                              CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 1),
++                              CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
 +                      }
 +              },
 +              {
 +                      {
-                               {60, 1}, {60, 1}, {60, 1}, {60, 0},
-                               {60, 0}, {60, 0}, {60, 0}, {60, 0},
++                              CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 0),
++                              CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0),
 +                      }
 +              },
 +              {
 +                      {
-                               {60, 1}, {60, 1}, {60, 1}, {60, 1},
-                               {60, 1}, {60, 0}, {60, 0}, {60, 0},
++                              CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
++                              CTL(60, 0), CTL(60, 0), CTL(60, 0), CTL(60, 0),
 +                      }
 +              },
 +              {
 +                      {
-                               {60, 1}, {60, 1}, {60, 1}, {60, 1},
-                               {60, 1}, {60, 1}, {60, 1}, {60, 1},
++                              CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
++                              CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0),
 +                      }
 +              },
 +              {
 +                      {
-                               {60, 1}, {60, 1}, {60, 0}, {60, 1},
-                               {60, 1}, {60, 1}, {60, 1}, {60, 0},
++                              CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
++                              CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
 +                      }
 +              },
 +              {
 +                      {
-                               {60, 1}, {60, 0}, {60, 1}, {60, 1},
-                               {60, 1}, {60, 1}, {60, 0}, {60, 1},
++                              CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1),
++                              CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
 +                      }
 +              },
 +              {
 +                      {
++                              CTL(60, 1), CTL(60, 0), CTL(60, 1), CTL(60, 1),
++                              CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1),
 +                      }
 +              },
 +      }
 +};
 +
 +static const struct ar9300_eeprom ar9300_h116 = {
        .eepromVersion = 2,
 -      .templateVersion = 2,
 -      .macAddr = {1, 2, 3, 4, 5, 6},
 -      .custData = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 -                   0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
 +      .templateVersion = 4,
 +      .macAddr = {0x00, 0x03, 0x7f, 0x0, 0x0, 0x0},
 +      .custData = {"h116-041-f0000"},
        .baseEepHeader = {
                .regDmn = { LE16(0), LE16(0x1f) },
 -              .txrxMask =  0x77, /* 4 bits tx and 4 bits rx */
 +              .txrxMask =  0x33, /* 4 bits tx and 4 bits rx */
                .opCapFlags = {
                        .opFlags = AR9300_OPFLAGS_11G | AR9300_OPFLAGS_11A,
                        .eepMisc = 0,
                .deviceType = 5, /* takes lower byte in eeprom location */
                .pwrTableOffset = AR9300_PWR_TABLE_OFFSET,
                .params_for_tuning_caps = {0, 0},
 -              .featureEnable = 0x0c,
 +              .featureEnable = 0x0d,
                 /*
                  * bit0 - enable tx temp comp - disabled
                  * bit1 - enable tx volt comp - disabled
                  * bit5 - enable pa predistortion - disabled
                  */
                .miscConfiguration = 0, /* bit0 - turn down drivestrength */
 -              .eepromWriteEnableGpio = 3,
 +              .eepromWriteEnableGpio = 6,
                .wlanDisableGpio = 0,
                .wlanLedGpio = 8,
                .rxBandSelectGpio = 0xff,
 -              .txrxgain = 0,
 +              .txrxgain = 0x10,
                .swreg = 0,
         },
        .modalHeader2G = {
                /* 4 idle,t1,t2,b(4 bits per setting) */
                .antCtrlCommon = LE32(0x110),
                /* 4 ra1l1, ra2l1, ra1l2, ra2l2, ra12 */
 -              .antCtrlCommon2 = LE32(0x22222),
 +              .antCtrlCommon2 = LE32(0x44444),
  
                /*
                 * antCtrlChain[AR9300_MAX_CHAINS]; 6 idle, t, r,
                 * rx1, rx12, b (2 bits each)
                 */
 -              .antCtrlChain = { LE16(0x150), LE16(0x150), LE16(0x150) },
 +              .antCtrlChain = { LE16(0x10), LE16(0x10), LE16(0x10) },
  
                /*
                 * xatten1DB[AR9300_MAX_CHAINS];  3 xatten1_db
                 * for ar9280 (0xa20c/b20c 5:0)
                 */
 -              .xatten1DB = {0, 0, 0},
 +              .xatten1DB = {0x1f, 0x1f, 0x1f},
  
                /*
                 * xatten1Margin[AR9300_MAX_CHAINS]; 3 xatten1_margin
                 * for ar9280 (0xa20c/b20c 16:12
                 */
 -              .xatten1Margin = {0, 0, 0},
 -              .tempSlope = 36,
 +              .xatten1Margin = {0x12, 0x12, 0x12},
 +              .tempSlope = 25,
                .voltSlope = 0,
  
                /*
                 * spurChans[OSPREY_EEPROM_MODAL_SPURS]; spur
                 * channels in usual fbin coding format
                 */
 -              .spurChans = {0, 0, 0, 0, 0},
 +              .spurChans = {FREQ2FBIN(2464, 1), 0, 0, 0, 0},
  
                /*
                 * noiseFloorThreshCh[AR9300_MAX_CHAINS]; 3 Check
                .txEndToRxOn = 0x2,
                .txFrameToXpaOn = 0xe,
                .thresh62 = 28,
 -              .papdRateMaskHt20 = LE32(0x80c080),
 -              .papdRateMaskHt40 = LE32(0x80c080),
 +              .papdRateMaskHt20 = LE32(0x0c80C080),
 +              .papdRateMaskHt40 = LE32(0x0080C080),
                .futureModal = {
 -                      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 -                      0, 0, 0, 0, 0, 0, 0, 0
 +                      0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                },
         },
 +       .base_ext1 = {
 +              .ant_div_control = 0,
 +              .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
 +       },
        .calFreqPier2G = {
                FREQ2FBIN(2412, 1),
                FREQ2FBIN(2437, 1),
         },
        .calTarget_freqbin_Cck = {
                FREQ2FBIN(2412, 1),
 -              FREQ2FBIN(2484, 1),
 +              FREQ2FBIN(2472, 1),
         },
        .calTarget_freqbin_2G = {
                FREQ2FBIN(2412, 1),
         },
        .calTargetPowerCck = {
                 /* 1L-5L,5S,11L,11S */
 -               { {36, 36, 36, 36} },
 -               { {36, 36, 36, 36} },
 +               { {34, 34, 34, 34} },
 +               { {34, 34, 34, 34} },
        },
        .calTargetPower2G = {
                 /* 6-24,36,48,54 */
 -               { {32, 32, 28, 24} },
 -               { {32, 32, 28, 24} },
 -               { {32, 32, 28, 24} },
 +               { {34, 34, 32, 32} },
 +               { {34, 34, 32, 32} },
 +               { {34, 34, 32, 32} },
        },
        .calTargetPower2GHT20 = {
 -              { {32, 32, 32, 32, 28, 20, 32, 32, 28, 20, 32, 32, 28, 20} },
 -              { {32, 32, 32, 32, 28, 20, 32, 32, 28, 20, 32, 32, 28, 20} },
 -              { {32, 32, 32, 32, 28, 20, 32, 32, 28, 20, 32, 32, 28, 20} },
 +              { {32, 32, 32, 32, 32, 30, 32, 32, 30, 28, 0, 0, 0, 0} },
 +              { {32, 32, 32, 32, 32, 30, 32, 32, 30, 28, 0, 0, 0, 0} },
 +              { {32, 32, 32, 32, 32, 30, 32, 32, 30, 28, 0, 0, 0, 0} },
        },
        .calTargetPower2GHT40 = {
 -              { {32, 32, 32, 32, 28, 20, 32, 32, 28, 20, 32, 32, 28, 20} },
 -              { {32, 32, 32, 32, 28, 20, 32, 32, 28, 20, 32, 32, 28, 20} },
 -              { {32, 32, 32, 32, 28, 20, 32, 32, 28, 20, 32, 32, 28, 20} },
 +              { {30, 30, 30, 30, 30, 28, 30, 30, 28, 26, 0, 0, 0, 0} },
 +              { {30, 30, 30, 30, 30, 28, 30, 30, 28, 26, 0, 0, 0, 0} },
 +              { {30, 30, 30, 30, 30, 28, 30, 30, 28, 26, 0, 0, 0, 0} },
        },
        .ctlIndex_2G =  {
                0x11, 0x12, 0x15, 0x17, 0x41, 0x42,
                        /* Data[11].ctlEdges[0].bChannel */ FREQ2FBIN(2422, 1),
                        /* Data[11].ctlEdges[1].bChannel */ FREQ2FBIN(2427, 1),
                        /* Data[11].ctlEdges[2].bChannel */ FREQ2FBIN(2447, 1),
 -                      /* Data[11].ctlEdges[3].bChannel */
 -                      FREQ2FBIN(2462, 1),
 +                      /* Data[11].ctlEdges[3].bChannel */ FREQ2FBIN(2462, 1),
                }
         },
        .ctlPowerData_2G = {
-                { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
-                { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
-                { { {60, 1}, {60, 0}, {60, 0}, {60, 1} } },
+                { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+                { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+                { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 1) } },
  
-                { { {60, 1}, {60, 0}, {0, 0}, {0, 0} } },
-                { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
-                { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
+                { { CTL(60, 1), CTL(60, 0), CTL(0, 0), CTL(0, 0) } },
+                { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+                { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
  
-                { { {60, 0}, {60, 1}, {60, 1}, {60, 0} } },
-                { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
-                { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
+                { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 0) } },
+                { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+                { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
  
-                { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
-                { { {60, 0}, {60, 1}, {60, 1}, {60, 1} } },
-                { { {60, 0}, {60, 1}, {60, 1}, {60, 1} } },
+                { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+                { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1) } },
+                { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1) } },
         },
        .modalHeader5G = {
                /* 4 idle,t1,t2,b (4 bits per setting) */
 -              .antCtrlCommon = LE32(0x110),
 +              .antCtrlCommon = LE32(0x220),
                /* 4 ra1l1, ra2l1, ra1l2,ra2l2,ra12 */
 -              .antCtrlCommon2 = LE32(0x22222),
 +              .antCtrlCommon2 = LE32(0x44444),
                 /* antCtrlChain 6 idle, t,r,rx1,rx12,b (2 bits each) */
                .antCtrlChain = {
 -                      LE16(0x000), LE16(0x000), LE16(0x000),
 +                      LE16(0x150), LE16(0x150), LE16(0x150),
                },
                 /* xatten1DB 3 xatten1_db for AR9280 (0xa20c/b20c 5:0) */
 -              .xatten1DB = {0, 0, 0},
 +              .xatten1DB = {0x19, 0x19, 0x19},
  
                /*
                 * xatten1Margin[AR9300_MAX_CHAINS]; 3 xatten1_margin
                 * for merlin (0xa20c/b20c 16:12
                 */
 -              .xatten1Margin = {0, 0, 0},
 -              .tempSlope = 68,
 +              .xatten1Margin = {0x14, 0x14, 0x14},
 +              .tempSlope = 70,
                .voltSlope = 0,
                /* spurChans spur channels in usual fbin coding format */
                .spurChans = {0, 0, 0, 0, 0},
                .txEndToRxOn = 0x2,
                .txFrameToXpaOn = 0xe,
                .thresh62 = 28,
 -              .papdRateMaskHt20 = LE32(0xf0e0e0),
 -              .papdRateMaskHt40 = LE32(0xf0e0e0),
 +              .papdRateMaskHt20 = LE32(0x0cf0e0e0),
 +              .papdRateMaskHt40 = LE32(0x6cf0e0e0),
                .futureModal = {
 -                      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 -                      0, 0, 0, 0, 0, 0, 0, 0
 +                      0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                },
         },
 +      .base_ext2 = {
 +              .tempSlopeLow = 35,
 +              .tempSlopeHigh = 50,
 +              .xatten1DBLow = {0, 0, 0},
 +              .xatten1MarginLow = {0, 0, 0},
 +              .xatten1DBHigh = {0, 0, 0},
 +              .xatten1MarginHigh = {0, 0, 0}
 +       },
        .calFreqPier5G = {
                FREQ2FBIN(5180, 0),
                FREQ2FBIN(5220, 0),
                FREQ2FBIN(5400, 0),
                FREQ2FBIN(5500, 0),
                FREQ2FBIN(5600, 0),
 -              FREQ2FBIN(5725, 0),
 -              FREQ2FBIN(5825, 0)
 +              FREQ2FBIN(5700, 0),
 +              FREQ2FBIN(5785, 0)
        },
        .calPierData5G = {
                        {
        },
        .calTarget_freqbin_5G = {
                FREQ2FBIN(5180, 0),
 -              FREQ2FBIN(5220, 0),
 +              FREQ2FBIN(5240, 0),
                FREQ2FBIN(5320, 0),
                FREQ2FBIN(5400, 0),
                FREQ2FBIN(5500, 0),
                FREQ2FBIN(5600, 0),
 -              FREQ2FBIN(5725, 0),
 +              FREQ2FBIN(5700, 0),
                FREQ2FBIN(5825, 0)
        },
        .calTarget_freqbin_5GHT20 = {
                FREQ2FBIN(5180, 0),
                FREQ2FBIN(5240, 0),
                FREQ2FBIN(5320, 0),
 +              FREQ2FBIN(5400, 0),
                FREQ2FBIN(5500, 0),
                FREQ2FBIN(5700, 0),
                FREQ2FBIN(5745, 0),
 -              FREQ2FBIN(5725, 0),
                FREQ2FBIN(5825, 0)
        },
        .calTarget_freqbin_5GHT40 = {
                FREQ2FBIN(5180, 0),
                FREQ2FBIN(5240, 0),
                FREQ2FBIN(5320, 0),
 +              FREQ2FBIN(5400, 0),
                FREQ2FBIN(5500, 0),
                FREQ2FBIN(5700, 0),
                FREQ2FBIN(5745, 0),
 -              FREQ2FBIN(5725, 0),
                FREQ2FBIN(5825, 0)
         },
        .calTargetPower5G = {
                /* 6-24,36,48,54 */
 -              { {20, 20, 20, 10} },
 -              { {20, 20, 20, 10} },
 -              { {20, 20, 20, 10} },
 -              { {20, 20, 20, 10} },
 -              { {20, 20, 20, 10} },
 -              { {20, 20, 20, 10} },
 -              { {20, 20, 20, 10} },
 -              { {20, 20, 20, 10} },
 +              { {30, 30, 28, 24} },
 +              { {30, 30, 28, 24} },
 +              { {30, 30, 28, 24} },
 +              { {30, 30, 28, 24} },
 +              { {30, 30, 28, 24} },
 +              { {30, 30, 28, 24} },
 +              { {30, 30, 28, 24} },
 +              { {30, 30, 28, 24} },
         },
        .calTargetPower5GHT20 = {
                /*
                 * 0_8_16,1-3_9-11_17-19,
                 * 4,5,6,7,12,13,14,15,20,21,22,23
                 */
 -              { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
 -              { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
 -              { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
 -              { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
 -              { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
 -              { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
 -              { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
 -              { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
 +              { {30, 30, 30, 28, 24, 20, 30, 28, 24, 20, 0, 0, 0, 0} },
 +              { {30, 30, 30, 28, 24, 20, 30, 28, 24, 20, 0, 0, 0, 0} },
 +              { {30, 30, 30, 26, 22, 18, 30, 26, 22, 18, 0, 0, 0, 0} },
 +              { {30, 30, 30, 26, 22, 18, 30, 26, 22, 18, 0, 0, 0, 0} },
 +              { {30, 30, 30, 24, 20, 16, 30, 24, 20, 16, 0, 0, 0, 0} },
 +              { {30, 30, 30, 24, 20, 16, 30, 24, 20, 16, 0, 0, 0, 0} },
 +              { {30, 30, 30, 22, 18, 14, 30, 22, 18, 14, 0, 0, 0, 0} },
 +              { {30, 30, 30, 22, 18, 14, 30, 22, 18, 14, 0, 0, 0, 0} },
         },
        .calTargetPower5GHT40 =  {
                /*
                 * 0_8_16,1-3_9-11_17-19,
                 * 4,5,6,7,12,13,14,15,20,21,22,23
                 */
 -              { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
 -              { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
 -              { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
 -              { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
 -              { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
 -              { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
 -              { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
 -              { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
 +              { {28, 28, 28, 26, 22, 18, 28, 26, 22, 18, 0, 0, 0, 0} },
 +              { {28, 28, 28, 26, 22, 18, 28, 26, 22, 18, 0, 0, 0, 0} },
 +              { {28, 28, 28, 24, 20, 16, 28, 24, 20, 16, 0, 0, 0, 0} },
 +              { {28, 28, 28, 24, 20, 16, 28, 24, 20, 16, 0, 0, 0, 0} },
 +              { {28, 28, 28, 22, 18, 14, 28, 22, 18, 14, 0, 0, 0, 0} },
 +              { {28, 28, 28, 22, 18, 14, 28, 22, 18, 14, 0, 0, 0, 0} },
 +              { {28, 28, 28, 20, 16, 12, 28, 20, 16, 12, 0, 0, 0, 0} },
 +              { {28, 28, 28, 20, 16, 12, 28, 20, 16, 12, 0, 0, 0, 0} },
         },
        .ctlIndex_5G =  {
                0x10, 0x16, 0x18, 0x40, 0x46,
        .ctlPowerData_5G = {
                {
                        {
-                               {60, 1}, {60, 1}, {60, 1}, {60, 1},
-                               {60, 1}, {60, 1}, {60, 1}, {60, 0},
+                               CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
+                               CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
                        }
                },
                {
                        {
-                               {60, 1}, {60, 1}, {60, 1}, {60, 1},
-                               {60, 1}, {60, 1}, {60, 1}, {60, 0},
+                               CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
+                               CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
                        }
                },
                {
                        {
-                               {60, 0}, {60, 1}, {60, 0}, {60, 1},
-                               {60, 1}, {60, 1}, {60, 1}, {60, 1},
+                               CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 1),
+                               CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
                        }
                },
                {
                        {
-                               {60, 0}, {60, 1}, {60, 1}, {60, 0},
-                               {60, 1}, {60, 0}, {60, 0}, {60, 0},
+                               CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 0),
+                               CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0),
                        }
                },
                {
                        {
-                               {60, 1}, {60, 1}, {60, 1}, {60, 0},
-                               {60, 0}, {60, 0}, {60, 0}, {60, 0},
+                               CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
+                               CTL(60, 0), CTL(60, 0), CTL(60, 0), CTL(60, 0),
                        }
                },
                {
                        {
-                               {60, 1}, {60, 1}, {60, 1}, {60, 1},
-                               {60, 1}, {60, 0}, {60, 0}, {60, 0},
+                               CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
+                               CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0),
                        }
                },
                {
                        {
-                               {60, 1}, {60, 1}, {60, 1}, {60, 1},
-                               {60, 1}, {60, 1}, {60, 1}, {60, 1},
+                               CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
+                               CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
                        }
                },
                {
                        {
-                               {60, 1}, {60, 1}, {60, 0}, {60, 1},
-                               {60, 1}, {60, 1}, {60, 1}, {60, 0},
+                               CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1),
+                               CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
                        }
                },
                {
                        {
-                               {60, 1}, {60, 0}, {60, 1}, {60, 1},
-                               {60, 1}, {60, 1}, {60, 0}, {60, 1},
+                               CTL(60, 1), CTL(60, 0), CTL(60, 1), CTL(60, 1),
+                               CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1),
                        }
                },
         }
  };
  
 +
 +static const struct ar9300_eeprom *ar9300_eep_templates[] = {
 +      &ar9300_default,
 +      &ar9300_x112,
 +      &ar9300_h116,
 +      &ar9300_h112,
 +      &ar9300_x113,
 +};
 +
 +static const struct ar9300_eeprom *ar9003_eeprom_struct_find_by_id(int id)
 +{
 +#define N_LOOP (sizeof(ar9300_eep_templates) / sizeof(ar9300_eep_templates[0]))
 +      int it;
 +
 +      for (it = 0; it < N_LOOP; it++)
 +              if (ar9300_eep_templates[it]->templateVersion == id)
 +                      return ar9300_eep_templates[it];
 +      return NULL;
 +#undef N_LOOP
 +}
 +
 +
  static u16 ath9k_hw_fbin2freq(u8 fbin, bool is2GHz)
  {
        if (fbin == AR9300_BCHAN_UNUSED)
@@@ -2982,16 -639,6 +2985,16 @@@ static int ath9k_hw_ar9300_check_eeprom
        return 0;
  }
  
 +static int interpolate(int x, int xa, int xb, int ya, int yb)
 +{
 +      int bf, factor, plus;
 +
 +      bf = 2 * (yb - ya) * (x - xa) / (xb - xa);
 +      factor = bf / 2;
 +      plus = bf % 2;
 +      return ya + factor + plus;
 +}
 +
  static u32 ath9k_hw_ar9300_get_eeprom(struct ath_hw *ah,
                                      enum eeprom_param param)
  {
@@@ -3104,36 -751,6 +3107,36 @@@ error
        return false;
  }
  
 +static bool ar9300_otp_read_word(struct ath_hw *ah, int addr, u32 *data)
 +{
 +      REG_READ(ah, AR9300_OTP_BASE + (4 * addr));
 +
 +      if (!ath9k_hw_wait(ah, AR9300_OTP_STATUS, AR9300_OTP_STATUS_TYPE,
 +                         AR9300_OTP_STATUS_VALID, 1000))
 +              return false;
 +
 +      *data = REG_READ(ah, AR9300_OTP_READ_DATA);
 +      return true;
 +}
 +
 +static bool ar9300_read_otp(struct ath_hw *ah, int address, u8 *buffer,
 +                          int count)
 +{
 +      u32 data;
 +      int i;
 +
 +      for (i = 0; i < count; i++) {
 +              int offset = 8 * ((address - i) % 4);
 +              if (!ar9300_otp_read_word(ah, (address - i) / 4, &data))
 +                      return false;
 +
 +              buffer[i] = (data >> offset) & 0xff;
 +      }
 +
 +      return true;
 +}
 +
 +
  static void ar9300_comp_hdr_unpack(u8 *best, int *code, int *reference,
                                   int *length, int *major, int *minor)
  {
@@@ -3210,7 -827,6 +3213,7 @@@ static int ar9300_compress_decision(str
  {
        struct ath_common *common = ath9k_hw_common(ah);
        u8 *dptr;
 +      const struct ar9300_eeprom *eep = NULL;
  
        switch (code) {
        case _CompressNone:
                if (reference == 0) {
                        dptr = mptr;
                } else {
 -                      if (reference != 2) {
 +                      eep = ar9003_eeprom_struct_find_by_id(reference);
 +                      if (eep == NULL) {
                                ath_print(common, ATH_DBG_EEPROM,
                                          "cant find reference eeprom"
                                          "struct %d\n", reference);
                                return -1;
                        }
 -                      memcpy(mptr, &ar9300_default, mdata_size);
 +                      memcpy(mptr, eep, mdata_size);
                }
                ath_print(common, ATH_DBG_EEPROM,
                          "restore eeprom %d: block, reference %d,"
        return 0;
  }
  
 +typedef bool (*eeprom_read_op)(struct ath_hw *ah, int address, u8 *buffer,
 +                             int count);
 +
 +static bool ar9300_check_header(void *data)
 +{
 +      u32 *word = data;
 +      return !(*word == 0 || *word == ~0);
 +}
 +
 +static bool ar9300_check_eeprom_header(struct ath_hw *ah, eeprom_read_op read,
 +                                     int base_addr)
 +{
 +      u8 header[4];
 +
 +      if (!read(ah, base_addr, header, 4))
 +              return false;
 +
 +      return ar9300_check_header(header);
 +}
 +
 +static int ar9300_eeprom_restore_flash(struct ath_hw *ah, u8 *mptr,
 +                                     int mdata_size)
 +{
 +      struct ath_common *common = ath9k_hw_common(ah);
 +      u16 *data = (u16 *) mptr;
 +      int i;
 +
 +      for (i = 0; i < mdata_size / 2; i++, data++)
 +              ath9k_hw_nvram_read(common, i, data);
 +
 +      return 0;
 +}
  /*
   * Read the configuration data from the eeprom.
   * The data can be put in any specified memory buffer.
@@@ -3303,10 -886,6 +3306,10 @@@ static int ar9300_eeprom_restore_intern
        int it;
        u16 checksum, mchecksum;
        struct ath_common *common = ath9k_hw_common(ah);
 +      eeprom_read_op read;
 +
 +      if (ath9k_hw_use_flash(ah))
 +              return ar9300_eeprom_restore_flash(ah, mptr, mdata_size);
  
        word = kzalloc(2048, GFP_KERNEL);
        if (!word)
  
        memcpy(mptr, &ar9300_default, mdata_size);
  
 +      read = ar9300_read_eeprom;
 +      cptr = AR9300_BASE_ADDR;
 +      ath_print(common, ATH_DBG_EEPROM,
 +              "Trying EEPROM accesss at Address 0x%04x\n", cptr);
 +      if (ar9300_check_eeprom_header(ah, read, cptr))
 +              goto found;
 +
 +      cptr = AR9300_BASE_ADDR_512;
 +      ath_print(common, ATH_DBG_EEPROM,
 +              "Trying EEPROM accesss at Address 0x%04x\n", cptr);
 +      if (ar9300_check_eeprom_header(ah, read, cptr))
 +              goto found;
 +
 +      read = ar9300_read_otp;
        cptr = AR9300_BASE_ADDR;
 +      ath_print(common, ATH_DBG_EEPROM,
 +              "Trying OTP accesss at Address 0x%04x\n", cptr);
 +      if (ar9300_check_eeprom_header(ah, read, cptr))
 +              goto found;
 +
 +      cptr = AR9300_BASE_ADDR_512;
 +      ath_print(common, ATH_DBG_EEPROM,
 +              "Trying OTP accesss at Address 0x%04x\n", cptr);
 +      if (ar9300_check_eeprom_header(ah, read, cptr))
 +              goto found;
 +
 +      goto fail;
 +
 +found:
 +      ath_print(common, ATH_DBG_EEPROM, "Found valid EEPROM data");
 +
        for (it = 0; it < MSTATE; it++) {
 -              if (!ar9300_read_eeprom(ah, cptr, word, COMP_HDR_LEN))
 +              if (!read(ah, cptr, word, COMP_HDR_LEN))
                        goto fail;
  
 -              if ((word[0] == 0 && word[1] == 0 && word[2] == 0 &&
 -                   word[3] == 0) || (word[0] == 0xff && word[1] == 0xff
 -                                     && word[2] == 0xff && word[3] == 0xff))
 +              if (!ar9300_check_header(word))
                        break;
  
                ar9300_comp_hdr_unpack(word, &code, &reference,
                }
  
                osize = length;
 -              ar9300_read_eeprom(ah, cptr, word,
 -                                 COMP_HDR_LEN + osize + COMP_CKSUM_LEN);
 +              read(ah, cptr, word, COMP_HDR_LEN + osize + COMP_CKSUM_LEN);
                checksum = ar9300_comp_cksum(&word[COMP_HDR_LEN], length);
                mchecksum = word[COMP_HDR_LEN + osize] |
                    (word[COMP_HDR_LEN + osize + 1] << 8);
@@@ -3443,9 -995,9 +3446,9 @@@ static s32 ar9003_hw_xpa_bias_level_get
  static void ar9003_hw_xpa_bias_level_apply(struct ath_hw *ah, bool is2ghz)
  {
        int bias = ar9003_hw_xpa_bias_level_get(ah, is2ghz);
 -      REG_RMW_FIELD(ah, AR_CH0_TOP, AR_CH0_TOP_XPABIASLVL, (bias & 0x3));
 -      REG_RMW_FIELD(ah, AR_CH0_THERM, AR_CH0_THERM_SPARE,
 -                    ((bias >> 2) & 0x3));
 +      REG_RMW_FIELD(ah, AR_CH0_TOP, AR_CH0_TOP_XPABIASLVL, bias);
 +      REG_RMW_FIELD(ah, AR_CH0_THERM, AR_CH0_THERM_XPABIASLVL_MSB, bias >> 2);
 +      REG_RMW_FIELD(ah, AR_CH0_THERM, AR_CH0_THERM_XPASHORT2GND, 1);
  }
  
  static u32 ar9003_hw_ant_ctrl_common_get(struct ath_hw *ah, bool is2ghz)
@@@ -3548,82 -1100,6 +3551,82 @@@ static void ar9003_hw_drive_strength_ap
        REG_WRITE(ah, AR_PHY_65NM_CH0_BIAS4, reg);
  }
  
 +static u16 ar9003_hw_atten_chain_get(struct ath_hw *ah, int chain,
 +                                   struct ath9k_channel *chan)
 +{
 +      int f[3], t[3];
 +      u16 value;
 +      struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
 +
 +      if (chain >= 0 && chain < 3) {
 +              if (IS_CHAN_2GHZ(chan))
 +                      return eep->modalHeader2G.xatten1DB[chain];
 +              else if (eep->base_ext2.xatten1DBLow[chain] != 0) {
 +                      t[0] = eep->base_ext2.xatten1DBLow[chain];
 +                      f[0] = 5180;
 +                      t[1] = eep->modalHeader5G.xatten1DB[chain];
 +                      f[1] = 5500;
 +                      t[2] = eep->base_ext2.xatten1DBHigh[chain];
 +                      f[2] = 5785;
 +                      value = ar9003_hw_power_interpolate((s32) chan->channel,
 +                                                          f, t, 3);
 +                      return value;
 +              } else
 +                      return eep->modalHeader5G.xatten1DB[chain];
 +      }
 +
 +      return 0;
 +}
 +
 +
 +static u16 ar9003_hw_atten_chain_get_margin(struct ath_hw *ah, int chain,
 +                                          struct ath9k_channel *chan)
 +{
 +      int f[3], t[3];
 +      u16 value;
 +      struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
 +
 +      if (chain >= 0 && chain < 3) {
 +              if (IS_CHAN_2GHZ(chan))
 +                      return eep->modalHeader2G.xatten1Margin[chain];
 +              else if (eep->base_ext2.xatten1MarginLow[chain] != 0) {
 +                      t[0] = eep->base_ext2.xatten1MarginLow[chain];
 +                      f[0] = 5180;
 +                      t[1] = eep->modalHeader5G.xatten1Margin[chain];
 +                      f[1] = 5500;
 +                      t[2] = eep->base_ext2.xatten1MarginHigh[chain];
 +                      f[2] = 5785;
 +                      value = ar9003_hw_power_interpolate((s32) chan->channel,
 +                                                          f, t, 3);
 +                      return value;
 +              } else
 +                      return eep->modalHeader5G.xatten1Margin[chain];
 +      }
 +
 +      return 0;
 +}
 +
 +static void ar9003_hw_atten_apply(struct ath_hw *ah, struct ath9k_channel *chan)
 +{
 +      int i;
 +      u16 value;
 +      unsigned long ext_atten_reg[3] = {AR_PHY_EXT_ATTEN_CTL_0,
 +                                        AR_PHY_EXT_ATTEN_CTL_1,
 +                                        AR_PHY_EXT_ATTEN_CTL_2,
 +                                       };
 +
 +      /* Test value. if 0 then attenuation is unused. Don't load anything. */
 +      for (i = 0; i < 3; i++) {
 +              value = ar9003_hw_atten_chain_get(ah, i, chan);
 +              REG_RMW_FIELD(ah, ext_atten_reg[i],
 +                            AR_PHY_EXT_ATTEN_CTL_XATTEN1_DB, value);
 +
 +              value = ar9003_hw_atten_chain_get_margin(ah, i, chan);
 +              REG_RMW_FIELD(ah, ext_atten_reg[i],
 +                            AR_PHY_EXT_ATTEN_CTL_XATTEN1_MARGIN, value);
 +      }
 +}
 +
  static void ar9003_hw_internal_regulator_apply(struct ath_hw *ah)
  {
        int internal_regulator =
@@@ -3655,7 -1131,6 +3658,7 @@@ static void ath9k_hw_ar9300_set_board_v
        ar9003_hw_xpa_bias_level_apply(ah, IS_CHAN_2GHZ(chan));
        ar9003_hw_ant_ctrl_apply(ah, IS_CHAN_2GHZ(chan));
        ar9003_hw_drive_strength_apply(ah);
 +      ar9003_hw_atten_apply(ah, chan);
        ar9003_hw_internal_regulator_apply(ah);
  }
  
@@@ -3717,7 -1192,7 +3720,7 @@@ static int ar9003_hw_power_interpolate(
                        if (hx == lx)
                                y = ly;
                        else    /* interpolate  */
 -                              y = ly + (((x - lx) * (hy - ly)) / (hx - lx));
 +                              y = interpolate(x, lx, hx, ly, hy);
                } else          /* only low is good, use it */
                        y = ly;
        } else if (hhave)       /* only high is good, use it */
@@@ -4165,7 -1640,6 +4168,7 @@@ static int ar9003_hw_power_control_over
  {
        int tempSlope = 0;
        struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
 +      int f[3], t[3];
  
        REG_RMW(ah, AR_PHY_TPC_11_B0,
                (correction[0] << AR_PHY_TPC_OLPC_GAIN_DELTA_S),
         */
        if (frequency < 4000)
                tempSlope = eep->modalHeader2G.tempSlope;
 -      else
 +      else if (eep->base_ext2.tempSlopeLow != 0) {
 +              t[0] = eep->base_ext2.tempSlopeLow;
 +              f[0] = 5180;
 +              t[1] = eep->modalHeader5G.tempSlope;
 +              f[1] = 5500;
 +              t[2] = eep->base_ext2.tempSlopeHigh;
 +              f[2] = 5785;
 +              tempSlope = ar9003_hw_power_interpolate((s32) frequency,
 +                                                      f, t, 3);
 +      } else
                tempSlope = eep->modalHeader5G.tempSlope;
  
        REG_RMW_FIELD(ah, AR_PHY_TPC_19, AR_PHY_TPC_19_ALPHA_THERM, tempSlope);
@@@ -4307,23 -1772,25 +4310,23 @@@ static int ar9003_hw_calibration_apply(
                        /* so is the high frequency, interpolate */
                        if (hfrequency[ichain] - frequency < 1000) {
  
 -                              correction[ichain] = lcorrection[ichain] +
 -                                  (((frequency - lfrequency[ichain]) *
 -                                    (hcorrection[ichain] -
 -                                     lcorrection[ichain])) /
 -                                   (hfrequency[ichain] - lfrequency[ichain]));
 -
 -                              temperature[ichain] = ltemperature[ichain] +
 -                                  (((frequency - lfrequency[ichain]) *
 -                                    (htemperature[ichain] -
 -                                     ltemperature[ichain])) /
 -                                   (hfrequency[ichain] - lfrequency[ichain]));
 -
 -                              voltage[ichain] =
 -                                  lvoltage[ichain] +
 -                                  (((frequency -
 -                                     lfrequency[ichain]) * (hvoltage[ichain] -
 -                                                            lvoltage[ichain]))
 -                                   / (hfrequency[ichain] -
 -                                      lfrequency[ichain]));
 +                              correction[ichain] = interpolate(frequency,
 +                                              lfrequency[ichain],
 +                                              hfrequency[ichain],
 +                                              lcorrection[ichain],
 +                                              hcorrection[ichain]);
 +
 +                              temperature[ichain] = interpolate(frequency,
 +                                              lfrequency[ichain],
 +                                              hfrequency[ichain],
 +                                              ltemperature[ichain],
 +                                              htemperature[ichain]);
 +
 +                              voltage[ichain] = interpolate(frequency,
 +                                              lfrequency[ichain],
 +                                              hfrequency[ichain],
 +                                              lvoltage[ichain],
 +                                              hvoltage[ichain]);
                        }
                        /* only low is good, use it */
                        else {
@@@ -4363,9 -1830,9 +4366,9 @@@ static u16 ar9003_hw_get_direct_edge_po
        struct cal_ctl_data_5g *ctl_5g = eep->ctlPowerData_5G;
  
        if (is2GHz)
-               return ctl_2g[idx].ctlEdges[edge].tPower;
+               return CTL_EDGE_TPOWER(ctl_2g[idx].ctlEdges[edge]);
        else
-               return ctl_5g[idx].ctlEdges[edge].tPower;
+               return CTL_EDGE_TPOWER(ctl_5g[idx].ctlEdges[edge]);
  }
  
  static u16 ar9003_hw_get_indirect_edge_power(struct ar9300_eeprom *eep,
  
        if (is2GHz) {
                if (ath9k_hw_fbin2freq(ctl_freqbin[edge - 1], 1) < freq &&
-                   ctl_2g[idx].ctlEdges[edge - 1].flag)
-                       return ctl_2g[idx].ctlEdges[edge - 1].tPower;
+                   CTL_EDGE_FLAGS(ctl_2g[idx].ctlEdges[edge - 1]))
+                       return CTL_EDGE_TPOWER(ctl_2g[idx].ctlEdges[edge - 1]);
        } else {
                if (ath9k_hw_fbin2freq(ctl_freqbin[edge - 1], 0) < freq &&
-                   ctl_5g[idx].ctlEdges[edge - 1].flag)
-                       return ctl_5g[idx].ctlEdges[edge - 1].tPower;
+                   CTL_EDGE_FLAGS(ctl_5g[idx].ctlEdges[edge - 1]))
+                       return CTL_EDGE_TPOWER(ctl_5g[idx].ctlEdges[edge - 1]);
        }
  
        return AR9300_MAX_RATE_POWER;
@@@ -4455,16 -1922,14 +4458,16 @@@ static void ar9003_hw_set_power_per_rat
        int i;
        int16_t  twiceLargestAntenna;
        u16 scaledPower = 0, minCtlPower, maxRegAllowedPower;
 -      u16 ctlModesFor11a[] = {
 +      static const u16 ctlModesFor11a[] = {
                CTL_11A, CTL_5GHT20, CTL_11A_EXT, CTL_5GHT40
        };
 -      u16 ctlModesFor11g[] = {
 +      static const u16 ctlModesFor11g[] = {
                CTL_11B, CTL_11G, CTL_2GHT20, CTL_11B_EXT,
                CTL_11G_EXT, CTL_2GHT40
        };
 -      u16 numCtlModes, *pCtlMode, ctlMode, freq;
 +      u16 numCtlModes;
 +      const u16 *pCtlMode;
 +      u16 ctlMode, freq;
        struct chan_centers centers;
        u8 *ctlIndex;
        u8 ctlNum;
@@@ -4669,9 -2134,8 +4672,9 @@@ static void ath9k_hw_ar9300_set_txpower
                                        struct ath9k_channel *chan, u16 cfgCtl,
                                        u8 twiceAntennaReduction,
                                        u8 twiceMaxRegulatoryPower,
 -                                      u8 powerLimit)
 +                                      u8 powerLimit, bool test)
  {
 +      struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
        struct ath_common *common = ath9k_hw_common(ah);
        u8 targetPowerValT2[ar9300RateSize];
        unsigned int i = 0;
                                           twiceMaxRegulatoryPower,
                                           powerLimit);
  
 -      while (i < ar9300RateSize) {
 +      regulatory->max_power_level = 0;
 +      for (i = 0; i < ar9300RateSize; i++) {
 +              if (targetPowerValT2[i] > regulatory->max_power_level)
 +                      regulatory->max_power_level = targetPowerValT2[i];
 +      }
 +
 +      if (test)
 +              return;
 +
 +      for (i = 0; i < ar9300RateSize; i++) {
                ath_print(common, ATH_DBG_EEPROM,
                          "TPC[%02d] 0x%08x ", i, targetPowerValT2[i]);
                i++;
                i++;
        }
  
 -      /* Write target power array to registers */
 -      ar9003_hw_tx_power_regwrite(ah, targetPowerValT2);
 -
        /*
         * This is the TX power we send back to driver core,
         * and it can use to pass to userspace to display our
                i = ALL_TARGET_HT20_0_8_16; /* ht20 */
  
        ah->txpower_limit = targetPowerValT2[i];
 +      regulatory->max_power_level = targetPowerValT2[i];
  
 +      /* Write target power array to registers */
 +      ar9003_hw_tx_power_regwrite(ah, targetPowerValT2);
        ar9003_hw_calibration_apply(ah, chan->channel);
  }
  
  #define FIXED_CCA_THRESHOLD 15
  
  #define AR9300_BASE_ADDR 0x3ff
 +#define AR9300_BASE_ADDR_512 0x1ff
 +
 +#define AR9300_OTP_BASE                       0x14000
 +#define AR9300_OTP_STATUS             0x15f18
 +#define AR9300_OTP_STATUS_TYPE                0x7
 +#define AR9300_OTP_STATUS_VALID               0x4
 +#define AR9300_OTP_STATUS_ACCESS_BUSY 0x2
 +#define AR9300_OTP_STATUS_SM_BUSY     0x1
 +#define AR9300_OTP_READ_DATA          0x15f1c
  
  enum targetPowerHTRates {
        HT_TARGET_RATE_0_8_16,
@@@ -245,7 -236,7 +245,7 @@@ struct ar9300_modal_eep_header 
        u8 thresh62;
        __le32 papdRateMaskHt20;
        __le32 papdRateMaskHt40;
 -      u8 futureModal[24];
 +      u8 futureModal[10];
  } __packed;
  
  struct ar9300_cal_data_per_freq_op_loop {
@@@ -270,33 -261,14 +270,28 @@@ struct cal_tgt_pow_ht 
        u8 tPow2x[14];
  } __packed;
  
- struct cal_ctl_edge_pwr {
-       u8 tPower:6,
-          flag:2;
- } __packed;
  struct cal_ctl_data_2g {
-       struct cal_ctl_edge_pwr ctlEdges[AR9300_NUM_BAND_EDGES_2G];
+       u8 ctlEdges[AR9300_NUM_BAND_EDGES_2G];
  } __packed;
  
  struct cal_ctl_data_5g {
-       struct cal_ctl_edge_pwr ctlEdges[AR9300_NUM_BAND_EDGES_5G];
+       u8 ctlEdges[AR9300_NUM_BAND_EDGES_5G];
  } __packed;
  
 +struct ar9300_BaseExtension_1 {
 +      u8 ant_div_control;
 +      u8 future[13];
 +} __packed;
 +
 +struct ar9300_BaseExtension_2 {
 +      int8_t    tempSlopeLow;
 +      int8_t    tempSlopeHigh;
 +      u8   xatten1DBLow[AR9300_MAX_CHAINS];
 +      u8   xatten1MarginLow[AR9300_MAX_CHAINS];
 +      u8   xatten1DBHigh[AR9300_MAX_CHAINS];
 +      u8   xatten1MarginHigh[AR9300_MAX_CHAINS];
 +} __packed;
 +
  struct ar9300_eeprom {
        u8 eepromVersion;
        u8 templateVersion;
        struct ar9300_base_eep_hdr baseEepHeader;
  
        struct ar9300_modal_eep_header modalHeader2G;
 +      struct ar9300_BaseExtension_1 base_ext1;
        u8 calFreqPier2G[AR9300_NUM_2G_CAL_PIERS];
        struct ar9300_cal_data_per_freq_op_loop
         calPierData2G[AR9300_MAX_CHAINS][AR9300_NUM_2G_CAL_PIERS];
        u8 ctl_freqbin_2G[AR9300_NUM_CTLS_2G][AR9300_NUM_BAND_EDGES_2G];
        struct cal_ctl_data_2g ctlPowerData_2G[AR9300_NUM_CTLS_2G];
        struct ar9300_modal_eep_header modalHeader5G;
 +      struct ar9300_BaseExtension_2 base_ext2;
        u8 calFreqPier5G[AR9300_NUM_5G_CAL_PIERS];
        struct ar9300_cal_data_per_freq_op_loop
         calPierData5G[AR9300_MAX_CHAINS][AR9300_NUM_5G_CAL_PIERS];
@@@ -21,6 -21,7 +21,7 @@@
  #include <linux/device.h>
  #include <linux/leds.h>
  #include <linux/completion.h>
+ #include <linux/pm_qos_params.h>
  
  #include "debug.h"
  #include "common.h"
@@@ -86,19 -87,33 +87,19 @@@ struct ath_config 
  /**
   * enum buffer_type - Buffer type flags
   *
 - * @BUF_HT: Send this buffer using HT capabilities
   * @BUF_AMPDU: This buffer is an ampdu, as part of an aggregate (during TX)
   * @BUF_AGGR: Indicates whether the buffer can be aggregated
   *    (used in aggregation scheduling)
 - * @BUF_RETRY: Indicates whether the buffer is retried
   * @BUF_XRETRY: To denote excessive retries of the buffer
   */
  enum buffer_type {
 -      BUF_HT                  = BIT(1),
        BUF_AMPDU               = BIT(2),
        BUF_AGGR                = BIT(3),
 -      BUF_RETRY               = BIT(4),
        BUF_XRETRY              = BIT(5),
  };
  
 -#define bf_nframes            bf_state.bfs_nframes
 -#define bf_al                 bf_state.bfs_al
 -#define bf_frmlen             bf_state.bfs_frmlen
 -#define bf_retries            bf_state.bfs_retries
 -#define bf_seqno              bf_state.bfs_seqno
 -#define bf_tidno              bf_state.bfs_tidno
 -#define bf_keyix                bf_state.bfs_keyix
 -#define bf_keytype            bf_state.bfs_keytype
 -#define bf_isht(bf)           (bf->bf_state.bf_type & BUF_HT)
  #define bf_isampdu(bf)                (bf->bf_state.bf_type & BUF_AMPDU)
  #define bf_isaggr(bf)         (bf->bf_state.bf_type & BUF_AGGR)
 -#define bf_isretried(bf)      (bf->bf_state.bf_type & BUF_RETRY)
  #define bf_isxretried(bf)     (bf->bf_state.bf_type & BUF_XRETRY)
  
  #define ATH_TXSTATUS_RING_SIZE 64
@@@ -163,8 -178,8 +164,8 @@@ void ath_descdma_cleanup(struct ath_sof
  
  /* returns delimiter padding required given the packet length */
  #define ATH_AGGR_GET_NDELIM(_len)                                     \
 -      (((((_len) + ATH_AGGR_DELIM_SZ) < ATH_AGGR_MINPLEN) ?           \
 -        (ATH_AGGR_MINPLEN - (_len) - ATH_AGGR_DELIM_SZ) : 0) >> 2)
 +       (((_len) >= ATH_AGGR_MINPLEN) ? 0 :                             \
 +        DIV_ROUND_UP(ATH_AGGR_MINPLEN - (_len), ATH_AGGR_DELIM_SZ))
  
  #define BAW_WITHIN(_start, _bawsz, _seqno) \
        ((((_seqno) - (_start)) & 4095) < (_bawsz))
@@@ -181,6 -196,7 +182,6 @@@ enum ATH_AGGR_STATUS 
  
  #define ATH_TXFIFO_DEPTH 8
  struct ath_txq {
 -      int axq_class;
        u32 axq_qnum;
        u32 *axq_link;
        struct list_head axq_q;
        struct list_head txq_fifo_pending;
        u8 txq_headidx;
        u8 txq_tailidx;
 +      int pending_frames;
  };
  
  struct ath_atx_ac {
 +      struct ath_txq *txq;
        int sched;
 -      int qnum;
        struct list_head list;
        struct list_head tid_q;
  };
  
 +struct ath_frame_info {
 +      int framelen;
 +      u32 keyix;
 +      enum ath9k_key_type keytype;
 +      u8 retries;
 +      u16 seqno;
 +};
 +
  struct ath_buf_state {
 -      int bfs_nframes;
 -      u16 bfs_al;
 -      u16 bfs_frmlen;
 -      int bfs_seqno;
 -      int bfs_tidno;
 -      int bfs_retries;
        u8 bf_type;
        u8 bfs_paprd;
 -      unsigned long bfs_paprd_timestamp;
 -      u32 bfs_keyix;
 -      enum ath9k_key_type bfs_keytype;
 +      enum ath9k_internal_frame_type bfs_ftype;
  };
  
  struct ath_buf {
        dma_addr_t bf_daddr;            /* physical addr of desc */
        dma_addr_t bf_buf_addr; /* physical addr of data buffer, for DMA */
        bool bf_stale;
 -      bool bf_tx_aborted;
        u16 bf_flags;
        struct ath_buf_state bf_state;
        struct ath_wiphy *aphy;
@@@ -255,6 -271,7 +256,6 @@@ struct ath_node 
        struct ath_atx_ac ac[WME_NUM_AC];
        u16 maxampdu;
        u8 mpdudensity;
 -      int last_rssi;
  };
  
  #define AGGR_CLEANUP         BIT(1)
  
  struct ath_tx_control {
        struct ath_txq *txq;
 +      struct ath_node *an;
        int if_id;
        enum ath9k_internal_frame_type frame_type;
        u8 paprd;
  struct ath_tx {
        u16 seq_no;
        u32 txqsetup;
 -      int hwq_map[WME_NUM_AC];
        spinlock_t txbuflock;
        struct list_head txbuf;
        struct ath_txq txq[ATH9K_NUM_TX_QUEUES];
        struct ath_descdma txdma;
 -      int pending_frames[WME_NUM_AC];
 +      struct ath_txq *txq_map[WME_NUM_AC];
  };
  
  struct ath_rx_edma {
@@@ -294,6 -311,7 +295,6 @@@ struct ath_rx 
        u8 rxotherant;
        u32 *rxlink;
        unsigned int rxfilter;
 -      spinlock_t pcu_lock;
        spinlock_t rxbuflock;
        struct list_head rxbuf;
        struct ath_descdma rxdma;
@@@ -310,6 -328,7 +311,6 @@@ void ath_rx_cleanup(struct ath_softc *s
  int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp);
  struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype);
  void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq);
 -int ath_tx_setup(struct ath_softc *sc, int haltype);
  void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx);
  void ath_draintxq(struct ath_softc *sc,
                     struct ath_txq *txq, bool retry_tx);
@@@ -324,6 -343,7 +325,6 @@@ int ath_tx_start(struct ieee80211_hw *h
                 struct ath_tx_control *txctl);
  void ath_tx_tasklet(struct ath_softc *sc);
  void ath_tx_edma_tasklet(struct ath_softc *sc);
 -void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb);
  int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
                      u16 tid, u16 *ssn);
  void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid);
@@@ -581,14 -601,13 +582,14 @@@ struct ath_softc 
        struct ath_hw *sc_ah;
        void __iomem *mem;
        int irq;
 -      spinlock_t sc_resetlock;
        spinlock_t sc_serial_rw;
        spinlock_t sc_pm_lock;
 +      spinlock_t sc_pcu_lock;
        struct mutex mutex;
        struct work_struct paprd_work;
        struct work_struct hw_check_work;
        struct completion paprd_complete;
 +      bool paprd_pending;
  
        u32 intrstatus;
        u32 sc_flags; /* SC_OP_* */
        struct ath_descdma txsdma;
  
        struct ath_ant_comb ant_comb;
+       struct pm_qos_request_list pm_qos_req;
  };
  
  struct ath_wiphy {
        bool idle;
        int chan_idx;
        int chan_is_ht;
 +      int last_rssi;
  };
  
  void ath9k_tasklet(unsigned long data);
  int ath_reset(struct ath_softc *sc, bool retry_tx);
 -int ath_get_mac80211_qnum(u32 queue, struct ath_softc *sc);
  int ath_cabq_update(struct ath_softc *);
  
  static inline void ath_read_cachesize(struct ath_common *common, int *csz)
  }
  
  extern struct ieee80211_ops ath9k_ops;
- extern struct pm_qos_request_list ath9k_pm_qos_req;
  extern int modparam_nohwcrypt;
  extern int led_blink;
  
@@@ -698,7 -718,7 +700,7 @@@ void ath9k_ps_restore(struct ath_softc 
  void ath9k_set_bssid_mask(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
  int ath9k_wiphy_add(struct ath_softc *sc);
  int ath9k_wiphy_del(struct ath_wiphy *aphy);
 -void ath9k_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb);
 +void ath9k_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb, int ftype);
  int ath9k_wiphy_pause(struct ath_wiphy *aphy);
  int ath9k_wiphy_unpause(struct ath_wiphy *aphy);
  int ath9k_wiphy_select(struct ath_wiphy *aphy);
  
  #define AR9287_CHECKSUM_LOCATION (AR9287_EEP_START_LOC + 1)
  
+ #define CTL_EDGE_TPOWER(_ctl) ((_ctl) & 0x3f)
+ #define CTL_EDGE_FLAGS(_ctl) (((_ctl) >> 6) & 0x03)
+ #define LNA_CTL_BUF_MODE      BIT(0)
+ #define LNA_CTL_ISEL_LO               BIT(1)
+ #define LNA_CTL_ISEL_HI               BIT(2)
+ #define LNA_CTL_BUF_IN                BIT(3)
+ #define LNA_CTL_FEM_BAND      BIT(4)
+ #define LNA_CTL_LOCAL_BIAS    BIT(5)
+ #define LNA_CTL_FORCE_XPA     BIT(6)
+ #define LNA_CTL_USE_ANT1      BIT(7)
  enum eeprom_param {
        EEP_NFTHRESH_5,
        EEP_NFTHRESH_2,
@@@ -378,10 -390,7 +390,7 @@@ struct modal_eep_header 
        u8 xatten2Margin[AR5416_MAX_CHAINS];
        u8 ob_ch1;
        u8 db_ch1;
-       u8 useAnt1:1,
-           force_xpaon:1,
-           local_bias:1,
-           femBandSelectUsed:1, xlnabufin:1, xlnaisel:2, xlnabufmode:1;
+       u8 lna_ctl;
        u8 miscBits;
        u16 xpaBiasLvlFreq[3];
        u8 futureModal[6];
@@@ -535,18 -544,10 +544,10 @@@ struct cal_target_power_ht 
        u8 tPow2x[8];
  } __packed;
  
- #ifdef __BIG_ENDIAN_BITFIELD
- struct cal_ctl_edges {
-       u8 bChannel;
-       u8 flag:2, tPower:6;
- } __packed;
- #else
  struct cal_ctl_edges {
        u8 bChannel;
-       u8 tPower:6, flag:2;
+       u8 ctl;
  } __packed;
- #endif
  
  struct cal_data_op_loop_ar9287 {
        u8 pwrPdg[2][5];
@@@ -680,8 -681,7 +681,8 @@@ struct eeprom_ops 
        void (*set_addac)(struct ath_hw *hw, struct ath9k_channel *chan);
        void (*set_txpower)(struct ath_hw *hw, struct ath9k_channel *chan,
                           u16 cfgCtl, u8 twiceAntennaReduction,
 -                         u8 twiceMaxRegulatoryPower, u8 powerLimit);
 +                         u8 twiceMaxRegulatoryPower, u8 powerLimit,
 +                         bool test);
        u16 (*get_spur_channel)(struct ath_hw *ah, u16 i, bool is2GHz);
  };
  
@@@ -451,9 -451,10 +451,10 @@@ static void ath9k_hw_def_set_board_valu
                ath9k_hw_analog_shift_rmw(ah, AR_AN_TOP2,
                                          AR_AN_TOP2_LOCALBIAS,
                                          AR_AN_TOP2_LOCALBIAS_S,
-                                         pModal->local_bias);
+                                         !!(pModal->lna_ctl &
+                                            LNA_CTL_LOCAL_BIAS));
                REG_RMW_FIELD(ah, AR_PHY_XPA_CFG, AR_PHY_FORCE_XPA_CFG,
-                             pModal->force_xpaon);
+                             !!(pModal->lna_ctl & LNA_CTL_FORCE_XPA));
        }
  
        REG_RMW_FIELD(ah, AR_PHY_SETTLING, AR_PHY_SETTLING_SWITCH,
@@@ -1021,16 -1022,13 +1022,16 @@@ static void ath9k_hw_set_def_power_per_
                0, {0, 0, 0, 0}
        };
        u16 scaledPower = 0, minCtlPower, maxRegAllowedPower;
 -      u16 ctlModesFor11a[] =
 -              { CTL_11A, CTL_5GHT20, CTL_11A_EXT, CTL_5GHT40 };
 -      u16 ctlModesFor11g[] =
 -              { CTL_11B, CTL_11G, CTL_2GHT20, CTL_11B_EXT, CTL_11G_EXT,
 -                CTL_2GHT40
 -              };
 -      u16 numCtlModes, *pCtlMode, ctlMode, freq;
 +      static const u16 ctlModesFor11a[] = {
 +              CTL_11A, CTL_5GHT20, CTL_11A_EXT, CTL_5GHT40
 +      };
 +      static const u16 ctlModesFor11g[] = {
 +              CTL_11B, CTL_11G, CTL_2GHT20,
 +              CTL_11B_EXT, CTL_11G_EXT, CTL_2GHT40
 +      };
 +      u16 numCtlModes;
 +      const u16 *pCtlMode;
 +      u16 ctlMode, freq;
        struct chan_centers centers;
        int tx_chainmask;
        u16 twiceMinEdgePower;
@@@ -1261,7 -1259,7 +1262,7 @@@ static void ath9k_hw_def_set_txpower(st
                                    u16 cfgCtl,
                                    u8 twiceAntennaReduction,
                                    u8 twiceMaxRegulatoryPower,
 -                                  u8 powerLimit)
 +                                  u8 powerLimit, bool test)
  {
  #define RT_AR_DELTA(x) (ratesArray[x] - cck_ofdm_delta)
        struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
  
        ath9k_hw_set_def_power_cal_table(ah, chan, &txPowerIndexOffset);
  
 +      regulatory->max_power_level = 0;
        for (i = 0; i < ARRAY_SIZE(ratesArray); i++) {
                ratesArray[i] = (int16_t)(txPowerIndexOffset + ratesArray[i]);
                if (ratesArray[i] > AR5416_MAX_RATE_POWER)
                        ratesArray[i] = AR5416_MAX_RATE_POWER;
 +              if (ratesArray[i] > regulatory->max_power_level)
 +                      regulatory->max_power_level = ratesArray[i];
 +      }
 +
 +      if (!test) {
 +              i = rate6mb;
 +
 +              if (IS_CHAN_HT40(chan))
 +                      i = rateHt40_0;
 +              else if (IS_CHAN_HT20(chan))
 +                      i = rateHt20_0;
 +
 +              regulatory->max_power_level = ratesArray[i];
 +      }
 +
 +      switch(ar5416_get_ntxchains(ah->txchainmask)) {
 +      case 1:
 +              break;
 +      case 2:
 +              regulatory->max_power_level += INCREASE_MAXPOW_BY_TWO_CHAIN;
 +              break;
 +      case 3:
 +              regulatory->max_power_level += INCREASE_MAXPOW_BY_THREE_CHAIN;
 +              break;
 +      default:
 +              ath_print(ath9k_hw_common(ah), ATH_DBG_EEPROM,
 +                        "Invalid chainmask configuration\n");
 +              break;
        }
  
 +      if (test)
 +              return;
 +
        if (AR_SREV_9280_20_OR_LATER(ah)) {
                for (i = 0; i < Ar5416RateSize; i++) {
                        int8_t pwr_table_offset;
        REG_WRITE(ah, AR_PHY_POWER_TX_SUB,
                  ATH9K_POW_SM(pModal->pwrDecreaseFor3Chain, 6)
                  | ATH9K_POW_SM(pModal->pwrDecreaseFor2Chain, 0));
 -
 -      i = rate6mb;
 -
 -      if (IS_CHAN_HT40(chan))
 -              i = rateHt40_0;
 -      else if (IS_CHAN_HT20(chan))
 -              i = rateHt20_0;
 -
 -      if (AR_SREV_9280_20_OR_LATER(ah))
 -              regulatory->max_power_level =
 -                      ratesArray[i] + AR5416_PWR_TABLE_OFFSET_DB * 2;
 -      else
 -              regulatory->max_power_level = ratesArray[i];
 -
 -      switch(ar5416_get_ntxchains(ah->txchainmask)) {
 -      case 1:
 -              break;
 -      case 2:
 -              regulatory->max_power_level += INCREASE_MAXPOW_BY_TWO_CHAIN;
 -              break;
 -      case 3:
 -              regulatory->max_power_level += INCREASE_MAXPOW_BY_THREE_CHAIN;
 -              break;
 -      default:
 -              ath_print(ath9k_hw_common(ah), ATH_DBG_EEPROM,
 -                        "Invalid chainmask configuration\n");
 -              break;
 -      }
  }
  
  static u8 ath9k_hw_def_get_num_ant_config(struct ath_hw *ah,
  
        num_ant_config = 1;
  
-       if (pBase->version >= 0x0E0D)
-               if (pModal->useAnt1)
-                       num_ant_config += 1;
+       if (pBase->version >= 0x0E0D &&
+           (pModal->lna_ctl & LNA_CTL_USE_ANT1))
+               num_ant_config += 1;
  
        return num_ant_config;
  }
@@@ -310,9 -310,10 +310,9 @@@ static bool ath9k_hw_chip_test(struct a
        struct ath_common *common = ath9k_hw_common(ah);
        u32 regAddr[2] = { AR_STA_ID0 };
        u32 regHold[2];
 -      u32 patternData[4] = { 0x55555555,
 -                             0xaaaaaaaa,
 -                             0x66666666,
 -                             0x99999999 };
 +      static const u32 patternData[4] = {
 +              0x55555555, 0xaaaaaaaa, 0x66666666, 0x99999999
 +      };
        int i, j, loop_max;
  
        if (!AR_SREV_9300_20_OR_LATER(ah)) {
@@@ -418,6 -419,10 +418,6 @@@ static void ath9k_hw_init_defaults(stru
        ah->hw_version.magic = AR5416_MAGIC;
        ah->hw_version.subvendorid = 0;
  
 -      ah->ah_flags = 0;
 -      if (!AR_SREV_9100(ah))
 -              ah->ah_flags = AH_USE_EEPROM;
 -
        ah->atim_window = 0;
        ah->sta_id1_defaults =
                AR_STA_ID1_CRPT_MIC_ENABLE |
@@@ -435,7 -440,7 +435,7 @@@ static int ath9k_hw_init_macaddr(struc
        u32 sum;
        int i;
        u16 eeval;
 -      u32 EEP_MAC[] = { EEP_MAC_LSW, EEP_MAC_MID, EEP_MAC_MSW };
 +      static const u32 EEP_MAC[] = { EEP_MAC_LSW, EEP_MAC_MID, EEP_MAC_MSW };
  
        sum = 0;
        for (i = 0; i < 3; i++) {
@@@ -1165,7 -1170,7 +1165,7 @@@ static bool ath9k_hw_channel_change(str
                             channel->max_antenna_gain * 2,
                             channel->max_power * 2,
                             min((u32) MAX_RATE_POWER,
 -                           (u32) regulatory->power_limit));
 +                           (u32) regulatory->power_limit), false);
  
        ath9k_hw_rfbus_done(ah);
  
@@@ -1828,10 -1833,6 +1828,10 @@@ int ath9k_hw_fill_cap_info(struct ath_h
  
        ah->misc_mode |= AR_PCU_MIC_NEW_LOC_ENA;
  
 +      /* enable key search for every frame in an aggregate */
 +      if (AR_SREV_9300_20_OR_LATER(ah))
 +              ah->misc_mode |= AR_PCU_ALWAYS_PERFORM_KEYSEARCH;
 +
        pCap->low_2ghz_chan = 2312;
        pCap->high_2ghz_chan = 2732;
  
        if (AR_SREV_9300_20_OR_LATER(ah))
                pCap->hw_caps |= ATH9K_HW_CAP_RAC_SUPPORTED;
  
 +      if (AR_SREV_9300_20_OR_LATER(ah))
 +              ah->ent_mode = REG_READ(ah, AR_ENT_OTP);
 +
        if (AR_SREV_9287_11_OR_LATER(ah) || AR_SREV_9271(ah))
                pCap->hw_caps |= ATH9K_HW_CAP_SGI_20;
  
@@@ -2046,7 -2044,8 +2046,8 @@@ u32 ath9k_hw_gpio_get(struct ath_hw *ah
                val = REG_READ(ah, AR7010_GPIO_IN);
                return (MS(val, AR7010_GPIO_IN_VAL) & AR_GPIO_BIT(gpio)) == 0;
        } else if (AR_SREV_9300_20_OR_LATER(ah))
-               return MS_REG_READ(AR9300, gpio) != 0;
+               return (MS(REG_READ(ah, AR_GPIO_IN), AR9300_GPIO_IN_VAL) &
+                       AR_GPIO_BIT(gpio)) != 0;
        else if (AR_SREV_9271(ah))
                return MS_REG_READ(AR9271, gpio) != 0;
        else if (AR_SREV_9287_11_OR_LATER(ah))
@@@ -2178,7 -2177,7 +2179,7 @@@ bool ath9k_hw_disable(struct ath_hw *ah
  }
  EXPORT_SYMBOL(ath9k_hw_disable);
  
 -void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit)
 +void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit, bool test)
  {
        struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
        struct ath9k_channel *chan = ah->curchan;
                                 channel->max_antenna_gain * 2,
                                 channel->max_power * 2,
                                 min((u32) MAX_RATE_POWER,
 -                               (u32) regulatory->power_limit));
 +                               (u32) regulatory->power_limit), test);
  }
  EXPORT_SYMBOL(ath9k_hw_set_txpowerlimit);
  
@@@ -2325,10 -2324,11 +2326,10 @@@ static u32 rightmost_index(struct ath_g
        return timer_table->gen_timer_index[b];
  }
  
 -u32 ath9k_hw_gettsf32(struct ath_hw *ah)
 +static u32 ath9k_hw_gettsf32(struct ath_hw *ah)
  {
        return REG_READ(ah, AR_TSF_L32);
  }
 -EXPORT_SYMBOL(ath9k_hw_gettsf32);
  
  struct ath_gen_timer *ath_gen_timer_alloc(struct ath_hw *ah,
                                          void (*trigger)(void *),
@@@ -15,7 -15,6 +15,6 @@@
   */
  
  #include <linux/slab.h>
- #include <linux/pm_qos_params.h>
  
  #include "ath9k.h"
  
@@@ -180,8 -179,6 +179,6 @@@ static const struct ath_ops ath9k_commo
        .write = ath9k_iowrite32,
  };
  
- struct pm_qos_request_list ath9k_pm_qos_req;
  /**************************/
  /*     Initialization     */
  /**************************/
@@@ -398,8 -395,7 +395,8 @@@ static void ath9k_init_crypto(struct at
  
  static int ath9k_init_btcoex(struct ath_softc *sc)
  {
 -      int r, qnum;
 +      struct ath_txq *txq;
 +      int r;
  
        switch (sc->sc_ah->btcoex_hw.scheme) {
        case ATH_BTCOEX_CFG_NONE:
                r = ath_init_btcoex_timer(sc);
                if (r)
                        return -1;
 -              qnum = sc->tx.hwq_map[WME_AC_BE];
 -              ath9k_hw_init_btcoex_hw(sc->sc_ah, qnum);
 +              txq = sc->tx.txq_map[WME_AC_BE];
 +              ath9k_hw_init_btcoex_hw(sc->sc_ah, txq->axq_qnum);
                sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
                break;
        default:
  
  static int ath9k_init_queues(struct ath_softc *sc)
  {
 -      struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        int i = 0;
  
 -      for (i = 0; i < ARRAY_SIZE(sc->tx.hwq_map); i++)
 -              sc->tx.hwq_map[i] = -1;
 -
        sc->beacon.beaconq = ath9k_hw_beaconq_setup(sc->sc_ah);
 -      if (sc->beacon.beaconq == -1) {
 -              ath_print(common, ATH_DBG_FATAL,
 -                        "Unable to setup a beacon xmit queue\n");
 -              goto err;
 -      }
 -
        sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
 -      if (sc->beacon.cabq == NULL) {
 -              ath_print(common, ATH_DBG_FATAL,
 -                        "Unable to setup CAB xmit queue\n");
 -              goto err;
 -      }
  
        sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
        ath_cabq_update(sc);
  
 -      if (!ath_tx_setup(sc, WME_AC_BK)) {
 -              ath_print(common, ATH_DBG_FATAL,
 -                        "Unable to setup xmit queue for BK traffic\n");
 -              goto err;
 -      }
 -
 -      if (!ath_tx_setup(sc, WME_AC_BE)) {
 -              ath_print(common, ATH_DBG_FATAL,
 -                        "Unable to setup xmit queue for BE traffic\n");
 -              goto err;
 -      }
 -      if (!ath_tx_setup(sc, WME_AC_VI)) {
 -              ath_print(common, ATH_DBG_FATAL,
 -                        "Unable to setup xmit queue for VI traffic\n");
 -              goto err;
 -      }
 -      if (!ath_tx_setup(sc, WME_AC_VO)) {
 -              ath_print(common, ATH_DBG_FATAL,
 -                        "Unable to setup xmit queue for VO traffic\n");
 -              goto err;
 -      }
 +      for (i = 0; i < WME_NUM_AC; i++)
 +              sc->tx.txq_map[i] = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, i);
  
        return 0;
 -
 -err:
 -      for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
 -              if (ATH_TXQ_SETUP(sc, i))
 -                      ath_tx_cleanupq(sc, &sc->tx.txq[i]);
 -
 -      return -EIO;
  }
  
  static int ath9k_init_channels_rates(struct ath_softc *sc)
@@@ -533,9 -570,6 +530,9 @@@ static int ath9k_init_softc(u16 devid, 
        ah->hw_version.subsysid = subsysid;
        sc->sc_ah = ah;
  
 +      if (!sc->dev->platform_data)
 +              ah->ah_flags |= AH_USE_EEPROM;
 +
        common = ath9k_hw_common(ah);
        common->ops = &ath9k_common_ops;
        common->bus_ops = bus_ops;
        spin_lock_init(&common->cc_lock);
  
        spin_lock_init(&sc->wiphy_lock);
 -      spin_lock_init(&sc->sc_resetlock);
        spin_lock_init(&sc->sc_serial_rw);
        spin_lock_init(&sc->sc_pm_lock);
        mutex_init(&sc->mutex);
@@@ -607,37 -642,6 +604,37 @@@ err_hw
        return ret;
  }
  
 +static void ath9k_init_band_txpower(struct ath_softc *sc, int band)
 +{
 +      struct ieee80211_supported_band *sband;
 +      struct ieee80211_channel *chan;
 +      struct ath_hw *ah = sc->sc_ah;
 +      struct ath_regulatory *reg = ath9k_hw_regulatory(ah);
 +      int i;
 +
 +      sband = &sc->sbands[band];
 +      for (i = 0; i < sband->n_channels; i++) {
 +              chan = &sband->channels[i];
 +              ah->curchan = &ah->channels[chan->hw_value];
 +              ath9k_cmn_update_ichannel(ah->curchan, chan, NL80211_CHAN_HT20);
 +              ath9k_hw_set_txpowerlimit(ah, MAX_RATE_POWER, true);
 +              chan->max_power = reg->max_power_level / 2;
 +      }
 +}
 +
 +static void ath9k_init_txpower_limits(struct ath_softc *sc)
 +{
 +      struct ath_hw *ah = sc->sc_ah;
 +      struct ath9k_channel *curchan = ah->curchan;
 +
 +      if (ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
 +              ath9k_init_band_txpower(sc, IEEE80211_BAND_2GHZ);
 +      if (ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
 +              ath9k_init_band_txpower(sc, IEEE80211_BAND_5GHZ);
 +
 +      ah->curchan = curchan;
 +}
 +
  void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
  {
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
                hw->flags |= IEEE80211_HW_MFP_CAPABLE;
  
        hw->wiphy->interface_modes =
+               BIT(NL80211_IFTYPE_P2P_GO) |
+               BIT(NL80211_IFTYPE_P2P_CLIENT) |
                BIT(NL80211_IFTYPE_AP) |
                BIT(NL80211_IFTYPE_WDS) |
                BIT(NL80211_IFTYPE_STATION) |
@@@ -699,7 -705,6 +698,7 @@@ int ath9k_init_device(u16 devid, struc
                    const struct ath_bus_ops *bus_ops)
  {
        struct ieee80211_hw *hw = sc->hw;
 +      struct ath_wiphy *aphy = hw->priv;
        struct ath_common *common;
        struct ath_hw *ah;
        int error = 0;
        if (error != 0)
                goto error_rx;
  
 +      ath9k_init_txpower_limits(sc);
 +
        /* Register with mac80211 */
        error = ieee80211_register_hw(hw);
        if (error)
        INIT_WORK(&sc->chan_work, ath9k_wiphy_chan_work);
        INIT_DELAYED_WORK(&sc->wiphy_work, ath9k_wiphy_work);
        sc->wiphy_scheduler_int = msecs_to_jiffies(500);
 +      aphy->last_rssi = ATH_RSSI_DUMMY_MARKER;
  
        ath_init_leds(sc);
        ath_start_rfkill_poll(sc);
  
-       pm_qos_add_request(&ath9k_pm_qos_req, PM_QOS_CPU_DMA_LATENCY,
+       pm_qos_add_request(&sc->pm_qos_req, PM_QOS_CPU_DMA_LATENCY,
                           PM_QOS_DEFAULT_VALUE);
  
        return 0;
@@@ -827,7 -829,7 +826,7 @@@ void ath9k_deinit_device(struct ath_sof
        }
  
        ieee80211_unregister_hw(hw);
-       pm_qos_remove_request(&ath9k_pm_qos_req);
+       pm_qos_remove_request(&sc->pm_qos_req);
        ath_rx_cleanup(sc);
        ath_tx_cleanup(sc);
        ath9k_deinit_softc(sc);
@@@ -15,7 -15,6 +15,6 @@@
   */
  
  #include <linux/nl80211.h>
- #include <linux/pm_qos_params.h>
  #include "ath9k.h"
  #include "btcoex.h"
  
@@@ -24,7 -23,7 +23,7 @@@ static void ath_update_txpow(struct ath
        struct ath_hw *ah = sc->sc_ah;
  
        if (sc->curtxpow != sc->config.txpowlimit) {
 -              ath9k_hw_set_txpowerlimit(ah, sc->config.txpowlimit);
 +              ath9k_hw_set_txpowerlimit(ah, sc->config.txpowlimit, false);
                /* read back in case value is clamped */
                sc->curtxpow = ath9k_hw_regulatory(ah)->power_limit;
        }
@@@ -235,8 -234,6 +234,8 @@@ int ath_set_channel(struct ath_softc *s
  
        ath9k_ps_wakeup(sc);
  
 +      spin_lock_bh(&sc->sc_pcu_lock);
 +
        /*
         * This is only performed if the channel settings have
         * actually changed.
         * hardware at the new frequency, and then re-enable
         * the relevant bits of the h/w.
         */
 -      ath9k_hw_set_interrupts(ah, 0);
 +      ath9k_hw_disable_interrupts(ah);
        ath_drain_all_txq(sc, false);
  
 -      spin_lock_bh(&sc->rx.pcu_lock);
 -
        stopped = ath_stoprecv(sc);
  
        /* XXX: do not flush receive queue here. We don't want
                  channel->center_freq, conf_is_ht40(conf),
                  fastcc);
  
 -      spin_lock_bh(&sc->sc_resetlock);
 -
        r = ath9k_hw_reset(ah, hchan, caldata, fastcc);
        if (r) {
                ath_print(common, ATH_DBG_FATAL,
                          "Unable to reset channel (%u MHz), "
                          "reset status %d\n",
                          channel->center_freq, r);
 -              spin_unlock_bh(&sc->sc_resetlock);
 -              spin_unlock_bh(&sc->rx.pcu_lock);
                goto ps_restore;
        }
 -      spin_unlock_bh(&sc->sc_resetlock);
  
        if (ath_startrecv(sc) != 0) {
                ath_print(common, ATH_DBG_FATAL,
                          "Unable to restart recv logic\n");
                r = -EIO;
 -              spin_unlock_bh(&sc->rx.pcu_lock);
                goto ps_restore;
        }
  
 -      spin_unlock_bh(&sc->rx.pcu_lock);
 -
        ath_update_txpow(sc);
        ath9k_hw_set_interrupts(ah, ah->imask);
  
        }
  
   ps_restore:
 +      spin_unlock_bh(&sc->sc_pcu_lock);
 +
        ath9k_ps_restore(sc);
        return r;
  }
@@@ -335,7 -340,7 +334,7 @@@ void ath_paprd_calibrate(struct work_st
        struct ath_tx_control txctl;
        struct ath9k_hw_cal_data *caldata = ah->caldata;
        struct ath_common *common = ath9k_hw_common(ah);
 -      int qnum, ftype;
 +      int ftype;
        int chain_ok = 0;
        int chain;
        int len = 1800;
        memcpy(hdr->addr3, hw->wiphy->perm_addr, ETH_ALEN);
  
        memset(&txctl, 0, sizeof(txctl));
 -      qnum = sc->tx.hwq_map[WME_AC_BE];
 -      txctl.txq = &sc->tx.txq[qnum];
 +      txctl.txq = sc->tx.txq_map[WME_AC_BE];
  
        ath9k_ps_wakeup(sc);
        ar9003_paprd_init_table(ah);
                }
  
                init_completion(&sc->paprd_complete);
 +              sc->paprd_pending = true;
                ar9003_paprd_setup_gain_table(ah, chain);
                txctl.paprd = BIT(chain);
                if (ath_tx_start(hw, skb, &txctl) != 0)
  
                time_left = wait_for_completion_timeout(&sc->paprd_complete,
                                msecs_to_jiffies(ATH_PAPRD_TIMEOUT));
 +              sc->paprd_pending = false;
                if (!time_left) {
                        ath_print(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
                                  "Timeout waiting for paprd training on "
@@@ -562,6 -566,7 +561,6 @@@ static void ath_node_attach(struct ath_
                an->maxampdu = 1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
                                     sta->ht_cap.ampdu_factor);
                an->mpdudensity = parse_mpdudensity(sta->ht_cap.ampdu_density);
 -              an->last_rssi = ATH_RSSI_DUMMY_MARKER;
        }
  }
  
@@@ -609,8 -614,6 +608,8 @@@ void ath9k_tasklet(unsigned long data
                return;
        }
  
 +      spin_lock_bh(&sc->sc_pcu_lock);
 +
        if (!ath9k_hw_check_alive(ah))
                ieee80211_queue_work(sc->hw, &sc->hw_check_work);
  
                rxmask = (ATH9K_INT_RX | ATH9K_INT_RXEOL | ATH9K_INT_RXORN);
  
        if (status & rxmask) {
 -              spin_lock_bh(&sc->rx.pcu_lock);
 -
                /* Check for high priority Rx first */
                if ((ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
                    (status & ATH9K_INT_RXHP))
                        ath_rx_tasklet(sc, 0, true);
  
                ath_rx_tasklet(sc, 0, false);
 -              spin_unlock_bh(&sc->rx.pcu_lock);
        }
  
        if (status & ATH9K_INT_TX) {
                        ath_gen_timer_isr(sc->sc_ah);
  
        /* re-enable hardware interrupt */
 -      ath9k_hw_set_interrupts(ah, ah->imask);
 +      ath9k_hw_enable_interrupts(ah);
 +
 +      spin_unlock_bh(&sc->sc_pcu_lock);
        ath9k_ps_restore(sc);
  }
  
@@@ -752,7 -756,7 +751,7 @@@ irqreturn_t ath_isr(int irq, void *dev
                 * interrupt; otherwise it will continue to
                 * fire.
                 */
 -              ath9k_hw_set_interrupts(ah, 0);
 +              ath9k_hw_disable_interrupts(ah);
                /*
                 * Let the hal handle the event. We assume
                 * it will clear whatever condition caused
                spin_lock(&common->cc_lock);
                ath9k_hw_proc_mib_event(ah);
                spin_unlock(&common->cc_lock);
 -              ath9k_hw_set_interrupts(ah, ah->imask);
 +              ath9k_hw_enable_interrupts(ah);
        }
  
        if (!(ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
@@@ -778,8 -782,8 +777,8 @@@ chip_reset
        ath_debug_stat_interrupt(sc, status);
  
        if (sched) {
 -              /* turn off every interrupt except SWBA */
 -              ath9k_hw_set_interrupts(ah, (ah->imask & ATH9K_INT_SWBA));
 +              /* turn off every interrupt */
 +              ath9k_hw_disable_interrupts(ah);
                tasklet_schedule(&sc->intr_tq);
        }
  
@@@ -831,11 -835,9 +830,11 @@@ static u32 ath_get_extchanmode(struct a
  }
  
  static void ath9k_bss_assoc_info(struct ath_softc *sc,
 +                               struct ieee80211_hw *hw,
                                 struct ieee80211_vif *vif,
                                 struct ieee80211_bss_conf *bss_conf)
  {
 +      struct ath_wiphy *aphy = hw->priv;
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
  
                ath_beacon_config(sc, vif);
  
                /* Reset rssi stats */
 +              aphy->last_rssi = ATH_RSSI_DUMMY_MARKER;
                sc->sc_ah->stats.avgbrssi = ATH_RSSI_DUMMY_MARKER;
  
                sc->sc_flags |= SC_OP_ANI_RUN;
@@@ -881,13 -882,13 +880,13 @@@ void ath_radio_enable(struct ath_softc 
        int r;
  
        ath9k_ps_wakeup(sc);
 +      spin_lock_bh(&sc->sc_pcu_lock);
 +
        ath9k_hw_configpcipowersave(ah, 0, 0);
  
        if (!ah->curchan)
                ah->curchan = ath_get_curchannel(sc, sc->hw);
  
 -      spin_lock_bh(&sc->rx.pcu_lock);
 -      spin_lock_bh(&sc->sc_resetlock);
        r = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
        if (r) {
                ath_print(common, ATH_DBG_FATAL,
                          "reset status %d\n",
                          channel->center_freq, r);
        }
 -      spin_unlock_bh(&sc->sc_resetlock);
  
        ath_update_txpow(sc);
        if (ath_startrecv(sc) != 0) {
                ath_print(common, ATH_DBG_FATAL,
                          "Unable to restart recv logic\n");
 -              spin_unlock_bh(&sc->rx.pcu_lock);
 +              spin_unlock_bh(&sc->sc_pcu_lock);
                return;
        }
 -      spin_unlock_bh(&sc->rx.pcu_lock);
 -
        if (sc->sc_flags & SC_OP_BEACONS)
                ath_beacon_config(sc, NULL);    /* restart beacons */
  
        ath9k_hw_set_gpio(ah, ah->led_pin, 0);
  
        ieee80211_wake_queues(hw);
 +      spin_unlock_bh(&sc->sc_pcu_lock);
 +
        ath9k_ps_restore(sc);
  }
  
@@@ -927,8 -929,6 +926,8 @@@ void ath_radio_disable(struct ath_soft
        int r;
  
        ath9k_ps_wakeup(sc);
 +      spin_lock_bh(&sc->sc_pcu_lock);
 +
        ieee80211_stop_queues(hw);
  
        /*
        }
  
        /* Disable interrupts */
 -      ath9k_hw_set_interrupts(ah, 0);
 +      ath9k_hw_disable_interrupts(ah);
  
        ath_drain_all_txq(sc, false);   /* clear pending tx frames */
  
 -      spin_lock_bh(&sc->rx.pcu_lock);
 -
        ath_stoprecv(sc);               /* turn off frame recv */
        ath_flushrecv(sc);              /* flush recv queue */
  
        if (!ah->curchan)
                ah->curchan = ath_get_curchannel(sc, hw);
  
 -      spin_lock_bh(&sc->sc_resetlock);
        r = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
        if (r) {
                ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
                          "reset status %d\n",
                          channel->center_freq, r);
        }
 -      spin_unlock_bh(&sc->sc_resetlock);
  
        ath9k_hw_phy_disable(ah);
  
 -      spin_unlock_bh(&sc->rx.pcu_lock);
 -
        ath9k_hw_configpcipowersave(ah, 1, 1);
 +
 +      spin_unlock_bh(&sc->sc_pcu_lock);
        ath9k_ps_restore(sc);
 +
        ath9k_setpower(sc, ATH9K_PM_FULL_SLEEP);
  }
  
@@@ -979,25 -982,29 +978,25 @@@ int ath_reset(struct ath_softc *sc, boo
        /* Stop ANI */
        del_timer_sync(&common->ani.timer);
  
 +      spin_lock_bh(&sc->sc_pcu_lock);
 +
        ieee80211_stop_queues(hw);
  
 -      ath9k_hw_set_interrupts(ah, 0);
 +      ath9k_hw_disable_interrupts(ah);
        ath_drain_all_txq(sc, retry_tx);
  
 -      spin_lock_bh(&sc->rx.pcu_lock);
 -
        ath_stoprecv(sc);
        ath_flushrecv(sc);
  
 -      spin_lock_bh(&sc->sc_resetlock);
        r = ath9k_hw_reset(ah, sc->sc_ah->curchan, ah->caldata, false);
        if (r)
                ath_print(common, ATH_DBG_FATAL,
                          "Unable to reset hardware; reset status %d\n", r);
 -      spin_unlock_bh(&sc->sc_resetlock);
  
        if (ath_startrecv(sc) != 0)
                ath_print(common, ATH_DBG_FATAL,
                          "Unable to start recv logic\n");
  
 -      spin_unlock_bh(&sc->rx.pcu_lock);
 -
        /*
         * We may be doing a reset in response to a request
         * that changes the channel so update any state that
        }
  
        ieee80211_wake_queues(hw);
 +      spin_unlock_bh(&sc->sc_pcu_lock);
  
        /* Start ANI */
        ath_start_ani(common);
        return r;
  }
  
 -static int ath_get_hal_qnum(u16 queue, struct ath_softc *sc)
 -{
 -      int qnum;
 -
 -      switch (queue) {
 -      case 0:
 -              qnum = sc->tx.hwq_map[WME_AC_VO];
 -              break;
 -      case 1:
 -              qnum = sc->tx.hwq_map[WME_AC_VI];
 -              break;
 -      case 2:
 -              qnum = sc->tx.hwq_map[WME_AC_BE];
 -              break;
 -      case 3:
 -              qnum = sc->tx.hwq_map[WME_AC_BK];
 -              break;
 -      default:
 -              qnum = sc->tx.hwq_map[WME_AC_BE];
 -              break;
 -      }
 -
 -      return qnum;
 -}
 -
 -int ath_get_mac80211_qnum(u32 queue, struct ath_softc *sc)
 -{
 -      int qnum;
 -
 -      switch (queue) {
 -      case WME_AC_VO:
 -              qnum = 0;
 -              break;
 -      case WME_AC_VI:
 -              qnum = 1;
 -              break;
 -      case WME_AC_BE:
 -              qnum = 2;
 -              break;
 -      case WME_AC_BK:
 -              qnum = 3;
 -              break;
 -      default:
 -              qnum = -1;
 -              break;
 -      }
 -
 -      return qnum;
 -}
 -
  /* XXX: Remove me once we don't depend on ath9k_channel for all
   * this redundant data */
  void ath9k_update_ichannel(struct ath_softc *sc, struct ieee80211_hw *hw,
@@@ -1111,16 -1167,19 +1110,16 @@@ static int ath9k_start(struct ieee80211
         * be followed by initialization of the appropriate bits
         * and then setup of the interrupt mask.
         */
 -      spin_lock_bh(&sc->rx.pcu_lock);
 -      spin_lock_bh(&sc->sc_resetlock);
 +      spin_lock_bh(&sc->sc_pcu_lock);
        r = ath9k_hw_reset(ah, init_channel, ah->caldata, false);
        if (r) {
                ath_print(common, ATH_DBG_FATAL,
                          "Unable to reset hardware; reset status %d "
                          "(freq %u MHz)\n", r,
                          curchan->center_freq);
 -              spin_unlock_bh(&sc->sc_resetlock);
 -              spin_unlock_bh(&sc->rx.pcu_lock);
 +              spin_unlock_bh(&sc->sc_pcu_lock);
                goto mutex_unlock;
        }
 -      spin_unlock_bh(&sc->sc_resetlock);
  
        /*
         * This is needed only to setup initial state
                ath_print(common, ATH_DBG_FATAL,
                          "Unable to start recv logic\n");
                r = -EIO;
 -              spin_unlock_bh(&sc->rx.pcu_lock);
 +              spin_unlock_bh(&sc->sc_pcu_lock);
                goto mutex_unlock;
        }
 -      spin_unlock_bh(&sc->rx.pcu_lock);
 +      spin_unlock_bh(&sc->sc_pcu_lock);
  
        /* Setup our intr mask. */
        ah->imask = ATH9K_INT_TX | ATH9K_INT_RXEOL |
                        ath9k_btcoex_timer_resume(sc);
        }
  
-       pm_qos_update_request(&ath9k_pm_qos_req, 55);
+       pm_qos_update_request(&sc->pm_qos_req, 55);
  
  mutex_unlock:
        mutex_unlock(&sc->mutex);
  static int ath9k_tx(struct ieee80211_hw *hw,
                    struct sk_buff *skb)
  {
 -      struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct ath_wiphy *aphy = hw->priv;
        struct ath_softc *sc = aphy->sc;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ath_tx_control txctl;
 -      int padpos, padsize;
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
 -      int qnum;
  
        if (aphy->state != ATH_WIPHY_ACTIVE && aphy->state != ATH_WIPHY_SCAN) {
                ath_print(common, ATH_DBG_XMIT,
        }
  
        memset(&txctl, 0, sizeof(struct ath_tx_control));
 -
 -      /*
 -       * As a temporary workaround, assign seq# here; this will likely need
 -       * to be cleaned up to work better with Beacon transmission and virtual
 -       * BSSes.
 -       */
 -      if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
 -              if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
 -                      sc->tx.seq_no += 0x10;
 -              hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
 -              hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
 -      }
 -
 -      /* Add the padding after the header if this is not already done */
 -      padpos = ath9k_cmn_padpos(hdr->frame_control);
 -      padsize = padpos & 3;
 -      if (padsize && skb->len>padpos) {
 -              if (skb_headroom(skb) < padsize)
 -                      return -1;
 -              skb_push(skb, padsize);
 -              memmove(skb->data, skb->data + padsize, padpos);
 -      }
 -
 -      qnum = ath_get_hal_qnum(skb_get_queue_mapping(skb), sc);
 -      txctl.txq = &sc->tx.txq[qnum];
 +      txctl.txq = sc->tx.txq_map[skb_get_queue_mapping(skb)];
  
        ath_print(common, ATH_DBG_XMIT, "transmitting packet, skb: %p\n", skb);
  
@@@ -1313,25 -1399,22 +1312,25 @@@ static void ath9k_stop(struct ieee80211
                        ath9k_btcoex_timer_pause(sc);
        }
  
 +      spin_lock_bh(&sc->sc_pcu_lock);
 +
        /* make sure h/w will not generate any interrupt
         * before setting the invalid flag. */
 -      ath9k_hw_set_interrupts(ah, 0);
 +      ath9k_hw_disable_interrupts(ah);
  
 -      spin_lock_bh(&sc->rx.pcu_lock);
        if (!(sc->sc_flags & SC_OP_INVALID)) {
                ath_drain_all_txq(sc, false);
                ath_stoprecv(sc);
                ath9k_hw_phy_disable(ah);
        } else
                sc->rx.rxlink = NULL;
 -      spin_unlock_bh(&sc->rx.pcu_lock);
  
        /* disable HAL and put h/w to sleep */
        ath9k_hw_disable(ah);
        ath9k_hw_configpcipowersave(ah, 1, 1);
 +
 +      spin_unlock_bh(&sc->sc_pcu_lock);
 +
        ath9k_ps_restore(sc);
  
        /* Finally, put the chip in FULL SLEEP mode */
  
        sc->sc_flags |= SC_OP_INVALID;
  
-       pm_qos_update_request(&ath9k_pm_qos_req, PM_QOS_DEFAULT_VALUE);
+       pm_qos_update_request(&sc->pm_qos_req, PM_QOS_DEFAULT_VALUE);
  
        mutex_unlock(&sc->mutex);
  
@@@ -1436,6 -1519,7 +1435,7 @@@ static void ath9k_remove_interface(stru
        struct ath_softc *sc = aphy->sc;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ath_vif *avp = (void *)vif->drv_priv;
+       bool bs_valid = false;
        int i;
  
        ath_print(common, ATH_DBG_CONFIG, "Detach Interface\n");
                               "slot\n", __func__);
                        sc->beacon.bslot[i] = NULL;
                        sc->beacon.bslot_aphy[i] = NULL;
-               }
+               } else if (sc->beacon.bslot[i])
+                       bs_valid = true;
+       }
+       if (!bs_valid && (sc->sc_ah->imask & ATH9K_INT_SWBA)) {
+               /* Disable SWBA interrupt */
+               sc->sc_ah->imask &= ~ATH9K_INT_SWBA;
+               ath9k_ps_wakeup(sc);
+               ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_ah->imask);
+               ath9k_ps_restore(sc);
        }
  
        sc->nvifs--;
@@@ -1738,15 -1830,12 +1746,15 @@@ static int ath9k_conf_tx(struct ieee802
        struct ath_wiphy *aphy = hw->priv;
        struct ath_softc *sc = aphy->sc;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
 +      struct ath_txq *txq;
        struct ath9k_tx_queue_info qi;
 -      int ret = 0, qnum;
 +      int ret = 0;
  
        if (queue >= WME_NUM_AC)
                return 0;
  
 +      txq = sc->tx.txq_map[queue];
 +
        mutex_lock(&sc->mutex);
  
        memset(&qi, 0, sizeof(struct ath9k_tx_queue_info));
        qi.tqi_cwmin = params->cw_min;
        qi.tqi_cwmax = params->cw_max;
        qi.tqi_burstTime = params->txop;
 -      qnum = ath_get_hal_qnum(queue, sc);
  
        ath_print(common, ATH_DBG_CONFIG,
                  "Configure tx [queue/halq] [%d/%d],  "
                  "aifs: %d, cw_min: %d, cw_max: %d, txop: %d\n",
 -                queue, qnum, params->aifs, params->cw_min,
 +                queue, txq->axq_qnum, params->aifs, params->cw_min,
                  params->cw_max, params->txop);
  
 -      ret = ath_txq_update(sc, qnum, &qi);
 +      ret = ath_txq_update(sc, txq->axq_qnum, &qi);
        if (ret)
                ath_print(common, ATH_DBG_FATAL, "TXQ Update failed\n");
  
        if (sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC)
 -              if ((qnum == sc->tx.hwq_map[WME_AC_BE]) && !ret)
 +              if (queue == WME_AC_BE && !ret)
                        ath_beaconq_config(sc);
  
        mutex_unlock(&sc->mutex);
@@@ -1929,7 -2019,7 +1937,7 @@@ static void ath9k_bss_info_changed(stru
        if (changed & BSS_CHANGED_ASSOC) {
                ath_print(common, ATH_DBG_CONFIG, "BSS Changed ASSOC %d\n",
                        bss_conf->assoc);
 -              ath9k_bss_assoc_info(sc, vif, bss_conf);
 +              ath9k_bss_assoc_info(sc, hw, vif, bss_conf);
        }
  
        mutex_unlock(&sc->mutex);
@@@ -1992,9 -2082,6 +2000,9 @@@ static int ath9k_ampdu_action(struct ie
        case IEEE80211_AMPDU_RX_STOP:
                break;
        case IEEE80211_AMPDU_TX_START:
 +              if (!(sc->sc_flags & SC_OP_TXAGGR))
 +                      return -EOPNOTSUPP;
 +
                ath9k_ps_wakeup(sc);
                ret = ath_tx_aggr_start(sc, sta, tid, ssn);
                if (!ret)
       ((REG_READ(_ah, AR_AN_SYNTH9) & 0x7) == 0x1))
  
  #define AR_DEVID_7010(_ah) \
 -      (((_ah)->hw_version.devid == 0x7010) || \
 -       ((_ah)->hw_version.devid == 0x7015) || \
 -       ((_ah)->hw_version.devid == 0x9018) || \
 -       ((_ah)->hw_version.devid == 0xA704) || \
 -       ((_ah)->hw_version.devid == 0x1200))
 -
 -#define AR9287_HTC_DEVID(_ah) \
 -      (((_ah)->hw_version.devid == 0x7015) || \
 -       ((_ah)->hw_version.devid == 0x1200))
 +      ((_ah)->common.driver_info & AR7010_DEVICE)
  
  #define AR_RADIO_SREV_MAJOR                   0xf0
  #define AR_RAD5133_SREV_MAJOR                 0xc0
@@@ -976,11 -984,13 +976,13 @@@ enum 
  #define AR9287_GPIO_IN_VAL_S                     11
  #define AR9271_GPIO_IN_VAL                       0xFFFF0000
  #define AR9271_GPIO_IN_VAL_S                     16
- #define AR9300_GPIO_IN_VAL                       0x0001FFFF
- #define AR9300_GPIO_IN_VAL_S                     0
  #define AR7010_GPIO_IN_VAL                       0x0000FFFF
  #define AR7010_GPIO_IN_VAL_S                     0
  
+ #define AR_GPIO_IN                             0x404c
+ #define AR9300_GPIO_IN_VAL                       0x0001FFFF
+ #define AR9300_GPIO_IN_VAL_S                     0
  #define AR_GPIO_OE_OUT                           (AR_SREV_9300_20_OR_LATER(ah) ? 0x4050 : 0x404c)
  #define AR_GPIO_OE_OUT_DRV                       0x3
  #define AR_GPIO_OE_OUT_DRV_NO                    0x0
  #define AR_INTR_PRIO_ASYNC_MASK   0x40c8
  #define AR_INTR_PRIO_SYNC_MASK    0x40cc
  #define AR_INTR_PRIO_ASYNC_ENABLE 0x40d4
 +#define AR_ENT_OTP              0x40d8
 +#define AR_ENT_OTP_CHAIN2_DISABLE               0x00020000
 +#define AR_ENT_OTP_MPSD               0x00800000
  
  #define AR_RTC_9300_PLL_DIV          0x000003ff
  #define AR_RTC_9300_PLL_DIV_S        0
  #define AR_PCU_TBTT_PROTECT        0x00200000
  #define AR_PCU_CLEAR_VMF           0x01000000
  #define AR_PCU_CLEAR_BA_VALID      0x04000000
 +#define AR_PCU_ALWAYS_PERFORM_KEYSEARCH 0x10000000
  
  #define AR_PCU_BT_ANT_PREVENT_RX   0x00100000
  #define AR_PCU_BT_ANT_PREVENT_RX_S 20
@@@ -428,7 -428,6 +428,7 @@@ static void carl9170_cancel_worker(stru
        cancel_delayed_work_sync(&ar->led_work);
  #endif /* CONFIG_CARL9170_LEDS */
        cancel_work_sync(&ar->ps_work);
 +      cancel_work_sync(&ar->ping_work);
        cancel_work_sync(&ar->ampdu_work);
  }
  
@@@ -534,21 -533,6 +534,21 @@@ void carl9170_restart(struct ar9170 *ar
         */
  }
  
 +static void carl9170_ping_work(struct work_struct *work)
 +{
 +      struct ar9170 *ar = container_of(work, struct ar9170, ping_work);
 +      int err;
 +
 +      if (!IS_STARTED(ar))
 +              return;
 +
 +      mutex_lock(&ar->mutex);
 +      err = carl9170_echo_test(ar, 0xdeadbeef);
 +      if (err)
 +              carl9170_restart(ar, CARL9170_RR_UNRESPONSIVE_DEVICE);
 +      mutex_unlock(&ar->mutex);
 +}
 +
  static int carl9170_init_interface(struct ar9170 *ar,
                                   struct ieee80211_vif *vif)
  {
@@@ -1630,7 -1614,6 +1630,7 @@@ void *carl9170_alloc(size_t priv_size
                skb_queue_head_init(&ar->tx_pending[i]);
        }
        INIT_WORK(&ar->ps_work, carl9170_ps_work);
 +      INIT_WORK(&ar->ping_work, carl9170_ping_work);
        INIT_WORK(&ar->restart_work, carl9170_restart_work);
        INIT_WORK(&ar->ampdu_work, carl9170_ampdu_work);
        INIT_DELAYED_WORK(&ar->tx_janitor, carl9170_tx_janitor);
         * supports these modes. The code which will add the
         * additional interface_modes is in fw.c.
         */
-       hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
+       hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+                                    BIT(NL80211_IFTYPE_P2P_CLIENT);
  
        hw->flags |= IEEE80211_HW_RX_INCLUDES_FCS |
                     IEEE80211_HW_REPORTS_TX_ACK_STATUS |
@@@ -1845,7 -1829,7 +1846,7 @@@ int carl9170_register(struct ar9170 *ar
        err = carl9170_led_register(ar);
        if (err)
                goto err_unreg;
 -#endif /* CONFIG_CAR9L170_LEDS */
 +#endif /* CONFIG_CARL9170_LEDS */
  
  #ifdef CONFIG_CARL9170_WPC
        err = carl9170_register_wps_button(ar);
@@@ -242,11 -242,9 +242,11 @@@ static void carl9170_tx_release(struct 
                        ar->tx_ampdu_schedule = true;
  
                if (txinfo->flags & IEEE80211_TX_STAT_AMPDU) {
 -                      txinfo->status.ampdu_len = txinfo->pad[0];
 -                      txinfo->status.ampdu_ack_len = txinfo->pad[1];
 -                      txinfo->pad[0] = txinfo->pad[1] = 0;
 +                      struct _carl9170_tx_superframe *super;
 +
 +                      super = (void *)skb->data;
 +                      txinfo->status.ampdu_len = super->s.rix;
 +                      txinfo->status.ampdu_ack_len = super->s.cnt;
                } else if (txinfo->flags & IEEE80211_TX_STAT_ACK) {
                        /*
                         * drop redundant tx_status reports:
@@@ -339,8 -337,7 +339,8 @@@ static void carl9170_tx_status_process_
        u8 tid;
  
        if (!(txinfo->flags & IEEE80211_TX_CTL_AMPDU) ||
 -          txinfo->flags & IEEE80211_TX_CTL_INJECTED)
 +          txinfo->flags & IEEE80211_TX_CTL_INJECTED ||
 +         (!(super->f.mac_control & cpu_to_le16(AR9170_TX_MAC_AGGR))))
                return;
  
        tx_info = IEEE80211_SKB_CB(skb);
                sta_info->stats[tid].ampdu_ack_len++;
  
        if (super->f.mac_control & cpu_to_le16(AR9170_TX_MAC_IMM_BA)) {
 -              txinfo->pad[0] = sta_info->stats[tid].ampdu_len;
 -              txinfo->pad[1] = sta_info->stats[tid].ampdu_ack_len;
 +              super->s.rix = sta_info->stats[tid].ampdu_len;
 +              super->s.cnt = sta_info->stats[tid].ampdu_ack_len;
                txinfo->flags |= IEEE80211_TX_STAT_AMPDU;
                sta_info->stats[tid].clear = true;
        }
@@@ -527,59 -524,6 +527,59 @@@ next
        }
  }
  
 +static void carl9170_tx_ampdu_timeout(struct ar9170 *ar)
 +{
 +      struct carl9170_sta_tid *iter;
 +      struct sk_buff *skb;
 +      struct ieee80211_tx_info *txinfo;
 +      struct carl9170_tx_info *arinfo;
 +      struct _carl9170_tx_superframe *super;
 +      struct ieee80211_sta *sta;
 +      struct ieee80211_vif *vif;
 +      struct ieee80211_hdr *hdr;
 +      unsigned int vif_id;
 +
 +      rcu_read_lock();
 +      list_for_each_entry_rcu(iter, &ar->tx_ampdu_list, list) {
 +              if (iter->state < CARL9170_TID_STATE_IDLE)
 +                      continue;
 +
 +              spin_lock_bh(&iter->lock);
 +              skb = skb_peek(&iter->queue);
 +              if (!skb)
 +                      goto unlock;
 +
 +              txinfo = IEEE80211_SKB_CB(skb);
 +              arinfo = (void *)txinfo->rate_driver_data;
 +              if (time_is_after_jiffies(arinfo->timeout +
 +                  msecs_to_jiffies(CARL9170_QUEUE_TIMEOUT)))
 +                      goto unlock;
 +
 +              super = (void *) skb->data;
 +              hdr = (void *) super->frame_data;
 +
 +              vif_id = (super->s.misc & CARL9170_TX_SUPER_MISC_VIF_ID) >>
 +                       CARL9170_TX_SUPER_MISC_VIF_ID_S;
 +
 +              if (WARN_ON(vif_id >= AR9170_MAX_VIRTUAL_MAC))
 +                      goto unlock;
 +
 +              vif = rcu_dereference(ar->vif_priv[vif_id].vif);
 +              if (WARN_ON(!vif))
 +                      goto unlock;
 +
 +              sta = ieee80211_find_sta(vif, hdr->addr1);
 +              if (WARN_ON(!sta))
 +                      goto unlock;
 +
 +              ieee80211_stop_tx_ba_session(sta, iter->tid);
 +unlock:
 +              spin_unlock_bh(&iter->lock);
 +
 +      }
 +      rcu_read_unlock();
 +}
 +
  void carl9170_tx_janitor(struct work_struct *work)
  {
        struct ar9170 *ar = container_of(work, struct ar9170,
        ar->tx_janitor_last_run = jiffies;
  
        carl9170_check_queue_stop_timeout(ar);
 +      carl9170_tx_ampdu_timeout(ar);
  
        if (!atomic_read(&ar->tx_total_queued))
                return;
@@@ -867,7 -810,7 +867,7 @@@ static int carl9170_tx_prepare(struct a
  
        mac_tmp = cpu_to_le16(AR9170_TX_MAC_HW_DURATION |
                              AR9170_TX_MAC_BACKOFF);
-       mac_tmp |= cpu_to_le16((hw_queue << AR9170_TX_MAC_QOS_S) &&
+       mac_tmp |= cpu_to_le16((hw_queue << AR9170_TX_MAC_QOS_S) &
                               AR9170_TX_MAC_QOS);
  
        no_ack = !!(info->flags & IEEE80211_TX_CTL_NO_ACK);
                if (unlikely(!sta || !cvif))
                        goto err_out;
  
 -              factor = min_t(unsigned int, 1u,
 -                       info->control.sta->ht_cap.ampdu_factor);
 -
 -              density = info->control.sta->ht_cap.ampdu_density;
 +              factor = min_t(unsigned int, 1u, sta->ht_cap.ampdu_factor);
 +              density = sta->ht_cap.ampdu_density;
  
                if (density) {
                        /*
@@@ -1261,7 -1206,6 +1261,7 @@@ static void carl9170_tx(struct ar9170 *
  static bool carl9170_tx_ampdu_queue(struct ar9170 *ar,
        struct ieee80211_sta *sta, struct sk_buff *skb)
  {
 +      struct _carl9170_tx_superframe *super = (void *) skb->data;
        struct carl9170_sta_info *sta_info;
        struct carl9170_sta_tid *agg;
        struct sk_buff *iter;
@@@ -1330,7 -1274,6 +1330,7 @@@ err_unlock
  
  err_unlock_rcu:
        rcu_read_unlock();
 +      super->f.mac_control &= ~cpu_to_le16(AR9170_TX_MAC_AGGR);
        carl9170_tx_status(ar, skb, false);
        ar->tx_dropped++;
        return false;
@@@ -1359,6 -1302,9 +1359,6 @@@ int carl9170_op_tx(struct ieee80211_hw 
         */
  
        if (info->flags & IEEE80211_TX_CTL_AMPDU) {
 -              if (WARN_ON_ONCE(!sta))
 -                      goto err_free;
 -
                run = carl9170_tx_ampdu_queue(ar, sta, skb);
                if (run)
                        carl9170_tx_ampdu(ar);
@@@ -851,10 -851,9 +851,10 @@@ struct lbs_private *lbs_add_card(void *
        priv->work_thread = create_singlethread_workqueue("lbs_worker");
        INIT_WORK(&priv->mcast_work, lbs_set_mcast_worker);
  
 -      priv->wol_criteria = 0xffffffff;
 +      priv->wol_criteria = EHS_REMOVE_WAKEUP;
        priv->wol_gpio = 0xff;
        priv->wol_gap = 20;
 +      priv->ehs_remove_supported = true;
  
        goto done;
  
@@@ -916,8 -915,6 +916,6 @@@ void lbs_remove_card(struct lbs_privat
  
        lbs_free_adapter(priv);
        lbs_cfg_free(priv);
-       priv->dev = NULL;
        free_netdev(dev);
  
        lbs_deb_leave(LBS_DEB_MAIN);
diff --combined include/net/sock.h
@@@ -57,7 -57,7 +57,7 @@@
  #include <linux/rculist_nulls.h>
  #include <linux/poll.h>
  
 -#include <asm/atomic.h>
 +#include <linux/atomic.h>
  #include <net/dst.h>
  #include <net/checksum.h>
  
@@@ -241,67 -241,59 +241,67 @@@ struct sock 
  #define sk_bind_node          __sk_common.skc_bind_node
  #define sk_prot                       __sk_common.skc_prot
  #define sk_net                        __sk_common.skc_net
 -      kmemcheck_bitfield_begin(flags);
 -      unsigned int            sk_shutdown  : 2,
 -                              sk_no_check  : 2,
 -                              sk_userlocks : 4,
 -                              sk_protocol  : 8,
 -                              sk_type      : 16;
 -      kmemcheck_bitfield_end(flags);
 -      int                     sk_rcvbuf;
        socket_lock_t           sk_lock;
 +      struct sk_buff_head     sk_receive_queue;
        /*
         * The backlog queue is special, it is always used with
         * the per-socket spinlock held and requires low latency
         * access. Therefore we special case it's implementation.
 +       * Note : rmem_alloc is in this structure to fill a hole
 +       * on 64bit arches, not because its logically part of
 +       * backlog.
         */
        struct {
 -              struct sk_buff *head;
 -              struct sk_buff *tail;
 -              int len;
 +              atomic_t        rmem_alloc;
 +              int             len;
 +              struct sk_buff  *head;
 +              struct sk_buff  *tail;
        } sk_backlog;
 +#define sk_rmem_alloc sk_backlog.rmem_alloc
 +      int                     sk_forward_alloc;
 +#ifdef CONFIG_RPS
 +      __u32                   sk_rxhash;
 +#endif
 +      atomic_t                sk_drops;
 +      int                     sk_rcvbuf;
 +
 +      struct sk_filter __rcu  *sk_filter;
        struct socket_wq        *sk_wq;
 -      struct dst_entry        *sk_dst_cache;
 +
 +#ifdef CONFIG_NET_DMA
 +      struct sk_buff_head     sk_async_wait_queue;
 +#endif
 +
  #ifdef CONFIG_XFRM
        struct xfrm_policy      *sk_policy[2];
  #endif
 +      unsigned long           sk_flags;
 +      struct dst_entry        *sk_dst_cache;
        spinlock_t              sk_dst_lock;
 -      atomic_t                sk_rmem_alloc;
        atomic_t                sk_wmem_alloc;
        atomic_t                sk_omem_alloc;
        int                     sk_sndbuf;
 -      struct sk_buff_head     sk_receive_queue;
        struct sk_buff_head     sk_write_queue;
 -#ifdef CONFIG_NET_DMA
 -      struct sk_buff_head     sk_async_wait_queue;
 -#endif
 +      kmemcheck_bitfield_begin(flags);
 +      unsigned int            sk_shutdown  : 2,
 +                              sk_no_check  : 2,
 +                              sk_userlocks : 4,
 +                              sk_protocol  : 8,
 +                              sk_type      : 16;
 +      kmemcheck_bitfield_end(flags);
        int                     sk_wmem_queued;
 -      int                     sk_forward_alloc;
        gfp_t                   sk_allocation;
        int                     sk_route_caps;
        int                     sk_route_nocaps;
        int                     sk_gso_type;
        unsigned int            sk_gso_max_size;
        int                     sk_rcvlowat;
 -#ifdef CONFIG_RPS
 -      __u32                   sk_rxhash;
 -#endif
 -      unsigned long           sk_flags;
        unsigned long           sk_lingertime;
        struct sk_buff_head     sk_error_queue;
        struct proto            *sk_prot_creator;
        rwlock_t                sk_callback_lock;
        int                     sk_err,
                                sk_err_soft;
 -      atomic_t                sk_drops;
        unsigned short          sk_ack_backlog;
        unsigned short          sk_max_ack_backlog;
        __u32                   sk_priority;
        const struct cred       *sk_peer_cred;
        long                    sk_rcvtimeo;
        long                    sk_sndtimeo;
 -      struct sk_filter __rcu  *sk_filter;
        void                    *sk_protinfo;
        struct timer_list       sk_timer;
        ktime_t                 sk_stamp;
@@@ -516,6 -509,9 +516,6 @@@ static __inline__ void sk_add_bind_node
  #define sk_nulls_for_each_from(__sk, node) \
        if (__sk && ({ node = &(__sk)->sk_nulls_node; 1; })) \
                hlist_nulls_for_each_entry_from(__sk, node, sk_nulls_node)
 -#define sk_for_each_continue(__sk, node) \
 -      if (__sk && ({ node = &(__sk)->sk_node; 1; })) \
 -              hlist_for_each_entry_continue(__sk, node, sk_node)
  #define sk_for_each_safe(__sk, node, tmp, list) \
        hlist_for_each_entry_safe(__sk, node, tmp, list, sk_node)
  #define sk_for_each_bound(__sk, node, list) \
@@@ -1159,6 -1155,8 +1159,8 @@@ extern void sk_common_release(struct so
  /* Initialise core socket variables */
  extern void sock_init_data(struct socket *sock, struct sock *sk);
  
+ extern void sk_filter_release_rcu(struct rcu_head *rcu);
  /**
   *    sk_filter_release - release a socket filter
   *    @fp: filter to remove
  static inline void sk_filter_release(struct sk_filter *fp)
  {
        if (atomic_dec_and_test(&fp->refcnt))
-               kfree(fp);
+               call_rcu_bh(&fp->rcu, sk_filter_release_rcu);
  }
  
  static inline void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
diff --combined net/ceph/Makefile
@@@ -1,12 -1,9 +1,9 @@@
  #
  # Makefile for CEPH filesystem.
  #
- ifneq ($(KERNELRELEASE),)
  obj-$(CONFIG_CEPH_LIB) += libceph.o
  
 -libceph-objs := ceph_common.o messenger.o msgpool.o buffer.o pagelist.o \
 +libceph-y := ceph_common.o messenger.o msgpool.o buffer.o pagelist.o \
        mon_client.o \
        osd_client.o osdmap.o crush/crush.o crush/mapper.o crush/hash.o \
        debugfs.o \
        ceph_fs.o ceph_strings.o ceph_hash.o \
        pagevec.o
  
- else
- #Otherwise we were called directly from the command
- # line; invoke the kernel build system.
- KERNELDIR ?= /lib/modules/$(shell uname -r)/build
- PWD := $(shell pwd)
- default: all
- all:
-       $(MAKE) -C $(KERNELDIR) M=$(PWD) CONFIG_CEPH_LIB=m modules
- modules_install:
-       $(MAKE) -C $(KERNELDIR) M=$(PWD) CONFIG_CEPH_LIB=m modules_install
- clean:
-       $(MAKE) -C $(KERNELDIR) M=$(PWD) clean
- endif
diff --combined net/core/filter.c
  #include <asm/uaccess.h>
  #include <asm/unaligned.h>
  #include <linux/filter.h>
 +#include <linux/reciprocal_div.h>
 +
 +enum {
 +      BPF_S_RET_K = 1,
 +      BPF_S_RET_A,
 +      BPF_S_ALU_ADD_K,
 +      BPF_S_ALU_ADD_X,
 +      BPF_S_ALU_SUB_K,
 +      BPF_S_ALU_SUB_X,
 +      BPF_S_ALU_MUL_K,
 +      BPF_S_ALU_MUL_X,
 +      BPF_S_ALU_DIV_X,
 +      BPF_S_ALU_AND_K,
 +      BPF_S_ALU_AND_X,
 +      BPF_S_ALU_OR_K,
 +      BPF_S_ALU_OR_X,
 +      BPF_S_ALU_LSH_K,
 +      BPF_S_ALU_LSH_X,
 +      BPF_S_ALU_RSH_K,
 +      BPF_S_ALU_RSH_X,
 +      BPF_S_ALU_NEG,
 +      BPF_S_LD_W_ABS,
 +      BPF_S_LD_H_ABS,
 +      BPF_S_LD_B_ABS,
 +      BPF_S_LD_W_LEN,
 +      BPF_S_LD_W_IND,
 +      BPF_S_LD_H_IND,
 +      BPF_S_LD_B_IND,
 +      BPF_S_LD_IMM,
 +      BPF_S_LDX_W_LEN,
 +      BPF_S_LDX_B_MSH,
 +      BPF_S_LDX_IMM,
 +      BPF_S_MISC_TAX,
 +      BPF_S_MISC_TXA,
 +      BPF_S_ALU_DIV_K,
 +      BPF_S_LD_MEM,
 +      BPF_S_LDX_MEM,
 +      BPF_S_ST,
 +      BPF_S_STX,
 +      BPF_S_JMP_JA,
 +      BPF_S_JMP_JEQ_K,
 +      BPF_S_JMP_JEQ_X,
 +      BPF_S_JMP_JGE_K,
 +      BPF_S_JMP_JGE_X,
 +      BPF_S_JMP_JGT_K,
 +      BPF_S_JMP_JGT_X,
 +      BPF_S_JMP_JSET_K,
 +      BPF_S_JMP_JSET_X,
 +};
  
  /* No hurry in this branch */
 -static void *__load_pointer(struct sk_buff *skb, int k)
 +static void *__load_pointer(const struct sk_buff *skb, int k)
  {
        u8 *ptr = NULL;
  
        return NULL;
  }
  
 -static inline void *load_pointer(struct sk_buff *skb, int k,
 +static inline void *load_pointer(const struct sk_buff *skb, int k,
                                 unsigned int size, void *buffer)
  {
        if (k >= 0)
@@@ -138,7 -89,7 +138,7 @@@ int sk_filter(struct sock *sk, struct s
        rcu_read_lock_bh();
        filter = rcu_dereference_bh(sk->sk_filter);
        if (filter) {
 -              unsigned int pkt_len = sk_run_filter(skb, filter->insns, filter->len);
 +              unsigned int pkt_len = sk_run_filter(skb, filter->insns);
  
                err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
        }
@@@ -152,52 -103,50 +152,52 @@@ EXPORT_SYMBOL(sk_filter)
   *    sk_run_filter - run a filter on a socket
   *    @skb: buffer to run the filter on
   *    @filter: filter to apply
 - *    @flen: length of filter
   *
   * Decode and apply filter instructions to the skb->data.
 - * Return length to keep, 0 for none. skb is the data we are
 - * filtering, filter is the array of filter instructions, and
 - * len is the number of filter blocks in the array.
 + * Return length to keep, 0 for none. @skb is the data we are
 + * filtering, @filter is the array of filter instructions.
 + * Because all jumps are guaranteed to be before last instruction,
 + * and last instruction guaranteed to be a RET, we dont need to check
 + * flen. (We used to pass to this function the length of filter)
   */
 -unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int flen)
 +unsigned int sk_run_filter(const struct sk_buff *skb,
 +                         const struct sock_filter *fentry)
  {
        void *ptr;
        u32 A = 0;                      /* Accumulator */
        u32 X = 0;                      /* Index Register */
        u32 mem[BPF_MEMWORDS];          /* Scratch Memory Store */
 -      unsigned long memvalid = 0;
        u32 tmp;
        int k;
 -      int pc;
  
 -      BUILD_BUG_ON(BPF_MEMWORDS > BITS_PER_LONG);
        /*
         * Process array of filter instructions.
         */
 -      for (pc = 0; pc < flen; pc++) {
 -              const struct sock_filter *fentry = &filter[pc];
 -              u32 f_k = fentry->k;
 +      for (;; fentry++) {
 +#if defined(CONFIG_X86_32)
 +#define       K (fentry->k)
 +#else
 +              const u32 K = fentry->k;
 +#endif
  
                switch (fentry->code) {
                case BPF_S_ALU_ADD_X:
                        A += X;
                        continue;
                case BPF_S_ALU_ADD_K:
 -                      A += f_k;
 +                      A += K;
                        continue;
                case BPF_S_ALU_SUB_X:
                        A -= X;
                        continue;
                case BPF_S_ALU_SUB_K:
 -                      A -= f_k;
 +                      A -= K;
                        continue;
                case BPF_S_ALU_MUL_X:
                        A *= X;
                        continue;
                case BPF_S_ALU_MUL_K:
 -                      A *= f_k;
 +                      A *= K;
                        continue;
                case BPF_S_ALU_DIV_X:
                        if (X == 0)
                        A /= X;
                        continue;
                case BPF_S_ALU_DIV_K:
 -                      A /= f_k;
 +                      A = reciprocal_divide(A, K);
                        continue;
                case BPF_S_ALU_AND_X:
                        A &= X;
                        continue;
                case BPF_S_ALU_AND_K:
 -                      A &= f_k;
 +                      A &= K;
                        continue;
                case BPF_S_ALU_OR_X:
                        A |= X;
                        continue;
                case BPF_S_ALU_OR_K:
 -                      A |= f_k;
 +                      A |= K;
                        continue;
                case BPF_S_ALU_LSH_X:
                        A <<= X;
                        continue;
                case BPF_S_ALU_LSH_K:
 -                      A <<= f_k;
 +                      A <<= K;
                        continue;
                case BPF_S_ALU_RSH_X:
                        A >>= X;
                        continue;
                case BPF_S_ALU_RSH_K:
 -                      A >>= f_k;
 +                      A >>= K;
                        continue;
                case BPF_S_ALU_NEG:
                        A = -A;
                        continue;
                case BPF_S_JMP_JA:
 -                      pc += f_k;
 +                      fentry += K;
                        continue;
                case BPF_S_JMP_JGT_K:
 -                      pc += (A > f_k) ? fentry->jt : fentry->jf;
 +                      fentry += (A > K) ? fentry->jt : fentry->jf;
                        continue;
                case BPF_S_JMP_JGE_K:
 -                      pc += (A >= f_k) ? fentry->jt : fentry->jf;
 +                      fentry += (A >= K) ? fentry->jt : fentry->jf;
                        continue;
                case BPF_S_JMP_JEQ_K:
 -                      pc += (A == f_k) ? fentry->jt : fentry->jf;
 +                      fentry += (A == K) ? fentry->jt : fentry->jf;
                        continue;
                case BPF_S_JMP_JSET_K:
 -                      pc += (A & f_k) ? fentry->jt : fentry->jf;
 +                      fentry += (A & K) ? fentry->jt : fentry->jf;
                        continue;
                case BPF_S_JMP_JGT_X:
 -                      pc += (A > X) ? fentry->jt : fentry->jf;
 +                      fentry += (A > X) ? fentry->jt : fentry->jf;
                        continue;
                case BPF_S_JMP_JGE_X:
 -                      pc += (A >= X) ? fentry->jt : fentry->jf;
 +                      fentry += (A >= X) ? fentry->jt : fentry->jf;
                        continue;
                case BPF_S_JMP_JEQ_X:
 -                      pc += (A == X) ? fentry->jt : fentry->jf;
 +                      fentry += (A == X) ? fentry->jt : fentry->jf;
                        continue;
                case BPF_S_JMP_JSET_X:
 -                      pc += (A & X) ? fentry->jt : fentry->jf;
 +                      fentry += (A & X) ? fentry->jt : fentry->jf;
                        continue;
                case BPF_S_LD_W_ABS:
 -                      k = f_k;
 +                      k = K;
  load_w:
                        ptr = load_pointer(skb, k, 4, &tmp);
                        if (ptr != NULL) {
                        }
                        break;
                case BPF_S_LD_H_ABS:
 -                      k = f_k;
 +                      k = K;
  load_h:
                        ptr = load_pointer(skb, k, 2, &tmp);
                        if (ptr != NULL) {
                        }
                        break;
                case BPF_S_LD_B_ABS:
 -                      k = f_k;
 +                      k = K;
  load_b:
                        ptr = load_pointer(skb, k, 1, &tmp);
                        if (ptr != NULL) {
                        X = skb->len;
                        continue;
                case BPF_S_LD_W_IND:
 -                      k = X + f_k;
 +                      k = X + K;
                        goto load_w;
                case BPF_S_LD_H_IND:
 -                      k = X + f_k;
 +                      k = X + K;
                        goto load_h;
                case BPF_S_LD_B_IND:
 -                      k = X + f_k;
 +                      k = X + K;
                        goto load_b;
                case BPF_S_LDX_B_MSH:
 -                      ptr = load_pointer(skb, f_k, 1, &tmp);
 +                      ptr = load_pointer(skb, K, 1, &tmp);
                        if (ptr != NULL) {
                                X = (*(u8 *)ptr & 0xf) << 2;
                                continue;
                        }
                        return 0;
                case BPF_S_LD_IMM:
 -                      A = f_k;
 +                      A = K;
                        continue;
                case BPF_S_LDX_IMM:
 -                      X = f_k;
 +                      X = K;
                        continue;
                case BPF_S_LD_MEM:
 -                      A = (memvalid & (1UL << f_k)) ?
 -                              mem[f_k] : 0;
 +                      A = mem[K];
                        continue;
                case BPF_S_LDX_MEM:
 -                      X = (memvalid & (1UL << f_k)) ?
 -                              mem[f_k] : 0;
 +                      X = mem[K];
                        continue;
                case BPF_S_MISC_TAX:
                        X = A;
                        A = X;
                        continue;
                case BPF_S_RET_K:
 -                      return f_k;
 +                      return K;
                case BPF_S_RET_A:
                        return A;
                case BPF_S_ST:
 -                      memvalid |= 1UL << f_k;
 -                      mem[f_k] = A;
 +                      mem[K] = A;
                        continue;
                case BPF_S_STX:
 -                      memvalid |= 1UL << f_k;
 -                      mem[f_k] = X;
 +                      mem[K] = X;
                        continue;
                default:
                        WARN_ON(1);
                                return 0;
                        A = skb->dev->type;
                        continue;
 +              case SKF_AD_RXHASH:
 +                      A = skb->rxhash;
 +                      continue;
 +              case SKF_AD_CPU:
 +                      A = raw_smp_processor_id();
 +                      continue;
                case SKF_AD_NLATTR: {
                        struct nlattr *nla;
  
  }
  EXPORT_SYMBOL(sk_run_filter);
  
 +/*
 + * Security :
 + * A BPF program is able to use 16 cells of memory to store intermediate
 + * values (check u32 mem[BPF_MEMWORDS] in sk_run_filter())
 + * As we dont want to clear mem[] array for each packet going through
 + * sk_run_filter(), we check that filter loaded by user never try to read
 + * a cell if not previously written, and we check all branches to be sure
 + * a malicious user doesnt try to abuse us.
 + */
 +static int check_load_and_stores(struct sock_filter *filter, int flen)
 +{
 +      u16 *masks, memvalid = 0; /* one bit per cell, 16 cells */
 +      int pc, ret = 0;
 +
 +      BUILD_BUG_ON(BPF_MEMWORDS > 16);
 +      masks = kmalloc(flen * sizeof(*masks), GFP_KERNEL);
 +      if (!masks)
 +              return -ENOMEM;
 +      memset(masks, 0xff, flen * sizeof(*masks));
 +
 +      for (pc = 0; pc < flen; pc++) {
 +              memvalid &= masks[pc];
 +
 +              switch (filter[pc].code) {
 +              case BPF_S_ST:
 +              case BPF_S_STX:
 +                      memvalid |= (1 << filter[pc].k);
 +                      break;
 +              case BPF_S_LD_MEM:
 +              case BPF_S_LDX_MEM:
 +                      if (!(memvalid & (1 << filter[pc].k))) {
 +                              ret = -EINVAL;
 +                              goto error;
 +                      }
 +                      break;
 +              case BPF_S_JMP_JA:
 +                      /* a jump must set masks on target */
 +                      masks[pc + 1 + filter[pc].k] &= memvalid;
 +                      memvalid = ~0;
 +                      break;
 +              case BPF_S_JMP_JEQ_K:
 +              case BPF_S_JMP_JEQ_X:
 +              case BPF_S_JMP_JGE_K:
 +              case BPF_S_JMP_JGE_X:
 +              case BPF_S_JMP_JGT_K:
 +              case BPF_S_JMP_JGT_X:
 +              case BPF_S_JMP_JSET_X:
 +              case BPF_S_JMP_JSET_K:
 +                      /* a jump must set masks on targets */
 +                      masks[pc + 1 + filter[pc].jt] &= memvalid;
 +                      masks[pc + 1 + filter[pc].jf] &= memvalid;
 +                      memvalid = ~0;
 +                      break;
 +              }
 +      }
 +error:
 +      kfree(masks);
 +      return ret;
 +}
 +
  /**
   *    sk_chk_filter - verify socket filter code
   *    @filter: filter to verify
   */
  int sk_chk_filter(struct sock_filter *filter, int flen)
  {
 -      struct sock_filter *ftest;
 +      /*
 +       * Valid instructions are initialized to non-0.
 +       * Invalid instructions are initialized to 0.
 +       */
 +      static const u8 codes[] = {
 +              [BPF_ALU|BPF_ADD|BPF_K]  = BPF_S_ALU_ADD_K,
 +              [BPF_ALU|BPF_ADD|BPF_X]  = BPF_S_ALU_ADD_X,
 +              [BPF_ALU|BPF_SUB|BPF_K]  = BPF_S_ALU_SUB_K,
 +              [BPF_ALU|BPF_SUB|BPF_X]  = BPF_S_ALU_SUB_X,
 +              [BPF_ALU|BPF_MUL|BPF_K]  = BPF_S_ALU_MUL_K,
 +              [BPF_ALU|BPF_MUL|BPF_X]  = BPF_S_ALU_MUL_X,
 +              [BPF_ALU|BPF_DIV|BPF_X]  = BPF_S_ALU_DIV_X,
 +              [BPF_ALU|BPF_AND|BPF_K]  = BPF_S_ALU_AND_K,
 +              [BPF_ALU|BPF_AND|BPF_X]  = BPF_S_ALU_AND_X,
 +              [BPF_ALU|BPF_OR|BPF_K]   = BPF_S_ALU_OR_K,
 +              [BPF_ALU|BPF_OR|BPF_X]   = BPF_S_ALU_OR_X,
 +              [BPF_ALU|BPF_LSH|BPF_K]  = BPF_S_ALU_LSH_K,
 +              [BPF_ALU|BPF_LSH|BPF_X]  = BPF_S_ALU_LSH_X,
 +              [BPF_ALU|BPF_RSH|BPF_K]  = BPF_S_ALU_RSH_K,
 +              [BPF_ALU|BPF_RSH|BPF_X]  = BPF_S_ALU_RSH_X,
 +              [BPF_ALU|BPF_NEG]        = BPF_S_ALU_NEG,
 +              [BPF_LD|BPF_W|BPF_ABS]   = BPF_S_LD_W_ABS,
 +              [BPF_LD|BPF_H|BPF_ABS]   = BPF_S_LD_H_ABS,
 +              [BPF_LD|BPF_B|BPF_ABS]   = BPF_S_LD_B_ABS,
 +              [BPF_LD|BPF_W|BPF_LEN]   = BPF_S_LD_W_LEN,
 +              [BPF_LD|BPF_W|BPF_IND]   = BPF_S_LD_W_IND,
 +              [BPF_LD|BPF_H|BPF_IND]   = BPF_S_LD_H_IND,
 +              [BPF_LD|BPF_B|BPF_IND]   = BPF_S_LD_B_IND,
 +              [BPF_LD|BPF_IMM]         = BPF_S_LD_IMM,
 +              [BPF_LDX|BPF_W|BPF_LEN]  = BPF_S_LDX_W_LEN,
 +              [BPF_LDX|BPF_B|BPF_MSH]  = BPF_S_LDX_B_MSH,
 +              [BPF_LDX|BPF_IMM]        = BPF_S_LDX_IMM,
 +              [BPF_MISC|BPF_TAX]       = BPF_S_MISC_TAX,
 +              [BPF_MISC|BPF_TXA]       = BPF_S_MISC_TXA,
 +              [BPF_RET|BPF_K]          = BPF_S_RET_K,
 +              [BPF_RET|BPF_A]          = BPF_S_RET_A,
 +              [BPF_ALU|BPF_DIV|BPF_K]  = BPF_S_ALU_DIV_K,
 +              [BPF_LD|BPF_MEM]         = BPF_S_LD_MEM,
 +              [BPF_LDX|BPF_MEM]        = BPF_S_LDX_MEM,
 +              [BPF_ST]                 = BPF_S_ST,
 +              [BPF_STX]                = BPF_S_STX,
 +              [BPF_JMP|BPF_JA]         = BPF_S_JMP_JA,
 +              [BPF_JMP|BPF_JEQ|BPF_K]  = BPF_S_JMP_JEQ_K,
 +              [BPF_JMP|BPF_JEQ|BPF_X]  = BPF_S_JMP_JEQ_X,
 +              [BPF_JMP|BPF_JGE|BPF_K]  = BPF_S_JMP_JGE_K,
 +              [BPF_JMP|BPF_JGE|BPF_X]  = BPF_S_JMP_JGE_X,
 +              [BPF_JMP|BPF_JGT|BPF_K]  = BPF_S_JMP_JGT_K,
 +              [BPF_JMP|BPF_JGT|BPF_X]  = BPF_S_JMP_JGT_X,
 +              [BPF_JMP|BPF_JSET|BPF_K] = BPF_S_JMP_JSET_K,
 +              [BPF_JMP|BPF_JSET|BPF_X] = BPF_S_JMP_JSET_X,
 +      };
        int pc;
  
        if (flen == 0 || flen > BPF_MAXINSNS)
  
        /* check the filter code now */
        for (pc = 0; pc < flen; pc++) {
 -              ftest = &filter[pc];
 -
 -              /* Only allow valid instructions */
 -              switch (ftest->code) {
 -              case BPF_ALU|BPF_ADD|BPF_K:
 -                      ftest->code = BPF_S_ALU_ADD_K;
 -                      break;
 -              case BPF_ALU|BPF_ADD|BPF_X:
 -                      ftest->code = BPF_S_ALU_ADD_X;
 -                      break;
 -              case BPF_ALU|BPF_SUB|BPF_K:
 -                      ftest->code = BPF_S_ALU_SUB_K;
 -                      break;
 -              case BPF_ALU|BPF_SUB|BPF_X:
 -                      ftest->code = BPF_S_ALU_SUB_X;
 -                      break;
 -              case BPF_ALU|BPF_MUL|BPF_K:
 -                      ftest->code = BPF_S_ALU_MUL_K;
 -                      break;
 -              case BPF_ALU|BPF_MUL|BPF_X:
 -                      ftest->code = BPF_S_ALU_MUL_X;
 -                      break;
 -              case BPF_ALU|BPF_DIV|BPF_X:
 -                      ftest->code = BPF_S_ALU_DIV_X;
 -                      break;
 -              case BPF_ALU|BPF_AND|BPF_K:
 -                      ftest->code = BPF_S_ALU_AND_K;
 -                      break;
 -              case BPF_ALU|BPF_AND|BPF_X:
 -                      ftest->code = BPF_S_ALU_AND_X;
 -                      break;
 -              case BPF_ALU|BPF_OR|BPF_K:
 -                      ftest->code = BPF_S_ALU_OR_K;
 -                      break;
 -              case BPF_ALU|BPF_OR|BPF_X:
 -                      ftest->code = BPF_S_ALU_OR_X;
 -                      break;
 -              case BPF_ALU|BPF_LSH|BPF_K:
 -                      ftest->code = BPF_S_ALU_LSH_K;
 -                      break;
 -              case BPF_ALU|BPF_LSH|BPF_X:
 -                      ftest->code = BPF_S_ALU_LSH_X;
 -                      break;
 -              case BPF_ALU|BPF_RSH|BPF_K:
 -                      ftest->code = BPF_S_ALU_RSH_K;
 -                      break;
 -              case BPF_ALU|BPF_RSH|BPF_X:
 -                      ftest->code = BPF_S_ALU_RSH_X;
 -                      break;
 -              case BPF_ALU|BPF_NEG:
 -                      ftest->code = BPF_S_ALU_NEG;
 -                      break;
 -              case BPF_LD|BPF_W|BPF_ABS:
 -                      ftest->code = BPF_S_LD_W_ABS;
 -                      break;
 -              case BPF_LD|BPF_H|BPF_ABS:
 -                      ftest->code = BPF_S_LD_H_ABS;
 -                      break;
 -              case BPF_LD|BPF_B|BPF_ABS:
 -                      ftest->code = BPF_S_LD_B_ABS;
 -                      break;
 -              case BPF_LD|BPF_W|BPF_LEN:
 -                      ftest->code = BPF_S_LD_W_LEN;
 -                      break;
 -              case BPF_LD|BPF_W|BPF_IND:
 -                      ftest->code = BPF_S_LD_W_IND;
 -                      break;
 -              case BPF_LD|BPF_H|BPF_IND:
 -                      ftest->code = BPF_S_LD_H_IND;
 -                      break;
 -              case BPF_LD|BPF_B|BPF_IND:
 -                      ftest->code = BPF_S_LD_B_IND;
 -                      break;
 -              case BPF_LD|BPF_IMM:
 -                      ftest->code = BPF_S_LD_IMM;
 -                      break;
 -              case BPF_LDX|BPF_W|BPF_LEN:
 -                      ftest->code = BPF_S_LDX_W_LEN;
 -                      break;
 -              case BPF_LDX|BPF_B|BPF_MSH:
 -                      ftest->code = BPF_S_LDX_B_MSH;
 -                      break;
 -              case BPF_LDX|BPF_IMM:
 -                      ftest->code = BPF_S_LDX_IMM;
 -                      break;
 -              case BPF_MISC|BPF_TAX:
 -                      ftest->code = BPF_S_MISC_TAX;
 -                      break;
 -              case BPF_MISC|BPF_TXA:
 -                      ftest->code = BPF_S_MISC_TXA;
 -                      break;
 -              case BPF_RET|BPF_K:
 -                      ftest->code = BPF_S_RET_K;
 -                      break;
 -              case BPF_RET|BPF_A:
 -                      ftest->code = BPF_S_RET_A;
 -                      break;
 +              struct sock_filter *ftest = &filter[pc];
 +              u16 code = ftest->code;
  
 +              if (code >= ARRAY_SIZE(codes))
 +                      return -EINVAL;
 +              code = codes[code];
 +              if (!code)
 +                      return -EINVAL;
                /* Some instructions need special checks */
 -
 +              switch (code) {
 +              case BPF_S_ALU_DIV_K:
                        /* check for division by zero */
 -              case BPF_ALU|BPF_DIV|BPF_K:
                        if (ftest->k == 0)
                                return -EINVAL;
 -                      ftest->code = BPF_S_ALU_DIV_K;
 -                      break;
 -
 -              /* check for invalid memory addresses */
 -              case BPF_LD|BPF_MEM:
 -                      if (ftest->k >= BPF_MEMWORDS)
 -                              return -EINVAL;
 -                      ftest->code = BPF_S_LD_MEM;
 -                      break;
 -              case BPF_LDX|BPF_MEM:
 -                      if (ftest->k >= BPF_MEMWORDS)
 -                              return -EINVAL;
 -                      ftest->code = BPF_S_LDX_MEM;
 -                      break;
 -              case BPF_ST:
 -                      if (ftest->k >= BPF_MEMWORDS)
 -                              return -EINVAL;
 -                      ftest->code = BPF_S_ST;
 +                      ftest->k = reciprocal_value(ftest->k);
                        break;
 -              case BPF_STX:
 +              case BPF_S_LD_MEM:
 +              case BPF_S_LDX_MEM:
 +              case BPF_S_ST:
 +              case BPF_S_STX:
 +                      /* check for invalid memory addresses */
                        if (ftest->k >= BPF_MEMWORDS)
                                return -EINVAL;
 -                      ftest->code = BPF_S_STX;
                        break;
 -
 -              case BPF_JMP|BPF_JA:
 +              case BPF_S_JMP_JA:
                        /*
                         * Note, the large ftest->k might cause loops.
                         * Compare this with conditional jumps below,
                         */
                        if (ftest->k >= (unsigned)(flen-pc-1))
                                return -EINVAL;
 -                      ftest->code = BPF_S_JMP_JA;
 -                      break;
 -
 -              case BPF_JMP|BPF_JEQ|BPF_K:
 -                      ftest->code = BPF_S_JMP_JEQ_K;
 -                      break;
 -              case BPF_JMP|BPF_JEQ|BPF_X:
 -                      ftest->code = BPF_S_JMP_JEQ_X;
 -                      break;
 -              case BPF_JMP|BPF_JGE|BPF_K:
 -                      ftest->code = BPF_S_JMP_JGE_K;
 -                      break;
 -              case BPF_JMP|BPF_JGE|BPF_X:
 -                      ftest->code = BPF_S_JMP_JGE_X;
 -                      break;
 -              case BPF_JMP|BPF_JGT|BPF_K:
 -                      ftest->code = BPF_S_JMP_JGT_K;
 -                      break;
 -              case BPF_JMP|BPF_JGT|BPF_X:
 -                      ftest->code = BPF_S_JMP_JGT_X;
 -                      break;
 -              case BPF_JMP|BPF_JSET|BPF_K:
 -                      ftest->code = BPF_S_JMP_JSET_K;
                        break;
 -              case BPF_JMP|BPF_JSET|BPF_X:
 -                      ftest->code = BPF_S_JMP_JSET_X;
 -                      break;
 -
 -              default:
 -                      return -EINVAL;
 -              }
 -
 -                      /* for conditionals both must be safe */
 -              switch (ftest->code) {
                case BPF_S_JMP_JEQ_K:
                case BPF_S_JMP_JEQ_X:
                case BPF_S_JMP_JGE_K:
                case BPF_S_JMP_JGT_X:
                case BPF_S_JMP_JSET_X:
                case BPF_S_JMP_JSET_K:
 +                      /* for conditionals both must be safe */
                        if (pc + ftest->jt + 1 >= flen ||
                            pc + ftest->jf + 1 >= flen)
                                return -EINVAL;
 +                      break;
                }
 +              ftest->code = code;
        }
  
        /* last instruction must be a RET code */
        switch (filter[flen - 1].code) {
        case BPF_S_RET_K:
        case BPF_S_RET_A:
 -              return 0;
 -              break;
 -              default:
 -                      return -EINVAL;
 -              }
 +              return check_load_and_stores(filter, flen);
 +      }
 +      return -EINVAL;
  }
  EXPORT_SYMBOL(sk_chk_filter);
  
  /**
-  *    sk_filter_rcu_release - Release a socket filter by rcu_head
+  *    sk_filter_release_rcu - Release a socket filter by rcu_head
   *    @rcu: rcu_head that contains the sk_filter to free
   */
static void sk_filter_rcu_release(struct rcu_head *rcu)
void sk_filter_release_rcu(struct rcu_head *rcu)
  {
        struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);
  
-       sk_filter_release(fp);
- }
- static void sk_filter_delayed_uncharge(struct sock *sk, struct sk_filter *fp)
- {
-       unsigned int size = sk_filter_len(fp);
-       atomic_sub(size, &sk->sk_omem_alloc);
-       call_rcu_bh(&fp->rcu, sk_filter_rcu_release);
+       kfree(fp);
  }
+ EXPORT_SYMBOL(sk_filter_release_rcu);
  
  /**
   *    sk_attach_filter - attach a socket filter
@@@ -675,7 -642,7 +668,7 @@@ int sk_attach_filter(struct sock_fprog 
        rcu_assign_pointer(sk->sk_filter, fp);
  
        if (old_fp)
-               sk_filter_delayed_uncharge(sk, old_fp);
+               sk_filter_uncharge(sk, old_fp);
        return 0;
  }
  EXPORT_SYMBOL_GPL(sk_attach_filter);
@@@ -689,7 -656,7 +682,7 @@@ int sk_detach_filter(struct sock *sk
                                           sock_owned_by_user(sk));
        if (filter) {
                rcu_assign_pointer(sk->sk_filter, NULL);
-               sk_filter_delayed_uncharge(sk, filter);
+               sk_filter_uncharge(sk, filter);
                ret = 0;
        }
        return ret;
diff --combined net/core/request_sock.c
@@@ -33,7 -33,6 +33,7 @@@
   * Note : Dont forget somaxconn that may limit backlog too.
   */
  int sysctl_max_syn_backlog = 256;
 +EXPORT_SYMBOL(sysctl_max_syn_backlog);
  
  int reqsk_queue_alloc(struct request_sock_queue *queue,
                      unsigned int nr_table_entries)
@@@ -46,9 -45,7 +46,7 @@@
        nr_table_entries = roundup_pow_of_two(nr_table_entries + 1);
        lopt_size += nr_table_entries * sizeof(struct request_sock *);
        if (lopt_size > PAGE_SIZE)
-               lopt = __vmalloc(lopt_size,
-                       GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
-                       PAGE_KERNEL);
+               lopt = vzalloc(lopt_size);
        else
                lopt = kzalloc(lopt_size, GFP_KERNEL);
        if (lopt == NULL)
diff --combined net/dccp/input.c
@@@ -160,15 -160,13 +160,15 @@@ static void dccp_rcv_reset(struct sock 
        dccp_time_wait(sk, DCCP_TIME_WAIT, 0);
  }
  
 -static void dccp_event_ack_recv(struct sock *sk, struct sk_buff *skb)
 +static void dccp_handle_ackvec_processing(struct sock *sk, struct sk_buff *skb)
  {
 -      struct dccp_sock *dp = dccp_sk(sk);
 +      struct dccp_ackvec *av = dccp_sk(sk)->dccps_hc_rx_ackvec;
  
 -      if (dp->dccps_hc_rx_ackvec != NULL)
 -              dccp_ackvec_check_rcv_ackno(dp->dccps_hc_rx_ackvec, sk,
 -                                          DCCP_SKB_CB(skb)->dccpd_ack_seq);
 +      if (av == NULL)
 +              return;
 +      if (DCCP_SKB_CB(skb)->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
 +              dccp_ackvec_clear_state(av, DCCP_SKB_CB(skb)->dccpd_ack_seq);
 +      dccp_ackvec_input(av, skb);
  }
  
  static void dccp_deliver_input_to_ccids(struct sock *sk, struct sk_buff *skb)
@@@ -241,7 -239,8 +241,8 @@@ static int dccp_check_seqno(struct soc
                dccp_update_gsr(sk, seqno);
  
                if (dh->dccph_type != DCCP_PKT_SYNC &&
-                   (ackno != DCCP_PKT_WITHOUT_ACK_SEQ))
+                   ackno != DCCP_PKT_WITHOUT_ACK_SEQ &&
+                   after48(ackno, dp->dccps_gar))
                        dp->dccps_gar = ackno;
        } else {
                unsigned long now = jiffies;
@@@ -367,13 -366,22 +368,13 @@@ discard
  int dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
                         const struct dccp_hdr *dh, const unsigned len)
  {
 -      struct dccp_sock *dp = dccp_sk(sk);
 -
        if (dccp_check_seqno(sk, skb))
                goto discard;
  
        if (dccp_parse_options(sk, NULL, skb))
                return 1;
  
 -      if (DCCP_SKB_CB(skb)->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
 -              dccp_event_ack_recv(sk, skb);
 -
 -      if (dp->dccps_hc_rx_ackvec != NULL &&
 -          dccp_ackvec_add(dp->dccps_hc_rx_ackvec, sk,
 -                          DCCP_SKB_CB(skb)->dccpd_seq,
 -                          DCCP_ACKVEC_STATE_RECEIVED))
 -              goto discard;
 +      dccp_handle_ackvec_processing(sk, skb);
        dccp_deliver_input_to_ccids(sk, skb);
  
        return __dccp_rcv_established(sk, skb, dh, len);
@@@ -625,7 -633,15 +626,7 @@@ int dccp_rcv_state_process(struct sock 
                if (dccp_parse_options(sk, NULL, skb))
                        return 1;
  
 -              if (dcb->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
 -                      dccp_event_ack_recv(sk, skb);
 -
 -              if (dp->dccps_hc_rx_ackvec != NULL &&
 -                  dccp_ackvec_add(dp->dccps_hc_rx_ackvec, sk,
 -                                  DCCP_SKB_CB(skb)->dccpd_seq,
 -                                  DCCP_ACKVEC_STATE_RECEIVED))
 -                      goto discard;
 -
 +              dccp_handle_ackvec_processing(sk, skb);
                dccp_deliver_input_to_ccids(sk, skb);
        }
  
diff --combined net/decnet/af_decnet.c
@@@ -1556,6 -1556,8 +1556,8 @@@ static int __dn_getsockopt(struct socke
                        if (r_len > sizeof(struct linkinfo_dn))
                                r_len = sizeof(struct linkinfo_dn);
  
+                       memset(&link, 0, sizeof(link));
                        switch(sock->state) {
                                case SS_CONNECTING:
                                        link.idn_linkstate = LL_CONNECTING;
@@@ -1848,7 -1850,7 +1850,7 @@@ unsigned dn_mss_from_pmtu(struct net_de
  {
        unsigned mss = 230 - DN_MAX_NSP_DATA_HEADER;
        if (dev) {
 -              struct dn_dev *dn_db = dev->dn_ptr;
 +              struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr);
                mtu -= LL_RESERVED_SPACE(dev);
                if (dn_db->use_long)
                        mtu -= 21;
diff --combined net/ipv4/tcp.c
@@@ -1193,7 -1193,7 +1193,7 @@@ void tcp_cleanup_rbuf(struct sock *sk, 
        struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
  
        WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq),
 -           KERN_INFO "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n",
 +           "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n",
             tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt);
  #endif
  
@@@ -1477,9 -1477,10 +1477,9 @@@ int tcp_recvmsg(struct kiocb *iocb, str
                         * shouldn't happen.
                         */
                        if (WARN(before(*seq, TCP_SKB_CB(skb)->seq),
 -                           KERN_INFO "recvmsg bug: copied %X "
 -                                     "seq %X rcvnxt %X fl %X\n", *seq,
 -                                     TCP_SKB_CB(skb)->seq, tp->rcv_nxt,
 -                                     flags))
 +                               "recvmsg bug: copied %X seq %X rcvnxt %X fl %X\n",
 +                               *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt,
 +                               flags))
                                break;
  
                        offset = *seq - TCP_SKB_CB(skb)->seq;
                                goto found_ok_skb;
                        if (tcp_hdr(skb)->fin)
                                goto found_fin_ok;
 -                      WARN(!(flags & MSG_PEEK), KERN_INFO "recvmsg bug 2: "
 -                                      "copied %X seq %X rcvnxt %X fl %X\n",
 -                                      *seq, TCP_SKB_CB(skb)->seq,
 -                                      tp->rcv_nxt, flags);
 +                      WARN(!(flags & MSG_PEEK),
 +                           "recvmsg bug 2: copied %X seq %X rcvnxt %X fl %X\n",
 +                           *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, flags);
                }
  
                /* Well, if we have backlog, try to process it now yet. */
@@@ -2244,7 -2246,7 +2244,7 @@@ static int do_tcp_setsockopt(struct soc
                /* Values greater than interface MTU won't take effect. However
                 * at the point when this call is done we typically don't yet
                 * know which interface is going to be used */
-               if (val < 64 || val > MAX_TCP_WINDOW) {
+               if (val < TCP_MIN_MSS || val > MAX_TCP_WINDOW) {
                        err = -EINVAL;
                        break;
                }
diff --combined net/ipv4/tcp_ipv4.c
@@@ -1210,6 -1210,12 +1210,6 @@@ static const struct tcp_request_sock_op
  };
  #endif
  
 -static struct timewait_sock_ops tcp_timewait_sock_ops = {
 -      .twsk_obj_size  = sizeof(struct tcp_timewait_sock),
 -      .twsk_unique    = tcp_twsk_unique,
 -      .twsk_destructor= tcp_twsk_destructor,
 -};
 -
  int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
  {
        struct tcp_extend_values tmp_ext;
                    tcp_death_row.sysctl_tw_recycle &&
                    (dst = inet_csk_route_req(sk, req)) != NULL &&
                    (peer = rt_get_peer((struct rtable *)dst)) != NULL &&
 -                  peer->v4daddr == saddr) {
 +                  peer->daddr.a4 == saddr) {
                        inet_peer_refcheck(peer);
                        if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
                            (s32)(peer->tcp_ts - req->ts_recent) >
@@@ -1757,40 -1763,64 +1757,40 @@@ do_time_wait
        goto discard_it;
  }
  
 -/* VJ's idea. Save last timestamp seen from this destination
 - * and hold it at least for normal timewait interval to use for duplicate
 - * segment detection in subsequent connections, before they enter synchronized
 - * state.
 - */
 -
 -int tcp_v4_remember_stamp(struct sock *sk)
 +struct inet_peer *tcp_v4_get_peer(struct sock *sk, bool *release_it)
  {
 +      struct rtable *rt = (struct rtable *) __sk_dst_get(sk);
        struct inet_sock *inet = inet_sk(sk);
 -      struct tcp_sock *tp = tcp_sk(sk);
 -      struct rtable *rt = (struct rtable *)__sk_dst_get(sk);
 -      struct inet_peer *peer = NULL;
 -      int release_it = 0;
 +      struct inet_peer *peer;
  
        if (!rt || rt->rt_dst != inet->inet_daddr) {
 -              peer = inet_getpeer(inet->inet_daddr, 1);
 -              release_it = 1;
 +              peer = inet_getpeer_v4(inet->inet_daddr, 1);
 +              *release_it = true;
        } else {
                if (!rt->peer)
                        rt_bind_peer(rt, 1);
                peer = rt->peer;
 +              *release_it = false;
        }
  
 -      if (peer) {
 -              if ((s32)(peer->tcp_ts - tp->rx_opt.ts_recent) <= 0 ||
 -                  ((u32)get_seconds() - peer->tcp_ts_stamp > TCP_PAWS_MSL &&
 -                   peer->tcp_ts_stamp <= (u32)tp->rx_opt.ts_recent_stamp)) {
 -                      peer->tcp_ts_stamp = (u32)tp->rx_opt.ts_recent_stamp;
 -                      peer->tcp_ts = tp->rx_opt.ts_recent;
 -              }
 -              if (release_it)
 -                      inet_putpeer(peer);
 -              return 1;
 -      }
 -
 -      return 0;
 +      return peer;
  }
 -EXPORT_SYMBOL(tcp_v4_remember_stamp);
 +EXPORT_SYMBOL(tcp_v4_get_peer);
  
 -int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw)
 +void *tcp_v4_tw_get_peer(struct sock *sk)
  {
 -      struct inet_peer *peer = inet_getpeer(tw->tw_daddr, 1);
 -
 -      if (peer) {
 -              const struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
 -
 -              if ((s32)(peer->tcp_ts - tcptw->tw_ts_recent) <= 0 ||
 -                  ((u32)get_seconds() - peer->tcp_ts_stamp > TCP_PAWS_MSL &&
 -                   peer->tcp_ts_stamp <= (u32)tcptw->tw_ts_recent_stamp)) {
 -                      peer->tcp_ts_stamp = (u32)tcptw->tw_ts_recent_stamp;
 -                      peer->tcp_ts       = tcptw->tw_ts_recent;
 -              }
 -              inet_putpeer(peer);
 -              return 1;
 -      }
 +      struct inet_timewait_sock *tw = inet_twsk(sk);
  
 -      return 0;
 +      return inet_getpeer_v4(tw->tw_daddr, 1);
  }
 +EXPORT_SYMBOL(tcp_v4_tw_get_peer);
 +
 +static struct timewait_sock_ops tcp_timewait_sock_ops = {
 +      .twsk_obj_size  = sizeof(struct tcp_timewait_sock),
 +      .twsk_unique    = tcp_twsk_unique,
 +      .twsk_destructor= tcp_twsk_destructor,
 +      .twsk_getpeer   = tcp_v4_tw_get_peer,
 +};
  
  const struct inet_connection_sock_af_ops ipv4_specific = {
        .queue_xmit        = ip_queue_xmit,
        .rebuild_header    = inet_sk_rebuild_header,
        .conn_request      = tcp_v4_conn_request,
        .syn_recv_sock     = tcp_v4_syn_recv_sock,
 -      .remember_stamp    = tcp_v4_remember_stamp,
 +      .get_peer          = tcp_v4_get_peer,
        .net_header_len    = sizeof(struct iphdr),
        .setsockopt        = ip_setsockopt,
        .getsockopt        = ip_getsockopt,
@@@ -2013,7 -2043,9 +2013,9 @@@ get_req
        }
  get_sk:
        sk_nulls_for_each_from(sk, node) {
-               if (sk->sk_family == st->family && net_eq(sock_net(sk), net)) {
+               if (!net_eq(sock_net(sk), net))
+                       continue;
+               if (sk->sk_family == st->family) {
                        cur = sk;
                        goto out;
                }
diff --combined net/ipv4/tcp_minisocks.c
@@@ -49,56 -49,6 +49,56 @@@ struct inet_timewait_death_row tcp_deat
  };
  EXPORT_SYMBOL_GPL(tcp_death_row);
  
 +/* VJ's idea. Save last timestamp seen from this destination
 + * and hold it at least for normal timewait interval to use for duplicate
 + * segment detection in subsequent connections, before they enter synchronized
 + * state.
 + */
 +
 +static int tcp_remember_stamp(struct sock *sk)
 +{
 +      const struct inet_connection_sock *icsk = inet_csk(sk);
 +      struct tcp_sock *tp = tcp_sk(sk);
 +      struct inet_peer *peer;
 +      bool release_it;
 +
 +      peer = icsk->icsk_af_ops->get_peer(sk, &release_it);
 +      if (peer) {
 +              if ((s32)(peer->tcp_ts - tp->rx_opt.ts_recent) <= 0 ||
 +                  ((u32)get_seconds() - peer->tcp_ts_stamp > TCP_PAWS_MSL &&
 +                   peer->tcp_ts_stamp <= (u32)tp->rx_opt.ts_recent_stamp)) {
 +                      peer->tcp_ts_stamp = (u32)tp->rx_opt.ts_recent_stamp;
 +                      peer->tcp_ts = tp->rx_opt.ts_recent;
 +              }
 +              if (release_it)
 +                      inet_putpeer(peer);
 +              return 1;
 +      }
 +
 +      return 0;
 +}
 +
 +static int tcp_tw_remember_stamp(struct inet_timewait_sock *tw)
 +{
 +      struct sock *sk = (struct sock *) tw;
 +      struct inet_peer *peer;
 +
 +      peer = twsk_getpeer(sk);
 +      if (peer) {
 +              const struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
 +
 +              if ((s32)(peer->tcp_ts - tcptw->tw_ts_recent) <= 0 ||
 +                  ((u32)get_seconds() - peer->tcp_ts_stamp > TCP_PAWS_MSL &&
 +                   peer->tcp_ts_stamp <= (u32)tcptw->tw_ts_recent_stamp)) {
 +                      peer->tcp_ts_stamp = (u32)tcptw->tw_ts_recent_stamp;
 +                      peer->tcp_ts       = tcptw->tw_ts_recent;
 +              }
 +              inet_putpeer(peer);
 +              return 1;
 +      }
 +      return 0;
 +}
 +
  static __inline__ int tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
  {
        if (seq == s_win)
@@@ -199,9 -149,14 +199,9 @@@ kill_with_rst
                        tcptw->tw_ts_recent       = tmp_opt.rcv_tsval;
                }
  
 -              /* I am shamed, but failed to make it more elegant.
 -               * Yes, it is direct reference to IP, which is impossible
 -               * to generalize to IPv6. Taking into account that IPv6
 -               * do not understand recycling in any case, it not
 -               * a big problem in practice. --ANK */
 -              if (tw->tw_family == AF_INET &&
 -                  tcp_death_row.sysctl_tw_recycle && tcptw->tw_ts_recent_stamp &&
 -                  tcp_v4_tw_remember_stamp(tw))
 +              if (tcp_death_row.sysctl_tw_recycle &&
 +                  tcptw->tw_ts_recent_stamp &&
 +                  tcp_tw_remember_stamp(tw))
                        inet_twsk_schedule(tw, &tcp_death_row, tw->tw_timeout,
                                           TCP_TIMEWAIT_LEN);
                else
@@@ -319,7 -274,7 +319,7 @@@ void tcp_time_wait(struct sock *sk, in
        int recycle_ok = 0;
  
        if (tcp_death_row.sysctl_tw_recycle && tp->rx_opt.ts_recent_stamp)
 -              recycle_ok = icsk->icsk_af_ops->remember_stamp(sk);
 +              recycle_ok = tcp_remember_stamp(sk);
  
        if (tcp_death_row.tw_count < tcp_death_row.sysctl_max_tw_buckets)
                tw = inet_twsk_alloc(sk, state);
                 * socket up.  We've got bigger problems than
                 * non-graceful socket closings.
                 */
-               LIMIT_NETDEBUG(KERN_INFO "TCP: time wait bucket table overflow\n");
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPTIMEWAITOVERFLOW);
        }
  
        tcp_update_metrics(sk);
diff --combined net/ipv4/tcp_output.c
@@@ -55,7 -55,7 +55,7 @@@ int sysctl_tcp_workaround_signed_window
  int sysctl_tcp_tso_win_divisor __read_mostly = 3;
  
  int sysctl_tcp_mtu_probing __read_mostly = 0;
 -int sysctl_tcp_base_mss __read_mostly = 512;
 +int sysctl_tcp_base_mss __read_mostly = TCP_BASE_MSS;
  
  /* By default, RFC2861 behavior.  */
  int sysctl_tcp_slow_start_after_idle __read_mostly = 1;
@@@ -231,11 -231,10 +231,10 @@@ void tcp_select_initial_window(int __sp
                /* when initializing use the value from init_rcv_wnd
                 * rather than the default from above
                 */
-               if (init_rcv_wnd &&
-                   (*rcv_wnd > init_rcv_wnd * mss))
-                       *rcv_wnd = init_rcv_wnd * mss;
-               else if (*rcv_wnd > init_cwnd * mss)
-                       *rcv_wnd = init_cwnd * mss;
+               if (init_rcv_wnd)
+                       *rcv_wnd = min(*rcv_wnd, init_rcv_wnd * mss);
+               else
+                       *rcv_wnd = min(*rcv_wnd, init_cwnd * mss);
        }
  
        /* Set the clamp no higher than max representable value */
@@@ -386,27 -385,30 +385,30 @@@ struct tcp_out_options 
   */
  static u8 tcp_cookie_size_check(u8 desired)
  {
-       if (desired > 0) {
+       int cookie_size;
+       if (desired > 0)
                /* previously specified */
                return desired;
-       }
-       if (sysctl_tcp_cookie_size <= 0) {
+       cookie_size = ACCESS_ONCE(sysctl_tcp_cookie_size);
+       if (cookie_size <= 0)
                /* no default specified */
                return 0;
-       }
-       if (sysctl_tcp_cookie_size <= TCP_COOKIE_MIN) {
+       if (cookie_size <= TCP_COOKIE_MIN)
                /* value too small, specify minimum */
                return TCP_COOKIE_MIN;
-       }
-       if (sysctl_tcp_cookie_size >= TCP_COOKIE_MAX) {
+       if (cookie_size >= TCP_COOKIE_MAX)
                /* value too large, specify maximum */
                return TCP_COOKIE_MAX;
-       }
-       if (0x1 & sysctl_tcp_cookie_size) {
+       if (cookie_size & 1)
                /* 8-bit multiple, illegal, fix it */
-               return (u8)(sysctl_tcp_cookie_size + 0x1);
-       }
-       return (u8)sysctl_tcp_cookie_size;
+               cookie_size++;
+       return (u8)cookie_size;
  }
  
  /* Write previously computed TCP options to the packet.
@@@ -822,11 -824,8 +824,11 @@@ static int tcp_transmit_skb(struct soc
                                                           &md5);
        tcp_header_size = tcp_options_size + sizeof(struct tcphdr);
  
 -      if (tcp_packets_in_flight(tp) == 0)
 +      if (tcp_packets_in_flight(tp) == 0) {
                tcp_ca_event(sk, CA_EVENT_TX_START);
 +              skb->ooo_okay = 1;
 +      } else
 +              skb->ooo_okay = 0;
  
        skb_push(skb, tcp_header_size);
        skb_reset_transport_header(skb);
@@@ -1516,6 -1515,7 +1518,7 @@@ static int tcp_tso_should_defer(struct 
        struct tcp_sock *tp = tcp_sk(sk);
        const struct inet_connection_sock *icsk = inet_csk(sk);
        u32 send_win, cong_win, limit, in_flight;
+       int win_divisor;
  
        if (TCP_SKB_CB(skb)->flags & TCPHDR_FIN)
                goto send_now;
        if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len))
                goto send_now;
  
-       if (sysctl_tcp_tso_win_divisor) {
+       win_divisor = ACCESS_ONCE(sysctl_tcp_tso_win_divisor);
+       if (win_divisor) {
                u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache);
  
                /* If at least some fraction of a window is available,
                 * just use it.
                 */
-               chunk /= sysctl_tcp_tso_win_divisor;
+               chunk /= win_divisor;
                if (limit >= chunk)
                        goto send_now;
        } else {
@@@ -2595,7 -2596,6 +2599,7 @@@ int tcp_connect(struct sock *sk
  {
        struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *buff;
 +      int err;
  
        tcp_connect_init(sk);
  
        sk->sk_wmem_queued += buff->truesize;
        sk_mem_charge(sk, buff->truesize);
        tp->packets_out += tcp_skb_pcount(buff);
 -      tcp_transmit_skb(sk, buff, 1, sk->sk_allocation);
 +      err = tcp_transmit_skb(sk, buff, 1, sk->sk_allocation);
 +      if (err == -ECONNREFUSED)
 +              return err;
  
        /* We change tp->snd_nxt after the tcp_transmit_skb() call
         * in order to make this packet get counted in tcpOutSegs.
diff --combined net/ipv6/ip6_tunnel.c
@@@ -58,6 -58,8 +58,6 @@@ MODULE_AUTHOR("Ville Nuorvala")
  MODULE_DESCRIPTION("IPv6 tunneling device");
  MODULE_LICENSE("GPL");
  
 -#define IPV6_TLV_TEL_DST_SIZE 8
 -
  #ifdef IP6_TNL_DEBUG
  #define IP6_TNL_TRACE(x...) printk(KERN_DEBUG "%s:" x "\n", __func__)
  #else
@@@ -1173,6 -1175,8 +1173,8 @@@ static void ip6_tnl_link_config(struct 
                                sizeof (struct ipv6hdr);
  
                        dev->mtu = rt->rt6i_dev->mtu - sizeof (struct ipv6hdr);
+                       if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
+                               dev->mtu-=8;
  
                        if (dev->mtu < IPV6_MIN_MTU)
                                dev->mtu = IPV6_MIN_MTU;
@@@ -1361,12 -1365,17 +1363,17 @@@ static const struct net_device_ops ip6_
  
  static void ip6_tnl_dev_setup(struct net_device *dev)
  {
+       struct ip6_tnl *t;
        dev->netdev_ops = &ip6_tnl_netdev_ops;
        dev->destructor = ip6_dev_free;
  
        dev->type = ARPHRD_TUNNEL6;
        dev->hard_header_len = LL_MAX_HEADER + sizeof (struct ipv6hdr);
        dev->mtu = ETH_DATA_LEN - sizeof (struct ipv6hdr);
+       t = netdev_priv(dev);
+       if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
+               dev->mtu-=8;
        dev->flags |= IFF_NOARP;
        dev->addr_len = sizeof(struct in6_addr);
        dev->features |= NETIF_F_NETNS_LOCAL;
diff --combined net/ipv6/sit.c
@@@ -606,8 -606,9 +606,9 @@@ static int ipip6_rcv(struct sk_buff *sk
                return 0;
        }
  
-       icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
+       /* no tunnel matched,  let upstream know, ipsec may handle it */
        rcu_read_unlock();
+       return 1;
  out:
        kfree_skb(skb);
        return 0;
@@@ -730,9 -731,10 +731,9 @@@ static netdev_tx_t ipip6_tunnel_xmit(st
        }
  
        {
 -              struct flowi fl = { .nl_u = { .ip4_u =
 -                                            { .daddr = dst,
 -                                              .saddr = tiph->saddr,
 -                                              .tos = RT_TOS(tos) } },
 +              struct flowi fl = { .fl4_dst = dst,
 +                                  .fl4_src = tiph->saddr,
 +                                  .fl4_tos = RT_TOS(tos),
                                    .oif = tunnel->parms.link,
                                    .proto = IPPROTO_IPV6 };
                if (ip_route_output_key(dev_net(dev), &rt, &fl)) {
@@@ -854,9 -856,10 +855,9 @@@ static void ipip6_tunnel_bind_dev(struc
        iph = &tunnel->parms.iph;
  
        if (iph->daddr) {
 -              struct flowi fl = { .nl_u = { .ip4_u =
 -                                            { .daddr = iph->daddr,
 -                                              .saddr = iph->saddr,
 -                                              .tos = RT_TOS(iph->tos) } },
 +              struct flowi fl = { .fl4_dst = iph->daddr,
 +                                  .fl4_src = iph->saddr,
 +                                  .fl4_tos = RT_TOS(iph->tos),
                                    .oif = tunnel->parms.link,
                                    .proto = IPPROTO_IPV6 };
                struct rtable *rt;
diff --combined net/l2tp/l2tp_ip.c
@@@ -476,13 -476,15 +476,13 @@@ static int l2tp_ip_sendmsg(struct kioc
  
                {
                        struct flowi fl = { .oif = sk->sk_bound_dev_if,
 -                                          .nl_u = { .ip4_u = {
 -                                                      .daddr = daddr,
 -                                                      .saddr = inet->inet_saddr,
 -                                                      .tos = RT_CONN_FLAGS(sk) } },
 +                                          .fl4_dst = daddr,
 +                                          .fl4_src = inet->inet_saddr,
 +                                          .fl4_tos = RT_CONN_FLAGS(sk),
                                            .proto = sk->sk_protocol,
                                            .flags = inet_sk_flowi_flags(sk),
 -                                          .uli_u = { .ports = {
 -                                                       .sport = inet->inet_sport,
 -                                                       .dport = inet->inet_dport } } };
 +                                          .fl_ip_sport = inet->inet_sport,
 +                                          .fl_ip_dport = inet->inet_dport };
  
                        /* If this fails, retransmit mechanism of transport layer will
                         * keep trying until route appears or the connection times
@@@ -672,4 -674,8 +672,8 @@@ MODULE_LICENSE("GPL")
  MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
  MODULE_DESCRIPTION("L2TP over IP");
  MODULE_VERSION("1.0");
- MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, SOCK_DGRAM, IPPROTO_L2TP);
+ /* Use the value of SOCK_DGRAM (2) directory, because __stringify does't like
+  * enums
+  */
+ MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, 2, IPPROTO_L2TP);
diff --combined net/mac80211/rx.c
@@@ -1102,6 -1102,8 +1102,6 @@@ static void ap_sta_ps_end(struct sta_in
  
        atomic_dec(&sdata->bss->num_sta_ps);
  
 -      clear_sta_flags(sta, WLAN_STA_PS_STA);
 -
  #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
        printk(KERN_DEBUG "%s: STA %pM aid %d exits power save mode\n",
               sdata->name, sta->sta.addr, sta->sta.aid);
@@@ -2245,6 -2247,10 +2245,10 @@@ ieee80211_rx_h_mgmt(struct ieee80211_rx
                break;
        case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
        case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
+               if (is_multicast_ether_addr(mgmt->da) &&
+                   !is_broadcast_ether_addr(mgmt->da))
+                       return RX_DROP_MONITOR;
                /* process only for station */
                if (sdata->vif.type != NL80211_IFTYPE_STATION)
                        return RX_DROP_MONITOR;
@@@ -2739,6 -2745,7 +2743,7 @@@ static void __ieee80211_rx_handle_packe
  
                        if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
                                return;
+                       goto out;
                }
        }
  
                        return;
        }
  
+  out:
        dev_kfree_skb(skb);
  }
  
diff --combined net/mac80211/tx.c
@@@ -622,8 -622,7 +622,8 @@@ ieee80211_tx_h_rate_ctrl(struct ieee802
                txrc.max_rate_idx = -1;
        else
                txrc.max_rate_idx = fls(txrc.rate_idx_mask) - 1;
 -      txrc.ap = tx->sdata->vif.type == NL80211_IFTYPE_AP;
 +      txrc.bss = (tx->sdata->vif.type == NL80211_IFTYPE_AP ||
 +                  tx->sdata->vif.type == NL80211_IFTYPE_ADHOC);
  
        /* set up RTS protection if desired */
        if (len > tx->local->hw.wiphy->rts_threshold) {
@@@ -1034,7 -1033,6 +1034,7 @@@ static bool __ieee80211_parse_tx_radiot
        struct ieee80211_radiotap_header *rthdr =
                (struct ieee80211_radiotap_header *) skb->data;
        struct ieee80211_supported_band *sband;
 +      bool hw_frag;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        int ret = ieee80211_radiotap_iterator_init(&iterator, rthdr, skb->len,
                                                   NULL);
        info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
        tx->flags &= ~IEEE80211_TX_FRAGMENTED;
  
 +      /* packet is fragmented in HW if we have a non-NULL driver callback */
 +      hw_frag = (tx->local->ops->set_frag_threshold != NULL);
 +
        /*
         * for every radiotap entry that is present
         * (ieee80211_radiotap_iterator_next returns -ENOENT when no more
                        }
                        if (*iterator.this_arg & IEEE80211_RADIOTAP_F_WEP)
                                info->flags &= ~IEEE80211_TX_INTFL_DONT_ENCRYPT;
 -                      if (*iterator.this_arg & IEEE80211_RADIOTAP_F_FRAG)
 +                      if ((*iterator.this_arg & IEEE80211_RADIOTAP_F_FRAG) &&
 +                                                              !hw_frag)
                                tx->flags |= IEEE80211_TX_FRAGMENTED;
                        break;
  
@@@ -1187,10 -1181,8 +1187,10 @@@ ieee80211_tx_prepare(struct ieee80211_s
        /*
         * Set this flag (used below to indicate "automatic fragmentation"),
         * it will be cleared/left by radiotap as desired.
 +       * Only valid when fragmentation is done by the stack.
         */
 -      tx->flags |= IEEE80211_TX_FRAGMENTED;
 +      if (!local->ops->set_frag_threshold)
 +              tx->flags |= IEEE80211_TX_FRAGMENTED;
  
        /* process and remove the injection radiotap header */
        if (unlikely(info->flags & IEEE80211_TX_INTFL_HAS_RADIOTAP)) {
@@@ -1595,7 -1587,12 +1595,12 @@@ static void ieee80211_xmit(struct ieee8
                                                list) {
                                if (!ieee80211_sdata_running(tmp_sdata))
                                        continue;
-                               if (tmp_sdata->vif.type != NL80211_IFTYPE_AP)
+                               if (tmp_sdata->vif.type ==
+                                   NL80211_IFTYPE_MONITOR ||
+                                   tmp_sdata->vif.type ==
+                                   NL80211_IFTYPE_AP_VLAN ||
+                                       tmp_sdata->vif.type ==
+                                   NL80211_IFTYPE_WDS)
                                        continue;
                                if (compare_ether_addr(tmp_sdata->vif.addr,
                                                       hdr->addr2) == 0) {
@@@ -2309,7 -2306,7 +2314,7 @@@ struct sk_buff *ieee80211_beacon_get_ti
                txrc.max_rate_idx = -1;
        else
                txrc.max_rate_idx = fls(txrc.rate_idx_mask) - 1;
 -      txrc.ap = true;
 +      txrc.bss = true;
        rate_control_get_rate(sdata, NULL, &txrc);
  
        info->control.vif = vif;
diff --combined net/unix/af_unix.c
@@@ -316,8 -316,7 +316,8 @@@ static void unix_write_space(struct soc
        if (unix_writable(sk)) {
                wq = rcu_dereference(sk->sk_wq);
                if (wq_has_sleeper(wq))
 -                      wake_up_interruptible_sync(&wq->wait);
 +                      wake_up_interruptible_sync_poll(&wq->wait,
 +                              POLLOUT | POLLWRNORM | POLLWRBAND);
                sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
        }
        rcu_read_unlock();
@@@ -1344,9 -1343,25 +1344,25 @@@ static void unix_destruct_scm(struct sk
        sock_wfree(skb);
  }
  
+ #define MAX_RECURSION_LEVEL 4
  static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
  {
        int i;
+       unsigned char max_level = 0;
+       int unix_sock_count = 0;
+       for (i = scm->fp->count - 1; i >= 0; i--) {
+               struct sock *sk = unix_get_socket(scm->fp->fp[i]);
+               if (sk) {
+                       unix_sock_count++;
+                       max_level = max(max_level,
+                                       unix_sk(sk)->recursion_level);
+               }
+       }
+       if (unlikely(max_level > MAX_RECURSION_LEVEL))
+               return -ETOOMANYREFS;
  
        /*
         * Need to duplicate file references for the sake of garbage
        if (!UNIXCB(skb).fp)
                return -ENOMEM;
  
-       for (i = scm->fp->count-1; i >= 0; i--)
-               unix_inflight(scm->fp->fp[i]);
-       return 0;
+       if (unix_sock_count) {
+               for (i = scm->fp->count - 1; i >= 0; i--)
+                       unix_inflight(scm->fp->fp[i]);
+       }
+       return max_level;
  }
  
  static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
@@@ -1394,6 -1411,7 +1412,7 @@@ static int unix_dgram_sendmsg(struct ki
        struct sk_buff *skb;
        long timeo;
        struct scm_cookie tmp_scm;
+       int max_level;
  
        if (NULL == siocb->scm)
                siocb->scm = &tmp_scm;
                goto out;
  
        err = unix_scm_to_skb(siocb->scm, skb, true);
-       if (err)
+       if (err < 0)
                goto out_free;
+       max_level = err + 1;
        unix_get_secdata(siocb->scm, skb);
  
        skb_reset_transport_header(skb);
@@@ -1515,6 -1534,8 +1535,8 @@@ restart
        if (sock_flag(other, SOCK_RCVTSTAMP))
                __net_timestamp(skb);
        skb_queue_tail(&other->sk_receive_queue, skb);
+       if (max_level > unix_sk(other)->recursion_level)
+               unix_sk(other)->recursion_level = max_level;
        unix_state_unlock(other);
        other->sk_data_ready(other, len);
        sock_put(other);
@@@ -1545,6 -1566,7 +1567,7 @@@ static int unix_stream_sendmsg(struct k
        int sent = 0;
        struct scm_cookie tmp_scm;
        bool fds_sent = false;
+       int max_level;
  
        if (NULL == siocb->scm)
                siocb->scm = &tmp_scm;
  
                /* Only send the fds in the first buffer */
                err = unix_scm_to_skb(siocb->scm, skb, !fds_sent);
-               if (err) {
+               if (err < 0) {
                        kfree_skb(skb);
                        goto out_err;
                }
+               max_level = err + 1;
                fds_sent = true;
  
                err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
                        goto pipe_err_free;
  
                skb_queue_tail(&other->sk_receive_queue, skb);
+               if (max_level > unix_sk(other)->recursion_level)
+                       unix_sk(other)->recursion_level = max_level;
                unix_state_unlock(other);
                other->sk_data_ready(other, size);
                sent += size;
@@@ -1711,8 -1736,7 +1737,8 @@@ static int unix_dgram_recvmsg(struct ki
                goto out_unlock;
        }
  
 -      wake_up_interruptible_sync(&u->peer_wait);
 +      wake_up_interruptible_sync_poll(&u->peer_wait,
 +                                      POLLOUT | POLLWRNORM | POLLWRBAND);
  
        if (msg->msg_name)
                unix_copy_addr(msg, skb->sk);
@@@ -1847,6 -1871,7 +1873,7 @@@ static int unix_stream_recvmsg(struct k
                unix_state_lock(sk);
                skb = skb_dequeue(&sk->sk_receive_queue);
                if (skb == NULL) {
+                       unix_sk(sk)->recursion_level = 0;
                        if (copied >= target)
                                goto unlock;
  
@@@ -2074,12 -2099,13 +2101,12 @@@ static unsigned int unix_dgram_poll(str
        if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
                mask |= POLLERR;
        if (sk->sk_shutdown & RCV_SHUTDOWN)
 -              mask |= POLLRDHUP;
 +              mask |= POLLRDHUP | POLLIN | POLLRDNORM;
        if (sk->sk_shutdown == SHUTDOWN_MASK)
                mask |= POLLHUP;
  
        /* readable? */
 -      if (!skb_queue_empty(&sk->sk_receive_queue) ||
 -          (sk->sk_shutdown & RCV_SHUTDOWN))
 +      if (!skb_queue_empty(&sk->sk_receive_queue))
                mask |= POLLIN | POLLRDNORM;
  
        /* Connection-based need to check for termination and startup */
                        return mask;
        }
  
 -      /* writable? */
 -      writable = unix_writable(sk);
 -      if (writable) {
 -              other = unix_peer_get(sk);
 -              if (other) {
 -                      if (unix_peer(other) != sk) {
 -                              sock_poll_wait(file, &unix_sk(other)->peer_wait,
 -                                        wait);
 -                              if (unix_recvq_full(other))
 -                                      writable = 0;
 -                      }
 +      /* No write status requested, avoid expensive OUT tests. */
 +      if (wait && !(wait->key & (POLLWRBAND | POLLWRNORM | POLLOUT)))
 +              return mask;
  
 -                      sock_put(other);
 +      writable = unix_writable(sk);
 +      other = unix_peer_get(sk);
 +      if (other) {
 +              if (unix_peer(other) != sk) {
 +                      sock_poll_wait(file, &unix_sk(other)->peer_wait, wait);
 +                      if (unix_recvq_full(other))
 +                              writable = 0;
                }
 +              sock_put(other);
        }
  
        if (writable)
diff --combined net/x25/x25_link.c
@@@ -31,8 -31,8 +31,8 @@@
  #include <linux/init.h>
  #include <net/x25.h>
  
 -static LIST_HEAD(x25_neigh_list);
 -static DEFINE_RWLOCK(x25_neigh_list_lock);
 +LIST_HEAD(x25_neigh_list);
 +DEFINE_RWLOCK(x25_neigh_list_lock);
  
  static void x25_t20timer_expiry(unsigned long);
  
@@@ -360,20 -360,16 +360,20 @@@ int x25_subscr_ioctl(unsigned int cmd, 
        dev_put(dev);
  
        if (cmd == SIOCX25GSUBSCRIP) {
 +              read_lock_bh(&x25_neigh_list_lock);
                x25_subscr.extended          = nb->extended;
                x25_subscr.global_facil_mask = nb->global_facil_mask;
 +              read_unlock_bh(&x25_neigh_list_lock);
                rc = copy_to_user(arg, &x25_subscr,
                                  sizeof(x25_subscr)) ? -EFAULT : 0;
        } else {
                rc = -EINVAL;
                if (!(x25_subscr.extended && x25_subscr.extended != 1)) {
                        rc = 0;
 +                      write_lock_bh(&x25_neigh_list_lock);
                        nb->extended         = x25_subscr.extended;
                        nb->global_facil_mask = x25_subscr.global_facil_mask;
 +                      write_unlock_bh(&x25_neigh_list_lock);
                }
        }
        x25_neigh_put(nb);
@@@ -398,6 -394,7 +398,7 @@@ void __exit x25_link_free(void
        list_for_each_safe(entry, tmp, &x25_neigh_list) {
                nb = list_entry(entry, struct x25_neigh, node);
                __x25_remove_neigh(nb);
+               dev_put(nb->dev);
        }
        write_unlock_bh(&x25_neigh_list_lock);
  }