Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
author    David S. Miller <davem@davemloft.net>
          Sat, 24 May 2014 04:32:30 +0000 (00:32 -0400)
committer David S. Miller <davem@davemloft.net>
          Sat, 24 May 2014 04:32:30 +0000 (00:32 -0400)
Conflicts:
drivers/net/bonding/bond_alb.c
drivers/net/ethernet/altera/altera_msgdma.c
drivers/net/ethernet/altera/altera_sgdma.c
net/ipv6/xfrm6_output.c

Several cases of overlapping changes.

In xfrm6_output.c, a bug fix overlaps with the renaming
of skb->local_df to skb->ignore_df.

In the Altera TSE driver cases, the register access cleanups
in net-next overlapped with bug fixes done in net.

Similarly, a bonding driver bug fix to send ALB packets using the
right source address overlaps with cleanups in net-next.

Signed-off-by: David S. Miller <davem@davemloft.net>
51 files changed:
Documentation/networking/filter.txt
MAINTAINERS
arch/arm/boot/dts/am33xx.dtsi
arch/x86/net/bpf_jit_comp.c
drivers/net/bonding/bond_alb.c
drivers/net/bonding/bond_main.c
drivers/net/bonding/bond_options.c
drivers/net/bonding/bonding.h
drivers/net/can/c_can/c_can.c
drivers/net/ethernet/altera/altera_sgdma.c
drivers/net/ethernet/altera/altera_tse_ethtool.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/mellanox/mlx4/cmd.c
drivers/net/ethernet/mellanox/mlx4/mlx4.h
drivers/net/ethernet/mellanox/mlx4/qp.c
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/macvlan.c
drivers/net/phy/phy_device.c
drivers/net/wireless/iwlwifi/mvm/coex.c
drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
drivers/net/wireless/iwlwifi/mvm/mac80211.c
drivers/net/wireless/iwlwifi/mvm/mvm.h
drivers/net/wireless/iwlwifi/mvm/rs.c
drivers/net/wireless/iwlwifi/mvm/scan.c
drivers/net/wireless/iwlwifi/mvm/utils.c
drivers/net/wireless/iwlwifi/pcie/trans.c
drivers/net/xen-netback/interface.c
include/linux/if_vlan.h
include/linux/netdevice.h
include/net/ip6_route.h
include/uapi/linux/audit.h
include/uapi/linux/nl80211.h
net/8021q/vlan_dev.c
net/batman-adv/distributed-arp-table.c
net/core/dev.c
net/core/net_namespace.c
net/core/rtnetlink.c
net/core/skbuff.c
net/ipv4/ip_tunnel.c
net/ipv4/route.c
net/ipv4/xfrm4_output.c
net/ipv6/ip6_output.c
net/ipv6/ip6_tunnel.c
net/ipv6/ip6_vti.c
net/ipv6/route.c
net/ipv6/xfrm6_output.c
net/mac80211/ieee80211_i.h
net/mac80211/mlme.c
net/sched/cls_tcindex.c

diff --combined Documentation/networking/filter.txt
@@@ -277,11 -277,10 +277,11 @@@ Possible BPF extensions are shown in th
    mark                                  skb->mark
    queue                                 skb->queue_mapping
    hatype                                skb->dev->type
-   rxhash                                skb->rxhash
+   rxhash                                skb->hash
    cpu                                   raw_smp_processor_id()
    vlan_tci                              vlan_tx_tag_get(skb)
    vlan_pr                               vlan_tx_tag_present(skb)
 +  rand                                  prandom_u32()
  
  These extensions can also be prefixed with '#'.
  Examples for low-level BPF:
    ret #-1
    drop: ret #0
  
 +** icmp random packet sampling, 1 in 4
 +  ldh [12]
 +  jne #0x800, drop
 +  ldb [23]
 +  jneq #1, drop
 +  # get a random uint32 number
 +  ld rand
 +  mod #4
 +  jneq #1, drop
 +  ret #-1
 +  drop: ret #0
 +
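 +  (a usage sketch: the sampler above can be translated into opcode form with
 +  the bpf_asm tool from tools/net/, e.g. 'bpf_asm icmp_rand.bpf', and the
 +  resulting opcodes attached to a socket via setsockopt(SO_ATTACH_FILTER);
 +  the file name is just an example)
 +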
  ** SECCOMP filter example:
  
    ld [4]                  /* offsetof(struct seccomp_data, arch) */
@@@ -613,7 -600,7 +613,7 @@@ Some core changes of the new internal f
  
    Therefore, BPF calling convention is defined as:
  
 -    * R0      - return value from in-kernel function
 +    * R0      - return value from in-kernel function, and exit value for BPF program
      * R1 - R5 - arguments from BPF program to in-kernel function
      * R6 - R9 - callee saved registers that in-kernel function will preserve
      * R10     - read-only frame pointer to access stack
  - Introduces bpf_call insn and register passing convention for zero overhead
    calls from/to other kernel functions:
  
 -  After a kernel function call, R1 - R5 are reset to unreadable and R0 has a
 -  return type of the function. Since R6 - R9 are callee saved, their state is
 -  preserved across the call.
 +  Before an in-kernel function call, the internal BPF program needs to
 +  place function arguments into R1 to R5 registers to satisfy calling
 +  convention, then the interpreter will take them from registers and pass
 +  to in-kernel function. If R1 - R5 registers are mapped to CPU registers
 +  that are used for argument passing on given architecture, the JIT compiler
 +  doesn't need to emit extra moves. Function arguments will be in the correct
 +  registers and BPF_CALL instruction will be JITed as single 'call' HW
 +  instruction. This calling convention was picked to cover common call
 +  situations without performance penalty.
 +
 +  After an in-kernel function call, R1 - R5 are reset to unreadable and R0 has
 +  a return value of the function. Since R6 - R9 are callee saved, their state
 +  is preserved across the call.
 +
 +  For example, consider three C functions:
 +
 +  u64 f1() { return (*_f2)(1); }
 +  u64 f2(u64 a) { return f3(a + 1, a); }
 +  u64 f3(u64 a, u64 b) { return a - b; }
 +
 +  GCC can compile f1, f3 into x86_64:
 +
 +  f1:
 +    movl $1, %edi
 +    movq _f2(%rip), %rax
 +    jmp  *%rax
 +  f3:
 +    movq %rdi, %rax
 +    subq %rsi, %rax
 +    ret
 +
 +  Function f2 in BPF may look like:
 +
 +  f2:
 +    bpf_mov R2, R1
 +    bpf_add R1, 1
 +    bpf_call f3
 +    bpf_exit
 +
 +  If f2 is JITed and its pointer is stored to '_f2', then the calls
 +  f1 -> f2 -> f3 and the returns will be seamless. Without JIT, the
 +  __sk_run_filter() interpreter needs to be used to call into f2.
 +
 +  For practical reasons, all BPF programs have only one argument 'ctx' which
 +  is already placed into R1 (e.g. on __sk_run_filter() startup) and the
 +  programs can call kernel functions with up to 5 arguments. Calls with 6 or
 +  more arguments are currently not supported, but this restriction can be
 +  lifted if necessary in the future.
 +
 +  On 64-bit architectures all registers map to HW registers one to one. For
 +  example, the x86_64 JIT compiler can map them as ...
 +
 +    R0 - rax
 +    R1 - rdi
 +    R2 - rsi
 +    R3 - rdx
 +    R4 - rcx
 +    R5 - r8
 +    R6 - rbx
 +    R7 - r13
 +    R8 - r14
 +    R9 - r15
 +    R10 - rbp
 +
 +  ... since x86_64 ABI mandates rdi, rsi, rdx, rcx, r8, r9 for argument passing
 +  and rbx, r12 - r15 are callee saved.
 +
 +  Then the following internal BPF pseudo-program:
 +
 +    bpf_mov R6, R1 /* save ctx */
 +    bpf_mov R2, 2
 +    bpf_mov R3, 3
 +    bpf_mov R4, 4
 +    bpf_mov R5, 5
 +    bpf_call foo
 +    bpf_mov R7, R0 /* save foo() return value */
 +    bpf_mov R1, R6 /* restore ctx for next call */
 +    bpf_mov R2, 6
 +    bpf_mov R3, 7
 +    bpf_mov R4, 8
 +    bpf_mov R5, 9
 +    bpf_call bar
 +    bpf_add R0, R7
 +    bpf_exit
 +
 +  After JIT to x86_64 it may look like:
 +
 +    push %rbp
 +    mov %rsp,%rbp
 +    sub $0x228,%rsp
 +    mov %rbx,-0x228(%rbp)
 +    mov %r13,-0x220(%rbp)
 +    mov %rdi,%rbx
 +    mov $0x2,%esi
 +    mov $0x3,%edx
 +    mov $0x4,%ecx
 +    mov $0x5,%r8d
 +    callq foo
 +    mov %rax,%r13
 +    mov %rbx,%rdi
 +    mov $0x2,%esi
 +    mov $0x3,%edx
 +    mov $0x4,%ecx
 +    mov $0x5,%r8d
 +    callq bar
 +    add %r13,%rax
 +    mov -0x228(%rbp),%rbx
 +    mov -0x220(%rbp),%r13
 +    leaveq
 +    retq
 +
 +  Which in this example is equivalent in C to:
 +
 +    u64 bpf_filter(u64 ctx)
 +    {
 +        return foo(ctx, 2, 3, 4, 5) + bar(ctx, 6, 7, 8, 9);
 +    }
 +
 +  In-kernel functions foo() and bar() with prototype: u64 (*)(u64 arg1, u64
 +  arg2, u64 arg3, u64 arg4, u64 arg5); will receive arguments in the proper
 +  registers and place their return value into '%rax', which is R0 in BPF.
 +  Prologue and epilogue are emitted by the JIT and are implicit in the
 +  interpreter. R0 - R5 are scratch registers, so a BPF program that needs
 +  them across a call must save them itself, as defined by the calling
 +  convention.
 +
 +  For example, the following program is invalid:
 +
 +    bpf_mov R1, 1
 +    bpf_call foo
 +    bpf_mov R0, R1
 +    bpf_exit
 +
 +  After the call, the registers R1 - R5 contain junk values and cannot be
 +  read. In the future, a BPF verifier can be used to validate internal BPF
 +  programs.
  
  Also in the new design, BPF is limited to 4096 insns, which means that any
  program will terminate quickly and will only call a fixed number of kernel
@@@ -807,25 -663,6 +807,25 @@@ A program, that is translated internall
  
    op:16, jt:8, jf:8, k:32    ==>    op:8, a_reg:4, x_reg:4, off:16, imm:32
  
 +So far, 87 internal BPF instructions have been implemented. The 8-bit 'op'
 +opcode field has room for new instructions, and some of them may use a
 +16/24/32 byte encoding. New instructions must be a multiple of 8 bytes to
 +preserve backward compatibility.
 +
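 +As a C sketch, this layout corresponds to the in-kernel instruction struct
 +(sock_filter_int in this tree; the signed off/imm types are shown here for
 +illustration):
 +
 +  struct sock_filter_int {
 +          __u8    code;       /* opcode, op:8 */
 +          __u8    a_reg:4;    /* destination register */
 +          __u8    x_reg:4;    /* source register */
 +          __s16   off;        /* signed offset */
 +          __s32   imm;        /* signed immediate constant */
 +  };
 +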
 +Internal BPF is a general purpose RISC instruction set. Not every register
 +and every instruction is used during the translation from original BPF to
 +the new format. For example, socket filters do not use the 'exclusive add'
 +instruction, but tracing filters may use it to maintain event counters.
 +Register R9 is not used by socket filters either, but more complex filters
 +may run out of registers and have to resort to spill/fill to the stack.
 +
 +Internal BPF can be used as a generic assembler for last-step performance
 +optimizations; socket filters and seccomp already use it this way, and
 +tracing filters may use it as an assembler to generate code from within the
 +kernel. In-kernel usage may not be bounded by security considerations, since
 +generated internal BPF code may be optimizing an internal code path without
 +ever being exposed to user space. Safety of internal BPF can come from a
 +verifier (TBD). In such use cases as described, it may be used as a safe
 +instruction set.
 +
  Just like the original BPF, the new format runs within a controlled environment,
  is deterministic and the kernel can easily prove that. The safety of the program
  can be determined in two steps: first step does depth-first-search to disallow
@@@ -833,20 -670,6 +833,20 @@@ loops and other CFG validation; second 
  descends all possible paths. It simulates execution of every insn and observes
  the state change of registers and stack.
  
 +Testing
 +-------
 +
 +Next to the BPF toolchain, the kernel also ships a test module that contains
 +various test cases for classic and internal BPF that can be executed against
 +the BPF interpreter and JIT compiler. It can be found in lib/test_bpf.c and
 +enabled via Kconfig:
 +
 +  CONFIG_TEST_BPF=m
 +
 +After the module has been built and installed, the test suite can be executed
 +by loading the 'test_bpf' module via insmod or modprobe. Results of the test
 +cases, including timings in nsec, can be found in the kernel log (dmesg).
 +
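 +For example (a minimal sketch of a typical run):
 +
 +  modprobe test_bpf         # loading the module runs all test cases
 +  dmesg | grep test_bpf     # per-test results and timings in nsec
 +  rmmod test_bpf            # unload before a re-run
 +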
  Misc
  ----
  
diff --combined MAINTAINERS
@@@ -537,7 -537,7 +537,7 @@@ L: linux-alpha@vger.kernel.or
  F:    arch/alpha/
  
  ALTERA TRIPLE SPEED ETHERNET DRIVER
- M:    Vince Bridgers <vbridgers2013@gmail.com
+ M:    Vince Bridgers <vbridgers2013@gmail.com>
  L:    netdev@vger.kernel.org
  L:    nios2-dev@lists.rocketboards.org (moderated for non-subscribers)
  S:    Maintained
@@@ -1893,14 -1893,15 +1893,15 @@@ L:   netdev@vger.kernel.or
  S:    Supported
  F:    drivers/net/ethernet/broadcom/bnx2x/
  
- BROADCOM BCM281XX/BCM11XXX ARM ARCHITECTURE
+ BROADCOM BCM281XX/BCM11XXX/BCM216XX ARM ARCHITECTURE
  M:    Christian Daudt <bcm@fixthebug.org>
  M:    Matt Porter <mporter@linaro.org>
  L:    bcm-kernel-feedback-list@broadcom.com
- T:    git git://git.github.com/broadcom/bcm11351
+ T:    git git://github.com/broadcom/mach-bcm
  S:    Maintained
  F:    arch/arm/mach-bcm/
  F:    arch/arm/boot/dts/bcm113*
+ F:    arch/arm/boot/dts/bcm216*
  F:    arch/arm/boot/dts/bcm281*
  F:    arch/arm/configs/bcm_defconfig
  F:    drivers/mmc/host/sdhci_bcm_kona.c
@@@ -1967,12 -1968,6 +1968,12 @@@ S:    Maintaine
  F:    drivers/bcma/
  F:    include/linux/bcma/
  
 +BROADCOM SYSTEMPORT ETHERNET DRIVER
 +M:    Florian Fainelli <f.fainelli@gmail.com>
 +L:    netdev@vger.kernel.org
 +S:    Supported
 +F:    drivers/net/ethernet/broadcom/bcmsysport.*
 +
  BROCADE BFA FC SCSI DRIVER
  M:    Anil Gurumurthy <anil.gurumurthy@qlogic.com>
  M:    Sudarsana Kalluru <sudarsana.kalluru@qlogic.com>
@@@ -2228,8 -2223,9 +2229,8 @@@ F:      drivers/platform/chrome
  CISCO VIC ETHERNET NIC DRIVER
  M:    Christian Benvenuti <benve@cisco.com>
  M:    Sujith Sankar <ssujith@cisco.com>
 -M:    Govindarajulu Varadarajan <govindarajulu90@gmail.com>
 +M:    Govindarajulu Varadarajan <_govind@gmx.com>
  M:    Neel Patel <neepatel@cisco.com>
 -M:    Nishank Trivedi <nistrive@cisco.com>
  S:    Supported
  F:    drivers/net/ethernet/cisco/enic/
  
@@@ -2250,12 -2246,6 +2251,6 @@@ L:     linux-usb@vger.kernel.or
  S:    Maintained
  F:    drivers/usb/host/ohci-ep93xx.c
  
- CIRRUS LOGIC CS4270 SOUND DRIVER
- M:    Timur Tabi <timur@tabi.org>
- L:    alsa-devel@alsa-project.org (moderated for non-subscribers)
- S:    Odd Fixes
- F:    sound/soc/codecs/cs4270*
  CIRRUS LOGIC AUDIO CODEC DRIVERS
  M:    Brian Austin <brian.austin@cirrus.com>
  M:    Paul Handrigan <Paul.Handrigan@cirrus.com>
@@@ -4823,6 -4813,14 +4818,14 @@@ L:    linux-kernel@vger.kernel.or
  S:    Maintained
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core
  F:    kernel/irq/
+ 
+ IRQCHIP DRIVERS
+ M:    Thomas Gleixner <tglx@linutronix.de>
+ M:    Jason Cooper <jason@lakedaemon.net>
+ L:    linux-kernel@vger.kernel.org
+ S:    Maintained
+ T:    git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core
+ T:    git git://git.infradead.org/users/jcooper/linux.git irqchip/core
  F:    drivers/irqchip/
  
  IRQ DOMAINS (IRQ NUMBER MAPPING LIBRARY)
@@@ -5495,15 -5493,15 +5498,15 @@@ F:   Documentation/hwmon/ltc426
  F:    drivers/hwmon/ltc4261.c
  
  LTP (Linux Test Project)
- M:    Shubham Goyal <shubham@linux.vnet.ibm.com>
  M:    Mike Frysinger <vapier@gentoo.org>
  M:    Cyril Hrubis <chrubis@suse.cz>
- M:    Caspar Zhang <caspar@casparzhang.com>
  M:    Wanlong Gao <gaowanlong@cn.fujitsu.com>
+ M:    Jan Stancek <jstancek@redhat.com>
+ M:    Stanislav Kholmanskikh <stanislav.kholmanskikh@oracle.com>
+ M:    Alexey Kodanev <alexey.kodanev@oracle.com>
  L:    ltp-list@lists.sourceforge.net (subscribers-only)
- W:    http://ltp.sourceforge.net/
+ W:    http://linux-test-project.github.io/
  T:    git git://github.com/linux-test-project/ltp.git
- T:    git git://ltp.git.sourceforge.net/gitroot/ltp/ltp-dev
  S:    Maintained
  
  M32R ARCHITECTURE
@@@ -6516,10 -6514,10 +6519,10 @@@ T:   git git://openrisc.net/~jonas/linu
  F:    arch/openrisc/
  
  OPENVSWITCH
- M:    Jesse Gross <jesse@nicira.com>
+ M:    Pravin Shelar <pshelar@nicira.com>
  L:    dev@openvswitch.org
  W:    http://openvswitch.org
- T:    git git://git.kernel.org/pub/scm/linux/kernel/git/jesse/openvswitch.git
+ T:    git git://git.kernel.org/pub/scm/linux/kernel/git/pshelar/openvswitch.git
  S:    Maintained
  F:    net/openvswitch/
  
@@@ -9112,6 -9110,9 +9115,9 @@@ F:      arch/um/os-Linux/drivers
  
  TURBOCHANNEL SUBSYSTEM
  M:    "Maciej W. Rozycki" <macro@linux-mips.org>
+ M:    Ralf Baechle <ralf@linux-mips.org>
+ L:    linux-mips@linux-mips.org
+ Q:    http://patchwork.linux-mips.org/project/linux-mips/list/
  S:    Maintained
  F:    drivers/tc/
  F:    include/linux/tc.h
@@@ -9965,7 -9966,7 +9971,7 @@@ F:      drivers/net/hamradio/*scc.
  F:    drivers/net/hamradio/z8530.h
  
  ZBUD COMPRESSED PAGE ALLOCATOR
- M:    Seth Jennings <sjenning@linux.vnet.ibm.com>
+ M:    Seth Jennings <sjennings@variantweb.net>
  L:    linux-mm@kvack.org
  S:    Maintained
  F:    mm/zbud.c
@@@ -10010,7 -10011,7 +10016,7 @@@ F:   mm/zsmalloc.
  F:    include/linux/zsmalloc.h
  
  ZSWAP COMPRESSED SWAP CACHING
- M:    Seth Jennings <sjenning@linux.vnet.ibm.com>
+ M:    Seth Jennings <sjennings@variantweb.net>
  L:    linux-mm@kvack.org
  S:    Maintained
  F:    mm/zswap.c
                        compatible = "ti,edma3";
                        ti,hwmods = "tpcc", "tptc0", "tptc1", "tptc2";
                        reg =   <0x49000000 0x10000>,
-                               <0x44e10f90 0x10>;
+                               <0x44e10f90 0x40>;
                        interrupts = <12 13 14>;
                        #dma-cells = <1>;
                        dma-channels = <64>;
                mac: ethernet@4a100000 {
                        compatible = "ti,cpsw";
                        ti,hwmods = "cpgmac0";
 +                      clocks = <&cpsw_125mhz_gclk>, <&cpsw_cpts_rft_clk>;
 +                      clock-names = "fck", "cpts";
                        cpdma_channels = <8>;
                        ale_entries = <1024>;
                        bd_ram_size = <0x2000>;
diff --combined arch/x86/net/bpf_jit_comp.c
@@@ -1,7 -1,6 +1,7 @@@
  /* bpf_jit_comp.c : BPF JIT compiler
   *
   * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com)
 + * Internal BPF Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
   *
   * This program is free software; you can redistribute it and/or
   * modify it under the terms of the GNU General Public License
  #include <linux/if_vlan.h>
  #include <linux/random.h>
  
 -/*
 - * Conventions :
 - *  EAX : BPF A accumulator
 - *  EBX : BPF X accumulator
 - *  RDI : pointer to skb   (first argument given to JIT function)
 - *  RBP : frame pointer (even if CONFIG_FRAME_POINTER=n)
 - *  ECX,EDX,ESI : scratch registers
 - *  r9d : skb->len - skb->data_len (headlen)
 - *  r8  : skb->data
 - * -8(RBP) : saved RBX value
 - * -16(RBP)..-80(RBP) : BPF_MEMWORDS values
 - */
  int bpf_jit_enable __read_mostly;
  
  /*
   * assembly code in arch/x86/net/bpf_jit.S
   */
 -extern u8 sk_load_word[], sk_load_half[], sk_load_byte[], sk_load_byte_msh[];
 +extern u8 sk_load_word[], sk_load_half[], sk_load_byte[];
  extern u8 sk_load_word_positive_offset[], sk_load_half_positive_offset[];
 -extern u8 sk_load_byte_positive_offset[], sk_load_byte_msh_positive_offset[];
 +extern u8 sk_load_byte_positive_offset[];
  extern u8 sk_load_word_negative_offset[], sk_load_half_negative_offset[];
 -extern u8 sk_load_byte_negative_offset[], sk_load_byte_msh_negative_offset[];
 +extern u8 sk_load_byte_negative_offset[];
  
  static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
  {
  #define EMIT2(b1, b2)         EMIT((b1) + ((b2) << 8), 2)
  #define EMIT3(b1, b2, b3)     EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
  #define EMIT4(b1, b2, b3, b4)   EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
 -#define EMIT1_off32(b1, off)  do { EMIT1(b1); EMIT(off, 4);} while (0)
 -
 -#define CLEAR_A() EMIT2(0x31, 0xc0) /* xor %eax,%eax */
 -#define CLEAR_X() EMIT2(0x31, 0xdb) /* xor %ebx,%ebx */
 +#define EMIT1_off32(b1, off) \
 +      do {EMIT1(b1); EMIT(off, 4); } while (0)
 +#define EMIT2_off32(b1, b2, off) \
 +      do {EMIT2(b1, b2); EMIT(off, 4); } while (0)
 +#define EMIT3_off32(b1, b2, b3, off) \
 +      do {EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
 +#define EMIT4_off32(b1, b2, b3, b4, off) \
 +      do {EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)
  
  static inline bool is_imm8(int value)
  {
        return value <= 127 && value >= -128;
  }
  
 -static inline bool is_near(int offset)
 +static inline bool is_simm32(s64 value)
  {
 -      return offset <= 127 && offset >= -128;
 +      return value == (s64) (s32) value;
  }
  
 -#define EMIT_JMP(offset)                                              \
 -do {                                                                  \
 -      if (offset) {                                                   \
 -              if (is_near(offset))                                    \
 -                      EMIT2(0xeb, offset); /* jmp .+off8 */           \
 -              else                                                    \
 -                      EMIT1_off32(0xe9, offset); /* jmp .+off32 */    \
 -      }                                                               \
 -} while (0)
 +/* mov A, X */
 +#define EMIT_mov(A, X) \
 +      do {if (A != X) \
 +              EMIT3(add_2mod(0x48, A, X), 0x89, add_2reg(0xC0, A, X)); \
 +      } while (0)
 +
 +static int bpf_size_to_x86_bytes(int bpf_size)
 +{
 +      if (bpf_size == BPF_W)
 +              return 4;
 +      else if (bpf_size == BPF_H)
 +              return 2;
 +      else if (bpf_size == BPF_B)
 +              return 1;
 +      else if (bpf_size == BPF_DW)
 +              return 4; /* imm32 */
 +      else
 +              return 0;
 +}
  
  /* list of x86 cond jumps opcodes (. + s8)
   * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32)
  #define X86_JNE 0x75
  #define X86_JBE 0x76
  #define X86_JA  0x77
 -
 -#define EMIT_COND_JMP(op, offset)                             \
 -do {                                                          \
 -      if (is_near(offset))                                    \
 -              EMIT2(op, offset); /* jxx .+off8 */             \
 -      else {                                                  \
 -              EMIT2(0x0f, op + 0x10);                         \
 -              EMIT(offset, 4); /* jxx .+off32 */              \
 -      }                                                       \
 -} while (0)
 -
 -#define COND_SEL(CODE, TOP, FOP)      \
 -      case CODE:                      \
 -              t_op = TOP;             \
 -              f_op = FOP;             \
 -              goto cond_branch
 -
 -
 -#define SEEN_DATAREF 1 /* might call external helpers */
 -#define SEEN_XREG    2 /* ebx is used */
 -#define SEEN_MEM     4 /* use mem[] for temporary storage */
 +#define X86_JGE 0x7D
 +#define X86_JG  0x7F
  
  static inline void bpf_flush_icache(void *start, void *end)
  {
  #define CHOOSE_LOAD_FUNC(K, func) \
        ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
  
 -/* Helper to find the offset of pkt_type in sk_buff
 - * We want to make sure its still a 3bit field starting at a byte boundary.
 - */
 -#define PKT_TYPE_MAX 7
 -static int pkt_type_offset(void)
 -{
 -      struct sk_buff skb_probe = {
 -              .pkt_type = ~0,
 -      };
 -      char *ct = (char *)&skb_probe;
 -      unsigned int off;
 -
 -      for (off = 0; off < sizeof(struct sk_buff); off++) {
 -              if (ct[off] == PKT_TYPE_MAX)
 -                      return off;
 -      }
 -      pr_err_once("Please fix pkt_type_offset(), as pkt_type couldn't be found\n");
 -      return -1;
 -}
 -
  struct bpf_binary_header {
        unsigned int    pages;
        /* Note : for security reasons, bpf code will follow a randomly
@@@ -135,778 -171,590 +135,778 @@@ static struct bpf_binary_header *bpf_al
        memset(header, 0xcc, sz); /* fill whole space with int3 instructions */
  
        header->pages = sz / PAGE_SIZE;
-       hole = sz - (proglen + sizeof(*header));
+       hole = min(sz - (proglen + sizeof(*header)), PAGE_SIZE - sizeof(*header));
  
        /* insert a random number of int3 instructions before BPF code */
        *image_ptr = &header->image[prandom_u32() % hole];
        return header;
  }
  
 -void bpf_jit_compile(struct sk_filter *fp)
 +/* pick a register outside of BPF range for JIT internal work */
 +#define AUX_REG (MAX_BPF_REG + 1)
 +
 +/* the following table maps BPF registers to x64 registers.
 + * x64 register r12 is unused, since if used as base address register
 + * in load/store instructions, it always needs an extra byte of encoding
 + */
 +static const int reg2hex[] = {
 +      [BPF_REG_0] = 0,  /* rax */
 +      [BPF_REG_1] = 7,  /* rdi */
 +      [BPF_REG_2] = 6,  /* rsi */
 +      [BPF_REG_3] = 2,  /* rdx */
 +      [BPF_REG_4] = 1,  /* rcx */
 +      [BPF_REG_5] = 0,  /* r8 */
 +      [BPF_REG_6] = 3,  /* rbx callee saved */
 +      [BPF_REG_7] = 5,  /* r13 callee saved */
 +      [BPF_REG_8] = 6,  /* r14 callee saved */
 +      [BPF_REG_9] = 7,  /* r15 callee saved */
 +      [BPF_REG_FP] = 5, /* rbp readonly */
 +      [AUX_REG] = 3,    /* r11 temp register */
 +};
 +
 +/* is_ereg() == true if BPF register 'reg' maps to x64 r8..r15
 + * which need extra byte of encoding.
 + * rax,rcx,...,rbp have simpler encoding
 + */
 +static inline bool is_ereg(u32 reg)
  {
 -      u8 temp[64];
 -      u8 *prog;
 -      unsigned int proglen, oldproglen = 0;
 -      int ilen, i;
 -      int t_offset, f_offset;
 -      u8 t_op, f_op, seen = 0, pass;
 -      u8 *image = NULL;
 -      struct bpf_binary_header *header = NULL;
 -      u8 *func;
 -      int pc_ret0 = -1; /* bpf index of first RET #0 instruction (if any) */
 -      unsigned int cleanup_addr; /* epilogue code offset */
 -      unsigned int *addrs;
 -      const struct sock_filter *filter = fp->insns;
 -      int flen = fp->len;
 +      if (reg == BPF_REG_5 || reg == AUX_REG ||
 +          (reg >= BPF_REG_7 && reg <= BPF_REG_9))
 +              return true;
 +      else
 +              return false;
 +}
  
 -      if (!bpf_jit_enable)
 -              return;
 +/* add modifiers if 'reg' maps to x64 registers r8..r15 */
 +static inline u8 add_1mod(u8 byte, u32 reg)
 +{
 +      if (is_ereg(reg))
 +              byte |= 1;
 +      return byte;
 +}
  
 -      addrs = kmalloc(flen * sizeof(*addrs), GFP_KERNEL);
 -      if (addrs == NULL)
 -              return;
 +static inline u8 add_2mod(u8 byte, u32 r1, u32 r2)
 +{
 +      if (is_ereg(r1))
 +              byte |= 1;
 +      if (is_ereg(r2))
 +              byte |= 4;
 +      return byte;
 +}
  
 -      /* Before first pass, make a rough estimation of addrs[]
 -       * each bpf instruction is translated to less than 64 bytes
 +/* encode dest register 'a_reg' into x64 opcode 'byte' */
 +static inline u8 add_1reg(u8 byte, u32 a_reg)
 +{
 +      return byte + reg2hex[a_reg];
 +}
 +
 +/* encode dest 'a_reg' and src 'x_reg' registers into x64 opcode 'byte' */
 +static inline u8 add_2reg(u8 byte, u32 a_reg, u32 x_reg)
 +{
 +      return byte + reg2hex[a_reg] + (reg2hex[x_reg] << 3);
 +}
 +
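 +/* worked example: EMIT_mov(BPF_REG_0, BPF_REG_1) emits 'mov rax, rdi':
 + * neither register is an ereg, so add_2mod(0x48, R0, R1) keeps the plain
 + * REX.W prefix 0x48, and add_2reg(0xC0, R0, R1) = 0xC0 + 0 + (7 << 3) = 0xF8
 + * forms the ModRM byte, giving the byte sequence 48 89 f8.
 + */
 +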
 +struct jit_context {
 +      unsigned int cleanup_addr; /* epilogue code offset */
 +      bool seen_ld_abs;
 +};
 +
 +static int do_jit(struct sk_filter *bpf_prog, int *addrs, u8 *image,
 +                int oldproglen, struct jit_context *ctx)
 +{
 +      struct sock_filter_int *insn = bpf_prog->insnsi;
 +      int insn_cnt = bpf_prog->len;
 +      u8 temp[64];
 +      int i;
 +      int proglen = 0;
 +      u8 *prog = temp;
 +      int stacksize = MAX_BPF_STACK +
 +              32 /* space for rbx, r13, r14, r15 */ +
 +              8 /* space for skb_copy_bits() buffer */;
 +
 +      EMIT1(0x55); /* push rbp */
 +      EMIT3(0x48, 0x89, 0xE5); /* mov rbp,rsp */
 +
 +      /* sub rsp, stacksize */
 +      EMIT3_off32(0x48, 0x81, 0xEC, stacksize);
 +
 +      /* all classic BPF filters use R6(rbx), so save it */
 +
 +      /* mov qword ptr [rbp-X],rbx */
 +      EMIT3_off32(0x48, 0x89, 0x9D, -stacksize);
 +
 +      /* sk_convert_filter() maps classic BPF register X to R7 and uses R8
 +       * as temporary, so all tcpdump filters need to spill/fill R7(r13) and
 +       * R8(r14). R9(r15) spill could be made conditional, but there is only
 +       * one 'bpf_error' return path out of helper functions inside bpf_jit.S
 +       * The overhead of extra spill is negligible for any filter other
 +       * than synthetic ones. Therefore not worth adding complexity.
         */
 -      for (proglen = 0, i = 0; i < flen; i++) {
 -              proglen += 64;
 -              addrs[i] = proglen;
 +
 +      /* mov qword ptr [rbp-X],r13 */
 +      EMIT3_off32(0x4C, 0x89, 0xAD, -stacksize + 8);
 +      /* mov qword ptr [rbp-X],r14 */
 +      EMIT3_off32(0x4C, 0x89, 0xB5, -stacksize + 16);
 +      /* mov qword ptr [rbp-X],r15 */
 +      EMIT3_off32(0x4C, 0x89, 0xBD, -stacksize + 24);
 +
 +      /* clear A and X registers */
 +      EMIT2(0x31, 0xc0); /* xor eax, eax */
 +      EMIT3(0x4D, 0x31, 0xED); /* xor r13, r13 */
 +
 +      if (ctx->seen_ld_abs) {
 +              /* r9d : skb->len - skb->data_len (headlen)
 +               * r10 : skb->data
 +               */
 +              if (is_imm8(offsetof(struct sk_buff, len)))
 +                      /* mov %r9d, off8(%rdi) */
 +                      EMIT4(0x44, 0x8b, 0x4f,
 +                            offsetof(struct sk_buff, len));
 +              else
 +                      /* mov %r9d, off32(%rdi) */
 +                      EMIT3_off32(0x44, 0x8b, 0x8f,
 +                                  offsetof(struct sk_buff, len));
 +
 +              if (is_imm8(offsetof(struct sk_buff, data_len)))
 +                      /* sub %r9d, off8(%rdi) */
 +                      EMIT4(0x44, 0x2b, 0x4f,
 +                            offsetof(struct sk_buff, data_len));
 +              else
 +                      EMIT3_off32(0x44, 0x2b, 0x8f,
 +                                  offsetof(struct sk_buff, data_len));
 +
 +              if (is_imm8(offsetof(struct sk_buff, data)))
 +                      /* mov %r10, off8(%rdi) */
 +                      EMIT4(0x4c, 0x8b, 0x57,
 +                            offsetof(struct sk_buff, data));
 +              else
 +                      /* mov %r10, off32(%rdi) */
 +                      EMIT3_off32(0x4c, 0x8b, 0x97,
 +                                  offsetof(struct sk_buff, data));
        }
 -      cleanup_addr = proglen; /* epilogue address */
  
 -      for (pass = 0; pass < 10; pass++) {
 -              u8 seen_or_pass0 = (pass == 0) ? (SEEN_XREG | SEEN_DATAREF | SEEN_MEM) : seen;
 -              /* no prologue/epilogue for trivial filters (RET something) */
 -              proglen = 0;
 -              prog = temp;
 +      for (i = 0; i < insn_cnt; i++, insn++) {
 +              const s32 K = insn->imm;
 +              u32 a_reg = insn->a_reg;
 +              u32 x_reg = insn->x_reg;
 +              u8 b1 = 0, b2 = 0, b3 = 0;
 +              s64 jmp_offset;
 +              u8 jmp_cond;
 +              int ilen;
 +              u8 *func;
 +
 +              switch (insn->code) {
 +                      /* ALU */
 +              case BPF_ALU | BPF_ADD | BPF_X:
 +              case BPF_ALU | BPF_SUB | BPF_X:
 +              case BPF_ALU | BPF_AND | BPF_X:
 +              case BPF_ALU | BPF_OR | BPF_X:
 +              case BPF_ALU | BPF_XOR | BPF_X:
 +              case BPF_ALU64 | BPF_ADD | BPF_X:
 +              case BPF_ALU64 | BPF_SUB | BPF_X:
 +              case BPF_ALU64 | BPF_AND | BPF_X:
 +              case BPF_ALU64 | BPF_OR | BPF_X:
 +              case BPF_ALU64 | BPF_XOR | BPF_X:
 +                      switch (BPF_OP(insn->code)) {
 +                      case BPF_ADD: b2 = 0x01; break;
 +                      case BPF_SUB: b2 = 0x29; break;
 +                      case BPF_AND: b2 = 0x21; break;
 +                      case BPF_OR: b2 = 0x09; break;
 +                      case BPF_XOR: b2 = 0x31; break;
 +                      }
 +                      if (BPF_CLASS(insn->code) == BPF_ALU64)
 +                              EMIT1(add_2mod(0x48, a_reg, x_reg));
 +                      else if (is_ereg(a_reg) || is_ereg(x_reg))
 +                              EMIT1(add_2mod(0x40, a_reg, x_reg));
 +                      EMIT2(b2, add_2reg(0xC0, a_reg, x_reg));
 +                      break;
  
 -              if (seen_or_pass0) {
 -                      EMIT4(0x55, 0x48, 0x89, 0xe5); /* push %rbp; mov %rsp,%rbp */
 -                      EMIT4(0x48, 0x83, 0xec, 96);    /* subq  $96,%rsp       */
 -                      /* note : must save %rbx in case bpf_error is hit */
 -                      if (seen_or_pass0 & (SEEN_XREG | SEEN_DATAREF))
 -                              EMIT4(0x48, 0x89, 0x5d, 0xf8); /* mov %rbx, -8(%rbp) */
 -                      if (seen_or_pass0 & SEEN_XREG)
 -                              CLEAR_X(); /* make sure we dont leek kernel memory */
 -
 -                      /*
 -                       * If this filter needs to access skb data,
 -                       * loads r9 and r8 with :
 -                       *  r9 = skb->len - skb->data_len
 -                       *  r8 = skb->data
 +                      /* mov A, X */
 +              case BPF_ALU64 | BPF_MOV | BPF_X:
 +                      EMIT_mov(a_reg, x_reg);
 +                      break;
 +
 +                      /* mov32 A, X */
 +              case BPF_ALU | BPF_MOV | BPF_X:
 +                      if (is_ereg(a_reg) || is_ereg(x_reg))
 +                              EMIT1(add_2mod(0x40, a_reg, x_reg));
 +                      EMIT2(0x89, add_2reg(0xC0, a_reg, x_reg));
 +                      break;
 +
 +                      /* neg A */
 +              case BPF_ALU | BPF_NEG:
 +              case BPF_ALU64 | BPF_NEG:
 +                      if (BPF_CLASS(insn->code) == BPF_ALU64)
 +                              EMIT1(add_1mod(0x48, a_reg));
 +                      else if (is_ereg(a_reg))
 +                              EMIT1(add_1mod(0x40, a_reg));
 +                      EMIT2(0xF7, add_1reg(0xD8, a_reg));
 +                      break;
 +
 +              case BPF_ALU | BPF_ADD | BPF_K:
 +              case BPF_ALU | BPF_SUB | BPF_K:
 +              case BPF_ALU | BPF_AND | BPF_K:
 +              case BPF_ALU | BPF_OR | BPF_K:
 +              case BPF_ALU | BPF_XOR | BPF_K:
 +              case BPF_ALU64 | BPF_ADD | BPF_K:
 +              case BPF_ALU64 | BPF_SUB | BPF_K:
 +              case BPF_ALU64 | BPF_AND | BPF_K:
 +              case BPF_ALU64 | BPF_OR | BPF_K:
 +              case BPF_ALU64 | BPF_XOR | BPF_K:
 +                      if (BPF_CLASS(insn->code) == BPF_ALU64)
 +                              EMIT1(add_1mod(0x48, a_reg));
 +                      else if (is_ereg(a_reg))
 +                              EMIT1(add_1mod(0x40, a_reg));
 +
 +                      switch (BPF_OP(insn->code)) {
 +                      case BPF_ADD: b3 = 0xC0; break;
 +                      case BPF_SUB: b3 = 0xE8; break;
 +                      case BPF_AND: b3 = 0xE0; break;
 +                      case BPF_OR: b3 = 0xC8; break;
 +                      case BPF_XOR: b3 = 0xF0; break;
 +                      }
 +
 +                      if (is_imm8(K))
 +                              EMIT3(0x83, add_1reg(b3, a_reg), K);
 +                      else
 +                              EMIT2_off32(0x81, add_1reg(b3, a_reg), K);
 +                      break;
 +
 +              case BPF_ALU64 | BPF_MOV | BPF_K:
 +                      /* optimization: if imm32 is positive,
 +                       * use 'mov eax, imm32' (which zero-extends imm32)
 +                       * to save 2 bytes
                         */
 -                      if (seen_or_pass0 & SEEN_DATAREF) {
 -                              if (offsetof(struct sk_buff, len) <= 127)
 -                                      /* mov    off8(%rdi),%r9d */
 -                                      EMIT4(0x44, 0x8b, 0x4f, offsetof(struct sk_buff, len));
 -                              else {
 -                                      /* mov    off32(%rdi),%r9d */
 -                                      EMIT3(0x44, 0x8b, 0x8f);
 -                                      EMIT(offsetof(struct sk_buff, len), 4);
 -                              }
 -                              if (is_imm8(offsetof(struct sk_buff, data_len)))
 -                                      /* sub    off8(%rdi),%r9d */
 -                                      EMIT4(0x44, 0x2b, 0x4f, offsetof(struct sk_buff, data_len));
 -                              else {
 -                                      EMIT3(0x44, 0x2b, 0x8f);
 -                                      EMIT(offsetof(struct sk_buff, data_len), 4);
 -                              }
 +                      if (K < 0) {
 +                              /* 'mov rax, imm32' sign extends imm32 */
 +                              b1 = add_1mod(0x48, a_reg);
 +                              b2 = 0xC7;
 +                              b3 = 0xC0;
 +                              EMIT3_off32(b1, b2, add_1reg(b3, a_reg), K);
 +                              break;
 +                      }
  
 -                              if (is_imm8(offsetof(struct sk_buff, data)))
 -                                      /* mov off8(%rdi),%r8 */
 -                                      EMIT4(0x4c, 0x8b, 0x47, offsetof(struct sk_buff, data));
 -                              else {
 -                                      /* mov off32(%rdi),%r8 */
 -                                      EMIT3(0x4c, 0x8b, 0x87);
 -                                      EMIT(offsetof(struct sk_buff, data), 4);
 -                              }
 +              case BPF_ALU | BPF_MOV | BPF_K:
 +                      /* mov %eax, imm32 */
 +                      if (is_ereg(a_reg))
 +                              EMIT1(add_1mod(0x40, a_reg));
 +                      EMIT1_off32(add_1reg(0xB8, a_reg), K);
 +                      break;
 +
 +                      /* A %= X, A /= X, A %= K, A /= K */
 +              case BPF_ALU | BPF_MOD | BPF_X:
 +              case BPF_ALU | BPF_DIV | BPF_X:
 +              case BPF_ALU | BPF_MOD | BPF_K:
 +              case BPF_ALU | BPF_DIV | BPF_K:
 +              case BPF_ALU64 | BPF_MOD | BPF_X:
 +              case BPF_ALU64 | BPF_DIV | BPF_X:
 +              case BPF_ALU64 | BPF_MOD | BPF_K:
 +              case BPF_ALU64 | BPF_DIV | BPF_K:
 +                      EMIT1(0x50); /* push rax */
 +                      EMIT1(0x52); /* push rdx */
 +
 +                      if (BPF_SRC(insn->code) == BPF_X)
 +                              /* mov r11, X */
 +                              EMIT_mov(AUX_REG, x_reg);
 +                      else
 +                              /* mov r11, K */
 +                              EMIT3_off32(0x49, 0xC7, 0xC3, K);
 +
 +                      /* mov rax, A */
 +                      EMIT_mov(BPF_REG_0, a_reg);
 +
 +                      /* xor edx, edx
 +                       * equivalent to 'xor rdx, rdx', but one byte less
 +                       */
 +                      EMIT2(0x31, 0xd2);
 +
 +                      if (BPF_SRC(insn->code) == BPF_X) {
 +                              /* if (X == 0) return 0 */
 +
 +                              /* cmp r11, 0 */
 +                              EMIT4(0x49, 0x83, 0xFB, 0x00);
 +
 +                              /* jne .+9 (skip over pop, pop, xor and jmp) */
 +                              EMIT2(X86_JNE, 1 + 1 + 2 + 5);
 +                              EMIT1(0x5A); /* pop rdx */
 +                              EMIT1(0x58); /* pop rax */
 +                              EMIT2(0x31, 0xc0); /* xor eax, eax */
 +
 +                              /* jmp cleanup_addr
 +                               * addrs[i] - 11, because there are 11 bytes
 +                               * after this insn: div, mov, pop, pop, mov
 +                               */
 +                              jmp_offset = ctx->cleanup_addr - (addrs[i] - 11);
 +                              EMIT1_off32(0xE9, jmp_offset);
                        }
 -              }
  
 -              switch (filter[0].code) {
 -              case BPF_S_RET_K:
 -              case BPF_S_LD_W_LEN:
 -              case BPF_S_ANC_PROTOCOL:
 -              case BPF_S_ANC_IFINDEX:
 -              case BPF_S_ANC_MARK:
 -              case BPF_S_ANC_RXHASH:
 -              case BPF_S_ANC_CPU:
 -              case BPF_S_ANC_VLAN_TAG:
 -              case BPF_S_ANC_VLAN_TAG_PRESENT:
 -              case BPF_S_ANC_QUEUE:
 -              case BPF_S_ANC_PKTTYPE:
 -              case BPF_S_LD_W_ABS:
 -              case BPF_S_LD_H_ABS:
 -              case BPF_S_LD_B_ABS:
 -                      /* first instruction sets A register (or is RET 'constant') */
 +                      if (BPF_CLASS(insn->code) == BPF_ALU64)
 +                              /* div r11 */
 +                              EMIT3(0x49, 0xF7, 0xF3);
 +                      else
 +                              /* div r11d */
 +                              EMIT3(0x41, 0xF7, 0xF3);
 +
 +                      if (BPF_OP(insn->code) == BPF_MOD)
 +                              /* mov r11, rdx */
 +                              EMIT3(0x49, 0x89, 0xD3);
 +                      else
 +                              /* mov r11, rax */
 +                              EMIT3(0x49, 0x89, 0xC3);
 +
 +                      EMIT1(0x5A); /* pop rdx */
 +                      EMIT1(0x58); /* pop rax */
 +
 +                      /* mov A, r11 */
 +                      EMIT_mov(a_reg, AUX_REG);
                        break;
 -              default:
 -                      /* make sure we dont leak kernel information to user */
 -                      CLEAR_A(); /* A = 0 */
 -              }
  
 -              for (i = 0; i < flen; i++) {
 -                      unsigned int K = filter[i].k;
 +              case BPF_ALU | BPF_MUL | BPF_K:
 +              case BPF_ALU | BPF_MUL | BPF_X:
 +              case BPF_ALU64 | BPF_MUL | BPF_K:
 +              case BPF_ALU64 | BPF_MUL | BPF_X:
 +                      EMIT1(0x50); /* push rax */
 +                      EMIT1(0x52); /* push rdx */
 +
 +                      /* mov r11, A */
 +                      EMIT_mov(AUX_REG, a_reg);
 +
 +                      if (BPF_SRC(insn->code) == BPF_X)
 +                              /* mov rax, X */
 +                              EMIT_mov(BPF_REG_0, x_reg);
 +                      else
 +                              /* mov rax, K */
 +                              EMIT3_off32(0x48, 0xC7, 0xC0, K);
 +
 +                      if (BPF_CLASS(insn->code) == BPF_ALU64)
 +                              EMIT1(add_1mod(0x48, AUX_REG));
 +                      else if (is_ereg(AUX_REG))
 +                              EMIT1(add_1mod(0x40, AUX_REG));
 +                      /* mul(q) r11 */
 +                      EMIT2(0xF7, add_1reg(0xE0, AUX_REG));
 +
 +                      /* mov r11, rax */
 +                      EMIT_mov(AUX_REG, BPF_REG_0);
 +
 +                      EMIT1(0x5A); /* pop rdx */
 +                      EMIT1(0x58); /* pop rax */
 +
 +                      /* mov A, r11 */
 +                      EMIT_mov(a_reg, AUX_REG);
 +                      break;
  
 -                      switch (filter[i].code) {
 -                      case BPF_S_ALU_ADD_X: /* A += X; */
 -                              seen |= SEEN_XREG;
 -                              EMIT2(0x01, 0xd8);              /* add %ebx,%eax */
 -                              break;
 -                      case BPF_S_ALU_ADD_K: /* A += K; */
 -                              if (!K)
 -                                      break;
 -                              if (is_imm8(K))
 -                                      EMIT3(0x83, 0xc0, K);   /* add imm8,%eax */
 -                              else
 -                                      EMIT1_off32(0x05, K);   /* add imm32,%eax */
 -                              break;
 -                      case BPF_S_ALU_SUB_X: /* A -= X; */
 -                              seen |= SEEN_XREG;
 -                              EMIT2(0x29, 0xd8);              /* sub    %ebx,%eax */
 -                              break;
 -                      case BPF_S_ALU_SUB_K: /* A -= K */
 -                              if (!K)
 -                                      break;
 -                              if (is_imm8(K))
 -                                      EMIT3(0x83, 0xe8, K); /* sub imm8,%eax */
 -                              else
 -                                      EMIT1_off32(0x2d, K); /* sub imm32,%eax */
 -                              break;
 -                      case BPF_S_ALU_MUL_X: /* A *= X; */
 -                              seen |= SEEN_XREG;
 -                              EMIT3(0x0f, 0xaf, 0xc3);        /* imul %ebx,%eax */
 -                              break;
 -                      case BPF_S_ALU_MUL_K: /* A *= K */
 -                              if (is_imm8(K))
 -                                      EMIT3(0x6b, 0xc0, K); /* imul imm8,%eax,%eax */
 -                              else {
 -                                      EMIT2(0x69, 0xc0);              /* imul imm32,%eax */
 -                                      EMIT(K, 4);
 -                              }
 -                              break;
 -                      case BPF_S_ALU_DIV_X: /* A /= X; */
 -                              seen |= SEEN_XREG;
 -                              EMIT2(0x85, 0xdb);      /* test %ebx,%ebx */
 -                              if (pc_ret0 > 0) {
 -                                      /* addrs[pc_ret0 - 1] is start address of target
 -                                       * (addrs[i] - 4) is the address following this jmp
 -                                       * ("xor %edx,%edx; div %ebx" being 4 bytes long)
 -                                       */
 -                                      EMIT_COND_JMP(X86_JE, addrs[pc_ret0 - 1] -
 -                                                              (addrs[i] - 4));
 -                              } else {
 -                                      EMIT_COND_JMP(X86_JNE, 2 + 5);
 -                                      CLEAR_A();
 -                                      EMIT1_off32(0xe9, cleanup_addr - (addrs[i] - 4)); /* jmp .+off32 */
 -                              }
 -                              EMIT4(0x31, 0xd2, 0xf7, 0xf3); /* xor %edx,%edx; div %ebx */
 -                              break;
 -                      case BPF_S_ALU_MOD_X: /* A %= X; */
 -                              seen |= SEEN_XREG;
 -                              EMIT2(0x85, 0xdb);      /* test %ebx,%ebx */
 -                              if (pc_ret0 > 0) {
 -                                      /* addrs[pc_ret0 - 1] is start address of target
 -                                       * (addrs[i] - 6) is the address following this jmp
 -                                       * ("xor %edx,%edx; div %ebx;mov %edx,%eax" being 6 bytes long)
 -                                       */
 -                                      EMIT_COND_JMP(X86_JE, addrs[pc_ret0 - 1] -
 -                                                              (addrs[i] - 6));
 -                              } else {
 -                                      EMIT_COND_JMP(X86_JNE, 2 + 5);
 -                                      CLEAR_A();
 -                                      EMIT1_off32(0xe9, cleanup_addr - (addrs[i] - 6)); /* jmp .+off32 */
 -                              }
 -                              EMIT2(0x31, 0xd2);      /* xor %edx,%edx */
 -                              EMIT2(0xf7, 0xf3);      /* div %ebx */
 -                              EMIT2(0x89, 0xd0);      /* mov %edx,%eax */
 -                              break;
 -                      case BPF_S_ALU_MOD_K: /* A %= K; */
 -                              if (K == 1) {
 -                                      CLEAR_A();
 -                                      break;
 -                              }
 -                              EMIT2(0x31, 0xd2);      /* xor %edx,%edx */
 -                              EMIT1(0xb9);EMIT(K, 4); /* mov imm32,%ecx */
 -                              EMIT2(0xf7, 0xf1);      /* div %ecx */
 -                              EMIT2(0x89, 0xd0);      /* mov %edx,%eax */
 -                              break;
 -                      case BPF_S_ALU_DIV_K: /* A /= K */
 -                              if (K == 1)
 -                                      break;
 -                              EMIT2(0x31, 0xd2);      /* xor %edx,%edx */
 -                              EMIT1(0xb9);EMIT(K, 4); /* mov imm32,%ecx */
 -                              EMIT2(0xf7, 0xf1);      /* div %ecx */
 -                              break;
 -                      case BPF_S_ALU_AND_X:
 -                              seen |= SEEN_XREG;
 -                              EMIT2(0x21, 0xd8);              /* and %ebx,%eax */
 -                              break;
 -                      case BPF_S_ALU_AND_K:
 -                              if (K >= 0xFFFFFF00) {
 -                                      EMIT2(0x24, K & 0xFF); /* and imm8,%al */
 -                              } else if (K >= 0xFFFF0000) {
 -                                      EMIT2(0x66, 0x25);      /* and imm16,%ax */
 -                                      EMIT(K, 2);
 -                              } else {
 -                                      EMIT1_off32(0x25, K);   /* and imm32,%eax */
 -                              }
 -                              break;
 -                      case BPF_S_ALU_OR_X:
 -                              seen |= SEEN_XREG;
 -                              EMIT2(0x09, 0xd8);              /* or %ebx,%eax */
 -                              break;
 -                      case BPF_S_ALU_OR_K:
 -                              if (is_imm8(K))
 -                                      EMIT3(0x83, 0xc8, K); /* or imm8,%eax */
 -                              else
 -                                      EMIT1_off32(0x0d, K);   /* or imm32,%eax */
 -                              break;
 -                      case BPF_S_ANC_ALU_XOR_X: /* A ^= X; */
 -                      case BPF_S_ALU_XOR_X:
 -                              seen |= SEEN_XREG;
 -                              EMIT2(0x31, 0xd8);              /* xor %ebx,%eax */
 -                              break;
 -                      case BPF_S_ALU_XOR_K: /* A ^= K; */
 -                              if (K == 0)
 -                                      break;
 -                              if (is_imm8(K))
 -                                      EMIT3(0x83, 0xf0, K);   /* xor imm8,%eax */
 -                              else
 -                                      EMIT1_off32(0x35, K);   /* xor imm32,%eax */
 -                              break;
 -                      case BPF_S_ALU_LSH_X: /* A <<= X; */
 -                              seen |= SEEN_XREG;
 -                              EMIT4(0x89, 0xd9, 0xd3, 0xe0);  /* mov %ebx,%ecx; shl %cl,%eax */
 -                              break;
 -                      case BPF_S_ALU_LSH_K:
 -                              if (K == 0)
 -                                      break;
 -                              else if (K == 1)
 -                                      EMIT2(0xd1, 0xe0); /* shl %eax */
 -                              else
 -                                      EMIT3(0xc1, 0xe0, K);
 -                              break;
 -                      case BPF_S_ALU_RSH_X: /* A >>= X; */
 -                              seen |= SEEN_XREG;
 -                              EMIT4(0x89, 0xd9, 0xd3, 0xe8);  /* mov %ebx,%ecx; shr %cl,%eax */
 -                              break;
 -                      case BPF_S_ALU_RSH_K: /* A >>= K; */
 -                              if (K == 0)
 -                                      break;
 -                              else if (K == 1)
 -                                      EMIT2(0xd1, 0xe8); /* shr %eax */
 -                              else
 -                                      EMIT3(0xc1, 0xe8, K);
 -                              break;
 -                      case BPF_S_ALU_NEG:
 -                              EMIT2(0xf7, 0xd8);              /* neg %eax */
 -                              break;
 -                      case BPF_S_RET_K:
 -                              if (!K) {
 -                                      if (pc_ret0 == -1)
 -                                              pc_ret0 = i;
 -                                      CLEAR_A();
 -                              } else {
 -                                      EMIT1_off32(0xb8, K);   /* mov $imm32,%eax */
 -                              }
 -                              /* fallinto */
 -                      case BPF_S_RET_A:
 -                              if (seen_or_pass0) {
 -                                      if (i != flen - 1) {
 -                                              EMIT_JMP(cleanup_addr - addrs[i]);
 -                                              break;
 -                                      }
 -                                      if (seen_or_pass0 & SEEN_XREG)
 -                                              EMIT4(0x48, 0x8b, 0x5d, 0xf8);  /* mov  -8(%rbp),%rbx */
 -                                      EMIT1(0xc9);            /* leaveq */
 -                              }
 -                              EMIT1(0xc3);            /* ret */
 -                              break;
 -                      case BPF_S_MISC_TAX: /* X = A */
 -                              seen |= SEEN_XREG;
 -                              EMIT2(0x89, 0xc3);      /* mov    %eax,%ebx */
 -                              break;
 -                      case BPF_S_MISC_TXA: /* A = X */
 -                              seen |= SEEN_XREG;
 -                              EMIT2(0x89, 0xd8);      /* mov    %ebx,%eax */
 -                              break;
 -                      case BPF_S_LD_IMM: /* A = K */
 -                              if (!K)
 -                                      CLEAR_A();
 -                              else
 -                                      EMIT1_off32(0xb8, K); /* mov $imm32,%eax */
 -                              break;
 -                      case BPF_S_LDX_IMM: /* X = K */
 -                              seen |= SEEN_XREG;
 -                              if (!K)
 -                                      CLEAR_X();
 +                      /* shifts */
 +              case BPF_ALU | BPF_LSH | BPF_K:
 +              case BPF_ALU | BPF_RSH | BPF_K:
 +              case BPF_ALU | BPF_ARSH | BPF_K:
 +              case BPF_ALU64 | BPF_LSH | BPF_K:
 +              case BPF_ALU64 | BPF_RSH | BPF_K:
 +              case BPF_ALU64 | BPF_ARSH | BPF_K:
 +                      if (BPF_CLASS(insn->code) == BPF_ALU64)
 +                              EMIT1(add_1mod(0x48, a_reg));
 +                      else if (is_ereg(a_reg))
 +                              EMIT1(add_1mod(0x40, a_reg));
 +
 +                      switch (BPF_OP(insn->code)) {
 +                      case BPF_LSH: b3 = 0xE0; break;
 +                      case BPF_RSH: b3 = 0xE8; break;
 +                      case BPF_ARSH: b3 = 0xF8; break;
 +                      }
 +                      EMIT3(0xC1, add_1reg(b3, a_reg), K);
 +                      break;
 +
 +              case BPF_ALU | BPF_END | BPF_FROM_BE:
 +                      switch (K) {
 +                      case 16:
 +                              /* emit 'ror %ax, 8' to swap lower 2 bytes */
 +                              EMIT1(0x66);
 +                              if (is_ereg(a_reg))
 +                                      EMIT1(0x41);
 +                              EMIT3(0xC1, add_1reg(0xC8, a_reg), 8);
 +                              break;
 +                      case 32:
 +                              /* emit 'bswap eax' to swap lower 4 bytes */
 +                              if (is_ereg(a_reg))
 +                                      EMIT2(0x41, 0x0F);
                                else
 -                                      EMIT1_off32(0xbb, K); /* mov $imm32,%ebx */
 -                              break;
 -                      case BPF_S_LD_MEM: /* A = mem[K] : mov off8(%rbp),%eax */
 -                              seen |= SEEN_MEM;
 -                              EMIT3(0x8b, 0x45, 0xf0 - K*4);
 -                              break;
 -                      case BPF_S_LDX_MEM: /* X = mem[K] : mov off8(%rbp),%ebx */
 -                              seen |= SEEN_XREG | SEEN_MEM;
 -                              EMIT3(0x8b, 0x5d, 0xf0 - K*4);
 -                              break;
 -                      case BPF_S_ST: /* mem[K] = A : mov %eax,off8(%rbp) */
 -                              seen |= SEEN_MEM;
 -                              EMIT3(0x89, 0x45, 0xf0 - K*4);
 -                              break;
 -                      case BPF_S_STX: /* mem[K] = X : mov %ebx,off8(%rbp) */
 -                              seen |= SEEN_XREG | SEEN_MEM;
 -                              EMIT3(0x89, 0x5d, 0xf0 - K*4);
 -                              break;
 -                      case BPF_S_LD_W_LEN: /* A = skb->len; */
 -                              BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
 -                              if (is_imm8(offsetof(struct sk_buff, len)))
 -                                      /* mov    off8(%rdi),%eax */
 -                                      EMIT3(0x8b, 0x47, offsetof(struct sk_buff, len));
 -                              else {
 -                                      EMIT2(0x8b, 0x87);
 -                                      EMIT(offsetof(struct sk_buff, len), 4);
 -                              }
 -                              break;
 -                      case BPF_S_LDX_W_LEN: /* X = skb->len; */
 -                              seen |= SEEN_XREG;
 -                              if (is_imm8(offsetof(struct sk_buff, len)))
 -                                      /* mov off8(%rdi),%ebx */
 -                                      EMIT3(0x8b, 0x5f, offsetof(struct sk_buff, len));
 -                              else {
 -                                      EMIT2(0x8b, 0x9f);
 -                                      EMIT(offsetof(struct sk_buff, len), 4);
 -                              }
 -                              break;
 -                      case BPF_S_ANC_PROTOCOL: /* A = ntohs(skb->protocol); */
 -                              BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);
 -                              if (is_imm8(offsetof(struct sk_buff, protocol))) {
 -                                      /* movzwl off8(%rdi),%eax */
 -                                      EMIT4(0x0f, 0xb7, 0x47, offsetof(struct sk_buff, protocol));
 -                              } else {
 -                                      EMIT3(0x0f, 0xb7, 0x87); /* movzwl off32(%rdi),%eax */
 -                                      EMIT(offsetof(struct sk_buff, protocol), 4);
 -                              }
 -                              EMIT2(0x86, 0xc4); /* ntohs() : xchg   %al,%ah */
 -                              break;
 -                      case BPF_S_ANC_IFINDEX:
 -                              if (is_imm8(offsetof(struct sk_buff, dev))) {
 -                                      /* movq off8(%rdi),%rax */
 -                                      EMIT4(0x48, 0x8b, 0x47, offsetof(struct sk_buff, dev));
 -                              } else {
 -                                      EMIT3(0x48, 0x8b, 0x87); /* movq off32(%rdi),%rax */
 -                                      EMIT(offsetof(struct sk_buff, dev), 4);
 -                              }
 -                              EMIT3(0x48, 0x85, 0xc0);        /* test %rax,%rax */
 -                              EMIT_COND_JMP(X86_JE, cleanup_addr - (addrs[i] - 6));
 -                              BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
 -                              EMIT2(0x8b, 0x80);      /* mov off32(%rax),%eax */
 -                              EMIT(offsetof(struct net_device, ifindex), 4);
 +                                      EMIT1(0x0F);
 +                              EMIT1(add_1reg(0xC8, a_reg));
                                break;
 -                      case BPF_S_ANC_MARK:
 -                              BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
 -                              if (is_imm8(offsetof(struct sk_buff, mark))) {
 -                                      /* mov off8(%rdi),%eax */
 -                                      EMIT3(0x8b, 0x47, offsetof(struct sk_buff, mark));
 -                              } else {
 -                                      EMIT2(0x8b, 0x87);
 -                                      EMIT(offsetof(struct sk_buff, mark), 4);
 -                              }
 -                              break;
 -                      case BPF_S_ANC_RXHASH:
 -                              BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
 -                              if (is_imm8(offsetof(struct sk_buff, hash))) {
 -                                      /* mov off8(%rdi),%eax */
 -                                      EMIT3(0x8b, 0x47, offsetof(struct sk_buff, hash));
 -                              } else {
 -                                      EMIT2(0x8b, 0x87);
 -                                      EMIT(offsetof(struct sk_buff, hash), 4);
 -                              }
 -                              break;
 -                      case BPF_S_ANC_QUEUE:
 -                              BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2);
 -                              if (is_imm8(offsetof(struct sk_buff, queue_mapping))) {
 -                                      /* movzwl off8(%rdi),%eax */
 -                                      EMIT4(0x0f, 0xb7, 0x47, offsetof(struct sk_buff, queue_mapping));
 -                              } else {
 -                                      EMIT3(0x0f, 0xb7, 0x87); /* movzwl off32(%rdi),%eax */
 -                                      EMIT(offsetof(struct sk_buff, queue_mapping), 4);
 -                              }
 -                              break;
 -                      case BPF_S_ANC_CPU:
 -#ifdef CONFIG_SMP
 -                              EMIT4(0x65, 0x8b, 0x04, 0x25); /* mov %gs:off32,%eax */
 -                              EMIT((u32)(unsigned long)&cpu_number, 4); /* A = smp_processor_id(); */
 -#else
 -                              CLEAR_A();
 -#endif
 -                              break;
 -                      case BPF_S_ANC_VLAN_TAG:
 -                      case BPF_S_ANC_VLAN_TAG_PRESENT:
 -                              BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
 -                              if (is_imm8(offsetof(struct sk_buff, vlan_tci))) {
 -                                      /* movzwl off8(%rdi),%eax */
 -                                      EMIT4(0x0f, 0xb7, 0x47, offsetof(struct sk_buff, vlan_tci));
 -                              } else {
 -                                      EMIT3(0x0f, 0xb7, 0x87); /* movzwl off32(%rdi),%eax */
 -                                      EMIT(offsetof(struct sk_buff, vlan_tci), 4);
 -                              }
 -                              BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);
 -                              if (filter[i].code == BPF_S_ANC_VLAN_TAG) {
 -                                      EMIT3(0x80, 0xe4, 0xef); /* and    $0xef,%ah */
 -                              } else {
 -                                      EMIT3(0xc1, 0xe8, 0x0c); /* shr    $0xc,%eax */
 -                                      EMIT3(0x83, 0xe0, 0x01); /* and    $0x1,%eax */
 -                              }
 -                              break;
 -                      case BPF_S_ANC_PKTTYPE:
 -                      {
 -                              int off = pkt_type_offset();
 -
 -                              if (off < 0)
 -                                      goto out;
 -                              if (is_imm8(off)) {
 -                                      /* movzbl off8(%rdi),%eax */
 -                                      EMIT4(0x0f, 0xb6, 0x47, off);
 -                              } else {
 -                                      /* movbl off32(%rdi),%eax */
 -                                      EMIT3(0x0f, 0xb6, 0x87);
 -                                      EMIT(off, 4);
 -                              }
 -                              EMIT3(0x83, 0xe0, PKT_TYPE_MAX); /* and    $0x7,%eax */
 +                      case 64:
 +                              /* emit 'bswap rax' to swap 8 bytes */
 +                              EMIT3(add_1mod(0x48, a_reg), 0x0F,
 +                                    add_1reg(0xC8, a_reg));
                                break;
                        }
 -                      case BPF_S_LD_W_ABS:
 -                              func = CHOOSE_LOAD_FUNC(K, sk_load_word);
 -common_load:                  seen |= SEEN_DATAREF;
 -                              t_offset = func - (image + addrs[i]);
 -                              EMIT1_off32(0xbe, K); /* mov imm32,%esi */
 -                              EMIT1_off32(0xe8, t_offset); /* call */
 -                              break;
 -                      case BPF_S_LD_H_ABS:
 -                              func = CHOOSE_LOAD_FUNC(K, sk_load_half);
 -                              goto common_load;
 -                      case BPF_S_LD_B_ABS:
 -                              func = CHOOSE_LOAD_FUNC(K, sk_load_byte);
 -                              goto common_load;
 -                      case BPF_S_LDX_B_MSH:
 -                              func = CHOOSE_LOAD_FUNC(K, sk_load_byte_msh);
 -                              seen |= SEEN_DATAREF | SEEN_XREG;
 -                              t_offset = func - (image + addrs[i]);
 -                              EMIT1_off32(0xbe, K);   /* mov imm32,%esi */
 -                              EMIT1_off32(0xe8, t_offset); /* call sk_load_byte_msh */
 -                              break;
 -                      case BPF_S_LD_W_IND:
 -                              func = sk_load_word;
 -common_load_ind:              seen |= SEEN_DATAREF | SEEN_XREG;
 -                              t_offset = func - (image + addrs[i]);
 -                              if (K) {
 -                                      if (is_imm8(K)) {
 -                                              EMIT3(0x8d, 0x73, K); /* lea imm8(%rbx), %esi */
 -                                      } else {
 -                                              EMIT2(0x8d, 0xb3); /* lea imm32(%rbx),%esi */
 -                                              EMIT(K, 4);
 -                                      }
 -                              } else {
 -                                      EMIT2(0x89,0xde); /* mov %ebx,%esi */
 -                              }
 -                              EMIT1_off32(0xe8, t_offset);    /* call sk_load_xxx_ind */
 -                              break;
 -                      case BPF_S_LD_H_IND:
 -                              func = sk_load_half;
 -                              goto common_load_ind;
 -                      case BPF_S_LD_B_IND:
 -                              func = sk_load_byte;
 -                              goto common_load_ind;
 -                      case BPF_S_JMP_JA:
 -                              t_offset = addrs[i + K] - addrs[i];
 -                              EMIT_JMP(t_offset);
 -                              break;
 -                      COND_SEL(BPF_S_JMP_JGT_K, X86_JA, X86_JBE);
 -                      COND_SEL(BPF_S_JMP_JGE_K, X86_JAE, X86_JB);
 -                      COND_SEL(BPF_S_JMP_JEQ_K, X86_JE, X86_JNE);
 -                      COND_SEL(BPF_S_JMP_JSET_K,X86_JNE, X86_JE);
 -                      COND_SEL(BPF_S_JMP_JGT_X, X86_JA, X86_JBE);
 -                      COND_SEL(BPF_S_JMP_JGE_X, X86_JAE, X86_JB);
 -                      COND_SEL(BPF_S_JMP_JEQ_X, X86_JE, X86_JNE);
 -                      COND_SEL(BPF_S_JMP_JSET_X,X86_JNE, X86_JE);
 -
 -cond_branch:                  f_offset = addrs[i + filter[i].jf] - addrs[i];
 -                              t_offset = addrs[i + filter[i].jt] - addrs[i];
 -
 -                              /* same targets, can avoid doing the test :) */
 -                              if (filter[i].jt == filter[i].jf) {
 -                                      EMIT_JMP(t_offset);
 -                                      break;
 -                              }
 +                      break;
 +
 +              case BPF_ALU | BPF_END | BPF_FROM_LE:
 +                      break;
  
 -                              switch (filter[i].code) {
 -                              case BPF_S_JMP_JGT_X:
 -                              case BPF_S_JMP_JGE_X:
 -                              case BPF_S_JMP_JEQ_X:
 -                                      seen |= SEEN_XREG;
 -                                      EMIT2(0x39, 0xd8); /* cmp %ebx,%eax */
 -                                      break;
 -                              case BPF_S_JMP_JSET_X:
 -                                      seen |= SEEN_XREG;
 -                                      EMIT2(0x85, 0xd8); /* test %ebx,%eax */
 -                                      break;
 -                              case BPF_S_JMP_JEQ_K:
 -                                      if (K == 0) {
 -                                              EMIT2(0x85, 0xc0); /* test   %eax,%eax */
 -                                              break;
 -                                      }
 -                              case BPF_S_JMP_JGT_K:
 -                              case BPF_S_JMP_JGE_K:
 -                                      if (K <= 127)
 -                                              EMIT3(0x83, 0xf8, K); /* cmp imm8,%eax */
 +                      /* ST: *(u8*)(a_reg + off) = imm */
 +              case BPF_ST | BPF_MEM | BPF_B:
 +                      if (is_ereg(a_reg))
 +                              EMIT2(0x41, 0xC6);
 +                      else
 +                              EMIT1(0xC6);
 +                      goto st;
 +              case BPF_ST | BPF_MEM | BPF_H:
 +                      if (is_ereg(a_reg))
 +                              EMIT3(0x66, 0x41, 0xC7);
 +                      else
 +                              EMIT2(0x66, 0xC7);
 +                      goto st;
 +              case BPF_ST | BPF_MEM | BPF_W:
 +                      if (is_ereg(a_reg))
 +                              EMIT2(0x41, 0xC7);
 +                      else
 +                              EMIT1(0xC7);
 +                      goto st;
 +              case BPF_ST | BPF_MEM | BPF_DW:
 +                      EMIT2(add_1mod(0x48, a_reg), 0xC7);
 +
 +st:                   if (is_imm8(insn->off))
 +                              EMIT2(add_1reg(0x40, a_reg), insn->off);
 +                      else
 +                              EMIT1_off32(add_1reg(0x80, a_reg), insn->off);
 +
 +                      EMIT(K, bpf_size_to_x86_bytes(BPF_SIZE(insn->code)));
 +                      break;
 +
 +                      /* STX: *(u8*)(a_reg + off) = x_reg */
 +              case BPF_STX | BPF_MEM | BPF_B:
 +                      /* emit 'mov byte ptr [rax + off], al' */
 +                      if (is_ereg(a_reg) || is_ereg(x_reg) ||
 +                          /* have to add extra byte for x86 SIL, DIL regs */
 +                          x_reg == BPF_REG_1 || x_reg == BPF_REG_2)
 +                              EMIT2(add_2mod(0x40, a_reg, x_reg), 0x88);
 +                      else
 +                              EMIT1(0x88);
 +                      goto stx;
 +              case BPF_STX | BPF_MEM | BPF_H:
 +                      if (is_ereg(a_reg) || is_ereg(x_reg))
 +                              EMIT3(0x66, add_2mod(0x40, a_reg, x_reg), 0x89);
 +                      else
 +                              EMIT2(0x66, 0x89);
 +                      goto stx;
 +              case BPF_STX | BPF_MEM | BPF_W:
 +                      if (is_ereg(a_reg) || is_ereg(x_reg))
 +                              EMIT2(add_2mod(0x40, a_reg, x_reg), 0x89);
 +                      else
 +                              EMIT1(0x89);
 +                      goto stx;
 +              case BPF_STX | BPF_MEM | BPF_DW:
 +                      EMIT2(add_2mod(0x48, a_reg, x_reg), 0x89);
 +stx:                  if (is_imm8(insn->off))
 +                              EMIT2(add_2reg(0x40, a_reg, x_reg), insn->off);
 +                      else
 +                              EMIT1_off32(add_2reg(0x80, a_reg, x_reg),
 +                                          insn->off);
 +                      break;
 +
 +                      /* LDX: a_reg = *(u8*)(x_reg + off) */
 +              case BPF_LDX | BPF_MEM | BPF_B:
 +                      /* emit 'movzx rax, byte ptr [rax + off]' */
 +                      EMIT3(add_2mod(0x48, x_reg, a_reg), 0x0F, 0xB6);
 +                      goto ldx;
 +              case BPF_LDX | BPF_MEM | BPF_H:
 +                      /* emit 'movzx rax, word ptr [rax + off]' */
 +                      EMIT3(add_2mod(0x48, x_reg, a_reg), 0x0F, 0xB7);
 +                      goto ldx;
 +              case BPF_LDX | BPF_MEM | BPF_W:
 +                      /* emit 'mov eax, dword ptr [rax+0x14]' */
 +                      if (is_ereg(a_reg) || is_ereg(x_reg))
 +                              EMIT2(add_2mod(0x40, x_reg, a_reg), 0x8B);
 +                      else
 +                              EMIT1(0x8B);
 +                      goto ldx;
 +              case BPF_LDX | BPF_MEM | BPF_DW:
 +                      /* emit 'mov rax, qword ptr [rax+0x14]' */
 +                      EMIT2(add_2mod(0x48, x_reg, a_reg), 0x8B);
 +ldx:                  /* If insn->off == 0 we could save one extra byte, but
 +                       * the special case of x86 r13, which always needs an
 +                       * offset, is not worth the hassle.
 +                       */
 +                      if (is_imm8(insn->off))
 +                              EMIT2(add_2reg(0x40, x_reg, a_reg), insn->off);
 +                      else
 +                              EMIT1_off32(add_2reg(0x80, x_reg, a_reg),
 +                                          insn->off);
 +                      break;
 +
 +                      /* STX XADD: lock *(u32*)(a_reg + off) += x_reg */
 +              case BPF_STX | BPF_XADD | BPF_W:
 +                      /* emit 'lock add dword ptr [rax + off], eax' */
 +                      if (is_ereg(a_reg) || is_ereg(x_reg))
 +                              EMIT3(0xF0, add_2mod(0x40, a_reg, x_reg), 0x01);
 +                      else
 +                              EMIT2(0xF0, 0x01);
 +                      goto xadd;
 +              case BPF_STX | BPF_XADD | BPF_DW:
 +                      EMIT3(0xF0, add_2mod(0x48, a_reg, x_reg), 0x01);
 +xadd:                 if (is_imm8(insn->off))
 +                              EMIT2(add_2reg(0x40, a_reg, x_reg), insn->off);
 +                      else
 +                              EMIT1_off32(add_2reg(0x80, a_reg, x_reg),
 +                                          insn->off);
 +                      break;
 +
 +                      /* call */
 +              case BPF_JMP | BPF_CALL:
 +                      func = (u8 *) __bpf_call_base + K;
 +                      jmp_offset = func - (image + addrs[i]);
 +                      if (ctx->seen_ld_abs) {
 +                              EMIT2(0x41, 0x52); /* push %r10 */
 +                              EMIT2(0x41, 0x51); /* push %r9 */
 +                              /* need to adjust jmp offset, since
 +                               * pop %r9, pop %r10 take 4 bytes after call insn
 +                               */
 +                              jmp_offset += 4;
 +                      }
 +                      if (!K || !is_simm32(jmp_offset)) {
 +                              pr_err("unsupported bpf func %d addr %p image %p\n",
 +                                     K, func, image);
 +                              return -EINVAL;
 +                      }
 +                      EMIT1_off32(0xE8, jmp_offset);
 +                      if (ctx->seen_ld_abs) {
 +                              EMIT2(0x41, 0x59); /* pop %r9 */
 +                              EMIT2(0x41, 0x5A); /* pop %r10 */
 +                      }
 +                      break;
 +
 +                      /* cond jump */
 +              case BPF_JMP | BPF_JEQ | BPF_X:
 +              case BPF_JMP | BPF_JNE | BPF_X:
 +              case BPF_JMP | BPF_JGT | BPF_X:
 +              case BPF_JMP | BPF_JGE | BPF_X:
 +              case BPF_JMP | BPF_JSGT | BPF_X:
 +              case BPF_JMP | BPF_JSGE | BPF_X:
 +                      /* cmp a_reg, x_reg */
 +                      EMIT3(add_2mod(0x48, a_reg, x_reg), 0x39,
 +                            add_2reg(0xC0, a_reg, x_reg));
 +                      goto emit_cond_jmp;
 +
 +              case BPF_JMP | BPF_JSET | BPF_X:
 +                      /* test a_reg, x_reg */
 +                      EMIT3(add_2mod(0x48, a_reg, x_reg), 0x85,
 +                            add_2reg(0xC0, a_reg, x_reg));
 +                      goto emit_cond_jmp;
 +
 +              case BPF_JMP | BPF_JSET | BPF_K:
 +                      /* test a_reg, imm32 */
 +                      EMIT1(add_1mod(0x48, a_reg));
 +                      EMIT2_off32(0xF7, add_1reg(0xC0, a_reg), K);
 +                      goto emit_cond_jmp;
 +
 +              case BPF_JMP | BPF_JEQ | BPF_K:
 +              case BPF_JMP | BPF_JNE | BPF_K:
 +              case BPF_JMP | BPF_JGT | BPF_K:
 +              case BPF_JMP | BPF_JGE | BPF_K:
 +              case BPF_JMP | BPF_JSGT | BPF_K:
 +              case BPF_JMP | BPF_JSGE | BPF_K:
 +                      /* cmp a_reg, imm8/32 */
 +                      EMIT1(add_1mod(0x48, a_reg));
 +
 +                      if (is_imm8(K))
 +                              EMIT3(0x83, add_1reg(0xF8, a_reg), K);
 +                      else
 +                              EMIT2_off32(0x81, add_1reg(0xF8, a_reg), K);
 +
 +emit_cond_jmp:                /* convert BPF opcode to x86 */
 +                      switch (BPF_OP(insn->code)) {
 +                      case BPF_JEQ:
 +                              jmp_cond = X86_JE;
 +                              break;
 +                      case BPF_JSET:
 +                      case BPF_JNE:
 +                              jmp_cond = X86_JNE;
 +                              break;
 +                      case BPF_JGT:
 +                              /* GT is unsigned '>', JA in x86 */
 +                              jmp_cond = X86_JA;
 +                              break;
 +                      case BPF_JGE:
 +                              /* GE is unsigned '>=', JAE in x86 */
 +                              jmp_cond = X86_JAE;
 +                              break;
 +                      case BPF_JSGT:
 +                              /* signed '>', GT in x86 */
 +                              jmp_cond = X86_JG;
 +                              break;
 +                      case BPF_JSGE:
 +                              /* signed '>=', GE in x86 */
 +                              jmp_cond = X86_JGE;
 +                              break;
 +                      default: /* to silence gcc warning */
 +                              return -EFAULT;
 +                      }
 +                      jmp_offset = addrs[i + insn->off] - addrs[i];
 +                      if (is_imm8(jmp_offset)) {
 +                              EMIT2(jmp_cond, jmp_offset);
 +                      } else if (is_simm32(jmp_offset)) {
 +                              EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
 +                      } else {
 +                              pr_err("cond_jmp gen bug %llx\n", jmp_offset);
 +                              return -EFAULT;
 +                      }
 +
 +                      break;
 +
 +              case BPF_JMP | BPF_JA:
 +                      jmp_offset = addrs[i + insn->off] - addrs[i];
 +                      if (!jmp_offset)
 +                              /* optimize out nop jumps */
 +                              break;
 +emit_jmp:
 +                      if (is_imm8(jmp_offset)) {
 +                              EMIT2(0xEB, jmp_offset);
 +                      } else if (is_simm32(jmp_offset)) {
 +                              EMIT1_off32(0xE9, jmp_offset);
 +                      } else {
 +                              pr_err("jmp gen bug %llx\n", jmp_offset);
 +                              return -EFAULT;
 +                      }
 +                      break;
 +
 +              case BPF_LD | BPF_IND | BPF_W:
 +                      func = sk_load_word;
 +                      goto common_load;
 +              case BPF_LD | BPF_ABS | BPF_W:
 +                      func = CHOOSE_LOAD_FUNC(K, sk_load_word);
 +common_load:          ctx->seen_ld_abs = true;
 +                      jmp_offset = func - (image + addrs[i]);
 +                      if (!func || !is_simm32(jmp_offset)) {
 +                              pr_err("unsupported bpf func %d addr %p image %p\n",
 +                                     K, func, image);
 +                              return -EINVAL;
 +                      }
 +                      if (BPF_MODE(insn->code) == BPF_ABS) {
 +                              /* mov %esi, imm32 */
 +                              EMIT1_off32(0xBE, K);
 +                      } else {
 +                              /* mov %rsi, x_reg */
 +                              EMIT_mov(BPF_REG_2, x_reg);
 +                              if (K) {
 +                                      if (is_imm8(K))
 +                                              /* add %esi, imm8 */
 +                                              EMIT3(0x83, 0xC6, K);
                                        else
 -                                              EMIT1_off32(0x3d, K); /* cmp imm32,%eax */
 -                                      break;
 -                              case BPF_S_JMP_JSET_K:
 -                                      if (K <= 0xFF)
 -                                              EMIT2(0xa8, K); /* test imm8,%al */
 -                                      else if (!(K & 0xFFFF00FF))
 -                                              EMIT3(0xf6, 0xc4, K >> 8); /* test imm8,%ah */
 -                                      else if (K <= 0xFFFF) {
 -                                              EMIT2(0x66, 0xa9); /* test imm16,%ax */
 -                                              EMIT(K, 2);
 -                                      } else {
 -                                              EMIT1_off32(0xa9, K); /* test imm32,%eax */
 -                                      }
 -                                      break;
 +                                              /* add %esi, imm32 */
 +                                              EMIT2_off32(0x81, 0xC6, K);
                                }
 -                              if (filter[i].jt != 0) {
 -                                      if (filter[i].jf && f_offset)
 -                                              t_offset += is_near(f_offset) ? 2 : 5;
 -                                      EMIT_COND_JMP(t_op, t_offset);
 -                                      if (filter[i].jf)
 -                                              EMIT_JMP(f_offset);
 -                                      break;
 -                              }
 -                              EMIT_COND_JMP(f_op, f_offset);
 -                              break;
 -                      default:
 -                              /* hmm, too complex filter, give up with jit compiler */
 -                              goto out;
                        }
 -                      ilen = prog - temp;
 -                      if (image) {
 -                              if (unlikely(proglen + ilen > oldproglen)) {
 -                                      pr_err("bpb_jit_compile fatal error\n");
 -                                      kfree(addrs);
 -                                      module_free(NULL, header);
 -                                      return;
 -                              }
 -                              memcpy(image + proglen, temp, ilen);
 +                      /* The skb pointer is in R6 (%rbx); it will be copied
 +                       * into %rdi if a skb_copy_bits() call is necessary.
 +                       * The sk_load_* helpers also use %r10 and %r9d.
 +                       * See bpf_jit.S
 +                       */
 +                      EMIT1_off32(0xE8, jmp_offset); /* call */
 +                      break;
 +
 +              case BPF_LD | BPF_IND | BPF_H:
 +                      func = sk_load_half;
 +                      goto common_load;
 +              case BPF_LD | BPF_ABS | BPF_H:
 +                      func = CHOOSE_LOAD_FUNC(K, sk_load_half);
 +                      goto common_load;
 +              case BPF_LD | BPF_IND | BPF_B:
 +                      func = sk_load_byte;
 +                      goto common_load;
 +              case BPF_LD | BPF_ABS | BPF_B:
 +                      func = CHOOSE_LOAD_FUNC(K, sk_load_byte);
 +                      goto common_load;
 +
 +              case BPF_JMP | BPF_EXIT:
 +                      if (i != insn_cnt - 1) {
 +                              jmp_offset = ctx->cleanup_addr - addrs[i];
 +                              goto emit_jmp;
                        }
 -                      proglen += ilen;
 -                      addrs[i] = proglen;
 -                      prog = temp;
 +                      /* update cleanup_addr */
 +                      ctx->cleanup_addr = proglen;
 +                      /* mov rbx, qword ptr [rbp-X] */
 +                      EMIT3_off32(0x48, 0x8B, 0x9D, -stacksize);
 +                      /* mov r13, qword ptr [rbp-X] */
 +                      EMIT3_off32(0x4C, 0x8B, 0xAD, -stacksize + 8);
 +                      /* mov r14, qword ptr [rbp-X] */
 +                      EMIT3_off32(0x4C, 0x8B, 0xB5, -stacksize + 16);
 +                      /* mov r15, qword ptr [rbp-X] */
 +                      EMIT3_off32(0x4C, 0x8B, 0xBD, -stacksize + 24);
 +
 +                      EMIT1(0xC9); /* leave */
 +                      EMIT1(0xC3); /* ret */
 +                      break;
 +
 +              default:
 +                      /* By design the x64 JIT should support all BPF
 +                       * instructions. This error will be seen if a new
 +                       * instruction was added to the interpreter but not
 +                       * to the JIT, or if there is junk in sk_filter.
 +                       */
 +                      pr_err("bpf_jit: unknown opcode %02x\n", insn->code);
 +                      return -EINVAL;
                }
 -              /* last bpf instruction is always a RET :
 -               * use it to give the cleanup instruction(s) addr
 -               */
 -              cleanup_addr = proglen - 1; /* ret */
 -              if (seen_or_pass0)
 -                      cleanup_addr -= 1; /* leaveq */
 -              if (seen_or_pass0 & SEEN_XREG)
 -                      cleanup_addr -= 4; /* mov  -8(%rbp),%rbx */
  
 +              ilen = prog - temp;
 +              if (image) {
 +                      if (unlikely(proglen + ilen > oldproglen)) {
 +                              pr_err("bpf_jit_compile fatal error\n");
 +                              return -EFAULT;
 +                      }
 +                      memcpy(image + proglen, temp, ilen);
 +              }
 +              proglen += ilen;
 +              addrs[i] = proglen;
 +              prog = temp;
 +      }
 +      return proglen;
 +}
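
For readers new to the encoding helpers used throughout do_jit() above:
add_1mod()/add_2mod() fold x86-64 REX prefix bits into a base byte, and
is_ereg() is true for registers that cannot be encoded without a REX prefix
(r8-r15). A minimal user-space sketch of the same rule, using hypothetical
names (rex_for(), is_extended()) that are not the kernel's helpers:

  #include <stdio.h>
  #include <stdint.h>

  /* x86-64 register numbers 8..15 (r8-r15) need a REX extension bit */
  static int is_extended(int x86reg)
  {
          return x86reg >= 8;
  }

  /* REX = 0100WRXB: W selects 64-bit operand size, B extends ModRM.rm.
   * Mirrors add_1mod(0x48, reg) (64-bit) vs add_1mod(0x40, reg) (32-bit).
   */
  static uint8_t rex_for(int w64, int rm)
  {
          uint8_t rex = 0x40 | (w64 ? 0x08 : 0);

          if (is_extended(rm))
                  rex |= 0x01;            /* REX.B */
          return rex;
  }

  int main(void)
  {
          printf("64-bit op on r13: REX=%#04x\n", rex_for(1, 13)); /* 0x49 */
          printf("32-bit op on r9:  REX=%#04x\n", rex_for(0, 9));  /* 0x41 */
          return 0;
  }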
 +
 +void bpf_jit_compile(struct sk_filter *prog)
 +{
 +}
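
Note that bpf_jit_compile() is deliberately left as an empty stub here:
classic BPF programs are converted to the internal instruction set by the
core filter code, and it is bpf_int_jit_compile() below that generates the
x86-64 image for the converted program.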
 +
 +void bpf_int_jit_compile(struct sk_filter *prog)
 +{
 +      struct bpf_binary_header *header = NULL;
 +      int proglen, oldproglen = 0;
 +      struct jit_context ctx = {};
 +      u8 *image = NULL;
 +      int *addrs;
 +      int pass;
 +      int i;
 +
 +      if (!bpf_jit_enable)
 +              return;
 +
 +      if (!prog || !prog->len)
 +              return;
 +
 +      addrs = kmalloc(prog->len * sizeof(*addrs), GFP_KERNEL);
 +      if (!addrs)
 +              return;
 +
 +      /* Before the first pass, make a rough estimate of addrs[]:
 +       * each BPF instruction is translated to less than 64 bytes.
 +       */
 +      for (proglen = 0, i = 0; i < prog->len; i++) {
 +              proglen += 64;
 +              addrs[i] = proglen;
 +      }
 +      ctx.cleanup_addr = proglen;
 +
 +      for (pass = 0; pass < 10; pass++) {
 +              proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
 +              if (proglen <= 0) {
 +                      image = NULL;
 +                      if (header)
 +                              module_free(NULL, header);
 +                      goto out;
 +              }
                if (image) {
                        if (proglen != oldproglen)
 -                              pr_err("bpb_jit_compile proglen=%u != oldproglen=%u\n", proglen, oldproglen);
 +                              pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
 +                                     proglen, oldproglen);
                        break;
                }
                if (proglen == oldproglen) {
        }
  
        if (bpf_jit_enable > 1)
 -              bpf_jit_dump(flen, proglen, pass, image);
 +              bpf_jit_dump(prog->len, proglen, 0, image);
  
        if (image) {
                bpf_flush_icache(header, image + proglen);
                set_memory_ro((unsigned long)header, header->pages);
 -              fp->bpf_func = (void *)image;
 -              fp->jited = 1;
 +              prog->bpf_func = (void *)image;
 +              prog->jited = 1;
        }
  out:
        kfree(addrs);
 -      return;
  }
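
The loop above is the usual converge-then-commit JIT sizing strategy:
addrs[] is seeded with a pessimistic 64 bytes per instruction, each
do_jit() pass re-measures (jumps shrink as their targets move closer), and
the image is only filled once two consecutive passes produce the same
length. A toy user-space sketch of the idea, assuming a fake
two-or-five-byte jump encoder rather than the real emitter:

  #include <stdio.h>

  /* toy insn stream: plain 1-byte ops plus jumps whose encoded size
   * depends on the (not yet final) distance to their target
   */
  struct insn { int is_jmp; int target; };

  static int do_pass(const struct insn *p, int n, int *addrs)
  {
          int proglen = 0;

          for (int i = 0; i < n; i++) {
                  int len = 1;

                  if (p[i].is_jmp) {
                          int off = addrs[p[i].target] - addrs[i];

                          /* short jmp (2 bytes) vs near jmp (5 bytes) */
                          len = (off >= -128 && off <= 127) ? 2 : 5;
                  }
                  proglen += len;
                  addrs[i] = proglen;     /* end offset of insn i */
          }
          return proglen;
  }

  int main(void)
  {
          struct insn prog[] = { {0, 0}, {1, 3}, {0, 0}, {0, 0} };
          int n = 4, addrs[4], oldproglen = 0, proglen = 0;

          for (int i = 0; i < n; i++)     /* pessimistic seed, as above */
                  addrs[i] = (i + 1) * 64;

          for (int pass = 0; pass < 10; pass++) {
                  proglen = do_pass(prog, n, addrs);
                  if (proglen == oldproglen)
                          break;          /* sizes converged; safe to emit */
                  oldproglen = proglen;
          }
          printf("converged image size: %d bytes\n", proglen);
          return 0;
  }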
  
  static void bpf_jit_free_deferred(struct work_struct *work)
@@@ -82,7 -82,8 +82,8 @@@ static inline struct arp_pkt *arp_pkt(c
  }
  
  /* Forward declaration */
- static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[]);
+ static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[],
+                                     bool strict_match);
  static void rlb_purge_src_ip(struct bonding *bond, struct arp_pkt *arp);
  static void rlb_src_unlink(struct bonding *bond, u32 index);
  static void rlb_src_link(struct bonding *bond, u32 ip_src_hash,
@@@ -228,7 -229,7 +229,7 @@@ static struct slave *tlb_get_least_load
  
        /* Find the slave with the largest gap */
        bond_for_each_slave_rcu(bond, slave, iter) {
 -              if (SLAVE_IS_OK(slave)) {
 +              if (bond_slave_can_tx(slave)) {
                        long long gap = compute_gap(slave);
  
                        if (max_gap < gap) {
@@@ -383,7 -384,7 +384,7 @@@ static struct slave *rlb_next_rx_slave(
        bool found = false;
  
        bond_for_each_slave(bond, slave, iter) {
 -              if (!SLAVE_IS_OK(slave))
 +              if (!bond_slave_can_tx(slave))
                        continue;
                if (!found) {
                        if (!before || before->speed < slave->speed)
@@@ -416,7 -417,7 +417,7 @@@ static struct slave *__rlb_next_rx_slav
        bool found = false;
  
        bond_for_each_slave_rcu(bond, slave, iter) {
 -              if (!SLAVE_IS_OK(slave))
 +              if (!bond_slave_can_tx(slave))
                        continue;
                if (!found) {
                        if (!before || before->speed < slave->speed)
@@@ -459,7 -460,7 +460,7 @@@ static void rlb_teach_disabled_mac_on_p
  
        bond->alb_info.rlb_promisc_timeout_counter = 0;
  
-       alb_send_learning_packets(bond->curr_active_slave, addr);
+       alb_send_learning_packets(bond->curr_active_slave, addr, true);
  }
  
  /* slave being removed should not be active at this point
@@@ -995,7 -996,7 +996,7 @@@ static void rlb_clear_vlan(struct bondi
  /*********************** tlb/rlb shared functions *********************/
  
  static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
-                           u16 vid)
+                           __be16 vlan_proto, u16 vid)
  {
        struct learning_pkt pkt;
        struct sk_buff *skb;
        skb->dev = slave->dev;
  
        if (vid) {
-               skb = vlan_put_tag(skb, htons(ETH_P_8021Q), vid);
+               skb = vlan_put_tag(skb, vlan_proto, vid);
                if (!skb) {
                        pr_err("%s: Error: failed to insert VLAN tag\n",
                               slave->bond->dev->name);
        dev_queue_xmit(skb);
  }
  
- static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[])
+ static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[],
+                                     bool strict_match)
  {
        struct bonding *bond = bond_get_bond_by_slave(slave);
        struct net_device *upper;
        struct list_head *iter;
  
        /* send untagged */
-       alb_send_lp_vid(slave, mac_addr, 0);
+       alb_send_lp_vid(slave, mac_addr, 0, 0);
  
        /* loop through vlans and send one packet for each */
        rcu_read_lock();
        netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) {
-               if (upper->priv_flags & IFF_802_1Q_VLAN)
-                       alb_send_lp_vid(slave, mac_addr,
-                                       vlan_dev_vlan_id(upper));
+               if (is_vlan_dev(upper) && vlan_get_encap_level(upper) == 0) {
+                       if (strict_match &&
+                           ether_addr_equal_64bits(mac_addr,
+                                                   upper->dev_addr)) {
+                               alb_send_lp_vid(slave, mac_addr,
+                                               vlan_dev_vlan_proto(upper),
+                                               vlan_dev_vlan_id(upper));
+                       } else if (!strict_match) {
+                               alb_send_lp_vid(slave, upper->dev_addr,
+                                               vlan_dev_vlan_proto(upper),
+                                               vlan_dev_vlan_id(upper));
+                       }
+               }
        }
        rcu_read_unlock();
  }
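
The strict_match flag added above changes both whether a learning packet is
sent for a given VLAN upper device and which source MAC it carries.
Condensed into a toy user-space predicate (illustrative types and names,
not the kernel's):

  #include <stdbool.h>
  #include <string.h>

  struct vlan_upper { unsigned char dev_addr[6]; };

  /* Returns true if a learning packet should go out on this VLAN and,
   * if so, which source MAC to use (mirrors the branch above).
   */
  static bool lp_should_send(bool strict_match, const unsigned char *mac,
                             const struct vlan_upper *upper,
                             const unsigned char **src_mac)
  {
          if (!strict_match) {
                  /* e.g. failover: advertise the VLAN device's own MAC */
                  *src_mac = upper->dev_addr;
                  return true;
          }
          if (memcmp(mac, upper->dev_addr, 6) == 0) {
                  /* periodic refresh: only MACs the VLAN actually owns */
                  *src_mac = mac;
                  return true;
          }
          return false;   /* strict mismatch: skip this VLAN entirely */
  }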
@@@ -1057,7 -1068,7 +1068,7 @@@ static int alb_set_slave_mac_addr(struc
        struct net_device *dev = slave->dev;
        struct sockaddr s_addr;
  
 -      if (slave->bond->params.mode == BOND_MODE_TLB) {
 +      if (BOND_MODE(slave->bond) == BOND_MODE_TLB) {
                memcpy(dev->dev_addr, addr, dev->addr_len);
                return 0;
        }
@@@ -1100,14 -1111,14 +1111,14 @@@ static void alb_swap_mac_addr(struct sl
  static void alb_fasten_mac_swap(struct bonding *bond, struct slave *slave1,
                                struct slave *slave2)
  {
 -      int slaves_state_differ = (SLAVE_IS_OK(slave1) != SLAVE_IS_OK(slave2));
 +      int slaves_state_differ = (bond_slave_can_tx(slave1) != bond_slave_can_tx(slave2));
        struct slave *disabled_slave = NULL;
  
        ASSERT_RTNL();
  
        /* fasten the change in the switch */
 -      if (SLAVE_IS_OK(slave1)) {
 +      if (bond_slave_can_tx(slave1)) {
-               alb_send_learning_packets(slave1, slave1->dev->dev_addr);
+               alb_send_learning_packets(slave1, slave1->dev->dev_addr, false);
                if (bond->alb_info.rlb_enabled) {
                        /* inform the clients that the mac address
                         * has changed
                disabled_slave = slave1;
        }
  
 -      if (SLAVE_IS_OK(slave2)) {
 +      if (bond_slave_can_tx(slave2)) {
-               alb_send_learning_packets(slave2, slave2->dev->dev_addr);
+               alb_send_learning_packets(slave2, slave2->dev->dev_addr, false);
                if (bond->alb_info.rlb_enabled) {
                        /* inform the clients that the mac address
                         * has changed
@@@ -1347,77 -1358,6 +1358,77 @@@ void bond_alb_deinitialize(struct bondi
                rlb_deinitialize(bond);
  }
  
 +static int bond_do_alb_xmit(struct sk_buff *skb, struct bonding *bond,
 +              struct slave *tx_slave)
 +{
 +      struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
 +      struct ethhdr *eth_data = eth_hdr(skb);
 +
 +      if (!tx_slave) {
 +              /* unbalanced or unassigned, send through primary */
 +              tx_slave = rcu_dereference(bond->curr_active_slave);
 +              if (bond->params.tlb_dynamic_lb)
 +                      bond_info->unbalanced_load += skb->len;
 +      }
 +
 +      if (tx_slave && bond_slave_can_tx(tx_slave)) {
 +              if (tx_slave != rcu_dereference(bond->curr_active_slave)) {
 +                      ether_addr_copy(eth_data->h_source,
 +                                      tx_slave->dev->dev_addr);
 +              }
 +
 +              bond_dev_queue_xmit(bond, skb, tx_slave->dev);
 +              goto out;
 +      }
 +
 +      if (tx_slave && bond->params.tlb_dynamic_lb) {
 +              _lock_tx_hashtbl(bond);
 +              __tlb_clear_slave(bond, tx_slave, 0);
 +              _unlock_tx_hashtbl(bond);
 +      }
 +
 +      /* no suitable interface, frame not sent */
 +      dev_kfree_skb_any(skb);
 +out:
 +      return NETDEV_TX_OK;
 +}
 +
 +int bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
 +{
 +      struct bonding *bond = netdev_priv(bond_dev);
 +      struct ethhdr *eth_data;
 +      struct slave *tx_slave = NULL;
 +      u32 hash_index;
 +
 +      skb_reset_mac_header(skb);
 +      eth_data = eth_hdr(skb);
 +
 +      /* Do not TX balance any multicast or broadcast */
 +      if (!is_multicast_ether_addr(eth_data->h_dest)) {
 +              switch (skb->protocol) {
 +              case htons(ETH_P_IP):
 +              case htons(ETH_P_IPX):
 +                  /* In case of IPX, it will fall back to L2 hash */
 +              case htons(ETH_P_IPV6):
 +                      hash_index = bond_xmit_hash(bond, skb);
 +                      if (bond->params.tlb_dynamic_lb) {
 +                              tx_slave = tlb_choose_channel(bond,
 +                                                            hash_index & 0xFF,
 +                                                            skb->len);
 +                      } else {
 +                              struct list_head *iter;
 +                              int idx = hash_index % bond->slave_cnt;
 +
 +                              bond_for_each_slave_rcu(bond, tx_slave, iter)
 +                                      if (--idx < 0)
 +                                              break;
 +                      }
 +                      break;
 +              }
 +      }
 +      return bond_do_alb_xmit(skb, bond, tx_slave);
 +}
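
When tlb_dynamic_lb is off, bond_tlb_xmit() above does not consult the TLB
hash table at all; it reduces the flow hash modulo the slave count and
walks the slave list to that index. The same walk in a toy user-space form
(the list type is illustrative; the kernel iterates with
bond_for_each_slave_rcu()):

  #include <stddef.h>

  struct slave_node { struct slave_node *next; };

  static struct slave_node *pick_slave(struct slave_node *head,
                                       unsigned int hash, int slave_cnt)
  {
          int idx = hash % slave_cnt;     /* slave_cnt must be > 0 */
          struct slave_node *s;

          for (s = head; s; s = s->next)
                  if (--idx < 0)          /* stop at the idx-th slave */
                          break;
          /* NULL only if the list has fewer than slave_cnt entries */
          return s;
  }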
 +
  int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
  {
        struct bonding *bond = netdev_priv(bond_dev);
        struct slave *tx_slave = NULL;
        static const __be32 ip_bcast = htonl(0xffffffff);
        int hash_size = 0;
 -      int do_tx_balance = 1;
 +      bool do_tx_balance = true;
        u32 hash_index = 0;
        const u8 *hash_start = NULL;
        struct ipv6hdr *ip6hdr;
                if (ether_addr_equal_64bits(eth_data->h_dest, mac_bcast) ||
                    (iph->daddr == ip_bcast) ||
                    (iph->protocol == IPPROTO_IGMP)) {
 -                      do_tx_balance = 0;
 +                      do_tx_balance = false;
                        break;
                }
                hash_start = (char *)&(iph->daddr);
                 * that here just in case.
                 */
                if (ether_addr_equal_64bits(eth_data->h_dest, mac_bcast)) {
 -                      do_tx_balance = 0;
 +                      do_tx_balance = false;
                        break;
                }
  
                 * broadcasts in IPv4.
                 */
                if (ether_addr_equal_64bits(eth_data->h_dest, mac_v6_allmcast)) {
 -                      do_tx_balance = 0;
 +                      do_tx_balance = false;
                        break;
                }
  
                 */
                ip6hdr = ipv6_hdr(skb);
                if (ipv6_addr_any(&ip6hdr->saddr)) {
 -                      do_tx_balance = 0;
 +                      do_tx_balance = false;
                        break;
                }
  
        case ETH_P_IPX:
                if (ipx_hdr(skb)->ipx_checksum != IPX_NO_CHECKSUM) {
                        /* something is wrong with this packet */
 -                      do_tx_balance = 0;
 +                      do_tx_balance = false;
                        break;
                }
  
                         * this family since it has an "ARP" like
                         * mechanism
                         */
 -                      do_tx_balance = 0;
 +                      do_tx_balance = false;
                        break;
                }
  
                hash_size = ETH_ALEN;
                break;
        case ETH_P_ARP:
 -              do_tx_balance = 0;
 +              do_tx_balance = false;
                if (bond_info->rlb_enabled)
                        tx_slave = rlb_arp_xmit(skb, bond);
                break;
        default:
 -              do_tx_balance = 0;
 +              do_tx_balance = false;
                break;
        }
  
                tx_slave = tlb_choose_channel(bond, hash_index, skb->len);
        }
  
 -      if (!tx_slave) {
 -              /* unbalanced or unassigned, send through primary */
 -              tx_slave = rcu_dereference(bond->curr_active_slave);
 -              bond_info->unbalanced_load += skb->len;
 -      }
 -
 -      if (tx_slave && SLAVE_IS_OK(tx_slave)) {
 -              if (tx_slave != rcu_dereference(bond->curr_active_slave)) {
 -                      ether_addr_copy(eth_data->h_source,
 -                                      tx_slave->dev->dev_addr);
 -              }
 -
 -              bond_dev_queue_xmit(bond, skb, tx_slave->dev);
 -              goto out;
 -      }
 -
 -      if (tx_slave) {
 -              _lock_tx_hashtbl(bond);
 -              __tlb_clear_slave(bond, tx_slave, 0);
 -              _unlock_tx_hashtbl(bond);
 -      }
 -
 -      /* no suitable interface, frame not sent */
 -      dev_kfree_skb_any(skb);
 -out:
 -      return NETDEV_TX_OK;
 +      return bond_do_alb_xmit(skb, bond, tx_slave);
  }
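
With this refactor both transmit paths end in bond_do_alb_xmit(): the
fallback to curr_active_slave, the source MAC rewrite, the hash-table
clearing and the drop path now live in one place, and the unbalanced-load
accounting and TLB clearing are gated on tlb_dynamic_lb for
bond_tlb_xmit() and bond_alb_xmit() alike.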
  
  void bond_alb_monitor(struct work_struct *work)
  
        /* send learning packets */
        if (bond_info->lp_counter >= BOND_ALB_LP_TICKS(bond)) {
+               bool strict_match;
                /* change of curr_active_slave involves swapping of mac addresses.
                 * in order to avoid this swapping from happening while
                 * sending the learning packets, the curr_slave_lock must be held for
                 */
                read_lock(&bond->curr_slave_lock);
  
-               bond_for_each_slave_rcu(bond, slave, iter)
-                       alb_send_learning_packets(slave, slave->dev->dev_addr);
+               bond_for_each_slave_rcu(bond, slave, iter) {
+                       /* If updating current_active, use all current
+                        * user mac addresses (!strict_match).  Otherwise, only
+                        * use the mac of the slave device.
+                        */
+                       strict_match = (slave != bond->curr_active_slave);
+                       alb_send_learning_packets(slave, slave->dev->dev_addr,
+                                                 strict_match);
+               }
  
                read_unlock(&bond->curr_slave_lock);
  
@@@ -1745,7 -1719,7 +1765,7 @@@ void bond_alb_handle_active_change(stru
        /* in TLB mode, the slave might flip down/up with the old dev_addr,
         * and thus filter bond->dev_addr's packets, so force bond's mac
         */
 -      if (bond->params.mode == BOND_MODE_TLB) {
 +      if (BOND_MODE(bond) == BOND_MODE_TLB) {
                struct sockaddr sa;
                u8 tmp_addr[ETH_ALEN];
  
        } else {
                /* set the new_slave to the bond mac address */
                alb_set_slave_mac_addr(new_slave, bond->dev->dev_addr);
-               alb_send_learning_packets(new_slave, bond->dev->dev_addr);
+               alb_send_learning_packets(new_slave, bond->dev->dev_addr,
+                                         false);
        }
  
        write_lock_bh(&bond->curr_slave_lock);
@@@ -1810,7 -1785,8 +1831,8 @@@ int bond_alb_set_mac_address(struct net
                alb_set_slave_mac_addr(bond->curr_active_slave, bond_dev->dev_addr);
  
                read_lock(&bond->lock);
-               alb_send_learning_packets(bond->curr_active_slave, bond_dev->dev_addr);
+               alb_send_learning_packets(bond->curr_active_slave,
+                                         bond_dev->dev_addr, false);
                if (bond->alb_info.rlb_enabled) {
                        /* inform clients mac address has changed */
                        rlb_req_update_slave_clients(bond, bond->curr_active_slave);
@@@ -343,7 -343,7 +343,7 @@@ static int bond_set_carrier(struct bond
        if (!bond_has_slaves(bond))
                goto down;
  
 -      if (bond->params.mode == BOND_MODE_8023AD)
 +      if (BOND_MODE(bond) == BOND_MODE_8023AD)
                return bond_3ad_set_carrier(bond);
  
        bond_for_each_slave(bond, slave, iter) {
@@@ -497,7 -497,7 +497,7 @@@ static int bond_set_promiscuity(struct 
        struct list_head *iter;
        int err = 0;
  
 -      if (USES_PRIMARY(bond->params.mode)) {
 +      if (bond_uses_primary(bond)) {
                /* write lock already acquired */
                if (bond->curr_active_slave) {
                        err = dev_set_promiscuity(bond->curr_active_slave->dev,
@@@ -523,7 -523,7 +523,7 @@@ static int bond_set_allmulti(struct bon
        struct list_head *iter;
        int err = 0;
  
 -      if (USES_PRIMARY(bond->params.mode)) {
 +      if (bond_uses_primary(bond)) {
                /* write lock already acquired */
                if (bond->curr_active_slave) {
                        err = dev_set_allmulti(bond->curr_active_slave->dev,
@@@ -574,7 -574,7 +574,7 @@@ static void bond_hw_addr_flush(struct n
        dev_uc_unsync(slave_dev, bond_dev);
        dev_mc_unsync(slave_dev, bond_dev);
  
 -      if (bond->params.mode == BOND_MODE_8023AD) {
 +      if (BOND_MODE(bond) == BOND_MODE_8023AD) {
                /* del lacpdu mc addr from mc list */
                u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
  
  /*--------------------------- Active slave change ---------------------------*/
  
  /* Update the hardware address list and promisc/allmulti for the new and
 - * old active slaves (if any).  Modes that are !USES_PRIMARY keep all
 - * slaves up date at all times; only the USES_PRIMARY modes need to call
 + * old active slaves (if any).  Modes that are not using primary keep all
 + * slaves up-to-date at all times; only the modes that use primary need to call
   * this function to swap these settings during a failover.
   */
  static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active,
@@@ -747,7 -747,7 +747,7 @@@ static struct slave *bond_find_best_sla
        bond_for_each_slave(bond, slave, iter) {
                if (slave->link == BOND_LINK_UP)
                        return slave;
 -              if (slave->link == BOND_LINK_BACK && IS_UP(slave->dev) &&
 +              if (slave->link == BOND_LINK_BACK && bond_slave_is_up(slave) &&
                    slave->delay < mintime) {
                        mintime = slave->delay;
                        bestslave = slave;
@@@ -801,7 -801,7 +801,7 @@@ void bond_change_active_slave(struct bo
                new_active->last_link_up = jiffies;
  
                if (new_active->link == BOND_LINK_BACK) {
 -                      if (USES_PRIMARY(bond->params.mode)) {
 +                      if (bond_uses_primary(bond)) {
                                pr_info("%s: making interface %s the new active one %d ms earlier\n",
                                        bond->dev->name, new_active->dev->name,
                                        (bond->params.updelay - new_active->delay) * bond->params.miimon);
                        new_active->delay = 0;
                        new_active->link = BOND_LINK_UP;
  
 -                      if (bond->params.mode == BOND_MODE_8023AD)
 +                      if (BOND_MODE(bond) == BOND_MODE_8023AD)
                                bond_3ad_handle_link_change(new_active, BOND_LINK_UP);
  
                        if (bond_is_lb(bond))
                                bond_alb_handle_link_change(bond, new_active, BOND_LINK_UP);
                } else {
 -                      if (USES_PRIMARY(bond->params.mode)) {
 +                      if (bond_uses_primary(bond)) {
                                pr_info("%s: making interface %s the new active one\n",
                                        bond->dev->name, new_active->dev->name);
                        }
                }
        }
  
 -      if (USES_PRIMARY(bond->params.mode))
 +      if (bond_uses_primary(bond))
                bond_hw_addr_swap(bond, new_active, old_active);
  
        if (bond_is_lb(bond)) {
                rcu_assign_pointer(bond->curr_active_slave, new_active);
        }
  
 -      if (bond->params.mode == BOND_MODE_ACTIVEBACKUP) {
 +      if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) {
                if (old_active)
                        bond_set_slave_inactive_flags(old_active,
                                                      BOND_SLAVE_NOTIFY_NOW);
         * resend only if bond is brought up with the affected
         * bonding modes and the retransmission is enabled */
        if (netif_running(bond->dev) && (bond->params.resend_igmp > 0) &&
 -          ((USES_PRIMARY(bond->params.mode) && new_active) ||
 -           bond->params.mode == BOND_MODE_ROUNDROBIN)) {
 +          ((bond_uses_primary(bond) && new_active) ||
 +           BOND_MODE(bond) == BOND_MODE_ROUNDROBIN)) {
                bond->igmp_retrans = bond->params.resend_igmp;
                queue_delayed_work(bond->wq, &bond->mcast_work, 1);
        }
@@@ -958,7 -958,7 +958,7 @@@ static void bond_netpoll_cleanup(struc
        struct slave *slave;
  
        bond_for_each_slave(bond, slave, iter)
 -              if (IS_UP(slave->dev))
 +              if (bond_slave_is_up(slave))
                        slave_disable_netpoll(slave);
  }
  
@@@ -1038,7 -1038,6 +1038,7 @@@ static void bond_compute_features(struc
  
        if (!bond_has_slaves(bond))
                goto done;
 +      vlan_features &= NETIF_F_ALL_FOR_ALL;
  
        bond_for_each_slave(bond, slave, iter) {
                vlan_features = netdev_increment_features(vlan_features,
@@@ -1085,7 -1084,7 +1085,7 @@@ static bool bond_should_deliver_exact_m
                                            struct bonding *bond)
  {
        if (bond_is_slave_inactive(slave)) {
 -              if (bond->params.mode == BOND_MODE_ALB &&
 +              if (BOND_MODE(bond) == BOND_MODE_ALB &&
                    skb->pkt_type != PACKET_BROADCAST &&
                    skb->pkt_type != PACKET_MULTICAST)
                        return false;
@@@ -1127,7 -1126,7 +1127,7 @@@ static rx_handler_result_t bond_handle_
  
        skb->dev = bond->dev;
  
 -      if (bond->params.mode == BOND_MODE_ALB &&
 +      if (BOND_MODE(bond) == BOND_MODE_ALB &&
            bond->dev->priv_flags & IFF_BRIDGE_PORT &&
            skb->pkt_type == PACKET_HOST) {
  
@@@ -1164,35 -1163,6 +1164,35 @@@ static void bond_upper_dev_unlink(struc
        rtmsg_ifinfo(RTM_NEWLINK, slave_dev, IFF_SLAVE, GFP_KERNEL);
  }
  
 +static struct slave *bond_alloc_slave(struct bonding *bond)
 +{
 +      struct slave *slave = NULL;
 +
 +      slave = kzalloc(sizeof(struct slave), GFP_KERNEL);
 +      if (!slave)
 +              return NULL;
 +
 +      if (BOND_MODE(bond) == BOND_MODE_8023AD) {
 +              SLAVE_AD_INFO(slave) = kzalloc(sizeof(struct ad_slave_info),
 +                                             GFP_KERNEL);
 +              if (!SLAVE_AD_INFO(slave)) {
 +                      kfree(slave);
 +                      return NULL;
 +              }
 +      }
 +      return slave;
 +}
 +
 +static void bond_free_slave(struct slave *slave)
 +{
 +      struct bonding *bond = bond_get_bond_by_slave(slave);
 +
 +      if (BOND_MODE(bond) == BOND_MODE_8023AD)
 +              kfree(SLAVE_AD_INFO(slave));
 +
 +      kfree(slave);
 +}
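
bond_alloc_slave()/bond_free_slave() above move the 802.3ad per-slave state
from an embedded struct to a separate allocation (hence SLAVE_AD_INFO()
yielding a pointer further down). The pairing they must maintain, as a toy
user-space sketch with illustrative names:

  #include <stdlib.h>

  struct ad_info { int id; };
  struct toy_slave { struct ad_info *ad; };

  static struct toy_slave *toy_slave_alloc(int is_8023ad)
  {
          struct toy_slave *s = calloc(1, sizeof(*s));

          if (!s)
                  return NULL;
          if (is_8023ad) {
                  s->ad = calloc(1, sizeof(*s->ad));
                  if (!s->ad) {           /* undo the partial allocation */
                          free(s);
                          return NULL;
                  }
          }
          return s;
  }

  static void toy_slave_free(struct toy_slave *s, int is_8023ad)
  {
          if (is_8023ad)
                  free(s->ad);
          free(s);
  }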
 +
  /* enslave device <slave> to bond device <master> */
  int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
  {
                if (!bond_has_slaves(bond)) {
                        pr_warn("%s: Warning: The first slave device specified does not support setting the MAC address\n",
                                bond_dev->name);
 -                      if (bond->params.mode == BOND_MODE_ACTIVEBACKUP) {
 +                      if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) {
                                bond->params.fail_over_mac = BOND_FOM_ACTIVE;
                                pr_warn("%s: Setting fail_over_mac to active for active-backup mode\n",
                                        bond_dev->name);
            bond->dev->addr_assign_type == NET_ADDR_RANDOM)
                bond_set_dev_addr(bond->dev, slave_dev);
  
 -      new_slave = kzalloc(sizeof(struct slave), GFP_KERNEL);
 +      new_slave = bond_alloc_slave(bond);
        if (!new_slave) {
                res = -ENOMEM;
                goto err_undo_flags;
        }
 +
 +      new_slave->bond = bond;
 +      new_slave->dev = slave_dev;
        /*
         * Set the new_slave's queue_id to be zero.  Queue ID mapping
         * is set via sysfs or module option if desired.
        ether_addr_copy(new_slave->perm_hwaddr, slave_dev->dev_addr);
  
        if (!bond->params.fail_over_mac ||
 -          bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
 +          BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
                /*
                 * Set slave to master's mac address.  The application already
                 * set the master's mac address to that of the first slave
                goto err_restore_mac;
        }
  
 -      new_slave->bond = bond;
 -      new_slave->dev = slave_dev;
        slave_dev->priv_flags |= IFF_BONDING;
  
        if (bond_is_lb(bond)) {
                        goto err_close;
        }
  
 -      /* If the mode USES_PRIMARY, then the following is handled by
 +      /* If the mode uses primary, then the following is handled by
         * bond_change_active_slave().
         */
 -      if (!USES_PRIMARY(bond->params.mode)) {
 +      if (!bond_uses_primary(bond)) {
                /* set promiscuity level to new slave */
                if (bond_dev->flags & IFF_PROMISC) {
                        res = dev_set_promiscuity(slave_dev, 1);
                netif_addr_unlock_bh(bond_dev);
        }
  
 -      if (bond->params.mode == BOND_MODE_8023AD) {
 +      if (BOND_MODE(bond) == BOND_MODE_8023AD) {
                /* add lacpdu mc addr to mc list */
                u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
  
                 new_slave->link == BOND_LINK_DOWN ? "DOWN" :
                 (new_slave->link == BOND_LINK_UP ? "UP" : "BACK"));
  
 -      if (USES_PRIMARY(bond->params.mode) && bond->params.primary[0]) {
 +      if (bond_uses_primary(bond) && bond->params.primary[0]) {
                /* if there is a primary slave, remember it */
                if (strcmp(bond->params.primary, new_slave->dev->name) == 0) {
                        bond->primary_slave = new_slave;
                }
        }
  
 -      switch (bond->params.mode) {
 +      switch (BOND_MODE(bond)) {
        case BOND_MODE_ACTIVEBACKUP:
                bond_set_slave_inactive_flags(new_slave,
                                              BOND_SLAVE_NOTIFY_NOW);
                bond_set_slave_inactive_flags(new_slave, BOND_SLAVE_NOTIFY_NOW);
                /* if this is the first slave */
                if (!prev_slave) {
 -                      SLAVE_AD_INFO(new_slave).id = 1;
 +                      SLAVE_AD_INFO(new_slave)->id = 1;
                        /* Initialize AD with the number of times the AD timer is called in 1 second;
                         * can be called only after the mac address of the bond is set
                         */
                        bond_3ad_initialize(bond, 1000/AD_TIMER_INTERVAL);
                } else {
 -                      SLAVE_AD_INFO(new_slave).id =
 -                              SLAVE_AD_INFO(prev_slave).id + 1;
 +                      SLAVE_AD_INFO(new_slave)->id =
 +                              SLAVE_AD_INFO(prev_slave)->id + 1;
                }
  
                bond_3ad_bind_slave(new_slave);
        bond_compute_features(bond);
        bond_set_carrier(bond);
  
 -      if (USES_PRIMARY(bond->params.mode)) {
 +      if (bond_uses_primary(bond)) {
                block_netpoll_tx();
                write_lock_bh(&bond->curr_slave_lock);
                bond_select_active_slave(bond);
@@@ -1594,7 -1563,7 +1594,7 @@@ err_unregister
        netdev_rx_handler_unregister(slave_dev);
  
  err_detach:
 -      if (!USES_PRIMARY(bond->params.mode))
 +      if (!bond_uses_primary(bond))
                bond_hw_addr_flush(bond_dev, slave_dev);
  
        vlan_vids_del_by_dev(slave_dev, bond_dev);
@@@ -1616,7 -1585,7 +1616,7 @@@ err_close
  
  err_restore_mac:
        if (!bond->params.fail_over_mac ||
 -          bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
 +          BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
                /* XXX TODO - fom follow mode needs to change master's
                 * MAC if this slave's MAC is in use by the bond, or at
                 * least print a warning.
@@@ -1630,7 -1599,7 +1630,7 @@@ err_restore_mtu
        dev_set_mtu(slave_dev, new_slave->original_mtu);
  
  err_free:
 -      kfree(new_slave);
 +      bond_free_slave(new_slave);
  
  err_undo_flags:
        /* Enslave of first slave has failed and we need to fix master's mac */
@@@ -1692,7 -1661,7 +1692,7 @@@ static int __bond_release_one(struct ne
        write_lock_bh(&bond->lock);
  
        /* Inform AD package of unbinding of slave. */
 -      if (bond->params.mode == BOND_MODE_8023AD)
 +      if (BOND_MODE(bond) == BOND_MODE_8023AD)
                bond_3ad_unbind_slave(slave);
  
        write_unlock_bh(&bond->lock);
        bond->current_arp_slave = NULL;
  
        if (!all && (!bond->params.fail_over_mac ||
 -                   bond->params.mode != BOND_MODE_ACTIVEBACKUP)) {
 +                   BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP)) {
                if (ether_addr_equal_64bits(bond_dev->dev_addr, slave->perm_hwaddr) &&
                    bond_has_slaves(bond))
                        pr_warn("%s: Warning: the permanent HWaddr of %s - %pM - is still in use by %s - set the HWaddr of %s to a different address to avoid conflicts\n",
        /* must do this from outside any spinlocks */
        vlan_vids_del_by_dev(slave_dev, bond_dev);
  
 -      /* If the mode USES_PRIMARY, then this cases was handled above by
 +      /* If the mode uses primary, then this case was handled above by
         * bond_change_active_slave(..., NULL)
         */
 -      if (!USES_PRIMARY(bond->params.mode)) {
 +      if (!bond_uses_primary(bond)) {
                /* unset promiscuity level from slave
                 * NOTE: The NETDEV_CHANGEADDR call above may change the value
                 * of the IFF_PROMISC flag in the bond_dev, but we need the
        dev_close(slave_dev);
  
        if (bond->params.fail_over_mac != BOND_FOM_ACTIVE ||
 -          bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
 +          BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
                /* restore original ("permanent") mac address */
                ether_addr_copy(addr.sa_data, slave->perm_hwaddr);
                addr.sa_family = slave_dev->type;
  
        slave_dev->priv_flags &= ~IFF_BONDING;
  
 -      kfree(slave);
 +      bond_free_slave(slave);
  
        return 0;  /* deletion OK */
  }
@@@ -1852,7 -1821,7 +1852,7 @@@ static int bond_info_query(struct net_d
  {
        struct bonding *bond = netdev_priv(bond_dev);
  
 -      info->bond_mode = bond->params.mode;
 +      info->bond_mode = BOND_MODE(bond);
        info->miimon = bond->params.miimon;
  
        info->num_slaves = bond->slave_cnt;
@@@ -1908,7 -1877,7 +1908,7 @@@ static int bond_miimon_inspect(struct b
                        if (slave->delay) {
                                pr_info("%s: link status down for %sinterface %s, disabling it in %d ms\n",
                                        bond->dev->name,
 -                                      (bond->params.mode ==
 +                                      (BOND_MODE(bond) ==
                                         BOND_MODE_ACTIVEBACKUP) ?
                                        (bond_is_active_slave(slave) ?
                                         "active " : "backup ") : "",
@@@ -1999,10 -1968,10 +1999,10 @@@ static void bond_miimon_commit(struct b
                        slave->link = BOND_LINK_UP;
                        slave->last_link_up = jiffies;
  
 -                      if (bond->params.mode == BOND_MODE_8023AD) {
 +                      if (BOND_MODE(bond) == BOND_MODE_8023AD) {
                                /* prevent it from being the active one */
                                bond_set_backup_slave(slave);
 -                      } else if (bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
 +                      } else if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
                                /* make it immediately active */
                                bond_set_active_slave(slave);
                        } else if (slave != bond->primary_slave) {
                                slave->duplex ? "full" : "half");
  
                        /* notify ad that the link status has changed */
 -                      if (bond->params.mode == BOND_MODE_8023AD)
 +                      if (BOND_MODE(bond) == BOND_MODE_8023AD)
                                bond_3ad_handle_link_change(slave, BOND_LINK_UP);
  
                        if (bond_is_lb(bond))
  
                        slave->link = BOND_LINK_DOWN;
  
 -                      if (bond->params.mode == BOND_MODE_ACTIVEBACKUP ||
 -                          bond->params.mode == BOND_MODE_8023AD)
 +                      if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP ||
 +                          BOND_MODE(bond) == BOND_MODE_8023AD)
                                bond_set_slave_inactive_flags(slave,
                                                              BOND_SLAVE_NOTIFY_NOW);
  
                        pr_info("%s: link status definitely down for interface %s, disabling it\n",
                                bond->dev->name, slave->dev->name);
  
 -                      if (bond->params.mode == BOND_MODE_8023AD)
 +                      if (BOND_MODE(bond) == BOND_MODE_8023AD)
                                bond_3ad_handle_link_change(slave,
                                                            BOND_LINK_DOWN);
  
@@@ -2157,10 -2126,10 +2157,10 @@@ static bool bond_has_this_ip(struct bon
   */
  static void bond_arp_send(struct net_device *slave_dev, int arp_op,
                          __be32 dest_ip, __be32 src_ip,
-                         struct bond_vlan_tag *inner,
-                         struct bond_vlan_tag *outer)
+                         struct bond_vlan_tag *tags)
  {
        struct sk_buff *skb;
+       int i;
  
        pr_debug("arp %d on slave %s: dst %pI4 src %pI4\n",
                 arp_op, slave_dev->name, &dest_ip, &src_ip);
                net_err_ratelimited("ARP packet allocation failed\n");
                return;
        }
-       if (outer->vlan_id) {
-               if (inner->vlan_id) {
-                       pr_debug("inner tag: proto %X vid %X\n",
-                                ntohs(inner->vlan_proto), inner->vlan_id);
-                       skb = __vlan_put_tag(skb, inner->vlan_proto,
-                                            inner->vlan_id);
-                       if (!skb) {
-                               net_err_ratelimited("failed to insert inner VLAN tag\n");
-                               return;
-                       }
-               }
  
-               pr_debug("outer reg: proto %X vid %X\n",
-                        ntohs(outer->vlan_proto), outer->vlan_id);
-               skb = vlan_put_tag(skb, outer->vlan_proto, outer->vlan_id);
+       /* Go through all the tags backwards and add them to the packet */
+       for (i = BOND_MAX_VLAN_ENCAP - 1; i > 0; i--) {
+               if (!tags[i].vlan_id)
+                       continue;
+               pr_debug("inner tag: proto %X vid %X\n",
+                        ntohs(tags[i].vlan_proto), tags[i].vlan_id);
+               skb = __vlan_put_tag(skb, tags[i].vlan_proto,
+                                    tags[i].vlan_id);
+               if (!skb) {
+                       net_err_ratelimited("failed to insert inner VLAN tag\n");
+                       return;
+               }
+       }
+       /* Set the outer tag */
+       if (tags[0].vlan_id) {
+               pr_debug("outer tag: proto %X vid %X\n",
+                        ntohs(tags[0].vlan_proto), tags[0].vlan_id);
+               skb = vlan_put_tag(skb, tags[0].vlan_proto, tags[0].vlan_id);
                if (!skb) {
                        net_err_ratelimited("failed to insert outer VLAN tag\n");
                        return;
        arp_xmit(skb);
  }
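
In the reworked bond_arp_send() above, the fixed inner/outer tag pair becomes
an array ordered outermost-first (tags[0] is the outer tag). Inner tags are
pushed before the outer one because each insertion prepends a header, so the
last tag pushed ends up outermost on the wire. A toy model of that ordering,
with hypothetical names:

  #include <stdio.h>

  #define MAX_VLAN_ENCAP 2        /* mirrors BOND_MAX_VLAN_ENCAP */

  struct vlan_tag { unsigned int proto, vid; };

  /* stands in for __vlan_put_tag(), which prepends a header */
  static void push_tag(const struct vlan_tag *t)
  {
          printf("push proto %x vid %u\n", t->proto, t->vid);
  }

  static void send_tagged(const struct vlan_tag tags[MAX_VLAN_ENCAP])
  {
          int i;

          for (i = MAX_VLAN_ENCAP - 1; i > 0; i--)  /* inner first */
                  if (tags[i].vid)
                          push_tag(&tags[i]);
          if (tags[0].vid)                          /* outer last */
                  push_tag(&tags[0]);
  }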
  
+ /* Validate the device path between the @start_dev and the @end_dev.
+  * The path is valid if the @end_dev is reachable through device
+  * stacking.
+  * When the path is validated, collect any vlan information in the
+  * path.
+  */
+ static bool bond_verify_device_path(struct net_device *start_dev,
+                                   struct net_device *end_dev,
+                                   struct bond_vlan_tag *tags)
+ {
+       struct net_device *upper;
+       struct list_head  *iter;
+       int  idx;
+
+       if (start_dev == end_dev)
+               return true;
+       netdev_for_each_upper_dev_rcu(start_dev, upper, iter) {
+               if (bond_verify_device_path(upper, end_dev, tags)) {
+                       if (is_vlan_dev(upper)) {
+                               idx = vlan_get_encap_level(upper);
+                               if (idx >= BOND_MAX_VLAN_ENCAP)
+                                       return false;
+                               tags[idx].vlan_proto =
+                                                   vlan_dev_vlan_proto(upper);
+                               tags[idx].vlan_id = vlan_dev_vlan_id(upper);
+                       }
+                       return true;
+               }
+       }
+       return false;
+ }
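
bond_verify_device_path() is a depth-first walk of the upper-device graph:
once end_dev is reached, each vlan hop records its tag at its encapsulation
level on the way back up the recursion, rejecting stacks deeper than
BOND_MAX_VLAN_ENCAP. A self-contained userspace model of the walk, with
illustrative types:

  #include <stdbool.h>

  #define MAX_VLAN_ENCAP 2

  struct vlan_tag { unsigned int proto, vid; };

  struct dev {
          struct dev *upper[8];   /* NULL-terminated upper devices */
          bool is_vlan;
          int encap_level;        /* 0 = outermost, valid if is_vlan */
          struct vlan_tag tag;
  };

  static bool verify_path(struct dev *start, struct dev *end,
                          struct vlan_tag *tags)
  {
          int i;

          if (start == end)
                  return true;
          for (i = 0; start->upper[i]; i++) {
                  struct dev *up = start->upper[i];

                  if (!verify_path(up, end, tags))
                          continue;
                  /* path found: record the tag on the way back up */
                  if (up->is_vlan) {
                          if (up->encap_level >= MAX_VLAN_ENCAP)
                                  return false;   /* stacked too deep */
                          tags[up->encap_level] = up->tag;
                  }
                  return true;
          }
          return false;
  }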
  
  static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
  {
-       struct net_device *upper, *vlan_upper;
-       struct list_head *iter, *vlan_iter;
        struct rtable *rt;
-       struct bond_vlan_tag inner, outer;
+       struct bond_vlan_tag tags[BOND_MAX_VLAN_ENCAP];
        __be32 *targets = bond->params.arp_targets, addr;
        int i;
+       bool ret;
  
        for (i = 0; i < BOND_MAX_ARP_TARGETS && targets[i]; i++) {
                pr_debug("basa: target %pI4\n", &targets[i]);
-               inner.vlan_proto = 0;
-               inner.vlan_id = 0;
-               outer.vlan_proto = 0;
-               outer.vlan_id = 0;
+               memset(tags, 0, sizeof(tags));
  
                /* Find out through which dev should the packet go */
                rt = ip_route_output(dev_net(bond->dev), targets[i], 0,
                                net_warn_ratelimited("%s: no route to arp_ip_target %pI4 and arp_validate is set\n",
                                                     bond->dev->name,
                                                     &targets[i]);
-                       bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i], 0, &inner, &outer);
+                       bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
+                                     0, tags);
                        continue;
                }
  
                        goto found;
  
                rcu_read_lock();
-               /* first we search only for vlan devices. for every vlan
-                * found we verify its upper dev list, searching for the
-                * rt->dst.dev. If found we save the tag of the vlan and
-                * proceed to send the packet.
-                */
-               netdev_for_each_all_upper_dev_rcu(bond->dev, vlan_upper,
-                                                 vlan_iter) {
-                       if (!is_vlan_dev(vlan_upper))
-                               continue;
-                       if (vlan_upper == rt->dst.dev) {
-                               outer.vlan_proto = vlan_dev_vlan_proto(vlan_upper);
-                               outer.vlan_id = vlan_dev_vlan_id(vlan_upper);
-                               rcu_read_unlock();
-                               goto found;
-                       }
-                       netdev_for_each_all_upper_dev_rcu(vlan_upper, upper,
-                                                         iter) {
-                               if (upper == rt->dst.dev) {
-                                       /* If the upper dev is a vlan dev too,
-                                        *  set the vlan tag to inner tag.
-                                        */
-                                       if (is_vlan_dev(upper)) {
-                                               inner.vlan_proto = vlan_dev_vlan_proto(upper);
-                                               inner.vlan_id = vlan_dev_vlan_id(upper);
-                                       }
-                                       outer.vlan_proto = vlan_dev_vlan_proto(vlan_upper);
-                                       outer.vlan_id = vlan_dev_vlan_id(vlan_upper);
-                                       rcu_read_unlock();
-                                       goto found;
-                               }
-                       }
-               }
-               /* if the device we're looking for is not on top of any of
-                * our upper vlans, then just search for any dev that
-                * matches, and in case it's a vlan - save the id
-                */
-               netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) {
-                       if (upper == rt->dst.dev) {
-                               rcu_read_unlock();
-                               goto found;
-                       }
-               }
+               ret = bond_verify_device_path(bond->dev, rt->dst.dev, tags);
                rcu_read_unlock();
  
+               if (ret)
+                       goto found;
                /* Not our device - skip */
                pr_debug("%s: no path to arp_ip_target %pI4 via rt.dev %s\n",
                         bond->dev->name, &targets[i],
@@@ -2290,7 -2255,7 +2286,7 @@@ found
                addr = bond_confirm_addr(rt->dst.dev, targets[i], 0);
                ip_rt_put(rt);
                bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
-                             addr, &inner, &outer);
+                             addr, tags);
        }
  }
  
@@@ -2322,8 -2287,8 +2318,8 @@@ int bond_arp_rcv(const struct sk_buff *
        int alen, is_arp = skb->protocol == __cpu_to_be16(ETH_P_ARP);
  
        if (!slave_do_arp_validate(bond, slave)) {
 -              if ((slave_do_arp_validate_only(bond, slave) && is_arp) ||
 -                  !slave_do_arp_validate_only(bond, slave))
 +              if ((slave_do_arp_validate_only(bond) && is_arp) ||
 +                  !slave_do_arp_validate_only(bond))
                        slave->last_rx = jiffies;
                return RX_HANDLER_ANOTHER;
        } else if (!is_arp) {
@@@ -2491,7 -2456,7 +2487,7 @@@ static void bond_loadbalance_arp_mon(st
                 * do - all replies will be rx'ed on same link causing slaves
                 * to be unstable during low/no traffic periods
                 */
 -              if (IS_UP(slave->dev))
 +              if (bond_slave_is_up(slave))
                        bond_arp_send_all(bond, slave);
        }
  
@@@ -2713,10 -2678,10 +2709,10 @@@ static bool bond_ab_arp_probe(struct bo
        bond_set_slave_inactive_flags(curr_arp_slave, BOND_SLAVE_NOTIFY_LATER);
  
        bond_for_each_slave_rcu(bond, slave, iter) {
 -              if (!found && !before && IS_UP(slave->dev))
 +              if (!found && !before && bond_slave_is_up(slave))
                        before = slave;
  
 -              if (found && !new_slave && IS_UP(slave->dev))
 +              if (found && !new_slave && bond_slave_is_up(slave))
                        new_slave = slave;
                /* if the link state is up at this point, we
                 * mark it down - this can happen if we have
                 * simultaneous link failures and
                 * reselect_active_interface doesn't make this
                 * one the current slave so it is still marked
                 * up when it is actually down
                 */
 -              if (!IS_UP(slave->dev) && slave->link == BOND_LINK_UP) {
 +              if (!bond_slave_is_up(slave) && slave->link == BOND_LINK_UP) {
                        slave->link = BOND_LINK_DOWN;
                        if (slave->link_failure_count < UINT_MAX)
                                slave->link_failure_count++;
@@@ -2888,7 -2853,7 +2884,7 @@@ static int bond_slave_netdev_event(unsi
  
                bond_update_speed_duplex(slave);
  
 -              if (bond->params.mode == BOND_MODE_8023AD) {
 +              if (BOND_MODE(bond) == BOND_MODE_8023AD) {
                        if (old_speed != slave->speed)
                                bond_3ad_adapter_speed_changed(slave);
                        if (old_duplex != slave->duplex)
                break;
        case NETDEV_CHANGENAME:
                /* we don't care if we don't have primary set */
 -              if (!USES_PRIMARY(bond->params.mode) ||
 +              if (!bond_uses_primary(bond) ||
                    !bond->params.primary[0])
                        break;
  
@@@ -3046,18 -3011,20 +3042,18 @@@ static bool bond_flow_dissect(struct bo
   * bond_xmit_hash - generate a hash value based on the xmit policy
   * @bond: bonding device
   * @skb: buffer to use for headers
 - * @count: modulo value
   *
   * This function will extract the necessary headers from the skb buffer and use
   * them to generate a hash based on the xmit_policy set in the bonding device
 - * which will be reduced modulo count before returning.
   */
 -int bond_xmit_hash(struct bonding *bond, struct sk_buff *skb, int count)
 +u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb)
  {
        struct flow_keys flow;
        u32 hash;
  
        if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER2 ||
            !bond_flow_dissect(bond, skb, &flow))
 -              return bond_eth_hash(skb) % count;
 +              return bond_eth_hash(skb);
  
        if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER23 ||
            bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP23)
        hash ^= (hash >> 16);
        hash ^= (hash >> 8);
  
 -      return hash % count;
 +      return hash;
  }
  
  /*-------------------------- Device entry points ----------------------------*/
@@@ -3079,7 -3046,7 +3075,7 @@@ static void bond_work_init_all(struct b
                          bond_resend_igmp_join_requests_delayed);
        INIT_DELAYED_WORK(&bond->alb_work, bond_alb_monitor);
        INIT_DELAYED_WORK(&bond->mii_work, bond_mii_monitor);
 -      if (bond->params.mode == BOND_MODE_ACTIVEBACKUP)
 +      if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)
                INIT_DELAYED_WORK(&bond->arp_work, bond_activebackup_arp_mon);
        else
                INIT_DELAYED_WORK(&bond->arp_work, bond_loadbalance_arp_mon);
@@@ -3106,7 -3073,7 +3102,7 @@@ static int bond_open(struct net_device 
        if (bond_has_slaves(bond)) {
                read_lock(&bond->curr_slave_lock);
                bond_for_each_slave(bond, slave, iter) {
 -                      if (USES_PRIMARY(bond->params.mode)
 +                      if (bond_uses_primary(bond)
                                && (slave != bond->curr_active_slave)) {
                                bond_set_slave_inactive_flags(slave,
                                                              BOND_SLAVE_NOTIFY_NOW);
                /* bond_alb_initialize must be called before the timer
                 * is started.
                 */
 -              if (bond_alb_initialize(bond, (bond->params.mode == BOND_MODE_ALB)))
 +              if (bond_alb_initialize(bond, (BOND_MODE(bond) == BOND_MODE_ALB)))
                        return -ENOMEM;
 -              queue_delayed_work(bond->wq, &bond->alb_work, 0);
 +              if (bond->params.tlb_dynamic_lb)
 +                      queue_delayed_work(bond->wq, &bond->alb_work, 0);
        }
  
        if (bond->params.miimon)  /* link check interval, in milliseconds. */
                bond->recv_probe = bond_arp_rcv;
        }
  
 -      if (bond->params.mode == BOND_MODE_8023AD) {
 +      if (BOND_MODE(bond) == BOND_MODE_8023AD) {
                queue_delayed_work(bond->wq, &bond->ad_work, 0);
                /* register to receive LACPDUs */
                bond->recv_probe = bond_3ad_lacpdu_recv;
@@@ -3344,7 -3310,7 +3340,7 @@@ static void bond_set_rx_mode(struct net
  
  
        rcu_read_lock();
 -      if (USES_PRIMARY(bond->params.mode)) {
 +      if (bond_uses_primary(bond)) {
                slave = rcu_dereference(bond->curr_active_slave);
                if (slave) {
                        dev_uc_sync(slave->dev, bond_dev);
@@@ -3498,7 -3464,7 +3494,7 @@@ static int bond_set_mac_address(struct 
        struct list_head *iter;
        int res = 0;
  
 -      if (bond->params.mode == BOND_MODE_ALB)
 +      if (BOND_MODE(bond) == BOND_MODE_ALB)
                return bond_alb_set_mac_address(bond_dev, addr);
  
  
         * Returning an error causes ifenslave to fail.
         */
        if (bond->params.fail_over_mac &&
 -          bond->params.mode == BOND_MODE_ACTIVEBACKUP)
 +          BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)
                return 0;
  
        if (!is_valid_ether_addr(sa->sa_data))
@@@ -3589,7 -3555,7 +3585,7 @@@ static void bond_xmit_slave_id(struct b
        /* Here we start from the slave with slave_id */
        bond_for_each_slave_rcu(bond, slave, iter) {
                if (--i < 0) {
 -                      if (slave_can_tx(slave)) {
 +                      if (bond_slave_can_tx(slave)) {
                                bond_dev_queue_xmit(bond, skb, slave->dev);
                                return;
                        }
        bond_for_each_slave_rcu(bond, slave, iter) {
                if (--i < 0)
                        break;
 -              if (slave_can_tx(slave)) {
 +              if (bond_slave_can_tx(slave)) {
                        bond_dev_queue_xmit(bond, skb, slave->dev);
                        return;
                }
@@@ -3658,7 -3624,7 +3654,7 @@@ static int bond_xmit_roundrobin(struct 
         */
        if (iph->protocol == IPPROTO_IGMP && skb->protocol == htons(ETH_P_IP)) {
                slave = rcu_dereference(bond->curr_active_slave);
 -              if (slave && slave_can_tx(slave))
 +              if (slave && bond_slave_can_tx(slave))
                        bond_dev_queue_xmit(bond, skb, slave->dev);
                else
                        bond_xmit_slave_id(bond, skb, 0);
@@@ -3696,7 -3662,7 +3692,7 @@@ static int bond_xmit_xor(struct sk_buf
  {
        struct bonding *bond = netdev_priv(bond_dev);
  
 -      bond_xmit_slave_id(bond, skb, bond_xmit_hash(bond, skb, bond->slave_cnt));
 +      bond_xmit_slave_id(bond, skb, bond_xmit_hash(bond, skb) % bond->slave_cnt);
  
        return NETDEV_TX_OK;
  }
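
bond_xmit_hash() now returns the full 32-bit flow hash and each caller reduces
it, as in the balance-xor hunk just above. Roughly, under illustrative names:

  #include <stdint.h>

  /* xor-fold of the 32-bit flow hash, as at the end of bond_xmit_hash() */
  static uint32_t fold_hash(uint32_t hash)
  {
          hash ^= hash >> 16;
          hash ^= hash >> 8;
          return hash;
  }

  /* the modulo now lives at the call site, e.g. balance-xor transmit */
  static unsigned int pick_slave(uint32_t hash, unsigned int slave_cnt)
  {
          return fold_hash(hash) % slave_cnt;     /* slave_cnt > 0 */
  }
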
@@@ -3711,7 -3677,7 +3707,7 @@@ static int bond_xmit_broadcast(struct s
        bond_for_each_slave_rcu(bond, slave, iter) {
                if (bond_is_last_slave(bond, slave))
                        break;
 -              if (IS_UP(slave->dev) && slave->link == BOND_LINK_UP) {
 +              if (bond_slave_is_up(slave) && slave->link == BOND_LINK_UP) {
                        struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
  
                        if (!skb2) {
                        bond_dev_queue_xmit(bond, skb2, slave->dev);
                }
        }
 -      if (slave && IS_UP(slave->dev) && slave->link == BOND_LINK_UP)
 +      if (slave && bond_slave_is_up(slave) && slave->link == BOND_LINK_UP)
                bond_dev_queue_xmit(bond, skb, slave->dev);
        else
                dev_kfree_skb_any(skb);
@@@ -3748,7 -3714,7 +3744,7 @@@ static inline int bond_slave_override(s
        /* Find out if any slaves have the same mapping as this skb. */
        bond_for_each_slave_rcu(bond, slave, iter) {
                if (slave->queue_id == skb->queue_mapping) {
 -                      if (slave_can_tx(slave)) {
 +                      if (bond_slave_can_tx(slave)) {
                                bond_dev_queue_xmit(bond, skb, slave->dev);
                                return 0;
                        }
@@@ -3789,11 -3755,12 +3785,11 @@@ static netdev_tx_t __bond_start_xmit(st
  {
        struct bonding *bond = netdev_priv(dev);
  
 -      if (TX_QUEUE_OVERRIDE(bond->params.mode)) {
 -              if (!bond_slave_override(bond, skb))
 -                      return NETDEV_TX_OK;
 -      }
 +      if (bond_should_override_tx_queue(bond) &&
 +          !bond_slave_override(bond, skb))
 +              return NETDEV_TX_OK;
  
 -      switch (bond->params.mode) {
 +      switch (BOND_MODE(bond)) {
        case BOND_MODE_ROUNDROBIN:
                return bond_xmit_roundrobin(skb, dev);
        case BOND_MODE_ACTIVEBACKUP:
        case BOND_MODE_8023AD:
                return bond_3ad_xmit_xor(skb, dev);
        case BOND_MODE_ALB:
 -      case BOND_MODE_TLB:
                return bond_alb_xmit(skb, dev);
 +      case BOND_MODE_TLB:
 +              return bond_tlb_xmit(skb, dev);
        default:
                /* Should never happen, mode already checked */
                pr_err("%s: Error: Unknown bonding mode %d\n",
 -                     dev->name, bond->params.mode);
 +                     dev->name, BOND_MODE(bond));
                WARN_ON_ONCE(1);
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
@@@ -3851,14 -3817,14 +3847,14 @@@ static int bond_ethtool_get_settings(st
        ecmd->duplex = DUPLEX_UNKNOWN;
        ecmd->port = PORT_OTHER;
  
 -      /* Since SLAVE_IS_OK returns false for all inactive or down slaves, we
 +      /* Since bond_slave_can_tx returns false for all inactive or down slaves, we
         * do not need to check mode.  Though link speed might not represent
         * the true receive or transmit bandwidth (not all modes are symmetric)
         * this is an accurate maximum.
         */
        read_lock(&bond->lock);
        bond_for_each_slave(bond, slave, iter) {
 -              if (SLAVE_IS_OK(slave)) {
 +              if (bond_slave_can_tx(slave)) {
                        if (slave->speed != SPEED_UNKNOWN)
                                speed += slave->speed;
                        if (ecmd->duplex == DUPLEX_UNKNOWN &&
@@@ -4028,8 -3994,7 +4024,8 @@@ static int bond_check_params(struct bon
  
        if (xmit_hash_policy) {
                if ((bond_mode != BOND_MODE_XOR) &&
 -                  (bond_mode != BOND_MODE_8023AD)) {
 +                  (bond_mode != BOND_MODE_8023AD) &&
 +                  (bond_mode != BOND_MODE_TLB)) {
                        pr_info("xmit_hash_policy param is irrelevant in mode %s\n",
                                bond_mode_name(bond_mode));
                } else {
        }
  
        /* reset values for 802.3ad/TLB/ALB */
 -      if (BOND_NO_USES_ARP(bond_mode)) {
 +      if (!bond_mode_uses_arp(bond_mode)) {
                if (!miimon) {
                        pr_warn("Warning: miimon must be specified, otherwise bonding will not detect link failure, speed and duplex which are essential for 802.3ad operation\n");
                        pr_warn("Forcing miimon to 100msec\n");
                   catch mistakes */
                __be32 ip;
                if (!in4_pton(arp_ip_target[i], -1, (u8 *)&ip, -1, NULL) ||
 -                  IS_IP_TARGET_UNUSABLE_ADDRESS(ip)) {
 +                  !bond_is_ip_target_ok(ip)) {
                        pr_warn("Warning: bad arp_ip_target module parameter (%s), ARP monitoring will not be performed\n",
                                arp_ip_target[i]);
                        arp_interval = 0;
                pr_debug("Warning: either miimon or arp_interval and arp_ip_target module parameters must be specified, otherwise bonding will not detect link failures! see bonding.txt for details\n");
        }
  
 -      if (primary && !USES_PRIMARY(bond_mode)) {
 +      if (primary && !bond_mode_uses_primary(bond_mode)) {
                /* currently, using a primary only makes sense
                 * in active backup, TLB or ALB modes
                 */
        params->min_links = min_links;
        params->lp_interval = lp_interval;
        params->packets_per_slave = packets_per_slave;
 +      params->tlb_dynamic_lb = 1; /* Default value */
        if (packets_per_slave > 0) {
                params->reciprocal_packets_per_slave =
                        reciprocal_value(packets_per_slave);
@@@ -70,8 -70,6 +70,8 @@@ static int bond_option_mode_set(struct 
                                const struct bond_opt_value *newval);
  static int bond_option_slaves_set(struct bonding *bond,
                                  const struct bond_opt_value *newval);
 +static int bond_option_tlb_dynamic_lb_set(struct bonding *bond,
 +                                const struct bond_opt_value *newval);
  
  
  static const struct bond_opt_value bond_mode_tbl[] = {
@@@ -127,6 -125,7 +127,7 @@@ static const struct bond_opt_value bond
  static const struct bond_opt_value bond_intmax_tbl[] = {
        { "off",     0,       BOND_VALFLAG_DEFAULT},
        { "maxval",  INT_MAX, BOND_VALFLAG_MAX},
+       { NULL,      -1,      0}
  };
  
  static const struct bond_opt_value bond_lacp_rate_tbl[] = {
@@@ -181,12 -180,6 +182,12 @@@ static const struct bond_opt_value bond
        { NULL,      -1,      0},
  };
  
 +static const struct bond_opt_value bond_tlb_dynamic_lb_tbl[] = {
 +      { "off", 0,  0},
 +      { "on",  1,  BOND_VALFLAG_DEFAULT},
 +      { NULL,  -1, 0}
 +};
 +
  static const struct bond_option bond_opts[] = {
        [BOND_OPT_MODE] = {
                .id = BOND_OPT_MODE,
        [BOND_OPT_XMIT_HASH] = {
                .id = BOND_OPT_XMIT_HASH,
                .name = "xmit_hash_policy",
 -              .desc = "balance-xor and 802.3ad hashing method",
 +              .desc = "balance-xor, 802.3ad, and tlb hashing method",
                .values = bond_xmit_hashtype_tbl,
                .set = bond_option_xmit_hash_policy_set
        },
                .flags = BOND_OPTFLAG_RAWVAL,
                .set = bond_option_slaves_set
        },
 +      [BOND_OPT_TLB_DYNAMIC_LB] = {
 +              .id = BOND_OPT_TLB_DYNAMIC_LB,
 +              .name = "tlb_dynamic_lb",
 +              .desc = "Enable dynamic flow shuffling",
 +              .unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_TLB)),
 +              .values = bond_tlb_dynamic_lb_tbl,
 +              .flags = BOND_OPTFLAG_IFDOWN,
 +              .set = bond_option_tlb_dynamic_lb_set,
 +      },
        { }
  };
  
 +/* Searches for an option by name */
 +const struct bond_option *bond_opt_get_by_name(const char *name)
 +{
 +      const struct bond_option *opt;
 +      int option;
 +
 +      for (option = 0; option < BOND_OPT_LAST; option++) {
 +              opt = bond_opt_get(option);
 +              if (opt && !strcmp(opt->name, name))
 +                      return opt;
 +      }
 +
 +      return NULL;
 +}
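
bond_opt_get_by_name() is a plain linear scan keyed on each option's name
string. Modelled here as a direct walk over a sentinel-terminated table (the
entries are invented for illustration):

  #include <stddef.h>
  #include <string.h>

  struct option { const char *name; int id; };

  static const struct option opts[] = {
          { "mode",           0 },
          { "tlb_dynamic_lb", 1 },
          { NULL,            -1 },        /* table sentinel */
  };

  static const struct option *opt_get_by_name(const char *name)
  {
          const struct option *o;

          for (o = opts; o->name; o++)
                  if (!strcmp(o->name, name))
                          return o;
          return NULL;
  }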
 +
  /* Searches for a value in opt's values[] table */
  const struct bond_opt_value *bond_opt_get_val(unsigned int option, u64 val)
  {
@@@ -672,7 -641,7 +673,7 @@@ const struct bond_option *bond_opt_get(
  
  int bond_option_mode_set(struct bonding *bond, const struct bond_opt_value *newval)
  {
 -      if (BOND_NO_USES_ARP(newval->value) && bond->params.arp_interval) {
 +      if (!bond_mode_uses_arp(newval->value) && bond->params.arp_interval) {
                pr_info("%s: %s mode is incompatible with arp monitoring, start mii monitoring\n",
                        bond->dev->name, newval->string);
                /* disable arp monitoring */
  static struct net_device *__bond_option_active_slave_get(struct bonding *bond,
                                                         struct slave *slave)
  {
 -      return USES_PRIMARY(bond->params.mode) && slave ? slave->dev : NULL;
 +      return bond_uses_primary(bond) && slave ? slave->dev : NULL;
  }
  
  struct net_device *bond_option_active_slave_get_rcu(struct bonding *bond)
@@@ -758,7 -727,7 +759,7 @@@ static int bond_option_active_slave_set
                                bond->dev->name, new_active->dev->name);
                } else {
                        if (old_active && (new_active->link == BOND_LINK_UP) &&
 -                          IS_UP(new_active->dev)) {
 +                          bond_slave_is_up(new_active)) {
                                pr_info("%s: Setting %s as active slave\n",
                                        bond->dev->name, new_active->dev->name);
                                bond_change_active_slave(bond, new_active);
        return ret;
  }
  
 +/* There are two tricky bits here.  First, if MII monitoring is activated, then
 + * we must disable ARP monitoring.  Second, if the timer isn't running, we must
 + * start it.
 + */
  static int bond_option_miimon_set(struct bonding *bond,
                                  const struct bond_opt_value *newval)
  {
        return 0;
  }
  
 +/* Set up and down delays. These must be multiples of the
 + * MII monitoring value, and are stored internally as the multiplier.
 + * Thus, we must translate to ms for the real world.
 + */
  static int bond_option_updelay_set(struct bonding *bond,
                                   const struct bond_opt_value *newval)
  {
@@@ -881,10 -842,6 +882,10 @@@ static int bond_option_use_carrier_set(
        return 0;
  }
  
 +/* There are two tricky bits here.  First, if ARP monitoring is activated, then
 + * we must disable MII monitoring.  Second, if the ARP timer isn't running,
 + * we must start it.
 + */
  static int bond_option_arp_interval_set(struct bonding *bond,
                                        const struct bond_opt_value *newval)
  {
@@@ -942,7 -899,7 +943,7 @@@ static int _bond_option_arp_ip_target_a
        __be32 *targets = bond->params.arp_targets;
        int ind;
  
 -      if (IS_IP_TARGET_UNUSABLE_ADDRESS(target)) {
 +      if (!bond_is_ip_target_ok(target)) {
                pr_err("%s: invalid ARP target %pI4 specified for addition\n",
                       bond->dev->name, &target);
                return -EINVAL;
@@@ -987,7 -944,7 +988,7 @@@ static int bond_option_arp_ip_target_re
        unsigned long *targets_rx;
        int ind, i;
  
 -      if (IS_IP_TARGET_UNUSABLE_ADDRESS(target)) {
 +      if (!bond_is_ip_target_ok(target)) {
                pr_err("%s: invalid ARP target %pI4 specified for removal\n",
                       bond->dev->name, &target);
                return -EINVAL;
@@@ -1381,13 -1338,3 +1382,13 @@@ err_no_cmd
        ret = -EPERM;
        goto out;
  }
 +
 +static int bond_option_tlb_dynamic_lb_set(struct bonding *bond,
 +                                        const struct bond_opt_value *newval)
 +{
 +      pr_info("%s: Setting dynamic-lb to %s (%llu)\n",
 +              bond->dev->name, newval->string, newval->value);
 +      bond->params.tlb_dynamic_lb = newval->value;
 +
 +      return 0;
 +}
  
  #define bond_version DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n"
  
+ #define BOND_MAX_VLAN_ENCAP   2
  #define BOND_MAX_ARP_TARGETS  16
  
  #define BOND_DEFAULT_MIIMON   100
  
 -#define IS_UP(dev)                                       \
 -            ((((dev)->flags & IFF_UP) == IFF_UP)      && \
 -             netif_running(dev)                       && \
 -             netif_carrier_ok(dev))
 -
 -/*
 - * Checks whether slave is ready for transmit.
 - */
 -#define SLAVE_IS_OK(slave)                            \
 -                  (((slave)->dev->flags & IFF_UP)  && \
 -                   netif_running((slave)->dev)     && \
 -                   ((slave)->link == BOND_LINK_UP) && \
 -                   bond_is_active_slave(slave))
 -
 -
 -#define USES_PRIMARY(mode)                            \
 -              (((mode) == BOND_MODE_ACTIVEBACKUP) ||  \
 -               ((mode) == BOND_MODE_TLB)          ||  \
 -               ((mode) == BOND_MODE_ALB))
 -
 -#define BOND_NO_USES_ARP(mode)                                \
 -              (((mode) == BOND_MODE_8023AD)   ||      \
 -               ((mode) == BOND_MODE_TLB)      ||      \
 -               ((mode) == BOND_MODE_ALB))
 -
 -#define TX_QUEUE_OVERRIDE(mode)                               \
 -                      (((mode) == BOND_MODE_ACTIVEBACKUP) ||  \
 -                       ((mode) == BOND_MODE_ROUNDROBIN))
 -
 -#define BOND_MODE_IS_LB(mode)                 \
 -              (((mode) == BOND_MODE_TLB) ||   \
 -               ((mode) == BOND_MODE_ALB))
 -
 -#define IS_IP_TARGET_UNUSABLE_ADDRESS(a)      \
 -      ((htonl(INADDR_BROADCAST) == a) ||      \
 -       ipv4_is_zeronet(a))
  /*
   * Less bad way to call ioctl from within the kernel; this needs to be
   * done some other way to get the call out of interrupt context.
@@@ -53,8 -90,6 +54,8 @@@
        set_fs(fs);                     \
        res; })
  
 +#define BOND_MODE(bond) ((bond)->params.mode)
 +
  /* slave list primitives */
  #define bond_slave_list(bond) (&(bond)->dev->adj_list.lower)
  
@@@ -140,7 -175,6 +141,7 @@@ struct bond_params 
        int resend_igmp;
        int lp_interval;
        int packets_per_slave;
 +      int tlb_dynamic_lb;
        struct reciprocal_value reciprocal_packets_per_slave;
  };
  
@@@ -149,6 -183,8 +150,6 @@@ struct bond_parm_tbl 
        int mode;
  };
  
 -#define BOND_MAX_MODENAME_LEN 20
 -
  struct slave {
        struct net_device *dev; /* first - useful for panic debug */
        struct bonding *bond; /* our master */
        u32    speed;
        u16    queue_id;
        u8     perm_hwaddr[ETH_ALEN];
 -      struct ad_slave_info ad_info; /* HUGE - better to dynamically alloc */
 +      struct ad_slave_info *ad_info;
        struct tlb_slave_info tlb_info;
  #ifdef CONFIG_NET_POLL_CONTROLLER
        struct netpoll *np;
@@@ -249,41 -285,14 +250,41 @@@ static inline struct slave *bond_get_sl
  
  static inline struct bonding *bond_get_bond_by_slave(struct slave *slave)
  {
 -      if (!slave || !slave->bond)
 -              return NULL;
        return slave->bond;
  }
  
 +static inline bool bond_should_override_tx_queue(struct bonding *bond)
 +{
 +      return BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP ||
 +             BOND_MODE(bond) == BOND_MODE_ROUNDROBIN;
 +}
 +
  static inline bool bond_is_lb(const struct bonding *bond)
  {
 -      return BOND_MODE_IS_LB(bond->params.mode);
 +      return BOND_MODE(bond) == BOND_MODE_TLB ||
 +             BOND_MODE(bond) == BOND_MODE_ALB;
 +}
 +
 +static inline bool bond_mode_uses_arp(int mode)
 +{
 +      return mode != BOND_MODE_8023AD && mode != BOND_MODE_TLB &&
 +             mode != BOND_MODE_ALB;
 +}
 +
 +static inline bool bond_mode_uses_primary(int mode)
 +{
 +      return mode == BOND_MODE_ACTIVEBACKUP || mode == BOND_MODE_TLB ||
 +             mode == BOND_MODE_ALB;
 +}
 +
 +static inline bool bond_uses_primary(struct bonding *bond)
 +{
 +      return bond_mode_uses_primary(BOND_MODE(bond));
 +}
 +
 +static inline bool bond_slave_is_up(struct slave *slave)
 +{
 +      return netif_running(slave->dev) && netif_carrier_ok(slave->dev);
  }
  
  static inline void bond_set_active_slave(struct slave *slave)
@@@ -356,12 -365,6 +357,12 @@@ static inline bool bond_is_active_slave
        return !bond_slave_state(slave);
  }
  
 +static inline bool bond_slave_can_tx(struct slave *slave)
 +{
 +      return bond_slave_is_up(slave) && slave->link == BOND_LINK_UP &&
 +             bond_is_active_slave(slave);
 +}
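
The IS_UP()/SLAVE_IS_OK() macros become typed static inlines, with
bond_slave_can_tx() layered on bond_slave_is_up(). The same layering in a
standalone sketch, where the struct fields stand in for the netdev state bits:

  #include <stdbool.h>

  struct nic {
          bool running;   /* netif_running() */
          bool carrier;   /* netif_carrier_ok() */
          bool link_up;   /* link == BOND_LINK_UP */
          bool active;    /* bond_is_active_slave() */
  };

  static inline bool nic_is_up(const struct nic *s)
  {
          return s->running && s->carrier;
  }

  static inline bool nic_can_tx(const struct nic *s)
  {
          return nic_is_up(s) && s->link_up && s->active;
  }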
 +
  #define BOND_PRI_RESELECT_ALWAYS      0
  #define BOND_PRI_RESELECT_BETTER      1
  #define BOND_PRI_RESELECT_FAILURE     2
@@@ -393,16 -396,12 +394,16 @@@ static inline int slave_do_arp_validate
        return bond->params.arp_validate & (1 << bond_slave_state(slave));
  }
  
 -static inline int slave_do_arp_validate_only(struct bonding *bond,
 -                                           struct slave *slave)
 +static inline int slave_do_arp_validate_only(struct bonding *bond)
  {
        return bond->params.arp_validate & BOND_ARP_FILTER;
  }
  
 +static inline int bond_is_ip_target_ok(__be32 addr)
 +{
 +      return !ipv4_is_lbcast(addr) && !ipv4_is_zeronet(addr);
 +}
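
bond_is_ip_target_ok() turns the old negative macro into a positive predicate:
an ARP target is usable unless it is the limited broadcast address or falls in
the 0.0.0.0/8 zero network. An equivalent userspace check, taking a big-endian
address as __be32 does:

  #include <stdbool.h>
  #include <stdint.h>
  #include <netinet/in.h>
  #include <arpa/inet.h>

  static bool ip_target_ok(uint32_t addr_be)
  {
          uint32_t a = ntohl(addr_be);

          return a != INADDR_BROADCAST && (a >> 24) != 0;
  }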
 +
  /* Get the oldest arp which we've received on this slave for bond's
   * arp_targets.
   */
@@@ -480,14 -479,16 +481,14 @@@ static inline __be32 bond_confirm_addr(
        return addr;
  }
  
 -static inline bool slave_can_tx(struct slave *slave)
 -{
 -      if (IS_UP(slave->dev) && slave->link == BOND_LINK_UP &&
 -          bond_is_active_slave(slave))
 -              return true;
 -      else
 -              return false;
 -}
 -
 -struct bond_net;
 +struct bond_net {
 +      struct net              *net;   /* Associated network namespace */
 +      struct list_head        dev_list;
 +#ifdef CONFIG_PROC_FS
 +      struct proc_dir_entry   *proc_dir;
 +#endif
 +      struct class_attribute  class_attr_bonding_masters;
 +};
  
  int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond, struct slave *slave);
  void bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev);
@@@ -499,7 -500,7 +500,7 @@@ int bond_sysfs_slave_add(struct slave *
  void bond_sysfs_slave_del(struct slave *slave);
  int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev);
  int bond_release(struct net_device *bond_dev, struct net_device *slave_dev);
 -int bond_xmit_hash(struct bonding *bond, struct sk_buff *skb, int count);
 +u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb);
  void bond_select_active_slave(struct bonding *bond);
  void bond_change_active_slave(struct bonding *bond, struct slave *new_active);
  void bond_create_debugfs(void);
@@@ -516,6 -517,15 +517,6 @@@ struct net_device *bond_option_active_s
  struct net_device *bond_option_active_slave_get(struct bonding *bond);
  const char *bond_slave_link_status(s8 link);
  
 -struct bond_net {
 -      struct net *            net;    /* Associated network namespace */
 -      struct list_head        dev_list;
 -#ifdef CONFIG_PROC_FS
 -      struct proc_dir_entry * proc_dir;
 -#endif
 -      struct class_attribute  class_attr_bonding_masters;
 -};
 -
  #ifdef CONFIG_PROC_FS
  void bond_create_proc_entry(struct bonding *bond);
  void bond_remove_proc_entry(struct bonding *bond);
@@@ -252,7 -252,8 +252,7 @@@ static void c_can_obj_update(struct net
        struct c_can_priv *priv = netdev_priv(dev);
        int cnt, reg = C_CAN_IFACE(COMREQ_REG, iface);
  
 -      priv->write_reg(priv, reg + 1, cmd);
 -      priv->write_reg(priv, reg, obj);
 +      priv->write_reg32(priv, reg, (cmd << 16) | obj);
  
        for (cnt = MIN_TIMEOUT_VALUE; cnt; cnt--) {
                if (!(priv->read_reg(priv, reg) & IF_COMR_BUSY))
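
The c_can change folds the command/object register pair into one access
through the new write_reg32 hook, halving the bus transactions and closing the
window between the two halves. A sketch of the equivalence, assuming (as the
driver does here) that the two adjacent 16-bit registers map onto one aligned
little-endian 32-bit word:

  #include <stdint.h>

  /* stand-in for a 32-bit MMIO write */
  static inline void write_reg32(volatile uint32_t *reg, uint32_t val)
  {
          *reg = val;
  }

  static inline void obj_update(volatile uint32_t *comreq,
                                uint16_t cmd, uint16_t obj)
  {
          /* was two 16-bit writes: reg + 1 <- cmd, then reg <- obj;
           * one 32-bit write of (cmd << 16) | obj hits both halves */
          write_reg32(comreq, ((uint32_t)cmd << 16) | obj);
  }
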
@@@ -327,7 -328,8 +327,7 @@@ static void c_can_setup_tx_object(struc
                change_bit(idx, &priv->tx_dir);
        }
  
 -      priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface), arb);
 -      priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface), arb >> 16);
 +      priv->write_reg32(priv, C_CAN_IFACE(ARB1_REG, iface), arb);
  
        priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), ctrl);
  
@@@ -389,7 -391,8 +389,7 @@@ static int c_can_read_msg_object(struc
  
        frame->can_dlc = get_can_dlc(ctrl & 0x0F);
  
 -      arb = priv->read_reg(priv, C_CAN_IFACE(ARB1_REG, iface));
 -      arb |= priv->read_reg(priv, C_CAN_IFACE(ARB2_REG, iface)) << 16;
 +      arb = priv->read_reg32(priv, C_CAN_IFACE(ARB1_REG, iface));
  
        if (arb & IF_ARB_MSGXTD)
                frame->can_id = (arb & CAN_EFF_MASK) | CAN_EFF_FLAG;
@@@ -421,10 -424,12 +421,10 @@@ static void c_can_setup_receive_object(
        struct c_can_priv *priv = netdev_priv(dev);
  
        mask |= BIT(29);
 -      priv->write_reg(priv, C_CAN_IFACE(MASK1_REG, iface), mask);
 -      priv->write_reg(priv, C_CAN_IFACE(MASK2_REG, iface), mask >> 16);
 +      priv->write_reg32(priv, C_CAN_IFACE(MASK1_REG, iface), mask);
  
        id |= IF_ARB_MSGVAL;
 -      priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface), id);
 -      priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface), id >> 16);
 +      priv->write_reg32(priv, C_CAN_IFACE(ARB1_REG, iface), id);
  
        priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), mcont);
        c_can_object_put(dev, iface, obj, IF_COMM_RCV_SETUP);
@@@ -727,26 -732,12 +727,12 @@@ static u32 c_can_adjust_pending(u32 pen
  static inline void c_can_rx_object_get(struct net_device *dev,
                                       struct c_can_priv *priv, u32 obj)
  {
- #ifdef CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING
-       if (obj < C_CAN_MSG_RX_LOW_LAST)
-               c_can_object_get(dev, IF_RX, obj, IF_COMM_RCV_LOW);
-       else
- #endif
                c_can_object_get(dev, IF_RX, obj, priv->comm_rcv_high);
  }
  
  static inline void c_can_rx_finalize(struct net_device *dev,
                                     struct c_can_priv *priv, u32 obj)
  {
- #ifdef CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING
-       if (obj < C_CAN_MSG_RX_LOW_LAST)
-               priv->rxmasked |= BIT(obj - 1);
-       else if (obj == C_CAN_MSG_RX_LOW_LAST) {
-               priv->rxmasked = 0;
-               /* activate all lower message objects */
-               c_can_activate_all_lower_rx_msg_obj(dev, IF_RX);
-       }
- #endif
        if (priv->type != BOSCH_D_CAN)
                c_can_object_get(dev, IF_RX, obj, IF_COMM_CLR_NEWDAT);
  }
@@@ -794,9 -785,6 +780,6 @@@ static inline u32 c_can_get_pending(str
  {
        u32 pend = priv->read_reg(priv, C_CAN_NEWDAT1_REG);
  
- #ifdef CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING
-       pend &= ~priv->rxmasked;
- #endif
        return pend;
  }
  
   * has arrived. To work around this issue, we keep two groups of message
   * objects whose partitioning is defined by C_CAN_MSG_OBJ_RX_SPLIT.
   *
-  * If CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING = y
-  *
-  * To ensure in-order frame reception we use the following
-  * approach while re-activating a message object to receive further
-  * frames:
-  * - if the current message object number is lower than
-  *   C_CAN_MSG_RX_LOW_LAST, do not clear the NEWDAT bit while clearing
-  *   the INTPND bit.
-  * - if the current message object number is equal to
-  *   C_CAN_MSG_RX_LOW_LAST then clear the NEWDAT bit of all lower
-  *   receive message objects.
-  * - if the current message object number is greater than
-  *   C_CAN_MSG_RX_LOW_LAST then clear the NEWDAT bit of
-  *   only this message object.
-  *
-  * This can cause packet loss!
-  *
-  * If CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING = n
-  *
   * We clear the newdat bit right away.
   *
   * This can result in packet reordering when the readout is slow.
@@@ -20,8 -20,8 +20,8 @@@
  #include "altera_sgdmahw.h"
  #include "altera_sgdma.h"
  
- static void sgdma_setup_descrip(struct sgdma_descrip *desc,
-                               struct sgdma_descrip *ndesc,
+ static void sgdma_setup_descrip(struct sgdma_descrip __iomem *desc,
+                               struct sgdma_descrip __iomem *ndesc,
                                dma_addr_t ndesc_phys,
                                dma_addr_t raddr,
                                dma_addr_t waddr,
                                int wfixed);
  
  static int sgdma_async_write(struct altera_tse_private *priv,
-                             struct sgdma_descrip *desc);
+                             struct sgdma_descrip __iomem *desc);
  
  static int sgdma_async_read(struct altera_tse_private *priv);
  
  static dma_addr_t
  sgdma_txphysaddr(struct altera_tse_private *priv,
-                struct sgdma_descrip *desc);
+                struct sgdma_descrip __iomem *desc);
  
  static dma_addr_t
  sgdma_rxphysaddr(struct altera_tse_private *priv,
-                struct sgdma_descrip *desc);
+                struct sgdma_descrip __iomem *desc);
  
  static int sgdma_txbusy(struct altera_tse_private *priv);
  
@@@ -79,7 -79,8 +79,8 @@@ int sgdma_initialize(struct altera_tse_
        priv->rxdescphys = (dma_addr_t) 0;
        priv->txdescphys = (dma_addr_t) 0;
  
-       priv->rxdescphys = dma_map_single(priv->device, priv->rx_dma_desc,
+       priv->rxdescphys = dma_map_single(priv->device,
+                                         (void __force *)priv->rx_dma_desc,
                                          priv->rxdescmem, DMA_BIDIRECTIONAL);
  
        if (dma_mapping_error(priv->device, priv->rxdescphys)) {
@@@ -88,7 -89,8 +89,8 @@@
                return -EINVAL;
        }
  
-       priv->txdescphys = dma_map_single(priv->device, priv->tx_dma_desc,
+       priv->txdescphys = dma_map_single(priv->device,
+                                         (void __force *)priv->tx_dma_desc,
                                          priv->txdescmem, DMA_TO_DEVICE);
  
        if (dma_mapping_error(priv->device, priv->txdescphys)) {
        }
  
        /* Initialize descriptor memory to all 0's, sync memory to cache */
-       memset(priv->tx_dma_desc, 0, priv->txdescmem);
-       memset(priv->rx_dma_desc, 0, priv->rxdescmem);
+       memset_io(priv->tx_dma_desc, 0, priv->txdescmem);
+       memset_io(priv->rx_dma_desc, 0, priv->rxdescmem);
  
        dma_sync_single_for_device(priv->device, priv->txdescphys,
                                   priv->txdescmem, DMA_TO_DEVICE);
@@@ -126,22 -128,15 +128,15 @@@ void sgdma_uninitialize(struct altera_t
   */
  void sgdma_reset(struct altera_tse_private *priv)
  {
-       u32 *ptxdescripmem = priv->tx_dma_desc;
-       u32 txdescriplen   = priv->txdescmem;
-       u32 *prxdescripmem = priv->rx_dma_desc;
-       u32 rxdescriplen   = priv->rxdescmem;
-       struct sgdma_csr *ptxsgdma = priv->tx_dma_csr;
-       struct sgdma_csr *prxsgdma = priv->rx_dma_csr;
        /* Initialize descriptor memory to 0 */
-       memset(ptxdescripmem, 0, txdescriplen);
-       memset(prxdescripmem, 0, rxdescriplen);
+       memset_io(priv->tx_dma_desc, 0, priv->txdescmem);
+       memset_io(priv->rx_dma_desc, 0, priv->rxdescmem);
  
-       iowrite32(SGDMA_CTRLREG_RESET, &ptxsgdma->control);
-       iowrite32(0, &ptxsgdma->control);
+       csrwr32(SGDMA_CTRLREG_RESET, priv->tx_dma_csr, sgdma_csroffs(control));
+       csrwr32(0, priv->tx_dma_csr, sgdma_csroffs(control));
  
-       iowrite32(SGDMA_CTRLREG_RESET, &prxsgdma->control);
-       iowrite32(0, &prxsgdma->control);
+       csrwr32(SGDMA_CTRLREG_RESET, priv->rx_dma_csr, sgdma_csroffs(control));
+       csrwr32(0, priv->rx_dma_csr, sgdma_csroffs(control));
  }
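
Throughout the Altera TSE cleanup, struct-overlay MMIO dereferences such as
&csr->control give way to typed accessors taking a base pointer plus a byte
offset, which keeps the __iomem annotation (and sparse checking) intact. A
userspace approximation of the accessor shape; the layout struct and names
are illustrative:

  #include <stdint.h>
  #include <stddef.h>

  struct csr_layout { uint32_t status, control; };

  /* byte offset of a CSR field, mirroring sgdma_csroffs() */
  #define csroffs(field) offsetof(struct csr_layout, field)

  /* base + offset accessors; the kernel versions wrap writel()/readl()
   * on an __iomem base */
  static inline void csrwr32(uint32_t val, volatile void *base, size_t off)
  {
          *(volatile uint32_t *)((volatile uint8_t *)base + off) = val;
  }

  static inline uint32_t csrrd32(volatile void *base, size_t off)
  {
          return *(volatile uint32_t *)((volatile uint8_t *)base + off);
  }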
  
  /* For SGDMA, interrupts remain enabled after initially enabling,
@@@ -167,14 -162,14 +162,14 @@@ void sgdma_disable_txirq(struct altera_
  
  void sgdma_clear_rxirq(struct altera_tse_private *priv)
  {
-       struct sgdma_csr *csr = priv->rx_dma_csr;
-       tse_set_bit(&csr->control, SGDMA_CTRLREG_CLRINT);
+       tse_set_bit(priv->rx_dma_csr, sgdma_csroffs(control),
+                   SGDMA_CTRLREG_CLRINT);
  }
  
  void sgdma_clear_txirq(struct altera_tse_private *priv)
  {
-       struct sgdma_csr *csr = priv->tx_dma_csr;
-       tse_set_bit(&csr->control, SGDMA_CTRLREG_CLRINT);
+       tse_set_bit(priv->tx_dma_csr, sgdma_csroffs(control),
+                   SGDMA_CTRLREG_CLRINT);
  }
  
  /* transmits buffer through SGDMA. Returns number of buffers
   */
  int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
  {
-       int pktstx = 0;
-       struct sgdma_descrip *descbase = priv->tx_dma_desc;
+       struct sgdma_descrip __iomem *descbase =
+               (struct sgdma_descrip __iomem *)priv->tx_dma_desc;
  
-       struct sgdma_descrip *cdesc = &descbase[0];
-       struct sgdma_descrip *ndesc = &descbase[1];
+       struct sgdma_descrip __iomem *cdesc = &descbase[0];
+       struct sgdma_descrip __iomem *ndesc = &descbase[1];
  
        /* wait 'til the tx sgdma is ready for the next transmit request */
        if (sgdma_txbusy(priv))
                            0,                          /* read fixed */
                            SGDMA_CONTROL_WR_FIXED);    /* Generate SOP */
  
-       pktstx = sgdma_async_write(priv, cdesc);
+       sgdma_async_write(priv, cdesc);
  
        /* enqueue the request to the pending transmit queue */
        queue_tx(priv, buffer);
  u32 sgdma_tx_completions(struct altera_tse_private *priv)
  {
        u32 ready = 0;
-       struct sgdma_descrip *desc = priv->tx_dma_desc;
  
        if (!sgdma_txbusy(priv) &&
-           ((desc->control & SGDMA_CONTROL_HW_OWNED) == 0) &&
+           ((csrrd8(priv->tx_dma_desc, sgdma_descroffs(control))
+            & SGDMA_CONTROL_HW_OWNED) == 0) &&
            (dequeue_tx(priv))) {
                ready = 1;
        }
@@@ -245,32 -240,31 +240,31 @@@ void sgdma_add_rx_desc(struct altera_ts
   */
  u32 sgdma_rx_status(struct altera_tse_private *priv)
  {
-       struct sgdma_csr *csr = priv->rx_dma_csr;
-       struct sgdma_descrip *base = priv->rx_dma_desc;
-       struct sgdma_descrip *desc = NULL;
-       int pktsrx;
-       unsigned int rxstatus = 0;
-       unsigned int pktlength = 0;
-       unsigned int pktstatus = 0;
+       struct sgdma_descrip __iomem *base =
+               (struct sgdma_descrip __iomem *)priv->rx_dma_desc;
+       struct sgdma_descrip __iomem *desc = NULL;
        struct tse_buffer *rxbuffer = NULL;
+       unsigned int rxstatus = 0;
  
-       u32 sts = ioread32(&csr->status);
+       u32 sts = csrrd32(priv->rx_dma_csr, sgdma_csroffs(status));
  
        desc = &base[0];
        if (sts & SGDMA_STSREG_EOP) {
+               unsigned int pktlength = 0;
+               unsigned int pktstatus = 0;
                dma_sync_single_for_cpu(priv->device,
                                        priv->rxdescphys,
                                        priv->sgdmadesclen,
                                        DMA_FROM_DEVICE);
  
-               pktlength = desc->bytes_xferred;
-               pktstatus = desc->status & 0x3f;
-               rxstatus = pktstatus;
+               pktlength = csrrd16(desc, sgdma_descroffs(bytes_xferred));
+               pktstatus = csrrd8(desc, sgdma_descroffs(status));
+               rxstatus = pktstatus & ~SGDMA_STATUS_EOP;
                rxstatus = rxstatus << 16;
                rxstatus |= (pktlength & 0xffff);
  
                if (rxstatus) {
-                       desc->status = 0;
+                       csrwr8(0, desc, sgdma_descroffs(status));
  
                        rxbuffer = dequeue_rx(priv);
                        if (rxbuffer == NULL)
                                            "sgdma rx and rx queue empty!\n");
  
                        /* Clear control */
-                       iowrite32(0, &csr->control);
+                       csrwr32(0, priv->rx_dma_csr, sgdma_csroffs(control));
                        /* clear status */
-                       iowrite32(0xf, &csr->status);
+                       csrwr32(0xf, priv->rx_dma_csr, sgdma_csroffs(status));
  
                        /* kick the rx sgdma after reaping this descriptor */
-                       pktsrx = sgdma_async_read(priv);
+                       sgdma_async_read(priv);
  
                } else {
                        /* If the SGDMA indicated an end of packet on recv,
                         */
                        netdev_err(priv->dev,
                                   "SGDMA RX Error Info: %x, %x, %x\n",
-                                  sts, desc->status, rxstatus);
+                                  sts, csrrd8(desc, sgdma_descroffs(status)),
+                                  rxstatus);
                }
        } else if (sts == 0) {
-               pktsrx = sgdma_async_read(priv);
+               sgdma_async_read(priv);
        }
  
        return rxstatus;
  
  
  /* Private functions */
- static void sgdma_setup_descrip(struct sgdma_descrip *desc,
-                               struct sgdma_descrip *ndesc,
+ static void sgdma_setup_descrip(struct sgdma_descrip __iomem *desc,
+                               struct sgdma_descrip __iomem *ndesc,
                                dma_addr_t ndesc_phys,
                                dma_addr_t raddr,
                                dma_addr_t waddr,
                                int wfixed)
  {
        /* Clear the next descriptor as not owned by hardware */
-       u32 ctrl = ndesc->control;
+       u32 ctrl = csrrd8(ndesc, sgdma_descroffs(control));
        ctrl &= ~SGDMA_CONTROL_HW_OWNED;
-       ndesc->control = ctrl;
+       csrwr8(ctrl, ndesc, sgdma_descroffs(control));
  
-       ctrl = 0;
        ctrl = SGDMA_CONTROL_HW_OWNED;
        ctrl |= generate_eop;
        ctrl |= rfixed;
        ctrl |= wfixed;
  
        /* Channel is implicitly zero, initialized to 0 by default */
-       desc->raddr = raddr;
-       desc->waddr = waddr;
-       desc->next = lower_32_bits(ndesc_phys);
-       desc->control = ctrl;
-       desc->status = 0;
-       desc->rburst = 0;
-       desc->wburst = 0;
-       desc->bytes = length;
-       desc->bytes_xferred = 0;
+       csrwr32(lower_32_bits(raddr), desc, sgdma_descroffs(raddr));
+       csrwr32(lower_32_bits(waddr), desc, sgdma_descroffs(waddr));
+       csrwr32(0, desc, sgdma_descroffs(pad1));
+       csrwr32(0, desc, sgdma_descroffs(pad2));
+       csrwr32(lower_32_bits(ndesc_phys), desc, sgdma_descroffs(next));
+       csrwr8(ctrl, desc, sgdma_descroffs(control));
+       csrwr8(0, desc, sgdma_descroffs(status));
+       csrwr8(0, desc, sgdma_descroffs(wburst));
+       csrwr8(0, desc, sgdma_descroffs(rburst));
+       csrwr16(length, desc, sgdma_descroffs(bytes));
+       csrwr16(0, desc, sgdma_descroffs(bytes_xferred));
  }
  
  /* If hardware is busy, don't restart async read.
   */
  static int sgdma_async_read(struct altera_tse_private *priv)
  {
-       struct sgdma_csr *csr = priv->rx_dma_csr;
-       struct sgdma_descrip *descbase = priv->rx_dma_desc;
-       struct sgdma_descrip *cdesc = &descbase[0];
-       struct sgdma_descrip *ndesc = &descbase[1];
+       struct sgdma_descrip __iomem *descbase =
+               (struct sgdma_descrip __iomem *)priv->rx_dma_desc;
  
 -
+       struct sgdma_descrip __iomem *cdesc = &descbase[0];
+       struct sgdma_descrip __iomem *ndesc = &descbase[1];
        struct tse_buffer *rxbuffer = NULL;
  
        if (!sgdma_rxbusy(priv)) {
                                           priv->sgdmadesclen,
                                           DMA_TO_DEVICE);
  
-               iowrite32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)),
-                         &csr->next_descrip);
+               csrwr32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)),
+                       priv->rx_dma_csr,
+                       sgdma_csroffs(next_descrip));
  
-               iowrite32((priv->rxctrlreg | SGDMA_CTRLREG_START),
-                         &csr->control);
+               csrwr32((priv->rxctrlreg | SGDMA_CTRLREG_START),
+                       priv->rx_dma_csr,
+                       sgdma_csroffs(control));
  
                return 1;
        }
  }
  
  static int sgdma_async_write(struct altera_tse_private *priv,
-                            struct sgdma_descrip *desc)
+                            struct sgdma_descrip __iomem *desc)
  {
-       struct sgdma_csr *csr = priv->tx_dma_csr;
        if (sgdma_txbusy(priv))
                return 0;
  
        /* clear control and status */
-       iowrite32(0, &csr->control);
-       iowrite32(0x1f, &csr->status);
+       csrwr32(0, priv->tx_dma_csr, sgdma_csroffs(control));
+       csrwr32(0x1f, priv->tx_dma_csr, sgdma_csroffs(status));
  
        dma_sync_single_for_device(priv->device, priv->txdescphys,
                                   priv->sgdmadesclen, DMA_TO_DEVICE);
  
-       iowrite32(lower_32_bits(sgdma_txphysaddr(priv, desc)),
-                 &csr->next_descrip);
+       csrwr32(lower_32_bits(sgdma_txphysaddr(priv, desc)),
+               priv->tx_dma_csr,
+               sgdma_csroffs(next_descrip));
  
-       iowrite32((priv->txctrlreg | SGDMA_CTRLREG_START),
-                 &csr->control);
+       csrwr32((priv->txctrlreg | SGDMA_CTRLREG_START),
+               priv->tx_dma_csr,
+               sgdma_csroffs(control));
  
        return 1;
  }
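
Both async helpers above follow a fixed choreography: quiesce control, acknowledge stale status bits, sync the descriptor toward the device, program the next-descriptor pointer, and only then set START, because the start bit consumes whatever the pointer register holds at that instant. A hedged sketch of that ordering, with a caller-supplied hook standing in for dma_sync_single_for_device() and placeholder bit values:

    #include <stdint.h>

    struct dma_regs {
            volatile uint32_t control;
            volatile uint32_t status;
            volatile uint32_t next_descrip;
    };

    #define CTRLREG_START 0x1u                         /* placeholder bit */

    static void dma_kick(struct dma_regs *regs, uint32_t desc_phys,
                         uint32_t ctrlbits, void (*sync_to_device)(void))
    {
            regs->control = 0;                         /* 1: stop taking work */
            regs->status = 0x1f;                       /* 2: write-1-to-clear */

            sync_to_device();                          /* 3: descriptor visible to HW */

            regs->next_descrip = desc_phys;            /* 4: where to fetch */
            regs->control = ctrlbits | CTRLREG_START;  /* 5: go */
    }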
  
  static dma_addr_t
  sgdma_txphysaddr(struct altera_tse_private *priv,
-                struct sgdma_descrip *desc)
+                struct sgdma_descrip __iomem *desc)
  {
        dma_addr_t paddr = priv->txdescmem_busaddr;
        uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->tx_dma_desc;
  
  static dma_addr_t
  sgdma_rxphysaddr(struct altera_tse_private *priv,
-                struct sgdma_descrip *desc)
+                struct sgdma_descrip __iomem *desc)
  {
        dma_addr_t paddr = priv->rxdescmem_busaddr;
        uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->rx_dma_desc;
@@@ -515,8 -516,8 +515,8 @@@ queue_rx_peekhead(struct altera_tse_pri
   */
  static int sgdma_rxbusy(struct altera_tse_private *priv)
  {
-       struct sgdma_csr *csr = priv->rx_dma_csr;
-       return ioread32(&csr->status) & SGDMA_STSREG_BUSY;
+       return csrrd32(priv->rx_dma_csr, sgdma_csroffs(status))
+                      & SGDMA_STSREG_BUSY;
  }
  
  /* waits for the tx sgdma to finish its current operation, returns 0
  static int sgdma_txbusy(struct altera_tse_private *priv)
  {
        int delay = 0;
-       struct sgdma_csr *csr = priv->tx_dma_csr;
  
        /* if DMA is busy, wait for current transaction to finish */
-       while ((ioread32(&csr->status) & SGDMA_STSREG_BUSY) && (delay++ < 100))
+       while ((csrrd32(priv->tx_dma_csr, sgdma_csroffs(status))
+               & SGDMA_STSREG_BUSY) && (delay++ < 100))
                udelay(1);
  
-       if (ioread32(&csr->status) & SGDMA_STSREG_BUSY) {
+       if (csrrd32(priv->tx_dma_csr, sgdma_csroffs(status))
+           & SGDMA_STSREG_BUSY) {
                netdev_err(priv->dev, "timeout waiting for tx dma\n");
                return 1;
        }
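
sgdma_txbusy() above is the standard bounded poll: re-read a BUSY flag with a 1 us pause and a fixed budget (100 iterations here), and report a timeout instead of hanging. Stripped to a skeleton, with the delay primitive left to the caller:

    #include <stdint.h>

    /* Returns 0 once busy_mask clears, 1 if still busy after max_polls
     * iterations (mirroring the delay++ < 100 budget above).
     */
    static int poll_not_busy(volatile uint32_t *status, uint32_t busy_mask,
                             int max_polls, void (*delay_1us)(void))
    {
            int polls = 0;

            while ((*status & busy_mask) && (polls++ < max_polls))
                    delay_1us();

            return (*status & busy_mask) ? 1 : 0;
    }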
@@@ -96,54 -96,89 +96,89 @@@ static void tse_fill_stats(struct net_d
                           u64 *buf)
  {
        struct altera_tse_private *priv = netdev_priv(dev);
-       struct altera_tse_mac *mac = priv->mac_dev;
        u64 ext;
  
-       buf[0] = ioread32(&mac->frames_transmitted_ok);
-       buf[1] = ioread32(&mac->frames_received_ok);
-       buf[2] = ioread32(&mac->frames_check_sequence_errors);
-       buf[3] = ioread32(&mac->alignment_errors);
+       buf[0] = csrrd32(priv->mac_dev,
+                        tse_csroffs(frames_transmitted_ok));
+       buf[1] = csrrd32(priv->mac_dev,
+                        tse_csroffs(frames_received_ok));
+       buf[2] = csrrd32(priv->mac_dev,
+                        tse_csroffs(frames_check_sequence_errors));
+       buf[3] = csrrd32(priv->mac_dev,
+                        tse_csroffs(alignment_errors));
  
        /* Extended aOctetsTransmittedOK counter */
-       ext = (u64) ioread32(&mac->msb_octets_transmitted_ok) << 32;
-       ext |= ioread32(&mac->octets_transmitted_ok);
+       ext = (u64) csrrd32(priv->mac_dev,
+                           tse_csroffs(msb_octets_transmitted_ok)) << 32;
+       ext |= csrrd32(priv->mac_dev,
+                      tse_csroffs(octets_transmitted_ok));
        buf[4] = ext;
  
        /* Extended aOctetsReceivedOK counter */
-       ext = (u64) ioread32(&mac->msb_octets_received_ok) << 32;
-       ext |= ioread32(&mac->octets_received_ok);
+       ext = (u64) csrrd32(priv->mac_dev,
+                           tse_csroffs(msb_octets_received_ok)) << 32;
+       ext |= csrrd32(priv->mac_dev,
+                      tse_csroffs(octets_received_ok));
        buf[5] = ext;
  
-       buf[6] = ioread32(&mac->tx_pause_mac_ctrl_frames);
-       buf[7] = ioread32(&mac->rx_pause_mac_ctrl_frames);
-       buf[8] = ioread32(&mac->if_in_errors);
-       buf[9] = ioread32(&mac->if_out_errors);
-       buf[10] = ioread32(&mac->if_in_ucast_pkts);
-       buf[11] = ioread32(&mac->if_in_multicast_pkts);
-       buf[12] = ioread32(&mac->if_in_broadcast_pkts);
-       buf[13] = ioread32(&mac->if_out_discards);
-       buf[14] = ioread32(&mac->if_out_ucast_pkts);
-       buf[15] = ioread32(&mac->if_out_multicast_pkts);
-       buf[16] = ioread32(&mac->if_out_broadcast_pkts);
-       buf[17] = ioread32(&mac->ether_stats_drop_events);
+       buf[6] = csrrd32(priv->mac_dev,
+                        tse_csroffs(tx_pause_mac_ctrl_frames));
+       buf[7] = csrrd32(priv->mac_dev,
+                        tse_csroffs(rx_pause_mac_ctrl_frames));
+       buf[8] = csrrd32(priv->mac_dev,
+                        tse_csroffs(if_in_errors));
+       buf[9] = csrrd32(priv->mac_dev,
+                        tse_csroffs(if_out_errors));
+       buf[10] = csrrd32(priv->mac_dev,
+                         tse_csroffs(if_in_ucast_pkts));
+       buf[11] = csrrd32(priv->mac_dev,
+                         tse_csroffs(if_in_multicast_pkts));
+       buf[12] = csrrd32(priv->mac_dev,
+                         tse_csroffs(if_in_broadcast_pkts));
+       buf[13] = csrrd32(priv->mac_dev,
+                         tse_csroffs(if_out_discards));
+       buf[14] = csrrd32(priv->mac_dev,
+                         tse_csroffs(if_out_ucast_pkts));
+       buf[15] = csrrd32(priv->mac_dev,
+                         tse_csroffs(if_out_multicast_pkts));
+       buf[16] = csrrd32(priv->mac_dev,
+                         tse_csroffs(if_out_broadcast_pkts));
+       buf[17] = csrrd32(priv->mac_dev,
+                         tse_csroffs(ether_stats_drop_events));
  
        /* Extended etherStatsOctets counter */
-       ext = (u64) ioread32(&mac->msb_ether_stats_octets) << 32;
-       ext |= ioread32(&mac->ether_stats_octets);
+       ext = (u64) csrrd32(priv->mac_dev,
+                           tse_csroffs(msb_ether_stats_octets)) << 32;
+       ext |= csrrd32(priv->mac_dev,
+                      tse_csroffs(ether_stats_octets));
        buf[18] = ext;
  
-       buf[19] = ioread32(&mac->ether_stats_pkts);
-       buf[20] = ioread32(&mac->ether_stats_undersize_pkts);
-       buf[21] = ioread32(&mac->ether_stats_oversize_pkts);
-       buf[22] = ioread32(&mac->ether_stats_pkts_64_octets);
-       buf[23] = ioread32(&mac->ether_stats_pkts_65to127_octets);
-       buf[24] = ioread32(&mac->ether_stats_pkts_128to255_octets);
-       buf[25] = ioread32(&mac->ether_stats_pkts_256to511_octets);
-       buf[26] = ioread32(&mac->ether_stats_pkts_512to1023_octets);
-       buf[27] = ioread32(&mac->ether_stats_pkts_1024to1518_octets);
-       buf[28] = ioread32(&mac->ether_stats_pkts_1519tox_octets);
-       buf[29] = ioread32(&mac->ether_stats_jabbers);
-       buf[30] = ioread32(&mac->ether_stats_fragments);
+       buf[19] = csrrd32(priv->mac_dev,
+                         tse_csroffs(ether_stats_pkts));
+       buf[20] = csrrd32(priv->mac_dev,
+                         tse_csroffs(ether_stats_undersize_pkts));
+       buf[21] = csrrd32(priv->mac_dev,
+                         tse_csroffs(ether_stats_oversize_pkts));
+       buf[22] = csrrd32(priv->mac_dev,
+                         tse_csroffs(ether_stats_pkts_64_octets));
+       buf[23] = csrrd32(priv->mac_dev,
+                         tse_csroffs(ether_stats_pkts_65to127_octets));
+       buf[24] = csrrd32(priv->mac_dev,
+                         tse_csroffs(ether_stats_pkts_128to255_octets));
+       buf[25] = csrrd32(priv->mac_dev,
+                         tse_csroffs(ether_stats_pkts_256to511_octets));
+       buf[26] = csrrd32(priv->mac_dev,
+                         tse_csroffs(ether_stats_pkts_512to1023_octets));
+       buf[27] = csrrd32(priv->mac_dev,
+                         tse_csroffs(ether_stats_pkts_1024to1518_octets));
+       buf[28] = csrrd32(priv->mac_dev,
+                         tse_csroffs(ether_stats_pkts_1519tox_octets));
+       buf[29] = csrrd32(priv->mac_dev,
+                         tse_csroffs(ether_stats_jabbers));
+       buf[30] = csrrd32(priv->mac_dev,
+                         tse_csroffs(ether_stats_fragments));
  }
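
The extended counters above are composed as msb << 32 | lsb from two 32-bit reads. The driver uses the plain two-read form; a general caveat with this pattern is that if the hardware does not latch the pair atomically, a carry between the two accesses can tear the value, and the usual hardening is to re-read the MSB until it is stable. A sketch of that defensive variant (the reader callback is a placeholder, not a driver API):

    #include <stdint.h>

    static uint64_t read_counter64(uint32_t (*rd)(int reg),
                                   int msb_reg, int lsb_reg)
    {
            uint32_t hi, lo, hi2;

            do {
                    hi = rd(msb_reg);
                    lo = rd(lsb_reg);
                    hi2 = rd(msb_reg);
            } while (hi != hi2);    /* MSB moved: a carry slipped in, retry */

            return ((uint64_t)hi << 32) | lo;
    }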
  
  static int tse_sset_count(struct net_device *dev, int sset)
@@@ -178,7 -213,6 +213,6 @@@ static void tse_get_regs(struct net_dev
  {
        int i;
        struct altera_tse_private *priv = netdev_priv(dev);
-       u32 *tse_mac_regs = (u32 *)priv->mac_dev;
        u32 *buf = regbuf;
  
        /* Set version to a known value, so ethtool knows
        regs->version = 1;
  
        for (i = 0; i < TSE_NUM_REGS; i++)
-               buf[i] = ioread32(&tse_mac_regs[i]);
+               buf[i] = csrrd32(priv->mac_dev, i * 4);
  }
  
  static int tse_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
@@@ -237,5 -271,5 +271,5 @@@ static const struct ethtool_ops tse_eth
  
  void altera_tse_set_ethtool_ops(struct net_device *netdev)
  {
 -      SET_ETHTOOL_OPS(netdev, &tse_ethtool_ops);
 +      netdev->ethtool_ops = &tse_ethtool_ops;
  }
@@@ -2576,8 -2576,7 +2576,8 @@@ int bnx2x_get_vf_config(struct net_devi
  
        ivi->vf = vfidx;
        ivi->qos = 0;
 -      ivi->tx_rate = 10000; /* always 10G. TBA take from link struct */
 +      ivi->max_tx_rate = 10000; /* always 10G. TBA take from link struct */
 +      ivi->min_tx_rate = 0;
        ivi->spoofchk = 1; /*always enabled */
        if (vf->state == VF_ENABLED) {
                /* mac and vlan are in vlan_mac objects */
@@@ -2696,7 -2695,7 +2696,7 @@@ out
                bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
        }
  
-       return 0;
+       return rc;
  }
  
  int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
@@@ -134,7 -134,7 +134,7 @@@ static void be_queue_free(struct be_ada
  }
  
  static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
 -              u16 len, u16 entry_size)
 +                        u16 len, u16 entry_size)
  {
        struct be_dma_mem *mem = &q->dma_mem;
  
@@@ -154,7 -154,7 +154,7 @@@ static void be_reg_intr_set(struct be_a
        u32 reg, enabled;
  
        pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
 -                              &reg);
 +                            &reg);
        enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
  
        if (!enabled && enable)
                return;
  
        pci_write_config_dword(adapter->pdev,
 -                      PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
 +                             PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
  }
  
  static void be_intr_set(struct be_adapter *adapter, bool enable)
@@@ -206,11 -206,12 +206,11 @@@ static void be_txq_notify(struct be_ada
  }
  
  static void be_eq_notify(struct be_adapter *adapter, u16 qid,
 -              bool arm, bool clear_int, u16 num_popped)
 +                       bool arm, bool clear_int, u16 num_popped)
  {
        u32 val = 0;
        val |= qid & DB_EQ_RING_ID_MASK;
 -      val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
 -                      DB_EQ_RING_ID_EXT_MASK_SHIFT);
 +      val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);
  
        if (adapter->eeh_error)
                return;
@@@ -476,7 -477,7 +476,7 @@@ static void populate_be_v2_stats(struc
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
 -      if (be_roce_supported(adapter))  {
 +      if (be_roce_supported(adapter)) {
                drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
                drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
                drvs->rx_roce_frames = port_stats->roce_frames_received;
@@@ -490,7 -491,8 +490,7 @@@ static void populate_lancer_stats(struc
  {
  
        struct be_drv_stats *drvs = &adapter->drv_stats;
 -      struct lancer_pport_stats *pport_stats =
 -                                      pport_stats_from_cmd(adapter);
 +      struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);
  
        be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
        drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
@@@ -537,7 -539,8 +537,7 @@@ static void accumulate_16bit_val(u32 *a
  }
  
  static void populate_erx_stats(struct be_adapter *adapter,
 -                      struct be_rx_obj *rxo,
 -                      u32 erx_stat)
 +                             struct be_rx_obj *rxo, u32 erx_stat)
  {
        if (!BEx_chip(adapter))
                rx_stats(rxo)->rx_drops_no_frags = erx_stat;
@@@ -576,7 -579,7 +576,7 @@@ void be_parse_stats(struct be_adapter *
  }
  
  static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
 -                                      struct rtnl_link_stats64 *stats)
 +                                              struct rtnl_link_stats64 *stats)
  {
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_drv_stats *drvs = &adapter->drv_stats;
@@@ -657,8 -660,7 +657,8 @@@ void be_link_status_update(struct be_ad
  }
  
  static void be_tx_stats_update(struct be_tx_obj *txo,
 -                      u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
 +                             u32 wrb_cnt, u32 copied, u32 gso_segs,
 +                             bool stopped)
  {
        struct be_tx_stats *stats = tx_stats(txo);
  
  
  /* Determine number of WRB entries needed to xmit data in an skb */
  static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
 -                                                              bool *dummy)
 +                         bool *dummy)
  {
        int cnt = (skb->len > skb->data_len);
  
@@@ -702,7 -704,7 +702,7 @@@ static inline void wrb_fill(struct be_e
  }
  
  static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
 -                                      struct sk_buff *skb)
 +                                   struct sk_buff *skb)
  {
        u8 vlan_prio;
        u16 vlan_tag;
@@@ -731,8 -733,7 +731,8 @@@ static u16 skb_ip_proto(struct sk_buff 
  }
  
  static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
 -              struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
 +                       struct sk_buff *skb, u32 wrb_cnt, u32 len,
 +                       bool skip_hw_vlan)
  {
        u16 vlan_tag, proto;
  
  }
  
  static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
 -              bool unmap_single)
 +                        bool unmap_single)
  {
        dma_addr_t dma;
  
  }
  
  static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
 -              struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
 -              bool skip_hw_vlan)
 +                      struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
 +                      bool skip_hw_vlan)
  {
        dma_addr_t busaddr;
        int i, copied = 0;
        }
  
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 -              const struct skb_frag_struct *frag =
 -                      &skb_shinfo(skb)->frags[i];
 +              const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
                busaddr = skb_frag_dma_map(dev, frag, 0,
                                           skb_frag_size(frag), DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
@@@ -925,7 -927,8 +925,7 @@@ static int be_vlan_tag_tx_chk(struct be
        return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
  }
  
 -static int be_ipv6_tx_stall_chk(struct be_adapter *adapter,
 -                              struct sk_buff *skb)
 +static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
  {
        return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
  }
@@@ -956,7 -959,7 +956,7 @@@ static struct sk_buff *be_lancer_xmit_w
         */
        if (be_pvid_tagging_enabled(adapter) &&
            veh->h_vlan_proto == htons(ETH_P_8021Q))
 -                      *skip_hw_vlan = true;
 +              *skip_hw_vlan = true;
  
        /* HW has a bug wherein it will calculate CSUM for VLAN
         * pkts even though it is disabled.
@@@ -1074,15 -1077,16 +1074,15 @@@ static int be_change_mtu(struct net_dev
  {
        struct be_adapter *adapter = netdev_priv(netdev);
        if (new_mtu < BE_MIN_MTU ||
 -                      new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
 -                                      (ETH_HLEN + ETH_FCS_LEN))) {
 +          new_mtu > (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN))) {
                dev_info(&adapter->pdev->dev,
 -                      "MTU must be between %d and %d bytes\n",
 -                      BE_MIN_MTU,
 -                      (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
 +                       "MTU must be between %d and %d bytes\n",
 +                       BE_MIN_MTU,
 +                       (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
                return -EINVAL;
        }
        dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
 -                      netdev->mtu, new_mtu);
 +               netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
        return 0;
  }
  static int be_vid_config(struct be_adapter *adapter)
  {
        u16 vids[BE_NUM_VLANS_SUPPORTED];
 -      u16 num = 0, i;
 +      u16 num = 0, i = 0;
        int status = 0;
  
        /* No need to further configure vids if in promiscuous mode */
                goto set_vlan_promisc;
  
        /* Construct VLAN Table to give to HW */
 -      for (i = 0; i < VLAN_N_VID; i++)
 -              if (adapter->vlan_tag[i])
 -                      vids[num++] = cpu_to_le16(i);
 -
 -      status = be_cmd_vlan_config(adapter, adapter->if_handle,
 -                                  vids, num, 0);
 +      for_each_set_bit(i, adapter->vids, VLAN_N_VID)
 +              vids[num++] = cpu_to_le16(i);
  
 +      status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num);
        if (status) {
                /* Set to VLAN promisc mode as setting VLAN filter failed */
                if (status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
@@@ -1153,16 -1160,16 +1153,16 @@@ static int be_vlan_add_vid(struct net_d
        if (lancer_chip(adapter) && vid == 0)
                return status;
  
 -      if (adapter->vlan_tag[vid])
 +      if (test_bit(vid, adapter->vids))
                return status;
  
 -      adapter->vlan_tag[vid] = 1;
 +      set_bit(vid, adapter->vids);
        adapter->vlans_added++;
  
        status = be_vid_config(adapter);
        if (status) {
                adapter->vlans_added--;
 -              adapter->vlan_tag[vid] = 0;
 +              clear_bit(vid, adapter->vids);
        }
  
        return status;
@@@ -1177,12 -1184,12 +1177,12 @@@ static int be_vlan_rem_vid(struct net_d
        if (lancer_chip(adapter) && vid == 0)
                goto ret;
  
 -      adapter->vlan_tag[vid] = 0;
 +      clear_bit(vid, adapter->vids);
        status = be_vid_config(adapter);
        if (!status)
                adapter->vlans_added--;
        else
 -              adapter->vlan_tag[vid] = 1;
 +              set_bit(vid, adapter->vids);
  ret:
        return status;
  }
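
The benet hunks here retire the per-VID vlan_tag[] array in favor of a vids bitmap driven by set_bit()/clear_bit()/test_bit() and walked with for_each_set_bit(), shrinking an array of per-VID flags down to 512 bytes of bitmap. The same bookkeeping in freestanding C, with plain stand-ins for the kernel bitops:

    #include <stdint.h>

    #define VLAN_N_VID 4096
    #define LONG_BITS  (8 * sizeof(unsigned long))

    static unsigned long vids[VLAN_N_VID / (8 * sizeof(unsigned long))];

    static void vid_set(unsigned int vid)
    {
            vids[vid / LONG_BITS] |= 1UL << (vid % LONG_BITS);
    }

    static void vid_clear(unsigned int vid)
    {
            vids[vid / LONG_BITS] &= ~(1UL << (vid % LONG_BITS));
    }

    static int vid_test(unsigned int vid)
    {
            return !!(vids[vid / LONG_BITS] & (1UL << (vid % LONG_BITS)));
    }

    /* gather set VIDs for the firmware table, like for_each_set_bit */
    static int collect_vids(uint16_t *out)
    {
            unsigned int vid;
            int num = 0;

            for (vid = 0; vid < VLAN_N_VID; vid++)
                    if (vid_test(vid))
                            out[num++] = (uint16_t)vid;
            return num;
    }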
@@@ -1247,10 -1254,8 +1247,10 @@@ static void be_set_rx_mode(struct net_d
  
        /* Set to MCAST promisc mode if setting MULTICAST address fails */
        if (status) {
 -              dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
 -              dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
 +              dev_info(&adapter->pdev->dev,
 +                       "Exhausted multicast HW filters.\n");
 +              dev_info(&adapter->pdev->dev,
 +                       "Disabling HW multicast filtering.\n");
                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
        }
  done:
@@@ -1282,7 -1287,7 +1282,7 @@@ static int be_set_vf_mac(struct net_dev
  
        if (status)
                dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
 -                              mac, vf);
 +                      mac, vf);
        else
                memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
  
  }
  
  static int be_get_vf_config(struct net_device *netdev, int vf,
 -                      struct ifla_vf_info *vi)
 +                          struct ifla_vf_info *vi)
  {
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
                return -EINVAL;
  
        vi->vf = vf;
 -      vi->tx_rate = vf_cfg->tx_rate;
 +      vi->max_tx_rate = vf_cfg->tx_rate;
 +      vi->min_tx_rate = 0;
        vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
        vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
        memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
        return 0;
  }
  
 -static int be_set_vf_vlan(struct net_device *netdev,
 -                      int vf, u16 vlan, u8 qos)
 +static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
  {
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
        return status;
  }
  
 -static int be_set_vf_tx_rate(struct net_device *netdev,
 -                      int vf, int rate)
 +static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
 +                           int min_tx_rate, int max_tx_rate)
  {
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;
        if (vf >= adapter->num_vfs)
                return -EINVAL;
  
 -      if (rate < 100 || rate > 10000) {
 +      if (min_tx_rate)
 +              return -EINVAL;
 +
 +      if (max_tx_rate < 100 || max_tx_rate > 10000) {
                dev_err(&adapter->pdev->dev,
 -                      "tx rate must be between 100 and 10000 Mbps\n");
 +                      "max tx rate must be between 100 and 10000 Mbps\n");
                return -EINVAL;
        }
  
 -      status = be_cmd_config_qos(adapter, rate / 10, vf + 1);
 +      status = be_cmd_config_qos(adapter, max_tx_rate / 10, vf + 1);
        if (status)
                dev_err(&adapter->pdev->dev,
 -                              "tx rate %d on VF %d failed\n", rate, vf);
 +                      "max tx rate %d on VF %d failed\n", max_tx_rate, vf);
        else
 -              adapter->vf_cfg[vf].tx_rate = rate;
 +              adapter->vf_cfg[vf].tx_rate = max_tx_rate;
        return status;
  }
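
This hunk adapts to the ndo rework visible further down (.ndo_set_vf_tx_rate giving way to .ndo_set_vf_rate): the callback now receives a min/max pair, and hardware that only implements a ceiling, as here, must explicitly reject a nonzero minimum. A toy shim showing that adaptation (names and the 100..10000 Mbps range are taken from the hunk, everything else is illustrative):

    /* legacy behavior: program only a best-effort cap */
    static int set_vf_cap_mbps(int vf, int cap)
    {
            (void)vf;
            return (cap >= 100 && cap <= 10000) ? 0 : -1;
    }

    /* new-style rate callback: no floor support, so any min is an error */
    static int set_vf_rate(int vf, int min_tx_rate, int max_tx_rate)
    {
            if (min_tx_rate)
                    return -1;      /* the driver returns -EINVAL */

            return set_vf_cap_mbps(vf, max_tx_rate);
    }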
  static int be_set_vf_link_state(struct net_device *netdev, int vf,
@@@ -1467,7 -1469,7 +1467,7 @@@ modify_eqd
  }
  
  static void be_rx_stats_update(struct be_rx_obj *rxo,
 -              struct be_rx_compl_info *rxcp)
 +                             struct be_rx_compl_info *rxcp)
  {
        struct be_rx_stats *stats = rx_stats(rxo);
  
@@@ -1564,8 -1566,7 +1564,8 @@@ static void skb_fill_rx_data(struct be_
                skb_frag_set_page(skb, 0, page_info->page);
                skb_shinfo(skb)->frags[0].page_offset =
                                        page_info->page_offset + hdr_len;
 -              skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
 +              skb_frag_size_set(&skb_shinfo(skb)->frags[0],
 +                                curr_frag_len - hdr_len);
                skb->data_len = curr_frag_len - hdr_len;
                skb->truesize += rx_frag_size;
                skb->tail += hdr_len;
@@@ -1724,8 -1725,8 +1724,8 @@@ static void be_parse_rx_compl_v1(struc
        if (rxcp->vlanf) {
                rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, qnq,
                                          compl);
 -              rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
 -                                             compl);
 +              rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1,
 +                                             vlan_tag, compl);
        }
        rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
        rxcp->tunneled =
@@@ -1756,8 -1757,8 +1756,8 @@@ static void be_parse_rx_compl_v0(struc
        if (rxcp->vlanf) {
                rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, qnq,
                                          compl);
 -              rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
 -                                             compl);
 +              rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
 +                                             vlan_tag, compl);
        }
        rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
        rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
@@@ -1798,7 -1799,7 +1798,7 @@@ static struct be_rx_compl_info *be_rx_c
                        rxcp->vlan_tag = swab16(rxcp->vlan_tag);
  
                if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
 -                  !adapter->vlan_tag[rxcp->vlan_tag])
 +                  !test_bit(rxcp->vlan_tag, adapter->vids))
                        rxcp->vlanf = 0;
        }
  
@@@ -1914,7 -1915,7 +1914,7 @@@ static struct be_eth_tx_compl *be_tx_co
  }
  
  static u16 be_tx_compl_process(struct be_adapter *adapter,
 -              struct be_tx_obj *txo, u16 last_index)
 +                             struct be_tx_obj *txo, u16 last_index)
  {
        struct be_queue_info *txq = &txo->q;
        struct be_eth_wrb *wrb;
@@@ -2121,7 -2122,7 +2121,7 @@@ static int be_evt_queues_create(struct 
  
                eq = &eqo->q;
                rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
 -                                      sizeof(struct be_eq_entry));
 +                                  sizeof(struct be_eq_entry));
                if (rc)
                        return rc;
  
@@@ -2154,7 -2155,7 +2154,7 @@@ static int be_mcc_queues_create(struct 
  
        cq = &adapter->mcc_obj.cq;
        if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
 -                      sizeof(struct be_mcc_compl)))
 +                         sizeof(struct be_mcc_compl)))
                goto err;
  
        /* Use the default EQ for MCC completions */
@@@ -2274,7 -2275,7 +2274,7 @@@ static int be_rx_cqs_create(struct be_a
                rxo->adapter = adapter;
                cq = &rxo->cq;
                rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
 -                              sizeof(struct be_eth_rx_compl));
 +                                  sizeof(struct be_eth_rx_compl));
                if (rc)
                        return rc;
  
@@@ -2338,7 -2339,7 +2338,7 @@@ static inline bool do_gro(struct be_rx_
  }
  
  static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
 -                      int budget, int polling)
 +                       int budget, int polling)
  {
        struct be_adapter *adapter = rxo->adapter;
        struct be_queue_info *rx_cq = &rxo->cq;
                 * promiscuous mode on some SKUs
                 */
                if (unlikely(rxcp->port != adapter->port_num &&
 -                              !lancer_chip(adapter))) {
 +                           !lancer_chip(adapter))) {
                        be_rx_compl_discard(rxo, rxcp);
                        goto loop_continue;
                }
@@@ -2404,9 -2405,8 +2404,9 @@@ static bool be_process_tx(struct be_ada
                if (!txcp)
                        break;
                num_wrbs += be_tx_compl_process(adapter, txo,
 -                              AMAP_GET_BITS(struct amap_eth_tx_compl,
 -                                      wrb_index, txcp));
 +                                              AMAP_GET_BITS(struct
 +                                                            amap_eth_tx_compl,
 +                                                            wrb_index, txcp));
        }
  
        if (work_done) {
                /* As Tx wrbs have been freed up, wake up netdev queue
                 * if it was stopped due to lack of tx wrbs.  */
                if (__netif_subqueue_stopped(adapter->netdev, idx) &&
 -                      atomic_read(&txo->q.used) < txo->q.len / 2) {
 +                  atomic_read(&txo->q.used) < txo->q.len / 2) {
                        netif_wake_subqueue(adapter->netdev, idx);
                }
  
@@@ -2510,9 -2510,9 +2510,9 @@@ void be_detect_error(struct be_adapter 
                sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
                if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
                        sliport_err1 = ioread32(adapter->db +
 -                                      SLIPORT_ERROR1_OFFSET);
 +                                              SLIPORT_ERROR1_OFFSET);
                        sliport_err2 = ioread32(adapter->db +
 -                                      SLIPORT_ERROR2_OFFSET);
 +                                              SLIPORT_ERROR2_OFFSET);
                        adapter->hw_error = true;
                        /* Do not log error messages if its a FW reset */
                        if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
                }
        } else {
                pci_read_config_dword(adapter->pdev,
 -                              PCICFG_UE_STATUS_LOW, &ue_lo);
 +                                    PCICFG_UE_STATUS_LOW, &ue_lo);
                pci_read_config_dword(adapter->pdev,
 -                              PCICFG_UE_STATUS_HIGH, &ue_hi);
 +                                    PCICFG_UE_STATUS_HIGH, &ue_hi);
                pci_read_config_dword(adapter->pdev,
 -                              PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
 +                                    PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
                pci_read_config_dword(adapter->pdev,
 -                              PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
 +                                    PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
  
                ue_lo = (ue_lo & ~ue_lo_mask);
                ue_hi = (ue_hi & ~ue_hi_mask);
@@@ -2624,7 -2624,7 +2624,7 @@@ fail
  }
  
  static inline int be_msix_vec_get(struct be_adapter *adapter,
 -                              struct be_eq_obj *eqo)
 +                                struct be_eq_obj *eqo)
  {
        return adapter->msix_entries[eqo->msix_idx].vector;
  }
@@@ -2648,7 -2648,7 +2648,7 @@@ err_msix
        for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
                free_irq(be_msix_vec_get(adapter, eqo), eqo);
        dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
 -              status);
 +               status);
        be_msix_disable(adapter);
        return status;
  }
@@@ -2774,8 -2774,7 +2774,8 @@@ static int be_rx_qs_create(struct be_ad
  {
        struct be_rx_obj *rxo;
        int rc, i, j;
 -      u8 rsstable[128];
 +      u8 rss_hkey[RSS_HASH_KEY_LEN];
 +      struct rss_info *rss = &adapter->rss_info;
  
        for_all_rx_queues(adapter, rxo, i) {
                rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
        }
  
        if (be_multi_rxq(adapter)) {
 -              for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
 +              for (j = 0; j < RSS_INDIR_TABLE_LEN;
 +                      j += adapter->num_rx_qs - 1) {
                        for_all_rss_queues(adapter, rxo, i) {
 -                              if ((j + i) >= 128)
 +                              if ((j + i) >= RSS_INDIR_TABLE_LEN)
                                        break;
 -                              rsstable[j + i] = rxo->rss_id;
 +                              rss->rsstable[j + i] = rxo->rss_id;
 +                              rss->rss_queue[j + i] = i;
                        }
                }
 -              adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
 -                                      RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
 +              rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
 +                      RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
  
                if (!BEx_chip(adapter))
 -                      adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
 -                                              RSS_ENABLE_UDP_IPV6;
 +                      rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
 +                              RSS_ENABLE_UDP_IPV6;
        } else {
                /* Disable RSS, if only default RX Q is created */
 -              adapter->rss_flags = RSS_ENABLE_NONE;
 +              rss->rss_flags = RSS_ENABLE_NONE;
        }
  
 -      rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
 -                             128);
 +      get_random_bytes(rss_hkey, RSS_HASH_KEY_LEN);
 +      rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
 +                             128, rss_hkey);
        if (rc) {
 -              adapter->rss_flags = RSS_ENABLE_NONE;
 +              rss->rss_flags = RSS_ENABLE_NONE;
                return rc;
        }
  
 +      memcpy(rss->rss_hkey, rss_hkey, RSS_HASH_KEY_LEN);
 +
        /* First time posting */
        for_all_rx_queues(adapter, rxo, i)
                be_post_rx_frags(rxo, GFP_KERNEL);
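
The RSS rework above seeds a random hash key and fills the indirection table by striding across it one slot per RSS queue, wrapping until every entry is covered, which amounts to a round-robin spread of queue ids. The fill in isolation, parameterized (the 128-entry length and u8 ids are assumptions read off the hunk):

    #include <stdint.h>

    /* round-robin n_queues across an RSS indirection table, as the
     * nested rsstable[]/rss_queue[] loop above does
     */
    static void fill_rss_table(uint8_t *table, int table_len,
                               const uint8_t *queue_ids, int n_queues)
    {
            int i, j;

            for (j = 0; j < table_len; j += n_queues)
                    for (i = 0; i < n_queues && j + i < table_len; i++)
                            table[j + i] = queue_ids[i];
    }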
@@@ -2902,8 -2896,7 +2902,8 @@@ static int be_setup_wol(struct be_adapt
  
        if (enable) {
                status = pci_write_config_dword(adapter->pdev,
 -                      PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
 +                                              PCICFG_PM_CONTROL_OFFSET,
 +                                              PCICFG_PM_CONTROL_MASK);
                if (status) {
                        dev_err(&adapter->pdev->dev,
                                "Could not enable Wake-on-lan\n");
                        return status;
                }
                status = be_cmd_enable_magic_wol(adapter,
 -                              adapter->netdev->dev_addr, &cmd);
 +                                               adapter->netdev->dev_addr,
 +                                               &cmd);
                pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
                pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
        } else {
@@@ -2952,8 -2944,7 +2952,8 @@@ static int be_vf_eth_addr_config(struc
  
                if (status)
                        dev_err(&adapter->pdev->dev,
 -                      "Mac address assignment failed for VF %d\n", vf);
 +                              "Mac address assignment failed for VF %d\n",
 +                              vf);
                else
                        memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
  
@@@ -3095,11 -3086,9 +3095,11 @@@ static int be_vfs_if_create(struct be_a
  
                /* If a FW profile exists, then cap_flags are updated */
                en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
 -                         BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
 -              status = be_cmd_if_create(adapter, cap_flags, en_flags,
 -                                        &vf_cfg->if_handle, vf + 1);
 +                                      BE_IF_FLAGS_BROADCAST |
 +                                      BE_IF_FLAGS_MULTICAST);
 +              status =
 +                  be_cmd_if_create(adapter, cap_flags, en_flags,
 +                                   &vf_cfg->if_handle, vf + 1);
                if (status)
                        goto err;
        }
@@@ -3605,8 -3594,8 +3605,8 @@@ static void be_netpoll(struct net_devic
  static char flash_cookie[2][16] =      {"*** SE FLAS", "H DIRECTORY *** "};
  
  static bool be_flash_redboot(struct be_adapter *adapter,
 -                      const u8 *p, u32 img_start, int image_size,
 -                      int hdr_size)
 +                           const u8 *p, u32 img_start, int image_size,
 +                           int hdr_size)
  {
        u32 crc_offset;
        u8 flashed_crc[4];
  
        p += crc_offset;
  
 -      status = be_cmd_get_flash_crc(adapter, flashed_crc,
 -                      (image_size - 4));
 +      status = be_cmd_get_flash_crc(adapter, flashed_crc, (image_size - 4));
        if (status) {
                dev_err(&adapter->pdev->dev,
 -              "could not get crc from flash, not flashing redboot\n");
 +                      "could not get crc from flash, not flashing redboot\n");
                return false;
        }
  
@@@ -3659,8 -3649,8 +3659,8 @@@ static bool is_comp_in_ufi(struct be_ad
  }
  
  static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
 -                                       int header_size,
 -                                       const struct firmware *fw)
 +                                              int header_size,
 +                                              const struct firmware *fw)
  {
        struct flash_section_info *fsec = NULL;
        const u8 *p = fw->data;
  }
  
  static int be_flash(struct be_adapter *adapter, const u8 *img,
 -              struct be_dma_mem *flash_cmd, int optype, int img_size)
 +                  struct be_dma_mem *flash_cmd, int optype, int img_size)
  {
        u32 total_bytes = 0, flash_op, num_bytes = 0;
        int status = 0;
                memcpy(req->data_buf, img, num_bytes);
                img += num_bytes;
                status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
 -                                              flash_op, num_bytes);
 +                                             flash_op, num_bytes);
                if (status) {
                        if (status == ILLEGAL_IOCTL_REQ &&
                            optype == OPTYPE_PHY_FW)
  
  /* For BE2, BE3 and BE3-R */
  static int be_flash_BEx(struct be_adapter *adapter,
 -                       const struct firmware *fw,
 -                       struct be_dma_mem *flash_cmd,
 -                       int num_of_images)
 -
 +                      const struct firmware *fw,
 +                      struct be_dma_mem *flash_cmd, int num_of_images)
  {
        int status = 0, i, filehdr_size = 0;
        int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
  
                if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
                        redboot = be_flash_redboot(adapter, fw->data,
 -                              pflashcomp[i].offset, pflashcomp[i].size,
 -                              filehdr_size + img_hdrs_size);
 +                                                 pflashcomp[i].offset,
 +                                                 pflashcomp[i].size,
 +                                                 filehdr_size +
 +                                                 img_hdrs_size);
                        if (!redboot)
                                continue;
                }
                        return -1;
  
                status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
 -                                      pflashcomp[i].size);
 +                                pflashcomp[i].size);
                if (status) {
                        dev_err(&adapter->pdev->dev,
                                "Flashing section type %d failed.\n",
  }
  
  static int be_flash_skyhawk(struct be_adapter *adapter,
 -              const struct firmware *fw,
 -              struct be_dma_mem *flash_cmd, int num_of_images)
 +                          const struct firmware *fw,
 +                          struct be_dma_mem *flash_cmd, int num_of_images)
  {
        int status = 0, i, filehdr_size = 0;
        int img_offset, img_size, img_optype, redboot;
  
                if (img_optype == OPTYPE_REDBOOT) {
                        redboot = be_flash_redboot(adapter, fw->data,
 -                                      img_offset, img_size,
 -                                      filehdr_size + img_hdrs_size);
 +                                                 img_offset, img_size,
 +                                                 filehdr_size +
 +                                                 img_hdrs_size);
                        if (!redboot)
                                continue;
                }
  }
  
  static int lancer_fw_download(struct be_adapter *adapter,
 -                              const struct firmware *fw)
 +                            const struct firmware *fw)
  {
  #define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
  #define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
        }
  
        dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
 -                              flash_cmd.dma);
 +                        flash_cmd.dma);
        if (status) {
                dev_err(&adapter->pdev->dev,
                        "Firmware load error. "
                        goto lancer_fw_exit;
                }
        } else if (change_status != LANCER_NO_RESET_NEEDED) {
 -                      dev_err(&adapter->pdev->dev,
 -                              "System reboot required for new FW"
 -                              " to be active\n");
 +              dev_err(&adapter->pdev->dev,
 +                      "System reboot required for new FW to be active\n");
        }
  
        dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
@@@ -4052,7 -4042,7 +4052,7 @@@ static int be_fw_download(struct be_ada
                        switch (ufi_type) {
                        case UFI_TYPE4:
                                status = be_flash_skyhawk(adapter, fw,
 -                                                      &flash_cmd, num_imgs);
 +                                                        &flash_cmd, num_imgs);
                                break;
                        case UFI_TYPE3R:
                                status = be_flash_BEx(adapter, fw, &flash_cmd,
@@@ -4122,7 -4112,8 +4122,7 @@@ fw_exit
        return status;
  }
  
 -static int be_ndo_bridge_setlink(struct net_device *dev,
 -                                  struct nlmsghdr *nlh)
 +static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh)
  {
        struct be_adapter *adapter = netdev_priv(dev);
        struct nlattr *attr, *br_spec;
@@@ -4164,7 -4155,8 +4164,7 @@@ err
  }
  
  static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
 -                                  struct net_device *dev,
 -                                  u32 filter_mask)
 +                               struct net_device *dev, u32 filter_mask)
  {
        struct be_adapter *adapter = netdev_priv(dev);
        int status = 0;
@@@ -4262,7 -4254,7 +4262,7 @@@ static const struct net_device_ops be_n
        .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
        .ndo_set_vf_mac         = be_set_vf_mac,
        .ndo_set_vf_vlan        = be_set_vf_vlan,
 -      .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
 +      .ndo_set_vf_rate        = be_set_vf_tx_rate,
        .ndo_get_vf_config      = be_get_vf_config,
        .ndo_set_vf_link_state  = be_set_vf_link_state,
  #ifdef CONFIG_NET_POLL_CONTROLLER
@@@ -4309,7 -4301,7 +4309,7 @@@ static void be_netdev_init(struct net_d
  
        netdev->netdev_ops = &be_netdev_ops;
  
 -      SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
 +      netdev->ethtool_ops = &be_ethtool_ops;
  }
  
  static void be_unmap_pci_bars(struct be_adapter *adapter)
@@@ -4878,7 -4870,7 +4878,7 @@@ static void be_shutdown(struct pci_dev 
  }
  
  static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
 -                              pci_channel_state_t state)
 +                                          pci_channel_state_t state)
  {
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev =  adapter->netdev;
@@@ -4957,6 -4949,12 +4957,12 @@@ static void be_eeh_resume(struct pci_de
        if (status)
                goto err;
  
+       /* On some BE3 FW versions, after a HW reset,
+        * interrupts will remain disabled for each function.
+        * So, explicitly enable interrupts
+        */
+       be_intr_set(adapter, true);
        /* tell fw we're ready to fire cmds */
        status = be_cmd_fw_init(adapter);
        if (status)
@@@ -212,7 -212,8 +212,7 @@@ static int mlx4_comm_cmd_poll(struct ml
  
        /* First, verify that the master reports correct status */
        if (comm_pending(dev)) {
 -              mlx4_warn(dev, "Communication channel is not idle."
 -                        "my toggle is %d (cmd:0x%x)\n",
 +              mlx4_warn(dev, "Communication channel is not idle - my toggle is %d (cmd:0x%x)\n",
                          priv->cmd.comm_toggle, cmd);
                return -EAGAIN;
        }
@@@ -421,8 -422,9 +421,8 @@@ static int mlx4_slave_cmd(struct mlx4_d
                                        *out_param =
                                                be64_to_cpu(vhcr->out_param);
                                else {
 -                                      mlx4_err(dev, "response expected while"
 -                                               "output mailbox is NULL for "
 -                                               "command 0x%x\n", op);
 +                                      mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
 +                                               op);
                                        vhcr->status = CMD_STAT_BAD_PARAM;
                                }
                        }
                                        *out_param =
                                                be64_to_cpu(vhcr->out_param);
                                else {
 -                                      mlx4_err(dev, "response expected while"
 -                                               "output mailbox is NULL for "
 -                                               "command 0x%x\n", op);
 +                                      mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
 +                                               op);
                                        vhcr->status = CMD_STAT_BAD_PARAM;
                                }
                        }
                        ret = mlx4_status_to_errno(vhcr->status);
                } else
 -                      mlx4_err(dev, "failed execution of VHCR_POST command"
 -                               "opcode 0x%x\n", op);
 +                      mlx4_err(dev, "failed execution of VHCR_POST command opcode 0x%x\n",
 +                               op);
        }
  
        mutex_unlock(&priv->cmd.slave_cmd_mutex);
@@@ -473,13 -476,6 +473,13 @@@ static int mlx4_cmd_poll(struct mlx4_de
                goto out;
        }
  
 +      if (out_is_imm && !out_param) {
 +              mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
 +                       op);
 +              err = -EINVAL;
 +              goto out;
 +      }
 +
        err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
                            in_modifier, op_modifier, op, CMD_POLL_TOKEN, 0);
        if (err)
@@@ -558,13 -554,6 +558,13 @@@ static int mlx4_cmd_wait(struct mlx4_de
        cmd->free_head = context->next;
        spin_unlock(&cmd->context_lock);
  
 +      if (out_is_imm && !out_param) {
 +              mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
 +                       op);
 +              err = -EINVAL;
 +              goto out;
 +      }
 +
        init_completion(&context->done);
  
        mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
@@@ -636,8 -625,9 +636,8 @@@ static int mlx4_ACCESS_MEM(struct mlx4_
  
        if ((slave_addr & 0xfff) | (master_addr & 0xfff) |
            (slave & ~0x7f) | (size & 0xff)) {
 -              mlx4_err(dev, "Bad access mem params - slave_addr:0x%llx "
 -                            "master_addr:0x%llx slave_id:%d size:%d\n",
 -                            slave_addr, master_addr, slave, size);
 +              mlx4_err(dev, "Bad access mem params - slave_addr:0x%llx master_addr:0x%llx slave_id:%d size:%d\n",
 +                       slave_addr, master_addr, slave, size);
                return -EINVAL;
        }
  
@@@ -798,7 -788,8 +798,7 @@@ static int mlx4_MAD_IFC_wrapper(struct 
            ((smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) ||
             (smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED &&
              smp->method == IB_MGMT_METHOD_SET))) {
 -              mlx4_err(dev, "slave %d is trying to execute a Subnet MGMT MAD, "
 -                       "class 0x%x, method 0x%x for attr 0x%x. Rejecting\n",
 +              mlx4_err(dev, "slave %d is trying to execute a Subnet MGMT MAD, class 0x%x, method 0x%x for attr 0x%x - Rejecting\n",
                         slave, smp->method, smp->mgmt_class,
                         be16_to_cpu(smp->attr_id));
                return -EPERM;
@@@ -1262,12 -1253,12 +1262,12 @@@ static struct mlx4_cmd_info cmd_info[] 
        },
        {
                .opcode = MLX4_CMD_UPDATE_QP,
-               .has_inbox = false,
+               .has_inbox = true,
                .has_outbox = false,
                .out_is_imm = false,
                .encode_slave_id = false,
                .verify = NULL,
-               .wrapper = mlx4_CMD_EPERM_wrapper
+               .wrapper = mlx4_UPDATE_QP_wrapper
        },
        {
                .opcode = MLX4_CMD_GET_OP_REQ,
@@@ -1418,8 -1409,8 +1418,8 @@@ static int mlx4_master_process_vhcr(str
                                      ALIGN(sizeof(struct mlx4_vhcr_cmd),
                                            MLX4_ACCESS_MEM_ALIGN), 1);
                if (ret) {
 -                      mlx4_err(dev, "%s:Failed reading vhcr"
 -                               "ret: 0x%x\n", __func__, ret);
 +                      mlx4_err(dev, "%s: Failed reading vhcr ret: 0x%x\n",
 +                               __func__, ret);
                        kfree(vhcr);
                        return ret;
                }
  
        /* Apply permission and bound checks if applicable */
        if (cmd->verify && cmd->verify(dev, slave, vhcr, inbox)) {
 -              mlx4_warn(dev, "Command:0x%x from slave: %d failed protection "
 -                        "checks for resource_id:%d\n", vhcr->op, slave,
 -                        vhcr->in_modifier);
 +              mlx4_warn(dev, "Command:0x%x from slave: %d failed protection checks for resource_id:%d\n",
 +                        vhcr->op, slave, vhcr->in_modifier);
                vhcr_cmd->status = CMD_STAT_BAD_OP;
                goto out_status;
        }
        }
  
        if (err) {
 -              mlx4_warn(dev, "vhcr command:0x%x slave:%d failed with"
 -                        " error:%d, status %d\n",
 +              mlx4_warn(dev, "vhcr command:0x%x slave:%d failed with error:%d, status %d\n",
                          vhcr->op, slave, vhcr->errno, err);
                vhcr_cmd->status = mlx4_errno_to_status(err);
                goto out_status;
@@@ -1544,8 -1537,8 +1544,8 @@@ out_status
                                 __func__);
                else if (vhcr->e_bit &&
                         mlx4_GEN_EQE(dev, slave, &priv->mfunc.master.cmd_eqe))
 -                              mlx4_warn(dev, "Failed to generate command completion "
 -                                        "eqe for slave %d\n", slave);
 +                              mlx4_warn(dev, "Failed to generate command completion eqe for slave %d\n",
 +                                        slave);
        }
  
  out:
@@@ -1584,9 -1577,8 +1584,9 @@@ static int mlx4_master_immediate_activa
  
        mlx4_dbg(dev, "updating immediately admin params slave %d port %d\n",
                 slave, port);
 -      mlx4_dbg(dev, "vlan %d QoS %d link down %d\n", vp_admin->default_vlan,
 -               vp_admin->default_qos, vp_admin->link_state);
 +      mlx4_dbg(dev, "vlan %d QoS %d link down %d\n",
 +               vp_admin->default_vlan, vp_admin->default_qos,
 +               vp_admin->link_state);
  
        work = kzalloc(sizeof(*work), GFP_KERNEL);
        if (!work)
                                                   &admin_vlan_ix);
                        if (err) {
                                kfree(work);
 -                              mlx4_warn((&priv->dev),
 +                              mlx4_warn(&priv->dev,
                                          "No vlan resources slave %d, port %d\n",
                                          slave, port);
                                return err;
                        admin_vlan_ix = NO_INDX;
                }
                work->flags |= MLX4_VF_IMMED_VLAN_FLAG_VLAN;
 -              mlx4_dbg((&(priv->dev)),
 +              mlx4_dbg(&priv->dev,
                         "alloc vlan %d idx  %d slave %d port %d\n",
                         (int)(vp_admin->default_vlan),
                         admin_vlan_ix, slave, port);
@@@ -1669,12 -1661,12 +1669,12 @@@ static int mlx4_master_activate_admin_s
                                                   vp_admin->default_vlan, &(vp_oper->vlan_idx));
                        if (err) {
                                vp_oper->vlan_idx = NO_INDX;
 -                              mlx4_warn((&priv->dev),
 +                              mlx4_warn(&priv->dev,
                                          "No vlan resorces slave %d, port %d\n",
                                          slave, port);
                                return err;
                        }
 -                      mlx4_dbg((&(priv->dev)), "alloc vlan %d idx  %d slave %d port %d\n",
 +                      mlx4_dbg(&priv->dev, "alloc vlan %d idx  %d slave %d port %d\n",
                                 (int)(vp_oper->state.default_vlan),
                                 vp_oper->vlan_idx, slave, port);
                }
                        if (0 > vp_oper->mac_idx) {
                                err = vp_oper->mac_idx;
                                vp_oper->mac_idx = NO_INDX;
 -                              mlx4_warn((&priv->dev),
 +                              mlx4_warn(&priv->dev,
                                          "No mac resorces slave %d, port %d\n",
                                          slave, port);
                                return err;
                        }
 -                      mlx4_dbg((&(priv->dev)), "alloc mac %llx idx  %d slave %d port %d\n",
 +                      mlx4_dbg(&priv->dev, "alloc mac %llx idx  %d slave %d port %d\n",
                                 vp_oper->state.mac, vp_oper->mac_idx, slave, port);
                }
        }
@@@ -1739,8 -1731,8 +1739,8 @@@ static void mlx4_master_do_cmd(struct m
        slave_state[slave].comm_toggle ^= 1;
        reply = (u32) slave_state[slave].comm_toggle << 31;
        if (toggle != slave_state[slave].comm_toggle) {
 -              mlx4_warn(dev, "Incorrect toggle %d from slave %d. *** MASTER"
 -                        "STATE COMPROMISIED ***\n", toggle, slave);
 +              mlx4_warn(dev, "Incorrect toggle %d from slave %d. *** MASTER STATE COMPROMISED ***\n",
 +                        toggle, slave);
                goto reset_slave;
        }
        if (cmd == MLX4_COMM_CMD_RESET) {
        /* command from slave in the middle of FLR */
        if (cmd != MLX4_COMM_CMD_RESET &&
            MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) {
 -              mlx4_warn(dev, "slave:%d is Trying to run cmd(0x%x) "
 -                        "in the middle of FLR\n", slave, cmd);
 +              mlx4_warn(dev, "slave:%d is Trying to run cmd(0x%x) in the middle of FLR\n",
 +                        slave, cmd);
                return;
        }
  
  
                mutex_lock(&priv->cmd.slave_cmd_mutex);
                if (mlx4_master_process_vhcr(dev, slave, NULL)) {
 -                      mlx4_err(dev, "Failed processing vhcr for slave:%d,"
 -                               " resetting slave.\n", slave);
 +                      mlx4_err(dev, "Failed processing vhcr for slave:%d, resetting slave\n",
 +                               slave);
                        mutex_unlock(&priv->cmd.slave_cmd_mutex);
                        goto reset_slave;
                }
                is_going_down = 1;
        spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
        if (is_going_down) {
 -              mlx4_warn(dev, "Slave is going down aborting command(%d)"
 -                        " executing from slave:%d\n",
 +              mlx4_warn(dev, "Slave is going down aborting command(%d) executing from slave:%d\n",
                          cmd, slave);
                return;
        }
@@@ -1887,9 -1880,10 +1887,9 @@@ void mlx4_master_comm_channel(struct wo
                        if (toggle != slt) {
                                if (master->slave_state[slave].comm_toggle
                                    != slt) {
 -                                      printk(KERN_INFO "slave %d out of sync."
 -                                             " read toggle %d, state toggle %d. "
 -                                             "Resynching.\n", slave, slt,
 -                                             master->slave_state[slave].comm_toggle);
 +                                      pr_info("slave %d out of sync. read toggle %d, state toggle %d. Resynching.\n",
 +                                              slave, slt,
 +                                              master->slave_state[slave].comm_toggle);
                                        master->slave_state[slave].comm_toggle =
                                                slt;
                                }
        }
  
        if (reported && reported != served)
 -              mlx4_warn(dev, "Got command event with bitmask from %d slaves"
 -                        " but %d were served\n",
 +              mlx4_warn(dev, "Got command event with bitmask from %d slaves but %d were served\n",
                          reported, served);
  
        if (mlx4_ARM_COMM_CHANNEL(dev))
@@@ -1958,7 -1953,7 +1958,7 @@@ int mlx4_multi_func_init(struct mlx4_de
                ioremap(pci_resource_start(dev->pdev, 2) +
                        MLX4_SLAVE_COMM_BASE, MLX4_COMM_PAGESIZE);
        if (!priv->mfunc.comm) {
 -              mlx4_err(dev, "Couldn't map communication vector.\n");
 +              mlx4_err(dev, "Couldn't map communication vector\n");
                goto err_vhcr;
        }
  
@@@ -2085,7 -2080,7 +2085,7 @@@ int mlx4_cmd_init(struct mlx4_dev *dev
                priv->cmd.hcr = ioremap(pci_resource_start(dev->pdev, 0) +
                                        MLX4_HCR_BASE, MLX4_HCR_SIZE);
                if (!priv->cmd.hcr) {
 -                      mlx4_err(dev, "Couldn't map command register.\n");
 +                      mlx4_err(dev, "Couldn't map command register\n");
                        return -ENOMEM;
                }
        }
@@@ -2486,12 -2481,11 +2486,12 @@@ int mlx4_get_vf_config(struct mlx4_dev 
        ivf->mac[4] = ((s_info->mac >> (1*8)) & 0xff);
        ivf->mac[5] = ((s_info->mac)  & 0xff);
  
 -      ivf->vlan       = s_info->default_vlan;
 -      ivf->qos        = s_info->default_qos;
 -      ivf->tx_rate    = s_info->tx_rate;
 -      ivf->spoofchk   = s_info->spoofchk;
 -      ivf->linkstate  = s_info->link_state;
 +      ivf->vlan               = s_info->default_vlan;
 +      ivf->qos                = s_info->default_qos;
 +      ivf->max_tx_rate        = s_info->tx_rate;
 +      ivf->min_tx_rate        = 0;
 +      ivf->spoofchk           = s_info->spoofchk;
 +      ivf->linkstate          = s_info->link_state;
  
        return 0;
  }
@@@ -216,19 -216,18 +216,19 @@@ extern int mlx4_debug_level
  #define mlx4_debug_level      (0)
  #endif /* CONFIG_MLX4_DEBUG */
  
 -#define mlx4_dbg(mdev, format, arg...)                                        \
 +#define mlx4_dbg(mdev, format, ...)                                   \
  do {                                                                  \
        if (mlx4_debug_level)                                           \
 -              dev_printk(KERN_DEBUG, &mdev->pdev->dev, format, ##arg); \
 +              dev_printk(KERN_DEBUG, &(mdev)->pdev->dev, format,      \
 +                         ##__VA_ARGS__);                              \
  } while (0)
  
 -#define mlx4_err(mdev, format, arg...) \
 -      dev_err(&mdev->pdev->dev, format, ##arg)
 -#define mlx4_info(mdev, format, arg...) \
 -      dev_info(&mdev->pdev->dev, format, ##arg)
 -#define mlx4_warn(mdev, format, arg...) \
 -      dev_warn(&mdev->pdev->dev, format, ##arg)
 +#define mlx4_err(mdev, format, ...)                                   \
 +      dev_err(&(mdev)->pdev->dev, format, ##__VA_ARGS__)
 +#define mlx4_info(mdev, format, ...)                                  \
 +      dev_info(&(mdev)->pdev->dev, format, ##__VA_ARGS__)
 +#define mlx4_warn(mdev, format, ...)                                  \
 +      dev_warn(&(mdev)->pdev->dev, format, ##__VA_ARGS__)
  
  extern int mlx4_log_num_mgm_entry_size;
  extern int log_mtts_per_seg;
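
The extra parentheses around "mdev" in the rewritten macros matter as soon as
a caller passes anything more complex than a plain identifier. A minimal
sketch of the failure mode (the caller and its variables are hypothetical,
not part of this merge):

	/* With the old "&mdev->pdev->dev", this call would expand to
	 *   dev_warn(&flag ? mdev_a : mdev_b->pdev->dev, ...)
	 * which parses as "(&flag) ? mdev_a : (mdev_b->pdev->dev)".
	 * The new "&(mdev)->pdev->dev" keeps the conditional grouped.
	 */
	mlx4_warn(flag ? mdev_a : mdev_b, "port %d is down\n", port);
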
@@@ -1196,6 -1195,12 +1196,12 @@@ int mlx4_QP_ATTACH_wrapper(struct mlx4_
                           struct mlx4_cmd_mailbox *outbox,
                           struct mlx4_cmd_info *cmd);
  
+ int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
+                          struct mlx4_vhcr *vhcr,
+                          struct mlx4_cmd_mailbox *inbox,
+                          struct mlx4_cmd_mailbox *outbox,
+                          struct mlx4_cmd_info *cmd);
  int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave,
                         struct mlx4_vhcr *vhcr,
                         struct mlx4_cmd_mailbox *inbox,
@@@ -264,8 -264,8 +264,8 @@@ void mlx4_qp_release_range(struct mlx4_
                               MLX4_CMD_FREE_RES,
                               MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
                if (err) {
 -                      mlx4_warn(dev, "Failed to release qp range"
 -                                " base:%d cnt:%d\n", base_qpn, cnt);
 +                      mlx4_warn(dev, "Failed to release qp range base:%d cnt:%d\n",
 +                                base_qpn, cnt);
                }
        } else
                 __mlx4_qp_release_range(dev, base_qpn, cnt);
@@@ -389,6 -389,41 +389,41 @@@ err_icm
  
  EXPORT_SYMBOL_GPL(mlx4_qp_alloc);
  
+ #define MLX4_UPDATE_QP_SUPPORTED_ATTRS MLX4_UPDATE_QP_SMAC
+ int mlx4_update_qp(struct mlx4_dev *dev, struct mlx4_qp *qp,
+                  enum mlx4_update_qp_attr attr,
+                  struct mlx4_update_qp_params *params)
+ {
+       struct mlx4_cmd_mailbox *mailbox;
+       struct mlx4_update_qp_context *cmd;
+       u64 pri_addr_path_mask = 0;
+       int err = 0;
+
+       /* validate the attribute mask before allocating so the mailbox
+        * cannot leak on the error path
+        */
+       if (!attr || (attr & ~MLX4_UPDATE_QP_SUPPORTED_ATTRS))
+               return -EINVAL;
+
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+
+       cmd = (struct mlx4_update_qp_context *)mailbox->buf;
+       if (attr & MLX4_UPDATE_QP_SMAC) {
+               pri_addr_path_mask |= 1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX;
+               cmd->qp_context.pri_path.grh_mylmc = params->smac_index;
+       }
+       cmd->primary_addr_path_mask = cpu_to_be64(pri_addr_path_mask);
+       err = mlx4_cmd(dev, mailbox->dma, qp->qpn & 0xffffff, 0,
+                      MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
+                      MLX4_CMD_NATIVE);
+       mlx4_free_cmd_mailbox(dev, mailbox);
+       return err;
+ }
+ EXPORT_SYMBOL_GPL(mlx4_update_qp);
+
  void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp)
  {
        struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
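
For reference, a minimal caller sketch for the new mlx4_update_qp() entry
point added above (the surrounding context and new_smac_index are
hypothetical; MLX4_UPDATE_QP_SMAC is the only attribute supported so far):

	struct mlx4_update_qp_params params = {
		/* hypothetical: an index into the port's MAC table */
		.smac_index = new_smac_index,
	};
	int err;

	err = mlx4_update_qp(dev, qp, MLX4_UPDATE_QP_SMAC, &params);
	if (err)
		mlx4_warn(dev, "UPDATE_QP on qpn 0x%x failed (%d)\n",
			  qp->qpn, err);
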
@@@ -577,7 -612,8 +612,7 @@@ int mlx4_qp_to_ready(struct mlx4_dev *d
                err = mlx4_qp_modify(dev, mtt, states[i], states[i + 1],
                                     context, 0, 0, qp);
                if (err) {
 -                      mlx4_err(dev, "Failed to bring QP to state: "
 -                               "%d with error: %d\n",
 +                      mlx4_err(dev, "Failed to bring QP to state: %d with error: %d\n",
                                 states[i + 1], err);
                        return err;
                }
@@@ -962,7 -962,7 +962,7 @@@ static struct res_common *alloc_tr(u64 
                ret = alloc_srq_tr(id);
                break;
        case RES_MAC:
 -              printk(KERN_ERR "implementation missing\n");
 +              pr_err("implementation missing\n");
                return NULL;
        case RES_COUNTER:
                ret = alloc_counter_tr(id);
@@@ -1056,10 -1056,10 +1056,10 @@@ static int remove_mtt_ok(struct res_mt
  {
        if (res->com.state == RES_MTT_BUSY ||
            atomic_read(&res->ref_count)) {
 -              printk(KERN_DEBUG "%s-%d: state %s, ref_count %d\n",
 -                     __func__, __LINE__,
 -                     mtt_states_str(res->com.state),
 -                     atomic_read(&res->ref_count));
 +              pr_devel("%s-%d: state %s, ref_count %d\n",
 +                       __func__, __LINE__,
 +                       mtt_states_str(res->com.state),
 +                       atomic_read(&res->ref_count));
                return -EBUSY;
        } else if (res->com.state != RES_MTT_ALLOCATED)
                return -EPERM;
@@@ -3880,7 -3880,7 +3880,7 @@@ static int add_eth_header(struct mlx4_d
                }
        }
        if (!be_mac) {
 -              pr_err("Failed adding eth header to FS rule, Can't find matching MAC for port %d .\n",
 +              pr_err("Failed adding eth header to FS rule, Can't find matching MAC for port %d\n",
                       port);
                return -EINVAL;
        }
  
  }
  
+ #define MLX4_UPD_QP_PATH_MASK_SUPPORTED (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)
+ int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
+                          struct mlx4_vhcr *vhcr,
+                          struct mlx4_cmd_mailbox *inbox,
+                          struct mlx4_cmd_mailbox *outbox,
+                          struct mlx4_cmd_info *cmd_info)
+ {
+       int err;
+       u32 qpn = vhcr->in_modifier & 0xffffff;
+       struct res_qp *rqp;
+       u64 mac;
+       unsigned port;
+       u64 pri_addr_path_mask;
+       struct mlx4_update_qp_context *cmd;
+       int smac_index;
+
+       cmd = (struct mlx4_update_qp_context *)inbox->buf;
+       pri_addr_path_mask = be64_to_cpu(cmd->primary_addr_path_mask);
+       if (cmd->qp_mask || cmd->secondary_addr_path_mask ||
+           (pri_addr_path_mask & ~MLX4_UPD_QP_PATH_MASK_SUPPORTED))
+               return -EPERM;
+       /* Just change the smac for the QP */
+       err = get_res(dev, slave, qpn, RES_QP, &rqp);
+       if (err) {
+               mlx4_err(dev, "Updating qpn 0x%x for slave %d rejected\n", qpn, slave);
+               return err;
+       }
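+       /* bit 6 of the schedule queue encodes the port: 0 -> port 1 */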
+       port = (rqp->sched_queue >> 6 & 1) + 1;
+       smac_index = cmd->qp_context.pri_path.grh_mylmc;
+       err = mac_find_smac_ix_in_slave(dev, slave, port,
+                                       smac_index, &mac);
+       if (err) {
+               mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
+                        qpn, smac_index);
+               goto err_mac;
+       }
+       err = mlx4_cmd(dev, inbox->dma,
+                      vhcr->in_modifier, 0,
+                      MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
+                      MLX4_CMD_NATIVE);
+       if (err) {
+               mlx4_err(dev, "Failed to update qpn on qpn 0x%x, command failed\n", qpn);
+               goto err_mac;
+       }
+ err_mac:
+       put_res(dev, slave, qpn, RES_QP);
+       return err;
+ }
  int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
                                         struct mlx4_vhcr *vhcr,
                                         struct mlx4_cmd_mailbox *inbox,
        qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
        err = get_res(dev, slave, qpn, RES_QP, &rqp);
        if (err) {
 -              pr_err("Steering rule with qpn 0x%x rejected.\n", qpn);
 +              pr_err("Steering rule with qpn 0x%x rejected\n", qpn);
                return err;
        }
        rule_header = (struct _rule_hw *)(ctrl + 1);
        case MLX4_NET_TRANS_RULE_ID_IPV4:
        case MLX4_NET_TRANS_RULE_ID_TCP:
        case MLX4_NET_TRANS_RULE_ID_UDP:
 -              pr_warn("Can't attach FS rule without L2 headers, adding L2 header.\n");
 +              pr_warn("Can't attach FS rule without L2 headers, adding L2 header\n");
                if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
                        err = -EINVAL;
                        goto err_put;
                        sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
                break;
        default:
 -              pr_err("Corrupted mailbox.\n");
 +              pr_err("Corrupted mailbox\n");
                err = -EINVAL;
                goto err_put;
        }
  
        err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
        if (err) {
 -              mlx4_err(dev, "Fail to add flow steering resources.\n ");
 +              mlx4_err(dev, "Fail to add flow steering resources\n");
                /* detach rule*/
                mlx4_cmd(dev, vhcr->out_param, 0, 0,
                         MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
@@@ -4002,7 -4056,7 +4056,7 @@@ int mlx4_QP_FLOW_STEERING_DETACH_wrappe
  
        err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
        if (err) {
 -              mlx4_err(dev, "Fail to remove flow steering resources.\n ");
 +              mlx4_err(dev, "Fail to remove flow steering resources\n");
                goto out;
        }
  
@@@ -4131,8 -4185,8 +4185,8 @@@ static void rem_slave_qps(struct mlx4_d
  
        err = move_all_busy(dev, slave, RES_QP);
        if (err)
 -              mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy"
 -                        "for slave %d\n", slave);
 +              mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
 +                        slave);
  
        spin_lock_irq(mlx4_tlock(dev));
        list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
                                                       MLX4_CMD_TIME_CLASS_A,
                                                       MLX4_CMD_NATIVE);
                                        if (err)
 -                                              mlx4_dbg(dev, "rem_slave_qps: failed"
 -                                                       " to move slave %d qpn %d to"
 -                                                       " reset\n", slave,
 -                                                       qp->local_qpn);
 +                                              mlx4_dbg(dev, "rem_slave_qps: failed to move slave %d qpn %d to reset\n",
 +                                                       slave, qp->local_qpn);
                                        atomic_dec(&qp->rcq->ref_count);
                                        atomic_dec(&qp->scq->ref_count);
                                        atomic_dec(&qp->mtt->ref_count);
@@@ -4205,8 -4261,8 +4259,8 @@@ static void rem_slave_srqs(struct mlx4_
  
        err = move_all_busy(dev, slave, RES_SRQ);
        if (err)
 -              mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to "
 -                        "busy for slave %d\n", slave);
 +              mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs - too busy for slave %d\n",
 +                        slave);
  
        spin_lock_irq(mlx4_tlock(dev));
        list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
                                                       MLX4_CMD_TIME_CLASS_A,
                                                       MLX4_CMD_NATIVE);
                                        if (err)
 -                                              mlx4_dbg(dev, "rem_slave_srqs: failed"
 -                                                       " to move slave %d srq %d to"
 -                                                       " SW ownership\n",
 +                                              mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n",
                                                         slave, srqn);
  
                                        atomic_dec(&srq->mtt->ref_count);
@@@ -4271,8 -4329,8 +4325,8 @@@ static void rem_slave_cqs(struct mlx4_d
  
        err = move_all_busy(dev, slave, RES_CQ);
        if (err)
 -              mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to "
 -                        "busy for slave %d\n", slave);
 +              mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs - too busy for slave %d\n",
 +                        slave);
  
        spin_lock_irq(mlx4_tlock(dev));
        list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
                                                       MLX4_CMD_TIME_CLASS_A,
                                                       MLX4_CMD_NATIVE);
                                        if (err)
 -                                              mlx4_dbg(dev, "rem_slave_cqs: failed"
 -                                                       " to move slave %d cq %d to"
 -                                                       " SW ownership\n",
 +                                              mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n",
                                                         slave, cqn);
                                        atomic_dec(&cq->mtt->ref_count);
                                        state = RES_CQ_ALLOCATED;
@@@ -4334,8 -4394,8 +4388,8 @@@ static void rem_slave_mrs(struct mlx4_d
  
        err = move_all_busy(dev, slave, RES_MPT);
        if (err)
 -              mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to "
 -                        "busy for slave %d\n", slave);
 +              mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts - too busy for slave %d\n",
 +                        slave);
  
        spin_lock_irq(mlx4_tlock(dev));
        list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
                                                     MLX4_CMD_TIME_CLASS_A,
                                                     MLX4_CMD_NATIVE);
                                        if (err)
 -                                              mlx4_dbg(dev, "rem_slave_mrs: failed"
 -                                                       " to move slave %d mpt %d to"
 -                                                       " SW ownership\n",
 +                                              mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n",
                                                         slave, mptn);
                                        if (mpt->mtt)
                                                atomic_dec(&mpt->mtt->ref_count);
@@@ -4402,8 -4464,8 +4456,8 @@@ static void rem_slave_mtts(struct mlx4_
  
        err = move_all_busy(dev, slave, RES_MTT);
        if (err)
 -              mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to "
 -                        "busy for slave %d\n", slave);
 +              mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts  - too busy for slave %d\n",
 +                        slave);
  
        spin_lock_irq(mlx4_tlock(dev));
        list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
@@@ -4505,8 -4567,8 +4559,8 @@@ static void rem_slave_eqs(struct mlx4_d
  
        err = move_all_busy(dev, slave, RES_EQ);
        if (err)
 -              mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to "
 -                        "busy for slave %d\n", slave);
 +              mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs - too busy for slave %d\n",
 +                        slave);
  
        spin_lock_irq(mlx4_tlock(dev));
        list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
                                                           MLX4_CMD_TIME_CLASS_A,
                                                           MLX4_CMD_NATIVE);
                                        if (err)
 -                                              mlx4_dbg(dev, "rem_slave_eqs: failed"
 -                                                       " to move slave %d eqs %d to"
 -                                                       " SW ownership\n", slave, eqn);
 +                                              mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n",
 +                                                       slave, eqn);
                                        mlx4_free_cmd_mailbox(dev, mailbox);
                                        atomic_dec(&eq->mtt->ref_count);
                                        state = RES_EQ_RESERVED;
@@@ -4568,8 -4631,8 +4622,8 @@@ static void rem_slave_counters(struct m
  
        err = move_all_busy(dev, slave, RES_COUNTER);
        if (err)
 -              mlx4_warn(dev, "rem_slave_counters: Could not move all counters to "
 -                        "busy for slave %d\n", slave);
 +              mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n",
 +                        slave);
  
        spin_lock_irq(mlx4_tlock(dev));
        list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
@@@ -4599,8 -4662,8 +4653,8 @@@ static void rem_slave_xrcdns(struct mlx
  
        err = move_all_busy(dev, slave, RES_XRCD);
        if (err)
 -              mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns to "
 -                        "busy for slave %d\n", slave);
 +              mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns - too busy for slave %d\n",
 +                        slave);
  
        spin_lock_irq(mlx4_tlock(dev));
        list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
@@@ -4745,8 -4808,10 +4799,8 @@@ void mlx4_vf_immed_vlan_work_handler(st
                                       0, MLX4_CMD_UPDATE_QP,
                                       MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
                        if (err) {
 -                              mlx4_info(dev, "UPDATE_QP failed for slave %d, "
 -                                        "port %d, qpn %d (%d)\n",
 -                                        work->slave, port, qp->local_qpn,
 -                                        err);
 +                              mlx4_info(dev, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n",
 +                                        work->slave, port, qp->local_qpn, err);
                                errors++;
                        }
                }
@@@ -1704,7 -1704,7 +1704,7 @@@ static int stmmac_open(struct net_devic
                if (ret) {
                        pr_err("%s: Cannot attach to PHY (error: %d)\n",
                               __func__, ret);
-                       goto phy_error;
+                       return ret;
                }
        }
  
@@@ -1779,8 -1779,6 +1779,6 @@@ init_error
  dma_desc_error:
        if (priv->phydev)
                phy_disconnect(priv->phydev);
- phy_error:
-       clk_disable_unprepare(priv->stmmac_clk);
  
        return ret;
  }
@@@ -2214,6 -2212,27 +2212,6 @@@ static void stmmac_tx_timeout(struct ne
        stmmac_tx_err(priv);
  }
  
 -/* Configuration changes (passed on by ifconfig) */
 -static int stmmac_config(struct net_device *dev, struct ifmap *map)
 -{
 -      if (dev->flags & IFF_UP)        /* can't act on a running interface */
 -              return -EBUSY;
 -
 -      /* Don't allow changing the I/O address */
 -      if (map->base_addr != dev->base_addr) {
 -              pr_warn("%s: can't change I/O address\n", dev->name);
 -              return -EOPNOTSUPP;
 -      }
 -
 -      /* Don't allow changing the IRQ */
 -      if (map->irq != dev->irq) {
 -              pr_warn("%s: not change IRQ number %d\n", dev->name, dev->irq);
 -              return -EOPNOTSUPP;
 -      }
 -
 -      return 0;
 -}
 -
  /**
   *  stmmac_set_rx_mode - entry point for multicast addressing
   *  @dev : pointer to the device structure
@@@ -2579,6 -2598,7 +2577,6 @@@ static const struct net_device_ops stmm
        .ndo_set_rx_mode = stmmac_set_rx_mode,
        .ndo_tx_timeout = stmmac_tx_timeout,
        .ndo_do_ioctl = stmmac_ioctl,
 -      .ndo_set_config = stmmac_config,
  #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = stmmac_poll_controller,
  #endif
diff --combined drivers/net/macvlan.c
@@@ -30,7 -30,6 +30,7 @@@
  #include <linux/if_link.h>
  #include <linux/if_macvlan.h>
  #include <linux/hash.h>
 +#include <linux/workqueue.h>
  #include <net/rtnetlink.h>
  #include <net/xfrm.h>
  
@@@ -41,19 -40,10 +41,19 @@@ struct macvlan_port 
        struct hlist_head       vlan_hash[MACVLAN_HASH_SIZE];
        struct list_head        vlans;
        struct rcu_head         rcu;
 +      struct sk_buff_head     bc_queue;
 +      struct work_struct      bc_work;
        bool                    passthru;
 -      int                     count;
  };
  
 +#define MACVLAN_PORT_IS_EMPTY(port)    list_empty(&(port)->vlans)
 +
 +struct macvlan_skb_cb {
 +      const struct macvlan_dev *src;
 +};
 +
 +#define MACVLAN_SKB_CB(__skb) ((struct macvlan_skb_cb *)&((__skb)->cb[0]))
 +
  static void macvlan_port_destroy(struct net_device *dev);
  
  static struct macvlan_port *macvlan_port_get_rcu(const struct net_device *dev)
@@@ -130,7 -120,7 +130,7 @@@ static int macvlan_broadcast_one(struc
        struct net_device *dev = vlan->dev;
  
        if (local)
 -              return dev_forward_skb(dev, skb);
 +              return __dev_forward_skb(dev, skb);
  
        skb->dev = dev;
        if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
        else
                skb->pkt_type = PACKET_MULTICAST;
  
 -      return netif_rx(skb);
 +      return 0;
  }
  
  static u32 macvlan_hash_mix(const struct macvlan_dev *vlan)
@@@ -185,32 -175,32 +185,32 @@@ static void macvlan_broadcast(struct sk
                        if (likely(nskb))
                                err = macvlan_broadcast_one(
                                        nskb, vlan, eth,
 -                                      mode == MACVLAN_MODE_BRIDGE);
 +                                      mode == MACVLAN_MODE_BRIDGE) ?:
 +                                    netif_rx_ni(nskb);
                        macvlan_count_rx(vlan, skb->len + ETH_HLEN,
                                         err == NET_RX_SUCCESS, 1);
                }
        }
  }
  
 -/* called under rcu_read_lock() from netif_receive_skb */
 -static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
 +static void macvlan_process_broadcast(struct work_struct *w)
  {
 -      struct macvlan_port *port;
 -      struct sk_buff *skb = *pskb;
 -      const struct ethhdr *eth = eth_hdr(skb);
 -      const struct macvlan_dev *vlan;
 -      const struct macvlan_dev *src;
 -      struct net_device *dev;
 -      unsigned int len = 0;
 -      int ret = NET_RX_DROP;
 +      struct macvlan_port *port = container_of(w, struct macvlan_port,
 +                                               bc_work);
 +      struct sk_buff *skb;
 +      struct sk_buff_head list;
 +
 +      skb_queue_head_init(&list);
 +
 +      spin_lock_bh(&port->bc_queue.lock);
 +      skb_queue_splice_tail_init(&port->bc_queue, &list);
 +      spin_unlock_bh(&port->bc_queue.lock);
 +
 +      while ((skb = __skb_dequeue(&list))) {
 +              const struct macvlan_dev *src = MACVLAN_SKB_CB(skb)->src;
 +
 +              rcu_read_lock();
  
 -      port = macvlan_port_get_rcu(skb->dev);
 -      if (is_multicast_ether_addr(eth->h_dest)) {
 -              skb = ip_check_defrag(skb, IP_DEFRAG_MACVLAN);
 -              if (!skb)
 -                      return RX_HANDLER_CONSUMED;
 -              eth = eth_hdr(skb);
 -              src = macvlan_hash_lookup(port, eth->h_source);
                if (!src)
                        /* frame comes from an external address */
                        macvlan_broadcast(skb, port, NULL,
                        macvlan_broadcast(skb, port, src->dev,
                                          MACVLAN_MODE_VEPA |
                                          MACVLAN_MODE_BRIDGE);
 -              else if (src->mode == MACVLAN_MODE_BRIDGE)
 +              else
                        /*
                         * flood only to VEPA ports, bridge ports
                         * already saw the frame on the way out.
                         */
                        macvlan_broadcast(skb, port, src->dev,
                                          MACVLAN_MODE_VEPA);
 -              else {
 +
 +              rcu_read_unlock();
 +
 +              kfree_skb(skb);
 +      }
 +}
 +
 +static void macvlan_broadcast_enqueue(struct macvlan_port *port,
 +                                    struct sk_buff *skb)
 +{
 +      struct sk_buff *nskb;
 +      int err = -ENOMEM;
 +
 +      nskb = skb_clone(skb, GFP_ATOMIC);
 +      if (!nskb)
 +              goto err;
 +
 +      spin_lock(&port->bc_queue.lock);
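 +      /* bound the broadcast backlog by the device's tx_queue_len */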
 +      if (skb_queue_len(&port->bc_queue) < skb->dev->tx_queue_len) {
 +              __skb_queue_tail(&port->bc_queue, nskb);
 +              err = 0;
 +      }
 +      spin_unlock(&port->bc_queue.lock);
 +
 +      if (err)
 +              goto free_nskb;
 +
 +      schedule_work(&port->bc_work);
 +      return;
 +
 +free_nskb:
 +      kfree_skb(nskb);
 +err:
 +      atomic_long_inc(&skb->dev->rx_dropped);
 +}
 +
 +/* called under rcu_read_lock() from netif_receive_skb */
 +static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
 +{
 +      struct macvlan_port *port;
 +      struct sk_buff *skb = *pskb;
 +      const struct ethhdr *eth = eth_hdr(skb);
 +      const struct macvlan_dev *vlan;
 +      const struct macvlan_dev *src;
 +      struct net_device *dev;
 +      unsigned int len = 0;
 +      int ret = NET_RX_DROP;
 +
 +      port = macvlan_port_get_rcu(skb->dev);
 +      if (is_multicast_ether_addr(eth->h_dest)) {
 +              skb = ip_check_defrag(skb, IP_DEFRAG_MACVLAN);
 +              if (!skb)
 +                      return RX_HANDLER_CONSUMED;
 +              eth = eth_hdr(skb);
 +              src = macvlan_hash_lookup(port, eth->h_source);
 +              if (src && src->mode != MACVLAN_MODE_VEPA &&
 +                  src->mode != MACVLAN_MODE_BRIDGE) {
                        /* forward to original port. */
                        vlan = src;
 -                      ret = macvlan_broadcast_one(skb, vlan, eth, 0);
 +                      ret = macvlan_broadcast_one(skb, vlan, eth, 0) ?:
 +                            netif_rx(skb);
                        goto out;
                }
  
 +              MACVLAN_SKB_CB(skb)->src = src;
 +              macvlan_broadcast_enqueue(port, skb);
 +
                return RX_HANDLER_PASS;
        }
  
@@@ -528,8 -458,10 +528,10 @@@ static void macvlan_change_rx_flags(str
        struct macvlan_dev *vlan = netdev_priv(dev);
        struct net_device *lowerdev = vlan->lowerdev;
  
-       if (change & IFF_ALLMULTI)
-               dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1);
+       if (dev->flags & IFF_UP) {
+               if (change & IFF_ALLMULTI)
+                       dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1);
+       }
  }
  
  static void macvlan_set_mac_lists(struct net_device *dev)
@@@ -585,6 -517,11 +587,11 @@@ static struct lock_class_key macvlan_ne
  #define MACVLAN_STATE_MASK \
        ((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT))
  
+ static int macvlan_get_nest_level(struct net_device *dev)
+ {
+       return ((struct macvlan_dev *)netdev_priv(dev))->nest_level;
+ }
+
  static void macvlan_set_lockdep_class_one(struct net_device *dev,
                                          struct netdev_queue *txq,
                                          void *_unused)
  
  static void macvlan_set_lockdep_class(struct net_device *dev)
  {
-       lockdep_set_class(&dev->addr_list_lock,
-                         &macvlan_netdev_addr_lock_key);
+       lockdep_set_class_and_subclass(&dev->addr_list_lock,
+                                      &macvlan_netdev_addr_lock_key,
+                                      macvlan_get_nest_level(dev));
        netdev_for_each_tx_queue(dev, macvlan_set_lockdep_class_one, NULL);
  }
  
@@@ -629,7 -567,8 +637,7 @@@ static void macvlan_uninit(struct net_d
  
        free_percpu(vlan->pcpu_stats);
  
 -      port->count -= 1;
 -      if (!port->count)
 +      if (MACVLAN_PORT_IS_EMPTY(port))
                macvlan_port_destroy(port->dev);
  }
  
@@@ -790,6 -729,7 +798,7 @@@ static const struct net_device_ops macv
        .ndo_fdb_add            = macvlan_fdb_add,
        .ndo_fdb_del            = macvlan_fdb_del,
        .ndo_fdb_dump           = ndo_dflt_fdb_dump,
+       .ndo_get_lock_subclass  = macvlan_get_nest_level,
  };
  
  void macvlan_common_setup(struct net_device *dev)
@@@ -830,9 -770,6 +839,9 @@@ static int macvlan_port_create(struct n
        for (i = 0; i < MACVLAN_HASH_SIZE; i++)
                INIT_HLIST_HEAD(&port->vlan_hash[i]);
  
 +      skb_queue_head_init(&port->bc_queue);
 +      INIT_WORK(&port->bc_work, macvlan_process_broadcast);
 +
        err = netdev_rx_handler_register(dev, macvlan_handle_frame, port);
        if (err)
                kfree(port);
@@@ -845,7 -782,6 +854,7 @@@ static void macvlan_port_destroy(struc
  {
        struct macvlan_port *port = macvlan_port_get_rtnl(dev);
  
 +      cancel_work_sync(&port->bc_work);
        dev->priv_flags &= ~IFF_MACVLAN_PORT;
        netdev_rx_handler_unregister(dev);
        kfree_rcu(port, rcu);
@@@ -922,6 -858,7 +931,7 @@@ int macvlan_common_newlink(struct net *
        vlan->dev      = dev;
        vlan->port     = port;
        vlan->set_features = MACVLAN_FEATURES;
+       vlan->nest_level = dev_get_nest_level(lowerdev, netif_is_macvlan) + 1;
  
        vlan->mode     = MACVLAN_MODE_VEPA;
        if (data && data[IFLA_MACVLAN_MODE])
                vlan->flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]);
  
        if (vlan->mode == MACVLAN_MODE_PASSTHRU) {
 -              if (port->count)
 +              if (!MACVLAN_PORT_IS_EMPTY(port))
                        return -EINVAL;
                port->passthru = true;
                eth_hw_addr_inherit(dev, lowerdev);
        }
  
 -      port->count += 1;
        err = register_netdevice(dev);
        if (err < 0)
                goto destroy_port;
  unregister_netdev:
        unregister_netdevice(dev);
  destroy_port:
 -      port->count -= 1;
 -      if (!port->count)
 +      if (MACVLAN_PORT_IS_EMPTY(port))
                macvlan_port_destroy(lowerdev);
  
        return err;
@@@ -1089,13 -1028,6 +1099,13 @@@ static int macvlan_device_event(struct 
                        netdev_update_features(vlan->dev);
                }
                break;
 +      case NETDEV_CHANGEMTU:
 +              list_for_each_entry(vlan, &port->vlans, list) {
 +                      if (vlan->dev->mtu <= dev->mtu)
 +                              continue;
 +                      dev_set_mtu(vlan->dev, dev->mtu);
 +              }
 +              break;
        case NETDEV_UNREGISTER:
                /* twiddle thumbs on netns device moves */
                if (dev->reg_state != NETREG_UNREGISTERING)
@@@ -33,7 -33,6 +33,7 @@@
  #include <linux/mdio.h>
  #include <linux/io.h>
  #include <linux/uaccess.h>
 +#include <linux/of.h>
  
  #include <asm/irq.h>
  
@@@ -615,8 -614,8 +615,8 @@@ int phy_attach_direct(struct net_devic
        err = phy_init_hw(phydev);
        if (err)
                phy_detach(phydev);
-       phy_resume(phydev);
+       else
+               phy_resume(phydev);
  
        return err;
  }
@@@ -1068,11 -1067,14 +1068,11 @@@ int genphy_soft_reset(struct phy_devic
  }
  EXPORT_SYMBOL(genphy_soft_reset);
  
 -static int genphy_config_init(struct phy_device *phydev)
 +int genphy_config_init(struct phy_device *phydev)
  {
        int val;
        u32 features;
  
 -      /* For now, I'll claim that the generic driver supports
 -       * all possible port types
 -       */
        features = (SUPPORTED_TP | SUPPORTED_MII
                        | SUPPORTED_AUI | SUPPORTED_FIBRE |
                        SUPPORTED_BNC);
                        features |= SUPPORTED_1000baseT_Half;
        }
  
 -      phydev->supported = features;
 -      phydev->advertising = features;
 +      phydev->supported &= features;
 +      phydev->advertising &= features;
  
        return 0;
  }
@@@ -1116,7 -1118,6 +1116,7 @@@ static int gen10g_soft_reset(struct phy
        /* Do nothing for now */
        return 0;
  }
 +EXPORT_SYMBOL(genphy_config_init);
  
  static int gen10g_config_init(struct phy_device *phydev)
  {
@@@ -1167,38 -1168,6 +1167,38 @@@ static int gen10g_resume(struct phy_dev
        return 0;
  }
  
 +static void of_set_phy_supported(struct phy_device *phydev)
 +{
 +      struct device_node *node = phydev->dev.of_node;
 +      u32 max_speed;
 +
 +      if (!IS_ENABLED(CONFIG_OF_MDIO))
 +              return;
 +
 +      if (!node)
 +              return;
 +
 +      if (!of_property_read_u32(node, "max-speed", &max_speed)) {
 +              /* The default values for phydev->supported are provided by the PHY
 +               * driver "features" member, we want to reset to sane defaults fist
 +               * before supporting higher speeds.
 +               */
 +              phydev->supported &= PHY_DEFAULT_FEATURES;
 +
 +              switch (max_speed) {
 +              default:
 +                      return;
 +
 +              case SPEED_1000:
 +                      phydev->supported |= PHY_1000BT_FEATURES;
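 +                      /* fall through: 1000Mb/s support implies the lower speeds */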
 +              case SPEED_100:
 +                      phydev->supported |= PHY_100BT_FEATURES;
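 +                      /* fall through */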
 +              case SPEED_10:
 +                      phydev->supported |= PHY_10BT_FEATURES;
 +              }
 +      }
 +}
 +
  /**
   * phy_probe - probe and init a PHY device
   * @dev: device to probe and init
@@@ -1233,8 -1202,7 +1233,8 @@@ static int phy_probe(struct device *dev
         * or both of these values
         */
        phydev->supported = phydrv->features;
 -      phydev->advertising = phydrv->features;
 +      of_set_phy_supported(phydev);
 +      phydev->advertising = phydev->supported;
  
        /* Set the state to READY by default */
        phydev->state = PHY_READY;
@@@ -1327,9 -1295,7 +1327,9 @@@ static struct phy_driver genphy_driver[
        .name           = "Generic PHY",
        .soft_reset     = genphy_soft_reset,
        .config_init    = genphy_config_init,
 -      .features       = 0,
 +      .features       = PHY_GBIT_FEATURES | SUPPORTED_MII |
 +                        SUPPORTED_AUI | SUPPORTED_FIBRE |
 +                        SUPPORTED_BNC,
        .config_aneg    = genphy_config_aneg,
        .aneg_done      = genphy_aneg_done,
        .read_status    = genphy_read_status,
@@@ -104,8 -104,11 +104,8 @@@ static const u8 iwl_bt_prio_tbl[BT_COEX
  #define BT_DISABLE_REDUCED_TXPOWER_THRESHOLD  (-65)
  #define BT_ANTENNA_COUPLING_THRESHOLD         (30)
  
 -int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm)
 +static int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm)
  {
 -      if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWBT_COEX))
 -              return 0;
 -
        return iwl_mvm_send_cmd_pdu(mvm, BT_COEX_PRIO_TABLE, CMD_SYNC,
                                    sizeof(struct iwl_bt_coex_prio_tbl_cmd),
                                    &iwl_bt_prio_tbl);
@@@ -570,9 -573,8 +570,9 @@@ int iwl_send_bt_init_conf(struct iwl_mv
        int ret;
        u32 flags;
  
 -      if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWBT_COEX))
 -              return 0;
 +      ret = iwl_send_bt_prio_tbl(mvm);
 +      if (ret)
 +              return ret;
  
        bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
        if (!bt_cmd)
        cmd.data[0] = bt_cmd;
  
        bt_cmd->max_kill = 5;
 -      bt_cmd->bt4_antenna_isolation_thr = BT_ANTENNA_COUPLING_THRESHOLD,
 -      bt_cmd->bt4_antenna_isolation = iwlwifi_mod_params.ant_coupling,
 -      bt_cmd->bt4_tx_tx_delta_freq_thr = 15,
 -      bt_cmd->bt4_tx_rx_max_freq0 = 15,
 +      bt_cmd->bt4_antenna_isolation_thr = BT_ANTENNA_COUPLING_THRESHOLD;
 +      bt_cmd->bt4_antenna_isolation = iwlwifi_mod_params.ant_coupling;
 +      bt_cmd->bt4_tx_tx_delta_freq_thr = 15;
 +      bt_cmd->bt4_tx_rx_max_freq0 = 15;
 +      bt_cmd->override_primary_lut = BT_COEX_INVALID_LUT;
 +      bt_cmd->override_secondary_lut = BT_COEX_INVALID_LUT;
  
        flags = iwlwifi_mod_params.bt_coex_active ?
                        BT_COEX_NW : BT_COEX_DISABLE;
                bt_cmd->flags |= cpu_to_le32(BT_COEX_SYNC2SCO);
  
        if (IWL_MVM_BT_COEX_CORUNNING) {
-               bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_CORUN_LUT_20 |
-                                                   BT_VALID_CORUN_LUT_40);
+               bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_CORUN_LUT_20 |
+                                                    BT_VALID_CORUN_LUT_40);
                bt_cmd->flags |= cpu_to_le32(BT_COEX_CORUNNING);
        }
  
        if (IWL_MVM_BT_COEX_MPLUT) {
                bt_cmd->flags |= cpu_to_le32(BT_COEX_MPLUT);
-               bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_MULTI_PRIO_LUT);
+               bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_MULTI_PRIO_LUT);
        }
  
        if (mvm->cfg->bt_shared_single_ant)
@@@ -1215,17 -1215,6 +1215,17 @@@ bool iwl_mvm_bt_coex_is_mimo_allowed(st
        return iwl_get_coex_type(mvm, mvmsta->vif) == BT_COEX_TIGHT_LUT;
  }
  
 +bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
 +                                  enum ieee80211_band band)
 +{
 +      u32 bt_activity = le32_to_cpu(mvm->last_bt_notif.bt_activity_grading);
 +
 +      if (band != IEEE80211_BAND_2GHZ)
 +              return false;
 +
 +      return bt_activity >= BT_LOW_TRAFFIC;
 +}
 +
  u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
                           struct ieee80211_tx_info *info, u8 ac)
  {
  
  void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm)
  {
 -      if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWBT_COEX))
 -              return;
 -
        iwl_mvm_bt_coex_notif_handle(mvm);
  }
  
@@@ -169,12 -169,8 +169,12 @@@ enum iwl_scan_type 
        SCAN_TYPE_DISCOVERY_FORCED      = 6,
  }; /* SCAN_ACTIVITY_TYPE_E_VER_1 */
  
 -/* Maximal number of channels to scan */
 -#define MAX_NUM_SCAN_CHANNELS 0x24
 +/**
 + * Maximal number of channels to scan
 + * it should be equal to:
 + * max(IWL_NUM_CHANNELS, IWL_NUM_CHANNELS_FAMILY_8000)
 + */
 +#define MAX_NUM_SCAN_CHANNELS 50
  
  /**
   * struct iwl_scan_cmd - scan request command
   *    this number of packets were received (typically 1)
   * @passive2active: is auto switching from passive to active during scan allowed
   * @rxchain_sel_flags: RXON_RX_CHAIN_*
-  * @max_out_time: in usecs, max out of serving channel time
+  * @max_out_time: in TUs, max out of serving channel time
   * @suspend_time: how long to pause scan when returning to service channel:
-  *    bits 0-19: beacon interal in usecs (suspend before executing)
 +  *    bits 0-19: beacon interval in TUs (suspend before executing)
   *    bits 20-23: reserved
   *    bits 24-31: number of beacons (suspend between channels)
   * @rxon_flags: RXON_FLG_*
@@@ -387,8 -383,8 +387,8 @@@ enum scan_framework_client 
   * @quiet_plcp_th:    quiet channel num of packets threshold
   * @good_CRC_th:      passive to active promotion threshold
   * @rx_chain:         RXON rx chain.
-  * @max_out_time:     max uSec to be out of assoceated channel
-  * @suspend_time:     pause scan this long when returning to service channel
 +  * @max_out_time:     max TUs to be out of associated channel
 +  * @suspend_time:     pause scan this many TUs when returning to service channel
   * @flags:            RXON flags
   * @filter_flags:     RXON filter
   * @tx_cmd:           tx command for active scan; for 2GHz and for 5GHz.
@@@ -538,16 -534,13 +538,16 @@@ struct iwl_scan_offload_schedule 
   *
   * IWL_SCAN_OFFLOAD_FLAG_PASS_ALL: pass all results - no filtering.
   * IWL_SCAN_OFFLOAD_FLAG_CACHED_CHANNEL: add cached channels to partial scan.
 - * IWL_SCAN_OFFLOAD_FLAG_ENERGY_SCAN: use energy based scan before partial scan
 - *    on A band.
 + * IWL_SCAN_OFFLOAD_FLAG_EBS_QUICK_MODE: EBS duration is 100mSec - typical
 + *    beacon period. Finding channel activity in this mode is not guaranteed.
 + * IWL_SCAN_OFFLOAD_FLAG_EBS_ACCURATE_MODE: EBS duration is 200mSec.
 + *    Assuming a beacon period of 100ms, finding channel activity is guaranteed.
   */
  enum iwl_scan_offload_flags {
        IWL_SCAN_OFFLOAD_FLAG_PASS_ALL          = BIT(0),
        IWL_SCAN_OFFLOAD_FLAG_CACHED_CHANNEL    = BIT(2),
 -      IWL_SCAN_OFFLOAD_FLAG_ENERGY_SCAN       = BIT(3),
 +      IWL_SCAN_OFFLOAD_FLAG_EBS_QUICK_MODE    = BIT(5),
 +      IWL_SCAN_OFFLOAD_FLAG_EBS_ACCURATE_MODE = BIT(6),
  };
  
  /**
@@@ -570,24 -563,17 +570,24 @@@ enum iwl_scan_offload_compleate_status 
        IWL_SCAN_OFFLOAD_ABORTED        = 2,
  };
  
 +enum iwl_scan_ebs_status {
 +      IWL_SCAN_EBS_SUCCESS,
 +      IWL_SCAN_EBS_FAILED,
 +      IWL_SCAN_EBS_CHAN_NOT_FOUND,
 +};
 +
  /**
   * iwl_scan_offload_complete - SCAN_OFFLOAD_COMPLETE_NTF_API_S_VER_1
   * @last_schedule_line:               last schedule line executed (fast or regular)
   * @last_schedule_iteration:  last scan iteration executed before scan abort
   * @status:                   enum iwl_scan_offload_compleate_status
 + * @ebs_status: last EBS status, see IWL_SCAN_EBS_*
   */
  struct iwl_scan_offload_complete {
        u8 last_schedule_line;
        u8 last_schedule_iteration;
        u8 status;
 -      u8 reserved;
 +      u8 ebs_status;
  } __packed;
  
  /**
@@@ -276,7 -276,6 +276,7 @@@ int iwl_mvm_mac_setup_register(struct i
                    IEEE80211_HW_AMPDU_AGGREGATION |
                    IEEE80211_HW_TIMING_BEACON_ONLY |
                    IEEE80211_HW_CONNECTION_MONITOR |
 +                  IEEE80211_HW_SUPPORTS_UAPSD |
                    IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
                    IEEE80211_HW_SUPPORTS_STATIC_SMPS;
  
                                    IEEE80211_RADIOTAP_MCS_HAVE_STBC;
        hw->radiotap_vht_details |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC;
        hw->rate_control_algorithm = "iwl-mvm-rs";
 +      hw->uapsd_queues = IWL_UAPSD_AC_INFO;
 +      hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
  
        /*
         * Enable 11w if advertised by firmware and software crypto
            !iwlwifi_mod_params.sw_crypto)
                hw->flags |= IEEE80211_HW_MFP_CAPABLE;
  
 -      if (0 && mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT) {
 -              hw->flags |= IEEE80211_HW_SUPPORTS_UAPSD;
 -              hw->uapsd_queues = IWL_UAPSD_AC_INFO;
 -              hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
 -      }
 +      /* Disable uAPSD due to firmware issues */
 +      if (true)
 +              hw->flags &= ~IEEE80211_HW_SUPPORTS_UAPSD;
  
        hw->sta_data_size = sizeof(struct iwl_mvm_sta);
        hw->vif_data_size = sizeof(struct iwl_mvm_vif);
                BIT(NL80211_IFTYPE_P2P_CLIENT) |
                BIT(NL80211_IFTYPE_AP) |
                BIT(NL80211_IFTYPE_P2P_GO) |
 -              BIT(NL80211_IFTYPE_P2P_DEVICE);
 -
 -      /* IBSS has bugs in older versions */
 -      if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 8)
 -              hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
 +              BIT(NL80211_IFTYPE_P2P_DEVICE) |
 +              BIT(NL80211_IFTYPE_ADHOC);
  
        hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
        hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
        else
                hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
  
 -      if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_SCHED_SCAN) {
 -              hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
 -              hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
 -              hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
 -              /* we create the 802.11 header and zero length SSID IE. */
 -              hw->wiphy->max_sched_scan_ie_len =
 -                                      SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2;
 -      }
 +      hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
 +      hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
 +      hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
 +      /* we create the 802.11 header and zero length SSID IE. */
 +      hw->wiphy->max_sched_scan_ie_len = SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2;
  
        hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN |
                               NL80211_FEATURE_P2P_GO_OPPPS;
        }
  
  #ifdef CONFIG_PM_SLEEP
 -      if (mvm->fw->img[IWL_UCODE_WOWLAN].sec[0].len &&
 +      if (iwl_mvm_is_d0i3_supported(mvm) &&
 +          device_can_wakeup(mvm->trans->dev)) {
 +              mvm->wowlan.flags = WIPHY_WOWLAN_ANY;
 +              hw->wiphy->wowlan = &mvm->wowlan;
 +      } else if (mvm->fw->img[IWL_UCODE_WOWLAN].sec[0].len &&
            mvm->trans->ops->d3_suspend &&
            mvm->trans->ops->d3_resume &&
            device_can_wakeup(mvm->trans->dev)) {
@@@ -826,7 -827,8 +826,7 @@@ static int iwl_mvm_mac_add_interface(st
                goto out_remove_mac;
  
        if (!mvm->bf_allowed_vif &&
 -          vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
 -          mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BF_UPDATED){
 +          vif->type == NL80211_IFTYPE_STATION && !vif->p2p) {
                mvm->bf_allowed_vif = mvmvif;
                vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
                                     IEEE80211_VIF_SUPPORTS_CQM_RSSI;
@@@ -1005,7 -1007,7 +1005,7 @@@ static void iwl_mvm_mc_iface_iterator(v
        memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN);
        len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4);
  
-       ret = iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_SYNC, len, cmd);
+       ret = iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_ASYNC, len, cmd);
        if (ret)
                IWL_ERR(mvm, "mcast filter cmd error. ret=%d\n", ret);
  }
@@@ -1021,7 -1023,7 +1021,7 @@@ static void iwl_mvm_recalc_multicast(st
        if (WARN_ON_ONCE(!mvm->mcast_filter_cmd))
                return;
  
-       ieee80211_iterate_active_interfaces(
+       ieee80211_iterate_active_interfaces_atomic(
                mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
                iwl_mvm_mc_iface_iterator, &iter_data);
  }
@@@ -1221,10 -1223,6 +1221,10 @@@ static int iwl_mvm_configure_bcast_filt
        if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING))
                return 0;
  
 +      /* bcast filtering isn't supported for P2P client */
 +      if (vif->p2p)
 +              return 0;
 +
        if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
                return 0;
  
@@@ -1699,11 -1697,6 +1699,11 @@@ static int iwl_mvm_mac_sta_state(struc
                ret = iwl_mvm_add_sta(mvm, vif, sta);
        } else if (old_state == IEEE80211_STA_NONE &&
                   new_state == IEEE80211_STA_AUTH) {
 +              /*
 +               * EBS may be disabled due to previous failures reported by FW.
 +               * Reset EBS status here assuming environment has been changed.
 +               */
 +              mvm->last_ebs_successful = true;
                ret = 0;
        } else if (old_state == IEEE80211_STA_AUTH &&
                   new_state == IEEE80211_STA_ASSOC) {
@@@ -1814,6 -1807,11 +1814,11 @@@ static int iwl_mvm_mac_sched_scan_start
  
        mutex_lock(&mvm->mutex);
  
+       if (!iwl_mvm_is_idle(mvm)) {
+               ret = -EBUSY;
+               goto out;
+       }
+
        switch (mvm->scan_status) {
        case IWL_MVM_SCAN_OS:
                IWL_DEBUG_SCAN(mvm, "Stopping previous scan for sched_scan\n");
@@@ -164,6 -164,7 +164,6 @@@ enum iwl_dbgfs_pm_mask 
        MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS = BIT(2),
        MVM_DEBUGFS_PM_RX_DATA_TIMEOUT = BIT(3),
        MVM_DEBUGFS_PM_TX_DATA_TIMEOUT = BIT(4),
 -      MVM_DEBUGFS_PM_DISABLE_POWER_OFF = BIT(5),
        MVM_DEBUGFS_PM_LPRX_ENA = BIT(6),
        MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD = BIT(7),
        MVM_DEBUGFS_PM_SNOOZE_ENABLE = BIT(8),
@@@ -176,6 -177,7 +176,6 @@@ struct iwl_dbgfs_pm 
        u32 tx_data_timeout;
        bool skip_over_dtim;
        u8 skip_dtim_periods;
 -      bool disable_power_off;
        bool lprx_ena;
        u32 lprx_rssi_threshold;
        bool snooze_ena;
@@@ -230,7 -232,6 +230,7 @@@ enum iwl_mvm_ref_type 
        IWL_MVM_REF_USER,
        IWL_MVM_REF_TX,
        IWL_MVM_REF_TX_AGG,
 +      IWL_MVM_REF_EXIT_WORK,
  
        IWL_MVM_REF_COUNT,
  };
@@@ -264,7 -265,6 +264,7 @@@ struct iwl_mvm_vif_bf_data 
   * @uploaded: indicates the MAC context has been added to the device
   * @ap_ibss_active: indicates that AP/IBSS is configured and that the interface
   *    should get quota etc.
 + * @pm_enabled: indicates if MAC power management is allowed
   * @monitor_active: indicates that monitor context is configured, and that the
   *    interface should get quota etc.
   * @low_latency: indicates that this interface is in low-latency mode
@@@ -283,7 -283,6 +283,7 @@@ struct iwl_mvm_vif 
  
        bool uploaded;
        bool ap_ibss_active;
 +      bool pm_enabled;
        bool monitor_active;
        bool low_latency;
        struct iwl_mvm_vif_bf_data bf_data;
@@@ -452,11 -451,6 +452,11 @@@ struct iwl_mvm_frame_stats 
        int last_frame_idx;
  };
  
 +enum {
 +      D0I3_DEFER_WAKEUP,
 +      D0I3_PENDING_WAKEUP,
 +};
 +
  struct iwl_mvm {
        /* for logger access */
        struct device *dev;
        /* Internal station */
        struct iwl_mvm_int_sta aux_sta;
  
 +      bool last_ebs_successful;
 +
        u8 scan_last_antenna_idx; /* to toggle TX between antennas */
        u8 mgmt_last_antenna_idx;
  
        void *fw_error_dump;
        void *fw_error_sram;
        u32 fw_error_sram_len;
 +      u32 *fw_error_rxf;
 +      u32 fw_error_rxf_len;
  
        struct led_classdev led;
  
        bool d0i3_offloading;
        struct work_struct d0i3_exit_work;
        struct sk_buff_head d0i3_tx;
 +      /* protect d0i3_suspend_flags */
 +      struct mutex d0i3_suspend_mutex;
 +      unsigned long d0i3_suspend_flags;
        /* sync d0i3_tx queue and IWL_MVM_STATUS_IN_D0I3 status flag */
        spinlock_t d0i3_tx_lock;
        wait_queue_head_t d0i3_exit_waitq;
  
        /* Indicate if device power save is allowed */
        bool ps_disabled;
 -      /* Indicate if device power management is allowed */
 -      bool pm_disabled;
  };
  
  /* Extract MVM priv from op_mode and _hw */
@@@ -716,7 -705,6 +716,7 @@@ void iwl_mvm_dump_nic_error_log(struct 
  #ifdef CONFIG_IWLWIFI_DEBUGFS
  void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm);
  void iwl_mvm_fw_error_sram_dump(struct iwl_mvm *mvm);
 +void iwl_mvm_fw_error_rxf_dump(struct iwl_mvm *mvm);
  #endif
  u8 first_antenna(u8 mask);
  u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx);
@@@ -886,6 -874,8 +886,6 @@@ void iwl_mvm_update_frame_stats(struct 
  int rs_pretty_print_rate(char *buf, const u32 rate);
  
  /* power management */
 -int iwl_power_legacy_set_cam_mode(struct iwl_mvm *mvm);
 -
  int iwl_mvm_power_update_device(struct iwl_mvm *mvm);
  int iwl_mvm_power_update_mac(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
  int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
@@@ -932,9 -922,9 +932,9 @@@ int iwl_mvm_send_proto_offload(struct i
  void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type);
  void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type);
  void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq);
 +int _iwl_mvm_exit_d0i3(struct iwl_mvm *mvm);
  
  /* BT Coex */
 -int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm);
  int iwl_send_bt_init_conf(struct iwl_mvm *mvm);
  int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
                             struct iwl_rx_cmd_buffer *rxb,
@@@ -946,8 -936,6 +946,8 @@@ u16 iwl_mvm_coex_agg_time_limit(struct 
                                struct ieee80211_sta *sta);
  bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
                                     struct ieee80211_sta *sta);
 +bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
 +                                  enum ieee80211_band band);
  u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
                           struct ieee80211_tx_info *info, u8 ac);
  int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id, bool enable);
@@@ -1015,6 -1003,9 +1015,9 @@@ static inline bool iwl_mvm_vif_low_late
        return mvmvif->low_latency;
  }
  
+ /* Assoc status */
+ bool iwl_mvm_is_idle(struct iwl_mvm *mvm);
+
  /* Thermal management and CT-kill */
  void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff);
  void iwl_mvm_tt_handler(struct iwl_mvm *mvm);
@@@ -527,9 -527,6 +527,9 @@@ static void rs_rate_scale_clear_tbl_win
        IWL_DEBUG_RATE(mvm, "Clearing up window stats\n");
        for (i = 0; i < IWL_RATE_COUNT; i++)
                rs_rate_scale_clear_window(&tbl->win[i]);
 +
 +      for (i = 0; i < ARRAY_SIZE(tbl->tpc_win); i++)
 +              rs_rate_scale_clear_window(&tbl->tpc_win[i]);
  }
  
  static inline u8 rs_is_valid_ant(u8 valid_antenna, u8 ant_type)
@@@ -659,34 -656,17 +659,34 @@@ static int _rs_collect_tx_data(struct i
        return 0;
  }
  
 -static int rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,
 -                            int scale_index, int attempts, int successes)
 +static int rs_collect_tx_data(struct iwl_lq_sta *lq_sta,
 +                            struct iwl_scale_tbl_info *tbl,
 +                            int scale_index, int attempts, int successes,
 +                            u8 reduced_txp)
  {
        struct iwl_rate_scale_data *window = NULL;
 +      int ret;
  
        if (scale_index < 0 || scale_index >= IWL_RATE_COUNT)
                return -EINVAL;
  
 +      if (tbl->column != RS_COLUMN_INVALID) {
 +              lq_sta->tx_stats[tbl->column][scale_index].total += attempts;
 +              lq_sta->tx_stats[tbl->column][scale_index].success += successes;
 +      }
 +
        /* Select window for current tx bit rate */
        window = &(tbl->win[scale_index]);
  
 +      ret = _rs_collect_tx_data(tbl, scale_index, attempts, successes,
 +                                window);
 +      if (ret)
 +              return ret;
 +
 +      if (WARN_ON_ONCE(reduced_txp > TPC_MAX_REDUCTION))
 +              return -EINVAL;
 +
 +      window = &tbl->tpc_win[reduced_txp];
        return _rs_collect_tx_data(tbl, scale_index, attempts, successes,
                                   window);
  }
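
The collector above now feeds every TX report into two histories at once:
the per-rate success window and the window of the TX-power reduction step
that was in effect. A minimal userspace sketch of that dual bookkeeping,
with invented sizes standing in for IWL_RATE_COUNT and TPC_MAX_REDUCTION:

  #include <stdio.h>

  #define RATE_COUNT 15 /* stand-in for IWL_RATE_COUNT */
  #define TPC_STEPS   6 /* stand-in for TPC_MAX_REDUCTION + 1 */

  struct win { unsigned int attempts, successes; };

  static struct win rate_win[RATE_COUNT];
  static struct win tpc_win[TPC_STEPS];

  /* shape of rs_collect_tx_data(): one report updates both windows */
  static int collect(int rate, int att, int ok, int reduced_txp)
  {
          if (rate < 0 || rate >= RATE_COUNT)
                  return -1;
          if (reduced_txp < 0 || reduced_txp >= TPC_STEPS)
                  return -1;
          rate_win[rate].attempts += att;
          rate_win[rate].successes += ok;
          tpc_win[reduced_txp].attempts += att;
          tpc_win[reduced_txp].successes += ok;
          return 0;
  }

  int main(void)
  {
          collect(7, 10, 8, 2); /* 10 frames at rate 7, 8 ACKed, txp -2 steps */
          printf("rate 7: %u/%u  tpc step 2: %u/%u\n",
                 rate_win[7].successes, rate_win[7].attempts,
                 tpc_win[2].successes, tpc_win[2].attempts);
          return 0;
  }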
@@@ -1020,7 -1000,6 +1020,7 @@@ static void rs_tx_status(void *mvm_r, s
        u32 ucode_rate;
        struct rs_rate rate;
        struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
 +      u8 reduced_txp = (uintptr_t)info->status.status_driver_data[0];
  
        /* Treat uninitialized rate scaling data same as non-existing. */
        if (!lq_sta) {
                return;
        }
  
- #ifdef CPTCFG_MAC80211_DEBUGFS
+ #ifdef CONFIG_MAC80211_DEBUGFS
        /* Disable last tx check if we are debugging with fixed rate */
        if (lq_sta->dbg_fixed_rate) {
                IWL_DEBUG_RATE(mvm, "Fixed rate. avoid rate scaling\n");
        if (info->flags & IEEE80211_TX_STAT_AMPDU) {
                ucode_rate = le32_to_cpu(table->rs_table[0]);
                rs_rate_from_ucode_rate(ucode_rate, info->band, &rate);
 -              rs_collect_tx_data(curr_tbl, rate.index,
 +              rs_collect_tx_data(lq_sta, curr_tbl, rate.index,
                                   info->status.ampdu_len,
 -                                 info->status.ampdu_ack_len);
 +                                 info->status.ampdu_ack_len,
 +                                 reduced_txp);
  
                /* Update success/fail counts if not searching for new mode */
                if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN) {
                        else
                                continue;
  
 -                      rs_collect_tx_data(tmp_tbl, rate.index, 1,
 -                                         i < retries ? 0 : legacy_success);
 +                      rs_collect_tx_data(lq_sta, tmp_tbl, rate.index, 1,
 +                                         i < retries ? 0 : legacy_success,
 +                                         reduced_txp);
                }
  
                /* Update success/fail counts if not searching for new mode */
        }
        /* The last TX rate is cached in lq_sta; it's set in if/else above */
        lq_sta->last_rate_n_flags = ucode_rate;
 +      IWL_DEBUG_RATE(mvm, "reduced txpower: %d\n", reduced_txp);
  done:
        /* See if there's a better rate or modulation mode to try. */
        if (sta && sta->supp_rates[sband->band])
        return action;
  }
  
 +static void rs_get_adjacent_txp(struct iwl_mvm *mvm, int index,
 +                              int *weaker, int *stronger)
 +{
 +      *weaker = index + TPC_TX_POWER_STEP;
 +      if (*weaker > TPC_MAX_REDUCTION)
 +              *weaker = TPC_INVALID;
 +
 +      *stronger = index - TPC_TX_POWER_STEP;
 +      if (*stronger < 0)
 +              *stronger = TPC_INVALID;
 +}
 +
 +static bool rs_tpc_allowed(struct iwl_mvm *mvm, struct rs_rate *rate,
 +                         enum ieee80211_band band)
 +{
 +      int index = rate->index;
 +
 +      /*
 +       * allow tpc only if power management is enabled, or bt coex
 +       * activity grade allows it and we are on 2.4 GHz.
 +       */
 +      if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM &&
 +          !iwl_mvm_bt_coex_is_tpc_allowed(mvm, band))
 +              return false;
 +
 +      IWL_DEBUG_RATE(mvm, "check rate, table type: %d\n", rate->type);
 +      if (is_legacy(rate))
 +              return index == IWL_RATE_54M_INDEX;
 +      if (is_ht(rate))
 +              return index == IWL_RATE_MCS_7_INDEX;
 +      if (is_vht(rate))
 +              return index == IWL_RATE_MCS_7_INDEX ||
 +                     index == IWL_RATE_MCS_8_INDEX ||
 +                     index == IWL_RATE_MCS_9_INDEX;
 +
 +      WARN_ON_ONCE(1);
 +      return false;
 +}
 +
 +enum tpc_action {
 +      TPC_ACTION_STAY,
 +      TPC_ACTION_DECREASE,
 +      TPC_ACTION_INCREASE,
 +      TPC_ACTION_NO_RESTRICTION,
 +};
 +
 +static enum tpc_action rs_get_tpc_action(struct iwl_mvm *mvm,
 +                                       s32 sr, int weak, int strong,
 +                                       int current_tpt,
 +                                       int weak_tpt, int strong_tpt)
 +{
 +      /* stay until we have valid tpt */
 +      if (current_tpt == IWL_INVALID_VALUE) {
 +              IWL_DEBUG_RATE(mvm, "no current tpt. stay.\n");
 +              return TPC_ACTION_STAY;
 +      }
 +
 +      /* Too many failures, increase txp */
 +      if (sr <= TPC_SR_FORCE_INCREASE || current_tpt == 0) {
 +              IWL_DEBUG_RATE(mvm, "increase txp because of weak SR\n");
 +              return TPC_ACTION_NO_RESTRICTION;
 +      }
 +
 +      /* try decreasing first if applicable */
 +      if (weak != TPC_INVALID) {
 +              if (weak_tpt == IWL_INVALID_VALUE &&
 +                  (strong_tpt == IWL_INVALID_VALUE ||
 +                   current_tpt >= strong_tpt)) {
 +                      IWL_DEBUG_RATE(mvm,
 +                                     "no weak txp measurement. decrease txp\n");
 +                      return TPC_ACTION_DECREASE;
 +              }
 +
 +              if (weak_tpt > current_tpt) {
 +                      IWL_DEBUG_RATE(mvm,
 +                                     "lower txp has better tpt. decrease txp\n");
 +                      return TPC_ACTION_DECREASE;
 +              }
 +      }
 +
 +      /* next, increase if needed */
 +      if (sr < TPC_SR_NO_INCREASE && strong != TPC_INVALID) {
 +              if (weak_tpt == IWL_INVALID_VALUE &&
 +                  strong_tpt != IWL_INVALID_VALUE &&
 +                  current_tpt < strong_tpt) {
 +                      IWL_DEBUG_RATE(mvm,
 +                                     "higher txp has better tpt. increase txp\n");
 +                      return TPC_ACTION_INCREASE;
 +              }
 +
 +              if (weak_tpt < current_tpt &&
 +                  (strong_tpt == IWL_INVALID_VALUE ||
 +                   strong_tpt > current_tpt)) {
 +                      IWL_DEBUG_RATE(mvm,
 +                                     "lower txp has worse tpt. increase txp\n");
 +                      return TPC_ACTION_INCREASE;
 +              }
 +      }
 +
 +      IWL_DEBUG_RATE(mvm, "no need to increase or decrease txp - stay\n");
 +      return TPC_ACTION_STAY;
 +}
 +
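To make the branch ordering of rs_get_tpc_action() concrete, here is a
userspace sketch of the same decision rules; INVALID and the two thresholds
are illustrative stand-ins for IWL_INVALID_VALUE, TPC_SR_FORCE_INCREASE and
TPC_SR_NO_INCREASE:

  #include <stdio.h>

  #define INVALID (-1)            /* stand-in for IWL_INVALID_VALUE */
  #define SR_FORCE_INCREASE 15    /* illustrative thresholds */
  #define SR_NO_INCREASE 85

  enum action { STAY, DECREASE, INCREASE, NO_RESTRICTION };

  static enum action tpc_action(int sr, int weak, int strong,
                                int cur_tpt, int weak_tpt, int strong_tpt)
  {
          if (cur_tpt == INVALID)                 /* no data yet: stay */
                  return STAY;
          if (sr <= SR_FORCE_INCREASE || cur_tpt == 0)
                  return NO_RESTRICTION;          /* too many failures */
          if (weak != INVALID) {                  /* try decreasing first */
                  if (weak_tpt == INVALID &&
                      (strong_tpt == INVALID || cur_tpt >= strong_tpt))
                          return DECREASE;
                  if (weak_tpt > cur_tpt)
                          return DECREASE;
          }
          if (sr < SR_NO_INCREASE && strong != INVALID) {
                  if (weak_tpt == INVALID && strong_tpt != INVALID &&
                      cur_tpt < strong_tpt)
                          return INCREASE;
                  if (weak_tpt < cur_tpt &&
                      (strong_tpt == INVALID || strong_tpt > cur_tpt))
                          return INCREASE;
          }
          return STAY;
  }

  int main(void)
  {
          /* healthy link, weaker step untried: probe a lower txp */
          printf("%d\n", tpc_action(90, 3, 1, 500, INVALID, INVALID));
          return 0;                               /* prints 1 == DECREASE */
  }
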
 +static bool rs_tpc_perform(struct iwl_mvm *mvm,
 +                         struct ieee80211_sta *sta,
 +                         struct iwl_lq_sta *lq_sta,
 +                         struct iwl_scale_tbl_info *tbl)
 +{
 +      struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
 +      struct ieee80211_vif *vif = mvm_sta->vif;
 +      struct ieee80211_chanctx_conf *chanctx_conf;
 +      enum ieee80211_band band;
 +      struct iwl_rate_scale_data *window;
 +      struct rs_rate *rate = &tbl->rate;
 +      enum tpc_action action;
 +      s32 sr;
 +      u8 cur = lq_sta->lq.reduced_tpc;
 +      int current_tpt;
 +      int weak, strong;
 +      int weak_tpt = IWL_INVALID_VALUE, strong_tpt = IWL_INVALID_VALUE;
 +
 +#ifdef CONFIG_MAC80211_DEBUGFS
 +      if (lq_sta->dbg_fixed_txp_reduction <= TPC_MAX_REDUCTION) {
 +              IWL_DEBUG_RATE(mvm, "fixed tpc: %d\n",
 +                             lq_sta->dbg_fixed_txp_reduction);
 +              lq_sta->lq.reduced_tpc = lq_sta->dbg_fixed_txp_reduction;
 +              return cur != lq_sta->dbg_fixed_txp_reduction;
 +      }
 +#endif
 +
 +      rcu_read_lock();
 +      chanctx_conf = rcu_dereference(vif->chanctx_conf);
 +      if (WARN_ON(!chanctx_conf))
 +              band = IEEE80211_NUM_BANDS;
 +      else
 +              band = chanctx_conf->def.chan->band;
 +      rcu_read_unlock();
 +
 +      if (!rs_tpc_allowed(mvm, rate, band)) {
 +              IWL_DEBUG_RATE(mvm,
 +                             "tpc is not allowed. remove txp restrictions\n");
 +              lq_sta->lq.reduced_tpc = TPC_NO_REDUCTION;
 +              return cur != TPC_NO_REDUCTION;
 +      }
 +
 +      rs_get_adjacent_txp(mvm, cur, &weak, &strong);
 +
 +      /* Collect measured throughputs for current and adjacent rates */
 +      window = tbl->tpc_win;
 +      sr = window[cur].success_ratio;
 +      current_tpt = window[cur].average_tpt;
 +      if (weak != TPC_INVALID)
 +              weak_tpt = window[weak].average_tpt;
 +      if (strong != TPC_INVALID)
 +              strong_tpt = window[strong].average_tpt;
 +
 +      IWL_DEBUG_RATE(mvm,
 +                     "(TPC: %d): cur_tpt %d SR %d weak %d strong %d weak_tpt %d strong_tpt %d\n",
 +                     cur, current_tpt, sr, weak, strong,
 +                     weak_tpt, strong_tpt);
 +
 +      action = rs_get_tpc_action(mvm, sr, weak, strong,
 +                                 current_tpt, weak_tpt, strong_tpt);
 +
 +      /* override actions if we are on the edge */
 +      if (weak == TPC_INVALID && action == TPC_ACTION_DECREASE) {
 +              IWL_DEBUG_RATE(mvm, "already in lowest txp, stay\n");
 +              action = TPC_ACTION_STAY;
 +      } else if (strong == TPC_INVALID &&
 +                 (action == TPC_ACTION_INCREASE ||
 +                  action == TPC_ACTION_NO_RESTRICTION)) {
 +              IWL_DEBUG_RATE(mvm, "already in highest txp, stay\n");
 +              action = TPC_ACTION_STAY;
 +      }
 +
 +      switch (action) {
 +      case TPC_ACTION_DECREASE:
 +              lq_sta->lq.reduced_tpc = weak;
 +              return true;
 +      case TPC_ACTION_INCREASE:
 +              lq_sta->lq.reduced_tpc = strong;
 +              return true;
 +      case TPC_ACTION_NO_RESTRICTION:
 +              lq_sta->lq.reduced_tpc = TPC_NO_REDUCTION;
 +              return true;
 +      case TPC_ACTION_STAY:
 +              /* do nothing */
 +              break;
 +      }
 +      return false;
 +}
 +
  /*
   * Do rate scaling and search for new modulation mode.
   */
@@@ -2235,8 -2019,6 +2235,8 @@@ static void rs_rate_scale_perform(struc
                break;
        case RS_ACTION_STAY:
                /* No change */
 +              update_lq = rs_tpc_perform(mvm, sta, lq_sta, tbl);
 +              break;
        default:
                break;
        }
@@@ -2696,7 -2478,6 +2696,7 @@@ void iwl_mvm_rs_rate_init(struct iwl_mv
        lq_sta->is_agg = 0;
  #ifdef CONFIG_MAC80211_DEBUGFS
        lq_sta->dbg_fixed_rate = 0;
 +      lq_sta->dbg_fixed_txp_reduction = TPC_INVALID;
  #endif
  #ifdef CONFIG_IWLWIFI_DEBUGFS
        iwl_mvm_reset_frame_stats(mvm, &mvm->drv_rx_stats);
@@@ -2872,7 -2653,6 +2872,7 @@@ static void rs_fill_lq_cmd(struct iwl_m
                rs_build_rates_table_from_fixed(mvm, lq_cmd,
                                                lq_sta->band,
                                                lq_sta->dbg_fixed_rate);
 +              lq_cmd->reduced_tpc = 0;
                ant = (lq_sta->dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK) >>
                        RATE_MCS_ANT_POS;
        } else
@@@ -3003,6 -2783,7 +3003,6 @@@ static ssize_t rs_sta_dbgfs_scale_table
        size_t buf_size;
        u32 parsed_rate;
  
 -
        mvm = lq_sta->drv;
        memset(buf, 0, sizeof(buf));
        buf_size = min(count, sizeof(buf) -  1);
@@@ -3075,7 -2856,6 +3075,7 @@@ static ssize_t rs_sta_dbgfs_scale_table
                        lq_sta->lq.agg_disable_start_th,
                        lq_sta->lq.agg_frame_cnt_limit);
  
 +      desc += sprintf(buff+desc, "reduced tpc=%d\n", lq_sta->lq.reduced_tpc);
        desc += sprintf(buff+desc,
                        "Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n",
                        lq_sta->lq.initial_rate_index[0],
@@@ -3148,94 -2928,6 +3148,94 @@@ static const struct file_operations rs_
        .llseek = default_llseek,
  };
  
 +static ssize_t rs_sta_dbgfs_drv_tx_stats_read(struct file *file,
 +                                            char __user *user_buf,
 +                                            size_t count, loff_t *ppos)
 +{
 +      static const char * const column_name[] = {
 +              [RS_COLUMN_LEGACY_ANT_A] = "LEGACY_ANT_A",
 +              [RS_COLUMN_LEGACY_ANT_B] = "LEGACY_ANT_B",
 +              [RS_COLUMN_SISO_ANT_A] = "SISO_ANT_A",
 +              [RS_COLUMN_SISO_ANT_B] = "SISO_ANT_B",
 +              [RS_COLUMN_SISO_ANT_A_SGI] = "SISO_ANT_A_SGI",
 +              [RS_COLUMN_SISO_ANT_B_SGI] = "SISO_ANT_B_SGI",
 +              [RS_COLUMN_MIMO2] = "MIMO2",
 +              [RS_COLUMN_MIMO2_SGI] = "MIMO2_SGI",
 +      };
 +
 +      static const char * const rate_name[] = {
 +              [IWL_RATE_1M_INDEX] = "1M",
 +              [IWL_RATE_2M_INDEX] = "2M",
 +              [IWL_RATE_5M_INDEX] = "5.5M",
 +              [IWL_RATE_11M_INDEX] = "11M",
 +              [IWL_RATE_6M_INDEX] = "6M|MCS0",
 +              [IWL_RATE_9M_INDEX] = "9M",
 +              [IWL_RATE_12M_INDEX] = "12M|MCS1",
 +              [IWL_RATE_18M_INDEX] = "18M|MCS2",
 +              [IWL_RATE_24M_INDEX] = "24M|MCS3",
 +              [IWL_RATE_36M_INDEX] = "36M|MCS4",
 +              [IWL_RATE_48M_INDEX] = "48M|MCS5",
 +              [IWL_RATE_54M_INDEX] = "54M|MCS6",
 +              [IWL_RATE_MCS_7_INDEX] = "MCS7",
 +              [IWL_RATE_MCS_8_INDEX] = "MCS8",
 +              [IWL_RATE_MCS_9_INDEX] = "MCS9",
 +      };
 +
 +      char *buff, *pos, *endpos;
 +      int col, rate;
 +      ssize_t ret;
 +      struct iwl_lq_sta *lq_sta = file->private_data;
 +      struct rs_rate_stats *stats;
 +      static const size_t bufsz = 1024;
 +
 +      buff = kmalloc(bufsz, GFP_KERNEL);
 +      if (!buff)
 +              return -ENOMEM;
 +
 +      pos = buff;
 +      endpos = pos + bufsz;
 +
 +      pos += scnprintf(pos, endpos - pos, "COLUMN,");
 +      for (rate = 0; rate < IWL_RATE_COUNT; rate++)
 +              pos += scnprintf(pos, endpos - pos, "%s,", rate_name[rate]);
 +      pos += scnprintf(pos, endpos - pos, "\n");
 +
 +      for (col = 0; col < RS_COLUMN_COUNT; col++) {
 +              pos += scnprintf(pos, endpos - pos,
 +                               "%s,", column_name[col]);
 +
 +              for (rate = 0; rate < IWL_RATE_COUNT; rate++) {
 +                      stats = &(lq_sta->tx_stats[col][rate]);
 +                      pos += scnprintf(pos, endpos - pos,
 +                                       "%llu/%llu,",
 +                                       stats->success,
 +                                       stats->total);
 +              }
 +              pos += scnprintf(pos, endpos - pos, "\n");
 +      }
 +
 +      ret = simple_read_from_buffer(user_buf, count, ppos, buff, pos - buff);
 +      kfree(buff);
 +      return ret;
 +}
 +
 +static ssize_t rs_sta_dbgfs_drv_tx_stats_write(struct file *file,
 +                                             const char __user *user_buf,
 +                                             size_t count, loff_t *ppos)
 +{
 +      struct iwl_lq_sta *lq_sta = file->private_data;
 +
 +      memset(lq_sta->tx_stats, 0, sizeof(lq_sta->tx_stats));
 +
 +      return count;
 +}
 +
 +static const struct file_operations rs_sta_dbgfs_drv_tx_stats_ops = {
 +      .read = rs_sta_dbgfs_drv_tx_stats_read,
 +      .write = rs_sta_dbgfs_drv_tx_stats_write,
 +      .open = simple_open,
 +      .llseek = default_llseek,
 +};
 +
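For reference, reading the new drv_tx_stats file yields one CSV row per
rate-scale column, each cell holding success/total for that rate index; an
invented excerpt might look like:

  COLUMN,1M,2M,5.5M,11M,6M|MCS0,9M,12M|MCS1,18M|MCS2,...
  LEGACY_ANT_A,3/4,0/0,12/15,0/0,7/9,...
  SISO_ANT_A,0/0,0/0,0/0,0/0,118/130,...

Writing anything to the file clears the counters, since the write handler
above simply memsets lq_sta->tx_stats.
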
  static void rs_add_debugfs(void *mvm, void *mvm_sta, struct dentry *dir)
  {
        struct iwl_lq_sta *lq_sta = mvm_sta;
        lq_sta->rs_sta_dbgfs_stats_table_file =
                debugfs_create_file("rate_stats_table", S_IRUSR, dir,
                                    lq_sta, &rs_sta_dbgfs_stats_table_ops);
 +      lq_sta->rs_sta_dbgfs_drv_tx_stats_file =
 +              debugfs_create_file("drv_tx_stats", S_IRUSR | S_IWUSR, dir,
 +                                  lq_sta, &rs_sta_dbgfs_drv_tx_stats_ops);
        lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file =
                debugfs_create_u8("tx_agg_tid_enable", S_IRUSR | S_IWUSR, dir,
                                  &lq_sta->tx_agg_tid_en);
 +      lq_sta->rs_sta_dbgfs_reduced_txp_file =
 +              debugfs_create_u8("reduced_tpc", S_IRUSR | S_IWUSR, dir,
 +                                &lq_sta->dbg_fixed_txp_reduction);
  }
  
  static void rs_remove_debugfs(void *mvm, void *mvm_sta)
        struct iwl_lq_sta *lq_sta = mvm_sta;
        debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file);
        debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
 +      debugfs_remove(lq_sta->rs_sta_dbgfs_drv_tx_stats_file);
        debugfs_remove(lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file);
 +      debugfs_remove(lq_sta->rs_sta_dbgfs_reduced_txp_file);
  }
  #endif
  
@@@ -277,51 -277,22 +277,22 @@@ static void iwl_mvm_scan_calc_params(st
                                            IEEE80211_IFACE_ITER_NORMAL,
                                            iwl_mvm_scan_condition_iterator,
                                            &global_bound);
-       /*
-        * Under low latency traffic passive scan is fragmented meaning
-        * that dwell on a particular channel will be fragmented. Each fragment
-        * dwell time is 20ms and fragments period is 105ms. Skipping to next
-        * channel will be delayed by the same period - 105ms. So suspend_time
-        * parameter describing both fragments and channels skipping periods is
-        * set to 105ms. This value is chosen so that overall passive scan
-        * duration will not be too long. Max_out_time in this case is set to
-        * 70ms, so for active scanning operating channel will be left for 70ms
-        * while for passive still for 20ms (fragment dwell).
-        */
-       if (global_bound) {
-               if (!iwl_mvm_low_latency(mvm)) {
-                       params->suspend_time = ieee80211_tu_to_usec(100);
-                       params->max_out_time = ieee80211_tu_to_usec(600);
-               } else {
-                       params->suspend_time = ieee80211_tu_to_usec(105);
-                       /* P2P doesn't support fragmented passive scan, so
-                        * configure max_out_time to be at least longest dwell
-                        * time for passive scan.
-                        */
-                       if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p) {
-                               params->max_out_time = ieee80211_tu_to_usec(70);
-                               params->passive_fragmented = true;
-                       } else {
-                               u32 passive_dwell;
  
-                               /*
-                                * Use band G so that passive channel dwell time
-                                * will be assigned with maximum value.
-                                */
-                               band = IEEE80211_BAND_2GHZ;
-                               passive_dwell = iwl_mvm_get_passive_dwell(band);
-                               params->max_out_time =
-                                       ieee80211_tu_to_usec(passive_dwell);
-                       }
-               }
+       if (!global_bound)
+               goto not_bound;
+       params->suspend_time = 100;
+       params->max_out_time = 600;
+       if (iwl_mvm_low_latency(mvm)) {
+               params->suspend_time = 250;
+               params->max_out_time = 250;
        }
  
+ not_bound:
        for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
-               if (params->passive_fragmented)
-                       params->dwell[band].passive = 20;
-               else
-                       params->dwell[band].passive =
-                               iwl_mvm_get_passive_dwell(band);
+               params->dwell[band].passive = iwl_mvm_get_passive_dwell(band);
                params->dwell[band].active = iwl_mvm_get_active_dwell(band,
                                                                      n_ssids);
        }
@@@ -348,10 -319,7 +319,10 @@@ int iwl_mvm_scan_request(struct iwl_mv
        struct iwl_mvm_scan_params params = {};
  
        lockdep_assert_held(&mvm->mutex);
 -      BUG_ON(mvm->scan_cmd == NULL);
 +
 +      /* we should have failed registration if scan_cmd was NULL */
 +      if (WARN_ON(mvm->scan_cmd == NULL))
 +              return -ENOMEM;
  
        IWL_DEBUG_SCAN(mvm, "Handling mac80211 scan request\n");
        mvm->scan_status = IWL_MVM_SCAN_OS;
@@@ -570,13 -538,9 +541,13 @@@ int iwl_mvm_rx_scan_offload_complete_no
        /* scan status must be locked for proper checking */
        lockdep_assert_held(&mvm->mutex);
  
 -      IWL_DEBUG_SCAN(mvm, "Scheduled scan completed, status %s\n",
 +      IWL_DEBUG_SCAN(mvm,
 +                     "Scheduled scan completed, status %s EBS status %s:%d\n",
                       scan_notif->status == IWL_SCAN_OFFLOAD_COMPLETED ?
 -                     "completed" : "aborted");
 +                     "completed" : "aborted", scan_notif->ebs_status ==
 +                     IWL_SCAN_EBS_SUCCESS ? "success" : "failed",
 +                     scan_notif->ebs_status);
  
        /* only call mac80211 completion if the stop was initiated by FW */
        if (mvm->scan_status == IWL_MVM_SCAN_SCHED) {
                ieee80211_sched_scan_stopped(mvm->hw);
        }
  
 +      mvm->last_ebs_successful = !scan_notif->ebs_status;
 +
        return 0;
  }
  
@@@ -770,7 -732,7 +741,7 @@@ int iwl_mvm_config_sched_scan(struct iw
        int band_2ghz = mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels;
        int band_5ghz = mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
        int head = 0;
-       int tail = band_2ghz + band_5ghz;
+       int tail = band_2ghz + band_5ghz - 1;
        u32 ssid_bitmap;
        int cmd_len;
        int ret;
@@@ -922,11 -884,6 +893,11 @@@ int iwl_mvm_sched_scan_start(struct iwl
                scan_req.flags |= cpu_to_le16(IWL_SCAN_OFFLOAD_FLAG_PASS_ALL);
        }
  
 +      if (mvm->last_ebs_successful &&
 +          mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT)
 +              scan_req.flags |=
 +                      cpu_to_le16(IWL_SCAN_OFFLOAD_FLAG_EBS_ACCURATE_MODE);
 +
        return iwl_mvm_send_cmd_pdu(mvm, SCAN_OFFLOAD_REQUEST_CMD, CMD_SYNC,
                                    sizeof(scan_req), &scan_req);
  }
@@@ -64,7 -64,6 +64,7 @@@
  
  #include "iwl-debug.h"
  #include "iwl-io.h"
 +#include "iwl-prph.h"
  
  #include "mvm.h"
  #include "fw-api-rs.h"
@@@ -470,8 -469,6 +470,8 @@@ void iwl_mvm_dump_nic_error_log(struct 
                        mvm->status, table.valid);
        }
  
 +      /* Do not change this output - scripts rely on it */
 +
        IWL_ERR(mvm, "Loaded firmware version: %s\n", mvm->fw->fw_version);
  
        trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low,
@@@ -525,7 -522,7 +525,7 @@@ void iwl_mvm_fw_error_sram_dump(struct 
        u32 ofs, sram_len;
        void *sram;
  
 -      if (!mvm->ucode_loaded || mvm->fw_error_sram)
 +      if (!mvm->ucode_loaded || mvm->fw_error_sram || mvm->fw_error_dump)
                return;
  
        img = &mvm->fw->img[mvm->cur_ucode];
        mvm->fw_error_sram_len = sram_len;
  }
  
 +void iwl_mvm_fw_error_rxf_dump(struct iwl_mvm *mvm)
 +{
 +      int i, reg_val;
 +      unsigned long flags;
 +
 +      if (!mvm->ucode_loaded || mvm->fw_error_rxf || mvm->fw_error_dump)
 +              return;
 +
 +      /* reading buffer size */
 +      reg_val = iwl_trans_read_prph(mvm->trans, RXF_SIZE_ADDR);
 +      mvm->fw_error_rxf_len =
 +              (reg_val & RXF_SIZE_BYTE_CNT_MSK) >> RXF_SIZE_BYTE_CND_POS;
 +
 +      /* the register holds the value divided by 128 */
 +      mvm->fw_error_rxf_len = mvm->fw_error_rxf_len << 7;
 +
 +      if (!mvm->fw_error_rxf_len)
 +              return;
 +
 +      mvm->fw_error_rxf = kzalloc(mvm->fw_error_rxf_len, GFP_ATOMIC);
 +      if (!mvm->fw_error_rxf) {
 +              mvm->fw_error_rxf_len = 0;
 +              return;
 +      }
 +
 +      if (!iwl_trans_grab_nic_access(mvm->trans, false, &flags)) {
 +              kfree(mvm->fw_error_rxf);
 +              mvm->fw_error_rxf = NULL;
 +              mvm->fw_error_rxf_len = 0;
 +              return;
 +      }
 +
 +      for (i = 0; i < (mvm->fw_error_rxf_len / sizeof(u32)); i++) {
 +              iwl_trans_write_prph(mvm->trans, RXF_LD_FENCE_OFFSET_ADDR,
 +                                   i * sizeof(u32));
 +              mvm->fw_error_rxf[i] =
 +                      iwl_trans_read_prph(mvm->trans, RXF_FIFO_RD_FENCE_ADDR);
 +      }
 +      iwl_trans_release_nic_access(mvm->trans, &flags);
 +}
 +
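The size decode above is worth a worked example: the register stores the
fill level in 128-byte units, so the driver masks, shifts, then multiplies
by 128 via the << 7. A tiny sketch with invented mask/shift values (the
real RXF_SIZE_BYTE_CNT_MSK and RXF_SIZE_BYTE_CND_POS live in iwl-prph.h):

  #include <stdio.h>
  #include <stdint.h>

  #define SIZE_BYTE_CNT_MSK 0x00000ff0u   /* illustrative stand-ins */
  #define SIZE_BYTE_CND_POS 4u

  int main(void)
  {
          uint32_t reg_val = 0x00000200;  /* pretend PRPH read */
          uint32_t len = (reg_val & SIZE_BYTE_CNT_MSK) >> SIZE_BYTE_CND_POS;

          len <<= 7;                      /* register counts 128-byte units */
          printf("rxf len = %u bytes\n", len);    /* 0x20 * 128 = 4096 */
          return 0;
  }
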
  /**
   * iwl_mvm_send_lq_cmd() - Send link quality command
   * @init: This command is sent as part of station initialization right
@@@ -688,3 -644,22 +688,22 @@@ bool iwl_mvm_low_latency(struct iwl_mv
  
        return result;
  }
+ static void iwl_mvm_idle_iter(void *_data, u8 *mac, struct ieee80211_vif *vif)
+ {
+       bool *idle = _data;
+
+       if (!vif->bss_conf.idle)
+               *idle = false;
+ }
+
+ bool iwl_mvm_is_idle(struct iwl_mvm *mvm)
+ {
+       bool idle = true;
+
+       ieee80211_iterate_active_interfaces_atomic(
+                       mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+                       iwl_mvm_idle_iter, &idle);
+
+       return idle;
+ }
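
iwl_mvm_is_idle() folds a predicate over every active interface through a
callback plus an opaque data pointer, the closure-by-hand idiom used
throughout mac80211 iteration. The bare shape of it, as a standalone sketch:

  #include <stdbool.h>
  #include <stdio.h>

  struct vif { bool idle; };

  static void idle_iter(void *data, struct vif *vif)
  {
          bool *idle = data;

          if (!vif->idle)
                  *idle = false;
  }

  static void for_each_vif(struct vif *v, int n,
                           void (*fn)(void *, struct vif *), void *data)
  {
          int i;

          for (i = 0; i < n; i++)
                  fn(data, &v[i]);
  }

  int main(void)
  {
          struct vif vifs[] = { { true }, { false } };
          bool idle = true;

          for_each_vif(vifs, 2, idle_iter, &idle);
          printf("idle=%d\n", idle);      /* 0: one interface is busy */
          return 0;
  }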
@@@ -103,6 -103,7 +103,6 @@@ static void iwl_pcie_set_pwr(struct iwl
  
  /* PCI registers */
  #define PCI_CFG_RETRY_TIMEOUT 0x041
 -#define CPU1_CPU2_SEPARATOR_SECTION   0xFFFFCCCC
  
  static void iwl_pcie_apm_config(struct iwl_trans *trans)
  {
@@@ -1052,12 -1053,6 +1052,12 @@@ static void iwl_trans_pcie_write_prph(s
        iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);
  }
  
 +static int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
 +{
 +      WARN_ON(1);
 +      return 0;
 +}
 +
  static void iwl_trans_pcie_configure(struct iwl_trans *trans,
                                     const struct iwl_trans_config *trans_cfg)
  {
  
        trans_pcie->command_names = trans_cfg->command_names;
        trans_pcie->bc_table_dword = trans_cfg->bc_table_dword;
 +
 +      /* Initialize NAPI here - it should be before registering to mac80211
 +       * in the opmode but after the HW struct is allocated.
 +       * As this function may be called again in some corner cases don't
 +       * do anything if NAPI was already initialized.
 +       */
 +      if (!trans_pcie->napi.poll && trans->op_mode->ops->napi_add) {
 +              init_dummy_netdev(&trans_pcie->napi_dev);
 +              iwl_op_mode_napi_add(trans->op_mode, &trans_pcie->napi,
 +                                   &trans_pcie->napi_dev,
 +                                   iwl_pcie_dummy_napi_poll, 64);
 +      }
  }
  
  void iwl_trans_pcie_free(struct iwl_trans *trans)
        pci_disable_device(trans_pcie->pci_dev);
        kmem_cache_destroy(trans->dev_cmd_pool);
  
 +      if (trans_pcie->napi.poll)
 +              netif_napi_del(&trans_pcie->napi);
 +
        kfree(trans);
  }
  
@@@ -1257,7 -1237,7 +1257,7 @@@ static int iwl_trans_pcie_write_mem(str
  
  #define IWL_FLUSH_WAIT_MS     2000
  
 -static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans)
 +static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm)
  {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_txq *txq;
  
        /* waiting for all the tx frames complete might take a while */
        for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
 +              u8 wr_ptr;
 +
                if (cnt == trans_pcie->cmd_queue)
                        continue;
 +              if (!test_bit(cnt, trans_pcie->queue_used))
 +                      continue;
 +              if (!(BIT(cnt) & txq_bm))
 +                      continue;
 +
 +              IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", cnt);
                txq = &trans_pcie->txq[cnt];
                q = &txq->q;
 -              while (q->read_ptr != q->write_ptr && !time_after(jiffies,
 -                     now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS)))
 +              wr_ptr = ACCESS_ONCE(q->write_ptr);
 +
 +              while (q->read_ptr != ACCESS_ONCE(q->write_ptr) &&
 +                     !time_after(jiffies,
 +                                 now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) {
 +                      u8 write_ptr = ACCESS_ONCE(q->write_ptr);
 +
 +                      if (WARN_ONCE(wr_ptr != write_ptr,
 +                                    "WR pointer moved while flushing %d -> %d\n",
 +                                    wr_ptr, write_ptr))
 +                              return -ETIMEDOUT;
                        msleep(1);
 +              }
  
                if (q->read_ptr != q->write_ptr) {
                        IWL_ERR(trans,
                        ret = -ETIMEDOUT;
                        break;
                }
 +              IWL_DEBUG_TX_QUEUES(trans, "Queue %d is now empty.\n", cnt);
        }
  
        if (!ret)
@@@ -1788,6 -1749,10 +1788,10 @@@ struct iwl_trans *iwl_trans_pcie_alloc(
         * PCI Tx retries from interfering with C3 CPU state */
        pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
  
+       trans->dev = &pdev->dev;
+       trans_pcie->pci_dev = pdev;
+       iwl_disable_interrupts(trans);
+
        err = pci_enable_msi(pdev);
        if (err) {
                dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", err);
                }
        }
  
-       trans->dev = &pdev->dev;
-       trans_pcie->pci_dev = pdev;
        trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
        trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
        snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
                goto out_pci_disable_msi;
        }
  
-       trans_pcie->inta_mask = CSR_INI_SET_MASK;
        if (iwl_pcie_alloc_ict(trans))
                goto out_free_cmd_pool;
  
                goto out_free_ict;
        }
  
+       trans_pcie->inta_mask = CSR_INI_SET_MASK;
+
        return trans;
  
  out_free_ict:
@@@ -75,32 -75,8 +75,8 @@@ static int xenvif_poll(struct napi_stru
        work_done = xenvif_tx_action(vif, budget);
  
        if (work_done < budget) {
-               int more_to_do = 0;
-               unsigned long flags;
-               /* It is necessary to disable IRQ before calling
-                * RING_HAS_UNCONSUMED_REQUESTS. Otherwise we might
-                * lose event from the frontend.
-                *
-                * Consider:
-                *   RING_HAS_UNCONSUMED_REQUESTS
-                *   <frontend generates event to trigger napi_schedule>
-                *   __napi_complete
-                *
-                * This handler is still in scheduled state so the
-                * event has no effect at all. After __napi_complete
-                * this handler is descheduled and cannot get
-                * scheduled again. We lose event in this case and the ring
-                * will be completely stalled.
-                */
-               local_irq_save(flags);
-               RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);
-               if (!more_to_do)
-                       __napi_complete(napi);
-               local_irq_restore(flags);
+               napi_complete(napi);
+               xenvif_napi_schedule_or_enable_events(vif);
        }
  
        return work_done;
@@@ -194,7 -170,7 +170,7 @@@ static void xenvif_up(struct xenvif *vi
        enable_irq(vif->tx_irq);
        if (vif->tx_irq != vif->rx_irq)
                enable_irq(vif->rx_irq);
-       xenvif_check_rx_xenvif(vif);
+       xenvif_napi_schedule_or_enable_events(vif);
  }
  
  static void xenvif_down(struct xenvif *vif)
@@@ -386,7 -362,7 +362,7 @@@ struct xenvif *xenvif_alloc(struct devi
                NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                NETIF_F_TSO | NETIF_F_TSO6;
        dev->features = dev->hw_features | NETIF_F_RXCSUM;
 -      SET_ETHTOOL_OPS(dev, &xenvif_ethtool_ops);
 +      dev->ethtool_ops = &xenvif_ethtool_ops;
  
        dev->tx_queue_len = XENVIF_QUEUE_LENGTH;
  
diff --combined include/linux/if_vlan.h
@@@ -73,7 -73,7 +73,7 @@@ static inline struct vlan_ethhdr *vlan_
  /* found in socket.c */
  extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *));
  
- static inline int is_vlan_dev(struct net_device *dev)
+ static inline bool is_vlan_dev(struct net_device *dev)
  {
          return dev->priv_flags & IFF_802_1Q_VLAN;
  }
@@@ -106,7 -106,7 +106,7 @@@ struct vlan_pcpu_stats 
  
  #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
  
 -extern struct net_device *__vlan_find_dev_deep(struct net_device *real_dev,
 +extern struct net_device *__vlan_find_dev_deep_rcu(struct net_device *real_dev,
                                               __be16 vlan_proto, u16 vlan_id);
  extern struct net_device *vlan_dev_real_dev(const struct net_device *dev);
  extern u16 vlan_dev_vlan_id(const struct net_device *dev);
@@@ -159,6 -159,7 +159,7 @@@ struct vlan_dev_priv 
  #ifdef CONFIG_NET_POLL_CONTROLLER
        struct netpoll                          *netpoll;
  #endif
+       unsigned int                            nest_level;
  };
  
  static inline struct vlan_dev_priv *vlan_dev_priv(const struct net_device *dev)
@@@ -197,9 -198,15 +198,15 @@@ extern void vlan_vids_del_by_dev(struc
                                 const struct net_device *by_dev);
  
  extern bool vlan_uses_dev(const struct net_device *dev);
+
+ static inline int vlan_get_encap_level(struct net_device *dev)
+ {
+       BUG_ON(!is_vlan_dev(dev));
+       return vlan_dev_priv(dev)->nest_level;
+ }
  #else
  static inline struct net_device *
 -__vlan_find_dev_deep(struct net_device *real_dev,
 +__vlan_find_dev_deep_rcu(struct net_device *real_dev,
                     __be16 vlan_proto, u16 vlan_id)
  {
        return NULL;
@@@ -263,6 -270,11 +270,11 @@@ static inline bool vlan_uses_dev(const 
  {
        return false;
  }
+
+ static inline int vlan_get_encap_level(struct net_device *dev)
+ {
+       BUG();
+       return 0;
+ }
  #endif
  
  static inline bool vlan_hw_offload_capable(netdev_features_t features,
@@@ -483,4 -495,5 +495,5 @@@ static inline void vlan_set_encap_proto
                 */
                skb->protocol = htons(ETH_P_802_2);
  }
+
  #endif /* !(_LINUX_IF_VLAN_H_) */
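
The new nest_level field caches, at creation time, how many vlan devices
sit below a given vlan; netif_addr_lock_nested() further down feeds that
number to spin_lock_nested() as the lockdep subclass, so vlan-on-vlan
address-list locking stays free of false deadlock reports without
rewalking the stack on every lock. A userspace sketch of the counting,
assuming a simplified device struct:

  #include <stdio.h>

  struct dev {
          struct dev *lower;      /* real_dev for a vlan, NULL if physical */
          int nest_level;         /* lockdep subclass for this device */
  };

  /* what dev_get_nest_level()-style code computes at registration */
  static int nest_level(const struct dev *d)
  {
          int level = 0;

          for (; d->lower; d = d->lower)
                  level++;
          return level;
  }

  int main(void)
  {
          struct dev eth0  = { NULL, 0 };
          struct dev vlan1 = { &eth0, 0 };
          struct dev vlan2 = { &vlan1, 0 };       /* vlan stacked on vlan */

          vlan1.nest_level = nest_level(&vlan1);
          vlan2.nest_level = nest_level(&vlan2);
          printf("vlan1=%d vlan2=%d\n",
                 vlan1.nest_level, vlan2.nest_level);   /* 1 2 */
          return 0;
  }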
@@@ -56,6 -56,9 +56,6 @@@ struct device
  struct phy_device;
  /* 802.11 specific */
  struct wireless_dev;
 -                                      /* source back-compat hooks */
 -#define SET_ETHTOOL_OPS(netdev,ops) \
 -      ( (netdev)->ethtool_ops = (ops) )
  
  void netdev_set_default_ethtool_ops(struct net_device *dev,
                                    const struct ethtool_ops *ops);
@@@ -850,8 -853,7 +850,8 @@@ typedef u16 (*select_queue_fallback_t)(
   *    SR-IOV management functions.
   * int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac);
   * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan, u8 qos);
 - * int (*ndo_set_vf_tx_rate)(struct net_device *dev, int vf, int rate);
 + * int (*ndo_set_vf_rate)(struct net_device *dev, int vf, int min_tx_rate,
 + *                      int max_tx_rate);
   * int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting);
   * int (*ndo_get_vf_config)(struct net_device *dev,
   *                        int vf, struct ifla_vf_info *ivf);
@@@ -1045,9 -1047,8 +1045,9 @@@ struct net_device_ops 
                                                  int queue, u8 *mac);
        int                     (*ndo_set_vf_vlan)(struct net_device *dev,
                                                   int queue, u16 vlan, u8 qos);
 -      int                     (*ndo_set_vf_tx_rate)(struct net_device *dev,
 -                                                    int vf, int rate);
 +      int                     (*ndo_set_vf_rate)(struct net_device *dev,
 +                                                 int vf, int min_tx_rate,
 +                                                 int max_tx_rate);
        int                     (*ndo_set_vf_spoofchk)(struct net_device *dev,
                                                       int vf, bool setting);
        int                     (*ndo_get_vf_config)(struct net_device *dev,
        netdev_tx_t             (*ndo_dfwd_start_xmit) (struct sk_buff *skb,
                                                        struct net_device *dev,
                                                        void *priv);
+       int                     (*ndo_get_lock_subclass)(struct net_device *dev);
  };
  
  /**
@@@ -2632,7 -2634,6 +2633,7 @@@ int dev_get_phys_port_id(struct net_dev
                         struct netdev_phys_port_id *ppid);
  int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
                        struct netdev_queue *txq);
 +int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
  int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
  bool is_skb_forwardable(struct net_device *dev, struct sk_buff *skb);
  
@@@ -2950,7 -2951,12 +2951,12 @@@ static inline void netif_addr_lock(stru
  
  static inline void netif_addr_lock_nested(struct net_device *dev)
  {
-       spin_lock_nested(&dev->addr_list_lock, SINGLE_DEPTH_NESTING);
+       int subclass = SINGLE_DEPTH_NESTING;
+
+       if (dev->netdev_ops->ndo_get_lock_subclass)
+               subclass = dev->netdev_ops->ndo_get_lock_subclass(dev);
+       spin_lock_nested(&dev->addr_list_lock, subclass);
  }
  
  static inline void netif_addr_lock_bh(struct net_device *dev)
@@@ -3050,9 -3056,18 +3056,18 @@@ extern int            weight_p
  extern int            bpf_jit_enable;
  
  bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev);
+ struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
+                                                    struct list_head **iter);
  struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
                                                     struct list_head **iter);
  
+ /* iterate through upper list, must be called under RCU read lock */
+ #define netdev_for_each_upper_dev_rcu(dev, updev, iter) \
+       for (iter = &(dev)->adj_list.upper, \
+            updev = netdev_upper_get_next_dev_rcu(dev, &(iter)); \
+            updev; \
+            updev = netdev_upper_get_next_dev_rcu(dev, &(iter)))
+
  /* iterate through upper list, must be called under RCU read lock */
  #define netdev_for_each_all_upper_dev_rcu(dev, updev, iter) \
        for (iter = &(dev)->all_adj_list.upper, \
@@@ -3077,6 -3092,14 +3092,14 @@@ void *netdev_lower_get_next_private_rcu
             priv; \
             priv = netdev_lower_get_next_private_rcu(dev, &(iter)))
  
+ void *netdev_lower_get_next(struct net_device *dev,
+                               struct list_head **iter);
+
+ #define netdev_for_each_lower_dev(dev, ldev, iter) \
+       for (iter = &(dev)->adj_list.lower, \
+            ldev = netdev_lower_get_next(dev, &(iter)); \
+            ldev; \
+            ldev = netdev_lower_get_next(dev, &(iter)))
+
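Usage follows the existing upper-dev iterators; a sketch, assuming the
caller holds RTNL and bond_dev is a hypothetical master device:

  struct net_device *ldev;
  struct list_head *iter;

  ASSERT_RTNL();
  netdev_for_each_lower_dev(bond_dev, ldev, iter)
          netdev_info(bond_dev, "lower dev: %s\n", ldev->name);
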
  void *netdev_adjacent_get_private(struct list_head *adj_list);
  void *netdev_lower_get_first_private_rcu(struct net_device *dev);
  struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
@@@ -3092,6 -3115,8 +3115,8 @@@ void netdev_upper_dev_unlink(struct net
  void netdev_adjacent_rename_links(struct net_device *dev, char *oldname);
  void *netdev_lower_dev_get_private(struct net_device *dev,
                                   struct net_device *lower_dev);
+ int dev_get_nest_level(struct net_device *dev,
+                      bool (*type_check)(struct net_device *dev));
  int skb_checksum_help(struct sk_buff *skb);
  struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
                                  netdev_features_t features, bool tx_path);
@@@ -3155,20 -3180,6 +3180,20 @@@ const char *netdev_drivername(const str
  
  void linkwatch_run_queue(void);
  
 +static inline netdev_features_t netdev_intersect_features(netdev_features_t f1,
 +                                                        netdev_features_t f2)
 +{
 +      if (f1 & NETIF_F_GEN_CSUM)
 +              f1 |= (NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
 +      if (f2 & NETIF_F_GEN_CSUM)
 +              f2 |= (NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
 +      f1 &= f2;
 +      if (f1 & NETIF_F_GEN_CSUM)
 +              f1 &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
 +
 +      return f1;
 +}
 +
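The dance around NETIF_F_GEN_CSUM matters because a device advertising the
generic checksum bit implicitly covers the protocol-specific ones: each
side is expanded before the AND and collapsed after, so a plain f1 & f2 no
longer drops checksum offload when one side only sets NETIF_F_IP_CSUM
(this is what vlan_dev_fix_features switches to, further down). A
userspace sketch with invented bit values, since the real NETIF_F_* layout
differs:

  #include <stdio.h>
  #include <stdint.h>

  #define F_IP_CSUM   0x1u        /* invented stand-ins for NETIF_F_* */
  #define F_IPV6_CSUM 0x2u
  #define F_GEN_CSUM  0x4u
  #define F_ALL_CSUM  (F_IP_CSUM | F_IPV6_CSUM | F_GEN_CSUM)

  static uint32_t intersect(uint32_t f1, uint32_t f2)
  {
          if (f1 & F_GEN_CSUM)
                  f1 |= F_ALL_CSUM & ~F_GEN_CSUM; /* expand generic */
          if (f2 & F_GEN_CSUM)
                  f2 |= F_ALL_CSUM & ~F_GEN_CSUM;
          f1 &= f2;
          if (f1 & F_GEN_CSUM)
                  f1 &= ~(F_ALL_CSUM & ~F_GEN_CSUM); /* collapse back */
          return f1;
  }

  int main(void)
  {
          /* GEN_CSUM device meets an IP_CSUM-only peer: IP_CSUM survives */
          printf("0x%x\n", intersect(F_GEN_CSUM, F_IP_CSUM)); /* 0x1 */
          return 0;
  }
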
  static inline netdev_features_t netdev_get_wanted_features(
        struct net_device *dev)
  {
diff --combined include/net/ip6_route.h
@@@ -127,6 -127,7 +127,7 @@@ int rt6_dump_route(struct rt6_info *rt
  void rt6_ifdown(struct net *net, struct net_device *dev);
  void rt6_mtu_change(struct net_device *dev, unsigned int mtu);
  void rt6_remove_prefsrc(struct inet6_ifaddr *ifp);
+ void rt6_clean_tohost(struct net *net, struct in6_addr *gateway);
  
  
  /*
@@@ -185,7 -186,7 +186,7 @@@ static inline bool ip6_sk_accept_pmtu(c
               inet6_sk(sk)->pmtudisc != IPV6_PMTUDISC_OMIT;
  }
  
 -static inline bool ip6_sk_local_df(const struct sock *sk)
 +static inline bool ip6_sk_ignore_df(const struct sock *sk)
  {
        return inet6_sk(sk)->pmtudisc < IPV6_PMTUDISC_DO ||
               inet6_sk(sk)->pmtudisc == IPV6_PMTUDISC_OMIT;
@@@ -331,9 -331,17 +331,17 @@@ enum 
  #define AUDIT_FAIL_PRINTK     1
  #define AUDIT_FAIL_PANIC      2
  
+ /*
+  * These bits disambiguate different calling conventions that share an
+  * ELF machine type, bitness, and endianness
+  */
+ #define __AUDIT_ARCH_CONVENTION_MASK 0x30000000
+ #define __AUDIT_ARCH_CONVENTION_MIPS64_N32 0x20000000
+
  /* distinguish syscall tables */
  #define __AUDIT_ARCH_64BIT 0x80000000
  #define __AUDIT_ARCH_LE          0x40000000
  #define AUDIT_ARCH_ALPHA      (EM_ALPHA|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
  #define AUDIT_ARCH_ARM                (EM_ARM|__AUDIT_ARCH_LE)
  #define AUDIT_ARCH_ARMEB      (EM_ARM)
  #define AUDIT_ARCH_MIPS               (EM_MIPS)
  #define AUDIT_ARCH_MIPSEL     (EM_MIPS|__AUDIT_ARCH_LE)
  #define AUDIT_ARCH_MIPS64     (EM_MIPS|__AUDIT_ARCH_64BIT)
+ #define AUDIT_ARCH_MIPS64N32  (EM_MIPS|__AUDIT_ARCH_64BIT|\
+                                __AUDIT_ARCH_CONVENTION_MIPS64_N32)
  #define AUDIT_ARCH_MIPSEL64   (EM_MIPS|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
+ #define AUDIT_ARCH_MIPSEL64N32        (EM_MIPS|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE|\
+                                __AUDIT_ARCH_CONVENTION_MIPS64_N32)
  #define AUDIT_ARCH_OPENRISC   (EM_OPENRISC)
  #define AUDIT_ARCH_PARISC     (EM_PARISC)
  #define AUDIT_ARCH_PARISC64   (EM_PARISC|__AUDIT_ARCH_64BIT)
   */
  #define AUDIT_MESSAGE_TEXT_MAX        8560
  
 +/* Multicast Netlink socket groups (default up to 32) */
 +enum audit_nlgrps {
 +      AUDIT_NLGRP_NONE,       /* Group 0 not used */
 +      AUDIT_NLGRP_READLOG,    /* "best effort" read only socket */
 +      __AUDIT_NLGRP_MAX
 +};
 +#define AUDIT_NLGRP_MAX                (__AUDIT_NLGRP_MAX - 1)
 +
  struct audit_status {
        __u32           mask;           /* Bit mask for valid entries */
        __u32           enabled;        /* 1 = enabled, 0 = disabled */
@@@ -1579,10 -1579,6 +1579,10 @@@ enum nl80211_commands 
   * @NL80211_ATTR_TDLS_PEER_CAPABILITY: flags for TDLS peer capabilities, u32.
   *    As specified in the &enum nl80211_tdls_peer_capability.
   *
 + * @NL80211_ATTR_IFACE_SOCKET_OWNER: flag attribute, if set during interface
 + *    creation then the new interface will be owned by the netlink socket
 + *    that created it and will be destroyed when the socket is closed
 + *
   * @NL80211_ATTR_MAX: highest attribute number currently defined
   * @__NL80211_ATTR_AFTER_LAST: internal use
   */
@@@ -1918,8 -1914,6 +1918,8 @@@ enum nl80211_attrs 
  
        NL80211_ATTR_TDLS_PEER_CAPABILITY,
  
 +      NL80211_ATTR_IFACE_SOCKET_OWNER,
 +
        /* add attributes here, update the policy in nl80211.c */
  
        __NL80211_ATTR_AFTER_LAST,
@@@ -2342,34 -2336,9 +2342,34 @@@ enum nl80211_band_attr 
   *    using this channel as the primary or any of the secondary channels
   *    isn't possible
   * @NL80211_FREQUENCY_ATTR_DFS_CAC_TIME: DFS CAC time in milliseconds.
 + * @NL80211_FREQUENCY_ATTR_INDOOR_ONLY: Only indoor use is permitted on this
 + *    channel. A channel that has the INDOOR_ONLY attribute can only be
 + *    used when there is a clear assessment that the device is operating in
 + *    an indoor environment, i.e., it is connected to AC power (and not
 + *    through portable DC inverters) or is under the control of a master
 + *    that is acting as an AP and is connected to AC power.
 + * @NL80211_FREQUENCY_ATTR_GO_CONCURRENT: GO operation is allowed on this
 + *    channel if it's connected concurrently to a BSS on the same channel on
 + *    the 2 GHz band or to a channel in the same UNII band (on the 5 GHz
 + *    band), and IEEE80211_CHAN_RADAR is not set. Instantiating a GO on a
 + *    channel that has the GO_CONCURRENT attribute set can be done when there
 + *    is a clear assessment that the device is operating under the guidance of
 + *    an authorized master, i.e., setting up a GO while the device is also
 + *    connected to an AP with DFS and radar detection on the UNII band (it is
 + *    up to user-space, i.e., wpa_supplicant to perform the required
 + *    verifications)
 + * @NL80211_FREQUENCY_ATTR_NO_20MHZ: 20 MHz operation is not allowed
 + *    on this channel in current regulatory domain.
 + * @NL80211_FREQUENCY_ATTR_NO_10MHZ: 10 MHz operation is not allowed
 + *    on this channel in current regulatory domain.
   * @NL80211_FREQUENCY_ATTR_MAX: highest frequency attribute number
   *    currently defined
   * @__NL80211_FREQUENCY_ATTR_AFTER_LAST: internal use
 + *
 + * See https://apps.fcc.gov/eas/comments/GetPublishedDocument.html?id=327&tn=528122
 + * for more information on the FCC description of the relaxations allowed
 + * by NL80211_FREQUENCY_ATTR_INDOOR_ONLY and
 + * NL80211_FREQUENCY_ATTR_GO_CONCURRENT.
   */
  enum nl80211_frequency_attr {
        __NL80211_FREQUENCY_ATTR_INVALID,
        NL80211_FREQUENCY_ATTR_NO_80MHZ,
        NL80211_FREQUENCY_ATTR_NO_160MHZ,
        NL80211_FREQUENCY_ATTR_DFS_CAC_TIME,
 +      NL80211_FREQUENCY_ATTR_INDOOR_ONLY,
 +      NL80211_FREQUENCY_ATTR_GO_CONCURRENT,
 +      NL80211_FREQUENCY_ATTR_NO_20MHZ,
 +      NL80211_FREQUENCY_ATTR_NO_10MHZ,
  
        /* keep last */
        __NL80211_FREQUENCY_ATTR_AFTER_LAST,
@@@ -2608,13 -2573,10 +2608,13 @@@ enum nl80211_dfs_regions 
   *    present has been registered with the wireless core that
   *    has listed NL80211_FEATURE_CELL_BASE_REG_HINTS as a
   *    supported feature.
 + * @NL80211_USER_REG_HINT_INDOOR: a user sent a hint indicating that the
 + *    platform is operating in an indoor environment.
   */
  enum nl80211_user_reg_hint_type {
        NL80211_USER_REG_HINT_USER      = 0,
        NL80211_USER_REG_HINT_CELL_BASE = 1,
 +      NL80211_USER_REG_HINT_INDOOR    = 2,
  };
  
  /**
@@@ -3894,6 -3856,8 +3894,8 @@@ enum nl80211_ap_sme_features 
   * @NL80211_FEATURE_CELL_BASE_REG_HINTS: This driver has been tested
  *    to work properly to support receiving regulatory hints from
   *    cellular base stations.
+  * @NL80211_FEATURE_P2P_DEVICE_NEEDS_CHANNEL: (no longer available, only
+  *    here to reserve the value for API/ABI compatibility)
   * @NL80211_FEATURE_SAE: This driver supports simultaneous authentication of
   *    equals (SAE) with user space SME (NL80211_CMD_AUTHENTICATE) in station
   *    mode
   *    interface. An active monitor interface behaves like a normal monitor
   *    interface, but gets added to the driver. It ensures that incoming
   *    unicast packets directed at the configured interface address get ACKed.
 + * @NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE: This driver supports dynamic
 + *    channel bandwidth change (e.g., HT 20 <-> 40 MHz channel) during the
 + *    lifetime of a BSS.
   */
  enum nl80211_feature_flags {
        NL80211_FEATURE_SK_TX_STATUS                    = 1 << 0,
        NL80211_FEATURE_HT_IBSS                         = 1 << 1,
        NL80211_FEATURE_INACTIVITY_TIMER                = 1 << 2,
        NL80211_FEATURE_CELL_BASE_REG_HINTS             = 1 << 3,
-       /* bit 4 is reserved - don't use */
+       NL80211_FEATURE_P2P_DEVICE_NEEDS_CHANNEL        = 1 << 4,
        NL80211_FEATURE_SAE                             = 1 << 5,
        NL80211_FEATURE_LOW_PRIORITY_SCAN               = 1 << 6,
        NL80211_FEATURE_SCAN_FLUSH                      = 1 << 7,
        NL80211_FEATURE_FULL_AP_CLIENT_STATE            = 1 << 15,
        NL80211_FEATURE_USERSPACE_MPM                   = 1 << 16,
        NL80211_FEATURE_ACTIVE_MONITOR                  = 1 << 17,
 +      NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE       = 1 << 18,
  };
  
  /**
diff --combined net/8021q/vlan_dev.c
@@@ -493,48 -493,10 +493,10 @@@ static void vlan_dev_change_rx_flags(st
        }
  }
  
- static int vlan_calculate_locking_subclass(struct net_device *real_dev)
- {
-       int subclass = 0;
-       while (is_vlan_dev(real_dev)) {
-               subclass++;
-               real_dev = vlan_dev_priv(real_dev)->real_dev;
-       }
-       return subclass;
- }
-
- static void vlan_dev_mc_sync(struct net_device *to, struct net_device *from)
- {
-       int err = 0, subclass;
-       subclass = vlan_calculate_locking_subclass(to);
-       spin_lock_nested(&to->addr_list_lock, subclass);
-       err = __hw_addr_sync(&to->mc, &from->mc, to->addr_len);
-       if (!err)
-               __dev_set_rx_mode(to);
-       spin_unlock(&to->addr_list_lock);
- }
-
- static void vlan_dev_uc_sync(struct net_device *to, struct net_device *from)
- {
-       int err = 0, subclass;
-       subclass = vlan_calculate_locking_subclass(to);
-       spin_lock_nested(&to->addr_list_lock, subclass);
-       err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len);
-       if (!err)
-               __dev_set_rx_mode(to);
-       spin_unlock(&to->addr_list_lock);
- }
  static void vlan_dev_set_rx_mode(struct net_device *vlan_dev)
  {
-       vlan_dev_mc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
-       vlan_dev_uc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
+       dev_mc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
+       dev_uc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
  }
  
  /*
@@@ -562,6 -524,11 +524,11 @@@ static void vlan_dev_set_lockdep_class(
        netdev_for_each_tx_queue(dev, vlan_dev_set_lockdep_one, &subclass);
  }
  
+ static int vlan_dev_get_lock_subclass(struct net_device *dev)
+ {
+       return vlan_dev_priv(dev)->nest_level;
+ }
+ 
  static const struct header_ops vlan_header_ops = {
        .create  = vlan_dev_hard_header,
        .rebuild = vlan_dev_rebuild_header,
@@@ -597,7 -564,6 +564,6 @@@ static const struct net_device_ops vlan
  static int vlan_dev_init(struct net_device *dev)
  {
        struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
-       int subclass = 0;
  
        netif_carrier_off(dev);
  
  
        SET_NETDEV_DEVTYPE(dev, &vlan_type);
  
-       subclass = vlan_calculate_locking_subclass(dev);
-       vlan_dev_set_lockdep_class(dev, subclass);
+       vlan_dev_set_lockdep_class(dev, vlan_dev_get_lock_subclass(dev));
  
        vlan_dev_priv(dev)->vlan_pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats);
        if (!vlan_dev_priv(dev)->vlan_pcpu_stats)
@@@ -678,9 -643,9 +643,9 @@@ static netdev_features_t vlan_dev_fix_f
        struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
        netdev_features_t old_features = features;
  
 -      features &= real_dev->vlan_features;
 +      features = netdev_intersect_features(features, real_dev->vlan_features);
        features |= NETIF_F_RXCSUM;
 -      features &= real_dev->features;
 +      features = netdev_intersect_features(features, real_dev->features);
  
        features |= old_features & NETIF_F_SOFT_FEATURES;
        features |= NETIF_F_LLTX;
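
The move from a plain "&=" to netdev_intersect_features() above matters
because checksum bits are not independent: NETIF_F_GEN_CSUM can stand in
for the protocol-specific checksum flags. A sketch of the helper's likely
shape, reconstructed from memory of the era's netdevice.h and offered only
as an assumption:

	static inline netdev_features_t
	netdev_intersect_features(netdev_features_t f1, netdev_features_t f2)
	{
		/* let generic checksumming satisfy specific csum flags */
		if (f1 & NETIF_F_GEN_CSUM)
			f1 |= (NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
		if (f2 & NETIF_F_GEN_CSUM)
			f2 |= (NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
		return f1 & f2;
	}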
@@@ -706,36 -671,38 +671,36 @@@ static void vlan_ethtool_get_drvinfo(st
  
  static struct rtnl_link_stats64 *vlan_dev_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
  {
 +      struct vlan_pcpu_stats *p;
 +      u32 rx_errors = 0, tx_dropped = 0;
 +      int i;
  
 -      if (vlan_dev_priv(dev)->vlan_pcpu_stats) {
 -              struct vlan_pcpu_stats *p;
 -              u32 rx_errors = 0, tx_dropped = 0;
 -              int i;
 -
 -              for_each_possible_cpu(i) {
 -                      u64 rxpackets, rxbytes, rxmulticast, txpackets, txbytes;
 -                      unsigned int start;
 -
 -                      p = per_cpu_ptr(vlan_dev_priv(dev)->vlan_pcpu_stats, i);
 -                      do {
 -                              start = u64_stats_fetch_begin_irq(&p->syncp);
 -                              rxpackets       = p->rx_packets;
 -                              rxbytes         = p->rx_bytes;
 -                              rxmulticast     = p->rx_multicast;
 -                              txpackets       = p->tx_packets;
 -                              txbytes         = p->tx_bytes;
 -                      } while (u64_stats_fetch_retry_irq(&p->syncp, start));
 -
 -                      stats->rx_packets       += rxpackets;
 -                      stats->rx_bytes         += rxbytes;
 -                      stats->multicast        += rxmulticast;
 -                      stats->tx_packets       += txpackets;
 -                      stats->tx_bytes         += txbytes;
 -                      /* rx_errors & tx_dropped are u32 */
 -                      rx_errors       += p->rx_errors;
 -                      tx_dropped      += p->tx_dropped;
 -              }
 -              stats->rx_errors  = rx_errors;
 -              stats->tx_dropped = tx_dropped;
 +      for_each_possible_cpu(i) {
 +              u64 rxpackets, rxbytes, rxmulticast, txpackets, txbytes;
 +              unsigned int start;
 +
 +              p = per_cpu_ptr(vlan_dev_priv(dev)->vlan_pcpu_stats, i);
 +              do {
 +                      start = u64_stats_fetch_begin_irq(&p->syncp);
 +                      rxpackets       = p->rx_packets;
 +                      rxbytes         = p->rx_bytes;
 +                      rxmulticast     = p->rx_multicast;
 +                      txpackets       = p->tx_packets;
 +                      txbytes         = p->tx_bytes;
 +              } while (u64_stats_fetch_retry_irq(&p->syncp, start));
 +
 +              stats->rx_packets       += rxpackets;
 +              stats->rx_bytes         += rxbytes;
 +              stats->multicast        += rxmulticast;
 +              stats->tx_packets       += txpackets;
 +              stats->tx_bytes         += txbytes;
 +              /* rx_errors & tx_dropped are u32 */
 +              rx_errors       += p->rx_errors;
 +              tx_dropped      += p->tx_dropped;
        }
 +      stats->rx_errors  = rx_errors;
 +      stats->tx_dropped = tx_dropped;
 +
        return stats;
  }
  
@@@ -817,6 -784,7 +782,7 @@@ static const struct net_device_ops vlan
        .ndo_netpoll_cleanup    = vlan_dev_netpoll_cleanup,
  #endif
        .ndo_fix_features       = vlan_dev_fix_features,
+       .ndo_get_lock_subclass  = vlan_dev_get_lock_subclass,
  };
  
  void vlan_setup(struct net_device *dev)
@@@ -662,7 -662,6 +662,7 @@@ static void batadv_dat_tvlv_container_u
  void batadv_dat_status_update(struct net_device *net_dev)
  {
        struct batadv_priv *bat_priv = netdev_priv(net_dev);
 +
        batadv_dat_tvlv_container_update(bat_priv);
  }
  
@@@ -941,8 -940,7 +941,7 @@@ bool batadv_dat_snoop_outgoing_arp_requ
                 * additional DAT answer may trigger kernel warnings about
                 * a packet coming from the wrong port.
                 */
-               if (batadv_is_my_client(bat_priv, dat_entry->mac_addr,
-                                       BATADV_NO_FLAGS)) {
+               if (batadv_is_my_client(bat_priv, dat_entry->mac_addr, vid)) {
                        ret = true;
                        goto out;
                }
diff --combined net/core/dev.c
@@@ -1661,29 -1661,6 +1661,29 @@@ bool is_skb_forwardable(struct net_devi
  }
  EXPORT_SYMBOL_GPL(is_skb_forwardable);
  
 +int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
 +{
 +      if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
 +              if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
 +                      atomic_long_inc(&dev->rx_dropped);
 +                      kfree_skb(skb);
 +                      return NET_RX_DROP;
 +              }
 +      }
 +
 +      if (unlikely(!is_skb_forwardable(dev, skb))) {
 +              atomic_long_inc(&dev->rx_dropped);
 +              kfree_skb(skb);
 +              return NET_RX_DROP;
 +      }
 +
 +      skb_scrub_packet(skb, true);
 +      skb->protocol = eth_type_trans(skb, dev);
 +
 +      return 0;
 +}
 +EXPORT_SYMBOL_GPL(__dev_forward_skb);
 +
  /**
   * dev_forward_skb - loopback an skb to another netif
   *
   */
  int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
  {
 -      if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
 -              if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
 -                      atomic_long_inc(&dev->rx_dropped);
 -                      kfree_skb(skb);
 -                      return NET_RX_DROP;
 -              }
 -      }
 -
 -      if (unlikely(!is_skb_forwardable(dev, skb))) {
 -              atomic_long_inc(&dev->rx_dropped);
 -              kfree_skb(skb);
 -              return NET_RX_DROP;
 -      }
 -
 -      skb_scrub_packet(skb, true);
 -      skb->protocol = eth_type_trans(skb, dev);
 -
 -      return netif_rx_internal(skb);
 +      return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
  }
  EXPORT_SYMBOL_GPL(dev_forward_skb);
  
@@@ -3957,6 -3951,7 +3957,7 @@@ static enum gro_result dev_gro_receive(
        }
        NAPI_GRO_CB(skb)->count = 1;
        NAPI_GRO_CB(skb)->age = jiffies;
+       NAPI_GRO_CB(skb)->last = skb;
        skb_shinfo(skb)->gso_size = skb_gro_len(skb);
        skb->next = napi->gro_list;
        napi->gro_list = skb;
@@@ -4546,6 -4541,32 +4547,32 @@@ void *netdev_adjacent_get_private(struc
  }
  EXPORT_SYMBOL(netdev_adjacent_get_private);
  
+ 
+ /**
+  * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
+  * @dev: device
+  * @iter: list_head ** of the current position
+  *
+  * Gets the next device from the dev's upper list, starting from iter
+  * position. The caller must hold RCU read lock.
+  */
+ struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
+                                                struct list_head **iter)
+ {
+       struct netdev_adjacent *upper;
+       WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
+       upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
+       if (&upper->list == &dev->adj_list.upper)
+               return NULL;
+       *iter = &upper->list;
+       return upper->dev;
+ }
+ EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
+ 
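
A hedged usage sketch for the iterator above; iter must start at
&dev->adj_list.upper, since the helper advances from (*iter)->next:

	struct list_head *iter = &dev->adj_list.upper;
	struct net_device *upper;

	rcu_read_lock();
	while ((upper = netdev_upper_get_next_dev_rcu(dev, &iter)))
		pr_debug("upper dev: %s\n", upper->name);
	rcu_read_unlock();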
  /**
   * netdev_all_upper_get_next_dev_rcu - Get the next dev from upper list
   * @dev: device
@@@ -4627,6 -4648,32 +4654,32 @@@ void *netdev_lower_get_next_private_rcu
  }
  EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
  
+ 
+ /**
+  * netdev_lower_get_next - Get the next device from the lower neighbour
+  *                         list
+  * @dev: device
+  * @iter: list_head ** of the current position
+  *
+  * Gets the next device from the dev's lower neighbour
+  * list, starting from iter position. The caller must hold RTNL lock or
+  * its own locking that guarantees that the neighbour lower
+  * list will remain unchanged.
+  */
+ void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
+ {
+       struct netdev_adjacent *lower;
+       lower = list_entry((*iter)->next, struct netdev_adjacent, list);
+       if (&lower->list == &dev->adj_list.lower)
+               return NULL;
+       *iter = &lower->list;
+       return lower->dev;
+ }
+ EXPORT_SYMBOL(netdev_lower_get_next);
+ 
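
The lower-list walk follows the same pattern but relies on RTNL (or
equivalent locking) rather than RCU; dev_get_nest_level() below consumes
it via the netdev_for_each_lower_dev() macro. An open-coded sketch:

	struct list_head *iter = &dev->adj_list.lower;
	struct net_device *lower;

	ASSERT_RTNL();
	while ((lower = netdev_lower_get_next(dev, &iter)))
		pr_debug("lower dev: %s\n", lower->name);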
  /**
   * netdev_lower_get_first_private_rcu - Get the first ->private from the
   *                                   lower neighbour list, RCU
@@@ -5077,6 -5124,30 +5130,30 @@@ void *netdev_lower_dev_get_private(stru
  }
  EXPORT_SYMBOL(netdev_lower_dev_get_private);
  
+ 
+ int dev_get_nest_level(struct net_device *dev,
+                      bool (*type_check)(struct net_device *dev))
+ {
+       struct net_device *lower = NULL;
+       struct list_head *iter;
+       int max_nest = -1;
+       int nest;
+       ASSERT_RTNL();
+       netdev_for_each_lower_dev(dev, lower, iter) {
+               nest = dev_get_nest_level(lower, type_check);
+               if (max_nest < nest)
+                       max_nest = nest;
+       }
+       if (type_check(dev))
+               max_nest++;
+       return max_nest;
+ }
+ EXPORT_SYMBOL(dev_get_nest_level);
+ 
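
This recursion feeds vlan_dev_get_lock_subclass() in the vlan hunk above:
the stacking depth is computed once under RTNL and cached in nest_level.
A sketch of the expected call site; the exact location (likely vlan
registration) is outside this diff and the "+ 1" is an assumption:

	/* hypothetical: cache the device's stacking depth for lockdep */
	vlan_dev_priv(vlan_dev)->nest_level =
		dev_get_nest_level(real_dev, is_vlan_dev) + 1;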
  static void dev_change_rx_flags(struct net_device *dev, int flags)
  {
        const struct net_device_ops *ops = dev->netdev_ops;
@@@ -5242,7 -5313,6 +5319,6 @@@ void __dev_set_rx_mode(struct net_devic
        if (ops->ndo_set_rx_mode)
                ops->ndo_set_rx_mode(dev);
  }
- EXPORT_SYMBOL(__dev_set_rx_mode);
  
  void dev_set_rx_mode(struct net_device *dev)
  {
@@@ -5547,7 -5617,7 +5623,7 @@@ static int dev_new_index(struct net *ne
  
  /* Delayed registration/unregisteration */
  static LIST_HEAD(net_todo_list);
- static DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
+ DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
  
  static void net_set_todo(struct net_device *dev)
  {
@@@ -5604,6 -5674,10 +5680,6 @@@ static void rollback_registered_many(st
                */
                call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
  
 -              if (!dev->rtnl_link_ops ||
 -                  dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
 -                      rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL);
 -
                /*
                 *      Flush the unicast and multicast chains
                 */
                if (dev->netdev_ops->ndo_uninit)
                        dev->netdev_ops->ndo_uninit(dev);
  
 +              if (!dev->rtnl_link_ops ||
 +                  dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
 +                      rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL);
 +
                /* Notifier chain MUST detach us all upper devices. */
                WARN_ON(netdev_has_any_upper_dev(dev));
  
diff --combined net/core/net_namespace.c
@@@ -24,7 -24,7 +24,7 @@@
  
  static LIST_HEAD(pernet_list);
  static struct list_head *first_device = &pernet_list;
- static DEFINE_MUTEX(net_mutex);
+ DEFINE_MUTEX(net_mutex);
  
  LIST_HEAD(net_namespace_list);
  EXPORT_SYMBOL_GPL(net_namespace_list);
@@@ -273,7 -273,7 +273,7 @@@ static void cleanup_net(struct work_str
  {
        const struct pernet_operations *ops;
        struct net *net, *tmp;
 -      LIST_HEAD(net_kill_list);
 +      struct list_head net_kill_list;
        LIST_HEAD(net_exit_list);
  
        /* Atomically snapshot the list of namespaces to cleanup */
diff --combined net/core/rtnetlink.c
@@@ -353,15 -353,46 +353,46 @@@ void __rtnl_link_unregister(struct rtnl
  }
  EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
  
+ /* Return with the rtnl_lock held when there are no network
+  * devices unregistering in any network namespace.
+  */
+ static void rtnl_lock_unregistering_all(void)
+ {
+       struct net *net;
+       bool unregistering;
+       DEFINE_WAIT(wait);
+       for (;;) {
+               prepare_to_wait(&netdev_unregistering_wq, &wait,
+                               TASK_UNINTERRUPTIBLE);
+               unregistering = false;
+               rtnl_lock();
+               for_each_net(net) {
+                       if (net->dev_unreg_count > 0) {
+                               unregistering = true;
+                               break;
+                       }
+               }
+               if (!unregistering)
+                       break;
+               __rtnl_unlock();
+               schedule();
+       }
+       finish_wait(&netdev_unregistering_wq, &wait);
+ }
+ 
  /**
   * rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
   * @ops: struct rtnl_link_ops * to unregister
   */
  void rtnl_link_unregister(struct rtnl_link_ops *ops)
  {
-       rtnl_lock();
+       /* Close the race with cleanup_net() */
+       mutex_lock(&net_mutex);
+       rtnl_lock_unregistering_all();
        __rtnl_link_unregister(ops);
        rtnl_unlock();
+       mutex_unlock(&net_mutex);
  }
  EXPORT_SYMBOL_GPL(rtnl_link_unregister);
  
@@@ -767,8 -798,8 +798,8 @@@ static inline int rtnl_vfinfo_size(cons
                size += num_vfs *
                        (nla_total_size(sizeof(struct ifla_vf_mac)) +
                         nla_total_size(sizeof(struct ifla_vf_vlan)) +
 -                       nla_total_size(sizeof(struct ifla_vf_tx_rate)) +
 -                       nla_total_size(sizeof(struct ifla_vf_spoofchk)));
 +                       nla_total_size(sizeof(struct ifla_vf_spoofchk)) +
 +                       nla_total_size(sizeof(struct ifla_vf_rate)));
                return size;
        } else
                return 0;
@@@ -1034,7 -1065,6 +1065,7 @@@ static int rtnl_fill_ifinfo(struct sk_b
                        struct ifla_vf_info ivi;
                        struct ifla_vf_mac vf_mac;
                        struct ifla_vf_vlan vf_vlan;
 +                      struct ifla_vf_rate vf_rate;
                        struct ifla_vf_tx_rate vf_tx_rate;
                        struct ifla_vf_spoofchk vf_spoofchk;
                        struct ifla_vf_link_state vf_linkstate;
                                break;
                        vf_mac.vf =
                                vf_vlan.vf =
 +                              vf_rate.vf =
                                vf_tx_rate.vf =
                                vf_spoofchk.vf =
                                vf_linkstate.vf = ivi.vf;
                        memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac));
                        vf_vlan.vlan = ivi.vlan;
                        vf_vlan.qos = ivi.qos;
 -                      vf_tx_rate.rate = ivi.tx_rate;
 +                      vf_tx_rate.rate = ivi.max_tx_rate;
 +                      vf_rate.min_tx_rate = ivi.min_tx_rate;
 +                      vf_rate.max_tx_rate = ivi.max_tx_rate;
                        vf_spoofchk.setting = ivi.spoofchk;
                        vf_linkstate.link_state = ivi.linkstate;
                        vf = nla_nest_start(skb, IFLA_VF_INFO);
                        }
                        if (nla_put(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac) ||
                            nla_put(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan) ||
 +                          nla_put(skb, IFLA_VF_RATE, sizeof(vf_rate),
 +                                  &vf_rate) ||
                            nla_put(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate),
                                    &vf_tx_rate) ||
                            nla_put(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk),
@@@ -1183,8 -1208,6 +1214,8 @@@ static const struct nla_policy ifla_vf_
                                    .len = sizeof(struct ifla_vf_tx_rate) },
        [IFLA_VF_SPOOFCHK]      = { .type = NLA_BINARY,
                                    .len = sizeof(struct ifla_vf_spoofchk) },
 +      [IFLA_VF_RATE]          = { .type = NLA_BINARY,
 +                                  .len = sizeof(struct ifla_vf_rate) },
  };
  
  static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = {
@@@ -1344,29 -1367,11 +1375,29 @@@ static int do_setvfinfo(struct net_devi
                }
                case IFLA_VF_TX_RATE: {
                        struct ifla_vf_tx_rate *ivt;
 +                      struct ifla_vf_info ivf;
                        ivt = nla_data(vf);
                        err = -EOPNOTSUPP;
 -                      if (ops->ndo_set_vf_tx_rate)
 -                              err = ops->ndo_set_vf_tx_rate(dev, ivt->vf,
 -                                                            ivt->rate);
 +                      if (ops->ndo_get_vf_config)
 +                              err = ops->ndo_get_vf_config(dev, ivt->vf,
 +                                                           &ivf);
 +                      if (err)
 +                              break;
 +                      err = -EOPNOTSUPP;
 +                      if (ops->ndo_set_vf_rate)
 +                              err = ops->ndo_set_vf_rate(dev, ivt->vf,
 +                                                         ivf.min_tx_rate,
 +                                                         ivt->rate);
 +                      break;
 +              }
 +              case IFLA_VF_RATE: {
 +                      struct ifla_vf_rate *ivt;
 +                      ivt = nla_data(vf);
 +                      err = -EOPNOTSUPP;
 +                      if (ops->ndo_set_vf_rate)
 +                              err = ops->ndo_set_vf_rate(dev, ivt->vf,
 +                                                         ivt->min_tx_rate,
 +                                                         ivt->max_tx_rate);
                        break;
                }
                case IFLA_VF_SPOOFCHK: {
diff --combined net/core/skbuff.c
@@@ -694,7 -694,7 +694,7 @@@ static void __copy_skb_header(struct sk
  #endif
        memcpy(new->cb, old->cb, sizeof(old->cb));
        new->csum               = old->csum;
 -      new->local_df           = old->local_df;
 +      new->ignore_df          = old->ignore_df;
        new->pkt_type           = old->pkt_type;
        new->ip_summed          = old->ip_summed;
        skb_copy_queue_mapping(new, old);
@@@ -3076,7 -3076,7 +3076,7 @@@ int skb_gro_receive(struct sk_buff **he
        if (unlikely(p->len + len >= 65536))
                return -E2BIG;
  
-       lp = NAPI_GRO_CB(p)->last ?: p;
+       lp = NAPI_GRO_CB(p)->last;
        pinfo = skb_shinfo(lp);
  
        if (headlen <= offset) {
@@@ -3192,7 -3192,7 +3192,7 @@@ merge
  
        __skb_pull(skb, offset);
  
-       if (!NAPI_GRO_CB(p)->last)
+       if (NAPI_GRO_CB(p)->last == p)
                skb_shinfo(p)->frag_list = skb;
        else
                NAPI_GRO_CB(p)->last->next = skb;
@@@ -3913,7 -3913,7 +3913,7 @@@ void skb_scrub_packet(struct sk_buff *s
        skb->tstamp.tv64 = 0;
        skb->pkt_type = PACKET_HOST;
        skb->skb_iif = 0;
 -      skb->local_df = 0;
 +      skb->ignore_df = 0;
        skb_dst_drop(skb);
        skb->mark = 0;
        secpath_reset(skb);
diff --combined net/ipv4/ip_tunnel.c
@@@ -395,10 -395,11 +395,10 @@@ static struct ip_tunnel *ip_tunnel_crea
                                          struct ip_tunnel_net *itn,
                                          struct ip_tunnel_parm *parms)
  {
 -      struct ip_tunnel *nt, *fbt;
 +      struct ip_tunnel *nt;
        struct net_device *dev;
  
        BUG_ON(!itn->fb_tunnel_dev);
 -      fbt = netdev_priv(itn->fb_tunnel_dev);
        dev = __ip_tunnel_create(net, itn->fb_tunnel_dev->rtnl_link_ops, parms);
        if (IS_ERR(dev))
                return ERR_CAST(dev);
@@@ -539,9 -540,10 +539,10 @@@ void ip_tunnel_xmit(struct sk_buff *skb
        unsigned int max_headroom;      /* The extra header space needed */
        __be32 dst;
        int err;
-       bool connected = true;
+       bool connected;
  
        inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
+       connected = (tunnel->parms.iph.daddr != 0);
  
        dst = tnl_params->daddr;
        if (dst == 0) {
@@@ -754,8 -756,10 +755,8 @@@ int ip_tunnel_ioctl(struct net_device *
  
                if (!t && (cmd == SIOCADDTUNNEL)) {
                        t = ip_tunnel_create(net, itn, p);
 -                      if (IS_ERR(t)) {
 -                              err = PTR_ERR(t);
 -                              break;
 -                      }
 +                      err = PTR_ERR_OR_ZERO(t);
 +                      break;
                }
                if (dev != itn->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
                        if (t != NULL) {
@@@ -879,6 -883,7 +880,7 @@@ int ip_tunnel_init_net(struct net *net
         */
        if (!IS_ERR(itn->fb_tunnel_dev)) {
                itn->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL;
+               itn->fb_tunnel_dev->mtu = ip_tunnel_bind_dev(itn->fb_tunnel_dev);
                ip_tunnel_add(itn, netdev_priv(itn->fb_tunnel_dev));
        }
        rtnl_unlock();
diff --combined net/ipv4/route.c
@@@ -993,9 -993,6 +993,9 @@@ void ipv4_update_pmtu(struct sk_buff *s
        struct flowi4 fl4;
        struct rtable *rt;
  
 +      if (!mark)
 +              mark = IP4_REPLY_MARK(net, skb->mark);
 +
        __build_flow_key(&fl4, NULL, iph, oif,
                         RT_TOS(iph->tos), protocol, mark, flow_flags);
        rt = __ip_route_output_key(net, &fl4);
@@@ -1013,10 -1010,6 +1013,10 @@@ static void __ipv4_sk_update_pmtu(struc
        struct rtable *rt;
  
        __build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);
 +
 +      if (!fl4.flowi4_mark)
 +              fl4.flowi4_mark = IP4_REPLY_MARK(sock_net(sk), skb->mark);
 +
        rt = __ip_route_output_key(sock_net(sk), &fl4);
        if (!IS_ERR(rt)) {
                __ip_rt_update_pmtu(rt, &fl4, mtu);
@@@ -1526,7 -1519,7 +1526,7 @@@ static int __mkroute_input(struct sk_bu
        struct in_device *out_dev;
        unsigned int flags = 0;
        bool do_cache;
-       u32 itag;
+       u32 itag = 0;
  
        /* get a working reference to the output device */
        out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res));
diff --combined net/ipv4/xfrm4_output.c
@@@ -25,7 -25,7 +25,7 @@@ static int xfrm4_tunnel_check_size(stru
        if (IPCB(skb)->flags & IPSKB_XFRM_TUNNEL_SIZE)
                goto out;
  
 -      if (!(ip_hdr(skb)->frag_off & htons(IP_DF)) || skb->local_df)
 +      if (!(ip_hdr(skb)->frag_off & htons(IP_DF)) || skb->ignore_df)
                goto out;
  
        mtu = dst_mtu(skb_dst(skb));
@@@ -62,10 -62,7 +62,7 @@@ int xfrm4_prepare_output(struct xfrm_st
        if (err)
                return err;
  
-       memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
-       IPCB(skb)->flags |= IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED;
-       skb->protocol = htons(ETH_P_IP);
+       IPCB(skb)->flags |= IPSKB_XFRM_TUNNEL_SIZE;
  
        return x->outer_mode->output2(x, skb);
  }
@@@ -73,27 -70,34 +70,34 @@@ EXPORT_SYMBOL(xfrm4_prepare_output)
  
  int xfrm4_output_finish(struct sk_buff *skb)
  {
+       memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
+       skb->protocol = htons(ETH_P_IP);
+ #ifdef CONFIG_NETFILTER
+       IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED;
+ #endif
+       return xfrm_output(skb);
+ }
+ 
+ static int __xfrm4_output(struct sk_buff *skb)
+ {
+       struct xfrm_state *x = skb_dst(skb)->xfrm;
  #ifdef CONFIG_NETFILTER
-       if (!skb_dst(skb)->xfrm) {
+       if (!x) {
                IPCB(skb)->flags |= IPSKB_REROUTED;
                return dst_output(skb);
        }
-       IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED;
  #endif
  
-       skb->protocol = htons(ETH_P_IP);
-       return xfrm_output(skb);
+       return x->outer_mode->afinfo->output_finish(skb);
  }
  
  int xfrm4_output(struct sock *sk, struct sk_buff *skb)
  {
-       struct dst_entry *dst = skb_dst(skb);
-       struct xfrm_state *x = dst->xfrm;
        return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb,
-                           NULL, dst->dev,
-                           x->outer_mode->afinfo->output_finish,
+                           NULL, skb_dst(skb)->dev, __xfrm4_output,
                            !(IPCB(skb)->flags & IPSKB_REROUTED));
  }
  
diff --combined net/ipv6/ip6_output.c
@@@ -219,7 -219,7 +219,7 @@@ int ip6_xmit(struct sock *sk, struct sk
        skb->mark = sk->sk_mark;
  
        mtu = dst_mtu(dst);
 -      if ((skb->len <= mtu) || skb->local_df || skb_is_gso(skb)) {
 +      if ((skb->len <= mtu) || skb->ignore_df || skb_is_gso(skb)) {
                IP6_UPD_PO_STATS(net, ip6_dst_idev(skb_dst(skb)),
                              IPSTATS_MIB_OUT, skb->len);
                return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL,
@@@ -347,11 -347,11 +347,11 @@@ static bool ip6_pkt_too_big(const struc
        if (skb->len <= mtu)
                return false;
  
 -      /* ipv6 conntrack defrag sets max_frag_size + local_df */
 +      /* ipv6 conntrack defrag sets max_frag_size + ignore_df */
        if (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)
                return true;
  
 -      if (skb->local_df)
 +      if (skb->ignore_df)
                return false;
  
        if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
@@@ -559,7 -559,7 +559,7 @@@ int ip6_fragment(struct sk_buff *skb, i
        /* We must not fragment if the socket is set to force MTU discovery
         * or if the skb is not generated by a local socket.
         */
 -      if (unlikely(!skb->local_df && skb->len > mtu) ||
 +      if (unlikely(!skb->ignore_df && skb->len > mtu) ||
                     (IP6CB(skb)->frag_max_size &&
                      IP6CB(skb)->frag_max_size > mtu)) {
                if (skb->sk && dst_allfrag(skb_dst(skb)))
@@@ -1229,12 -1229,12 +1229,12 @@@ int ip6_append_data(struct sock *sk, in
                unsigned int maxnonfragsize, headersize;
  
                headersize = sizeof(struct ipv6hdr) +
-                            (opt ? opt->tot_len : 0) +
+                            (opt ? opt->opt_flen + opt->opt_nflen : 0) +
                             (dst_allfrag(&rt->dst) ?
                              sizeof(struct frag_hdr) : 0) +
                             rt->rt6i_nfheader_len;
  
 -              if (ip6_sk_local_df(sk))
 +              if (ip6_sk_ignore_df(sk))
                        maxnonfragsize = sizeof(struct ipv6hdr) + IPV6_MAXPLEN;
                else
                        maxnonfragsize = mtu;
@@@ -1544,7 -1544,7 +1544,7 @@@ int ip6_push_pending_frames(struct soc
        }
  
        /* Allow local fragmentation. */
 -      skb->local_df = ip6_sk_local_df(sk);
 +      skb->ignore_df = ip6_sk_ignore_df(sk);
  
        *final_dst = fl6->daddr;
        __skb_pull(skb, skb_network_header_len(skb));
diff --combined net/ipv6/ip6_tunnel.c
@@@ -61,7 -61,6 +61,7 @@@
  MODULE_AUTHOR("Ville Nuorvala");
  MODULE_DESCRIPTION("IPv6 tunneling device");
  MODULE_LICENSE("GPL");
 +MODULE_ALIAS_RTNL_LINK("ip6tnl");
  MODULE_ALIAS_NETDEV("ip6tnl0");
  
  #ifdef IP6_TNL_DEBUG
@@@ -1558,7 -1557,7 +1558,7 @@@ static int ip6_tnl_validate(struct nlat
  {
        u8 proto;
  
-       if (!data)
+       if (!data || !data[IFLA_IPTUN_PROTO])
                return 0;
  
        proto = nla_get_u8(data[IFLA_IPTUN_PROTO]);
diff --combined net/ipv6/ip6_vti.c
@@@ -511,6 -511,7 +511,7 @@@ static int vti6_err(struct sk_buff *skb
                    u8 type, u8 code, int offset, __be32 info)
  {
        __be32 spi;
+       __u32 mark;
        struct xfrm_state *x;
        struct ip6_tnl *t;
        struct ip_esp_hdr *esph;
        if (!t)
                return -1;
  
+       mark = be32_to_cpu(t->parms.o_key);
        switch (protocol) {
        case IPPROTO_ESP:
                esph = (struct ip_esp_hdr *)(skb->data + offset);
            type != NDISC_REDIRECT)
                return 0;
  
-       x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
+       x = xfrm_state_lookup(net, mark, (const xfrm_address_t *)&iph->daddr,
                              spi, protocol, AF_INET6);
        if (!x)
                return 0;
@@@ -792,12 -795,15 +795,12 @@@ static const struct net_device_ops vti6
   **/
  static void vti6_dev_setup(struct net_device *dev)
  {
 -      struct ip6_tnl *t;
 -
        dev->netdev_ops = &vti6_netdev_ops;
        dev->destructor = vti6_dev_free;
  
        dev->type = ARPHRD_TUNNEL6;
        dev->hard_header_len = LL_MAX_HEADER + sizeof(struct ipv6hdr);
        dev->mtu = ETH_DATA_LEN;
 -      t = netdev_priv(dev);
        dev->flags |= IFF_NOARP;
        dev->addr_len = sizeof(struct in6_addr);
        dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
@@@ -1094,7 -1100,6 +1097,6 @@@ static int __init vti6_tunnel_init(void
  
        err = xfrm6_protocol_register(&vti_esp6_protocol, IPPROTO_ESP);
        if (err < 0) {
-               unregister_pernet_device(&vti6_net_ops);
                pr_err("%s: can't register vti6 protocol\n", __func__);
  
                goto out;
        err = xfrm6_protocol_register(&vti_ah6_protocol, IPPROTO_AH);
        if (err < 0) {
                xfrm6_protocol_deregister(&vti_esp6_protocol, IPPROTO_ESP);
-               unregister_pernet_device(&vti6_net_ops);
                pr_err("%s: can't register vti6 protocol\n", __func__);
  
                goto out;
        if (err < 0) {
                xfrm6_protocol_deregister(&vti_ah6_protocol, IPPROTO_AH);
                xfrm6_protocol_deregister(&vti_esp6_protocol, IPPROTO_ESP);
-               unregister_pernet_device(&vti6_net_ops);
                pr_err("%s: can't register vti6 protocol\n", __func__);
  
                goto out;
diff --combined net/ipv6/route.c
@@@ -1176,7 -1176,7 +1176,7 @@@ void ip6_update_pmtu(struct sk_buff *sk
  
        memset(&fl6, 0, sizeof(fl6));
        fl6.flowi6_oif = oif;
 -      fl6.flowi6_mark = mark;
 +      fl6.flowi6_mark = mark ? mark : IP6_REPLY_MARK(net, skb->mark);
        fl6.daddr = iph->daddr;
        fl6.saddr = iph->saddr;
        fl6.flowlabel = ip6_flowinfo(iph);
@@@ -1455,7 -1455,7 +1455,7 @@@ static int ip6_dst_gc(struct dst_ops *o
                goto out;
  
        net->ipv6.ip6_rt_gc_expire++;
 -      fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net, entries > rt_max_size);
 +      fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net, true);
        entries = dst_entries_get_slow(ops);
        if (entries < ops->gc_thresh)
                net->ipv6.ip6_rt_gc_expire = rt_gc_timeout>>1;
@@@ -2234,6 -2234,27 +2234,27 @@@ void rt6_remove_prefsrc(struct inet6_if
        fib6_clean_all(net, fib6_remove_prefsrc, &adni);
  }
  
+ #define RTF_RA_ROUTER         (RTF_ADDRCONF | RTF_DEFAULT | RTF_GATEWAY)
+ #define RTF_CACHE_GATEWAY     (RTF_GATEWAY | RTF_CACHE)
+ 
+ /* Remove routers and update dst entries when a gateway turns into a host. */
+ static int fib6_clean_tohost(struct rt6_info *rt, void *arg)
+ {
+       struct in6_addr *gateway = (struct in6_addr *)arg;
+       if ((((rt->rt6i_flags & RTF_RA_ROUTER) == RTF_RA_ROUTER) ||
+            ((rt->rt6i_flags & RTF_CACHE_GATEWAY) == RTF_CACHE_GATEWAY)) &&
+            ipv6_addr_equal(gateway, &rt->rt6i_gateway)) {
+               return -1;
+       }
+       return 0;
+ }
+ 
+ void rt6_clean_tohost(struct net *net, struct in6_addr *gateway)
+ {
+       fib6_clean_all(net, fib6_clean_tohost, gateway);
+ }
+ 
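
A hedged sketch of the intended caller, presumably the NDISC path once a
neighbour stops advertising itself as a router ("gw" is a hypothetical
struct in6_addr for that neighbour):

	/* flush routes that still point at the demoted gateway */
	rt6_clean_tohost(dev_net(dev), &gw);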
  struct arg_dev_net {
        struct net_device *dev;
        struct net *net;
@@@ -2709,6 -2730,9 +2730,9 @@@ static int inet6_rtm_getroute(struct sk
        if (tb[RTA_OIF])
                oif = nla_get_u32(tb[RTA_OIF]);
  
+       if (tb[RTA_MARK])
+               fl6.flowi6_mark = nla_get_u32(tb[RTA_MARK]);
        if (iif) {
                struct net_device *dev;
                int flags = 0;
diff --combined net/ipv6/xfrm6_output.c
@@@ -78,7 -78,7 +78,7 @@@ static int xfrm6_tunnel_check_size(stru
        if (mtu < IPV6_MIN_MTU)
                mtu = IPV6_MIN_MTU;
  
 -      if (!skb->local_df && skb->len > mtu) {
 +      if (!skb->ignore_df && skb->len > mtu) {
                skb->dev = dst->dev;
  
                if (xfrm6_local_dontfrag(skb))
@@@ -114,13 -114,7 +114,7 @@@ int xfrm6_prepare_output(struct xfrm_st
        if (err)
                return err;
  
-       memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
- #ifdef CONFIG_NETFILTER
-       IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED;
- #endif
-       skb->protocol = htons(ETH_P_IPV6);
 -      skb->local_df = 1;
 +      skb->ignore_df = 1;
  
        return x->outer_mode->output2(x, skb);
  }
@@@ -128,11 -122,13 +122,13 @@@ EXPORT_SYMBOL(xfrm6_prepare_output)
  
  int xfrm6_output_finish(struct sk_buff *skb)
  {
+       memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
+       skb->protocol = htons(ETH_P_IPV6);
  #ifdef CONFIG_NETFILTER
        IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED;
  #endif
  
-       skb->protocol = htons(ETH_P_IPV6);
        return xfrm_output(skb);
  }
  
@@@ -142,6 -138,13 +138,13 @@@ static int __xfrm6_output(struct sk_buf
        struct xfrm_state *x = dst->xfrm;
        int mtu;
  
+ #ifdef CONFIG_NETFILTER
+       if (!x) {
+               IP6CB(skb)->flags |= IP6SKB_REROUTED;
+               return dst_output(skb);
+       }
+ #endif
        if (skb->protocol == htons(ETH_P_IPV6))
                mtu = ip6_skb_dst_mtu(skb);
        else
        if (skb->len > mtu && xfrm6_local_dontfrag(skb)) {
                xfrm6_local_rxpmtu(skb, mtu);
                return -EMSGSIZE;
 -      } else if (!skb->local_df && skb->len > mtu && skb->sk) {
 +      } else if (!skb->ignore_df && skb->len > mtu && skb->sk) {
                xfrm_local_error(skb, mtu);
                return -EMSGSIZE;
        }
  
  int xfrm6_output(struct sock *sk, struct sk_buff *skb)
  {
-       return NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb, NULL,
-                      skb_dst(skb)->dev, __xfrm6_output);
+       return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb,
+                           NULL, skb_dst(skb)->dev, __xfrm6_output,
+                           !(IP6CB(skb)->flags & IP6SKB_REROUTED));
  }
diff --combined net/mac80211/ieee80211_i.h
@@@ -260,7 -260,7 +260,7 @@@ struct ieee80211_if_ap 
  
        /* to be used after channel switch. */
        struct cfg80211_beacon_data *next_beacon;
 -      struct list_head vlans;
 +      struct list_head vlans; /* write-protected with RTNL and local->mtx */
  
        struct ps_data ps;
        atomic_t num_mcast_sta; /* number of stations receiving multicast */
@@@ -276,7 -276,7 +276,7 @@@ struct ieee80211_if_wds 
  };
  
  struct ieee80211_if_vlan {
 -      struct list_head list;
 +      struct list_head list; /* write-protected with RTNL and local->mtx */
  
        /* used for all tx if the VLAN is configured to 4-addr mode */
        struct sta_info __rcu *sta;
@@@ -317,6 -317,7 +317,7 @@@ struct ieee80211_roc_work 
  
        bool started, abort, hw_begun, notified;
        bool to_be_freed;
+       bool on_channel;
  
        unsigned long hw_start_time;
  
@@@ -691,10 -692,8 +692,10 @@@ struct ieee80211_chanctx 
        struct list_head list;
        struct rcu_head rcu_head;
  
 +      struct list_head assigned_vifs;
 +      struct list_head reserved_vifs;
 +
        enum ieee80211_chanctx_mode mode;
 -      int refcount;
        bool driver_present;
  
        struct ieee80211_chanctx_conf conf;
@@@ -758,14 -757,6 +759,14 @@@ struct ieee80211_sub_if_data 
        bool csa_radar_required;
        struct cfg80211_chan_def csa_chandef;
  
 +      struct list_head assigned_chanctx_list; /* protected by chanctx_mtx */
 +      struct list_head reserved_chanctx_list; /* protected by chanctx_mtx */
 +
 +      /* context reservation -- protected with chanctx_mtx */
 +      struct ieee80211_chanctx *reserved_chanctx;
 +      struct cfg80211_chan_def reserved_chandef;
 +      bool reserved_radar_required;
 +
        /* used to reconfigure hardware SM PS */
        struct work_struct recalc_smps;
  
@@@ -1780,16 -1771,6 +1781,16 @@@ int __must_chec
  ieee80211_vif_use_channel(struct ieee80211_sub_if_data *sdata,
                          const struct cfg80211_chan_def *chandef,
                          enum ieee80211_chanctx_mode mode);
 +int __must_check
 +ieee80211_vif_reserve_chanctx(struct ieee80211_sub_if_data *sdata,
 +                            const struct cfg80211_chan_def *chandef,
 +                            enum ieee80211_chanctx_mode mode,
 +                            bool radar_required);
 +int __must_check
 +ieee80211_vif_use_reserved_context(struct ieee80211_sub_if_data *sdata,
 +                                 u32 *changed);
 +int ieee80211_vif_unreserve_chanctx(struct ieee80211_sub_if_data *sdata);
 +
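
Taken together, the three prototypes above suggest a two-phase channel
switch. A hedged sketch of the flow (new_chandef is a hypothetical
cfg80211_chan_def; the real consumer is the channel-switch code, which is
not part of this hunk):

	u32 changed = 0;
	int err;

	err = ieee80211_vif_reserve_chanctx(sdata, &new_chandef,
					    IEEE80211_CHANCTX_SHARED, false);
	if (err)
		return err;

	/* ... later, at the actual switch point ... */
	err = ieee80211_vif_use_reserved_context(sdata, &changed);
	if (err)
		ieee80211_vif_unreserve_chanctx(sdata);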
  int __must_check
  ieee80211_vif_change_bandwidth(struct ieee80211_sub_if_data *sdata,
                               const struct cfg80211_chan_def *chandef,
@@@ -1802,8 -1783,6 +1803,8 @@@ void ieee80211_vif_release_channel(stru
  void ieee80211_vif_vlan_copy_chanctx(struct ieee80211_sub_if_data *sdata);
  void ieee80211_vif_copy_chanctx_to_vlans(struct ieee80211_sub_if_data *sdata,
                                         bool clear);
 +int ieee80211_chanctx_refcount(struct ieee80211_local *local,
 +                             struct ieee80211_chanctx *ctx);
  
  void ieee80211_recalc_smps_chanctx(struct ieee80211_local *local,
                                   struct ieee80211_chanctx *chanctx);
@@@ -1827,11 -1806,6 +1828,11 @@@ int ieee80211_cs_headroom(struct ieee80
                          enum nl80211_iftype iftype);
  void ieee80211_recalc_dtim(struct ieee80211_local *local,
                           struct ieee80211_sub_if_data *sdata);
 +int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata,
 +                               const struct cfg80211_chan_def *chandef,
 +                               enum ieee80211_chanctx_mode chanmode,
 +                               u8 radar_detect);
 +int ieee80211_max_num_channels(struct ieee80211_local *local);
  
  #ifdef CONFIG_MAC80211_NOINLINE
  #define debug_noinline noinline
diff --combined net/mac80211/mlme.c
@@@ -1089,7 -1089,7 +1089,7 @@@ ieee80211_sta_process_chanswitch(struc
        }
        chanctx = container_of(rcu_access_pointer(sdata->vif.chanctx_conf),
                               struct ieee80211_chanctx, conf);
 -      if (chanctx->refcount > 1) {
 +      if (ieee80211_chanctx_refcount(local, chanctx) > 1) {
                sdata_info(sdata,
                           "channel switch with multiple interfaces on the same channel, disconnecting\n");
                ieee80211_queue_work(&local->hw,
@@@ -3598,18 -3598,24 +3598,24 @@@ void ieee80211_mgd_quiesce(struct ieee8
  
        sdata_lock(sdata);
  
-       if (ifmgd->auth_data) {
+       if (ifmgd->auth_data || ifmgd->assoc_data) {
+               const u8 *bssid = ifmgd->auth_data ?
+                               ifmgd->auth_data->bss->bssid :
+                               ifmgd->assoc_data->bss->bssid;
                /*
-                * If we are trying to authenticate while suspending, cfg80211
-                * won't know and won't actually abort those attempts, thus we
-                * need to do that ourselves.
+                * If we are trying to authenticate / associate while suspending,
+                * cfg80211 won't know and won't actually abort those attempts,
+                * thus we need to do that ourselves.
                 */
-               ieee80211_send_deauth_disassoc(sdata,
-                                              ifmgd->auth_data->bss->bssid,
+               ieee80211_send_deauth_disassoc(sdata, bssid,
                                               IEEE80211_STYPE_DEAUTH,
                                               WLAN_REASON_DEAUTH_LEAVING,
                                               false, frame_buf);
-               ieee80211_destroy_auth_data(sdata, false);
+               if (ifmgd->assoc_data)
+                       ieee80211_destroy_assoc_data(sdata, false);
+               if (ifmgd->auth_data)
+                       ieee80211_destroy_auth_data(sdata, false);
                cfg80211_tx_mlme_mgmt(sdata->dev, frame_buf,
                                      IEEE80211_DEAUTH_FRAME_LEN);
        }
@@@ -3701,7 -3707,7 +3707,7 @@@ int ieee80211_max_network_latency(struc
        ieee80211_recalc_ps(local, latency_usec);
        mutex_unlock(&local->iflist_mtx);
  
 -      return 0;
 +      return NOTIFY_OK;
  }
  
  static u8 ieee80211_ht_vht_rx_chains(struct ieee80211_sub_if_data *sdata,
diff --combined net/sched/cls_tcindex.c
@@@ -188,11 -188,17 +188,17 @@@ static const struct nla_policy tcindex_
        [TCA_TCINDEX_CLASSID]           = { .type = NLA_U32 },
  };
  
+ 
+ static void tcindex_filter_result_init(struct tcindex_filter_result *r)
+ {
+       memset(r, 0, sizeof(*r));
+       tcf_exts_init(&r->exts, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
+ }
+ 
  static int
  tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
                  u32 handle, struct tcindex_data *p,
                  struct tcindex_filter_result *r, struct nlattr **tb,
 -               struct nlattr *est)
 +                struct nlattr *est, bool ovr)
  {
        int err, balloc = 0;
        struct tcindex_filter_result new_filter_result, *old_r = r;
        struct tcf_exts e;
  
        tcf_exts_init(&e, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
 -      err = tcf_exts_validate(net, tp, tb, est, &e);
 +      err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
        if (err < 0)
                return err;
  
        memcpy(&cp, p, sizeof(cp));
-       memset(&new_filter_result, 0, sizeof(new_filter_result));
-       tcf_exts_init(&new_filter_result.exts, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
+       tcindex_filter_result_init(&new_filter_result);
  
+       tcindex_filter_result_init(&cr);
        if (old_r)
-               memcpy(&cr, r, sizeof(cr));
-       else {
-               memset(&cr, 0, sizeof(cr));
-               tcf_exts_init(&cr.exts, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
-       }
+               cr.res = r->res;
  
        if (tb[TCA_TCINDEX_HASH])
                cp.hash = nla_get_u32(tb[TCA_TCINDEX_HASH]);
        err = -ENOMEM;
        if (!cp.perfect && !cp.h) {
                if (valid_perfect_hash(&cp)) {
+                       int i;
                        cp.perfect = kcalloc(cp.hash, sizeof(*r), GFP_KERNEL);
                        if (!cp.perfect)
                                goto errout;
+                       for (i = 0; i < cp.hash; i++)
+                               tcf_exts_init(&cp.perfect[i].exts, TCA_TCINDEX_ACT,
+                                             TCA_TCINDEX_POLICE);
                        balloc = 1;
                } else {
                        cp.h = kcalloc(cp.hash, sizeof(f), GFP_KERNEL);
                tcf_bind_filter(tp, &cr.res, base);
        }
  
-       tcf_exts_change(tp, &cr.exts, &e);
+       if (old_r)
+               tcf_exts_change(tp, &r->exts, &e);
+       else
+               tcf_exts_change(tp, &cr.exts, &e);
  
        tcf_tree_lock(tp);
        if (old_r && old_r != r)
-               memset(old_r, 0, sizeof(*old_r));
+               tcindex_filter_result_init(old_r);
  
        memcpy(p, &cp, sizeof(cp));
-       memcpy(r, &cr, sizeof(cr));
+       r->res = cr.res;
  
        if (r == &new_filter_result) {
                struct tcindex_filter **fp;
@@@ -331,7 -341,7 +341,7 @@@ errout
  static int
  tcindex_change(struct net *net, struct sk_buff *in_skb,
               struct tcf_proto *tp, unsigned long base, u32 handle,
 -             struct nlattr **tca, unsigned long *arg)
 +             struct nlattr **tca, unsigned long *arg, bool ovr)
  {
        struct nlattr *opt = tca[TCA_OPTIONS];
        struct nlattr *tb[TCA_TCINDEX_MAX + 1];
                return err;
  
        return tcindex_set_parms(net, tp, base, handle, p, r, tb,
 -                               tca[TCA_RATE]);
 +                               tca[TCA_RATE], ovr);
  }