Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
[pandora-kernel.git] / net / core / filter.c
index ab3c74e..6bd2e35 100644 (file)
 #include <linux/seccomp.h>
 #include <linux/if_vlan.h>
 
+/* Registers */
+#define BPF_R0 regs[BPF_REG_0]
+#define BPF_R1 regs[BPF_REG_1]
+#define BPF_R2 regs[BPF_REG_2]
+#define BPF_R3 regs[BPF_REG_3]
+#define BPF_R4 regs[BPF_REG_4]
+#define BPF_R5 regs[BPF_REG_5]
+#define BPF_R6 regs[BPF_REG_6]
+#define BPF_R7 regs[BPF_REG_7]
+#define BPF_R8 regs[BPF_REG_8]
+#define BPF_R9 regs[BPF_REG_9]
+#define BPF_R10        regs[BPF_REG_10]
+
+/* Named registers */
+#define A      regs[insn->a_reg]
+#define X      regs[insn->x_reg]
+#define FP     regs[BPF_REG_FP]
+#define ARG1   regs[BPF_REG_ARG1]
+#define CTX    regs[BPF_REG_CTX]
+#define K      insn->imm
+
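A minimal sketch of what these shorthands expand to once insn points at the current instruction (illustration only, not part of the diff):

        /* A = A + X;  is, after macro expansion: */
        regs[insn->a_reg] = regs[insn->a_reg] + regs[insn->x_reg];

        /* A = A + K;  is: */
        regs[insn->a_reg] = regs[insn->a_reg] + insn->imm;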
 /* No hurry in this branch
  *
  * Exported for the bpf jit load helper.
@@ -57,9 +78,9 @@ void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, uns
                ptr = skb_network_header(skb) + k - SKF_NET_OFF;
        else if (k >= SKF_LL_OFF)
                ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
-
        if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
                return ptr;
+
        return NULL;
 }
 
@@ -68,6 +89,7 @@ static inline void *load_pointer(const struct sk_buff *skb, int k,
 {
        if (k >= 0)
                return skb_header_pointer(skb, k, size, buffer);
+
        return bpf_internal_load_pointer_neg_helper(skb, k, size);
 }
 
@@ -122,13 +144,6 @@ noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
        return 0;
 }
 
-/* Register mappings for user programs. */
-#define A_REG          0
-#define X_REG          7
-#define TMP_REG                8
-#define ARG2_REG       2
-#define ARG3_REG       3
-
 /**
  *     __sk_run_filter - run a filter on a given context
  *     @ctx: buffer to run the filter on
@@ -138,212 +153,213 @@ noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
  * keep, 0 for none. @ctx is the data we are operating on, @insn is the
  * array of filter instructions.
  */
-unsigned int __sk_run_filter(void *ctx, const struct sock_filter_int *insn)
+static unsigned int __sk_run_filter(void *ctx, const struct sock_filter_int *insn)
 {
        u64 stack[MAX_BPF_STACK / sizeof(u64)];
        u64 regs[MAX_BPF_REG], tmp;
-       void *ptr;
-       int off;
-
-#define K  insn->imm
-#define A  regs[insn->a_reg]
-#define X  regs[insn->x_reg]
-#define R0 regs[0]
-
-#define CONT    ({insn++; goto select_insn; })
-#define CONT_JMP ({insn++; goto select_insn; })
-
        static const void *jumptable[256] = {
                [0 ... 255] = &&default_label,
                /* Now overwrite non-defaults ... */
-#define DL(A, B, C)    [A|B|C] = &&A##_##B##_##C
-               DL(BPF_ALU, BPF_ADD, BPF_X),
-               DL(BPF_ALU, BPF_ADD, BPF_K),
-               DL(BPF_ALU, BPF_SUB, BPF_X),
-               DL(BPF_ALU, BPF_SUB, BPF_K),
-               DL(BPF_ALU, BPF_AND, BPF_X),
-               DL(BPF_ALU, BPF_AND, BPF_K),
-               DL(BPF_ALU, BPF_OR, BPF_X),
-               DL(BPF_ALU, BPF_OR, BPF_K),
-               DL(BPF_ALU, BPF_LSH, BPF_X),
-               DL(BPF_ALU, BPF_LSH, BPF_K),
-               DL(BPF_ALU, BPF_RSH, BPF_X),
-               DL(BPF_ALU, BPF_RSH, BPF_K),
-               DL(BPF_ALU, BPF_XOR, BPF_X),
-               DL(BPF_ALU, BPF_XOR, BPF_K),
-               DL(BPF_ALU, BPF_MUL, BPF_X),
-               DL(BPF_ALU, BPF_MUL, BPF_K),
-               DL(BPF_ALU, BPF_MOV, BPF_X),
-               DL(BPF_ALU, BPF_MOV, BPF_K),
-               DL(BPF_ALU, BPF_DIV, BPF_X),
-               DL(BPF_ALU, BPF_DIV, BPF_K),
-               DL(BPF_ALU, BPF_MOD, BPF_X),
-               DL(BPF_ALU, BPF_MOD, BPF_K),
-               DL(BPF_ALU, BPF_NEG, 0),
-               DL(BPF_ALU, BPF_END, BPF_TO_BE),
-               DL(BPF_ALU, BPF_END, BPF_TO_LE),
-               DL(BPF_ALU64, BPF_ADD, BPF_X),
-               DL(BPF_ALU64, BPF_ADD, BPF_K),
-               DL(BPF_ALU64, BPF_SUB, BPF_X),
-               DL(BPF_ALU64, BPF_SUB, BPF_K),
-               DL(BPF_ALU64, BPF_AND, BPF_X),
-               DL(BPF_ALU64, BPF_AND, BPF_K),
-               DL(BPF_ALU64, BPF_OR, BPF_X),
-               DL(BPF_ALU64, BPF_OR, BPF_K),
-               DL(BPF_ALU64, BPF_LSH, BPF_X),
-               DL(BPF_ALU64, BPF_LSH, BPF_K),
-               DL(BPF_ALU64, BPF_RSH, BPF_X),
-               DL(BPF_ALU64, BPF_RSH, BPF_K),
-               DL(BPF_ALU64, BPF_XOR, BPF_X),
-               DL(BPF_ALU64, BPF_XOR, BPF_K),
-               DL(BPF_ALU64, BPF_MUL, BPF_X),
-               DL(BPF_ALU64, BPF_MUL, BPF_K),
-               DL(BPF_ALU64, BPF_MOV, BPF_X),
-               DL(BPF_ALU64, BPF_MOV, BPF_K),
-               DL(BPF_ALU64, BPF_ARSH, BPF_X),
-               DL(BPF_ALU64, BPF_ARSH, BPF_K),
-               DL(BPF_ALU64, BPF_DIV, BPF_X),
-               DL(BPF_ALU64, BPF_DIV, BPF_K),
-               DL(BPF_ALU64, BPF_MOD, BPF_X),
-               DL(BPF_ALU64, BPF_MOD, BPF_K),
-               DL(BPF_ALU64, BPF_NEG, 0),
-               DL(BPF_JMP, BPF_CALL, 0),
-               DL(BPF_JMP, BPF_JA, 0),
-               DL(BPF_JMP, BPF_JEQ, BPF_X),
-               DL(BPF_JMP, BPF_JEQ, BPF_K),
-               DL(BPF_JMP, BPF_JNE, BPF_X),
-               DL(BPF_JMP, BPF_JNE, BPF_K),
-               DL(BPF_JMP, BPF_JGT, BPF_X),
-               DL(BPF_JMP, BPF_JGT, BPF_K),
-               DL(BPF_JMP, BPF_JGE, BPF_X),
-               DL(BPF_JMP, BPF_JGE, BPF_K),
-               DL(BPF_JMP, BPF_JSGT, BPF_X),
-               DL(BPF_JMP, BPF_JSGT, BPF_K),
-               DL(BPF_JMP, BPF_JSGE, BPF_X),
-               DL(BPF_JMP, BPF_JSGE, BPF_K),
-               DL(BPF_JMP, BPF_JSET, BPF_X),
-               DL(BPF_JMP, BPF_JSET, BPF_K),
-               DL(BPF_JMP, BPF_EXIT, 0),
-               DL(BPF_STX, BPF_MEM, BPF_B),
-               DL(BPF_STX, BPF_MEM, BPF_H),
-               DL(BPF_STX, BPF_MEM, BPF_W),
-               DL(BPF_STX, BPF_MEM, BPF_DW),
-               DL(BPF_STX, BPF_XADD, BPF_W),
-               DL(BPF_STX, BPF_XADD, BPF_DW),
-               DL(BPF_ST, BPF_MEM, BPF_B),
-               DL(BPF_ST, BPF_MEM, BPF_H),
-               DL(BPF_ST, BPF_MEM, BPF_W),
-               DL(BPF_ST, BPF_MEM, BPF_DW),
-               DL(BPF_LDX, BPF_MEM, BPF_B),
-               DL(BPF_LDX, BPF_MEM, BPF_H),
-               DL(BPF_LDX, BPF_MEM, BPF_W),
-               DL(BPF_LDX, BPF_MEM, BPF_DW),
-               DL(BPF_LD, BPF_ABS, BPF_W),
-               DL(BPF_LD, BPF_ABS, BPF_H),
-               DL(BPF_LD, BPF_ABS, BPF_B),
-               DL(BPF_LD, BPF_IND, BPF_W),
-               DL(BPF_LD, BPF_IND, BPF_H),
-               DL(BPF_LD, BPF_IND, BPF_B),
-#undef DL
+               /* 32 bit ALU operations */
+               [BPF_ALU | BPF_ADD | BPF_X] = &&ALU_ADD_X,
+               [BPF_ALU | BPF_ADD | BPF_K] = &&ALU_ADD_K,
+               [BPF_ALU | BPF_SUB | BPF_X] = &&ALU_SUB_X,
+               [BPF_ALU | BPF_SUB | BPF_K] = &&ALU_SUB_K,
+               [BPF_ALU | BPF_AND | BPF_X] = &&ALU_AND_X,
+               [BPF_ALU | BPF_AND | BPF_K] = &&ALU_AND_K,
+               [BPF_ALU | BPF_OR | BPF_X]  = &&ALU_OR_X,
+               [BPF_ALU | BPF_OR | BPF_K]  = &&ALU_OR_K,
+               [BPF_ALU | BPF_LSH | BPF_X] = &&ALU_LSH_X,
+               [BPF_ALU | BPF_LSH | BPF_K] = &&ALU_LSH_K,
+               [BPF_ALU | BPF_RSH | BPF_X] = &&ALU_RSH_X,
+               [BPF_ALU | BPF_RSH | BPF_K] = &&ALU_RSH_K,
+               [BPF_ALU | BPF_XOR | BPF_X] = &&ALU_XOR_X,
+               [BPF_ALU | BPF_XOR | BPF_K] = &&ALU_XOR_K,
+               [BPF_ALU | BPF_MUL | BPF_X] = &&ALU_MUL_X,
+               [BPF_ALU | BPF_MUL | BPF_K] = &&ALU_MUL_K,
+               [BPF_ALU | BPF_MOV | BPF_X] = &&ALU_MOV_X,
+               [BPF_ALU | BPF_MOV | BPF_K] = &&ALU_MOV_K,
+               [BPF_ALU | BPF_DIV | BPF_X] = &&ALU_DIV_X,
+               [BPF_ALU | BPF_DIV | BPF_K] = &&ALU_DIV_K,
+               [BPF_ALU | BPF_MOD | BPF_X] = &&ALU_MOD_X,
+               [BPF_ALU | BPF_MOD | BPF_K] = &&ALU_MOD_K,
+               [BPF_ALU | BPF_NEG] = &&ALU_NEG,
+               [BPF_ALU | BPF_END | BPF_TO_BE] = &&ALU_END_TO_BE,
+               [BPF_ALU | BPF_END | BPF_TO_LE] = &&ALU_END_TO_LE,
+               /* 64 bit ALU operations */
+               [BPF_ALU64 | BPF_ADD | BPF_X] = &&ALU64_ADD_X,
+               [BPF_ALU64 | BPF_ADD | BPF_K] = &&ALU64_ADD_K,
+               [BPF_ALU64 | BPF_SUB | BPF_X] = &&ALU64_SUB_X,
+               [BPF_ALU64 | BPF_SUB | BPF_K] = &&ALU64_SUB_K,
+               [BPF_ALU64 | BPF_AND | BPF_X] = &&ALU64_AND_X,
+               [BPF_ALU64 | BPF_AND | BPF_K] = &&ALU64_AND_K,
+               [BPF_ALU64 | BPF_OR | BPF_X] = &&ALU64_OR_X,
+               [BPF_ALU64 | BPF_OR | BPF_K] = &&ALU64_OR_K,
+               [BPF_ALU64 | BPF_LSH | BPF_X] = &&ALU64_LSH_X,
+               [BPF_ALU64 | BPF_LSH | BPF_K] = &&ALU64_LSH_K,
+               [BPF_ALU64 | BPF_RSH | BPF_X] = &&ALU64_RSH_X,
+               [BPF_ALU64 | BPF_RSH | BPF_K] = &&ALU64_RSH_K,
+               [BPF_ALU64 | BPF_XOR | BPF_X] = &&ALU64_XOR_X,
+               [BPF_ALU64 | BPF_XOR | BPF_K] = &&ALU64_XOR_K,
+               [BPF_ALU64 | BPF_MUL | BPF_X] = &&ALU64_MUL_X,
+               [BPF_ALU64 | BPF_MUL | BPF_K] = &&ALU64_MUL_K,
+               [BPF_ALU64 | BPF_MOV | BPF_X] = &&ALU64_MOV_X,
+               [BPF_ALU64 | BPF_MOV | BPF_K] = &&ALU64_MOV_K,
+               [BPF_ALU64 | BPF_ARSH | BPF_X] = &&ALU64_ARSH_X,
+               [BPF_ALU64 | BPF_ARSH | BPF_K] = &&ALU64_ARSH_K,
+               [BPF_ALU64 | BPF_DIV | BPF_X] = &&ALU64_DIV_X,
+               [BPF_ALU64 | BPF_DIV | BPF_K] = &&ALU64_DIV_K,
+               [BPF_ALU64 | BPF_MOD | BPF_X] = &&ALU64_MOD_X,
+               [BPF_ALU64 | BPF_MOD | BPF_K] = &&ALU64_MOD_K,
+               [BPF_ALU64 | BPF_NEG] = &&ALU64_NEG,
+               /* Call instruction */
+               [BPF_JMP | BPF_CALL] = &&JMP_CALL,
+               /* Jumps */
+               [BPF_JMP | BPF_JA] = &&JMP_JA,
+               [BPF_JMP | BPF_JEQ | BPF_X] = &&JMP_JEQ_X,
+               [BPF_JMP | BPF_JEQ | BPF_K] = &&JMP_JEQ_K,
+               [BPF_JMP | BPF_JNE | BPF_X] = &&JMP_JNE_X,
+               [BPF_JMP | BPF_JNE | BPF_K] = &&JMP_JNE_K,
+               [BPF_JMP | BPF_JGT | BPF_X] = &&JMP_JGT_X,
+               [BPF_JMP | BPF_JGT | BPF_K] = &&JMP_JGT_K,
+               [BPF_JMP | BPF_JGE | BPF_X] = &&JMP_JGE_X,
+               [BPF_JMP | BPF_JGE | BPF_K] = &&JMP_JGE_K,
+               [BPF_JMP | BPF_JSGT | BPF_X] = &&JMP_JSGT_X,
+               [BPF_JMP | BPF_JSGT | BPF_K] = &&JMP_JSGT_K,
+               [BPF_JMP | BPF_JSGE | BPF_X] = &&JMP_JSGE_X,
+               [BPF_JMP | BPF_JSGE | BPF_K] = &&JMP_JSGE_K,
+               [BPF_JMP | BPF_JSET | BPF_X] = &&JMP_JSET_X,
+               [BPF_JMP | BPF_JSET | BPF_K] = &&JMP_JSET_K,
+               /* Program return */
+               [BPF_JMP | BPF_EXIT] = &&JMP_EXIT,
+               /* Store instructions */
+               [BPF_STX | BPF_MEM | BPF_B] = &&STX_MEM_B,
+               [BPF_STX | BPF_MEM | BPF_H] = &&STX_MEM_H,
+               [BPF_STX | BPF_MEM | BPF_W] = &&STX_MEM_W,
+               [BPF_STX | BPF_MEM | BPF_DW] = &&STX_MEM_DW,
+               [BPF_STX | BPF_XADD | BPF_W] = &&STX_XADD_W,
+               [BPF_STX | BPF_XADD | BPF_DW] = &&STX_XADD_DW,
+               [BPF_ST | BPF_MEM | BPF_B] = &&ST_MEM_B,
+               [BPF_ST | BPF_MEM | BPF_H] = &&ST_MEM_H,
+               [BPF_ST | BPF_MEM | BPF_W] = &&ST_MEM_W,
+               [BPF_ST | BPF_MEM | BPF_DW] = &&ST_MEM_DW,
+               /* Load instructions */
+               [BPF_LDX | BPF_MEM | BPF_B] = &&LDX_MEM_B,
+               [BPF_LDX | BPF_MEM | BPF_H] = &&LDX_MEM_H,
+               [BPF_LDX | BPF_MEM | BPF_W] = &&LDX_MEM_W,
+               [BPF_LDX | BPF_MEM | BPF_DW] = &&LDX_MEM_DW,
+               [BPF_LD | BPF_ABS | BPF_W] = &&LD_ABS_W,
+               [BPF_LD | BPF_ABS | BPF_H] = &&LD_ABS_H,
+               [BPF_LD | BPF_ABS | BPF_B] = &&LD_ABS_B,
+               [BPF_LD | BPF_IND | BPF_W] = &&LD_IND_W,
+               [BPF_LD | BPF_IND | BPF_H] = &&LD_IND_H,
+               [BPF_LD | BPF_IND | BPF_B] = &&LD_IND_B,
        };
+       void *ptr;
+       int off;
+
+#define CONT    ({ insn++; goto select_insn; })
+#define CONT_JMP ({ insn++; goto select_insn; })
 
-       regs[FP_REG]  = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)];
-       regs[ARG1_REG] = (u64) (unsigned long) ctx;
-       regs[A_REG] = 0;
-       regs[X_REG] = 0;
+       FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)];
+       ARG1 = (u64) (unsigned long) ctx;
+
+       /* Registers for user BPF programs need to be reset first. */
+       regs[BPF_REG_A] = 0;
+       regs[BPF_REG_X] = 0;
 
 select_insn:
        goto *jumptable[insn->code];
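The jump table above relies on the GCC computed-goto extension (&&label). As a rough, switch-based sketch of the same dispatch loop (illustration only; the real code keeps using goto *jumptable[...]):

        for (;;) {
                switch (insn->code) {
                case BPF_ALU64 | BPF_ADD | BPF_X:
                        regs[insn->a_reg] += regs[insn->x_reg];
                        insn++;         /* same effect as CONT */
                        continue;
                /* ... one case per opcode listed in the jump table ... */
                default:                /* plays the role of default_label */
                        WARN_RATELIMIT(1, "unknown opcode %02x\n", insn->code);
                        return 0;
                }
        }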
 
        /* ALU */
 #define ALU(OPCODE, OP)                        \
-       BPF_ALU64_##OPCODE##_BPF_X:     \
+       ALU64_##OPCODE##_X:             \
                A = A OP X;             \
                CONT;                   \
-       BPF_ALU_##OPCODE##_BPF_X:       \
+       ALU_##OPCODE##_X:               \
                A = (u32) A OP (u32) X; \
                CONT;                   \
-       BPF_ALU64_##OPCODE##_BPF_K:     \
+       ALU64_##OPCODE##_K:             \
                A = A OP K;             \
                CONT;                   \
-       BPF_ALU_##OPCODE##_BPF_K:       \
+       ALU_##OPCODE##_K:               \
                A = (u32) A OP (u32) K; \
                CONT;
 
-       ALU(BPF_ADD,  +)
-       ALU(BPF_SUB,  -)
-       ALU(BPF_AND,  &)
-       ALU(BPF_OR,   |)
-       ALU(BPF_LSH, <<)
-       ALU(BPF_RSH, >>)
-       ALU(BPF_XOR,  ^)
-       ALU(BPF_MUL,  *)
+       ALU(ADD,  +)
+       ALU(SUB,  -)
+       ALU(AND,  &)
+       ALU(OR,   |)
+       ALU(LSH, <<)
+       ALU(RSH, >>)
+       ALU(XOR,  ^)
+       ALU(MUL,  *)
 #undef ALU
-       BPF_ALU_BPF_NEG_0:
+       ALU_NEG:
                A = (u32) -A;
                CONT;
-       BPF_ALU64_BPF_NEG_0:
+       ALU64_NEG:
                A = -A;
                CONT;
-       BPF_ALU_BPF_MOV_BPF_X:
+       ALU_MOV_X:
                A = (u32) X;
                CONT;
-       BPF_ALU_BPF_MOV_BPF_K:
+       ALU_MOV_K:
                A = (u32) K;
                CONT;
-       BPF_ALU64_BPF_MOV_BPF_X:
+       ALU64_MOV_X:
                A = X;
                CONT;
-       BPF_ALU64_BPF_MOV_BPF_K:
+       ALU64_MOV_K:
                A = K;
                CONT;
-       BPF_ALU64_BPF_ARSH_BPF_X:
+       ALU64_ARSH_X:
                (*(s64 *) &A) >>= X;
                CONT;
-       BPF_ALU64_BPF_ARSH_BPF_K:
+       ALU64_ARSH_K:
                (*(s64 *) &A) >>= K;
                CONT;
-       BPF_ALU64_BPF_MOD_BPF_X:
+       ALU64_MOD_X:
                if (unlikely(X == 0))
                        return 0;
                tmp = A;
                A = do_div(tmp, X);
                CONT;
-       BPF_ALU_BPF_MOD_BPF_X:
+       ALU_MOD_X:
                if (unlikely(X == 0))
                        return 0;
                tmp = (u32) A;
                A = do_div(tmp, (u32) X);
                CONT;
-       BPF_ALU64_BPF_MOD_BPF_K:
+       ALU64_MOD_K:
                tmp = A;
                A = do_div(tmp, K);
                CONT;
-       BPF_ALU_BPF_MOD_BPF_K:
+       ALU_MOD_K:
                tmp = (u32) A;
                A = do_div(tmp, (u32) K);
                CONT;
-       BPF_ALU64_BPF_DIV_BPF_X:
+       ALU64_DIV_X:
                if (unlikely(X == 0))
                        return 0;
                do_div(A, X);
                CONT;
-       BPF_ALU_BPF_DIV_BPF_X:
+       ALU_DIV_X:
                if (unlikely(X == 0))
                        return 0;
                tmp = (u32) A;
                do_div(tmp, (u32) X);
                A = (u32) tmp;
                CONT;
-       BPF_ALU64_BPF_DIV_BPF_K:
+       ALU64_DIV_K:
                do_div(A, K);
                CONT;
-       BPF_ALU_BPF_DIV_BPF_K:
+       ALU_DIV_K:
                tmp = (u32) A;
                do_div(tmp, (u32) K);
                A = (u32) tmp;
                CONT;
-       BPF_ALU_BPF_END_BPF_TO_BE:
+       ALU_END_TO_BE:
                switch (K) {
                case 16:
                        A = (__force u16) cpu_to_be16(A);
@@ -356,7 +372,7 @@ select_insn:
                        break;
                }
                CONT;
-       BPF_ALU_BPF_END_BPF_TO_LE:
+       ALU_END_TO_LE:
                switch (K) {
                case 16:
                        A = (__force u16) cpu_to_le16(A);
@@ -371,142 +387,144 @@ select_insn:
                CONT;
 
        /* CALL */
-       BPF_JMP_BPF_CALL_0:
-               /* Function call scratches R1-R5 registers, preserves R6-R9,
-                * and stores return value into R0.
+       JMP_CALL:
+               /* Function call scratches BPF_R1-BPF_R5 registers,
+                * preserves BPF_R6-BPF_R9, and stores return value
+                * into BPF_R0.
                 */
-               R0 = (__bpf_call_base + insn->imm)(regs[1], regs[2], regs[3],
-                                                  regs[4], regs[5]);
+               BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
+                                                      BPF_R4, BPF_R5);
                CONT;
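A hedged sketch of how such a call is wired up; the helper and emitter names below are hypothetical, but the five-u64 prototype and the offset-from-__bpf_call_base encoding match the helpers and BPF_EMIT_CALL() conversions later in this file:

        /* Hypothetical helper: all helpers share this five-u64 prototype */
        static u64 my_helper(u64 ctx, u64 r2, u64 r3, u64 r4, u64 r5)
        {
                return 0;       /* the result ends up in BPF_R0 */
        }

        /* Hypothetical emission in the converter: the CALL immediate is the
         * helper's offset from __bpf_call_base, which JMP_CALL above adds
         * back to recover the address.
         */
        static void emit_my_call(struct sock_filter_int *insn)
        {
                insn->code = BPF_JMP | BPF_CALL;
                insn->imm  = my_helper - __bpf_call_base;
        }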
 
        /* JMP */
-       BPF_JMP_BPF_JA_0:
+       JMP_JA:
                insn += insn->off;
                CONT;
-       BPF_JMP_BPF_JEQ_BPF_X:
+       JMP_JEQ_X:
                if (A == X) {
                        insn += insn->off;
                        CONT_JMP;
                }
                CONT;
-       BPF_JMP_BPF_JEQ_BPF_K:
+       JMP_JEQ_K:
                if (A == K) {
                        insn += insn->off;
                        CONT_JMP;
                }
                CONT;
-       BPF_JMP_BPF_JNE_BPF_X:
+       JMP_JNE_X:
                if (A != X) {
                        insn += insn->off;
                        CONT_JMP;
                }
                CONT;
-       BPF_JMP_BPF_JNE_BPF_K:
+       JMP_JNE_K:
                if (A != K) {
                        insn += insn->off;
                        CONT_JMP;
                }
                CONT;
-       BPF_JMP_BPF_JGT_BPF_X:
+       JMP_JGT_X:
                if (A > X) {
                        insn += insn->off;
                        CONT_JMP;
                }
                CONT;
-       BPF_JMP_BPF_JGT_BPF_K:
+       JMP_JGT_K:
                if (A > K) {
                        insn += insn->off;
                        CONT_JMP;
                }
                CONT;
-       BPF_JMP_BPF_JGE_BPF_X:
+       JMP_JGE_X:
                if (A >= X) {
                        insn += insn->off;
                        CONT_JMP;
                }
                CONT;
-       BPF_JMP_BPF_JGE_BPF_K:
+       JMP_JGE_K:
                if (A >= K) {
                        insn += insn->off;
                        CONT_JMP;
                }
                CONT;
-       BPF_JMP_BPF_JSGT_BPF_X:
-               if (((s64)A) > ((s64)X)) {
+       JMP_JSGT_X:
+               if (((s64) A) > ((s64) X)) {
                        insn += insn->off;
                        CONT_JMP;
                }
                CONT;
-       BPF_JMP_BPF_JSGT_BPF_K:
-               if (((s64)A) > ((s64)K)) {
+       JMP_JSGT_K:
+               if (((s64) A) > ((s64) K)) {
                        insn += insn->off;
                        CONT_JMP;
                }
                CONT;
-       BPF_JMP_BPF_JSGE_BPF_X:
-               if (((s64)A) >= ((s64)X)) {
+       JMP_JSGE_X:
+               if (((s64) A) >= ((s64) X)) {
                        insn += insn->off;
                        CONT_JMP;
                }
                CONT;
-       BPF_JMP_BPF_JSGE_BPF_K:
-               if (((s64)A) >= ((s64)K)) {
+       JMP_JSGE_K:
+               if (((s64) A) >= ((s64) K)) {
                        insn += insn->off;
                        CONT_JMP;
                }
                CONT;
-       BPF_JMP_BPF_JSET_BPF_X:
+       JMP_JSET_X:
                if (A & X) {
                        insn += insn->off;
                        CONT_JMP;
                }
                CONT;
-       BPF_JMP_BPF_JSET_BPF_K:
+       JMP_JSET_K:
                if (A & K) {
                        insn += insn->off;
                        CONT_JMP;
                }
                CONT;
-       BPF_JMP_BPF_EXIT_0:
-               return R0;
+       JMP_EXIT:
+               return BPF_R0;
 
        /* STX and ST and LDX */
 #define LDST(SIZEOP, SIZE)                                     \
-       BPF_STX_BPF_MEM_##SIZEOP:                               \
+       STX_MEM_##SIZEOP:                                       \
                *(SIZE *)(unsigned long) (A + insn->off) = X;   \
                CONT;                                           \
-       BPF_ST_BPF_MEM_##SIZEOP:                                \
+       ST_MEM_##SIZEOP:                                        \
                *(SIZE *)(unsigned long) (A + insn->off) = K;   \
                CONT;                                           \
-       BPF_LDX_BPF_MEM_##SIZEOP:                               \
+       LDX_MEM_##SIZEOP:                                       \
                A = *(SIZE *)(unsigned long) (X + insn->off);   \
                CONT;
 
-       LDST(BPF_B,   u8)
-       LDST(BPF_H,  u16)
-       LDST(BPF_W,  u32)
-       LDST(BPF_DW, u64)
+       LDST(B,   u8)
+       LDST(H,  u16)
+       LDST(W,  u32)
+       LDST(DW, u64)
 #undef LDST
-       BPF_STX_BPF_XADD_BPF_W: /* lock xadd *(u32 *)(A + insn->off) += X */
+       STX_XADD_W: /* lock xadd *(u32 *)(A + insn->off) += X */
                atomic_add((u32) X, (atomic_t *)(unsigned long)
                           (A + insn->off));
                CONT;
-       BPF_STX_BPF_XADD_BPF_DW: /* lock xadd *(u64 *)(A + insn->off) += X */
+       STX_XADD_DW: /* lock xadd *(u64 *)(A + insn->off) += X */
                atomic64_add((u64) X, (atomic64_t *)(unsigned long)
                             (A + insn->off));
                CONT;
-       BPF_LD_BPF_ABS_BPF_W: /* R0 = ntohl(*(u32 *) (skb->data + K)) */
+       LD_ABS_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + K)) */
                off = K;
 load_word:
-               /* BPF_LD + BPD_ABS and BPF_LD + BPF_IND insns are only
-                * appearing in the programs where ctx == skb. All programs
-                * keep 'ctx' in regs[CTX_REG] == R6, sk_convert_filter()
-                * saves it in R6, internal BPF verifier will check that
-                * R6 == ctx.
+                /* BPF_LD + BPF_ABS and BPF_LD + BPF_IND insns are
+                * only appearing in the programs where ctx ==
+                * skb. All programs keep 'ctx' in regs[BPF_REG_CTX]
+                * == BPF_R6, sk_convert_filter() saves it in BPF_R6,
+                * internal BPF verifier will check that BPF_R6 ==
+                * ctx.
                 *
-                * BPF_ABS and BPF_IND are wrappers of function calls, so
-                * they scratch R1-R5 registers, preserve R6-R9, and store
-                * return value into R0.
+                * BPF_ABS and BPF_IND are wrappers of function calls,
+                * so they scratch BPF_R1-BPF_R5 registers, preserve
+                * BPF_R6-BPF_R9, and store return value into BPF_R0.
                 *
                 * Implicit input:
                 *   ctx
@@ -516,39 +534,43 @@ load_word:
                 *   K == 32-bit immediate
                 *
                 * Output:
-                *   R0 - 8/16/32-bit skb data converted to cpu endianness
+                *   BPF_R0 - 8/16/32-bit skb data converted to cpu endianness
                 */
+
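                /* Worked example (illustration only): for the classic insn
                 * "ld [26]" on an Ethernet frame, K == 26, so off == 26 and
                 * the load below leaves the IPv4 source address in BPF_R0,
                 * already converted to CPU byte order.
                 */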
                ptr = load_pointer((struct sk_buff *) ctx, off, 4, &tmp);
                if (likely(ptr != NULL)) {
-                       R0 = get_unaligned_be32(ptr);
+                       BPF_R0 = get_unaligned_be32(ptr);
                        CONT;
                }
+
                return 0;
-       BPF_LD_BPF_ABS_BPF_H: /* R0 = ntohs(*(u16 *) (skb->data + K)) */
+       LD_ABS_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + K)) */
                off = K;
 load_half:
                ptr = load_pointer((struct sk_buff *) ctx, off, 2, &tmp);
                if (likely(ptr != NULL)) {
-                       R0 = get_unaligned_be16(ptr);
+                       BPF_R0 = get_unaligned_be16(ptr);
                        CONT;
                }
+
                return 0;
-       BPF_LD_BPF_ABS_BPF_B: /* R0 = *(u8 *) (ctx + K) */
+       LD_ABS_B: /* BPF_R0 = *(u8 *) (ctx + K) */
                off = K;
 load_byte:
                ptr = load_pointer((struct sk_buff *) ctx, off, 1, &tmp);
                if (likely(ptr != NULL)) {
-                       R0 = *(u8 *)ptr;
+                       BPF_R0 = *(u8 *)ptr;
                        CONT;
                }
+
                return 0;
-       BPF_LD_BPF_IND_BPF_W: /* R0 = ntohl(*(u32 *) (skb->data + X + K)) */
+       LD_IND_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + X + K)) */
                off = K + X;
                goto load_word;
-       BPF_LD_BPF_IND_BPF_H: /* R0 = ntohs(*(u16 *) (skb->data + X + K)) */
+       LD_IND_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + X + K)) */
                off = K + X;
                goto load_half;
-       BPF_LD_BPF_IND_BPF_B: /* R0 = *(u8 *) (skb->data + X + K) */
+       LD_IND_B: /* BPF_R0 = *(u8 *) (skb->data + X + K) */
                off = K + X;
                goto load_byte;
 
@@ -556,24 +578,8 @@ load_byte:
                /* If we ever reach this, we have a bug somewhere. */
                WARN_RATELIMIT(1, "unknown opcode %02x\n", insn->code);
                return 0;
-#undef CONT_JMP
-#undef CONT
-
-#undef R0
-#undef X
-#undef A
-#undef K
 }
 
-u32 sk_run_filter_int_seccomp(const struct seccomp_data *ctx,
-                             const struct sock_filter_int *insni)
-    __attribute__ ((alias ("__sk_run_filter")));
-
-u32 sk_run_filter_int_skb(const struct sk_buff *ctx,
-                         const struct sock_filter_int *insni)
-    __attribute__ ((alias ("__sk_run_filter")));
-EXPORT_SYMBOL_GPL(sk_run_filter_int_skb);
-
 /* Helper to find the offset of pkt_type in sk_buff structure. We want
  * to make sure it's still a 3-bit field starting at a byte boundary;
  * taken from arch/x86/net/bpf_jit_comp.c.
@@ -598,16 +604,14 @@ static unsigned int pkt_type_offset(void)
        return -1;
 }
 
-static u64 __skb_get_pay_offset(u64 ctx, u64 A, u64 X, u64 r4, u64 r5)
+static u64 __skb_get_pay_offset(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
 {
-       struct sk_buff *skb = (struct sk_buff *)(long) ctx;
-
-       return __skb_get_poff(skb);
+       return __skb_get_poff((struct sk_buff *)(unsigned long) ctx);
 }
 
-static u64 __skb_get_nlattr(u64 ctx, u64 A, u64 X, u64 r4, u64 r5)
+static u64 __skb_get_nlattr(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
 {
-       struct sk_buff *skb = (struct sk_buff *)(long) ctx;
+       struct sk_buff *skb = (struct sk_buff *)(unsigned long) ctx;
        struct nlattr *nla;
 
        if (skb_is_nonlinear(skb))
@@ -616,19 +620,19 @@ static u64 __skb_get_nlattr(u64 ctx, u64 A, u64 X, u64 r4, u64 r5)
        if (skb->len < sizeof(struct nlattr))
                return 0;
 
-       if (A > skb->len - sizeof(struct nlattr))
+       if (a > skb->len - sizeof(struct nlattr))
                return 0;
 
-       nla = nla_find((struct nlattr *) &skb->data[A], skb->len - A, X);
+       nla = nla_find((struct nlattr *) &skb->data[a], skb->len - a, x);
        if (nla)
                return (void *) nla - (void *) skb->data;
 
        return 0;
 }
 
-static u64 __skb_get_nlattr_nest(u64 ctx, u64 A, u64 X, u64 r4, u64 r5)
+static u64 __skb_get_nlattr_nest(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
 {
-       struct sk_buff *skb = (struct sk_buff *)(long) ctx;
+       struct sk_buff *skb = (struct sk_buff *)(unsigned long) ctx;
        struct nlattr *nla;
 
        if (skb_is_nonlinear(skb))
@@ -637,25 +641,31 @@ static u64 __skb_get_nlattr_nest(u64 ctx, u64 A, u64 X, u64 r4, u64 r5)
        if (skb->len < sizeof(struct nlattr))
                return 0;
 
-       if (A > skb->len - sizeof(struct nlattr))
+       if (a > skb->len - sizeof(struct nlattr))
                return 0;
 
-       nla = (struct nlattr *) &skb->data[A];
-       if (nla->nla_len > skb->len - A)
+       nla = (struct nlattr *) &skb->data[a];
+       if (nla->nla_len > skb->len - a)
                return 0;
 
-       nla = nla_find_nested(nla, X);
+       nla = nla_find_nested(nla, x);
        if (nla)
                return (void *) nla - (void *) skb->data;
 
        return 0;
 }
 
-static u64 __get_raw_cpu_id(u64 ctx, u64 A, u64 X, u64 r4, u64 r5)
+static u64 __get_raw_cpu_id(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
 {
        return raw_smp_processor_id();
 }
 
+/* note that this only generates 32-bit random numbers */
+static u64 __get_random_u32(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
+{
+       return prandom_u32();
+}
+
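These helpers all share the five-u64 prototype so they can be reached through the generic call mechanism. As a hypothetical example (classic BPF macros from <linux/filter.h> assumed), a user filter that ends up calling __get_random_u32() via the new SKF_AD_RANDOM ancillary load might look like:

        struct sock_filter rand_prog[] = {
                /* A = prandom_u32() via the ancillary offset */
                BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF + SKF_AD_RANDOM),
                /* accept if the low bit is set, otherwise drop */
                BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0x1, 0, 1),
                BPF_STMT(BPF_RET | BPF_K, 0xffff),
                BPF_STMT(BPF_RET | BPF_K, 0),
        };

sk_convert_filter() rewrites that ancillary load into the BPF_MOV64_REG()/BPF_EMIT_CALL() sequence shown in convert_bpf_extensions() below.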
 static bool convert_bpf_extensions(struct sock_filter *fp,
                                   struct sock_filter_int **insnp)
 {
@@ -665,126 +675,83 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
        case SKF_AD_OFF + SKF_AD_PROTOCOL:
                BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);
 
-               insn->code = BPF_LDX | BPF_MEM | BPF_H;
-               insn->a_reg = A_REG;
-               insn->x_reg = CTX_REG;
-               insn->off = offsetof(struct sk_buff, protocol);
-               insn++;
-
+               /* A = *(u16 *) (ctx + offsetof(protocol)) */
+               *insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
+                                     offsetof(struct sk_buff, protocol));
                /* A = ntohs(A) [emitting a nop or swap16] */
-               insn->code = BPF_ALU | BPF_END | BPF_FROM_BE;
-               insn->a_reg = A_REG;
-               insn->imm = 16;
+               *insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16);
                break;
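The BPF_LDX_MEM()/BPF_ENDIAN() builders are assumed to come from the companion filter.h changes in this series; as a sketch, the first statement above stands for roughly the same field assignments that the removed lines spelled out by hand:

                *insn++ = (struct sock_filter_int) {
                        .code  = BPF_LDX | BPF_MEM | BPF_H,
                        .a_reg = BPF_REG_A,
                        .x_reg = BPF_REG_CTX,
                        .off   = offsetof(struct sk_buff, protocol),
                        .imm   = 0,
                };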
 
        case SKF_AD_OFF + SKF_AD_PKTTYPE:
-               insn->code = BPF_LDX | BPF_MEM | BPF_B;
-               insn->a_reg = A_REG;
-               insn->x_reg = CTX_REG;
-               insn->off = pkt_type_offset();
+               *insn = BPF_LDX_MEM(BPF_B, BPF_REG_A, BPF_REG_CTX,
+                                   pkt_type_offset());
                if (insn->off < 0)
                        return false;
                insn++;
-
-               insn->code = BPF_ALU | BPF_AND | BPF_K;
-               insn->a_reg = A_REG;
-               insn->imm = PKT_TYPE_MAX;
+               *insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, PKT_TYPE_MAX);
 #ifdef __BIG_ENDIAN_BITFIELD
                insn++;
-
-               insn->code = BPF_ALU | BPF_RSH | BPF_K;
-               insn->a_reg = A_REG;
-               insn->imm = 5;
+               *insn = BPF_ALU32_IMM(BPF_RSH, BPF_REG_A, 5);
 #endif
                break;
 
        case SKF_AD_OFF + SKF_AD_IFINDEX:
        case SKF_AD_OFF + SKF_AD_HATYPE:
-               if (FIELD_SIZEOF(struct sk_buff, dev) == 8)
-                       insn->code = BPF_LDX | BPF_MEM | BPF_DW;
-               else
-                       insn->code = BPF_LDX | BPF_MEM | BPF_W;
-               insn->a_reg = TMP_REG;
-               insn->x_reg = CTX_REG;
-               insn->off = offsetof(struct sk_buff, dev);
-               insn++;
-
-               insn->code = BPF_JMP | BPF_JNE | BPF_K;
-               insn->a_reg = TMP_REG;
-               insn->imm = 0;
-               insn->off = 1;
-               insn++;
-
-               insn->code = BPF_JMP | BPF_EXIT;
-               insn++;
-
                BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
                BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2);
-
-               insn->a_reg = A_REG;
-               insn->x_reg = TMP_REG;
-
-               if (fp->k == SKF_AD_OFF + SKF_AD_IFINDEX) {
-                       insn->code = BPF_LDX | BPF_MEM | BPF_W;
-                       insn->off = offsetof(struct net_device, ifindex);
-               } else {
-                       insn->code = BPF_LDX | BPF_MEM | BPF_H;
-                       insn->off = offsetof(struct net_device, type);
-               }
+               BUILD_BUG_ON(bytes_to_bpf_size(FIELD_SIZEOF(struct sk_buff, dev)) < 0);
+
+               *insn++ = BPF_LDX_MEM(bytes_to_bpf_size(FIELD_SIZEOF(struct sk_buff, dev)),
+                                     BPF_REG_TMP, BPF_REG_CTX,
+                                     offsetof(struct sk_buff, dev));
+               /* if (tmp != 0) goto pc + 1 */
+               *insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_TMP, 0, 1);
+               *insn++ = BPF_EXIT_INSN();
+               if (fp->k == SKF_AD_OFF + SKF_AD_IFINDEX)
+                       *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_TMP,
+                                           offsetof(struct net_device, ifindex));
+               else
+                       *insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_TMP,
+                                           offsetof(struct net_device, type));
                break;
 
        case SKF_AD_OFF + SKF_AD_MARK:
                BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
 
-               insn->code = BPF_LDX | BPF_MEM | BPF_W;
-               insn->a_reg = A_REG;
-               insn->x_reg = CTX_REG;
-               insn->off = offsetof(struct sk_buff, mark);
+               *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX,
+                                   offsetof(struct sk_buff, mark));
                break;
 
        case SKF_AD_OFF + SKF_AD_RXHASH:
                BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
 
-               insn->code = BPF_LDX | BPF_MEM | BPF_W;
-               insn->a_reg = A_REG;
-               insn->x_reg = CTX_REG;
-               insn->off = offsetof(struct sk_buff, hash);
+               *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX,
+                                   offsetof(struct sk_buff, hash));
                break;
 
        case SKF_AD_OFF + SKF_AD_QUEUE:
                BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2);
 
-               insn->code = BPF_LDX | BPF_MEM | BPF_H;
-               insn->a_reg = A_REG;
-               insn->x_reg = CTX_REG;
-               insn->off = offsetof(struct sk_buff, queue_mapping);
+               *insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
+                                   offsetof(struct sk_buff, queue_mapping));
                break;
 
        case SKF_AD_OFF + SKF_AD_VLAN_TAG:
        case SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT:
                BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
-
-               insn->code = BPF_LDX | BPF_MEM | BPF_H;
-               insn->a_reg = A_REG;
-               insn->x_reg = CTX_REG;
-               insn->off = offsetof(struct sk_buff, vlan_tci);
-               insn++;
-
                BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);
 
+               /* A = *(u16 *) (ctx + offsetof(vlan_tci)) */
+               *insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
+                                     offsetof(struct sk_buff, vlan_tci));
                if (fp->k == SKF_AD_OFF + SKF_AD_VLAN_TAG) {
-                       insn->code = BPF_ALU | BPF_AND | BPF_K;
-                       insn->a_reg = A_REG;
-                       insn->imm = ~VLAN_TAG_PRESENT;
+                       *insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A,
+                                             ~VLAN_TAG_PRESENT);
                } else {
-                       insn->code = BPF_ALU | BPF_RSH | BPF_K;
-                       insn->a_reg = A_REG;
-                       insn->imm = 12;
-                       insn++;
-
-                       insn->code = BPF_ALU | BPF_AND | BPF_K;
-                       insn->a_reg = A_REG;
-                       insn->imm = 1;
+                       /* A >>= 12 */
+                       *insn++ = BPF_ALU32_IMM(BPF_RSH, BPF_REG_A, 12);
+                       /* A &= 1 */
+                       *insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 1);
                }
                break;
 
@@ -792,46 +759,36 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
        case SKF_AD_OFF + SKF_AD_NLATTR:
        case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
        case SKF_AD_OFF + SKF_AD_CPU:
+       case SKF_AD_OFF + SKF_AD_RANDOM:
                /* arg1 = ctx */
-               insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
-               insn->a_reg = ARG1_REG;
-               insn->x_reg = CTX_REG;
-               insn++;
-
+               *insn++ = BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX);
                /* arg2 = A */
-               insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
-               insn->a_reg = ARG2_REG;
-               insn->x_reg = A_REG;
-               insn++;
-
+               *insn++ = BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_A);
                /* arg3 = X */
-               insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
-               insn->a_reg = ARG3_REG;
-               insn->x_reg = X_REG;
-               insn++;
-
+               *insn++ = BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_X);
                /* Emit call(ctx, arg2=A, arg3=X) */
-               insn->code = BPF_JMP | BPF_CALL;
                switch (fp->k) {
                case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
-                       insn->imm = __skb_get_pay_offset - __bpf_call_base;
+                       *insn = BPF_EMIT_CALL(__skb_get_pay_offset);
                        break;
                case SKF_AD_OFF + SKF_AD_NLATTR:
-                       insn->imm = __skb_get_nlattr - __bpf_call_base;
+                       *insn = BPF_EMIT_CALL(__skb_get_nlattr);
                        break;
                case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
-                       insn->imm = __skb_get_nlattr_nest - __bpf_call_base;
+                       *insn = BPF_EMIT_CALL(__skb_get_nlattr_nest);
                        break;
                case SKF_AD_OFF + SKF_AD_CPU:
-                       insn->imm = __get_raw_cpu_id - __bpf_call_base;
+                       *insn = BPF_EMIT_CALL(__get_raw_cpu_id);
+                       break;
+               case SKF_AD_OFF + SKF_AD_RANDOM:
+                       *insn = BPF_EMIT_CALL(__get_random_u32);
                        break;
                }
                break;
 
        case SKF_AD_OFF + SKF_AD_ALU_XOR_X:
-               insn->code = BPF_ALU | BPF_XOR | BPF_X;
-               insn->a_reg = A_REG;
-               insn->x_reg = X_REG;
+               /* A ^= X */
+               *insn = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_X);
                break;
 
        default:
@@ -881,7 +838,7 @@ int sk_convert_filter(struct sock_filter *prog, int len,
        u8 bpf_src;
 
        BUILD_BUG_ON(BPF_MEMWORDS * sizeof(u32) > MAX_BPF_STACK);
-       BUILD_BUG_ON(FP_REG + 1 != MAX_BPF_REG);
+       BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
 
        if (len <= 0 || len >= BPF_MAXINSNS)
                return -EINVAL;
@@ -896,11 +853,8 @@ do_pass:
        new_insn = new_prog;
        fp = prog;
 
-       if (new_insn) {
-               new_insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
-               new_insn->a_reg = CTX_REG;
-               new_insn->x_reg = ARG1_REG;
-       }
+       if (new_insn)
+               *new_insn = BPF_MOV64_REG(BPF_REG_CTX, BPF_REG_ARG1);
        new_insn++;
 
        for (i = 0; i < len; fp++, i++) {
@@ -948,17 +902,16 @@ do_pass:
                            convert_bpf_extensions(fp, &insn))
                                break;
 
-                       insn->code = fp->code;
-                       insn->a_reg = A_REG;
-                       insn->x_reg = X_REG;
-                       insn->imm = fp->k;
+                       *insn = BPF_RAW_INSN(fp->code, BPF_REG_A, BPF_REG_X, 0, fp->k);
                        break;
 
-               /* Jump opcodes map as-is, but offsets need adjustment. */
-               case BPF_JMP | BPF_JA:
-                       target = i + fp->k + 1;
-                       insn->code = fp->code;
-#define EMIT_JMP                                                       \
+               /* Jump transformation cannot use BPF block macros
+                * everywhere as offset calculation and target updates
+                * require a bit more work than the rest, i.e. jump
+                * opcodes map as-is, but offsets need adjustment.
+                */
+
+#define BPF_EMIT_JMP                                                   \
        do {                                                            \
                if (target >= len || target < 0)                        \
                        goto err;                                       \
@@ -967,7 +920,10 @@ do_pass:
                insn->off -= insn - tmp_insns;                          \
        } while (0)
 
-                       EMIT_JMP;
+               case BPF_JMP | BPF_JA:
+                       target = i + fp->k + 1;
+                       insn->code = fp->code;
+                       BPF_EMIT_JMP;
                        break;
 
                case BPF_JMP | BPF_JEQ | BPF_K:
@@ -983,17 +939,14 @@ do_pass:
                                 * immediate into tmp register and use it
                                 * in compare insn.
                                 */
-                               insn->code = BPF_ALU | BPF_MOV | BPF_K;
-                               insn->a_reg = TMP_REG;
-                               insn->imm = fp->k;
-                               insn++;
+                               *insn++ = BPF_MOV32_IMM(BPF_REG_TMP, fp->k);
 
-                               insn->a_reg = A_REG;
-                               insn->x_reg = TMP_REG;
+                               insn->a_reg = BPF_REG_A;
+                               insn->x_reg = BPF_REG_TMP;
                                bpf_src = BPF_X;
                        } else {
-                               insn->a_reg = A_REG;
-                               insn->x_reg = X_REG;
+                               insn->a_reg = BPF_REG_A;
+                               insn->x_reg = BPF_REG_X;
                                insn->imm = fp->k;
                                bpf_src = BPF_SRC(fp->code);
                        }
@@ -1002,7 +955,7 @@ do_pass:
                        if (fp->jf == 0) {
                                insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
                                target = i + fp->jt + 1;
-                               EMIT_JMP;
+                               BPF_EMIT_JMP;
                                break;
                        }
 
@@ -1010,127 +963,94 @@ do_pass:
                        if (fp->jt == 0 && BPF_OP(fp->code) == BPF_JEQ) {
                                insn->code = BPF_JMP | BPF_JNE | bpf_src;
                                target = i + fp->jf + 1;
-                               EMIT_JMP;
+                               BPF_EMIT_JMP;
                                break;
                        }
 
                        /* Other jumps are mapped into two insns: Jxx and JA. */
                        target = i + fp->jt + 1;
                        insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
-                       EMIT_JMP;
+                       BPF_EMIT_JMP;
                        insn++;
 
                        insn->code = BPF_JMP | BPF_JA;
                        target = i + fp->jf + 1;
-                       EMIT_JMP;
+                       BPF_EMIT_JMP;
                        break;
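                        /* Worked example (illustration only): a classic insn
                         * at index i such as "jeq #ETH_P_IP, jt=2, jf=5" is
                         * emitted as BPF_JMP|BPF_JEQ|BPF_K with target
                         * i + 2 + 1 followed by BPF_JMP|BPF_JA with target
                         * i + 5 + 1; BPF_EMIT_JMP then rebases each target
                         * onto the position of the newly emitted insn.
                         */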
 
                /* ldxb 4 * ([14] & 0xf) is remapped into 6 insns. */
                case BPF_LDX | BPF_MSH | BPF_B:
-                       insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
-                       insn->a_reg = TMP_REG;
-                       insn->x_reg = A_REG;
-                       insn++;
-
-                       insn->code = BPF_LD | BPF_ABS | BPF_B;
-                       insn->a_reg = A_REG;
-                       insn->imm = fp->k;
-                       insn++;
-
-                       insn->code = BPF_ALU | BPF_AND | BPF_K;
-                       insn->a_reg = A_REG;
-                       insn->imm = 0xf;
-                       insn++;
-
-                       insn->code = BPF_ALU | BPF_LSH | BPF_K;
-                       insn->a_reg = A_REG;
-                       insn->imm = 2;
-                       insn++;
-
-                       insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
-                       insn->a_reg = X_REG;
-                       insn->x_reg = A_REG;
-                       insn++;
-
-                       insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
-                       insn->a_reg = A_REG;
-                       insn->x_reg = TMP_REG;
+                       /* tmp = A */
+                       *insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_A);
+                       /* A = BPF_R0 = *(u8 *) (skb->data + K) */
+                       *insn++ = BPF_LD_ABS(BPF_B, fp->k);
+                       /* A &= 0xf */
+                       *insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 0xf);
+                       /* A <<= 2 */
+                       *insn++ = BPF_ALU32_IMM(BPF_LSH, BPF_REG_A, 2);
+                       /* X = A */
+                       *insn++ = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
+                       /* A = tmp */
+                       *insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_TMP);
                        break;
 
                /* RET_K, RET_A are remapped into 2 insns. */
                case BPF_RET | BPF_A:
                case BPF_RET | BPF_K:
-                       insn->code = BPF_ALU | BPF_MOV |
-                                    (BPF_RVAL(fp->code) == BPF_K ?
-                                     BPF_K : BPF_X);
-                       insn->a_reg = 0;
-                       insn->x_reg = A_REG;
-                       insn->imm = fp->k;
-                       insn++;
-
-                       insn->code = BPF_JMP | BPF_EXIT;
+                       *insn++ = BPF_MOV32_RAW(BPF_RVAL(fp->code) == BPF_K ?
+                                               BPF_K : BPF_X, BPF_REG_0,
+                                               BPF_REG_A, fp->k);
+                       *insn = BPF_EXIT_INSN();
                        break;
 
                /* Store to stack. */
                case BPF_ST:
                case BPF_STX:
-                       insn->code = BPF_STX | BPF_MEM | BPF_W;
-                       insn->a_reg = FP_REG;
-                       insn->x_reg = fp->code == BPF_ST ? A_REG : X_REG;
-                       insn->off = -(BPF_MEMWORDS - fp->k) * 4;
+                       *insn = BPF_STX_MEM(BPF_W, BPF_REG_FP, BPF_CLASS(fp->code) ==
+                                           BPF_ST ? BPF_REG_A : BPF_REG_X,
+                                           -(BPF_MEMWORDS - fp->k) * 4);
                        break;
 
                /* Load from stack. */
                case BPF_LD | BPF_MEM:
                case BPF_LDX | BPF_MEM:
-                       insn->code = BPF_LDX | BPF_MEM | BPF_W;
-                       insn->a_reg = BPF_CLASS(fp->code) == BPF_LD ?
-                                     A_REG : X_REG;
-                       insn->x_reg = FP_REG;
-                       insn->off = -(BPF_MEMWORDS - fp->k) * 4;
+                       *insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD  ?
+                                           BPF_REG_A : BPF_REG_X, BPF_REG_FP,
+                                           -(BPF_MEMWORDS - fp->k) * 4);
                        break;
 
                /* A = K or X = K */
                case BPF_LD | BPF_IMM:
                case BPF_LDX | BPF_IMM:
-                       insn->code = BPF_ALU | BPF_MOV | BPF_K;
-                       insn->a_reg = BPF_CLASS(fp->code) == BPF_LD ?
-                                     A_REG : X_REG;
-                       insn->imm = fp->k;
+                       *insn = BPF_MOV32_IMM(BPF_CLASS(fp->code) == BPF_LD ?
+                                             BPF_REG_A : BPF_REG_X, fp->k);
                        break;
 
                /* X = A */
                case BPF_MISC | BPF_TAX:
-                       insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
-                       insn->a_reg = X_REG;
-                       insn->x_reg = A_REG;
+                       *insn = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
                        break;
 
                /* A = X */
                case BPF_MISC | BPF_TXA:
-                       insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
-                       insn->a_reg = A_REG;
-                       insn->x_reg = X_REG;
+                       *insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_X);
                        break;
 
                /* A = skb->len or X = skb->len */
                case BPF_LD | BPF_W | BPF_LEN:
                case BPF_LDX | BPF_W | BPF_LEN:
-                       insn->code = BPF_LDX | BPF_MEM | BPF_W;
-                       insn->a_reg = BPF_CLASS(fp->code) == BPF_LD ?
-                                     A_REG : X_REG;
-                       insn->x_reg = CTX_REG;
-                       insn->off = offsetof(struct sk_buff, len);
+                       *insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ?
+                                           BPF_REG_A : BPF_REG_X, BPF_REG_CTX,
+                                           offsetof(struct sk_buff, len));
                        break;
 
-               /* access seccomp_data fields */
+               /* Access seccomp_data fields. */
                case BPF_LDX | BPF_ABS | BPF_W:
-                       insn->code = BPF_LDX | BPF_MEM | BPF_W;
-                       insn->a_reg = A_REG;
-                       insn->x_reg = CTX_REG;
-                       insn->off = fp->k;
+                       /* A = *(u32 *) (ctx + K) */
+                       *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX, fp->k);
                        break;
 
+               /* Unknown instruction. */
                default:
                        goto err;
                }
@@ -1139,7 +1059,6 @@ do_pass:
                if (new_prog)
                        memcpy(new_insn, tmp_insns,
                               sizeof(*insn) * (insn - tmp_insns));
-
                new_insn += insn - tmp_insns;
        }
 
@@ -1154,7 +1073,6 @@ do_pass:
                new_flen = new_insn - new_prog;
                if (pass > 2)
                        goto err;
-
                goto do_pass;
        }
 
@@ -1178,44 +1096,46 @@ err:
  */
 static int check_load_and_stores(struct sock_filter *filter, int flen)
 {
-       u16 *masks, memvalid = 0; /* one bit per cell, 16 cells */
+       u16 *masks, memvalid = 0; /* One bit per cell, 16 cells */
        int pc, ret = 0;
 
        BUILD_BUG_ON(BPF_MEMWORDS > 16);
+
        masks = kmalloc(flen * sizeof(*masks), GFP_KERNEL);
        if (!masks)
                return -ENOMEM;
+
        memset(masks, 0xff, flen * sizeof(*masks));
 
        for (pc = 0; pc < flen; pc++) {
                memvalid &= masks[pc];
 
                switch (filter[pc].code) {
-               case BPF_S_ST:
-               case BPF_S_STX:
+               case BPF_ST:
+               case BPF_STX:
                        memvalid |= (1 << filter[pc].k);
                        break;
-               case BPF_S_LD_MEM:
-               case BPF_S_LDX_MEM:
+               case BPF_LD | BPF_MEM:
+               case BPF_LDX | BPF_MEM:
                        if (!(memvalid & (1 << filter[pc].k))) {
                                ret = -EINVAL;
                                goto error;
                        }
                        break;
-               case BPF_S_JMP_JA:
-                       /* a jump must set masks on target */
+               case BPF_JMP | BPF_JA:
+                       /* A jump must set masks on target */
                        masks[pc + 1 + filter[pc].k] &= memvalid;
                        memvalid = ~0;
                        break;
-               case BPF_S_JMP_JEQ_K:
-               case BPF_S_JMP_JEQ_X:
-               case BPF_S_JMP_JGE_K:
-               case BPF_S_JMP_JGE_X:
-               case BPF_S_JMP_JGT_K:
-               case BPF_S_JMP_JGT_X:
-               case BPF_S_JMP_JSET_X:
-               case BPF_S_JMP_JSET_K:
-                       /* a jump must set masks on targets */
+               case BPF_JMP | BPF_JEQ | BPF_K:
+               case BPF_JMP | BPF_JEQ | BPF_X:
+               case BPF_JMP | BPF_JGE | BPF_K:
+               case BPF_JMP | BPF_JGE | BPF_X:
+               case BPF_JMP | BPF_JGT | BPF_K:
+               case BPF_JMP | BPF_JGT | BPF_X:
+               case BPF_JMP | BPF_JSET | BPF_K:
+               case BPF_JMP | BPF_JSET | BPF_X:
+                       /* A jump must set masks on targets */
                        masks[pc + 1 + filter[pc].jt] &= memvalid;
                        masks[pc + 1 + filter[pc].jf] &= memvalid;
                        memvalid = ~0;
@@ -1227,6 +1147,72 @@ error:
        return ret;
 }
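As a hypothetical example of what this check rejects (classic BPF macros from <linux/filter.h> assumed), a filter that reads scratch cell M[3] before any BPF_ST/BPF_STX has written it fails with -EINVAL:

        struct sock_filter bad_prog[] = {
                BPF_STMT(BPF_LD | BPF_MEM, 3),  /* A = M[3], never stored to */
                BPF_STMT(BPF_RET | BPF_A, 0),
        };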
 
+static bool chk_code_allowed(u16 code_to_probe)
+{
+       static const bool codes[] = {
+               /* 32 bit ALU operations */
+               [BPF_ALU | BPF_ADD | BPF_K] = true,
+               [BPF_ALU | BPF_ADD | BPF_X] = true,
+               [BPF_ALU | BPF_SUB | BPF_K] = true,
+               [BPF_ALU | BPF_SUB | BPF_X] = true,
+               [BPF_ALU | BPF_MUL | BPF_K] = true,
+               [BPF_ALU | BPF_MUL | BPF_X] = true,
+               [BPF_ALU | BPF_DIV | BPF_K] = true,
+               [BPF_ALU | BPF_DIV | BPF_X] = true,
+               [BPF_ALU | BPF_MOD | BPF_K] = true,
+               [BPF_ALU | BPF_MOD | BPF_X] = true,
+               [BPF_ALU | BPF_AND | BPF_K] = true,
+               [BPF_ALU | BPF_AND | BPF_X] = true,
+               [BPF_ALU | BPF_OR | BPF_K] = true,
+               [BPF_ALU | BPF_OR | BPF_X] = true,
+               [BPF_ALU | BPF_XOR | BPF_K] = true,
+               [BPF_ALU | BPF_XOR | BPF_X] = true,
+               [BPF_ALU | BPF_LSH | BPF_K] = true,
+               [BPF_ALU | BPF_LSH | BPF_X] = true,
+               [BPF_ALU | BPF_RSH | BPF_K] = true,
+               [BPF_ALU | BPF_RSH | BPF_X] = true,
+               [BPF_ALU | BPF_NEG] = true,
+               /* Load instructions */
+               [BPF_LD | BPF_W | BPF_ABS] = true,
+               [BPF_LD | BPF_H | BPF_ABS] = true,
+               [BPF_LD | BPF_B | BPF_ABS] = true,
+               [BPF_LD | BPF_W | BPF_LEN] = true,
+               [BPF_LD | BPF_W | BPF_IND] = true,
+               [BPF_LD | BPF_H | BPF_IND] = true,
+               [BPF_LD | BPF_B | BPF_IND] = true,
+               [BPF_LD | BPF_IMM] = true,
+               [BPF_LD | BPF_MEM] = true,
+               [BPF_LDX | BPF_W | BPF_LEN] = true,
+               [BPF_LDX | BPF_B | BPF_MSH] = true,
+               [BPF_LDX | BPF_IMM] = true,
+               [BPF_LDX | BPF_MEM] = true,
+               /* Store instructions */
+               [BPF_ST] = true,
+               [BPF_STX] = true,
+               /* Misc instructions */
+               [BPF_MISC | BPF_TAX] = true,
+               [BPF_MISC | BPF_TXA] = true,
+               /* Return instructions */
+               [BPF_RET | BPF_K] = true,
+               [BPF_RET | BPF_A] = true,
+               /* Jump instructions */
+               [BPF_JMP | BPF_JA] = true,
+               [BPF_JMP | BPF_JEQ | BPF_K] = true,
+               [BPF_JMP | BPF_JEQ | BPF_X] = true,
+               [BPF_JMP | BPF_JGE | BPF_K] = true,
+               [BPF_JMP | BPF_JGE | BPF_X] = true,
+               [BPF_JMP | BPF_JGT | BPF_K] = true,
+               [BPF_JMP | BPF_JGT | BPF_X] = true,
+               [BPF_JMP | BPF_JSET | BPF_K] = true,
+               [BPF_JMP | BPF_JSET | BPF_X] = true,
+       };
+
+       if (code_to_probe >= ARRAY_SIZE(codes))
+               return false;
+
+       return codes[code_to_probe];
+}
+
 /**
  *     sk_chk_filter - verify socket filter code
  *     @filter: filter to verify
@@ -1243,153 +1229,76 @@ error:
  */
 int sk_chk_filter(struct sock_filter *filter, unsigned int flen)
 {
-       /*
-        * Valid instructions are initialized to non-0.
-        * Invalid instructions are initialized to 0.
-        */
-       static const u8 codes[] = {
-               [BPF_ALU|BPF_ADD|BPF_K]  = BPF_S_ALU_ADD_K,
-               [BPF_ALU|BPF_ADD|BPF_X]  = BPF_S_ALU_ADD_X,
-               [BPF_ALU|BPF_SUB|BPF_K]  = BPF_S_ALU_SUB_K,
-               [BPF_ALU|BPF_SUB|BPF_X]  = BPF_S_ALU_SUB_X,
-               [BPF_ALU|BPF_MUL|BPF_K]  = BPF_S_ALU_MUL_K,
-               [BPF_ALU|BPF_MUL|BPF_X]  = BPF_S_ALU_MUL_X,
-               [BPF_ALU|BPF_DIV|BPF_X]  = BPF_S_ALU_DIV_X,
-               [BPF_ALU|BPF_MOD|BPF_K]  = BPF_S_ALU_MOD_K,
-               [BPF_ALU|BPF_MOD|BPF_X]  = BPF_S_ALU_MOD_X,
-               [BPF_ALU|BPF_AND|BPF_K]  = BPF_S_ALU_AND_K,
-               [BPF_ALU|BPF_AND|BPF_X]  = BPF_S_ALU_AND_X,
-               [BPF_ALU|BPF_OR|BPF_K]   = BPF_S_ALU_OR_K,
-               [BPF_ALU|BPF_OR|BPF_X]   = BPF_S_ALU_OR_X,
-               [BPF_ALU|BPF_XOR|BPF_K]  = BPF_S_ALU_XOR_K,
-               [BPF_ALU|BPF_XOR|BPF_X]  = BPF_S_ALU_XOR_X,
-               [BPF_ALU|BPF_LSH|BPF_K]  = BPF_S_ALU_LSH_K,
-               [BPF_ALU|BPF_LSH|BPF_X]  = BPF_S_ALU_LSH_X,
-               [BPF_ALU|BPF_RSH|BPF_K]  = BPF_S_ALU_RSH_K,
-               [BPF_ALU|BPF_RSH|BPF_X]  = BPF_S_ALU_RSH_X,
-               [BPF_ALU|BPF_NEG]        = BPF_S_ALU_NEG,
-               [BPF_LD|BPF_W|BPF_ABS]   = BPF_S_LD_W_ABS,
-               [BPF_LD|BPF_H|BPF_ABS]   = BPF_S_LD_H_ABS,
-               [BPF_LD|BPF_B|BPF_ABS]   = BPF_S_LD_B_ABS,
-               [BPF_LD|BPF_W|BPF_LEN]   = BPF_S_LD_W_LEN,
-               [BPF_LD|BPF_W|BPF_IND]   = BPF_S_LD_W_IND,
-               [BPF_LD|BPF_H|BPF_IND]   = BPF_S_LD_H_IND,
-               [BPF_LD|BPF_B|BPF_IND]   = BPF_S_LD_B_IND,
-               [BPF_LD|BPF_IMM]         = BPF_S_LD_IMM,
-               [BPF_LDX|BPF_W|BPF_LEN]  = BPF_S_LDX_W_LEN,
-               [BPF_LDX|BPF_B|BPF_MSH]  = BPF_S_LDX_B_MSH,
-               [BPF_LDX|BPF_IMM]        = BPF_S_LDX_IMM,
-               [BPF_MISC|BPF_TAX]       = BPF_S_MISC_TAX,
-               [BPF_MISC|BPF_TXA]       = BPF_S_MISC_TXA,
-               [BPF_RET|BPF_K]          = BPF_S_RET_K,
-               [BPF_RET|BPF_A]          = BPF_S_RET_A,
-               [BPF_ALU|BPF_DIV|BPF_K]  = BPF_S_ALU_DIV_K,
-               [BPF_LD|BPF_MEM]         = BPF_S_LD_MEM,
-               [BPF_LDX|BPF_MEM]        = BPF_S_LDX_MEM,
-               [BPF_ST]                 = BPF_S_ST,
-               [BPF_STX]                = BPF_S_STX,
-               [BPF_JMP|BPF_JA]         = BPF_S_JMP_JA,
-               [BPF_JMP|BPF_JEQ|BPF_K]  = BPF_S_JMP_JEQ_K,
-               [BPF_JMP|BPF_JEQ|BPF_X]  = BPF_S_JMP_JEQ_X,
-               [BPF_JMP|BPF_JGE|BPF_K]  = BPF_S_JMP_JGE_K,
-               [BPF_JMP|BPF_JGE|BPF_X]  = BPF_S_JMP_JGE_X,
-               [BPF_JMP|BPF_JGT|BPF_K]  = BPF_S_JMP_JGT_K,
-               [BPF_JMP|BPF_JGT|BPF_X]  = BPF_S_JMP_JGT_X,
-               [BPF_JMP|BPF_JSET|BPF_K] = BPF_S_JMP_JSET_K,
-               [BPF_JMP|BPF_JSET|BPF_X] = BPF_S_JMP_JSET_X,
-       };
-       int pc;
        bool anc_found;
+       int pc;
 
        if (flen == 0 || flen > BPF_MAXINSNS)
                return -EINVAL;
 
-       /* check the filter code now */
+       /* Check the filter code now */
        for (pc = 0; pc < flen; pc++) {
                struct sock_filter *ftest = &filter[pc];
-               u16 code = ftest->code;
 
-               if (code >= ARRAY_SIZE(codes))
-                       return -EINVAL;
-               code = codes[code];
-               if (!code)
+               /* May we actually operate on this code? */
+               if (!chk_code_allowed(ftest->code))
                        return -EINVAL;
+
                /* Some instructions need special checks */
-               switch (code) {
-               case BPF_S_ALU_DIV_K:
-               case BPF_S_ALU_MOD_K:
-                       /* check for division by zero */
+               switch (ftest->code) {
+               case BPF_ALU | BPF_DIV | BPF_K:
+               case BPF_ALU | BPF_MOD | BPF_K:
+                       /* Check for division by zero */
                        if (ftest->k == 0)
                                return -EINVAL;
                        break;
-               case BPF_S_LD_MEM:
-               case BPF_S_LDX_MEM:
-               case BPF_S_ST:
-               case BPF_S_STX:
-                       /* check for invalid memory addresses */
+               case BPF_LD | BPF_MEM:
+               case BPF_LDX | BPF_MEM:
+               case BPF_ST:
+               case BPF_STX:
+                       /* Check for invalid memory addresses */
                        if (ftest->k >= BPF_MEMWORDS)
                                return -EINVAL;
                        break;
-               case BPF_S_JMP_JA:
-                       /*
-                        * Note, the large ftest->k might cause loops.
+               case BPF_JMP | BPF_JA:
+                       /* Note, a large ftest->k might cause loops.
                         * Compare this with conditional jumps below,
                         * where offsets are limited. --ANK (981016)
                         */
-                       if (ftest->k >= (unsigned int)(flen-pc-1))
+                       if (ftest->k >= (unsigned int)(flen - pc - 1))
                                return -EINVAL;
                        break;
-               case BPF_S_JMP_JEQ_K:
-               case BPF_S_JMP_JEQ_X:
-               case BPF_S_JMP_JGE_K:
-               case BPF_S_JMP_JGE_X:
-               case BPF_S_JMP_JGT_K:
-               case BPF_S_JMP_JGT_X:
-               case BPF_S_JMP_JSET_X:
-               case BPF_S_JMP_JSET_K:
-                       /* for conditionals both must be safe */
+               case BPF_JMP | BPF_JEQ | BPF_K:
+               case BPF_JMP | BPF_JEQ | BPF_X:
+               case BPF_JMP | BPF_JGE | BPF_K:
+               case BPF_JMP | BPF_JGE | BPF_X:
+               case BPF_JMP | BPF_JGT | BPF_K:
+               case BPF_JMP | BPF_JGT | BPF_X:
+               case BPF_JMP | BPF_JSET | BPF_K:
+               case BPF_JMP | BPF_JSET | BPF_X:
+                       /* Both conditionals must be safe */
                        if (pc + ftest->jt + 1 >= flen ||
                            pc + ftest->jf + 1 >= flen)
                                return -EINVAL;
                        break;
-               case BPF_S_LD_W_ABS:
-               case BPF_S_LD_H_ABS:
-               case BPF_S_LD_B_ABS:
+               case BPF_LD | BPF_W | BPF_ABS:
+               case BPF_LD | BPF_H | BPF_ABS:
+               case BPF_LD | BPF_B | BPF_ABS:
                        anc_found = false;
-#define ANCILLARY(CODE) case SKF_AD_OFF + SKF_AD_##CODE:       \
-                               code = BPF_S_ANC_##CODE;        \
-                               anc_found = true;               \
-                               break
-                       switch (ftest->k) {
-                       ANCILLARY(PROTOCOL);
-                       ANCILLARY(PKTTYPE);
-                       ANCILLARY(IFINDEX);
-                       ANCILLARY(NLATTR);
-                       ANCILLARY(NLATTR_NEST);
-                       ANCILLARY(MARK);
-                       ANCILLARY(QUEUE);
-                       ANCILLARY(HATYPE);
-                       ANCILLARY(RXHASH);
-                       ANCILLARY(CPU);
-                       ANCILLARY(ALU_XOR_X);
-                       ANCILLARY(VLAN_TAG);
-                       ANCILLARY(VLAN_TAG_PRESENT);
-                       ANCILLARY(PAY_OFFSET);
-                       }
-
-                       /* ancillary operation unknown or unsupported */
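+                       /* bpf_anc_helper() replaces the old ANCILLARY() switch:
+                        * it reports BPF_ANC when ftest->k selects a known
+                        * SKF_AD_* extension.
+                        */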
+                       if (bpf_anc_helper(ftest) & BPF_ANC)
+                               anc_found = true;
+                       /* Ancillary operation unknown or unsupported */
                        if (anc_found == false && ftest->k >= SKF_AD_OFF)
                                return -EINVAL;
                }
-               ftest->code = code;
        }
 
-       /* last instruction must be a RET code */
+       /* Last instruction must be a RET code */
        switch (filter[flen - 1].code) {
-       case BPF_S_RET_K:
-       case BPF_S_RET_A:
+       case BPF_RET | BPF_K:
+       case BPF_RET | BPF_A:
                return check_load_and_stores(filter, flen);
        }
+
        return -EINVAL;
 }
 EXPORT_SYMBOL(sk_chk_filter);
@@ -1434,7 +1343,7 @@ static void sk_filter_release_rcu(struct rcu_head *rcu)
        struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);
 
        sk_release_orig_filter(fp);
-       bpf_jit_free(fp);
+       sk_filter_free(fp);
 }
 
 /**
@@ -1472,7 +1381,7 @@ static struct sk_filter *__sk_migrate_realloc(struct sk_filter *fp,
 
        fp_new = sock_kmalloc(sk, len, GFP_KERNEL);
        if (fp_new) {
-               memcpy(fp_new, fp, sizeof(struct sk_filter));
+               *fp_new = *fp;
                /* As we're keeping orig_prog in fp_new along,
                 * we need to make sure we're not evicting it
                 * from the old fp.
@@ -1489,7 +1398,7 @@ static struct sk_filter *__sk_migrate_filter(struct sk_filter *fp,
 {
        struct sock_filter *old_prog;
        struct sk_filter *old_fp;
-       int i, err, new_len, old_len = fp->len;
+       int err, new_len, old_len = fp->len;
 
        /* We are free to overwrite insns et al right here as it
         * won't be used at this point in time anymore internally
@@ -1499,13 +1408,6 @@ static struct sk_filter *__sk_migrate_filter(struct sk_filter *fp,
        BUILD_BUG_ON(sizeof(struct sock_filter) !=
                     sizeof(struct sock_filter_int));
 
-       /* For now, we need to unfiddle BPF_S_* identifiers in place.
-        * This can sooner or later on be subject to removal, e.g. when
-        * JITs have been converted.
-        */
-       for (i = 0; i < fp->len; i++)
-               sk_decode_filter(&fp->insns[i], &fp->insns[i]);
-
        /* Conversion cannot happen on overlapping memory areas,
         * so we need to keep the user BPF around until the 2nd
         * pass. At this time, the user BPF is stored in fp->insns.
@@ -1534,7 +1436,6 @@ static struct sk_filter *__sk_migrate_filter(struct sk_filter *fp,
                goto out_err_free;
        }
 
-       fp->bpf_func = sk_run_filter_int_skb;
        fp->len = new_len;
 
        /* 2nd pass: remap sock_filter insns into sock_filter_int insns. */
@@ -1547,6 +1448,8 @@ static struct sk_filter *__sk_migrate_filter(struct sk_filter *fp,
                 */
                goto out_err_free;
 
+       /* The program is now internal BPF; pick interpreter or JIT runtime */
+       sk_filter_select_runtime(fp);
+
        kfree(old_prog);
        return fp;
 
@@ -1561,6 +1464,33 @@ out_err:
        return ERR_PTR(err);
 }
 
+/* Weak no-op default; architectures providing an internal BPF JIT override it */
+void __weak bpf_int_jit_compile(struct sk_filter *prog)
+{
+}
+
+/**
+ *     sk_filter_select_runtime - select execution runtime for BPF program
+ *     @fp: sk_filter populated with internal BPF program
+ *
+ * Try to JIT the internal BPF program; if no JIT is available, select the
+ * interpreter. The BPF program will be executed via the SK_RUN_FILTER() macro.
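+ *
+ * Note: bpf_int_jit_compile() is declared __weak with an empty default, so
+ * fp->bpf_func stays pointed at __sk_run_filter() unless an architecture
+ * provides an internal BPF JIT.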
+ */
+void sk_filter_select_runtime(struct sk_filter *fp)
+{
+       fp->bpf_func = (void *) __sk_run_filter;
+
+       /* Probe if internal BPF can be JITed */
+       bpf_int_jit_compile(fp);
+}
+EXPORT_SYMBOL_GPL(sk_filter_select_runtime);
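+
+/* Typical flow (illustrative): __sk_migrate_filter() converts a classic
+ * program into internal BPF insns and calls sk_filter_select_runtime();
+ * the result is then run via the SK_RUN_FILTER() macro and released with
+ * sk_filter_free().
+ */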
+
+/* Free the internal BPF program */
+void sk_filter_free(struct sk_filter *fp)
+{
+       bpf_jit_free(fp);
+}
+EXPORT_SYMBOL_GPL(sk_filter_free);
+
 static struct sk_filter *__sk_prepare_filter(struct sk_filter *fp,
                                             struct sock *sk)
 {
@@ -1603,7 +1533,7 @@ static struct sk_filter *__sk_prepare_filter(struct sk_filter *fp,
  * a negative errno code is returned. On success the return is zero.
  */
 int sk_unattached_filter_create(struct sk_filter **pfp,
-                               struct sock_fprog *fprog)
+                               struct sock_fprog_kern *fprog)
 {
        unsigned int fsize = sk_filter_proglen(fprog);
        struct sk_filter *fp;
@@ -1724,83 +1654,6 @@ int sk_detach_filter(struct sock *sk)
 }
 EXPORT_SYMBOL_GPL(sk_detach_filter);
 
-void sk_decode_filter(struct sock_filter *filt, struct sock_filter *to)
-{
-       static const u16 decodes[] = {
-               [BPF_S_ALU_ADD_K]       = BPF_ALU|BPF_ADD|BPF_K,
-               [BPF_S_ALU_ADD_X]       = BPF_ALU|BPF_ADD|BPF_X,
-               [BPF_S_ALU_SUB_K]       = BPF_ALU|BPF_SUB|BPF_K,
-               [BPF_S_ALU_SUB_X]       = BPF_ALU|BPF_SUB|BPF_X,
-               [BPF_S_ALU_MUL_K]       = BPF_ALU|BPF_MUL|BPF_K,
-               [BPF_S_ALU_MUL_X]       = BPF_ALU|BPF_MUL|BPF_X,
-               [BPF_S_ALU_DIV_X]       = BPF_ALU|BPF_DIV|BPF_X,
-               [BPF_S_ALU_MOD_K]       = BPF_ALU|BPF_MOD|BPF_K,
-               [BPF_S_ALU_MOD_X]       = BPF_ALU|BPF_MOD|BPF_X,
-               [BPF_S_ALU_AND_K]       = BPF_ALU|BPF_AND|BPF_K,
-               [BPF_S_ALU_AND_X]       = BPF_ALU|BPF_AND|BPF_X,
-               [BPF_S_ALU_OR_K]        = BPF_ALU|BPF_OR|BPF_K,
-               [BPF_S_ALU_OR_X]        = BPF_ALU|BPF_OR|BPF_X,
-               [BPF_S_ALU_XOR_K]       = BPF_ALU|BPF_XOR|BPF_K,
-               [BPF_S_ALU_XOR_X]       = BPF_ALU|BPF_XOR|BPF_X,
-               [BPF_S_ALU_LSH_K]       = BPF_ALU|BPF_LSH|BPF_K,
-               [BPF_S_ALU_LSH_X]       = BPF_ALU|BPF_LSH|BPF_X,
-               [BPF_S_ALU_RSH_K]       = BPF_ALU|BPF_RSH|BPF_K,
-               [BPF_S_ALU_RSH_X]       = BPF_ALU|BPF_RSH|BPF_X,
-               [BPF_S_ALU_NEG]         = BPF_ALU|BPF_NEG,
-               [BPF_S_LD_W_ABS]        = BPF_LD|BPF_W|BPF_ABS,
-               [BPF_S_LD_H_ABS]        = BPF_LD|BPF_H|BPF_ABS,
-               [BPF_S_LD_B_ABS]        = BPF_LD|BPF_B|BPF_ABS,
-               [BPF_S_ANC_PROTOCOL]    = BPF_LD|BPF_B|BPF_ABS,
-               [BPF_S_ANC_PKTTYPE]     = BPF_LD|BPF_B|BPF_ABS,
-               [BPF_S_ANC_IFINDEX]     = BPF_LD|BPF_B|BPF_ABS,
-               [BPF_S_ANC_NLATTR]      = BPF_LD|BPF_B|BPF_ABS,
-               [BPF_S_ANC_NLATTR_NEST] = BPF_LD|BPF_B|BPF_ABS,
-               [BPF_S_ANC_MARK]        = BPF_LD|BPF_B|BPF_ABS,
-               [BPF_S_ANC_QUEUE]       = BPF_LD|BPF_B|BPF_ABS,
-               [BPF_S_ANC_HATYPE]      = BPF_LD|BPF_B|BPF_ABS,
-               [BPF_S_ANC_RXHASH]      = BPF_LD|BPF_B|BPF_ABS,
-               [BPF_S_ANC_CPU]         = BPF_LD|BPF_B|BPF_ABS,
-               [BPF_S_ANC_ALU_XOR_X]   = BPF_LD|BPF_B|BPF_ABS,
-               [BPF_S_ANC_VLAN_TAG]    = BPF_LD|BPF_B|BPF_ABS,
-               [BPF_S_ANC_VLAN_TAG_PRESENT] = BPF_LD|BPF_B|BPF_ABS,
-               [BPF_S_ANC_PAY_OFFSET]  = BPF_LD|BPF_B|BPF_ABS,
-               [BPF_S_LD_W_LEN]        = BPF_LD|BPF_W|BPF_LEN,
-               [BPF_S_LD_W_IND]        = BPF_LD|BPF_W|BPF_IND,
-               [BPF_S_LD_H_IND]        = BPF_LD|BPF_H|BPF_IND,
-               [BPF_S_LD_B_IND]        = BPF_LD|BPF_B|BPF_IND,
-               [BPF_S_LD_IMM]          = BPF_LD|BPF_IMM,
-               [BPF_S_LDX_W_LEN]       = BPF_LDX|BPF_W|BPF_LEN,
-               [BPF_S_LDX_B_MSH]       = BPF_LDX|BPF_B|BPF_MSH,
-               [BPF_S_LDX_IMM]         = BPF_LDX|BPF_IMM,
-               [BPF_S_MISC_TAX]        = BPF_MISC|BPF_TAX,
-               [BPF_S_MISC_TXA]        = BPF_MISC|BPF_TXA,
-               [BPF_S_RET_K]           = BPF_RET|BPF_K,
-               [BPF_S_RET_A]           = BPF_RET|BPF_A,
-               [BPF_S_ALU_DIV_K]       = BPF_ALU|BPF_DIV|BPF_K,
-               [BPF_S_LD_MEM]          = BPF_LD|BPF_MEM,
-               [BPF_S_LDX_MEM]         = BPF_LDX|BPF_MEM,
-               [BPF_S_ST]              = BPF_ST,
-               [BPF_S_STX]             = BPF_STX,
-               [BPF_S_JMP_JA]          = BPF_JMP|BPF_JA,
-               [BPF_S_JMP_JEQ_K]       = BPF_JMP|BPF_JEQ|BPF_K,
-               [BPF_S_JMP_JEQ_X]       = BPF_JMP|BPF_JEQ|BPF_X,
-               [BPF_S_JMP_JGE_K]       = BPF_JMP|BPF_JGE|BPF_K,
-               [BPF_S_JMP_JGE_X]       = BPF_JMP|BPF_JGE|BPF_X,
-               [BPF_S_JMP_JGT_K]       = BPF_JMP|BPF_JGT|BPF_K,
-               [BPF_S_JMP_JGT_X]       = BPF_JMP|BPF_JGT|BPF_X,
-               [BPF_S_JMP_JSET_K]      = BPF_JMP|BPF_JSET|BPF_K,
-               [BPF_S_JMP_JSET_X]      = BPF_JMP|BPF_JSET|BPF_X,
-       };
-       u16 code;
-
-       code = filt->code;
-
-       to->code = decodes[code];
-       to->jt = filt->jt;
-       to->jf = filt->jf;
-       to->k = filt->k;
-}
-
 int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf,
                  unsigned int len)
 {