1 /* bpf_jit_comp.c : BPF JIT compiler
3 * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com)
4 * Internal BPF Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; version 2
11 #include <linux/netdevice.h>
12 #include <linux/filter.h>
13 #include <linux/if_vlan.h>
14 #include <asm/cacheflush.h>
16 int bpf_jit_enable __read_mostly;
19 * assembly code in arch/x86/net/bpf_jit.S
21 extern u8 sk_load_word[], sk_load_half[], sk_load_byte[];
22 extern u8 sk_load_word_positive_offset[], sk_load_half_positive_offset[];
23 extern u8 sk_load_byte_positive_offset[];
24 extern u8 sk_load_word_negative_offset[], sk_load_half_negative_offset[];
25 extern u8 sk_load_byte_negative_offset[];
27 static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
40 #define EMIT(bytes, len) do { prog = emit_code(prog, bytes, len); } while (0)
42 #define EMIT1(b1) EMIT(b1, 1)
43 #define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2)
44 #define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
45 #define EMIT4(b1, b2, b3, b4) EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
46 #define EMIT1_off32(b1, off) \
47 do {EMIT1(b1); EMIT(off, 4); } while (0)
48 #define EMIT2_off32(b1, b2, off) \
49 do {EMIT2(b1, b2); EMIT(off, 4); } while (0)
50 #define EMIT3_off32(b1, b2, b3, off) \
51 do {EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
52 #define EMIT4_off32(b1, b2, b3, b4, off) \
53 do {EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)
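/* For illustration (not part of the emitted code): the EMIT* macros simply
 * append little-endian byte groups to the output pointer 'prog'. For example,
 * EMIT3(0x48, 0x89, 0xC3) appends the bytes 48 89 C3, i.e. 'mov rbx, rax'
 * (REX.W prefix, opcode 0x89 = MOV r/m64,r64, ModRM 0xC3), and
 * EMIT1_off32(0xE9, off) appends 'jmp rel32' with a 4-byte displacement.
 */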
55 static inline bool is_imm8(int value)
57 return value <= 127 && value >= -128;
60 static inline bool is_simm32(s64 value)
62 return value == (s64) (s32) value;
66 #define EMIT_mov(DST, SRC) \
68 EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \
71 static int bpf_size_to_x86_bytes(int bpf_size)
73 if (bpf_size == BPF_W)
75 else if (bpf_size == BPF_H)
77 else if (bpf_size == BPF_B)
79 else if (bpf_size == BPF_DW)
85 /* list of x86 conditional jump opcodes (. + s8)
86 * Add 0x10 (and an extra 0x0f prefix) to generate far jumps (. + s32)
97 static inline void bpf_flush_icache(void *start, void *end)
99 mm_segment_t old_fs = get_fs();
103 flush_icache_range((unsigned long)start, (unsigned long)end);
107 #define CHOOSE_LOAD_FUNC(K, func) \
108 ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
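/* For illustration, assuming the usual classic-BPF offsets from
 * include/uapi/linux/filter.h (SKF_LL_OFF = -0x200000, SKF_NET_OFF = -0x100000):
 * CHOOSE_LOAD_FUNC(14, sk_load_word) selects sk_load_word_positive_offset,
 * CHOOSE_LOAD_FUNC(SKF_NET_OFF + 12, sk_load_word) selects
 * sk_load_word_negative_offset, and any other negative constant falls back
 * to the generic sk_load_word helper.
 */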
110 /* pick a register outside of BPF range for JIT internal work */
111 #define AUX_REG (MAX_BPF_REG + 1)
113 /* the following table maps BPF registers to x64 registers.
114 * x64 register r12 is unused, since if used as a base address register
115 * in load/store instructions, it always needs an extra byte of encoding
117 static const int reg2hex[] = {
118 [BPF_REG_0] = 0, /* rax */
119 [BPF_REG_1] = 7, /* rdi */
120 [BPF_REG_2] = 6, /* rsi */
121 [BPF_REG_3] = 2, /* rdx */
122 [BPF_REG_4] = 1, /* rcx */
123 [BPF_REG_5] = 0, /* r8 */
124 [BPF_REG_6] = 3, /* rbx callee saved */
125 [BPF_REG_7] = 5, /* r13 callee saved */
126 [BPF_REG_8] = 6, /* r14 callee saved */
127 [BPF_REG_9] = 7, /* r15 callee saved */
128 [BPF_REG_FP] = 5, /* rbp readonly */
129 [AUX_REG] = 3, /* r11 temp register */
132 /* is_ereg() == true if BPF register 'reg' maps to x64 r8..r15
133 * which need an extra byte of encoding.
134 * rax,rcx,...,rbp have simpler encoding
136 static inline bool is_ereg(u32 reg)
138 if (reg == BPF_REG_5 || reg == AUX_REG ||
139 (reg >= BPF_REG_7 && reg <= BPF_REG_9))
145 /* add modifiers if 'reg' maps to x64 registers r8..r15 */
146 static inline u8 add_1mod(u8 byte, u32 reg)
153 static inline u8 add_2mod(u8 byte, u32 r1, u32 r2)
162 /* encode 'dst_reg' register into x64 opcode 'byte' */
163 static inline u8 add_1reg(u8 byte, u32 dst_reg)
165 return byte + reg2hex[dst_reg];
168 /* encode 'dst_reg' and 'src_reg' registers into x64 opcode 'byte' */
169 static inline u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
171 return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3);
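/* Worked example (illustrative): EMIT_mov(BPF_REG_0, BPF_REG_1) expands to
 * EMIT3(add_2mod(0x48, R0, R1), 0x89, add_2reg(0xC0, R0, R1)). Neither
 * register is an extended one, so the REX byte stays 0x48 (REX.W), and the
 * ModRM byte is 0xC0 + reg2hex[R0] + (reg2hex[R1] << 3) = 0xC0 + 0 + (7 << 3)
 * = 0xF8, giving the bytes 48 89 F8, i.e. 'mov rax, rdi'.
 */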
174 static void jit_fill_hole(void *area, unsigned int size)
176 /* fill whole space with int3 instructions */
177 memset(area, 0xcc, size);
181 unsigned int cleanup_addr; /* epilogue code offset */
185 static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
186 int oldproglen, struct jit_context *ctx)
188 struct bpf_insn *insn = bpf_prog->insnsi;
189 int insn_cnt = bpf_prog->len;
194 int stacksize = MAX_BPF_STACK +
195 32 /* space for rbx, r13, r14, r15 */ +
196 8 /* space for skb_copy_bits() buffer */;
198 EMIT1(0x55); /* push rbp */
199 EMIT3(0x48, 0x89, 0xE5); /* mov rbp,rsp */
201 /* sub rsp, stacksize */
202 EMIT3_off32(0x48, 0x81, 0xEC, stacksize);
204 /* all classic BPF filters use R6(rbx), so save it */
206 /* mov qword ptr [rbp-X],rbx */
207 EMIT3_off32(0x48, 0x89, 0x9D, -stacksize);
209 /* bpf_convert_filter() maps classic BPF register X to R7 and uses R8
210 * as a temporary, so all tcpdump filters need to spill/fill R7(r13) and
211 * R8(r14). The R9(r15) spill could be made conditional, but there is only
212 * one 'bpf_error' return path out of the helper functions inside bpf_jit.S.
213 * The overhead of the extra spill is negligible for any filter other
214 * than synthetic ones, so it is not worth adding complexity.
217 /* mov qword ptr [rbp-X],r13 */
218 EMIT3_off32(0x4C, 0x89, 0xAD, -stacksize + 8);
219 /* mov qword ptr [rbp-X],r14 */
220 EMIT3_off32(0x4C, 0x89, 0xB5, -stacksize + 16);
221 /* mov qword ptr [rbp-X],r15 */
222 EMIT3_off32(0x4C, 0x89, 0xBD, -stacksize + 24);
224 /* clear A and X registers */
225 EMIT2(0x31, 0xc0); /* xor eax, eax */
226 EMIT3(0x4D, 0x31, 0xED); /* xor r13, r13 */
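/* Rough frame layout after the prologue (illustrative; assuming
 * MAX_BPF_STACK = 512, so stacksize = 552):
 *   rbp-1   .. rbp-512   BPF program stack, addressed through BPF_REG_FP (rbp)
 *   rbp-513 .. rbp-520   8-byte scratch slot for the skb_copy_bits() path
 *                        (the exact position of this slot is an assumption here)
 *   rbp-521 .. rbp-528   saved r15
 *   rbp-529 .. rbp-536   saved r14
 *   rbp-537 .. rbp-544   saved r13
 *   rbp-545 .. rbp-552   saved rbx
 */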
228 if (ctx->seen_ld_abs) {
229 /* r9d : skb->len - skb->data_len (headlen)
232 if (is_imm8(offsetof(struct sk_buff, len)))
233 /* mov %r9d, off8(%rdi) */
234 EMIT4(0x44, 0x8b, 0x4f,
235 offsetof(struct sk_buff, len));
237 /* mov %r9d, off32(%rdi) */
238 EMIT3_off32(0x44, 0x8b, 0x8f,
239 offsetof(struct sk_buff, len));
241 if (is_imm8(offsetof(struct sk_buff, data_len)))
242 /* sub %r9d, off8(%rdi) */
243 EMIT4(0x44, 0x2b, 0x4f,
244 offsetof(struct sk_buff, data_len));
246 EMIT3_off32(0x44, 0x2b, 0x8f,
247 offsetof(struct sk_buff, data_len));
249 if (is_imm8(offsetof(struct sk_buff, data)))
250 /* mov %r10, off8(%rdi) */
251 EMIT4(0x4c, 0x8b, 0x57,
252 offsetof(struct sk_buff, data));
254 /* mov %r10, off32(%rdi) */
255 EMIT3_off32(0x4c, 0x8b, 0x97,
256 offsetof(struct sk_buff, data));
259 for (i = 0; i < insn_cnt; i++, insn++) {
260 const s32 imm32 = insn->imm;
261 u32 dst_reg = insn->dst_reg;
262 u32 src_reg = insn->src_reg;
263 u8 b1 = 0, b2 = 0, b3 = 0;
269 switch (insn->code) {
271 case BPF_ALU | BPF_ADD | BPF_X:
272 case BPF_ALU | BPF_SUB | BPF_X:
273 case BPF_ALU | BPF_AND | BPF_X:
274 case BPF_ALU | BPF_OR | BPF_X:
275 case BPF_ALU | BPF_XOR | BPF_X:
276 case BPF_ALU64 | BPF_ADD | BPF_X:
277 case BPF_ALU64 | BPF_SUB | BPF_X:
278 case BPF_ALU64 | BPF_AND | BPF_X:
279 case BPF_ALU64 | BPF_OR | BPF_X:
280 case BPF_ALU64 | BPF_XOR | BPF_X:
281 switch (BPF_OP(insn->code)) {
282 case BPF_ADD: b2 = 0x01; break;
283 case BPF_SUB: b2 = 0x29; break;
284 case BPF_AND: b2 = 0x21; break;
285 case BPF_OR: b2 = 0x09; break;
286 case BPF_XOR: b2 = 0x31; break;
288 if (BPF_CLASS(insn->code) == BPF_ALU64)
289 EMIT1(add_2mod(0x48, dst_reg, src_reg));
290 else if (is_ereg(dst_reg) || is_ereg(src_reg))
291 EMIT1(add_2mod(0x40, dst_reg, src_reg));
292 EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg));
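/* Example (illustrative): BPF_ALU64 | BPF_ADD | BPF_X with dst_reg = R1 (rdi)
 * and src_reg = R2 (rsi) emits 48 01 F7, i.e. 'add rdi, rsi'; with
 * dst_reg = R8 (r14) and src_reg = R0 (rax) the REX byte picks up REX.B,
 * giving 49 01 C6, i.e. 'add r14, rax'.
 */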
296 case BPF_ALU64 | BPF_MOV | BPF_X:
297 EMIT_mov(dst_reg, src_reg);
301 case BPF_ALU | BPF_MOV | BPF_X:
302 if (is_ereg(dst_reg) || is_ereg(src_reg))
303 EMIT1(add_2mod(0x40, dst_reg, src_reg));
304 EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg));
308 case BPF_ALU | BPF_NEG:
309 case BPF_ALU64 | BPF_NEG:
310 if (BPF_CLASS(insn->code) == BPF_ALU64)
311 EMIT1(add_1mod(0x48, dst_reg));
312 else if (is_ereg(dst_reg))
313 EMIT1(add_1mod(0x40, dst_reg));
314 EMIT2(0xF7, add_1reg(0xD8, dst_reg));
317 case BPF_ALU | BPF_ADD | BPF_K:
318 case BPF_ALU | BPF_SUB | BPF_K:
319 case BPF_ALU | BPF_AND | BPF_K:
320 case BPF_ALU | BPF_OR | BPF_K:
321 case BPF_ALU | BPF_XOR | BPF_K:
322 case BPF_ALU64 | BPF_ADD | BPF_K:
323 case BPF_ALU64 | BPF_SUB | BPF_K:
324 case BPF_ALU64 | BPF_AND | BPF_K:
325 case BPF_ALU64 | BPF_OR | BPF_K:
326 case BPF_ALU64 | BPF_XOR | BPF_K:
327 if (BPF_CLASS(insn->code) == BPF_ALU64)
328 EMIT1(add_1mod(0x48, dst_reg));
329 else if (is_ereg(dst_reg))
330 EMIT1(add_1mod(0x40, dst_reg));
332 switch (BPF_OP(insn->code)) {
333 case BPF_ADD: b3 = 0xC0; break;
334 case BPF_SUB: b3 = 0xE8; break;
335 case BPF_AND: b3 = 0xE0; break;
336 case BPF_OR: b3 = 0xC8; break;
337 case BPF_XOR: b3 = 0xF0; break;
341 EMIT3(0x83, add_1reg(b3, dst_reg), imm32);
343 EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32);
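/* Example (illustrative): BPF_ALU64 | BPF_ADD | BPF_K with dst_reg = R1 and
 * imm = 1 satisfies is_imm8(), so the short form 48 83 C7 01 ('add rdi, 1')
 * is used; imm = 128 does not fit in a signed byte and takes the long form
 * 48 81 C7 80 00 00 00 ('add rdi, 128').
 */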
346 case BPF_ALU64 | BPF_MOV | BPF_K:
347 /* optimization: if imm32 is positive,
348 * use 'mov eax, imm32' (which zero-extends imm32)
352 /* 'mov rax, imm32' sign extends imm32 */
353 b1 = add_1mod(0x48, dst_reg);
356 EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32);
360 case BPF_ALU | BPF_MOV | BPF_K:
361 /* mov %eax, imm32 */
362 if (is_ereg(dst_reg))
363 EMIT1(add_1mod(0x40, dst_reg));
364 EMIT1_off32(add_1reg(0xB8, dst_reg), imm32);
367 case BPF_LD | BPF_IMM | BPF_DW:
368 if (insn[1].code != 0 || insn[1].src_reg != 0 ||
369 insn[1].dst_reg != 0 || insn[1].off != 0) {
370 /* verifier must catch invalid insns */
371 pr_err("invalid BPF_LD_IMM64 insn\n");
375 /* movabsq %rax, imm64 */
376 EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg));
377 EMIT(insn[0].imm, 4);
378 EMIT(insn[1].imm, 4);
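/* Example (illustrative): a BPF_LD_IMM64 loading 0x1122334455667788 into R0
 * spans two BPF instructions whose imm fields hold the low and high 32-bit
 * halves; the JIT emits 48 B8 followed by the 8 immediate bytes in
 * little-endian order, i.e. 'movabs rax, 0x1122334455667788'.
 */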
384 /* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */
385 case BPF_ALU | BPF_MOD | BPF_X:
386 case BPF_ALU | BPF_DIV | BPF_X:
387 case BPF_ALU | BPF_MOD | BPF_K:
388 case BPF_ALU | BPF_DIV | BPF_K:
389 case BPF_ALU64 | BPF_MOD | BPF_X:
390 case BPF_ALU64 | BPF_DIV | BPF_X:
391 case BPF_ALU64 | BPF_MOD | BPF_K:
392 case BPF_ALU64 | BPF_DIV | BPF_K:
393 EMIT1(0x50); /* push rax */
394 EMIT1(0x52); /* push rdx */
396 if (BPF_SRC(insn->code) == BPF_X)
397 /* mov r11, src_reg */
398 EMIT_mov(AUX_REG, src_reg);
401 EMIT3_off32(0x49, 0xC7, 0xC3, imm32);
403 /* mov rax, dst_reg */
404 EMIT_mov(BPF_REG_0, dst_reg);
407 * equivalent to 'xor rdx, rdx', but one byte less
411 if (BPF_SRC(insn->code) == BPF_X) {
412 /* if (src_reg == 0) return 0 */
415 EMIT4(0x49, 0x83, 0xFB, 0x00);
417 /* jne .+9 (skip over pop, pop, xor and jmp) */
418 EMIT2(X86_JNE, 1 + 1 + 2 + 5);
419 EMIT1(0x5A); /* pop rdx */
420 EMIT1(0x58); /* pop rax */
421 EMIT2(0x31, 0xc0); /* xor eax, eax */
424 * addrs[i] - 11, because there are 11 bytes
425 * after this insn: div, mov, pop, pop, mov
427 jmp_offset = ctx->cleanup_addr - (addrs[i] - 11);
428 EMIT1_off32(0xE9, jmp_offset);
431 if (BPF_CLASS(insn->code) == BPF_ALU64)
433 EMIT3(0x49, 0xF7, 0xF3);
436 EMIT3(0x41, 0xF7, 0xF3);
438 if (BPF_OP(insn->code) == BPF_MOD)
440 EMIT3(0x49, 0x89, 0xD3);
443 EMIT3(0x49, 0x89, 0xC3);
445 EMIT1(0x5A); /* pop rdx */
446 EMIT1(0x58); /* pop rax */
448 /* mov dst_reg, r11 */
449 EMIT_mov(dst_reg, AUX_REG);
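/* For BPF_ALU64 | BPF_DIV | BPF_X the emitted sequence is therefore roughly:
 *   push rax; push rdx
 *   mov r11, src_reg
 *   mov rax, dst_reg
 *   xor edx, edx
 *   cmp r11, 0; jne .+9
 *   pop rdx; pop rax; xor eax, eax; jmp cleanup_addr   (divide by zero -> return 0)
 *   div r11
 *   mov r11, rdx (MOD) or mov r11, rax (DIV)
 *   pop rdx; pop rax
 *   mov dst_reg, r11
 * rax/rdx are saved and restored because x86 div implicitly uses rdx:rax,
 * which may themselves be live BPF registers (R0 and R3).
 */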
452 case BPF_ALU | BPF_MUL | BPF_K:
453 case BPF_ALU | BPF_MUL | BPF_X:
454 case BPF_ALU64 | BPF_MUL | BPF_K:
455 case BPF_ALU64 | BPF_MUL | BPF_X:
456 EMIT1(0x50); /* push rax */
457 EMIT1(0x52); /* push rdx */
459 /* mov r11, dst_reg */
460 EMIT_mov(AUX_REG, dst_reg);
462 if (BPF_SRC(insn->code) == BPF_X)
463 /* mov rax, src_reg */
464 EMIT_mov(BPF_REG_0, src_reg);
467 EMIT3_off32(0x48, 0xC7, 0xC0, imm32);
469 if (BPF_CLASS(insn->code) == BPF_ALU64)
470 EMIT1(add_1mod(0x48, AUX_REG));
471 else if (is_ereg(AUX_REG))
472 EMIT1(add_1mod(0x40, AUX_REG));
474 EMIT2(0xF7, add_1reg(0xE0, AUX_REG));
477 EMIT_mov(AUX_REG, BPF_REG_0);
479 EMIT1(0x5A); /* pop rdx */
480 EMIT1(0x58); /* pop rax */
482 /* mov dst_reg, r11 */
483 EMIT_mov(dst_reg, AUX_REG);
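/* As with DIV/MOD, rax and rdx are saved around the operation because x86
 * 'mul' writes its result into rdx:rax; the product is staged in r11 and
 * only then copied into dst_reg, so dst_reg may safely be rax or rdx itself.
 */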
487 case BPF_ALU | BPF_LSH | BPF_K:
488 case BPF_ALU | BPF_RSH | BPF_K:
489 case BPF_ALU | BPF_ARSH | BPF_K:
490 case BPF_ALU64 | BPF_LSH | BPF_K:
491 case BPF_ALU64 | BPF_RSH | BPF_K:
492 case BPF_ALU64 | BPF_ARSH | BPF_K:
493 if (BPF_CLASS(insn->code) == BPF_ALU64)
494 EMIT1(add_1mod(0x48, dst_reg));
495 else if (is_ereg(dst_reg))
496 EMIT1(add_1mod(0x40, dst_reg));
498 switch (BPF_OP(insn->code)) {
499 case BPF_LSH: b3 = 0xE0; break;
500 case BPF_RSH: b3 = 0xE8; break;
501 case BPF_ARSH: b3 = 0xF8; break;
503 EMIT3(0xC1, add_1reg(b3, dst_reg), imm32);
506 case BPF_ALU | BPF_LSH | BPF_X:
507 case BPF_ALU | BPF_RSH | BPF_X:
508 case BPF_ALU | BPF_ARSH | BPF_X:
509 case BPF_ALU64 | BPF_LSH | BPF_X:
510 case BPF_ALU64 | BPF_RSH | BPF_X:
511 case BPF_ALU64 | BPF_ARSH | BPF_X:
513 /* check for bad case when dst_reg == rcx */
514 if (dst_reg == BPF_REG_4) {
515 /* mov r11, dst_reg */
516 EMIT_mov(AUX_REG, dst_reg);
520 if (src_reg != BPF_REG_4) { /* common case */
521 EMIT1(0x51); /* push rcx */
523 /* mov rcx, src_reg */
524 EMIT_mov(BPF_REG_4, src_reg);
527 /* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */
528 if (BPF_CLASS(insn->code) == BPF_ALU64)
529 EMIT1(add_1mod(0x48, dst_reg));
530 else if (is_ereg(dst_reg))
531 EMIT1(add_1mod(0x40, dst_reg));
533 switch (BPF_OP(insn->code)) {
534 case BPF_LSH: b3 = 0xE0; break;
535 case BPF_RSH: b3 = 0xE8; break;
536 case BPF_ARSH: b3 = 0xF8; break;
538 EMIT2(0xD3, add_1reg(b3, dst_reg));
540 if (src_reg != BPF_REG_4)
541 EMIT1(0x59); /* pop rcx */
543 if (insn->dst_reg == BPF_REG_4)
544 /* mov dst_reg, r11 */
545 EMIT_mov(insn->dst_reg, AUX_REG);
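/* The rcx shuffling above exists because x86 variable shifts (shl/shr/sar
 * reg, cl) only take the count in cl: if dst_reg is R4 (rcx), the value is
 * shifted in r11 instead and copied back afterwards, and if src_reg is not
 * already rcx, rcx is saved and loaded with the count for the duration of
 * the shift.
 */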
548 case BPF_ALU | BPF_END | BPF_FROM_BE:
551 /* emit 'ror %ax, 8' to swap lower 2 bytes */
553 if (is_ereg(dst_reg))
555 EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);
558 /* emit 'bswap eax' to swap lower 4 bytes */
559 if (is_ereg(dst_reg))
563 EMIT1(add_1reg(0xC8, dst_reg));
566 /* emit 'bswap rax' to swap 8 bytes */
567 EMIT3(add_1mod(0x48, dst_reg), 0x0F,
568 add_1reg(0xC8, dst_reg));
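/* Example (illustrative): for dst_reg = R0, imm == 16 emits 66 C1 C8 08
 * ('ror ax, 8'), imm == 32 emits 0F C8 ('bswap eax'), and imm == 64 emits
 * 48 0F C8 ('bswap rax').
 */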
573 case BPF_ALU | BPF_END | BPF_FROM_LE:
576 /* ST: *(u8*)(dst_reg + off) = imm */
577 case BPF_ST | BPF_MEM | BPF_B:
578 if (is_ereg(dst_reg))
583 case BPF_ST | BPF_MEM | BPF_H:
584 if (is_ereg(dst_reg))
585 EMIT3(0x66, 0x41, 0xC7);
589 case BPF_ST | BPF_MEM | BPF_W:
590 if (is_ereg(dst_reg))
595 case BPF_ST | BPF_MEM | BPF_DW:
596 EMIT2(add_1mod(0x48, dst_reg), 0xC7);
598 st: if (is_imm8(insn->off))
599 EMIT2(add_1reg(0x40, dst_reg), insn->off);
601 EMIT1_off32(add_1reg(0x80, dst_reg), insn->off);
603 EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code)));
606 /* STX: *(u8*)(dst_reg + off) = src_reg */
607 case BPF_STX | BPF_MEM | BPF_B:
608 /* emit 'mov byte ptr [rax + off], al' */
609 if (is_ereg(dst_reg) || is_ereg(src_reg) ||
610 /* have to add extra byte for x86 SIL, DIL regs */
611 src_reg == BPF_REG_1 || src_reg == BPF_REG_2)
612 EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
616 case BPF_STX | BPF_MEM | BPF_H:
617 if (is_ereg(dst_reg) || is_ereg(src_reg))
618 EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89);
622 case BPF_STX | BPF_MEM | BPF_W:
623 if (is_ereg(dst_reg) || is_ereg(src_reg))
624 EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89);
628 case BPF_STX | BPF_MEM | BPF_DW:
629 EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89);
630 stx: if (is_imm8(insn->off))
631 EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off);
633 EMIT1_off32(add_2reg(0x80, dst_reg, src_reg),
637 /* LDX: dst_reg = *(u8*)(src_reg + off) */
638 case BPF_LDX | BPF_MEM | BPF_B:
639 /* emit 'movzx rax, byte ptr [rax + off]' */
640 EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6);
642 case BPF_LDX | BPF_MEM | BPF_H:
643 /* emit 'movzx rax, word ptr [rax + off]' */
644 EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7);
646 case BPF_LDX | BPF_MEM | BPF_W:
647 /* emit 'mov eax, dword ptr [rax+0x14]' */
648 if (is_ereg(dst_reg) || is_ereg(src_reg))
649 EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B);
653 case BPF_LDX | BPF_MEM | BPF_DW:
654 /* emit 'mov rax, qword ptr [rax+0x14]' */
655 EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B);
656 ldx: /* if insn->off == 0 we can save one extra byte, but
657 * the special case of x86 r13, which always needs an offset,
658 * is not worth the hassle
660 if (is_imm8(insn->off))
661 EMIT2(add_2reg(0x40, src_reg, dst_reg), insn->off);
663 EMIT1_off32(add_2reg(0x80, src_reg, dst_reg),
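/* Example (illustrative): BPF_LDX | BPF_MEM | BPF_W with dst_reg = R0,
 * src_reg = R1 and off = 16 emits 8B 47 10, i.e. 'mov eax, dword ptr [rdi+16]';
 * an offset that does not fit in a signed byte switches to the ModRM 0x80
 * form with a 32-bit displacement.
 */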
667 /* STX XADD: lock *(u32*)(dst_reg + off) += src_reg */
668 case BPF_STX | BPF_XADD | BPF_W:
669 /* emit 'lock add dword ptr [rax + off], eax' */
670 if (is_ereg(dst_reg) || is_ereg(src_reg))
671 EMIT3(0xF0, add_2mod(0x40, dst_reg, src_reg), 0x01);
675 case BPF_STX | BPF_XADD | BPF_DW:
676 EMIT3(0xF0, add_2mod(0x48, dst_reg, src_reg), 0x01);
677 xadd: if (is_imm8(insn->off))
678 EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off);
680 EMIT1_off32(add_2reg(0x80, dst_reg, src_reg),
685 case BPF_JMP | BPF_CALL:
686 func = (u8 *) __bpf_call_base + imm32;
687 jmp_offset = func - (image + addrs[i]);
688 if (ctx->seen_ld_abs) {
689 EMIT2(0x41, 0x52); /* push %r10 */
690 EMIT2(0x41, 0x51); /* push %r9 */
691 /* need to adjust jmp offset, since
692 * pop %r9, pop %r10 take 4 bytes after call insn
696 if (!imm32 || !is_simm32(jmp_offset)) {
697 pr_err("unsupported bpf func %d addr %p image %p\n",
701 EMIT1_off32(0xE8, jmp_offset);
702 if (ctx->seen_ld_abs) {
703 EMIT2(0x41, 0x59); /* pop %r9 */
704 EMIT2(0x41, 0x5A); /* pop %r10 */
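/* Helper calls are relative: insn->imm holds the helper's offset from
 * __bpf_call_base, so 'func' above is the helper's absolute address and the
 * call is emitted as E8 rel32 against the current position in the image.
 * When LD_ABS/LD_IND instructions are present, r9 and r10 hold skb metadata
 * for the sk_load_* helpers and are saved and restored around the call,
 * since a C helper is free to clobber them.
 */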
709 case BPF_JMP | BPF_JEQ | BPF_X:
710 case BPF_JMP | BPF_JNE | BPF_X:
711 case BPF_JMP | BPF_JGT | BPF_X:
712 case BPF_JMP | BPF_JGE | BPF_X:
713 case BPF_JMP | BPF_JSGT | BPF_X:
714 case BPF_JMP | BPF_JSGE | BPF_X:
715 /* cmp dst_reg, src_reg */
716 EMIT3(add_2mod(0x48, dst_reg, src_reg), 0x39,
717 add_2reg(0xC0, dst_reg, src_reg));
720 case BPF_JMP | BPF_JSET | BPF_X:
721 /* test dst_reg, src_reg */
722 EMIT3(add_2mod(0x48, dst_reg, src_reg), 0x85,
723 add_2reg(0xC0, dst_reg, src_reg));
726 case BPF_JMP | BPF_JSET | BPF_K:
727 /* test dst_reg, imm32 */
728 EMIT1(add_1mod(0x48, dst_reg));
729 EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32);
732 case BPF_JMP | BPF_JEQ | BPF_K:
733 case BPF_JMP | BPF_JNE | BPF_K:
734 case BPF_JMP | BPF_JGT | BPF_K:
735 case BPF_JMP | BPF_JGE | BPF_K:
736 case BPF_JMP | BPF_JSGT | BPF_K:
737 case BPF_JMP | BPF_JSGE | BPF_K:
738 /* cmp dst_reg, imm8/32 */
739 EMIT1(add_1mod(0x48, dst_reg));
742 EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32);
744 EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32);
746 emit_cond_jmp: /* convert BPF opcode to x86 */
747 switch (BPF_OP(insn->code)) {
756 /* GT is unsigned '>', JA in x86 */
760 /* GE is unsigned '>=', JAE in x86 */
764 /* signed '>', GT in x86 */
768 /* signed '>=', GE in x86 */
771 default: /* to silence gcc warning */
774 jmp_offset = addrs[i + insn->off] - addrs[i];
775 if (is_imm8(jmp_offset)) {
776 EMIT2(jmp_cond, jmp_offset);
777 } else if (is_simm32(jmp_offset)) {
778 EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
780 pr_err("cond_jmp gen bug %llx\n", jmp_offset);
786 case BPF_JMP | BPF_JA:
787 jmp_offset = addrs[i + insn->off] - addrs[i];
789 /* optimize out nop jumps */
792 if (is_imm8(jmp_offset)) {
793 EMIT2(0xEB, jmp_offset);
794 } else if (is_simm32(jmp_offset)) {
795 EMIT1_off32(0xE9, jmp_offset);
797 pr_err("jmp gen bug %llx\n", jmp_offset);
802 case BPF_LD | BPF_IND | BPF_W:
805 case BPF_LD | BPF_ABS | BPF_W:
806 func = CHOOSE_LOAD_FUNC(imm32, sk_load_word);
807 common_load: ctx->seen_ld_abs = true;
808 jmp_offset = func - (image + addrs[i]);
809 if (!func || !is_simm32(jmp_offset)) {
810 pr_err("unsupported bpf func %d addr %p image %p\n",
814 if (BPF_MODE(insn->code) == BPF_ABS) {
815 /* mov %esi, imm32 */
816 EMIT1_off32(0xBE, imm32);
818 /* mov %rsi, src_reg */
819 EMIT_mov(BPF_REG_2, src_reg);
823 EMIT3(0x83, 0xC6, imm32);
825 /* add %esi, imm32 */
826 EMIT2_off32(0x81, 0xC6, imm32);
829 /* skb pointer is in R6 (%rbx), it will be copied into
830 * %rdi if skb_copy_bits() call is necessary.
831 * sk_load_* helpers also use %r10 and %r9d.
834 EMIT1_off32(0xE8, jmp_offset); /* call */
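/* The sk_load_* helpers use a non-standard calling convention set up by the
 * prologue and the code above: the packet offset is passed in %esi, the skb
 * pointer stays in %rbx (R6), %r10 holds skb->data and %r9d the linear
 * headlen, and the loaded word/half/byte comes back in %eax (R0). On failure
 * the helpers bail out through the 'bpf_error' path in bpf_jit.S.
 */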
837 case BPF_LD | BPF_IND | BPF_H:
840 case BPF_LD | BPF_ABS | BPF_H:
841 func = CHOOSE_LOAD_FUNC(imm32, sk_load_half);
843 case BPF_LD | BPF_IND | BPF_B:
846 case BPF_LD | BPF_ABS | BPF_B:
847 func = CHOOSE_LOAD_FUNC(imm32, sk_load_byte);
850 case BPF_JMP | BPF_EXIT:
851 if (i != insn_cnt - 1) {
852 jmp_offset = ctx->cleanup_addr - addrs[i];
855 /* update cleanup_addr */
856 ctx->cleanup_addr = proglen;
857 /* mov rbx, qword ptr [rbp-X] */
858 EMIT3_off32(0x48, 0x8B, 0x9D, -stacksize);
859 /* mov r13, qword ptr [rbp-X] */
860 EMIT3_off32(0x4C, 0x8B, 0xAD, -stacksize + 8);
861 /* mov r14, qword ptr [rbp-X] */
862 EMIT3_off32(0x4C, 0x8B, 0xB5, -stacksize + 16);
863 /* mov r15, qword ptr [rbp-X] */
864 EMIT3_off32(0x4C, 0x8B, 0xBD, -stacksize + 24);
866 EMIT1(0xC9); /* leave */
867 EMIT1(0xC3); /* ret */
871 /* By design the x64 JIT should support all BPF instructions.
872 * This error will be seen if a new instruction was added
873 * to the interpreter but not to the JIT,
874 * or if there is junk in bpf_prog
876 pr_err("bpf_jit: unknown opcode %02x\n", insn->code);
882 if (unlikely(proglen + ilen > oldproglen)) {
883 pr_err("bpf_jit_compile fatal error\n");
886 memcpy(image + proglen, temp, ilen);
895 void bpf_jit_compile(struct bpf_prog *prog)
899 void bpf_int_jit_compile(struct bpf_prog *prog)
901 struct bpf_binary_header *header = NULL;
902 int proglen, oldproglen = 0;
903 struct jit_context ctx = {};
912 if (!prog || !prog->len)
915 addrs = kmalloc(prog->len * sizeof(*addrs), GFP_KERNEL);
919 /* Before the first pass, make a rough estimate of addrs[]:
920 * each BPF instruction is translated to less than 64 bytes
922 for (proglen = 0, i = 0; i < prog->len; i++) {
926 ctx.cleanup_addr = proglen;
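/* The program is JITed in several passes: instruction sizes depend on jump
 * distances, which in turn depend on instruction sizes, so do_jit() is run
 * repeatedly with image == NULL until proglen stops changing. Only once the
 * length has converged (proglen == oldproglen) is the executable image
 * allocated and filled in by one final pass. cleanup_addr, recorded at the
 * end of each pass, lets earlier BPF_EXIT instructions jump forward to the
 * shared epilogue in the next pass.
 */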
928 for (pass = 0; pass < 10; pass++) {
929 proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
933 bpf_jit_binary_free(header);
937 if (proglen != oldproglen)
938 pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
939 proglen, oldproglen);
942 if (proglen == oldproglen) {
943 header = bpf_jit_binary_alloc(proglen, &image,
948 oldproglen = proglen;
951 if (bpf_jit_enable > 1)
952 bpf_jit_dump(prog->len, proglen, 0, image);
955 bpf_flush_icache(header, image + proglen);
956 set_memory_ro((unsigned long)header, header->pages);
957 prog->bpf_func = (void *)image;
964 void bpf_jit_free(struct bpf_prog *fp)
966 unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
967 struct bpf_binary_header *header = (void *)addr;
972 set_memory_rw(addr, header->pages);
973 bpf_jit_binary_free(header);
976 bpf_prog_unlock_free(fp);