/* bpf_jit_comp.c : BPF JIT compiler
 *
 * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com)
 * Internal BPF Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <asm/cacheflush.h>
#include <linux/bpf.h>
int bpf_jit_enable __read_mostly;

/*
 * assembly code in arch/x86/net/bpf_jit.S
 */
extern u8 sk_load_word[], sk_load_half[], sk_load_byte[];
extern u8 sk_load_word_positive_offset[], sk_load_half_positive_offset[];
extern u8 sk_load_byte_positive_offset[];
extern u8 sk_load_word_negative_offset[], sk_load_half_negative_offset[];
extern u8 sk_load_byte_negative_offset[];
static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
{
	if (len == 1)
		*ptr = bytes;
	else if (len == 2)
		*(u16 *)ptr = bytes;
	else {
		*(u32 *)ptr = bytes;
		barrier();
	}
	ptr += len;
	return ptr;
}
#define EMIT(bytes, len) \
	do { prog = emit_code(prog, bytes, len); cnt += len; } while (0)

#define EMIT1(b1)		EMIT(b1, 1)
#define EMIT2(b1, b2)		EMIT((b1) + ((b2) << 8), 2)
#define EMIT3(b1, b2, b3)	EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
#define EMIT4(b1, b2, b3, b4)	EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
#define EMIT1_off32(b1, off) \
	do { EMIT1(b1); EMIT(off, 4); } while (0)
#define EMIT2_off32(b1, b2, off) \
	do { EMIT2(b1, b2); EMIT(off, 4); } while (0)
#define EMIT3_off32(b1, b2, b3, off) \
	do { EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
#define EMIT4_off32(b1, b2, b3, b4, off) \
	do { EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)
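/* Example of how the EMIT macros compose: EMIT3(0x48, 0x89, 0xE5) packs the
 * three opcode bytes into a u32 with 0x48 in the low byte, so emit_code()
 * stores them (little-endian) in instruction order -- here "mov rbp,rsp" --
 * and cnt advances by 3.
 */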
static bool is_imm8(int value)
{
	return value <= 127 && value >= -128;
}
static bool is_simm32(s64 value)
{
	return value == (s64) (s32) value;
}
/* mov dst, src */
#define EMIT_mov(DST, SRC) \
	do {if (DST != SRC) \
		EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \
	} while (0)
static int bpf_size_to_x86_bytes(int bpf_size)
{
	if (bpf_size == BPF_W)
		return 4;
	else if (bpf_size == BPF_H)
		return 2;
	else if (bpf_size == BPF_B)
		return 1;
	else if (bpf_size == BPF_DW)
		return 4; /* imm32 */
	else
		return 0;
}
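/* Note: BPF_DW deliberately maps to 4 here. The only user in this file is the
 * BPF_ST immediate-store path below, and x86 'mov r/m64, imm32' (opcode 0xC7)
 * takes a sign-extended 32-bit immediate, so even an 8-byte store emits only
 * four immediate bytes.
 */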
/* list of x86 cond jumps opcodes (. + s8)
 * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32)
 */
#define X86_JB  0x72
#define X86_JAE 0x73
#define X86_JE  0x74
#define X86_JNE 0x75
#define X86_JBE 0x76
#define X86_JA  0x77
#define X86_JGE 0x7D
#define X86_JG  0x7F
static void bpf_flush_icache(void *start, void *end)
{
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	smp_wmb();
	flush_icache_range((unsigned long)start, (unsigned long)end);
	set_fs(old_fs);
}
#define CHOOSE_LOAD_FUNC(K, func) \
	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
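/* Illustration: the classic BPF constant offset K selects the helper variant
 * at JIT time. K = 14 picks sk_load_word_positive_offset; a negative K at or
 * above SKF_LL_OFF (e.g. SKF_LL_OFF + 2) picks sk_load_word_negative_offset;
 * a K below SKF_LL_OFF falls back to the generic sk_load_word, which
 * classifies the offset at runtime.
 */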
/* pick a register outside of BPF range for JIT internal work */
#define AUX_REG (MAX_BPF_REG + 1)
/* the following table maps BPF registers to x64 registers.
 * x64 register r12 is unused, since if used as base address register
 * in load/store instructions, it always needs an extra byte of encoding
 */
static const int reg2hex[] = {
	[BPF_REG_0] = 0,  /* rax */
	[BPF_REG_1] = 7,  /* rdi */
	[BPF_REG_2] = 6,  /* rsi */
	[BPF_REG_3] = 2,  /* rdx */
	[BPF_REG_4] = 1,  /* rcx */
	[BPF_REG_5] = 0,  /* r8 */
	[BPF_REG_6] = 3,  /* rbx callee saved */
	[BPF_REG_7] = 5,  /* r13 callee saved */
	[BPF_REG_8] = 6,  /* r14 callee saved */
	[BPF_REG_9] = 7,  /* r15 callee saved */
	[BPF_REG_FP] = 5, /* rbp readonly */
	[AUX_REG] = 3,    /* r11 temp register */
};
/* is_ereg() == true if BPF register 'reg' maps to x64 r8..r15
 * which need extra byte of encoding.
 * rax,rcx,...,rbp have simpler encoding
 */
static bool is_ereg(u32 reg)
{
	return (1 << reg) & (BIT(BPF_REG_5) |
			     BIT(AUX_REG) |
			     BIT(BPF_REG_7) |
			     BIT(BPF_REG_8) |
			     BIT(BPF_REG_9));
}
/* add modifiers if 'reg' maps to x64 registers r8..r15 */
static u8 add_1mod(u8 byte, u32 reg)
{
	if (is_ereg(reg))
		byte |= 1;
	return byte;
}

static u8 add_2mod(u8 byte, u32 r1, u32 r2)
{
	if (is_ereg(r1))
		byte |= 1;
	if (is_ereg(r2))
		byte |= 2;
	return byte;
}
/* encode 'dst_reg' register into x64 opcode 'byte' */
static u8 add_1reg(u8 byte, u32 dst_reg)
{
	return byte + reg2hex[dst_reg];
}
/* encode 'dst_reg' and 'src_reg' registers into x64 opcode 'byte' */
static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
{
	return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3);
}
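/* Worked example of the REX/ModRM helpers: for EMIT_mov(BPF_REG_1, BPF_REG_2)
 * neither register is an ereg, so add_2mod(0x48, ...) stays 0x48, and
 * add_2reg(0xC0, BPF_REG_1, BPF_REG_2) = 0xC0 + 7 + (6 << 3) = 0xF7, giving
 * the bytes 48 89 F7, i.e. "mov rdi, rsi".
 */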
static void jit_fill_hole(void *area, unsigned int size)
{
	/* fill whole space with int3 instructions */
	memset(area, 0xcc, size);
}
struct jit_context {
	int cleanup_addr; /* epilogue code offset */
	bool seen_ld_abs;
};
/* maximum number of bytes emitted while JITing one eBPF insn */
#define BPF_MAX_INSN_SIZE	128
#define BPF_INSN_SAFETY		64

#define STACKSIZE \
	(MAX_BPF_STACK + \
	 32 /* space for rbx, r13, r14, r15 */ + \
	 8 /* space for skb_copy_bits() buffer */)

#define PROLOGUE_SIZE 51
/* emit x64 prologue code for BPF program and check its size.
 * bpf_tail_call helper will skip it while jumping into another program
 */
static void emit_prologue(u8 **pprog)
{
	u8 *prog = *pprog;
	int cnt = 0;

	EMIT1(0x55); /* push rbp */
	EMIT3(0x48, 0x89, 0xE5); /* mov rbp,rsp */

	/* sub rsp, STACKSIZE */
	EMIT3_off32(0x48, 0x81, 0xEC, STACKSIZE);

	/* all classic BPF filters use R6(rbx) save it */

	/* mov qword ptr [rbp-X],rbx */
	EMIT3_off32(0x48, 0x89, 0x9D, -STACKSIZE);

	/* bpf_convert_filter() maps classic BPF register X to R7 and uses R8
	 * as temporary, so all tcpdump filters need to spill/fill R7(r13) and
	 * R8(r14). R9(r15) spill could be made conditional, but there is only
	 * one 'bpf_error' return path out of helper functions inside bpf_jit.S
	 * The overhead of extra spill is negligible for any filter other
	 * than synthetic ones. Therefore not worth adding complexity.
	 */

	/* mov qword ptr [rbp-X],r13 */
	EMIT3_off32(0x4C, 0x89, 0xAD, -STACKSIZE + 8);
	/* mov qword ptr [rbp-X],r14 */
	EMIT3_off32(0x4C, 0x89, 0xB5, -STACKSIZE + 16);
	/* mov qword ptr [rbp-X],r15 */
	EMIT3_off32(0x4C, 0x89, 0xBD, -STACKSIZE + 24);

	/* clear A and X registers */
	EMIT2(0x31, 0xc0); /* xor eax, eax */
	EMIT3(0x4D, 0x31, 0xED); /* xor r13, r13 */

	/* clear tail_cnt: mov qword ptr [rbp-X], rax */
	EMIT3_off32(0x48, 0x89, 0x85, -STACKSIZE + 32);

	BUILD_BUG_ON(cnt != PROLOGUE_SIZE);
	*pprog = prog;
}
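/* Sketch of the resulting frame, assuming MAX_BPF_STACK is 512 (so STACKSIZE
 * works out to 552): rbx is spilled to [rbp-552], r13/r14/r15 to
 * [rbp-544/-536/-528], the tail-call counter sits at -STACKSIZE+32, i.e.
 * [rbp-520], and the BPF program's own stack occupies the 512 bytes just
 * below rbp (the read-only BPF_REG_FP).
 */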
/* generate the following code:
 * ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ...
 *   if (index >= array->map.max_entries)
 *     goto out;
 *   if (++tail_call_cnt > MAX_TAIL_CALL_CNT)
 *     goto out;
 *   prog = array->prog[index];
 *   if (prog == NULL)
 *     goto out;
 *   goto *(prog->bpf_func + prologue_size);
 * out:
 */
static void emit_bpf_tail_call(u8 **pprog)
{
	u8 *prog = *pprog;
	int label1, label2, label3;
	int cnt = 0;

	/* rdi - pointer to ctx
	 * rsi - pointer to bpf_array
	 * rdx - index in bpf_array
	 */

	/* if (index >= array->map.max_entries)
	 *   goto out;
	 */
	EMIT4(0x48, 0x8B, 0x46,                   /* mov rax, qword ptr [rsi + 16] */
	      offsetof(struct bpf_array, map.max_entries));
	EMIT3(0x48, 0x39, 0xD0);                  /* cmp rax, rdx */
#define OFFSET1 47 /* number of bytes to jump */
	EMIT2(X86_JBE, OFFSET1);                  /* jbe out */
	label1 = cnt;

	/* if (tail_call_cnt > MAX_TAIL_CALL_CNT)
	 *   goto out;
	 */
	EMIT2_off32(0x8B, 0x85, -STACKSIZE + 36); /* mov eax, dword ptr [rbp - 516] */
	EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);     /* cmp eax, MAX_TAIL_CALL_CNT */
#define OFFSET2 36
	EMIT2(X86_JA, OFFSET2);                   /* ja out */
	label2 = cnt;
	EMIT3(0x83, 0xC0, 0x01);                  /* add eax, 1 */
	EMIT2_off32(0x89, 0x85, -STACKSIZE + 36); /* mov dword ptr [rbp - 516], eax */

	/* prog = array->prog[index]; */
	EMIT4_off32(0x48, 0x8D, 0x84, 0xD6,       /* lea rax, [rsi + rdx * 8 + offsetof(...)] */
		    offsetof(struct bpf_array, prog));
	EMIT3(0x48, 0x8B, 0x00);                  /* mov rax, qword ptr [rax] */

	/* if (prog == NULL)
	 *   goto out;
	 */
	EMIT4(0x48, 0x83, 0xF8, 0x00);            /* cmp rax, 0 */
#define OFFSET3 10
	EMIT2(X86_JE, OFFSET3);                   /* je out */
	label3 = cnt;

	/* goto *(prog->bpf_func + prologue_size); */
	EMIT4(0x48, 0x8B, 0x40,                   /* mov rax, qword ptr [rax + 32] */
	      offsetof(struct bpf_prog, bpf_func));
	EMIT4(0x48, 0x83, 0xC0, PROLOGUE_SIZE);   /* add rax, prologue_size */

	/* now we're ready to jump into next BPF program
	 * rdi == ctx (1st arg)
	 * rax == prog->bpf_func + prologue_size
	 */
	EMIT2(0xFF, 0xE0);                        /* jmp rax */

	/* out: */
	BUILD_BUG_ON(cnt - label1 != OFFSET1);
	BUILD_BUG_ON(cnt - label2 != OFFSET2);
	BUILD_BUG_ON(cnt - label3 != OFFSET3);
	*pprog = prog;
}
static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
		  int oldproglen, struct jit_context *ctx)
{
	struct bpf_insn *insn = bpf_prog->insnsi;
	int insn_cnt = bpf_prog->len;
	bool seen_ld_abs = ctx->seen_ld_abs | (oldproglen == 0);
	bool seen_exit = false;
	u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
	int i, cnt = 0;
	int proglen = 0;
	u8 *prog = temp;
	emit_prologue(&prog);
	if (seen_ld_abs) {
		/* r9d : skb->len - skb->data_len (headlen)
		 * r10 : skb->data
		 */
		if (is_imm8(offsetof(struct sk_buff, len)))
			/* mov %r9d, off8(%rdi) */
			EMIT4(0x44, 0x8b, 0x4f,
			      offsetof(struct sk_buff, len));
		else
			/* mov %r9d, off32(%rdi) */
			EMIT3_off32(0x44, 0x8b, 0x8f,
				    offsetof(struct sk_buff, len));

		if (is_imm8(offsetof(struct sk_buff, data_len)))
			/* sub %r9d, off8(%rdi) */
			EMIT4(0x44, 0x2b, 0x4f,
			      offsetof(struct sk_buff, data_len));
		else
			/* sub %r9d, off32(%rdi) */
			EMIT3_off32(0x44, 0x2b, 0x8f,
				    offsetof(struct sk_buff, data_len));

		if (is_imm8(offsetof(struct sk_buff, data)))
			/* mov %r10, off8(%rdi) */
			EMIT4(0x4c, 0x8b, 0x57,
			      offsetof(struct sk_buff, data));
		else
			/* mov %r10, off32(%rdi) */
			EMIT3_off32(0x4c, 0x8b, 0x97,
				    offsetof(struct sk_buff, data));
	}
	for (i = 0; i < insn_cnt; i++, insn++) {
		const s32 imm32 = insn->imm;
		u32 dst_reg = insn->dst_reg;
		u32 src_reg = insn->src_reg;
		u8 b1 = 0, b2 = 0, b3 = 0;
		s64 jmp_offset;
		u8 jmp_cond;
		int ilen;
		u8 *func;

		switch (insn->code) {
			/* ALU */
		case BPF_ALU | BPF_ADD | BPF_X:
		case BPF_ALU | BPF_SUB | BPF_X:
		case BPF_ALU | BPF_AND | BPF_X:
		case BPF_ALU | BPF_OR | BPF_X:
		case BPF_ALU | BPF_XOR | BPF_X:
		case BPF_ALU64 | BPF_ADD | BPF_X:
		case BPF_ALU64 | BPF_SUB | BPF_X:
		case BPF_ALU64 | BPF_AND | BPF_X:
		case BPF_ALU64 | BPF_OR | BPF_X:
		case BPF_ALU64 | BPF_XOR | BPF_X:
			switch (BPF_OP(insn->code)) {
			case BPF_ADD: b2 = 0x01; break;
			case BPF_SUB: b2 = 0x29; break;
			case BPF_AND: b2 = 0x21; break;
			case BPF_OR: b2 = 0x09; break;
			case BPF_XOR: b2 = 0x31; break;
			}
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_2mod(0x48, dst_reg, src_reg));
			else if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT1(add_2mod(0x40, dst_reg, src_reg));
			EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg));
			break;
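			/* Encoding example: BPF_ALU64 | BPF_ADD | BPF_X with
			 * dst_reg == BPF_REG_1 and src_reg == BPF_REG_2
			 * selects b2 == 0x01, REX prefix 0x48 and ModRM byte
			 * 0xF7, emitting 48 01 F7, i.e. "add rdi, rsi".
			 */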
			/* mov dst, src */
		case BPF_ALU64 | BPF_MOV | BPF_X:
			EMIT_mov(dst_reg, src_reg);
			break;

			/* mov32 dst, src */
		case BPF_ALU | BPF_MOV | BPF_X:
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT1(add_2mod(0x40, dst_reg, src_reg));
			EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg));
			break;
			/* neg dst */
		case BPF_ALU | BPF_NEG:
		case BPF_ALU64 | BPF_NEG:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));
			EMIT2(0xF7, add_1reg(0xD8, dst_reg));
			break;
		case BPF_ALU | BPF_ADD | BPF_K:
		case BPF_ALU | BPF_SUB | BPF_K:
		case BPF_ALU | BPF_AND | BPF_K:
		case BPF_ALU | BPF_OR | BPF_K:
		case BPF_ALU | BPF_XOR | BPF_K:
		case BPF_ALU64 | BPF_ADD | BPF_K:
		case BPF_ALU64 | BPF_SUB | BPF_K:
		case BPF_ALU64 | BPF_AND | BPF_K:
		case BPF_ALU64 | BPF_OR | BPF_K:
		case BPF_ALU64 | BPF_XOR | BPF_K:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			switch (BPF_OP(insn->code)) {
			case BPF_ADD: b3 = 0xC0; break;
			case BPF_SUB: b3 = 0xE8; break;
			case BPF_AND: b3 = 0xE0; break;
			case BPF_OR: b3 = 0xC8; break;
			case BPF_XOR: b3 = 0xF0; break;
			}

			if (is_imm8(imm32))
				EMIT3(0x83, add_1reg(b3, dst_reg), imm32);
			else
				EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32);
			break;
		case BPF_ALU64 | BPF_MOV | BPF_K:
			/* optimization: if imm32 is positive,
			 * use 'mov eax, imm32' (which zero-extends imm32)
			 * to save 2 bytes
			 */
			if (imm32 < 0) {
				/* 'mov rax, imm32' sign extends imm32 */
				b1 = add_1mod(0x48, dst_reg);
				b2 = 0xC7;
				b3 = 0xC0;
				EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32);
				break;
			}
		case BPF_ALU | BPF_MOV | BPF_K:
			/* mov %eax, imm32 */
			if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));
			EMIT1_off32(add_1reg(0xB8, dst_reg), imm32);
			break;
		case BPF_LD | BPF_IMM | BPF_DW:
			if (insn[1].code != 0 || insn[1].src_reg != 0 ||
			    insn[1].dst_reg != 0 || insn[1].off != 0) {
				/* verifier must catch invalid insns */
				pr_err("invalid BPF_LD_IMM64 insn\n");
				return -EINVAL;
			}

			/* movabsq %rax, imm64 */
			EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg));
			EMIT(insn[0].imm, 4);
			EMIT(insn[1].imm, 4);

			insn++;
			i++;
			break;
			/* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */
		case BPF_ALU | BPF_MOD | BPF_X:
		case BPF_ALU | BPF_DIV | BPF_X:
		case BPF_ALU | BPF_MOD | BPF_K:
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU64 | BPF_MOD | BPF_X:
		case BPF_ALU64 | BPF_DIV | BPF_X:
		case BPF_ALU64 | BPF_MOD | BPF_K:
		case BPF_ALU64 | BPF_DIV | BPF_K:
			EMIT1(0x50); /* push rax */
			EMIT1(0x52); /* push rdx */

			if (BPF_SRC(insn->code) == BPF_X)
				/* mov r11, src_reg */
				EMIT_mov(AUX_REG, src_reg);
			else
				/* mov r11, imm32 */
				EMIT3_off32(0x49, 0xC7, 0xC3, imm32);

			/* mov rax, dst_reg */
			EMIT_mov(BPF_REG_0, dst_reg);

			/* xor edx, edx
			 * equivalent to 'xor rdx, rdx', but one byte less
			 */
			EMIT2(0x31, 0xd2);

			if (BPF_SRC(insn->code) == BPF_X) {
				/* if (src_reg == 0) return 0 */

				/* cmp r11, 0 */
				EMIT4(0x49, 0x83, 0xFB, 0x00);

				/* jne .+9 (skip over pop, pop, xor and jmp) */
				EMIT2(X86_JNE, 1 + 1 + 2 + 5);
				EMIT1(0x5A); /* pop rdx */
				EMIT1(0x58); /* pop rax */
				EMIT2(0x31, 0xc0); /* xor eax, eax */

				/* jmp cleanup_addr
				 * addrs[i] - 11, because there are 11 bytes
				 * after this insn: div, mov, pop, pop, mov
				 */
				jmp_offset = ctx->cleanup_addr - (addrs[i] - 11);
				EMIT1_off32(0xE9, jmp_offset);
			}

			if (BPF_CLASS(insn->code) == BPF_ALU64)
				/* div r11 */
				EMIT3(0x49, 0xF7, 0xF3);
			else
				/* div r11d */
				EMIT3(0x41, 0xF7, 0xF3);

			if (BPF_OP(insn->code) == BPF_MOD)
				/* mov r11, rdx */
				EMIT3(0x49, 0x89, 0xD3);
			else
				/* mov r11, rax */
				EMIT3(0x49, 0x89, 0xC3);

			EMIT1(0x5A); /* pop rdx */
			EMIT1(0x58); /* pop rax */

			/* mov dst_reg, r11 */
			EMIT_mov(dst_reg, AUX_REG);
			break;
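			/* Behavioral note: for a BPF_X divide the generated
			 * code checks the runtime divisor and, if it is zero,
			 * zeroes eax and jumps to the epilogue so the whole
			 * program returns 0, rather than executing a trapping
			 * x86 'div'.
			 */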
		case BPF_ALU | BPF_MUL | BPF_K:
		case BPF_ALU | BPF_MUL | BPF_X:
		case BPF_ALU64 | BPF_MUL | BPF_K:
		case BPF_ALU64 | BPF_MUL | BPF_X:
			EMIT1(0x50); /* push rax */
			EMIT1(0x52); /* push rdx */

			/* mov r11, dst_reg */
			EMIT_mov(AUX_REG, dst_reg);

			if (BPF_SRC(insn->code) == BPF_X)
				/* mov rax, src_reg */
				EMIT_mov(BPF_REG_0, src_reg);
			else
				/* mov rax, imm32 */
				EMIT3_off32(0x48, 0xC7, 0xC0, imm32);

			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, AUX_REG));
			else if (is_ereg(AUX_REG))
				EMIT1(add_1mod(0x40, AUX_REG));
			/* mul(q) r11 */
			EMIT2(0xF7, add_1reg(0xE0, AUX_REG));

			/* mov r11, rax */
			EMIT_mov(AUX_REG, BPF_REG_0);

			EMIT1(0x5A); /* pop rdx */
			EMIT1(0x58); /* pop rax */

			/* mov dst_reg, r11 */
			EMIT_mov(dst_reg, AUX_REG);
			break;
			/* shifts */
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_K:
		case BPF_ALU | BPF_ARSH | BPF_K:
		case BPF_ALU64 | BPF_LSH | BPF_K:
		case BPF_ALU64 | BPF_RSH | BPF_K:
		case BPF_ALU64 | BPF_ARSH | BPF_K:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			switch (BPF_OP(insn->code)) {
			case BPF_LSH: b3 = 0xE0; break;
			case BPF_RSH: b3 = 0xE8; break;
			case BPF_ARSH: b3 = 0xF8; break;
			}
			EMIT3(0xC1, add_1reg(b3, dst_reg), imm32);
			break;
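			/* Encoding example: BPF_ALU64 | BPF_LSH | BPF_K with
			 * dst_reg == BPF_REG_1 and imm32 == 3 takes the REX
			 * prefix 0x48, b3 == 0xE0 and add_1reg(0xE0,
			 * BPF_REG_1) == 0xE7, emitting 48 C1 E7 03
			 * ("shl rdi, 3").
			 */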
		case BPF_ALU | BPF_LSH | BPF_X:
		case BPF_ALU | BPF_RSH | BPF_X:
		case BPF_ALU | BPF_ARSH | BPF_X:
		case BPF_ALU64 | BPF_LSH | BPF_X:
		case BPF_ALU64 | BPF_RSH | BPF_X:
		case BPF_ALU64 | BPF_ARSH | BPF_X:

			/* check for bad case when dst_reg == rcx */
			if (dst_reg == BPF_REG_4) {
				/* mov r11, dst_reg */
				EMIT_mov(AUX_REG, dst_reg);
				dst_reg = AUX_REG;
			}

			if (src_reg != BPF_REG_4) { /* common case */
				EMIT1(0x51); /* push rcx */

				/* mov rcx, src_reg */
				EMIT_mov(BPF_REG_4, src_reg);
			}

			/* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			switch (BPF_OP(insn->code)) {
			case BPF_LSH: b3 = 0xE0; break;
			case BPF_RSH: b3 = 0xE8; break;
			case BPF_ARSH: b3 = 0xF8; break;
			}
			EMIT2(0xD3, add_1reg(b3, dst_reg));

			if (src_reg != BPF_REG_4)
				EMIT1(0x59); /* pop rcx */

			if (insn->dst_reg == BPF_REG_4)
				/* mov dst_reg, r11 */
				EMIT_mov(insn->dst_reg, AUX_REG);
			break;
		case BPF_ALU | BPF_END | BPF_FROM_BE:
			switch (imm32) {
			case 16:
				/* emit 'ror %ax, 8' to swap lower 2 bytes */
				EMIT1(0x66);
				if (is_ereg(dst_reg))
					EMIT1(0x41);
				EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);

				/* emit 'movzwl eax, ax' */
				if (is_ereg(dst_reg))
					EMIT3(0x45, 0x0F, 0xB7);
				else
					EMIT2(0x0F, 0xB7);
				EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
				break;
			case 32:
				/* emit 'bswap eax' to swap lower 4 bytes */
				if (is_ereg(dst_reg))
					EMIT2(0x41, 0x0F);
				else
					EMIT1(0x0F);
				EMIT1(add_1reg(0xC8, dst_reg));
				break;
			case 64:
				/* emit 'bswap rax' to swap 8 bytes */
				EMIT3(add_1mod(0x48, dst_reg), 0x0F,
				      add_1reg(0xC8, dst_reg));
				break;
			}
			break;
		case BPF_ALU | BPF_END | BPF_FROM_LE:
			switch (imm32) {
			case 16:
				/* emit 'movzwl eax, ax' to zero extend 16-bit
				 * into 64 bit
				 */
				if (is_ereg(dst_reg))
					EMIT3(0x45, 0x0F, 0xB7);
				else
					EMIT2(0x0F, 0xB7);
				EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
				break;
			case 32:
				/* emit 'mov eax, eax' to clear upper 32-bits */
				if (is_ereg(dst_reg))
					EMIT1(0x45);
				EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg));
				break;
			case 64:
				/* nop */
				break;
			}
			break;
			/* ST: *(u8*)(dst_reg + off) = imm */
		case BPF_ST | BPF_MEM | BPF_B:
			if (is_ereg(dst_reg))
				EMIT2(0x41, 0xC6);
			else
				EMIT1(0xC6);
			goto st;
		case BPF_ST | BPF_MEM | BPF_H:
			if (is_ereg(dst_reg))
				EMIT3(0x66, 0x41, 0xC7);
			else
				EMIT2(0x66, 0xC7);
			goto st;
		case BPF_ST | BPF_MEM | BPF_W:
			if (is_ereg(dst_reg))
				EMIT2(0x41, 0xC7);
			else
				EMIT1(0xC7);
			goto st;
		case BPF_ST | BPF_MEM | BPF_DW:
			EMIT2(add_1mod(0x48, dst_reg), 0xC7);

st:			if (is_imm8(insn->off))
				EMIT2(add_1reg(0x40, dst_reg), insn->off);
			else
				EMIT1_off32(add_1reg(0x80, dst_reg), insn->off);

			EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code)));
			break;
			/* STX: *(u8*)(dst_reg + off) = src_reg */
		case BPF_STX | BPF_MEM | BPF_B:
			/* emit 'mov byte ptr [rax + off], al' */
			if (is_ereg(dst_reg) || is_ereg(src_reg) ||
			    /* have to add extra byte for x86 SIL, DIL regs */
			    src_reg == BPF_REG_1 || src_reg == BPF_REG_2)
				EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
			else
				EMIT1(0x88);
			goto stx;
		case BPF_STX | BPF_MEM | BPF_H:
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89);
			else
				EMIT2(0x66, 0x89);
			goto stx;
		case BPF_STX | BPF_MEM | BPF_W:
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89);
			else
				EMIT1(0x89);
			goto stx;
		case BPF_STX | BPF_MEM | BPF_DW:
			EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89);
stx:			if (is_imm8(insn->off))
				EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off);
			else
				EMIT1_off32(add_2reg(0x80, dst_reg, src_reg),
					    insn->off);
			break;
			/* LDX: dst_reg = *(u8*)(src_reg + off) */
		case BPF_LDX | BPF_MEM | BPF_B:
			/* emit 'movzx rax, byte ptr [rax + off]' */
			EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6);
			goto ldx;
		case BPF_LDX | BPF_MEM | BPF_H:
			/* emit 'movzx rax, word ptr [rax + off]' */
			EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7);
			goto ldx;
		case BPF_LDX | BPF_MEM | BPF_W:
			/* emit 'mov eax, dword ptr [rax+0x14]' */
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B);
			else
				EMIT1(0x8B);
			goto ldx;
		case BPF_LDX | BPF_MEM | BPF_DW:
			/* emit 'mov rax, qword ptr [rax+0x14]' */
			EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B);
ldx:			/* if insn->off == 0 we can save one extra byte, but
			 * special case of x86 r13 which always needs an offset
			 * is not worth the hassle
			 */
			if (is_imm8(insn->off))
				EMIT2(add_2reg(0x40, src_reg, dst_reg), insn->off);
			else
				EMIT1_off32(add_2reg(0x80, src_reg, dst_reg),
					    insn->off);
			break;
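			/* Encoding example: BPF_LDX | BPF_MEM | BPF_W with
			 * dst_reg == BPF_REG_0, src_reg == BPF_REG_1 and
			 * off == 0x14 needs no REX byte, so the JIT emits
			 * 8B 47 14, i.e. "mov eax, dword ptr [rdi + 0x14]".
			 */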
			/* STX XADD: lock *(u32*)(dst_reg + off) += src_reg */
		case BPF_STX | BPF_XADD | BPF_W:
			/* emit 'lock add dword ptr [rax + off], eax' */
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT3(0xF0, add_2mod(0x40, dst_reg, src_reg), 0x01);
			else
				EMIT2(0xF0, 0x01);
			goto xadd;
		case BPF_STX | BPF_XADD | BPF_DW:
			EMIT3(0xF0, add_2mod(0x48, dst_reg, src_reg), 0x01);
xadd:			if (is_imm8(insn->off))
				EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off);
			else
				EMIT1_off32(add_2reg(0x80, dst_reg, src_reg),
					    insn->off);
			break;
			/* call */
		case BPF_JMP | BPF_CALL:
			func = (u8 *) __bpf_call_base + imm32;
			jmp_offset = func - (image + addrs[i]);
			if (seen_ld_abs) {
				EMIT2(0x41, 0x52); /* push %r10 */
				EMIT2(0x41, 0x51); /* push %r9 */
				/* need to adjust jmp offset, since
				 * pop %r9, pop %r10 take 4 bytes after call insn
				 */
				jmp_offset += 4;
			}
			if (!imm32 || !is_simm32(jmp_offset)) {
				pr_err("unsupported bpf func %d addr %p image %p\n",
				       imm32, func, image);
				return -EINVAL;
			}
			EMIT1_off32(0xE8, jmp_offset);
			if (seen_ld_abs) {
				EMIT2(0x41, 0x59); /* pop %r9 */
				EMIT2(0x41, 0x5A); /* pop %r10 */
			}
			break;
		case BPF_JMP | BPF_CALL | BPF_X:
			emit_bpf_tail_call(&prog);
			break;
			/* cond jump */
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JNE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JSGT | BPF_X:
		case BPF_JMP | BPF_JSGE | BPF_X:
			/* cmp dst_reg, src_reg */
			EMIT3(add_2mod(0x48, dst_reg, src_reg), 0x39,
			      add_2reg(0xC0, dst_reg, src_reg));
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JSET | BPF_X:
			/* test dst_reg, src_reg */
			EMIT3(add_2mod(0x48, dst_reg, src_reg), 0x85,
			      add_2reg(0xC0, dst_reg, src_reg));
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JSET | BPF_K:
			/* test dst_reg, imm32 */
			EMIT1(add_1mod(0x48, dst_reg));
			EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32);
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JNE | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JSGT | BPF_K:
		case BPF_JMP | BPF_JSGE | BPF_K:
			/* cmp dst_reg, imm8/32 */
			EMIT1(add_1mod(0x48, dst_reg));

			if (is_imm8(imm32))
				EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32);
			else
				EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32);

emit_cond_jmp:		/* convert BPF opcode to x86 */
			switch (BPF_OP(insn->code)) {
			case BPF_JEQ:
				jmp_cond = X86_JE;
				break;
			case BPF_JSET:
			case BPF_JNE:
				jmp_cond = X86_JNE;
				break;
			case BPF_JGT:
				/* GT is unsigned '>', JA in x86 */
				jmp_cond = X86_JA;
				break;
			case BPF_JGE:
				/* GE is unsigned '>=', JAE in x86 */
				jmp_cond = X86_JAE;
				break;
			case BPF_JSGT:
				/* signed '>', GT in x86 */
				jmp_cond = X86_JG;
				break;
			case BPF_JSGE:
				/* signed '>=', GE in x86 */
				jmp_cond = X86_JGE;
				break;
			default: /* to silence gcc warning */
				return -EFAULT;
			}
			jmp_offset = addrs[i + insn->off] - addrs[i];
			if (is_imm8(jmp_offset)) {
				EMIT2(jmp_cond, jmp_offset);
			} else if (is_simm32(jmp_offset)) {
				EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
			} else {
				pr_err("cond_jmp gen bug %llx\n", jmp_offset);
				return -EFAULT;
			}
			break;
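			/* Example of the short/near jump split: for BPF_JEQ,
			 * jmp_cond is X86_JE (0x74), so a short branch is
			 * 74 <rel8> while the far form becomes 0F 84 <rel32>
			 * -- exactly the "add 0x10 plus an extra 0x0f" rule
			 * from the opcode table at the top of this file.
			 */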
		case BPF_JMP | BPF_JA:
			jmp_offset = addrs[i + insn->off] - addrs[i];
			if (!jmp_offset)
				/* optimize out nop jumps */
				break;
emit_jmp:
			if (is_imm8(jmp_offset)) {
				EMIT2(0xEB, jmp_offset);
			} else if (is_simm32(jmp_offset)) {
				EMIT1_off32(0xE9, jmp_offset);
			} else {
				pr_err("jmp gen bug %llx\n", jmp_offset);
				return -EFAULT;
			}
			break;
		case BPF_LD | BPF_IND | BPF_W:
			func = sk_load_word;
			goto common_load;
		case BPF_LD | BPF_ABS | BPF_W:
			func = CHOOSE_LOAD_FUNC(imm32, sk_load_word);
common_load:
			ctx->seen_ld_abs = seen_ld_abs = true;
			jmp_offset = func - (image + addrs[i]);
			if (!func || !is_simm32(jmp_offset)) {
				pr_err("unsupported bpf func %d addr %p image %p\n",
				       imm32, func, image);
				return -EINVAL;
			}
			if (BPF_MODE(insn->code) == BPF_ABS) {
				/* mov %esi, imm32 */
				EMIT1_off32(0xBE, imm32);
			} else {
				/* mov %rsi, src_reg */
				EMIT_mov(BPF_REG_2, src_reg);
				if (imm32) {
					if (is_imm8(imm32))
						/* add %esi, imm8 */
						EMIT3(0x83, 0xC6, imm32);
					else
						/* add %esi, imm32 */
						EMIT2_off32(0x81, 0xC6, imm32);
				}
			}
			/* skb pointer is in R6 (%rbx), it will be copied into
			 * %rdi if skb_copy_bits() call is necessary.
			 * sk_load_* helpers also use %r10 and %r9d.
			 * See bpf_jit.S
			 */
			EMIT1_off32(0xE8, jmp_offset); /* call */
			break;
		case BPF_LD | BPF_IND | BPF_H:
			func = sk_load_half;
			goto common_load;
		case BPF_LD | BPF_ABS | BPF_H:
			func = CHOOSE_LOAD_FUNC(imm32, sk_load_half);
			goto common_load;
		case BPF_LD | BPF_IND | BPF_B:
			func = sk_load_byte;
			goto common_load;
		case BPF_LD | BPF_ABS | BPF_B:
			func = CHOOSE_LOAD_FUNC(imm32, sk_load_byte);
			goto common_load;
		case BPF_JMP | BPF_EXIT:
			if (seen_exit) {
				jmp_offset = ctx->cleanup_addr - addrs[i];
				goto emit_jmp;
			}
			seen_exit = true;
			/* update cleanup_addr */
			ctx->cleanup_addr = proglen;
			/* mov rbx, qword ptr [rbp-X] */
			EMIT3_off32(0x48, 0x8B, 0x9D, -STACKSIZE);
			/* mov r13, qword ptr [rbp-X] */
			EMIT3_off32(0x4C, 0x8B, 0xAD, -STACKSIZE + 8);
			/* mov r14, qword ptr [rbp-X] */
			EMIT3_off32(0x4C, 0x8B, 0xB5, -STACKSIZE + 16);
			/* mov r15, qword ptr [rbp-X] */
			EMIT3_off32(0x4C, 0x8B, 0xBD, -STACKSIZE + 24);

			EMIT1(0xC9); /* leave */
			EMIT1(0xC3); /* ret */
			break;
		default:
			/* By design x64 JIT should support all BPF instructions
			 * This error will be seen if new instruction was added
			 * to interpreter, but not to JIT
			 * or if there is junk in bpf_prog
			 */
			pr_err("bpf_jit: unknown opcode %02x\n", insn->code);
			return -EINVAL;
		}
		ilen = prog - temp;
		if (ilen > BPF_MAX_INSN_SIZE) {
			pr_err("bpf_jit_compile fatal insn size error\n");
			return -EFAULT;
		}

		if (image) {
			if (unlikely(proglen + ilen > oldproglen)) {
				pr_err("bpf_jit_compile fatal error\n");
				return -EFAULT;
			}
			memcpy(image + proglen, temp, ilen);
		}
		proglen += ilen;
		addrs[i] = proglen;
		prog = temp;
	}
	return proglen;
}
void bpf_jit_compile(struct bpf_prog *prog)
{
}
void bpf_int_jit_compile(struct bpf_prog *prog)
{
	struct bpf_binary_header *header = NULL;
	int proglen, oldproglen = 0;
	struct jit_context ctx = {};
	u8 *image = NULL;
	int *addrs;
	int pass;
	int i;

	if (!bpf_jit_enable)
		return;

	if (!prog || !prog->len)
		return;

	addrs = kmalloc(prog->len * sizeof(*addrs), GFP_KERNEL);
	if (!addrs)
		return;

	/* Before first pass, make a rough estimation of addrs[]
	 * each bpf instruction is translated to less than 64 bytes
	 */
	for (proglen = 0, i = 0; i < prog->len; i++) {
		proglen += 64;
		addrs[i] = proglen;
	}
	ctx.cleanup_addr = proglen;

	/* JITed image shrinks with every pass and the loop iterates
	 * until the image stops shrinking. Very large bpf programs
	 * may converge on the last pass. In such case do one more
	 * pass to emit the final image
	 */
	for (pass = 0; pass < 10 || image; pass++) {
		proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
		if (proglen <= 0) {
			image = NULL;
			if (header)
				bpf_jit_binary_free(header);
			goto out;
		}
		if (image) {
			if (proglen != oldproglen) {
				pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
				       proglen, oldproglen);
				goto out;
			}
			break;
		}
		if (proglen == oldproglen) {
			header = bpf_jit_binary_alloc(proglen, &image,
						      1, jit_fill_hole);
			if (!header)
				goto out;
		}
		oldproglen = proglen;
	}

	if (bpf_jit_enable > 1)
		bpf_jit_dump(prog->len, proglen, 0, image);

	if (image) {
		bpf_flush_icache(header, image + proglen);
		set_memory_ro((unsigned long)header, header->pages);
		prog->bpf_func = (void *)image;
		prog->jited = true;
	}
out:
	kfree(addrs);
}
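/* Note on convergence: pass 0 sizes every insn at the worst-case 64 bytes;
 * each later pass shrinks branches that now fit in their rel8 form. Once two
 * consecutive passes produce the same proglen, the addrs[] table is stable,
 * the image is allocated, and one final pass writes the actual bytes.
 */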
void bpf_jit_free(struct bpf_prog *fp)
{
	unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
	struct bpf_binary_header *header = (void *)addr;

	if (!fp->jited)
		goto free_filter;

	set_memory_rw(addr, header->pages);
	bpf_jit_binary_free(header);

free_filter:
	bpf_prog_unlock_free(fp);
}