/* bpf_jit_comp.c : BPF JIT compiler
 *
 * Copyright (C) 2011 Eric Dumazet (eric.dumazet@gmail.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
#include <linux/netdevice.h>
#include <linux/filter.h>

/*
 * Conventions :
 *  EAX : BPF A accumulator
 *  EBX : BPF X accumulator
 *  RDI : pointer to skb   (first argument given to JIT function)
 *  RBP : frame pointer (even if CONFIG_FRAME_POINTER=n)
 *  ECX,EDX,ESI : scratch registers
 *  r9d : skb->len - skb->data_len (headlen)
 *  r8  : skb->data
 * -8(RBP) : saved RBX value
 * -16(RBP)..-80(RBP) : BPF_MEMWORDS values
 */
int bpf_jit_enable __read_mostly;

/*
 * assembly code in arch/x86/net/bpf_jit.S
 */
extern u8 sk_load_word[], sk_load_half[], sk_load_byte[], sk_load_byte_msh[];
extern u8 sk_load_word_ind[], sk_load_half_ind[], sk_load_byte_ind[];

static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
{
	if (len == 1)
		*ptr = bytes;
	else if (len == 2)
		*(u16 *)ptr = bytes;
	else {
		*(u32 *)ptr = bytes;
		barrier();
	}
	return ptr + len;
}

#define EMIT(bytes, len)	do { prog = emit_code(prog, bytes, len); } while (0)

#define EMIT1(b1)		EMIT(b1, 1)
#define EMIT2(b1, b2)		EMIT((b1) + ((b2) << 8), 2)
#define EMIT3(b1, b2, b3)	EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
#define EMIT4(b1, b2, b3, b4)	EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
#define EMIT1_off32(b1, off)	do { EMIT1(b1); EMIT(off, 4); } while (0)

#define CLEAR_A() EMIT2(0x31, 0xc0) /* xor %eax,%eax */
#define CLEAR_X() EMIT2(0x31, 0xdb) /* xor %ebx,%ebx */

static inline bool is_imm8(int value)
{
	return value <= 127 && value >= -128;
}

static inline bool is_near(int offset)
{
	return offset <= 127 && offset >= -128;
}

#define EMIT_JMP(offset)						\
do {									\
	if (offset) {							\
		if (is_near(offset))					\
			EMIT2(0xeb, offset); /* jmp .+off8 */		\
		else							\
			EMIT1_off32(0xe9, offset); /* jmp .+off32 */	\
	}								\
} while (0)

/* List of x86 conditional jump opcodes (short form, . + s8).
 * Add 0x10 (and an extra 0x0f prefix byte) to get the 32-bit form (. + s32).
 */
#define X86_JB  0x72
#define X86_JAE 0x73
#define X86_JE  0x74
#define X86_JNE 0x75
#define X86_JBE 0x76
#define X86_JA  0x77

#define EMIT_COND_JMP(op, offset)				\
do {								\
	if (is_near(offset))					\
		EMIT2(op, offset); /* jxx .+off8 */		\
	else {							\
		EMIT2(0x0f, op + 0x10);				\
		EMIT(offset, 4); /* jxx .+off32 */		\
	}							\
} while (0)

#define COND_SEL(CODE, TOP, FOP)	\
	case CODE:			\
		t_op = TOP;		\
		f_op = FOP;		\
		goto cond_branch


#define SEEN_DATAREF 1 /* might call external helpers */
#define SEEN_XREG    2 /* ebx is used */
#define SEEN_MEM     4 /* use mem[] for temporary storage */
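
/* The SEEN_* flags are accumulated while emitting code; on the next pass
 * they tell the prologue/epilogue what is actually needed: whether a stack
 * frame and an %rbx save are required at all, and whether %r8/%r9d must be
 * preloaded with skb->data and the header length (SEEN_DATAREF).
 */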
109 | ||
110 | static inline void bpf_flush_icache(void *start, void *end) | |
111 | { | |
112 | mm_segment_t old_fs = get_fs(); | |
113 | ||
114 | set_fs(KERNEL_DS); | |
115 | smp_wmb(); | |
116 | flush_icache_range((unsigned long)start, (unsigned long)end); | |
117 | set_fs(old_fs); | |
118 | } | |
119 | ||
120 | ||
void bpf_jit_compile(struct sk_filter *fp)
{
	u8 temp[64];
	u8 *prog;
	unsigned int proglen, oldproglen = 0;
	int ilen, i;
	int t_offset, f_offset;
	u8 t_op, f_op, seen = 0, pass;
	u8 *image = NULL;
	u8 *func;
	int pc_ret0 = -1; /* bpf index of first RET #0 instruction (if any) */
	unsigned int cleanup_addr; /* epilogue code offset */
	unsigned int *addrs;
	const struct sock_filter *filter = fp->insns;
	int flen = fp->len;

	if (!bpf_jit_enable)
		return;

	addrs = kmalloc(flen * sizeof(*addrs), GFP_KERNEL);
	if (addrs == NULL)
		return;

	/* Before the first pass, make a rough estimate of addrs[]:
	 * each BPF instruction is translated to less than 64 bytes.
	 */
	for (proglen = 0, i = 0; i < flen; i++) {
		proglen += 64;
		addrs[i] = proglen;
	}
	cleanup_addr = proglen; /* epilogue address */

	for (pass = 0; pass < 10; pass++) {
		/* no prologue/epilogue for trivial filters (RET something) */
		proglen = 0;
		prog = temp;

		if (seen) {
			EMIT4(0x55, 0x48, 0x89, 0xe5); /* push %rbp; mov %rsp,%rbp */
			EMIT4(0x48, 0x83, 0xec, 96);	/* subq $96,%rsp */
			/* note : must save %rbx in case bpf_error is hit */
			if (seen & (SEEN_XREG | SEEN_DATAREF))
				EMIT4(0x48, 0x89, 0x5d, 0xf8); /* mov %rbx, -8(%rbp) */
			if (seen & SEEN_XREG)
				CLEAR_X(); /* make sure we don't leak kernel memory */

			/*
			 * If this filter needs to access skb data,
			 * load %r9 and %r8 with:
			 *  r9 = skb->len - skb->data_len
			 *  r8 = skb->data
			 */
			if (seen & SEEN_DATAREF) {
				if (offsetof(struct sk_buff, len) <= 127)
					/* mov off8(%rdi),%r9d */
					EMIT4(0x44, 0x8b, 0x4f, offsetof(struct sk_buff, len));
				else {
					/* mov off32(%rdi),%r9d */
					EMIT3(0x44, 0x8b, 0x8f);
					EMIT(offsetof(struct sk_buff, len), 4);
				}
				if (is_imm8(offsetof(struct sk_buff, data_len)))
					/* sub off8(%rdi),%r9d */
					EMIT4(0x44, 0x2b, 0x4f, offsetof(struct sk_buff, data_len));
				else {
					EMIT3(0x44, 0x2b, 0x8f);
					EMIT(offsetof(struct sk_buff, data_len), 4);
				}

				if (is_imm8(offsetof(struct sk_buff, data)))
					/* mov off8(%rdi),%r8 */
					EMIT4(0x4c, 0x8b, 0x47, offsetof(struct sk_buff, data));
				else {
					/* mov off32(%rdi),%r8 */
					EMIT3(0x4c, 0x8b, 0x87);
					EMIT(offsetof(struct sk_buff, data), 4);
				}
			}
		}
200 | ||
201 | switch (filter[0].code) { | |
202 | case BPF_S_RET_K: | |
203 | case BPF_S_LD_W_LEN: | |
204 | case BPF_S_ANC_PROTOCOL: | |
205 | case BPF_S_ANC_IFINDEX: | |
206 | case BPF_S_ANC_MARK: | |
207 | case BPF_S_ANC_RXHASH: | |
208 | case BPF_S_ANC_CPU: | |
209 | case BPF_S_ANC_QUEUE: | |
210 | case BPF_S_LD_W_ABS: | |
211 | case BPF_S_LD_H_ABS: | |
212 | case BPF_S_LD_B_ABS: | |
213 | /* first instruction sets A register (or is RET 'constant') */ | |
214 | break; | |
215 | default: | |
216 | /* make sure we dont leak kernel information to user */ | |
217 | CLEAR_A(); /* A = 0 */ | |
218 | } | |
219 | ||
		for (i = 0; i < flen; i++) {
			unsigned int K = filter[i].k;

			switch (filter[i].code) {
			case BPF_S_ALU_ADD_X: /* A += X; */
				seen |= SEEN_XREG;
				EMIT2(0x01, 0xd8);	/* add %ebx,%eax */
				break;
			case BPF_S_ALU_ADD_K: /* A += K; */
				if (!K)
					break;
				if (is_imm8(K))
					EMIT3(0x83, 0xc0, K);	/* add imm8,%eax */
				else
					EMIT1_off32(0x05, K);	/* add imm32,%eax */
				break;
			case BPF_S_ALU_SUB_X: /* A -= X; */
				seen |= SEEN_XREG;
				EMIT2(0x29, 0xd8);	/* sub %ebx,%eax */
				break;
			case BPF_S_ALU_SUB_K: /* A -= K */
				if (!K)
					break;
				if (is_imm8(K))
					EMIT3(0x83, 0xe8, K);	/* sub imm8,%eax */
				else
					EMIT1_off32(0x2d, K);	/* sub imm32,%eax */
				break;
			case BPF_S_ALU_MUL_X: /* A *= X; */
				seen |= SEEN_XREG;
				EMIT3(0x0f, 0xaf, 0xc3);	/* imul %ebx,%eax */
				break;
			case BPF_S_ALU_MUL_K: /* A *= K */
				if (is_imm8(K))
					EMIT3(0x6b, 0xc0, K);	/* imul imm8,%eax,%eax */
				else {
					EMIT2(0x69, 0xc0);	/* imul imm32,%eax */
					EMIT(K, 4);
				}
				break;
			case BPF_S_ALU_DIV_X: /* A /= X; */
				seen |= SEEN_XREG;
				EMIT2(0x85, 0xdb);	/* test %ebx,%ebx */
				if (pc_ret0 > 0) {
					/* addrs[pc_ret0 - 1] is the start address
					 * of the RET 0 instruction (addrs[] holds
					 * end offsets); (addrs[i] - 4) is the
					 * address following this jump, the
					 * "xor %edx,%edx; div %ebx" being 4 bytes.
					 */
					EMIT_COND_JMP(X86_JE, addrs[pc_ret0 - 1] -
								(addrs[i] - 4));
				} else {
					EMIT_COND_JMP(X86_JNE, 2 + 5);
					CLEAR_A();
					EMIT1_off32(0xe9, cleanup_addr - (addrs[i] - 4)); /* jmp .+off32 */
				}
				EMIT4(0x31, 0xd2, 0xf7, 0xf3); /* xor %edx,%edx; div %ebx */
				break;
			case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K); */
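				/* K is assumed to already hold the precomputed
				 * reciprocal of the original divisor (see
				 * reciprocal_divide()); the imul + shr pair below
				 * keeps the upper 32 bits of the 64-bit product,
				 * which yields A / divisor.
				 */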
				EMIT3(0x48, 0x69, 0xc0); /* imul imm32,%rax,%rax */
				EMIT(K, 4);
				EMIT4(0x48, 0xc1, 0xe8, 0x20); /* shr $0x20,%rax */
				break;
			case BPF_S_ALU_AND_X:
				seen |= SEEN_XREG;
				EMIT2(0x21, 0xd8);	/* and %ebx,%eax */
				break;
			case BPF_S_ALU_AND_K:
				if (K >= 0xFFFFFF00) {
					/* upper 24 bits of K are all ones, so
					 * ANDing only %al is equivalent
					 */
					EMIT2(0x24, K & 0xFF);	/* and imm8,%al */
				} else if (K >= 0xFFFF0000) {
					EMIT2(0x66, 0x25);	/* and imm16,%ax */
					EMIT(K, 2);
				} else {
					EMIT1_off32(0x25, K);	/* and imm32,%eax */
				}
				break;
			case BPF_S_ALU_OR_X:
				seen |= SEEN_XREG;
				EMIT2(0x09, 0xd8);	/* or %ebx,%eax */
				break;
			case BPF_S_ALU_OR_K:
				if (is_imm8(K))
					EMIT3(0x83, 0xc8, K);	/* or imm8,%eax */
				else
					EMIT1_off32(0x0d, K);	/* or imm32,%eax */
				break;
			case BPF_S_ALU_LSH_X: /* A <<= X; */
				seen |= SEEN_XREG;
				EMIT4(0x89, 0xd9, 0xd3, 0xe0);	/* mov %ebx,%ecx; shl %cl,%eax */
				break;
			case BPF_S_ALU_LSH_K:
				if (K == 0)
					break;
				else if (K == 1)
					EMIT2(0xd1, 0xe0);	/* shl %eax */
				else
					EMIT3(0xc1, 0xe0, K);
				break;
			case BPF_S_ALU_RSH_X: /* A >>= X; */
				seen |= SEEN_XREG;
				EMIT4(0x89, 0xd9, 0xd3, 0xe8);	/* mov %ebx,%ecx; shr %cl,%eax */
				break;
			case BPF_S_ALU_RSH_K: /* A >>= K; */
				if (K == 0)
					break;
				else if (K == 1)
					EMIT2(0xd1, 0xe8);	/* shr %eax */
				else
					EMIT3(0xc1, 0xe8, K);
				break;
			case BPF_S_ALU_NEG:
				EMIT2(0xf7, 0xd8);	/* neg %eax */
				break;
			case BPF_S_RET_K:
				if (!K) {
					if (pc_ret0 == -1)
						pc_ret0 = i;
					CLEAR_A();
				} else {
					EMIT1_off32(0xb8, K);	/* mov $imm32,%eax */
				}
				/* fall through */
			case BPF_S_RET_A:
				if (seen) {
					if (i != flen - 1) {
						EMIT_JMP(cleanup_addr - addrs[i]);
						break;
					}
					if (seen & SEEN_XREG)
						EMIT4(0x48, 0x8b, 0x5d, 0xf8);	/* mov -8(%rbp),%rbx */
					EMIT1(0xc9);	/* leaveq */
				}
				EMIT1(0xc3);	/* ret */
				break;
			case BPF_S_MISC_TAX: /* X = A */
				seen |= SEEN_XREG;
				EMIT2(0x89, 0xc3);	/* mov %eax,%ebx */
				break;
			case BPF_S_MISC_TXA: /* A = X */
				seen |= SEEN_XREG;
				EMIT2(0x89, 0xd8);	/* mov %ebx,%eax */
				break;
			case BPF_S_LD_IMM: /* A = K */
				if (!K)
					CLEAR_A();
				else
					EMIT1_off32(0xb8, K);	/* mov $imm32,%eax */
				break;
			case BPF_S_LDX_IMM: /* X = K */
				seen |= SEEN_XREG;
				if (!K)
					CLEAR_X();
				else
					EMIT1_off32(0xbb, K);	/* mov $imm32,%ebx */
				break;
			case BPF_S_LD_MEM: /* A = mem[K] : mov off8(%rbp),%eax */
				seen |= SEEN_MEM;
				EMIT3(0x8b, 0x45, 0xf0 - K*4);
				break;
			case BPF_S_LDX_MEM: /* X = mem[K] : mov off8(%rbp),%ebx */
				seen |= SEEN_XREG | SEEN_MEM;
				EMIT3(0x8b, 0x5d, 0xf0 - K*4);
				break;
			case BPF_S_ST: /* mem[K] = A : mov %eax,off8(%rbp) */
				seen |= SEEN_MEM;
				EMIT3(0x89, 0x45, 0xf0 - K*4);
				break;
			case BPF_S_STX: /* mem[K] = X : mov %ebx,off8(%rbp) */
				seen |= SEEN_XREG | SEEN_MEM;
				EMIT3(0x89, 0x5d, 0xf0 - K*4);
				break;
			case BPF_S_LD_W_LEN: /* A = skb->len; */
				BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
				if (is_imm8(offsetof(struct sk_buff, len)))
					/* mov off8(%rdi),%eax */
					EMIT3(0x8b, 0x47, offsetof(struct sk_buff, len));
				else {
					EMIT2(0x8b, 0x87);
					EMIT(offsetof(struct sk_buff, len), 4);
				}
				break;
			case BPF_S_LDX_W_LEN: /* X = skb->len; */
				seen |= SEEN_XREG;
				if (is_imm8(offsetof(struct sk_buff, len)))
					/* mov off8(%rdi),%ebx */
					EMIT3(0x8b, 0x5f, offsetof(struct sk_buff, len));
				else {
					EMIT2(0x8b, 0x9f);
					EMIT(offsetof(struct sk_buff, len), 4);
				}
				break;
			case BPF_S_ANC_PROTOCOL: /* A = ntohs(skb->protocol); */
				BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);
				if (is_imm8(offsetof(struct sk_buff, protocol))) {
					/* movzwl off8(%rdi),%eax */
					EMIT4(0x0f, 0xb7, 0x47, offsetof(struct sk_buff, protocol));
				} else {
					EMIT3(0x0f, 0xb7, 0x87); /* movzwl off32(%rdi),%eax */
					EMIT(offsetof(struct sk_buff, protocol), 4);
				}
				EMIT2(0x86, 0xc4); /* ntohs() : xchg %al,%ah */
				break;
			case BPF_S_ANC_IFINDEX:
				if (is_imm8(offsetof(struct sk_buff, dev))) {
					/* movq off8(%rdi),%rax */
					EMIT4(0x48, 0x8b, 0x47, offsetof(struct sk_buff, dev));
				} else {
					EMIT3(0x48, 0x8b, 0x87); /* movq off32(%rdi),%rax */
					EMIT(offsetof(struct sk_buff, dev), 4);
				}
				EMIT3(0x48, 0x85, 0xc0);	/* test %rax,%rax */
				EMIT_COND_JMP(X86_JE, cleanup_addr - (addrs[i] - 6));
				BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
				EMIT2(0x8b, 0x80);	/* mov off32(%rax),%eax */
				EMIT(offsetof(struct net_device, ifindex), 4);
				break;
			case BPF_S_ANC_MARK:
				BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
				if (is_imm8(offsetof(struct sk_buff, mark))) {
					/* mov off8(%rdi),%eax */
					EMIT3(0x8b, 0x47, offsetof(struct sk_buff, mark));
				} else {
					EMIT2(0x8b, 0x87);
					EMIT(offsetof(struct sk_buff, mark), 4);
				}
				break;
			case BPF_S_ANC_RXHASH:
				BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, rxhash) != 4);
				if (is_imm8(offsetof(struct sk_buff, rxhash))) {
					/* mov off8(%rdi),%eax */
					EMIT3(0x8b, 0x47, offsetof(struct sk_buff, rxhash));
				} else {
					EMIT2(0x8b, 0x87);
					EMIT(offsetof(struct sk_buff, rxhash), 4);
				}
				break;
			case BPF_S_ANC_QUEUE:
				BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2);
				if (is_imm8(offsetof(struct sk_buff, queue_mapping))) {
					/* movzwl off8(%rdi),%eax */
					EMIT4(0x0f, 0xb7, 0x47, offsetof(struct sk_buff, queue_mapping));
				} else {
					EMIT3(0x0f, 0xb7, 0x87); /* movzwl off32(%rdi),%eax */
					EMIT(offsetof(struct sk_buff, queue_mapping), 4);
				}
				break;
			case BPF_S_ANC_CPU:
#ifdef CONFIG_SMP
				EMIT4(0x65, 0x8b, 0x04, 0x25); /* mov %gs:off32,%eax */
				EMIT((u32)(unsigned long)&cpu_number, 4); /* A = smp_processor_id(); */
#else
				CLEAR_A();
#endif
				break;
			case BPF_S_LD_W_ABS:
				func = sk_load_word;
common_load:			seen |= SEEN_DATAREF;
				if ((int)K < 0)
					goto out;
				t_offset = func - (image + addrs[i]);
				EMIT1_off32(0xbe, K);	/* mov imm32,%esi */
				EMIT1_off32(0xe8, t_offset); /* call */
				break;
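			/* A note on the helper calling convention assumed here:
			 * the sk_load_* routines in bpf_jit.S receive the offset
			 * in %esi, use the %rdi/%r8/%r9d values set up by the
			 * prologue, and return the loaded value in %eax; on an
			 * out-of-range access they branch to bpf_error, which is
			 * why %rbx must be saved whenever SEEN_DATAREF is set
			 * (see the prologue note above).
			 */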
			case BPF_S_LD_H_ABS:
				func = sk_load_half;
				goto common_load;
			case BPF_S_LD_B_ABS:
				func = sk_load_byte;
				goto common_load;
			case BPF_S_LDX_B_MSH:
				if ((int)K < 0) {
					if (pc_ret0 > 0) {
						/* addrs[pc_ret0 - 1] is the start
						 * address of the RET 0 instruction
						 */
						EMIT_JMP(addrs[pc_ret0 - 1] - addrs[i]);
						break;
					}
					CLEAR_A();
					EMIT_JMP(cleanup_addr - addrs[i]);
					break;
				}
				seen |= SEEN_DATAREF | SEEN_XREG;
				t_offset = sk_load_byte_msh - (image + addrs[i]);
				EMIT1_off32(0xbe, K);	/* mov imm32,%esi */
				EMIT1_off32(0xe8, t_offset); /* call sk_load_byte_msh */
				break;
			case BPF_S_LD_W_IND:
				func = sk_load_word_ind;
common_load_ind:		seen |= SEEN_DATAREF | SEEN_XREG;
				t_offset = func - (image + addrs[i]);
				EMIT1_off32(0xbe, K);	/* mov imm32,%esi */
				EMIT1_off32(0xe8, t_offset);	/* call sk_load_xxx_ind */
				break;
			case BPF_S_LD_H_IND:
				func = sk_load_half_ind;
				goto common_load_ind;
			case BPF_S_LD_B_IND:
				func = sk_load_byte_ind;
				goto common_load_ind;
			case BPF_S_JMP_JA:
				t_offset = addrs[i + K] - addrs[i];
				EMIT_JMP(t_offset);
				break;
			COND_SEL(BPF_S_JMP_JGT_K, X86_JA, X86_JBE);
			COND_SEL(BPF_S_JMP_JGE_K, X86_JAE, X86_JB);
			COND_SEL(BPF_S_JMP_JEQ_K, X86_JE, X86_JNE);
			COND_SEL(BPF_S_JMP_JSET_K, X86_JNE, X86_JE);
			COND_SEL(BPF_S_JMP_JGT_X, X86_JA, X86_JBE);
			COND_SEL(BPF_S_JMP_JGE_X, X86_JAE, X86_JB);
			COND_SEL(BPF_S_JMP_JEQ_X, X86_JE, X86_JNE);
			COND_SEL(BPF_S_JMP_JSET_X, X86_JNE, X86_JE);

cond_branch:			f_offset = addrs[i + filter[i].jf] - addrs[i];
				t_offset = addrs[i + filter[i].jt] - addrs[i];

				/* same targets, can avoid doing the test :) */
				if (filter[i].jt == filter[i].jf) {
					EMIT_JMP(t_offset);
					break;
				}

				switch (filter[i].code) {
				case BPF_S_JMP_JGT_X:
				case BPF_S_JMP_JGE_X:
				case BPF_S_JMP_JEQ_X:
					seen |= SEEN_XREG;
					EMIT2(0x39, 0xd8); /* cmp %ebx,%eax */
					break;
				case BPF_S_JMP_JSET_X:
					seen |= SEEN_XREG;
					EMIT2(0x85, 0xd8); /* test %ebx,%eax */
					break;
				case BPF_S_JMP_JEQ_K:
					if (K == 0) {
						EMIT2(0x85, 0xc0); /* test %eax,%eax */
						break;
					}
					/* fall through */
				case BPF_S_JMP_JGT_K:
				case BPF_S_JMP_JGE_K:
					if (K <= 127)
						EMIT3(0x83, 0xf8, K); /* cmp imm8,%eax */
					else
						EMIT1_off32(0x3d, K); /* cmp imm32,%eax */
					break;
				case BPF_S_JMP_JSET_K:
					if (K <= 0xFF)
						EMIT2(0xa8, K); /* test imm8,%al */
					else if (!(K & 0xFFFF00FF))
						EMIT3(0xf6, 0xc4, K >> 8); /* test imm8,%ah */
					else if (K <= 0xFFFF) {
						EMIT2(0x66, 0xa9); /* test imm16,%ax */
						EMIT(K, 2);
					} else {
						EMIT1_off32(0xa9, K); /* test imm32,%eax */
					}
					break;
				}
				if (filter[i].jt != 0) {
					/* the taken-branch target must skip the
					 * unconditional jump emitted just after it:
					 * 2 bytes (short form) or 5 (0xe9 + rel32)
					 */
					if (filter[i].jf && f_offset)
						t_offset += is_near(f_offset) ? 2 : 5;
					EMIT_COND_JMP(t_op, t_offset);
					if (filter[i].jf)
						EMIT_JMP(f_offset);
					break;
				}
				EMIT_COND_JMP(f_op, f_offset);
				break;
			default:
				/* hmm, too complex filter, give up with jit compiler */
				goto out;
			}
			ilen = prog - temp;
			if (image) {
				if (unlikely(proglen + ilen > oldproglen)) {
					pr_err("bpf_jit_compile fatal error\n");
					kfree(addrs);
					module_free(NULL, image);
					return;
				}
				memcpy(image + proglen, temp, ilen);
			}
			proglen += ilen;
			addrs[i] = proglen;
			prog = temp;
		}
		/* last bpf instruction is always a RET :
		 * use it to give the cleanup instruction(s) addr
		 */
		cleanup_addr = proglen - 1; /* ret */
		if (seen)
			cleanup_addr -= 1; /* leaveq */
		if (seen & SEEN_XREG)
			cleanup_addr -= 4; /* mov -8(%rbp),%rbx */
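		/* cleanup_addr now points at the first epilogue byte: e.g. with
		 * SEEN_XREG set the epilogue is "mov -8(%rbp),%rbx; leaveq; ret"
		 * (4 + 1 + 1 bytes), so cleanup_addr = proglen - 6.
		 */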
606 | ||
607 | if (image) { | |
608 | WARN_ON(proglen != oldproglen); | |
609 | break; | |
610 | } | |
611 | if (proglen == oldproglen) { | |
612 | image = module_alloc(max_t(unsigned int, | |
613 | proglen, | |
614 | sizeof(struct work_struct))); | |
615 | if (!image) | |
616 | goto out; | |
617 | } | |
618 | oldproglen = proglen; | |
619 | } | |
620 | if (bpf_jit_enable > 1) | |
621 | pr_err("flen=%d proglen=%u pass=%d image=%p\n", | |
622 | flen, proglen, pass, image); | |
623 | ||
	if (image) {
		if (bpf_jit_enable > 1)
			print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_ADDRESS,
				       16, 1, image, proglen, false);

		bpf_flush_icache(image, image + proglen);

		fp->bpf_func = (void *)image;
	}
out:
	kfree(addrs);
	return;
}

static void jit_free_defer(struct work_struct *arg)
{
	module_free(NULL, arg);
}

/* bpf_jit_free() can be run from softirq context, so we must defer
 * the module_free() call to process context via a work_struct.
 */
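/* Note: the image was allocated with at least sizeof(struct work_struct)
 * bytes (see the module_alloc() call in bpf_jit_compile()), so the image
 * itself can be reused in place as the work item.
 */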
void bpf_jit_free(struct sk_filter *fp)
{
	if (fp->bpf_func != sk_run_filter) {
		struct work_struct *work = (struct work_struct *)fp->bpf_func;

		INIT_WORK(work, jit_free_defer);
		schedule_work(work);
	}
}