kernel/bpf/core.c
/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Based on the design of the Berkeley Packet Filter. The new
 * internal format has been designed by PLUMgrid:
 *
 * Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
 *
 * Authors:
 *
 *	Jay Schulist <jschlst@samba.org>
 *	Alexei Starovoitov <ast@plumgrid.com>
 *	Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
 */

#include <linux/filter.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/moduleloader.h>
#include <linux/bpf.h>
#include <linux/frame.h>

#include <asm/unaligned.h>

/* Registers */
#define BPF_R0	regs[BPF_REG_0]
#define BPF_R1	regs[BPF_REG_1]
#define BPF_R2	regs[BPF_REG_2]
#define BPF_R3	regs[BPF_REG_3]
#define BPF_R4	regs[BPF_REG_4]
#define BPF_R5	regs[BPF_REG_5]
#define BPF_R6	regs[BPF_REG_6]
#define BPF_R7	regs[BPF_REG_7]
#define BPF_R8	regs[BPF_REG_8]
#define BPF_R9	regs[BPF_REG_9]
#define BPF_R10	regs[BPF_REG_10]

/* Named registers */
#define DST	regs[insn->dst_reg]
#define SRC	regs[insn->src_reg]
#define FP	regs[BPF_REG_FP]
#define ARG1	regs[BPF_REG_ARG1]
#define CTX	regs[BPF_REG_CTX]
#define IMM	insn->imm

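/* A brief illustration (not part of the original interface): with the
 * shorthand above, an instruction encoded as BPF_ALU64 | BPF_ADD | BPF_X
 * executes in the interpreter as DST = DST + SRC, which expands to
 *
 *	regs[insn->dst_reg] += regs[insn->src_reg];
 *
 * The macros merely let __bpf_prog_run() below read close to the
 * instruction semantics; they rely on the local 'regs' array and
 * 'insn' pointer being in scope.
 */
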
/* No hurry in this branch
 *
 * Exported for the bpf jit load helper.
 */
void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
{
	u8 *ptr = NULL;

	if (k >= SKF_NET_OFF)
		ptr = skb_network_header(skb) + k - SKF_NET_OFF;
	else if (k >= SKF_LL_OFF)
		ptr = skb_mac_header(skb) + k - SKF_LL_OFF;

	if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
		return ptr;

	return NULL;
}
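
/* For illustration, a filter that reads the first byte of the MAC
 * header would go through the negative-offset space, roughly as in
 * this sketch (assuming an skb with a linear Ethernet header):
 *
 *	u8 *p = bpf_internal_load_pointer_neg_helper(skb, SKF_LL_OFF + 0, 1);
 *
 * p then points at the start of the link-layer header, or is NULL if
 * the requested range falls outside the linear skb data.
 */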

struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO |
			  gfp_extra_flags;
	struct bpf_prog_aux *aux;
	struct bpf_prog *fp;

	size = round_up(size, PAGE_SIZE);
	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
	if (fp == NULL)
		return NULL;

	kmemcheck_annotate_bitfield(fp, meta);

	aux = kzalloc(sizeof(*aux), GFP_KERNEL | gfp_extra_flags);
	if (aux == NULL) {
		vfree(fp);
		return NULL;
	}

	fp->pages = size / PAGE_SIZE;
	fp->aux = aux;
	fp->aux->prog = fp;

	return fp;
}
EXPORT_SYMBOL_GPL(bpf_prog_alloc);
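
/* A minimal usage sketch (error handling elided), modelled on the
 * program load path: the allocation is sized for the instruction
 * array via bpf_prog_size(), then filled in by the caller:
 *
 *	struct bpf_prog *fp = bpf_prog_alloc(bpf_prog_size(insn_cnt), GFP_USER);
 *
 *	if (fp) {
 *		fp->len = insn_cnt;
 *		memcpy(fp->insnsi, insns, insn_cnt * sizeof(struct bpf_insn));
 *	}
 */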

struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
				  gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO |
			  gfp_extra_flags;
	struct bpf_prog *fp;

	BUG_ON(fp_old == NULL);

	size = round_up(size, PAGE_SIZE);
	if (size <= fp_old->pages * PAGE_SIZE)
		return fp_old;

	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
	if (fp != NULL) {
		kmemcheck_annotate_bitfield(fp, meta);

		memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
		fp->pages = size / PAGE_SIZE;
		fp->aux->prog = fp;

		/* We keep fp->aux from fp_old around in the new
		 * reallocated structure.
		 */
		fp_old->aux = NULL;
		__bpf_prog_free(fp_old);
	}

	return fp;
}

void __bpf_prog_free(struct bpf_prog *fp)
{
	kfree(fp->aux);
	vfree(fp);
}

static bool bpf_is_jmp_and_has_target(const struct bpf_insn *insn)
{
	return BPF_CLASS(insn->code) == BPF_JMP &&
	       /* Call and Exit are both special jumps with no
		* target inside the BPF instruction image.
		*/
	       BPF_OP(insn->code) != BPF_CALL &&
	       BPF_OP(insn->code) != BPF_EXIT;
}

static void bpf_adj_branches(struct bpf_prog *prog, u32 pos, u32 delta)
{
	struct bpf_insn *insn = prog->insnsi;
	u32 i, insn_cnt = prog->len;

	for (i = 0; i < insn_cnt; i++, insn++) {
		if (!bpf_is_jmp_and_has_target(insn))
			continue;

		/* Adjust offset of jmps if we cross boundaries. */
		if (i < pos && i + insn->off + 1 > pos)
			insn->off += delta;
		else if (i > pos + delta && i + insn->off + 1 <= pos + delta)
			insn->off -= delta;
	}
}
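
/* Worked example: suppose the insn at position 2 is replaced by three
 * insns (pos == 2, delta == 2) and insn 0 is "ja +3", i.e. it used to
 * land on position 4. The jump crosses the patch site
 * (0 < pos && 0 + 3 + 1 > pos), so its offset grows to +5 and it
 * still lands on the same logical instruction, now at position 6.
 */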

struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
				       const struct bpf_insn *patch, u32 len)
{
	u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
	struct bpf_prog *prog_adj;

	/* Since our patchlet doesn't expand the image, we're done. */
	if (insn_delta == 0) {
		memcpy(prog->insnsi + off, patch, sizeof(*patch));
		return prog;
	}

	insn_adj_cnt = prog->len + insn_delta;

	/* Several new instructions need to be inserted. Make room
	 * for them. Likely, there's no need for a new allocation as
	 * last page could have large enough tailroom.
	 */
	prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt),
				    GFP_USER);
	if (!prog_adj)
		return NULL;

	prog_adj->len = insn_adj_cnt;

	/* Patching happens in 3 steps:
	 *
	 * 1) Move over tail of insnsi from next instruction onwards,
	 *    so we can patch the single target insn with one or more
	 *    new ones (patching is always from 1 to n insns, n > 0).
	 * 2) Inject new instructions at the target location.
	 * 3) Adjust branch offsets if necessary.
	 */
	insn_rest = insn_adj_cnt - off - len;

	memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1,
		sizeof(*patch) * insn_rest);
	memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);

	bpf_adj_branches(prog_adj, off, insn_delta);

	return prog_adj;
}
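
/* Usage sketch (hypothetical call site): to expand insn 'i' of 'prog'
 * into the two instructions in patch[]:
 *
 *	struct bpf_insn patch[] = {
 *		BPF_MOV64_REG(BPF_REG_AX, BPF_REG_1),
 *		BPF_ALU64_IMM(BPF_ADD, BPF_REG_AX, 4),
 *	};
 *	struct bpf_prog *new_prog;
 *
 *	new_prog = bpf_patch_insn_single(prog, i, patch, ARRAY_SIZE(patch));
 *	if (!new_prog)
 *		return -ENOMEM;
 *
 * On success with len > 1, the old prog must not be used anymore: it
 * has been reallocated and freed underneath the caller.
 */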

#ifdef CONFIG_BPF_JIT
struct bpf_binary_header *
bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
		     unsigned int alignment,
		     bpf_jit_fill_hole_t bpf_fill_ill_insns)
{
	struct bpf_binary_header *hdr;
	unsigned int size, hole, start;

	/* Most BPF filters are really small, but if some of them
	 * fill a page, allow at least 128 extra bytes to insert a
	 * random section of illegal instructions.
	 */
	size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
	hdr = module_alloc(size);
	if (hdr == NULL)
		return NULL;

	/* Fill space with illegal/arch-dep instructions. */
	bpf_fill_ill_insns(hdr, size);

	hdr->pages = size / PAGE_SIZE;
	hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
		     PAGE_SIZE - sizeof(*hdr));
	start = (get_random_int() % hole) & ~(alignment - 1);

	/* Leave a random number of instructions before BPF code. */
	*image_ptr = &hdr->image[start];

	return hdr;
}
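
/* Resulting layout, for illustration:
 *
 *	hdr              *image_ptr
 *	 |                   |
 *	 v                   v
 *	 +--------+----------+--------------+-----------+
 *	 | header | hole     | JITed image  | tail room |
 *	 +--------+----------+--------------+-----------+
 *	          (random,                   (illegal
 *	           aligned start)             insns)
 *
 * The random, alignment-rounded start offset makes the exact location
 * of the JITed code harder to guess, while everything around it stays
 * filled with illegal instructions to trap stray execution.
 */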

void bpf_jit_binary_free(struct bpf_binary_header *hdr)
{
	module_memfree(hdr);
}

int bpf_jit_harden __read_mostly;
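
/* bpf_jit_harden is wired up to the net.core.bpf_jit_harden sysctl:
 * 0 disables constant blinding, 1 enables it for unprivileged users
 * only, 2 for all users (cf. bpf_jit_blinding_enabled()).
 */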

static int bpf_jit_blind_insn(const struct bpf_insn *from,
			      const struct bpf_insn *aux,
			      struct bpf_insn *to_buff)
{
	struct bpf_insn *to = to_buff;
	u32 imm_rnd = get_random_int();
	s16 off;

	BUILD_BUG_ON(BPF_REG_AX + 1 != MAX_BPF_JIT_REG);
	BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);

	if (from->imm == 0 &&
	    (from->code == (BPF_ALU | BPF_MOV | BPF_K) ||
	     from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {
		*to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg);
		goto out;
	}

	switch (from->code) {
	case BPF_ALU | BPF_ADD | BPF_K:
	case BPF_ALU | BPF_SUB | BPF_K:
	case BPF_ALU | BPF_AND | BPF_K:
	case BPF_ALU | BPF_OR | BPF_K:
	case BPF_ALU | BPF_XOR | BPF_K:
	case BPF_ALU | BPF_MUL | BPF_K:
	case BPF_ALU | BPF_MOV | BPF_K:
	case BPF_ALU | BPF_DIV | BPF_K:
	case BPF_ALU | BPF_MOD | BPF_K:
		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU32_REG(from->code, from->dst_reg, BPF_REG_AX);
		break;

	case BPF_ALU64 | BPF_ADD | BPF_K:
	case BPF_ALU64 | BPF_SUB | BPF_K:
	case BPF_ALU64 | BPF_AND | BPF_K:
	case BPF_ALU64 | BPF_OR | BPF_K:
	case BPF_ALU64 | BPF_XOR | BPF_K:
	case BPF_ALU64 | BPF_MUL | BPF_K:
	case BPF_ALU64 | BPF_MOV | BPF_K:
	case BPF_ALU64 | BPF_DIV | BPF_K:
	case BPF_ALU64 | BPF_MOD | BPF_K:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU64_REG(from->code, from->dst_reg, BPF_REG_AX);
		break;

	case BPF_JMP | BPF_JEQ | BPF_K:
	case BPF_JMP | BPF_JNE | BPF_K:
	case BPF_JMP | BPF_JGT | BPF_K:
	case BPF_JMP | BPF_JGE | BPF_K:
	case BPF_JMP | BPF_JSGT | BPF_K:
	case BPF_JMP | BPF_JSGE | BPF_K:
	case BPF_JMP | BPF_JSET | BPF_K:
		/* Accommodate for extra offset in case of a backjump. */
		off = from->off;
		if (off < 0)
			off -= 2;
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off);
		break;

	case BPF_LD | BPF_ABS | BPF_W:
	case BPF_LD | BPF_ABS | BPF_H:
	case BPF_LD | BPF_ABS | BPF_B:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_LD_IND(from->code, BPF_REG_AX, 0);
		break;

	case BPF_LD | BPF_IND | BPF_W:
	case BPF_LD | BPF_IND | BPF_H:
	case BPF_LD | BPF_IND | BPF_B:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU32_REG(BPF_ADD, BPF_REG_AX, from->src_reg);
		*to++ = BPF_LD_IND(from->code, BPF_REG_AX, 0);
		break;

	case BPF_LD | BPF_IMM | BPF_DW:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
		*to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX);
		break;
	case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */
		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm);
		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU64_REG(BPF_OR, aux[0].dst_reg, BPF_REG_AX);
		break;

	case BPF_ST | BPF_MEM | BPF_DW:
	case BPF_ST | BPF_MEM | BPF_W:
	case BPF_ST | BPF_MEM | BPF_H:
	case BPF_ST | BPF_MEM | BPF_B:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off);
		break;
	}
out:
	return to - to_buff;
}
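
/* Worked example: a single "r2 += 42" (BPF_ALU64 | BPF_ADD | BPF_K)
 * is rewritten into three insns so the constant never appears
 * verbatim in the JIT image (imm_rnd being a fresh random value):
 *
 *	BPF_REG_AX = imm_rnd ^ 42
 *	BPF_REG_AX ^= imm_rnd		// BPF_REG_AX holds 42 again
 *	r2 += BPF_REG_AX
 *
 * An attacker thus no longer controls the immediates that end up
 * encoded into executable memory.
 */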

static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other,
					      gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO |
			  gfp_extra_flags;
	struct bpf_prog *fp;

	fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags, PAGE_KERNEL);
	if (fp != NULL) {
		kmemcheck_annotate_bitfield(fp, meta);

		/* aux->prog still points to the fp_other one, so
		 * when promoting the clone to the real program,
		 * this still needs to be adapted.
		 */
		memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE);
	}

	return fp;
}

static void bpf_prog_clone_free(struct bpf_prog *fp)
{
	/* aux was stolen by the other clone, so we cannot free
	 * it from this path! It will be freed eventually by the
	 * other program on release.
	 *
	 * At this point, we don't need a deferred release since
	 * clone is guaranteed to not be locked.
	 */
	fp->aux = NULL;
	__bpf_prog_free(fp);
}

void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other)
{
	/* We have to repoint aux->prog to self, as we don't
	 * know whether fp here is the clone or the original.
	 */
	fp->aux->prog = fp;
	bpf_prog_clone_free(fp_other);
}

struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
{
	struct bpf_insn insn_buff[16], aux[2];
	struct bpf_prog *clone, *tmp;
	int insn_delta, insn_cnt;
	struct bpf_insn *insn;
	int i, rewritten;

	if (!bpf_jit_blinding_enabled())
		return prog;

	clone = bpf_prog_clone_create(prog, GFP_USER);
	if (!clone)
		return ERR_PTR(-ENOMEM);

	insn_cnt = clone->len;
	insn = clone->insnsi;

	for (i = 0; i < insn_cnt; i++, insn++) {
		/* We temporarily need to hold the original ld64 insn
		 * so that we can still access the first part in the
		 * second blinding run.
		 */
		if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) &&
		    insn[1].code == 0)
			memcpy(aux, insn, sizeof(aux));

		rewritten = bpf_jit_blind_insn(insn, aux, insn_buff);
		if (!rewritten)
			continue;

		tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten);
		if (!tmp) {
			/* Patching may have repointed aux->prog during
			 * realloc from the original one, so we need to
			 * fix it up here on error.
			 */
			bpf_jit_prog_release_other(prog, clone);
			return ERR_PTR(-ENOMEM);
		}

		clone = tmp;
		insn_delta = rewritten - 1;

		/* Walk new program and skip insns we just inserted. */
		insn = clone->insnsi + i + insn_delta;
		insn_cnt += insn_delta;
		i += insn_delta;
	}

	return clone;
}
#endif /* CONFIG_BPF_JIT */

/* Base function for offset calculation. Needs to go into .text section,
 * therefore keeping it non-static as well; will also be used by JITs
 * anyway later on, so do not let the compiler omit it.
 */
noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	return 0;
}
EXPORT_SYMBOL_GPL(__bpf_call_base);
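
/* For helper calls, insn->imm holds the helper's address expressed
 * relative to __bpf_call_base (the verifier rewrites the helper id
 * into this offset), so the interpreter below can dispatch with:
 *
 *	BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, ..., BPF_R5);
 */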

/**
 * __bpf_prog_run - run eBPF program on a given context
 * @ctx: is the data we are operating on
 * @insn: is the array of eBPF instructions
 *
 * Decode and execute eBPF instructions.
 */
static unsigned int __bpf_prog_run(void *ctx, const struct bpf_insn *insn)
{
	u64 stack[MAX_BPF_STACK / sizeof(u64)];
	u64 regs[MAX_BPF_REG], tmp;
	static const void *jumptable[256] = {
		[0 ... 255] = &&default_label,
		/* Now overwrite non-defaults ... */
		/* 32 bit ALU operations */
		[BPF_ALU | BPF_ADD | BPF_X] = &&ALU_ADD_X,
		[BPF_ALU | BPF_ADD | BPF_K] = &&ALU_ADD_K,
		[BPF_ALU | BPF_SUB | BPF_X] = &&ALU_SUB_X,
		[BPF_ALU | BPF_SUB | BPF_K] = &&ALU_SUB_K,
		[BPF_ALU | BPF_AND | BPF_X] = &&ALU_AND_X,
		[BPF_ALU | BPF_AND | BPF_K] = &&ALU_AND_K,
		[BPF_ALU | BPF_OR | BPF_X] = &&ALU_OR_X,
		[BPF_ALU | BPF_OR | BPF_K] = &&ALU_OR_K,
		[BPF_ALU | BPF_LSH | BPF_X] = &&ALU_LSH_X,
		[BPF_ALU | BPF_LSH | BPF_K] = &&ALU_LSH_K,
		[BPF_ALU | BPF_RSH | BPF_X] = &&ALU_RSH_X,
		[BPF_ALU | BPF_RSH | BPF_K] = &&ALU_RSH_K,
		[BPF_ALU | BPF_XOR | BPF_X] = &&ALU_XOR_X,
		[BPF_ALU | BPF_XOR | BPF_K] = &&ALU_XOR_K,
		[BPF_ALU | BPF_MUL | BPF_X] = &&ALU_MUL_X,
		[BPF_ALU | BPF_MUL | BPF_K] = &&ALU_MUL_K,
		[BPF_ALU | BPF_MOV | BPF_X] = &&ALU_MOV_X,
		[BPF_ALU | BPF_MOV | BPF_K] = &&ALU_MOV_K,
		[BPF_ALU | BPF_DIV | BPF_X] = &&ALU_DIV_X,
		[BPF_ALU | BPF_DIV | BPF_K] = &&ALU_DIV_K,
		[BPF_ALU | BPF_MOD | BPF_X] = &&ALU_MOD_X,
		[BPF_ALU | BPF_MOD | BPF_K] = &&ALU_MOD_K,
		[BPF_ALU | BPF_NEG] = &&ALU_NEG,
		[BPF_ALU | BPF_END | BPF_TO_BE] = &&ALU_END_TO_BE,
		[BPF_ALU | BPF_END | BPF_TO_LE] = &&ALU_END_TO_LE,
		/* 64 bit ALU operations */
		[BPF_ALU64 | BPF_ADD | BPF_X] = &&ALU64_ADD_X,
		[BPF_ALU64 | BPF_ADD | BPF_K] = &&ALU64_ADD_K,
		[BPF_ALU64 | BPF_SUB | BPF_X] = &&ALU64_SUB_X,
		[BPF_ALU64 | BPF_SUB | BPF_K] = &&ALU64_SUB_K,
		[BPF_ALU64 | BPF_AND | BPF_X] = &&ALU64_AND_X,
		[BPF_ALU64 | BPF_AND | BPF_K] = &&ALU64_AND_K,
		[BPF_ALU64 | BPF_OR | BPF_X] = &&ALU64_OR_X,
		[BPF_ALU64 | BPF_OR | BPF_K] = &&ALU64_OR_K,
		[BPF_ALU64 | BPF_LSH | BPF_X] = &&ALU64_LSH_X,
		[BPF_ALU64 | BPF_LSH | BPF_K] = &&ALU64_LSH_K,
		[BPF_ALU64 | BPF_RSH | BPF_X] = &&ALU64_RSH_X,
		[BPF_ALU64 | BPF_RSH | BPF_K] = &&ALU64_RSH_K,
		[BPF_ALU64 | BPF_XOR | BPF_X] = &&ALU64_XOR_X,
		[BPF_ALU64 | BPF_XOR | BPF_K] = &&ALU64_XOR_K,
		[BPF_ALU64 | BPF_MUL | BPF_X] = &&ALU64_MUL_X,
		[BPF_ALU64 | BPF_MUL | BPF_K] = &&ALU64_MUL_K,
		[BPF_ALU64 | BPF_MOV | BPF_X] = &&ALU64_MOV_X,
		[BPF_ALU64 | BPF_MOV | BPF_K] = &&ALU64_MOV_K,
		[BPF_ALU64 | BPF_ARSH | BPF_X] = &&ALU64_ARSH_X,
		[BPF_ALU64 | BPF_ARSH | BPF_K] = &&ALU64_ARSH_K,
		[BPF_ALU64 | BPF_DIV | BPF_X] = &&ALU64_DIV_X,
		[BPF_ALU64 | BPF_DIV | BPF_K] = &&ALU64_DIV_K,
		[BPF_ALU64 | BPF_MOD | BPF_X] = &&ALU64_MOD_X,
		[BPF_ALU64 | BPF_MOD | BPF_K] = &&ALU64_MOD_K,
		[BPF_ALU64 | BPF_NEG] = &&ALU64_NEG,
		/* Call instruction */
		[BPF_JMP | BPF_CALL] = &&JMP_CALL,
		[BPF_JMP | BPF_CALL | BPF_X] = &&JMP_TAIL_CALL,
		/* Jumps */
		[BPF_JMP | BPF_JA] = &&JMP_JA,
		[BPF_JMP | BPF_JEQ | BPF_X] = &&JMP_JEQ_X,
		[BPF_JMP | BPF_JEQ | BPF_K] = &&JMP_JEQ_K,
		[BPF_JMP | BPF_JNE | BPF_X] = &&JMP_JNE_X,
		[BPF_JMP | BPF_JNE | BPF_K] = &&JMP_JNE_K,
		[BPF_JMP | BPF_JGT | BPF_X] = &&JMP_JGT_X,
		[BPF_JMP | BPF_JGT | BPF_K] = &&JMP_JGT_K,
		[BPF_JMP | BPF_JGE | BPF_X] = &&JMP_JGE_X,
		[BPF_JMP | BPF_JGE | BPF_K] = &&JMP_JGE_K,
		[BPF_JMP | BPF_JSGT | BPF_X] = &&JMP_JSGT_X,
		[BPF_JMP | BPF_JSGT | BPF_K] = &&JMP_JSGT_K,
		[BPF_JMP | BPF_JSGE | BPF_X] = &&JMP_JSGE_X,
		[BPF_JMP | BPF_JSGE | BPF_K] = &&JMP_JSGE_K,
		[BPF_JMP | BPF_JSET | BPF_X] = &&JMP_JSET_X,
		[BPF_JMP | BPF_JSET | BPF_K] = &&JMP_JSET_K,
		/* Program return */
		[BPF_JMP | BPF_EXIT] = &&JMP_EXIT,
		/* Store instructions */
		[BPF_STX | BPF_MEM | BPF_B] = &&STX_MEM_B,
		[BPF_STX | BPF_MEM | BPF_H] = &&STX_MEM_H,
		[BPF_STX | BPF_MEM | BPF_W] = &&STX_MEM_W,
		[BPF_STX | BPF_MEM | BPF_DW] = &&STX_MEM_DW,
		[BPF_STX | BPF_XADD | BPF_W] = &&STX_XADD_W,
		[BPF_STX | BPF_XADD | BPF_DW] = &&STX_XADD_DW,
		[BPF_ST | BPF_MEM | BPF_B] = &&ST_MEM_B,
		[BPF_ST | BPF_MEM | BPF_H] = &&ST_MEM_H,
		[BPF_ST | BPF_MEM | BPF_W] = &&ST_MEM_W,
		[BPF_ST | BPF_MEM | BPF_DW] = &&ST_MEM_DW,
		/* Load instructions */
		[BPF_LDX | BPF_MEM | BPF_B] = &&LDX_MEM_B,
		[BPF_LDX | BPF_MEM | BPF_H] = &&LDX_MEM_H,
		[BPF_LDX | BPF_MEM | BPF_W] = &&LDX_MEM_W,
		[BPF_LDX | BPF_MEM | BPF_DW] = &&LDX_MEM_DW,
		[BPF_LD | BPF_ABS | BPF_W] = &&LD_ABS_W,
		[BPF_LD | BPF_ABS | BPF_H] = &&LD_ABS_H,
		[BPF_LD | BPF_ABS | BPF_B] = &&LD_ABS_B,
		[BPF_LD | BPF_IND | BPF_W] = &&LD_IND_W,
		[BPF_LD | BPF_IND | BPF_H] = &&LD_IND_H,
		[BPF_LD | BPF_IND | BPF_B] = &&LD_IND_B,
		[BPF_LD | BPF_IMM | BPF_DW] = &&LD_IMM_DW,
	};
	u32 tail_call_cnt = 0;
	void *ptr;
	int off;

#define CONT	 ({ insn++; goto select_insn; })
#define CONT_JMP ({ insn++; goto select_insn; })

	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)];
	ARG1 = (u64) (unsigned long) ctx;

select_insn:
	goto *jumptable[insn->code];

	/* ALU */
#define ALU(OPCODE, OP)				\
	ALU64_##OPCODE##_X:			\
		DST = DST OP SRC;		\
		CONT;				\
	ALU_##OPCODE##_X:			\
		DST = (u32) DST OP (u32) SRC;	\
		CONT;				\
	ALU64_##OPCODE##_K:			\
		DST = DST OP IMM;		\
		CONT;				\
	ALU_##OPCODE##_K:			\
		DST = (u32) DST OP (u32) IMM;	\
		CONT;

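	/* For instance, ALU(ADD, +) below expands (sketch) to:
	 *
	 *	ALU64_ADD_X: DST = DST + SRC; CONT;
	 *	ALU_ADD_X:   DST = (u32) DST + (u32) SRC; CONT;
	 *	ALU64_ADD_K: DST = DST + IMM; CONT;
	 *	ALU_ADD_K:   DST = (u32) DST + (u32) IMM; CONT;
	 *
	 * i.e. one label per encoding, with the 32-bit variants
	 * truncating their operands to u32 before the store into DST.
	 */
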
	ALU(ADD, +)
	ALU(SUB, -)
	ALU(AND, &)
	ALU(OR, |)
	ALU(LSH, <<)
	ALU(RSH, >>)
	ALU(XOR, ^)
	ALU(MUL, *)
#undef ALU
	ALU_NEG:
		DST = (u32) -DST;
		CONT;
	ALU64_NEG:
		DST = -DST;
		CONT;
	ALU_MOV_X:
		DST = (u32) SRC;
		CONT;
	ALU_MOV_K:
		DST = (u32) IMM;
		CONT;
	ALU64_MOV_X:
		DST = SRC;
		CONT;
	ALU64_MOV_K:
		DST = IMM;
		CONT;
	LD_IMM_DW:
		DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32;
		insn++;
		CONT;
	ALU64_ARSH_X:
		(*(s64 *) &DST) >>= SRC;
		CONT;
	ALU64_ARSH_K:
		(*(s64 *) &DST) >>= IMM;
		CONT;
	ALU64_MOD_X:
		if (unlikely(SRC == 0))
			return 0;
		div64_u64_rem(DST, SRC, &tmp);
		DST = tmp;
		CONT;
	ALU_MOD_X:
		if (unlikely(SRC == 0))
			return 0;
		tmp = (u32) DST;
		DST = do_div(tmp, (u32) SRC);
		CONT;
	ALU64_MOD_K:
		div64_u64_rem(DST, IMM, &tmp);
		DST = tmp;
		CONT;
	ALU_MOD_K:
		tmp = (u32) DST;
		DST = do_div(tmp, (u32) IMM);
		CONT;
	ALU64_DIV_X:
		if (unlikely(SRC == 0))
			return 0;
		DST = div64_u64(DST, SRC);
		CONT;
	ALU_DIV_X:
		if (unlikely(SRC == 0))
			return 0;
		tmp = (u32) DST;
		do_div(tmp, (u32) SRC);
		DST = (u32) tmp;
		CONT;
	ALU64_DIV_K:
		DST = div64_u64(DST, IMM);
		CONT;
	ALU_DIV_K:
		tmp = (u32) DST;
		do_div(tmp, (u32) IMM);
		DST = (u32) tmp;
		CONT;
	ALU_END_TO_BE:
		switch (IMM) {
		case 16:
			DST = (__force u16) cpu_to_be16(DST);
			break;
		case 32:
			DST = (__force u32) cpu_to_be32(DST);
			break;
		case 64:
			DST = (__force u64) cpu_to_be64(DST);
			break;
		}
		CONT;
	ALU_END_TO_LE:
		switch (IMM) {
		case 16:
			DST = (__force u16) cpu_to_le16(DST);
			break;
		case 32:
			DST = (__force u32) cpu_to_le32(DST);
			break;
		case 64:
			DST = (__force u64) cpu_to_le64(DST);
			break;
		}
		CONT;

	/* CALL */
	JMP_CALL:
		/* Function call scratches BPF_R1-BPF_R5 registers,
		 * preserves BPF_R6-BPF_R9, and stores return value
		 * into BPF_R0.
		 */
		BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
						       BPF_R4, BPF_R5);
		CONT;

	JMP_TAIL_CALL: {
		struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
		struct bpf_array *array = container_of(map, struct bpf_array, map);
		struct bpf_prog *prog;
		u64 index = BPF_R3;

		if (unlikely(index >= array->map.max_entries))
			goto out;
		if (unlikely(tail_call_cnt > MAX_TAIL_CALL_CNT))
			goto out;

		tail_call_cnt++;

		prog = READ_ONCE(array->ptrs[index]);
		if (!prog)
			goto out;

		/* ARG1 at this point is guaranteed to point to CTX from
		 * the verifier side, because the tail call is handled
		 * like a helper, that is, bpf_tail_call_proto,
		 * where arg1_type is ARG_PTR_TO_CTX.
		 */
		insn = prog->insnsi;
		goto select_insn;
out:
		CONT;
	}
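
	/* A tail call thus behaves like a long jump into another
	 * program: registers keep their contents, the stack frame is
	 * reused rather than pushed, and there is no return to the
	 * caller. The MAX_TAIL_CALL_CNT limit (32 at the time of
	 * writing) bounds runtime even for self-referencing prog
	 * arrays.
	 */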
	/* JMP */
	JMP_JA:
		insn += insn->off;
		CONT;
	JMP_JEQ_X:
		if (DST == SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JEQ_K:
		if (DST == IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JNE_X:
		if (DST != SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JNE_K:
		if (DST != IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGT_X:
		if (DST > SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGT_K:
		if (DST > IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGE_X:
		if (DST >= SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGE_K:
		if (DST >= IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGT_X:
		if (((s64) DST) > ((s64) SRC)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGT_K:
		if (((s64) DST) > ((s64) IMM)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGE_X:
		if (((s64) DST) >= ((s64) SRC)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGE_K:
		if (((s64) DST) >= ((s64) IMM)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSET_X:
		if (DST & SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSET_K:
		if (DST & IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_EXIT:
		return BPF_R0;

	/* STX and ST and LDX */
#define LDST(SIZEOP, SIZE)						\
	STX_MEM_##SIZEOP:						\
		*(SIZE *)(unsigned long) (DST + insn->off) = SRC;	\
		CONT;							\
	ST_MEM_##SIZEOP:						\
		*(SIZE *)(unsigned long) (DST + insn->off) = IMM;	\
		CONT;							\
	LDX_MEM_##SIZEOP:						\
		DST = *(SIZE *)(unsigned long) (SRC + insn->off);	\
		CONT;

	LDST(B, u8)
	LDST(H, u16)
	LDST(W, u32)
	LDST(DW, u64)
#undef LDST
	STX_XADD_W: /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
		atomic_add((u32) SRC, (atomic_t *)(unsigned long)
			   (DST + insn->off));
		CONT;
	STX_XADD_DW: /* lock xadd *(u64 *)(dst_reg + off16) += src_reg */
		atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
			     (DST + insn->off));
		CONT;
	LD_ABS_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + imm32)) */
		off = IMM;
load_word:
		/* BPF_LD + BPF_ABS and BPF_LD + BPF_IND insns only
		 * appear in programs where ctx == skb. All such
		 * programs keep 'ctx' in regs[BPF_REG_CTX] == BPF_R6,
		 * bpf_convert_filter() saves it in BPF_R6, and the
		 * internal BPF verifier will check that BPF_R6 ==
		 * ctx.
		 *
		 * BPF_ABS and BPF_IND are wrappers of function calls,
		 * so they scratch BPF_R1-BPF_R5 registers, preserve
		 * BPF_R6-BPF_R9, and store return value into BPF_R0.
		 *
		 * Implicit input:
		 *   ctx == skb == BPF_R6 == CTX
		 *
		 * Explicit input:
		 *   SRC == any register
		 *   IMM == 32-bit immediate
		 *
		 * Output:
		 *   BPF_R0 - 8/16/32-bit skb data converted to cpu endianness
		 */

		ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 4, &tmp);
		if (likely(ptr != NULL)) {
			BPF_R0 = get_unaligned_be32(ptr);
			CONT;
		}

		return 0;
	LD_ABS_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + imm32)) */
		off = IMM;
load_half:
		ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 2, &tmp);
		if (likely(ptr != NULL)) {
			BPF_R0 = get_unaligned_be16(ptr);
			CONT;
		}

		return 0;
	LD_ABS_B: /* BPF_R0 = *(u8 *) (skb->data + imm32) */
		off = IMM;
load_byte:
		ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 1, &tmp);
		if (likely(ptr != NULL)) {
			BPF_R0 = *(u8 *)ptr;
			CONT;
		}

		return 0;
	LD_IND_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + src_reg + imm32)) */
		off = IMM + SRC;
		goto load_word;
	LD_IND_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + src_reg + imm32)) */
		off = IMM + SRC;
		goto load_half;
	LD_IND_B: /* BPF_R0 = *(u8 *) (skb->data + src_reg + imm32) */
		off = IMM + SRC;
		goto load_byte;

	default_label:
		/* If we ever reach this, we have a bug somewhere. */
		WARN_RATELIMIT(1, "unknown opcode %02x\n", insn->code);
		return 0;
}
STACK_FRAME_NON_STANDARD(__bpf_prog_run); /* jump table */

bool bpf_prog_array_compatible(struct bpf_array *array,
			       const struct bpf_prog *fp)
{
	if (!array->owner_prog_type) {
		/* There's no owner yet where we could check for
		 * compatibility.
		 */
		array->owner_prog_type = fp->type;
		array->owner_jited = fp->jited;

		return true;
	}

	return array->owner_prog_type == fp->type &&
	       array->owner_jited == fp->jited;
}

static int bpf_check_tail_call(const struct bpf_prog *fp)
{
	struct bpf_prog_aux *aux = fp->aux;
	int i;

	for (i = 0; i < aux->used_map_cnt; i++) {
		struct bpf_map *map = aux->used_maps[i];
		struct bpf_array *array;

		if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
			continue;

		array = container_of(map, struct bpf_array, map);
		if (!bpf_prog_array_compatible(array, fp))
			return -EINVAL;
	}

	return 0;
}

/**
 * bpf_prog_select_runtime - select exec runtime for BPF program
 * @fp: bpf_prog populated with internal BPF program
 * @err: pointer to error variable
 *
 * Try to JIT the eBPF program; if the JIT is not available, use the
 * interpreter. The BPF program will be executed via the BPF_PROG_RUN()
 * macro.
 */
struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
{
	fp->bpf_func = (void *) __bpf_prog_run;

	/* eBPF JITs can rewrite the program in case constant
	 * blinding is active. However, in case of error during
	 * blinding, bpf_int_jit_compile() must always return a
	 * valid program, which in this case would simply not
	 * be JITed, but falls back to the interpreter.
	 */
	fp = bpf_int_jit_compile(fp);
	bpf_prog_lock_ro(fp);

	/* The tail call compatibility check can only be done at
	 * this late stage, as we need to determine whether we deal
	 * with JITed or non-JITed program concatenations, and not
	 * all eBPF JITs might immediately support all features.
	 */
	*err = bpf_check_tail_call(fp);

	return fp;
}
EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
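
/* Typical call site sketch, modelled on the syscall's program load
 * path:
 *
 *	int err;
 *
 *	fp = bpf_prog_select_runtime(fp, &err);
 *	if (err < 0)
 *		goto free_used_maps;
 *
 * From here on fp->bpf_func is valid and the program can be invoked
 * via the BPF_PROG_RUN() macro, whether or not the JIT kicked in.
 */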

static void bpf_prog_free_deferred(struct work_struct *work)
{
	struct bpf_prog_aux *aux;

	aux = container_of(work, struct bpf_prog_aux, work);
	bpf_jit_free(aux->prog);
}

/* Free internal BPF program */
void bpf_prog_free(struct bpf_prog *fp)
{
	struct bpf_prog_aux *aux = fp->aux;

	INIT_WORK(&aux->work, bpf_prog_free_deferred);
	schedule_work(&aux->work);
}
EXPORT_SYMBOL_GPL(bpf_prog_free);

/* RNG for unprivileged user space with separated state from prandom_u32(). */
static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);

void bpf_user_rnd_init_once(void)
{
	prandom_init_once(&bpf_user_rnd_state);
}

u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	/* Should someone ever have the rather unwise idea to use some
	 * of the registers passed into this function, then note that
	 * this function is called from native eBPF and classic-to-eBPF
	 * transformations. Register assignments from both sides are
	 * different, f.e. classic always sets fn(ctx, A, X) here.
	 */
	struct rnd_state *state;
	u32 res;

	state = &get_cpu_var(bpf_user_rnd_state);
	res = prandom_u32_state(state);
	put_cpu_var(state);

	return res;
}

/* Weak definitions of helper functions in case we don't have bpf syscall. */
const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
const struct bpf_func_proto bpf_map_update_elem_proto __weak;
const struct bpf_func_proto bpf_map_delete_elem_proto __weak;

const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;

const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
const struct bpf_func_proto bpf_get_current_comm_proto __weak;

const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
{
	return NULL;
}

u64 __weak
bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
{
	return -ENOTSUPP;
}

/* Always built-in helper functions. */
const struct bpf_func_proto bpf_tail_call_proto = {
	.func		= NULL,
	.gpl_only	= false,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};
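
/* Note: .func is intentionally NULL, since bpf_tail_call() is never
 * dispatched as an ordinary helper. The interpreter implements it
 * inline via the JMP_TAIL_CALL label above and JITs emit their own
 * sequence, so only the argument/return types matter here for the
 * verifier.
 */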

/* For classic BPF JITs that don't implement bpf_int_jit_compile(). */
struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
{
	return prog;
}

bool __weak bpf_helper_changes_skb_data(void *func)
{
	return false;
}

/* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
 * skb_copy_bits(), so provide a weak definition of it for NET-less config.
 */
int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
			 int len)
{
	return -EFAULT;
}