/*
 * Testsuite for eBPF verifier
 *
 * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>

#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/unistd.h>
/* Number of elements in a statically-sized array.  Only valid on a true
 * array object: applied to a pointer (e.g. an array function parameter,
 * which decays) it would silently compute sizeof(ptr)/sizeof(elem). */
#define ARRAY_SIZE(x) (sizeof(x) / sizeof(*(x)))
/* NOTE(review): the leading "25" below is an extraction artifact (a fused
 * original line number), not code.  This appears to be the insns[] member
 * of struct bpf_test — the struct's opening lines are not visible in this
 * chunk — providing fixed-size storage for one test case's BPF program.
 * MAX_INSNS is presumably #defined earlier in the full file; TODO confirm
 * against the original source. */
25 struct bpf_insn insns
[MAX_INSNS
];
34 static struct bpf_test tests
[] = {
38 BPF_MOV64_IMM(BPF_REG_1
, 1),
39 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 2),
40 BPF_MOV64_IMM(BPF_REG_2
, 3),
41 BPF_ALU64_REG(BPF_SUB
, BPF_REG_1
, BPF_REG_2
),
42 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -1),
43 BPF_ALU64_IMM(BPF_MUL
, BPF_REG_1
, 3),
44 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_1
),
55 .errstr
= "unreachable",
61 BPF_JMP_IMM(BPF_JA
, 0, 0, 1),
62 BPF_JMP_IMM(BPF_JA
, 0, 0, 0),
65 .errstr
= "unreachable",
71 BPF_JMP_IMM(BPF_JA
, 0, 0, 1),
74 .errstr
= "jump out of range",
80 BPF_JMP_IMM(BPF_JA
, 0, 0, -2),
83 .errstr
= "jump out of range",
89 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 0, 1),
90 BPF_LD_IMM64(BPF_REG_0
, 0),
91 BPF_LD_IMM64(BPF_REG_0
, 0),
92 BPF_LD_IMM64(BPF_REG_0
, 1),
93 BPF_LD_IMM64(BPF_REG_0
, 1),
94 BPF_MOV64_IMM(BPF_REG_0
, 2),
97 .errstr
= "invalid BPF_LD_IMM insn",
103 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 0, 1),
104 BPF_LD_IMM64(BPF_REG_0
, 0),
105 BPF_LD_IMM64(BPF_REG_0
, 0),
106 BPF_LD_IMM64(BPF_REG_0
, 1),
107 BPF_LD_IMM64(BPF_REG_0
, 1),
110 .errstr
= "invalid BPF_LD_IMM insn",
116 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 0, 1),
117 BPF_RAW_INSN(BPF_LD
| BPF_IMM
| BPF_DW
, 0, 0, 0, 0),
118 BPF_LD_IMM64(BPF_REG_0
, 0),
119 BPF_LD_IMM64(BPF_REG_0
, 0),
120 BPF_LD_IMM64(BPF_REG_0
, 1),
121 BPF_LD_IMM64(BPF_REG_0
, 1),
124 .errstr
= "invalid bpf_ld_imm64 insn",
130 BPF_RAW_INSN(BPF_LD
| BPF_IMM
| BPF_DW
, 0, 0, 0, 0),
133 .errstr
= "invalid bpf_ld_imm64 insn",
139 BPF_RAW_INSN(BPF_LD
| BPF_IMM
| BPF_DW
, 0, 0, 0, 0),
141 .errstr
= "invalid bpf_ld_imm64 insn",
147 BPF_ALU64_REG(BPF_MOV
, BPF_REG_0
, BPF_REG_2
),
149 .errstr
= "jump out of range",
155 BPF_JMP_IMM(BPF_JA
, 0, 0, -1),
158 .errstr
= "back-edge",
164 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
165 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_0
),
166 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_0
),
167 BPF_JMP_IMM(BPF_JA
, 0, 0, -4),
170 .errstr
= "back-edge",
176 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
177 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_0
),
178 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_0
),
179 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 0, -3),
182 .errstr
= "back-edge",
186 "read uninitialized register",
188 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
191 .errstr
= "R2 !read_ok",
195 "read invalid register",
197 BPF_MOV64_REG(BPF_REG_0
, -1),
200 .errstr
= "R15 is invalid",
204 "program doesn't init R0 before exit",
206 BPF_ALU64_REG(BPF_MOV
, BPF_REG_2
, BPF_REG_1
),
209 .errstr
= "R0 !read_ok",
213 "program doesn't init R0 before exit in all branches",
215 BPF_JMP_IMM(BPF_JGE
, BPF_REG_1
, 0, 2),
216 BPF_MOV64_IMM(BPF_REG_0
, 1),
217 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 2),
220 .errstr
= "R0 !read_ok",
224 "stack out of bounds",
226 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, 8, 0),
229 .errstr
= "invalid stack",
233 "invalid call insn1",
235 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
| BPF_X
, 0, 0, 0, 0),
238 .errstr
= "BPF_CALL uses reserved",
242 "invalid call insn2",
244 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 1, 0),
247 .errstr
= "BPF_CALL uses reserved",
251 "invalid function call",
253 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0, 1234567),
256 .errstr
= "invalid func 1234567",
260 "uninitialized stack1",
262 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
263 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
264 BPF_LD_MAP_FD(BPF_REG_1
, 0),
265 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0, BPF_FUNC_map_lookup_elem
),
269 .errstr
= "invalid indirect read from stack",
273 "uninitialized stack2",
275 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
276 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_2
, -8),
279 .errstr
= "invalid read from stack",
283 "check valid spill/fill",
285 /* spill R1(ctx) into stack */
286 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_1
, -8),
288 /* fill it back into R2 */
289 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_10
, -8),
291 /* should be able to access R0 = *(R2 + 8) */
292 /* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */
293 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
299 "check corrupted spill/fill",
301 /* spill R1(ctx) into stack */
302 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_1
, -8),
304 /* mess up with R1 pointer on stack */
305 BPF_ST_MEM(BPF_B
, BPF_REG_10
, -7, 0x23),
307 /* fill back into R0 should fail */
308 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_10
, -8),
312 .errstr
= "corrupted spill",
316 "invalid src register in STX",
318 BPF_STX_MEM(BPF_B
, BPF_REG_10
, -1, -1),
321 .errstr
= "R15 is invalid",
325 "invalid dst register in STX",
327 BPF_STX_MEM(BPF_B
, 14, BPF_REG_10
, -1),
330 .errstr
= "R14 is invalid",
334 "invalid dst register in ST",
336 BPF_ST_MEM(BPF_B
, 14, -1, -1),
339 .errstr
= "R14 is invalid",
343 "invalid src register in LDX",
345 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, 12, 0),
348 .errstr
= "R12 is invalid",
352 "invalid dst register in LDX",
354 BPF_LDX_MEM(BPF_B
, 11, BPF_REG_1
, 0),
357 .errstr
= "R11 is invalid",
363 BPF_RAW_INSN(0, 0, 0, 0, 0),
366 .errstr
= "invalid BPF_LD_IMM",
372 BPF_RAW_INSN(1, 0, 0, 0, 0),
375 .errstr
= "BPF_LDX uses reserved fields",
381 BPF_RAW_INSN(-1, 0, 0, 0, 0),
384 .errstr
= "invalid BPF_ALU opcode f0",
390 BPF_RAW_INSN(-1, -1, -1, -1, -1),
393 .errstr
= "invalid BPF_ALU opcode f0",
399 BPF_RAW_INSN(0x7f, -1, -1, -1, -1),
402 .errstr
= "BPF_ALU uses reserved fields",
406 "misaligned read from stack",
408 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
409 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_2
, -4),
412 .errstr
= "misaligned access",
416 "invalid map_fd for function call",
418 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
419 BPF_ALU64_REG(BPF_MOV
, BPF_REG_2
, BPF_REG_10
),
420 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
421 BPF_LD_MAP_FD(BPF_REG_1
, 0),
422 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0, BPF_FUNC_map_delete_elem
),
425 .errstr
= "fd 0 is not pointing to valid bpf_map",
429 "don't check return value before access",
431 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
432 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
433 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
434 BPF_LD_MAP_FD(BPF_REG_1
, 0),
435 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0, BPF_FUNC_map_lookup_elem
),
436 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0, 0),
440 .errstr
= "R0 invalid mem access 'map_value_or_null'",
444 "access memory with incorrect alignment",
446 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
447 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
448 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
449 BPF_LD_MAP_FD(BPF_REG_1
, 0),
450 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0, BPF_FUNC_map_lookup_elem
),
451 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 1),
452 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 4, 0),
456 .errstr
= "misaligned access",
460 "sometimes access memory with incorrect alignment",
462 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
463 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
464 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
465 BPF_LD_MAP_FD(BPF_REG_1
, 0),
466 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0, BPF_FUNC_map_lookup_elem
),
467 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 2),
468 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0, 0),
470 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0, 1),
474 .errstr
= "R0 invalid mem access",
480 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
481 BPF_STX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_1
, -8),
482 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 0, 1),
483 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -8, 0),
484 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 1, 1),
485 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -16, 1),
486 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 2, 1),
487 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -8, 2),
488 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 3, 1),
489 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -16, 3),
490 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 4, 1),
491 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -8, 4),
492 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 5, 1),
493 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -32, 5),
494 BPF_MOV64_IMM(BPF_REG_0
, 0),
502 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
503 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 0, 2),
504 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -8, 0),
505 BPF_JMP_IMM(BPF_JA
, 0, 0, 14),
506 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 1, 2),
507 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -16, 0),
508 BPF_JMP_IMM(BPF_JA
, 0, 0, 11),
509 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 2, 2),
510 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -32, 0),
511 BPF_JMP_IMM(BPF_JA
, 0, 0, 8),
512 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 3, 2),
513 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -40, 0),
514 BPF_JMP_IMM(BPF_JA
, 0, 0, 5),
515 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 4, 2),
516 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -48, 0),
517 BPF_JMP_IMM(BPF_JA
, 0, 0, 2),
518 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 5, 1),
519 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -56, 0),
520 BPF_MOV64_IMM(BPF_REG_0
, 0),
528 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
529 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 0, 3),
530 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -8, 0),
531 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
532 BPF_JMP_IMM(BPF_JA
, 0, 0, 19),
533 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 1, 3),
534 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -16, 0),
535 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -16),
536 BPF_JMP_IMM(BPF_JA
, 0, 0, 15),
537 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 2, 3),
538 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -32, 0),
539 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -32),
540 BPF_JMP_IMM(BPF_JA
, 0, 0, 11),
541 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 3, 3),
542 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -40, 0),
543 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -40),
544 BPF_JMP_IMM(BPF_JA
, 0, 0, 7),
545 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 4, 3),
546 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -48, 0),
547 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -48),
548 BPF_JMP_IMM(BPF_JA
, 0, 0, 3),
549 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 5, 0),
550 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -56, 0),
551 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -56),
552 BPF_LD_MAP_FD(BPF_REG_1
, 0),
553 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0, BPF_FUNC_map_delete_elem
),
562 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 1),
563 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 2),
564 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 3),
565 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 4),
566 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 1),
567 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 2),
568 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 3),
569 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 4),
570 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 1),
571 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 2),
572 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 3),
573 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 4),
574 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 1),
575 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 2),
576 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 3),
577 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 4),
578 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 1),
579 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 2),
580 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 3),
581 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 4),
582 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 1),
583 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 2),
584 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 3),
585 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 4),
586 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 1),
587 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 2),
588 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 3),
589 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 4),
590 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 1),
591 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 2),
592 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 3),
593 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 4),
594 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 1),
595 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 2),
596 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 3),
597 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 4),
598 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 0),
599 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 0),
600 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 0),
601 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 0),
602 BPF_MOV64_IMM(BPF_REG_0
, 0),
610 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
611 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_2
),
612 BPF_JMP_IMM(BPF_JGE
, BPF_REG_1
, 0, 2),
613 BPF_STX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_3
, -8),
614 BPF_JMP_IMM(BPF_JA
, 0, 0, 2),
615 BPF_STX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_2
, -8),
616 BPF_JMP_IMM(BPF_JA
, 0, 0, 0),
617 BPF_MOV64_IMM(BPF_REG_0
, 0),
618 BPF_JMP_IMM(BPF_JGE
, BPF_REG_1
, 0, 2),
619 BPF_STX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_3
, -8),
620 BPF_JMP_IMM(BPF_JA
, 0, 0, 2),
621 BPF_STX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_2
, -8),
622 BPF_JMP_IMM(BPF_JA
, 0, 0, 0),
623 BPF_MOV64_IMM(BPF_REG_0
, 0),
624 BPF_JMP_IMM(BPF_JGE
, BPF_REG_1
, 0, 2),
625 BPF_STX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_3
, -8),
626 BPF_JMP_IMM(BPF_JA
, 0, 0, 2),
627 BPF_STX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_2
, -8),
628 BPF_JMP_IMM(BPF_JA
, 0, 0, 0),
629 BPF_MOV64_IMM(BPF_REG_0
, 0),
630 BPF_JMP_IMM(BPF_JGE
, BPF_REG_1
, 0, 2),
631 BPF_STX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_3
, -8),
632 BPF_JMP_IMM(BPF_JA
, 0, 0, 2),
633 BPF_STX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_2
, -8),
634 BPF_JMP_IMM(BPF_JA
, 0, 0, 0),
635 BPF_MOV64_IMM(BPF_REG_0
, 0),
636 BPF_JMP_IMM(BPF_JGE
, BPF_REG_1
, 0, 2),
637 BPF_STX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_3
, -8),
638 BPF_JMP_IMM(BPF_JA
, 0, 0, 2),
639 BPF_STX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_2
, -8),
640 BPF_JMP_IMM(BPF_JA
, 0, 0, 0),
641 BPF_MOV64_IMM(BPF_REG_0
, 0),
647 "access skb fields ok",
649 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
650 offsetof(struct __sk_buff
, len
)),
651 BPF_JMP_IMM(BPF_JGE
, BPF_REG_0
, 0, 1),
652 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
653 offsetof(struct __sk_buff
, mark
)),
654 BPF_JMP_IMM(BPF_JGE
, BPF_REG_0
, 0, 1),
655 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
656 offsetof(struct __sk_buff
, pkt_type
)),
657 BPF_JMP_IMM(BPF_JGE
, BPF_REG_0
, 0, 1),
658 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
659 offsetof(struct __sk_buff
, queue_mapping
)),
660 BPF_JMP_IMM(BPF_JGE
, BPF_REG_0
, 0, 0),
661 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
662 offsetof(struct __sk_buff
, protocol
)),
663 BPF_JMP_IMM(BPF_JGE
, BPF_REG_0
, 0, 0),
664 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
665 offsetof(struct __sk_buff
, vlan_present
)),
666 BPF_JMP_IMM(BPF_JGE
, BPF_REG_0
, 0, 0),
667 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
668 offsetof(struct __sk_buff
, vlan_tci
)),
669 BPF_JMP_IMM(BPF_JGE
, BPF_REG_0
, 0, 0),
675 "access skb fields bad1",
677 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
, -4),
680 .errstr
= "invalid bpf_context access",
684 "access skb fields bad2",
686 BPF_JMP_IMM(BPF_JGE
, BPF_REG_1
, 0, 9),
687 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
688 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
689 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
690 BPF_LD_MAP_FD(BPF_REG_1
, 0),
691 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0, BPF_FUNC_map_lookup_elem
),
692 BPF_JMP_IMM(BPF_JNE
, BPF_REG_0
, 0, 1),
694 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
695 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
696 offsetof(struct __sk_buff
, pkt_type
)),
700 .errstr
= "different pointers",
704 "access skb fields bad3",
706 BPF_JMP_IMM(BPF_JGE
, BPF_REG_1
, 0, 2),
707 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
708 offsetof(struct __sk_buff
, pkt_type
)),
710 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
711 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
712 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
713 BPF_LD_MAP_FD(BPF_REG_1
, 0),
714 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0, BPF_FUNC_map_lookup_elem
),
715 BPF_JMP_IMM(BPF_JNE
, BPF_REG_0
, 0, 1),
717 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
718 BPF_JMP_IMM(BPF_JA
, 0, 0, -12),
721 .errstr
= "different pointers",
725 "access skb fields bad4",
727 BPF_JMP_IMM(BPF_JGE
, BPF_REG_1
, 0, 3),
728 BPF_LDX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_1
,
729 offsetof(struct __sk_buff
, len
)),
730 BPF_MOV64_IMM(BPF_REG_0
, 0),
732 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
733 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
734 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
735 BPF_LD_MAP_FD(BPF_REG_1
, 0),
736 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0, BPF_FUNC_map_lookup_elem
),
737 BPF_JMP_IMM(BPF_JNE
, BPF_REG_0
, 0, 1),
739 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
740 BPF_JMP_IMM(BPF_JA
, 0, 0, -13),
743 .errstr
= "different pointers",
748 static int probe_filter_length(struct bpf_insn
*fp
)
752 for (len
= MAX_INSNS
- 1; len
> 0; --len
)
753 if (fp
[len
].code
!= 0 || fp
[len
].imm
!= 0)
759 static int create_map(void)
761 long long key
, value
= 0;
764 map_fd
= bpf_create_map(BPF_MAP_TYPE_HASH
, sizeof(key
), sizeof(value
), 1024);
766 printf("failed to create map '%s'\n", strerror(errno
));
772 static int test(void)
774 int prog_fd
, i
, pass_cnt
= 0, err_cnt
= 0;
776 for (i
= 0; i
< ARRAY_SIZE(tests
); i
++) {
777 struct bpf_insn
*prog
= tests
[i
].insns
;
778 int prog_len
= probe_filter_length(prog
);
779 int *fixup
= tests
[i
].fixup
;
783 map_fd
= create_map();
786 prog
[*fixup
].imm
= map_fd
;
790 printf("#%d %s ", i
, tests
[i
].descr
);
792 prog_fd
= bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER
, prog
,
793 prog_len
* sizeof(struct bpf_insn
),
796 if (tests
[i
].result
== ACCEPT
) {
798 printf("FAIL\nfailed to load prog '%s'\n",
800 printf("%s", bpf_log_buf
);
806 printf("FAIL\nunexpected success to load\n");
807 printf("%s", bpf_log_buf
);
811 if (strstr(bpf_log_buf
, tests
[i
].errstr
) == 0) {
812 printf("FAIL\nunexpected error message: %s",
827 printf("Summary: %d PASSED, %d FAILED\n", pass_cnt
, err_cnt
);