/*
 * Testsuite for eBPF verifier
 *
 * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include <stdio.h>
#include <unistd.h>
#include <linux/bpf.h>
#include <errno.h>
#include <linux/unistd.h>
#include <string.h>
#include <linux/filter.h>
#include "libbpf.h"

#define MAX_INSNS 512
#define ARRAY_SIZE(x) (sizeof(x) / sizeof(*(x)))

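/*
 * One verifier test case: a short eBPF program plus the verdict the verifier
 * is expected to reach.  fixup[] holds instruction indexes whose imm field is
 * patched with a real map fd before the program is loaded; errstr is the
 * substring expected in the verifier log when the program is rejected.
 */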
struct bpf_test {
	const char *descr;
	struct bpf_insn insns[MAX_INSNS];
	int fixup[32];
	const char *errstr;
	enum {
		ACCEPT,
		REJECT
	} result;
};

static struct bpf_test tests[] = {
	{
		"add+sub+mul",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_1, 1),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 2),
			BPF_MOV64_IMM(BPF_REG_2, 3),
			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 3),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"unreachable",
		.insns = {
			BPF_EXIT_INSN(),
			BPF_EXIT_INSN(),
		},
		.errstr = "unreachable",
		.result = REJECT,
	},
	{
		"unreachable2",
		.insns = {
			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "unreachable",
		.result = REJECT,
	},
	{
		"out of range jump",
		.insns = {
			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "jump out of range",
		.result = REJECT,
	},
	{
		"out of range jump2",
		.insns = {
			BPF_JMP_IMM(BPF_JA, 0, 0, -2),
			BPF_EXIT_INSN(),
		},
		.errstr = "jump out of range",
		.result = REJECT,
	},
	{
		"test1 ld_imm64",
		.insns = {
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_MOV64_IMM(BPF_REG_0, 2),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid BPF_LD_IMM insn",
		.result = REJECT,
	},
	{
		"test2 ld_imm64",
		.insns = {
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid BPF_LD_IMM insn",
		.result = REJECT,
	},
	{
		"test3 ld_imm64",
		.insns = {
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_ld_imm64 insn",
		.result = REJECT,
	},
	{
		"test4 ld_imm64",
		.insns = {
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_ld_imm64 insn",
		.result = REJECT,
	},
	{
		"test5 ld_imm64",
		.insns = {
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
		},
		.errstr = "invalid bpf_ld_imm64 insn",
		.result = REJECT,
	},
	{
		"no bpf_exit",
		.insns = {
			BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2),
		},
		.errstr = "jump out of range",
		.result = REJECT,
	},
	{
		"loop (back-edge)",
		.insns = {
			BPF_JMP_IMM(BPF_JA, 0, 0, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "back-edge",
		.result = REJECT,
	},
	{
		"loop2 (back-edge)",
		.insns = {
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
			BPF_JMP_IMM(BPF_JA, 0, 0, -4),
			BPF_EXIT_INSN(),
		},
		.errstr = "back-edge",
		.result = REJECT,
	},
	{
		"conditional loop",
		.insns = {
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
			BPF_EXIT_INSN(),
		},
		.errstr = "back-edge",
		.result = REJECT,
	},
	{
		"read uninitialized register",
		.insns = {
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_EXIT_INSN(),
		},
		.errstr = "R2 !read_ok",
		.result = REJECT,
	},
	{
		"read invalid register",
		.insns = {
			BPF_MOV64_REG(BPF_REG_0, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "R15 is invalid",
		.result = REJECT,
	},
	{
		"program doesn't init R0 before exit",
		.insns = {
			BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.errstr = "R0 !read_ok",
		.result = REJECT,
	},
	{
		"stack out of bounds",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, 8, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid stack",
		.result = REJECT,
	},
	{
		"invalid call insn1",
		.insns = {
			BPF_RAW_INSN(BPF_JMP | BPF_CALL | BPF_X, 0, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "BPF_CALL uses reserved",
		.result = REJECT,
	},
	{
		"invalid call insn2",
		.insns = {
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 1, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "BPF_CALL uses reserved",
		.result = REJECT,
	},
	{
		"invalid function call",
		.insns = {
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1234567),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid func 1234567",
		.result = REJECT,
	},
	{
		"uninitialized stack1",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_unspec),
			BPF_EXIT_INSN(),
		},
		.fixup = {2},
		.errstr = "invalid indirect read from stack",
		.result = REJECT,
	},
	{
		"uninitialized stack2",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -8),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid read from stack",
		.result = REJECT,
	},
	{
		"check valid spill/fill",
		.insns = {
			/* spill R1(ctx) into stack */
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),

			/* fill it back into R2 */
			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),

			/* should be able to access R0 = *(R2 + 8) */
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"check corrupted spill/fill",
		.insns = {
			/* spill R1(ctx) into stack */
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),

			/* mess up with R1 pointer on stack */
			BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23),

			/* fill back into R0 should fail */
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),

			BPF_EXIT_INSN(),
		},
		.errstr = "corrupted spill",
		.result = REJECT,
	},
	{
		"invalid src register in STX",
		.insns = {
			BPF_STX_MEM(BPF_B, BPF_REG_10, -1, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "R15 is invalid",
		.result = REJECT,
	},
	{
		"invalid dst register in STX",
		.insns = {
			BPF_STX_MEM(BPF_B, 14, BPF_REG_10, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "R14 is invalid",
		.result = REJECT,
	},
	{
		"invalid dst register in ST",
		.insns = {
			BPF_ST_MEM(BPF_B, 14, -1, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "R14 is invalid",
		.result = REJECT,
	},
	{
		"invalid src register in LDX",
		.insns = {
			BPF_LDX_MEM(BPF_B, BPF_REG_0, 12, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "R12 is invalid",
		.result = REJECT,
	},
	{
		"invalid dst register in LDX",
		.insns = {
			BPF_LDX_MEM(BPF_B, 11, BPF_REG_1, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "R11 is invalid",
		.result = REJECT,
	},
	{
		"junk insn",
		.insns = {
			BPF_RAW_INSN(0, 0, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid BPF_LD_IMM",
		.result = REJECT,
	},
	{
		"junk insn2",
		.insns = {
			BPF_RAW_INSN(1, 0, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "BPF_LDX uses reserved fields",
		.result = REJECT,
	},
	{
		"junk insn3",
		.insns = {
			BPF_RAW_INSN(-1, 0, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid BPF_ALU opcode f0",
		.result = REJECT,
	},
	{
		"junk insn4",
		.insns = {
			BPF_RAW_INSN(-1, -1, -1, -1, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid BPF_ALU opcode f0",
		.result = REJECT,
	},
	{
		"junk insn5",
		.insns = {
			BPF_RAW_INSN(0x7f, -1, -1, -1, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "BPF_ALU uses reserved fields",
		.result = REJECT,
	},
	{
		"misaligned read from stack",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -4),
			BPF_EXIT_INSN(),
		},
		.errstr = "misaligned access",
		.result = REJECT,
	},
	{
		"invalid map_fd for function call",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_unspec),
			BPF_EXIT_INSN(),
		},
		.errstr = "fd 0 is not pointing to valid bpf_map",
		.result = REJECT,
	},
	{
		"don't check return value before access",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_unspec),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup = {3},
		.errstr = "R0 invalid mem access 'map_value_or_null'",
		.result = REJECT,
	},
	{
		"access memory with incorrect alignment",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_unspec),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 4, 0),
			BPF_EXIT_INSN(),
		},
		.fixup = {3},
		.errstr = "misaligned access",
		.result = REJECT,
	},
	{
		"sometimes access memory with incorrect alignment",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_unspec),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
			BPF_EXIT_INSN(),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1),
			BPF_EXIT_INSN(),
		},
		.fixup = {3},
		.errstr = "R0 invalid mem access",
		.result = REJECT,
	},
	{
		"jump test 1",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -8),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 5),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"jump test 2",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
			BPF_JMP_IMM(BPF_JA, 0, 0, 14),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
			BPF_JMP_IMM(BPF_JA, 0, 0, 11),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
			BPF_JMP_IMM(BPF_JA, 0, 0, 8),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
			BPF_JMP_IMM(BPF_JA, 0, 0, 5),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"jump test 3",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 19),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 3),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
			BPF_JMP_IMM(BPF_JA, 0, 0, 15),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 3),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -32),
			BPF_JMP_IMM(BPF_JA, 0, 0, 11),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 3),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -40),
			BPF_JMP_IMM(BPF_JA, 0, 0, 7),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 3),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),
			BPF_JMP_IMM(BPF_JA, 0, 0, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 0),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -56),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_unspec),
			BPF_EXIT_INSN(),
		},
		.fixup = {24},
		.result = ACCEPT,
	},
	{
		"jump test 4",
		.insns = {
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
};

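/*
 * Determine the program length by scanning insns[] backwards for the last
 * non-zero instruction; unused trailing slots are zero-initialized by the
 * array initializer above.
 */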
static int probe_filter_length(struct bpf_insn *fp)
{
	int len = 0;

	for (len = MAX_INSNS - 1; len > 0; --len)
		if (fp[len].code != 0 || fp[len].imm != 0)
			break;

	return len + 1;
}

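/* Create a dummy map whose fd gets patched into test programs via fixup[]. */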
static int create_map(void)
{
	long long key, value = 0;
	int map_fd;

	map_fd = bpf_create_map(BPF_MAP_TYPE_UNSPEC, sizeof(key), sizeof(value), 1024);
	if (map_fd < 0) {
		printf("failed to create map '%s'\n", strerror(errno));
	}

	return map_fd;
}

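/*
 * Run every test program through the verifier and compare the outcome (and,
 * for rejected programs, the verifier log) against the expected result.
 */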
static int test(void)
{
	int prog_fd, i;

	for (i = 0; i < ARRAY_SIZE(tests); i++) {
		struct bpf_insn *prog = tests[i].insns;
		int prog_len = probe_filter_length(prog);
		int *fixup = tests[i].fixup;
		int map_fd = -1;

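		/* patch a freshly created map fd into the insns listed in fixup[] */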
		if (*fixup) {
			map_fd = create_map();

			do {
				prog[*fixup].imm = map_fd;
				fixup++;
			} while (*fixup);
		}
		printf("#%d %s ", i, tests[i].descr);

		prog_fd = bpf_prog_load(BPF_PROG_TYPE_UNSPEC, prog,
					prog_len * sizeof(struct bpf_insn),
					"GPL");

		if (tests[i].result == ACCEPT) {
			if (prog_fd < 0) {
				printf("FAIL\nfailed to load prog '%s'\n",
				       strerror(errno));
				printf("%s", bpf_log_buf);
				goto fail;
			}
		} else {
			if (prog_fd >= 0) {
				printf("FAIL\nunexpected success to load\n");
				printf("%s", bpf_log_buf);
				goto fail;
			}
			if (strstr(bpf_log_buf, tests[i].errstr) == 0) {
				printf("FAIL\nunexpected error message: %s",
				       bpf_log_buf);
				goto fail;
			}
		}

		printf("OK\n");
fail:
		if (map_fd >= 0)
			close(map_fd);
		close(prog_fd);

	}

	return 0;
}

int main(void)
{
	return test();
}