```c
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/license.h>
#include <linux/filter.h>
#include <linux/version.h>

static LIST_HEAD(bpf_map_types);

static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
{
        struct bpf_map_type_list *tl;
        struct bpf_map *map;

        list_for_each_entry(tl, &bpf_map_types, list_node) {
                if (tl->type == attr->map_type) {
                        map = tl->ops->map_alloc(attr);
                        if (IS_ERR(map))
                                return map;
                        map->ops = tl->ops;
                        map->map_type = attr->map_type;
                        return map;
                }
        }
        return ERR_PTR(-EINVAL);
}

/* boot time registration of different map implementations */
void bpf_register_map_type(struct bpf_map_type_list *tl)
{
        list_add(&tl->list_node, &bpf_map_types);
}
```
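
Each map implementation lives in its own file and hands a `bpf_map_type_list` to `bpf_register_map_type()` from an initcall. A condensed sketch modeled on `kernel/bpf/arraymap.c`; the `example_map_*` callback names are hypothetical:

```c
/* sketch of a map implementation registering itself at boot;
 * the example_map_* callbacks are hypothetical placeholders
 */
static const struct bpf_map_ops example_map_ops = {
        .map_alloc = example_map_alloc,
        .map_free = example_map_free,
        .map_get_next_key = example_map_get_next_key,
        .map_lookup_elem = example_map_lookup_elem,
        .map_update_elem = example_map_update_elem,
        .map_delete_elem = example_map_delete_elem,
};

static struct bpf_map_type_list example_map_type __read_mostly = {
        .ops = &example_map_ops,
        .type = BPF_MAP_TYPE_ARRAY,
};

static int __init register_example_map(void)
{
        bpf_register_map_type(&example_map_type);
        return 0;
}
late_initcall(register_example_map);
```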
```c
/* called from workqueue */
static void bpf_map_free_deferred(struct work_struct *work)
{
        struct bpf_map *map = container_of(work, struct bpf_map, work);

        /* implementation dependent freeing */
        map->ops->map_free(map);
}

/* decrement map refcnt and schedule it for freeing via workqueue
 * (underlying map implementation ops->map_free() might sleep)
 */
void bpf_map_put(struct bpf_map *map)
{
        if (atomic_dec_and_test(&map->refcnt)) {
                INIT_WORK(&map->work, bpf_map_free_deferred);
                schedule_work(&map->work);
        }
}

static int bpf_map_release(struct inode *inode, struct file *filp)
{
        struct bpf_map *map = filp->private_data;

        if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY)
                /* prog_array stores refcnt-ed bpf_prog pointers;
                 * release them all when user space closes prog_array_fd
                 */
                bpf_prog_array_map_clear(map);

        bpf_map_put(map);
        return 0;
}

static const struct file_operations bpf_map_fops = {
        .release = bpf_map_release,
};

/* helper macro to check that unused fields of 'union bpf_attr' are zero */
#define CHECK_ATTR(CMD) \
        memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
                   sizeof(attr->CMD##_LAST_FIELD), 0, \
                   sizeof(*attr) - \
                   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
                   sizeof(attr->CMD##_LAST_FIELD)) != NULL
```
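
`CHECK_ATTR` scans every byte of the union that lies past the command's last used field and evaluates to true if any of them is non-zero. For the first user below (`BPF_MAP_CREATE`, whose last field is `max_entries`) it expands, modulo whitespace, to:

```c
/* expansion of CHECK_ATTR(BPF_MAP_CREATE): true if any byte after
 * attr->max_entries is non-zero
 */
memchr_inv((void *) &attr->max_entries + sizeof(attr->max_entries), 0,
           sizeof(*attr) - offsetof(union bpf_attr, max_entries) -
           sizeof(attr->max_entries)) != NULL
```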
```c
#define BPF_MAP_CREATE_LAST_FIELD max_entries
/* called via syscall */
static int map_create(union bpf_attr *attr)
{
        struct bpf_map *map;
        int err;

        err = CHECK_ATTR(BPF_MAP_CREATE);
        if (err)
                return -EINVAL;

        /* find map type and init map: hashtable vs rbtree vs bloom vs ... */
        map = find_and_alloc_map(attr);
        if (IS_ERR(map))
                return PTR_ERR(map);

        atomic_set(&map->refcnt, 1);

        err = anon_inode_getfd("bpf-map", &bpf_map_fops, map, O_RDWR | O_CLOEXEC);

        if (err < 0)
                /* failed to allocate fd */
                goto free_map;

        return err;

free_map:
        map->ops->map_free(map);
        return err;
}
```
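
From user space, `map_create` is reached through the multiplexing `bpf(2)` syscall shown at the bottom of this file. A minimal sketch, assuming a uapi `<linux/bpf.h>` that provides these commands and no libc wrapper (hence the raw `syscall(2)`):

```c
#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* returns a new map fd, or -1 with errno set */
static int create_array_map(unsigned int value_size, unsigned int max_entries)
{
        union bpf_attr attr;

        /* unused attr fields must be zero, or CHECK_ATTR() rejects the call */
        memset(&attr, 0, sizeof(attr));
        attr.map_type = BPF_MAP_TYPE_ARRAY;
        attr.key_size = sizeof(int);    /* array maps use 4-byte index keys */
        attr.value_size = value_size;
        attr.max_entries = max_entries;

        return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
}
```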
```c
/* If an error is returned, the fd is released.
 * On success the caller should complete fd access with a matching fdput()
 */
struct bpf_map *bpf_map_get(struct fd f)
{
        struct bpf_map *map;

        if (!f.file)
                return ERR_PTR(-EBADF);

        if (f.file->f_op != &bpf_map_fops) {
                fdput(f);
                return ERR_PTR(-EINVAL);
        }

        map = f.file->private_data;

        return map;
}

/* helper to convert user pointers passed inside __aligned_u64 fields */
static void __user *u64_to_ptr(__u64 val)
{
        return (void __user *) (unsigned long) val;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD value

static int map_lookup_elem(union bpf_attr *attr)
{
        void __user *ukey = u64_to_ptr(attr->key);
        void __user *uvalue = u64_to_ptr(attr->value);
        int ufd = attr->map_fd;
        struct fd f = fdget(ufd);
        struct bpf_map *map;
        void *key, *value, *ptr;
        int err;

        if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
                return -EINVAL;

        map = bpf_map_get(f);
        if (IS_ERR(map))
                return PTR_ERR(map);

        err = -ENOMEM;
        key = kmalloc(map->key_size, GFP_USER);
        if (!key)
                goto err_put;

        err = -EFAULT;
        if (copy_from_user(key, ukey, map->key_size) != 0)
                goto free_key;

        err = -ENOMEM;
        value = kmalloc(map->value_size, GFP_USER);
        if (!value)
                goto free_key;

        rcu_read_lock();
        ptr = map->ops->map_lookup_elem(map, key);
        if (ptr)
                memcpy(value, ptr, map->value_size);
        rcu_read_unlock();

        err = -ENOENT;
        if (!ptr)
                goto free_value;

        err = -EFAULT;
        if (copy_to_user(uvalue, value, map->value_size) != 0)
                goto free_value;

        err = 0;

free_value:
        kfree(value);
free_key:
        kfree(key);
err_put:
        fdput(f);
        return err;
}
```
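
The user-space side mirrors this: key and value buffers sized to the map's `key_size`/`value_size`, with their addresses passed through the `__aligned_u64` fields. A sketch under the same raw-syscall assumption (the helper name is hypothetical):

```c
#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* 0 on success, -1 with errno (e.g. ENOENT) otherwise */
static int bpf_lookup_elem(int map_fd, const void *key, void *value)
{
        union bpf_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.map_fd = map_fd;
        attr.key = (__u64) (unsigned long) key;
        attr.value = (__u64) (unsigned long) value;

        return syscall(__NR_bpf, BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
}
```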
```c
#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags

static int map_update_elem(union bpf_attr *attr)
{
        void __user *ukey = u64_to_ptr(attr->key);
        void __user *uvalue = u64_to_ptr(attr->value);
        int ufd = attr->map_fd;
        struct fd f = fdget(ufd);
        struct bpf_map *map;
        void *key, *value;
        int err;

        if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
                return -EINVAL;

        map = bpf_map_get(f);
        if (IS_ERR(map))
                return PTR_ERR(map);

        err = -ENOMEM;
        key = kmalloc(map->key_size, GFP_USER);
        if (!key)
                goto err_put;

        err = -EFAULT;
        if (copy_from_user(key, ukey, map->key_size) != 0)
                goto free_key;

        err = -ENOMEM;
        value = kmalloc(map->value_size, GFP_USER);
        if (!value)
                goto free_key;

        err = -EFAULT;
        if (copy_from_user(value, uvalue, map->value_size) != 0)
                goto free_value;

        /* eBPF programs that use maps run under rcu_read_lock(), and all
         * map accessors rely on that fact, so do the same here
         */
        rcu_read_lock();
        err = map->ops->map_update_elem(map, key, value, attr->flags);
        rcu_read_unlock();

free_value:
        kfree(value);
free_key:
        kfree(key);
err_put:
        fdput(f);
        return err;
}
```
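
The `flags` argument selects the update semantics: `BPF_ANY` (create or update), `BPF_NOEXIST` (create only) or `BPF_EXIST` (update only). A user-space sketch under the same assumptions as above:

```c
#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* hypothetical helper; flags is BPF_ANY, BPF_NOEXIST or BPF_EXIST */
static int bpf_update_elem(int map_fd, const void *key, const void *value,
                           __u64 flags)
{
        union bpf_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.map_fd = map_fd;
        attr.key = (__u64) (unsigned long) key;
        attr.value = (__u64) (unsigned long) value;
        attr.flags = flags;

        return syscall(__NR_bpf, BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
}
```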
```c
#define BPF_MAP_DELETE_ELEM_LAST_FIELD key

static int map_delete_elem(union bpf_attr *attr)
{
        void __user *ukey = u64_to_ptr(attr->key);
        int ufd = attr->map_fd;
        struct fd f = fdget(ufd);
        struct bpf_map *map;
        void *key;
        int err;

        if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
                return -EINVAL;

        map = bpf_map_get(f);
        if (IS_ERR(map))
                return PTR_ERR(map);

        err = -ENOMEM;
        key = kmalloc(map->key_size, GFP_USER);
        if (!key)
                goto err_put;

        err = -EFAULT;
        if (copy_from_user(key, ukey, map->key_size) != 0)
                goto free_key;

        rcu_read_lock();
        err = map->ops->map_delete_elem(map, key);
        rcu_read_unlock();

free_key:
        kfree(key);
err_put:
        fdput(f);
        return err;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key

static int map_get_next_key(union bpf_attr *attr)
{
        void __user *ukey = u64_to_ptr(attr->key);
        void __user *unext_key = u64_to_ptr(attr->next_key);
        int ufd = attr->map_fd;
        struct fd f = fdget(ufd);
        struct bpf_map *map;
        void *key, *next_key;
        int err;

        if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
                return -EINVAL;

        map = bpf_map_get(f);
        if (IS_ERR(map))
                return PTR_ERR(map);

        err = -ENOMEM;
        key = kmalloc(map->key_size, GFP_USER);
        if (!key)
                goto err_put;

        err = -EFAULT;
        if (copy_from_user(key, ukey, map->key_size) != 0)
                goto free_key;

        err = -ENOMEM;
        next_key = kmalloc(map->key_size, GFP_USER);
        if (!next_key)
                goto free_key;

        rcu_read_lock();
        err = map->ops->map_get_next_key(map, key, next_key);
        rcu_read_unlock();
        if (err)
                goto free_next_key;

        err = -EFAULT;
        if (copy_to_user(unext_key, next_key, map->key_size) != 0)
                goto free_next_key;

        err = 0;

free_next_key:
        kfree(next_key);
free_key:
        kfree(key);
err_put:
        fdput(f);
        return err;
}
```
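
`BPF_MAP_GET_NEXT_KEY` is the iteration primitive: feed it the last key you saw and it returns the following one, failing with `-ENOENT` at the end. A sketch of a full walk under the usual raw-syscall assumption; for hash-type maps a key that is not present yields the first element, which is the conventional way to start:

```c
#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int bpf_get_next_key(int map_fd, const void *key, void *next_key)
{
        union bpf_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.map_fd = map_fd;
        attr.key = (__u64) (unsigned long) key;
        attr.next_key = (__u64) (unsigned long) next_key;

        return syscall(__NR_bpf, BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr));
}

/* visit every key of a map with 4-byte keys */
static void walk_map(int map_fd)
{
        int key = -1, next_key;  /* key assumed absent, so the walk starts over */

        while (bpf_get_next_key(map_fd, &key, &next_key) == 0) {
                /* ... lookup/delete next_key here ... */
                key = next_key;
        }
}
```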
```c
static LIST_HEAD(bpf_prog_types);

static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
{
        struct bpf_prog_type_list *tl;

        list_for_each_entry(tl, &bpf_prog_types, list_node) {
                if (tl->type == type) {
                        prog->aux->ops = tl->ops;
                        prog->type = type;
                        return 0;
                }
        }

        return -EINVAL;
}

void bpf_register_prog_type(struct bpf_prog_type_list *tl)
{
        list_add(&tl->list_node, &bpf_prog_types);
}
```
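
Program types register themselves the same way map types do. Roughly how `net/core/filter.c` plugs in the socket-filter type (a sketch; callback details abridged):

```c
/* sketch modeled on net/core/filter.c */
static const struct bpf_verifier_ops sk_filter_ops = {
        .get_func_proto = sk_filter_func_proto,
        .is_valid_access = sk_filter_is_valid_access,
};

static struct bpf_prog_type_list sk_filter_type __read_mostly = {
        .ops = &sk_filter_ops,
        .type = BPF_PROG_TYPE_SOCKET_FILTER,
};

static int __init register_sk_filter_ops(void)
{
        bpf_register_prog_type(&sk_filter_type);
        return 0;
}
late_initcall(register_sk_filter_ops);
```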
```c
/* fixup insn->imm field of bpf_call instructions:
 * if (insn->imm == BPF_FUNC_map_lookup_elem)
 *      insn->imm = bpf_map_lookup_elem - __bpf_call_base;
 * else if (insn->imm == BPF_FUNC_map_update_elem)
 *      insn->imm = bpf_map_update_elem - __bpf_call_base;
 * else ...
 *
 * this function is called after the eBPF program passed verification
 */
static void fixup_bpf_calls(struct bpf_prog *prog)
{
        const struct bpf_func_proto *fn;
        int i;

        for (i = 0; i < prog->len; i++) {
                struct bpf_insn *insn = &prog->insnsi[i];

                if (insn->code == (BPF_JMP | BPF_CALL)) {
                        /* we reach here when the program has bpf_call
                         * instructions and has passed bpf_check(), which means
                         * ops->get_func_proto must have been supplied; check it
                         */
                        BUG_ON(!prog->aux->ops->get_func_proto);

                        if (insn->imm == BPF_FUNC_tail_call) {
                                /* mark bpf_tail_call as a different opcode
                                 * to avoid a conditional branch in the
                                 * interpreter for every normal call
                                 * and to prevent accidental JITing by a
                                 * JIT compiler that doesn't support
                                 * bpf_tail_call yet
                                 */
                                insn->imm = 0;
                                insn->code |= BPF_X;
                                continue;
                        }

                        fn = prog->aux->ops->get_func_proto(insn->imm);
                        /* all functions that have a prototype and that the
                         * verifier allowed programs to call must be real
                         * in-kernel functions
                         */
                        BUG_ON(!fn->func);
                        insn->imm = fn->func - __bpf_call_base;
                }
        }
}
```
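
The relative encoding keeps `imm` a 32-bit field while still naming an arbitrary kernel function. At execution time the interpreter reverses the subtraction; schematically (after `kernel/bpf/core.c`, relying on GCC's `void *` arithmetic), for a `BPF_JMP | BPF_CALL` instruction:

```c
/* schematic of the interpreter side: re-add __bpf_call_base to recover
 * the function pointer that fixup_bpf_calls() made relative
 */
u64 (*fn)(u64, u64, u64, u64, u64);

fn = (void *) __bpf_call_base + insn->imm;
regs[BPF_REG_0] = fn(regs[BPF_REG_1], regs[BPF_REG_2], regs[BPF_REG_3],
                     regs[BPF_REG_4], regs[BPF_REG_5]);
```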
```c
/* drop refcnt on maps used by eBPF program and free auxiliary data */
static void free_used_maps(struct bpf_prog_aux *aux)
{
        int i;

        for (i = 0; i < aux->used_map_cnt; i++)
                bpf_map_put(aux->used_maps[i]);

        kfree(aux->used_maps);
}

void bpf_prog_put(struct bpf_prog *prog)
{
        if (atomic_dec_and_test(&prog->aux->refcnt)) {
                free_used_maps(prog->aux);
                bpf_prog_free(prog);
        }
}
EXPORT_SYMBOL_GPL(bpf_prog_put);

static int bpf_prog_release(struct inode *inode, struct file *filp)
{
        struct bpf_prog *prog = filp->private_data;

        bpf_prog_put(prog);
        return 0;
}

static const struct file_operations bpf_prog_fops = {
        .release = bpf_prog_release,
};

static struct bpf_prog *get_prog(struct fd f)
{
        struct bpf_prog *prog;

        if (!f.file)
                return ERR_PTR(-EBADF);

        if (f.file->f_op != &bpf_prog_fops) {
                fdput(f);
                return ERR_PTR(-EINVAL);
        }

        prog = f.file->private_data;

        return prog;
}

/* called by sockets/tracing/seccomp before attaching a program to an event;
 * pairs with bpf_prog_put()
 */
struct bpf_prog *bpf_prog_get(u32 ufd)
{
        struct fd f = fdget(ufd);
        struct bpf_prog *prog;

        prog = get_prog(f);

        if (IS_ERR(prog))
                return prog;

        atomic_inc(&prog->aux->refcnt);
        fdput(f);
        return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_get);
```
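
For the socket case, the hand-off that ends in `bpf_prog_get()` starts with a plain `setsockopt()`. A sketch, assuming kernel headers that define `SO_ATTACH_BPF`:

```c
#include <sys/socket.h>

/* attach a loaded eBPF program fd to a socket; the kernel side takes its
 * own reference via bpf_prog_get(), so prog_fd may be closed afterwards
 */
static int attach_prog_to_socket(int sock_fd, int prog_fd)
{
        return setsockopt(sock_fd, SOL_SOCKET, SO_ATTACH_BPF,
                          &prog_fd, sizeof(prog_fd));
}
```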
```c
/* last field in 'union bpf_attr' used by this command */
#define BPF_PROG_LOAD_LAST_FIELD kern_version

static int bpf_prog_load(union bpf_attr *attr)
{
        enum bpf_prog_type type = attr->prog_type;
        struct bpf_prog *prog;
        int err;
        char license[128];
        bool is_gpl;

        if (CHECK_ATTR(BPF_PROG_LOAD))
                return -EINVAL;

        /* copy eBPF program license from user space */
        if (strncpy_from_user(license, u64_to_ptr(attr->license),
                              sizeof(license) - 1) < 0)
                return -EFAULT;
        license[sizeof(license) - 1] = 0;

        /* eBPF programs must be GPL compatible to use GPL-ed functions */
        is_gpl = license_is_gpl_compatible(license);

        if (attr->insn_cnt >= BPF_MAXINSNS)
                return -EINVAL;

        if (type == BPF_PROG_TYPE_KPROBE &&
            attr->kern_version != LINUX_VERSION_CODE)
                return -EINVAL;

        /* plain bpf_prog allocation */
        prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
        if (!prog)
                return -ENOMEM;

        prog->len = attr->insn_cnt;

        err = -EFAULT;
        if (copy_from_user(prog->insns, u64_to_ptr(attr->insns),
                           prog->len * sizeof(struct bpf_insn)) != 0)
                goto free_prog;

        prog->orig_prog = NULL;
        prog->jited = false;

        atomic_set(&prog->aux->refcnt, 1);
        prog->gpl_compatible = is_gpl;

        /* find program type: socket_filter vs tracing_filter */
        err = find_prog_type(type, prog);
        if (err < 0)
                goto free_prog;

        /* run eBPF verifier */
        err = bpf_check(&prog, attr);
        if (err < 0)
                goto free_used_maps;

        /* fixup BPF_CALL->imm field */
        fixup_bpf_calls(prog);

        /* eBPF program is ready to be JITed */
        err = bpf_prog_select_runtime(prog);
        if (err < 0)
                goto free_used_maps;

        err = anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog, O_RDWR | O_CLOEXEC);
        if (err < 0)
                /* failed to allocate fd */
                goto free_used_maps;

        return err;

free_used_maps:
        free_used_maps(prog->aux);
free_prog:
        bpf_prog_free(prog);
        return err;
}
```
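
Loading from user space follows the same attr pattern; the program below is about the smallest verifier-acceptable one (set `r0`, the return value, then exit). A sketch under the usual raw-syscall assumption:

```c
#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* returns a program fd, or -1 with errno set */
static int load_return_zero_filter(void)
{
        struct bpf_insn insns[] = {
                { .code = BPF_ALU64 | BPF_MOV | BPF_K,
                  .dst_reg = BPF_REG_0, .imm = 0 },     /* r0 = 0 */
                { .code = BPF_JMP | BPF_EXIT },         /* return r0 */
        };
        union bpf_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
        attr.insns = (__u64) (unsigned long) insns;
        attr.insn_cnt = sizeof(insns) / sizeof(insns[0]);
        attr.license = (__u64) (unsigned long) "GPL";

        return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
}
```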
```c
SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
{
        union bpf_attr attr = {};
        int err;

        /* the syscall is limited to root temporarily. This restriction will be
         * lifted when the security audit is clean. Note that eBPF+tracing must
         * have this restriction, since it may pass kernel data to user space
         */
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        if (!access_ok(VERIFY_READ, uattr, 1))
                return -EFAULT;

        if (size > PAGE_SIZE)   /* silly large */
                return -E2BIG;

        /* If we're handed a bigger struct than we know of,
         * ensure all the unknown bits are 0 - i.e. new
         * user-space does not rely on any kernel feature
         * extensions we don't know about yet.
         */
        if (size > sizeof(attr)) {
                unsigned char __user *addr;
                unsigned char __user *end;
                unsigned char val;

                addr = (void __user *)uattr + sizeof(attr);
                end  = (void __user *)uattr + size;

                for (; addr < end; addr++) {
                        err = get_user(val, addr);
                        if (err)
                                return err;
                        if (val)
                                return -E2BIG;
                }
                size = sizeof(attr);
        }

        /* copy attributes from user space, may be less than sizeof(bpf_attr) */
        if (copy_from_user(&attr, uattr, size) != 0)
                return -EFAULT;

        switch (cmd) {
        case BPF_MAP_CREATE:
                err = map_create(&attr);
                break;
        case BPF_MAP_LOOKUP_ELEM:
                err = map_lookup_elem(&attr);
                break;
        case BPF_MAP_UPDATE_ELEM:
                err = map_update_elem(&attr);
                break;
        case BPF_MAP_DELETE_ELEM:
                err = map_delete_elem(&attr);
                break;
        case BPF_MAP_GET_NEXT_KEY:
                err = map_get_next_key(&attr);
                break;
        case BPF_PROG_LOAD:
                err = bpf_prog_load(&attr);
                break;
        default:
                err = -EINVAL;
                break;
        }

        return err;
}
```
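
The size handshake above is the extensibility contract of `bpf(2)`: a newer binary may pass a larger union, as long as every byte this kernel does not understand is zero. A small probe demonstrating the rule, assuming the uapi header matches the running kernel and the raw syscall as in the earlier sketches:

```c
#include <errno.h>
#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* returns 1 if the kernel rejects unknown non-zero tail bytes with E2BIG */
static int probe_extension_check(void)
{
        unsigned char buf[sizeof(union bpf_attr) + 8];

        memset(buf, 0, sizeof(buf));
        buf[sizeof(union bpf_attr)] = 1;  /* non-zero byte past known fields */

        return syscall(__NR_bpf, BPF_MAP_CREATE, buf, sizeof(buf)) < 0 &&
               errno == E2BIG;
}
```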