/* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/bpf_perf_event.h>
#include <linux/filter.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>
#include "trace.h"

/**
 * trace_call_bpf - invoke BPF program
 * @prog: BPF program
 * @ctx: opaque context pointer
 *
 * kprobe handlers execute BPF programs via this helper.
 * Can be used from static tracepoints in the future.
 *
 * Return: BPF programs always return an integer which is interpreted by
 * kprobe handler as:
 * 0 - return from kprobe (event is filtered out)
 * 1 - store kprobe event into ring buffer
 * Other values are reserved and currently alias to 1
 */
unsigned int trace_call_bpf(struct bpf_prog *prog, void *ctx)
{
	unsigned int ret;

	if (in_nmi()) /* not supported yet */
		return 1;

	preempt_disable();

	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
		/*
		 * since some bpf program is already running on this cpu,
		 * don't call into another bpf program (same or different)
		 * and don't send kprobe event into ring-buffer,
		 * so return zero here
		 */
		ret = 0;
		goto out;
	}

	rcu_read_lock();
	ret = BPF_PROG_RUN(prog, ctx);
	rcu_read_unlock();

 out:
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

	return ret;
}
EXPORT_SYMBOL_GPL(trace_call_bpf);
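
/*
 * Illustrative sketch (not part of the original file): a minimal
 * BPF_PROG_TYPE_KPROBE program exercising the return-value contract
 * documented above.  It assumes the samples/bpf build environment (SEC()
 * and bpf_get_current_pid_tgid() from bpf_helpers.h); names are hypothetical.
 *
 *	SEC("kprobe/sys_write")
 *	int filter_by_pid(struct pt_regs *ctx)
 *	{
 *		u32 pid = bpf_get_current_pid_tgid() >> 32;
 *
 *		if (pid != 1)
 *			return 0;	// event is filtered out
 *		return 1;		// store kprobe event into ring buffer
 *	}
 */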

static u64 bpf_probe_read(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	void *dst = (void *) (long) r1;
	int ret, size = (int) r2;
	void *unsafe_ptr = (void *) (long) r3;

	ret = probe_kernel_read(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);

	return ret;
}

static const struct bpf_func_proto bpf_probe_read_proto = {
	.func		= bpf_probe_read,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_RAW_STACK,
	.arg2_type	= ARG_CONST_STACK_SIZE,
	.arg3_type	= ARG_ANYTHING,
};
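
/*
 * Illustrative sketch (not part of the original file): typical use of the
 * bpf_probe_read() helper from a kprobe program, reading through an unsafe
 * kernel pointer.  Assumes bpf_helpers.h plus kernel headers for struct
 * task_struct; the field chased here is only an example.
 *
 *	struct task_struct *task = (struct task_struct *) bpf_get_current_task();
 *	char comm[16] = {};
 *
 *	// dst, size, unsafe kernel address - matching r1/r2/r3 above
 *	bpf_probe_read(comm, sizeof(comm), task->comm);
 */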

static u64 bpf_probe_write_user(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	void *unsafe_ptr = (void *) (long) r1;
	void *src = (void *) (long) r2;
	int size = (int) r3;

	/*
	 * Ensure we're in user context which is safe for the helper to
	 * run. This helper has no business in a kthread.
	 *
	 * access_ok() should prevent writing to non-user memory, but in
	 * some situations (nommu, temporary switch, etc) access_ok() does
	 * not provide enough validation, hence the check on KERNEL_DS.
	 */

	if (unlikely(in_interrupt() ||
		     current->flags & (PF_KTHREAD | PF_EXITING)))
		return -EPERM;
	if (unlikely(segment_eq(get_fs(), KERNEL_DS)))
		return -EPERM;
	if (!access_ok(VERIFY_WRITE, unsafe_ptr, size))
		return -EPERM;

	return probe_kernel_write(unsafe_ptr, src, size);
}

static const struct bpf_func_proto bpf_probe_write_user_proto = {
	.func		= bpf_probe_write_user,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
	.arg2_type	= ARG_PTR_TO_STACK,
	.arg3_type	= ARG_CONST_STACK_SIZE,
};

static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
{
	pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!",
			    current->comm, task_pid_nr(current));

	return &bpf_probe_write_user_proto;
}

/*
 * limited trace_printk()
 * only %d %u %x %ld %lu %lx %lld %llu %llx %p %s conversion specifiers allowed
 */
static u64 bpf_trace_printk(u64 r1, u64 fmt_size, u64 r3, u64 r4, u64 r5)
{
	char *fmt = (char *) (long) r1;
	bool str_seen = false;
	int mod[3] = {};
	int fmt_cnt = 0;
	u64 unsafe_addr;
	char buf[64];
	int i;

	/*
	 * bpf_check()->check_func_arg()->check_stack_boundary()
	 * guarantees that fmt points to bpf program stack,
	 * fmt_size bytes of it were initialized and fmt_size > 0
	 */
	if (fmt[--fmt_size] != 0)
		return -EINVAL;

	/* check format string for allowed specifiers */
	for (i = 0; i < fmt_size; i++) {
		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i]))
			return -EINVAL;

		if (fmt[i] != '%')
			continue;

		if (fmt_cnt >= 3)
			return -EINVAL;

		/* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */
		i++;
		if (fmt[i] == 'l') {
			mod[fmt_cnt]++;
			i++;
		} else if (fmt[i] == 'p' || fmt[i] == 's') {
			mod[fmt_cnt]++;
			i++;
			if (!isspace(fmt[i]) && !ispunct(fmt[i]) && fmt[i] != 0)
				return -EINVAL;
			fmt_cnt++;
			if (fmt[i - 1] == 's') {
				if (str_seen)
					/* allow only one '%s' per fmt string */
					return -EINVAL;
				str_seen = true;

				switch (fmt_cnt) {
				case 1:
					unsafe_addr = r3;
					r3 = (long) buf;
					break;
				case 2:
					unsafe_addr = r4;
					r4 = (long) buf;
					break;
				case 3:
					unsafe_addr = r5;
					r5 = (long) buf;
					break;
				}
				buf[0] = 0;
				strncpy_from_unsafe(buf,
						    (void *) (long) unsafe_addr,
						    sizeof(buf));
			}
			continue;
		}

		if (fmt[i] == 'l') {
			mod[fmt_cnt]++;
			i++;
		}

		if (fmt[i] != 'd' && fmt[i] != 'u' && fmt[i] != 'x')
			return -EINVAL;
		fmt_cnt++;
	}

	return __trace_printk(1/* fake ip will not be printed */, fmt,
			      mod[0] == 2 ? r3 : mod[0] == 1 ? (long) r3 : (u32) r3,
			      mod[1] == 2 ? r4 : mod[1] == 1 ? (long) r4 : (u32) r4,
			      mod[2] == 2 ? r5 : mod[2] == 1 ? (long) r5 : (u32) r5);
}

static const struct bpf_func_proto bpf_trace_printk_proto = {
	.func		= bpf_trace_printk,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_STACK,
	.arg2_type	= ARG_CONST_STACK_SIZE,
};

const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
{
	/*
	 * this program might be calling bpf_trace_printk,
	 * so allocate per-cpu printk buffers
	 */
	trace_printk_init_buffers();

	return &bpf_trace_printk_proto;
}
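
/*
 * Illustrative sketch (not part of the original file): a format string that
 * satisfies the restrictions enforced by bpf_trace_printk() above - at most
 * three arguments, only the listed specifiers, and a single %s (which is
 * fetched with strncpy_from_unsafe(), so it must be a kernel address).
 * Assumes the bpf_trace_printk() wrapper from samples/bpf bpf_helpers.h.
 *
 *	char fmt[] = "pid %d wrote %lu bytes, comm %s\n";
 *
 *	bpf_trace_printk(fmt, sizeof(fmt), pid, count, task->comm);
 *
 * The output is readable from /sys/kernel/debug/tracing/trace_pipe.
 */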

static u64 bpf_perf_event_read(u64 r1, u64 flags, u64 r3, u64 r4, u64 r5)
{
	struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;
	struct perf_event *event;

	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;
	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	event = ee->event;
	if (unlikely(event->attr.type != PERF_TYPE_HARDWARE &&
		     event->attr.type != PERF_TYPE_RAW))
		return -EINVAL;

	/* make sure event is local and doesn't have pmu::count */
	if (unlikely(event->oncpu != cpu || event->pmu->count))
		return -EINVAL;

	/*
	 * we don't know if the function is run successfully by the
	 * return value. It can be judged in other places, such as
	 * eBPF programs.
	 */
	return perf_event_read_local(event);
}

static const struct bpf_func_proto bpf_perf_event_read_proto = {
	.func		= bpf_perf_event_read,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};
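
/*
 * Illustrative sketch (not part of the original file): reading a hardware
 * counter from a BPF program through the helper above.  The map is a
 * BPF_MAP_TYPE_PERF_EVENT_ARRAY whose slots are filled from user space with
 * perf_event_open() fds; map and variable names are hypothetical.
 *
 *	struct bpf_map_def SEC("maps") cycles_map = {
 *		.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
 *		.key_size = sizeof(int),
 *		.value_size = sizeof(u32),
 *		.max_entries = 64,	// >= number of possible CPUs
 *	};
 *
 *	u64 cycles = bpf_perf_event_read(&cycles_map, BPF_F_CURRENT_CPU);
 */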

static __always_inline u64
__bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
			u64 flags, struct perf_raw_record *raw)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct perf_sample_data sample_data;
	struct bpf_event_entry *ee;
	struct perf_event *event;

	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	event = ee->event;
	if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
		     event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
		return -EINVAL;

	if (unlikely(event->oncpu != cpu))
		return -EOPNOTSUPP;

	perf_sample_data_init(&sample_data, 0, 0);
	sample_data.raw = raw;
	perf_event_output(event, &sample_data, regs);
	return 0;
}

static u64 bpf_perf_event_output(u64 r1, u64 r2, u64 flags, u64 r4, u64 size)
{
	struct pt_regs *regs = (struct pt_regs *)(long) r1;
	struct bpf_map *map  = (struct bpf_map *)(long) r2;
	void *data = (void *)(long) r4;
	struct perf_raw_record raw = {
		.frag = {
			.size	= size,
			.data	= data,
		},
	};

	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;

	return __bpf_perf_event_output(regs, map, flags, &raw);
}

static const struct bpf_func_proto bpf_perf_event_output_proto = {
	.func		= bpf_perf_event_output,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_STACK,
	.arg5_type	= ARG_CONST_STACK_SIZE,
};
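
/*
 * Illustrative sketch (not part of the original file): pushing a sample from
 * a kprobe program to user space via the helper above.  User space opens a
 * PERF_COUNT_SW_BPF_OUTPUT event per cpu, stores the fds in a
 * BPF_MAP_TYPE_PERF_EVENT_ARRAY and mmap()s the ring buffers (see
 * samples/bpf/trace_output_user.c).  Struct layout and names are hypothetical.
 *
 *	struct event {
 *		u32 pid;
 *		u64 ts;
 *	} data = { .pid = pid, .ts = bpf_ktime_get_ns() };
 *
 *	bpf_perf_event_output(ctx, &events_map, BPF_F_CURRENT_CPU,
 *			      &data, sizeof(data));
 */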

static DEFINE_PER_CPU(struct pt_regs, bpf_pt_regs);

u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
{
	struct pt_regs *regs = this_cpu_ptr(&bpf_pt_regs);
	struct perf_raw_frag frag = {
		.copy		= ctx_copy,
		.size		= ctx_size,
		.data		= ctx,
	};
	struct perf_raw_record raw = {
		.frag = {
			{
				.next	= ctx_size ? &frag : NULL,
			},
			.size	= meta_size,
			.data	= meta,
		},
	};

	perf_fetch_caller_regs(regs);

	return __bpf_perf_event_output(regs, map, flags, &raw);
}

static u64 bpf_get_current_task(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	return (long) current;
}

static const struct bpf_func_proto bpf_get_current_task_proto = {
	.func		= bpf_get_current_task,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
};

static u64 bpf_current_task_under_cgroup(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	struct bpf_map *map = (struct bpf_map *)(long)r1;
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct cgroup *cgrp;
	u32 idx = (u32)r2;

	if (unlikely(in_interrupt()))
		return -EINVAL;

	if (unlikely(idx >= array->map.max_entries))
		return -E2BIG;

	cgrp = READ_ONCE(array->ptrs[idx]);
	if (unlikely(!cgrp))
		return -EAGAIN;

	return task_under_cgroup_hierarchy(current, cgrp);
}

static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
	.func		= bpf_current_task_under_cgroup,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};
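
/*
 * Illustrative sketch (not part of the original file): filtering events to
 * tasks inside one cgroup with the helper above.  The map is a
 * BPF_MAP_TYPE_CGROUP_ARRAY whose slot 0 was filled from user space with a
 * cgroup2 directory fd; names are hypothetical.
 *
 *	if (bpf_current_task_under_cgroup(&cgroup_map, 0) != 1)
 *		return 0;	// current task is not in the cgroup - skip
 */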

static const struct bpf_func_proto *tracing_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_probe_read:
		return &bpf_probe_read_proto;
	case BPF_FUNC_ktime_get_ns:
		return &bpf_ktime_get_ns_proto;
	case BPF_FUNC_tail_call:
		return &bpf_tail_call_proto;
	case BPF_FUNC_get_current_pid_tgid:
		return &bpf_get_current_pid_tgid_proto;
	case BPF_FUNC_get_current_task:
		return &bpf_get_current_task_proto;
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	case BPF_FUNC_get_current_comm:
		return &bpf_get_current_comm_proto;
	case BPF_FUNC_trace_printk:
		return bpf_get_trace_printk_proto();
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_smp_processor_id_proto;
	case BPF_FUNC_perf_event_read:
		return &bpf_perf_event_read_proto;
	case BPF_FUNC_probe_write_user:
		return bpf_get_probe_write_proto();
	case BPF_FUNC_current_task_under_cgroup:
		return &bpf_current_task_under_cgroup_proto;
	case BPF_FUNC_get_prandom_u32:
		return &bpf_get_prandom_u32_proto;
	default:
		return NULL;
	}
}

static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto;
	default:
		return tracing_func_proto(func_id);
	}
}

/* bpf+kprobe programs can access fields of 'struct pt_regs' */
static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
					enum bpf_reg_type *reg_type)
{
	if (off < 0 || off >= sizeof(struct pt_regs))
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;
	return true;
}

static const struct bpf_verifier_ops kprobe_prog_ops = {
	.get_func_proto		= kprobe_prog_func_proto,
	.is_valid_access	= kprobe_prog_is_valid_access,
};

static struct bpf_prog_type_list kprobe_tl = {
	.ops	= &kprobe_prog_ops,
	.type	= BPF_PROG_TYPE_KPROBE,
};

static u64 bpf_perf_event_output_tp(u64 r1, u64 r2, u64 index, u64 r4, u64 size)
{
	/*
	 * r1 points to perf tracepoint buffer where first 8 bytes are hidden
	 * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it
	 * from there and call the same bpf_perf_event_output() helper
	 */
	u64 ctx = *(long *)(uintptr_t)r1;

	return bpf_perf_event_output(ctx, r2, index, r4, size);
}

static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
	.func		= bpf_perf_event_output_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_STACK,
	.arg5_type	= ARG_CONST_STACK_SIZE,
};

static u64 bpf_get_stackid_tp(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	u64 ctx = *(long *)(uintptr_t)r1;

	return bpf_get_stackid(ctx, r2, r3, r4, r5);
}

static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
	.func		= bpf_get_stackid_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};

static const struct bpf_func_proto *tp_prog_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_tp;
	default:
		return tracing_func_proto(func_id);
	}
}

static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
				    enum bpf_reg_type *reg_type)
{
	if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;
	return true;
}

static const struct bpf_verifier_ops tracepoint_prog_ops = {
	.get_func_proto		= tp_prog_func_proto,
	.is_valid_access	= tp_prog_is_valid_access,
};

static struct bpf_prog_type_list tracepoint_tl = {
	.ops	= &tracepoint_prog_ops,
	.type	= BPF_PROG_TYPE_TRACEPOINT,
};

static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
				    enum bpf_reg_type *reg_type)
{
	if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;
	if (off == offsetof(struct bpf_perf_event_data, sample_period)) {
		if (size != sizeof(u64))
			return false;
	} else {
		if (size != sizeof(long))
			return false;
	}
	return true;
}

static u32 pe_prog_convert_ctx_access(enum bpf_access_type type, int dst_reg,
				      int src_reg, int ctx_off,
				      struct bpf_insn *insn_buf,
				      struct bpf_prog *prog)
{
	struct bpf_insn *insn = insn_buf;

	switch (ctx_off) {
	case offsetof(struct bpf_perf_event_data, sample_period):
		BUILD_BUG_ON(FIELD_SIZEOF(struct perf_sample_data, period) != sizeof(u64));
		*insn++ = BPF_LDX_MEM(bytes_to_bpf_size(FIELD_SIZEOF(struct bpf_perf_event_data_kern, data)),
				      dst_reg, src_reg,
				      offsetof(struct bpf_perf_event_data_kern, data));
		*insn++ = BPF_LDX_MEM(BPF_DW, dst_reg, dst_reg,
				      offsetof(struct perf_sample_data, period));
		break;
	default:
		*insn++ = BPF_LDX_MEM(bytes_to_bpf_size(FIELD_SIZEOF(struct bpf_perf_event_data_kern, regs)),
				      dst_reg, src_reg,
				      offsetof(struct bpf_perf_event_data_kern, regs));
		*insn++ = BPF_LDX_MEM(bytes_to_bpf_size(sizeof(long)),
				      dst_reg, dst_reg, ctx_off);
		break;
	}

	return insn - insn_buf;
}
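
/*
 * Illustrative sketch (not part of the original file): a
 * BPF_PROG_TYPE_PERF_EVENT program whose context loads are rewritten by
 * pe_prog_convert_ctx_access() above.  Assumes linux/bpf_perf_event.h, an
 * x86 pt_regs layout and the samples/bpf SEC() macro; names are hypothetical.
 *
 *	SEC("perf_event")
 *	int on_sample(struct bpf_perf_event_data *ctx)
 *	{
 *		u64 period = ctx->sample_period;	// rewritten into two loads
 *		long ip = ctx->regs.ip;			// default branch rewrite
 *
 *		return 0;
 *	}
 */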

static const struct bpf_verifier_ops perf_event_prog_ops = {
	.get_func_proto		= tp_prog_func_proto,
	.is_valid_access	= pe_prog_is_valid_access,
	.convert_ctx_access	= pe_prog_convert_ctx_access,
};

static struct bpf_prog_type_list perf_event_tl = {
	.ops	= &perf_event_prog_ops,
	.type	= BPF_PROG_TYPE_PERF_EVENT,
};

static int __init register_kprobe_prog_ops(void)
{
	bpf_register_prog_type(&kprobe_tl);
	bpf_register_prog_type(&tracepoint_tl);
	bpf_register_prog_type(&perf_event_tl);
	return 0;
}
late_initcall(register_kprobe_prog_ops);