#ifndef _LINUX_TRACE_EVENT_H
#define _LINUX_TRACE_EVENT_H

#include <linux/ring_buffer.h>
#include <linux/trace_seq.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/perf_event.h>
#include <linux/tracepoint.h>

struct trace_array;
struct trace_buffer;
struct tracer;
struct dentry;
struct bpf_prog;
const char *trace_print_flags_seq(struct trace_seq *p, const char *delim,
				  unsigned long flags,
				  const struct trace_print_flags *flag_array);

const char *trace_print_symbols_seq(struct trace_seq *p, unsigned long val,
				    const struct trace_print_flags *symbol_array);
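
/*
 * These helpers are normally reached through the __print_flags() and
 * __print_symbolic() macros in a TRACE_EVENT() print fmt rather than
 * called directly. A minimal sketch (the field and symbol names here
 * are hypothetical, not part of this header):
 *
 *	TP_printk("state=%s",
 *		  __print_symbolic(__entry->state,
 *				   { 0, "RUNNING" },
 *				   { 1, "SLEEPING" }))
 *
 * which expands into a trace_print_symbols_seq() call against the
 * iterator's tmp_seq.
 */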
#if BITS_PER_LONG == 32
const char *trace_print_symbols_seq_u64(struct trace_seq *p,
					unsigned long long val,
					const struct trace_print_flags_u64
								 *symbol_array);
#endif
const char *trace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr,
				    unsigned int bitmask_size);

const char *trace_print_hex_seq(struct trace_seq *p,
				const unsigned char *buf, int len);

const char *trace_print_array_seq(struct trace_seq *p,
				  const void *buf, int count,
				  size_t el_size);
struct trace_iterator;
struct trace_event;

int trace_raw_output_prep(struct trace_iterator *iter,
			  struct trace_event *event);
/*
 * The trace entry - the most basic unit of tracing. This is what
 * is printed in the end as a single line in the trace output, such as:
 *
 *     bash-15816 [01]   235.197585: idle_cpu <- irq_enter
 */
struct trace_entry {
	unsigned short		type;
	unsigned char		flags;
	unsigned char		preempt_count;
	int			pid;
};
#define TRACE_EVENT_TYPE_MAX						\
	((1 << (sizeof(((struct trace_entry *)0)->type) * 8)) - 1)
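
/*
 * With the two-byte "type" field above, this works out to
 * (1 << 16) - 1 = 65535, the largest event type id that fits.
 */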
/*
 * Trace iterator - used by the printout routines that present trace
 * results to users and that might sleep, etc:
 */
struct trace_iterator {
	struct trace_array	*tr;
	struct tracer		*trace;
	struct trace_buffer	*trace_buffer;
	void			*private;
	int			cpu_file;
	struct mutex		mutex;
	struct ring_buffer_iter	**buffer_iter;
	unsigned long		iter_flags;

	/* trace_seq for __print_flags() and __print_symbolic() etc. */
	struct trace_seq	tmp_seq;

	cpumask_var_t		started;

	/* it's true when current open file is snapshot */
	bool			snapshot;

	/* The below is zeroed out in pipe_read */
	struct trace_seq	seq;
	struct trace_entry	*ent;
	unsigned long		lost_events;
	int			leftover;
	int			ent_size;
	int			cpu;
	u64			ts;

	loff_t			pos;
	long			idx;

	/* All new fields here will be zeroed out in pipe_read */
};
enum trace_iter_flags {
	TRACE_FILE_LAT_FMT	= 1,
	TRACE_FILE_ANNOTATE	= 2,
	TRACE_FILE_TIME_IN_NS	= 4,
};
typedef enum print_line_t (*trace_print_func)(struct trace_iterator *iter,
					      int flags,
					      struct trace_event *event);

struct trace_event_functions {
	trace_print_func	trace;
	trace_print_func	raw;
	trace_print_func	hex;
	trace_print_func	binary;
};
struct trace_event {
	struct hlist_node		node;
	struct list_head		list;
	int				type;
	struct trace_event_functions	*funcs;
};
extern int register_trace_event(struct trace_event *event);
extern int unregister_trace_event(struct trace_event *event);
/* Return values for print_line callback */
enum print_line_t {
	TRACE_TYPE_PARTIAL_LINE	= 0,	/* Retry after flushing the seq */
	TRACE_TYPE_HANDLED	= 1,
	TRACE_TYPE_UNHANDLED	= 2,	/* Relay to other output functions */
	TRACE_TYPE_NO_CONSUME	= 3	/* Handled but ask to not consume */
};
/*
 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
 * simplifies those functions and keeps them in sync.
 */
static inline enum print_line_t trace_handle_return(struct trace_seq *s)
{
	return trace_seq_has_overflowed(s) ?
		TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
}
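
/*
 * A minimal sketch of an output callback built on the helper above;
 * the event name is hypothetical, not part of this header:
 *
 *	static enum print_line_t
 *	trace_foo_output(struct trace_iterator *iter, int flags,
 *			 struct trace_event *event)
 *	{
 *		struct trace_seq *s = &iter->seq;
 *
 *		trace_seq_printf(s, "foo fired\n");
 *		return trace_handle_return(s);
 *	}
 */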
void tracing_generic_entry_update(struct trace_entry *entry,
				  unsigned long flags,
				  int pc);
struct trace_event_file;
struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct ring_buffer **current_buffer,
				struct trace_event_file *trace_file,
				int type, unsigned long len,
				unsigned long flags, int pc);
struct ring_buffer_event *
trace_current_buffer_lock_reserve(struct ring_buffer **current_buffer,
				  int type, unsigned long len,
				  unsigned long flags, int pc);
void trace_buffer_unlock_commit(struct trace_array *tr,
				struct ring_buffer *buffer,
				struct ring_buffer_event *event,
				unsigned long flags, int pc);
void trace_buffer_unlock_commit_regs(struct trace_array *tr,
				     struct ring_buffer *buffer,
				     struct ring_buffer_event *event,
				     unsigned long flags, int pc,
				     struct pt_regs *regs);
void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
					 struct ring_buffer_event *event);

void tracing_record_cmdline(struct task_struct *tsk);

int trace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...);
enum trace_reg {
	TRACE_REG_REGISTER,
	TRACE_REG_UNREGISTER,
#ifdef CONFIG_PERF_EVENTS
	TRACE_REG_PERF_REGISTER,
	TRACE_REG_PERF_UNREGISTER,
	TRACE_REG_PERF_OPEN,
	TRACE_REG_PERF_CLOSE,
	TRACE_REG_PERF_ADD,
	TRACE_REG_PERF_DEL,
#endif
};

struct trace_event_call;
struct trace_event_class {
	const char		*system;
	void			*probe;
#ifdef CONFIG_PERF_EVENTS
	void			*perf_probe;
#endif
	int			(*reg)(struct trace_event_call *event,
				       enum trace_reg type, void *data);
	int			(*define_fields)(struct trace_event_call *);
	struct list_head	*(*get_fields)(struct trace_event_call *);
	struct list_head	fields;
	int			(*raw_init)(struct trace_event_call *);
};
extern int trace_event_reg(struct trace_event_call *event,
			   enum trace_reg type, void *data);
struct trace_event_buffer {
	struct ring_buffer		*buffer;
	struct ring_buffer_event	*event;
	struct trace_event_file		*trace_file;
	void				*entry;
	unsigned long			flags;
	int				pc;
};

void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
				 struct trace_event_file *trace_file,
				 unsigned long len);

void trace_event_buffer_commit(struct trace_event_buffer *fbuffer);
enum {
	TRACE_EVENT_FL_FILTERED_BIT,
	TRACE_EVENT_FL_CAP_ANY_BIT,
	TRACE_EVENT_FL_NO_SET_FILTER_BIT,
	TRACE_EVENT_FL_IGNORE_ENABLE_BIT,
	TRACE_EVENT_FL_WAS_ENABLED_BIT,
	TRACE_EVENT_FL_USE_CALL_FILTER_BIT,
	TRACE_EVENT_FL_TRACEPOINT_BIT,
	TRACE_EVENT_FL_KPROBE_BIT,
	TRACE_EVENT_FL_UPROBE_BIT,
};
/*
 * Event flags:
 *  FILTERED	    - The event has a filter attached
 *  CAP_ANY	    - Any user can enable for perf
 *  NO_SET_FILTER   - Set when the filter has an error and is to be ignored
 *  IGNORE_ENABLE   - For trace internal events, do not enable with debugfs file
 *  WAS_ENABLED	    - Set and stays set when an event was ever enabled
 *		      (used for module unloading; if a module event is
 *		       enabled, it is best to clear the buffers that used it).
 *  USE_CALL_FILTER - For trace internal events, don't use the file filter
 *  TRACEPOINT	    - Event is a tracepoint
 *  KPROBE	    - Event is a kprobe
 *  UPROBE	    - Event is a uprobe
 */
enum {
	TRACE_EVENT_FL_FILTERED		= (1 << TRACE_EVENT_FL_FILTERED_BIT),
	TRACE_EVENT_FL_CAP_ANY		= (1 << TRACE_EVENT_FL_CAP_ANY_BIT),
	TRACE_EVENT_FL_NO_SET_FILTER	= (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT),
	TRACE_EVENT_FL_IGNORE_ENABLE	= (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT),
	TRACE_EVENT_FL_WAS_ENABLED	= (1 << TRACE_EVENT_FL_WAS_ENABLED_BIT),
	TRACE_EVENT_FL_USE_CALL_FILTER	= (1 << TRACE_EVENT_FL_USE_CALL_FILTER_BIT),
	TRACE_EVENT_FL_TRACEPOINT	= (1 << TRACE_EVENT_FL_TRACEPOINT_BIT),
	TRACE_EVENT_FL_KPROBE		= (1 << TRACE_EVENT_FL_KPROBE_BIT),
	TRACE_EVENT_FL_UPROBE		= (1 << TRACE_EVENT_FL_UPROBE_BIT),
};

#define TRACE_EVENT_FL_UKPROBE (TRACE_EVENT_FL_KPROBE | TRACE_EVENT_FL_UPROBE)
struct trace_event_call {
	struct list_head	list;
	struct trace_event_class *class;
	union {
		char			*name;
		/* Set TRACE_EVENT_FL_TRACEPOINT flag when using "tp" */
		struct tracepoint	*tp;
	};
	struct trace_event	event;
	char			*print_fmt;
	struct event_filter	*filter;
	void			*mod;
	void			*data;
	/*
	 *   bit 0:	filter_active
	 *   bit 1:	allow trace by non root (cap any)
	 *   bit 2:	failed to apply filter
	 *   bit 3:	trace internal event (do not enable)
	 *   bit 4:	Event was enabled by module
	 *   bit 5:	use call filter rather than file filter
	 *   bit 6:	Event is a tracepoint
	 */
	int			flags; /* static flags of different events */

#ifdef CONFIG_PERF_EVENTS
	int				perf_refcount;
	struct hlist_head __percpu	*perf_events;
	struct bpf_prog			*prog;

	int	(*perf_perm)(struct trace_event_call *,
			     struct perf_event *);
#endif
};
static inline const char *
trace_event_name(struct trace_event_call *call)
{
	if (call->flags & TRACE_EVENT_FL_TRACEPOINT)
		return call->tp ? call->tp->name : NULL;
	else
		return call->name;
}
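
/*
 * Because "name" and "tp" share a union, callers must go through this
 * accessor rather than reading call->name directly. For example (the
 * pr_debug() line is illustrative, not from this header):
 *
 *	pr_debug("enabling event %s\n", trace_event_name(call));
 */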
struct trace_subsystem_dir;
enum {
	EVENT_FILE_FL_ENABLED_BIT,
	EVENT_FILE_FL_RECORDED_CMD_BIT,
	EVENT_FILE_FL_FILTERED_BIT,
	EVENT_FILE_FL_NO_SET_FILTER_BIT,
	EVENT_FILE_FL_SOFT_MODE_BIT,
	EVENT_FILE_FL_SOFT_DISABLED_BIT,
	EVENT_FILE_FL_TRIGGER_MODE_BIT,
	EVENT_FILE_FL_TRIGGER_COND_BIT,
	EVENT_FILE_FL_PID_FILTER_BIT,
};
/*
 * Event file flags:
 *  ENABLED	  - The event is enabled
 *  RECORDED_CMD  - The comms should be recorded at sched_switch
 *  FILTERED	  - The event has a filter attached
 *  NO_SET_FILTER - Set when the filter has an error and is to be ignored
 *  SOFT_MODE	  - The event is enabled/disabled by SOFT_DISABLED
 *  SOFT_DISABLED - When set, do not trace the event (even though its
 *		    tracepoint may be enabled)
 *  TRIGGER_MODE  - When set, invoke the triggers associated with the event
 *  TRIGGER_COND  - When set, one or more triggers have an associated filter
 *  PID_FILTER	  - When set, the event is filtered based on pid
 */
enum {
	EVENT_FILE_FL_ENABLED		= (1 << EVENT_FILE_FL_ENABLED_BIT),
	EVENT_FILE_FL_RECORDED_CMD	= (1 << EVENT_FILE_FL_RECORDED_CMD_BIT),
	EVENT_FILE_FL_FILTERED		= (1 << EVENT_FILE_FL_FILTERED_BIT),
	EVENT_FILE_FL_NO_SET_FILTER	= (1 << EVENT_FILE_FL_NO_SET_FILTER_BIT),
	EVENT_FILE_FL_SOFT_MODE		= (1 << EVENT_FILE_FL_SOFT_MODE_BIT),
	EVENT_FILE_FL_SOFT_DISABLED	= (1 << EVENT_FILE_FL_SOFT_DISABLED_BIT),
	EVENT_FILE_FL_TRIGGER_MODE	= (1 << EVENT_FILE_FL_TRIGGER_MODE_BIT),
	EVENT_FILE_FL_TRIGGER_COND	= (1 << EVENT_FILE_FL_TRIGGER_COND_BIT),
	EVENT_FILE_FL_PID_FILTER	= (1 << EVENT_FILE_FL_PID_FILTER_BIT),
};
struct trace_event_file {
	struct list_head		list;
	struct trace_event_call		*event_call;
	struct event_filter		*filter;
	struct dentry			*dir;
	struct trace_array		*tr;
	struct trace_subsystem_dir	*system;
	struct list_head		triggers;

	/*
	 * 32 bit flags:
	 *   bit 0:	enabled
	 *   bit 1:	enabled cmd record
	 *   bit 2:	enable/disable with the soft disable bit
	 *   bit 3:	soft disabled
	 *   bit 4:	trigger enabled
	 *
	 * Note: The bits must be set atomically to prevent races
	 * from other writers. Reads of flags do not need to be in
	 * sync as they occur in critical sections. But the way flags
	 * is currently used, these changes do not affect the code
	 * except that when a change is made, it may have a slight
	 * delay in propagating the changes to other CPUs due to
	 * caching and such. Which is mostly OK ;-)
	 */
	unsigned long		flags;
	atomic_t		sm_ref;	/* soft-mode reference counter */
	atomic_t		tm_ref;	/* trigger-mode reference counter */
};
#define __TRACE_EVENT_FLAGS(name, value)				\
	static int __init trace_init_flags_##name(void)		\
	{								\
		event_##name.flags |= value;				\
		return 0;						\
	}								\
	early_initcall(trace_init_flags_##name);
#define __TRACE_EVENT_PERF_PERM(name, expr...)				\
	static int perf_perm_##name(struct trace_event_call *tp_event,	\
				    struct perf_event *p_event)		\
	{								\
		return ({ expr; });					\
	}								\
	static int __init trace_init_perf_perm_##name(void)		\
	{								\
		event_##name.perf_perm = &perf_perm_##name;		\
		return 0;						\
	}								\
	early_initcall(trace_init_perf_perm_##name);
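
/*
 * Sketch of how an event could restrict perf access with the macro
 * above; "foo" is a hypothetical event, and the expression shown
 * mirrors the usual "no sampling unless privileged" pattern:
 *
 *	__TRACE_EVENT_PERF_PERM(foo,
 *		is_sampling_event(p_event) ? -EPERM : 0);
 */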
#define PERF_MAX_TRACE_SIZE	2048

#define MAX_FILTER_STR_VAL	256	/* Should handle KSYM_SYMBOL_LEN */
enum event_trigger_type {
	ETT_NONE		= (0),
	ETT_TRACE_ONOFF		= (1 << 0),
	ETT_SNAPSHOT		= (1 << 1),
	ETT_STACKTRACE		= (1 << 2),
	ETT_EVENT_ENABLE	= (1 << 3),
};
extern int filter_match_preds(struct event_filter *filter, void *rec);
extern int filter_check_discard(struct trace_event_file *file, void *rec,
				struct ring_buffer *buffer,
				struct ring_buffer_event *event);
extern int call_filter_check_discard(struct trace_event_call *call, void *rec,
				     struct ring_buffer *buffer,
				     struct ring_buffer_event *event);
extern enum event_trigger_type event_triggers_call(struct trace_event_file *file,
						   void *rec);
extern void event_triggers_post_call(struct trace_event_file *file,
				     enum event_trigger_type tt);

bool trace_event_ignore_this_pid(struct trace_event_file *trace_file);
/**
 * trace_trigger_soft_disabled - do triggers and test if soft disabled
 * @file: The file pointer of the event to test
 *
 * If any triggers without filters are attached to this event, they
 * will be called here. If the event is soft disabled and has no
 * triggers that require testing the fields, it will return true,
 * otherwise false.
 */
static inline bool
trace_trigger_soft_disabled(struct trace_event_file *file)
{
	unsigned long eflags = file->flags;

	if (!(eflags & EVENT_FILE_FL_TRIGGER_COND)) {
		if (eflags & EVENT_FILE_FL_TRIGGER_MODE)
			event_triggers_call(file, NULL);
		if (eflags & EVENT_FILE_FL_SOFT_DISABLED)
			return true;
		if (eflags & EVENT_FILE_FL_PID_FILTER)
			return trace_event_ignore_this_pid(file);
	}
	return false;
}
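
/*
 * A tracepoint probe typically uses this as an early-out before
 * reserving ring buffer space, along these lines (a sketch, not the
 * literal generated code):
 *
 *	static void trace_event_raw_event_foo(void *__data, ...)
 *	{
 *		struct trace_event_file *trace_file = __data;
 *
 *		if (trace_trigger_soft_disabled(trace_file))
 *			return;
 *		...reserve, fill and commit the event...
 *	}
 */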
/*
 * Helper function for event_trigger_unlock_commit{_regs}().
 * If there are event triggers attached to this event that require
 * filtering against its fields, then they will be called as the
 * entry already holds the field information of the current event.
 *
 * It also checks if the event should be discarded or not.
 * It is to be discarded if the event is soft disabled and the
 * event was only recorded to process triggers, or if the event
 * filter is active and this event did not match the filters.
 *
 * Returns true if the event is discarded, false otherwise.
 */
static inline bool
__event_trigger_test_discard(struct trace_event_file *file,
			     struct ring_buffer *buffer,
			     struct ring_buffer_event *event,
			     void *entry,
			     enum event_trigger_type *tt)
{
	unsigned long eflags = file->flags;

	if (eflags & EVENT_FILE_FL_TRIGGER_COND)
		*tt = event_triggers_call(file, entry);

	if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags))
		ring_buffer_discard_commit(buffer, event);
	else if (!filter_check_discard(file, entry, buffer, event))
		return false;

	return true;
}
/**
 * event_trigger_unlock_commit - handle triggers and finish event commit
 * @file: The file pointer associated with the event
 * @buffer: The ring buffer that the event is being written to
 * @event: The event meta data in the ring buffer
 * @entry: The event itself
 * @irq_flags: The state of the interrupts at the start of the event
 * @pc: The state of the preempt count at the start of the event.
 *
 * This is a helper function to handle triggers that require data
 * from the event itself. It also tests the event against filters and
 * if the event is soft disabled and should be discarded.
 */
static inline void
event_trigger_unlock_commit(struct trace_event_file *file,
			    struct ring_buffer *buffer,
			    struct ring_buffer_event *event,
			    void *entry, unsigned long irq_flags, int pc)
{
	enum event_trigger_type tt = ETT_NONE;

	if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
		trace_buffer_unlock_commit(file->tr, buffer, event,
					   irq_flags, pc);

	if (tt)
		event_triggers_post_call(file, tt);
}
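
/*
 * Sketch of the usual reserve/fill/commit sequence around the helper
 * above (the "entry" field names and "event_foo" are hypothetical):
 *
 *	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
 *						event_foo.event.type,
 *						sizeof(*entry),
 *						irq_flags, pc);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *	entry->value = 42;
 *	event_trigger_unlock_commit(trace_file, buffer, event, entry,
 *				    irq_flags, pc);
 */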
/**
 * event_trigger_unlock_commit_regs - handle triggers and finish event commit
 * @file: The file pointer associated with the event
 * @buffer: The ring buffer that the event is being written to
 * @event: The event meta data in the ring buffer
 * @entry: The event itself
 * @irq_flags: The state of the interrupts at the start of the event
 * @pc: The state of the preempt count at the start of the event.
 * @regs: The register state captured at the start of the event
 *
 * This is a helper function to handle triggers that require data
 * from the event itself. It also tests the event against filters and
 * if the event is soft disabled and should be discarded.
 *
 * Same as event_trigger_unlock_commit() but calls
 * trace_buffer_unlock_commit_regs() instead of trace_buffer_unlock_commit().
 */
static inline void
event_trigger_unlock_commit_regs(struct trace_event_file *file,
				 struct ring_buffer *buffer,
				 struct ring_buffer_event *event,
				 void *entry, unsigned long irq_flags, int pc,
				 struct pt_regs *regs)
{
	enum event_trigger_type tt = ETT_NONE;

	if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
		trace_buffer_unlock_commit_regs(file->tr, buffer, event,
						irq_flags, pc, regs);

	if (tt)
		event_triggers_post_call(file, tt);
}
#ifdef CONFIG_BPF_EVENTS
unsigned int trace_call_bpf(struct bpf_prog *prog, void *ctx);
#else
static inline unsigned int trace_call_bpf(struct bpf_prog *prog, void *ctx)
{
	return 1;
}
#endif
enum {
	FILTER_OTHER = 0,
	FILTER_STATIC_STRING,
	FILTER_DYN_STRING,
	FILTER_PTR_STRING,
	FILTER_TRACE_FN,
};
extern int trace_event_raw_init(struct trace_event_call *call);
extern int trace_define_field(struct trace_event_call *call, const char *type,
			      const char *name, int offset, int size,
			      int is_signed, int filter_type);
extern int trace_add_event_call(struct trace_event_call *call);
extern int trace_remove_event_call(struct trace_event_call *call);

#define is_signed_type(type)	(((type)(-1)) < (type)1)
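
/*
 * Sketch of a define_fields callback wiring a field into the filter
 * machinery with the helpers above ("foo_entry" and its "value" field
 * are hypothetical):
 *
 *	ret = trace_define_field(call, "int", "value",
 *				 offsetof(struct foo_entry, value),
 *				 sizeof(int), is_signed_type(int),
 *				 FILTER_OTHER);
 */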
int trace_set_clr_event(const char *system, const char *event, int set);
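
/*
 * For example, enabling one event from in-kernel code (a sketch;
 * returns 0 on success):
 *
 *	trace_set_clr_event("sched", "sched_switch", 1);
 */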
/*
 * The double __builtin_constant_p is because gcc will give us an error
 * if we try to allocate the static variable to fmt if it is not a
 * constant, even though the outer if statement optimizes out.
 */
#define event_trace_printk(ip, fmt, args...)				\
do {									\
	__trace_printk_check_format(fmt, ##args);			\
	tracing_record_cmdline(current);				\
	if (__builtin_constant_p(fmt)) {				\
		static const char *trace_printk_fmt			\
		  __attribute__((section("__trace_printk_fmt"))) =	\
			__builtin_constant_p(fmt) ? fmt : NULL;		\
									\
		__trace_bprintk(ip, trace_printk_fmt, ##args);		\
	} else								\
		__trace_printk(ip, fmt, ##args);			\
} while (0)
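
/*
 * Typical call, passing the caller's instruction pointer as "ip" so
 * the output can be attributed to the call site (a sketch; "delta" is
 * a hypothetical local):
 *
 *	event_trace_printk(_THIS_IP_, "delta=%ld\n", delta);
 */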
#ifdef CONFIG_PERF_EVENTS

DECLARE_PER_CPU(struct pt_regs, perf_trace_regs);

extern int  perf_trace_init(struct perf_event *event);
extern void perf_trace_destroy(struct perf_event *event);
extern int  perf_trace_add(struct perf_event *event, int flags);
extern void perf_trace_del(struct perf_event *event, int flags);
extern int  ftrace_profile_set_filter(struct perf_event *event, int event_id,
				      char *filter_str);
extern void ftrace_profile_free_filter(struct perf_event *event);
extern void *perf_trace_buf_prepare(int size, unsigned short type,
				    struct pt_regs **regs, int *rctxp);

static inline void
perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr,
		      u64 count, struct pt_regs *regs, void *head,
		      struct task_struct *task)
{
	perf_tp_event(addr, count, raw_data, size, regs, head, rctx, task);
}
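
/*
 * The perf probe path pairs the two helpers above: prepare a buffer,
 * fill in the trace entry, then submit (a sketch; the surrounding
 * locals are hypothetical):
 *
 *	entry = perf_trace_buf_prepare(size, event_type, &regs, &rctx);
 *	if (!entry)
 *		return;
 *	...fill *entry...
 *	perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL);
 */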
#endif /* CONFIG_PERF_EVENTS */

#endif /* _LINUX_TRACE_EVENT_H */