/*
 * Performance events:
 *
 *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
 *    Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
 *    Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
 *
 * Data type definitions, declarations, prototypes.
 *
 *    Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licensing details see kernel-base/COPYING
 */
#ifndef _LINUX_PERF_EVENT_H
#define _LINUX_PERF_EVENT_H

#include <uapi/linux/perf_event.h>

/*
 * Kernel-internal data types and definitions:
 */

#ifdef CONFIG_PERF_EVENTS
# include <asm/perf_event.h>
# include <asm/local64.h>
#endif

struct perf_guest_info_callbacks {
	int				(*is_in_guest)(void);
	int				(*is_user_mode)(void);
	unsigned long			(*get_guest_ip)(void);
};

#ifdef CONFIG_HAVE_HW_BREAKPOINT
#include <asm/hw_breakpoint.h>
#endif

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/pid_namespace.h>
#include <linux/workqueue.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/irq_work.h>
#include <linux/static_key.h>
#include <linux/jump_label_ratelimit.h>
#include <linux/atomic.h>
#include <linux/sysfs.h>
#include <linux/perf_regs.h>
#include <linux/cgroup.h>
#include <asm/local.h>

struct perf_callchain_entry {
	__u64				nr;
	__u64				ip[0]; /* /proc/sys/kernel/perf_event_max_stack */
};

struct perf_callchain_entry_ctx {
	struct perf_callchain_entry	*entry;
	u32				max_stack;
	u32				nr;
	short				contexts;
	bool				contexts_maxed;
};

typedef unsigned long (*perf_copy_f)(void *dst, const void *src,
				     unsigned long off, unsigned long len);

struct perf_raw_frag {
	union {
		struct perf_raw_frag	*next;
		unsigned long		pad;
	};
	perf_copy_f			copy;
	void				*data;
	u32				size;
} __packed;

struct perf_raw_record {
	struct perf_raw_frag		frag;
	u32				size;
};

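/*
 * A minimal sketch (not part of this header) of how a consumer could
 * flatten a fragmented raw record into a linear buffer, using only the
 * types above plus perf_raw_frag_last() declared near the end of this
 * file. The helper name copy_raw_record() is hypothetical.
 */
#if 0
static void copy_raw_record(void *dst, struct perf_raw_record *raw)
{
	struct perf_raw_frag *frag = &raw->frag;

	do {
		if (frag->copy)		/* frag provides its own copy routine */
			frag->copy(dst, frag->data, 0, frag->size);
		else
			memcpy(dst, frag->data, frag->size);
		dst += frag->size;
		if (perf_raw_frag_last(frag))	/* pad < sizeof(u64) ends the chain */
			break;
		frag = frag->next;
	} while (1);
}
#endif
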
/*
 * branch stack layout:
 *  nr: number of taken branches stored in entries[]
 *
 * Note that nr can vary from sample to sample.
 * Branches (to, from) are stored from most recent
 * to least recent, i.e., entries[0] contains the most
 * recent branch.
 */
struct perf_branch_stack {
	__u64				nr;
	struct perf_branch_entry	entries[0];
};

struct task_struct;

/*
 * extra PMU register associated with an event
 */
struct hw_perf_event_extra {
	u64		config;	/* register value */
	unsigned int	reg;	/* register address or index */
	int		alloc;	/* extra register already allocated */
	int		idx;	/* index in shared_regs->regs[] */
};

/**
 * struct hw_perf_event - performance event hardware details:
 */
struct hw_perf_event {
#ifdef CONFIG_PERF_EVENTS
	union {
		struct { /* hardware */
			u64		config;
			u64		last_tag;
			unsigned long	config_base;
			unsigned long	event_base;
			int		event_base_rdpmc;
			int		idx;
			int		last_cpu;
			int		flags;

			struct hw_perf_event_extra extra_reg;
			struct hw_perf_event_extra branch_reg;
		};
		struct { /* software */
			struct hrtimer	hrtimer;
		};
		struct { /* tracepoint */
			/* for tp_event->class */
			struct list_head	tp_list;
		};
		struct { /* intel_cqm */
			int			cqm_state;
			u32			cqm_rmid;
			int			is_group_event;
			struct list_head	cqm_events_entry;
			struct list_head	cqm_groups_entry;
			struct list_head	cqm_group_entry;
		};
		struct { /* itrace */
			int			itrace_started;
		};
		struct { /* amd_power */
			u64	pwr_acc;
			u64	ptsc;
		};
#ifdef CONFIG_HAVE_HW_BREAKPOINT
		struct { /* breakpoint */
			/*
			 * Crufty hack to avoid the chicken and egg
			 * problem hw_breakpoint has with context
			 * creation and event initialization.
			 */
			struct arch_hw_breakpoint	info;
			struct list_head		bp_list;
		};
#endif
	};
	/*
	 * If the event is a per-task event, this will point to the task in
	 * question. See the comment in perf_event_alloc().
	 */
	struct task_struct		*target;

	/*
	 * PMU would store hardware filter configuration
	 * here.
	 */
	void				*addr_filters;

	/* Last sync'ed generation of filters */
	unsigned long			addr_filters_gen;

/*
 * hw_perf_event::state flags; used to track the PERF_EF_* state.
 */
#define PERF_HES_STOPPED	0x01 /* the counter is stopped */
#define PERF_HES_UPTODATE	0x02 /* event->count up-to-date */
#define PERF_HES_ARCH		0x04

	int				state;

	/*
	 * The last observed hardware counter value, updated with a
	 * local64_cmpxchg() such that pmu::read() can be called nested.
	 */
	local64_t			prev_count;

	/*
	 * The period to start the next sample with.
	 */
	u64				sample_period;

	/*
	 * The period we started this sample with.
	 */
	u64				last_period;

	/*
	 * However much is left of the current period; note that this is
	 * a full 64bit value and allows for generation of periods longer
	 * than hardware might allow.
	 */
	local64_t			period_left;

	/*
	 * State for throttling the event, see __perf_event_overflow() and
	 * perf_adjust_freq_unthr_context().
	 */
	u64				interrupts_seq;
	u64				interrupts;

	/*
	 * State for freq target events, see __perf_event_overflow() and
	 * perf_adjust_freq_unthr_context().
	 */
	u64				freq_time_stamp;
	u64				freq_count_stamp;
#endif
};

struct perf_event;

/*
 * Common implementation detail of pmu::{start,commit,cancel}_txn
 */
#define PERF_PMU_TXN_ADD	0x1	/* txn to add/schedule event on PMU */
#define PERF_PMU_TXN_READ	0x2	/* txn to read event group from PMU */

/**
 * pmu::capabilities flags
 */
#define PERF_PMU_CAP_NO_INTERRUPT		0x01
#define PERF_PMU_CAP_NO_NMI			0x02
#define PERF_PMU_CAP_AUX_NO_SG			0x04
#define PERF_PMU_CAP_AUX_SW_DOUBLEBUF		0x08
#define PERF_PMU_CAP_EXCLUSIVE			0x10
#define PERF_PMU_CAP_ITRACE			0x20
#define PERF_PMU_CAP_HETEROGENEOUS_CPUS		0x40

/**
 * struct pmu - generic performance monitoring unit
 */
struct pmu {
	struct list_head		entry;

	struct module			*module;
	struct device			*dev;
	const struct attribute_group	**attr_groups;
	const char			*name;
	int				type;

	/*
	 * various common per-pmu feature flags
	 */
	int				capabilities;

	int * __percpu			pmu_disable_count;
	struct perf_cpu_context * __percpu pmu_cpu_context;
	atomic_t			exclusive_cnt; /* < 0: cpu; > 0: tsk */
	int				task_ctx_nr;
	int				hrtimer_interval_ms;

	/* number of address filters this PMU can do */
	unsigned int			nr_addr_filters;

	/*
	 * Fully disable/enable this PMU, can be used to protect from the PMI
	 * as well as for lazy/batch writing of the MSRs.
	 */
	void (*pmu_enable)		(struct pmu *pmu); /* optional */
	void (*pmu_disable)		(struct pmu *pmu); /* optional */

	/*
	 * Try and initialize the event for this PMU.
	 *
	 * Returns:
	 *  -ENOENT	-- @event is not for this PMU
	 *
	 *  -ENODEV	-- @event is for this PMU but PMU not present
	 *  -EBUSY	-- @event is for this PMU but PMU temporarily unavailable
	 *  -EINVAL	-- @event is for this PMU but @event is not valid
	 *  -EOPNOTSUPP -- @event is for this PMU, @event is valid, but not supported
	 *  -EACCES	-- @event is for this PMU, @event is valid, but no privileges
	 *
	 *  0		-- @event is for this PMU and valid
	 *
	 * Other error return values are allowed.
	 */
	int (*event_init)		(struct perf_event *event);

	/*
	 * Notification that the event was mapped or unmapped. Called
	 * in the context of the mapping task.
	 */
	void (*event_mapped)		(struct perf_event *event); /* optional */
	void (*event_unmapped)		(struct perf_event *event); /* optional */

	/*
	 * Flags for ->add()/->del()/->start()/->stop(). There are
	 * matching hw_perf_event::state flags.
	 */
#define PERF_EF_START	0x01		/* start the counter when adding    */
#define PERF_EF_RELOAD	0x02		/* reload the counter when starting */
#define PERF_EF_UPDATE	0x04		/* update the counter when stopping */

	/*
	 * Adds/Removes a counter to/from the PMU, can be done inside a
	 * transaction, see the ->*_txn() methods.
	 *
	 * The add/del callbacks will reserve all hardware resources required
	 * to service the event; this includes any counter constraint
	 * scheduling etc.
	 *
	 * Called with IRQs disabled and the PMU disabled on the CPU the event
	 * is on.
	 *
	 * ->add() called without PERF_EF_START should result in the same state
	 *  as ->add() followed by ->stop().
	 *
	 * ->del() must always PERF_EF_UPDATE stop an event. If it calls
	 *  ->stop() that must deal with already being stopped without
	 *  PERF_EF_UPDATE.
	 */
	int  (*add)			(struct perf_event *event, int flags);
	void (*del)			(struct perf_event *event, int flags);

	/*
	 * Starts/Stops a counter present on the PMU.
	 *
	 * The PMI handler should stop the counter when perf_event_overflow()
	 * returns !0. ->start() will be used to continue.
	 *
	 * Also used to change the sample period.
	 *
	 * Called with IRQs disabled and the PMU disabled on the CPU the event
	 * is on -- will be called from NMI context when the PMU generates
	 * NMIs.
	 *
	 * ->stop() with PERF_EF_UPDATE will read the counter and update
	 *  period/count values like ->read() would.
	 *
	 * ->start() with PERF_EF_RELOAD will reprogram the counter
	 *  value, must be preceded by a ->stop() with PERF_EF_UPDATE.
	 */
	void (*start)			(struct perf_event *event, int flags);
	void (*stop)			(struct perf_event *event, int flags);

	/*
	 * Updates the counter value of the event.
	 *
	 * For sampling capable PMUs this will also update the software period
	 * hw_perf_event::period_left field.
	 */
	void (*read)			(struct perf_event *event);

	/*
	 * Group events scheduling is treated as a transaction: add
	 * group events as a whole and perform one schedulability test.
	 * If the test fails, roll back the whole group.
	 *
	 * Start the transaction; after this, ->add() doesn't need to
	 * do schedulability tests.
	 *
	 * Optional.
	 */
	void (*start_txn)		(struct pmu *pmu, unsigned int txn_flags);
	/*
	 * If ->start_txn() disabled the ->add() schedulability test
	 * then ->commit_txn() is required to perform one. On success
	 * the transaction is closed. On error the transaction is kept
	 * open until ->cancel_txn() is called.
	 *
	 * Optional.
	 */
	int  (*commit_txn)		(struct pmu *pmu);
	/*
	 * Will cancel the transaction; assumes ->del() is called
	 * for each successful ->add() during the transaction.
	 *
	 * Optional.
	 */
	void (*cancel_txn)		(struct pmu *pmu);

	/*
	 * Will return the value for perf_event_mmap_page::index for this event,
	 * if no implementation is provided it will default to: event->hw.idx + 1.
	 */
	int (*event_idx)		(struct perf_event *event); /* optional */

	/*
	 * context-switches callback
	 */
	void (*sched_task)		(struct perf_event_context *ctx,
					 bool sched_in);
	/*
	 * PMU specific data size
	 */
	size_t				task_ctx_size;


	/*
	 * Return the count value for a counter.
	 */
	u64 (*count)			(struct perf_event *event); /* optional */

	/*
	 * Set up pmu-private data structures for an AUX area
	 */
	void *(*setup_aux)		(int cpu, void **pages,
					 int nr_pages, bool overwrite);
					/* optional */

	/*
	 * Free pmu-private AUX data structures
	 */
	void (*free_aux)		(void *aux); /* optional */

	/*
	 * Validate address range filters: make sure the HW supports the
	 * requested configuration and number of filters; return 0 if the
	 * supplied filters are valid, -errno otherwise.
	 *
	 * Runs in the context of the ioctl()ing process and is not serialized
	 * with the rest of the PMU callbacks.
	 */
	int (*addr_filters_validate)	(struct list_head *filters);
					/* optional */

	/*
	 * Synchronize address range filter configuration:
	 * translate hw-agnostic filters into hardware configuration in
	 * event::hw::addr_filters.
	 *
	 * Runs as a part of filter sync sequence that is done in ->start()
	 * callback by calling perf_event_addr_filters_sync().
	 *
	 * May (and should) traverse event::addr_filters::list, for which its
	 * caller provides necessary serialization.
	 */
	void (*addr_filters_sync)	(struct perf_event *event);
					/* optional */

	/*
	 * Filter events for PMU-specific reasons.
	 */
	int (*filter_match)		(struct perf_event *event); /* optional */
};

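/*
 * A minimal sketch (not from the kernel sources) of the rough shape of a
 * counting-only PMU built on the callbacks above. Error handling and a
 * real counter backend are omitted; my_read_counter() is a hypothetical
 * device accessor, and perf_invalid_context comes from <linux/sched.h>.
 */
#if 0
static void my_pmu_read(struct perf_event *event)
{
	u64 prev, now;

	/* local64_cmpxchg() allows ->read() to be called nested, see above */
	do {
		prev = local64_read(&event->hw.prev_count);
		now  = my_read_counter();
	} while (local64_cmpxchg(&event->hw.prev_count, prev, now) != prev);

	local64_add(now - prev, &event->count);
}

static void my_pmu_start(struct perf_event *event, int flags)
{
	local64_set(&event->hw.prev_count, my_read_counter());
	event->hw.state = 0;
}

static void my_pmu_stop(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_UPDATE)
		my_pmu_read(event);
	event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
}

static int my_pmu_add(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_START)
		my_pmu_start(event, PERF_EF_RELOAD);
	else
		event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	return 0;
}

static void my_pmu_del(struct perf_event *event, int flags)
{
	my_pmu_stop(event, PERF_EF_UPDATE);
}

static int my_pmu_event_init(struct perf_event *event)
{
	/* the core sets event->pmu before calling ->event_init() */
	if (event->attr.type != event->pmu->type)
		return -ENOENT;		/* not ours */
	return 0;
}

static struct pmu my_pmu = {
	.task_ctx_nr	= perf_invalid_context,		/* per-CPU only */
	.capabilities	= PERF_PMU_CAP_NO_INTERRUPT,	/* no sampling */
	.event_init	= my_pmu_event_init,
	.add		= my_pmu_add,
	.del		= my_pmu_del,
	.start		= my_pmu_start,
	.stop		= my_pmu_stop,
	.read		= my_pmu_read,
};

/* perf_pmu_register(&my_pmu, "my_pmu", -1) assigns a dynamic type id. */
#endif
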
/**
 * struct perf_addr_filter - address range filter definition
 * @entry:	event's filter list linkage
 * @inode:	object file's inode for file-based filters
 * @offset:	filter range offset
 * @size:	filter range size
 * @range:	1: range, 0: address
 * @filter:	1: filter/start, 0: stop
 *
 * This is a hardware-agnostic filter configuration as specified by the user.
 */
struct perf_addr_filter {
	struct list_head	entry;
	struct inode		*inode;
	unsigned long		offset;
	unsigned long		size;
	unsigned int		range	: 1,
				filter	: 1;
};

/**
 * struct perf_addr_filters_head - container for address range filters
 * @list:	list of filters for this event
 * @lock:	spinlock that serializes accesses to the @list and event's
 *		(and its children's) filter generations.
 *
 * A child event will use parent's @list (and therefore @lock), so they are
 * bundled together; see perf_event_addr_filters().
 */
struct perf_addr_filters_head {
	struct list_head	list;
	raw_spinlock_t		lock;
};

/**
 * enum perf_event_active_state - the states of an event
 */
enum perf_event_active_state {
	PERF_EVENT_STATE_DEAD		= -4,
	PERF_EVENT_STATE_EXIT		= -3,
	PERF_EVENT_STATE_ERROR		= -2,
	PERF_EVENT_STATE_OFF		= -1,
	PERF_EVENT_STATE_INACTIVE	=  0,
	PERF_EVENT_STATE_ACTIVE		=  1,
};

struct file;
struct perf_sample_data;

typedef void (*perf_overflow_handler_t)(struct perf_event *,
					struct perf_sample_data *,
					struct pt_regs *regs);

enum perf_group_flag {
	PERF_GROUP_SOFTWARE		= 0x1,
};

#define SWEVENT_HLIST_BITS		8
#define SWEVENT_HLIST_SIZE		(1 << SWEVENT_HLIST_BITS)

struct swevent_hlist {
	struct hlist_head		heads[SWEVENT_HLIST_SIZE];
	struct rcu_head			rcu_head;
};

#define PERF_ATTACH_CONTEXT	0x01
#define PERF_ATTACH_GROUP	0x02
#define PERF_ATTACH_TASK	0x04
#define PERF_ATTACH_TASK_DATA	0x08

struct perf_cgroup;
struct ring_buffer;

struct pmu_event_list {
	raw_spinlock_t		lock;
	struct list_head	list;
};

/**
 * struct perf_event - performance event kernel representation:
 */
struct perf_event {
#ifdef CONFIG_PERF_EVENTS
	/*
	 * entry onto perf_event_context::event_list;
	 *   modifications require ctx->lock
	 *   RCU safe iterations.
	 */
	struct list_head		event_entry;

	/*
	 * XXX: group_entry and sibling_list should be mutually exclusive;
	 * either you're a sibling on a group, or you're the group leader.
	 * Rework the code to always use the same list element.
	 *
	 * Locked for modification by both ctx->mutex and ctx->lock; holding
	 * either suffices for read.
	 */
	struct list_head		group_entry;
	struct list_head		sibling_list;

	/*
	 * We need storage to track the entries in perf_pmu_migrate_context; we
	 * cannot use the event_entry because of RCU and we want to keep the
	 * group intact, which avoids us using the other two entries.
	 */
	struct list_head		migrate_entry;

	struct hlist_node		hlist_entry;
	struct list_head		active_entry;
	int				nr_siblings;
	int				group_flags;
	struct perf_event		*group_leader;
	struct pmu			*pmu;
	void				*pmu_private;

	enum perf_event_active_state	state;
	unsigned int			attach_state;
	local64_t			count;
	atomic64_t			child_count;

	/*
	 * These are the total time in nanoseconds that the event
	 * has been enabled (i.e. eligible to run, and the task has
	 * been scheduled in, if this is a per-task event)
	 * and running (scheduled onto the CPU), respectively.
	 *
	 * They are computed from tstamp_enabled, tstamp_running and
	 * tstamp_stopped when the event is in INACTIVE or ACTIVE state.
	 */
	u64				total_time_enabled;
	u64				total_time_running;

	/*
	 * These are timestamps used for computing total_time_enabled
	 * and total_time_running when the event is in INACTIVE or
	 * ACTIVE state, measured in nanoseconds from an arbitrary point
	 * in time.
	 * tstamp_enabled: the notional time when the event was enabled
	 * tstamp_running: the notional time when the event was scheduled on
	 * tstamp_stopped: in INACTIVE state, the notional time when the
	 *	event was scheduled off.
	 */
	u64				tstamp_enabled;
	u64				tstamp_running;
	u64				tstamp_stopped;

	/*
	 * timestamp shadows the actual context timing but it can
	 * be safely used in NMI interrupt context. It reflects the
	 * context time as it was when the event was last scheduled in.
	 *
	 * ctx_time already accounts for ctx->timestamp. Therefore to
	 * compute ctx_time for a sample, simply add perf_clock().
	 */
	u64				shadow_ctx_time;

	struct perf_event_attr		attr;
	u16				header_size;
	u16				id_header_size;
	u16				read_size;
	struct hw_perf_event		hw;

	struct perf_event_context	*ctx;
	atomic_long_t			refcount;

	/*
	 * These accumulate total time (in nanoseconds) that children
	 * events have been enabled and running, respectively.
	 */
	atomic64_t			child_total_time_enabled;
	atomic64_t			child_total_time_running;

	/*
	 * Protect attach/detach and child_list:
	 */
	struct mutex			child_mutex;
	struct list_head		child_list;
	struct perf_event		*parent;

	int				oncpu;
	int				cpu;

	struct list_head		owner_entry;
	struct task_struct		*owner;

	/* mmap bits */
	struct mutex			mmap_mutex;
	atomic_t			mmap_count;

	struct ring_buffer		*rb;
	struct list_head		rb_entry;
	unsigned long			rcu_batches;
	int				rcu_pending;

	/* poll related */
	wait_queue_head_t		waitq;
	struct fasync_struct		*fasync;

	/* delayed work for NMIs and such */
	int				pending_wakeup;
	int				pending_kill;
	int				pending_disable;
	struct irq_work			pending;

	atomic_t			event_limit;

	/* address range filters */
	struct perf_addr_filters_head	addr_filters;
	/* vma address array for file-based filters */
	unsigned long			*addr_filters_offs;
	unsigned long			addr_filters_gen;

	void (*destroy)(struct perf_event *);
	struct rcu_head			rcu_head;

	struct pid_namespace		*ns;
	u64				id;

	u64				(*clock)(void);
	perf_overflow_handler_t		overflow_handler;
	void				*overflow_handler_context;

#ifdef CONFIG_EVENT_TRACING
	struct trace_event_call		*tp_event;
	struct event_filter		*filter;
#ifdef CONFIG_FUNCTION_TRACER
	struct ftrace_ops		ftrace_ops;
#endif
#endif

#ifdef CONFIG_CGROUP_PERF
	struct perf_cgroup		*cgrp; /* cgroup this event is attached to */
	int				cgrp_defer_enabled;
#endif

	struct list_head		sb_list;
#endif /* CONFIG_PERF_EVENTS */
};

/**
 * struct perf_event_context - event context structure
 *
 * Used as a container for task events and CPU events as well:
 */
struct perf_event_context {
	struct pmu			*pmu;
	/*
	 * Protect the states of the events in the list,
	 * nr_active, and the list:
	 */
	raw_spinlock_t			lock;
	/*
	 * Protect the list of events. Locking either mutex or lock
	 * is sufficient to ensure the list doesn't change; to change
	 * the list you need to lock both the mutex and the spinlock.
	 */
	struct mutex			mutex;

	struct list_head		active_ctx_list;
	struct list_head		pinned_groups;
	struct list_head		flexible_groups;
	struct list_head		event_list;
	int				nr_events;
	int				nr_active;
	int				is_active;
	int				nr_stat;
	int				nr_freq;
	int				rotate_disable;
	atomic_t			refcount;
	struct task_struct		*task;

	/*
	 * Context clock, runs when context enabled.
	 */
	u64				time;
	u64				timestamp;

	/*
	 * These fields let us detect when two contexts have both
	 * been cloned (inherited) from a common ancestor.
	 */
	struct perf_event_context	*parent_ctx;
	u64				parent_gen;
	u64				generation;
	int				pin_count;
#ifdef CONFIG_CGROUP_PERF
	int				nr_cgroups;	/* cgroup evts */
#endif
	void				*task_ctx_data;	/* pmu specific data */
	struct rcu_head			rcu_head;
};

/*
 * Number of contexts where an event can trigger:
 *	task, softirq, hardirq, nmi.
 */
#define PERF_NR_CONTEXTS	4

/**
 * struct perf_cpu_context - per cpu event context structure
 */
struct perf_cpu_context {
	struct perf_event_context	ctx;
	struct perf_event_context	*task_ctx;
	int				active_oncpu;
	int				exclusive;

	raw_spinlock_t			hrtimer_lock;
	struct hrtimer			hrtimer;
	ktime_t				hrtimer_interval;
	unsigned int			hrtimer_active;

	struct pmu			*unique_pmu;
#ifdef CONFIG_CGROUP_PERF
	struct perf_cgroup		*cgrp;
#endif
};

struct perf_output_handle {
	struct perf_event		*event;
	struct ring_buffer		*rb;
	unsigned long			wakeup;
	unsigned long			size;
	union {
		void			*addr;
		unsigned long		head;
	};
	int				page;
};

#ifdef CONFIG_CGROUP_PERF

/*
 * perf_cgroup_info keeps track of time_enabled for a cgroup.
 * This is a per-cpu dynamically allocated data structure.
 */
struct perf_cgroup_info {
	u64				time;
	u64				timestamp;
};

struct perf_cgroup {
	struct cgroup_subsys_state	css;
	struct perf_cgroup_info	__percpu *info;
};

/*
 * Must ensure cgroup is pinned (css_get) before calling
 * this function. In other words, we cannot call this function
 * if there is no cgroup event for the current CPU context.
 */
static inline struct perf_cgroup *
perf_cgroup_from_task(struct task_struct *task, struct perf_event_context *ctx)
{
	return container_of(task_css_check(task, perf_event_cgrp_id,
					   ctx ? lockdep_is_held(&ctx->lock)
					       : true),
			    struct perf_cgroup, css);
}
#endif /* CONFIG_CGROUP_PERF */

#ifdef CONFIG_PERF_EVENTS

extern void *perf_aux_output_begin(struct perf_output_handle *handle,
				   struct perf_event *event);
extern void perf_aux_output_end(struct perf_output_handle *handle,
				unsigned long size, bool truncated);
extern int perf_aux_output_skip(struct perf_output_handle *handle,
				unsigned long size);
extern void *perf_get_aux(struct perf_output_handle *handle);

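/*
 * A minimal sketch (not from the kernel sources) of the calling pattern
 * for the AUX helpers above, as an AUX-capable PMU driver might use them;
 * my_hw_copy_to() is a hypothetical device-specific transfer, and real
 * drivers typically keep the handle live across asynchronous hardware
 * writes rather than doing a synchronous copy like this.
 */
#if 0
static void my_pmu_flush_aux(struct perf_event *event, unsigned long len)
{
	struct perf_output_handle handle;
	void *base = perf_aux_output_begin(&handle, event);

	if (!base)		/* no AUX buffer, or consumer not keeping up */
		return;

	my_hw_copy_to(perf_get_aux(&handle), len);
	perf_aux_output_end(&handle, len, false);	/* false: not truncated */
}
#endif
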
extern int perf_pmu_register(struct pmu *pmu, const char *name, int type);
extern void perf_pmu_unregister(struct pmu *pmu);

extern int perf_num_counters(void);
extern const char *perf_pmu_name(void);
extern void __perf_event_task_sched_in(struct task_struct *prev,
				       struct task_struct *task);
extern void __perf_event_task_sched_out(struct task_struct *prev,
					struct task_struct *next);
extern int perf_event_init_task(struct task_struct *child);
extern void perf_event_exit_task(struct task_struct *child);
extern void perf_event_free_task(struct task_struct *task);
extern void perf_event_delayed_put(struct task_struct *task);
extern struct file *perf_event_get(unsigned int fd);
extern const struct perf_event_attr *perf_event_attrs(struct perf_event *event);
extern void perf_event_print_debug(void);
extern void perf_pmu_disable(struct pmu *pmu);
extern void perf_pmu_enable(struct pmu *pmu);
extern void perf_sched_cb_dec(struct pmu *pmu);
extern void perf_sched_cb_inc(struct pmu *pmu);
extern int perf_event_task_disable(void);
extern int perf_event_task_enable(void);
extern int perf_event_refresh(struct perf_event *event, int refresh);
extern void perf_event_update_userpage(struct perf_event *event);
extern int perf_event_release_kernel(struct perf_event *event);
extern struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr,
				 int cpu,
				 struct task_struct *task,
				 perf_overflow_handler_t callback,
				 void *context);
extern void perf_pmu_migrate_context(struct pmu *pmu,
				     int src_cpu, int dst_cpu);
extern u64 perf_event_read_local(struct perf_event *event);
extern u64 perf_event_read_value(struct perf_event *event,
				 u64 *enabled, u64 *running);

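/*
 * A minimal sketch (not from the kernel sources) of the in-kernel counter
 * API declared above: create a counting event on one CPU, read it, release
 * it. A NULL task and NULL overflow callback yield a plain CPU counter;
 * my_count_instructions() is a hypothetical caller.
 */
#if 0
static void my_count_instructions(int cpu)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_HARDWARE,
		.config	= PERF_COUNT_HW_INSTRUCTIONS,
		.size	= sizeof(attr),
		.pinned	= 1,
	};
	struct perf_event *event;
	u64 value, enabled, running;

	event = perf_event_create_kernel_counter(&attr, cpu, NULL, NULL, NULL);
	if (IS_ERR(event))
		return;

	/* ... let it count for a while ... */

	value = perf_event_read_value(event, &enabled, &running);
	pr_info("insns: %llu (enabled %llu ns, running %llu ns)\n",
		value, enabled, running);
	perf_event_release_kernel(event);
}
#endif
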

struct perf_sample_data {
	/*
	 * Fields set by perf_sample_data_init(), group so as to
	 * minimize the cachelines touched.
	 */
	u64				addr;
	struct perf_raw_record		*raw;
	struct perf_branch_stack	*br_stack;
	u64				period;
	u64				weight;
	u64				txn;
	union perf_mem_data_src		data_src;

	/*
	 * The other fields, optionally {set,used} by
	 * perf_{prepare,output}_sample().
	 */
	u64				type;
	u64				ip;
	struct {
		u32	pid;
		u32	tid;
	}				tid_entry;
	u64				time;
	u64				id;
	u64				stream_id;
	struct {
		u32	cpu;
		u32	reserved;
	}				cpu_entry;
	struct perf_callchain_entry	*callchain;

	/*
	 * regs_user may point to task_pt_regs or to regs_user_copy, depending
	 * on arch details.
	 */
	struct perf_regs		regs_user;
	struct pt_regs			regs_user_copy;

	struct perf_regs		regs_intr;
	u64				stack_user_size;
} ____cacheline_aligned;

/* default value for data source */
#define PERF_MEM_NA (PERF_MEM_S(OP, NA)   |\
		     PERF_MEM_S(LVL, NA)  |\
		     PERF_MEM_S(SNOOP, NA)|\
		     PERF_MEM_S(LOCK, NA) |\
		     PERF_MEM_S(TLB, NA))

static inline void perf_sample_data_init(struct perf_sample_data *data,
					 u64 addr, u64 period)
{
	/* remaining struct members initialized in perf_prepare_sample() */
	data->addr = addr;
	data->raw  = NULL;
	data->br_stack = NULL;
	data->period = period;
	data->weight = 0;
	data->data_src.val = PERF_MEM_NA;
	data->txn = 0;
}

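/*
 * A minimal sketch (not from the kernel sources) of the typical overflow
 * path in a PMU interrupt handler, pairing perf_sample_data_init() with
 * perf_event_overflow() declared below; my_pmu_stop() is hypothetical.
 */
#if 0
static void my_pmu_handle_overflow(struct perf_event *event,
				   struct pt_regs *regs)
{
	struct perf_sample_data data;

	perf_sample_data_init(&data, 0, event->hw.last_period);

	/*
	 * A non-zero return asks the driver to stop the event;
	 * ->start() will later be used to continue it.
	 */
	if (perf_event_overflow(event, &data, regs))
		my_pmu_stop(event, 0);
}
#endif
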
extern void perf_output_sample(struct perf_output_handle *handle,
			       struct perf_event_header *header,
			       struct perf_sample_data *data,
			       struct perf_event *event);
extern void perf_prepare_sample(struct perf_event_header *header,
				struct perf_sample_data *data,
				struct perf_event *event,
				struct pt_regs *regs);

extern int perf_event_overflow(struct perf_event *event,
			       struct perf_sample_data *data,
			       struct pt_regs *regs);

extern void perf_event_output_forward(struct perf_event *event,
				      struct perf_sample_data *data,
				      struct pt_regs *regs);
extern void perf_event_output_backward(struct perf_event *event,
				       struct perf_sample_data *data,
				       struct pt_regs *regs);
extern void perf_event_output(struct perf_event *event,
			      struct perf_sample_data *data,
			      struct pt_regs *regs);

static inline bool
is_default_overflow_handler(struct perf_event *event)
{
	if (likely(event->overflow_handler == perf_event_output_forward))
		return true;
	if (unlikely(event->overflow_handler == perf_event_output_backward))
		return true;
	return false;
}

extern void
perf_event_header__init_id(struct perf_event_header *header,
			   struct perf_sample_data *data,
			   struct perf_event *event);
extern void
perf_event__output_id_sample(struct perf_event *event,
			     struct perf_output_handle *handle,
			     struct perf_sample_data *sample);

extern void
perf_log_lost_samples(struct perf_event *event, u64 lost);

static inline bool is_sampling_event(struct perf_event *event)
{
	return event->attr.sample_period != 0;
}

/*
 * Return 1 for a software event, 0 for a hardware event
 */
static inline int is_software_event(struct perf_event *event)
{
	return event->pmu->task_ctx_nr == perf_sw_context;
}

extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];

extern void ___perf_sw_event(u32, u64, struct pt_regs *, u64);
extern void __perf_sw_event(u32, u64, struct pt_regs *, u64);

#ifndef perf_arch_fetch_caller_regs
static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
#endif

/*
 * Take a snapshot of the regs. Skip ip and frame pointer to
 * the nth caller. We only need a few of the regs:
 * - ip for PERF_SAMPLE_IP
 * - cs for user_mode() tests
 * - bp for callchains
 * - eflags, for future purposes, just in case
 */
static inline void perf_fetch_caller_regs(struct pt_regs *regs)
{
	perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
}

static __always_inline void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
{
	if (static_key_false(&perf_swevent_enabled[event_id]))
		__perf_sw_event(event_id, nr, regs, addr);
}

DECLARE_PER_CPU(struct pt_regs, __perf_regs[4]);

/*
 * 'Special' version for the scheduler, it hard assumes no recursion,
 * which is guaranteed by us not actually scheduling inside other swevents
 * because those disable preemption.
 */
static __always_inline void
perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)
{
	if (static_key_false(&perf_swevent_enabled[event_id])) {
		struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);

		perf_fetch_caller_regs(regs);
		___perf_sw_event(event_id, nr, regs, addr);
	}
}

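/*
 * A minimal sketch (not from the kernel sources) of emitting a software
 * event from an arbitrary kernel path; the static-key test in
 * perf_sw_event() keeps this a NOP unless such an event is in use.
 * my_fault_hook() is a hypothetical call site.
 */
#if 0
static void my_fault_hook(struct pt_regs *regs, unsigned long address)
{
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
}
#endif
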
extern struct static_key_false perf_sched_events;

static __always_inline bool
perf_sw_migrate_enabled(void)
{
	if (static_key_false(&perf_swevent_enabled[PERF_COUNT_SW_CPU_MIGRATIONS]))
		return true;
	return false;
}

static inline void perf_event_task_migrate(struct task_struct *task)
{
	if (perf_sw_migrate_enabled())
		task->sched_migrated = 1;
}

static inline void perf_event_task_sched_in(struct task_struct *prev,
					    struct task_struct *task)
{
	if (static_branch_unlikely(&perf_sched_events))
		__perf_event_task_sched_in(prev, task);

	if (perf_sw_migrate_enabled() && task->sched_migrated) {
		struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);

		perf_fetch_caller_regs(regs);
		___perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, regs, 0);
		task->sched_migrated = 0;
	}
}

static inline void perf_event_task_sched_out(struct task_struct *prev,
					     struct task_struct *next)
{
	perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0);

	if (static_branch_unlikely(&perf_sched_events))
		__perf_event_task_sched_out(prev, next);
}

static inline u64 __perf_event_count(struct perf_event *event)
{
	return local64_read(&event->count) + atomic64_read(&event->child_count);
}

extern void perf_event_mmap(struct vm_area_struct *vma);
extern struct perf_guest_info_callbacks *perf_guest_cbs;
extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);

extern void perf_event_exec(void);
extern void perf_event_comm(struct task_struct *tsk, bool exec);
extern void perf_event_fork(struct task_struct *tsk);

/* Callchains */
DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);

extern void perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
extern void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
extern struct perf_callchain_entry *
get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
		   u32 max_stack, bool crosstask, bool add_mark);
extern int get_callchain_buffers(int max_stack);
extern void put_callchain_buffers(void);

extern int sysctl_perf_event_max_stack;
extern int sysctl_perf_event_max_contexts_per_stack;

static inline int perf_callchain_store_context(struct perf_callchain_entry_ctx *ctx, u64 ip)
{
	if (ctx->contexts < sysctl_perf_event_max_contexts_per_stack) {
		struct perf_callchain_entry *entry = ctx->entry;
		entry->ip[entry->nr++] = ip;
		++ctx->contexts;
		return 0;
	} else {
		ctx->contexts_maxed = true;
		return -1; /* no more room, stop walking the stack */
	}
}

static inline int perf_callchain_store(struct perf_callchain_entry_ctx *ctx, u64 ip)
{
	if (ctx->nr < ctx->max_stack && !ctx->contexts_maxed) {
		struct perf_callchain_entry *entry = ctx->entry;
		entry->ip[entry->nr++] = ip;
		++ctx->nr;
		return 0;
	} else {
		return -1; /* no more room, stop walking the stack */
	}
}

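/*
 * A minimal sketch (not from the kernel sources) of how an architecture's
 * perf_callchain_kernel() would use the two store helpers above while
 * walking frames; my_frame_ip()/my_next_frame() stand in for the arch
 * unwinder.
 */
#if 0
void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
			   struct pt_regs *regs)
{
	unsigned long fp = frame_pointer(regs);

	/* mark the addresses that follow as kernel-side */
	if (perf_callchain_store_context(entry, PERF_CONTEXT_KERNEL))
		return;

	while (fp) {
		if (perf_callchain_store(entry, my_frame_ip(fp)))
			break;		/* entry full or max_stack reached */
		fp = my_next_frame(fp);
	}
}
#endif
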
extern int sysctl_perf_event_paranoid;
extern int sysctl_perf_event_mlock;
extern int sysctl_perf_event_sample_rate;
extern int sysctl_perf_cpu_time_max_percent;

extern void perf_sample_event_took(u64 sample_len_ns);

extern int perf_proc_update_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos);
extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos);

int perf_event_max_stack_handler(struct ctl_table *table, int write,
				 void __user *buffer, size_t *lenp, loff_t *ppos);

static inline bool perf_paranoid_tracepoint_raw(void)
{
	return sysctl_perf_event_paranoid > -1;
}

static inline bool perf_paranoid_cpu(void)
{
	return sysctl_perf_event_paranoid > 0;
}

static inline bool perf_paranoid_kernel(void)
{
	return sysctl_perf_event_paranoid > 1;
}

extern void perf_event_init(void);
extern void perf_tp_event(u16 event_type, u64 count, void *record,
			  int entry_size, struct pt_regs *regs,
			  struct hlist_head *head, int rctx,
			  struct task_struct *task);
extern void perf_bp_event(struct perf_event *event, void *data);

#ifndef perf_misc_flags
# define perf_misc_flags(regs) \
		(user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL)
# define perf_instruction_pointer(regs)	instruction_pointer(regs)
#endif

static inline bool has_branch_stack(struct perf_event *event)
{
	return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK;
}

static inline bool needs_branch_stack(struct perf_event *event)
{
	return event->attr.branch_sample_type != 0;
}

static inline bool has_aux(struct perf_event *event)
{
	return event->pmu->setup_aux;
}

static inline bool is_write_backward(struct perf_event *event)
{
	return !!event->attr.write_backward;
}

static inline bool has_addr_filter(struct perf_event *event)
{
	return event->pmu->nr_addr_filters;
}

1201/*
1202 * An inherited event uses parent's filters
1203 */
1204static inline struct perf_addr_filters_head *
1205perf_event_addr_filters(struct perf_event *event)
1206{
1207 struct perf_addr_filters_head *ifh = &event->addr_filters;
1208
1209 if (event->parent)
1210 ifh = &event->parent->addr_filters;
1211
1212 return ifh;
1213}
1214
1215extern void perf_event_addr_filters_sync(struct perf_event *event);
1216
extern int perf_output_begin(struct perf_output_handle *handle,
			     struct perf_event *event, unsigned int size);
extern int perf_output_begin_forward(struct perf_output_handle *handle,
				     struct perf_event *event,
				     unsigned int size);
extern int perf_output_begin_backward(struct perf_output_handle *handle,
				      struct perf_event *event,
				      unsigned int size);

extern void perf_output_end(struct perf_output_handle *handle);
extern unsigned int perf_output_copy(struct perf_output_handle *handle,
				     const void *buf, unsigned int len);
extern unsigned int perf_output_skip(struct perf_output_handle *handle,
				     unsigned int len);
extern int perf_swevent_get_recursion_context(void);
extern void perf_swevent_put_recursion_context(int rctx);
extern u64 perf_swevent_set_period(struct perf_event *event);
extern void perf_event_enable(struct perf_event *event);
extern void perf_event_disable(struct perf_event *event);
extern void perf_event_disable_local(struct perf_event *event);
extern void perf_event_task_tick(void);
#else /* !CONFIG_PERF_EVENTS: */
static inline void *
perf_aux_output_begin(struct perf_output_handle *handle,
		      struct perf_event *event)				{ return NULL; }
static inline void
perf_aux_output_end(struct perf_output_handle *handle, unsigned long size,
		    bool truncated)					{ }
static inline int
perf_aux_output_skip(struct perf_output_handle *handle,
		     unsigned long size)				{ return -EINVAL; }
static inline void *
perf_get_aux(struct perf_output_handle *handle)				{ return NULL; }
static inline void
perf_event_task_migrate(struct task_struct *task)			{ }
static inline void
perf_event_task_sched_in(struct task_struct *prev,
			 struct task_struct *task)			{ }
static inline void
perf_event_task_sched_out(struct task_struct *prev,
			  struct task_struct *next)			{ }
static inline int perf_event_init_task(struct task_struct *child)	{ return 0; }
static inline void perf_event_exit_task(struct task_struct *child)	{ }
static inline void perf_event_free_task(struct task_struct *task)	{ }
static inline void perf_event_delayed_put(struct task_struct *task)	{ }
static inline struct file *perf_event_get(unsigned int fd)	{ return ERR_PTR(-EINVAL); }
static inline const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
{
	return ERR_PTR(-EINVAL);
}
static inline u64 perf_event_read_local(struct perf_event *event)	{ return -EINVAL; }
static inline void perf_event_print_debug(void)				{ }
static inline int perf_event_task_disable(void)				{ return -EINVAL; }
static inline int perf_event_task_enable(void)				{ return -EINVAL; }
static inline int perf_event_refresh(struct perf_event *event, int refresh)
{
	return -EINVAL;
}

static inline void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)	{ }
static inline void
perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)			{ }
static inline void
perf_bp_event(struct perf_event *event, void *data)			{ }

static inline int perf_register_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks)				{ return 0; }
static inline int perf_unregister_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks)				{ return 0; }

static inline void perf_event_mmap(struct vm_area_struct *vma)		{ }
static inline void perf_event_exec(void)				{ }
static inline void perf_event_comm(struct task_struct *tsk, bool exec)	{ }
static inline void perf_event_fork(struct task_struct *tsk)		{ }
static inline void perf_event_init(void)				{ }
static inline int  perf_swevent_get_recursion_context(void)		{ return -1; }
static inline void perf_swevent_put_recursion_context(int rctx)		{ }
static inline u64 perf_swevent_set_period(struct perf_event *event)	{ return 0; }
static inline void perf_event_enable(struct perf_event *event)		{ }
static inline void perf_event_disable(struct perf_event *event)		{ }
static inline int __perf_event_disable(void *info)			{ return -1; }
static inline void perf_event_task_tick(void)				{ }
static inline int perf_event_release_kernel(struct perf_event *event)	{ return 0; }
#endif

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
extern void perf_restore_debug_store(void);
#else
static inline void perf_restore_debug_store(void)			{ }
#endif

static __always_inline bool perf_raw_frag_last(const struct perf_raw_frag *frag)
{
	return frag->pad < sizeof(u64);
}

#define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))

struct perf_pmu_events_attr {
	struct device_attribute attr;
	u64 id;
	const char *event_str;
};

struct perf_pmu_events_ht_attr {
	struct device_attribute			attr;
	u64					id;
	const char				*event_str_ht;
	const char				*event_str_noht;
};

ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
			      char *page);

#define PMU_EVENT_ATTR(_name, _var, _id, _show)				\
static struct perf_pmu_events_attr _var = {				\
	.attr = __ATTR(_name, 0444, _show, NULL),			\
	.id   =  _id,							\
};

#define PMU_EVENT_ATTR_STRING(_name, _var, _str)			\
static struct perf_pmu_events_attr _var = {				\
	.attr		= __ATTR(_name, 0444, perf_event_sysfs_show, NULL), \
	.id		= 0,						\
	.event_str	= _str,						\
};

#define PMU_FORMAT_ATTR(_name, _format)					\
static ssize_t								\
_name##_show(struct device *dev,					\
	     struct device_attribute *attr,				\
	     char *page)						\
{									\
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);			\
	return sprintf(page, _format "\n");				\
}									\
									\
static struct device_attribute format_attr_##_name = __ATTR_RO(_name)

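/*
 * A minimal sketch (not from the kernel sources) of how a PMU driver uses
 * the attribute macros above to populate pmu::attr_groups; the my_*
 * names and the event encodings are hypothetical.
 */
#if 0
PMU_FORMAT_ATTR(event, "config:0-7");
PMU_EVENT_ATTR_STRING(cycles, my_attr_cycles, "event=0x11");

static struct attribute *my_format_attrs[] = {
	&format_attr_event.attr,	/* created by PMU_FORMAT_ATTR() */
	NULL,
};

static const struct attribute_group my_format_group = {
	.name	= "format",
	.attrs	= my_format_attrs,
};

static struct attribute *my_event_attrs[] = {
	&my_attr_cycles.attr.attr,	/* created by PMU_EVENT_ATTR_STRING() */
	NULL,
};

static const struct attribute_group my_events_group = {
	.name	= "events",
	.attrs	= my_event_attrs,
};

static const struct attribute_group *my_attr_groups[] = {
	&my_format_group,
	&my_events_group,
	NULL,	/* assign to struct pmu::attr_groups before registration */
};
#endif
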
/* Performance counter hotplug functions */
#ifdef CONFIG_PERF_EVENTS
int perf_event_init_cpu(unsigned int cpu);
int perf_event_exit_cpu(unsigned int cpu);
#else
#define perf_event_init_cpu	NULL
#define perf_event_exit_cpu	NULL
#endif

#endif /* _LINUX_PERF_EVENT_H */