include/linux/perf_event.h

/*
 * Performance events:
 *
 *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
 *    Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
 *    Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
 *
 * Data type definitions, declarations, prototypes.
 *
 *    Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licensing details see kernel-base/COPYING
 */
#ifndef _LINUX_PERF_EVENT_H
#define _LINUX_PERF_EVENT_H

#include <uapi/linux/perf_event.h>

/*
 * Kernel-internal data types and definitions:
 */

#ifdef CONFIG_PERF_EVENTS
# include <asm/perf_event.h>
# include <asm/local64.h>
#endif

struct perf_guest_info_callbacks {
	int		(*is_in_guest)(void);
	int		(*is_user_mode)(void);
	unsigned long	(*get_guest_ip)(void);
};

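/*
 * Usage sketch (hypothetical; the example_* names below are illustrative,
 * not an existing in-tree user): a hypervisor fills in these callbacks and
 * hands them to perf_register_guest_info_callbacks(), declared further down
 * in this header, so the PMI handler can attribute samples that arrive
 * while a guest is running:
 *
 *	static int example_is_in_guest(void)
 *	{
 *		return 0;		(consult vCPU state here)
 *	}
 *
 *	static int example_is_user_mode(void)
 *	{
 *		return 0;		(guest CPL, when in guest mode)
 *	}
 *
 *	static unsigned long example_get_guest_ip(void)
 *	{
 *		return 0;		(guest instruction pointer)
 *	}
 *
 *	static struct perf_guest_info_callbacks example_guest_cbs = {
 *		.is_in_guest	= example_is_in_guest,
 *		.is_user_mode	= example_is_user_mode,
 *		.get_guest_ip	= example_get_guest_ip,
 *	};
 *
 *	perf_register_guest_info_callbacks(&example_guest_cbs);
 */
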
#ifdef CONFIG_HAVE_HW_BREAKPOINT
#include <asm/hw_breakpoint.h>
#endif

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/pid_namespace.h>
#include <linux/workqueue.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/irq_work.h>
#include <linux/static_key.h>
#include <linux/jump_label_ratelimit.h>
#include <linux/atomic.h>
#include <linux/sysfs.h>
#include <linux/perf_regs.h>
#include <linux/cgroup.h>
#include <asm/local.h>

struct perf_callchain_entry {
	__u64				nr;
	__u64				ip[PERF_MAX_STACK_DEPTH];
};

struct perf_raw_record {
	u32				size;
	void				*data;
};

/*
 * branch stack layout:
 *  nr: number of taken branches stored in entries[]
 *
 * Note that nr can vary from sample to sample
 * branches (to, from) are stored from most recent
 * to least recent, i.e., entries[0] contains the most
 * recent branch.
 */
struct perf_branch_stack {
	__u64				nr;
	struct perf_branch_entry	entries[0];
};

struct task_struct;

/*
 * extra PMU register associated with an event
 */
struct hw_perf_event_extra {
	u64		config;	/* register value */
	unsigned int	reg;	/* register address or index */
	int		alloc;	/* extra register already allocated */
	int		idx;	/* index in shared_regs->regs[] */
};

/**
 * struct hw_perf_event - performance event hardware details:
 */
struct hw_perf_event {
#ifdef CONFIG_PERF_EVENTS
	union {
		struct { /* hardware */
			u64		config;
			u64		last_tag;
			unsigned long	config_base;
			unsigned long	event_base;
			int		event_base_rdpmc;
			int		idx;
			int		last_cpu;
			int		flags;

			struct hw_perf_event_extra extra_reg;
			struct hw_perf_event_extra branch_reg;
		};
		struct { /* software */
			struct hrtimer	hrtimer;
		};
		struct { /* tracepoint */
			/* for tp_event->class */
			struct list_head	tp_list;
		};
		struct { /* intel_cqm */
			int			cqm_state;
			u32			cqm_rmid;
			int			is_group_event;
			struct list_head	cqm_events_entry;
			struct list_head	cqm_groups_entry;
			struct list_head	cqm_group_entry;
		};
		struct { /* itrace */
			int			itrace_started;
		};
#ifdef CONFIG_HAVE_HW_BREAKPOINT
		struct { /* breakpoint */
			/*
			 * Crufty hack to avoid the chicken and egg
			 * problem hw_breakpoint has with context
			 * creation and event initialization.
			 */
			struct arch_hw_breakpoint	info;
			struct list_head		bp_list;
		};
#endif
	};
	/*
	 * If the event is a per task event, this will point to the task in
	 * question. See the comment in perf_event_alloc().
	 */
	struct task_struct		*target;

/*
 * hw_perf_event::state flags; used to track the PERF_EF_* state.
 */
#define PERF_HES_STOPPED	0x01 /* the counter is stopped */
#define PERF_HES_UPTODATE	0x02 /* event->count up-to-date */
#define PERF_HES_ARCH		0x04

	int				state;

	/*
	 * The last observed hardware counter value, updated with a
	 * local64_cmpxchg() such that pmu::read() can be called nested.
	 */
	local64_t			prev_count;

	/*
	 * The period to start the next sample with.
	 */
	u64				sample_period;

	/*
	 * The period we started this sample with.
	 */
	u64				last_period;

	/*
	 * However much is left of the current period; note that this is
	 * a full 64bit value and allows for generation of periods longer
	 * than hardware might allow.
	 */
	local64_t			period_left;

	/*
	 * State for throttling the event, see __perf_event_overflow() and
	 * perf_adjust_freq_unthr_context().
	 */
	u64				interrupts_seq;
	u64				interrupts;

	/*
	 * State for freq target events, see __perf_event_overflow() and
	 * perf_adjust_freq_unthr_context().
	 */
	u64				freq_time_stamp;
	u64				freq_count_stamp;
#endif
};

struct perf_event;

/*
 * Common implementation detail of pmu::{start,commit,cancel}_txn
 */
#define PERF_PMU_TXN_ADD	0x1	/* txn to add/schedule event on PMU */
#define PERF_PMU_TXN_READ	0x2	/* txn to read event group from PMU */

/**
 * pmu::capabilities flags
 */
#define PERF_PMU_CAP_NO_INTERRUPT	0x01
#define PERF_PMU_CAP_NO_NMI		0x02
#define PERF_PMU_CAP_AUX_NO_SG		0x04
#define PERF_PMU_CAP_AUX_SW_DOUBLEBUF	0x08
#define PERF_PMU_CAP_EXCLUSIVE		0x10
#define PERF_PMU_CAP_ITRACE		0x20

/**
 * struct pmu - generic performance monitoring unit
 */
struct pmu {
	struct list_head		entry;

	struct module			*module;
	struct device			*dev;
	const struct attribute_group	**attr_groups;
	const char			*name;
	int				type;

	/*
	 * various common per-pmu feature flags
	 */
	int				capabilities;

	int * __percpu			pmu_disable_count;
	struct perf_cpu_context * __percpu pmu_cpu_context;
	atomic_t			exclusive_cnt; /* < 0: cpu; > 0: tsk */
	int				task_ctx_nr;
	int				hrtimer_interval_ms;

	/*
	 * Fully disable/enable this PMU, can be used to protect from the PMI
	 * as well as for lazy/batch writing of the MSRs.
	 */
	void (*pmu_enable)		(struct pmu *pmu); /* optional */
	void (*pmu_disable)		(struct pmu *pmu); /* optional */

	/*
	 * Try to initialize the event for this PMU.
	 *
	 * Returns:
	 *  -ENOENT	-- @event is not for this PMU
	 *
	 *  -ENODEV	-- @event is for this PMU but PMU not present
	 *  -EBUSY	-- @event is for this PMU but PMU temporarily unavailable
	 *  -EINVAL	-- @event is for this PMU but @event is not valid
	 *  -EOPNOTSUPP -- @event is for this PMU, @event is valid, but not supported
	 *  -EACCES	-- @event is for this PMU, @event is valid, but no privileges
	 *
	 *  0		-- @event is for this PMU and valid
	 *
	 * Other error return values are allowed.
	 */
	int (*event_init)		(struct perf_event *event);

	/*
	 * Notification that the event was mapped or unmapped.  Called
	 * in the context of the mapping task.
	 */
	void (*event_mapped)		(struct perf_event *event); /* optional */
	void (*event_unmapped)		(struct perf_event *event); /* optional */

	/*
	 * Flags for ->add()/->del()/->start()/->stop(). There are
	 * matching hw_perf_event::state flags.
	 */
#define PERF_EF_START	0x01	/* start the counter when adding    */
#define PERF_EF_RELOAD	0x02	/* reload the counter when starting */
#define PERF_EF_UPDATE	0x04	/* update the counter when stopping */

	/*
	 * Adds/Removes a counter to/from the PMU, can be done inside a
	 * transaction, see the ->*_txn() methods.
	 *
	 * The add/del callbacks will reserve all hardware resources required
	 * to service the event, this includes any counter constraint
	 * scheduling etc.
	 *
	 * Called with IRQs disabled and the PMU disabled on the CPU the event
	 * is on.
	 *
	 * ->add() called without PERF_EF_START should result in the same state
	 *  as ->add() followed by ->stop().
	 *
	 * ->del() must always PERF_EF_UPDATE stop an event. If it calls
	 *  ->stop() that must deal with already being stopped without
	 *  PERF_EF_UPDATE.
	 */
	int  (*add)			(struct perf_event *event, int flags);
	void (*del)			(struct perf_event *event, int flags);

	/*
	 * Starts/Stops a counter present on the PMU.
	 *
	 * The PMI handler should stop the counter when perf_event_overflow()
	 * returns !0. ->start() will be used to continue.
	 *
	 * Also used to change the sample period.
	 *
	 * Called with IRQs disabled and the PMU disabled on the CPU the event
	 * is on -- will be called from NMI context when the PMU generates
	 * NMIs.
	 *
	 * ->stop() with PERF_EF_UPDATE will read the counter and update
	 *  period/count values like ->read() would.
	 *
	 * ->start() with PERF_EF_RELOAD will reprogram the counter
	 *  value, must be preceded by a ->stop() with PERF_EF_UPDATE.
	 */
	void (*start)			(struct perf_event *event, int flags);
	void (*stop)			(struct perf_event *event, int flags);

	/*
	 * Updates the counter value of the event.
	 *
	 * For sampling capable PMUs this will also update the software period
	 * hw_perf_event::period_left field.
	 */
	void (*read)			(struct perf_event *event);

	/*
	 * Group events scheduling is treated as a transaction, add
	 * group events as a whole and perform one schedulability test.
	 * If the test fails, roll back the whole group.
	 *
	 * Start the transaction, after this ->add() doesn't need to
	 * do schedulability tests.
	 *
	 * Optional.
	 */
	void (*start_txn)		(struct pmu *pmu, unsigned int txn_flags);
	/*
	 * If ->start_txn() disabled the ->add() schedulability test
	 * then ->commit_txn() is required to perform one. On success
	 * the transaction is closed. On error the transaction is kept
	 * open until ->cancel_txn() is called.
	 *
	 * Optional.
	 */
	int  (*commit_txn)		(struct pmu *pmu);
	/*
	 * Will cancel the transaction, assumes ->del() is called
	 * for each successful ->add() during the transaction.
	 *
	 * Optional.
	 */
	void (*cancel_txn)		(struct pmu *pmu);

	/*
	 * Will return the value for perf_event_mmap_page::index for this
	 * event; if no implementation is provided it will default to:
	 * event->hw.idx + 1.
	 */
	int (*event_idx)		(struct perf_event *event); /* optional */

	/*
	 * context-switches callback
	 */
	void (*sched_task)		(struct perf_event_context *ctx,
					 bool sched_in);
	/*
	 * PMU specific data size
	 */
	size_t				task_ctx_size;

	/*
	 * Return the count value for a counter.
	 */
	u64 (*count)			(struct perf_event *event); /* optional */

	/*
	 * Set up pmu-private data structures for an AUX area
	 */
	void *(*setup_aux)		(int cpu, void **pages,
					 int nr_pages, bool overwrite);
					/* optional */

	/*
	 * Free pmu-private AUX data structures
	 */
	void (*free_aux)		(void *aux); /* optional */

	/*
	 * Filter events for PMU-specific reasons.
	 */
	int (*filter_match)		(struct perf_event *event); /* optional */
};

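/*
 * Usage sketch (hypothetical example_* names; struct perf_event is only
 * defined further down, so this stays in comment form): the minimum a
 * simple counting PMU fills in before calling perf_pmu_register(),
 * declared later in this header. Real drivers also honour the PERF_EF_*
 * flags and transaction methods documented above:
 *
 *	static int example_event_init(struct perf_event *event)
 *	{
 *		if (event->attr.type != event->pmu->type)
 *			return -ENOENT;		(not ours, try the next PMU)
 *		return 0;
 *	}
 *
 *	static void example_start(struct perf_event *event, int flags)
 *	{
 *		event->hw.state = 0;
 *	}
 *
 *	static void example_stop(struct perf_event *event, int flags)
 *	{
 *		event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
 *	}
 *
 *	static void example_read(struct perf_event *event)
 *	{
 *		local64_add(example_hw_delta(event), &event->count);
 *	}
 *
 *	static int example_add(struct perf_event *event, int flags)
 *	{
 *		if (flags & PERF_EF_START)
 *			example_start(event, PERF_EF_RELOAD);
 *		return 0;
 *	}
 *
 *	static void example_del(struct perf_event *event, int flags)
 *	{
 *		example_stop(event, PERF_EF_UPDATE);
 *	}
 *
 *	static struct pmu example_pmu = {
 *		.task_ctx_nr	= perf_sw_context,
 *		.event_init	= example_event_init,
 *		.add		= example_add,
 *		.del		= example_del,
 *		.start		= example_start,
 *		.stop		= example_stop,
 *		.read		= example_read,
 *	};
 *
 *	perf_pmu_register(&example_pmu, "example", -1);
 */
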
/**
 * enum perf_event_active_state - the states of an event
 */
enum perf_event_active_state {
	PERF_EVENT_STATE_DEAD		= -4,
	PERF_EVENT_STATE_EXIT		= -3,
	PERF_EVENT_STATE_ERROR		= -2,
	PERF_EVENT_STATE_OFF		= -1,
	PERF_EVENT_STATE_INACTIVE	=  0,
	PERF_EVENT_STATE_ACTIVE		=  1,
};

struct file;
struct perf_sample_data;

typedef void (*perf_overflow_handler_t)(struct perf_event *,
					struct perf_sample_data *,
					struct pt_regs *regs);

enum perf_group_flag {
	PERF_GROUP_SOFTWARE		= 0x1,
};

#define SWEVENT_HLIST_BITS		8
#define SWEVENT_HLIST_SIZE		(1 << SWEVENT_HLIST_BITS)

struct swevent_hlist {
	struct hlist_head		heads[SWEVENT_HLIST_SIZE];
	struct rcu_head			rcu_head;
};

#define PERF_ATTACH_CONTEXT	0x01
#define PERF_ATTACH_GROUP	0x02
#define PERF_ATTACH_TASK	0x04
#define PERF_ATTACH_TASK_DATA	0x08

struct perf_cgroup;
struct ring_buffer;

/**
 * struct perf_event - performance event kernel representation:
 */
struct perf_event {
#ifdef CONFIG_PERF_EVENTS
	/*
	 * entry onto perf_event_context::event_list;
	 *   modifications require ctx->lock
	 *   RCU safe iterations.
	 */
	struct list_head		event_entry;

	/*
	 * XXX: group_entry and sibling_list should be mutually exclusive;
	 * either you're a sibling on a group, or you're the group leader.
	 * Rework the code to always use the same list element.
	 *
	 * Locked for modification by both ctx->mutex and ctx->lock; holding
	 * either suffices for read.
	 */
	struct list_head		group_entry;
	struct list_head		sibling_list;

	/*
	 * We need storage to track the entries in perf_pmu_migrate_context; we
	 * cannot use the event_entry because of RCU and we want to keep the
	 * group intact which avoids us using the other two entries.
	 */
	struct list_head		migrate_entry;

	struct hlist_node		hlist_entry;
	struct list_head		active_entry;
	int				nr_siblings;
	int				group_flags;
	struct perf_event		*group_leader;
	struct pmu			*pmu;
	void				*pmu_private;

	enum perf_event_active_state	state;
	unsigned int			attach_state;
	local64_t			count;
	atomic64_t			child_count;

	/*
	 * These are the total time in nanoseconds that the event
	 * has been enabled (i.e. eligible to run, and the task has
	 * been scheduled in, if this is a per-task event)
	 * and running (scheduled onto the CPU), respectively.
	 *
	 * They are computed from tstamp_enabled, tstamp_running and
	 * tstamp_stopped when the event is in INACTIVE or ACTIVE state.
	 */
	u64				total_time_enabled;
	u64				total_time_running;

	/*
	 * These are timestamps used for computing total_time_enabled
	 * and total_time_running when the event is in INACTIVE or
	 * ACTIVE state, measured in nanoseconds from an arbitrary point
	 * in time.
	 * tstamp_enabled: the notional time when the event was enabled
	 * tstamp_running: the notional time when the event was scheduled on
	 * tstamp_stopped: in INACTIVE state, the notional time when the
	 *	event was scheduled off.
	 */
	u64				tstamp_enabled;
	u64				tstamp_running;
	u64				tstamp_stopped;

	/*
	 * timestamp shadows the actual context timing but it can
	 * be safely used in NMI interrupt context. It reflects the
	 * context time as it was when the event was last scheduled in.
	 *
	 * ctx_time already accounts for ctx->timestamp. Therefore to
	 * compute ctx_time for a sample, simply add perf_clock().
	 */
	u64				shadow_ctx_time;

	struct perf_event_attr		attr;
	u16				header_size;
	u16				id_header_size;
	u16				read_size;
	struct hw_perf_event		hw;

	struct perf_event_context	*ctx;
	atomic_long_t			refcount;

	/*
	 * These accumulate total time (in nanoseconds) that children
	 * events have been enabled and running, respectively.
	 */
	atomic64_t			child_total_time_enabled;
	atomic64_t			child_total_time_running;

	/*
	 * Protect attach/detach and child_list:
	 */
	struct mutex			child_mutex;
	struct list_head		child_list;
	struct perf_event		*parent;

	int				oncpu;
	int				cpu;

	struct list_head		owner_entry;
	struct task_struct		*owner;

	/* mmap bits */
	struct mutex			mmap_mutex;
	atomic_t			mmap_count;

	struct ring_buffer		*rb;
	struct list_head		rb_entry;
	unsigned long			rcu_batches;
	int				rcu_pending;

	/* poll related */
	wait_queue_head_t		waitq;
	struct fasync_struct		*fasync;

	/* delayed work for NMIs and such */
	int				pending_wakeup;
	int				pending_kill;
	int				pending_disable;
	struct irq_work			pending;

	atomic_t			event_limit;

	void (*destroy)(struct perf_event *);
	struct rcu_head			rcu_head;

	struct pid_namespace		*ns;
	u64				id;

	u64				(*clock)(void);
	perf_overflow_handler_t		overflow_handler;
	void				*overflow_handler_context;

#ifdef CONFIG_EVENT_TRACING
	struct trace_event_call		*tp_event;
	struct event_filter		*filter;
#ifdef CONFIG_FUNCTION_TRACER
	struct ftrace_ops		ftrace_ops;
#endif
#endif

#ifdef CONFIG_CGROUP_PERF
	struct perf_cgroup		*cgrp; /* cgroup the event is attached to */
	int				cgrp_defer_enabled;
#endif

#endif /* CONFIG_PERF_EVENTS */
};

/**
 * struct perf_event_context - event context structure
 *
 * Used as a container for task events and CPU events as well:
 */
struct perf_event_context {
	struct pmu			*pmu;
	/*
	 * Protect the states of the events in the list,
	 * nr_active, and the list:
	 */
	raw_spinlock_t			lock;
	/*
	 * Protect the list of events.  Locking either mutex or lock
	 * is sufficient to ensure the list doesn't change; to change
	 * the list you need to lock both the mutex and the spinlock.
	 */
	struct mutex			mutex;

	struct list_head		active_ctx_list;
	struct list_head		pinned_groups;
	struct list_head		flexible_groups;
	struct list_head		event_list;
	int				nr_events;
	int				nr_active;
	int				is_active;
	int				nr_stat;
	int				nr_freq;
	int				rotate_disable;
	atomic_t			refcount;
	struct task_struct		*task;

	/*
	 * Context clock, runs when context enabled.
	 */
	u64				time;
	u64				timestamp;

	/*
	 * These fields let us detect when two contexts have both
	 * been cloned (inherited) from a common ancestor.
	 */
	struct perf_event_context	*parent_ctx;
	u64				parent_gen;
	u64				generation;
	int				pin_count;
	int				nr_cgroups;	/* cgroup evts */
	void				*task_ctx_data; /* pmu specific data */
	struct rcu_head			rcu_head;
};

/*
 * Number of contexts where an event can trigger:
 *	task, softirq, hardirq, nmi.
 */
#define PERF_NR_CONTEXTS	4

/**
 * struct perf_cpu_context - per cpu event context structure
 */
struct perf_cpu_context {
	struct perf_event_context	ctx;
	struct perf_event_context	*task_ctx;
	int				active_oncpu;
	int				exclusive;

	raw_spinlock_t			hrtimer_lock;
	struct hrtimer			hrtimer;
	ktime_t				hrtimer_interval;
	unsigned int			hrtimer_active;

	struct pmu			*unique_pmu;
	struct perf_cgroup		*cgrp;
};

struct perf_output_handle {
	struct perf_event		*event;
	struct ring_buffer		*rb;
	unsigned long			wakeup;
	unsigned long			size;
	union {
		void			*addr;
		unsigned long		head;
	};
	int				page;
};

#ifdef CONFIG_CGROUP_PERF

/*
 * perf_cgroup_info keeps track of time_enabled for a cgroup.
 * This is a per-cpu dynamically allocated data structure.
 */
struct perf_cgroup_info {
	u64				time;
	u64				timestamp;
};

struct perf_cgroup {
	struct cgroup_subsys_state	css;
	struct perf_cgroup_info	__percpu *info;
};

/*
 * Must ensure cgroup is pinned (css_get) before calling
 * this function. In other words, we cannot call this function
 * if there is no cgroup event for the current CPU context.
 */
static inline struct perf_cgroup *
perf_cgroup_from_task(struct task_struct *task, struct perf_event_context *ctx)
{
	return container_of(task_css_check(task, perf_event_cgrp_id,
					   ctx ? lockdep_is_held(&ctx->lock)
					       : true),
			    struct perf_cgroup, css);
}
#endif /* CONFIG_CGROUP_PERF */

#ifdef CONFIG_PERF_EVENTS

extern void *perf_aux_output_begin(struct perf_output_handle *handle,
				   struct perf_event *event);
extern void perf_aux_output_end(struct perf_output_handle *handle,
				unsigned long size, bool truncated);
extern int perf_aux_output_skip(struct perf_output_handle *handle,
				unsigned long size);
extern void *perf_get_aux(struct perf_output_handle *handle);

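/*
 * Usage sketch: an instruction-tracing PMU driver brackets every hardware
 * buffer update with the begin/end pair above. The example_* helper below
 * is hypothetical; a real driver keeps the handle in its per-cpu state and
 * sizes 'bytes_written' from the hardware's write pointer.
 */
static inline void example_update_aux(struct perf_output_handle *handle,
				      struct perf_event *event,
				      unsigned long bytes_written)
{
	void *buf = perf_aux_output_begin(handle, event);

	if (!buf)
		return;		/* no AUX buffer, or the event is stopped */

	/* ... point the trace hardware at 'buf' and let it fill ... */

	perf_aux_output_end(handle, bytes_written, false);
}
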
extern int perf_pmu_register(struct pmu *pmu, const char *name, int type);
extern void perf_pmu_unregister(struct pmu *pmu);

extern int perf_num_counters(void);
extern const char *perf_pmu_name(void);
extern void __perf_event_task_sched_in(struct task_struct *prev,
				       struct task_struct *task);
extern void __perf_event_task_sched_out(struct task_struct *prev,
					struct task_struct *next);
extern int perf_event_init_task(struct task_struct *child);
extern void perf_event_exit_task(struct task_struct *child);
extern void perf_event_free_task(struct task_struct *task);
extern void perf_event_delayed_put(struct task_struct *task);
extern struct file *perf_event_get(unsigned int fd);
extern const struct perf_event_attr *perf_event_attrs(struct perf_event *event);
extern void perf_event_print_debug(void);
extern void perf_pmu_disable(struct pmu *pmu);
extern void perf_pmu_enable(struct pmu *pmu);
extern void perf_sched_cb_dec(struct pmu *pmu);
extern void perf_sched_cb_inc(struct pmu *pmu);
extern int perf_event_task_disable(void);
extern int perf_event_task_enable(void);
extern int perf_event_refresh(struct perf_event *event, int refresh);
extern void perf_event_update_userpage(struct perf_event *event);
extern int perf_event_release_kernel(struct perf_event *event);
extern struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr,
				 int cpu,
				 struct task_struct *task,
				 perf_overflow_handler_t callback,
				 void *context);
extern void perf_pmu_migrate_context(struct pmu *pmu,
				     int src_cpu, int dst_cpu);
extern u64 perf_event_read_local(struct perf_event *event);
extern u64 perf_event_read_value(struct perf_event *event,
				 u64 *enabled, u64 *running);
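
/*
 * Usage sketch (hypothetical helper; error handling trimmed, IS_ERR()
 * comes from <linux/err.h>): an in-kernel user creates a counter, lets
 * it run, then reads and releases it:
 */
static inline u64 example_count_cycles_on(int cpu)
{
	struct perf_event_attr attr = {
		.type		= PERF_TYPE_HARDWARE,
		.config		= PERF_COUNT_HW_CPU_CYCLES,
		.size		= sizeof(attr),
		.pinned		= 1,
	};
	struct perf_event *event;
	u64 enabled, running, count;

	/* NULL task: per-cpu counter; NULL callback: no sampling, just count */
	event = perf_event_create_kernel_counter(&attr, cpu, NULL, NULL, NULL);
	if (IS_ERR(event))
		return 0;

	/* ... let it count for a while ... */

	count = perf_event_read_value(event, &enabled, &running);
	perf_event_release_kernel(event);
	return count;
}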

struct perf_sample_data {
	/*
	 * Fields set by perf_sample_data_init(), group so as to
	 * minimize the cachelines touched.
	 */
	u64				addr;
	struct perf_raw_record		*raw;
	struct perf_branch_stack	*br_stack;
	u64				period;
	u64				weight;
	u64				txn;
	union  perf_mem_data_src	data_src;

	/*
	 * The other fields, optionally {set,used} by
	 * perf_{prepare,output}_sample().
	 */
	u64				type;
	u64				ip;
	struct {
		u32	pid;
		u32	tid;
	}				tid_entry;
	u64				time;
	u64				id;
	u64				stream_id;
	struct {
		u32	cpu;
		u32	reserved;
	}				cpu_entry;
	struct perf_callchain_entry	*callchain;

	/*
	 * regs_user may point to task_pt_regs or to regs_user_copy, depending
	 * on arch details.
	 */
	struct perf_regs		regs_user;
	struct pt_regs			regs_user_copy;

	struct perf_regs		regs_intr;
	u64				stack_user_size;
} ____cacheline_aligned;

/* default value for data source */
#define PERF_MEM_NA (PERF_MEM_S(OP, NA)   |\
		     PERF_MEM_S(LVL, NA)  |\
		     PERF_MEM_S(SNOOP, NA)|\
		     PERF_MEM_S(LOCK, NA) |\
		     PERF_MEM_S(TLB, NA))

static inline void perf_sample_data_init(struct perf_sample_data *data,
					 u64 addr, u64 period)
{
	/* remaining struct members initialized in perf_prepare_sample() */
	data->addr = addr;
	data->raw  = NULL;
	data->br_stack = NULL;
	data->period = period;
	data->weight = 0;
	data->data_src.val = PERF_MEM_NA;
	data->txn = 0;
}

extern void perf_output_sample(struct perf_output_handle *handle,
			       struct perf_event_header *header,
			       struct perf_sample_data *data,
			       struct perf_event *event);
extern void perf_prepare_sample(struct perf_event_header *header,
				struct perf_sample_data *data,
				struct perf_event *event,
				struct pt_regs *regs);

extern int perf_event_overflow(struct perf_event *event,
			       struct perf_sample_data *data,
			       struct pt_regs *regs);

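/*
 * Usage sketch: an architecture PMI handler pairs perf_sample_data_init()
 * with perf_event_overflow(), roughly as the x86 handler does (schematic,
 * hypothetical example_* name):
 */
static inline void example_handle_pmi(struct perf_event *event,
				      struct pt_regs *regs)
{
	struct perf_sample_data data;

	perf_sample_data_init(&data, 0, event->hw.last_period);

	/* a !0 return asks the driver to throttle: stop the event */
	if (perf_event_overflow(event, &data, regs))
		event->pmu->stop(event, 0);
}
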
extern void perf_event_output(struct perf_event *event,
			      struct perf_sample_data *data,
			      struct pt_regs *regs);

extern void
perf_event_header__init_id(struct perf_event_header *header,
			   struct perf_sample_data *data,
			   struct perf_event *event);
extern void
perf_event__output_id_sample(struct perf_event *event,
			     struct perf_output_handle *handle,
			     struct perf_sample_data *sample);

extern void
perf_log_lost_samples(struct perf_event *event, u64 lost);

static inline bool is_sampling_event(struct perf_event *event)
{
	return event->attr.sample_period != 0;
}

/*
 * Return 1 for a software event, 0 for a hardware event
 */
static inline int is_software_event(struct perf_event *event)
{
	return event->pmu->task_ctx_nr == perf_sw_context;
}

extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];

extern void ___perf_sw_event(u32, u64, struct pt_regs *, u64);
extern void __perf_sw_event(u32, u64, struct pt_regs *, u64);

#ifndef perf_arch_fetch_caller_regs
static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
#endif

/*
 * Take a snapshot of the regs. Skip ip and frame pointer to
 * the nth caller. We only need a few of the regs:
 * - ip for PERF_SAMPLE_IP
 * - cs for user_mode() tests
 * - bp for callchains
 * - eflags, for future purposes, just in case
 */
static inline void perf_fetch_caller_regs(struct pt_regs *regs)
{
	memset(regs, 0, sizeof(*regs));

	perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
}

static __always_inline void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
{
	if (static_key_false(&perf_swevent_enabled[event_id]))
		__perf_sw_event(event_id, nr, regs, addr);
}

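/*
 * Usage sketch: instrumentation sites simply fire the event; the static
 * key above keeps the disabled case down to a single patched branch.
 * Modelled on the page-fault accounting in the arch fault handlers:
 *
 *	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 */
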
893DECLARE_PER_CPU(struct pt_regs, __perf_regs[4]);
7e54a5a0 894
86038c5e
PZI
895/*
896 * 'Special' version for the scheduler, it hard assumes no recursion,
897 * which is guaranteed by us not actually scheduling inside other swevents
898 * because those disable preemption.
899 */
900static __always_inline void
901perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)
902{
c5905afb 903 if (static_key_false(&perf_swevent_enabled[event_id])) {
86038c5e
PZI
904 struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);
905
906 perf_fetch_caller_regs(regs);
907 ___perf_sw_event(event_id, nr, regs, addr);
e49a5bd3
FW
908 }
909}
910
9107c89e 911extern struct static_key_false perf_sched_events;
ee6dcfa4 912
ff303e66
PZ
913static __always_inline bool
914perf_sw_migrate_enabled(void)
915{
916 if (static_key_false(&perf_swevent_enabled[PERF_COUNT_SW_CPU_MIGRATIONS]))
917 return true;
918 return false;
919}
920
921static inline void perf_event_task_migrate(struct task_struct *task)
922{
923 if (perf_sw_migrate_enabled())
924 task->sched_migrated = 1;
925}
926
ab0cce56 927static inline void perf_event_task_sched_in(struct task_struct *prev,
a8d757ef 928 struct task_struct *task)
ab0cce56 929{
9107c89e 930 if (static_branch_unlikely(&perf_sched_events))
ab0cce56 931 __perf_event_task_sched_in(prev, task);
ff303e66
PZ
932
933 if (perf_sw_migrate_enabled() && task->sched_migrated) {
934 struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);
935
936 perf_fetch_caller_regs(regs);
937 ___perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, regs, 0);
938 task->sched_migrated = 0;
939 }
ab0cce56
JO
940}
941
942static inline void perf_event_task_sched_out(struct task_struct *prev,
943 struct task_struct *next)
ee6dcfa4 944{
86038c5e 945 perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0);
ee6dcfa4 946
9107c89e 947 if (static_branch_unlikely(&perf_sched_events))
ab0cce56 948 __perf_event_task_sched_out(prev, next);
ee6dcfa4
PZ
949}
950
eacd3ecc
MF
951static inline u64 __perf_event_count(struct perf_event *event)
952{
953 return local64_read(&event->count) + atomic64_read(&event->child_count);
954}
955
3af9e859 956extern void perf_event_mmap(struct vm_area_struct *vma);
39447b38 957extern struct perf_guest_info_callbacks *perf_guest_cbs;
dcf46b94
ZY
958extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
959extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
39447b38 960
e041e328 961extern void perf_event_exec(void);
82b89778 962extern void perf_event_comm(struct task_struct *tsk, bool exec);
cdd6c482 963extern void perf_event_fork(struct task_struct *tsk);
8d1b2d93 964
56962b44
FW
965/* Callchains */
966DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);
967
e7e7ee2e
IM
968extern void perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs);
969extern void perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs);
394ee076 970
e7e7ee2e 971static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
70791ce9
FW
972{
973 if (entry->nr < PERF_MAX_STACK_DEPTH)
974 entry->ip[entry->nr++] = ip;
975}
394ee076 976
cdd6c482
IM
977extern int sysctl_perf_event_paranoid;
978extern int sysctl_perf_event_mlock;
979extern int sysctl_perf_event_sample_rate;
14c63f17
DH
980extern int sysctl_perf_cpu_time_max_percent;
981
982extern void perf_sample_event_took(u64 sample_len_ns);
1ccd1549 983
163ec435
PZ
984extern int perf_proc_update_handler(struct ctl_table *table, int write,
985 void __user *buffer, size_t *lenp,
986 loff_t *ppos);
14c63f17
DH
987extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
988 void __user *buffer, size_t *lenp,
989 loff_t *ppos);
990
163ec435 991
320ebf09
PZ
992static inline bool perf_paranoid_tracepoint_raw(void)
993{
994 return sysctl_perf_event_paranoid > -1;
995}
996
997static inline bool perf_paranoid_cpu(void)
998{
999 return sysctl_perf_event_paranoid > 0;
1000}
1001
1002static inline bool perf_paranoid_kernel(void)
1003{
1004 return sysctl_perf_event_paranoid > 1;
1005}
1006
cdd6c482 1007extern void perf_event_init(void);
1c024eca
PZ
1008extern void perf_tp_event(u64 addr, u64 count, void *record,
1009 int entry_size, struct pt_regs *regs,
e6dab5ff
AV
1010 struct hlist_head *head, int rctx,
1011 struct task_struct *task);
24f1e32c 1012extern void perf_bp_event(struct perf_event *event, void *data);
0d905bca 1013
9d23a90a 1014#ifndef perf_misc_flags
e7e7ee2e
IM
1015# define perf_misc_flags(regs) \
1016 (user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL)
1017# define perf_instruction_pointer(regs) instruction_pointer(regs)
9d23a90a
PM
1018#endif
1019
bce38cd5
SE
1020static inline bool has_branch_stack(struct perf_event *event)
1021{
1022 return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK;
a46a2300
YZ
1023}
1024
1025static inline bool needs_branch_stack(struct perf_event *event)
1026{
1027 return event->attr.branch_sample_type != 0;
bce38cd5
SE
1028}
1029
45bfb2e5
PZ
1030static inline bool has_aux(struct perf_event *event)
1031{
1032 return event->pmu->setup_aux;
1033}
1034
5622f295 1035extern int perf_output_begin(struct perf_output_handle *handle,
a7ac67ea 1036 struct perf_event *event, unsigned int size);
5622f295 1037extern void perf_output_end(struct perf_output_handle *handle);
91d7753a 1038extern unsigned int perf_output_copy(struct perf_output_handle *handle,
5622f295 1039 const void *buf, unsigned int len);
5685e0ff
JO
1040extern unsigned int perf_output_skip(struct perf_output_handle *handle,
1041 unsigned int len);
4ed7c92d
PZ
1042extern int perf_swevent_get_recursion_context(void);
1043extern void perf_swevent_put_recursion_context(int rctx);
ab573844 1044extern u64 perf_swevent_set_period(struct perf_event *event);
44234adc
FW
1045extern void perf_event_enable(struct perf_event *event);
1046extern void perf_event_disable(struct perf_event *event);
fae3fde6 1047extern void perf_event_disable_local(struct perf_event *event);
e9d2b064 1048extern void perf_event_task_tick(void);
e041e328 1049#else /* !CONFIG_PERF_EVENTS: */
fdc26706
AS
1050static inline void *
1051perf_aux_output_begin(struct perf_output_handle *handle,
1052 struct perf_event *event) { return NULL; }
1053static inline void
1054perf_aux_output_end(struct perf_output_handle *handle, unsigned long size,
1055 bool truncated) { }
1056static inline int
1057perf_aux_output_skip(struct perf_output_handle *handle,
1058 unsigned long size) { return -EINVAL; }
1059static inline void *
1060perf_get_aux(struct perf_output_handle *handle) { return NULL; }
0793a61d 1061static inline void
ff303e66
PZ
1062perf_event_task_migrate(struct task_struct *task) { }
1063static inline void
ab0cce56
JO
1064perf_event_task_sched_in(struct task_struct *prev,
1065 struct task_struct *task) { }
1066static inline void
1067perf_event_task_sched_out(struct task_struct *prev,
1068 struct task_struct *next) { }
cdd6c482
IM
1069static inline int perf_event_init_task(struct task_struct *child) { return 0; }
1070static inline void perf_event_exit_task(struct task_struct *child) { }
1071static inline void perf_event_free_task(struct task_struct *task) { }
4e231c79 1072static inline void perf_event_delayed_put(struct task_struct *task) { }
e03e7ee3 1073static inline struct file *perf_event_get(unsigned int fd) { return ERR_PTR(-EINVAL); }
ffe8690c
KX
1074static inline const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
1075{
1076 return ERR_PTR(-EINVAL);
1077}
1078static inline u64 perf_event_read_local(struct perf_event *event) { return -EINVAL; }
57c0c15b 1079static inline void perf_event_print_debug(void) { }
57c0c15b
IM
1080static inline int perf_event_task_disable(void) { return -EINVAL; }
1081static inline int perf_event_task_enable(void) { return -EINVAL; }
26ca5c11
AK
1082static inline int perf_event_refresh(struct perf_event *event, int refresh)
1083{
1084 return -EINVAL;
1085}
15dbf27c 1086
925d519a 1087static inline void
a8b0ca17 1088perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) { }
24f1e32c 1089static inline void
86038c5e
PZI
1090perf_sw_event_sched(u32 event_id, u64 nr, u64 addr) { }
1091static inline void
184f412c 1092perf_bp_event(struct perf_event *event, void *data) { }
0a4a9391 1093
39447b38 1094static inline int perf_register_guest_info_callbacks
e7e7ee2e 1095(struct perf_guest_info_callbacks *callbacks) { return 0; }
39447b38 1096static inline int perf_unregister_guest_info_callbacks
e7e7ee2e 1097(struct perf_guest_info_callbacks *callbacks) { return 0; }
39447b38 1098
57c0c15b 1099static inline void perf_event_mmap(struct vm_area_struct *vma) { }
e041e328 1100static inline void perf_event_exec(void) { }
82b89778 1101static inline void perf_event_comm(struct task_struct *tsk, bool exec) { }
cdd6c482
IM
1102static inline void perf_event_fork(struct task_struct *tsk) { }
1103static inline void perf_event_init(void) { }
184f412c 1104static inline int perf_swevent_get_recursion_context(void) { return -1; }
4ed7c92d 1105static inline void perf_swevent_put_recursion_context(int rctx) { }
ab573844 1106static inline u64 perf_swevent_set_period(struct perf_event *event) { return 0; }
44234adc
FW
1107static inline void perf_event_enable(struct perf_event *event) { }
1108static inline void perf_event_disable(struct perf_event *event) { }
500ad2d8 1109static inline int __perf_event_disable(void *info) { return -1; }
e9d2b064 1110static inline void perf_event_task_tick(void) { }
ffe8690c 1111static inline int perf_event_release_kernel(struct perf_event *event) { return 0; }
0793a61d
TG
1112#endif
1113
6c4d3bc9
DR
1114#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
1115extern void perf_restore_debug_store(void);
1116#else
1d9d8639 1117static inline void perf_restore_debug_store(void) { }
0793a61d
TG
1118#endif
1119
e7e7ee2e 1120#define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))
5622f295 1121
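/*
 * Usage sketch: records are emitted by wrapping one or more
 * perf_output_put() calls in a perf_output_begin()/perf_output_end()
 * pair (schematic, shown in comment form since perf_output_begin() is
 * only declared when CONFIG_PERF_EVENTS is set):
 *
 *	struct perf_output_handle handle;
 *	struct perf_event_header header = {
 *		.type = PERF_RECORD_SAMPLE,
 *		.size = sizeof(header) + sizeof(payload),
 *	};
 *
 *	if (perf_output_begin(&handle, event, header.size))
 *		return;			(no space in the ring buffer)
 *	perf_output_put(&handle, header);
 *	perf_output_put(&handle, payload);
 *	perf_output_end(&handle);
 */
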
/*
 * This has to have a higher priority than migration_notifier in sched/core.c.
 */
#define perf_cpu_notifier(fn)						\
do {									\
	static struct notifier_block fn##_nb =				\
		{ .notifier_call = fn, .priority = CPU_PRI_PERF };	\
	unsigned long cpu = smp_processor_id();				\
	unsigned long flags;						\
									\
	cpu_notifier_register_begin();					\
	fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE,			\
		(void *)(unsigned long)cpu);				\
	local_irq_save(flags);						\
	fn(&fn##_nb, (unsigned long)CPU_STARTING,			\
		(void *)(unsigned long)cpu);				\
	local_irq_restore(flags);					\
	fn(&fn##_nb, (unsigned long)CPU_ONLINE,				\
		(void *)(unsigned long)cpu);				\
	__register_cpu_notifier(&fn##_nb);				\
	cpu_notifier_register_done();					\
} while (0)

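/*
 * Usage sketch: a PMU driver's hotplug callback, wired up from its init
 * path with the macro above (hypothetical example_* name):
 */
static inline int example_cpu_notifier(struct notifier_block *self,
				       unsigned long action, void *hcpu)
{
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		/* allocate per-cpu state for (unsigned long)hcpu */
		break;
	case CPU_STARTING:
		/* runs on the new cpu with IRQs off: program the hardware */
		break;
	case CPU_ONLINE:
		/* cpu is fully up */
		break;
	}
	return NOTIFY_OK;
}
/* from the driver's init: perf_cpu_notifier(example_cpu_notifier); */
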
/*
 * Bare-bones version of perf_cpu_notifier(), which doesn't invoke the
 * callback for already online CPUs.
 */
#define __perf_cpu_notifier(fn)						\
do {									\
	static struct notifier_block fn##_nb =				\
		{ .notifier_call = fn, .priority = CPU_PRI_PERF };	\
									\
	__register_cpu_notifier(&fn##_nb);				\
} while (0)

struct perf_pmu_events_attr {
	struct device_attribute attr;
	u64 id;
	const char *event_str;
};

ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
			      char *page);

#define PMU_EVENT_ATTR(_name, _var, _id, _show)				\
static struct perf_pmu_events_attr _var = {				\
	.attr = __ATTR(_name, 0444, _show, NULL),			\
	.id   =  _id,							\
};

#define PMU_EVENT_ATTR_STRING(_name, _var, _str)			\
static struct perf_pmu_events_attr _var = {				\
	.attr		= __ATTR(_name, 0444, perf_event_sysfs_show, NULL), \
	.id		= 0,						\
	.event_str	= _str,						\
};

#define PMU_FORMAT_ATTR(_name, _format)					\
static ssize_t								\
_name##_show(struct device *dev,					\
	     struct device_attribute *attr,				\
	     char *page)						\
{									\
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);			\
	return sprintf(page, _format "\n");				\
}									\
									\
static struct device_attribute format_attr_##_name = __ATTR_RO(_name)

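/*
 * Usage sketch: drivers expand these macros at file scope and collect the
 * results into the attribute groups passed via pmu::attr_groups (the
 * register layout below is hypothetical):
 *
 *	PMU_FORMAT_ATTR(event, "config:0-7");
 *	PMU_EVENT_ATTR_STRING(cycles, example_attr_cycles, "event=0x3c");
 *
 *	static struct attribute *example_format_attrs[] = {
 *		&format_attr_event.attr,
 *		NULL,
 *	};
 */
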
#endif /* _LINUX_PERF_EVENT_H */