perf_counter: Separate out attr->type from attr->config
include/linux/perf_counter.h

/*
 * Performance counters:
 *
 *  Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008, Red Hat, Inc., Ingo Molnar
 *
 * Data type definitions, declarations, prototypes.
 *
 * Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licensing details see kernel-base/COPYING
 */
#ifndef _LINUX_PERF_COUNTER_H
#define _LINUX_PERF_COUNTER_H

#include <linux/types.h>
#include <linux/ioctl.h>
#include <asm/byteorder.h>

/*
 * User-space ABI bits:
 */

/*
 * attr.type
 */
enum perf_event_types {
	PERF_TYPE_HARDWARE		= 0,
	PERF_TYPE_SOFTWARE		= 1,
	PERF_TYPE_TRACEPOINT		= 2,

	/*
	 * available TYPE space, raw is the max value.
	 */

	PERF_TYPE_RAW			= 128,
};

/*
 * Generalized performance counter event types, used by the attr.config
 * parameter of the sys_perf_counter_open() syscall:
 */
enum attr_ids {
	/*
	 * Common hardware events, generalized by the kernel:
	 */
	PERF_COUNT_CPU_CYCLES		= 0,
	PERF_COUNT_INSTRUCTIONS		= 1,
	PERF_COUNT_CACHE_REFERENCES	= 2,
	PERF_COUNT_CACHE_MISSES		= 3,
	PERF_COUNT_BRANCH_INSTRUCTIONS	= 4,
	PERF_COUNT_BRANCH_MISSES	= 5,
	PERF_COUNT_BUS_CYCLES		= 6,

	PERF_HW_EVENTS_MAX		= 7,
};

/*
 * Special "software" counters provided by the kernel, even if the hardware
 * does not support performance counters. These counters measure various
 * physical and software events of the kernel (and allow profiling of them
 * as well):
 */
enum sw_event_ids {
	PERF_COUNT_CPU_CLOCK		= 0,
	PERF_COUNT_TASK_CLOCK		= 1,
	PERF_COUNT_PAGE_FAULTS		= 2,
	PERF_COUNT_CONTEXT_SWITCHES	= 3,
	PERF_COUNT_CPU_MIGRATIONS	= 4,
	PERF_COUNT_PAGE_FAULTS_MIN	= 5,
	PERF_COUNT_PAGE_FAULTS_MAJ	= 6,

	PERF_SW_EVENTS_MAX		= 7,
};

/*
 * Bits that can be set in attr.sample_type to request information
 * in the overflow packets.
 */
enum perf_counter_sample_format {
	PERF_SAMPLE_IP			= 1U << 0,
	PERF_SAMPLE_TID			= 1U << 1,
	PERF_SAMPLE_TIME		= 1U << 2,
	PERF_SAMPLE_ADDR		= 1U << 3,
	PERF_SAMPLE_GROUP		= 1U << 4,
	PERF_SAMPLE_CALLCHAIN		= 1U << 5,
	PERF_SAMPLE_ID			= 1U << 6,
	PERF_SAMPLE_CPU			= 1U << 7,
	PERF_SAMPLE_PERIOD		= 1U << 8,
};

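/*
 * For example, a sampling counter that wants the instruction pointer
 * and the pid/tid pair in each overflow packet would set (an
 * illustrative sketch, not a new ABI definition):
 *
 *	attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID;
 */
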
/*
 * Bits that can be set in attr.read_format to request that
 * reads on the counter should return the indicated quantities,
 * in increasing order of bit value, after the counter value.
 */
enum perf_counter_read_format {
	PERF_FORMAT_TOTAL_TIME_ENABLED	= 1U << 0,
	PERF_FORMAT_TOTAL_TIME_RUNNING	= 1U << 1,
	PERF_FORMAT_ID			= 1U << 2,
};

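/*
 * With all three bits set, a read() on the counter fd returns four
 * u64 quantities in this order (a sketch derived from the comment
 * above, not a struct defined by this ABI):
 *
 *	u64 value;		// the counter value itself
 *	u64 time_enabled;	// PERF_FORMAT_TOTAL_TIME_ENABLED
 *	u64 time_running;	// PERF_FORMAT_TOTAL_TIME_RUNNING
 *	u64 id;			// PERF_FORMAT_ID
 */
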
/*
 * Hardware event to monitor via a performance monitoring counter:
 */
struct perf_counter_attr {
	/*
	 * Major type: hardware/software/tracepoint/etc.
	 */
	__u32			type;
	__u32			__reserved_1;

	/*
	 * Type specific configuration information.
	 */
	__u64			config;

	union {
		__u64		sample_period;
		__u64		sample_freq;
	};

	__u64			sample_type;
	__u64			read_format;

	__u64			disabled       :  1, /* off by default        */
				inherit	       :  1, /* children inherit it   */
				pinned	       :  1, /* must always be on PMU */
				exclusive      :  1, /* only group on PMU     */
				exclude_user   :  1, /* don't count user      */
				exclude_kernel :  1, /* ditto kernel          */
				exclude_hv     :  1, /* ditto hypervisor      */
				exclude_idle   :  1, /* don't count when idle */
				mmap           :  1, /* include mmap data     */
				comm	       :  1, /* include comm data     */
				freq           :  1, /* use freq, not period  */

				__reserved_2   : 53;

	__u32			wakeup_events;	/* wakeup every n events */
	__u32			__reserved_3;

	__u64			__reserved_4;
};

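/*
 * A minimal sketch of configuring a counter to count generalized
 * hardware instructions via sys_perf_counter_open(); the syscall
 * wrapper and error handling are assumed, not part of this header:
 *
 *	struct perf_counter_attr attr;
 *	int fd;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.type	= PERF_TYPE_HARDWARE;
 *	attr.config	= PERF_COUNT_INSTRUCTIONS;
 *	attr.disabled	= 1;	// create the counter off
 *
 *	fd = sys_perf_counter_open(&attr, pid, cpu, group_fd, flags);
 */
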
/*
 * Ioctls that can be done on a perf counter fd:
 */
#define PERF_COUNTER_IOC_ENABLE		_IO ('$', 0)
#define PERF_COUNTER_IOC_DISABLE	_IO ('$', 1)
#define PERF_COUNTER_IOC_REFRESH	_IO ('$', 2)
#define PERF_COUNTER_IOC_RESET		_IO ('$', 3)
#define PERF_COUNTER_IOC_PERIOD		_IOW('$', 4, u64)

enum perf_counter_ioc_flags {
	PERF_IOC_FLAG_GROUP		= 1U << 0,
};

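/*
 * For example, enabling a counter together with all of its group
 * siblings in one call (a usage sketch; fd is assumed to be a perf
 * counter file descriptor):
 *
 *	ioctl(fd, PERF_COUNTER_IOC_ENABLE, PERF_IOC_FLAG_GROUP);
 */
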
/*
 * Structure of the page that can be mapped via mmap
 */
struct perf_counter_mmap_page {
	__u32	version;		/* version number of this structure */
	__u32	compat_version;		/* lowest version this is compat with */

	/*
	 * Bits needed to read the hw counters in user-space.
	 *
	 *   u32 seq;
	 *   s64 count;
	 *
	 *   do {
	 *     seq = pc->lock;
	 *
	 *     barrier()
	 *     if (pc->index) {
	 *       count = pmc_read(pc->index - 1);
	 *       count += pc->offset;
	 *     } else
	 *       goto regular_read;
	 *
	 *     barrier();
	 *   } while (pc->lock != seq);
	 *
	 * NOTE: for obvious reasons this only works on self-monitoring
	 *       processes.
	 */
	__u32	lock;			/* seqlock for synchronization */
	__u32	index;			/* hardware counter identifier */
	__s64	offset;			/* add to hardware counter value */

	/*
	 * Control data for the mmap() data buffer.
	 *
	 * User-space should issue an rmb(), on SMP-capable platforms,
	 * after reading this value -- see perf_counter_wakeup().
	 */
	__u64	data_head;		/* head in the data section */
};

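/*
 * The read loop sketched above, spelled out as a self-contained
 * user-space helper (illustrative only: pmc_read() stands for the
 * architecture's counter-read instruction, e.g. RDPMC, barrier() is
 * a compiler barrier, and read_via_syscall() is a fallback to a
 * regular read() on the counter fd -- none of these names are
 * defined by this header):
 *
 *	static u64 read_self_counter(volatile struct perf_counter_mmap_page *pc)
 *	{
 *		u32 seq;
 *		s64 count;
 *
 *		do {
 *			seq = pc->lock;
 *			barrier();
 *			if (!pc->index)
 *				return read_via_syscall();
 *			count = pmc_read(pc->index - 1);
 *			count += pc->offset;
 *			barrier();
 *		} while (pc->lock != seq);
 *
 *		return count;
 *	}
 */
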
#define PERF_EVENT_MISC_CPUMODE_MASK	(3 << 0)
#define PERF_EVENT_MISC_CPUMODE_UNKNOWN	(0 << 0)
#define PERF_EVENT_MISC_KERNEL		(1 << 0)
#define PERF_EVENT_MISC_USER		(2 << 0)
#define PERF_EVENT_MISC_HYPERVISOR	(3 << 0)
#define PERF_EVENT_MISC_OVERFLOW	(1 << 2)

struct perf_event_header {
	__u32	type;
	__u16	misc;
	__u16	size;
};

enum perf_event_type {

	/*
	 * The MMAP events record the PROT_EXEC mappings so that we can
	 * correlate userspace IPs to code. They have the following structure:
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	u32				pid, tid;
	 *	u64				addr;
	 *	u64				len;
	 *	u64				pgoff;
	 *	char				filename[];
	 * };
	 */
	PERF_EVENT_MMAP			= 1,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	u32				pid, tid;
	 *	char				comm[];
	 * };
	 */
	PERF_EVENT_COMM			= 3,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u64				time;
	 *	u64				id;
	 *	u64				sample_period;
	 * };
	 */
	PERF_EVENT_PERIOD		= 4,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u64				time;
	 * };
	 */
	PERF_EVENT_THROTTLE		= 5,
	PERF_EVENT_UNTHROTTLE		= 6,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid, ppid;
	 * };
	 */
	PERF_EVENT_FORK			= 7,

	/*
	 * When header.misc & PERF_EVENT_MISC_OVERFLOW the event_type field
	 * will be PERF_RECORD_*
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	{ u64			ip;	  } && PERF_RECORD_IP
	 *	{ u32			pid, tid; } && PERF_RECORD_TID
	 *	{ u64			time;	  } && PERF_RECORD_TIME
	 *	{ u64			addr;	  } && PERF_RECORD_ADDR
	 *	{ u64			config;	  } && PERF_RECORD_CONFIG
	 *	{ u32			cpu, res; } && PERF_RECORD_CPU
	 *
	 *	{ u64			nr;
	 *	  { u64 id, val; }	cnt[nr];  } && PERF_RECORD_GROUP
	 *
	 *	{ u16			nr,
	 *				hv,
	 *				kernel,
	 *				user;
	 *	  u64			ips[nr];  } && PERF_RECORD_CALLCHAIN
	 * };
	 */
};

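/*
 * Records in the mmap data buffer are self-describing via their
 * headers, so a consumer can walk them like this (a sketch; buf/len
 * describe already-copied event data and handle() is an assumed
 * callback, not part of this ABI):
 *
 *	void walk_events(const void *buf, size_t len,
 *			 void (*handle)(const struct perf_event_header *))
 *	{
 *		const unsigned char *p = buf, *end = p + len;
 *
 *		while (p + sizeof(struct perf_event_header) <= end) {
 *			const struct perf_event_header *hdr =
 *				(const struct perf_event_header *)p;
 *
 *			handle(hdr);	// hdr->type selects PERF_EVENT_*
 *			p += hdr->size;	// size covers header and body
 *		}
 *	}
 */
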
#ifdef __KERNEL__
/*
 * Kernel-internal data types and definitions:
 */

#ifdef CONFIG_PERF_COUNTERS
# include <asm/perf_counter.h>
#endif

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/pid_namespace.h>
#include <asm/atomic.h>

struct task_struct;

/**
 * struct hw_perf_counter - performance counter hardware details:
 */
struct hw_perf_counter {
#ifdef CONFIG_PERF_COUNTERS
	union {
		struct { /* hardware */
			u64		config;
			unsigned long	config_base;
			unsigned long	counter_base;
			int		idx;
		};
		union { /* software */
			atomic64_t	count;
			struct hrtimer	hrtimer;
		};
	};
	atomic64_t			prev_count;
	u64				sample_period;
	atomic64_t			period_left;
	u64				interrupts;

	u64				freq_count;
	u64				freq_interrupts;
#endif
};

struct perf_counter;

/**
 * struct pmu - generic performance monitoring unit
 */
struct pmu {
	int (*enable)			(struct perf_counter *counter);
	void (*disable)			(struct perf_counter *counter);
	void (*read)			(struct perf_counter *counter);
	void (*unthrottle)		(struct perf_counter *counter);
};

/**
 * enum perf_counter_active_state - the states of a counter
 */
enum perf_counter_active_state {
	PERF_COUNTER_STATE_ERROR	= -2,
	PERF_COUNTER_STATE_OFF		= -1,
	PERF_COUNTER_STATE_INACTIVE	=  0,
	PERF_COUNTER_STATE_ACTIVE	=  1,
};

struct file;

struct perf_mmap_data {
	struct rcu_head			rcu_head;
	int				nr_pages;	/* nr of data pages */
	int				nr_locked;	/* nr pages mlocked */

	atomic_t			poll;		/* POLL_ for wakeups */
	atomic_t			events;		/* event limit */

	atomic_long_t			head;		/* write position */
	atomic_long_t			done_head;	/* completed head */

	atomic_t			lock;		/* concurrent writes */

	atomic_t			wakeup;		/* needs a wakeup */

	struct perf_counter_mmap_page	*user_page;
	void				*data_pages[0];
};

struct perf_pending_entry {
	struct perf_pending_entry *next;
	void (*func)(struct perf_pending_entry *);
};

/**
 * struct perf_counter - performance counter kernel representation:
 */
struct perf_counter {
#ifdef CONFIG_PERF_COUNTERS
	struct list_head		list_entry;
	struct list_head		event_entry;
	struct list_head		sibling_list;
	int				nr_siblings;
	struct perf_counter		*group_leader;
	const struct pmu		*pmu;

	enum perf_counter_active_state	state;
	atomic64_t			count;

	/*
	 * These are the total time in nanoseconds that the counter
	 * has been enabled (i.e. eligible to run, and the task has
	 * been scheduled in, if this is a per-task counter)
	 * and running (scheduled onto the CPU), respectively.
	 *
	 * They are computed from tstamp_enabled, tstamp_running and
	 * tstamp_stopped when the counter is in INACTIVE or ACTIVE state.
	 */
	u64				total_time_enabled;
	u64				total_time_running;

	/*
	 * These are timestamps used for computing total_time_enabled
	 * and total_time_running when the counter is in INACTIVE or
	 * ACTIVE state, measured in nanoseconds from an arbitrary point
	 * in time.
	 * tstamp_enabled: the notional time when the counter was enabled
	 * tstamp_running: the notional time when the counter was scheduled on
	 * tstamp_stopped: in INACTIVE state, the notional time when the
	 *	counter was scheduled off.
	 */
	u64				tstamp_enabled;
	u64				tstamp_running;
	u64				tstamp_stopped;

	struct perf_counter_attr	attr;
	struct hw_perf_counter		hw;

	struct perf_counter_context	*ctx;
	struct file			*filp;

	/*
	 * These accumulate total time (in nanoseconds) that children
	 * counters have been enabled and running, respectively.
	 */
	atomic64_t			child_total_time_enabled;
	atomic64_t			child_total_time_running;

	/*
	 * Protect attach/detach and child_list:
	 */
	struct mutex			child_mutex;
	struct list_head		child_list;
	struct perf_counter		*parent;

	int				oncpu;
	int				cpu;

	struct list_head		owner_entry;
	struct task_struct		*owner;

	/* mmap bits */
	struct mutex			mmap_mutex;
	atomic_t			mmap_count;
	struct perf_mmap_data		*data;

	/* poll related */
	wait_queue_head_t		waitq;
	struct fasync_struct		*fasync;

	/* delayed work for NMIs and such */
	int				pending_wakeup;
	int				pending_kill;
	int				pending_disable;
	struct perf_pending_entry	pending;

	atomic_t			event_limit;

	void (*destroy)(struct perf_counter *);
	struct rcu_head			rcu_head;

	struct pid_namespace		*ns;
	u64				id;
#endif
};

/**
 * struct perf_counter_context - counter context structure
 *
 * Used as a container for task counters and CPU counters as well:
 */
struct perf_counter_context {
	/*
	 * Protect the states of the counters in the list,
	 * nr_active, and the list:
	 */
	spinlock_t		lock;
	/*
	 * Protect the list of counters. Locking either mutex or lock
	 * is sufficient to ensure the list doesn't change; to change
	 * the list you need to lock both the mutex and the spinlock.
	 */
	struct mutex		mutex;

	struct list_head	counter_list;
	struct list_head	event_list;
	int			nr_counters;
	int			nr_active;
	int			is_active;
	atomic_t		refcount;
	struct task_struct	*task;

	/*
	 * Context clock, runs when context enabled.
	 */
	u64			time;
	u64			timestamp;

	/*
	 * These fields let us detect when two contexts have both
	 * been cloned (inherited) from a common ancestor.
	 */
	struct perf_counter_context *parent_ctx;
	u64			parent_gen;
	u64			generation;
	int			pin_count;
	struct rcu_head		rcu_head;
};

/**
 * struct perf_cpu_context - per cpu counter context structure
 */
struct perf_cpu_context {
	struct perf_counter_context	ctx;
	struct perf_counter_context	*task_ctx;
	int				active_oncpu;
	int				max_pertask;
	int				exclusive;

	/*
	 * Recursion avoidance:
	 *
	 * task, softirq, irq, nmi context
	 */
	int				recursion[4];
};

#ifdef CONFIG_PERF_COUNTERS

/*
 * Set by architecture code:
 */
extern int perf_max_counters;

extern const struct pmu *hw_perf_counter_init(struct perf_counter *counter);

extern void perf_counter_task_sched_in(struct task_struct *task, int cpu);
extern void perf_counter_task_sched_out(struct task_struct *task,
					struct task_struct *next, int cpu);
extern void perf_counter_task_tick(struct task_struct *task, int cpu);
extern int perf_counter_init_task(struct task_struct *child);
extern void perf_counter_exit_task(struct task_struct *child);
extern void perf_counter_free_task(struct task_struct *task);
extern void perf_counter_do_pending(void);
extern void perf_counter_print_debug(void);
extern void __perf_disable(void);
extern bool __perf_enable(void);
extern void perf_disable(void);
extern void perf_enable(void);
extern int perf_counter_task_disable(void);
extern int perf_counter_task_enable(void);
extern int hw_perf_group_sched_in(struct perf_counter *group_leader,
				  struct perf_cpu_context *cpuctx,
				  struct perf_counter_context *ctx, int cpu);
extern void perf_counter_update_userpage(struct perf_counter *counter);

extern int perf_counter_overflow(struct perf_counter *counter,
				 int nmi, struct pt_regs *regs, u64 addr);

/*
 * Return 1 for a software counter, 0 for a hardware counter
 */
static inline int is_software_counter(struct perf_counter *counter)
{
	return (counter->attr.type != PERF_TYPE_RAW) &&
	       (counter->attr.type != PERF_TYPE_HARDWARE);
}

extern void perf_swcounter_event(u32, u64, int, struct pt_regs *, u64);

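/*
 * Core kernel call sites report software events through this hook;
 * e.g. the page fault path does roughly (a sketch of typical usage,
 * not a declaration):
 *
 *	perf_swcounter_event(PERF_COUNT_PAGE_FAULTS, 1, 0, regs, address);
 */
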
extern void __perf_counter_mmap(struct vm_area_struct *vma);

static inline void perf_counter_mmap(struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_EXEC)
		__perf_counter_mmap(vma);
}

extern void perf_counter_comm(struct task_struct *tsk);
extern void perf_counter_fork(struct task_struct *tsk);

extern void perf_counter_task_migration(struct task_struct *task, int cpu);

#define MAX_STACK_DEPTH		255

struct perf_callchain_entry {
	u16	nr, hv, kernel, user;
	u64	ip[MAX_STACK_DEPTH];
};

extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs);

extern int sysctl_perf_counter_priv;
extern int sysctl_perf_counter_mlock;
extern int sysctl_perf_counter_limit;

extern void perf_counter_init(void);

#ifndef perf_misc_flags
#define perf_misc_flags(regs)	(user_mode(regs) ? PERF_EVENT_MISC_USER : \
				 PERF_EVENT_MISC_KERNEL)
#define perf_instruction_pointer(regs)	instruction_pointer(regs)
#endif

#else
static inline void
perf_counter_task_sched_in(struct task_struct *task, int cpu)		{ }
static inline void
perf_counter_task_sched_out(struct task_struct *task,
			    struct task_struct *next, int cpu)		{ }
static inline void
perf_counter_task_tick(struct task_struct *task, int cpu)		{ }
static inline int perf_counter_init_task(struct task_struct *child)	{ return 0; }
static inline void perf_counter_exit_task(struct task_struct *child)	{ }
static inline void perf_counter_free_task(struct task_struct *task)	{ }
static inline void perf_counter_do_pending(void)			{ }
static inline void perf_counter_print_debug(void)			{ }
static inline void perf_disable(void)					{ }
static inline void perf_enable(void)					{ }
static inline int perf_counter_task_disable(void)	{ return -EINVAL; }
static inline int perf_counter_task_enable(void)	{ return -EINVAL; }

static inline void
perf_swcounter_event(u32 event, u64 nr, int nmi,
		     struct pt_regs *regs, u64 addr)			{ }

static inline void perf_counter_mmap(struct vm_area_struct *vma)	{ }
static inline void perf_counter_comm(struct task_struct *tsk)		{ }
static inline void perf_counter_fork(struct task_struct *tsk)		{ }
static inline void perf_counter_init(void)				{ }
static inline void perf_counter_task_migration(struct task_struct *task,
					       int cpu)			{ }
#endif

#endif /* __KERNEL__ */
#endif /* _LINUX_PERF_COUNTER_H */