perf, x86: Add Intel SandyBridge CPU support
arch/x86/kernel/cpu/perf_event.c
241771ef 1/*
cdd6c482 2 * Performance events x86 architecture code
241771ef 3 *
98144511
IM
4 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
5 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
6 * Copyright (C) 2009 Jaswinder Singh Rajput
7 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
8 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
30dd568c 9 * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
1da53e02 10 * Copyright (C) 2009 Google, Inc., Stephane Eranian
241771ef
IM
11 *
12 * For licencing details see kernel-base/COPYING
13 */
14
cdd6c482 15#include <linux/perf_event.h>
241771ef
IM
16#include <linux/capability.h>
17#include <linux/notifier.h>
18#include <linux/hardirq.h>
19#include <linux/kprobes.h>
4ac13294 20#include <linux/module.h>
241771ef
IM
21#include <linux/kdebug.h>
22#include <linux/sched.h>
d7d59fb3 23#include <linux/uaccess.h>
5a0e3ad6 24#include <linux/slab.h>
74193ef0 25#include <linux/highmem.h>
30dd568c 26#include <linux/cpu.h>
272d30be 27#include <linux/bitops.h>
241771ef 28
241771ef 29#include <asm/apic.h>
d7d59fb3 30#include <asm/stacktrace.h>
4e935e47 31#include <asm/nmi.h>
257ef9d2 32#include <asm/compat.h>
241771ef 33
7645a24c
PZ
34#if 0
35#undef wrmsrl
36#define wrmsrl(msr, val) \
37do { \
38 trace_printk("wrmsrl(%lx, %lx)\n", (unsigned long)(msr),\
39 (unsigned long)(val)); \
40 native_write_msr((msr), (u32)((u64)(val)), \
41 (u32)((u64)(val) >> 32)); \
42} while (0)
43#endif
44
ef21f683
PZ
45/*
46 * best effort, GUP based copy_from_user() that assumes IRQ or NMI context
47 */
48static unsigned long
49copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
50{
51 unsigned long offset, addr = (unsigned long)from;
ef21f683
PZ
52 unsigned long size, len = 0;
53 struct page *page;
54 void *map;
55 int ret;
56
57 do {
58 ret = __get_user_pages_fast(addr, 1, 0, &page);
59 if (!ret)
60 break;
61
62 offset = addr & (PAGE_SIZE - 1);
63 size = min(PAGE_SIZE - offset, n - len);
64
7a837d1b 65 map = kmap_atomic(page);
ef21f683 66 memcpy(to, map+offset, size);
7a837d1b 67 kunmap_atomic(map);
ef21f683
PZ
68 put_page(page);
69
70 len += size;
71 to += size;
72 addr += size;
73
74 } while (len < n);
75
76 return len;
77}
78
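/*
 * Illustrative aside, not part of the kernel source above: copy_from_user_nmi()
 * is best-effort and returns the number of bytes actually copied, which can be
 * short when __get_user_pages_fast() fails (page not present). Callers such as
 * the callchain walkers further down compare the return value against the
 * requested size. A minimal sketch of that pattern, using only definitions
 * visible in this file:
 */
#if 0	/* illustrative sketch, not built */
static int read_user_frame_example(const void __user *fp, struct stack_frame *frame)
{
	unsigned long bytes;

	bytes = copy_from_user_nmi(frame, fp, sizeof(*frame));

	/* a short copy means "stop walking", as in perf_callchain_user() */
	return bytes == sizeof(*frame);
}
#endif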
1da53e02 79struct event_constraint {
c91e0f5d
PZ
80 union {
81 unsigned long idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
b622d644 82 u64 idxmsk64;
c91e0f5d 83 };
b622d644
PZ
84 u64 code;
85 u64 cmask;
272d30be 86 int weight;
1da53e02
SE
87};
88
38331f62
SE
89struct amd_nb {
90 int nb_id; /* NorthBridge id */
91 int refcnt; /* reference count */
92 struct perf_event *owners[X86_PMC_IDX_MAX];
93 struct event_constraint event_constraints[X86_PMC_IDX_MAX];
94};
95
caff2bef
PZ
96#define MAX_LBR_ENTRIES 16
97
cdd6c482 98struct cpu_hw_events {
ca037701
PZ
99 /*
100 * Generic x86 PMC bits
101 */
1da53e02 102 struct perf_event *events[X86_PMC_IDX_MAX]; /* in counter order */
43f6201a 103 unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
63e6be6d 104 unsigned long running[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
b0f3f28e 105 int enabled;
241771ef 106
1da53e02
SE
107 int n_events;
108 int n_added;
90151c35 109 int n_txn;
1da53e02 110 int assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
447a194b 111 u64 tags[X86_PMC_IDX_MAX];
1da53e02 112 struct perf_event *event_list[X86_PMC_IDX_MAX]; /* in enabled order */
ca037701 113
4d1c52b0
LM
114 unsigned int group_flag;
115
ca037701
PZ
116 /*
117 * Intel DebugStore bits
118 */
119 struct debug_store *ds;
120 u64 pebs_enabled;
121
caff2bef
PZ
122 /*
123 * Intel LBR bits
124 */
125 int lbr_users;
126 void *lbr_context;
127 struct perf_branch_stack lbr_stack;
128 struct perf_branch_entry lbr_entries[MAX_LBR_ENTRIES];
129
ca037701
PZ
130 /*
131 * AMD specific bits
132 */
38331f62 133 struct amd_nb *amd_nb;
b690081d
SE
134};
135
fce877e3 136#define __EVENT_CONSTRAINT(c, n, m, w) {\
b622d644 137 { .idxmsk64 = (n) }, \
c91e0f5d
PZ
138 .code = (c), \
139 .cmask = (m), \
fce877e3 140 .weight = (w), \
c91e0f5d 141}
b690081d 142
fce877e3
PZ
143#define EVENT_CONSTRAINT(c, n, m) \
144 __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n))
145
ca037701
PZ
146/*
147 * Constraint on the Event code.
148 */
ed8777fc 149#define INTEL_EVENT_CONSTRAINT(c, n) \
a098f448 150 EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT)
8433be11 151
ca037701
PZ
152/*
153 * Constraint on the Event code + UMask + fixed-mask
a098f448
RR
154 *
155 * filter mask to validate fixed counter events.
156 * the following filters disqualify an event from the fixed counters:
157 * - inv
158 * - edge
159 * - cnt-mask
160 * The other filters are supported by fixed counters.
161 * The any-thread option is supported starting with v3.
ca037701 162 */
ed8777fc 163#define FIXED_EVENT_CONSTRAINT(c, n) \
a098f448 164 EVENT_CONSTRAINT(c, (1ULL << (32+n)), X86_RAW_EVENT_MASK)
8433be11 165
ca037701
PZ
166/*
167 * Constraint on the Event code + UMask
168 */
b06b3d49 169#define INTEL_UEVENT_CONSTRAINT(c, n) \
ca037701 170 EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)
b06b3d49
LM
171#define PEBS_EVENT_CONSTRAINT(c, n) \
172 INTEL_UEVENT_CONSTRAINT(c, n)
ca037701 173
ed8777fc
PZ
174#define EVENT_CONSTRAINT_END \
175 EVENT_CONSTRAINT(0, 0, 0)
176
177#define for_each_event_constraint(e, c) \
a1f2b70a 178 for ((e) = (c); (e)->weight; (e)++)
b690081d 179
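/*
 * Illustrative aside, not part of the kernel source above: a model-specific
 * PMU driver typically describes its scheduling constraints as a table built
 * from the macros above and terminated by EVENT_CONSTRAINT_END. A hypothetical
 * example (event codes and counter masks chosen for illustration only):
 */
#if 0	/* illustrative sketch, not built */
static struct event_constraint example_event_constraints[] =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0),	/* only on fixed counter 0 */
	INTEL_EVENT_CONSTRAINT(0x48, 0x3),	/* only on generic counters 0-1 */
	EVENT_CONSTRAINT_END
};
#endif
/*
 * Such a table is walked with for_each_event_constraint(), matching
 * (config & c->cmask) == c->code, as x86_get_event_constraints() does below.
 */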
8db909a7
PZ
180union perf_capabilities {
181 struct {
182 u64 lbr_format : 6;
183 u64 pebs_trap : 1;
184 u64 pebs_arch_reg : 1;
185 u64 pebs_format : 4;
186 u64 smm_freeze : 1;
187 };
188 u64 capabilities;
189};
190
241771ef 191/*
5f4ec28f 192 * struct x86_pmu - generic x86 pmu
241771ef 193 */
5f4ec28f 194struct x86_pmu {
ca037701
PZ
195 /*
196 * Generic x86 PMC bits
197 */
faa28ae0
RR
198 const char *name;
199 int version;
a3288106 200 int (*handle_irq)(struct pt_regs *);
9e35ad38 201 void (*disable_all)(void);
11164cd4 202 void (*enable_all)(int added);
aff3d91a
PZ
203 void (*enable)(struct perf_event *);
204 void (*disable)(struct perf_event *);
b4cdc5c2 205 int (*hw_config)(struct perf_event *event);
a072738e 206 int (*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
169e41eb
JSR
207 unsigned eventsel;
208 unsigned perfctr;
b0f3f28e 209 u64 (*event_map)(int);
169e41eb 210 int max_events;
948b1bb8
RR
211 int num_counters;
212 int num_counters_fixed;
213 int cntval_bits;
214 u64 cntval_mask;
04da8a43 215 int apic;
c619b8ff 216 u64 max_period;
63b14649
PZ
217 struct event_constraint *
218 (*get_event_constraints)(struct cpu_hw_events *cpuc,
219 struct perf_event *event);
220
c91e0f5d
PZ
221 void (*put_event_constraints)(struct cpu_hw_events *cpuc,
222 struct perf_event *event);
63b14649 223 struct event_constraint *event_constraints;
3c44780b 224 void (*quirks)(void);
68aa00ac 225 int perfctr_second_write;
3f6da390 226
b38b24ea 227 int (*cpu_prepare)(int cpu);
3f6da390
PZ
228 void (*cpu_starting)(int cpu);
229 void (*cpu_dying)(int cpu);
230 void (*cpu_dead)(int cpu);
ca037701
PZ
231
232 /*
233 * Intel Arch Perfmon v2+
234 */
8db909a7
PZ
235 u64 intel_ctrl;
236 union perf_capabilities intel_cap;
ca037701
PZ
237
238 /*
239 * Intel DebugStore bits
240 */
241 int bts, pebs;
6809b6ea 242 int bts_active, pebs_active;
ca037701
PZ
243 int pebs_record_size;
244 void (*drain_pebs)(struct pt_regs *regs);
245 struct event_constraint *pebs_constraints;
caff2bef
PZ
246
247 /*
248 * Intel LBR
249 */
250 unsigned long lbr_tos, lbr_from, lbr_to; /* MSR base regs */
251 int lbr_nr; /* hardware stack size */
b56a3802
JSR
252};
253
4a06bd85 254static struct x86_pmu x86_pmu __read_mostly;
b56a3802 255
cdd6c482 256static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
b0f3f28e
PZ
257 .enabled = 1,
258};
241771ef 259
07088edb 260static int x86_perf_event_set_period(struct perf_event *event);
b690081d 261
8326f44d 262/*
dfc65094 263 * Generalized hw caching related hw_event table, filled
8326f44d 264 * in on a per model basis. A value of 0 means
dfc65094
IM
265 * 'not supported', -1 means 'hw_event makes no sense on
266 * this CPU', any other value means the raw hw_event
8326f44d
IM
267 * ID.
268 */
269
270#define C(x) PERF_COUNT_HW_CACHE_##x
271
272static u64 __read_mostly hw_cache_event_ids
273 [PERF_COUNT_HW_CACHE_MAX]
274 [PERF_COUNT_HW_CACHE_OP_MAX]
275 [PERF_COUNT_HW_CACHE_RESULT_MAX];
276
ee06094f 277/*
cdd6c482
IM
278 * Propagate event elapsed time into the generic event.
279 * Can only be executed on the CPU where the event is active.
ee06094f
IM
280 * Returns the delta events processed.
281 */
4b7bfd0d 282static u64
cc2ad4ba 283x86_perf_event_update(struct perf_event *event)
ee06094f 284{
cc2ad4ba 285 struct hw_perf_event *hwc = &event->hw;
948b1bb8 286 int shift = 64 - x86_pmu.cntval_bits;
ec3232bd 287 u64 prev_raw_count, new_raw_count;
cc2ad4ba 288 int idx = hwc->idx;
ec3232bd 289 s64 delta;
ee06094f 290
30dd568c
MM
291 if (idx == X86_PMC_IDX_FIXED_BTS)
292 return 0;
293
ee06094f 294 /*
cdd6c482 295 * Careful: an NMI might modify the previous event value.
ee06094f
IM
296 *
297 * Our tactic to handle this is to first atomically read and
298 * exchange a new raw count - then add that new-prev delta
cdd6c482 299 * count to the generic event atomically:
ee06094f
IM
300 */
301again:
e7850595 302 prev_raw_count = local64_read(&hwc->prev_count);
cdd6c482 303 rdmsrl(hwc->event_base + idx, new_raw_count);
ee06094f 304
e7850595 305 if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
ee06094f IM 306 new_raw_count) != prev_raw_count)
307 goto again;
308
309 /*
310 * Now we have the new raw value and have updated the prev
311 * timestamp already. We can now calculate the elapsed delta
cdd6c482 312 * (event-)time and add that to the generic event.
ee06094f
IM
313 *
314 * Careful, not all hw sign-extends above the physical width
ec3232bd 315 * of the count.
ee06094f 316 */
ec3232bd
PZ
317 delta = (new_raw_count << shift) - (prev_raw_count << shift);
318 delta >>= shift;
ee06094f 319
e7850595
PZ
320 local64_add(delta, &event->count);
321 local64_sub(delta, &hwc->period_left);
4b7bfd0d
RR
322
323 return new_raw_count;
ee06094f
IM
324}
325
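/*
 * Illustrative aside, not part of the kernel source above: the shift pair in
 * x86_perf_event_update() sign-extends counter values that are only
 * x86_pmu.cntval_bits wide (e.g. 48) to 64 bit before subtracting, so the
 * delta stays correct across a counter wrap. A standalone user-space sketch
 * of the same arithmetic, assuming a 48-bit counter:
 */
#if 0	/* standalone user-space demo, not part of the kernel build */
#include <stdint.h>
#include <stdio.h>

static int64_t counter_delta(uint64_t prev, uint64_t now, int cntval_bits)
{
	int shift = 64 - cntval_bits;
	int64_t delta = (int64_t)(now << shift) - (int64_t)(prev << shift);

	return delta >> shift;	/* arithmetic shift restores the magnitude */
}

int main(void)
{
	/* a 48-bit counter wrapping from near its maximum back to 0x10 */
	uint64_t prev = 0xfffffffffff0ULL, now = 0x10ULL;

	printf("delta = %lld\n", (long long)counter_delta(prev, now, 48));	/* 32 */
	return 0;
}
#endif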
cdd6c482 326static atomic_t active_events;
4e935e47
PZ
327static DEFINE_MUTEX(pmc_reserve_mutex);
328
b27ea29c
RR
329#ifdef CONFIG_X86_LOCAL_APIC
330
4e935e47
PZ
331static bool reserve_pmc_hardware(void)
332{
333 int i;
334
948b1bb8 335 for (i = 0; i < x86_pmu.num_counters; i++) {
4a06bd85 336 if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
4e935e47
PZ
337 goto perfctr_fail;
338 }
339
948b1bb8 340 for (i = 0; i < x86_pmu.num_counters; i++) {
4a06bd85 341 if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
4e935e47
PZ
342 goto eventsel_fail;
343 }
344
345 return true;
346
347eventsel_fail:
348 for (i--; i >= 0; i--)
4a06bd85 349 release_evntsel_nmi(x86_pmu.eventsel + i);
4e935e47 350
948b1bb8 351 i = x86_pmu.num_counters;
4e935e47
PZ
352
353perfctr_fail:
354 for (i--; i >= 0; i--)
4a06bd85 355 release_perfctr_nmi(x86_pmu.perfctr + i);
4e935e47 356
4e935e47
PZ
357 return false;
358}
359
360static void release_pmc_hardware(void)
361{
362 int i;
363
948b1bb8 364 for (i = 0; i < x86_pmu.num_counters; i++) {
4a06bd85
RR
365 release_perfctr_nmi(x86_pmu.perfctr + i);
366 release_evntsel_nmi(x86_pmu.eventsel + i);
4e935e47 367 }
4e935e47
PZ
368}
369
b27ea29c
RR
370#else
371
372static bool reserve_pmc_hardware(void) { return true; }
373static void release_pmc_hardware(void) {}
374
375#endif
376
33c6d6a7
DZ
377static bool check_hw_exists(void)
378{
379 u64 val, val_new = 0;
4407204c 380 int i, reg, ret = 0;
33c6d6a7 381
4407204c
PZ
382 /*
383 * Check to see if the BIOS enabled any of the counters, if so
384 * complain and bail.
385 */
386 for (i = 0; i < x86_pmu.num_counters; i++) {
387 reg = x86_pmu.eventsel + i;
388 ret = rdmsrl_safe(reg, &val);
389 if (ret)
390 goto msr_fail;
391 if (val & ARCH_PERFMON_EVENTSEL_ENABLE)
392 goto bios_fail;
393 }
394
395 if (x86_pmu.num_counters_fixed) {
396 reg = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
397 ret = rdmsrl_safe(reg, &val);
398 if (ret)
399 goto msr_fail;
400 for (i = 0; i < x86_pmu.num_counters_fixed; i++) {
401 if (val & (0x03 << i*4))
402 goto bios_fail;
403 }
404 }
405
406 /*
407 * Now write a value and read it back to see if it matches,
408 * this is needed to detect certain hardware emulators (qemu/kvm)
409 * that don't trap on the MSR access and always return 0s.
410 */
33c6d6a7 411 val = 0xabcdUL;
4407204c 412 ret = checking_wrmsrl(x86_pmu.perfctr, val);
33c6d6a7
DZ
413 ret |= rdmsrl_safe(x86_pmu.perfctr, &val_new);
414 if (ret || val != val_new)
4407204c 415 goto msr_fail;
33c6d6a7
DZ
416
417 return true;
4407204c
PZ
418
419bios_fail:
420 printk(KERN_CONT "Broken BIOS detected, using software events only.\n");
421 printk(KERN_ERR FW_BUG "the BIOS has corrupted hw-PMU resources (MSR %x is %Lx)\n", reg, val);
422 return false;
423
424msr_fail:
425 printk(KERN_CONT "Broken PMU hardware detected, using software events only.\n");
426 return false;
33c6d6a7
DZ
427}
428
f80c9e30 429static void reserve_ds_buffers(void);
ca037701 430static void release_ds_buffers(void);
30dd568c 431
cdd6c482 432static void hw_perf_event_destroy(struct perf_event *event)
4e935e47 433{
cdd6c482 434 if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) {
4e935e47 435 release_pmc_hardware();
ca037701 436 release_ds_buffers();
4e935e47
PZ
437 mutex_unlock(&pmc_reserve_mutex);
438 }
439}
440
85cf9dba
RR
441static inline int x86_pmu_initialized(void)
442{
443 return x86_pmu.handle_irq != NULL;
444}
445
8326f44d 446static inline int
cdd6c482 447set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event_attr *attr)
8326f44d
IM
448{
449 unsigned int cache_type, cache_op, cache_result;
450 u64 config, val;
451
452 config = attr->config;
453
454 cache_type = (config >> 0) & 0xff;
455 if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
456 return -EINVAL;
457
458 cache_op = (config >> 8) & 0xff;
459 if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
460 return -EINVAL;
461
462 cache_result = (config >> 16) & 0xff;
463 if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
464 return -EINVAL;
465
466 val = hw_cache_event_ids[cache_type][cache_op][cache_result];
467
468 if (val == 0)
469 return -ENOENT;
470
471 if (val == -1)
472 return -EINVAL;
473
474 hwc->config |= val;
475
476 return 0;
477}
478
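/*
 * Illustrative aside, not part of the kernel source above: PERF_TYPE_HW_CACHE
 * events pack the cache id, the operation and the result into one config
 * word, exactly as decoded by set_ext_hw_attr() above (byte 0, byte 1,
 * byte 2). A standalone sketch of the encoding side, using the generic
 * PERF_COUNT_HW_CACHE_* enums from the uapi header:
 */
#if 0	/* standalone user-space demo, not part of the kernel build */
#include <stdint.h>
#include <linux/perf_event.h>

static uint64_t hw_cache_config(unsigned int cache, unsigned int op,
				unsigned int result)
{
	return (uint64_t)cache | ((uint64_t)op << 8) | ((uint64_t)result << 16);
}

/* e.g. L1 data cache read misses:
 *	hw_cache_config(PERF_COUNT_HW_CACHE_L1D,
 *			PERF_COUNT_HW_CACHE_OP_READ,
 *			PERF_COUNT_HW_CACHE_RESULT_MISS);
 */
#endif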
c1726f34
RR
479static int x86_setup_perfctr(struct perf_event *event)
480{
481 struct perf_event_attr *attr = &event->attr;
482 struct hw_perf_event *hwc = &event->hw;
483 u64 config;
484
6c7e550f 485 if (!is_sampling_event(event)) {
c1726f34
RR
486 hwc->sample_period = x86_pmu.max_period;
487 hwc->last_period = hwc->sample_period;
e7850595 488 local64_set(&hwc->period_left, hwc->sample_period);
c1726f34
RR
489 } else {
490 /*
491 * If we have a PMU initialized but no APIC
492 * interrupts, we cannot sample hardware
493 * events (user-space has to fall back and
494 * sample via a hrtimer based software event):
495 */
496 if (!x86_pmu.apic)
497 return -EOPNOTSUPP;
498 }
499
500 if (attr->type == PERF_TYPE_RAW)
501 return 0;
502
503 if (attr->type == PERF_TYPE_HW_CACHE)
504 return set_ext_hw_attr(hwc, attr);
505
506 if (attr->config >= x86_pmu.max_events)
507 return -EINVAL;
508
509 /*
510 * The generic map:
511 */
512 config = x86_pmu.event_map(attr->config);
513
514 if (config == 0)
515 return -ENOENT;
516
517 if (config == -1LL)
518 return -EINVAL;
519
520 /*
521 * Branch tracing:
522 */
523 if ((attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS) &&
524 (hwc->sample_period == 1)) {
525 /* BTS is not supported by this architecture. */
6809b6ea 526 if (!x86_pmu.bts_active)
c1726f34
RR
527 return -EOPNOTSUPP;
528
529 /* BTS is currently only allowed for user-mode. */
530 if (!attr->exclude_kernel)
531 return -EOPNOTSUPP;
532 }
533
534 hwc->config |= config;
535
536 return 0;
537}
4261e0e0 538
b4cdc5c2 539static int x86_pmu_hw_config(struct perf_event *event)
a072738e 540{
ab608344
PZ
541 if (event->attr.precise_ip) {
542 int precise = 0;
543
544 /* Support for constant skid */
6809b6ea 545 if (x86_pmu.pebs_active) {
ab608344
PZ
546 precise++;
547
5553be26
PZ
548 /* Support for IP fixup */
549 if (x86_pmu.lbr_nr)
550 precise++;
551 }
ab608344
PZ
552
553 if (event->attr.precise_ip > precise)
554 return -EOPNOTSUPP;
555 }
556
a072738e
CG
557 /*
558 * Generate PMC IRQs:
559 * (keep 'enabled' bit clear for now)
560 */
b4cdc5c2 561 event->hw.config = ARCH_PERFMON_EVENTSEL_INT;
a072738e
CG
562
563 /*
564 * Count user and OS events unless requested not to
565 */
b4cdc5c2
PZ
566 if (!event->attr.exclude_user)
567 event->hw.config |= ARCH_PERFMON_EVENTSEL_USR;
568 if (!event->attr.exclude_kernel)
569 event->hw.config |= ARCH_PERFMON_EVENTSEL_OS;
a072738e 570
b4cdc5c2
PZ
571 if (event->attr.type == PERF_TYPE_RAW)
572 event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK;
a072738e 573
9d0fcba6 574 return x86_setup_perfctr(event);
a098f448
RR
575}
576
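/*
 * Illustrative aside, not part of the kernel source above: precise_ip comes
 * straight from the user's perf_event_attr, and x86_pmu_hw_config() above
 * caps it at 1 (PEBS, constant skid) or 2 (PEBS plus LBR for IP fixup). A
 * user-space sketch of an attr requesting skid-corrected cycle samples:
 */
#if 0	/* standalone user-space demo, not part of the kernel build */
#include <string.h>
#include <linux/perf_event.h>

static void init_precise_attr(struct perf_event_attr *attr)
{
	memset(attr, 0, sizeof(*attr));
	attr->size = sizeof(*attr);
	attr->type = PERF_TYPE_HARDWARE;
	attr->config = PERF_COUNT_HW_CPU_CYCLES;
	attr->sample_period = 100000;
	attr->precise_ip = 2;	/* rejected with -EOPNOTSUPP if unsupported */
}
#endif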
241771ef 577/*
0d48696f 578 * Setup the hardware configuration for a given attr_type
241771ef 579 */
b0a873eb 580static int __x86_pmu_event_init(struct perf_event *event)
241771ef 581{
4e935e47 582 int err;
241771ef 583
85cf9dba
RR
584 if (!x86_pmu_initialized())
585 return -ENODEV;
241771ef 586
4e935e47 587 err = 0;
cdd6c482 588 if (!atomic_inc_not_zero(&active_events)) {
4e935e47 589 mutex_lock(&pmc_reserve_mutex);
cdd6c482 590 if (atomic_read(&active_events) == 0) {
30dd568c
MM
591 if (!reserve_pmc_hardware())
592 err = -EBUSY;
f80c9e30
PZ
593 else
594 reserve_ds_buffers();
30dd568c
MM
595 }
596 if (!err)
cdd6c482 597 atomic_inc(&active_events);
4e935e47
PZ
598 mutex_unlock(&pmc_reserve_mutex);
599 }
600 if (err)
601 return err;
602
cdd6c482 603 event->destroy = hw_perf_event_destroy;
a1792cda 604
4261e0e0
RR
605 event->hw.idx = -1;
606 event->hw.last_cpu = -1;
607 event->hw.last_tag = ~0ULL;
b690081d 608
9d0fcba6 609 return x86_pmu.hw_config(event);
4261e0e0
RR
610}
611
8c48e444 612static void x86_pmu_disable_all(void)
f87ad35d 613{
cdd6c482 614 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
9e35ad38
PZ
615 int idx;
616
948b1bb8 617 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
b0f3f28e
PZ
618 u64 val;
619
43f6201a 620 if (!test_bit(idx, cpuc->active_mask))
4295ee62 621 continue;
8c48e444 622 rdmsrl(x86_pmu.eventsel + idx, val);
bb1165d6 623 if (!(val & ARCH_PERFMON_EVENTSEL_ENABLE))
4295ee62 624 continue;
bb1165d6 625 val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
8c48e444 626 wrmsrl(x86_pmu.eventsel + idx, val);
f87ad35d 627 }
f87ad35d
JSR
628}
629
a4eaf7f1 630static void x86_pmu_disable(struct pmu *pmu)
b56a3802 631{
1da53e02
SE
632 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
633
85cf9dba 634 if (!x86_pmu_initialized())
9e35ad38 635 return;
1da53e02 636
1a6e21f7
PZ
637 if (!cpuc->enabled)
638 return;
639
640 cpuc->n_added = 0;
641 cpuc->enabled = 0;
642 barrier();
1da53e02
SE
643
644 x86_pmu.disable_all();
b56a3802 645}
241771ef 646
11164cd4 647static void x86_pmu_enable_all(int added)
f87ad35d 648{
cdd6c482 649 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
f87ad35d
JSR
650 int idx;
651
948b1bb8 652 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
cdd6c482 653 struct perf_event *event = cpuc->events[idx];
4295ee62 654 u64 val;
b0f3f28e 655
43f6201a 656 if (!test_bit(idx, cpuc->active_mask))
4295ee62 657 continue;
984b838c 658
cdd6c482 659 val = event->hw.config;
bb1165d6 660 val |= ARCH_PERFMON_EVENTSEL_ENABLE;
8c48e444 661 wrmsrl(x86_pmu.eventsel + idx, val);
f87ad35d
JSR
662 }
663}
664
51b0fe39 665static struct pmu pmu;
1da53e02
SE
666
667static inline int is_x86_event(struct perf_event *event)
668{
669 return event->pmu == &pmu;
670}
671
672static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
673{
63b14649 674 struct event_constraint *c, *constraints[X86_PMC_IDX_MAX];
1da53e02 675 unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
c933c1a6 676 int i, j, w, wmax, num = 0;
1da53e02
SE
677 struct hw_perf_event *hwc;
678
679 bitmap_zero(used_mask, X86_PMC_IDX_MAX);
680
681 for (i = 0; i < n; i++) {
b622d644
PZ
682 c = x86_pmu.get_event_constraints(cpuc, cpuc->event_list[i]);
683 constraints[i] = c;
1da53e02
SE
684 }
685
8113070d
SE
686 /*
687 * fastpath, try to reuse previous register
688 */
c933c1a6 689 for (i = 0; i < n; i++) {
8113070d 690 hwc = &cpuc->event_list[i]->hw;
81269a08 691 c = constraints[i];
8113070d
SE
692
693 /* never assigned */
694 if (hwc->idx == -1)
695 break;
696
697 /* constraint still honored */
63b14649 698 if (!test_bit(hwc->idx, c->idxmsk))
8113070d
SE
699 break;
700
701 /* not already used */
702 if (test_bit(hwc->idx, used_mask))
703 break;
704
34538ee7 705 __set_bit(hwc->idx, used_mask);
8113070d
SE
706 if (assign)
707 assign[i] = hwc->idx;
708 }
c933c1a6 709 if (i == n)
8113070d
SE
710 goto done;
711
712 /*
713 * begin slow path
714 */
715
716 bitmap_zero(used_mask, X86_PMC_IDX_MAX);
717
1da53e02
SE
718 /*
719 * weight = number of possible counters
720 *
721 * 1 = most constrained, only works on one counter
722 * wmax = least constrained, works on any counter
723 *
724 * assign events to counters starting with most
725 * constrained events.
726 */
948b1bb8 727 wmax = x86_pmu.num_counters;
1da53e02
SE
728
729 /*
730 * when fixed event counters are present,
731 * wmax is incremented by 1 to account
732 * for one more choice
733 */
948b1bb8 734 if (x86_pmu.num_counters_fixed)
1da53e02
SE
735 wmax++;
736
8113070d 737 for (w = 1, num = n; num && w <= wmax; w++) {
1da53e02 738 /* for each event */
8113070d 739 for (i = 0; num && i < n; i++) {
81269a08 740 c = constraints[i];
1da53e02
SE
741 hwc = &cpuc->event_list[i]->hw;
742
272d30be 743 if (c->weight != w)
1da53e02
SE
744 continue;
745
984b3f57 746 for_each_set_bit(j, c->idxmsk, X86_PMC_IDX_MAX) {
1da53e02
SE
747 if (!test_bit(j, used_mask))
748 break;
749 }
750
751 if (j == X86_PMC_IDX_MAX)
752 break;
1da53e02 753
34538ee7 754 __set_bit(j, used_mask);
8113070d 755
1da53e02
SE
756 if (assign)
757 assign[i] = j;
758 num--;
759 }
760 }
8113070d 761done:
1da53e02
SE
762 /*
763 * scheduling failed or is just a simulation,
764 * free resources if necessary
765 */
766 if (!assign || num) {
767 for (i = 0; i < n; i++) {
768 if (x86_pmu.put_event_constraints)
769 x86_pmu.put_event_constraints(cpuc, cpuc->event_list[i]);
770 }
771 }
772 return num ? -ENOSPC : 0;
773}
774
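/*
 * Illustrative aside, not part of the kernel source above: the slow path of
 * x86_schedule_events() places the most constrained events first (weight =
 * number of usable counters) and gives each one the first free counter in
 * its mask. A standalone user-space sketch of that greedy pass with made-up
 * masks (the real code also handles the "no free counter" failure case):
 */
#if 0	/* standalone user-space demo, not part of the kernel build */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* allowed-counter masks for three events: weights 1, 2 and 4 */
	uint64_t idxmsk[3] = { 0x1, 0x3, 0xf };
	uint64_t used = 0;
	int assign[3], i, j, w;

	for (w = 1; w <= 4; w++) {			/* most constrained first */
		for (i = 0; i < 3; i++) {
			if (__builtin_popcountll(idxmsk[i]) != w)
				continue;
			for (j = 0; j < 4; j++) {	/* first free counter */
				if (((idxmsk[i] >> j) & 1) && !((used >> j) & 1))
					break;
			}
			used |= 1ULL << j;
			assign[i] = j;
		}
	}

	for (i = 0; i < 3; i++)
		printf("event %d -> counter %d\n", i, assign[i]);
	return 0;
}
#endif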
775/*
776 * dogrp: true if we must collect sibling events (group)
777 * returns total number of events and error code
778 */
779static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp)
780{
781 struct perf_event *event;
782 int n, max_count;
783
948b1bb8 784 max_count = x86_pmu.num_counters + x86_pmu.num_counters_fixed;
1da53e02
SE
785
786 /* current number of events already accepted */
787 n = cpuc->n_events;
788
789 if (is_x86_event(leader)) {
790 if (n >= max_count)
791 return -ENOSPC;
792 cpuc->event_list[n] = leader;
793 n++;
794 }
795 if (!dogrp)
796 return n;
797
798 list_for_each_entry(event, &leader->sibling_list, group_entry) {
799 if (!is_x86_event(event) ||
8113070d 800 event->state <= PERF_EVENT_STATE_OFF)
1da53e02
SE
801 continue;
802
803 if (n >= max_count)
804 return -ENOSPC;
805
806 cpuc->event_list[n] = event;
807 n++;
808 }
809 return n;
810}
811
1da53e02 812static inline void x86_assign_hw_event(struct perf_event *event,
447a194b 813 struct cpu_hw_events *cpuc, int i)
1da53e02 814{
447a194b
SE
815 struct hw_perf_event *hwc = &event->hw;
816
817 hwc->idx = cpuc->assign[i];
818 hwc->last_cpu = smp_processor_id();
819 hwc->last_tag = ++cpuc->tags[i];
1da53e02
SE
820
821 if (hwc->idx == X86_PMC_IDX_FIXED_BTS) {
822 hwc->config_base = 0;
823 hwc->event_base = 0;
824 } else if (hwc->idx >= X86_PMC_IDX_FIXED) {
825 hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
826 /*
827 * We set it so that event_base + idx in wrmsr/rdmsr maps to
828 * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
829 */
830 hwc->event_base =
831 MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
832 } else {
833 hwc->config_base = x86_pmu.eventsel;
834 hwc->event_base = x86_pmu.perfctr;
835 }
836}
837
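/*
 * Illustrative aside, not part of the kernel source above: for fixed counters
 * the event_base is biased by -X86_PMC_IDX_FIXED so that the "event_base +
 * idx" arithmetic used in rdmsrl()/wrmsrl() lands on
 * MSR_ARCH_PERFMON_FIXED_CTR0..2. A quick standalone check of that mapping,
 * assuming the usual values (fixed index base 32, FIXED_CTR0 = 0x309):
 */
#if 0	/* standalone user-space demo, not part of the kernel build */
#include <stdio.h>

int main(void)
{
	const unsigned int fixed_ctr0 = 0x309, idx_fixed = 32;
	unsigned int event_base = fixed_ctr0 - idx_fixed;
	unsigned int idx;

	for (idx = idx_fixed; idx < idx_fixed + 3; idx++)
		printf("idx %u -> MSR %#x\n", idx, event_base + idx);
	return 0;
}
#endif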
447a194b
SE
838static inline int match_prev_assignment(struct hw_perf_event *hwc,
839 struct cpu_hw_events *cpuc,
840 int i)
841{
842 return hwc->idx == cpuc->assign[i] &&
843 hwc->last_cpu == smp_processor_id() &&
844 hwc->last_tag == cpuc->tags[i];
845}
846
a4eaf7f1
PZ
847static void x86_pmu_start(struct perf_event *event, int flags);
848static void x86_pmu_stop(struct perf_event *event, int flags);
2e841873 849
a4eaf7f1 850static void x86_pmu_enable(struct pmu *pmu)
ee06094f 851{
1da53e02
SE
852 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
853 struct perf_event *event;
854 struct hw_perf_event *hwc;
11164cd4 855 int i, added = cpuc->n_added;
1da53e02 856
85cf9dba 857 if (!x86_pmu_initialized())
2b9ff0db 858 return;
1a6e21f7
PZ
859
860 if (cpuc->enabled)
861 return;
862
1da53e02 863 if (cpuc->n_added) {
19925ce7 864 int n_running = cpuc->n_events - cpuc->n_added;
1da53e02
SE
865 /*
866 * apply assignment obtained either from
867 * hw_perf_group_sched_in() or x86_pmu_enable()
868 *
869 * step1: save events moving to new counters
870 * step2: reprogram moved events into new counters
871 */
19925ce7 872 for (i = 0; i < n_running; i++) {
1da53e02
SE
873 event = cpuc->event_list[i];
874 hwc = &event->hw;
875
447a194b
SE
876 /*
877 * we can avoid reprogramming counter if:
878 * - assigned same counter as last time
879 * - running on same CPU as last time
880 * - no other event has used the counter since
881 */
882 if (hwc->idx == -1 ||
883 match_prev_assignment(hwc, cpuc, i))
1da53e02
SE
884 continue;
885
a4eaf7f1
PZ
886 /*
887 * Ensure we don't accidentally enable a stopped
888 * counter simply because we rescheduled.
889 */
890 if (hwc->state & PERF_HES_STOPPED)
891 hwc->state |= PERF_HES_ARCH;
892
893 x86_pmu_stop(event, PERF_EF_UPDATE);
1da53e02
SE
894 }
895
896 for (i = 0; i < cpuc->n_events; i++) {
1da53e02
SE
897 event = cpuc->event_list[i];
898 hwc = &event->hw;
899
45e16a68 900 if (!match_prev_assignment(hwc, cpuc, i))
447a194b 901 x86_assign_hw_event(event, cpuc, i);
45e16a68
PZ
902 else if (i < n_running)
903 continue;
1da53e02 904
a4eaf7f1
PZ
905 if (hwc->state & PERF_HES_ARCH)
906 continue;
907
908 x86_pmu_start(event, PERF_EF_RELOAD);
1da53e02
SE
909 }
910 cpuc->n_added = 0;
911 perf_events_lapic_init();
912 }
1a6e21f7
PZ
913
914 cpuc->enabled = 1;
915 barrier();
916
11164cd4 917 x86_pmu.enable_all(added);
ee06094f 918}
ee06094f 919
31fa58af
RR
920static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
921 u64 enable_mask)
b0f3f28e 922{
31fa58af 923 wrmsrl(hwc->config_base + hwc->idx, hwc->config | enable_mask);
b0f3f28e
PZ
924}
925
aff3d91a 926static inline void x86_pmu_disable_event(struct perf_event *event)
b0f3f28e 927{
aff3d91a 928 struct hw_perf_event *hwc = &event->hw;
7645a24c
PZ
929
930 wrmsrl(hwc->config_base + hwc->idx, hwc->config);
b0f3f28e
PZ
931}
932
245b2e70 933static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
241771ef 934
ee06094f
IM
935/*
936 * Set the next IRQ period, based on the hwc->period_left value.
cdd6c482 937 * To be called with the event disabled in hw:
ee06094f 938 */
e4abb5d4 939static int
07088edb 940x86_perf_event_set_period(struct perf_event *event)
241771ef 941{
07088edb 942 struct hw_perf_event *hwc = &event->hw;
e7850595 943 s64 left = local64_read(&hwc->period_left);
e4abb5d4 944 s64 period = hwc->sample_period;
7645a24c 945 int ret = 0, idx = hwc->idx;
ee06094f 946
30dd568c
MM
947 if (idx == X86_PMC_IDX_FIXED_BTS)
948 return 0;
949
ee06094f 950 /*
af901ca1 951 * If we are way outside a reasonable range then just skip forward:
ee06094f
IM
952 */
953 if (unlikely(left <= -period)) {
954 left = period;
e7850595 955 local64_set(&hwc->period_left, left);
9e350de3 956 hwc->last_period = period;
e4abb5d4 957 ret = 1;
ee06094f
IM
958 }
959
960 if (unlikely(left <= 0)) {
961 left += period;
e7850595 962 local64_set(&hwc->period_left, left);
9e350de3 963 hwc->last_period = period;
e4abb5d4 964 ret = 1;
ee06094f 965 }
1c80f4b5 966 /*
dfc65094 967 * Quirk: certain CPUs don't like it if just 1 hw_event is left:
1c80f4b5
IM
968 */
969 if (unlikely(left < 2))
970 left = 2;
241771ef 971
e4abb5d4
PZ
972 if (left > x86_pmu.max_period)
973 left = x86_pmu.max_period;
974
245b2e70 975 per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;
ee06094f
IM
976
977 /*
cdd6c482 978 * The hw event starts counting from this event offset,
ee06094f
IM
979 * mark it to be able to extra future deltas:
980 */
e7850595 981 local64_set(&hwc->prev_count, (u64)-left);
ee06094f 982
68aa00ac
CG
983 wrmsrl(hwc->event_base + idx, (u64)(-left) & x86_pmu.cntval_mask);
984
985 /*
986 * Due to an erratum on certain CPUs we need
987 * a second write to be sure the register
988 * is updated properly
989 */
990 if (x86_pmu.perfctr_second_write) {
991 wrmsrl(hwc->event_base + idx,
948b1bb8 992 (u64)(-left) & x86_pmu.cntval_mask);
68aa00ac 993 }
e4abb5d4 994
cdd6c482 995 perf_event_update_userpage(event);
194002b2 996
e4abb5d4 997 return ret;
2f18d1e8
IM
998}
999
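/*
 * Illustrative aside, not part of the kernel source above: the counter is
 * programmed with the negated "left" value, masked to the counter width, so
 * that it overflows (and raises the PMI) after exactly 'left' increments. A
 * standalone sketch of the value that ends up in the MSR, assuming a 48-bit
 * counter:
 */
#if 0	/* standalone user-space demo, not part of the kernel build */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t cntval_mask = (1ULL << 48) - 1;
	int64_t left = 100000;			/* remaining sample period */
	uint64_t programmed = (uint64_t)(-left) & cntval_mask;

	/* the counter counts up from 'programmed' and wraps after 'left' events */
	printf("write %#llx, overflow after %llu events\n",
	       (unsigned long long)programmed,
	       (unsigned long long)(cntval_mask + 1 - programmed));
	return 0;
}
#endif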
aff3d91a 1000static void x86_pmu_enable_event(struct perf_event *event)
7c90cc45 1001{
0a3aee0d 1002 if (__this_cpu_read(cpu_hw_events.enabled))
31fa58af
RR
1003 __x86_pmu_enable_event(&event->hw,
1004 ARCH_PERFMON_EVENTSEL_ENABLE);
241771ef
IM
1005}
1006
b690081d 1007/*
a4eaf7f1 1008 * Add a single event to the PMU.
1da53e02
SE
1009 *
1010 * The event is added to the group of enabled events
1011 * but only if it can be scheduled with existing events.
fe9081cc 1012 */
a4eaf7f1 1013static int x86_pmu_add(struct perf_event *event, int flags)
fe9081cc
PZ
1014{
1015 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1da53e02
SE
1016 struct hw_perf_event *hwc;
1017 int assign[X86_PMC_IDX_MAX];
1018 int n, n0, ret;
fe9081cc 1019
1da53e02 1020 hwc = &event->hw;
fe9081cc 1021
33696fc0 1022 perf_pmu_disable(event->pmu);
1da53e02 1023 n0 = cpuc->n_events;
24cd7f54
PZ
1024 ret = n = collect_events(cpuc, event, false);
1025 if (ret < 0)
1026 goto out;
53b441a5 1027
a4eaf7f1
PZ
1028 hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
1029 if (!(flags & PERF_EF_START))
1030 hwc->state |= PERF_HES_ARCH;
1031
4d1c52b0
LM
1032 /*
1033 * If group events scheduling transaction was started,
1034 * skip the schedulability test here, it will be performed
a4eaf7f1 1035 * at commit time (->commit_txn) as a whole
4d1c52b0 1036 */
8d2cacbb 1037 if (cpuc->group_flag & PERF_EVENT_TXN)
24cd7f54 1038 goto done_collect;
4d1c52b0 1039
a072738e 1040 ret = x86_pmu.schedule_events(cpuc, n, assign);
1da53e02 1041 if (ret)
24cd7f54 1042 goto out;
1da53e02
SE
1043 /*
1044 * copy the new assignment; now that we know it is possible
1045 * it will be used by hw_perf_enable()
1046 */
1047 memcpy(cpuc->assign, assign, n*sizeof(int));
7e2ae347 1048
24cd7f54 1049done_collect:
1da53e02 1050 cpuc->n_events = n;
356e1f2e 1051 cpuc->n_added += n - n0;
90151c35 1052 cpuc->n_txn += n - n0;
95cdd2e7 1053
24cd7f54
PZ
1054 ret = 0;
1055out:
33696fc0 1056 perf_pmu_enable(event->pmu);
24cd7f54 1057 return ret;
241771ef
IM
1058}
1059
a4eaf7f1 1060static void x86_pmu_start(struct perf_event *event, int flags)
d76a0812 1061{
c08053e6
PZ
1062 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1063 int idx = event->hw.idx;
1064
a4eaf7f1
PZ
1065 if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
1066 return;
1067
1068 if (WARN_ON_ONCE(idx == -1))
1069 return;
1070
1071 if (flags & PERF_EF_RELOAD) {
1072 WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
1073 x86_perf_event_set_period(event);
1074 }
1075
1076 event->hw.state = 0;
d76a0812 1077
c08053e6
PZ
1078 cpuc->events[idx] = event;
1079 __set_bit(idx, cpuc->active_mask);
63e6be6d 1080 __set_bit(idx, cpuc->running);
aff3d91a 1081 x86_pmu.enable(event);
c08053e6 1082 perf_event_update_userpage(event);
a78ac325
PZ
1083}
1084
cdd6c482 1085void perf_event_print_debug(void)
241771ef 1086{
2f18d1e8 1087 u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
ca037701 1088 u64 pebs;
cdd6c482 1089 struct cpu_hw_events *cpuc;
5bb9efe3 1090 unsigned long flags;
1e125676
IM
1091 int cpu, idx;
1092
948b1bb8 1093 if (!x86_pmu.num_counters)
1e125676 1094 return;
241771ef 1095
5bb9efe3 1096 local_irq_save(flags);
241771ef
IM
1097
1098 cpu = smp_processor_id();
cdd6c482 1099 cpuc = &per_cpu(cpu_hw_events, cpu);
241771ef 1100
faa28ae0 1101 if (x86_pmu.version >= 2) {
a1ef58f4
JSR
1102 rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
1103 rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
1104 rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
1105 rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);
ca037701 1106 rdmsrl(MSR_IA32_PEBS_ENABLE, pebs);
a1ef58f4
JSR
1107
1108 pr_info("\n");
1109 pr_info("CPU#%d: ctrl: %016llx\n", cpu, ctrl);
1110 pr_info("CPU#%d: status: %016llx\n", cpu, status);
1111 pr_info("CPU#%d: overflow: %016llx\n", cpu, overflow);
1112 pr_info("CPU#%d: fixed: %016llx\n", cpu, fixed);
ca037701 1113 pr_info("CPU#%d: pebs: %016llx\n", cpu, pebs);
f87ad35d 1114 }
7645a24c 1115 pr_info("CPU#%d: active: %016llx\n", cpu, *(u64 *)cpuc->active_mask);
241771ef 1116
948b1bb8 1117 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
4a06bd85
RR
1118 rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
1119 rdmsrl(x86_pmu.perfctr + idx, pmc_count);
241771ef 1120
245b2e70 1121 prev_left = per_cpu(pmc_prev_left[idx], cpu);
241771ef 1122
a1ef58f4 1123 pr_info("CPU#%d: gen-PMC%d ctrl: %016llx\n",
241771ef 1124 cpu, idx, pmc_ctrl);
a1ef58f4 1125 pr_info("CPU#%d: gen-PMC%d count: %016llx\n",
241771ef 1126 cpu, idx, pmc_count);
a1ef58f4 1127 pr_info("CPU#%d: gen-PMC%d left: %016llx\n",
ee06094f 1128 cpu, idx, prev_left);
241771ef 1129 }
948b1bb8 1130 for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
2f18d1e8
IM
1131 rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);
1132
a1ef58f4 1133 pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
2f18d1e8
IM
1134 cpu, idx, pmc_count);
1135 }
5bb9efe3 1136 local_irq_restore(flags);
241771ef
IM
1137}
1138
a4eaf7f1 1139static void x86_pmu_stop(struct perf_event *event, int flags)
241771ef 1140{
d76a0812 1141 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
cdd6c482 1142 struct hw_perf_event *hwc = &event->hw;
241771ef 1143
a4eaf7f1
PZ
1144 if (__test_and_clear_bit(hwc->idx, cpuc->active_mask)) {
1145 x86_pmu.disable(event);
1146 cpuc->events[hwc->idx] = NULL;
1147 WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
1148 hwc->state |= PERF_HES_STOPPED;
1149 }
30dd568c 1150
a4eaf7f1
PZ
1151 if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
1152 /*
1153 * Drain the remaining delta count out of an event
1154 * that we are disabling:
1155 */
1156 x86_perf_event_update(event);
1157 hwc->state |= PERF_HES_UPTODATE;
1158 }
2e841873
PZ
1159}
1160
a4eaf7f1 1161static void x86_pmu_del(struct perf_event *event, int flags)
2e841873
PZ
1162{
1163 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1164 int i;
1165
90151c35
SE
1166 /*
1167 * If we're called during a txn, we don't need to do anything.
1168 * The events never got scheduled and ->cancel_txn will truncate
1169 * the event_list.
1170 */
8d2cacbb 1171 if (cpuc->group_flag & PERF_EVENT_TXN)
90151c35
SE
1172 return;
1173
a4eaf7f1 1174 x86_pmu_stop(event, PERF_EF_UPDATE);
194002b2 1175
1da53e02
SE
1176 for (i = 0; i < cpuc->n_events; i++) {
1177 if (event == cpuc->event_list[i]) {
1178
1179 if (x86_pmu.put_event_constraints)
1180 x86_pmu.put_event_constraints(cpuc, event);
1181
1182 while (++i < cpuc->n_events)
1183 cpuc->event_list[i-1] = cpuc->event_list[i];
1184
1185 --cpuc->n_events;
6c9687ab 1186 break;
1da53e02
SE
1187 }
1188 }
cdd6c482 1189 perf_event_update_userpage(event);
241771ef
IM
1190}
1191
8c48e444 1192static int x86_pmu_handle_irq(struct pt_regs *regs)
a29aa8a7 1193{
df1a132b 1194 struct perf_sample_data data;
cdd6c482
IM
1195 struct cpu_hw_events *cpuc;
1196 struct perf_event *event;
11d1578f 1197 int idx, handled = 0;
9029a5e3
IM
1198 u64 val;
1199
dc1d628a 1200 perf_sample_data_init(&data, 0);
df1a132b 1201
cdd6c482 1202 cpuc = &__get_cpu_var(cpu_hw_events);
962bf7a6 1203
948b1bb8 1204 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
63e6be6d
RR
1205 if (!test_bit(idx, cpuc->active_mask)) {
1206 /*
1207 * Though we deactivated the counter, some cpus
1208 * might still deliver spurious interrupts that
1209 * are still in flight. Catch them:
1210 */
1211 if (__test_and_clear_bit(idx, cpuc->running))
1212 handled++;
a29aa8a7 1213 continue;
63e6be6d 1214 }
962bf7a6 1215
cdd6c482 1216 event = cpuc->events[idx];
a4016a79 1217
cc2ad4ba 1218 val = x86_perf_event_update(event);
948b1bb8 1219 if (val & (1ULL << (x86_pmu.cntval_bits - 1)))
48e22d56 1220 continue;
962bf7a6 1221
9e350de3 1222 /*
cdd6c482 1223 * event overflow
9e350de3 1224 */
4177c42a 1225 handled++;
cdd6c482 1226 data.period = event->hw.last_period;
9e350de3 1227
07088edb 1228 if (!x86_perf_event_set_period(event))
e4abb5d4
PZ
1229 continue;
1230
cdd6c482 1231 if (perf_event_overflow(event, 1, &data, regs))
a4eaf7f1 1232 x86_pmu_stop(event, 0);
a29aa8a7 1233 }
962bf7a6 1234
9e350de3
PZ
1235 if (handled)
1236 inc_irq_stat(apic_perf_irqs);
1237
a29aa8a7
RR
1238 return handled;
1239}
39d81eab 1240
cdd6c482 1241void perf_events_lapic_init(void)
241771ef 1242{
04da8a43 1243 if (!x86_pmu.apic || !x86_pmu_initialized())
241771ef 1244 return;
85cf9dba 1245
241771ef 1246 /*
c323d95f 1247 * Always use NMI for PMU
241771ef 1248 */
c323d95f 1249 apic_write(APIC_LVTPC, APIC_DM_NMI);
241771ef
IM
1250}
1251
4177c42a
RR
1252struct pmu_nmi_state {
1253 unsigned int marked;
1254 int handled;
1255};
1256
1257static DEFINE_PER_CPU(struct pmu_nmi_state, pmu_nmi);
1258
241771ef 1259static int __kprobes
cdd6c482 1260perf_event_nmi_handler(struct notifier_block *self,
241771ef
IM
1261 unsigned long cmd, void *__args)
1262{
1263 struct die_args *args = __args;
4177c42a
RR
1264 unsigned int this_nmi;
1265 int handled;
b0f3f28e 1266
cdd6c482 1267 if (!atomic_read(&active_events))
63a809a2
PZ
1268 return NOTIFY_DONE;
1269
b0f3f28e
PZ
1270 switch (cmd) {
1271 case DIE_NMI:
b0f3f28e 1272 break;
4177c42a
RR
1273 case DIE_NMIUNKNOWN:
1274 this_nmi = percpu_read(irq_stat.__nmi_count);
0a3aee0d 1275 if (this_nmi != __this_cpu_read(pmu_nmi.marked))
4177c42a
RR
1276 /* let the kernel handle the unknown nmi */
1277 return NOTIFY_DONE;
1278 /*
1279 * This one is a PMU back-to-back nmi. Two events
1280 * trigger 'simultaneously' raising two back-to-back
1281 * NMIs. If the first NMI handles both, the latter
1282 * will be empty and daze the CPU. So, we drop it to
1283 * avoid false-positive 'unknown nmi' messages.
1284 */
1285 return NOTIFY_STOP;
b0f3f28e 1286 default:
241771ef 1287 return NOTIFY_DONE;
b0f3f28e 1288 }
241771ef 1289
241771ef 1290 apic_write(APIC_LVTPC, APIC_DM_NMI);
4177c42a
RR
1291
1292 handled = x86_pmu.handle_irq(args->regs);
1293 if (!handled)
1294 return NOTIFY_DONE;
1295
1296 this_nmi = percpu_read(irq_stat.__nmi_count);
1297 if ((handled > 1) ||
1298 /* the next nmi could be a back-to-back nmi */
0a3aee0d TH 1299 ((__this_cpu_read(pmu_nmi.marked) == this_nmi) &&
1300 (__this_cpu_read(pmu_nmi.handled) > 1))) {
4177c42a
RR
1301 /*
1302 * We could have two subsequent back-to-back nmis: The
1303 * first handles more than one counter, the 2nd
1304 * handles only one counter and the 3rd handles no
1305 * counter.
1306 *
1307 * This is the 2nd nmi because the previous was
1308 * handling more than one counter. We will mark the
1309 * next (3rd) and then drop it if unhandled.
1310 */
0a3aee0d
TH
1311 __this_cpu_write(pmu_nmi.marked, this_nmi + 1);
1312 __this_cpu_write(pmu_nmi.handled, handled);
4177c42a 1313 }
241771ef 1314
a4016a79 1315 return NOTIFY_STOP;
241771ef
IM
1316}
1317
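/*
 * Illustrative aside, not part of the kernel source above: the back-to-back
 * NMI suppression in perf_event_nmi_handler() is a small per-cpu state
 * machine. When one PMI handled more than one counter, the *next* NMI number
 * is "marked"; if that NMI then shows up as DIE_NMIUNKNOWN it is swallowed
 * instead of being reported. A sketch of the bookkeeping with the per-cpu
 * state made explicit:
 */
#if 0	/* illustrative sketch, not built */
#include <stdbool.h>

struct pmu_nmi_demo {
	unsigned int marked;	/* NMI number expected to be spurious */
	int handled;		/* counters handled by the previous PMI */
};

/* after a PMI that handled 'handled' counters as NMI number 'this_nmi' */
static void pmu_nmi_note(struct pmu_nmi_demo *s, unsigned int this_nmi, int handled)
{
	if (handled > 1 || (s->marked == this_nmi && s->handled > 1)) {
		s->marked = this_nmi + 1;
		s->handled = handled;
	}
}

/* on an unknown NMI: true means "drop it silently" */
static bool pmu_nmi_swallow(struct pmu_nmi_demo *s, unsigned int this_nmi)
{
	return s->marked == this_nmi;
}
#endif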
f22f54f4
PZ
1318static __read_mostly struct notifier_block perf_event_nmi_notifier = {
1319 .notifier_call = perf_event_nmi_handler,
1320 .next = NULL,
166d7514 1321 .priority = NMI_LOCAL_LOW_PRIOR,
f22f54f4
PZ
1322};
1323
63b14649 1324static struct event_constraint unconstrained;
38331f62 1325static struct event_constraint emptyconstraint;
63b14649 1326
63b14649 1327static struct event_constraint *
f22f54f4 1328x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
1da53e02 1329{
63b14649 1330 struct event_constraint *c;
1da53e02 1331
1da53e02
SE
1332 if (x86_pmu.event_constraints) {
1333 for_each_event_constraint(c, x86_pmu.event_constraints) {
63b14649
PZ
1334 if ((event->hw.config & c->cmask) == c->code)
1335 return c;
1da53e02
SE
1336 }
1337 }
63b14649
PZ
1338
1339 return &unconstrained;
1da53e02
SE
1340}
1341
f22f54f4
PZ
1342#include "perf_event_amd.c"
1343#include "perf_event_p6.c"
a072738e 1344#include "perf_event_p4.c"
caff2bef 1345#include "perf_event_intel_lbr.c"
ca037701 1346#include "perf_event_intel_ds.c"
f22f54f4 1347#include "perf_event_intel.c"
f87ad35d 1348
3f6da390
PZ
1349static int __cpuinit
1350x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
1351{
1352 unsigned int cpu = (long)hcpu;
b38b24ea 1353 int ret = NOTIFY_OK;
3f6da390
PZ
1354
1355 switch (action & ~CPU_TASKS_FROZEN) {
1356 case CPU_UP_PREPARE:
1357 if (x86_pmu.cpu_prepare)
b38b24ea 1358 ret = x86_pmu.cpu_prepare(cpu);
3f6da390
PZ
1359 break;
1360
1361 case CPU_STARTING:
1362 if (x86_pmu.cpu_starting)
1363 x86_pmu.cpu_starting(cpu);
1364 break;
1365
1366 case CPU_DYING:
1367 if (x86_pmu.cpu_dying)
1368 x86_pmu.cpu_dying(cpu);
1369 break;
1370
b38b24ea 1371 case CPU_UP_CANCELED:
3f6da390
PZ
1372 case CPU_DEAD:
1373 if (x86_pmu.cpu_dead)
1374 x86_pmu.cpu_dead(cpu);
1375 break;
1376
1377 default:
1378 break;
1379 }
1380
b38b24ea 1381 return ret;
3f6da390
PZ
1382}
1383
12558038
CG
1384static void __init pmu_check_apic(void)
1385{
1386 if (cpu_has_apic)
1387 return;
1388
1389 x86_pmu.apic = 0;
1390 pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n");
1391 pr_info("no hardware sampling interrupt available.\n");
1392}
1393
004417a6 1394int __init init_hw_perf_events(void)
b56a3802 1395{
b622d644 1396 struct event_constraint *c;
72eae04d
RR
1397 int err;
1398
cdd6c482 1399 pr_info("Performance Events: ");
1123e3ad 1400
b56a3802
JSR
1401 switch (boot_cpu_data.x86_vendor) {
1402 case X86_VENDOR_INTEL:
72eae04d 1403 err = intel_pmu_init();
b56a3802 1404 break;
f87ad35d 1405 case X86_VENDOR_AMD:
72eae04d 1406 err = amd_pmu_init();
f87ad35d 1407 break;
4138960a 1408 default:
004417a6 1409 return 0;
b56a3802 1410 }
1123e3ad 1411 if (err != 0) {
cdd6c482 1412 pr_cont("no PMU driver, software events only.\n");
004417a6 1413 return 0;
1123e3ad 1414 }
b56a3802 1415
12558038
CG
1416 pmu_check_apic();
1417
33c6d6a7 1418 /* sanity check that the hardware exists or is emulated */
4407204c 1419 if (!check_hw_exists())
004417a6 1420 return 0;
33c6d6a7 1421
1123e3ad 1422 pr_cont("%s PMU driver.\n", x86_pmu.name);
faa28ae0 1423
3c44780b
PZ
1424 if (x86_pmu.quirks)
1425 x86_pmu.quirks();
1426
948b1bb8 1427 if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC) {
cdd6c482 1428 WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
948b1bb8 RR 1429 x86_pmu.num_counters, X86_PMC_MAX_GENERIC);
1430 x86_pmu.num_counters = X86_PMC_MAX_GENERIC;
241771ef 1431 }
948b1bb8 1432 x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
241771ef 1433
948b1bb8 1434 if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) {
cdd6c482 1435 WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
948b1bb8 RR 1436 x86_pmu.num_counters_fixed, X86_PMC_MAX_FIXED);
1437 x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED;
703e937c 1438 }
862a1a5f 1439
d6dc0b4e 1440 x86_pmu.intel_ctrl |=
948b1bb8 1441 ((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED;
241771ef 1442
cdd6c482
IM
1443 perf_events_lapic_init();
1444 register_die_notifier(&perf_event_nmi_notifier);
1123e3ad 1445
63b14649 1446 unconstrained = (struct event_constraint)
948b1bb8
RR
1447 __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1,
1448 0, x86_pmu.num_counters);
63b14649 1449
b622d644
PZ
1450 if (x86_pmu.event_constraints) {
1451 for_each_event_constraint(c, x86_pmu.event_constraints) {
a098f448 1452 if (c->cmask != X86_RAW_EVENT_MASK)
b622d644
PZ
1453 continue;
1454
948b1bb8
RR
1455 c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
1456 c->weight += x86_pmu.num_counters;
b622d644
PZ
1457 }
1458 }
1459
57c0c15b 1460 pr_info("... version: %d\n", x86_pmu.version);
948b1bb8
RR
1461 pr_info("... bit width: %d\n", x86_pmu.cntval_bits);
1462 pr_info("... generic registers: %d\n", x86_pmu.num_counters);
1463 pr_info("... value mask: %016Lx\n", x86_pmu.cntval_mask);
57c0c15b 1464 pr_info("... max period: %016Lx\n", x86_pmu.max_period);
948b1bb8 1465 pr_info("... fixed-purpose events: %d\n", x86_pmu.num_counters_fixed);
d6dc0b4e 1466 pr_info("... event mask: %016Lx\n", x86_pmu.intel_ctrl);
3f6da390 1467
2e80a82a 1468 perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
3f6da390 1469 perf_cpu_notifier(x86_pmu_notifier);
004417a6
PZ
1470
1471 return 0;
241771ef 1472}
004417a6 1473early_initcall(init_hw_perf_events);
621a01ea 1474
cdd6c482 1475static inline void x86_pmu_read(struct perf_event *event)
ee06094f 1476{
cc2ad4ba 1477 x86_perf_event_update(event);
ee06094f
IM
1478}
1479
4d1c52b0
LM
1480/*
1481 * Start group events scheduling transaction
1482 * Set the flag to make pmu::enable() not perform the
1483 * schedulability test, it will be performed at commit time
1484 */
51b0fe39 1485static void x86_pmu_start_txn(struct pmu *pmu)
4d1c52b0 1486{
33696fc0 1487 perf_pmu_disable(pmu);
0a3aee0d
TH
1488 __this_cpu_or(cpu_hw_events.group_flag, PERF_EVENT_TXN);
1489 __this_cpu_write(cpu_hw_events.n_txn, 0);
4d1c52b0
LM
1490}
1491
1492/*
1493 * Stop group events scheduling transaction
1494 * Clear the flag and pmu::enable() will perform the
1495 * schedulability test.
1496 */
51b0fe39 1497static void x86_pmu_cancel_txn(struct pmu *pmu)
4d1c52b0 1498{
0a3aee0d 1499 __this_cpu_and(cpu_hw_events.group_flag, ~PERF_EVENT_TXN);
90151c35
SE
1500 /*
1501 * Truncate the collected events.
1502 */
0a3aee0d
TH
1503 __this_cpu_sub(cpu_hw_events.n_added, __this_cpu_read(cpu_hw_events.n_txn));
1504 __this_cpu_sub(cpu_hw_events.n_events, __this_cpu_read(cpu_hw_events.n_txn));
33696fc0 1505 perf_pmu_enable(pmu);
4d1c52b0
LM
1506}
1507
1508/*
1509 * Commit group events scheduling transaction
1510 * Perform the group schedulability test as a whole
1511 * Return 0 if success
1512 */
51b0fe39 1513static int x86_pmu_commit_txn(struct pmu *pmu)
4d1c52b0
LM
1514{
1515 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1516 int assign[X86_PMC_IDX_MAX];
1517 int n, ret;
1518
1519 n = cpuc->n_events;
1520
1521 if (!x86_pmu_initialized())
1522 return -EAGAIN;
1523
1524 ret = x86_pmu.schedule_events(cpuc, n, assign);
1525 if (ret)
1526 return ret;
1527
1528 /*
1529 * copy the new assignment; now that we know it is possible
1530 * it will be used by hw_perf_enable()
1531 */
1532 memcpy(cpuc->assign, assign, n*sizeof(int));
1533
8d2cacbb 1534 cpuc->group_flag &= ~PERF_EVENT_TXN;
33696fc0 1535 perf_pmu_enable(pmu);
4d1c52b0
LM
1536 return 0;
1537}
1538
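/*
 * Illustrative aside, not part of the kernel source above: the generic perf
 * core drives these three transaction hooks roughly as sketched here when it
 * tries to schedule a whole event group at once (simplified; the real logic
 * lives in the generic perf core, not in this file):
 */
#if 0	/* illustrative sketch, not built */
static int schedule_group_example(struct pmu *p, struct perf_event **events, int n)
{
	int i;

	p->start_txn(p);

	for (i = 0; i < n; i++) {
		if (p->add(events[i], PERF_EF_START))
			goto fail;
	}

	if (!p->commit_txn(p))
		return 0;	/* the whole group fits on the PMU */
fail:
	p->cancel_txn(p);
	return -EAGAIN;
}
#endif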
ca037701
PZ
1539/*
1540 * validate that we can schedule this event
1541 */
1542static int validate_event(struct perf_event *event)
1543{
1544 struct cpu_hw_events *fake_cpuc;
1545 struct event_constraint *c;
1546 int ret = 0;
1547
1548 fake_cpuc = kmalloc(sizeof(*fake_cpuc), GFP_KERNEL | __GFP_ZERO);
1549 if (!fake_cpuc)
1550 return -ENOMEM;
1551
1552 c = x86_pmu.get_event_constraints(fake_cpuc, event);
1553
1554 if (!c || !c->weight)
1555 ret = -ENOSPC;
1556
1557 if (x86_pmu.put_event_constraints)
1558 x86_pmu.put_event_constraints(fake_cpuc, event);
1559
1560 kfree(fake_cpuc);
1561
1562 return ret;
1563}
1564
1da53e02
SE
1565/*
1566 * validate a single event group
1567 *
1568 * validation include:
184f412c
IM
1569 * - check events are compatible with each other
1570 * - events do not compete for the same counter
1571 * - number of events <= number of counters
1da53e02
SE
1572 *
1573 * validation ensures the group can be loaded onto the
1574 * PMU if it was the only group available.
1575 */
fe9081cc
PZ
1576static int validate_group(struct perf_event *event)
1577{
1da53e02 1578 struct perf_event *leader = event->group_leader;
502568d5
PZ
1579 struct cpu_hw_events *fake_cpuc;
1580 int ret, n;
fe9081cc 1581
502568d5
PZ
1582 ret = -ENOMEM;
1583 fake_cpuc = kmalloc(sizeof(*fake_cpuc), GFP_KERNEL | __GFP_ZERO);
1584 if (!fake_cpuc)
1585 goto out;
fe9081cc 1586
1da53e02
SE
1587 /*
1588 * the event is not yet connected with its
1589 * siblings therefore we must first collect
1590 * existing siblings, then add the new event
1591 * before we can simulate the scheduling
1592 */
502568d5
PZ
1593 ret = -ENOSPC;
1594 n = collect_events(fake_cpuc, leader, true);
1da53e02 1595 if (n < 0)
502568d5 1596 goto out_free;
fe9081cc 1597
502568d5
PZ
1598 fake_cpuc->n_events = n;
1599 n = collect_events(fake_cpuc, event, false);
1da53e02 1600 if (n < 0)
502568d5 1601 goto out_free;
fe9081cc 1602
502568d5 1603 fake_cpuc->n_events = n;
1da53e02 1604
a072738e 1605 ret = x86_pmu.schedule_events(fake_cpuc, n, NULL);
502568d5
PZ
1606
1607out_free:
1608 kfree(fake_cpuc);
1609out:
1610 return ret;
fe9081cc
PZ
1611}
1612
b0a873eb 1613int x86_pmu_event_init(struct perf_event *event)
621a01ea 1614{
51b0fe39 1615 struct pmu *tmp;
621a01ea
IM
1616 int err;
1617
b0a873eb
PZ
1618 switch (event->attr.type) {
1619 case PERF_TYPE_RAW:
1620 case PERF_TYPE_HARDWARE:
1621 case PERF_TYPE_HW_CACHE:
1622 break;
1623
1624 default:
1625 return -ENOENT;
1626 }
1627
1628 err = __x86_pmu_event_init(event);
fe9081cc 1629 if (!err) {
8113070d
SE
1630 /*
1631 * we temporarily connect event to its pmu
1632 * such that validate_group() can classify
1633 * it as an x86 event using is_x86_event()
1634 */
1635 tmp = event->pmu;
1636 event->pmu = &pmu;
1637
fe9081cc
PZ
1638 if (event->group_leader != event)
1639 err = validate_group(event);
ca037701
PZ
1640 else
1641 err = validate_event(event);
8113070d
SE
1642
1643 event->pmu = tmp;
fe9081cc 1644 }
a1792cda 1645 if (err) {
cdd6c482
IM
1646 if (event->destroy)
1647 event->destroy(event);
a1792cda 1648 }
621a01ea 1649
b0a873eb 1650 return err;
621a01ea 1651}
d7d59fb3 1652
b0a873eb 1653static struct pmu pmu = {
a4eaf7f1
PZ
1654 .pmu_enable = x86_pmu_enable,
1655 .pmu_disable = x86_pmu_disable,
1656
b0a873eb 1657 .event_init = x86_pmu_event_init,
a4eaf7f1
PZ
1658
1659 .add = x86_pmu_add,
1660 .del = x86_pmu_del,
b0a873eb
PZ
1661 .start = x86_pmu_start,
1662 .stop = x86_pmu_stop,
1663 .read = x86_pmu_read,
a4eaf7f1 1664
b0a873eb
PZ
1665 .start_txn = x86_pmu_start_txn,
1666 .cancel_txn = x86_pmu_cancel_txn,
1667 .commit_txn = x86_pmu_commit_txn,
1668};
1669
d7d59fb3
PZ
1670/*
1671 * callchain support
1672 */
1673
d7d59fb3
PZ
1674static void
1675backtrace_warning_symbol(void *data, char *msg, unsigned long symbol)
1676{
1677 /* Ignore warnings */
1678}
1679
1680static void backtrace_warning(void *data, char *msg)
1681{
1682 /* Ignore warnings */
1683}
1684
1685static int backtrace_stack(void *data, char *name)
1686{
038e836e 1687 return 0;
d7d59fb3
PZ
1688}
1689
1690static void backtrace_address(void *data, unsigned long addr, int reliable)
1691{
1692 struct perf_callchain_entry *entry = data;
1693
70791ce9 1694 perf_callchain_store(entry, addr);
d7d59fb3
PZ
1695}
1696
1697static const struct stacktrace_ops backtrace_ops = {
1698 .warning = backtrace_warning,
1699 .warning_symbol = backtrace_warning_symbol,
1700 .stack = backtrace_stack,
1701 .address = backtrace_address,
06d65bda 1702 .walk_stack = print_context_stack_bp,
d7d59fb3
PZ
1703};
1704
56962b44
FW
1705void
1706perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
d7d59fb3 1707{
927c7a9e
FW
1708 if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
1709 /* TODO: We don't support guest os callchain now */
ed805261 1710 return;
927c7a9e
FW
1711 }
1712
70791ce9 1713 perf_callchain_store(entry, regs->ip);
d7d59fb3 1714
9c0729dc 1715 dump_trace(NULL, regs, NULL, &backtrace_ops, entry);
d7d59fb3
PZ
1716}
1717
257ef9d2
TE
1718#ifdef CONFIG_COMPAT
1719static inline int
1720perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
74193ef0 1721{
257ef9d2
TE
1722 /* 32-bit process in 64-bit kernel. */
1723 struct stack_frame_ia32 frame;
1724 const void __user *fp;
74193ef0 1725
257ef9d2
TE
1726 if (!test_thread_flag(TIF_IA32))
1727 return 0;
1728
1729 fp = compat_ptr(regs->bp);
1730 while (entry->nr < PERF_MAX_STACK_DEPTH) {
1731 unsigned long bytes;
1732 frame.next_frame = 0;
1733 frame.return_address = 0;
1734
1735 bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
1736 if (bytes != sizeof(frame))
1737 break;
74193ef0 1738
257ef9d2
TE
1739 if (fp < compat_ptr(regs->sp))
1740 break;
74193ef0 1741
70791ce9 1742 perf_callchain_store(entry, frame.return_address);
257ef9d2
TE
1743 fp = compat_ptr(frame.next_frame);
1744 }
1745 return 1;
d7d59fb3 1746}
257ef9d2
TE
1747#else
1748static inline int
1749perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
1750{
1751 return 0;
1752}
1753#endif
d7d59fb3 1754
56962b44
FW
1755void
1756perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
d7d59fb3
PZ
1757{
1758 struct stack_frame frame;
1759 const void __user *fp;
1760
927c7a9e
FW
1761 if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
1762 /* TODO: We don't support guest os callchain now */
ed805261 1763 return;
927c7a9e 1764 }
5a6cec3a 1765
74193ef0 1766 fp = (void __user *)regs->bp;
d7d59fb3 1767
70791ce9 1768 perf_callchain_store(entry, regs->ip);
d7d59fb3 1769
257ef9d2
TE
1770 if (perf_callchain_user32(regs, entry))
1771 return;
1772
f9188e02 1773 while (entry->nr < PERF_MAX_STACK_DEPTH) {
257ef9d2 1774 unsigned long bytes;
038e836e 1775 frame.next_frame = NULL;
d7d59fb3
PZ
1776 frame.return_address = 0;
1777
257ef9d2
TE
1778 bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
1779 if (bytes != sizeof(frame))
d7d59fb3
PZ
1780 break;
1781
5a6cec3a 1782 if ((unsigned long)fp < regs->sp)
d7d59fb3
PZ
1783 break;
1784
70791ce9 1785 perf_callchain_store(entry, frame.return_address);
038e836e 1786 fp = frame.next_frame;
d7d59fb3
PZ
1787 }
1788}
1789
39447b38
ZY
1790unsigned long perf_instruction_pointer(struct pt_regs *regs)
1791{
1792 unsigned long ip;
dcf46b94 1793
39447b38
ZY
1794 if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
1795 ip = perf_guest_cbs->get_guest_ip();
1796 else
1797 ip = instruction_pointer(regs);
dcf46b94 1798
39447b38
ZY
1799 return ip;
1800}
1801
1802unsigned long perf_misc_flags(struct pt_regs *regs)
1803{
1804 int misc = 0;
dcf46b94 1805
39447b38 1806 if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
dcf46b94
ZY
1807 if (perf_guest_cbs->is_user_mode())
1808 misc |= PERF_RECORD_MISC_GUEST_USER;
1809 else
1810 misc |= PERF_RECORD_MISC_GUEST_KERNEL;
1811 } else {
1812 if (user_mode(regs))
1813 misc |= PERF_RECORD_MISC_USER;
1814 else
1815 misc |= PERF_RECORD_MISC_KERNEL;
1816 }
1817
39447b38 1818 if (regs->flags & PERF_EFLAGS_EXACT)
ab608344 1819 misc |= PERF_RECORD_MISC_EXACT_IP;
39447b38
ZY
1820
1821 return misc;
1822}