perf_counter: x86: Expose INV and EDGE bits
arch/x86/kernel/cpu/perf_counter.c

/*
 * Performance counter x86 architecture code
 *
 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 * Copyright (C) 2009 Jaswinder Singh Rajput
 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * For licencing details see kernel-base/COPYING
 */

#include <linux/perf_counter.h>
#include <linux/capability.h>
#include <linux/notifier.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/sched.h>
#include <linux/uaccess.h>

#include <asm/apic.h>
#include <asm/stacktrace.h>
#include <asm/nmi.h>

static u64 perf_counter_mask __read_mostly;

struct cpu_hw_counters {
	struct perf_counter	*counters[X86_PMC_IDX_MAX];
	unsigned long		used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	unsigned long		active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	unsigned long		interrupts;
	int			enabled;
};

/*
 * struct x86_pmu - generic x86 pmu
 */
struct x86_pmu {
	const char	*name;
	int		version;
	int		(*handle_irq)(struct pt_regs *, int);
	void		(*disable_all)(void);
	void		(*enable_all)(void);
	void		(*enable)(struct hw_perf_counter *, int);
	void		(*disable)(struct hw_perf_counter *, int);
	unsigned	eventsel;
	unsigned	perfctr;
	u64		(*event_map)(int);
	u64		(*raw_event)(u64);
	int		max_events;
	int		num_counters;
	int		num_counters_fixed;
	int		counter_bits;
	u64		counter_mask;
	u64		max_period;
	u64		intel_ctrl;
};

static struct x86_pmu x86_pmu __read_mostly;

static DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters) = {
	.enabled = 1,
};

/*
 * Intel PerfMon v3. Used on Core2 and later.
 */
static const u64 intel_perfmon_event_map[] =
{
	[PERF_COUNT_CPU_CYCLES]			= 0x003c,
	[PERF_COUNT_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_CACHE_REFERENCES]		= 0x4f2e,
	[PERF_COUNT_CACHE_MISSES]		= 0x412e,
	[PERF_COUNT_BRANCH_INSTRUCTIONS]	= 0x00c4,
	[PERF_COUNT_BRANCH_MISSES]		= 0x00c5,
	[PERF_COUNT_BUS_CYCLES]			= 0x013c,
};

static u64 intel_pmu_event_map(int event)
{
	return intel_perfmon_event_map[event];
}

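/*
 * Filter a user-supplied raw config down to the EVNTSEL bits we allow
 * through: event select, unit mask, counter mask, and (as of this
 * change) the edge-detect bit (bit 18) and the invert-counter-mask
 * bit (bit 23).
 */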
static u64 intel_pmu_raw_event(u64 event)
{
#define CORE_EVNTSEL_EVENT_MASK		0x000000FFULL
#define CORE_EVNTSEL_UNIT_MASK		0x0000FF00ULL
#define CORE_EVNTSEL_EDGE_MASK		0x00040000ULL
#define CORE_EVNTSEL_INV_MASK		0x00800000ULL
#define CORE_EVNTSEL_COUNTER_MASK	0xFF000000ULL

#define CORE_EVNTSEL_MASK		\
	(CORE_EVNTSEL_EVENT_MASK |	\
	 CORE_EVNTSEL_UNIT_MASK |	\
	 CORE_EVNTSEL_EDGE_MASK |	\
	 CORE_EVNTSEL_INV_MASK |	\
	 CORE_EVNTSEL_COUNTER_MASK)

	return event & CORE_EVNTSEL_MASK;
}

/*
 * AMD Performance Monitor K7 and later.
 */
static const u64 amd_perfmon_event_map[] =
{
	[PERF_COUNT_CPU_CYCLES]			= 0x0076,
	[PERF_COUNT_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_CACHE_REFERENCES]		= 0x0080,
	[PERF_COUNT_CACHE_MISSES]		= 0x0081,
	[PERF_COUNT_BRANCH_INSTRUCTIONS]	= 0x00c4,
	[PERF_COUNT_BRANCH_MISSES]		= 0x00c5,
};

static u64 amd_pmu_event_map(int event)
{
	return amd_perfmon_event_map[event];
}

static u64 amd_pmu_raw_event(u64 event)
{
#define K7_EVNTSEL_EVENT_MASK	0x7000000FFULL
#define K7_EVNTSEL_UNIT_MASK	0x00000FF00ULL
#define K7_EVNTSEL_EDGE_MASK	0x000040000ULL
#define K7_EVNTSEL_INV_MASK	0x000800000ULL
#define K7_EVNTSEL_COUNTER_MASK	0x0FF000000ULL

#define K7_EVNTSEL_MASK			\
	(K7_EVNTSEL_EVENT_MASK |	\
	 K7_EVNTSEL_UNIT_MASK |		\
	 K7_EVNTSEL_EDGE_MASK |		\
	 K7_EVNTSEL_INV_MASK |		\
	 K7_EVNTSEL_COUNTER_MASK)

	return event & K7_EVNTSEL_MASK;
}

/*
 * Propagate counter elapsed time into the generic counter.
 * Can only be executed on the CPU where the counter is active.
 * Returns the delta events processed.
 */
static u64
x86_perf_counter_update(struct perf_counter *counter,
			struct hw_perf_counter *hwc, int idx)
{
	int shift = 64 - x86_pmu.counter_bits;
	u64 prev_raw_count, new_raw_count;
	s64 delta;

	/*
	 * Careful: an NMI might modify the previous counter value.
	 *
	 * Our tactic to handle this is to first atomically read and
	 * exchange a new raw count - then add that new-prev delta
	 * count to the generic counter atomically:
	 */
again:
	prev_raw_count = atomic64_read(&hwc->prev_count);
	rdmsrl(hwc->counter_base + idx, new_raw_count);

	if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
					new_raw_count) != prev_raw_count)
		goto again;

	/*
	 * Now we have the new raw value and have updated the prev
	 * timestamp already. We can now calculate the elapsed delta
	 * (counter-)time and add that to the generic counter.
	 *
	 * Careful, not all hw sign-extends above the physical width
	 * of the count.
	 */
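	/*
	 * Shifting up by (64 - counter_bits) and back down again
	 * sign-extends the hardware value into a full 64-bit delta.
	 */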
	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	atomic64_add(delta, &counter->count);
	atomic64_sub(delta, &hwc->period_left);

	return new_raw_count;
}

static atomic_t active_counters;
static DEFINE_MUTEX(pmc_reserve_mutex);

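/*
 * The perfctr/eventsel MSR pairs are shared with the NMI watchdog, so
 * temporarily disable it and reserve every pair before we start
 * programming them.
 */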
static bool reserve_pmc_hardware(void)
{
	int i;

	if (nmi_watchdog == NMI_LOCAL_APIC)
		disable_lapic_nmi_watchdog();

	for (i = 0; i < x86_pmu.num_counters; i++) {
		if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
			goto perfctr_fail;
	}

	for (i = 0; i < x86_pmu.num_counters; i++) {
		if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
			goto eventsel_fail;
	}

	return true;

eventsel_fail:
	for (i--; i >= 0; i--)
		release_evntsel_nmi(x86_pmu.eventsel + i);

	i = x86_pmu.num_counters;

perfctr_fail:
	for (i--; i >= 0; i--)
		release_perfctr_nmi(x86_pmu.perfctr + i);

	if (nmi_watchdog == NMI_LOCAL_APIC)
		enable_lapic_nmi_watchdog();

	return false;
}

static void release_pmc_hardware(void)
{
	int i;

	for (i = 0; i < x86_pmu.num_counters; i++) {
		release_perfctr_nmi(x86_pmu.perfctr + i);
		release_evntsel_nmi(x86_pmu.eventsel + i);
	}

	if (nmi_watchdog == NMI_LOCAL_APIC)
		enable_lapic_nmi_watchdog();
}

static void hw_perf_counter_destroy(struct perf_counter *counter)
{
	if (atomic_dec_and_mutex_lock(&active_counters, &pmc_reserve_mutex)) {
		release_pmc_hardware();
		mutex_unlock(&pmc_reserve_mutex);
	}
}

static inline int x86_pmu_initialized(void)
{
	return x86_pmu.handle_irq != NULL;
}

/*
 * Setup the hardware configuration for a given hw_event_type
 */
static int __hw_perf_counter_init(struct perf_counter *counter)
{
	struct perf_counter_hw_event *hw_event = &counter->hw_event;
	struct hw_perf_counter *hwc = &counter->hw;
	int err;

	if (!x86_pmu_initialized())
		return -ENODEV;

	err = 0;
	if (!atomic_inc_not_zero(&active_counters)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_read(&active_counters) == 0 && !reserve_pmc_hardware())
			err = -EBUSY;
		else
			atomic_inc(&active_counters);
		mutex_unlock(&pmc_reserve_mutex);
	}
	if (err)
		return err;

	/*
	 * Generate PMC IRQs:
	 * (keep 'enabled' bit clear for now)
	 */
	hwc->config = ARCH_PERFMON_EVENTSEL_INT;

	/*
	 * Count user and OS events unless requested not to.
	 */
	if (!hw_event->exclude_user)
		hwc->config |= ARCH_PERFMON_EVENTSEL_USR;
	if (!hw_event->exclude_kernel)
		hwc->config |= ARCH_PERFMON_EVENTSEL_OS;

	/*
	 * If privileged enough, allow NMI events:
	 */
	hwc->nmi = 0;
	if (hw_event->nmi) {
		if (sysctl_perf_counter_priv && !capable(CAP_SYS_ADMIN))
			return -EACCES;
		hwc->nmi = 1;
	}
	perf_counters_lapic_init(hwc->nmi);

	if (!hwc->irq_period)
		hwc->irq_period = x86_pmu.max_period;

	atomic64_set(&hwc->period_left,
			min(x86_pmu.max_period, hwc->irq_period));

	/*
	 * Raw event type provides the config in the event structure
	 */
	if (perf_event_raw(hw_event)) {
		hwc->config |= x86_pmu.raw_event(perf_event_config(hw_event));
	} else {
		if (perf_event_id(hw_event) >= x86_pmu.max_events)
			return -EINVAL;
		/*
		 * The generic map:
		 */
		hwc->config |= x86_pmu.event_map(perf_event_id(hw_event));
	}

	counter->destroy = hw_perf_counter_destroy;

	return 0;
}

static void intel_pmu_disable_all(void)
{
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
}

static void amd_pmu_disable_all(void)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	int idx;

	if (!cpuc->enabled)
		return;

	cpuc->enabled = 0;
	/*
	 * ensure we write the disable before we start disabling the
	 * counters proper, so that amd_pmu_enable_counter() does the
	 * right thing.
	 */
	barrier();

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		u64 val;

		if (!test_bit(idx, cpuc->active_mask))
			continue;
		rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
		if (!(val & ARCH_PERFMON_EVENTSEL0_ENABLE))
			continue;
		val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
		wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
	}
}

void hw_perf_disable(void)
{
	if (!x86_pmu_initialized())
		return;
	return x86_pmu.disable_all();
}

static void intel_pmu_enable_all(void)
{
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
}

static void amd_pmu_enable_all(void)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	int idx;

	if (cpuc->enabled)
		return;

	cpuc->enabled = 1;
	barrier();

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		u64 val;

		if (!test_bit(idx, cpuc->active_mask))
			continue;
		rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
		if (val & ARCH_PERFMON_EVENTSEL0_ENABLE)
			continue;
		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
		wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
	}
}

void hw_perf_enable(void)
{
	if (!x86_pmu_initialized())
		return;
	x86_pmu.enable_all();
}

static inline u64 intel_pmu_get_status(void)
{
	u64 status;

	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);

	return status;
}

static inline void intel_pmu_ack_status(u64 ack)
{
	wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
}

static inline void x86_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
{
	int err;
	err = checking_wrmsrl(hwc->config_base + idx,
			      hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE);
}

static inline void x86_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
{
	int err;
	err = checking_wrmsrl(hwc->config_base + idx,
			      hwc->config);
}

static inline void
intel_pmu_disable_fixed(struct hw_perf_counter *hwc, int __idx)
{
	int idx = __idx - X86_PMC_IDX_FIXED;
	u64 ctrl_val, mask;
	int err;

	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	err = checking_wrmsrl(hwc->config_base, ctrl_val);
}

static inline void
intel_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
{
	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_disable_fixed(hwc, idx);
		return;
	}

	x86_pmu_disable_counter(hwc, idx);
}

static inline void
amd_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
{
	x86_pmu_disable_counter(hwc, idx);
}

static DEFINE_PER_CPU(u64, prev_left[X86_PMC_IDX_MAX]);

/*
 * Set the next IRQ period, based on the hwc->period_left value.
 * To be called with the counter disabled in hw:
 */
static void
x86_perf_counter_set_period(struct perf_counter *counter,
			    struct hw_perf_counter *hwc, int idx)
{
	s64 left = atomic64_read(&hwc->period_left);
	s64 period = min(x86_pmu.max_period, hwc->irq_period);
	int err;

	/*
	 * If we are way outside a reasonable range then just skip forward:
	 */
	if (unlikely(left <= -period)) {
		left = period;
		atomic64_set(&hwc->period_left, left);
	}

	if (unlikely(left <= 0)) {
		left += period;
		atomic64_set(&hwc->period_left, left);
	}
	/*
	 * Quirk: certain CPUs don't like it if just 1 event is left:
	 */
	if (unlikely(left < 2))
		left = 2;

	per_cpu(prev_left[idx], smp_processor_id()) = left;

	/*
	 * The hw counter starts counting from this counter offset,
	 * mark it to be able to extract future deltas:
	 */
	atomic64_set(&hwc->prev_count, (u64)-left);

	err = checking_wrmsrl(hwc->counter_base + idx,
			      (u64)(-left) & x86_pmu.counter_mask);
}

static inline void
intel_pmu_enable_fixed(struct hw_perf_counter *hwc, int __idx)
{
	int idx = __idx - X86_PMC_IDX_FIXED;
	u64 ctrl_val, bits, mask;
	int err;

	/*
	 * Enable IRQ generation (0x8),
	 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
	 * if requested:
	 */
	bits = 0x8ULL;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
		bits |= 0x2;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
		bits |= 0x1;
	bits <<= (idx * 4);
	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	ctrl_val |= bits;
	err = checking_wrmsrl(hwc->config_base, ctrl_val);
}

static void intel_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
{
	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_enable_fixed(hwc, idx);
		return;
	}

	x86_pmu_enable_counter(hwc, idx);
}

static void amd_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);

	if (cpuc->enabled)
		x86_pmu_enable_counter(hwc, idx);
	else
		x86_pmu_disable_counter(hwc, idx);
}

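/*
 * Only the instructions-retired, cpu-cycles and bus-cycles events can
 * be scheduled on a fixed-purpose counter; anything else (or an NMI
 * counter) has to use a generic counter.
 */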
static int
fixed_mode_idx(struct perf_counter *counter, struct hw_perf_counter *hwc)
{
	unsigned int event;

	if (!x86_pmu.num_counters_fixed)
		return -1;

	if (unlikely(hwc->nmi))
		return -1;

	event = hwc->config & ARCH_PERFMON_EVENT_MASK;

	if (unlikely(event == x86_pmu.event_map(PERF_COUNT_INSTRUCTIONS)))
		return X86_PMC_IDX_FIXED_INSTRUCTIONS;
	if (unlikely(event == x86_pmu.event_map(PERF_COUNT_CPU_CYCLES)))
		return X86_PMC_IDX_FIXED_CPU_CYCLES;
	if (unlikely(event == x86_pmu.event_map(PERF_COUNT_BUS_CYCLES)))
		return X86_PMC_IDX_FIXED_BUS_CYCLES;

	return -1;
}

/*
 * Find a PMC slot for the freshly enabled / scheduled in counter:
 */
static int x86_pmu_enable(struct perf_counter *counter)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	struct hw_perf_counter *hwc = &counter->hw;
	int idx;

	idx = fixed_mode_idx(counter, hwc);
	if (idx >= 0) {
		/*
		 * Try to get the fixed counter, if that is already taken
		 * then try to get a generic counter:
		 */
		if (test_and_set_bit(idx, cpuc->used_mask))
			goto try_generic;

		hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
		/*
		 * We set it so that counter_base + idx in wrmsr/rdmsr maps to
		 * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
		 */
		hwc->counter_base =
			MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
		hwc->idx = idx;
	} else {
		idx = hwc->idx;
		/* Try to get the previous generic counter again */
		if (test_and_set_bit(idx, cpuc->used_mask)) {
try_generic:
			idx = find_first_zero_bit(cpuc->used_mask,
						  x86_pmu.num_counters);
			if (idx == x86_pmu.num_counters)
				return -EAGAIN;

			set_bit(idx, cpuc->used_mask);
			hwc->idx = idx;
		}
		hwc->config_base = x86_pmu.eventsel;
		hwc->counter_base = x86_pmu.perfctr;
	}

	x86_pmu.disable(hwc, idx);

	cpuc->counters[idx] = counter;
	set_bit(idx, cpuc->active_mask);

	x86_perf_counter_set_period(counter, hwc, idx);
	x86_pmu.enable(hwc, idx);

	return 0;
}

void perf_counter_print_debug(void)
{
	u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
	struct cpu_hw_counters *cpuc;
	unsigned long flags;
	int cpu, idx;

	if (!x86_pmu.num_counters)
		return;

	local_irq_save(flags);

	cpu = smp_processor_id();
	cpuc = &per_cpu(cpu_hw_counters, cpu);

	if (x86_pmu.version >= 2) {
		rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
		rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
		rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
		rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);

		pr_info("\n");
		pr_info("CPU#%d: ctrl: %016llx\n", cpu, ctrl);
		pr_info("CPU#%d: status: %016llx\n", cpu, status);
		pr_info("CPU#%d: overflow: %016llx\n", cpu, overflow);
		pr_info("CPU#%d: fixed: %016llx\n", cpu, fixed);
	}
	pr_info("CPU#%d: used: %016llx\n", cpu, *(u64 *)cpuc->used_mask);

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
		rdmsrl(x86_pmu.perfctr + idx, pmc_count);

		prev_left = per_cpu(prev_left[idx], cpu);

		pr_info("CPU#%d: gen-PMC%d ctrl: %016llx\n",
			cpu, idx, pmc_ctrl);
		pr_info("CPU#%d: gen-PMC%d count: %016llx\n",
			cpu, idx, pmc_count);
		pr_info("CPU#%d: gen-PMC%d left: %016llx\n",
			cpu, idx, prev_left);
	}
	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
		rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);

		pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
			cpu, idx, pmc_count);
	}
	local_irq_restore(flags);
}

static void x86_pmu_disable(struct perf_counter *counter)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	struct hw_perf_counter *hwc = &counter->hw;
	int idx = hwc->idx;

	/*
	 * Must be done before we disable, otherwise the nmi handler
	 * could reenable again:
	 */
	clear_bit(idx, cpuc->active_mask);
	x86_pmu.disable(hwc, idx);

	/*
	 * Make sure the cleared pointer becomes visible before we
	 * (potentially) free the counter:
	 */
	barrier();

	/*
	 * Drain the remaining delta count out of a counter
	 * that we are disabling:
	 */
	x86_perf_counter_update(counter, hwc, idx);
	cpuc->counters[idx] = NULL;
	clear_bit(idx, cpuc->used_mask);
}

/*
 * Save and restart an expired counter. Called by NMI contexts,
 * so it has to be careful about preempting normal counter ops:
 */
static void intel_pmu_save_and_restart(struct perf_counter *counter)
{
	struct hw_perf_counter *hwc = &counter->hw;
	int idx = hwc->idx;

	x86_perf_counter_update(counter, hwc, idx);
	x86_perf_counter_set_period(counter, hwc, idx);

	if (counter->state == PERF_COUNTER_STATE_ACTIVE)
		intel_pmu_enable_counter(hwc, idx);
}

/*
 * Maximum interrupt frequency of 100KHz per CPU
 */
#define PERFMON_MAX_INTERRUPTS (100000/HZ)

/*
 * This handler is triggered by the local APIC, so the APIC IRQ handling
 * rules apply:
 */
static int intel_pmu_handle_irq(struct pt_regs *regs, int nmi)
{
	struct cpu_hw_counters *cpuc;
	int bit, cpu, loops;
	u64 ack, status;

	cpu = smp_processor_id();
	cpuc = &per_cpu(cpu_hw_counters, cpu);

	perf_disable();
	status = intel_pmu_get_status();
	if (!status) {
		perf_enable();
		return 0;
	}

	loops = 0;
again:
	if (++loops > 100) {
		WARN_ONCE(1, "perfcounters: irq loop stuck!\n");
		perf_counter_print_debug();
		return 1;
	}

	inc_irq_stat(apic_perf_irqs);
	ack = status;
	for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
		struct perf_counter *counter = cpuc->counters[bit];

		clear_bit(bit, (unsigned long *) &status);
		if (!test_bit(bit, cpuc->active_mask))
			continue;

		intel_pmu_save_and_restart(counter);
		if (perf_counter_overflow(counter, nmi, regs, 0))
			intel_pmu_disable_counter(&counter->hw, bit);
	}

	intel_pmu_ack_status(ack);

	/*
	 * Repeat if there is more work to be done:
	 */
	status = intel_pmu_get_status();
	if (status)
		goto again;

	if (++cpuc->interrupts != PERFMON_MAX_INTERRUPTS)
		perf_enable();

	return 1;
}

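/*
 * AMD has no global overflow status register, so walk all active
 * counters: one whose sign bit has cleared has counted up past zero
 * from its -period start value, i.e. it overflowed.
 */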
static int amd_pmu_handle_irq(struct pt_regs *regs, int nmi)
{
	int cpu, idx, throttle = 0, handled = 0;
	struct cpu_hw_counters *cpuc;
	struct perf_counter *counter;
	struct hw_perf_counter *hwc;
	u64 val;

	cpu = smp_processor_id();
	cpuc = &per_cpu(cpu_hw_counters, cpu);

	if (++cpuc->interrupts == PERFMON_MAX_INTERRUPTS) {
		throttle = 1;
		__perf_disable();
		cpuc->enabled = 0;
		barrier();
	}

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		int disable = 0;

		if (!test_bit(idx, cpuc->active_mask))
			continue;

		counter = cpuc->counters[idx];
		hwc = &counter->hw;

		if (counter->hw_event.nmi != nmi)
			goto next;

		val = x86_perf_counter_update(counter, hwc, idx);
		if (val & (1ULL << (x86_pmu.counter_bits - 1)))
			goto next;

		/* counter overflow */
		x86_perf_counter_set_period(counter, hwc, idx);
		handled = 1;
		inc_irq_stat(apic_perf_irqs);
		disable = perf_counter_overflow(counter, nmi, regs, 0);

next:
		if (disable || throttle)
			amd_pmu_disable_counter(hwc, idx);
	}

	return handled;
}

void perf_counter_unthrottle(void)
{
	struct cpu_hw_counters *cpuc;

	if (!x86_pmu_initialized())
		return;

	cpuc = &__get_cpu_var(cpu_hw_counters);
	if (cpuc->interrupts >= PERFMON_MAX_INTERRUPTS) {
		/*
		 * Clear them before re-enabling irqs/NMIs again:
		 */
		cpuc->interrupts = 0;
		perf_enable();
	} else {
		cpuc->interrupts = 0;
	}
}

void smp_perf_counter_interrupt(struct pt_regs *regs)
{
	irq_enter();
	apic_write(APIC_LVTPC, LOCAL_PERF_VECTOR);
	ack_APIC_irq();
	x86_pmu.handle_irq(regs, 0);
	irq_exit();
}

void smp_perf_pending_interrupt(struct pt_regs *regs)
{
	irq_enter();
	ack_APIC_irq();
	inc_irq_stat(apic_pending_irqs);
	perf_counter_do_pending();
	irq_exit();
}

void set_perf_counter_pending(void)
{
	apic->send_IPI_self(LOCAL_PENDING_VECTOR);
}

void perf_counters_lapic_init(int nmi)
{
	u32 apic_val;

	if (!x86_pmu_initialized())
		return;

	/*
	 * Enable the performance counter vector in the APIC LVT:
	 */
	apic_val = apic_read(APIC_LVTERR);

	apic_write(APIC_LVTERR, apic_val | APIC_LVT_MASKED);
	if (nmi)
		apic_write(APIC_LVTPC, APIC_DM_NMI);
	else
		apic_write(APIC_LVTPC, LOCAL_PERF_VECTOR);
	apic_write(APIC_LVTERR, apic_val);
}

static int __kprobes
perf_counter_nmi_handler(struct notifier_block *self,
			 unsigned long cmd, void *__args)
{
	struct die_args *args = __args;
	struct pt_regs *regs;

	if (!atomic_read(&active_counters))
		return NOTIFY_DONE;

	switch (cmd) {
	case DIE_NMI:
	case DIE_NMI_IPI:
		break;

	default:
		return NOTIFY_DONE;
	}

	regs = args->regs;

	apic_write(APIC_LVTPC, APIC_DM_NMI);
	/*
	 * Can't rely on the handled return value to say it was our NMI, two
	 * counters could trigger 'simultaneously' raising two back-to-back NMIs.
	 *
	 * If the first NMI handles both, the latter will be empty and daze
	 * the CPU.
	 */
	x86_pmu.handle_irq(regs, 1);

	return NOTIFY_STOP;
}

static __read_mostly struct notifier_block perf_counter_nmi_notifier = {
	.notifier_call	= perf_counter_nmi_handler,
	.next		= NULL,
	.priority	= 1
};

static struct x86_pmu intel_pmu = {
	.name		= "Intel",
	.handle_irq	= intel_pmu_handle_irq,
	.disable_all	= intel_pmu_disable_all,
	.enable_all	= intel_pmu_enable_all,
	.enable		= intel_pmu_enable_counter,
	.disable	= intel_pmu_disable_counter,
	.eventsel	= MSR_ARCH_PERFMON_EVENTSEL0,
	.perfctr	= MSR_ARCH_PERFMON_PERFCTR0,
	.event_map	= intel_pmu_event_map,
	.raw_event	= intel_pmu_raw_event,
	.max_events	= ARRAY_SIZE(intel_perfmon_event_map),
	/*
	 * Intel PMCs cannot be accessed sanely above 32 bit width,
	 * so we install an artificial 1<<31 period regardless of
	 * the generic counter period:
	 */
	.max_period	= (1ULL << 31) - 1,
};

static struct x86_pmu amd_pmu = {
	.name		= "AMD",
	.handle_irq	= amd_pmu_handle_irq,
	.disable_all	= amd_pmu_disable_all,
	.enable_all	= amd_pmu_enable_all,
	.enable		= amd_pmu_enable_counter,
	.disable	= amd_pmu_disable_counter,
	.eventsel	= MSR_K7_EVNTSEL0,
	.perfctr	= MSR_K7_PERFCTR0,
	.event_map	= amd_pmu_event_map,
	.raw_event	= amd_pmu_raw_event,
	.max_events	= ARRAY_SIZE(amd_perfmon_event_map),
	.num_counters	= 4,
	.counter_bits	= 48,
	.counter_mask	= (1ULL << 48) - 1,
	/* use highest bit to detect overflow */
	.max_period	= (1ULL << 47) - 1,
};

static int intel_pmu_init(void)
{
	union cpuid10_edx edx;
	union cpuid10_eax eax;
	unsigned int unused;
	unsigned int ebx;
	int version;

	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
		return -ENODEV;

	/*
	 * Check whether the Architectural PerfMon supports
	 * Branch Misses Retired Event or not.
	 */
	cpuid(10, &eax.full, &ebx, &unused, &edx.full);
	if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
		return -ENODEV;

	version = eax.split.version_id;
	if (version < 2)
		return -ENODEV;

	x86_pmu = intel_pmu;
	x86_pmu.version = version;
	x86_pmu.num_counters = eax.split.num_counters;

	/*
	 * Quirk: v2 perfmon does not report fixed-purpose counters, so
	 * assume at least 3 counters:
	 */
	x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);

	x86_pmu.counter_bits = eax.split.bit_width;
	x86_pmu.counter_mask = (1ULL << eax.split.bit_width) - 1;

	rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);

	return 0;
}

static int amd_pmu_init(void)
{
	x86_pmu = amd_pmu;
	return 0;
}

void __init init_hw_perf_counters(void)
{
	int err;

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		err = intel_pmu_init();
		break;
	case X86_VENDOR_AMD:
		err = amd_pmu_init();
		break;
	default:
		return;
	}
	if (err != 0)
		return;

	pr_info("%s Performance Monitoring support detected.\n", x86_pmu.name);
	pr_info("... version: %d\n", x86_pmu.version);
	pr_info("... bit width: %d\n", x86_pmu.counter_bits);

	pr_info("... num counters: %d\n", x86_pmu.num_counters);
	if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC) {
		x86_pmu.num_counters = X86_PMC_MAX_GENERIC;
		WARN(1, KERN_ERR "hw perf counters %d > max(%d), clipping!",
		     x86_pmu.num_counters, X86_PMC_MAX_GENERIC);
	}
	perf_counter_mask = (1 << x86_pmu.num_counters) - 1;
	perf_max_counters = x86_pmu.num_counters;

	pr_info("... value mask: %016Lx\n", x86_pmu.counter_mask);
	pr_info("... max period: %016Lx\n", x86_pmu.max_period);

	if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) {
		x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED;
		WARN(1, KERN_ERR "hw perf counters fixed %d > max(%d), clipping!",
		     x86_pmu.num_counters_fixed, X86_PMC_MAX_FIXED);
	}
	pr_info("... fixed counters: %d\n", x86_pmu.num_counters_fixed);

	perf_counter_mask |=
		((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED;

	pr_info("... counter mask: %016Lx\n", perf_counter_mask);

	perf_counters_lapic_init(1);
	register_die_notifier(&perf_counter_nmi_notifier);
}

static inline void x86_pmu_read(struct perf_counter *counter)
{
	x86_perf_counter_update(counter, &counter->hw, counter->hw.idx);
}

static const struct pmu pmu = {
	.enable		= x86_pmu_enable,
	.disable	= x86_pmu_disable,
	.read		= x86_pmu_read,
};

const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
{
	int err;

	err = __hw_perf_counter_init(counter);
	if (err)
		return ERR_PTR(err);

	return &pmu;
}

/*
 * callchain support
 */

static inline
void callchain_store(struct perf_callchain_entry *entry, unsigned long ip)
{
	if (entry->nr < MAX_STACK_DEPTH)
		entry->ip[entry->nr++] = ip;
}

static DEFINE_PER_CPU(struct perf_callchain_entry, irq_entry);
static DEFINE_PER_CPU(struct perf_callchain_entry, nmi_entry);


static void
backtrace_warning_symbol(void *data, char *msg, unsigned long symbol)
{
	/* Ignore warnings */
}

static void backtrace_warning(void *data, char *msg)
{
	/* Ignore warnings */
}

static int backtrace_stack(void *data, char *name)
{
	/* Don't bother with IRQ stacks for now */
	return -1;
}

static void backtrace_address(void *data, unsigned long addr, int reliable)
{
	struct perf_callchain_entry *entry = data;

	if (reliable)
		callchain_store(entry, addr);
}

static const struct stacktrace_ops backtrace_ops = {
	.warning	= backtrace_warning,
	.warning_symbol	= backtrace_warning_symbol,
	.stack		= backtrace_stack,
	.address	= backtrace_address,
};

static void
perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
	unsigned long bp;
	char *stack;
	int nr = entry->nr;

	callchain_store(entry, instruction_pointer(regs));

	stack = ((char *)regs + sizeof(struct pt_regs));
#ifdef CONFIG_FRAME_POINTER
	bp = frame_pointer(regs);
#else
	bp = 0;
#endif

	dump_trace(NULL, regs, (void *)stack, bp, &backtrace_ops, entry);

	entry->kernel = entry->nr - nr;
}


struct stack_frame {
	const void __user	*next_fp;
	unsigned long		return_address;
};

static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
{
	int ret;

	if (!access_ok(VERIFY_READ, fp, sizeof(*frame)))
		return 0;

	ret = 1;
	pagefault_disable();
	if (__copy_from_user_inatomic(frame, fp, sizeof(*frame)))
		ret = 0;
	pagefault_enable();

	return ret;
}

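/*
 * Walk the user stack by chasing saved frame pointers; frames are read
 * with copy_stack_frame() so a fault in NMI context simply ends the walk.
 */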
static void
perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
	struct stack_frame frame;
	const void __user *fp;
	int nr = entry->nr;

	regs = (struct pt_regs *)current->thread.sp0 - 1;
	fp = (void __user *)regs->bp;

	callchain_store(entry, regs->ip);

	while (entry->nr < MAX_STACK_DEPTH) {
		frame.next_fp = NULL;
		frame.return_address = 0;

		if (!copy_stack_frame(fp, &frame))
			break;

		if ((unsigned long)fp < user_stack_pointer(regs))
			break;

		callchain_store(entry, frame.return_address);
		fp = frame.next_fp;
	}

	entry->user = entry->nr - nr;
}

static void
perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
	int is_user;

	if (!regs)
		return;

	is_user = user_mode(regs);

	if (!current || current->pid == 0)
		return;

	if (is_user && current->state != TASK_RUNNING)
		return;

	if (!is_user)
		perf_callchain_kernel(regs, entry);

	if (current->mm)
		perf_callchain_user(regs, entry);
}

struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
{
	struct perf_callchain_entry *entry;

	if (in_nmi())
		entry = &__get_cpu_var(nmi_entry);
	else
		entry = &__get_cpu_var(irq_entry);

	entry->nr = 0;
	entry->hv = 0;
	entry->kernel = 0;
	entry->user = 0;

	perf_do_callchain(regs, entry);

	return entry;
}