/*
 * Performance counter core code
 *
 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 * Copyright (C) 2008 Red Hat, Inc., Ingo Molnar
 *
 * For licensing details see kernel-base/COPYING
 */
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/sysfs.h>
#include <linux/ptrace.h>
#include <linux/percpu.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/anon_inodes.h>
#include <linux/kernel_stat.h>
#include <linux/perf_counter.h>
/*
 * Each CPU has a list of per CPU counters:
 */
DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);
int perf_max_counters __read_mostly = 1;
static int perf_reserved_percpu __read_mostly;
static int perf_overcommit __read_mostly = 1;
/*
 * Mutex for (sysadmin-configurable) counter reservations:
 */
static DEFINE_MUTEX(perf_resource_mutex);
/*
 * Architecture provided APIs - weak aliases:
 */
extern __weak const struct hw_perf_counter_ops *
hw_perf_counter_init(struct perf_counter *counter)
{
	return NULL;
}
u64 __weak hw_perf_save_disable(void)		{ return 0; }
void __weak hw_perf_restore(u64 ctrl)		{ barrier(); }
void __weak hw_perf_counter_setup(void)		{ barrier(); }
int __weak hw_perf_group_sched_in(struct perf_counter *group_leader,
	       struct perf_cpu_context *cpuctx,
	       struct perf_counter_context *ctx, int cpu)
{
	return 0;
}

void __weak perf_counter_print_debug(void)	{ }
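/*
 * Example: an architecture replaces the weak aliases above simply by
 * providing strong definitions. A minimal sketch of what an arch
 * implementation might look like (illustration only - the names
 * 'arch_pmu_counter_ops' and the mapping step are assumptions, not
 * part of this file):
 *
 *	const struct hw_perf_counter_ops *
 *	hw_perf_counter_init(struct perf_counter *counter)
 *	{
 *		// map counter->hw_event onto a hardware PMU event here,
 *		// returning NULL if the event cannot be supported:
 *		return &arch_pmu_counter_ops;
 *	}
 */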
static void
list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
{
	struct perf_counter *group_leader = counter->group_leader;

	/*
	 * Depending on whether it is a standalone or sibling counter,
	 * add it straight to the context's counter list, or to the group
	 * leader's sibling list:
	 */
	if (counter->group_leader == counter)
		list_add_tail(&counter->list_entry, &ctx->counter_list);
	else
		list_add_tail(&counter->list_entry, &group_leader->sibling_list);
}
static void
list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
{
	struct perf_counter *sibling, *tmp;

	list_del_init(&counter->list_entry);

	/*
	 * If this was a group counter with sibling counters then
	 * upgrade the siblings to singleton counters by adding them
	 * to the context list directly:
	 */
	list_for_each_entry_safe(sibling, tmp,
				 &counter->sibling_list, list_entry) {

		list_del_init(&sibling->list_entry);
		list_add_tail(&sibling->list_entry, &ctx->counter_list);
		sibling->group_leader = sibling;
	}
}
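/*
 * Sketch of the two-level layout the helpers above maintain (an
 * illustration, assuming one group of three counters plus a standalone
 * counter in the same context):
 *
 *	ctx->counter_list: leader --------------- standalone
 *	                     |
 *	                     +-- sibling_list: sibling1 -- sibling2
 *
 * Standalone counters are their own group leaders with an empty
 * sibling list; deleting a leader promotes its siblings to standalone
 * counters on ctx->counter_list.
 */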
/*
 * Cross CPU call to remove a performance counter
 *
 * We disable the counter on the hardware level first. After that we
 * remove it from the context list.
 */
static void __perf_counter_remove_from_context(void *info)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter *counter = info;
	struct perf_counter_context *ctx = counter->ctx;
	unsigned long flags;
	u64 perf_flags;

	/*
	 * If this is a task context, we need to check whether it is
	 * the current task context of this cpu. If not it has been
	 * scheduled out before the smp call arrived.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

	curr_rq_lock_irq_save(&flags);
	spin_lock(&ctx->lock);

	if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
		counter->state = PERF_COUNTER_STATE_INACTIVE;
		counter->hw_ops->disable(counter);
		ctx->nr_active--;
		cpuctx->active_oncpu--;
		counter->task = NULL;
	}
	ctx->nr_counters--;

	/*
	 * Protect the list operation against NMI by disabling the
	 * counters on a global level. NOP for non NMI based counters.
	 */
	perf_flags = hw_perf_save_disable();
	list_del_counter(counter, ctx);
	hw_perf_restore(perf_flags);

	if (!ctx->task) {
		/*
		 * Allow more per task counters with respect to the
		 * reservation:
		 */
		cpuctx->max_pertask =
			min(perf_max_counters - ctx->nr_counters,
			    perf_max_counters - perf_reserved_percpu);
	}

	spin_unlock(&ctx->lock);
	curr_rq_unlock_irq_restore(&flags);
}
/*
 * Remove the counter from a task's (or a CPU's) list of counters.
 *
 * Must be called with counter->mutex held.
 *
 * CPU counters are removed with a smp call. For task counters we only
 * call when the task is on a CPU.
 */
static void perf_counter_remove_from_context(struct perf_counter *counter)
{
	struct perf_counter_context *ctx = counter->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Per cpu counters are removed via an smp call and
		 * the removal is always successful.
		 */
		smp_call_function_single(counter->cpu,
					 __perf_counter_remove_from_context,
					 counter, 1);
		return;
	}

retry:
	task_oncpu_function_call(task, __perf_counter_remove_from_context,
				 counter);

	spin_lock_irq(&ctx->lock);
	/*
	 * If the context is active we need to retry the smp call.
	 */
	if (ctx->nr_active && !list_empty(&counter->list_entry)) {
		spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * The lock prevents that this context is scheduled in so we
	 * can remove the counter safely, if the call above did not
	 * succeed.
	 */
	if (!list_empty(&counter->list_entry)) {
		ctx->nr_counters--;
		list_del_counter(counter, ctx);
		counter->task = NULL;
	}
	spin_unlock_irq(&ctx->lock);
}
static int
counter_sched_in(struct perf_counter *counter,
		 struct perf_cpu_context *cpuctx,
		 struct perf_counter_context *ctx,
		 int cpu)
{
	if (counter->state == PERF_COUNTER_STATE_OFF)
		return 0;

	counter->state = PERF_COUNTER_STATE_ACTIVE;
	counter->oncpu = cpu;	/* TODO: put 'cpu' into cpuctx->cpu */
	/*
	 * The new state must be visible before we turn it on in the hardware:
	 */
	smp_wmb();

	if (counter->hw_ops->enable(counter)) {
		counter->state = PERF_COUNTER_STATE_INACTIVE;
		counter->oncpu = -1;
		return -EAGAIN;
	}

	cpuctx->active_oncpu++;
	ctx->nr_active++;

	return 0;
}
/*
 * Cross CPU call to install and enable a performance counter
 */
static void __perf_install_in_context(void *info)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter *counter = info;
	struct perf_counter_context *ctx = counter->ctx;
	int cpu = smp_processor_id();
	unsigned long flags;
	u64 perf_flags;

	/*
	 * If this is a task context, we need to check whether it is
	 * the current task context of this cpu. If not it has been
	 * scheduled out before the smp call arrived.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

	curr_rq_lock_irq_save(&flags);
	spin_lock(&ctx->lock);

	/*
	 * Protect the list operation against NMI by disabling the
	 * counters on a global level. NOP for non NMI based counters.
	 */
	perf_flags = hw_perf_save_disable();
	list_add_counter(counter, ctx);
	ctx->nr_counters++;

	counter_sched_in(counter, cpuctx, ctx, cpu);

	if (!ctx->task && cpuctx->max_pertask)
		cpuctx->max_pertask--;

	hw_perf_restore(perf_flags);

	spin_unlock(&ctx->lock);
	curr_rq_unlock_irq_restore(&flags);
}
/*
 * Attach a performance counter to a context
 *
 * First we add the counter to the list with the hardware enable bit
 * in counter->hw_config cleared.
 *
 * If the counter is attached to a task which is on a CPU we use a smp
 * call to enable it in the task context. The task might have been
 * scheduled away, but we check this in the smp call again.
 */
static void
perf_install_in_context(struct perf_counter_context *ctx,
			struct perf_counter *counter,
			int cpu)
{
	struct task_struct *task = ctx->task;

	counter->ctx = ctx;
	if (!task) {
		/*
		 * Per cpu counters are installed via an smp call and
		 * the install is always successful.
		 */
		smp_call_function_single(cpu, __perf_install_in_context,
					 counter, 1);
		return;
	}

	counter->task = task;
retry:
	task_oncpu_function_call(task, __perf_install_in_context,
				 counter);

	spin_lock_irq(&ctx->lock);
	/*
	 * If the context is active and the counter has not been added
	 * we need to retry the smp call.
	 */
	if (ctx->nr_active && list_empty(&counter->list_entry)) {
		spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * The lock prevents that this context is scheduled in so we
	 * can add the counter safely, if the call above did not
	 * succeed.
	 */
	if (list_empty(&counter->list_entry)) {
		list_add_counter(counter, ctx);
		ctx->nr_counters++;
	}
	spin_unlock_irq(&ctx->lock);
}
static void
counter_sched_out(struct perf_counter *counter,
		  struct perf_cpu_context *cpuctx,
		  struct perf_counter_context *ctx)
{
	if (counter->state != PERF_COUNTER_STATE_ACTIVE)
		return;

	counter->state = PERF_COUNTER_STATE_INACTIVE;
	counter->hw_ops->disable(counter);
	counter->oncpu = -1;

	cpuctx->active_oncpu--;
	ctx->nr_active--;
}
static void
group_sched_out(struct perf_counter *group_counter,
		struct perf_cpu_context *cpuctx,
		struct perf_counter_context *ctx)
{
	struct perf_counter *counter;

	if (group_counter->state != PERF_COUNTER_STATE_ACTIVE)
		return;

	counter_sched_out(group_counter, cpuctx, ctx);

	/*
	 * Schedule out siblings (if any):
	 */
	list_for_each_entry(counter, &group_counter->sibling_list, list_entry)
		counter_sched_out(counter, cpuctx, ctx);
}
void __perf_counter_sched_out(struct perf_counter_context *ctx,
			      struct perf_cpu_context *cpuctx)
{
	struct perf_counter *counter;
	u64 flags;

	if (likely(!ctx->nr_counters))
		return;

	spin_lock(&ctx->lock);
	flags = hw_perf_save_disable();
	if (ctx->nr_active) {
		list_for_each_entry(counter, &ctx->counter_list, list_entry)
			group_sched_out(counter, cpuctx, ctx);
	}
	hw_perf_restore(flags);
	spin_unlock(&ctx->lock);
}
/*
 * Called from scheduler to remove the counters of the current task,
 * with interrupts disabled.
 *
 * We stop each counter and update the counter value in counter->count.
 *
 * This does not protect us against NMI, but disable() sets the
 * disabled bit in the control field of the counter _before_ accessing
 * the counter control register. If an NMI hits, then it will not
 * restart the counter.
 */
void perf_counter_task_sched_out(struct task_struct *task, int cpu)
{
	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
	struct perf_counter_context *ctx = &task->perf_counter_ctx;

	if (likely(!cpuctx->task_ctx))
		return;

	__perf_counter_sched_out(ctx, cpuctx);

	cpuctx->task_ctx = NULL;
}
static void perf_counter_cpu_sched_out(struct perf_cpu_context *cpuctx)
{
	__perf_counter_sched_out(&cpuctx->ctx, cpuctx);
}
static int
group_sched_in(struct perf_counter *group_counter,
	       struct perf_cpu_context *cpuctx,
	       struct perf_counter_context *ctx,
	       int cpu)
{
	struct perf_counter *counter, *partial_group;
	int ret;

	if (group_counter->state == PERF_COUNTER_STATE_OFF)
		return 0;

	ret = hw_perf_group_sched_in(group_counter, cpuctx, ctx, cpu);
	if (ret)
		return ret < 0 ? ret : 0;

	if (counter_sched_in(group_counter, cpuctx, ctx, cpu))
		return -EAGAIN;

	/*
	 * Schedule in siblings as one group (if any):
	 */
	list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
		if (counter_sched_in(counter, cpuctx, ctx, cpu)) {
			partial_group = counter;
			goto group_error;
		}
	}

	return 0;

group_error:
	/*
	 * Groups can be scheduled in as one unit only, so undo any
	 * partial group before returning:
	 */
	list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
		if (counter == partial_group)
			break;
		counter_sched_out(counter, cpuctx, ctx);
	}
	counter_sched_out(group_counter, cpuctx, ctx);

	return -EAGAIN;
}
/*
 * Return 1 for a software counter, 0 for a hardware counter
 */
static inline int is_software_counter(struct perf_counter *counter)
{
	return !counter->hw_event.raw && counter->hw_event.type < 0;
}
/*
 * Return 1 for a group consisting entirely of software counters,
 * 0 if the group contains any hardware counters.
 */
static int is_software_only_group(struct perf_counter *leader)
{
	struct perf_counter *counter;

	if (!is_software_counter(leader))
		return 0;

	list_for_each_entry(counter, &leader->sibling_list, list_entry)
		if (!is_software_counter(counter))
			return 0;

	return 1;
}
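/*
 * Note on the 'type < 0' test above: in this early ABI the software
 * counter types are encoded as negative hw_event.type values, for
 * example (values assumed from the matching perf_counter.h of this
 * era, shown for illustration):
 *
 *	PERF_COUNT_CPU_CLOCK		= -1,
 *	PERF_COUNT_TASK_CLOCK		= -2,
 *	PERF_COUNT_PAGE_FAULTS		= -3,
 *	PERF_COUNT_CONTEXT_SWITCHES	= -4,
 *	PERF_COUNT_CPU_MIGRATIONS	= -5,
 *
 * while hardware event types are >= 0.
 */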
static void
__perf_counter_sched_in(struct perf_counter_context *ctx,
			struct perf_cpu_context *cpuctx, int cpu)
{
	struct perf_counter *counter;
	u64 flags;
	int can_add_hw = 1;

	if (likely(!ctx->nr_counters))
		return;

	spin_lock(&ctx->lock);
	flags = hw_perf_save_disable();
	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
		/*
		 * Listen to the 'cpu' scheduling filter constraint
		 * of counters:
		 */
		if (counter->cpu != -1 && counter->cpu != cpu)
			continue;

		/*
		 * If we scheduled in a group atomically and exclusively,
		 * or if this group can't go on, don't add any more
		 * hardware counters.
		 */
		if (can_add_hw || is_software_only_group(counter))
			if (group_sched_in(counter, cpuctx, ctx, cpu))
				can_add_hw = 0;
	}
	hw_perf_restore(flags);
	spin_unlock(&ctx->lock);
}
/*
 * Called from scheduler to add the counters of the current task
 * with interrupts disabled.
 *
 * We restore the counter value and then enable it.
 *
 * This does not protect us against NMI, but enable() sets the
 * enabled bit in the control field of the counter _before_ accessing
 * the counter control register. If an NMI hits, then it will keep
 * the counter running.
 */
void perf_counter_task_sched_in(struct task_struct *task, int cpu)
{
	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
	struct perf_counter_context *ctx = &task->perf_counter_ctx;

	__perf_counter_sched_in(ctx, cpuctx, cpu);
	cpuctx->task_ctx = ctx;
}
static void perf_counter_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu)
{
	struct perf_counter_context *ctx = &cpuctx->ctx;

	__perf_counter_sched_in(ctx, cpuctx, cpu);
}
int perf_counter_task_disable(void)
{
	struct task_struct *curr = current;
	struct perf_counter_context *ctx = &curr->perf_counter_ctx;
	struct perf_counter *counter;
	unsigned long flags;
	u64 perf_flags;
	int cpu;

	if (likely(!ctx->nr_counters))
		return 0;

	curr_rq_lock_irq_save(&flags);
	cpu = smp_processor_id();

	/* force the update of the task clock: */
	__task_delta_exec(curr, 1);

	perf_counter_task_sched_out(curr, cpu);

	spin_lock(&ctx->lock);

	/*
	 * Disable all the counters:
	 */
	perf_flags = hw_perf_save_disable();

	list_for_each_entry(counter, &ctx->counter_list, list_entry)
		counter->state = PERF_COUNTER_STATE_OFF;

	hw_perf_restore(perf_flags);

	spin_unlock(&ctx->lock);

	curr_rq_unlock_irq_restore(&flags);

	return 0;
}
int perf_counter_task_enable(void)
{
	struct task_struct *curr = current;
	struct perf_counter_context *ctx = &curr->perf_counter_ctx;
	struct perf_counter *counter;
	unsigned long flags;
	u64 perf_flags;
	int cpu;

	if (likely(!ctx->nr_counters))
		return 0;

	curr_rq_lock_irq_save(&flags);
	cpu = smp_processor_id();

	/* force the update of the task clock: */
	__task_delta_exec(curr, 1);

	perf_counter_task_sched_out(curr, cpu);

	spin_lock(&ctx->lock);

	/*
	 * Enable all the counters:
	 */
	perf_flags = hw_perf_save_disable();

	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
		if (counter->state != PERF_COUNTER_STATE_OFF)
			continue;
		counter->state = PERF_COUNTER_STATE_INACTIVE;
		counter->hw_event.disabled = 0;
	}
	hw_perf_restore(perf_flags);

	spin_unlock(&ctx->lock);

	perf_counter_task_sched_in(curr, cpu);

	curr_rq_unlock_irq_restore(&flags);

	return 0;
}
/*
 * Round-robin a context's counters:
 */
static void rotate_ctx(struct perf_counter_context *ctx)
{
	struct perf_counter *counter;
	u64 perf_flags;

	if (!ctx->nr_counters)
		return;

	spin_lock(&ctx->lock);
	/*
	 * Rotate the first entry last (works just fine for group counters too):
	 */
	perf_flags = hw_perf_save_disable();
	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
		list_del(&counter->list_entry);
		list_add_tail(&counter->list_entry, &ctx->counter_list);
		break;
	}
	hw_perf_restore(perf_flags);

	spin_unlock(&ctx->lock);
}
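/*
 * Illustration of the rotation (not code from this file): with
 * counters A, B, C on ctx->counter_list, successive calls yield
 *
 *	A B C  ->  B C A  ->  C A B  ->  A B C  ->  ...
 *
 * so that when more counters exist than hardware slots, each one
 * periodically gets a chance to be scheduled in by the tick below.
 */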
void perf_counter_task_tick(struct task_struct *curr, int cpu)
{
	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
	struct perf_counter_context *ctx = &curr->perf_counter_ctx;
	const int rotate_percpu = 0;

	if (rotate_percpu)
		perf_counter_cpu_sched_out(cpuctx);
	perf_counter_task_sched_out(curr, cpu);

	if (rotate_percpu)
		rotate_ctx(&cpuctx->ctx);
	rotate_ctx(ctx);

	if (rotate_percpu)
		perf_counter_cpu_sched_in(cpuctx, cpu);
	perf_counter_task_sched_in(curr, cpu);
}
/*
 * Cross CPU call to read the hardware counter
 */
static void __read(void *info)
{
	struct perf_counter *counter = info;
	unsigned long flags;

	curr_rq_lock_irq_save(&flags);
	counter->hw_ops->read(counter);
	curr_rq_unlock_irq_restore(&flags);
}
static u64 perf_counter_read(struct perf_counter *counter)
{
	/*
	 * If counter is enabled and currently active on a CPU, update the
	 * value in the counter structure:
	 */
	if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
		smp_call_function_single(counter->oncpu,
					 __read, counter, 1);
	}

	return atomic64_read(&counter->count);
}
/*
 * Cross CPU call to switch performance data pointers
 */
static void __perf_switch_irq_data(void *info)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter *counter = info;
	struct perf_counter_context *ctx = counter->ctx;
	struct perf_data *oldirqdata = counter->irqdata;

	/*
	 * If this is a task context, we need to check whether it is
	 * the current task context of this cpu. If not it has been
	 * scheduled out before the smp call arrived.
	 */
	if (ctx->task) {
		if (cpuctx->task_ctx != ctx)
			return;
		spin_lock(&ctx->lock);
	}

	/* Change the pointer NMI safe */
	atomic_long_set((atomic_long_t *)&counter->irqdata,
			(unsigned long) counter->usrdata);
	counter->usrdata = oldirqdata;

	if (ctx->task)
		spin_unlock(&ctx->lock);
}
static struct perf_data *perf_switch_irq_data(struct perf_counter *counter)
{
	struct perf_counter_context *ctx = counter->ctx;
	struct perf_data *oldirqdata = counter->irqdata;
	struct task_struct *task = ctx->task;

	if (!task) {
		smp_call_function_single(counter->cpu,
					 __perf_switch_irq_data,
					 counter, 1);
		return counter->usrdata;
	}

retry:
	spin_lock_irq(&ctx->lock);
	if (counter->state != PERF_COUNTER_STATE_ACTIVE) {
		counter->irqdata = counter->usrdata;
		counter->usrdata = oldirqdata;
		spin_unlock_irq(&ctx->lock);
		return oldirqdata;
	}
	spin_unlock_irq(&ctx->lock);
	task_oncpu_function_call(task, __perf_switch_irq_data, counter);
	/* Might have failed, because task was scheduled out */
	if (counter->irqdata == oldirqdata)
		goto retry;

	return counter->usrdata;
}
static void put_context(struct perf_counter_context *ctx)
{
	if (ctx->task)
		put_task_struct(ctx->task);
}
static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
{
	struct perf_cpu_context *cpuctx;
	struct perf_counter_context *ctx;
	struct task_struct *task;

	/*
	 * If cpu is not a wildcard then this is a percpu counter:
	 */
	if (cpu != -1) {
		/* Must be root to operate on a CPU counter: */
		if (!capable(CAP_SYS_ADMIN))
			return ERR_PTR(-EACCES);

		if (cpu < 0 || cpu > num_possible_cpus())
			return ERR_PTR(-EINVAL);

		/*
		 * We could be clever and allow to attach a counter to an
		 * offline CPU and activate it when the CPU comes up, but
		 * that's for later.
		 */
		if (!cpu_isset(cpu, cpu_online_map))
			return ERR_PTR(-ENODEV);

		cpuctx = &per_cpu(perf_cpu_context, cpu);
		ctx = &cpuctx->ctx;

		return ctx;
	}

	rcu_read_lock();
	if (!pid)
		task = current;
	else
		task = find_task_by_vpid(pid);
	if (task)
		get_task_struct(task);
	rcu_read_unlock();

	if (!task)
		return ERR_PTR(-ESRCH);

	ctx = &task->perf_counter_ctx;
	ctx->task = task;

	/* Reuse ptrace permission checks for now. */
	if (!ptrace_may_access(task, PTRACE_MODE_READ)) {
		put_context(ctx);
		return ERR_PTR(-EACCES);
	}

	return ctx;
}
/*
 * Called when the last reference to the file is gone.
 */
static int perf_release(struct inode *inode, struct file *file)
{
	struct perf_counter *counter = file->private_data;
	struct perf_counter_context *ctx = counter->ctx;

	file->private_data = NULL;

	mutex_lock(&counter->mutex);

	perf_counter_remove_from_context(counter);
	put_context(ctx);

	mutex_unlock(&counter->mutex);

	kfree(counter);

	return 0;
}
/*
 * Read the performance counter - simple non blocking version for now
 */
static ssize_t
perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
{
	u64 cntval;

	if (count != sizeof(cntval))
		return -EINVAL;

	mutex_lock(&counter->mutex);
	cntval = perf_counter_read(counter);
	mutex_unlock(&counter->mutex);

	return put_user(cntval, (u64 __user *) buf) ? -EFAULT : sizeof(cntval);
}
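/*
 * Userspace side of the above, as a sketch (not part of this file):
 * a PERF_RECORD_SIMPLE counter is read as exactly one u64, anything
 * else gets -EINVAL. 'counter_fd' is assumed to come from
 * sys_perf_counter_open():
 *
 *	u64 value;
 *
 *	if (read(counter_fd, &value, sizeof(value)) == sizeof(value))
 *		printf("count: %llu\n", (unsigned long long)value);
 */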
static ssize_t
perf_copy_usrdata(struct perf_data *usrdata, char __user *buf, size_t count)
{
	if (!usrdata->len)
		return 0;

	count = min(count, (size_t)usrdata->len);
	if (copy_to_user(buf, usrdata->data + usrdata->rd_idx, count))
		return -EFAULT;

	/* Adjust the counters */
	usrdata->len -= count;
	if (!usrdata->len)
		usrdata->rd_idx = 0;
	else
		usrdata->rd_idx += count;

	return count;
}
static ssize_t
perf_read_irq_data(struct perf_counter *counter,
		   char __user *buf,
		   size_t count,
		   int nonblocking)
{
	struct perf_data *irqdata, *usrdata;
	DECLARE_WAITQUEUE(wait, current);
	ssize_t res;

	irqdata = counter->irqdata;
	usrdata = counter->usrdata;

	if (usrdata->len + irqdata->len >= count)
		goto read_pending;

	if (nonblocking)
		return -EAGAIN;

	spin_lock_irq(&counter->waitq.lock);
	__add_wait_queue(&counter->waitq, &wait);
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (usrdata->len + irqdata->len >= count)
			break;

		if (signal_pending(current))
			break;

		spin_unlock_irq(&counter->waitq.lock);
		schedule();
		spin_lock_irq(&counter->waitq.lock);
	}
	__remove_wait_queue(&counter->waitq, &wait);
	__set_current_state(TASK_RUNNING);
	spin_unlock_irq(&counter->waitq.lock);

	if (usrdata->len + irqdata->len < count)
		return -ERESTARTSYS;
read_pending:
	mutex_lock(&counter->mutex);

	/* Drain pending data first: */
	res = perf_copy_usrdata(usrdata, buf, count);
	if (res < 0 || res == count)
		goto out;

	/* Switch irq buffer: */
	usrdata = perf_switch_irq_data(counter);
	if (perf_copy_usrdata(usrdata, buf + res, count - res) < 0) {
		if (!res)
			res = -EFAULT;
	} else {
		res = count;
	}
out:
	mutex_unlock(&counter->mutex);

	return res;
}
static ssize_t
perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct perf_counter *counter = file->private_data;

	switch (counter->hw_event.record_type) {
	case PERF_RECORD_SIMPLE:
		return perf_read_hw(counter, buf, count);

	case PERF_RECORD_IRQ:
	case PERF_RECORD_GROUP:
		return perf_read_irq_data(counter, buf, count,
					  file->f_flags & O_NONBLOCK);
	}

	return -EINVAL;
}
static unsigned int perf_poll(struct file *file, poll_table *wait)
{
	struct perf_counter *counter = file->private_data;
	unsigned int events = 0;
	unsigned long flags;

	poll_wait(file, &counter->waitq, wait);

	spin_lock_irqsave(&counter->waitq.lock, flags);
	if (counter->usrdata->len || counter->irqdata->len)
		events |= POLLIN;
	spin_unlock_irqrestore(&counter->waitq.lock, flags);

	return events;
}
static const struct file_operations perf_fops = {
	.release		= perf_release,
	.read			= perf_read,
	.poll			= perf_poll,
};
static int cpu_clock_perf_counter_enable(struct perf_counter *counter)
{
	int cpu = raw_smp_processor_id();

	atomic64_set(&counter->hw.prev_count, cpu_clock(cpu));

	return 0;
}
static void cpu_clock_perf_counter_update(struct perf_counter *counter)
{
	int cpu = raw_smp_processor_id();
	s64 prev;
	u64 now;

	now = cpu_clock(cpu);
	prev = atomic64_read(&counter->hw.prev_count);
	atomic64_set(&counter->hw.prev_count, now);
	atomic64_add(now - prev, &counter->count);
}
static void cpu_clock_perf_counter_disable(struct perf_counter *counter)
{
	cpu_clock_perf_counter_update(counter);
}

static void cpu_clock_perf_counter_read(struct perf_counter *counter)
{
	cpu_clock_perf_counter_update(counter);
}
static const struct hw_perf_counter_ops perf_ops_cpu_clock = {
	.enable		= cpu_clock_perf_counter_enable,
	.disable	= cpu_clock_perf_counter_disable,
	.read		= cpu_clock_perf_counter_read,
};
/*
 * Called from within the scheduler:
 */
static u64 task_clock_perf_counter_val(struct perf_counter *counter, int update)
{
	struct task_struct *curr = counter->task;
	u64 delta;

	delta = __task_delta_exec(curr, update);

	return curr->se.sum_exec_runtime + delta;
}
static void task_clock_perf_counter_update(struct perf_counter *counter, u64 now)
{
	u64 prev;
	s64 delta;

	prev = atomic64_read(&counter->hw.prev_count);

	atomic64_set(&counter->hw.prev_count, now);

	delta = now - prev;

	atomic64_add(delta, &counter->count);
}
static void task_clock_perf_counter_read(struct perf_counter *counter)
{
	u64 now = task_clock_perf_counter_val(counter, 1);

	task_clock_perf_counter_update(counter, now);
}

static int task_clock_perf_counter_enable(struct perf_counter *counter)
{
	u64 now = task_clock_perf_counter_val(counter, 0);

	atomic64_set(&counter->hw.prev_count, now);

	return 0;
}

static void task_clock_perf_counter_disable(struct perf_counter *counter)
{
	u64 now = task_clock_perf_counter_val(counter, 0);

	task_clock_perf_counter_update(counter, now);
}
static const struct hw_perf_counter_ops perf_ops_task_clock = {
	.enable		= task_clock_perf_counter_enable,
	.disable	= task_clock_perf_counter_disable,
	.read		= task_clock_perf_counter_read,
};
static u64 get_page_faults(void)
{
	struct task_struct *curr = current;

	return curr->maj_flt + curr->min_flt;
}
static void page_faults_perf_counter_update(struct perf_counter *counter)
{
	u64 prev, now;
	s64 delta;

	prev = atomic64_read(&counter->hw.prev_count);
	now = get_page_faults();

	atomic64_set(&counter->hw.prev_count, now);

	delta = now - prev;

	atomic64_add(delta, &counter->count);
}
static void page_faults_perf_counter_read(struct perf_counter *counter)
{
	page_faults_perf_counter_update(counter);
}

static int page_faults_perf_counter_enable(struct perf_counter *counter)
{
	/*
	 * page-faults is a per-task value already,
	 * so we don't have to clear it on switch-in.
	 */
	return 0;
}

static void page_faults_perf_counter_disable(struct perf_counter *counter)
{
	page_faults_perf_counter_update(counter);
}
static const struct hw_perf_counter_ops perf_ops_page_faults = {
	.enable		= page_faults_perf_counter_enable,
	.disable	= page_faults_perf_counter_disable,
	.read		= page_faults_perf_counter_read,
};
static u64 get_context_switches(void)
{
	struct task_struct *curr = current;

	return curr->nvcsw + curr->nivcsw;
}
static void context_switches_perf_counter_update(struct perf_counter *counter)
{
	u64 prev, now;
	s64 delta;

	prev = atomic64_read(&counter->hw.prev_count);
	now = get_context_switches();

	atomic64_set(&counter->hw.prev_count, now);

	delta = now - prev;

	atomic64_add(delta, &counter->count);
}
static void context_switches_perf_counter_read(struct perf_counter *counter)
{
	context_switches_perf_counter_update(counter);
}

static int context_switches_perf_counter_enable(struct perf_counter *counter)
{
	/*
	 * ->nvcsw + curr->nivcsw is a per-task value already,
	 * so we don't have to clear it on switch-in.
	 */
	return 0;
}

static void context_switches_perf_counter_disable(struct perf_counter *counter)
{
	context_switches_perf_counter_update(counter);
}
static const struct hw_perf_counter_ops perf_ops_context_switches = {
	.enable		= context_switches_perf_counter_enable,
	.disable	= context_switches_perf_counter_disable,
	.read		= context_switches_perf_counter_read,
};
static inline u64 get_cpu_migrations(void)
{
	return current->se.nr_migrations;
}
static void cpu_migrations_perf_counter_update(struct perf_counter *counter)
{
	u64 prev, now;
	s64 delta;

	prev = atomic64_read(&counter->hw.prev_count);
	now = get_cpu_migrations();

	atomic64_set(&counter->hw.prev_count, now);

	delta = now - prev;

	atomic64_add(delta, &counter->count);
}
static void cpu_migrations_perf_counter_read(struct perf_counter *counter)
{
	cpu_migrations_perf_counter_update(counter);
}

static int cpu_migrations_perf_counter_enable(struct perf_counter *counter)
{
	/*
	 * se.nr_migrations is a per-task value already,
	 * so we don't have to clear it on switch-in.
	 */
	return 0;
}

static void cpu_migrations_perf_counter_disable(struct perf_counter *counter)
{
	cpu_migrations_perf_counter_update(counter);
}
static const struct hw_perf_counter_ops perf_ops_cpu_migrations = {
	.enable		= cpu_migrations_perf_counter_enable,
	.disable	= cpu_migrations_perf_counter_disable,
	.read		= cpu_migrations_perf_counter_read,
};
static const struct hw_perf_counter_ops *
sw_perf_counter_init(struct perf_counter *counter)
{
	const struct hw_perf_counter_ops *hw_ops = NULL;

	switch (counter->hw_event.type) {
	case PERF_COUNT_CPU_CLOCK:
		hw_ops = &perf_ops_cpu_clock;
		break;
	case PERF_COUNT_TASK_CLOCK:
		hw_ops = &perf_ops_task_clock;
		break;
	case PERF_COUNT_PAGE_FAULTS:
		hw_ops = &perf_ops_page_faults;
		break;
	case PERF_COUNT_CONTEXT_SWITCHES:
		hw_ops = &perf_ops_context_switches;
		break;
	case PERF_COUNT_CPU_MIGRATIONS:
		hw_ops = &perf_ops_cpu_migrations;
		break;
	default:
		break;
	}
	return hw_ops;
}
/*
 * Allocate and initialize a counter structure
 */
static struct perf_counter *
perf_counter_alloc(struct perf_counter_hw_event *hw_event,
		   int cpu,
		   struct perf_counter *group_leader,
		   gfp_t gfpflags)
{
	const struct hw_perf_counter_ops *hw_ops;
	struct perf_counter *counter;

	counter = kzalloc(sizeof(*counter), gfpflags);
	if (!counter)
		return NULL;

	/*
	 * Single counters are their own group leaders, with an
	 * empty sibling list:
	 */
	if (!group_leader)
		group_leader = counter;

	mutex_init(&counter->mutex);
	INIT_LIST_HEAD(&counter->list_entry);
	INIT_LIST_HEAD(&counter->sibling_list);
	init_waitqueue_head(&counter->waitq);

	counter->irqdata		= &counter->data[0];
	counter->usrdata		= &counter->data[1];
	counter->cpu			= cpu;
	counter->hw_event		= *hw_event;
	counter->wakeup_pending		= 0;
	counter->group_leader		= group_leader;
	counter->hw_ops			= NULL;

	counter->state = PERF_COUNTER_STATE_INACTIVE;
	if (hw_event->disabled)
		counter->state = PERF_COUNTER_STATE_OFF;

	hw_ops = NULL;
	if (!hw_event->raw && hw_event->type < 0)
		hw_ops = sw_perf_counter_init(counter);
	if (!hw_ops)
		hw_ops = hw_perf_counter_init(counter);

	if (!hw_ops) {
		kfree(counter);
		return NULL;
	}
	counter->hw_ops = hw_ops;

	return counter;
}
/**
 * sys_perf_counter_open - open a performance counter, associate it to a task/cpu
 *
 * @hw_event_uptr:	event type attributes for monitoring/sampling
 * @pid:		target pid
 * @cpu:		target cpu
 * @group_fd:		group leader counter fd
 */
asmlinkage int
sys_perf_counter_open(struct perf_counter_hw_event *hw_event_uptr __user,
		      pid_t pid, int cpu, int group_fd)
{
	struct perf_counter *counter, *group_leader;
	struct perf_counter_hw_event hw_event;
	struct perf_counter_context *ctx;
	struct file *counter_file = NULL;
	struct file *group_file = NULL;
	int fput_needed = 0;
	int fput_needed2 = 0;
	int ret;

	if (copy_from_user(&hw_event, hw_event_uptr, sizeof(hw_event)) != 0)
		return -EFAULT;

	/*
	 * Get the target context (task or percpu):
	 */
	ctx = find_get_context(pid, cpu);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	/*
	 * Look up the group leader (we will attach this counter to it):
	 */
	group_leader = NULL;
	if (group_fd != -1) {
		ret = -EINVAL;
		group_file = fget_light(group_fd, &fput_needed);
		if (!group_file)
			goto err_put_context;
		if (group_file->f_op != &perf_fops)
			goto err_put_context;

		group_leader = group_file->private_data;
		/*
		 * Do not allow a recursive hierarchy (this new sibling
		 * becoming part of another group-sibling):
		 */
		if (group_leader->group_leader != group_leader)
			goto err_put_context;
		/*
		 * Do not allow to attach to a group in a different
		 * task or CPU context:
		 */
		if (group_leader->ctx != ctx)
			goto err_put_context;
	}

	ret = -EINVAL;
	counter = perf_counter_alloc(&hw_event, cpu, group_leader, GFP_KERNEL);
	if (!counter)
		goto err_put_context;

	ret = anon_inode_getfd("[perf_counter]", &perf_fops, counter, 0);
	if (ret < 0)
		goto err_free_put_context;

	counter_file = fget_light(ret, &fput_needed2);
	if (!counter_file)
		goto err_free_put_context;

	counter->filp = counter_file;
	perf_install_in_context(ctx, counter, cpu);

	fput_light(counter_file, fput_needed2);

out_fput:
	fput_light(group_file, fput_needed);

	return ret;

err_free_put_context:
	kfree(counter);

err_put_context:
	put_context(ctx);

	goto out_fput;
}
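/*
 * Illustrative userspace call of this syscall (a sketch only; the
 * syscall number macro and wrapper are assumptions, not part of this
 * file). It opens a task-clock counter on the calling task, on any
 * CPU, without a group leader:
 *
 *	struct perf_counter_hw_event hw_event = {
 *		.type		= PERF_COUNT_TASK_CLOCK,
 *		.record_type	= PERF_RECORD_SIMPLE,
 *	};
 *	int fd = syscall(__NR_perf_counter_open, &hw_event,
 *			 0, -1, -1);	// pid 0 (self), any cpu, no group
 *
 * The returned fd is then read(), poll()ed and finally close()d,
 * which drops into perf_release() above.
 */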
/*
 * Initialize the perf_counter context in a task_struct:
 */
static void
__perf_counter_init_context(struct perf_counter_context *ctx,
			    struct task_struct *task)
{
	memset(ctx, 0, sizeof(*ctx));
	spin_lock_init(&ctx->lock);
	INIT_LIST_HEAD(&ctx->counter_list);
	ctx->task = task;
}
/*
 * Inherit a counter from parent task to child task:
 */
static int
inherit_counter(struct perf_counter *parent_counter,
		struct task_struct *parent,
		struct perf_counter_context *parent_ctx,
		struct task_struct *child,
		struct perf_counter_context *child_ctx)
{
	struct perf_counter *child_counter;

	child_counter = perf_counter_alloc(&parent_counter->hw_event,
					   parent_counter->cpu, NULL,
					   GFP_ATOMIC);
	if (!child_counter)
		return -ENOMEM;

	/*
	 * Link it up in the child's context:
	 */
	child_counter->ctx = child_ctx;
	child_counter->task = child;
	list_add_counter(child_counter, child_ctx);
	child_ctx->nr_counters++;

	child_counter->parent = parent_counter;
	/*
	 * inherit into child's child as well:
	 */
	child_counter->hw_event.inherit = 1;

	/*
	 * Get a reference to the parent filp - we will fput it
	 * when the child counter exits. This is safe to do because
	 * we are in the parent and we know that the filp still
	 * exists and has a nonzero count:
	 */
	atomic_long_inc(&parent_counter->filp->f_count);

	return 0;
}
static void
__perf_counter_exit_task(struct task_struct *child,
			 struct perf_counter *child_counter,
			 struct perf_counter_context *child_ctx)
{
	struct perf_counter *parent_counter;
	u64 parent_val, child_val;

	/*
	 * If we do not self-reap then we have to wait for the
	 * child task to unschedule (it will happen for sure),
	 * so that its counter is at its final count. (This
	 * condition triggers rarely - child tasks usually get
	 * off their CPU before the parent has a chance to
	 * get this far into the reaping action)
	 */
	if (child != current) {
		wait_task_inactive(child, 0);
		list_del_init(&child_counter->list_entry);
	} else {
		struct perf_cpu_context *cpuctx;
		unsigned long flags;
		u64 perf_flags;

		/*
		 * Disable and unlink this counter.
		 *
		 * Be careful about zapping the list - IRQ/NMI context
		 * could still be processing it:
		 */
		curr_rq_lock_irq_save(&flags);
		perf_flags = hw_perf_save_disable();

		cpuctx = &__get_cpu_var(perf_cpu_context);

		if (child_counter->state == PERF_COUNTER_STATE_ACTIVE) {
			child_counter->state = PERF_COUNTER_STATE_INACTIVE;
			child_counter->hw_ops->disable(child_counter);
			cpuctx->active_oncpu--;
			child_ctx->nr_active--;
			child_counter->oncpu = -1;
		}

		list_del_init(&child_counter->list_entry);

		child_ctx->nr_counters--;

		hw_perf_restore(perf_flags);
		curr_rq_unlock_irq_restore(&flags);
	}

	parent_counter = child_counter->parent;
	/*
	 * It can happen that parent exits first, and has counters
	 * that are still around due to the child reference. These
	 * counters need to be zapped - but otherwise linger.
	 */
	if (!parent_counter)
		return;

	parent_val = atomic64_read(&parent_counter->count);
	child_val = atomic64_read(&child_counter->count);

	/*
	 * Add back the child's count to the parent's count:
	 */
	atomic64_add(child_val, &parent_counter->count);

	fput(parent_counter->filp);

	kfree(child_counter);
}
/*
 * When a child task exits, feed back counter values to parent counters.
 *
 * Note: we are running in child context, but the PID is not hashed
 * anymore so new counters will not be added.
 */
void perf_counter_exit_task(struct task_struct *child)
{
	struct perf_counter *child_counter, *tmp;
	struct perf_counter_context *child_ctx;

	child_ctx = &child->perf_counter_ctx;

	if (likely(!child_ctx->nr_counters))
		return;

	list_for_each_entry_safe(child_counter, tmp, &child_ctx->counter_list,
				 list_entry)
		__perf_counter_exit_task(child, child_counter, child_ctx);
}
/*
 * Initialize the perf_counter context in task_struct
 */
void perf_counter_init_task(struct task_struct *child)
{
	struct perf_counter_context *child_ctx, *parent_ctx;
	struct perf_counter *counter, *parent_counter;
	struct task_struct *parent = current;
	unsigned long flags;

	child_ctx  = &child->perf_counter_ctx;
	parent_ctx = &parent->perf_counter_ctx;

	__perf_counter_init_context(child_ctx, child);

	/*
	 * This is executed from the parent task context, so inherit
	 * counters that have been marked for cloning:
	 */
	if (likely(!parent_ctx->nr_counters))
		return;

	/*
	 * Lock the parent list. No need to lock the child - not PID
	 * hashed yet and not running, so nobody can access it.
	 */
	spin_lock_irqsave(&parent_ctx->lock, flags);

	/*
	 * We don't have to disable NMIs - we are only looking at
	 * the list, not manipulating it:
	 */
	list_for_each_entry(counter, &parent_ctx->counter_list, list_entry) {
		if (!counter->hw_event.inherit || counter->group_leader != counter)
			continue;

		/*
		 * Instead of creating recursive hierarchies of counters,
		 * we link inherited counters back to the original parent,
		 * which has a filp for sure, which we use as the reference
		 * count:
		 */
		parent_counter = counter;
		if (counter->parent)
			parent_counter = counter->parent;

		if (inherit_counter(parent_counter, parent,
				    parent_ctx, child, child_ctx))
			break;
	}

	spin_unlock_irqrestore(&parent_ctx->lock, flags);
}
static void __cpuinit perf_counter_init_cpu(int cpu)
{
	struct perf_cpu_context *cpuctx;

	cpuctx = &per_cpu(perf_cpu_context, cpu);
	__perf_counter_init_context(&cpuctx->ctx, NULL);

	mutex_lock(&perf_resource_mutex);
	cpuctx->max_pertask = perf_max_counters - perf_reserved_percpu;
	mutex_unlock(&perf_resource_mutex);

	hw_perf_counter_setup();
}
#ifdef CONFIG_HOTPLUG_CPU
static void __perf_counter_exit_cpu(void *info)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter_context *ctx = &cpuctx->ctx;
	struct perf_counter *counter, *tmp;

	list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry)
		__perf_counter_remove_from_context(counter);
}
static void perf_counter_exit_cpu(int cpu)
{
	smp_call_function_single(cpu, __perf_counter_exit_cpu, NULL, 1);
}
#else
static inline void perf_counter_exit_cpu(int cpu) { }
#endif
static int __cpuinit
perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	switch (action) {

	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		perf_counter_init_cpu(cpu);
		break;

	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		perf_counter_exit_cpu(cpu);
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata perf_cpu_nb = {
	.notifier_call		= perf_cpu_notify,
};
static int __init perf_counter_init(void)
{
	perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
			(void *)(long)smp_processor_id());
	register_cpu_notifier(&perf_cpu_nb);

	return 0;
}
early_initcall(perf_counter_init);
static ssize_t perf_show_reserve_percpu(struct sysdev_class *class, char *buf)
{
	return sprintf(buf, "%d\n", perf_reserved_percpu);
}
static ssize_t
perf_set_reserve_percpu(struct sysdev_class *class,
			const char *buf,
			size_t count)
{
	struct perf_cpu_context *cpuctx;
	unsigned long val;
	int err, cpu, mpt;

	err = strict_strtoul(buf, 10, &val);
	if (err)
		return err;
	if (val > perf_max_counters)
		return -EINVAL;

	mutex_lock(&perf_resource_mutex);
	perf_reserved_percpu = val;
	for_each_online_cpu(cpu) {
		cpuctx = &per_cpu(perf_cpu_context, cpu);
		spin_lock_irq(&cpuctx->ctx.lock);
		mpt = min(perf_max_counters - cpuctx->ctx.nr_counters,
			  perf_max_counters - perf_reserved_percpu);
		cpuctx->max_pertask = mpt;
		spin_unlock_irq(&cpuctx->ctx.lock);
	}
	mutex_unlock(&perf_resource_mutex);

	return count;
}
static ssize_t perf_show_overcommit(struct sysdev_class *class, char *buf)
{
	return sprintf(buf, "%d\n", perf_overcommit);
}
static ssize_t
perf_set_overcommit(struct sysdev_class *class, const char *buf, size_t count)
{
	unsigned long val;
	int err;

	err = strict_strtoul(buf, 10, &val);
	if (err)
		return err;
	if (val > 1)
		return -EINVAL;

	mutex_lock(&perf_resource_mutex);
	perf_overcommit = val;
	mutex_unlock(&perf_resource_mutex);

	return count;
}
static SYSDEV_CLASS_ATTR(
				reserve_percpu,
				0644,
				perf_show_reserve_percpu,
				perf_set_reserve_percpu
			);

static SYSDEV_CLASS_ATTR(
				overcommit,
				0644,
				perf_show_overcommit,
				perf_set_overcommit
			);

static struct attribute *perfclass_attrs[] = {
	&attr_reserve_percpu.attr,
	&attr_overcommit.attr,
	NULL
};

static struct attribute_group perfclass_attr_group = {
	.attrs			= perfclass_attrs,
	.name			= "perf_counters",
};
static int __init perf_counter_sysfs_init(void)
{
	return sysfs_create_group(&cpu_sysdev_class.kset.kobj,
				  &perfclass_attr_group);
}
device_initcall(perf_counter_sysfs_init);
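/*
 * Example shell session for the attributes registered above (a sketch;
 * the exact sysfs path is an assumption derived from the cpu sysdev
 * class and the "perf_counters" group name used here):
 *
 *	# cat /sys/devices/system/cpu/perf_counters/reserve_percpu
 *	0
 *	# echo 2 > /sys/devices/system/cpu/perf_counters/reserve_percpu
 *	# echo 0 > /sys/devices/system/cpu/perf_counters/overcommit
 */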