perf: Default PMU ops
kernel/perf_event.c
1 /*
2 * Performance events core code:
3 *
4 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
5 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
6 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
7 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
8 *
9 * For licensing details see kernel-base/COPYING
10 */
11
12 #include <linux/fs.h>
13 #include <linux/mm.h>
14 #include <linux/cpu.h>
15 #include <linux/smp.h>
16 #include <linux/file.h>
17 #include <linux/poll.h>
18 #include <linux/slab.h>
19 #include <linux/hash.h>
20 #include <linux/sysfs.h>
21 #include <linux/dcache.h>
22 #include <linux/percpu.h>
23 #include <linux/ptrace.h>
24 #include <linux/vmstat.h>
25 #include <linux/vmalloc.h>
26 #include <linux/hardirq.h>
27 #include <linux/rculist.h>
28 #include <linux/uaccess.h>
29 #include <linux/syscalls.h>
30 #include <linux/anon_inodes.h>
31 #include <linux/kernel_stat.h>
32 #include <linux/perf_event.h>
33 #include <linux/ftrace_event.h>
34
35 #include <asm/irq_regs.h>
36
37 /*
38 * Each CPU has a list of per CPU events:
39 */
40 static DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);
41
42 int perf_max_events __read_mostly = 1;
43 static int perf_reserved_percpu __read_mostly;
44 static int perf_overcommit __read_mostly = 1;
45
46 static atomic_t nr_events __read_mostly;
47 static atomic_t nr_mmap_events __read_mostly;
48 static atomic_t nr_comm_events __read_mostly;
49 static atomic_t nr_task_events __read_mostly;
50
51 /*
52 * perf event paranoia level:
53 * -1 - not paranoid at all
54 * 0 - disallow raw tracepoint access for unpriv
55 * 1 - disallow cpu events for unpriv
56 * 2 - disallow kernel profiling for unpriv
57 */
58 int sysctl_perf_event_paranoid __read_mostly = 1;
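/*
 * Editor's note: this knob is exposed at run time as
 * /proc/sys/kernel/perf_event_paranoid (sysctl kernel.perf_event_paranoid);
 * for example, writing -1 there lifts all of the restrictions listed above
 * for unprivileged users, while writing 2 denies them kernel profiling.
 */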
59
60 int sysctl_perf_event_mlock __read_mostly = 512; /* 'free' kb per user */
61
62 /*
63 * max perf event sample rate
64 */
65 int sysctl_perf_event_sample_rate __read_mostly = 100000;
66
67 static atomic64_t perf_event_id;
68
69 /*
70 * Lock for (sysadmin-configurable) event reservations:
71 */
72 static DEFINE_SPINLOCK(perf_resource_lock);
73
74 void __weak perf_event_print_debug(void) { }
75
76 void perf_pmu_disable(struct pmu *pmu)
77 {
78 int *count = this_cpu_ptr(pmu->pmu_disable_count);
79 if (!(*count)++)
80 pmu->pmu_disable(pmu);
81 }
82
83 void perf_pmu_enable(struct pmu *pmu)
84 {
85 int *count = this_cpu_ptr(pmu->pmu_disable_count);
86 if (!--(*count))
87 pmu->pmu_enable(pmu);
88 }
89
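/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * the per-cpu pmu_disable_count makes perf_pmu_disable()/perf_pmu_enable()
 * nest, so only the outermost pair reaches the pmu_disable()/pmu_enable()
 * callbacks.  The helper name below is hypothetical.
 */
static inline void example_pmu_critical_section(struct pmu *pmu)
{
	perf_pmu_disable(pmu);	/* count 0 -> 1: pmu->pmu_disable() runs */
	perf_pmu_disable(pmu);	/* count 1 -> 2: no hardware access */
	/* ... reprogram events while the PMU is quiesced ... */
	perf_pmu_enable(pmu);	/* count 2 -> 1: no hardware access */
	perf_pmu_enable(pmu);	/* count 1 -> 0: pmu->pmu_enable() runs */
}
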
90 static void get_ctx(struct perf_event_context *ctx)
91 {
92 WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
93 }
94
95 static void free_ctx(struct rcu_head *head)
96 {
97 struct perf_event_context *ctx;
98
99 ctx = container_of(head, struct perf_event_context, rcu_head);
100 kfree(ctx);
101 }
102
103 static void put_ctx(struct perf_event_context *ctx)
104 {
105 if (atomic_dec_and_test(&ctx->refcount)) {
106 if (ctx->parent_ctx)
107 put_ctx(ctx->parent_ctx);
108 if (ctx->task)
109 put_task_struct(ctx->task);
110 call_rcu(&ctx->rcu_head, free_ctx);
111 }
112 }
113
114 static void unclone_ctx(struct perf_event_context *ctx)
115 {
116 if (ctx->parent_ctx) {
117 put_ctx(ctx->parent_ctx);
118 ctx->parent_ctx = NULL;
119 }
120 }
121
122 /*
123 * If we inherit events we want to return the parent event id
124 * to userspace.
125 */
126 static u64 primary_event_id(struct perf_event *event)
127 {
128 u64 id = event->id;
129
130 if (event->parent)
131 id = event->parent->id;
132
133 return id;
134 }
135
136 /*
137 * Get the perf_event_context for a task and lock it.
138 * This has to cope with the fact that until it is locked,
139 * the context could get moved to another task.
140 */
141 static struct perf_event_context *
142 perf_lock_task_context(struct task_struct *task, unsigned long *flags)
143 {
144 struct perf_event_context *ctx;
145
146 rcu_read_lock();
147 retry:
148 ctx = rcu_dereference(task->perf_event_ctxp);
149 if (ctx) {
150 /*
151 * If this context is a clone of another, it might
152 * get swapped for another underneath us by
153 * perf_event_task_sched_out, though the
154 * rcu_read_lock() protects us from any context
155 * getting freed. Lock the context and check if it
156 * got swapped before we could get the lock, and retry
157 * if so. If we locked the right context, then it
158 * can't get swapped on us any more.
159 */
160 raw_spin_lock_irqsave(&ctx->lock, *flags);
161 if (ctx != rcu_dereference(task->perf_event_ctxp)) {
162 raw_spin_unlock_irqrestore(&ctx->lock, *flags);
163 goto retry;
164 }
165
166 if (!atomic_inc_not_zero(&ctx->refcount)) {
167 raw_spin_unlock_irqrestore(&ctx->lock, *flags);
168 ctx = NULL;
169 }
170 }
171 rcu_read_unlock();
172 return ctx;
173 }
174
175 /*
176 * Get the context for a task and increment its pin_count so it
177 * can't get swapped to another task. This also increments its
178 * reference count so that the context can't get freed.
179 */
180 static struct perf_event_context *perf_pin_task_context(struct task_struct *task)
181 {
182 struct perf_event_context *ctx;
183 unsigned long flags;
184
185 ctx = perf_lock_task_context(task, &flags);
186 if (ctx) {
187 ++ctx->pin_count;
188 raw_spin_unlock_irqrestore(&ctx->lock, flags);
189 }
190 return ctx;
191 }
192
193 static void perf_unpin_context(struct perf_event_context *ctx)
194 {
195 unsigned long flags;
196
197 raw_spin_lock_irqsave(&ctx->lock, flags);
198 --ctx->pin_count;
199 raw_spin_unlock_irqrestore(&ctx->lock, flags);
200 put_ctx(ctx);
201 }
202
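/*
 * Illustrative usage sketch (editor's addition): pin a task's context
 * across a section that may drop ctx->lock or sleep, then release both
 * the pin and the reference taken by perf_lock_task_context():
 *
 *	ctx = perf_pin_task_context(task);
 *	if (ctx) {
 *		... use ctx; it can be neither swapped nor freed here ...
 *		perf_unpin_context(ctx);
 *	}
 */
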
203 static inline u64 perf_clock(void)
204 {
205 return local_clock();
206 }
207
208 /*
209 * Update the record of the current time in a context.
210 */
211 static void update_context_time(struct perf_event_context *ctx)
212 {
213 u64 now = perf_clock();
214
215 ctx->time += now - ctx->timestamp;
216 ctx->timestamp = now;
217 }
218
219 /*
220 * Update the total_time_enabled and total_time_running fields for an event.
221 */
222 static void update_event_times(struct perf_event *event)
223 {
224 struct perf_event_context *ctx = event->ctx;
225 u64 run_end;
226
227 if (event->state < PERF_EVENT_STATE_INACTIVE ||
228 event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
229 return;
230
231 if (ctx->is_active)
232 run_end = ctx->time;
233 else
234 run_end = event->tstamp_stopped;
235
236 event->total_time_enabled = run_end - event->tstamp_enabled;
237
238 if (event->state == PERF_EVENT_STATE_INACTIVE)
239 run_end = event->tstamp_stopped;
240 else
241 run_end = ctx->time;
242
243 event->total_time_running = run_end - event->tstamp_running;
244 }
245
246 /*
247 * Update total_time_enabled and total_time_running for all events in a group.
248 */
249 static void update_group_times(struct perf_event *leader)
250 {
251 struct perf_event *event;
252
253 update_event_times(leader);
254 list_for_each_entry(event, &leader->sibling_list, group_entry)
255 update_event_times(event);
256 }
257
258 static struct list_head *
259 ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
260 {
261 if (event->attr.pinned)
262 return &ctx->pinned_groups;
263 else
264 return &ctx->flexible_groups;
265 }
266
267 /*
268 * Add an event to the lists for its context.
269 * Must be called with ctx->mutex and ctx->lock held.
270 */
271 static void
272 list_add_event(struct perf_event *event, struct perf_event_context *ctx)
273 {
274 WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
275 event->attach_state |= PERF_ATTACH_CONTEXT;
276
277 /*
278 * If we're a standalone event or group leader, we go on the context
279 * list; group events are kept attached to the group so that
280 * perf_group_detach can, at all times, locate all siblings.
281 */
282 if (event->group_leader == event) {
283 struct list_head *list;
284
285 if (is_software_event(event))
286 event->group_flags |= PERF_GROUP_SOFTWARE;
287
288 list = ctx_group_list(event, ctx);
289 list_add_tail(&event->group_entry, list);
290 }
291
292 list_add_rcu(&event->event_entry, &ctx->event_list);
293 ctx->nr_events++;
294 if (event->attr.inherit_stat)
295 ctx->nr_stat++;
296 }
297
298 static void perf_group_attach(struct perf_event *event)
299 {
300 struct perf_event *group_leader = event->group_leader;
301
302 WARN_ON_ONCE(event->attach_state & PERF_ATTACH_GROUP);
303 event->attach_state |= PERF_ATTACH_GROUP;
304
305 if (group_leader == event)
306 return;
307
308 if (group_leader->group_flags & PERF_GROUP_SOFTWARE &&
309 !is_software_event(event))
310 group_leader->group_flags &= ~PERF_GROUP_SOFTWARE;
311
312 list_add_tail(&event->group_entry, &group_leader->sibling_list);
313 group_leader->nr_siblings++;
314 }
315
316 /*
317 * Remove an event from the lists for its context.
318 * Must be called with ctx->mutex and ctx->lock held.
319 */
320 static void
321 list_del_event(struct perf_event *event, struct perf_event_context *ctx)
322 {
323 /*
324 * We can have double detach due to exit/hot-unplug + close.
325 */
326 if (!(event->attach_state & PERF_ATTACH_CONTEXT))
327 return;
328
329 event->attach_state &= ~PERF_ATTACH_CONTEXT;
330
331 ctx->nr_events--;
332 if (event->attr.inherit_stat)
333 ctx->nr_stat--;
334
335 list_del_rcu(&event->event_entry);
336
337 if (event->group_leader == event)
338 list_del_init(&event->group_entry);
339
340 update_group_times(event);
341
342 /*
343 * If event was in error state, then keep it
344 * that way, otherwise bogus counts will be
345 * returned on read(). The only way to get out
346 * of error state is by explicit re-enabling
347 * of the event
348 */
349 if (event->state > PERF_EVENT_STATE_OFF)
350 event->state = PERF_EVENT_STATE_OFF;
351 }
352
353 static void perf_group_detach(struct perf_event *event)
354 {
355 struct perf_event *sibling, *tmp;
356 struct list_head *list = NULL;
357
358 /*
359 * We can have double detach due to exit/hot-unplug + close.
360 */
361 if (!(event->attach_state & PERF_ATTACH_GROUP))
362 return;
363
364 event->attach_state &= ~PERF_ATTACH_GROUP;
365
366 /*
367 * If this is a sibling, remove it from its group.
368 */
369 if (event->group_leader != event) {
370 list_del_init(&event->group_entry);
371 event->group_leader->nr_siblings--;
372 return;
373 }
374
375 if (!list_empty(&event->group_entry))
376 list = &event->group_entry;
377
378 /*
379 * If this was a group event with sibling events then
380 * upgrade the siblings to singleton events by adding them
381 * to whatever list we are on.
382 */
383 list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
384 if (list)
385 list_move_tail(&sibling->group_entry, list);
386 sibling->group_leader = sibling;
387
388 /* Inherit group flags from the previous leader */
389 sibling->group_flags = event->group_flags;
390 }
391 }
392
393 static inline int
394 event_filter_match(struct perf_event *event)
395 {
396 return event->cpu == -1 || event->cpu == smp_processor_id();
397 }
398
399 static void
400 event_sched_out(struct perf_event *event,
401 struct perf_cpu_context *cpuctx,
402 struct perf_event_context *ctx)
403 {
404 u64 delta;
405 /*
406 * An event which could not be activated because of a
407 * filter mismatch still needs to have its timings
408 * maintained, otherwise bogus information is returned
409 * via read() for time_enabled, time_running:
410 */
411 if (event->state == PERF_EVENT_STATE_INACTIVE
412 && !event_filter_match(event)) {
413 delta = ctx->time - event->tstamp_stopped;
414 event->tstamp_running += delta;
415 event->tstamp_stopped = ctx->time;
416 }
417
418 if (event->state != PERF_EVENT_STATE_ACTIVE)
419 return;
420
421 event->state = PERF_EVENT_STATE_INACTIVE;
422 if (event->pending_disable) {
423 event->pending_disable = 0;
424 event->state = PERF_EVENT_STATE_OFF;
425 }
426 event->tstamp_stopped = ctx->time;
427 event->pmu->disable(event);
428 event->oncpu = -1;
429
430 if (!is_software_event(event))
431 cpuctx->active_oncpu--;
432 ctx->nr_active--;
433 if (event->attr.exclusive || !cpuctx->active_oncpu)
434 cpuctx->exclusive = 0;
435 }
436
437 static void
438 group_sched_out(struct perf_event *group_event,
439 struct perf_cpu_context *cpuctx,
440 struct perf_event_context *ctx)
441 {
442 struct perf_event *event;
443 int state = group_event->state;
444
445 event_sched_out(group_event, cpuctx, ctx);
446
447 /*
448 * Schedule out siblings (if any):
449 */
450 list_for_each_entry(event, &group_event->sibling_list, group_entry)
451 event_sched_out(event, cpuctx, ctx);
452
453 if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive)
454 cpuctx->exclusive = 0;
455 }
456
457 /*
458 * Cross CPU call to remove a performance event
459 *
460 * We disable the event on the hardware level first. After that we
461 * remove it from the context list.
462 */
463 static void __perf_event_remove_from_context(void *info)
464 {
465 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
466 struct perf_event *event = info;
467 struct perf_event_context *ctx = event->ctx;
468
469 /*
470 * If this is a task context, we need to check whether it is
471 * the current task context of this cpu. If not it has been
472 * scheduled out before the smp call arrived.
473 */
474 if (ctx->task && cpuctx->task_ctx != ctx)
475 return;
476
477 raw_spin_lock(&ctx->lock);
478
479 event_sched_out(event, cpuctx, ctx);
480
481 list_del_event(event, ctx);
482
483 if (!ctx->task) {
484 /*
485 * Allow more per task events with respect to the
486 * reservation:
487 */
488 cpuctx->max_pertask =
489 min(perf_max_events - ctx->nr_events,
490 perf_max_events - perf_reserved_percpu);
491 }
492
493 raw_spin_unlock(&ctx->lock);
494 }
495
496
497 /*
498 * Remove the event from a task's (or a CPU's) list of events.
499 *
500 * Must be called with ctx->mutex held.
501 *
502 * CPU events are removed with a smp call. For task events we only
503 * call when the task is on a CPU.
504 *
505 * If event->ctx is a cloned context, callers must make sure that
506 * every task struct that event->ctx->task could possibly point to
507 * remains valid. This is OK when called from perf_release since
508 * that only calls us on the top-level context, which can't be a clone.
509 * When called from perf_event_exit_task, it's OK because the
510 * context has been detached from its task.
511 */
512 static void perf_event_remove_from_context(struct perf_event *event)
513 {
514 struct perf_event_context *ctx = event->ctx;
515 struct task_struct *task = ctx->task;
516
517 if (!task) {
518 /*
519 * Per cpu events are removed via an smp call and
520 * the removal is always successful.
521 */
522 smp_call_function_single(event->cpu,
523 __perf_event_remove_from_context,
524 event, 1);
525 return;
526 }
527
528 retry:
529 task_oncpu_function_call(task, __perf_event_remove_from_context,
530 event);
531
532 raw_spin_lock_irq(&ctx->lock);
533 /*
534 * If the context is active we need to retry the smp call.
535 */
536 if (ctx->nr_active && !list_empty(&event->group_entry)) {
537 raw_spin_unlock_irq(&ctx->lock);
538 goto retry;
539 }
540
541 /*
542 * The lock prevents this context from being scheduled in, so we
543 * can remove the event safely if the call above did not
544 * succeed.
545 */
546 if (!list_empty(&event->group_entry))
547 list_del_event(event, ctx);
548 raw_spin_unlock_irq(&ctx->lock);
549 }
550
551 /*
552 * Cross CPU call to disable a performance event
553 */
554 static void __perf_event_disable(void *info)
555 {
556 struct perf_event *event = info;
557 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
558 struct perf_event_context *ctx = event->ctx;
559
560 /*
561 * If this is a per-task event, we need to check whether this
562 * event's task is the current task on this cpu.
563 */
564 if (ctx->task && cpuctx->task_ctx != ctx)
565 return;
566
567 raw_spin_lock(&ctx->lock);
568
569 /*
570 * If the event is on, turn it off.
571 * If it is in error state, leave it in error state.
572 */
573 if (event->state >= PERF_EVENT_STATE_INACTIVE) {
574 update_context_time(ctx);
575 update_group_times(event);
576 if (event == event->group_leader)
577 group_sched_out(event, cpuctx, ctx);
578 else
579 event_sched_out(event, cpuctx, ctx);
580 event->state = PERF_EVENT_STATE_OFF;
581 }
582
583 raw_spin_unlock(&ctx->lock);
584 }
585
586 /*
587 * Disable an event.
588 *
589 * If event->ctx is a cloned context, callers must make sure that
590 * every task struct that event->ctx->task could possibly point to
591 * remains valid. This condition is satisfied when called through
592 * perf_event_for_each_child or perf_event_for_each because they
593 * hold the top-level event's child_mutex, so any descendant that
594 * goes to exit will block in sync_child_event.
595 * When called from perf_pending_event it's OK because event->ctx
596 * is the current context on this CPU and preemption is disabled,
597 * hence we can't get into perf_event_task_sched_out for this context.
598 */
599 void perf_event_disable(struct perf_event *event)
600 {
601 struct perf_event_context *ctx = event->ctx;
602 struct task_struct *task = ctx->task;
603
604 if (!task) {
605 /*
606 * Disable the event on the cpu that it's on
607 */
608 smp_call_function_single(event->cpu, __perf_event_disable,
609 event, 1);
610 return;
611 }
612
613 retry:
614 task_oncpu_function_call(task, __perf_event_disable, event);
615
616 raw_spin_lock_irq(&ctx->lock);
617 /*
618 * If the event is still active, we need to retry the cross-call.
619 */
620 if (event->state == PERF_EVENT_STATE_ACTIVE) {
621 raw_spin_unlock_irq(&ctx->lock);
622 goto retry;
623 }
624
625 /*
626 * Since we have the lock this context can't be scheduled
627 * in, so we can change the state safely.
628 */
629 if (event->state == PERF_EVENT_STATE_INACTIVE) {
630 update_group_times(event);
631 event->state = PERF_EVENT_STATE_OFF;
632 }
633
634 raw_spin_unlock_irq(&ctx->lock);
635 }
636
637 static int
638 event_sched_in(struct perf_event *event,
639 struct perf_cpu_context *cpuctx,
640 struct perf_event_context *ctx)
641 {
642 if (event->state <= PERF_EVENT_STATE_OFF)
643 return 0;
644
645 event->state = PERF_EVENT_STATE_ACTIVE;
646 event->oncpu = smp_processor_id();
647 /*
648 * The new state must be visible before we turn it on in the hardware:
649 */
650 smp_wmb();
651
652 if (event->pmu->enable(event)) {
653 event->state = PERF_EVENT_STATE_INACTIVE;
654 event->oncpu = -1;
655 return -EAGAIN;
656 }
657
658 event->tstamp_running += ctx->time - event->tstamp_stopped;
659
660 if (!is_software_event(event))
661 cpuctx->active_oncpu++;
662 ctx->nr_active++;
663
664 if (event->attr.exclusive)
665 cpuctx->exclusive = 1;
666
667 return 0;
668 }
669
670 static int
671 group_sched_in(struct perf_event *group_event,
672 struct perf_cpu_context *cpuctx,
673 struct perf_event_context *ctx)
674 {
675 struct perf_event *event, *partial_group = NULL;
676 struct pmu *pmu = group_event->pmu;
677
678 if (group_event->state == PERF_EVENT_STATE_OFF)
679 return 0;
680
681 pmu->start_txn(pmu);
682
683 if (event_sched_in(group_event, cpuctx, ctx)) {
684 pmu->cancel_txn(pmu);
685 return -EAGAIN;
686 }
687
688 /*
689 * Schedule in siblings as one group (if any):
690 */
691 list_for_each_entry(event, &group_event->sibling_list, group_entry) {
692 if (event_sched_in(event, cpuctx, ctx)) {
693 partial_group = event;
694 goto group_error;
695 }
696 }
697
698 if (!pmu->commit_txn(pmu))
699 return 0;
700
701 group_error:
702 /*
703 * Groups can be scheduled in as one unit only, so undo any
704 * partial group before returning:
705 */
706 list_for_each_entry(event, &group_event->sibling_list, group_entry) {
707 if (event == partial_group)
708 break;
709 event_sched_out(event, cpuctx, ctx);
710 }
711 event_sched_out(group_event, cpuctx, ctx);
712
713 pmu->cancel_txn(pmu);
714
715 return -EAGAIN;
716 }
717
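/*
 * Editor's sketch (not part of the original file): group_sched_in() drives
 * the pmu transaction protocol unconditionally -- start_txn(), one enable()
 * per group member, then commit_txn() or cancel_txn().  A pmu without
 * hardware support for batch scheduling could therefore, in principle,
 * supply no-op callbacks along these (hypothetical) lines:
 */
static void example_pmu_nop_txn(struct pmu *pmu)
{
	/* nothing to prepare or roll back */
}

static int example_pmu_commit_txn(struct pmu *pmu)
{
	/* every member was already accepted or rejected by ->enable() */
	return 0;
}
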
718 /*
719 * Work out whether we can put this event group on the CPU now.
720 */
721 static int group_can_go_on(struct perf_event *event,
722 struct perf_cpu_context *cpuctx,
723 int can_add_hw)
724 {
725 /*
726 * Groups consisting entirely of software events can always go on.
727 */
728 if (event->group_flags & PERF_GROUP_SOFTWARE)
729 return 1;
730 /*
731 * If an exclusive group is already on, no other hardware
732 * events can go on.
733 */
734 if (cpuctx->exclusive)
735 return 0;
736 /*
737 * If this group is exclusive and there are already
738 * events on the CPU, it can't go on.
739 */
740 if (event->attr.exclusive && cpuctx->active_oncpu)
741 return 0;
742 /*
743 * Otherwise, try to add it if all previous groups were able
744 * to go on.
745 */
746 return can_add_hw;
747 }
748
749 static void add_event_to_ctx(struct perf_event *event,
750 struct perf_event_context *ctx)
751 {
752 list_add_event(event, ctx);
753 perf_group_attach(event);
754 event->tstamp_enabled = ctx->time;
755 event->tstamp_running = ctx->time;
756 event->tstamp_stopped = ctx->time;
757 }
758
759 /*
760 * Cross CPU call to install and enable a performance event
761 *
762 * Must be called with ctx->mutex held
763 */
764 static void __perf_install_in_context(void *info)
765 {
766 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
767 struct perf_event *event = info;
768 struct perf_event_context *ctx = event->ctx;
769 struct perf_event *leader = event->group_leader;
770 int err;
771
772 /*
773 * If this is a task context, we need to check whether it is
774 * the current task context of this cpu. If not it has been
775 * scheduled out before the smp call arrived.
776 * Or possibly this is the right context but it isn't
777 * on this cpu because it had no events.
778 */
779 if (ctx->task && cpuctx->task_ctx != ctx) {
780 if (cpuctx->task_ctx || ctx->task != current)
781 return;
782 cpuctx->task_ctx = ctx;
783 }
784
785 raw_spin_lock(&ctx->lock);
786 ctx->is_active = 1;
787 update_context_time(ctx);
788
789 add_event_to_ctx(event, ctx);
790
791 if (event->cpu != -1 && event->cpu != smp_processor_id())
792 goto unlock;
793
794 /*
795 * Don't put the event on if it is disabled or if
796 * it is in a group and the group isn't on.
797 */
798 if (event->state != PERF_EVENT_STATE_INACTIVE ||
799 (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE))
800 goto unlock;
801
802 /*
803 * An exclusive event can't go on if there are already active
804 * hardware events, and no hardware event can go on if there
805 * is already an exclusive event on.
806 */
807 if (!group_can_go_on(event, cpuctx, 1))
808 err = -EEXIST;
809 else
810 err = event_sched_in(event, cpuctx, ctx);
811
812 if (err) {
813 /*
814 * This event couldn't go on. If it is in a group
815 * then we have to pull the whole group off.
816 * If the event group is pinned then put it in error state.
817 */
818 if (leader != event)
819 group_sched_out(leader, cpuctx, ctx);
820 if (leader->attr.pinned) {
821 update_group_times(leader);
822 leader->state = PERF_EVENT_STATE_ERROR;
823 }
824 }
825
826 if (!err && !ctx->task && cpuctx->max_pertask)
827 cpuctx->max_pertask--;
828
829 unlock:
830 raw_spin_unlock(&ctx->lock);
831 }
832
833 /*
834 * Attach a performance event to a context
835 *
836 * First we add the event to the list with the hardware enable bit
837 * in event->hw_config cleared.
838 *
839 * If the event is attached to a task which is on a CPU we use a smp
840 * call to enable it in the task context. The task might have been
841 * scheduled away, but we check this in the smp call again.
842 *
843 * Must be called with ctx->mutex held.
844 */
845 static void
846 perf_install_in_context(struct perf_event_context *ctx,
847 struct perf_event *event,
848 int cpu)
849 {
850 struct task_struct *task = ctx->task;
851
852 if (!task) {
853 /*
854 * Per cpu events are installed via an smp call and
855 * the install is always successful.
856 */
857 smp_call_function_single(cpu, __perf_install_in_context,
858 event, 1);
859 return;
860 }
861
862 retry:
863 task_oncpu_function_call(task, __perf_install_in_context,
864 event);
865
866 raw_spin_lock_irq(&ctx->lock);
867 /*
868 * If the context is active and the event was not added, retry the smp call.
869 */
870 if (ctx->is_active && list_empty(&event->group_entry)) {
871 raw_spin_unlock_irq(&ctx->lock);
872 goto retry;
873 }
874
875 /*
876 * The lock prevents this context from being scheduled in, so we
877 * can add the event safely if the call above did not
878 * succeed.
879 */
880 if (list_empty(&event->group_entry))
881 add_event_to_ctx(event, ctx);
882 raw_spin_unlock_irq(&ctx->lock);
883 }
884
885 /*
886 * Put an event into inactive state and update time fields.
887 * Enabling the leader of a group effectively enables all
888 * the group members that aren't explicitly disabled, so we
889 * have to update their ->tstamp_enabled also.
890 * Note: this works for group members as well as group leaders
891 * since the non-leader members' sibling_lists will be empty.
892 */
893 static void __perf_event_mark_enabled(struct perf_event *event,
894 struct perf_event_context *ctx)
895 {
896 struct perf_event *sub;
897
898 event->state = PERF_EVENT_STATE_INACTIVE;
899 event->tstamp_enabled = ctx->time - event->total_time_enabled;
900 list_for_each_entry(sub, &event->sibling_list, group_entry) {
901 if (sub->state >= PERF_EVENT_STATE_INACTIVE) {
902 sub->tstamp_enabled =
903 ctx->time - sub->total_time_enabled;
904 }
905 }
906 }
907
908 /*
909 * Cross CPU call to enable a performance event
910 */
911 static void __perf_event_enable(void *info)
912 {
913 struct perf_event *event = info;
914 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
915 struct perf_event_context *ctx = event->ctx;
916 struct perf_event *leader = event->group_leader;
917 int err;
918
919 /*
920 * If this is a per-task event, we need to check whether this
921 * event's task is the current task on this cpu.
922 */
923 if (ctx->task && cpuctx->task_ctx != ctx) {
924 if (cpuctx->task_ctx || ctx->task != current)
925 return;
926 cpuctx->task_ctx = ctx;
927 }
928
929 raw_spin_lock(&ctx->lock);
930 ctx->is_active = 1;
931 update_context_time(ctx);
932
933 if (event->state >= PERF_EVENT_STATE_INACTIVE)
934 goto unlock;
935 __perf_event_mark_enabled(event, ctx);
936
937 if (event->cpu != -1 && event->cpu != smp_processor_id())
938 goto unlock;
939
940 /*
941 * If the event is in a group and isn't the group leader,
942 * then don't put it on unless the group is on.
943 */
944 if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE)
945 goto unlock;
946
947 if (!group_can_go_on(event, cpuctx, 1)) {
948 err = -EEXIST;
949 } else {
950 if (event == leader)
951 err = group_sched_in(event, cpuctx, ctx);
952 else
953 err = event_sched_in(event, cpuctx, ctx);
954 }
955
956 if (err) {
957 /*
958 * If this event can't go on and it's part of a
959 * group, then the whole group has to come off.
960 */
961 if (leader != event)
962 group_sched_out(leader, cpuctx, ctx);
963 if (leader->attr.pinned) {
964 update_group_times(leader);
965 leader->state = PERF_EVENT_STATE_ERROR;
966 }
967 }
968
969 unlock:
970 raw_spin_unlock(&ctx->lock);
971 }
972
973 /*
974 * Enable an event.
975 *
976 * If event->ctx is a cloned context, callers must make sure that
977 * every task struct that event->ctx->task could possibly point to
978 * remains valid. This condition is satisfied when called through
979 * perf_event_for_each_child or perf_event_for_each as described
980 * for perf_event_disable.
981 */
982 void perf_event_enable(struct perf_event *event)
983 {
984 struct perf_event_context *ctx = event->ctx;
985 struct task_struct *task = ctx->task;
986
987 if (!task) {
988 /*
989 * Enable the event on the cpu that it's on
990 */
991 smp_call_function_single(event->cpu, __perf_event_enable,
992 event, 1);
993 return;
994 }
995
996 raw_spin_lock_irq(&ctx->lock);
997 if (event->state >= PERF_EVENT_STATE_INACTIVE)
998 goto out;
999
1000 /*
1001 * If the event is in error state, clear that first.
1002 * That way, if we see the event in error state below, we
1003 * know that it has gone back into error state, as distinct
1004 * from the task having been scheduled away before the
1005 * cross-call arrived.
1006 */
1007 if (event->state == PERF_EVENT_STATE_ERROR)
1008 event->state = PERF_EVENT_STATE_OFF;
1009
1010 retry:
1011 raw_spin_unlock_irq(&ctx->lock);
1012 task_oncpu_function_call(task, __perf_event_enable, event);
1013
1014 raw_spin_lock_irq(&ctx->lock);
1015
1016 /*
1017 * If the context is active and the event is still off,
1018 * we need to retry the cross-call.
1019 */
1020 if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF)
1021 goto retry;
1022
1023 /*
1024 * Since we have the lock this context can't be scheduled
1025 * in, so we can change the state safely.
1026 */
1027 if (event->state == PERF_EVENT_STATE_OFF)
1028 __perf_event_mark_enabled(event, ctx);
1029
1030 out:
1031 raw_spin_unlock_irq(&ctx->lock);
1032 }
1033
1034 static int perf_event_refresh(struct perf_event *event, int refresh)
1035 {
1036 /*
1037 * not supported on inherited events
1038 */
1039 if (event->attr.inherit)
1040 return -EINVAL;
1041
1042 atomic_add(refresh, &event->event_limit);
1043 perf_event_enable(event);
1044
1045 return 0;
1046 }
1047
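/*
 * Editor's note: perf_event_refresh() backs the PERF_EVENT_IOC_REFRESH
 * ioctl; event_limit is consumed by the overflow handler, so adding
 * 'refresh' here re-arms the event for that many further overflows
 * before it gets disabled again.
 */
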
1048 enum event_type_t {
1049 EVENT_FLEXIBLE = 0x1,
1050 EVENT_PINNED = 0x2,
1051 EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
1052 };
1053
1054 static void ctx_sched_out(struct perf_event_context *ctx,
1055 struct perf_cpu_context *cpuctx,
1056 enum event_type_t event_type)
1057 {
1058 struct perf_event *event;
1059
1060 raw_spin_lock(&ctx->lock);
1061 ctx->is_active = 0;
1062 if (likely(!ctx->nr_events))
1063 goto out;
1064 update_context_time(ctx);
1065
1066 if (!ctx->nr_active)
1067 goto out;
1068
1069 if (event_type & EVENT_PINNED) {
1070 list_for_each_entry(event, &ctx->pinned_groups, group_entry)
1071 group_sched_out(event, cpuctx, ctx);
1072 }
1073
1074 if (event_type & EVENT_FLEXIBLE) {
1075 list_for_each_entry(event, &ctx->flexible_groups, group_entry)
1076 group_sched_out(event, cpuctx, ctx);
1077 }
1078 out:
1079 raw_spin_unlock(&ctx->lock);
1080 }
1081
1082 /*
1083 * Test whether two contexts are equivalent, i.e. whether they
1084 * have both been cloned from the same version of the same context
1085 * and they both have the same number of enabled events.
1086 * If the number of enabled events is the same, then the set
1087 * of enabled events should be the same, because these are both
1088 * inherited contexts, therefore we can't access individual events
1089 * in them directly with an fd; we can only enable/disable all
1090 * events via prctl, or enable/disable all events in a family
1091 * via ioctl, which will have the same effect on both contexts.
1092 */
1093 static int context_equiv(struct perf_event_context *ctx1,
1094 struct perf_event_context *ctx2)
1095 {
1096 return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx
1097 && ctx1->parent_gen == ctx2->parent_gen
1098 && !ctx1->pin_count && !ctx2->pin_count;
1099 }
1100
1101 static void __perf_event_sync_stat(struct perf_event *event,
1102 struct perf_event *next_event)
1103 {
1104 u64 value;
1105
1106 if (!event->attr.inherit_stat)
1107 return;
1108
1109 /*
1110 * Update the event value, we cannot use perf_event_read()
1111 * because we're in the middle of a context switch and have IRQs
1112 * disabled, which upsets smp_call_function_single(), however
1113 * we know the event must be on the current CPU, therefore we
1114 * don't need to use it.
1115 */
1116 switch (event->state) {
1117 case PERF_EVENT_STATE_ACTIVE:
1118 event->pmu->read(event);
1119 /* fall-through */
1120
1121 case PERF_EVENT_STATE_INACTIVE:
1122 update_event_times(event);
1123 break;
1124
1125 default:
1126 break;
1127 }
1128
1129 /*
1130 * In order to keep per-task stats reliable we need to flip the event
1131 * values when we flip the contexts.
1132 */
1133 value = local64_read(&next_event->count);
1134 value = local64_xchg(&event->count, value);
1135 local64_set(&next_event->count, value);
1136
1137 swap(event->total_time_enabled, next_event->total_time_enabled);
1138 swap(event->total_time_running, next_event->total_time_running);
1139
1140 /*
1141 * Since we swizzled the values, update the user visible data too.
1142 */
1143 perf_event_update_userpage(event);
1144 perf_event_update_userpage(next_event);
1145 }
1146
1147 #define list_next_entry(pos, member) \
1148 list_entry(pos->member.next, typeof(*pos), member)
1149
1150 static void perf_event_sync_stat(struct perf_event_context *ctx,
1151 struct perf_event_context *next_ctx)
1152 {
1153 struct perf_event *event, *next_event;
1154
1155 if (!ctx->nr_stat)
1156 return;
1157
1158 update_context_time(ctx);
1159
1160 event = list_first_entry(&ctx->event_list,
1161 struct perf_event, event_entry);
1162
1163 next_event = list_first_entry(&next_ctx->event_list,
1164 struct perf_event, event_entry);
1165
1166 while (&event->event_entry != &ctx->event_list &&
1167 &next_event->event_entry != &next_ctx->event_list) {
1168
1169 __perf_event_sync_stat(event, next_event);
1170
1171 event = list_next_entry(event, event_entry);
1172 next_event = list_next_entry(next_event, event_entry);
1173 }
1174 }
1175
1176 /*
1177 * Called from scheduler to remove the events of the current task,
1178 * with interrupts disabled.
1179 *
1180 * We stop each event and update the event value in event->count.
1181 *
1182 * This does not protect us against NMI, but disable()
1183 * sets the disabled bit in the control field of event _before_
1184 * accessing the event control register. If an NMI hits, then it will
1185 * not restart the event.
1186 */
1187 void perf_event_task_sched_out(struct task_struct *task,
1188 struct task_struct *next)
1189 {
1190 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
1191 struct perf_event_context *ctx = task->perf_event_ctxp;
1192 struct perf_event_context *next_ctx;
1193 struct perf_event_context *parent;
1194 int do_switch = 1;
1195
1196 perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);
1197
1198 if (likely(!ctx || !cpuctx->task_ctx))
1199 return;
1200
1201 rcu_read_lock();
1202 parent = rcu_dereference(ctx->parent_ctx);
1203 next_ctx = next->perf_event_ctxp;
1204 if (parent && next_ctx &&
1205 rcu_dereference(next_ctx->parent_ctx) == parent) {
1206 /*
1207 * Looks like the two contexts are clones, so we might be
1208 * able to optimize the context switch. We lock both
1209 * contexts and check that they are clones under the
1210 * lock (including re-checking that neither has been
1211 * uncloned in the meantime). It doesn't matter which
1212 * order we take the locks because no other cpu could
1213 * be trying to lock both of these tasks.
1214 */
1215 raw_spin_lock(&ctx->lock);
1216 raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
1217 if (context_equiv(ctx, next_ctx)) {
1218 /*
1219 * XXX do we need a memory barrier of sorts
1220 * wrt to rcu_dereference() of perf_event_ctxp
1221 */
1222 task->perf_event_ctxp = next_ctx;
1223 next->perf_event_ctxp = ctx;
1224 ctx->task = next;
1225 next_ctx->task = task;
1226 do_switch = 0;
1227
1228 perf_event_sync_stat(ctx, next_ctx);
1229 }
1230 raw_spin_unlock(&next_ctx->lock);
1231 raw_spin_unlock(&ctx->lock);
1232 }
1233 rcu_read_unlock();
1234
1235 if (do_switch) {
1236 ctx_sched_out(ctx, cpuctx, EVENT_ALL);
1237 cpuctx->task_ctx = NULL;
1238 }
1239 }
1240
1241 static void task_ctx_sched_out(struct perf_event_context *ctx,
1242 enum event_type_t event_type)
1243 {
1244 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
1245
1246 if (!cpuctx->task_ctx)
1247 return;
1248
1249 if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
1250 return;
1251
1252 ctx_sched_out(ctx, cpuctx, event_type);
1253 cpuctx->task_ctx = NULL;
1254 }
1255
1256 /*
1257 * Called with IRQs disabled
1258 */
1259 static void __perf_event_task_sched_out(struct perf_event_context *ctx)
1260 {
1261 task_ctx_sched_out(ctx, EVENT_ALL);
1262 }
1263
1264 /*
1265 * Called with IRQs disabled
1266 */
1267 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
1268 enum event_type_t event_type)
1269 {
1270 ctx_sched_out(&cpuctx->ctx, cpuctx, event_type);
1271 }
1272
1273 static void
1274 ctx_pinned_sched_in(struct perf_event_context *ctx,
1275 struct perf_cpu_context *cpuctx)
1276 {
1277 struct perf_event *event;
1278
1279 list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
1280 if (event->state <= PERF_EVENT_STATE_OFF)
1281 continue;
1282 if (event->cpu != -1 && event->cpu != smp_processor_id())
1283 continue;
1284
1285 if (group_can_go_on(event, cpuctx, 1))
1286 group_sched_in(event, cpuctx, ctx);
1287
1288 /*
1289 * If this pinned group hasn't been scheduled,
1290 * put it in error state.
1291 */
1292 if (event->state == PERF_EVENT_STATE_INACTIVE) {
1293 update_group_times(event);
1294 event->state = PERF_EVENT_STATE_ERROR;
1295 }
1296 }
1297 }
1298
1299 static void
1300 ctx_flexible_sched_in(struct perf_event_context *ctx,
1301 struct perf_cpu_context *cpuctx)
1302 {
1303 struct perf_event *event;
1304 int can_add_hw = 1;
1305
1306 list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
1307 /* Ignore events in OFF or ERROR state */
1308 if (event->state <= PERF_EVENT_STATE_OFF)
1309 continue;
1310 /*
1311 * Listen to the 'cpu' scheduling filter constraint
1312 * of events:
1313 */
1314 if (event->cpu != -1 && event->cpu != smp_processor_id())
1315 continue;
1316
1317 if (group_can_go_on(event, cpuctx, can_add_hw)) {
1318 if (group_sched_in(event, cpuctx, ctx))
1319 can_add_hw = 0;
1320 }
1321 }
1322 }
1323
1324 static void
1325 ctx_sched_in(struct perf_event_context *ctx,
1326 struct perf_cpu_context *cpuctx,
1327 enum event_type_t event_type)
1328 {
1329 raw_spin_lock(&ctx->lock);
1330 ctx->is_active = 1;
1331 if (likely(!ctx->nr_events))
1332 goto out;
1333
1334 ctx->timestamp = perf_clock();
1335
1336 /*
1337 * First go through the list and put on any pinned groups
1338 * in order to give them the best chance of going on.
1339 */
1340 if (event_type & EVENT_PINNED)
1341 ctx_pinned_sched_in(ctx, cpuctx);
1342
1343 /* Then walk through the lower prio flexible groups */
1344 if (event_type & EVENT_FLEXIBLE)
1345 ctx_flexible_sched_in(ctx, cpuctx);
1346
1347 out:
1348 raw_spin_unlock(&ctx->lock);
1349 }
1350
1351 static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
1352 enum event_type_t event_type)
1353 {
1354 struct perf_event_context *ctx = &cpuctx->ctx;
1355
1356 ctx_sched_in(ctx, cpuctx, event_type);
1357 }
1358
1359 static void task_ctx_sched_in(struct task_struct *task,
1360 enum event_type_t event_type)
1361 {
1362 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
1363 struct perf_event_context *ctx = task->perf_event_ctxp;
1364
1365 if (likely(!ctx))
1366 return;
1367 if (cpuctx->task_ctx == ctx)
1368 return;
1369 ctx_sched_in(ctx, cpuctx, event_type);
1370 cpuctx->task_ctx = ctx;
1371 }
1372 /*
1373 * Called from scheduler to add the events of the current task
1374 * with interrupts disabled.
1375 *
1376 * We restore the event value and then enable it.
1377 *
1378 * This does not protect us against NMI, but enable()
1379 * sets the enabled bit in the control field of event _before_
1380 * accessing the event control register. If an NMI hits, then it will
1381 * keep the event running.
1382 */
1383 void perf_event_task_sched_in(struct task_struct *task)
1384 {
1385 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
1386 struct perf_event_context *ctx = task->perf_event_ctxp;
1387
1388 if (likely(!ctx))
1389 return;
1390
1391 if (cpuctx->task_ctx == ctx)
1392 return;
1393
1394 /*
1395 * We want to keep the following priority order:
1396 * cpu pinned (that don't need to move), task pinned,
1397 * cpu flexible, task flexible.
1398 */
1399 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
1400
1401 ctx_sched_in(ctx, cpuctx, EVENT_PINNED);
1402 cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
1403 ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE);
1404
1405 cpuctx->task_ctx = ctx;
1406 }
1407
1408 #define MAX_INTERRUPTS (~0ULL)
1409
1410 static void perf_log_throttle(struct perf_event *event, int enable);
1411
1412 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
1413 {
1414 u64 frequency = event->attr.sample_freq;
1415 u64 sec = NSEC_PER_SEC;
1416 u64 divisor, dividend;
1417
1418 int count_fls, nsec_fls, frequency_fls, sec_fls;
1419
1420 count_fls = fls64(count);
1421 nsec_fls = fls64(nsec);
1422 frequency_fls = fls64(frequency);
1423 sec_fls = 30;
1424
1425 /*
1426 * We got @count in @nsec, with a target of sample_freq HZ
1427 * the target period becomes:
1428 *
1429 * @count * 10^9
1430 * period = -------------------
1431 * @nsec * sample_freq
1432 *
1433 */
1434
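	/*
	 * Worked example (editor's illustration): with count = 10^6 events
	 * observed over nsec = 10^7 ns (10 ms) and sample_freq = 1000 Hz,
	 * period = 10^6 * 10^9 / (10^7 * 1000) = 100000, i.e. one sample
	 * every 100000 events, which at the observed rate of 10^8 events/sec
	 * gives the requested ~1000 samples per second.
	 */
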
1435 /*
1436 * Reduce accuracy by one bit such that @a and @b converge
1437 * to a similar magnitude.
1438 */
1439 #define REDUCE_FLS(a, b) \
1440 do { \
1441 if (a##_fls > b##_fls) { \
1442 a >>= 1; \
1443 a##_fls--; \
1444 } else { \
1445 b >>= 1; \
1446 b##_fls--; \
1447 } \
1448 } while (0)
1449
1450 /*
1451 * Reduce accuracy until either term fits in a u64, then proceed with
1452 * the other, so that finally we can do a u64/u64 division.
1453 */
1454 while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) {
1455 REDUCE_FLS(nsec, frequency);
1456 REDUCE_FLS(sec, count);
1457 }
1458
1459 if (count_fls + sec_fls > 64) {
1460 divisor = nsec * frequency;
1461
1462 while (count_fls + sec_fls > 64) {
1463 REDUCE_FLS(count, sec);
1464 divisor >>= 1;
1465 }
1466
1467 dividend = count * sec;
1468 } else {
1469 dividend = count * sec;
1470
1471 while (nsec_fls + frequency_fls > 64) {
1472 REDUCE_FLS(nsec, frequency);
1473 dividend >>= 1;
1474 }
1475
1476 divisor = nsec * frequency;
1477 }
1478
1479 if (!divisor)
1480 return dividend;
1481
1482 return div64_u64(dividend, divisor);
1483 }
1484
1485 static void perf_event_stop(struct perf_event *event)
1486 {
1487 if (!event->pmu->stop)
1488 return event->pmu->disable(event);
1489
1490 return event->pmu->stop(event);
1491 }
1492
1493 static int perf_event_start(struct perf_event *event)
1494 {
1495 if (!event->pmu->start)
1496 return event->pmu->enable(event);
1497
1498 return event->pmu->start(event);
1499 }
1500
1501 static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count)
1502 {
1503 struct hw_perf_event *hwc = &event->hw;
1504 s64 period, sample_period;
1505 s64 delta;
1506
1507 period = perf_calculate_period(event, nsec, count);
1508
1509 delta = (s64)(period - hwc->sample_period);
1510 delta = (delta + 7) / 8; /* low pass filter */
1511
1512 sample_period = hwc->sample_period + delta;
1513
1514 if (!sample_period)
1515 sample_period = 1;
1516
1517 hwc->sample_period = sample_period;
1518
1519 if (local64_read(&hwc->period_left) > 8*sample_period) {
1520 perf_event_stop(event);
1521 local64_set(&hwc->period_left, 0);
1522 perf_event_start(event);
1523 }
1524 }
1525
1526 static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
1527 {
1528 struct perf_event *event;
1529 struct hw_perf_event *hwc;
1530 u64 interrupts, now;
1531 s64 delta;
1532
1533 raw_spin_lock(&ctx->lock);
1534 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
1535 if (event->state != PERF_EVENT_STATE_ACTIVE)
1536 continue;
1537
1538 if (event->cpu != -1 && event->cpu != smp_processor_id())
1539 continue;
1540
1541 hwc = &event->hw;
1542
1543 interrupts = hwc->interrupts;
1544 hwc->interrupts = 0;
1545
1546 /*
1547 * unthrottle events on the tick
1548 */
1549 if (interrupts == MAX_INTERRUPTS) {
1550 perf_log_throttle(event, 1);
1551 event->pmu->unthrottle(event);
1552 }
1553
1554 if (!event->attr.freq || !event->attr.sample_freq)
1555 continue;
1556
1557 event->pmu->read(event);
1558 now = local64_read(&event->count);
1559 delta = now - hwc->freq_count_stamp;
1560 hwc->freq_count_stamp = now;
1561
1562 if (delta > 0)
1563 perf_adjust_period(event, TICK_NSEC, delta);
1564 }
1565 raw_spin_unlock(&ctx->lock);
1566 }
1567
1568 /*
1569 * Round-robin a context's events:
1570 */
1571 static void rotate_ctx(struct perf_event_context *ctx)
1572 {
1573 raw_spin_lock(&ctx->lock);
1574
1575 /* Rotate the first entry of the non-pinned groups to the end */
1576 list_rotate_left(&ctx->flexible_groups);
1577
1578 raw_spin_unlock(&ctx->lock);
1579 }
1580
1581 void perf_event_task_tick(struct task_struct *curr)
1582 {
1583 struct perf_cpu_context *cpuctx;
1584 struct perf_event_context *ctx;
1585 int rotate = 0;
1586
1587 if (!atomic_read(&nr_events))
1588 return;
1589
1590 cpuctx = &__get_cpu_var(perf_cpu_context);
1591 if (cpuctx->ctx.nr_events &&
1592 cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
1593 rotate = 1;
1594
1595 ctx = curr->perf_event_ctxp;
1596 if (ctx && ctx->nr_events && ctx->nr_events != ctx->nr_active)
1597 rotate = 1;
1598
1599 perf_ctx_adjust_freq(&cpuctx->ctx);
1600 if (ctx)
1601 perf_ctx_adjust_freq(ctx);
1602
1603 if (!rotate)
1604 return;
1605
1606 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
1607 if (ctx)
1608 task_ctx_sched_out(ctx, EVENT_FLEXIBLE);
1609
1610 rotate_ctx(&cpuctx->ctx);
1611 if (ctx)
1612 rotate_ctx(ctx);
1613
1614 cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
1615 if (ctx)
1616 task_ctx_sched_in(curr, EVENT_FLEXIBLE);
1617 }
1618
1619 static int event_enable_on_exec(struct perf_event *event,
1620 struct perf_event_context *ctx)
1621 {
1622 if (!event->attr.enable_on_exec)
1623 return 0;
1624
1625 event->attr.enable_on_exec = 0;
1626 if (event->state >= PERF_EVENT_STATE_INACTIVE)
1627 return 0;
1628
1629 __perf_event_mark_enabled(event, ctx);
1630
1631 return 1;
1632 }
1633
1634 /*
1635 * Enable all of a task's events that have been marked enable-on-exec.
1636 * This expects task == current.
1637 */
1638 static void perf_event_enable_on_exec(struct task_struct *task)
1639 {
1640 struct perf_event_context *ctx;
1641 struct perf_event *event;
1642 unsigned long flags;
1643 int enabled = 0;
1644 int ret;
1645
1646 local_irq_save(flags);
1647 ctx = task->perf_event_ctxp;
1648 if (!ctx || !ctx->nr_events)
1649 goto out;
1650
1651 __perf_event_task_sched_out(ctx);
1652
1653 raw_spin_lock(&ctx->lock);
1654
1655 list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
1656 ret = event_enable_on_exec(event, ctx);
1657 if (ret)
1658 enabled = 1;
1659 }
1660
1661 list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
1662 ret = event_enable_on_exec(event, ctx);
1663 if (ret)
1664 enabled = 1;
1665 }
1666
1667 /*
1668 * Unclone this context if we enabled any event.
1669 */
1670 if (enabled)
1671 unclone_ctx(ctx);
1672
1673 raw_spin_unlock(&ctx->lock);
1674
1675 perf_event_task_sched_in(task);
1676 out:
1677 local_irq_restore(flags);
1678 }
1679
1680 /*
1681 * Cross CPU call to read the hardware event
1682 */
1683 static void __perf_event_read(void *info)
1684 {
1685 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
1686 struct perf_event *event = info;
1687 struct perf_event_context *ctx = event->ctx;
1688
1689 /*
1690 * If this is a task context, we need to check whether it is
1691 * the current task context of this cpu. If not it has been
1692 * scheduled out before the smp call arrived. In that case
1693 * event->count would have been updated to a recent sample
1694 * when the event was scheduled out.
1695 */
1696 if (ctx->task && cpuctx->task_ctx != ctx)
1697 return;
1698
1699 raw_spin_lock(&ctx->lock);
1700 update_context_time(ctx);
1701 update_event_times(event);
1702 raw_spin_unlock(&ctx->lock);
1703
1704 event->pmu->read(event);
1705 }
1706
1707 static inline u64 perf_event_count(struct perf_event *event)
1708 {
1709 return local64_read(&event->count) + atomic64_read(&event->child_count);
1710 }
1711
1712 static u64 perf_event_read(struct perf_event *event)
1713 {
1714 /*
1715 * If event is enabled and currently active on a CPU, update the
1716 * value in the event structure:
1717 */
1718 if (event->state == PERF_EVENT_STATE_ACTIVE) {
1719 smp_call_function_single(event->oncpu,
1720 __perf_event_read, event, 1);
1721 } else if (event->state == PERF_EVENT_STATE_INACTIVE) {
1722 struct perf_event_context *ctx = event->ctx;
1723 unsigned long flags;
1724
1725 raw_spin_lock_irqsave(&ctx->lock, flags);
1726 update_context_time(ctx);
1727 update_event_times(event);
1728 raw_spin_unlock_irqrestore(&ctx->lock, flags);
1729 }
1730
1731 return perf_event_count(event);
1732 }
1733
1734 /*
1735 * Callchain support
1736 */
1737
1738 struct callchain_cpus_entries {
1739 struct rcu_head rcu_head;
1740 struct perf_callchain_entry *cpu_entries[0];
1741 };
1742
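/*
 * Editor's note: cpu_entries[] is a zero-length (flexible) array;
 * alloc_callchain_buffers() below sizes the allocation as
 * sizeof(*entries) + num_possible_cpus() * sizeof(pointer) and hangs a
 * buffer of PERF_NR_CONTEXTS callchain entries off each per-cpu slot.
 */
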
1743 static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]);
1744 static atomic_t nr_callchain_events;
1745 static DEFINE_MUTEX(callchain_mutex);
1746 struct callchain_cpus_entries *callchain_cpus_entries;
1747
1748
1749 __weak void perf_callchain_kernel(struct perf_callchain_entry *entry,
1750 struct pt_regs *regs)
1751 {
1752 }
1753
1754 __weak void perf_callchain_user(struct perf_callchain_entry *entry,
1755 struct pt_regs *regs)
1756 {
1757 }
1758
1759 static void release_callchain_buffers_rcu(struct rcu_head *head)
1760 {
1761 struct callchain_cpus_entries *entries;
1762 int cpu;
1763
1764 entries = container_of(head, struct callchain_cpus_entries, rcu_head);
1765
1766 for_each_possible_cpu(cpu)
1767 kfree(entries->cpu_entries[cpu]);
1768
1769 kfree(entries);
1770 }
1771
1772 static void release_callchain_buffers(void)
1773 {
1774 struct callchain_cpus_entries *entries;
1775
1776 entries = callchain_cpus_entries;
1777 rcu_assign_pointer(callchain_cpus_entries, NULL);
1778 call_rcu(&entries->rcu_head, release_callchain_buffers_rcu);
1779 }
1780
1781 static int alloc_callchain_buffers(void)
1782 {
1783 int cpu;
1784 int size;
1785 struct callchain_cpus_entries *entries;
1786
1787 /*
1788 * We can't use the percpu allocation API for data that can be
1789 * accessed from NMI. Use a temporary manual per cpu allocation
1790 * until that gets sorted out.
1791 */
1792 size = sizeof(*entries) + sizeof(struct perf_callchain_entry *) *
1793 num_possible_cpus();
1794
1795 entries = kzalloc(size, GFP_KERNEL);
1796 if (!entries)
1797 return -ENOMEM;
1798
1799 size = sizeof(struct perf_callchain_entry) * PERF_NR_CONTEXTS;
1800
1801 for_each_possible_cpu(cpu) {
1802 entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL,
1803 cpu_to_node(cpu));
1804 if (!entries->cpu_entries[cpu])
1805 goto fail;
1806 }
1807
1808 rcu_assign_pointer(callchain_cpus_entries, entries);
1809
1810 return 0;
1811
1812 fail:
1813 for_each_possible_cpu(cpu)
1814 kfree(entries->cpu_entries[cpu]);
1815 kfree(entries);
1816
1817 return -ENOMEM;
1818 }
1819
1820 static int get_callchain_buffers(void)
1821 {
1822 int err = 0;
1823 int count;
1824
1825 mutex_lock(&callchain_mutex);
1826
1827 count = atomic_inc_return(&nr_callchain_events);
1828 if (WARN_ON_ONCE(count < 1)) {
1829 err = -EINVAL;
1830 goto exit;
1831 }
1832
1833 if (count > 1) {
1834 /* If the allocation failed, give up */
1835 if (!callchain_cpus_entries)
1836 err = -ENOMEM;
1837 goto exit;
1838 }
1839
1840 err = alloc_callchain_buffers();
1841 if (err)
1842 release_callchain_buffers();
1843 exit:
1844 mutex_unlock(&callchain_mutex);
1845
1846 return err;
1847 }
1848
1849 static void put_callchain_buffers(void)
1850 {
1851 if (atomic_dec_and_mutex_lock(&nr_callchain_events, &callchain_mutex)) {
1852 release_callchain_buffers();
1853 mutex_unlock(&callchain_mutex);
1854 }
1855 }
1856
1857 static int get_recursion_context(int *recursion)
1858 {
1859 int rctx;
1860
1861 if (in_nmi())
1862 rctx = 3;
1863 else if (in_irq())
1864 rctx = 2;
1865 else if (in_softirq())
1866 rctx = 1;
1867 else
1868 rctx = 0;
1869
1870 if (recursion[rctx])
1871 return -1;
1872
1873 recursion[rctx]++;
1874 barrier();
1875
1876 return rctx;
1877 }
1878
1879 static inline void put_recursion_context(int *recursion, int rctx)
1880 {
1881 barrier();
1882 recursion[rctx]--;
1883 }
1884
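/*
 * Editor's note: the four recursion slots map to the contexts tested in
 * get_recursion_context(): 0 = task, 1 = softirq, 2 = hardirq, 3 = NMI.
 * Callers pair the two helpers around use of the per-context callchain
 * buffer, as get_callchain_entry()/put_callchain_entry() do below.
 */
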
1885 static struct perf_callchain_entry *get_callchain_entry(int *rctx)
1886 {
1887 int cpu;
1888 struct callchain_cpus_entries *entries;
1889
1890 *rctx = get_recursion_context(__get_cpu_var(callchain_recursion));
1891 if (*rctx == -1)
1892 return NULL;
1893
1894 entries = rcu_dereference(callchain_cpus_entries);
1895 if (!entries)
1896 return NULL;
1897
1898 cpu = smp_processor_id();
1899
1900 return &entries->cpu_entries[cpu][*rctx];
1901 }
1902
1903 static void
1904 put_callchain_entry(int rctx)
1905 {
1906 put_recursion_context(__get_cpu_var(callchain_recursion), rctx);
1907 }
1908
1909 static struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
1910 {
1911 int rctx;
1912 struct perf_callchain_entry *entry;
1913
1914
1915 entry = get_callchain_entry(&rctx);
1916 if (rctx == -1)
1917 return NULL;
1918
1919 if (!entry)
1920 goto exit_put;
1921
1922 entry->nr = 0;
1923
1924 if (!user_mode(regs)) {
1925 perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
1926 perf_callchain_kernel(entry, regs);
1927 if (current->mm)
1928 regs = task_pt_regs(current);
1929 else
1930 regs = NULL;
1931 }
1932
1933 if (regs) {
1934 perf_callchain_store(entry, PERF_CONTEXT_USER);
1935 perf_callchain_user(entry, regs);
1936 }
1937
1938 exit_put:
1939 put_callchain_entry(rctx);
1940
1941 return entry;
1942 }
1943
1944 /*
1945 * Initialize the perf_event context in a task_struct:
1946 */
1947 static void
1948 __perf_event_init_context(struct perf_event_context *ctx,
1949 struct task_struct *task)
1950 {
1951 raw_spin_lock_init(&ctx->lock);
1952 mutex_init(&ctx->mutex);
1953 INIT_LIST_HEAD(&ctx->pinned_groups);
1954 INIT_LIST_HEAD(&ctx->flexible_groups);
1955 INIT_LIST_HEAD(&ctx->event_list);
1956 atomic_set(&ctx->refcount, 1);
1957 ctx->task = task;
1958 }
1959
1960 static struct perf_event_context *find_get_context(pid_t pid, int cpu)
1961 {
1962 struct perf_event_context *ctx;
1963 struct perf_cpu_context *cpuctx;
1964 struct task_struct *task;
1965 unsigned long flags;
1966 int err;
1967
1968 if (pid == -1 && cpu != -1) {
1969 /* Must be root to operate on a CPU event: */
1970 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
1971 return ERR_PTR(-EACCES);
1972
1973 if (cpu < 0 || cpu >= nr_cpumask_bits)
1974 return ERR_PTR(-EINVAL);
1975
1976 /*
1977 * We could be clever and allow attaching an event to an
1978 * offline CPU, activating it when the CPU comes up, but
1979 * that's for later.
1980 */
1981 if (!cpu_online(cpu))
1982 return ERR_PTR(-ENODEV);
1983
1984 cpuctx = &per_cpu(perf_cpu_context, cpu);
1985 ctx = &cpuctx->ctx;
1986 get_ctx(ctx);
1987
1988 return ctx;
1989 }
1990
1991 rcu_read_lock();
1992 if (!pid)
1993 task = current;
1994 else
1995 task = find_task_by_vpid(pid);
1996 if (task)
1997 get_task_struct(task);
1998 rcu_read_unlock();
1999
2000 if (!task)
2001 return ERR_PTR(-ESRCH);
2002
2003 /*
2004 * Can't attach events to a dying task.
2005 */
2006 err = -ESRCH;
2007 if (task->flags & PF_EXITING)
2008 goto errout;
2009
2010 /* Reuse ptrace permission checks for now. */
2011 err = -EACCES;
2012 if (!ptrace_may_access(task, PTRACE_MODE_READ))
2013 goto errout;
2014
2015 retry:
2016 ctx = perf_lock_task_context(task, &flags);
2017 if (ctx) {
2018 unclone_ctx(ctx);
2019 raw_spin_unlock_irqrestore(&ctx->lock, flags);
2020 }
2021
2022 if (!ctx) {
2023 ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL);
2024 err = -ENOMEM;
2025 if (!ctx)
2026 goto errout;
2027 __perf_event_init_context(ctx, task);
2028 get_ctx(ctx);
2029 if (cmpxchg(&task->perf_event_ctxp, NULL, ctx)) {
2030 /*
2031 * We raced with some other task; use
2032 * the context they set.
2033 */
2034 kfree(ctx);
2035 goto retry;
2036 }
2037 get_task_struct(task);
2038 }
2039
2040 put_task_struct(task);
2041 return ctx;
2042
2043 errout:
2044 put_task_struct(task);
2045 return ERR_PTR(err);
2046 }
2047
2048 static void perf_event_free_filter(struct perf_event *event);
2049
2050 static void free_event_rcu(struct rcu_head *head)
2051 {
2052 struct perf_event *event;
2053
2054 event = container_of(head, struct perf_event, rcu_head);
2055 if (event->ns)
2056 put_pid_ns(event->ns);
2057 perf_event_free_filter(event);
2058 kfree(event);
2059 }
2060
2061 static void perf_pending_sync(struct perf_event *event);
2062 static void perf_buffer_put(struct perf_buffer *buffer);
2063
2064 static void free_event(struct perf_event *event)
2065 {
2066 perf_pending_sync(event);
2067
2068 if (!event->parent) {
2069 atomic_dec(&nr_events);
2070 if (event->attr.mmap || event->attr.mmap_data)
2071 atomic_dec(&nr_mmap_events);
2072 if (event->attr.comm)
2073 atomic_dec(&nr_comm_events);
2074 if (event->attr.task)
2075 atomic_dec(&nr_task_events);
2076 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
2077 put_callchain_buffers();
2078 }
2079
2080 if (event->buffer) {
2081 perf_buffer_put(event->buffer);
2082 event->buffer = NULL;
2083 }
2084
2085 if (event->destroy)
2086 event->destroy(event);
2087
2088 put_ctx(event->ctx);
2089 call_rcu(&event->rcu_head, free_event_rcu);
2090 }
2091
2092 int perf_event_release_kernel(struct perf_event *event)
2093 {
2094 struct perf_event_context *ctx = event->ctx;
2095
2096 /*
2097 	 * Remove the event from the PMU; it can't get re-enabled since we got
2098 	 * here because the last reference went away.
2099 */
2100 perf_event_disable(event);
2101
2102 WARN_ON_ONCE(ctx->parent_ctx);
2103 /*
2104 * There are two ways this annotation is useful:
2105 *
2106 	 *  1) there is a lock recursion from perf_event_exit_task;
2107 	 *     see the comment there.
2108 *
2109 * 2) there is a lock-inversion with mmap_sem through
2110 * perf_event_read_group(), which takes faults while
2111 	 *     holding ctx->mutex; however, this is called after
2112 * the last filedesc died, so there is no possibility
2113 * to trigger the AB-BA case.
2114 */
2115 mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING);
2116 raw_spin_lock_irq(&ctx->lock);
2117 perf_group_detach(event);
2118 list_del_event(event, ctx);
2119 raw_spin_unlock_irq(&ctx->lock);
2120 mutex_unlock(&ctx->mutex);
2121
2122 mutex_lock(&event->owner->perf_event_mutex);
2123 list_del_init(&event->owner_entry);
2124 mutex_unlock(&event->owner->perf_event_mutex);
2125 put_task_struct(event->owner);
2126
2127 free_event(event);
2128
2129 return 0;
2130 }
2131 EXPORT_SYMBOL_GPL(perf_event_release_kernel);
2132
2133 /*
2134 * Called when the last reference to the file is gone.
2135 */
2136 static int perf_release(struct inode *inode, struct file *file)
2137 {
2138 struct perf_event *event = file->private_data;
2139
2140 file->private_data = NULL;
2141
2142 return perf_event_release_kernel(event);
2143 }
2144
2145 static int perf_event_read_size(struct perf_event *event)
2146 {
2147 int entry = sizeof(u64); /* value */
2148 int size = 0;
2149 int nr = 1;
2150
2151 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
2152 size += sizeof(u64);
2153
2154 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
2155 size += sizeof(u64);
2156
2157 if (event->attr.read_format & PERF_FORMAT_ID)
2158 entry += sizeof(u64);
2159
2160 if (event->attr.read_format & PERF_FORMAT_GROUP) {
2161 nr += event->group_leader->nr_siblings;
2162 size += sizeof(u64);
2163 }
2164
2165 size += entry * nr;
2166
2167 return size;
2168 }
2169
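/*
 * Worked example for the size calculation above (illustrative only, the
 * values are hypothetical): for a group leader with two siblings and
 * read_format = PERF_FORMAT_GROUP | PERF_FORMAT_ID |
 * PERF_FORMAT_TOTAL_TIME_ENABLED, we get entry = 2 * sizeof(u64)
 * (value + id), nr = 3 and size = sizeof(u64) (time_enabled) +
 * sizeof(u64) (the nr field) + 3 * entry = 64 bytes, which is the
 * minimum buffer a read() caller has to supply.
 */
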
2170 u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
2171 {
2172 struct perf_event *child;
2173 u64 total = 0;
2174
2175 *enabled = 0;
2176 *running = 0;
2177
2178 mutex_lock(&event->child_mutex);
2179 total += perf_event_read(event);
2180 *enabled += event->total_time_enabled +
2181 atomic64_read(&event->child_total_time_enabled);
2182 *running += event->total_time_running +
2183 atomic64_read(&event->child_total_time_running);
2184
2185 list_for_each_entry(child, &event->child_list, child_list) {
2186 total += perf_event_read(child);
2187 *enabled += child->total_time_enabled;
2188 *running += child->total_time_running;
2189 }
2190 mutex_unlock(&event->child_mutex);
2191
2192 return total;
2193 }
2194 EXPORT_SYMBOL_GPL(perf_event_read_value);
2195
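/*
 * Illustrative in-kernel usage sketch (hypothetical caller, not part of
 * this file): a subsystem that owns a counter, e.g. one created with
 * perf_event_create_kernel_counter(), can sample it like so:
 *
 *	u64 enabled, running, count;
 *
 *	count = perf_event_read_value(event, &enabled, &running);
 *	if (running && running < enabled)
 *		count = div64_u64(count * enabled, running);
 *
 * The final scaling step compensates for multiplexing and is the
 * caller's choice; perf_event_read_value() itself only reports the raw
 * totals.
 */
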
2196 static int perf_event_read_group(struct perf_event *event,
2197 u64 read_format, char __user *buf)
2198 {
2199 struct perf_event *leader = event->group_leader, *sub;
2200 int n = 0, size = 0, ret = -EFAULT;
2201 struct perf_event_context *ctx = leader->ctx;
2202 u64 values[5];
2203 u64 count, enabled, running;
2204
2205 mutex_lock(&ctx->mutex);
2206 count = perf_event_read_value(leader, &enabled, &running);
2207
2208 values[n++] = 1 + leader->nr_siblings;
2209 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
2210 values[n++] = enabled;
2211 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
2212 values[n++] = running;
2213 values[n++] = count;
2214 if (read_format & PERF_FORMAT_ID)
2215 values[n++] = primary_event_id(leader);
2216
2217 size = n * sizeof(u64);
2218
2219 if (copy_to_user(buf, values, size))
2220 goto unlock;
2221
2222 ret = size;
2223
2224 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
2225 n = 0;
2226
2227 values[n++] = perf_event_read_value(sub, &enabled, &running);
2228 if (read_format & PERF_FORMAT_ID)
2229 values[n++] = primary_event_id(sub);
2230
2231 size = n * sizeof(u64);
2232
2233 if (copy_to_user(buf + ret, values, size)) {
2234 ret = -EFAULT;
2235 goto unlock;
2236 }
2237
2238 ret += size;
2239 }
2240 unlock:
2241 mutex_unlock(&ctx->mutex);
2242
2243 return ret;
2244 }
2245
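/*
 * Resulting read() layout for PERF_FORMAT_GROUP (sketch; the struct and
 * field names are illustrative and the optional fields appear only when
 * the matching read_format bit is set):
 *
 *	struct {
 *		u64 nr;			// 1 + nr_siblings
 *		u64 time_enabled;	// PERF_FORMAT_TOTAL_TIME_ENABLED
 *		u64 time_running;	// PERF_FORMAT_TOTAL_TIME_RUNNING
 *		struct {
 *			u64 value;
 *			u64 id;		// PERF_FORMAT_ID
 *		} cntr[nr];
 *	};
 *
 * time_enabled/time_running are reported once, from the leader; the
 * siblings contribute only value (and id).
 */
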
2246 static int perf_event_read_one(struct perf_event *event,
2247 u64 read_format, char __user *buf)
2248 {
2249 u64 enabled, running;
2250 u64 values[4];
2251 int n = 0;
2252
2253 values[n++] = perf_event_read_value(event, &enabled, &running);
2254 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
2255 values[n++] = enabled;
2256 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
2257 values[n++] = running;
2258 if (read_format & PERF_FORMAT_ID)
2259 values[n++] = primary_event_id(event);
2260
2261 if (copy_to_user(buf, values, n * sizeof(u64)))
2262 return -EFAULT;
2263
2264 return n * sizeof(u64);
2265 }
2266
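/*
 * Userspace read sketch for a non-group event (illustrative, perf_fd is
 * a hypothetical open perf event fd): with read_format =
 * PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING a
 * single read() returns three u64 values:
 *
 *	u64 buf[3];
 *
 *	if (read(perf_fd, buf, sizeof(buf)) == sizeof(buf)) {
 *		u64 count = buf[0], enabled = buf[1], running = buf[2];
 *		// scale count by enabled/running if the event was
 *		// multiplexed (running < enabled)
 *	}
 */
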
2267 /*
2268  * Read the performance event - simple non-blocking version for now
2269 */
2270 static ssize_t
2271 perf_read_hw(struct perf_event *event, char __user *buf, size_t count)
2272 {
2273 u64 read_format = event->attr.read_format;
2274 int ret;
2275
2276 /*
2277 	 * Return end-of-file for a read on an event that is in
2278 * error state (i.e. because it was pinned but it couldn't be
2279 * scheduled on to the CPU at some point).
2280 */
2281 if (event->state == PERF_EVENT_STATE_ERROR)
2282 return 0;
2283
2284 if (count < perf_event_read_size(event))
2285 return -ENOSPC;
2286
2287 WARN_ON_ONCE(event->ctx->parent_ctx);
2288 if (read_format & PERF_FORMAT_GROUP)
2289 ret = perf_event_read_group(event, read_format, buf);
2290 else
2291 ret = perf_event_read_one(event, read_format, buf);
2292
2293 return ret;
2294 }
2295
2296 static ssize_t
2297 perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
2298 {
2299 struct perf_event *event = file->private_data;
2300
2301 return perf_read_hw(event, buf, count);
2302 }
2303
2304 static unsigned int perf_poll(struct file *file, poll_table *wait)
2305 {
2306 struct perf_event *event = file->private_data;
2307 struct perf_buffer *buffer;
2308 unsigned int events = POLL_HUP;
2309
2310 rcu_read_lock();
2311 buffer = rcu_dereference(event->buffer);
2312 if (buffer)
2313 events = atomic_xchg(&buffer->poll, 0);
2314 rcu_read_unlock();
2315
2316 poll_wait(file, &event->waitq, wait);
2317
2318 return events;
2319 }
2320
2321 static void perf_event_reset(struct perf_event *event)
2322 {
2323 (void)perf_event_read(event);
2324 local64_set(&event->count, 0);
2325 perf_event_update_userpage(event);
2326 }
2327
2328 /*
2329 * Holding the top-level event's child_mutex means that any
2330 * descendant process that has inherited this event will block
2331 * in sync_child_event if it goes to exit, thus satisfying the
2332 * task existence requirements of perf_event_enable/disable.
2333 */
2334 static void perf_event_for_each_child(struct perf_event *event,
2335 void (*func)(struct perf_event *))
2336 {
2337 struct perf_event *child;
2338
2339 WARN_ON_ONCE(event->ctx->parent_ctx);
2340 mutex_lock(&event->child_mutex);
2341 func(event);
2342 list_for_each_entry(child, &event->child_list, child_list)
2343 func(child);
2344 mutex_unlock(&event->child_mutex);
2345 }
2346
2347 static void perf_event_for_each(struct perf_event *event,
2348 void (*func)(struct perf_event *))
2349 {
2350 struct perf_event_context *ctx = event->ctx;
2351 struct perf_event *sibling;
2352
2353 WARN_ON_ONCE(ctx->parent_ctx);
2354 mutex_lock(&ctx->mutex);
2355 event = event->group_leader;
2356
2357 perf_event_for_each_child(event, func);
2358 func(event);
2359 list_for_each_entry(sibling, &event->sibling_list, group_entry)
2360 perf_event_for_each_child(event, func);
2361 mutex_unlock(&ctx->mutex);
2362 }
2363
2364 static int perf_event_period(struct perf_event *event, u64 __user *arg)
2365 {
2366 struct perf_event_context *ctx = event->ctx;
2367 unsigned long size;
2368 int ret = 0;
2369 u64 value;
2370
2371 if (!event->attr.sample_period)
2372 return -EINVAL;
2373
2374 size = copy_from_user(&value, arg, sizeof(value));
2375 if (size != sizeof(value))
2376 return -EFAULT;
2377
2378 if (!value)
2379 return -EINVAL;
2380
2381 raw_spin_lock_irq(&ctx->lock);
2382 if (event->attr.freq) {
2383 if (value > sysctl_perf_event_sample_rate) {
2384 ret = -EINVAL;
2385 goto unlock;
2386 }
2387
2388 event->attr.sample_freq = value;
2389 } else {
2390 event->attr.sample_period = value;
2391 event->hw.sample_period = value;
2392 }
2393 unlock:
2394 raw_spin_unlock_irq(&ctx->lock);
2395
2396 return ret;
2397 }
2398
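/*
 * Userspace usage sketch (illustrative, perf_fd is a hypothetical open
 * perf event fd): the sample period/frequency can be changed at runtime
 * without re-opening the event:
 *
 *	u64 new_period = 100000;
 *
 *	if (ioctl(perf_fd, PERF_EVENT_IOC_PERIOD, &new_period))
 *		perror("PERF_EVENT_IOC_PERIOD");
 *
 * If the event was created with attr.freq set, the value is interpreted
 * as a frequency and is rejected when it exceeds
 * sysctl_perf_event_sample_rate.
 */
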
2399 static const struct file_operations perf_fops;
2400
2401 static struct perf_event *perf_fget_light(int fd, int *fput_needed)
2402 {
2403 struct file *file;
2404
2405 file = fget_light(fd, fput_needed);
2406 if (!file)
2407 return ERR_PTR(-EBADF);
2408
2409 if (file->f_op != &perf_fops) {
2410 fput_light(file, *fput_needed);
2411 *fput_needed = 0;
2412 return ERR_PTR(-EBADF);
2413 }
2414
2415 return file->private_data;
2416 }
2417
2418 static int perf_event_set_output(struct perf_event *event,
2419 struct perf_event *output_event);
2420 static int perf_event_set_filter(struct perf_event *event, void __user *arg);
2421
2422 static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2423 {
2424 struct perf_event *event = file->private_data;
2425 void (*func)(struct perf_event *);
2426 u32 flags = arg;
2427
2428 switch (cmd) {
2429 case PERF_EVENT_IOC_ENABLE:
2430 func = perf_event_enable;
2431 break;
2432 case PERF_EVENT_IOC_DISABLE:
2433 func = perf_event_disable;
2434 break;
2435 case PERF_EVENT_IOC_RESET:
2436 func = perf_event_reset;
2437 break;
2438
2439 case PERF_EVENT_IOC_REFRESH:
2440 return perf_event_refresh(event, arg);
2441
2442 case PERF_EVENT_IOC_PERIOD:
2443 return perf_event_period(event, (u64 __user *)arg);
2444
2445 case PERF_EVENT_IOC_SET_OUTPUT:
2446 {
2447 struct perf_event *output_event = NULL;
2448 int fput_needed = 0;
2449 int ret;
2450
2451 if (arg != -1) {
2452 output_event = perf_fget_light(arg, &fput_needed);
2453 if (IS_ERR(output_event))
2454 return PTR_ERR(output_event);
2455 }
2456
2457 ret = perf_event_set_output(event, output_event);
2458 if (output_event)
2459 fput_light(output_event->filp, fput_needed);
2460
2461 return ret;
2462 }
2463
2464 case PERF_EVENT_IOC_SET_FILTER:
2465 return perf_event_set_filter(event, (void __user *)arg);
2466
2467 default:
2468 return -ENOTTY;
2469 }
2470
2471 if (flags & PERF_IOC_FLAG_GROUP)
2472 perf_event_for_each(event, func);
2473 else
2474 perf_event_for_each_child(event, func);
2475
2476 return 0;
2477 }
2478
2479 int perf_event_task_enable(void)
2480 {
2481 struct perf_event *event;
2482
2483 mutex_lock(&current->perf_event_mutex);
2484 list_for_each_entry(event, &current->perf_event_list, owner_entry)
2485 perf_event_for_each_child(event, perf_event_enable);
2486 mutex_unlock(&current->perf_event_mutex);
2487
2488 return 0;
2489 }
2490
2491 int perf_event_task_disable(void)
2492 {
2493 struct perf_event *event;
2494
2495 mutex_lock(&current->perf_event_mutex);
2496 list_for_each_entry(event, &current->perf_event_list, owner_entry)
2497 perf_event_for_each_child(event, perf_event_disable);
2498 mutex_unlock(&current->perf_event_mutex);
2499
2500 return 0;
2501 }
2502
2503 #ifndef PERF_EVENT_INDEX_OFFSET
2504 # define PERF_EVENT_INDEX_OFFSET 0
2505 #endif
2506
2507 static int perf_event_index(struct perf_event *event)
2508 {
2509 if (event->state != PERF_EVENT_STATE_ACTIVE)
2510 return 0;
2511
2512 return event->hw.idx + 1 - PERF_EVENT_INDEX_OFFSET;
2513 }
2514
2515 /*
2516 * Callers need to ensure there can be no nesting of this function, otherwise
2517  * the seqlock logic goes bad. We cannot serialize this because the arch
2518 * code calls this from NMI context.
2519 */
2520 void perf_event_update_userpage(struct perf_event *event)
2521 {
2522 struct perf_event_mmap_page *userpg;
2523 struct perf_buffer *buffer;
2524
2525 rcu_read_lock();
2526 buffer = rcu_dereference(event->buffer);
2527 if (!buffer)
2528 goto unlock;
2529
2530 userpg = buffer->user_page;
2531
2532 /*
2533 	 * Disable preemption so the corresponding user-space does not
2534 	 * spin too long if we get preempted.
2535 */
2536 preempt_disable();
2537 ++userpg->lock;
2538 barrier();
2539 userpg->index = perf_event_index(event);
2540 userpg->offset = perf_event_count(event);
2541 if (event->state == PERF_EVENT_STATE_ACTIVE)
2542 userpg->offset -= local64_read(&event->hw.prev_count);
2543
2544 userpg->time_enabled = event->total_time_enabled +
2545 atomic64_read(&event->child_total_time_enabled);
2546
2547 userpg->time_running = event->total_time_running +
2548 atomic64_read(&event->child_total_time_running);
2549
2550 barrier();
2551 ++userpg->lock;
2552 preempt_enable();
2553 unlock:
2554 rcu_read_unlock();
2555 }
2556
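/*
 * Matching user-space read side (sketch): because ->lock is bumped both
 * before and after the update above, a reader of the mmap()ed control
 * page has to retry when the sequence changed under it:
 *
 *	do {
 *		seq = pc->lock;
 *		barrier();
 *		idx = pc->index;
 *		count = pc->offset;
 *		if (idx)
 *			count += read_pmc(idx - 1);
 *		barrier();
 *	} while (pc->lock != seq);
 *
 * read_pmc() stands in for an arch specific counter read (e.g. RDPMC on
 * x86); when idx is 0 the event is not currently active and offset alone
 * is the up-to-date count.
 */
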
2557 static unsigned long perf_data_size(struct perf_buffer *buffer);
2558
2559 static void
2560 perf_buffer_init(struct perf_buffer *buffer, long watermark, int flags)
2561 {
2562 long max_size = perf_data_size(buffer);
2563
2564 if (watermark)
2565 buffer->watermark = min(max_size, watermark);
2566
2567 if (!buffer->watermark)
2568 buffer->watermark = max_size / 2;
2569
2570 if (flags & PERF_BUFFER_WRITABLE)
2571 buffer->writable = 1;
2572
2573 atomic_set(&buffer->refcount, 1);
2574 }
2575
2576 #ifndef CONFIG_PERF_USE_VMALLOC
2577
2578 /*
2579 * Back perf_mmap() with regular GFP_KERNEL-0 pages.
2580 */
2581
2582 static struct page *
2583 perf_mmap_to_page(struct perf_buffer *buffer, unsigned long pgoff)
2584 {
2585 if (pgoff > buffer->nr_pages)
2586 return NULL;
2587
2588 if (pgoff == 0)
2589 return virt_to_page(buffer->user_page);
2590
2591 return virt_to_page(buffer->data_pages[pgoff - 1]);
2592 }
2593
2594 static void *perf_mmap_alloc_page(int cpu)
2595 {
2596 struct page *page;
2597 int node;
2598
2599 node = (cpu == -1) ? cpu : cpu_to_node(cpu);
2600 page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
2601 if (!page)
2602 return NULL;
2603
2604 return page_address(page);
2605 }
2606
2607 static struct perf_buffer *
2608 perf_buffer_alloc(int nr_pages, long watermark, int cpu, int flags)
2609 {
2610 struct perf_buffer *buffer;
2611 unsigned long size;
2612 int i;
2613
2614 size = sizeof(struct perf_buffer);
2615 size += nr_pages * sizeof(void *);
2616
2617 buffer = kzalloc(size, GFP_KERNEL);
2618 if (!buffer)
2619 goto fail;
2620
2621 buffer->user_page = perf_mmap_alloc_page(cpu);
2622 if (!buffer->user_page)
2623 goto fail_user_page;
2624
2625 for (i = 0; i < nr_pages; i++) {
2626 buffer->data_pages[i] = perf_mmap_alloc_page(cpu);
2627 if (!buffer->data_pages[i])
2628 goto fail_data_pages;
2629 }
2630
2631 buffer->nr_pages = nr_pages;
2632
2633 perf_buffer_init(buffer, watermark, flags);
2634
2635 return buffer;
2636
2637 fail_data_pages:
2638 for (i--; i >= 0; i--)
2639 free_page((unsigned long)buffer->data_pages[i]);
2640
2641 free_page((unsigned long)buffer->user_page);
2642
2643 fail_user_page:
2644 kfree(buffer);
2645
2646 fail:
2647 return NULL;
2648 }
2649
2650 static void perf_mmap_free_page(unsigned long addr)
2651 {
2652 struct page *page = virt_to_page((void *)addr);
2653
2654 page->mapping = NULL;
2655 __free_page(page);
2656 }
2657
2658 static void perf_buffer_free(struct perf_buffer *buffer)
2659 {
2660 int i;
2661
2662 perf_mmap_free_page((unsigned long)buffer->user_page);
2663 for (i = 0; i < buffer->nr_pages; i++)
2664 perf_mmap_free_page((unsigned long)buffer->data_pages[i]);
2665 kfree(buffer);
2666 }
2667
2668 static inline int page_order(struct perf_buffer *buffer)
2669 {
2670 return 0;
2671 }
2672
2673 #else
2674
2675 /*
2676 * Back perf_mmap() with vmalloc memory.
2677 *
2678 * Required for architectures that have d-cache aliasing issues.
2679 */
2680
2681 static inline int page_order(struct perf_buffer *buffer)
2682 {
2683 return buffer->page_order;
2684 }
2685
2686 static struct page *
2687 perf_mmap_to_page(struct perf_buffer *buffer, unsigned long pgoff)
2688 {
2689 if (pgoff > (1UL << page_order(buffer)))
2690 return NULL;
2691
2692 return vmalloc_to_page((void *)buffer->user_page + pgoff * PAGE_SIZE);
2693 }
2694
2695 static void perf_mmap_unmark_page(void *addr)
2696 {
2697 struct page *page = vmalloc_to_page(addr);
2698
2699 page->mapping = NULL;
2700 }
2701
2702 static void perf_buffer_free_work(struct work_struct *work)
2703 {
2704 struct perf_buffer *buffer;
2705 void *base;
2706 int i, nr;
2707
2708 buffer = container_of(work, struct perf_buffer, work);
2709 nr = 1 << page_order(buffer);
2710
2711 base = buffer->user_page;
2712 for (i = 0; i < nr + 1; i++)
2713 perf_mmap_unmark_page(base + (i * PAGE_SIZE));
2714
2715 vfree(base);
2716 kfree(buffer);
2717 }
2718
2719 static void perf_buffer_free(struct perf_buffer *buffer)
2720 {
2721 schedule_work(&buffer->work);
2722 }
2723
2724 static struct perf_buffer *
2725 perf_buffer_alloc(int nr_pages, long watermark, int cpu, int flags)
2726 {
2727 struct perf_buffer *buffer;
2728 unsigned long size;
2729 void *all_buf;
2730
2731 size = sizeof(struct perf_buffer);
2732 size += sizeof(void *);
2733
2734 buffer = kzalloc(size, GFP_KERNEL);
2735 if (!buffer)
2736 goto fail;
2737
2738 INIT_WORK(&buffer->work, perf_buffer_free_work);
2739
2740 all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE);
2741 if (!all_buf)
2742 goto fail_all_buf;
2743
2744 buffer->user_page = all_buf;
2745 buffer->data_pages[0] = all_buf + PAGE_SIZE;
2746 buffer->page_order = ilog2(nr_pages);
2747 buffer->nr_pages = 1;
2748
2749 perf_buffer_init(buffer, watermark, flags);
2750
2751 return buffer;
2752
2753 fail_all_buf:
2754 kfree(buffer);
2755
2756 fail:
2757 return NULL;
2758 }
2759
2760 #endif
2761
2762 static unsigned long perf_data_size(struct perf_buffer *buffer)
2763 {
2764 return buffer->nr_pages << (PAGE_SHIFT + page_order(buffer));
2765 }
2766
2767 static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2768 {
2769 struct perf_event *event = vma->vm_file->private_data;
2770 struct perf_buffer *buffer;
2771 int ret = VM_FAULT_SIGBUS;
2772
2773 if (vmf->flags & FAULT_FLAG_MKWRITE) {
2774 if (vmf->pgoff == 0)
2775 ret = 0;
2776 return ret;
2777 }
2778
2779 rcu_read_lock();
2780 buffer = rcu_dereference(event->buffer);
2781 if (!buffer)
2782 goto unlock;
2783
2784 if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE))
2785 goto unlock;
2786
2787 vmf->page = perf_mmap_to_page(buffer, vmf->pgoff);
2788 if (!vmf->page)
2789 goto unlock;
2790
2791 get_page(vmf->page);
2792 vmf->page->mapping = vma->vm_file->f_mapping;
2793 vmf->page->index = vmf->pgoff;
2794
2795 ret = 0;
2796 unlock:
2797 rcu_read_unlock();
2798
2799 return ret;
2800 }
2801
2802 static void perf_buffer_free_rcu(struct rcu_head *rcu_head)
2803 {
2804 struct perf_buffer *buffer;
2805
2806 buffer = container_of(rcu_head, struct perf_buffer, rcu_head);
2807 perf_buffer_free(buffer);
2808 }
2809
2810 static struct perf_buffer *perf_buffer_get(struct perf_event *event)
2811 {
2812 struct perf_buffer *buffer;
2813
2814 rcu_read_lock();
2815 buffer = rcu_dereference(event->buffer);
2816 if (buffer) {
2817 if (!atomic_inc_not_zero(&buffer->refcount))
2818 buffer = NULL;
2819 }
2820 rcu_read_unlock();
2821
2822 return buffer;
2823 }
2824
2825 static void perf_buffer_put(struct perf_buffer *buffer)
2826 {
2827 if (!atomic_dec_and_test(&buffer->refcount))
2828 return;
2829
2830 call_rcu(&buffer->rcu_head, perf_buffer_free_rcu);
2831 }
2832
2833 static void perf_mmap_open(struct vm_area_struct *vma)
2834 {
2835 struct perf_event *event = vma->vm_file->private_data;
2836
2837 atomic_inc(&event->mmap_count);
2838 }
2839
2840 static void perf_mmap_close(struct vm_area_struct *vma)
2841 {
2842 struct perf_event *event = vma->vm_file->private_data;
2843
2844 if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) {
2845 unsigned long size = perf_data_size(event->buffer);
2846 struct user_struct *user = event->mmap_user;
2847 struct perf_buffer *buffer = event->buffer;
2848
2849 atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
2850 vma->vm_mm->locked_vm -= event->mmap_locked;
2851 rcu_assign_pointer(event->buffer, NULL);
2852 mutex_unlock(&event->mmap_mutex);
2853
2854 perf_buffer_put(buffer);
2855 free_uid(user);
2856 }
2857 }
2858
2859 static const struct vm_operations_struct perf_mmap_vmops = {
2860 .open = perf_mmap_open,
2861 .close = perf_mmap_close,
2862 .fault = perf_mmap_fault,
2863 .page_mkwrite = perf_mmap_fault,
2864 };
2865
2866 static int perf_mmap(struct file *file, struct vm_area_struct *vma)
2867 {
2868 struct perf_event *event = file->private_data;
2869 unsigned long user_locked, user_lock_limit;
2870 struct user_struct *user = current_user();
2871 unsigned long locked, lock_limit;
2872 struct perf_buffer *buffer;
2873 unsigned long vma_size;
2874 unsigned long nr_pages;
2875 long user_extra, extra;
2876 int ret = 0, flags = 0;
2877
2878 /*
2879 * Don't allow mmap() of inherited per-task counters. This would
2880 * create a performance issue due to all children writing to the
2881 * same buffer.
2882 */
2883 if (event->cpu == -1 && event->attr.inherit)
2884 return -EINVAL;
2885
2886 if (!(vma->vm_flags & VM_SHARED))
2887 return -EINVAL;
2888
2889 vma_size = vma->vm_end - vma->vm_start;
2890 nr_pages = (vma_size / PAGE_SIZE) - 1;
2891
2892 /*
2893 	 * If we have buffer pages, ensure they're a power-of-two in number, so
2894 	 * we can use bitmasks instead of modulo.
2895 */
2896 if (nr_pages != 0 && !is_power_of_2(nr_pages))
2897 return -EINVAL;
2898
2899 if (vma_size != PAGE_SIZE * (1 + nr_pages))
2900 return -EINVAL;
2901
2902 if (vma->vm_pgoff != 0)
2903 return -EINVAL;
2904
2905 WARN_ON_ONCE(event->ctx->parent_ctx);
2906 mutex_lock(&event->mmap_mutex);
2907 if (event->buffer) {
2908 if (event->buffer->nr_pages == nr_pages)
2909 atomic_inc(&event->buffer->refcount);
2910 else
2911 ret = -EINVAL;
2912 goto unlock;
2913 }
2914
2915 user_extra = nr_pages + 1;
2916 user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);
2917
2918 /*
2919 * Increase the limit linearly with more CPUs:
2920 */
2921 user_lock_limit *= num_online_cpus();
2922
2923 user_locked = atomic_long_read(&user->locked_vm) + user_extra;
2924
2925 extra = 0;
2926 if (user_locked > user_lock_limit)
2927 extra = user_locked - user_lock_limit;
2928
2929 lock_limit = rlimit(RLIMIT_MEMLOCK);
2930 lock_limit >>= PAGE_SHIFT;
2931 locked = vma->vm_mm->locked_vm + extra;
2932
2933 if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() &&
2934 !capable(CAP_IPC_LOCK)) {
2935 ret = -EPERM;
2936 goto unlock;
2937 }
2938
2939 WARN_ON(event->buffer);
2940
2941 if (vma->vm_flags & VM_WRITE)
2942 flags |= PERF_BUFFER_WRITABLE;
2943
2944 buffer = perf_buffer_alloc(nr_pages, event->attr.wakeup_watermark,
2945 event->cpu, flags);
2946 if (!buffer) {
2947 ret = -ENOMEM;
2948 goto unlock;
2949 }
2950 rcu_assign_pointer(event->buffer, buffer);
2951
2952 atomic_long_add(user_extra, &user->locked_vm);
2953 event->mmap_locked = extra;
2954 event->mmap_user = get_current_user();
2955 vma->vm_mm->locked_vm += event->mmap_locked;
2956
2957 unlock:
2958 if (!ret)
2959 atomic_inc(&event->mmap_count);
2960 mutex_unlock(&event->mmap_mutex);
2961
2962 vma->vm_flags |= VM_RESERVED;
2963 vma->vm_ops = &perf_mmap_vmops;
2964
2965 return ret;
2966 }
2967
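/*
 * Userspace mapping sketch (illustrative): the mapping must cover the
 * control page plus a power-of-two number of data pages, e.g.:
 *
 *	size_t n_data = 8;	// must be 0 or a power of two
 *	size_t len = (n_data + 1) * sysconf(_SC_PAGESIZE);
 *	void *base = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, perf_fd, 0);
 *
 * Anything else -- a private mapping, a non power-of-two number of data
 * pages, or a non-zero offset -- is rejected with -EINVAL above.
 */
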
2968 static int perf_fasync(int fd, struct file *filp, int on)
2969 {
2970 struct inode *inode = filp->f_path.dentry->d_inode;
2971 struct perf_event *event = filp->private_data;
2972 int retval;
2973
2974 mutex_lock(&inode->i_mutex);
2975 retval = fasync_helper(fd, filp, on, &event->fasync);
2976 mutex_unlock(&inode->i_mutex);
2977
2978 if (retval < 0)
2979 return retval;
2980
2981 return 0;
2982 }
2983
2984 static const struct file_operations perf_fops = {
2985 .llseek = no_llseek,
2986 .release = perf_release,
2987 .read = perf_read,
2988 .poll = perf_poll,
2989 .unlocked_ioctl = perf_ioctl,
2990 .compat_ioctl = perf_ioctl,
2991 .mmap = perf_mmap,
2992 .fasync = perf_fasync,
2993 };
2994
2995 /*
2996 * Perf event wakeup
2997 *
2998 * If there's data, ensure we set the poll() state and publish everything
2999 * to user-space before waking everybody up.
3000 */
3001
3002 void perf_event_wakeup(struct perf_event *event)
3003 {
3004 wake_up_all(&event->waitq);
3005
3006 if (event->pending_kill) {
3007 kill_fasync(&event->fasync, SIGIO, event->pending_kill);
3008 event->pending_kill = 0;
3009 }
3010 }
3011
3012 /*
3013 * Pending wakeups
3014 *
3015  * Handle the case where we need to wake up from NMI (or rq->lock) context.
3016 *
3017 * The NMI bit means we cannot possibly take locks. Therefore, maintain a
3018  * singly linked list and use cmpxchg() to add entries locklessly.
3019 */
3020
3021 static void perf_pending_event(struct perf_pending_entry *entry)
3022 {
3023 struct perf_event *event = container_of(entry,
3024 struct perf_event, pending);
3025
3026 if (event->pending_disable) {
3027 event->pending_disable = 0;
3028 __perf_event_disable(event);
3029 }
3030
3031 if (event->pending_wakeup) {
3032 event->pending_wakeup = 0;
3033 perf_event_wakeup(event);
3034 }
3035 }
3036
3037 #define PENDING_TAIL ((struct perf_pending_entry *)-1UL)
3038
3039 static DEFINE_PER_CPU(struct perf_pending_entry *, perf_pending_head) = {
3040 PENDING_TAIL,
3041 };
3042
3043 static void perf_pending_queue(struct perf_pending_entry *entry,
3044 void (*func)(struct perf_pending_entry *))
3045 {
3046 struct perf_pending_entry **head;
3047
3048 if (cmpxchg(&entry->next, NULL, PENDING_TAIL) != NULL)
3049 return;
3050
3051 entry->func = func;
3052
3053 head = &get_cpu_var(perf_pending_head);
3054
3055 do {
3056 entry->next = *head;
3057 } while (cmpxchg(head, entry->next, entry) != entry->next);
3058
3059 set_perf_event_pending();
3060
3061 put_cpu_var(perf_pending_head);
3062 }
3063
3064 static int __perf_pending_run(void)
3065 {
3066 struct perf_pending_entry *list;
3067 int nr = 0;
3068
3069 list = xchg(&__get_cpu_var(perf_pending_head), PENDING_TAIL);
3070 while (list != PENDING_TAIL) {
3071 void (*func)(struct perf_pending_entry *);
3072 struct perf_pending_entry *entry = list;
3073
3074 list = list->next;
3075
3076 func = entry->func;
3077 entry->next = NULL;
3078 /*
3079 * Ensure we observe the unqueue before we issue the wakeup,
3080 * so that we won't be waiting forever.
3081 * -- see perf_not_pending().
3082 */
3083 smp_wmb();
3084
3085 func(entry);
3086 nr++;
3087 }
3088
3089 return nr;
3090 }
3091
3092 static inline int perf_not_pending(struct perf_event *event)
3093 {
3094 /*
3095 	 * If we flush on whatever CPU we run on, there is a chance we don't
3096 * need to wait.
3097 */
3098 get_cpu();
3099 __perf_pending_run();
3100 put_cpu();
3101
3102 /*
3103 * Ensure we see the proper queue state before going to sleep
3104 	 * so that we do not miss the wakeup. -- see __perf_pending_run()
3105 */
3106 smp_rmb();
3107 return event->pending.next == NULL;
3108 }
3109
3110 static void perf_pending_sync(struct perf_event *event)
3111 {
3112 wait_event(event->waitq, perf_not_pending(event));
3113 }
3114
3115 void perf_event_do_pending(void)
3116 {
3117 __perf_pending_run();
3118 }
3119
3120 /*
3121  * We assume that KVM is the only user of these callbacks for now.
3122  * Later on, we might change this to a list if another virtualization
3123  * implementation needs to register callbacks as well.
3124 */
3125 struct perf_guest_info_callbacks *perf_guest_cbs;
3126
3127 int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
3128 {
3129 perf_guest_cbs = cbs;
3130 return 0;
3131 }
3132 EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks);
3133
3134 int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
3135 {
3136 perf_guest_cbs = NULL;
3137 return 0;
3138 }
3139 EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks);
3140
3141 /*
3142 * Output
3143 */
3144 static bool perf_output_space(struct perf_buffer *buffer, unsigned long tail,
3145 unsigned long offset, unsigned long head)
3146 {
3147 unsigned long mask;
3148
3149 if (!buffer->writable)
3150 return true;
3151
3152 mask = perf_data_size(buffer) - 1;
3153
3154 offset = (offset - tail) & mask;
3155 head = (head - tail) & mask;
3156
3157 if ((int)(head - offset) < 0)
3158 return false;
3159
3160 return true;
3161 }
3162
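/*
 * Worked example of the wrap-around arithmetic above (values are
 * illustrative): with a 4KiB data area (mask = 0xfff), tail = 0xff0,
 * offset = 0x1000 and head = 0x1100:
 *
 *	offset = (0x1000 - 0xff0) & 0xfff = 0x010
 *	head   = (0x1100 - 0xff0) & 0xfff = 0x110
 *
 * head - offset = 0x100 >= 0, so writing these 0x100 bytes does not
 * overrun data the reader has not consumed yet, and the function
 * returns true.
 */
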
3163 static void perf_output_wakeup(struct perf_output_handle *handle)
3164 {
3165 atomic_set(&handle->buffer->poll, POLL_IN);
3166
3167 if (handle->nmi) {
3168 handle->event->pending_wakeup = 1;
3169 perf_pending_queue(&handle->event->pending,
3170 perf_pending_event);
3171 } else
3172 perf_event_wakeup(handle->event);
3173 }
3174
3175 /*
3176  * We need to ensure a later event doesn't publish a head when a former
3177  * event isn't done writing. However, since we need to deal with NMIs we
3178 * cannot fully serialize things.
3179 *
3180 * We only publish the head (and generate a wakeup) when the outer-most
3181 * event completes.
3182 */
3183 static void perf_output_get_handle(struct perf_output_handle *handle)
3184 {
3185 struct perf_buffer *buffer = handle->buffer;
3186
3187 preempt_disable();
3188 local_inc(&buffer->nest);
3189 handle->wakeup = local_read(&buffer->wakeup);
3190 }
3191
3192 static void perf_output_put_handle(struct perf_output_handle *handle)
3193 {
3194 struct perf_buffer *buffer = handle->buffer;
3195 unsigned long head;
3196
3197 again:
3198 head = local_read(&buffer->head);
3199
3200 /*
3201 * IRQ/NMI can happen here, which means we can miss a head update.
3202 */
3203
3204 if (!local_dec_and_test(&buffer->nest))
3205 goto out;
3206
3207 /*
3208 * Publish the known good head. Rely on the full barrier implied
3209 	 * by local_dec_and_test() to order the buffer->head read and this
3210 * write.
3211 */
3212 buffer->user_page->data_head = head;
3213
3214 /*
3215 	 * Now check if we missed an update -- rely on the (compiler)
3216 	 * barrier in local_dec_and_test() to re-read buffer->head.
3217 */
3218 if (unlikely(head != local_read(&buffer->head))) {
3219 local_inc(&buffer->nest);
3220 goto again;
3221 }
3222
3223 if (handle->wakeup != local_read(&buffer->wakeup))
3224 perf_output_wakeup(handle);
3225
3226 out:
3227 preempt_enable();
3228 }
3229
3230 __always_inline void perf_output_copy(struct perf_output_handle *handle,
3231 const void *buf, unsigned int len)
3232 {
3233 do {
3234 unsigned long size = min_t(unsigned long, handle->size, len);
3235
3236 memcpy(handle->addr, buf, size);
3237
3238 len -= size;
3239 handle->addr += size;
3240 buf += size;
3241 handle->size -= size;
3242 if (!handle->size) {
3243 struct perf_buffer *buffer = handle->buffer;
3244
3245 handle->page++;
3246 handle->page &= buffer->nr_pages - 1;
3247 handle->addr = buffer->data_pages[handle->page];
3248 handle->size = PAGE_SIZE << page_order(buffer);
3249 }
3250 } while (len);
3251 }
3252
3253 int perf_output_begin(struct perf_output_handle *handle,
3254 struct perf_event *event, unsigned int size,
3255 int nmi, int sample)
3256 {
3257 struct perf_buffer *buffer;
3258 unsigned long tail, offset, head;
3259 int have_lost;
3260 struct {
3261 struct perf_event_header header;
3262 u64 id;
3263 u64 lost;
3264 } lost_event;
3265
3266 rcu_read_lock();
3267 /*
3268 * For inherited events we send all the output towards the parent.
3269 */
3270 if (event->parent)
3271 event = event->parent;
3272
3273 buffer = rcu_dereference(event->buffer);
3274 if (!buffer)
3275 goto out;
3276
3277 handle->buffer = buffer;
3278 handle->event = event;
3279 handle->nmi = nmi;
3280 handle->sample = sample;
3281
3282 if (!buffer->nr_pages)
3283 goto out;
3284
3285 have_lost = local_read(&buffer->lost);
3286 if (have_lost)
3287 size += sizeof(lost_event);
3288
3289 perf_output_get_handle(handle);
3290
3291 do {
3292 		 * Userspace could choose to issue a mb() before updating the
3293 		 * tail pointer, so that all reads are completed before the
3294 		 * write is issued.
3295 * write is issued.
3296 */
3297 tail = ACCESS_ONCE(buffer->user_page->data_tail);
3298 smp_rmb();
3299 offset = head = local_read(&buffer->head);
3300 head += size;
3301 if (unlikely(!perf_output_space(buffer, tail, offset, head)))
3302 goto fail;
3303 } while (local_cmpxchg(&buffer->head, offset, head) != offset);
3304
3305 if (head - local_read(&buffer->wakeup) > buffer->watermark)
3306 local_add(buffer->watermark, &buffer->wakeup);
3307
3308 handle->page = offset >> (PAGE_SHIFT + page_order(buffer));
3309 handle->page &= buffer->nr_pages - 1;
3310 handle->size = offset & ((PAGE_SIZE << page_order(buffer)) - 1);
3311 handle->addr = buffer->data_pages[handle->page];
3312 handle->addr += handle->size;
3313 handle->size = (PAGE_SIZE << page_order(buffer)) - handle->size;
3314
3315 if (have_lost) {
3316 lost_event.header.type = PERF_RECORD_LOST;
3317 lost_event.header.misc = 0;
3318 lost_event.header.size = sizeof(lost_event);
3319 lost_event.id = event->id;
3320 lost_event.lost = local_xchg(&buffer->lost, 0);
3321
3322 perf_output_put(handle, lost_event);
3323 }
3324
3325 return 0;
3326
3327 fail:
3328 local_inc(&buffer->lost);
3329 perf_output_put_handle(handle);
3330 out:
3331 rcu_read_unlock();
3332
3333 return -ENOSPC;
3334 }
3335
3336 void perf_output_end(struct perf_output_handle *handle)
3337 {
3338 struct perf_event *event = handle->event;
3339 struct perf_buffer *buffer = handle->buffer;
3340
3341 int wakeup_events = event->attr.wakeup_events;
3342
3343 if (handle->sample && wakeup_events) {
3344 int events = local_inc_return(&buffer->events);
3345 if (events >= wakeup_events) {
3346 local_sub(wakeup_events, &buffer->events);
3347 local_inc(&buffer->wakeup);
3348 }
3349 }
3350
3351 perf_output_put_handle(handle);
3352 rcu_read_unlock();
3353 }
3354
3355 static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
3356 {
3357 /*
3358 * only top level events have the pid namespace they were created in
3359 */
3360 if (event->parent)
3361 event = event->parent;
3362
3363 return task_tgid_nr_ns(p, event->ns);
3364 }
3365
3366 static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
3367 {
3368 /*
3369 * only top level events have the pid namespace they were created in
3370 */
3371 if (event->parent)
3372 event = event->parent;
3373
3374 return task_pid_nr_ns(p, event->ns);
3375 }
3376
3377 static void perf_output_read_one(struct perf_output_handle *handle,
3378 struct perf_event *event)
3379 {
3380 u64 read_format = event->attr.read_format;
3381 u64 values[4];
3382 int n = 0;
3383
3384 values[n++] = perf_event_count(event);
3385 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
3386 values[n++] = event->total_time_enabled +
3387 atomic64_read(&event->child_total_time_enabled);
3388 }
3389 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
3390 values[n++] = event->total_time_running +
3391 atomic64_read(&event->child_total_time_running);
3392 }
3393 if (read_format & PERF_FORMAT_ID)
3394 values[n++] = primary_event_id(event);
3395
3396 perf_output_copy(handle, values, n * sizeof(u64));
3397 }
3398
3399 /*
3400 * XXX PERF_FORMAT_GROUP vs inherited events seems difficult.
3401 */
3402 static void perf_output_read_group(struct perf_output_handle *handle,
3403 struct perf_event *event)
3404 {
3405 struct perf_event *leader = event->group_leader, *sub;
3406 u64 read_format = event->attr.read_format;
3407 u64 values[5];
3408 int n = 0;
3409
3410 values[n++] = 1 + leader->nr_siblings;
3411
3412 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
3413 values[n++] = leader->total_time_enabled;
3414
3415 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
3416 values[n++] = leader->total_time_running;
3417
3418 if (leader != event)
3419 leader->pmu->read(leader);
3420
3421 values[n++] = perf_event_count(leader);
3422 if (read_format & PERF_FORMAT_ID)
3423 values[n++] = primary_event_id(leader);
3424
3425 perf_output_copy(handle, values, n * sizeof(u64));
3426
3427 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
3428 n = 0;
3429
3430 if (sub != event)
3431 sub->pmu->read(sub);
3432
3433 values[n++] = perf_event_count(sub);
3434 if (read_format & PERF_FORMAT_ID)
3435 values[n++] = primary_event_id(sub);
3436
3437 perf_output_copy(handle, values, n * sizeof(u64));
3438 }
3439 }
3440
3441 static void perf_output_read(struct perf_output_handle *handle,
3442 struct perf_event *event)
3443 {
3444 if (event->attr.read_format & PERF_FORMAT_GROUP)
3445 perf_output_read_group(handle, event);
3446 else
3447 perf_output_read_one(handle, event);
3448 }
3449
3450 void perf_output_sample(struct perf_output_handle *handle,
3451 struct perf_event_header *header,
3452 struct perf_sample_data *data,
3453 struct perf_event *event)
3454 {
3455 u64 sample_type = data->type;
3456
3457 perf_output_put(handle, *header);
3458
3459 if (sample_type & PERF_SAMPLE_IP)
3460 perf_output_put(handle, data->ip);
3461
3462 if (sample_type & PERF_SAMPLE_TID)
3463 perf_output_put(handle, data->tid_entry);
3464
3465 if (sample_type & PERF_SAMPLE_TIME)
3466 perf_output_put(handle, data->time);
3467
3468 if (sample_type & PERF_SAMPLE_ADDR)
3469 perf_output_put(handle, data->addr);
3470
3471 if (sample_type & PERF_SAMPLE_ID)
3472 perf_output_put(handle, data->id);
3473
3474 if (sample_type & PERF_SAMPLE_STREAM_ID)
3475 perf_output_put(handle, data->stream_id);
3476
3477 if (sample_type & PERF_SAMPLE_CPU)
3478 perf_output_put(handle, data->cpu_entry);
3479
3480 if (sample_type & PERF_SAMPLE_PERIOD)
3481 perf_output_put(handle, data->period);
3482
3483 if (sample_type & PERF_SAMPLE_READ)
3484 perf_output_read(handle, event);
3485
3486 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
3487 if (data->callchain) {
3488 int size = 1;
3489
3490 if (data->callchain)
3491 size += data->callchain->nr;
3492
3493 size *= sizeof(u64);
3494
3495 perf_output_copy(handle, data->callchain, size);
3496 } else {
3497 u64 nr = 0;
3498 perf_output_put(handle, nr);
3499 }
3500 }
3501
3502 if (sample_type & PERF_SAMPLE_RAW) {
3503 if (data->raw) {
3504 perf_output_put(handle, data->raw->size);
3505 perf_output_copy(handle, data->raw->data,
3506 data->raw->size);
3507 } else {
3508 struct {
3509 u32 size;
3510 u32 data;
3511 } raw = {
3512 .size = sizeof(u32),
3513 .data = 0,
3514 };
3515 perf_output_put(handle, raw);
3516 }
3517 }
3518 }
3519
3520 void perf_prepare_sample(struct perf_event_header *header,
3521 struct perf_sample_data *data,
3522 struct perf_event *event,
3523 struct pt_regs *regs)
3524 {
3525 u64 sample_type = event->attr.sample_type;
3526
3527 data->type = sample_type;
3528
3529 header->type = PERF_RECORD_SAMPLE;
3530 header->size = sizeof(*header);
3531
3532 header->misc = 0;
3533 header->misc |= perf_misc_flags(regs);
3534
3535 if (sample_type & PERF_SAMPLE_IP) {
3536 data->ip = perf_instruction_pointer(regs);
3537
3538 header->size += sizeof(data->ip);
3539 }
3540
3541 if (sample_type & PERF_SAMPLE_TID) {
3542 /* namespace issues */
3543 data->tid_entry.pid = perf_event_pid(event, current);
3544 data->tid_entry.tid = perf_event_tid(event, current);
3545
3546 header->size += sizeof(data->tid_entry);
3547 }
3548
3549 if (sample_type & PERF_SAMPLE_TIME) {
3550 data->time = perf_clock();
3551
3552 header->size += sizeof(data->time);
3553 }
3554
3555 if (sample_type & PERF_SAMPLE_ADDR)
3556 header->size += sizeof(data->addr);
3557
3558 if (sample_type & PERF_SAMPLE_ID) {
3559 data->id = primary_event_id(event);
3560
3561 header->size += sizeof(data->id);
3562 }
3563
3564 if (sample_type & PERF_SAMPLE_STREAM_ID) {
3565 data->stream_id = event->id;
3566
3567 header->size += sizeof(data->stream_id);
3568 }
3569
3570 if (sample_type & PERF_SAMPLE_CPU) {
3571 data->cpu_entry.cpu = raw_smp_processor_id();
3572 data->cpu_entry.reserved = 0;
3573
3574 header->size += sizeof(data->cpu_entry);
3575 }
3576
3577 if (sample_type & PERF_SAMPLE_PERIOD)
3578 header->size += sizeof(data->period);
3579
3580 if (sample_type & PERF_SAMPLE_READ)
3581 header->size += perf_event_read_size(event);
3582
3583 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
3584 int size = 1;
3585
3586 data->callchain = perf_callchain(regs);
3587
3588 if (data->callchain)
3589 size += data->callchain->nr;
3590
3591 header->size += size * sizeof(u64);
3592 }
3593
3594 if (sample_type & PERF_SAMPLE_RAW) {
3595 int size = sizeof(u32);
3596
3597 if (data->raw)
3598 size += data->raw->size;
3599 else
3600 size += sizeof(u32);
3601
3602 WARN_ON_ONCE(size & (sizeof(u64)-1));
3603 header->size += size;
3604 }
3605 }
3606
3607 static void perf_event_output(struct perf_event *event, int nmi,
3608 struct perf_sample_data *data,
3609 struct pt_regs *regs)
3610 {
3611 struct perf_output_handle handle;
3612 struct perf_event_header header;
3613
3614 /* protect the callchain buffers */
3615 rcu_read_lock();
3616
3617 perf_prepare_sample(&header, data, event, regs);
3618
3619 if (perf_output_begin(&handle, event, header.size, nmi, 1))
3620 goto exit;
3621
3622 perf_output_sample(&handle, &header, data, event);
3623
3624 perf_output_end(&handle);
3625
3626 exit:
3627 rcu_read_unlock();
3628 }
3629
3630 /*
3631 * read event_id
3632 */
3633
3634 struct perf_read_event {
3635 struct perf_event_header header;
3636
3637 u32 pid;
3638 u32 tid;
3639 };
3640
3641 static void
3642 perf_event_read_event(struct perf_event *event,
3643 struct task_struct *task)
3644 {
3645 struct perf_output_handle handle;
3646 struct perf_read_event read_event = {
3647 .header = {
3648 .type = PERF_RECORD_READ,
3649 .misc = 0,
3650 .size = sizeof(read_event) + perf_event_read_size(event),
3651 },
3652 .pid = perf_event_pid(event, task),
3653 .tid = perf_event_tid(event, task),
3654 };
3655 int ret;
3656
3657 ret = perf_output_begin(&handle, event, read_event.header.size, 0, 0);
3658 if (ret)
3659 return;
3660
3661 perf_output_put(&handle, read_event);
3662 perf_output_read(&handle, event);
3663
3664 perf_output_end(&handle);
3665 }
3666
3667 /*
3668 * task tracking -- fork/exit
3669 *
3670 * enabled by: attr.comm | attr.mmap | attr.mmap_data | attr.task
3671 */
3672
3673 struct perf_task_event {
3674 struct task_struct *task;
3675 struct perf_event_context *task_ctx;
3676
3677 struct {
3678 struct perf_event_header header;
3679
3680 u32 pid;
3681 u32 ppid;
3682 u32 tid;
3683 u32 ptid;
3684 u64 time;
3685 } event_id;
3686 };
3687
3688 static void perf_event_task_output(struct perf_event *event,
3689 struct perf_task_event *task_event)
3690 {
3691 struct perf_output_handle handle;
3692 struct task_struct *task = task_event->task;
3693 int size, ret;
3694
3695 size = task_event->event_id.header.size;
3696 ret = perf_output_begin(&handle, event, size, 0, 0);
3697
3698 if (ret)
3699 return;
3700
3701 task_event->event_id.pid = perf_event_pid(event, task);
3702 task_event->event_id.ppid = perf_event_pid(event, current);
3703
3704 task_event->event_id.tid = perf_event_tid(event, task);
3705 task_event->event_id.ptid = perf_event_tid(event, current);
3706
3707 perf_output_put(&handle, task_event->event_id);
3708
3709 perf_output_end(&handle);
3710 }
3711
3712 static int perf_event_task_match(struct perf_event *event)
3713 {
3714 if (event->state < PERF_EVENT_STATE_INACTIVE)
3715 return 0;
3716
3717 if (event->cpu != -1 && event->cpu != smp_processor_id())
3718 return 0;
3719
3720 if (event->attr.comm || event->attr.mmap ||
3721 event->attr.mmap_data || event->attr.task)
3722 return 1;
3723
3724 return 0;
3725 }
3726
3727 static void perf_event_task_ctx(struct perf_event_context *ctx,
3728 struct perf_task_event *task_event)
3729 {
3730 struct perf_event *event;
3731
3732 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
3733 if (perf_event_task_match(event))
3734 perf_event_task_output(event, task_event);
3735 }
3736 }
3737
3738 static void perf_event_task_event(struct perf_task_event *task_event)
3739 {
3740 struct perf_cpu_context *cpuctx;
3741 struct perf_event_context *ctx = task_event->task_ctx;
3742
3743 rcu_read_lock();
3744 cpuctx = &get_cpu_var(perf_cpu_context);
3745 perf_event_task_ctx(&cpuctx->ctx, task_event);
3746 if (!ctx)
3747 ctx = rcu_dereference(current->perf_event_ctxp);
3748 if (ctx)
3749 perf_event_task_ctx(ctx, task_event);
3750 put_cpu_var(perf_cpu_context);
3751 rcu_read_unlock();
3752 }
3753
3754 static void perf_event_task(struct task_struct *task,
3755 struct perf_event_context *task_ctx,
3756 int new)
3757 {
3758 struct perf_task_event task_event;
3759
3760 if (!atomic_read(&nr_comm_events) &&
3761 !atomic_read(&nr_mmap_events) &&
3762 !atomic_read(&nr_task_events))
3763 return;
3764
3765 task_event = (struct perf_task_event){
3766 .task = task,
3767 .task_ctx = task_ctx,
3768 .event_id = {
3769 .header = {
3770 .type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT,
3771 .misc = 0,
3772 .size = sizeof(task_event.event_id),
3773 },
3774 /* .pid */
3775 /* .ppid */
3776 /* .tid */
3777 /* .ptid */
3778 .time = perf_clock(),
3779 },
3780 };
3781
3782 perf_event_task_event(&task_event);
3783 }
3784
3785 void perf_event_fork(struct task_struct *task)
3786 {
3787 perf_event_task(task, NULL, 1);
3788 }
3789
3790 /*
3791 * comm tracking
3792 */
3793
3794 struct perf_comm_event {
3795 struct task_struct *task;
3796 char *comm;
3797 int comm_size;
3798
3799 struct {
3800 struct perf_event_header header;
3801
3802 u32 pid;
3803 u32 tid;
3804 } event_id;
3805 };
3806
3807 static void perf_event_comm_output(struct perf_event *event,
3808 struct perf_comm_event *comm_event)
3809 {
3810 struct perf_output_handle handle;
3811 int size = comm_event->event_id.header.size;
3812 int ret = perf_output_begin(&handle, event, size, 0, 0);
3813
3814 if (ret)
3815 return;
3816
3817 comm_event->event_id.pid = perf_event_pid(event, comm_event->task);
3818 comm_event->event_id.tid = perf_event_tid(event, comm_event->task);
3819
3820 perf_output_put(&handle, comm_event->event_id);
3821 perf_output_copy(&handle, comm_event->comm,
3822 comm_event->comm_size);
3823 perf_output_end(&handle);
3824 }
3825
3826 static int perf_event_comm_match(struct perf_event *event)
3827 {
3828 if (event->state < PERF_EVENT_STATE_INACTIVE)
3829 return 0;
3830
3831 if (event->cpu != -1 && event->cpu != smp_processor_id())
3832 return 0;
3833
3834 if (event->attr.comm)
3835 return 1;
3836
3837 return 0;
3838 }
3839
3840 static void perf_event_comm_ctx(struct perf_event_context *ctx,
3841 struct perf_comm_event *comm_event)
3842 {
3843 struct perf_event *event;
3844
3845 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
3846 if (perf_event_comm_match(event))
3847 perf_event_comm_output(event, comm_event);
3848 }
3849 }
3850
3851 static void perf_event_comm_event(struct perf_comm_event *comm_event)
3852 {
3853 struct perf_cpu_context *cpuctx;
3854 struct perf_event_context *ctx;
3855 unsigned int size;
3856 char comm[TASK_COMM_LEN];
3857
3858 memset(comm, 0, sizeof(comm));
3859 strlcpy(comm, comm_event->task->comm, sizeof(comm));
3860 size = ALIGN(strlen(comm)+1, sizeof(u64));
3861
3862 comm_event->comm = comm;
3863 comm_event->comm_size = size;
3864
3865 comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
3866
3867 rcu_read_lock();
3868 cpuctx = &get_cpu_var(perf_cpu_context);
3869 perf_event_comm_ctx(&cpuctx->ctx, comm_event);
3870 ctx = rcu_dereference(current->perf_event_ctxp);
3871 if (ctx)
3872 perf_event_comm_ctx(ctx, comm_event);
3873 put_cpu_var(perf_cpu_context);
3874 rcu_read_unlock();
3875 }
3876
3877 void perf_event_comm(struct task_struct *task)
3878 {
3879 struct perf_comm_event comm_event;
3880
3881 if (task->perf_event_ctxp)
3882 perf_event_enable_on_exec(task);
3883
3884 if (!atomic_read(&nr_comm_events))
3885 return;
3886
3887 comm_event = (struct perf_comm_event){
3888 .task = task,
3889 /* .comm */
3890 /* .comm_size */
3891 .event_id = {
3892 .header = {
3893 .type = PERF_RECORD_COMM,
3894 .misc = 0,
3895 /* .size */
3896 },
3897 /* .pid */
3898 /* .tid */
3899 },
3900 };
3901
3902 perf_event_comm_event(&comm_event);
3903 }
3904
3905 /*
3906 * mmap tracking
3907 */
3908
3909 struct perf_mmap_event {
3910 struct vm_area_struct *vma;
3911
3912 const char *file_name;
3913 int file_size;
3914
3915 struct {
3916 struct perf_event_header header;
3917
3918 u32 pid;
3919 u32 tid;
3920 u64 start;
3921 u64 len;
3922 u64 pgoff;
3923 } event_id;
3924 };
3925
3926 static void perf_event_mmap_output(struct perf_event *event,
3927 struct perf_mmap_event *mmap_event)
3928 {
3929 struct perf_output_handle handle;
3930 int size = mmap_event->event_id.header.size;
3931 int ret = perf_output_begin(&handle, event, size, 0, 0);
3932
3933 if (ret)
3934 return;
3935
3936 mmap_event->event_id.pid = perf_event_pid(event, current);
3937 mmap_event->event_id.tid = perf_event_tid(event, current);
3938
3939 perf_output_put(&handle, mmap_event->event_id);
3940 perf_output_copy(&handle, mmap_event->file_name,
3941 mmap_event->file_size);
3942 perf_output_end(&handle);
3943 }
3944
3945 static int perf_event_mmap_match(struct perf_event *event,
3946 struct perf_mmap_event *mmap_event,
3947 int executable)
3948 {
3949 if (event->state < PERF_EVENT_STATE_INACTIVE)
3950 return 0;
3951
3952 if (event->cpu != -1 && event->cpu != smp_processor_id())
3953 return 0;
3954
3955 if ((!executable && event->attr.mmap_data) ||
3956 (executable && event->attr.mmap))
3957 return 1;
3958
3959 return 0;
3960 }
3961
3962 static void perf_event_mmap_ctx(struct perf_event_context *ctx,
3963 struct perf_mmap_event *mmap_event,
3964 int executable)
3965 {
3966 struct perf_event *event;
3967
3968 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
3969 if (perf_event_mmap_match(event, mmap_event, executable))
3970 perf_event_mmap_output(event, mmap_event);
3971 }
3972 }
3973
3974 static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
3975 {
3976 struct perf_cpu_context *cpuctx;
3977 struct perf_event_context *ctx;
3978 struct vm_area_struct *vma = mmap_event->vma;
3979 struct file *file = vma->vm_file;
3980 unsigned int size;
3981 char tmp[16];
3982 char *buf = NULL;
3983 const char *name;
3984
3985 memset(tmp, 0, sizeof(tmp));
3986
3987 if (file) {
3988 /*
3989 * d_path works from the end of the buffer backwards, so we
3990 * need to add enough zero bytes after the string to handle
3991 * the 64bit alignment we do later.
3992 */
3993 buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
3994 if (!buf) {
3995 name = strncpy(tmp, "//enomem", sizeof(tmp));
3996 goto got_name;
3997 }
3998 name = d_path(&file->f_path, buf, PATH_MAX);
3999 if (IS_ERR(name)) {
4000 name = strncpy(tmp, "//toolong", sizeof(tmp));
4001 goto got_name;
4002 }
4003 } else {
4004 if (arch_vma_name(mmap_event->vma)) {
4005 name = strncpy(tmp, arch_vma_name(mmap_event->vma),
4006 sizeof(tmp));
4007 goto got_name;
4008 }
4009
4010 if (!vma->vm_mm) {
4011 name = strncpy(tmp, "[vdso]", sizeof(tmp));
4012 goto got_name;
4013 } else if (vma->vm_start <= vma->vm_mm->start_brk &&
4014 vma->vm_end >= vma->vm_mm->brk) {
4015 name = strncpy(tmp, "[heap]", sizeof(tmp));
4016 goto got_name;
4017 } else if (vma->vm_start <= vma->vm_mm->start_stack &&
4018 vma->vm_end >= vma->vm_mm->start_stack) {
4019 name = strncpy(tmp, "[stack]", sizeof(tmp));
4020 goto got_name;
4021 }
4022
4023 name = strncpy(tmp, "//anon", sizeof(tmp));
4024 goto got_name;
4025 }
4026
4027 got_name:
4028 size = ALIGN(strlen(name)+1, sizeof(u64));
4029
4030 mmap_event->file_name = name;
4031 mmap_event->file_size = size;
4032
4033 mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
4034
4035 rcu_read_lock();
4036 cpuctx = &get_cpu_var(perf_cpu_context);
4037 perf_event_mmap_ctx(&cpuctx->ctx, mmap_event, vma->vm_flags & VM_EXEC);
4038 ctx = rcu_dereference(current->perf_event_ctxp);
4039 if (ctx)
4040 perf_event_mmap_ctx(ctx, mmap_event, vma->vm_flags & VM_EXEC);
4041 put_cpu_var(perf_cpu_context);
4042 rcu_read_unlock();
4043
4044 kfree(buf);
4045 }
4046
4047 void perf_event_mmap(struct vm_area_struct *vma)
4048 {
4049 struct perf_mmap_event mmap_event;
4050
4051 if (!atomic_read(&nr_mmap_events))
4052 return;
4053
4054 mmap_event = (struct perf_mmap_event){
4055 .vma = vma,
4056 /* .file_name */
4057 /* .file_size */
4058 .event_id = {
4059 .header = {
4060 .type = PERF_RECORD_MMAP,
4061 .misc = PERF_RECORD_MISC_USER,
4062 /* .size */
4063 },
4064 /* .pid */
4065 /* .tid */
4066 .start = vma->vm_start,
4067 .len = vma->vm_end - vma->vm_start,
4068 .pgoff = (u64)vma->vm_pgoff << PAGE_SHIFT,
4069 },
4070 };
4071
4072 perf_event_mmap_event(&mmap_event);
4073 }
4074
4075 /*
4076 * IRQ throttle logging
4077 */
4078
4079 static void perf_log_throttle(struct perf_event *event, int enable)
4080 {
4081 struct perf_output_handle handle;
4082 int ret;
4083
4084 struct {
4085 struct perf_event_header header;
4086 u64 time;
4087 u64 id;
4088 u64 stream_id;
4089 } throttle_event = {
4090 .header = {
4091 .type = PERF_RECORD_THROTTLE,
4092 .misc = 0,
4093 .size = sizeof(throttle_event),
4094 },
4095 .time = perf_clock(),
4096 .id = primary_event_id(event),
4097 .stream_id = event->id,
4098 };
4099
4100 if (enable)
4101 throttle_event.header.type = PERF_RECORD_UNTHROTTLE;
4102
4103 ret = perf_output_begin(&handle, event, sizeof(throttle_event), 1, 0);
4104 if (ret)
4105 return;
4106
4107 perf_output_put(&handle, throttle_event);
4108 perf_output_end(&handle);
4109 }
4110
4111 /*
4112 * Generic event overflow handling, sampling.
4113 */
4114
4115 static int __perf_event_overflow(struct perf_event *event, int nmi,
4116 int throttle, struct perf_sample_data *data,
4117 struct pt_regs *regs)
4118 {
4119 int events = atomic_read(&event->event_limit);
4120 struct hw_perf_event *hwc = &event->hw;
4121 int ret = 0;
4122
4123 throttle = (throttle && event->pmu->unthrottle != NULL);
4124
4125 if (!throttle) {
4126 hwc->interrupts++;
4127 } else {
4128 if (hwc->interrupts != MAX_INTERRUPTS) {
4129 hwc->interrupts++;
4130 if (HZ * hwc->interrupts >
4131 (u64)sysctl_perf_event_sample_rate) {
4132 hwc->interrupts = MAX_INTERRUPTS;
4133 perf_log_throttle(event, 0);
4134 ret = 1;
4135 }
4136 } else {
4137 			 * Keep re-disabling the event even though we disabled it
4138 			 * on the previous pass - just in case we raced with a
4139 * pass we disabled it - just in case we raced with a
4140 * sched-in and the event got enabled again:
4141 */
4142 ret = 1;
4143 }
4144 }
4145
4146 if (event->attr.freq) {
4147 u64 now = perf_clock();
4148 s64 delta = now - hwc->freq_time_stamp;
4149
4150 hwc->freq_time_stamp = now;
4151
4152 if (delta > 0 && delta < 2*TICK_NSEC)
4153 perf_adjust_period(event, delta, hwc->last_period);
4154 }
4155
4156 /*
4157 * XXX event_limit might not quite work as expected on inherited
4158 * events
4159 */
4160
4161 event->pending_kill = POLL_IN;
4162 if (events && atomic_dec_and_test(&event->event_limit)) {
4163 ret = 1;
4164 event->pending_kill = POLL_HUP;
4165 if (nmi) {
4166 event->pending_disable = 1;
4167 perf_pending_queue(&event->pending,
4168 perf_pending_event);
4169 } else
4170 perf_event_disable(event);
4171 }
4172
4173 if (event->overflow_handler)
4174 event->overflow_handler(event, nmi, data, regs);
4175 else
4176 perf_event_output(event, nmi, data, regs);
4177
4178 return ret;
4179 }
4180
4181 int perf_event_overflow(struct perf_event *event, int nmi,
4182 struct perf_sample_data *data,
4183 struct pt_regs *regs)
4184 {
4185 return __perf_event_overflow(event, nmi, 1, data, regs);
4186 }
4187
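/*
 * Throttling example (numbers are illustrative): the check in
 * __perf_event_overflow(),
 *
 *	HZ * hwc->interrupts > (u64)sysctl_perf_event_sample_rate
 *
 * extrapolates the interrupt count to a per-second rate.  With HZ = 1000
 * and the default sample rate of 100000, an event gets throttled (and a
 * PERF_RECORD_THROTTLE record is emitted) once hwc->interrupts exceeds
 * 100 before the count is reset again.
 */
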
4188 /*
4189 * Generic software event infrastructure
4190 */
4191
4192 /*
4193 * We directly increment event->count and keep a second value in
4194  * event->hw.period_left to count intervals. This period value
4195  * is kept in the range [-sample_period, 0] so that we can use the
4196  * sign as a trigger.
4197 */
4198
4199 static u64 perf_swevent_set_period(struct perf_event *event)
4200 {
4201 struct hw_perf_event *hwc = &event->hw;
4202 u64 period = hwc->last_period;
4203 u64 nr, offset;
4204 s64 old, val;
4205
4206 hwc->last_period = hwc->sample_period;
4207
4208 again:
4209 old = val = local64_read(&hwc->period_left);
4210 if (val < 0)
4211 return 0;
4212
4213 nr = div64_u64(period + val, period);
4214 offset = nr * period;
4215 val -= offset;
4216 if (local64_cmpxchg(&hwc->period_left, old, val) != old)
4217 goto again;
4218
4219 return nr;
4220 }
4221
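/*
 * Worked example (values are illustrative): with sample_period = 100 and
 * period_left having climbed to +250, we get
 *
 *	nr  = div64_u64(100 + 250, 100) = 3
 *	val = 250 - 3 * 100 = -50
 *
 * i.e. three sample periods have elapsed and 50 more events are needed
 * until the next overflow, which keeps period_left inside
 * [-sample_period, 0].
 */
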
4222 static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
4223 int nmi, struct perf_sample_data *data,
4224 struct pt_regs *regs)
4225 {
4226 struct hw_perf_event *hwc = &event->hw;
4227 int throttle = 0;
4228
4229 data->period = event->hw.last_period;
4230 if (!overflow)
4231 overflow = perf_swevent_set_period(event);
4232
4233 if (hwc->interrupts == MAX_INTERRUPTS)
4234 return;
4235
4236 for (; overflow; overflow--) {
4237 if (__perf_event_overflow(event, nmi, throttle,
4238 data, regs)) {
4239 /*
4240 * We inhibit the overflow from happening when
4241 * hwc->interrupts == MAX_INTERRUPTS.
4242 */
4243 break;
4244 }
4245 throttle = 1;
4246 }
4247 }
4248
4249 static void perf_swevent_add(struct perf_event *event, u64 nr,
4250 int nmi, struct perf_sample_data *data,
4251 struct pt_regs *regs)
4252 {
4253 struct hw_perf_event *hwc = &event->hw;
4254
4255 local64_add(nr, &event->count);
4256
4257 if (!regs)
4258 return;
4259
4260 if (!hwc->sample_period)
4261 return;
4262
4263 if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
4264 return perf_swevent_overflow(event, 1, nmi, data, regs);
4265
4266 if (local64_add_negative(nr, &hwc->period_left))
4267 return;
4268
4269 perf_swevent_overflow(event, 0, nmi, data, regs);
4270 }
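/*
 * perf_swevent_add() above is the common software-event fast path:
 * event->count is always incremented, but samples are generated only
 * when regs are available and a sample_period is set. The special case
 * nr == 1 && sample_period == 1 (and !attr.freq) overflows on every
 * event; otherwise period_left counts down and perf_swevent_overflow()
 * is entered only once it reaches or crosses zero.
 */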
4271
4272 static int perf_exclude_event(struct perf_event *event,
4273 struct pt_regs *regs)
4274 {
4275 if (regs) {
4276 if (event->attr.exclude_user && user_mode(regs))
4277 return 1;
4278
4279 if (event->attr.exclude_kernel && !user_mode(regs))
4280 return 1;
4281 }
4282
4283 return 0;
4284 }
4285
4286 static int perf_swevent_match(struct perf_event *event,
4287 enum perf_type_id type,
4288 u32 event_id,
4289 struct perf_sample_data *data,
4290 struct pt_regs *regs)
4291 {
4292 if (event->attr.type != type)
4293 return 0;
4294
4295 if (event->attr.config != event_id)
4296 return 0;
4297
4298 if (perf_exclude_event(event, regs))
4299 return 0;
4300
4301 return 1;
4302 }
4303
4304 static inline u64 swevent_hash(u64 type, u32 event_id)
4305 {
4306 u64 val = event_id | (type << 32);
4307
4308 return hash_64(val, SWEVENT_HLIST_BITS);
4309 }
4310
4311 static inline struct hlist_head *
4312 __find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id)
4313 {
4314 u64 hash = swevent_hash(type, event_id);
4315
4316 return &hlist->heads[hash];
4317 }
4318
4319 /* For the read side: hlist lookup when events trigger */
4320 static inline struct hlist_head *
4321 find_swevent_head_rcu(struct perf_cpu_context *ctx, u64 type, u32 event_id)
4322 {
4323 struct swevent_hlist *hlist;
4324
4325 hlist = rcu_dereference(ctx->swevent_hlist);
4326 if (!hlist)
4327 return NULL;
4328
4329 return __find_swevent_head(hlist, type, event_id);
4330 }
4331
4332 /* For the event head insertion and removal in the hlist */
4333 static inline struct hlist_head *
4334 find_swevent_head(struct perf_cpu_context *ctx, struct perf_event *event)
4335 {
4336 struct swevent_hlist *hlist;
4337 u32 event_id = event->attr.config;
4338 u64 type = event->attr.type;
4339
4340 /*
4341 * Event scheduling is always serialized against hlist allocation
4342 * and release, which makes the protected version suitable here.
4343 * The context lock guarantees that.
4344 */
4345 hlist = rcu_dereference_protected(ctx->swevent_hlist,
4346 lockdep_is_held(&event->ctx->lock));
4347 if (!hlist)
4348 return NULL;
4349
4350 return __find_swevent_head(hlist, type, event_id);
4351 }
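/*
 * Two lookup flavours for the same per-cpu hash table: the _rcu variant
 * is used from the event delivery path under rcu_read_lock(), while
 * find_swevent_head() is used from event scheduling, where the context
 * lock serializes against hlist allocation/release and the lockdep
 * annotated rcu_dereference_protected() is therefore sufficient. Both
 * hash on (type << 32 | event_id) via hash_64().
 */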
4352
4353 static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
4354 u64 nr, int nmi,
4355 struct perf_sample_data *data,
4356 struct pt_regs *regs)
4357 {
4358 struct perf_cpu_context *cpuctx;
4359 struct perf_event *event;
4360 struct hlist_node *node;
4361 struct hlist_head *head;
4362
4363 cpuctx = &__get_cpu_var(perf_cpu_context);
4364
4365 rcu_read_lock();
4366
4367 head = find_swevent_head_rcu(cpuctx, type, event_id);
4368
4369 if (!head)
4370 goto end;
4371
4372 hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
4373 if (perf_swevent_match(event, type, event_id, data, regs))
4374 perf_swevent_add(event, nr, nmi, data, regs);
4375 }
4376 end:
4377 rcu_read_unlock();
4378 }
4379
4380 int perf_swevent_get_recursion_context(void)
4381 {
4382 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
4383
4384 return get_recursion_context(cpuctx->recursion);
4385 }
4386 EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
4387
4388 inline void perf_swevent_put_recursion_context(int rctx)
4389 {
4390 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
4391
4392 put_recursion_context(cpuctx->recursion, rctx);
4393 }
4394
4395 void __perf_sw_event(u32 event_id, u64 nr, int nmi,
4396 struct pt_regs *regs, u64 addr)
4397 {
4398 struct perf_sample_data data;
4399 int rctx;
4400
4401 preempt_disable_notrace();
4402 rctx = perf_swevent_get_recursion_context();
4403 if (rctx < 0)
4404 return;
4405
4406 perf_sample_data_init(&data, addr);
4407
4408 do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, nmi, &data, regs);
4409
4410 perf_swevent_put_recursion_context(rctx);
4411 preempt_enable_notrace();
4412 }
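/*
 * Usage sketch (illustrative only, not part of this file): callers
 * elsewhere in the kernel count a software event through the
 * perf_sw_event() wrapper, roughly like the fault path does:
 *
 *	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
 *
 * which ends up in __perf_sw_event() above with preemption disabled and
 * a recursion context taken for the current interrupt level.
 */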
4413
4414 static void perf_swevent_read(struct perf_event *event)
4415 {
4416 }
4417
4418 static int perf_swevent_enable(struct perf_event *event)
4419 {
4420 struct hw_perf_event *hwc = &event->hw;
4421 struct perf_cpu_context *cpuctx;
4422 struct hlist_head *head;
4423
4424 cpuctx = &__get_cpu_var(perf_cpu_context);
4425
4426 if (hwc->sample_period) {
4427 hwc->last_period = hwc->sample_period;
4428 perf_swevent_set_period(event);
4429 }
4430
4431 head = find_swevent_head(cpuctx, event);
4432 if (WARN_ON_ONCE(!head))
4433 return -EINVAL;
4434
4435 hlist_add_head_rcu(&event->hlist_entry, head);
4436
4437 return 0;
4438 }
4439
4440 static void perf_swevent_disable(struct perf_event *event)
4441 {
4442 hlist_del_rcu(&event->hlist_entry);
4443 }
4444
4445 static void perf_swevent_void(struct perf_event *event)
4446 {
4447 }
4448
4449 static int perf_swevent_int(struct perf_event *event)
4450 {
4451 return 0;
4452 }
4453
4454 /* Deref the hlist from the update side */
4455 static inline struct swevent_hlist *
4456 swevent_hlist_deref(struct perf_cpu_context *cpuctx)
4457 {
4458 return rcu_dereference_protected(cpuctx->swevent_hlist,
4459 lockdep_is_held(&cpuctx->hlist_mutex));
4460 }
4461
4462 static void swevent_hlist_release_rcu(struct rcu_head *rcu_head)
4463 {
4464 struct swevent_hlist *hlist;
4465
4466 hlist = container_of(rcu_head, struct swevent_hlist, rcu_head);
4467 kfree(hlist);
4468 }
4469
4470 static void swevent_hlist_release(struct perf_cpu_context *cpuctx)
4471 {
4472 struct swevent_hlist *hlist = swevent_hlist_deref(cpuctx);
4473
4474 if (!hlist)
4475 return;
4476
4477 rcu_assign_pointer(cpuctx->swevent_hlist, NULL);
4478 call_rcu(&hlist->rcu_head, swevent_hlist_release_rcu);
4479 }
4480
4481 static void swevent_hlist_put_cpu(struct perf_event *event, int cpu)
4482 {
4483 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
4484
4485 mutex_lock(&cpuctx->hlist_mutex);
4486
4487 if (!--cpuctx->hlist_refcount)
4488 swevent_hlist_release(cpuctx);
4489
4490 mutex_unlock(&cpuctx->hlist_mutex);
4491 }
4492
4493 static void swevent_hlist_put(struct perf_event *event)
4494 {
4495 int cpu;
4496
4497 if (event->cpu != -1) {
4498 swevent_hlist_put_cpu(event, event->cpu);
4499 return;
4500 }
4501
4502 for_each_possible_cpu(cpu)
4503 swevent_hlist_put_cpu(event, cpu);
4504 }
4505
4506 static int swevent_hlist_get_cpu(struct perf_event *event, int cpu)
4507 {
4508 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
4509 int err = 0;
4510
4511 mutex_lock(&cpuctx->hlist_mutex);
4512
4513 if (!swevent_hlist_deref(cpuctx) && cpu_online(cpu)) {
4514 struct swevent_hlist *hlist;
4515
4516 hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
4517 if (!hlist) {
4518 err = -ENOMEM;
4519 goto exit;
4520 }
4521 rcu_assign_pointer(cpuctx->swevent_hlist, hlist);
4522 }
4523 cpuctx->hlist_refcount++;
4524 exit:
4525 mutex_unlock(&cpuctx->hlist_mutex);
4526
4527 return err;
4528 }
4529
4530 static int swevent_hlist_get(struct perf_event *event)
4531 {
4532 int err;
4533 int cpu, failed_cpu;
4534
4535 if (event->cpu != -1)
4536 return swevent_hlist_get_cpu(event, event->cpu);
4537
4538 get_online_cpus();
4539 for_each_possible_cpu(cpu) {
4540 err = swevent_hlist_get_cpu(event, cpu);
4541 if (err) {
4542 failed_cpu = cpu;
4543 goto fail;
4544 }
4545 }
4546 put_online_cpus();
4547
4548 return 0;
4549 fail:
4550 for_each_possible_cpu(cpu) {
4551 if (cpu == failed_cpu)
4552 break;
4553 swevent_hlist_put_cpu(event, cpu);
4554 }
4555
4556 put_online_cpus();
4557 return err;
4558 }
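/*
 * Lifetime of the per-cpu swevent hash tables: swevent_hlist_get() is
 * called when a software event is created (for its target cpu, or for
 * every possible cpu when event->cpu == -1) and allocates the table on
 * first use under hlist_mutex (online cpus only; cpu hotplug fills in
 * the rest). swevent_hlist_put() drops the refcount and, once it hits
 * zero, detaches the table and frees it after an RCU grace period via
 * call_rcu().
 */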
4559
4560 atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];
4561
4562 static void sw_perf_event_destroy(struct perf_event *event)
4563 {
4564 u64 event_id = event->attr.config;
4565
4566 WARN_ON(event->parent);
4567
4568 atomic_dec(&perf_swevent_enabled[event_id]);
4569 swevent_hlist_put(event);
4570 }
4571
4572 static int perf_swevent_init(struct perf_event *event)
4573 {
4574 int event_id = event->attr.config;
4575
4576 if (event->attr.type != PERF_TYPE_SOFTWARE)
4577 return -ENOENT;
4578
4579 switch (event_id) {
4580 case PERF_COUNT_SW_CPU_CLOCK:
4581 case PERF_COUNT_SW_TASK_CLOCK:
4582 return -ENOENT;
4583
4584 default:
4585 break;
4586 }
4587
4588 if (event_id >= PERF_COUNT_SW_MAX)
4589 return -ENOENT;
4590
4591 if (!event->parent) {
4592 int err;
4593
4594 err = swevent_hlist_get(event);
4595 if (err)
4596 return err;
4597
4598 atomic_inc(&perf_swevent_enabled[event_id]);
4599 event->destroy = sw_perf_event_destroy;
4600 }
4601
4602 return 0;
4603 }
4604
4605 static struct pmu perf_swevent = {
4606 .event_init = perf_swevent_init,
4607 .enable = perf_swevent_enable,
4608 .disable = perf_swevent_disable,
4609 .start = perf_swevent_int,
4610 .stop = perf_swevent_void,
4611 .read = perf_swevent_read,
4612 .unthrottle = perf_swevent_void, /* hwc->interrupts already reset */
4613 };
4614
4615 #ifdef CONFIG_EVENT_TRACING
4616
4617 static int perf_tp_filter_match(struct perf_event *event,
4618 struct perf_sample_data *data)
4619 {
4620 void *record = data->raw->data;
4621
4622 if (likely(!event->filter) || filter_match_preds(event->filter, record))
4623 return 1;
4624 return 0;
4625 }
4626
4627 static int perf_tp_event_match(struct perf_event *event,
4628 struct perf_sample_data *data,
4629 struct pt_regs *regs)
4630 {
4631 /*
4632 * All tracepoints are from kernel-space.
4633 */
4634 if (event->attr.exclude_kernel)
4635 return 0;
4636
4637 if (!perf_tp_filter_match(event, data))
4638 return 0;
4639
4640 return 1;
4641 }
4642
4643 void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
4644 struct pt_regs *regs, struct hlist_head *head, int rctx)
4645 {
4646 struct perf_sample_data data;
4647 struct perf_event *event;
4648 struct hlist_node *node;
4649
4650 struct perf_raw_record raw = {
4651 .size = entry_size,
4652 .data = record,
4653 };
4654
4655 perf_sample_data_init(&data, addr);
4656 data.raw = &raw;
4657
4658 hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
4659 if (perf_tp_event_match(event, &data, regs))
4660 perf_swevent_add(event, count, 1, &data, regs);
4661 }
4662
4663 perf_swevent_put_recursion_context(rctx);
4664 }
4665 EXPORT_SYMBOL_GPL(perf_tp_event);
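/*
 * perf_tp_event() is the entry point used by the tracepoint glue code:
 * the caller hands in the raw trace record, the pre-computed hlist of
 * interested events and an already-taken recursion context. Every
 * matching event (kernel-side only, and passing its optional filter) is
 * fed to perf_swevent_add() with a count of 'count', and the recursion
 * context is released before returning.
 */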
4666
4667 static void tp_perf_event_destroy(struct perf_event *event)
4668 {
4669 perf_trace_destroy(event);
4670 }
4671
4672 static int perf_tp_event_init(struct perf_event *event)
4673 {
4674 int err;
4675
4676 if (event->attr.type != PERF_TYPE_TRACEPOINT)
4677 return -ENOENT;
4678
4679 /*
4680 * Raw tracepoint data is a severe data leak; only allow root to
4681 * have these.
4682 */
4683 if ((event->attr.sample_type & PERF_SAMPLE_RAW) &&
4684 perf_paranoid_tracepoint_raw() &&
4685 !capable(CAP_SYS_ADMIN))
4686 return -EPERM;
4687
4688 err = perf_trace_init(event);
4689 if (err)
4690 return err;
4691
4692 event->destroy = tp_perf_event_destroy;
4693
4694 return 0;
4695 }
4696
4697 static struct pmu perf_tracepoint = {
4698 .event_init = perf_tp_event_init,
4699 .enable = perf_trace_enable,
4700 .disable = perf_trace_disable,
4701 .start = perf_swevent_int,
4702 .stop = perf_swevent_void,
4703 .read = perf_swevent_read,
4704 .unthrottle = perf_swevent_void,
4705 };
4706
4707 static inline void perf_tp_register(void)
4708 {
4709 perf_pmu_register(&perf_tracepoint);
4710 }
4711
4712 static int perf_event_set_filter(struct perf_event *event, void __user *arg)
4713 {
4714 char *filter_str;
4715 int ret;
4716
4717 if (event->attr.type != PERF_TYPE_TRACEPOINT)
4718 return -EINVAL;
4719
4720 filter_str = strndup_user(arg, PAGE_SIZE);
4721 if (IS_ERR(filter_str))
4722 return PTR_ERR(filter_str);
4723
4724 ret = ftrace_profile_set_filter(event, event->attr.config, filter_str);
4725
4726 kfree(filter_str);
4727 return ret;
4728 }
4729
4730 static void perf_event_free_filter(struct perf_event *event)
4731 {
4732 ftrace_profile_free_filter(event);
4733 }
4734
4735 #else
4736
4737 static inline void perf_tp_register(void)
4738 {
4739 }
4740
4741 static int perf_event_set_filter(struct perf_event *event, void __user *arg)
4742 {
4743 return -ENOENT;
4744 }
4745
4746 static void perf_event_free_filter(struct perf_event *event)
4747 {
4748 }
4749
4750 #endif /* CONFIG_EVENT_TRACING */
4751
4752 #ifdef CONFIG_HAVE_HW_BREAKPOINT
4753 void perf_bp_event(struct perf_event *bp, void *data)
4754 {
4755 struct perf_sample_data sample;
4756 struct pt_regs *regs = data;
4757
4758 perf_sample_data_init(&sample, bp->attr.bp_addr);
4759
4760 if (!perf_exclude_event(bp, regs))
4761 perf_swevent_add(bp, 1, 1, &sample, regs);
4762 }
4763 #endif
4764
4765 /*
4766 * hrtimer based swevent callback
4767 */
4768
4769 static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
4770 {
4771 enum hrtimer_restart ret = HRTIMER_RESTART;
4772 struct perf_sample_data data;
4773 struct pt_regs *regs;
4774 struct perf_event *event;
4775 u64 period;
4776
4777 event = container_of(hrtimer, struct perf_event, hw.hrtimer);
4778 event->pmu->read(event);
4779
4780 perf_sample_data_init(&data, 0);
4781 data.period = event->hw.last_period;
4782 regs = get_irq_regs();
4783
4784 if (regs && !perf_exclude_event(event, regs)) {
4785 if (!(event->attr.exclude_idle && current->pid == 0))
4786 if (perf_event_overflow(event, 0, &data, regs))
4787 ret = HRTIMER_NORESTART;
4788 }
4789
4790 period = max_t(u64, 10000, event->hw.sample_period);
4791 hrtimer_forward_now(hrtimer, ns_to_ktime(period));
4792
4793 return ret;
4794 }
4795
4796 static void perf_swevent_start_hrtimer(struct perf_event *event)
4797 {
4798 struct hw_perf_event *hwc = &event->hw;
4799
4800 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
4801 hwc->hrtimer.function = perf_swevent_hrtimer;
4802 if (hwc->sample_period) {
4803 u64 period;
4804
4805 if (hwc->remaining) {
4806 if (hwc->remaining < 0)
4807 period = 10000;
4808 else
4809 period = hwc->remaining;
4810 hwc->remaining = 0;
4811 } else {
4812 period = max_t(u64, 10000, hwc->sample_period);
4813 }
4814 __hrtimer_start_range_ns(&hwc->hrtimer,
4815 ns_to_ktime(period), 0,
4816 HRTIMER_MODE_REL, 0);
4817 }
4818 }
4819
4820 static void perf_swevent_cancel_hrtimer(struct perf_event *event)
4821 {
4822 struct hw_perf_event *hwc = &event->hw;
4823
4824 if (hwc->sample_period) {
4825 ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
4826 hwc->remaining = ktime_to_ns(remaining);
4827
4828 hrtimer_cancel(&hwc->hrtimer);
4829 }
4830 }
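/*
 * hrtimer-based sampling glue for the clock events below: the timer
 * period is clamped to at least 10000 ns, a sample is taken from the
 * timer callback unless the event excludes the current context (idle,
 * user or kernel mode), and on disable the remaining time is stashed in
 * hwc->remaining so that a later enable resumes where the period left
 * off.
 */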
4831
4832 /*
4833 * Software event: cpu wall time clock
4834 */
4835
4836 static void cpu_clock_event_update(struct perf_event *event)
4837 {
4838 int cpu = raw_smp_processor_id();
4839 s64 prev;
4840 u64 now;
4841
4842 now = cpu_clock(cpu);
4843 prev = local64_xchg(&event->hw.prev_count, now);
4844 local64_add(now - prev, &event->count);
4845 }
4846
4847 static int cpu_clock_event_enable(struct perf_event *event)
4848 {
4849 struct hw_perf_event *hwc = &event->hw;
4850 int cpu = raw_smp_processor_id();
4851
4852 local64_set(&hwc->prev_count, cpu_clock(cpu));
4853 perf_swevent_start_hrtimer(event);
4854
4855 return 0;
4856 }
4857
4858 static void cpu_clock_event_disable(struct perf_event *event)
4859 {
4860 perf_swevent_cancel_hrtimer(event);
4861 cpu_clock_event_update(event);
4862 }
4863
4864 static void cpu_clock_event_read(struct perf_event *event)
4865 {
4866 cpu_clock_event_update(event);
4867 }
4868
4869 static int cpu_clock_event_init(struct perf_event *event)
4870 {
4871 if (event->attr.type != PERF_TYPE_SOFTWARE)
4872 return -ENOENT;
4873
4874 if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK)
4875 return -ENOENT;
4876
4877 return 0;
4878 }
4879
4880 static struct pmu perf_cpu_clock = {
4881 .event_init = cpu_clock_event_init,
4882 .enable = cpu_clock_event_enable,
4883 .disable = cpu_clock_event_disable,
4884 .read = cpu_clock_event_read,
4885 };
4886
4887 /*
4888 * Software event: task time clock
4889 */
4890
4891 static void task_clock_event_update(struct perf_event *event, u64 now)
4892 {
4893 u64 prev;
4894 s64 delta;
4895
4896 prev = local64_xchg(&event->hw.prev_count, now);
4897 delta = now - prev;
4898 local64_add(delta, &event->count);
4899 }
4900
4901 static int task_clock_event_enable(struct perf_event *event)
4902 {
4903 struct hw_perf_event *hwc = &event->hw;
4904 u64 now;
4905
4906 now = event->ctx->time;
4907
4908 local64_set(&hwc->prev_count, now);
4909
4910 perf_swevent_start_hrtimer(event);
4911
4912 return 0;
4913 }
4914
4915 static void task_clock_event_disable(struct perf_event *event)
4916 {
4917 perf_swevent_cancel_hrtimer(event);
4918 task_clock_event_update(event, event->ctx->time);
4919
4920 }
4921
4922 static void task_clock_event_read(struct perf_event *event)
4923 {
4924 u64 time;
4925
4926 if (!in_nmi()) {
4927 update_context_time(event->ctx);
4928 time = event->ctx->time;
4929 } else {
4930 u64 now = perf_clock();
4931 u64 delta = now - event->ctx->timestamp;
4932 time = event->ctx->time + delta;
4933 }
4934
4935 task_clock_event_update(event, time);
4936 }
4937
4938 static int task_clock_event_init(struct perf_event *event)
4939 {
4940 if (event->attr.type != PERF_TYPE_SOFTWARE)
4941 return -ENOENT;
4942
4943 if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK)
4944 return -ENOENT;
4945
4946 return 0;
4947 }
4948
4949 static struct pmu perf_task_clock = {
4950 .event_init = task_clock_event_init,
4951 .enable = task_clock_event_enable,
4952 .disable = task_clock_event_disable,
4953 .read = task_clock_event_read,
4954 };
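/*
 * The two clock pmus above differ only in their time source: cpu_clock
 * events accumulate cpu_clock(raw_smp_processor_id()) wall time, while
 * task_clock events accumulate ctx->time, i.e. the time their context
 * has been scheduled in. task_clock_event_read() avoids updating the
 * context time from NMI context and instead extrapolates from
 * perf_clock() and the last context timestamp.
 */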
4955
4956 static LIST_HEAD(pmus);
4957 static DEFINE_MUTEX(pmus_lock);
4958 static struct srcu_struct pmus_srcu;
4959
4960 static void perf_pmu_nop_void(struct pmu *pmu)
4961 {
4962 }
4963
4964 static int perf_pmu_nop_int(struct pmu *pmu)
4965 {
4966 return 0;
4967 }
4968
4969 static void perf_pmu_start_txn(struct pmu *pmu)
4970 {
4971 perf_pmu_disable(pmu);
4972 }
4973
4974 static int perf_pmu_commit_txn(struct pmu *pmu)
4975 {
4976 perf_pmu_enable(pmu);
4977 return 0;
4978 }
4979
4980 static void perf_pmu_cancel_txn(struct pmu *pmu)
4981 {
4982 perf_pmu_enable(pmu);
4983 }
4984
4985 int perf_pmu_register(struct pmu *pmu)
4986 {
4987 int ret;
4988
4989 mutex_lock(&pmus_lock);
4990 ret = -ENOMEM;
4991 pmu->pmu_disable_count = alloc_percpu(int);
4992 if (!pmu->pmu_disable_count)
4993 goto unlock;
4994
4995 if (!pmu->start_txn) {
4996 if (pmu->pmu_enable) {
4997 /*
4998 * If we have pmu_enable/pmu_disable calls, install
4999 * transaction stubs that use that to try and batch
5000 * hardware accesses.
5001 */
5002 pmu->start_txn = perf_pmu_start_txn;
5003 pmu->commit_txn = perf_pmu_commit_txn;
5004 pmu->cancel_txn = perf_pmu_cancel_txn;
5005 } else {
5006 pmu->start_txn = perf_pmu_nop_void;
5007 pmu->commit_txn = perf_pmu_nop_int;
5008 pmu->cancel_txn = perf_pmu_nop_void;
5009 }
5010 }
5011
5012 if (!pmu->pmu_enable) {
5013 pmu->pmu_enable = perf_pmu_nop_void;
5014 pmu->pmu_disable = perf_pmu_nop_void;
5015 }
5016
5017 list_add_rcu(&pmu->entry, &pmus);
5018 ret = 0;
5019 unlock:
5020 mutex_unlock(&pmus_lock);
5021
5022 return ret;
5023 }
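/*
 * Default-ops behaviour in practice: a pmu only has to supply
 * event_init plus its scheduling callbacks; perf_pmu_register() fills
 * in the transaction and pmu_enable/pmu_disable hooks with either
 * batching stubs (when pmu_enable is provided) or no-ops. A minimal
 * software pmu could therefore look roughly like the sketch below,
 * where the my_* names are purely illustrative:
 *
 *	static struct pmu my_pmu = {
 *		.event_init	= my_event_init,
 *		.enable		= my_enable,
 *		.disable	= my_disable,
 *		.read		= my_read,
 *	};
 *
 *	perf_pmu_register(&my_pmu);
 */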
5024
5025 void perf_pmu_unregister(struct pmu *pmu)
5026 {
5027 mutex_lock(&pmus_lock);
5028 list_del_rcu(&pmu->entry);
5029 mutex_unlock(&pmus_lock);
5030
5031 synchronize_srcu(&pmus_srcu);
5032
5033 free_percpu(pmu->pmu_disable_count);
5034 }
5035
5036 struct pmu *perf_init_event(struct perf_event *event)
5037 {
5038 struct pmu *pmu = NULL;
5039 int idx;
5040
5041 idx = srcu_read_lock(&pmus_srcu);
5042 list_for_each_entry_rcu(pmu, &pmus, entry) {
5043 int ret = pmu->event_init(event);
5044 if (!ret)
5045 break;
5046 if (ret != -ENOENT) {
5047 pmu = ERR_PTR(ret);
5048 break;
5049 }
5050 }
5051 srcu_read_unlock(&pmus_srcu, idx);
5052
5053 return pmu;
5054 }
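/*
 * perf_init_event() walks the registered pmus under SRCU and offers the
 * event to each in turn. A pmu declines with -ENOENT (try the next
 * one), accepts with 0, and any other error aborts the walk and is
 * propagated to the caller as an ERR_PTR.
 */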
5055
5056 /*
5057 * Allocate and initialize an event structure
5058 */
5059 static struct perf_event *
5060 perf_event_alloc(struct perf_event_attr *attr,
5061 int cpu,
5062 struct perf_event_context *ctx,
5063 struct perf_event *group_leader,
5064 struct perf_event *parent_event,
5065 perf_overflow_handler_t overflow_handler,
5066 gfp_t gfpflags)
5067 {
5068 struct pmu *pmu;
5069 struct perf_event *event;
5070 struct hw_perf_event *hwc;
5071 long err;
5072
5073 event = kzalloc(sizeof(*event), gfpflags);
5074 if (!event)
5075 return ERR_PTR(-ENOMEM);
5076
5077 /*
5078 * Single events are their own group leaders, with an
5079 * empty sibling list:
5080 */
5081 if (!group_leader)
5082 group_leader = event;
5083
5084 mutex_init(&event->child_mutex);
5085 INIT_LIST_HEAD(&event->child_list);
5086
5087 INIT_LIST_HEAD(&event->group_entry);
5088 INIT_LIST_HEAD(&event->event_entry);
5089 INIT_LIST_HEAD(&event->sibling_list);
5090 init_waitqueue_head(&event->waitq);
5091
5092 mutex_init(&event->mmap_mutex);
5093
5094 event->cpu = cpu;
5095 event->attr = *attr;
5096 event->group_leader = group_leader;
5097 event->pmu = NULL;
5098 event->ctx = ctx;
5099 event->oncpu = -1;
5100
5101 event->parent = parent_event;
5102
5103 event->ns = get_pid_ns(current->nsproxy->pid_ns);
5104 event->id = atomic64_inc_return(&perf_event_id);
5105
5106 event->state = PERF_EVENT_STATE_INACTIVE;
5107
5108 if (!overflow_handler && parent_event)
5109 overflow_handler = parent_event->overflow_handler;
5110
5111 event->overflow_handler = overflow_handler;
5112
5113 if (attr->disabled)
5114 event->state = PERF_EVENT_STATE_OFF;
5115
5116 pmu = NULL;
5117
5118 hwc = &event->hw;
5119 hwc->sample_period = attr->sample_period;
5120 if (attr->freq && attr->sample_freq)
5121 hwc->sample_period = 1;
5122 hwc->last_period = hwc->sample_period;
5123
5124 local64_set(&hwc->period_left, hwc->sample_period);
5125
5126 /*
5127 * we currently do not support PERF_FORMAT_GROUP on inherited events
5128 */
5129 if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
5130 goto done;
5131
5132 pmu = perf_init_event(event);
5133
5134 done:
5135 err = 0;
5136 if (!pmu)
5137 err = -EINVAL;
5138 else if (IS_ERR(pmu))
5139 err = PTR_ERR(pmu);
5140
5141 if (err) {
5142 if (event->ns)
5143 put_pid_ns(event->ns);
5144 kfree(event);
5145 return ERR_PTR(err);
5146 }
5147
5148 event->pmu = pmu;
5149
5150 if (!event->parent) {
5151 atomic_inc(&nr_events);
5152 if (event->attr.mmap || event->attr.mmap_data)
5153 atomic_inc(&nr_mmap_events);
5154 if (event->attr.comm)
5155 atomic_inc(&nr_comm_events);
5156 if (event->attr.task)
5157 atomic_inc(&nr_task_events);
5158 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
5159 err = get_callchain_buffers();
5160 if (err) {
5161 free_event(event);
5162 return ERR_PTR(err);
5163 }
5164 }
5165 }
5166
5167 return event;
5168 }
5169
5170 static int perf_copy_attr(struct perf_event_attr __user *uattr,
5171 struct perf_event_attr *attr)
5172 {
5173 u32 size;
5174 int ret;
5175
5176 if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0))
5177 return -EFAULT;
5178
5179 /*
5180 * Zero the full structure, so that a short copy leaves all remaining fields zeroed.
5181 */
5182 memset(attr, 0, sizeof(*attr));
5183
5184 ret = get_user(size, &uattr->size);
5185 if (ret)
5186 return ret;
5187
5188 if (size > PAGE_SIZE) /* silly large */
5189 goto err_size;
5190
5191 if (!size) /* abi compat */
5192 size = PERF_ATTR_SIZE_VER0;
5193
5194 if (size < PERF_ATTR_SIZE_VER0)
5195 goto err_size;
5196
5197 /*
5198 * If we're handed a bigger struct than we know of,
5199 * ensure all the unknown bits are 0 - i.e. new
5200 * user-space does not rely on any kernel feature
5201 * extensions we don't know about yet.
5202 */
5203 if (size > sizeof(*attr)) {
5204 unsigned char __user *addr;
5205 unsigned char __user *end;
5206 unsigned char val;
5207
5208 addr = (void __user *)uattr + sizeof(*attr);
5209 end = (void __user *)uattr + size;
5210
5211 for (; addr < end; addr++) {
5212 ret = get_user(val, addr);
5213 if (ret)
5214 return ret;
5215 if (val)
5216 goto err_size;
5217 }
5218 size = sizeof(*attr);
5219 }
5220
5221 ret = copy_from_user(attr, uattr, size);
5222 if (ret)
5223 return -EFAULT;
5224
5225 /*
5226 * If the type exists, the corresponding creation will verify
5227 * the attr->config.
5228 */
5229 if (attr->type >= PERF_TYPE_MAX)
5230 return -EINVAL;
5231
5232 if (attr->__reserved_1)
5233 return -EINVAL;
5234
5235 if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
5236 return -EINVAL;
5237
5238 if (attr->read_format & ~(PERF_FORMAT_MAX-1))
5239 return -EINVAL;
5240
5241 out:
5242 return ret;
5243
5244 err_size:
5245 put_user(sizeof(*attr), &uattr->size);
5246 ret = -E2BIG;
5247 goto out;
5248 }
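/*
 * perf_copy_attr() implements the ABI compatibility rules for the attr
 * structure: an older, smaller structure (at least PERF_ATTR_SIZE_VER0
 * bytes) is accepted with the missing tail zeroed, a larger, newer one
 * is accepted only if every trailing byte the kernel does not know
 * about is zero, and on size errors the kernel's own sizeof(*attr) is
 * written back to userspace alongside -E2BIG.
 */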
5249
5250 static int
5251 perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
5252 {
5253 struct perf_buffer *buffer = NULL, *old_buffer = NULL;
5254 int ret = -EINVAL;
5255
5256 if (!output_event)
5257 goto set;
5258
5259 /* don't allow circular references */
5260 if (event == output_event)
5261 goto out;
5262
5263 /*
5264 * Don't allow cross-cpu buffers
5265 */
5266 if (output_event->cpu != event->cpu)
5267 goto out;
5268
5269 /*
5270 * If it's not a per-cpu buffer, it must be the same task.
5271 */
5272 if (output_event->cpu == -1 && output_event->ctx != event->ctx)
5273 goto out;
5274
5275 set:
5276 mutex_lock(&event->mmap_mutex);
5277 /* Can't redirect output if we've got an active mmap() */
5278 if (atomic_read(&event->mmap_count))
5279 goto unlock;
5280
5281 if (output_event) {
5282 /* get the buffer we want to redirect to */
5283 buffer = perf_buffer_get(output_event);
5284 if (!buffer)
5285 goto unlock;
5286 }
5287
5288 old_buffer = event->buffer;
5289 rcu_assign_pointer(event->buffer, buffer);
5290 ret = 0;
5291 unlock:
5292 mutex_unlock(&event->mmap_mutex);
5293
5294 if (old_buffer)
5295 perf_buffer_put(old_buffer);
5296 out:
5297 return ret;
5298 }
5299
5300 /**
5301 * sys_perf_event_open - open a performance event, associate it to a task/cpu
5302 *
5303 * @attr_uptr: event_id type attributes for monitoring/sampling
5304 * @pid: target pid
5305 * @cpu: target cpu
5306 * @group_fd: group leader event fd
 * @flags: PERF_FLAG_FD_NO_GROUP / PERF_FLAG_FD_OUTPUT flags
5307 */
5308 SYSCALL_DEFINE5(perf_event_open,
5309 struct perf_event_attr __user *, attr_uptr,
5310 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
5311 {
5312 struct perf_event *event, *group_leader = NULL, *output_event = NULL;
5313 struct perf_event_attr attr;
5314 struct perf_event_context *ctx;
5315 struct file *event_file = NULL;
5316 struct file *group_file = NULL;
5317 int event_fd;
5318 int fput_needed = 0;
5319 int err;
5320
5321 /* for future expandability... */
5322 if (flags & ~(PERF_FLAG_FD_NO_GROUP | PERF_FLAG_FD_OUTPUT))
5323 return -EINVAL;
5324
5325 err = perf_copy_attr(attr_uptr, &attr);
5326 if (err)
5327 return err;
5328
5329 if (!attr.exclude_kernel) {
5330 if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
5331 return -EACCES;
5332 }
5333
5334 if (attr.freq) {
5335 if (attr.sample_freq > sysctl_perf_event_sample_rate)
5336 return -EINVAL;
5337 }
5338
5339 event_fd = get_unused_fd_flags(O_RDWR);
5340 if (event_fd < 0)
5341 return event_fd;
5342
5343 /*
5344 * Get the target context (task or percpu):
5345 */
5346 ctx = find_get_context(pid, cpu);
5347 if (IS_ERR(ctx)) {
5348 err = PTR_ERR(ctx);
5349 goto err_fd;
5350 }
5351
5352 if (group_fd != -1) {
5353 group_leader = perf_fget_light(group_fd, &fput_needed);
5354 if (IS_ERR(group_leader)) {
5355 err = PTR_ERR(group_leader);
5356 goto err_put_context;
5357 }
5358 group_file = group_leader->filp;
5359 if (flags & PERF_FLAG_FD_OUTPUT)
5360 output_event = group_leader;
5361 if (flags & PERF_FLAG_FD_NO_GROUP)
5362 group_leader = NULL;
5363 }
5364
5365 /*
5366 * Validate the group leader (we will attach this event to it):
5367 */
5368 if (group_leader) {
5369 err = -EINVAL;
5370
5371 /*
5372 * Do not allow a recursive hierarchy (this new sibling
5373 * becoming part of another group-sibling):
5374 */
5375 if (group_leader->group_leader != group_leader)
5376 goto err_put_context;
5377 /*
5378 * Do not allow to attach to a group in a different
5379 * task or CPU context:
5380 */
5381 if (group_leader->ctx != ctx)
5382 goto err_put_context;
5383 /*
5384 * Only a group leader can be exclusive or pinned
5385 */
5386 if (attr.exclusive || attr.pinned)
5387 goto err_put_context;
5388 }
5389
5390 event = perf_event_alloc(&attr, cpu, ctx, group_leader,
5391 NULL, NULL, GFP_KERNEL);
5392 if (IS_ERR(event)) {
5393 err = PTR_ERR(event);
5394 goto err_put_context;
5395 }
5396
5397 if (output_event) {
5398 err = perf_event_set_output(event, output_event);
5399 if (err)
5400 goto err_free_put_context;
5401 }
5402
5403 event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, O_RDWR);
5404 if (IS_ERR(event_file)) {
5405 err = PTR_ERR(event_file);
5406 goto err_free_put_context;
5407 }
5408
5409 event->filp = event_file;
5410 WARN_ON_ONCE(ctx->parent_ctx);
5411 mutex_lock(&ctx->mutex);
5412 perf_install_in_context(ctx, event, cpu);
5413 ++ctx->generation;
5414 mutex_unlock(&ctx->mutex);
5415
5416 event->owner = current;
5417 get_task_struct(current);
5418 mutex_lock(&current->perf_event_mutex);
5419 list_add_tail(&event->owner_entry, &current->perf_event_list);
5420 mutex_unlock(&current->perf_event_mutex);
5421
5422 /*
5423 * Drop the reference on the group_event after placing the
5424 * new event on the sibling_list. This ensures destruction
5425 * of the group leader will find the pointer to itself in
5426 * perf_group_detach().
5427 */
5428 fput_light(group_file, fput_needed);
5429 fd_install(event_fd, event_file);
5430 return event_fd;
5431
5432 err_free_put_context:
5433 free_event(event);
5434 err_put_context:
5435 fput_light(group_file, fput_needed);
5436 put_ctx(ctx);
5437 err_fd:
5438 put_unused_fd(event_fd);
5439 return err;
5440 }
5441
5442 /**
5443 * perf_event_create_kernel_counter
5444 *
5445 * @attr: attributes of the counter to create
5446 * @cpu: cpu on which the counter is bound
5447 * @pid: task to profile
 * @overflow_handler: callback invoked on counter overflow
5448 */
5449 struct perf_event *
5450 perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
5451 pid_t pid,
5452 perf_overflow_handler_t overflow_handler)
5453 {
5454 struct perf_event *event;
5455 struct perf_event_context *ctx;
5456 int err;
5457
5458 /*
5459 * Get the target context (task or percpu):
5460 */
5461
5462 ctx = find_get_context(pid, cpu);
5463 if (IS_ERR(ctx)) {
5464 err = PTR_ERR(ctx);
5465 goto err_exit;
5466 }
5467
5468 event = perf_event_alloc(attr, cpu, ctx, NULL,
5469 NULL, overflow_handler, GFP_KERNEL);
5470 if (IS_ERR(event)) {
5471 err = PTR_ERR(event);
5472 goto err_put_context;
5473 }
5474
5475 event->filp = NULL;
5476 WARN_ON_ONCE(ctx->parent_ctx);
5477 mutex_lock(&ctx->mutex);
5478 perf_install_in_context(ctx, event, cpu);
5479 ++ctx->generation;
5480 mutex_unlock(&ctx->mutex);
5481
5482 event->owner = current;
5483 get_task_struct(current);
5484 mutex_lock(&current->perf_event_mutex);
5485 list_add_tail(&event->owner_entry, &current->perf_event_list);
5486 mutex_unlock(&current->perf_event_mutex);
5487
5488 return event;
5489
5490 err_put_context:
5491 put_ctx(ctx);
5492 err_exit:
5493 return ERR_PTR(err);
5494 }
5495 EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
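/*
 * In-kernel usage sketch (illustrative values only): a subsystem that
 * wants its own counter fills in a perf_event_attr and calls the helper
 * above, e.g.
 *
 *	struct perf_event_attr attr = {
 *		.type		= PERF_TYPE_SOFTWARE,
 *		.config		= PERF_COUNT_SW_CPU_CLOCK,
 *	};
 *	event = perf_event_create_kernel_counter(&attr, cpu, -1, NULL);
 *
 * where -1 is used here for a counter not tied to a specific task. A
 * NULL overflow_handler means samples go through the default
 * perf_event_output() path; passing a callback instead redirects
 * overflows to the caller.
 */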
5496
5497 /*
5498 * inherit an event from parent task to child task:
5499 */
5500 static struct perf_event *
5501 inherit_event(struct perf_event *parent_event,
5502 struct task_struct *parent,
5503 struct perf_event_context *parent_ctx,
5504 struct task_struct *child,
5505 struct perf_event *group_leader,
5506 struct perf_event_context *child_ctx)
5507 {
5508 struct perf_event *child_event;
5509
5510 /*
5511 * Instead of creating recursive hierarchies of events,
5512 * we link inherited events back to the original parent,
5513 * which has a filp for sure, which we use as the reference
5514 * count:
5515 */
5516 if (parent_event->parent)
5517 parent_event = parent_event->parent;
5518
5519 child_event = perf_event_alloc(&parent_event->attr,
5520 parent_event->cpu, child_ctx,
5521 group_leader, parent_event,
5522 NULL, GFP_KERNEL);
5523 if (IS_ERR(child_event))
5524 return child_event;
5525 get_ctx(child_ctx);
5526
5527 /*
5528 * Make the child state follow the state of the parent event,
5529 * not its attr.disabled bit. We hold the parent's mutex,
5530 * so we won't race with perf_event_{en, dis}able_family.
5531 */
5532 if (parent_event->state >= PERF_EVENT_STATE_INACTIVE)
5533 child_event->state = PERF_EVENT_STATE_INACTIVE;
5534 else
5535 child_event->state = PERF_EVENT_STATE_OFF;
5536
5537 if (parent_event->attr.freq) {
5538 u64 sample_period = parent_event->hw.sample_period;
5539 struct hw_perf_event *hwc = &child_event->hw;
5540
5541 hwc->sample_period = sample_period;
5542 hwc->last_period = sample_period;
5543
5544 local64_set(&hwc->period_left, sample_period);
5545 }
5546
5547 child_event->overflow_handler = parent_event->overflow_handler;
5548
5549 /*
5550 * Link it up in the child's context:
5551 */
5552 add_event_to_ctx(child_event, child_ctx);
5553
5554 /*
5555 * Get a reference to the parent filp - we will fput it
5556 * when the child event exits. This is safe to do because
5557 * we are in the parent and we know that the filp still
5558 * exists and has a nonzero count:
5559 */
5560 atomic_long_inc(&parent_event->filp->f_count);
5561
5562 /*
5563 * Link this into the parent event's child list
5564 */
5565 WARN_ON_ONCE(parent_event->ctx->parent_ctx);
5566 mutex_lock(&parent_event->child_mutex);
5567 list_add_tail(&child_event->child_list, &parent_event->child_list);
5568 mutex_unlock(&parent_event->child_mutex);
5569
5570 return child_event;
5571 }
5572
5573 static int inherit_group(struct perf_event *parent_event,
5574 struct task_struct *parent,
5575 struct perf_event_context *parent_ctx,
5576 struct task_struct *child,
5577 struct perf_event_context *child_ctx)
5578 {
5579 struct perf_event *leader;
5580 struct perf_event *sub;
5581 struct perf_event *child_ctr;
5582
5583 leader = inherit_event(parent_event, parent, parent_ctx,
5584 child, NULL, child_ctx);
5585 if (IS_ERR(leader))
5586 return PTR_ERR(leader);
5587 list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
5588 child_ctr = inherit_event(sub, parent, parent_ctx,
5589 child, leader, child_ctx);
5590 if (IS_ERR(child_ctr))
5591 return PTR_ERR(child_ctr);
5592 }
5593 return 0;
5594 }
5595
5596 static void sync_child_event(struct perf_event *child_event,
5597 struct task_struct *child)
5598 {
5599 struct perf_event *parent_event = child_event->parent;
5600 u64 child_val;
5601
5602 if (child_event->attr.inherit_stat)
5603 perf_event_read_event(child_event, child);
5604
5605 child_val = perf_event_count(child_event);
5606
5607 /*
5608 * Add back the child's count to the parent's count:
5609 */
5610 atomic64_add(child_val, &parent_event->child_count);
5611 atomic64_add(child_event->total_time_enabled,
5612 &parent_event->child_total_time_enabled);
5613 atomic64_add(child_event->total_time_running,
5614 &parent_event->child_total_time_running);
5615
5616 /*
5617 * Remove this event from the parent's list
5618 */
5619 WARN_ON_ONCE(parent_event->ctx->parent_ctx);
5620 mutex_lock(&parent_event->child_mutex);
5621 list_del_init(&child_event->child_list);
5622 mutex_unlock(&parent_event->child_mutex);
5623
5624 /*
5625 * Release the parent event, if this was the last
5626 * reference to it.
5627 */
5628 fput(parent_event->filp);
5629 }
5630
5631 static void
5632 __perf_event_exit_task(struct perf_event *child_event,
5633 struct perf_event_context *child_ctx,
5634 struct task_struct *child)
5635 {
5636 struct perf_event *parent_event;
5637
5638 perf_event_remove_from_context(child_event);
5639
5640 parent_event = child_event->parent;
5641 /*
5642 * It can happen that the parent exits first, and has events
5643 * that are still around due to the child reference. These
5644 * events need to be zapped - but otherwise linger.
5645 */
5646 if (parent_event) {
5647 sync_child_event(child_event, child);
5648 free_event(child_event);
5649 }
5650 }
5651
5652 /*
5653 * When a child task exits, feed back event values to parent events.
5654 */
5655 void perf_event_exit_task(struct task_struct *child)
5656 {
5657 struct perf_event *child_event, *tmp;
5658 struct perf_event_context *child_ctx;
5659 unsigned long flags;
5660
5661 if (likely(!child->perf_event_ctxp)) {
5662 perf_event_task(child, NULL, 0);
5663 return;
5664 }
5665
5666 local_irq_save(flags);
5667 /*
5668 * We can't reschedule here because interrupts are disabled,
5669 * and either child is current or it is a task that can't be
5670 * scheduled, so we are now safe from rescheduling changing
5671 * our context.
5672 */
5673 child_ctx = child->perf_event_ctxp;
5674 __perf_event_task_sched_out(child_ctx);
5675
5676 /*
5677 * Take the context lock here so that if find_get_context is
5678 * reading child->perf_event_ctxp, we wait until it has
5679 * incremented the context's refcount before we do put_ctx below.
5680 */
5681 raw_spin_lock(&child_ctx->lock);
5682 child->perf_event_ctxp = NULL;
5683 /*
5684 * If this context is a clone; unclone it so it can't get
5685 * swapped to another process while we're removing all
5686 * the events from it.
5687 */
5688 unclone_ctx(child_ctx);
5689 update_context_time(child_ctx);
5690 raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
5691
5692 /*
5693 * Report the task dead after unscheduling the events so that we
5694 * won't get any samples after PERF_RECORD_EXIT. We can however still
5695 * get a few PERF_RECORD_READ events.
5696 */
5697 perf_event_task(child, child_ctx, 0);
5698
5699 /*
5700 * We can recurse on the same lock type through:
5701 *
5702 * __perf_event_exit_task()
5703 * sync_child_event()
5704 * fput(parent_event->filp)
5705 * perf_release()
5706 * mutex_lock(&ctx->mutex)
5707 *
5708 * But since its the parent context it won't be the same instance.
5709 */
5710 mutex_lock(&child_ctx->mutex);
5711
5712 again:
5713 list_for_each_entry_safe(child_event, tmp, &child_ctx->pinned_groups,
5714 group_entry)
5715 __perf_event_exit_task(child_event, child_ctx, child);
5716
5717 list_for_each_entry_safe(child_event, tmp, &child_ctx->flexible_groups,
5718 group_entry)
5719 __perf_event_exit_task(child_event, child_ctx, child);
5720
5721 /*
5722 * If the last event was a group event, it will have appended all
5723 * its siblings to the list, but we obtained 'tmp' before that which
5724 * will still point to the list head terminating the iteration.
5725 */
5726 if (!list_empty(&child_ctx->pinned_groups) ||
5727 !list_empty(&child_ctx->flexible_groups))
5728 goto again;
5729
5730 mutex_unlock(&child_ctx->mutex);
5731
5732 put_ctx(child_ctx);
5733 }
5734
5735 static void perf_free_event(struct perf_event *event,
5736 struct perf_event_context *ctx)
5737 {
5738 struct perf_event *parent = event->parent;
5739
5740 if (WARN_ON_ONCE(!parent))
5741 return;
5742
5743 mutex_lock(&parent->child_mutex);
5744 list_del_init(&event->child_list);
5745 mutex_unlock(&parent->child_mutex);
5746
5747 fput(parent->filp);
5748
5749 perf_group_detach(event);
5750 list_del_event(event, ctx);
5751 free_event(event);
5752 }
5753
5754 /*
5755 * Free an unexposed, unused context as created by inheritance in
5756 * perf_event_init_task below; used by fork() in case of failure.
5757 */
5758 void perf_event_free_task(struct task_struct *task)
5759 {
5760 struct perf_event_context *ctx = task->perf_event_ctxp;
5761 struct perf_event *event, *tmp;
5762
5763 if (!ctx)
5764 return;
5765
5766 mutex_lock(&ctx->mutex);
5767 again:
5768 list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
5769 perf_free_event(event, ctx);
5770
5771 list_for_each_entry_safe(event, tmp, &ctx->flexible_groups,
5772 group_entry)
5773 perf_free_event(event, ctx);
5774
5775 if (!list_empty(&ctx->pinned_groups) ||
5776 !list_empty(&ctx->flexible_groups))
5777 goto again;
5778
5779 mutex_unlock(&ctx->mutex);
5780
5781 put_ctx(ctx);
5782 }
5783
5784 static int
5785 inherit_task_group(struct perf_event *event, struct task_struct *parent,
5786 struct perf_event_context *parent_ctx,
5787 struct task_struct *child,
5788 int *inherited_all)
5789 {
5790 int ret;
5791 struct perf_event_context *child_ctx = child->perf_event_ctxp;
5792
5793 if (!event->attr.inherit) {
5794 *inherited_all = 0;
5795 return 0;
5796 }
5797
5798 if (!child_ctx) {
5799 /*
5800 * This is executed from the parent task context, so
5801 * inherit events that have been marked for cloning.
5802 * First allocate and initialize a context for the
5803 * child.
5804 */
5805
5806 child_ctx = kzalloc(sizeof(struct perf_event_context),
5807 GFP_KERNEL);
5808 if (!child_ctx)
5809 return -ENOMEM;
5810
5811 __perf_event_init_context(child_ctx, child);
5812 child->perf_event_ctxp = child_ctx;
5813 get_task_struct(child);
5814 }
5815
5816 ret = inherit_group(event, parent, parent_ctx,
5817 child, child_ctx);
5818
5819 if (ret)
5820 *inherited_all = 0;
5821
5822 return ret;
5823 }
5824
5825
5826 /*
5827 * Initialize the perf_event context in task_struct
5828 */
5829 int perf_event_init_task(struct task_struct *child)
5830 {
5831 struct perf_event_context *child_ctx, *parent_ctx;
5832 struct perf_event_context *cloned_ctx;
5833 struct perf_event *event;
5834 struct task_struct *parent = current;
5835 int inherited_all = 1;
5836 int ret = 0;
5837
5838 child->perf_event_ctxp = NULL;
5839
5840 mutex_init(&child->perf_event_mutex);
5841 INIT_LIST_HEAD(&child->perf_event_list);
5842
5843 if (likely(!parent->perf_event_ctxp))
5844 return 0;
5845
5846 /*
5847 * If the parent's context is a clone, pin it so it won't get
5848 * swapped under us.
5849 */
5850 parent_ctx = perf_pin_task_context(parent);
5851
5852 /*
5853 * No need to check if parent_ctx != NULL here; since we saw
5854 * it non-NULL earlier, the only reason for it to become NULL
5855 * is if we exit, and since we're currently in the middle of
5856 * a fork we can't be exiting at the same time.
5857 */
5858
5859 /*
5860 * Lock the parent list. No need to lock the child - not PID
5861 * hashed yet and not running, so nobody can access it.
5862 */
5863 mutex_lock(&parent_ctx->mutex);
5864
5865 /*
5866 * We don't have to disable NMIs - we are only looking at
5867 * the list, not manipulating it:
5868 */
5869 list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) {
5870 ret = inherit_task_group(event, parent, parent_ctx, child,
5871 &inherited_all);
5872 if (ret)
5873 break;
5874 }
5875
5876 list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
5877 ret = inherit_task_group(event, parent, parent_ctx, child,
5878 &inherited_all);
5879 if (ret)
5880 break;
5881 }
5882
5883 child_ctx = child->perf_event_ctxp;
5884
5885 if (child_ctx && inherited_all) {
5886 /*
5887 * Mark the child context as a clone of the parent
5888 * context, or of whatever the parent is a clone of.
5889 * Note that if the parent is a clone, it could get
5890 * uncloned at any point, but that doesn't matter
5891 * because the list of events and the generation
5892 * count can't have changed since we took the mutex.
5893 */
5894 cloned_ctx = rcu_dereference(parent_ctx->parent_ctx);
5895 if (cloned_ctx) {
5896 child_ctx->parent_ctx = cloned_ctx;
5897 child_ctx->parent_gen = parent_ctx->parent_gen;
5898 } else {
5899 child_ctx->parent_ctx = parent_ctx;
5900 child_ctx->parent_gen = parent_ctx->generation;
5901 }
5902 get_ctx(child_ctx->parent_ctx);
5903 }
5904
5905 mutex_unlock(&parent_ctx->mutex);
5906
5907 perf_unpin_context(parent_ctx);
5908
5909 return ret;
5910 }
5911
5912 static void __init perf_event_init_all_cpus(void)
5913 {
5914 int cpu;
5915 struct perf_cpu_context *cpuctx;
5916
5917 for_each_possible_cpu(cpu) {
5918 cpuctx = &per_cpu(perf_cpu_context, cpu);
5919 mutex_init(&cpuctx->hlist_mutex);
5920 __perf_event_init_context(&cpuctx->ctx, NULL);
5921 }
5922 }
5923
5924 static void __cpuinit perf_event_init_cpu(int cpu)
5925 {
5926 struct perf_cpu_context *cpuctx;
5927
5928 cpuctx = &per_cpu(perf_cpu_context, cpu);
5929
5930 spin_lock(&perf_resource_lock);
5931 cpuctx->max_pertask = perf_max_events - perf_reserved_percpu;
5932 spin_unlock(&perf_resource_lock);
5933
5934 mutex_lock(&cpuctx->hlist_mutex);
5935 if (cpuctx->hlist_refcount > 0) {
5936 struct swevent_hlist *hlist;
5937
5938 hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
5939 WARN_ON_ONCE(!hlist);
5940 rcu_assign_pointer(cpuctx->swevent_hlist, hlist);
5941 }
5942 mutex_unlock(&cpuctx->hlist_mutex);
5943 }
5944
5945 #ifdef CONFIG_HOTPLUG_CPU
5946 static void __perf_event_exit_cpu(void *info)
5947 {
5948 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
5949 struct perf_event_context *ctx = &cpuctx->ctx;
5950 struct perf_event *event, *tmp;
5951
5952 list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
5953 __perf_event_remove_from_context(event);
5954 list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, group_entry)
5955 __perf_event_remove_from_context(event);
5956 }
5957 static void perf_event_exit_cpu(int cpu)
5958 {
5959 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
5960 struct perf_event_context *ctx = &cpuctx->ctx;
5961
5962 mutex_lock(&cpuctx->hlist_mutex);
5963 swevent_hlist_release(cpuctx);
5964 mutex_unlock(&cpuctx->hlist_mutex);
5965
5966 mutex_lock(&ctx->mutex);
5967 smp_call_function_single(cpu, __perf_event_exit_cpu, NULL, 1);
5968 mutex_unlock(&ctx->mutex);
5969 }
5970 #else
5971 static inline void perf_event_exit_cpu(int cpu) { }
5972 #endif
5973
5974 static int __cpuinit
5975 perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
5976 {
5977 unsigned int cpu = (long)hcpu;
5978
5979 switch (action & ~CPU_TASKS_FROZEN) {
5980
5981 case CPU_UP_PREPARE:
5982 case CPU_DOWN_FAILED:
5983 perf_event_init_cpu(cpu);
5984 break;
5985
5986 case CPU_UP_CANCELED:
5987 case CPU_DOWN_PREPARE:
5988 perf_event_exit_cpu(cpu);
5989 break;
5990
5991 default:
5992 break;
5993 }
5994
5995 return NOTIFY_OK;
5996 }
5997
5998 void __init perf_event_init(void)
5999 {
6000 perf_event_init_all_cpus();
6001 init_srcu_struct(&pmus_srcu);
6002 perf_pmu_register(&perf_swevent);
6003 perf_pmu_register(&perf_cpu_clock);
6004 perf_pmu_register(&perf_task_clock);
6005 perf_tp_register();
6006 perf_cpu_notifier(perf_cpu_notify);
6007 }
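/*
 * Note on the built-in pmus registered above: perf_swevent_init()
 * declines PERF_COUNT_SW_CPU_CLOCK and PERF_COUNT_SW_TASK_CLOCK with
 * -ENOENT, so those two software config values are always picked up by
 * the dedicated hrtimer-based cpu-clock and task-clock pmus rather than
 * the generic software pmu, whichever order perf_init_event() happens
 * to try them in.
 */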
6008
6009 static ssize_t perf_show_reserve_percpu(struct sysdev_class *class,
6010 struct sysdev_class_attribute *attr,
6011 char *buf)
6012 {
6013 return sprintf(buf, "%d\n", perf_reserved_percpu);
6014 }
6015
6016 static ssize_t
6017 perf_set_reserve_percpu(struct sysdev_class *class,
6018 struct sysdev_class_attribute *attr,
6019 const char *buf,
6020 size_t count)
6021 {
6022 struct perf_cpu_context *cpuctx;
6023 unsigned long val;
6024 int err, cpu, mpt;
6025
6026 err = strict_strtoul(buf, 10, &val);
6027 if (err)
6028 return err;
6029 if (val > perf_max_events)
6030 return -EINVAL;
6031
6032 spin_lock(&perf_resource_lock);
6033 perf_reserved_percpu = val;
6034 for_each_online_cpu(cpu) {
6035 cpuctx = &per_cpu(perf_cpu_context, cpu);
6036 raw_spin_lock_irq(&cpuctx->ctx.lock);
6037 mpt = min(perf_max_events - cpuctx->ctx.nr_events,
6038 perf_max_events - perf_reserved_percpu);
6039 cpuctx->max_pertask = mpt;
6040 raw_spin_unlock_irq(&cpuctx->ctx.lock);
6041 }
6042 spin_unlock(&perf_resource_lock);
6043
6044 return count;
6045 }
6046
6047 static ssize_t perf_show_overcommit(struct sysdev_class *class,
6048 struct sysdev_class_attribute *attr,
6049 char *buf)
6050 {
6051 return sprintf(buf, "%d\n", perf_overcommit);
6052 }
6053
6054 static ssize_t
6055 perf_set_overcommit(struct sysdev_class *class,
6056 struct sysdev_class_attribute *attr,
6057 const char *buf, size_t count)
6058 {
6059 unsigned long val;
6060 int err;
6061
6062 err = strict_strtoul(buf, 10, &val);
6063 if (err)
6064 return err;
6065 if (val > 1)
6066 return -EINVAL;
6067
6068 spin_lock(&perf_resource_lock);
6069 perf_overcommit = val;
6070 spin_unlock(&perf_resource_lock);
6071
6072 return count;
6073 }
6074
6075 static SYSDEV_CLASS_ATTR(
6076 reserve_percpu,
6077 0644,
6078 perf_show_reserve_percpu,
6079 perf_set_reserve_percpu
6080 );
6081
6082 static SYSDEV_CLASS_ATTR(
6083 overcommit,
6084 0644,
6085 perf_show_overcommit,
6086 perf_set_overcommit
6087 );
6088
6089 static struct attribute *perfclass_attrs[] = {
6090 &attr_reserve_percpu.attr,
6091 &attr_overcommit.attr,
6092 NULL
6093 };
6094
6095 static struct attribute_group perfclass_attr_group = {
6096 .attrs = perfclass_attrs,
6097 .name = "perf_events",
6098 };
6099
6100 static int __init perf_event_sysfs_init(void)
6101 {
6102 return sysfs_create_group(&cpu_sysdev_class.kset.kobj,
6103 &perfclass_attr_group);
6104 }
6105 device_initcall(perf_event_sysfs_init);