/*
 * (C) Copyright 2009-2011 -
 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * LTTng performance monitoring counters (perf-counters) integration module.
 *
 * Dual LGPL v2.1/GPL v2 license.
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
#include <linux/list.h>
#include <linux/string.h>
#include "ltt-events.h"
#include "wrapper/ringbuffer/frontend_types.h"
#include "wrapper/vmalloc.h"
#include "wrapper/perf.h"
#include "ltt-tracer.h"

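/*
 * Compute the space needed to store one 64-bit counter sample in the
 * ring buffer, including the padding required to align it on uint64_t.
 */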
static
size_t perf_counter_get_size(size_t offset)
{
        size_t size = 0;

        size += lib_ring_buffer_align(offset, ltt_alignof(uint64_t));
        size += sizeof(uint64_t);
        return size;
}

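/*
 * Read the perf counter associated with the current CPU and write its
 * 64-bit value into the event payload. If the counter is missing (CPU
 * not brought up yet) or in error state, record 0 instead.
 */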
static
void perf_counter_record(struct lttng_ctx_field *field,
                         struct lib_ring_buffer_ctx *ctx,
                         struct ltt_channel *chan)
{
        struct perf_event *event;
        uint64_t value;

        event = field->u.perf_counter->e[ctx->cpu];
        if (likely(event)) {
                if (unlikely(event->state == PERF_EVENT_STATE_ERROR)) {
                        value = 0;
                } else {
                        event->pmu->read(event);
                        value = local64_read(&event->count);
                }
        } else {
                /*
                 * Perf chooses not to be clever and does not support enabling
                 * a perf counter before the cpu is brought up. Therefore, we
                 * need to support events arriving (e.g. scheduler events)
                 * before the counter is set up. Write an arbitrary 0 in this
                 * case.
                 */
                value = 0;
        }
        lib_ring_buffer_align_ctx(ctx, ltt_alignof(value));
        chan->ops->event_write(ctx, &value, sizeof(value));
}

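/*
 * Empty overflow handler: counters are read on demand from
 * perf_counter_record(), so overflow notifications are ignored. The
 * callback lost its "nmi" argument in more recent kernels, hence the
 * two variants below.
 */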
#if defined(CONFIG_PERF_EVENTS) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,99))
static
void overflow_callback(struct perf_event *event,
                       struct perf_sample_data *data,
                       struct pt_regs *regs)
{
}
#else
static
void overflow_callback(struct perf_event *event, int nmi,
                       struct perf_sample_data *data,
                       struct pt_regs *regs)
{
}
#endif

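/*
 * Release every per-CPU perf event, unregister the CPU hotplug notifier
 * when hotplug support is built in, and free the memory allocated by
 * lttng_add_perf_counter_to_ctx().
 */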
static
void lttng_destroy_perf_counter_field(struct lttng_ctx_field *field)
{
        struct perf_event **events = field->u.perf_counter->e;
        int cpu;

        get_online_cpus();
        for_each_online_cpu(cpu)
                perf_event_release_kernel(events[cpu]);
        put_online_cpus();
#ifdef CONFIG_HOTPLUG_CPU
        unregister_cpu_notifier(&field->u.perf_counter->nb);
#endif
        kfree(field->event_field.name);
        kfree(field->u.perf_counter->attr);
        kfree(events);
        kfree(field->u.perf_counter);
}

#ifdef CONFIG_HOTPLUG_CPU

/**
 * lttng_perf_counter_cpu_hp_callback - CPU hotplug callback
 * @nb: notifier block
 * @action: hotplug action to take
 * @hcpu: CPU number
 *
 * Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
 *
 * We can set up perf counters when the cpu is online (up prepare seems to be
 * too soon).
 */
static
int __cpuinit lttng_perf_counter_cpu_hp_callback(struct notifier_block *nb,
                                                 unsigned long action,
                                                 void *hcpu)
{
        unsigned int cpu = (unsigned long) hcpu;
        struct lttng_perf_counter_field *perf_field =
                container_of(nb, struct lttng_perf_counter_field, nb);
        struct perf_event **events = perf_field->e;
        struct perf_event_attr *attr = perf_field->attr;
        struct perf_event *pevent;

        if (!perf_field->hp_enable)
                return NOTIFY_OK;

        switch (action) {
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
                pevent = wrapper_perf_event_create_kernel_counter(attr,
                                cpu, NULL, overflow_callback);
                if (!pevent || IS_ERR(pevent))
                        return NOTIFY_BAD;
                if (pevent->state == PERF_EVENT_STATE_ERROR) {
                        perf_event_release_kernel(pevent);
                        return NOTIFY_BAD;
                }
                barrier();      /* Create perf counter before setting event */
                events[cpu] = pevent;
                break;
        case CPU_UP_CANCELED:
        case CPU_UP_CANCELED_FROZEN:
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                pevent = events[cpu];
                events[cpu] = NULL;
                barrier();      /* NULLify event before perf counter teardown */
                perf_event_release_kernel(pevent);
                break;
        }
        return NOTIFY_OK;
}

#endif

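/**
 * lttng_add_perf_counter_to_ctx - append a perf counter field to a context
 * @type: perf event type (e.g. PERF_TYPE_HARDWARE)
 * @config: perf event configuration (e.g. PERF_COUNT_HW_CPU_CYCLES)
 * @name: name of the context field, as it will appear in the trace
 * @ctx: context to which the field is appended
 *
 * Creates one kernel perf counter per online CPU, registers a CPU hotplug
 * notifier so counters follow CPUs coming up or going down, and hooks the
 * field's get_size/record/destroy callbacks. Returns 0 on success, a
 * negative error code on failure (-EEXIST if a field with the same name
 * already exists in @ctx).
 */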
int lttng_add_perf_counter_to_ctx(uint32_t type,
                                  uint64_t config,
                                  const char *name,
                                  struct lttng_ctx **ctx)
{
        struct lttng_ctx_field *field;
        struct lttng_perf_counter_field *perf_field;
        struct perf_event **events;
        struct perf_event_attr *attr;
        int ret;
        int cpu;
        char *name_alloc;

        events = kzalloc(num_possible_cpus() * sizeof(*events), GFP_KERNEL);
        if (!events)
                return -ENOMEM;

        attr = kzalloc(sizeof(struct perf_event_attr), GFP_KERNEL);
        if (!attr) {
                ret = -ENOMEM;
                goto error_attr;
        }

        attr->type = type;
        attr->config = config;
        attr->size = sizeof(struct perf_event_attr);
        attr->pinned = 1;
        attr->disabled = 0;

        perf_field = kzalloc(sizeof(struct lttng_perf_counter_field), GFP_KERNEL);
        if (!perf_field) {
                ret = -ENOMEM;
                goto error_alloc_perf_field;
        }
        perf_field->e = events;
        perf_field->attr = attr;

        name_alloc = kstrdup(name, GFP_KERNEL);
        if (!name_alloc) {
                ret = -ENOMEM;
                goto name_alloc_error;
        }

        field = lttng_append_context(ctx);
        if (!field) {
                ret = -ENOMEM;
                goto append_context_error;
        }
        if (lttng_find_context(*ctx, name_alloc)) {
                ret = -EEXIST;
                goto find_error;
        }

#ifdef CONFIG_HOTPLUG_CPU
        perf_field->nb.notifier_call =
                lttng_perf_counter_cpu_hp_callback;
        perf_field->nb.priority = 0;
        register_cpu_notifier(&perf_field->nb);
#endif

        get_online_cpus();
        for_each_online_cpu(cpu) {
                events[cpu] = wrapper_perf_event_create_kernel_counter(attr,
                                cpu, NULL, overflow_callback);
                if (!events[cpu] || IS_ERR(events[cpu])) {
                        ret = -EINVAL;
                        goto counter_error;
                }
                if (events[cpu]->state == PERF_EVENT_STATE_ERROR) {
                        ret = -EBUSY;
                        goto counter_busy;
                }
        }
        put_online_cpus();

        field->destroy = lttng_destroy_perf_counter_field;

        field->event_field.name = name_alloc;
        field->event_field.type.atype = atype_integer;
        field->event_field.type.u.basic.integer.size = sizeof(unsigned long) * CHAR_BIT;
        field->event_field.type.u.basic.integer.alignment = ltt_alignof(unsigned long) * CHAR_BIT;
        field->event_field.type.u.basic.integer.signedness = is_signed_type(unsigned long);
        field->event_field.type.u.basic.integer.reverse_byte_order = 0;
        field->event_field.type.u.basic.integer.base = 10;
        field->event_field.type.u.basic.integer.encoding = lttng_encode_none;
        field->get_size = perf_counter_get_size;
        field->record = perf_counter_record;
        field->u.perf_counter = perf_field;
        perf_field->hp_enable = 1;

        wrapper_vmalloc_sync_all();
        return 0;

counter_busy:
counter_error:
        for_each_online_cpu(cpu) {
                if (events[cpu] && !IS_ERR(events[cpu]))
                        perf_event_release_kernel(events[cpu]);
        }
        put_online_cpus();
#ifdef CONFIG_HOTPLUG_CPU
        unregister_cpu_notifier(&perf_field->nb);
#endif
find_error:
        lttng_remove_context_field(ctx, field);
append_context_error:
        kfree(name_alloc);
name_alloc_error:
        kfree(perf_field);
error_alloc_perf_field:
        kfree(attr);
error_attr:
        kfree(events);
        return ret;
}

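/*
 * Usage sketch (illustrative only; the surrounding session handling is an
 * assumption, not part of this file): a caller wiring up a CPU-cycles
 * context field would do something along these lines:
 *
 *      int ret;
 *
 *      ret = lttng_add_perf_counter_to_ctx(PERF_TYPE_HARDWARE,
 *                      PERF_COUNT_HW_CPU_CYCLES,
 *                      "perf_cpu_cycles", &session->ctx);
 *      if (ret)
 *              return ret;
 */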
MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Mathieu Desnoyers");
MODULE_DESCRIPTION("Linux Trace Toolkit Perf Support");