/*
 * ltt/ltt-tracer.c
 *
 * (C) Copyright 2005-2008 -
 *		Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
 *
 * Tracing management internal kernel API. Trace buffer allocation/free, tracing
 * start/stop.
 *
 * Author:
 *	Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
 *
 * Inspired from LTT :
 *  Karim Yaghmour (karim@opersys.com)
 *  Tom Zanussi (zanussi@us.ibm.com)
 *  Bob Wisniewski (bob@watson.ibm.com)
 * And from K42 :
 *  Bob Wisniewski (bob@watson.ibm.com)
 *
 * Changelog:
 *  22/09/06, Move to the marker/probes mechanism.
 *  19/10/05, Complete lockless mechanism.
 *  27/05/05, Modular redesign and rewrite.
 */

//ust// #include <linux/time.h>
//ust// #include <linux/ltt-tracer.h>
//ust// #include <linux/module.h>
//ust// #include <linux/string.h>
//ust// #include <linux/slab.h>
//ust// #include <linux/init.h>
//ust// #include <linux/rcupdate.h>
//ust// #include <linux/sched.h>
//ust// #include <linux/bitops.h>
//ust// #include <linux/fs.h>
//ust// #include <linux/cpu.h>
//ust// #include <linux/kref.h>
//ust// #include <linux/delay.h>
//ust// #include <linux/vmalloc.h>
//ust// #include <asm/atomic.h>
#include "kernelcompat.h"
#include "tracercore.h"
#include "tracer.h"
#include "usterr.h"

//ust// static void async_wakeup(unsigned long data);
//ust//
//ust// static DEFINE_TIMER(ltt_async_wakeup_timer, async_wakeup, 0, 0);
/* Default callbacks for modules */
notrace int ltt_filter_control_default(enum ltt_filter_control_msg msg,
		struct ltt_trace_struct *trace)
{
	return 0;
}

int ltt_statedump_default(struct ltt_trace_struct *trace)
{
	return 0;
}

/* Callbacks for registered modules */

int (*ltt_filter_control_functor)
	(enum ltt_filter_control_msg msg, struct ltt_trace_struct *trace) =
					ltt_filter_control_default;
struct module *ltt_filter_control_owner;

/* These function pointers are protected by a trace activation check */
struct module *ltt_run_filter_owner;
int (*ltt_statedump_functor)(struct ltt_trace_struct *trace) =
					ltt_statedump_default;
struct module *ltt_statedump_owner;

struct chan_info_struct {
	const char *name;
	unsigned int def_subbufsize;
	unsigned int def_subbufcount;
} chan_infos[] = {
	[LTT_CHANNEL_METADATA] = {
		LTT_METADATA_CHANNEL,
		LTT_DEFAULT_SUBBUF_SIZE_LOW,
		LTT_DEFAULT_N_SUBBUFS_LOW,
	},
	[LTT_CHANNEL_UST] = {
		LTT_UST_CHANNEL,
		LTT_DEFAULT_SUBBUF_SIZE_HIGH,
		LTT_DEFAULT_N_SUBBUFS_HIGH,
	},
};
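/*
 * Map a channel name to its entry in chan_infos[]. Unknown or NULL names
 * fall back to the default UST channel type.
 */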
static enum ltt_channels get_channel_type_from_name(const char *name)
{
	int i;

	if (!name)
		return LTT_CHANNEL_UST;

	for (i = 0; i < ARRAY_SIZE(chan_infos); i++)
		if (chan_infos[i].name && !strcmp(name, chan_infos[i].name))
			return (enum ltt_channels)i;

	return LTT_CHANNEL_UST;
}
/**
 * ltt_module_register - LTT module registration
 * @name: module type
 * @function: callback to register
 * @owner: module which owns the callback
 *
 * The module calling this registration function must ensure that no
 * trap-inducing code will be executed by "function". E.g. vmalloc_sync_all()
 * must be called between a vmalloc and the moment the memory is made visible to
 * "function". This registration acts as a vmalloc_sync_all. Therefore, only if
 * the module allocates virtual memory after its registration must it
 * synchronize the TLBs.
 */
//ust// int ltt_module_register(enum ltt_module_function name, void *function,
//ust// 		struct module *owner)
//ust// {
//ust// 	int ret = 0;
//ust//
//ust// 	/*
//ust// 	 * Make sure no page fault can be triggered by the module about to be
//ust// 	 * registered. We deal with this here so we don't have to call
//ust// 	 * vmalloc_sync_all() in each module's init.
//ust// 	 */
//ust// 	vmalloc_sync_all();
//ust//
//ust// 	switch (name) {
//ust// 	case LTT_FUNCTION_RUN_FILTER:
//ust// 		if (ltt_run_filter_owner != NULL) {
//ust// 			ret = -EEXIST;
//ust// 			goto end;
//ust// 		}
//ust// 		ltt_filter_register((ltt_run_filter_functor)function);
//ust// 		ltt_run_filter_owner = owner;
//ust// 		break;
//ust// 	case LTT_FUNCTION_FILTER_CONTROL:
//ust// 		if (ltt_filter_control_owner != NULL) {
//ust// 			ret = -EEXIST;
//ust// 			goto end;
//ust// 		}
//ust// 		ltt_filter_control_functor =
//ust// 			(int (*)(enum ltt_filter_control_msg,
//ust// 			struct ltt_trace_struct *))function;
//ust// 		ltt_filter_control_owner = owner;
//ust// 		break;
//ust// 	case LTT_FUNCTION_STATEDUMP:
//ust// 		if (ltt_statedump_owner != NULL) {
//ust// 			ret = -EEXIST;
//ust// 			goto end;
//ust// 		}
//ust// 		ltt_statedump_functor =
//ust// 			(int (*)(struct ltt_trace_struct *))function;
//ust// 		ltt_statedump_owner = owner;
//ust// 		break;
//ust// 	}
//ust//
//ust// end:
//ust//
//ust// 	return ret;
//ust// }
//ust// EXPORT_SYMBOL_GPL(ltt_module_register);

/**
 * ltt_module_unregister - LTT module unregistration
 * @name: module type
 */
//ust// void ltt_module_unregister(enum ltt_module_function name)
//ust// {
//ust// 	switch (name) {
//ust// 	case LTT_FUNCTION_RUN_FILTER:
//ust// 		ltt_filter_unregister();
//ust// 		ltt_run_filter_owner = NULL;
//ust// 		/* Wait for preempt sections to finish */
//ust// 		synchronize_sched();
//ust// 		break;
//ust// 	case LTT_FUNCTION_FILTER_CONTROL:
//ust// 		ltt_filter_control_functor = ltt_filter_control_default;
//ust// 		ltt_filter_control_owner = NULL;
//ust// 		break;
//ust// 	case LTT_FUNCTION_STATEDUMP:
//ust// 		ltt_statedump_functor = ltt_statedump_default;
//ust// 		ltt_statedump_owner = NULL;
//ust// 		break;
//ust// 	}
//ust//
//ust// }
//ust// EXPORT_SYMBOL_GPL(ltt_module_unregister);

static LIST_HEAD(ltt_transport_list);

/**
 * ltt_transport_register - LTT transport registration
 * @transport: transport structure
 *
 * Registers a transport which can be used as output to extract the data out of
 * LTTng. The module calling this registration function must ensure that no
 * trap-inducing code will be executed by the transport functions. E.g.
 * vmalloc_sync_all() must be called between a vmalloc and the moment the memory
 * is made visible to the transport function. This registration acts as a
 * vmalloc_sync_all. Therefore, only if the module allocates virtual memory
 * after its registration must it synchronize the TLBs.
 */
void ltt_transport_register(struct ltt_transport *transport)
{
	/*
	 * Make sure no page fault can be triggered by the module about to be
	 * registered. We deal with this here so we don't have to call
	 * vmalloc_sync_all() in each module's init.
	 */
	//ust// vmalloc_sync_all();

	ltt_lock_traces();
	list_add_tail(&transport->node, &ltt_transport_list);
	ltt_unlock_traces();
}
//ust// EXPORT_SYMBOL_GPL(ltt_transport_register);

/**
 * ltt_transport_unregister - LTT transport unregistration
 * @transport: transport structure
 */
void ltt_transport_unregister(struct ltt_transport *transport)
{
	ltt_lock_traces();
	list_del(&transport->node);
	ltt_unlock_traces();
}
//ust// EXPORT_SYMBOL_GPL(ltt_transport_unregister);
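/*
 * Decide whether a channel runs in overwrite (flight recorder) mode for the
 * given trace mode. The metadata channel is never overwritten, since its
 * contents are required to decode the rest of the trace.
 */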
static inline int is_channel_overwrite(enum ltt_channels chan,
	enum trace_mode mode)
{
	switch (mode) {
	case LTT_TRACE_NORMAL:
		return 0;
	case LTT_TRACE_FLIGHT:
		switch (chan) {
		case LTT_CHANNEL_METADATA:
			return 0;
		default:
			return 1;
		}
	case LTT_TRACE_HYBRID:
		switch (chan) {
		case LTT_CHANNEL_METADATA:
			return 0;
		default:
			return 1;
		}
	default:
		return 0;
	}
}

/**
 * ltt_write_trace_header - Write trace header
 * @trace: Trace information
 * @header: Memory address where the information must be written to
 */
void notrace ltt_write_trace_header(struct ltt_trace_struct *trace,
		struct ltt_subbuffer_header *header)
{
	header->magic_number = LTT_TRACER_MAGIC_NUMBER;
	header->major_version = LTT_TRACER_VERSION_MAJOR;
	header->minor_version = LTT_TRACER_VERSION_MINOR;
	header->arch_size = sizeof(void *);
	header->alignment = ltt_get_alignment();
	header->start_time_sec = trace->start_time.tv_sec;
	header->start_time_usec = trace->start_time.tv_usec;
	header->start_freq = trace->start_freq;
	header->freq_scale = trace->freq_scale;
}
//ust// EXPORT_SYMBOL_GPL(ltt_write_trace_header);

static void trace_async_wakeup(struct ltt_trace_struct *trace)
{
	int i;
	struct ltt_channel_struct *chan;

	/* Must check each channel for pending read wakeup */
	for (i = 0; i < trace->nr_channels; i++) {
		chan = &trace->channels[i];
		if (chan->active)
			trace->ops->wakeup_channel(chan);
	}
}
//ust// /* Timer to send async wakeups to the readers */
//ust// static void async_wakeup(unsigned long data)
//ust// {
//ust// 	struct ltt_trace_struct *trace;
//ust//
//ust// 	/*
//ust// 	 * PREEMPT_RT does not allow spinlocks to be taken within preempt
//ust// 	 * disable sections (spinlock taken in wake_up). However, mainline won't
//ust// 	 * allow mutex to be taken in interrupt context. Ugly.
//ust// 	 * A proper way to do this would be to turn the timer into a
//ust// 	 * periodically woken up thread, but it adds to the footprint.
//ust// 	 */
//ust// #ifndef CONFIG_PREEMPT_RT
//ust// 	rcu_read_lock_sched();
//ust// #else
//ust// 	ltt_lock_traces();
//ust// #endif
//ust// 	list_for_each_entry_rcu(trace, &ltt_traces.head, list) {
//ust// 		trace_async_wakeup(trace);
//ust// 	}
//ust// #ifndef CONFIG_PREEMPT_RT
//ust// 	rcu_read_unlock_sched();
//ust// #else
//ust// 	ltt_unlock_traces();
//ust// #endif
//ust//
//ust// 	mod_timer(&ltt_async_wakeup_timer, jiffies + LTT_PERCPU_TIMER_INTERVAL);
//ust// }

/**
 * _ltt_trace_find - find a trace by given name.
 * trace_name: trace name
 *
 * Returns a pointer to the trace structure, NULL if not found.
 */
struct ltt_trace_struct *_ltt_trace_find(const char *trace_name)
{
	struct ltt_trace_struct *trace;

	list_for_each_entry(trace, &ltt_traces.head, list)
		if (!strncmp(trace->trace_name, trace_name, NAME_MAX))
			return trace;

	return NULL;
}

/* _ltt_trace_find_setup :
 * find a trace in setup list by given name.
 *
 * Returns a pointer to the trace structure, NULL if not found.
 */
struct ltt_trace_struct *_ltt_trace_find_setup(const char *trace_name)
{
	struct ltt_trace_struct *trace;

	list_for_each_entry(trace, &ltt_traces.setup_head, list)
		if (!strncmp(trace->trace_name, trace_name, NAME_MAX))
			return trace;

	return NULL;
}
//ust// EXPORT_SYMBOL_GPL(_ltt_trace_find_setup);

/**
 * ltt_release_transport - Release an LTT transport
 * @kref : reference count on the transport
 */
void ltt_release_transport(struct kref *kref)
{
	struct ltt_trace_struct *trace = container_of(kref,
			struct ltt_trace_struct, ltt_transport_kref);
	//ust// trace->ops->remove_dirs(trace);
}
//ust// EXPORT_SYMBOL_GPL(ltt_release_transport);

/**
 * ltt_release_trace - Release a LTT trace
 * @kref : reference count on the trace
 */
void ltt_release_trace(struct kref *kref)
{
	struct ltt_trace_struct *trace = container_of(kref,
			struct ltt_trace_struct, kref);
	ltt_channels_trace_free(trace->channels);
	kfree(trace);
}
//ust// EXPORT_SYMBOL_GPL(ltt_release_trace);
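/*
 * Round the sub-buffer size and sub-buffer count up to the next power of two
 * before the channel buffers are created.
 */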
static inline void prepare_chan_size_num(unsigned int *subbuf_size,
		unsigned int *n_subbufs)
{
	*subbuf_size = 1 << get_count_order(*subbuf_size);
	*n_subbufs = 1 << get_count_order(*n_subbufs);

	/* Subbuf size and number must both be power of two */
	WARN_ON(hweight32(*subbuf_size) != 1);
	WARN_ON(hweight32(*n_subbufs) != 1);
}
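/*
 * Create a trace descriptor on the setup list. The trace only gets its
 * buffers when ltt_trace_alloc() is called on it later; until then its
 * channel parameters can still be adjusted with the setters below.
 */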
int _ltt_trace_setup(const char *trace_name)
{
	int err = 0;
	struct ltt_trace_struct *new_trace = NULL;
	int metadata_index;
	unsigned int chan;
	enum ltt_channels chantype;

	if (_ltt_trace_find_setup(trace_name)) {
		printk(KERN_ERR "LTT : Trace name %s already used.\n",
				trace_name);
		err = -EEXIST;
		goto traces_error;
	}

	if (_ltt_trace_find(trace_name)) {
		printk(KERN_ERR "LTT : Trace name %s already used.\n",
				trace_name);
		err = -EEXIST;
		goto traces_error;
	}

	new_trace = kzalloc(sizeof(struct ltt_trace_struct), GFP_KERNEL);
	if (!new_trace) {
		printk(KERN_ERR
			"LTT : Unable to allocate memory for trace %s\n",
			trace_name);
		err = -ENOMEM;
		goto traces_error;
	}
	strncpy(new_trace->trace_name, trace_name, NAME_MAX);
	new_trace->channels = ltt_channels_trace_alloc(&new_trace->nr_channels,
						0, 1);
	if (!new_trace->channels) {
		printk(KERN_ERR
			"LTT : Unable to allocate memory for chaninfo %s\n",
			trace_name);
		err = -ENOMEM;
		goto trace_free;
	}

	/*
	 * Force metadata channel to active, no overwrite.
	 */
	metadata_index = ltt_channels_get_index_from_name("metadata");
	WARN_ON(metadata_index < 0);
	new_trace->channels[metadata_index].overwrite = 0;
	new_trace->channels[metadata_index].active = 1;

	/*
	 * Set hardcoded tracer defaults for some channels
	 */
	for (chan = 0; chan < new_trace->nr_channels; chan++) {
		if (!(new_trace->channels[chan].active))
			continue;

		chantype = get_channel_type_from_name(
			ltt_channels_get_name_from_index(chan));
		new_trace->channels[chan].subbuf_size =
			chan_infos[chantype].def_subbufsize;
		new_trace->channels[chan].subbuf_cnt =
			chan_infos[chantype].def_subbufcount;
	}

	list_add(&new_trace->list, &ltt_traces.setup_head);
	return 0;

trace_free:
	kfree(new_trace);
traces_error:
	return err;
}
//ust// EXPORT_SYMBOL_GPL(_ltt_trace_setup);


int ltt_trace_setup(const char *trace_name)
{
	int ret;
	ltt_lock_traces();
	ret = _ltt_trace_setup(trace_name);
	ltt_unlock_traces();
	return ret;
}
//ust// EXPORT_SYMBOL_GPL(ltt_trace_setup);

/* must be called from within a traces lock. */
static void _ltt_trace_free(struct ltt_trace_struct *trace)
{
	list_del(&trace->list);
	kfree(trace);
}
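/*
 * Select the transport (output back-end) used by a trace still on the setup
 * list. The transport must have been registered with
 * ltt_transport_register() beforehand.
 */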
int ltt_trace_set_type(const char *trace_name, const char *trace_type)
{
	int err = 0;
	struct ltt_trace_struct *trace;
	struct ltt_transport *tran_iter, *transport = NULL;

	ltt_lock_traces();

	trace = _ltt_trace_find_setup(trace_name);
	if (!trace) {
		printk(KERN_ERR "LTT : Trace not found %s\n", trace_name);
		err = -ENOENT;
		goto traces_error;
	}

	list_for_each_entry(tran_iter, &ltt_transport_list, node) {
		if (!strcmp(tran_iter->name, trace_type)) {
			transport = tran_iter;
			break;
		}
	}
	if (!transport) {
		printk(KERN_ERR "LTT : Transport %s is not present.\n",
			trace_type);
		err = -EINVAL;
		goto traces_error;
	}

	trace->transport = transport;

traces_error:
	ltt_unlock_traces();
	return err;
}
//ust// EXPORT_SYMBOL_GPL(ltt_trace_set_type);

int ltt_trace_set_channel_subbufsize(const char *trace_name,
		const char *channel_name, unsigned int size)
{
	int err = 0;
	struct ltt_trace_struct *trace;
	int index;

	ltt_lock_traces();

	trace = _ltt_trace_find_setup(trace_name);
	if (!trace) {
		printk(KERN_ERR "LTT : Trace not found %s\n", trace_name);
		err = -ENOENT;
		goto traces_error;
	}

	index = ltt_channels_get_index_from_name(channel_name);
	if (index < 0) {
		printk(KERN_ERR "LTT : Channel %s not found\n", channel_name);
		err = -ENOENT;
		goto traces_error;
	}
	trace->channels[index].subbuf_size = size;

traces_error:
	ltt_unlock_traces();
	return err;
}
//ust// EXPORT_SYMBOL_GPL(ltt_trace_set_channel_subbufsize);

int ltt_trace_set_channel_subbufcount(const char *trace_name,
		const char *channel_name, unsigned int cnt)
{
	int err = 0;
	struct ltt_trace_struct *trace;
	int index;

	ltt_lock_traces();

	trace = _ltt_trace_find_setup(trace_name);
	if (!trace) {
		printk(KERN_ERR "LTT : Trace not found %s\n", trace_name);
		err = -ENOENT;
		goto traces_error;
	}

	index = ltt_channels_get_index_from_name(channel_name);
	if (index < 0) {
		printk(KERN_ERR "LTT : Channel %s not found\n", channel_name);
		err = -ENOENT;
		goto traces_error;
	}
	trace->channels[index].subbuf_cnt = cnt;

traces_error:
	ltt_unlock_traces();
	return err;
}
//ust// EXPORT_SYMBOL_GPL(ltt_trace_set_channel_subbufcount);
int ltt_trace_set_channel_enable(const char *trace_name,
		const char *channel_name, unsigned int enable)
{
	int err = 0;
	struct ltt_trace_struct *trace;
	int index;

	ltt_lock_traces();

	trace = _ltt_trace_find_setup(trace_name);
	if (!trace) {
		printk(KERN_ERR "LTT : Trace not found %s\n", trace_name);
		err = -ENOENT;
		goto traces_error;
	}

	/*
	 * The data in the metadata channel (marker info) is necessary to be
	 * able to read the trace, so this channel is always kept enabled.
	 */
	if (!enable && !strcmp(channel_name, "metadata")) {
		printk(KERN_ERR "LTT : Trying to disable metadata channel\n");
		err = -EINVAL;
		goto traces_error;
	}

	index = ltt_channels_get_index_from_name(channel_name);
	if (index < 0) {
		printk(KERN_ERR "LTT : Channel %s not found\n", channel_name);
		err = -ENOENT;
		goto traces_error;
	}

	trace->channels[index].active = enable;

traces_error:
	ltt_unlock_traces();
	return err;
}
//ust// EXPORT_SYMBOL_GPL(ltt_trace_set_channel_enable);

int ltt_trace_set_channel_overwrite(const char *trace_name,
		const char *channel_name, unsigned int overwrite)
{
	int err = 0;
	struct ltt_trace_struct *trace;
	int index;

	ltt_lock_traces();

	trace = _ltt_trace_find_setup(trace_name);
	if (!trace) {
		printk(KERN_ERR "LTT : Trace not found %s\n", trace_name);
		err = -ENOENT;
		goto traces_error;
	}

	/*
	 * Always put the metadata channel in non-overwrite mode :
	 * This is a very low traffic channel and it can't afford to have its
	 * data overwritten : this data (marker info) is necessary to be
	 * able to read the trace.
	 */
	if (overwrite && !strcmp(channel_name, "metadata")) {
		printk(KERN_ERR "LTT : Trying to set metadata channel to "
				"overwrite mode\n");
		err = -EINVAL;
		goto traces_error;
	}

	index = ltt_channels_get_index_from_name(channel_name);
	if (index < 0) {
		printk(KERN_ERR "LTT : Channel %s not found\n", channel_name);
		err = -ENOENT;
		goto traces_error;
	}

	trace->channels[index].overwrite = overwrite;

traces_error:
	ltt_unlock_traces();
	return err;
}
//ust// EXPORT_SYMBOL_GPL(ltt_trace_set_channel_overwrite);
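/**
 * ltt_trace_alloc - allocate the buffers of a trace
 * @trace_name: name of a trace previously created with ltt_trace_setup()
 *
 * Creates the channel buffers through the selected transport and moves the
 * trace from the setup list to the list of allocated traces.
 */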
int ltt_trace_alloc(const char *trace_name)
{
	int err = 0;
	struct ltt_trace_struct *trace;
	int subbuf_size, subbuf_cnt;
	unsigned long flags;
	int chan;
	const char *channel_name;

	ltt_lock_traces();

	trace = _ltt_trace_find_setup(trace_name);
	if (!trace) {
		printk(KERN_ERR "LTT : Trace not found %s\n", trace_name);
		err = -ENOENT;
		goto traces_error;
	}

	kref_init(&trace->kref);
	kref_init(&trace->ltt_transport_kref);
	//ust// init_waitqueue_head(&trace->kref_wq);
	trace->active = 0;
	//ust// get_trace_clock();
	trace->freq_scale = trace_clock_freq_scale();

	if (!trace->transport) {
		printk(KERN_ERR "LTT : Transport is not set.\n");
		err = -EINVAL;
		goto transport_error;
	}
	//ust// if (!try_module_get(trace->transport->owner)) {
	//ust// 	printk(KERN_ERR "LTT : Can't lock transport module.\n");
	//ust// 	err = -ENODEV;
	//ust// 	goto transport_error;
	//ust// }
	trace->ops = &trace->transport->ops;

	//ust// err = trace->ops->create_dirs(trace);
	//ust// if (err) {
	//ust// 	printk(KERN_ERR "LTT : Can't create dir for trace %s.\n",
	//ust// 		trace_name);
	//ust// 	goto dirs_error;
	//ust// }

	//ust// local_irq_save(flags);
	trace->start_freq = trace_clock_frequency();
	trace->start_tsc = trace_clock_read64();
	gettimeofday(&trace->start_time, NULL); //ust// changed
	//ust// local_irq_restore(flags);

	for (chan = 0; chan < trace->nr_channels; chan++) {
		if (!(trace->channels[chan].active))
			continue;

		channel_name = ltt_channels_get_name_from_index(chan);
		WARN_ON(!channel_name);
		subbuf_size = trace->channels[chan].subbuf_size;
		subbuf_cnt = trace->channels[chan].subbuf_cnt;
		prepare_chan_size_num(&subbuf_size, &subbuf_cnt);
		err = trace->ops->create_channel(trace_name, trace,
				trace->dentry.trace_root,
				channel_name,
				&trace->channels[chan],
				subbuf_size,
				subbuf_cnt,
				trace->channels[chan].overwrite);
		if (err != 0) {
			printk(KERN_ERR "LTT : Can't create channel %s.\n",
				channel_name);
			goto create_channel_error;
		}
	}

	list_del(&trace->list);
	//ust// if (list_empty(&ltt_traces.head)) {
	//ust// 	mod_timer(&ltt_async_wakeup_timer,
	//ust// 			jiffies + LTT_PERCPU_TIMER_INTERVAL);
	//ust// 	set_kernel_trace_flag_all_tasks();
	//ust// }
	list_add_rcu(&trace->list, &ltt_traces.head);
	//ust// synchronize_sched();

	ltt_unlock_traces();

	return 0;

create_channel_error:
	for (chan--; chan >= 0; chan--)
		if (trace->channels[chan].active)
			trace->ops->remove_channel(&trace->channels[chan]);

dirs_error:
	//ust// module_put(trace->transport->owner);
transport_error:
	//ust// put_trace_clock();
traces_error:
	ltt_unlock_traces();
	return err;
}
//ust// EXPORT_SYMBOL_GPL(ltt_trace_alloc);

/*
 * This function works as a wrapper for the current version of ltt_control.ko.
 * We will make a new ltt_control based on debugfs, and control each channel's
 * buffer.
 */
static int ltt_trace_create(const char *trace_name, const char *trace_type,
		enum trace_mode mode,
		unsigned int subbuf_size_low, unsigned int n_subbufs_low,
		unsigned int subbuf_size_med, unsigned int n_subbufs_med,
		unsigned int subbuf_size_high, unsigned int n_subbufs_high)
{
	int err = 0;

	err = ltt_trace_setup(trace_name);
	if (IS_ERR_VALUE(err))
		return err;

	err = ltt_trace_set_type(trace_name, trace_type);
	if (IS_ERR_VALUE(err))
		return err;

	err = ltt_trace_alloc(trace_name);
	if (IS_ERR_VALUE(err))
		return err;

	return err;
}
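/*
 * Typical lifecycle of a trace driven through this API (illustrative sketch
 * only; error handling is omitted, and "my_trace", "relay" and "ust" are
 * placeholder names for a trace, a registered transport and a channel, not
 * values defined in this file):
 *
 *	ltt_trace_setup("my_trace");
 *	ltt_trace_set_type("my_trace", "relay");
 *	ltt_trace_set_channel_subbufsize("my_trace", "ust", 4096);
 *	ltt_trace_set_channel_subbufcount("my_trace", "ust", 8);
 *	ltt_trace_alloc("my_trace");
 *	ltt_trace_start("my_trace");
 *	...
 *	ltt_trace_stop("my_trace");
 *	ltt_trace_destroy("my_trace");
 */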
/* Must be called while sure that trace is in the list. */
static int _ltt_trace_destroy(struct ltt_trace_struct *trace)
{
	int err = -EPERM;

	if (trace == NULL) {
		err = -ENOENT;
		goto traces_error;
	}
	if (trace->active) {
		printk(KERN_ERR
			"LTT : Can't destroy trace %s : tracer is active\n",
			trace->trace_name);
		err = -EBUSY;
		goto active_error;
	}
	/* Everything went fine */
	//ust// list_del_rcu(&trace->list);
	//ust// synchronize_sched();
	if (list_empty(&ltt_traces.head)) {
		//ust// clear_kernel_trace_flag_all_tasks();
		/*
		 * We stop the asynchronous delivery of reader wakeup, but
		 * we must make one last check for reader wakeups pending
		 * later in __ltt_trace_destroy.
		 */
		//ust// del_timer_sync(&ltt_async_wakeup_timer);
	}
	return 0;

	/* error handling */
active_error:
traces_error:
	return err;
}

/* Sleepable part of the destroy */
static void __ltt_trace_destroy(struct ltt_trace_struct *trace)
{
	int i;
	struct ltt_channel_struct *chan;

	for (i = 0; i < trace->nr_channels; i++) {
		chan = &trace->channels[i];
		if (chan->active)
			trace->ops->finish_channel(chan);
	}

	return; /* FIXME: temporary for ust */
	//ust// flush_scheduled_work();

	/*
	 * The currently destroyed trace is not in the trace list anymore,
	 * so it's safe to call the async wakeup ourself. It will deliver
	 * the last subbuffers.
	 */
	trace_async_wakeup(trace);

	for (i = 0; i < trace->nr_channels; i++) {
		chan = &trace->channels[i];
		if (chan->active)
			trace->ops->remove_channel(chan);
	}

	kref_put(&trace->ltt_transport_kref, ltt_release_transport);

	//ust// module_put(trace->transport->owner);

	/*
	 * Wait for lttd readers to release the files, therefore making sure
	 * the last subbuffers have been read.
	 */
	//ust// if (atomic_read(&trace->kref.refcount) > 1) {
	//ust// 	int ret = 0;
	//ust// 	__wait_event_interruptible(trace->kref_wq,
	//ust// 		(atomic_read(&trace->kref.refcount) == 1), ret);
	//ust// }
	kref_put(&trace->kref, ltt_release_trace);
}
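/*
 * Tear down a trace by name: an allocated (but inactive) trace has its
 * channels removed and its references dropped, while a trace still on the
 * setup list is simply freed.
 */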
int ltt_trace_destroy(const char *trace_name)
{
	int err = 0;
	struct ltt_trace_struct *trace;

	ltt_lock_traces();

	trace = _ltt_trace_find(trace_name);
	if (trace) {
		err = _ltt_trace_destroy(trace);
		if (err)
			goto error;

		ltt_unlock_traces();

		__ltt_trace_destroy(trace);
		//ust// put_trace_clock();

		return 0;
	}

	trace = _ltt_trace_find_setup(trace_name);
	if (trace) {
		_ltt_trace_free(trace);
		ltt_unlock_traces();
		return 0;
	}

	err = -ENOENT;

	/* Error handling */
error:
	ltt_unlock_traces();
	return err;
}
//ust// EXPORT_SYMBOL_GPL(ltt_trace_destroy);

/* must be called from within a traces lock. */
static int _ltt_trace_start(struct ltt_trace_struct *trace)
{
	int err = 0;

	if (trace == NULL) {
		err = -ENOENT;
		goto traces_error;
	}
	if (trace->active)
		printk(KERN_INFO "LTT : Tracing already active for trace %s\n",
				trace->trace_name);
	//ust// if (!try_module_get(ltt_run_filter_owner)) {
	//ust// 	err = -ENODEV;
	//ust// 	printk(KERN_ERR "LTT : Can't lock filter module.\n");
	//ust// 	goto get_ltt_run_filter_error;
	//ust// }
	trace->active = 1;
	/* Read by trace points without protection : be careful */
	ltt_traces.num_active_traces++;
	return err;

	/* error handling */
get_ltt_run_filter_error:
traces_error:
	return err;
}
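/*
 * Activate tracing for a previously allocated trace and dump the current
 * marker state into it so the resulting trace can be decoded on its own.
 */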
int ltt_trace_start(const char *trace_name)
{
	int err = 0;
	struct ltt_trace_struct *trace;

	ltt_lock_traces();

	trace = _ltt_trace_find(trace_name);
	err = _ltt_trace_start(trace);
	if (err)
		goto no_trace;

	ltt_unlock_traces();

	/*
	 * Call the kernel state dump.
	 * Events will be mixed with real kernel events, it's ok.
	 * Notice that there is no protection on the trace : that's exactly
	 * why we iterate on the list and check for trace equality instead of
	 * directly using this trace handle inside the logging function.
	 */

	ltt_dump_marker_state(trace);

	//ust// if (!try_module_get(ltt_statedump_owner)) {
	//ust// 	err = -ENODEV;
	//ust// 	printk(KERN_ERR
	//ust// 		"LTT : Can't lock state dump module.\n");
	//ust// } else {
		ltt_statedump_functor(trace);
	//ust// 	module_put(ltt_statedump_owner);
	//ust// }

	return err;

	/* Error handling */
no_trace:
	ltt_unlock_traces();
	return err;
}
//ust// EXPORT_SYMBOL_GPL(ltt_trace_start);

/* must be called from within traces lock */
static int _ltt_trace_stop(struct ltt_trace_struct *trace)
{
	int err = -EPERM;

	if (trace == NULL) {
		err = -ENOENT;
		goto traces_error;
	}
	if (!trace->active)
		printk(KERN_INFO "LTT : Tracing not active for trace %s\n",
				trace->trace_name);
	if (trace->active) {
		trace->active = 0;
		ltt_traces.num_active_traces--;
		//ust// synchronize_sched(); /* Wait for each tracing to be finished */
	}
	//ust// module_put(ltt_run_filter_owner);
	/* Everything went fine */
	return 0;

	/* Error handling */
traces_error:
	return err;
}
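/*
 * Deactivate tracing for the named trace. Buffers stay allocated until
 * ltt_trace_destroy() is called.
 */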
int ltt_trace_stop(const char *trace_name)
{
	int err = 0;
	struct ltt_trace_struct *trace;

	ltt_lock_traces();
	trace = _ltt_trace_find(trace_name);
	err = _ltt_trace_stop(trace);
	ltt_unlock_traces();
	return err;
}
//ust// EXPORT_SYMBOL_GPL(ltt_trace_stop);

/**
 * ltt_control - Trace control in-kernel API
 * @msg: Action to perform
 * @trace_name: Trace on which the action must be done
 * @trace_type: Type of trace (normal, flight, hybrid)
 * @args: Arguments specific to the action
 */
//ust// int ltt_control(enum ltt_control_msg msg, const char *trace_name,
//ust// 		const char *trace_type, union ltt_control_args args)
//ust// {
//ust// 	int err = -EPERM;
//ust//
//ust// 	printk(KERN_ALERT "ltt_control : trace %s\n", trace_name);
//ust// 	switch (msg) {
//ust// 	case LTT_CONTROL_START:
//ust// 		printk(KERN_DEBUG "Start tracing %s\n", trace_name);
//ust// 		err = ltt_trace_start(trace_name);
//ust// 		break;
//ust// 	case LTT_CONTROL_STOP:
//ust// 		printk(KERN_DEBUG "Stop tracing %s\n", trace_name);
//ust// 		err = ltt_trace_stop(trace_name);
//ust// 		break;
//ust// 	case LTT_CONTROL_CREATE_TRACE:
//ust// 		printk(KERN_DEBUG "Creating trace %s\n", trace_name);
//ust// 		err = ltt_trace_create(trace_name, trace_type,
//ust// 			args.new_trace.mode,
//ust// 			args.new_trace.subbuf_size_low,
//ust// 			args.new_trace.n_subbufs_low,
//ust// 			args.new_trace.subbuf_size_med,
//ust// 			args.new_trace.n_subbufs_med,
//ust// 			args.new_trace.subbuf_size_high,
//ust// 			args.new_trace.n_subbufs_high);
//ust// 		break;
//ust// 	case LTT_CONTROL_DESTROY_TRACE:
//ust// 		printk(KERN_DEBUG "Destroying trace %s\n", trace_name);
//ust// 		err = ltt_trace_destroy(trace_name);
//ust// 		break;
//ust// 	}
//ust// 	return err;
//ust// }
//ust// EXPORT_SYMBOL_GPL(ltt_control);
/**
 * ltt_filter_control - Trace filter control in-kernel API
 * @msg: Action to perform on the filter
 * @trace_name: Trace on which the action must be done
 */
int ltt_filter_control(enum ltt_filter_control_msg msg, const char *trace_name)
{
	int err;
	struct ltt_trace_struct *trace;

	printk(KERN_DEBUG "ltt_filter_control : trace %s\n", trace_name);
	ltt_lock_traces();
	trace = _ltt_trace_find(trace_name);
	if (trace == NULL) {
		printk(KERN_ALERT
			"Trace does not exist. Cannot proxy control request\n");
		err = -ENOENT;
		goto trace_error;
	}
	//ust// if (!try_module_get(ltt_filter_control_owner)) {
	//ust// 	err = -ENODEV;
	//ust// 	goto get_module_error;
	//ust// }
	switch (msg) {
	case LTT_FILTER_DEFAULT_ACCEPT:
		printk(KERN_DEBUG
			"Proxy filter default accept %s\n", trace_name);
		err = (*ltt_filter_control_functor)(msg, trace);
		break;
	case LTT_FILTER_DEFAULT_REJECT:
		printk(KERN_DEBUG
			"Proxy filter default reject %s\n", trace_name);
		err = (*ltt_filter_control_functor)(msg, trace);
		break;
	default:
		err = -EPERM;
	}
	//ust// module_put(ltt_filter_control_owner);

get_module_error:
trace_error:
	ltt_unlock_traces();
	return err;
}
//ust// EXPORT_SYMBOL_GPL(ltt_filter_control);

//ust// int __init ltt_init(void)
//ust// {
//ust// 	/* Make sure no page fault can be triggered by this module */
//ust// 	vmalloc_sync_all();
//ust// 	return 0;
//ust// }

//ust// module_init(ltt_init)

//ust// static void __exit ltt_exit(void)
//ust// {
//ust// 	struct ltt_trace_struct *trace;
//ust// 	struct list_head *pos, *n;
//ust//
//ust// 	ltt_lock_traces();
//ust// 	/* Stop each trace, currently being read by RCU read-side */
//ust// 	list_for_each_entry_rcu(trace, &ltt_traces.head, list)
//ust// 		_ltt_trace_stop(trace);
//ust// 	/* Wait for quiescent state. Readers have preemption disabled. */
//ust// 	synchronize_sched();
//ust// 	/* Safe iteration is now permitted. It does not have to be RCU-safe
//ust// 	 * because no readers are left. */
//ust// 	list_for_each_safe(pos, n, &ltt_traces.head) {
//ust// 		trace = container_of(pos, struct ltt_trace_struct, list);
//ust// 		/* _ltt_trace_destroy does a synchronize_sched() */
//ust// 		_ltt_trace_destroy(trace);
//ust// 		__ltt_trace_destroy(trace);
//ust// 	}
//ust// 	/* free traces in pre-alloc status */
//ust// 	list_for_each_safe(pos, n, &ltt_traces.setup_head) {
//ust// 		trace = container_of(pos, struct ltt_trace_struct, list);
//ust// 		_ltt_trace_free(trace);
//ust// 	}
//ust//
//ust// 	ltt_unlock_traces();
//ust// }

//ust// module_exit(ltt_exit)

//ust// MODULE_LICENSE("GPL");
//ust// MODULE_AUTHOR("Mathieu Desnoyers");
//ust// MODULE_DESCRIPTION("Linux Trace Toolkit Next Generation Tracer Kernel API");