SoW-2020-0002: Trace Hit Counters: Implement key-addressed counters as a new LTTng...
[deliverable/lttng-modules.git] / src / lttng-events.c
1 /* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
2 *
3 * lttng-events.c
4 *
5 * Holds LTTng per-session event registry.
6 *
7 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 */
9
10 /*
11 * This page_alloc.h wrapper needs to be included before gfpflags.h because it
12 * overrides a function with a define.
13 */
14 #include "wrapper/page_alloc.h"
15
16 #include <linux/module.h>
17 #include <linux/mutex.h>
18 #include <linux/sched.h>
19 #include <linux/slab.h>
20 #include <linux/jiffies.h>
21 #include <linux/utsname.h>
22 #include <linux/err.h>
23 #include <linux/seq_file.h>
24 #include <linux/file.h>
25 #include <linux/anon_inodes.h>
26 #include <wrapper/file.h>
27 #include <linux/uaccess.h>
28 #include <linux/vmalloc.h>
29 #include <linux/dmi.h>
30
31 #include <wrapper/uuid.h>
32 #include <wrapper/vmalloc.h> /* for wrapper_vmalloc_sync_mappings() */
33 #include <wrapper/random.h>
34 #include <wrapper/tracepoint.h>
35 #include <wrapper/list.h>
36 #include <wrapper/types.h>
37 #include <lttng/kernel-version.h>
38 #include <lttng/events.h>
39 #include <lttng/lttng-bytecode.h>
40 #include <lttng/tracer.h>
41 #include <lttng/trigger-notification.h>
42 #include <lttng/abi-old.h>
43 #include <lttng/endian.h>
44 #include <lttng/string-utils.h>
45 #include <lttng/utils.h>
46 #include <ringbuffer/backend.h>
47 #include <ringbuffer/frontend.h>
48 #include <wrapper/time.h>
49
50 #define METADATA_CACHE_DEFAULT_SIZE 4096
51
52 static LIST_HEAD(sessions);
53 static LIST_HEAD(trigger_groups);
54 static LIST_HEAD(lttng_transport_list);
55 static LIST_HEAD(lttng_counter_transport_list);
56 /*
57 * Protect the sessions and metadata caches.
58 */
59 static DEFINE_MUTEX(sessions_mutex);
60 static struct kmem_cache *event_cache;
61 static struct kmem_cache *trigger_cache;
62
63 static void lttng_session_lazy_sync_event_enablers(struct lttng_session *session);
64 static void lttng_session_sync_event_enablers(struct lttng_session *session);
65 static void lttng_event_enabler_destroy(struct lttng_event_enabler *event_enabler);
66 static void lttng_trigger_enabler_destroy(struct lttng_trigger_enabler *trigger_enabler);
67 static void lttng_trigger_group_sync_enablers(struct lttng_trigger_group *trigger_group);
68
69 static void _lttng_event_destroy(struct lttng_event *event);
70 static void _lttng_trigger_destroy(struct lttng_trigger *trigger);
71 static void _lttng_channel_destroy(struct lttng_channel *chan);
72 static int _lttng_event_unregister(struct lttng_event *event);
73 static int _lttng_trigger_unregister(struct lttng_trigger *trigger);
74 static
75 int _lttng_event_metadata_statedump(struct lttng_session *session,
76 struct lttng_channel *chan,
77 struct lttng_event *event);
78 static
79 int _lttng_session_metadata_statedump(struct lttng_session *session);
80 static
81 void _lttng_metadata_channel_hangup(struct lttng_metadata_stream *stream);
82 static
83 int _lttng_type_statedump(struct lttng_session *session,
84 const struct lttng_type *type,
85 size_t nesting);
86 static
87 int _lttng_field_statedump(struct lttng_session *session,
88 const struct lttng_event_field *field,
89 size_t nesting);
90
/*
 * Wait for all currently-executing tracepoint probes to finish, so that
 * unregistered events/probes can be safely torn down afterwards.
 *
 * Probes run under RCU (sched) read-side protection; the primitive that
 * provides the matching grace period changed across kernel versions, and
 * PREEMPT_RT kernels additionally need a regular RCU grace period.
 */
void synchronize_trace(void)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,1,0))
	/* >= 5.1: synchronize_rcu() also covers preempt-disabled regions. */
	synchronize_rcu();
#else
	synchronize_sched();
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
#ifdef CONFIG_PREEMPT_RT_FULL
	synchronize_rcu();
#endif
#else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)) */
#ifdef CONFIG_PREEMPT_RT
	/* Older RT kernels used CONFIG_PREEMPT_RT instead of _RT_FULL. */
	synchronize_rcu();
#endif
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)) */
}
109
/* Take the global sessions mutex (protects sessions list and metadata caches). */
void lttng_lock_sessions(void)
{
	mutex_lock(&sessions_mutex);
}
114
/* Release the global sessions mutex taken by lttng_lock_sessions(). */
void lttng_unlock_sessions(void)
{
	mutex_unlock(&sessions_mutex);
}
119
120 static struct lttng_transport *lttng_transport_find(const char *name)
121 {
122 struct lttng_transport *transport;
123
124 list_for_each_entry(transport, &lttng_transport_list, node) {
125 if (!strcmp(transport->name, name))
126 return transport;
127 }
128 return NULL;
129 }
130
131 /*
132 * Called with sessions lock held.
133 */
134 int lttng_session_active(void)
135 {
136 struct lttng_session *iter;
137
138 list_for_each_entry(iter, &sessions, list) {
139 if (iter->active)
140 return 1;
141 }
142 return 0;
143 }
144
/*
 * Create a new tracing session and its metadata cache, and link it into
 * the global sessions list.
 *
 * Returns the new session, or NULL on allocation failure. The session is
 * created inactive; the metadata cache starts with a default-sized buffer
 * and a refcount of 1 owned by the session.
 */
struct lttng_session *lttng_session_create(void)
{
	struct lttng_session *session;
	struct lttng_metadata_cache *metadata_cache;
	int i;

	mutex_lock(&sessions_mutex);
	session = lttng_kvzalloc(sizeof(struct lttng_session), GFP_KERNEL);
	if (!session)
		goto err;
	INIT_LIST_HEAD(&session->chan);
	INIT_LIST_HEAD(&session->events);
	/* Random UUID identifying this session's trace. */
	lttng_guid_gen(&session->uuid);

	metadata_cache = kzalloc(sizeof(struct lttng_metadata_cache),
			GFP_KERNEL);
	if (!metadata_cache)
		goto err_free_session;
	metadata_cache->data = vzalloc(METADATA_CACHE_DEFAULT_SIZE);
	if (!metadata_cache->data)
		goto err_free_cache;
	metadata_cache->cache_alloc = METADATA_CACHE_DEFAULT_SIZE;
	/* Initial reference owned by the session; dropped in lttng_session_destroy(). */
	kref_init(&metadata_cache->refcount);
	mutex_init(&metadata_cache->lock);
	session->metadata_cache = metadata_cache;
	INIT_LIST_HEAD(&metadata_cache->metadata_stream);
	/* The cache keeps its own copy of the session UUID. */
	memcpy(&metadata_cache->uuid, &session->uuid,
		sizeof(metadata_cache->uuid));
	INIT_LIST_HEAD(&session->enablers_head);
	for (i = 0; i < LTTNG_EVENT_HT_SIZE; i++)
		INIT_HLIST_HEAD(&session->events_ht.table[i]);
	list_add(&session->list, &sessions);
	/* Wire up the six id trackers to their owning session. */
	session->pid_tracker.session = session;
	session->pid_tracker.tracker_type = TRACKER_PID;
	session->vpid_tracker.session = session;
	session->vpid_tracker.tracker_type = TRACKER_VPID;
	session->uid_tracker.session = session;
	session->uid_tracker.tracker_type = TRACKER_UID;
	session->vuid_tracker.session = session;
	session->vuid_tracker.tracker_type = TRACKER_VUID;
	session->gid_tracker.session = session;
	session->gid_tracker.tracker_type = TRACKER_GID;
	session->vgid_tracker.session = session;
	session->vgid_tracker.tracker_type = TRACKER_VGID;
	mutex_unlock(&sessions_mutex);
	return session;

err_free_cache:
	kfree(metadata_cache);
err_free_session:
	lttng_kvfree(session);
err:
	mutex_unlock(&sessions_mutex);
	return NULL;
}
200
201 static
202 struct lttng_counter_transport *lttng_counter_transport_find(const char *name)
203 {
204 struct lttng_counter_transport *transport;
205
206 list_for_each_entry(transport, &lttng_counter_transport_list, node) {
207 if (!strcmp(transport->name, name))
208 return transport;
209 }
210 return NULL;
211 }
212
213 struct lttng_counter *lttng_kernel_counter_create(
214 const char *counter_transport_name,
215 size_t number_dimensions, const size_t *dimensions_sizes)
216 {
217 struct lttng_counter *counter = NULL;
218 struct lttng_counter_transport *counter_transport = NULL;
219
220 counter_transport = lttng_counter_transport_find(counter_transport_name);
221 if (!counter_transport) {
222 printk(KERN_WARNING "LTTng: counter transport %s not found.\n",
223 counter_transport_name);
224 goto notransport;
225 }
226 if (!try_module_get(counter_transport->owner)) {
227 printk(KERN_WARNING "LTTng: Can't lock counter transport module.\n");
228 goto notransport;
229 }
230
231 counter = kzalloc(sizeof(struct lttng_counter), GFP_KERNEL);
232 if (!counter)
233 goto nomem;
234
235 /* Create trigger error counter. */
236 counter->ops = &counter_transport->ops;
237 counter->transport = counter_transport;
238
239 counter->counter = counter->ops->counter_create(
240 number_dimensions, dimensions_sizes, 0);
241 if (!counter->counter) {
242 goto create_error;
243 }
244
245 return counter;
246
247 create_error:
248 kfree(counter);
249 nomem:
250 if (counter_transport)
251 module_put(counter_transport->owner);
252 notransport:
253 return NULL;
254 }
255
256 struct lttng_trigger_group *lttng_trigger_group_create(void)
257 {
258 struct lttng_transport *transport = NULL;
259 struct lttng_trigger_group *trigger_group;
260 const char *transport_name = "relay-trigger";
261 size_t subbuf_size = 4096; //TODO
262 size_t num_subbuf = 16; //TODO
263 unsigned int switch_timer_interval = 0;
264 unsigned int read_timer_interval = 0;
265 int i;
266
267 mutex_lock(&sessions_mutex);
268
269 transport = lttng_transport_find(transport_name);
270 if (!transport) {
271 printk(KERN_WARNING "LTTng transport %s not found\n",
272 transport_name);
273 goto notransport;
274 }
275 if (!try_module_get(transport->owner)) {
276 printk(KERN_WARNING "LTTng: Can't lock transport module.\n");
277 goto notransport;
278 }
279
280 trigger_group = lttng_kvzalloc(sizeof(struct lttng_trigger_group),
281 GFP_KERNEL);
282 if (!trigger_group)
283 goto nomem;
284
285 trigger_group->ops = &transport->ops;
286 trigger_group->chan = transport->ops.channel_create(transport_name,
287 trigger_group, NULL, subbuf_size, num_subbuf,
288 switch_timer_interval, read_timer_interval);
289 if (!trigger_group->chan)
290 goto create_error;
291
292 trigger_group->transport = transport;
293 INIT_LIST_HEAD(&trigger_group->enablers_head);
294 INIT_LIST_HEAD(&trigger_group->triggers_head);
295 for (i = 0; i < LTTNG_TRIGGER_HT_SIZE; i++)
296 INIT_HLIST_HEAD(&trigger_group->triggers_ht.table[i]);
297
298 list_add(&trigger_group->node, &trigger_groups);
299 mutex_unlock(&sessions_mutex);
300
301 return trigger_group;
302
303 create_error:
304 lttng_kvfree(trigger_group);
305 nomem:
306 if (transport)
307 module_put(transport->owner);
308 notransport:
309 mutex_unlock(&sessions_mutex);
310 return NULL;
311 }
312
313 void metadata_cache_destroy(struct kref *kref)
314 {
315 struct lttng_metadata_cache *cache =
316 container_of(kref, struct lttng_metadata_cache, refcount);
317 vfree(cache->data);
318 kfree(cache);
319 }
320
/*
 * Tear down a tracing session: unregister all instrumentation, wait for
 * in-flight probes to drain, then destroy events, channels and trackers.
 *
 * Teardown order matters: events must be unregistered and a tracing grace
 * period observed (synchronize_trace) before their data structures are
 * freed, so no probe can touch freed memory.
 */
void lttng_session_destroy(struct lttng_session *session)
{
	struct lttng_channel *chan, *tmpchan;
	struct lttng_event *event, *tmpevent;
	struct lttng_metadata_stream *metadata_stream;
	struct lttng_event_enabler *event_enabler, *tmp_event_enabler;
	int ret;

	mutex_lock(&sessions_mutex);
	WRITE_ONCE(session->active, 0);
	list_for_each_entry(chan, &session->chan, list) {
		ret = lttng_syscalls_unregister_event(chan);
		WARN_ON(ret);
	}
	list_for_each_entry(event, &session->events, list) {
		ret = _lttng_event_unregister(event);
		WARN_ON(ret);
	}
	synchronize_trace();	/* Wait for in-flight events to complete */
	list_for_each_entry(chan, &session->chan, list) {
		ret = lttng_syscalls_destroy_event(chan);
		WARN_ON(ret);
	}
	/* _safe variants: destruction removes entries from the lists. */
	list_for_each_entry_safe(event_enabler, tmp_event_enabler,
			&session->enablers_head, node)
		lttng_event_enabler_destroy(event_enabler);
	list_for_each_entry_safe(event, tmpevent, &session->events, list)
		_lttng_event_destroy(event);
	list_for_each_entry_safe(chan, tmpchan, &session->chan, list) {
		/* Metadata channels are released separately, never via session list. */
		BUG_ON(chan->channel_type == METADATA_CHANNEL);
		_lttng_channel_destroy(chan);
	}
	/* Wake up readers blocked on the metadata streams. */
	mutex_lock(&session->metadata_cache->lock);
	list_for_each_entry(metadata_stream, &session->metadata_cache->metadata_stream, list)
		_lttng_metadata_channel_hangup(metadata_stream);
	mutex_unlock(&session->metadata_cache->lock);
	lttng_id_tracker_destroy(&session->pid_tracker, false);
	lttng_id_tracker_destroy(&session->vpid_tracker, false);
	lttng_id_tracker_destroy(&session->uid_tracker, false);
	lttng_id_tracker_destroy(&session->vuid_tracker, false);
	lttng_id_tracker_destroy(&session->gid_tracker, false);
	lttng_id_tracker_destroy(&session->vgid_tracker, false);
	/* Drop the session's reference; frees the cache if streams are gone. */
	kref_put(&session->metadata_cache->refcount, metadata_cache_destroy);
	list_del(&session->list);
	mutex_unlock(&sessions_mutex);
	lttng_kvfree(session);
}
368
/*
 * Tear down a trigger group: unregister triggers, drain in-flight probes
 * and pending wakeup irq_work, then free triggers, the error counter and
 * the notification channel.
 *
 * NULL-safe: a NULL group is ignored.
 */
void lttng_trigger_group_destroy(struct lttng_trigger_group *trigger_group)
{
	struct lttng_trigger_enabler *trigger_enabler, *tmp_trigger_enabler;
	struct lttng_trigger *trigger, *tmptrigger;
	int ret;

	if (!trigger_group)
		return;

	mutex_lock(&sessions_mutex);

	ret = lttng_syscalls_unregister_trigger(trigger_group);
	WARN_ON(ret);

	list_for_each_entry_safe(trigger, tmptrigger,
			&trigger_group->triggers_head, list) {
		ret = _lttng_trigger_unregister(trigger);
		WARN_ON(ret);
	}

	synchronize_trace();	/* Wait for in-flight triggers to complete */

	/* Flush any pending reader-wakeup work before freeing the group. */
	irq_work_sync(&trigger_group->wakeup_pending);

	list_for_each_entry_safe(trigger_enabler, tmp_trigger_enabler,
			&trigger_group->enablers_head, node)
		lttng_trigger_enabler_destroy(trigger_enabler);

	list_for_each_entry_safe(trigger, tmptrigger,
			&trigger_group->triggers_head, list)
		_lttng_trigger_destroy(trigger);

	/* The error counter is optional; release it and its transport module. */
	if (trigger_group->error_counter) {
		struct lttng_counter *error_counter = trigger_group->error_counter;
		error_counter->ops->counter_destroy(error_counter->counter);
		module_put(error_counter->transport->owner);
		lttng_kvfree(error_counter);
		trigger_group->error_counter = NULL;
	}
	trigger_group->ops->channel_destroy(trigger_group->chan);
	module_put(trigger_group->transport->owner);
	list_del(&trigger_group->node);
	mutex_unlock(&sessions_mutex);
	lttng_kvfree(trigger_group);
}
414
/*
 * Trigger a state dump for the given session, serialized against other
 * session operations by the sessions mutex. Returns the result of
 * lttng_statedump_start().
 */
int lttng_session_statedump(struct lttng_session *session)
{
	int ret;

	mutex_lock(&sessions_mutex);
	ret = lttng_statedump_start(session);
	mutex_unlock(&sessions_mutex);
	return ret;
}
424
/*
 * Activate a tracing session: sync enablers, freeze each channel's header
 * type, clear quiescent stream state, mark the session active and emit the
 * metadata + kernel state dump.
 *
 * Returns 0 on success, -EBUSY if already active, or the statedump error
 * (in which case the session is flipped back to inactive).
 */
int lttng_session_enable(struct lttng_session *session)
{
	int ret = 0;
	struct lttng_channel *chan;

	mutex_lock(&sessions_mutex);
	if (session->active) {
		ret = -EBUSY;
		goto end;
	}

	/* Set transient enabler state to "enabled" */
	session->tstate = 1;

	/* We need to sync enablers with session before activation. */
	lttng_session_sync_event_enablers(session);

	/*
	 * Snapshot the number of events per channel to know the type of header
	 * we need to use.
	 */
	list_for_each_entry(chan, &session->chan, list) {
		if (chan->header_type)
			continue;		/* don't change it if session stop/restart */
		/* Fewer than 31 event ids fit the compact header encoding. */
		if (chan->free_event_id < 31)
			chan->header_type = 1;	/* compact */
		else
			chan->header_type = 2;	/* large */
	}

	/* Clear each stream's quiescent state. */
	list_for_each_entry(chan, &session->chan, list) {
		if (chan->channel_type != METADATA_CHANNEL)
			lib_ring_buffer_clear_quiescent_channel(chan->chan);
	}

	WRITE_ONCE(session->active, 1);
	WRITE_ONCE(session->been_active, 1);
	ret = _lttng_session_metadata_statedump(session);
	if (ret) {
		/* Roll back activation if metadata could not be dumped. */
		WRITE_ONCE(session->active, 0);
		goto end;
	}
	ret = lttng_statedump_start(session);
	if (ret)
		WRITE_ONCE(session->active, 0);
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
475
/*
 * Deactivate a tracing session: mark it inactive, sync enablers to the
 * disabled state, and set each data stream quiescent so readers can drain.
 *
 * Returns 0 on success, -EBUSY if the session is not active.
 */
int lttng_session_disable(struct lttng_session *session)
{
	int ret = 0;
	struct lttng_channel *chan;

	mutex_lock(&sessions_mutex);
	if (!session->active) {
		ret = -EBUSY;
		goto end;
	}
	/* Stop tracing before tearing down enabler state. */
	WRITE_ONCE(session->active, 0);

	/* Set transient enabler state to "disabled" */
	session->tstate = 0;
	lttng_session_sync_event_enablers(session);

	/* Set each stream's quiescent state. */
	list_for_each_entry(chan, &session->chan, list) {
		if (chan->channel_type != METADATA_CHANNEL)
			lib_ring_buffer_set_quiescent_channel(chan->chan);
	}
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
501
/*
 * Discard and re-emit the session's metadata: wipe the metadata cache,
 * bump its version, reset per-stream read/write positions and per-object
 * "dumped" flags, then redo the metadata state dump.
 *
 * Returns 0 on success, -EBUSY if the session is not active, or the
 * statedump error.
 */
int lttng_session_metadata_regenerate(struct lttng_session *session)
{
	int ret = 0;
	struct lttng_channel *chan;
	struct lttng_event *event;
	struct lttng_metadata_cache *cache = session->metadata_cache;
	struct lttng_metadata_stream *stream;

	mutex_lock(&sessions_mutex);
	if (!session->active) {
		ret = -EBUSY;
		goto end;
	}

	/* Reset the cache under its own lock; streams restart from offset 0. */
	mutex_lock(&cache->lock);
	memset(cache->data, 0, cache->cache_alloc);
	cache->metadata_written = 0;
	cache->version++;
	list_for_each_entry(stream, &session->metadata_cache->metadata_stream, list) {
		stream->metadata_out = 0;
		stream->metadata_in = 0;
	}
	mutex_unlock(&cache->lock);

	/* Force every session/channel/event to be re-described. */
	session->metadata_dumped = 0;
	list_for_each_entry(chan, &session->chan, list) {
		chan->metadata_dumped = 0;
	}

	list_for_each_entry(event, &session->events, list) {
		event->metadata_dumped = 0;
	}

	ret = _lttng_session_metadata_statedump(session);

end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
541
/*
 * Enable a data channel: sync the enabler state first, then atomically
 * publish the enabled flag so probes observe a fully-synced channel.
 *
 * Returns 0 on success, -EPERM for metadata channels (always implicitly
 * enabled), -EEXIST if already enabled.
 */
int lttng_channel_enable(struct lttng_channel *channel)
{
	int ret = 0;

	mutex_lock(&sessions_mutex);
	if (channel->channel_type == METADATA_CHANNEL) {
		ret = -EPERM;
		goto end;
	}
	if (channel->enabled) {
		ret = -EEXIST;
		goto end;
	}
	/* Set transient enabler state to "enabled" */
	channel->tstate = 1;
	lttng_session_sync_event_enablers(channel->session);
	/* Set atomically the state to "enabled" */
	WRITE_ONCE(channel->enabled, 1);
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
564
/*
 * Disable a data channel: atomically clear the enabled flag first so
 * probes stop recording, then sync the enabler state.
 *
 * Returns 0 on success, -EPERM for metadata channels, -EEXIST if already
 * disabled.
 */
int lttng_channel_disable(struct lttng_channel *channel)
{
	int ret = 0;

	mutex_lock(&sessions_mutex);
	if (channel->channel_type == METADATA_CHANNEL) {
		ret = -EPERM;
		goto end;
	}
	if (!channel->enabled) {
		ret = -EEXIST;
		goto end;
	}
	/* Set atomically the state to "disabled" */
	WRITE_ONCE(channel->enabled, 0);
	/* Set transient enabler state to "disabled" */
	channel->tstate = 0;
	lttng_session_sync_event_enablers(channel->session);
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
587
/*
 * Enable a single event.
 *
 * Returns 0 on success; -EPERM for metadata-channel events; -EEXIST if
 * already enabled; -EINVAL for tracepoint/syscall events, whose enabled
 * state is driven by enablers rather than set directly.
 */
int lttng_event_enable(struct lttng_event *event)
{
	int ret = 0;

	mutex_lock(&sessions_mutex);
	if (event->chan->channel_type == METADATA_CHANNEL) {
		ret = -EPERM;
		goto end;
	}
	if (event->enabled) {
		ret = -EEXIST;
		goto end;
	}
	switch (event->instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:	/* Managed by enablers, not directly. */
	case LTTNG_KERNEL_SYSCALL:
		ret = -EINVAL;
		break;
	case LTTNG_KERNEL_KPROBE:
	case LTTNG_KERNEL_UPROBE:
	case LTTNG_KERNEL_NOOP:
		WRITE_ONCE(event->enabled, 1);
		break;
	case LTTNG_KERNEL_KRETPROBE:
		/* Kretprobes pair an entry and a return event; enable both. */
		ret = lttng_kretprobes_event_enable_state(event, 1);
		break;
	case LTTNG_KERNEL_FUNCTION:	/* Fall-through. */
	default:
		WARN_ON_ONCE(1);
		ret = -EINVAL;
	}
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
623
/*
 * Disable a single event. Mirror image of lttng_event_enable().
 *
 * Returns 0 on success; -EPERM for metadata-channel events; -EEXIST if
 * already disabled; -EINVAL for tracepoint/syscall events (enabler-driven).
 */
int lttng_event_disable(struct lttng_event *event)
{
	int ret = 0;

	mutex_lock(&sessions_mutex);
	if (event->chan->channel_type == METADATA_CHANNEL) {
		ret = -EPERM;
		goto end;
	}
	if (!event->enabled) {
		ret = -EEXIST;
		goto end;
	}
	switch (event->instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:	/* Managed by enablers, not directly. */
	case LTTNG_KERNEL_SYSCALL:
		ret = -EINVAL;
		break;
	case LTTNG_KERNEL_KPROBE:
	case LTTNG_KERNEL_UPROBE:
	case LTTNG_KERNEL_NOOP:
		WRITE_ONCE(event->enabled, 0);
		break;
	case LTTNG_KERNEL_KRETPROBE:
		/* Kretprobes pair an entry and a return event; disable both. */
		ret = lttng_kretprobes_event_enable_state(event, 0);
		break;
	case LTTNG_KERNEL_FUNCTION:	/* Fall-through. */
	default:
		WARN_ON_ONCE(1);
		ret = -EINVAL;
	}
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
659
/*
 * Enable a trigger.
 *
 * Returns 0 on success; -EEXIST if already enabled; -EINVAL for
 * tracepoint/syscall triggers (enabler-driven) and for instrumentation
 * types not supported by triggers.
 */
int lttng_trigger_enable(struct lttng_trigger *trigger)
{
	int ret = 0;

	mutex_lock(&sessions_mutex);
	if (trigger->enabled) {
		ret = -EEXIST;
		goto end;
	}
	switch (trigger->instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:	/* Managed by enablers, not directly. */
	case LTTNG_KERNEL_SYSCALL:
		ret = -EINVAL;
		break;
	case LTTNG_KERNEL_KPROBE:
	case LTTNG_KERNEL_UPROBE:
		WRITE_ONCE(trigger->enabled, 1);
		break;
	case LTTNG_KERNEL_FUNCTION:	/* Not valid trigger instrumentation. */
	case LTTNG_KERNEL_NOOP:
	case LTTNG_KERNEL_KRETPROBE:
	default:
		WARN_ON_ONCE(1);
		ret = -EINVAL;
	}
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
689
/*
 * Disable a trigger. Mirror image of lttng_trigger_enable().
 *
 * Returns 0 on success; -EEXIST if already disabled; -EINVAL for
 * enabler-driven or unsupported instrumentation types.
 */
int lttng_trigger_disable(struct lttng_trigger *trigger)
{
	int ret = 0;

	mutex_lock(&sessions_mutex);
	if (!trigger->enabled) {
		ret = -EEXIST;
		goto end;
	}
	switch (trigger->instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:	/* Managed by enablers, not directly. */
	case LTTNG_KERNEL_SYSCALL:
		ret = -EINVAL;
		break;
	case LTTNG_KERNEL_KPROBE:
	case LTTNG_KERNEL_UPROBE:
		WRITE_ONCE(trigger->enabled, 0);
		break;
	case LTTNG_KERNEL_FUNCTION:	/* Not valid trigger instrumentation. */
	case LTTNG_KERNEL_NOOP:
	case LTTNG_KERNEL_KRETPROBE:
	default:
		WARN_ON_ONCE(1);
		ret = -EINVAL;
	}
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
719
/*
 * Create a channel in a session through the named ring-buffer transport.
 *
 * Refuses to add non-metadata channels to a session that has ever been
 * active (header types are frozen at first activation). Pins the transport
 * module until _lttng_channel_destroy(). Returns the channel, or NULL on
 * failure.
 */
struct lttng_channel *lttng_channel_create(struct lttng_session *session,
				       const char *transport_name,
				       void *buf_addr,
				       size_t subbuf_size, size_t num_subbuf,
				       unsigned int switch_timer_interval,
				       unsigned int read_timer_interval,
				       enum channel_type channel_type)
{
	struct lttng_channel *chan;
	struct lttng_transport *transport = NULL;

	mutex_lock(&sessions_mutex);
	if (session->been_active && channel_type != METADATA_CHANNEL)
		goto active;	/* Refuse to add channel to active session */
	transport = lttng_transport_find(transport_name);
	if (!transport) {
		printk(KERN_WARNING "LTTng: transport %s not found\n",
		       transport_name);
		goto notransport;
	}
	if (!try_module_get(transport->owner)) {
		printk(KERN_WARNING "LTTng: Can't lock transport module.\n");
		goto notransport;
	}
	chan = kzalloc(sizeof(struct lttng_channel), GFP_KERNEL);
	if (!chan)
		goto nomem;
	chan->session = session;
	chan->id = session->free_chan_id++;
	chan->ops = &transport->ops;
	/*
	 * Note: the channel creation op already writes into the packet
	 * headers. Therefore the "chan" information used as input
	 * should be already accessible.
	 */
	chan->chan = transport->ops.channel_create(transport_name,
			chan, buf_addr, subbuf_size, num_subbuf,
			switch_timer_interval, read_timer_interval);
	if (!chan->chan)
		goto create_error;
	/* New channels start enabled; enabler sync sees tstate == 1. */
	chan->tstate = 1;
	chan->enabled = 1;
	chan->transport = transport;
	chan->channel_type = channel_type;
	list_add(&chan->list, &session->chan);
	mutex_unlock(&sessions_mutex);
	return chan;

create_error:
	kfree(chan);
nomem:
	if (transport)
		module_put(transport->owner);
notransport:
active:
	mutex_unlock(&sessions_mutex);
	return NULL;
}
778
/*
 * Only used internally at session destruction for per-cpu channels, and
 * when metadata channel is released.
 * Needs to be called with sessions mutex held.
 */
static
void _lttng_channel_destroy(struct lttng_channel *chan)
{
	/* Destroy the ring buffer before releasing the transport module. */
	chan->ops->channel_destroy(chan->chan);
	module_put(chan->transport->owner);
	list_del(&chan->list);
	lttng_destroy_context(chan->ctx);
	kfree(chan);
}
793
/*
 * Release a metadata channel. Exported entry point wrapping
 * _lttng_channel_destroy() with the required sessions mutex.
 */
void lttng_metadata_channel_destroy(struct lttng_channel *chan)
{
	BUG_ON(chan->channel_type != METADATA_CHANNEL);

	/* Protect the metadata cache with the sessions_mutex. */
	mutex_lock(&sessions_mutex);
	_lttng_channel_destroy(chan);
	mutex_unlock(&sessions_mutex);
}
EXPORT_SYMBOL_GPL(lttng_metadata_channel_destroy);
804
/*
 * Mark a metadata stream finalized and wake up any blocked reader so it
 * can observe end-of-stream.
 */
static
void _lttng_metadata_channel_hangup(struct lttng_metadata_stream *stream)
{
	stream->finalized = 1;
	wake_up_interruptible(&stream->read_wait);
}
811
812
/*
 * Supports event creation while tracing session is active.
 * Needs to be called with sessions mutex held.
 *
 * Creates one lttng_event (two for kretprobes: entry + return), registers
 * the underlying instrumentation where applicable, emits its metadata,
 * and links it into the session's event list and hash table.
 *
 * Returns the new event, or ERR_PTR: -EMFILE when the channel's event id
 * space is exhausted, -EEXIST on duplicate (name, channel), -ENOMEM,
 * -ENOENT when a tracepoint description is missing, -EINVAL on invalid
 * instrumentation or registration failure.
 */
struct lttng_event *_lttng_event_create(struct lttng_channel *chan,
				struct lttng_kernel_event *event_param,
				void *filter,
				const struct lttng_event_desc *event_desc,
				enum lttng_kernel_instrumentation itype)
{
	struct lttng_session *session = chan->session;
	struct lttng_event *event;
	const char *event_name;
	struct hlist_head *head;
	int ret;

	/* -1U marks exhaustion of the channel's event id counter. */
	if (chan->free_event_id == -1U) {
		ret = -EMFILE;
		goto full;
	}

	/* Pick the name used for lookup/registration per instrumentation type. */
	switch (itype) {
	case LTTNG_KERNEL_TRACEPOINT:
		event_name = event_desc->name;
		break;
	case LTTNG_KERNEL_KPROBE:
	case LTTNG_KERNEL_UPROBE:
	case LTTNG_KERNEL_KRETPROBE:
	case LTTNG_KERNEL_NOOP:
	case LTTNG_KERNEL_SYSCALL:
		event_name = event_param->name;
		break;
	case LTTNG_KERNEL_FUNCTION:	/* Fall-through. */
	default:
		WARN_ON_ONCE(1);
		ret = -EINVAL;
		goto type_error;
	}

	/* Reject duplicate (event name, channel) pairs. */
	head = utils_borrow_hash_table_bucket(session->events_ht.table,
		LTTNG_EVENT_HT_SIZE, event_name);
	lttng_hlist_for_each_entry(event, head, hlist) {
		WARN_ON_ONCE(!event->desc);
		if (!strncmp(event->desc->name, event_name,
					LTTNG_KERNEL_SYM_NAME_LEN - 1)
				&& chan == event->chan) {
			ret = -EEXIST;
			goto exist;
		}
	}

	event = kmem_cache_zalloc(event_cache, GFP_KERNEL);
	if (!event) {
		ret = -ENOMEM;
		goto cache_error;
	}
	event->chan = chan;
	event->filter = filter;
	event->id = chan->free_event_id++;
	event->instrumentation = itype;
	event->evtype = LTTNG_TYPE_EVENT;
	INIT_LIST_HEAD(&event->filter_bytecode_runtime_head);
	INIT_LIST_HEAD(&event->enablers_ref_head);

	switch (itype) {
	case LTTNG_KERNEL_TRACEPOINT:
		/* Event will be enabled by enabler sync. */
		event->enabled = 0;
		event->registered = 0;
		event->desc = lttng_event_desc_get(event_name);
		if (!event->desc) {
			ret = -ENOENT;
			goto register_error;
		}
		/* Populate lttng_event structure before event registration. */
		smp_wmb();
		break;
	case LTTNG_KERNEL_KPROBE:
		/*
		 * Needs to be explicitly enabled after creation, since
		 * we may want to apply filters.
		 */
		event->enabled = 0;
		event->registered = 1;
		/*
		 * Populate lttng_event structure before event
		 * registration.
		 */
		smp_wmb();
		ret = lttng_kprobes_register_event(event_name,
				event_param->u.kprobe.symbol_name,
				event_param->u.kprobe.offset,
				event_param->u.kprobe.addr,
				event);
		if (ret) {
			ret = -EINVAL;
			goto register_error;
		}
		/* Pin the module providing the kprobe event description. */
		ret = try_module_get(event->desc->owner);
		WARN_ON_ONCE(!ret);
		break;
	case LTTNG_KERNEL_KRETPROBE:
	{
		struct lttng_event *event_return;

		/* kretprobe defines 2 events */
		/*
		 * Needs to be explicitly enabled after creation, since
		 * we may want to apply filters.
		 */
		event->enabled = 0;
		event->registered = 1;
		event_return =
			kmem_cache_zalloc(event_cache, GFP_KERNEL);
		if (!event_return) {
			ret = -ENOMEM;
			goto register_error;
		}
		event_return->chan = chan;
		event_return->filter = filter;
		event_return->id = chan->free_event_id++;
		event_return->enabled = 0;
		event_return->registered = 1;
		event_return->instrumentation = itype;
		/*
		 * Populate lttng_event structure before kretprobe registration.
		 */
		smp_wmb();
		ret = lttng_kretprobes_register(event_name,
				event_param->u.kretprobe.symbol_name,
				event_param->u.kretprobe.offset,
				event_param->u.kretprobe.addr,
				event, event_return);
		if (ret) {
			kmem_cache_free(event_cache, event_return);
			ret = -EINVAL;
			goto register_error;
		}
		/* Take 2 refs on the module: one per event. */
		ret = try_module_get(event->desc->owner);
		WARN_ON_ONCE(!ret);
		ret = try_module_get(event->desc->owner);
		WARN_ON_ONCE(!ret);
		/* The return event's metadata is dumped here; entry event below. */
		ret = _lttng_event_metadata_statedump(chan->session, chan,
						    event_return);
		WARN_ON_ONCE(ret > 0);
		if (ret) {
			kmem_cache_free(event_cache, event_return);
			module_put(event->desc->owner);
			module_put(event->desc->owner);
			goto statedump_error;
		}
		list_add(&event_return->list, &chan->session->events);
		break;
	}
	case LTTNG_KERNEL_NOOP:
	case LTTNG_KERNEL_SYSCALL:
		/*
		 * Needs to be explicitly enabled after creation, since
		 * we may want to apply filters.
		 */
		event->enabled = 0;
		event->registered = 0;
		event->desc = event_desc;
		/* ENTRYEXIT/ABI_ALL must be expanded by the caller first. */
		switch (event_param->u.syscall.entryexit) {
		case LTTNG_KERNEL_SYSCALL_ENTRYEXIT:
			ret = -EINVAL;
			goto register_error;
		case LTTNG_KERNEL_SYSCALL_ENTRY:
			event->u.syscall.entryexit = LTTNG_SYSCALL_ENTRY;
			break;
		case LTTNG_KERNEL_SYSCALL_EXIT:
			event->u.syscall.entryexit = LTTNG_SYSCALL_EXIT;
			break;
		}
		switch (event_param->u.syscall.abi) {
		case LTTNG_KERNEL_SYSCALL_ABI_ALL:
			ret = -EINVAL;
			goto register_error;
		case LTTNG_KERNEL_SYSCALL_ABI_NATIVE:
			event->u.syscall.abi = LTTNG_SYSCALL_ABI_NATIVE;
			break;
		case LTTNG_KERNEL_SYSCALL_ABI_COMPAT:
			event->u.syscall.abi = LTTNG_SYSCALL_ABI_COMPAT;
			break;
		}
		if (!event->desc) {
			ret = -EINVAL;
			goto register_error;
		}
		break;
	case LTTNG_KERNEL_UPROBE:
		/*
		 * Needs to be explicitly enabled after creation, since
		 * we may want to apply filters.
		 */
		event->enabled = 0;
		event->registered = 1;

		/*
		 * Populate lttng_event structure before event
		 * registration.
		 */
		smp_wmb();

		ret = lttng_uprobes_register_event(event_param->name,
				event_param->u.uprobe.fd,
				event);
		if (ret)
			goto register_error;
		ret = try_module_get(event->desc->owner);
		WARN_ON_ONCE(!ret);
		break;
	case LTTNG_KERNEL_FUNCTION:	/* Fall-through */
	default:
		WARN_ON_ONCE(1);
		ret = -EINVAL;
		goto register_error;
	}
	ret = _lttng_event_metadata_statedump(chan->session, chan, event);
	WARN_ON_ONCE(ret > 0);
	if (ret) {
		goto statedump_error;
	}
	/* Publish: hash bucket for lookups, session list for teardown. */
	hlist_add_head(&event->hlist, head);
	list_add(&event->list, &chan->session->events);
	return event;

statedump_error:
	/* If a statedump error occurs, events will not be readable. */
register_error:
	kmem_cache_free(event_cache, event);
cache_error:
exist:
type_error:
full:
	return ERR_PTR(ret);
}
1051
1052 struct lttng_trigger *_lttng_trigger_create(
1053 const struct lttng_event_desc *event_desc,
1054 uint64_t id, uint64_t error_counter_index,
1055 struct lttng_trigger_group *trigger_group,
1056 struct lttng_kernel_trigger *trigger_param, void *filter,
1057 enum lttng_kernel_instrumentation itype)
1058 {
1059 struct lttng_trigger *trigger;
1060 const char *event_name;
1061 struct hlist_head *head;
1062 int ret;
1063 size_t dimension_index[1];
1064
1065 switch (itype) {
1066 case LTTNG_KERNEL_TRACEPOINT:
1067 event_name = event_desc->name;
1068 break;
1069 case LTTNG_KERNEL_KPROBE:
1070 case LTTNG_KERNEL_UPROBE:
1071 case LTTNG_KERNEL_SYSCALL:
1072 event_name = trigger_param->name;
1073 break;
1074 case LTTNG_KERNEL_KRETPROBE:
1075 case LTTNG_KERNEL_FUNCTION:
1076 case LTTNG_KERNEL_NOOP:
1077 default:
1078 WARN_ON_ONCE(1);
1079 ret = -EINVAL;
1080 goto type_error;
1081 }
1082
1083 head = utils_borrow_hash_table_bucket(trigger_group->triggers_ht.table,
1084 LTTNG_TRIGGER_HT_SIZE, event_name);
1085 lttng_hlist_for_each_entry(trigger, head, hlist) {
1086 WARN_ON_ONCE(!trigger->desc);
1087 if (!strncmp(trigger->desc->name, event_name,
1088 LTTNG_KERNEL_SYM_NAME_LEN - 1)
1089 && trigger_group == trigger->group
1090 && id == trigger->id) {
1091 ret = -EEXIST;
1092 goto exist;
1093 }
1094 }
1095
1096 trigger = kmem_cache_zalloc(trigger_cache, GFP_KERNEL);
1097 if (!trigger) {
1098 ret = -ENOMEM;
1099 goto cache_error;
1100 }
1101 trigger->group = trigger_group;
1102 trigger->id = id;
1103 trigger->error_counter_index = error_counter_index;
1104 trigger->num_captures = 0;
1105 trigger->filter = filter;
1106 trigger->instrumentation = itype;
1107 trigger->evtype = LTTNG_TYPE_EVENT;
1108 trigger->send_notification = lttng_trigger_notification_send;
1109 INIT_LIST_HEAD(&trigger->filter_bytecode_runtime_head);
1110 INIT_LIST_HEAD(&trigger->capture_bytecode_runtime_head);
1111 INIT_LIST_HEAD(&trigger->enablers_ref_head);
1112
1113 switch (itype) {
1114 case LTTNG_KERNEL_TRACEPOINT:
1115 /* Event will be enabled by enabler sync. */
1116 trigger->enabled = 0;
1117 trigger->registered = 0;
1118 trigger->desc = lttng_event_desc_get(event_name);
1119 if (!trigger->desc) {
1120 ret = -ENOENT;
1121 goto register_error;
1122 }
1123 /* Populate lttng_trigger structure before event registration. */
1124 smp_wmb();
1125 break;
1126 case LTTNG_KERNEL_KPROBE:
1127 /*
1128 * Needs to be explicitly enabled after creation, since
1129 * we may want to apply filters.
1130 */
1131 trigger->enabled = 0;
1132 trigger->registered = 1;
1133 /*
1134 * Populate lttng_trigger structure before event
1135 * registration.
1136 */
1137 smp_wmb();
1138 ret = lttng_kprobes_register_trigger(
1139 trigger_param->u.kprobe.symbol_name,
1140 trigger_param->u.kprobe.offset,
1141 trigger_param->u.kprobe.addr,
1142 trigger);
1143 if (ret) {
1144 ret = -EINVAL;
1145 goto register_error;
1146 }
1147 ret = try_module_get(trigger->desc->owner);
1148 WARN_ON_ONCE(!ret);
1149 break;
1150 case LTTNG_KERNEL_NOOP:
1151 case LTTNG_KERNEL_SYSCALL:
1152 /*
1153 * Needs to be explicitly enabled after creation, since
1154 * we may want to apply filters.
1155 */
1156 trigger->enabled = 0;
1157 trigger->registered = 0;
1158 trigger->desc = event_desc;
1159 if (!trigger->desc) {
1160 ret = -EINVAL;
1161 goto register_error;
1162 }
1163 break;
1164 case LTTNG_KERNEL_UPROBE:
1165 /*
1166 * Needs to be explicitly enabled after creation, since
1167 * we may want to apply filters.
1168 */
1169 trigger->enabled = 0;
1170 trigger->registered = 1;
1171
1172 /*
1173 * Populate lttng_trigger structure before trigger
1174 * registration.
1175 */
1176 smp_wmb();
1177
1178 ret = lttng_uprobes_register_trigger(trigger_param->name,
1179 trigger_param->u.uprobe.fd,
1180 trigger);
1181 if (ret)
1182 goto register_error;
1183 ret = try_module_get(trigger->desc->owner);
1184 WARN_ON_ONCE(!ret);
1185 break;
1186 case LTTNG_KERNEL_KRETPROBE:
1187 case LTTNG_KERNEL_FUNCTION:
1188 default:
1189 WARN_ON_ONCE(1);
1190 ret = -EINVAL;
1191 goto register_error;
1192 }
1193
1194 list_add(&trigger->list, &trigger_group->triggers_head);
1195 hlist_add_head(&trigger->hlist, head);
1196
1197 /*
1198 * Clear the error counter bucket. The sessiond keeps track of which
1199 * bucket is currently in use. We trust it.
1200 */
1201 if (trigger_group->error_counter) {
1202 /*
1203 * Check that the index is within the boundary of the counter.
1204 */
1205 if (trigger->error_counter_index >= trigger_group->error_counter_len) {
1206 printk(KERN_INFO "LTTng: Trigger: Error counter index out-of-bound: counter-len=%zu, index=%llu\n",
1207 trigger_group->error_counter_len, trigger->error_counter_index);
1208 ret = -EINVAL;
1209 goto register_error;
1210 }
1211
1212 dimension_index[0] = trigger->error_counter_index;
1213 ret = trigger_group->error_counter->ops->counter_clear(
1214 trigger_group->error_counter->counter,
1215 dimension_index);
1216 if (ret) {
1217 printk(KERN_INFO "LTTng: Trigger: Unable to clear error counter bucket %llu\n",
1218 trigger->error_counter_index);
1219 goto register_error;
1220 }
1221 }
1222
1223 return trigger;
1224
1225 register_error:
1226 kmem_cache_free(trigger_cache, trigger);
1227 cache_error:
1228 exist:
1229 type_error:
1230 return ERR_PTR(ret);
1231 }
1232
1233 int lttng_kernel_counter_value(struct lttng_counter *counter,
1234 const size_t *dim_indexes, int64_t *val)
1235 {
1236 int ret;
1237 bool overflow, underflow;
1238
1239 ret = counter->ops->counter_aggregate(counter->counter, dim_indexes,
1240 val, &overflow, &underflow);
1241 if (ret) {
1242 printk(KERN_WARNING "LTTng: Error getting counter value.\n");
1243 goto error;
1244 }
1245
1246 if (overflow)
1247 printk(KERN_WARNING "LTTng: counter overflow detected.\n");
1248
1249 if (underflow)
1250 printk(KERN_WARNING "LTTng: counter underflow detected.\n");
1251
1252 error:
1253 return ret;
1254 }
1255
1256 struct lttng_event *lttng_event_create(struct lttng_channel *chan,
1257 struct lttng_kernel_event *event_param,
1258 void *filter,
1259 const struct lttng_event_desc *event_desc,
1260 enum lttng_kernel_instrumentation itype)
1261 {
1262 struct lttng_event *event;
1263
1264 mutex_lock(&sessions_mutex);
1265 event = _lttng_event_create(chan, event_param, filter, event_desc,
1266 itype);
1267 mutex_unlock(&sessions_mutex);
1268 return event;
1269 }
1270
1271 struct lttng_trigger *lttng_trigger_create(
1272 const struct lttng_event_desc *event_desc,
1273 uint64_t id, uint64_t error_counter_index,
1274 struct lttng_trigger_group *trigger_group,
1275 struct lttng_kernel_trigger *trigger_param, void *filter,
1276 enum lttng_kernel_instrumentation itype)
1277 {
1278 struct lttng_trigger *trigger;
1279
1280 mutex_lock(&sessions_mutex);
1281 trigger = _lttng_trigger_create(event_desc, id, error_counter_index,
1282 trigger_group, trigger_param, filter, itype);
1283 mutex_unlock(&sessions_mutex);
1284 return trigger;
1285 }
1286
1287 /* Only used for tracepoints for now. */
1288 static
1289 void register_event(struct lttng_event *event)
1290 {
1291 const struct lttng_event_desc *desc;
1292 int ret = -EINVAL;
1293
1294 if (event->registered)
1295 return;
1296
1297 desc = event->desc;
1298 switch (event->instrumentation) {
1299 case LTTNG_KERNEL_TRACEPOINT:
1300 ret = lttng_wrapper_tracepoint_probe_register(desc->kname,
1301 desc->probe_callback,
1302 event);
1303 break;
1304 case LTTNG_KERNEL_SYSCALL:
1305 ret = lttng_syscall_filter_enable_event(event->chan, event);
1306 break;
1307 case LTTNG_KERNEL_KPROBE:
1308 case LTTNG_KERNEL_UPROBE:
1309 case LTTNG_KERNEL_KRETPROBE:
1310 case LTTNG_KERNEL_NOOP:
1311 ret = 0;
1312 break;
1313 case LTTNG_KERNEL_FUNCTION: /* Fall-through */
1314 default:
1315 WARN_ON_ONCE(1);
1316 }
1317 if (!ret)
1318 event->registered = 1;
1319 }
1320
1321 /*
1322 * Only used internally at session destruction.
1323 */
1324 int _lttng_event_unregister(struct lttng_event *event)
1325 {
1326 const struct lttng_event_desc *desc;
1327 int ret = -EINVAL;
1328
1329 if (!event->registered)
1330 return 0;
1331
1332 desc = event->desc;
1333 switch (event->instrumentation) {
1334 case LTTNG_KERNEL_TRACEPOINT:
1335 ret = lttng_wrapper_tracepoint_probe_unregister(event->desc->kname,
1336 event->desc->probe_callback,
1337 event);
1338 break;
1339 case LTTNG_KERNEL_KPROBE:
1340 lttng_kprobes_unregister_event(event);
1341 ret = 0;
1342 break;
1343 case LTTNG_KERNEL_KRETPROBE:
1344 lttng_kretprobes_unregister(event);
1345 ret = 0;
1346 break;
1347 case LTTNG_KERNEL_SYSCALL:
1348 ret = lttng_syscall_filter_disable_event(event->chan, event);
1349 break;
1350 case LTTNG_KERNEL_NOOP:
1351 ret = 0;
1352 break;
1353 case LTTNG_KERNEL_UPROBE:
1354 lttng_uprobes_unregister_event(event);
1355 ret = 0;
1356 break;
1357 case LTTNG_KERNEL_FUNCTION: /* Fall-through */
1358 default:
1359 WARN_ON_ONCE(1);
1360 }
1361 if (!ret)
1362 event->registered = 0;
1363 return ret;
1364 }
1365
1366 /* Only used for tracepoints for now. */
1367 static
1368 void register_trigger(struct lttng_trigger *trigger)
1369 {
1370 const struct lttng_event_desc *desc;
1371 int ret = -EINVAL;
1372
1373 if (trigger->registered)
1374 return;
1375
1376 desc = trigger->desc;
1377 switch (trigger->instrumentation) {
1378 case LTTNG_KERNEL_TRACEPOINT:
1379 ret = lttng_wrapper_tracepoint_probe_register(desc->kname,
1380 desc->trigger_callback,
1381 trigger);
1382 break;
1383 case LTTNG_KERNEL_SYSCALL:
1384 ret = lttng_syscall_filter_enable_trigger(trigger);
1385 break;
1386 case LTTNG_KERNEL_KPROBE:
1387 case LTTNG_KERNEL_UPROBE:
1388 ret = 0;
1389 break;
1390 case LTTNG_KERNEL_KRETPROBE:
1391 case LTTNG_KERNEL_FUNCTION:
1392 case LTTNG_KERNEL_NOOP:
1393 default:
1394 WARN_ON_ONCE(1);
1395 }
1396 if (!ret)
1397 trigger->registered = 1;
1398 }
1399
1400 static
1401 int _lttng_trigger_unregister(struct lttng_trigger *trigger)
1402 {
1403 const struct lttng_event_desc *desc;
1404 int ret = -EINVAL;
1405
1406 if (!trigger->registered)
1407 return 0;
1408
1409 desc = trigger->desc;
1410 switch (trigger->instrumentation) {
1411 case LTTNG_KERNEL_TRACEPOINT:
1412 ret = lttng_wrapper_tracepoint_probe_unregister(trigger->desc->kname,
1413 trigger->desc->trigger_callback,
1414 trigger);
1415 break;
1416 case LTTNG_KERNEL_KPROBE:
1417 lttng_kprobes_unregister_trigger(trigger);
1418 ret = 0;
1419 break;
1420 case LTTNG_KERNEL_UPROBE:
1421 lttng_uprobes_unregister_trigger(trigger);
1422 ret = 0;
1423 break;
1424 case LTTNG_KERNEL_SYSCALL:
1425 ret = lttng_syscall_filter_disable_trigger(trigger);
1426 break;
1427 case LTTNG_KERNEL_KRETPROBE:
1428 case LTTNG_KERNEL_FUNCTION:
1429 case LTTNG_KERNEL_NOOP:
1430 default:
1431 WARN_ON_ONCE(1);
1432 }
1433 if (!ret)
1434 trigger->registered = 0;
1435 return ret;
1436 }
1437
/*
 * Release all resources held by an event and free it.
 *
 * Only used internally at session destruction.  Drops the reference
 * taken at creation time (event descriptor for tracepoints, module
 * reference for kprobe/kretprobe/uprobe), unlinks the event from the
 * session event list, destroys its context and frees the object.
 */
static
void _lttng_event_destroy(struct lttng_event *event)
{
	switch (event->instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:
		/* Drop the reference taken by lttng_event_desc_get(). */
		lttng_event_desc_put(event->desc);
		break;
	case LTTNG_KERNEL_KPROBE:
		module_put(event->desc->owner);
		lttng_kprobes_destroy_event_private(event);
		break;
	case LTTNG_KERNEL_KRETPROBE:
		module_put(event->desc->owner);
		lttng_kretprobes_destroy_private(event);
		break;
	case LTTNG_KERNEL_NOOP:
	case LTTNG_KERNEL_SYSCALL:
		/* No instrumentation-private data to release. */
		break;
	case LTTNG_KERNEL_UPROBE:
		module_put(event->desc->owner);
		lttng_uprobes_destroy_event_private(event);
		break;
	case LTTNG_KERNEL_FUNCTION: /* Fall-through */
	default:
		WARN_ON_ONCE(1);
	}
	list_del(&event->list);
	lttng_destroy_context(event->ctx);
	kmem_cache_free(event_cache, event);
}
1471
/*
 * Release all resources held by a trigger and free it.
 *
 * Only used internally at session destruction.  Drops the reference
 * taken at creation time (event descriptor for tracepoints, module
 * reference for kprobe/uprobe), unlinks the trigger from the group
 * list and frees the object.
 */
static
void _lttng_trigger_destroy(struct lttng_trigger *trigger)
{
	switch (trigger->instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:
		/* Drop the reference taken by lttng_event_desc_get(). */
		lttng_event_desc_put(trigger->desc);
		break;
	case LTTNG_KERNEL_KPROBE:
		module_put(trigger->desc->owner);
		lttng_kprobes_destroy_trigger_private(trigger);
		break;
	case LTTNG_KERNEL_NOOP:
	case LTTNG_KERNEL_SYSCALL:
		/* No instrumentation-private data to release. */
		break;
	case LTTNG_KERNEL_UPROBE:
		module_put(trigger->desc->owner);
		lttng_uprobes_destroy_trigger_private(trigger);
		break;
	case LTTNG_KERNEL_KRETPROBE:
	case LTTNG_KERNEL_FUNCTION:
	default:
		WARN_ON_ONCE(1);
	}
	list_del(&trigger->list);
	kmem_cache_free(trigger_cache, trigger);
}
1501
1502 struct lttng_id_tracker *get_tracker(struct lttng_session *session,
1503 enum tracker_type tracker_type)
1504 {
1505 switch (tracker_type) {
1506 case TRACKER_PID:
1507 return &session->pid_tracker;
1508 case TRACKER_VPID:
1509 return &session->vpid_tracker;
1510 case TRACKER_UID:
1511 return &session->uid_tracker;
1512 case TRACKER_VUID:
1513 return &session->vuid_tracker;
1514 case TRACKER_GID:
1515 return &session->gid_tracker;
1516 case TRACKER_VGID:
1517 return &session->vgid_tracker;
1518 default:
1519 WARN_ON_ONCE(1);
1520 return NULL;
1521 }
1522 }
1523
1524 int lttng_session_track_id(struct lttng_session *session,
1525 enum tracker_type tracker_type, int id)
1526 {
1527 struct lttng_id_tracker *tracker;
1528 int ret;
1529
1530 tracker = get_tracker(session, tracker_type);
1531 if (!tracker)
1532 return -EINVAL;
1533 if (id < -1)
1534 return -EINVAL;
1535 mutex_lock(&sessions_mutex);
1536 if (id == -1) {
1537 /* track all ids: destroy tracker. */
1538 lttng_id_tracker_destroy(tracker, true);
1539 ret = 0;
1540 } else {
1541 ret = lttng_id_tracker_add(tracker, id);
1542 }
1543 mutex_unlock(&sessions_mutex);
1544 return ret;
1545 }
1546
1547 int lttng_session_untrack_id(struct lttng_session *session,
1548 enum tracker_type tracker_type, int id)
1549 {
1550 struct lttng_id_tracker *tracker;
1551 int ret;
1552
1553 tracker = get_tracker(session, tracker_type);
1554 if (!tracker)
1555 return -EINVAL;
1556 if (id < -1)
1557 return -EINVAL;
1558 mutex_lock(&sessions_mutex);
1559 if (id == -1) {
1560 /* untrack all ids: replace by empty tracker. */
1561 ret = lttng_id_tracker_empty_set(tracker);
1562 } else {
1563 ret = lttng_id_tracker_del(tracker, id);
1564 }
1565 mutex_unlock(&sessions_mutex);
1566 return ret;
1567 }
1568
1569 static
1570 void *id_list_start(struct seq_file *m, loff_t *pos)
1571 {
1572 struct lttng_id_tracker *id_tracker = m->private;
1573 struct lttng_id_tracker_rcu *id_tracker_p = id_tracker->p;
1574 struct lttng_id_hash_node *e;
1575 int iter = 0, i;
1576
1577 mutex_lock(&sessions_mutex);
1578 if (id_tracker_p) {
1579 for (i = 0; i < LTTNG_ID_TABLE_SIZE; i++) {
1580 struct hlist_head *head = &id_tracker_p->id_hash[i];
1581
1582 lttng_hlist_for_each_entry(e, head, hlist) {
1583 if (iter++ >= *pos)
1584 return e;
1585 }
1586 }
1587 } else {
1588 /* ID tracker disabled. */
1589 if (iter >= *pos && iter == 0) {
1590 return id_tracker_p; /* empty tracker */
1591 }
1592 iter++;
1593 }
1594 /* End of list */
1595 return NULL;
1596 }
1597
1598 /* Called with sessions_mutex held. */
1599 static
1600 void *id_list_next(struct seq_file *m, void *p, loff_t *ppos)
1601 {
1602 struct lttng_id_tracker *id_tracker = m->private;
1603 struct lttng_id_tracker_rcu *id_tracker_p = id_tracker->p;
1604 struct lttng_id_hash_node *e;
1605 int iter = 0, i;
1606
1607 (*ppos)++;
1608 if (id_tracker_p) {
1609 for (i = 0; i < LTTNG_ID_TABLE_SIZE; i++) {
1610 struct hlist_head *head = &id_tracker_p->id_hash[i];
1611
1612 lttng_hlist_for_each_entry(e, head, hlist) {
1613 if (iter++ >= *ppos)
1614 return e;
1615 }
1616 }
1617 } else {
1618 /* ID tracker disabled. */
1619 if (iter >= *ppos && iter == 0)
1620 return p; /* empty tracker */
1621 iter++;
1622 }
1623
1624 /* End of list */
1625 return NULL;
1626 }
1627
1628 static
1629 void id_list_stop(struct seq_file *m, void *p)
1630 {
1631 mutex_unlock(&sessions_mutex);
1632 }
1633
1634 static
1635 int id_list_show(struct seq_file *m, void *p)
1636 {
1637 struct lttng_id_tracker *id_tracker = m->private;
1638 struct lttng_id_tracker_rcu *id_tracker_p = id_tracker->p;
1639 int id;
1640
1641 if (p == id_tracker_p) {
1642 /* Tracker disabled. */
1643 id = -1;
1644 } else {
1645 const struct lttng_id_hash_node *e = p;
1646
1647 id = lttng_id_tracker_get_node_id(e);
1648 }
1649 switch (id_tracker->tracker_type) {
1650 case TRACKER_PID:
1651 seq_printf(m, "process { pid = %d; };\n", id);
1652 break;
1653 case TRACKER_VPID:
1654 seq_printf(m, "process { vpid = %d; };\n", id);
1655 break;
1656 case TRACKER_UID:
1657 seq_printf(m, "user { uid = %d; };\n", id);
1658 break;
1659 case TRACKER_VUID:
1660 seq_printf(m, "user { vuid = %d; };\n", id);
1661 break;
1662 case TRACKER_GID:
1663 seq_printf(m, "group { gid = %d; };\n", id);
1664 break;
1665 case TRACKER_VGID:
1666 seq_printf(m, "group { vgid = %d; };\n", id);
1667 break;
1668 default:
1669 seq_printf(m, "UNKNOWN { field = %d };\n", id);
1670 }
1671 return 0;
1672 }
1673
/* seq_file operations backing the tracked-ID listing file. */
static
const struct seq_operations lttng_tracker_ids_list_seq_ops = {
	.start = id_list_start,
	.next = id_list_next,
	.stop = id_list_stop,
	.show = id_list_show,
};
1681
1682 static
1683 int lttng_tracker_ids_list_open(struct inode *inode, struct file *file)
1684 {
1685 return seq_open(file, &lttng_tracker_ids_list_seq_ops);
1686 }
1687
1688 static
1689 int lttng_tracker_ids_list_release(struct inode *inode, struct file *file)
1690 {
1691 struct seq_file *m = file->private_data;
1692 struct lttng_id_tracker *id_tracker = m->private;
1693 int ret;
1694
1695 WARN_ON_ONCE(!id_tracker);
1696 ret = seq_release(inode, file);
1697 if (!ret)
1698 fput(id_tracker->session->file);
1699 return ret;
1700 }
1701
/* File operations for the anonymous tracked-ID listing file. */
const struct file_operations lttng_tracker_ids_list_fops = {
	.owner = THIS_MODULE,
	.open = lttng_tracker_ids_list_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = lttng_tracker_ids_list_release,
};
1709
/*
 * Create an anonymous file descriptor listing the ids tracked by the
 * given tracker type, in metadata declaration format.
 *
 * Returns the new fd on success, negative error code on failure.  On
 * success, a reference on the session file is held until the listing
 * file is released (dropped in lttng_tracker_ids_list_release()).
 */
int lttng_session_list_tracker_ids(struct lttng_session *session,
		enum tracker_type tracker_type)
{
	struct file *tracker_ids_list_file;
	struct seq_file *m;
	int file_fd, ret;

	file_fd = lttng_get_unused_fd();
	if (file_fd < 0) {
		ret = file_fd;
		goto fd_error;
	}

	tracker_ids_list_file = anon_inode_getfile("[lttng_tracker_ids_list]",
					  &lttng_tracker_ids_list_fops,
					  NULL, O_RDWR);
	if (IS_ERR(tracker_ids_list_file)) {
		ret = PTR_ERR(tracker_ids_list_file);
		goto file_error;
	}
	/* Pin the session file so the session outlives the listing fd. */
	if (!atomic_long_add_unless(&session->file->f_count, 1, LONG_MAX)) {
		ret = -EOVERFLOW;
		goto refcount_error;
	}
	ret = lttng_tracker_ids_list_fops.open(NULL, tracker_ids_list_file);
	if (ret < 0)
		goto open_error;
	m = tracker_ids_list_file->private_data;

	/* The seq_file private data points at the tracker to iterate on. */
	m->private = get_tracker(session, tracker_type);
	BUG_ON(!m->private);
	fd_install(file_fd, tracker_ids_list_file);

	return file_fd;

open_error:
	atomic_long_dec(&session->file->f_count);
refcount_error:
	fput(tracker_ids_list_file);
file_error:
	put_unused_fd(file_fd);
fd_error:
	return ret;
}
1754
1755 /*
1756 * Enabler management.
1757 */
1758 static
1759 int lttng_match_enabler_star_glob(const char *desc_name,
1760 const char *pattern)
1761 {
1762 if (!strutils_star_glob_match(pattern, LTTNG_SIZE_MAX,
1763 desc_name, LTTNG_SIZE_MAX))
1764 return 0;
1765 return 1;
1766 }
1767
/*
 * Match a description name against a literal enabler name.
 * Returns 1 when the names are identical, 0 otherwise.
 */
static
int lttng_match_enabler_name(const char *desc_name,
		const char *name)
{
	return strcmp(desc_name, name) == 0;
}
1776
/*
 * Check whether an event description matches an enabler.
 *
 * Returns 1 on match, 0 on mismatch, and a negative error code on
 * invalid enabler state.  NOTE(review): callers treating the result as
 * a plain boolean should beware of the negative (truthy) error returns.
 */
int lttng_desc_match_enabler(const struct lttng_event_desc *desc,
		struct lttng_enabler *enabler)
{
	const char *desc_name, *enabler_name;
	bool compat = false, entry = false;

	enabler_name = enabler->event_param.name;
	switch (enabler->event_param.instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:
		desc_name = desc->name;
		/* Tracepoints match on the full name, literally or by glob. */
		switch (enabler->format_type) {
		case LTTNG_ENABLER_FORMAT_STAR_GLOB:
			return lttng_match_enabler_star_glob(desc_name, enabler_name);
		case LTTNG_ENABLER_FORMAT_NAME:
			return lttng_match_enabler_name(desc_name, enabler_name);
		default:
			return -EINVAL;
		}
		break;
	case LTTNG_KERNEL_SYSCALL:
		desc_name = desc->name;
		/*
		 * Strip the "compat_" and "syscall_entry_"/"syscall_exit_"
		 * prefixes from the description name, remembering the ABI
		 * and entry/exit direction they encode.
		 */
		if (!strncmp(desc_name, "compat_", strlen("compat_"))) {
			desc_name += strlen("compat_");
			compat = true;
		}
		if (!strncmp(desc_name, "syscall_exit_",
				strlen("syscall_exit_"))) {
			desc_name += strlen("syscall_exit_");
		} else if (!strncmp(desc_name, "syscall_entry_",
				strlen("syscall_entry_"))) {
			desc_name += strlen("syscall_entry_");
			entry = true;
		} else {
			WARN_ON_ONCE(1);
			return -EINVAL;
		}
		/* Filter on the entry/exit direction requested by the enabler. */
		switch (enabler->event_param.u.syscall.entryexit) {
		case LTTNG_KERNEL_SYSCALL_ENTRYEXIT:
			break;
		case LTTNG_KERNEL_SYSCALL_ENTRY:
			if (!entry)
				return 0;
			break;
		case LTTNG_KERNEL_SYSCALL_EXIT:
			if (entry)
				return 0;
			break;
		default:
			return -EINVAL;
		}
		/* Filter on native vs compat ABI. */
		switch (enabler->event_param.u.syscall.abi) {
		case LTTNG_KERNEL_SYSCALL_ABI_ALL:
			break;
		case LTTNG_KERNEL_SYSCALL_ABI_NATIVE:
			if (compat)
				return 0;
			break;
		case LTTNG_KERNEL_SYSCALL_ABI_COMPAT:
			if (!compat)
				return 0;
			break;
		default:
			return -EINVAL;
		}
		/* Finally, match on the (prefix-stripped) syscall name. */
		switch (enabler->event_param.u.syscall.match) {
		case LTTNG_SYSCALL_MATCH_NAME:
			switch (enabler->format_type) {
			case LTTNG_ENABLER_FORMAT_STAR_GLOB:
				return lttng_match_enabler_star_glob(desc_name, enabler_name);
			case LTTNG_ENABLER_FORMAT_NAME:
				return lttng_match_enabler_name(desc_name, enabler_name);
			default:
				return -EINVAL;
			}
			break;
		case LTTNG_SYSCALL_MATCH_NR:
			return -EINVAL; /* Not implemented. */
		default:
			return -EINVAL;
		}
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}
}
1863
1864 static
1865 int lttng_event_enabler_match_event(struct lttng_event_enabler *event_enabler,
1866 struct lttng_event *event)
1867 {
1868 struct lttng_enabler *base_enabler = lttng_event_enabler_as_enabler(
1869 event_enabler);
1870
1871 if (base_enabler->event_param.instrumentation != event->instrumentation)
1872 return 0;
1873 if (lttng_desc_match_enabler(event->desc, base_enabler)
1874 && event->chan == event_enabler->chan)
1875 return 1;
1876 else
1877 return 0;
1878 }
1879
1880 static
1881 int lttng_trigger_enabler_match_trigger(struct lttng_trigger_enabler *trigger_enabler,
1882 struct lttng_trigger *trigger)
1883 {
1884 struct lttng_enabler *base_enabler = lttng_trigger_enabler_as_enabler(
1885 trigger_enabler);
1886
1887 if (base_enabler->event_param.instrumentation != trigger->instrumentation)
1888 return 0;
1889 if (lttng_desc_match_enabler(trigger->desc, base_enabler)
1890 && trigger->group == trigger_enabler->group
1891 && trigger->id == trigger_enabler->id)
1892 return 1;
1893 else
1894 return 0;
1895 }
1896
1897 static
1898 struct lttng_enabler_ref *lttng_enabler_ref(
1899 struct list_head *enablers_ref_list,
1900 struct lttng_enabler *enabler)
1901 {
1902 struct lttng_enabler_ref *enabler_ref;
1903
1904 list_for_each_entry(enabler_ref, enablers_ref_list, node) {
1905 if (enabler_ref->ref == enabler)
1906 return enabler_ref;
1907 }
1908 return NULL;
1909 }
1910
1911 static
1912 void lttng_create_tracepoint_event_if_missing(struct lttng_event_enabler *event_enabler)
1913 {
1914 struct lttng_session *session = event_enabler->chan->session;
1915 struct lttng_probe_desc *probe_desc;
1916 const struct lttng_event_desc *desc;
1917 int i;
1918 struct list_head *probe_list;
1919
1920 probe_list = lttng_get_probe_list_head();
1921 /*
1922 * For each probe event, if we find that a probe event matches
1923 * our enabler, create an associated lttng_event if not
1924 * already present.
1925 */
1926 list_for_each_entry(probe_desc, probe_list, head) {
1927 for (i = 0; i < probe_desc->nr_events; i++) {
1928 int found = 0;
1929 struct hlist_head *head;
1930 struct lttng_event *event;
1931
1932 desc = probe_desc->event_desc[i];
1933 if (!lttng_desc_match_enabler(desc,
1934 lttng_event_enabler_as_enabler(event_enabler)))
1935 continue;
1936
1937 /*
1938 * Check if already created.
1939 */
1940 head = utils_borrow_hash_table_bucket(
1941 session->events_ht.table, LTTNG_EVENT_HT_SIZE,
1942 desc->name);
1943 lttng_hlist_for_each_entry(event, head, hlist) {
1944 if (event->desc == desc
1945 && event->chan == event_enabler->chan)
1946 found = 1;
1947 }
1948 if (found)
1949 continue;
1950
1951 /*
1952 * We need to create an event for this
1953 * event probe.
1954 */
1955 event = _lttng_event_create(event_enabler->chan,
1956 NULL, NULL, desc,
1957 LTTNG_KERNEL_TRACEPOINT);
1958 if (!event) {
1959 printk(KERN_INFO "LTTng: Unable to create event %s\n",
1960 probe_desc->event_desc[i]->name);
1961 }
1962 }
1963 }
1964 }
1965
1966 static
1967 void lttng_create_tracepoint_trigger_if_missing(struct lttng_trigger_enabler *trigger_enabler)
1968 {
1969 struct lttng_trigger_group *trigger_group = trigger_enabler->group;
1970 struct lttng_probe_desc *probe_desc;
1971 const struct lttng_event_desc *desc;
1972 int i;
1973 struct list_head *probe_list;
1974
1975 probe_list = lttng_get_probe_list_head();
1976 /*
1977 * For each probe event, if we find that a probe event matches
1978 * our enabler, create an associated lttng_trigger if not
1979 * already present.
1980 */
1981 list_for_each_entry(probe_desc, probe_list, head) {
1982 for (i = 0; i < probe_desc->nr_events; i++) {
1983 int found = 0;
1984 struct hlist_head *head;
1985 struct lttng_trigger *trigger;
1986
1987 desc = probe_desc->event_desc[i];
1988 if (!lttng_desc_match_enabler(desc,
1989 lttng_trigger_enabler_as_enabler(trigger_enabler)))
1990 continue;
1991
1992 /*
1993 * Check if already created.
1994 */
1995 head = utils_borrow_hash_table_bucket(
1996 trigger_group->triggers_ht.table,
1997 LTTNG_TRIGGER_HT_SIZE, desc->name);
1998 lttng_hlist_for_each_entry(trigger, head, hlist) {
1999 if (trigger->desc == desc
2000 && trigger->id == trigger_enabler->id)
2001 found = 1;
2002 }
2003 if (found)
2004 continue;
2005
2006 /*
2007 * We need to create a trigger for this event probe.
2008 */
2009 trigger = _lttng_trigger_create(desc,
2010 trigger_enabler->id,
2011 trigger_enabler->error_counter_index,
2012 trigger_group, NULL, NULL,
2013 LTTNG_KERNEL_TRACEPOINT);
2014 if (IS_ERR(trigger)) {
2015 printk(KERN_INFO "Unable to create trigger %s\n",
2016 probe_desc->event_desc[i]->name);
2017 }
2018 }
2019 }
2020 }
2021
2022 static
2023 void lttng_create_syscall_event_if_missing(struct lttng_event_enabler *event_enabler)
2024 {
2025 int ret;
2026
2027 ret = lttng_syscalls_register_event(event_enabler->chan, NULL);
2028 WARN_ON_ONCE(ret);
2029 }
2030
2031 static
2032 void lttng_create_syscall_trigger_if_missing(struct lttng_trigger_enabler *trigger_enabler)
2033 {
2034 int ret;
2035
2036 ret = lttng_syscalls_register_trigger(trigger_enabler, NULL);
2037 WARN_ON_ONCE(ret);
2038 ret = lttng_syscals_create_matching_triggers(trigger_enabler, NULL);
2039 WARN_ON_ONCE(ret);
2040 }
2041
2042 /*
2043 * Create struct lttng_event if it is missing and present in the list of
2044 * tracepoint probes.
2045 * Should be called with sessions mutex held.
2046 */
2047 static
2048 void lttng_create_event_if_missing(struct lttng_event_enabler *event_enabler)
2049 {
2050 switch (event_enabler->base.event_param.instrumentation) {
2051 case LTTNG_KERNEL_TRACEPOINT:
2052 lttng_create_tracepoint_event_if_missing(event_enabler);
2053 break;
2054 case LTTNG_KERNEL_SYSCALL:
2055 lttng_create_syscall_event_if_missing(event_enabler);
2056 break;
2057 default:
2058 WARN_ON_ONCE(1);
2059 break;
2060 }
2061 }
2062
2063 /*
2064 * Create events associated with an event_enabler (if not already present),
2065 * and add backward reference from the event to the enabler.
2066 * Should be called with sessions mutex held.
2067 */
2068 static
2069 int lttng_event_enabler_ref_events(struct lttng_event_enabler *event_enabler)
2070 {
2071 struct lttng_channel *chan = event_enabler->chan;
2072 struct lttng_session *session = event_enabler->chan->session;
2073 struct lttng_enabler *base_enabler = lttng_event_enabler_as_enabler(event_enabler);
2074 struct lttng_event *event;
2075
2076 if (base_enabler->event_param.instrumentation == LTTNG_KERNEL_SYSCALL &&
2077 base_enabler->event_param.u.syscall.entryexit == LTTNG_KERNEL_SYSCALL_ENTRYEXIT &&
2078 base_enabler->event_param.u.syscall.abi == LTTNG_KERNEL_SYSCALL_ABI_ALL &&
2079 base_enabler->event_param.u.syscall.match == LTTNG_SYSCALL_MATCH_NAME &&
2080 !strcmp(base_enabler->event_param.name, "*")) {
2081 if (base_enabler->enabled)
2082 WRITE_ONCE(chan->syscall_all, 1);
2083 else
2084 WRITE_ONCE(chan->syscall_all, 0);
2085 }
2086
2087 /* First ensure that probe events are created for this enabler. */
2088 lttng_create_event_if_missing(event_enabler);
2089
2090 /* For each event matching event_enabler in session event list. */
2091 list_for_each_entry(event, &session->events, list) {
2092 struct lttng_enabler_ref *enabler_ref;
2093
2094 if (!lttng_event_enabler_match_event(event_enabler, event))
2095 continue;
2096 enabler_ref = lttng_enabler_ref(&event->enablers_ref_head,
2097 lttng_event_enabler_as_enabler(event_enabler));
2098 if (!enabler_ref) {
2099 /*
2100 * If no backward ref, create it.
2101 * Add backward ref from event to event_enabler.
2102 */
2103 enabler_ref = kzalloc(sizeof(*enabler_ref), GFP_KERNEL);
2104 if (!enabler_ref)
2105 return -ENOMEM;
2106 enabler_ref->ref = lttng_event_enabler_as_enabler(event_enabler);
2107 list_add(&enabler_ref->node,
2108 &event->enablers_ref_head);
2109 }
2110
2111 /*
2112 * Link filter bytecodes if not linked yet.
2113 */
2114 lttng_enabler_link_bytecode(event->desc,
2115 lttng_static_ctx,
2116 &event->filter_bytecode_runtime_head,
2117 &lttng_event_enabler_as_enabler(event_enabler)->filter_bytecode_head);
2118
2119 /* TODO: merge event context. */
2120 }
2121 return 0;
2122 }
2123
2124 /*
2125 * Create struct lttng_trigger if it is missing and present in the list of
2126 * tracepoint probes.
2127 * Should be called with sessions mutex held.
2128 */
2129 static
2130 void lttng_create_trigger_if_missing(struct lttng_trigger_enabler *trigger_enabler)
2131 {
2132 switch (trigger_enabler->base.event_param.instrumentation) {
2133 case LTTNG_KERNEL_TRACEPOINT:
2134 lttng_create_tracepoint_trigger_if_missing(trigger_enabler);
2135 break;
2136 case LTTNG_KERNEL_SYSCALL:
2137 lttng_create_syscall_trigger_if_missing(trigger_enabler);
2138 break;
2139 default:
2140 WARN_ON_ONCE(1);
2141 break;
2142 }
2143 }
2144
/*
 * Create triggers associated with a trigger enabler (if not already
 * present), add a backward reference from each matching trigger to the
 * enabler, and link the enabler's filter and capture bytecodes.
 * Returns 0 on success, -ENOMEM when a back-reference allocation fails.
 */
static
int lttng_trigger_enabler_ref_triggers(struct lttng_trigger_enabler *trigger_enabler)
{
	struct lttng_trigger_group *trigger_group = trigger_enabler->group;
	struct lttng_trigger *trigger;

	/* First ensure that probe triggers are created for this enabler. */
	lttng_create_trigger_if_missing(trigger_enabler);

	/* Link the created trigger with its associated enabler. */
	list_for_each_entry(trigger, &trigger_group->triggers_head, list) {
		struct lttng_enabler_ref *enabler_ref;

		if (!lttng_trigger_enabler_match_trigger(trigger_enabler, trigger))
			continue;

		enabler_ref = lttng_enabler_ref(&trigger->enablers_ref_head,
			lttng_trigger_enabler_as_enabler(trigger_enabler));
		if (!enabler_ref) {
			/*
			 * If no backward ref, create it.
			 * Add backward ref from trigger to enabler.
			 */
			enabler_ref = kzalloc(sizeof(*enabler_ref), GFP_KERNEL);
			if (!enabler_ref)
				return -ENOMEM;

			enabler_ref->ref = lttng_trigger_enabler_as_enabler(
				trigger_enabler);
			list_add(&enabler_ref->node,
				&trigger->enablers_ref_head);
		}

		/*
		 * Link filter bytecodes if not linked yet.
		 */
		lttng_enabler_link_bytecode(trigger->desc,
			lttng_static_ctx, &trigger->filter_bytecode_runtime_head,
			&lttng_trigger_enabler_as_enabler(trigger_enabler)->filter_bytecode_head);

		/* Link capture bytecodes if not linked yet. */
		lttng_enabler_link_bytecode(trigger->desc,
			lttng_static_ctx, &trigger->capture_bytecode_runtime_head,
			&trigger_enabler->capture_bytecode_head);

		trigger->num_captures = trigger_enabler->num_captures;
	}
	return 0;
}
2197
2198 /*
2199 * Called at module load: connect the probe on all enablers matching
2200 * this event.
2201 * Called with sessions lock held.
2202 */
2203 int lttng_fix_pending_events(void)
2204 {
2205 struct lttng_session *session;
2206
2207 list_for_each_entry(session, &sessions, list)
2208 lttng_session_lazy_sync_event_enablers(session);
2209 return 0;
2210 }
2211
2212 static bool lttng_trigger_group_has_active_triggers(
2213 struct lttng_trigger_group *trigger_group)
2214 {
2215 struct lttng_trigger_enabler *trigger_enabler;
2216
2217 list_for_each_entry(trigger_enabler, &trigger_group->enablers_head,
2218 node) {
2219 if (trigger_enabler->base.enabled)
2220 return true;
2221 }
2222 return false;
2223 }
2224
2225 bool lttng_trigger_active(void)
2226 {
2227 struct lttng_trigger_group *trigger_group;
2228
2229 list_for_each_entry(trigger_group, &trigger_groups, node) {
2230 if (lttng_trigger_group_has_active_triggers(trigger_group))
2231 return true;
2232 }
2233 return false;
2234 }
2235
2236 int lttng_fix_pending_triggers(void)
2237 {
2238 struct lttng_trigger_group *trigger_group;
2239
2240 list_for_each_entry(trigger_group, &trigger_groups, node)
2241 lttng_trigger_group_sync_enablers(trigger_group);
2242 return 0;
2243 }
2244
2245 struct lttng_event_enabler *lttng_event_enabler_create(
2246 enum lttng_enabler_format_type format_type,
2247 struct lttng_kernel_event *event_param,
2248 struct lttng_channel *chan)
2249 {
2250 struct lttng_event_enabler *event_enabler;
2251
2252 event_enabler = kzalloc(sizeof(*event_enabler), GFP_KERNEL);
2253 if (!event_enabler)
2254 return NULL;
2255 event_enabler->base.format_type = format_type;
2256 INIT_LIST_HEAD(&event_enabler->base.filter_bytecode_head);
2257 memcpy(&event_enabler->base.event_param, event_param,
2258 sizeof(event_enabler->base.event_param));
2259 event_enabler->chan = chan;
2260 /* ctx left NULL */
2261 event_enabler->base.enabled = 0;
2262 event_enabler->base.evtype = LTTNG_TYPE_ENABLER;
2263 mutex_lock(&sessions_mutex);
2264 list_add(&event_enabler->node, &event_enabler->chan->session->enablers_head);
2265 lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
2266 mutex_unlock(&sessions_mutex);
2267 return event_enabler;
2268 }
2269
2270 int lttng_event_enabler_enable(struct lttng_event_enabler *event_enabler)
2271 {
2272 mutex_lock(&sessions_mutex);
2273 lttng_event_enabler_as_enabler(event_enabler)->enabled = 1;
2274 lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
2275 mutex_unlock(&sessions_mutex);
2276 return 0;
2277 }
2278
2279 int lttng_event_enabler_disable(struct lttng_event_enabler *event_enabler)
2280 {
2281 mutex_lock(&sessions_mutex);
2282 lttng_event_enabler_as_enabler(event_enabler)->enabled = 0;
2283 lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
2284 mutex_unlock(&sessions_mutex);
2285 return 0;
2286 }
2287
2288 static
2289 int lttng_enabler_attach_filter_bytecode(struct lttng_enabler *enabler,
2290 struct lttng_kernel_filter_bytecode __user *bytecode)
2291 {
2292 struct lttng_bytecode_node *bytecode_node;
2293 uint32_t bytecode_len;
2294 int ret;
2295
2296 ret = get_user(bytecode_len, &bytecode->len);
2297 if (ret)
2298 return ret;
2299 bytecode_node = kzalloc(sizeof(*bytecode_node) + bytecode_len,
2300 GFP_KERNEL);
2301 if (!bytecode_node)
2302 return -ENOMEM;
2303 ret = copy_from_user(&bytecode_node->bc, bytecode,
2304 sizeof(*bytecode) + bytecode_len);
2305 if (ret)
2306 goto error_free;
2307
2308 bytecode_node->type = LTTNG_BYTECODE_NODE_TYPE_FILTER;
2309 bytecode_node->enabler = enabler;
2310 /* Enforce length based on allocated size */
2311 bytecode_node->bc.len = bytecode_len;
2312 list_add_tail(&bytecode_node->node, &enabler->filter_bytecode_head);
2313
2314 return 0;
2315
2316 error_free:
2317 kfree(bytecode_node);
2318 return ret;
2319 }
2320
2321 int lttng_event_enabler_attach_filter_bytecode(struct lttng_event_enabler *event_enabler,
2322 struct lttng_kernel_filter_bytecode __user *bytecode)
2323 {
2324 int ret;
2325 ret = lttng_enabler_attach_filter_bytecode(
2326 lttng_event_enabler_as_enabler(event_enabler), bytecode);
2327 if (ret)
2328 goto error;
2329
2330 lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
2331 return 0;
2332
2333 error:
2334 return ret;
2335 }
2336
2337 int lttng_event_add_callsite(struct lttng_event *event,
2338 struct lttng_kernel_event_callsite __user *callsite)
2339 {
2340
2341 switch (event->instrumentation) {
2342 case LTTNG_KERNEL_UPROBE:
2343 return lttng_uprobes_event_add_callsite(event, callsite);
2344 default:
2345 return -EINVAL;
2346 }
2347 }
2348
int lttng_event_enabler_attach_context(struct lttng_event_enabler *event_enabler,
		struct lttng_kernel_context *context_param)
{
	/* Per-event-enabler context attachment is not implemented. */
	return -ENOSYS;
}
2354
2355 static
2356 void lttng_enabler_destroy(struct lttng_enabler *enabler)
2357 {
2358 struct lttng_bytecode_node *filter_node, *tmp_filter_node;
2359
2360 /* Destroy filter bytecode */
2361 list_for_each_entry_safe(filter_node, tmp_filter_node,
2362 &enabler->filter_bytecode_head, node) {
2363 kfree(filter_node);
2364 }
2365 }
2366
static
void lttng_event_enabler_destroy(struct lttng_event_enabler *event_enabler)
{
	/* Free filter bytecode attached to the base enabler. */
	lttng_enabler_destroy(lttng_event_enabler_as_enabler(event_enabler));

	/* Destroy contexts */
	lttng_destroy_context(event_enabler->ctx);

	/* Unlink from the session's enablers list before freeing. */
	list_del(&event_enabler->node);
	kfree(event_enabler);
}
2378
2379 struct lttng_trigger_enabler *lttng_trigger_enabler_create(
2380 struct lttng_trigger_group *trigger_group,
2381 enum lttng_enabler_format_type format_type,
2382 struct lttng_kernel_trigger *trigger_param)
2383 {
2384 struct lttng_trigger_enabler *trigger_enabler;
2385
2386 trigger_enabler = kzalloc(sizeof(*trigger_enabler), GFP_KERNEL);
2387 if (!trigger_enabler)
2388 return NULL;
2389
2390 trigger_enabler->base.format_type = format_type;
2391 INIT_LIST_HEAD(&trigger_enabler->base.filter_bytecode_head);
2392 INIT_LIST_HEAD(&trigger_enabler->capture_bytecode_head);
2393
2394 trigger_enabler->id = trigger_param->id;
2395 trigger_enabler->error_counter_index = trigger_param->error_counter_index;
2396 trigger_enabler->num_captures = 0;
2397
2398 memcpy(&trigger_enabler->base.event_param.name, trigger_param->name,
2399 sizeof(trigger_enabler->base.event_param.name));
2400 trigger_enabler->base.event_param.instrumentation = trigger_param->instrumentation;
2401 trigger_enabler->base.evtype = LTTNG_TYPE_ENABLER;
2402
2403 trigger_enabler->base.enabled = 0;
2404 trigger_enabler->group = trigger_group;
2405
2406 mutex_lock(&sessions_mutex);
2407 list_add(&trigger_enabler->node, &trigger_enabler->group->enablers_head);
2408 lttng_trigger_group_sync_enablers(trigger_enabler->group);
2409
2410 mutex_unlock(&sessions_mutex);
2411
2412 return trigger_enabler;
2413 }
2414
2415 int lttng_trigger_enabler_enable(struct lttng_trigger_enabler *trigger_enabler)
2416 {
2417 mutex_lock(&sessions_mutex);
2418 lttng_trigger_enabler_as_enabler(trigger_enabler)->enabled = 1;
2419 lttng_trigger_group_sync_enablers(trigger_enabler->group);
2420 mutex_unlock(&sessions_mutex);
2421 return 0;
2422 }
2423
2424 int lttng_trigger_enabler_disable(struct lttng_trigger_enabler *trigger_enabler)
2425 {
2426 mutex_lock(&sessions_mutex);
2427 lttng_trigger_enabler_as_enabler(trigger_enabler)->enabled = 0;
2428 lttng_trigger_group_sync_enablers(trigger_enabler->group);
2429 mutex_unlock(&sessions_mutex);
2430 return 0;
2431 }
2432
2433 int lttng_trigger_enabler_attach_filter_bytecode(
2434 struct lttng_trigger_enabler *trigger_enabler,
2435 struct lttng_kernel_filter_bytecode __user *bytecode)
2436 {
2437 int ret;
2438
2439 ret = lttng_enabler_attach_filter_bytecode(
2440 lttng_trigger_enabler_as_enabler(trigger_enabler), bytecode);
2441 if (ret)
2442 goto error;
2443
2444 lttng_trigger_group_sync_enablers(trigger_enabler->group);
2445 return 0;
2446
2447 error:
2448 return ret;
2449 }
2450
2451 int lttng_trigger_enabler_attach_capture_bytecode(
2452 struct lttng_trigger_enabler *trigger_enabler,
2453 struct lttng_kernel_capture_bytecode __user *bytecode)
2454 {
2455 struct lttng_bytecode_node *bytecode_node;
2456 struct lttng_enabler *enabler =
2457 lttng_trigger_enabler_as_enabler(trigger_enabler);
2458 uint32_t bytecode_len;
2459 int ret;
2460
2461 ret = get_user(bytecode_len, &bytecode->len);
2462 if (ret)
2463 return ret;
2464
2465 bytecode_node = kzalloc(sizeof(*bytecode_node) + bytecode_len,
2466 GFP_KERNEL);
2467 if (!bytecode_node)
2468 return -ENOMEM;
2469
2470 ret = copy_from_user(&bytecode_node->bc, bytecode,
2471 sizeof(*bytecode) + bytecode_len);
2472 if (ret)
2473 goto error_free;
2474
2475 bytecode_node->type = LTTNG_BYTECODE_NODE_TYPE_CAPTURE;
2476 bytecode_node->enabler = enabler;
2477
2478 /* Enforce length based on allocated size */
2479 bytecode_node->bc.len = bytecode_len;
2480 list_add_tail(&bytecode_node->node, &trigger_enabler->capture_bytecode_head);
2481
2482 trigger_enabler->num_captures++;
2483
2484 lttng_trigger_group_sync_enablers(trigger_enabler->group);
2485 goto end;
2486
2487 error_free:
2488 kfree(bytecode_node);
2489 end:
2490 return ret;
2491 }
2492
2493 int lttng_trigger_add_callsite(struct lttng_trigger *trigger,
2494 struct lttng_kernel_event_callsite __user *callsite)
2495 {
2496
2497 switch (trigger->instrumentation) {
2498 case LTTNG_KERNEL_UPROBE:
2499 return lttng_uprobes_trigger_add_callsite(trigger, callsite);
2500 default:
2501 return -EINVAL;
2502 }
2503 }
2504
int lttng_trigger_enabler_attach_context(struct lttng_trigger_enabler *trigger_enabler,
		struct lttng_kernel_context *context_param)
{
	/* Per-trigger-enabler context attachment is not implemented. */
	return -ENOSYS;
}
2510
2511 static
2512 void lttng_trigger_enabler_destroy(struct lttng_trigger_enabler *trigger_enabler)
2513 {
2514 if (!trigger_enabler) {
2515 return;
2516 }
2517
2518 list_del(&trigger_enabler->node);
2519
2520 lttng_enabler_destroy(lttng_trigger_enabler_as_enabler(trigger_enabler));
2521 kfree(trigger_enabler);
2522 }
2523
2524 /*
2525 * lttng_session_sync_event_enablers should be called just before starting a
2526 * session.
2527 * Should be called with sessions mutex held.
2528 */
static
void lttng_session_sync_event_enablers(struct lttng_session *session)
{
	struct lttng_event_enabler *event_enabler;
	struct lttng_event *event;

	/* First pass: create/reference events matching each enabler. */
	list_for_each_entry(event_enabler, &session->enablers_head, node)
		lttng_event_enabler_ref_events(event_enabler);
	/*
	 * For each event, if at least one of its enablers is enabled,
	 * and its channel and session transient states are enabled, we
	 * enable the event, else we disable it.
	 */
	list_for_each_entry(event, &session->events, list) {
		struct lttng_enabler_ref *enabler_ref;
		struct lttng_bytecode_runtime *runtime;
		int enabled = 0, has_enablers_without_bytecode = 0;

		switch (event->instrumentation) {
		case LTTNG_KERNEL_TRACEPOINT:
		case LTTNG_KERNEL_SYSCALL:
			/* Enable events */
			list_for_each_entry(enabler_ref,
					&event->enablers_ref_head, node) {
				if (enabler_ref->ref->enabled) {
					enabled = 1;
					break;
				}
			}
			break;
		default:
			/* Not handled with lazy sync. */
			continue;
		}
		/*
		 * Enabled state is based on union of enablers, with
		 * intersection of session and channel transient enable
		 * states.
		 */
		enabled = enabled && session->tstate && event->chan->tstate;

		WRITE_ONCE(event->enabled, enabled);
		/*
		 * Sync tracepoint registration with event enabled
		 * state.
		 */
		if (enabled) {
			register_event(event);
		} else {
			_lttng_event_unregister(event);
		}

		/*
		 * An event must fire when at least one enabled enabler has
		 * no filter attached: record that so the filter check can
		 * be bypassed at trace time.
		 */
		list_for_each_entry(enabler_ref,
				&event->enablers_ref_head, node) {
			if (enabler_ref->ref->enabled
					&& list_empty(&enabler_ref->ref->filter_bytecode_head)) {
				has_enablers_without_bytecode = 1;
				break;
			}
		}
		event->has_enablers_without_bytecode =
			has_enablers_without_bytecode;

		/* Sync each filter runtime with its enabler's state. */
		list_for_each_entry(runtime,
				&event->filter_bytecode_runtime_head, node)
			lttng_bytecode_filter_sync_state(runtime);
	}
}
2599
2600 /*
2601 * Apply enablers to session events, adding events to session if need
2602 * be. It is required after each modification applied to an active
2603 * session, and right before session "start".
2604 * "lazy" sync means we only sync if required.
2605 * Should be called with sessions mutex held.
2606 */
2607 static
2608 void lttng_session_lazy_sync_event_enablers(struct lttng_session *session)
2609 {
2610 /* We can skip if session is not active */
2611 if (!session->active)
2612 return;
2613 lttng_session_sync_event_enablers(session);
2614 }
2615
/*
 * Apply the group's enablers to its triggers, creating/referencing
 * triggers as needed and syncing enabled state, tracepoint registration
 * and bytecode runtimes.
 * Should be called with sessions mutex held.
 */
static
void lttng_trigger_group_sync_enablers(struct lttng_trigger_group *trigger_group)
{
	struct lttng_trigger_enabler *trigger_enabler;
	struct lttng_trigger *trigger;

	/* First pass: create/reference triggers matching each enabler. */
	list_for_each_entry(trigger_enabler, &trigger_group->enablers_head, node)
		lttng_trigger_enabler_ref_triggers(trigger_enabler);

	/*
	 * For each trigger, if at least one of its enablers is enabled,
	 * we enable the trigger, else we disable it.
	 */
	list_for_each_entry(trigger, &trigger_group->triggers_head, list) {
		struct lttng_enabler_ref *enabler_ref;
		struct lttng_bytecode_runtime *runtime;
		int enabled = 0, has_enablers_without_bytecode = 0;

		switch (trigger->instrumentation) {
		case LTTNG_KERNEL_TRACEPOINT:
		case LTTNG_KERNEL_SYSCALL:
			/* Enable triggers */
			list_for_each_entry(enabler_ref,
					&trigger->enablers_ref_head, node) {
				if (enabler_ref->ref->enabled) {
					enabled = 1;
					break;
				}
			}
			break;
		default:
			/* Not handled with sync. */
			continue;
		}

		WRITE_ONCE(trigger->enabled, enabled);
		/*
		 * Sync tracepoint registration with trigger enabled
		 * state.
		 */
		if (enabled) {
			if (!trigger->registered)
				register_trigger(trigger);
		} else {
			if (trigger->registered)
				_lttng_trigger_unregister(trigger);
		}

		/*
		 * A trigger must fire when at least one enabled enabler has
		 * no filter attached: record that so the filter check can
		 * be bypassed at trace time.
		 */
		list_for_each_entry(enabler_ref,
				&trigger->enablers_ref_head, node) {
			if (enabler_ref->ref->enabled
					&& list_empty(&enabler_ref->ref->filter_bytecode_head)) {
				has_enablers_without_bytecode = 1;
				break;
			}
		}
		trigger->has_enablers_without_bytecode =
			has_enablers_without_bytecode;

		/* Sync filter bytecode runtimes. */
		list_for_each_entry(runtime,
				&trigger->filter_bytecode_runtime_head, node)
			lttng_bytecode_filter_sync_state(runtime);

		/* Sync capture bytecode runtimes. */
		list_for_each_entry(runtime,
				&trigger->capture_bytecode_runtime_head, node)
			lttng_bytecode_capture_sync_state(runtime);
	}
}
2687
2688 /*
2689 * Serialize at most one packet worth of metadata into a metadata
2690 * channel.
2691 * We grab the metadata cache mutex to get exclusive access to our metadata
2692 * buffer and to the metadata cache. Exclusive access to the metadata buffer
2693 * allows us to do racy operations such as looking for remaining space left in
2694 * packet and write, since mutual exclusion protects us from concurrent writes.
2695 * Mutual exclusion on the metadata cache allow us to read the cache content
2696 * without racing against reallocation of the cache by updates.
2697 * Returns the number of bytes written in the channel, 0 if no data
2698 * was written and a negative value on error.
2699 */
int lttng_metadata_output_channel(struct lttng_metadata_stream *stream,
		struct channel *chan, bool *coherent)
{
	struct lib_ring_buffer_ctx ctx;
	int ret = 0;
	size_t len, reserve_len;

	/*
	 * Ensure we support multiple get_next / put sequences followed by
	 * put_next. The metadata cache lock protects reading the metadata
	 * cache. It can indeed be read concurrently by "get_next_subbuf" and
	 * "flush" operations on the buffer invoked by different processes.
	 * Moreover, since the metadata cache memory can be reallocated, we
	 * need to have exclusive access against updates even though we only
	 * read it.
	 */
	mutex_lock(&stream->metadata_cache->lock);
	WARN_ON(stream->metadata_in < stream->metadata_out);
	if (stream->metadata_in != stream->metadata_out)
		goto end;

	/* Metadata regenerated, change the version. */
	if (stream->metadata_cache->version != stream->version)
		stream->version = stream->metadata_cache->version;

	/* Bytes of cached metadata not yet pushed into this stream. */
	len = stream->metadata_cache->metadata_written -
		stream->metadata_in;
	if (!len)
		goto end;
	/* Cap the reservation to the space left in the current packet. */
	reserve_len = min_t(size_t,
			stream->transport->ops.packet_avail_size(chan),
			len);
	lib_ring_buffer_ctx_init(&ctx, chan, NULL, reserve_len,
			sizeof(char), -1);
	/*
	 * If reservation failed, return an error to the caller.
	 */
	ret = stream->transport->ops.event_reserve(&ctx, 0);
	if (ret != 0) {
		printk(KERN_WARNING "LTTng: Metadata event reservation failed\n");
		stream->coherent = false;
		goto end;
	}
	stream->transport->ops.event_write(&ctx,
			stream->metadata_cache->data + stream->metadata_in,
			reserve_len);
	stream->transport->ops.event_commit(&ctx);
	stream->metadata_in += reserve_len;
	/*
	 * A partial write leaves the stream mid-declaration: mark it
	 * incoherent until the remainder has been output.
	 */
	if (reserve_len < len)
		stream->coherent = false;
	else
		stream->coherent = true;
	ret = reserve_len;

end:
	if (coherent)
		*coherent = stream->coherent;
	mutex_unlock(&stream->metadata_cache->lock);
	return ret;
}
2760
static
void lttng_metadata_begin(struct lttng_session *session)
{
	/*
	 * Begin/end pairs may nest: `producing` counts the depth, and the
	 * metadata cache lock is taken on the outermost begin only.
	 */
	if (atomic_inc_return(&session->metadata_cache->producing) == 1)
		mutex_lock(&session->metadata_cache->lock);
}
2767
static
void lttng_metadata_end(struct lttng_session *session)
{
	WARN_ON_ONCE(!atomic_read(&session->metadata_cache->producing));
	/*
	 * On the outermost end: wake up readers waiting for new metadata,
	 * then release the cache lock taken by the matching begin.
	 */
	if (atomic_dec_return(&session->metadata_cache->producing) == 0) {
		struct lttng_metadata_stream *stream;

		list_for_each_entry(stream, &session->metadata_cache->metadata_stream, list)
			wake_up_interruptible(&stream->read_wait);
		mutex_unlock(&session->metadata_cache->lock);
	}
}
2780
2781 /*
2782 * Write the metadata to the metadata cache.
2783 * Must be called with sessions_mutex held.
2784 * The metadata cache lock protects us from concurrent read access from
2785 * thread outputting metadata content to ring buffer.
2786 * The content of the printf is printed as a single atomic metadata
2787 * transaction.
2788 */
int lttng_metadata_printf(struct lttng_session *session,
		const char *fmt, ...)
{
	char *str;
	size_t len;
	va_list ap;

	WARN_ON_ONCE(!LTTNG_READ_ONCE(session->active));

	/* Format into a temporary kernel buffer first. */
	va_start(ap, fmt);
	str = kvasprintf(GFP_KERNEL, fmt, ap);
	va_end(ap);
	if (!str)
		return -ENOMEM;

	len = strlen(str);
	/* Caller must hold the cache lock via lttng_metadata_begin(). */
	WARN_ON_ONCE(!atomic_read(&session->metadata_cache->producing));
	/* Grow the cache if the formatted text does not fit. */
	if (session->metadata_cache->metadata_written + len >
			session->metadata_cache->cache_alloc) {
		char *tmp_cache_realloc;
		unsigned int tmp_cache_alloc_size;

		/* At least double the cache; grow more if len requires it. */
		tmp_cache_alloc_size = max_t(unsigned int,
				session->metadata_cache->cache_alloc + len,
				session->metadata_cache->cache_alloc << 1);
		tmp_cache_realloc = vzalloc(tmp_cache_alloc_size);
		if (!tmp_cache_realloc)
			goto err;
		if (session->metadata_cache->data) {
			memcpy(tmp_cache_realloc,
				session->metadata_cache->data,
				session->metadata_cache->cache_alloc);
			vfree(session->metadata_cache->data);
		}

		session->metadata_cache->cache_alloc = tmp_cache_alloc_size;
		session->metadata_cache->data = tmp_cache_realloc;
	}
	/* Append the formatted text to the cache. */
	memcpy(session->metadata_cache->data +
			session->metadata_cache->metadata_written,
		str, len);
	session->metadata_cache->metadata_written += len;
	kfree(str);

	return 0;

err:
	kfree(str);
	return -ENOMEM;
}
2839
2840 static
2841 int print_tabs(struct lttng_session *session, size_t nesting)
2842 {
2843 size_t i;
2844
2845 for (i = 0; i < nesting; i++) {
2846 int ret;
2847
2848 ret = lttng_metadata_printf(session, " ");
2849 if (ret) {
2850 return ret;
2851 }
2852 }
2853 return 0;
2854 }
2855
static
int lttng_field_name_statedump(struct lttng_session *session,
		const struct lttng_event_field *field,
		size_t nesting)
{
	/*
	 * Emit the field name (prefixed with '_' to avoid clashing with
	 * TSDL keywords) after its type declaration. `nesting` is unused:
	 * the name follows the type on the same output line.
	 */
	return lttng_metadata_printf(session, " _%s;\n", field->name);
}
2863
static
int _lttng_integer_type_statedump(struct lttng_session *session,
		const struct lttng_type *type,
		size_t nesting)
{
	int ret;

	WARN_ON_ONCE(type->atype != atype_integer);
	ret = print_tabs(session, nesting);
	if (ret)
		return ret;
	/*
	 * Emit the TSDL integer declaration. A byte_order attribute is
	 * only emitted when the field's byte order is reversed relative
	 * to the compile-time native byte order.
	 */
	ret = lttng_metadata_printf(session,
		"integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s }",
		type->u.integer.size,
		type->u.integer.alignment,
		type->u.integer.signedness,
		(type->u.integer.encoding == lttng_encode_none)
			? "none"
			: (type->u.integer.encoding == lttng_encode_UTF8)
				? "UTF8"
				: "ASCII",
		type->u.integer.base,
#if __BYTE_ORDER == __BIG_ENDIAN
		type->u.integer.reverse_byte_order ? " byte_order = le;" : ""
#else
		type->u.integer.reverse_byte_order ? " byte_order = be;" : ""
#endif
	);
	return ret;
}
2894
2895 /*
2896 * Must be called with sessions_mutex held.
2897 */
2898 static
2899 int _lttng_struct_type_statedump(struct lttng_session *session,
2900 const struct lttng_type *type,
2901 size_t nesting)
2902 {
2903 int ret;
2904 uint32_t i, nr_fields;
2905 unsigned int alignment;
2906
2907 WARN_ON_ONCE(type->atype != atype_struct_nestable);
2908
2909 ret = print_tabs(session, nesting);
2910 if (ret)
2911 return ret;
2912 ret = lttng_metadata_printf(session,
2913 "struct {\n");
2914 if (ret)
2915 return ret;
2916 nr_fields = type->u.struct_nestable.nr_fields;
2917 for (i = 0; i < nr_fields; i++) {
2918 const struct lttng_event_field *iter_field;
2919
2920 iter_field = &type->u.struct_nestable.fields[i];
2921 ret = _lttng_field_statedump(session, iter_field, nesting + 1);
2922 if (ret)
2923 return ret;
2924 }
2925 ret = print_tabs(session, nesting);
2926 if (ret)
2927 return ret;
2928 alignment = type->u.struct_nestable.alignment;
2929 if (alignment) {
2930 ret = lttng_metadata_printf(session,
2931 "} align(%u)",
2932 alignment);
2933 } else {
2934 ret = lttng_metadata_printf(session,
2935 "}");
2936 }
2937 return ret;
2938 }
2939
2940 /*
2941 * Must be called with sessions_mutex held.
2942 */
2943 static
2944 int _lttng_struct_field_statedump(struct lttng_session *session,
2945 const struct lttng_event_field *field,
2946 size_t nesting)
2947 {
2948 int ret;
2949
2950 ret = _lttng_struct_type_statedump(session,
2951 &field->type, nesting);
2952 if (ret)
2953 return ret;
2954 return lttng_field_name_statedump(session, field, nesting);
2955 }
2956
2957 /*
2958 * Must be called with sessions_mutex held.
2959 */
2960 static
2961 int _lttng_variant_type_statedump(struct lttng_session *session,
2962 const struct lttng_type *type,
2963 size_t nesting)
2964 {
2965 int ret;
2966 uint32_t i, nr_choices;
2967
2968 WARN_ON_ONCE(type->atype != atype_variant_nestable);
2969 /*
2970 * CTF 1.8 does not allow expressing nonzero variant alignment in a nestable way.
2971 */
2972 if (type->u.variant_nestable.alignment != 0)
2973 return -EINVAL;
2974 ret = print_tabs(session, nesting);
2975 if (ret)
2976 return ret;
2977 ret = lttng_metadata_printf(session,
2978 "variant <_%s> {\n",
2979 type->u.variant_nestable.tag_name);
2980 if (ret)
2981 return ret;
2982 nr_choices = type->u.variant_nestable.nr_choices;
2983 for (i = 0; i < nr_choices; i++) {
2984 const struct lttng_event_field *iter_field;
2985
2986 iter_field = &type->u.variant_nestable.choices[i];
2987 ret = _lttng_field_statedump(session, iter_field, nesting + 1);
2988 if (ret)
2989 return ret;
2990 }
2991 ret = print_tabs(session, nesting);
2992 if (ret)
2993 return ret;
2994 ret = lttng_metadata_printf(session,
2995 "}");
2996 return ret;
2997 }
2998
2999 /*
3000 * Must be called with sessions_mutex held.
3001 */
3002 static
3003 int _lttng_variant_field_statedump(struct lttng_session *session,
3004 const struct lttng_event_field *field,
3005 size_t nesting)
3006 {
3007 int ret;
3008
3009 ret = _lttng_variant_type_statedump(session,
3010 &field->type, nesting);
3011 if (ret)
3012 return ret;
3013 return lttng_field_name_statedump(session, field, nesting);
3014 }
3015
3016 /*
3017 * Must be called with sessions_mutex held.
3018 */
3019 static
3020 int _lttng_array_field_statedump(struct lttng_session *session,
3021 const struct lttng_event_field *field,
3022 size_t nesting)
3023 {
3024 int ret;
3025 const struct lttng_type *elem_type;
3026
3027 WARN_ON_ONCE(field->type.atype != atype_array_nestable);
3028
3029 if (field->type.u.array_nestable.alignment) {
3030 ret = print_tabs(session, nesting);
3031 if (ret)
3032 return ret;
3033 ret = lttng_metadata_printf(session,
3034 "struct { } align(%u) _%s_padding;\n",
3035 field->type.u.array_nestable.alignment * CHAR_BIT,
3036 field->name);
3037 if (ret)
3038 return ret;
3039 }
3040 /*
3041 * Nested compound types: Only array of structures and variants are
3042 * currently supported.
3043 */
3044 elem_type = field->type.u.array_nestable.elem_type;
3045 switch (elem_type->atype) {
3046 case atype_integer:
3047 case atype_struct_nestable:
3048 case atype_variant_nestable:
3049 ret = _lttng_type_statedump(session, elem_type, nesting);
3050 if (ret)
3051 return ret;
3052 break;
3053
3054 default:
3055 return -EINVAL;
3056 }
3057 ret = lttng_metadata_printf(session,
3058 " _%s[%u];\n",
3059 field->name,
3060 field->type.u.array_nestable.length);
3061 return ret;
3062 }
3063
3064 /*
3065 * Must be called with sessions_mutex held.
3066 */
3067 static
3068 int _lttng_sequence_field_statedump(struct lttng_session *session,
3069 const struct lttng_event_field *field,
3070 size_t nesting)
3071 {
3072 int ret;
3073 const char *length_name;
3074 const struct lttng_type *elem_type;
3075
3076 WARN_ON_ONCE(field->type.atype != atype_sequence_nestable);
3077
3078 length_name = field->type.u.sequence_nestable.length_name;
3079
3080 if (field->type.u.sequence_nestable.alignment) {
3081 ret = print_tabs(session, nesting);
3082 if (ret)
3083 return ret;
3084 ret = lttng_metadata_printf(session,
3085 "struct { } align(%u) _%s_padding;\n",
3086 field->type.u.sequence_nestable.alignment * CHAR_BIT,
3087 field->name);
3088 if (ret)
3089 return ret;
3090 }
3091
3092 /*
3093 * Nested compound types: Only array of structures and variants are
3094 * currently supported.
3095 */
3096 elem_type = field->type.u.sequence_nestable.elem_type;
3097 switch (elem_type->atype) {
3098 case atype_integer:
3099 case atype_struct_nestable:
3100 case atype_variant_nestable:
3101 ret = _lttng_type_statedump(session, elem_type, nesting);
3102 if (ret)
3103 return ret;
3104 break;
3105
3106 default:
3107 return -EINVAL;
3108 }
3109 ret = lttng_metadata_printf(session,
3110 " _%s[ _%s ];\n",
3111 field->name,
3112 field->type.u.sequence_nestable.length_name);
3113 return ret;
3114 }
3115
3116 /*
3117 * Must be called with sessions_mutex held.
3118 */
static
int _lttng_enum_type_statedump(struct lttng_session *session,
		const struct lttng_type *type,
		size_t nesting)
{
	const struct lttng_enum_desc *enum_desc;
	const struct lttng_type *container_type;
	int ret;
	unsigned int i, nr_entries;

	/* The enum container (underlying type) must be an integer. */
	container_type = type->u.enum_nestable.container_type;
	if (container_type->atype != atype_integer) {
		ret = -EINVAL;
		goto end;
	}
	enum_desc = type->u.enum_nestable.desc;
	nr_entries = enum_desc->nr_entries;

	ret = print_tabs(session, nesting);
	if (ret)
		goto end;
	ret = lttng_metadata_printf(session, "enum : ");
	if (ret)
		goto end;
	/* Emit the container integer declaration inline (nesting 0). */
	ret = _lttng_integer_type_statedump(session, container_type, 0);
	if (ret)
		goto end;
	ret = lttng_metadata_printf(session, " {\n");
	if (ret)
		goto end;
	/* Dump all entries */
	for (i = 0; i < nr_entries; i++) {
		const struct lttng_enum_entry *entry = &enum_desc->entries[i];
		int j, len;

		ret = print_tabs(session, nesting + 1);
		if (ret)
			goto end;
		ret = lttng_metadata_printf(session,
				"\"");
		if (ret)
			goto end;
		len = strlen(entry->string);
		/* Escape the characters '"' and '\' in the entry label. */
		for (j = 0; j < len; j++) {
			char c = entry->string[j];

			switch (c) {
			case '"':
				ret = lttng_metadata_printf(session,
						"\\\"");
				break;
			case '\\':
				ret = lttng_metadata_printf(session,
						"\\\\");
				break;
			default:
				ret = lttng_metadata_printf(session,
						"%c", c);
				break;
			}
			if (ret)
				goto end;
		}
		ret = lttng_metadata_printf(session, "\"");
		if (ret)
			goto end;

		/* Auto-numbered entries carry no explicit value. */
		if (entry->options.is_auto) {
			ret = lttng_metadata_printf(session, ",\n");
			if (ret)
				goto end;
		} else {
			ret = lttng_metadata_printf(session,
					" = ");
			if (ret)
				goto end;
			/* Print the start of the entry's value range. */
			if (entry->start.signedness)
				ret = lttng_metadata_printf(session,
					"%lld", (long long) entry->start.value);
			else
				ret = lttng_metadata_printf(session,
					"%llu", entry->start.value);
			if (ret)
				goto end;
			/* Single value vs. "start ... end" range. */
			if (entry->start.signedness == entry->end.signedness &&
					entry->start.value
						== entry->end.value) {
				ret = lttng_metadata_printf(session,
					",\n");
			} else {
				if (entry->end.signedness) {
					ret = lttng_metadata_printf(session,
						" ... %lld,\n",
						(long long) entry->end.value);
				} else {
					ret = lttng_metadata_printf(session,
						" ... %llu,\n",
						entry->end.value);
				}
			}
			if (ret)
				goto end;
		}
	}
	ret = print_tabs(session, nesting);
	if (ret)
		goto end;
	ret = lttng_metadata_printf(session, "}");
end:
	return ret;
}
3231
3232 /*
3233 * Must be called with sessions_mutex held.
3234 */
3235 static
3236 int _lttng_enum_field_statedump(struct lttng_session *session,
3237 const struct lttng_event_field *field,
3238 size_t nesting)
3239 {
3240 int ret;
3241
3242 ret = _lttng_enum_type_statedump(session, &field->type, nesting);
3243 if (ret)
3244 return ret;
3245 return lttng_field_name_statedump(session, field, nesting);
3246 }
3247
3248 static
3249 int _lttng_integer_field_statedump(struct lttng_session *session,
3250 const struct lttng_event_field *field,
3251 size_t nesting)
3252 {
3253 int ret;
3254
3255 ret = _lttng_integer_type_statedump(session, &field->type, nesting);
3256 if (ret)
3257 return ret;
3258 return lttng_field_name_statedump(session, field, nesting);
3259 }
3260
3261 static
3262 int _lttng_string_type_statedump(struct lttng_session *session,
3263 const struct lttng_type *type,
3264 size_t nesting)
3265 {
3266 int ret;
3267
3268 WARN_ON_ONCE(type->atype != atype_string);
3269 /* Default encoding is UTF8 */
3270 ret = print_tabs(session, nesting);
3271 if (ret)
3272 return ret;
3273 ret = lttng_metadata_printf(session,
3274 "string%s",
3275 type->u.string.encoding == lttng_encode_ASCII ?
3276 " { encoding = ASCII; }" : "");
3277 return ret;
3278 }
3279
3280 static
3281 int _lttng_string_field_statedump(struct lttng_session *session,
3282 const struct lttng_event_field *field,
3283 size_t nesting)
3284 {
3285 int ret;
3286
3287 WARN_ON_ONCE(field->type.atype != atype_string);
3288 ret = _lttng_string_type_statedump(session, &field->type, nesting);
3289 if (ret)
3290 return ret;
3291 return lttng_field_name_statedump(session, field, nesting);
3292 }
3293
3294 /*
3295 * Must be called with sessions_mutex held.
3296 */
3297 static
3298 int _lttng_type_statedump(struct lttng_session *session,
3299 const struct lttng_type *type,
3300 size_t nesting)
3301 {
3302 int ret = 0;
3303
3304 switch (type->atype) {
3305 case atype_integer:
3306 ret = _lttng_integer_type_statedump(session, type, nesting);
3307 break;
3308 case atype_enum_nestable:
3309 ret = _lttng_enum_type_statedump(session, type, nesting);
3310 break;
3311 case atype_string:
3312 ret = _lttng_string_type_statedump(session, type, nesting);
3313 break;
3314 case atype_struct_nestable:
3315 ret = _lttng_struct_type_statedump(session, type, nesting);
3316 break;
3317 case atype_variant_nestable:
3318 ret = _lttng_variant_type_statedump(session, type, nesting);
3319 break;
3320
3321 /* Nested arrays and sequences are not supported yet. */
3322 case atype_array_nestable:
3323 case atype_sequence_nestable:
3324 default:
3325 WARN_ON_ONCE(1);
3326 return -EINVAL;
3327 }
3328 return ret;
3329 }
3330
3331 /*
3332 * Must be called with sessions_mutex held.
3333 */
3334 static
3335 int _lttng_field_statedump(struct lttng_session *session,
3336 const struct lttng_event_field *field,
3337 size_t nesting)
3338 {
3339 int ret = 0;
3340
3341 switch (field->type.atype) {
3342 case atype_integer:
3343 ret = _lttng_integer_field_statedump(session, field, nesting);
3344 break;
3345 case atype_enum_nestable:
3346 ret = _lttng_enum_field_statedump(session, field, nesting);
3347 break;
3348 case atype_string:
3349 ret = _lttng_string_field_statedump(session, field, nesting);
3350 break;
3351 case atype_struct_nestable:
3352 ret = _lttng_struct_field_statedump(session, field, nesting);
3353 break;
3354 case atype_array_nestable:
3355 ret = _lttng_array_field_statedump(session, field, nesting);
3356 break;
3357 case atype_sequence_nestable:
3358 ret = _lttng_sequence_field_statedump(session, field, nesting);
3359 break;
3360 case atype_variant_nestable:
3361 ret = _lttng_variant_field_statedump(session, field, nesting);
3362 break;
3363
3364 default:
3365 WARN_ON_ONCE(1);
3366 return -EINVAL;
3367 }
3368 return ret;
3369 }
3370
3371 static
3372 int _lttng_context_metadata_statedump(struct lttng_session *session,
3373 struct lttng_ctx *ctx)
3374 {
3375 int ret = 0;
3376 int i;
3377
3378 if (!ctx)
3379 return 0;
3380 for (i = 0; i < ctx->nr_fields; i++) {
3381 const struct lttng_ctx_field *field = &ctx->fields[i];
3382
3383 ret = _lttng_field_statedump(session, &field->event_field, 2);
3384 if (ret)
3385 return ret;
3386 }
3387 return ret;
3388 }
3389
3390 static
3391 int _lttng_fields_metadata_statedump(struct lttng_session *session,
3392 struct lttng_event *event)
3393 {
3394 const struct lttng_event_desc *desc = event->desc;
3395 int ret = 0;
3396 int i;
3397
3398 for (i = 0; i < desc->nr_fields; i++) {
3399 const struct lttng_event_field *field = &desc->fields[i];
3400
3401 ret = _lttng_field_statedump(session, field, 2);
3402 if (ret)
3403 return ret;
3404 }
3405 return ret;
3406 }
3407
/*
 * Must be called with sessions_mutex held.
 * The entire event metadata is printed as a single atomic metadata
 * transaction.
 *
 * Emits the TSDL "event" block (name, id, stream_id, optional context
 * struct, payload fields struct) for one event. Returns 0 on success,
 * or the first failing lttng_metadata_printf() error.
 */
static
int _lttng_event_metadata_statedump(struct lttng_session *session,
		struct lttng_channel *chan,
		struct lttng_event *event)
{
	int ret = 0;

	/* Dump each event only once, and only while the session is active. */
	if (event->metadata_dumped || !LTTNG_READ_ONCE(session->active))
		return 0;
	/* The metadata channel does not describe its own events. */
	if (chan->channel_type == METADATA_CHANNEL)
		return 0;

	/* Open the metadata transaction; closed by lttng_metadata_end(). */
	lttng_metadata_begin(session);

	ret = lttng_metadata_printf(session,
		"event {\n"
		"	name = \"%s\";\n"
		"	id = %u;\n"
		"	stream_id = %u;\n",
		event->desc->name,
		event->id,
		event->chan->id);
	if (ret)
		goto end;

	/* Context struct is emitted only if the event has a context. */
	if (event->ctx) {
		ret = lttng_metadata_printf(session,
			"	context := struct {\n");
		if (ret)
			goto end;
	}
	ret = _lttng_context_metadata_statedump(session, event->ctx);
	if (ret)
		goto end;
	if (event->ctx) {
		ret = lttng_metadata_printf(session,
			"	};\n");
		if (ret)
			goto end;
	}

	ret = lttng_metadata_printf(session,
		"	fields := struct {\n"
		);
	if (ret)
		goto end;

	ret = _lttng_fields_metadata_statedump(session, event);
	if (ret)
		goto end;

	/*
	 * LTTng space reservation can only reserve multiples of the
	 * byte size.
	 */
	ret = lttng_metadata_printf(session,
		"	};\n"
		"};\n\n");
	if (ret)
		goto end;

	/* Only mark as dumped once the whole block was written. */
	event->metadata_dumped = 1;
end:
	lttng_metadata_end(session);
	return ret;

}
3480
3481 /*
3482 * Must be called with sessions_mutex held.
3483 * The entire channel metadata is printed as a single atomic metadata
3484 * transaction.
3485 */
3486 static
3487 int _lttng_channel_metadata_statedump(struct lttng_session *session,
3488 struct lttng_channel *chan)
3489 {
3490 int ret = 0;
3491
3492 if (chan->metadata_dumped || !LTTNG_READ_ONCE(session->active))
3493 return 0;
3494
3495 if (chan->channel_type == METADATA_CHANNEL)
3496 return 0;
3497
3498 lttng_metadata_begin(session);
3499
3500 WARN_ON_ONCE(!chan->header_type);
3501 ret = lttng_metadata_printf(session,
3502 "stream {\n"
3503 " id = %u;\n"
3504 " event.header := %s;\n"
3505 " packet.context := struct packet_context;\n",
3506 chan->id,
3507 chan->header_type == 1 ? "struct event_header_compact" :
3508 "struct event_header_large");
3509 if (ret)
3510 goto end;
3511
3512 if (chan->ctx) {
3513 ret = lttng_metadata_printf(session,
3514 " event.context := struct {\n");
3515 if (ret)
3516 goto end;
3517 }
3518 ret = _lttng_context_metadata_statedump(session, chan->ctx);
3519 if (ret)
3520 goto end;
3521 if (chan->ctx) {
3522 ret = lttng_metadata_printf(session,
3523 " };\n");
3524 if (ret)
3525 goto end;
3526 }
3527
3528 ret = lttng_metadata_printf(session,
3529 "};\n\n");
3530
3531 chan->metadata_dumped = 1;
3532 end:
3533 lttng_metadata_end(session);
3534 return ret;
3535 }
3536
/*
 * Must be called with sessions_mutex held.
 *
 * Declares the shared "struct packet_context" TSDL type referenced by
 * every stream block: begin/end timestamps (in trace clock units),
 * content/packet sizes, packet sequence number, discarded event count
 * and originating CPU id.
 */
static
int _lttng_stream_packet_context_declare(struct lttng_session *session)
{
	return lttng_metadata_printf(session,
		"struct packet_context {\n"
		"	uint64_clock_monotonic_t timestamp_begin;\n"
		"	uint64_clock_monotonic_t timestamp_end;\n"
		"	uint64_t content_size;\n"
		"	uint64_t packet_size;\n"
		"	uint64_t packet_seq_num;\n"
		"	unsigned long events_discarded;\n"
		"	uint32_t cpu_id;\n"
		"};\n\n"
		);
}
3555
/*
 * Compact header:
 * id: range: 0 - 30.
 * id 31 is reserved to indicate an extended header.
 *
 * Large header:
 * id: range: 0 - 65534.
 * id 65535 is reserved to indicate an extended header.
 *
 * Must be called with sessions_mutex held.
 *
 * Declares the two TSDL event header variants a stream may reference;
 * the reserved id value selects the "extended" branch carrying a full
 * 32-bit id and 64-bit timestamp. Alignment of each header matches the
 * alignment of its leading integer field (uint32_t resp. uint16_t).
 */
static
int _lttng_event_header_declare(struct lttng_session *session)
{
	return lttng_metadata_printf(session,
	"struct event_header_compact {\n"
	"	enum : uint5_t { compact = 0 ... 30, extended = 31 } id;\n"
	"	variant <id> {\n"
	"		struct {\n"
	"			uint27_clock_monotonic_t timestamp;\n"
	"		} compact;\n"
	"		struct {\n"
	"			uint32_t id;\n"
	"			uint64_clock_monotonic_t timestamp;\n"
	"		} extended;\n"
	"	} v;\n"
	"} align(%u);\n"
	"\n"
	"struct event_header_large {\n"
	"	enum : uint16_t { compact = 0 ... 65534, extended = 65535 } id;\n"
	"	variant <id> {\n"
	"		struct {\n"
	"			uint32_clock_monotonic_t timestamp;\n"
	"		} compact;\n"
	"		struct {\n"
	"			uint32_t id;\n"
	"			uint64_clock_monotonic_t timestamp;\n"
	"		} extended;\n"
	"	} v;\n"
	"} align(%u);\n\n",
	lttng_alignof(uint32_t) * CHAR_BIT,
	lttng_alignof(uint16_t) * CHAR_BIT
	);
}
3600
3601 /*
3602 * Approximation of NTP time of day to clock monotonic correlation,
3603 * taken at start of trace.
3604 * Yes, this is only an approximation. Yes, we can (and will) do better
3605 * in future versions.
3606 * This function may return a negative offset. It may happen if the
3607 * system sets the REALTIME clock to 0 after boot.
3608 *
3609 * Use 64bit timespec on kernels that have it, this makes 32bit arch
3610 * y2038 compliant.
3611 */
3612 static
3613 int64_t measure_clock_offset(void)
3614 {
3615 uint64_t monotonic_avg, monotonic[2], realtime;
3616 uint64_t tcf = trace_clock_freq();
3617 int64_t offset;
3618 unsigned long flags;
3619 #ifdef LTTNG_KERNEL_HAS_TIMESPEC64
3620 struct timespec64 rts = { 0, 0 };
3621 #else
3622 struct timespec rts = { 0, 0 };
3623 #endif
3624
3625 /* Disable interrupts to increase correlation precision. */
3626 local_irq_save(flags);
3627 monotonic[0] = trace_clock_read64();
3628 #ifdef LTTNG_KERNEL_HAS_TIMESPEC64
3629 ktime_get_real_ts64(&rts);
3630 #else
3631 getnstimeofday(&rts);
3632 #endif
3633 monotonic[1] = trace_clock_read64();
3634 local_irq_restore(flags);
3635
3636 monotonic_avg = (monotonic[0] + monotonic[1]) >> 1;
3637 realtime = (uint64_t) rts.tv_sec * tcf;
3638 if (tcf == NSEC_PER_SEC) {
3639 realtime += rts.tv_nsec;
3640 } else {
3641 uint64_t n = rts.tv_nsec * tcf;
3642
3643 do_div(n, NSEC_PER_SEC);
3644 realtime += n;
3645 }
3646 offset = (int64_t) realtime - monotonic_avg;
3647 return offset;
3648 }
3649
3650 static
3651 int print_escaped_ctf_string(struct lttng_session *session, const char *string)
3652 {
3653 int ret = 0;
3654 size_t i;
3655 char cur;
3656
3657 i = 0;
3658 cur = string[i];
3659 while (cur != '\0') {
3660 switch (cur) {
3661 case '\n':
3662 ret = lttng_metadata_printf(session, "%s", "\\n");
3663 break;
3664 case '\\':
3665 case '"':
3666 ret = lttng_metadata_printf(session, "%c", '\\');
3667 if (ret)
3668 goto error;
3669 /* We still print the current char */
3670 /* Fallthrough */
3671 default:
3672 ret = lttng_metadata_printf(session, "%c", cur);
3673 break;
3674 }
3675
3676 if (ret)
3677 goto error;
3678
3679 cur = string[++i];
3680 }
3681 error:
3682 return ret;
3683 }
3684
/*
 * Emit one 'env'-section entry of the form `<field> = "<value>";` into
 * the session metadata, escaping the value with
 * print_escaped_ctf_string(). Returns 0 on success, or the first
 * failing write's error.
 */
static
int print_metadata_escaped_field(struct lttng_session *session, const char *field,
		const char *field_value)
{
	int err;

	err = lttng_metadata_printf(session, "	%s = \"", field);
	if (err)
		return err;

	err = print_escaped_ctf_string(session, field_value);
	if (err)
		return err;

	return lttng_metadata_printf(session, "\";\n");
}
3704
/*
 * Output metadata into this session's metadata buffers.
 * Must be called with sessions_mutex held.
 *
 * Emits, in order: base integer typealiases, the trace block (UUID,
 * byte order, packet header), the env block, the clock block and its
 * clock-mapped typealiases, the packet context and event header
 * declarations — all skipped once session->metadata_dumped is set —
 * then the per-channel and per-event statedumps (which are themselves
 * dump-once). The whole dump is wrapped in one metadata transaction.
 */
static
int _lttng_session_metadata_statedump(struct lttng_session *session)
{
	unsigned char *uuid_c = session->uuid.b;
	/*
	 * NOTE(review): uuid_s/clock_uuid_s are unsigned char but are
	 * passed to snprintf()/%s which expect char * — relies on an
	 * implicit pointer-sign conversion; confirm this is intended.
	 */
	unsigned char uuid_s[37], clock_uuid_s[BOOT_ID_LEN];
	const char *product_uuid;
	struct lttng_channel *chan;
	struct lttng_event *event;
	int ret = 0;

	if (!LTTNG_READ_ONCE(session->active))
		return 0;

	/* Open the metadata transaction; closed by lttng_metadata_end(). */
	lttng_metadata_begin(session);

	/* Session-level blocks are emitted only once per session. */
	if (session->metadata_dumped)
		goto skip_session;

	/* Format the session UUID as the canonical 36-char string. */
	snprintf(uuid_s, sizeof(uuid_s),
		"%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x",
		uuid_c[0], uuid_c[1], uuid_c[2], uuid_c[3],
		uuid_c[4], uuid_c[5], uuid_c[6], uuid_c[7],
		uuid_c[8], uuid_c[9], uuid_c[10], uuid_c[11],
		uuid_c[12], uuid_c[13], uuid_c[14], uuid_c[15]);

	ret = lttng_metadata_printf(session,
		"typealias integer { size = 8; align = %u; signed = false; } := uint8_t;\n"
		"typealias integer { size = 16; align = %u; signed = false; } := uint16_t;\n"
		"typealias integer { size = 32; align = %u; signed = false; } := uint32_t;\n"
		"typealias integer { size = 64; align = %u; signed = false; } := uint64_t;\n"
		"typealias integer { size = %u; align = %u; signed = false; } := unsigned long;\n"
		"typealias integer { size = 5; align = 1; signed = false; } := uint5_t;\n"
		"typealias integer { size = 27; align = 1; signed = false; } := uint27_t;\n"
		"\n"
		"trace {\n"
		"	major = %u;\n"
		"	minor = %u;\n"
		"	uuid = \"%s\";\n"
		"	byte_order = %s;\n"
		"	packet.header := struct {\n"
		"		uint32_t magic;\n"
		"		uint8_t  uuid[16];\n"
		"		uint32_t stream_id;\n"
		"		uint64_t stream_instance_id;\n"
		"	};\n"
		"};\n\n",
		lttng_alignof(uint8_t) * CHAR_BIT,
		lttng_alignof(uint16_t) * CHAR_BIT,
		lttng_alignof(uint32_t) * CHAR_BIT,
		lttng_alignof(uint64_t) * CHAR_BIT,
		sizeof(unsigned long) * CHAR_BIT,
		lttng_alignof(unsigned long) * CHAR_BIT,
		CTF_SPEC_MAJOR,
		CTF_SPEC_MINOR,
		uuid_s,
#if __BYTE_ORDER == __BIG_ENDIAN
		"be"
#else
		"le"
#endif
		);
	if (ret)
		goto end;

	ret = lttng_metadata_printf(session,
		"env {\n"
		"	hostname = \"%s\";\n"
		"	domain = \"kernel\";\n"
		"	sysname = \"%s\";\n"
		"	kernel_release = \"%s\";\n"
		"	kernel_version = \"%s\";\n"
		"	tracer_name = \"lttng-modules\";\n"
		"	tracer_major = %d;\n"
		"	tracer_minor = %d;\n"
		"	tracer_patchlevel = %d;\n"
		"	trace_buffering_scheme = \"global\";\n",
		current->nsproxy->uts_ns->name.nodename,
		utsname()->sysname,
		utsname()->release,
		utsname()->version,
		LTTNG_MODULES_MAJOR_VERSION,
		LTTNG_MODULES_MINOR_VERSION,
		LTTNG_MODULES_PATCHLEVEL_VERSION
		);
	if (ret)
		goto end;

	/* Session name and creation time may contain arbitrary chars: escape. */
	ret = print_metadata_escaped_field(session, "trace_name", session->name);
	if (ret)
		goto end;
	ret = print_metadata_escaped_field(session, "trace_creation_datetime",
			session->creation_time);
	if (ret)
		goto end;

	/* Add the product UUID to the 'env' section */
	product_uuid = dmi_get_system_info(DMI_PRODUCT_UUID);
	if (product_uuid) {
		ret = lttng_metadata_printf(session,
				"	product_uuid = \"%s\";\n",
				product_uuid
				);
		if (ret)
			goto end;
	}

	/* Close the 'env' section */
	ret = lttng_metadata_printf(session, "};\n\n");
	if (ret)
		goto end;

	ret = lttng_metadata_printf(session,
		"clock {\n"
		"	name = \"%s\";\n",
		trace_clock_name()
		);
	if (ret)
		goto end;

	/* Clock UUID is optional; emit it only when available. */
	if (!trace_clock_uuid(clock_uuid_s)) {
		ret = lttng_metadata_printf(session,
			"	uuid = \"%s\";\n",
			clock_uuid_s
			);
		if (ret)
			goto end;
	}

	ret = lttng_metadata_printf(session,
		"	description = \"%s\";\n"
		"	freq = %llu; /* Frequency, in Hz */\n"
		"	/* clock value offset from Epoch is: offset * (1/freq) */\n"
		"	offset = %lld;\n"
		"};\n\n",
		trace_clock_description(),
		(unsigned long long) trace_clock_freq(),
		(long long) measure_clock_offset()
		);
	if (ret)
		goto end;

	/* Typealiases mapping integer types onto the clock declared above. */
	ret = lttng_metadata_printf(session,
		"typealias integer {\n"
		"	size = 27; align = 1; signed = false;\n"
		"	map = clock.%s.value;\n"
		"} := uint27_clock_monotonic_t;\n"
		"\n"
		"typealias integer {\n"
		"	size = 32; align = %u; signed = false;\n"
		"	map = clock.%s.value;\n"
		"} := uint32_clock_monotonic_t;\n"
		"\n"
		"typealias integer {\n"
		"	size = 64; align = %u; signed = false;\n"
		"	map = clock.%s.value;\n"
		"} := uint64_clock_monotonic_t;\n\n",
		trace_clock_name(),
		lttng_alignof(uint32_t) * CHAR_BIT,
		trace_clock_name(),
		lttng_alignof(uint64_t) * CHAR_BIT,
		trace_clock_name()
		);
	if (ret)
		goto end;

	ret = _lttng_stream_packet_context_declare(session);
	if (ret)
		goto end;

	ret = _lttng_event_header_declare(session);
	if (ret)
		goto end;

skip_session:
	/* Channel and event statedumps are individually dump-once. */
	list_for_each_entry(chan, &session->chan, list) {
		ret = _lttng_channel_metadata_statedump(session, chan);
		if (ret)
			goto end;
	}

	list_for_each_entry(event, &session->events, list) {
		ret = _lttng_event_metadata_statedump(session, event->chan, event);
		if (ret)
			goto end;
	}
	session->metadata_dumped = 1;
end:
	lttng_metadata_end(session);
	return ret;
}
3899
3900 /**
3901 * lttng_transport_register - LTT transport registration
3902 * @transport: transport structure
3903 *
3904 * Registers a transport which can be used as output to extract the data out of
3905 * LTTng. The module calling this registration function must ensure that no
3906 * trap-inducing code will be executed by the transport functions. E.g.
3907 * vmalloc_sync_mappings() must be called between a vmalloc and the moment the memory
3908 * is made visible to the transport function. This registration acts as a
3909 * vmalloc_sync_mappings. Therefore, only if the module allocates virtual memory
3910 * after its registration must it synchronize the TLBs.
3911 */
3912 void lttng_transport_register(struct lttng_transport *transport)
3913 {
3914 /*
3915 * Make sure no page fault can be triggered by the module about to be
3916 * registered. We deal with this here so we don't have to call
3917 * vmalloc_sync_mappings() in each module's init.
3918 */
3919 wrapper_vmalloc_sync_mappings();
3920
3921 mutex_lock(&sessions_mutex);
3922 list_add_tail(&transport->node, &lttng_transport_list);
3923 mutex_unlock(&sessions_mutex);
3924 }
3925 EXPORT_SYMBOL_GPL(lttng_transport_register);
3926
3927 /**
3928 * lttng_transport_unregister - LTT transport unregistration
3929 * @transport: transport structure
3930 */
3931 void lttng_transport_unregister(struct lttng_transport *transport)
3932 {
3933 mutex_lock(&sessions_mutex);
3934 list_del(&transport->node);
3935 mutex_unlock(&sessions_mutex);
3936 }
3937 EXPORT_SYMBOL_GPL(lttng_transport_unregister);
3938
/**
 * lttng_counter_transport_register - LTTng counter transport registration
 * @transport: counter transport structure
 *
 * Registers a counter transport. Same TLB-synchronization contract as
 * lttng_transport_register(): this call acts as a
 * vmalloc_sync_mappings, so only virtual memory allocated after
 * registration needs explicit synchronization by the caller.
 */
void lttng_counter_transport_register(struct lttng_counter_transport *transport)
{
	/*
	 * Make sure no page fault can be triggered by the module about to be
	 * registered. We deal with this here so we don't have to call
	 * vmalloc_sync_mappings() in each module's init.
	 */
	wrapper_vmalloc_sync_mappings();

	/* Publish on the global counter transport list under the sessions mutex. */
	mutex_lock(&sessions_mutex);
	list_add_tail(&transport->node, &lttng_counter_transport_list);
	mutex_unlock(&sessions_mutex);
}
EXPORT_SYMBOL_GPL(lttng_counter_transport_register);
3953
/**
 * lttng_counter_transport_unregister - LTTng counter transport unregistration
 * @transport: counter transport structure
 *
 * Removes the counter transport from the global list, under the
 * sessions mutex.
 */
void lttng_counter_transport_unregister(struct lttng_counter_transport *transport)
{
	mutex_lock(&sessions_mutex);
	list_del(&transport->node);
	mutex_unlock(&sessions_mutex);
}
EXPORT_SYMBOL_GPL(lttng_counter_transport_unregister);
3961
3962 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
3963
3964 enum cpuhp_state lttng_hp_prepare;
3965 enum cpuhp_state lttng_hp_online;
3966
3967 static int lttng_hotplug_prepare(unsigned int cpu, struct hlist_node *node)
3968 {
3969 struct lttng_cpuhp_node *lttng_node;
3970
3971 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
3972 switch (lttng_node->component) {
3973 case LTTNG_RING_BUFFER_FRONTEND:
3974 return 0;
3975 case LTTNG_RING_BUFFER_BACKEND:
3976 return lttng_cpuhp_rb_backend_prepare(cpu, lttng_node);
3977 case LTTNG_RING_BUFFER_ITER:
3978 return 0;
3979 case LTTNG_CONTEXT_PERF_COUNTERS:
3980 return 0;
3981 default:
3982 return -EINVAL;
3983 }
3984 }
3985
3986 static int lttng_hotplug_dead(unsigned int cpu, struct hlist_node *node)
3987 {
3988 struct lttng_cpuhp_node *lttng_node;
3989
3990 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
3991 switch (lttng_node->component) {
3992 case LTTNG_RING_BUFFER_FRONTEND:
3993 return lttng_cpuhp_rb_frontend_dead(cpu, lttng_node);
3994 case LTTNG_RING_BUFFER_BACKEND:
3995 return 0;
3996 case LTTNG_RING_BUFFER_ITER:
3997 return 0;
3998 case LTTNG_CONTEXT_PERF_COUNTERS:
3999 return lttng_cpuhp_perf_counter_dead(cpu, lttng_node);
4000 default:
4001 return -EINVAL;
4002 }
4003 }
4004
4005 static int lttng_hotplug_online(unsigned int cpu, struct hlist_node *node)
4006 {
4007 struct lttng_cpuhp_node *lttng_node;
4008
4009 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
4010 switch (lttng_node->component) {
4011 case LTTNG_RING_BUFFER_FRONTEND:
4012 return lttng_cpuhp_rb_frontend_online(cpu, lttng_node);
4013 case LTTNG_RING_BUFFER_BACKEND:
4014 return 0;
4015 case LTTNG_RING_BUFFER_ITER:
4016 return lttng_cpuhp_rb_iter_online(cpu, lttng_node);
4017 case LTTNG_CONTEXT_PERF_COUNTERS:
4018 return lttng_cpuhp_perf_counter_online(cpu, lttng_node);
4019 default:
4020 return -EINVAL;
4021 }
4022 }
4023
4024 static int lttng_hotplug_offline(unsigned int cpu, struct hlist_node *node)
4025 {
4026 struct lttng_cpuhp_node *lttng_node;
4027
4028 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
4029 switch (lttng_node->component) {
4030 case LTTNG_RING_BUFFER_FRONTEND:
4031 return lttng_cpuhp_rb_frontend_offline(cpu, lttng_node);
4032 case LTTNG_RING_BUFFER_BACKEND:
4033 return 0;
4034 case LTTNG_RING_BUFFER_ITER:
4035 return 0;
4036 case LTTNG_CONTEXT_PERF_COUNTERS:
4037 return 0;
4038 default:
4039 return -EINVAL;
4040 }
4041 }
4042
/*
 * Register the two multi-instance CPU hotplug states used by LTTng:
 * a prepare/dead pair and an online/offline pair. The dynamically
 * allocated state values are stored in lttng_hp_prepare/lttng_hp_online
 * and also pushed to the ring buffer layer. On failure of the second
 * registration, the first is rolled back. Returns 0 on success,
 * negative error otherwise.
 */
static int __init lttng_init_cpu_hotplug(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_BP_PREPARE_DYN, "lttng:prepare",
			lttng_hotplug_prepare,
			lttng_hotplug_dead);
	if (ret < 0) {
		return ret;
	}
	/* Positive return value is the dynamically allocated state number. */
	lttng_hp_prepare = ret;
	lttng_rb_set_hp_prepare(ret);

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "lttng:online",
			lttng_hotplug_online,
			lttng_hotplug_offline);
	if (ret < 0) {
		/* Roll back the prepare state registered above. */
		cpuhp_remove_multi_state(lttng_hp_prepare);
		lttng_hp_prepare = 0;
		return ret;
	}
	lttng_hp_online = ret;
	lttng_rb_set_hp_online(ret);

	return 0;
}
4069
/*
 * Unregister the LTTng CPU hotplug states in reverse registration
 * order, clearing the ring buffer layer's copies first.
 */
static void __exit lttng_exit_cpu_hotplug(void)
{
	lttng_rb_set_hp_online(0);
	cpuhp_remove_multi_state(lttng_hp_online);
	lttng_rb_set_hp_prepare(0);
	cpuhp_remove_multi_state(lttng_hp_prepare);
}
4077
4078 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
/* No-op stubs for kernels without the multi-instance cpuhp API (< 4.10). */
static int lttng_init_cpu_hotplug(void)
{
	return 0;
}
static void lttng_exit_cpu_hotplug(void)
{
}
4086 #endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
4087
4088
/*
 * Module init: set up wrappers, probes, contexts, tracepoints, the
 * event and trigger slab caches, the ABI, the logger and CPU hotplug
 * support — in that order — then announce the loaded version. Each
 * failure unwinds everything initialized after the corresponding
 * error label, in reverse order. Returns 0 on success.
 */
static int __init lttng_events_init(void)
{
	int ret;

	ret = wrapper_lttng_fixup_sig(THIS_MODULE);
	if (ret)
		return ret;
	ret = wrapper_get_pfnblock_flags_mask_init();
	if (ret)
		return ret;
	ret = wrapper_get_pageblock_flags_mask_init();
	if (ret)
		return ret;
	ret = lttng_probes_init();
	if (ret)
		return ret;
	ret = lttng_context_init();
	if (ret)
		return ret;
	ret = lttng_tracepoint_init();
	if (ret)
		goto error_tp;
	event_cache = KMEM_CACHE(lttng_event, 0);
	if (!event_cache) {
		ret = -ENOMEM;
		goto error_kmem_event;
	}
	trigger_cache = KMEM_CACHE(lttng_trigger, 0);
	if (!trigger_cache) {
		ret = -ENOMEM;
		goto error_kmem_trigger;
	}
	ret = lttng_abi_init();
	if (ret)
		goto error_abi;
	ret = lttng_logger_init();
	if (ret)
		goto error_logger;
	ret = lttng_init_cpu_hotplug();
	if (ret)
		goto error_hotplug;
	/* The extra-version printk arguments are compile-time optional. */
	printk(KERN_NOTICE "LTTng: Loaded modules v%s.%s.%s%s (%s)%s%s\n",
		__stringify(LTTNG_MODULES_MAJOR_VERSION),
		__stringify(LTTNG_MODULES_MINOR_VERSION),
		__stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
		LTTNG_MODULES_EXTRAVERSION,
		LTTNG_VERSION_NAME,
#ifdef LTTNG_EXTRA_VERSION_GIT
		LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
#else
		"",
#endif
#ifdef LTTNG_EXTRA_VERSION_NAME
		LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
#else
		"");
#endif
	return 0;

	/* Unwind in reverse initialization order. */
error_hotplug:
	lttng_logger_exit();
error_logger:
	lttng_abi_exit();
error_abi:
	kmem_cache_destroy(trigger_cache);
error_kmem_trigger:
	kmem_cache_destroy(event_cache);
error_kmem_event:
	lttng_tracepoint_exit();
error_tp:
	lttng_context_exit();
	printk(KERN_NOTICE "LTTng: Failed to load modules v%s.%s.%s%s (%s)%s%s\n",
		__stringify(LTTNG_MODULES_MAJOR_VERSION),
		__stringify(LTTNG_MODULES_MINOR_VERSION),
		__stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
		LTTNG_MODULES_EXTRAVERSION,
		LTTNG_VERSION_NAME,
#ifdef LTTNG_EXTRA_VERSION_GIT
		LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
#else
		"",
#endif
#ifdef LTTNG_EXTRA_VERSION_NAME
		LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
#else
		"");
#endif
	return ret;
}
4178
4179 module_init(lttng_events_init);
4180
/*
 * Module exit: tear down CPU hotplug support, the logger and the ABI,
 * destroy every remaining session, release the slab caches, then the
 * tracepoint and context layers, and announce the unload.
 */
static void __exit lttng_events_exit(void)
{
	struct lttng_session *session, *tmpsession;

	lttng_exit_cpu_hotplug();
	lttng_logger_exit();
	lttng_abi_exit();
	/* Safe iteration: destroy removes each session from the list. */
	list_for_each_entry_safe(session, tmpsession, &sessions, list)
		lttng_session_destroy(session);
	kmem_cache_destroy(event_cache);
	kmem_cache_destroy(trigger_cache);
	lttng_tracepoint_exit();
	lttng_context_exit();
	printk(KERN_NOTICE "LTTng: Unloaded modules v%s.%s.%s%s (%s)%s%s\n",
		__stringify(LTTNG_MODULES_MAJOR_VERSION),
		__stringify(LTTNG_MODULES_MINOR_VERSION),
		__stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
		LTTNG_MODULES_EXTRAVERSION,
		LTTNG_VERSION_NAME,
#ifdef LTTNG_EXTRA_VERSION_GIT
		LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
#else
		"",
#endif
#ifdef LTTNG_EXTRA_VERSION_NAME
		LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
#else
		"");
#endif
}
4211
4212 module_exit(lttng_events_exit);
4213
4214 #include <generated/patches.h>
4215 #ifdef LTTNG_EXTRA_VERSION_GIT
4216 MODULE_INFO(extra_version_git, LTTNG_EXTRA_VERSION_GIT);
4217 #endif
4218 #ifdef LTTNG_EXTRA_VERSION_NAME
4219 MODULE_INFO(extra_version_name, LTTNG_EXTRA_VERSION_NAME);
4220 #endif
4221 MODULE_LICENSE("GPL and additional rights");
4222 MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
4223 MODULE_DESCRIPTION("LTTng tracer");
4224 MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
4225 __stringify(LTTNG_MODULES_MINOR_VERSION) "."
4226 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
4227 LTTNG_MODULES_EXTRAVERSION);
This page took 0.116297 seconds and 5 git commands to generate.