SoW-2020-0003: Trace Hit Counters
[deliverable/lttng-modules.git] / src / lttng-events.c
1 /* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
2 *
3 * lttng-events.c
4 *
5 * Holds LTTng per-session event registry.
6 *
7 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 */
9
10 /*
11 * This page_alloc.h wrapper needs to be included before gfpflags.h because it
12 * overrides a function with a define.
13 */
14 #include "wrapper/page_alloc.h"
15
16 #include <linux/module.h>
17 #include <linux/mutex.h>
18 #include <linux/sched.h>
19 #include <linux/slab.h>
20 #include <linux/jiffies.h>
21 #include <linux/utsname.h>
22 #include <linux/err.h>
23 #include <linux/seq_file.h>
24 #include <linux/file.h>
25 #include <linux/anon_inodes.h>
26 #include <wrapper/file.h>
27 #include <linux/uaccess.h>
28 #include <linux/vmalloc.h>
29 #include <linux/dmi.h>
30
31 #include <wrapper/uuid.h>
32 #include <wrapper/vmalloc.h> /* for wrapper_vmalloc_sync_mappings() */
33 #include <wrapper/random.h>
34 #include <wrapper/tracepoint.h>
35 #include <wrapper/list.h>
36 #include <wrapper/types.h>
37 #include <wrapper/barrier.h>
38 #include <lttng/kernel-version.h>
39 #include <lttng/events.h>
40 #include <lttng/lttng-bytecode.h>
41 #include <lttng/tracer.h>
42 #include <lttng/event-notifier-notification.h>
43 #include <lttng/abi-old.h>
44 #include <lttng/endian.h>
45 #include <lttng/string-utils.h>
46 #include <lttng/utils.h>
47 #include <ringbuffer/backend.h>
48 #include <ringbuffer/frontend.h>
49 #include <counter/counter.h>
50 #include <wrapper/time.h>
51
52 #define METADATA_CACHE_DEFAULT_SIZE 4096
53
54 static LIST_HEAD(sessions);
55 static LIST_HEAD(event_notifier_groups);
56 static LIST_HEAD(lttng_transport_list);
57 static LIST_HEAD(lttng_counter_transport_list);
58 /*
59 * Protect the sessions and metadata caches.
60 */
61 static DEFINE_MUTEX(sessions_mutex);
62 static struct kmem_cache *event_cache;
63 static struct kmem_cache *event_notifier_cache;
64
65 static void lttng_session_lazy_sync_event_enablers(struct lttng_session *session);
66 static void lttng_session_sync_event_enablers(struct lttng_session *session);
67 static void lttng_event_enabler_destroy(struct lttng_event_enabler *event_enabler);
68 static void lttng_event_notifier_enabler_destroy(struct lttng_event_notifier_enabler *event_notifier_enabler);
69 static void lttng_event_notifier_group_sync_enablers(struct lttng_event_notifier_group *event_notifier_group);
70
71 static void _lttng_event_destroy(struct lttng_event *event);
72 static void _lttng_event_notifier_destroy(struct lttng_event_notifier *event_notifier);
73 static void _lttng_channel_destroy(struct lttng_channel *channel);
74 static void _lttng_session_counter_destroy(struct lttng_counter *counter);
75 static int _lttng_event_unregister(struct lttng_event *event);
76 static int _lttng_event_notifier_unregister(struct lttng_event_notifier *event_notifier);
77 static
78 int _lttng_event_metadata_statedump(struct lttng_session *session,
79 struct lttng_event *event);
80 static
81 int _lttng_session_metadata_statedump(struct lttng_session *session);
82 static
83 void _lttng_metadata_channel_hangup(struct lttng_metadata_stream *stream);
84 static
85 int _lttng_type_statedump(struct lttng_session *session,
86 const struct lttng_type *type,
87 size_t nesting);
88 static
89 int _lttng_field_statedump(struct lttng_session *session,
90 const struct lttng_event_field *field,
91 size_t nesting);
92
93 static bool lttng_event_container_is_metadata_channel(struct lttng_event_container *container)
94 {
95 switch (container->type) {
96 case LTTNG_EVENT_CONTAINER_CHANNEL:
97 {
98 struct lttng_channel *chan = lttng_event_container_get_channel(container);
99
100 return chan->channel_type == METADATA_CHANNEL;
101 }
102 case LTTNG_EVENT_CONTAINER_COUNTER:
103 return false;
104 default:
105 return false;
106 }
107 }
108
109 static bool lttng_event_within_metadata_channel(struct lttng_event *event)
110 {
111 return lttng_event_container_is_metadata_channel(event->container);
112 }
113
/*
 * Wait for all in-flight tracer probes to complete before returning.
 *
 * On kernels >= 5.1 synchronize_sched() no longer exists (merged into
 * synchronize_rcu()), hence the version split below. On PREEMPT_RT
 * kernels an extra synchronize_rcu() is issued as well; the relevant
 * config symbol is CONFIG_PREEMPT_RT_FULL on >= 3.4 and CONFIG_PREEMPT_RT
 * before that. NOTE(review): presumably needed because tracepoint
 * protection differs under full preemption — confirm against the
 * matching kernel RCU documentation.
 */
void synchronize_trace(void)
{
#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,1,0))
	synchronize_rcu();
#else
	synchronize_sched();
#endif

#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,4,0))
#ifdef CONFIG_PREEMPT_RT_FULL
	synchronize_rcu();
#endif
#else /* (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,4,0)) */
#ifdef CONFIG_PREEMPT_RT
	synchronize_rcu();
#endif
#endif /* (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,4,0)) */
}
132
/* Take the global sessions mutex (protects sessions and metadata caches). */
void lttng_lock_sessions(void)
{
	mutex_lock(&sessions_mutex);
}
137
/* Release the global sessions mutex taken by lttng_lock_sessions(). */
void lttng_unlock_sessions(void)
{
	mutex_unlock(&sessions_mutex);
}
142
143 static struct lttng_transport *lttng_transport_find(const char *name)
144 {
145 struct lttng_transport *transport;
146
147 list_for_each_entry(transport, &lttng_transport_list, node) {
148 if (!strcmp(transport->name, name))
149 return transport;
150 }
151 return NULL;
152 }
153
154 /*
155 * Called with sessions lock held.
156 */
157 int lttng_session_active(void)
158 {
159 struct lttng_session *iter;
160
161 list_for_each_entry(iter, &sessions, list) {
162 if (iter->active)
163 return 1;
164 }
165 return 0;
166 }
167
/*
 * Allocate and register a new tracing session.
 *
 * Allocates the session, its metadata cache (with an initial
 * METADATA_CACHE_DEFAULT_SIZE vmalloc'd buffer), initializes the event
 * name/key hash tables and all six id trackers, and links the session
 * into the global sessions list. The whole sequence runs under
 * sessions_mutex.
 *
 * Returns the new session, or NULL on allocation failure.
 */
struct lttng_session *lttng_session_create(void)
{
	struct lttng_session *session;
	struct lttng_metadata_cache *metadata_cache;
	int i;

	mutex_lock(&sessions_mutex);
	session = lttng_kvzalloc(sizeof(struct lttng_session), GFP_KERNEL);
	if (!session)
		goto err;
	INIT_LIST_HEAD(&session->chan);
	INIT_LIST_HEAD(&session->events);
	INIT_LIST_HEAD(&session->counters);
	lttng_guid_gen(&session->uuid);

	metadata_cache = kzalloc(sizeof(struct lttng_metadata_cache),
			GFP_KERNEL);
	if (!metadata_cache)
		goto err_free_session;
	metadata_cache->data = vzalloc(METADATA_CACHE_DEFAULT_SIZE);
	if (!metadata_cache->data)
		goto err_free_cache;
	metadata_cache->cache_alloc = METADATA_CACHE_DEFAULT_SIZE;
	/* Cache is shared with metadata streams; lifetime is refcounted. */
	kref_init(&metadata_cache->refcount);
	mutex_init(&metadata_cache->lock);
	session->metadata_cache = metadata_cache;
	INIT_LIST_HEAD(&metadata_cache->metadata_stream);
	/* The cache keeps a copy of the session uuid for the trace header. */
	memcpy(&metadata_cache->uuid, &session->uuid,
		sizeof(metadata_cache->uuid));
	INIT_LIST_HEAD(&session->enablers_head);
	for (i = 0; i < LTTNG_EVENT_HT_SIZE; i++)
		INIT_HLIST_HEAD(&session->events_name_ht.table[i]);
	for (i = 0; i < LTTNG_EVENT_HT_SIZE; i++)
		INIT_HLIST_HEAD(&session->events_key_ht.table[i]);
	list_add(&session->list, &sessions);
	/* Each id tracker carries a back-pointer to its session and its kind. */
	session->pid_tracker.session = session;
	session->pid_tracker.tracker_type = TRACKER_PID;
	session->vpid_tracker.session = session;
	session->vpid_tracker.tracker_type = TRACKER_VPID;
	session->uid_tracker.session = session;
	session->uid_tracker.tracker_type = TRACKER_UID;
	session->vuid_tracker.session = session;
	session->vuid_tracker.tracker_type = TRACKER_VUID;
	session->gid_tracker.session = session;
	session->gid_tracker.tracker_type = TRACKER_GID;
	session->vgid_tracker.session = session;
	session->vgid_tracker.tracker_type = TRACKER_VGID;
	mutex_unlock(&sessions_mutex);
	return session;

err_free_cache:
	kfree(metadata_cache);
err_free_session:
	lttng_kvfree(session);
err:
	mutex_unlock(&sessions_mutex);
	return NULL;
}
226
227 static
228 struct lttng_counter_transport *lttng_counter_transport_find(const char *name)
229 {
230 struct lttng_counter_transport *transport;
231
232 list_for_each_entry(transport, &lttng_counter_transport_list, node) {
233 if (!strcmp(transport->name, name))
234 return transport;
235 }
236 return NULL;
237 }
238
239 static
240 struct lttng_counter *lttng_kernel_counter_create(
241 const char *counter_transport_name,
242 size_t number_dimensions, const size_t *dimensions_sizes,
243 bool coalesce_hits)
244 {
245 struct lttng_counter_transport *counter_transport = NULL;
246 struct lttng_counter *counter = NULL;
247 struct lttng_event_container *container;
248
249 counter_transport = lttng_counter_transport_find(counter_transport_name);
250 if (!counter_transport) {
251 printk(KERN_WARNING "LTTng: counter transport %s not found.\n",
252 counter_transport_name);
253 goto notransport;
254 }
255 if (!try_module_get(counter_transport->owner)) {
256 printk(KERN_WARNING "LTTng: Can't lock counter transport module.\n");
257 goto notransport;
258 }
259
260 counter = lttng_kvzalloc(sizeof(struct lttng_counter), GFP_KERNEL);
261 if (!counter)
262 goto nomem;
263 container = lttng_counter_get_event_container(counter);
264 container->type = LTTNG_EVENT_CONTAINER_COUNTER;
265 container->coalesce_hits = coalesce_hits;
266 /* Create event notifier error counter. */
267 counter->ops = &counter_transport->ops;
268 counter->transport = counter_transport;
269 mutex_init(&counter->map.lock);
270
271 counter->counter = counter->ops->counter_create(
272 number_dimensions, dimensions_sizes, 0);
273 if (!counter->counter) {
274 printk(KERN_WARNING "LTTng: Error creating counter");
275 goto create_error;
276 }
277
278 return counter;
279
280 create_error:
281 lttng_kvfree(counter);
282 nomem:
283 if (counter_transport)
284 module_put(counter_transport->owner);
285 notransport:
286 return NULL;
287 }
288
/*
 * Destroy a counter created by lttng_kernel_counter_create():
 * tear down the backend counter, drop the transport module reference,
 * then free the descriptor map and the counter itself.
 */
static
void lttng_kernel_counter_destroy(struct lttng_counter *counter)
{
	counter->ops->counter_destroy(counter->counter);
	module_put(counter->transport->owner);
	lttng_kvfree(counter->map.descriptors);
	lttng_kvfree(counter);
}
297
/*
 * Attach an error counter to an event notifier group.
 *
 * Only one error counter may be set per group (-EBUSY otherwise). The
 * counter is one-dimensional of length @counter_len and does not
 * coalesce hits. Publication uses store-release so that concurrent
 * readers see a fully initialized counter (see pairing note below).
 *
 * Returns 0 on success, -EBUSY if already set, -EINVAL on creation
 * failure.
 */
int lttng_event_notifier_group_set_error_counter(
		struct lttng_event_notifier_group *event_notifier_group,
		const char *counter_transport_name,
		size_t counter_len)
{
	struct lttng_counter *counter;
	int ret;

	/*
	 * Lock sessions to provide mutual exclusion against concurrent
	 * modification of trigger group, which would result in
	 * overwriting the error counter if set concurrently.
	 */
	mutex_lock(&sessions_mutex);

	if (event_notifier_group->error_counter) {
		printk(KERN_ERR "Error counter already set in event notifier group\n");
		ret = -EBUSY;
		goto error;
	}

	counter = lttng_kernel_counter_create(counter_transport_name,
			1, &counter_len, false);
	if (!counter) {
		ret = -EINVAL;
		goto error;
	}

	/* Length must be visible before the counter pointer is published. */
	event_notifier_group->error_counter_len = counter_len;
	/*
	 * store-release to publish error counter matches load-acquire
	 * in record_error. Ensures the counter is created and the
	 * error_counter_len is set before they are used.
	 */
	lttng_smp_store_release(&event_notifier_group->error_counter,
			counter);

	mutex_unlock(&sessions_mutex);
	return 0;

error:
	mutex_unlock(&sessions_mutex);
	return ret;
}
342
/*
 * Create an event notifier group and its notification ring buffer.
 *
 * The group uses the "relay-event-notifier" transport; a module
 * reference on the transport is held for the group's lifetime. The
 * group is linked into the global event_notifier_groups list under
 * sessions_mutex.
 *
 * Returns the new group, or NULL on error.
 */
struct lttng_event_notifier_group *lttng_event_notifier_group_create(void)
{
	struct lttng_transport *transport = NULL;
	struct lttng_event_notifier_group *event_notifier_group;
	const char *transport_name = "relay-event-notifier";
	size_t subbuf_size = 4096;	//TODO: make ring buffer geometry configurable
	size_t num_subbuf = 16;		//TODO
	unsigned int switch_timer_interval = 0;
	unsigned int read_timer_interval = 0;
	int i;

	mutex_lock(&sessions_mutex);

	transport = lttng_transport_find(transport_name);
	if (!transport) {
		printk(KERN_WARNING "LTTng: transport %s not found\n",
		       transport_name);
		goto notransport;
	}
	if (!try_module_get(transport->owner)) {
		printk(KERN_WARNING "LTTng: Can't lock transport %s module.\n",
		       transport_name);
		goto notransport;
	}

	event_notifier_group = lttng_kvzalloc(sizeof(struct lttng_event_notifier_group),
			GFP_KERNEL);
	if (!event_notifier_group)
		goto nomem;

	/*
	 * Initialize the ring buffer used to store event notifier
	 * notifications.
	 */
	event_notifier_group->ops = &transport->ops;
	event_notifier_group->chan = transport->ops.channel_create(
			transport_name, event_notifier_group, NULL,
			subbuf_size, num_subbuf, switch_timer_interval,
			read_timer_interval);
	if (!event_notifier_group->chan)
		goto create_error;

	event_notifier_group->transport = transport;

	INIT_LIST_HEAD(&event_notifier_group->enablers_head);
	INIT_LIST_HEAD(&event_notifier_group->event_notifiers_head);
	for (i = 0; i < LTTNG_EVENT_NOTIFIER_HT_SIZE; i++)
		INIT_HLIST_HEAD(&event_notifier_group->event_notifiers_ht.table[i]);

	list_add(&event_notifier_group->node, &event_notifier_groups);

	mutex_unlock(&sessions_mutex);

	return event_notifier_group;

create_error:
	lttng_kvfree(event_notifier_group);
nomem:
	if (transport)
		module_put(transport->owner);
notransport:
	mutex_unlock(&sessions_mutex);
	return NULL;
}
407
408 struct lttng_counter *lttng_session_create_counter(
409 struct lttng_session *session,
410 const char *counter_transport_name,
411 size_t number_dimensions, const size_t *dimensions_sizes,
412 bool coalesce_hits)
413 {
414 struct lttng_counter *counter;
415 struct lttng_event_container *container;
416
417 counter = lttng_kernel_counter_create(counter_transport_name,
418 number_dimensions, dimensions_sizes,
419 coalesce_hits);
420 if (!counter) {
421 goto counter_error;
422 }
423 container = lttng_counter_get_event_container(counter);
424
425 mutex_lock(&sessions_mutex);
426 container->session = session;
427 list_add(&counter->node, &session->counters);
428 mutex_unlock(&sessions_mutex);
429
430 return counter;
431
432 counter_error:
433 return NULL;
434 }
435
/*
 * kref release callback for a metadata cache: frees the vmalloc'd data
 * buffer and the cache structure. Invoked when the last reference
 * (session or metadata stream) is dropped.
 */
void metadata_cache_destroy(struct kref *kref)
{
	struct lttng_metadata_cache *cache =
		container_of(kref, struct lttng_metadata_cache, refcount);
	vfree(cache->data);
	kfree(cache);
}
443
/*
 * Tear down a session and everything it owns.
 *
 * Ordering is significant: the session is first marked inactive and all
 * instrumentation (syscall containers, events) is unregistered, then
 * synchronize_trace() waits for in-flight probes before any structure
 * is freed. Only then are enablers, events, channels and counters
 * destroyed, metadata streams hung up, id trackers destroyed, and the
 * metadata cache reference dropped.
 */
void lttng_session_destroy(struct lttng_session *session)
{
	struct lttng_channel *chan, *tmpchan;
	struct lttng_event *event, *tmpevent;
	struct lttng_metadata_stream *metadata_stream;
	struct lttng_event_enabler *event_enabler, *tmp_event_enabler;
	struct lttng_counter *counter, *tmpcounter;
	int ret;

	mutex_lock(&sessions_mutex);
	WRITE_ONCE(session->active, 0);
	list_for_each_entry(chan, &session->chan, list) {
		ret = lttng_syscalls_unregister_event_container(lttng_channel_get_event_container(chan));
		WARN_ON(ret);
	}
	list_for_each_entry(counter, &session->counters, node) {
		ret = lttng_syscalls_unregister_event_container(lttng_counter_get_event_container(counter));
		WARN_ON(ret);
	}
	list_for_each_entry(event, &session->events, list) {
		ret = _lttng_event_unregister(event);
		WARN_ON(ret);
	}
	synchronize_trace();	/* Wait for in-flight events to complete */
	list_for_each_entry(chan, &session->chan, list) {
		ret = lttng_syscalls_destroy_event_container(lttng_channel_get_event_container(chan));
		WARN_ON(ret);
	}
	list_for_each_entry(counter, &session->counters, node) {
		ret = lttng_syscalls_destroy_event_container(lttng_counter_get_event_container(counter));
		WARN_ON(ret);
	}
	list_for_each_entry_safe(event_enabler, tmp_event_enabler,
			&session->enablers_head, node)
		lttng_event_enabler_destroy(event_enabler);
	list_for_each_entry_safe(event, tmpevent, &session->events, list)
		_lttng_event_destroy(event);
	list_for_each_entry_safe(chan, tmpchan, &session->chan, list) {
		/* Metadata channels are released separately, never listed here. */
		BUG_ON(chan->channel_type == METADATA_CHANNEL);
		_lttng_channel_destroy(chan);
	}
	list_for_each_entry_safe(counter, tmpcounter, &session->counters, node)
		_lttng_session_counter_destroy(counter);
	/* Wake up readers blocked on the session's metadata streams. */
	mutex_lock(&session->metadata_cache->lock);
	list_for_each_entry(metadata_stream, &session->metadata_cache->metadata_stream, list)
		_lttng_metadata_channel_hangup(metadata_stream);
	mutex_unlock(&session->metadata_cache->lock);
	lttng_id_tracker_destroy(&session->pid_tracker, false);
	lttng_id_tracker_destroy(&session->vpid_tracker, false);
	lttng_id_tracker_destroy(&session->uid_tracker, false);
	lttng_id_tracker_destroy(&session->vuid_tracker, false);
	lttng_id_tracker_destroy(&session->gid_tracker, false);
	lttng_id_tracker_destroy(&session->vgid_tracker, false);
	kref_put(&session->metadata_cache->refcount, metadata_cache_destroy);
	list_del(&session->list);
	mutex_unlock(&sessions_mutex);
	lttng_kvfree(session);
}
502
/*
 * Tear down an event notifier group.
 *
 * Mirrors lttng_session_destroy(): unregister all notifiers first, wait
 * for in-flight probes with synchronize_trace(), flush the pending
 * wakeup irq_work, then destroy enablers, notifiers, the optional error
 * counter, and finally the notification channel. NULL is accepted and
 * is a no-op.
 */
void lttng_event_notifier_group_destroy(
		struct lttng_event_notifier_group *event_notifier_group)
{
	struct lttng_event_notifier_enabler *event_notifier_enabler, *tmp_event_notifier_enabler;
	struct lttng_event_notifier *event_notifier, *tmpevent_notifier;
	int ret;

	if (!event_notifier_group)
		return;

	mutex_lock(&sessions_mutex);

	ret = lttng_syscalls_unregister_event_notifier_group(event_notifier_group);
	WARN_ON(ret);

	list_for_each_entry_safe(event_notifier, tmpevent_notifier,
			&event_notifier_group->event_notifiers_head, list) {
		ret = _lttng_event_notifier_unregister(event_notifier);
		WARN_ON(ret);
	}

	/* Wait for in-flight event notifier to complete */
	synchronize_trace();

	irq_work_sync(&event_notifier_group->wakeup_pending);

	kfree(event_notifier_group->sc_filter);

	list_for_each_entry_safe(event_notifier_enabler, tmp_event_notifier_enabler,
			&event_notifier_group->enablers_head, node)
		lttng_event_notifier_enabler_destroy(event_notifier_enabler);

	list_for_each_entry_safe(event_notifier, tmpevent_notifier,
			&event_notifier_group->event_notifiers_head, list)
		_lttng_event_notifier_destroy(event_notifier);

	if (event_notifier_group->error_counter) {
		struct lttng_counter *error_counter = event_notifier_group->error_counter;

		lttng_kernel_counter_destroy(error_counter);
		event_notifier_group->error_counter = NULL;
	}

	event_notifier_group->ops->channel_destroy(event_notifier_group->chan);
	module_put(event_notifier_group->transport->owner);
	list_del(&event_notifier_group->node);

	mutex_unlock(&sessions_mutex);
	lttng_kvfree(event_notifier_group);
}
553
/*
 * Trigger a statedump for the session, serialized by sessions_mutex.
 * Returns the result of lttng_statedump_start().
 */
int lttng_session_statedump(struct lttng_session *session)
{
	int ret;

	mutex_lock(&sessions_mutex);
	ret = lttng_statedump_start(session);
	mutex_unlock(&sessions_mutex);
	return ret;
}
563
/*
 * Activate a tracing session.
 *
 * Syncs enablers, fixes each channel's event-header type (chosen once,
 * based on how many event ids the channel has allocated), clears
 * per-stream quiescent state, marks the session active, dumps metadata
 * and starts the statedump. On statedump failure the session is
 * deactivated again.
 *
 * Returns 0 on success, -EBUSY if already active, or a statedump error.
 */
int lttng_session_enable(struct lttng_session *session)
{
	int ret = 0;
	struct lttng_channel *chan;

	mutex_lock(&sessions_mutex);
	if (session->active) {
		ret = -EBUSY;
		goto end;
	}

	/* Set transient enabler state to "enabled" */
	session->tstate = 1;

	/* We need to sync enablers with session before activation. */
	lttng_session_sync_event_enablers(session);

	/*
	 * Snapshot the number of events per channel to know the type of header
	 * we need to use.
	 */
	list_for_each_entry(chan, &session->chan, list) {
		if (chan->header_type)
			continue;		/* don't change it if session stop/restart */
		/* Compact headers can only encode a small id space. */
		if (chan->free_event_id < 31)
			chan->header_type = 1;	/* compact */
		else
			chan->header_type = 2;	/* large */
	}

	/* Clear each stream's quiescent state. */
	list_for_each_entry(chan, &session->chan, list) {
		if (chan->channel_type != METADATA_CHANNEL)
			lib_ring_buffer_clear_quiescent_channel(chan->chan);
	}

	WRITE_ONCE(session->active, 1);
	WRITE_ONCE(session->been_active, 1);
	ret = _lttng_session_metadata_statedump(session);
	if (ret) {
		WRITE_ONCE(session->active, 0);
		goto end;
	}
	ret = lttng_statedump_start(session);
	if (ret)
		WRITE_ONCE(session->active, 0);
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
614
/*
 * Deactivate a tracing session: mark it inactive, sync enablers with
 * the "disabled" transient state, and set every non-metadata stream
 * quiescent. Returns 0, or -EBUSY if the session is not active.
 */
int lttng_session_disable(struct lttng_session *session)
{
	int ret = 0;
	struct lttng_channel *chan;

	mutex_lock(&sessions_mutex);
	if (!session->active) {
		ret = -EBUSY;
		goto end;
	}
	WRITE_ONCE(session->active, 0);

	/* Set transient enabler state to "disabled" */
	session->tstate = 0;
	lttng_session_sync_event_enablers(session);

	/* Set each stream's quiescent state. */
	list_for_each_entry(chan, &session->chan, list) {
		if (chan->channel_type != METADATA_CHANNEL)
			lib_ring_buffer_set_quiescent_channel(chan->chan);
	}
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
640
/*
 * Regenerate the session's metadata from scratch.
 *
 * Clears the metadata cache (bumping its version), resets the in/out
 * positions of every metadata stream, clears all "metadata dumped"
 * flags on the session, channels and events, then re-runs the metadata
 * statedump. Only valid on an active session (-EBUSY otherwise).
 */
int lttng_session_metadata_regenerate(struct lttng_session *session)
{
	int ret = 0;
	struct lttng_channel *chan;
	struct lttng_event *event;
	struct lttng_metadata_cache *cache = session->metadata_cache;
	struct lttng_metadata_stream *stream;

	mutex_lock(&sessions_mutex);
	if (!session->active) {
		ret = -EBUSY;
		goto end;
	}

	mutex_lock(&cache->lock);
	memset(cache->data, 0, cache->cache_alloc);
	cache->metadata_written = 0;
	cache->version++;
	list_for_each_entry(stream, &session->metadata_cache->metadata_stream, list) {
		stream->metadata_out = 0;
		stream->metadata_in = 0;
	}
	mutex_unlock(&cache->lock);

	session->metadata_dumped = 0;
	list_for_each_entry(chan, &session->chan, list) {
		chan->metadata_dumped = 0;
	}

	list_for_each_entry(event, &session->events, list) {
		event->metadata_dumped = 0;
	}

	ret = _lttng_session_metadata_statedump(session);

end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
680
/*
 * Enable an event container (channel or counter).
 *
 * Refused on metadata channels (-EPERM) and on already-enabled
 * containers (-EEXIST). The transient enabler state is set and enablers
 * are synced BEFORE the enabled flag is published, so probes only see
 * an enabled container once its events are consistent.
 */
int lttng_event_container_enable(struct lttng_event_container *container)
{
	int ret = 0;

	mutex_lock(&sessions_mutex);
	if (lttng_event_container_is_metadata_channel(container)) {
		ret = -EPERM;
		goto end;
	}
	if (container->enabled) {
		ret = -EEXIST;
		goto end;
	}
	/* Set transient enabler state to "enabled" */
	container->tstate = 1;
	lttng_session_sync_event_enablers(container->session);
	/* Set atomically the state to "enabled" */
	WRITE_ONCE(container->enabled, 1);
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
703
/*
 * Disable an event container (channel or counter).
 *
 * Refused on metadata channels (-EPERM) and on already-disabled
 * containers (-EEXIST). The enabled flag is cleared FIRST, then the
 * transient state is updated and enablers synced — the reverse order
 * of lttng_event_container_enable().
 */
int lttng_event_container_disable(struct lttng_event_container *container)
{
	int ret = 0;

	mutex_lock(&sessions_mutex);
	if (lttng_event_container_is_metadata_channel(container)) {
		ret = -EPERM;
		goto end;
	}
	if (!container->enabled) {
		ret = -EEXIST;
		goto end;
	}
	/* Set atomically the state to "disabled" */
	WRITE_ONCE(container->enabled, 0);
	/* Set transient enabler state to "disabled" */
	container->tstate = 0;
	lttng_session_sync_event_enablers(container->session);
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
726
/*
 * Enable a single event.
 *
 * Tracepoint and syscall events are managed by enablers, not
 * individually (-EINVAL). Kprobe/uprobe/noop events just flip their
 * enabled flag; kretprobes need both entry and return halves toggled
 * via lttng_kretprobes_event_enable_state(). Refused on metadata
 * channels (-EPERM) and already-enabled events (-EEXIST).
 */
int lttng_event_enable(struct lttng_event *event)
{
	int ret = 0;

	mutex_lock(&sessions_mutex);
	if (lttng_event_within_metadata_channel(event)) {
		ret = -EPERM;
		goto end;
	}
	if (event->enabled) {
		ret = -EEXIST;
		goto end;
	}
	switch (event->instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:
	case LTTNG_KERNEL_SYSCALL:
		ret = -EINVAL;
		break;
	case LTTNG_KERNEL_KPROBE:
	case LTTNG_KERNEL_UPROBE:
	case LTTNG_KERNEL_NOOP:
		WRITE_ONCE(event->enabled, 1);
		break;
	case LTTNG_KERNEL_KRETPROBE:
		ret = lttng_kretprobes_event_enable_state(event, 1);
		break;
	case LTTNG_KERNEL_FUNCTION:	/* Fall-through. */
	default:
		WARN_ON_ONCE(1);
		ret = -EINVAL;
	}
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
762
/*
 * Disable a single event. Exact mirror of lttng_event_enable():
 * tracepoint/syscall events are enabler-managed (-EINVAL),
 * kprobe/uprobe/noop clear the enabled flag, kretprobes toggle both
 * halves. Refused on metadata channels (-EPERM) and already-disabled
 * events (-EEXIST).
 */
int lttng_event_disable(struct lttng_event *event)
{
	int ret = 0;

	mutex_lock(&sessions_mutex);
	if (lttng_event_within_metadata_channel(event)) {
		ret = -EPERM;
		goto end;
	}
	if (!event->enabled) {
		ret = -EEXIST;
		goto end;
	}
	switch (event->instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:
	case LTTNG_KERNEL_SYSCALL:
		ret = -EINVAL;
		break;
	case LTTNG_KERNEL_KPROBE:
	case LTTNG_KERNEL_UPROBE:
	case LTTNG_KERNEL_NOOP:
		WRITE_ONCE(event->enabled, 0);
		break;
	case LTTNG_KERNEL_KRETPROBE:
		ret = lttng_kretprobes_event_enable_state(event, 0);
		break;
	case LTTNG_KERNEL_FUNCTION:	/* Fall-through. */
	default:
		WARN_ON_ONCE(1);
		ret = -EINVAL;
	}
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
798
/*
 * Enable an event notifier.
 *
 * Tracepoint and syscall notifiers are enabler-managed (-EINVAL); only
 * kprobe/uprobe notifiers flip the enabled flag directly. Function,
 * noop and kretprobe instrumentation is not supported for notifiers.
 * Returns -EEXIST if already enabled.
 */
int lttng_event_notifier_enable(struct lttng_event_notifier *event_notifier)
{
	int ret = 0;

	mutex_lock(&sessions_mutex);
	if (event_notifier->enabled) {
		ret = -EEXIST;
		goto end;
	}
	switch (event_notifier->instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:
	case LTTNG_KERNEL_SYSCALL:
		ret = -EINVAL;
		break;
	case LTTNG_KERNEL_KPROBE:
	case LTTNG_KERNEL_UPROBE:
		WRITE_ONCE(event_notifier->enabled, 1);
		break;
	case LTTNG_KERNEL_FUNCTION:
	case LTTNG_KERNEL_NOOP:
	case LTTNG_KERNEL_KRETPROBE:
	default:
		WARN_ON_ONCE(1);
		ret = -EINVAL;
	}
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
828
/*
 * Disable an event notifier. Exact mirror of
 * lttng_event_notifier_enable(); returns -EEXIST if already disabled.
 */
int lttng_event_notifier_disable(struct lttng_event_notifier *event_notifier)
{
	int ret = 0;

	mutex_lock(&sessions_mutex);
	if (!event_notifier->enabled) {
		ret = -EEXIST;
		goto end;
	}
	switch (event_notifier->instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:
	case LTTNG_KERNEL_SYSCALL:
		ret = -EINVAL;
		break;
	case LTTNG_KERNEL_KPROBE:
	case LTTNG_KERNEL_UPROBE:
		WRITE_ONCE(event_notifier->enabled, 0);
		break;
	case LTTNG_KERNEL_FUNCTION:
	case LTTNG_KERNEL_NOOP:
	case LTTNG_KERNEL_KRETPROBE:
	default:
		WARN_ON_ONCE(1);
		ret = -EINVAL;
	}
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
858
/*
 * Create a channel in a session.
 *
 * Non-metadata channels may not be added once the session has ever been
 * active. A module reference on the transport is held for the channel's
 * lifetime. The channel starts enabled, with coalesce_hits set: a ring
 * buffer always merges hits from multiple enablers into one record.
 *
 * Returns the new channel, or NULL on error.
 */
struct lttng_channel *lttng_channel_create(struct lttng_session *session,
				       const char *transport_name,
				       void *buf_addr,
				       size_t subbuf_size, size_t num_subbuf,
				       unsigned int switch_timer_interval,
				       unsigned int read_timer_interval,
				       enum channel_type channel_type)
{
	struct lttng_event_container *container;
	struct lttng_channel *chan = NULL;
	struct lttng_transport *transport = NULL;

	mutex_lock(&sessions_mutex);
	if (session->been_active && channel_type != METADATA_CHANNEL)
		goto active;	/* Refuse to add channel to active session */
	transport = lttng_transport_find(transport_name);
	if (!transport) {
		printk(KERN_WARNING "LTTng: transport %s not found\n",
		       transport_name);
		goto notransport;
	}
	if (!try_module_get(transport->owner)) {
		printk(KERN_WARNING "LTTng: Can't lock transport module.\n");
		goto notransport;
	}
	chan = lttng_kvzalloc(sizeof(struct lttng_channel), GFP_KERNEL);
	if (!chan)
		goto nomem;
	container = lttng_channel_get_event_container(chan);
	container->type = LTTNG_EVENT_CONTAINER_CHANNEL;
	container->session = session;
	container->tstate = 1;
	container->enabled = 1;
	/*
	 * The ring buffer always coalesces hits from various event
	 * enablers matching a given event to a single event record within the
	 * ring buffer.
	 */
	container->coalesce_hits = true;

	chan->id = session->free_chan_id++;
	chan->ops = &transport->ops;
	/*
	 * Note: the channel creation op already writes into the packet
	 * headers. Therefore the "chan" information used as input
	 * should be already accessible.
	 */
	chan->chan = transport->ops.channel_create(transport_name,
			container, buf_addr, subbuf_size, num_subbuf,
			switch_timer_interval, read_timer_interval);
	if (!chan->chan)
		goto create_error;
	chan->transport = transport;
	chan->channel_type = channel_type;
	list_add(&chan->list, &session->chan);
	mutex_unlock(&sessions_mutex);
	return chan;

create_error:
	lttng_kvfree(chan);
nomem:
	if (transport)
		module_put(transport->owner);
notransport:
active:
	mutex_unlock(&sessions_mutex);
	return NULL;
}
927
/*
 * Unlink a counter from its session and destroy it.
 * Called with sessions mutex held (session teardown path).
 */
static
void _lttng_session_counter_destroy(struct lttng_counter *counter)
{
	list_del(&counter->node);
	lttng_kernel_counter_destroy(counter);
}
934
/*
 * Only used internally at session destruction for per-cpu channels, and
 * when metadata channel is released.
 * Needs to be called with sessions mutex held.
 *
 * Destroys the backend channel, drops the transport module reference,
 * unlinks the channel from the session, and frees its context and the
 * channel itself.
 */
static
void _lttng_channel_destroy(struct lttng_channel *chan)
{
	chan->ops->channel_destroy(chan->chan);
	module_put(chan->transport->owner);
	list_del(&chan->list);
	lttng_destroy_context(chan->ctx);
	lttng_kvfree(chan);
}
949
/*
 * Public teardown entry point for metadata channels only; takes the
 * sessions mutex around _lttng_channel_destroy() to protect the
 * metadata cache.
 */
void lttng_metadata_channel_destroy(struct lttng_channel *chan)
{
	BUG_ON(chan->channel_type != METADATA_CHANNEL);
	/* Protect the metadata cache with the sessions_mutex. */
	mutex_lock(&sessions_mutex);
	_lttng_channel_destroy(chan);
	mutex_unlock(&sessions_mutex);
}
EXPORT_SYMBOL_GPL(lttng_metadata_channel_destroy);
959
/*
 * Mark a metadata stream finalized and wake up any blocked reader so it
 * can observe the hangup.
 */
static
void _lttng_metadata_channel_hangup(struct lttng_metadata_stream *stream)
{
	stream->finalized = 1;
	wake_up_interruptible(&stream->read_wait);
}
966
/*
 * Return true when the container has no id left to hand out.
 *
 * Channels: full when free_event_id has wrapped to -1U. Counters: full
 * when free_index reaches the (single) dimension's element count; any
 * failure to query the counter geometry is conservatively reported as
 * "full" so no id gets allocated.
 */
static
bool lttng_event_container_current_id_full(struct lttng_event_container *container)
{
	switch (container->type) {
	case LTTNG_EVENT_CONTAINER_CHANNEL:
	{
		struct lttng_channel *channel = lttng_event_container_get_channel(container);

		return channel->free_event_id == -1U;
	}
	case LTTNG_EVENT_CONTAINER_COUNTER:
	{
		struct lttng_counter *counter = lttng_event_container_get_counter(container);
		size_t nr_dimensions, max_nr_elem;

		if (lttng_counter_get_nr_dimensions(&counter->counter->config,
				counter->counter, &nr_dimensions))
			return true;
		/* Only one-dimensional counters are supported here. */
		WARN_ON_ONCE(nr_dimensions != 1);
		if (nr_dimensions != 1)
			return true;
		if (lttng_counter_get_max_nr_elem(&counter->counter->config,
				counter->counter, &max_nr_elem))
			return true;
		return counter->free_index >= max_nr_elem;
	}
	default:
		WARN_ON_ONCE(1);
		return true;
	}
}
998
999
/*
 * Allocate an event id within a container.
 *
 * When a non-empty key string is given, events sharing the same key
 * share the same id: the session's key hash table is searched first and
 * a matching event's id is reused. Otherwise the container's next free
 * id/index is handed out. Returns 0 on success and stores the id in
 * *id; returns -EMFILE when the container's id space is exhausted.
 */
static
int lttng_event_container_allocate_id(struct lttng_event_container *container,
		const char *key_string, size_t *id)
{
	struct lttng_session *session = container->session;
	struct lttng_event *event;

	if (key_string[0]) {
		struct hlist_head *head;

		head = utils_borrow_hash_table_bucket(session->events_key_ht.table,
			LTTNG_EVENT_HT_SIZE, key_string);
		lttng_hlist_for_each_entry(event, head, key_hlist) {
			if (!strcmp(key_string, event->key)) {
				/* Same key, use same id. */
				*id = event->id;
				return 0;
			}
		}
	}

	if (lttng_event_container_current_id_full(container)) {
		return -EMFILE;
	}

	switch (container->type) {
	case LTTNG_EVENT_CONTAINER_CHANNEL:
	{
		struct lttng_channel *channel = lttng_event_container_get_channel(container);

		*id = channel->free_event_id++;
		break;
	}
	case LTTNG_EVENT_CONTAINER_COUNTER:
	{
		struct lttng_counter *counter = lttng_event_container_get_counter(container);

		*id = counter->free_index++;
		break;
	}
	default:
		/* Unknown container type: warn, but report success with *id unset. */
		WARN_ON_ONCE(1);
		return 0;
	}

	return 0;
}
1045
1046 static
1047 int format_event_key(char *key_string, const struct lttng_counter_key *key,
1048 const char *event_name)
1049 {
1050 const struct lttng_counter_key_dimension *dim;
1051 size_t i, left = LTTNG_KEY_TOKEN_STRING_LEN_MAX;
1052
1053 key_string[0] = '\0';
1054 if (!key || !key->nr_dimensions)
1055 return 0;
1056 /* Currently event keys can only be specified on a single dimension. */
1057 if (key->nr_dimensions != 1)
1058 return -EINVAL;
1059 dim = &key->key_dimensions[0];
1060 for (i = 0; i < dim->nr_key_tokens; i++) {
1061 const struct lttng_key_token *token = &dim->key_tokens[i];
1062 size_t token_len;
1063 const char *str;
1064
1065 switch (token->type) {
1066 case LTTNG_KEY_TOKEN_STRING:
1067 str = token->arg.string;
1068 break;
1069 case LTTNG_KEY_TOKEN_EVENT_NAME:
1070 str = event_name;
1071 break;
1072 default:
1073 return -EINVAL;
1074 }
1075 token_len = strlen(str);
1076 if (token_len >= left)
1077 return -EINVAL;
1078 strcat(key_string, str);
1079 left -= token_len;
1080 }
1081 return 0;
1082 }
1083
1084 static
1085 bool match_event_token(struct lttng_event_container *container,
1086 struct lttng_event *event, uint64_t token)
1087 {
1088 if (container->coalesce_hits)
1089 return true;
1090 if (event->user_token == token)
1091 return true;
1092 return false;
1093 }
1094
/*
 * Append a { user_token, array_index, key } descriptor to a counter's
 * map, growing the descriptor table when it is full.
 *
 * The map->lock mutex protects the descriptor table against concurrent
 * append and readers of the map. Returns 0 on success, -EOVERFLOW if
 * the key does not fit in a descriptor, -ENOMEM on allocation failure.
 */
static
int lttng_counter_append_descriptor(struct lttng_counter *counter,
		uint64_t user_token,
		size_t index,
		const char *key)
{
	struct lttng_counter_map *map = &counter->map;
	struct lttng_counter_map_descriptor *last;
	int ret = 0;

	/* Key length is validated before taking the lock. */
	if (strlen(key) >= LTTNG_KERNEL_COUNTER_KEY_LEN) {
		WARN_ON_ONCE(1);
		return -EOVERFLOW;
	}
	mutex_lock(&map->lock);
	if (map->nr_descriptors == map->alloc_len) {
		struct lttng_counter_map_descriptor *new_table, *old_table;
		size_t old_len = map->nr_descriptors;
		/* Double the capacity; max_t handles the initial 0-length table. */
		size_t new_len = max_t(size_t, old_len + 1, map->alloc_len * 2);

		old_table = map->descriptors;
		new_table = lttng_kvzalloc(sizeof(struct lttng_counter_map_descriptor) * new_len,
				GFP_KERNEL);
		if (!new_table) {
			ret = -ENOMEM;
			goto unlock;
		}

		/* Copy existing descriptors into the larger table, then swap. */
		if (old_table)
			memcpy(new_table, old_table, old_len * sizeof(struct lttng_counter_map_descriptor));

		map->descriptors = new_table;
		map->alloc_len = new_len;
		lttng_kvfree(old_table);
	}
	/* Fill the next free slot. Key fits: checked above. */
	last = &map->descriptors[map->nr_descriptors++];
	last->user_token = user_token;
	last->array_index = index;
	strcpy(last->key, key);
unlock:
	mutex_unlock(&map->lock);
	return ret;
}
1138
1139 /*
1140 * Supports event creation while tracing session is active.
1141 * Needs to be called with sessions mutex held.
1142 */
1143 struct lttng_event *_lttng_event_create(struct lttng_event_container *container,
1144 struct lttng_kernel_event *event_param,
1145 const struct lttng_counter_key *key,
1146 void *filter,
1147 const struct lttng_event_desc *event_desc,
1148 enum lttng_kernel_instrumentation itype,
1149 uint64_t token)
1150 {
1151 struct lttng_session *session;
1152 struct lttng_event *event;
1153 char event_name[LTTNG_KERNEL_SYM_NAME_LEN];
1154 struct hlist_head *name_head, *key_head;
1155 char key_string[LTTNG_KEY_TOKEN_STRING_LEN_MAX];
1156 int ret;
1157
1158 session = container->session;
1159 switch (itype) {
1160 case LTTNG_KERNEL_TRACEPOINT:
1161 if (strlen(event_desc->name) >= LTTNG_KERNEL_SYM_NAME_LEN) {
1162 ret = -EINVAL;
1163 goto type_error;
1164 }
1165 strcpy(event_name, event_desc->name);
1166 break;
1167 case LTTNG_KERNEL_KPROBE:
1168 case LTTNG_KERNEL_UPROBE:
1169 case LTTNG_KERNEL_SYSCALL:
1170 if (strlen(event_param->name) >= LTTNG_KERNEL_SYM_NAME_LEN) {
1171 ret = -EINVAL;
1172 goto type_error;
1173 }
1174 strcpy(event_name, event_param->name);
1175 break;
1176 case LTTNG_KERNEL_KRETPROBE:
1177 if (strlen(event_param->name) >= LTTNG_KERNEL_SYM_NAME_LEN) {
1178 ret = -EINVAL;
1179 goto type_error;
1180 }
1181 strcpy(event_name, event_param->name);
1182 if (strlen(event_name) + strlen("_entry") >= LTTNG_KERNEL_SYM_NAME_LEN) {
1183 ret = -EINVAL;
1184 goto type_error;
1185 }
1186 strcat(event_name, "_entry");
1187 break;
1188 case LTTNG_KERNEL_FUNCTION: /* Fall-through. */
1189 case LTTNG_KERNEL_NOOP: /* Fall-through. */
1190 default:
1191 WARN_ON_ONCE(1);
1192 ret = -EINVAL;
1193 goto type_error;
1194 }
1195
1196 if (format_event_key(key_string, key, event_name)) {
1197 ret = -EINVAL;
1198 goto type_error;
1199 }
1200
1201 name_head = utils_borrow_hash_table_bucket(session->events_name_ht.table,
1202 LTTNG_EVENT_HT_SIZE, event_name);
1203 lttng_hlist_for_each_entry(event, name_head, name_hlist) {
1204 bool same_event = false, same_container = false, same_key = false,
1205 same_token = false;
1206
1207 WARN_ON_ONCE(!event->desc);
1208 if (event_desc) {
1209 if (event->desc == event_desc)
1210 same_event = true;
1211 } else {
1212 if (!strcmp(event_name, event->desc->name))
1213 same_event = true;
1214 }
1215 if (container == event->container) {
1216 same_container = true;
1217 if (match_event_token(container, event, token))
1218 same_token = true;
1219 }
1220 if (key_string[0] == '\0' || !strcmp(key_string, event->key))
1221 same_key = true;
1222 if (same_event && same_container && same_key && same_token) {
1223 ret = -EEXIST;
1224 goto exist;
1225 }
1226 }
1227
1228 event = kmem_cache_zalloc(event_cache, GFP_KERNEL);
1229 if (!event) {
1230 ret = -ENOMEM;
1231 goto cache_error;
1232 }
1233 event->container = container;
1234 event->filter = filter;
1235 event->instrumentation = itype;
1236 event->evtype = LTTNG_TYPE_EVENT;
1237 if (!container->coalesce_hits)
1238 event->user_token = token;
1239 INIT_LIST_HEAD(&event->filter_bytecode_runtime_head);
1240 INIT_LIST_HEAD(&event->enablers_ref_head);
1241 if (lttng_event_container_allocate_id(container, key_string,
1242 &event->id)) {
1243 ret = -EMFILE;
1244 goto full;
1245 }
1246 if (key_string[0]) {
1247 key_head = utils_borrow_hash_table_bucket(session->events_key_ht.table,
1248 LTTNG_EVENT_HT_SIZE, key_string);
1249 hlist_add_head(&event->key_hlist, key_head);
1250 }
1251 strcpy(event->key, key_string);
1252
1253 switch (itype) {
1254 case LTTNG_KERNEL_TRACEPOINT:
1255 /* Event will be enabled by enabler sync. */
1256 event->enabled = 0;
1257 event->registered = 0;
1258 event->desc = lttng_event_desc_get(event_name);
1259 if (!event->desc) {
1260 ret = -ENOENT;
1261 goto register_error;
1262 }
1263 /* Populate lttng_event structure before event registration. */
1264 smp_wmb();
1265 break;
1266 case LTTNG_KERNEL_KPROBE:
1267 /*
1268 * Needs to be explicitly enabled after creation, since
1269 * we may want to apply filters.
1270 */
1271 event->enabled = 0;
1272 event->registered = 1;
1273 /*
1274 * Populate lttng_event structure before event
1275 * registration.
1276 */
1277 smp_wmb();
1278 ret = lttng_kprobes_register_event(event_name,
1279 event_param->u.kprobe.symbol_name,
1280 event_param->u.kprobe.offset,
1281 event_param->u.kprobe.addr,
1282 event);
1283 if (ret) {
1284 ret = -EINVAL;
1285 goto register_error;
1286 }
1287 event->u.kprobe.user_token = token;
1288 ret = try_module_get(event->desc->owner);
1289 WARN_ON_ONCE(!ret);
1290
1291 /* Append descriptor to counter. */
1292 switch (container->type) {
1293 case LTTNG_EVENT_CONTAINER_COUNTER:
1294 {
1295 struct lttng_counter *counter;
1296 const char *name = "<UNKNOWN>";
1297 int ret;
1298
1299 counter = lttng_event_container_get_counter(container);
1300 if (event->key[0])
1301 name = event->key;
1302 else
1303 name = event_name;
1304 ret = lttng_counter_append_descriptor(counter,
1305 token, event->id,
1306 name);
1307 if (ret) {
1308 WARN_ON_ONCE(1);
1309 }
1310 break;
1311 }
1312 case LTTNG_EVENT_CONTAINER_CHANNEL:
1313 default:
1314 break;
1315 }
1316 break;
1317 case LTTNG_KERNEL_KRETPROBE:
1318 {
1319 struct lttng_event *event_return;
1320
1321 /* kretprobe defines 2 events */
1322 /*
1323 * Needs to be explicitly enabled after creation, since
1324 * we may want to apply filters.
1325 */
1326 event->enabled = 0;
1327 event->registered = 1;
1328 event->u.kretprobe.user_token = token;
1329
1330 /* Append descriptor to counter. */
1331 switch (container->type) {
1332 case LTTNG_EVENT_CONTAINER_COUNTER:
1333 {
1334 struct lttng_counter *counter;
1335 const char *name = "<UNKNOWN>";
1336 int ret;
1337
1338 counter = lttng_event_container_get_counter(container);
1339 if (event->key[0])
1340 name = event->key;
1341 else
1342 name = event_name;
1343 ret = lttng_counter_append_descriptor(counter,
1344 token, event->id,
1345 name);
1346 if (ret) {
1347 WARN_ON_ONCE(1);
1348 }
1349 break;
1350 }
1351 case LTTNG_EVENT_CONTAINER_CHANNEL:
1352 default:
1353 break;
1354 }
1355
1356 event_return =
1357 kmem_cache_zalloc(event_cache, GFP_KERNEL);
1358 if (!event_return) {
1359 ret = -ENOMEM;
1360 goto register_error;
1361 }
1362 event_return->container = container;
1363 event_return->filter = filter;
1364
1365 strcpy(event_name, event_param->name);
1366 if (strlen(event_name) + strlen("_return") >= LTTNG_KERNEL_SYM_NAME_LEN) {
1367 ret = -EINVAL;
1368 goto register_error;
1369 }
1370 strcat(event_name, "_return");
1371 if (format_event_key(key_string, key, event_name)) {
1372 ret = -EINVAL;
1373 goto register_error;
1374 }
1375 if (lttng_event_container_allocate_id(container, key_string, &event_return->id)) {
1376 kmem_cache_free(event_cache, event_return);
1377 ret = -EMFILE;
1378 goto register_error;
1379 }
1380 key_head = utils_borrow_hash_table_bucket(session->events_key_ht.table,
1381 LTTNG_EVENT_HT_SIZE, key_string);
1382 hlist_add_head(&event_return->key_hlist, key_head);
1383 event_return->enabled = 0;
1384 event_return->registered = 1;
1385 event_return->instrumentation = itype;
1386 INIT_LIST_HEAD(&event_return->filter_bytecode_runtime_head);
1387 INIT_LIST_HEAD(&event_return->enablers_ref_head);
1388 event_return->u.kretprobe.user_token = token;
1389 strcpy(event_return->key, key_string);
1390 /*
1391 * Populate lttng_event structure before kretprobe registration.
1392 */
1393 smp_wmb();
1394 ret = lttng_kretprobes_register(event_name,
1395 event_param->u.kretprobe.symbol_name,
1396 event_param->u.kretprobe.offset,
1397 event_param->u.kretprobe.addr,
1398 event, event_return);
1399 if (ret) {
1400 kmem_cache_free(event_cache, event_return);
1401 ret = -EINVAL;
1402 goto register_error;
1403 }
1404 /* Take 2 refs on the module: one per event. */
1405 ret = try_module_get(event->desc->owner);
1406 WARN_ON_ONCE(!ret);
1407 ret = try_module_get(event->desc->owner);
1408 WARN_ON_ONCE(!ret);
1409
1410 /* Append exit descriptor to counter. */
1411 switch (container->type) {
1412 case LTTNG_EVENT_CONTAINER_COUNTER:
1413 {
1414 struct lttng_counter *counter;
1415 const char *name = "<UNKNOWN>";
1416 int ret;
1417
1418 counter = lttng_event_container_get_counter(container);
1419 if (event_return->key[0])
1420 name = event_return->key;
1421 else
1422 name = event_name;
1423 ret = lttng_counter_append_descriptor(counter,
1424 token, event_return->id,
1425 name);
1426 if (ret) {
1427 WARN_ON_ONCE(1);
1428 }
1429 break;
1430 }
1431 case LTTNG_EVENT_CONTAINER_CHANNEL:
1432 default:
1433 break;
1434 }
1435 switch (container->type) {
1436 case LTTNG_EVENT_CONTAINER_CHANNEL:
1437 ret = _lttng_event_metadata_statedump(session, event_return);
1438 WARN_ON_ONCE(ret > 0);
1439 if (ret) {
1440 kmem_cache_free(event_cache, event_return);
1441 module_put(event->desc->owner);
1442 module_put(event->desc->owner);
1443 goto statedump_error;
1444 }
1445 break;
1446 case LTTNG_EVENT_CONTAINER_COUNTER:
1447 default:
1448 break;
1449 }
1450 list_add(&event_return->list, &session->events);
1451 break;
1452 }
1453 case LTTNG_KERNEL_SYSCALL:
1454 /*
1455 * Needs to be explicitly enabled after creation, since
1456 * we may want to apply filters.
1457 */
1458 event->enabled = 0;
1459 event->registered = 0;
1460 event->desc = event_desc;
1461 switch (event_param->u.syscall.entryexit) {
1462 case LTTNG_KERNEL_SYSCALL_ENTRYEXIT:
1463 ret = -EINVAL;
1464 goto register_error;
1465 case LTTNG_KERNEL_SYSCALL_ENTRY:
1466 event->u.syscall.entryexit = LTTNG_SYSCALL_ENTRY;
1467 break;
1468 case LTTNG_KERNEL_SYSCALL_EXIT:
1469 event->u.syscall.entryexit = LTTNG_SYSCALL_EXIT;
1470 break;
1471 }
1472 switch (event_param->u.syscall.abi) {
1473 case LTTNG_KERNEL_SYSCALL_ABI_ALL:
1474 ret = -EINVAL;
1475 goto register_error;
1476 case LTTNG_KERNEL_SYSCALL_ABI_NATIVE:
1477 event->u.syscall.abi = LTTNG_SYSCALL_ABI_NATIVE;
1478 break;
1479 case LTTNG_KERNEL_SYSCALL_ABI_COMPAT:
1480 event->u.syscall.abi = LTTNG_SYSCALL_ABI_COMPAT;
1481 break;
1482 }
1483 if (!event->desc) {
1484 ret = -EINVAL;
1485 goto register_error;
1486 }
1487 break;
1488 case LTTNG_KERNEL_UPROBE:
1489 /*
1490 * Needs to be explicitly enabled after creation, since
1491 * we may want to apply filters.
1492 */
1493 event->enabled = 0;
1494 event->registered = 1;
1495 event->u.uprobe.user_token = token;
1496
1497 /*
1498 * Populate lttng_event structure before event
1499 * registration.
1500 */
1501 smp_wmb();
1502
1503 ret = lttng_uprobes_register_event(event_name,
1504 event_param->u.uprobe.fd,
1505 event);
1506 if (ret)
1507 goto register_error;
1508 ret = try_module_get(event->desc->owner);
1509 WARN_ON_ONCE(!ret);
1510
1511 /* Append descriptor to counter. */
1512 switch (container->type) {
1513 case LTTNG_EVENT_CONTAINER_COUNTER:
1514 {
1515 struct lttng_counter *counter;
1516 const char *name = "<UNKNOWN>";
1517 int ret;
1518
1519 counter = lttng_event_container_get_counter(container);
1520 if (event->key[0])
1521 name = event->key;
1522 else
1523 name = event_name;
1524 ret = lttng_counter_append_descriptor(counter,
1525 token, event->id,
1526 name);
1527 if (ret) {
1528 WARN_ON_ONCE(1);
1529 }
1530 break;
1531 }
1532 case LTTNG_EVENT_CONTAINER_CHANNEL:
1533 default:
1534 break;
1535 }
1536 break;
1537 case LTTNG_KERNEL_FUNCTION: /* Fall-through. */
1538 case LTTNG_KERNEL_NOOP: /* Fall-through.*/
1539 default:
1540 WARN_ON_ONCE(1);
1541 ret = -EINVAL;
1542 goto register_error;
1543 }
1544 switch (container->type) {
1545 case LTTNG_EVENT_CONTAINER_CHANNEL:
1546 ret = _lttng_event_metadata_statedump(session, event);
1547 WARN_ON_ONCE(ret > 0);
1548 if (ret) {
1549 goto statedump_error;
1550 }
1551 break;
1552 case LTTNG_EVENT_CONTAINER_COUNTER:
1553 default:
1554 break;
1555 }
1556 hlist_add_head(&event->name_hlist, name_head);
1557 list_add(&event->list, &session->events);
1558 return event;
1559
1560 statedump_error:
1561 /* If a statedump error occurs, events will not be readable. */
1562 register_error:
1563 full:
1564 kmem_cache_free(event_cache, event);
1565 cache_error:
1566 exist:
1567 type_error:
1568 return ERR_PTR(ret);
1569 }
1570
/*
 * Create an event notifier within an event notifier group.
 *
 * Needs to be called with sessions mutex held. Rejects duplicates on
 * (name, group, token), allocates and registers the notifier according
 * to its instrumentation type, then clears its error counter bucket.
 * Returns the new notifier, or ERR_PTR on error.
 */
struct lttng_event_notifier *_lttng_event_notifier_create(
		const struct lttng_event_desc *event_desc,
		uint64_t token, uint64_t error_counter_index,
		struct lttng_event_notifier_group *event_notifier_group,
		struct lttng_kernel_event_notifier *event_notifier_param,
		void *filter, enum lttng_kernel_instrumentation itype)
{
	struct lttng_event_notifier *event_notifier;
	struct lttng_counter *error_counter;
	const char *event_name;
	struct hlist_head *head;
	int ret;

	/* Derive the notifier name from the instrumentation type. */
	switch (itype) {
	case LTTNG_KERNEL_TRACEPOINT:
		event_name = event_desc->name;
		break;
	case LTTNG_KERNEL_KPROBE:
	case LTTNG_KERNEL_UPROBE:
	case LTTNG_KERNEL_SYSCALL:
		event_name = event_notifier_param->event.name;
		break;
	case LTTNG_KERNEL_KRETPROBE:
	case LTTNG_KERNEL_FUNCTION:
	case LTTNG_KERNEL_NOOP:
	default:
		WARN_ON_ONCE(1);
		ret = -EINVAL;
		goto type_error;
	}

	/* Duplicate check on (name, group, user token). */
	head = utils_borrow_hash_table_bucket(event_notifier_group->event_notifiers_ht.table,
		LTTNG_EVENT_NOTIFIER_HT_SIZE, event_name);
	lttng_hlist_for_each_entry(event_notifier, head, hlist) {
		WARN_ON_ONCE(!event_notifier->desc);
		if (!strncmp(event_notifier->desc->name, event_name,
					LTTNG_KERNEL_SYM_NAME_LEN - 1)
				&& event_notifier_group == event_notifier->group
				&& token == event_notifier->user_token) {
			ret = -EEXIST;
			goto exist;
		}
	}

	event_notifier = kmem_cache_zalloc(event_notifier_cache, GFP_KERNEL);
	if (!event_notifier) {
		ret = -ENOMEM;
		goto cache_error;
	}

	event_notifier->group = event_notifier_group;
	event_notifier->user_token = token;
	event_notifier->error_counter_index = error_counter_index;
	event_notifier->num_captures = 0;
	event_notifier->filter = filter;
	event_notifier->instrumentation = itype;
	event_notifier->evtype = LTTNG_TYPE_EVENT;
	event_notifier->send_notification = lttng_event_notifier_notification_send;
	INIT_LIST_HEAD(&event_notifier->filter_bytecode_runtime_head);
	INIT_LIST_HEAD(&event_notifier->capture_bytecode_runtime_head);
	INIT_LIST_HEAD(&event_notifier->enablers_ref_head);

	/* Per-instrumentation-type registration. */
	switch (itype) {
	case LTTNG_KERNEL_TRACEPOINT:
		/* Event will be enabled by enabler sync. */
		event_notifier->enabled = 0;
		event_notifier->registered = 0;
		event_notifier->desc = lttng_event_desc_get(event_name);
		if (!event_notifier->desc) {
			ret = -ENOENT;
			goto register_error;
		}
		/* Populate lttng_event_notifier structure before event registration. */
		smp_wmb();
		break;
	case LTTNG_KERNEL_KPROBE:
		/*
		 * Needs to be explicitly enabled after creation, since
		 * we may want to apply filters.
		 */
		event_notifier->enabled = 0;
		event_notifier->registered = 1;
		/*
		 * Populate lttng_event_notifier structure before event
		 * registration.
		 */
		smp_wmb();
		ret = lttng_kprobes_register_event_notifier(
				event_notifier_param->event.u.kprobe.symbol_name,
				event_notifier_param->event.u.kprobe.offset,
				event_notifier_param->event.u.kprobe.addr,
				event_notifier);
		if (ret) {
			ret = -EINVAL;
			goto register_error;
		}
		ret = try_module_get(event_notifier->desc->owner);
		WARN_ON_ONCE(!ret);
		break;
	case LTTNG_KERNEL_NOOP:
	case LTTNG_KERNEL_SYSCALL:
		/*
		 * Needs to be explicitly enabled after creation, since
		 * we may want to apply filters.
		 */
		event_notifier->enabled = 0;
		event_notifier->registered = 0;
		event_notifier->desc = event_desc;
		/* ENTRYEXIT/ABI_ALL must be expanded by the caller beforehand. */
		switch (event_notifier_param->event.u.syscall.entryexit) {
		case LTTNG_KERNEL_SYSCALL_ENTRYEXIT:
			ret = -EINVAL;
			goto register_error;
		case LTTNG_KERNEL_SYSCALL_ENTRY:
			event_notifier->u.syscall.entryexit = LTTNG_SYSCALL_ENTRY;
			break;
		case LTTNG_KERNEL_SYSCALL_EXIT:
			event_notifier->u.syscall.entryexit = LTTNG_SYSCALL_EXIT;
			break;
		}
		switch (event_notifier_param->event.u.syscall.abi) {
		case LTTNG_KERNEL_SYSCALL_ABI_ALL:
			ret = -EINVAL;
			goto register_error;
		case LTTNG_KERNEL_SYSCALL_ABI_NATIVE:
			event_notifier->u.syscall.abi = LTTNG_SYSCALL_ABI_NATIVE;
			break;
		case LTTNG_KERNEL_SYSCALL_ABI_COMPAT:
			event_notifier->u.syscall.abi = LTTNG_SYSCALL_ABI_COMPAT;
			break;
		}

		if (!event_notifier->desc) {
			ret = -EINVAL;
			goto register_error;
		}
		break;
	case LTTNG_KERNEL_UPROBE:
		/*
		 * Needs to be explicitly enabled after creation, since
		 * we may want to apply filters.
		 */
		event_notifier->enabled = 0;
		event_notifier->registered = 1;

		/*
		 * Populate lttng_event_notifier structure before
		 * event_notifier registration.
		 */
		smp_wmb();

		ret = lttng_uprobes_register_event_notifier(
				event_notifier_param->event.name,
				event_notifier_param->event.u.uprobe.fd,
				event_notifier);
		if (ret)
			goto register_error;
		ret = try_module_get(event_notifier->desc->owner);
		WARN_ON_ONCE(!ret);
		break;
	case LTTNG_KERNEL_KRETPROBE:
	case LTTNG_KERNEL_FUNCTION:
	default:
		WARN_ON_ONCE(1);
		ret = -EINVAL;
		goto register_error;
	}

	list_add(&event_notifier->list, &event_notifier_group->event_notifiers_head);
	hlist_add_head(&event_notifier->hlist, head);

	/*
	 * Clear the error counter bucket. The sessiond keeps track of which
	 * bucket is currently in use. We trust it. The session lock
	 * synchronizes against concurrent creation of the error
	 * counter.
	 */
	error_counter = event_notifier_group->error_counter;
	if (error_counter) {
		size_t dimension_index[1];

		/*
		 * Check that the index is within the boundary of the counter.
		 */
		if (event_notifier->error_counter_index >= event_notifier_group->error_counter_len) {
			printk(KERN_INFO "LTTng: event_notifier: Error counter index out-of-bound: counter-len=%zu, index=%llu\n",
				event_notifier_group->error_counter_len, event_notifier->error_counter_index);
			ret = -EINVAL;
			goto register_error;
		}

		dimension_index[0] = event_notifier->error_counter_index;
		ret = error_counter->ops->counter_clear(error_counter->counter, dimension_index);
		if (ret) {
			printk(KERN_INFO "LTTng: event_notifier: Unable to clear error counter bucket %llu\n",
				event_notifier->error_counter_index);
			goto register_error;
		}
	}

	return event_notifier;

register_error:
	/*
	 * NOTE(review): when the error-counter checks above fail, the
	 * notifier has already been added to the group list and hash table
	 * and is freed here without being unlinked — confirm callers cannot
	 * observe the stale links.
	 */
	kmem_cache_free(event_notifier_cache, event_notifier);
cache_error:
exist:
type_error:
	return ERR_PTR(ret);
}
1779
1780 int lttng_kernel_counter_read(struct lttng_counter *counter,
1781 const size_t *dim_indexes, int32_t cpu,
1782 int64_t *val, bool *overflow, bool *underflow)
1783 {
1784 return counter->ops->counter_read(counter->counter, dim_indexes,
1785 cpu, val, overflow, underflow);
1786 }
1787
1788 int lttng_kernel_counter_aggregate(struct lttng_counter *counter,
1789 const size_t *dim_indexes, int64_t *val,
1790 bool *overflow, bool *underflow)
1791 {
1792 return counter->ops->counter_aggregate(counter->counter, dim_indexes,
1793 val, overflow, underflow);
1794 }
1795
1796 int lttng_kernel_counter_clear(struct lttng_counter *counter,
1797 const size_t *dim_indexes)
1798 {
1799 return counter->ops->counter_clear(counter->counter, dim_indexes);
1800 }
1801
1802 struct lttng_event *lttng_event_create(struct lttng_event_container *container,
1803 struct lttng_kernel_event *event_param,
1804 const struct lttng_counter_key *key,
1805 void *filter,
1806 const struct lttng_event_desc *event_desc,
1807 enum lttng_kernel_instrumentation itype,
1808 uint64_t token)
1809 {
1810 struct lttng_event *event;
1811
1812 mutex_lock(&sessions_mutex);
1813 event = _lttng_event_create(container, event_param, key, filter, event_desc,
1814 itype, token);
1815 mutex_unlock(&sessions_mutex);
1816 return event;
1817 }
1818
1819 struct lttng_event_notifier *lttng_event_notifier_create(
1820 const struct lttng_event_desc *event_desc,
1821 uint64_t id, uint64_t error_counter_index,
1822 struct lttng_event_notifier_group *event_notifier_group,
1823 struct lttng_kernel_event_notifier *event_notifier_param,
1824 void *filter, enum lttng_kernel_instrumentation itype)
1825 {
1826 struct lttng_event_notifier *event_notifier;
1827
1828 mutex_lock(&sessions_mutex);
1829 event_notifier = _lttng_event_notifier_create(event_desc, id,
1830 error_counter_index, event_notifier_group,
1831 event_notifier_param, filter, itype);
1832 mutex_unlock(&sessions_mutex);
1833 return event_notifier;
1834 }
1835
1836 /* Only used for tracepoints for now. */
1837 static
1838 void register_event(struct lttng_event *event)
1839 {
1840 const struct lttng_event_desc *desc;
1841 int ret = -EINVAL;
1842
1843 if (event->registered)
1844 return;
1845
1846 desc = event->desc;
1847 switch (event->instrumentation) {
1848 case LTTNG_KERNEL_TRACEPOINT:
1849 ret = lttng_wrapper_tracepoint_probe_register(desc->kname,
1850 desc->probe_callback, event);
1851 break;
1852 case LTTNG_KERNEL_SYSCALL:
1853 ret = lttng_syscall_filter_enable_event(event->container, event);
1854 break;
1855 case LTTNG_KERNEL_KPROBE:
1856 case LTTNG_KERNEL_UPROBE:
1857 case LTTNG_KERNEL_KRETPROBE:
1858 case LTTNG_KERNEL_NOOP:
1859 ret = 0;
1860 break;
1861 case LTTNG_KERNEL_FUNCTION: /* Fall-through */
1862 default:
1863 WARN_ON_ONCE(1);
1864 }
1865 if (!ret)
1866 event->registered = 1;
1867 }
1868
1869 /*
1870 * Only used internally at session destruction.
1871 */
1872 int _lttng_event_unregister(struct lttng_event *event)
1873 {
1874 const struct lttng_event_desc *desc;
1875 int ret = -EINVAL;
1876
1877 if (!event->registered)
1878 return 0;
1879
1880 desc = event->desc;
1881 switch (event->instrumentation) {
1882 case LTTNG_KERNEL_TRACEPOINT:
1883 ret = lttng_wrapper_tracepoint_probe_unregister(event->desc->kname,
1884 event->desc->probe_callback, event);
1885 break;
1886 case LTTNG_KERNEL_KPROBE:
1887 lttng_kprobes_unregister_event(event);
1888 ret = 0;
1889 break;
1890 case LTTNG_KERNEL_KRETPROBE:
1891 lttng_kretprobes_unregister(event);
1892 ret = 0;
1893 break;
1894 case LTTNG_KERNEL_SYSCALL:
1895 ret = lttng_syscall_filter_disable_event(event->container, event);
1896 break;
1897 case LTTNG_KERNEL_NOOP:
1898 ret = 0;
1899 break;
1900 case LTTNG_KERNEL_UPROBE:
1901 lttng_uprobes_unregister_event(event);
1902 ret = 0;
1903 break;
1904 case LTTNG_KERNEL_FUNCTION: /* Fall-through */
1905 default:
1906 WARN_ON_ONCE(1);
1907 }
1908 if (!ret)
1909 event->registered = 0;
1910 return ret;
1911 }
1912
1913 /* Only used for tracepoints for now. */
1914 static
1915 void register_event_notifier(struct lttng_event_notifier *event_notifier)
1916 {
1917 const struct lttng_event_desc *desc;
1918 int ret = -EINVAL;
1919
1920 if (event_notifier->registered)
1921 return;
1922
1923 desc = event_notifier->desc;
1924 switch (event_notifier->instrumentation) {
1925 case LTTNG_KERNEL_TRACEPOINT:
1926 ret = lttng_wrapper_tracepoint_probe_register(desc->kname,
1927 desc->event_notifier_callback,
1928 event_notifier);
1929 break;
1930 case LTTNG_KERNEL_SYSCALL:
1931 ret = lttng_syscall_filter_enable_event_notifier(event_notifier);
1932 break;
1933 case LTTNG_KERNEL_KPROBE:
1934 case LTTNG_KERNEL_UPROBE:
1935 ret = 0;
1936 break;
1937 case LTTNG_KERNEL_KRETPROBE:
1938 case LTTNG_KERNEL_FUNCTION:
1939 case LTTNG_KERNEL_NOOP:
1940 default:
1941 WARN_ON_ONCE(1);
1942 }
1943 if (!ret)
1944 event_notifier->registered = 1;
1945 }
1946
1947 static
1948 int _lttng_event_notifier_unregister(
1949 struct lttng_event_notifier *event_notifier)
1950 {
1951 const struct lttng_event_desc *desc;
1952 int ret = -EINVAL;
1953
1954 if (!event_notifier->registered)
1955 return 0;
1956
1957 desc = event_notifier->desc;
1958 switch (event_notifier->instrumentation) {
1959 case LTTNG_KERNEL_TRACEPOINT:
1960 ret = lttng_wrapper_tracepoint_probe_unregister(event_notifier->desc->kname,
1961 event_notifier->desc->event_notifier_callback,
1962 event_notifier);
1963 break;
1964 case LTTNG_KERNEL_KPROBE:
1965 lttng_kprobes_unregister_event_notifier(event_notifier);
1966 ret = 0;
1967 break;
1968 case LTTNG_KERNEL_UPROBE:
1969 lttng_uprobes_unregister_event_notifier(event_notifier);
1970 ret = 0;
1971 break;
1972 case LTTNG_KERNEL_SYSCALL:
1973 ret = lttng_syscall_filter_disable_event_notifier(event_notifier);
1974 break;
1975 case LTTNG_KERNEL_KRETPROBE:
1976 case LTTNG_KERNEL_FUNCTION:
1977 case LTTNG_KERNEL_NOOP:
1978 default:
1979 WARN_ON_ONCE(1);
1980 }
1981 if (!ret)
1982 event_notifier->registered = 0;
1983 return ret;
1984 }
1985
1986 /*
1987 * Only used internally at session destruction.
1988 */
1989 static
1990 void _lttng_event_destroy(struct lttng_event *event)
1991 {
1992 struct lttng_enabler_ref *enabler_ref, *tmp_enabler_ref;
1993
1994 switch (event->instrumentation) {
1995 case LTTNG_KERNEL_TRACEPOINT:
1996 lttng_event_desc_put(event->desc);
1997 break;
1998 case LTTNG_KERNEL_KPROBE:
1999 module_put(event->desc->owner);
2000 lttng_kprobes_destroy_event_private(event);
2001 break;
2002 case LTTNG_KERNEL_KRETPROBE:
2003 module_put(event->desc->owner);
2004 lttng_kretprobes_destroy_private(event);
2005 break;
2006 case LTTNG_KERNEL_NOOP:
2007 case LTTNG_KERNEL_SYSCALL:
2008 break;
2009 case LTTNG_KERNEL_UPROBE:
2010 module_put(event->desc->owner);
2011 lttng_uprobes_destroy_event_private(event);
2012 break;
2013 case LTTNG_KERNEL_FUNCTION: /* Fall-through */
2014 default:
2015 WARN_ON_ONCE(1);
2016 }
2017 list_del(&event->list);
2018 lttng_destroy_context(event->ctx);
2019 lttng_free_event_filter_runtime(event);
2020 /* Free event enabler refs */
2021 list_for_each_entry_safe(enabler_ref, tmp_enabler_ref,
2022 &event->enablers_ref_head, node)
2023 kfree(enabler_ref);
2024 kmem_cache_free(event_cache, event);
2025 }
2026
2027 /*
2028 * Only used internally at session destruction.
2029 */
2030 static
2031 void _lttng_event_notifier_destroy(struct lttng_event_notifier *event_notifier)
2032 {
2033 struct lttng_enabler_ref *enabler_ref, *tmp_enabler_ref;
2034
2035 switch (event_notifier->instrumentation) {
2036 case LTTNG_KERNEL_TRACEPOINT:
2037 lttng_event_desc_put(event_notifier->desc);
2038 break;
2039 case LTTNG_KERNEL_KPROBE:
2040 module_put(event_notifier->desc->owner);
2041 lttng_kprobes_destroy_event_notifier_private(event_notifier);
2042 break;
2043 case LTTNG_KERNEL_NOOP:
2044 case LTTNG_KERNEL_SYSCALL:
2045 break;
2046 case LTTNG_KERNEL_UPROBE:
2047 module_put(event_notifier->desc->owner);
2048 lttng_uprobes_destroy_event_notifier_private(event_notifier);
2049 break;
2050 case LTTNG_KERNEL_KRETPROBE:
2051 case LTTNG_KERNEL_FUNCTION:
2052 default:
2053 WARN_ON_ONCE(1);
2054 }
2055 list_del(&event_notifier->list);
2056 lttng_free_event_notifier_filter_runtime(event_notifier);
2057 /* Free event enabler refs */
2058 list_for_each_entry_safe(enabler_ref, tmp_enabler_ref,
2059 &event_notifier->enablers_ref_head, node)
2060 kfree(enabler_ref);
2061 kmem_cache_free(event_notifier_cache, event_notifier);
2062 }
2063
2064 struct lttng_id_tracker *get_tracker(struct lttng_session *session,
2065 enum tracker_type tracker_type)
2066 {
2067 switch (tracker_type) {
2068 case TRACKER_PID:
2069 return &session->pid_tracker;
2070 case TRACKER_VPID:
2071 return &session->vpid_tracker;
2072 case TRACKER_UID:
2073 return &session->uid_tracker;
2074 case TRACKER_VUID:
2075 return &session->vuid_tracker;
2076 case TRACKER_GID:
2077 return &session->gid_tracker;
2078 case TRACKER_VGID:
2079 return &session->vgid_tracker;
2080 default:
2081 WARN_ON_ONCE(1);
2082 return NULL;
2083 }
2084 }
2085
2086 int lttng_session_track_id(struct lttng_session *session,
2087 enum tracker_type tracker_type, int id)
2088 {
2089 struct lttng_id_tracker *tracker;
2090 int ret;
2091
2092 tracker = get_tracker(session, tracker_type);
2093 if (!tracker)
2094 return -EINVAL;
2095 if (id < -1)
2096 return -EINVAL;
2097 mutex_lock(&sessions_mutex);
2098 if (id == -1) {
2099 /* track all ids: destroy tracker. */
2100 lttng_id_tracker_destroy(tracker, true);
2101 ret = 0;
2102 } else {
2103 ret = lttng_id_tracker_add(tracker, id);
2104 }
2105 mutex_unlock(&sessions_mutex);
2106 return ret;
2107 }
2108
2109 int lttng_session_untrack_id(struct lttng_session *session,
2110 enum tracker_type tracker_type, int id)
2111 {
2112 struct lttng_id_tracker *tracker;
2113 int ret;
2114
2115 tracker = get_tracker(session, tracker_type);
2116 if (!tracker)
2117 return -EINVAL;
2118 if (id < -1)
2119 return -EINVAL;
2120 mutex_lock(&sessions_mutex);
2121 if (id == -1) {
2122 /* untrack all ids: replace by empty tracker. */
2123 ret = lttng_id_tracker_empty_set(tracker);
2124 } else {
2125 ret = lttng_id_tracker_del(tracker, id);
2126 }
2127 mutex_unlock(&sessions_mutex);
2128 return ret;
2129 }
2130
/*
 * seq_file start callback for the tracked-id listing: take the sessions
 * mutex (released in id_list_stop) and return the *pos-th hash node.
 */
static
void *id_list_start(struct seq_file *m, loff_t *pos)
{
	struct lttng_id_tracker *id_tracker = m->private;
	struct lttng_id_tracker_rcu *id_tracker_p = id_tracker->p;
	struct lttng_id_hash_node *e;
	int iter = 0, i;

	mutex_lock(&sessions_mutex);
	if (id_tracker_p) {
		/* Linear scan of all hash buckets up to position *pos. */
		for (i = 0; i < LTTNG_ID_TABLE_SIZE; i++) {
			struct hlist_head *head = &id_tracker_p->id_hash[i];

			lttng_hlist_for_each_entry(e, head, hlist) {
				if (iter++ >= *pos)
					return e;
			}
		}
	} else {
		/* ID tracker disabled. */
		if (iter >= *pos && iter == 0) {
			/*
			 * NOTE(review): id_tracker_p is NULL here, so this
			 * returns NULL, which seq_file treats as end-of-list
			 * (show is never called for the disabled case) —
			 * confirm this is the intended behavior.
			 */
			return id_tracker_p; /* empty tracker */
		}
		iter++;
	}
	/* End of list */
	return NULL;
}
2159
/* Called with sessions_mutex held. */
static
void *id_list_next(struct seq_file *m, void *p, loff_t *ppos)
{
	struct lttng_id_tracker *id_tracker = m->private;
	struct lttng_id_tracker_rcu *id_tracker_p = id_tracker->p;
	struct lttng_id_hash_node *e;
	int iter = 0, i;

	/* seq_file next callback: advance position and rescan from scratch. */
	(*ppos)++;
	if (id_tracker_p) {
		/* Linear scan of all hash buckets up to the new position. */
		for (i = 0; i < LTTNG_ID_TABLE_SIZE; i++) {
			struct hlist_head *head = &id_tracker_p->id_hash[i];

			lttng_hlist_for_each_entry(e, head, hlist) {
				if (iter++ >= *ppos)
					return e;
			}
		}
	} else {
		/* ID tracker disabled. */
		/*
		 * NOTE(review): with iter == 0 and *ppos just incremented,
		 * this condition can only hold for *ppos <= 0 — confirm the
		 * disabled-tracker entry is ever re-yielded here.
		 */
		if (iter >= *ppos && iter == 0)
			return p; /* empty tracker */
		iter++;
	}

	/* End of list */
	return NULL;
}
2189
/* seq_file "stop" operation: release the mutex taken in id_list_start(). */
static
void id_list_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&sessions_mutex);
}
2195
2196 static
2197 int id_list_show(struct seq_file *m, void *p)
2198 {
2199 struct lttng_id_tracker *id_tracker = m->private;
2200 struct lttng_id_tracker_rcu *id_tracker_p = id_tracker->p;
2201 int id;
2202
2203 if (p == id_tracker_p) {
2204 /* Tracker disabled. */
2205 id = -1;
2206 } else {
2207 const struct lttng_id_hash_node *e = p;
2208
2209 id = lttng_id_tracker_get_node_id(e);
2210 }
2211 switch (id_tracker->tracker_type) {
2212 case TRACKER_PID:
2213 seq_printf(m, "process { pid = %d; };\n", id);
2214 break;
2215 case TRACKER_VPID:
2216 seq_printf(m, "process { vpid = %d; };\n", id);
2217 break;
2218 case TRACKER_UID:
2219 seq_printf(m, "user { uid = %d; };\n", id);
2220 break;
2221 case TRACKER_VUID:
2222 seq_printf(m, "user { vuid = %d; };\n", id);
2223 break;
2224 case TRACKER_GID:
2225 seq_printf(m, "group { gid = %d; };\n", id);
2226 break;
2227 case TRACKER_VGID:
2228 seq_printf(m, "group { vgid = %d; };\n", id);
2229 break;
2230 default:
2231 seq_printf(m, "UNKNOWN { field = %d };\n", id);
2232 }
2233 return 0;
2234 }
2235
/* seq_file iterator operations backing the tracker id listing file. */
static
const struct seq_operations lttng_tracker_ids_list_seq_ops = {
	.start = id_list_start,
	.next = id_list_next,
	.stop = id_list_stop,
	.show = id_list_show,
};
2243
/*
 * Open the tracker id listing: attach the seq_file iterator operations.
 * The inode argument is unused (the file is backed by an anon inode).
 */
static
int lttng_tracker_ids_list_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &lttng_tracker_ids_list_seq_ops);
}
2249
2250 static
2251 int lttng_tracker_ids_list_release(struct inode *inode, struct file *file)
2252 {
2253 struct seq_file *m = file->private_data;
2254 struct lttng_id_tracker *id_tracker = m->private;
2255 int ret;
2256
2257 WARN_ON_ONCE(!id_tracker);
2258 ret = seq_release(inode, file);
2259 if (!ret)
2260 fput(id_tracker->session->file);
2261 return ret;
2262 }
2263
/* File operations for the anonymous tracker id listing file. */
const struct file_operations lttng_tracker_ids_list_fops = {
	.owner = THIS_MODULE,
	.open = lttng_tracker_ids_list_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = lttng_tracker_ids_list_release,
};
2271
/*
 * Create an anonymous file listing the ids tracked by @tracker_type for
 * @session and install it into a new file descriptor.
 *
 * Pins the session file for the lifetime of the listing file (the
 * reference is dropped in lttng_tracker_ids_list_release()).
 * Returns the installed fd on success, negative error code on failure;
 * error paths unwind in strict reverse order of acquisition.
 */
int lttng_session_list_tracker_ids(struct lttng_session *session,
		enum tracker_type tracker_type)
{
	struct file *tracker_ids_list_file;
	struct seq_file *m;
	int file_fd, ret;

	file_fd = lttng_get_unused_fd();
	if (file_fd < 0) {
		ret = file_fd;
		goto fd_error;
	}

	tracker_ids_list_file = anon_inode_getfile("[lttng_tracker_ids_list]",
					&lttng_tracker_ids_list_fops,
					NULL, O_RDWR);
	if (IS_ERR(tracker_ids_list_file)) {
		ret = PTR_ERR(tracker_ids_list_file);
		goto file_error;
	}
	/* Pin the session file; refuse if the refcount would saturate. */
	if (!atomic_long_add_unless(&session->file->f_count, 1, LONG_MAX)) {
		ret = -EOVERFLOW;
		goto refcount_error;
	}
	/* inode is unused by lttng_tracker_ids_list_open(). */
	ret = lttng_tracker_ids_list_fops.open(NULL, tracker_ids_list_file);
	if (ret < 0)
		goto open_error;
	m = tracker_ids_list_file->private_data;

	m->private = get_tracker(session, tracker_type);
	BUG_ON(!m->private);
	fd_install(file_fd, tracker_ids_list_file);

	return file_fd;

open_error:
	atomic_long_dec(&session->file->f_count);
refcount_error:
	fput(tracker_ids_list_file);
file_error:
	put_unused_fd(file_fd);
fd_error:
	return ret;
}
2316
2317 /*
2318 * Enabler management.
2319 */
2320 static
2321 int lttng_match_enabler_star_glob(const char *desc_name,
2322 const char *pattern)
2323 {
2324 if (!strutils_star_glob_match(pattern, LTTNG_SIZE_MAX,
2325 desc_name, LTTNG_SIZE_MAX))
2326 return 0;
2327 return 1;
2328 }
2329
/*
 * Return 1 when @desc_name is exactly @name, 0 otherwise.
 */
static
int lttng_match_enabler_name(const char *desc_name,
		const char *name)
{
	return strcmp(desc_name, name) == 0;
}
2338
/*
 * Check whether an event description matches an enabler.
 *
 * Returns 1 on match, 0 on non-match, negative error code on invalid
 * enabler parameters.
 *
 * For syscall enablers, the "compat_" then "syscall_entry_" /
 * "syscall_exit_" prefixes are stripped from the description name and
 * recorded, then matched against the enabler's entry/exit,
 * native/compat ABI and name constraints in turn.
 */
int lttng_desc_match_enabler(const struct lttng_event_desc *desc,
		struct lttng_enabler *enabler)
{
	const char *desc_name, *enabler_name;
	bool compat = false, entry = false;

	enabler_name = enabler->event_param.name;
	switch (enabler->event_param.instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:
		desc_name = desc->name;
		/* Tracepoints match directly on the event name. */
		switch (enabler->format_type) {
		case LTTNG_ENABLER_FORMAT_STAR_GLOB:
			return lttng_match_enabler_star_glob(desc_name, enabler_name);
		case LTTNG_ENABLER_FORMAT_NAME:
			return lttng_match_enabler_name(desc_name, enabler_name);
		default:
			return -EINVAL;
		}
		break;
	case LTTNG_KERNEL_SYSCALL:
		desc_name = desc->name;
		/* Strip "compat_" prefix, remembering the ABI flavor. */
		if (!strncmp(desc_name, "compat_", strlen("compat_"))) {
			desc_name += strlen("compat_");
			compat = true;
		}
		/* Strip the entry/exit prefix, remembering the direction. */
		if (!strncmp(desc_name, "syscall_exit_",
				strlen("syscall_exit_"))) {
			desc_name += strlen("syscall_exit_");
		} else if (!strncmp(desc_name, "syscall_entry_",
				strlen("syscall_entry_"))) {
			desc_name += strlen("syscall_entry_");
			entry = true;
		} else {
			/* Syscall descriptions always carry one of the prefixes. */
			WARN_ON_ONCE(1);
			return -EINVAL;
		}
		/* Filter on entry/exit direction. */
		switch (enabler->event_param.u.syscall.entryexit) {
		case LTTNG_KERNEL_SYSCALL_ENTRYEXIT:
			break;
		case LTTNG_KERNEL_SYSCALL_ENTRY:
			if (!entry)
				return 0;
			break;
		case LTTNG_KERNEL_SYSCALL_EXIT:
			if (entry)
				return 0;
			break;
		default:
			return -EINVAL;
		}
		/* Filter on native/compat ABI. */
		switch (enabler->event_param.u.syscall.abi) {
		case LTTNG_KERNEL_SYSCALL_ABI_ALL:
			break;
		case LTTNG_KERNEL_SYSCALL_ABI_NATIVE:
			if (compat)
				return 0;
			break;
		case LTTNG_KERNEL_SYSCALL_ABI_COMPAT:
			if (!compat)
				return 0;
			break;
		default:
			return -EINVAL;
		}
		/* Finally, match the stripped name. */
		switch (enabler->event_param.u.syscall.match) {
		case LTTNG_KERNEL_SYSCALL_MATCH_NAME:
			switch (enabler->format_type) {
			case LTTNG_ENABLER_FORMAT_STAR_GLOB:
				return lttng_match_enabler_star_glob(desc_name, enabler_name);
			case LTTNG_ENABLER_FORMAT_NAME:
				return lttng_match_enabler_name(desc_name, enabler_name);
			default:
				return -EINVAL;
			}
			break;
		case LTTNG_KERNEL_SYSCALL_MATCH_NR:
			return -EINVAL;	/* Not implemented. */
		default:
			return -EINVAL;
		}
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}
}
2425
2426 static
2427 bool lttng_event_enabler_match_event(struct lttng_event_enabler *event_enabler,
2428 struct lttng_event *event)
2429 {
2430 struct lttng_enabler *base_enabler = lttng_event_enabler_as_enabler(
2431 event_enabler);
2432
2433 if (base_enabler->event_param.instrumentation == event->instrumentation
2434 && lttng_desc_match_enabler(event->desc, base_enabler) > 0
2435 && event->container == event_enabler->container
2436 && match_event_token(event->container, event, event_enabler->base.user_token))
2437 return true;
2438 else
2439 return false;
2440 }
2441
2442 static
2443 int lttng_event_notifier_enabler_match_event_notifier(struct lttng_event_notifier_enabler *event_notifier_enabler,
2444 struct lttng_event_notifier *event_notifier)
2445 {
2446 struct lttng_enabler *base_enabler = lttng_event_notifier_enabler_as_enabler(
2447 event_notifier_enabler);
2448
2449 if (base_enabler->event_param.instrumentation != event_notifier->instrumentation)
2450 return 0;
2451 if (lttng_desc_match_enabler(event_notifier->desc, base_enabler) > 0
2452 && event_notifier->group == event_notifier_enabler->group
2453 && event_notifier->user_token == event_notifier_enabler->base.user_token)
2454 return 1;
2455 else
2456 return 0;
2457 }
2458
2459 static
2460 struct lttng_enabler_ref *lttng_enabler_ref(
2461 struct list_head *enablers_ref_list,
2462 struct lttng_enabler *enabler)
2463 {
2464 struct lttng_enabler_ref *enabler_ref;
2465
2466 list_for_each_entry(enabler_ref, enablers_ref_list, node) {
2467 if (enabler_ref->ref == enabler)
2468 return enabler_ref;
2469 }
2470 return NULL;
2471 }
2472
2473 static
2474 void lttng_create_tracepoint_event_if_missing(struct lttng_event_enabler *event_enabler)
2475 {
2476 struct lttng_probe_desc *probe_desc;
2477 const struct lttng_event_desc *desc;
2478 int i;
2479 struct list_head *probe_list;
2480
2481 probe_list = lttng_get_probe_list_head();
2482 /*
2483 * For each probe event, if we find that a probe event matches
2484 * our enabler, create an associated lttng_event if not
2485 * already present.
2486 */
2487 list_for_each_entry(probe_desc, probe_list, head) {
2488 for (i = 0; i < probe_desc->nr_events; i++) {
2489 struct lttng_event *event;
2490
2491 desc = probe_desc->event_desc[i];
2492 if (lttng_desc_match_enabler(desc,
2493 lttng_event_enabler_as_enabler(event_enabler)) <= 0)
2494 continue;
2495
2496 /* Try to create an event for this event probe. */
2497 event = _lttng_event_create(event_enabler->container,
2498 NULL, &event_enabler->key, NULL, desc,
2499 LTTNG_KERNEL_TRACEPOINT,
2500 event_enabler->base.user_token);
2501 /* Skip if event is already found. */
2502 if (IS_ERR(event) && PTR_ERR(event) == -EEXIST)
2503 continue;
2504 if (IS_ERR(event)) {
2505 printk(KERN_INFO "LTTng: Unable to create event %s\n",
2506 probe_desc->event_desc[i]->name);
2507 }
2508 }
2509 }
2510 }
2511
2512 static
2513 void lttng_create_tracepoint_event_notifier_if_missing(struct lttng_event_notifier_enabler *event_notifier_enabler)
2514 {
2515 struct lttng_event_notifier_group *event_notifier_group = event_notifier_enabler->group;
2516 struct lttng_probe_desc *probe_desc;
2517 const struct lttng_event_desc *desc;
2518 int i;
2519 struct list_head *probe_list;
2520
2521 probe_list = lttng_get_probe_list_head();
2522 /*
2523 * For each probe event, if we find that a probe event matches
2524 * our enabler, create an associated lttng_event_notifier if not
2525 * already present.
2526 */
2527 list_for_each_entry(probe_desc, probe_list, head) {
2528 for (i = 0; i < probe_desc->nr_events; i++) {
2529 int found = 0;
2530 struct hlist_head *head;
2531 struct lttng_event_notifier *event_notifier;
2532
2533 desc = probe_desc->event_desc[i];
2534 if (lttng_desc_match_enabler(desc,
2535 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)) <= 0)
2536 continue;
2537
2538 /*
2539 * Check if already created.
2540 */
2541 head = utils_borrow_hash_table_bucket(
2542 event_notifier_group->event_notifiers_ht.table,
2543 LTTNG_EVENT_NOTIFIER_HT_SIZE, desc->name);
2544 lttng_hlist_for_each_entry(event_notifier, head, hlist) {
2545 if (event_notifier->desc == desc
2546 && event_notifier->user_token == event_notifier_enabler->base.user_token)
2547 found = 1;
2548 }
2549 if (found)
2550 continue;
2551
2552 /*
2553 * We need to create a event_notifier for this event probe.
2554 */
2555 event_notifier = _lttng_event_notifier_create(desc,
2556 event_notifier_enabler->base.user_token,
2557 event_notifier_enabler->error_counter_index,
2558 event_notifier_group, NULL, NULL,
2559 LTTNG_KERNEL_TRACEPOINT);
2560 if (IS_ERR(event_notifier)) {
2561 printk(KERN_INFO "Unable to create event_notifier %s\n",
2562 probe_desc->event_desc[i]->name);
2563 }
2564 }
2565 }
2566 }
2567
2568 static
2569 void lttng_create_syscall_event_if_missing(struct lttng_event_enabler *event_enabler)
2570 {
2571 int ret;
2572
2573 ret = lttng_syscalls_register_event(event_enabler, NULL);
2574 WARN_ON_ONCE(ret);
2575 }
2576
2577 static
2578 void lttng_create_syscall_event_notifier_if_missing(struct lttng_event_notifier_enabler *event_notifier_enabler)
2579 {
2580 int ret;
2581
2582 ret = lttng_syscalls_register_event_notifier(event_notifier_enabler, NULL);
2583 WARN_ON_ONCE(ret);
2584 ret = lttng_syscals_create_matching_event_notifiers(event_notifier_enabler, NULL);
2585 WARN_ON_ONCE(ret);
2586 }
2587
2588 /*
2589 * Create struct lttng_event if it is missing and present in the list of
2590 * tracepoint probes.
2591 * Should be called with sessions mutex held.
2592 */
2593 static
2594 void lttng_create_event_if_missing(struct lttng_event_enabler *event_enabler)
2595 {
2596 switch (event_enabler->base.event_param.instrumentation) {
2597 case LTTNG_KERNEL_TRACEPOINT:
2598 lttng_create_tracepoint_event_if_missing(event_enabler);
2599 break;
2600 case LTTNG_KERNEL_SYSCALL:
2601 lttng_create_syscall_event_if_missing(event_enabler);
2602 break;
2603 default:
2604 WARN_ON_ONCE(1);
2605 break;
2606 }
2607 }
2608
2609 /*
2610 * Create events associated with an event_enabler (if not already present),
2611 * and add backward reference from the event to the enabler.
2612 * Should be called with sessions mutex held.
2613 */
2614 static
2615 int lttng_event_enabler_ref_events(struct lttng_event_enabler *event_enabler)
2616 {
2617 struct lttng_event_container *container = event_enabler->container;
2618 struct lttng_session *session = container->session;
2619 struct lttng_enabler *base_enabler = lttng_event_enabler_as_enabler(event_enabler);
2620 struct lttng_event *event;
2621
2622 if (base_enabler->event_param.instrumentation == LTTNG_KERNEL_SYSCALL &&
2623 base_enabler->event_param.u.syscall.abi == LTTNG_KERNEL_SYSCALL_ABI_ALL &&
2624 base_enabler->event_param.u.syscall.match == LTTNG_KERNEL_SYSCALL_MATCH_NAME &&
2625 !strcmp(base_enabler->event_param.name, "*")) {
2626 int enabled = base_enabler->enabled;
2627 enum lttng_kernel_syscall_entryexit entryexit = base_enabler->event_param.u.syscall.entryexit;
2628
2629 if (entryexit == LTTNG_KERNEL_SYSCALL_ENTRY || entryexit == LTTNG_KERNEL_SYSCALL_ENTRYEXIT)
2630 WRITE_ONCE(container->syscall_all_entry, enabled);
2631
2632 if (entryexit == LTTNG_KERNEL_SYSCALL_EXIT || entryexit == LTTNG_KERNEL_SYSCALL_ENTRYEXIT)
2633 WRITE_ONCE(container->syscall_all_exit, enabled);
2634 }
2635
2636 /* First ensure that probe events are created for this enabler. */
2637 lttng_create_event_if_missing(event_enabler);
2638
2639 /* For each event matching event_enabler in session event list. */
2640 list_for_each_entry(event, &session->events, list) {
2641 struct lttng_enabler_ref *enabler_ref;
2642
2643 if (!lttng_event_enabler_match_event(event_enabler, event))
2644 continue;
2645 enabler_ref = lttng_enabler_ref(&event->enablers_ref_head,
2646 lttng_event_enabler_as_enabler(event_enabler));
2647 if (!enabler_ref) {
2648 /*
2649 * If no backward ref, create it.
2650 * Add backward ref from event to event_enabler.
2651 */
2652 enabler_ref = kzalloc(sizeof(*enabler_ref), GFP_KERNEL);
2653 if (!enabler_ref)
2654 return -ENOMEM;
2655 enabler_ref->ref = lttng_event_enabler_as_enabler(event_enabler);
2656 list_add(&enabler_ref->node,
2657 &event->enablers_ref_head);
2658 /* Append descriptor to counter. */
2659 switch (container->type) {
2660 case LTTNG_EVENT_CONTAINER_COUNTER:
2661 {
2662 struct lttng_counter *counter;
2663 const char *name = "<UNKNOWN>";
2664 int ret;
2665
2666 counter = lttng_event_container_get_counter(container);
2667 if (event->key[0])
2668 name = event->key;
2669 else if (event->desc && event->desc->name)
2670 name = event->desc->name;
2671 ret = lttng_counter_append_descriptor(counter,
2672 event_enabler->base.user_token, event->id,
2673 name);
2674 if (ret) {
2675 WARN_ON_ONCE(1);
2676 return ret;
2677 }
2678 break;
2679 }
2680 case LTTNG_EVENT_CONTAINER_CHANNEL:
2681 default:
2682 break;
2683 }
2684 }
2685
2686 /*
2687 * Link filter bytecodes if not linked yet.
2688 */
2689 lttng_enabler_link_bytecode(event->desc,
2690 lttng_static_ctx,
2691 &event->filter_bytecode_runtime_head,
2692 &lttng_event_enabler_as_enabler(event_enabler)->filter_bytecode_head);
2693
2694 /* TODO: merge event context. */
2695 }
2696 return 0;
2697 }
2698
2699 /*
2700 * Create struct lttng_event_notifier if it is missing and present in the list of
2701 * tracepoint probes.
2702 * Should be called with sessions mutex held.
2703 */
2704 static
2705 void lttng_create_event_notifier_if_missing(struct lttng_event_notifier_enabler *event_notifier_enabler)
2706 {
2707 switch (event_notifier_enabler->base.event_param.instrumentation) {
2708 case LTTNG_KERNEL_TRACEPOINT:
2709 lttng_create_tracepoint_event_notifier_if_missing(event_notifier_enabler);
2710 break;
2711 case LTTNG_KERNEL_SYSCALL:
2712 lttng_create_syscall_event_notifier_if_missing(event_notifier_enabler);
2713 break;
2714 default:
2715 WARN_ON_ONCE(1);
2716 break;
2717 }
2718 }
2719
2720 /*
2721 * Create event_notifiers associated with a event_notifier enabler (if not already present).
2722 */
2723 static
2724 int lttng_event_notifier_enabler_ref_event_notifiers(
2725 struct lttng_event_notifier_enabler *event_notifier_enabler)
2726 {
2727 struct lttng_event_notifier_group *event_notifier_group = event_notifier_enabler->group;
2728 struct lttng_enabler *base_enabler = lttng_event_notifier_enabler_as_enabler(event_notifier_enabler);
2729 struct lttng_event_notifier *event_notifier;
2730
2731 if (base_enabler->event_param.instrumentation == LTTNG_KERNEL_SYSCALL &&
2732 base_enabler->event_param.u.syscall.abi == LTTNG_KERNEL_SYSCALL_ABI_ALL &&
2733 base_enabler->event_param.u.syscall.match == LTTNG_KERNEL_SYSCALL_MATCH_NAME &&
2734 !strcmp(base_enabler->event_param.name, "*")) {
2735
2736 int enabled = base_enabler->enabled;
2737 enum lttng_kernel_syscall_entryexit entryexit = base_enabler->event_param.u.syscall.entryexit;
2738
2739 if (entryexit == LTTNG_KERNEL_SYSCALL_ENTRY || entryexit == LTTNG_KERNEL_SYSCALL_ENTRYEXIT)
2740 WRITE_ONCE(event_notifier_group->syscall_all_entry, enabled);
2741
2742 if (entryexit == LTTNG_KERNEL_SYSCALL_EXIT || entryexit == LTTNG_KERNEL_SYSCALL_ENTRYEXIT)
2743 WRITE_ONCE(event_notifier_group->syscall_all_exit, enabled);
2744
2745 }
2746
2747 /* First ensure that probe event_notifiers are created for this enabler. */
2748 lttng_create_event_notifier_if_missing(event_notifier_enabler);
2749
2750 /* Link the created event_notifier with its associated enabler. */
2751 list_for_each_entry(event_notifier, &event_notifier_group->event_notifiers_head, list) {
2752 struct lttng_enabler_ref *enabler_ref;
2753
2754 if (!lttng_event_notifier_enabler_match_event_notifier(event_notifier_enabler, event_notifier))
2755 continue;
2756
2757 enabler_ref = lttng_enabler_ref(&event_notifier->enablers_ref_head,
2758 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler));
2759 if (!enabler_ref) {
2760 /*
2761 * If no backward ref, create it.
2762 * Add backward ref from event_notifier to enabler.
2763 */
2764 enabler_ref = kzalloc(sizeof(*enabler_ref), GFP_KERNEL);
2765 if (!enabler_ref)
2766 return -ENOMEM;
2767
2768 enabler_ref->ref = lttng_event_notifier_enabler_as_enabler(
2769 event_notifier_enabler);
2770 list_add(&enabler_ref->node,
2771 &event_notifier->enablers_ref_head);
2772 }
2773
2774 /*
2775 * Link filter bytecodes if not linked yet.
2776 */
2777 lttng_enabler_link_bytecode(event_notifier->desc,
2778 lttng_static_ctx, &event_notifier->filter_bytecode_runtime_head,
2779 &lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)->filter_bytecode_head);
2780
2781 /* Link capture bytecodes if not linked yet. */
2782 lttng_enabler_link_bytecode(event_notifier->desc,
2783 lttng_static_ctx, &event_notifier->capture_bytecode_runtime_head,
2784 &event_notifier_enabler->capture_bytecode_head);
2785
2786 event_notifier->num_captures = event_notifier_enabler->num_captures;
2787 }
2788 return 0;
2789 }
2790
2791 /*
2792 * Called at module load: connect the probe on all enablers matching
2793 * this event.
2794 * Called with sessions lock held.
2795 */
2796 int lttng_fix_pending_events(void)
2797 {
2798 struct lttng_session *session;
2799
2800 list_for_each_entry(session, &sessions, list)
2801 lttng_session_lazy_sync_event_enablers(session);
2802 return 0;
2803 }
2804
2805 static bool lttng_event_notifier_group_has_active_event_notifiers(
2806 struct lttng_event_notifier_group *event_notifier_group)
2807 {
2808 struct lttng_event_notifier_enabler *event_notifier_enabler;
2809
2810 list_for_each_entry(event_notifier_enabler, &event_notifier_group->enablers_head,
2811 node) {
2812 if (event_notifier_enabler->base.enabled)
2813 return true;
2814 }
2815 return false;
2816 }
2817
2818 bool lttng_event_notifier_active(void)
2819 {
2820 struct lttng_event_notifier_group *event_notifier_group;
2821
2822 list_for_each_entry(event_notifier_group, &event_notifier_groups, node) {
2823 if (lttng_event_notifier_group_has_active_event_notifiers(event_notifier_group))
2824 return true;
2825 }
2826 return false;
2827 }
2828
2829 int lttng_fix_pending_event_notifiers(void)
2830 {
2831 struct lttng_event_notifier_group *event_notifier_group;
2832
2833 list_for_each_entry(event_notifier_group, &event_notifier_groups, node)
2834 lttng_event_notifier_group_sync_enablers(event_notifier_group);
2835 return 0;
2836 }
2837
2838 struct lttng_event_enabler *lttng_event_enabler_create(
2839 enum lttng_enabler_format_type format_type,
2840 struct lttng_kernel_event *event_param,
2841 const struct lttng_counter_key *key,
2842 struct lttng_event_container *container)
2843 {
2844 struct lttng_event_enabler *event_enabler;
2845
2846 event_enabler = kzalloc(sizeof(*event_enabler), GFP_KERNEL);
2847 if (!event_enabler)
2848 return NULL;
2849 event_enabler->base.format_type = format_type;
2850 INIT_LIST_HEAD(&event_enabler->base.filter_bytecode_head);
2851 memcpy(&event_enabler->base.event_param, event_param,
2852 sizeof(event_enabler->base.event_param));
2853 event_enabler->container = container;
2854 /* ctx left NULL */
2855 event_enabler->base.enabled = 0;
2856 event_enabler->base.evtype = LTTNG_TYPE_ENABLER;
2857 event_enabler->base.user_token = event_param->token;
2858 if (key)
2859 event_enabler->key = *key;
2860 mutex_lock(&sessions_mutex);
2861 list_add(&event_enabler->node, &event_enabler->container->session->enablers_head);
2862 lttng_session_lazy_sync_event_enablers(event_enabler->container->session);
2863 mutex_unlock(&sessions_mutex);
2864 return event_enabler;
2865 }
2866
/*
 * Enable @event_enabler and re-synchronize its session's events under
 * sessions_mutex. Always returns 0.
 */
int lttng_event_enabler_enable(struct lttng_event_enabler *event_enabler)
{
	mutex_lock(&sessions_mutex);
	lttng_event_enabler_as_enabler(event_enabler)->enabled = 1;
	lttng_session_lazy_sync_event_enablers(event_enabler->container->session);
	mutex_unlock(&sessions_mutex);
	return 0;
}
2875
/*
 * Disable @event_enabler and re-synchronize its session's events under
 * sessions_mutex. Always returns 0.
 */
int lttng_event_enabler_disable(struct lttng_event_enabler *event_enabler)
{
	mutex_lock(&sessions_mutex);
	lttng_event_enabler_as_enabler(event_enabler)->enabled = 0;
	lttng_session_lazy_sync_event_enablers(event_enabler->container->session);
	mutex_unlock(&sessions_mutex);
	return 0;
}
2884
2885 static
2886 int lttng_enabler_attach_filter_bytecode(struct lttng_enabler *enabler,
2887 struct lttng_kernel_filter_bytecode __user *bytecode)
2888 {
2889 struct lttng_bytecode_node *bytecode_node;
2890 uint32_t bytecode_len;
2891 int ret;
2892
2893 ret = get_user(bytecode_len, &bytecode->len);
2894 if (ret)
2895 return ret;
2896 bytecode_node = lttng_kvzalloc(sizeof(*bytecode_node) + bytecode_len,
2897 GFP_KERNEL);
2898 if (!bytecode_node)
2899 return -ENOMEM;
2900 ret = copy_from_user(&bytecode_node->bc, bytecode,
2901 sizeof(*bytecode) + bytecode_len);
2902 if (ret)
2903 goto error_free;
2904
2905 bytecode_node->type = LTTNG_BYTECODE_NODE_TYPE_FILTER;
2906 bytecode_node->enabler = enabler;
2907 /* Enforce length based on allocated size */
2908 bytecode_node->bc.len = bytecode_len;
2909 list_add_tail(&bytecode_node->node, &enabler->filter_bytecode_head);
2910
2911 return 0;
2912
2913 error_free:
2914 lttng_kvfree(bytecode_node);
2915 return ret;
2916 }
2917
2918 int lttng_event_enabler_attach_filter_bytecode(struct lttng_event_enabler *event_enabler,
2919 struct lttng_kernel_filter_bytecode __user *bytecode)
2920 {
2921 int ret;
2922 ret = lttng_enabler_attach_filter_bytecode(
2923 lttng_event_enabler_as_enabler(event_enabler), bytecode);
2924 if (ret)
2925 goto error;
2926
2927 lttng_session_lazy_sync_event_enablers(event_enabler->container->session);
2928 return 0;
2929
2930 error:
2931 return ret;
2932 }
2933
2934 int lttng_event_add_callsite(struct lttng_event *event,
2935 struct lttng_kernel_event_callsite __user *callsite)
2936 {
2937
2938 switch (event->instrumentation) {
2939 case LTTNG_KERNEL_UPROBE:
2940 return lttng_uprobes_event_add_callsite(event, callsite);
2941 default:
2942 return -EINVAL;
2943 }
2944 }
2945
/* Attaching a context to an event enabler is not implemented. */
int lttng_event_enabler_attach_context(struct lttng_event_enabler *event_enabler,
		struct lttng_kernel_context *context_param)
{
	return -ENOSYS;
}
2951
2952 static
2953 void lttng_enabler_destroy(struct lttng_enabler *enabler)
2954 {
2955 struct lttng_bytecode_node *filter_node, *tmp_filter_node;
2956
2957 /* Destroy filter bytecode */
2958 list_for_each_entry_safe(filter_node, tmp_filter_node,
2959 &enabler->filter_bytecode_head, node) {
2960 lttng_kvfree(filter_node);
2961 }
2962 }
2963
/*
 * Tear down an event enabler: free its filter bytecode, destroy its
 * context, unlink it from the session's enabler list and free it.
 */
static
void lttng_event_enabler_destroy(struct lttng_event_enabler *event_enabler)
{
	lttng_enabler_destroy(lttng_event_enabler_as_enabler(event_enabler));

	/* Destroy contexts */
	lttng_destroy_context(event_enabler->ctx);

	list_del(&event_enabler->node);
	kfree(event_enabler);
}
2975
2976 struct lttng_event_notifier_enabler *lttng_event_notifier_enabler_create(
2977 struct lttng_event_notifier_group *event_notifier_group,
2978 enum lttng_enabler_format_type format_type,
2979 struct lttng_kernel_event_notifier *event_notifier_param)
2980 {
2981 struct lttng_event_notifier_enabler *event_notifier_enabler;
2982
2983 event_notifier_enabler = kzalloc(sizeof(*event_notifier_enabler), GFP_KERNEL);
2984 if (!event_notifier_enabler)
2985 return NULL;
2986
2987 event_notifier_enabler->base.format_type = format_type;
2988 INIT_LIST_HEAD(&event_notifier_enabler->base.filter_bytecode_head);
2989 INIT_LIST_HEAD(&event_notifier_enabler->capture_bytecode_head);
2990
2991 event_notifier_enabler->error_counter_index = event_notifier_param->error_counter_index;
2992 event_notifier_enabler->num_captures = 0;
2993
2994 memcpy(&event_notifier_enabler->base.event_param, &event_notifier_param->event,
2995 sizeof(event_notifier_enabler->base.event_param));
2996 event_notifier_enabler->base.evtype = LTTNG_TYPE_ENABLER;
2997
2998 event_notifier_enabler->base.enabled = 0;
2999 event_notifier_enabler->base.user_token = event_notifier_param->event.token;
3000 event_notifier_enabler->group = event_notifier_group;
3001
3002 mutex_lock(&sessions_mutex);
3003 list_add(&event_notifier_enabler->node, &event_notifier_enabler->group->enablers_head);
3004 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
3005
3006 mutex_unlock(&sessions_mutex);
3007
3008 return event_notifier_enabler;
3009 }
3010
/*
 * Enable @event_notifier_enabler and re-synchronize its group under
 * sessions_mutex. Always returns 0.
 */
int lttng_event_notifier_enabler_enable(
		struct lttng_event_notifier_enabler *event_notifier_enabler)
{
	mutex_lock(&sessions_mutex);
	lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)->enabled = 1;
	lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
	mutex_unlock(&sessions_mutex);
	return 0;
}
3020
/*
 * Disable @event_notifier_enabler and re-synchronize its group under
 * sessions_mutex. Always returns 0.
 */
int lttng_event_notifier_enabler_disable(
		struct lttng_event_notifier_enabler *event_notifier_enabler)
{
	mutex_lock(&sessions_mutex);
	lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)->enabled = 0;
	lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
	mutex_unlock(&sessions_mutex);
	return 0;
}
3030
3031 int lttng_event_notifier_enabler_attach_filter_bytecode(
3032 struct lttng_event_notifier_enabler *event_notifier_enabler,
3033 struct lttng_kernel_filter_bytecode __user *bytecode)
3034 {
3035 int ret;
3036
3037 ret = lttng_enabler_attach_filter_bytecode(
3038 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler),
3039 bytecode);
3040 if (ret)
3041 goto error;
3042
3043 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
3044 return 0;
3045
3046 error:
3047 return ret;
3048 }
3049
3050 int lttng_event_notifier_enabler_attach_capture_bytecode(
3051 struct lttng_event_notifier_enabler *event_notifier_enabler,
3052 struct lttng_kernel_capture_bytecode __user *bytecode)
3053 {
3054 struct lttng_bytecode_node *bytecode_node;
3055 struct lttng_enabler *enabler =
3056 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler);
3057 uint32_t bytecode_len;
3058 int ret;
3059
3060 ret = get_user(bytecode_len, &bytecode->len);
3061 if (ret)
3062 return ret;
3063
3064 bytecode_node = lttng_kvzalloc(sizeof(*bytecode_node) + bytecode_len,
3065 GFP_KERNEL);
3066 if (!bytecode_node)
3067 return -ENOMEM;
3068
3069 ret = copy_from_user(&bytecode_node->bc, bytecode,
3070 sizeof(*bytecode) + bytecode_len);
3071 if (ret)
3072 goto error_free;
3073
3074 bytecode_node->type = LTTNG_BYTECODE_NODE_TYPE_CAPTURE;
3075 bytecode_node->enabler = enabler;
3076
3077 /* Enforce length based on allocated size */
3078 bytecode_node->bc.len = bytecode_len;
3079 list_add_tail(&bytecode_node->node, &event_notifier_enabler->capture_bytecode_head);
3080
3081 event_notifier_enabler->num_captures++;
3082
3083 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
3084 goto end;
3085
3086 error_free:
3087 lttng_kvfree(bytecode_node);
3088 end:
3089 return ret;
3090 }
3091
3092 int lttng_event_notifier_add_callsite(struct lttng_event_notifier *event_notifier,
3093 struct lttng_kernel_event_callsite __user *callsite)
3094 {
3095
3096 switch (event_notifier->instrumentation) {
3097 case LTTNG_KERNEL_UPROBE:
3098 return lttng_uprobes_event_notifier_add_callsite(event_notifier,
3099 callsite);
3100 default:
3101 return -EINVAL;
3102 }
3103 }
3104
/* Attaching a context to an event notifier enabler is not implemented. */
int lttng_event_notifier_enabler_attach_context(
		struct lttng_event_notifier_enabler *event_notifier_enabler,
		struct lttng_kernel_context *context_param)
{
	return -ENOSYS;
}
3111
/*
 * Tear down an event notifier enabler: unlink it from its group's
 * enabler list, free its filter bytecode and free the enabler itself.
 * Accepts NULL as a no-op.
 */
static
void lttng_event_notifier_enabler_destroy(
		struct lttng_event_notifier_enabler *event_notifier_enabler)
{
	if (!event_notifier_enabler) {
		return;
	}

	list_del(&event_notifier_enabler->node);

	lttng_enabler_destroy(lttng_event_notifier_enabler_as_enabler(event_notifier_enabler));
	kfree(event_notifier_enabler);
}
3125
3126 /*
3127 * lttng_session_sync_event_enablers should be called just before starting a
3128 * session.
3129 * Should be called with sessions mutex held.
3130 */
3131 static
3132 void lttng_session_sync_event_enablers(struct lttng_session *session)
3133 {
3134 struct lttng_event_enabler *event_enabler;
3135 struct lttng_event *event;
3136
3137 list_for_each_entry(event_enabler, &session->enablers_head, node)
3138 lttng_event_enabler_ref_events(event_enabler);
3139 /*
3140 * For each event, if at least one of its enablers is enabled,
3141 * and its event container and session transient states are enabled, we
3142 * enable the event, else we disable it.
3143 */
3144 list_for_each_entry(event, &session->events, list) {
3145 struct lttng_enabler_ref *enabler_ref;
3146 struct lttng_bytecode_runtime *runtime;
3147 int enabled = 0, has_enablers_without_bytecode = 0;
3148
3149 switch (event->instrumentation) {
3150 case LTTNG_KERNEL_TRACEPOINT:
3151 case LTTNG_KERNEL_SYSCALL:
3152 /* Enable events */
3153 list_for_each_entry(enabler_ref,
3154 &event->enablers_ref_head, node) {
3155 if (enabler_ref->ref->enabled) {
3156 enabled = 1;
3157 break;
3158 }
3159 }
3160 break;
3161 default:
3162 /* Not handled with lazy sync. */
3163 continue;
3164 }
3165 /*
3166 * Enabled state is based on union of enablers, with
3167 * intesection of session and event container transient enable
3168 * states.
3169 */
3170 enabled = enabled && session->tstate && event->container->tstate;
3171
3172 WRITE_ONCE(event->enabled, enabled);
3173 /*
3174 * Sync tracepoint registration with event enabled
3175 * state.
3176 */
3177 if (enabled) {
3178 register_event(event);
3179 } else {
3180 _lttng_event_unregister(event);
3181 }
3182
3183 /* Check if has enablers without bytecode enabled */
3184 list_for_each_entry(enabler_ref,
3185 &event->enablers_ref_head, node) {
3186 if (enabler_ref->ref->enabled
3187 && list_empty(&enabler_ref->ref->filter_bytecode_head)) {
3188 has_enablers_without_bytecode = 1;
3189 break;
3190 }
3191 }
3192 event->has_enablers_without_bytecode =
3193 has_enablers_without_bytecode;
3194
3195 /* Enable filters */
3196 list_for_each_entry(runtime,
3197 &event->filter_bytecode_runtime_head, node)
3198 lttng_bytecode_filter_sync_state(runtime);
3199 }
3200 }
3201
3202 /*
3203 * Apply enablers to session events, adding events to session if need
3204 * be. It is required after each modification applied to an active
3205 * session, and right before session "start".
3206 * "lazy" sync means we only sync if required.
3207 * Should be called with sessions mutex held.
3208 */
3209 static
3210 void lttng_session_lazy_sync_event_enablers(struct lttng_session *session)
3211 {
3212 /* We can skip if session is not active */
3213 if (!session->active)
3214 return;
3215 lttng_session_sync_event_enablers(session);
3216 }
3217
/*
 * Synchronize the enabled/registered state of every event notifier in the
 * group with its enablers, filters and capture bytecode.
 */
static
void lttng_event_notifier_group_sync_enablers(struct lttng_event_notifier_group *event_notifier_group)
{
	struct lttng_event_notifier_enabler *event_notifier_enabler;
	struct lttng_event_notifier *event_notifier;

	/* First pass: let each enabler reference matching event notifiers. */
	list_for_each_entry(event_notifier_enabler, &event_notifier_group->enablers_head, node)
		lttng_event_notifier_enabler_ref_event_notifiers(event_notifier_enabler);

	/*
	 * For each event_notifier, if at least one of its enablers is enabled,
	 * we enable the event_notifier, else we disable it.
	 */
	list_for_each_entry(event_notifier, &event_notifier_group->event_notifiers_head, list) {
		struct lttng_enabler_ref *enabler_ref;
		struct lttng_bytecode_runtime *runtime;
		int enabled = 0, has_enablers_without_bytecode = 0;

		switch (event_notifier->instrumentation) {
		case LTTNG_KERNEL_TRACEPOINT:
		case LTTNG_KERNEL_SYSCALL:
			/* Enable event_notifiers: union of enabler states. */
			list_for_each_entry(enabler_ref,
					&event_notifier->enablers_ref_head, node) {
				if (enabler_ref->ref->enabled) {
					enabled = 1;
					break;
				}
			}
			break;
		default:
			/* Not handled with sync. */
			continue;
		}

		WRITE_ONCE(event_notifier->enabled, enabled);
		/*
		 * Sync tracepoint registration with event_notifier enabled
		 * state.
		 */
		if (enabled) {
			if (!event_notifier->registered)
				register_event_notifier(event_notifier);
		} else {
			if (event_notifier->registered)
				_lttng_event_notifier_unregister(event_notifier);
		}

		/*
		 * Check if has enablers without bytecode enabled: if so, the
		 * notifier must fire even when no filter bytecode matches.
		 */
		list_for_each_entry(enabler_ref,
				&event_notifier->enablers_ref_head, node) {
			if (enabler_ref->ref->enabled
					&& list_empty(&enabler_ref->ref->filter_bytecode_head)) {
				has_enablers_without_bytecode = 1;
				break;
			}
		}
		event_notifier->has_enablers_without_bytecode =
			has_enablers_without_bytecode;

		/* Enable filters */
		list_for_each_entry(runtime,
				&event_notifier->filter_bytecode_runtime_head, node)
			lttng_bytecode_filter_sync_state(runtime);

		/* Enable captures */
		list_for_each_entry(runtime,
				&event_notifier->capture_bytecode_runtime_head, node)
			lttng_bytecode_capture_sync_state(runtime);

		/* Only evaluate captures when at least one is attached. */
		WRITE_ONCE(event_notifier->eval_capture, !!event_notifier->num_captures);
	}
}
3291
/*
 * Serialize at most one packet worth of metadata into a metadata
 * channel.
 * We grab the metadata cache mutex to get exclusive access to our metadata
 * buffer and to the metadata cache. Exclusive access to the metadata buffer
 * allows us to do racy operations such as looking for remaining space left in
 * packet and write, since mutual exclusion protects us from concurrent writes.
 * Mutual exclusion on the metadata cache allow us to read the cache content
 * without racing against reallocation of the cache by updates.
 * Returns the number of bytes written in the channel, 0 if no data
 * was written and a negative value on error.
 */
int lttng_metadata_output_channel(struct lttng_metadata_stream *stream,
		struct channel *chan, bool *coherent)
{
	struct lib_ring_buffer_ctx ctx;
	int ret = 0;
	size_t len, reserve_len;

	/*
	 * Ensure we support multiple get_next / put sequences followed by
	 * put_next. The metadata cache lock protects reading the metadata
	 * cache. It can indeed be read concurrently by "get_next_subbuf" and
	 * "flush" operations on the buffer invoked by different processes.
	 * Moreover, since the metadata cache memory can be reallocated, we
	 * need to have exclusive access against updates even though we only
	 * read it.
	 */
	mutex_lock(&stream->metadata_cache->lock);
	WARN_ON(stream->metadata_in < stream->metadata_out);
	/* Previously output data not consumed yet: nothing to do. */
	if (stream->metadata_in != stream->metadata_out)
		goto end;

	/* Metadata regenerated, change the version. */
	if (stream->metadata_cache->version != stream->version)
		stream->version = stream->metadata_cache->version;

	/* Bytes cached but not yet pushed to this stream. */
	len = stream->metadata_cache->metadata_written -
		stream->metadata_in;
	if (!len)
		goto end;
	reserve_len = min_t(size_t,
			stream->transport->ops.packet_avail_size(chan),
			len);
	lib_ring_buffer_ctx_init(&ctx, chan, NULL, reserve_len,
			sizeof(char), -1);
	/*
	 * If reservation failed, return an error to the caller.
	 */
	ret = stream->transport->ops.event_reserve(&ctx, 0);
	if (ret != 0) {
		printk(KERN_WARNING "LTTng: Metadata event reservation failed\n");
		stream->coherent = false;
		goto end;
	}
	stream->transport->ops.event_write(&ctx,
			stream->metadata_cache->data + stream->metadata_in,
			reserve_len);
	stream->transport->ops.event_commit(&ctx);
	stream->metadata_in += reserve_len;
	/* Coherent only if the whole pending cache content fit the packet. */
	if (reserve_len < len)
		stream->coherent = false;
	else
		stream->coherent = true;
	ret = reserve_len;

end:
	if (coherent)
		*coherent = stream->coherent;
	mutex_unlock(&stream->metadata_cache->lock);
	return ret;
}
3364
/*
 * Open a metadata transaction. The outermost call (producing refcount
 * going 0 -> 1) takes the metadata cache lock; nested begin/end pairs
 * only change the refcount, so several helpers can contribute to one
 * atomic metadata transaction.
 */
static
void lttng_metadata_begin(struct lttng_session *session)
{
	if (atomic_inc_return(&session->metadata_cache->producing) == 1)
		mutex_lock(&session->metadata_cache->lock);
}
3371
/*
 * Close a metadata transaction opened by lttng_metadata_begin(). When the
 * outermost transaction completes (producing refcount reaching 0), wake up
 * metadata stream readers and release the metadata cache lock.
 */
static
void lttng_metadata_end(struct lttng_session *session)
{
	/* Unbalanced end without a matching begin is a bug. */
	WARN_ON_ONCE(!atomic_read(&session->metadata_cache->producing));
	if (atomic_dec_return(&session->metadata_cache->producing) == 0) {
		struct lttng_metadata_stream *stream;

		list_for_each_entry(stream, &session->metadata_cache->metadata_stream, list)
			wake_up_interruptible(&stream->read_wait);
		mutex_unlock(&session->metadata_cache->lock);
	}
}
3384
/*
 * Write the metadata to the metadata cache.
 * Must be called with sessions_mutex held.
 * The metadata cache lock protects us from concurrent read access from
 * thread outputting metadata content to ring buffer.
 * The content of the printf is printed as a single atomic metadata
 * transaction.
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
int lttng_metadata_printf(struct lttng_session *session,
			const char *fmt, ...)
{
	char *str;
	size_t len;
	va_list ap;

	WARN_ON_ONCE(!LTTNG_READ_ONCE(session->active));

	va_start(ap, fmt);
	str = kvasprintf(GFP_KERNEL, fmt, ap);
	va_end(ap);
	if (!str)
		return -ENOMEM;

	len = strlen(str);
	/* Caller must hold the transaction open (lttng_metadata_begin). */
	WARN_ON_ONCE(!atomic_read(&session->metadata_cache->producing));
	if (session->metadata_cache->metadata_written + len >
			session->metadata_cache->cache_alloc) {
		char *tmp_cache_realloc;
		unsigned int tmp_cache_alloc_size;

		/* Grow to max(required size, double the current size). */
		tmp_cache_alloc_size = max_t(unsigned int,
				session->metadata_cache->cache_alloc + len,
				session->metadata_cache->cache_alloc << 1);
		tmp_cache_realloc = vzalloc(tmp_cache_alloc_size);
		if (!tmp_cache_realloc)
			goto err;
		if (session->metadata_cache->data) {
			memcpy(tmp_cache_realloc,
				session->metadata_cache->data,
				session->metadata_cache->cache_alloc);
			vfree(session->metadata_cache->data);
		}

		session->metadata_cache->cache_alloc = tmp_cache_alloc_size;
		session->metadata_cache->data = tmp_cache_realloc;
	}
	memcpy(session->metadata_cache->data +
			session->metadata_cache->metadata_written,
			str, len);
	session->metadata_cache->metadata_written += len;
	kfree(str);

	return 0;

err:
	kfree(str);
	return -ENOMEM;
}
3443
3444 static
3445 int print_tabs(struct lttng_session *session, size_t nesting)
3446 {
3447 size_t i;
3448
3449 for (i = 0; i < nesting; i++) {
3450 int ret;
3451
3452 ret = lttng_metadata_printf(session, " ");
3453 if (ret) {
3454 return ret;
3455 }
3456 }
3457 return 0;
3458 }
3459
/*
 * Emit the field name, prefixed with "_", terminating the field's
 * metadata declaration. The "nesting" parameter is unused here: the
 * name is appended on the same line as the type description.
 */
static
int lttng_field_name_statedump(struct lttng_session *session,
		const struct lttng_event_field *field,
		size_t nesting)
{
	return lttng_metadata_printf(session, " _%s;\n", field->name);
}
3467
3468 static
3469 int _lttng_integer_type_statedump(struct lttng_session *session,
3470 const struct lttng_type *type,
3471 size_t nesting)
3472 {
3473 int ret;
3474
3475 WARN_ON_ONCE(type->atype != atype_integer);
3476 ret = print_tabs(session, nesting);
3477 if (ret)
3478 return ret;
3479 ret = lttng_metadata_printf(session,
3480 "integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s }",
3481 type->u.integer.size,
3482 type->u.integer.alignment,
3483 type->u.integer.signedness,
3484 (type->u.integer.encoding == lttng_encode_none)
3485 ? "none"
3486 : (type->u.integer.encoding == lttng_encode_UTF8)
3487 ? "UTF8"
3488 : "ASCII",
3489 type->u.integer.base,
3490 #if __BYTE_ORDER == __BIG_ENDIAN
3491 type->u.integer.reverse_byte_order ? " byte_order = le;" : ""
3492 #else
3493 type->u.integer.reverse_byte_order ? " byte_order = be;" : ""
3494 #endif
3495 );
3496 return ret;
3497 }
3498
3499 /*
3500 * Must be called with sessions_mutex held.
3501 */
3502 static
3503 int _lttng_struct_type_statedump(struct lttng_session *session,
3504 const struct lttng_type *type,
3505 size_t nesting)
3506 {
3507 int ret;
3508 uint32_t i, nr_fields;
3509 unsigned int alignment;
3510
3511 WARN_ON_ONCE(type->atype != atype_struct_nestable);
3512
3513 ret = print_tabs(session, nesting);
3514 if (ret)
3515 return ret;
3516 ret = lttng_metadata_printf(session,
3517 "struct {\n");
3518 if (ret)
3519 return ret;
3520 nr_fields = type->u.struct_nestable.nr_fields;
3521 for (i = 0; i < nr_fields; i++) {
3522 const struct lttng_event_field *iter_field;
3523
3524 iter_field = &type->u.struct_nestable.fields[i];
3525 ret = _lttng_field_statedump(session, iter_field, nesting + 1);
3526 if (ret)
3527 return ret;
3528 }
3529 ret = print_tabs(session, nesting);
3530 if (ret)
3531 return ret;
3532 alignment = type->u.struct_nestable.alignment;
3533 if (alignment) {
3534 ret = lttng_metadata_printf(session,
3535 "} align(%u)",
3536 alignment);
3537 } else {
3538 ret = lttng_metadata_printf(session,
3539 "}");
3540 }
3541 return ret;
3542 }
3543
3544 /*
3545 * Must be called with sessions_mutex held.
3546 */
3547 static
3548 int _lttng_struct_field_statedump(struct lttng_session *session,
3549 const struct lttng_event_field *field,
3550 size_t nesting)
3551 {
3552 int ret;
3553
3554 ret = _lttng_struct_type_statedump(session,
3555 &field->type, nesting);
3556 if (ret)
3557 return ret;
3558 return lttng_field_name_statedump(session, field, nesting);
3559 }
3560
3561 /*
3562 * Must be called with sessions_mutex held.
3563 */
3564 static
3565 int _lttng_variant_type_statedump(struct lttng_session *session,
3566 const struct lttng_type *type,
3567 size_t nesting)
3568 {
3569 int ret;
3570 uint32_t i, nr_choices;
3571
3572 WARN_ON_ONCE(type->atype != atype_variant_nestable);
3573 /*
3574 * CTF 1.8 does not allow expressing nonzero variant alignment in a nestable way.
3575 */
3576 if (type->u.variant_nestable.alignment != 0)
3577 return -EINVAL;
3578 ret = print_tabs(session, nesting);
3579 if (ret)
3580 return ret;
3581 ret = lttng_metadata_printf(session,
3582 "variant <_%s> {\n",
3583 type->u.variant_nestable.tag_name);
3584 if (ret)
3585 return ret;
3586 nr_choices = type->u.variant_nestable.nr_choices;
3587 for (i = 0; i < nr_choices; i++) {
3588 const struct lttng_event_field *iter_field;
3589
3590 iter_field = &type->u.variant_nestable.choices[i];
3591 ret = _lttng_field_statedump(session, iter_field, nesting + 1);
3592 if (ret)
3593 return ret;
3594 }
3595 ret = print_tabs(session, nesting);
3596 if (ret)
3597 return ret;
3598 ret = lttng_metadata_printf(session,
3599 "}");
3600 return ret;
3601 }
3602
3603 /*
3604 * Must be called with sessions_mutex held.
3605 */
3606 static
3607 int _lttng_variant_field_statedump(struct lttng_session *session,
3608 const struct lttng_event_field *field,
3609 size_t nesting)
3610 {
3611 int ret;
3612
3613 ret = _lttng_variant_type_statedump(session,
3614 &field->type, nesting);
3615 if (ret)
3616 return ret;
3617 return lttng_field_name_statedump(session, field, nesting);
3618 }
3619
3620 /*
3621 * Must be called with sessions_mutex held.
3622 */
3623 static
3624 int _lttng_array_field_statedump(struct lttng_session *session,
3625 const struct lttng_event_field *field,
3626 size_t nesting)
3627 {
3628 int ret;
3629 const struct lttng_type *elem_type;
3630
3631 WARN_ON_ONCE(field->type.atype != atype_array_nestable);
3632
3633 if (field->type.u.array_nestable.alignment) {
3634 ret = print_tabs(session, nesting);
3635 if (ret)
3636 return ret;
3637 ret = lttng_metadata_printf(session,
3638 "struct { } align(%u) _%s_padding;\n",
3639 field->type.u.array_nestable.alignment * CHAR_BIT,
3640 field->name);
3641 if (ret)
3642 return ret;
3643 }
3644 /*
3645 * Nested compound types: Only array of structures and variants are
3646 * currently supported.
3647 */
3648 elem_type = field->type.u.array_nestable.elem_type;
3649 switch (elem_type->atype) {
3650 case atype_integer:
3651 case atype_struct_nestable:
3652 case atype_variant_nestable:
3653 ret = _lttng_type_statedump(session, elem_type, nesting);
3654 if (ret)
3655 return ret;
3656 break;
3657
3658 default:
3659 return -EINVAL;
3660 }
3661 ret = lttng_metadata_printf(session,
3662 " _%s[%u];\n",
3663 field->name,
3664 field->type.u.array_nestable.length);
3665 return ret;
3666 }
3667
3668 /*
3669 * Must be called with sessions_mutex held.
3670 */
3671 static
3672 int _lttng_sequence_field_statedump(struct lttng_session *session,
3673 const struct lttng_event_field *field,
3674 size_t nesting)
3675 {
3676 int ret;
3677 const char *length_name;
3678 const struct lttng_type *elem_type;
3679
3680 WARN_ON_ONCE(field->type.atype != atype_sequence_nestable);
3681
3682 length_name = field->type.u.sequence_nestable.length_name;
3683
3684 if (field->type.u.sequence_nestable.alignment) {
3685 ret = print_tabs(session, nesting);
3686 if (ret)
3687 return ret;
3688 ret = lttng_metadata_printf(session,
3689 "struct { } align(%u) _%s_padding;\n",
3690 field->type.u.sequence_nestable.alignment * CHAR_BIT,
3691 field->name);
3692 if (ret)
3693 return ret;
3694 }
3695
3696 /*
3697 * Nested compound types: Only array of structures and variants are
3698 * currently supported.
3699 */
3700 elem_type = field->type.u.sequence_nestable.elem_type;
3701 switch (elem_type->atype) {
3702 case atype_integer:
3703 case atype_struct_nestable:
3704 case atype_variant_nestable:
3705 ret = _lttng_type_statedump(session, elem_type, nesting);
3706 if (ret)
3707 return ret;
3708 break;
3709
3710 default:
3711 return -EINVAL;
3712 }
3713 ret = lttng_metadata_printf(session,
3714 " _%s[ _%s ];\n",
3715 field->name,
3716 field->type.u.sequence_nestable.length_name);
3717 return ret;
3718 }
3719
/*
 * Emit the CTF description of a nestable enumeration type: the integer
 * container type followed by every entry with its quoted (escaped) label
 * and value or value range.
 * Must be called with sessions_mutex held.
 */
static
int _lttng_enum_type_statedump(struct lttng_session *session,
		const struct lttng_type *type,
		size_t nesting)
{
	const struct lttng_enum_desc *enum_desc;
	const struct lttng_type *container_type;
	int ret;
	unsigned int i, nr_entries;

	/* CTF enumerations must be backed by an integer container type. */
	container_type = type->u.enum_nestable.container_type;
	if (container_type->atype != atype_integer) {
		ret = -EINVAL;
		goto end;
	}
	enum_desc = type->u.enum_nestable.desc;
	nr_entries = enum_desc->nr_entries;

	ret = print_tabs(session, nesting);
	if (ret)
		goto end;
	ret = lttng_metadata_printf(session, "enum : ");
	if (ret)
		goto end;
	ret = _lttng_integer_type_statedump(session, container_type, 0);
	if (ret)
		goto end;
	ret = lttng_metadata_printf(session, " {\n");
	if (ret)
		goto end;
	/* Dump all entries */
	for (i = 0; i < nr_entries; i++) {
		const struct lttng_enum_entry *entry = &enum_desc->entries[i];
		int j, len;

		ret = print_tabs(session, nesting + 1);
		if (ret)
			goto end;
		ret = lttng_metadata_printf(session,
				"\"");
		if (ret)
			goto end;
		len = strlen(entry->string);
		/* Escape the characters '"' and '\' in the entry label. */
		for (j = 0; j < len; j++) {
			char c = entry->string[j];

			switch (c) {
			case '"':
				ret = lttng_metadata_printf(session,
						"\\\"");
				break;
			case '\\':
				ret = lttng_metadata_printf(session,
						"\\\\");
				break;
			default:
				ret = lttng_metadata_printf(session,
						"%c", c);
				break;
			}
			if (ret)
				goto end;
		}
		ret = lttng_metadata_printf(session, "\"");
		if (ret)
			goto end;

		/* Auto-assigned values are emitted without an explicit "=". */
		if (entry->options.is_auto) {
			ret = lttng_metadata_printf(session, ",\n");
			if (ret)
				goto end;
		} else {
			ret = lttng_metadata_printf(session,
					" = ");
			if (ret)
				goto end;
			if (entry->start.signedness)
				ret = lttng_metadata_printf(session,
					"%lld", (long long) entry->start.value);
			else
				ret = lttng_metadata_printf(session,
					"%llu", entry->start.value);
			if (ret)
				goto end;
			/* Single value vs. "start ... end" range syntax. */
			if (entry->start.signedness == entry->end.signedness &&
					entry->start.value
						== entry->end.value) {
				ret = lttng_metadata_printf(session,
					",\n");
			} else {
				if (entry->end.signedness) {
					ret = lttng_metadata_printf(session,
						" ... %lld,\n",
						(long long) entry->end.value);
				} else {
					ret = lttng_metadata_printf(session,
						" ... %llu,\n",
						entry->end.value);
				}
			}
			if (ret)
				goto end;
		}
	}
	ret = print_tabs(session, nesting);
	if (ret)
		goto end;
	ret = lttng_metadata_printf(session, "}");
end:
	return ret;
}
3835
3836 /*
3837 * Must be called with sessions_mutex held.
3838 */
3839 static
3840 int _lttng_enum_field_statedump(struct lttng_session *session,
3841 const struct lttng_event_field *field,
3842 size_t nesting)
3843 {
3844 int ret;
3845
3846 ret = _lttng_enum_type_statedump(session, &field->type, nesting);
3847 if (ret)
3848 return ret;
3849 return lttng_field_name_statedump(session, field, nesting);
3850 }
3851
3852 static
3853 int _lttng_integer_field_statedump(struct lttng_session *session,
3854 const struct lttng_event_field *field,
3855 size_t nesting)
3856 {
3857 int ret;
3858
3859 ret = _lttng_integer_type_statedump(session, &field->type, nesting);
3860 if (ret)
3861 return ret;
3862 return lttng_field_name_statedump(session, field, nesting);
3863 }
3864
3865 static
3866 int _lttng_string_type_statedump(struct lttng_session *session,
3867 const struct lttng_type *type,
3868 size_t nesting)
3869 {
3870 int ret;
3871
3872 WARN_ON_ONCE(type->atype != atype_string);
3873 /* Default encoding is UTF8 */
3874 ret = print_tabs(session, nesting);
3875 if (ret)
3876 return ret;
3877 ret = lttng_metadata_printf(session,
3878 "string%s",
3879 type->u.string.encoding == lttng_encode_ASCII ?
3880 " { encoding = ASCII; }" : "");
3881 return ret;
3882 }
3883
3884 static
3885 int _lttng_string_field_statedump(struct lttng_session *session,
3886 const struct lttng_event_field *field,
3887 size_t nesting)
3888 {
3889 int ret;
3890
3891 WARN_ON_ONCE(field->type.atype != atype_string);
3892 ret = _lttng_string_type_statedump(session, &field->type, nesting);
3893 if (ret)
3894 return ret;
3895 return lttng_field_name_statedump(session, field, nesting);
3896 }
3897
3898 /*
3899 * Must be called with sessions_mutex held.
3900 */
3901 static
3902 int _lttng_type_statedump(struct lttng_session *session,
3903 const struct lttng_type *type,
3904 size_t nesting)
3905 {
3906 int ret = 0;
3907
3908 switch (type->atype) {
3909 case atype_integer:
3910 ret = _lttng_integer_type_statedump(session, type, nesting);
3911 break;
3912 case atype_enum_nestable:
3913 ret = _lttng_enum_type_statedump(session, type, nesting);
3914 break;
3915 case atype_string:
3916 ret = _lttng_string_type_statedump(session, type, nesting);
3917 break;
3918 case atype_struct_nestable:
3919 ret = _lttng_struct_type_statedump(session, type, nesting);
3920 break;
3921 case atype_variant_nestable:
3922 ret = _lttng_variant_type_statedump(session, type, nesting);
3923 break;
3924
3925 /* Nested arrays and sequences are not supported yet. */
3926 case atype_array_nestable:
3927 case atype_sequence_nestable:
3928 default:
3929 WARN_ON_ONCE(1);
3930 return -EINVAL;
3931 }
3932 return ret;
3933 }
3934
3935 /*
3936 * Must be called with sessions_mutex held.
3937 */
3938 static
3939 int _lttng_field_statedump(struct lttng_session *session,
3940 const struct lttng_event_field *field,
3941 size_t nesting)
3942 {
3943 int ret = 0;
3944
3945 switch (field->type.atype) {
3946 case atype_integer:
3947 ret = _lttng_integer_field_statedump(session, field, nesting);
3948 break;
3949 case atype_enum_nestable:
3950 ret = _lttng_enum_field_statedump(session, field, nesting);
3951 break;
3952 case atype_string:
3953 ret = _lttng_string_field_statedump(session, field, nesting);
3954 break;
3955 case atype_struct_nestable:
3956 ret = _lttng_struct_field_statedump(session, field, nesting);
3957 break;
3958 case atype_array_nestable:
3959 ret = _lttng_array_field_statedump(session, field, nesting);
3960 break;
3961 case atype_sequence_nestable:
3962 ret = _lttng_sequence_field_statedump(session, field, nesting);
3963 break;
3964 case atype_variant_nestable:
3965 ret = _lttng_variant_field_statedump(session, field, nesting);
3966 break;
3967
3968 default:
3969 WARN_ON_ONCE(1);
3970 return -EINVAL;
3971 }
3972 return ret;
3973 }
3974
3975 static
3976 int _lttng_context_metadata_statedump(struct lttng_session *session,
3977 struct lttng_ctx *ctx)
3978 {
3979 int ret = 0;
3980 int i;
3981
3982 if (!ctx)
3983 return 0;
3984 for (i = 0; i < ctx->nr_fields; i++) {
3985 const struct lttng_ctx_field *field = &ctx->fields[i];
3986
3987 ret = _lttng_field_statedump(session, &field->event_field, 2);
3988 if (ret)
3989 return ret;
3990 }
3991 return ret;
3992 }
3993
3994 static
3995 int _lttng_fields_metadata_statedump(struct lttng_session *session,
3996 struct lttng_event *event)
3997 {
3998 const struct lttng_event_desc *desc = event->desc;
3999 int ret = 0;
4000 int i;
4001
4002 for (i = 0; i < desc->nr_fields; i++) {
4003 const struct lttng_event_field *field = &desc->fields[i];
4004
4005 ret = _lttng_field_statedump(session, field, 2);
4006 if (ret)
4007 return ret;
4008 }
4009 return ret;
4010 }
4011
/*
 * Must be called with sessions_mutex held.
 * The entire event metadata is printed as a single atomic metadata
 * transaction.
 */
static
int _lttng_event_metadata_statedump(struct lttng_session *session,
		struct lttng_event *event)
{
	struct lttng_channel *chan;
	int ret = 0;

	WARN_ON_ONCE(event->container->type != LTTNG_EVENT_CONTAINER_CHANNEL);
	chan = lttng_event_container_get_channel(event->container);
	/* Nothing to do if already dumped or session inactive. */
	if (event->metadata_dumped || !LTTNG_READ_ONCE(session->active))
		return 0;
	/* The metadata channel does not describe its own events. */
	if (chan->channel_type == METADATA_CHANNEL)
		return 0;

	lttng_metadata_begin(session);

	ret = lttng_metadata_printf(session,
		"event {\n"
		" name = \"%s\";\n"
		" id = %zu;\n"
		" stream_id = %u;\n",
		event->desc->name,
		event->id,
		chan->id);
	if (ret)
		goto end;

	/* Optional per-event context fields. */
	if (event->ctx) {
		ret = lttng_metadata_printf(session,
			" context := struct {\n");
		if (ret)
			goto end;
	}
	ret = _lttng_context_metadata_statedump(session, event->ctx);
	if (ret)
		goto end;
	if (event->ctx) {
		ret = lttng_metadata_printf(session,
			" };\n");
		if (ret)
			goto end;
	}

	ret = lttng_metadata_printf(session,
		" fields := struct {\n"
		);
	if (ret)
		goto end;

	ret = _lttng_fields_metadata_statedump(session, event);
	if (ret)
		goto end;

	/*
	 * LTTng space reservation can only reserve multiples of the
	 * byte size.
	 */
	ret = lttng_metadata_printf(session,
		" };\n"
		"};\n\n");
	if (ret)
		goto end;

	/* Only mark as dumped once the whole description was emitted. */
	event->metadata_dumped = 1;
end:
	lttng_metadata_end(session);
	return ret;

}
4086
4087 /*
4088 * Must be called with sessions_mutex held.
4089 * The entire channel metadata is printed as a single atomic metadata
4090 * transaction.
4091 */
4092 static
4093 int _lttng_channel_metadata_statedump(struct lttng_session *session,
4094 struct lttng_channel *chan)
4095 {
4096 int ret = 0;
4097
4098 if (chan->metadata_dumped || !LTTNG_READ_ONCE(session->active))
4099 return 0;
4100
4101 if (chan->channel_type == METADATA_CHANNEL)
4102 return 0;
4103
4104 lttng_metadata_begin(session);
4105
4106 WARN_ON_ONCE(!chan->header_type);
4107 ret = lttng_metadata_printf(session,
4108 "stream {\n"
4109 " id = %u;\n"
4110 " event.header := %s;\n"
4111 " packet.context := struct packet_context;\n",
4112 chan->id,
4113 chan->header_type == 1 ? "struct event_header_compact" :
4114 "struct event_header_large");
4115 if (ret)
4116 goto end;
4117
4118 if (chan->ctx) {
4119 ret = lttng_metadata_printf(session,
4120 " event.context := struct {\n");
4121 if (ret)
4122 goto end;
4123 }
4124 ret = _lttng_context_metadata_statedump(session, chan->ctx);
4125 if (ret)
4126 goto end;
4127 if (chan->ctx) {
4128 ret = lttng_metadata_printf(session,
4129 " };\n");
4130 if (ret)
4131 goto end;
4132 }
4133
4134 ret = lttng_metadata_printf(session,
4135 "};\n\n");
4136
4137 chan->metadata_dumped = 1;
4138 end:
4139 lttng_metadata_end(session);
4140 return ret;
4141 }
4142
/*
 * Declare the "struct packet_context" type used by every stream's
 * packet.context declaration: begin/end timestamps, content/packet
 * sizes, sequence number, discarded-event count and originating CPU.
 * Must be called with sessions_mutex held.
 */
static
int _lttng_stream_packet_context_declare(struct lttng_session *session)
{
	return lttng_metadata_printf(session,
		"struct packet_context {\n"
		" uint64_clock_monotonic_t timestamp_begin;\n"
		" uint64_clock_monotonic_t timestamp_end;\n"
		" uint64_t content_size;\n"
		" uint64_t packet_size;\n"
		" uint64_t packet_seq_num;\n"
		" unsigned long events_discarded;\n"
		" uint32_t cpu_id;\n"
		"};\n\n"
		);
}
4161
/*
 * Compact header:
 * id: range: 0 - 30.
 * id 31 is reserved to indicate an extended header.
 *
 * Large header:
 * id: range: 0 - 65534.
 * id 65535 is reserved to indicate an extended header.
 *
 * Must be called with sessions_mutex held.
 */
static
int _lttng_event_header_declare(struct lttng_session *session)
{
	/* Alignment of each header is that of its largest integer member. */
	return lttng_metadata_printf(session,
	"struct event_header_compact {\n"
	" enum : uint5_t { compact = 0 ... 30, extended = 31 } id;\n"
	" variant <id> {\n"
	" struct {\n"
	" uint27_clock_monotonic_t timestamp;\n"
	" } compact;\n"
	" struct {\n"
	" uint32_t id;\n"
	" uint64_clock_monotonic_t timestamp;\n"
	" } extended;\n"
	" } v;\n"
	"} align(%u);\n"
	"\n"
	"struct event_header_large {\n"
	" enum : uint16_t { compact = 0 ... 65534, extended = 65535 } id;\n"
	" variant <id> {\n"
	" struct {\n"
	" uint32_clock_monotonic_t timestamp;\n"
	" } compact;\n"
	" struct {\n"
	" uint32_t id;\n"
	" uint64_clock_monotonic_t timestamp;\n"
	" } extended;\n"
	" } v;\n"
	"} align(%u);\n\n",
	lttng_alignof(uint32_t) * CHAR_BIT,
	lttng_alignof(uint16_t) * CHAR_BIT
	);
}
4206
/*
 * Approximation of NTP time of day to clock monotonic correlation,
 * taken at start of trace.
 * Yes, this is only an approximation. Yes, we can (and will) do better
 * in future versions.
 * This function may return a negative offset. It may happen if the
 * system sets the REALTIME clock to 0 after boot.
 *
 * Use 64bit timespec on kernels that have it, this makes 32bit arch
 * y2038 compliant.
 */
static
int64_t measure_clock_offset(void)
{
	uint64_t monotonic_avg, monotonic[2], realtime;
	uint64_t tcf = trace_clock_freq();
	int64_t offset;
	unsigned long flags;
#ifdef LTTNG_KERNEL_HAS_TIMESPEC64
	struct timespec64 rts = { 0, 0 };
#else
	struct timespec rts = { 0, 0 };
#endif

	/* Disable interrupts to increase correlation precision. */
	local_irq_save(flags);
	/* Bracket the realtime sample between two trace clock reads. */
	monotonic[0] = trace_clock_read64();
#ifdef LTTNG_KERNEL_HAS_TIMESPEC64
	ktime_get_real_ts64(&rts);
#else
	getnstimeofday(&rts);
#endif
	monotonic[1] = trace_clock_read64();
	local_irq_restore(flags);

	/* Use the midpoint of the two monotonic reads as the sample time. */
	monotonic_avg = (monotonic[0] + monotonic[1]) >> 1;
	realtime = (uint64_t) rts.tv_sec * tcf;
	if (tcf == NSEC_PER_SEC) {
		realtime += rts.tv_nsec;
	} else {
		/* Scale the nanosecond part to the trace clock frequency. */
		uint64_t n = rts.tv_nsec * tcf;

		do_div(n, NSEC_PER_SEC);
		realtime += n;
	}
	offset = (int64_t) realtime - monotonic_avg;
	return offset;
}
4255
4256 static
4257 int print_escaped_ctf_string(struct lttng_session *session, const char *string)
4258 {
4259 int ret = 0;
4260 size_t i;
4261 char cur;
4262
4263 i = 0;
4264 cur = string[i];
4265 while (cur != '\0') {
4266 switch (cur) {
4267 case '\n':
4268 ret = lttng_metadata_printf(session, "%s", "\\n");
4269 break;
4270 case '\\':
4271 case '"':
4272 ret = lttng_metadata_printf(session, "%c", '\\');
4273 if (ret)
4274 goto error;
4275 /* We still print the current char */
4276 /* Fallthrough */
4277 default:
4278 ret = lttng_metadata_printf(session, "%c", cur);
4279 break;
4280 }
4281
4282 if (ret)
4283 goto error;
4284
4285 cur = string[++i];
4286 }
4287 error:
4288 return ret;
4289 }
4290
/*
 * Emit one 'name = "value";' metadata line, escaping the value with
 * print_escaped_ctf_string().
 */
static
int print_metadata_escaped_field(struct lttng_session *session, const char *field,
		const char *field_value)
{
	int ret;

	ret = lttng_metadata_printf(session, " %s = \"", field);
	if (ret)
		return ret;
	ret = print_escaped_ctf_string(session, field_value);
	if (ret)
		return ret;
	return lttng_metadata_printf(session, "\";\n");
}
4310
/*
 * Output metadata into this session's metadata buffers.
 * Must be called with sessions_mutex held.
 *
 * The session-wide preamble (type aliases, trace/env/clock declarations)
 * is emitted only on the first call; subsequent calls only stream out
 * metadata for channels and events added since the last dump.
 */
static
int _lttng_session_metadata_statedump(struct lttng_session *session)
{
	unsigned char *uuid_c = session->uuid.b;
	/* 36 hex/dash chars + NUL for the textual trace UUID. */
	/* NOTE(review): clock_uuid_s sized by BOOT_ID_LEN (defined elsewhere);
	 * presumably large enough for the textual clock uuid — confirm. */
	unsigned char uuid_s[37], clock_uuid_s[BOOT_ID_LEN];
	const char *product_uuid;
	struct lttng_channel *chan;
	struct lttng_event *event;
	int ret = 0;

	/* Nothing to dump for an inactive session. */
	if (!LTTNG_READ_ONCE(session->active))
		return 0;

	lttng_metadata_begin(session);

	/* Preamble already emitted: only dump channels/events below. */
	if (session->metadata_dumped)
		goto skip_session;

	/* Render the binary trace UUID into canonical textual form. */
	snprintf(uuid_s, sizeof(uuid_s),
		"%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x",
		uuid_c[0], uuid_c[1], uuid_c[2], uuid_c[3],
		uuid_c[4], uuid_c[5], uuid_c[6], uuid_c[7],
		uuid_c[8], uuid_c[9], uuid_c[10], uuid_c[11],
		uuid_c[12], uuid_c[13], uuid_c[14], uuid_c[15]);

	/*
	 * Base integer type aliases (sized/aligned for this machine) and the
	 * top-level CTF `trace` block with its packet header layout.
	 */
	ret = lttng_metadata_printf(session,
		"typealias integer { size = 8; align = %u; signed = false; } := uint8_t;\n"
		"typealias integer { size = 16; align = %u; signed = false; } := uint16_t;\n"
		"typealias integer { size = 32; align = %u; signed = false; } := uint32_t;\n"
		"typealias integer { size = 64; align = %u; signed = false; } := uint64_t;\n"
		"typealias integer { size = %u; align = %u; signed = false; } := unsigned long;\n"
		"typealias integer { size = 5; align = 1; signed = false; } := uint5_t;\n"
		"typealias integer { size = 27; align = 1; signed = false; } := uint27_t;\n"
		"\n"
		"trace {\n"
		" major = %u;\n"
		" minor = %u;\n"
		" uuid = \"%s\";\n"
		" byte_order = %s;\n"
		" packet.header := struct {\n"
		" uint32_t magic;\n"
		" uint8_t uuid[16];\n"
		" uint32_t stream_id;\n"
		" uint64_t stream_instance_id;\n"
		" };\n"
		"};\n\n",
		lttng_alignof(uint8_t) * CHAR_BIT,
		lttng_alignof(uint16_t) * CHAR_BIT,
		lttng_alignof(uint32_t) * CHAR_BIT,
		lttng_alignof(uint64_t) * CHAR_BIT,
		sizeof(unsigned long) * CHAR_BIT,
		lttng_alignof(unsigned long) * CHAR_BIT,
		CTF_SPEC_MAJOR,
		CTF_SPEC_MINOR,
		uuid_s,
#if __BYTE_ORDER == __BIG_ENDIAN
		"be"
#else
		"le"
#endif
		);
	if (ret)
		goto end;

	/*
	 * `env` block: tracer identity and kernel information. The hostname
	 * is read from the current task's UTS namespace, not the initial one.
	 */
	ret = lttng_metadata_printf(session,
		"env {\n"
		" hostname = \"%s\";\n"
		" domain = \"kernel\";\n"
		" sysname = \"%s\";\n"
		" kernel_release = \"%s\";\n"
		" kernel_version = \"%s\";\n"
		" tracer_name = \"lttng-modules\";\n"
		" tracer_major = %d;\n"
		" tracer_minor = %d;\n"
		" tracer_patchlevel = %d;\n"
		" trace_buffering_scheme = \"global\";\n",
		current->nsproxy->uts_ns->name.nodename,
		utsname()->sysname,
		utsname()->release,
		utsname()->version,
		LTTNG_MODULES_MAJOR_VERSION,
		LTTNG_MODULES_MINOR_VERSION,
		LTTNG_MODULES_PATCHLEVEL_VERSION
		);
	if (ret)
		goto end;

	/* User-provided strings go through CTF escaping. */
	ret = print_metadata_escaped_field(session, "trace_name", session->name);
	if (ret)
		goto end;
	ret = print_metadata_escaped_field(session, "trace_creation_datetime",
			session->creation_time);
	if (ret)
		goto end;

	/* Add the product UUID to the 'env' section */
	product_uuid = dmi_get_system_info(DMI_PRODUCT_UUID);
	if (product_uuid) {
		ret = lttng_metadata_printf(session,
				" product_uuid = \"%s\";\n",
				product_uuid
				);
		if (ret)
			goto end;
	}

	/* Close the 'env' section */
	ret = lttng_metadata_printf(session, "};\n\n");
	if (ret)
		goto end;

	/* `clock` block: name, optional uuid, description, frequency, offset. */
	ret = lttng_metadata_printf(session,
		"clock {\n"
		" name = \"%s\";\n",
		trace_clock_name()
		);
	if (ret)
		goto end;

	/* The clock uuid is optional: only emitted when available. */
	if (!trace_clock_uuid(clock_uuid_s)) {
		ret = lttng_metadata_printf(session,
			" uuid = \"%s\";\n",
			clock_uuid_s
			);
		if (ret)
			goto end;
	}

	ret = lttng_metadata_printf(session,
		" description = \"%s\";\n"
		" freq = %llu; /* Frequency, in Hz */\n"
		" /* clock value offset from Epoch is: offset * (1/freq) */\n"
		" offset = %lld;\n"
		"};\n\n",
		trace_clock_description(),
		(unsigned long long) trace_clock_freq(),
		(long long) measure_clock_offset()
		);
	if (ret)
		goto end;

	/* Integer typedefs mapped onto the trace clock value (27/32/64-bit). */
	ret = lttng_metadata_printf(session,
		"typealias integer {\n"
		" size = 27; align = 1; signed = false;\n"
		" map = clock.%s.value;\n"
		"} := uint27_clock_monotonic_t;\n"
		"\n"
		"typealias integer {\n"
		" size = 32; align = %u; signed = false;\n"
		" map = clock.%s.value;\n"
		"} := uint32_clock_monotonic_t;\n"
		"\n"
		"typealias integer {\n"
		" size = 64; align = %u; signed = false;\n"
		" map = clock.%s.value;\n"
		"} := uint64_clock_monotonic_t;\n\n",
		trace_clock_name(),
		lttng_alignof(uint32_t) * CHAR_BIT,
		trace_clock_name(),
		lttng_alignof(uint64_t) * CHAR_BIT,
		trace_clock_name()
		);
	if (ret)
		goto end;

	ret = _lttng_stream_packet_context_declare(session);
	if (ret)
		goto end;

	ret = _lttng_event_header_declare(session);
	if (ret)
		goto end;

skip_session:
	/* Per-channel metadata (re-run on every call; helpers skip dumped ones). */
	list_for_each_entry(chan, &session->chan, list) {
		ret = _lttng_channel_metadata_statedump(session, chan);
		if (ret)
			goto end;
	}

	list_for_each_entry(event, &session->events, list) {
		/* Skip counter container. */
		/* Only channel-backed events are described in CTF metadata. */
		if (event->container->type != LTTNG_EVENT_CONTAINER_CHANNEL)
			continue;
		ret = _lttng_event_metadata_statedump(session, event);
		if (ret)
			goto end;
	}
	session->metadata_dumped = 1;
end:
	lttng_metadata_end(session);
	return ret;
}
4508
/**
 * lttng_transport_register - LTT transport registration
 * @transport: transport structure
 *
 * Registers a transport which can be used as output to extract the data out of
 * LTTng. The module calling this registration function must ensure that no
 * trap-inducing code will be executed by the transport functions. E.g.
 * vmalloc_sync_mappings() must be called between a vmalloc and the moment the memory
 * is made visible to the transport function. This registration acts as a
 * vmalloc_sync_mappings. Therefore, only if the module allocates virtual memory
 * after its registration must it synchronize the TLBs.
 *
 * The transport list is protected by sessions_mutex.
 */
void lttng_transport_register(struct lttng_transport *transport)
{
	/*
	 * Make sure no page fault can be triggered by the module about to be
	 * registered. We deal with this here so we don't have to call
	 * vmalloc_sync_mappings() in each module's init.
	 */
	wrapper_vmalloc_sync_mappings();

	mutex_lock(&sessions_mutex);
	list_add_tail(&transport->node, &lttng_transport_list);
	mutex_unlock(&sessions_mutex);
}
EXPORT_SYMBOL_GPL(lttng_transport_register);
4535
/**
 * lttng_transport_unregister - LTT transport unregistration
 * @transport: transport structure
 *
 * Removes the transport from the global transport list, under the
 * protection of sessions_mutex.
 */
void lttng_transport_unregister(struct lttng_transport *transport)
{
	mutex_lock(&sessions_mutex);
	list_del(&transport->node);
	mutex_unlock(&sessions_mutex);
}
EXPORT_SYMBOL_GPL(lttng_transport_unregister);
4547
/**
 * lttng_counter_transport_register - LTTng counter transport registration
 * @transport: counter transport structure
 *
 * Adds the counter transport to the global counter transport list,
 * protected by sessions_mutex. As with lttng_transport_register(), the
 * registration also acts as a vmalloc_sync_mappings for the calling module.
 */
void lttng_counter_transport_register(struct lttng_counter_transport *transport)
{
	/*
	 * Make sure no page fault can be triggered by the module about to be
	 * registered. We deal with this here so we don't have to call
	 * vmalloc_sync_mappings() in each module's init.
	 */
	wrapper_vmalloc_sync_mappings();

	mutex_lock(&sessions_mutex);
	list_add_tail(&transport->node, &lttng_counter_transport_list);
	mutex_unlock(&sessions_mutex);
}
EXPORT_SYMBOL_GPL(lttng_counter_transport_register);
4562
/**
 * lttng_counter_transport_unregister - LTTng counter transport unregistration
 * @transport: counter transport structure
 *
 * Removes the counter transport from the global list under sessions_mutex.
 */
void lttng_counter_transport_unregister(struct lttng_counter_transport *transport)
{
	mutex_lock(&sessions_mutex);
	list_del(&transport->node);
	mutex_unlock(&sessions_mutex);
}
EXPORT_SYMBOL_GPL(lttng_counter_transport_unregister);
4570
4571 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
4572
4573 enum cpuhp_state lttng_hp_prepare;
4574 enum cpuhp_state lttng_hp_online;
4575
4576 static int lttng_hotplug_prepare(unsigned int cpu, struct hlist_node *node)
4577 {
4578 struct lttng_cpuhp_node *lttng_node;
4579
4580 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
4581 switch (lttng_node->component) {
4582 case LTTNG_RING_BUFFER_FRONTEND:
4583 return 0;
4584 case LTTNG_RING_BUFFER_BACKEND:
4585 return lttng_cpuhp_rb_backend_prepare(cpu, lttng_node);
4586 case LTTNG_RING_BUFFER_ITER:
4587 return 0;
4588 case LTTNG_CONTEXT_PERF_COUNTERS:
4589 return 0;
4590 default:
4591 return -EINVAL;
4592 }
4593 }
4594
4595 static int lttng_hotplug_dead(unsigned int cpu, struct hlist_node *node)
4596 {
4597 struct lttng_cpuhp_node *lttng_node;
4598
4599 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
4600 switch (lttng_node->component) {
4601 case LTTNG_RING_BUFFER_FRONTEND:
4602 return lttng_cpuhp_rb_frontend_dead(cpu, lttng_node);
4603 case LTTNG_RING_BUFFER_BACKEND:
4604 return 0;
4605 case LTTNG_RING_BUFFER_ITER:
4606 return 0;
4607 case LTTNG_CONTEXT_PERF_COUNTERS:
4608 return lttng_cpuhp_perf_counter_dead(cpu, lttng_node);
4609 default:
4610 return -EINVAL;
4611 }
4612 }
4613
4614 static int lttng_hotplug_online(unsigned int cpu, struct hlist_node *node)
4615 {
4616 struct lttng_cpuhp_node *lttng_node;
4617
4618 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
4619 switch (lttng_node->component) {
4620 case LTTNG_RING_BUFFER_FRONTEND:
4621 return lttng_cpuhp_rb_frontend_online(cpu, lttng_node);
4622 case LTTNG_RING_BUFFER_BACKEND:
4623 return 0;
4624 case LTTNG_RING_BUFFER_ITER:
4625 return lttng_cpuhp_rb_iter_online(cpu, lttng_node);
4626 case LTTNG_CONTEXT_PERF_COUNTERS:
4627 return lttng_cpuhp_perf_counter_online(cpu, lttng_node);
4628 default:
4629 return -EINVAL;
4630 }
4631 }
4632
4633 static int lttng_hotplug_offline(unsigned int cpu, struct hlist_node *node)
4634 {
4635 struct lttng_cpuhp_node *lttng_node;
4636
4637 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
4638 switch (lttng_node->component) {
4639 case LTTNG_RING_BUFFER_FRONTEND:
4640 return lttng_cpuhp_rb_frontend_offline(cpu, lttng_node);
4641 case LTTNG_RING_BUFFER_BACKEND:
4642 return 0;
4643 case LTTNG_RING_BUFFER_ITER:
4644 return 0;
4645 case LTTNG_CONTEXT_PERF_COUNTERS:
4646 return 0;
4647 default:
4648 return -EINVAL;
4649 }
4650 }
4651
4652 static int __init lttng_init_cpu_hotplug(void)
4653 {
4654 int ret;
4655
4656 ret = cpuhp_setup_state_multi(CPUHP_BP_PREPARE_DYN, "lttng:prepare",
4657 lttng_hotplug_prepare,
4658 lttng_hotplug_dead);
4659 if (ret < 0) {
4660 return ret;
4661 }
4662 lttng_hp_prepare = ret;
4663 lttng_rb_set_hp_prepare(ret);
4664
4665 ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "lttng:online",
4666 lttng_hotplug_online,
4667 lttng_hotplug_offline);
4668 if (ret < 0) {
4669 cpuhp_remove_multi_state(lttng_hp_prepare);
4670 lttng_hp_prepare = 0;
4671 return ret;
4672 }
4673 lttng_hp_online = ret;
4674 lttng_rb_set_hp_online(ret);
4675
4676 return 0;
4677 }
4678
/*
 * Unregister the hotplug states in reverse registration order:
 * clear the published state number before removing each state.
 */
static void __exit lttng_exit_cpu_hotplug(void)
{
	lttng_rb_set_hp_online(0);
	cpuhp_remove_multi_state(lttng_hp_online);
	lttng_rb_set_hp_prepare(0);
	cpuhp_remove_multi_state(lttng_hp_prepare);
}
4686
4687 #else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
/* Stub: the cpuhp multi-state API is unavailable on kernels < 4.10. */
static int lttng_init_cpu_hotplug(void)
{
	return 0;
}
/* Stub: nothing to tear down on kernels < 4.10. */
static void lttng_exit_cpu_hotplug(void)
{
}
4695 #endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
4696
4697
/*
 * Module init: bring up the LTTng subsystems in dependency order, then
 * announce the loaded version. On failure, the goto chain unwinds only
 * what was successfully initialized, in reverse order.
 *
 * NOTE(review): lttng_probes_init() and the wrapper_*_init() calls are
 * not unwound on later failure paths — confirm this is intentional.
 */
static int __init lttng_events_init(void)
{
	int ret;

	ret = wrapper_lttng_fixup_sig(THIS_MODULE);
	if (ret)
		return ret;
	ret = wrapper_get_pfnblock_flags_mask_init();
	if (ret)
		return ret;
	ret = wrapper_get_pageblock_flags_mask_init();
	if (ret)
		return ret;
	ret = lttng_probes_init();
	if (ret)
		return ret;
	ret = lttng_context_init();
	if (ret)
		return ret;
	ret = lttng_tracepoint_init();
	if (ret)
		goto error_tp;
	/* Slab caches for the per-event and per-event-notifier structures. */
	event_cache = KMEM_CACHE(lttng_event, 0);
	if (!event_cache) {
		ret = -ENOMEM;
		goto error_kmem_event;
	}
	event_notifier_cache = KMEM_CACHE(lttng_event_notifier, 0);
	if (!event_notifier_cache) {
		ret = -ENOMEM;
		goto error_kmem_event_notifier;
	}
	ret = lttng_abi_init();
	if (ret)
		goto error_abi;
	ret = lttng_logger_init();
	if (ret)
		goto error_logger;
	ret = lttng_init_cpu_hotplug();
	if (ret)
		goto error_hotplug;
	printk(KERN_NOTICE "LTTng: Loaded modules v%s.%s.%s%s (%s)%s%s\n",
		__stringify(LTTNG_MODULES_MAJOR_VERSION),
		__stringify(LTTNG_MODULES_MINOR_VERSION),
		__stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
		LTTNG_MODULES_EXTRAVERSION,
		LTTNG_VERSION_NAME,
#ifdef LTTNG_EXTRA_VERSION_GIT
		LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
#else
		"",
#endif
#ifdef LTTNG_EXTRA_VERSION_NAME
		LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
#else
		"");
#endif
	return 0;

	/* Error unwinding: each label undoes the step above its goto site. */
error_hotplug:
	lttng_logger_exit();
error_logger:
	lttng_abi_exit();
error_abi:
	kmem_cache_destroy(event_notifier_cache);
error_kmem_event_notifier:
	kmem_cache_destroy(event_cache);
error_kmem_event:
	lttng_tracepoint_exit();
error_tp:
	lttng_context_exit();
	printk(KERN_NOTICE "LTTng: Failed to load modules v%s.%s.%s%s (%s)%s%s\n",
		__stringify(LTTNG_MODULES_MAJOR_VERSION),
		__stringify(LTTNG_MODULES_MINOR_VERSION),
		__stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
		LTTNG_MODULES_EXTRAVERSION,
		LTTNG_VERSION_NAME,
#ifdef LTTNG_EXTRA_VERSION_GIT
		LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
#else
		"",
#endif
#ifdef LTTNG_EXTRA_VERSION_NAME
		LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
#else
		"");
#endif
	return ret;
}
4787
4788 module_init(lttng_events_init);
4789
/*
 * Module exit: tear down subsystems in reverse init order, destroy any
 * sessions still on the global list, then announce the unload.
 */
static void __exit lttng_events_exit(void)
{
	struct lttng_session *session, *tmpsession;

	lttng_exit_cpu_hotplug();
	lttng_logger_exit();
	lttng_abi_exit();
	/* Safe iteration: lttng_session_destroy() unlinks each session. */
	list_for_each_entry_safe(session, tmpsession, &sessions, list)
		lttng_session_destroy(session);
	kmem_cache_destroy(event_cache);
	kmem_cache_destroy(event_notifier_cache);
	lttng_tracepoint_exit();
	lttng_context_exit();
	printk(KERN_NOTICE "LTTng: Unloaded modules v%s.%s.%s%s (%s)%s%s\n",
		__stringify(LTTNG_MODULES_MAJOR_VERSION),
		__stringify(LTTNG_MODULES_MINOR_VERSION),
		__stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
		LTTNG_MODULES_EXTRAVERSION,
		LTTNG_VERSION_NAME,
#ifdef LTTNG_EXTRA_VERSION_GIT
		LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
#else
		"",
#endif
#ifdef LTTNG_EXTRA_VERSION_NAME
		LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
#else
		"");
#endif
}
4820
4821 module_exit(lttng_events_exit);
4822
4823 #include <generated/patches.h>
4824 #ifdef LTTNG_EXTRA_VERSION_GIT
4825 MODULE_INFO(extra_version_git, LTTNG_EXTRA_VERSION_GIT);
4826 #endif
4827 #ifdef LTTNG_EXTRA_VERSION_NAME
4828 MODULE_INFO(extra_version_name, LTTNG_EXTRA_VERSION_NAME);
4829 #endif
4830 MODULE_LICENSE("GPL and additional rights");
4831 MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
4832 MODULE_DESCRIPTION("LTTng tracer");
4833 MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
4834 __stringify(LTTNG_MODULES_MINOR_VERSION) "."
4835 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
4836 LTTNG_MODULES_EXTRAVERSION);
This page took 0.197784 seconds and 5 git commands to generate.