SoW-2019-0002: Dynamic Snapshot
[lttng-ust.git] / liblttng-ust / lttng-events.c
8020ceb5 1/*
7dd08bec 2 * lttng-events.c
8020ceb5 3 *
4 * Holds LTTng per-session event registry.
5 *
6 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; only
11 * version 2.1 of the License.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21 */
22
b5234c06 23#define _GNU_SOURCE
3fbec7dc 24#define _LGPL_SOURCE
b5234c06 25#include <stdio.h>
d5fc3224 26#include <assert.h>
b5234c06 27#include <errno.h>
28#include <limits.h>
29#include <pthread.h>
30#include <sys/shm.h>
31#include <sys/ipc.h>
32#include <stdint.h>
33#include <stddef.h>
34#include <inttypes.h>
35#include <time.h>
196ec2df 36#include <stdbool.h>
d5fc3224 37#include <unistd.h>
2ae57758 38#include <lttng/ust-endian.h>
39
40#include <urcu-bp.h>
d5fc3224 41#include <urcu/arch.h>
44c72f10 42#include <urcu/compiler.h>
43#include <urcu/hlist.h>
44#include <urcu/list.h>
44c72f10 45#include <urcu/uatomic.h>
46
47#include <lttng/tracepoint.h>
4318ae1b 48#include <lttng/ust-events.h>
49
50#include <usterr-signal-safe.h>
51#include <helper.h>
7d3e35b6 52#include <lttng/ust-ctl.h>
32ce8569 53#include <ust-comm.h>
d5fc3224 54#include <ust-fd.h>
55#include <lttng/ust-dynamic-type.h>
56#include <lttng/ust-context-provider.h>
1f18504e 57#include "error.h"
08114193 58#include "compat.h"
eda498b8 59#include "lttng-ust-uuid.h"
44c72f10 60
457a6b58 61#include "tracepoint-internal.h"
196ec2df 62#include "string-utils.h"
63#include "lttng-tracer.h"
64#include "lttng-tracer-core.h"
cf73e0fe 65#include "lttng-ust-statedump.h"
66#include "share.h"
67#include "ust-events-internal.h"
b728d87e 68#include "wait.h"
8d8a24c8 69#include "../libringbuffer/shm.h"
596c4223 70#include "jhash.h"
d5fc3224 71#include "ust-abi.h"
72
73/*
74 * All operations within this file are called by the communication
75 * thread, under ust_lock protection.
8165c8da 76 */
8165c8da 77
b5234c06 78static CDS_LIST_HEAD(sessions);
d5fc3224 79static CDS_LIST_HEAD(trigger_groups);
8165c8da 80
81struct cds_list_head *_lttng_get_sessions(void)
82{
83 return &sessions;
84}
85
7dd08bec 86static void _lttng_event_destroy(struct lttng_event *event);
d5fc3224 87static void _lttng_trigger_destroy(struct lttng_trigger *trigger);
c785c634 88static void _lttng_enum_destroy(struct lttng_enum *_enum);
8020ceb5 89
e58095ef 90static
91void lttng_session_lazy_sync_event_enablers(struct lttng_session *session);
92static
93void lttng_session_sync_event_enablers(struct lttng_session *session);
e58095ef 94static
d5fc3224 95void lttng_trigger_group_sync_enablers(struct lttng_trigger_group *trigger_group);
96static
97void lttng_enabler_destroy(struct lttng_enabler *enabler);
98
99/*
100 * Called with ust lock held.
101 */
102int lttng_session_active(void)
103{
104 struct lttng_session *iter;
105
106 cds_list_for_each_entry(iter, &sessions, node) {
107 if (iter->active)
108 return 1;
109 }
110 return 0;
111}
112
113static
114int lttng_loglevel_match(int loglevel,
115 unsigned int has_loglevel,
116 enum lttng_ust_loglevel_type req_type,
117 int req_loglevel)
118{
119 if (!has_loglevel)
120 loglevel = TRACE_DEFAULT;
121 switch (req_type) {
122 case LTTNG_UST_LOGLEVEL_RANGE:
aacb3774 123 if (loglevel <= req_loglevel
67ada458 124 || (req_loglevel == -1 && loglevel <= TRACE_DEBUG))
125 return 1;
126 else
127 return 0;
128 case LTTNG_UST_LOGLEVEL_SINGLE:
aacb3774 129 if (loglevel == req_loglevel
67ada458 130 || (req_loglevel == -1 && loglevel <= TRACE_DEBUG))
131 return 1;
132 else
133 return 0;
134 case LTTNG_UST_LOGLEVEL_ALL:
135 default:
67ada458 136 if (loglevel <= TRACE_DEBUG)
137 return 1;
138 else
139 return 0;
140 }
141}
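/*
 * Loglevel matching semantics, assuming the usual LTTng-UST ordering where
 * numerically lower loglevels are more severe and TRACE_DEBUG is the most
 * verbose: a RANGE request matches events at least as severe as the
 * requested level (e.g. a TRACE_ERR event matches a TRACE_WARNING range,
 * while a TRACE_DEBUG event does not), SINGLE requires an exact match, and
 * ALL matches any valid loglevel. A req_loglevel of -1 acts as "unset" and
 * accepts any valid loglevel.
 */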
142
143void synchronize_trace(void)
144{
8020ceb5 145 synchronize_rcu();
146}
147
7dd08bec 148struct lttng_session *lttng_session_create(void)
8020ceb5 149{
7dd08bec 150 struct lttng_session *session;
74d81a6c 151 int i;
8020ceb5 152
7dd08bec 153 session = zmalloc(sizeof(struct lttng_session));
154 if (!session)
155 return NULL;
156 if (lttng_session_context_init(&session->ctx)) {
157 free(session);
158 return NULL;
159 }
160 CDS_INIT_LIST_HEAD(&session->chan_head);
161 CDS_INIT_LIST_HEAD(&session->events_head);
c785c634 162 CDS_INIT_LIST_HEAD(&session->enums_head);
e58095ef 163 CDS_INIT_LIST_HEAD(&session->enablers_head);
164 for (i = 0; i < LTTNG_UST_EVENT_HT_SIZE; i++)
165 CDS_INIT_HLIST_HEAD(&session->events_ht.table[i]);
166 for (i = 0; i < LTTNG_UST_ENUM_HT_SIZE; i++)
167 CDS_INIT_HLIST_HEAD(&session->enums_ht.table[i]);
e58095ef 168 cds_list_add(&session->node, &sessions);
169 return session;
170}
171
172struct lttng_trigger_group *lttng_trigger_group_create(void)
173{
174 struct lttng_trigger_group *trigger_group;
175 int i;
176
177 trigger_group = zmalloc(sizeof(struct lttng_trigger_group));
178 if (!trigger_group)
179 return NULL;
180
181 CDS_INIT_LIST_HEAD(&trigger_group->enablers_head);
182 CDS_INIT_LIST_HEAD(&trigger_group->triggers_head);
183 for (i = 0; i < LTTNG_UST_TRIGGER_HT_SIZE; i++)
184 CDS_INIT_HLIST_HEAD(&trigger_group->triggers_ht.table[i]);
185
186 cds_list_add(&trigger_group->node, &trigger_groups);
187
188 return trigger_group;
189}
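/*
 * A trigger group ties together its enablers, its triggers (hashed by event
 * name in triggers_ht) and a notification_fd. The fd is presumably the write
 * end of a pipe towards the trigger listener: lttng_trigger_send_notification()
 * writes to it and lttng_trigger_group_destroy() closes it through the fd
 * tracker.
 */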
190
191/*
192 * Only used internally at session destruction.
193 */
194static
195void _lttng_channel_unmap(struct lttng_channel *lttng_chan)
196{
197 struct channel *chan;
198 struct lttng_ust_shm_handle *handle;
199
200 cds_list_del(&lttng_chan->node);
201 lttng_destroy_context(lttng_chan->ctx);
202 chan = lttng_chan->chan;
203 handle = lttng_chan->handle;
204 /*
205 * note: lttng_chan is private data contained within handle. It
206 * will be freed along with the handle.
207 */
208 channel_destroy(chan, handle, 0);
209}
210
211static
212void register_event(struct lttng_event *event)
213{
214 int ret;
215 const struct lttng_event_desc *desc;
216
217 assert(event->registered == 0);
218 desc = event->desc;
baa1e0bc 219 ret = __tracepoint_probe_register_queue_release(desc->name,
220 desc->probe_callback,
221 event, desc->signature);
222 WARN_ON_ONCE(ret);
223 if (!ret)
224 event->registered = 1;
225}
226
227static
228void register_trigger(struct lttng_trigger *trigger)
229{
230 int ret;
231 const struct lttng_event_desc *desc;
232
233 assert(trigger->registered == 0);
234 desc = trigger->desc;
235 ret = __tracepoint_probe_register_queue_release(desc->name,
236 desc->u.ext.trigger_callback, trigger, desc->signature);
237 WARN_ON_ONCE(ret);
238 if (!ret)
239 trigger->registered = 1;
240}
241
242static
243void unregister_event(struct lttng_event *event)
244{
245 int ret;
246 const struct lttng_event_desc *desc;
247
248 assert(event->registered == 1);
249 desc = event->desc;
baa1e0bc 250 ret = __tracepoint_probe_unregister_queue_release(desc->name,
251 desc->probe_callback,
252 event);
253 WARN_ON_ONCE(ret);
254 if (!ret)
255 event->registered = 0;
256}
257
258static
259void unregister_trigger(struct lttng_trigger *trigger)
260{
261 int ret;
262 const struct lttng_event_desc *desc;
263
264 assert(trigger->registered == 1);
265 desc = trigger->desc;
266 ret = __tracepoint_probe_unregister_queue_release(desc->name,
267 desc->u.ext.trigger_callback, trigger);
268 WARN_ON_ONCE(ret);
269 if (!ret)
270 trigger->registered = 0;
271}
272
273/*
274 * Only used internally at session destruction.
275 */
276static
277void _lttng_event_unregister(struct lttng_event *event)
278{
279 if (event->registered)
280 unregister_event(event);
281}
282
283/*
284 * Only used internally at session destruction.
285 */
286static
287void _lttng_trigger_unregister(struct lttng_trigger *trigger)
288{
289 if (trigger->registered)
290 unregister_trigger(trigger);
291}
292
7dd08bec 293void lttng_session_destroy(struct lttng_session *session)
8020ceb5 294{
295 struct lttng_channel *chan, *tmpchan;
296 struct lttng_event *event, *tmpevent;
c785c634 297 struct lttng_enum *_enum, *tmp_enum;
d5fc3224 298 struct lttng_event_enabler *event_enabler, *event_tmpenabler;
8020ceb5 299
b5234c06 300 CMM_ACCESS_ONCE(session->active) = 0;
e58095ef 301 cds_list_for_each_entry(event, &session->events_head, node) {
ac6b4ac6 302 _lttng_event_unregister(event);
303 }
304 synchronize_trace(); /* Wait for in-flight events to complete */
baa1e0bc 305 __tracepoint_probe_prune_release_queue();
d5fc3224 306 cds_list_for_each_entry_safe(event_enabler, event_tmpenabler,
e58095ef 307 &session->enablers_head, node)
d5fc3224 308 lttng_event_enabler_destroy(event_enabler);
309 cds_list_for_each_entry_safe(event, tmpevent,
310 &session->events_head, node)
7dd08bec 311 _lttng_event_destroy(event);
312 cds_list_for_each_entry_safe(_enum, tmp_enum,
313 &session->enums_head, node)
314 _lttng_enum_destroy(_enum);
e58095ef 315 cds_list_for_each_entry_safe(chan, tmpchan, &session->chan_head, node)
74d81a6c 316 _lttng_channel_unmap(chan);
e58095ef 317 cds_list_del(&session->node);
53569322 318 lttng_destroy_context(session->ctx);
b5234c06 319 free(session);
320}
321
322void lttng_trigger_group_destroy(
323 struct lttng_trigger_group *trigger_group)
324{
325 int close_ret;
326 struct lttng_trigger_enabler *trigger_enabler, *tmptrigger_enabler;
327 struct lttng_trigger *trigger, *tmptrigger;
328
329 if (!trigger_group) {
330 return;
331 }
332
333 cds_list_for_each_entry(trigger, &trigger_group->triggers_head, node)
334 _lttng_trigger_unregister(trigger);
335
336 synchronize_trace();
337
338 cds_list_for_each_entry_safe(trigger_enabler, tmptrigger_enabler,
339 &trigger_group->enablers_head, node)
340 lttng_trigger_enabler_destroy(trigger_enabler);
341
342 cds_list_for_each_entry_safe(trigger, tmptrigger,
343 &trigger_group->triggers_head, node)
344 _lttng_trigger_destroy(trigger);
345
346 /* Close the notification fd to the listener of triggers. */
347
348 lttng_ust_lock_fd_tracker();
349 close_ret = close(trigger_group->notification_fd);
350 if (!close_ret) {
351 lttng_ust_delete_fd_from_tracker(trigger_group->notification_fd);
352 } else {
353 PERROR("close");
354 abort();
355 }
356 lttng_ust_unlock_fd_tracker();
357
358 cds_list_del(&trigger_group->node);
359
360 free(trigger_group);
361}
362
363static
364void lttng_enabler_destroy(struct lttng_enabler *enabler)
365{
366 struct lttng_ust_filter_bytecode_node *filter_node, *tmp_filter_node;
367 struct lttng_ust_excluder_node *excluder_node, *tmp_excluder_node;
368
369 if (!enabler) {
370 return;
371 }
372
373 /* Destroy filter bytecode */
374 cds_list_for_each_entry_safe(filter_node, tmp_filter_node,
375 &enabler->filter_bytecode_head, node) {
376 free(filter_node);
377 }
378
379 /* Destroy excluders */
380 cds_list_for_each_entry_safe(excluder_node, tmp_excluder_node,
381 &enabler->excluder_head, node) {
382 free(excluder_node);
383 }
384}
385
386void lttng_trigger_enabler_destroy(struct lttng_trigger_enabler *trigger_enabler)
387{
388 if (!trigger_enabler) {
389 return;
390 }
391
392 cds_list_del(&trigger_enabler->node);
393
394 lttng_enabler_destroy(lttng_trigger_enabler_as_enabler(trigger_enabler));
395
396 free(trigger_enabler);
397}
398
399static
400int lttng_enum_create(const struct lttng_enum_desc *desc,
401 struct lttng_session *session)
402{
403 const char *enum_name = desc->name;
404 struct lttng_enum *_enum;
405 struct cds_hlist_head *head;
406 int ret = 0;
407 size_t name_len = strlen(enum_name);
408 uint32_t hash;
409 int notify_socket;
410
b33b46f7 411 /* Check if this enum is already registered for this session. */
412 hash = jhash(enum_name, name_len, 0);
413 head = &session->enums_ht.table[hash & (LTTNG_UST_ENUM_HT_SIZE - 1)];
414
415 _enum = lttng_ust_enum_get_from_desc(session, desc);
416 if (_enum) {
417 ret = -EEXIST;
418 goto exist;
419 }
420
421 notify_socket = lttng_get_notify_socket(session->owner);
422 if (notify_socket < 0) {
423 ret = notify_socket;
424 goto socket_error;
425 }
426
427 _enum = zmalloc(sizeof(*_enum));
428 if (!_enum) {
429 ret = -ENOMEM;
430 goto cache_error;
431 }
432 _enum->session = session;
433 _enum->desc = desc;
434
435 ret = ustcomm_register_enum(notify_socket,
436 session->objd,
437 enum_name,
438 desc->nr_entries,
439 desc->entries,
440 &_enum->id);
441 if (ret < 0) {
442 DBG("Error (%d) registering enumeration to sessiond", ret);
443 goto sessiond_register_error;
444 }
445 cds_list_add(&_enum->node, &session->enums_head);
446 cds_hlist_add_head(&_enum->hlist, head);
447 return 0;
448
449sessiond_register_error:
450 free(_enum);
451cache_error:
452socket_error:
453exist:
454 return ret;
455}
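/*
 * Enumerations are registered at most once per session: the lookup by
 * descriptor detects duplicates (-EEXIST above), and ustcomm_register_enum()
 * asks the session daemon to assign _enum->id before the enum is linked into
 * the session's list and hash table.
 */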
456
457static
458int lttng_create_enum_check(const struct lttng_type *type,
459 struct lttng_session *session)
460{
461 switch (type->atype) {
462 case atype_enum:
463 {
464 const struct lttng_enum_desc *enum_desc;
465 int ret;
466
467 enum_desc = type->u.basic.enumeration.desc;
468 ret = lttng_enum_create(enum_desc, session);
469 if (ret && ret != -EEXIST) {
470 DBG("Unable to create enum error: (%d)", ret);
471 return ret;
472 }
473 break;
474 }
475 case atype_dynamic:
476 {
477 const struct lttng_event_field *tag_field_generic;
478 const struct lttng_enum_desc *enum_desc;
479 int ret;
480
481 tag_field_generic = lttng_ust_dynamic_type_tag_field();
482 enum_desc = tag_field_generic->type.u.basic.enumeration.desc;
483 ret = lttng_enum_create(enum_desc, session);
484 if (ret && ret != -EEXIST) {
485 DBG("Unable to create enum error: (%d)", ret);
486 return ret;
487 }
488 break;
489 }
490 default:
491 /* TODO: nested types when they become supported. */
492 break;
493 }
494 return 0;
495}
496
497static
498int lttng_create_all_event_enums(size_t nr_fields,
499 const struct lttng_event_field *event_fields,
500 struct lttng_session *session)
501{
502 size_t i;
503 int ret;
504
505 /* For each field, ensure enum is part of the session. */
506 for (i = 0; i < nr_fields; i++) {
507 const struct lttng_type *type = &event_fields[i].type;
508
509 ret = lttng_create_enum_check(type, session);
510 if (ret)
511 return ret;
512 }
513 return 0;
514}
515
516static
517int lttng_create_all_ctx_enums(size_t nr_fields,
518 const struct lttng_ctx_field *ctx_fields,
519 struct lttng_session *session)
520{
521 size_t i;
522 int ret;
523
524 /* For each field, ensure enum is part of the session. */
525 for (i = 0; i < nr_fields; i++) {
526 const struct lttng_type *type = &ctx_fields[i].event_field.type;
527
528 ret = lttng_create_enum_check(type, session);
529 if (ret)
530 return ret;
531 }
532 return 0;
533}
534
535/*
536 * Ensure that a state-dump will be performed for this session at the end
537 * of the current handle_message().
538 */
539int lttng_session_statedump(struct lttng_session *session)
540{
541 session->statedump_pending = 1;
542 lttng_ust_sockinfo_session_enabled(session->owner);
543 return 0;
544}
53569322 545
7dd08bec 546int lttng_session_enable(struct lttng_session *session)
547{
548 int ret = 0;
7dd08bec 549 struct lttng_channel *chan;
32ce8569 550 int notify_socket;
8020ceb5 551
552 if (session->active) {
553 ret = -EBUSY;
554 goto end;
555 }
556
557 notify_socket = lttng_get_notify_socket(session->owner);
558 if (notify_socket < 0)
559 return notify_socket;
560
561 /* Set transient enabler state to "enabled" */
562 session->tstate = 1;
e58095ef 563
3ff7660f 564 /* We need to sync enablers with session before activation. */
d5fc3224 565 lttng_session_sync_event_enablers(session);
3ff7660f 566
567 /*
568 * Snapshot the number of events per channel to know the type of header
569 * we need to use.
570 */
e58095ef 571 cds_list_for_each_entry(chan, &session->chan_head, node) {
32ce8569 572 const struct lttng_ctx *ctx;
83e43212 573 const struct lttng_ctx_field *fields = NULL;
32ce8569 574 size_t nr_fields = 0;
6ca18e66 575 uint32_t chan_id;
576
 577 /* Don't change it on session stop/restart. */
8020ceb5 578 if (chan->header_type)
579 continue;
580 ctx = chan->ctx;
581 if (ctx) {
582 nr_fields = ctx->nr_fields;
83e43212 583 fields = ctx->fields;
584 ret = lttng_create_all_ctx_enums(nr_fields, fields,
585 session);
586 if (ret < 0) {
587 DBG("Error (%d) adding enum to session", ret);
588 return ret;
589 }
590 }
591 ret = ustcomm_register_channel(notify_socket,
53569322 592 session,
593 session->objd,
594 chan->objd,
595 nr_fields,
596 fields,
6ca18e66 597 &chan_id,
32ce8569 598 &chan->header_type);
599 if (ret) {
600 DBG("Error (%d) registering channel to sessiond", ret);
32ce8569 601 return ret;
b869b5ae 602 }
603 if (chan_id != chan->id) {
604 DBG("Error: channel registration id (%u) does not match id assigned at creation (%u)",
605 chan_id, chan->id);
606 return -EINVAL;
607 }
608 }
609
ac6b4ac6 610 /* Set atomically the state to "active" */
611 CMM_ACCESS_ONCE(session->active) = 1;
612 CMM_ACCESS_ONCE(session->been_active) = 1;
95c25348 613
614 ret = lttng_session_statedump(session);
615 if (ret)
616 return ret;
8020ceb5 617end:
618 return ret;
619}
620
7dd08bec 621int lttng_session_disable(struct lttng_session *session)
622{
623 int ret = 0;
624
625 if (!session->active) {
626 ret = -EBUSY;
627 goto end;
628 }
ac6b4ac6 629 /* Set atomically the state to "inactive" */
b5234c06 630 CMM_ACCESS_ONCE(session->active) = 0;
631
632 /* Set transient enabler state to "disabled" */
633 session->tstate = 0;
d5fc3224 634 lttng_session_sync_event_enablers(session);
8020ceb5 635end:
636 return ret;
637}
638
7dd08bec 639int lttng_channel_enable(struct lttng_channel *channel)
976fe9ea 640{
ac6b4ac6 641 int ret = 0;
976fe9ea 642
643 if (channel->enabled) {
644 ret = -EBUSY;
645 goto end;
646 }
647 /* Set transient enabler state to "enabled" */
648 channel->tstate = 1;
d5fc3224 649 lttng_session_sync_event_enablers(channel->session);
650 /* Set atomically the state to "enabled" */
651 CMM_ACCESS_ONCE(channel->enabled) = 1;
652end:
653 return ret;
654}
655
7dd08bec 656int lttng_channel_disable(struct lttng_channel *channel)
976fe9ea 657{
ac6b4ac6 658 int ret = 0;
976fe9ea 659
660 if (!channel->enabled) {
661 ret = -EBUSY;
662 goto end;
663 }
664 /* Set atomically the state to "disabled" */
665 CMM_ACCESS_ONCE(channel->enabled) = 0;
666 /* Set transient enabler state to "enabled" */
667 channel->tstate = 0;
d5fc3224 668 lttng_session_sync_event_enablers(channel->session);
669end:
670 return ret;
671}
672
673static inline
674struct cds_hlist_head *borrow_hash_table_bucket(
675 struct cds_hlist_head *hash_table,
676 unsigned int hash_table_size,
677 const struct lttng_event_desc *desc)
678{
679 const char *event_name;
680 size_t name_len;
681 uint32_t hash;
682
683 event_name = desc->name;
684 name_len = strlen(event_name);
685
686 hash = jhash(event_name, name_len, 0);
687 return &hash_table[hash & (hash_table_size - 1)];
688}
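/*
 * Bucket selection masks the jhash value with (hash_table_size - 1), which
 * assumes the table sizes (LTTNG_UST_EVENT_HT_SIZE, LTTNG_UST_TRIGGER_HT_SIZE)
 * are powers of two. A minimal sketch of the lookup pattern used throughout
 * this file:
 *
 *	head = borrow_hash_table_bucket(session->events_ht.table,
 *			LTTNG_UST_EVENT_HT_SIZE, desc);
 *	cds_hlist_for_each_entry(event, node, head, hlist)
 *		if (event->desc == desc)
 *			break;
 */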
689
690/*
691 * Supports event creation while tracing session is active.
692 */
693static
694int lttng_event_create(const struct lttng_event_desc *desc,
695 struct lttng_channel *chan)
8020ceb5 696{
7dd08bec 697 struct lttng_event *event;
32ce8569 698 struct lttng_session *session = chan->session;
d56fa719 699 struct cds_hlist_head *head;
576599a0 700 int ret = 0;
701 int notify_socket, loglevel;
702 const char *uri;
8020ceb5 703
704 head = borrow_hash_table_bucket(chan->session->events_ht.table,
705 LTTNG_UST_EVENT_HT_SIZE, desc);
457a6b58 706
707 notify_socket = lttng_get_notify_socket(session->owner);
708 if (notify_socket < 0) {
709 ret = notify_socket;
710 goto socket_error;
711 }
712
713 ret = lttng_create_all_event_enums(desc->nr_fields, desc->fields,
714 session);
715 if (ret < 0) {
716 DBG("Error (%d) adding enum to session", ret);
717 goto create_enum_error;
718 }
719
720 /*
 721 * Check if the loglevel matches. Refuse to connect the event if not.
722 */
7dd08bec 723 event = zmalloc(sizeof(struct lttng_event));
724 if (!event) {
725 ret = -ENOMEM;
8020ceb5 726 goto cache_error;
576599a0 727 }
8020ceb5 728 event->chan = chan;
e58095ef 729
730 /* Event will be enabled by enabler sync. */
731 event->enabled = 0;
732 event->registered = 0;
733 CDS_INIT_LIST_HEAD(&event->bytecode_runtime_head);
734 CDS_INIT_LIST_HEAD(&event->enablers_ref_head);
735 event->desc = desc;
736
737 if (desc->loglevel)
738 loglevel = *(*event->desc->loglevel);
739 else
740 loglevel = TRACE_DEFAULT;
741 if (desc->u.ext.model_emf_uri)
742 uri = *(desc->u.ext.model_emf_uri);
743 else
744 uri = NULL;
745
746 /* Fetch event ID from sessiond */
747 ret = ustcomm_register_event(notify_socket,
c785c634 748 session,
749 session->objd,
750 chan->objd,
d5fc3224 751 desc->name,
13b21cd6
MD
752 loglevel,
753 desc->signature,
754 desc->nr_fields,
755 desc->fields,
756 uri,
757 &event->id);
758 if (ret < 0) {
759 DBG("Error (%d) registering event to sessiond", ret);
760 goto sessiond_register_error;
32ce8569 761 }
2b213b16 762
e58095ef 763 cds_list_add(&event->node, &chan->session->events_head);
d56fa719 764 cds_hlist_add_head(&event->hlist, head);
576599a0 765 return 0;
8020ceb5 766
32ce8569 767sessiond_register_error:
b5234c06 768 free(event);
8020ceb5 769cache_error:
c785c634 770create_enum_error:
32ce8569 771socket_error:
576599a0 772 return ret;
773}
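/*
 * Summary of the creation path above: enums referenced by the event fields
 * are registered first, then ustcomm_register_event() asks the session
 * daemon (over the notify socket) to assign event->id, and only then is the
 * event linked into the session's event list and hash table. Enabling and
 * tracepoint probe registration happen later, during enabler sync.
 */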
774
775static
776int lttng_trigger_create(const struct lttng_event_desc *desc,
777 uint64_t id, struct lttng_trigger_group *trigger_group)
778{
779 struct lttng_trigger *trigger;
780 struct cds_hlist_head *head;
781 int ret = 0;
782
783 /*
 784 * Get the hashtable bucket into which the created lttng_trigger object
 785 * should be inserted.
786 */
787 head = borrow_hash_table_bucket(trigger_group->triggers_ht.table,
788 LTTNG_UST_TRIGGER_HT_SIZE, desc);
789
790 trigger = zmalloc(sizeof(struct lttng_trigger));
791 if (!trigger) {
792 ret = -ENOMEM;
793 goto error;
794 }
795
796 trigger->group = trigger_group;
797 trigger->id = id;
798
799 /* Trigger will be enabled by enabler sync. */
800 trigger->enabled = 0;
801 trigger->registered = 0;
802
803 CDS_INIT_LIST_HEAD(&trigger->bytecode_runtime_head);
804 CDS_INIT_LIST_HEAD(&trigger->enablers_ref_head);
805 trigger->desc = desc;
806
807 cds_list_add(&trigger->node, &trigger_group->triggers_head);
808 cds_hlist_add_head(&trigger->hlist, head);
809
810 return 0;
811
812error:
813 return ret;
814}
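/*
 * Unlike lttng_event_create(), no round trip to the session daemon is needed
 * here: the trigger id is supplied by the caller (it originates from the
 * trigger enabler), so the object is simply allocated and linked into the
 * group's list and hash table.
 */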
815
816static
817void _lttng_trigger_destroy(struct lttng_trigger *trigger)
818{
819 struct lttng_enabler_ref *enabler_ref, *tmp_enabler_ref;
820
821 /* Remove from trigger list. */
822 cds_list_del(&trigger->node);
823 /* Remove from trigger hash table. */
824 cds_hlist_del(&trigger->hlist);
825
826 lttng_free_trigger_filter_runtime(trigger);
827
828 /* Free trigger enabler refs */
829 cds_list_for_each_entry_safe(enabler_ref, tmp_enabler_ref,
830 &trigger->enablers_ref_head, node)
831 free(enabler_ref);
832 free(trigger);
833}
834
e58095ef 835static
196ec2df 836int lttng_desc_match_star_glob_enabler(const struct lttng_event_desc *desc,
837 struct lttng_enabler *enabler)
838{
839 int loglevel = 0;
7e2e405c 840 unsigned int has_loglevel = 0;
e58095ef 841
d5fc3224 842 assert(enabler->format_type == LTTNG_ENABLER_FORMAT_STAR_GLOB);
843 if (!strutils_star_glob_match(enabler->event_param.name, SIZE_MAX,
844 desc->name, SIZE_MAX))
845 return 0;
846 if (desc->loglevel) {
847 loglevel = *(*desc->loglevel);
848 has_loglevel = 1;
849 }
850 if (!lttng_loglevel_match(loglevel,
851 has_loglevel,
852 enabler->event_param.loglevel_type,
853 enabler->event_param.loglevel))
854 return 0;
855 return 1;
856}
857
858static
859int lttng_desc_match_event_enabler(const struct lttng_event_desc *desc,
860 struct lttng_enabler *enabler)
861{
862 int loglevel = 0;
863 unsigned int has_loglevel = 0;
864
d5fc3224 865 assert(enabler->format_type == LTTNG_ENABLER_FORMAT_EVENT);
866 if (strcmp(desc->name, enabler->event_param.name))
867 return 0;
868 if (desc->loglevel) {
869 loglevel = *(*desc->loglevel);
870 has_loglevel = 1;
871 }
872 if (!lttng_loglevel_match(loglevel,
873 has_loglevel,
874 enabler->event_param.loglevel_type,
875 enabler->event_param.loglevel))
876 return 0;
877 return 1;
878}
879
880static
881int lttng_desc_match_enabler(const struct lttng_event_desc *desc,
882 struct lttng_enabler *enabler)
883{
884 switch (enabler->format_type) {
885 case LTTNG_ENABLER_FORMAT_STAR_GLOB:
886 {
887 struct lttng_ust_excluder_node *excluder;
888
889 if (!lttng_desc_match_star_glob_enabler(desc, enabler)) {
890 return 0;
891 }
892
893 /*
894 * If the matching event matches with an excluder,
895 * return 'does not match'
896 */
897 cds_list_for_each_entry(excluder, &enabler->excluder_head, node) {
898 int count;
899
900 for (count = 0; count < excluder->excluder.count; count++) {
901 int len;
902 char *excluder_name;
903
904 excluder_name = (char *) (excluder->excluder.names)
905 + count * LTTNG_UST_SYM_NAME_LEN;
906 len = strnlen(excluder_name, LTTNG_UST_SYM_NAME_LEN);
1f6f42e6 907 if (len > 0 && strutils_star_glob_match(excluder_name, len, desc->name, SIZE_MAX))
196ec2df 908 return 0;
909 }
910 }
196ec2df 911 return 1;
ed5b5bbd 912 }
d5fc3224 913 case LTTNG_ENABLER_FORMAT_EVENT:
914 return lttng_desc_match_event_enabler(desc, enabler);
915 default:
916 return -EINVAL;
917 }
918}
919
920static
921int lttng_event_enabler_match_event(struct lttng_event_enabler *event_enabler,
922 struct lttng_event *event)
923{
924 if (lttng_desc_match_enabler(event->desc,
925 lttng_event_enabler_as_enabler(event_enabler))
926 && event->chan == event_enabler->chan)
927 return 1;
928 else
929 return 0;
930}
931
932static
933int lttng_trigger_enabler_match_trigger(
934 struct lttng_trigger_enabler *trigger_enabler,
935 struct lttng_trigger *trigger)
e58095ef 936{
937 int desc_matches = lttng_desc_match_enabler(trigger->desc,
938 lttng_trigger_enabler_as_enabler(trigger_enabler));
939
940 if (desc_matches && trigger->group == trigger_enabler->group &&
941 trigger->id == trigger_enabler->id)
942 return 1;
943 else
944 return 0;
945}
946
947static
948struct lttng_enabler_ref *lttng_enabler_ref(
949 struct cds_list_head *enabler_ref_list,
e58095ef
MD
950 struct lttng_enabler *enabler)
951{
952 struct lttng_enabler_ref *enabler_ref;
953
d5fc3224 954 cds_list_for_each_entry(enabler_ref, enabler_ref_list, node) {
955 if (enabler_ref->ref == enabler)
956 return enabler_ref;
957 }
958 return NULL;
959}
960
8020ceb5 961/*
962 * Create struct lttng_event if it is missing and present in the list of
963 * tracepoint probes.
8020ceb5 964 */
e58095ef 965static
d5fc3224 966void lttng_create_event_if_missing(struct lttng_event_enabler *event_enabler)
8020ceb5 967{
d5fc3224 968 struct lttng_session *session = event_enabler->chan->session;
969 struct lttng_probe_desc *probe_desc;
970 const struct lttng_event_desc *desc;
971 struct lttng_event *event;
972 int i;
973 struct cds_list_head *probe_list;
974
975 probe_list = lttng_get_probe_list_head();
976 /*
977 * For each probe event, if we find that a probe event matches
978 * our enabler, create an associated lttng_event if not
979 * already present.
980 */
981 cds_list_for_each_entry(probe_desc, probe_list, head) {
982 for (i = 0; i < probe_desc->nr_events; i++) {
98a97f24
FD
983 int ret;
984 bool found = false;
d56fa719
MD
985 struct cds_hlist_head *head;
986 struct cds_hlist_node *node;
987
988 desc = probe_desc->event_desc[i];
989 if (!lttng_desc_match_enabler(desc,
990 lttng_event_enabler_as_enabler(event_enabler)))
991 continue;
992
993 head = borrow_hash_table_bucket(
994 session->events_ht.table,
995 LTTNG_UST_EVENT_HT_SIZE, desc);
996
d56fa719 997 cds_hlist_for_each_entry(event, node, head, hlist) {
d970f72e 998 if (event->desc == desc
d5fc3224 999 && event->chan == event_enabler->chan) {
98a97f24 1000 found = true;
1001 break;
1002 }
1003 }
1004 if (found)
1005 continue;
1006
1007 /*
1008 * We need to create an event for this
1009 * event probe.
1010 */
1011 ret = lttng_event_create(probe_desc->event_desc[i],
d5fc3224 1012 event_enabler->chan);
e58095ef 1013 if (ret) {
1014 DBG("Unable to create event %s, error %d\n",
1015 probe_desc->event_desc[i]->name, ret);
e58095ef 1016 }
8165c8da 1017 }
8020ceb5 1018 }
1019}
1020
1021static
1022void probe_provider_event_for_each(struct lttng_probe_desc *provider_desc,
1023 void (*event_func)(struct lttng_session *session,
1024 struct lttng_event *event),
1025 void (*trigger_func)(struct lttng_trigger *trigger))
1026{
1027 struct cds_hlist_node *node, *tmp_node;
1028 struct cds_list_head *sessionsp;
d5fc3224 1029 unsigned int i;
1030
1031 /* Get handle on list of sessions. */
1032 sessionsp = _lttng_get_sessions();
1033
1034 /*
 1035 * Iterate over all events in the probe provider description; in every
 1036 * session and trigger group, apply the given callback to the matching event or trigger.
1037 */
1038 for (i = 0; i < provider_desc->nr_events; i++) {
1039 const struct lttng_event_desc *event_desc;
1040 struct lttng_trigger_group *trigger_group;
1041 struct lttng_trigger *trigger;
1042 struct lttng_session *session;
1043 struct cds_hlist_head *head;
1044 struct lttng_event *event;
1045
1046 event_desc = provider_desc->event_desc[i];
35ac38cb 1047
1048 /*
1049 * Iterate over all session to find the current event
1050 * description.
1051 */
1052 cds_list_for_each_entry(session, sessionsp, node) {
1053 /*
1054 * Get the list of events in the hashtable bucket and
1055 * iterate to find the event matching this descriptor.
35ac38cb 1056 */
1057 head = borrow_hash_table_bucket(
1058 session->events_ht.table,
1059 LTTNG_UST_EVENT_HT_SIZE, event_desc);
1060
1061 cds_hlist_for_each_entry_safe(event, node, tmp_node, head, hlist) {
35ac38cb 1062 if (event_desc == event->desc) {
1063 event_func(session, event);
1064 break;
1065 }
1066 }
1067 }
1068
1069 /*
1070 * Iterate over all trigger groups to find the current event
1071 * description.
1072 */
1073 cds_list_for_each_entry(trigger_group, &trigger_groups, node) {
1074 /*
1075 * Get the list of triggers in the hashtable bucket and
1076 * iterate to find the trigger matching this
1077 * descriptor.
1078 */
1079 head = borrow_hash_table_bucket(
1080 trigger_group->triggers_ht.table,
1081 LTTNG_UST_TRIGGER_HT_SIZE, event_desc);
1082
1083 cds_hlist_for_each_entry_safe(trigger, node, tmp_node, head, hlist) {
1084 if (event_desc == trigger->desc) {
1085 trigger_func(trigger);
1086 break;
1087 }
1088 }
1089 }
1090 }
1091}
1092
1093static
1094void _unregister_event(struct lttng_session *session,
1095 struct lttng_event *event)
1096{
1097 _lttng_event_unregister(event);
1098}
1099
1100static
1101void _event_enum_destroy(struct lttng_session *session,
1102 struct lttng_event *event)
1103{
1104 unsigned int i;
1105
1106 /* Destroy enums of the current event. */
1107 for (i = 0; i < event->desc->nr_fields; i++) {
1108 const struct lttng_enum_desc *enum_desc;
1109 const struct lttng_event_field *field;
1110 struct lttng_enum *curr_enum;
1111
1112 field = &(event->desc->fields[i]);
1113 if (field->type.atype != atype_enum) {
1114 continue;
1115 }
1116
1117 enum_desc = field->type.u.basic.enumeration.desc;
1118 curr_enum = lttng_ust_enum_get_from_desc(session, enum_desc);
1119 if (curr_enum) {
1120 _lttng_enum_destroy(curr_enum);
1121 }
1122 }
1123
1124 /* Destroy event. */
1125 _lttng_event_destroy(event);
1126}
1127
1128/*
1129 * Iterate over all the UST sessions to unregister and destroy all probes from
 1130 * the probe provider descriptor received as argument. Must be called with the
1131 * ust_lock held.
1132 */
1133void lttng_probe_provider_unregister_events(
1134 struct lttng_probe_desc *provider_desc)
1135{
1136 /*
1137 * Iterate over all events in the probe provider descriptions and sessions
1138 * to queue the unregistration of the events.
1139 */
1140 probe_provider_event_for_each(provider_desc, _unregister_event,
1141 _lttng_trigger_unregister);
1142
1143 /* Wait for grace period. */
1144 synchronize_trace();
1145 /* Prune the unregistration queue. */
1146 __tracepoint_probe_prune_release_queue();
1147
1148 /*
1149 * It is now safe to destroy the events and remove them from the event list
1150 * and hashtables.
1151 */
1152 probe_provider_event_for_each(provider_desc, _event_enum_destroy,
1153 _lttng_trigger_destroy);
1154}
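/*
 * The two passes above follow the usual RCU-style teardown: unregister every
 * matching event and trigger from the tracepoint probes, wait for in-flight
 * probe callbacks to drain with synchronize_trace(), and only then destroy
 * and unlink the objects.
 */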
1155
8020ceb5 1156/*
d5fc3224 1157 * Create events associated with an event enabler (if not already present),
e58095ef 1158 * and add backward reference from the event to the enabler.
1159 */
1160static
d5fc3224 1161int lttng_event_enabler_ref_events(struct lttng_event_enabler *event_enabler)
8020ceb5 1162{
d5fc3224 1163 struct lttng_session *session = event_enabler->chan->session;
1164 struct lttng_event *event;
1165
1166 /* First ensure that probe events are created for this enabler. */
d5fc3224 1167 lttng_create_event_if_missing(event_enabler);
1168
1169 /* For each event matching enabler in session event list. */
1170 cds_list_for_each_entry(event, &session->events_head, node) {
1171 struct lttng_enabler_ref *enabler_ref;
1172
d5fc3224 1173 if (!lttng_event_enabler_match_event(event_enabler, event))
1174 continue;
1175
1176 enabler_ref = lttng_enabler_ref(&event->enablers_ref_head,
1177 lttng_event_enabler_as_enabler(event_enabler));
1178 if (!enabler_ref) {
1179 /*
1180 * If no backward ref, create it.
1181 * Add backward ref from event to enabler.
1182 */
1183 enabler_ref = zmalloc(sizeof(*enabler_ref));
1184 if (!enabler_ref)
1185 return -ENOMEM;
d5fc3224 1186 enabler_ref->ref = lttng_event_enabler_as_enabler(event_enabler);
1187 cds_list_add(&enabler_ref->node,
1188 &event->enablers_ref_head);
8165c8da 1189 }
1190
1191 /*
1192 * Link filter bytecodes if not linked yet.
1193 */
d5fc3224
FD
1194 lttng_enabler_link_bytecode(event->desc,
1195 &session->ctx,
1196 &event->bytecode_runtime_head,
1197 lttng_event_enabler_as_enabler(event_enabler));
1198
1199 /* TODO: merge event context. */
1200 }
1201 return 0;
1202}
1203
1204/*
1205 * Called at library load: connect the probe on all enablers matching
1206 * this event.
5f733922 1207 * Called with session mutex held.
e58095ef 1208 */
5f733922 1209int lttng_fix_pending_events(void)
1210{
1211 struct lttng_session *session;
1212
1213 cds_list_for_each_entry(session, &sessions, node) {
1214 lttng_session_lazy_sync_event_enablers(session);
1215 }
1216 return 0;
1217}
1218
1219int lttng_fix_pending_triggers(void)
1220{
1221 struct lttng_trigger_group *trigger_group;
1222
1223 cds_list_for_each_entry(trigger_group, &trigger_groups, node) {
1224 lttng_trigger_group_sync_enablers(trigger_group);
8020ceb5 1225 }
1226 return 0;
1227}
1228
246be17e 1229/*
1230 * For each session of the owner thread, execute pending statedump.
1231 * Only dump state for the sessions owned by the caller thread, because
1232 * we don't keep ust_lock across the entire iteration.
246be17e 1233 */
3327ac33 1234void lttng_handle_pending_statedump(void *owner)
1235{
1236 struct lttng_session *session;
1237
37dddb65 1238 /* Execute state dump */
cf73e0fe 1239 do_lttng_ust_statedump(owner);
1240
1241 /* Clear pending state dump */
1242 if (ust_lock()) {
1243 goto end;
1244 }
246be17e 1245 cds_list_for_each_entry(session, &sessions, node) {
1246 if (session->owner != owner)
1247 continue;
1248 if (!session->statedump_pending)
1249 continue;
1250 session->statedump_pending = 0;
246be17e 1251 }
3327ac33 1252end:
37dddb65 1253 ust_unlock();
3327ac33 1254 return;
1255}
1256
1257/*
1258 * Only used internally at session destruction.
1259 */
1260static
1261void _lttng_event_destroy(struct lttng_event *event)
1262{
1263 struct lttng_enabler_ref *enabler_ref, *tmp_enabler_ref;
1264
35ac38cb 1265 /* Remove from event list. */
e58095ef 1266 cds_list_del(&event->node);
1267 /* Remove from event hash table. */
1268 cds_hlist_del(&event->hlist);
1269
8020ceb5 1270 lttng_destroy_context(event->ctx);
f488575f 1271 lttng_free_event_filter_runtime(event);
1272 /* Free event enabler refs */
1273 cds_list_for_each_entry_safe(enabler_ref, tmp_enabler_ref,
1274 &event->enablers_ref_head, node)
1275 free(enabler_ref);
b5234c06 1276 free(event);
1277}
1278
1279static
1280void _lttng_enum_destroy(struct lttng_enum *_enum)
1281{
1282 cds_list_del(&_enum->node);
35ac38cb 1283 cds_hlist_del(&_enum->hlist);
1284 free(_enum);
1285}
1286
003fedf4 1287void lttng_ust_events_exit(void)
8020ceb5 1288{
7dd08bec 1289 struct lttng_session *session, *tmpsession;
8020ceb5 1290
e58095ef 1291 cds_list_for_each_entry_safe(session, tmpsession, &sessions, node)
7dd08bec 1292 lttng_session_destroy(session);
8020ceb5 1293}
457a6b58 1294
1295/*
1296 * Enabler management.
1297 */
1298struct lttng_event_enabler *lttng_event_enabler_create(
1299 enum lttng_enabler_format_type format_type,
1300 struct lttng_ust_event *event_param,
1301 struct lttng_channel *chan)
457a6b58 1302{
d5fc3224 1303 struct lttng_event_enabler *event_enabler;
e58095ef 1304
1305 event_enabler = zmalloc(sizeof(*event_enabler));
1306 if (!event_enabler)
e58095ef 1307 return NULL;
1308 event_enabler->base.format_type = format_type;
1309 CDS_INIT_LIST_HEAD(&event_enabler->base.filter_bytecode_head);
1310 CDS_INIT_LIST_HEAD(&event_enabler->base.excluder_head);
1311 memcpy(&event_enabler->base.event_param, event_param,
1312 sizeof(event_enabler->base.event_param));
1313 event_enabler->chan = chan;
e58095ef 1314 /* ctx left NULL */
1315 event_enabler->base.enabled = 0;
1316 cds_list_add(&event_enabler->node, &event_enabler->chan->session->enablers_head);
1317 lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
1318
1319 return event_enabler;
457a6b58
MD
1320}
1321
d5fc3224
FD
1322struct lttng_trigger_enabler *lttng_trigger_enabler_create(
1323 struct lttng_trigger_group *trigger_group,
1324 enum lttng_enabler_format_type format_type,
1325 struct lttng_ust_trigger *trigger_param)
457a6b58 1326{
1327 struct lttng_trigger_enabler *trigger_enabler;
1328
1329 trigger_enabler = zmalloc(sizeof(*trigger_enabler));
1330 if (!trigger_enabler)
1331 return NULL;
1332 trigger_enabler->base.format_type = format_type;
1333 CDS_INIT_LIST_HEAD(&trigger_enabler->base.filter_bytecode_head);
1334 CDS_INIT_LIST_HEAD(&trigger_enabler->base.excluder_head);
1335
1336 trigger_enabler->id = trigger_param->id;
1337
1338 memcpy(&trigger_enabler->base.event_param.name, trigger_param->name,
1339 sizeof(trigger_enabler->base.event_param.name));
1340 trigger_enabler->base.event_param.instrumentation = trigger_param->instrumentation;
1341 trigger_enabler->base.event_param.loglevel = trigger_param->loglevel;
1342 trigger_enabler->base.event_param.loglevel_type = trigger_param->loglevel_type;
1343
1344 trigger_enabler->base.enabled = 0;
1345 trigger_enabler->group = trigger_group;
1346
1347 cds_list_add(&trigger_enabler->node, &trigger_group->enablers_head);
1348
1349 lttng_trigger_group_sync_enablers(trigger_group);
1350
1351 return trigger_enabler;
1352}
1353
1354int lttng_event_enabler_enable(struct lttng_event_enabler *event_enabler)
1355{
1356 lttng_event_enabler_as_enabler(event_enabler)->enabled = 1;
1357 lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
1358
e58095ef 1359 return 0;
457a6b58 1360}
457a6b58 1361
d5fc3224 1362int lttng_event_enabler_disable(struct lttng_event_enabler *event_enabler)
457a6b58 1363{
1364 lttng_event_enabler_as_enabler(event_enabler)->enabled = 0;
1365 lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
1366
1367 return 0;
1368}
457a6b58 1369
1370static
1371void _lttng_enabler_attach_bytecode(struct lttng_enabler *enabler,
1372 struct lttng_ust_filter_bytecode_node *bytecode)
1373{
1374 bytecode->enabler = enabler;
1375 cds_list_add_tail(&bytecode->node, &enabler->filter_bytecode_head);
1376}
1377
1378int lttng_event_enabler_attach_bytecode(struct lttng_event_enabler *event_enabler,
1379 struct lttng_ust_filter_bytecode_node *bytecode)
1380{
1381 _lttng_enabler_attach_bytecode(
1382 lttng_event_enabler_as_enabler(event_enabler), bytecode);
1383
1384 lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
e58095ef 1385 return 0;
1386}
1387
1388static
1389void _lttng_enabler_attach_exclusion(struct lttng_enabler *enabler,
1390 struct lttng_ust_excluder_node *excluder)
1391{
1392 excluder->enabler = enabler;
1393 cds_list_add_tail(&excluder->node, &enabler->excluder_head);
1394}
1395
1396int lttng_event_enabler_attach_exclusion(struct lttng_event_enabler *event_enabler,
1397 struct lttng_ust_excluder_node *excluder)
1398{
1399 _lttng_enabler_attach_exclusion(
1400 lttng_event_enabler_as_enabler(event_enabler), excluder);
1401
1402 lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
1403 return 0;
1404}
1405
1406int lttng_trigger_enabler_enable(struct lttng_trigger_enabler *trigger_enabler)
1407{
1408 lttng_trigger_enabler_as_enabler(trigger_enabler)->enabled = 1;
1409 lttng_trigger_group_sync_enablers(trigger_enabler->group);
1410
1411 return 0;
1412}
1413
1414int lttng_trigger_enabler_disable(struct lttng_trigger_enabler *trigger_enabler)
1415{
1416 lttng_trigger_enabler_as_enabler(trigger_enabler)->enabled = 0;
1417 lttng_trigger_group_sync_enablers(trigger_enabler->group);
1418
1419 return 0;
1420}
1421
 1422int lttng_trigger_enabler_attach_bytecode(struct lttng_trigger_enabler *trigger_enabler,
1423 struct lttng_ust_filter_bytecode_node *bytecode)
1424{
1425 _lttng_enabler_attach_bytecode(
1426 lttng_trigger_enabler_as_enabler(trigger_enabler), bytecode);
1427
1428 lttng_trigger_group_sync_enablers(trigger_enabler->group);
1429 return 0;
1430}
1431
1432int lttng_trigger_enabler_attach_exclusion(
1433 struct lttng_trigger_enabler *trigger_enabler,
1434 struct lttng_ust_excluder_node *excluder)
1435{
1436 _lttng_enabler_attach_exclusion(
1437 lttng_trigger_enabler_as_enabler(trigger_enabler), excluder);
1438
1439 lttng_trigger_group_sync_enablers(trigger_enabler->group);
0bfb5cbd 1440 return 0;
e58095ef 1441}
f488575f 1442
e58095ef 1443int lttng_attach_context(struct lttng_ust_context *context_param,
8e696cfa 1444 union ust_args *uargs,
1445 struct lttng_ctx **ctx, struct lttng_session *session)
1446{
457a6b58 1447 /*
1448 * We cannot attach a context after trace has been started for a
1449 * session because the metadata does not allow expressing this
1450 * information outside of the original channel scope.
457a6b58 1451 */
1452 if (session->been_active)
1453 return -EPERM;
457a6b58 1454
1455 switch (context_param->ctx) {
1456 case LTTNG_UST_CONTEXT_PTHREAD_ID:
1457 return lttng_add_pthread_id_to_ctx(ctx);
1458 case LTTNG_UST_CONTEXT_PERF_THREAD_COUNTER:
1459 {
1460 struct lttng_ust_perf_counter_ctx *perf_ctx_param;
1461
1462 perf_ctx_param = &context_param->u.perf_counter;
1463 return lttng_add_perf_counter_to_ctx(
1464 perf_ctx_param->type,
1465 perf_ctx_param->config,
1466 perf_ctx_param->name,
1467 ctx);
1468 }
1469 case LTTNG_UST_CONTEXT_VTID:
1470 return lttng_add_vtid_to_ctx(ctx);
1471 case LTTNG_UST_CONTEXT_VPID:
1472 return lttng_add_vpid_to_ctx(ctx);
1473 case LTTNG_UST_CONTEXT_PROCNAME:
1474 return lttng_add_procname_to_ctx(ctx);
1475 case LTTNG_UST_CONTEXT_IP:
1476 return lttng_add_ip_to_ctx(ctx);
1477 case LTTNG_UST_CONTEXT_CPU_ID:
1478 return lttng_add_cpu_id_to_ctx(ctx);
1479 case LTTNG_UST_CONTEXT_APP_CONTEXT:
1480 return lttng_ust_add_app_context_to_ctx_rcu(uargs->app_context.ctxname,
1481 ctx);
1482 case LTTNG_UST_CONTEXT_CGROUP_NS:
1483 return lttng_add_cgroup_ns_to_ctx(ctx);
1484 case LTTNG_UST_CONTEXT_IPC_NS:
1485 return lttng_add_ipc_ns_to_ctx(ctx);
1486 case LTTNG_UST_CONTEXT_MNT_NS:
1487 return lttng_add_mnt_ns_to_ctx(ctx);
1488 case LTTNG_UST_CONTEXT_NET_NS:
1489 return lttng_add_net_ns_to_ctx(ctx);
1490 case LTTNG_UST_CONTEXT_PID_NS:
1491 return lttng_add_pid_ns_to_ctx(ctx);
1492 case LTTNG_UST_CONTEXT_USER_NS:
1493 return lttng_add_user_ns_to_ctx(ctx);
1494 case LTTNG_UST_CONTEXT_UTS_NS:
1495 return lttng_add_uts_ns_to_ctx(ctx);
1496 case LTTNG_UST_CONTEXT_VUID:
1497 return lttng_add_vuid_to_ctx(ctx);
1498 case LTTNG_UST_CONTEXT_VEUID:
1499 return lttng_add_veuid_to_ctx(ctx);
1500 case LTTNG_UST_CONTEXT_VSUID:
1501 return lttng_add_vsuid_to_ctx(ctx);
1502 case LTTNG_UST_CONTEXT_VGID:
1503 return lttng_add_vgid_to_ctx(ctx);
1504 case LTTNG_UST_CONTEXT_VEGID:
1505 return lttng_add_vegid_to_ctx(ctx);
1506 case LTTNG_UST_CONTEXT_VSGID:
1507 return lttng_add_vsgid_to_ctx(ctx);
1508 default:
1509 return -EINVAL;
457a6b58 1510 }
1511}
1512
d5fc3224 1513int lttng_event_enabler_attach_context(struct lttng_event_enabler *enabler,
e58095ef 1514 struct lttng_ust_context *context_param)
457a6b58 1515{
e58095ef 1516 return -ENOSYS;
457a6b58
MD
1517}
1518
d5fc3224 1519void lttng_event_enabler_destroy(struct lttng_event_enabler *event_enabler)
457a6b58 1520{
1521 if (!event_enabler) {
1522 return;
0f63324a 1523 }
d5fc3224 1524 cds_list_del(&event_enabler->node);
0f63324a 1525
d5fc3224 1526 lttng_enabler_destroy(lttng_event_enabler_as_enabler(event_enabler));
e58095ef 1527
1528 lttng_destroy_context(event_enabler->ctx);
1529 free(event_enabler);
1530}
1531
e58095ef 1532/*
d5fc3224 1533 * lttng_session_sync_event_enablers should be called just before starting a
1534 * session.
1535 */
457a6b58 1536static
d5fc3224 1537void lttng_session_sync_event_enablers(struct lttng_session *session)
457a6b58 1538{
d5fc3224 1539 struct lttng_event_enabler *event_enabler;
e58095ef 1540 struct lttng_event *event;
457a6b58 1541
1542 cds_list_for_each_entry(event_enabler, &session->enablers_head, node)
1543 lttng_event_enabler_ref_events(event_enabler);
1544 /*
1545 * For each event, if at least one of its enablers is enabled,
ac6b4ac6
MD
1546 * and its channel and session transient states are enabled, we
1547 * enable the event, else we disable it.
e58095ef
MD
1548 */
1549 cds_list_for_each_entry(event, &session->events_head, node) {
1550 struct lttng_enabler_ref *enabler_ref;
1551 struct lttng_bytecode_runtime *runtime;
dcdeaff0 1552 int enabled = 0, has_enablers_without_bytecode = 0;
1553
1554 /* Enable events */
1555 cds_list_for_each_entry(enabler_ref,
1556 &event->enablers_ref_head, node) {
1557 if (enabler_ref->ref->enabled) {
1558 enabled = 1;
1559 break;
1560 }
1561 }
1562 /*
1563 * Enabled state is based on union of enablers, with
 1564 * intersection of session and channel transient enable
1565 * states.
1566 */
1567 enabled = enabled && session->tstate && event->chan->tstate;
1568
1569 CMM_STORE_SHARED(event->enabled, enabled);
1570 /*
1571 * Sync tracepoint registration with event enabled
1572 * state.
1573 */
1574 if (enabled) {
1575 if (!event->registered)
1576 register_event(event);
1577 } else {
1578 if (event->registered)
1579 unregister_event(event);
1580 }
457a6b58 1581
1f49fc05 1582 /* Check if has enablers without bytecode enabled */
1583 cds_list_for_each_entry(enabler_ref,
1584 &event->enablers_ref_head, node) {
1585 if (enabler_ref->ref->enabled
1586 && cds_list_empty(&enabler_ref->ref->filter_bytecode_head)) {
1587 has_enablers_without_bytecode = 1;
1588 break;
1589 }
1590 }
1591 event->has_enablers_without_bytecode =
1592 has_enablers_without_bytecode;
1593
1594 /* Enable filters */
1595 cds_list_for_each_entry(runtime,
1596 &event->bytecode_runtime_head, node) {
1597 lttng_filter_sync_state(runtime);
1598 }
1599 }
baa1e0bc 1600 __tracepoint_probe_prune_release_queue();
1601}
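/*
 * The effective state computed above is the conjunction of three inputs:
 *
 *	enabled = (at least one enabler_ref enabled)
 *			&& session->tstate && event->chan->tstate;
 *
 * so stopping the session or disabling the channel turns events off at the
 * next sync even if their enablers remain enabled.
 */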
1602
1603static
1604void lttng_create_trigger_if_missing(struct lttng_trigger_enabler *trigger_enabler)
1605{
1606 struct lttng_trigger_group *trigger_group = trigger_enabler->group;
1607 struct lttng_probe_desc *probe_desc;
1608 struct cds_list_head *probe_list;
1609 int i;
1610
1611 probe_list = lttng_get_probe_list_head();
1612
1613 cds_list_for_each_entry(probe_desc, probe_list, head) {
1614 for (i = 0; i < probe_desc->nr_events; i++) {
1615 int ret;
1616 bool found = false;
1617 const struct lttng_event_desc *desc;
1618 struct lttng_trigger *trigger;
1619 struct cds_hlist_head *head;
1620 struct cds_hlist_node *node;
1621
1622 desc = probe_desc->event_desc[i];
1623 if (!lttng_desc_match_enabler(desc,
1624 lttng_trigger_enabler_as_enabler(trigger_enabler)))
1625 continue;
1626
1627 /*
1628 * Given the current trigger group, get the bucket that
1629 * the target trigger would be if it was already
1630 * created.
1631 */
1632 head = borrow_hash_table_bucket(
1633 trigger_group->triggers_ht.table,
1634 LTTNG_UST_TRIGGER_HT_SIZE, desc);
1635
1636 cds_hlist_for_each_entry(trigger, node, head, hlist) {
1637 /*
1638 * Check if trigger already exists by checking
1639 * if the trigger and enabler share the same
1640 * description and id.
1641 */
1642 if (trigger->desc == desc &&
1643 trigger->id == trigger_enabler->id) {
1644 found = true;
1645 break;
1646 }
1647 }
1648
1649 if (found)
1650 continue;
1651
1652 /*
1653 * We need to create a trigger for this event probe.
1654 */
1655 ret = lttng_trigger_create(desc, trigger_enabler->id,
1656 trigger_group);
1657 if (ret) {
1658 DBG("Unable to create trigger %s, error %d\n",
1659 probe_desc->event_desc[i]->name, ret);
1660 }
1661 }
1662 }
1663}
1664
1665void lttng_trigger_send_notification(struct lttng_trigger *trigger)
1666{
1667 /*
1668 * We want this write to be atomic AND non-blocking, meaning that we
1669 * want to write either everything OR nothing.
 1670 * According to `pipe(7)`, writes that are smaller than the `PIPE_BUF`
1671 * value must be atomic, so we assert that the message we send is less
1672 * than PIPE_BUF.
1673 */
1674 struct lttng_ust_trigger_notification notif;
1675 ssize_t ret;
1676
1677 assert(trigger);
1678 assert(trigger->group);
1679 assert(sizeof(notif) <= PIPE_BUF);
1680
1681 notif.id = trigger->id;
1682
1683 ret = patient_write(trigger->group->notification_fd, &notif,
1684 sizeof(notif));
1685 if (ret == -1) {
1686 if (errno == EAGAIN) {
1687 DBG("Cannot send trigger notification without blocking: %s",
1688 strerror(errno));
1689 } else {
1690 DBG("Error to sending trigger notification: %s",
1691 strerror(errno));
1692 abort();
1693 }
1694 }
1695}
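/*
 * A minimal sketch of the message written above, assuming struct
 * lttng_ust_trigger_notification (declared elsewhere in this tree) only
 * needs its id field filled in here:
 *
 *	struct lttng_ust_trigger_notification notif = { .id = trigger->id };
 *	patient_write(trigger->group->notification_fd, &notif, sizeof(notif));
 *
 * The write is dropped rather than retried on EAGAIN, so a slow listener
 * cannot block the traced application.
 */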
1696
1697/*
1698 * Create triggers associated with a trigger enabler (if not already present).
1699 */
1700static
1701int lttng_trigger_enabler_ref_triggers(struct lttng_trigger_enabler *trigger_enabler)
1702{
1703 struct lttng_trigger_group *trigger_group = trigger_enabler->group;
1704 struct lttng_trigger *trigger;
1705
1706 /* First, ensure that probe triggers are created for this enabler. */
1707 lttng_create_trigger_if_missing(trigger_enabler);
1708
1709 /* Link the created trigger with its associated enabler. */
1710 cds_list_for_each_entry(trigger, &trigger_group->triggers_head, node) {
1711 struct lttng_enabler_ref *enabler_ref;
1712
1713 if (!lttng_trigger_enabler_match_trigger(trigger_enabler, trigger))
1714 continue;
1715
1716 enabler_ref = lttng_enabler_ref(&trigger->enablers_ref_head,
1717 lttng_trigger_enabler_as_enabler(trigger_enabler));
1718 if (!enabler_ref) {
1719 /*
1720 * If no backward ref, create it.
1721 * Add backward ref from trigger to enabler.
1722 */
1723 enabler_ref = zmalloc(sizeof(*enabler_ref));
1724 if (!enabler_ref)
1725 return -ENOMEM;
1726
1727 enabler_ref->ref = lttng_trigger_enabler_as_enabler(
1728 trigger_enabler);
1729 cds_list_add(&enabler_ref->node,
1730 &trigger->enablers_ref_head);
1731 }
1732
1733 /*
1734 * Link filter bytecodes if not linked yet.
1735 */
1736 lttng_enabler_link_bytecode(trigger->desc,
1737 &trigger_group->ctx, &trigger->bytecode_runtime_head,
1738 lttng_trigger_enabler_as_enabler(trigger_enabler));
1739 }
1740 return 0;
1741}
1742
1743static
1744void lttng_trigger_group_sync_enablers(struct lttng_trigger_group *trigger_group)
1745{
1746 struct lttng_trigger_enabler *trigger_enabler;
1747 struct lttng_trigger *trigger;
1748
1749 cds_list_for_each_entry(trigger_enabler, &trigger_group->enablers_head, node) {
1750 /*
1751 * Only link enablers that are enabled to triggers, the user
1752 * might still be attaching filter or exclusion to the
1753 * trigger_enabler.
1754 */
1755 if (!lttng_trigger_enabler_as_enabler(trigger_enabler)->enabled)
1756 continue;
1757
1758 lttng_trigger_enabler_ref_triggers(trigger_enabler);
1759 }
1760
1761 /*
1762 * For each trigger, if at least one of its enablers is enabled,
1763 * we enable the trigger, else we disable it.
1764 */
1765 cds_list_for_each_entry(trigger, &trigger_group->triggers_head, node) {
1766 struct lttng_enabler_ref *enabler_ref;
1767 struct lttng_bytecode_runtime *runtime;
1768 int enabled = 0, has_enablers_without_bytecode = 0;
1769
1770 /* Enable triggers */
1771 cds_list_for_each_entry(enabler_ref,
1772 &trigger->enablers_ref_head, node) {
1773 if (enabler_ref->ref->enabled) {
1774 enabled = 1;
1775 break;
1776 }
1777 }
1778
1779 CMM_STORE_SHARED(trigger->enabled, enabled);
1780 /*
1781 * Sync tracepoint registration with trigger enabled
1782 * state.
1783 */
1784 if (enabled) {
1785 if (!trigger->registered)
1786 register_trigger(trigger);
1787 } else {
1788 if (trigger->registered)
1789 unregister_trigger(trigger);
1790 }
1791
1792 /* Check if has enablers without bytecode enabled */
1793 cds_list_for_each_entry(enabler_ref,
1794 &trigger->enablers_ref_head, node) {
1795 if (enabler_ref->ref->enabled
1796 && cds_list_empty(&enabler_ref->ref->filter_bytecode_head)) {
1797 has_enablers_without_bytecode = 1;
1798 break;
1799 }
1800 }
1801 trigger->has_enablers_without_bytecode =
1802 has_enablers_without_bytecode;
1803
1804 /* Enable filters */
1805 cds_list_for_each_entry(runtime,
1806 &trigger->bytecode_runtime_head, node) {
1807 lttng_filter_sync_state(runtime);
1808 }
1809 }
1810 __tracepoint_probe_prune_release_queue();
1811}
1812
1813/*
1814 * Apply enablers to session events, adding events to session if need
1815 * be. It is required after each modification applied to an active
1816 * session, and right before session "start".
1817 * "lazy" sync means we only sync if required.
1818 */
1819static
d5fc3224 1820void lttng_session_lazy_sync_event_enablers(struct lttng_session *session)
457a6b58 1821{
1822 /* We can skip if session is not active */
1823 if (!session->active)
1824 return;
d5fc3224 1825 lttng_session_sync_event_enablers(session);
457a6b58 1826}
1827
1828/*
1829 * Update all sessions with the given app context.
1830 * Called with ust lock held.
1831 * This is invoked when an application context gets loaded/unloaded. It
1832 * ensures the context callbacks are in sync with the application
1833 * context (either app context callbacks, or dummy callbacks).
1834 */
1835void lttng_ust_context_set_session_provider(const char *name,
1836 size_t (*get_size)(struct lttng_ctx_field *field, size_t offset),
1837 void (*record)(struct lttng_ctx_field *field,
1838 struct lttng_ust_lib_ring_buffer_ctx *ctx,
1839 struct lttng_channel *chan),
1840 void (*get_value)(struct lttng_ctx_field *field,
1841 struct lttng_ctx_value *value))
1842{
1843 struct lttng_session *session;
1844
1845 cds_list_for_each_entry(session, &sessions, node) {
1846 struct lttng_channel *chan;
1847 struct lttng_event *event;
1848 int ret;
1849
1850 ret = lttng_ust_context_set_provider_rcu(&session->ctx,
1851 name, get_size, record, get_value);
1852 if (ret)
1853 abort();
1854 cds_list_for_each_entry(chan, &session->chan_head, node) {
1855 ret = lttng_ust_context_set_provider_rcu(&chan->ctx,
1856 name, get_size, record, get_value);
1857 if (ret)
1858 abort();
1859 }
1860 cds_list_for_each_entry(event, &session->events_head, node) {
1861 ret = lttng_ust_context_set_provider_rcu(&event->ctx,
1862 name, get_size, record, get_value);
1863 if (ret)
1864 abort();
1865 }
1866 }
1867}
1868
1869/*
1870 * Update all trigger groups with the given app context.
1871 * Called with ust lock held.
1872 * This is invoked when an application context gets loaded/unloaded. It
1873 * ensures the context callbacks are in sync with the application
1874 * context (either app context callbacks, or dummy callbacks).
1875 */
1876void lttng_ust_context_set_trigger_group_provider(const char *name,
1877 size_t (*get_size)(struct lttng_ctx_field *field, size_t offset),
1878 void (*record)(struct lttng_ctx_field *field,
1879 struct lttng_ust_lib_ring_buffer_ctx *ctx,
1880 struct lttng_channel *chan),
1881 void (*get_value)(struct lttng_ctx_field *field,
1882 struct lttng_ctx_value *value))
1883{
1884 struct lttng_trigger_group *trigger_group;
1885
1886 cds_list_for_each_entry(trigger_group, &trigger_groups, node) {
1887 int ret;
1888
1889 ret = lttng_ust_context_set_provider_rcu(&trigger_group->ctx,
1890 name, get_size, record, get_value);
1891 if (ret)
1892 abort();
1893 }
1894}