/*
 * lttng-ust ABI
 *
 * Copyright 2010-2011 (c) - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Mimic system calls for:
 * - session creation, returns an object descriptor or failure.
 *   - channel creation, returns an object descriptor or failure.
 *     - Operates on a session object descriptor
 *     - Takes all channel options as parameters.
 *   - stream get, returns an object descriptor or failure.
 *     - Operates on a channel object descriptor.
 *   - stream notifier get, returns an object descriptor or failure.
 *     - Operates on a channel object descriptor.
 *   - event creation, returns an object descriptor or failure.
 *     - Operates on a channel object descriptor
 *     - Takes an event name as parameter
 *     - Takes an instrumentation source as parameter
 *       - e.g. tracepoints, dynamic_probes...
 *     - Takes instrumentation source specific arguments.
 *
 * Dual LGPL v2.1/GPL v2 license.
 */
27 #include <lttng/ust-abi.h>
28 #include <urcu/compiler.h>
29 #include <urcu/list.h>
30 #include <lttng/ust-events.h>
31 #include <lttng/ust-version.h>
32 #include <usterr-signal-safe.h>
34 #include "ltt-tracer.h"
35 #include "tracepoint-internal.h"
37 struct ltt_tracepoint_list
{
38 struct tracepoint_iter iter
;
/*
 * Iterator state for listing available loglevels, one instance per
 * LTTNG_UST_LOGLEVEL_LIST object descriptor.
 * NOTE(review): loglevel iteration is still stubbed out below.
 */
struct ltt_loglevel_list {
	struct loglevel_iter *iter;	/* current loglevel entry (pointer) */
	int got_first;			/* nonzero once iteration has started */
};
/*
 * Set while lttng_ust_abi_exit() tears down the object table, so that
 * stream release does not close read-side buffers possibly shared with
 * a forked child (see lttng_rb_release()).
 */
static int lttng_ust_abi_close_in_progress;
/* Forward declarations: used by lttng_cmd() before their definitions. */
static
int lttng_abi_tracepoint_list(void);
static
int lttng_abi_loglevel_list(void);
/*
 * Object descriptor table. Should be protected from concurrent access
 * by the caller.
 */
struct lttng_ust_obj {
	union {
		struct {
			void *private_data;			/* object instance */
			const struct lttng_ust_objd_ops *ops;	/* per-type operations */
			int f_count;				/* reference count */
		} s;
		int freelist_next;	/* offset freelist. end is -1. */
	} u;
};

struct lttng_ust_objd_table {
	struct lttng_ust_obj *array;	/* growable slot array */
	unsigned int len, allocated_len;
	int freelist_head;		/* offset freelist head. end is -1 */
};
77 static struct lttng_ust_objd_table objd_table
= {
82 int objd_alloc(void *private_data
, const struct lttng_ust_objd_ops
*ops
)
84 struct lttng_ust_obj
*obj
;
86 if (objd_table
.freelist_head
!= -1) {
87 obj
= &objd_table
.array
[objd_table
.freelist_head
];
88 objd_table
.freelist_head
= obj
->u
.freelist_next
;
92 if (objd_table
.len
>= objd_table
.allocated_len
) {
93 unsigned int new_allocated_len
, old_allocated_len
;
94 struct lttng_ust_obj
*new_table
, *old_table
;
96 old_allocated_len
= objd_table
.allocated_len
;
97 old_table
= objd_table
.array
;
98 if (!old_allocated_len
)
99 new_allocated_len
= 1;
101 new_allocated_len
= old_allocated_len
<< 1;
102 new_table
= zmalloc(sizeof(struct lttng_ust_obj
) * new_allocated_len
);
105 memcpy(new_table
, old_table
,
106 sizeof(struct lttng_ust_obj
) * old_allocated_len
);
108 objd_table
.array
= new_table
;
109 objd_table
.allocated_len
= new_allocated_len
;
111 obj
= &objd_table
.array
[objd_table
.len
];
114 obj
->u
.s
.private_data
= private_data
;
116 obj
->u
.s
.f_count
= 2; /* count == 1 : object is allocated */
117 /* count == 2 : allocated + hold ref */
118 return obj
- objd_table
.array
;
122 struct lttng_ust_obj
*_objd_get(int id
)
124 if (id
>= objd_table
.len
)
126 if (!objd_table
.array
[id
].u
.s
.f_count
)
128 return &objd_table
.array
[id
];
132 void *objd_private(int id
)
134 struct lttng_ust_obj
*obj
= _objd_get(id
);
136 return obj
->u
.s
.private_data
;
140 void objd_set_private(int id
, void *private_data
)
142 struct lttng_ust_obj
*obj
= _objd_get(id
);
144 obj
->u
.s
.private_data
= private_data
;
147 const struct lttng_ust_objd_ops
*objd_ops(int id
)
149 struct lttng_ust_obj
*obj
= _objd_get(id
);
157 void objd_free(int id
)
159 struct lttng_ust_obj
*obj
= _objd_get(id
);
162 obj
->u
.freelist_next
= objd_table
.freelist_head
;
163 objd_table
.freelist_head
= obj
- objd_table
.array
;
164 assert(obj
->u
.s
.f_count
== 1);
165 obj
->u
.s
.f_count
= 0; /* deallocated */
169 void objd_ref(int id
)
171 struct lttng_ust_obj
*obj
= _objd_get(id
);
175 int lttng_ust_objd_unref(int id
)
177 struct lttng_ust_obj
*obj
= _objd_get(id
);
181 if (obj
->u
.s
.f_count
== 1) {
182 ERR("Reference counting error\n");
185 if ((--obj
->u
.s
.f_count
) == 1) {
186 const struct lttng_ust_objd_ops
*ops
= objd_ops(id
);
196 void objd_table_destroy(void)
200 for (i
= 0; i
< objd_table
.allocated_len
; i
++)
201 (void) lttng_ust_objd_unref(i
);
202 free(objd_table
.array
);
203 objd_table
.array
= NULL
;
205 objd_table
.allocated_len
= 0;
206 objd_table
.freelist_head
= -1;
210 * This is LTTng's own personal way to create an ABI for sessiond.
211 * We send commands over a socket.
214 static const struct lttng_ust_objd_ops lttng_ops
;
215 static const struct lttng_ust_objd_ops lttng_session_ops
;
216 static const struct lttng_ust_objd_ops lttng_channel_ops
;
217 static const struct lttng_ust_objd_ops lttng_metadata_ops
;
218 static const struct lttng_ust_objd_ops lttng_event_ops
;
219 static const struct lttng_ust_objd_ops lib_ring_buffer_objd_ops
;
220 static const struct lttng_ust_objd_ops lttng_tracepoint_list_ops
;
221 static const struct lttng_ust_objd_ops lttng_loglevel_list_ops
;
228 int lttng_abi_create_root_handle(void)
232 root_handle
= objd_alloc(NULL
, <tng_ops
);
237 int lttng_abi_create_session(void)
239 struct ltt_session
*session
;
240 int session_objd
, ret
;
242 session
= ltt_session_create();
245 session_objd
= objd_alloc(session
, <tng_session_ops
);
246 if (session_objd
< 0) {
250 session
->objd
= session_objd
;
254 ltt_session_destroy(session
);
259 long lttng_abi_tracer_version(int objd
,
260 struct lttng_ust_tracer_version
*v
)
262 v
->major
= LTTNG_UST_MAJOR_VERSION
;
263 v
->minor
= LTTNG_UST_MINOR_VERSION
;
264 v
->patchlevel
= LTTNG_UST_PATCHLEVEL_VERSION
;
269 long lttng_abi_add_context(int objd
,
270 struct lttng_ust_context
*context_param
,
271 struct lttng_ctx
**ctx
, struct ltt_session
*session
)
273 if (session
->been_active
)
276 switch (context_param
->ctx
) {
277 case LTTNG_UST_CONTEXT_PTHREAD_ID
:
278 return lttng_add_pthread_id_to_ctx(ctx
);
279 case LTTNG_UST_CONTEXT_VTID
:
280 return lttng_add_vtid_to_ctx(ctx
);
281 case LTTNG_UST_CONTEXT_VPID
:
282 return lttng_add_vpid_to_ctx(ctx
);
283 case LTTNG_UST_CONTEXT_PROCNAME
:
284 return lttng_add_procname_to_ctx(ctx
);
291 * lttng_cmd - lttng control through socket commands
293 * @objd: the object descriptor
297 * This descriptor implements lttng commands:
299 * Returns a LTTng trace session object descriptor
300 * LTTNG_UST_TRACER_VERSION
301 * Returns the LTTng kernel tracer version
302 * LTTNG_UST_TRACEPOINT_LIST
303 * Returns a file descriptor listing available tracepoints
304 * LTTNG_UST_WAIT_QUIESCENT
305 * Returns after all previously running probes have completed
306 * LTTNG_UST_LOGLEVEL_LIST
307 * Returns a file descriptor listing available loglevels
309 * The returned session will be deleted when its file descriptor is closed.
312 long lttng_cmd(int objd
, unsigned int cmd
, unsigned long arg
)
315 case LTTNG_UST_SESSION
:
316 return lttng_abi_create_session();
317 case LTTNG_UST_TRACER_VERSION
:
318 return lttng_abi_tracer_version(objd
,
319 (struct lttng_ust_tracer_version
*) arg
);
320 case LTTNG_UST_TRACEPOINT_LIST
:
321 return lttng_abi_tracepoint_list();
322 case LTTNG_UST_WAIT_QUIESCENT
:
325 case LTTNG_UST_LOGLEVEL_LIST
:
326 return lttng_abi_loglevel_list();
332 static const struct lttng_ust_objd_ops lttng_ops
= {
337 * We tolerate no failure in this function (if one happens, we print a dmesg
338 * error, but cannot return any error, because the channel information is
342 void lttng_metadata_create_events(int channel_objd
)
344 struct ltt_channel
*channel
= objd_private(channel_objd
);
345 static struct lttng_ust_event metadata_params
= {
346 .instrumentation
= LTTNG_UST_TRACEPOINT
,
347 .name
= "lttng_ust:metadata",
349 struct ltt_event
*event
;
353 * We tolerate no failure path after event creation. It will stay
354 * invariant for the rest of the session.
356 ret
= ltt_event_create(channel
, &metadata_params
, NULL
, &event
);
364 return; /* not allowed to return error */
367 int lttng_abi_create_channel(int session_objd
,
368 struct lttng_ust_channel
*chan_param
,
369 enum channel_type channel_type
)
371 struct ltt_session
*session
= objd_private(session_objd
);
372 const struct lttng_ust_objd_ops
*ops
;
373 const char *transport_name
;
374 struct ltt_channel
*chan
;
377 struct ltt_channel chan_priv_init
;
379 switch (channel_type
) {
380 case PER_CPU_CHANNEL
:
381 if (chan_param
->output
== LTTNG_UST_MMAP
) {
382 transport_name
= chan_param
->overwrite
?
383 "relay-overwrite-mmap" : "relay-discard-mmap";
387 ops
= <tng_channel_ops
;
389 case METADATA_CHANNEL
:
390 if (chan_param
->output
== LTTNG_UST_MMAP
)
391 transport_name
= "relay-metadata-mmap";
394 ops
= <tng_metadata_ops
;
397 transport_name
= "<unknown>";
400 chan_objd
= objd_alloc(NULL
, ops
);
405 memset(&chan_priv_init
, 0, sizeof(chan_priv_init
));
406 /* Copy of session UUID for consumer (availability through shm) */
407 memcpy(chan_priv_init
.uuid
, session
->uuid
, sizeof(session
->uuid
));
410 * We tolerate no failure path after channel creation. It will stay
411 * invariant for the rest of the session.
413 chan
= ltt_channel_create(session
, transport_name
, NULL
,
414 chan_param
->subbuf_size
,
415 chan_param
->num_subbuf
,
416 chan_param
->switch_timer_interval
,
417 chan_param
->read_timer_interval
,
419 &chan_param
->wait_fd
,
420 &chan_param
->memory_map_size
,
426 objd_set_private(chan_objd
, chan
);
427 chan
->objd
= chan_objd
;
428 if (channel_type
== METADATA_CHANNEL
) {
429 session
->metadata
= chan
;
430 lttng_metadata_create_events(chan_objd
);
432 /* The channel created holds a reference on the session */
433 objd_ref(session_objd
);
441 err
= lttng_ust_objd_unref(chan_objd
);
449 * lttng_session_cmd - lttng session object command
455 * This descriptor implements lttng commands:
457 * Returns a LTTng channel object descriptor
459 * Enables tracing for a session (weak enable)
461 * Disables tracing for a session (strong disable)
463 * Returns a LTTng metadata object descriptor
465 * The returned channel will be deleted when its file descriptor is closed.
468 long lttng_session_cmd(int objd
, unsigned int cmd
, unsigned long arg
)
470 struct ltt_session
*session
= objd_private(objd
);
473 case LTTNG_UST_CHANNEL
:
474 return lttng_abi_create_channel(objd
,
475 (struct lttng_ust_channel
*) arg
,
477 case LTTNG_UST_SESSION_START
:
478 case LTTNG_UST_ENABLE
:
479 return ltt_session_enable(session
);
480 case LTTNG_UST_SESSION_STOP
:
481 case LTTNG_UST_DISABLE
:
482 return ltt_session_disable(session
);
483 case LTTNG_UST_METADATA
:
484 return lttng_abi_create_channel(objd
,
485 (struct lttng_ust_channel
*) arg
,
/*
 * Called when the last file reference is dropped.
 *
 * Big fat note: channels and events are invariant for the whole session after
 * their creation. So this session destruction also destroys all channel and
 * event structures specific to this session (they are not destroyed when their
 * individual file is released).
 */
static
int lttng_release_session(int objd)
{
	struct ltt_session *session = objd_private(objd);

	if (session) {
		ltt_session_destroy(session);
		return 0;
	} else {
		return -EINVAL;
	}
}
513 static const struct lttng_ust_objd_ops lttng_session_ops
= {
514 .release
= lttng_release_session
,
515 .cmd
= lttng_session_cmd
,
519 * beware: we don't keep the mutex over the send, but we must walk the
520 * whole list each time we are called again. So sending one tracepoint
521 * at a time means this is O(n^2). TODO: do as in the kernel and send
522 * multiple tracepoints for each call to amortize this cost.
525 void ltt_tracepoint_list_get(struct ltt_tracepoint_list
*list
,
529 if (!list
->got_first
) {
530 tracepoint_iter_start(&list
->iter
);
534 tracepoint_iter_next(&list
->iter
);
536 if (!list
->iter
.tracepoint
) {
537 tp_list_entry
[0] = '\0'; /* end of list */
539 if (!strcmp((*list
->iter
.tracepoint
)->name
,
540 "lttng_ust:metadata"))
542 memcpy(tp_list_entry
, (*list
->iter
.tracepoint
)->name
,
543 LTTNG_UST_SYM_NAME_LEN
);
548 long lttng_tracepoint_list_cmd(int objd
, unsigned int cmd
, unsigned long arg
)
550 struct ltt_tracepoint_list
*list
= objd_private(objd
);
551 char *str
= (char *) arg
;
554 case LTTNG_UST_TRACEPOINT_LIST_GET
:
555 ltt_tracepoint_list_get(list
, str
);
565 int lttng_abi_tracepoint_list(void)
568 struct ltt_tracepoint_list
*list
;
570 list_objd
= objd_alloc(NULL
, <tng_tracepoint_list_ops
);
575 list
= zmalloc(sizeof(*list
));
580 objd_set_private(list_objd
, list
);
588 err
= lttng_ust_objd_unref(list_objd
);
596 int lttng_release_tracepoint_list(int objd
)
598 struct ltt_tracepoint_list
*list
= objd_private(objd
);
601 tracepoint_iter_stop(&list
->iter
);
609 static const struct lttng_ust_objd_ops lttng_tracepoint_list_ops
= {
610 .release
= lttng_release_tracepoint_list
,
611 .cmd
= lttng_tracepoint_list_cmd
,
615 * beware: we don't keep the mutex over the send, but we must walk the
616 * whole list each time we are called again. So sending one loglevel
617 * entry at a time means this is O(n^2). TODO: do as in the kernel and
618 * send multiple tracepoints for each call to amortize this cost.
621 void ltt_loglevel_list_get(struct ltt_loglevel_list
*list
,
622 const char *loglevel_provider
,
623 const char *loglevel
,
628 if (!list
->got_first
) {
629 //tp_loglevel_iter_start(&list->iter);
633 //tp_loglevel_iter_next(&list->iter);
635 if (!list
->iter
->desc
.provider
) {
636 loglevel_provider
[0] = '\0'; /* end of list */
638 memcpy(loglevel_provider
, list
->iter
->desc
.provider
,
639 LTTNG_UST_SYM_NAME_LEN
);
640 memcpy(loglevel
, list
->iter
.loglevel
,
641 LTTNG_UST_SYM_NAME_LEN
);
642 *value
= list
->iter
.value
;
648 long lttng_loglevel_list_cmd(int objd
, unsigned int cmd
, unsigned long arg
)
650 struct ltt_loglevel_list
*list
= objd_private(objd
);
651 struct lttng_ust_loglevel
*loglevel_list_entry
=
652 (struct lttng_ust_loglevel
*) arg
;
655 case LTTNG_UST_LOGLEVEL_LIST_GET
:
657 ltt_tracepoint_list_get(list,
658 loglevel_list_entry->provider,
659 loglevel_list_entry->loglevel,
660 &loglevel_list_entry->value);
661 if (loglevel_list_entry->provider[0] == '\0')
671 int lttng_abi_loglevel_list(void)
674 struct ltt_loglevel_list
*list
;
676 list_objd
= objd_alloc(NULL
, <tng_loglevel_list_ops
);
681 list
= zmalloc(sizeof(*list
));
686 objd_set_private(list_objd
, list
);
694 err
= lttng_ust_objd_unref(list_objd
);
/* Free the loglevel listing state on last release. */
static
int lttng_release_loglevel_list(int objd)
{
	struct ltt_loglevel_list *list = objd_private(objd);

	if (list) {
		//tp_loglevel_iter_stop(&list->iter);
		free(list);
		return 0;
	} else {
		return -EINVAL;
	}
}
715 static const struct lttng_ust_objd_ops lttng_loglevel_list_ops
= {
716 .release
= lttng_release_loglevel_list
,
717 .cmd
= lttng_loglevel_list_cmd
,
/* Private data of a stream object descriptor. */
struct stream_priv_data {
	struct lttng_ust_lib_ring_buffer *buf;	/* read-side buffer */
	struct ltt_channel *ltt_chan;		/* owning channel */
};
726 int lttng_abi_open_stream(int channel_objd
, struct lttng_ust_stream
*info
)
728 struct ltt_channel
*channel
= objd_private(channel_objd
);
729 struct lttng_ust_lib_ring_buffer
*buf
;
730 struct stream_priv_data
*priv
;
731 int stream_objd
, ret
;
733 buf
= channel
->ops
->buffer_read_open(channel
->chan
, channel
->handle
,
734 &info
->shm_fd
, &info
->wait_fd
, &info
->memory_map_size
);
738 priv
= zmalloc(sizeof(*priv
));
744 priv
->ltt_chan
= channel
;
745 stream_objd
= objd_alloc(priv
, &lib_ring_buffer_objd_ops
);
746 if (stream_objd
< 0) {
750 /* Hold a reference on the channel object descriptor */
751 objd_ref(channel_objd
);
757 channel
->ops
->buffer_read_close(buf
, channel
->handle
);
762 int lttng_abi_create_event(int channel_objd
,
763 struct lttng_ust_event
*event_param
)
765 struct ltt_channel
*channel
= objd_private(channel_objd
);
766 struct ltt_event
*event
;
769 event_param
->name
[LTTNG_UST_SYM_NAME_LEN
- 1] = '\0';
770 event_objd
= objd_alloc(NULL
, <tng_event_ops
);
771 if (event_objd
< 0) {
776 * We tolerate no failure path after event creation. It will stay
777 * invariant for the rest of the session.
779 ret
= ltt_event_create(channel
, event_param
, NULL
, &event
);
783 objd_set_private(event_objd
, event
);
784 /* The event holds a reference on the channel */
785 objd_ref(channel_objd
);
792 err
= lttng_ust_objd_unref(event_objd
);
800 * lttng_channel_cmd - lttng control through object descriptors
802 * @objd: the object descriptor
806 * This object descriptor implements lttng commands:
808 * Returns an event stream object descriptor or failure.
809 * (typically, one event stream records events from one CPU)
811 * Returns an event object descriptor or failure.
813 * Prepend a context field to each event in the channel
815 * Enable recording for events in this channel (weak enable)
817 * Disable recording for events in this channel (strong disable)
819 * Channel and event file descriptors also hold a reference on the session.
822 long lttng_channel_cmd(int objd
, unsigned int cmd
, unsigned long arg
)
824 struct ltt_channel
*channel
= objd_private(objd
);
827 case LTTNG_UST_STREAM
:
829 struct lttng_ust_stream
*stream
;
831 stream
= (struct lttng_ust_stream
*) arg
;
832 /* stream used as output */
833 return lttng_abi_open_stream(objd
, stream
);
835 case LTTNG_UST_EVENT
:
836 return lttng_abi_create_event(objd
, (struct lttng_ust_event
*) arg
);
837 case LTTNG_UST_CONTEXT
:
838 return lttng_abi_add_context(objd
,
839 (struct lttng_ust_context
*) arg
,
840 &channel
->ctx
, channel
->session
);
841 case LTTNG_UST_ENABLE
:
842 return ltt_channel_enable(channel
);
843 case LTTNG_UST_DISABLE
:
844 return ltt_channel_disable(channel
);
845 case LTTNG_UST_FLUSH_BUFFER
:
846 return channel
->ops
->flush_buffer(channel
->chan
, channel
->handle
);
853 * lttng_metadata_cmd - lttng control through object descriptors
855 * @objd: the object descriptor
859 * This object descriptor implements lttng commands:
861 * Returns an event stream file descriptor or failure.
863 * Channel and event file descriptors also hold a reference on the session.
866 long lttng_metadata_cmd(int objd
, unsigned int cmd
, unsigned long arg
)
868 struct ltt_channel
*channel
= objd_private(objd
);
871 case LTTNG_UST_STREAM
:
873 struct lttng_ust_stream
*stream
;
875 stream
= (struct lttng_ust_stream
*) arg
;
876 /* stream used as output */
877 return lttng_abi_open_stream(objd
, stream
);
879 case LTTNG_UST_FLUSH_BUFFER
:
880 return channel
->ops
->flush_buffer(channel
->chan
, channel
->handle
);
#if 0
/*
 * lttng_channel_poll - lttng stream addition/removal monitoring
 *
 * Dead code inherited from the kernel tracer: struct file, poll_table,
 * FMODE_READ and POLL* are kernel-only symbols, and the corresponding
 * .poll hook is commented out in lttng_channel_ops. Kept for reference.
 */
static
unsigned int lttng_channel_poll(struct file *file, poll_table *wait)
{
	struct ltt_channel *channel = file->private_data;
	unsigned int mask = 0;

	if (file->f_mode & FMODE_READ) {
		poll_wait_set_exclusive(wait);
		poll_wait(file, channel->ops->get_hp_wait_queue(channel->chan),
			  wait);

		if (channel->ops->is_disabled(channel->chan))
			return POLLERR;
		if (channel->ops->is_finalized(channel->chan))
			return POLLHUP;
		if (channel->ops->buffer_has_read_closed_stream(channel->chan))
			return POLLIN | POLLRDNORM;
	}
	return mask;
}
#endif
917 int lttng_channel_release(int objd
)
919 struct ltt_channel
*channel
= objd_private(objd
);
922 return lttng_ust_objd_unref(channel
->session
->objd
);
926 static const struct lttng_ust_objd_ops lttng_channel_ops
= {
927 .release
= lttng_channel_release
,
928 //.poll = lttng_channel_poll,
929 .cmd
= lttng_channel_cmd
,
932 static const struct lttng_ust_objd_ops lttng_metadata_ops
= {
933 .release
= lttng_channel_release
,
934 .cmd
= lttng_metadata_cmd
,
/**
 * lttng_rb_cmd - lttng ring buffer control through object descriptors
 *
 * @objd: the object descriptor
 * @cmd: the command
 * @arg: command arg
 *
 * This object descriptor implements lttng commands:
 * (None for now. Access is done directly though shm.)
 */
static
long lttng_rb_cmd(int objd, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	default:
		return -EINVAL;
	}
}
957 int lttng_rb_release(int objd
)
959 struct stream_priv_data
*priv
= objd_private(objd
);
960 struct lttng_ust_lib_ring_buffer
*buf
;
961 struct ltt_channel
*channel
;
965 channel
= priv
->ltt_chan
;
968 * If we are at ABI exit, we don't want to close the
969 * buffer opened for read: it is being shared between
970 * the parent and child (right after fork), and we don't
971 * want the child to close it for the parent. For a real
972 * exit, we don't care about marking it as closed, as
973 * the consumer daemon (if there is one) will do fine
974 * even if we don't mark it as "closed" for reading on
976 * We only mark it as closed if it is being explicitely
977 * released by the session daemon with an explicit
980 if (!lttng_ust_abi_close_in_progress
)
981 channel
->ops
->buffer_read_close(buf
, channel
->handle
);
983 return lttng_ust_objd_unref(channel
->objd
);
988 static const struct lttng_ust_objd_ops lib_ring_buffer_objd_ops
= {
989 .release
= lttng_rb_release
,
994 * lttng_event_cmd - lttng control through object descriptors
996 * @objd: the object descriptor
1000 * This object descriptor implements lttng commands:
1002 * Prepend a context field to each record of this event
1004 * Enable recording for this event (weak enable)
1006 * Disable recording for this event (strong disable)
1009 long lttng_event_cmd(int objd
, unsigned int cmd
, unsigned long arg
)
1011 struct ltt_event
*event
= objd_private(objd
);
1014 case LTTNG_UST_CONTEXT
:
1015 return lttng_abi_add_context(objd
,
1016 (struct lttng_ust_context
*) arg
,
1017 &event
->ctx
, event
->chan
->session
);
1018 case LTTNG_UST_ENABLE
:
1019 return ltt_event_enable(event
);
1020 case LTTNG_UST_DISABLE
:
1021 return ltt_event_disable(event
);
1028 int lttng_event_release(int objd
)
1030 struct ltt_event
*event
= objd_private(objd
);
1033 return lttng_ust_objd_unref(event
->chan
->objd
);
1037 /* TODO: filter control ioctl */
1038 static const struct lttng_ust_objd_ops lttng_event_ops
= {
1039 .release
= lttng_event_release
,
1040 .cmd
= lttng_event_cmd
,
1043 void lttng_ust_abi_exit(void)
1045 lttng_ust_abi_close_in_progress
= 1;
1046 objd_table_destroy();
1047 lttng_ust_abi_close_in_progress
= 0;