/*
 * Holds LTTng probes registry.
 *
 * Copyright 2010-2012 (c) - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#define _LGPL_SOURCE
#include <string.h>
#include <errno.h>
#include <stdlib.h>
#include <assert.h>
#include <urcu/list.h>
#include <urcu/hlist.h>
#include <lttng/ust-events.h>
#include <lttng/tracepoint.h>
#include "tracepoint-internal.h"
#include <helper.h>	/* zmalloc */
#include "lttng-tracer-core.h"
#include "ust-events-internal.h"
/*
 * probe list is protected by ust_lock()/ust_unlock().
 */
static CDS_LIST_HEAD(_probe_list);
/*
 * List of probes registered but not yet processed.
 */
static CDS_LIST_HEAD(lazy_probe_init);
/*
 * lazy_nesting counter ensures we don't trigger lazy probe registration
 * fixup while we are performing the fixup. It is protected by the ust
 * mutex.
 */
static int lazy_nesting;
/*
 * Called under ust lock.
 */
static
int check_event_provider(struct lttng_probe_desc *desc)
{
	int i;
	size_t provider_name_len;

	provider_name_len = strnlen(desc->provider,
				LTTNG_UST_SYM_NAME_LEN - 1);
	for (i = 0; i < desc->nr_events; i++) {
		if (strncmp(desc->event_desc[i]->name,
				desc->provider,
				provider_name_len))
			return 0;	/* provider mismatch */
	}
	return 1;
}
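/*
 * Illustration of the prefix convention checked above (hypothetical
 * provider/event names, not part of this registry): a probe generated
 * from
 *
 *   TRACEPOINT_EVENT(my_provider, my_event, TP_ARGS(), TP_FIELDS())
 *
 * carries an event descriptor named "my_provider:my_event", which
 * begins with the provider string "my_provider", so check_event_provider()
 * accepts it. An event descriptor named "other:my_event" placed in the
 * same probe descriptor would make the check return 0.
 */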
/*
 * Called under ust lock.
 */
static
void lttng_lazy_probe_register(struct lttng_probe_desc *desc)
{
	struct lttng_probe_desc *iter;
	struct cds_list_head *probe_list;

	/*
	 * Each provider enforces that every event name begins with the
	 * provider name. Check this in an assertion for extra
	 * carefulness. This ensures we cannot have duplicate event
	 * names across providers.
	 */
	assert(check_event_provider(desc));

	/*
	 * The provider ensures there are no duplicate event names.
	 * Duplicated TRACEPOINT_EVENT event names would generate a
	 * compile-time error due to duplicated symbol names.
	 */

	/*
	 * We sort the providers by struct lttng_probe_desc pointer
	 * address.
	 */
	probe_list = &_probe_list;
	cds_list_for_each_entry_reverse(iter, probe_list, head) {
		BUG_ON(iter == desc); /* Should never be in the list twice */
		if (iter < desc) {
			/* We belong to the location right after iter. */
			cds_list_add(&desc->head, &iter->head);
			goto desc_added;
		}
	}
	/* We should be added at the head of the list */
	cds_list_add(&desc->head, probe_list);
desc_added:
	DBG("just registered probe %s containing %u events",
		desc->provider, desc->nr_events);
}
/*
 * Called under ust lock.
 */
static
void fixup_lazy_probes(void)
{
	struct lttng_probe_desc *iter, *tmp;
	int ret;

	lazy_nesting++;
	cds_list_for_each_entry_safe(iter, tmp,
			&lazy_probe_init, lazy_init_head) {
		lttng_lazy_probe_register(iter);
		iter->lazy = 0;
		cds_list_del(&iter->lazy_init_head);
	}
	ret = lttng_fix_pending_events();
	assert(!ret);
	lazy_nesting--;
}
/*
 * Called under ust lock.
 */
struct cds_list_head *lttng_get_probe_list_head(void)
{
	if (!lazy_nesting && !cds_list_empty(&lazy_probe_init))
		fixup_lazy_probes();
	return &_probe_list;
}
static
int check_provider_version(struct lttng_probe_desc *desc)
{
	/*
	 * Check tracepoint provider version compatibility.
	 */
	if (desc->major <= LTTNG_UST_PROVIDER_MAJOR) {
		DBG("Provider \"%s\" accepted, version %u.%u is compatible "
			"with LTTng UST provider version %u.%u.",
			desc->provider, desc->major, desc->minor,
			LTTNG_UST_PROVIDER_MAJOR,
			LTTNG_UST_PROVIDER_MINOR);
		if (desc->major < LTTNG_UST_PROVIDER_MAJOR) {
			DBG("However, some LTTng UST features might not be "
				"available for this provider unless it is "
				"recompiled against a more recent LTTng UST.");
		}
		return 1;	/* accept */
	} else {
		ERR("Provider \"%s\" rejected, version %u.%u is incompatible "
			"with LTTng UST provider version %u.%u. Please upgrade "
			"LTTng UST.",
			desc->provider, desc->major, desc->minor,
			LTTNG_UST_PROVIDER_MAJOR,
			LTTNG_UST_PROVIDER_MINOR);
		return 0;	/* reject */
	}
}
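/*
 * Example of the version check above (numbers purely illustrative):
 * with a tracer exposing provider version 2.0, a probe descriptor
 * reporting 1.5 or 2.0 is accepted (older or equal major), while one
 * reporting 3.0 is rejected, since a newer provider major may rely on
 * descriptor layout this tracer does not know about.
 */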
int lttng_probe_register(struct lttng_probe_desc *desc)
{
	int ret = 0;

	lttng_ust_fixup_tls();

	/*
	 * If version mismatch, don't register, but don't trigger assert
	 * on caller. The version check just prints an error.
	 */
	if (!check_provider_version(desc))
		return 0;

	ust_lock_nocheck();

	cds_list_add(&desc->lazy_init_head, &lazy_probe_init);
	desc->lazy = 1;
	DBG("adding probe %s containing %u events to lazy registration list",
		desc->provider, desc->nr_events);
	/*
	 * If there is at least one active session, we need to register
	 * the probe immediately, since we cannot delay event
	 * registration because they are needed ASAP.
	 */
	if (lttng_session_active())
		fixup_lazy_probes();

	lttng_fix_pending_event_notifiers();

	ust_unlock();
	return ret;
}
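/*
 * Usage sketch (illustrative only; in practice these calls are emitted
 * by the tracepoint provider code generated from lttng/tracepoint.h,
 * and the descriptor layout shown here is abridged):
 *
 *   static struct lttng_probe_desc my_probe_desc = {
 *           .provider = "my_provider",
 *           // event_desc, nr_events, major, minor filled by the generator
 *   };
 *
 *   static void __attribute__((constructor)) my_provider_ctor(void)
 *   {
 *           lttng_probe_register(&my_probe_desc);
 *   }
 *
 *   static void __attribute__((destructor)) my_provider_dtor(void)
 *   {
 *           lttng_probe_unregister(&my_probe_desc);
 *   }
 */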
/* Backward compatibility with UST 2.0 */
int ltt_probe_register(struct lttng_probe_desc *desc)
{
	return lttng_probe_register(desc);
}
void lttng_probe_unregister(struct lttng_probe_desc *desc)
{
	lttng_ust_fixup_tls();

	if (!check_provider_version(desc))
		return;

	ust_lock_nocheck();
	if (!desc->lazy)
		cds_list_del(&desc->head);
	else
		cds_list_del(&desc->lazy_init_head);

	lttng_probe_provider_unregister_events(desc);
	DBG("just unregistered probes of provider %s", desc->provider);
	ust_unlock();
}
/* Backward compatibility with UST 2.0 */
void ltt_probe_unregister(struct lttng_probe_desc *desc)
{
	lttng_probe_unregister(desc);
}
void lttng_probes_prune_event_list(struct lttng_ust_tracepoint_list *list)
{
	struct tp_list_entry *list_entry, *tmp;

	cds_list_for_each_entry_safe(list_entry, tmp, &list->head, head) {
		cds_list_del(&list_entry->head);
		free(list_entry);
	}
}
/*
 * called with UST lock held.
 */
int lttng_probes_get_event_list(struct lttng_ust_tracepoint_list *list)
{
	struct lttng_probe_desc *probe_desc;
	int i;
	struct cds_list_head *probe_list;

	probe_list = lttng_get_probe_list_head();
	CDS_INIT_LIST_HEAD(&list->head);
	cds_list_for_each_entry(probe_desc, probe_list, head) {
		for (i = 0; i < probe_desc->nr_events; i++) {
			struct tp_list_entry *list_entry;

			list_entry = zmalloc(sizeof(*list_entry));
			if (!list_entry)
				goto err_nomem;
			cds_list_add(&list_entry->head, &list->head);
			strncpy(list_entry->tp.name,
				probe_desc->event_desc[i]->name,
				LTTNG_UST_SYM_NAME_LEN);
			list_entry->tp.name[LTTNG_UST_SYM_NAME_LEN - 1] = '\0';
			if (!probe_desc->event_desc[i]->loglevel) {
				list_entry->tp.loglevel = TRACE_DEFAULT;
			} else {
				list_entry->tp.loglevel = *(*probe_desc->event_desc[i]->loglevel);
			}
		}
	}
	if (cds_list_empty(&list->head))
		list->iter = NULL;
	else
		list->iter =
			cds_list_first_entry(&list->head, struct tp_list_entry, head);
	return 0;

err_nomem:
	lttng_probes_prune_event_list(list);
	return -ENOMEM;
}
/*
 * Return current iteration position, advance internal iterator to next.
 * Return NULL if end of list.
 */
struct lttng_ust_tracepoint_iter *
	lttng_ust_tracepoint_list_get_iter_next(struct lttng_ust_tracepoint_list *list)
{
	struct tp_list_entry *entry;

	if (!list->iter)
		return NULL;
	entry = list->iter;
	if (entry->head.next == &list->head)
		list->iter = NULL;
	else
		list->iter = cds_list_entry(entry->head.next,
				struct tp_list_entry, head);
	return &entry->tp;
}
void lttng_probes_prune_field_list(struct lttng_ust_field_list *list)
{
	struct tp_field_list_entry *list_entry, *tmp;

	cds_list_for_each_entry_safe(list_entry, tmp, &list->head, head) {
		cds_list_del(&list_entry->head);
		free(list_entry);
	}
}
/*
 * called with UST lock held.
 */
int lttng_probes_get_field_list(struct lttng_ust_field_list *list)
{
	struct lttng_probe_desc *probe_desc;
	int i;
	struct cds_list_head *probe_list;

	probe_list = lttng_get_probe_list_head();
	CDS_INIT_LIST_HEAD(&list->head);
	cds_list_for_each_entry(probe_desc, probe_list, head) {
		for (i = 0; i < probe_desc->nr_events; i++) {
			const struct lttng_event_desc *event_desc =
				probe_desc->event_desc[i];
			int j;

			if (event_desc->nr_fields == 0) {
				/* Events without fields. */
				struct tp_field_list_entry *list_entry;

				list_entry = zmalloc(sizeof(*list_entry));
				if (!list_entry)
					goto err_nomem;
				cds_list_add(&list_entry->head, &list->head);
				strncpy(list_entry->field.event_name,
					event_desc->name,
					LTTNG_UST_SYM_NAME_LEN);
				list_entry->field.event_name[LTTNG_UST_SYM_NAME_LEN - 1] = '\0';
				list_entry->field.field_name[0] = '\0';
				list_entry->field.type = LTTNG_UST_FIELD_OTHER;
				if (!event_desc->loglevel) {
					list_entry->field.loglevel = TRACE_DEFAULT;
				} else {
					list_entry->field.loglevel = *(*event_desc->loglevel);
				}
				list_entry->field.nowrite = 1;
			}

			for (j = 0; j < event_desc->nr_fields; j++) {
				const struct lttng_event_field *event_field =
					&event_desc->fields[j];
				struct tp_field_list_entry *list_entry;

				list_entry = zmalloc(sizeof(*list_entry));
				if (!list_entry)
					goto err_nomem;
				cds_list_add(&list_entry->head, &list->head);
				strncpy(list_entry->field.event_name,
					event_desc->name,
					LTTNG_UST_SYM_NAME_LEN);
				list_entry->field.event_name[LTTNG_UST_SYM_NAME_LEN - 1] = '\0';
				strncpy(list_entry->field.field_name,
					event_field->name,
					LTTNG_UST_SYM_NAME_LEN);
				list_entry->field.field_name[LTTNG_UST_SYM_NAME_LEN - 1] = '\0';
				switch (event_field->type.atype) {
				case atype_integer:
					list_entry->field.type = LTTNG_UST_FIELD_INTEGER;
					break;
				case atype_string:
					list_entry->field.type = LTTNG_UST_FIELD_STRING;
					break;
				case atype_array:
					if (event_field->type.u.legacy.array.elem_type.atype != atype_integer
						|| event_field->type.u.legacy.array.elem_type.u.basic.integer.encoding == lttng_encode_none)
						list_entry->field.type = LTTNG_UST_FIELD_OTHER;
					else
						list_entry->field.type = LTTNG_UST_FIELD_STRING;
					break;
				case atype_array_nestable:
					if (event_field->type.u.array_nestable.elem_type->atype != atype_integer
						|| event_field->type.u.array_nestable.elem_type->u.integer.encoding == lttng_encode_none)
						list_entry->field.type = LTTNG_UST_FIELD_OTHER;
					else
						list_entry->field.type = LTTNG_UST_FIELD_STRING;
					break;
				case atype_sequence:
					if (event_field->type.u.legacy.sequence.elem_type.atype != atype_integer
						|| event_field->type.u.legacy.sequence.elem_type.u.basic.integer.encoding == lttng_encode_none)
						list_entry->field.type = LTTNG_UST_FIELD_OTHER;
					else
						list_entry->field.type = LTTNG_UST_FIELD_STRING;
					break;
				case atype_sequence_nestable:
					if (event_field->type.u.sequence_nestable.elem_type->atype != atype_integer
						|| event_field->type.u.sequence_nestable.elem_type->u.integer.encoding == lttng_encode_none)
						list_entry->field.type = LTTNG_UST_FIELD_OTHER;
					else
						list_entry->field.type = LTTNG_UST_FIELD_STRING;
					break;
				case atype_float:	/* Fall-through */
				case atype_float_nestable:
					list_entry->field.type = LTTNG_UST_FIELD_FLOAT;
					break;
				case atype_enum:	/* Fall-through */
				case atype_enum_nestable:
					list_entry->field.type = LTTNG_UST_FIELD_ENUM;
					break;
				default:
					list_entry->field.type = LTTNG_UST_FIELD_OTHER;
				}
				if (!event_desc->loglevel) {
					list_entry->field.loglevel = TRACE_DEFAULT;
				} else {
					list_entry->field.loglevel = *(*event_desc->loglevel);
				}
				list_entry->field.nowrite = event_field->nowrite;
			}
		}
	}
	if (cds_list_empty(&list->head))
		list->iter = NULL;
	else
		list->iter =
			cds_list_first_entry(&list->head,
				struct tp_field_list_entry, head);
	return 0;

err_nomem:
	lttng_probes_prune_field_list(list);
	return -ENOMEM;
}
/*
 * Return current iteration position, advance internal iterator to next.
 * Return NULL if end of list.
 */
struct lttng_ust_field_iter *
	lttng_ust_field_list_get_iter_next(struct lttng_ust_field_list *list)
{
	struct tp_field_list_entry *entry;

	if (!list->iter)
		return NULL;
	entry = list->iter;
	if (entry->head.next == &list->head)
		list->iter = NULL;
	else
		list->iter = cds_list_entry(entry->head.next,
				struct tp_field_list_entry, head);
	return &entry->field;
}
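/*
 * Field enumeration follows the same pattern as the tracepoint list
 * iteration above (illustrative sketch, caller-side names hypothetical):
 *
 *   struct lttng_ust_field_list field_list;
 *   struct lttng_ust_field_iter *iter;
 *
 *   if (lttng_probes_get_field_list(&field_list) == 0) {
 *           while ((iter = lttng_ust_field_list_get_iter_next(&field_list)))
 *                   DBG("field: %s.%s", iter->event_name, iter->field_name);
 *           lttng_probes_prune_field_list(&field_list);
 *   }
 */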