Commit | Line | Data |
---|---|---|
1c8284eb MD |
/**
 * ltt-type-serializer.c
 *
 * LTTng specialized type serializer.
 *
 * Copyright Mathieu Desnoyers, 2008.
 *
 * Dual LGPL v2.1/GPL v2 license.
 */
10 | #include <linux/module.h> | |
11 | ||
12 | #include "ltt-type-serializer.h" | |
13 | #include "ltt-relay-lockless.h" | |
14 | ||
15 | notrace | |
16 | void _ltt_specialized_trace(const struct marker *mdata, void *probe_data, | |
17 | void *serialize_private, unsigned int data_size, | |
18 | unsigned int largest_align) | |
19 | { | |
20 | int ret; | |
21 | uint16_t eID; | |
22 | size_t slot_size; | |
23 | unsigned int chan_index; | |
24 | struct ltt_chanbuf *buf; | |
25 | struct ltt_chan *chan; | |
26 | struct ltt_trace *trace; | |
27 | uint64_t tsc; | |
28 | long buf_offset; | |
29 | int cpu; | |
30 | unsigned int rflags; | |
31 | ||
32 | /* | |
33 | * If we get here, it's probably because we have useful work to do. | |
34 | */ | |
35 | if (unlikely(ltt_traces.num_active_traces == 0)) | |
36 | return; | |
37 | ||
38 | rcu_read_lock_sched_notrace(); | |
39 | cpu = smp_processor_id(); | |
40 | __get_cpu_var(ltt_nesting)++; | |
41 | /* | |
42 | * asm volatile and "memory" clobber prevent the compiler from moving | |
43 | * instructions out of the ltt nesting count. This is required to ensure | |
44 | * that probe side-effects which can cause recursion (e.g. unforeseen | |
45 | * traps, divisions by 0, ...) are triggered within the incremented | |
46 | * nesting count section. | |
47 | */ | |
48 | barrier(); | |
49 | eID = mdata->event_id; | |
50 | chan_index = mdata->channel_id; | |
51 | ||
52 | /* | |
53 | * Iterate on each trace, typically small number of active traces, | |
54 | * list iteration with prefetch is usually slower. | |
55 | */ | |
56 | __list_for_each_entry_rcu(trace, <t_traces.head, list) { | |
57 | if (unlikely(!trace->active)) | |
58 | continue; | |
59 | if (unlikely(!ltt_run_filter(trace, eID))) | |
60 | continue; | |
2e6246b4 | 61 | #ifdef LTT_DEBUG_EVENT_SIZE |
1c8284eb MD |
62 | rflags = LTT_RFLAG_ID_SIZE; |
63 | #else | |
64 | if (unlikely(eID >= LTT_FREE_EVENTS)) | |
65 | rflags = LTT_RFLAG_ID; | |
66 | else | |
67 | rflags = 0; | |
68 | #endif | |
69 | /* | |
70 | * Skip channels added after trace creation. | |
71 | */ | |
72 | if (unlikely(chan_index >= trace->nr_channels)) | |
73 | continue; | |
74 | chan = &trace->channels[chan_index]; | |
75 | if (!chan->active) | |
76 | continue; | |
77 | ||
78 | /* reserve space : header and data */ | |
79 | ret = ltt_reserve_slot(chan, trace, data_size, largest_align, | |
80 | cpu, &buf, &slot_size, &buf_offset, &tsc, | |
81 | &rflags); | |
82 | if (unlikely(ret < 0)) | |
83 | continue; /* buffer full */ | |
84 | ||
85 | /* Out-of-order write : header and data */ | |
86 | buf_offset = ltt_write_event_header(&buf->a, &chan->a, | |
87 | buf_offset, eID, data_size, | |
88 | tsc, rflags); | |
89 | if (data_size) { | |
90 | buf_offset += ltt_align(buf_offset, largest_align); | |
91 | ltt_relay_write(&buf->a, &chan->a, buf_offset, | |
92 | serialize_private, data_size); | |
93 | buf_offset += data_size; | |
94 | } | |
95 | /* Out-of-order commit */ | |
96 | ltt_commit_slot(buf, chan, buf_offset, data_size, slot_size); | |
97 | } | |
98 | /* | |
99 | * asm volatile and "memory" clobber prevent the compiler from moving | |
100 | * instructions out of the ltt nesting count. This is required to ensure | |
101 | * that probe side-effects which can cause recursion (e.g. unforeseen | |
102 | * traps, divisions by 0, ...) are triggered within the incremented | |
103 | * nesting count section. | |
104 | */ | |
105 | barrier(); | |
106 | __get_cpu_var(ltt_nesting)--; | |
107 | rcu_read_unlock_sched_notrace(); | |
108 | } | |
109 | EXPORT_SYMBOL_GPL(_ltt_specialized_trace); | |
110 | ||
/* Module metadata for the LTT type serializer. */
MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Mathieu Desnoyers");
MODULE_DESCRIPTION("LTT type serializer");