/*
 * ltt-ring-buffer-client.c
 *
 * Copyright (C) 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * LTTng lib ring buffer client.
 *
 * Dual LGPL v2.1/GPL v2 license.
 */
10 | ||
11 | #include <linux/module.h> | |
12 | #include "ltt-tracer.h" | |
13 | ||
/*
 * Per-buffer private state: the debugfs file exposing this ring buffer
 * to user space.
 *
 * Fix: the original definition was missing the terminating semicolon
 * after the closing brace, which is a compile error (the next token is
 * the struct channel_priv definition).
 */
struct ring_buffer_priv {
	struct dentry *dentry;	/* debugfs entry created in client_buffer_create() */
};
17 | ||
/*
 * Per-channel private state: back-pointer to the owning trace and the
 * buffer private data (a per-cpu pointer when the client allocates
 * per-cpu buffers — see client_buffer_create()).
 */
struct channel_priv {
	struct ltt_trace *trace;	/* owning trace */
	struct ring_buffer_priv *buf;	/* per-buffer private data */
};
22 | ||
23 | static const struct lib_ring_buffer_config client_config; | |
24 | ||
25 | static u64 client_ring_buffer_clock_read(struct channel *chan) | |
26 | { | |
27 | return lib_ring_buffer_clock_read(chan); | |
28 | } | |
29 | ||
30 | size_t client_record_header_size(const struct lib_ring_buffer_config *config, | |
31 | struct channel *chan, size_t offset, | |
32 | size_t data_size, | |
33 | size_t *pre_header_padding, | |
34 | unsigned int rflags, | |
35 | struct lib_ring_buffer_ctx *ctx) | |
36 | { | |
37 | return record_header_size(config, chan, offset, data_size, | |
38 | pre_header_padding, rflags, ctx); | |
39 | } | |
40 | ||
41 | /** | |
42 | * client_subbuffer_header_size - called on buffer-switch to a new sub-buffer | |
43 | * | |
44 | * Return header size without padding after the structure. Don't use packed | |
45 | * structure because gcc generates inefficient code on some architectures | |
46 | * (powerpc, mips..) | |
47 | */ | |
48 | static size_t client_subbuffer_header_size(void) | |
49 | { | |
50 | return offsetof(struct subbuffer_header, header_end); | |
51 | } | |
52 | ||
53 | static void client_buffer_begin(struct lib_ring_buffer *buf, u64 tsc, | |
54 | unsigned int subbuf_idx) | |
55 | { | |
56 | struct channel *chan = buf->backend.chan; | |
57 | struct subbuffer_header *header = | |
58 | (struct subbuffer_header *) | |
59 | lib_ring_buffer_offset_address(&buf->backend, | |
60 | subbuf_idx * chan->backend.subbuf_size); | |
61 | ||
62 | header->cycle_count_begin = tsc; | |
63 | header->data_size = 0xFFFFFFFF; /* for debugging */ | |
64 | write_trace_header(chan->backend.priv, header); | |
65 | } | |
66 | ||
67 | /* | |
68 | * offset is assumed to never be 0 here : never deliver a completely empty | |
69 | * subbuffer. data_size is between 1 and subbuf_size. | |
70 | */ | |
71 | static void client_buffer_end(struct lib_ring_buffer *buf, u64 tsc, | |
72 | unsigned int subbuf_idx, unsigned long data_size) | |
73 | { | |
74 | struct channel *chan = buf->backend.chan; | |
75 | struct subbuffer_header *header = | |
76 | (struct subbuffer_header *) | |
77 | lib_ring_buffer_offset_address(&buf->backend, | |
78 | subbuf_idx * chan->backend.subbuf_size); | |
79 | unsigned long records_lost = 0; | |
80 | ||
81 | header->data_size = data_size; | |
82 | header->subbuf_size = PAGE_ALIGN(data_size); | |
83 | header->cycle_count_end = tsc; | |
84 | records_lost += lib_ring_buffer_get_records_lost_full(&client_config, buf); | |
85 | records_lost += lib_ring_buffer_get_records_lost_wrap(&client_config, buf); | |
86 | records_lost += lib_ring_buffer_get_records_lost_big(&client_config, buf); | |
87 | header->events_lost = records_lost; | |
88 | header->subbuf_corrupt = 0; /* deprecated */ | |
89 | } | |
90 | ||
91 | static int client_buffer_create(struct lib_ring_buffer *buf, void *priv, | |
92 | int cpu, const char *name) | |
93 | { | |
94 | struct channel_priv *chan_priv = priv; | |
95 | struct ring_buffer_priv *buf_priv; | |
96 | struct dentry *trace_dentry; | |
97 | char *tmpname; | |
98 | int ret = 0; | |
99 | ||
100 | if (client_config.alloc == RING_BUFFER_ALLOC_PER_CPU) | |
101 | buf_priv = per_cpu_ptr(chan_priv->buf, cpu); | |
102 | else | |
103 | buf_priv = chan_priv->buf; | |
104 | ||
105 | tmpname = kzalloc(NAME_MAX + 1, GFP_KERNEL); | |
106 | if (!tmpname) { | |
107 | ret = -ENOMEM; | |
108 | goto end; | |
109 | } | |
110 | ||
111 | snprintf(tmpname, NAME_MAX, "%s%s_%d", | |
112 | (client_config.mode == RING_BUFFER_OVERWRITE) ? : "", | |
113 | name, cpu); | |
114 | ||
115 | trace_dentry = chan_priv->trace->dentry.trace_root; | |
116 | buf_priv->dentry = debugfs_create_file(tmpname, S_IRUSR, trace_dentry, | |
117 | buf, | |
118 | &lib_ring_buffer_file_operations); | |
119 | if (!buf_priv->dentry) { | |
120 | ret = -ENOMEM; | |
121 | goto free_name; | |
122 | } | |
123 | free_name: | |
124 | kfree(tmpname); | |
125 | end: | |
126 | return ret; | |
127 | } | |
128 | ||
129 | static void client_buffer_finalize(struct lib_ring_buffer *buf, void *priv, int cpu) | |
130 | { | |
131 | struct channel_priv *chan_priv = priv; | |
132 | struct lib_ring_buffer_priv *buf_priv; | |
133 | ||
134 | if (client_config.alloc == RING_BUFFER_ALLOC_PER_CPU) | |
135 | buf_priv = per_cpu_ptr(chan_priv->buf, cpu); | |
136 | else | |
137 | buf_priv = chan_priv->buf; | |
138 | ||
139 | debugfs_remove(buf_priv->dentry); | |
140 | } | |
141 | ||
/*
 * Ring buffer client configuration: per-cpu buffers in overwrite mode,
 * page backend, splice output. This completes the forward declaration
 * near the top of the file; the callbacks are defined above.
 */
static const struct lib_ring_buffer_config client_config = {
	.cb.ring_buffer_clock_read = client_ring_buffer_clock_read,
	.cb.record_header_size = client_record_header_size,
	.cb.subbuffer_header_size = client_subbuffer_header_size,
	.cb.buffer_begin = client_buffer_begin,
	.cb.buffer_end = client_buffer_end,
	.cb.buffer_create = client_buffer_create,
	.cb.buffer_finalize = client_buffer_finalize,

	.tsc_bits = 32,	/* NOTE(review): presumably TSC bits kept per record — confirm */
	.alloc = RING_BUFFER_ALLOC_PER_CPU,
	.sync = RING_BUFFER_SYNC_PER_CPU,
	.mode = RING_BUFFER_OVERWRITE,	/* flight-recorder style overwrite */
#ifdef RING_BUFFER_ALIGN
	.align = RING_BUFFER_NATURAL,
#else
	.align = RING_BUFFER_PACKED,
#endif
	.backend = RING_BUFFER_PAGE,
	.output = RING_BUFFER_SPLICE,
	.oops = RING_BUFFER_OOPS_CONSISTENCY,
	.ipi = RING_BUFFER_IPI_BARRIER,
	.wakeup = RING_BUFFER_WAKEUP_BY_TIMER,
};
166 | ||
167 | struct channel *ltt_channel_create(const char *name, struct ltt_trace *trace, | |
168 | void *buf_addr, | |
169 | size_t subbuf_size, size_t num_subbuf, | |
170 | unsigned int switch_timer_interval, | |
171 | unsigned int read_timer_interval) | |
172 | { | |
173 | struct channel *chan; | |
174 | struct chan_priv *chan_priv; | |
175 | ||
176 | chan_priv = kzalloc(sizeof(struct chan_priv), GFP_KERNEL); | |
177 | if (!chan_priv) | |
178 | return NULL; | |
179 | if (client_config.alloc == RING_BUFFER_ALLOC_PER_CPU) { | |
180 | chan_priv->buf = alloc_percpu(struct lib_ring_buffer_priv); | |
181 | memset(chan_priv->buf, 0, sizeof(*chan_priv->buf)); | |
182 | } else | |
183 | chan_priv->buf = kzalloc(sizeof(*chan_priv->buf), GFP_KERNEL) | |
184 | if (!channel_priv->buf) | |
185 | goto free_chan_priv; | |
186 | chan_priv->trace = trace; | |
187 | chan = channel_create(&client_config, name, chan_priv, buf_addr, | |
188 | subbuf_size, num_subbuf, switch_timer_interval, | |
189 | read_timer_interval); | |
190 | if (!chan) | |
191 | goto free_buf_priv; | |
192 | return chan; | |
193 | ||
194 | free_buf_priv: | |
195 | if (client_config.alloc == RING_BUFFER_ALLOC_PER_CPU) | |
196 | free_percpu(chan_priv->buf); | |
197 | else | |
198 | kfree(chan_priv->buf); | |
199 | free_chan_priv: | |
200 | kfree(chan_priv); | |
201 | return NULL; | |
202 | } | |
203 | ||
204 | void ltt_channel_destroy(struct channel *chan) | |
205 | { | |
206 | struct chan_priv *chan_priv = channel_get_private(chan); | |
207 | ||
208 | channel_destroy(chan); | |
209 | if (client_config.alloc == RING_BUFFER_ALLOC_PER_CPU) | |
210 | free_percpu(chan_priv->buf); | |
211 | else | |
212 | kfree(chan_priv->buf); | |
213 | kfree(chan_priv); | |
214 | } | |
215 | ||
/*
 * Remove the per-trace debugfs directory created by
 * ltt_relay_create_dirs() (the ascii output removal is disabled).
 */
static void ltt_relay_remove_dirs(struct ltt_trace *trace)
{
#if 0
	ltt_ascii_remove_dir(trace);
#endif //0
	debugfs_remove(trace->dentry.trace_root);
}
223 | ||
224 | static int ltt_relay_create_dirs(struct ltt_trace *new_trace) | |
225 | { | |
226 | struct dentry *ltt_root_dentry; | |
227 | int ret; | |
228 | ||
229 | ltt_root_dentry = get_ltt_root(); | |
230 | if (!ltt_root_dentry) | |
231 | return ENOENT; | |
232 | ||
233 | new_trace->dentry.trace_root = debugfs_create_dir(new_trace->trace_name, | |
234 | ltt_root_dentry); | |
235 | put_ltt_root(); | |
236 | if (new_trace->dentry.trace_root == NULL) { | |
237 | printk(KERN_ERR "LTT : Trace directory name %s already taken\n", | |
238 | new_trace->trace_name); | |
239 | return EEXIST; | |
240 | } | |
241 | #if 0 | |
242 | ret = ltt_ascii_create_dir(new_trace); | |
243 | if (ret) | |
244 | printk(KERN_WARNING "LTT : Unable to create ascii output file " | |
245 | "for trace %s\n", new_trace->trace_name); | |
246 | #endif //0 | |
247 | return 0; | |
248 | } | |
/*
 * Transport registered with the LTT core under the name "relay":
 * creates/removes the per-trace debugfs directories.
 */
static struct ltt_transport ltt_relay_transport = {
	.name = "relay",
	.owner = THIS_MODULE,
	.ops = {
		.create_dirs = ltt_relay_create_dirs,
		.remove_dirs = ltt_relay_remove_dirs,
	},
};
257 | ||
258 | int __init ltt_ring_buffer_client_init(void) | |
259 | { | |
260 | printk(KERN_INFO "LTT : ltt ring buffer client init\n"); | |
261 | ltt_transport_register(<t_relay_transport); | |
262 | return 0; | |
263 | } | |
264 | ||
265 | void __exit ltt_ring_buffer_client_exit(void) | |
266 | { | |
267 | printk(KERN_INFO "LTT : ltt ring buffer client exit\n"); | |
268 | ltt_transport_unregister(<t_relay_transport); | |
269 | } | |
270 | ||
/* Module metadata. */
MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Mathieu Desnoyers");
MODULE_DESCRIPTION("LTTng Ring Buffer Client");