Commit | Line | Data |
---|---|---|
dbd0b4b3 FW |
1 | /* |
2 | * Infrastructure for statistic tracing (histogram output). | |
3 | * | |
4 | * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com> | |
5 | * | |
6 | * Based on the code from trace_branch.c which is | |
7 | * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com> | |
8 | * | |
9 | */ | |
10 | ||
11 | ||
12 | #include <linux/list.h> | |
13 | #include <linux/seq_file.h> | |
14 | #include <linux/debugfs.h> | |
15 | #include "trace.h" | |
16 | ||
17 | ||
/* List of stat entries from a tracer */
struct trace_stat_list {
	struct list_head list;	/* linkage into the global stat_list */
	void *stat;		/* opaque per-entry cookie owned by the tracer */
};
23 | ||
/* Sorted snapshot of the current tracer's stat entries, rebuilt per open */
static LIST_HEAD(stat_list);

/*
 * This is a copy of the current tracer to avoid racy
 * and dangerous output while the current tracer is
 * switched.
 */
static struct tracer current_tracer;

/*
 * Protect both the current tracer and the global
 * stat list.
 */
static DEFINE_MUTEX(stat_list_mutex);
38 | ||
39 | ||
40 | static void reset_stat_list(void) | |
41 | { | |
ff288b27 | 42 | struct trace_stat_list *node, *next; |
dbd0b4b3 | 43 | |
ff288b27 | 44 | list_for_each_entry_safe(node, next, &stat_list, list) |
dbd0b4b3 | 45 | kfree(node); |
dbd0b4b3 | 46 | |
ff288b27 | 47 | INIT_LIST_HEAD(&stat_list); |
dbd0b4b3 FW |
48 | } |
49 | ||
50 | void init_tracer_stat(struct tracer *trace) | |
51 | { | |
52 | mutex_lock(&stat_list_mutex); | |
53 | current_tracer = *trace; | |
54 | mutex_unlock(&stat_list_mutex); | |
55 | } | |
56 | ||
/*
 * Fallback comparator for tracers that provide no stat_cmp callback.
 * Always reports "greater", so the sorted-insert loop places each new
 * entry at its very first comparison.
 */
static int dummy_cmp(void *p1, void *p2)
{
	return 1;
}
66 | ||
67 | /* | |
68 | * Initialize the stat list at each trace_stat file opening. | |
69 | * All of these copies and sorting are required on all opening | |
70 | * since the stats could have changed between two file sessions. | |
71 | */ | |
72 | static int stat_seq_init(void) | |
73 | { | |
74 | struct trace_stat_list *iter_entry, *new_entry; | |
75 | void *prev_stat; | |
76 | int ret = 0; | |
77 | int i; | |
78 | ||
79 | mutex_lock(&stat_list_mutex); | |
80 | reset_stat_list(); | |
81 | ||
82 | if (!current_tracer.stat_start || !current_tracer.stat_next || | |
83 | !current_tracer.stat_show) | |
84 | goto exit; | |
85 | ||
86 | if (!current_tracer.stat_cmp) | |
87 | current_tracer.stat_cmp = dummy_cmp; | |
88 | ||
89 | /* | |
90 | * The first entry. Actually this is the second, but the first | |
91 | * one (the stat_list head) is pointless. | |
92 | */ | |
93 | new_entry = kmalloc(sizeof(struct trace_stat_list), GFP_KERNEL); | |
94 | if (!new_entry) { | |
95 | ret = -ENOMEM; | |
96 | goto exit; | |
97 | } | |
98 | ||
99 | INIT_LIST_HEAD(&new_entry->list); | |
ff288b27 | 100 | list_add(&new_entry->list, &stat_list); |
dbd0b4b3 FW |
101 | new_entry->stat = current_tracer.stat_start(); |
102 | ||
103 | prev_stat = new_entry->stat; | |
104 | ||
105 | /* | |
106 | * Iterate over the tracer stat entries and store them in a sorted | |
107 | * list. | |
108 | */ | |
109 | for (i = 1; ; i++) { | |
110 | new_entry = kmalloc(sizeof(struct trace_stat_list), GFP_KERNEL); | |
111 | if (!new_entry) { | |
112 | ret = -ENOMEM; | |
113 | goto exit_free_list; | |
114 | } | |
115 | ||
116 | INIT_LIST_HEAD(&new_entry->list); | |
117 | new_entry->stat = current_tracer.stat_next(prev_stat, i); | |
118 | ||
119 | /* End of insertion */ | |
120 | if (!new_entry->stat) | |
121 | break; | |
122 | ||
ff288b27 | 123 | list_for_each_entry(iter_entry, &stat_list, list) { |
dbd0b4b3 FW |
124 | /* Insertion with a descendent sorting */ |
125 | if (current_tracer.stat_cmp(new_entry->stat, | |
126 | iter_entry->stat) > 0) { | |
127 | ||
128 | list_add_tail(&new_entry->list, | |
129 | &iter_entry->list); | |
130 | break; | |
131 | ||
132 | /* The current smaller value */ | |
133 | } else if (list_is_last(&iter_entry->list, | |
ff288b27 | 134 | &stat_list)) { |
dbd0b4b3 FW |
135 | list_add(&new_entry->list, &iter_entry->list); |
136 | break; | |
137 | } | |
138 | } | |
139 | ||
140 | prev_stat = new_entry->stat; | |
141 | } | |
142 | exit: | |
143 | mutex_unlock(&stat_list_mutex); | |
144 | return ret; | |
145 | ||
146 | exit_free_list: | |
147 | reset_stat_list(); | |
148 | mutex_unlock(&stat_list_mutex); | |
149 | return ret; | |
150 | } | |
151 | ||
152 | ||
153 | static void *stat_seq_start(struct seq_file *s, loff_t *pos) | |
154 | { | |
ff288b27 | 155 | struct list_head *l = (struct list_head *)s->private; |
dbd0b4b3 FW |
156 | |
157 | /* Prevent from tracer switch or stat_list modification */ | |
158 | mutex_lock(&stat_list_mutex); | |
159 | ||
160 | /* If we are in the beginning of the file, print the headers */ | |
161 | if (!*pos && current_tracer.stat_headers) | |
162 | current_tracer.stat_headers(s); | |
163 | ||
ff288b27 | 164 | return seq_list_start(l, *pos); |
dbd0b4b3 FW |
165 | } |
166 | ||
167 | static void *stat_seq_next(struct seq_file *s, void *p, loff_t *pos) | |
168 | { | |
ff288b27 | 169 | struct list_head *l = (struct list_head *)s->private; |
dbd0b4b3 | 170 | |
ff288b27 | 171 | return seq_list_next(p, l, pos); |
dbd0b4b3 FW |
172 | } |
173 | ||
/* seq_file .stop: drop the lock taken in stat_seq_start */
static void stat_seq_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&stat_list_mutex);
}
178 | ||
179 | static int stat_seq_show(struct seq_file *s, void *v) | |
180 | { | |
e8a9cbf6 SR |
181 | struct trace_stat_list *entry; |
182 | ||
183 | entry = list_entry(v, struct trace_stat_list, list); | |
ff288b27 FW |
184 | |
185 | return current_tracer.stat_show(s, entry->stat); | |
dbd0b4b3 FW |
186 | } |
187 | ||
/* seq_file iteration callbacks backing the trace_stat debugfs file */
static const struct seq_operations trace_stat_seq_ops = {
	.start = stat_seq_start,
	.next = stat_seq_next,
	.stop = stat_seq_stop,
	.show = stat_seq_show
};
194 | ||
195 | static int tracing_stat_open(struct inode *inode, struct file *file) | |
196 | { | |
197 | int ret; | |
198 | ||
199 | ret = seq_open(file, &trace_stat_seq_ops); | |
200 | if (!ret) { | |
201 | struct seq_file *m = file->private_data; | |
202 | m->private = &stat_list; | |
203 | ret = stat_seq_init(); | |
204 | } | |
205 | ||
206 | return ret; | |
207 | } | |
208 | ||
209 | ||
210 | /* | |
211 | * Avoid consuming memory with our now useless list. | |
212 | */ | |
213 | static int tracing_stat_release(struct inode *i, struct file *f) | |
214 | { | |
215 | mutex_lock(&stat_list_mutex); | |
216 | reset_stat_list(); | |
217 | mutex_unlock(&stat_list_mutex); | |
218 | return 0; | |
219 | } | |
220 | ||
/* File operations for the debugfs "trace_stat" entry */
static const struct file_operations tracing_stat_fops = {
	.open = tracing_stat_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = tracing_stat_release
};
227 | ||
/*
 * Boot-time setup: create the read-only "trace_stat" file in the
 * tracing debugfs directory. A creation failure is only warned
 * about — tracing itself keeps working without the stat file.
 */
static int __init tracing_stat_init(void)
{
	struct dentry *d_tracing;
	struct dentry *entry;

	d_tracing = tracing_init_dentry();

	entry = debugfs_create_file("trace_stat", 0444, d_tracing,
					NULL,
					&tracing_stat_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'trace_stat' entry\n");
	return 0;
}
fs_initcall(tracing_stat_init);