fs/xfs/support/ktrace.c
/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <xfs.h>

static kmem_zone_t      *ktrace_hdr_zone;
static kmem_zone_t      *ktrace_ent_zone;
static int              ktrace_zentries;

void __init
ktrace_init(int zentries)
{
        ktrace_zentries = roundup_pow_of_two(zentries);

        ktrace_hdr_zone = kmem_zone_init(sizeof(ktrace_t),
                                         "ktrace_hdr");
        ASSERT(ktrace_hdr_zone);

        ktrace_ent_zone = kmem_zone_init(ktrace_zentries
                                         * sizeof(ktrace_entry_t),
                                         "ktrace_ent");
        ASSERT(ktrace_ent_zone);
}
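
/*
 * Illustrative usage (not a real call site; the entry count below is
 * an assumption for the example, the actual value is chosen by the
 * XFS init code):
 *
 *      ktrace_init(64);        at subsystem init
 *      ...
 *      ktrace_uninit();        at subsystem exit
 */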

void __exit
ktrace_uninit(void)
{
        kmem_zone_destroy(ktrace_hdr_zone);
        kmem_zone_destroy(ktrace_ent_zone);
}

/*
 * ktrace_alloc()
 *
 * Allocate a ktrace header and enough buffering for the given
 * number of entries.  Round the number of entries up to a
 * power of 2 so we can do fast masking to get the index from
 * the atomic index counter.
 */
ktrace_t *
ktrace_alloc(int nentries, unsigned int __nocast sleep)
{
        ktrace_t        *ktp;
        ktrace_entry_t  *ktep;
        int             entries;

        ktp = (ktrace_t *)kmem_zone_alloc(ktrace_hdr_zone, sleep);

        if (ktp == NULL) {
                /*
                 * KM_SLEEP callers don't expect failure.
                 */
                if (sleep & KM_SLEEP)
                        panic("ktrace_alloc: NULL memory on KM_SLEEP request!");

                return NULL;
        }

        /*
         * Buffers with exactly ktrace_zentries entries come from
         * their own zone; any other size is allocated directly.
         */
        entries = roundup_pow_of_two(nentries);
        if (entries == ktrace_zentries) {
                ktep = (ktrace_entry_t *)kmem_zone_zalloc(ktrace_ent_zone,
                                                          sleep);
        } else {
                ktep = (ktrace_entry_t *)kmem_zalloc((entries * sizeof(*ktep)),
                                                     sleep | KM_LARGE);
        }

        if (ktep == NULL) {
                /*
                 * KM_SLEEP callers don't expect failure.
                 */
                if (sleep & KM_SLEEP)
                        panic("ktrace_alloc: NULL memory on KM_SLEEP request!");

                /* ktp came from the header zone, so free it back there. */
                kmem_zone_free(ktrace_hdr_zone, ktp);

                return NULL;
        }

        ktp->kt_entries = ktep;
        ktp->kt_nentries = entries;
        ASSERT(is_power_of_2(entries));
        ktp->kt_index_mask = entries - 1;
        atomic_set(&ktp->kt_index, 0);
        ktp->kt_rollover = 0;
        return ktp;
}
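
/*
 * Illustrative sketch (hypothetical caller, not an XFS call site):
 * the power-of-2 rounding above is what makes the index mask work.
 *
 *      ktrace_t *ktp = ktrace_alloc(100, KM_SLEEP);
 *
 * Here kt_nentries becomes 128 and kt_index_mask becomes 127, so
 * (index & 127) == (index % 128) for any non-negative index.
 */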


/*
 * ktrace_free()
 *
 * Free up the ktrace header and buffer.  It is up to the caller
 * to ensure that no-one is referencing it.
 */
void
ktrace_free(ktrace_t *ktp)
{
        int     entries_size;

        if (ktp == NULL)
                return;

        /*
         * Entry buffers of the standard size come from their own
         * zone; everything else was allocated directly.
         */
        if (ktp->kt_nentries == ktrace_zentries) {
                kmem_zone_free(ktrace_ent_zone, ktp->kt_entries);
        } else {
                entries_size = (int)(ktp->kt_nentries * sizeof(ktrace_entry_t));

                kmem_free(ktp->kt_entries, entries_size);
        }

        kmem_zone_free(ktrace_hdr_zone, ktp);
}
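
/*
 * Illustrative alloc/free pairing (hypothetical caller):
 *
 *      ktrace_t *ktp = ktrace_alloc(64, KM_SLEEP);
 *      ...
 *      ktrace_free(ktp);       safe even when ktp is NULL
 */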


/*
 * Enter the given values into the "next" entry in the trace buffer.
 * kt_index is always the index of the next entry to be filled.
 */
void
ktrace_enter(
        ktrace_t        *ktp,
        void            *val0,
        void            *val1,
        void            *val2,
        void            *val3,
        void            *val4,
        void            *val5,
        void            *val6,
        void            *val7,
        void            *val8,
        void            *val9,
        void            *val10,
        void            *val11,
        void            *val12,
        void            *val13,
        void            *val14,
        void            *val15)
{
        int             index;
        ktrace_entry_t  *ktep;

        ASSERT(ktp != NULL);

        /*
         * Grab an entry by pushing the index up to the next one.
         * kt_nentries is a power of two, so masking with
         * kt_index_mask (nentries - 1) wraps the counter around the
         * buffer without needing a modulo.
         */
        index = atomic_add_return(1, &ktp->kt_index);
        index = (index - 1) & ktp->kt_index_mask;
        if (!ktp->kt_rollover && index == ktp->kt_nentries - 1)
                ktp->kt_rollover = 1;

        ASSERT((index >= 0) && (index < ktp->kt_nentries));

        ktep = &(ktp->kt_entries[index]);

        ktep->val[0] = val0;
        ktep->val[1] = val1;
        ktep->val[2] = val2;
        ktep->val[3] = val3;
        ktep->val[4] = val4;
        ktep->val[5] = val5;
        ktep->val[6] = val6;
        ktep->val[7] = val7;
        ktep->val[8] = val8;
        ktep->val[9] = val9;
        ktep->val[10] = val10;
        ktep->val[11] = val11;
        ktep->val[12] = val12;
        ktep->val[13] = val13;
        ktep->val[14] = val14;
        ktep->val[15] = val15;
}
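
/*
 * Illustrative call (hypothetical values; a real caller logs an event
 * tag and related state, padding unused slots with NULL):
 *
 *      ktrace_enter(ktp,
 *                   (void *)(unsigned long)event_id, (void *)ip,
 *                   NULL, NULL, NULL, NULL, NULL, NULL,
 *                   NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
 */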

/*
 * Return the number of entries in the trace buffer.
 */
int
ktrace_nentries(
        ktrace_t        *ktp)
{
        int     index;

        if (ktp == NULL)
                return 0;

        index = atomic_read(&ktp->kt_index) & ktp->kt_index_mask;
        return (ktp->kt_rollover ? ktp->kt_nentries : index);
}

/*
 * ktrace_first()
 *
 * This is used to find the start of the trace buffer.
 * In conjunction with ktrace_next() it can be used to
 * iterate through the entire trace buffer.  This code does
 * not do any locking because it is assumed that it is called
 * from the debugger.
 *
 * The caller must pass in a pointer to a ktrace_snap
 * structure in which we will keep some state used to
 * iterate through the buffer.  This state must not be touched
 * by any code outside of this module.
 */
ktrace_entry_t *
ktrace_first(ktrace_t *ktp, ktrace_snap_t *ktsp)
{
        ktrace_entry_t  *ktep;
        int             index;
        int             nentries;

        if (ktp->kt_rollover)
                index = atomic_read(&ktp->kt_index) & ktp->kt_index_mask;
        else
                index = 0;

        ktsp->ks_start = index;
        ktep = &(ktp->kt_entries[index]);

        nentries = ktrace_nentries(ktp);
        index++;
        if (index < nentries) {
                ktsp->ks_index = index;
        } else {
                ktsp->ks_index = 0;
                if (index > nentries)
                        ktep = NULL;
        }
        return ktep;
}

/*
 * ktrace_next()
 *
 * This is used to iterate through the entries of the given
 * trace buffer.  The caller must pass in the ktrace_snap_t
 * structure initialized by ktrace_first().  The return value
 * will be either a pointer to the next ktrace_entry or NULL
 * if all of the entries have been traversed.
 */
ktrace_entry_t *
ktrace_next(
        ktrace_t        *ktp,
        ktrace_snap_t   *ktsp)
{
        int             index;
        ktrace_entry_t  *ktep;

        index = ktsp->ks_index;
        if (index == ktsp->ks_start) {
                ktep = NULL;
        } else {
                ktep = &ktp->kt_entries[index];
        }

        index++;
        if (index == ktrace_nentries(ktp)) {
                ktsp->ks_index = 0;
        } else {
                ktsp->ks_index = index;
        }

        return ktep;
}
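
/*
 * Illustrative walk over the buffer (hypothetical debugger-side
 * consumer; kts is caller-owned iteration state):
 *
 *      ktrace_snap_t   kts;
 *      ktrace_entry_t  *ktep;
 *
 *      for (ktep = ktrace_first(ktp, &kts); ktep != NULL;
 *           ktep = ktrace_next(ktp, &kts))
 *              examine(ktep);          examine() is hypothetical
 */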

/*
 * ktrace_skip()
 *
 * Skip the next "count" entries and return the entry after that.
 * Return NULL if this causes us to iterate past the beginning again.
 */
ktrace_entry_t *
ktrace_skip(
        ktrace_t        *ktp,
        int             count,
        ktrace_snap_t   *ktsp)
{
        int             index;
        int             new_index;
        ktrace_entry_t  *ktep;
        int             nentries = ktrace_nentries(ktp);

        index = ktsp->ks_index;
        new_index = index + count;
        while (new_index >= nentries) {
                new_index -= nentries;
        }
        if (index == ktsp->ks_start) {
                /*
                 * We've iterated around to the start, so we're done.
                 */
                ktep = NULL;
        } else if ((new_index < index) && (index < ktsp->ks_index)) {
                /*
                 * We've skipped past the start again, so we're done.
                 */
                ktep = NULL;
                ktsp->ks_index = ktsp->ks_start;
        } else {
                ktep = &(ktp->kt_entries[new_index]);
                new_index++;
                if (new_index == nentries) {
                        ktsp->ks_index = 0;
                } else {
                        ktsp->ks_index = new_index;
                }
        }
        return ktep;
}
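
/*
 * Illustrative use (hypothetical): after ktrace_first(), jump ahead
 * several entries instead of stepping one at a time:
 *
 *      ktep = ktrace_first(ktp, &kts);
 *      if (ktep)
 *              ktep = ktrace_skip(ktp, 10, &kts);
 */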