/**
 * @file buffer_sync.c
 *
 * @remark Copyright 2002 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 * @author Barry Kasindorf
 *
 * This is the core of the buffer management. Each
 * CPU buffer is processed and entered into the
 * global event buffer. Such processing is necessary
 * in several circumstances, mentioned below.
 *
 * The processing does the job of converting the
 * transitory EIP value into a persistent dentry/offset
 * value that the profiler can record at its leisure.
 *
 * See fs/dcookies.c for a description of the dentry/offset
 * objects.
 */

#include <linux/mm.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/dcookies.h>
#include <linux/profile.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/oprofile.h>
#include <linux/sched.h>

#include "oprofile_stats.h"
#include "event_buffer.h"
#include "cpu_buffer.h"
#include "buffer_sync.h"
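
/* Tasks handed over by the task-free notifier collect on dying_tasks.
 * After one full sync pass they move to dead_tasks, and only after a
 * second pass are they actually freed; see process_task_mortuary().
 * marked_cpus records which CPU buffers have been synced since the
 * mortuary was last processed.
 */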
static LIST_HEAD(dying_tasks);
static LIST_HEAD(dead_tasks);
static cpumask_t marked_cpus = CPU_MASK_NONE;
static DEFINE_SPINLOCK(task_mortuary);
static void process_task_mortuary(void);

/* Take ownership of the task struct and place it on the
 * list for processing. Only after two full buffer syncs
 * does the task eventually get freed, because by then
 * we are sure we will not reference it again.
 * Can be invoked from softirq via RCU callback due to
 * call_rcu() of the task struct, hence the _irqsave.
 */
static int
task_free_notify(struct notifier_block *self, unsigned long val, void *data)
{
	unsigned long flags;
	struct task_struct *task = data;
	spin_lock_irqsave(&task_mortuary, flags);
	list_add(&task->tasks, &dying_tasks);
	spin_unlock_irqrestore(&task_mortuary, flags);
	return NOTIFY_OK;
}


/* The task is on its way out. A sync of the buffer means we can catch
 * any remaining samples for this task.
 */
static int
task_exit_notify(struct notifier_block *self, unsigned long val, void *data)
{
	/* To avoid latency problems, we only process the current CPU,
	 * hoping that most samples for the task are on this CPU
	 */
	sync_buffer(raw_smp_processor_id());
	return 0;
}


/* The task is about to try a do_munmap(). We peek at what it's going to
 * do, and if it's an executable region, process the samples first, so
 * we don't lose any. This does not have to be exact, it's a
 * quality-of-implementation (QoI) issue only.
 */
static int
munmap_notify(struct notifier_block *self, unsigned long val, void *data)
{
	unsigned long addr = (unsigned long)data;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *mpnt;

	down_read(&mm->mmap_sem);

	mpnt = find_vma(mm, addr);
	if (mpnt && mpnt->vm_file && (mpnt->vm_flags & VM_EXEC)) {
		up_read(&mm->mmap_sem);
		/* To avoid latency problems, we only process the current CPU,
		 * hoping that most samples for the task are on this CPU
		 */
		sync_buffer(raw_smp_processor_id());
		return 0;
	}

	up_read(&mm->mmap_sem);
	return 0;
}


/* We need to be told about new modules so we don't attribute to a previously
 * loaded module, or drop the samples on the floor.
 */
static int
module_load_notify(struct notifier_block *self, unsigned long val, void *data)
{
#ifdef CONFIG_MODULES
	if (val != MODULE_STATE_COMING)
		return 0;

	/* FIXME: should we process all CPU buffers ? */
	mutex_lock(&buffer_mutex);
	add_event_entry(ESCAPE_CODE);
	add_event_entry(MODULE_LOADED_CODE);
	mutex_unlock(&buffer_mutex);
#endif
	return 0;
}


static struct notifier_block task_free_nb = {
	.notifier_call	= task_free_notify,
};

static struct notifier_block task_exit_nb = {
	.notifier_call	= task_exit_notify,
};

static struct notifier_block munmap_nb = {
	.notifier_call	= munmap_notify,
};

static struct notifier_block module_load_nb = {
	.notifier_call	= module_load_notify,
};


static void end_sync(void)
{
	end_cpu_work();
	/* make sure we don't leak task structs */
	process_task_mortuary();
	process_task_mortuary();
}
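

/* Register all the notifiers the sync code depends on. The error path
 * below unwinds in exact reverse order of registration, the usual
 * goto-based unwind idiom, and ends in end_sync() so that
 * start_cpu_work() is undone as well.
 */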
int sync_start(void)
{
	int err;

	start_cpu_work();

	err = task_handoff_register(&task_free_nb);
	if (err)
		goto out1;
	err = profile_event_register(PROFILE_TASK_EXIT, &task_exit_nb);
	if (err)
		goto out2;
	err = profile_event_register(PROFILE_MUNMAP, &munmap_nb);
	if (err)
		goto out3;
	err = register_module_notifier(&module_load_nb);
	if (err)
		goto out4;

out:
	return err;
out4:
	profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
out3:
	profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
out2:
	task_handoff_unregister(&task_free_nb);
out1:
	end_sync();
	goto out;
}


void sync_stop(void)
{
	unregister_module_notifier(&module_load_nb);
	profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
	profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
	task_handoff_unregister(&task_free_nb);
	end_sync();
}


/* Optimisation. We can manage without taking the dcookie sem
 * because we cannot reach this code without at least one
 * dcookie user still being registered (namely, the reader
 * of the event buffer). */
static inline unsigned long fast_get_dcookie(struct path *path)
{
	unsigned long cookie;

	if (path->dentry->d_cookie)
		return (unsigned long)path->dentry;
	get_dcookie(path, &cookie);
	return cookie;
}
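
/* Note what the fast path above returns: once fs/dcookies.c has
 * registered a dentry, the cookie value handed to userspace is (an
 * encoding of) the dentry address itself, which is why the bare
 * dentry pointer can stand in for a full get_dcookie() call.
 */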


/* Look up the dcookie for the task's first VM_EXECUTABLE mapping,
 * which corresponds loosely to "application name". This is
 * not strictly necessary but allows oprofile to associate
 * shared-library samples with particular applications
 */
static unsigned long get_exec_dcookie(struct mm_struct *mm)
{
	unsigned long cookie = NO_COOKIE;
	struct vm_area_struct *vma;

	if (!mm)
		goto out;

	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (!vma->vm_file)
			continue;
		if (!(vma->vm_flags & VM_EXECUTABLE))
			continue;
		cookie = fast_get_dcookie(&vma->vm_file->f_path);
		break;
	}

out:
	return cookie;
}


/* Convert the EIP value of a sample into a persistent dentry/offset
 * pair that can then be added to the global event buffer. We make
 * sure to do this lookup before a mm->mmap modification happens so
 * we don't lose track.
 */
static unsigned long
lookup_dcookie(struct mm_struct *mm, unsigned long addr, off_t *offset)
{
	unsigned long cookie = NO_COOKIE;
	struct vm_area_struct *vma;

	for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {

		if (addr < vma->vm_start || addr >= vma->vm_end)
			continue;

		if (vma->vm_file) {
			cookie = fast_get_dcookie(&vma->vm_file->f_path);
			*offset = (vma->vm_pgoff << PAGE_SHIFT) + addr -
				vma->vm_start;
		} else {
			/* must be an anonymous map */
			*offset = addr;
		}

		break;
	}

	if (!vma)
		cookie = INVALID_COOKIE;

	return cookie;
}
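
/* The consumer side of the per-CPU ring buffer. Only the sync code
 * advances tail_pos; the sampling side on the owning CPU advances the
 * head. The rmb() presumably pairs with a write barrier on the
 * producer side, so an entry is fully visible before we step past it.
 */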
static void increment_tail(struct oprofile_cpu_buffer *b)
{
	unsigned long new_tail = b->tail_pos + 1;

	rmb();	/* be sure fifo pointers are synchronized */

	if (new_tail < b->buffer_size)
		b->tail_pos = new_tail;
	else
		b->tail_pos = 0;
}


static unsigned long last_cookie = INVALID_COOKIE;

static void add_cpu_switch(int i)
{
	add_event_entry(ESCAPE_CODE);
	add_event_entry(CPU_SWITCH_CODE);
	add_event_entry(i);
	last_cookie = INVALID_COOKIE;
}
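
/* Resetting last_cookie above forces the next user sample after a CPU
 * switch to emit a fresh COOKIE_SWITCH_CODE record; cookie state only
 * makes sense within one CPU's stretch of the event stream.
 */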

static void add_kernel_ctx_switch(unsigned int in_kernel)
{
	add_event_entry(ESCAPE_CODE);
	if (in_kernel)
		add_event_entry(KERNEL_ENTER_SWITCH_CODE);
	else
		add_event_entry(KERNEL_EXIT_SWITCH_CODE);
}

static void
add_user_ctx_switch(struct task_struct const *task, unsigned long cookie)
{
	add_event_entry(ESCAPE_CODE);
	add_event_entry(CTX_SWITCH_CODE);
	add_event_entry(task->pid);
	add_event_entry(cookie);
	/* Another code for daemon back-compat */
	add_event_entry(ESCAPE_CODE);
	add_event_entry(CTX_TGID_CODE);
	add_event_entry(task->tgid);
}


static void add_cookie_switch(unsigned long cookie)
{
	add_event_entry(ESCAPE_CODE);
	add_event_entry(COOKIE_SWITCH_CODE);
	add_event_entry(cookie);
}


static void add_trace_begin(void)
{
	add_event_entry(ESCAPE_CODE);
	add_event_entry(TRACE_BEGIN_CODE);
}

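/* All of the add_* helpers above emit small escape-prefixed records
 * into the event buffer for the userspace daemon to parse. As an
 * illustration (a sketch of the stream, not an exhaustive list), a
 * switch to task P running an image with cookie C appears as:
 *
 *	ESCAPE_CODE, CTX_SWITCH_CODE, P, C,
 *	ESCAPE_CODE, CTX_TGID_CODE, tgid
 *
 * and subsequent user samples are plain (offset, event) pairs,
 * interpreted relative to the most recent cookie switch.
 */
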
#ifdef CONFIG_OPROFILE_IBS

#define IBS_FETCH_CODE_SIZE	2
#define IBS_OP_CODE_SIZE	5
#define IBS_EIP(cpu_buf)	((cpu_buffer_read_entry(cpu_buf))->eip)
#define IBS_EVENT(cpu_buf)	((cpu_buffer_read_entry(cpu_buf))->event)

/*
 * Add IBS fetch and op entries to event buffer
 */
static void add_ibs_begin(struct oprofile_cpu_buffer *cpu_buf, int code,
			  struct mm_struct *mm)
{
	unsigned long rip;
	int i, count;
	unsigned long ibs_cookie = 0;
	off_t offset;

	increment_tail(cpu_buf);	/* move to RIP entry */

	rip = IBS_EIP(cpu_buf);

#ifdef __LP64__
	/* on 64-bit the upper half of the RIP travels in the event field */
	rip += IBS_EVENT(cpu_buf) << 32;
#endif

	if (mm) {
		ibs_cookie = lookup_dcookie(mm, rip, &offset);

		if (ibs_cookie == NO_COOKIE)
			offset = rip;
		if (ibs_cookie == INVALID_COOKIE) {
			atomic_inc(&oprofile_stats.sample_lost_no_mapping);
			offset = rip;
		}
		if (ibs_cookie != last_cookie) {
			add_cookie_switch(ibs_cookie);
			last_cookie = ibs_cookie;
		}
	} else
		offset = rip;

	add_event_entry(ESCAPE_CODE);
	add_event_entry(code);
	add_event_entry(offset);	/* offset from dcookie */

	/* we send the dcookie offset, but also send the raw linear address */
	add_event_entry(IBS_EIP(cpu_buf));
	add_event_entry(IBS_EVENT(cpu_buf));

	if (code == IBS_FETCH_CODE)
		count = IBS_FETCH_CODE_SIZE;	/* IBS fetch is 2 int64s */
	else
		count = IBS_OP_CODE_SIZE;	/* IBS op is 5 int64s */

	for (i = 0; i < count; i++) {
		increment_tail(cpu_buf);
		add_event_entry(IBS_EIP(cpu_buf));
		add_event_entry(IBS_EVENT(cpu_buf));
	}
}

#endif

static void add_sample_entry(unsigned long offset, unsigned long event)
{
	add_event_entry(offset);
	add_event_entry(event);
}


static int add_us_sample(struct mm_struct *mm, struct op_sample *s)
{
	unsigned long cookie;
	off_t offset;

	cookie = lookup_dcookie(mm, s->eip, &offset);

	if (cookie == INVALID_COOKIE) {
		atomic_inc(&oprofile_stats.sample_lost_no_mapping);
		return 0;
	}

	if (cookie != last_cookie) {
		add_cookie_switch(cookie);
		last_cookie = cookie;
	}

	add_sample_entry(offset, s->event);

	return 1;
}


/* Add a sample to the global event buffer. If possible the
 * sample is converted into a persistent dentry/offset pair
 * for later lookup from userspace.
 */
static int
add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
{
	if (in_kernel) {
		add_sample_entry(s->eip, s->event);
		return 1;
	} else if (mm) {
		return add_us_sample(mm, s);
	} else {
		atomic_inc(&oprofile_stats.sample_lost_no_mm);
	}
	return 0;
}
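

/* The two helpers below pair up: take_tasks_mm() takes a reference on
 * the task's mm via get_task_mm() and holds mmap_sem for reading so the
 * vma list stays stable while EIPs are converted; release_mm() undoes
 * both. Both tolerate a NULL mm (e.g. for kernel threads).
 */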
static void release_mm(struct mm_struct *mm)
{
	if (!mm)
		return;
	up_read(&mm->mmap_sem);
	mmput(mm);
}


static struct mm_struct *take_tasks_mm(struct task_struct *task)
{
	struct mm_struct *mm = get_task_mm(task);
	if (mm)
		down_read(&mm->mmap_sem);
	return mm;
}


static inline int is_code(unsigned long val)
{
	return val == ESCAPE_CODE;
}


/* Move tasks along towards death. Any tasks on dead_tasks
 * will definitely have no remaining references in any
 * CPU buffers at this point, because we use two lists,
 * and to have reached the list, it must have gone through
 * one full sync already.
 */
static void process_task_mortuary(void)
{
	unsigned long flags;
	LIST_HEAD(local_dead_tasks);
	struct task_struct *task;
	struct task_struct *ttask;

	spin_lock_irqsave(&task_mortuary, flags);

	/* the dead are freed below; the dying become the dead
	 * of the next pass
	 */
	list_splice_init(&dead_tasks, &local_dead_tasks);
	list_splice_init(&dying_tasks, &dead_tasks);

	spin_unlock_irqrestore(&task_mortuary, flags);

	list_for_each_entry_safe(task, ttask, &local_dead_tasks, tasks) {
		list_del(&task->tasks);
		free_task(task);
	}
}


static void mark_done(int cpu)
{
	int i;

	cpu_set(cpu, marked_cpus);

	for_each_online_cpu(i) {
		if (!cpu_isset(i, marked_cpus))
			return;
	}

	/* All CPUs have been processed at least once,
	 * we can process the mortuary once
	 */
	process_task_mortuary();

	cpus_clear(marked_cpus);
}


/* FIXME: this is not sufficient if we implement syscall barrier backtrace
 * traversal: the code switches to sb_sample_start at the first kernel
 * enter/exit switch, so we would need a fifth state and some special
 * handling in sync_buffer()
 */
typedef enum {
	sb_bt_ignore = -2,
	sb_buffer_start,
	sb_bt_start,
	sb_sample_start,
} sync_buffer_state;
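
/* Rough summary of the state transitions driven by sync_buffer() below
 * (a description of the code, not a formal specification): a buffer
 * starts in sb_buffer_start and becomes sb_sample_start at the first
 * kernel/user switch; CPU_TRACE_BEGIN (and the IBS codes) raise it to
 * sb_bt_start, and a backtrace entry that cannot be mapped drops it to
 * sb_bt_ignore so the remainder of that backtrace is discarded.
 */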

/* Sync one of the CPU's buffers into the global event buffer.
 * Here we need to go through each batch of samples punctuated
 * by context switch notes, taking the task's mmap_sem and doing
 * lookup in task->mm->mmap to convert EIP into dcookie/offset
 * value.
 */
void sync_buffer(int cpu)
{
	struct oprofile_cpu_buffer *cpu_buf = &per_cpu(cpu_buffer, cpu);
	struct mm_struct *mm = NULL;
	struct mm_struct *oldmm;
	struct task_struct *new;
	unsigned long cookie = 0;
	int in_kernel = 1;
	sync_buffer_state state = sb_buffer_start;
#ifndef CONFIG_OPROFILE_IBS
	unsigned int i;
	unsigned long available;
#endif

	mutex_lock(&buffer_mutex);

	add_cpu_switch(cpu);

	/* Remember, only we can modify tail_pos */

#ifndef CONFIG_OPROFILE_IBS
	available = cpu_buffer_entries(cpu_buf);

	for (i = 0; i < available; ++i) {
#else
	while (cpu_buffer_entries(cpu_buf)) {
#endif
		struct op_sample *s = cpu_buffer_read_entry(cpu_buf);

		if (is_code(s->eip)) {
			switch (s->event) {
			case 0:
			case CPU_IS_KERNEL:
				/* kernel/userspace switch */
				in_kernel = s->event;
				if (state == sb_buffer_start)
					state = sb_sample_start;
				add_kernel_ctx_switch(s->event);
				break;
			case CPU_TRACE_BEGIN:
				state = sb_bt_start;
				add_trace_begin();
				break;
#ifdef CONFIG_OPROFILE_IBS
			case IBS_FETCH_BEGIN:
				state = sb_bt_start;
				add_ibs_begin(cpu_buf, IBS_FETCH_CODE, mm);
				break;
			case IBS_OP_BEGIN:
				state = sb_bt_start;
				add_ibs_begin(cpu_buf, IBS_OP_CODE, mm);
				break;
#endif
			default:
				/* userspace context switch */
				oldmm = mm;
				new = (struct task_struct *)s->event;
				release_mm(oldmm);
				mm = take_tasks_mm(new);
				if (mm != oldmm)
					cookie = get_exec_dcookie(mm);
				add_user_ctx_switch(new, cookie);
				break;
			}
		} else if (state >= sb_bt_start &&
			   !add_sample(mm, s, in_kernel)) {
			if (state == sb_bt_start) {
				state = sb_bt_ignore;
				atomic_inc(&oprofile_stats.bt_lost_no_mapping);
			}
		}

		increment_tail(cpu_buf);
	}
	release_mm(mm);

	mark_done(cpu);

	mutex_unlock(&buffer_mutex);
}

/* This function can be used to add a buffer's worth of data directly to
 * the kernel buffer. The buffer is assumed to be a circular buffer.
 * Take the entries from index start and end at index stop, wrapping
 * at max entries.
 */
void oprofile_put_buff(unsigned long *buf, unsigned int start,
		       unsigned int stop, unsigned int max)
{
	int i;

	i = start;

	mutex_lock(&buffer_mutex);
	while (i != stop) {
		add_event_entry(buf[i++]);

		if (i >= max)
			i = 0;
	}

	mutex_unlock(&buffer_mutex);
}
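
/* A worked example with hypothetical values: for max = 8, start = 6 and
 * stop = 2, the loop above emits buf[6], buf[7], buf[0], buf[1], i.e.
 * four entries with one wrap at the end of the ring, all under
 * buffer_mutex so they interleave sanely with sync_buffer().
 */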