/*
 *  linux/kernel/profile.c
 *  Simple profiling. Manages a direct-mapped profile hit count buffer,
 *  with configurable resolution, support for restricting the cpus on
 *  which profiling is done, and switching between cpu time and
 *  schedule() calls via kernel command line parameters passed at boot.
 *
 *  Scheduler profiling support, Arjan van de Ven and Ingo Molnar,
 *	Red Hat, July 2004
 *  Consolidation of architecture support code for profiling,
 *	William Irwin, Oracle, July 2004
 *  Amortized hit count accounting via per-cpu open-addressed hashtables
 *	to resolve timer interrupt livelocks, William Irwin, Oracle, 2004
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/profile.h>
#include <linux/bootmem.h>
#include <linux/notifier.h>
#include <linux/mm.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/highmem.h>
#include <asm/sections.h>
#include <asm/semaphore.h>
struct profile_hit {
        u32 pc, hits;
};
#define PROFILE_GRPSHIFT 3
#define PROFILE_GRPSZ (1 << PROFILE_GRPSHIFT)
#define NR_PROFILE_HIT (PAGE_SIZE/sizeof(struct profile_hit))
#define NR_PROFILE_GRP (NR_PROFILE_HIT/PROFILE_GRPSZ)

/* Oprofile timer tick hook */
int (*timer_hook)(struct pt_regs *) __read_mostly;

static atomic_t *prof_buffer;
static unsigned long prof_len, prof_shift;
static int prof_on __read_mostly;
static cpumask_t prof_cpu_mask = CPU_MASK_ALL;
#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct profile_hit *[2], cpu_profile_hits);
static DEFINE_PER_CPU(int, cpu_profile_flip);
static DECLARE_MUTEX(profile_flip_mutex);
#endif /* CONFIG_SMP */

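/*
 * Command line syntax handled below (shift values here are only
 * illustrative): "profile=<shift>" enables CPU-time profiling and
 * "profile=schedule[,<shift>]" enables schedule() call profiling.
 * One counter covers 2^<shift> bytes of kernel text, so e.g.
 * "profile=2" gives one counter per four bytes of text.
 */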
static int __init profile_setup(char *str)
{
        static char __initdata schedstr[] = "schedule";
        int par;

        if (!strncmp(str, schedstr, strlen(schedstr))) {
                prof_on = SCHED_PROFILING;
                if (str[strlen(schedstr)] == ',')
                        str += strlen(schedstr) + 1;
                if (get_option(&str, &par))
                        prof_shift = par;
                printk(KERN_INFO
                        "kernel schedule profiling enabled (shift: %ld)\n",
                        prof_shift);
        } else if (get_option(&str, &par)) {
                prof_shift = par;
                prof_on = CPU_PROFILING;
                printk(KERN_INFO "kernel profiling enabled (shift: %ld)\n",
                        prof_shift);
        }
        return 1;
}
__setup("profile=", profile_setup);


void __init profile_init(void)
{
        if (!prof_on)
                return;

        /* only text is profiled */
        prof_len = (_etext - _stext) >> prof_shift;
        prof_buffer = alloc_bootmem(prof_len * sizeof(atomic_t));
}

/* Profile event notifications */

#ifdef CONFIG_PROFILING

static DECLARE_RWSEM(profile_rwsem);
static DEFINE_RWLOCK(handoff_lock);
static struct notifier_block *task_exit_notifier;
static struct notifier_block *task_free_notifier;
static struct notifier_block *munmap_notifier;

void profile_task_exit(struct task_struct *task)
{
        down_read(&profile_rwsem);
        notifier_call_chain(&task_exit_notifier, 0, task);
        up_read(&profile_rwsem);
}

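/*
 * A task_free notifier returning NOTIFY_OK is taken to have assumed
 * responsibility for the task_struct; the 1 returned here tells the
 * caller dropping the last reference not to free the task itself.
 */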
int profile_handoff_task(struct task_struct *task)
{
        int ret;
        read_lock(&handoff_lock);
        ret = notifier_call_chain(&task_free_notifier, 0, task);
        read_unlock(&handoff_lock);
        return (ret == NOTIFY_OK) ? 1 : 0;
}

void profile_munmap(unsigned long addr)
{
        down_read(&profile_rwsem);
        notifier_call_chain(&munmap_notifier, 0, (void *)addr);
        up_read(&profile_rwsem);
}

int task_handoff_register(struct notifier_block *n)
{
        int err = -EINVAL;

        write_lock(&handoff_lock);
        err = notifier_chain_register(&task_free_notifier, n);
        write_unlock(&handoff_lock);
        return err;
}

int task_handoff_unregister(struct notifier_block *n)
{
        int err = -EINVAL;

        write_lock(&handoff_lock);
        err = notifier_chain_unregister(&task_free_notifier, n);
        write_unlock(&handoff_lock);
        return err;
}

int profile_event_register(enum profile_type type, struct notifier_block *n)
{
        int err = -EINVAL;

        down_write(&profile_rwsem);

        switch (type) {
        case PROFILE_TASK_EXIT:
                err = notifier_chain_register(&task_exit_notifier, n);
                break;
        case PROFILE_MUNMAP:
                err = notifier_chain_register(&munmap_notifier, n);
                break;
        }

        up_write(&profile_rwsem);

        return err;
}

int profile_event_unregister(enum profile_type type, struct notifier_block *n)
{
        int err = -EINVAL;

        down_write(&profile_rwsem);

        switch (type) {
        case PROFILE_TASK_EXIT:
                err = notifier_chain_unregister(&task_exit_notifier, n);
                break;
        case PROFILE_MUNMAP:
                err = notifier_chain_unregister(&munmap_notifier, n);
                break;
        }

        up_write(&profile_rwsem);
        return err;
}

int register_timer_hook(int (*hook)(struct pt_regs *))
{
        if (timer_hook)
                return -EBUSY;
        timer_hook = hook;
        return 0;
}

void unregister_timer_hook(int (*hook)(struct pt_regs *))
{
        WARN_ON(hook != timer_hook);
        timer_hook = NULL;
        /* make sure all CPUs see the NULL hook */
        synchronize_sched();  /* Allow ongoing interrupts to complete. */
}

EXPORT_SYMBOL_GPL(register_timer_hook);
EXPORT_SYMBOL_GPL(unregister_timer_hook);
EXPORT_SYMBOL_GPL(task_handoff_register);
EXPORT_SYMBOL_GPL(task_handoff_unregister);

#endif /* CONFIG_PROFILING */

EXPORT_SYMBOL_GPL(profile_event_register);
EXPORT_SYMBOL_GPL(profile_event_unregister);

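/*
 * Illustrative sketch of a timer-hook client such as oprofile might
 * provide (hypothetical names, not compiled here):
 */
#if 0
static int example_timer_notify(struct pt_regs *regs)
{
        /* record one sample at profile_pc(regs) */
        return 0;
}

static int __init example_init(void)
{
        return register_timer_hook(example_timer_notify);
}

static void __exit example_exit(void)
{
        unregister_timer_hook(example_timer_notify);
}
#endif
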
#ifdef CONFIG_SMP
/*
 * Each cpu has a pair of open-addressed hashtables for pending
 * profile hits. read_profile() IPIs all cpus to request that they
 * flip buffers, then flushes the contents to prof_buffer itself.
 * Flip requests are serialized by the profile_flip_mutex. The sole
 * purpose of keeping a second hashtable is to avoid the cacheline
 * contention that would otherwise arise when flushing the pending
 * profile hits required for accurate reported counts, which would
 * resurrect the interrupt livelock issue.
 *
 * The hashtables are indexed by profile buffer slot; each entry
 * holds the number of hits pending against that slot on one cpu.
 * When a hashtable overflows, all pending hits are accounted to
 * their corresponding profile buffer slots with atomic_add() and
 * the hashtable is emptied. As an entry may accumulate many hits
 * against a single slot, this amortizes a number of atomic profile
 * buffer increments likely to be far larger than the number of
 * entries in the hashtable, particularly since the number of
 * distinct profile buffer positions hit during short intervals
 * (e.g. several seconds) is usually very small. Exclusion from
 * buffer flipping is provided by interrupt disablement (note that
 * for SCHED_PROFILING, profile_hit() may be called from process
 * context). The hash function is meant to be lightweight rather
 * than strong, and was vaguely inspired by ppc64 firmware-supported
 * inverted pagetable hash functions, but uses a full table of
 * finite collision chains, not just pairs of them.
 *
 * -- wli
 */
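/*
 * Worked example of the geometry (assuming 4 KiB pages, so that
 * sizeof(struct profile_hit) == 8): NR_PROFILE_HIT = 4096/8 = 512
 * entries per table, in NR_PROFILE_GRP = 512/8 = 64 groups of
 * PROFILE_GRPSZ = 8. A hit on slot pc first probes the 8 entries of
 * group (pc & 63); if that group is full it advances by the secondary
 * stride derived from ~(pc << 1), wrapping modulo 512. The stride is
 * an odd number of groups, so all 64 groups are visited before the
 * probe returns to the primary group and the table is flushed.
 */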
static void __profile_flip_buffers(void *unused)
{
        int cpu = smp_processor_id();

        per_cpu(cpu_profile_flip, cpu) = !per_cpu(cpu_profile_flip, cpu);
}

static void profile_flip_buffers(void)
{
        int i, j, cpu;

        down(&profile_flip_mutex);
        j = per_cpu(cpu_profile_flip, get_cpu());
        put_cpu();
        on_each_cpu(__profile_flip_buffers, NULL, 0, 1);
        for_each_online_cpu(cpu) {
                struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[j];
                for (i = 0; i < NR_PROFILE_HIT; ++i) {
                        if (!hits[i].hits) {
                                if (hits[i].pc)
                                        hits[i].pc = 0;
                                continue;
                        }
                        atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
                        hits[i].hits = hits[i].pc = 0;
                }
        }
        up(&profile_flip_mutex);
}

static void profile_discard_flip_buffers(void)
{
        int i, cpu;

        down(&profile_flip_mutex);
        i = per_cpu(cpu_profile_flip, get_cpu());
        put_cpu();
        on_each_cpu(__profile_flip_buffers, NULL, 0, 1);
        for_each_online_cpu(cpu) {
                struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[i];
                memset(hits, 0, NR_PROFILE_HIT * sizeof(struct profile_hit));
        }
        up(&profile_flip_mutex);
}

void profile_hit(int type, void *__pc)
{
        unsigned long primary, secondary, flags, pc = (unsigned long)__pc;
        int i, j, cpu;
        struct profile_hit *hits;

        if (prof_on != type || !prof_buffer)
                return;
        pc = min((pc - (unsigned long)_stext) >> prof_shift, prof_len - 1);
        i = primary = (pc & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT;
        secondary = (~(pc << 1) & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT;
        cpu = get_cpu();
        hits = per_cpu(cpu_profile_hits, cpu)[per_cpu(cpu_profile_flip, cpu)];
        if (!hits) {
                put_cpu();
                return;
        }
        local_irq_save(flags);
        do {
                for (j = 0; j < PROFILE_GRPSZ; ++j) {
                        if (hits[i + j].pc == pc) {
                                hits[i + j].hits++;
                                goto out;
                        } else if (!hits[i + j].hits) {
                                hits[i + j].pc = pc;
                                hits[i + j].hits = 1;
                                goto out;
                        }
                }
                i = (i + secondary) & (NR_PROFILE_HIT - 1);
        } while (i != primary);
        /* the table is full: account this hit directly and flush the rest */
        atomic_inc(&prof_buffer[pc]);
        for (i = 0; i < NR_PROFILE_HIT; ++i) {
                atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
                hits[i].pc = hits[i].hits = 0;
        }
out:
        local_irq_restore(flags);
        put_cpu();
}

#ifdef CONFIG_HOTPLUG_CPU
static int __devinit profile_cpu_callback(struct notifier_block *info,
                                        unsigned long action, void *__cpu)
{
        int node, cpu = (unsigned long)__cpu;
        struct page *page;

        switch (action) {
        case CPU_UP_PREPARE:
                node = cpu_to_node(cpu);
                per_cpu(cpu_profile_flip, cpu) = 0;
                if (!per_cpu(cpu_profile_hits, cpu)[1]) {
                        page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
                        if (!page)
                                return NOTIFY_BAD;
                        per_cpu(cpu_profile_hits, cpu)[1] = page_address(page);
                }
                if (!per_cpu(cpu_profile_hits, cpu)[0]) {
                        page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
                        if (!page)
                                goto out_free;
                        per_cpu(cpu_profile_hits, cpu)[0] = page_address(page);
                }
                break;
        out_free:
                page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
                per_cpu(cpu_profile_hits, cpu)[1] = NULL;
                __free_page(page);
                return NOTIFY_BAD;
        case CPU_ONLINE:
                cpu_set(cpu, prof_cpu_mask);
                break;
        case CPU_UP_CANCELED:
        case CPU_DEAD:
                cpu_clear(cpu, prof_cpu_mask);
                if (per_cpu(cpu_profile_hits, cpu)[0]) {
                        page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[0]);
                        per_cpu(cpu_profile_hits, cpu)[0] = NULL;
                        __free_page(page);
                }
                if (per_cpu(cpu_profile_hits, cpu)[1]) {
                        page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
                        per_cpu(cpu_profile_hits, cpu)[1] = NULL;
                        __free_page(page);
                }
                break;
        }
        return NOTIFY_OK;
}
#endif /* CONFIG_HOTPLUG_CPU */
#else /* !CONFIG_SMP */
#define profile_flip_buffers()          do { } while (0)
#define profile_discard_flip_buffers()  do { } while (0)

void profile_hit(int type, void *__pc)
{
        unsigned long pc;

        if (prof_on != type || !prof_buffer)
                return;
        pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
        atomic_inc(&prof_buffer[min(pc, prof_len - 1)]);
}
#endif /* !CONFIG_SMP */

void profile_tick(int type, struct pt_regs *regs)
{
        if (type == CPU_PROFILING && timer_hook)
                timer_hook(regs);
        if (!user_mode(regs) && cpu_isset(smp_processor_id(), prof_cpu_mask))
                profile_hit(type, (void *)profile_pc(regs));
}

#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
#include <asm/uaccess.h>
#include <asm/ptrace.h>

static int prof_cpu_mask_read_proc(char *page, char **start, off_t off,
                        int count, int *eof, void *data)
{
        int len = cpumask_scnprintf(page, count, *(cpumask_t *)data);
        if (count - len < 2)
                return -EINVAL;
        len += sprintf(page + len, "\n");
        return len;
}

static int prof_cpu_mask_write_proc(struct file *file, const char __user *buffer,
                                        unsigned long count, void *data)
{
        cpumask_t *mask = (cpumask_t *)data;
        unsigned long full_count = count, err;
        cpumask_t new_value;

        err = cpumask_parse(buffer, count, new_value);
        if (err)
                return err;

        *mask = new_value;
        return full_count;
}

void create_prof_cpu_mask(struct proc_dir_entry *root_irq_dir)
{
        struct proc_dir_entry *entry;

        /* create /proc/irq/prof_cpu_mask */
        if (!(entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir)))
                return;
        entry->nlink = 1;
        entry->data = (void *)&prof_cpu_mask;
        entry->read_proc = prof_cpu_mask_read_proc;
        entry->write_proc = prof_cpu_mask_write_proc;
}
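
/*
 * Example (illustrative): restrict profiling interrupts to cpu 0 by
 * writing a hex cpumask, e.g. "echo 1 > /proc/irq/prof_cpu_mask";
 * profile_tick() above only counts hits for cpus in this mask.
 */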

/*
 * This function accesses profiling information. The returned data is
 * binary: the sampling step followed by the actual contents of the
 * profile buffer. The readprofile program is recommended for turning
 * this data into meaningful information.
 */
static ssize_t
read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t read;
        char *pnt;
        unsigned int sample_step = 1 << prof_shift;

        profile_flip_buffers();
        if (p >= (prof_len + 1) * sizeof(unsigned int))
                return 0;
        if (count > (prof_len + 1) * sizeof(unsigned int) - p)
                count = (prof_len + 1) * sizeof(unsigned int) - p;
        read = 0;

        while (p < sizeof(unsigned int) && count > 0) {
                put_user(*((char *)(&sample_step) + p), buf);
                buf++; p++; count--; read++;
        }
        pnt = (char *)prof_buffer + p - sizeof(atomic_t);
        if (copy_to_user(buf, (void *)pnt, count))
                return -EFAULT;
        read += count;
        *ppos += read;
        return read;
}
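
/*
 * Example (illustrative): "readprofile -m /boot/System.map" reads the
 * raw counters from /proc/profile and maps them back to kernel symbols.
 */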

/*
 * Writing to /proc/profile resets the counters.
 *
 * Writing a 'profiling multiplier' value into it also adjusts the
 * profiling interrupt frequency, on architectures that support this.
 */
static ssize_t write_profile(struct file *file, const char __user *buf,
                             size_t count, loff_t *ppos)
{
#ifdef CONFIG_SMP
        extern int setup_profiling_timer(unsigned int multiplier);

        if (count == sizeof(int)) {
                unsigned int multiplier;

                if (copy_from_user(&multiplier, buf, sizeof(int)))
                        return -EFAULT;

                if (setup_profiling_timer(multiplier))
                        return -EINVAL;
        }
#endif
        profile_discard_flip_buffers();
        memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
        return count;
}
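
/*
 * Example (illustrative): "readprofile -r" performs the reset write
 * described above, and "readprofile -M <n>" (where supported) writes
 * a binary int to set the multiplier.
 */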

static struct file_operations proc_profile_operations = {
        .read           = read_profile,
        .write          = write_profile,
};

#ifdef CONFIG_SMP
static void __init profile_nop(void *unused)
{
}

static int __init create_hash_tables(void)
{
        int cpu;

        for_each_online_cpu(cpu) {
                int node = cpu_to_node(cpu);
                struct page *page;

                page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
                if (!page)
                        goto out_cleanup;
                per_cpu(cpu_profile_hits, cpu)[1]
                                = (struct profile_hit *)page_address(page);
                page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
                if (!page)
                        goto out_cleanup;
                per_cpu(cpu_profile_hits, cpu)[0]
                                = (struct profile_hit *)page_address(page);
        }
        return 0;
out_cleanup:
        prof_on = 0;
        /* make the write to prof_on visible before draining profilers */
        smp_mb();
        on_each_cpu(profile_nop, NULL, 0, 1);
        for_each_online_cpu(cpu) {
                struct page *page;

                if (per_cpu(cpu_profile_hits, cpu)[0]) {
                        page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[0]);
                        per_cpu(cpu_profile_hits, cpu)[0] = NULL;
                        __free_page(page);
                }
                if (per_cpu(cpu_profile_hits, cpu)[1]) {
                        page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
                        per_cpu(cpu_profile_hits, cpu)[1] = NULL;
                        __free_page(page);
                }
        }
        return -1;
}
#else
#define create_hash_tables()    ({ 0; })
#endif

static int __init create_proc_profile(void)
{
        struct proc_dir_entry *entry;

        if (!prof_on)
                return 0;
        if (create_hash_tables())
                return -1;
        if (!(entry = create_proc_entry("profile", S_IWUSR | S_IRUGO, NULL)))
                return 0;
        entry->proc_fops = &proc_profile_operations;
        entry->size = (1 + prof_len) * sizeof(atomic_t);
        hotcpu_notifier(profile_cpu_callback, 0);
        return 0;
}
module_init(create_proc_profile);
#endif /* CONFIG_PROC_FS */