ftrace: comment arch ftrace code
arch/x86/kernel/ftrace.c
/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks go to Ingo Molnar, for suggesting the idea.
 * Mathieu Desnoyers, for suggesting postponing the modifications.
 * Arjan van de Ven, for keeping me straight, and explaining to me
 * the dangers of modifying code on the run.
 */

#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/list.h>

#include <asm/ftrace.h>
#include <asm/nops.h>


/* Long is fine, even if it is only 4 bytes ;-) */
static unsigned long *ftrace_nop;

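/*
 * On x86, "call rel32" is encoded as the opcode byte 0xe8 followed by
 * a signed 32-bit displacement relative to the next instruction, for
 * MCOUNT_INSN_SIZE (5) bytes in total. The union below lets the call
 * instruction be built field by field and read back as raw bytes.
 */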
union ftrace_code_union {
	char code[MCOUNT_INSN_SIZE];
	struct {
		char e8;
		int offset;
	} __attribute__((packed));
};
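
/*
 * Compute the signed 32-bit displacement from @ip to @addr. Callers
 * pass ip + MCOUNT_INSN_SIZE because x86 resolves relative calls
 * against the address of the instruction that follows the call.
 */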
static int notrace ftrace_calc_offset(long ip, long addr)
{
	return (int)(addr - ip);
}

notrace unsigned char *ftrace_nop_replace(void)
{
	return (char *)ftrace_nop;
}

notrace unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
	static union ftrace_code_union calc;

	calc.e8 = 0xe8;
	calc.offset = ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr);

	/*
	 * No locking needed, this must be called via kstop_machine
	 * which in essence is like running on a uniprocessor machine.
	 */
	return calc.code;
}
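
/*
 * Worked example with made-up addresses: patching a call site at
 * ip = 0x1000 to call a tracer at addr = 0x2000 gives
 * offset = 0x2000 - (0x1000 + 5) = 0xffb, so the bytes emitted are
 * e8 fb 0f 00 00 (opcode plus little-endian displacement).
 */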

notrace int
ftrace_modify_code(unsigned long ip, unsigned char *old_code,
		   unsigned char *new_code)
{
	unsigned char replaced[MCOUNT_INSN_SIZE];

	/*
	 * Note: due to modules and __init, code can disappear and
	 * change; we need to protect against faulting as well as code
	 * changing. We do this by using the __copy_*_user functions.
	 *
	 * No real locking needed, this code is run through
	 * kstop_machine, or before SMP starts.
	 */

	/* read the text we want to modify */
	if (__copy_from_user_inatomic(replaced, (char __user *)ip,
				      MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* Make sure it is what we expect it to be */
	if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
		return -EINVAL;

	/* replace the text with the new text */
	if (__copy_to_user_inatomic((char __user *)ip, new_code,
				    MCOUNT_INSN_SIZE))
		return -EPERM;

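	/*
	 * sync_core() serializes the CPU, forcing it to re-fetch
	 * instruction bytes so it cannot keep executing a stale,
	 * pre-modification copy from its prefetch/decode pipeline.
	 */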
	sync_core();

	return 0;
}
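
/*
 * ftrace_call is a patchable call instruction inside the mcount
 * trampoline provided by the arch entry assembly; rewriting it
 * switches which tracer function the trampoline calls.
 */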
notrace int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip = (unsigned long)(&ftrace_call);
	unsigned char old[MCOUNT_INSN_SIZE], *new;
	int ret;

	memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
	new = ftrace_call_replace(ip, (unsigned long)func);
	ret = ftrace_modify_code(ip, old, new);

	return ret;
}

notrace int ftrace_mcount_set(unsigned long *data)
{
	/* mcount is initialized as a nop */
	*data = 0;
	return 0;
}

int __init ftrace_dyn_arch_init(void *data)
{
	extern const unsigned char ftrace_test_p6nop[];
	extern const unsigned char ftrace_test_nop5[];
	extern const unsigned char ftrace_test_jmp[];
	int faulted = 0;

	/*
	 * There is no good nop for all x86 archs.
	 * We will default to using the P6_NOP5, but first we
	 * will test to make sure that the nop will actually
	 * work on this CPU. If it faults, we will then
	 * fall back to a less efficient 5 byte nop. If that fails
	 * we then just use a jmp as our nop. This isn't the most
	 * efficient nop, but we cannot use a multi-part nop,
	 * since we would then risk being preempted in the middle
	 * of that nop, and if we enabled tracing at that point, it
	 * might cause a system crash.
	 *
	 * TODO: check the cpuid to determine the best nop.
	 */
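
	/*
	 * The asm below executes each candidate nop in turn. A nop
	 * that faults is caught by its exception-table entry, whose
	 * fixup code sets %0 (faulted) to 1 or 2 and resumes at the
	 * next candidate (or past the test). The fallback 5-byte nop
	 * is four 0x66 operand-size prefixes on a plain 0x90 nop.
	 */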
	asm volatile (
		"jmp ftrace_test_jmp\n"
		/* This code needs to stay around */
		".section .text, \"ax\"\n"
		"ftrace_test_jmp:"
		"jmp ftrace_test_p6nop\n"
		"nop\n"
		"nop\n"
		"nop\n" /* 2 byte jmp + 3 bytes */
		"ftrace_test_p6nop:"
		P6_NOP5
		"jmp 1f\n"
		"ftrace_test_nop5:"
		".byte 0x66,0x66,0x66,0x66,0x90\n"
		"jmp 1f\n"
		".previous\n"
		"1:"
		".section .fixup, \"ax\"\n"
		"2:	movl $1, %0\n"
		"	jmp ftrace_test_nop5\n"
		"3:	movl $2, %0\n"
		"	jmp 1b\n"
		".previous\n"
		_ASM_EXTABLE(ftrace_test_p6nop, 2b)
		_ASM_EXTABLE(ftrace_test_nop5, 3b)
		: "=r"(faulted) : "0" (faulted));

	switch (faulted) {
	case 0:
		pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n");
		ftrace_nop = (unsigned long *)ftrace_test_p6nop;
		break;
	case 1:
		pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n");
		ftrace_nop = (unsigned long *)ftrace_test_nop5;
		break;
	case 2:
		pr_info("ftrace: converting mcount calls to jmp . + 5\n");
		ftrace_nop = (unsigned long *)ftrace_test_jmp;
		break;
	}

	/* The return code is returned via data */
	*(unsigned long *)data = 0;

	return 0;
}
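
/*
 * Usage sketch (not part of this file): when the ftrace core enables
 * tracing for a call site at ip, it swaps the nop back to a call,
 * roughly:
 *
 *	unsigned char *old = ftrace_nop_replace();
 *	unsigned char *new = ftrace_call_replace(ip, (unsigned long)func);
 *
 *	ftrace_modify_code(ip, old, new);
 */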