Commit | Line | Data |
---|---|---|
1965aae3 PA |
1 | #ifndef _ASM_X86_ALTERNATIVE_H |
2 | #define _ASM_X86_ALTERNATIVE_H | |
6b592570 PA |
3 | |
4 | #include <linux/types.h> | |
5 | #include <linux/stddef.h> | |
edc953fa | 6 | #include <linux/stringify.h> |
6b592570 | 7 | #include <asm/asm.h> |
17f41571 | 8 | #include <asm/ptrace.h> |
6b592570 PA |
9 | |
10 | /* | |
11 | * Alternative inline assembly for SMP. | |
12 | * | |
13 | * The LOCK_PREFIX macro defined here replaces the LOCK and | |
14 | * LOCK_PREFIX macros used everywhere in the source tree. | |
15 | * | |
16 | * SMP alternatives use the same data structures as the other | |
17 | * alternatives and the X86_FEATURE_UP flag to indicate the case of a | |
18 | * UP system running a SMP kernel. The existing apply_alternatives() | |
19 | * works fine for patching a SMP kernel for UP. | |
20 | * | |
21 | * The SMP alternative tables can be kept after boot and contain both | |
22 | * UP and SMP versions of the instructions to allow switching back to | |
23 | * SMP at runtime, when hotplugging in a new CPU, which is especially | |
24 | * useful in virtualized environments. | |
25 | * | |
26 | * The very common lock prefix is handled as special case in a | |
27 | * separate table which is a pure address list without replacement ptr | |
28 | * and size information. That keeps the table sizes small. | |
29 | */ | |
30 | ||
#ifdef CONFIG_SMP
/*
 * Record the address of the following "671:" label (a 32-bit
 * self-relative offset) in the .smp_locks section, so the lock prefix
 * emitted at that address can be patched to a NOP on UP and restored
 * to LOCK when a CPU is hotplugged in.
 */
#define LOCK_PREFIX_HERE \
		".pushsection .smp_locks,\"a\"\n"	\
		".balign 4\n"				\
		".long 671f - .\n" /* offset */		\
		".popsection\n"				\
		"671:"

#define LOCK_PREFIX LOCK_PREFIX_HERE "\n\tlock; "

#else /* ! CONFIG_SMP */
/* UP kernels never need a lock prefix and keep no patch table. */
#define LOCK_PREFIX_HERE ""
#define LOCK_PREFIX ""
#endif
45 | ||
/*
 * One patch-site descriptor, emitted into .altinstructions by the
 * ALTERNATIVE* macros below.  The two offsets are encoded as
 * ".long <label> - ." in ALTINSTR_ENTRY, i.e. relative to the field's
 * own address, which keeps the table position-independent.
 * NOTE(review): layout is consumed by apply_alternatives() and the
 * assembly in ALTINSTR_ENTRY — field order/sizes must stay in sync.
 */
struct alt_instr {
	s32 instr_offset;	/* original instruction */
	s32 repl_offset;	/* offset to replacement instruction */
	u16 cpuid;		/* cpuid bit set for replacement */
	u8  instrlen;		/* length of original instruction */
	u8  replacementlen;	/* length of new instruction, <= instrlen */
};
53 | ||
/* Boot-time entry point: patch every recorded alternative site. */
extern void alternative_instructions(void);
/* Patch the alt_instr table in [start, end) in place. */
extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end);

struct module;

#ifdef CONFIG_SMP
/*
 * Register/unregister a module's .smp_locks range so its lock prefixes
 * can be repatched when switching between UP and SMP operation.
 */
extern void alternatives_smp_module_add(struct module *mod, char *name,
					void *locks, void *locks_end,
					void *text, void *text_end);
extern void alternatives_smp_module_del(struct module *mod);
extern void alternatives_enable_smp(void);
/*
 * NOTE(review): presumably reports whether [start, end) overlaps text
 * reserved by the patching machinery — confirm against alternative.c.
 */
extern int alternatives_text_reserved(void *start, void *end);
extern bool skip_smp_alternatives;
#else
/* !CONFIG_SMP: there are no lock prefixes to track, so all no-ops. */
static inline void alternatives_smp_module_add(struct module *mod, char *name,
					       void *locks, void *locks_end,
					       void *text, void *text_end) {}
static inline void alternatives_smp_module_del(struct module *mod) {}
static inline void alternatives_enable_smp(void) {}
static inline int alternatives_text_reserved(void *start, void *end)
{
	return 0;
}
#endif /* CONFIG_SMP */
78 | ||
954e482b FY |
/* Bracket the original instruction(s) with the 661/662 label pair. */
#define OLDINSTR(oldinstr)	"661:\n\t" oldinstr "\n662:\n"

/* Label names for replacement slot <number>: 663<n> begins, 664<n> ends. */
#define b_replacement(number)	"663"#number
#define e_replacement(number)	"664"#number

/* Assembler expressions for the original and replacement lengths. */
#define alt_slen		"662b-661b"
#define alt_rlen(number)	e_replacement(number)"f-"b_replacement(number)"f"

/* Emit one struct alt_instr entry (field layout must match the struct). */
#define ALTINSTR_ENTRY(feature, number)					      \
	" .long 661b - .\n"				/* label           */ \
	" .long " b_replacement(number)"f - .\n"	/* new instruction */ \
	" .word " __stringify(feature) "\n"		/* feature bit     */ \
	" .byte " alt_slen "\n"				/* source len      */ \
	" .byte " alt_rlen(number) "\n"			/* replacement len */

/*
 * Build-time check that rlen <= slen: if the replacement is longer than
 * the original, 0xff plus the positive difference no longer fits in a
 * byte and the assembler errors out.  The byte itself lands in the
 * .discard section, i.e. it is never kept in the final image.
 */
#define DISCARD_ENTRY(number)						      \
	" .byte 0xff + (" alt_rlen(number) ") - (" alt_slen ")\n"

/* The replacement instruction(s), bracketed by the 663<n>/664<n> labels. */
#define ALTINSTR_REPLACEMENT(newinstr, feature, number)	/* replacement */     \
	b_replacement(number)":\n\t" newinstr "\n" e_replacement(number) ":\n\t"
99 | ||
edc953fa MD |
/*
 * alternative assembly primitive:
 *
 * Emits oldinstr inline plus, in separate sections:
 *  - an alt_instr entry (.altinstructions) describing the patch site,
 *  - a DISCARD_ENTRY byte (.discard) that fails the build if the
 *    replacement is longer than the original,
 *  - the replacement itself (.altinstr_replacement).
 */
#define ALTERNATIVE(oldinstr, newinstr, feature)			\
	OLDINSTR(oldinstr)						\
	".pushsection .altinstructions,\"a\"\n"				\
	ALTINSTR_ENTRY(feature, 1)					\
	".popsection\n"							\
	".pushsection .discard,\"aw\",@progbits\n"			\
	DISCARD_ENTRY(1)						\
	".popsection\n"							\
	".pushsection .altinstr_replacement, \"ax\"\n"			\
	ALTINSTR_REPLACEMENT(newinstr, feature, 1)			\
	".popsection"
954e482b FY |
112 | |
/*
 * Like ALTERNATIVE, but with two feature/replacement pairs for a single
 * site.  If the CPU has feature2, newinstr2 is used; otherwise, with
 * feature1, newinstr1; otherwise oldinstr runs unpatched (see the
 * alternative_input_2() comment below).
 */
#define ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2)\
	OLDINSTR(oldinstr)						\
	".pushsection .altinstructions,\"a\"\n"				\
	ALTINSTR_ENTRY(feature1, 1)					\
	ALTINSTR_ENTRY(feature2, 2)					\
	".popsection\n"							\
	".pushsection .discard,\"aw\",@progbits\n"			\
	DISCARD_ENTRY(1)						\
	DISCARD_ENTRY(2)						\
	".popsection\n"							\
	".pushsection .altinstr_replacement, \"ax\"\n"			\
	ALTINSTR_REPLACEMENT(newinstr1, feature1, 1)			\
	ALTINSTR_REPLACEMENT(newinstr2, feature2, 2)			\
	".popsection"
edc953fa | 127 | |
d61931d8 BP |
128 | /* |
129 | * This must be included *after* the definition of ALTERNATIVE due to | |
130 | * <asm/arch_hweight.h> | |
131 | */ | |
132 | #include <asm/cpufeature.h> | |
133 | ||
6b592570 PA |
/*
 * Alternative instructions for different CPU types or capabilities.
 *
 * This allows the use of optimized instructions even on generic binary
 * kernels.
 *
 * The length of oldinstr must be longer than or equal to the length of
 * newinstr.  It can be padded with nops as needed.
 *
 * For non-barrier-like inlines please define new variants
 * without volatile and memory clobber.
 */
#define alternative(oldinstr, newinstr, feature)			\
	asm volatile (ALTERNATIVE(oldinstr, newinstr, feature) : : : "memory")
6b592570 PA |
148 | |
/*
 * Alternative inline assembly with input.
 *
 * Peculiarities:
 * No memory clobber here.
 * Argument numbers start with 1.
 * Best is to use constraints that are fixed size (like (%1) ... "r")
 * If you use variable sized constraints like "m" or "g" in the
 * replacement make sure to pad to the worst case length.
 * Leaving an unused argument 0 to keep API compatibility.
 */
#define alternative_input(oldinstr, newinstr, feature, input...)	\
	asm volatile (ALTERNATIVE(oldinstr, newinstr, feature)		\
		: : "i" (0), ## input)
6b592570 | 163 | |
5b3e83f4 FY |
/*
 * This is similar to alternative_input.  But it has two features and
 * respective instructions.
 *
 * If CPU has feature2, newinstr2 is used.
 * Otherwise, if CPU has feature1, newinstr1 is used.
 * Otherwise, oldinstr is used.
 */
#define alternative_input_2(oldinstr, newinstr1, feature1, newinstr2,	\
			   feature2, input...)				\
	asm volatile(ALTERNATIVE_2(oldinstr, newinstr1, feature1,	\
		newinstr2, feature2)					\
		: : "i" (0), ## input)
177 | ||
6b592570 PA |
/*
 * Like alternative_input, but with a single output argument.
 * Use ASM_OUTPUT2() below to pass more than one output.
 */
#define alternative_io(oldinstr, newinstr, feature, output, input...)	\
	asm volatile (ALTERNATIVE(oldinstr, newinstr, feature)		\
		: output : "i" (0), ## input)
6b592570 | 182 | |
1b1d9258 JB |
/*
 * Like alternative_io, but for replacing a direct call with another one.
 * Both functions are passed as "i" (immediate symbol) operands.
 */
#define alternative_call(oldfunc, newfunc, feature, output, input...)	\
	asm volatile (ALTERNATIVE("call %P[old]", "call %P[new]", feature) \
		: output : [old] "i" (oldfunc), [new] "i" (newfunc), ## input)
187 | ||
954e482b FY |
/*
 * Like alternative_call, but there are two features and respective functions.
 * If CPU has feature2, function2 is used.
 * Otherwise, if CPU has feature1, function1 is used.
 * Otherwise, old function is used.
 */
#define alternative_call_2(oldfunc, newfunc1, feature1, newfunc2, feature2,   \
			   output, input...)				      \
	asm volatile (ALTERNATIVE_2("call %P[old]", "call %P[new1]", feature1,\
		"call %P[new2]", feature2)				      \
		: output : [old] "i" (oldfunc), [new1] "i" (newfunc1),	      \
		[new2] "i" (newfunc2), ## input)
200 | ||
6b592570 PA |
/*
 * Use this macro if you need more than one output parameter in an
 * alternative_io()/alternative_call() invocation: it lets a
 * comma-separated output list travel through the outer macro as one
 * argument.
 */
#define ASM_OUTPUT2(a...) a

/*
 * Use this macro if you need clobbers but no inputs in
 * alternative_{input,io,call}(): it supplies the dummy "i" (0) input
 * those macros expect and then appends the clobber list.
 */
#define ASM_NO_INPUT_CLOBBER(clbr...) "i" (0) : clbr
212 | ||
6b592570 PA |
struct paravirt_patch_site;
#ifdef CONFIG_PARAVIRT
/* Patch the paravirt call sites recorded in [start, end). */
void apply_paravirt(struct paravirt_patch_site *start,
		    struct paravirt_patch_site *end);
#else
/* !CONFIG_PARAVIRT: no patch sites exist; stub out the patcher. */
static inline void apply_paravirt(struct paravirt_patch_site *start,
				  struct paravirt_patch_site *end)
{}
#define __parainstructions	NULL
#define __parainstructions_end	NULL
#endif
6b592570 | 224 | |
fa6f2cc7 JB |
/* Patch kernel text before SMP/alternatives are up; no locking needed. */
extern void *text_poke_early(void *addr, const void *opcode, size_t len);

/*
 * Clear and restore the kernel write-protection flag on the local CPU.
 * Allows the kernel to edit read-only pages.
 * Side-effect: any interrupt handler running between save and restore will have
 * the ability to write to read-only pages.
 *
 * Warning:
 * Code patching in the UP case is safe if NMIs and MCE handlers are stopped and
 * no thread can be preempted in the instructions being modified (no iret to an
 * invalid instruction possible) or if the instructions are changed from a
 * consistent state to another consistent state atomically.
 * On the local CPU you need to be protected against NMI or MCE handlers seeing
 * an inconsistent instruction while you patch.
 */
extern void *text_poke(void *addr, const void *opcode, size_t len);
/* INT3-breakpoint hook used during live patching; see text_poke_bp(). */
extern int poke_int3_handler(struct pt_regs *regs);
extern void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler);
6b592570 | 244 | |
1965aae3 | 245 | #endif /* _ASM_X86_ALTERNATIVE_H */ |