/*
 * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
 * Copyright 2010-2011 Freescale Semiconductor, Inc.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/kvm_host.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/kvm_para.h>
#include <linux/slab.h>
#include <linux/of.h>

#include <asm/reg.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>
#include <asm/disassemble.h>
#include <asm/ppc-opcode.h>
#include <asm/epapr_hcalls.h>

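/*
 * The host maps a "magic page" of shared state (struct
 * kvm_vcpu_arch_shared) into the last 4096 bytes of the guest's
 * effective address space, so a load or store with a small negative
 * displacement from r0 reaches it without needing a base register.
 * magic_var() turns a field of that struct into such an address; the
 * concrete offsets depend on the struct layout in asm/kvm_para.h.
 */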
#define KVM_MAGIC_PAGE		(-4096L)
#define magic_var(x) KVM_MAGIC_PAGE + offsetof(struct kvm_vcpu_arch_shared, x)

#define KVM_INST_LWZ		0x80000000
#define KVM_INST_STW		0x90000000
#define KVM_INST_LD		0xe8000000
#define KVM_INST_STD		0xf8000000
#define KVM_INST_NOP		0x60000000
#define KVM_INST_B		0x48000000
#define KVM_INST_B_MASK		0x03ffffff
#define KVM_INST_B_MAX		0x01ffffff
#define KVM_INST_LI		0x38000000

#define KVM_MASK_RT		0x03e00000
#define KVM_RT_30		0x03c00000
#define KVM_MASK_RB		0x0000f800
#define KVM_INST_MFMSR		0x7c0000a6

#define SPR_FROM		0
#define SPR_TO			0x100

#define KVM_INST_SPR(sprn, moveto) (0x7c0002a6 | \
				    (((sprn) & 0x1f) << 16) | \
				    (((sprn) & 0x3e0) << 6) | \
				    (moveto))

#define KVM_INST_MFSPR(sprn)	KVM_INST_SPR(sprn, SPR_FROM)
#define KVM_INST_MTSPR(sprn)	KVM_INST_SPR(sprn, SPR_TO)
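/*
 * mfspr/mtspr encode the SPR number with its two 5-bit halves swapped,
 * which the shifts above reproduce. Worked example: SPRN_SRR0 is 26
 * (0x1a), so KVM_INST_MFSPR(SPRN_SRR0) is 0x7c0002a6 | (0x1a << 16) =
 * 0x7c1a02a6, i.e. "mfspr r0, srr0"; the real RT field is OR'd in
 * later from the instruction being matched. SPR_TO sets bit 0x100,
 * turning the mfspr opcode into mtspr.
 */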

#define KVM_INST_TLBSYNC	0x7c00046c
#define KVM_INST_MTMSRD_L0	0x7c000164
#define KVM_INST_MTMSRD_L1	0x7c010164
#define KVM_INST_MTMSR		0x7c000124

#define KVM_INST_WRTEE		0x7c000106
#define KVM_INST_WRTEEI_0	0x7c000146
#define KVM_INST_WRTEEI_1	0x7c008146

#define KVM_INST_MTSRIN		0x7c0001e4

static bool kvm_patching_worked = true;
char kvm_tmp[1024 * 1024];
static int kvm_tmp_index;

static inline void kvm_patch_ins(u32 *inst, u32 new_inst)
{
	*inst = new_inst;
	flush_icache_range((ulong)inst, (ulong)inst + 4);
}

static void kvm_patch_ins_ll(u32 *inst, long addr, u32 rt)
{
#ifdef CONFIG_64BIT
	kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc));
#else
	kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000fffc));
#endif
}

static void kvm_patch_ins_ld(u32 *inst, long addr, u32 rt)
{
#ifdef CONFIG_64BIT
	kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc));
#else
	kvm_patch_ins(inst, KVM_INST_LWZ | rt | ((addr + 4) & 0x0000fffc));
#endif
}

static void kvm_patch_ins_lwz(u32 *inst, long addr, u32 rt)
{
	kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000ffff));
}

static void kvm_patch_ins_std(u32 *inst, long addr, u32 rt)
{
#ifdef CONFIG_64BIT
	kvm_patch_ins(inst, KVM_INST_STD | rt | (addr & 0x0000fffc));
#else
	kvm_patch_ins(inst, KVM_INST_STW | rt | ((addr + 4) & 0x0000fffc));
#endif
}

static void kvm_patch_ins_stw(u32 *inst, long addr, u32 rt)
{
	kvm_patch_ins(inst, KVM_INST_STW | rt | (addr & 0x0000fffc));
}

static void kvm_patch_ins_nop(u32 *inst)
{
	kvm_patch_ins(inst, KVM_INST_NOP);
}

static void kvm_patch_ins_b(u32 *inst, int addr)
{
#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_PPC_BOOK3S)
	/* On relocatable kernels interrupt handlers and our code
	   can be in different regions, so we don't patch them */

	if ((ulong)inst < (ulong)&__end_interrupts)
		return;
#endif

	kvm_patch_ins(inst, KVM_INST_B | (addr & KVM_INST_B_MASK));
}

static u32 *kvm_alloc(int len)
{
	u32 *p;

	if ((kvm_tmp_index + len) > ARRAY_SIZE(kvm_tmp)) {
		printk(KERN_ERR "KVM: No more space (%d + %d)\n",
				kvm_tmp_index, len);
		kvm_patching_worked = false;
		return NULL;
	}

	p = (void *)&kvm_tmp[kvm_tmp_index];
	kvm_tmp_index += len;

	return p;
}

extern u32 kvm_emulate_mtmsrd_branch_offs;
extern u32 kvm_emulate_mtmsrd_reg_offs;
extern u32 kvm_emulate_mtmsrd_orig_ins_offs;
extern u32 kvm_emulate_mtmsrd_len;
extern u32 kvm_emulate_mtmsrd[];

static void kvm_patch_ins_mtmsrd(u32 *inst, u32 rt)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_mtmsrd_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_mtmsrd_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_mtmsrd, kvm_emulate_mtmsrd_len * 4);
	p[kvm_emulate_mtmsrd_branch_offs] |= distance_end & KVM_INST_B_MASK;
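	/*
	 * The template clobbers r30/r31 but stashes them in the magic
	 * page's scratch slots first (judging by the cases below, r31
	 * lands in scratch1 and r30 in scratch2). If the mtmsrd source
	 * register is one of those two, reload the saved value from the
	 * scratch slot into r30 rather than using the register directly.
	 */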
	switch (get_rt(rt)) {
	case 30:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsrd_reg_offs],
				 magic_var(scratch2), KVM_RT_30);
		break;
	case 31:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsrd_reg_offs],
				 magic_var(scratch1), KVM_RT_30);
		break;
	default:
		p[kvm_emulate_mtmsrd_reg_offs] |= rt;
		break;
	}

	p[kvm_emulate_mtmsrd_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtmsrd_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

extern u32 kvm_emulate_mtmsr_branch_offs;
extern u32 kvm_emulate_mtmsr_reg1_offs;
extern u32 kvm_emulate_mtmsr_reg2_offs;
extern u32 kvm_emulate_mtmsr_orig_ins_offs;
extern u32 kvm_emulate_mtmsr_len;
extern u32 kvm_emulate_mtmsr[];

static void kvm_patch_ins_mtmsr(u32 *inst, u32 rt)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_mtmsr_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_mtmsr_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_mtmsr, kvm_emulate_mtmsr_len * 4);
	p[kvm_emulate_mtmsr_branch_offs] |= distance_end & KVM_INST_B_MASK;

	/* Make clobbered registers work too */
	switch (get_rt(rt)) {
	case 30:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg1_offs],
				 magic_var(scratch2), KVM_RT_30);
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg2_offs],
				 magic_var(scratch2), KVM_RT_30);
		break;
	case 31:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg1_offs],
				 magic_var(scratch1), KVM_RT_30);
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg2_offs],
				 magic_var(scratch1), KVM_RT_30);
		break;
	default:
		p[kvm_emulate_mtmsr_reg1_offs] |= rt;
		p[kvm_emulate_mtmsr_reg2_offs] |= rt;
		break;
	}

	p[kvm_emulate_mtmsr_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtmsr_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

#ifdef CONFIG_BOOKE

extern u32 kvm_emulate_wrtee_branch_offs;
extern u32 kvm_emulate_wrtee_reg_offs;
extern u32 kvm_emulate_wrtee_orig_ins_offs;
extern u32 kvm_emulate_wrtee_len;
extern u32 kvm_emulate_wrtee[];

static void kvm_patch_ins_wrtee(u32 *inst, u32 rt, int imm_one)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_wrtee_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_wrtee_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_wrtee, kvm_emulate_wrtee_len * 4);
	p[kvm_emulate_wrtee_branch_offs] |= distance_end & KVM_INST_B_MASK;

	if (imm_one) {
		p[kvm_emulate_wrtee_reg_offs] =
			KVM_INST_LI | __PPC_RT(R30) | MSR_EE;
	} else {
		/* Make clobbered registers work too */
		switch (get_rt(rt)) {
		case 30:
			kvm_patch_ins_ll(&p[kvm_emulate_wrtee_reg_offs],
					 magic_var(scratch2), KVM_RT_30);
			break;
		case 31:
			kvm_patch_ins_ll(&p[kvm_emulate_wrtee_reg_offs],
					 magic_var(scratch1), KVM_RT_30);
			break;
		default:
			p[kvm_emulate_wrtee_reg_offs] |= rt;
			break;
		}
	}

	p[kvm_emulate_wrtee_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrtee_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

extern u32 kvm_emulate_wrteei_0_branch_offs;
extern u32 kvm_emulate_wrteei_0_len;
extern u32 kvm_emulate_wrteei_0[];

static void kvm_patch_ins_wrteei_0(u32 *inst)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_wrteei_0_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_wrteei_0_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	memcpy(p, kvm_emulate_wrteei_0, kvm_emulate_wrteei_0_len * 4);
	p[kvm_emulate_wrteei_0_branch_offs] |= distance_end & KVM_INST_B_MASK;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrteei_0_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

#endif

#ifdef CONFIG_PPC_BOOK3S_32

extern u32 kvm_emulate_mtsrin_branch_offs;
extern u32 kvm_emulate_mtsrin_reg1_offs;
extern u32 kvm_emulate_mtsrin_reg2_offs;
extern u32 kvm_emulate_mtsrin_orig_ins_offs;
extern u32 kvm_emulate_mtsrin_len;
extern u32 kvm_emulate_mtsrin[];

static void kvm_patch_ins_mtsrin(u32 *inst, u32 rt, u32 rb)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_mtsrin_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_mtsrin_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_mtsrin, kvm_emulate_mtsrin_len * 4);
	p[kvm_emulate_mtsrin_branch_offs] |= distance_end & KVM_INST_B_MASK;
	p[kvm_emulate_mtsrin_reg1_offs] |= (rb << 10);
	p[kvm_emulate_mtsrin_reg2_offs] |= rt;
	p[kvm_emulate_mtsrin_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtsrin_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

#endif

static void kvm_map_magic_page(void *data)
{
	u32 *features = data;

	ulong in[8] = {0};
	ulong out[8];

	in[0] = KVM_MAGIC_PAGE;
	in[1] = KVM_MAGIC_PAGE | MAGIC_PAGE_FLAG_NOT_MAPPED_NX;

	epapr_hypercall(in, out, KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE));

	*features = out[0];
}

static void kvm_check_ins(u32 *inst, u32 features)
{
	u32 _inst = *inst;
	u32 inst_no_rt = _inst & ~KVM_MASK_RT;
	u32 inst_rt = _inst & KVM_MASK_RT;

	switch (inst_no_rt) {
	/* Loads */
	case KVM_INST_MFMSR:
		kvm_patch_ins_ld(inst, magic_var(msr), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG0):
		kvm_patch_ins_ld(inst, magic_var(sprg0), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG1):
		kvm_patch_ins_ld(inst, magic_var(sprg1), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG2):
		kvm_patch_ins_ld(inst, magic_var(sprg2), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG3):
		kvm_patch_ins_ld(inst, magic_var(sprg3), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SRR0):
		kvm_patch_ins_ld(inst, magic_var(srr0), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SRR1):
		kvm_patch_ins_ld(inst, magic_var(srr1), inst_rt);
		break;
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_DEAR):
#else
	case KVM_INST_MFSPR(SPRN_DAR):
#endif
		kvm_patch_ins_ld(inst, magic_var(dar), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_DSISR):
		kvm_patch_ins_lwz(inst, magic_var(dsisr), inst_rt);
		break;

#ifdef CONFIG_PPC_BOOK3E_MMU
	case KVM_INST_MFSPR(SPRN_MAS0):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas0), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS1):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas1), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS2):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(mas2), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS3):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas7_3) + 4, inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS4):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas4), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS6):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas6), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS7):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas7_3), inst_rt);
		break;
#endif /* CONFIG_PPC_BOOK3E_MMU */

	case KVM_INST_MFSPR(SPRN_SPRG4):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG4R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg4), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG5):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG5R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg5), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG6):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG6R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg6), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG7):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG7R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg7), inst_rt);
		break;

#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_ESR):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(esr), inst_rt);
		break;
#endif

	case KVM_INST_MFSPR(SPRN_PIR):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(pir), inst_rt);
		break;

	/* Stores */
	case KVM_INST_MTSPR(SPRN_SPRG0):
		kvm_patch_ins_std(inst, magic_var(sprg0), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG1):
		kvm_patch_ins_std(inst, magic_var(sprg1), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG2):
		kvm_patch_ins_std(inst, magic_var(sprg2), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG3):
		kvm_patch_ins_std(inst, magic_var(sprg3), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SRR0):
		kvm_patch_ins_std(inst, magic_var(srr0), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SRR1):
		kvm_patch_ins_std(inst, magic_var(srr1), inst_rt);
		break;
#ifdef CONFIG_BOOKE
	case KVM_INST_MTSPR(SPRN_DEAR):
#else
	case KVM_INST_MTSPR(SPRN_DAR):
#endif
		kvm_patch_ins_std(inst, magic_var(dar), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_DSISR):
		kvm_patch_ins_stw(inst, magic_var(dsisr), inst_rt);
		break;
#ifdef CONFIG_PPC_BOOK3E_MMU
	case KVM_INST_MTSPR(SPRN_MAS0):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas0), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS1):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas1), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS2):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(mas2), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS3):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas7_3) + 4, inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS4):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas4), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS6):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas6), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS7):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas7_3), inst_rt);
		break;
#endif /* CONFIG_PPC_BOOK3E_MMU */

	case KVM_INST_MTSPR(SPRN_SPRG4):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg4), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG5):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg5), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG6):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg6), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG7):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg7), inst_rt);
		break;

#ifdef CONFIG_BOOKE
	case KVM_INST_MTSPR(SPRN_ESR):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(esr), inst_rt);
		break;
#endif

	/* Nops */
	case KVM_INST_TLBSYNC:
		kvm_patch_ins_nop(inst);
		break;

	/* Rewrites */
	case KVM_INST_MTMSRD_L1:
		kvm_patch_ins_mtmsrd(inst, inst_rt);
		break;
	case KVM_INST_MTMSR:
	case KVM_INST_MTMSRD_L0:
		kvm_patch_ins_mtmsr(inst, inst_rt);
		break;
#ifdef CONFIG_BOOKE
	case KVM_INST_WRTEE:
		kvm_patch_ins_wrtee(inst, inst_rt, 0);
		break;
#endif
	}

	switch (inst_no_rt & ~KVM_MASK_RB) {
#ifdef CONFIG_PPC_BOOK3S_32
	case KVM_INST_MTSRIN:
		if (features & KVM_MAGIC_FEAT_SR) {
			u32 inst_rb = _inst & KVM_MASK_RB;
			kvm_patch_ins_mtsrin(inst, inst_rt, inst_rb);
		}
		break;
#endif
	}

	switch (_inst) {
#ifdef CONFIG_BOOKE
	case KVM_INST_WRTEEI_0:
		kvm_patch_ins_wrteei_0(inst);
		break;

	case KVM_INST_WRTEEI_1:
		kvm_patch_ins_wrtee(inst, 0, 1);
		break;
#endif
	}
}

extern u32 kvm_template_start[];
extern u32 kvm_template_end[];

static void kvm_use_magic_page(void)
{
	u32 *p;
	u32 *start, *end;
	u32 tmp;
	u32 features;

	/* Tell the host to map the magic page to -4096 on all CPUs */
	on_each_cpu(kvm_map_magic_page, &features, 1);

	/* Quick self-test to see if the mapping works */
	if (__get_user(tmp, (u32 *)KVM_MAGIC_PAGE)) {
		kvm_patching_worked = false;
		return;
	}

	/* Now loop through all code and find instructions */
	start = (void *)_stext;
	end = (void *)_etext;

	/*
	 * Being interrupted in the middle of patching would
	 * be bad for SPRG4-7, which KVM can't keep in sync
	 * with emulated accesses because reads don't trap.
	 */
	local_irq_disable();

	for (p = start; p < end; p++) {
		/* Avoid patching the template code */
		if (p >= kvm_template_start && p < kvm_template_end) {
			p = kvm_template_end - 1;
			continue;
		}
		kvm_check_ins(p, features);
	}

	local_irq_enable();

	printk(KERN_INFO "KVM: Live patching for a fast VM %s\n",
			 kvm_patching_worked ? "worked" : "failed");
}

static __init void kvm_free_tmp(void)
{
	free_reserved_area(&kvm_tmp[kvm_tmp_index],
			   &kvm_tmp[ARRAY_SIZE(kvm_tmp)], -1, NULL);
}

static int __init kvm_guest_init(void)
{
	if (!kvm_para_available())
		goto free_tmp;

	if (!epapr_paravirt_enabled)
		goto free_tmp;

	if (kvm_para_has_feature(KVM_FEATURE_MAGIC_PAGE))
		kvm_use_magic_page();

#ifdef CONFIG_PPC_BOOK3S_64
	/* Enable napping */
	powersave_nap = 1;
#endif

free_tmp:
	kvm_free_tmp();

	return 0;
}

postcore_initcall(kvm_guest_init);