kvm: ppc: booke: Use the shared struct helpers of SPRN_DEAR
arch/powerpc/kvm/booke.c
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 * Copyright 2010-2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 *          Scott Wood <scottwood@freescale.com>
 *          Varun Sethi <varun.sethi@freescale.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>

#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/cacheflush.h>
#include <asm/dbell.h>
#include <asm/hw_irq.h>
#include <asm/irq.h>
#include <asm/time.h>

#include "timing.h"
#include "booke.h"

#define CREATE_TRACE_POINTS
#include "trace_booke.h"

unsigned long kvmppc_booke_handlers;

#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "mmio", VCPU_STAT(mmio_exits) },
	{ "dcr", VCPU_STAT(dcr_exits) },
	{ "sig", VCPU_STAT(signal_exits) },
	{ "itlb_r", VCPU_STAT(itlb_real_miss_exits) },
	{ "itlb_v", VCPU_STAT(itlb_virt_miss_exits) },
	{ "dtlb_r", VCPU_STAT(dtlb_real_miss_exits) },
	{ "dtlb_v", VCPU_STAT(dtlb_virt_miss_exits) },
	{ "sysc", VCPU_STAT(syscall_exits) },
	{ "isi", VCPU_STAT(isi_exits) },
	{ "dsi", VCPU_STAT(dsi_exits) },
	{ "inst_emu", VCPU_STAT(emulated_inst_exits) },
	{ "dec", VCPU_STAT(dec_exits) },
	{ "ext_intr", VCPU_STAT(ext_intr_exits) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "doorbell", VCPU_STAT(dbell_exits) },
	{ "guest doorbell", VCPU_STAT(gdbell_exits) },
	{ "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
	{ NULL }
};

/* TODO: use vcpu_printf() */
void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
{
	int i;

	printk("pc: %08lx msr: %08llx\n", vcpu->arch.pc, vcpu->arch.shared->msr);
	printk("lr: %08lx ctr: %08lx\n", vcpu->arch.lr, vcpu->arch.ctr);
	printk("srr0: %08llx srr1: %08llx\n", vcpu->arch.shared->srr0,
	       vcpu->arch.shared->srr1);

	printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions);

	for (i = 0; i < 32; i += 4) {
		printk("gpr%02d: %08lx %08lx %08lx %08lx\n", i,
		       kvmppc_get_gpr(vcpu, i),
		       kvmppc_get_gpr(vcpu, i+1),
		       kvmppc_get_gpr(vcpu, i+2),
		       kvmppc_get_gpr(vcpu, i+3));
	}
}

#ifdef CONFIG_SPE
void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	enable_kernel_spe();
	kvmppc_save_guest_spe(vcpu);
	vcpu->arch.shadow_msr &= ~MSR_SPE;
	preempt_enable();
}

static void kvmppc_vcpu_enable_spe(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	enable_kernel_spe();
	kvmppc_load_guest_spe(vcpu);
	vcpu->arch.shadow_msr |= MSR_SPE;
	preempt_enable();
}

static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.shared->msr & MSR_SPE) {
		if (!(vcpu->arch.shadow_msr & MSR_SPE))
			kvmppc_vcpu_enable_spe(vcpu);
	} else if (vcpu->arch.shadow_msr & MSR_SPE) {
		kvmppc_vcpu_disable_spe(vcpu);
	}
}
#else
static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
{
}
#endif

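/*
 * Note: the SPE helpers above implement lazy state switching.  Guest SPE
 * state is loaded (and MSR[SPE] set in the shadow MSR) when the guest turns
 * on MSR[SPE], or when it first traps with an SPE-unavailable exception,
 * and it is saved and disabled again when the guest clears MSR[SPE].  See
 * the SPE cases in kvmppc_handle_exit() below.
 */
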
static void kvmppc_vcpu_sync_fpu(struct kvm_vcpu *vcpu)
{
#if defined(CONFIG_PPC_FPU) && !defined(CONFIG_KVM_BOOKE_HV)
	/* We always treat the FP bit as enabled from the host
	   perspective, so only need to adjust the shadow MSR */
	vcpu->arch.shadow_msr &= ~MSR_FP;
	vcpu->arch.shadow_msr |= vcpu->arch.shared->msr & MSR_FP;
#endif
}

static void kvmppc_vcpu_sync_debug(struct kvm_vcpu *vcpu)
{
	/* Synchronize guest's desire to get debug interrupts into shadow MSR */
#ifndef CONFIG_KVM_BOOKE_HV
	vcpu->arch.shadow_msr &= ~MSR_DE;
	vcpu->arch.shadow_msr |= vcpu->arch.shared->msr & MSR_DE;
#endif

	/* Force enable debug interrupts when user space wants to debug */
	if (vcpu->guest_debug) {
#ifdef CONFIG_KVM_BOOKE_HV
		/*
		 * Since there is no shadow MSR, sync MSR_DE into the guest
		 * visible MSR.
		 */
		vcpu->arch.shared->msr |= MSR_DE;
#else
		vcpu->arch.shadow_msr |= MSR_DE;
		vcpu->arch.shared->msr &= ~MSR_DE;
#endif
	}
}

/*
 * Helper function for "full" MSR writes.  No need to call this if only
 * EE/CE/ME/DE/RI are changing.
 */
void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
{
	u32 old_msr = vcpu->arch.shared->msr;

#ifdef CONFIG_KVM_BOOKE_HV
	new_msr |= MSR_GS;
#endif

	vcpu->arch.shared->msr = new_msr;

	kvmppc_mmu_msr_notify(vcpu, old_msr);
	kvmppc_vcpu_sync_spe(vcpu);
	kvmppc_vcpu_sync_fpu(vcpu);
	kvmppc_vcpu_sync_debug(vcpu);
}

static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
				       unsigned int priority)
{
	trace_kvm_booke_queue_irqprio(vcpu, priority);
	set_bit(priority, &vcpu->arch.pending_exceptions);
}

static void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu,
					ulong dear_flags, ulong esr_flags)
{
	vcpu->arch.queued_dear = dear_flags;
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS);
}

static void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
					   ulong dear_flags, ulong esr_flags)
{
	vcpu->arch.queued_dear = dear_flags;
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE);
}

static void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
					   ulong esr_flags)
{
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE);
}

static void kvmppc_core_queue_alignment(struct kvm_vcpu *vcpu, ulong dear_flags,
					ulong esr_flags)
{
	vcpu->arch.queued_dear = dear_flags;
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALIGNMENT);
}

void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong esr_flags)
{
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
}

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER);
}

int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
	return test_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
	clear_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
				struct kvm_interrupt *irq)
{
	unsigned int prio = BOOKE_IRQPRIO_EXTERNAL;

	if (irq->irq == KVM_INTERRUPT_SET_LEVEL)
		prio = BOOKE_IRQPRIO_EXTERNAL_LEVEL;

	kvmppc_booke_queue_irqprio(vcpu, prio);
}

void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu)
{
	clear_bit(BOOKE_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions);
	clear_bit(BOOKE_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions);
}

static void kvmppc_core_queue_watchdog(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_WATCHDOG);
}

static void kvmppc_core_dequeue_watchdog(struct kvm_vcpu *vcpu)
{
	clear_bit(BOOKE_IRQPRIO_WATCHDOG, &vcpu->arch.pending_exceptions);
}

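/*
 * Note: vcpu->arch.pending_exceptions is a bitmap indexed by the
 * BOOKE_IRQPRIO_* values used above.  kvmppc_core_check_exceptions() below
 * walks it with __ffs()/find_next_bit(), so a numerically lower priority
 * value is considered first when several exceptions are pending at once.
 */
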
static void set_guest_srr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	kvmppc_set_srr0(vcpu, srr0);
	kvmppc_set_srr1(vcpu, srr1);
}

static void set_guest_csrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	vcpu->arch.csrr0 = srr0;
	vcpu->arch.csrr1 = srr1;
}

static void set_guest_dsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC)) {
		vcpu->arch.dsrr0 = srr0;
		vcpu->arch.dsrr1 = srr1;
	} else {
		set_guest_csrr(vcpu, srr0, srr1);
	}
}

static void set_guest_mcsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	vcpu->arch.mcsrr0 = srr0;
	vcpu->arch.mcsrr1 = srr1;
}

static unsigned long get_guest_esr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
	return mfspr(SPRN_GESR);
#else
	return vcpu->arch.shared->esr;
#endif
}

static void set_guest_esr(struct kvm_vcpu *vcpu, u32 esr)
{
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_GESR, esr);
#else
	vcpu->arch.shared->esr = esr;
#endif
}

static unsigned long get_guest_epr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
	return mfspr(SPRN_GEPR);
#else
	return vcpu->arch.epr;
#endif
}

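/*
 * Note: with CONFIG_KVM_BOOKE_HV the guest-visible ESR/EPR live in dedicated
 * hardware guest SPRs (GESR/GEPR) while the vcpu is loaded, so the helpers
 * above access them with mfspr/mtspr; without HV support they are simply
 * tracked in the shared page and the vcpu struct.
 */
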
/* Deliver the interrupt of the corresponding priority, if possible. */
static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
					unsigned int priority)
{
	int allowed = 0;
	ulong msr_mask = 0;
	bool update_esr = false, update_dear = false, update_epr = false;
	ulong crit_raw = vcpu->arch.shared->critical;
	ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
	bool crit;
	bool keep_irq = false;
	enum int_class int_class;
	ulong new_msr = vcpu->arch.shared->msr;

	/* Truncate crit indicators in 32 bit mode */
	if (!(vcpu->arch.shared->msr & MSR_SF)) {
		crit_raw &= 0xffffffff;
		crit_r1 &= 0xffffffff;
	}

	/* Critical section when crit == r1 */
	crit = (crit_raw == crit_r1);
	/* ... and we're in supervisor mode */
	crit = crit && !(vcpu->arch.shared->msr & MSR_PR);

	if (priority == BOOKE_IRQPRIO_EXTERNAL_LEVEL) {
		priority = BOOKE_IRQPRIO_EXTERNAL;
		keep_irq = true;
	}

	if ((priority == BOOKE_IRQPRIO_EXTERNAL) && vcpu->arch.epr_flags)
		update_epr = true;

	switch (priority) {
	case BOOKE_IRQPRIO_DTLB_MISS:
	case BOOKE_IRQPRIO_DATA_STORAGE:
	case BOOKE_IRQPRIO_ALIGNMENT:
		update_dear = true;
		/* fall through */
	case BOOKE_IRQPRIO_INST_STORAGE:
	case BOOKE_IRQPRIO_PROGRAM:
		update_esr = true;
		/* fall through */
	case BOOKE_IRQPRIO_ITLB_MISS:
	case BOOKE_IRQPRIO_SYSCALL:
	case BOOKE_IRQPRIO_FP_UNAVAIL:
	case BOOKE_IRQPRIO_SPE_UNAVAIL:
	case BOOKE_IRQPRIO_SPE_FP_DATA:
	case BOOKE_IRQPRIO_SPE_FP_ROUND:
	case BOOKE_IRQPRIO_AP_UNAVAIL:
		allowed = 1;
		msr_mask = MSR_CE | MSR_ME | MSR_DE;
		int_class = INT_CLASS_NONCRIT;
		break;
	case BOOKE_IRQPRIO_WATCHDOG:
	case BOOKE_IRQPRIO_CRITICAL:
	case BOOKE_IRQPRIO_DBELL_CRIT:
		allowed = vcpu->arch.shared->msr & MSR_CE;
		allowed = allowed && !crit;
		msr_mask = MSR_ME;
		int_class = INT_CLASS_CRIT;
		break;
	case BOOKE_IRQPRIO_MACHINE_CHECK:
		allowed = vcpu->arch.shared->msr & MSR_ME;
		allowed = allowed && !crit;
		int_class = INT_CLASS_MC;
		break;
	case BOOKE_IRQPRIO_DECREMENTER:
	case BOOKE_IRQPRIO_FIT:
		keep_irq = true;
		/* fall through */
	case BOOKE_IRQPRIO_EXTERNAL:
	case BOOKE_IRQPRIO_DBELL:
		allowed = vcpu->arch.shared->msr & MSR_EE;
		allowed = allowed && !crit;
		msr_mask = MSR_CE | MSR_ME | MSR_DE;
		int_class = INT_CLASS_NONCRIT;
		break;
	case BOOKE_IRQPRIO_DEBUG:
		allowed = vcpu->arch.shared->msr & MSR_DE;
		allowed = allowed && !crit;
		msr_mask = MSR_ME;
		int_class = INT_CLASS_CRIT;
		break;
	}

	if (allowed) {
		switch (int_class) {
		case INT_CLASS_NONCRIT:
			set_guest_srr(vcpu, vcpu->arch.pc,
				      vcpu->arch.shared->msr);
			break;
		case INT_CLASS_CRIT:
			set_guest_csrr(vcpu, vcpu->arch.pc,
				       vcpu->arch.shared->msr);
			break;
		case INT_CLASS_DBG:
			set_guest_dsrr(vcpu, vcpu->arch.pc,
				       vcpu->arch.shared->msr);
			break;
		case INT_CLASS_MC:
			set_guest_mcsrr(vcpu, vcpu->arch.pc,
					vcpu->arch.shared->msr);
			break;
		}

		vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority];
		if (update_esr)
			set_guest_esr(vcpu, vcpu->arch.queued_esr);
		if (update_dear)
			kvmppc_set_dar(vcpu, vcpu->arch.queued_dear);
		if (update_epr) {
			if (vcpu->arch.epr_flags & KVMPPC_EPR_USER)
				kvm_make_request(KVM_REQ_EPR_EXIT, vcpu);
			else if (vcpu->arch.epr_flags & KVMPPC_EPR_KERNEL) {
				BUG_ON(vcpu->arch.irq_type != KVMPPC_IRQ_MPIC);
				kvmppc_mpic_set_epr(vcpu);
			}
		}

		new_msr &= msr_mask;
#if defined(CONFIG_64BIT)
		if (vcpu->arch.epcr & SPRN_EPCR_ICM)
			new_msr |= MSR_CM;
#endif
		kvmppc_set_msr(vcpu, new_msr);

		if (!keep_irq)
			clear_bit(priority, &vcpu->arch.pending_exceptions);
	}

#ifdef CONFIG_KVM_BOOKE_HV
	/*
	 * If an interrupt is pending but masked, raise a guest doorbell
	 * so that we are notified when the guest enables the relevant
	 * MSR bit.
	 */
	if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_EE)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_NONCRIT);
	if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_CE)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_CRIT);
	if (vcpu->arch.pending_exceptions & BOOKE_IRQPRIO_MACHINE_CHECK)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_MC);
#endif

	return allowed;
}

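/*
 * Sketch of one concrete non-critical delivery performed above: for a guest
 * DTLB miss, SRR0/SRR1 receive the old PC and MSR, DEAR/ESR receive the
 * queued fault values, the PC is redirected to IVPR|IVOR[prio] (the guest's
 * IVOR13 vector for this case), and the new guest MSR is the old one masked
 * down to at most (CE|ME|DE), i.e. EE is cleared on entry.
 */
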
/*
 * Return the number of jiffies until the next timeout.  If the timeout is
 * longer than NEXT_TIMER_MAX_DELTA, then return NEXT_TIMER_MAX_DELTA
 * because a larger value can break the timer APIs.
 */
static unsigned long watchdog_next_timeout(struct kvm_vcpu *vcpu)
{
	u64 tb, wdt_tb, wdt_ticks = 0;
	u64 nr_jiffies = 0;
	u32 period = TCR_GET_WP(vcpu->arch.tcr);

	wdt_tb = 1ULL << (63 - period);
	tb = get_tb();
	/*
	 * The watchdog timeout will happen when the TB bit corresponding
	 * to the watchdog period toggles from 0 to 1.
	 */
	if (tb & wdt_tb)
		wdt_ticks = wdt_tb;

	wdt_ticks += wdt_tb - (tb & (wdt_tb - 1));

	/* Convert timebase ticks to jiffies */
	nr_jiffies = wdt_ticks;

	if (do_div(nr_jiffies, tb_ticks_per_jiffy))
		nr_jiffies++;

	return min_t(unsigned long long, nr_jiffies, NEXT_TIMER_MAX_DELTA);
}
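
/*
 * Worked example (numbers are illustrative assumptions, not requirements):
 * with period = 20, wdt_tb = 1ULL << 43, so the watchdog fires when timebase
 * bit 43 next flips from 0 to 1 -- somewhere between 1 and 2^44 ticks away.
 * At an assumed 500 MHz timebase, 2^43 ticks is roughly 4.9 hours.
 */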

static void arm_next_watchdog(struct kvm_vcpu *vcpu)
{
	unsigned long nr_jiffies;
	unsigned long flags;

	/*
	 * If TSR_ENW and TSR_WIS are not set then there is no need to exit
	 * to userspace, so clear the KVM_REQ_WATCHDOG request.
	 */
	if ((vcpu->arch.tsr & (TSR_ENW | TSR_WIS)) != (TSR_ENW | TSR_WIS))
		clear_bit(KVM_REQ_WATCHDOG, &vcpu->requests);

	spin_lock_irqsave(&vcpu->arch.wdt_lock, flags);
	nr_jiffies = watchdog_next_timeout(vcpu);
	/*
	 * If the number of jiffies until the next watchdog timeout is
	 * NEXT_TIMER_MAX_DELTA or more, do not run the watchdog timer,
	 * as this can break the timer APIs.
	 */
	if (nr_jiffies < NEXT_TIMER_MAX_DELTA)
		mod_timer(&vcpu->arch.wdt_timer, jiffies + nr_jiffies);
	else
		del_timer(&vcpu->arch.wdt_timer);
	spin_unlock_irqrestore(&vcpu->arch.wdt_lock, flags);
}

void kvmppc_watchdog_func(unsigned long data)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
	u32 tsr, new_tsr;
	int final;

	do {
		new_tsr = tsr = vcpu->arch.tsr;
		final = 0;

		/* Time out event */
		if (tsr & TSR_ENW) {
			if (tsr & TSR_WIS)
				final = 1;
			else
				new_tsr = tsr | TSR_WIS;
		} else {
			new_tsr = tsr | TSR_ENW;
		}
	} while (cmpxchg(&vcpu->arch.tsr, tsr, new_tsr) != tsr);

	if (new_tsr & TSR_WIS) {
		smp_wmb();
		kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
		kvm_vcpu_kick(vcpu);
	}

	/*
	 * If this is the final watchdog expiry and some action is required
	 * then exit to userspace.
	 */
	if (final && (vcpu->arch.tcr & TCR_WRC_MASK) &&
	    vcpu->arch.watchdog_enabled) {
		smp_wmb();
		kvm_make_request(KVM_REQ_WATCHDOG, vcpu);
		kvm_vcpu_kick(vcpu);
	}

	/*
	 * Stop running the watchdog timer after final expiration to
	 * prevent the host from being flooded with timers if the
	 * guest sets a short period.
	 * Timers will resume when TSR/TCR is updated next time.
	 */
	if (!final)
		arm_next_watchdog(vcpu);
}

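/*
 * Note: like the hardware watchdog it emulates, the function above is
 * staged: the first expiry sets TSR[ENW], the second sets TSR[WIS] (which,
 * if TCR[WIE] is set, leads to a queued watchdog critical interrupt via
 * update_timer_ints()), and only a "final" expiry with both bits already
 * set triggers the TCR[WRC]-selected action, which is left to userspace.
 */
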
static void update_timer_ints(struct kvm_vcpu *vcpu)
{
	if ((vcpu->arch.tcr & TCR_DIE) && (vcpu->arch.tsr & TSR_DIS))
		kvmppc_core_queue_dec(vcpu);
	else
		kvmppc_core_dequeue_dec(vcpu);

	if ((vcpu->arch.tcr & TCR_WIE) && (vcpu->arch.tsr & TSR_WIS))
		kvmppc_core_queue_watchdog(vcpu);
	else
		kvmppc_core_dequeue_watchdog(vcpu);
}

static void kvmppc_core_check_exceptions(struct kvm_vcpu *vcpu)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned int priority;

	priority = __ffs(*pending);
	while (priority < BOOKE_IRQPRIO_MAX) {
		if (kvmppc_booke_irqprio_deliver(vcpu, priority))
			break;

		priority = find_next_bit(pending,
					 BITS_PER_BYTE * sizeof(*pending),
					 priority + 1);
	}

	/* Tell the guest about our interrupt status */
	vcpu->arch.shared->int_pending = !!*pending;
}

/* Check pending exceptions and deliver one, if possible. */
int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r = 0;
	WARN_ON_ONCE(!irqs_disabled());

	kvmppc_core_check_exceptions(vcpu);

	if (vcpu->requests) {
		/* Exception delivery raised request; start over */
		return 1;
	}

	if (vcpu->arch.shared->msr & MSR_WE) {
		local_irq_enable();
		kvm_vcpu_block(vcpu);
		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
		hard_irq_disable();

		kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
		r = 1;
	}

	return r;
}

int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
{
	int r = 1; /* Indicate we want to get back into the guest */

	if (kvm_check_request(KVM_REQ_PENDING_TIMER, vcpu))
		update_timer_ints(vcpu);
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
		kvmppc_core_flush_tlb(vcpu);
#endif

	if (kvm_check_request(KVM_REQ_WATCHDOG, vcpu)) {
		vcpu->run->exit_reason = KVM_EXIT_WATCHDOG;
		r = 0;
	}

	if (kvm_check_request(KVM_REQ_EPR_EXIT, vcpu)) {
		vcpu->run->epr.epr = 0;
		vcpu->arch.epr_needed = true;
		vcpu->run->exit_reason = KVM_EXIT_EPR;
		r = 0;
	}

	return r;
}

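/*
 * Note on the convention used above: returning 1 means "go (back) into the
 * guest", while 0 means "complete the exit to userspace with the exit_reason
 * that was just filled in".
 */
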
int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	int ret, s;
	struct debug_reg debug;

	if (!vcpu->arch.sane) {
		kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return -EINVAL;
	}

	s = kvmppc_prepare_to_enter(vcpu);
	if (s <= 0) {
		ret = s;
		goto out;
	}
	/* interrupts now hard-disabled */

#ifdef CONFIG_PPC_FPU
	/* Save userspace FPU state in stack */
	enable_kernel_fp();

	/*
	 * Since we can't trap on MSR_FP in GS-mode, we consider the guest
	 * as always using the FPU.  Kernel usage of FP (via
	 * enable_kernel_fp()) in this thread must not occur while
	 * vcpu->fpu_active is set.
	 */
	vcpu->fpu_active = 1;

	kvmppc_load_guest_fp(vcpu);
#endif

	/* Switch to guest debug context */
	debug = vcpu->arch.shadow_dbg_reg;
	switch_booke_debug_regs(&debug);
	debug = current->thread.debug;
	current->thread.debug = vcpu->arch.shadow_dbg_reg;

	vcpu->arch.pgdir = current->mm->pgd;
	kvmppc_fix_ee_before_entry();

	ret = __kvmppc_vcpu_run(kvm_run, vcpu);

	/* No need for kvm_guest_exit. It's done in handle_exit.
	   We also get here with interrupts enabled. */

	/* Switch back to user space debug context */
	switch_booke_debug_regs(&debug);
	current->thread.debug = debug;

#ifdef CONFIG_PPC_FPU
	kvmppc_save_guest_fp(vcpu);

	vcpu->fpu_active = 0;
#endif

out:
	vcpu->mode = OUTSIDE_GUEST_MODE;
	return ret;
}

static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;

	er = kvmppc_emulate_instruction(run, vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* don't overwrite subtypes, just account kvm_stats */
		kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS);
		/* Future optimization: only reload non-volatiles if
		 * they were actually modified by emulation. */
		return RESUME_GUEST_NV;

	case EMULATE_DO_DCR:
		run->exit_reason = KVM_EXIT_DCR;
		return RESUME_HOST;

	case EMULATE_FAIL:
		printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
		       __func__, vcpu->arch.pc, vcpu->arch.last_inst);
		/* For debugging, encode the failing instruction and
		 * report it to userspace. */
		run->hw.hardware_exit_reason = ~0ULL << 32;
		run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
		kvmppc_core_queue_program(vcpu, ESR_PIL);
		return RESUME_HOST;

	case EMULATE_EXIT_USER:
		return RESUME_HOST;

	default:
		BUG();
	}
}

static int kvmppc_handle_debug(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	struct debug_reg *dbg_reg = &(vcpu->arch.shadow_dbg_reg);
	u32 dbsr = vcpu->arch.dbsr;

	run->debug.arch.status = 0;
	run->debug.arch.address = vcpu->arch.pc;

	if (dbsr & (DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4)) {
		run->debug.arch.status |= KVMPPC_DEBUG_BREAKPOINT;
	} else {
		if (dbsr & (DBSR_DAC1W | DBSR_DAC2W))
			run->debug.arch.status |= KVMPPC_DEBUG_WATCH_WRITE;
		else if (dbsr & (DBSR_DAC1R | DBSR_DAC2R))
			run->debug.arch.status |= KVMPPC_DEBUG_WATCH_READ;
		if (dbsr & (DBSR_DAC1R | DBSR_DAC1W))
			run->debug.arch.address = dbg_reg->dac1;
		else if (dbsr & (DBSR_DAC2R | DBSR_DAC2W))
			run->debug.arch.address = dbg_reg->dac2;
	}

	return RESUME_HOST;
}

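/*
 * Build a minimal pt_regs describing the *current host* context.  The
 * "bl 1f; 1: mflr %0" sequence below is a common PowerPC idiom for reading
 * the current instruction address: the branch deposits its own return
 * address in LR, which is then copied out.
 */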
static void kvmppc_fill_pt_regs(struct pt_regs *regs)
{
	ulong r1, ip, msr, lr;

	asm("mr %0, 1" : "=r"(r1));
	asm("mflr %0" : "=r"(lr));
	asm("mfmsr %0" : "=r"(msr));
	asm("bl 1f; 1: mflr %0" : "=r"(ip));

	memset(regs, 0, sizeof(*regs));
	regs->gpr[1] = r1;
	regs->nip = ip;
	regs->msr = msr;
	regs->link = lr;
}

/*
 * For interrupts that need to be handled by host interrupt handlers,
 * the corresponding host handler is called from here in a similar way
 * (but not exactly) as it would be called from the low level handler
 * (such as from arch/powerpc/kernel/head_fsl_booke.S).
 */
static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu,
				     unsigned int exit_nr)
{
	struct pt_regs regs;

	switch (exit_nr) {
	case BOOKE_INTERRUPT_EXTERNAL:
		kvmppc_fill_pt_regs(&regs);
		do_IRQ(&regs);
		break;
	case BOOKE_INTERRUPT_DECREMENTER:
		kvmppc_fill_pt_regs(&regs);
		timer_interrupt(&regs);
		break;
#if defined(CONFIG_PPC_DOORBELL)
	case BOOKE_INTERRUPT_DOORBELL:
		kvmppc_fill_pt_regs(&regs);
		doorbell_exception(&regs);
		break;
#endif
	case BOOKE_INTERRUPT_MACHINE_CHECK:
		/* FIXME */
		break;
	case BOOKE_INTERRUPT_PERFORMANCE_MONITOR:
		kvmppc_fill_pt_regs(&regs);
		performance_monitor_exception(&regs);
		break;
	case BOOKE_INTERRUPT_WATCHDOG:
		kvmppc_fill_pt_regs(&regs);
#ifdef CONFIG_BOOKE_WDT
		WatchdogException(&regs);
#else
		unknown_exception(&regs);
#endif
		break;
	case BOOKE_INTERRUPT_CRITICAL:
		unknown_exception(&regs);
		break;
	case BOOKE_INTERRUPT_DEBUG:
		/* Save DBSR before preemption is enabled */
		vcpu->arch.dbsr = mfspr(SPRN_DBSR);
		kvmppc_clear_dbsr();
		break;
	}
}

/**
 * kvmppc_handle_exit
 *
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
 */
int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int exit_nr)
{
	int r = RESUME_HOST;
	int s;
	int idx;

	/* update before a new last_exit_type is rewritten */
	kvmppc_update_timing_stats(vcpu);

	/* restart interrupts if they were meant for the host */
	kvmppc_restart_interrupt(vcpu, exit_nr);

	local_irq_enable();

	trace_kvm_exit(exit_nr, vcpu);
	kvm_guest_exit();

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	switch (exit_nr) {
	case BOOKE_INTERRUPT_MACHINE_CHECK:
		printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR));
		kvmppc_dump_vcpu(vcpu);
		/* For debugging, send invalid exit reason to user space */
		run->hw.hardware_exit_reason = ~1ULL << 32;
		run->hw.hardware_exit_reason |= mfspr(SPRN_MCSR);
		r = RESUME_HOST;
		break;

	case BOOKE_INTERRUPT_EXTERNAL:
		kvmppc_account_exit(vcpu, EXT_INTR_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_DECREMENTER:
		kvmppc_account_exit(vcpu, DEC_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_WATCHDOG:
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_DOORBELL:
		kvmppc_account_exit(vcpu, DBELL_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_GUEST_DBELL_CRIT:
		kvmppc_account_exit(vcpu, GDBELL_EXITS);

		/*
		 * We are here because there is a pending guest interrupt
		 * which could not be delivered as MSR_CE or MSR_ME was not
		 * set.  Once we break from here we will retry delivery.
		 */
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_GUEST_DBELL:
		kvmppc_account_exit(vcpu, GDBELL_EXITS);

		/*
		 * We are here because there is a pending guest interrupt
		 * which could not be delivered as MSR_EE was not set.  Once
		 * we break from here we will retry delivery.
		 */
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_PERFORMANCE_MONITOR:
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_HV_PRIV:
		r = emulation_exit(run, vcpu);
		break;

	case BOOKE_INTERRUPT_PROGRAM:
		if (vcpu->arch.shared->msr & (MSR_PR | MSR_GS)) {
			/*
			 * Program traps generated by user-level software must
			 * be handled by the guest kernel.
			 *
			 * In GS mode, hypervisor privileged instructions trap
			 * on BOOKE_INTERRUPT_HV_PRIV, not here, so these are
			 * actual program interrupts, handled by the guest.
			 */
			kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr);
			r = RESUME_GUEST;
			kvmppc_account_exit(vcpu, USR_PR_INST);
			break;
		}

		r = emulation_exit(run, vcpu);
		break;

	case BOOKE_INTERRUPT_FP_UNAVAIL:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
		kvmppc_account_exit(vcpu, FP_UNAVAIL);
		r = RESUME_GUEST;
		break;

#ifdef CONFIG_SPE
	case BOOKE_INTERRUPT_SPE_UNAVAIL: {
		if (vcpu->arch.shared->msr & MSR_SPE)
			kvmppc_vcpu_enable_spe(vcpu);
		else
			kvmppc_booke_queue_irqprio(vcpu,
						   BOOKE_IRQPRIO_SPE_UNAVAIL);
		r = RESUME_GUEST;
		break;
	}

	case BOOKE_INTERRUPT_SPE_FP_DATA:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_DATA);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_SPE_FP_ROUND:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND);
		r = RESUME_GUEST;
		break;
#else
	case BOOKE_INTERRUPT_SPE_UNAVAIL:
		/*
		 * Guest wants SPE, but host kernel doesn't support it.  Send
		 * an "unimplemented operation" program check to the guest.
		 */
		kvmppc_core_queue_program(vcpu, ESR_PUO | ESR_SPV);
		r = RESUME_GUEST;
		break;

	/*
	 * These really should never happen without CONFIG_SPE,
	 * as we should never enable the real MSR[SPE] in the guest.
	 */
	case BOOKE_INTERRUPT_SPE_FP_DATA:
	case BOOKE_INTERRUPT_SPE_FP_ROUND:
		printk(KERN_CRIT "%s: unexpected SPE interrupt %u at %08lx\n",
		       __func__, exit_nr, vcpu->arch.pc);
		run->hw.hardware_exit_reason = exit_nr;
		r = RESUME_HOST;
		break;
#endif

	case BOOKE_INTERRUPT_DATA_STORAGE:
		kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dear,
					       vcpu->arch.fault_esr);
		kvmppc_account_exit(vcpu, DSI_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_INST_STORAGE:
		kvmppc_core_queue_inst_storage(vcpu, vcpu->arch.fault_esr);
		kvmppc_account_exit(vcpu, ISI_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_ALIGNMENT:
		kvmppc_core_queue_alignment(vcpu, vcpu->arch.fault_dear,
					    vcpu->arch.fault_esr);
		r = RESUME_GUEST;
		break;

#ifdef CONFIG_KVM_BOOKE_HV
	case BOOKE_INTERRUPT_HV_SYSCALL:
		if (!(vcpu->arch.shared->msr & MSR_PR)) {
			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
		} else {
			/*
			 * hcall from guest userspace -- send privileged
			 * instruction program check.
			 */
			kvmppc_core_queue_program(vcpu, ESR_PPR);
		}

		r = RESUME_GUEST;
		break;
#else
	case BOOKE_INTERRUPT_SYSCALL:
		if (!(vcpu->arch.shared->msr & MSR_PR) &&
		    (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
			/* KVM PV hypercalls */
			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
			r = RESUME_GUEST;
		} else {
			/* Guest syscalls */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL);
		}
		kvmppc_account_exit(vcpu, SYSCALL_EXITS);
		r = RESUME_GUEST;
		break;
#endif

	case BOOKE_INTERRUPT_DTLB_MISS: {
		unsigned long eaddr = vcpu->arch.fault_dear;
		int gtlb_index;
		gpa_t gpaddr;
		gfn_t gfn;

#ifdef CONFIG_KVM_E500V2
		if (!(vcpu->arch.shared->msr & MSR_PR) &&
		    (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) {
			kvmppc_map_magic(vcpu);
			kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
			r = RESUME_GUEST;

			break;
		}
#endif

		/* Check the guest TLB. */
		gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
		if (gtlb_index < 0) {
			/* The guest didn't have a mapping for it. */
			kvmppc_core_queue_dtlb_miss(vcpu,
						    vcpu->arch.fault_dear,
						    vcpu->arch.fault_esr);
			kvmppc_mmu_dtlb_miss(vcpu);
			kvmppc_account_exit(vcpu, DTLB_REAL_MISS_EXITS);
			r = RESUME_GUEST;
			break;
		}

		idx = srcu_read_lock(&vcpu->kvm->srcu);

		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
		gfn = gpaddr >> PAGE_SHIFT;

		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
			/* The guest TLB had a mapping, but the shadow TLB
			 * didn't, and it is RAM.  This could be because:
			 * a) the entry is mapping the host kernel, or
			 * b) the guest used a large mapping which we're faking
			 * Either way, we need to satisfy the fault without
			 * invoking the guest. */
			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
			kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
			r = RESUME_GUEST;
		} else {
			/* Guest has mapped and accessed a page which is not
			 * actually RAM. */
			vcpu->arch.paddr_accessed = gpaddr;
			vcpu->arch.vaddr_accessed = eaddr;
			r = kvmppc_emulate_mmio(run, vcpu);
			kvmppc_account_exit(vcpu, MMIO_EXITS);
		}

		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	}

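	/*
	 * The ITLB miss path below mirrors the DTLB path above: consult the
	 * guest-visible TLB first, then either reflect the miss back to the
	 * guest or fill the host shadow TLB behind its back.
	 */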
	case BOOKE_INTERRUPT_ITLB_MISS: {
		unsigned long eaddr = vcpu->arch.pc;
		gpa_t gpaddr;
		gfn_t gfn;
		int gtlb_index;

		r = RESUME_GUEST;

		/* Check the guest TLB. */
		gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);
		if (gtlb_index < 0) {
			/* The guest didn't have a mapping for it. */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
			kvmppc_mmu_itlb_miss(vcpu);
			kvmppc_account_exit(vcpu, ITLB_REAL_MISS_EXITS);
			break;
		}

		kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS);

		idx = srcu_read_lock(&vcpu->kvm->srcu);

		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
		gfn = gpaddr >> PAGE_SHIFT;

		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
			/* The guest TLB had a mapping, but the shadow TLB
			 * didn't.  This could be because:
			 * a) the entry is mapping the host kernel, or
			 * b) the guest used a large mapping which we're faking
			 * Either way, we need to satisfy the fault without
			 * invoking the guest. */
			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
		} else {
			/* Guest mapped and leaped at non-RAM! */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK);
		}

		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	}

	case BOOKE_INTERRUPT_DEBUG: {
		r = kvmppc_handle_debug(run, vcpu);
		if (r == RESUME_HOST)
			run->exit_reason = KVM_EXIT_DEBUG;
		kvmppc_account_exit(vcpu, DEBUG_EXITS);
		break;
	}

	default:
		printk(KERN_EMERG "exit_nr %d\n", exit_nr);
		BUG();
	}

	/*
	 * To avoid clobbering exit_reason, only check for signals if we
	 * aren't already exiting to userspace for some other reason.
	 */
	if (!(r & RESUME_HOST)) {
		s = kvmppc_prepare_to_enter(vcpu);
		if (s <= 0)
			r = (s << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
		else {
			/* interrupts now hard-disabled */
			kvmppc_fix_ee_before_entry();
		}
	}

	return r;
}

static void kvmppc_set_tsr(struct kvm_vcpu *vcpu, u32 new_tsr)
{
	u32 old_tsr = vcpu->arch.tsr;

	vcpu->arch.tsr = new_tsr;

	if ((old_tsr ^ vcpu->arch.tsr) & (TSR_ENW | TSR_WIS))
		arm_next_watchdog(vcpu);

	update_timer_ints(vcpu);
}

/* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int i;
	int r;

	vcpu->arch.pc = 0;
	vcpu->arch.shared->pir = vcpu->vcpu_id;
	kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */
	kvmppc_set_msr(vcpu, 0);

#ifndef CONFIG_KVM_BOOKE_HV
	vcpu->arch.shadow_msr = MSR_USER | MSR_IS | MSR_DS;
	vcpu->arch.shadow_pid = 1;
	vcpu->arch.shared->msr = 0;
#endif

	/* Eye-catching numbers so we know if the guest takes an interrupt
	 * before it's programmed its own IVPR/IVORs. */
	vcpu->arch.ivpr = 0x55550000;
	for (i = 0; i < BOOKE_IRQPRIO_MAX; i++)
		vcpu->arch.ivor[i] = 0x7700 | i * 4;

	kvmppc_init_timing_stats(vcpu);

	r = kvmppc_core_vcpu_setup(vcpu);
	kvmppc_sanity_check(vcpu);
	return r;
}

int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
{
	/* setup watchdog timer once */
	spin_lock_init(&vcpu->arch.wdt_lock);
	setup_timer(&vcpu->arch.wdt_timer, kvmppc_watchdog_func,
		    (unsigned long)vcpu);

	return 0;
}

void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	del_timer_sync(&vcpu->arch.wdt_timer);
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	regs->pc = vcpu->arch.pc;
	regs->cr = kvmppc_get_cr(vcpu);
	regs->ctr = vcpu->arch.ctr;
	regs->lr = vcpu->arch.lr;
	regs->xer = kvmppc_get_xer(vcpu);
	regs->msr = vcpu->arch.shared->msr;
	regs->srr0 = kvmppc_get_srr0(vcpu);
	regs->srr1 = kvmppc_get_srr1(vcpu);
	regs->pid = vcpu->arch.pid;
	regs->sprg0 = vcpu->arch.shared->sprg0;
	regs->sprg1 = vcpu->arch.shared->sprg1;
	regs->sprg2 = vcpu->arch.shared->sprg2;
	regs->sprg3 = vcpu->arch.shared->sprg3;
	regs->sprg4 = vcpu->arch.shared->sprg4;
	regs->sprg5 = vcpu->arch.shared->sprg5;
	regs->sprg6 = vcpu->arch.shared->sprg6;
	regs->sprg7 = vcpu->arch.shared->sprg7;

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		regs->gpr[i] = kvmppc_get_gpr(vcpu, i);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	vcpu->arch.pc = regs->pc;
	kvmppc_set_cr(vcpu, regs->cr);
	vcpu->arch.ctr = regs->ctr;
	vcpu->arch.lr = regs->lr;
	kvmppc_set_xer(vcpu, regs->xer);
	kvmppc_set_msr(vcpu, regs->msr);
	kvmppc_set_srr0(vcpu, regs->srr0);
	kvmppc_set_srr1(vcpu, regs->srr1);
	kvmppc_set_pid(vcpu, regs->pid);
	vcpu->arch.shared->sprg0 = regs->sprg0;
	vcpu->arch.shared->sprg1 = regs->sprg1;
	vcpu->arch.shared->sprg2 = regs->sprg2;
	vcpu->arch.shared->sprg3 = regs->sprg3;
	vcpu->arch.shared->sprg4 = regs->sprg4;
	vcpu->arch.shared->sprg5 = regs->sprg5;
	vcpu->arch.shared->sprg6 = regs->sprg6;
	vcpu->arch.shared->sprg7 = regs->sprg7;

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		kvmppc_set_gpr(vcpu, i, regs->gpr[i]);

	return 0;
}

static void get_sregs_base(struct kvm_vcpu *vcpu,
			   struct kvm_sregs *sregs)
{
	u64 tb = get_tb();

	sregs->u.e.features |= KVM_SREGS_E_BASE;

	sregs->u.e.csrr0 = vcpu->arch.csrr0;
	sregs->u.e.csrr1 = vcpu->arch.csrr1;
	sregs->u.e.mcsr = vcpu->arch.mcsr;
	sregs->u.e.esr = get_guest_esr(vcpu);
	sregs->u.e.dear = kvmppc_get_dar(vcpu);
	sregs->u.e.tsr = vcpu->arch.tsr;
	sregs->u.e.tcr = vcpu->arch.tcr;
	sregs->u.e.dec = kvmppc_get_dec(vcpu, tb);
	sregs->u.e.tb = tb;
	sregs->u.e.vrsave = vcpu->arch.vrsave;
}

static int set_sregs_base(struct kvm_vcpu *vcpu,
			  struct kvm_sregs *sregs)
{
	if (!(sregs->u.e.features & KVM_SREGS_E_BASE))
		return 0;

	vcpu->arch.csrr0 = sregs->u.e.csrr0;
	vcpu->arch.csrr1 = sregs->u.e.csrr1;
	vcpu->arch.mcsr = sregs->u.e.mcsr;
	set_guest_esr(vcpu, sregs->u.e.esr);
	kvmppc_set_dar(vcpu, sregs->u.e.dear);
	vcpu->arch.vrsave = sregs->u.e.vrsave;
	kvmppc_set_tcr(vcpu, sregs->u.e.tcr);

	if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_DEC) {
		vcpu->arch.dec = sregs->u.e.dec;
		kvmppc_emulate_dec(vcpu);
	}

	if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_TSR)
		kvmppc_set_tsr(vcpu, sregs->u.e.tsr);

	return 0;
}

static void get_sregs_arch206(struct kvm_vcpu *vcpu,
			      struct kvm_sregs *sregs)
{
	sregs->u.e.features |= KVM_SREGS_E_ARCH206;

	sregs->u.e.pir = vcpu->vcpu_id;
	sregs->u.e.mcsrr0 = vcpu->arch.mcsrr0;
	sregs->u.e.mcsrr1 = vcpu->arch.mcsrr1;
	sregs->u.e.decar = vcpu->arch.decar;
	sregs->u.e.ivpr = vcpu->arch.ivpr;
}

static int set_sregs_arch206(struct kvm_vcpu *vcpu,
			     struct kvm_sregs *sregs)
{
	if (!(sregs->u.e.features & KVM_SREGS_E_ARCH206))
		return 0;

	if (sregs->u.e.pir != vcpu->vcpu_id)
		return -EINVAL;

	vcpu->arch.mcsrr0 = sregs->u.e.mcsrr0;
	vcpu->arch.mcsrr1 = sregs->u.e.mcsrr1;
	vcpu->arch.decar = sregs->u.e.decar;
	vcpu->arch.ivpr = sregs->u.e.ivpr;

	return 0;
}

int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	sregs->u.e.features |= KVM_SREGS_E_IVOR;

	sregs->u.e.ivor_low[0] = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL];
	sregs->u.e.ivor_low[1] = vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK];
	sregs->u.e.ivor_low[2] = vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE];
	sregs->u.e.ivor_low[3] = vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE];
	sregs->u.e.ivor_low[4] = vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL];
	sregs->u.e.ivor_low[5] = vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT];
	sregs->u.e.ivor_low[6] = vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM];
	sregs->u.e.ivor_low[7] = vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL];
	sregs->u.e.ivor_low[8] = vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL];
	sregs->u.e.ivor_low[9] = vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL];
	sregs->u.e.ivor_low[10] = vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER];
	sregs->u.e.ivor_low[11] = vcpu->arch.ivor[BOOKE_IRQPRIO_FIT];
	sregs->u.e.ivor_low[12] = vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG];
	sregs->u.e.ivor_low[13] = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS];
	sregs->u.e.ivor_low[14] = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS];
	sregs->u.e.ivor_low[15] = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG];
	return 0;
}

int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	if (!(sregs->u.e.features & KVM_SREGS_E_IVOR))
		return 0;

	vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = sregs->u.e.ivor_low[0];
	vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = sregs->u.e.ivor_low[1];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = sregs->u.e.ivor_low[2];
	vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = sregs->u.e.ivor_low[3];
	vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = sregs->u.e.ivor_low[4];
	vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = sregs->u.e.ivor_low[5];
	vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = sregs->u.e.ivor_low[6];
	vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = sregs->u.e.ivor_low[7];
	vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = sregs->u.e.ivor_low[8];
	vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = sregs->u.e.ivor_low[9];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = sregs->u.e.ivor_low[10];
	vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = sregs->u.e.ivor_low[11];
	vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = sregs->u.e.ivor_low[12];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = sregs->u.e.ivor_low[13];
	vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = sregs->u.e.ivor_low[14];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = sregs->u.e.ivor_low[15];

	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	sregs->pvr = vcpu->arch.pvr;

	get_sregs_base(vcpu, sregs);
	get_sregs_arch206(vcpu, sregs);
	return vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int ret;

	if (vcpu->arch.pvr != sregs->pvr)
		return -EINVAL;

	ret = set_sregs_base(vcpu, sregs);
	if (ret < 0)
		return ret;

	ret = set_sregs_arch206(vcpu, sregs);
	if (ret < 0)
		return ret;

	return vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);
}

int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r = 0;
	union kvmppc_one_reg val;
	int size;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	switch (reg->id) {
	case KVM_REG_PPC_IAC1:
		val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac1);
		break;
	case KVM_REG_PPC_IAC2:
		val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac2);
		break;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	case KVM_REG_PPC_IAC3:
		val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac3);
		break;
	case KVM_REG_PPC_IAC4:
		val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac4);
		break;
#endif
	case KVM_REG_PPC_DAC1:
		val = get_reg_val(reg->id, vcpu->arch.dbg_reg.dac1);
		break;
	case KVM_REG_PPC_DAC2:
		val = get_reg_val(reg->id, vcpu->arch.dbg_reg.dac2);
		break;
	case KVM_REG_PPC_EPR: {
		u32 epr = get_guest_epr(vcpu);
		val = get_reg_val(reg->id, epr);
		break;
	}
#if defined(CONFIG_64BIT)
	case KVM_REG_PPC_EPCR:
		val = get_reg_val(reg->id, vcpu->arch.epcr);
		break;
#endif
	case KVM_REG_PPC_TCR:
		val = get_reg_val(reg->id, vcpu->arch.tcr);
		break;
	case KVM_REG_PPC_TSR:
		val = get_reg_val(reg->id, vcpu->arch.tsr);
		break;
	case KVM_REG_PPC_DEBUG_INST:
		val = get_reg_val(reg->id, KVMPPC_INST_EHPRIV_DEBUG);
		break;
	case KVM_REG_PPC_VRSAVE:
		val = get_reg_val(reg->id, vcpu->arch.vrsave);
		break;
	default:
		r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, reg->id, &val);
		break;
	}

	if (r)
		return r;

	if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size))
		r = -EFAULT;

	return r;
}

int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r = 0;
	union kvmppc_one_reg val;
	int size;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
		return -EFAULT;

	switch (reg->id) {
	case KVM_REG_PPC_IAC1:
		vcpu->arch.dbg_reg.iac1 = set_reg_val(reg->id, val);
		break;
	case KVM_REG_PPC_IAC2:
		vcpu->arch.dbg_reg.iac2 = set_reg_val(reg->id, val);
		break;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	case KVM_REG_PPC_IAC3:
		vcpu->arch.dbg_reg.iac3 = set_reg_val(reg->id, val);
		break;
	case KVM_REG_PPC_IAC4:
		vcpu->arch.dbg_reg.iac4 = set_reg_val(reg->id, val);
		break;
#endif
	case KVM_REG_PPC_DAC1:
		vcpu->arch.dbg_reg.dac1 = set_reg_val(reg->id, val);
		break;
	case KVM_REG_PPC_DAC2:
		vcpu->arch.dbg_reg.dac2 = set_reg_val(reg->id, val);
		break;
	case KVM_REG_PPC_EPR: {
		u32 new_epr = set_reg_val(reg->id, val);
		kvmppc_set_epr(vcpu, new_epr);
		break;
	}
#if defined(CONFIG_64BIT)
	case KVM_REG_PPC_EPCR: {
		u32 new_epcr = set_reg_val(reg->id, val);
		kvmppc_set_epcr(vcpu, new_epcr);
		break;
	}
#endif
	case KVM_REG_PPC_OR_TSR: {
		u32 tsr_bits = set_reg_val(reg->id, val);
		kvmppc_set_tsr_bits(vcpu, tsr_bits);
		break;
	}
	case KVM_REG_PPC_CLEAR_TSR: {
		u32 tsr_bits = set_reg_val(reg->id, val);
		kvmppc_clr_tsr_bits(vcpu, tsr_bits);
		break;
	}
	case KVM_REG_PPC_TSR: {
		u32 tsr = set_reg_val(reg->id, val);
		kvmppc_set_tsr(vcpu, tsr);
		break;
	}
	case KVM_REG_PPC_TCR: {
		u32 tcr = set_reg_val(reg->id, val);
		kvmppc_set_tcr(vcpu, tcr);
		break;
	}
	case KVM_REG_PPC_VRSAVE:
		vcpu->arch.vrsave = set_reg_val(reg->id, val);
		break;
	default:
		r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, reg->id, &val);
		break;
	}

	return r;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	int r;

	r = kvmppc_core_vcpu_translate(vcpu, tr);
	return r;
}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	return -ENOTSUPP;
}

void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			      struct kvm_memory_slot *dont)
{
}

int kvmppc_core_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			       unsigned long npages)
{
	return 0;
}

int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				      struct kvm_memory_slot *memslot,
				      struct kvm_userspace_memory_region *mem)
{
	return 0;
}

void kvmppc_core_commit_memory_region(struct kvm *kvm,
				      struct kvm_userspace_memory_region *mem,
				      const struct kvm_memory_slot *old)
{
}

void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
}

void kvmppc_set_epcr(struct kvm_vcpu *vcpu, u32 new_epcr)
{
#if defined(CONFIG_64BIT)
	vcpu->arch.epcr = new_epcr;
#ifdef CONFIG_KVM_BOOKE_HV
	vcpu->arch.shadow_epcr &= ~SPRN_EPCR_GICM;
	if (vcpu->arch.epcr & SPRN_EPCR_ICM)
		vcpu->arch.shadow_epcr |= SPRN_EPCR_GICM;
#endif
#endif
}

void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr)
{
	vcpu->arch.tcr = new_tcr;
	arm_next_watchdog(vcpu);
	update_timer_ints(vcpu);
}

void kvmppc_set_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
{
	set_bits(tsr_bits, &vcpu->arch.tsr);
	smp_wmb();
	kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
	kvm_vcpu_kick(vcpu);
}

void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
{
	clear_bits(tsr_bits, &vcpu->arch.tsr);

	/*
	 * We may have stopped the watchdog due to
	 * being stuck on final expiration.
	 */
	if (tsr_bits & (TSR_ENW | TSR_WIS))
		arm_next_watchdog(vcpu);

	update_timer_ints(vcpu);
}

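/*
 * Decrementer expiry.  Note the auto-reload behavior below: if TCR[ARE]
 * is set, the decrementer is reloaded from DECAR on expiry instead of
 * stopping, so a periodic guest timer keeps ticking without further guest
 * intervention.
 */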
void kvmppc_decrementer_func(unsigned long data)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;

	if (vcpu->arch.tcr & TCR_ARE) {
		vcpu->arch.dec = vcpu->arch.decar;
		kvmppc_emulate_dec(vcpu);
	}

	kvmppc_set_tsr_bits(vcpu, TSR_DIS);
}

static int kvmppc_booke_add_breakpoint(struct debug_reg *dbg_reg,
				       uint64_t addr, int index)
{
	switch (index) {
	case 0:
		dbg_reg->dbcr0 |= DBCR0_IAC1;
		dbg_reg->iac1 = addr;
		break;
	case 1:
		dbg_reg->dbcr0 |= DBCR0_IAC2;
		dbg_reg->iac2 = addr;
		break;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	case 2:
		dbg_reg->dbcr0 |= DBCR0_IAC3;
		dbg_reg->iac3 = addr;
		break;
	case 3:
		dbg_reg->dbcr0 |= DBCR0_IAC4;
		dbg_reg->iac4 = addr;
		break;
#endif
	default:
		return -EINVAL;
	}

	dbg_reg->dbcr0 |= DBCR0_IDM;
	return 0;
}

static int kvmppc_booke_add_watchpoint(struct debug_reg *dbg_reg, uint64_t addr,
				       int type, int index)
{
	switch (index) {
	case 0:
		if (type & KVMPPC_DEBUG_WATCH_READ)
			dbg_reg->dbcr0 |= DBCR0_DAC1R;
		if (type & KVMPPC_DEBUG_WATCH_WRITE)
			dbg_reg->dbcr0 |= DBCR0_DAC1W;
		dbg_reg->dac1 = addr;
		break;
	case 1:
		if (type & KVMPPC_DEBUG_WATCH_READ)
			dbg_reg->dbcr0 |= DBCR0_DAC2R;
		if (type & KVMPPC_DEBUG_WATCH_WRITE)
			dbg_reg->dbcr0 |= DBCR0_DAC2W;
		dbg_reg->dac2 = addr;
		break;
	default:
		return -EINVAL;
	}

	dbg_reg->dbcr0 |= DBCR0_IDM;
	return 0;
}
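
/*
 * Sketch of how userspace drives the two helpers above (illustrative only;
 * a real caller would also check the ioctl's return value):
 *
 *	struct kvm_guest_debug dbg = { 0 };
 *
 *	dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
 *	dbg.arch.bp[0].addr = addr;
 *	dbg.arch.bp[0].type = KVMPPC_DEBUG_WATCH_WRITE;
 *	ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
 *
 * kvm_arch_vcpu_ioctl_set_guest_debug() below then routes each slot to
 * kvmppc_booke_add_breakpoint() or kvmppc_booke_add_watchpoint().
 */
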
void kvm_guest_protect_msr(struct kvm_vcpu *vcpu, ulong prot_bitmap, bool set)
{
	/* XXX: Add similar MSR protection for BookE-PR */
#ifdef CONFIG_KVM_BOOKE_HV
	BUG_ON(prot_bitmap & ~(MSRP_UCLEP | MSRP_DEP | MSRP_PMMP));
	if (set) {
		if (prot_bitmap & MSR_UCLE)
			vcpu->arch.shadow_msrp |= MSRP_UCLEP;
		if (prot_bitmap & MSR_DE)
			vcpu->arch.shadow_msrp |= MSRP_DEP;
		if (prot_bitmap & MSR_PMM)
			vcpu->arch.shadow_msrp |= MSRP_PMMP;
	} else {
		if (prot_bitmap & MSR_UCLE)
			vcpu->arch.shadow_msrp &= ~MSRP_UCLEP;
		if (prot_bitmap & MSR_DE)
			vcpu->arch.shadow_msrp &= ~MSRP_DEP;
		if (prot_bitmap & MSR_PMM)
			vcpu->arch.shadow_msrp &= ~MSRP_PMMP;
	}
#endif
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	struct debug_reg *dbg_reg;
	int n, b = 0, w = 0;

	if (!(dbg->control & KVM_GUESTDBG_ENABLE)) {
		vcpu->arch.shadow_dbg_reg.dbcr0 = 0;
		vcpu->guest_debug = 0;
		kvm_guest_protect_msr(vcpu, MSR_DE, false);
		return 0;
	}

	kvm_guest_protect_msr(vcpu, MSR_DE, true);
	vcpu->guest_debug = dbg->control;
	vcpu->arch.shadow_dbg_reg.dbcr0 = 0;
	/* Set DBCR0_EDM in guest visible DBCR0 register. */
	vcpu->arch.dbg_reg.dbcr0 = DBCR0_EDM;

	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
		vcpu->arch.shadow_dbg_reg.dbcr0 |= DBCR0_IDM | DBCR0_IC;

	/* Code below handles only HW breakpoints */
	dbg_reg = &(vcpu->arch.shadow_dbg_reg);

#ifdef CONFIG_KVM_BOOKE_HV
	/*
	 * On BookE-HV (e500mc) the guest is always executed with MSR.GS=1.
	 * DBCR1 and DBCR2 are set to trigger debug events when MSR.PR is 0.
	 */
	dbg_reg->dbcr1 = 0;
	dbg_reg->dbcr2 = 0;
#else
	/*
	 * On BookE-PR (e500v2) the guest is always executed with MSR.PR=1.
	 * We set DBCR1 and DBCR2 to only trigger debug events when MSR.PR
	 * is set.
	 */
	dbg_reg->dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US | DBCR1_IAC3US |
			 DBCR1_IAC4US;
	dbg_reg->dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
#endif

	if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
		return 0;

	for (n = 0; n < (KVMPPC_BOOKE_IAC_NUM + KVMPPC_BOOKE_DAC_NUM); n++) {
		uint64_t addr = dbg->arch.bp[n].addr;
		uint32_t type = dbg->arch.bp[n].type;

		if (type == KVMPPC_DEBUG_NONE)
			continue;

		/* Reject unknown type bits (note: this must mask with ~,
		 * not the logical ! of the original, or nothing is caught). */
		if (type & ~(KVMPPC_DEBUG_WATCH_READ |
			     KVMPPC_DEBUG_WATCH_WRITE |
			     KVMPPC_DEBUG_BREAKPOINT))
			return -EINVAL;

		if (type & KVMPPC_DEBUG_BREAKPOINT) {
			/* Setting H/W breakpoint */
			if (kvmppc_booke_add_breakpoint(dbg_reg, addr, b++))
				return -EINVAL;
		} else {
			/* Setting H/W watchpoint */
			if (kvmppc_booke_add_watchpoint(dbg_reg, addr,
							type, w++))
				return -EINVAL;
		}
	}

	return 0;
}

void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	vcpu->cpu = smp_processor_id();
	current->thread.kvm_vcpu = vcpu;
}

void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu)
{
	current->thread.kvm_vcpu = NULL;
	vcpu->cpu = -1;

	/* Clear pending debug event in DBSR */
	kvmppc_clear_dbsr();
}

void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->mmu_destroy(vcpu);
}

int kvmppc_core_init_vm(struct kvm *kvm)
{
	return kvm->arch.kvm_ops->init_vm(kvm);
}

struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
	return kvm->arch.kvm_ops->vcpu_create(kvm, id);
}

void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
}

void kvmppc_core_destroy_vm(struct kvm *kvm)
{
	kvm->arch.kvm_ops->destroy_vm(kvm);
}

void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
}

void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu);
}

int __init kvmppc_booke_init(void)
{
#ifndef CONFIG_KVM_BOOKE_HV
	unsigned long ivor[16];
	unsigned long *handler = kvmppc_booke_handler_addr;
	unsigned long max_ivor = 0;
	unsigned long handler_len;
	int i;

	/* We install our own exception handlers by hijacking IVPR.  IVPR
	 * holds only the top 16 bits of the vector base, so the handlers
	 * must live in a 64KB-aligned region; hence a 64KB allocation. */
	kvmppc_booke_handlers = __get_free_pages(GFP_KERNEL | __GFP_ZERO,
						 VCPU_SIZE_ORDER);
	if (!kvmppc_booke_handlers)
		return -ENOMEM;

	/* XXX make sure our handlers are smaller than Linux's */

	/* Copy our interrupt handlers to match host IVORs.  That way we don't
	 * have to swap the IVORs on every guest/host transition. */
	ivor[0] = mfspr(SPRN_IVOR0);
	ivor[1] = mfspr(SPRN_IVOR1);
	ivor[2] = mfspr(SPRN_IVOR2);
	ivor[3] = mfspr(SPRN_IVOR3);
	ivor[4] = mfspr(SPRN_IVOR4);
	ivor[5] = mfspr(SPRN_IVOR5);
	ivor[6] = mfspr(SPRN_IVOR6);
	ivor[7] = mfspr(SPRN_IVOR7);
	ivor[8] = mfspr(SPRN_IVOR8);
	ivor[9] = mfspr(SPRN_IVOR9);
	ivor[10] = mfspr(SPRN_IVOR10);
	ivor[11] = mfspr(SPRN_IVOR11);
	ivor[12] = mfspr(SPRN_IVOR12);
	ivor[13] = mfspr(SPRN_IVOR13);
	ivor[14] = mfspr(SPRN_IVOR14);
	ivor[15] = mfspr(SPRN_IVOR15);

	for (i = 0; i < 16; i++) {
		if (ivor[i] > max_ivor)
			max_ivor = i;

		handler_len = handler[i + 1] - handler[i];
		memcpy((void *)kvmppc_booke_handlers + ivor[i],
		       (void *)handler[i], handler_len);
	}

	handler_len = handler[max_ivor + 1] - handler[max_ivor];
	flush_icache_range(kvmppc_booke_handlers, kvmppc_booke_handlers +
			   ivor[max_ivor] + handler_len);
#endif /* !BOOKE_HV */
	return 0;
}

void __exit kvmppc_booke_exit(void)
{
	free_pages(kvmppc_booke_handlers, VCPU_SIZE_ORDER);
	kvm_exit();
}