/*
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *    Paul Mackerras <paulus@samba.org>
 *
 * Description:
 * Functions relating to running KVM on Book 3S processors where
 * we don't have access to hypervisor mode, and we run the guest
 * in problem state (user mode).
 *
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */
#include <linux/kvm_host.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/slab.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>

#include "trace.h"
/* #define EXIT_DEBUG */
/* #define DEBUG_EXT */

static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
			     ulong msr);

/* Some compatibility defines */
#ifdef CONFIG_PPC_BOOK3S_32
#define MSR_USER32 MSR_USER
#define MSR_USER64 MSR_USER
#define HW_PAGE_SIZE PAGE_SIZE
#endif
void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
	memcpy(to_svcpu(vcpu)->slb, to_book3s(vcpu)->slb_shadow,
	       sizeof(to_svcpu(vcpu)->slb));
	memcpy(&get_paca()->shadow_vcpu, to_book3s(vcpu)->shadow_vcpu,
	       sizeof(get_paca()->shadow_vcpu));
	to_svcpu(vcpu)->slb_max = to_book3s(vcpu)->slb_shadow_max;
#endif

#ifdef CONFIG_PPC_BOOK3S_32
	current->thread.kvm_shadow_vcpu = to_book3s(vcpu)->shadow_vcpu;
#endif
}
void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
	memcpy(to_book3s(vcpu)->slb_shadow, to_svcpu(vcpu)->slb,
	       sizeof(to_svcpu(vcpu)->slb));
	memcpy(to_book3s(vcpu)->shadow_vcpu, &get_paca()->shadow_vcpu,
	       sizeof(get_paca()->shadow_vcpu));
	to_book3s(vcpu)->slb_shadow_max = to_svcpu(vcpu)->slb_max;
#endif

	kvmppc_giveup_ext(vcpu, MSR_FP);
	kvmppc_giveup_ext(vcpu, MSR_VEC);
	kvmppc_giveup_ext(vcpu, MSR_VSX);
}
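
/*
 * The guest really runs in problem state, so the MSR the hardware sees
 * (the shadow MSR) can never be the guest's own idea of its MSR: a few
 * guest-visible bits pass through, while IR/DR/PR/EE must stay forced on
 * for the host's sake.  Recompute that shadow value whenever the guest
 * MSR or the set of guest-owned facilities changes.
 */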
static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
{
	ulong smsr = vcpu->arch.shared->msr;

	/* Guest MSR values */
	smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_DE;
	/* Process MSR values */
	smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE;
	/* External providers the guest reserved */
	smsr |= (vcpu->arch.shared->msr & vcpu->arch.guest_owned_ext);
	/* 64-bit Process MSR values */
#ifdef CONFIG_PPC_BOOK3S_64
	smsr |= MSR_ISF | MSR_HV;
#endif
	vcpu->arch.shadow_msr = smsr;
}
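
/*
 * A guest MSR write may also carry a power-management request: setting
 * MSR_POW means the guest wants to nap, so unless an exception is
 * already pending we block the vcpu instead of letting it spin.
 */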
void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
{
	ulong old_msr = vcpu->arch.shared->msr;

#ifdef EXIT_DEBUG
	printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr);
#endif

	msr &= to_book3s(vcpu)->msr_mask;
	vcpu->arch.shared->msr = msr;
	kvmppc_recalc_shadow_msr(vcpu);

	if (msr & MSR_POW) {
		if (!vcpu->arch.pending_exceptions) {
			kvm_vcpu_block(vcpu);
			vcpu->stat.halt_wakeup++;

			/* Unset POW bit after we woke up */
			msr &= ~MSR_POW;
			vcpu->arch.shared->msr = msr;
		}
	}

	if ((vcpu->arch.shared->msr & (MSR_PR|MSR_IR|MSR_DR)) !=
		   (old_msr & (MSR_PR|MSR_IR|MSR_DR))) {
		kvmppc_mmu_flush_segments(vcpu);
		kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));

		/* Preload magic page segment when in kernel mode */
		if (!(msr & MSR_PR) && vcpu->arch.magic_page_pa) {
			struct kvm_vcpu_arch *a = &vcpu->arch;

			if (msr & MSR_DR)
				kvmppc_mmu_map_segment(vcpu, a->magic_page_ea);
			else
				kvmppc_mmu_map_segment(vcpu, a->magic_page_pa);
		}
	}

	/* Preload FPU if it's enabled */
	if (vcpu->arch.shared->msr & MSR_FP)
		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
}
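
/*
 * The PVR picks the MMU model we present to the guest: PVR values in the
 * 0x00330000..0x70330000 window are treated as 64-bit Book3S cores (with
 * the traditional 0xfff00000 HIOR), everything else falls back to the
 * Book3S_32 MMU.
 */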
void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
{
	u32 host_pvr;

	vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB;
	vcpu->arch.pvr = pvr;
#ifdef CONFIG_PPC_BOOK3S_64
	if ((pvr >= 0x330000) && (pvr < 0x70330000)) {
		kvmppc_mmu_book3s_64_init(vcpu);
		if (!to_book3s(vcpu)->hior_sregs)
			to_book3s(vcpu)->hior = 0xfff00000;
		to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL;
		vcpu->arch.cpu_type = KVM_CPU_3S_64;
	} else
#endif
	{
		kvmppc_mmu_book3s_32_init(vcpu);
		if (!to_book3s(vcpu)->hior_sregs)
			to_book3s(vcpu)->hior = 0;
		to_book3s(vcpu)->msr_mask = 0xffffffffULL;
		vcpu->arch.cpu_type = KVM_CPU_3S_32;
	}

	kvmppc_sanity_check(vcpu);

	/* If we are in hypervisor level on 970, we can tell the CPU to
	 * treat DCBZ as a 32-byte store */
	vcpu->arch.hflags &= ~BOOK3S_HFLAG_DCBZ32;
	if (vcpu->arch.mmu.is_dcbz32(vcpu) && (mfmsr() & MSR_HV) &&
	    !strcmp(cur_cpu_spec->platform, "ppc970"))
		vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;

	/* Cell performs badly if MSR_FEx are set. So let's hope nobody
	   really needs them in a VM on Cell and force disable them. */
	if (!strcmp(cur_cpu_spec->platform, "ppc-cell-be"))
		to_book3s(vcpu)->msr_mask &= ~(MSR_FE0 | MSR_FE1);

#ifdef CONFIG_PPC_BOOK3S_32
	/* 32-bit Book3S always has a 32-byte dcbz */
	vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
#endif

	/* On some CPUs we can execute paired single operations natively */
	asm ( "mfpvr %0" : "=r"(host_pvr));
	switch (host_pvr) {
	case 0x00080200:	/* lonestar 2.0 */
	case 0x00088202:	/* lonestar 2.2 */
	case 0x70000100:	/* gekko 1.0 */
	case 0x00080100:	/* gekko 2.0 */
	case 0x00083203:	/* gekko 2.3a */
	case 0x00083213:	/* gekko 2.3b */
	case 0x00083204:	/* gekko 2.4 */
	case 0x00083214:	/* gekko 2.4e (8SE) - retail HW2 */
	case 0x00087200:	/* broadway */
		vcpu->arch.hflags |= BOOK3S_HFLAG_NATIVE_PS;
		/* Enable HID2.PSE - in case we need it later */
		mtspr(SPRN_HID2_GEKKO, mfspr(SPRN_HID2_GEKKO) | (1 << 29));
	}
}
/* Book3s_32 CPUs always have a 32-byte cache line size, which Linux assumes.
 * To make Book3s_32 Linux work on Book3s_64, we have to make sure we trap
 * dcbz to emulate a 32-byte dcbz length.
 *
 * The Book3s_64 inventors also realized this case and implemented a special
 * bit in the HID5 register, which is a hypervisor resource. Thus we can't
 * use it.
 *
 * My approach here is to patch the dcbz instruction on executing pages.
 */
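/*
 * The patch relies on a bit of opcode arithmetic: dcbz is primary opcode
 * 31 with extended opcode 1014, i.e. 0x7c0007ec with the register fields
 * zeroed, which is why candidates are matched with the mask 0xff0007ff.
 * Clearing bit 0x8 yields 0x7c0007e4, an unassigned extended opcode, so
 * executing the patched word raises a program check that the exit
 * handler recognizes and emulates as a 32-byte dcbz.
 */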
static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
{
	struct page *hpage;
	u64 hpage_offset;
	u32 *page;
	int i;

	hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
	if (is_error_page(hpage)) {
		kvm_release_page_clean(hpage);
		return;
	}

	hpage_offset = pte->raddr & ~PAGE_MASK;
	hpage_offset &= ~0xFFFULL;
	hpage_offset /= 4;

	get_page(hpage);
	page = kmap_atomic(hpage, KM_USER0);

	/* patch dcbz into reserved instruction, so we trap */
	for (i = hpage_offset; i < hpage_offset + (HW_PAGE_SIZE / 4); i++)
		if ((page[i] & 0xff0007ff) == INS_DCBZ)
			page[i] &= 0xfffffff7;

	kunmap_atomic(page, KM_USER0);
	put_page(hpage);
}
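
/*
 * The paravirt magic page is not backed by a memslot, so a plain
 * kvm_is_visible_gfn() would make it look like MMIO.  Treat its gfn as
 * visible so that faults on it get a real host mapping instead of being
 * bounced to userspace as MMIO.
 */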
static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	ulong mp_pa = vcpu->arch.magic_page_pa;

	if (unlikely(mp_pa) &&
	    unlikely((mp_pa & KVM_PAM) >> PAGE_SHIFT == gfn)) {
		return 1;
	}

	return kvm_is_visible_gfn(vcpu->kvm, gfn);
}
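
/*
 * Page faults are resolved in two steps: first translate the guest
 * effective address through the virtual MMU (or fabricate a 1:1 pte when
 * translation is off), then back the result with a host page.  Real-mode
 * accesses are tagged with the VSID_REAL* markers so their shadow
 * mappings never collide with translated ones.
 */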
int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
			    ulong eaddr, int vec)
{
	bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE);
	int r = RESUME_GUEST;
	int relocated;
	int page_found = 0;
	struct kvmppc_pte pte;
	bool is_mmio = false;
	bool dr = (vcpu->arch.shared->msr & MSR_DR) ? true : false;
	bool ir = (vcpu->arch.shared->msr & MSR_IR) ? true : false;
	u64 vsid;

	relocated = data ? dr : ir;

	/* Resolve real address if translation turned on */
	if (relocated) {
		page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data);
	} else {
		pte.may_execute = true;
		pte.may_read = true;
		pte.may_write = true;
		pte.raddr = eaddr & KVM_PAM;
		pte.eaddr = eaddr;
		pte.vpage = eaddr >> 12;
	}

	switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
	case 0:
		pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12));
		break;
	case MSR_DR:
	case MSR_IR:
		vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);

		if ((vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) == MSR_DR)
			pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12));
		else
			pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12));
		pte.vpage |= vsid;

		if (vsid == -1)
			page_found = -EINVAL;
		break;
	}

	if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
	   (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
		/*
		 * If we do the dcbz hack, we have to NX on every execution,
		 * so we can patch the executing code. This renders our guest
		 * NX-less.
		 */
		pte.may_execute = !data;
	}

	if (page_found == -ENOENT) {
		/* Page not found in guest PTE entries */
		vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
		vcpu->arch.shared->dsisr = to_svcpu(vcpu)->fault_dsisr;
		vcpu->arch.shared->msr |=
			(to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL);
		kvmppc_book3s_queue_irqprio(vcpu, vec);
	} else if (page_found == -EPERM) {
		/* Storage protection */
		vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
		vcpu->arch.shared->dsisr =
			to_svcpu(vcpu)->fault_dsisr & ~DSISR_NOHPTE;
		vcpu->arch.shared->dsisr |= DSISR_PROTFAULT;
		vcpu->arch.shared->msr |=
			(to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL);
		kvmppc_book3s_queue_irqprio(vcpu, vec);
	} else if (page_found == -EINVAL) {
		/* Page not found in guest SLB */
		vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
		kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
	} else if (!is_mmio &&
		   kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) {
		/* The guest's PTE is not mapped yet. Map on the host */
		kvmppc_mmu_map_page(vcpu, &pte);
		if (data)
			vcpu->stat.sp_storage++;
		else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
			(!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32)))
			kvmppc_patch_dcbz(vcpu, &pte);
	} else {
		/* MMIO */
		vcpu->stat.mmio_exits++;
		vcpu->arch.paddr_accessed = pte.raddr;
		r = kvmppc_emulate_mmio(run, vcpu);
		if (r == RESUME_HOST_NV)
			r = RESUME_HOST;
	}

	return r;
}
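
/*
 * With VSX, the thread's FP save area holds two doublewords per register
 * (TS_FPRWIDTH == 2): FPR i is doubleword 0 of VSR i, and the adjacent
 * slot holds the VSR's doubleword 1.  Hence the FPR index doubles when
 * VSX is configured, and the VSX code below addresses slot 2*i + 1.
 */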
static inline int get_fpr_index(int i)
{
#ifdef CONFIG_VSX
	i *= 2;
#endif
	return i;
}
/* Give up external provider (FPU, Altivec, VSX) */
void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
{
	struct thread_struct *t = &current->thread;
	u64 *vcpu_fpr = vcpu->arch.fpr;
#ifdef CONFIG_VSX
	u64 *vcpu_vsx = vcpu->arch.vsr;
#endif
	u64 *thread_fpr = (u64 *)t->fpr;
	int i;

	if (!(vcpu->arch.guest_owned_ext & msr))
		return;

#ifdef DEBUG_EXT
	printk(KERN_INFO "Giving up ext 0x%lx\n", msr);
#endif

	switch (msr) {
	case MSR_FP:
		giveup_fpu(current);
		for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
			vcpu_fpr[i] = thread_fpr[get_fpr_index(i)];

		vcpu->arch.fpscr = t->fpscr.val;
		break;
	case MSR_VEC:
#ifdef CONFIG_ALTIVEC
		giveup_altivec(current);
		memcpy(vcpu->arch.vr, t->vr, sizeof(vcpu->arch.vr));
		vcpu->arch.vscr = t->vscr;
#endif
		break;
	case MSR_VSX:
#ifdef CONFIG_VSX
		__giveup_vsx(current);
		for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr); i++)
			vcpu_vsx[i] = thread_fpr[get_fpr_index(i) + 1];
#endif
		break;
	default:
		BUG();
	}

	vcpu->arch.guest_owned_ext &= ~msr;
	current->thread.regs->msr &= ~msr;
	kvmppc_recalc_shadow_msr(vcpu);
}
static int kvmppc_read_inst(struct kvm_vcpu *vcpu)
{
	ulong srr0 = kvmppc_get_pc(vcpu);
	u32 last_inst = kvmppc_get_last_inst(vcpu);
	int ret;

	ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false);
	if (ret == -ENOENT) {
		ulong msr = vcpu->arch.shared->msr;

		msr = kvmppc_set_field(msr, 33, 33, 1);
		msr = kvmppc_set_field(msr, 34, 36, 0);
		vcpu->arch.shared->msr = kvmppc_set_field(msr, 42, 47, 0);
		kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE);
		return EMULATE_AGAIN;
	}

	return EMULATE_DONE;
}
static int kvmppc_check_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr)
{
	/* Need to do paired single emulation? */
	if (!(vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE))
		return EMULATE_DONE;

	/* Read out the instruction */
	if (kvmppc_read_inst(vcpu) == EMULATE_DONE)
		/* Need to emulate */
		return EMULATE_FAIL;

	return EMULATE_AGAIN;
}
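
/*
 * FPU, Altivec and VSX state is switched lazily: the guest only gets a
 * facility loaded with its own state once it actually raises the
 * corresponding "unavailable" interrupt, and keeps it (tracked in
 * guest_owned_ext) until the host takes it back in kvmppc_giveup_ext().
 */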
/* Handle external providers (FPU, Altivec, VSX) */
static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
			     ulong msr)
{
	struct thread_struct *t = &current->thread;
	u64 *vcpu_fpr = vcpu->arch.fpr;
#ifdef CONFIG_VSX
	u64 *vcpu_vsx = vcpu->arch.vsr;
#endif
	u64 *thread_fpr = (u64 *)t->fpr;
	int i;

	/* When we have paired singles, we emulate in software */
	if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)
		return RESUME_GUEST;

	if (!(vcpu->arch.shared->msr & msr)) {
		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		return RESUME_GUEST;
	}

	/* We already own the ext */
	if (vcpu->arch.guest_owned_ext & msr) {
		return RESUME_GUEST;
	}

#ifdef DEBUG_EXT
	printk(KERN_INFO "Loading up ext 0x%lx\n", msr);
#endif

	current->thread.regs->msr |= msr;

	switch (msr) {
	case MSR_FP:
		for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
			thread_fpr[get_fpr_index(i)] = vcpu_fpr[i];

		t->fpscr.val = vcpu->arch.fpscr;
		t->fpexc_mode = 0;
		kvmppc_load_up_fpu();
		break;
	case MSR_VEC:
#ifdef CONFIG_ALTIVEC
		memcpy(t->vr, vcpu->arch.vr, sizeof(vcpu->arch.vr));
		t->vscr = vcpu->arch.vscr;
		t->vrsave = -1;
		kvmppc_load_up_altivec();
#endif
		break;
	case MSR_VSX:
#ifdef CONFIG_VSX
		for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr); i++)
			thread_fpr[get_fpr_index(i) + 1] = vcpu_vsx[i];
		kvmppc_load_up_vsx();
#endif
		break;
	default:
		BUG();
	}

	vcpu->arch.guest_owned_ext |= msr;

	kvmppc_recalc_shadow_msr(vcpu);

	return RESUME_GUEST;
}
int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int exit_nr)
{
	int r = RESUME_HOST;

	vcpu->stat.sum_exits++;

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	trace_kvm_book3s_exit(exit_nr, vcpu);

	switch (exit_nr) {
	case BOOK3S_INTERRUPT_INST_STORAGE:
		vcpu->stat.pf_instruc++;

#ifdef CONFIG_PPC_BOOK3S_32
		/* We set segments as unused segments when invalidating them.
		 * So treat the respective fault as a segment fault. */
		if (to_svcpu(vcpu)->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT]
		    == SR_INVALID) {
			kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
			r = RESUME_GUEST;
			break;
		}
#endif

		/* only care about PTEG not found errors, but leave NX alone */
		if (to_svcpu(vcpu)->shadow_srr1 & 0x40000000) {
			r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr);
			vcpu->stat.sp_instruc++;
		} else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
			  (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
			/*
			 * XXX If we do the dcbz hack we use the NX bit to
			 *     flush&patch the page, so we can't use the NX
			 *     bit inside the guest. Let's cross our fingers
			 *     that no guest that needs the dcbz hack does NX.
			 */
			kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
			r = RESUME_GUEST;
		} else {
			vcpu->arch.shared->msr |=
				to_svcpu(vcpu)->shadow_srr1 & 0x58000000;
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			r = RESUME_GUEST;
		}
		break;
	case BOOK3S_INTERRUPT_DATA_STORAGE:
	{
		ulong dar = kvmppc_get_fault_dar(vcpu);
		vcpu->stat.pf_storage++;

#ifdef CONFIG_PPC_BOOK3S_32
		/* We set segments as unused segments when invalidating them.
		 * So treat the respective fault as a segment fault. */
		if ((to_svcpu(vcpu)->sr[dar >> SID_SHIFT]) == SR_INVALID) {
			kvmppc_mmu_map_segment(vcpu, dar);
			r = RESUME_GUEST;
			break;
		}
#endif

		/* The only case we need to handle is missing shadow PTEs */
		if (to_svcpu(vcpu)->fault_dsisr & DSISR_NOHPTE) {
			r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
		} else {
			vcpu->arch.shared->dar = dar;
			vcpu->arch.shared->dsisr = to_svcpu(vcpu)->fault_dsisr;
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			r = RESUME_GUEST;
		}
		break;
	}
	case BOOK3S_INTERRUPT_DATA_SEGMENT:
		if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_fault_dar(vcpu)) < 0) {
			vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
			kvmppc_book3s_queue_irqprio(vcpu,
				BOOK3S_INTERRUPT_DATA_SEGMENT);
		}
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_INST_SEGMENT:
		if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)) < 0) {
			kvmppc_book3s_queue_irqprio(vcpu,
				BOOK3S_INTERRUPT_INST_SEGMENT);
		}
		r = RESUME_GUEST;
		break;
	/* We're good on these - the host merely wanted to get our attention */
	case BOOK3S_INTERRUPT_DECREMENTER:
		vcpu->stat.dec_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_EXTERNAL:
		vcpu->stat.ext_intr_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PERFMON:
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PROGRAM:
	{
		enum emulation_result er;
		ulong flags;

program_interrupt:
		flags = to_svcpu(vcpu)->shadow_srr1 & 0x1f0000ull;

		if (vcpu->arch.shared->msr & MSR_PR) {
#ifdef EXIT_DEBUG
			printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n", kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
#endif
			if ((kvmppc_get_last_inst(vcpu) & 0xff0007ff) !=
			    (INS_DCBZ & 0xfffffff7)) {
				kvmppc_core_queue_program(vcpu, flags);
				r = RESUME_GUEST;
				break;
			}
		}

		vcpu->stat.emulated_inst_exits++;
		er = kvmppc_emulate_instruction(run, vcpu);
		switch (er) {
		case EMULATE_DONE:
			r = RESUME_GUEST_NV;
			break;
		case EMULATE_AGAIN:
			r = RESUME_GUEST;
			break;
		case EMULATE_FAIL:
			printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
			       __func__, kvmppc_get_pc(vcpu),
			       kvmppc_get_last_inst(vcpu));
			kvmppc_core_queue_program(vcpu, flags);
			r = RESUME_GUEST;
			break;
		case EMULATE_DO_MMIO:
			run->exit_reason = KVM_EXIT_MMIO;
			r = RESUME_HOST_NV;
			break;
		default:
			BUG();
		}
		break;
	}
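	/*
	 * Three hypercall flavours can land here: PAPR "sc 1" calls from a
	 * privileged guest, MOL/OSI calls recognized by magic values in
	 * r3/r4, and KVM's own paravirt calls flagged by a magic r0.
	 * Anything else is a plain guest syscall and gets reflected back.
	 */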
	case BOOK3S_INTERRUPT_SYSCALL:
		if (vcpu->arch.papr_enabled &&
		    (kvmppc_get_last_inst(vcpu) == 0x44000022) &&
		    !(vcpu->arch.shared->msr & MSR_PR)) {
			/* SC 1 papr hypercalls */
			ulong cmd = kvmppc_get_gpr(vcpu, 3);
			int i;

			if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE) {
				r = RESUME_GUEST;
				break;
			}

			run->papr_hcall.nr = cmd;
			for (i = 0; i < 9; ++i) {
				ulong gpr = kvmppc_get_gpr(vcpu, 4 + i);
				run->papr_hcall.args[i] = gpr;
			}
			run->exit_reason = KVM_EXIT_PAPR_HCALL;
			vcpu->arch.hcall_needed = 1;
			r = RESUME_HOST;
		} else if (vcpu->arch.osi_enabled &&
		    (((u32)kvmppc_get_gpr(vcpu, 3)) == OSI_SC_MAGIC_R3) &&
		    (((u32)kvmppc_get_gpr(vcpu, 4)) == OSI_SC_MAGIC_R4)) {
			/* MOL hypercalls */
			u64 *gprs = run->osi.gprs;
			int i;

			run->exit_reason = KVM_EXIT_OSI;
			for (i = 0; i < 32; i++)
				gprs[i] = kvmppc_get_gpr(vcpu, i);
			vcpu->arch.osi_needed = 1;
			r = RESUME_HOST_NV;
		} else if (!(vcpu->arch.shared->msr & MSR_PR) &&
		    (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
			/* KVM PV hypercalls */
			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
			r = RESUME_GUEST;
		} else {
			/* Guest syscalls */
			vcpu->stat.syscall_exits++;
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			r = RESUME_GUEST;
		}
		break;
	case BOOK3S_INTERRUPT_FP_UNAVAIL:
	case BOOK3S_INTERRUPT_ALTIVEC:
	case BOOK3S_INTERRUPT_VSX:
	{
		int ext_msr = 0;

		switch (exit_nr) {
		case BOOK3S_INTERRUPT_FP_UNAVAIL: ext_msr = MSR_FP;  break;
		case BOOK3S_INTERRUPT_ALTIVEC:    ext_msr = MSR_VEC; break;
		case BOOK3S_INTERRUPT_VSX:        ext_msr = MSR_VSX; break;
		}

		switch (kvmppc_check_ext(vcpu, exit_nr)) {
		case EMULATE_DONE:
			/* everything ok - let's enable the ext */
			r = kvmppc_handle_ext(vcpu, exit_nr, ext_msr);
			break;
		case EMULATE_FAIL:
			/* we need to emulate this instruction */
			goto program_interrupt;
		default:
			/* nothing to worry about - go again */
			r = RESUME_GUEST;
			break;
		}
		break;
	}
	case BOOK3S_INTERRUPT_ALIGNMENT:
		if (kvmppc_read_inst(vcpu) == EMULATE_DONE) {
			vcpu->arch.shared->dsisr = kvmppc_alignment_dsisr(vcpu,
				kvmppc_get_last_inst(vcpu));
			vcpu->arch.shared->dar = kvmppc_alignment_dar(vcpu,
				kvmppc_get_last_inst(vcpu));
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		}
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_MACHINE_CHECK:
	case BOOK3S_INTERRUPT_TRACE:
		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		r = RESUME_GUEST;
		break;
	default:
		/* Ugh - bork here! What did we get? */
		printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
			exit_nr, kvmppc_get_pc(vcpu), to_svcpu(vcpu)->shadow_srr1);
		r = RESUME_HOST;
		BUG();
		break;
	}

	if (!(r & RESUME_HOST)) {
		/* To avoid clobbering exit_reason, only check for signals if
		 * we aren't already exiting to userspace for some other
		 * reason. */
		if (signal_pending(current)) {
#ifdef EXIT_DEBUG
			printk(KERN_EMERG "KVM: Going back to host\n");
#endif
			vcpu->stat.signal_exits++;
			run->exit_reason = KVM_EXIT_INTR;
			r = -EINTR;
		} else {
			/* In case an interrupt came in that was triggered
			 * from userspace (like DEC), we need to check what
			 * to inject now! */
			kvmppc_core_deliver_interrupts(vcpu);
		}
	}

	trace_kvm_book3s_reenter(r, vcpu);

	return r;
}
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int i;

	sregs->pvr = vcpu->arch.pvr;

	sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1;
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
		for (i = 0; i < 64; i++) {
			sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige | i;
			sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
		}
	} else {
		for (i = 0; i < 16; i++)
			sregs->u.s.ppc32.sr[i] = vcpu->arch.shared->sr[i];

		for (i = 0; i < 8; i++) {
			sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw;
			sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw;
		}
	}

	if (sregs->u.s.flags & KVM_SREGS_S_HIOR)
		sregs->u.s.hior = to_book3s(vcpu)->hior;

	return 0;
}
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int i;

	kvmppc_set_pvr(vcpu, sregs->pvr);

	vcpu3s->sdr1 = sregs->u.s.sdr1;
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
		for (i = 0; i < 64; i++) {
			vcpu->arch.mmu.slbmte(vcpu, sregs->u.s.ppc64.slb[i].slbv,
						    sregs->u.s.ppc64.slb[i].slbe);
		}
	} else {
		for (i = 0; i < 16; i++) {
			vcpu->arch.mmu.mtsrin(vcpu, i, sregs->u.s.ppc32.sr[i]);
		}
		for (i = 0; i < 8; i++) {
			kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), false,
				       (u32)sregs->u.s.ppc32.ibat[i]);
			kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), true,
				       (u32)(sregs->u.s.ppc32.ibat[i] >> 32));
			kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), false,
				       (u32)sregs->u.s.ppc32.dbat[i]);
			kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), true,
				       (u32)(sregs->u.s.ppc32.dbat[i] >> 32));
		}
	}

	/* Flush the MMU after messing with the segments */
	kvmppc_mmu_pte_flush(vcpu, 0, 0);

	if (sregs->u.s.flags & KVM_SREGS_S_HIOR) {
		to_book3s(vcpu)->hior_sregs = true;
		to_book3s(vcpu)->hior = sregs->u.s.hior;
	}

	return 0;
}
int kvmppc_core_check_processor_compat(void)
{
	return 0;
}
struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s;
	struct kvm_vcpu *vcpu;
	int err = -ENOMEM;
	unsigned long p;

	vcpu_book3s = vzalloc(sizeof(struct kvmppc_vcpu_book3s));
	if (!vcpu_book3s)
		goto out;

	vcpu_book3s->shadow_vcpu = (struct kvmppc_book3s_shadow_vcpu *)
		kzalloc(sizeof(*vcpu_book3s->shadow_vcpu), GFP_KERNEL);
	if (!vcpu_book3s->shadow_vcpu)
		goto free_vcpu;

	vcpu = &vcpu_book3s->vcpu;
	err = kvm_vcpu_init(vcpu, kvm, id);
	if (err)
		goto free_shadow_vcpu;

	p = __get_free_page(GFP_KERNEL|__GFP_ZERO);
	if (!p)
		goto uninit_vcpu;
	/* the real shared page fills the last 4k of our page */
	vcpu->arch.shared = (void *)(p + PAGE_SIZE - 4096);

#ifdef CONFIG_PPC_BOOK3S_64
	/* default to book3s_64 (970fx) */
	vcpu->arch.pvr = 0x3C0301;
#else
	/* default to book3s_32 (750) */
	vcpu->arch.pvr = 0x84202;
#endif
	kvmppc_set_pvr(vcpu, vcpu->arch.pvr);
	vcpu->arch.slb_nr = 64;

	vcpu->arch.shadow_msr = MSR_USER64;

	err = kvmppc_mmu_init(vcpu);
	if (err < 0)
		goto uninit_vcpu;

	return vcpu;

uninit_vcpu:
	kvm_vcpu_uninit(vcpu);
free_shadow_vcpu:
	kfree(vcpu_book3s->shadow_vcpu);
free_vcpu:
	vfree(vcpu_book3s);
out:
	return ERR_PTR(err);
}
void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);

	free_page((unsigned long)vcpu->arch.shared & PAGE_MASK);
	kvm_vcpu_uninit(vcpu);
	kfree(vcpu_book3s->shadow_vcpu);
	vfree(vcpu_book3s);
}
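
/*
 * Actually run guest code.  The host's FPU/Altivec/VSX state is parked
 * on the stack for the duration of the run, because kvmppc_handle_ext()
 * may hand any of those register files to the guest; it is taken back
 * via kvmppc_giveup_ext() and restored before returning to the host
 * thread.
 */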
int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	int ret;
	double fpr[32][TS_FPRWIDTH];
	unsigned int fpscr;
	int fpexc_mode;
#ifdef CONFIG_ALTIVEC
	vector128 vr[32];
	vector128 vscr;
	unsigned long uninitialized_var(vrsave);
	int used_vr;
#endif
#ifdef CONFIG_VSX
	int used_vsr;
#endif
	ulong ext_msr;

	/* Check if we can run the vcpu at all */
	if (!vcpu->arch.sane) {
		kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = -EINVAL;
		goto out;
	}

	/* No need to go into the guest when all we do is going out */
	if (signal_pending(current)) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		ret = -EINTR;
		goto out;
	}

	/* Save FPU state in stack */
	if (current->thread.regs->msr & MSR_FP)
		giveup_fpu(current);
	memcpy(fpr, current->thread.fpr, sizeof(current->thread.fpr));
	fpscr = current->thread.fpscr.val;
	fpexc_mode = current->thread.fpexc_mode;

#ifdef CONFIG_ALTIVEC
	/* Save Altivec state in stack */
	used_vr = current->thread.used_vr;
	if (used_vr) {
		if (current->thread.regs->msr & MSR_VEC)
			giveup_altivec(current);
		memcpy(vr, current->thread.vr, sizeof(current->thread.vr));
		vscr = current->thread.vscr;
		vrsave = current->thread.vrsave;
	}
#endif

#ifdef CONFIG_VSX
	/* Save VSX state in stack */
	used_vsr = current->thread.used_vsr;
	if (used_vsr && (current->thread.regs->msr & MSR_VSX))
		__giveup_vsx(current);
#endif

	/* Remember the MSR with disabled extensions */
	ext_msr = current->thread.regs->msr;

	/* Preload FPU if it's enabled */
	if (vcpu->arch.shared->msr & MSR_FP)
		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);

	kvm_guest_enter();

	ret = __kvmppc_vcpu_run(kvm_run, vcpu);

	kvm_guest_exit();

	current->thread.regs->msr = ext_msr;

	/* Make sure we save the guest FPU/Altivec/VSX state */
	kvmppc_giveup_ext(vcpu, MSR_FP);
	kvmppc_giveup_ext(vcpu, MSR_VEC);
	kvmppc_giveup_ext(vcpu, MSR_VSX);

	/* Restore FPU state from stack */
	memcpy(current->thread.fpr, fpr, sizeof(current->thread.fpr));
	current->thread.fpscr.val = fpscr;
	current->thread.fpexc_mode = fpexc_mode;

#ifdef CONFIG_ALTIVEC
	/* Restore Altivec state from stack */
	if (used_vr && current->thread.used_vr) {
		memcpy(current->thread.vr, vr, sizeof(current->thread.vr));
		current->thread.vscr = vscr;
		current->thread.vrsave = vrsave;
	}
	current->thread.used_vr = used_vr;
#endif

#ifdef CONFIG_VSX
	current->thread.used_vsr = used_vsr;
#endif

out:
	return ret;
}
int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				      struct kvm_userspace_memory_region *mem)
{
	return 0;
}

void kvmppc_core_commit_memory_region(struct kvm *kvm,
				      struct kvm_userspace_memory_region *mem)
{
}

int kvmppc_core_init_vm(struct kvm *kvm)
{
	return 0;
}

void kvmppc_core_destroy_vm(struct kvm *kvm)
{
}
static int kvmppc_book3s_init(void)
{
	int r;

	r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_book3s), 0,
		     THIS_MODULE);
	if (r)
		return r;

	r = kvmppc_mmu_hpte_sysinit();

	return r;
}

static void kvmppc_book3s_exit(void)
{
	kvmppc_mmu_hpte_sysexit();
	kvm_exit();
}

module_init(kvmppc_book3s_init);
module_exit(kvmppc_book3s_exit);