/*
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */
#include <linux/kvm_host.h>
#include <linux/err.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

/* #define EXIT_DEBUG */
/* #define EXIT_DEBUG_SIMPLE */
/* #define DEBUG_EXT */

static void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "exits",       VCPU_STAT(sum_exits) },
	{ "mmio",        VCPU_STAT(mmio_exits) },
	{ "sig",         VCPU_STAT(signal_exits) },
	{ "sysc",        VCPU_STAT(syscall_exits) },
	{ "inst_emu",    VCPU_STAT(emulated_inst_exits) },
	{ "dec",         VCPU_STAT(dec_exits) },
	{ "ext_intr",    VCPU_STAT(ext_intr_exits) },
	{ "queue_intr",  VCPU_STAT(queue_intr) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "pf_storage",  VCPU_STAT(pf_storage) },
	{ "sp_storage",  VCPU_STAT(sp_storage) },
	{ "pf_instruc",  VCPU_STAT(pf_instruc) },
	{ "sp_instruc",  VCPU_STAT(sp_instruc) },
	{ "ld",          VCPU_STAT(ld) },
	{ "ld_slow",     VCPU_STAT(ld_slow) },
	{ "st",          VCPU_STAT(st) },
	{ "st_slow",     VCPU_STAT(st_slow) },
	{ NULL }
};
void kvmppc_core_load_host_debugstate(struct kvm_vcpu *vcpu)
{
}

void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu)
{
}
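/*
 * The real-mode interrupt handlers work on a shadow copy of the SLB and
 * vcpu state that lives in the PACA; vcpu_load fills it in, vcpu_put
 * (below) copies it back into the vcpu struct.
 */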
void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	memcpy(get_paca()->kvm_slb, to_book3s(vcpu)->slb_shadow,
	       sizeof(get_paca()->kvm_slb));
	memcpy(&get_paca()->shadow_vcpu, &to_book3s(vcpu)->shadow_vcpu,
	       sizeof(get_paca()->shadow_vcpu));
	get_paca()->kvm_slb_max = to_book3s(vcpu)->slb_shadow_max;
}
void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
	memcpy(to_book3s(vcpu)->slb_shadow, get_paca()->kvm_slb,
	       sizeof(get_paca()->kvm_slb));
	memcpy(&to_book3s(vcpu)->shadow_vcpu, &get_paca()->shadow_vcpu,
	       sizeof(get_paca()->shadow_vcpu));
	to_book3s(vcpu)->slb_shadow_max = get_paca()->kvm_slb_max;

	kvmppc_giveup_ext(vcpu, MSR_FP);
	kvmppc_giveup_ext(vcpu, MSR_VEC);
	kvmppc_giveup_ext(vcpu, MSR_VSX);
}
#if defined(EXIT_DEBUG)
static u32 kvmppc_get_dec(struct kvm_vcpu *vcpu)
{
	u64 jd = mftb() - vcpu->arch.dec_jiffies;
	return vcpu->arch.dec - jd;
}
#endif
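/*
 * The guest always runs with MSR_PR set (i.e. in host user mode), so we
 * keep a shadow MSR for the real hardware: only the bits masked in below,
 * plus any guest-owned FP/VEC/VSX bits, pass through from the guest MSR.
 */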
void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
{
	ulong old_msr = vcpu->arch.msr;

#ifdef EXIT_DEBUG
	printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr);
#endif

	msr &= to_book3s(vcpu)->msr_mask;
	vcpu->arch.msr = msr;
	vcpu->arch.shadow_msr = msr | MSR_USER32;
	vcpu->arch.shadow_msr &= (MSR_FE0 | MSR_USER64 | MSR_SE | MSR_BE |
				  MSR_DE | MSR_FE1);
	vcpu->arch.shadow_msr |= (msr & vcpu->arch.guest_owned_ext);

	if (msr & (MSR_WE|MSR_POW)) {
		if (!vcpu->arch.pending_exceptions) {
			kvm_vcpu_block(vcpu);
			vcpu->stat.halt_wakeup++;
		}
	}

	if (((vcpu->arch.msr & (MSR_IR|MSR_DR)) != (old_msr & (MSR_IR|MSR_DR))) ||
	    (vcpu->arch.msr & MSR_PR) != (old_msr & MSR_PR)) {
		kvmppc_mmu_flush_segments(vcpu);
		kvmppc_mmu_map_segment(vcpu, vcpu->arch.pc);
	}
}
void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
{
	vcpu->arch.srr0 = vcpu->arch.pc;
	vcpu->arch.srr1 = vcpu->arch.msr | flags;
	vcpu->arch.pc = to_book3s(vcpu)->hior + vec;
	vcpu->arch.mmu.reset_msr(vcpu);
}
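/*
 * Map an exception vector offset to its delivery priority.  Lower
 * priority numbers get delivered first by
 * kvmppc_core_deliver_interrupts() below.
 */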
static int kvmppc_book3s_vec2irqprio(unsigned int vec)
{
	unsigned int prio;

	switch (vec) {
	case 0x100: prio = BOOK3S_IRQPRIO_SYSTEM_RESET;		break;
	case 0x200: prio = BOOK3S_IRQPRIO_MACHINE_CHECK;	break;
	case 0x300: prio = BOOK3S_IRQPRIO_DATA_STORAGE;		break;
	case 0x380: prio = BOOK3S_IRQPRIO_DATA_SEGMENT;		break;
	case 0x400: prio = BOOK3S_IRQPRIO_INST_STORAGE;		break;
	case 0x480: prio = BOOK3S_IRQPRIO_INST_SEGMENT;		break;
	case 0x500: prio = BOOK3S_IRQPRIO_EXTERNAL;		break;
	case 0x600: prio = BOOK3S_IRQPRIO_ALIGNMENT;		break;
	case 0x700: prio = BOOK3S_IRQPRIO_PROGRAM;		break;
	case 0x800: prio = BOOK3S_IRQPRIO_FP_UNAVAIL;		break;
	case 0x900: prio = BOOK3S_IRQPRIO_DECREMENTER;		break;
	case 0xc00: prio = BOOK3S_IRQPRIO_SYSCALL;		break;
	case 0xd00: prio = BOOK3S_IRQPRIO_DEBUG;		break;
	case 0xf20: prio = BOOK3S_IRQPRIO_ALTIVEC;		break;
	case 0xf40: prio = BOOK3S_IRQPRIO_VSX;			break;
	default:    prio = BOOK3S_IRQPRIO_MAX;			break;
	}

	return prio;
}
static void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
					  unsigned int vec)
{
	clear_bit(kvmppc_book3s_vec2irqprio(vec),
		  &vcpu->arch.pending_exceptions);
}

void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
{
	vcpu->stat.queue_intr++;

	set_bit(kvmppc_book3s_vec2irqprio(vec),
		&vcpu->arch.pending_exceptions);
#ifdef EXIT_DEBUG
	printk(KERN_INFO "Queueing interrupt %x\n", vec);
#endif
}
void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags)
{
	to_book3s(vcpu)->prog_flags = flags;
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_PROGRAM);
}

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}

int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
	return test_bit(BOOK3S_INTERRUPT_DECREMENTER >> 7,
			&vcpu->arch.pending_exceptions);
}

void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}

void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
				struct kvm_interrupt *irq)
{
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
}
int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
{
	int deliver = 1;
	int vec = 0;
	ulong flags = 0ULL;

	switch (priority) {
	case BOOK3S_IRQPRIO_DECREMENTER:
		deliver = vcpu->arch.msr & MSR_EE;
		vec = BOOK3S_INTERRUPT_DECREMENTER;
		break;
	case BOOK3S_IRQPRIO_EXTERNAL:
		deliver = vcpu->arch.msr & MSR_EE;
		vec = BOOK3S_INTERRUPT_EXTERNAL;
		break;
	case BOOK3S_IRQPRIO_SYSTEM_RESET:
		vec = BOOK3S_INTERRUPT_SYSTEM_RESET;
		break;
	case BOOK3S_IRQPRIO_MACHINE_CHECK:
		vec = BOOK3S_INTERRUPT_MACHINE_CHECK;
		break;
	case BOOK3S_IRQPRIO_DATA_STORAGE:
		vec = BOOK3S_INTERRUPT_DATA_STORAGE;
		break;
	case BOOK3S_IRQPRIO_INST_STORAGE:
		vec = BOOK3S_INTERRUPT_INST_STORAGE;
		break;
	case BOOK3S_IRQPRIO_DATA_SEGMENT:
		vec = BOOK3S_INTERRUPT_DATA_SEGMENT;
		break;
	case BOOK3S_IRQPRIO_INST_SEGMENT:
		vec = BOOK3S_INTERRUPT_INST_SEGMENT;
		break;
	case BOOK3S_IRQPRIO_ALIGNMENT:
		vec = BOOK3S_INTERRUPT_ALIGNMENT;
		break;
	case BOOK3S_IRQPRIO_PROGRAM:
		vec = BOOK3S_INTERRUPT_PROGRAM;
		flags = to_book3s(vcpu)->prog_flags;
		break;
	case BOOK3S_IRQPRIO_VSX:
		vec = BOOK3S_INTERRUPT_VSX;
		break;
	case BOOK3S_IRQPRIO_ALTIVEC:
		vec = BOOK3S_INTERRUPT_ALTIVEC;
		break;
	case BOOK3S_IRQPRIO_FP_UNAVAIL:
		vec = BOOK3S_INTERRUPT_FP_UNAVAIL;
		break;
	case BOOK3S_IRQPRIO_SYSCALL:
		vec = BOOK3S_INTERRUPT_SYSCALL;
		break;
	case BOOK3S_IRQPRIO_DEBUG:
		vec = BOOK3S_INTERRUPT_TRACE;
		break;
	case BOOK3S_IRQPRIO_PERFORMANCE_MONITOR:
		vec = BOOK3S_INTERRUPT_PERFMON;
		break;
	default:
		deliver = 0;
		printk(KERN_ERR "KVM: Unknown interrupt: 0x%x\n", priority);
		break;
	}

#ifdef EXIT_DEBUG
	printk(KERN_INFO "Deliver interrupt 0x%x? %x\n", vec, deliver);
#endif

	if (deliver)
		kvmppc_inject_interrupt(vcpu, vec, flags);

	return deliver;
}
void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned int priority;

#ifdef EXIT_DEBUG
	if (vcpu->arch.pending_exceptions)
		printk(KERN_EMERG "KVM: Check pending: %lx\n", vcpu->arch.pending_exceptions);
#endif
	priority = __ffs(*pending);
	while (priority <= (sizeof(unsigned int) * 8)) {
		if (kvmppc_book3s_irqprio_deliver(vcpu, priority) &&
		    (priority != BOOK3S_IRQPRIO_DECREMENTER)) {
			/* DEC interrupts get cleared by mtdec */
			clear_bit(priority, &vcpu->arch.pending_exceptions);
		}

		priority = find_next_bit(pending,
					 BITS_PER_BYTE * sizeof(*pending),
					 priority + 1);
	}
}
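/*
 * The PVR selects which guest CPU we emulate: PVRs in the 64-bit class
 * range below get the Book3s_64 MMU model, everything else falls back
 * to the Book3s_32 one.
 */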
void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
{
	vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB;
	vcpu->arch.pvr = pvr;
	if ((pvr >= 0x330000) && (pvr < 0x70330000)) {
		kvmppc_mmu_book3s_64_init(vcpu);
		to_book3s(vcpu)->hior = 0xfff00000;
		to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL;
	} else {
		kvmppc_mmu_book3s_32_init(vcpu);
		to_book3s(vcpu)->hior = 0;
		to_book3s(vcpu)->msr_mask = 0xffffffffULL;
	}

	/* If we are in hypervisor level on 970, we can tell the CPU to
	 * treat DCBZ as a 32-byte store */
	vcpu->arch.hflags &= ~BOOK3S_HFLAG_DCBZ32;
	if (vcpu->arch.mmu.is_dcbz32(vcpu) && (mfmsr() & MSR_HV) &&
	    !strcmp(cur_cpu_spec->platform, "ppc970"))
		vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
}
/* Book3s_32 CPUs always have 32-byte cache lines, which Linux assumes.  To
 * make Book3s_32 Linux work on Book3s_64, we have to make sure we trap dcbz
 * so we can emulate a 32-byte dcbz length.
 *
 * The Book3s_64 inventors also realized this case and implemented a special
 * bit in the HID5 register, which is a hypervisor resource.  Thus we can't
 * use it.
 *
 * My approach here is to patch the dcbz instruction on executing pages.
 */
static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
{
	bool touched = false;
	hva_t hpage;
	u32 *page;
	int i;

	hpage = gfn_to_hva(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
	if (kvm_is_error_hva(hpage))
		return;

	hpage |= pte->raddr & ~PAGE_MASK;

	page = vmalloc(HW_PAGE_SIZE);

	if (copy_from_user(page, (void __user *)hpage, HW_PAGE_SIZE))
		goto out;

	for (i = 0; i < HW_PAGE_SIZE / 4; i++)
		if ((page[i] & 0xff0007ff) == INS_DCBZ) {
			page[i] &= 0xfffffff7; /* reserved instruction, so we trap */
			touched = true;
		}

	if (touched)
		copy_to_user((void __user *)hpage, page, HW_PAGE_SIZE);

out:
	vfree(page);
}
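/*
 * Translate an effective address through the guest MMU, or, with
 * translation off, map it 1:1 and tag the virtual page with one of the
 * magic VSID_REAL* markers so real-mode and translated mappings never
 * alias in the shadow MMU.
 */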
static int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, bool data,
			struct kvmppc_pte *pte)
{
	int relocated = (vcpu->arch.msr & (data ? MSR_DR : MSR_IR));
	int r;

	if (relocated) {
		r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data);
	} else {
		pte->eaddr = eaddr;
		pte->raddr = eaddr & 0xffffffff;
		pte->vpage = eaddr >> 12;
		switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
		case 0:
			pte->vpage |= VSID_REAL;
			break;
		case MSR_DR:
			pte->vpage |= VSID_REAL_DR;
			break;
		case MSR_IR:
			pte->vpage |= VSID_REAL_IR;
			break;
		}
		pte->may_read = true;
		pte->may_write = true;
		pte->may_execute = true;
		r = 0;
	}

	return r;
}
static hva_t kvmppc_bad_hva(void)
{
	return PAGE_OFFSET;
}

static hva_t kvmppc_pte_to_hva(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte,
			       bool read)
{
	hva_t hpage;

	if (read && !pte->may_read)
		goto err;

	if (!read && !pte->may_write)
		goto err;

	hpage = gfn_to_hva(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
	if (kvm_is_error_hva(hpage))
		goto err;

	return hpage | (pte->raddr & ~PAGE_MASK);
err:
	return kvmppc_bad_hva();
}
int kvmppc_st(struct kvm_vcpu *vcpu, ulong eaddr, int size, void *ptr)
{
	struct kvmppc_pte pte;
	hva_t hva = eaddr;

	vcpu->stat.st++;

	if (kvmppc_xlate(vcpu, eaddr, false, &pte))
		goto err;

	hva = kvmppc_pte_to_hva(vcpu, &pte, false);
	if (kvm_is_error_hva(hva))
		goto err;

	if (copy_to_user((void __user *)hva, ptr, size)) {
		printk(KERN_INFO "kvmppc_st at 0x%lx failed\n", hva);
		goto err;
	}

	return 0;

err:
	return -ENOENT;
}

int kvmppc_ld(struct kvm_vcpu *vcpu, ulong eaddr, int size, void *ptr,
	      bool data)
{
	struct kvmppc_pte pte;
	hva_t hva = eaddr;

	vcpu->stat.ld++;

	if (kvmppc_xlate(vcpu, eaddr, data, &pte))
		goto err;

	hva = kvmppc_pte_to_hva(vcpu, &pte, true);
	if (kvm_is_error_hva(hva))
		goto err;

	if (copy_from_user(ptr, (void __user *)hva, size)) {
		printk(KERN_INFO "kvmppc_ld at 0x%lx failed\n", hva);
		goto err;
	}

	return 0;

err:
	return -ENOENT;
}
static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	return kvm_is_visible_gfn(vcpu->kvm, gfn);
}
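/*
 * Resolve a guest page fault: walk the guest MMU (or fake a 1:1 mapping
 * in real mode), then either map the page on the host, reflect the fault
 * back into the guest, or hand the access to the MMIO emulator.
 */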
int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
			    ulong eaddr, int vec)
{
	bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE);
	int r = RESUME_GUEST;
	int relocated;
	int page_found = 0;
	struct kvmppc_pte pte;
	bool is_mmio = false;

	if ( vec == BOOK3S_INTERRUPT_DATA_STORAGE ) {
		relocated = (vcpu->arch.msr & MSR_DR);
	} else {
		relocated = (vcpu->arch.msr & MSR_IR);
	}

	/* Resolve real address if translation turned on */
	if (relocated) {
		page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data);
	} else {
		pte.may_execute = true;
		pte.may_read = true;
		pte.may_write = true;
		pte.raddr = eaddr & 0xffffffff;
		pte.eaddr = eaddr;
		pte.vpage = eaddr >> 12;
		switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
		case 0:
			pte.vpage |= VSID_REAL;
			break;
		case MSR_DR:
			pte.vpage |= VSID_REAL_DR;
			break;
		case MSR_IR:
			pte.vpage |= VSID_REAL_IR;
			break;
		}
	}

	if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
	   (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
		/*
		 * If we do the dcbz hack, we have to NX on every execution,
		 * so we can patch the executing code. This renders our guest
		 * NX-less.
		 */
		pte.may_execute = !data;
	}

	if (page_found == -ENOENT) {
		/* Page not found in guest PTE entries */
		vcpu->arch.dear = vcpu->arch.fault_dear;
		to_book3s(vcpu)->dsisr = vcpu->arch.fault_dsisr;
		vcpu->arch.msr |= (vcpu->arch.shadow_msr & 0x00000000f8000000ULL);
		kvmppc_book3s_queue_irqprio(vcpu, vec);
	} else if (page_found == -EPERM) {
		/* Storage protection */
		vcpu->arch.dear = vcpu->arch.fault_dear;
		to_book3s(vcpu)->dsisr = vcpu->arch.fault_dsisr & ~DSISR_NOHPTE;
		to_book3s(vcpu)->dsisr |= DSISR_PROTFAULT;
		vcpu->arch.msr |= (vcpu->arch.shadow_msr & 0x00000000f8000000ULL);
		kvmppc_book3s_queue_irqprio(vcpu, vec);
	} else if (page_found == -EINVAL) {
		/* Page not found in guest SLB */
		vcpu->arch.dear = vcpu->arch.fault_dear;
		kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
	} else if (!is_mmio &&
		   kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) {
		/* The guest's PTE is not mapped yet. Map on the host */
		kvmppc_mmu_map_page(vcpu, &pte);
		if (data)
			vcpu->stat.sp_storage++;
		else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
			(!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32)))
			kvmppc_patch_dcbz(vcpu, &pte);
	} else {
		/* MMIO */
		vcpu->stat.mmio_exits++;
		vcpu->arch.paddr_accessed = pte.raddr;
		r = kvmppc_emulate_mmio(run, vcpu);
		if ( r == RESUME_HOST_NV )
			r = RESUME_HOST;
	}

	return r;
}
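/*
 * With CONFIG_VSX the FP registers occupy every second doubleword of the
 * combined FP/VSR save area in thread_struct, hence the doubled index;
 * the VSX low halves live at the odd slots (index + 1).
 */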
static inline int get_fpr_index(int i)
{
#ifdef CONFIG_VSX
	i *= 2;
#endif
	return i;
}
/* Give up external provider (FPU, Altivec, VSX) */
static void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
{
	struct thread_struct *t = &current->thread;
	u64 *vcpu_fpr = vcpu->arch.fpr;
	u64 *vcpu_vsx = vcpu->arch.vsr;
	u64 *thread_fpr = (u64 *)t->fpr;
	int i;

	if (!(vcpu->arch.guest_owned_ext & msr))
		return;

#ifdef DEBUG_EXT
	printk(KERN_INFO "Giving up ext 0x%lx\n", msr);
#endif

	switch (msr) {
	case MSR_FP:
		giveup_fpu(current);
		for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
			vcpu_fpr[i] = thread_fpr[get_fpr_index(i)];

		vcpu->arch.fpscr = t->fpscr.val;
		break;
	case MSR_VEC:
#ifdef CONFIG_ALTIVEC
		giveup_altivec(current);
		memcpy(vcpu->arch.vr, t->vr, sizeof(vcpu->arch.vr));
		vcpu->arch.vscr = t->vscr;
#endif
		break;
	case MSR_VSX:
#ifdef CONFIG_VSX
		__giveup_vsx(current);
		for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr); i++)
			vcpu_vsx[i] = thread_fpr[get_fpr_index(i) + 1];
#endif
		break;
	default:
		BUG();
	}

	vcpu->arch.guest_owned_ext &= ~msr;
	current->thread.regs->msr &= ~msr;
	kvmppc_set_msr(vcpu, vcpu->arch.msr);
}
/* Handle external providers (FPU, Altivec, VSX) */
static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
			     ulong msr)
{
	struct thread_struct *t = &current->thread;
	u64 *vcpu_fpr = vcpu->arch.fpr;
	u64 *vcpu_vsx = vcpu->arch.vsr;
	u64 *thread_fpr = (u64 *)t->fpr;
	int i;

	if (!(vcpu->arch.msr & msr)) {
		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		return RESUME_GUEST;
	}

#ifdef DEBUG_EXT
	printk(KERN_INFO "Loading up ext 0x%lx\n", msr);
#endif

	current->thread.regs->msr |= msr;

	switch (msr) {
	case MSR_FP:
		for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
			thread_fpr[get_fpr_index(i)] = vcpu_fpr[i];

		t->fpscr.val = vcpu->arch.fpscr;
		kvmppc_load_up_fpu();
		break;
	case MSR_VEC:
#ifdef CONFIG_ALTIVEC
		memcpy(t->vr, vcpu->arch.vr, sizeof(vcpu->arch.vr));
		t->vscr = vcpu->arch.vscr;
		kvmppc_load_up_altivec();
#endif
		break;
	case MSR_VSX:
#ifdef CONFIG_VSX
		for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr); i++)
			thread_fpr[get_fpr_index(i) + 1] = vcpu_vsx[i];
		kvmppc_load_up_vsx();
#endif
		break;
	default:
		BUG();
	}

	vcpu->arch.guest_owned_ext |= msr;

	kvmppc_set_msr(vcpu, vcpu->arch.msr);

	return RESUME_GUEST;
}
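/*
 * Top-level exit dispatcher, called from the assembly exit path with the
 * interrupt vector that kicked us out of the guest.  Returns a RESUME_*
 * code: re-enter the guest or go back to the host/userspace.
 */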
int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int exit_nr)
{
	int r = RESUME_HOST;

	vcpu->stat.sum_exits++;

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;
#ifdef EXIT_DEBUG
	printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | dar=0x%lx | dec=0x%x | msr=0x%lx\n",
		exit_nr, vcpu->arch.pc, vcpu->arch.fault_dear,
		kvmppc_get_dec(vcpu), vcpu->arch.msr);
#elif defined (EXIT_DEBUG_SIMPLE)
	if ((exit_nr != 0x900) && (exit_nr != 0x500))
		printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | dar=0x%lx | msr=0x%lx\n",
			exit_nr, vcpu->arch.pc, vcpu->arch.fault_dear,
			vcpu->arch.msr);
#endif

	switch (exit_nr) {
	case BOOK3S_INTERRUPT_INST_STORAGE:
		vcpu->stat.pf_instruc++;
		/* only care about PTEG not found errors, but leave NX alone */
		if (vcpu->arch.shadow_msr & 0x40000000) {
			r = kvmppc_handle_pagefault(run, vcpu, vcpu->arch.pc, exit_nr);
			vcpu->stat.sp_instruc++;
		} else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
			  (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
			/*
			 * XXX If we do the dcbz hack we use the NX bit to flush&patch the page,
			 *     so we can't use the NX bit inside the guest. Let's cross our fingers,
			 *     that no guest that needs the dcbz hack does NX.
			 */
			kvmppc_mmu_pte_flush(vcpu, vcpu->arch.pc, ~0xFFFULL);
			r = RESUME_GUEST;
		} else {
			vcpu->arch.msr |= (vcpu->arch.shadow_msr & 0x58000000);
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			kvmppc_mmu_pte_flush(vcpu, vcpu->arch.pc, ~0xFFFULL);
			r = RESUME_GUEST;
		}
		break;
	case BOOK3S_INTERRUPT_DATA_STORAGE:
		vcpu->stat.pf_storage++;
		/* The only case we need to handle is missing shadow PTEs */
		if (vcpu->arch.fault_dsisr & DSISR_NOHPTE) {
			r = kvmppc_handle_pagefault(run, vcpu, vcpu->arch.fault_dear, exit_nr);
		} else {
			vcpu->arch.dear = vcpu->arch.fault_dear;
			to_book3s(vcpu)->dsisr = vcpu->arch.fault_dsisr;
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			kvmppc_mmu_pte_flush(vcpu, vcpu->arch.dear, ~0xFFFULL);
			r = RESUME_GUEST;
		}
		break;
	case BOOK3S_INTERRUPT_DATA_SEGMENT:
		if (kvmppc_mmu_map_segment(vcpu, vcpu->arch.fault_dear) < 0) {
			vcpu->arch.dear = vcpu->arch.fault_dear;
			kvmppc_book3s_queue_irqprio(vcpu,
				BOOK3S_INTERRUPT_DATA_SEGMENT);
		}
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_INST_SEGMENT:
		if (kvmppc_mmu_map_segment(vcpu, vcpu->arch.pc) < 0) {
			kvmppc_book3s_queue_irqprio(vcpu,
				BOOK3S_INTERRUPT_INST_SEGMENT);
		}
		r = RESUME_GUEST;
		break;
	/* We're good on these - the host merely wanted to get our attention */
	case BOOK3S_INTERRUPT_DECREMENTER:
		vcpu->stat.dec_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_EXTERNAL:
		vcpu->stat.ext_intr_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PROGRAM:
	{
		enum emulation_result er;
		ulong flags;

		flags = (vcpu->arch.shadow_msr & 0x1f0000ull);

		if (vcpu->arch.msr & MSR_PR) {
#ifdef EXIT_DEBUG
			printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n", vcpu->arch.pc, vcpu->arch.last_inst);
#endif
			if ((vcpu->arch.last_inst & 0xff0007ff) !=
			    (INS_DCBZ & 0xfffffff7)) {
				kvmppc_core_queue_program(vcpu, flags);
				r = RESUME_GUEST;
				break;
			}
		}

		vcpu->stat.emulated_inst_exits++;
		er = kvmppc_emulate_instruction(run, vcpu);
		switch (er) {
		case EMULATE_DONE:
			r = RESUME_GUEST_NV;
			break;
		case EMULATE_FAIL:
			printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
			       __func__, vcpu->arch.pc, vcpu->arch.last_inst);
			kvmppc_core_queue_program(vcpu, flags);
			r = RESUME_GUEST;
			break;
		default:
			BUG();
		}
		break;
	}
	case BOOK3S_INTERRUPT_SYSCALL:
#ifdef EXIT_DEBUG
		printk(KERN_INFO "Syscall Nr %d\n", (int)kvmppc_get_gpr(vcpu, 0));
#endif
		vcpu->stat.syscall_exits++;
		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_FP_UNAVAIL:
		r = kvmppc_handle_ext(vcpu, exit_nr, MSR_FP);
		break;
	case BOOK3S_INTERRUPT_ALTIVEC:
		r = kvmppc_handle_ext(vcpu, exit_nr, MSR_VEC);
		break;
	case BOOK3S_INTERRUPT_VSX:
		r = kvmppc_handle_ext(vcpu, exit_nr, MSR_VSX);
		break;
	case BOOK3S_INTERRUPT_MACHINE_CHECK:
	case BOOK3S_INTERRUPT_TRACE:
		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		r = RESUME_GUEST;
		break;
	default:
		/* Ugh - bork here! What did we get? */
		printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
			exit_nr, vcpu->arch.pc, vcpu->arch.shadow_msr);
		r = RESUME_HOST;
		BUG();
		break;
	}
	if (!(r & RESUME_HOST)) {
		/* To avoid clobbering exit_reason, only check for signals if
		 * we aren't already exiting to userspace for some other
		 * reason. */
		if (signal_pending(current)) {
#ifdef EXIT_DEBUG
			printk(KERN_EMERG "KVM: Going back to host\n");
#endif
			vcpu->stat.signal_exits++;
			run->exit_reason = KVM_EXIT_INTR;
			r = -EINTR;
		} else {
			/* In case an interrupt came in that was triggered
			 * from userspace (like DEC), we need to check what
			 * to inject now. */
			kvmppc_core_deliver_interrupts(vcpu);
		}
	}

#ifdef EXIT_DEBUG
	printk(KERN_EMERG "KVM exit: vcpu=0x%p pc=0x%lx r=0x%x\n", vcpu, vcpu->arch.pc, r);
#endif

	return r;
}
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	return 0;
}
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	regs->pc = vcpu->arch.pc;
	regs->cr = kvmppc_get_cr(vcpu);
	regs->ctr = vcpu->arch.ctr;
	regs->lr = vcpu->arch.lr;
	regs->xer = kvmppc_get_xer(vcpu);
	regs->msr = vcpu->arch.msr;
	regs->srr0 = vcpu->arch.srr0;
	regs->srr1 = vcpu->arch.srr1;
	regs->pid = vcpu->arch.pid;
	regs->sprg0 = vcpu->arch.sprg0;
	regs->sprg1 = vcpu->arch.sprg1;
	regs->sprg2 = vcpu->arch.sprg2;
	regs->sprg3 = vcpu->arch.sprg3;
	regs->sprg4 = vcpu->arch.sprg4;
	regs->sprg5 = vcpu->arch.sprg5;
	regs->sprg6 = vcpu->arch.sprg6;
	regs->sprg7 = vcpu->arch.sprg7;

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		regs->gpr[i] = kvmppc_get_gpr(vcpu, i);

	return 0;
}
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	vcpu->arch.pc = regs->pc;
	kvmppc_set_cr(vcpu, regs->cr);
	vcpu->arch.ctr = regs->ctr;
	vcpu->arch.lr = regs->lr;
	kvmppc_set_xer(vcpu, regs->xer);
	kvmppc_set_msr(vcpu, regs->msr);
	vcpu->arch.srr0 = regs->srr0;
	vcpu->arch.srr1 = regs->srr1;
	vcpu->arch.sprg0 = regs->sprg0;
	vcpu->arch.sprg1 = regs->sprg1;
	vcpu->arch.sprg2 = regs->sprg2;
	vcpu->arch.sprg3 = regs->sprg3;
	vcpu->arch.sprg4 = regs->sprg4;
	vcpu->arch.sprg5 = regs->sprg5;
	vcpu->arch.sprg6 = regs->sprg6;
	vcpu->arch.sprg7 = regs->sprg7;

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		kvmppc_set_gpr(vcpu, i, regs->gpr[i]);

	return 0;
}
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int i;

	sregs->pvr = vcpu->arch.pvr;

	sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1;
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
		for (i = 0; i < 64; i++) {
			sregs->u.s.ppc64.slb[i].slbe = vcpu3s->slb[i].orige | i;
			sregs->u.s.ppc64.slb[i].slbv = vcpu3s->slb[i].origv;
		}
	} else {
		for (i = 0; i < 16; i++) {
			sregs->u.s.ppc32.sr[i] = vcpu3s->sr[i].raw;
		}
		for (i = 0; i < 8; i++) {
			sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw;
			sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw;
		}
	}

	return 0;
}
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int i;

	kvmppc_set_pvr(vcpu, sregs->pvr);

	vcpu3s->sdr1 = sregs->u.s.sdr1;
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
		for (i = 0; i < 64; i++) {
			vcpu->arch.mmu.slbmte(vcpu, sregs->u.s.ppc64.slb[i].slbv,
					      sregs->u.s.ppc64.slb[i].slbe);
		}
	} else {
		for (i = 0; i < 16; i++) {
			vcpu->arch.mmu.mtsrin(vcpu, i, sregs->u.s.ppc32.sr[i]);
		}
		for (i = 0; i < 8; i++) {
			kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), false,
				       (u32)sregs->u.s.ppc32.ibat[i]);
			kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), true,
				       (u32)(sregs->u.s.ppc32.ibat[i] >> 32));
			kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), false,
				       (u32)sregs->u.s.ppc32.dbat[i]);
			kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), true,
				       (u32)(sregs->u.s.ppc32.dbat[i] >> 32));
		}
	}

	/* Flush the MMU after messing with the segments */
	kvmppc_mmu_pte_flush(vcpu, 0, 0);

	return 0;
}
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return 0;
}
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	struct kvm_memory_slot *memslot;
	struct kvm_vcpu *vcpu;
	ulong ga, ga_end;
	int is_dirty = 0;
	int r, n;

	mutex_lock(&kvm->slots_lock);

	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* If nothing is dirty, don't bother messing with page tables. */
	if (is_dirty) {
		memslot = &kvm->memslots->memslots[log->slot];

		ga = memslot->base_gfn << PAGE_SHIFT;
		ga_end = ga + (memslot->npages << PAGE_SHIFT);

		kvm_for_each_vcpu(n, vcpu, kvm)
			kvmppc_mmu_pte_pflush(vcpu, ga, ga_end);

		n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
		memset(memslot->dirty_bitmap, 0, n);
	}

	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}
int kvmppc_core_check_processor_compat(void)
{
	return 0;
}
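/*
 * Each vcpu gets its own MMU context, so its guest VSIDs map to a
 * private range of host VSIDs that can't collide with other vcpus.
 */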
struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s;
	struct kvm_vcpu *vcpu;
	int err;

	vcpu_book3s = (struct kvmppc_vcpu_book3s *)__get_free_pages(
			GFP_KERNEL | __GFP_ZERO,
			get_order(sizeof(struct kvmppc_vcpu_book3s)));
	if (!vcpu_book3s) {
		err = -ENOMEM;
		goto out;
	}

	vcpu = &vcpu_book3s->vcpu;
	err = kvm_vcpu_init(vcpu, kvm, id);
	if (err)
		goto free_vcpu;

	vcpu->arch.host_retip = kvm_return_point;
	vcpu->arch.host_msr = mfmsr();
	/* default to book3s_64 (970fx) */
	vcpu->arch.pvr = 0x3C0301;
	kvmppc_set_pvr(vcpu, vcpu->arch.pvr);
	vcpu_book3s->slb_nr = 64;

	/* remember where some real-mode handlers are */
	vcpu->arch.trampoline_lowmem = kvmppc_trampoline_lowmem;
	vcpu->arch.trampoline_enter = kvmppc_trampoline_enter;
	vcpu->arch.highmem_handler = (ulong)kvmppc_handler_highmem;
	vcpu->arch.rmcall = *(ulong *)kvmppc_rmcall;

	vcpu->arch.shadow_msr = MSR_USER64;

	err = __init_new_context();
	if (err < 0)
		goto free_vcpu;
	vcpu_book3s->context_id = err;

	vcpu_book3s->vsid_max = ((vcpu_book3s->context_id + 1) << USER_ESID_BITS) - 1;
	vcpu_book3s->vsid_first = vcpu_book3s->context_id << USER_ESID_BITS;
	vcpu_book3s->vsid_next = vcpu_book3s->vsid_first;

	return vcpu;

free_vcpu:
	free_pages((long)vcpu_book3s, get_order(sizeof(struct kvmppc_vcpu_book3s)));
out:
	return ERR_PTR(err);
}
void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);

	__destroy_context(vcpu_book3s->context_id);
	kvm_vcpu_uninit(vcpu);
	free_pages((long)vcpu_book3s, get_order(sizeof(struct kvmppc_vcpu_book3s)));
}
extern int __kvmppc_vcpu_entry(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
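/*
 * Run loop wrapper: parks the host FPU/Altivec/VSX state on the stack
 * around the actual guest entry, since the guest may dirty those
 * register sets while it owns them, and restores the host state after.
 */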
int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	int ret;
	struct thread_struct ext_bkp;
	bool save_vec = current->thread.used_vr;
	bool save_vsx = current->thread.used_vsr;
	ulong ext_msr;

	/* No need to go into the guest when all we do is going out */
	if (signal_pending(current)) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		return -EINTR;
	}

	/* Save FPU state in stack */
	if (current->thread.regs->msr & MSR_FP)
		giveup_fpu(current);
	memcpy(ext_bkp.fpr, current->thread.fpr, sizeof(current->thread.fpr));
	ext_bkp.fpscr = current->thread.fpscr;
	ext_bkp.fpexc_mode = current->thread.fpexc_mode;

#ifdef CONFIG_ALTIVEC
	/* Save Altivec state in stack */
	if (save_vec) {
		if (current->thread.regs->msr & MSR_VEC)
			giveup_altivec(current);
		memcpy(ext_bkp.vr, current->thread.vr, sizeof(ext_bkp.vr));
		ext_bkp.vscr = current->thread.vscr;
		ext_bkp.vrsave = current->thread.vrsave;
	}
	ext_bkp.used_vr = current->thread.used_vr;
#endif

#ifdef CONFIG_VSX
	/* Save VSX state in stack */
	if (save_vsx && (current->thread.regs->msr & MSR_VSX))
		__giveup_vsx(current);
	ext_bkp.used_vsr = current->thread.used_vsr;
#endif

	/* Remember the MSR with disabled extensions */
	ext_msr = current->thread.regs->msr;

	/* XXX we get called with irq disabled - change that! */
	local_irq_enable();

	ret = __kvmppc_vcpu_entry(kvm_run, vcpu);

	local_irq_disable();

	current->thread.regs->msr = ext_msr;

	/* Make sure we save the guest FPU/Altivec/VSX state */
	kvmppc_giveup_ext(vcpu, MSR_FP);
	kvmppc_giveup_ext(vcpu, MSR_VEC);
	kvmppc_giveup_ext(vcpu, MSR_VSX);

	/* Restore FPU state from stack */
	memcpy(current->thread.fpr, ext_bkp.fpr, sizeof(ext_bkp.fpr));
	current->thread.fpscr = ext_bkp.fpscr;
	current->thread.fpexc_mode = ext_bkp.fpexc_mode;

#ifdef CONFIG_ALTIVEC
	/* Restore Altivec state from stack */
	if (save_vec && current->thread.used_vr) {
		memcpy(current->thread.vr, ext_bkp.vr, sizeof(ext_bkp.vr));
		current->thread.vscr = ext_bkp.vscr;
		current->thread.vrsave = ext_bkp.vrsave;
	}
	current->thread.used_vr = ext_bkp.used_vr;
#endif

#ifdef CONFIG_VSX
	current->thread.used_vsr = ext_bkp.used_vsr;
#endif

	return ret;
}
static int kvmppc_book3s_init(void)
{
	return kvm_init(NULL, sizeof(struct kvmppc_vcpu_book3s), THIS_MODULE);
}

static void kvmppc_book3s_exit(void)
{
	kvm_exit();
}

module_init(kvmppc_book3s_init);
module_exit(kvmppc_book3s_exit);