/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * Copyright 2010-2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/hugetlb.h>
#include <linux/module.h>

#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu-hash64.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
/*
 * Since this file is built in even if KVM is a module, we need
 * a local copy of this function for the case where kvm_main.c is
 * modular.
 */
static struct kvm_memory_slot *builtin_gfn_to_memslot(struct kvm *kvm,
						      gfn_t gfn)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;

	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, slots)
		if (gfn >= memslot->base_gfn &&
		    gfn < memslot->base_gfn + memslot->npages)
			return memslot;
	return NULL;
}
/* Translate address of a vmalloc'd thing to a linear map address */
static void *real_vmalloc_addr(void *x)
{
	unsigned long addr = (unsigned long) x;
	pte_t *p;

	p = find_linux_pte(swapper_pg_dir, addr);
	if (!p || !pte_present(*p))
		return NULL;
	/* assume we don't have huge pages in vmalloc space... */
	addr = (pte_pfn(*p) << PAGE_SHIFT) | (addr & ~PAGE_MASK);
	return __va(addr);
}
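/*
 * Reverse-map chains link together all the HPTEs that currently map a
 * given guest page.  Each memslot has one rmap word per guest page:
 * its low bits (KVMPPC_RMAP_INDEX) hold the HPT index of the chain
 * head, and KVMPPC_RMAP_PRESENT and KVMPPC_RMAP_REFERENCED flag a
 * non-empty chain and a referenced page.  The chain entries live in
 * the kvm->arch.revmap array, one per HPTE, and form a circular
 * doubly-linked list through their forw/back indices.
 */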
/*
 * Add this HPTE into the chain for the real page.
 * Must be called with the chain locked; it unlocks the chain.
 */
static void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
				    unsigned long *rmap, long pte_index,
				    int realmode)
{
	struct revmap_entry *head, *tail;
	unsigned long i;

	if (*rmap & KVMPPC_RMAP_PRESENT) {
		i = *rmap & KVMPPC_RMAP_INDEX;
		head = &kvm->arch.revmap[i];
		if (realmode)
			head = real_vmalloc_addr(head);
		tail = &kvm->arch.revmap[head->back];
		if (realmode)
			tail = real_vmalloc_addr(tail);
		/* insert the new entry at the tail of the chain */
		rev->forw = i;
		rev->back = head->back;
		tail->forw = pte_index;
		head->back = pte_index;
	} else {
		/* empty chain: the new entry points at itself */
		rev->forw = rev->back = pte_index;
		i = pte_index;
	}
	smp_wmb();
	*rmap = i | KVMPPC_RMAP_REFERENCED | KVMPPC_RMAP_PRESENT; /* unlock */
}
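/*
 * Note that the store to *rmap above does double duty: it publishes the
 * new chain head and, since the rmap lock is a bit within the rmap word
 * itself, it also releases the lock.  The smp_wmb() ensures that the
 * forw/back updates are visible before the chain appears unlocked.
 */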
/* Remove this HPTE from the chain for a real page */
static void remove_revmap_chain(struct kvm *kvm, long pte_index,
				unsigned long hpte_v)
{
	struct revmap_entry *rev, *next, *prev;
	unsigned long gfn, ptel, head;
	struct kvm_memory_slot *memslot;
	unsigned long *rmap;

	rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
	ptel = rev->guest_rpte;
	gfn = hpte_rpn(ptel, hpte_page_size(hpte_v, ptel));
	memslot = builtin_gfn_to_memslot(kvm, gfn);
	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
		return;

	rmap = real_vmalloc_addr(&memslot->rmap[gfn - memslot->base_gfn]);
	lock_rmap(rmap);

	head = *rmap & KVMPPC_RMAP_INDEX;
	next = real_vmalloc_addr(&kvm->arch.revmap[rev->forw]);
	prev = real_vmalloc_addr(&kvm->arch.revmap[rev->back]);
	next->back = rev->back;
	prev->forw = rev->forw;
	if (head == pte_index) {
		head = rev->forw;
		if (head == pte_index)
			/* this was the only entry in the chain */
			*rmap &= ~(KVMPPC_RMAP_PRESENT | KVMPPC_RMAP_INDEX);
		else
			*rmap = (*rmap & ~KVMPPC_RMAP_INDEX) | head;
	}
	unlock_rmap(rmap);
}
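/*
 * H_ENTER hypercall: insert an HPTE (pteh, ptel) into the guest's
 * hashed page table.  With H_EXACT, pte_index names the one slot to
 * use; otherwise it names a PTEG and any of its 8 slots may be used.
 * The index of the slot actually used is returned in guest R4.
 */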
long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
		    long pte_index, unsigned long pteh, unsigned long ptel)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long i, pa, gpa, gfn, psize;
	unsigned long slot_fn;
	unsigned long *hpte;
	struct revmap_entry *rev;
	unsigned long g_ptel = ptel;
	struct kvm_memory_slot *memslot;
	unsigned long *physp, pte_size;
	unsigned long is_io;
	unsigned long *rmap;
	bool realmode = vcpu->arch.vcore->vcore_state == VCORE_RUNNING;
	psize = hpte_page_size(pteh, ptel);
	if (!psize)
		return H_PARAMETER;
	pteh &= ~(HPTE_V_HVLOCK | HPTE_V_ABSENT | HPTE_V_VALID);

	/* Find the memslot (if any) for this address */
	gpa = (ptel & HPTE_R_RPN) & ~(psize - 1);
	gfn = gpa >> PAGE_SHIFT;
	memslot = builtin_gfn_to_memslot(kvm, gfn);
	pa = 0;
	is_io = ~0ul;
	rmap = NULL;
	if (!(memslot && !(memslot->flags & KVM_MEMSLOT_INVALID))) {
		/* PPC970 can't do emulated MMIO */
		if (!cpu_has_feature(CPU_FTR_ARCH_206))
			return H_PARAMETER;
		/* Emulated MMIO - mark this with key=31 */
		pteh |= HPTE_V_ABSENT;
		ptel |= HPTE_R_KEY_HI | HPTE_R_KEY_LO;
		goto do_insert;
	}
	/* Check if the requested page fits entirely in the memslot. */
	if (!slot_is_aligned(memslot, psize))
		return H_PARAMETER;
	slot_fn = gfn - memslot->base_gfn;
	rmap = &memslot->rmap[slot_fn];

	physp = kvm->arch.slot_phys[memslot->id];
	if (!physp)
		return H_PARAMETER;
	physp += slot_fn;
	if (realmode)
		physp = real_vmalloc_addr(physp);
	pa = *physp;
	if (!pa)
		return H_TOO_HARD;
	is_io = pa & (HPTE_R_I | HPTE_R_W);
	pte_size = PAGE_SIZE << (pa & KVMPPC_PAGE_ORDER_MASK);
	pa &= PAGE_MASK;

	if (pte_size < psize)
		return H_PARAMETER;
	if (pa && pte_size > psize)
		pa |= gpa & (pte_size - 1);

	ptel &= ~(HPTE_R_PP0 - psize);
	ptel |= pa;

	pteh |= HPTE_V_VALID;

	/* Check WIMG (cache attribute) bits */
	if (!hpte_cache_flags_ok(ptel, is_io)) {
		if (is_io)
			return H_PARAMETER;
		/*
		 * Allow guest to map emulated device memory as
		 * uncacheable, but actually make it cacheable.
		 */
		ptel &= ~(HPTE_R_W | HPTE_R_I | HPTE_R_G);
		ptel |= HPTE_R_M;
	}
 do_insert:
	if (pte_index >= HPT_NPTE)
		return H_PARAMETER;
	if (likely((flags & H_EXACT) == 0)) {
		pte_index &= ~7UL;
		hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
		for (i = 0; i < 8; ++i) {
			if ((*hpte & HPTE_V_VALID) == 0 &&
			    try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
					  HPTE_V_ABSENT))
				break;
			hpte += 2;
		}
		if (i == 8) {
			/*
			 * Since try_lock_hpte doesn't retry (not even stdcx.
			 * failures), it could be that there is a free slot
			 * but we transiently failed to lock it. Try again,
			 * actually locking each slot and checking it.
			 */
			hpte = (unsigned long *)(kvm->arch.hpt_virt +
						 (pte_index << 4));
			for (i = 0; i < 8; ++i) {
				while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
					cpu_relax();
				if (!(*hpte & (HPTE_V_VALID | HPTE_V_ABSENT)))
					break;
				*hpte &= ~HPTE_V_HVLOCK;
				hpte += 2;
			}
			if (i == 8)
				return H_PTEG_FULL;
		}
		pte_index += i;
	} else {
		hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
		if (!try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
				   HPTE_V_ABSENT)) {
			/* Lock the slot and check again */
			while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
				cpu_relax();
			if (*hpte & (HPTE_V_VALID | HPTE_V_ABSENT)) {
				*hpte &= ~HPTE_V_HVLOCK;
				return H_PTEG_FULL;
			}
		}
	}
	/* Save away the guest's idea of the second HPTE dword */
	rev = &kvm->arch.revmap[pte_index];
	if (realmode)
		rev = real_vmalloc_addr(rev);
	if (rev)
		rev->guest_rpte = g_ptel;

	/* Link HPTE into reverse-map chain */
	if (pteh & HPTE_V_VALID) {
		if (realmode)
			rmap = real_vmalloc_addr(rmap);
		lock_rmap(rmap);
		kvmppc_add_revmap_chain(kvm, rev, rmap, pte_index, realmode);
	}

	hpte[1] = ptel;

	/* Write the first HPTE dword, unlocking the HPTE and making it valid */
	eieio();
	hpte[0] = pteh;
	asm volatile("ptesync" : : : "memory");

	vcpu->arch.gpr[4] = pte_index;
	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_enter);
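/*
 * Only one tlbie sequence may be in progress system-wide at a time,
 * so invalidations are serialized with the global
 * kvm->arch.tlbie_lock.  The lock word holds this CPU's paca lock
 * token while held and 0 when free; try_lock_tlbie makes a single
 * lwarx/stwcx. attempt and reports whether it took the lock.
 */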
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))

static inline int try_lock_tlbie(unsigned int *lock)
{
	unsigned int tmp, old;
	unsigned int token = LOCK_TOKEN;

	asm volatile("1:lwarx	%1,0,%2\n"
		     "	cmpwi	cr0,%1,0\n"
		     "	bne	2f\n"
		     "	stwcx.	%3,0,%2\n"
		     "	bne-	1b\n"
		     "	isync\n"
		     "2:"
		     : "=&r" (tmp), "=&r" (old)
		     : "r" (lock), "r" (token)
		     : "cc", "memory");
	return old == 0;
}
long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
		     unsigned long pte_index, unsigned long avpn,
		     unsigned long va)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *hpte;
	unsigned long v, r, rb;

	if (pte_index >= HPT_NPTE)
		return H_PARAMETER;
	hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
	while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
		cpu_relax();
	if ((hpte[0] & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
	    ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn) ||
	    ((flags & H_ANDCOND) && (hpte[0] & avpn) != 0)) {
		hpte[0] &= ~HPTE_V_HVLOCK;
		return H_NOT_FOUND;
	}
	/* with a single vcpu online, a local tlbiel is sufficient */
	if (atomic_read(&kvm->online_vcpus) == 1)
		flags |= H_LOCAL;
	vcpu->arch.gpr[4] = v = hpte[0] & ~HPTE_V_HVLOCK;
	vcpu->arch.gpr[5] = r = hpte[1];
	rb = compute_tlbie_rb(v, r, pte_index);
	if (v & HPTE_V_VALID)
		remove_revmap_chain(kvm, pte_index, v);
	smp_wmb();
	hpte[0] = 0;
	if (!(v & HPTE_V_VALID))
		return H_SUCCESS;
	if (!(flags & H_LOCAL)) {
		while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
			cpu_relax();
		asm volatile("ptesync" : : : "memory");
		asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
			     : : "r" (rb), "r" (kvm->arch.lpid));
		asm volatile("ptesync" : : : "memory");
		kvm->arch.tlbie_lock = 0;
	} else {
		asm volatile("ptesync" : : : "memory");
		asm volatile("tlbiel %0" : : "r" (rb));
		asm volatile("ptesync" : : : "memory");
	}
	return H_SUCCESS;
}
long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *args = &vcpu->arch.gpr[4];
	unsigned long *hp, tlbrb[4];
	long int i, found;
	long int n_inval = 0;
	unsigned long flags, req, pte_index;
	long int local = 0;
	long int ret = H_SUCCESS;

	if (atomic_read(&kvm->online_vcpus) == 1)
		local = 1;
	for (i = 0; i < 4; ++i) {
		pte_index = args[i * 2];
		flags = pte_index >> 56;
		pte_index &= ((1ul << 56) - 1);
		req = flags >> 6;
		flags &= 3;
		if (req == 3)		/* end of list */
			break;
		if (req != 1 || flags == 3 ||
		    pte_index >= HPT_NPTE) {
			/* parameter error */
			args[i * 2] = ((0xa0 | flags) << 56) + pte_index;
			ret = H_PARAMETER;
			break;
		}
		hp = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
		while (!try_lock_hpte(hp, HPTE_V_HVLOCK))
			cpu_relax();
		found = 0;
		if (hp[0] & (HPTE_V_ABSENT | HPTE_V_VALID)) {
			switch (flags & 3) {
			case 0:		/* absolute */
				found = 1;
				break;
			case 1:		/* andcond */
				if (!(hp[0] & args[i * 2 + 1]))
					found = 1;
				break;
			case 2:		/* AVPN */
				if ((hp[0] & ~0x7fUL) == args[i * 2 + 1])
					found = 1;
				break;
			}
		}
		if (!found) {
			hp[0] &= ~HPTE_V_HVLOCK;
			args[i * 2] = ((0x90 | flags) << 56) + pte_index;
			continue;
		}
		/* insert R and C bits from PTE */
		flags |= (hp[1] >> 5) & 0x0c;
		args[i * 2] = ((0x80 | flags) << 56) + pte_index;
		if (hp[0] & HPTE_V_VALID) {
			tlbrb[n_inval++] = compute_tlbie_rb(hp[0], hp[1],
							    pte_index);
			remove_revmap_chain(kvm, pte_index, hp[0]);
		}
		smp_wmb();
		hp[0] = 0;
	}
	if (n_inval == 0)
		return ret;

	if (!local) {
		while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
			cpu_relax();
		asm volatile("ptesync" : : : "memory");
		for (i = 0; i < n_inval; ++i)
			asm volatile(PPC_TLBIE(%1,%0)
				     : : "r" (tlbrb[i]), "r" (kvm->arch.lpid));
		asm volatile("eieio; tlbsync; ptesync" : : : "memory");
		kvm->arch.tlbie_lock = 0;
	} else {
		asm volatile("ptesync" : : : "memory");
		for (i = 0; i < n_inval; ++i)
			asm volatile("tlbiel %0" : : "r" (tlbrb[i]));
		asm volatile("ptesync" : : : "memory");
	}
	return ret;
}
long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
		      unsigned long pte_index, unsigned long avpn,
		      unsigned long va)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *hpte;
	struct revmap_entry *rev;
	unsigned long v, r, rb, mask, bits;

	if (pte_index >= HPT_NPTE)
		return H_PARAMETER;

	hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
	while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
		cpu_relax();
	if ((hpte[0] & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
	    ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn)) {
		hpte[0] &= ~HPTE_V_HVLOCK;
		return H_NOT_FOUND;
	}

	if (atomic_read(&kvm->online_vcpus) == 1)
		flags |= H_LOCAL;
	v = hpte[0];
	bits = (flags << 55) & HPTE_R_PP0;
	bits |= (flags << 48) & HPTE_R_KEY_HI;
	bits |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO);

	/* Update guest view of 2nd HPTE dword */
	mask = HPTE_R_PP0 | HPTE_R_PP | HPTE_R_N |
		HPTE_R_KEY_HI | HPTE_R_KEY_LO;
	rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
	if (rev) {
		r = (rev->guest_rpte & ~mask) | bits;
		rev->guest_rpte = r;
	}
	r = (hpte[1] & ~mask) | bits;

	/* Update HPTE */
	if (v & HPTE_V_VALID) {
		rb = compute_tlbie_rb(v, r, pte_index);
		hpte[0] = v & ~HPTE_V_VALID;
		if (!(flags & H_LOCAL)) {
			while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
				cpu_relax();
			asm volatile("ptesync" : : : "memory");
			asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
				     : : "r" (rb), "r" (kvm->arch.lpid));
			asm volatile("ptesync" : : : "memory");
			kvm->arch.tlbie_lock = 0;
		} else {
			asm volatile("ptesync" : : : "memory");
			asm volatile("tlbiel %0" : : "r" (rb));
			asm volatile("ptesync" : : : "memory");
		}
	}
	hpte[1] = r;
	eieio();
	hpte[0] = v & ~HPTE_V_HVLOCK;
	asm volatile("ptesync" : : : "memory");
	return H_SUCCESS;
}
long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
		   unsigned long pte_index)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *hpte, v, r;
	int i, n = 1;
	struct revmap_entry *rev = NULL;

	if (pte_index >= HPT_NPTE)
		return H_PARAMETER;
	if (flags & H_READ_4) {
		pte_index &= ~3;
		n = 4;
	}
	if (flags & H_R_XLATE)
		rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
	for (i = 0; i < n; ++i, ++pte_index) {
		hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
		v = hpte[0] & ~HPTE_V_HVLOCK;
		r = hpte[1];
		if (v & HPTE_V_ABSENT) {
			/* present the entry to the guest as valid */
			v &= ~HPTE_V_ABSENT;
			v |= HPTE_V_VALID;
		}
		if (v & HPTE_V_VALID) {
			if (rev)
				r = rev[i].guest_rpte;
			else
				r = hpte[1] | HPTE_R_RPN;
		}
		vcpu->arch.gpr[4 + i * 2] = v;
		vcpu->arch.gpr[5 + i * 2] = r;
	}
	return H_SUCCESS;
}
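/*
 * Base page shift for each value of the SLB LP field (extracted below
 * as (slb_v & SLB_VSID_LP) >> 4) when the L (large page) bit is set.
 */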
static int slb_base_page_shift[4] = {
	24,	/* 16M */
	16,	/* 64k */
	34,	/* 16G */
	20,	/* 1M, unsupported */
};

long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
			      unsigned long valid)
{
	unsigned int i;
	unsigned int pshift;
	unsigned long somask;
	unsigned long vsid, hash;
	unsigned long avpn;
	unsigned long *hpte;
	unsigned long mask, val;
	unsigned long v, r;

	/* Get page shift, work out hash and AVPN etc. */
	mask = SLB_VSID_B | HPTE_V_AVPN | HPTE_V_SECONDARY;
	val = 0;
	pshift = 12;
	if (slb_v & SLB_VSID_L) {
		mask |= HPTE_V_LARGE;
		val |= HPTE_V_LARGE;
		pshift = slb_base_page_shift[(slb_v & SLB_VSID_LP) >> 4];
	}
	if (slb_v & SLB_VSID_B_1T) {
		somask = (1UL << 40) - 1;
		vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT_1T;
		vsid ^= vsid << 25;
	} else {
		somask = (1UL << 28) - 1;
		vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT;
	}
	hash = (vsid ^ ((eaddr & somask) >> pshift)) & HPT_HASH_MASK;
	avpn = slb_v & ~(somask >> 16);	/* also includes B */
	avpn |= (eaddr & somask) >> 16;

	if (pshift >= 24)
		avpn &= ~((1UL << (pshift - 16)) - 1);
	else
		avpn &= ~0x7fUL;
	val |= avpn;

	for (;;) {
		hpte = (unsigned long *)(kvm->arch.hpt_virt + (hash << 7));

		for (i = 0; i < 16; i += 2) {
			/* Read the PTE racily */
			v = hpte[i] & ~HPTE_V_HVLOCK;

			/* Check valid/absent, hash, segment size and AVPN */
			if (!(v & valid) || (v & mask) != val)
				continue;

			/* Lock the PTE and read it under the lock */
			while (!try_lock_hpte(&hpte[i], HPTE_V_HVLOCK))
				cpu_relax();
			v = hpte[i] & ~HPTE_V_HVLOCK;
			r = hpte[i+1];

			/*
			 * Check the HPTE again, including large page size.
			 * Since we don't currently allow any MPSS (mixed
			 * page-size segment) page sizes, it is sufficient
			 * to check against the actual page size.
			 */
			if ((v & valid) && (v & mask) == val &&
			    hpte_page_size(v, r) == (1ul << pshift))
				/* Return with the HPTE still locked */
				return (hash << 3) + (i >> 1);

			/* Unlock and move on */
			hpte[i] = v;
		}

		if (val & HPTE_V_SECONDARY)
			break;
		/* try the secondary hash */
		val |= HPTE_V_SECONDARY;
		hash = hash ^ HPT_HASH_MASK;
	}
	return -1;
}
EXPORT_SYMBOL(kvmppc_hv_find_lock_hpte);
/*
 * Called in real mode to check whether an HPTE not found fault
 * is due to accessing an emulated MMIO page.
 * Returns a possibly modified status (DSISR) value if not
 * (in which case the interrupt is passed along to the guest),
 * -1 to pass the fault up to host kernel mode code, -2 to do that
 * and also load the instruction word,
 * or 0 if we should make the guest retry the access.
 */
long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
			  unsigned long slb_v, unsigned int status)
{
	struct kvm *kvm = vcpu->kvm;
	long int index;
	unsigned long v, r, gr;
	unsigned long *hpte;
	unsigned long valid;
	struct revmap_entry *rev;
	unsigned long pp, key;

	valid = HPTE_V_VALID | HPTE_V_ABSENT;
	index = kvmppc_hv_find_lock_hpte(kvm, addr, slb_v, valid);
	if (index < 0)
		return status;	/* there really was no HPTE */

	hpte = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
	v = hpte[0] & ~HPTE_V_HVLOCK;
	r = hpte[1];
	rev = real_vmalloc_addr(&kvm->arch.revmap[index]);
	gr = rev->guest_rpte;

	/* Unlock the HPTE */
	asm volatile("lwsync" : : : "memory");
	hpte[0] = v;

	/* If the HPTE is valid by now, retry the instruction */
	if (v & HPTE_V_VALID)
		return 0;

	/* Check access permissions to the page */
	pp = gr & (HPTE_R_PP0 | HPTE_R_PP);
	key = (vcpu->arch.shregs.msr & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS;
	if (status & DSISR_ISSTORE) {
		/* check write permission */
		if (!hpte_write_permission(pp, slb_v & key))
			goto protfault;
	} else {
		if (!hpte_read_permission(pp, slb_v & key))
			goto protfault;
	}

	/* Check storage key, if applicable */
	if (vcpu->arch.shregs.msr & MSR_DR) {
		unsigned int perm = hpte_get_skey_perm(gr, vcpu->arch.amr);
		if (status & DSISR_ISSTORE)
			perm >>= 1;
		if (perm & 1)
			return (status & ~DSISR_NOHPTE) | DSISR_KEYFAULT;
	}

	/* Save HPTE info for virtual-mode handler */
	vcpu->arch.pgfault_addr = addr;
	vcpu->arch.pgfault_index = index;
	vcpu->arch.pgfault_hpte[0] = v;
	vcpu->arch.pgfault_hpte[1] = r;

	if (vcpu->arch.shregs.msr & MSR_IR)
		return -2;	/* MMIO emulation - load instr word */

	return -1;		/* send fault up to host kernel mode */

 protfault:
	return (status & ~DSISR_NOHPTE) | DSISR_PROTFAULT;
}