/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * Copyright 2010-2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/hugetlb.h>
#include <linux/module.h>

#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu-hash64.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>

/*
 * Since this file is built in even if KVM is a module, we need
 * a local copy of this function for the case where kvm_main.c is
 * modular.
 */
static struct kvm_memory_slot *builtin_gfn_to_memslot(struct kvm *kvm,
						gfn_t gfn)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;

	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, slots)
		if (gfn >= memslot->base_gfn &&
		    gfn < memslot->base_gfn + memslot->npages)
			return memslot;
	return NULL;
}

/* Translate address of a vmalloc'd thing to a linear map address */
static void *real_vmalloc_addr(void *x)
{
	unsigned long addr = (unsigned long) x;
	pte_t *p;

	p = find_linux_pte(swapper_pg_dir, addr);
	if (!p || !pte_present(*p))
		return NULL;
	/* assume we don't have huge pages in vmalloc space... */
	addr = (pte_pfn(*p) << PAGE_SHIFT) | (addr & ~PAGE_MASK);
	return __va(addr);
}
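
/*
 * Reverse-map note: each revmap_entry carries forw/back HPT indices,
 * so all the HPTEs mapping a given real page form a circular
 * doubly-linked list.  The rmap word for the page holds the HPT index
 * of one list entry together with the KVMPPC_RMAP_* present/referenced/
 * lock flag bits.
 */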

/*
 * Add this HPTE into the chain for the real page.
 * Must be called with the chain locked; it unlocks the chain.
 */
static void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
			     unsigned long *rmap, long pte_index, int realmode)
{
	struct revmap_entry *head, *tail;
	unsigned long i;

	if (*rmap & KVMPPC_RMAP_PRESENT) {
		i = *rmap & KVMPPC_RMAP_INDEX;
		head = &kvm->arch.revmap[i];
		if (realmode)
			head = real_vmalloc_addr(head);
		tail = &kvm->arch.revmap[head->back];
		if (realmode)
			tail = real_vmalloc_addr(tail);
		rev->forw = i;
		rev->back = head->back;
		tail->forw = pte_index;
		head->back = pte_index;
	} else {
		rev->forw = rev->back = pte_index;
		i = pte_index;
	}
	smp_wmb();
	*rmap = i | KVMPPC_RMAP_REFERENCED | KVMPPC_RMAP_PRESENT; /* unlock */
}
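
/*
 * Note: this is called from the real-mode hcall paths, so the revmap
 * array and the memslot rmap arrays (both vmalloc'd) must be accessed
 * through real_vmalloc_addr().
 */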

/* Remove this HPTE from the chain for a real page */
static void remove_revmap_chain(struct kvm *kvm, long pte_index,
				unsigned long hpte_v)
{
	struct revmap_entry *rev, *next, *prev;
	unsigned long gfn, ptel, head;
	struct kvm_memory_slot *memslot;
	unsigned long *rmap;

	rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
	ptel = rev->guest_rpte;
	gfn = hpte_rpn(ptel, hpte_page_size(hpte_v, ptel));
	memslot = builtin_gfn_to_memslot(kvm, gfn);
	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
		return;

	rmap = real_vmalloc_addr(&memslot->rmap[gfn - memslot->base_gfn]);
	lock_rmap(rmap);

	/* Unlink this entry from the circular list */
	head = *rmap & KVMPPC_RMAP_INDEX;
	next = real_vmalloc_addr(&kvm->arch.revmap[rev->forw]);
	prev = real_vmalloc_addr(&kvm->arch.revmap[rev->back]);
	next->back = rev->back;
	prev->forw = rev->forw;
	if (head == pte_index) {
		head = rev->forw;
		if (head == pte_index)
			*rmap &= ~(KVMPPC_RMAP_PRESENT | KVMPPC_RMAP_INDEX);
		else
			*rmap = (*rmap & ~KVMPPC_RMAP_INDEX) | head;
	}
	unlock_rmap(rmap);
}
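
/*
 * H_ENTER hcall: insert the HPTE given in (pteh, ptel) into the guest
 * hashed page table.  With H_EXACT the guest asks for the exact slot
 * pte_index; otherwise pte_index selects a group of 8 slots and any
 * free one may be used.  The index actually used is returned in GPR4.
 */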
long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
		    long pte_index, unsigned long pteh, unsigned long ptel)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long i, pa, gpa, gfn, psize;
	unsigned long slot_fn;
	unsigned long *hpte;
	struct revmap_entry *rev;
	unsigned long g_ptel = ptel;
	struct kvm_memory_slot *memslot;
	unsigned long *physp, pte_size;
	unsigned long is_io;
	unsigned long *rmap;
	bool realmode = vcpu->arch.vcore->vcore_state == VCORE_RUNNING;

	psize = hpte_page_size(pteh, ptel);
	if (!psize)
		return H_PARAMETER;

	/* Find the memslot (if any) for this address */
	gpa = (ptel & HPTE_R_RPN) & ~(psize - 1);
	gfn = gpa >> PAGE_SHIFT;
	memslot = builtin_gfn_to_memslot(kvm, gfn);
	if (!(memslot && !(memslot->flags & KVM_MEMSLOT_INVALID)))
		return H_PARAMETER;

	/* Check if the requested page fits entirely in the memslot. */
	if (!slot_is_aligned(memslot, psize))
		return H_PARAMETER;
	slot_fn = gfn - memslot->base_gfn;
	rmap = &memslot->rmap[slot_fn];

	physp = kvm->arch.slot_phys[memslot->id];
	if (!physp)
		return H_PARAMETER;
	physp += slot_fn;
	if (realmode)
		physp = real_vmalloc_addr(physp);
	pa = *physp;
	if (!pa)
		return H_TOO_HARD;
	is_io = pa & (HPTE_R_I | HPTE_R_W);
	pte_size = PAGE_SIZE << (pa & KVMPPC_PAGE_ORDER_MASK);
	pa &= PAGE_MASK;

	if (pte_size < psize)
		return H_PARAMETER;
	if (pa && pte_size > psize)
		pa |= gpa & (pte_size - 1);

	ptel &= ~(HPTE_R_PP0 - psize);
	ptel |= pa;

	/* Check WIMG */
	if (!hpte_cache_flags_ok(ptel, is_io)) {
		if (is_io)
			return H_PARAMETER;
		/*
		 * Allow guest to map emulated device memory as
		 * uncacheable, but actually make it cacheable.
		 */
		ptel &= ~(HPTE_R_W|HPTE_R_I|HPTE_R_G);
		ptel |= HPTE_R_M;
	}
	pteh &= ~0x60UL;
	pteh |= HPTE_V_VALID;

	if (pte_index >= HPT_NPTE)
		return H_PARAMETER;
	if (likely((flags & H_EXACT) == 0)) {
		pte_index &= ~7UL;
		hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
		for (i = 0; i < 8; ++i) {
			if ((*hpte & HPTE_V_VALID) == 0 &&
			    try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID))
				break;
			hpte += 2;
		}
		if (i == 8) {
			/*
			 * Since try_lock_hpte doesn't retry (not even stdcx.
			 * failures), it could be that there is a free slot
			 * but we transiently failed to lock it.  Try again,
			 * actually locking each slot and checking it.
			 */
			hpte -= 16;
			for (i = 0; i < 8; ++i) {
				while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
					cpu_relax();
				if ((*hpte & HPTE_V_VALID) == 0)
					break;
				*hpte &= ~HPTE_V_HVLOCK;
				hpte += 2;
			}
			if (i == 8)
				return H_PTEG_FULL;
		}
		pte_index += i;
	} else {
		hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
		if (!try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID)) {
			/* Lock the slot and check again */
			while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
				cpu_relax();
			if (*hpte & HPTE_V_VALID) {
				*hpte &= ~HPTE_V_HVLOCK;
				return H_PTEG_FULL;
			}
		}
	}

	/* Save away the guest's idea of the second HPTE dword */
	rev = &kvm->arch.revmap[pte_index];
	if (realmode)
		rev = real_vmalloc_addr(rev);
	if (rev)
		rev->guest_rpte = g_ptel;

	/* Link HPTE into reverse-map chain */
	if (realmode)
		rmap = real_vmalloc_addr(rmap);
	lock_rmap(rmap);
	kvmppc_add_revmap_chain(kvm, rev, rmap, pte_index, realmode);

	hpte[1] = ptel;

	/* Write the first HPTE dword, unlocking the HPTE and making it valid */
	eieio();
	hpte[0] = pteh;
	asm volatile("ptesync" : : : "memory");

	vcpu->arch.gpr[4] = pte_index;
	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_enter);
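
/*
 * tlbie must be serialized across the machine, so a global lock
 * (kvm->arch.tlbie_lock) is taken around it.  try_lock_tlbie is a
 * simple lwarx/stwcx. trylock that is safe to use from real mode,
 * where the ordinary spinlock code cannot be called.
 */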

#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))

static inline int try_lock_tlbie(unsigned int *lock)
{
	unsigned int tmp, old;
	unsigned int token = LOCK_TOKEN;

	asm volatile("1:lwarx	%1,0,%2\n"
		     "	cmpwi	cr0,%1,0\n"
		     "	bne	2f\n"
		     "	stwcx.	%3,0,%2\n"
		     "	bne-	1b\n"
		     "	isync\n"
		     "2:"
		     : "=&r" (tmp), "=&r" (old)
		     : "r" (lock), "r" (token)
		     : "cc", "memory");
	return old == 0;
}
long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
		     unsigned long pte_index, unsigned long avpn,
		     unsigned long va)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *hpte;
	unsigned long v, r, rb;

	if (pte_index >= HPT_NPTE)
		return H_PARAMETER;
	hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
	while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
		cpu_relax();
	if ((hpte[0] & HPTE_V_VALID) == 0 ||
	    ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn) ||
	    ((flags & H_ANDCOND) && (hpte[0] & avpn) != 0)) {
		hpte[0] &= ~HPTE_V_HVLOCK;
		return H_NOT_FOUND;
	}
	/* With only one vcpu online, a local tlbiel suffices */
	if (atomic_read(&kvm->online_vcpus) == 1)
		flags |= H_LOCAL;
	vcpu->arch.gpr[4] = v = hpte[0] & ~HPTE_V_HVLOCK;
	vcpu->arch.gpr[5] = r = hpte[1];
	rb = compute_tlbie_rb(v, r, pte_index);
	remove_revmap_chain(kvm, pte_index, v);
	smp_wmb();
	hpte[0] = 0;
	if (!(flags & H_LOCAL)) {
		while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
			cpu_relax();
		asm volatile("ptesync" : : : "memory");
		asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
			     : : "r" (rb), "r" (kvm->arch.lpid));
		asm volatile("ptesync" : : : "memory");
		kvm->arch.tlbie_lock = 0;
	} else {
		asm volatile("ptesync" : : : "memory");
		asm volatile("tlbiel %0" : : "r" (rb));
		asm volatile("ptesync" : : : "memory");
	}
	return H_SUCCESS;
}
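
/*
 * H_BULK_REMOVE hcall: up to 4 remove requests, two GPRs each.  The
 * top byte of the first word of each pair encodes a request type
 * (1 = request, 3 = end of list) and match flags (0 = absolute,
 * 1 = andcond, 2 = AVPN); a result code (0x80 success, 0x90 not
 * found, 0xa0 parameter error) plus the HPTE's R and C bits is
 * written back into that byte.
 */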
long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *args = &vcpu->arch.gpr[4];
	unsigned long *hp, tlbrb[4];
	long int i, found;
	long int n_inval = 0;
	unsigned long flags, req, pte_index;
	long int local = 0;
	long int ret = H_SUCCESS;

	if (atomic_read(&kvm->online_vcpus) == 1)
		local = 1;
	for (i = 0; i < 4; ++i) {
		pte_index = args[i * 2];
		flags = pte_index >> 56;
		pte_index &= ((1ul << 56) - 1);
		req = flags >> 6;
		flags &= 3;
		if (req == 3)		/* end of list */
			break;
		if (req != 1 || flags == 3 ||
		    pte_index >= HPT_NPTE) {
			/* parameter error */
			args[i * 2] = ((0xa0 | flags) << 56) + pte_index;
			ret = H_PARAMETER;
			break;
		}
		hp = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
		while (!try_lock_hpte(hp, HPTE_V_HVLOCK))
			cpu_relax();
		found = 0;
		if (hp[0] & HPTE_V_VALID) {
			switch (flags & 3) {
			case 0:		/* absolute */
				found = 1;
				break;
			case 1:		/* andcond */
				if (!(hp[0] & args[i * 2 + 1]))
					found = 1;
				break;
			case 2:		/* AVPN */
				if ((hp[0] & ~0x7fUL) == args[i * 2 + 1])
					found = 1;
				break;
			}
		}
		if (!found) {
			hp[0] &= ~HPTE_V_HVLOCK;
			args[i * 2] = ((0x90 | flags) << 56) + pte_index;
			continue;
		}
		/* insert R and C bits from PTE */
		flags |= (hp[1] >> 5) & 0x0c;
		args[i * 2] = ((0x80 | flags) << 56) + pte_index;
		tlbrb[n_inval++] = compute_tlbie_rb(hp[0], hp[1], pte_index);
		remove_revmap_chain(kvm, pte_index, hp[0]);
		smp_wmb();
		hp[0] = 0;
	}
	if (n_inval == 0)
		return ret;

	if (!local) {
		while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
			cpu_relax();
		asm volatile("ptesync" : : : "memory");
		for (i = 0; i < n_inval; ++i)
			asm volatile(PPC_TLBIE(%1,%0)
				     : : "r" (tlbrb[i]), "r" (kvm->arch.lpid));
		asm volatile("eieio; tlbsync; ptesync" : : : "memory");
		kvm->arch.tlbie_lock = 0;
	} else {
		asm volatile("ptesync" : : : "memory");
		for (i = 0; i < n_inval; ++i)
			asm volatile("tlbiel %0" : : "r" (tlbrb[i]));
		asm volatile("ptesync" : : : "memory");
	}
	return ret;
}
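
/*
 * H_PROTECT hcall: update the protection (pp0/pp), no-execute and
 * storage-key bits of an existing HPTE.  The entry is invalidated
 * and the TLB entry flushed before the new second dword is written.
 */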
long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
		      unsigned long pte_index, unsigned long avpn,
		      unsigned long va)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *hpte;
	struct revmap_entry *rev;
	unsigned long v, r, rb, mask, bits;

	if (pte_index >= HPT_NPTE)
		return H_PARAMETER;
	hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
	while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
		cpu_relax();
	if ((hpte[0] & HPTE_V_VALID) == 0 ||
	    ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn)) {
		hpte[0] &= ~HPTE_V_HVLOCK;
		return H_NOT_FOUND;
	}
	if (atomic_read(&kvm->online_vcpus) == 1)
		flags |= H_LOCAL;
	v = hpte[0];
	bits = (flags << 55) & HPTE_R_PP0;
	bits |= (flags << 48) & HPTE_R_KEY_HI;
	bits |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO);

	/* Update guest view of 2nd HPTE dword */
	mask = HPTE_R_PP0 | HPTE_R_PP | HPTE_R_N |
		HPTE_R_KEY_HI | HPTE_R_KEY_LO;
	rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
	if (rev) {
		r = (rev->guest_rpte & ~mask) | bits;
		rev->guest_rpte = r;
	}
	r = (hpte[1] & ~mask) | bits;

	/* Update HPTE */
	rb = compute_tlbie_rb(v, r, pte_index);
	hpte[0] = v & ~HPTE_V_VALID;
	if (!(flags & H_LOCAL)) {
		while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
			cpu_relax();
		asm volatile("ptesync" : : : "memory");
		asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
			     : : "r" (rb), "r" (kvm->arch.lpid));
		asm volatile("ptesync" : : : "memory");
		kvm->arch.tlbie_lock = 0;
	} else {
		asm volatile("ptesync" : : : "memory");
		asm volatile("tlbiel %0" : : "r" (rb));
		asm volatile("ptesync" : : : "memory");
	}
	hpte[1] = r;
	eieio();
	hpte[0] = v & ~HPTE_V_HVLOCK;
	asm volatile("ptesync" : : : "memory");
	return H_SUCCESS;
}
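
/*
 * H_READ hcall: return the two dwords of the HPTE at pte_index in
 * GPR4/GPR5.  H_READ_4 reads the whole 4-entry group; H_R_XLATE
 * returns the guest's view of the second dword (with guest real
 * addresses) rather than the host version.
 */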
long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
		   unsigned long pte_index)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *hpte, r;
	int i, n = 1;
	struct revmap_entry *rev = NULL;

	if (pte_index >= HPT_NPTE)
		return H_PARAMETER;
	if (flags & H_READ_4) {
		pte_index &= ~3;
		n = 4;
	}
	if (flags & H_R_XLATE)
		rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
	for (i = 0; i < n; ++i, ++pte_index) {
		hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
		r = hpte[1];
		if (hpte[0] & HPTE_V_VALID) {
			if (rev)
				r = rev[i].guest_rpte;
			else
				r = hpte[1] | HPTE_R_RPN;
		}
		vcpu->arch.gpr[4 + i * 2] = hpte[0];
		vcpu->arch.gpr[5 + i * 2] = r;
	}
	return H_SUCCESS;
}