/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * Copyright 2010-2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/hugetlb.h>

#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu-hash64.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
/* For now use fixed-size 16MB page table */
#define HPT_ORDER	24
#define HPT_NPTEG	(1ul << (HPT_ORDER - 7))	/* 128B per pteg */
#define HPT_HASH_MASK	(HPT_NPTEG - 1)

#define HPTE_V_HVLOCK	0x40UL
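/*
 * HPTE_V_HVLOCK is presumably one of the low-order bits of the first HPTE
 * doubleword that the architecture reserves for software use; hardware
 * ignores it, so the hypervisor can use it as a per-entry lock bit.
 * kvmppc_h_enter clears the software bits (pteh &= ~0x60UL) before
 * installing a guest-supplied value, so a guest can never smuggle the
 * lock bit in.
 */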
static inline long lock_hpte(unsigned long *hpte, unsigned long bits)
{
	unsigned long tmp, old;

	asm volatile("	ldarx	%0,0,%2\n"
		     "	and.	%1,%0,%3\n"
		     "	bne	2f\n"
		     "	ori	%0,%0,%4\n"
		     "	stdcx.	%0,0,%2\n"
		     "	beq+	2f\n"
		     "	mr	%1,%3\n"
		     "2:	isync"
		     : "=&r" (tmp), "=&r" (old)
		     : "r" (hpte), "r" (bits), "i" (HPTE_V_HVLOCK)
		     : "cc", "memory");
	return old == 0;
}
long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
		    long pte_index, unsigned long pteh, unsigned long ptel)
{
	unsigned long porder;
	struct kvm *kvm = vcpu->kvm;
	unsigned long i, lpn, pa;
	unsigned long *hpte;

	/* only handle 4k, 64k and 16M pages for now */
	porder = 12;
	if (pteh & HPTE_V_LARGE) {
		if ((ptel & 0xf000) == 0x1000) {
			/* 64k page */
			porder = 16;
		} else if ((ptel & 0xff000) == 0) {
			/* 16M page */
			porder = 24;
			/* lowest AVA bit must be 0 for 16M pages */
			if (pteh & 0x80)
				return H_PARAMETER;
		} else
			return H_PARAMETER;
	}
	lpn = (ptel & HPTE_R_RPN) >> kvm->arch.ram_porder;
	if (lpn >= kvm->arch.ram_npages || porder > kvm->arch.ram_porder)
		return H_PARAMETER;
	pa = kvm->arch.ram_pginfo[lpn].pfn << PAGE_SHIFT;
	if (!pa)
		return H_PARAMETER;
	/* check WIMG */
	if ((ptel & HPTE_R_WIMG) != HPTE_R_M &&
	    (ptel & HPTE_R_WIMG) != (HPTE_R_W | HPTE_R_I | HPTE_R_M))
		return H_PARAMETER;
	pteh &= ~0x60UL;
	ptel &= ~(HPTE_R_PP0 - kvm->arch.ram_psize);
	ptel |= pa;
	if (pte_index >= (HPT_NPTEG << 3))
		return H_PARAMETER;
	if (likely((flags & H_EXACT) == 0)) {
		pte_index &= ~7UL;
		hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
		for (i = 0; ; ++i) {
			if (i == 8)
				return H_PTEG_FULL;
			if ((*hpte & HPTE_V_VALID) == 0 &&
			    lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID))
				break;
			hpte += 2;
		}
	} else {
		i = 0;
		hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
		if (!lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID))
			return H_PTEG_FULL;
	}
	hpte[1] = ptel;
	eieio();
	hpte[0] = pteh;
	asm volatile("ptesync" : : : "memory");
	atomic_inc(&kvm->arch.ram_pginfo[lpn].refcnt);
	vcpu->arch.gpr[4] = pte_index + i;
	return H_SUCCESS;
}
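/*
 * Ordering note for kvmppc_h_enter: the second doubleword (ptel) must be
 * globally visible before the first doubleword marks the entry valid,
 * hence the eieio() between the two stores and the ptesync before
 * returning.  Per the PAPR hcall convention the index actually used
 * (which may differ from pte_index when H_EXACT is not set) is returned
 * to the guest in GPR4.
 */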
static unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
				      unsigned long pte_index)
{
	unsigned long rb, va_low;

	rb = (v & ~0x7fUL) << 16;		/* AVA field */
	va_low = pte_index >> 3;
	if (v & HPTE_V_SECONDARY)
		va_low = ~va_low;
	/* xor vsid from AVA */
	if (!(v & HPTE_V_1TB_SEG))
		va_low ^= v >> 12;
	else
		va_low ^= v >> 24;
	va_low &= 0x7ff;
	if (v & HPTE_V_LARGE) {
		rb |= 1;			/* L field */
		if (r & 0xff000) {
			/* non-16MB large page, must be 64k */
			/* (masks depend on page size) */
			rb |= 0x1000;		/* page encoding in LP field */
			rb |= (va_low & 0x7f) << 16; /* 7b of VA in AVA/LP field */
			rb |= (va_low & 0xfe);	/* AVAL field (P7 doesn't seem to care) */
		}
	} else {
		/* 4kB page */
		rb |= (va_low & 0x7ff) << 12;	/* remaining 11b of VA */
	}
	rb |= (v >> 54) & 0x300;		/* B field */
	return rb;
}
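/*
 * The value built here is the RB operand for the tlbie/tlbiel
 * instructions issued below: the AVA bits identify the page, L/LP encode
 * the page size and B the segment size.  The low-order VA bits are not
 * stored in the HPTE (they are implied by the hash), so they are
 * recovered from the PTEG index (pte_index >> 3) by xoring the VSID back
 * out of the hash.
 */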
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))

static inline int try_lock_tlbie(unsigned int *lock)
{
	unsigned int tmp, old;
	unsigned int token = LOCK_TOKEN;

	asm volatile("1:lwarx	%1,0,%2\n"
		     "	cmpwi	cr0,%1,0\n"
		     "	bne	2f\n"
		     "	stwcx.	%3,0,%2\n"
		     "	bne-	1b\n"
		     "	isync\n"
		     "2:"
		     : "=&r" (tmp), "=&r" (old)
		     : "r" (lock), "r" (token)
		     : "cc", "memory");
	return old == 0;
}
long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
		     unsigned long pte_index, unsigned long avpn,
		     unsigned long va)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *hpte;
	unsigned long v, r, rb;

	if (pte_index >= (HPT_NPTEG << 3))
		return H_PARAMETER;
	hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
	while (!lock_hpte(hpte, HPTE_V_HVLOCK))
		cpu_relax();
	if ((hpte[0] & HPTE_V_VALID) == 0 ||
	    ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn) ||
	    ((flags & H_ANDCOND) && (hpte[0] & avpn) != 0)) {
		hpte[0] &= ~HPTE_V_HVLOCK;
		return H_NOT_FOUND;
	}
	if (atomic_read(&kvm->online_vcpus) == 1)
		flags |= H_LOCAL;
	vcpu->arch.gpr[4] = v = hpte[0] & ~HPTE_V_HVLOCK;
	vcpu->arch.gpr[5] = r = hpte[1];
	rb = compute_tlbie_rb(v, r, pte_index);
	hpte[0] = 0;
	if (!(flags & H_LOCAL)) {
		while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
			cpu_relax();
		asm volatile("ptesync" : : : "memory");
		asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
			     : : "r" (rb), "r" (kvm->arch.lpid));
		asm volatile("ptesync" : : : "memory");
		kvm->arch.tlbie_lock = 0;
	} else {
		asm volatile("ptesync" : : : "memory");
		asm volatile("tlbiel %0" : : "r" (rb));
		asm volatile("ptesync" : : : "memory");
	}
	return H_SUCCESS;
}
long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *args = &vcpu->arch.gpr[4];
	unsigned long *hp, tlbrb[4];
	long int i, found;
	long int n_inval = 0;
	unsigned long flags, req, pte_index;
	long int local = 0;
	long int ret = H_SUCCESS;

	if (atomic_read(&kvm->online_vcpus) == 1)
		local = 1;
	for (i = 0; i < 4; ++i) {
		pte_index = args[i * 2];
		flags = pte_index >> 56;
		pte_index &= ((1ul << 56) - 1);
		req = flags >> 6;
		flags &= 3;
		if (req == 3)
			break;
		if (req != 1 || flags == 3 ||
		    pte_index >= (HPT_NPTEG << 3)) {
			/* parameter error */
			args[i * 2] = ((0xa0 | flags) << 56) + pte_index;
			ret = H_PARAMETER;
			break;
		}
		hp = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
		while (!lock_hpte(hp, HPTE_V_HVLOCK))
			cpu_relax();
		found = 0;
		if (hp[0] & HPTE_V_VALID) {
			switch (flags & 3) {
			case 0:		/* absolute */
				found = 1;
				break;
			case 1:		/* andcond */
				if (!(hp[0] & args[i * 2 + 1]))
					found = 1;
				break;
			case 2:		/* AVPN */
				if ((hp[0] & ~0x7fUL) == args[i * 2 + 1])
					found = 1;
				break;
			}
		}
		if (!found) {
			hp[0] &= ~HPTE_V_HVLOCK;
			args[i * 2] = ((0x90 | flags) << 56) + pte_index;
			continue;
		}
		/* insert R and C bits from PTE */
		flags |= (hp[1] >> 5) & 0x0c;
		args[i * 2] = ((0x80 | flags) << 56) + pte_index;
		tlbrb[n_inval++] = compute_tlbie_rb(hp[0], hp[1], pte_index);
		hp[0] = 0;
	}
	if (n_inval == 0)
		return ret;

	if (!local) {
		while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
			cpu_relax();
		asm volatile("ptesync" : : : "memory");
		for (i = 0; i < n_inval; ++i)
			asm volatile(PPC_TLBIE(%1,%0)
				     : : "r" (tlbrb[i]), "r" (kvm->arch.lpid));
		asm volatile("eieio; tlbsync; ptesync" : : : "memory");
		kvm->arch.tlbie_lock = 0;
	} else {
		asm volatile("ptesync" : : : "memory");
		for (i = 0; i < n_inval; ++i)
			asm volatile("tlbiel %0" : : "r" (tlbrb[i]));
		asm volatile("ptesync" : : : "memory");
	}
	return ret;
}
long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
		      unsigned long pte_index, unsigned long avpn,
		      unsigned long va)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *hpte;
	unsigned long v, r, rb;

	if (pte_index >= (HPT_NPTEG << 3))
		return H_PARAMETER;
	hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
	while (!lock_hpte(hpte, HPTE_V_HVLOCK))
		cpu_relax();
	if ((hpte[0] & HPTE_V_VALID) == 0 ||
	    ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn)) {
		hpte[0] &= ~HPTE_V_HVLOCK;
		return H_NOT_FOUND;
	}
	if (atomic_read(&kvm->online_vcpus) == 1)
		flags |= H_LOCAL;
	v = hpte[0];
	r = hpte[1] & ~(HPTE_R_PP0 | HPTE_R_PP | HPTE_R_N |
			HPTE_R_KEY_HI | HPTE_R_KEY_LO);
	r |= (flags << 55) & HPTE_R_PP0;
	r |= (flags << 48) & HPTE_R_KEY_HI;
	r |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO);
	rb = compute_tlbie_rb(v, r, pte_index);
	hpte[0] = v & ~HPTE_V_VALID;
	if (!(flags & H_LOCAL)) {
		while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
			cpu_relax();
		asm volatile("ptesync" : : : "memory");
		asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
			     : : "r" (rb), "r" (kvm->arch.lpid));
		asm volatile("ptesync" : : : "memory");
		kvm->arch.tlbie_lock = 0;
	} else {
		asm volatile("ptesync" : : : "memory");
		asm volatile("tlbiel %0" : : "r" (rb));
		asm volatile("ptesync" : : : "memory");
	}
	hpte[1] = r;
	eieio();
	hpte[0] = v & ~HPTE_V_HVLOCK;
	asm volatile("ptesync" : : : "memory");
	return H_SUCCESS;
}
static unsigned long reverse_xlate(struct kvm *kvm, unsigned long realaddr)
{
	long int i;
	unsigned long offset, rpn;

	offset = realaddr & (kvm->arch.ram_psize - 1);
	rpn = (realaddr - offset) >> PAGE_SHIFT;
	for (i = 0; i < kvm->arch.ram_npages; ++i)
		if (rpn == kvm->arch.ram_pginfo[i].pfn)
			return (i << PAGE_SHIFT) + offset;
	return HPTE_R_RPN;	/* all 1s in the RPN field */
}
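/*
 * Note: this is a linear scan over ram_pginfo, O(guest RAM pages) per
 * lookup.  That seems tolerable while H_R_XLATE lookups are rare and
 * guest memory is backed by a small number of 16MB pages; returning
 * HPTE_R_RPN (all 1s in the RPN field) is the "no translation found"
 * sentinel the caller passes straight back to the guest.
 */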
long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
		   unsigned long pte_index)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *hpte, r;
	int i, n = 1;

	if (pte_index >= (HPT_NPTEG << 3))
		return H_PARAMETER;
	if (flags & H_READ_4) {
		pte_index &= ~3;
		n = 4;
	}
	for (i = 0; i < n; ++i, ++pte_index) {
		hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
		r = hpte[1];
		if ((flags & H_R_XLATE) && (hpte[0] & HPTE_V_VALID))
			r = reverse_xlate(kvm, r & HPTE_R_RPN) |
				(r & ~HPTE_R_RPN);
		vcpu->arch.gpr[4 + i * 2] = hpte[0];
		vcpu->arch.gpr[5 + i * 2] = r;
	}
	return H_SUCCESS;
}
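/*
 * With H_READ_4 the index is rounded down to a multiple of four and four
 * consecutive HPTEs are returned in GPR4..GPR11, mirroring the PAPR
 * H_READ convention; with H_R_XLATE the real page number in each second
 * doubleword is translated back into a guest logical address, so the
 * guest never sees raw host real addresses.
 */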