/*
 * Copyright (C) 2009 SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 *     Kevin Wolf <mail@kevin-wolf.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */
#include <linux/kvm_host.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu-hash64.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/hw_irq.h>

/* Shadow HPTEs always use 4K pages (2^12 bytes); used by hpt_hash() below */
#define PTE_SIZE 12
/* #define DEBUG_MMU */
/* #define DEBUG_SLB */

#ifdef DEBUG_MMU
#define dprintk_mmu(a, ...) printk(KERN_INFO a, __VA_ARGS__)
#else
#define dprintk_mmu(a, ...) do { } while (0)
#endif

#ifdef DEBUG_SLB
#define dprintk_slb(a, ...) printk(KERN_INFO a, __VA_ARGS__)
#else
#define dprintk_slb(a, ...) do { } while (0)
#endif
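/*
 * Drop one shadow mapping: invalidate the host HPTE and release the
 * backing host page, dirty if the guest was allowed to write it.
 */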
static void invalidate_pte(struct hpte_cache *pte)
{
	dprintk_mmu("KVM: Flushing SPT: 0x%lx (0x%llx) -> 0x%llx\n",
		    pte->pte.eaddr, pte->pte.vpage, pte->host_va);

	ppc_md.hpte_invalidate(pte->slot, pte->host_va,
			       MMU_PAGE_4K, MMU_SEGSIZE_256M,
			       false);
	pte->host_va = 0;

	if (pte->pte.may_write)
		kvm_release_pfn_dirty(pte->pfn);
	else
		kvm_release_pfn_clean(pte->pfn);
}
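/*
 * Flush all shadow PTEs whose effective address matches guest_ea under
 * ea_mask. A zero mask matches everything and resets the cache.
 */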
void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask)
{
	int i;

	dprintk_mmu("KVM: Flushing %d Shadow PTEs: 0x%lx & 0x%lx\n",
		    vcpu->arch.hpte_cache_offset, guest_ea, ea_mask);
	BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);

	guest_ea &= ea_mask;
	for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) {
		struct hpte_cache *pte;

		pte = &vcpu->arch.hpte_cache[i];
		if (!pte->host_va)
			continue;

		if ((pte->pte.eaddr & ea_mask) == guest_ea)
			invalidate_pte(pte);
	}

	/* Doing a complete flush -> start from scratch */
	if (!ea_mask)
		vcpu->arch.hpte_cache_offset = 0;
}
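/*
 * Flush all shadow PTEs whose virtual page matches guest_vp under vp_mask.
 */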
void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
{
	int i;

	dprintk_mmu("KVM: Flushing %d Shadow vPTEs: 0x%llx & 0x%llx\n",
		    vcpu->arch.hpte_cache_offset, guest_vp, vp_mask);
	BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);

	guest_vp &= vp_mask;
	for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) {
		struct hpte_cache *pte;

		pte = &vcpu->arch.hpte_cache[i];
		if (!pte->host_va)
			continue;

		if ((pte->pte.vpage & vp_mask) == guest_vp)
			invalidate_pte(pte);
	}
}
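/*
 * Flush all shadow PTEs whose guest physical address falls into
 * [pa_start, pa_end).
 */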
void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
{
	int i;

	dprintk_mmu("KVM: Flushing %d Shadow pPTEs: 0x%lx - 0x%lx\n",
		    vcpu->arch.hpte_cache_offset, pa_start, pa_end);
	BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);

	for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) {
		struct hpte_cache *pte;

		pte = &vcpu->arch.hpte_cache[i];
		if (!pte->host_va)
			continue;

		if ((pte->pte.raddr >= pa_start) &&
		    (pte->pte.raddr < pa_end))
			invalidate_pte(pte);
	}
}
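/*
 * Look up a shadow PTE by effective address, via its virtual page number.
 * Returns NULL if the page is not currently mapped.
 */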
struct kvmppc_pte *kvmppc_mmu_find_pte(struct kvm_vcpu *vcpu, u64 ea, bool data)
{
	int i;
	u64 guest_vp;

	guest_vp = vcpu->arch.mmu.ea_to_vp(vcpu, ea, false);
	for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) {
		struct hpte_cache *pte;

		pte = &vcpu->arch.hpte_cache[i];
		if (!pte->host_va)
			continue;

		if (pte->pte.vpage == guest_vp)
			return &pte->pte;
	}

	return NULL;
}
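/*
 * Hand out the next free slot in the shadow PTE cache, flushing the whole
 * cache first if it is full.
 */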
static int kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.hpte_cache_offset == HPTEG_CACHE_NUM)
		kvmppc_mmu_pte_flush(vcpu, 0, 0);

	return vcpu->arch.hpte_cache_offset++;
}
/* We keep 512 gvsid->hvsid entries, mapping the guest ones to the array using
 * a hash, so we don't waste cycles on looping */
static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid)
{
	return (u16)(((gvsid >> (SID_MAP_BITS * 7)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 6)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 5)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 4)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 3)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 2)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 1)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 0)) & SID_MAP_MASK));
}
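/*
 * Look up the host VSID for a guest VSID, checking both the hashed slot
 * and its mirror. Returns NULL if no mapping exists yet.
 */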
static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
{
	struct kvmppc_sid_map *map;
	u16 sid_map_mask;

	if (vcpu->arch.msr & MSR_PR)
		gvsid |= VSID_PR;

	sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
	map = &to_book3s(vcpu)->sid_map[sid_map_mask];
	if (map->guest_vsid == gvsid) {
		dprintk_slb("SLB: Searching 0x%llx -> 0x%llx\n",
			    gvsid, map->host_vsid);
		return map;
	}

	map = &to_book3s(vcpu)->sid_map[SID_MAP_MASK - sid_map_mask];
	if (map->guest_vsid == gvsid) {
		dprintk_slb("SLB: Searching 0x%llx -> 0x%llx\n",
			    gvsid, map->host_vsid);
		return map;
	}

	dprintk_slb("SLB: Searching %d/%d: 0x%llx -> not found\n",
		    sid_map_mask, SID_MAP_MASK - sid_map_mask, gvsid);
	return NULL;
}
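/*
 * Map a single guest page into the host hash page table, retrying with the
 * secondary hash group on collision, and cache the resulting shadow PTE.
 */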
int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
{
	pfn_t hpaddr;
	ulong hash, hpteg, va;
	u64 vsid;
	int ret;
	int rflags = 0x192;
	int vflags = 0;
	int attempt = 0;
	struct kvmppc_sid_map *map;

	/* Get host physical address for gpa */
	hpaddr = gfn_to_pfn(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
	if (kvm_is_error_hva(hpaddr)) {
		printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n", orig_pte->eaddr);
		return -EINVAL;
	}
	hpaddr <<= PAGE_SHIFT;
#if PAGE_SHIFT == 12
#elif PAGE_SHIFT == 16
	hpaddr |= orig_pte->raddr & 0xf000;
#else
#error Unknown page size
#endif

	/* and write the mapping ea -> hpa into the pt */
	vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);
	map = find_sid_vsid(vcpu, vsid);
	if (!map) {
		ret = kvmppc_mmu_map_segment(vcpu, orig_pte->eaddr);
		WARN_ON(ret < 0);
		map = find_sid_vsid(vcpu, vsid);
	}
	if (!map) {
		printk(KERN_ERR "KVM: Segment map for 0x%llx (0x%lx) failed\n",
				vsid, orig_pte->eaddr);
		WARN_ON(true);
		return -EINVAL;
	}

	vsid = map->host_vsid;
	va = hpt_va(orig_pte->eaddr, vsid, MMU_SEGSIZE_256M);

	if (!orig_pte->may_write)
		rflags |= HPTE_R_PP;
	else
		mark_page_dirty(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);

	if (!orig_pte->may_execute)
		rflags |= HPTE_R_N;

	hash = hpt_hash(va, PTE_SIZE, MMU_SEGSIZE_256M);

map_again:
	hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);

	/* In case we tried normal mapping already, let's nuke old entries */
	if (attempt > 1)
		if (ppc_md.hpte_remove(hpteg) < 0)
			return -1;

	ret = ppc_md.hpte_insert(hpteg, va, hpaddr, rflags, vflags, MMU_PAGE_4K, MMU_SEGSIZE_256M);

	if (ret < 0) {
		/* If we couldn't map a primary PTE, try a secondary */
		hash = ~hash;
		vflags ^= HPTE_V_SECONDARY;
		attempt++;
		goto map_again;
	} else {
		int hpte_id = kvmppc_mmu_hpte_cache_next(vcpu);
		struct hpte_cache *pte = &vcpu->arch.hpte_cache[hpte_id];

		dprintk_mmu("KVM: %c%c Map 0x%lx: [%lx] 0x%lx (0x%llx) -> %lx\n",
			    ((rflags & HPTE_R_PP) == 3) ? '-' : 'w',
			    (rflags & HPTE_R_N) ? '-' : 'x',
			    orig_pte->eaddr, hpteg, va, orig_pte->vpage, hpaddr);

		/* The ppc_md code may give us a secondary entry even though we
		   asked for a primary. Fix up. */
		if ((ret & _PTEIDX_SECONDARY) && !(vflags & HPTE_V_SECONDARY)) {
			hash = ~hash;
			hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
		}

		pte->slot = hpteg + (ret & 7);
		pte->host_va = va;
		pte->pte = *orig_pte;
		pte->pfn = hpaddr >> PAGE_SHIFT;
	}

	return 0;
}
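/*
 * Install a new guest VSID -> host VSID mapping, recycling the whole host
 * VSID range (and flushing all shadow state) once it is exhausted.
 */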
static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
{
	struct kvmppc_sid_map *map;
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
	u16 sid_map_mask;
	static int backwards_map = 0;

	if (vcpu->arch.msr & MSR_PR)
		gvsid |= VSID_PR;

	/* Colliding guest VSIDs would otherwise always evict each other in
	   the same order, so alternate between the two candidate slots */
	sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
	if (backwards_map)
		sid_map_mask = SID_MAP_MASK - sid_map_mask;

	map = &to_book3s(vcpu)->sid_map[sid_map_mask];

	/* Make sure we're taking the other map next time */
	backwards_map = !backwards_map;

	/* Uh-oh ... out of mappings. Let's flush! */
	if (vcpu_book3s->vsid_next == vcpu_book3s->vsid_max) {
		vcpu_book3s->vsid_next = vcpu_book3s->vsid_first;
		memset(vcpu_book3s->sid_map, 0,
		       sizeof(struct kvmppc_sid_map) * SID_MAP_NUM);
		kvmppc_mmu_pte_flush(vcpu, 0, 0);
		kvmppc_mmu_flush_segments(vcpu);
	}
	map->host_vsid = vcpu_book3s->vsid_next++;

	map->guest_vsid = gvsid;
	map->valid = true;

	dprintk_slb("SLB: New mapping at %d: 0x%llx -> 0x%llx\n",
		    sid_map_mask, gvsid, map->host_vsid);

	return map;
}
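/*
 * Find an SLB slot for this ESID: reuse a matching or previously
 * invalidated entry if there is one, otherwise take a fresh slot, purging
 * all segments on overflow. Slot 0 is never handed out.
 */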
static int kvmppc_mmu_next_segment(struct kvm_vcpu *vcpu, ulong esid)
{
	int i;
	int max_slb_size = 64;
	int found_inval = -1;
	int r;

	if (!to_svcpu(vcpu)->slb_max)
		to_svcpu(vcpu)->slb_max = 1;

	/* Are we overwriting? */
	for (i = 1; i < to_svcpu(vcpu)->slb_max; i++) {
		if (!(to_svcpu(vcpu)->slb[i].esid & SLB_ESID_V))
			found_inval = i;
		else if ((to_svcpu(vcpu)->slb[i].esid & ESID_MASK) == esid)
			return i;
	}

	/* Found a spare entry that was invalidated before */
	if (found_inval > 0)
		return found_inval;

	/* No spare invalid entry, so create one */

	if (mmu_slb_size < 64)
		max_slb_size = mmu_slb_size;

	/* Overflowing -> purge */
	if ((to_svcpu(vcpu)->slb_max) == max_slb_size)
		kvmppc_mmu_flush_segments(vcpu);

	r = to_svcpu(vcpu)->slb_max;
	to_svcpu(vcpu)->slb_max++;

	return r;
}
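/*
 * Enter the shadow SLB entry for a guest effective address, creating the
 * guest->host VSID mapping on demand.
 */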
int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
{
	u64 esid = eaddr >> SID_SHIFT;
	u64 slb_esid = (eaddr & ESID_MASK) | SLB_ESID_V;
	u64 slb_vsid = SLB_VSID_USER;
	u64 gvsid;
	int slb_index;
	struct kvmppc_sid_map *map;

	slb_index = kvmppc_mmu_next_segment(vcpu, eaddr & ESID_MASK);

	if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) {
		/* Invalidate an entry */
		to_svcpu(vcpu)->slb[slb_index].esid = 0;
		return -ENOENT;
	}

	map = find_sid_vsid(vcpu, gvsid);
	if (!map)
		map = create_sid_map(vcpu, gvsid);

	map->guest_esid = esid;

	slb_vsid |= (map->host_vsid << 12);
	slb_vsid &= ~SLB_VSID_KP;
	slb_esid |= slb_index;

	to_svcpu(vcpu)->slb[slb_index].esid = slb_esid;
	to_svcpu(vcpu)->slb[slb_index].vsid = slb_vsid;

	dprintk_slb("slbmte %#llx, %#llx\n", slb_vsid, slb_esid);

	return 0;
}
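/*
 * Throw away all shadow segment mappings; slot 0 stays reserved and
 * invalid.
 */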
void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
{
	to_svcpu(vcpu)->slb_max = 1;
	to_svcpu(vcpu)->slb[0].esid = 0;
}
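/*
 * Tear down all MMU state for this vcpu: flush the shadow PTEs and free
 * the host context that backed its VSID range.
 */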
void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_pte_flush(vcpu, 0, 0);
	__destroy_context(to_book3s(vcpu)->context_id);
}
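/*
 * Set up MMU state: allocate a host context and derive from it the range
 * of host VSIDs this vcpu may hand out for guest segments.
 */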
int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int err;

	err = __init_new_context();
	if (err < 0)
		return -1;
	vcpu3s->context_id = err;

	vcpu3s->vsid_max = ((vcpu3s->context_id + 1) << USER_ESID_BITS) - 1;
	vcpu3s->vsid_first = vcpu3s->context_id << USER_ESID_BITS;
	vcpu3s->vsid_next = vcpu3s->vsid_first;

	return 0;
}