KVM: PPC: Improve indirect svcpu accessors
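
The hunks below replace direct get_paca()->kvm_slb* accesses with the
to_svcpu(vcpu) accessor. As a rough sketch (not taken from this tree), the
64-bit Book3S flavour of such an accessor can simply hand back the shadow
vcpu state embedded in the PACA; the kvmppc_book3s_shadow_vcpu type and the
shadow_vcpu field name are assumptions here and may differ in the real
headers:

	/* Sketch only -- not part of this patch. */
	static inline struct kvmppc_book3s_shadow_vcpu *to_svcpu(struct kvm_vcpu *vcpu)
	{
		/* Assumption: on ppc64 the shadow vcpu state lives in the PACA. */
		return &get_paca()->shadow_vcpu;
	}
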
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
index f2899b2..b0f5b4e 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -257,16 +257,9 @@ map_again:
 
        if (ret < 0) {
                /* If we couldn't map a primary PTE, try a secondary */
-#ifdef USE_SECONDARY
                hash = ~hash;
+               vflags ^= HPTE_V_SECONDARY;
                attempt++;
-               if (attempt % 2)
-                       vflags = HPTE_V_SECONDARY;
-               else
-                       vflags = 0;
-#else
-               attempt = 2;
-#endif
                goto map_again;
        } else {
                int hpte_id = kvmppc_mmu_hpte_cache_next(vcpu);
@@ -277,6 +270,13 @@ map_again:
                            (rflags & HPTE_R_N) ? '-' : 'x',
                            orig_pte->eaddr, hpteg, va, orig_pte->vpage, hpaddr);
 
+               /* The ppc_md code may give us a secondary entry even though we
+                  asked for a primary. Fix up. */
+               if ((ret & _PTEIDX_SECONDARY) && !(vflags & HPTE_V_SECONDARY)) {
+                       hash = ~hash;
+                       hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
+               }
+
                pte->slot = hpteg + (ret & 7);
                pte->host_va = va;
                pte->pte = *orig_pte;
@@ -331,14 +331,14 @@ static int kvmppc_mmu_next_segment(struct kvm_vcpu *vcpu, ulong esid)
        int found_inval = -1;
        int r;
 
-       if (!get_paca()->kvm_slb_max)
-               get_paca()->kvm_slb_max = 1;
+       if (!to_svcpu(vcpu)->slb_max)
+               to_svcpu(vcpu)->slb_max = 1;
 
        /* Are we overwriting? */
-       for (i = 1; i < get_paca()->kvm_slb_max; i++) {
-               if (!(get_paca()->kvm_slb[i].esid & SLB_ESID_V))
+       for (i = 1; i < to_svcpu(vcpu)->slb_max; i++) {
+               if (!(to_svcpu(vcpu)->slb[i].esid & SLB_ESID_V))
                        found_inval = i;
-               else if ((get_paca()->kvm_slb[i].esid & ESID_MASK) == esid)
+               else if ((to_svcpu(vcpu)->slb[i].esid & ESID_MASK) == esid)
                        return i;
        }
 
@@ -352,11 +352,11 @@ static int kvmppc_mmu_next_segment(struct kvm_vcpu *vcpu, ulong esid)
                max_slb_size = mmu_slb_size;
 
        /* Overflowing -> purge */
-       if ((get_paca()->kvm_slb_max) == max_slb_size)
+       if ((to_svcpu(vcpu)->slb_max) == max_slb_size)
                kvmppc_mmu_flush_segments(vcpu);
 
-       r = get_paca()->kvm_slb_max;
-       get_paca()->kvm_slb_max++;
+       r = to_svcpu(vcpu)->slb_max;
+       to_svcpu(vcpu)->slb_max++;
 
        return r;
 }
@@ -374,7 +374,7 @@ int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
 
        if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) {
                /* Invalidate an entry */
-               get_paca()->kvm_slb[slb_index].esid = 0;
+               to_svcpu(vcpu)->slb[slb_index].esid = 0;
                return -ENOENT;
        }
 
@@ -388,8 +388,8 @@ int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
        slb_vsid &= ~SLB_VSID_KP;
        slb_esid |= slb_index;
 
-       get_paca()->kvm_slb[slb_index].esid = slb_esid;
-       get_paca()->kvm_slb[slb_index].vsid = slb_vsid;
+       to_svcpu(vcpu)->slb[slb_index].esid = slb_esid;
+       to_svcpu(vcpu)->slb[slb_index].vsid = slb_vsid;
 
        dprintk_slb("slbmte %#llx, %#llx\n", slb_vsid, slb_esid);
 
@@ -398,8 +398,8 @@ int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
 
 void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
 {
-       get_paca()->kvm_slb_max = 1;
-       get_paca()->kvm_slb[0].esid = 0;
+       to_svcpu(vcpu)->slb_max = 1;
+       to_svcpu(vcpu)->slb[0].esid = 0;
 }
 
 void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
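
Note on the first hunk: instead of compiling the secondary-hash fallback out
(the old USE_SECONDARY block), the retry path now always flips the hash and
toggles HPTE_V_SECONDARY, so the insert is retried in the other bucket. A
small stand-alone illustration of that toggle pattern (plain user-space C
with a made-up hpte_insert stand-in, not kernel code) might look like this:

	#include <stdio.h>

	#define SECONDARY 0x1UL

	/* Stand-in insert helper that only succeeds in the secondary bucket. */
	static int hpte_insert(unsigned long bucket, unsigned long flags)
	{
		(void)bucket;
		return (flags & SECONDARY) ? 0 : -1;
	}

	int main(void)
	{
		unsigned long hash = 0x1234, flags = 0, attempt = 0;
		int ret;

	map_again:
		ret = hpte_insert(hash & 0x3ff, flags);
		if (ret < 0 && attempt < 2) {
			hash = ~hash;		/* switch to the other bucket    */
			flags ^= SECONDARY;	/* mark/unmark the secondary try */
			attempt++;
			goto map_again;
		}
		printf("ret=%d after %lu attempt(s)\n", ret, attempt + 1);
		return ret < 0;
	}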