/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2010
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#ifndef __ASM_KVM_BOOK3S_64_H__
#define __ASM_KVM_BOOK3S_64_H__

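/*
 * With PR (problem-state) KVM the shadow vcpu lives in this CPU's PACA,
 * so it must only be used while the task cannot migrate; svcpu_get() and
 * svcpu_put() bracket such accesses.
 */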
#ifdef CONFIG_KVM_BOOK3S_PR
static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu)
{
	/* keep preemption off while the PACA shadow_vcpu is in use */
	preempt_disable();
	return &get_paca()->shadow_vcpu;
}

static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
{
	preempt_enable();
}
#endif

#define SPAPR_TCE_SHIFT		12

#ifdef CONFIG_KVM_BOOK3S_64_HV
/* For now use fixed-size 16MB page table */
#define HPT_ORDER	24
#define HPT_NPTEG	(1ul << (HPT_ORDER - 7))	/* 128B per pteg */
#define HPT_NPTE	(HPT_NPTEG << 3)		/* 8 PTEs per PTEG */
#define HPT_HASH_MASK	(HPT_NPTEG - 1)
#endif

#define VRMA_VSID	0x1ffffffUL	/* 1TB VSID reserved for VRMA */

/*
 * We use a lock bit in HPTE dword 0 to synchronize updates and
 * accesses to each HPTE, and another bit to indicate non-present
 * HPTEs.
 */
#define HPTE_V_HVLOCK	0x40UL
#define HPTE_V_ABSENT	0x20UL

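/*
 * Try to lock an HPTE: fail (return 0) if any of @bits are already set
 * in the first doubleword, otherwise atomically set HPTE_V_HVLOCK and
 * return 1.
 */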
static inline long try_lock_hpte(unsigned long *hpte, unsigned long bits)
{
	unsigned long tmp, old;

	asm volatile("	ldarx	%0,0,%2\n"	/* tmp = *hpte, with reservation */
		     "	and.	%1,%0,%3\n"	/* old = tmp & bits */
		     "	bne	2f\n"		/* bail out if any bit already set */
		     "	ori	%0,%0,%4\n"	/* tmp |= HPTE_V_HVLOCK */
		     "	stdcx.	%0,0,%2\n"
		     "	beq+	2f\n"		/* store succeeded, old stays 0 */
		     "	li	%1,1\n"		/* lost reservation: report failure */
		     "2:	isync"
		     : "=&r" (tmp), "=&r" (old)
		     : "r" (hpte), "r" (bits), "i" (HPTE_V_HVLOCK)
		     : "cc", "memory");
	return old == 0;
}

static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
					     unsigned long pte_index)
{
	unsigned long rb, va_low;

	rb = (v & ~0x7fUL) << 16;		/* AVA field */
	va_low = pte_index >> 3;
	if (v & HPTE_V_SECONDARY)
		va_low = ~va_low;
	/* xor vsid from AVA */
	if (!(v & HPTE_V_1TB_SEG))
		va_low ^= v >> 12;
	else
		va_low ^= v >> 24;
	va_low &= 0x7ff;
	if (v & HPTE_V_LARGE) {
		rb |= 1;			/* L field */
		if (cpu_has_feature(CPU_FTR_ARCH_206) &&
		    (r & 0xff000)) {
			/* non-16MB large page, must be 64k */
			/* (masks depend on page size) */
			rb |= 0x1000;		/* page encoding in LP field */
			rb |= (va_low & 0x7f) << 16; /* 7b of VA in AVA/LP field */
			rb |= (va_low & 0xfe);	/* AVAL field (P7 doesn't seem to care) */
		}
	} else {
		/* 4kB page */
		rb |= (va_low & 0x7ff) << 12;	/* remaining 11b of VA */
	}
	rb |= (v >> 54) & 0x300;		/* B field */
	return rb;
}

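/* Page size in bytes described by the V (h) and R (l) words of an HPTE */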
static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
{
	/* only handle 4k, 64k and 16M pages for now */
	if (!(h & HPTE_V_LARGE))
		return 1ul << 12;		/* 4k page */
	if ((l & 0xf000) == 0x1000 && cpu_has_feature(CPU_FTR_ARCH_206))
		return 1ul << 16;		/* 64k page */
	if ((l & 0xff000) == 0)
		return 1ul << 24;		/* 16M page */
	return 0;				/* error */
}

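/*
 * Real page frame number (in PAGE_SIZE units) of the start of the page
 * described by the second HPTE doubleword, with the offset bits inside
 * a large page masked off.
 */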
static inline unsigned long hpte_rpn(unsigned long ptel, unsigned long psize)
{
	return ((ptel & HPTE_R_RPN) & ~(psize - 1)) >> PAGE_SHIFT;
}

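/*
 * Check whether the cache attributes (WIMG) in the guest HPTE are
 * compatible with the host mapping: io_type == 0 means ordinary
 * cacheable memory, otherwise the W and I bits must match the host's
 * cache-inhibited/write-through I/O mapping type.
 */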
static inline int hpte_cache_flags_ok(unsigned long ptel, unsigned long io_type)
{
	unsigned int wimg = ptel & HPTE_R_WIMG;

	/* Handle SAO (W=I=M=1): allowed for normal memory on POWER7 */
	if (wimg == (HPTE_R_W | HPTE_R_I | HPTE_R_M) &&
	    cpu_has_feature(CPU_FTR_ARCH_206))
		wimg = HPTE_R_M;

	if (!io_type)
		return wimg == HPTE_R_M;

	return (wimg & (HPTE_R_W | HPTE_R_I)) == io_type;
}

/*
 * Lock and read a linux PTE.  If it's present, atomically set the
 * referenced bit, and the dirty bit as well if it's writable, then
 * return the (updated) PTE.
 */
static inline pte_t kvmppc_read_update_linux_pte(pte_t *p)
{
	pte_t pte, tmp;

	/* wait until _PAGE_BUSY is clear then set it atomically */
	__asm__ __volatile__ (
		"1:	ldarx	%0,0,%3\n"
		"	andi.	%1,%0,%4\n"
		"	bne-	1b\n"
		"	ori	%1,%0,%4\n"
		"	stdcx.	%1,0,%3\n"
		"	bne-	1b"
		: "=&r" (pte), "=&r" (tmp), "=m" (*p)
		: "r" (p), "i" (_PAGE_BUSY)
		: "cc");

	if (pte_present(pte)) {
		pte = pte_mkyoung(pte);
		if (pte_write(pte))
			pte = pte_mkdirty(pte);
	}

	*p = pte;	/* clears _PAGE_BUSY */

	return pte;
}

/* Return HPTE cache control bits corresponding to Linux pte bits */
static inline unsigned long hpte_cache_bits(unsigned long pte_val)
{
#if _PAGE_NO_CACHE == HPTE_R_I && _PAGE_WRITETHRU == HPTE_R_W
	return pte_val & (HPTE_R_W | HPTE_R_I);
#else
	return ((pte_val & _PAGE_NO_CACHE) ? HPTE_R_I : 0) +
		((pte_val & _PAGE_WRITETHRU) ? HPTE_R_W : 0);
#endif
}

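/*
 * Check whether the PP (page protection) bits of an HPTE allow read or
 * write access.  @key is non-zero when the storage key selected for this
 * access (Ks or Kp) applies, which restricts the PP values that grant
 * permission.
 */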
static inline bool hpte_read_permission(unsigned long pp, unsigned long key)
{
	if (key)
		return PP_RWRX <= pp && pp <= PP_RXRX;
	return 1;
}

static inline bool hpte_write_permission(unsigned long pp, unsigned long key)
{
	if (key)
		return pp == PP_RWRW;
	return pp <= PP_RWRW;
}

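/*
 * Extract the storage key from an HPTE and return the corresponding
 * 2-bit access-control field from the Authority Mask Register (AMR).
 */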
static inline int hpte_get_skey_perm(unsigned long hpte_r, unsigned long amr)
{
	unsigned long skey;

	skey = ((hpte_r & HPTE_R_KEY_HI) >> 57) |
		((hpte_r & HPTE_R_KEY_LO) >> 9);
	return (amr >> (62 - 2 * skey)) & 3;
}

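/*
 * Each rmap entry contains a lock bit (KVMPPC_RMAP_LOCK_BIT) protecting
 * the reverse-mapping chain; lock_rmap() spins until it can take it.
 */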
static inline void lock_rmap(unsigned long *rmap)
{
	do {
		while (test_bit(KVMPPC_RMAP_LOCK_BIT, rmap))
			cpu_relax();
	} while (test_and_set_bit_lock(KVMPPC_RMAP_LOCK_BIT, rmap));
}

static inline void unlock_rmap(unsigned long *rmap)
{
	__clear_bit_unlock(KVMPPC_RMAP_LOCK_BIT, rmap);
}

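/*
 * A memslot can only be backed by large pages of a given size if both its
 * base guest frame number and its length are aligned to that page size.
 */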
static inline bool slot_is_aligned(struct kvm_memory_slot *memslot,
				   unsigned long pagesize)
{
	unsigned long mask = (pagesize >> PAGE_SHIFT) - 1;

	if (pagesize <= PAGE_SIZE)
		return 1;
	return !(memslot->base_gfn & mask) && !(memslot->npages & mask);
}

#endif /* __ASM_KVM_BOOK3S_64_H__ */