/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *	Yaniv Kamay <yaniv@qumranet.com>
 *	Avi Kivity <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
/*
 * We need the mmu code to access both 32-bit and 64-bit guest ptes,
 * so the code in this file is compiled twice, once per pte size.
 */
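
/*
 * A rough sketch of the instantiation (see mmu.c in the KVM sources):
 * the includer defines PTTYPE and pulls this header in twice, so every
 * FNAME() function exists in a paging64_ and a paging32_ flavor:
 *
 *	#define PTTYPE 64
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 *
 *	#define PTTYPE 32
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 */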
#if PTTYPE == 64
#define pt_element_t u64
#define guest_walker guest_walker64
#define FNAME(name) paging##64_##name
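/* e.g. FNAME(walk_addr) pastes to paging64_walk_addr in this instantiation */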
#define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
#define PT_DIR_BASE_ADDR_MASK PT64_DIR_BASE_ADDR_MASK
#define PT_INDEX(addr, level) PT64_INDEX(addr, level)
#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
#define PT_LEVEL_MASK(level) PT64_LEVEL_MASK(level)
#define PT_PTE_COPY_MASK PT64_PTE_COPY_MASK
#ifdef CONFIG_X86_64
#define PT_MAX_FULL_LEVELS 4
#else
#define PT_MAX_FULL_LEVELS 2
#endif
#elif PTTYPE == 32
#define pt_element_t u32
#define guest_walker guest_walker32
#define FNAME(name) paging##32_##name
#define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK
#define PT_DIR_BASE_ADDR_MASK PT32_DIR_BASE_ADDR_MASK
#define PT_INDEX(addr, level) PT32_INDEX(addr, level)
#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)	/* shadow ptes are always 64-bit */
#define PT_LEVEL_MASK(level) PT32_LEVEL_MASK(level)
#define PT_PTE_COPY_MASK PT32_PTE_COPY_MASK
#define PT_MAX_FULL_LEVELS 2
#else
#error Invalid PTTYPE value
#endif
/*
 * The guest_walker structure emulates the behavior of the hardware page
 * table walker.
 */
struct guest_walker {
	int level;				/* current level of the walk */
	gfn_t table_gfn[PT_MAX_FULL_LEVELS];	/* guest frame of each table visited */
	pt_element_t *table;			/* kmap of the current table page */
	pt_element_t *ptep;			/* last guest pte examined */
	pt_element_t inherited_ar;		/* and-ed access rights from upper levels */
	gfn_t gfn;				/* translated guest frame number */
	u32 error_code;				/* page fault error code on failure */
};
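
/*
 * Typical lifecycle, as used below (a sketch, not a fixed API): walk_addr()
 * fills the walker and leaves the final table kmapped; the caller consumes
 * walker->gfn or walker->ptep and must then call release_walker() to unmap.
 */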
/*
 * Fetch a guest pte for a guest virtual address
 */
static int FNAME(walk_addr)(struct guest_walker *walker,
			    struct kvm_vcpu *vcpu, gva_t addr,
			    int write_fault, int user_fault, int fetch_fault)
{
	hpa_t hpa;
	struct kvm_memory_slot *slot;
	pt_element_t *ptep;
	pt_element_t root;
	gfn_t table_gfn;

	pgprintk("%s: addr %lx\n", __FUNCTION__, addr);
	walker->level = vcpu->mmu.root_level;
	walker->table = NULL;
	root = vcpu->cr3;
#if PTTYPE == 64
	if (!is_long_mode(vcpu)) {
		/* PAE: bits 31:30 of the address select one of the four pdptrs */
		walker->ptep = &vcpu->pdptrs[(addr >> 30) & 3];
		root = *walker->ptep;
		if (!(root & PT_PRESENT_MASK))
			goto not_present;
		--walker->level;
	}
#endif
	table_gfn = (root & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
	walker->table_gfn[walker->level - 1] = table_gfn;
	pgprintk("%s: table_gfn[%d] %lx\n", __FUNCTION__,
		 walker->level - 1, table_gfn);
	slot = gfn_to_memslot(vcpu->kvm, table_gfn);
	hpa = safe_gpa_to_hpa(vcpu, root & PT64_BASE_ADDR_MASK);
	walker->table = kmap_atomic(pfn_to_page(hpa >> PAGE_SHIFT), KM_USER0);
	ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) ||
	       (vcpu->cr3 & ~(PAGE_MASK | CR3_FLAGS_MASK)) == 0);
	/* Access rights start fully permissive and are and-ed down the walk. */
	walker->inherited_ar = PT_USER_MASK | PT_WRITABLE_MASK;
	for (;;) {
		int index = PT_INDEX(addr, walker->level);
		hpa_t paddr;
		ptep = &walker->table[index];
		ASSERT(((unsigned long)walker->table & PAGE_MASK) ==
		       ((unsigned long)ptep & PAGE_MASK));
		if (!is_present_pte(*ptep))
			goto not_present;
		if (write_fault && !is_writeble_pte(*ptep))
			if (user_fault || is_write_protection(vcpu))
				goto access_error;

		if (user_fault && !(*ptep & PT_USER_MASK))
			goto access_error;
#if PTTYPE == 64
		if (fetch_fault && is_nx(vcpu) && (*ptep & PT64_NX_MASK))
			goto access_error;
#endif
		if (!(*ptep & PT_ACCESSED_MASK)) {
			/* Setting the A bit writes the guest table page; log it as dirty. */
			mark_page_dirty(vcpu->kvm, table_gfn);
			*ptep |= PT_ACCESSED_MASK;
		}
		if (walker->level == PT_PAGE_TABLE_LEVEL) {
			walker->gfn = (*ptep & PT_BASE_ADDR_MASK)
				>> PAGE_SHIFT;
			break;
		}
		if (walker->level == PT_DIRECTORY_LEVEL
		    && (*ptep & PT_PAGE_SIZE_MASK)
		    && (PTTYPE == 64 || is_pse(vcpu))) {
			walker->gfn = (*ptep & PT_DIR_BASE_ADDR_MASK)
				>> PAGE_SHIFT;
			walker->gfn += PT_INDEX(addr, PT_PAGE_TABLE_LEVEL);
			break;
		}
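
		/*
		 * Worked example for the 4MB case above (hypothetical
		 * numbers): a guest pde mapping virtual 0x00c56000 to the
		 * 4MB frame at 0x00400000 yields gfn 0x400 from the pde,
		 * and PT_INDEX(addr, PT_PAGE_TABLE_LEVEL) = 0x56, so
		 * walker->gfn = 0x456.
		 */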
		walker->inherited_ar &= walker->table[index];
		table_gfn = (*ptep & PT_BASE_ADDR_MASK) >> PAGE_SHIFT;
		paddr = safe_gpa_to_hpa(vcpu, *ptep & PT_BASE_ADDR_MASK);
		kunmap_atomic(walker->table, KM_USER0);
		walker->table = kmap_atomic(pfn_to_page(paddr >> PAGE_SHIFT),
					    KM_USER0);
		--walker->level;
		walker->table_gfn[walker->level - 1] = table_gfn;
		pgprintk("%s: table_gfn[%d] %lx\n", __FUNCTION__,
			 walker->level - 1, table_gfn);
	}
	walker->ptep = ptep;
	pgprintk("%s: pte %llx\n", __FUNCTION__, (u64)*ptep);
	return 1;
not_present:
	walker->error_code = 0;
	goto err;

access_error:
	walker->error_code = PFERR_PRESENT_MASK;

err:
	if (write_fault)
		walker->error_code |= PFERR_WRITE_MASK;
	if (user_fault)
		walker->error_code |= PFERR_USER_MASK;
	if (fetch_fault)
		walker->error_code |= PFERR_FETCH_MASK;
	return 0;
}
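
/*
 * The PFERR_* bits above mirror the x86 #PF error code: bit 0 set means a
 * protection violation (rather than a not-present pte), bit 1 a write,
 * bit 2 a user-mode access and bit 4 an instruction fetch.
 */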
static void FNAME(release_walker)(struct guest_walker *walker)
{
	if (walker->table)
		kunmap_atomic(walker->table, KM_USER0);
}
static void FNAME(mark_pagetable_dirty)(struct kvm *kvm,
					struct guest_walker *walker)
{
	mark_page_dirty(kvm, walker->table_gfn[walker->level - 1]);
}
static void FNAME(set_pte_common)(struct kvm_vcpu *vcpu,
				  u64 *shadow_pte,
				  gpa_t gaddr,
				  int dirty,
				  u64 access_bits,
				  gfn_t gfn)
{
	hpa_t paddr;

	*shadow_pte |= access_bits << PT_SHADOW_BITS_OFFSET;
	if (!dirty)
		access_bits &= ~PT_WRITABLE_MASK;
	paddr = gpa_to_hpa(vcpu, gaddr & PT64_BASE_ADDR_MASK);

	*shadow_pte |= access_bits;
	if (is_error_hpa(paddr)) {
		/* mmio: remember the gpa in the spte and fault on access */
		*shadow_pte |= gaddr;
		*shadow_pte |= PT_SHADOW_IO_MARK;
		*shadow_pte &= ~PT_PRESENT_MASK;
		return;
	}

	*shadow_pte |= paddr;
	if (access_bits & PT_WRITABLE_MASK) {
		struct kvm_mmu_page *shadow;

		shadow = kvm_mmu_lookup_page(vcpu, gfn);
		if (shadow) {
			pgprintk("%s: found shadow page for %lx, marking ro\n",
				 __FUNCTION__, gfn);
			access_bits &= ~PT_WRITABLE_MASK;
			if (is_writeble_pte(*shadow_pte)) {
				*shadow_pte &= ~PT_WRITABLE_MASK;
				kvm_arch_ops->tlb_flush(vcpu);
			}
		}
	}
	if (access_bits & PT_WRITABLE_MASK)
		mark_page_dirty(vcpu->kvm, gaddr >> PAGE_SHIFT);
	page_header_update_slot(vcpu->kvm, shadow_pte, gaddr);
	rmap_add(vcpu, shadow_pte);
}
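
/*
 * rmap_add() above links the spte into the reverse map for its frame, so
 * the mmu can later find every spte pointing at a guest page in order to
 * write-protect or zap it.
 */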
static void FNAME(set_pte)(struct kvm_vcpu *vcpu, u64 guest_pte,
			   u64 *shadow_pte, u64 access_bits, gfn_t gfn)
{
	ASSERT(*shadow_pte == 0);
	access_bits &= guest_pte;
	*shadow_pte = (guest_pte & PT_PTE_COPY_MASK);
	FNAME(set_pte_common)(vcpu, shadow_pte, guest_pte & PT_BASE_ADDR_MASK,
			      guest_pte & PT_DIRTY_MASK, access_bits, gfn);
}
static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
			      u64 *spte, const void *pte, int bytes)
{
	pt_element_t gpte;
	if (bytes < sizeof(pt_element_t))
		return;
	gpte = *(const pt_element_t *)pte;
	/* Only shadow a gpte that is present and accessed; the rest fault in lazily. */
	if (~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK))
		return;
	pgprintk("%s: gpte %llx spte %p\n", __FUNCTION__, (u64)gpte, spte);
	FNAME(set_pte)(vcpu, gpte, spte, PT_USER_MASK | PT_WRITABLE_MASK,
		       (gpte & PT_BASE_ADDR_MASK) >> PAGE_SHIFT);
}
static void FNAME(set_pde)(struct kvm_vcpu *vcpu, u64 guest_pde,
			   u64 *shadow_pte, u64 access_bits, gfn_t gfn)
{
	gpa_t gaddr;
	ASSERT(*shadow_pte == 0);
	access_bits &= guest_pde;
	gaddr = (gpa_t)gfn << PAGE_SHIFT;
	if (PTTYPE == 32 && is_cpuid_PSE36())
		/* pde bits 16:13 supply physical address bits 35:32 */
		gaddr |= (guest_pde & PT32_DIR_PSE36_MASK) <<
			(32 - PT32_DIR_PSE36_SHIFT);
	*shadow_pte = guest_pde & PT_PTE_COPY_MASK;
	FNAME(set_pte_common)(vcpu, shadow_pte, gaddr,
			      guest_pde & PT_DIRTY_MASK, access_bits, gfn);
}
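
/*
 * PSE36 example (hypothetical numbers, assuming PT32_DIR_PSE36_SHIFT == 13):
 * pde bits 16:13 become physical address bits 35:32, so a pde carrying 0x3
 * in those bits contributes 0x3ull << 32 to gaddr, letting a 32-bit guest
 * place 4MB pages above 4GB.
 */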
/*
 * Fetch a shadow pte for a specific level in the paging hierarchy.
 */
static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
			 struct guest_walker *walker)
{
	hpa_t shadow_addr;
	int level;
	u64 *shadow_ent;
	u64 *prev_shadow_ent = NULL;
	pt_element_t *guest_ent = walker->ptep;
	if (!is_present_pte(*guest_ent))
		return NULL;
	shadow_addr = vcpu->mmu.root_hpa;
	level = vcpu->mmu.shadow_root_level;
	if (level == PT32E_ROOT_LEVEL) {
		/* PAE: the four pae_root entries act as a pseudo top level */
		shadow_addr = vcpu->mmu.pae_root[(addr >> 30) & 3];
		shadow_addr &= PT64_BASE_ADDR_MASK;
		--level;
	}
	for (; ; level--) {
		u32 index = SHADOW_PT_INDEX(addr, level);
		struct kvm_mmu_page *shadow_page;
		u64 shadow_pte;
		int metaphysical;
		gfn_t table_gfn;
		unsigned hugepage_access = 0;
		shadow_ent = ((u64 *)__va(shadow_addr)) + index;
		if (is_present_pte(*shadow_ent) || is_io_pte(*shadow_ent)) {
			if (level == PT_PAGE_TABLE_LEVEL)
				break;
			shadow_addr = *shadow_ent & PT64_BASE_ADDR_MASK;
			prev_shadow_ent = shadow_ent;
			continue;
		}
		if (level == PT_PAGE_TABLE_LEVEL)
			break;
		if (level - 1 == PT_PAGE_TABLE_LEVEL
		    && walker->level == PT_DIRECTORY_LEVEL) {
			metaphysical = 1;
			hugepage_access = *guest_ent;
			hugepage_access &= PT_USER_MASK | PT_WRITABLE_MASK;
			hugepage_access >>= PT_WRITABLE_SHIFT;
			table_gfn = (*guest_ent & PT_BASE_ADDR_MASK)
				>> PAGE_SHIFT;
		} else {
			metaphysical = 0;
			table_gfn = walker->table_gfn[level - 2];
		}
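
		/*
		 * A "metaphysical" shadow page has no guest page table
		 * behind it; here it holds the 4kB sptes that split a
		 * guest 4MB mapping.
		 */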
		shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
					       metaphysical, hugepage_access,
					       shadow_ent);
		shadow_addr = __pa(shadow_page->spt);
		/* Intermediate sptes are maximally permissive; the leaf enforces rights. */
		shadow_pte = shadow_addr | PT_PRESENT_MASK | PT_ACCESSED_MASK
			| PT_WRITABLE_MASK | PT_USER_MASK;
		*shadow_ent = shadow_pte;
		prev_shadow_ent = shadow_ent;
	}
	if (walker->level == PT_DIRECTORY_LEVEL) {
		if (prev_shadow_ent)
			*prev_shadow_ent |= PT_SHADOW_PS_MARK;
		FNAME(set_pde)(vcpu, *guest_ent, shadow_ent,
			       walker->inherited_ar, walker->gfn);
	} else {
		ASSERT(walker->level == PT_PAGE_TABLE_LEVEL);
		FNAME(set_pte)(vcpu, *guest_ent, shadow_ent,
			       walker->inherited_ar,
			       walker->gfn);
	}
	return shadow_ent;
}
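
/*
 * Note that the shadow hierarchy may be deeper than the guest's (a 2-level
 * 32-bit guest is shadowed by 3-level PAE tables), which is why
 * SHADOW_PT_INDEX always indexes with PT64_INDEX.
 */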
/*
 * The guest faulted for write.  We need to
 *
 * - check write permissions
 * - update the guest pte dirty bit
 * - update our own dirty page tracking structures
 */
static int FNAME(fix_write_pf)(struct kvm_vcpu *vcpu,
			       u64 *shadow_ent,
			       struct guest_walker *walker,
			       gva_t addr,
			       int user,
			       int *write_pt)
{
	pt_element_t *guest_ent;
	int writable_shadow;
	gfn_t gfn;
	struct kvm_mmu_page *page;
	if (is_writeble_pte(*shadow_ent))
		return !user || (*shadow_ent & PT_USER_MASK);
	writable_shadow = *shadow_ent & PT_SHADOW_WRITABLE_MASK;

	if (user) {
		/*
		 * User mode access.  Fail if it's a kernel page or a read-only
		 * user page.
		 */
		if (!(*shadow_ent & PT_SHADOW_USER_MASK) || !writable_shadow)
			return 0;
		ASSERT(*shadow_ent & PT_USER_MASK);
	} else
		/*
		 * Kernel mode access.  Fail if it's a read-only page and
		 * supervisor write protection is enabled.
		 */
		if (!writable_shadow) {
			if (is_write_protection(vcpu))
				return 0;
			*shadow_ent &= ~PT_USER_MASK;
		}
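
	/*
	 * In the CR0.WP-clear case above the kernel may write a read-only
	 * page, so the spte is made writable below; clearing PT_USER_MASK
	 * keeps user accesses faulting so they can still be rejected.
	 */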
	guest_ent = walker->ptep;
	if (!is_present_pte(*guest_ent)) {
		*shadow_ent = 0;
		return 0;
	}

	gfn = walker->gfn;

	if (user) {
		/*
		 * Usermode page faults won't be for page table updates.
		 */
		while ((page = kvm_mmu_lookup_page(vcpu, gfn)) != NULL) {
			pgprintk("%s: zap %lx %x\n",
				 __FUNCTION__, gfn, page->role.word);
			kvm_mmu_zap_page(vcpu, page);
		}
	} else if (kvm_mmu_lookup_page(vcpu, gfn)) {
		pgprintk("%s: found shadow page for %lx, marking ro\n",
			 __FUNCTION__, gfn);
		mark_page_dirty(vcpu->kvm, gfn);
		FNAME(mark_pagetable_dirty)(vcpu->kvm, walker);
		*guest_ent |= PT_DIRTY_MASK;
		*write_pt = 1;
		return 0;
	}
	mark_page_dirty(vcpu->kvm, gfn);
	*shadow_ent |= PT_WRITABLE_MASK;
	FNAME(mark_pagetable_dirty)(vcpu->kvm, walker);
	*guest_ent |= PT_DIRTY_MASK;
	rmap_add(vcpu, shadow_ent);

	return 1;
}
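
/*
 * fix_write_pf() returns 1 when the fault was repaired by making the spte
 * writable, and 0 when it must be handled as a guest fault or emulated;
 * *write_pt is set when the target is itself a shadowed page table.
 */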
/*
 * Page fault handler.  There are several causes for a page fault:
 * - there is no shadow pte for the guest pte
 * - write access through a shadow pte marked read only so that we can set
 *   the dirty bit
 * - write access to a shadow pte marked read only so we can update the page
 *   dirty bitmap, when userspace requests it
 * - mmio access; in this case we will never install a present shadow pte
 * - normal guest page fault due to the guest pte marked not present, not
 *   writable, or not executable
 *
 * Returns: 1 if we need to emulate the instruction, 0 otherwise, or
 *          a negative value on error.
 */
static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
			     u32 error_code)
{
	int write_fault = error_code & PFERR_WRITE_MASK;
	int user_fault = error_code & PFERR_USER_MASK;
	int fetch_fault = error_code & PFERR_FETCH_MASK;
	struct guest_walker walker;
	u64 *shadow_pte;
	int fixed;
	int write_pt = 0;
	int r;
	pgprintk("%s: addr %lx err %x\n", __FUNCTION__, addr, error_code);
	kvm_mmu_audit(vcpu, "pre page fault");
	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;
	/*
	 * Look up the shadow pte for the faulting address.
	 */
	r = FNAME(walk_addr)(&walker, vcpu, addr, write_fault, user_fault,
			     fetch_fault);
	/*
	 * The page is not mapped by the guest.  Let the guest handle it.
	 */
	if (!r) {
		pgprintk("%s: guest page fault\n", __FUNCTION__);
		inject_page_fault(vcpu, addr, walker.error_code);
		FNAME(release_walker)(&walker);
		vcpu->last_pt_write_count = 0; /* reset fork detector */
		return 0;
	}
	shadow_pte = FNAME(fetch)(vcpu, addr, &walker);
	pgprintk("%s: shadow pte %p %llx\n", __FUNCTION__,
		 shadow_pte, *shadow_pte);
	/*
	 * Update the shadow pte.
	 */
	if (write_fault)
		fixed = FNAME(fix_write_pf)(vcpu, shadow_pte, &walker, addr,
					    user_fault, &write_pt);
	else
		fixed = fix_read_pf(shadow_pte);
	pgprintk("%s: updated shadow pte %p %llx\n", __FUNCTION__,
		 shadow_pte, *shadow_pte);
	FNAME(release_walker)(&walker);

	if (!write_pt)
		vcpu->last_pt_write_count = 0; /* reset fork detector */
	/*
	 * mmio: emulate if accessible, otherwise it's a guest fault.
	 */
	if (is_io_pte(*shadow_pte))
		return 1;
	++vcpu->stat.pf_fixed;
	kvm_mmu_audit(vcpu, "post page fault (fixed)");

	return write_pt;
}
static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
{
	struct guest_walker walker;
	gpa_t gpa = UNMAPPED_GVA;
	int r;
	r = FNAME(walk_addr)(&walker, vcpu, vaddr, 0, 0, 0);
	if (r) {
		gpa = (gpa_t)walker.gfn << PAGE_SHIFT;
		gpa |= vaddr & ~PAGE_MASK;
	}
	FNAME(release_walker)(&walker);
	return gpa;
}
#undef pt_element_t
#undef guest_walker
#undef FNAME
#undef PT_BASE_ADDR_MASK
#undef PT_INDEX
#undef SHADOW_PT_INDEX
#undef PT_LEVEL_MASK
#undef PT_PTE_COPY_MASK
#undef PT_NON_PTE_COPY_MASK
#undef PT_DIR_BASE_ADDR_MASK
#undef PT_MAX_FULL_LEVELS