/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
/*
 * We need the mmu code to access both 32-bit and 64-bit guest ptes,
 * so the code in this file is compiled twice, once per pte size.
 */
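/*
 * For illustration: the includer instantiates this template roughly the
 * way mmu.c does (a sketch, not a verbatim copy):
 *
 *	#define PTTYPE 64
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 *
 *	#define PTTYPE 32
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 *
 * Each inclusion expands FNAME() into a distinct paging64_*/paging32_*
 * function family.
 */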
#if PTTYPE == 64
	#define pt_element_t u64
	#define guest_walker guest_walker64
	#define FNAME(name) paging##64_##name
	#define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
	#define PT_LVL_ADDR_MASK(lvl) PT64_LVL_ADDR_MASK(lvl)
	#define PT_LVL_OFFSET_MASK(lvl) PT64_LVL_OFFSET_MASK(lvl)
	#define PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define PT_LEVEL_MASK(level) PT64_LEVEL_MASK(level)
	#define PT_LEVEL_BITS PT64_LEVEL_BITS
	#ifdef CONFIG_X86_64
	#define PT_MAX_FULL_LEVELS 4
	#define CMPXCHG cmpxchg
	#else
	#define CMPXCHG cmpxchg64
	#define PT_MAX_FULL_LEVELS 2
	#endif
#elif PTTYPE == 32
	#define pt_element_t u32
	#define guest_walker guest_walker32
	#define FNAME(name) paging##32_##name
	#define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK
	#define PT_LVL_ADDR_MASK(lvl) PT32_LVL_ADDR_MASK(lvl)
	#define PT_LVL_OFFSET_MASK(lvl) PT32_LVL_OFFSET_MASK(lvl)
	#define PT_INDEX(addr, level) PT32_INDEX(addr, level)
	#define PT_LEVEL_MASK(level) PT32_LEVEL_MASK(level)
	#define PT_LEVEL_BITS PT32_LEVEL_BITS
	#define PT_MAX_FULL_LEVELS 2
	#define CMPXCHG cmpxchg
#else
	#error Invalid PTTYPE value
#endif
#define gpte_to_gfn_lvl FNAME(gpte_to_gfn_lvl)
#define gpte_to_gfn(pte) gpte_to_gfn_lvl((pte), PT_PAGE_TABLE_LEVEL)
/*
 * The guest_walker structure emulates the behavior of the hardware page
 * table walker.
 */
struct guest_walker {
	int level;
	gfn_t table_gfn[PT_MAX_FULL_LEVELS];
	pt_element_t ptes[PT_MAX_FULL_LEVELS];
	gpa_t pte_gpa[PT_MAX_FULL_LEVELS];
	unsigned pt_access;
	unsigned pte_access;
	gfn_t gfn;
	u32 error_code;
};
static gfn_t gpte_to_gfn_lvl(pt_element_t gpte, int lvl)
{
	return (gpte & PT_LVL_ADDR_MASK(lvl)) >> PAGE_SHIFT;
}
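/*
 * Worked example (a sketch, assuming 64-bit paging): for a 2MB mapping at
 * lvl == PT_DIRECTORY_LEVEL, PT_LVL_ADDR_MASK(lvl) keeps bits 51:21 of the
 * gpte, so the result is the gfn of the first 4KB frame of the large page;
 * the caller adds the in-page offset bits separately (see walk_addr below).
 */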
static bool FNAME(cmpxchg_gpte)(struct kvm *kvm,
				gfn_t table_gfn, unsigned index,
				pt_element_t orig_pte, pt_element_t new_pte)
{
	pt_element_t ret;
	pt_element_t *table;
	struct page *page;

	page = gfn_to_page(kvm, table_gfn);

	table = kmap_atomic(page, KM_USER0);
	ret = CMPXCHG(&table[index], orig_pte, new_pte);
	kunmap_atomic(table, KM_USER0);

	kvm_release_page_dirty(page);

	return (ret != orig_pte);
}
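/*
 * Note: the CMPXCHG above only installs new_pte if the guest pte still
 * equals orig_pte, so a concurrent guest update of the same pte is never
 * silently overwritten.  A true return value means the pte changed under
 * us and the caller restarts the walk.
 */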
static unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, pt_element_t gpte)
{
	unsigned access;

	access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK;
#if PTTYPE == 64
	if (is_nx(vcpu))
		access &= ~(gpte >> PT64_NX_SHIFT);
#endif
	return access;
}
/*
 * Fetch a guest pte for a guest virtual address
 */
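/*
 * For reference, a sketch of how the walk decomposes the address: at each
 * level n, PT_INDEX(addr, n) extracts the n-th group of PT_LEVEL_BITS bits
 * above PAGE_SHIFT, i.e. for 4-level 64-bit paging roughly
 *
 *	index = (addr >> (PAGE_SHIFT + (n - 1) * 9)) & 511;
 *
 * so a 48-bit address splits into four 9-bit table indices plus a 12-bit
 * page offset.
 */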
static int FNAME(walk_addr)(struct guest_walker *walker,
			    struct kvm_vcpu *vcpu, gva_t addr,
			    int write_fault, int user_fault, int fetch_fault)
{
	pt_element_t pte;
	gfn_t table_gfn;
	unsigned index, pt_access, pte_access;
	gpa_t pte_gpa;
	int rsvd_fault = 0;

	trace_kvm_mmu_pagetable_walk(addr, write_fault, user_fault,
				     fetch_fault);
walk:
	walker->level = vcpu->arch.mmu.root_level;
	pte = vcpu->arch.cr3;
#if PTTYPE == 64
	if (!is_long_mode(vcpu)) {
		pte = kvm_pdptr_read(vcpu, (addr >> 30) & 3);
		trace_kvm_mmu_paging_element(pte, walker->level);
		if (!is_present_gpte(pte))
			goto not_present;
		--walker->level;
	}
#endif
	ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) ||
	       (vcpu->arch.cr3 & CR3_NONPAE_RESERVED_BITS) == 0);

	pt_access = ACC_ALL;

	for (;;) {
		index = PT_INDEX(addr, walker->level);

		table_gfn = gpte_to_gfn(pte);
		pte_gpa = gfn_to_gpa(table_gfn);
		pte_gpa += index * sizeof(pt_element_t);
		walker->table_gfn[walker->level - 1] = table_gfn;
		walker->pte_gpa[walker->level - 1] = pte_gpa;

		if (kvm_read_guest(vcpu->kvm, pte_gpa, &pte, sizeof(pte)))
			goto not_present;

		trace_kvm_mmu_paging_element(pte, walker->level);

		if (!is_present_gpte(pte))
			goto not_present;

		rsvd_fault = is_rsvd_bits_set(vcpu, pte, walker->level);
		if (rsvd_fault)
			goto access_error;

		if (write_fault && !is_writable_pte(pte))
			if (user_fault || is_write_protection(vcpu))
				goto access_error;

		if (user_fault && !(pte & PT_USER_MASK))
			goto access_error;

#if PTTYPE == 64
		if (fetch_fault && (pte & PT64_NX_MASK))
			goto access_error;
#endif

		if (!(pte & PT_ACCESSED_MASK)) {
			trace_kvm_mmu_set_accessed_bit(table_gfn, index,
						       sizeof(pte));
			if (FNAME(cmpxchg_gpte)(vcpu->kvm, table_gfn,
			    index, pte, pte|PT_ACCESSED_MASK))
				goto walk;
			mark_page_dirty(vcpu->kvm, table_gfn);
			pte |= PT_ACCESSED_MASK;
		}

		pte_access = pt_access & FNAME(gpte_access)(vcpu, pte);

		walker->ptes[walker->level - 1] = pte;

		if ((walker->level == PT_PAGE_TABLE_LEVEL) ||
		    ((walker->level == PT_DIRECTORY_LEVEL) &&
				is_large_pte(pte) &&
				(PTTYPE == 64 || is_pse(vcpu))) ||
		    ((walker->level == PT_PDPE_LEVEL) &&
				is_large_pte(pte) &&
				is_long_mode(vcpu))) {
			int lvl = walker->level;

			walker->gfn = gpte_to_gfn_lvl(pte, lvl);
			walker->gfn += (addr & PT_LVL_OFFSET_MASK(lvl))
					>> PAGE_SHIFT;

			if (PTTYPE == 32 &&
			    walker->level == PT_DIRECTORY_LEVEL &&
			    is_cpuid_PSE36())
				walker->gfn += pse36_gfn_delta(pte);

			break;
		}

		pt_access = pte_access;
		--walker->level;
	}

	if (write_fault && !is_dirty_gpte(pte)) {
		bool ret;

		trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte));
		ret = FNAME(cmpxchg_gpte)(vcpu->kvm, table_gfn, index, pte,
			    pte|PT_DIRTY_MASK);
		if (ret)
			goto walk;
		mark_page_dirty(vcpu->kvm, table_gfn);
		pte |= PT_DIRTY_MASK;
		walker->ptes[walker->level - 1] = pte;
	}

	walker->pt_access = pt_access;
	walker->pte_access = pte_access;
	pgprintk("%s: pte %llx pte_access %x pt_access %x\n",
		 __func__, (u64)pte, pte_access, pt_access);
	return 1;

not_present:
	walker->error_code = 0;
	goto err;

access_error:
	walker->error_code = PFERR_PRESENT_MASK;

err:
	if (write_fault)
		walker->error_code |= PFERR_WRITE_MASK;
	if (user_fault)
		walker->error_code |= PFERR_USER_MASK;
	if (fetch_fault)
		walker->error_code |= PFERR_FETCH_MASK;
	if (rsvd_fault)
		walker->error_code |= PFERR_RSVD_MASK;
	trace_kvm_mmu_walker_error(walker->error_code);
	return 0;
}
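/*
 * update_pte() below is reached from the guest pte-write emulation path
 * (kvm_mmu_pte_write() in mmu.c, as far as this walker is concerned), so a
 * freshly written guest pte can be shadowed immediately instead of waiting
 * for the next fault.
 */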
static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
			      u64 *spte, const void *pte)
{
	pt_element_t gpte;
	unsigned pte_access;
	pfn_t pfn;
	u64 new_spte;

	gpte = *(const pt_element_t *)pte;
	if (~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK)) {
		if (!is_present_gpte(gpte)) {
			if (page->unsync)
				new_spte = shadow_trap_nonpresent_pte;
			else
				new_spte = shadow_notrap_nonpresent_pte;
			__set_spte(spte, new_spte);
		}
		return;
	}
	pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
	pte_access = page->role.access & FNAME(gpte_access)(vcpu, gpte);
	if (gpte_to_gfn(gpte) != vcpu->arch.update_pte.gfn)
		return;
	pfn = vcpu->arch.update_pte.pfn;
	if (is_error_pfn(pfn))
		return;
	if (mmu_notifier_retry(vcpu, vcpu->arch.update_pte.mmu_seq))
		return;
	kvm_get_pfn(pfn);
	/*
	 * We call mmu_set_spte() with reset_host_protection = true because
	 * vcpu->arch.update_pte.pfn was fetched from get_user_pages(write = 1).
	 */
	mmu_set_spte(vcpu, spte, page->role.access, pte_access, 0, 0,
		     gpte & PT_DIRTY_MASK, NULL, PT_PAGE_TABLE_LEVEL,
		     gpte_to_gfn(gpte), pfn, true, true);
}
/*
 * Fetch a shadow pte for a specific level in the paging hierarchy.
 */
static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
			 struct guest_walker *gw,
			 int user_fault, int write_fault, int hlevel,
			 int *ptwrite, pfn_t pfn)
{
	unsigned access = gw->pt_access;
	struct kvm_mmu_page *shadow_page;
	u64 spte, *sptep = NULL;
	int direct;
	gfn_t table_gfn;
	int r;
	int level;
	pt_element_t curr_pte;
	struct kvm_shadow_walk_iterator iterator;

	if (!is_present_gpte(gw->ptes[gw->level - 1]))
		return NULL;

	for_each_shadow_entry(vcpu, addr, iterator) {
		level = iterator.level;
		sptep = iterator.sptep;
		if (iterator.level == hlevel) {
			mmu_set_spte(vcpu, sptep, access,
				     gw->pte_access & access,
				     user_fault, write_fault,
				     gw->ptes[gw->level-1] & PT_DIRTY_MASK,
				     ptwrite, level,
				     gw->gfn, pfn, false, true);
			break;
		}

		if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep))
			continue;

		if (is_large_pte(*sptep)) {
			rmap_remove(vcpu->kvm, sptep);
			__set_spte(sptep, shadow_trap_nonpresent_pte);
			kvm_flush_remote_tlbs(vcpu->kvm);
		}

		if (level <= gw->level) {
			int delta = level - gw->level + 1;

			direct = 1;
			if (!is_dirty_gpte(gw->ptes[level - delta]))
				access &= ~ACC_WRITE_MASK;
			/*
			 * It is a large guest page backed by small host
			 * pages, so we set @direct(@shadow_page->role.direct)
			 * = 1, and set @table_gfn(@shadow_page->gfn) = the
			 * base page frame for linear translations.
			 */
			table_gfn = gw->gfn & ~(KVM_PAGES_PER_HPAGE(level) - 1);
			access &= gw->pte_access;
		} else {
			direct = 0;
			table_gfn = gw->table_gfn[level - 2];
		}
		shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
					       direct, access, sptep);
		if (!direct) {
			r = kvm_read_guest_atomic(vcpu->kvm,
						  gw->pte_gpa[level - 2],
						  &curr_pte, sizeof(curr_pte));
			if (r || curr_pte != gw->ptes[level - 2]) {
				kvm_mmu_put_page(shadow_page, sptep);
				kvm_release_pfn_clean(pfn);
				sptep = NULL;
				break;
			}
		}

		spte = __pa(shadow_page->spt)
			| PT_PRESENT_MASK | PT_ACCESSED_MASK
			| PT_WRITABLE_MASK | PT_USER_MASK;
		*sptep = spte;
	}

	return sptep;
}
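/*
 * Contract of fetch(): it returns the last-level shadow pte that maps the
 * faulting address, or NULL when the walk was abandoned because the guest
 * pte was not present or changed while the shadow hierarchy was being
 * built.
 */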
/*
 * Page fault handler.  There are several causes for a page fault:
 *   - there is no shadow pte for the guest pte
 *   - write access through a shadow pte marked read only so that we can set
 *     the dirty bit
 *   - write access to a shadow pte marked read only so we can update the page
 *     dirty bitmap, when userspace requests it
 *   - mmio access; in this case we will never install a present shadow pte
 *   - normal guest page fault due to the guest pte marked not present, not
 *     writable, or not executable
 *
 *  Returns: 1 if we need to emulate the instruction, 0 otherwise, or
 *           a negative value on error.
 */
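/*
 * The PFERR_* masks decoded below mirror the hardware page-fault error
 * code layout: bit 0 present, bit 1 write, bit 2 user, bit 3 reserved-bit
 * violation, bit 4 instruction fetch.
 */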
static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
			     u32 error_code)
{
	int write_fault = error_code & PFERR_WRITE_MASK;
	int user_fault = error_code & PFERR_USER_MASK;
	int fetch_fault = error_code & PFERR_FETCH_MASK;
	struct guest_walker walker;
	u64 *sptep;
	int write_pt = 0;
	int r;
	pfn_t pfn;
	int level = PT_PAGE_TABLE_LEVEL;
	unsigned long mmu_seq;

	pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
	kvm_mmu_audit(vcpu, "pre page fault");

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	/*
	 * Look up the guest pte for the faulting address.
	 */
	r = FNAME(walk_addr)(&walker, vcpu, addr, write_fault, user_fault,
			     fetch_fault);

	/*
	 * The page is not mapped by the guest.  Let the guest handle it.
	 */
	if (!r) {
		pgprintk("%s: guest page fault\n", __func__);
		inject_page_fault(vcpu, addr, walker.error_code);
		vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
		return 0;
	}

	if (walker.level >= PT_DIRECTORY_LEVEL) {
		level = min(walker.level, mapping_level(vcpu, walker.gfn));
		walker.gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE(level) - 1);
	}

	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	smp_rmb();
	pfn = gfn_to_pfn(vcpu->kvm, walker.gfn);

	/* mmio */
	if (is_error_pfn(pfn))
		return kvm_handle_bad_page(vcpu->kvm, walker.gfn, pfn);

	spin_lock(&vcpu->kvm->mmu_lock);
	if (mmu_notifier_retry(vcpu, mmu_seq))
		goto out_unlock;
	kvm_mmu_free_some_pages(vcpu);
	sptep = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
			     level, &write_pt, pfn);
	pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __func__,
		 sptep, *sptep, write_pt);

	if (!write_pt)
		vcpu->arch.last_pt_write_count = 0; /* reset fork detector */

	++vcpu->stat.pf_fixed;
	kvm_mmu_audit(vcpu, "post page fault (fixed)");
	spin_unlock(&vcpu->kvm->mmu_lock);

	return write_pt;

out_unlock:
	spin_unlock(&vcpu->kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	return 0;
}
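/*
 * Guest INVLPG handling: zap the last-level shadow pte that maps @gva
 * (only needed on an unsync page; write-protected pages are kept in sync
 * by write interception) and, if a guest pte was located, feed its current
 * value back through kvm_mmu_pte_write() below.
 */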
static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
{
	struct kvm_shadow_walk_iterator iterator;
	struct kvm_mmu_page *sp;
	gpa_t pte_gpa = -1;
	int level;
	u64 *sptep;
	int need_flush = 0;

	spin_lock(&vcpu->kvm->mmu_lock);

	for_each_shadow_entry(vcpu, gva, iterator) {
		level = iterator.level;
		sptep = iterator.sptep;

		sp = page_header(__pa(sptep));
		if (is_last_spte(*sptep, level)) {
			int offset, shift;

			if (!sp->unsync)
				break;

			shift = PAGE_SHIFT -
				  (PT_LEVEL_BITS - PT64_LEVEL_BITS) * level;
			offset = sp->role.quadrant << shift;

			pte_gpa = (sp->gfn << PAGE_SHIFT) + offset;
			pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);

			if (is_shadow_present_pte(*sptep)) {
				rmap_remove(vcpu->kvm, sptep);
				if (is_large_pte(*sptep))
					--vcpu->kvm->stat.lpages;
				need_flush = 1;
			}
			__set_spte(sptep, shadow_trap_nonpresent_pte);
			break;
		}

		if (!is_shadow_present_pte(*sptep) || !sp->unsync_children)
			break;
	}

	if (need_flush)
		kvm_flush_remote_tlbs(vcpu->kvm);

	atomic_inc(&vcpu->kvm->arch.invlpg_counter);

	spin_unlock(&vcpu->kvm->mmu_lock);

	if (pte_gpa == -1)
		return;

	if (mmu_topup_memory_caches(vcpu))
		return;
	kvm_mmu_pte_write(vcpu, pte_gpa, NULL, sizeof(pt_element_t), 0);
}
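/*
 * Translate a guest virtual address to a guest physical address with a
 * software walk; used e.g. by the instruction emulator.  Returns
 * UNMAPPED_GVA and fills *error with the walker's error code when the
 * walk faults.
 */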
static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access,
			       u32 *error)
{
	struct guest_walker walker;
	gpa_t gpa = UNMAPPED_GVA;
	int r;

	r = FNAME(walk_addr)(&walker, vcpu, vaddr,
			     !!(access & PFERR_WRITE_MASK),
			     !!(access & PFERR_USER_MASK),
			     !!(access & PFERR_FETCH_MASK));

	if (r) {
		gpa = gfn_to_gpa(walker.gfn);
		gpa |= vaddr & ~PAGE_MASK;
	} else if (error)
		*error = walker.error_code;

	return gpa;
}
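/*
 * Pre-fill a freshly shadowed page table: entries whose guest ptes are
 * present (or could not be read) become trapping not-present sptes so the
 * first access faults in; the rest become non-trapping not-present sptes.
 */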
static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
				 struct kvm_mmu_page *sp)
{
	int i, j, offset, r;
	pt_element_t pt[256 / sizeof(pt_element_t)];
	gpa_t pte_gpa;

	if (sp->role.direct
	    || (PTTYPE == 32 && sp->role.level > PT_PAGE_TABLE_LEVEL)) {
		nonpaging_prefetch_page(vcpu, sp);
		return;
	}

	pte_gpa = gfn_to_gpa(sp->gfn);
	if (PTTYPE == 32) {
		offset = sp->role.quadrant << PT64_LEVEL_BITS;
		pte_gpa += offset * sizeof(pt_element_t);
	}

	for (i = 0; i < PT64_ENT_PER_PAGE; i += ARRAY_SIZE(pt)) {
		r = kvm_read_guest_atomic(vcpu->kvm, pte_gpa, pt, sizeof pt);
		pte_gpa += ARRAY_SIZE(pt) * sizeof(pt_element_t);
		for (j = 0; j < ARRAY_SIZE(pt); ++j)
			if (r || is_present_gpte(pt[j]))
				sp->spt[i+j] = shadow_trap_nonpresent_pte;
			else
				sp->spt[i+j] = shadow_notrap_nonpresent_pte;
	}
}
/*
 * Using the cached information from sp->gfns is safe because:
 * - The spte has a reference to the struct page, so the pfn for a given gfn
 *   can't change unless all sptes pointing to it are nuked first.
 * - Alias changes zap the entire shadow cache.
 */
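/*
 * sync_page() brings an unsync shadow page back in sync: every mapped spte
 * is re-checked against the current guest pte and either dropped or
 * re-validated via set_spte().
 */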
static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{
	int i, offset, nr_present;
	bool reset_host_protection;
	gpa_t first_pte_gpa;

	offset = nr_present = 0;

	/* direct kvm_mmu_page can not be unsync. */
	BUG_ON(sp->role.direct);

	if (PTTYPE == 32)
		offset = sp->role.quadrant << PT64_LEVEL_BITS;

	first_pte_gpa = gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t);

	for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
		unsigned pte_access;
		pt_element_t gpte;
		gpa_t pte_gpa;
		gfn_t gfn;

		if (!is_shadow_present_pte(sp->spt[i]))
			continue;

		pte_gpa = first_pte_gpa + i * sizeof(pt_element_t);

		if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
					  sizeof(pt_element_t)))
			return -EINVAL;

		gfn = gpte_to_gfn(gpte);
		if (unalias_gfn(vcpu->kvm, gfn) != sp->gfns[i] ||
		    !is_present_gpte(gpte) || !(gpte & PT_ACCESSED_MASK)) {
			u64 nonpresent;

			rmap_remove(vcpu->kvm, &sp->spt[i]);
			if (is_present_gpte(gpte))
				nonpresent = shadow_trap_nonpresent_pte;
			else
				nonpresent = shadow_notrap_nonpresent_pte;
			__set_spte(&sp->spt[i], nonpresent);
			continue;
		}

		nr_present++;
		pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
		if (!(sp->spt[i] & SPTE_HOST_WRITEABLE)) {
			pte_access &= ~ACC_WRITE_MASK;
			reset_host_protection = 0;
		} else {
			reset_host_protection = 1;
		}
		set_spte(vcpu, &sp->spt[i], pte_access, 0, 0,
			 is_dirty_gpte(gpte), PT_PAGE_TABLE_LEVEL, gfn,
			 spte_to_pfn(sp->spt[i]), true, false,
			 reset_host_protection);
	}

	return !nr_present;
}
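/*
 * Undo the template instantiation: every name defined at the top of this
 * file must be undefined so the file can be included again with a
 * different PTTYPE.
 */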
#undef pt_element_t
#undef guest_walker
#undef FNAME
#undef PT_BASE_ADDR_MASK
#undef PT_INDEX
#undef PT_LEVEL_MASK
#undef PT_LVL_ADDR_MASK
#undef PT_LVL_OFFSET_MASK
#undef PT_LEVEL_BITS
#undef PT_MAX_FULL_LEVELS
#undef gpte_to_gfn
#undef gpte_to_gfn_lvl
#undef CMPXCHG