KVM: MMU: Rename variables of type 'struct kvm_mmu_page *'
[pandora-kernel.git] / drivers / kvm / mmu.c
1 /*
2  * Kernel-based Virtual Machine driver for Linux
3  *
4  * This module enables machines with Intel VT-x extensions to run virtual
5  * machines without emulation or binary translation.
6  *
7  * MMU support
8  *
9  * Copyright (C) 2006 Qumranet, Inc.
10  *
11  * Authors:
12  *   Yaniv Kamay  <yaniv@qumranet.com>
13  *   Avi Kivity   <avi@qumranet.com>
14  *
15  * This work is licensed under the terms of the GNU GPL, version 2.  See
16  * the COPYING file in the top-level directory.
17  *
18  */
19
20 #include "vmx.h"
21 #include "kvm.h"
22 #include "x86.h"
23
24 #include <linux/types.h>
25 #include <linux/string.h>
26 #include <linux/mm.h>
27 #include <linux/highmem.h>
28 #include <linux/module.h>
29
30 #include <asm/page.h>
31 #include <asm/cmpxchg.h>
32 #include <asm/io.h>
33
34 #undef MMU_DEBUG
35
36 #undef AUDIT
37
38 #ifdef AUDIT
39 static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg);
40 #else
41 static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg) {}
42 #endif
43
44 #ifdef MMU_DEBUG
45
46 #define pgprintk(x...) do { if (dbg) printk(x); } while (0)
47 #define rmap_printk(x...) do { if (dbg) printk(x); } while (0)
48
49 #else
50
51 #define pgprintk(x...) do { } while (0)
52 #define rmap_printk(x...) do { } while (0)
53
54 #endif
55
56 #if defined(MMU_DEBUG) || defined(AUDIT)
57 static int dbg = 1;
58 #endif
59
60 #ifndef MMU_DEBUG
61 #define ASSERT(x) do { } while (0)
62 #else
63 #define ASSERT(x)                                                       \
64         if (!(x)) {                                                     \
65                 printk(KERN_WARNING "assertion failed %s:%d: %s\n",     \
66                        __FILE__, __LINE__, #x);                         \
67         }
68 #endif
69
70 #define PT64_PT_BITS 9
71 #define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
72 #define PT32_PT_BITS 10
73 #define PT32_ENT_PER_PAGE (1 << PT32_PT_BITS)
74
75 #define PT_WRITABLE_SHIFT 1
76
77 #define PT_PRESENT_MASK (1ULL << 0)
78 #define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
79 #define PT_USER_MASK (1ULL << 2)
80 #define PT_PWT_MASK (1ULL << 3)
81 #define PT_PCD_MASK (1ULL << 4)
82 #define PT_ACCESSED_MASK (1ULL << 5)
83 #define PT_DIRTY_MASK (1ULL << 6)
84 #define PT_PAGE_SIZE_MASK (1ULL << 7)
85 #define PT_PAT_MASK (1ULL << 7)
86 #define PT_GLOBAL_MASK (1ULL << 8)
87 #define PT64_NX_MASK (1ULL << 63)
88
89 #define PT_PAT_SHIFT 7
90 #define PT_DIR_PAT_SHIFT 12
91 #define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT)
92
93 #define PT32_DIR_PSE36_SIZE 4
94 #define PT32_DIR_PSE36_SHIFT 13
95 #define PT32_DIR_PSE36_MASK \
96         (((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)
97
98
99 #define PT_FIRST_AVAIL_BITS_SHIFT 9
100 #define PT64_SECOND_AVAIL_BITS_SHIFT 52
101
102 #define PT_SHADOW_IO_MARK (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)
103
104 #define VALID_PAGE(x) ((x) != INVALID_PAGE)
105
106 #define PT64_LEVEL_BITS 9
107
108 #define PT64_LEVEL_SHIFT(level) \
109                 (PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)
110
111 #define PT64_LEVEL_MASK(level) \
112                 (((1ULL << PT64_LEVEL_BITS) - 1) << PT64_LEVEL_SHIFT(level))
113
114 #define PT64_INDEX(address, level)\
115         (((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))
116
117
118 #define PT32_LEVEL_BITS 10
119
120 #define PT32_LEVEL_SHIFT(level) \
121                 (PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)
122
123 #define PT32_LEVEL_MASK(level) \
124                 (((1ULL << PT32_LEVEL_BITS) - 1) << PT32_LEVEL_SHIFT(level))
125
126 #define PT32_INDEX(address, level)\
127         (((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))
128
129
130 #define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
131 #define PT64_DIR_BASE_ADDR_MASK \
132         (PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))
133
134 #define PT32_BASE_ADDR_MASK PAGE_MASK
135 #define PT32_DIR_BASE_ADDR_MASK \
136         (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))
137
138 #define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \
139                         | PT64_NX_MASK)
140
141 #define PFERR_PRESENT_MASK (1U << 0)
142 #define PFERR_WRITE_MASK (1U << 1)
143 #define PFERR_USER_MASK (1U << 2)
144 #define PFERR_FETCH_MASK (1U << 4)
145
146 #define PT64_ROOT_LEVEL 4
147 #define PT32_ROOT_LEVEL 2
148 #define PT32E_ROOT_LEVEL 3
149
150 #define PT_DIRECTORY_LEVEL 2
151 #define PT_PAGE_TABLE_LEVEL 1
152
153 #define RMAP_EXT 4
154
155 struct kvm_rmap_desc {
156         u64 *shadow_ptes[RMAP_EXT];
157         struct kvm_rmap_desc *more;
158 };
159
160 static struct kmem_cache *pte_chain_cache;
161 static struct kmem_cache *rmap_desc_cache;
162 static struct kmem_cache *mmu_page_header_cache;
163
164 static u64 __read_mostly shadow_trap_nonpresent_pte;
165 static u64 __read_mostly shadow_notrap_nonpresent_pte;
166
167 void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
168 {
169         shadow_trap_nonpresent_pte = trap_pte;
170         shadow_notrap_nonpresent_pte = notrap_pte;
171 }
172 EXPORT_SYMBOL_GPL(kvm_mmu_set_nonpresent_ptes);
173
174 static int is_write_protection(struct kvm_vcpu *vcpu)
175 {
176         return vcpu->cr0 & X86_CR0_WP;
177 }
178
179 static int is_cpuid_PSE36(void)
180 {
181         return 1;
182 }
183
184 static int is_nx(struct kvm_vcpu *vcpu)
185 {
186         return vcpu->shadow_efer & EFER_NX;
187 }
188
189 static int is_present_pte(unsigned long pte)
190 {
191         return pte & PT_PRESENT_MASK;
192 }
193
194 static int is_shadow_present_pte(u64 pte)
195 {
196         pte &= ~PT_SHADOW_IO_MARK;
197         return pte != shadow_trap_nonpresent_pte
198                 && pte != shadow_notrap_nonpresent_pte;
199 }
200
201 static int is_writeble_pte(unsigned long pte)
202 {
203         return pte & PT_WRITABLE_MASK;
204 }
205
206 static int is_dirty_pte(unsigned long pte)
207 {
208         return pte & PT_DIRTY_MASK;
209 }
210
211 static int is_io_pte(unsigned long pte)
212 {
213         return pte & PT_SHADOW_IO_MARK;
214 }
215
216 static int is_rmap_pte(u64 pte)
217 {
218         return pte != shadow_trap_nonpresent_pte
219                 && pte != shadow_notrap_nonpresent_pte;
220 }
221
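/*
 * PSE-36 sketch: bits 13..16 of a 4MB guest pde hold bits 32..35 of the
 * frame's physical address; shifting them left by
 * 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT (= 7) turns them into the
 * corresponding gfn delta.
 */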
222 static gfn_t pse36_gfn_delta(u32 gpte)
223 {
224         int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;
225
226         return (gpte & PT32_DIR_PSE36_MASK) << shift;
227 }
228
229 static void set_shadow_pte(u64 *sptep, u64 spte)
230 {
231 #ifdef CONFIG_X86_64
232         set_64bit((unsigned long *)sptep, spte);
233 #else
234         set_64bit((unsigned long long *)sptep, spte);
235 #endif
236 }
237
238 static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
239                                   struct kmem_cache *base_cache, int min)
240 {
241         void *obj;
242
243         if (cache->nobjs >= min)
244                 return 0;
245         while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
246                 obj = kmem_cache_zalloc(base_cache, GFP_KERNEL);
247                 if (!obj)
248                         return -ENOMEM;
249                 cache->objects[cache->nobjs++] = obj;
250         }
251         return 0;
252 }
253
254 static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
255 {
256         while (mc->nobjs)
257                 kfree(mc->objects[--mc->nobjs]);
258 }
259
260 static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
261                                        int min)
262 {
263         struct page *page;
264
265         if (cache->nobjs >= min)
266                 return 0;
267         while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
268                 page = alloc_page(GFP_KERNEL);
269                 if (!page)
270                         return -ENOMEM;
271                 set_page_private(page, 0);
272                 cache->objects[cache->nobjs++] = page_address(page);
273         }
274         return 0;
275 }
276
277 static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc)
278 {
279         while (mc->nobjs)
280                 free_page((unsigned long)mc->objects[--mc->nobjs]);
281 }
282
283 static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
284 {
285         int r;
286
287         kvm_mmu_free_some_pages(vcpu);
288         r = mmu_topup_memory_cache(&vcpu->mmu_pte_chain_cache,
289                                    pte_chain_cache, 4);
290         if (r)
291                 goto out;
292         r = mmu_topup_memory_cache(&vcpu->mmu_rmap_desc_cache,
293                                    rmap_desc_cache, 1);
294         if (r)
295                 goto out;
296         r = mmu_topup_memory_cache_page(&vcpu->mmu_page_cache, 8);
297         if (r)
298                 goto out;
299         r = mmu_topup_memory_cache(&vcpu->mmu_page_header_cache,
300                                    mmu_page_header_cache, 4);
301 out:
302         return r;
303 }
304
305 static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
306 {
307         mmu_free_memory_cache(&vcpu->mmu_pte_chain_cache);
308         mmu_free_memory_cache(&vcpu->mmu_rmap_desc_cache);
309         mmu_free_memory_cache_page(&vcpu->mmu_page_cache);
310         mmu_free_memory_cache(&vcpu->mmu_page_header_cache);
311 }
312
313 static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
314                                     size_t size)
315 {
316         void *p;
317
318         BUG_ON(!mc->nobjs);
319         p = mc->objects[--mc->nobjs];
320         memset(p, 0, size);
321         return p;
322 }
323
324 static struct kvm_pte_chain *mmu_alloc_pte_chain(struct kvm_vcpu *vcpu)
325 {
326         return mmu_memory_cache_alloc(&vcpu->mmu_pte_chain_cache,
327                                       sizeof(struct kvm_pte_chain));
328 }
329
330 static void mmu_free_pte_chain(struct kvm_pte_chain *pc)
331 {
332         kfree(pc);
333 }
334
335 static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu)
336 {
337         return mmu_memory_cache_alloc(&vcpu->mmu_rmap_desc_cache,
338                                       sizeof(struct kvm_rmap_desc));
339 }
340
341 static void mmu_free_rmap_desc(struct kvm_rmap_desc *rd)
342 {
343         kfree(rd);
344 }
345
346 /*
347  * Take gfn and return the reverse mapping to it.
348  * Note: gfn must be unaliased before this function is called.
349  */
350
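/*
 * For example, rmap_add() below unaliases first:
 *
 *	gfn = unalias_gfn(vcpu->kvm, gfn);
 *	...
 *	rmapp = gfn_to_rmap(vcpu->kvm, gfn);
 */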
351 static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn)
352 {
353         struct kvm_memory_slot *slot;
354
355         slot = gfn_to_memslot(kvm, gfn);
356         return &slot->rmap[gfn - slot->base_gfn];
357 }
358
359 /*
360  * Reverse mapping data structures:
361  *
362  * If rmapp bit zero is zero, then rmapp points to the shadow page table entry
363  * that points to page_address(page).
364  *
365  * If rmapp bit zero is one, then (rmapp & ~1) points to a struct kvm_rmap_desc
366  * containing more mappings.
367  */
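/*
 * Illustrative sketch of the transitions rmap_add() performs below:
 *
 *	0 -> 1 mapping:    *rmapp = (unsigned long)spte;
 *	1 -> many:         desc->shadow_ptes[0] = (u64 *)*rmapp;
 *	                   desc->shadow_ptes[1] = spte;
 *	                   *rmapp = (unsigned long)desc | 1;
 *
 * so bit zero distinguishes a bare spte pointer from a descriptor chain.
 */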
368 static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
369 {
370         struct kvm_mmu_page *sp;
371         struct kvm_rmap_desc *desc;
372         unsigned long *rmapp;
373         int i;
374
375         if (!is_rmap_pte(*spte))
376                 return;
377         gfn = unalias_gfn(vcpu->kvm, gfn);
378         sp = page_header(__pa(spte));
379         sp->gfns[spte - sp->spt] = gfn;
380         rmapp = gfn_to_rmap(vcpu->kvm, gfn);
381         if (!*rmapp) {
382                 rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
383                 *rmapp = (unsigned long)spte;
384         } else if (!(*rmapp & 1)) {
385                 rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte);
386                 desc = mmu_alloc_rmap_desc(vcpu);
387                 desc->shadow_ptes[0] = (u64 *)*rmapp;
388                 desc->shadow_ptes[1] = spte;
389                 *rmapp = (unsigned long)desc | 1;
390         } else {
391                 rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
392                 desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
393                 while (desc->shadow_ptes[RMAP_EXT-1] && desc->more)
394                         desc = desc->more;
395                 if (desc->shadow_ptes[RMAP_EXT-1]) {
396                         desc->more = mmu_alloc_rmap_desc(vcpu);
397                         desc = desc->more;
398                 }
399                 for (i = 0; desc->shadow_ptes[i]; ++i)
400                         ;
401                 desc->shadow_ptes[i] = spte;
402         }
403 }
404
405 static void rmap_desc_remove_entry(unsigned long *rmapp,
406                                    struct kvm_rmap_desc *desc,
407                                    int i,
408                                    struct kvm_rmap_desc *prev_desc)
409 {
410         int j;
411
412         for (j = RMAP_EXT - 1; !desc->shadow_ptes[j] && j > i; --j)
413                 ;
414         desc->shadow_ptes[i] = desc->shadow_ptes[j];
415         desc->shadow_ptes[j] = NULL;
416         if (j != 0)
417                 return;
418         if (!prev_desc && !desc->more)
419                 *rmapp = (unsigned long)desc->shadow_ptes[0];
420         else
421                 if (prev_desc)
422                         prev_desc->more = desc->more;
423                 else
424                         *rmapp = (unsigned long)desc->more | 1;
425         mmu_free_rmap_desc(desc);
426 }
427
428 static void rmap_remove(struct kvm *kvm, u64 *spte)
429 {
430         struct kvm_rmap_desc *desc;
431         struct kvm_rmap_desc *prev_desc;
432         struct kvm_mmu_page *sp;
433         struct page *release_page;
434         unsigned long *rmapp;
435         int i;
436
437         if (!is_rmap_pte(*spte))
438                 return;
439         sp = page_header(__pa(spte));
440         release_page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
441         if (is_writeble_pte(*spte))
442                 kvm_release_page_dirty(release_page);
443         else
444                 kvm_release_page_clean(release_page);
445         rmapp = gfn_to_rmap(kvm, sp->gfns[spte - sp->spt]);
446         if (!*rmapp) {
447                 printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
448                 BUG();
449         } else if (!(*rmapp & 1)) {
450                 rmap_printk("rmap_remove:  %p %llx 1->0\n", spte, *spte);
451                 if ((u64 *)*rmapp != spte) {
452                         printk(KERN_ERR "rmap_remove:  %p %llx 1->BUG\n",
453                                spte, *spte);
454                         BUG();
455                 }
456                 *rmapp = 0;
457         } else {
458                 rmap_printk("rmap_remove:  %p %llx many->many\n", spte, *spte);
459                 desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
460                 prev_desc = NULL;
461                 while (desc) {
462                         for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i)
463                                 if (desc->shadow_ptes[i] == spte) {
464                                         rmap_desc_remove_entry(rmapp,
465                                                                desc, i,
466                                                                prev_desc);
467                                         return;
468                                 }
469                         prev_desc = desc;
470                         desc = desc->more;
471                 }
472                 BUG();
473         }
474 }
475
476 static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
477 {
478         struct kvm_rmap_desc *desc;
479         struct kvm_rmap_desc *prev_desc;
480         u64 *prev_spte;
481         int i;
482
483         if (!*rmapp)
484                 return NULL;
485         else if (!(*rmapp & 1)) {
486                 if (!spte)
487                         return (u64 *)*rmapp;
488                 return NULL;
489         }
490         desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
491         prev_desc = NULL;
492         prev_spte = NULL;
493         while (desc) {
494                 for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i) {
495                         if (prev_spte == spte)
496                                 return desc->shadow_ptes[i];
497                         prev_spte = desc->shadow_ptes[i];
498                 }
499                 desc = desc->more;
500         }
501         return NULL;
502 }
503
504 static void rmap_write_protect(struct kvm *kvm, u64 gfn)
505 {
506         unsigned long *rmapp;
507         u64 *spte;
508
509         gfn = unalias_gfn(kvm, gfn);
510         rmapp = gfn_to_rmap(kvm, gfn);
511
512         spte = rmap_next(kvm, rmapp, NULL);
513         while (spte) {
514                 BUG_ON(!spte);
515                 BUG_ON(!(*spte & PT_PRESENT_MASK));
516                 rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
517                 if (is_writeble_pte(*spte))
518                         set_shadow_pte(spte, *spte & ~PT_WRITABLE_MASK);
519                 kvm_flush_remote_tlbs(kvm);
520                 spte = rmap_next(kvm, rmapp, spte);
521         }
522 }
523
524 #ifdef MMU_DEBUG
525 static int is_empty_shadow_page(u64 *spt)
526 {
527         u64 *pos;
528         u64 *end;
529
530         for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
531                 if ((*pos & ~PT_SHADOW_IO_MARK) != shadow_trap_nonpresent_pte) {
532                         printk(KERN_ERR "%s: %p %llx\n", __FUNCTION__,
533                                pos, *pos);
534                         return 0;
535                 }
536         return 1;
537 }
538 #endif
539
540 static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
541 {
542         ASSERT(is_empty_shadow_page(sp->spt));
543         list_del(&sp->link);
544         __free_page(virt_to_page(sp->spt));
545         __free_page(virt_to_page(sp->gfns));
546         kfree(sp);
547         ++kvm->n_free_mmu_pages;
548 }
549
550 static unsigned kvm_page_table_hashfn(gfn_t gfn)
551 {
552         return gfn;
553 }
554
555 static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
556                                                u64 *parent_pte)
557 {
558         struct kvm_mmu_page *sp;
559
560         if (!vcpu->kvm->n_free_mmu_pages)
561                 return NULL;
562
563         sp = mmu_memory_cache_alloc(&vcpu->mmu_page_header_cache, sizeof *sp);
564         sp->spt = mmu_memory_cache_alloc(&vcpu->mmu_page_cache, PAGE_SIZE);
565         sp->gfns = mmu_memory_cache_alloc(&vcpu->mmu_page_cache, PAGE_SIZE);
566         set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
567         list_add(&sp->link, &vcpu->kvm->active_mmu_pages);
568         ASSERT(is_empty_shadow_page(sp->spt));
569         sp->slot_bitmap = 0;
570         sp->multimapped = 0;
571         sp->parent_pte = parent_pte;
572         --vcpu->kvm->n_free_mmu_pages;
573         return sp;
574 }
575
576 static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
577                                     struct kvm_mmu_page *sp, u64 *parent_pte)
578 {
579         struct kvm_pte_chain *pte_chain;
580         struct hlist_node *node;
581         int i;
582
583         if (!parent_pte)
584                 return;
585         if (!sp->multimapped) {
586                 u64 *old = sp->parent_pte;
587
588                 if (!old) {
589                         sp->parent_pte = parent_pte;
590                         return;
591                 }
592                 sp->multimapped = 1;
593                 pte_chain = mmu_alloc_pte_chain(vcpu);
594                 INIT_HLIST_HEAD(&sp->parent_ptes);
595                 hlist_add_head(&pte_chain->link, &sp->parent_ptes);
596                 pte_chain->parent_ptes[0] = old;
597         }
598         hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link) {
599                 if (pte_chain->parent_ptes[NR_PTE_CHAIN_ENTRIES-1])
600                         continue;
601                 for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i)
602                         if (!pte_chain->parent_ptes[i]) {
603                                 pte_chain->parent_ptes[i] = parent_pte;
604                                 return;
605                         }
606         }
607         pte_chain = mmu_alloc_pte_chain(vcpu);
608         BUG_ON(!pte_chain);
609         hlist_add_head(&pte_chain->link, &sp->parent_ptes);
610         pte_chain->parent_ptes[0] = parent_pte;
611 }
612
613 static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
614                                        u64 *parent_pte)
615 {
616         struct kvm_pte_chain *pte_chain;
617         struct hlist_node *node;
618         int i;
619
620         if (!sp->multimapped) {
621                 BUG_ON(sp->parent_pte != parent_pte);
622                 sp->parent_pte = NULL;
623                 return;
624         }
625         hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
626                 for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
627                         if (!pte_chain->parent_ptes[i])
628                                 break;
629                         if (pte_chain->parent_ptes[i] != parent_pte)
630                                 continue;
631                         while (i + 1 < NR_PTE_CHAIN_ENTRIES
632                                 && pte_chain->parent_ptes[i + 1]) {
633                                 pte_chain->parent_ptes[i]
634                                         = pte_chain->parent_ptes[i + 1];
635                                 ++i;
636                         }
637                         pte_chain->parent_ptes[i] = NULL;
638                         if (i == 0) {
639                                 hlist_del(&pte_chain->link);
640                                 mmu_free_pte_chain(pte_chain);
641                                 if (hlist_empty(&sp->parent_ptes)) {
642                                         sp->multimapped = 0;
643                                         sp->parent_pte = NULL;
644                                 }
645                         }
646                         return;
647                 }
648         BUG();
649 }
650
651 static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn)
652 {
653         unsigned index;
654         struct hlist_head *bucket;
655         struct kvm_mmu_page *sp;
656         struct hlist_node *node;
657
658         pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
659         index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
660         bucket = &kvm->mmu_page_hash[index];
661         hlist_for_each_entry(sp, node, bucket, hash_link)
662                 if (sp->gfn == gfn && !sp->role.metaphysical) {
663                         pgprintk("%s: found role %x\n",
664                                  __FUNCTION__, sp->role.word);
665                         return sp;
666                 }
667         return NULL;
668 }
669
670 static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
671                                              gfn_t gfn,
672                                              gva_t gaddr,
673                                              unsigned level,
674                                              int metaphysical,
675                                              unsigned hugepage_access,
676                                              u64 *parent_pte)
677 {
678         union kvm_mmu_page_role role;
679         unsigned index;
680         unsigned quadrant;
681         struct hlist_head *bucket;
682         struct kvm_mmu_page *sp;
683         struct hlist_node *node;
684
685         role.word = 0;
686         role.glevels = vcpu->mmu.root_level;
687         role.level = level;
688         role.metaphysical = metaphysical;
689         role.hugepage_access = hugepage_access;
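        /*
         * For 32-bit guests one guest page table page spans more virtual
         * address space than a single shadow page, so the same gfn is
         * shadowed by several pages distinguished by role.quadrant:
         * 1 bit (2 quadrants) at the pte level, 2 bits (4 quadrants) at
         * the pde level.
         */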
690         if (vcpu->mmu.root_level <= PT32_ROOT_LEVEL) {
691                 quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
692                 quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
693                 role.quadrant = quadrant;
694         }
695         pgprintk("%s: looking gfn %lx role %x\n", __FUNCTION__,
696                  gfn, role.word);
697         index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
698         bucket = &vcpu->kvm->mmu_page_hash[index];
699         hlist_for_each_entry(sp, node, bucket, hash_link)
700                 if (sp->gfn == gfn && sp->role.word == role.word) {
701                         mmu_page_add_parent_pte(vcpu, sp, parent_pte);
702                         pgprintk("%s: found\n", __FUNCTION__);
703                         return sp;
704                 }
705         sp = kvm_mmu_alloc_page(vcpu, parent_pte);
706         if (!sp)
707                 return sp;
708         pgprintk("%s: adding gfn %lx role %x\n", __FUNCTION__, gfn, role.word);
709         sp->gfn = gfn;
710         sp->role = role;
711         hlist_add_head(&sp->hash_link, bucket);
712         vcpu->mmu.prefetch_page(vcpu, sp);
713         if (!metaphysical)
714                 rmap_write_protect(vcpu->kvm, gfn);
715         return sp;
716 }
717
718 static void kvm_mmu_page_unlink_children(struct kvm *kvm,
719                                          struct kvm_mmu_page *sp)
720 {
721         unsigned i;
722         u64 *pt;
723         u64 ent;
724
725         pt = sp->spt;
726
727         if (sp->role.level == PT_PAGE_TABLE_LEVEL) {
728                 for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
729                         if (is_shadow_present_pte(pt[i]))
730                                 rmap_remove(kvm, &pt[i]);
731                         pt[i] = shadow_trap_nonpresent_pte;
732                 }
733                 kvm_flush_remote_tlbs(kvm);
734                 return;
735         }
736
737         for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
738                 ent = pt[i];
739
740                 pt[i] = shadow_trap_nonpresent_pte;
741                 if (!is_shadow_present_pte(ent))
742                         continue;
743                 ent &= PT64_BASE_ADDR_MASK;
744                 mmu_page_remove_parent_pte(page_header(ent), &pt[i]);
745         }
746         kvm_flush_remote_tlbs(kvm);
747 }
748
749 static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte)
750 {
751         mmu_page_remove_parent_pte(sp, parent_pte);
752 }
753
754 static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm)
755 {
756         int i;
757
758         for (i = 0; i < KVM_MAX_VCPUS; ++i)
759                 if (kvm->vcpus[i])
760                         kvm->vcpus[i]->last_pte_updated = NULL;
761 }
762
763 static void kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
764 {
765         u64 *parent_pte;
766
767         ++kvm->stat.mmu_shadow_zapped;
768         while (sp->multimapped || sp->parent_pte) {
769                 if (!sp->multimapped)
770                         parent_pte = sp->parent_pte;
771                 else {
772                         struct kvm_pte_chain *chain;
773
774                         chain = container_of(sp->parent_ptes.first,
775                                              struct kvm_pte_chain, link);
776                         parent_pte = chain->parent_ptes[0];
777                 }
778                 BUG_ON(!parent_pte);
779                 kvm_mmu_put_page(sp, parent_pte);
780                 set_shadow_pte(parent_pte, shadow_trap_nonpresent_pte);
781         }
782         kvm_mmu_page_unlink_children(kvm, sp);
783         if (!sp->root_count) {
784                 hlist_del(&sp->hash_link);
785                 kvm_mmu_free_page(kvm, sp);
786         } else
787                 list_move(&sp->link, &kvm->active_mmu_pages);
788         kvm_mmu_reset_last_pte_updated(kvm);
789 }
790
791 /*
792  * Change the number of mmu pages allocated to the vm.
793  * Note: if kvm_nr_mmu_pages is too small, you will get a deadlock.
794  */
795 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
796 {
797         /*
798          * If we set the number of mmu pages to be smaller than the
799          * number of active pages, we must free some mmu pages before we
800          * change the value.
801          */
802
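        /*
         * Worked example (hypothetical numbers): with n_alloc_mmu_pages =
         * 1024 and n_free_mmu_pages = 24, 1000 pages are in use.  Shrinking
         * to kvm_nr_mmu_pages = 800 zaps 200 active pages and leaves no
         * free pages; growing to 2048 instead just adds 1024 to the free
         * page count.
         */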
803         if ((kvm->n_alloc_mmu_pages - kvm->n_free_mmu_pages) >
804             kvm_nr_mmu_pages) {
805                 int n_used_mmu_pages = kvm->n_alloc_mmu_pages
806                                        - kvm->n_free_mmu_pages;
807
808                 while (n_used_mmu_pages > kvm_nr_mmu_pages) {
809                         struct kvm_mmu_page *page;
810
811                         page = container_of(kvm->active_mmu_pages.prev,
812                                             struct kvm_mmu_page, link);
813                         kvm_mmu_zap_page(kvm, page);
814                         n_used_mmu_pages--;
815                 }
816                 kvm->n_free_mmu_pages = 0;
817         }
818         else
819                 kvm->n_free_mmu_pages += kvm_nr_mmu_pages
820                                          - kvm->n_alloc_mmu_pages;
821
822         kvm->n_alloc_mmu_pages = kvm_nr_mmu_pages;
823 }
824
825 static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
826 {
827         unsigned index;
828         struct hlist_head *bucket;
829         struct kvm_mmu_page *sp;
830         struct hlist_node *node, *n;
831         int r;
832
833         pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
834         r = 0;
835         index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
836         bucket = &kvm->mmu_page_hash[index];
837         hlist_for_each_entry_safe(sp, node, n, bucket, hash_link)
838                 if (sp->gfn == gfn && !sp->role.metaphysical) {
839                         pgprintk("%s: gfn %lx role %x\n", __FUNCTION__, gfn,
840                                  sp->role.word);
841                         kvm_mmu_zap_page(kvm, sp);
842                         r = 1;
843                 }
844         return r;
845 }
846
847 static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
848 {
849         struct kvm_mmu_page *sp;
850
851         while ((sp = kvm_mmu_lookup_page(kvm, gfn)) != NULL) {
852                 pgprintk("%s: zap %lx %x\n", __FUNCTION__, gfn, sp->role.word);
853                 kvm_mmu_zap_page(kvm, sp);
854         }
855 }
856
857 static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn)
858 {
859         int slot = memslot_id(kvm, gfn_to_memslot(kvm, gfn));
860         struct kvm_mmu_page *sp = page_header(__pa(pte));
861
862         __set_bit(slot, &sp->slot_bitmap);
863 }
864
865 struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
866 {
867         gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);
868
869         if (gpa == UNMAPPED_GVA)
870                 return NULL;
871         return gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
872 }
873
874 static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
875 {
876 }
877
878 static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, struct page *page)
879 {
880         int level = PT32E_ROOT_LEVEL;
881         hpa_t table_addr = vcpu->mmu.root_hpa;
882
883         for (; ; level--) {
884                 u32 index = PT64_INDEX(v, level);
885                 u64 *table;
886                 u64 pte;
887
888                 ASSERT(VALID_PAGE(table_addr));
889                 table = __va(table_addr);
890
891                 if (level == 1) {
892                         int was_rmapped;
893
894                         pte = table[index];
895                         was_rmapped = is_rmap_pte(pte);
896                         if (is_shadow_present_pte(pte) && is_writeble_pte(pte)) {
897                                 kvm_release_page_clean(page);
898                                 return 0;
899                         }
900                         mark_page_dirty(vcpu->kvm, v >> PAGE_SHIFT);
901                         page_header_update_slot(vcpu->kvm, table,
902                                                 v >> PAGE_SHIFT);
903                         table[index] = page_to_phys(page)
904                                 | PT_PRESENT_MASK | PT_WRITABLE_MASK
905                                 | PT_USER_MASK;
906                         if (!was_rmapped)
907                                 rmap_add(vcpu, &table[index], v >> PAGE_SHIFT);
908                         else
909                                 kvm_release_page_clean(page);
910
911                         return 0;
912                 }
913
914                 if (table[index] == shadow_trap_nonpresent_pte) {
915                         struct kvm_mmu_page *new_table;
916                         gfn_t pseudo_gfn;
917
918                         pseudo_gfn = (v & PT64_DIR_BASE_ADDR_MASK)
919                                 >> PAGE_SHIFT;
920                         new_table = kvm_mmu_get_page(vcpu, pseudo_gfn,
921                                                      v, level - 1,
922                                                      1, 3, &table[index]);
923                         if (!new_table) {
924                                 pgprintk("nonpaging_map: ENOMEM\n");
925                                 kvm_release_page_clean(page);
926                                 return -ENOMEM;
927                         }
928
929                         table[index] = __pa(new_table->spt) | PT_PRESENT_MASK
930                                 | PT_WRITABLE_MASK | PT_USER_MASK;
931                 }
932                 table_addr = table[index] & PT64_BASE_ADDR_MASK;
933         }
934 }
935
936 static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu,
937                                     struct kvm_mmu_page *sp)
938 {
939         int i;
940
941         for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
942                 sp->spt[i] = shadow_trap_nonpresent_pte;
943 }
944
945 static void mmu_free_roots(struct kvm_vcpu *vcpu)
946 {
947         int i;
948         struct kvm_mmu_page *sp;
949
950         if (!VALID_PAGE(vcpu->mmu.root_hpa))
951                 return;
952 #ifdef CONFIG_X86_64
953         if (vcpu->mmu.shadow_root_level == PT64_ROOT_LEVEL) {
954                 hpa_t root = vcpu->mmu.root_hpa;
955
956                 sp = page_header(root);
957                 --sp->root_count;
958                 vcpu->mmu.root_hpa = INVALID_PAGE;
959                 return;
960         }
961 #endif
962         for (i = 0; i < 4; ++i) {
963                 hpa_t root = vcpu->mmu.pae_root[i];
964
965                 if (root) {
966                         root &= PT64_BASE_ADDR_MASK;
967                         sp = page_header(root);
968                         --sp->root_count;
969                 }
970                 vcpu->mmu.pae_root[i] = INVALID_PAGE;
971         }
972         vcpu->mmu.root_hpa = INVALID_PAGE;
973 }
974
975 static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
976 {
977         int i;
978         gfn_t root_gfn;
979         struct kvm_mmu_page *sp;
980
981         root_gfn = vcpu->cr3 >> PAGE_SHIFT;
982
983 #ifdef CONFIG_X86_64
984         if (vcpu->mmu.shadow_root_level == PT64_ROOT_LEVEL) {
985                 hpa_t root = vcpu->mmu.root_hpa;
986
987                 ASSERT(!VALID_PAGE(root));
988                 sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
989                                       PT64_ROOT_LEVEL, 0, 0, NULL);
990                 root = __pa(sp->spt);
991                 ++sp->root_count;
992                 vcpu->mmu.root_hpa = root;
993                 return;
994         }
995 #endif
996         for (i = 0; i < 4; ++i) {
997                 hpa_t root = vcpu->mmu.pae_root[i];
998
999                 ASSERT(!VALID_PAGE(root));
1000                 if (vcpu->mmu.root_level == PT32E_ROOT_LEVEL) {
1001                         if (!is_present_pte(vcpu->pdptrs[i])) {
1002                                 vcpu->mmu.pae_root[i] = 0;
1003                                 continue;
1004                         }
1005                         root_gfn = vcpu->pdptrs[i] >> PAGE_SHIFT;
1006                 } else if (vcpu->mmu.root_level == 0)
1007                         root_gfn = 0;
1008                 sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
1009                                       PT32_ROOT_LEVEL, !is_paging(vcpu),
1010                                       0, NULL);
1011                 root = __pa(sp->spt);
1012                 ++sp->root_count;
1013                 vcpu->mmu.pae_root[i] = root | PT_PRESENT_MASK;
1014         }
1015         vcpu->mmu.root_hpa = __pa(vcpu->mmu.pae_root);
1016 }
1017
1018 static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
1019 {
1020         return vaddr;
1021 }
1022
1023 static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
1024                                 u32 error_code)
1025 {
1026         struct page *page;
1027         int r;
1028
1029         r = mmu_topup_memory_caches(vcpu);
1030         if (r)
1031                 return r;
1032
1033         ASSERT(vcpu);
1034         ASSERT(VALID_PAGE(vcpu->mmu.root_hpa));
1035
1036         page = gfn_to_page(vcpu->kvm, gva >> PAGE_SHIFT);
1037
1038         if (is_error_page(page)) {
1039                 kvm_release_page_clean(page);
1040                 return 1;
1041         }
1042
1043         return nonpaging_map(vcpu, gva & PAGE_MASK, page);
1044 }
1045
1046 static void nonpaging_free(struct kvm_vcpu *vcpu)
1047 {
1048         mmu_free_roots(vcpu);
1049 }
1050
1051 static int nonpaging_init_context(struct kvm_vcpu *vcpu)
1052 {
1053         struct kvm_mmu *context = &vcpu->mmu;
1054
1055         context->new_cr3 = nonpaging_new_cr3;
1056         context->page_fault = nonpaging_page_fault;
1057         context->gva_to_gpa = nonpaging_gva_to_gpa;
1058         context->free = nonpaging_free;
1059         context->prefetch_page = nonpaging_prefetch_page;
1060         context->root_level = 0;
1061         context->shadow_root_level = PT32E_ROOT_LEVEL;
1062         context->root_hpa = INVALID_PAGE;
1063         return 0;
1064 }
1065
1066 void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
1067 {
1068         ++vcpu->stat.tlb_flush;
1069         kvm_x86_ops->tlb_flush(vcpu);
1070 }
1071
1072 static void paging_new_cr3(struct kvm_vcpu *vcpu)
1073 {
1074         pgprintk("%s: cr3 %lx\n", __FUNCTION__, vcpu->cr3);
1075         mmu_free_roots(vcpu);
1076 }
1077
1078 static void inject_page_fault(struct kvm_vcpu *vcpu,
1079                               u64 addr,
1080                               u32 err_code)
1081 {
1082         kvm_x86_ops->inject_page_fault(vcpu, addr, err_code);
1083 }
1084
1085 static void paging_free(struct kvm_vcpu *vcpu)
1086 {
1087         nonpaging_free(vcpu);
1088 }
1089
1090 #define PTTYPE 64
1091 #include "paging_tmpl.h"
1092 #undef PTTYPE
1093
1094 #define PTTYPE 32
1095 #include "paging_tmpl.h"
1096 #undef PTTYPE
1097
1098 static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
1099 {
1100         struct kvm_mmu *context = &vcpu->mmu;
1101
1102         ASSERT(is_pae(vcpu));
1103         context->new_cr3 = paging_new_cr3;
1104         context->page_fault = paging64_page_fault;
1105         context->gva_to_gpa = paging64_gva_to_gpa;
1106         context->prefetch_page = paging64_prefetch_page;
1107         context->free = paging_free;
1108         context->root_level = level;
1109         context->shadow_root_level = level;
1110         context->root_hpa = INVALID_PAGE;
1111         return 0;
1112 }
1113
1114 static int paging64_init_context(struct kvm_vcpu *vcpu)
1115 {
1116         return paging64_init_context_common(vcpu, PT64_ROOT_LEVEL);
1117 }
1118
1119 static int paging32_init_context(struct kvm_vcpu *vcpu)
1120 {
1121         struct kvm_mmu *context = &vcpu->mmu;
1122
1123         context->new_cr3 = paging_new_cr3;
1124         context->page_fault = paging32_page_fault;
1125         context->gva_to_gpa = paging32_gva_to_gpa;
1126         context->free = paging_free;
1127         context->prefetch_page = paging32_prefetch_page;
1128         context->root_level = PT32_ROOT_LEVEL;
1129         context->shadow_root_level = PT32E_ROOT_LEVEL;
1130         context->root_hpa = INVALID_PAGE;
1131         return 0;
1132 }
1133
1134 static int paging32E_init_context(struct kvm_vcpu *vcpu)
1135 {
1136         return paging64_init_context_common(vcpu, PT32E_ROOT_LEVEL);
1137 }
1138
1139 static int init_kvm_mmu(struct kvm_vcpu *vcpu)
1140 {
1141         ASSERT(vcpu);
1142         ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
1143
1144         if (!is_paging(vcpu))
1145                 return nonpaging_init_context(vcpu);
1146         else if (is_long_mode(vcpu))
1147                 return paging64_init_context(vcpu);
1148         else if (is_pae(vcpu))
1149                 return paging32E_init_context(vcpu);
1150         else
1151                 return paging32_init_context(vcpu);
1152 }
1153
1154 static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
1155 {
1156         ASSERT(vcpu);
1157         if (VALID_PAGE(vcpu->mmu.root_hpa)) {
1158                 vcpu->mmu.free(vcpu);
1159                 vcpu->mmu.root_hpa = INVALID_PAGE;
1160         }
1161 }
1162
1163 int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
1164 {
1165         destroy_kvm_mmu(vcpu);
1166         return init_kvm_mmu(vcpu);
1167 }
1168 EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);
1169
1170 int kvm_mmu_load(struct kvm_vcpu *vcpu)
1171 {
1172         int r;
1173
1174         mutex_lock(&vcpu->kvm->lock);
1175         r = mmu_topup_memory_caches(vcpu);
1176         if (r)
1177                 goto out;
1178         mmu_alloc_roots(vcpu);
1179         kvm_x86_ops->set_cr3(vcpu, vcpu->mmu.root_hpa);
1180         kvm_mmu_flush_tlb(vcpu);
1181 out:
1182         mutex_unlock(&vcpu->kvm->lock);
1183         return r;
1184 }
1185 EXPORT_SYMBOL_GPL(kvm_mmu_load);
1186
1187 void kvm_mmu_unload(struct kvm_vcpu *vcpu)
1188 {
1189         mmu_free_roots(vcpu);
1190 }
1191
1192 static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
1193                                   struct kvm_mmu_page *sp,
1194                                   u64 *spte)
1195 {
1196         u64 pte;
1197         struct kvm_mmu_page *child;
1198
1199         pte = *spte;
1200         if (is_shadow_present_pte(pte)) {
1201                 if (sp->role.level == PT_PAGE_TABLE_LEVEL)
1202                         rmap_remove(vcpu->kvm, spte);
1203                 else {
1204                         child = page_header(pte & PT64_BASE_ADDR_MASK);
1205                         mmu_page_remove_parent_pte(child, spte);
1206                 }
1207         }
1208         set_shadow_pte(spte, shadow_trap_nonpresent_pte);
1209 }
1210
1211 static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
1212                                   struct kvm_mmu_page *sp,
1213                                   u64 *spte,
1214                                   const void *new, int bytes,
1215                                   int offset_in_pte)
1216 {
1217         if (sp->role.level != PT_PAGE_TABLE_LEVEL) {
1218                 ++vcpu->kvm->stat.mmu_pde_zapped;
1219                 return;
1220         }
1221
1222         ++vcpu->kvm->stat.mmu_pte_updated;
1223         if (sp->role.glevels == PT32_ROOT_LEVEL)
1224                 paging32_update_pte(vcpu, sp, spte, new, bytes, offset_in_pte);
1225         else
1226                 paging64_update_pte(vcpu, sp, spte, new, bytes, offset_in_pte);
1227 }
1228
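/*
 * A remote TLB flush is needed only when another vcpu could still be
 * using a translation the new spte no longer grants: the old pte was
 * present and either became non-present, changed its target frame, or
 * lost a permission bit (NX is flipped first because it is a "deny" bit).
 */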
1229 static bool need_remote_flush(u64 old, u64 new)
1230 {
1231         if (!is_shadow_present_pte(old))
1232                 return false;
1233         if (!is_shadow_present_pte(new))
1234                 return true;
1235         if ((old ^ new) & PT64_BASE_ADDR_MASK)
1236                 return true;
1237         old ^= PT64_NX_MASK;
1238         new ^= PT64_NX_MASK;
1239         return (old & ~new & PT64_PERM_MASK) != 0;
1240 }
1241
1242 static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, u64 old, u64 new)
1243 {
1244         if (need_remote_flush(old, new))
1245                 kvm_flush_remote_tlbs(vcpu->kvm);
1246         else
1247                 kvm_mmu_flush_tlb(vcpu);
1248 }
1249
1250 static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
1251 {
1252         u64 *spte = vcpu->last_pte_updated;
1253
1254         return !!(spte && (*spte & PT_ACCESSED_MASK));
1255 }
1256
1257 void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
1258                        const u8 *new, int bytes)
1259 {
1260         gfn_t gfn = gpa >> PAGE_SHIFT;
1261         struct kvm_mmu_page *sp;
1262         struct hlist_node *node, *n;
1263         struct hlist_head *bucket;
1264         unsigned index;
1265         u64 entry;
1266         u64 *spte;
1267         unsigned offset = offset_in_page(gpa);
1268         unsigned pte_size;
1269         unsigned page_offset;
1270         unsigned misaligned;
1271         unsigned quadrant;
1272         int level;
1273         int flooded = 0;
1274         int npte;
1275
1276         pgprintk("%s: gpa %llx bytes %d\n", __FUNCTION__, gpa, bytes);
1277         ++vcpu->kvm->stat.mmu_pte_write;
1278         kvm_mmu_audit(vcpu, "pre pte write");
1279         if (gfn == vcpu->last_pt_write_gfn
1280             && !last_updated_pte_accessed(vcpu)) {
1281                 ++vcpu->last_pt_write_count;
1282                 if (vcpu->last_pt_write_count >= 3)
1283                         flooded = 1;
1284         } else {
1285                 vcpu->last_pt_write_gfn = gfn;
1286                 vcpu->last_pt_write_count = 1;
1287                 vcpu->last_pte_updated = NULL;
1288         }
1289         index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
1290         bucket = &vcpu->kvm->mmu_page_hash[index];
1291         hlist_for_each_entry_safe(sp, node, n, bucket, hash_link) {
1292                 if (sp->gfn != gfn || sp->role.metaphysical)
1293                         continue;
1294                 pte_size = sp->role.glevels == PT32_ROOT_LEVEL ? 4 : 8;
1295                 misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
1296                 misaligned |= bytes < 4;
1297                 if (misaligned || flooded) {
1298                         /*
1299                          * Misaligned accesses are too much trouble to fix
1300                          * up; also, they usually indicate a page is not used
1301                          * as a page table.
1302                          *
1303                          * If we're seeing too many writes to a page,
1304                          * it may no longer be a page table, or we may be
1305                          * forking, in which case it is better to unmap the
1306                          * page.
1307                          */
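                        /*
                         * E.g. (illustrative numbers) a 4-byte write at
                         * offset 0x6 with 8-byte sptes touches bytes
                         * 0x6..0x9, so (0x6 ^ 0x9) & ~7 == 0x8 != 0 and
                         * the access counts as misaligned.
                         */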
1308                         pgprintk("misaligned: gpa %llx bytes %d role %x\n",
1309                                  gpa, bytes, sp->role.word);
1310                         kvm_mmu_zap_page(vcpu->kvm, sp);
1311                         ++vcpu->kvm->stat.mmu_flooded;
1312                         continue;
1313                 }
1314                 page_offset = offset;
1315                 level = sp->role.level;
1316                 npte = 1;
1317                 if (sp->role.glevels == PT32_ROOT_LEVEL) {
1318                         page_offset <<= 1;      /* 32->64 */
1319                         /*
1320                          * A 32-bit pde maps 4MB while the shadow pdes map
1321                          * only 2MB.  So we need to double the offset again
1322                          * and zap two pdes instead of one.
1323                          */
1324                         if (level == PT32_ROOT_LEVEL) {
1325                                 page_offset &= ~7; /* kill rounding error */
1326                                 page_offset <<= 1;
1327                                 npte = 2;
1328                         }
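                        /*
                         * Worked example (illustrative): a write to 32-bit
                         * guest pde 5 (offset 0x14) covers the 4MB region
                         * at 20MB; after the shifts above page_offset is
                         * 0x50, i.e. the two 2MB shadow pdes at offsets
                         * 0x50 and 0x58.
                         */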
1329                         quadrant = page_offset >> PAGE_SHIFT;
1330                         page_offset &= ~PAGE_MASK;
1331                         if (quadrant != sp->role.quadrant)
1332                                 continue;
1333                 }
1334                 spte = &sp->spt[page_offset / sizeof(*spte)];
1335                 while (npte--) {
1336                         entry = *spte;
1337                         mmu_pte_write_zap_pte(vcpu, sp, spte);
1338                         mmu_pte_write_new_pte(vcpu, sp, spte, new, bytes,
1339                                               page_offset & (pte_size - 1));
1340                         mmu_pte_write_flush_tlb(vcpu, entry, *spte);
1341                         ++spte;
1342                 }
1343         }
1344         kvm_mmu_audit(vcpu, "post pte write");
1345 }
1346
1347 int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
1348 {
1349         gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);
1350
1351         return kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
1352 }
1353
1354 void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
1355 {
1356         while (vcpu->kvm->n_free_mmu_pages < KVM_REFILL_PAGES) {
1357                 struct kvm_mmu_page *sp;
1358
1359                 sp = container_of(vcpu->kvm->active_mmu_pages.prev,
1360                                   struct kvm_mmu_page, link);
1361                 kvm_mmu_zap_page(vcpu->kvm, sp);
1362                 ++vcpu->kvm->stat.mmu_recycled;
1363         }
1364 }
1365
1366 int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
1367 {
1368         int r;
1369         enum emulation_result er;
1370
1371         mutex_lock(&vcpu->kvm->lock);
1372         r = vcpu->mmu.page_fault(vcpu, cr2, error_code);
1373         if (r < 0)
1374                 goto out;
1375
1376         if (!r) {
1377                 r = 1;
1378                 goto out;
1379         }
1380
1381         r = mmu_topup_memory_caches(vcpu);
1382         if (r)
1383                 goto out;
1384
1385         er = emulate_instruction(vcpu, vcpu->run, cr2, error_code, 0);
1386         mutex_unlock(&vcpu->kvm->lock);
1387
1388         switch (er) {
1389         case EMULATE_DONE:
1390                 return 1;
1391         case EMULATE_DO_MMIO:
1392                 ++vcpu->stat.mmio_exits;
1393                 return 0;
1394         case EMULATE_FAIL:
1395                 kvm_report_emulation_failure(vcpu, "pagetable");
1396                 return 1;
1397         default:
1398                 BUG();
1399         }
1400 out:
1401         mutex_unlock(&vcpu->kvm->lock);
1402         return r;
1403 }
1404 EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
1405
1406 static void free_mmu_pages(struct kvm_vcpu *vcpu)
1407 {
1408         struct kvm_mmu_page *sp;
1409
1410         while (!list_empty(&vcpu->kvm->active_mmu_pages)) {
1411                 sp = container_of(vcpu->kvm->active_mmu_pages.next,
1412                                   struct kvm_mmu_page, link);
1413                 kvm_mmu_zap_page(vcpu->kvm, sp);
1414         }
1415         free_page((unsigned long)vcpu->mmu.pae_root);
1416 }
1417
1418 static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
1419 {
1420         struct page *page;
1421         int i;
1422
1423         ASSERT(vcpu);
1424
1425         if (vcpu->kvm->n_requested_mmu_pages)
1426                 vcpu->kvm->n_free_mmu_pages = vcpu->kvm->n_requested_mmu_pages;
1427         else
1428                 vcpu->kvm->n_free_mmu_pages = vcpu->kvm->n_alloc_mmu_pages;
1429         /*
1430          * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
1431          * Therefore we need to allocate shadow page tables in the first
1432          * 4GB of memory, which happens to fit the DMA32 zone.
1433          */
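        /*
         * (pae_root is what mmu_alloc_roots() turns into mmu.root_hpa and
         * kvm_mmu_load() hands to kvm_x86_ops->set_cr3(), hence the
         * below-4GB placement.)
         */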
1434         page = alloc_page(GFP_KERNEL | __GFP_DMA32);
1435         if (!page)
1436                 goto error_1;
1437         vcpu->mmu.pae_root = page_address(page);
1438         for (i = 0; i < 4; ++i)
1439                 vcpu->mmu.pae_root[i] = INVALID_PAGE;
1440
1441         return 0;
1442
1443 error_1:
1444         free_mmu_pages(vcpu);
1445         return -ENOMEM;
1446 }
1447
1448 int kvm_mmu_create(struct kvm_vcpu *vcpu)
1449 {
1450         ASSERT(vcpu);
1451         ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
1452
1453         return alloc_mmu_pages(vcpu);
1454 }
1455
1456 int kvm_mmu_setup(struct kvm_vcpu *vcpu)
1457 {
1458         ASSERT(vcpu);
1459         ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
1460
1461         return init_kvm_mmu(vcpu);
1462 }
1463
1464 void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
1465 {
1466         ASSERT(vcpu);
1467
1468         destroy_kvm_mmu(vcpu);
1469         free_mmu_pages(vcpu);
1470         mmu_free_memory_caches(vcpu);
1471 }
1472
1473 void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
1474 {
1475         struct kvm_mmu_page *sp;
1476
1477         list_for_each_entry(sp, &kvm->active_mmu_pages, link) {
1478                 int i;
1479                 u64 *pt;
1480
1481                 if (!test_bit(slot, &sp->slot_bitmap))
1482                         continue;
1483
1484                 pt = sp->spt;
1485                 for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
1486                         /* avoid RMW */
1487                         if (pt[i] & PT_WRITABLE_MASK)
1488                                 pt[i] &= ~PT_WRITABLE_MASK;
1489         }
1490 }
1491
1492 void kvm_mmu_zap_all(struct kvm *kvm)
1493 {
1494         struct kvm_mmu_page *sp, *node;
1495
1496         list_for_each_entry_safe(sp, node, &kvm->active_mmu_pages, link)
1497                 kvm_mmu_zap_page(kvm, sp);
1498
1499         kvm_flush_remote_tlbs(kvm);
1500 }
1501
1502 void kvm_mmu_module_exit(void)
1503 {
1504         if (pte_chain_cache)
1505                 kmem_cache_destroy(pte_chain_cache);
1506         if (rmap_desc_cache)
1507                 kmem_cache_destroy(rmap_desc_cache);
1508         if (mmu_page_header_cache)
1509                 kmem_cache_destroy(mmu_page_header_cache);
1510 }
1511
1512 int kvm_mmu_module_init(void)
1513 {
1514         pte_chain_cache = kmem_cache_create("kvm_pte_chain",
1515                                             sizeof(struct kvm_pte_chain),
1516                                             0, 0, NULL);
1517         if (!pte_chain_cache)
1518                 goto nomem;
1519         rmap_desc_cache = kmem_cache_create("kvm_rmap_desc",
1520                                             sizeof(struct kvm_rmap_desc),
1521                                             0, 0, NULL);
1522         if (!rmap_desc_cache)
1523                 goto nomem;
1524
1525         mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
1526                                                   sizeof(struct kvm_mmu_page),
1527                                                   0, 0, NULL);
1528         if (!mmu_page_header_cache)
1529                 goto nomem;
1530
1531         return 0;
1532
1533 nomem:
1534         kvm_mmu_module_exit();
1535         return -ENOMEM;
1536 }
1537
1538 /*
1539  * Calculate mmu pages needed for kvm.
1540  */
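/*
 * For example, if KVM_PERMILLE_MMU_PAGES were 20, a guest with 131072
 * memslot pages (512MB) would get max(2621, KVM_MIN_ALLOC_MMU_PAGES)
 * mmu pages.
 */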
1541 unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
1542 {
1543         int i;
1544         unsigned int nr_mmu_pages;
1545         unsigned int  nr_pages = 0;
1546
1547         for (i = 0; i < kvm->nmemslots; i++)
1548                 nr_pages += kvm->memslots[i].npages;
1549
1550         nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
1551         nr_mmu_pages = max(nr_mmu_pages,
1552                         (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);
1553
1554         return nr_mmu_pages;
1555 }
1556
1557 #ifdef AUDIT
1558
1559 static const char *audit_msg;
1560
1561 static gva_t canonicalize(gva_t gva)
1562 {
1563 #ifdef CONFIG_X86_64
1564         gva = (long long)(gva << 16) >> 16;
1565 #endif
1566         return gva;
1567 }
1568
1569 static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
1570                                 gva_t va, int level)
1571 {
1572         u64 *pt = __va(page_pte & PT64_BASE_ADDR_MASK);
1573         int i;
1574         gva_t va_delta = 1ul << (PAGE_SHIFT + 9 * (level - 1));
1575
1576         for (i = 0; i < PT64_ENT_PER_PAGE; ++i, va += va_delta) {
1577                 u64 ent = pt[i];
1578
1579                 if (ent == shadow_trap_nonpresent_pte)
1580                         continue;
1581
1582                 va = canonicalize(va);
1583                 if (level > 1) {
1584                         if (ent == shadow_notrap_nonpresent_pte)
1585                                 printk(KERN_ERR "audit: (%s) nontrapping pte"
1586                                        " in nonleaf level: levels %d gva %lx"
1587                                        " level %d pte %llx\n", audit_msg,
1588                                        vcpu->mmu.root_level, va, level, ent);
1589
1590                         audit_mappings_page(vcpu, ent, va, level - 1);
1591                 } else {
1592                         gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, va);
1593                         struct page *page = gpa_to_page(vcpu, gpa);
1594                         hpa_t hpa = page_to_phys(page);
1595
1596                         if (is_shadow_present_pte(ent)
1597                             && (ent & PT64_BASE_ADDR_MASK) != hpa)
1598                                 printk(KERN_ERR "xx audit error: (%s) levels %d"
1599                                        " gva %lx gpa %llx hpa %llx ent %llx %d\n",
1600                                        audit_msg, vcpu->mmu.root_level,
1601                                        va, gpa, hpa, ent,
1602                                        is_shadow_present_pte(ent));
1603                         else if (ent == shadow_notrap_nonpresent_pte
1604                                  && !is_error_hpa(hpa))
1605                                 printk(KERN_ERR "audit: (%s) notrap shadow,"
1606                                        " valid guest gva %lx\n", audit_msg, va);
1607                         kvm_release_page_clean(page);
1608
1609                 }
1610         }
1611 }
1612
1613 static void audit_mappings(struct kvm_vcpu *vcpu)
1614 {
1615         unsigned i;
1616
1617         if (vcpu->mmu.root_level == 4)
1618                 audit_mappings_page(vcpu, vcpu->mmu.root_hpa, 0, 4);
1619         else
1620                 for (i = 0; i < 4; ++i)
1621                         if (vcpu->mmu.pae_root[i] & PT_PRESENT_MASK)
1622                                 audit_mappings_page(vcpu,
1623                                                     vcpu->mmu.pae_root[i],
1624                                                     i << 30,
1625                                                     2);
1626 }
1627
1628 static int count_rmaps(struct kvm_vcpu *vcpu)
1629 {
1630         int nmaps = 0;
1631         int i, j, k;
1632
1633         for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
1634                 struct kvm_memory_slot *m = &vcpu->kvm->memslots[i];
1635                 struct kvm_rmap_desc *d;
1636
1637                 for (j = 0; j < m->npages; ++j) {
1638                         unsigned long *rmapp = &m->rmap[j];
1639
1640                         if (!*rmapp)
1641                                 continue;
1642                         if (!(*rmapp & 1)) {
1643                                 ++nmaps;
1644                                 continue;
1645                         }
1646                         d = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
1647                         while (d) {
1648                                 for (k = 0; k < RMAP_EXT; ++k)
1649                                         if (d->shadow_ptes[k])
1650                                                 ++nmaps;
1651                                         else
1652                                                 break;
1653                                 d = d->more;
1654                         }
1655                 }
1656         }
1657         return nmaps;
1658 }
1659
1660 static int count_writable_mappings(struct kvm_vcpu *vcpu)
1661 {
1662         int nmaps = 0;
1663         struct kvm_mmu_page *sp;
1664         int i;
1665
1666         list_for_each_entry(sp, &vcpu->kvm->active_mmu_pages, link) {
1667                 u64 *pt = sp->spt;
1668
1669                 if (sp->role.level != PT_PAGE_TABLE_LEVEL)
1670                         continue;
1671
1672                 for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
1673                         u64 ent = pt[i];
1674
1675                         if (!(ent & PT_PRESENT_MASK))
1676                                 continue;
1677                         if (!(ent & PT_WRITABLE_MASK))
1678                                 continue;
1679                         ++nmaps;
1680                 }
1681         }
1682         return nmaps;
1683 }
1684
1685 static void audit_rmap(struct kvm_vcpu *vcpu)
1686 {
1687         int n_rmap = count_rmaps(vcpu);
1688         int n_actual = count_writable_mappings(vcpu);
1689
1690         if (n_rmap != n_actual)
1691                 printk(KERN_ERR "%s: (%s) rmap %d actual %d\n",
1692                        __FUNCTION__, audit_msg, n_rmap, n_actual);
1693 }
1694
1695 static void audit_write_protection(struct kvm_vcpu *vcpu)
1696 {
1697         struct kvm_mmu_page *sp;
1698         struct kvm_memory_slot *slot;
1699         unsigned long *rmapp;
1700         gfn_t gfn;
1701
1702         list_for_each_entry(sp, &vcpu->kvm->active_mmu_pages, link) {
1703                 if (sp->role.metaphysical)
1704                         continue;
1705
1706                 slot = gfn_to_memslot(vcpu->kvm, sp->gfn);
1707                 gfn = unalias_gfn(vcpu->kvm, sp->gfn);
1708                 rmapp = &slot->rmap[gfn - slot->base_gfn];
1709                 if (*rmapp)
1710                         printk(KERN_ERR "%s: (%s) shadow page has writable"
1711                                " mappings: gfn %lx role %x\n",
1712                                __FUNCTION__, audit_msg, sp->gfn,
1713                                sp->role.word);
1714         }
1715 }
1716
1717 static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg)
1718 {
1719         int olddbg = dbg;
1720
1721         dbg = 0;
1722         audit_msg = msg;
1723         audit_rmap(vcpu);
1724         audit_write_protection(vcpu);
1725         audit_mappings(vcpu);
1726         dbg = olddbg;
1727 }
1728
1729 #endif