/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */
#include <linux/types.h>
#include <linux/string.h>
#include <asm/page.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/module.h>

#include "vmx.h"
#include "kvm.h"

#undef MMU_DEBUG

#undef AUDIT

#ifdef AUDIT
static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg);
#else
static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg) {}
#endif

#ifdef MMU_DEBUG

#define pgprintk(x...) do { if (dbg) printk(x); } while (0)
#define rmap_printk(x...) do { if (dbg) printk(x); } while (0)

#else

#define pgprintk(x...) do { } while (0)
#define rmap_printk(x...) do { } while (0)

#endif

#if defined(MMU_DEBUG) || defined(AUDIT)
static int dbg = 1;
#endif

#define ASSERT(x)                                                       \
        if (!(x)) {                                                     \
                printk(KERN_WARNING "assertion failed %s:%d: %s\n",     \
                       __FILE__, __LINE__, #x);                         \
        }

#define PT64_PT_BITS 9
#define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
#define PT32_PT_BITS 10
#define PT32_ENT_PER_PAGE (1 << PT32_PT_BITS)

#define PT_WRITABLE_SHIFT 1

#define PT_PRESENT_MASK (1ULL << 0)
#define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
#define PT_USER_MASK (1ULL << 2)
#define PT_PWT_MASK (1ULL << 3)
#define PT_PCD_MASK (1ULL << 4)
#define PT_ACCESSED_MASK (1ULL << 5)
#define PT_DIRTY_MASK (1ULL << 6)
#define PT_PAGE_SIZE_MASK (1ULL << 7)
#define PT_PAT_MASK (1ULL << 7)
#define PT_GLOBAL_MASK (1ULL << 8)
#define PT64_NX_MASK (1ULL << 63)

#define PT_PAT_SHIFT 7
#define PT_DIR_PAT_SHIFT 12
#define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT)

#define PT32_DIR_PSE36_SIZE 4
#define PT32_DIR_PSE36_SHIFT 13
#define PT32_DIR_PSE36_MASK (((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)


#define PT32_PTE_COPY_MASK \
        (PT_PRESENT_MASK | PT_ACCESSED_MASK | PT_DIRTY_MASK | PT_GLOBAL_MASK)

#define PT64_PTE_COPY_MASK (PT64_NX_MASK | PT32_PTE_COPY_MASK)

#define PT_FIRST_AVAIL_BITS_SHIFT 9
#define PT64_SECOND_AVAIL_BITS_SHIFT 52

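/*
 * Bits 9-11 of a pte are ignored by the hardware, so shadow ptes reuse them
 * as software bits: bit 9 tags special entries (see is_io_pte()), and the
 * guest pte's access bits are cached PT_SHADOW_BITS_OFFSET positions above
 * their hardware locations (PT_SHADOW_WRITABLE_MASK, PT_SHADOW_USER_MASK),
 * as written by set_pte_common() and read back by fix_read_pf().
 */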
#define PT_SHADOW_PS_MARK (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)
#define PT_SHADOW_IO_MARK (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)

#define PT_SHADOW_WRITABLE_SHIFT (PT_FIRST_AVAIL_BITS_SHIFT + 1)
#define PT_SHADOW_WRITABLE_MASK (1ULL << PT_SHADOW_WRITABLE_SHIFT)

#define PT_SHADOW_USER_SHIFT (PT_SHADOW_WRITABLE_SHIFT + 1)
#define PT_SHADOW_USER_MASK (1ULL << (PT_SHADOW_USER_SHIFT))

#define PT_SHADOW_BITS_OFFSET (PT_SHADOW_WRITABLE_SHIFT - PT_WRITABLE_SHIFT)

#define VALID_PAGE(x) ((x) != INVALID_PAGE)

#define PT64_LEVEL_BITS 9

#define PT64_LEVEL_SHIFT(level) \
                ( PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS )

#define PT64_LEVEL_MASK(level) \
                (((1ULL << PT64_LEVEL_BITS) - 1) << PT64_LEVEL_SHIFT(level))

#define PT64_INDEX(address, level)\
        (((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))


#define PT32_LEVEL_BITS 10

#define PT32_LEVEL_SHIFT(level) \
                ( PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS )

#define PT32_LEVEL_MASK(level) \
                (((1ULL << PT32_LEVEL_BITS) - 1) << PT32_LEVEL_SHIFT(level))

#define PT32_INDEX(address, level)\
        (((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))


#define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & PAGE_MASK)
#define PT64_DIR_BASE_ADDR_MASK \
        (PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))

#define PT32_BASE_ADDR_MASK PAGE_MASK
#define PT32_DIR_BASE_ADDR_MASK \
        (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))


#define PFERR_PRESENT_MASK (1U << 0)
#define PFERR_WRITE_MASK (1U << 1)
#define PFERR_USER_MASK (1U << 2)

#define PT64_ROOT_LEVEL 4
#define PT32_ROOT_LEVEL 2
#define PT32E_ROOT_LEVEL 3

#define PT_DIRECTORY_LEVEL 2
#define PT_PAGE_TABLE_LEVEL 1

#define RMAP_EXT 4

struct kvm_rmap_desc {
        u64 *shadow_ptes[RMAP_EXT];
        struct kvm_rmap_desc *more;
};

static int is_write_protection(struct kvm_vcpu *vcpu)
{
        return vcpu->cr0 & CR0_WP_MASK;
}

static int is_cpuid_PSE36(void)
{
        return 1;
}

static int is_present_pte(unsigned long pte)
{
        return pte & PT_PRESENT_MASK;
}

static int is_writeble_pte(unsigned long pte)
{
        return pte & PT_WRITABLE_MASK;
}

static int is_io_pte(unsigned long pte)
{
        return pte & PT_SHADOW_IO_MARK;
}

static int is_rmap_pte(u64 pte)
{
        return (pte & (PT_WRITABLE_MASK | PT_PRESENT_MASK))
                == (PT_WRITABLE_MASK | PT_PRESENT_MASK);
}

static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
                                  size_t objsize, int min)
{
        void *obj;

        if (cache->nobjs >= min)
                return 0;
        while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
                obj = kzalloc(objsize, GFP_NOWAIT);
                if (!obj)
                        return -ENOMEM;
                cache->objects[cache->nobjs++] = obj;
        }
        return 0;
}

static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
{
        while (mc->nobjs)
                kfree(mc->objects[--mc->nobjs]);
}

static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
{
        int r;

        r = mmu_topup_memory_cache(&vcpu->mmu_pte_chain_cache,
                                   sizeof(struct kvm_pte_chain), 4);
        if (r)
                goto out;
        r = mmu_topup_memory_cache(&vcpu->mmu_rmap_desc_cache,
                                   sizeof(struct kvm_rmap_desc), 1);
out:
        return r;
}

static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
        mmu_free_memory_cache(&vcpu->mmu_pte_chain_cache);
        mmu_free_memory_cache(&vcpu->mmu_rmap_desc_cache);
}

static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
                                    size_t size)
{
        void *p;

        BUG_ON(!mc->nobjs);
        p = mc->objects[--mc->nobjs];
        memset(p, 0, size);
        return p;
}

static void mmu_memory_cache_free(struct kvm_mmu_memory_cache *mc, void *obj)
{
        if (mc->nobjs < KVM_NR_MEM_OBJS)
                mc->objects[mc->nobjs++] = obj;
        else
                kfree(obj);
}

static struct kvm_pte_chain *mmu_alloc_pte_chain(struct kvm_vcpu *vcpu)
{
        return mmu_memory_cache_alloc(&vcpu->mmu_pte_chain_cache,
                                      sizeof(struct kvm_pte_chain));
}

static void mmu_free_pte_chain(struct kvm_vcpu *vcpu,
                               struct kvm_pte_chain *pc)
{
        mmu_memory_cache_free(&vcpu->mmu_pte_chain_cache, pc);
}

static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu)
{
        return mmu_memory_cache_alloc(&vcpu->mmu_rmap_desc_cache,
                                      sizeof(struct kvm_rmap_desc));
}

static void mmu_free_rmap_desc(struct kvm_vcpu *vcpu,
                               struct kvm_rmap_desc *rd)
{
        mmu_memory_cache_free(&vcpu->mmu_rmap_desc_cache, rd);
}

/*
 * Reverse mapping data structures:
 *
 * If page->private bit zero is zero, then page->private points to the
 * shadow page table entry that points to page_address(page).
 *
 * If page->private bit zero is one, then (page->private & ~1) points
 * to a struct kvm_rmap_desc containing more mappings.
 */
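
/*
 * rmap_add() records that @spte is a writable mapping of the guest page
 * frame it points to, so that the frame can later be write-protected
 * (rmap_write_protect()) or the mapping torn down (rmap_remove()).
 */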
static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte)
{
        struct page *page;
        struct kvm_rmap_desc *desc;
        int i;

        if (!is_rmap_pte(*spte))
                return;
        page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
        if (!page->private) {
                rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
                page->private = (unsigned long)spte;
        } else if (!(page->private & 1)) {
                rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte);
                desc = mmu_alloc_rmap_desc(vcpu);
                desc->shadow_ptes[0] = (u64 *)page->private;
                desc->shadow_ptes[1] = spte;
                page->private = (unsigned long)desc | 1;
        } else {
                rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
                desc = (struct kvm_rmap_desc *)(page->private & ~1ul);
                while (desc->shadow_ptes[RMAP_EXT-1] && desc->more)
                        desc = desc->more;
                if (desc->shadow_ptes[RMAP_EXT-1]) {
                        desc->more = mmu_alloc_rmap_desc(vcpu);
                        desc = desc->more;
                }
                for (i = 0; desc->shadow_ptes[i]; ++i)
                        ;
                desc->shadow_ptes[i] = spte;
        }
}

static void rmap_desc_remove_entry(struct kvm_vcpu *vcpu,
                                   struct page *page,
                                   struct kvm_rmap_desc *desc,
                                   int i,
                                   struct kvm_rmap_desc *prev_desc)
{
        int j;

        for (j = RMAP_EXT - 1; !desc->shadow_ptes[j] && j > i; --j)
                ;
        desc->shadow_ptes[i] = desc->shadow_ptes[j];
        desc->shadow_ptes[j] = 0;
        if (j != 0)
                return;
        if (!prev_desc && !desc->more)
                page->private = (unsigned long)desc->shadow_ptes[0];
        else
                if (prev_desc)
                        prev_desc->more = desc->more;
                else
                        page->private = (unsigned long)desc->more | 1;
        mmu_free_rmap_desc(vcpu, desc);
}

static void rmap_remove(struct kvm_vcpu *vcpu, u64 *spte)
{
        struct page *page;
        struct kvm_rmap_desc *desc;
        struct kvm_rmap_desc *prev_desc;
        int i;

        if (!is_rmap_pte(*spte))
                return;
        page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
        if (!page->private) {
                printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
                BUG();
        } else if (!(page->private & 1)) {
                rmap_printk("rmap_remove:  %p %llx 1->0\n", spte, *spte);
                if ((u64 *)page->private != spte) {
                        printk(KERN_ERR "rmap_remove:  %p %llx 1->BUG\n",
                               spte, *spte);
                        BUG();
                }
                page->private = 0;
        } else {
                rmap_printk("rmap_remove:  %p %llx many->many\n", spte, *spte);
                desc = (struct kvm_rmap_desc *)(page->private & ~1ul);
                prev_desc = NULL;
                while (desc) {
                        for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i)
                                if (desc->shadow_ptes[i] == spte) {
                                        rmap_desc_remove_entry(vcpu, page,
                                                               desc, i,
                                                               prev_desc);
                                        return;
                                }
                        prev_desc = desc;
                        desc = desc->more;
                }
                BUG();
        }
}

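/*
 * Remove write access from every shadow pte that currently maps @gfn,
 * dropping the rmap entries as we go; used to write-protect a guest page
 * that is being shadowed as a page table.
 */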
static void rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
{
        struct kvm *kvm = vcpu->kvm;
        struct page *page;
        struct kvm_memory_slot *slot;
        struct kvm_rmap_desc *desc;
        u64 *spte;

        slot = gfn_to_memslot(kvm, gfn);
        BUG_ON(!slot);
        page = gfn_to_page(slot, gfn);

        while (page->private) {
                if (!(page->private & 1))
                        spte = (u64 *)page->private;
                else {
                        desc = (struct kvm_rmap_desc *)(page->private & ~1ul);
                        spte = desc->shadow_ptes[0];
                }
                BUG_ON(!spte);
                BUG_ON((*spte & PT64_BASE_ADDR_MASK) !=
                       page_to_pfn(page) << PAGE_SHIFT);
                BUG_ON(!(*spte & PT_PRESENT_MASK));
                BUG_ON(!(*spte & PT_WRITABLE_MASK));
                rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
                rmap_remove(vcpu, spte);
                kvm_arch_ops->tlb_flush(vcpu);
                *spte &= ~(u64)PT_WRITABLE_MASK;
        }
}

static int is_empty_shadow_page(hpa_t page_hpa)
{
        u64 *pos;
        u64 *end;

        for (pos = __va(page_hpa), end = pos + PAGE_SIZE / sizeof(u64);
                      pos != end; pos++)
                if (*pos != 0) {
                        printk(KERN_ERR "%s: %p %llx\n", __FUNCTION__,
                               pos, *pos);
                        return 0;
                }
        return 1;
}

static void kvm_mmu_free_page(struct kvm_vcpu *vcpu, hpa_t page_hpa)
{
        struct kvm_mmu_page *page_head = page_header(page_hpa);

        ASSERT(is_empty_shadow_page(page_hpa));
        list_del(&page_head->link);
        page_head->page_hpa = page_hpa;
        list_add(&page_head->link, &vcpu->free_pages);
        ++vcpu->kvm->n_free_mmu_pages;
}

static unsigned kvm_page_table_hashfn(gfn_t gfn)
{
        return gfn;
}

static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
                                               u64 *parent_pte)
{
        struct kvm_mmu_page *page;

        if (list_empty(&vcpu->free_pages))
                return NULL;

        page = list_entry(vcpu->free_pages.next, struct kvm_mmu_page, link);
        list_del(&page->link);
        list_add(&page->link, &vcpu->kvm->active_mmu_pages);
        ASSERT(is_empty_shadow_page(page->page_hpa));
        page->slot_bitmap = 0;
        page->global = 1;
        page->multimapped = 0;
        page->parent_pte = parent_pte;
        --vcpu->kvm->n_free_mmu_pages;
        return page;
}

static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
                                    struct kvm_mmu_page *page, u64 *parent_pte)
{
        struct kvm_pte_chain *pte_chain;
        struct hlist_node *node;
        int i;

        if (!parent_pte)
                return;
        if (!page->multimapped) {
                u64 *old = page->parent_pte;

                if (!old) {
                        page->parent_pte = parent_pte;
                        return;
                }
                page->multimapped = 1;
                pte_chain = mmu_alloc_pte_chain(vcpu);
                INIT_HLIST_HEAD(&page->parent_ptes);
                hlist_add_head(&pte_chain->link, &page->parent_ptes);
                pte_chain->parent_ptes[0] = old;
        }
        hlist_for_each_entry(pte_chain, node, &page->parent_ptes, link) {
                if (pte_chain->parent_ptes[NR_PTE_CHAIN_ENTRIES-1])
                        continue;
                for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i)
                        if (!pte_chain->parent_ptes[i]) {
                                pte_chain->parent_ptes[i] = parent_pte;
                                return;
                        }
        }
        pte_chain = mmu_alloc_pte_chain(vcpu);
        BUG_ON(!pte_chain);
        hlist_add_head(&pte_chain->link, &page->parent_ptes);
        pte_chain->parent_ptes[0] = parent_pte;
}

static void mmu_page_remove_parent_pte(struct kvm_vcpu *vcpu,
                                       struct kvm_mmu_page *page,
                                       u64 *parent_pte)
{
        struct kvm_pte_chain *pte_chain;
        struct hlist_node *node;
        int i;

        if (!page->multimapped) {
                BUG_ON(page->parent_pte != parent_pte);
                page->parent_pte = NULL;
                return;
        }
        hlist_for_each_entry(pte_chain, node, &page->parent_ptes, link)
                for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
                        if (!pte_chain->parent_ptes[i])
                                break;
                        if (pte_chain->parent_ptes[i] != parent_pte)
                                continue;
                        while (i + 1 < NR_PTE_CHAIN_ENTRIES
                                && pte_chain->parent_ptes[i + 1]) {
                                pte_chain->parent_ptes[i]
                                        = pte_chain->parent_ptes[i + 1];
                                ++i;
                        }
                        pte_chain->parent_ptes[i] = NULL;
                        if (i == 0) {
                                hlist_del(&pte_chain->link);
                                mmu_free_pte_chain(vcpu, pte_chain);
                                if (hlist_empty(&page->parent_ptes)) {
                                        page->multimapped = 0;
                                        page->parent_pte = NULL;
                                }
                        }
                        return;
                }
        BUG();
}

static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm_vcpu *vcpu,
                                                gfn_t gfn)
{
        unsigned index;
        struct hlist_head *bucket;
        struct kvm_mmu_page *page;
        struct hlist_node *node;

        pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
        index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
        bucket = &vcpu->kvm->mmu_page_hash[index];
        hlist_for_each_entry(page, node, bucket, hash_link)
                if (page->gfn == gfn && !page->role.metaphysical) {
                        pgprintk("%s: found role %x\n",
                                 __FUNCTION__, page->role.word);
                        return page;
                }
        return NULL;
}

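/*
 * Find or create the shadow page for guest page table @gfn with the given
 * role, linking @parent_pte as an additional parent if the page already
 * exists.  For 32-bit guests a guest page table holds more entries than a
 * shadow page, so role.quadrant records which part of the guest table this
 * shadow page covers.
 */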
static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
                                             gfn_t gfn,
                                             gva_t gaddr,
                                             unsigned level,
                                             int metaphysical,
                                             u64 *parent_pte)
{
        union kvm_mmu_page_role role;
        unsigned index;
        unsigned quadrant;
        struct hlist_head *bucket;
        struct kvm_mmu_page *page;
        struct hlist_node *node;

        role.word = 0;
        role.glevels = vcpu->mmu.root_level;
        role.level = level;
        role.metaphysical = metaphysical;
        if (vcpu->mmu.root_level <= PT32_ROOT_LEVEL) {
                quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
                quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
                role.quadrant = quadrant;
        }
        pgprintk("%s: looking gfn %lx role %x\n", __FUNCTION__,
                 gfn, role.word);
        index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
        bucket = &vcpu->kvm->mmu_page_hash[index];
        hlist_for_each_entry(page, node, bucket, hash_link)
                if (page->gfn == gfn && page->role.word == role.word) {
                        mmu_page_add_parent_pte(vcpu, page, parent_pte);
                        pgprintk("%s: found\n", __FUNCTION__);
                        return page;
                }
        page = kvm_mmu_alloc_page(vcpu, parent_pte);
        if (!page)
                return page;
        pgprintk("%s: adding gfn %lx role %x\n", __FUNCTION__, gfn, role.word);
        page->gfn = gfn;
        page->role = role;
        hlist_add_head(&page->hash_link, bucket);
        if (!metaphysical)
                rmap_write_protect(vcpu, gfn);
        return page;
}

static void kvm_mmu_page_unlink_children(struct kvm_vcpu *vcpu,
                                         struct kvm_mmu_page *page)
{
        unsigned i;
        u64 *pt;
        u64 ent;

        pt = __va(page->page_hpa);

        if (page->role.level == PT_PAGE_TABLE_LEVEL) {
                for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
                        if (pt[i] & PT_PRESENT_MASK)
                                rmap_remove(vcpu, &pt[i]);
                        pt[i] = 0;
                }
                kvm_arch_ops->tlb_flush(vcpu);
                return;
        }

        for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
                ent = pt[i];

                pt[i] = 0;
                if (!(ent & PT_PRESENT_MASK))
                        continue;
                ent &= PT64_BASE_ADDR_MASK;
                mmu_page_remove_parent_pte(vcpu, page_header(ent), &pt[i]);
        }
}

static void kvm_mmu_put_page(struct kvm_vcpu *vcpu,
                             struct kvm_mmu_page *page,
                             u64 *parent_pte)
{
        mmu_page_remove_parent_pte(vcpu, page, parent_pte);
}

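/*
 * Tear down a shadow page: clear every parent pte pointing to it, unlink
 * its children, and either return it to the free list or, if it is still
 * in use as a root, leave it on the active list until the root is released.
 */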
static void kvm_mmu_zap_page(struct kvm_vcpu *vcpu,
                             struct kvm_mmu_page *page)
{
        u64 *parent_pte;

        while (page->multimapped || page->parent_pte) {
                if (!page->multimapped)
                        parent_pte = page->parent_pte;
                else {
                        struct kvm_pte_chain *chain;

                        chain = container_of(page->parent_ptes.first,
                                             struct kvm_pte_chain, link);
                        parent_pte = chain->parent_ptes[0];
                }
                BUG_ON(!parent_pte);
                kvm_mmu_put_page(vcpu, page, parent_pte);
                *parent_pte = 0;
        }
        kvm_mmu_page_unlink_children(vcpu, page);
        if (!page->root_count) {
                hlist_del(&page->hash_link);
                kvm_mmu_free_page(vcpu, page->page_hpa);
        } else {
                list_del(&page->link);
                list_add(&page->link, &vcpu->kvm->active_mmu_pages);
        }
}

static int kvm_mmu_unprotect_page(struct kvm_vcpu *vcpu, gfn_t gfn)
{
        unsigned index;
        struct hlist_head *bucket;
        struct kvm_mmu_page *page;
        struct hlist_node *node, *n;
        int r;

        pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
        r = 0;
        index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
        bucket = &vcpu->kvm->mmu_page_hash[index];
        hlist_for_each_entry_safe(page, node, n, bucket, hash_link)
                if (page->gfn == gfn && !page->role.metaphysical) {
                        pgprintk("%s: gfn %lx role %x\n", __FUNCTION__, gfn,
                                 page->role.word);
                        kvm_mmu_zap_page(vcpu, page);
                        r = 1;
                }
        return r;
}

static void page_header_update_slot(struct kvm *kvm, void *pte, gpa_t gpa)
{
        int slot = memslot_id(kvm, gfn_to_memslot(kvm, gpa >> PAGE_SHIFT));
        struct kvm_mmu_page *page_head = page_header(__pa(pte));

        __set_bit(slot, &page_head->slot_bitmap);
}

hpa_t safe_gpa_to_hpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
        hpa_t hpa = gpa_to_hpa(vcpu, gpa);

        return is_error_hpa(hpa) ? bad_page_address | (gpa & ~PAGE_MASK) : hpa;
}

hpa_t gpa_to_hpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
        struct kvm_memory_slot *slot;
        struct page *page;

        ASSERT((gpa & HPA_ERR_MASK) == 0);
        slot = gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT);
        if (!slot)
                return gpa | HPA_ERR_MASK;
        page = gfn_to_page(slot, gpa >> PAGE_SHIFT);
        return ((hpa_t)page_to_pfn(page) << PAGE_SHIFT)
                | (gpa & (PAGE_SIZE-1));
}

hpa_t gva_to_hpa(struct kvm_vcpu *vcpu, gva_t gva)
{
        gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);

        if (gpa == UNMAPPED_GVA)
                return UNMAPPED_GVA;
        return gpa_to_hpa(vcpu, gpa);
}

static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
{
}

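/*
 * Install a shadow mapping from guest address @v to host physical address
 * @p, allocating intermediate shadow pages on demand.  Used while the guest
 * has paging disabled, where guest virtual and guest physical addresses
 * coincide.
 */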
static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, hpa_t p)
{
        int level = PT32E_ROOT_LEVEL;
        hpa_t table_addr = vcpu->mmu.root_hpa;

        for (; ; level--) {
                u32 index = PT64_INDEX(v, level);
                u64 *table;
                u64 pte;

                ASSERT(VALID_PAGE(table_addr));
                table = __va(table_addr);

                if (level == 1) {
                        pte = table[index];
                        if (is_present_pte(pte) && is_writeble_pte(pte))
                                return 0;
                        mark_page_dirty(vcpu->kvm, v >> PAGE_SHIFT);
                        page_header_update_slot(vcpu->kvm, table, v);
                        table[index] = p | PT_PRESENT_MASK | PT_WRITABLE_MASK |
                                                                PT_USER_MASK;
                        rmap_add(vcpu, &table[index]);
                        return 0;
                }

                if (table[index] == 0) {
                        struct kvm_mmu_page *new_table;
                        gfn_t pseudo_gfn;

                        pseudo_gfn = (v & PT64_DIR_BASE_ADDR_MASK)
                                >> PAGE_SHIFT;
                        new_table = kvm_mmu_get_page(vcpu, pseudo_gfn,
                                                     v, level - 1,
                                                     1, &table[index]);
                        if (!new_table) {
                                pgprintk("nonpaging_map: ENOMEM\n");
                                return -ENOMEM;
                        }

                        table[index] = new_table->page_hpa | PT_PRESENT_MASK
                                | PT_WRITABLE_MASK | PT_USER_MASK;
                }
                table_addr = table[index] & PT64_BASE_ADDR_MASK;
        }
}

static void mmu_free_roots(struct kvm_vcpu *vcpu)
{
        int i;
        struct kvm_mmu_page *page;

#ifdef CONFIG_X86_64
        if (vcpu->mmu.shadow_root_level == PT64_ROOT_LEVEL) {
                hpa_t root = vcpu->mmu.root_hpa;

                ASSERT(VALID_PAGE(root));
                page = page_header(root);
                --page->root_count;
                vcpu->mmu.root_hpa = INVALID_PAGE;
                return;
        }
#endif
        for (i = 0; i < 4; ++i) {
                hpa_t root = vcpu->mmu.pae_root[i];

                ASSERT(VALID_PAGE(root));
                root &= PT64_BASE_ADDR_MASK;
                page = page_header(root);
                --page->root_count;
                vcpu->mmu.pae_root[i] = INVALID_PAGE;
        }
        vcpu->mmu.root_hpa = INVALID_PAGE;
}

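/*
 * Allocate the shadow root(s) for the current guest cr3: a single 4-level
 * root in long mode, otherwise four PAE roots (one per gigabyte of guest
 * address space) stored in mmu.pae_root[].
 */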
static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
{
        int i;
        gfn_t root_gfn;
        struct kvm_mmu_page *page;

        root_gfn = vcpu->cr3 >> PAGE_SHIFT;

#ifdef CONFIG_X86_64
        if (vcpu->mmu.shadow_root_level == PT64_ROOT_LEVEL) {
                hpa_t root = vcpu->mmu.root_hpa;

                ASSERT(!VALID_PAGE(root));
                page = kvm_mmu_get_page(vcpu, root_gfn, 0,
                                        PT64_ROOT_LEVEL, 0, NULL);
                root = page->page_hpa;
                ++page->root_count;
                vcpu->mmu.root_hpa = root;
                return;
        }
#endif
        for (i = 0; i < 4; ++i) {
                hpa_t root = vcpu->mmu.pae_root[i];

                ASSERT(!VALID_PAGE(root));
                if (vcpu->mmu.root_level == PT32E_ROOT_LEVEL)
                        root_gfn = vcpu->pdptrs[i] >> PAGE_SHIFT;
                else if (vcpu->mmu.root_level == 0)
                        root_gfn = 0;
                page = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
                                        PT32_ROOT_LEVEL, !is_paging(vcpu),
                                        NULL);
                root = page->page_hpa;
                ++page->root_count;
                vcpu->mmu.pae_root[i] = root | PT_PRESENT_MASK;
        }
        vcpu->mmu.root_hpa = __pa(vcpu->mmu.pae_root);
}

static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
{
        return vaddr;
}

static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
                               u32 error_code)
{
        gpa_t addr = gva;
        hpa_t paddr;
        int r;

        r = mmu_topup_memory_caches(vcpu);
        if (r)
                return r;

        ASSERT(vcpu);
        ASSERT(VALID_PAGE(vcpu->mmu.root_hpa));

        paddr = gpa_to_hpa(vcpu, addr & PT64_BASE_ADDR_MASK);

        if (is_error_hpa(paddr))
                return 1;

        return nonpaging_map(vcpu, addr & PAGE_MASK, paddr);
}

static void nonpaging_free(struct kvm_vcpu *vcpu)
{
        mmu_free_roots(vcpu);
}

static int nonpaging_init_context(struct kvm_vcpu *vcpu)
{
        struct kvm_mmu *context = &vcpu->mmu;

        context->new_cr3 = nonpaging_new_cr3;
        context->page_fault = nonpaging_page_fault;
        context->gva_to_gpa = nonpaging_gva_to_gpa;
        context->free = nonpaging_free;
        context->root_level = 0;
        context->shadow_root_level = PT32E_ROOT_LEVEL;
        mmu_alloc_roots(vcpu);
        ASSERT(VALID_PAGE(context->root_hpa));
        kvm_arch_ops->set_cr3(vcpu, context->root_hpa);
        return 0;
}

static void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
{
        ++kvm_stat.tlb_flush;
        kvm_arch_ops->tlb_flush(vcpu);
}

static void paging_new_cr3(struct kvm_vcpu *vcpu)
{
        pgprintk("%s: cr3 %lx\n", __FUNCTION__, vcpu->cr3);
        mmu_free_roots(vcpu);
        if (unlikely(vcpu->kvm->n_free_mmu_pages < KVM_MIN_FREE_MMU_PAGES))
                kvm_mmu_free_some_pages(vcpu);
        mmu_alloc_roots(vcpu);
        kvm_mmu_flush_tlb(vcpu);
        kvm_arch_ops->set_cr3(vcpu, vcpu->mmu.root_hpa);
}

static void mark_pagetable_nonglobal(void *shadow_pte)
{
        page_header(__pa(shadow_pte))->global = 0;
}

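/*
 * Common shadow pte setup: cache the guest access bits in the shadow
 * software bits, translate the guest physical address, tag emulated MMIO
 * with PT_SHADOW_IO_MARK, and write-protect the frame if it is itself
 * shadowed as a guest page table.
 */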
static inline void set_pte_common(struct kvm_vcpu *vcpu,
                             u64 *shadow_pte,
                             gpa_t gaddr,
                             int dirty,
                             u64 access_bits,
                             gfn_t gfn)
{
        hpa_t paddr;

        *shadow_pte |= access_bits << PT_SHADOW_BITS_OFFSET;
        if (!dirty)
                access_bits &= ~PT_WRITABLE_MASK;

        paddr = gpa_to_hpa(vcpu, gaddr & PT64_BASE_ADDR_MASK);

        *shadow_pte |= access_bits;

        if (!(*shadow_pte & PT_GLOBAL_MASK))
                mark_pagetable_nonglobal(shadow_pte);

        if (is_error_hpa(paddr)) {
                *shadow_pte |= gaddr;
                *shadow_pte |= PT_SHADOW_IO_MARK;
                *shadow_pte &= ~PT_PRESENT_MASK;
                return;
        }

        *shadow_pte |= paddr;

        if (access_bits & PT_WRITABLE_MASK) {
                struct kvm_mmu_page *shadow;

                shadow = kvm_mmu_lookup_page(vcpu, gfn);
                if (shadow) {
                        pgprintk("%s: found shadow page for %lx, marking ro\n",
                                 __FUNCTION__, gfn);
                        access_bits &= ~PT_WRITABLE_MASK;
                        if (is_writeble_pte(*shadow_pte)) {
                                *shadow_pte &= ~PT_WRITABLE_MASK;
                                kvm_arch_ops->tlb_flush(vcpu);
                        }
                }
        }

        if (access_bits & PT_WRITABLE_MASK)
                mark_page_dirty(vcpu->kvm, gaddr >> PAGE_SHIFT);

        page_header_update_slot(vcpu->kvm, shadow_pte, gaddr);
        rmap_add(vcpu, shadow_pte);
}

static void inject_page_fault(struct kvm_vcpu *vcpu,
                              u64 addr,
                              u32 err_code)
{
        kvm_arch_ops->inject_page_fault(vcpu, addr, err_code);
}

static inline int fix_read_pf(u64 *shadow_ent)
{
        if ((*shadow_ent & PT_SHADOW_USER_MASK) &&
            !(*shadow_ent & PT_USER_MASK)) {
                /*
                 * If supervisor write protect is disabled, we shadow kernel
                 * pages as user pages so we can trap the write access.
                 */
                *shadow_ent |= PT_USER_MASK;
                *shadow_ent &= ~PT_WRITABLE_MASK;

                return 1;
        }
        return 0;
}

static int may_access(u64 pte, int write, int user)
{
        if (user && !(pte & PT_USER_MASK))
                return 0;
        if (write && !(pte & PT_WRITABLE_MASK))
                return 0;
        return 1;
}

static void paging_free(struct kvm_vcpu *vcpu)
{
        nonpaging_free(vcpu);
}

#define PTTYPE 64
#include "paging_tmpl.h"
#undef PTTYPE

#define PTTYPE 32
#include "paging_tmpl.h"
#undef PTTYPE

static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
{
        struct kvm_mmu *context = &vcpu->mmu;

        ASSERT(is_pae(vcpu));
        context->new_cr3 = paging_new_cr3;
        context->page_fault = paging64_page_fault;
        context->gva_to_gpa = paging64_gva_to_gpa;
        context->free = paging_free;
        context->root_level = level;
        context->shadow_root_level = level;
        mmu_alloc_roots(vcpu);
        ASSERT(VALID_PAGE(context->root_hpa));
        kvm_arch_ops->set_cr3(vcpu, context->root_hpa |
                    (vcpu->cr3 & (CR3_PCD_MASK | CR3_WPT_MASK)));
        return 0;
}

static int paging64_init_context(struct kvm_vcpu *vcpu)
{
        return paging64_init_context_common(vcpu, PT64_ROOT_LEVEL);
}

static int paging32_init_context(struct kvm_vcpu *vcpu)
{
        struct kvm_mmu *context = &vcpu->mmu;

        context->new_cr3 = paging_new_cr3;
        context->page_fault = paging32_page_fault;
        context->gva_to_gpa = paging32_gva_to_gpa;
        context->free = paging_free;
        context->root_level = PT32_ROOT_LEVEL;
        context->shadow_root_level = PT32E_ROOT_LEVEL;
        mmu_alloc_roots(vcpu);
        ASSERT(VALID_PAGE(context->root_hpa));
        kvm_arch_ops->set_cr3(vcpu, context->root_hpa |
                    (vcpu->cr3 & (CR3_PCD_MASK | CR3_WPT_MASK)));
        return 0;
}

static int paging32E_init_context(struct kvm_vcpu *vcpu)
{
        return paging64_init_context_common(vcpu, PT32E_ROOT_LEVEL);
}

static int init_kvm_mmu(struct kvm_vcpu *vcpu)
{
        ASSERT(vcpu);
        ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));

        if (!is_paging(vcpu))
                return nonpaging_init_context(vcpu);
        else if (is_long_mode(vcpu))
                return paging64_init_context(vcpu);
        else if (is_pae(vcpu))
                return paging32E_init_context(vcpu);
        else
                return paging32_init_context(vcpu);
}

static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
{
        ASSERT(vcpu);
        if (VALID_PAGE(vcpu->mmu.root_hpa)) {
                vcpu->mmu.free(vcpu);
                vcpu->mmu.root_hpa = INVALID_PAGE;
        }
}

int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
{
        int r;

        destroy_kvm_mmu(vcpu);
        r = init_kvm_mmu(vcpu);
        if (r < 0)
                goto out;
        r = mmu_topup_memory_caches(vcpu);
out:
        return r;
}

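/*
 * Called before an emulated guest write to @gpa.  If the written page is
 * shadowed as a page table, drop the affected shadow ptes (or zap the whole
 * shadow page on misaligned or flooded writes) so the shadow stays
 * consistent with the guest's update.
 */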
void kvm_mmu_pre_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes)
{
        gfn_t gfn = gpa >> PAGE_SHIFT;
        struct kvm_mmu_page *page;
        struct kvm_mmu_page *child;
        struct hlist_node *node, *n;
        struct hlist_head *bucket;
        unsigned index;
        u64 *spte;
        u64 pte;
        unsigned offset = offset_in_page(gpa);
        unsigned pte_size;
        unsigned page_offset;
        unsigned misaligned;
        int level;
        int flooded = 0;

        pgprintk("%s: gpa %llx bytes %d\n", __FUNCTION__, gpa, bytes);
        if (gfn == vcpu->last_pt_write_gfn) {
                ++vcpu->last_pt_write_count;
                if (vcpu->last_pt_write_count >= 3)
                        flooded = 1;
        } else {
                vcpu->last_pt_write_gfn = gfn;
                vcpu->last_pt_write_count = 1;
        }
        index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
        bucket = &vcpu->kvm->mmu_page_hash[index];
        hlist_for_each_entry_safe(page, node, n, bucket, hash_link) {
                if (page->gfn != gfn || page->role.metaphysical)
                        continue;
                pte_size = page->role.glevels == PT32_ROOT_LEVEL ? 4 : 8;
                misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
                if (misaligned || flooded) {
                        /*
                         * Misaligned accesses are too much trouble to fix
                         * up; also, they usually indicate a page is not used
                         * as a page table.
                         *
                         * If we're seeing too many writes to a page,
                         * it may no longer be a page table, or we may be
                         * forking, in which case it is better to unmap the
                         * page.
                         */
                        pgprintk("misaligned: gpa %llx bytes %d role %x\n",
                                 gpa, bytes, page->role.word);
                        kvm_mmu_zap_page(vcpu, page);
                        continue;
                }
                page_offset = offset;
                level = page->role.level;
                if (page->role.glevels == PT32_ROOT_LEVEL) {
                        page_offset <<= 1;          /* 32->64 */
                        page_offset &= ~PAGE_MASK;
                }
                spte = __va(page->page_hpa);
                spte += page_offset / sizeof(*spte);
                pte = *spte;
                if (is_present_pte(pte)) {
                        if (level == PT_PAGE_TABLE_LEVEL)
                                rmap_remove(vcpu, spte);
                        else {
                                child = page_header(pte & PT64_BASE_ADDR_MASK);
                                mmu_page_remove_parent_pte(vcpu, child, spte);
                        }
                }
                *spte = 0;
        }
}

void kvm_mmu_post_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes)
{
}

int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
{
        gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);

        return kvm_mmu_unprotect_page(vcpu, gpa >> PAGE_SHIFT);
}

void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
{
        while (vcpu->kvm->n_free_mmu_pages < KVM_REFILL_PAGES) {
                struct kvm_mmu_page *page;

                page = container_of(vcpu->kvm->active_mmu_pages.prev,
                                    struct kvm_mmu_page, link);
                kvm_mmu_zap_page(vcpu, page);
        }
}
EXPORT_SYMBOL_GPL(kvm_mmu_free_some_pages);

static void free_mmu_pages(struct kvm_vcpu *vcpu)
{
        struct kvm_mmu_page *page;

        while (!list_empty(&vcpu->kvm->active_mmu_pages)) {
                page = container_of(vcpu->kvm->active_mmu_pages.next,
                                    struct kvm_mmu_page, link);
                kvm_mmu_zap_page(vcpu, page);
        }
        while (!list_empty(&vcpu->free_pages)) {
                page = list_entry(vcpu->free_pages.next,
                                  struct kvm_mmu_page, link);
                list_del(&page->link);
                __free_page(pfn_to_page(page->page_hpa >> PAGE_SHIFT));
                page->page_hpa = INVALID_PAGE;
        }
        free_page((unsigned long)vcpu->mmu.pae_root);
}

static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
{
        struct page *page;
        int i;

        ASSERT(vcpu);

        for (i = 0; i < KVM_NUM_MMU_PAGES; i++) {
                struct kvm_mmu_page *page_header = &vcpu->page_header_buf[i];

                INIT_LIST_HEAD(&page_header->link);
                if ((page = alloc_page(GFP_KERNEL)) == NULL)
                        goto error_1;
                page->private = (unsigned long)page_header;
                page_header->page_hpa = (hpa_t)page_to_pfn(page) << PAGE_SHIFT;
                memset(__va(page_header->page_hpa), 0, PAGE_SIZE);
                list_add(&page_header->link, &vcpu->free_pages);
                ++vcpu->kvm->n_free_mmu_pages;
        }

        /*
         * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
         * Therefore we need to allocate shadow page tables in the first
         * 4GB of memory, which happens to fit the DMA32 zone.
         */
        page = alloc_page(GFP_KERNEL | __GFP_DMA32);
        if (!page)
                goto error_1;
        vcpu->mmu.pae_root = page_address(page);
        for (i = 0; i < 4; ++i)
                vcpu->mmu.pae_root[i] = INVALID_PAGE;

        return 0;

error_1:
        free_mmu_pages(vcpu);
        return -ENOMEM;
}

int kvm_mmu_create(struct kvm_vcpu *vcpu)
{
        ASSERT(vcpu);
        ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
        ASSERT(list_empty(&vcpu->free_pages));

        return alloc_mmu_pages(vcpu);
}

int kvm_mmu_setup(struct kvm_vcpu *vcpu)
{
        ASSERT(vcpu);
        ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
        ASSERT(!list_empty(&vcpu->free_pages));

        return init_kvm_mmu(vcpu);
}

void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
{
        ASSERT(vcpu);

        destroy_kvm_mmu(vcpu);
        free_mmu_pages(vcpu);
        mmu_free_memory_caches(vcpu);
}

void kvm_mmu_slot_remove_write_access(struct kvm_vcpu *vcpu, int slot)
{
        struct kvm *kvm = vcpu->kvm;
        struct kvm_mmu_page *page;

        list_for_each_entry(page, &kvm->active_mmu_pages, link) {
                int i;
                u64 *pt;

                if (!test_bit(slot, &page->slot_bitmap))
                        continue;

                pt = __va(page->page_hpa);
                for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
                        /* avoid RMW */
                        if (pt[i] & PT_WRITABLE_MASK) {
                                rmap_remove(vcpu, &pt[i]);
                                pt[i] &= ~PT_WRITABLE_MASK;
                        }
        }
}

#ifdef AUDIT

static const char *audit_msg;

static gva_t canonicalize(gva_t gva)
{
#ifdef CONFIG_X86_64
        gva = (long long)(gva << 16) >> 16;
#endif
        return gva;
}

static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
                                gva_t va, int level)
{
        u64 *pt = __va(page_pte & PT64_BASE_ADDR_MASK);
        int i;
        gva_t va_delta = 1ul << (PAGE_SHIFT + 9 * (level - 1));

        for (i = 0; i < PT64_ENT_PER_PAGE; ++i, va += va_delta) {
                u64 ent = pt[i];

                if (!(ent & PT_PRESENT_MASK))
                        continue;

                va = canonicalize(va);
                if (level > 1)
                        audit_mappings_page(vcpu, ent, va, level - 1);
                else {
                        gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, va);
                        hpa_t hpa = gpa_to_hpa(vcpu, gpa);

                        if ((ent & PT_PRESENT_MASK)
                            && (ent & PT64_BASE_ADDR_MASK) != hpa)
                                printk(KERN_ERR "audit error: (%s) levels %d"
                                       " gva %lx gpa %llx hpa %llx ent %llx\n",
                                       audit_msg, vcpu->mmu.root_level,
                                       va, gpa, hpa, ent);
                }
        }
}

static void audit_mappings(struct kvm_vcpu *vcpu)
{
        int i;

        if (vcpu->mmu.root_level == 4)
                audit_mappings_page(vcpu, vcpu->mmu.root_hpa, 0, 4);
        else
                for (i = 0; i < 4; ++i)
                        if (vcpu->mmu.pae_root[i] & PT_PRESENT_MASK)
                                audit_mappings_page(vcpu,
                                                    vcpu->mmu.pae_root[i],
                                                    i << 30,
                                                    2);
}

static int count_rmaps(struct kvm_vcpu *vcpu)
{
        int nmaps = 0;
        int i, j, k;

        for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
                struct kvm_memory_slot *m = &vcpu->kvm->memslots[i];
                struct kvm_rmap_desc *d;

                for (j = 0; j < m->npages; ++j) {
                        struct page *page = m->phys_mem[j];

                        if (!page->private)
                                continue;
                        if (!(page->private & 1)) {
                                ++nmaps;
                                continue;
                        }
                        d = (struct kvm_rmap_desc *)(page->private & ~1ul);
                        while (d) {
                                for (k = 0; k < RMAP_EXT; ++k)
                                        if (d->shadow_ptes[k])
                                                ++nmaps;
                                        else
                                                break;
                                d = d->more;
                        }
                }
        }
        return nmaps;
}

static int count_writable_mappings(struct kvm_vcpu *vcpu)
{
        int nmaps = 0;
        struct kvm_mmu_page *page;
        int i;

        list_for_each_entry(page, &vcpu->kvm->active_mmu_pages, link) {
                u64 *pt = __va(page->page_hpa);

                if (page->role.level != PT_PAGE_TABLE_LEVEL)
                        continue;

                for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
                        u64 ent = pt[i];

                        if (!(ent & PT_PRESENT_MASK))
                                continue;
                        if (!(ent & PT_WRITABLE_MASK))
                                continue;
                        ++nmaps;
                }
        }
        return nmaps;
}

static void audit_rmap(struct kvm_vcpu *vcpu)
{
        int n_rmap = count_rmaps(vcpu);
        int n_actual = count_writable_mappings(vcpu);

        if (n_rmap != n_actual)
                printk(KERN_ERR "%s: (%s) rmap %d actual %d\n",
                       __FUNCTION__, audit_msg, n_rmap, n_actual);
}

static void audit_write_protection(struct kvm_vcpu *vcpu)
{
        struct kvm_mmu_page *page;

        list_for_each_entry(page, &vcpu->kvm->active_mmu_pages, link) {
                hfn_t hfn;
                struct page *pg;

                if (page->role.metaphysical)
                        continue;

                hfn = gpa_to_hpa(vcpu, (gpa_t)page->gfn << PAGE_SHIFT)
                        >> PAGE_SHIFT;
                pg = pfn_to_page(hfn);
                if (pg->private)
                        printk(KERN_ERR "%s: (%s) shadow page has writable"
                               " mappings: gfn %lx role %x\n",
                               __FUNCTION__, audit_msg, page->gfn,
                               page->role.word);
        }
}

static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg)
{
        int olddbg = dbg;

        dbg = 0;
        audit_msg = msg;
        audit_rmap(vcpu);
        audit_write_protection(vcpu);
        audit_mappings(vcpu);
        dbg = olddbg;
}

#endif