4 * This file contains the various mmu fetch and update operations.
5 * The most important job they must perform is the mapping between the
6 * domain's pfn and the overall machine mfns.
8 * Xen allows guests to directly update the pagetable, in a controlled
9 * fashion. In other words, the guest modifies the same pagetable
10 * that the CPU actually uses, which eliminates the overhead of having
11 * a separate shadow pagetable.
13 * In order to allow this, it falls on the guest domain to map its
14 * notion of a "physical" pfn - which is just a domain-local linear
15 * address - into a real "machine address" which the CPU's MMU can
18 * A pgd_t/pmd_t/pte_t will typically contain an mfn, and so can be
19 * inserted directly into the pagetable. When creating a new
20 * pte/pmd/pgd, it converts the passed pfn into an mfn. Conversely,
21 * when reading the content back with __(pgd|pmd|pte)_val, it converts
22 * the mfn back into a pfn.
24 * The other constraint is that all pages which make up a pagetable
25 * must be mapped read-only in the guest. This prevents uncontrolled
26 * guest updates to the pagetable. Xen strictly enforces this, and
27 * will disallow any pagetable update which will end up mapping a
28 * pagetable page RW, and will disallow using any writable page as a
31 * Naively, when loading %cr3 with the base of a new pagetable, Xen
32 * would need to validate the whole pagetable before going on.
33 * Naturally, this is quite slow. The solution is to "pin" a
34 * pagetable, which enforces all the constraints on the pagetable even
35 * when it is not actively in use. This means that Xen can be assured
36 * that it is still valid when you do load it into %cr3, and doesn't
37 * need to revalidate it.
39 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
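/*
 * A rough sketch of the round trip described above, in terms of the
 * helpers defined later in this file (illustrative only; assumes an
 * ordinary present RAM mapping):
 *
 *	pteval_t v = ((pteval_t)pfn << PAGE_SHIFT) | flags;
 *	v = pte_pfn_to_mfn(v);	pfn -> mfn, what actually lands in the pagetable
 *	v = pte_mfn_to_pfn(v);	mfn -> pfn, what the guest sees on read-back
 */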
41 #include <linux/sched.h>
42 #include <linux/highmem.h>
43 #include <linux/debugfs.h>
44 #include <linux/bug.h>
45 #include <linux/vmalloc.h>
46 #include <linux/module.h>
47 #include <linux/gfp.h>
48 #include <linux/memblock.h>
49 #include <linux/seq_file.h>
51 #include <trace/events/xen.h>
53 #include <asm/pgtable.h>
54 #include <asm/tlbflush.h>
55 #include <asm/fixmap.h>
56 #include <asm/mmu_context.h>
57 #include <asm/setup.h>
58 #include <asm/paravirt.h>
60 #include <asm/linkage.h>
66 #include <asm/xen/hypercall.h>
67 #include <asm/xen/hypervisor.h>
71 #include <xen/interface/xen.h>
72 #include <xen/interface/hvm/hvm_op.h>
73 #include <xen/interface/version.h>
74 #include <xen/interface/memory.h>
75 #include <xen/hvc-console.h>
77 #include "multicalls.h"
82 * Protects atomic reservation decrease/increase against concurrent increases.
83 * Also protects non-atomic updates of current_pages and balloon lists.
85 DEFINE_SPINLOCK(xen_reservation_lock);
89 * Identity map, in addition to plain kernel map. This needs to be
90 * large enough to allocate page table pages to allocate the rest.
91 * Each page can map 2MB.
93 #define LEVEL1_IDENT_ENTRIES (PTRS_PER_PTE * 4)
94 static RESERVE_BRK_ARRAY(pte_t, level1_ident_pgt, LEVEL1_IDENT_ENTRIES);
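/*
 * For example, with 512-entry pte pages (64-bit, or 32-bit PAE) each of
 * these pages maps 512 * 4kB = 2MB, so the four pages reserved here
 * cover 4 * 2MB = 8MB of identity mapping.
 */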
97 /* l3 pud for userspace vsyscall mapping */
98 static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
99 #endif /* CONFIG_X86_64 */
102 * Note about cr3 (pagetable base) values:
104 * xen_cr3 contains the current logical cr3 value; it contains the
105 * last set cr3. This may not be the current effective cr3, because
106 * its update may be being lazily deferred. However, a vcpu looking
107 * at its own cr3 can use this value knowing that everything will
108 * be self-consistent.
110 * xen_current_cr3 contains the actual vcpu cr3; it is set once the
111 * hypercall to set the vcpu cr3 is complete (so it may be a little
112 * out of date, but it will never be set early). If one vcpu is
113 * looking at another vcpu's cr3 value, it should use this variable.
115 DEFINE_PER_CPU(unsigned long, xen_cr3); /* cr3 stored as physaddr */
116 DEFINE_PER_CPU(unsigned long, xen_current_cr3); /* actual vcpu cr3 */
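/*
 * For example, code that wants to know whether some other vcpu might
 * still be using a given pagetable must compare against xen_current_cr3
 * rather than xen_cr3, as xen_drop_mm_ref() does below:
 *
 *	if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
 *		... that cpu may still hold a reference ...
 */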
120 * Just beyond the highest usermode address. STACK_TOP_MAX has a
121 * redzone above it, so round it up to a PGD boundary.
123 #define USER_LIMIT ((STACK_TOP_MAX + PGDIR_SIZE - 1) & PGDIR_MASK)
125 unsigned long arbitrary_virt_to_mfn(void *vaddr)
127 xmaddr_t maddr = arbitrary_virt_to_machine(vaddr);
129 return PFN_DOWN(maddr.maddr);
132 xmaddr_t arbitrary_virt_to_machine(void *vaddr)
134 unsigned long address = (unsigned long)vaddr;
140 * if the PFN is in the linear mapped vaddr range, we can just use
141 * the (quick) virt_to_machine() p2m lookup
143 if (virt_addr_valid(vaddr))
144 return virt_to_machine(vaddr);
146 /* otherwise we have to do a (slower) full page-table walk */
148 pte = lookup_address(address, &level);
150 offset = address & ~PAGE_MASK;
151 return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset);
153 EXPORT_SYMBOL_GPL(arbitrary_virt_to_machine);
155 void make_lowmem_page_readonly(void *vaddr)
158 unsigned long address = (unsigned long)vaddr;
161 pte = lookup_address(address, &level);
163 return; /* vaddr missing */
165 ptev = pte_wrprotect(*pte);
167 if (HYPERVISOR_update_va_mapping(address, ptev, 0))
171 void make_lowmem_page_readwrite(void *vaddr)
174 unsigned long address = (unsigned long)vaddr;
177 pte = lookup_address(address, &level);
179 return; /* vaddr missing */
181 ptev = pte_mkwrite(*pte);
183 if (HYPERVISOR_update_va_mapping(address, ptev, 0))
188 static bool xen_page_pinned(void *ptr)
190 struct page *page = virt_to_page(ptr);
192 return PagePinned(page);
195 void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid)
197 struct multicall_space mcs;
198 struct mmu_update *u;
200 trace_xen_mmu_set_domain_pte(ptep, pteval, domid);
202 mcs = xen_mc_entry(sizeof(*u));
205 /* ptep might be kmapped when using 32-bit HIGHPTE */
206 u->ptr = virt_to_machine(ptep).maddr;
207 u->val = pte_val_ma(pteval);
209 MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, domid);
211 xen_mc_issue(PARAVIRT_LAZY_MMU);
213 EXPORT_SYMBOL_GPL(xen_set_domain_pte);
215 static void xen_extend_mmu_update(const struct mmu_update *update)
217 struct multicall_space mcs;
218 struct mmu_update *u;
220 mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u));
222 if (mcs.mc != NULL) {
225 mcs = __xen_mc_entry(sizeof(*u));
226 MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
233 static void xen_extend_mmuext_op(const struct mmuext_op *op)
235 struct multicall_space mcs;
238 mcs = xen_mc_extend_args(__HYPERVISOR_mmuext_op, sizeof(*u));
240 if (mcs.mc != NULL) {
243 mcs = __xen_mc_entry(sizeof(*u));
244 MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
251 static void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
259 /* ptr may be ioremapped for 64-bit pagetable setup */
260 u.ptr = arbitrary_virt_to_machine(ptr).maddr;
261 u.val = pmd_val_ma(val);
262 xen_extend_mmu_update(&u);
264 xen_mc_issue(PARAVIRT_LAZY_MMU);
269 static void xen_set_pmd(pmd_t *ptr, pmd_t val)
271 trace_xen_mmu_set_pmd(ptr, val);
273 /* If page is not pinned, we can just update the entry
275 if (!xen_page_pinned(ptr)) {
280 xen_set_pmd_hyper(ptr, val);
284 * Associate a virtual page frame with a given physical page frame
285 * and protection flags for that frame.
287 void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
289 set_pte_vaddr(vaddr, mfn_pte(mfn, flags));
292 static bool xen_batched_set_pte(pte_t *ptep, pte_t pteval)
296 if (paravirt_get_lazy_mode() != PARAVIRT_LAZY_MMU)
301 u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
302 u.val = pte_val_ma(pteval);
303 xen_extend_mmu_update(&u);
305 xen_mc_issue(PARAVIRT_LAZY_MMU);
310 static inline void __xen_set_pte(pte_t *ptep, pte_t pteval)
312 if (!xen_batched_set_pte(ptep, pteval))
313 native_set_pte(ptep, pteval);
316 static void xen_set_pte(pte_t *ptep, pte_t pteval)
318 trace_xen_mmu_set_pte(ptep, pteval);
319 __xen_set_pte(ptep, pteval);
322 static void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
323 pte_t *ptep, pte_t pteval)
325 trace_xen_mmu_set_pte_at(mm, addr, ptep, pteval);
326 __xen_set_pte(ptep, pteval);
329 pte_t xen_ptep_modify_prot_start(struct mm_struct *mm,
330 unsigned long addr, pte_t *ptep)
332 /* Just return the pte as-is. We preserve the bits on commit */
333 trace_xen_mmu_ptep_modify_prot_start(mm, addr, ptep, *ptep);
337 void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
338 pte_t *ptep, pte_t pte)
342 trace_xen_mmu_ptep_modify_prot_commit(mm, addr, ptep, pte);
345 u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
346 u.val = pte_val_ma(pte);
347 xen_extend_mmu_update(&u);
349 xen_mc_issue(PARAVIRT_LAZY_MMU);
352 /* Assume pteval_t is equivalent to all the other *val_t types. */
353 static pteval_t pte_mfn_to_pfn(pteval_t val)
355 if (val & _PAGE_PRESENT) {
356 unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
357 unsigned long pfn = mfn_to_pfn(mfn);
359 pteval_t flags = val & PTE_FLAGS_MASK;
360 if (unlikely(pfn == ~0))
361 val = flags & ~_PAGE_PRESENT;
363 val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
369 static pteval_t pte_pfn_to_mfn(pteval_t val)
371 if (val & _PAGE_PRESENT) {
372 unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
373 pteval_t flags = val & PTE_FLAGS_MASK;
376 if (!xen_feature(XENFEAT_auto_translated_physmap))
377 mfn = get_phys_to_machine(pfn);
381 * If there's no mfn for the pfn, then just create an
382 * empty non-present pte. Unfortunately this loses
383 * information about the original pfn, so
384 * pte_mfn_to_pfn is asymmetric.
386 if (unlikely(mfn == INVALID_P2M_ENTRY)) {
391 * It is important to do this test _after_ the
392 * INVALID_P2M_ENTRY check, as INVALID_P2M_ENTRY &
393 * IDENTITY_FRAME_BIT resolves to true.
395 mfn &= ~FOREIGN_FRAME_BIT;
396 if (mfn & IDENTITY_FRAME_BIT) {
397 mfn &= ~IDENTITY_FRAME_BIT;
398 flags |= _PAGE_IOMAP;
401 val = ((pteval_t)mfn << PAGE_SHIFT) | flags;
407 static pteval_t iomap_pte(pteval_t val)
409 if (val & _PAGE_PRESENT) {
410 unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
411 pteval_t flags = val & PTE_FLAGS_MASK;
413 /* We assume the pte frame number is an MFN, so
414 just use it as-is. */
415 val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
421 static pteval_t xen_pte_val(pte_t pte)
423 pteval_t pteval = pte.pte;
425 /* If this is a WC pte, convert back from Xen WC to Linux WC */
426 if ((pteval & (_PAGE_PAT | _PAGE_PCD | _PAGE_PWT)) == _PAGE_PAT) {
427 WARN_ON(!pat_enabled);
428 pteval = (pteval & ~_PAGE_PAT) | _PAGE_PWT;
431 if (xen_initial_domain() && (pteval & _PAGE_IOMAP))
434 return pte_mfn_to_pfn(pteval);
436 PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);
438 static pgdval_t xen_pgd_val(pgd_t pgd)
440 return pte_mfn_to_pfn(pgd.pgd);
442 PV_CALLEE_SAVE_REGS_THUNK(xen_pgd_val);
445 * Xen's PAT setup is part of its ABI, though I assume entries 6 & 7
446 * are reserved for now, to correspond to the Intel-reserved PAT
449 * We expect Linux's PAT set as follows:
451 * Idx PTE flags Linux Xen Default
458 * 6 PAT PCD UC- UC UC-
459 * 7 PAT PCD PWT UC UC UC
462 void xen_set_pat(u64 pat)
464 /* We expect Linux to use a PAT setting of
465 * UC UC- WC WB (ignoring the PAT flag) */
466 WARN_ON(pat != 0x0007010600070106ull);
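	/*
	 * Decoded byte by byte (PAT MSR encodings: 0=UC, 1=WC, 4=WT,
	 * 5=WP, 6=WB, 7=UC-), 0x0007010600070106 gives, for entries 0..7:
	 * WB WC UC- UC WB WC UC- UC, i.e. the "UC UC- WC WB" layout noted
	 * above, repeated with the PAT bit set.
	 */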
469 static pte_t xen_make_pte(pteval_t pte)
471 phys_addr_t addr = (pte & PTE_PFN_MASK);
473 /* If Linux is trying to set a WC pte, then map to the Xen WC.
474 * If _PAGE_PAT is set, then it probably means it is really
475 * _PAGE_PSE, so avoid fiddling with the PAT mapping and hope
476 * things work out OK...
478 * (We should never see kernel mappings with _PAGE_PSE set,
479 * but we could see hugetlbfs mappings, I think.).
481 if (pat_enabled && !WARN_ON(pte & _PAGE_PAT)) {
482 if ((pte & (_PAGE_PCD | _PAGE_PWT)) == _PAGE_PWT)
483 pte = (pte & ~(_PAGE_PCD | _PAGE_PWT)) | _PAGE_PAT;
487 * Unprivileged domains are allowed to do IOMAPpings for
488 * PCI passthrough, but not map ISA space. The ISA
489 * mappings are just dummy local mappings to keep other
490 * parts of the kernel happy.
492 if (unlikely(pte & _PAGE_IOMAP) &&
493 (xen_initial_domain() || addr >= ISA_END_ADDRESS)) {
494 pte = iomap_pte(pte);
497 pte = pte_pfn_to_mfn(pte);
500 return native_make_pte(pte);
502 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte);
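/*
 * Worked example of the WC remapping above (assuming the PAT layout
 * described in the table earlier in this file): a Linux WC pte has
 * PWT=1, PCD=0, PAT=0 (index 1); xen_make_pte rewrites it to PAT=1,
 * PCD=0, PWT=0 (index 4), the slot where Xen's fixed PAT provides WC,
 * and xen_pte_val performs the inverse so Linux never sees the Xen
 * encoding.
 */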
504 static pgd_t xen_make_pgd(pgdval_t pgd)
506 pgd = pte_pfn_to_mfn(pgd);
507 return native_make_pgd(pgd);
509 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd);
511 static pmdval_t xen_pmd_val(pmd_t pmd)
513 return pte_mfn_to_pfn(pmd.pmd);
515 PV_CALLEE_SAVE_REGS_THUNK(xen_pmd_val);
517 static void xen_set_pud_hyper(pud_t *ptr, pud_t val)
525 /* ptr may be ioremapped for 64-bit pagetable setup */
526 u.ptr = arbitrary_virt_to_machine(ptr).maddr;
527 u.val = pud_val_ma(val);
528 xen_extend_mmu_update(&u);
530 xen_mc_issue(PARAVIRT_LAZY_MMU);
535 static void xen_set_pud(pud_t *ptr, pud_t val)
537 trace_xen_mmu_set_pud(ptr, val);
539 /* If page is not pinned, we can just update the entry
541 if (!xen_page_pinned(ptr)) {
546 xen_set_pud_hyper(ptr, val);
549 #ifdef CONFIG_X86_PAE
550 static void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
552 trace_xen_mmu_set_pte_atomic(ptep, pte);
553 set_64bit((u64 *)ptep, native_pte_val(pte));
556 static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
558 trace_xen_mmu_pte_clear(mm, addr, ptep);
559 if (!xen_batched_set_pte(ptep, native_make_pte(0)))
560 native_pte_clear(mm, addr, ptep);
563 static void xen_pmd_clear(pmd_t *pmdp)
565 trace_xen_mmu_pmd_clear(pmdp);
566 set_pmd(pmdp, __pmd(0));
568 #endif /* CONFIG_X86_PAE */
570 static pmd_t xen_make_pmd(pmdval_t pmd)
572 pmd = pte_pfn_to_mfn(pmd);
573 return native_make_pmd(pmd);
575 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);
577 #if PAGETABLE_LEVELS == 4
578 static pudval_t xen_pud_val(pud_t pud)
580 return pte_mfn_to_pfn(pud.pud);
582 PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val);
584 static pud_t xen_make_pud(pudval_t pud)
586 pud = pte_pfn_to_mfn(pud);
588 return native_make_pud(pud);
590 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pud);
592 static pgd_t *xen_get_user_pgd(pgd_t *pgd)
594 pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK);
595 unsigned offset = pgd - pgd_page;
596 pgd_t *user_ptr = NULL;
598 if (offset < pgd_index(USER_LIMIT)) {
599 struct page *page = virt_to_page(pgd_page);
600 user_ptr = (pgd_t *)page->private;
608 static void __xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
612 u.ptr = virt_to_machine(ptr).maddr;
613 u.val = pgd_val_ma(val);
614 xen_extend_mmu_update(&u);
618 * Raw hypercall-based set_pgd, intended for use in early boot before
619 * there's a page structure. This implies:
620 * 1. The only existing pagetable is the kernel's
621 * 2. It is always pinned
622 * 3. It has no user pagetable attached to it
624 static void __init xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
630 __xen_set_pgd_hyper(ptr, val);
632 xen_mc_issue(PARAVIRT_LAZY_MMU);
637 static void xen_set_pgd(pgd_t *ptr, pgd_t val)
639 pgd_t *user_ptr = xen_get_user_pgd(ptr);
641 trace_xen_mmu_set_pgd(ptr, user_ptr, val);
643 /* If page is not pinned, we can just update the entry
645 if (!xen_page_pinned(ptr)) {
648 WARN_ON(xen_page_pinned(user_ptr));
654 /* If it's pinned, then we can at least batch the kernel and
655 user updates together. */
658 __xen_set_pgd_hyper(ptr, val);
660 __xen_set_pgd_hyper(user_ptr, val);
662 xen_mc_issue(PARAVIRT_LAZY_MMU);
664 #endif /* PAGETABLE_LEVELS == 4 */
667 * (Yet another) pagetable walker. This one is intended for pinning a
668 * pagetable. This means that it walks a pagetable and calls the
669 * callback function on each page it finds making up the page table,
670 * at every level. It walks the entire pagetable, but it only bothers
671 * pinning pte pages which are below limit. In the normal case this
672 * will be STACK_TOP_MAX, but at boot we need to pin up to
675 * For 32-bit the important bit is that we don't pin beyond there,
676 * because then we start getting into Xen's ptes.
678 * For 64-bit, we must skip the Xen hole in the middle of the address
679 * space, just after the big x86-64 virtual hole.
681 static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
682 int (*func)(struct mm_struct *mm, struct page *,
687 unsigned hole_low, hole_high;
688 unsigned pgdidx_limit, pudidx_limit, pmdidx_limit;
689 unsigned pgdidx, pudidx, pmdidx;
691 /* The limit is the last byte to be touched */
693 BUG_ON(limit >= FIXADDR_TOP);
695 if (xen_feature(XENFEAT_auto_translated_physmap))
699 * 64-bit has a great big hole in the middle of the address
700 * space, which contains the Xen mappings. On 32-bit these
701 * will end up making a zero-sized hole, so this is a no-op.
703 hole_low = pgd_index(USER_LIMIT);
704 hole_high = pgd_index(PAGE_OFFSET);
706 pgdidx_limit = pgd_index(limit);
708 pudidx_limit = pud_index(limit);
713 pmdidx_limit = pmd_index(limit);
718 for (pgdidx = 0; pgdidx <= pgdidx_limit; pgdidx++) {
721 if (pgdidx >= hole_low && pgdidx < hole_high)
724 if (!pgd_val(pgd[pgdidx]))
727 pud = pud_offset(&pgd[pgdidx], 0);
729 if (PTRS_PER_PUD > 1) /* not folded */
730 flush |= (*func)(mm, virt_to_page(pud), PT_PUD);
732 for (pudidx = 0; pudidx < PTRS_PER_PUD; pudidx++) {
735 if (pgdidx == pgdidx_limit &&
736 pudidx > pudidx_limit)
739 if (pud_none(pud[pudidx]))
742 pmd = pmd_offset(&pud[pudidx], 0);
744 if (PTRS_PER_PMD > 1) /* not folded */
745 flush |= (*func)(mm, virt_to_page(pmd), PT_PMD);
747 for (pmdidx = 0; pmdidx < PTRS_PER_PMD; pmdidx++) {
750 if (pgdidx == pgdidx_limit &&
751 pudidx == pudidx_limit &&
752 pmdidx > pmdidx_limit)
755 if (pmd_none(pmd[pmdidx]))
758 pte = pmd_page(pmd[pmdidx]);
759 flush |= (*func)(mm, pte, PT_PTE);
765 /* Do the top level last, so that the callbacks can use it as
766 a cue to do final things like tlb flushes. */
767 flush |= (*func)(mm, virt_to_page(pgd), PT_PGD);
772 static int xen_pgd_walk(struct mm_struct *mm,
773 int (*func)(struct mm_struct *mm, struct page *,
777 return __xen_pgd_walk(mm, mm->pgd, func, limit);
780 /* If we're using split pte locks, then take the page's lock and
781 return a pointer to it. Otherwise return NULL. */
782 static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm)
784 spinlock_t *ptl = NULL;
786 #if USE_SPLIT_PTLOCKS
787 ptl = __pte_lockptr(page);
788 spin_lock_nest_lock(ptl, &mm->page_table_lock);
794 static void xen_pte_unlock(void *v)
800 static void xen_do_pin(unsigned level, unsigned long pfn)
805 op.arg1.mfn = pfn_to_mfn(pfn);
807 xen_extend_mmuext_op(&op);
810 static int xen_pin_page(struct mm_struct *mm, struct page *page,
813 unsigned pgfl = TestSetPagePinned(page);
817 flush = 0; /* already pinned */
818 else if (PageHighMem(page))
819 /* kmaps need flushing if we found an unpinned
823 void *pt = lowmem_page_address(page);
824 unsigned long pfn = page_to_pfn(page);
825 struct multicall_space mcs = __xen_mc_entry(0);
831 * We need to hold the pagetable lock between the time
832 * we make the pagetable RO and when we actually pin
833 * it. If we don't, then other users may come in and
834 * attempt to update the pagetable by writing it,
835 * which will fail because the memory is RO but not
836 * pinned, so Xen won't do the trap'n'emulate.
838 * If we're using split pte locks, we can't hold the
839 * entire pagetable's worth of locks during the
840 * traverse, because we may wrap the preempt count (8
841 * bits). The solution is to mark RO and pin each PTE
842 * page while holding the lock. This means the number
843 * of locks we end up holding is never more than a
844 * batch size (~32 entries, at present).
846 * If we're not using split pte locks, we needn't pin
847 * the PTE pages independently, because we're
848 * protected by the overall pagetable lock.
852 ptl = xen_pte_lock(page, mm);
854 MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
855 pfn_pte(pfn, PAGE_KERNEL_RO),
856 level == PT_PGD ? UVMF_TLB_FLUSH : 0);
859 xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn);
861 /* Queue a deferred unlock for when this batch
863 xen_mc_callback(xen_pte_unlock, ptl);
870 /* This is called just after a mm has been created, but it has not
871 been used yet. We need to make sure that its pagetable is all
872 read-only, and can be pinned. */
873 static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
875 trace_xen_mmu_pgd_pin(mm, pgd);
879 if (__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT)) {
880 /* re-enable interrupts for flushing */
890 pgd_t *user_pgd = xen_get_user_pgd(pgd);
892 xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd)));
895 xen_pin_page(mm, virt_to_page(user_pgd), PT_PGD);
896 xen_do_pin(MMUEXT_PIN_L4_TABLE,
897 PFN_DOWN(__pa(user_pgd)));
900 #else /* CONFIG_X86_32 */
901 #ifdef CONFIG_X86_PAE
902 /* Need to make sure unshared kernel PMD is pinnable */
903 xen_pin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
906 xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd)));
907 #endif /* CONFIG_X86_64 */
911 static void xen_pgd_pin(struct mm_struct *mm)
913 __xen_pgd_pin(mm, mm->pgd);
917 * On save, we need to pin all pagetables to make sure they get their
918 * mfns turned into pfns. Search the list for any unpinned pgds and pin
919 * them (unpinned pgds are not currently in use, probably because the
920 * process is under construction or destruction).
922 * Expected to be called in stop_machine() ("equivalent to taking
923 * every spinlock in the system"), so the locking doesn't really
924 * matter all that much.
926 void xen_mm_pin_all(void)
930 spin_lock(&pgd_lock);
932 list_for_each_entry(page, &pgd_list, lru) {
933 if (!PagePinned(page)) {
934 __xen_pgd_pin(&init_mm, (pgd_t *)page_address(page));
935 SetPageSavePinned(page);
939 spin_unlock(&pgd_lock);
943 * The init_mm pagetable is really pinned as soon as it's created, but
944 * that's before we have page structures to store the bits. So do all
945 * the book-keeping now.
947 static int __init xen_mark_pinned(struct mm_struct *mm, struct page *page,
954 static void __init xen_mark_init_mm_pinned(void)
956 xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP);
959 static int xen_unpin_page(struct mm_struct *mm, struct page *page,
962 unsigned pgfl = TestClearPagePinned(page);
964 if (pgfl && !PageHighMem(page)) {
965 void *pt = lowmem_page_address(page);
966 unsigned long pfn = page_to_pfn(page);
967 spinlock_t *ptl = NULL;
968 struct multicall_space mcs;
971 * Do the converse to pin_page. If we're using split
972 * pte locks, we must be holding the lock while
973 * the pte page is unpinned but still RO to prevent
974 * concurrent updates from seeing it in this
975 * partially-pinned state.
977 if (level == PT_PTE) {
978 ptl = xen_pte_lock(page, mm);
981 xen_do_pin(MMUEXT_UNPIN_TABLE, pfn);
984 mcs = __xen_mc_entry(0);
986 MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
987 pfn_pte(pfn, PAGE_KERNEL),
988 level == PT_PGD ? UVMF_TLB_FLUSH : 0);
991 /* unlock when batch completed */
992 xen_mc_callback(xen_pte_unlock, ptl);
996 return 0; /* never need to flush on unpin */
999 /* Release a pagetable's pages back as normal RW */
1000 static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
1002 trace_xen_mmu_pgd_unpin(mm, pgd);
1006 xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
1008 #ifdef CONFIG_X86_64
1010 pgd_t *user_pgd = xen_get_user_pgd(pgd);
1013 xen_do_pin(MMUEXT_UNPIN_TABLE,
1014 PFN_DOWN(__pa(user_pgd)));
1015 xen_unpin_page(mm, virt_to_page(user_pgd), PT_PGD);
1020 #ifdef CONFIG_X86_PAE
1021 /* Need to make sure unshared kernel PMD is unpinned */
1022 xen_unpin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
1026 __xen_pgd_walk(mm, pgd, xen_unpin_page, USER_LIMIT);
1031 static void xen_pgd_unpin(struct mm_struct *mm)
1033 __xen_pgd_unpin(mm, mm->pgd);
1037 * On resume, undo any pinning done at save, so that the rest of the
1038 * kernel doesn't see any unexpected pinned pagetables.
1040 void xen_mm_unpin_all(void)
1044 spin_lock(&pgd_lock);
1046 list_for_each_entry(page, &pgd_list, lru) {
1047 if (PageSavePinned(page)) {
1048 BUG_ON(!PagePinned(page));
1049 __xen_pgd_unpin(&init_mm, (pgd_t *)page_address(page));
1050 ClearPageSavePinned(page);
1054 spin_unlock(&pgd_lock);
1057 static void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
1059 spin_lock(&next->page_table_lock);
1061 spin_unlock(&next->page_table_lock);
1064 static void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
1066 spin_lock(&mm->page_table_lock);
1068 spin_unlock(&mm->page_table_lock);
1073 /* Another cpu may still have its %cr3 pointing at the pagetable, so
1074 we need to repoint it somewhere else before we can unpin it. */
1075 static void drop_other_mm_ref(void *info)
1077 struct mm_struct *mm = info;
1078 struct mm_struct *active_mm;
1080 active_mm = this_cpu_read(cpu_tlbstate.active_mm);
1082 if (active_mm == mm && this_cpu_read(cpu_tlbstate.state) != TLBSTATE_OK)
1083 leave_mm(smp_processor_id());
1085 /* If this cpu still has a stale cr3 reference, then make sure
1086 it has been flushed. */
1087 if (this_cpu_read(xen_current_cr3) == __pa(mm->pgd))
1088 load_cr3(swapper_pg_dir);
1091 static void xen_drop_mm_ref(struct mm_struct *mm)
1096 if (current->active_mm == mm) {
1097 if (current->mm == mm)
1098 load_cr3(swapper_pg_dir);
1100 leave_mm(smp_processor_id());
1103 /* Get the "official" set of cpus referring to our pagetable. */
1104 if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) {
1105 for_each_online_cpu(cpu) {
1106 if (!cpumask_test_cpu(cpu, mm_cpumask(mm))
1107 && per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
1109 smp_call_function_single(cpu, drop_other_mm_ref, mm, 1);
1113 cpumask_copy(mask, mm_cpumask(mm));
1115 /* It's possible that a vcpu may have a stale reference to our
1116 cr3, because it's in lazy mode and hasn't yet flushed
1117 its set of pending hypercalls. In this case, we can
1118 look at its actual current cr3 value, and force it to flush
1120 for_each_online_cpu(cpu) {
1121 if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
1122 cpumask_set_cpu(cpu, mask);
1125 if (!cpumask_empty(mask))
1126 smp_call_function_many(mask, drop_other_mm_ref, mm, 1);
1127 free_cpumask_var(mask);
1130 static void xen_drop_mm_ref(struct mm_struct *mm)
1132 if (current->active_mm == mm)
1133 load_cr3(swapper_pg_dir);
1138 * While a process runs, Xen pins its pagetable, which means that the
1139 * hypervisor forces it to be read-only, and it controls all updates
1140 * to it. This means that all pagetable updates have to go via the
1141 * hypervisor, which is moderately expensive.
1143 * Since we're pulling the pagetable down, we switch to init_mm,
1144 * unpin the old process pagetable and mark it all read-write, which
1145 * allows further operations on it to be simple memory accesses.
1147 * The only subtle point is that another CPU may be still using the
1148 * pagetable because of lazy tlb flushing. This means we need to
1149 * switch all CPUs off this pagetable before we can unpin it.
1151 static void xen_exit_mmap(struct mm_struct *mm)
1153 get_cpu(); /* make sure we don't move around */
1154 xen_drop_mm_ref(mm);
1157 spin_lock(&mm->page_table_lock);
1159 /* pgd may not be pinned in the error exit path of execve */
1160 if (xen_page_pinned(mm->pgd))
1163 spin_unlock(&mm->page_table_lock);
1166 static void __init xen_pagetable_setup_start(pgd_t *base)
1170 static __init void xen_mapping_pagetable_reserve(u64 start, u64 end)
1172 /* reserve the range used */
1173 native_pagetable_reserve(start, end);
1175 /* set as RW the rest */
1176 printk(KERN_DEBUG "xen: setting RW the range %llx - %llx\n", end,
1177 PFN_PHYS(pgt_buf_top));
1178 while (end < PFN_PHYS(pgt_buf_top)) {
1179 make_lowmem_page_readwrite(__va(end));
1184 static void xen_post_allocator_init(void);
1186 #ifdef CONFIG_X86_64
1187 static void __init xen_cleanhighmap(unsigned long vaddr,
1188 unsigned long vaddr_end)
1190 unsigned long kernel_end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
1191 pmd_t *pmd = level2_kernel_pgt + pmd_index(vaddr);
1193 /* NOTE: The loop is more greedy than the cleanup_highmap variant.
1194 * We include the PMD passed in on _both_ boundaries. */
1195 for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PAGE_SIZE));
1196 pmd++, vaddr += PMD_SIZE) {
1199 if (vaddr < (unsigned long) _text || vaddr > kernel_end)
1200 set_pmd(pmd, __pmd(0));
1202 /* In case we did something silly, we should crash in this function
1203 * instead of somewhere later where it would be confusing. */
1207 static void __init xen_pagetable_setup_done(pgd_t *base)
1209 #ifdef CONFIG_X86_64
1214 xen_setup_shared_info();
1215 #ifdef CONFIG_X86_64
1216 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
1217 unsigned long new_mfn_list;
1219 size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
1221 /* On 32-bit, we get zero so this never gets executed. */
1222 new_mfn_list = xen_revector_p2m_tree();
1223 if (new_mfn_list && new_mfn_list != xen_start_info->mfn_list) {
1224 /* using __ka address and sticking INVALID_P2M_ENTRY! */
1225 memset((void *)xen_start_info->mfn_list, 0xff, size);
1227 /* We should be in __ka space. */
1228 BUG_ON(xen_start_info->mfn_list < __START_KERNEL_map);
1229 addr = xen_start_info->mfn_list;
1230 /* We round up to the PMD, which means that if anybody at this stage is
1231 * using the __ka address of xen_start_info or xen_start_info->shared_info
1232 * they are going to crash. Fortunately we have already revectored
1233 * in xen_setup_kernel_pagetable and in xen_setup_shared_info. */
1234 size = roundup(size, PMD_SIZE);
1235 xen_cleanhighmap(addr, addr + size);
1237 size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
1238 memblock_free(__pa(xen_start_info->mfn_list), size);
1239 /* And revector! Bye bye old array */
1240 xen_start_info->mfn_list = new_mfn_list;
1243 /* At this stage, cleanup_highmap has already cleaned __ka space
1244 * from _brk_limit way up to the max_pfn_mapped (which is the end of
1245 * the ramdisk). We continue on, erasing PMD entries that point to page
1246 * tables - do note that they are accessible at this stage via __va.
1247 * For good measure we also round up to the PMD - which means that if
1248 * anybody is using a __ka address to the initial boot stack they are
1249 * going to crash. The xen_start_info has already been taken care of
1250 * in xen_setup_kernel_pagetable. */
1251 addr = xen_start_info->pt_base;
1252 size = roundup(xen_start_info->nr_pt_frames * PAGE_SIZE, PMD_SIZE);
1254 xen_cleanhighmap(addr, addr + size);
1255 xen_start_info->pt_base = (unsigned long)__va(__pa(xen_start_info->pt_base));
1257 /* This is superfluous and not strictly necessary, but let's do it
1258 * anyway. The MODULES_VADDR -> MODULES_END range should be clear of
1259 * anything at this stage. */
1260 xen_cleanhighmap(MODULES_VADDR, roundup(MODULES_VADDR, PUD_SIZE) - 1);
1263 xen_post_allocator_init();
1266 static void xen_write_cr2(unsigned long cr2)
1268 this_cpu_read(xen_vcpu)->arch.cr2 = cr2;
1271 static unsigned long xen_read_cr2(void)
1273 return this_cpu_read(xen_vcpu)->arch.cr2;
1276 unsigned long xen_read_cr2_direct(void)
1278 return this_cpu_read(xen_vcpu_info.arch.cr2);
1281 static void xen_flush_tlb(void)
1283 struct mmuext_op *op;
1284 struct multicall_space mcs;
1286 trace_xen_mmu_flush_tlb(0);
1290 mcs = xen_mc_entry(sizeof(*op));
1293 op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
1294 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1296 xen_mc_issue(PARAVIRT_LAZY_MMU);
1301 static void xen_flush_tlb_single(unsigned long addr)
1303 struct mmuext_op *op;
1304 struct multicall_space mcs;
1306 trace_xen_mmu_flush_tlb_single(addr);
1310 mcs = xen_mc_entry(sizeof(*op));
1312 op->cmd = MMUEXT_INVLPG_LOCAL;
1313 op->arg1.linear_addr = addr & PAGE_MASK;
1314 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1316 xen_mc_issue(PARAVIRT_LAZY_MMU);
1321 static void xen_flush_tlb_others(const struct cpumask *cpus,
1322 struct mm_struct *mm, unsigned long va)
1325 struct mmuext_op op;
1327 DECLARE_BITMAP(mask, num_processors);
1329 DECLARE_BITMAP(mask, NR_CPUS);
1332 struct multicall_space mcs;
1334 trace_xen_mmu_flush_tlb_others(cpus, mm, va);
1336 if (cpumask_empty(cpus))
1337 return; /* nothing to do */
1339 mcs = xen_mc_entry(sizeof(*args));
1341 args->op.arg2.vcpumask = to_cpumask(args->mask);
1343 /* Remove us, and any offline CPUs. */
1344 cpumask_and(to_cpumask(args->mask), cpus, cpu_online_mask);
1345 cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask));
1347 if (va == TLB_FLUSH_ALL) {
1348 args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
1350 args->op.cmd = MMUEXT_INVLPG_MULTI;
1351 args->op.arg1.linear_addr = va;
1354 MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);
1356 xen_mc_issue(PARAVIRT_LAZY_MMU);
1359 static unsigned long xen_read_cr3(void)
1361 return this_cpu_read(xen_cr3);
1364 static void set_current_cr3(void *v)
1366 this_cpu_write(xen_current_cr3, (unsigned long)v);
1369 static void __xen_write_cr3(bool kernel, unsigned long cr3)
1371 struct mmuext_op op;
1374 trace_xen_mmu_write_cr3(kernel, cr3);
1377 mfn = pfn_to_mfn(PFN_DOWN(cr3));
1381 WARN_ON(mfn == 0 && kernel);
1383 op.cmd = kernel ? MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR;
1386 xen_extend_mmuext_op(&op);
1389 this_cpu_write(xen_cr3, cr3);
1391 /* Update xen_current_cr3 once the batch has actually
1393 xen_mc_callback(set_current_cr3, (void *)cr3);
1397 static void xen_write_cr3(unsigned long cr3)
1399 BUG_ON(preemptible());
1401 xen_mc_batch(); /* disables interrupts */
1403 /* Update while interrupts are disabled, so it's atomic with
1405 this_cpu_write(xen_cr3, cr3);
1407 __xen_write_cr3(true, cr3);
1409 #ifdef CONFIG_X86_64
1411 pgd_t *user_pgd = xen_get_user_pgd(__va(cr3));
1413 __xen_write_cr3(false, __pa(user_pgd));
1415 __xen_write_cr3(false, 0);
1419 xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */
1422 static int xen_pgd_alloc(struct mm_struct *mm)
1424 pgd_t *pgd = mm->pgd;
1427 BUG_ON(PagePinned(virt_to_page(pgd)));
1429 #ifdef CONFIG_X86_64
1431 struct page *page = virt_to_page(pgd);
1434 BUG_ON(page->private != 0);
1438 user_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
1439 page->private = (unsigned long)user_pgd;
1441 if (user_pgd != NULL) {
1442 user_pgd[pgd_index(VSYSCALL_START)] =
1443 __pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE);
1447 BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd))));
1454 static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
1456 #ifdef CONFIG_X86_64
1457 pgd_t *user_pgd = xen_get_user_pgd(pgd);
1460 free_page((unsigned long)user_pgd);
1464 #ifdef CONFIG_X86_32
1465 static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
1467 /* If there's an existing pte, then don't allow _PAGE_RW to be set */
1468 if (pte_val_ma(*ptep) & _PAGE_PRESENT)
1469 pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
1474 #else /* CONFIG_X86_64 */
1475 static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
1477 unsigned long pfn = pte_pfn(pte);
1480 * If the new pfn is within the range of the newly allocated
1481 * kernel pagetable, and it isn't being mapped into an
1482 * early_ioremap fixmap slot as a freshly allocated page, make sure
1485 if (((!is_early_ioremap_ptep(ptep) &&
1486 pfn >= pgt_buf_start && pfn < pgt_buf_top)) ||
1487 (is_early_ioremap_ptep(ptep) && pfn != (pgt_buf_end - 1)))
1488 pte = pte_wrprotect(pte);
1492 #endif /* CONFIG_X86_64 */
1494 /* Init-time set_pte while constructing initial pagetables, which
1495 doesn't allow RO pagetable pages to be remapped RW */
1496 static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
1498 pte = mask_rw_pte(ptep, pte);
1500 xen_set_pte(ptep, pte);
1503 static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
1505 struct mmuext_op op;
1507 op.arg1.mfn = pfn_to_mfn(pfn);
1508 if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
1512 /* Early in boot, while setting up the initial pagetable, assume
1513 everything is pinned. */
1514 static void __init xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
1516 #ifdef CONFIG_FLATMEM
1517 BUG_ON(mem_map); /* should only be used early */
1519 make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
1520 pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
1523 /* Used for pmd and pud */
1524 static void __init xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn)
1526 #ifdef CONFIG_FLATMEM
1527 BUG_ON(mem_map); /* should only be used early */
1529 make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
1532 /* Early release_pte assumes that all pts are pinned, since there's
1533 only init_mm and anything attached to that is pinned. */
1534 static void __init xen_release_pte_init(unsigned long pfn)
1536 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
1537 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
1540 static void __init xen_release_pmd_init(unsigned long pfn)
1542 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
1545 static inline void __pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
1547 struct multicall_space mcs;
1548 struct mmuext_op *op;
1550 mcs = __xen_mc_entry(sizeof(*op));
1553 op->arg1.mfn = pfn_to_mfn(pfn);
1555 MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
1558 static inline void __set_pfn_prot(unsigned long pfn, pgprot_t prot)
1560 struct multicall_space mcs;
1561 unsigned long addr = (unsigned long)__va(pfn << PAGE_SHIFT);
1563 mcs = __xen_mc_entry(0);
1564 MULTI_update_va_mapping(mcs.mc, (unsigned long)addr,
1565 pfn_pte(pfn, prot), 0);
1568 /* This needs to make sure the new pte page is pinned iff it's being
1569 attached to a pinned pagetable. */
1570 static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn,
1573 bool pinned = PagePinned(virt_to_page(mm->pgd));
1575 trace_xen_mmu_alloc_ptpage(mm, pfn, level, pinned);
1578 struct page *page = pfn_to_page(pfn);
1580 SetPagePinned(page);
1582 if (!PageHighMem(page)) {
1585 __set_pfn_prot(pfn, PAGE_KERNEL_RO);
1587 if (level == PT_PTE && USE_SPLIT_PTLOCKS)
1588 __pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
1590 xen_mc_issue(PARAVIRT_LAZY_MMU);
1592 /* make sure there are no stray mappings of
1594 kmap_flush_unused();
1599 static void xen_alloc_pte(struct mm_struct *mm, unsigned long pfn)
1601 xen_alloc_ptpage(mm, pfn, PT_PTE);
1604 static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
1606 xen_alloc_ptpage(mm, pfn, PT_PMD);
1609 /* This should never happen until we're OK to use struct page */
1610 static inline void xen_release_ptpage(unsigned long pfn, unsigned level)
1612 struct page *page = pfn_to_page(pfn);
1613 bool pinned = PagePinned(page);
1615 trace_xen_mmu_release_ptpage(pfn, level, pinned);
1618 if (!PageHighMem(page)) {
1621 if (level == PT_PTE && USE_SPLIT_PTLOCKS)
1622 __pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
1624 __set_pfn_prot(pfn, PAGE_KERNEL);
1626 xen_mc_issue(PARAVIRT_LAZY_MMU);
1628 ClearPagePinned(page);
1632 static void xen_release_pte(unsigned long pfn)
1634 xen_release_ptpage(pfn, PT_PTE);
1637 static void xen_release_pmd(unsigned long pfn)
1639 xen_release_ptpage(pfn, PT_PMD);
1642 #if PAGETABLE_LEVELS == 4
1643 static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
1645 xen_alloc_ptpage(mm, pfn, PT_PUD);
1648 static void xen_release_pud(unsigned long pfn)
1650 xen_release_ptpage(pfn, PT_PUD);
1654 void __init xen_reserve_top(void)
1656 #ifdef CONFIG_X86_32
1657 unsigned long top = HYPERVISOR_VIRT_START;
1658 struct xen_platform_parameters pp;
1660 if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0)
1661 top = pp.virt_start;
1663 reserve_top_address(-top);
1664 #endif /* CONFIG_X86_32 */
1668 * Like __va(), but returns address in the kernel mapping (which is
1669 * all we have until the physical memory mapping has been set up).
1671 static void *__ka(phys_addr_t paddr)
1673 #ifdef CONFIG_X86_64
1674 return (void *)(paddr + __START_KERNEL_map);
1680 /* Convert a machine address to physical address */
1681 static unsigned long m2p(phys_addr_t maddr)
1685 maddr &= PTE_PFN_MASK;
1686 paddr = mfn_to_pfn(maddr >> PAGE_SHIFT) << PAGE_SHIFT;
1691 /* Convert a machine address to kernel virtual */
1692 static void *m2v(phys_addr_t maddr)
1694 return __ka(m2p(maddr));
1697 /* Set the page permissions on identity-mapped pages */
1698 static void set_page_prot(void *addr, pgprot_t prot)
1700 unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
1701 pte_t pte = pfn_pte(pfn, prot);
1703 if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, 0))
1706 #ifdef CONFIG_X86_32
1707 static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
1709 unsigned pmdidx, pteidx;
1713 level1_ident_pgt = extend_brk(sizeof(pte_t) * LEVEL1_IDENT_ENTRIES,
1718 for (pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) {
1721 /* Reuse or allocate a page of ptes */
1722 if (pmd_present(pmd[pmdidx]))
1723 pte_page = m2v(pmd[pmdidx].pmd);
1725 /* Check for free pte pages */
1726 if (ident_pte == LEVEL1_IDENT_ENTRIES)
1729 pte_page = &level1_ident_pgt[ident_pte];
1730 ident_pte += PTRS_PER_PTE;
1732 pmd[pmdidx] = __pmd(__pa(pte_page) | _PAGE_TABLE);
1735 /* Install mappings */
1736 for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
1739 #ifdef CONFIG_X86_32
1740 if (pfn > max_pfn_mapped)
1741 max_pfn_mapped = pfn;
1744 if (!pte_none(pte_page[pteidx]))
1747 pte = pfn_pte(pfn, PAGE_KERNEL_EXEC);
1748 pte_page[pteidx] = pte;
1752 for (pteidx = 0; pteidx < ident_pte; pteidx += PTRS_PER_PTE)
1753 set_page_prot(&level1_ident_pgt[pteidx], PAGE_KERNEL_RO);
1755 set_page_prot(pmd, PAGE_KERNEL_RO);
1758 void __init xen_setup_machphys_mapping(void)
1760 struct xen_machphys_mapping mapping;
1762 if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
1763 machine_to_phys_mapping = (unsigned long *)mapping.v_start;
1764 machine_to_phys_nr = mapping.max_mfn + 1;
1766 machine_to_phys_nr = MACH2PHYS_NR_ENTRIES;
1768 #ifdef CONFIG_X86_32
1769 WARN_ON((machine_to_phys_mapping + (machine_to_phys_nr - 1))
1770 < machine_to_phys_mapping);
1774 #ifdef CONFIG_X86_64
1775 static void convert_pfn_mfn(void *v)
1780 /* All levels are converted the same way, so just treat them
1782 for (i = 0; i < PTRS_PER_PTE; i++)
1783 pte[i] = xen_make_pte(pte[i].pte);
1785 static void __init check_pt_base(unsigned long *pt_base, unsigned long *pt_end,
1788 if (*pt_base == PFN_DOWN(__pa(addr))) {
1789 set_page_prot((void *)addr, PAGE_KERNEL);
1790 clear_page((void *)addr);
1793 if (*pt_end == PFN_DOWN(__pa(addr))) {
1794 set_page_prot((void *)addr, PAGE_KERNEL);
1795 clear_page((void *)addr);
1800 * Set up the initial kernel pagetable.
1802 * We can construct this by grafting the Xen provided pagetable into
1803 * head_64.S's preconstructed pagetables. We copy the Xen L2's into
1804 * level2_ident_pgt, level2_kernel_pgt and level2_fixmap_pgt. This
1805 * means that only the kernel has a physical mapping to start with -
1806 * but that's enough to get __va working. We need to fill in the rest
1807 * of the physical mapping once some sort of allocator has been set
1810 void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
1814 unsigned long addr[3];
1815 unsigned long pt_base, pt_end;
1818 /* max_pfn_mapped is the last pfn mapped in the initial memory
1819 * mappings. Considering that on Xen after the kernel mappings we
1820 * have the mappings of some pages that don't exist in pfn space, we
1821 * set max_pfn_mapped to the last real pfn mapped. */
1822 max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list));
1824 pt_base = PFN_DOWN(__pa(xen_start_info->pt_base));
1825 pt_end = pt_base + xen_start_info->nr_pt_frames;
1827 /* Zap identity mapping */
1828 init_level4_pgt[0] = __pgd(0);
1830 /* Pre-constructed entries are in pfn, so convert to mfn */
1831 /* L4[272] -> level3_ident_pgt
1832 * L4[511] -> level3_kernel_pgt */
1833 convert_pfn_mfn(init_level4_pgt);
1835 /* L3_i[0] -> level2_ident_pgt */
1836 convert_pfn_mfn(level3_ident_pgt);
1837 /* L3_k[510] -> level2_kernel_pgt
1838 * L3_i[511] -> level2_fixmap_pgt */
1839 convert_pfn_mfn(level3_kernel_pgt);
1841 /* We get [511][511] and have Xen's version of level2_kernel_pgt */
1842 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
1843 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
1845 addr[0] = (unsigned long)pgd;
1846 addr[1] = (unsigned long)l3;
1847 addr[2] = (unsigned long)l2;
1848 /* Graft it onto L4[272][0]. Note that we are creating an aliasing problem:
1849 * Both L4[272][0] and L4[511][511] have entries that point to the same
1850 * L2 (PMD) tables. Meaning that if you modify it in __va space
1851 * it will also be modified in the __ka space! (But if you just
1852 * modify the PMD table to point to other PTEs or none, then you
1853 * are OK - which is what cleanup_highmap does) */
1854 copy_page(level2_ident_pgt, l2);
1855 /* Graft it onto L4[511][511] */
1856 copy_page(level2_kernel_pgt, l2);
1858 /* Get [511][510] and graft that in level2_fixmap_pgt */
1859 l3 = m2v(pgd[pgd_index(__START_KERNEL_map + PMD_SIZE)].pgd);
1860 l2 = m2v(l3[pud_index(__START_KERNEL_map + PMD_SIZE)].pud);
1861 copy_page(level2_fixmap_pgt, l2);
1862 /* Note that we don't do anything with level1_fixmap_pgt which
1865 /* Make pagetable pieces RO */
1866 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
1867 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
1868 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
1869 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
1870 set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
1871 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
1872 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
1874 /* Pin down new L4 */
1875 pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
1876 PFN_DOWN(__pa_symbol(init_level4_pgt)));
1878 /* Unpin Xen-provided one */
1879 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
1882 * At this stage there can be no user pgd, and no page
1883 * structure to attach it to, so make sure we just set kernel
1887 __xen_write_cr3(true, __pa(init_level4_pgt));
1888 xen_mc_issue(PARAVIRT_LAZY_CPU);
1890 /* We can't easily rip out the L3 and L2, as the Xen pagetables are
1891 * set out this way: [L4], [L1], [L2], [L3], [L1], [L1] ... for
1892 * the initial domain. For guests using the toolstack, they are in
1893 * [L4], [L3], [L2], [L1], [L1] order. So for dom0 we can only
1894 * rip out the [L4] (pgd), but for guests we shave off three pages.
1896 for (i = 0; i < ARRAY_SIZE(addr); i++)
1897 check_pt_base(&pt_base, &pt_end, addr[i]);
1899 /* Reserve the Xen pagetable we are still using, now smaller by up to three pages */
1900 memblock_reserve(PFN_PHYS(pt_base), (pt_end - pt_base) * PAGE_SIZE);
1901 /* Revector the xen_start_info */
1902 xen_start_info = (struct start_info *)__va(__pa(xen_start_info));
1904 #else /* !CONFIG_X86_64 */
1905 static RESERVE_BRK_ARRAY(pmd_t, initial_kernel_pmd, PTRS_PER_PMD);
1906 static RESERVE_BRK_ARRAY(pmd_t, swapper_kernel_pmd, PTRS_PER_PMD);
1908 static void __init xen_write_cr3_init(unsigned long cr3)
1910 unsigned long pfn = PFN_DOWN(__pa(swapper_pg_dir));
1912 BUG_ON(read_cr3() != __pa(initial_page_table));
1913 BUG_ON(cr3 != __pa(swapper_pg_dir));
1916 * We are switching to swapper_pg_dir for the first time (from
1917 * initial_page_table) and therefore need to mark that page
1918 * read-only and then pin it.
1920 * Xen disallows sharing of kernel PMDs for PAE
1921 * guests. Therefore we must copy the kernel PMD from
1922 * initial_page_table into a new kernel PMD to be used in
1925 swapper_kernel_pmd =
1926 extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
1927 copy_page(swapper_kernel_pmd, initial_kernel_pmd);
1928 swapper_pg_dir[KERNEL_PGD_BOUNDARY] =
1929 __pgd(__pa(swapper_kernel_pmd) | _PAGE_PRESENT);
1930 set_page_prot(swapper_kernel_pmd, PAGE_KERNEL_RO);
1932 set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO);
1934 pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, pfn);
1936 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE,
1937 PFN_DOWN(__pa(initial_page_table)));
1938 set_page_prot(initial_page_table, PAGE_KERNEL);
1939 set_page_prot(initial_kernel_pmd, PAGE_KERNEL);
1941 pv_mmu_ops.write_cr3 = &xen_write_cr3;
1944 void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
1948 initial_kernel_pmd =
1949 extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
1951 max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) +
1952 xen_start_info->nr_pt_frames * PAGE_SIZE +
1955 kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
1956 copy_page(initial_kernel_pmd, kernel_pmd);
1958 xen_map_identity_early(initial_kernel_pmd, max_pfn);
1960 copy_page(initial_page_table, pgd);
1961 initial_page_table[KERNEL_PGD_BOUNDARY] =
1962 __pgd(__pa(initial_kernel_pmd) | _PAGE_PRESENT);
1964 set_page_prot(initial_kernel_pmd, PAGE_KERNEL_RO);
1965 set_page_prot(initial_page_table, PAGE_KERNEL_RO);
1966 set_page_prot(empty_zero_page, PAGE_KERNEL_RO);
1968 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
1970 pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE,
1971 PFN_DOWN(__pa(initial_page_table)));
1972 xen_write_cr3(__pa(initial_page_table));
1974 memblock_reserve(__pa(xen_start_info->pt_base),
1975 xen_start_info->nr_pt_frames * PAGE_SIZE);
1977 #endif /* CONFIG_X86_64 */
1979 static unsigned char dummy_mapping[PAGE_SIZE] __page_aligned_bss;
1981 static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
1985 phys >>= PAGE_SHIFT;
1988 case FIX_BTMAP_END ... FIX_BTMAP_BEGIN:
1989 #ifdef CONFIG_X86_F00F_BUG
1992 #ifdef CONFIG_X86_32
1995 # ifdef CONFIG_HIGHMEM
1996 case FIX_KMAP_BEGIN ... FIX_KMAP_END:
1999 case VSYSCALL_LAST_PAGE ... VSYSCALL_FIRST_PAGE:
2002 case FIX_TEXT_POKE0:
2003 case FIX_TEXT_POKE1:
2004 /* All local page mappings */
2005 pte = pfn_pte(phys, prot);
2008 #ifdef CONFIG_X86_LOCAL_APIC
2009 case FIX_APIC_BASE: /* maps dummy local APIC */
2010 pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
2014 #ifdef CONFIG_X86_IO_APIC
2015 case FIX_IO_APIC_BASE_0 ... FIX_IO_APIC_BASE_END:
2017 * We just don't map the IO APIC - all access is via
2018 * hypercalls. Keep the address in the pte for reference.
2020 pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
2024 case FIX_PARAVIRT_BOOTMAP:
2025 /* This is an MFN, but it isn't an IO mapping from the
2027 pte = mfn_pte(phys, prot);
2031 /* By default, set_fixmap is used for hardware mappings */
2032 pte = mfn_pte(phys, __pgprot(pgprot_val(prot) | _PAGE_IOMAP));
2036 __native_set_fixmap(idx, pte);
2038 #ifdef CONFIG_X86_64
2039 /* Replicate changes to map the vsyscall page into the user
2040 pagetable vsyscall mapping. */
2041 if ((idx >= VSYSCALL_LAST_PAGE && idx <= VSYSCALL_FIRST_PAGE) ||
2043 unsigned long vaddr = __fix_to_virt(idx);
2044 set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte);
2049 static void __init xen_post_allocator_init(void)
2051 pv_mmu_ops.set_pte = xen_set_pte;
2052 pv_mmu_ops.set_pmd = xen_set_pmd;
2053 pv_mmu_ops.set_pud = xen_set_pud;
2054 #if PAGETABLE_LEVELS == 4
2055 pv_mmu_ops.set_pgd = xen_set_pgd;
2058 /* This will work as long as patching hasn't happened yet
2059 (which it hasn't) */
2060 pv_mmu_ops.alloc_pte = xen_alloc_pte;
2061 pv_mmu_ops.alloc_pmd = xen_alloc_pmd;
2062 pv_mmu_ops.release_pte = xen_release_pte;
2063 pv_mmu_ops.release_pmd = xen_release_pmd;
2064 #if PAGETABLE_LEVELS == 4
2065 pv_mmu_ops.alloc_pud = xen_alloc_pud;
2066 pv_mmu_ops.release_pud = xen_release_pud;
2069 #ifdef CONFIG_X86_64
2070 SetPagePinned(virt_to_page(level3_user_vsyscall));
2072 xen_mark_init_mm_pinned();
2075 static void xen_leave_lazy_mmu(void)
2079 paravirt_leave_lazy_mmu();
2083 static const struct pv_mmu_ops xen_mmu_ops __initconst = {
2084 .read_cr2 = xen_read_cr2,
2085 .write_cr2 = xen_write_cr2,
2087 .read_cr3 = xen_read_cr3,
2088 #ifdef CONFIG_X86_32
2089 .write_cr3 = xen_write_cr3_init,
2091 .write_cr3 = xen_write_cr3,
2094 .flush_tlb_user = xen_flush_tlb,
2095 .flush_tlb_kernel = xen_flush_tlb,
2096 .flush_tlb_single = xen_flush_tlb_single,
2097 .flush_tlb_others = xen_flush_tlb_others,
2099 .pte_update = paravirt_nop,
2100 .pte_update_defer = paravirt_nop,
2102 .pgd_alloc = xen_pgd_alloc,
2103 .pgd_free = xen_pgd_free,
2105 .alloc_pte = xen_alloc_pte_init,
2106 .release_pte = xen_release_pte_init,
2107 .alloc_pmd = xen_alloc_pmd_init,
2108 .release_pmd = xen_release_pmd_init,
2110 .set_pte = xen_set_pte_init,
2111 .set_pte_at = xen_set_pte_at,
2112 .set_pmd = xen_set_pmd_hyper,
2114 .ptep_modify_prot_start = __ptep_modify_prot_start,
2115 .ptep_modify_prot_commit = __ptep_modify_prot_commit,
2117 .pte_val = PV_CALLEE_SAVE(xen_pte_val),
2118 .pgd_val = PV_CALLEE_SAVE(xen_pgd_val),
2120 .make_pte = PV_CALLEE_SAVE(xen_make_pte),
2121 .make_pgd = PV_CALLEE_SAVE(xen_make_pgd),
2123 #ifdef CONFIG_X86_PAE
2124 .set_pte_atomic = xen_set_pte_atomic,
2125 .pte_clear = xen_pte_clear,
2126 .pmd_clear = xen_pmd_clear,
2127 #endif /* CONFIG_X86_PAE */
2128 .set_pud = xen_set_pud_hyper,
2130 .make_pmd = PV_CALLEE_SAVE(xen_make_pmd),
2131 .pmd_val = PV_CALLEE_SAVE(xen_pmd_val),
2133 #if PAGETABLE_LEVELS == 4
2134 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
2135 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
2136 .set_pgd = xen_set_pgd_hyper,
2138 .alloc_pud = xen_alloc_pmd_init,
2139 .release_pud = xen_release_pmd_init,
2140 #endif /* PAGETABLE_LEVELS == 4 */
2142 .activate_mm = xen_activate_mm,
2143 .dup_mmap = xen_dup_mmap,
2144 .exit_mmap = xen_exit_mmap,
2147 .enter = paravirt_enter_lazy_mmu,
2148 .leave = xen_leave_lazy_mmu,
2151 .set_fixmap = xen_set_fixmap,
2154 void __init xen_init_mmu_ops(void)
2156 x86_init.mapping.pagetable_reserve = xen_mapping_pagetable_reserve;
2157 x86_init.paging.pagetable_setup_start = xen_pagetable_setup_start;
2158 x86_init.paging.pagetable_setup_done = xen_pagetable_setup_done;
2159 pv_mmu_ops = xen_mmu_ops;
2161 memset(dummy_mapping, 0xff, PAGE_SIZE);
2164 /* Protected by xen_reservation_lock. */
2165 #define MAX_CONTIG_ORDER 9 /* 2MB */
2166 static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];
2168 #define VOID_PTE (mfn_pte(0, __pgprot(0)))
2169 static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
2170 unsigned long *in_frames,
2171 unsigned long *out_frames)
2174 struct multicall_space mcs;
2177 for (i = 0; i < (1UL<<order); i++, vaddr += PAGE_SIZE) {
2178 mcs = __xen_mc_entry(0);
2181 in_frames[i] = virt_to_mfn(vaddr);
2183 MULTI_update_va_mapping(mcs.mc, vaddr, VOID_PTE, 0);
2184 __set_phys_to_machine(virt_to_pfn(vaddr), INVALID_P2M_ENTRY);
2187 out_frames[i] = virt_to_pfn(vaddr);
2193 * Update the pfn-to-mfn mappings for a virtual address range, either to
2194 * point to an array of mfns, or contiguously from a single starting
2197 static void xen_remap_exchanged_ptes(unsigned long vaddr, int order,
2198 unsigned long *mfns,
2199 unsigned long first_mfn)
2206 limit = 1u << order;
2207 for (i = 0; i < limit; i++, vaddr += PAGE_SIZE) {
2208 struct multicall_space mcs;
2211 mcs = __xen_mc_entry(0);
2215 mfn = first_mfn + i;
2217 if (i < (limit - 1))
2221 flags = UVMF_INVLPG | UVMF_ALL;
2223 flags = UVMF_TLB_FLUSH | UVMF_ALL;
2226 MULTI_update_va_mapping(mcs.mc, vaddr,
2227 mfn_pte(mfn, PAGE_KERNEL), flags);
2229 set_phys_to_machine(virt_to_pfn(vaddr), mfn);
2236 * Perform the hypercall to exchange a region of our pfns to point to
2237 * memory with the required contiguous alignment. Takes the pfns as
2238 * input, and populates mfns as output.
2240 * Returns a success code indicating whether the hypervisor was able to
2241 * satisfy the request or not.
2243 static int xen_exchange_memory(unsigned long extents_in, unsigned int order_in,
2244 unsigned long *pfns_in,
2245 unsigned long extents_out,
2246 unsigned int order_out,
2247 unsigned long *mfns_out,
2248 unsigned int address_bits)
2253 struct xen_memory_exchange exchange = {
2255 .nr_extents = extents_in,
2256 .extent_order = order_in,
2257 .extent_start = pfns_in,
2261 .nr_extents = extents_out,
2262 .extent_order = order_out,
2263 .extent_start = mfns_out,
2264 .address_bits = address_bits,
2269 BUG_ON(extents_in << order_in != extents_out << order_out);
2271 rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
2272 success = (exchange.nr_exchanged == extents_in);
2274 BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
2275 BUG_ON(success && (rc != 0));
2280 int xen_create_contiguous_region(unsigned long vstart, unsigned int order,
2281 unsigned int address_bits)
2283 unsigned long *in_frames = discontig_frames, out_frame;
2284 unsigned long flags;
2288 * Currently an auto-translated guest will not perform I/O, nor will
2289 * it require PAE page directories below 4GB. Therefore any calls to
2290 * this function are redundant and can be ignored.
2293 if (xen_feature(XENFEAT_auto_translated_physmap))
2296 if (unlikely(order > MAX_CONTIG_ORDER))
2299 memset((void *) vstart, 0, PAGE_SIZE << order);
2301 spin_lock_irqsave(&xen_reservation_lock, flags);
2303 /* 1. Zap current PTEs, remembering MFNs. */
2304 xen_zap_pfn_range(vstart, order, in_frames, NULL);
2306 /* 2. Get a new contiguous memory extent. */
2307 out_frame = virt_to_pfn(vstart);
2308 success = xen_exchange_memory(1UL << order, 0, in_frames,
2309 1, order, &out_frame,
2312 /* 3. Map the new extent in place of old pages. */
2314 xen_remap_exchanged_ptes(vstart, order, NULL, out_frame);
2316 xen_remap_exchanged_ptes(vstart, order, in_frames, 0);
2318 spin_unlock_irqrestore(&xen_reservation_lock, flags);
2320 return success ? 0 : -ENOMEM;
2322 EXPORT_SYMBOL_GPL(xen_create_contiguous_region);
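/*
 * Illustrative use (a hypothetical caller, not taken from this file):
 * code that needs a machine-contiguous buffer below 4GB might do
 * something like:
 *
 *	unsigned long buf = __get_free_pages(GFP_KERNEL, order);
 *	if (buf && xen_create_contiguous_region(buf, order, 32) == 0) {
 *		... use the buffer for DMA ...
 *		xen_destroy_contiguous_region(buf, order);
 *	}
 *	free_pages(buf, order);
 */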
2324 void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order)
2326 unsigned long *out_frames = discontig_frames, in_frame;
2327 unsigned long flags;
2330 if (xen_feature(XENFEAT_auto_translated_physmap))
2333 if (unlikely(order > MAX_CONTIG_ORDER))
2336 memset((void *) vstart, 0, PAGE_SIZE << order);
2338 spin_lock_irqsave(&xen_reservation_lock, flags);
2340 /* 1. Find start MFN of contiguous extent. */
2341 in_frame = virt_to_mfn(vstart);
2343 /* 2. Zap current PTEs. */
2344 xen_zap_pfn_range(vstart, order, NULL, out_frames);
2346 /* 3. Do the exchange for non-contiguous MFNs. */
2347 success = xen_exchange_memory(1, order, &in_frame, 1UL << order,
2350 /* 4. Map new pages in place of old pages. */
2352 xen_remap_exchanged_ptes(vstart, order, out_frames, 0);
2354 xen_remap_exchanged_ptes(vstart, order, NULL, in_frame);
2356 spin_unlock_irqrestore(&xen_reservation_lock, flags);
2358 EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
2360 #ifdef CONFIG_XEN_PVHVM
2361 static void xen_hvm_exit_mmap(struct mm_struct *mm)
2363 struct xen_hvm_pagetable_dying a;
2366 a.domid = DOMID_SELF;
2367 a.gpa = __pa(mm->pgd);
2368 rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a);
2369 WARN_ON_ONCE(rc < 0);
2372 static int is_pagetable_dying_supported(void)
2374 struct xen_hvm_pagetable_dying a;
2377 a.domid = DOMID_SELF;
2379 rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a);
2381 printk(KERN_DEBUG "HVMOP_pagetable_dying not supported\n");
2387 void __init xen_hvm_init_mmu_ops(void)
2389 if (is_pagetable_dying_supported())
2390 pv_mmu_ops.exit_mmap = xen_hvm_exit_mmap;
2394 #define REMAP_BATCH_SIZE 16
2399 struct mmu_update *mmu_update;
2402 static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
2403 unsigned long addr, void *data)
2405 struct remap_data *rmd = data;
2406 pte_t pte = pte_mkspecial(pfn_pte(rmd->mfn++, rmd->prot));
2408 rmd->mmu_update->ptr = virt_to_machine(ptep).maddr;
2409 rmd->mmu_update->val = pte_val_ma(pte);
2415 int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
2417 unsigned long mfn, int nr,
2418 pgprot_t prot, unsigned domid)
2420 struct remap_data rmd;
2421 struct mmu_update mmu_update[REMAP_BATCH_SIZE];
2423 unsigned long range;
2426 prot = __pgprot(pgprot_val(prot) | _PAGE_IOMAP);
2428 BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_RESERVED | VM_IO)) ==
2429 (VM_PFNMAP | VM_RESERVED | VM_IO)));
2435 batch = min(REMAP_BATCH_SIZE, nr);
2436 range = (unsigned long)batch << PAGE_SHIFT;
2438 rmd.mmu_update = mmu_update;
2439 err = apply_to_page_range(vma->vm_mm, addr, range,
2440 remap_area_mfn_pte_fn, &rmd);
2445 if (HYPERVISOR_mmu_update(mmu_update, batch, NULL, domid) < 0)
2459 EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);