Merge branch 'linus' into cpus4096
[pandora-kernel.git] / arch/x86/mm/pgtable.c
index 5accc08..557b2ab 100644
@@ -1,6 +1,8 @@
 #include <linux/mm.h>
 #include <asm/pgalloc.h>
+#include <asm/pgtable.h>
 #include <asm/tlb.h>
+#include <asm/fixmap.h>
 
 pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 {
@@ -58,82 +60,31 @@ static inline void pgd_list_del(pgd_t *pgd)
        list_del(&page->lru);
 }
 
-#ifdef CONFIG_X86_64
-pgd_t *pgd_alloc(struct mm_struct *mm)
-{
-       unsigned boundary;
-       pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
-       unsigned long flags;
-       if (!pgd)
-               return NULL;
-       spin_lock_irqsave(&pgd_lock, flags);
-       pgd_list_add(pgd);
-       spin_unlock_irqrestore(&pgd_lock, flags);
-       /*
-        * Copy kernel pointers in from init.
-        * Could keep a freelist or slab cache of those because the kernel
-        * part never changes.
-        */
-       boundary = pgd_index(__PAGE_OFFSET);
-       memset(pgd, 0, boundary * sizeof(pgd_t));
-       memcpy(pgd + boundary,
-              init_level4_pgt + boundary,
-              (PTRS_PER_PGD - boundary) * sizeof(pgd_t));
-       return pgd;
-}
-
-void pgd_free(struct mm_struct *mm, pgd_t *pgd)
-{
-       unsigned long flags;
-       BUG_ON((unsigned long)pgd & (PAGE_SIZE-1));
-       spin_lock_irqsave(&pgd_lock, flags);
-       pgd_list_del(pgd);
-       spin_unlock_irqrestore(&pgd_lock, flags);
-       free_page((unsigned long)pgd);
-}
-#else
-/*
- * List of all pgd's needed for non-PAE so it can invalidate entries
- * in both cached and uncached pgd's; not needed for PAE since the
- * kernel pmd is shared. If PAE were not to share the pmd a similar
- * tactic would be needed. This is essentially codepath-based locking
- * against pageattr.c; it is the unique case in which a valid change
- * of kernel pagetables can't be lazily synchronized by vmalloc faults.
- * vmalloc faults work because attached pagetables are never freed.
- * -- wli
- */
 #define UNSHARED_PTRS_PER_PGD                          \
-       (SHARED_KERNEL_PMD ? USER_PTRS_PER_PGD : PTRS_PER_PGD)
+       (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
 
 static void pgd_ctor(void *p)
 {
        pgd_t *pgd = p;
-       unsigned long flags;
-
-       /* Clear usermode parts of PGD */
-       memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
-
-       spin_lock_irqsave(&pgd_lock, flags);
 
        /* If the pgd points to a shared pagetable level (either the
           ptes in non-PAE, or shared PMD in PAE), then just copy the
           references from swapper_pg_dir. */
        if (PAGETABLE_LEVELS == 2 ||
-           (PAGETABLE_LEVELS == 3 && SHARED_KERNEL_PMD)) {
-               clone_pgd_range(pgd + USER_PTRS_PER_PGD,
-                               swapper_pg_dir + USER_PTRS_PER_PGD,
+           (PAGETABLE_LEVELS == 3 && SHARED_KERNEL_PMD) ||
+           PAGETABLE_LEVELS == 4) {
+               clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
+                               swapper_pg_dir + KERNEL_PGD_BOUNDARY,
                                KERNEL_PGD_PTRS);
                paravirt_alloc_pmd_clone(__pa(pgd) >> PAGE_SHIFT,
                                         __pa(swapper_pg_dir) >> PAGE_SHIFT,
-                                        USER_PTRS_PER_PGD,
+                                        KERNEL_PGD_BOUNDARY,
                                         KERNEL_PGD_PTRS);
        }
 
        /* list required to sync kernel mapping updates */
        if (!SHARED_KERNEL_PMD)
                pgd_list_add(pgd);
-
-       spin_unlock_irqrestore(&pgd_lock, flags);
 }
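
For reference, clone_pgd_range() used above is not defined in this file; assuming the usual asm/pgtable.h definition, it is just a memcpy over top-level slots, roughly:

static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
        /* copy `count' pgd entries wholesale, kernel half included */
        memcpy(dst, src, count * sizeof(pgd_t));
}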
 
 static void pgd_dtor(void *pgd)
@@ -148,7 +99,84 @@ static void pgd_dtor(void *pgd)
        spin_unlock_irqrestore(&pgd_lock, flags);
 }
 
+/*
+ * List of all pgd's needed for non-PAE so it can invalidate entries
+ * in both cached and uncached pgd's; not needed for PAE since the
+ * kernel pmd is shared. If PAE were not to share the pmd a similar
+ * tactic would be needed. This is essentially codepath-based locking
+ * against pageattr.c; it is the unique case in which a valid change
+ * of kernel pagetables can't be lazily synchronized by vmalloc faults.
+ * vmalloc faults work because attached pagetables are never freed.
+ * -- wli
+ */
+
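To make the "codepath-based locking" above concrete: a kernel-mapping sync path (the 32-bit vmalloc/pageattr style of walker) takes pgd_lock and visits every registered pgd through the page->lru links set up by pgd_list_add(). A rough sketch, with sync_one_pgd() standing in as a hypothetical helper:

struct page *page;
unsigned long flags;

spin_lock_irqsave(&pgd_lock, flags);
list_for_each_entry(page, &pgd_list, lru)
        sync_one_pgd((pgd_t *)page_address(page), address);    /* hypothetical */
spin_unlock_irqrestore(&pgd_lock, flags);
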
 #ifdef CONFIG_X86_PAE
+/*
+ * In PAE mode, we need to do a cr3 reload (=tlb flush) when
+ * updating the top-level pagetable entries to guarantee the
+ * processor notices the update.  Since this is expensive, and
+ * all 4 top-level entries are used almost immediately in a
+ * new process's life, we just pre-populate them here.
+ *
+ * Also, if we're in a paravirt environment where the kernel pmd is
+ * not shared between pagetables (!SHARED_KERNEL_PMD), we allocate
+ * and initialize the kernel pmds here.
+ */
+#define PREALLOCATED_PMDS      UNSHARED_PTRS_PER_PGD
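
To put numbers on PREALLOCATED_PMDS (assuming the default 32-bit 3G/1G split): under PAE, PGDIR_SHIFT is 30, so KERNEL_PGD_BOUNDARY = pgd_index(PAGE_OFFSET) = 0xC0000000 >> 30 = 3. With SHARED_KERNEL_PMD that means three user pmds are pre-allocated per pgd; when the kernel pmd is not shared (e.g. a paravirt guest such as Xen), all four (PTRS_PER_PGD) are.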
+
+void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
+{
+       paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
+
+       /* Note: almost everything apart from _PAGE_PRESENT is
+          reserved at the pmd (PDPT) level. */
+       set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT));
+
+       /*
+        * According to Intel App note "TLBs, Paging-Structure Caches,
+        * and Their Invalidation", April 2007, document 317080-001,
+        * section 8.1: in PAE mode we explicitly have to flush the
+        * TLB via cr3 if the top-level pgd is changed...
+        */
+       if (mm == current->active_mm)
+               write_cr3(read_cr3());
+}
+#else  /* !CONFIG_X86_PAE */
+
+/* No need to prepopulate any pagetable entries in non-PAE modes. */
+#define PREALLOCATED_PMDS      0
+
+#endif /* CONFIG_X86_PAE */
+
+static void free_pmds(pmd_t *pmds[])
+{
+       int i;
+
+       for (i = 0; i < PREALLOCATED_PMDS; i++)
+               if (pmds[i])
+                       free_page((unsigned long)pmds[i]);
+}
+
+static int preallocate_pmds(pmd_t *pmds[])
+{
+       int i;
+       bool failed = false;
+
+       for (i = 0; i < PREALLOCATED_PMDS; i++) {
+               pmd_t *pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
+               if (pmd == NULL)
+                       failed = true;
+               pmds[i] = pmd;
+       }
+
+       if (failed) {
+               free_pmds(pmds);
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
 /*
  * Mop up any pmd pages which may still be attached to the pgd.
  * Normally they will be freed by munmap/exit_mmap, but any pmd we
@@ -159,7 +187,7 @@ static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
 {
        int i;
 
-       for(i = 0; i < UNSHARED_PTRS_PER_PGD; i++) {
+       for (i = 0; i < PREALLOCATED_PMDS; i++) {
                pgd_t pgd = pgdp[i];
 
                if (pgd_val(pgd) != 0) {
@@ -173,94 +201,132 @@ static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
        }
 }
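
Pre-allocated pmds that never backed a user mapping are invisible to munmap/exit_mmap, which only walk established vmas, so pgd_free() has to release them explicitly here. The innermost lines of the loop fall between these two hunks; conceptually, mopping up one slot amounts to something like the following sketch (not the literal elided code):

if (pgd_val(pgd) != 0) {
        pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);

        pgd_clear(&pgdp[i]);    /* detach the pmd from this pgd slot */
        pmd_free(mm, pmd);      /* and return its page */
}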
 
-/*
- * In PAE mode, we need to do a cr3 reload (=tlb flush) when
- * updating the top-level pagetable entries to guarantee the
- * processor notices the update.  Since this is expensive, and
- * all 4 top-level entries are used almost immediately in a
- * new process's life, we just pre-populate them here.
- *
- * Also, if we're in a paravirt environment where the kernel pmd is
- * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
- * and initialize the kernel pmds here.
- */
-static int pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd)
+static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
 {
        pud_t *pud;
        unsigned long addr;
        int i;
 
        pud = pud_offset(pgd, 0);
-       for (addr = i = 0; i < UNSHARED_PTRS_PER_PGD;
-            i++, pud++, addr += PUD_SIZE) {
-               pmd_t *pmd = pmd_alloc_one(mm, addr);
 
-               if (!pmd) {
-                       pgd_mop_up_pmds(mm, pgd);
-                       return 0;
-               }
+       for (addr = i = 0; i < PREALLOCATED_PMDS;
+            i++, pud++, addr += PUD_SIZE) {
+               pmd_t *pmd = pmds[i];
 
-               if (i >= USER_PTRS_PER_PGD)
+               if (i >= KERNEL_PGD_BOUNDARY)
                        memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
                               sizeof(pmd_t) * PTRS_PER_PMD);
 
                pud_populate(mm, pud, pmd);
        }
-
-       return 1;
 }
 
-void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
+pgd_t *pgd_alloc(struct mm_struct *mm)
 {
-       paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
+       pgd_t *pgd;
+       pmd_t *pmds[PREALLOCATED_PMDS];
+       unsigned long flags;
 
-       /* Note: almost everything apart from _PAGE_PRESENT is
-          reserved at the pmd (PDPT) level. */
-       set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT));
+       pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
+
+       if (pgd == NULL)
+               goto out;
+
+       mm->pgd = pgd;
+
+       if (preallocate_pmds(pmds) != 0)
+               goto out_free_pgd;
+
+       if (paravirt_pgd_alloc(mm) != 0)
+               goto out_free_pmds;
 
        /*
-        * According to Intel App note "TLBs, Paging-Structure Caches,
-        * and Their Invalidation", April 2007, document 317080-001,
-        * section 8.1: in PAE mode we explicitly have to flush the
-        * TLB via cr3 if the top-level pgd is changed...
+        * Make sure that pre-populating the pmds is atomic with
+        * respect to anything walking the pgd_list, so that they
+        * never see a partially populated pgd.
         */
-       if (mm == current->active_mm)
-               write_cr3(read_cr3());
+       spin_lock_irqsave(&pgd_lock, flags);
+
+       pgd_ctor(pgd);
+       pgd_prepopulate_pmd(mm, pgd, pmds);
+
+       spin_unlock_irqrestore(&pgd_lock, flags);
+
+       return pgd;
+
+out_free_pmds:
+       free_pmds(pmds);
+out_free_pgd:
+       free_page((unsigned long)pgd);
+out:
+       return NULL;
 }
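
For context on how this is reached: the generic fork path calls it from mm_alloc_pgd() in kernel/fork.c, which does roughly:

static int mm_alloc_pgd(struct mm_struct *mm)
{
        mm->pgd = pgd_alloc(mm);
        if (unlikely(!mm->pgd))
                return -ENOMEM;
        return 0;
}
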
-#else  /* !CONFIG_X86_PAE */
-/* No need to prepopulate any pagetable entries in non-PAE modes. */
-static int pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd)
+
+void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 {
-       return 1;
+       pgd_mop_up_pmds(mm, pgd);
+       pgd_dtor(pgd);
+       paravirt_pgd_free(mm, pgd);
+       free_page((unsigned long)pgd);
 }
 
-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgd)
+int ptep_set_access_flags(struct vm_area_struct *vma,
+                         unsigned long address, pte_t *ptep,
+                         pte_t entry, int dirty)
 {
+       int changed = !pte_same(*ptep, entry);
+
+       if (changed && dirty) {
+               *ptep = entry;
+               pte_update_defer(vma->vm_mm, address, ptep);
+               flush_tlb_page(vma, address);
+       }
+
+       return changed;
 }
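
A sketch of the typical caller: the generic minor-fault path in mm/memory.c re-marks the pte and lets this helper decide whether a TLB flush is needed (local names below are illustrative, not the exact mm/memory.c code):

entry = pte_mkyoung(orig_pte);
if (write_access)
        entry = pte_mkdirty(entry);
if (ptep_set_access_flags(vma, address, ptep, entry, write_access))
        update_mmu_cache(vma, address, entry);
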
-#endif /* CONFIG_X86_PAE */
 
-pgd_t *pgd_alloc(struct mm_struct *mm)
+int ptep_test_and_clear_young(struct vm_area_struct *vma,
+                             unsigned long addr, pte_t *ptep)
 {
-       pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
+       int ret = 0;
 
-       /* so that alloc_pmd can use it */
-       mm->pgd = pgd;
-       if (pgd)
-               pgd_ctor(pgd);
+       if (pte_young(*ptep))
+               ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
+                                        (unsigned long *) &ptep->pte);
 
-       if (pgd && !pgd_prepopulate_pmd(mm, pgd)) {
-               pgd_dtor(pgd);
-               free_page((unsigned long)pgd);
-               pgd = NULL;
-       }
+       if (ret)
+               pte_update(vma->vm_mm, addr, ptep);
 
-       return pgd;
+       return ret;
 }
 
-void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+int ptep_clear_flush_young(struct vm_area_struct *vma,
+                          unsigned long address, pte_t *ptep)
 {
-       pgd_mop_up_pmds(mm, pgd);
-       pgd_dtor(pgd);
-       free_page((unsigned long)pgd);
+       int young;
+
+       young = ptep_test_and_clear_young(vma, address, ptep);
+       if (young)
+               flush_tlb_page(vma, address);
+
+       return young;
+}
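
For context, these accessed-bit helpers are how page reclaim samples references; a caller in the style of mm/rmap.c's page_referenced_one() does roughly:

/* count a reference if the pte was touched since the last scan */
if (ptep_clear_flush_young(vma, address, pte))
        referenced++;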
+
+int fixmaps_set;
+
+void __native_set_fixmap(enum fixed_addresses idx, pte_t pte)
+{
+       unsigned long address = __fix_to_virt(idx);
+
+       if (idx >= __end_of_fixed_addresses) {
+               BUG();
+               return;
+       }
+       set_pte_vaddr(address, pte);
+       fixmaps_set++;
+}
+
+void native_set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
+{
+       __native_set_fixmap(idx, pfn_pte(phys >> PAGE_SHIFT, flags));
 }
-#endif
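
A usage sketch for the new fixmap helpers: callers normally go through the set_fixmap()/set_fixmap_nocache() wrappers, which on a native kernel end up in native_set_fixmap(); e.g. mapping the local APIC registers (apic_phys here is an assumed local holding the APIC's physical base):

/* establish an uncached fixed-address mapping of the local APIC */
set_fixmap_nocache(FIX_APIC_BASE, apic_phys);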