x86: rename paravirt_alloc_pt etc after the pagetable structure
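
This commit (viewed here from pandora-kernel.git) renames the paravirt
pagetable hooks after the Linux pagetable structure they allocate or
release (pte/pmd) instead of the older hardware-flavoured pt/pd suffixes,
and unifies the duplicated 32/64-bit pgd_list helpers while it is at it.
The mapping, as far as it can be inferred from the title and the hunks
below (the complete list of renamed hooks is an assumption):

	/* old name                     new name
	 * paravirt_alloc_pt        ->  paravirt_alloc_pte
	 * paravirt_alloc_pd        ->  paravirt_alloc_pmd
	 * paravirt_alloc_pd_clone  ->  paravirt_alloc_pmd_clone
	 * paravirt_release_pt      ->  paravirt_release_pte
	 * paravirt_release_pd      ->  paravirt_release_pmd
	 */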
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index d526b46..1d44d6d 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -21,34 +21,53 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
        return pte;
 }
 
-#ifdef CONFIG_X86_64
+void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
+{
+       pgtable_page_dtor(pte);
+       paravirt_release_pte(page_to_pfn(pte));
+       tlb_remove_page(tlb, pte);
+}
+
+#if PAGETABLE_LEVELS > 2
+void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
+{
+       paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
+       tlb_remove_page(tlb, virt_to_page(pmd));
+}
+
+#if PAGETABLE_LEVELS > 3
+void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
+{
+       tlb_remove_page(tlb, virt_to_page(pud));
+}
+#endif /* PAGETABLE_LEVELS > 3 */
+#endif /* PAGETABLE_LEVELS > 2 */
+
 static inline void pgd_list_add(pgd_t *pgd)
 {
        struct page *page = virt_to_page(pgd);
-       unsigned long flags;
 
-       spin_lock_irqsave(&pgd_lock, flags);
        list_add(&page->lru, &pgd_list);
-       spin_unlock_irqrestore(&pgd_lock, flags);
 }
 
 static inline void pgd_list_del(pgd_t *pgd)
 {
        struct page *page = virt_to_page(pgd);
-       unsigned long flags;
 
-       spin_lock_irqsave(&pgd_lock, flags);
        list_del(&page->lru);
-       spin_unlock_irqrestore(&pgd_lock, flags);
 }
 
+#ifdef CONFIG_X86_64
 pgd_t *pgd_alloc(struct mm_struct *mm)
 {
        unsigned boundary;
        pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
+       unsigned long flags;
        if (!pgd)
                return NULL;
+       spin_lock_irqsave(&pgd_lock, flags);
        pgd_list_add(pgd);
+       spin_unlock_irqrestore(&pgd_lock, flags);
        /*
         * Copy kernel pointers in from init.
         * Could keep a freelist or slab cache of those because the kernel
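
The __pte_free_tlb()/__pmd_free_tlb()/__pud_free_tlb() functions added in
the hunk above notify the hypervisor through the release hooks before the
page is handed to the mmu_gather batching, so a paravirt guest (e.g. Xen)
can unpin the page before it is freed and reused as ordinary memory. On a
native build the hooks compile away; a minimal sketch of the
!CONFIG_PARAVIRT stubs, assuming the pgalloc header conventions of this
era (not part of this patch):

	#ifndef CONFIG_PARAVIRT
	#define paravirt_alloc_pte(mm, pfn)	do { } while (0)
	#define paravirt_alloc_pmd(mm, pfn)	do { } while (0)
	#define paravirt_release_pte(pfn)	do { } while (0)
	#define paravirt_release_pmd(pfn)	do { } while (0)
	#endif

Note also that pgd_list_add()/pgd_list_del() no longer take pgd_lock
themselves: every caller is now responsible for holding it, as the
pgd_alloc()/pgd_free() changes in this and the next hunk show.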
@@ -64,8 +83,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
 
 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 {
+       unsigned long flags;
        BUG_ON((unsigned long)pgd & (PAGE_SIZE-1));
+       spin_lock_irqsave(&pgd_lock, flags);
        pgd_list_del(pgd);
+       spin_unlock_irqrestore(&pgd_lock, flags);
        free_page((unsigned long)pgd);
 }
 #else
@@ -79,20 +101,6 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd)
  * vmalloc faults work because attached pagetables are never freed.
  * -- wli
  */
-static inline void pgd_list_add(pgd_t *pgd)
-{
-       struct page *page = virt_to_page(pgd);
-
-       list_add(&page->lru, &pgd_list);
-}
-
-static inline void pgd_list_del(pgd_t *pgd)
-{
-       struct page *page = virt_to_page(pgd);
-
-       list_del(&page->lru);
-}
-
 #define UNSHARED_PTRS_PER_PGD                          \
        (SHARED_KERNEL_PMD ? USER_PTRS_PER_PGD : PTRS_PER_PGD)
 
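
The 32-bit copies of pgd_list_add()/pgd_list_del() removed here were
already lock-free, because their callers take pgd_lock around them; that
is why the now-shared helpers at the top of the file could drop their own
locking. For reference, a sketch of the 32-bit pgd_dtor() of this era
(reconstructed from surrounding kernel code, not shown in this diff):

	static void pgd_dtor(void *pgd)
	{
		unsigned long flags;	/* can be called from irq context */

		if (SHARED_KERNEL_PMD)
			return;

		spin_lock_irqsave(&pgd_lock, flags);
		pgd_list_del(pgd);
		spin_unlock_irqrestore(&pgd_lock, flags);
	}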
@@ -114,10 +122,10 @@ static void pgd_ctor(void *p)
                clone_pgd_range(pgd + USER_PTRS_PER_PGD,
                                swapper_pg_dir + USER_PTRS_PER_PGD,
                                KERNEL_PGD_PTRS);
-               paravirt_alloc_pd_clone(__pa(pgd) >> PAGE_SHIFT,
-                                       __pa(swapper_pg_dir) >> PAGE_SHIFT,
-                                       USER_PTRS_PER_PGD,
-                                       KERNEL_PGD_PTRS);
+               paravirt_alloc_pmd_clone(__pa(pgd) >> PAGE_SHIFT,
+                                        __pa(swapper_pg_dir) >> PAGE_SHIFT,
+                                        USER_PTRS_PER_PGD,
+                                        KERNEL_PGD_PTRS);
        }
 
        /* list required to sync kernel mapping updates */
@@ -158,7 +166,7 @@ static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
 
                        pgdp[i] = native_make_pgd(0);
 
-                       paravirt_release_pd(pgd_val(pgd) >> PAGE_SHIFT);
+                       paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
                        pmd_free(mm, pmd);
                }
        }
@@ -200,6 +208,24 @@ static int pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd)
 
        return 1;
 }
+
+void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
+{
+       paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
+
+       /* Note: almost everything apart from _PAGE_PRESENT is
+          reserved at the pmd (PDPT) level. */
+       set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT));
+
+       /*
+        * According to Intel App note "TLBs, Paging-Structure Caches,
+        * and Their Invalidation", April 2007, document 317080-001,
+        * section 8.1: in PAE mode we explicitly have to flush the
+        * TLB via cr3 if the top-level pgd is changed...
+        */
+       if (mm == current->active_mm)
+               write_cr3(read_cr3());
+}
 #else  /* !CONFIG_X86_PAE */
 /* No need to prepopulate any pagetable entries in non-PAE modes. */
 static int pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd)
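
The pud_populate() added in the hunk above is PAE-only: it installs a
freshly allocated pmd page into the folded pud, i.e. into a PDPT entry,
reports the new pmd to the hypervisor via paravirt_alloc_pmd(), and
reloads cr3 because PAE processors cache the four PDPTEs at cr3-load time
instead of walking them on TLB misses. A sketch of how it is reached,
paraphrasing the generic __pmd_alloc() of this era (details assumed, not
part of this patch):

	int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
	{
		pmd_t *new = pmd_alloc_one(mm, address);
		if (!new)
			return -ENOMEM;

		spin_lock(&mm->page_table_lock);
		if (pud_present(*pud))		/* raced: a pmd was already */
			pmd_free(mm, new);	/* installed; drop ours */
		else
			pud_populate(mm, pud, new);	/* PAE: also reloads cr3 */
		spin_unlock(&mm->page_table_lock);
		return 0;
	}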
@@ -216,7 +242,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
 {
        pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
 
-       /* so that alloc_pd can use it */
+       /* so that alloc_pmd can use it */
        mm->pgd = pgd;
        if (pgd)
                pgd_ctor(pgd);