paravirt: add an "mm" argument to alloc_pt
diff --git a/arch/i386/mm/pageattr.c b/arch/i386/mm/pageattr.c
index e223b1d..37992ff 100644
--- a/arch/i386/mm/pageattr.c
+++ b/arch/i386/mm/pageattr.c
@@ -60,6 +60,7 @@ static struct page *split_large_page(unsigned long address, pgprot_t prot,
        address = __pa(address);
        addr = address & LARGE_PAGE_MASK; 
        pbase = (pte_t *)page_address(base);
+       paravirt_alloc_pt(&init_mm, page_to_pfn(base));
        for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
                set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT,
                                           addr == address ? prot : ref_prot));
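
This hook call is the point of the patch: split_large_page() turns a freshly allocated page into a kernel pagetable page, and the paravirt backend now learns which mm that page belongs to; here it is always &init_mm, since the kernel's reference pagetables are being modified. For orientation, a minimal sketch of the hook plumbing, assuming a paravirt_ops-style indirection (the exact struct layout in this tree may differ):

	/* Sketch, not this tree's verbatim header: the alloc_pt hook now
	 * receives the mm the new pagetable page will serve, so a
	 * hypervisor backend (e.g. Xen) can pin/track the page per
	 * address space. */
	struct paravirt_ops {
		/* ... */
		void (*alloc_pt)(struct mm_struct *mm, unsigned pfn);
		void (*release_pt)(unsigned pfn);
		/* ... */
	};

	static inline void paravirt_alloc_pt(struct mm_struct *mm, unsigned pfn)
	{
		paravirt_ops.alloc_pt(mm, pfn);
	}
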
@@ -67,14 +68,23 @@ static struct page *split_large_page(unsigned long address, pgprot_t prot,
        return base;
 } 
 
-static void flush_kernel_map(void *arg)
+static void cache_flush_page(struct page *p)
 { 
-       unsigned long adr = (unsigned long)arg;
+       unsigned long adr = (unsigned long)page_address(p);
+       int i;
+       for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
+               asm volatile("clflush (%0)" :: "r" (adr + i));
+}
+
+static void flush_kernel_map(void *arg)
+{
+       struct list_head *lh = (struct list_head *)arg;
+       struct page *p;
 
-       if (adr && cpu_has_clflush) {
-               int i;
-               for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
-                       asm volatile("clflush (%0)" :: "r" (adr + i));
+       /* High level code is not ready for clflush yet */
+       if (0 && cpu_has_clflush) {
+               list_for_each_entry (p, lh, lru)
+                       cache_flush_page(p);
        } else if (boot_cpu_data.x86_model >= 4)
                wbinvd();
 
@@ -90,7 +100,7 @@ static void set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
        unsigned long flags;
 
        set_pte_atomic(kpte, pte);      /* change init_mm */
-       if (PTRS_PER_PMD > 1)
+       if (SHARED_KERNEL_PMD)
                return;
 
        spin_lock_irqsave(&pgd_lock, flags);
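
On two-level (!PAE) i386 every process has its own pgd and kernel mappings must be propagated into each of them, which is what the pgd walk under pgd_lock does; on PAE the kernel pmd is normally shared across processes and the early return suffices. The old PTRS_PER_PMD > 1 test hard-codes "PAE implies shared", but a paravirt backend such as Xen needs unshared kernel pmds even under PAE, so the test becomes the runtime SHARED_KERNEL_PMD. A sketch of how the macro is plausibly defined in the i386 pgtable headers (assumption, simplified):

	/* Sketch: 2-level kernels never share (each pgd carries kernel
	 * entries); 3-level kernels share unless the paravirt backend
	 * opts out. */
	#ifdef CONFIG_X86_PAE
	# ifdef CONFIG_PARAVIRT
	#  define SHARED_KERNEL_PMD	(paravirt_ops.shared_kernel_pmd)
	# else
	#  define SHARED_KERNEL_PMD	1
	# endif
	#else
	# define SHARED_KERNEL_PMD	0
	#endif
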
@@ -141,7 +151,7 @@ __change_page_attr(struct page *page, pgprot_t prot)
                return -EINVAL;
        kpte_page = virt_to_page(kpte);
        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) { 
-               if ((pte_val(*kpte) & _PAGE_PSE) == 0) { 
+               if (!pte_huge(*kpte)) {
                        set_pte_atomic(kpte, mk_pte(page, prot)); 
                } else {
                        pgprot_t ref_prot;
@@ -157,7 +167,7 @@ __change_page_attr(struct page *page, pgprot_t prot)
                        kpte_page = split;
                }
                page_private(kpte_page)++;
-       } else if ((pte_val(*kpte) & _PAGE_PSE) == 0) { 
+       } else if (!pte_huge(*kpte)) {
                set_pte_atomic(kpte, mk_pte(page, PAGE_KERNEL));
                BUG_ON(page_private(kpte_page) == 0);
                page_private(kpte_page)--;
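
Both call sites replace the open-coded _PAGE_PSE test with pte_huge(). The behavior is unchanged; the accessor keeps pageattr.c out of the business of poking raw pte bits, which matters once pte contents are mediated by paravirt. On i386 of this era the accessor is essentially:

	/* Sketch (believed to match the i386 definition): a pte maps a
	 * large page iff its PSE bit is set. */
	static inline int pte_huge(pte_t pte)
	{
		return (pte).pte_low & _PAGE_PSE;
	}
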
@@ -172,6 +182,7 @@ __change_page_attr(struct page *page, pgprot_t prot)
        if (!PageReserved(kpte_page)) {
                if (cpu_has_pse && (page_private(kpte_page) == 0)) {
                        ClearPagePrivate(kpte_page);
+                       paravirt_release_pt(page_to_pfn(kpte_page));
                        list_add(&kpte_page->lru, &df_list);
                        revert_page(kpte_page, address);
                }
@@ -179,9 +190,9 @@ __change_page_attr(struct page *page, pgprot_t prot)
        return 0;
 } 
 
-static inline void flush_map(void *adr)
+static inline void flush_map(struct list_head *l)
 {
-       on_each_cpu(flush_kernel_map, adr, 1, 1);
+       on_each_cpu(flush_kernel_map, l, 1, 1);
 }
 
 /*
@@ -223,11 +234,8 @@ void global_flush_tlb(void)
        spin_lock_irq(&cpa_lock);
        list_replace_init(&df_list, &l);
        spin_unlock_irq(&cpa_lock);
-       if (!cpu_has_clflush)
-               flush_map(NULL);
+       flush_map(&l);
        list_for_each_entry_safe(pg, next, &l, lru) {
-               if (cpu_has_clflush)
-                       flush_map(page_address(pg));
                __free_page(pg);
        }
 }
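
For context, the intended calling pattern is unchanged by this patch: callers batch attribute changes through change_page_attr() and then issue a single global_flush_tlb(), which now hands the whole deferred list to every CPU's flush and frees the pages only afterwards. A hedged usage sketch (the helper name is made up; change_page_attr() and global_flush_tlb() are the real entry points):

	/* Hypothetical caller: mark a range of kernel pages read-only,
	 * then flush once on all CPUs. */
	static void make_pages_ro(struct page *pg, int numpages)
	{
		change_page_attr(pg, numpages, PAGE_KERNEL_RO);
		global_flush_tlb();
	}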