agp/intel: Fix the pre-9xx chipset flush. [pandora-kernel.git]

diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 3cfe9ce..5866b28 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -11,6 +11,7 @@
 #include <linux/interrupt.h>
 #include <linux/seq_file.h>
 #include <linux/debugfs.h>
+#include <linux/pfn.h>
 
 #include <asm/e820.h>
 #include <asm/processor.h>
@@ -142,6 +143,7 @@ void clflush_cache_range(void *vaddr, unsigned int size)
 
        mb();
 }
+EXPORT_SYMBOL_GPL(clflush_cache_range);
 
 static void __cpa_flush_all(void *arg)
 {
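
The hunk above exports clflush_cache_range() so that modules can flush a cache-line range; per the commit subject, the intel-agp pre-9xx chipset flush path is the intended user. Below is a minimal module-side sketch of such a caller (illustrative only, not part of the patch; the example_* names are hypothetical):

/* Illustrative sketch, not part of the patch; example_* is hypothetical. */
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>

static struct page *example_flush_page;	/* hypothetical scratch page */

static int example_alloc_flush_page(void)
{
	example_flush_page = alloc_page(GFP_KERNEL);
	return example_flush_page ? 0 : -ENOMEM;
}

static void example_chipset_flush(void)
{
	/* write the whole lowmem scratch page back out of the CPU caches */
	clflush_cache_range(page_address(example_flush_page), PAGE_SIZE);
}
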
@@ -590,9 +592,12 @@ static int __change_page_attr(struct cpa_data *cpa, int primary)
        unsigned int level;
        pte_t *kpte, old_pte;
 
-       if (cpa->flags & CPA_PAGES_ARRAY)
-               address = (unsigned long)page_address(cpa->pages[cpa->curpage]);
-       else if (cpa->flags & CPA_ARRAY)
+       if (cpa->flags & CPA_PAGES_ARRAY) {
+               struct page *page = cpa->pages[cpa->curpage];
+               if (unlikely(PageHighMem(page)))
+                       return 0;
+               address = (unsigned long)page_address(page);
+       } else if (cpa->flags & CPA_ARRAY)
                address = cpa->vaddr[cpa->curpage];
        else
                address = *cpa->vaddr;
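
A highmem page has no permanent linear mapping, so page_address() cannot yield a stable address whose direct-map PTE could be changed; the hunk above therefore treats such pages as a no-op. The same pattern in isolation (illustrative only, not part of the patch; example_* is hypothetical):

/* Illustrative restatement, not part of the patch; example_* is hypothetical. */
#include <linux/mm.h>
#include <linux/page-flags.h>

static unsigned long example_lowmem_vaddr(struct page *page)
{
	if (PageHighMem(page))
		return 0;	/* no permanent mapping: nothing to modify */
	return (unsigned long)page_address(page);
}
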
@@ -681,8 +686,9 @@ static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias);
 static int cpa_process_alias(struct cpa_data *cpa)
 {
        struct cpa_data alias_cpa;
-       int ret = 0;
-       unsigned long temp_cpa_vaddr, vaddr;
+       unsigned long laddr = (unsigned long)__va(cpa->pfn << PAGE_SHIFT);
+       unsigned long vaddr, remapped;
+       int ret;
 
        if (cpa->pfn >= max_pfn_mapped)
                return 0;
@@ -695,9 +701,12 @@ static int cpa_process_alias(struct cpa_data *cpa)
         * No need to redo, when the primary call touched the direct
         * mapping already:
         */
-       if (cpa->flags & CPA_PAGES_ARRAY)
-               vaddr = (unsigned long)page_address(cpa->pages[cpa->curpage]);
-       else if (cpa->flags & CPA_ARRAY)
+       if (cpa->flags & CPA_PAGES_ARRAY) {
+               struct page *page = cpa->pages[cpa->curpage];
+               if (unlikely(PageHighMem(page)))
+                       return 0;
+               vaddr = (unsigned long)page_address(page);
+       } else if (cpa->flags & CPA_ARRAY)
                vaddr = cpa->vaddr[cpa->curpage];
        else
                vaddr = *cpa->vaddr;
@@ -706,42 +715,55 @@ static int cpa_process_alias(struct cpa_data *cpa)
                    PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT)))) {
 
                alias_cpa = *cpa;
-               temp_cpa_vaddr = (unsigned long) __va(cpa->pfn << PAGE_SHIFT);
-               alias_cpa.vaddr = &temp_cpa_vaddr;
+               alias_cpa.vaddr = &laddr;
                alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);
 
-
                ret = __change_page_attr_set_clr(&alias_cpa, 0);
+               if (ret)
+                       return ret;
        }
 
 #ifdef CONFIG_X86_64
-       if (ret)
-               return ret;
-       /*
-        * No need to redo, when the primary call touched the high
-        * mapping already:
-        */
-       if (within(vaddr, (unsigned long) _text, _brk_end))
-               return 0;
-
        /*
-        * If the physical address is inside the kernel map, we need
+        * If the primary call didn't touch the high mapping already
+        * and the physical address is inside the kernel map, we need
         * to touch the high mapped kernel as well:
         */
-       if (!within(cpa->pfn, highmap_start_pfn(), highmap_end_pfn()))
-               return 0;
+       if (!within(vaddr, (unsigned long)_text, _brk_end) &&
+           within(cpa->pfn, highmap_start_pfn(), highmap_end_pfn())) {
+               unsigned long temp_cpa_vaddr = (cpa->pfn << PAGE_SHIFT) +
+                                              __START_KERNEL_map - phys_base;
+               alias_cpa = *cpa;
+               alias_cpa.vaddr = &temp_cpa_vaddr;
+               alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);
 
-       alias_cpa = *cpa;
-       temp_cpa_vaddr = (cpa->pfn << PAGE_SHIFT) + __START_KERNEL_map - phys_base;
-       alias_cpa.vaddr = &temp_cpa_vaddr;
-       alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);
+               /*
+                * The high mapping range is imprecise, so ignore the
+                * return value.
+                */
+               __change_page_attr_set_clr(&alias_cpa, 0);
+       }
+#endif
 
        /*
-        * The high mapping range is imprecise, so ignore the return value.
+        * If the PMD page was partially used for per-cpu remapping,
+        * the recycled area needs to be split and modified.  Because
+        * the area is always a proper subset of a PMD page,
+        * cpa->numpages is guaranteed to be 1 for these areas, so
+        * there's no need to loop over and check for further remaps.
         */
-       __change_page_attr_set_clr(&alias_cpa, 0);
-#endif
-       return ret;
+       remapped = (unsigned long)pcpu_lpage_remapped((void *)laddr);
+       if (remapped) {
+               WARN_ON(cpa->numpages > 1);
+               alias_cpa = *cpa;
+               alias_cpa.vaddr = &remapped;
+               alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);
+               ret = __change_page_attr_set_clr(&alias_cpa, 0);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
 }
 
 static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
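
cpa_process_alias() now walks up to three aliases of the primary mapping: the low direct mapping derived via __va(), on x86_64 the high kernel-text mapping at __START_KERNEL_map, and a per-cpu large-page area reported by pcpu_lpage_remapped(). The sketch below (illustrative only, not part of the patch; example_* names are hypothetical) restates how the first two alias addresses are derived from a pfn:

/* Illustrative sketch, not part of the patch; example_* is hypothetical. */
#include <asm/page.h>

static unsigned long example_lowmap_alias(unsigned long pfn)
{
	/* linear ("direct") mapping of the physical page */
	return (unsigned long)__va(pfn << PAGE_SHIFT);
}

#ifdef CONFIG_X86_64
static unsigned long example_highmap_alias(unsigned long pfn)
{
	/* high kernel-text mapping at __START_KERNEL_map, adjusted by phys_base */
	return (pfn << PAGE_SHIFT) + __START_KERNEL_map - phys_base;
}
#endif
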
@@ -982,12 +1004,15 @@ EXPORT_SYMBOL(set_memory_array_uc);
 int _set_memory_wc(unsigned long addr, int numpages)
 {
        int ret;
+       unsigned long addr_copy = addr;
+
        ret = change_page_attr_set(&addr, numpages,
                                    __pgprot(_PAGE_CACHE_UC_MINUS), 0);
-
        if (!ret) {
-               ret = change_page_attr_set(&addr, numpages,
-                                   __pgprot(_PAGE_CACHE_WC), 0);
+               ret = change_page_attr_set_clr(&addr_copy, numpages,
+                                              __pgprot(_PAGE_CACHE_WC),
+                                              __pgprot(_PAGE_CACHE_MASK),
+                                              0, 0, NULL);
        }
        return ret;
 }
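
The reworked _set_memory_wc() first switches the range to UC_MINUS and then, in a single call, sets _PAGE_CACHE_WC while clearing the whole _PAGE_CACHE_MASK, so the final PTEs carry exactly the WC cache bits rather than an OR of the UC_MINUS and WC bits. A hedged usage sketch from a hypothetical driver follows (example_* names are assumptions, not part of the patch):

/* Illustrative usage sketch, not part of the patch; example_* is hypothetical. */
#include <linux/errno.h>
#include <linux/gfp.h>
#include <asm/cacheflush.h>

#define EXAMPLE_ORDER	2	/* four contiguous lowmem pages, hypothetical */

static void *example_buf;

static int example_map_wc(void)
{
	int ret;

	example_buf = (void *)__get_free_pages(GFP_KERNEL, EXAMPLE_ORDER);
	if (!example_buf)
		return -ENOMEM;

	/* set_memory_wc() ends up in _set_memory_wc() above when PAT is enabled */
	ret = set_memory_wc((unsigned long)example_buf, 1 << EXAMPLE_ORDER);
	if (ret)
		free_pages((unsigned long)example_buf, EXAMPLE_ORDER);
	return ret;
}

static void example_unmap_wc(void)
{
	/* restore write-back before handing the pages back to the allocator */
	set_memory_wb((unsigned long)example_buf, 1 << EXAMPLE_ORDER);
	free_pages((unsigned long)example_buf, EXAMPLE_ORDER);
}
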
@@ -1104,7 +1129,9 @@ int set_pages_array_uc(struct page **pages, int addrinarray)
        int free_idx;
 
        for (i = 0; i < addrinarray; i++) {
-               start = (unsigned long)page_address(pages[i]);
+               if (PageHighMem(pages[i]))
+                       continue;
+               start = page_to_pfn(pages[i]) << PAGE_SHIFT;
                end = start + PAGE_SIZE;
                if (reserve_memtype(start, end, _PAGE_CACHE_UC_MINUS, NULL))
                        goto err_out;
@@ -1117,7 +1144,9 @@ int set_pages_array_uc(struct page **pages, int addrinarray)
 err_out:
        free_idx = i;
        for (i = 0; i < free_idx; i++) {
-               start = (unsigned long)page_address(pages[i]);
+               if (PageHighMem(pages[i]))
+                       continue;
+               start = page_to_pfn(pages[i]) << PAGE_SHIFT;
                end = start + PAGE_SIZE;
                free_memtype(start, end);
        }
@@ -1146,7 +1175,9 @@ int set_pages_array_wb(struct page **pages, int addrinarray)
                return retval;
 
        for (i = 0; i < addrinarray; i++) {
-               start = (unsigned long)page_address(pages[i]);
+               if (PageHighMem(pages[i]))
+                       continue;
+               start = page_to_pfn(pages[i]) << PAGE_SHIFT;
                end = start + PAGE_SIZE;
                free_memtype(start, end);
        }
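
set_pages_array_uc() and set_pages_array_wb() now skip highmem pages and pass physical addresses (page_to_pfn() << PAGE_SHIFT) to reserve_memtype()/free_memtype(), since a highmem page has no linear address from which to derive them. A minimal usage sketch of the pair from a hypothetical driver that may be handed highmem pages (illustrative only, not part of the patch; example_* names are assumptions):

/* Illustrative usage sketch, not part of the patch; example_* is hypothetical. */
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>

#define EXAMPLE_NPAGES	16	/* hypothetical */

static struct page *example_pages[EXAMPLE_NPAGES];

static int example_pages_to_uc(void)
{
	int i, ret = -ENOMEM;

	for (i = 0; i < EXAMPLE_NPAGES; i++) {
		/* GFP_HIGHUSER may hand back highmem pages on 32-bit */
		example_pages[i] = alloc_page(GFP_HIGHUSER);
		if (!example_pages[i])
			goto err;
	}

	/* highmem entries are skipped internally, lowmem ones go uncached */
	ret = set_pages_array_uc(example_pages, EXAMPLE_NPAGES);
	if (!ret)
		return 0;
err:
	while (i--)
		__free_page(example_pages[i]);
	return ret;
}

static void example_pages_to_wb(void)
{
	int i;

	set_pages_array_wb(example_pages, EXAMPLE_NPAGES);
	for (i = 0; i < EXAMPLE_NPAGES; i++)
		__free_page(example_pages[i]);
}
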