diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
index 5e8fa12..b499626 100644
--- a/arch/x86/mm/highmem_32.c
+++ b/arch/x86/mm/highmem_32.c
@@ -9,6 +9,7 @@ void *kmap(struct page *page)
                return page_address(page);
        return kmap_high(page);
 }
+EXPORT_SYMBOL(kmap);
 
 void kunmap(struct page *page)
 {
@@ -18,6 +19,7 @@ void kunmap(struct page *page)
                return;
        kunmap_high(page);
 }
+EXPORT_SYMBOL(kunmap);
 
 /*
  * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
@@ -27,10 +29,10 @@ void kunmap(struct page *page)
  * However when holding an atomic kmap it is not legal to sleep, so atomic
  * kmaps are appropriate for short, tight code paths only.
  */
-void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
+void *kmap_atomic_prot(struct page *page, pgprot_t prot)
 {
-       enum fixed_addresses idx;
        unsigned long vaddr;
+       int idx, type;
 
        /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
        pagefault_disable();
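
The enum km_type argument is gone from the map side: instead of the caller naming a fixmap slot, the slot index is pushed onto a small per-CPU stack. The push helper is added elsewhere in this series (in include/linux/highmem.h, not in this file); a minimal sketch of what it could look like, where the per-CPU variable name and the debug checks are assumptions inferred from the calls in this file:

	#include <linux/percpu.h>
	#include <linux/hardirq.h>

	/* Sketch only: a per-CPU counter acting as a stack of in-use kmap slots. */
	DECLARE_PER_CPU(int, __kmap_atomic_idx);

	static inline int kmap_atomic_idx_push(void)
	{
		/* Post-increment: return the slot we just claimed. */
		int idx = __get_cpu_var(__kmap_atomic_idx)++;

	#ifdef CONFIG_DEBUG_HIGHMEM
		/* The stack discipline relies on IRQ users nesting strictly,
		 * so warn on hard IRQ context with IRQs still enabled. */
		WARN_ON_ONCE(in_irq() && !irqs_disabled());
		BUG_ON(idx >= KM_TYPE_NR);	/* valid slots: 0..KM_TYPE_NR-1 */
	#endif
		return idx;
	}

Since pagefault_disable() above also disables preemption, the task cannot migrate CPUs between this push and the matching pop, so the counter needs no further locking.
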
@@ -38,8 +40,7 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
        if (!PageHighMem(page))
                return page_address(page);
 
-       debug_kmap_atomic(type);
-
+       type = kmap_atomic_idx_push();
        idx = type + KM_TYPE_NR*smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
        BUG_ON(!pte_none(*(kmap_pte-idx)));
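
Each CPU owns a private window of KM_TYPE_NR fixmap slots, so a pushed index can never collide with a mapping on another CPU. A worked illustration of the arithmetic (the value 20 for KM_TYPE_NR is assumed, purely for the example):

	/*
	 * idx = type + KM_TYPE_NR * smp_processor_id()
	 *
	 * e.g. assuming KM_TYPE_NR == 20:
	 *   CPU 0, stack depth 0  ->  fixmap slot  0
	 *   CPU 0, stack depth 1  ->  fixmap slot  1
	 *   CPU 1, stack depth 0  ->  fixmap slot 20
	 */

The BUG_ON(!pte_none(...)) above then asserts that the slot handed out is actually vacant.
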
@@ -47,44 +48,57 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
 
        return (void *)vaddr;
 }
+EXPORT_SYMBOL(kmap_atomic_prot);
+
+void *__kmap_atomic(struct page *page)
+{
+       return kmap_atomic_prot(page, kmap_prot);
+}
+EXPORT_SYMBOL(__kmap_atomic);
 
-void *kmap_atomic(struct page *page, enum km_type type)
+/*
+ * This is the same as kmap_atomic() but can map memory that doesn't
+ * have a struct page associated with it.
+ */
+void *kmap_atomic_pfn(unsigned long pfn)
 {
-       return kmap_atomic_prot(page, type, kmap_prot);
+       return kmap_atomic_prot_pfn(pfn, kmap_prot);
 }
+EXPORT_SYMBOL_GPL(kmap_atomic_pfn);
 
-void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
+void __kunmap_atomic(void *kvaddr)
 {
        unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-       enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
-
-       /*
-        * Force other mappings to Oops if they'll try to access this pte
-        * without first remap it.  Keeping stale mappings around is a bad idea
-        * also, in case the page changes cacheability attributes or becomes
-        * a protected page in a hypervisor.
-        */
-       if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx))
+
+       if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
+           vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
+               int idx, type;
+
+               type = kmap_atomic_idx();
+               idx = type + KM_TYPE_NR * smp_processor_id();
+
+#ifdef CONFIG_DEBUG_HIGHMEM
+               WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+#endif
+               /*
+                * Force other mappings to Oops if they try to access this pte
+                * without first remapping it.  Keeping stale mappings around is
+                * also a bad idea, in case the page changes cacheability
+                * attributes or becomes a protected page in a hypervisor.
+                */
                kpte_clear_flush(kmap_pte-idx, vaddr);
-       else {
+               kmap_atomic_idx_pop();
+       }
 #ifdef CONFIG_DEBUG_HIGHMEM
+       else {
                BUG_ON(vaddr < PAGE_OFFSET);
                BUG_ON(vaddr >= (unsigned long)high_memory);
-#endif
        }
+#endif
 
        pagefault_enable();
 }
-
-/*
- * This is the same as kmap_atomic() but can map memory that doesn't
- * have a struct page associated with it.
- */
-void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
-{
-       return kmap_atomic_prot_pfn(pfn, type, kmap_prot);
-}
-EXPORT_SYMBOL_GPL(kmap_atomic_pfn); /* temporarily in use by i915 GEM until vmap */
+EXPORT_SYMBOL(__kunmap_atomic);
 
 struct page *kmap_atomic_to_page(void *ptr)
 {
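
On the unmap side no type is passed in any more, so __kunmap_atomic() first checks whether kvaddr lies inside the atomic-kmap fixmap window at all. The bounds look inverted because fixmap indices grow toward lower virtual addresses: FIX_KMAP_END is the low end of the range. The current slot is then read with kmap_atomic_idx() and released with kmap_atomic_idx_pop(); a matching sketch of those helpers, under the same assumptions as the push side:

	/* Sketch only: peek at, then release, the top-of-stack slot. */
	static inline int kmap_atomic_idx(void)
	{
		return __get_cpu_var(__kmap_atomic_idx) - 1;
	}

	static inline void kmap_atomic_idx_pop(void)
	{
	#ifdef CONFIG_DEBUG_HIGHMEM
		BUG_ON(--__get_cpu_var(__kmap_atomic_idx) < 0);
	#else
		__get_cpu_var(__kmap_atomic_idx)--;
	#endif
	}
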
@@ -98,12 +112,6 @@ struct page *kmap_atomic_to_page(void *ptr)
        pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
        return pte_page(*pte);
 }
-
-EXPORT_SYMBOL(kmap);
-EXPORT_SYMBOL(kunmap);
-EXPORT_SYMBOL(kmap_atomic);
-EXPORT_SYMBOL(kunmap_atomic_notypecheck);
-EXPORT_SYMBOL(kmap_atomic_prot);
 EXPORT_SYMBOL(kmap_atomic_to_page);
 
 void __init set_highmem_pages_init(void)
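
For callers, the visible change is the dropped km_type argument: the generic header is expected to wrap the __kmap_atomic()/__kunmap_atomic() entry points above in kmap_atomic()/kunmap_atomic() macros. A hypothetical before-and-after conversion (buf, len and the surrounding caller are made up for illustration):

	/* Before: the caller picked a km_type slot by hand. */
	void *addr = kmap_atomic(page, KM_USER0);
	memcpy(buf, addr, len);
	kunmap_atomic(addr, KM_USER0);

	/* After: slots come off a per-CPU stack, so nested mappings
	 * just work, as long as map/unmap pairs stay strictly LIFO. */
	void *addr = kmap_atomic(page);
	memcpy(buf, addr, len);
	kunmap_atomic(addr);

The strict LIFO rule is the price of the stack discipline: unmapping out of order would pop someone else's slot.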