#include <linux/highmem.h>
#include <linux/module.h>

void *kmap(struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	return kmap_high(page);
}

void kunmap(struct page *page)
{
	if (in_interrupt())
		BUG();
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
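
/*
 * Usage sketch (illustrative, not part of the original file): pairing
 * kmap()/kunmap() from sleepable process context.  The helper name
 * "example_fill_highpage" is hypothetical.
 */
static inline void example_fill_highpage(struct page *page, unsigned char val)
{
	unsigned char *vaddr = kmap(page);	/* may sleep waiting for a kmap slot */
	int i;

	for (i = 0; i < PAGE_SIZE; i++)
		vaddr[i] = val;
	kunmap(page);				/* drop the kernel mapping again */
}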

/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However, while holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */
void *kmap_atomic(struct page *page, enum km_type type)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();

	idx = type + KM_TYPE_NR*smp_processor_id();
	BUG_ON(!pte_none(*(kmap_pte-idx)));

	if (!PageHighMem(page))
		return page_address(page);

	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));
	arch_flush_lazy_mmu_mode();

	return (void *)vaddr;
}

void kunmap_atomic(void *kvaddr, enum km_type type)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();

	/*
	 * Force other mappings to Oops if they'll try to access this pte
	 * without first remapping it.  Keeping stale mappings around is a
	 * bad idea too, in case the page changes cacheability attributes
	 * or becomes a protected page in a hypervisor.
	 */
	if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx))
		kpte_clear_flush(kmap_pte-idx, vaddr);
	else {
#ifdef CONFIG_DEBUG_HIGHMEM
		BUG_ON(vaddr < PAGE_OFFSET);
		BUG_ON(vaddr >= (unsigned long)high_memory);
#endif
	}

	arch_flush_lazy_mmu_mode();
	pagefault_enable();
}
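
/*
 * Usage sketch (illustrative, not part of the original file): an atomic
 * highmem access from a path that must not sleep.  KM_USER0 is one of
 * the per-CPU fixmap slots; nothing between kmap_atomic() and
 * kunmap_atomic() may sleep.  The helper name is hypothetical.
 */
static inline void example_zero_highpage_atomic(struct page *page)
{
	void *vaddr = kmap_atomic(page, KM_USER0);

	clear_page(vaddr);
	kunmap_atomic(vaddr, KM_USER0);
}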

/* This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	pagefault_disable();
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	set_pte(kmap_pte-idx, pfn_pte(pfn, kmap_prot));
	arch_flush_lazy_mmu_mode();

	return (void *)vaddr;
}
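
/*
 * Usage sketch (illustrative, not part of the original file): reading a
 * word from a physical frame that has no struct page behind it, e.g.
 * while inspecting firmware or crash-dump memory.  The pfn and the
 * helper name are hypothetical; the caller supplies the kmap slot.
 */
static inline unsigned long example_peek_pfn(unsigned long pfn, enum km_type type)
{
	unsigned long *vaddr = kmap_atomic_pfn(pfn, type);
	unsigned long word = *vaddr;

	kunmap_atomic(vaddr, type);
	return word;
}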

struct page *kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}
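
/*
 * Usage sketch (illustrative, not part of the original file): code that
 * only holds the atomic mapping address can recover the underlying
 * struct page, e.g. to mark it dirty before unmapping.  The helper name
 * is hypothetical.
 */
static inline void example_dirty_atomic_mapping(void *kvaddr, enum km_type type)
{
	struct page *page = kmap_atomic_to_page(kvaddr);

	set_page_dirty(page);
	kunmap_atomic(kvaddr, type);
}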

EXPORT_SYMBOL(kmap);
EXPORT_SYMBOL(kunmap);
EXPORT_SYMBOL(kmap_atomic);
EXPORT_SYMBOL(kunmap_atomic);
EXPORT_SYMBOL(kmap_atomic_to_page);