#ifndef _LINUX_HIGHMEM_H
#define _LINUX_HIGHMEM_H

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>

#include <asm/cacheflush.h>

/*
 * Default no-op cache maintenance hooks; architectures with aliasing
 * caches define the ARCH_HAS_* macros and supply real implementations.
 */
#ifndef ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
}
#endif

#ifndef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
{
}
static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
}
static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
}
#endif

#include <asm/kmap_types.h>

#ifdef CONFIG_HIGHMEM
#include <asm/highmem.h>

/* declarations for linux/mm/highmem.c */
unsigned int nr_free_highpages(void);
extern unsigned long totalhigh_pages;

void kmap_flush_unused(void);

#else /* CONFIG_HIGHMEM */

static inline unsigned int nr_free_highpages(void) { return 0; }

#define totalhigh_pages 0UL

#ifndef ARCH_HAS_KMAP
static inline void *kmap(struct page *page)
{
	might_sleep();
	return page_address(page);
}

static inline void kunmap(struct page *page)
{
}

static inline void *__kmap_atomic(struct page *page)
{
	pagefault_disable();
	return page_address(page);
}
#define kmap_atomic_prot(page, prot)	__kmap_atomic(page)

static inline void __kunmap_atomic(void *addr)
{
	pagefault_enable();
}

#define kmap_atomic_pfn(pfn)	kmap_atomic(pfn_to_page(pfn))
#define kmap_atomic_to_page(ptr)	virt_to_page(ptr)

#define kmap_flush_unused()	do {} while(0)
#endif

#endif /* CONFIG_HIGHMEM */
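
/*
 * Editorial sketch, not part of the original header: kmap() may sleep
 * and is therefore only valid in process context, and kunmap() takes
 * the struct page back, not the mapped address. The helper below is
 * hypothetical:
 *
 *	static void fill_page(struct page *page, int c)
 *	{
 *		void *vaddr = kmap(page);
 *
 *		memset(vaddr, c, PAGE_SIZE);
 *		kunmap(page);
 *	}
 */
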
#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)

/* Per-CPU depth of the stack of currently-used atomic kmap slots. */
DECLARE_PER_CPU(int, __kmap_atomic_idx);

static inline int kmap_atomic_idx_push(void)
{
	int idx = __get_cpu_var(__kmap_atomic_idx)++;

#ifdef CONFIG_DEBUG_HIGHMEM
	WARN_ON_ONCE(in_irq() && !irqs_disabled());
	BUG_ON(idx > KM_TYPE_NR);
#endif
	return idx;
}

static inline int kmap_atomic_idx(void)
{
	return __get_cpu_var(__kmap_atomic_idx) - 1;
}

static inline int kmap_atomic_idx_pop(void)
{
	int idx = --__get_cpu_var(__kmap_atomic_idx);

#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(idx < 0);
#endif
	return idx;
}

#endif
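
/*
 * Editorial sketch, not part of the original header: how an
 * architecture's atomic kmap typically consumes the per-CPU index
 * stack above (modeled on x86's arch/x86/mm/highmem_32.c; kmap_pte
 * and the fixmap constants are arch-specific):
 *
 *	void *kmap_atomic_prot(struct page *page, pgprot_t prot)
 *	{
 *		unsigned long vaddr;
 *		int idx, type;
 *
 *		pagefault_disable();
 *		if (!PageHighMem(page))
 *			return page_address(page);
 *
 *		type = kmap_atomic_idx_push();
 *		idx = type + KM_TYPE_NR * smp_processor_id();
 *		vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 *		set_pte(kmap_pte - idx, mk_pte(page, prot));
 *
 *		return (void *)vaddr;
 *	}
 */
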
/*
 * Make both: kmap_atomic(page, idx) and kmap_atomic(page) work.
 */
#define kmap_atomic(page, args...) __kmap_atomic(page)

/*
 * Prevent people from trying to call kunmap_atomic() as if it were kunmap():
 * kunmap_atomic() should get the return value of kmap_atomic(), not the page.
 */
#define kunmap_atomic(addr, args...)				\
do {								\
	BUILD_BUG_ON(__same_type((addr), struct page *));	\
	__kunmap_atomic(addr);					\
} while (0)
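
/*
 * Editorial sketch, not part of the original header: kunmap_atomic()
 * takes the kernel virtual address returned by kmap_atomic(); passing
 * the struct page instead is turned into a compile-time error by the
 * BUILD_BUG_ON() above. 'buf', 'offset' and 'len' are hypothetical:
 *
 *	char *vaddr = kmap_atomic(page);
 *
 *	memcpy(buf, vaddr + offset, len);
 *	kunmap_atomic(vaddr);
 */
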
/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
#ifndef clear_user_highpage
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *addr = kmap_atomic(page, KM_USER0);
	clear_user_page(addr, vaddr, page);
	kunmap_atomic(addr, KM_USER0);
}
#endif

#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
/**
 * __alloc_zeroed_user_highpage - Allocate a zeroed HIGHMEM page for a VMA with caller-specified movable GFP flags
 * @movableflags: The GFP flags related to the page's future ability to move like __GFP_MOVABLE
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA but the caller is expected
 * to specify via movableflags whether the page will be movable in the
 * future or not.
 *
 * An architecture may override this function by defining
 * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE and providing its own
 * implementation.
 */
static inline struct page *
__alloc_zeroed_user_highpage(gfp_t movableflags,
			struct vm_area_struct *vma,
			unsigned long vaddr)
{
	struct page *page = alloc_page_vma(GFP_HIGHUSER | movableflags,
			vma, vaddr);

	if (page)
		clear_user_highpage(page, vaddr);

	return page;
}
#endif

/**
 * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA that the caller knows will
 * be able to migrate in the future using move_pages() or be reclaimed.
 */
static inline struct page *
alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
					unsigned long vaddr)
{
	return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
}
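
/*
 * Editorial sketch, not part of the original header: the typical caller
 * is an anonymous-page fault path, which wants a zeroed, movable page
 * for the faulting address ('address' and the VM_FAULT_OOM return come
 * from the generic fault-handling convention):
 *
 *	struct page *page;
 *
 *	page = alloc_zeroed_user_highpage_movable(vma, address);
 *	if (!page)
 *		return VM_FAULT_OOM;
 */
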
static inline void clear_highpage(struct page *page)
{
	void *kaddr = kmap_atomic(page, KM_USER0);
	clear_page(kaddr);
	kunmap_atomic(kaddr, KM_USER0);
}

static inline void zero_user_segments(struct page *page,
	unsigned start1, unsigned end1,
	unsigned start2, unsigned end2)
{
	void *kaddr = kmap_atomic(page, KM_USER0);

	BUG_ON(end1 > PAGE_SIZE || end2 > PAGE_SIZE);

	if (end1 > start1)
		memset(kaddr + start1, 0, end1 - start1);

	if (end2 > start2)
		memset(kaddr + start2, 0, end2 - start2);

	kunmap_atomic(kaddr, KM_USER0);
	flush_dcache_page(page);
}

static inline void zero_user_segment(struct page *page,
	unsigned start, unsigned end)
{
	zero_user_segments(page, start, end, 0, 0);
}

static inline void zero_user(struct page *page,
	unsigned start, unsigned size)
{
	zero_user_segments(page, start, start + size, 0, 0);
}
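
/*
 * Editorial sketch, not part of the original header: zero_user() is the
 * kind of helper a filesystem uses on truncate to clear the part of a
 * page beyond the new end of file (zero_user_segments() already handles
 * the dcache flush). 'newsize' below is hypothetical:
 *
 *	unsigned offset = newsize & (PAGE_SIZE - 1);
 *
 *	zero_user(page, offset, PAGE_SIZE - offset);
 */
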
static inline void __deprecated memclear_highpage_flush(struct page *page,
			unsigned int offset, unsigned int size)
{
	zero_user(page, offset, size);
}

#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE

static inline void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from, KM_USER0);
	vto = kmap_atomic(to, KM_USER1);
	copy_user_page(vto, vfrom, vaddr, to);
	kunmap_atomic(vto, KM_USER1);
	kunmap_atomic(vfrom, KM_USER0);
}

#endif

static inline void copy_highpage(struct page *to, struct page *from)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from, KM_USER0);
	vto = kmap_atomic(to, KM_USER1);
	copy_page(vto, vfrom);
	kunmap_atomic(vto, KM_USER1);
	kunmap_atomic(vfrom, KM_USER0);
}
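
/*
 * Editorial sketch, not part of the original header: copy_user_highpage()
 * is the building block for copy-on-write; a fault handler might do
 * (error handling abbreviated):
 *
 *	struct page *new_page;
 *
 *	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
 *	if (new_page)
 *		copy_user_highpage(new_page, old_page, address, vma);
 */
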
#endif /* _LINUX_HIGHMEM_H */