/*
 *  linux/arch/arm/mm/flush.c
 *
 *  Copyright (C) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>

#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/highmem.h>
#include <asm/smp_plat.h>
#include <asm/system.h>
#include <asm/tlbflush.h>

#include "mm.h"
#ifdef CONFIG_CPU_CACHE_VIPT

#define ALIAS_FLUSH_START	0xffff4000
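
/*
 * Write back and invalidate @pfn's data through a temporary kernel
 * alias mapped at the same cache colour as the user address @vaddr.
 */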
static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
{
	unsigned long to = ALIAS_FLUSH_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
	const int zero = 0;

	set_pte_ext(TOP_PTE(to), pfn_pte(pfn, PAGE_KERNEL), 0);
	flush_tlb_kernel_page(to);

	/* Clean+invalidate the D-cache over the alias page, then drain the write buffer. */
	asm(	"mcrr	p15, 0, %1, %0, c14\n"
	"	mcr	p15, 0, %2, c7, c10, 4"
	    :
	    : "r" (to), "r" (to + PAGE_SIZE - L1_CACHE_BYTES), "r" (zero)
	    : "cc");
}
static void flush_icache_alias(unsigned long pfn, unsigned long vaddr, unsigned long len)
{
	unsigned long colour = CACHE_COLOUR(vaddr);
	unsigned long offset = vaddr & (PAGE_SIZE - 1);
	unsigned long to;

	set_pte_ext(TOP_PTE(ALIAS_FLUSH_START) + colour, pfn_pte(pfn, PAGE_KERNEL), 0);
	to = ALIAS_FLUSH_START + (colour << PAGE_SHIFT) + offset;
	flush_tlb_kernel_page(to);
	flush_icache_range(to, to + len);
}
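
/*
 * Called when an entire address space is being torn down. VIVT caches
 * go through vivt_flush_cache_mm(); an aliasing VIPT cache is cleaned
 * and invalidated in its entirety, which is cheaper than chasing every
 * alias.
 */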
void flush_cache_mm(struct mm_struct *mm)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_mm(mm);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		/* Clean+invalidate the whole D-cache, then drain the write buffer. */
		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
		"	mcr	p15, 0, %0, c7, c10, 4"
		    :
		    : "r" (0)
		    : "cc");
	}
}
void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_range(vma, start, end);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
		"	mcr	p15, 0, %0, c7, c10, 4"
		    :
		    : "r" (0)
		    : "cc");
	}

	if (vma->vm_flags & VM_EXEC)
		__flush_icache_all();
}
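
/*
 * Flush a single user page at @user_addr, backed by @pfn. For an
 * aliasing VIPT cache a single congruent alias flush suffices.
 */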
void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_page(vma, user_addr, pfn);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		flush_pfn_alias(pfn, user_addr);
		__flush_icache_all();
	}

	if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged())
		__flush_icache_all();
}
#else
#define flush_pfn_alias(pfn,vaddr)		do { } while (0)
#define flush_icache_alias(pfn,vaddr,len)	do { } while (0)
#endif
static void flush_ptrace_access_other(void *args)
{
	__flush_icache_all();
}
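
/*
 * Make the kernel copy at @kaddr coherent with the user mapping at
 * @uaddr after writing into another process's page. Where cache
 * maintenance is not broadcast in hardware, the I-cache invalidation
 * is pushed to the other CPUs by IPI (flush_ptrace_access_other).
 */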
static
void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
			 unsigned long uaddr, void *kaddr, unsigned long len)
{
	if (cache_is_vivt()) {
		if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
			unsigned long addr = (unsigned long)kaddr;
			__cpuc_coherent_kern_range(addr, addr + len);
		}
		return;
	}

	if (cache_is_vipt_aliasing()) {
		flush_pfn_alias(page_to_pfn(page), uaddr);
		__flush_icache_all();
		return;
	}

	/* VIPT non-aliasing D-cache */
	if (vma->vm_flags & VM_EXEC) {
		unsigned long addr = (unsigned long)kaddr;
		if (icache_is_vipt_aliasing())
			flush_icache_alias(page_to_pfn(page), uaddr, len);
		else
			__cpuc_coherent_kern_range(addr, addr + len);
		if (cache_ops_need_broadcast())
			smp_call_function(flush_ptrace_access_other,
					  NULL, 1);
	}
}
/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 *
 * Note that this code needs to run on the current CPU.
 */
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long uaddr, void *dst, const void *src,
		       unsigned long len)
{
#ifdef CONFIG_SMP
	preempt_disable();
#endif
	memcpy(dst, src, len);
	flush_ptrace_access(vma, page, uaddr, dst, len);
#ifdef CONFIG_SMP
	preempt_enable();
#endif
}
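
/*
 * A sketch of the expected caller, e.g. access_process_vm() patching
 * another task's text:
 *
 *	get_user_pages() to pin the remote page
 *	kaddr = kmap(page);
 *	copy_to_user_page(vma, page, uaddr, kaddr + offset, buf, len);
 *	kunmap(page);
 *
 * The flush in flush_ptrace_access() must complete before the pin is
 * dropped so the target task observes the new data and instructions.
 */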
void __flush_dcache_page(struct address_space *mapping, struct page *page)
{
	/*
	 * Writeback any data associated with the kernel mapping of this
	 * page.  This ensures that data in the physical page is mutually
	 * coherent with the kernel's mapping.
	 */
	if (!PageHighMem(page)) {
		size_t page_size = PAGE_SIZE << compound_order(page);
		__cpuc_flush_dcache_area(page_address(page), page_size);
	} else {
		int i;

		/* Highmem compound pages must be flushed one page at a time. */
		for (i = 0; i < (1 << compound_order(page)); i++) {
			struct page *cpage = page + i;
			void *addr = kmap_high_get(cpage);

			if (addr) {
				__cpuc_flush_dcache_area(addr, PAGE_SIZE);
				kunmap_high(cpage);
			} else if (cache_is_vipt()) {
				/* unmapped pages might still be cached */
				addr = kmap_atomic(cpage);
				__cpuc_flush_dcache_area(addr, PAGE_SIZE);
				kunmap_atomic(addr);
			}
		}
	}

	/*
	 * If this is a page cache page, and we have an aliasing VIPT cache,
	 * we only need to do one flush - which would be at the relevant
	 * userspace colour, which is congruent with page->index.
	 */
	if (mapping && cache_is_vipt_aliasing())
		flush_pfn_alias(page_to_pfn(page),
				page->index << PAGE_CACHE_SHIFT);
}
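
/*
 * Flush every user-space alias of @page in the current mm: walk the
 * i_mmap prio tree under flush_dcache_mmap_lock() and flush each VMA
 * that may map the page.
 */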
static void __flush_dcache_aliases(struct address_space *mapping, struct page *page)
{
	struct mm_struct *mm = current->active_mm;
	struct vm_area_struct *mpnt;
	struct prio_tree_iter iter;
	pgoff_t pgoff;

	/*
	 * There are possible user space mappings of this page:
	 * - VIVT cache: we need to also write back and invalidate all user
	 *   data in the current VM view associated with this page.
	 * - aliasing VIPT: we only need to find one mapping of this page.
	 */
	pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

	flush_dcache_mmap_lock(mapping);
	vma_prio_tree_foreach(mpnt, &iter, &mapping->i_mmap, pgoff, pgoff) {
		unsigned long offset;

		/*
		 * If this VMA is not in our MM, we can ignore it.
		 */
		if (mpnt->vm_mm != mm)
			continue;
		if (!(mpnt->vm_flags & VM_MAYSHARE))
			continue;
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		flush_cache_page(mpnt, mpnt->vm_start + offset, page_to_pfn(page));
	}
	flush_dcache_mmap_unlock(mapping);
}
#if __LINUX_ARM_ARCH__ >= 6
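/*
 * Called via set_pte_at() when a new user mapping is installed: flush
 * the D-cache lazily, the first time the page is actually mapped, and
 * the I-cache for executable mappings.
 */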
void __sync_icache_dcache(pte_t pteval)
{
	unsigned long pfn;
	struct page *page;
	struct address_space *mapping;

	if (cache_is_vipt_nonaliasing() && !pte_exec(pteval))
		/* only flush non-aliasing VIPT caches for exec mappings */
		return;
	pfn = pte_pfn(pteval);
	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	if (cache_is_vipt_aliasing())
		mapping = page_mapping(page);
	else
		mapping = NULL;

	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
		__flush_dcache_page(mapping, page);

	if (pte_exec(pteval))
		__flush_icache_all();
}
#endif
/*
 * Ensure cache coherency between kernel mapping and userspace mapping
 * of this page.
 *
 * We have three cases to consider:
 *  - VIPT non-aliasing cache: fully coherent so nothing required.
 *  - VIVT: fully aliasing, so we need to handle every alias in our
 *          current VM view.
 *  - VIPT aliasing: need to handle one alias in our current VM view.
 *
 * If we need to handle aliasing:
 *  If the page only exists in the page cache and there are no user
 *  space mappings, we can be lazy and remember that we may have dirty
 *  kernel cache lines for later.  Otherwise, we assume we have
 *  aliasing mappings.
 *
 * Note that we disable the lazy flush for SMP configurations where
 * the cache maintenance operations are not automatically broadcasted.
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	/*
	 * The zero page is never written to, so never has any dirty
	 * cache lines, and therefore never needs to be flushed.
	 */
	if (page == ZERO_PAGE(0))
		return;

	mapping = page_mapping(page);

	if (!cache_ops_need_broadcast() &&
	    mapping && !page_mapped(page))
		clear_bit(PG_dcache_clean, &page->flags);
	else {
		__flush_dcache_page(mapping, page);
		if (mapping && cache_is_vivt())
			__flush_dcache_aliases(mapping, page);
		else if (mapping)
			__flush_icache_all();
		set_bit(PG_dcache_clean, &page->flags);
	}
}
EXPORT_SYMBOL(flush_dcache_page);
/*
 * Ensure cache coherency for the kernel mapping of this page.  We can
 * assume that the page is pinned via kmap.
 *
 * If the page only exists in the page cache and there are no user
 * space mappings, this is a no-op since the page was already marked
 * dirty at creation.  Otherwise, we need to flush the dirty kernel
 * cache lines directly.
 */
void flush_kernel_dcache_page(struct page *page)
{
	if (cache_is_vivt() || cache_is_vipt_aliasing()) {
		struct address_space *mapping;

		mapping = page_mapping(page);

		if (!mapping || mapping_mapped(mapping)) {
			void *addr;

			addr = page_address(page);
			/*
			 * kmap_atomic() doesn't set the page virtual
			 * address for highmem pages, and
			 * kunmap_atomic() takes care of cache
			 * flushing already.
			 */
			if (!IS_ENABLED(CONFIG_HIGHMEM) || addr)
				__cpuc_flush_dcache_area(addr, PAGE_SIZE);
		}
	}
}
EXPORT_SYMBOL(flush_kernel_dcache_page);
/*
 * Flush an anonymous page so that users of get_user_pages()
 * can safely access the data.  The expected sequence is:
 *
 *  get_user_pages()
 *    -> flush_anon_page
 *  memcpy() to/from page
 *  if written to page, flush_dcache_page()
 */
void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
	unsigned long pfn;

	/* VIPT non-aliasing caches need do nothing */
	if (cache_is_vipt_nonaliasing())
		return;

	/*
	 * Write back and invalidate userspace mapping.
	 */
	pfn = page_to_pfn(page);
	if (cache_is_vivt()) {
		flush_cache_page(vma, vmaddr, pfn);
	} else {
		/*
		 * For aliasing VIPT, we can flush an alias of the
		 * userspace address only.
		 */
		flush_pfn_alias(pfn, vmaddr);
		__flush_icache_all();
	}

	/*
	 * Invalidate kernel mapping.  No data should be contained
	 * in this mapping of the page.  FIXME: this is overkill
	 * since we actually ask for a write-back and invalidate.
	 */
	__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}