/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2009, Wind River Systems Inc
 * Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
 */

#include <linux/export.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/fs.h>

#include <asm/cacheflush.h>
#include <asm/cpuinfo.h>

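/*
 * Low-level walkers. Each rounds [start, end) out to whole cache lines
 * and then operates one line at a time. A sketch of the Nios II cache
 * instruction semantics assumed here (summarized from the Nios II
 * Processor Reference Handbook; consult it for the authoritative
 * definitions):
 *
 *   flushda - write back (if dirty) and invalidate the dcache line
 *             containing the address, only when the line's tag matches
 *   flushd  - write back and invalidate the dcache line indexed by the
 *             address, regardless of its tag
 *   initda  - invalidate the matching dcache line without a writeback
 *   flushi  - invalidate the icache line indexed by the address
 *   flushp  - flush the pipeline, discarding any prefetched instructions
 */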
static void __flush_dcache(unsigned long start, unsigned long end)
{
	unsigned long addr;

	start &= ~(cpuinfo.dcache_line_size - 1);
	end += (cpuinfo.dcache_line_size - 1);
	end &= ~(cpuinfo.dcache_line_size - 1);

	for (addr = start; addr < end; addr += cpuinfo.dcache_line_size) {
		__asm__ __volatile__ ("	flushda 0(%0)\n"
					: /* Outputs */
					: /* Inputs  */ "r"(addr)
					/* : No clobber */);
	}
}

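/*
 * Flush by cache index rather than by address: because flushd selects the
 * line by index regardless of tag (and the Nios II dcache is direct-mapped,
 * per the handbook), walking dcache_size bytes of any address range touches
 * every line once; the clamp below stops the loop from doing redundant
 * passes over the same lines.
 */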
static void __flush_dcache_all(unsigned long start, unsigned long end)
{
	unsigned long addr;

	start &= ~(cpuinfo.dcache_line_size - 1);
	end += (cpuinfo.dcache_line_size - 1);
	end &= ~(cpuinfo.dcache_line_size - 1);

	if (end > start + cpuinfo.dcache_size)
		end = start + cpuinfo.dcache_size;

	for (addr = start; addr < end; addr += cpuinfo.dcache_line_size) {
		__asm__ __volatile__ ("	flushd 0(%0)\n"
					: /* Outputs */
					: /* Inputs  */ "r"(addr)
					/* : No clobber */);
	}
}

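/*
 * Beware: initda drops the addressed line without writing dirty data back.
 * This is an invalidate, not a flush - callers must guarantee that nothing
 * in [start, end) still needs to reach memory.
 */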
static void __invalidate_dcache(unsigned long start, unsigned long end)
{
	unsigned long addr;

	start &= ~(cpuinfo.dcache_line_size - 1);
	end += (cpuinfo.dcache_line_size - 1);
	end &= ~(cpuinfo.dcache_line_size - 1);

	for (addr = start; addr < end; addr += cpuinfo.dcache_line_size) {
		__asm__ __volatile__ ("	initda 0(%0)\n"
					: /* Outputs */
					: /* Inputs  */ "r"(addr)
					/* : No clobber */);
	}
}

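/*
 * The icache never holds dirty data, so invalidating with flushi is
 * sufficient; the trailing flushp then forces a pipeline refetch so that
 * instructions already prefetched from stale lines cannot execute.
 */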
static void __flush_icache(unsigned long start, unsigned long end)
{
	unsigned long addr;

	start &= ~(cpuinfo.icache_line_size - 1);
	end += (cpuinfo.icache_line_size - 1);
	end &= ~(cpuinfo.icache_line_size - 1);

	if (end > start + cpuinfo.icache_size)
		end = start + cpuinfo.icache_size;

	for (addr = start; addr < end; addr += cpuinfo.icache_line_size) {
		__asm__ __volatile__ ("	flushi %0\n"
					: /* Outputs */
					: /* Inputs  */ "r"(addr)
					/* : No clobber */);
	}
	__asm__ __volatile__ ("	flushp\n");
}

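/*
 * A physical page mapped at several user virtual addresses can occupy
 * several distinct dcache lines at once. flush_aliases() visits every
 * shared mapping of the page in the current mm and flushes each alias;
 * the scheme mirrors the alias handling of other Linux ports with
 * virtually indexed caches, such as arm.
 */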
static void flush_aliases(struct address_space *mapping, struct page *page)
{
	struct mm_struct *mm = current->active_mm;
	struct vm_area_struct *mpnt;
	pgoff_t pgoff;

	pgoff = page->index;

	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
		unsigned long offset;

		if (mpnt->vm_mm != mm)
			continue;
		if (!(mpnt->vm_flags & VM_MAYSHARE))
			continue;

		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		flush_cache_page(mpnt, mpnt->vm_start + offset,
			page_to_pfn(page));
	}
	flush_dcache_mmap_unlock(mapping);
}

void flush_cache_all(void)
{
	__flush_dcache_all(0, cpuinfo.dcache_size);
	__flush_icache(0, cpuinfo.icache_size);
}

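/*
 * The caches are not tagged by address space, so there is no way to flush
 * only one mm's lines; the mm-wide hooks simply flush everything.
 */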
void flush_cache_mm(struct mm_struct *mm)
{
	flush_cache_all();
}

void flush_cache_dup_mm(struct mm_struct *mm)
{
	flush_cache_all();
}

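/*
 * With a write-back dcache, freshly written instructions may still sit in
 * dirty dcache lines; they must be pushed to memory before the icache is
 * invalidated, or the refetch can read stale bytes.
 */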
void flush_icache_range(unsigned long start, unsigned long end)
{
	__flush_dcache(start, end);
	__flush_icache(start, end);
}

void flush_dcache_range(unsigned long start, unsigned long end)
{
	__flush_dcache(start, end);
}
EXPORT_SYMBOL(flush_dcache_range);

void invalidate_dcache_range(unsigned long start, unsigned long end)
{
	__invalidate_dcache(start, end);
}
EXPORT_SYMBOL(invalidate_dcache_range);

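/*
 * The icache needs flushing only when the range can contain code: either
 * the VMA is executable, or there is no VMA to rule it out.
 */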
void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
			unsigned long end)
{
	__flush_dcache(start, end);
	if (vma == NULL || (vma->vm_flags & VM_EXEC))
		__flush_icache(start, end);
}

void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
	unsigned long start = (unsigned long) page_address(page);
	unsigned long end = start + PAGE_SIZE;

	/* Push any dirty data out of the write-back dcache first. */
	__flush_dcache(start, end);
	__flush_icache(start, end);
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
			unsigned long pfn)
{
	unsigned long start = vmaddr;
	unsigned long end = start + PAGE_SIZE;

	__flush_dcache(start, end);
	if (vma->vm_flags & VM_EXEC)
		__flush_icache(start, end);
}

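/*
 * flush_dcache_page() cooperates with update_mmu_cache() below: when the
 * kernel dirties a page-cache page it is either flushed immediately (if
 * user mappings might see a stale alias) or merely marked not clean by
 * clearing PG_dcache_clean, deferring the flush until update_mmu_cache()
 * installs a user mapping for the page.
 */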
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	/*
	 * The zero page is never written to, so never has any dirty
	 * cache lines, and therefore never needs to be flushed.
	 */
	if (page == ZERO_PAGE(0))
		return;

	mapping = page_mapping(page);

	/*
	 * A page-cache page with no user mappings can be flushed lazily;
	 * otherwise flush it now, along with any user-space aliases.
	 */
	if (mapping && !mapping_mapped(mapping)) {
		clear_bit(PG_dcache_clean, &page->flags);
	} else {
		unsigned long start = (unsigned long)page_address(page);

		__flush_dcache_all(start, start + PAGE_SIZE);
		if (mapping)
			flush_aliases(mapping, page);
		set_bit(PG_dcache_clean, &page->flags);
	}
}
EXPORT_SYMBOL(flush_dcache_page);

void update_mmu_cache(struct vm_area_struct *vma,
		      unsigned long address, pte_t *pte)
{
	unsigned long pfn = pte_pfn(*pte);
	struct page *page;

	if (!pfn_valid(pfn))
		return;

	/*
	 * The zero page is never written to, so never has any dirty
	 * cache lines, and therefore never needs to be flushed.
	 */
	page = pfn_to_page(pfn);
	if (page == ZERO_PAGE(0))
		return;

	if (!PageReserved(page) &&
	    !test_and_set_bit(PG_dcache_clean, &page->flags)) {
		unsigned long start = page_to_virt(page);
		struct address_space *mapping;

		__flush_dcache(start, start + PAGE_SIZE);

		mapping = page_mapping(page);
		if (mapping)
			flush_aliases(mapping, page);
	}
}

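/*
 * The kernel copies and clears user pages through its own mapping, so
 * both the user-visible alias (vaddr) and the kernel address of the page
 * must be flushed for user space to observe the new contents.
 */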
void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *to)
{
	__flush_dcache(vaddr, vaddr + PAGE_SIZE);
	copy_page(vto, vfrom);
	__flush_dcache((unsigned long)vto, (unsigned long)vto + PAGE_SIZE);
}

void clear_user_page(void *addr, unsigned long vaddr, struct page *page)
{
	__flush_dcache(vaddr, vaddr + PAGE_SIZE);
	clear_page(addr);
	__flush_dcache((unsigned long)addr, (unsigned long)addr + PAGE_SIZE);
}

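/*
 * copy_{from,to}_user_page() are used when the kernel accesses another
 * process's pages on its behalf (e.g. ptrace). The user alias is flushed
 * via flush_cache_page() first, the kernel-side lines touched by the copy
 * are flushed afterwards, and the icache is flushed as well whenever the
 * mapping is executable.
 */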
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
			unsigned long user_vaddr,
			void *dst, void *src, int len)
{
	flush_cache_page(vma, user_vaddr, page_to_pfn(page));
	memcpy(dst, src, len);
	__flush_dcache((unsigned long)src, (unsigned long)src + len);
	if (vma->vm_flags & VM_EXEC)
		__flush_icache((unsigned long)src, (unsigned long)src + len);
}

void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
			unsigned long user_vaddr,
			void *dst, void *src, int len)
{
	flush_cache_page(vma, user_vaddr, page_to_pfn(page));
	memcpy(dst, src, len);
	__flush_dcache((unsigned long)dst, (unsigned long)dst + len);
	if (vma->vm_flags & VM_EXEC)
		__flush_icache((unsigned long)dst, (unsigned long)dst + len);
}