2 * Cache control for MicroBlaze cache memories
4 * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
5 * Copyright (C) 2007-2009 PetaLogix
6 * Copyright (C) 2007 John Williams <john.williams@petalogix.com>
8 * This file is subject to the terms and conditions of the GNU General
9 * Public License. See the file COPYING in the main directory of this
10 * archive for more details.
13 #include <asm/cacheflush.h>
14 #include <linux/cache.h>
15 #include <asm/cpuinfo.h>
17 /* Exported functions */
/*
 * _enable_icache - enable the MicroBlaze instruction cache.
 *
 * Does nothing unless the CPU was configured with an i-cache
 * (cpuinfo.use_icache). Two build-time paths exist: one using the MSR
 * set/clear instructions when CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
 * is set, and a fallback path below it.
 * NOTE(review): both inline-asm bodies are truncated in this view of the
 * file, so the exact MSR bit manipulation (presumably setting the ICE
 * bit) cannot be confirmed here - verify against the full source.
 */
19 void _enable_icache(void)
21 if (cpuinfo.use_icache) {
22 #if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
23 __asm__ __volatile__ ("			\
30 __asm__ __volatile__ ("			\
/*
 * _disable_icache - disable the MicroBlaze instruction cache.
 *
 * Counterpart of _enable_icache(); no-op when cpuinfo.use_icache is
 * clear. Uses MSR set/clear instructions when
 * CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR is set, otherwise falls back
 * to the second asm path.
 * NOTE(review): asm bodies are truncated in this view - presumably they
 * clear the MSR ICE bit, but confirm against the full source.
 */
43 void _disable_icache(void)
45 if (cpuinfo.use_icache) {
46 #if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
47 __asm__ __volatile__ ("			\
54 __asm__ __volatile__ ("			\
/*
 * _invalidate_icache - invalidate a single instruction-cache line.
 * @addr: cache-index address of the line to invalidate.
 *
 * No-op when the CPU has no i-cache. NOTE(review): the asm body
 * (presumably a "wic" instruction) is truncated in this view - confirm
 * against the full source.
 */
67 void _invalidate_icache(unsigned int addr)
69 if (cpuinfo.use_icache) {
70 __asm__ __volatile__ ("			\
/*
 * _enable_dcache - enable the MicroBlaze data cache.
 *
 * Mirrors _enable_icache() for the d-cache: no-op unless
 * cpuinfo.use_dcache is set; MSR set/clear instruction path when
 * CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR is set, manual MSR path
 * otherwise.
 * NOTE(review): asm bodies truncated in this view (presumably set the
 * MSR DCE bit) - confirm against the full source.
 */
77 void _enable_dcache(void)
79 if (cpuinfo.use_dcache) {
80 #if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
81 __asm__ __volatile__ ("			\
88 __asm__ __volatile__ ("			\
/*
 * _disable_dcache - disable the MicroBlaze data cache.
 *
 * No-op unless cpuinfo.use_dcache is set. The non-MSR-instruction path
 * visibly masks a bit out of r12 ("andi r12, r12, ~%0"), i.e. it clears
 * a cache-enable bit in a copy of the MSR held in r12; the surrounding
 * mfs/mts instructions are on lines not visible here.
 * NOTE(review): asm bodies truncated in this view - confirm the full
 * read-modify-write sequence against the complete source.
 */
101 void _disable_dcache(void)
103 if (cpuinfo.use_dcache) {
104 #if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
105 __asm__ __volatile__ ("			\
112 __asm__ __volatile__ ("			\
115 andi r12, r12, ~%0;			\
/*
 * _invalidate_dcache - invalidate a single data-cache line.
 * @addr: cache-index address of the line to invalidate.
 *
 * No-op when the CPU has no d-cache. NOTE(review): the asm body
 * (presumably a "wdc" instruction) is truncated in this view - confirm
 * against the full source.
 */
125 void _invalidate_dcache(unsigned int addr)
127 if (cpuinfo.use_dcache)
128 __asm__ __volatile__ ("			\
/*
 * __invalidate_icache_all - invalidate the entire instruction cache.
 *
 * Walks cache-index addresses 0..icache_size in icache_line steps and
 * invalidates each line via __invalidate_icache(), with interrupts
 * disabled for the whole walk. Indices are used directly - as the
 * original comment notes, no CACHE_BASE offset needs to be added.
 * (Declarations of 'flags' and 'i' fall on lines not visible here.)
 */
134 void __invalidate_icache_all(void)
139 if (cpuinfo.use_icache) {
140 local_irq_save(flags);
143 /* Just loop through cache size and invalidate, no need to add
144 CACHE_BASE address */
145 for (i = 0; i < cpuinfo.icache_size;
146 i += cpuinfo.icache_line)
147 __invalidate_icache(i);
150 local_irq_restore(flags);
/*
 * __invalidate_icache_range - invalidate i-cache lines covering a range.
 * @start: first byte of the range.
 * @end:   one past the last byte of the range.
 *
 * The range is first clamped to at most one cache's worth of bytes
 * (invalidating more than icache_size from @start would only revisit
 * the same cache lines), then @start is rounded down and @end rounded
 * up to cache-line boundaries before invalidating line by line with
 * interrupts disabled.
 * NOTE(review): "end = (end & align) + icache_line" rounds up
 * unconditionally, so an already line-aligned @end causes one extra
 * line to be invalidated - harmless over-invalidation, but wasteful.
 * (Declarations of 'flags', 'align' and 'i' fall on lines not visible
 * here.)
 */
154 void __invalidate_icache_range(unsigned long start, unsigned long end)
160 if (cpuinfo.use_icache) {
162 * No need to cover entire cache range,
163 * just cover cache footprint
165 end = min(start + cpuinfo.icache_size, end);
166 align = ~(cpuinfo.icache_line - 1);
167 start &= align; /* Make sure we are aligned */
168 /* Push end up to the next cache line */
169 end = ((end & align) + cpuinfo.icache_line);
171 local_irq_save(flags);
174 for (i = start; i < end; i += cpuinfo.icache_line)
175 __invalidate_icache(i);
178 local_irq_restore(flags);
/*
 * __invalidate_icache_page - invalidate the i-cache for one page.
 *
 * Coarse implementation: rather than computing the page's address range,
 * it simply invalidates the whole instruction cache. @vma and @page are
 * accepted for interface compatibility but unused.
 */
182 void __invalidate_icache_page(struct vm_area_struct *vma, struct page *page)
184 __invalidate_icache_all();
/*
 * __invalidate_icache_user_range - invalidate i-cache for a user range.
 *
 * Coarse implementation: ignores the supplied vma/page/address
 * parameters and invalidates the whole instruction cache. (The tail of
 * the parameter list falls on a line not visible here.)
 */
187 void __invalidate_icache_user_range(struct vm_area_struct *vma,
188 struct page *page, unsigned long adr,
191 __invalidate_icache_all();
/*
 * __invalidate_cache_sigtramp - flush i-cache over a signal trampoline.
 * @addr: start of the freshly written trampoline code.
 *
 * Invalidates the 8 bytes at @addr so the newly written trampoline
 * instructions are fetched from memory instead of stale cache lines.
 */
194 void __invalidate_cache_sigtramp(unsigned long addr)
196 __invalidate_icache_range(addr, addr + 8);
/*
 * __invalidate_dcache_all - invalidate the entire data cache.
 *
 * D-cache counterpart of __invalidate_icache_all(): walks cache-index
 * addresses 0..dcache_size in dcache_line steps and invalidates each
 * line with interrupts disabled. Indices are used directly - no
 * CACHE_BASE offset needed, per the original comment.
 * (Declarations of 'flags' and 'i' fall on lines not visible here.)
 */
199 void __invalidate_dcache_all(void)
204 if (cpuinfo.use_dcache) {
205 local_irq_save(flags);
209 * Just loop through cache size and invalidate,
210 * no need to add CACHE_BASE address
212 for (i = 0; i < cpuinfo.dcache_size;
213 i += cpuinfo.dcache_line)
214 __invalidate_dcache(i);
217 local_irq_restore(flags);
/*
 * __invalidate_dcache_range - invalidate d-cache lines covering a range.
 * @start: first byte of the range.
 * @end:   one past the last byte of the range.
 *
 * Mirrors __invalidate_icache_range(): clamp the span to at most one
 * cache's worth of bytes, round @start down and @end up to line
 * boundaries, then invalidate line by line with interrupts disabled.
 * NOTE(review): as in the i-cache variant, the round-up of @end is
 * unconditional, so a line-aligned @end invalidates one extra line -
 * harmless but wasteful.
 * (Declarations of 'flags', 'align' and 'i' fall on lines not visible
 * here.)
 */
221 void __invalidate_dcache_range(unsigned long start, unsigned long end)
227 if (cpuinfo.use_dcache) {
229 * No need to cover entire cache range,
230 * just cover cache footprint
232 end = min(start + cpuinfo.dcache_size, end);
233 align = ~(cpuinfo.dcache_line - 1);
234 start &= align; /* Make sure we are aligned */
235 /* Push end up to the next cache line */
236 end = ((end & align) + cpuinfo.dcache_line);
237 local_irq_save(flags);
240 for (i = start; i < end; i += cpuinfo.dcache_line)
241 __invalidate_dcache(i);
244 local_irq_restore(flags);
/*
 * __invalidate_dcache_page - invalidate the d-cache for one page.
 *
 * Coarse implementation: invalidates the whole data cache instead of
 * the page's specific range. @vma and @page are accepted for interface
 * compatibility but unused.
 */
248 void __invalidate_dcache_page(struct vm_area_struct *vma, struct page *page)
250 __invalidate_dcache_all();
/*
 * __invalidate_dcache_user_range - invalidate d-cache for a user range.
 *
 * Coarse implementation: ignores the supplied vma/page/address
 * parameters and invalidates the whole data cache. (The tail of the
 * parameter list falls on a line not visible here.)
 */
253 void __invalidate_dcache_user_range(struct vm_area_struct *vma,
254 struct page *page, unsigned long adr,
257 __invalidate_dcache_all();