2 * Cache control for MicroBlaze cache memories
4 * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
5 * Copyright (C) 2007-2009 PetaLogix
6 * Copyright (C) 2007-2009 John Williams <john.williams@petalogix.com>
8 * This file is subject to the terms and conditions of the GNU General
9 * Public License. See the file COPYING in the main directory of this
10 * archive for more details.
13 #include <asm/cacheflush.h>
14 #include <linux/cache.h>
15 #include <asm/cpuinfo.h>
/* Enable the instruction cache: set the MSR_ICE bit with the msrset
 * instruction (CPUs that implement msrset/msrclr).
 * NOTE(review): this chunk is line-sampled; the asm continuation line(s)
 * and braces are not visible here.
 */
18 static inline void __enable_icache_msr(void)
20 __asm__ __volatile__ (" msrset r0, %0; \
22 : : "i" (MSR_ICE) : "memory");
/* Disable the instruction cache: clear the MSR_ICE bit with msrclr.
 * NOTE(review): asm continuation line(s) and braces not visible in this
 * sampled chunk.
 */
25 static inline void __disable_icache_msr(void)
27 __asm__ __volatile__ (" msrclr r0, %0; \
29 : : "i" (MSR_ICE) : "memory");
/* Enable the data cache via msrset. The asm input constraint line is not
 * visible in this chunk - presumably "i" (MSR_DCE); confirm against the
 * full file.
 */
32 static inline void __enable_dcache_msr(void)
34 __asm__ __volatile__ (" msrset r0, %0; \
/* Disable the data cache via msrclr. Operand constraint line not visible
 * here - presumably "i" (MSR_DCE); confirm against the full file.
 */
41 static inline void __disable_dcache_msr(void)
43 __asm__ __volatile__ (" msrclr r0, %0; \
/* Enable the icache on CPUs without msrset/msrclr: read MSR into r12
 * (mfs), then - on lines not visible here - set the bit and write MSR
 * back. r12 is used as scratch.
 */
50 static inline void __enable_icache_nomsr(void)
52 __asm__ __volatile__ (" mfs r12, rmsr; \
/* Disable the icache without msrclr: MSR read-modify-write via r12; the
 * mask/write-back lines are not visible in this sampled chunk.
 */
62 static inline void __disable_icache_nomsr(void)
64 __asm__ __volatile__ (" mfs r12, rmsr; \
/* Enable the dcache without msrset: MSR read-modify-write via r12; the
 * mask/write-back lines are not visible in this sampled chunk.
 */
74 static inline void __enable_dcache_nomsr(void)
76 __asm__ __volatile__ (" mfs r12, rmsr; \
/* Disable the dcache without msrclr: MSR read-modify-write via r12; the
 * mask/write-back lines are not visible in this sampled chunk.
 */
86 static inline void __disable_dcache_nomsr(void)
88 __asm__ __volatile__ (" mfs r12, rmsr; \
99 /* Helper macro for computing the limits of cache range loops
101 * End address can be unaligned which is OK for C implementation.
102 * ASM implementation align it in ASM macros
 * Clamps end to start + cache_size so a range op never walks more than
 * one whole cache. 'align' is the line-length mask; its use is on macro
 * lines not visible in this sampled chunk.
104 #define CACHE_LOOP_LIMITS(start, end, cache_line_length, cache_size) \
106 int align = ~(cache_line_length - 1); \
107 end = min(start + cache_size, end); \
112 * Helper macro to loop over the specified cache_size/line_length and
113 * execute 'op' on that cacheline
 * Walks the whole cache top-down: len starts at cache_size - line_length
 * and the asm loop adds the negative 'step' each iteration. WARN_ON
 * fires if line_length is not positive (step would not decrease).
115 #define CACHE_ALL_LOOP(cache_size, line_length, op) \
117 unsigned int len = cache_size - line_length; \
118 int step = -line_length; \
119 WARN_ON(step >= 0); \
121 __asm__ __volatile__ (" 1: " #op " %0, r0; \
124 " : : "r" (len), "r" (step) \
128 /* Used for wdc.flush/clear which can use rB for offset which is not possible
129 * to use for simple wdc or wic.
131 * start address is cache aligned
132 * end address is not aligned, if end is aligned then I have to substract
133 * cacheline length because I can't flush/invalidate the next cacheline.
134 * If is not, I align it because I will flush/invalidate whole line.
 * The asm loop issues "op start, count" and decrements count by the
 * negative 'step' until it reaches zero, i.e. it walks the range from
 * the last line back to start.
136 #define CACHE_RANGE_LOOP_2(start, end, line_length, op) \
138 int step = -line_length; \
139 int align = ~(line_length - 1); \
140 end = ((end & align) == end) ? end - line_length : end & align; \
141 int count = end - start; \
142 WARN_ON(count < 0); \
144 __asm__ __volatile__ (" 1: " #op " %0, %1; \
147 " : : "r" (start), "r" (count), \
148 "r" (step) : "memory"); \
151 /* It is used only first parameter for OP - for wic, wdc */
/* Loop over [start, end] by line_length issuing "op rA, r0" (address in
 * rA only). 'temp' is declared on a macro line not visible here.
 * NOTE(review): if start/end are unsigned long, end - start is unsigned
 * and WARN_ON(end - start < 0) can never fire - confirm intended types.
 */
152 #define CACHE_RANGE_LOOP_1(start, end, line_length, op) \
155 int align = ~(line_length - 1); \
156 end = ((end & align) == end) ? end - line_length : end & align; \
157 WARN_ON(end - start < 0); \
159 __asm__ __volatile__ (" 1: " #op " %1, r0; \
163 " : : "r" (temp), "r" (start), "r" (end),\
164 "r" (line_length) : "memory"); \
/* Invalidate the icache over [start, end): clamp the range, mask IRQs,
 * turn the icache off via MSR, issue wic per line, then re-enable and
 * restore IRQs.
 * NOTE(review): both the asm-macro loop and the C for-loop appear; the
 * full file picks one with #if lines not visible in this sampled chunk.
 * Declarations of 'flags'/'i' are likewise on elided lines.
 */
169 static void __flush_icache_range_msr_irq(unsigned long start, unsigned long end)
175 pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
176 (unsigned int)start, (unsigned int) end);
178 CACHE_LOOP_LIMITS(start, end,
179 cpuinfo.icache_line_length, cpuinfo.icache_size);
181 local_irq_save(flags);
182 __disable_icache_msr();
185 CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
187 for (i = start; i < end; i += cpuinfo.icache_line_length)
188 __asm__ __volatile__ ("wic %0, r0;" \
191 __enable_icache_msr();
192 local_irq_restore(flags);
/* Same as the msr_irq variant but for CPUs without msrset/msrclr: the
 * icache is disabled/enabled with the nomsr MSR read-modify-write
 * helpers. Asm-macro vs C-loop selection (#if) and the declarations of
 * 'flags'/'i' are on lines not visible in this sampled chunk.
 */
195 static void __flush_icache_range_nomsr_irq(unsigned long start,
202 pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
203 (unsigned int)start, (unsigned int) end);
205 CACHE_LOOP_LIMITS(start, end,
206 cpuinfo.icache_line_length, cpuinfo.icache_size);
208 local_irq_save(flags);
209 __disable_icache_nomsr();
212 CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
214 for (i = start; i < end; i += cpuinfo.icache_line_length)
215 __asm__ __volatile__ ("wic %0, r0;" \
219 __enable_icache_nomsr();
220 local_irq_restore(flags);
/* Invalidate the icache range with wic, without masking IRQs or turning
 * the cache off (safe on newer cores). Asm-macro vs C-loop selection and
 * the 'i' declaration are on lines not visible in this sampled chunk.
 */
223 static void __flush_icache_range_noirq(unsigned long start,
229 pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
230 (unsigned int)start, (unsigned int) end);
232 CACHE_LOOP_LIMITS(start, end,
233 cpuinfo.icache_line_length, cpuinfo.icache_size);
235 CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
237 for (i = start; i < end; i += cpuinfo.icache_line_length)
238 __asm__ __volatile__ ("wic %0, r0;" \
/* Invalidate the entire icache: IRQs masked and icache disabled (MSR
 * path) around a full-cache wic sweep. Asm-macro vs C-loop selection and
 * 'flags'/'i' declarations are on lines not visible here.
 */
243 static void __flush_icache_all_msr_irq(void)
249 pr_debug("%s\n", __func__);
251 local_irq_save(flags);
252 __disable_icache_msr();
254 CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
256 for (i = 0; i < cpuinfo.icache_size;
257 i += cpuinfo.icache_line_length)
258 __asm__ __volatile__ ("wic %0, r0;" \
261 __enable_icache_msr();
262 local_irq_restore(flags);
/* Invalidate the entire icache on CPUs without msrset/msrclr: same sweep
 * as the msr variant but using the nomsr enable/disable helpers.
 * Elided lines hold the #if selection and 'flags'/'i' declarations.
 */
265 static void __flush_icache_all_nomsr_irq(void)
271 pr_debug("%s\n", __func__);
273 local_irq_save(flags);
274 __disable_icache_nomsr();
276 CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
278 for (i = 0; i < cpuinfo.icache_size;
279 i += cpuinfo.icache_line_length)
280 __asm__ __volatile__ ("wic %0, r0;" \
283 __enable_icache_nomsr();
284 local_irq_restore(flags);
/* Invalidate the entire icache with wic, without IRQ masking or cache
 * disable. #if selection and the 'i' declaration are on elided lines.
 */
287 static void __flush_icache_all_noirq(void)
292 pr_debug("%s\n", __func__);
294 CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
296 for (i = 0; i < cpuinfo.icache_size;
297 i += cpuinfo.icache_line_length)
298 __asm__ __volatile__ ("wic %0, r0;" \
/* Invalidate the entire dcache (write-through model): IRQs masked and
 * dcache disabled via MSR around a full-cache wdc sweep. Elided lines
 * hold the #if selection and 'flags'/'i' declarations.
 */
303 static void __invalidate_dcache_all_msr_irq(void)
309 pr_debug("%s\n", __func__);
311 local_irq_save(flags);
312 __disable_dcache_msr();
314 CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
316 for (i = 0; i < cpuinfo.dcache_size;
317 i += cpuinfo.dcache_line_length)
318 __asm__ __volatile__ ("wdc %0, r0;" \
321 __enable_dcache_msr();
322 local_irq_restore(flags);
/* Invalidate the entire dcache on CPUs without msrset/msrclr: same wdc
 * sweep, using the nomsr enable/disable helpers. Elided lines hold the
 * #if selection and 'flags'/'i' declarations.
 */
325 static void __invalidate_dcache_all_nomsr_irq(void)
331 pr_debug("%s\n", __func__);
333 local_irq_save(flags);
334 __disable_dcache_nomsr();
336 CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
338 for (i = 0; i < cpuinfo.dcache_size;
339 i += cpuinfo.dcache_line_length)
340 __asm__ __volatile__ ("wdc %0, r0;" \
343 __enable_dcache_nomsr();
344 local_irq_restore(flags);
/* Invalidate the entire write-through dcache with wdc, no IRQ masking or
 * cache disable. NOTE(review): the CACHE_ALL_LOOP invocation below has
 * no trailing ';' - presumably valid in the full file's #if context;
 * confirm against the unsampled source.
 */
347 static void __invalidate_dcache_all_noirq_wt(void)
352 pr_debug("%s\n", __func__);
354 CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc)
356 for (i = 0; i < cpuinfo.dcache_size;
357 i += cpuinfo.dcache_line_length)
358 __asm__ __volatile__ ("wdc %0, r0;" \
363 /* FIXME It is blindly invalidation as is expected
364 * but can't be called on noMMU in microblaze_cache_init below
366 * MS: noMMU kernel won't boot if simple wdc is used
367 * The reason should be that there are discared data which kernel needs
/* Invalidate the whole write-back dcache. The CACHE_ALL_LOOP op operand
 * is on an elided continuation line - presumably wdc.clear given the
 * FIXME above; confirm against the unsampled source.
 */
369 static void __invalidate_dcache_all_wb(void)
374 pr_debug("%s\n", __func__);
376 CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
379 for (i = 0; i < cpuinfo.dcache_size;
380 i += cpuinfo.dcache_line_length)
381 __asm__ __volatile__ ("wdc %0, r0;" \
/* Invalidate (discard, no write-back) a dcache range on write-back
 * caches using wdc.clear via CACHE_RANGE_LOOP_2. Elided lines hold the
 * #if selection and the 'i' declaration.
 */
386 static void __invalidate_dcache_range_wb(unsigned long start,
392 pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
393 (unsigned int)start, (unsigned int) end);
395 CACHE_LOOP_LIMITS(start, end,
396 cpuinfo.dcache_line_length, cpuinfo.dcache_size);
398 CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.clear);
400 for (i = start; i < end; i += cpuinfo.dcache_line_length)
401 __asm__ __volatile__ ("wdc.clear %0, r0;" \
/* Invalidate a write-through dcache range with plain wdc, no IRQ
 * masking or cache disable. Elided lines hold the #if selection and the
 * 'i' declaration.
 */
406 static void __invalidate_dcache_range_nomsr_wt(unsigned long start,
412 pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
413 (unsigned int)start, (unsigned int) end);
414 CACHE_LOOP_LIMITS(start, end,
415 cpuinfo.dcache_line_length, cpuinfo.dcache_size);
418 CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
420 for (i = start; i < end; i += cpuinfo.dcache_line_length)
421 __asm__ __volatile__ ("wdc %0, r0;" \
/* Invalidate a write-through dcache range with IRQs masked and the
 * dcache disabled via MSR around the wdc sweep. Elided lines hold the
 * #if selection and 'flags'/'i' declarations.
 */
426 static void __invalidate_dcache_range_msr_irq_wt(unsigned long start,
433 pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
434 (unsigned int)start, (unsigned int) end);
435 CACHE_LOOP_LIMITS(start, end,
436 cpuinfo.dcache_line_length, cpuinfo.dcache_size);
438 local_irq_save(flags);
439 __disable_dcache_msr();
442 CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
444 for (i = start; i < end; i += cpuinfo.dcache_line_length)
445 __asm__ __volatile__ ("wdc %0, r0;" \
449 __enable_dcache_msr();
450 local_irq_restore(flags);
/* Invalidate a dcache range on CPUs without msrset/msrclr: IRQs masked,
 * dcache disabled with the nomsr helpers, wdc per line. Elided lines
 * hold the #if selection and 'flags'/'i' declarations.
 */
453 static void __invalidate_dcache_range_nomsr_irq(unsigned long start,
460 pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
461 (unsigned int)start, (unsigned int) end);
463 CACHE_LOOP_LIMITS(start, end,
464 cpuinfo.dcache_line_length, cpuinfo.dcache_size);
466 local_irq_save(flags);
467 __disable_dcache_nomsr();
470 CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
472 for (i = start; i < end; i += cpuinfo.dcache_line_length)
473 __asm__ __volatile__ ("wdc %0, r0;" \
477 __enable_dcache_nomsr();
478 local_irq_restore(flags);
/* Flush (write back + invalidate) the entire write-back dcache with
 * wdc.flush. The CACHE_ALL_LOOP op operand continues on an elided line.
 */
481 static void __flush_dcache_all_wb(void)
486 pr_debug("%s\n", __func__);
488 CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
491 for (i = 0; i < cpuinfo.dcache_size;
492 i += cpuinfo.dcache_line_length)
493 __asm__ __volatile__ ("wdc.flush %0, r0;" \
/* Flush (write back + invalidate) a write-back dcache range using
 * wdc.flush via CACHE_RANGE_LOOP_2. Elided lines hold the #if selection
 * and the 'i' declaration.
 */
498 static void __flush_dcache_range_wb(unsigned long start, unsigned long end)
503 pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
504 (unsigned int)start, (unsigned int) end);
506 CACHE_LOOP_LIMITS(start, end,
507 cpuinfo.dcache_line_length, cpuinfo.dcache_size);
509 CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.flush);
511 for (i = start; i < end; i += cpuinfo.dcache_line_length)
512 __asm__ __volatile__ ("wdc.flush %0, r0;" \
517 /* struct for wb caches and for wt caches */
520 /* new wb cache model */
/* MSR-capable CPU, write-back dcache: all flush/invalidate ops run with
 * caches and IRQs left on; dcache ops use the wdc.flush/wdc.clear wb
 * helpers.
 */
521 const struct scache wb_msr = {
522 .ie = __enable_icache_msr,
523 .id = __disable_icache_msr,
524 .ifl = __flush_icache_all_noirq,
525 .iflr = __flush_icache_range_noirq,
526 .iin = __flush_icache_all_noirq,
527 .iinr = __flush_icache_range_noirq,
528 .de = __enable_dcache_msr,
529 .dd = __disable_dcache_msr,
530 .dfl = __flush_dcache_all_wb,
531 .dflr = __flush_dcache_range_wb,
532 .din = __invalidate_dcache_all_wb,
533 .dinr = __invalidate_dcache_range_wb,
536 /* There is only difference in ie, id, de, dd functions */
/* Write-back dcache on a CPU without msrset/msrclr: identical to wb_msr
 * except enable/disable go through the nomsr MSR read-modify-write
 * helpers.
 */
537 const struct scache wb_nomsr = {
538 .ie = __enable_icache_nomsr,
539 .id = __disable_icache_nomsr,
540 .ifl = __flush_icache_all_noirq,
541 .iflr = __flush_icache_range_noirq,
542 .iin = __flush_icache_all_noirq,
543 .iinr = __flush_icache_range_noirq,
544 .de = __enable_dcache_nomsr,
545 .dd = __disable_dcache_nomsr,
546 .dfl = __flush_dcache_all_wb,
547 .dflr = __flush_dcache_range_wb,
548 .din = __invalidate_dcache_all_wb,
549 .dinr = __invalidate_dcache_range_wb,
552 /* Old wt cache model with disabling irq and turn off cache */
/* Older write-through cores (MSR instructions available): every op masks
 * IRQs and turns the relevant cache off for the duration of the sweep.
 */
553 const struct scache wt_msr = {
554 .ie = __enable_icache_msr,
555 .id = __disable_icache_msr,
556 .ifl = __flush_icache_all_msr_irq,
557 .iflr = __flush_icache_range_msr_irq,
558 .iin = __flush_icache_all_msr_irq,
559 .iinr = __flush_icache_range_msr_irq,
560 .de = __enable_dcache_msr,
561 .dd = __disable_dcache_msr,
562 .dfl = __invalidate_dcache_all_msr_irq,
563 .dflr = __invalidate_dcache_range_msr_irq_wt,
564 .din = __invalidate_dcache_all_msr_irq,
565 .dinr = __invalidate_dcache_range_msr_irq_wt,
/* Older write-through cores without msrset/msrclr: same IRQ-off,
 * cache-off model as wt_msr but using the nomsr helpers throughout.
 */
568 const struct scache wt_nomsr = {
569 .ie = __enable_icache_nomsr,
570 .id = __disable_icache_nomsr,
571 .ifl = __flush_icache_all_nomsr_irq,
572 .iflr = __flush_icache_range_nomsr_irq,
573 .iin = __flush_icache_all_nomsr_irq,
574 .iinr = __flush_icache_range_nomsr_irq,
575 .de = __enable_dcache_nomsr,
576 .dd = __disable_dcache_nomsr,
577 .dfl = __invalidate_dcache_all_nomsr_irq,
578 .dflr = __invalidate_dcache_range_nomsr_irq,
579 .din = __invalidate_dcache_all_nomsr_irq,
580 .dinr = __invalidate_dcache_range_nomsr_irq,
583 /* New wt cache model for newer Microblaze versions */
/* Newer write-through cores (>= 7.20.a, see microblaze_cache_init):
 * flush/invalidate run without masking IRQs or disabling the caches.
 */
584 const struct scache wt_msr_noirq = {
585 .ie = __enable_icache_msr,
586 .id = __disable_icache_msr,
587 .ifl = __flush_icache_all_noirq,
588 .iflr = __flush_icache_range_noirq,
589 .iin = __flush_icache_all_noirq,
590 .iinr = __flush_icache_range_noirq,
591 .de = __enable_dcache_msr,
592 .dd = __disable_dcache_msr,
593 .dfl = __invalidate_dcache_all_noirq_wt,
594 .dflr = __invalidate_dcache_range_nomsr_wt,
595 .din = __invalidate_dcache_all_noirq_wt,
596 .dinr = __invalidate_dcache_range_nomsr_wt,
/* Newer write-through cores without msrset/msrclr: noirq sweeps, with
 * only the enable/disable entry points differing from wt_msr_noirq.
 */
599 const struct scache wt_nomsr_noirq = {
600 .ie = __enable_icache_nomsr,
601 .id = __disable_icache_nomsr,
602 .ifl = __flush_icache_all_noirq,
603 .iflr = __flush_icache_range_noirq,
604 .iin = __flush_icache_all_noirq,
605 .iinr = __flush_icache_range_noirq,
606 .de = __enable_dcache_nomsr,
607 .dd = __disable_dcache_nomsr,
608 .dfl = __invalidate_dcache_all_noirq_wt,
609 .dflr = __invalidate_dcache_range_nomsr_wt,
610 .din = __invalidate_dcache_all_noirq_wt,
611 .dinr = __invalidate_dcache_range_nomsr_wt,
614 /* CPU version code for 7.20.c - see arch/microblaze/kernel/cpu/cpuinfo.c */
/* Version-code thresholds used by microblaze_cache_init below.
 * NOTE(review): INFO() carries a trailing ';' inside the #define, so
 * call sites like INFO("x"); expand to a double semicolon - legal in
 * statement context but checkpatch-unfriendly; consider dropping it.
 */
615 #define CPUVER_7_20_A 0x0c
616 #define CPUVER_7_20_D 0x0f
618 #define INFO(s) printk(KERN_INFO "cache: " s "\n");
/* Select the active cache-ops table (mbc, declared elsewhere) at boot:
 * - MSR-instruction CPUs pick a *_msr table, others a *_nomsr one;
 * - write-back dcache selects the wb table (warning below 7.20.d, where
 *   a hw bug breaks signal handling per the comment);
 * - write-through picks the noirq table on >= 7.20.a, else the old
 *   IRQ-off/cache-off table.
 * NOTE(review): braces/else lines between the branches are elided in
 * this sampled chunk; the structure above is inferred from the visible
 * conditions - confirm against the full file.
 */
620 void microblaze_cache_init(void)
622 if (cpuinfo.use_instr & PVR2_USE_MSR_INSTR) {
623 if (cpuinfo.dcache_wb) {
625 mbc = (struct scache *)&wb_msr;
626 if (cpuinfo.ver_code < CPUVER_7_20_D) {
627 /* MS: problem with signal handling - hw bug */
628 INFO("WB won't work properly");
631 if (cpuinfo.ver_code >= CPUVER_7_20_A) {
632 INFO("wt_msr_noirq");
633 mbc = (struct scache *)&wt_msr_noirq;
636 mbc = (struct scache *)&wt_msr;
640 if (cpuinfo.dcache_wb) {
642 mbc = (struct scache *)&wb_nomsr;
643 if (cpuinfo.ver_code < CPUVER_7_20_D) {
644 /* MS: problem with signal handling - hw bug */
645 INFO("WB won't work properly");
648 if (cpuinfo.ver_code >= CPUVER_7_20_A) {
649 INFO("wt_nomsr_noirq");
650 mbc = (struct scache *)&wt_nomsr_noirq;
653 mbc = (struct scache *)&wt_nomsr;
657 /* FIXME Invalidation is done in U-BOOT
658 * WT cache: Data is already written to main memory
659 * WB cache: Discard data on noMMU which caused that kernel doesn't boot
661 /* invalidate_dcache(); */