/*
 * Cache control for MicroBlaze cache memories
 *
 * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2007-2009 PetaLogix
 * Copyright (C) 2007-2009 John Williams <john.williams@petalogix.com>
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file COPYING in the main directory of this
 * archive for more details.
 */

#include <asm/cacheflush.h>
#include <linux/cache.h>
#include <asm/cpuinfo.h>
#include <asm/pvr.h>

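/*
 * Two families of enable/disable helpers: the _msr variants use the
 * msrset/msrclr instructions (available when the core is configured
 * with PVR2_USE_MSR_INSTR, which microblaze_cache_init() checks below),
 * while the _nomsr variants fall back to a read-modify-write of rmsr
 * via mfs/mts, using r12 as a scratch register.
 */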
static inline void __enable_icache_msr(void)
{
        __asm__ __volatile__ (" msrset  r0, %0;         \
                                nop; "                  \
                        : : "i" (MSR_ICE) : "memory");
}

static inline void __disable_icache_msr(void)
{
        __asm__ __volatile__ (" msrclr  r0, %0;         \
                                nop; "                  \
                        : : "i" (MSR_ICE) : "memory");
}

static inline void __enable_dcache_msr(void)
{
        __asm__ __volatile__ (" msrset  r0, %0;         \
                                nop; "                  \
                                :                       \
                                : "i" (MSR_DCE)         \
                                : "memory");
}

static inline void __disable_dcache_msr(void)
{
        __asm__ __volatile__ (" msrclr  r0, %0;         \
                                nop; "                  \
                                :                       \
                                : "i" (MSR_DCE)         \
                                : "memory");
}

static inline void __enable_icache_nomsr(void)
{
        __asm__ __volatile__ (" mfs     r12, rmsr;      \
                                nop;                    \
                                ori     r12, r12, %0;   \
                                mts     rmsr, r12;      \
                                nop; "                  \
                                :                       \
                                : "i" (MSR_ICE)         \
                                : "memory", "r12");
}

static inline void __disable_icache_nomsr(void)
{
        __asm__ __volatile__ (" mfs     r12, rmsr;      \
                                nop;                    \
                                andi    r12, r12, ~%0;  \
                                mts     rmsr, r12;      \
                                nop; "                  \
                                :                       \
                                : "i" (MSR_ICE)         \
                                : "memory", "r12");
}

static inline void __enable_dcache_nomsr(void)
{
        __asm__ __volatile__ (" mfs     r12, rmsr;      \
                                nop;                    \
                                ori     r12, r12, %0;   \
                                mts     rmsr, r12;      \
                                nop; "                  \
                                :                       \
                                : "i" (MSR_DCE)         \
                                : "memory", "r12");
}

static inline void __disable_dcache_nomsr(void)
{
        __asm__ __volatile__ (" mfs     r12, rmsr;      \
                                nop;                    \
                                andi    r12, r12, ~%0;  \
                                mts     rmsr, r12;      \
                                nop; "                  \
                                :                       \
                                : "i" (MSR_DCE)         \
                                : "memory", "r12");
}

/* Helper macro for computing the limits of cache range loops
 *
 * The end address may be unaligned, which is fine for the C
 * implementation; the ASM implementation aligns it in the ASM macros.
 */
#define CACHE_LOOP_LIMITS(start, end, cache_line_length, cache_size)    \
do {                                                                    \
        int align = ~(cache_line_length - 1);                           \
        end = min(start + cache_size, end);                             \
        start &= align;                                                 \
} while (0)
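
/*
 * A worked example (values are illustrative, not from the source):
 * with start = 0x1005, end = 0x3000, a 16-byte line and an 8 KB cache,
 * end becomes min(0x1005 + 0x2000, 0x3000) = 0x3000 and start is
 * aligned down to 0x1000.
 */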

/*
 * Helper macro to loop over the specified cache_size/line_length and
 * execute 'op' on that cacheline
 */
#define CACHE_ALL_LOOP(cache_size, line_length, op)                     \
do {                                                                    \
        unsigned int len = cache_size - line_length;                    \
        int step = -line_length;                                        \
        WARN_ON(step >= 0);                                             \
                                                                        \
        __asm__ __volatile__ (" 1:      " #op " %0, r0;                 \
                                        bgtid   %0, 1b;                 \
                                        addk    %0, %0, %1;             \
                                        " : : "r" (len), "r" (step)     \
                                        : "memory");                    \
} while (0)
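
/*
 * The loop above counts down: 'op' is applied at offset len, the bgtid
 * branch repeats while len > 0, and the addk in its delay slot adds the
 * (negative) step, so every line from cache_size - line_length down to
 * offset 0 is touched exactly once. Roughly equivalent C (a sketch;
 * plain wic/wdc take the line address in their first operand):
 *
 *	for (int i = cache_size - line_length; i >= 0; i -= line_length)
 *		op(i);
 */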

/* Used for wdc.flush/clear which can use rB for the offset, which is
 * not possible for simple wdc or wic.
 *
 * The start address is cache aligned.
 * The end address is not aligned; if end is aligned then I have to subtract
 * a cacheline length because I can't flush/invalidate the next cacheline.
 * If it is not aligned, I align it because I will flush/invalidate the
 * whole line anyway.
 */
#define CACHE_RANGE_LOOP_2(start, end, line_length, op)                 \
do {                                                                    \
        int step = -line_length;                                        \
        int align = ~(line_length - 1);                                 \
        int count;                                                      \
        end = ((end & align) == end) ? end - line_length : end & align; \
        count = end - start;                                            \
        WARN_ON(count < 0);                                             \
                                                                        \
        __asm__ __volatile__ (" 1:      " #op " %0, %1;                 \
                                        bgtid   %1, 1b;                 \
                                        addk    %1, %1, %2;             \
                                        " : : "r" (start), "r" (count), \
                                        "r" (step) : "memory");         \
} while (0)
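
/*
 * Here 'op' sees rA = start and rB = count, so wdc.flush/wdc.clear act
 * on the line at start + count while count steps down to 0; the range
 * is walked from the last line back to the first. On MicroBlaze,
 * wdc.flush writes a dirty line back before invalidating it, while
 * wdc.clear discards the line without a write-back.
 */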

/* Only the first parameter of OP is used - for wic, wdc */
#define CACHE_RANGE_LOOP_1(start, end, line_length, op)                 \
do {                                                                    \
        unsigned int volatile temp = 0;                                 \
        int align = ~(line_length - 1);                                 \
        end = ((end & align) == end) ? end - line_length : end & align; \
        WARN_ON(end < start);                                           \
                                                                        \
        __asm__ __volatile__ (" 1:      " #op " %1, r0;                 \
                                        cmpu    %0, %1, %2;             \
                                        bgtid   %0, 1b;                 \
                                        addk    %1, %1, %3;             \
                                " : : "r" (temp), "r" (start), "r" (end),\
                                        "r" (line_length) : "memory");  \
} while (0)
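
/*
 * temp is only a scratch register for the comparison result; it is
 * initialised to zero to avoid an uninitialised read (note it is
 * declared as an input operand which the asm then overwrites). cmpu
 * writes end - start into it, with the MSB encoding the unsigned
 * comparison, so the bgtid branch repeats while start < end and the
 * addk in the delay slot advances start by one cache line; every line
 * from start up to and including end is touched.
 */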

#define ASM_LOOP
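
/*
 * ASM_LOOP (above) selects the hand-written assembly loops; leave it
 * undefined to fall back to the plain C loops in the #else branches,
 * which is handy for debugging.
 *
 * Naming convention for the functions below: _msr/_nomsr says which
 * family of MSR helpers is used, _irq means the cache is switched off
 * with interrupts disabled around the operation, and _noirq means the
 * operation is done without that.
 */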

static void __flush_icache_range_msr_irq(unsigned long start, unsigned long end)
{
        unsigned long flags;
#ifndef ASM_LOOP
        int i;
#endif
        pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
                                (unsigned int)start, (unsigned int)end);

        CACHE_LOOP_LIMITS(start, end,
                        cpuinfo.icache_line_length, cpuinfo.icache_size);

        local_irq_save(flags);
        __disable_icache_msr();

#ifdef ASM_LOOP
        CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
#else
        for (i = start; i < end; i += cpuinfo.icache_line_length)
                __asm__ __volatile__ ("wic      %0, r0;"        \
                                : : "r" (i));
#endif
        __enable_icache_msr();
        local_irq_restore(flags);
}

static void __flush_icache_range_nomsr_irq(unsigned long start,
                                unsigned long end)
{
        unsigned long flags;
#ifndef ASM_LOOP
        int i;
#endif
        pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
                                (unsigned int)start, (unsigned int)end);

        CACHE_LOOP_LIMITS(start, end,
                        cpuinfo.icache_line_length, cpuinfo.icache_size);

        local_irq_save(flags);
        __disable_icache_nomsr();

#ifdef ASM_LOOP
        CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
#else
        for (i = start; i < end; i += cpuinfo.icache_line_length)
                __asm__ __volatile__ ("wic      %0, r0;"        \
                                : : "r" (i));
#endif

        __enable_icache_nomsr();
        local_irq_restore(flags);
}

static void __flush_icache_range_noirq(unsigned long start,
                                unsigned long end)
{
#ifndef ASM_LOOP
        int i;
#endif
        pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
                                (unsigned int)start, (unsigned int)end);

        CACHE_LOOP_LIMITS(start, end,
                        cpuinfo.icache_line_length, cpuinfo.icache_size);
#ifdef ASM_LOOP
        CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
#else
        for (i = start; i < end; i += cpuinfo.icache_line_length)
                __asm__ __volatile__ ("wic      %0, r0;"        \
                                : : "r" (i));
#endif
}

static void __flush_icache_all_msr_irq(void)
{
        unsigned long flags;
#ifndef ASM_LOOP
        int i;
#endif
        pr_debug("%s\n", __func__);

        local_irq_save(flags);
        __disable_icache_msr();
#ifdef ASM_LOOP
        CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
#else
        for (i = 0; i < cpuinfo.icache_size;
                 i += cpuinfo.icache_line_length)
                        __asm__ __volatile__ ("wic      %0, r0;" \
                                        : : "r" (i));
#endif
        __enable_icache_msr();
        local_irq_restore(flags);
}

static void __flush_icache_all_nomsr_irq(void)
{
        unsigned long flags;
#ifndef ASM_LOOP
        int i;
#endif
        pr_debug("%s\n", __func__);

        local_irq_save(flags);
        __disable_icache_nomsr();
#ifdef ASM_LOOP
        CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
#else
        for (i = 0; i < cpuinfo.icache_size;
                 i += cpuinfo.icache_line_length)
                        __asm__ __volatile__ ("wic      %0, r0;" \
                                        : : "r" (i));
#endif
        __enable_icache_nomsr();
        local_irq_restore(flags);
}

static void __flush_icache_all_noirq(void)
{
#ifndef ASM_LOOP
        int i;
#endif
        pr_debug("%s\n", __func__);
#ifdef ASM_LOOP
        CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
#else
        for (i = 0; i < cpuinfo.icache_size;
                 i += cpuinfo.icache_line_length)
                        __asm__ __volatile__ ("wic      %0, r0;" \
                                        : : "r" (i));
#endif
}
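
/*
 * Note that the instruction cache is never dirty, so "flush" and
 * "invalidate" are the same operation for it; the scache tables below
 * point ifl/iflr and iin/iinr at the same helpers.
 */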

static void __invalidate_dcache_all_msr_irq(void)
{
        unsigned long flags;
#ifndef ASM_LOOP
        int i;
#endif
        pr_debug("%s\n", __func__);

        local_irq_save(flags);
        __disable_dcache_msr();
#ifdef ASM_LOOP
        CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
#else
        for (i = 0; i < cpuinfo.dcache_size;
                 i += cpuinfo.dcache_line_length)
                        __asm__ __volatile__ ("wdc      %0, r0;" \
                                        : : "r" (i));
#endif
        __enable_dcache_msr();
        local_irq_restore(flags);
}

static void __invalidate_dcache_all_nomsr_irq(void)
{
        unsigned long flags;
#ifndef ASM_LOOP
        int i;
#endif
        pr_debug("%s\n", __func__);

        local_irq_save(flags);
        __disable_dcache_nomsr();
#ifdef ASM_LOOP
        CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
#else
        for (i = 0; i < cpuinfo.dcache_size;
                 i += cpuinfo.dcache_line_length)
                        __asm__ __volatile__ ("wdc      %0, r0;" \
                                        : : "r" (i));
#endif
        __enable_dcache_nomsr();
        local_irq_restore(flags);
}

static void __invalidate_dcache_all_noirq_wt(void)
{
#ifndef ASM_LOOP
        int i;
#endif
        pr_debug("%s\n", __func__);
#ifdef ASM_LOOP
        CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
#else
        for (i = 0; i < cpuinfo.dcache_size;
                 i += cpuinfo.dcache_line_length)
                        __asm__ __volatile__ ("wdc      %0, r0;" \
                                        : : "r" (i));
#endif
}

/* FIXME this is a blind invalidation, as expected,
 * but it can't be called for noMMU in microblaze_cache_init below
 *
 * MS: a noMMU kernel won't boot if simple wdc is used
 * The reason should be that data the kernel still needs gets discarded
 */
static void __invalidate_dcache_all_wb(void)
{
#ifndef ASM_LOOP
        int i;
#endif
        pr_debug("%s\n", __func__);
#ifdef ASM_LOOP
        CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
                                        wdc);
#else
        for (i = 0; i < cpuinfo.dcache_size;
                 i += cpuinfo.dcache_line_length)
                        __asm__ __volatile__ ("wdc      %0, r0;" \
                                        : : "r" (i));
#endif
}

static void __invalidate_dcache_range_wb(unsigned long start,
                                                unsigned long end)
{
#ifndef ASM_LOOP
        int i;
#endif
        pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
                                (unsigned int)start, (unsigned int)end);

        CACHE_LOOP_LIMITS(start, end,
                        cpuinfo.dcache_line_length, cpuinfo.dcache_size);
#ifdef ASM_LOOP
        CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.clear);
#else
        for (i = start; i < end; i += cpuinfo.dcache_line_length)
                __asm__ __volatile__ ("wdc.clear        %0, r0;"        \
                                : : "r" (i));
#endif
}

static void __invalidate_dcache_range_nomsr_wt(unsigned long start,
                                                        unsigned long end)
{
#ifndef ASM_LOOP
        int i;
#endif
        pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
                                (unsigned int)start, (unsigned int)end);
        CACHE_LOOP_LIMITS(start, end,
                        cpuinfo.dcache_line_length, cpuinfo.dcache_size);

#ifdef ASM_LOOP
        CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
#else
        for (i = start; i < end; i += cpuinfo.dcache_line_length)
                __asm__ __volatile__ ("wdc      %0, r0;"        \
                                : : "r" (i));
#endif
}

static void __invalidate_dcache_range_msr_irq_wt(unsigned long start,
                                                        unsigned long end)
{
        unsigned long flags;
#ifndef ASM_LOOP
        int i;
#endif
        pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
                                (unsigned int)start, (unsigned int)end);
        CACHE_LOOP_LIMITS(start, end,
                        cpuinfo.dcache_line_length, cpuinfo.dcache_size);

        local_irq_save(flags);
        __disable_dcache_msr();

#ifdef ASM_LOOP
        CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
#else
        for (i = start; i < end; i += cpuinfo.dcache_line_length)
                __asm__ __volatile__ ("wdc      %0, r0;"        \
                                : : "r" (i));
#endif

        __enable_dcache_msr();
        local_irq_restore(flags);
}

static void __invalidate_dcache_range_nomsr_irq(unsigned long start,
                                                        unsigned long end)
{
        unsigned long flags;
#ifndef ASM_LOOP
        int i;
#endif
        pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
                                (unsigned int)start, (unsigned int)end);

        CACHE_LOOP_LIMITS(start, end,
                        cpuinfo.dcache_line_length, cpuinfo.dcache_size);

        local_irq_save(flags);
        __disable_dcache_nomsr();

#ifdef ASM_LOOP
        CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
#else
        for (i = start; i < end; i += cpuinfo.dcache_line_length)
                __asm__ __volatile__ ("wdc      %0, r0;"        \
                                : : "r" (i));
#endif

        __enable_dcache_nomsr();
        local_irq_restore(flags);
}

static void __flush_dcache_all_wb(void)
{
#ifndef ASM_LOOP
        int i;
#endif
        pr_debug("%s\n", __func__);
#ifdef ASM_LOOP
        CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
                                wdc.flush);
#else
        for (i = 0; i < cpuinfo.dcache_size;
                 i += cpuinfo.dcache_line_length)
                        __asm__ __volatile__ ("wdc.flush        %0, r0;" \
                                        : : "r" (i));
#endif
}

static void __flush_dcache_range_wb(unsigned long start, unsigned long end)
{
#ifndef ASM_LOOP
        int i;
#endif
        pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
                                (unsigned int)start, (unsigned int)end);

        CACHE_LOOP_LIMITS(start, end,
                        cpuinfo.dcache_line_length, cpuinfo.dcache_size);
#ifdef ASM_LOOP
        CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.flush);
#else
        for (i = start; i < end; i += cpuinfo.dcache_line_length)
                __asm__ __volatile__ ("wdc.flush        %0, r0;"        \
                                : : "r" (i));
#endif
}

/* The active cache model: points at one of the wb/wt scache tables
 * below, selected in microblaze_cache_init() */
struct scache *mbc;

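/*
 * Callers are not expected to use mbc directly; the wrappers in
 * asm/cacheflush.h (e.g. the enable_icache()/enable_dcache() helpers
 * used in microblaze_cache_init() below) dispatch through this
 * pointer, so selecting a table selects every cache operation at once.
 */
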
/* new wb cache model */
const struct scache wb_msr = {
        .ie = __enable_icache_msr,
        .id = __disable_icache_msr,
        .ifl = __flush_icache_all_noirq,
        .iflr = __flush_icache_range_noirq,
        .iin = __flush_icache_all_noirq,
        .iinr = __flush_icache_range_noirq,
        .de = __enable_dcache_msr,
        .dd = __disable_dcache_msr,
        .dfl = __flush_dcache_all_wb,
        .dflr = __flush_dcache_range_wb,
        .din = __invalidate_dcache_all_wb,
        .dinr = __invalidate_dcache_range_wb,
};

/* The only difference is in the ie, id, de and dd functions */
const struct scache wb_nomsr = {
        .ie = __enable_icache_nomsr,
        .id = __disable_icache_nomsr,
        .ifl = __flush_icache_all_noirq,
        .iflr = __flush_icache_range_noirq,
        .iin = __flush_icache_all_noirq,
        .iinr = __flush_icache_range_noirq,
        .de = __enable_dcache_nomsr,
        .dd = __disable_dcache_nomsr,
        .dfl = __flush_dcache_all_wb,
        .dflr = __flush_dcache_range_wb,
        .din = __invalidate_dcache_all_wb,
        .dinr = __invalidate_dcache_range_wb,
};

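/*
 * For the write-through models below a "flush" is the same as an
 * invalidate: main memory always holds current data, so dfl/dflr reuse
 * the invalidate helpers.
 */
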
/* Old wt cache model which disables irqs and turns the cache off */
const struct scache wt_msr = {
        .ie = __enable_icache_msr,
        .id = __disable_icache_msr,
        .ifl = __flush_icache_all_msr_irq,
        .iflr = __flush_icache_range_msr_irq,
        .iin = __flush_icache_all_msr_irq,
        .iinr = __flush_icache_range_msr_irq,
        .de = __enable_dcache_msr,
        .dd = __disable_dcache_msr,
        .dfl = __invalidate_dcache_all_msr_irq,
        .dflr = __invalidate_dcache_range_msr_irq_wt,
        .din = __invalidate_dcache_all_msr_irq,
        .dinr = __invalidate_dcache_range_msr_irq_wt,
};

const struct scache wt_nomsr = {
        .ie = __enable_icache_nomsr,
        .id = __disable_icache_nomsr,
        .ifl = __flush_icache_all_nomsr_irq,
        .iflr = __flush_icache_range_nomsr_irq,
        .iin = __flush_icache_all_nomsr_irq,
        .iinr = __flush_icache_range_nomsr_irq,
        .de = __enable_dcache_nomsr,
        .dd = __disable_dcache_nomsr,
        .dfl = __invalidate_dcache_all_nomsr_irq,
        .dflr = __invalidate_dcache_range_nomsr_irq,
        .din = __invalidate_dcache_all_nomsr_irq,
        .dinr = __invalidate_dcache_range_nomsr_irq,
};

/* New wt cache model for newer MicroBlaze versions */
const struct scache wt_msr_noirq = {
        .ie = __enable_icache_msr,
        .id = __disable_icache_msr,
        .ifl = __flush_icache_all_noirq,
        .iflr = __flush_icache_range_noirq,
        .iin = __flush_icache_all_noirq,
        .iinr = __flush_icache_range_noirq,
        .de = __enable_dcache_msr,
        .dd = __disable_dcache_msr,
        .dfl = __invalidate_dcache_all_noirq_wt,
        .dflr = __invalidate_dcache_range_nomsr_wt,
        .din = __invalidate_dcache_all_noirq_wt,
        .dinr = __invalidate_dcache_range_nomsr_wt,
};

const struct scache wt_nomsr_noirq = {
        .ie = __enable_icache_nomsr,
        .id = __disable_icache_nomsr,
        .ifl = __flush_icache_all_noirq,
        .iflr = __flush_icache_range_noirq,
        .iin = __flush_icache_all_noirq,
        .iinr = __flush_icache_range_noirq,
        .de = __enable_dcache_nomsr,
        .dd = __disable_dcache_nomsr,
        .dfl = __invalidate_dcache_all_noirq_wt,
        .dflr = __invalidate_dcache_range_nomsr_wt,
        .din = __invalidate_dcache_all_noirq_wt,
        .dinr = __invalidate_dcache_range_nomsr_wt,
};

/* CPU version code for 7.20.c - see arch/microblaze/kernel/cpu/cpuinfo.c */
#define CPUVER_7_20_A   0x0c
#define CPUVER_7_20_D   0x0f

#define INFO(s) printk(KERN_INFO "cache: " s "\n")

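/*
 * Selection summary (restating the logic below):
 *
 *                      write-back        write-through
 *  MSR instructions    wb_msr            wt_msr_noirq (>= 7.20.a), else wt_msr
 *  no MSR              wb_nomsr          wt_nomsr_noirq (>= 7.20.a), else wt_nomsr
 *
 * The write-back models additionally warn on cores older than 7.20.d,
 * where a hardware bug breaks signal handling.
 */
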
void microblaze_cache_init(void)
{
        if (cpuinfo.use_instr & PVR2_USE_MSR_INSTR) {
                if (cpuinfo.dcache_wb) {
                        INFO("wb_msr");
                        mbc = (struct scache *)&wb_msr;
                        if (cpuinfo.ver_code < CPUVER_7_20_D) {
                                /* MS: problem with signal handling - hw bug */
                                INFO("WB won't work properly");
                        }
                } else {
                        if (cpuinfo.ver_code >= CPUVER_7_20_A) {
                                INFO("wt_msr_noirq");
                                mbc = (struct scache *)&wt_msr_noirq;
                        } else {
                                INFO("wt_msr");
                                mbc = (struct scache *)&wt_msr;
                        }
                }
        } else {
                if (cpuinfo.dcache_wb) {
                        INFO("wb_nomsr");
                        mbc = (struct scache *)&wb_nomsr;
                        if (cpuinfo.ver_code < CPUVER_7_20_D) {
                                /* MS: problem with signal handling - hw bug */
                                INFO("WB won't work properly");
                        }
                } else {
                        if (cpuinfo.ver_code >= CPUVER_7_20_A) {
                                INFO("wt_nomsr_noirq");
                                mbc = (struct scache *)&wt_nomsr_noirq;
                        } else {
                                INFO("wt_nomsr");
                                mbc = (struct scache *)&wt_nomsr;
                        }
                }
        }
/* FIXME Invalidation is done in U-Boot
 * WT cache: data has already been written to main memory
 * WB cache: discarding data on noMMU caused the kernel not to boot
 */
        /* invalidate_dcache(); */
        enable_dcache();

        invalidate_icache();
        enable_icache();
}