/*
 * Cache control for MicroBlaze cache memories
 *
 * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2007-2009 PetaLogix
 * Copyright (C) 2007-2009 John Williams <john.williams@petalogix.com>
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file COPYING in the main directory of this
 * archive for more details.
 */

#include <linux/cache.h>
#include <asm/cacheflush.h>
#include <asm/cpuinfo.h>
#include <asm/pvr.h>

static inline void __invalidate_flush_icache(unsigned int addr)
{
	__asm__ __volatile__ ("wic	%0, r0;"	\
					: : "r" (addr));
}

static inline void __flush_dcache(unsigned int addr)
{
	__asm__ __volatile__ ("wdc.flush	%0, r0;"	\
					: : "r" (addr));
}

static inline void __invalidate_dcache(unsigned int baseaddr,
						unsigned int offset)
{
	__asm__ __volatile__ ("wdc.clear	%0, %1;"	\
					: : "r" (baseaddr), "r" (offset));
}

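/*
 * Cache enable/disable helpers.  The *_msr variants use the msrset/msrclr
 * instructions to toggle the MSR_ICE/MSR_DCE bits directly; the *_nomsr
 * variants do a read-modify-write of rmsr through r12 for cores without
 * those instructions.
 */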
static inline void __enable_icache_msr(void)
{
	__asm__ __volatile__ ("	msrset	r0, %0;		\
				nop; "			\
			: : "i" (MSR_ICE) : "memory");
}

static inline void __disable_icache_msr(void)
{
	__asm__ __volatile__ ("	msrclr	r0, %0;		\
				nop; "			\
			: : "i" (MSR_ICE) : "memory");
}

static inline void __enable_dcache_msr(void)
{
	__asm__ __volatile__ ("	msrset	r0, %0;		\
				nop; "			\
				:			\
				: "i" (MSR_DCE)		\
				: "memory");
}

static inline void __disable_dcache_msr(void)
{
	__asm__ __volatile__ ("	msrclr	r0, %0;		\
				nop; "			\
				:			\
				: "i" (MSR_DCE)		\
				: "memory");
}

static inline void __enable_icache_nomsr(void)
{
	__asm__ __volatile__ ("	mfs	r12, rmsr;	\
				nop;			\
				ori	r12, r12, %0;	\
				mts	rmsr, r12;	\
				nop; "			\
				:			\
				: "i" (MSR_ICE)		\
				: "memory", "r12");
}

static inline void __disable_icache_nomsr(void)
{
	__asm__ __volatile__ ("	mfs	r12, rmsr;	\
				nop;			\
				andi	r12, r12, ~%0;	\
				mts	rmsr, r12;	\
				nop; "			\
				:			\
				: "i" (MSR_ICE)		\
				: "memory", "r12");
}

static inline void __enable_dcache_nomsr(void)
{
	__asm__ __volatile__ ("	mfs	r12, rmsr;	\
				nop;			\
				ori	r12, r12, %0;	\
				mts	rmsr, r12;	\
				nop; "			\
				:			\
				: "i" (MSR_DCE)		\
				: "memory", "r12");
}

static inline void __disable_dcache_nomsr(void)
{
	__asm__ __volatile__ ("	mfs	r12, rmsr;	\
				nop;			\
				andi	r12, r12, ~%0;	\
				mts	rmsr, r12;	\
				nop; "			\
				:			\
				: "i" (MSR_DCE)		\
				: "memory", "r12");
}


/* Helper macro for computing the limits of cache range loops */
#define CACHE_LOOP_LIMITS(start, end, cache_line_length, cache_size)	\
do {									\
	int align = ~(cache_line_length - 1);				\
	end = min(start + cache_size, end);				\
	start &= align;							\
	end = ((end & align) + cache_line_length);			\
} while (0)
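
/*
 * Worked example (assuming a 16-byte cache line and a range that fits in the
 * cache): start = 0x1004, end = 0x1046 becomes start = 0x1000, end = 0x1050,
 * i.e. the range is clamped to at most cache_size bytes, start is aligned
 * down to a line boundary, and end is aligned down and then pushed one line
 * further so the loop covers every line the original range touched.
 */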

/*
 * Helper macro to loop over the specified cache_size/line_length and
 * execute 'op' on that cacheline
 */
#define CACHE_ALL_LOOP(cache_size, line_length, op)			\
do {									\
	unsigned int len = cache_size;					\
	int step = -line_length;					\
	BUG_ON(step >= 0);						\
									\
	__asm__ __volatile__ (" 1:	" #op "	%0, r0;			\
					bgtid	%0, 1b;			\
					addk	%0, %0, %1;		\
					" : : "r" (len), "r" (step)	\
					: "memory");			\
} while (0)


#define CACHE_ALL_LOOP2(cache_size, line_length, op)			\
do {									\
	unsigned int len = cache_size;					\
	int step = -line_length;					\
	BUG_ON(step >= 0);						\
									\
	__asm__ __volatile__ (" 1:	" #op "	r0, %0;			\
					bgtid	%0, 1b;			\
					addk	%0, %0, %1;		\
					" : : "r" (len), "r" (step)	\
					: "memory");			\
} while (0)
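
/*
 * CACHE_ALL_LOOP and CACHE_ALL_LOOP2 both walk the whole cache by index,
 * issuing 'op' for offsets from cache_size down toward 0 in line_length
 * steps; they differ only in whether the loop counter is passed as the
 * first (op %0, r0) or the second (op r0, %0) operand of the instruction.
 */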

/* for wdc.flush/clear */
#define CACHE_RANGE_LOOP_2(start, end, line_length, op)			\
do {									\
	int step = -line_length;					\
	int count = end - start;					\
	BUG_ON(count <= 0);						\
									\
	__asm__ __volatile__ (" 1:	" #op "	%0, %1;			\
					bgtid	%1, 1b;			\
					addk	%1, %1, %2;		\
					" : : "r" (start), "r" (count),	\
					"r" (step) : "memory");		\
} while (0)

/* Only the first parameter of OP is used - for wic, wdc */
#define CACHE_RANGE_LOOP_1(start, end, line_length, op)			\
do {									\
	int volatile temp = 0;						\
	BUG_ON(end - start <= 0);					\
									\
	__asm__ __volatile__ (" 1:	" #op "	%1, r0;			\
					cmpu	%0, %1, %2;		\
					bgtid	%0, 1b;			\
					addk	%1, %1, %3;		\
				" : : "r" (temp), "r" (start), "r" (end),\
					"r" (line_length) : "memory");	\
} while (0)

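/*
 * The flush/invalidate routines below all follow the same pattern: clamp and
 * align the range with CACHE_LOOP_LIMITS(), then walk it one cache line at a
 * time with one of the CACHE_*_LOOP macros, in some variants with interrupts
 * disabled and the cache turned off around the loop.
 */
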
static void __flush_icache_range_msr_irq(unsigned long start, unsigned long end)
{
	unsigned long flags;

	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
				(unsigned int)start, (unsigned int)end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.icache_line_length, cpuinfo.icache_size);

	local_irq_save(flags);
	__disable_icache_msr();

	CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);

	__enable_icache_msr();
	local_irq_restore(flags);
}

static void __flush_icache_range_nomsr_irq(unsigned long start,
				unsigned long end)
{
	unsigned long flags;

	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
				(unsigned int)start, (unsigned int)end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.icache_line_length, cpuinfo.icache_size);

	local_irq_save(flags);
	__disable_icache_nomsr();

	CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);

	__enable_icache_nomsr();
	local_irq_restore(flags);
}

static void __flush_icache_range_noirq(unsigned long start,
				unsigned long end)
{
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
				(unsigned int)start, (unsigned int)end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.icache_line_length, cpuinfo.icache_size);
	CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
}

static void __flush_icache_all_msr_irq(void)
{
	unsigned long flags;

	pr_debug("%s\n", __func__);

	local_irq_save(flags);
	__disable_icache_msr();

	CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);

	__enable_icache_msr();
	local_irq_restore(flags);
}

static void __flush_icache_all_nomsr_irq(void)
{
	unsigned long flags;

	pr_debug("%s\n", __func__);

	local_irq_save(flags);
	__disable_icache_nomsr();

	CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);

	__enable_icache_nomsr();
	local_irq_restore(flags);
}

static void __flush_icache_all_noirq(void)
{
	pr_debug("%s\n", __func__);
	CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
}

static void __invalidate_dcache_all_msr_irq(void)
{
	unsigned long flags;

	pr_debug("%s\n", __func__);

	local_irq_save(flags);
	__disable_dcache_msr();

	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);

	__enable_dcache_msr();
	local_irq_restore(flags);
}

static void __invalidate_dcache_all_nomsr_irq(void)
{
	unsigned long flags;

	pr_debug("%s\n", __func__);

	local_irq_save(flags);
	__disable_dcache_nomsr();

	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);

	__enable_dcache_nomsr();
	local_irq_restore(flags);
}

static void __invalidate_dcache_all_noirq_wt(void)
{
	pr_debug("%s\n", __func__);
	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
}

/* FIXME this is weird - it should be plain wdc, but that does not work;
 * MS: I am getting bus errors and other weird things */
static void __invalidate_dcache_all_wb(void)
{
	pr_debug("%s\n", __func__);
	CACHE_ALL_LOOP2(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
					wdc.clear);
}

static void __invalidate_dcache_range_wb(unsigned long start,
						unsigned long end)
{
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
				(unsigned int)start, (unsigned int)end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.dcache_line_length, cpuinfo.dcache_size);
	CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.clear);
}

static void __invalidate_dcache_range_nomsr_wt(unsigned long start,
							unsigned long end)
{
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
				(unsigned int)start, (unsigned int)end);
	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.dcache_line_length, cpuinfo.dcache_size);

	CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
}

static void __invalidate_dcache_range_msr_irq_wt(unsigned long start,
							unsigned long end)
{
	unsigned long flags;

	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
				(unsigned int)start, (unsigned int)end);
	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.dcache_line_length, cpuinfo.dcache_size);

	local_irq_save(flags);
	__disable_dcache_msr();

	CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);

	__enable_dcache_msr();
	local_irq_restore(flags);
}

static void __invalidate_dcache_range_nomsr_irq(unsigned long start,
							unsigned long end)
{
	unsigned long flags;

	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
				(unsigned int)start, (unsigned int)end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.dcache_line_length, cpuinfo.dcache_size);

	local_irq_save(flags);
	__disable_dcache_nomsr();

	CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);

	__enable_dcache_nomsr();
	local_irq_restore(flags);
}

static void __flush_dcache_all_wb(void)
{
	pr_debug("%s\n", __func__);
	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
				wdc.flush);
}

static void __flush_dcache_range_wb(unsigned long start, unsigned long end)
{
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
				(unsigned int)start, (unsigned int)end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.dcache_line_length, cpuinfo.dcache_size);
	CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.flush);
}

/* ops struct in use - points at one of the wb/wt cache models below */
struct scache *mbc;

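/*
 * The tables below cover the supported combinations: write-back vs
 * write-through dcache, cores with vs without the msrset/msrclr
 * instructions, and (for older write-through cores) whether the cache has
 * to be turned off with interrupts disabled around each operation.
 * microblaze_cache_init() picks one of them and points mbc at it.
 */
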
/* new wb cache model */
const struct scache wb_msr = {
	.ie = __enable_icache_msr,
	.id = __disable_icache_msr,
	.ifl = __flush_icache_all_noirq,
	.iflr = __flush_icache_range_noirq,
	.iin = __flush_icache_all_noirq,
	.iinr = __flush_icache_range_noirq,
	.de = __enable_dcache_msr,
	.dd = __disable_dcache_msr,
	.dfl = __flush_dcache_all_wb,
	.dflr = __flush_dcache_range_wb,
	.din = __invalidate_dcache_all_wb,
	.dinr = __invalidate_dcache_range_wb,
};

/* The only difference is in the ie, id, de, dd functions */
const struct scache wb_nomsr = {
	.ie = __enable_icache_nomsr,
	.id = __disable_icache_nomsr,
	.ifl = __flush_icache_all_noirq,
	.iflr = __flush_icache_range_noirq,
	.iin = __flush_icache_all_noirq,
	.iinr = __flush_icache_range_noirq,
	.de = __enable_dcache_nomsr,
	.dd = __disable_dcache_nomsr,
	.dfl = __flush_dcache_all_wb,
	.dflr = __flush_dcache_range_wb,
	.din = __invalidate_dcache_all_wb,
	.dinr = __invalidate_dcache_range_wb,
};

/* Old wt cache model which disables irqs and turns the cache off */
const struct scache wt_msr = {
	.ie = __enable_icache_msr,
	.id = __disable_icache_msr,
	.ifl = __flush_icache_all_msr_irq,
	.iflr = __flush_icache_range_msr_irq,
	.iin = __flush_icache_all_msr_irq,
	.iinr = __flush_icache_range_msr_irq,
	.de = __enable_dcache_msr,
	.dd = __disable_dcache_msr,
	.dfl = __invalidate_dcache_all_msr_irq,
	.dflr = __invalidate_dcache_range_msr_irq_wt,
	.din = __invalidate_dcache_all_msr_irq,
	.dinr = __invalidate_dcache_range_msr_irq_wt,
};

const struct scache wt_nomsr = {
	.ie = __enable_icache_nomsr,
	.id = __disable_icache_nomsr,
	.ifl = __flush_icache_all_nomsr_irq,
	.iflr = __flush_icache_range_nomsr_irq,
	.iin = __flush_icache_all_nomsr_irq,
	.iinr = __flush_icache_range_nomsr_irq,
	.de = __enable_dcache_nomsr,
	.dd = __disable_dcache_nomsr,
	.dfl = __invalidate_dcache_all_nomsr_irq,
	.dflr = __invalidate_dcache_range_nomsr_irq,
	.din = __invalidate_dcache_all_nomsr_irq,
	.dinr = __invalidate_dcache_range_nomsr_irq,
};

/* New wt cache model for newer MicroBlaze versions */
const struct scache wt_msr_noirq = {
	.ie = __enable_icache_msr,
	.id = __disable_icache_msr,
	.ifl = __flush_icache_all_noirq,
	.iflr = __flush_icache_range_noirq,
	.iin = __flush_icache_all_noirq,
	.iinr = __flush_icache_range_noirq,
	.de = __enable_dcache_msr,
	.dd = __disable_dcache_msr,
	.dfl = __invalidate_dcache_all_noirq_wt,
	.dflr = __invalidate_dcache_range_nomsr_wt,
	.din = __invalidate_dcache_all_noirq_wt,
	.dinr = __invalidate_dcache_range_nomsr_wt,
};

const struct scache wt_nomsr_noirq = {
	.ie = __enable_icache_nomsr,
	.id = __disable_icache_nomsr,
	.ifl = __flush_icache_all_noirq,
	.iflr = __flush_icache_range_noirq,
	.iin = __flush_icache_all_noirq,
	.iinr = __flush_icache_range_noirq,
	.de = __enable_dcache_nomsr,
	.dd = __disable_dcache_nomsr,
	.dfl = __invalidate_dcache_all_noirq_wt,
	.dflr = __invalidate_dcache_range_nomsr_wt,
	.din = __invalidate_dcache_all_noirq_wt,
	.dinr = __invalidate_dcache_range_nomsr_wt,
};

/* CPU version code for 7.20.c - see arch/microblaze/kernel/cpu/cpuinfo.c */
#define CPUVER_7_20_A	0x0c
#define CPUVER_7_20_D	0x0f

#define INFO(s) printk(KERN_INFO "cache: " s "\n")

void microblaze_cache_init(void)
{
	if (cpuinfo.use_instr & PVR2_USE_MSR_INSTR) {
		if (cpuinfo.dcache_wb) {
			INFO("wb_msr");
			mbc = (struct scache *)&wb_msr;
			if (cpuinfo.ver_code < CPUVER_7_20_D) {
				/* MS: problem with signal handling - hw bug */
				INFO("WB won't work properly");
			}
		} else {
			if (cpuinfo.ver_code >= CPUVER_7_20_A) {
				INFO("wt_msr_noirq");
				mbc = (struct scache *)&wt_msr_noirq;
			} else {
				INFO("wt_msr");
				mbc = (struct scache *)&wt_msr;
			}
		}
	} else {
		if (cpuinfo.dcache_wb) {
			INFO("wb_nomsr");
			mbc = (struct scache *)&wb_nomsr;
			if (cpuinfo.ver_code < CPUVER_7_20_D) {
				/* MS: problem with signal handling - hw bug */
				INFO("WB won't work properly");
			}
		} else {
			if (cpuinfo.ver_code >= CPUVER_7_20_A) {
				INFO("wt_nomsr_noirq");
				mbc = (struct scache *)&wt_nomsr_noirq;
			} else {
				INFO("wt_nomsr");
				mbc = (struct scache *)&wt_nomsr;
			}
		}
	}
}
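
/*
 * Illustrative sketch only, not part of this file's real interface: the rest
 * of the kernel is expected to reach the selected operations through the mbc
 * pointer set up by microblaze_cache_init() above (normally via the wrappers
 * in asm/cacheflush.h, included here).  The helper below uses a hypothetical
 * name purely to show the dispatch pattern.
 */
static inline void example_flush_icache_range(unsigned long start,
					      unsigned long end)
{
	if (mbc && mbc->iflr)	/* model selected by microblaze_cache_init() */
		mbc->iflr(start, end);
}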