/* arch/microblaze/kernel/cpu/cache.c */
1 /*
2  * Cache control for MicroBlaze cache memories
3  *
4  * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
5  * Copyright (C) 2007-2009 PetaLogix
6  * Copyright (C) 2007-2009 John Williams <john.williams@petalogix.com>
7  *
8  * This file is subject to the terms and conditions of the GNU General
9  * Public License. See the file COPYING in the main directory of this
10  * archive for more details.
11  */
12
13 #include <asm/cacheflush.h>
14 #include <linux/cache.h>
15 #include <asm/cpuinfo.h>
16 #include <asm/pvr.h>
17
/*
 * Invalidate the icache line containing @addr using the "wic"
 * (write to instruction cache) instruction.
 */
static inline void __invalidate_flush_icache(unsigned int addr)
{
	__asm__ __volatile__ ("wic	%0, r0;"	\
					: : "r" (addr));
}
23
/* Write the dcache line containing @addr back to memory ("wdc.flush"). */
static inline void __flush_dcache(unsigned int addr)
{
	__asm__ __volatile__ ("wdc.flush	%0, r0;"	\
					: : "r" (addr));
}
29
/*
 * Invalidate the dcache line at @baseaddr + @offset ("wdc.clear",
 * i.e. discard without write-back - see the MicroBlaze reference guide).
 */
static inline void __invalidate_dcache(unsigned int baseaddr,
						unsigned int offset)
{
	__asm__ __volatile__ ("wdc.clear	%0, %1;"	\
					: : "r" (baseaddr), "r" (offset));
}
36
/*
 * Enable the icache by setting MSR[ICE] with the msrset instruction;
 * the trailing nop gives the mode change time to take effect.
 */
static inline void __enable_icache_msr(void)
{
	__asm__ __volatile__ (" msrset	r0, %0;		\
				nop; "			\
			: : "i" (MSR_ICE) : "memory");
}
43
/* Disable the icache by clearing MSR[ICE] with msrclr (plus settle nop). */
static inline void __disable_icache_msr(void)
{
	__asm__ __volatile__ (" msrclr	r0, %0;		\
				nop; "			\
			: : "i" (MSR_ICE) : "memory");
}
50
/* Enable the dcache by setting MSR[DCE] with msrset (plus settle nop). */
static inline void __enable_dcache_msr(void)
{
	__asm__ __volatile__ (" msrset	r0, %0;		\
				nop; "			\
				:			\
				: "i" (MSR_DCE)		\
				: "memory");
}
59
/* Disable the dcache by clearing MSR[DCE] with msrclr (plus settle nop). */
static inline void __disable_dcache_msr(void)
{
	__asm__ __volatile__ (" msrclr	r0, %0;		\
				nop; "			\
				:			\
				: "i" (MSR_DCE)		\
				: "memory");
}
68
/*
 * Enable the icache on cores without msrset/msrclr: read-modify-write
 * MSR through r12 (listed as clobbered).  The mfs/mts sequence is not
 * atomic - presumably callers must prevent MSR changes in between
 * (the *_irq paths below run with interrupts off; confirm for others).
 */
static inline void __enable_icache_nomsr(void)
{
	__asm__ __volatile__ (" mfs	r12, rmsr;	\
				nop;			\
				ori	r12, r12, %0;	\
				mts	rmsr, r12;	\
				nop; "			\
				:			\
				: "i" (MSR_ICE)		\
				: "memory", "r12");
}
80
/*
 * Disable the icache on cores without MSR instructions: clear MSR[ICE]
 * via a non-atomic rmsr read-modify-write (clobbers r12).
 */
static inline void __disable_icache_nomsr(void)
{
	__asm__ __volatile__ (" mfs	r12, rmsr;	\
				nop;			\
				andi	r12, r12, ~%0;	\
				mts	rmsr, r12;	\
				nop; "			\
				:			\
				: "i" (MSR_ICE)		\
				: "memory", "r12");
}
92
/*
 * Enable the dcache on cores without MSR instructions: set MSR[DCE]
 * via a non-atomic rmsr read-modify-write (clobbers r12).
 */
static inline void __enable_dcache_nomsr(void)
{
	__asm__ __volatile__ (" mfs	r12, rmsr;	\
				nop;			\
				ori	r12, r12, %0;	\
				mts	rmsr, r12;	\
				nop; "			\
				:			\
				: "i" (MSR_DCE)		\
				: "memory", "r12");
}
104
/*
 * Disable the dcache on cores without MSR instructions: clear MSR[DCE]
 * via a non-atomic rmsr read-modify-write (clobbers r12).
 */
static inline void __disable_dcache_nomsr(void)
{
	__asm__ __volatile__ (" mfs	r12, rmsr;	\
				nop;			\
				andi	r12, r12, ~%0;	\
				mts	rmsr, r12;	\
				nop; "			\
				:			\
				: "i" (MSR_DCE)		\
				: "memory", "r12");
}
116
117
/* Helper macro for computing the limits of cache range loops
 *
 * Clamps @end to at most @cache_size bytes past @start, aligns @start
 * down to a cache-line boundary and bumps @end up to the line boundary
 * beyond the last byte.  Modifies @start and @end in place and
 * evaluates its arguments multiple times - pass plain variables only.
 *
 * Fix: dropped the stray ';' after "while (0)" so the macro behaves
 * like a single statement (a trailing ';' breaks unbraced if/else use).
 *
 * NOTE(review): when @end is already line-aligned the last step still
 * advances it a full line, so one line beyond the requested range is
 * touched - confirm this is intended for the invalidate paths.
 */
#define CACHE_LOOP_LIMITS(start, end, cache_line_length, cache_size)	\
do {									\
	int align = ~(cache_line_length - 1);				\
	end = min(start + cache_size, end);				\
	start &= align;							\
	end = ((end & align) + cache_line_length);			\
} while (0)
126
/*
 * Helper macro to loop over the specified cache_size/line_length and
 * execute 'op' on that cacheline
 *
 * The asm counts %0 down from cache_size in -line_length steps,
 * issuing "op index, r0" each pass; bgtid is the delay-slot branch
 * form, so the addk runs on every iteration including the last.
 *
 * NOTE(review): the expansion ends with a stray ';' after "while (0)".
 * Two call sites in this file omit the call-site ';' and rely on it,
 * so removing it must be done together with fixing those callers.
 *
 * NOTE(review): the asm rewrites its "r" *input* operand (len); GCC
 * assumes inputs are unchanged - this should arguably be a "+r" output.
 */
#define CACHE_ALL_LOOP(cache_size, line_length, op)			\
do {									\
	unsigned int len = cache_size;					\
	int step = -line_length;					\
	BUG_ON(step >= 0);						\
									\
	__asm__ __volatile__ (" 1:	" #op "	%0, r0;			\
					bgtid	%0, 1b;			\
					addk	%0, %0, %1;		\
					" : : "r" (len), "r" (step)	\
					: "memory");			\
} while (0);
143
144
/*
 * Like CACHE_ALL_LOOP but for two-operand ops (e.g. wdc.clear): the
 * descending index is passed as the *second* operand ("op r0, index").
 *
 * NOTE(review): same caveats as CACHE_ALL_LOOP - stray ';' after
 * "while (0)" that one semicolon-less caller relies on, and the asm
 * rewrites its "r" input operand (len).
 */
#define CACHE_ALL_LOOP2(cache_size, line_length, op)			\
do {									\
	unsigned int len = cache_size;					\
	int step = -line_length;					\
	BUG_ON(step >= 0);						\
									\
	__asm__ __volatile__ (" 1:	" #op "	r0, %0;			\
					bgtid	%0, 1b;			\
					addk	%0, %0, %1;		\
					" : : "r" (len), "r" (step)	\
					: "memory");			\
} while (0);
157
/* for wdc.flush/clear */
/*
 * Apply a two-operand cache op to every line in the clamped range:
 * the offset %1 walks from (end - start) down to 0 in -line_length
 * steps and each pass issues "op start, offset", i.e. addresses from
 * end down to start.  bgtid's delay slot updates the offset.
 *
 * Fix: dropped the stray ';' after "while (0)" (single-statement
 * macro idiom; all callers supply their own ';').
 *
 * NOTE(review): the asm rewrites its "r" input operand (count);
 * consider a "+r" output constraint.
 */
#define CACHE_RANGE_LOOP_2(start, end, line_length, op)			\
do {									\
	int step = -line_length;					\
	int count = end - start;					\
	BUG_ON(count <= 0);						\
									\
	__asm__ __volatile__ (" 1:	" #op "	%0, %1;			\
					bgtid	%1, 1b;			\
					addk	%1, %1, %2;		\
					" : : "r" (start), "r" (count),	\
					"r" (step) : "memory");		\
} while (0)
171
/* It is used only first parameter for OP - for wic, wdc */
/*
 * Apply a one-operand cache op across the clamped range: the count is
 * stepped down by line_length in the bgtid delay slot until it reaches
 * zero, and the address register is advanced before each op.
 *
 * Fix: dropped the stray ';' after "while (0)" (single-statement
 * macro idiom; all callers supply their own ';').
 *
 * NOTE(review): the address register accumulates the *decreasing*
 * count ("addk %0, %0, %1" on every pass), so successive ops are NOT
 * line_length apart - verify the generated address sequence against
 * the intended [start, end) walk before relying on range flushes.
 *
 * NOTE(review): the asm rewrites its "r" input operands (start,
 * count); consider "+r" output constraints.
 */
#define CACHE_RANGE_LOOP_1(start, end, line_length, op)			\
do {									\
	int step = -line_length;					\
	int count = end - start;					\
	BUG_ON(count <= 0);						\
									\
	__asm__ __volatile__ (" 1:	addk	%0, %0, %1;		\
					" #op "	%0, r0;			\
					bgtid	%1, 1b;			\
					addk	%1, %1, %2;		\
					" : : "r" (start), "r" (count),	\
					"r" (step) : "memory");		\
} while (0)
186
/*
 * Flush the icache lines covering [start, end): clamp/align the range
 * with CACHE_LOOP_LIMITS, then run the "wic" loop with interrupts
 * disabled and the icache switched off via the MSR instructions.
 */
static void __flush_icache_range_msr_irq(unsigned long start, unsigned long end)
{
	unsigned long flags;

	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
				(unsigned int)start, (unsigned int) end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.icache_line_length, cpuinfo.icache_size);

	local_irq_save(flags);
	__disable_icache_msr();

	CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);

	__enable_icache_msr();
	local_irq_restore(flags);
}
205
/*
 * Same as __flush_icache_range_msr_irq but for cores without the MSR
 * instructions: the icache is toggled via rmsr read-modify-write.
 */
static void __flush_icache_range_nomsr_irq(unsigned long start,
				unsigned long end)
{
	unsigned long flags;

	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
				(unsigned int)start, (unsigned int) end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.icache_line_length, cpuinfo.icache_size);

	local_irq_save(flags);
	__disable_icache_nomsr();

	CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);

	__enable_icache_nomsr();
	local_irq_restore(flags);
}
225
/*
 * Flush the icache lines covering [start, end) with "wic", leaving
 * interrupts and the cache enabled (newer-core model, see
 * microblaze_cache_init()).
 */
static void __flush_icache_range_noirq(unsigned long start,
				unsigned long end)
{
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
				(unsigned int)start, (unsigned int) end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.icache_line_length, cpuinfo.icache_size);
	CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
}
236
/*
 * Flush the whole icache with "wic", with interrupts disabled and the
 * icache switched off via the MSR instructions around the loop.
 */
static void __flush_icache_all_msr_irq(void)
{
	unsigned long flags;

	pr_debug("%s\n", __func__);

	local_irq_save(flags);
	__disable_icache_msr();

	CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);

	__enable_icache_msr();
	local_irq_restore(flags);
}
251
/*
 * Flush the whole icache with "wic" on cores without MSR instructions:
 * interrupts off, icache toggled via rmsr read-modify-write.
 */
static void __flush_icache_all_nomsr_irq(void)
{
	unsigned long flags;

	pr_debug("%s\n", __func__);

	local_irq_save(flags);
	__disable_icache_nomsr();

	CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);

	__enable_icache_nomsr();
	local_irq_restore(flags);
}
266
/* Flush the whole icache with "wic", no irq or cache-off protection. */
static void __flush_icache_all_noirq(void)
{
	pr_debug("%s\n", __func__);
	CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
}
272
/*
 * Invalidate the whole (write-through) dcache with "wdc", interrupts
 * disabled and the dcache switched off via the MSR instructions.
 */
static void __invalidate_dcache_all_msr_irq(void)
{
	unsigned long flags;

	pr_debug("%s\n", __func__);

	local_irq_save(flags);
	__disable_dcache_msr();

	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);

	__enable_dcache_msr();
	local_irq_restore(flags);
}
287
/*
 * Invalidate the whole (write-through) dcache with "wdc" on cores
 * without MSR instructions: interrupts off, dcache toggled via rmsr.
 */
static void __invalidate_dcache_all_nomsr_irq(void)
{
	unsigned long flags;

	pr_debug("%s\n", __func__);

	local_irq_save(flags);
	__disable_dcache_nomsr();

	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);

	__enable_dcache_nomsr();
	local_irq_restore(flags);
}
302
303 static void __invalidate_dcache_all_noirq_wt(void)
304 {
305         pr_debug("%s\n", __func__);
306         CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc)
307 }
308
309 /* FIXME this is weird - should be only wdc but not work
310  * MS: I am getting bus errors and other weird things */
311 static void __invalidate_dcache_all_wb(void)
312 {
313         pr_debug("%s\n", __func__);
314         CACHE_ALL_LOOP2(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
315                                         wdc.clear)
316
317 #if 0
318         unsigned int i;
319
320         pr_debug("%s\n", __func__);
321
322         /* Just loop through cache size and invalidate it */
323         for (i = 0; i < cpuinfo.dcache_size; i += cpuinfo.dcache_line_length)
324                         __invalidate_dcache(0, i);
325 #endif
326 }
327
/*
 * Invalidate (discard, no write-back) the write-back dcache lines
 * covering [start, end) using wdc.clear on the clamped/aligned range.
 */
static void __invalidate_dcache_range_wb(unsigned long start,
						unsigned long end)
{
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
				(unsigned int)start, (unsigned int) end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.dcache_line_length, cpuinfo.dcache_size);
	CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.clear);
}
338
/*
 * Invalidate the write-through dcache lines covering [start, end)
 * with "wdc", no irq or cache-off protection (newer-core model).
 */
static void __invalidate_dcache_range_nomsr_wt(unsigned long start,
							unsigned long end)
{
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
				(unsigned int)start, (unsigned int) end);
	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.dcache_line_length, cpuinfo.dcache_size);

	CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
}
349
/*
 * Invalidate the write-through dcache lines covering [start, end)
 * with "wdc", interrupts disabled and the dcache switched off via the
 * MSR instructions around the loop.
 */
static void __invalidate_dcache_range_msr_irq_wt(unsigned long start,
							unsigned long end)
{
	unsigned long flags;

	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
				(unsigned int)start, (unsigned int) end);
	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.dcache_line_length, cpuinfo.dcache_size);

	local_irq_save(flags);
	__disable_dcache_msr();

	CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);

	__enable_dcache_msr();
	local_irq_restore(flags);
}
368
/*
 * Invalidate the dcache lines covering [start, end) with "wdc" on
 * cores without MSR instructions: interrupts off, dcache toggled via
 * rmsr read-modify-write.
 */
static void __invalidate_dcache_range_nomsr_irq(unsigned long start,
							unsigned long end)
{
	unsigned long flags;

	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
				(unsigned int)start, (unsigned int) end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.dcache_line_length, cpuinfo.dcache_size);

	local_irq_save(flags);
	__disable_dcache_nomsr();

	CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);

	__enable_dcache_nomsr();
	local_irq_restore(flags);
}
388
/* Write the whole write-back dcache out to memory using wdc.flush. */
static void __flush_dcache_all_wb(void)
{
	pr_debug("%s\n", __func__);
	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
				wdc.flush);
}
395
/*
 * Write the write-back dcache lines covering [start, end) out to
 * memory using wdc.flush on the clamped/aligned range.
 */
static void __flush_dcache_range_wb(unsigned long start, unsigned long end)
{
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
				(unsigned int)start, (unsigned int) end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.dcache_line_length, cpuinfo.dcache_size);
	CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.flush);
}
405
/* struct for wb caches and for wt caches */
/* Active cache-operation table; selected by microblaze_cache_init(). */
struct scache *mbc;
408
/* new wb cache model */
/*
 * Write-back dcache, core has msrset/msrclr.  Field naming (shared by
 * all tables below): ie/id and de/dd enable/disable the i/d cache;
 * ifl(r)/iin(r) flush/invalidate the icache (all/range); dfl(r) and
 * din(r) likewise for the dcache.
 */
const struct scache wb_msr = {
	.ie = __enable_icache_msr,
	.id = __disable_icache_msr,
	.ifl = __flush_icache_all_noirq,
	.iflr = __flush_icache_range_noirq,
	.iin = __flush_icache_all_noirq,
	.iinr = __flush_icache_range_noirq,
	.de = __enable_dcache_msr,
	.dd = __disable_dcache_msr,
	.dfl = __flush_dcache_all_wb,
	.dflr = __flush_dcache_range_wb,
	.din = __invalidate_dcache_all_wb,
	.dinr = __invalidate_dcache_range_wb,
};
424
/* There is only difference in ie, id, de, dd functions */
/* Write-back dcache, no MSR instructions (rmsr-based enable/disable). */
const struct scache wb_nomsr = {
	.ie = __enable_icache_nomsr,
	.id = __disable_icache_nomsr,
	.ifl = __flush_icache_all_noirq,
	.iflr = __flush_icache_range_noirq,
	.iin = __flush_icache_all_noirq,
	.iinr = __flush_icache_range_noirq,
	.de = __enable_dcache_nomsr,
	.dd = __disable_dcache_nomsr,
	.dfl = __flush_dcache_all_wb,
	.dflr = __flush_dcache_range_wb,
	.din = __invalidate_dcache_all_wb,
	.dinr = __invalidate_dcache_range_wb,
};
440
/* Old wt cache model with disabling irq and turn off cache */
/*
 * Write-through dcache on pre-7.20.a cores, MSR instructions present:
 * every maintenance op runs with irqs off and the cache disabled.
 * With write-through there is no dirty data, so "flush" and
 * "invalidate" map to the same wdc invalidate routines.
 */
const struct scache wt_msr = {
	.ie = __enable_icache_msr,
	.id = __disable_icache_msr,
	.ifl = __flush_icache_all_msr_irq,
	.iflr = __flush_icache_range_msr_irq,
	.iin = __flush_icache_all_msr_irq,
	.iinr = __flush_icache_range_msr_irq,
	.de = __enable_dcache_msr,
	.dd = __disable_dcache_msr,
	.dfl = __invalidate_dcache_all_msr_irq,
	.dflr = __invalidate_dcache_range_msr_irq_wt,
	.din = __invalidate_dcache_all_msr_irq,
	.dinr = __invalidate_dcache_range_msr_irq_wt,
};
456
/* Old wt model, no MSR instructions: irq-off + rmsr-based cache-off. */
const struct scache wt_nomsr = {
	.ie = __enable_icache_nomsr,
	.id = __disable_icache_nomsr,
	.ifl = __flush_icache_all_nomsr_irq,
	.iflr = __flush_icache_range_nomsr_irq,
	.iin = __flush_icache_all_nomsr_irq,
	.iinr = __flush_icache_range_nomsr_irq,
	.de = __enable_dcache_nomsr,
	.dd = __disable_dcache_nomsr,
	.dfl = __invalidate_dcache_all_nomsr_irq,
	.dflr = __invalidate_dcache_range_nomsr_irq,
	.din = __invalidate_dcache_all_nomsr_irq,
	.dinr = __invalidate_dcache_range_nomsr_irq,
};
471
/* New wt cache model for newer Microblaze versions */
/*
 * Write-through dcache on cores >= 7.20.a with MSR instructions:
 * maintenance runs without irq-off/cache-off protection.
 */
const struct scache wt_msr_noirq = {
	.ie = __enable_icache_msr,
	.id = __disable_icache_msr,
	.ifl = __flush_icache_all_noirq,
	.iflr = __flush_icache_range_noirq,
	.iin = __flush_icache_all_noirq,
	.iinr = __flush_icache_range_noirq,
	.de = __enable_dcache_msr,
	.dd = __disable_dcache_msr,
	.dfl = __invalidate_dcache_all_noirq_wt,
	.dflr = __invalidate_dcache_range_nomsr_wt,
	.din = __invalidate_dcache_all_noirq_wt,
	.dinr = __invalidate_dcache_range_nomsr_wt,
};
487
/* New wt model without MSR instructions (enable/disable via rmsr). */
const struct scache wt_nomsr_noirq = {
	.ie = __enable_icache_nomsr,
	.id = __disable_icache_nomsr,
	.ifl = __flush_icache_all_noirq,
	.iflr = __flush_icache_range_noirq,
	.iin = __flush_icache_all_noirq,
	.iinr = __flush_icache_range_noirq,
	.de = __enable_dcache_nomsr,
	.dd = __disable_dcache_nomsr,
	.dfl = __invalidate_dcache_all_noirq_wt,
	.dflr = __invalidate_dcache_range_nomsr_wt,
	.din = __invalidate_dcache_all_noirq_wt,
	.dinr = __invalidate_dcache_range_nomsr_wt,
};
502
/* CPU version code for 7.20.c - see arch/microblaze/kernel/cpu/cpuinfo.c */
#define CPUVER_7_20_A	0x0c
#define CPUVER_7_20_D	0x0f

/*
 * Print one cache-configuration line at KERN_INFO.
 * Fix: no trailing ';' in the macro body - the caller supplies it;
 * "stmt;;" breaks unbraced if/else users of the macro.
 */
#define INFO(s) printk(KERN_INFO "cache: " s " \n")
508
509 void microblaze_cache_init(void)
510 {
511         if (cpuinfo.use_instr & PVR2_USE_MSR_INSTR) {
512                 if (cpuinfo.dcache_wb) {
513                         INFO("wb_msr");
514                         mbc = (struct scache *)&wb_msr;
515                         if (cpuinfo.ver_code < CPUVER_7_20_D) {
516                                 /* MS: problem with signal handling - hw bug */
517                                 INFO("WB won't work properly");
518                         }
519                 } else {
520                         if (cpuinfo.ver_code >= CPUVER_7_20_A) {
521                                 INFO("wt_msr_noirq");
522                                 mbc = (struct scache *)&wt_msr_noirq;
523                         } else {
524                                 INFO("wt_msr");
525                                 mbc = (struct scache *)&wt_msr;
526                         }
527                 }
528         } else {
529                 if (cpuinfo.dcache_wb) {
530                         INFO("wb_nomsr");
531                         mbc = (struct scache *)&wb_nomsr;
532                         if (cpuinfo.ver_code < CPUVER_7_20_D) {
533                                 /* MS: problem with signal handling - hw bug */
534                                 INFO("WB won't work properly");
535                         }
536                 } else {
537                         if (cpuinfo.ver_code >= CPUVER_7_20_A) {
538                                 INFO("wt_nomsr_noirq");
539                                 mbc = (struct scache *)&wt_nomsr_noirq;
540                         } else {
541                                 INFO("wt_nomsr");
542                                 mbc = (struct scache *)&wt_nomsr;
543                         }
544                 }
545         }
546 }