/*
 * arch/metag/mm/cache.c
 *
 * Copyright (C) 2001, 2002, 2005, 2007, 2012 Imagination Technologies.
 *
 * This program is free software; you can redistribute it and/or modify it under
 * the terms of the GNU General Public License version 2 as published by the
 * Free Software Foundation.
 */
13 #include <linux/export.h>
15 #include <asm/cacheflush.h>
16 #include <asm/core_reg.h>
17 #include <asm/metag_isa.h>
18 #include <asm/metag_mem.h>
19 #include <asm/metag_regs.h>
21 #define DEFAULT_CACHE_WAYS_LOG2 2
24 * Size of a set in the caches. Initialised for default 16K stride, adjusted
25 * according to values passed through TBI global heap segment via LDLK (on ATP)
26 * or config registers (on HTP/MTP)
28 static int dcache_set_shift = METAG_TBI_CACHE_SIZE_BASE_LOG2
29 - DEFAULT_CACHE_WAYS_LOG2;
30 static int icache_set_shift = METAG_TBI_CACHE_SIZE_BASE_LOG2
31 - DEFAULT_CACHE_WAYS_LOG2;
33 * The number of sets in the caches. Initialised for HTP/ATP, adjusted
34 * according to NOMMU setting in config registers
36 static unsigned char dcache_sets_log2 = DEFAULT_CACHE_WAYS_LOG2;
37 static unsigned char icache_sets_log2 = DEFAULT_CACHE_WAYS_LOG2;
/*
 * NOTE(review): this extracted chunk is corrupted -- every line below carries
 * a fused upstream line number and several intermediate source lines
 * (ternary branches, braces, else-arms, TBI segment handling) are missing.
 * The text is preserved byte-for-byte; recover the full function from the
 * upstream metag tree before attempting to build or modify it.
 */
40 * metag_cache_probe() - Probe L1 cache configuration.
42 * Probe the L1 cache configuration to aid the L1 physical cache flushing
45 void __init metag_cache_probe(void)
/* On HTP/MTP the geometry comes from the core config registers. */
47 #ifndef CONFIG_METAG_META12
48 int coreid = metag_in32(METAC_CORE_ID);
49 int config = metag_in32(METAC_CORE_CONFIG2);
50 int cfgcache = coreid & METAC_COREID_CFGCACHE_BITS;
52 if (cfgcache == METAC_COREID_CFGCACHE_TYPE0 ||
53 cfgcache == METAC_COREID_CFGCACHE_PRIVNOMMU) {
58 /* For normal size caches, the smallest size is 4Kb.
59 For small caches, the smallest size is 64b */
/*
 * Derive log2 of the I-cache set stride from the size field, then
 * subtract the number of ways.  The base-size ternary for ICSMALL is
 * among the missing lines -- TODO confirm against upstream.
 */
60 icache_set_shift = (config & METAC_CORECFG2_ICSMALL_BIT)
62 icache_set_shift += (config & METAC_CORE_C2ICSZ_BITS)
63 >> METAC_CORE_C2ICSZ_S;
64 icache_set_shift -= icache_sets_log2;
/* Same derivation for the D-cache. */
66 dcache_set_shift = (config & METAC_CORECFG2_DCSMALL_BIT)
68 dcache_set_shift += (config & METAC_CORECFG2_DCSZ_BITS)
69 >> METAC_CORECFG2_DCSZ_S;
70 dcache_set_shift -= dcache_sets_log2;
/* ATP path: sizes come from the TBI global heap segment instead. */
72 /* Extract cache sizes from global heap segment */
74 int width, shift, addend;
77 seg = __TBIFindSeg(NULL, TBID_SEG(TBID_THREAD_GLOBAL,
83 /* Work out width of I-cache size bit-field */
84 u = ((unsigned long) METAG_TBI_ICACHE_SIZE_BITS)
85 >> METAG_TBI_ICACHE_SIZE_S;
91 /* Extract sign-extended size addend value */
92 shift = 32 - (METAG_TBI_ICACHE_SIZE_S + width);
93 addend = (long) ((val & METAG_TBI_ICACHE_SIZE_BITS)
95 >> (shift + METAG_TBI_ICACHE_SIZE_S);
96 /* Now calculate I-cache set size */
97 icache_set_shift = (METAG_TBI_CACHE_SIZE_BASE_LOG2
98 - DEFAULT_CACHE_WAYS_LOG2)
101 /* Similarly for D-cache */
102 u = ((unsigned long) METAG_TBI_DCACHE_SIZE_BITS)
103 >> METAG_TBI_DCACHE_SIZE_S;
109 shift = 32 - (METAG_TBI_DCACHE_SIZE_S + width);
110 addend = (long) ((val & METAG_TBI_DCACHE_SIZE_BITS)
112 >> (shift + METAG_TBI_DCACHE_SIZE_S);
113 dcache_set_shift = (METAG_TBI_CACHE_SIZE_BASE_LOG2
114 - DEFAULT_CACHE_WAYS_LOG2)
/*
 * metag_phys_data_cache_flush() - Flush the whole physical D-cache by
 * writing to the LINSYSCFLUSH region, honouring this thread's cache
 * partition.
 *
 * NOTE(review): corrupted extraction -- lines carry fused upstream line
 * numbers and local declarations, #if guards, else-arms and the asm
 * statement header are missing.  Preserved byte-for-byte; restore from
 * upstream before building.
 */
120 static void metag_phys_data_cache_flush(const void *start)
122 unsigned long flush0, flush1, flush2, flush3;
128 /* Use a sequence of writes to flush the cache region requested */
129 thread = (__core_reg_get(TXENABLE) & TXENABLE_THREAD_BITS)
130 >> TXENABLE_THREAD_S;
132 /* Cache is broken into sets which lie in contiguous RAMs */
133 set_shift = dcache_set_shift;
135 /* Move to the base of the physical cache flush region */
136 flush0 = LINSYSCFLUSH_DCACHE_LINE;
139 /* Get partition data for this thread */
140 part = metag_in32(SYSC_DCPART0 +
141 (SYSC_xCPARTn_STRIDE * thread));
144 /* Access Global vs Local partition */
145 part >>= SYSC_xCPARTG_AND_S
146 - SYSC_xCPARTL_AND_S;
148 /* Extract offset and move SetOff */
149 offset = (part & SYSC_xCPARTL_OR_BITS)
150 >> SYSC_xCPARTL_OR_S;
151 flush0 += (offset << (set_shift - 4));
/* Size of partition (in sixteenths of a set) determines loop count. */
154 part = (part & SYSC_xCPARTL_AND_BITS)
155 >> SYSC_xCPARTL_AND_S;
156 loops = ((part + 1) << (set_shift - 4));
158 /* Reduce loops by step of cache line size */
/* One flush pointer per way; the sets lie at set_shift strides. */
161 flush1 = flush0 + (1 << set_shift);
162 flush2 = flush0 + (2 << set_shift);
163 flush3 = flush0 + (3 << set_shift);
165 if (dcache_sets_log2 == 1) {
167 flush3 = flush1 + step;
168 flush1 = flush0 + step;
173 /* Clear loops ways in cache */
174 while (loops-- != 0) {
175 /* Clear the ways. */
178 * GCC doesn't generate very good code for this so we
179 * provide inline assembly instead.
/* Byte writes to the flush region invalidate the matching lines. */
181 metag_out8(0, flush0);
182 metag_out8(0, flush1);
183 metag_out8(0, flush2);
184 metag_out8(0, flush3);
192 "SETB\t[%0+%4++],%5\n"
193 "SETB\t[%1+%4++],%5\n"
194 "SETB\t[%2+%4++],%5\n"
195 "SETB\t[%3+%4++],%5\n"
200 : "e" (step), "a" (0));
/*
 * metag_data_cache_flush_all() - Flush the entire data cache.
 * Skips the flush when the D-cache is disabled in SYSC_CACHE_MMU_CONFIG.
 * NOTE(review): corrupted extraction (fused line numbers, missing return
 * and braces); preserved byte-for-byte.
 */
205 void metag_data_cache_flush_all(const void *start)
207 if ((metag_in32(SYSC_CACHE_MMU_CONFIG) & SYSC_CMMUCFG_DC_ON_BIT) == 0)
208 /* No need to flush the data cache it's not actually enabled */
211 metag_phys_data_cache_flush(start);
/*
 * metag_data_cache_flush() - Flush [start, start+bytes) from the D-cache,
 * using the linear (per-line) flush mechanism for small ranges.
 *
 * NOTE(review): corrupted extraction -- fused upstream line numbers, and
 * the size-threshold branch plus the unrolled switch/while skeleton that
 * drives the PRIM_FLUSH cases are missing.  Preserved byte-for-byte.
 */
214 void metag_data_cache_flush(const void *start, int bytes)
216 unsigned long flush0;
219 if ((metag_in32(SYSC_CACHE_MMU_CONFIG) & SYSC_CMMUCFG_DC_ON_BIT) == 0)
220 /* No need to flush the data cache it's not actually enabled */
/* Large ranges fall back to a full physical cache flush. */
224 metag_phys_data_cache_flush(start);
228 /* Use linear cache flush mechanism on META IP */
/* Round the byte count up to whole cache lines. */
230 loops = ((int)start & (DCACHE_LINE_BYTES - 1)) + bytes +
231 (DCACHE_LINE_BYTES - 1);
232 loops >>= DCACHE_LINE_S;
234 #define PRIM_FLUSH(addr, offset) do { \
235 int __addr = ((int) (addr)) + ((offset) * 64); \
236 __builtin_dcache_flush((void *)(__addr)); \
239 #define LOOP_INC (4*64)
242 /* By default stop */
246 /* Drop Thru Cases! */
/* Duff's-device style fall-through: flush up to 4 lines per pass. */
248 PRIM_FLUSH(flush0, 3);
252 PRIM_FLUSH(flush0, 2);
254 PRIM_FLUSH(flush0, 1);
256 PRIM_FLUSH(flush0, 0);
263 EXPORT_SYMBOL(metag_data_cache_flush);
/*
 * metag_phys_code_cache_flush() - Flush the physical I-cache via the
 * LINSYSCFLUSH region, honouring this thread's I-cache partition.  On
 * Meta1 (META12) small ranges target only the relevant sets.
 *
 * NOTE(review): corrupted extraction -- fused upstream line numbers;
 * local declarations, else-arms, the asm statement header and the
 * end_set wrap-handling tail are missing.  Preserved byte-for-byte;
 * restore from upstream before building.
 */
265 static void metag_phys_code_cache_flush(const void *start, int bytes)
267 unsigned long flush0, flush1, flush2, flush3, end_set;
270 int set_shift, set_size;
273 /* Use a sequence of writes to flush the cache region requested */
274 thread = (__core_reg_get(TXENABLE) & TXENABLE_THREAD_BITS)
275 >> TXENABLE_THREAD_S;
276 set_shift = icache_set_shift;
278 /* Move to the base of the physical cache flush region */
279 flush0 = LINSYSCFLUSH_ICACHE_LINE;
282 /* Get partition code for this thread */
283 part = metag_in32(SYSC_ICPART0 +
284 (SYSC_xCPARTn_STRIDE * thread));
287 /* Access Global vs Local partition */
288 part >>= SYSC_xCPARTG_AND_S-SYSC_xCPARTL_AND_S;
290 /* Extract offset and move SetOff */
291 offset = (part & SYSC_xCPARTL_OR_BITS)
292 >> SYSC_xCPARTL_OR_S;
293 flush0 += (offset << (set_shift - 4));
/* Partition size (in sixteenths of a set) determines loop count. */
296 part = (part & SYSC_xCPARTL_AND_BITS)
297 >> SYSC_xCPARTL_AND_S;
298 loops = ((part + 1) << (set_shift - 4));
300 /* Where does the Set end? */
301 end_set = flush0 + loops;
304 #ifdef CONFIG_METAG_META12
305 if ((bytes < 4096) && (bytes < loops)) {
306 /* Unreachable on HTP/MTP */
307 /* Only target the sets that could be relavent */
308 flush0 += (loops - step) & ((int) start);
309 loops = (((int) start) & (step-1)) + bytes + step - 1;
313 /* Reduce loops by step of cache line size */
/* One flush pointer per way at set_shift strides. */
316 flush1 = flush0 + (1<<set_shift);
317 flush2 = flush0 + (2<<set_shift);
318 flush3 = flush0 + (3<<set_shift);
320 if (icache_sets_log2 == 1) {
322 flush3 = flush1 + step;
323 flush1 = flush0 + step;
325 /* flush0 will stop one line early in this case
326 * (flush1 will do the final line).
327 * However we don't correct end_set here at the moment
328 * because it will never wrap on HTP/MTP
336 /* Clear loops ways in cache */
337 while (loops-- != 0) {
340 * GCC doesn't generate very good code for this so we
341 * provide inline assembly instead.
/* Byte writes to the flush region invalidate the matching lines. */
344 metag_out8(0, flush0);
345 metag_out8(0, flush1);
346 metag_out8(0, flush2);
347 metag_out8(0, flush3);
355 "SETB\t[%0+%4++],%5\n"
356 "SETB\t[%1+%4++],%5\n"
357 "SETB\t[%2+%4++],%5\n"
358 "SETB\t[%3+%4++],%5\n"
363 : "e" (step), "a" (0));
/* Meta1 partial flush may run past the set; wrap back to its start. */
366 if (flush0 == end_set) {
367 /* Wrap within Set 0 */
/*
 * metag_code_cache_flush_all() - Flush the entire instruction cache.
 * Skips the flush when the I-cache is disabled in SYSC_CACHE_MMU_CONFIG;
 * the 4096 size forces the full-cache path in the physical flush helper.
 * NOTE(review): corrupted extraction (fused line numbers, missing return
 * and braces); preserved byte-for-byte.
 */
376 void metag_code_cache_flush_all(const void *start)
378 if ((metag_in32(SYSC_CACHE_MMU_CONFIG) & SYSC_CMMUCFG_IC_ON_BIT) == 0)
379 /* No need to flush the code cache it's not actually enabled */
382 metag_phys_code_cache_flush(start, 4096);
/*
 * metag_code_cache_flush() - Flush [start, start+bytes) from the I-cache.
 * Meta1 lacks CACHEWD, so it always takes the physical flush path; on
 * Meta2 small ranges use per-line CACHEWD writeback via PRIM_IFLUSH.
 *
 * NOTE(review): corrupted extraction -- fused upstream line numbers, and
 * the local declarations, size threshold and unrolled switch/while
 * skeleton driving the PRIM_IFLUSH cases are missing.  Preserved
 * byte-for-byte; restore from upstream before building.
 */
385 void metag_code_cache_flush(const void *start, int bytes)
387 #ifndef CONFIG_METAG_META12
390 #endif /* !CONFIG_METAG_META12 */
392 if ((metag_in32(SYSC_CACHE_MMU_CONFIG) & SYSC_CMMUCFG_IC_ON_BIT) == 0)
393 /* No need to flush the code cache it's not actually enabled */
396 #ifdef CONFIG_METAG_META12
397 /* CACHEWD isn't available on Meta1, so always do full cache flush */
398 metag_phys_code_cache_flush(start, bytes);
400 #else /* CONFIG_METAG_META12 */
401 /* If large size do full physical cache flush */
403 metag_phys_code_cache_flush(start, bytes);
407 /* Use linear cache flush mechanism on META IP */
/* Align down to a cache line and round the count up to whole lines. */
408 flush = (void *)((int)start & ~(ICACHE_LINE_BYTES-1));
409 loops = ((int)start & (ICACHE_LINE_BYTES-1)) + bytes +
410 (ICACHE_LINE_BYTES-1);
411 loops >>= ICACHE_LINE_S;
413 #define PRIM_IFLUSH(addr, offset) \
414 __builtin_meta2_cachewd(((addr) + ((offset) * 64)), CACHEW_ICACHE_BIT)
416 #define LOOP_INC (4*64)
419 /* By default stop */
423 /* Drop Thru Cases! */
/* Duff's-device style fall-through: flush up to 4 lines per pass. */
425 PRIM_IFLUSH(flush, 3);
429 PRIM_IFLUSH(flush, 2);
431 PRIM_IFLUSH(flush, 1);
433 PRIM_IFLUSH(flush, 0);
439 #endif /* !CONFIG_METAG_META12 */
441 EXPORT_SYMBOL(metag_code_cache_flush);