/*
 * arch/arm/mm/cache-l2x0.c - L210/L220 cache controller support
 *
 * Copyright (C) 2007 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/err.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>

#include <asm/cacheflush.h>
#include <asm/hardware/cache-l2x0.h>
#define CACHE_LINE_SIZE		32

static void __iomem *l2x0_base;
static DEFINE_SPINLOCK(l2x0_lock);
static uint32_t l2x0_way_mask;	/* Bitmask of active ways */
static uint32_t l2x0_size;
static inline void cache_wait_way(void __iomem *reg, unsigned long mask)
{
        /* wait for cache operation by line or way to complete */
        while (readl_relaxed(reg) & mask)
                cpu_relax();
}
#ifdef CONFIG_CACHE_PL310
static inline void cache_wait(void __iomem *reg, unsigned long mask)
{
        /* cache operations by line are atomic on PL310 */
}
#else
#define cache_wait	cache_wait_way
#endif
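
/*
 * A write to the CACHE_SYNC register drains the controller's buffers,
 * guaranteeing that earlier maintenance operations have completed.
 * Erratum 753970 affects the cache sync operation on PL310 r3p0, so
 * with that workaround enabled the sync is issued through a dummy
 * register instead of L2X0_CACHE_SYNC.
 */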
static inline void cache_sync(void)
{
        void __iomem *base = l2x0_base;

#ifdef CONFIG_ARM_ERRATA_753970
        /* write to an unmapped register */
        writel_relaxed(0, base + L2X0_DUMMY_REG);
#else
        writel_relaxed(0, base + L2X0_CACHE_SYNC);
#endif
        cache_wait(base + L2X0_CACHE_SYNC, 1);
}
static inline void l2x0_clean_line(unsigned long addr)
{
        void __iomem *base = l2x0_base;
        cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
        writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
}
static inline void l2x0_inv_line(unsigned long addr)
{
        void __iomem *base = l2x0_base;
        cache_wait(base + L2X0_INV_LINE_PA, 1);
        writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}
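
/*
 * PL310 errata 588369 and 727915 are worked around by writing to the
 * debug control register around the affected maintenance operations:
 * the value 0x03 temporarily disables cache linefills and write-back
 * while the operations are in flight.
 */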
#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)

#define debug_writel(val)	outer_cache.set_debug(val)

static void l2x0_set_debug(unsigned long val)
{
        writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL);
}
#else
/* Optimised out for non-errata case */
static inline void debug_writel(unsigned long val)
{
}

#define l2x0_set_debug	NULL
#endif
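
/*
 * With erratum 588369 the atomic Clean & Invalidate by PA operation
 * does not reliably invalidate the line, so the flush is split into an
 * explicit clean followed by an invalidate.
 */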
#ifdef CONFIG_PL310_ERRATA_588369
static inline void l2x0_flush_line(unsigned long addr)
{
        void __iomem *base = l2x0_base;

        /* Clean by PA followed by Invalidate by PA */
        cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
        writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
        cache_wait(base + L2X0_INV_LINE_PA, 1);
        writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}
#else
static inline void l2x0_flush_line(unsigned long addr)
{
        void __iomem *base = l2x0_base;
        cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
        writel_relaxed(addr, base + L2X0_CLEAN_INV_LINE_PA);
}
#endif
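
/*
 * l2x0_cache_sync() is the outer_cache.sync hook, reached via
 * outer_sync() from the barrier macros; it drains the controller's
 * buffers under the lock.
 */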
static void l2x0_cache_sync(void)
{
        unsigned long flags;

        spin_lock_irqsave(&l2x0_lock, flags);
        cache_sync();
        spin_unlock_irqrestore(&l2x0_lock, flags);
}
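
/* Must be called with l2x0_lock held and interrupts disabled. */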
static void __l2x0_flush_all(void)
{
        debug_writel(0x03);
        writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_INV_WAY);
        cache_wait_way(l2x0_base + L2X0_CLEAN_INV_WAY, l2x0_way_mask);
        cache_sync();
        debug_writel(0x00);
}
static void l2x0_flush_all(void)
{
        unsigned long flags;

        /* clean and invalidate all ways */
        spin_lock_irqsave(&l2x0_lock, flags);
        __l2x0_flush_all();
        spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void l2x0_clean_all(void)
{
        unsigned long flags;

        /* clean all ways */
        spin_lock_irqsave(&l2x0_lock, flags);
        writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_WAY);
        cache_wait_way(l2x0_base + L2X0_CLEAN_WAY, l2x0_way_mask);
        cache_sync();
        spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void l2x0_inv_all(void)
{
        unsigned long flags;

        /* invalidate all ways */
        spin_lock_irqsave(&l2x0_lock, flags);
        /* Invalidating when L2 is enabled is a no-no */
        BUG_ON(readl(l2x0_base + L2X0_CTRL) & 1);
        writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
        cache_wait_way(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
        cache_sync();
        spin_unlock_irqrestore(&l2x0_lock, flags);
}
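
/*
 * Partial cache lines at either end of the range cannot simply be
 * invalidated without discarding dirty data outside the range, so the
 * boundary lines are cleaned and invalidated instead. The lock is
 * dropped and re-acquired every 4096 bytes to bound interrupt latency,
 * since it is held with interrupts disabled.
 */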
static void l2x0_inv_range(unsigned long start, unsigned long end)
{
        void __iomem *base = l2x0_base;
        unsigned long flags;

        spin_lock_irqsave(&l2x0_lock, flags);
        if (start & (CACHE_LINE_SIZE - 1)) {
                start &= ~(CACHE_LINE_SIZE - 1);
                debug_writel(0x03);
                l2x0_flush_line(start);
                debug_writel(0x00);
                start += CACHE_LINE_SIZE;
        }

        if (end & (CACHE_LINE_SIZE - 1)) {
                end &= ~(CACHE_LINE_SIZE - 1);
                debug_writel(0x03);
                l2x0_flush_line(end);
                debug_writel(0x00);
        }

        while (start < end) {
                unsigned long blk_end = start + min(end - start, 4096UL);

                while (start < blk_end) {
                        l2x0_inv_line(start);
                        start += CACHE_LINE_SIZE;
                }

                if (blk_end < end) {
                        spin_unlock_irqrestore(&l2x0_lock, flags);
                        spin_lock_irqsave(&l2x0_lock, flags);
                }
        }
        cache_wait(base + L2X0_INV_LINE_PA, 1);
        cache_sync();
        spin_unlock_irqrestore(&l2x0_lock, flags);
}
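
/*
 * Cleaning line by line is slower than a background clean by way once
 * the range covers the whole cache, so ranges of at least l2x0_size
 * fall back to l2x0_clean_all().
 */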
static void l2x0_clean_range(unsigned long start, unsigned long end)
{
        void __iomem *base = l2x0_base;
        unsigned long flags;

        if ((end - start) >= l2x0_size) {
                l2x0_clean_all();
                return;
        }

        spin_lock_irqsave(&l2x0_lock, flags);
        start &= ~(CACHE_LINE_SIZE - 1);
        while (start < end) {
                unsigned long blk_end = start + min(end - start, 4096UL);

                while (start < blk_end) {
                        l2x0_clean_line(start);
                        start += CACHE_LINE_SIZE;
                }

                if (blk_end < end) {
                        spin_unlock_irqrestore(&l2x0_lock, flags);
                        spin_lock_irqsave(&l2x0_lock, flags);
                }
        }
        cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
        cache_sync();
        spin_unlock_irqrestore(&l2x0_lock, flags);
}
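
/*
 * As above, whole-cache sized ranges fall back to flushing by way. The
 * debug register is toggled around each block of line operations when
 * the PL310 errata workarounds are enabled.
 */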
static void l2x0_flush_range(unsigned long start, unsigned long end)
{
        void __iomem *base = l2x0_base;
        unsigned long flags;

        if ((end - start) >= l2x0_size) {
                l2x0_flush_all();
                return;
        }

        spin_lock_irqsave(&l2x0_lock, flags);
        start &= ~(CACHE_LINE_SIZE - 1);
        while (start < end) {
                unsigned long blk_end = start + min(end - start, 4096UL);

                debug_writel(0x03);
                while (start < blk_end) {
                        l2x0_flush_line(start);
                        start += CACHE_LINE_SIZE;
                }
                debug_writel(0x00);

                if (blk_end < end) {
                        spin_unlock_irqrestore(&l2x0_lock, flags);
                        spin_lock_irqsave(&l2x0_lock, flags);
                }
        }
        cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
        cache_sync();
        spin_unlock_irqrestore(&l2x0_lock, flags);
}
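
/*
 * Flush everything and turn the controller off. The dsb() ensures the
 * disabling write has taken effect before the lock is released.
 */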
static void l2x0_disable(void)
{
        unsigned long flags;

        spin_lock_irqsave(&l2x0_lock, flags);
        __l2x0_flush_all();
        writel_relaxed(0, l2x0_base + L2X0_CTRL);
        dsb();
        spin_unlock_irqrestore(&l2x0_lock, flags);
}
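
/*
 * Clear the data and instruction lockdown registers. The L310 has
 * eight register pairs (one per master); the L210 and unknown parts
 * are assumed to have one.
 */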
static void __init l2x0_unlock(__u32 cache_id)
{
        int lockregs;
        int i;

        if (cache_id == L2X0_CACHE_ID_PART_L310)
                lockregs = 8;
        else
                /* L210 and unknown types */
                lockregs = 1;

        for (i = 0; i < lockregs; i++) {
                writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_D_BASE +
                               i * L2X0_LOCKDOWN_STRIDE);
                writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_I_BASE +
                               i * L2X0_LOCKDOWN_STRIDE);
        }
}
void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
{
        __u32 aux;
        __u32 cache_id;
        __u32 way_size = 0;
        int ways;
        const char *type;

        l2x0_base = base;

        cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
        aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

        aux &= aux_mask;
        aux |= aux_val;
        /* Determine the number of ways */
        switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
        case L2X0_CACHE_ID_PART_L310:
                if (aux & (1 << 16))
                        ways = 16;
                else
                        ways = 8;
                type = "L310";
                break;
        case L2X0_CACHE_ID_PART_L210:
                ways = (aux >> 13) & 0xf;
                type = "L210";
                break;
        default:
                /* Assume unknown chips have 8 ways */
                ways = 8;
                type = "L2x0 series";
                break;
        }

        l2x0_way_mask = (1 << ways) - 1;
        /*
         * L2 cache size = way size * number of ways
         */
        way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17;
        way_size = 1 << (way_size + 3);
        l2x0_size = ways * way_size * SZ_1K;
        /*
         * Check if the l2x0 controller is already enabled.
         * If you are booting from non-secure mode,
         * accessing the registers below will fault.
         */
        if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
                /* Make sure that I&D is not locked down when starting */
                l2x0_unlock(cache_id);

                /* l2x0 controller is disabled */
                writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL);

                l2x0_inv_all();

                /* enable L2X0 */
                writel_relaxed(1, l2x0_base + L2X0_CTRL);
        }
        outer_cache.inv_range = l2x0_inv_range;
        outer_cache.clean_range = l2x0_clean_range;
        outer_cache.flush_range = l2x0_flush_range;
        outer_cache.sync = l2x0_cache_sync;
        outer_cache.flush_all = l2x0_flush_all;
        outer_cache.inv_all = l2x0_inv_all;
        outer_cache.disable = l2x0_disable;
        outer_cache.set_debug = l2x0_set_debug;
        printk(KERN_INFO "%s cache controller enabled\n", type);
        printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n",
                        ways, cache_id, aux, l2x0_size);
}
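
/*
 * Device-tree configuration. For L210/L220 the latency properties feed
 * the auxiliary control register. An illustrative fragment (the
 * register address and latency values below are made up):
 *
 *	cache-controller@fff12000 {
 *		compatible = "arm,l220-cache";
 *		reg = <0xfff12000 0x1000>;
 *		arm,tag-latency = <2>;
 *		arm,data-latency = <2 2>;
 *		arm,dirty-latency = <2>;
 *	};
 */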
#ifdef CONFIG_OF
static void __init l2x0_of_setup(const struct device_node *np,
                                 __u32 *aux_val, __u32 *aux_mask)
{
        u32 data[2] = { 0, 0 };
        u32 tag = 0;
        u32 dirty = 0;
        u32 val = 0, mask = 0;

        of_property_read_u32(np, "arm,tag-latency", &tag);
        if (tag) {
                mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK;
                val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT;
        }

        of_property_read_u32_array(np, "arm,data-latency",
                                   data, ARRAY_SIZE(data));
        if (data[0] && data[1]) {
                mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK |
                        L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK;
                val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) |
                       ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT);
        }

        of_property_read_u32(np, "arm,dirty-latency", &dirty);
        if (dirty) {
                mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK;
                val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
        }

        *aux_val &= ~mask;
        *aux_val |= val;
        *aux_mask &= ~mask;
}
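
/*
 * On PL310 the tag/data latencies are <read write setup> triplets
 * programmed directly into the latency control registers, and
 * arm,filter-ranges is <base size> for the address filter. An
 * illustrative fragment (values are made up):
 *
 *	L2: cache-controller@fff12000 {
 *		compatible = "arm,pl310-cache";
 *		reg = <0xfff12000 0x1000>;
 *		arm,tag-latency = <1 1 1>;
 *		arm,data-latency = <2 2 2>;
 *		arm,filter-ranges = <0x80000000 0x40000000>;
 *	};
 */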
static void __init pl310_of_setup(const struct device_node *np,
                                  __u32 *aux_val, __u32 *aux_mask)
{
        u32 data[3] = { 0, 0, 0 };
        u32 tag[3] = { 0, 0, 0 };
        u32 filter[2] = { 0, 0 };

        of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
        if (tag[0] && tag[1] && tag[2])
                writel_relaxed(
                        ((tag[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
                        ((tag[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
                        ((tag[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
                        l2x0_base + L2X0_TAG_LATENCY_CTRL);

        of_property_read_u32_array(np, "arm,data-latency",
                                   data, ARRAY_SIZE(data));
        if (data[0] && data[1] && data[2])
                writel_relaxed(
                        ((data[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
                        ((data[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
                        ((data[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
                        l2x0_base + L2X0_DATA_LATENCY_CTRL);

        of_property_read_u32_array(np, "arm,filter-ranges",
                                   filter, ARRAY_SIZE(filter));
        if (filter[0] && filter[1]) {
                writel_relaxed(ALIGN(filter[0] + filter[1], SZ_1M),
                               l2x0_base + L2X0_ADDR_FILTER_END);
                writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L2X0_ADDR_FILTER_EN,
                               l2x0_base + L2X0_ADDR_FILTER_START);
        }
}
static const struct of_device_id l2x0_ids[] __initconst = {
        { .compatible = "arm,pl310-cache", .data = pl310_of_setup },
        { .compatible = "arm,l220-cache", .data = l2x0_of_setup },
        { .compatible = "arm,l210-cache", .data = l2x0_of_setup },
        {}
};
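
/*
 * Find the first matching L2 cache node, map it, apply any DT-provided
 * configuration while the cache is still disabled, then hand over to
 * l2x0_init(). Returns 0 on success or a negative errno.
 */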
int __init l2x0_of_init(__u32 aux_val, __u32 aux_mask)
{
        struct device_node *np;
        void (*l2_setup)(const struct device_node *np,
                __u32 *aux_val, __u32 *aux_mask);

        np = of_find_matching_node(NULL, l2x0_ids);
        if (!np)
                return -ENODEV;
        l2x0_base = of_iomap(np, 0);
        if (!l2x0_base)
                return -ENOMEM;

        /* L2 configuration can only be changed if the cache is disabled */
        if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
                l2_setup = of_match_node(l2x0_ids, np)->data;
                if (l2_setup)
                        l2_setup(np, &aux_val, &aux_mask);
        }

        l2x0_init(l2x0_base, aux_val, aux_mask);

        return 0;
}
#endif