/*
 * arch/arm/mm/cache-l2x0.c - L210/L220 cache controller support
 *
 * Copyright (C) 2007 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/io.h>

#include <asm/cacheflush.h>
#include <asm/hardware/cache-l2x0.h>
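
/*
 * All controllers in the L2x0 family (L210/L220/PL310) use a fixed
 * 32-byte cache line; the range operations below step physical
 * addresses in units of this size.
 */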
#define CACHE_LINE_SIZE		32

static void __iomem *l2x0_base;
static DEFINE_SPINLOCK(l2x0_lock);
static uint32_t l2x0_way_mask;	/* Bitmask of active ways */
static uint32_t l2x0_size;

static inline void cache_wait_way(void __iomem *reg, unsigned long mask)
{
	/* wait for cache operation by line or way to complete */
	while (readl_relaxed(reg) & mask)
		;
}

#ifdef CONFIG_CACHE_PL310
static inline void cache_wait(void __iomem *reg, unsigned long mask)
{
	/* cache operations by line are atomic on PL310 */
}
#else
#define cache_wait	cache_wait_way
#endif
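
/*
 * A write to the CACHE_SYNC register drains the controller's buffers,
 * making all previously issued maintenance operations visible to
 * memory. Every caller below holds l2x0_lock around the write/poll
 * pair.
 */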
static inline void cache_sync(void)
{
	void __iomem *base = l2x0_base;

	writel_relaxed(0, base + L2X0_CACHE_SYNC);
	cache_wait(base + L2X0_CACHE_SYNC, 1);
}

static inline void l2x0_clean_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
}

static inline void l2x0_inv_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}

#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)

#define debug_writel(val)	outer_cache.set_debug(val)

static void l2x0_set_debug(unsigned long val)
{
	writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL);
}
#else
/* Optimised out for non-errata case */
static inline void debug_writel(unsigned long val)
{
}

#define l2x0_set_debug	NULL
#endif
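
/*
 * The errata workarounds bracket maintenance operations with
 * debug_writel(0x03)/debug_writel(0x00), setting and then clearing the
 * DWB and DCL bits of the debug control register so that write-backs
 * and linefills are held off while the operation runs. For erratum
 * 588369, Clean & Invalidate by PA may fail to invalidate the line, so
 * the workaround issues a separate clean followed by an invalidate.
 */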
#ifdef CONFIG_PL310_ERRATA_588369
static inline void l2x0_flush_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;

	/* Clean by PA followed by Invalidate by PA */
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}
#else
static inline void l2x0_flush_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_INV_LINE_PA);
}
#endif

static void l2x0_cache_sync(void)
{
	unsigned long flags;

	spin_lock_irqsave(&l2x0_lock, flags);
	cache_sync();
	spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_flush_all(void)
{
	unsigned long flags;

	/* clean all ways */
	spin_lock_irqsave(&l2x0_lock, flags);
	debug_writel(0x03);
	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_INV_WAY);
	cache_wait_way(l2x0_base + L2X0_CLEAN_INV_WAY, l2x0_way_mask);
	cache_sync();
	debug_writel(0x00);
	spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_clean_all(void)
{
	unsigned long flags;

	/* clean all ways */
	spin_lock_irqsave(&l2x0_lock, flags);
	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_WAY);
	cache_wait_way(l2x0_base + L2X0_CLEAN_WAY, l2x0_way_mask);
	cache_sync();
	spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_inv_all(void)
{
	unsigned long flags;

	/* invalidate all ways */
	spin_lock_irqsave(&l2x0_lock, flags);
	/* Invalidating when L2 is enabled is a no-no */
	BUG_ON(readl(l2x0_base + L2X0_CTRL) & 1);
	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
	cache_wait_way(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
	cache_sync();
	spin_unlock_irqrestore(&l2x0_lock, flags);
}
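
/*
 * Invalidating by way is only safe while the controller is disabled:
 * with the L2 enabled it could throw away dirty lines that are still
 * in use, hence the BUG_ON() above. l2x0_init() only calls
 * l2x0_inv_all() before setting the enable bit.
 */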

static void l2x0_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	spin_lock_irqsave(&l2x0_lock, flags);
	if (start & (CACHE_LINE_SIZE - 1)) {
		start &= ~(CACHE_LINE_SIZE - 1);
		debug_writel(0x03);
		l2x0_flush_line(start);
		debug_writel(0x00);
		start += CACHE_LINE_SIZE;
	}

	if (end & (CACHE_LINE_SIZE - 1)) {
		end &= ~(CACHE_LINE_SIZE - 1);
		debug_writel(0x03);
		l2x0_flush_line(end);
		debug_writel(0x00);
	}

	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_inv_line(start);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			spin_unlock_irqrestore(&l2x0_lock, flags);
			spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	cache_sync();
	spin_unlock_irqrestore(&l2x0_lock, flags);
}
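
/*
 * The range operations work through the range in 4096-byte blocks and
 * briefly drop l2x0_lock between blocks so that interrupts are not
 * held off for the whole range. Partial lines at the edges of an
 * invalidate are flushed instead, so bystander data sharing those
 * cache lines is written back rather than discarded. Cleaning or
 * flushing a range at least as large as the cache itself falls back to
 * the cheaper by-way operation.
 */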
static void l2x0_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	if ((end - start) >= l2x0_size) {
		l2x0_clean_all();
		return;
	}

	spin_lock_irqsave(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_clean_line(start);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			spin_unlock_irqrestore(&l2x0_lock, flags);
			spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	cache_sync();
	spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_flush_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	if ((end - start) >= l2x0_size) {
		l2x0_flush_all();
		return;
	}

	spin_lock_irqsave(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		debug_writel(0x03);
		while (start < blk_end) {
			l2x0_flush_line(start);
			start += CACHE_LINE_SIZE;
		}
		debug_writel(0x00);

		if (blk_end < end) {
			spin_unlock_irqrestore(&l2x0_lock, flags);
			spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
	cache_sync();
	spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_disable(void)
{
	unsigned long flags;

	spin_lock_irqsave(&l2x0_lock, flags);
	writel(0, l2x0_base + L2X0_CTRL);
	spin_unlock_irqrestore(&l2x0_lock, flags);
}
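
/*
 * l2x0_init - probe, configure and enable the cache controller
 * @base:	virtual address of the controller's register window
 * @aux_val:	bits to set in the auxiliary control register
 * @aux_mask:	bits of the current AUX_CTRL value to preserve
 *
 * The current AUX_CTRL value is ANDed with @aux_mask and ORed with
 * @aux_val before being written back, so platforms can adjust
 * individual fields without rewriting the whole register.
 */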
void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
{
	__u32 aux;
	__u32 cache_id;
	__u32 way_size = 0;
	int ways;
	const char *type;

	l2x0_base = base;

	cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

	aux &= aux_mask;
	aux |= aux_val;

	/* Determine the number of ways */
	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	case L2X0_CACHE_ID_PART_L310:
		if (aux & (1 << 16))
			ways = 16;
		else
			ways = 8;
		type = "L310";
		break;
	case L2X0_CACHE_ID_PART_L210:
		ways = (aux >> 13) & 0xf;
		type = "L210";
		break;
	default:
		/* Assume unknown chips have 8 ways */
		ways = 8;
		type = "L2x0 series";
		break;
	}

	l2x0_way_mask = (1 << ways) - 1;

	/*
	 * L2 cache size = way size * number of ways
	 */
	way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17;
	way_size = 1 << (way_size + 3);
	l2x0_size = ways * way_size * SZ_1K;

	/*
	 * Check if the l2x0 controller is already enabled. If it is,
	 * we were probably entered from non-secure mode, and writing
	 * the registers below would fault.
	 */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
		/* l2x0 controller is disabled */
		writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL);

		l2x0_inv_all();

		/* enable L2X0 */
		writel_relaxed(1, l2x0_base + L2X0_CTRL);
	}

	outer_cache.inv_range = l2x0_inv_range;
	outer_cache.clean_range = l2x0_clean_range;
	outer_cache.flush_range = l2x0_flush_range;
	outer_cache.sync = l2x0_cache_sync;
	outer_cache.flush_all = l2x0_flush_all;
	outer_cache.inv_all = l2x0_inv_all;
	outer_cache.disable = l2x0_disable;
	outer_cache.set_debug = l2x0_set_debug;

	printk(KERN_INFO "%s cache controller enabled\n", type);
	printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n",
			ways, cache_id, aux, l2x0_size);
}
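
/*
 * Typical use: a machine's init code ioremap()s the controller once and
 * makes a single early call. A minimal sketch, with a hypothetical base
 * address (not taken from any real board file); passing aux_val = 0 and
 * aux_mask = 0xffffffff leaves AUX_CTRL exactly as the boot firmware
 * programmed it:
 *
 *	static int __init board_l2_init(void)
 *	{
 *		void __iomem *l2_base = ioremap(0xfff12000, SZ_4K);
 *
 *		if (!l2_base)
 *			return -ENOMEM;
 *
 *		l2x0_init(l2_base, 0x00000000, 0xffffffff);
 *		return 0;
 *	}
 *	early_initcall(board_l2_init);
 */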