/*
 * arch/sh/kernel/cpu/init.c
 *
 * CPU init code
 *
 * Copyright (C) 2002 - 2009 Paul Mundt
 * Copyright (C) 2003 Richard Curnow
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/log2.h>
#include <asm/mmu_context.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/page.h>
#include <asm/system.h>
#include <asm/cacheflush.h>
#include <asm/cache.h>
#include <asm/elf.h>
#include <asm/io.h>
#include <asm/smp.h>

/*
 * Generic wrapper for command line arguments to disable on-chip
 * peripherals (nofpu, nodsp, and so forth).
 */
#define onchip_setup(x)					\
static int x##_disabled __initdata = !cpu_has_##x;	\
							\
static int __init x##_setup(char *opts)			\
{							\
	x##_disabled = 1;				\
	return 1;					\
}							\
__setup("no" __stringify(x), x##_setup);

onchip_setup(fpu);
onchip_setup(dsp);

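/*
 * Editor's illustration, not part of the original file: for x = fpu,
 * onchip_setup(fpu) above expands to roughly the following, wiring the
 * "nofpu" kernel command line option up to a per-feature disable flag
 * ("no" __stringify(fpu) pastes to the single literal "nofpu"):
 *
 *	static int fpu_disabled __initdata = !cpu_has_fpu;
 *
 *	static int __init fpu_setup(char *opts)
 *	{
 *		fpu_disabled = 1;
 *		return 1;
 *	}
 *	__setup("nofpu", fpu_setup);
 */
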
#ifdef CONFIG_SPECULATIVE_EXECUTION
#define CPUOPM		0xff2f0000
#define CPUOPM_RABD	(1 << 5)

static void __init speculative_execution_init(void)
{
	/* Clear RABD */
	ctrl_outl(ctrl_inl(CPUOPM) & ~CPUOPM_RABD, CPUOPM);

	/* Flush the update */
	(void)ctrl_inl(CPUOPM);
	ctrl_barrier();
}
#else
#define speculative_execution_init()	do { } while (0)
#endif

#ifdef CONFIG_CPU_SH4A
#define EXPMASK			0xff2f0004
#define EXPMASK_RTEDS		(1 << 0)
#define EXPMASK_BRDSSLP		(1 << 1)
#define EXPMASK_MMCAW		(1 << 4)

static void __init expmask_init(void)
{
	unsigned long expmask = __raw_readl(EXPMASK);

	/*
	 * Future proofing.
	 *
	 * Disable support for slottable sleep instruction, non-nop
	 * instructions in the rte delay slot, and associative writes to
	 * the memory-mapped cache array.
	 */
	expmask &= ~(EXPMASK_RTEDS | EXPMASK_BRDSSLP | EXPMASK_MMCAW);

	__raw_writel(expmask, EXPMASK);
	ctrl_barrier();
}
#else
#define expmask_init()	do { } while (0)
#endif

/* 2nd-level cache init */
void __attribute__ ((weak)) l2_cache_init(void)
{
}

/*
 * Generic first-level cache init
 */
#ifdef CONFIG_SUPERH32
static void cache_init(void)
{
	unsigned long ccr, flags;

	jump_to_uncached();	/* run uncached while CCR is being changed */
	ccr = ctrl_inl(CCR);

	/*
	 * At this point we don't know whether the cache is enabled or not -
	 * a bootloader may have enabled it. There are at least 2 things that
	 * could be dirty in the cache at this point:
	 * 1. kernel command line set up by boot loader
	 * 2. spilled registers from the prologue of this function
	 * => before re-initialising the cache, we must do a purge of the whole
	 * cache out to memory for safety. As long as nothing is spilled
	 * during the loop to lines that have already been done, this is safe.
	 */
	if (ccr & CCR_CACHE_ENABLE) {
		unsigned long ways, waysize, addrstart;

		waysize = current_cpu_data.dcache.sets;

#ifdef CCR_CACHE_ORA
		/*
		 * If the OC is already in RAM mode, we only have
		 * half of the entries to flush..
		 */
		if (ccr & CCR_CACHE_ORA)
			waysize >>= 1;
#endif

		waysize <<= current_cpu_data.dcache.entry_shift;

#ifdef CCR_CACHE_EMODE
		/* If EMODE is not set, we only have 1 way to flush. */
		if (!(ccr & CCR_CACHE_EMODE))
			ways = 1;
		else
#endif
			ways = current_cpu_data.dcache.ways;

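		/*
		 * Editor's note, not in the original: each write of 0 to
		 * the operand cache address array below clears a line's
		 * valid and dirty bits, with a dirty line written back to
		 * memory first, so walking every line of every way
		 * implements the whole-cache purge described above.
		 */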
		addrstart = CACHE_OC_ADDRESS_ARRAY;
		do {
			unsigned long addr;

			for (addr = addrstart;
			     addr < addrstart + waysize;
			     addr += current_cpu_data.dcache.linesz)
				ctrl_outl(0, addr);

			addrstart += current_cpu_data.dcache.way_incr;
		} while (--ways);
	}

	/*
	 * Default CCR values .. enable the caches
	 * and invalidate them immediately..
	 */
	flags = CCR_CACHE_ENABLE | CCR_CACHE_INVALIDATE;

#ifdef CCR_CACHE_EMODE
	/* Force EMODE if possible */
	if (current_cpu_data.dcache.ways > 1)
		flags |= CCR_CACHE_EMODE;
	else
		flags &= ~CCR_CACHE_EMODE;
#endif

#if defined(CONFIG_CACHE_WRITETHROUGH)
	/* Write-through */
	flags |= CCR_CACHE_WT;
#elif defined(CONFIG_CACHE_WRITEBACK)
	/* Write-back */
	flags |= CCR_CACHE_CB;
#else
	/* Off */
	flags &= ~CCR_CACHE_ENABLE;
#endif

	l2_cache_init();

	ctrl_outl(flags, CCR);
	back_to_cached();
}
#else
#define cache_init()	do { } while (0)
#endif

#define CSHAPE(totalsize, linesize, assoc) \
	((totalsize & ~0xff) | (linesize << 4) | assoc)

#define CACHE_DESC_SHAPE(desc)	\
	CSHAPE((desc).way_size * (desc).ways, ilog2((desc).linesz), (desc).ways)

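/*
 * Editor's illustration, not in the original: for a hypothetical 2-way
 * D-cache with 16KiB ways and 32-byte lines, CACHE_DESC_SHAPE() packs
 * the shape word as
 *
 *	CSHAPE(16384 * 2, ilog2(32), 2)
 *	  = (32768 & ~0xff) | (5 << 4) | 2
 *	  = 0x8000 | 0x50 | 0x2
 *	  = 0x8052
 *
 * i.e. the total size in the upper bits, log2(line size) in bits 7:4,
 * and the associativity in the low nibble.
 */
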
static void detect_cache_shape(void)
{
	l1d_cache_shape = CACHE_DESC_SHAPE(current_cpu_data.dcache);

	if (current_cpu_data.dcache.flags & SH_CACHE_COMBINED)
		l1i_cache_shape = l1d_cache_shape;
	else
		l1i_cache_shape = CACHE_DESC_SHAPE(current_cpu_data.icache);

	if (current_cpu_data.flags & CPU_HAS_L2_CACHE)
		l2_cache_shape = CACHE_DESC_SHAPE(current_cpu_data.scache);
	else
		l2_cache_shape = -1; /* No S-cache */
}

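/*
 * Editor's note, an assumption based on the wider sh tree rather than
 * this file: the l1d/l1i/l2 cache shape values computed above are the
 * ones exported to userspace through the ELF auxiliary vector
 * (AT_L1D_CACHESHAPE and friends).
 */
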
static void __init fpu_init(void)
{
	/* Disable the FPU */
	if (fpu_disabled && (current_cpu_data.flags & CPU_HAS_FPU)) {
		printk("FPU Disabled\n");
		current_cpu_data.flags &= ~CPU_HAS_FPU;
	}

	disable_fpu();		/* set SR.FD so any FPU use traps */
	clear_used_math();	/* mark current as having no FPU state */
}

#ifdef CONFIG_SH_DSP
static void __init release_dsp(void)
{
	unsigned long sr;

	/* Clear SR.DSP bit */
	__asm__ __volatile__ (
		"stc\tsr, %0\n\t"
		"and\t%1, %0\n\t"
		"ldc\t%0, sr\n\t"
		: "=&r" (sr)
		: "r" (~SR_DSP)
	);
}

static void __init dsp_init(void)
{
	unsigned long sr;

	/*
	 * Set the SR.DSP bit, wait for one instruction, and then read
	 * back the SR value.
	 */
	__asm__ __volatile__ (
		"stc\tsr, %0\n\t"
		"or\t%1, %0\n\t"
		"ldc\t%0, sr\n\t"
		"nop\n\t"
		"stc\tsr, %0\n\t"
		: "=&r" (sr)
		: "r" (SR_DSP)
	);

	/* If the DSP bit is still set, this CPU has a DSP */
	if (sr & SR_DSP)
		current_cpu_data.flags |= CPU_HAS_DSP;

	/* Disable the DSP */
	if (dsp_disabled && (current_cpu_data.flags & CPU_HAS_DSP)) {
		printk("DSP Disabled\n");
		current_cpu_data.flags &= ~CPU_HAS_DSP;
	}

	/* Now that we've determined the DSP status, clear the DSP bit. */
	release_dsp();
}

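/*
 * Editor's note, not in the original: on parts without a DSP the SR.DSP
 * bit cannot be set at all, so the set-and-read-back sequence in
 * dsp_init() reads back 0 and CPU_HAS_DSP is left clear.
 */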
#else
static inline void __init dsp_init(void) { }
#endif /* CONFIG_SH_DSP */

/**
 * sh_cpu_init
 *
 * This is our initial entry point for each CPU, and is invoked on the
 * boot CPU prior to calling start_kernel(). For SMP, a combination of
 * this and start_secondary() will bring up each processor to a ready
 * state prior to hand-forking the idle loop.
 *
 * We do all of the basic processor init here, including setting up
 * the caches, FPU, DSP, etc. By the time start_kernel() is hit (and
 * subsequently platform_setup()) things like determining the CPU
 * subtype and initial configuration will all be done.
 *
 * Each processor family is still responsible for doing its own probing
 * and cache configuration in detect_cpu_and_cache_system().
 */
asmlinkage void __init sh_cpu_init(void)
{
	current_thread_info()->cpu = hard_smp_processor_id();

	/* First, probe the CPU */
	detect_cpu_and_cache_system();

	if (current_cpu_data.type == CPU_SH_NONE)
		panic("Unknown CPU");

	/* First setup the rest of the I-cache info */
	current_cpu_data.icache.entry_mask = current_cpu_data.icache.way_incr -
				      current_cpu_data.icache.linesz;

	current_cpu_data.icache.way_size = current_cpu_data.icache.sets *
				      current_cpu_data.icache.linesz;

	/* And the D-cache too */
	current_cpu_data.dcache.entry_mask = current_cpu_data.dcache.way_incr -
				      current_cpu_data.dcache.linesz;

	current_cpu_data.dcache.way_size = current_cpu_data.dcache.sets *
				      current_cpu_data.dcache.linesz;

	/* Init the cache */
	cache_init();

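	/*
	 * Editor's note, not in the original: aligning shared mappings to
	 * the larger of the D-cache way size and the page size makes
	 * aliases of the same physical page index to the same cache
	 * lines, avoiding aliasing problems on virtually-indexed caches.
	 */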
	if (raw_smp_processor_id() == 0) {
		shm_align_mask = max_t(unsigned long,
				       current_cpu_data.dcache.way_size - 1,
				       PAGE_SIZE - 1);

		/* Boot CPU sets the cache shape */
		detect_cache_shape();
	}

	fpu_init();
	dsp_init();

	/*
	 * Initialize the per-CPU ASID cache very early, since the
	 * TLB flushing routines depend on this being setup.
	 */
	current_cpu_data.asid_cache = NO_CONTEXT;

	speculative_execution_init();
	expmask_init();

	/*
	 * The boot processor sets up the FP and extended state context info.
	 */
	if (raw_smp_processor_id() == 0)
		init_thread_xstate();
}