#ifndef __ASM_ARM_SYSTEM_H
#define __ASM_ARM_SYSTEM_H

#ifdef __KERNEL__

#define CPU_ARCH_UNKNOWN	0
#define CPU_ARCH_ARMv3		1
#define CPU_ARCH_ARMv4		2
#define CPU_ARCH_ARMv4T		3
#define CPU_ARCH_ARMv5		4
#define CPU_ARCH_ARMv5T		5
#define CPU_ARCH_ARMv5TE	6
#define CPU_ARCH_ARMv5TEJ	7
#define CPU_ARCH_ARMv6		8
#define CPU_ARCH_ARMv7		9
/*
 * CR1 bits (CP#15 CR1)
 */
#define CR_M	(1 << 0)	/* MMU enable				*/
#define CR_A	(1 << 1)	/* Alignment abort enable		*/
#define CR_C	(1 << 2)	/* Dcache enable			*/
#define CR_W	(1 << 3)	/* Write buffer enable			*/
#define CR_P	(1 << 4)	/* 32-bit exception handler		*/
#define CR_D	(1 << 5)	/* 32-bit data address range		*/
#define CR_L	(1 << 6)	/* Implementation defined		*/
#define CR_B	(1 << 7)	/* Big endian				*/
#define CR_S	(1 << 8)	/* System MMU protection		*/
#define CR_R	(1 << 9)	/* ROM MMU protection			*/
#define CR_F	(1 << 10)	/* Implementation defined		*/
#define CR_Z	(1 << 11)	/* Implementation defined		*/
#define CR_I	(1 << 12)	/* Icache enable			*/
#define CR_V	(1 << 13)	/* Vectors relocated to 0xffff0000	*/
#define CR_RR	(1 << 14)	/* Round Robin cache replacement	*/
#define CR_L4	(1 << 15)	/* LDR pc can set T bit			*/
#define CR_DT	(1 << 16)
#define CR_IT	(1 << 18)
#define CR_ST	(1 << 19)
#define CR_FI	(1 << 21)	/* Fast interrupt (lower latency mode)	*/
#define CR_U	(1 << 22)	/* Unaligned access operation		*/
#define CR_XP	(1 << 23)	/* Extended page tables			*/
#define CR_VE	(1 << 24)	/* Vectored interrupts			*/
#define CR_EE	(1 << 25)	/* Exception (Big) Endian		*/
#define CR_TRE	(1 << 28)	/* TEX remap enable			*/
#define CR_AFE	(1 << 29)	/* Access flag enable			*/
#define CR_TE	(1 << 30)	/* Thumb exception enable		*/
/*
 * This is used to ensure the compiler did actually allocate the register we
 * asked it for in some inline assembly sequences.  Apparently we can't trust
 * the compiler from one version to another so a bit of paranoia won't hurt.
 * This string is meant to be concatenated with the inline asm string and
 * will cause compilation to stop on mismatch.
 * (for details, see gcc PR 15089)
 */
#define __asmeq(x, y)  ".ifnc " x "," y " ; .err ; .endif\n\t"
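/*
 * Illustrative sketch (not part of the original header): __asmeq() lines
 * are concatenated in front of an asm template, so assembly fails if GCC
 * did not keep an operand in the register we pinned it to.  "ret" and
 * "do_something" below are hypothetical.
 *
 *	register unsigned long ret asm("r0");
 *	asm volatile(
 *		__asmeq("%0", "r0")
 *		"bl	do_something"
 *		: "=r" (ret) : : "lr", "cc");
 */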
#ifndef __ASSEMBLY__

#include <linux/compiler.h>
#include <linux/linkage.h>
#include <linux/irqflags.h>

#include <asm/outercache.h>

struct thread_info;
struct task_struct;

/* information about the system we're running on */
extern unsigned int system_rev;
extern unsigned int system_serial_low;
extern unsigned int system_serial_high;
extern unsigned int mem_fclk_21285;

struct pt_regs;

void die(const char *msg, struct pt_regs *regs, int err);

struct siginfo;
void arm_notify_die(const char *str, struct pt_regs *regs, struct siginfo *info,
		unsigned long err, unsigned long trap);
#ifdef CONFIG_ARM_LPAE
#define FAULT_CODE_ALIGNMENT	33
#define FAULT_CODE_DEBUG	34
#else
#define FAULT_CODE_ALIGNMENT	1
#define FAULT_CODE_DEBUG	2
#endif
void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
				       struct pt_regs *),
		     int sig, int code, const char *name);

void hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int,
				       struct pt_regs *),
		      int sig, int code, const char *name);
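/*
 * Usage sketch (illustrative only): platform code can claim a fault
 * status code at init time.  "my_abort_handler" and the code 22 below
 * are hypothetical; returning 0 means the fault was handled.
 *
 *	static int my_abort_handler(unsigned long addr, unsigned int fsr,
 *				    struct pt_regs *regs)
 *	{
 *		return 0;
 *	}
 *
 *	hook_fault_code(22, my_abort_handler, SIGBUS, 0,
 *			"imprecise external abort");
 */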
#define xchg(ptr,x) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
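/*
 * Usage sketch (illustrative only): xchg() atomically swaps in a new
 * value and returns the previous one.  "flag" is a hypothetical variable.
 *
 *	static unsigned int flag;
 *	...
 *	if (xchg(&flag, 1) == 0) {
 *		... we were first; flag is now set ...
 *	}
 */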
extern asmlinkage void c_backtrace(unsigned long fp, int pmode);

struct mm_struct;
extern void show_pte(struct mm_struct *mm, unsigned long addr);
extern void __show_regs(struct pt_regs *);

extern int __pure cpu_architecture(void);
extern void cpu_init(void);

void arm_machine_restart(char mode, const char *cmd);
void soft_restart(unsigned long);
extern void (*arm_pm_restart)(char str, const char *cmd);
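/*
 * Usage sketch (illustrative only): callers compare the result of
 * cpu_architecture() against the CPU_ARCH_* constants defined above.
 *
 *	if (cpu_architecture() >= CPU_ARCH_ARMv6)
 *		... safe to use ldrex/strex based code paths ...
 */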
#define UDBG_UNDEFINED	(1 << 0)
#define UDBG_SYSCALL	(1 << 1)
#define UDBG_BADABORT	(1 << 2)
#define UDBG_SEGV	(1 << 3)
#define UDBG_BUS	(1 << 4)
#define UDBG_SEGV_SHORT	(1 << 8)

extern unsigned int user_debug;
#if __LINUX_ARM_ARCH__ >= 4
#define vectors_high()	(cr_alignment & CR_V)
#else
#define vectors_high()	(0)
#endif
#if __LINUX_ARM_ARCH__ >= 7 ||		\
	(__LINUX_ARM_ARCH__ == 6 && defined(CONFIG_CPU_32v6K))
#define sev()	__asm__ __volatile__ ("sev" : : : "memory")
#define wfe()	__asm__ __volatile__ ("wfe" : : : "memory")
#define wfi()	__asm__ __volatile__ ("wfi" : : : "memory")
#endif
#if __LINUX_ARM_ARCH__ >= 7
#define isb() __asm__ __volatile__ ("isb" : : : "memory")
#define dsb() __asm__ __volatile__ ("dsb" : : : "memory")
#define dmb() __asm__ __volatile__ ("dmb" : : : "memory")
#elif defined(CONFIG_CPU_XSC3) || __LINUX_ARM_ARCH__ == 6
#define isb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
				    : : "r" (0) : "memory")
#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
				    : : "r" (0) : "memory")
#define dmb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \
				    : : "r" (0) : "memory")
#elif defined(CONFIG_CPU_FA526)
#define isb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
				    : : "r" (0) : "memory")
#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
				    : : "r" (0) : "memory")
#define dmb() __asm__ __volatile__ ("" : : : "memory")
#else
#define isb() __asm__ __volatile__ ("" : : : "memory")
#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
				    : : "r" (0) : "memory")
#define dmb() __asm__ __volatile__ ("" : : : "memory")
#endif
#ifdef CONFIG_ARCH_HAS_BARRIERS
#include <mach/barriers.h>
#elif defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP)
#define mb()		do { dsb(); outer_sync(); } while (0)
#define rmb()		dsb()
#define wmb()		mb()
#else
#include <asm/memory.h>
#define mb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
#define rmb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
#define wmb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
#endif
#ifndef CONFIG_SMP
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#else
#define smp_mb()	dmb()
#define smp_rmb()	dmb()
#define smp_wmb()	dmb()
#endif
#define read_barrier_depends()		do { } while(0)
#define smp_read_barrier_depends()	do { } while(0)

#define set_mb(var, value)	do { var = value; smp_mb(); } while (0)
#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");
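/*
 * Usage sketch (illustrative only): smp_wmb()/smp_rmb() pair up across
 * CPUs.  "data" and "ready" are hypothetical shared variables.
 *
 *	writer:				reader:
 *		data = compute();		while (!ready)
 *		smp_wmb();				cpu_relax();
 *		ready = 1;			smp_rmb();
 *						use(data);
 */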
extern unsigned long cr_no_alignment;	/* defined in entry-armv.S */
extern unsigned long cr_alignment;	/* defined in entry-armv.S */
static inline unsigned int get_cr(void)
{
	unsigned int val;
	asm("mrc p15, 0, %0, c1, c0, 0	@ get CR" : "=r" (val) : : "cc");
	return val;
}

static inline void set_cr(unsigned int val)
{
	asm volatile("mcr p15, 0, %0, c1, c0, 0	@ set CR"
	  : : "r" (val) : "cc");
	isb();
}

#ifndef CONFIG_SMP
extern void adjust_cr(unsigned long mask, unsigned long set);
#endif
#define CPACC_FULL(n)		(3 << (n * 2))
#define CPACC_SVC(n)		(1 << (n * 2))
#define CPACC_DISABLE(n)	(0 << (n * 2))
static inline unsigned int get_copro_access(void)
{
	unsigned int val;
	asm("mrc p15, 0, %0, c1, c0, 2 @ get copro access"
	  : "=r" (val) : : "cc");
	return val;
}

static inline void set_copro_access(unsigned int val)
{
	asm volatile("mcr p15, 0, %0, c1, c0, 2 @ set copro access"
	  : : "r" (val) : "cc");
	isb();
}
/*
 * switch_mm() may do a full cache flush over the context switch,
 * so enable interrupts over the context switch to avoid high
 * latency.
 */
#define __ARCH_WANT_INTERRUPTS_ON_CTXSW
/*
 * switch_to(prev, next) should switch from task `prev' to `next';
 * `prev' will never be the same as `next'.  schedule() itself
 * contains the memory barrier to tell GCC not to cache `current'.
 */
extern struct task_struct *__switch_to(struct task_struct *, struct thread_info *, struct thread_info *);

#define switch_to(prev,next,last)					\
do {									\
	last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \
} while (0)
#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
/*
 * On the StrongARM, "swp" is terminally broken since it bypasses the
 * cache totally.  This means that the cache becomes inconsistent, and,
 * since we use normal loads/stores as well, this is really bad.
 * Typically, this causes oopsen in filp_close, but could have other,
 * more disastrous effects.  There are two work-arounds:
 *  1. Disable interrupts and emulate the atomic swap
 *  2. Clean the cache, perform atomic swap, flush the cache
 *
 * We choose (1) since it's the "easiest" to achieve here and is not
 * dependent on the processor type.
 *
 * NOTE that this solution won't work on an SMP system, so explicitly
 * forbid it here.
 */
#define swp_is_buggy
#endif
static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	extern void __bad_xchg(volatile void *, int);
	unsigned long ret;
#ifdef swp_is_buggy
	unsigned long flags;
#endif
#if __LINUX_ARM_ARCH__ >= 6
	unsigned int tmp;
#endif

	smp_mb();

	switch (size) {
#if __LINUX_ARM_ARCH__ >= 6
	case 1:
		asm volatile("@	__xchg1\n"
		"1:	ldrexb	%0, [%3]\n"
		"	strexb	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 4:
		asm volatile("@	__xchg4\n"
		"1:	ldrex	%0, [%3]\n"
		"	strex	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#elif defined(swp_is_buggy)
#ifdef CONFIG_SMP
#error SMP is not supported on this platform
#endif
	case 1:
		raw_local_irq_save(flags);
		ret = *(volatile unsigned char *)ptr;
		*(volatile unsigned char *)ptr = x;
		raw_local_irq_restore(flags);
		break;

	case 4:
		raw_local_irq_save(flags);
		ret = *(volatile unsigned long *)ptr;
		*(volatile unsigned long *)ptr = x;
		raw_local_irq_restore(flags);
		break;
#else
	case 1:
		asm volatile("@	__xchg1\n"
		"	swpb	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 4:
		asm volatile("@	__xchg4\n"
		"	swp	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#endif
	default:
		__bad_xchg(ptr, size), ret = 0;
		break;
	}
	smp_mb();

	return ret;
}
extern void disable_hlt(void);
extern void enable_hlt(void);

void cpu_idle_wait(void);

#include <asm-generic/cmpxchg-local.h>
#if __LINUX_ARM_ARCH__ < 6
/* min ARCH < ARMv6 */

#ifdef CONFIG_SMP
#error "SMP is not supported on this platform"
#endif

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt the current CPU. Always
 * make them available.
 */
#define cmpxchg_local(ptr, o, n)					       \
	((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
			(unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))

#ifndef CONFIG_SMP
#include <asm-generic/cmpxchg.h>
#endif
#else	/* min ARCH >= ARMv6 */

extern void __bad_cmpxchg(volatile void *ptr, int size);

/*
 * cmpxchg only supports 32-bit operands on ARMv6.
 */
static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	unsigned long oldval, res;

	switch (size) {
#ifndef CONFIG_CPU_V6	/* min ARCH >= ARMv6K */
	case 1:
		do {
			asm volatile("@ __cmpxchg1\n"
			"	ldrexb	%1, [%2]\n"
			"	mov	%0, #0\n"
			"	teq	%1, %3\n"
			"	strexbeq %0, %4, [%2]\n"
				: "=&r" (res), "=&r" (oldval)
				: "r" (ptr), "Ir" (old), "r" (new)
				: "memory", "cc");
		} while (res);
		break;
	case 2:
		do {
			asm volatile("@ __cmpxchg2\n"
			"	ldrexh	%1, [%2]\n"
			"	mov	%0, #0\n"
			"	teq	%1, %3\n"
			"	strexheq %0, %4, [%2]\n"
				: "=&r" (res), "=&r" (oldval)
				: "r" (ptr), "Ir" (old), "r" (new)
				: "memory", "cc");
		} while (res);
		break;
#endif
	case 4:
		do {
			asm volatile("@ __cmpxchg4\n"
			"	ldrex	%1, [%2]\n"
			"	mov	%0, #0\n"
			"	teq	%1, %3\n"
			"	strexeq	%0, %4, [%2]\n"
				: "=&r" (res), "=&r" (oldval)
				: "r" (ptr), "Ir" (old), "r" (new)
				: "memory", "cc");
		} while (res);
		break;
	default:
		__bad_cmpxchg(ptr, size);
		oldval = 0;
	}

	return oldval;
}
static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
					 unsigned long new, int size)
{
	unsigned long ret;

	smp_mb();
	ret = __cmpxchg(ptr, old, new, size);
	smp_mb();

	return ret;
}

#define cmpxchg(ptr,o,n)						\
	((__typeof__(*(ptr)))__cmpxchg_mb((ptr),			\
					  (unsigned long)(o),		\
					  (unsigned long)(n),		\
					  sizeof(*(ptr))))
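/*
 * Usage sketch (illustrative only): the usual read-modify-write retry
 * loop on top of cmpxchg().  "v" is a hypothetical shared counter.
 *
 *	unsigned long old, new;
 *	do {
 *		old = v;
 *		new = old + 1;
 *	} while (cmpxchg(&v, old, new) != old);
 */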
static inline unsigned long __cmpxchg_local(volatile void *ptr,
					    unsigned long old,
					    unsigned long new, int size)
{
	unsigned long ret;

	switch (size) {
#ifdef CONFIG_CPU_V6	/* min ARCH == ARMv6 */
	case 1:
	case 2:
		ret = __cmpxchg_local_generic(ptr, old, new, size);
		break;
#endif
	default:
		ret = __cmpxchg(ptr, old, new, size);
	}

	return ret;
}

#define cmpxchg_local(ptr,o,n)						\
	((__typeof__(*(ptr)))__cmpxchg_local((ptr),			\
				       (unsigned long)(o),		\
				       (unsigned long)(n),		\
				       sizeof(*(ptr))))
#ifndef CONFIG_CPU_V6	/* min ARCH >= ARMv6K */

/*
 * Note : ARMv7-M (currently unsupported by Linux) does not support
 * ldrexd/strexd. If ARMv7-M is ever supported by the Linux kernel, it should
 * not be allowed to use __cmpxchg64.
 */
static inline unsigned long long __cmpxchg64(volatile void *ptr,
					     unsigned long long old,
					     unsigned long long new)
{
	register unsigned long long oldval asm("r0");
	register unsigned long long __old asm("r2") = old;
	register unsigned long long __new asm("r4") = new;
	unsigned long res;

	do {
		asm volatile("@ __cmpxchg8\n"
		"	ldrexd	%1, %H1, [%2]\n"
		"	mov	%0, #0\n"
		"	teq	%1, %3\n"
		"	teqeq	%H1, %H3\n"
		"	strexdeq %0, %4, %H4, [%2]\n"
			: "=&r" (res), "=&r" (oldval)
			: "r" (ptr), "Ir" (__old), "r" (__new)
			: "memory", "cc");
	} while (res);

	return oldval;
}
static inline unsigned long long __cmpxchg64_mb(volatile void *ptr,
						unsigned long long old,
						unsigned long long new)
{
	unsigned long long ret;

	smp_mb();
	ret = __cmpxchg64(ptr, old, new);
	smp_mb();

	return ret;
}
#define cmpxchg64(ptr,o,n)						\
	((__typeof__(*(ptr)))__cmpxchg64_mb((ptr),			\
					    (unsigned long long)(o),	\
					    (unsigned long long)(n)))

#define cmpxchg64_local(ptr,o,n)					\
	((__typeof__(*(ptr)))__cmpxchg64((ptr),				\
					 (unsigned long long)(o),	\
					 (unsigned long long)(n)))
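/*
 * Usage sketch (illustrative only): cmpxchg64() is the 64-bit variant,
 * built on ldrexd/strexd.  "seq" is a hypothetical 64-bit counter.
 *
 *	unsigned long long old = seq;
 *	if (cmpxchg64(&seq, old, old + 1) == old)
 *		... the update won the race ...
 */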
#else /* min ARCH == ARMv6 */

#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))

#endif

#endif	/* __LINUX_ARM_ARCH__ >= 6 */

#endif /* __ASSEMBLY__ */

#define arch_align_stack(x) (x)

#endif /* __KERNEL__ */

#endif