sh: nommu: Support building without an uncached mapping.
author	Paul Mundt <lethal@linux-sh.org>
	Thu, 4 Nov 2010 03:46:19 +0000 (12:46 +0900)
committer	Paul Mundt <lethal@linux-sh.org>
	Thu, 4 Nov 2010 03:46:19 +0000 (12:46 +0900)
Now that nommu selects 32BIT we run into the situation where SH-2A
supports an uncached identity mapping by way of the BSC, while the SH-2
does not. This provides stubs for the PC manglers and tidies up some of
the system*.h mess in the process.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
arch/sh/include/asm/system.h
arch/sh/include/asm/system_32.h
arch/sh/include/asm/system_64.h
arch/sh/include/asm/uncached.h

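For context, jump_to_uncached() and back_to_cached() bracket code that
reprograms the caches or TLB, so that the instructions doing the
reprogramming are fetched through the uncached alias rather than through
the cache being manipulated. A minimal sketch of a typical caller follows;
CCR_ADDR and CCR_FLUSH_BITS are hypothetical placeholders, not the real
register definitions, and interrupt handling is elided:

	/*
	 * Illustrative sketch only: bracket a cache-controller update
	 * with the uncached jump so the code doing the write is not
	 * itself running out of the cache it modifies. CCR_ADDR and
	 * CCR_FLUSH_BITS are placeholder names.
	 */
	static void example_flush_all(void)
	{
		unsigned long ccr;

		jump_to_uncached();	/* continue via the uncached alias */

		ccr = __raw_readl(CCR_ADDR);
		__raw_writel(ccr | CCR_FLUSH_BITS, CCR_ADDR);

		back_to_cached();	/* jump back to the cached mapping */
	}

On parts without an uncached mapping (such as SH-2), the stubs added by
this patch make both macros expand to empty do { } while (0) statements,
so callers like the above compile unchanged.
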
diff --git a/arch/sh/include/asm/system.h b/arch/sh/include/asm/system.h
index 1f1af5a..10c8b18 100644
--- a/arch/sh/include/asm/system.h
+++ b/arch/sh/include/asm/system.h
@@ -10,6 +10,7 @@
 #include <linux/compiler.h>
 #include <linux/linkage.h>
 #include <asm/types.h>
+#include <asm/uncached.h>
 
 #define AT_VECTOR_SIZE_ARCH 5 /* entries in ARCH_DLINFO */
 
@@ -137,9 +138,6 @@ extern unsigned int instruction_size(unsigned int insn);
 #define instruction_size(insn) (4)
 #endif
 
-extern unsigned long cached_to_uncached;
-extern unsigned long uncached_size;
-
 void per_cpu_trap_init(void);
 void default_idle(void);
 void cpu_idle_wait(void);
diff --git a/arch/sh/include/asm/system_32.h b/arch/sh/include/asm/system_32.h
index c941b27..a4ad1cd 100644
--- a/arch/sh/include/asm/system_32.h
+++ b/arch/sh/include/asm/system_32.h
@@ -145,42 +145,6 @@ do {                                                               \
                __restore_dsp(prev);                            \
 } while (0)
 
-/*
- * Jump to uncached area.
- * When handling TLB or caches, we need to do it from an uncached area.
- */
-#define jump_to_uncached()                     \
-do {                                           \
-       unsigned long __dummy;                  \
-                                               \
-       __asm__ __volatile__(                   \
-               "mova   1f, %0\n\t"             \
-               "add    %1, %0\n\t"             \
-               "jmp    @%0\n\t"                \
-               " nop\n\t"                      \
-               ".balign 4\n"                   \
-               "1:"                            \
-               : "=&z" (__dummy)               \
-               : "r" (cached_to_uncached));    \
-} while (0)
-
-/*
- * Back to cached area.
- */
-#define back_to_cached()                               \
-do {                                                   \
-       unsigned long __dummy;                          \
-       ctrl_barrier();                                 \
-       __asm__ __volatile__(                           \
-               "mov.l  1f, %0\n\t"                     \
-               "jmp    @%0\n\t"                        \
-               " nop\n\t"                              \
-               ".balign 4\n"                           \
-               "1:     .long 2f\n"                     \
-               "2:"                                    \
-               : "=&r" (__dummy));                     \
-} while (0)
-
 #ifdef CONFIG_CPU_HAS_SR_RB
 #define lookup_exception_vector()      \
 ({                                     \
diff --git a/arch/sh/include/asm/system_64.h b/arch/sh/include/asm/system_64.h
index 3633864..8593bc8 100644
--- a/arch/sh/include/asm/system_64.h
+++ b/arch/sh/include/asm/system_64.h
@@ -34,9 +34,6 @@ do {                                                          \
                              &next->thread);                   \
 } while (0)
 
-#define jump_to_uncached()     do { } while (0)
-#define back_to_cached()       do { } while (0)
-
 #define __icbi(addr)   __asm__ __volatile__ ( "icbi %0, 0\n\t" : : "r" (addr))
 #define __ocbp(addr)   __asm__ __volatile__ ( "ocbp %0, 0\n\t" : : "r" (addr))
 #define __ocbi(addr)   __asm__ __volatile__ ( "ocbi %0, 0\n\t" : : "r" (addr))
diff --git a/arch/sh/include/asm/uncached.h b/arch/sh/include/asm/uncached.h
index e3419f9..6f8816b 100644
--- a/arch/sh/include/asm/uncached.h
+++ b/arch/sh/include/asm/uncached.h
@@ -4,15 +4,55 @@
 #include <linux/bug.h>
 
 #ifdef CONFIG_UNCACHED_MAPPING
+extern unsigned long cached_to_uncached;
+extern unsigned long uncached_size;
 extern unsigned long uncached_start, uncached_end;
 
 extern int virt_addr_uncached(unsigned long kaddr);
 extern void uncached_init(void);
 extern void uncached_resize(unsigned long size);
+
+/*
+ * Jump to uncached area.
+ * When handling TLB or caches, we need to do it from an uncached area.
+ */
+#define jump_to_uncached()                     \
+do {                                           \
+       unsigned long __dummy;                  \
+                                               \
+       __asm__ __volatile__(                   \
+               "mova   1f, %0\n\t"             \
+               "add    %1, %0\n\t"             \
+               "jmp    @%0\n\t"                \
+               " nop\n\t"                      \
+               ".balign 4\n"                   \
+               "1:"                            \
+               : "=&z" (__dummy)               \
+               : "r" (cached_to_uncached));    \
+} while (0)
+
+/*
+ * Back to cached area.
+ */
+#define back_to_cached()                               \
+do {                                                   \
+       unsigned long __dummy;                          \
+       ctrl_barrier();                                 \
+       __asm__ __volatile__(                           \
+               "mov.l  1f, %0\n\t"                     \
+               "jmp    @%0\n\t"                        \
+               " nop\n\t"                              \
+               ".balign 4\n"                           \
+               "1:     .long 2f\n"                     \
+               "2:"                                    \
+               : "=&r" (__dummy));                     \
+} while (0)
 #else
 #define virt_addr_uncached(kaddr)      (0)
 #define uncached_init()                        do { } while (0)
 #define uncached_resize(size)          BUG()
+#define jump_to_uncached()             do { } while (0)
+#define back_to_cached()               do { } while (0)
 #endif
 
 #endif /* __ASM_SH_UNCACHED_H */
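
As a footnote, cached_to_uncached is simply the additive offset between
the two aliases: the jump_to_uncached() asm above applies it to the
program counter (mova 1f / add / jmp), and the same translation works
for data addresses. A hypothetical helper to illustrate (to_uncached()
is not part of this patch):

	/*
	 * Hypothetical illustration: translate a cached kernel address
	 * to its uncached alias using the exported offset, mirroring
	 * what jump_to_uncached() does for the PC.
	 */
	static inline void *to_uncached(void *cached)
	{
		return (void *)((unsigned long)cached + cached_to_uncached);
	}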