sh64: fix up memory offset calculation.
author: Paul Mundt <lethal@linux-sh.org>
Fri, 12 Feb 2010 06:41:45 +0000 (15:41 +0900)
committer: Paul Mundt <lethal@linux-sh.org>
Fri, 12 Feb 2010 06:41:45 +0000 (15:41 +0900)
The linker script offsets were broken by the recent 29/32-bit
integration, so this fixes them up for sh64.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
arch/sh/include/asm/vmlinux.lds.h
arch/sh/kernel/vmlinux.lds.S

index 244ec4a..d58ad49 100644 (file)
 #define DWARF_EH_FRAME
 #endif
 
+#ifdef CONFIG_SUPERH64
+#define EXTRA_TEXT             \
+       *(.text64)              \
+       *(.text..SHmedia32)
+#else
+#define EXTRA_TEXT
+#endif
+
 #endif /* __ASM_SH_VMLINUX_LDS_H */
index f0bc6b8..dcf4891 100644 (file)
@@ -3,10 +3,12 @@
  * Written by Niibe Yutaka and Paul Mundt
  */
 #ifdef CONFIG_SUPERH64
-#define LOAD_OFFSET    CONFIG_PAGE_OFFSET
+#define LOAD_OFFSET    PAGE_OFFSET
+#define MEMORY_OFFSET  __MEMORY_START
 OUTPUT_ARCH(sh:sh5)
 #else
 #define LOAD_OFFSET    0
+#define MEMORY_OFFSET  0
 OUTPUT_ARCH(sh)
 #endif
 
@@ -14,16 +16,10 @@ OUTPUT_ARCH(sh)
 #include <asm/cache.h>
 #include <asm/vmlinux.lds.h>
 
-#if defined(CONFIG_32BIT) && !defined(CONFIG_PMB_LEGACY)
-#define MEMORY_OFFSET  0
-#else
-#define MEMORY_OFFSET  (CONFIG_MEMORY_START & 0x1fffffff)
-#endif
-
 ENTRY(_start)
 SECTIONS
 {
-       . = CONFIG_PAGE_OFFSET + MEMORY_OFFSET + CONFIG_ZERO_PAGE_OFFSET;
+       . = PAGE_OFFSET + MEMORY_OFFSET + CONFIG_ZERO_PAGE_OFFSET;
 
        _text = .;              /* Text and read-only data */
 
@@ -34,12 +30,7 @@ SECTIONS
        .text : AT(ADDR(.text) - LOAD_OFFSET) {
                HEAD_TEXT
                TEXT_TEXT
-
-#ifdef CONFIG_SUPERH64
-               *(.text64)
-               *(.text..SHmedia32)
-#endif
-
+               EXTRA_TEXT
                SCHED_TEXT
                LOCK_TEXT
                KPROBES_TEXT