/*
 * ld script for the x86 kernel
 *
 * Historic 32-bit version written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 *
 * Modernisation, unification and other changes and fixes:
 * Copyright (C) 2007-2009 Sam Ravnborg <sam@ravnborg.org>
 *
 * Don't define absolute symbols until and unless you know that the symbol
 * value should remain constant even if the kernel image is relocated at
 * run time. Absolute symbols are not relocated. If a symbol's value should
 * change when the kernel is relocated, make the symbol section-relative and
 * put it inside the section definition.
 */
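/*
 * Illustration (a sketch, not part of this script): a symbol assigned inside
 * an output section statement is section-relative and moves with the image,
 *
 *	.data : AT(ADDR(.data) - LOAD_OFFSET) {
 *		_sdata = .;		<- relocated together with .data
 *	}
 *
 * while a top-level assignment to a constant expression, e.g. the
 * hypothetical
 *
 *	some_marker = 0xffffffff80000000;
 *
 * produces an absolute symbol that no relocation pass will ever adjust.
 */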
#ifdef CONFIG_X86_32
#define LOAD_OFFSET __PAGE_OFFSET
#else
#define LOAD_OFFSET __START_KERNEL_map
#endif
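/*
 * How LOAD_OFFSET is used below, as a worked sketch (assuming the 64-bit
 * value __START_KERNEL_map = 0xffffffff80000000 and a 16MB load address):
 * a section linked at virtual address 0xffffffff81000000 and emitted as
 *
 *	.text : AT(ADDR(.text) - LOAD_OFFSET) { ... }
 *
 * gets the load (physical) address
 *
 *	0xffffffff81000000 - 0xffffffff80000000 = 0x1000000
 *
 * so the program headers carry physical addresses while the symbols stay
 * virtual.
 */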
#include <asm-generic/vmlinux.lds.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/page_types.h>
#include <asm/cache.h>
#undef i386     /* in case the preprocessor is a 32-bit one */

OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT)

#ifdef CONFIG_X86_32
OUTPUT_ARCH(i386)
ENTRY(phys_startup_32)
#else
OUTPUT_ARCH(i386:x86-64)
ENTRY(phys_startup_64)
#endif
#if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
/*
 * On 64-bit, align RODATA to 2MB so that even with CONFIG_DEBUG_RODATA
 * we retain large page mappings for boundaries spanning kernel text, rodata
 * and data sections.
 *
 * However, kernel identity mappings will have different RWX permissions
 * for the pages mapping the text section and for the padding pages (which
 * are freed) at the end of it; hence kernel identity mappings will be
 * broken into smaller pages anyway. On 64-bit, kernel text and kernel
 * identity mappings are separate, so we can enable the protection checks
 * that come with CONFIG_DEBUG_RODATA while still retaining 2MB large page
 * mappings for kernel text.
 */
#define X64_ALIGN_DEBUG_RODATA_BEGIN	. = ALIGN(HPAGE_SIZE);

#define X64_ALIGN_DEBUG_RODATA_END	\
	. = ALIGN(HPAGE_SIZE);		\
	__end_rodata_hpage_align = .;
#else
#define X64_ALIGN_DEBUG_RODATA_BEGIN
#define X64_ALIGN_DEBUG_RODATA_END
#endif
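/*
 * Sketch of the effect when the markers are active (HPAGE_SIZE is 2MB on
 * x86-64):
 *
 *	X64_ALIGN_DEBUG_RODATA_BEGIN	-> . = ALIGN(0x200000);
 *	<rodata output sections>
 *	X64_ALIGN_DEBUG_RODATA_END	-> . = ALIGN(0x200000);
 *					   __end_rodata_hpage_align = .;
 *
 * i.e. rodata both starts and ends on a 2MB boundary, so it can keep its
 * own large-page mapping with read-only permissions.
 */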
PHDRS {
	text PT_LOAD FLAGS(5);          /* R_E */
	data PT_LOAD FLAGS(6);          /* RW_ */
#ifdef CONFIG_X86_64
	user PT_LOAD FLAGS(5);          /* R_E */
#ifdef CONFIG_SMP
	percpu PT_LOAD FLAGS(6);        /* RW_ */
#endif
	init PT_LOAD FLAGS(7);          /* RWE */
#endif
	note PT_NOTE FLAGS(0);          /* ___ */
}
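/*
 * For reference, FLAGS() takes the ELF p_flags permission bits:
 *
 *	PF_X = 1, PF_W = 2, PF_R = 4
 *	FLAGS(5) = PF_R | PF_X		read + execute
 *	FLAGS(6) = PF_R | PF_W		read + write
 *	FLAGS(7) = PF_R | PF_W | PF_X	read + write + execute
 *	FLAGS(0) = none			(PT_NOTE carries metadata only)
 */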
SECTIONS
{
#ifdef CONFIG_X86_32
	. = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
	phys_startup_32 = startup_32 - LOAD_OFFSET;
#else
	. = __START_KERNEL;
	phys_startup_64 = startup_64 - LOAD_OFFSET;
#endif
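	/*
	 * Worked example (assuming the common CONFIG_PHYSICAL_START of
	 * 16MB): on 32-bit, "." starts at __PAGE_OFFSET + 0x1000000 =
	 * 0xc1000000, so phys_startup_32 = 0xc1000000 - 0xc0000000 =
	 * 0x1000000 is the physical entry address the boot path jumps to.
	 * The 64-bit pair works the same way against __START_KERNEL_map.
	 */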
	/* Text and read-only data */
	.text : AT(ADDR(.text) - LOAD_OFFSET) {
		/* bootstrapping code */
		*(.text..page_aligned)

		/* End of text section */

	EXCEPTION_TABLE(16) :text = 0x9090
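	/*
	 * Note on "= 0x9090" above: that is the section fill pattern, and
	 * 0x90 is the x86 NOP opcode, so alignment padding in this output
	 * section becomes NOPs instead of zero bytes (which would decode as
	 * an ADD instruction).
	 */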
#if defined(CONFIG_DEBUG_RODATA)
	/* .text should occupy a whole number of pages */
	. = ALIGN(PAGE_SIZE);
#endif
	X64_ALIGN_DEBUG_RODATA_BEGIN
	RO_DATA(PAGE_SIZE)
	X64_ALIGN_DEBUG_RODATA_END
	.data : AT(ADDR(.data) - LOAD_OFFSET) {
		/* Start of data section */
		INIT_TASK_DATA(THREAD_SIZE)

#ifdef CONFIG_X86_32
		/* 32-bit has nosave before _edata */
		NOSAVE_DATA
#endif

		PAGE_ALIGNED_DATA(PAGE_SIZE)

		CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
		/*
		 * Workaround a binutils (2.20.51.0.12 to 2.21.51.0.3) bug.
		 * This makes jiffies relocatable in such binutils versions.
		 */
#ifdef CONFIG_X86_32
		jiffies = jiffies_64;
#else
		jiffies_64 = jiffies;
#endif
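		/*
		 * What the aliasing means: on 32-bit, little-endian x86
		 * makes "jiffies = jiffies_64;" alias the C symbol jiffies
		 * to the low 32 bits of the 64-bit counter; on 64-bit the
		 * two symbols simply name the same 64-bit object.
		 */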
		/* rarely changed data like cpu maps */
		READ_MOSTLY_DATA(INTERNODE_CACHE_BYTES)

		/* End of data section */
#ifdef CONFIG_X86_64

#define VSYSCALL_ADDR (-10*1024*1024)

#define VLOAD_OFFSET (VSYSCALL_ADDR - __vsyscall_0 + LOAD_OFFSET)
#define VLOAD(x) (ADDR(x) - VLOAD_OFFSET)

#define VVIRT_OFFSET (VSYSCALL_ADDR - __vsyscall_0)
#define VVIRT(x) (ADDR(x) - VVIRT_OFFSET)
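/*
 * A worked example: the vsyscall sections below are linked at the fixed
 * virtual address VSYSCALL_ADDR = -10MB = 0xffffffffff600000, while their
 * bytes live in the kernel image starting at __vsyscall_0. For a section x
 * at offset off into that page, ADDR(x) = VSYSCALL_ADDR + off, so
 *
 *	VVIRT(x) = ADDR(x) - (VSYSCALL_ADDR - __vsyscall_0)
 *	         = __vsyscall_0 + off
 *
 * is the same data seen through the normal kernel mapping (the address the
 * kernel writes through), and VLOAD(x) subtracts LOAD_OFFSET on top of
 * that, yielding the physical load address used in the AT() clauses.
 */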
	.vsyscall_0 : AT(VLOAD(.vsyscall_0)) {
		*(.vsyscall_0)
	} :user

	. = ALIGN(L1_CACHE_BYTES);
	.vsyscall_fn : AT(VLOAD(.vsyscall_fn)) {
		*(.vsyscall_fn)
	}

	. = ALIGN(L1_CACHE_BYTES);
	.vsyscall_gtod_data : AT(VLOAD(.vsyscall_gtod_data)) {
		*(.vsyscall_gtod_data)
	}
	vsyscall_gtod_data = VVIRT(.vsyscall_gtod_data);

	.vsyscall_clock : AT(VLOAD(.vsyscall_clock)) {
		*(.vsyscall_clock)
	}
	vsyscall_clock = VVIRT(.vsyscall_clock);

	.vsyscall_1 ADDR(.vsyscall_0) + 1024: AT(VLOAD(.vsyscall_1)) {
		*(.vsyscall_1)
	}
	.vsyscall_2 ADDR(.vsyscall_0) + 2048: AT(VLOAD(.vsyscall_2)) {
		*(.vsyscall_2)
	}

	.vgetcpu_mode : AT(VLOAD(.vgetcpu_mode)) {
		*(.vgetcpu_mode)
	}
	vgetcpu_mode = VVIRT(.vgetcpu_mode);

	. = ALIGN(L1_CACHE_BYTES);
	.jiffies : AT(VLOAD(.jiffies)) {
		*(.jiffies)
	}
	jiffies = VVIRT(.jiffies);

	.vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) {
		*(.vsyscall_3)
	}

	. = __vsyscall_0 + PAGE_SIZE;
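	/*
	 * Layout recap: each virtual syscall gets a fixed 1KB slot inside
	 * the single 4KB vsyscall page, so userspace can call it at an
	 * ABI-frozen address:
	 *
	 *	VSYSCALL_ADDR + 0	.vsyscall_0 (gettimeofday)
	 *	VSYSCALL_ADDR + 1024	.vsyscall_1 (time)
	 *	VSYSCALL_ADDR + 2048	.vsyscall_2 (getcpu)
	 *	VSYSCALL_ADDR + 3072	.vsyscall_3 (unused)
	 *
	 * The final ". = __vsyscall_0 + PAGE_SIZE;" steps the location
	 * counter past the page's copy in the kernel-image mapping.
	 */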
#endif /* CONFIG_X86_64 */

	/* Init code and data - will be freed after init */
	. = ALIGN(PAGE_SIZE);
	.init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
		__init_begin = .; /* paired with __init_end */
#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
	/*
	 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
	 * output PHDR, so the next output section - .init.text - should
	 * start another segment - init.
	 */
	PERCPU_VADDR(0, :percpu)
#endif
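	/*
	 * What "zero-based" buys, as a sketch: with the per-cpu area linked
	 * at virtual address 0, every per-cpu symbol resolves to its offset
	 * within the area, so an access compiles down to one
	 * segment-relative instruction along the lines of (illustrative
	 * only, hypothetical symbol)
	 *
	 *	incl %gs:this_counter
	 *
	 * with the %gs base pointing at the running CPU's copy of the area.
	 */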
	INIT_TEXT_SECTION(PAGE_SIZE)

	INIT_DATA_SECTION(16)

	.x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
		__x86_cpu_dev_start = .;
		*(.x86_cpu_dev.init)
		__x86_cpu_dev_end = .;
	}
	/*
	 * Start address and size of operations which can be patched at
	 * run time with virtualization-friendly instructions or bare-metal
	 * native ones. Think page table operations.
	 * Details in paravirt_types.h
	 */
	.parainstructions : AT(ADDR(.parainstructions) - LOAD_OFFSET) {
		__parainstructions = .;
		*(.parainstructions)
		__parainstructions_end = .;
	}
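	/*
	 * Roughly how it is consumed: each record in this table points at
	 * one patchable call site (address, length, which paravirt op); at
	 * boot, apply_paravirt() rewrites every site in place with the
	 * implementation chosen for the detected hypervisor or bare metal.
	 * See struct paravirt_patch_site in paravirt_types.h.
	 */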
	/*
	 * struct alt_instr entries. From the header (alternative.h):
	 * "Alternative instructions for different CPU types or capabilities"
	 * Think locking instructions on spinlocks.
	 */
	.altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
		__alt_instructions = .;
		*(.altinstructions)
		__alt_instructions_end = .;
	}

	/*
	 * And here are the replacement instructions. The linker sticks
	 * them in as binary blobs. The .altinstructions section has enough
	 * data to locate them (address and length) so the kernel can be
	 * patched safely.
	 */
	.altinstr_replacement : AT(ADDR(.altinstr_replacement) - LOAD_OFFSET) {
		*(.altinstr_replacement)
	}
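	/*
	 * A sketch of the mechanism (example borrowed from the 32-bit mb()
	 * of this era, assuming alternative.h semantics): a use such as
	 *
	 *	alternative("lock; addl $0,0(%esp)", "mfence",
	 *		    X86_FEATURE_XMM2)
	 *
	 * emits the old sequence into .text, the new one into
	 * .altinstr_replacement, and one struct alt_instr into
	 * .altinstructions recording both addresses, both lengths and the
	 * feature bit; apply_alternatives() copies the replacement over the
	 * original at boot when the CPU has the feature.
	 */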
	/*
	 * struct iommu_table_entry entries are injected into this section.
	 * It is an array of IOMMUs which is sorted at run time according to
	 * its dependency order. After rootfs_initcall is complete this
	 * section can be safely removed.
	 */
	.iommu_table : AT(ADDR(.iommu_table) - LOAD_OFFSET) {
		__iommu_table = .;
		*(.iommu_table)
		__iommu_table_end = .;
	}
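	/*
	 * Entries land here from C via the IOMMU_INIT*() helpers in
	 * asm/iommu_table.h, each naming a detection routine plus the entry
	 * it depends on; the sort then guarantees that dependencies are
	 * probed before their dependents during IOMMU initialization.
	 */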
	/*
	 * .exit.text is discarded at runtime, not link time, to deal with
	 * references from .altinstructions and .eh_frame
	 */
	.exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
		EXIT_TEXT
	}

	.exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
		EXIT_DATA
	}
#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
	PERCPU(THREAD_SIZE)
#endif

	. = ALIGN(PAGE_SIZE);

	/* freed after init ends here */
	.init.end : AT(ADDR(.init.end) - LOAD_OFFSET) {
		__init_end = .;
	}
	/*
	 * smp_locks might be freed after init
	 * start/end must be page aligned
	 */
	. = ALIGN(PAGE_SIZE);
	.smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
		__smp_locks = .;
		*(.smp_locks)
		. = ALIGN(PAGE_SIZE);
		__smp_locks_end = .;
	}
	.data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
		NOSAVE_DATA
	}

	/* BSS */
	. = ALIGN(PAGE_SIZE);
	.bss : AT(ADDR(.bss) - LOAD_OFFSET) {
		*(.bss..page_aligned)
		*(.bss)
		. = ALIGN(PAGE_SIZE);
	}

	. = ALIGN(PAGE_SIZE);
	.brk : AT(ADDR(.brk) - LOAD_OFFSET) {
		. += 64 * 1024;		/* 64k alignment slop space */
		*(.brk_reservation)	/* areas brk users have reserved */
	}
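	/*
	 * Note on .brk: this is the boot-time extend-me arena. C code
	 * reserves space with RESERVE_BRK(name, size) from asm/setup.h,
	 * which emits a no-load .brk_reservation fragment; extend_brk()
	 * hands the memory out during early boot, and whatever remains
	 * unused is freed once boot is done.
	 */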
	/* Sections to be discarded */
	DISCARDS
	/DISCARD/ : { *(.eh_frame) }
}
#ifdef CONFIG_X86_32
/*
 * The ASSERT() sink to . is intentional, for binutils 2.14 compatibility:
 */
. = ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE),
	   "kernel image bigger than KERNEL_IMAGE_SIZE");
#else
/*
 * Per-cpu symbols which need to be offset from __per_cpu_load
 * for the boot processor.
 */
#define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
INIT_PER_CPU(gdt_page);
INIT_PER_CPU(irq_stack_union);
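/*
 * For instance, INIT_PER_CPU(gdt_page) expands to
 *
 *	init_per_cpu__gdt_page = gdt_page + __per_cpu_load;
 *
 * i.e. the boot processor's copy inside the initial per-cpu load area
 * (the per-cpu symbols themselves are zero-based offsets, see above).
 */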
/*
 * Build-time check on the image size:
 */
. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
	   "kernel image bigger than KERNEL_IMAGE_SIZE");

#ifdef CONFIG_SMP
. = ASSERT((irq_stack_union == 0),
	   "irq_stack_union is not at start of per-cpu area");
#endif

#endif /* CONFIG_X86_32 */
#ifdef CONFIG_KEXEC
#include <asm/kexec.h>

. = ASSERT(kexec_control_code_size <= KEXEC_CONTROL_CODE_MAX_SIZE,
	   "kexec control code size is too big");
#endif