/*
 * Set up the VMAs to tell the VM about the vDSO.
 * Copyright 2007 Andi Kleen, SUSE Labs.
 * Subject to the GPL, v.2
 */
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <asm/vsyscall.h>
#include <asm/vgtod.h>
#include <asm/proto.h>
#include <asm/vdso.h>
#include <asm/page.h>
#if defined(CONFIG_X86_64)
/* Controlled by the "vdso=" boot parameter; nonzero maps the vDSO. */
unsigned int __read_mostly vdso_enabled = 1;

/* 64-bit vDSO image, linked into the kernel by the vDSO linker script. */
extern char vdso_start[], vdso_end[];
extern unsigned short vdso_sync_cpuid;

extern struct page *vdso_pages[];
static unsigned vdso_size;	/* page-rounded size, filled in by init_vdso() */

#ifdef CONFIG_X86_X32_ABI
/* Separate vDSO image for the x32 ABI. */
extern char vdsox32_start[], vdsox32_end[];
extern struct page *vdsox32_pages[];
static unsigned vdsox32_size;
#endif
#endif
#if defined(CONFIG_X86_32) || defined(CONFIG_X86_X32_ABI) || \
	defined(CONFIG_COMPAT)
/*
 * Apply the kernel's runtime alternative-instruction patching to a
 * 32-bit (or x32) vDSO image before it is handed out to user space.
 *
 * @vdso: start of the in-kernel copy of the vDSO ELF image
 * @len:  size of that image in bytes
 *
 * Locates the ".altinstructions" ELF section and feeds it to
 * apply_alternatives(). Missing section is reported but non-fatal.
 */
void __init patch_vdso32(void *vdso, size_t len)
{
	Elf32_Ehdr *hdr = vdso;
	Elf32_Shdr *sechdrs, *alt_sec = NULL;
	char *secstrings;
	void *alt_data;
	int i;

	/* Sanity-check that this really is an ELF image. */
	BUG_ON(len < sizeof(Elf32_Ehdr));
	BUG_ON(memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0);

	sechdrs = (void *)hdr + hdr->e_shoff;
	secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

	/* Section 0 is the reserved SHT_NULL entry, so start at 1. */
	for (i = 1; i < hdr->e_shnum; i++) {
		Elf32_Shdr *shdr = &sechdrs[i];
		if (!strcmp(secstrings + shdr->sh_name, ".altinstructions")) {
			alt_sec = shdr;
			goto found;
		}
	}

	/* If we get here, it's probably a bug. */
	pr_warning("patch_vdso32: .altinstructions not found\n");
	return; /* nothing to patch */

found:
	alt_data = (void *)hdr + alt_sec->sh_offset;
	apply_alternatives(alt_data, alt_data + alt_sec->sh_size);
}
#endif
69 #if defined(CONFIG_X86_64)
70 static void __init patch_vdso64(void *vdso, size_t len)
72 Elf64_Ehdr *hdr = vdso;
73 Elf64_Shdr *sechdrs, *alt_sec = 0;
78 BUG_ON(len < sizeof(Elf64_Ehdr));
79 BUG_ON(memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0);
81 sechdrs = (void *)hdr + hdr->e_shoff;
82 secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
84 for (i = 1; i < hdr->e_shnum; i++) {
85 Elf64_Shdr *shdr = &sechdrs[i];
86 if (!strcmp(secstrings + shdr->sh_name, ".altinstructions")) {
92 /* If we get here, it's probably a bug. */
93 pr_warning("patch_vdso64: .altinstructions not found\n");
94 return; /* nothing to patch */
97 alt_data = (void *)hdr + alt_sec->sh_offset;
98 apply_alternatives(alt_data, alt_data + alt_sec->sh_size);
101 static int __init init_vdso(void)
103 int npages = (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE;
106 patch_vdso64(vdso_start, vdso_end - vdso_start);
108 vdso_size = npages << PAGE_SHIFT;
109 for (i = 0; i < npages; i++)
110 vdso_pages[i] = virt_to_page(vdso_start + i*PAGE_SIZE);
112 #ifdef CONFIG_X86_X32_ABI
113 patch_vdso32(vdsox32_start, vdsox32_end - vdsox32_start);
114 npages = (vdsox32_end - vdsox32_start + PAGE_SIZE - 1) / PAGE_SIZE;
115 vdsox32_size = npages << PAGE_SHIFT;
116 for (i = 0; i < npages; i++)
117 vdsox32_pages[i] = virt_to_page(vdsox32_start + i*PAGE_SIZE);
122 subsys_initcall(init_vdso);
126 /* Put the vdso above the (randomized) stack with another randomized offset.
127 This way there is no hole in the middle of address space.
128 To save memory make sure it is still in the same PTE as the stack top.
129 This doesn't give that many random bits */
130 static unsigned long vdso_addr(unsigned long start, unsigned len)
132 unsigned long addr, end;
134 end = (start + PMD_SIZE - 1) & PMD_MASK;
135 if (end >= TASK_SIZE_MAX)
138 /* This loses some more bits than a modulo, but is cheaper */
139 offset = get_random_int() & (PTRS_PER_PTE - 1);
140 addr = start + (offset << PAGE_SHIFT);
145 * page-align it here so that get_unmapped_area doesn't
146 * align it wrongfully again to the next page. addr can come in 4K
147 * unaligned here as a result of stack start randomization.
149 addr = PAGE_ALIGN(addr);
150 addr = align_vdso_addr(addr);
155 /* Setup a VMA at program startup for the vsyscall page.
156 Not called for compat tasks */
157 static int setup_additional_pages(struct linux_binprm *bprm,
162 struct mm_struct *mm = current->mm;
169 down_write(&mm->mmap_sem);
170 addr = vdso_addr(mm->start_stack, size);
171 addr = get_unmapped_area(NULL, addr, size, 0, 0);
172 if (IS_ERR_VALUE(addr)) {
177 current->mm->context.vdso = (void *)addr;
179 ret = install_special_mapping(mm, addr, size,
181 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
184 current->mm->context.vdso = NULL;
189 up_write(&mm->mmap_sem);
193 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
195 return setup_additional_pages(bprm, uses_interp, vdso_pages,
#ifdef CONFIG_X86_X32_ABI
/* Same as arch_setup_additional_pages(), but for the x32 vDSO image. */
int x32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	return setup_additional_pages(bprm, uses_interp, vdsox32_pages,
				      vdsox32_size);
}
#endif
207 static __init int vdso_setup(char *s)
209 vdso_enabled = simple_strtoul(s, NULL, 0);
212 __setup("vdso=", vdso_setup);