/*
 * Set up the VMAs to tell the VM about the vDSO.
 * Copyright 2007 Andi Kleen, SUSE Labs.
 * Subject to the GPL, v.2
 */
8 #include <linux/sched.h>
9 #include <linux/slab.h>
10 #include <linux/init.h>
11 #include <linux/random.h>
12 #include <linux/elf.h>
13 #include <asm/vsyscall.h>
14 #include <asm/vgtod.h>
15 #include <asm/proto.h>
/*
 * Non-zero when the vDSO should be mapped into new processes.  Can be
 * overridden at boot via the "vdso=" parameter (see vdso_setup() below).
 */
19 unsigned int __read_mostly vdso_enabled = 1;
/* Start and end of the linked-in vDSO ELF image. */
21 extern char vdso_start[], vdso_end[];
/* NOTE(review): declared but not referenced in this chunk — presumably
 * used by the vDSO image/timekeeping code; confirm against the rest of
 * the file. */
22 extern unsigned short vdso_sync_cpuid;
/* Page array backing the vDSO image; filled in by init_vdso(). */
24 extern struct page *vdso_pages[];
/* Size of the vDSO mapping in bytes (npages << PAGE_SHIFT), set once by
 * init_vdso(). */
25 static unsigned vdso_size;
/*
 * patch_vdso - apply alternative-instruction patching to the vDSO image.
 * @vdso: start of the in-kernel vDSO ELF image
 * @len:  size of the image in bytes
 *
 * Walks the ELF64 section header table looking for ".altinstructions"
 * and, when found, runs apply_alternatives() on that section so the vDSO
 * code matches the features of the CPU we booted on.  Called once at
 * init time, before the image is mapped into any process.
 */
27 static void __init patch_vdso(void *vdso, size_t len)
29 Elf64_Ehdr *hdr = vdso;
30 Elf64_Shdr *sechdrs, *alt_sec = 0;
/* Sanity-check: the blob must be at least one ELF header long and carry
 * the ELF magic; anything else is a build-time bug, so BUG_ON(). */
35 BUG_ON(len < sizeof(Elf64_Ehdr));
36 BUG_ON(memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0);
/* Locate the section header table and the section-name string table
 * (indexed by e_shstrndx) inside the image. */
38 sechdrs = (void *)hdr + hdr->e_shoff;
39 secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
/* Scan sections for ".altinstructions"; index 0 is the reserved NULL
 * section, so start at 1. */
41 for (i = 1; i < hdr->e_shnum; i++) {
42 Elf64_Shdr *shdr = &sechdrs[i];
43 if (!strcmp(secstrings + shdr->sh_name, ".altinstructions")) {
49 /* If we get here, it's probably a bug. */
50 pr_warning("patch_vdso: .altinstructions not found\n")
51 return; /* nothing to patch */
/* Section found: patch the alternatives it describes in place. */
54 alt_data = (void *)hdr + alt_sec->sh_offset;
55 apply_alternatives(alt_data, alt_data + alt_sec->sh_size);
/*
 * init_vdso - one-time setup of the vDSO image.
 *
 * Patches CPU alternatives into the image, records its page-rounded size
 * in vdso_size, and fills vdso_pages[] with the struct page backing each
 * image page so arch_setup_additional_pages() can map them into user
 * processes later.
 */
58 static int __init init_vdso(void)
/* Number of pages needed to hold the image, rounding up. */
60 int npages = (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE;
63 patch_vdso(vdso_start, vdso_end - vdso_start);
65 vdso_size = npages << PAGE_SHIFT;
66 for (i = 0; i < npages; i++)
67 vdso_pages[i] = virt_to_page(vdso_start + i*PAGE_SIZE);
/* Run during the subsys initcall phase, before user space starts. */
71 subsys_initcall(init_vdso);
/*
 * vdso_addr - choose a randomized user address for the vDSO mapping.
 * @start: top of the process stack (already randomized by the loader)
 * @len:   size of the vDSO mapping in bytes
 *
 * Put the vdso above the (randomized) stack with another randomized
 * offset. This way there is no hole in the middle of address space.
 * To save memory make sure it is still in the same PTE as the stack
 * top. This doesn't give that many random bits.
 *
 * Note that this algorithm is imperfect: the distribution of the vdso
 * start address within a PMD is biased toward the end.
 */
84 static unsigned long vdso_addr(unsigned long start, unsigned len)
86 unsigned long addr, end;
/*
 * Round up the start address. It can start out unaligned as a result
 * of stack start randomization.
 */
93 start = PAGE_ALIGN(start);
/* Round the lowest possible end address up to a PMD boundary. */
96 end = (start + len + PMD_SIZE - 1) & PMD_MASK;
/* Clamp so the mapping cannot reach past the end of user address
 * space (the branch body is handled below this check). */
97 if (end >= TASK_SIZE_MAX)
/* Pick a uniformly random page-granular offset within [start, end];
 * the "+ 1" makes the top page itself a possible choice. */
102 offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
103 addr = start + (offset << PAGE_SHIFT);
/*
 * Forcibly align the final address in case we have a hardware
 * issue that requires alignment for performance reasons.
 */
112 addr = align_addr(addr, NULL, ALIGN_VDSO);
/*
 * arch_setup_additional_pages - map the vDSO into a new process image.
 * @bprm:        binary being exec'd
 * @uses_interp: whether the binary uses an ELF interpreter
 *
 * Called from the ELF loader at exec time.  Picks a randomized address
 * above the stack, installs a special mapping backed by vdso_pages[],
 * and records the chosen base in mm->context.vdso.
 * Not called for compat tasks.
 */
119 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
121 struct mm_struct *mm = current->mm;
/* Take mmap_sem for writing: we are about to modify the address-space
 * layout of the new mm. */
128 down_write(&mm->mmap_sem);
/* vdso_addr() is only a hint; get_unmapped_area() gives the final,
 * actually-available address (or an error value). */
129 addr = vdso_addr(mm->start_stack, vdso_size);
130 addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
131 if (IS_ERR_VALUE(addr)) {
/* Record the vDSO base before installing the mapping so later code
 * (e.g. signal return) can find it via mm->context.vdso. */
136 current->mm->context.vdso = (void *)addr;
138 ret = install_special_mapping(mm, addr, vdso_size,
140 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
/* On failure, clear the context pointer so nothing uses a stale
 * address for a mapping that was never installed. */
144 current->mm->context.vdso = NULL;
149 up_write(&mm->mmap_sem);
/*
 * vdso_setup - parse the "vdso=" kernel boot parameter.
 * @s: the value string following "vdso=".
 *
 * Stores the parsed integer in vdso_enabled (default 1); presumably
 * "vdso=0" disables the mapping — vdso_enabled is consulted elsewhere,
 * outside this chunk.
 */
153 static __init int vdso_setup(char *s)
155 vdso_enabled = simple_strtoul(s, NULL, 0);
158 __setup("vdso=", vdso_setup);