mm: larger stack guard gap, between vmas
arch/parisc/kernel/sys_parisc.c (pandora-kernel.git)

/*
 *    PARISC specific syscalls
 *
 *    Copyright (C) 1999-2003 Matthew Wilcox <willy at parisc-linux.org>
 *    Copyright (C) 2000-2003 Paul Bame <bame at parisc-linux.org>
 *    Copyright (C) 2001 Thomas Bogendoerfer <tsbogend at parisc-linux.org>
 *
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; either version 2 of the License, or
 *    (at your option) any later version.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *    GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program; if not, write to the Free Software
 *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <asm/uaccess.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/linkage.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/shm.h>
#include <linux/syscalls.h>
#include <linux/utsname.h>
#include <linux/personality.h>

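/*
 * Both search loops below treat a VMA as if it occupied
 * [vm_start_gap(vma), vm_end_gap(vma)) rather than [vm_start, vm_end),
 * so a new mapping is never placed inside the guard gap of a growable
 * stack.  As a rough sketch (the real helpers live in <linux/mm.h> and
 * were introduced by the stack guard gap change), vm_start_gap()
 * behaves like:
 *
 *      static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
 *      {
 *              unsigned long start = vma->vm_start;
 *
 *              if (vma->vm_flags & VM_GROWSDOWN)
 *                      start -= stack_guard_gap;       (clamped at 0)
 *              return start;
 *      }
 *
 * and vm_end_gap() symmetrically extends vm_end upward for VM_GROWSUP.
 */
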
static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
{
        struct vm_area_struct *vma, *prev;
        unsigned long prev_end;

        addr = PAGE_ALIGN(addr);

        for (vma = find_vma_prev(current->mm, addr, &prev); ; prev = vma,
                                                        vma = vma->vm_next) {
                if (prev) {
                        prev_end = vm_end_gap(prev);
                        if (addr < prev_end) {
                                addr = prev_end;
                                /* If vma already violates gap, forget it */
                                if (vma && addr > vma->vm_start)
                                        addr = vma->vm_start;
                        }
                }
                /* At this point:  (!vma || addr < vma->vm_end). */
                if (TASK_SIZE - len < addr)
                        return -ENOMEM;
                if (!vma || addr + len <= vm_start_gap(vma))
                        return addr;
        }
}

#define DCACHE_ALIGN(addr) (((addr) + (SHMLBA - 1)) & ~(SHMLBA - 1))
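
/*
 * Worked example: DCACHE_ALIGN() rounds an address up to the next SHMLBA
 * boundary.  Assuming parisc's 4MB SHMLBA (0x00400000):
 *
 *      DCACHE_ALIGN(0x00123456)
 *              == (0x00123456 + 0x003fffff) & ~0x003fffff
 *              == 0x00400000
 */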

/*
 * We need to know the offset to use.  The old scheme was to look for an
 * existing mapping and use the same offset.  The new scheme is to use
 * the address of the kernel data structure as the seed for the offset.
 * We'll see how that works...
 *
 * The mapping is cacheline aligned, so there's no information in the
 * bottom few bits of the address.  We're looking for 10 bits (4MB / 4k),
 * so let's drop the bottom 8 bits and use bits 8-17.
 */
static int get_offset(struct address_space *mapping)
{
        int offset = (unsigned long) mapping << (PAGE_SHIFT - 8);
        return offset & 0x3FF000;
}
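
/*
 * Worked example, assuming PAGE_SHIFT == 12: for a struct address_space
 * at kernel address 0x12345678,
 *
 *      (0x12345678 << 4) & 0x3FF000 == 0x56000
 *
 * i.e. a page-aligned offset somewhere in the first 4MB.
 */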

static unsigned long get_shared_area(struct address_space *mapping,
                unsigned long addr, unsigned long len, unsigned long pgoff)
{
        struct vm_area_struct *vma, *prev;
        unsigned long prev_end;
        int offset = mapping ? get_offset(mapping) : 0;

        offset = (offset + (pgoff << PAGE_SHIFT)) & 0x3FF000;

        addr = DCACHE_ALIGN(addr - offset) + offset;

        for (vma = find_vma_prev(current->mm, addr, &prev); ; prev = vma,
                                                        vma = vma->vm_next) {
                if (prev) {
                        prev_end = vm_end_gap(prev);
                        if (addr < prev_end) {
                                addr = DCACHE_ALIGN(prev_end - offset) + offset;
                                if (addr < prev_end)    /* handle wraparound */
                                        return -ENOMEM;
                                /* If vma already violates gap, forget it */
                                if (vma && addr > vma->vm_start)
                                        addr = vma->vm_start;
                        }
                }
                /* At this point:  (!vma || addr < vma->vm_end). */
                if (TASK_SIZE - len < addr)
                        return -ENOMEM;
                if (!vma || addr + len <= vm_start_gap(vma))
                        return addr;
        }
}
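
/*
 * Net effect of the DCACHE_ALIGN() arithmetic above: every mapping of a
 * given file offset lands at a user address congruent modulo 4MB, so all
 * virtual aliases index the same lines in the virtually-indexed cache.
 */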

unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        if (len > TASK_SIZE)
                return -ENOMEM;
        /* Might want to check for cache aliasing issues for MAP_FIXED case
         * like ARM or MIPS ??? --BenH.
         */
        if (flags & MAP_FIXED)
                return addr;
        if (!addr)
                addr = TASK_UNMAPPED_BASE;

        if (filp) {
                addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
        } else if (flags & MAP_SHARED) {
                addr = get_shared_area(NULL, addr, len, pgoff);
        } else {
                addr = get_unshared_area(addr, len);
        }
        return addr;
}
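
/*
 * Hypothetical userspace view (an illustrative sketch, not part of this
 * file): two MAP_SHARED mappings of the same file offset should come
 * back SHMLBA-congruent:
 *
 *      int fd = open("/tmp/f", O_RDWR);              (hypothetical file)
 *      char *a = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
 *      char *b = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
 *      assert((unsigned long)a % SHMLBA == (unsigned long)b % SHMLBA);
 */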

asmlinkage unsigned long sys_mmap2(unsigned long addr, unsigned long len,
        unsigned long prot, unsigned long flags, unsigned long fd,
        unsigned long pgoff)
{
        /* Make sure the shift for mmap2 is constant (12), no matter what
           PAGE_SIZE we have. */
        return sys_mmap_pgoff(addr, len, prot, flags, fd,
                              pgoff >> (PAGE_SHIFT - 12));
}
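
/*
 * Worked example: userspace always passes the mmap2 offset in 4096-byte
 * units.  With 4kB pages (PAGE_SHIFT == 12) the shift is zero and pgoff
 * is used as-is; with hypothetical 64kB pages (PAGE_SHIFT == 16), a file
 * offset of 128kB arrives as pgoff == 32 and becomes 32 >> 4 == 2
 * kernel-sized pages.
 */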

asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len,
                unsigned long prot, unsigned long flags, unsigned long fd,
                unsigned long offset)
{
        if (!(offset & ~PAGE_MASK)) {
                return sys_mmap_pgoff(addr, len, prot, flags, fd,
                                        offset >> PAGE_SHIFT);
        } else {
                return -EINVAL;
        }
}
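
/*
 * Unlike sys_mmap2(), the offset here is in bytes and must be
 * page-aligned: with 4kB pages, offset == 8192 maps pgoff 2, while
 * offset == 8193 fails with -EINVAL.
 */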

/*
 * Broken ABI: the 32-bit syscall convention passes 64-bit values split
 * across pairs of 32-bit argument registers, so the wrappers below
 * reassemble the high/low halves into a single 64-bit quantity.
 */

#ifdef CONFIG_64BIT
asmlinkage long parisc_truncate64(const char __user * path,
                                        unsigned int high, unsigned int low)
{
        return sys_truncate(path, (long)high << 32 | low);
}

asmlinkage long parisc_ftruncate64(unsigned int fd,
                                        unsigned int high, unsigned int low)
{
        return sys_ftruncate(fd, (long)high << 32 | low);
}

/* stubs for the benefit of the syscall_table since truncate64 and
 * truncate are identical on LP64 */
asmlinkage long sys_truncate64(const char __user * path, unsigned long length)
{
        return sys_truncate(path, length);
}
asmlinkage long sys_ftruncate64(unsigned int fd, unsigned long length)
{
        return sys_ftruncate(fd, length);
}
asmlinkage long sys_fcntl64(unsigned int fd, unsigned int cmd, unsigned long arg)
{
        return sys_fcntl(fd, cmd, arg);
}
#else

asmlinkage long parisc_truncate64(const char __user * path,
                                        unsigned int high, unsigned int low)
{
        return sys_truncate64(path, (loff_t)high << 32 | low);
}

asmlinkage long parisc_ftruncate64(unsigned int fd,
                                        unsigned int high, unsigned int low)
{
        return sys_ftruncate64(fd, (loff_t)high << 32 | low);
}
#endif
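
/*
 * Worked example of the high/low split used above and below: to address
 * byte 0x100001000 (4GiB + 4kB), a 32-bit caller passes high == 0x1 and
 * low == 0x1000, and (loff_t)0x1 << 32 | 0x1000 reassembles 0x100001000.
 */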

asmlinkage ssize_t parisc_pread64(unsigned int fd, char __user *buf, size_t count,
                                        unsigned int high, unsigned int low)
{
        return sys_pread64(fd, buf, count, (loff_t)high << 32 | low);
}

asmlinkage ssize_t parisc_pwrite64(unsigned int fd, const char __user *buf,
                        size_t count, unsigned int high, unsigned int low)
{
        return sys_pwrite64(fd, buf, count, (loff_t)high << 32 | low);
}

asmlinkage ssize_t parisc_readahead(int fd, unsigned int high, unsigned int low,
                                    size_t count)
{
        return sys_readahead(fd, (loff_t)high << 32 | low, count);
}

asmlinkage long parisc_fadvise64_64(int fd,
                        unsigned int high_off, unsigned int low_off,
                        unsigned int high_len, unsigned int low_len, int advice)
{
        return sys_fadvise64_64(fd, (loff_t)high_off << 32 | low_off,
                        (loff_t)high_len << 32 | low_len, advice);
}

asmlinkage long parisc_sync_file_range(int fd,
                        u32 hi_off, u32 lo_off, u32 hi_nbytes, u32 lo_nbytes,
                        unsigned int flags)
{
        return sys_sync_file_range(fd, (loff_t)hi_off << 32 | lo_off,
                        (loff_t)hi_nbytes << 32 | lo_nbytes, flags);
}

/*
 * alloc_hugepages()/free_hugepages() are not implemented on parisc;
 * these stubs only keep the syscall slots returning a sane error.
 */
asmlinkage unsigned long sys_alloc_hugepages(int key, unsigned long addr, unsigned long len, int prot, int flag)
{
        return -ENOMEM;
}

asmlinkage int sys_free_hugepages(unsigned long addr)
{
        return -EINVAL;
}

long parisc_personality(unsigned long personality)
{
        long err;

        if (personality(current->personality) == PER_LINUX32
            && personality == PER_LINUX)
                personality = PER_LINUX32;

        err = sys_personality(personality);
        if (err == PER_LINUX32)
                err = PER_LINUX;

        return err;
}
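
/*
 * Example of the round-trip above: a task already running as PER_LINUX32
 * that calls personality(PER_LINUX) keeps PER_LINUX32 inside the kernel,
 * yet sees PER_LINUX returned, so a naive save/restore of the
 * personality value from userspace keeps working.
 */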