arch/um/sys-i386/ldt.c
1 /*
2  * Copyright (C) 2001, 2002 Jeff Dike (jdike@karaya.com)
3  * Licensed under the GPL
4  */
5
6 #include "linux/stddef.h"
7 #include "linux/config.h"
8 #include "linux/sched.h"
9 #include "linux/slab.h"
10 #include "linux/types.h"
11 #include "linux/errno.h"
12 #include "asm/uaccess.h"
13 #include "asm/smp.h"
14 #include "asm/ldt.h"
15 #include "asm/unistd.h"
16 #include "choose-mode.h"
17 #include "kern.h"
18 #include "mode_kern.h"
19 #include "os.h"
20
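/*
 * Host modify_ldt(); only the prototype is given here, the definition
 * presumably lives in the os-Linux layer.  The host syscall operates on
 * the LDT of the calling host process.
 */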
21 extern int modify_ldt(int func, void *ptr, unsigned long bytecount);
22
23 #ifdef CONFIG_MODE_TT
24
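/*
 * In tt mode the UML kernel executes in the context of the host process
 * backing the current task, so an LDT request can simply be forwarded to
 * the host's modify_ldt(): copy the descriptor in for a write, or hand the
 * host a kernel buffer and copy the result out for a read.
 */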
25 static long do_modify_ldt_tt(int func, void __user *ptr,
26                               unsigned long bytecount)
27 {
28         struct user_desc info;
29         int res = 0;
30         void *buf = NULL;
31         void *p = NULL; /* What we pass to host. */
32
33         switch(func){
34         case 1:
35         case 0x11: /* write_ldt */
36                 /* Do this check now to avoid overflows. */
37                 if (bytecount != sizeof(struct user_desc)) {
38                         res = -EINVAL;
39                         goto out;
40                 }
41
42                 if(copy_from_user(&info, ptr, sizeof(info))) {
43                         res = -EFAULT;
44                         goto out;
45                 }
46
47                 p = &info;
48                 break;
49         case 0:
50         case 2: /* read_ldt */
51
                /* Using the on-stack info avoids a kmalloc() in the write
                 * case; the read case still needs a buffer. */
54                 buf = kmalloc(bytecount, GFP_KERNEL);
55                 if (!buf) {
56                         res = -ENOMEM;
57                         goto out;
58                 }
59                 p = buf;
60                 break;
61         default:
62                 res = -ENOSYS;
63                 goto out;
64         }
65
66         res = modify_ldt(func, p, bytecount);
67         if(res < 0)
68                 goto out;
69
70         switch(func){
71         case 0:
72         case 2:
                /* modify_ldt() was a read and returned the number of bytes
                 * read. */
75                 if(copy_to_user(ptr, p, res))
76                         res = -EFAULT;
77                 break;
78         }
79
80 out:
81         kfree(buf);
82         return res;
83 }
84
85 #endif
86
87 #ifdef CONFIG_MODE_SKAS
88
89 #include "skas.h"
90 #include "skas_ptrace.h"
91 #include "asm/mmu_context.h"
92 #include "proc_mm.h"
93
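/*
 * Install one descriptor in the LDT of the address space identified by
 * mm_idp on the host.  With PTRACE_LDT the host kernel is asked to do the
 * work via os_ptrace_ldt(); otherwise the descriptor is copied into the
 * child's stub data page and modify_ldt is run there through the syscall
 * stub.  In the stub case, *addr and "done" are passed through to the stub
 * helpers, which apparently use them to batch several entries into one
 * stub run.
 */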
94 long write_ldt_entry(struct mm_id * mm_idp, int func, struct user_desc * desc,
95                      void **addr, int done)
96 {
97         long res;
98
99         if(proc_mm){
                /* This is special handling for the case where the mm to
                 * modify isn't current->active_mm.
                 * If this is called directly by modify_ldt,
                 *     (&current->active_mm->context.skas.id == mm_idp)
                 * holds, so no call to switch_mm_skas(mm_idp) is needed.
                 * If this is called from init_new_ldt or PTRACE_LDT,
                 * mm_idp belongs not to current->active_mm but to child->mm,
                 * so we need to switch the child's mm into our userspace and
                 * switch back later.
                 *
                 * Note: I'm unsure: should interrupts be disabled here?
                 */
112                 if(!current->active_mm || current->active_mm == &init_mm ||
113                    mm_idp != &current->active_mm->context.skas.id)
114                         switch_mm_skas(mm_idp);
115         }
116
117         if(ptrace_ldt) {
118                 struct ptrace_ldt ldt_op = (struct ptrace_ldt) {
119                         .func = func,
120                         .ptr = desc,
121                         .bytecount = sizeof(*desc)};
122                 u32 cpu;
123                 int pid;
124
125                 if(!proc_mm)
126                         pid = mm_idp->u.pid;
127                 else {
128                         cpu = get_cpu();
129                         pid = userspace_pid[cpu];
130                 }
131
132                 res = os_ptrace_ldt(pid, 0, (unsigned long) &ldt_op);
133
134                 if(proc_mm)
135                         put_cpu();
136         }
137         else {
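                /*
                 * No PTRACE_LDT: copy the descriptor into the child's stub
                 * data page (its size rounded up to a long boundary) and
                 * invoke modify_ldt there via the syscall stub.
                 */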
138                 void *stub_addr;
139                 res = syscall_stub_data(mm_idp, (unsigned long *)desc,
140                                         (sizeof(*desc) + sizeof(long) - 1) &
141                                             ~(sizeof(long) - 1),
142                                         addr, &stub_addr);
143                 if(!res){
144                         unsigned long args[] = { func,
145                                                  (unsigned long)stub_addr,
146                                                  sizeof(*desc),
147                                                  0, 0, 0 };
148                         res = run_syscall_stub(mm_idp, __NR_modify_ldt, args,
149                                                0, addr, done);
150                 }
151         }
152
153         if(proc_mm){
                /* This is the second half of the special handling that
                 * makes PTRACE_LDT possible to implement.
                 */
157                 if(current->active_mm && current->active_mm != &init_mm &&
158                    mm_idp != &current->active_mm->context.skas.id)
159                         switch_mm_skas(&current->active_mm->context.skas.id);
160         }
161
162         return res;
163 }
164
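/*
 * With PTRACE_LDT the host owns the authoritative LDT, so a read has to
 * fetch it from there: kmalloc a bounce buffer, let the host fill it via
 * PTRACE_LDT, and copy the result out to userspace.
 */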
165 static long read_ldt_from_host(void __user * ptr, unsigned long bytecount)
166 {
167         int res, n;
168         struct ptrace_ldt ptrace_ldt = (struct ptrace_ldt) {
169                         .func = 0,
170                         .bytecount = bytecount,
171                         .ptr = (void *)kmalloc(bytecount, GFP_KERNEL)};
172         u32 cpu;
173
174         if(ptrace_ldt.ptr == NULL)
175                 return -ENOMEM;
176
        /* This is called from sys_modify_ldt only, so userspace_pid[cpu]
         * gives us the right pid to ptrace.
         */
180
181         cpu = get_cpu();
182         res = os_ptrace_ldt(userspace_pid[cpu], 0, (unsigned long) &ptrace_ldt);
183         put_cpu();
184         if(res < 0)
185                 goto out;
186
187         n = copy_to_user(ptr, ptrace_ldt.ptr, res);
188         if(n != 0)
189                 res = -EFAULT;
190
191   out:
192         kfree(ptrace_ldt.ptr);
193
194         return res;
195 }
196
197 /*
198  * In skas mode, we hold our own ldt data in UML.
199  * Thus, the code implementing sys_modify_ldt_skas
200  * is very similar to (and mostly stolen from) sys_modify_ldt
 * in arch/i386/kernel/ldt.c.
202  * The routines copied and modified in part are:
203  * - read_ldt
204  * - read_default_ldt
205  * - write_ldt
206  * - sys_modify_ldt_skas
207  */
208
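/*
 * modify_ldt(func 0): copy the shadow LDT (or, with PTRACE_LDT, the host's
 * LDT) to userspace and zero-fill whatever remains of the caller's buffer.
 */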
209 static int read_ldt(void __user * ptr, unsigned long bytecount)
210 {
211         int i, err = 0;
212         unsigned long size;
213         uml_ldt_t * ldt = &current->mm->context.skas.ldt;
214
215         if(!ldt->entry_count)
216                 goto out;
217         if(bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES)
218                 bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES;
219         err = bytecount;
220
221         if(ptrace_ldt){
222                 return read_ldt_from_host(ptr, bytecount);
223         }
224
225         down(&ldt->semaphore);
226         if(ldt->entry_count <= LDT_DIRECT_ENTRIES){
227                 size = LDT_ENTRY_SIZE*LDT_DIRECT_ENTRIES;
228                 if(size > bytecount)
229                         size = bytecount;
230                 if(copy_to_user(ptr, ldt->u.entries, size))
231                         err = -EFAULT;
232                 bytecount -= size;
233                 ptr += size;
234         }
235         else {
236                 for(i=0; i<ldt->entry_count/LDT_ENTRIES_PER_PAGE && bytecount;
237                          i++){
238                         size = PAGE_SIZE;
239                         if(size > bytecount)
240                                 size = bytecount;
241                         if(copy_to_user(ptr, ldt->u.pages[i], size)){
242                                 err = -EFAULT;
243                                 break;
244                         }
245                         bytecount -= size;
246                         ptr += size;
247                 }
248         }
249         up(&ldt->semaphore);
250
251         if(bytecount == 0 || err == -EFAULT)
252                 goto out;
253
254         if(clear_user(ptr, bytecount))
255                 err = -EFAULT;
256
257 out:
258         return err;
259 }
260
261 static int read_default_ldt(void __user * ptr, unsigned long bytecount)
262 {
263         int err;
264
265         if(bytecount > 5*LDT_ENTRY_SIZE)
266                 bytecount = 5*LDT_ENTRY_SIZE;
267
268         err = bytecount;
        /* UML doesn't support lcall7 and lcall27,
         * so we don't really have a default ldt; instead we emulate
         * an empty ldt of the common host default ldt size.
         */
273         if(clear_user(ptr, bytecount))
274                 err = -EFAULT;
275
276         return err;
277 }
278
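/*
 * modify_ldt(func 1/0x11): validate the descriptor, install it on the host
 * through write_ldt_entry(), and mirror it in the shadow LDT, growing from
 * the small direct array to whole pages once the entry number gets large
 * enough.
 */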
279 static int write_ldt(void __user * ptr, unsigned long bytecount, int func)
280 {
281         uml_ldt_t * ldt = &current->mm->context.skas.ldt;
282         struct mm_id * mm_idp = &current->mm->context.skas.id;
283         int i, err;
284         struct user_desc ldt_info;
285         struct ldt_entry entry0, *ldt_p;
286         void *addr = NULL;
287
288         err = -EINVAL;
289         if(bytecount != sizeof(ldt_info))
290                 goto out;
291         err = -EFAULT;
292         if(copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
293                 goto out;
294
295         err = -EINVAL;
296         if(ldt_info.entry_number >= LDT_ENTRIES)
297                 goto out;
298         if(ldt_info.contents == 3){
299                 if (func == 1)
300                         goto out;
301                 if (ldt_info.seg_not_present == 0)
302                         goto out;
303         }
304
305         if(!ptrace_ldt)
306                 down(&ldt->semaphore);
307
308         err = write_ldt_entry(mm_idp, func, &ldt_info, &addr, 1);
309         if(err)
310                 goto out_unlock;
311         else if(ptrace_ldt) {
                /* With PTRACE_LDT available, this is used as a flag only. */
313                 ldt->entry_count = 1;
314                 goto out;
315         }
316
317         if(ldt_info.entry_number >= ldt->entry_count &&
318            ldt_info.entry_number >= LDT_DIRECT_ENTRIES){
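                /*
                 * The shadow LDT outgrows the direct array: allocate whole
                 * pages up to the one containing the new entry.  Entry 0 is
                 * saved and restored around the first allocation because the
                 * direct entries and the page pointer array presumably share
                 * storage in the u union.
                 */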
319                 for(i=ldt->entry_count/LDT_ENTRIES_PER_PAGE;
320                     i*LDT_ENTRIES_PER_PAGE <= ldt_info.entry_number;
321                     i++){
322                         if(i == 0)
323                                 memcpy(&entry0, ldt->u.entries,
324                                        sizeof(entry0));
325                         ldt->u.pages[i] = (struct ldt_entry *)
326                                 __get_free_page(GFP_KERNEL|__GFP_ZERO);
327                         if(!ldt->u.pages[i]){
328                                 err = -ENOMEM;
329                                 /* Undo the change in host */
330                                 memset(&ldt_info, 0, sizeof(ldt_info));
331                                 write_ldt_entry(mm_idp, 1, &ldt_info, &addr, 1);
332                                 goto out_unlock;
333                         }
334                         if(i == 0) {
335                                 memcpy(ldt->u.pages[0], &entry0,
336                                        sizeof(entry0));
337                                 memcpy(ldt->u.pages[0]+1, ldt->u.entries+1,
338                                        sizeof(entry0)*(LDT_DIRECT_ENTRIES-1));
339                         }
340                         ldt->entry_count = (i + 1) * LDT_ENTRIES_PER_PAGE;
341                 }
342         }
343         if(ldt->entry_count <= ldt_info.entry_number)
344                 ldt->entry_count = ldt_info.entry_number + 1;
345
346         if(ldt->entry_count <= LDT_DIRECT_ENTRIES)
347                 ldt_p = ldt->u.entries + ldt_info.entry_number;
348         else
349                 ldt_p = ldt->u.pages[ldt_info.entry_number/LDT_ENTRIES_PER_PAGE] +
350                         ldt_info.entry_number%LDT_ENTRIES_PER_PAGE;
351
352         if(ldt_info.base_addr == 0 && ldt_info.limit == 0 &&
353            (func == 1 || LDT_empty(&ldt_info))){
354                 ldt_p->a = 0;
355                 ldt_p->b = 0;
356         }
357         else{
358                 if (func == 1)
359                         ldt_info.useable = 0;
360                 ldt_p->a = LDT_entry_a(&ldt_info);
361                 ldt_p->b = LDT_entry_b(&ldt_info);
362         }
363         err = 0;
364
out_unlock:
        /* With PTRACE_LDT the semaphore was never taken above. */
        if(!ptrace_ldt)
                up(&ldt->semaphore);
367 out:
368         return err;
369 }
370
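/*
 * skas-mode dispatcher: func 0 reads the LDT, 1 and 0x11 write an entry,
 * 2 reads the (emulated) default LDT.
 */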
371 static long do_modify_ldt_skas(int func, void __user *ptr,
372                                unsigned long bytecount)
373 {
374         int ret = -ENOSYS;
375
376         switch (func) {
377                 case 0:
378                         ret = read_ldt(ptr, bytecount);
379                         break;
380                 case 1:
381                 case 0x11:
382                         ret = write_ldt(ptr, bytecount, func);
383                         break;
384                 case 2:
385                         ret = read_default_ldt(ptr, bytecount);
386                         break;
387         }
388         return ret;
389 }
390
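/*
 * List of host LDT slots that are in use, terminated by -1 and filled in
 * lazily by ldt_get_host_info().  dummy_list covers the simple cases
 * directly (no entries: dummy_list+1, only entry 0: dummy_list) and is
 * sized so that up to 8 in-use entries plus the terminator fit in it
 * without a kmalloc().
 */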
391 short dummy_list[9] = {0, -1};
392 short * host_ldt_entries = NULL;
393
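/*
 * Read the host's own LDT once and record which slots it populates, so
 * init_new_ldt() knows which entries to clear when a new mm inherits the
 * host LDT.
 */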
394 void ldt_get_host_info(void)
395 {
396         long ret;
397         struct ldt_entry * ldt;
398         int i, size, k, order;
399
400         host_ldt_entries = dummy_list+1;
401
402         for(i = LDT_PAGES_MAX-1, order=0; i; i>>=1, order++);
403
404         ldt = (struct ldt_entry *)
405               __get_free_pages(GFP_KERNEL|__GFP_ZERO, order);
406         if(ldt == NULL) {
407                 printk("ldt_get_host_info: couldn't allocate buffer for host ldt\n");
408                 return;
409         }
410
411         ret = modify_ldt(0, ldt, (1<<order)*PAGE_SIZE);
412         if(ret < 0) {
413                 printk("ldt_get_host_info: couldn't read host ldt\n");
414                 goto out_free;
415         }
416         if(ret == 0) {
417                 /* default_ldt is active, simply write an empty entry 0 */
418                 host_ldt_entries = dummy_list;
419                 goto out_free;
420         }
421
422         for(i=0, size=0; i<ret/LDT_ENTRY_SIZE; i++){
423                 if(ldt[i].a != 0 || ldt[i].b != 0)
424                         size++;
425         }
426
427         if(size < ARRAY_SIZE(dummy_list))
428                 host_ldt_entries = dummy_list;
429         else {
430                 size = (size + 1) * sizeof(dummy_list[0]);
431                 host_ldt_entries = (short *)kmalloc(size, GFP_KERNEL);
432                 if(host_ldt_entries == NULL) {
433                         printk("ldt_get_host_info: couldn't allocate host ldt list\n");
434                         goto out_free;
435                 }
436         }
437
438         for(i=0, k=0; i<ret/LDT_ENTRY_SIZE; i++){
439                 if(ldt[i].a != 0 || ldt[i].b != 0) {
440                         host_ldt_entries[k++] = i;
441                 }
442         }
443         host_ldt_entries[k] = -1;
444
445 out_free:
446         free_pages((unsigned long)ldt, order);
447 }
448
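/*
 * Set up the LDT of a newly created address space: either start from a
 * clean LDT (no parent mm) or copy the parent's, both on the host and in
 * the shadow copy kept here.
 */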
449 long init_new_ldt(struct mmu_context_skas * new_mm,
450                   struct mmu_context_skas * from_mm)
451 {
452         struct user_desc desc;
453         short * num_p;
454         int i;
455         long page, err=0;
456         void *addr = NULL;
457         struct proc_mm_op copy;
458
459
460         if(!ptrace_ldt)
461                 init_MUTEX(&new_mm->ldt.semaphore);
462
463         if(!from_mm){
464                 memset(&desc, 0, sizeof(desc));
465                 /*
466                  * We have to initialize a clean ldt.
467                  */
468                 if(proc_mm) {
                        /*
                         * If the new mm was created using proc_mm, the host's
                         * default ldt is currently assigned, which normally
                         * contains the call gates for lcall7 and lcall27.
                         * To remove these gates, we simply write an empty
                         * entry 0 to the host.
                         */
476                         err = write_ldt_entry(&new_mm->id, 1, &desc,
477                                               &addr, 1);
478                 }
479                 else{
                        /*
                         * Now we try to retrieve info about the ldt we
                         * inherited from the host.  All ldt entries found
                         * will be reset in the following loop.
                         */
485                         if(host_ldt_entries == NULL)
486                                 ldt_get_host_info();
487                         for(num_p=host_ldt_entries; *num_p != -1; num_p++){
488                                 desc.entry_number = *num_p;
489                                 err = write_ldt_entry(&new_mm->id, 1, &desc,
490                                                       &addr, *(num_p + 1) == -1);
491                                 if(err)
492                                         break;
493                         }
494                 }
495                 new_mm->ldt.entry_count = 0;
496
497                 goto out;
498         }
499
500         if(proc_mm){
                /* We have a valid from_mm, so we now have to copy the LDT
                 * of from_mm to new_mm, because with proc_mm a new mm with
                 * an empty/default LDT was created in new_mm().
                 */
505                 copy = ((struct proc_mm_op) { .op       = MM_COPY_SEGMENTS,
506                                               .u        =
507                                               { .copy_segments =
508                                                         from_mm->id.u.mm_fd } } );
509                 i = os_write_file(new_mm->id.u.mm_fd, &copy, sizeof(copy));
510                 if(i != sizeof(copy))
511                         printk("new_mm : /proc/mm copy_segments failed, "
512                                "err = %d\n", -i);
513         }
514
515         if(!ptrace_ldt) {
                /* If PTRACE_LDT isn't available, our local LDT supplies the
                 * data for modify_ldt(READLDT), because then we have to use
                 * the stub for modify_ldt, which can't handle a big read
                 * buffer of up to 64kB.
                 */
521                 down(&from_mm->ldt.semaphore);
522                 if(from_mm->ldt.entry_count <= LDT_DIRECT_ENTRIES){
523                         memcpy(new_mm->ldt.u.entries, from_mm->ldt.u.entries,
524                                sizeof(new_mm->ldt.u.entries));
525                 }
526                 else{
527                         i = from_mm->ldt.entry_count / LDT_ENTRIES_PER_PAGE;
528                         while(i-->0){
529                                 page = __get_free_page(GFP_KERNEL|__GFP_ZERO);
530                                 if (!page){
531                                         err = -ENOMEM;
532                                         break;
533                                 }
534                                 new_mm->ldt.u.pages[i] =
535                                         (struct ldt_entry *) page;
536                                 memcpy(new_mm->ldt.u.pages[i],
537                                        from_mm->ldt.u.pages[i], PAGE_SIZE);
538                         }
539                 }
540                 new_mm->ldt.entry_count = from_mm->ldt.entry_count;
541                 up(&from_mm->ldt.semaphore);
542         }
543
544     out:
545         return err;
546 }
547
548
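/* Release the page-backed part of the shadow LDT when an mm is torn down. */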
549 void free_ldt(struct mmu_context_skas * mm)
550 {
551         int i;
552
553         if(!ptrace_ldt && mm->ldt.entry_count > LDT_DIRECT_ENTRIES){
554                 i = mm->ldt.entry_count / LDT_ENTRIES_PER_PAGE;
555                 while(i-- > 0){
556                         free_page((long )mm->ldt.u.pages[i]);
557                 }
558         }
559         mm->ldt.entry_count = 0;
560 }
561 #endif
562
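/*
 * The modify_ldt system call entry point: dispatch to the tt or skas
 * implementation according to the mode UML is running in.
 */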
563 int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount)
564 {
565         return(CHOOSE_MODE_PROC(do_modify_ldt_tt, do_modify_ldt_skas, func,
566                                 ptr, bytecount));
567 }