/*
 * Listing header from the pandora-kernel.git web viewer:
 * commit "Add generic sys_old_select()"
 * file:   arch/m68k/kernel/sys_m68k.c
 */
1 /*
2  * linux/arch/m68k/kernel/sys_m68k.c
3  *
4  * This file contains various random system calls that
5  * have a non-standard calling sequence on the Linux/m68k
6  * platform.
7  */
8
9 #include <linux/capability.h>
10 #include <linux/errno.h>
11 #include <linux/sched.h>
12 #include <linux/mm.h>
13 #include <linux/fs.h>
14 #include <linux/smp.h>
15 #include <linux/smp_lock.h>
16 #include <linux/sem.h>
17 #include <linux/msg.h>
18 #include <linux/shm.h>
19 #include <linux/stat.h>
20 #include <linux/syscalls.h>
21 #include <linux/mman.h>
22 #include <linux/file.h>
23 #include <linux/ipc.h>
24
25 #include <asm/setup.h>
26 #include <asm/uaccess.h>
27 #include <asm/cachectl.h>
28 #include <asm/traps.h>
29 #include <asm/page.h>
30 #include <asm/unistd.h>
31 #include <linux/elf.h>
32 #include <asm/tlb.h>
33
34 asmlinkage int do_page_fault(struct pt_regs *regs, unsigned long address,
35                              unsigned long error_code);
36
37 asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
38         unsigned long prot, unsigned long flags,
39         unsigned long fd, unsigned long pgoff)
40 {
41         /*
42          * This is wrong for sun3 - there PAGE_SIZE is 8Kb,
43          * so we need to shift the argument down by 1; m68k mmap64(3)
44          * (in libc) expects the last argument of mmap2 in 4Kb units.
45          */
46         return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
47 }
48
49 /*
50  * Perform the select(nd, in, out, ex, tv) and mmap() system
51  * calls. Linux/m68k cloned Linux/i386, which didn't use to be able to
52  * handle more than 4 system call parameters, so these system calls
53  * used a memory block for parameter passing..
54  */
55
/*
 * Argument block for the old-style mmap() call: userspace passes a
 * pointer to one of these instead of six separate syscall arguments.
 * Layout is ABI -- do not reorder or resize the fields.
 */
struct mmap_arg_struct {
	unsigned long addr;	/* requested mapping address (hint) */
	unsigned long len;	/* length of the mapping in bytes */
	unsigned long prot;	/* PROT_* protection bits */
	unsigned long flags;	/* MAP_* flags */
	unsigned long fd;	/* file descriptor for file-backed mappings */
	unsigned long offset;	/* byte offset into the file; must be page aligned */
};
64
65 asmlinkage int old_mmap(struct mmap_arg_struct __user *arg)
66 {
67         struct mmap_arg_struct a;
68         int error = -EFAULT;
69
70         if (copy_from_user(&a, arg, sizeof(a)))
71                 goto out;
72
73         error = -EINVAL;
74         if (a.offset & ~PAGE_MASK)
75                 goto out;
76
77         error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
78                                a.offset >> PAGE_SHIFT);
79 out:
80         return error;
81 }
82
/*
 * sys_ipc() is the de-multiplexer for the SysV IPC calls..
 *
 * This is really horribly ugly.
 *
 * 'call' selects the operation (SEMOP..SHMCTL); the remaining
 * arguments are reinterpreted per operation.  Unknown calls within a
 * known group return -ENOSYS, calls outside every group -EINVAL.
 */
asmlinkage int sys_ipc (uint call, int first, int second,
			int third, void __user *ptr, long fifth)
{
	int version, ret;

	version = call >> 16; /* hack for backward compatibility */
	call &= 0xffff;

	if (call <= SEMCTL)
		switch (call) {
		case SEMOP:
			return sys_semop (first, ptr, second);
		case SEMGET:
			return sys_semget (first, second, third);
		case SEMCTL: {
			/* The semun argument is passed indirectly: ptr
			   points at a user copy of the union. */
			union semun fourth;
			if (!ptr)
				return -EINVAL;
			if (get_user(fourth.__pad, (void __user *__user *) ptr))
				return -EFAULT;
			return sys_semctl (first, second, third, fourth);
			}
		default:
			return -ENOSYS;
		}
	if (call <= MSGCTL)
		switch (call) {
		case MSGSND:
			return sys_msgsnd (first, ptr, second, third);
		case MSGRCV:
			switch (version) {
			case 0: {
				/* Old ABI: msgp and msgtyp arrive packed
				   in a user-space ipc_kludge struct. */
				struct ipc_kludge tmp;
				if (!ptr)
					return -EINVAL;
				if (copy_from_user (&tmp, ptr, sizeof (tmp)))
					return -EFAULT;
				return sys_msgrcv (first, tmp.msgp, second,
						   tmp.msgtyp, third);
				}
			default:
				return sys_msgrcv (first, ptr,
						   second, fifth, third);
			}
		case MSGGET:
			return sys_msgget ((key_t) first, second);
		case MSGCTL:
			return sys_msgctl (first, second, ptr);
		default:
			return -ENOSYS;
		}
	if (call <= SHMCTL)
		switch (call) {
		case SHMAT:
			switch (version) {
			default: {
				/* The attach address is returned through the
				   user pointer passed in 'third'. */
				ulong raddr;
				ret = do_shmat (first, ptr, second, &raddr);
				if (ret)
					return ret;
				return put_user (raddr, (ulong __user *) third);
			}
			}
		case SHMDT:
			return sys_shmdt (ptr);
		case SHMGET:
			return sys_shmget (first, second, third);
		case SHMCTL:
			return sys_shmctl (first, second, ptr);
		default:
			return -ENOSYS;
		}

	return -EINVAL;
}
163
/* Convert virtual (user) address VADDR to physical address PADDR.
   Evaluates to the physical page base of a resident mapping, or 0 when
   the page is not mapped.  68040 only: uses ptestr to probe the MMU. */
#define virt_to_phys_040(vaddr)						\
({									\
  unsigned long _mmusr, _paddr;						\
									\
  /* ptestr probes the translation for vaddr; the result (resident	\
     bit plus physical page address) is latched in %mmusr.  */		\
  __asm__ __volatile__ (".chip 68040\n\t"				\
			"ptestr (%1)\n\t"				\
			"movec %%mmusr,%0\n\t"				\
			".chip 68k"					\
			: "=r" (_mmusr)					\
			: "a" (vaddr));					\
  _paddr = (_mmusr & MMU_R_040) ? (_mmusr & PAGE_MASK) : 0;		\
  _paddr;								\
})
178
/*
 * Flush (part of) the 68040 caches on behalf of sys_cacheflush().
 *
 * addr/len describe a *virtual* address range; scope is one of the
 * FLUSH_SCOPE_* values and cache selects FLUSH_CACHE_DATA/INSN/BOTH.
 * cpushl/cpushp operate on physical addresses, so pages are translated
 * with virt_to_phys_040() and unmapped pages are silently skipped.
 * Always returns 0.
 */
static inline int
cache_flush_040 (unsigned long addr, int scope, int cache, unsigned long len)
{
  unsigned long paddr, i;

  switch (scope)
    {
    case FLUSH_SCOPE_ALL:
      /* Whole-cache flush via cpusha; no address translation needed.  */
      switch (cache)
	{
	case FLUSH_CACHE_DATA:
	  /* This nop is needed for some broken versions of the 68040.  */
	  __asm__ __volatile__ ("nop\n\t"
				".chip 68040\n\t"
				"cpusha %dc\n\t"
				".chip 68k");
	  break;
	case FLUSH_CACHE_INSN:
	  __asm__ __volatile__ ("nop\n\t"
				".chip 68040\n\t"
				"cpusha %ic\n\t"
				".chip 68k");
	  break;
	default:
	case FLUSH_CACHE_BOTH:
	  __asm__ __volatile__ ("nop\n\t"
				".chip 68040\n\t"
				"cpusha %bc\n\t"
				".chip 68k");
	  break;
	}
      break;

    case FLUSH_SCOPE_LINE:
      /* Find the physical address of the first mapped page in the
	 address range.  */
      if ((paddr = virt_to_phys_040(addr))) {
	/* First page is mapped: point paddr at the first cache line and
	   convert len from bytes to 16-byte cache lines.  */
	paddr += addr & ~(PAGE_MASK | 15);
	len = (len + (addr & 15) + 15) >> 4;
      } else {
	/* First page unmapped: walk forward page by page until a mapped
	   page is found or the range is exhausted.  */
	unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);

	if (len <= tmp)
	  return 0;
	addr += tmp;
	len -= tmp;
	tmp = PAGE_SIZE;
	for (;;)
	  {
	    if ((paddr = virt_to_phys_040(addr)))
	      break;
	    if (len <= tmp)
	      return 0;
	    addr += tmp;
	    len -= tmp;
	  }
	len = (len + 15) >> 4;
      }
      /* i = number of cache lines remaining in the current page.  */
      i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
      while (len--)
	{
	  /* cpushl flushes one cache line, addressed physically.  */
	  switch (cache)
	    {
	    case FLUSH_CACHE_DATA:
	      __asm__ __volatile__ ("nop\n\t"
				    ".chip 68040\n\t"
				    "cpushl %%dc,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    case FLUSH_CACHE_INSN:
	      __asm__ __volatile__ ("nop\n\t"
				    ".chip 68040\n\t"
				    "cpushl %%ic,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    default:
	    case FLUSH_CACHE_BOTH:
	      __asm__ __volatile__ ("nop\n\t"
				    ".chip 68040\n\t"
				    "cpushl %%bc,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    }
	  if (!--i && len)
	    {
	      /*
	       * No need to page align here since it is done by
	       * virt_to_phys_040().
	       */
	      addr += PAGE_SIZE;
	      i = PAGE_SIZE / 16;
	      /* Recompute physical address when crossing a page
		 boundary. */
	      for (;;)
		{
		  if ((paddr = virt_to_phys_040(addr)))
		    break;
		  if (len <= i)
		    return 0;
		  len -= i;
		  addr += PAGE_SIZE;
		}
	    }
	  else
	    paddr += 16;
	}
      break;

    default:
    case FLUSH_SCOPE_PAGE:
      /* Round the range out to whole pages; cpushp flushes a full page
	 per iteration, unmapped pages are skipped.  */
      len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
      for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
	{
	  if (!(paddr = virt_to_phys_040(addr)))
	    continue;
	  switch (cache)
	    {
	    case FLUSH_CACHE_DATA:
	      __asm__ __volatile__ ("nop\n\t"
				    ".chip 68040\n\t"
				    "cpushp %%dc,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    case FLUSH_CACHE_INSN:
	      __asm__ __volatile__ ("nop\n\t"
				    ".chip 68040\n\t"
				    "cpushp %%ic,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    default:
	    case FLUSH_CACHE_BOTH:
	      __asm__ __volatile__ ("nop\n\t"
				    ".chip 68040\n\t"
				    "cpushp %%bc,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    }
	}
      break;
    }
  return 0;
}
327
/* Convert virtual address VADDR to a physical address on the 68060
   using plpar, which translates the address in the register in place.
   NOTE(review): the XXX below is original -- the macro appears to rely
   on plpar yielding 0 for an unmapped page; confirm against the 68060
   manual before relying on it elsewhere.  */
#define virt_to_phys_060(vaddr)				\
({							\
  unsigned long paddr;					\
  __asm__ __volatile__ (".chip 68060\n\t"		\
			"plpar (%0)\n\t"		\
			".chip 68k"			\
			: "=a" (paddr)			\
			: "0" (vaddr));			\
  (paddr); /* XXX */					\
})
338
/*
 * Flush (part of) the 68060 caches on behalf of sys_cacheflush().
 * Same contract as cache_flush_040(): virtual addr/len range,
 * FLUSH_SCOPE_*/FLUSH_CACHE_* selectors, unmapped pages skipped,
 * always returns 0.
 */
static inline int
cache_flush_060 (unsigned long addr, int scope, int cache, unsigned long len)
{
  unsigned long paddr, i;

  /*
   * 68060 manual says:
   *  cpush %dc : flush DC, remains valid (with our %cacr setup)
   *  cpush %ic : invalidate IC
   *  cpush %bc : flush DC + invalidate IC
   */
  switch (scope)
    {
    case FLUSH_SCOPE_ALL:
      /* Whole-cache flush via cpusha; no translation needed.  */
      switch (cache)
	{
	case FLUSH_CACHE_DATA:
	  __asm__ __volatile__ (".chip 68060\n\t"
				"cpusha %dc\n\t"
				".chip 68k");
	  break;
	case FLUSH_CACHE_INSN:
	  __asm__ __volatile__ (".chip 68060\n\t"
				"cpusha %ic\n\t"
				".chip 68k");
	  break;
	default:
	case FLUSH_CACHE_BOTH:
	  __asm__ __volatile__ (".chip 68060\n\t"
				"cpusha %bc\n\t"
				".chip 68k");
	  break;
	}
      break;

    case FLUSH_SCOPE_LINE:
      /* Find the physical address of the first mapped page in the
	 address range.  */
      len += addr & 15;		/* widen range to cover the whole first line */
      addr &= -16;		/* align down to a 16-byte cache line */
      if (!(paddr = virt_to_phys_060(addr))) {
	/* First page unmapped: walk forward page by page until a mapped
	   page is found or the range is exhausted.  */
	unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);

	if (len <= tmp)
	  return 0;
	addr += tmp;
	len -= tmp;
	tmp = PAGE_SIZE;
	for (;;)
	  {
	    if ((paddr = virt_to_phys_060(addr)))
	      break;
	    if (len <= tmp)
	      return 0;
	    addr += tmp;
	    len -= tmp;
	  }
      }
      len = (len + 15) >> 4;	/* bytes -> 16-byte cache lines */
      /* i = number of cache lines remaining in the current page.  */
      i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
      while (len--)
	{
	  /* cpushl flushes one cache line, addressed physically.  */
	  switch (cache)
	    {
	    case FLUSH_CACHE_DATA:
	      __asm__ __volatile__ (".chip 68060\n\t"
				    "cpushl %%dc,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    case FLUSH_CACHE_INSN:
	      __asm__ __volatile__ (".chip 68060\n\t"
				    "cpushl %%ic,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    default:
	    case FLUSH_CACHE_BOTH:
	      __asm__ __volatile__ (".chip 68060\n\t"
				    "cpushl %%bc,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    }
	  if (!--i && len)
	    {

	      /*
	       * We just want to jump to the first cache line
	       * in the next page.
	       */
	      addr += PAGE_SIZE;
	      addr &= PAGE_MASK;

	      i = PAGE_SIZE / 16;
	      /* Recompute physical address when crossing a page
		 boundary. */
	      for (;;)
		{
		  if ((paddr = virt_to_phys_060(addr)))
		    break;
		  if (len <= i)
		    return 0;
		  len -= i;
		  addr += PAGE_SIZE;
		}
	    }
	  else
	    paddr += 16;
	}
      break;

    default:
    case FLUSH_SCOPE_PAGE:
      /* Round the range out to whole pages; cpushp flushes a full page
	 per iteration, unmapped pages are skipped.  */
      len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
      addr &= PAGE_MASK;	/* Workaround for bug in some
				   revisions of the 68060 */
      for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
	{
	  if (!(paddr = virt_to_phys_060(addr)))
	    continue;
	  switch (cache)
	    {
	    case FLUSH_CACHE_DATA:
	      __asm__ __volatile__ (".chip 68060\n\t"
				    "cpushp %%dc,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    case FLUSH_CACHE_INSN:
	      __asm__ __volatile__ (".chip 68060\n\t"
				    "cpushp %%ic,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    default:
	    case FLUSH_CACHE_BOTH:
	      __asm__ __volatile__ (".chip 68060\n\t"
				    "cpushp %%bc,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    }
	}
      break;
    }
  return 0;
}
487
/* sys_cacheflush -- flush (part of) the processor cache.
   addr/len select a user virtual range; scope is FLUSH_SCOPE_LINE/
   PAGE/ALL and cache is FLUSH_CACHE_DATA/INSN/BOTH.  Returns 0 on
   success, -EINVAL for bad arguments or an unmapped range, -EPERM if
   a non-admin asks for a whole-cache flush.  */
asmlinkage int
sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
{
	struct vm_area_struct *vma;
	int ret = -EINVAL;

	lock_kernel();
	/* Reject out-of-range scopes and unknown cache selector bits. */
	if (scope < FLUSH_SCOPE_LINE || scope > FLUSH_SCOPE_ALL ||
	    cache & ~FLUSH_CACHE_BOTH)
		goto out;

	if (scope == FLUSH_SCOPE_ALL) {
		/* Only the superuser may explicitly flush the whole cache. */
		ret = -EPERM;
		if (!capable(CAP_SYS_ADMIN))
			goto out;
	} else {
		/*
		 * Verify that the specified address region actually belongs
		 * to this process.
		 */
		vma = find_vma (current->mm, addr);
		ret = -EINVAL;
		/* Check for overflow.  */
		if (addr + len < addr)
			goto out;
		/* The whole range must fall inside one VMA. */
		if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end)
			goto out;
	}

	if (CPU_IS_020_OR_030) {
		if (scope == FLUSH_SCOPE_LINE && len < 256) {
			/* Per-line flush: clear individual entries by
			   writing each address to %caar with the clear-entry
			   bits (4 = IC entry, 0x400 = DC entry) set in %cacr. */
			unsigned long cacr;
			__asm__ ("movec %%cacr, %0" : "=r" (cacr));
			if (cache & FLUSH_CACHE_INSN)
				cacr |= 4;
			if (cache & FLUSH_CACHE_DATA)
				cacr |= 0x400;
			len >>= 2;	/* bytes -> 4-byte cache entries */
			while (len--) {
				__asm__ __volatile__ ("movec %1, %%caar\n\t"
						      "movec %0, %%cacr"
						      : /* no outputs */
						      : "r" (cacr), "r" (addr));
				addr += 4;
			}
		} else {
			/* Flush the whole cache, even if page granularity requested.
			   (8 = clear IC, 0x800 = clear DC in %cacr.) */
			unsigned long cacr;
			__asm__ ("movec %%cacr, %0" : "=r" (cacr));
			if (cache & FLUSH_CACHE_INSN)
				cacr |= 8;
			if (cache & FLUSH_CACHE_DATA)
				cacr |= 0x800;
			__asm__ __volatile__ ("movec %0, %%cacr" : : "r" (cacr));
		}
		ret = 0;
		goto out;
	} else {
	    /*
	     * 040 or 060: don't blindly trust 'scope', someone could
	     * try to flush a few megs of memory.
	     */

	    if (len>=3*PAGE_SIZE && scope<FLUSH_SCOPE_PAGE)
	        scope=FLUSH_SCOPE_PAGE;
	    if (len>=10*PAGE_SIZE && scope<FLUSH_SCOPE_ALL)
	        scope=FLUSH_SCOPE_ALL;
	    if (CPU_IS_040) {
	        ret = cache_flush_040 (addr, scope, cache, len);
	    } else if (CPU_IS_060) {
	        ret = cache_flush_060 (addr, scope, cache, len);
	    }
	}
out:
	unlock_kernel();
	return ret;
}
567
/* m68k-specific syscall: report the MMU page size to userspace. */
asmlinkage int sys_getpagesize(void)
{
	return PAGE_SIZE;
}
572
/*
 * Do a system call from kernel instead of calling sys_execve so we
 * end up with proper pt_regs.
 *
 * Pins the m68k syscall ABI registers by hand: d0 = syscall number,
 * d1-d3 = arguments, then enters the kernel through trap #0 so a real
 * syscall frame is built.  Returns the syscall result from d0
 * (negative errno on failure).
 */
int kernel_execve(const char *filename, char *const argv[], char *const envp[])
{
	register long __res asm ("%d0") = __NR_execve;
	register long __a asm ("%d1") = (long)(filename);
	register long __b asm ("%d2") = (long)(argv);
	register long __c asm ("%d3") = (long)(envp);
	/* "+d" on __res: d0 carries the number in and the result out. */
	asm volatile ("trap  #0" : "+d" (__res)
			: "d" (__a), "d" (__b), "d" (__c));
	return __res;
}
587
588 asmlinkage unsigned long sys_get_thread_area(void)
589 {
590         return current_thread_info()->tp_value;
591 }
592
593 asmlinkage int sys_set_thread_area(unsigned long tp)
594 {
595         current_thread_info()->tp_value = tp;
596         return 0;
597 }
598
/* This syscall gets its arguments in A0 (mem), D2 (oldval) and
   D1 (newval).  d3-d5 are unused pass-throughs of the remaining
   argument registers.  Compare-and-exchange a 32-bit user word:
   if *mem == oldval, store newval.  Returns the value previously
   found at *mem (so callers compare against oldval to detect
   success), or 0xdeadbeef when the fault fixup itself fails.  */
asmlinkage int
sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
		      unsigned long __user * mem)
{
	/* This was borrowed from ARM's implementation.  */
	for (;;) {
		struct mm_struct *mm = current->mm;
		pgd_t *pgd;
		pmd_t *pmd;
		pte_t *pte;
		spinlock_t *ptl;
		unsigned long mem_value;

		/* Walk the page tables by hand so the exchange is only
		   attempted when *mem is present, writable and dirty --
		   then the direct dereference below cannot fault. */
		down_read(&mm->mmap_sem);
		pgd = pgd_offset(mm, (unsigned long)mem);
		if (!pgd_present(*pgd))
			goto bad_access;
		pmd = pmd_offset(pgd, (unsigned long)mem);
		if (!pmd_present(*pmd))
			goto bad_access;
		pte = pte_offset_map_lock(mm, pmd, (unsigned long)mem, &ptl);
		if (!pte_present(*pte) || !pte_dirty(*pte)
		    || !pte_write(*pte)) {
			pte_unmap_unlock(pte, ptl);
			goto bad_access;
		}

		/* NOTE(review): the bare *mem access assumes kernel and
		   user share the address space here and that holding the
		   pte lock makes the read-modify-write atomic enough on
		   a uniprocessor -- confirm before reusing this pattern. */
		mem_value = *mem;
		if (mem_value == oldval)
			*mem = newval;

		pte_unmap_unlock(pte, ptl);
		up_read(&mm->mmap_sem);
		return mem_value;

	      bad_access:
		up_read(&mm->mmap_sem);
		/* This is not necessarily a bad access, we can get here if
		   a memory we're trying to write to should be copied-on-write.
		   Make the kernel do the necessary page stuff, then re-iterate.
		   Simulate a write access fault to do that.  */
		{
			/* The first argument of the function corresponds to
			   D1, which is the first field of struct pt_regs.  */
			struct pt_regs *fp = (struct pt_regs *)&newval;

			/* '3' is an RMW flag.  */
			if (do_page_fault(fp, (unsigned long)mem, 3))
				/* If the do_page_fault() failed, we don't
				   have anything meaningful to return.
				   There should be a SIGSEGV pending for
				   the process.  */
				return 0xdeadbeef;
		}
	}
}
657
658 asmlinkage int sys_atomic_barrier(void)
659 {
660         /* no code needed for uniprocs */
661         return 0;
662 }