/*
 * linux/kernel/ptrace.c
 *
 * (C) Copyright 1999 Linus Torvalds
 *
 * Common interfaces for "ptrace()" which we do not want
 * to continually duplicate across every architecture.
 */

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/audit.h>
#include <linux/pid_namespace.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/regset.h>
#include <linux/hw_breakpoint.h>


/*
 * ptrace a task: make the debugger its new parent and
 * move it to the ptrace list.
 *
 * Must be called with the tasklist lock write-held.
 */
void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
{
        BUG_ON(!list_empty(&child->ptrace_entry));
        list_add(&child->ptrace_entry, &new_parent->ptraced);
        child->parent = new_parent;
}

/*
 * Turn a tracing stop into a normal stop now, since with no tracer there
 * would be no way to wake it up with SIGCONT or SIGKILL.  If there was a
 * signal sent that would resume the child, but didn't because it was in
 * TASK_TRACED, resume it now.
 * Requires that irqs be disabled.
 */
static void ptrace_untrace(struct task_struct *child)
{
        spin_lock(&child->sighand->siglock);
        if (task_is_traced(child)) {
                /*
                 * If the group stop is completed or in progress,
                 * this thread was already counted as stopped.
                 */
                if (child->signal->flags & SIGNAL_STOP_STOPPED ||
                    child->signal->group_stop_count)
                        __set_task_state(child, TASK_STOPPED);
                else
                        signal_wake_up(child, 1);
        }
        spin_unlock(&child->sighand->siglock);
}

/*
 * unptrace a task: move it back to its original parent and
 * remove it from the ptrace list.
 *
 * Must be called with the tasklist lock write-held.
 */
void __ptrace_unlink(struct task_struct *child)
{
        BUG_ON(!child->ptrace);

        child->ptrace = 0;
        child->parent = child->real_parent;
        list_del_init(&child->ptrace_entry);

        if (task_is_traced(child))
                ptrace_untrace(child);
}

/*
 * Check that we have indeed attached to the thing, and that the child
 * is in a traced stop (the @kill case, used for PTRACE_KILL, waives
 * the stopped-state requirement).
 */
int ptrace_check_attach(struct task_struct *child, int kill)
{
        int ret = -ESRCH;

        /*
         * We take the read lock around doing both checks to close a
         * possible race where someone else was tracing our child and
         * detached between these two checks.  After this locked check,
         * we are sure that this is our traced child and that can only
         * be changed by us so it's not changing right after this.
         */
        read_lock(&tasklist_lock);
        if ((child->ptrace & PT_PTRACED) && child->parent == current) {
                ret = 0;
                /*
                 * child->sighand can't be NULL, release_task()
                 * does ptrace_unlink() before __exit_signal().
                 */
                spin_lock_irq(&child->sighand->siglock);
                if (task_is_stopped(child))
                        child->state = TASK_TRACED;
                else if (!task_is_traced(child) && !kill)
                        ret = -ESRCH;
                spin_unlock_irq(&child->sighand->siglock);
        }
        read_unlock(&tasklist_lock);

        if (!ret && !kill)
                ret = wait_task_inactive(child, TASK_TRACED) ? 0 : -ESRCH;

        /* All systems go.. */
        return ret;
}
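
/*
 * Usage note: the ptrace() system call below invokes this before any
 * request other than ATTACH/TRACEME, passing @kill != 0 only for
 * PTRACE_KILL, so most requests fail with -ESRCH unless the child is
 * sitting in TASK_TRACED.
 */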

int __ptrace_may_access(struct task_struct *task, unsigned int mode)
{
        const struct cred *cred = current_cred(), *tcred;

        /* May we inspect the given task?
         * This check is used both for attaching with ptrace
         * and for allowing access to sensitive information in /proc.
         *
         * ptrace_attach denies several cases that /proc allows
         * because setting up the necessary parent/child relationship
         * or halting the specified task is impossible.
         */
        int dumpable = 0;
        /* Don't let security modules deny introspection */
        if (task == current)
                return 0;
        rcu_read_lock();
        tcred = __task_cred(task);
        if (cred->user->user_ns == tcred->user->user_ns &&
            (cred->uid == tcred->euid &&
             cred->uid == tcred->suid &&
             cred->uid == tcred->uid  &&
             cred->gid == tcred->egid &&
             cred->gid == tcred->sgid &&
             cred->gid == tcred->gid))
                goto ok;
        if (ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE))
                goto ok;
        rcu_read_unlock();
        return -EPERM;
ok:
        rcu_read_unlock();
        smp_rmb();
        if (task->mm)
                dumpable = get_dumpable(task->mm);
        if (!dumpable && !task_ns_capable(task, CAP_SYS_PTRACE))
                return -EPERM;

        return security_ptrace_access_check(task, mode);
}

bool ptrace_may_access(struct task_struct *task, unsigned int mode)
{
        int err;
        task_lock(task);
        err = __ptrace_may_access(task, mode);
        task_unlock(task);
        return !err;
}
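
/*
 * Usage note: @mode is PTRACE_MODE_ATTACH for a real attach (see
 * ptrace_attach() below) and PTRACE_MODE_READ for the weaker
 * /proc-style introspection checks mentioned in the comment above.
 */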

static int ptrace_attach(struct task_struct *task)
{
        int retval;

        audit_ptrace(task);

        retval = -EPERM;
        if (unlikely(task->flags & PF_KTHREAD))
                goto out;
        if (same_thread_group(task, current))
                goto out;

        /*
         * Protect exec's credential calculations against our interference;
         * SUID, SGID and LSM creds get determined differently under ptrace.
         */
        retval = -ERESTARTNOINTR;
        if (mutex_lock_interruptible(&task->signal->cred_guard_mutex))
                goto out;

        task_lock(task);
        retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
        task_unlock(task);
        if (retval)
                goto unlock_creds;

        write_lock_irq(&tasklist_lock);
        retval = -EPERM;
        if (unlikely(task->exit_state))
                goto unlock_tasklist;
        if (task->ptrace)
                goto unlock_tasklist;

        task->ptrace = PT_PTRACED;
        if (task_ns_capable(task, CAP_SYS_PTRACE))
                task->ptrace |= PT_PTRACE_CAP;

        __ptrace_link(task, current);
        send_sig_info(SIGSTOP, SEND_SIG_FORCED, task);

        retval = 0;
unlock_tasklist:
        write_unlock_irq(&tasklist_lock);
unlock_creds:
        mutex_unlock(&task->signal->cred_guard_mutex);
out:
        return retval;
}
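
/*
 * Illustrative userspace sketch: a tracer pairs PTRACE_ATTACH with a
 * waitpid() for the SIGSTOP queued above, e.g.:
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/wait.h>
 *
 *	int status;
 *	if (ptrace(PTRACE_ATTACH, pid, NULL, NULL) == 0)
 *		waitpid(pid, &status, 0);	// wait for the attach stop
 */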

/**
 * ptrace_traceme  --  helper for PTRACE_TRACEME
 *
 * Performs checks and sets PT_PTRACED.
 * Should be used by all ptrace implementations for PTRACE_TRACEME.
 */
static int ptrace_traceme(void)
{
        int ret = -EPERM;

        write_lock_irq(&tasklist_lock);
        /* Are we already being traced? */
        if (!current->ptrace) {
                ret = security_ptrace_traceme(current->parent);
                /*
                 * Check PF_EXITING to ensure ->real_parent has not passed
                 * exit_ptrace(). Otherwise we don't report the error but
                 * pretend ->real_parent untraces us right after return.
                 */
                if (!ret && !(current->real_parent->flags & PF_EXITING)) {
                        current->ptrace = PT_PTRACED;
                        __ptrace_link(current, current->real_parent);
                }
        }
        write_unlock_irq(&tasklist_lock);

        return ret;
}
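
/*
 * Illustrative userspace sketch: the canonical pattern is for a child to
 * request tracing before exec, so the parent can intercept it early:
 *
 *	if (fork() == 0) {
 *		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
 *		raise(SIGSTOP);			// let the parent catch up
 *		execl("/bin/true", "true", NULL);
 *	}
 */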

/*
 * Called with irqs disabled, returns true if children should reap themselves.
 */
static int ignoring_children(struct sighand_struct *sigh)
{
        int ret;
        spin_lock(&sigh->siglock);
        ret = (sigh->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) ||
              (sigh->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT);
        spin_unlock(&sigh->siglock);
        return ret;
}

/*
 * Called with tasklist_lock held for writing.
 * Unlink a traced task, and clean it up if it was a traced zombie.
 * Return true if it needs to be reaped with release_task().
 * (We can't call release_task() here because we already hold tasklist_lock.)
 *
 * If it's a zombie, our attachedness prevented normal parent notification
 * or self-reaping.  Do notification now if it would have happened earlier.
 * If it should reap itself, return true.
 *
 * If it's our own child, there is no notification to do. But if our normal
 * children self-reap, then this child was prevented by ptrace and we must
 * reap it now; in that case we must also wake up sub-threads sleeping in
 * do_wait().
 */
static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
{
        __ptrace_unlink(p);

        if (p->exit_state == EXIT_ZOMBIE) {
                if (!task_detached(p) && thread_group_empty(p)) {
                        if (!same_thread_group(p->real_parent, tracer))
                                do_notify_parent(p, p->exit_signal);
                        else if (ignoring_children(tracer->sighand)) {
                                __wake_up_parent(p, tracer);
                                p->exit_signal = -1;
                        }
                }
                if (task_detached(p)) {
                        /* Mark it as in the process of being reaped. */
                        p->exit_state = EXIT_DEAD;
                        return true;
                }
        }

        return false;
}

static int ptrace_detach(struct task_struct *child, unsigned int data)
{
        bool dead = false;

        if (!valid_signal(data))
                return -EIO;

        /* Architecture-specific hardware disable .. */
        ptrace_disable(child);
        clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

        write_lock_irq(&tasklist_lock);
        /*
         * This child may already have been killed. Make sure de_thread()
         * or our sub-thread doing do_wait() didn't do release_task() yet.
         */
        if (child->ptrace) {
                child->exit_code = data;
                dead = __ptrace_detach(current, child);
                if (!child->exit_state)
                        wake_up_state(child, TASK_TRACED | TASK_STOPPED);
        }
        write_unlock_irq(&tasklist_lock);

        if (unlikely(dead))
                release_task(child);

        return 0;
}

/*
 * Detach all tasks we were using ptrace on. Called with tasklist held
 * for writing, and returns with it held too. But note it can release
 * and reacquire the lock.
 */
void exit_ptrace(struct task_struct *tracer)
        __releases(&tasklist_lock)
        __acquires(&tasklist_lock)
{
        struct task_struct *p, *n;
        LIST_HEAD(ptrace_dead);

        if (likely(list_empty(&tracer->ptraced)))
                return;

        list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) {
                if (__ptrace_detach(tracer, p))
                        list_add(&p->ptrace_entry, &ptrace_dead);
        }

        write_unlock_irq(&tasklist_lock);
        BUG_ON(!list_empty(&tracer->ptraced));

        list_for_each_entry_safe(p, n, &ptrace_dead, ptrace_entry) {
                list_del_init(&p->ptrace_entry);
                release_task(p);
        }

        write_lock_irq(&tasklist_lock);
}

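/*
 * Copy @len bytes of the traced task's memory, starting at @src, into the
 * tracer's user buffer @dst, in chunks of at most 128 bytes.  Returns the
 * number of bytes copied, or -EIO/-EFAULT if nothing could be transferred.
 */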
int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
{
        int copied = 0;

        while (len > 0) {
                char buf[128];
                int this_len, retval;

                this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
                retval = access_process_vm(tsk, src, buf, this_len, 0);
                if (!retval) {
                        if (copied)
                                break;
                        return -EIO;
                }
                if (copy_to_user(dst, buf, retval))
                        return -EFAULT;
                copied += retval;
                src += retval;
                dst += retval;
                len -= retval;
        }
        return copied;
}

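/*
 * Mirror of ptrace_readdata(): copy @len bytes from the tracer's user
 * buffer @src into the traced task's memory at @dst.  Returns the number
 * of bytes written, or -EIO/-EFAULT if nothing could be transferred.
 */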
int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len)
{
        int copied = 0;

        while (len > 0) {
                char buf[128];
                int this_len, retval;

                this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
                if (copy_from_user(buf, src, this_len))
                        return -EFAULT;
                retval = access_process_vm(tsk, dst, buf, this_len, 1);
                if (!retval) {
                        if (copied)
                                break;
                        return -EIO;
                }
                copied += retval;
                src += retval;
                dst += retval;
                len -= retval;
        }
        return copied;
}

static int ptrace_setoptions(struct task_struct *child, unsigned long data)
{
        child->ptrace &= ~PT_TRACE_MASK;

        if (data & PTRACE_O_TRACESYSGOOD)
                child->ptrace |= PT_TRACESYSGOOD;

        if (data & PTRACE_O_TRACEFORK)
                child->ptrace |= PT_TRACE_FORK;

        if (data & PTRACE_O_TRACEVFORK)
                child->ptrace |= PT_TRACE_VFORK;

        if (data & PTRACE_O_TRACECLONE)
                child->ptrace |= PT_TRACE_CLONE;

        if (data & PTRACE_O_TRACEEXEC)
                child->ptrace |= PT_TRACE_EXEC;

        if (data & PTRACE_O_TRACEVFORKDONE)
                child->ptrace |= PT_TRACE_VFORK_DONE;

        if (data & PTRACE_O_TRACEEXIT)
                child->ptrace |= PT_TRACE_EXIT;

        return (data & ~PTRACE_O_MASK) ? -EINVAL : 0;
}
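
/*
 * Illustrative userspace sketch: a tracer typically sets options once the
 * child has stopped, e.g. to flag syscall stops with bit 7 of the signal
 * number and to auto-attach to forked children:
 *
 *	ptrace(PTRACE_SETOPTIONS, pid, 0,
 *	       PTRACE_O_TRACESYSGOOD | PTRACE_O_TRACEFORK);
 */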

static int ptrace_getsiginfo(struct task_struct *child, siginfo_t *info)
{
        unsigned long flags;
        int error = -ESRCH;

        if (lock_task_sighand(child, &flags)) {
                error = -EINVAL;
                if (likely(child->last_siginfo != NULL)) {
                        *info = *child->last_siginfo;
                        error = 0;
                }
                unlock_task_sighand(child, &flags);
        }
        return error;
}

static int ptrace_setsiginfo(struct task_struct *child, const siginfo_t *info)
{
        unsigned long flags;
        int error = -ESRCH;

        if (lock_task_sighand(child, &flags)) {
                error = -EINVAL;
                if (likely(child->last_siginfo != NULL)) {
                        *child->last_siginfo = *info;
                        error = 0;
                }
                unlock_task_sighand(child, &flags);
        }
        return error;
}


#ifdef PTRACE_SINGLESTEP
#define is_singlestep(request)          ((request) == PTRACE_SINGLESTEP)
#else
#define is_singlestep(request)          0
#endif

#ifdef PTRACE_SINGLEBLOCK
#define is_singleblock(request)         ((request) == PTRACE_SINGLEBLOCK)
#else
#define is_singleblock(request)         0
#endif

#ifdef PTRACE_SYSEMU
#define is_sysemu_singlestep(request)   ((request) == PTRACE_SYSEMU_SINGLESTEP)
#else
#define is_sysemu_singlestep(request)   0
#endif

static int ptrace_resume(struct task_struct *child, long request,
                         unsigned long data)
{
        if (!valid_signal(data))
                return -EIO;

        if (request == PTRACE_SYSCALL)
                set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
        else
                clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

#ifdef TIF_SYSCALL_EMU
        if (request == PTRACE_SYSEMU || request == PTRACE_SYSEMU_SINGLESTEP)
                set_tsk_thread_flag(child, TIF_SYSCALL_EMU);
        else
                clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
#endif

        if (is_singleblock(request)) {
                if (unlikely(!arch_has_block_step()))
                        return -EIO;
                user_enable_block_step(child);
        } else if (is_singlestep(request) || is_sysemu_singlestep(request)) {
                if (unlikely(!arch_has_single_step()))
                        return -EIO;
                user_enable_single_step(child);
        } else {
                user_disable_single_step(child);
        }

        child->exit_code = data;
        wake_up_process(child);

        return 0;
}
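
/*
 * Illustrative userspace sketch: PTRACE_SYSCALL drives the classic
 * strace-style loop, stopping the child at each syscall entry and exit;
 * the data argument carries a signal number (0 for none) to deliver on
 * resume:
 *
 *	int status;
 *	for (;;) {
 *		ptrace(PTRACE_SYSCALL, pid, 0, 0);
 *		if (waitpid(pid, &status, 0) < 0 || WIFEXITED(status))
 *			break;
 *		// inspect the stop here, e.g. with PTRACE_GETSIGINFO
 *	}
 */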

#ifdef CONFIG_HAVE_ARCH_TRACEHOOK

static const struct user_regset *
find_regset(const struct user_regset_view *view, unsigned int type)
{
        const struct user_regset *regset;
        int n;

        for (n = 0; n < view->n; ++n) {
                regset = view->regsets + n;
                if (regset->core_note_type == type)
                        return regset;
        }

        return NULL;
}

static int ptrace_regset(struct task_struct *task, int req, unsigned int type,
                         struct iovec *kiov)
{
        const struct user_regset_view *view = task_user_regset_view(task);
        const struct user_regset *regset = find_regset(view, type);
        int regset_no;

        if (!regset || (kiov->iov_len % regset->size) != 0)
                return -EINVAL;

        regset_no = regset - view->regsets;
        kiov->iov_len = min(kiov->iov_len,
                            (__kernel_size_t) (regset->n * regset->size));

        if (req == PTRACE_GETREGSET)
                return copy_regset_to_user(task, view, regset_no, 0,
                                           kiov->iov_len, kiov->iov_base);
        else
                return copy_regset_from_user(task, view, regset_no, 0,
                                             kiov->iov_len, kiov->iov_base);
}

#endif
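
/*
 * Illustrative userspace sketch: the regset requests take an ELF NT_* note
 * type and a struct iovec; the kernel clamps iov_len and writes back the
 * number of bytes actually handled, e.g. on x86:
 *
 *	#include <elf.h>
 *	#include <sys/uio.h>
 *	#include <sys/user.h>	// for struct user_regs_struct
 *
 *	struct user_regs_struct regs;
 *	struct iovec iov = { .iov_base = &regs, .iov_len = sizeof(regs) };
 *	ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov);
 */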

int ptrace_request(struct task_struct *child, long request,
                   unsigned long addr, unsigned long data)
{
        int ret = -EIO;
        siginfo_t siginfo;
        void __user *datavp = (void __user *) data;
        unsigned long __user *datalp = datavp;

        switch (request) {
        case PTRACE_PEEKTEXT:
        case PTRACE_PEEKDATA:
                return generic_ptrace_peekdata(child, addr, data);
        case PTRACE_POKETEXT:
        case PTRACE_POKEDATA:
                return generic_ptrace_pokedata(child, addr, data);

#ifdef PTRACE_OLDSETOPTIONS
        case PTRACE_OLDSETOPTIONS:
#endif
        case PTRACE_SETOPTIONS:
                ret = ptrace_setoptions(child, data);
                break;
        case PTRACE_GETEVENTMSG:
                ret = put_user(child->ptrace_message, datalp);
                break;

        case PTRACE_GETSIGINFO:
                ret = ptrace_getsiginfo(child, &siginfo);
                if (!ret)
                        ret = copy_siginfo_to_user(datavp, &siginfo);
                break;

        case PTRACE_SETSIGINFO:
                if (copy_from_user(&siginfo, datavp, sizeof siginfo))
                        ret = -EFAULT;
                else
                        ret = ptrace_setsiginfo(child, &siginfo);
                break;

        case PTRACE_DETACH:      /* detach a process that was attached. */
                ret = ptrace_detach(child, data);
                break;

#ifdef CONFIG_BINFMT_ELF_FDPIC
        case PTRACE_GETFDPIC: {
                struct mm_struct *mm = get_task_mm(child);
                unsigned long tmp = 0;

                ret = -ESRCH;
                if (!mm)
                        break;

                switch (addr) {
                case PTRACE_GETFDPIC_EXEC:
                        tmp = mm->context.exec_fdpic_loadmap;
                        break;
                case PTRACE_GETFDPIC_INTERP:
                        tmp = mm->context.interp_fdpic_loadmap;
                        break;
                default:
                        break;
                }
                mmput(mm);

                ret = put_user(tmp, datalp);
                break;
        }
#endif

#ifdef PTRACE_SINGLESTEP
        case PTRACE_SINGLESTEP:
#endif
#ifdef PTRACE_SINGLEBLOCK
        case PTRACE_SINGLEBLOCK:
#endif
#ifdef PTRACE_SYSEMU
        case PTRACE_SYSEMU:
        case PTRACE_SYSEMU_SINGLESTEP:
#endif
        case PTRACE_SYSCALL:
        case PTRACE_CONT:
                return ptrace_resume(child, request, data);

        case PTRACE_KILL:
                if (child->exit_state)  /* already dead */
                        return 0;
                return ptrace_resume(child, request, SIGKILL);

#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
        case PTRACE_GETREGSET:
        case PTRACE_SETREGSET:
        {
                struct iovec kiov;
                struct iovec __user *uiov = datavp;

                if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov)))
                        return -EFAULT;

                if (__get_user(kiov.iov_base, &uiov->iov_base) ||
                    __get_user(kiov.iov_len, &uiov->iov_len))
                        return -EFAULT;

                ret = ptrace_regset(child, request, addr, &kiov);
                if (!ret)
                        ret = __put_user(kiov.iov_len, &uiov->iov_len);
                break;
        }
#endif
        default:
                break;
        }

        return ret;
}

static struct task_struct *ptrace_get_task_struct(pid_t pid)
{
        struct task_struct *child;

        rcu_read_lock();
        child = find_task_by_vpid(pid);
        if (child)
                get_task_struct(child);
        rcu_read_unlock();

        if (!child)
                return ERR_PTR(-ESRCH);
        return child;
}

#ifndef arch_ptrace_attach
#define arch_ptrace_attach(child)       do { } while (0)
#endif

SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
                unsigned long, data)
{
        struct task_struct *child;
        long ret;

        if (request == PTRACE_TRACEME) {
                ret = ptrace_traceme();
                if (!ret)
                        arch_ptrace_attach(current);
                goto out;
        }

        child = ptrace_get_task_struct(pid);
        if (IS_ERR(child)) {
                ret = PTR_ERR(child);
                goto out;
        }

        if (request == PTRACE_ATTACH) {
                ret = ptrace_attach(child);
                /*
                 * Some architectures need to do book-keeping after
                 * a ptrace attach.
                 */
                if (!ret)
                        arch_ptrace_attach(child);
                goto out_put_task_struct;
        }

        ret = ptrace_check_attach(child, request == PTRACE_KILL);
        if (ret < 0)
                goto out_put_task_struct;

        ret = arch_ptrace(child, request, addr, data);

 out_put_task_struct:
        put_task_struct(child);
 out:
        return ret;
}

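/*
 * Default PTRACE_PEEKTEXT/PTRACE_PEEKDATA implementation: read one word
 * of the traced task's memory at @addr and store it at the user address
 * @data.
 */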
int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
                            unsigned long data)
{
        unsigned long tmp;
        int copied;

        copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
        if (copied != sizeof(tmp))
                return -EIO;
        return put_user(tmp, (unsigned long __user *)data);
}

int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
                            unsigned long data)
{
        int copied;

        copied = access_process_vm(tsk, addr, &data, sizeof(data), 1);
        return (copied == sizeof(data)) ? 0 : -EIO;
}
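
/*
 * Illustrative userspace sketch: peek and poke move one word per call, so
 * patching a single byte means a read-modify-write of the whole word:
 *
 *	errno = 0;
 *	long word = ptrace(PTRACE_PEEKDATA, pid, addr, 0);
 *	if (!errno) {
 *		word = (word & ~0xffL) | 0xcc;	// e.g. an x86 breakpoint
 *		ptrace(PTRACE_POKEDATA, pid, addr, word);
 *	}
 */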

#if defined CONFIG_COMPAT
#include <linux/compat.h>

int compat_ptrace_request(struct task_struct *child, compat_long_t request,
                          compat_ulong_t addr, compat_ulong_t data)
{
        compat_ulong_t __user *datap = compat_ptr(data);
        compat_ulong_t word;
        siginfo_t siginfo;
        int ret;

        switch (request) {
        case PTRACE_PEEKTEXT:
        case PTRACE_PEEKDATA:
                ret = access_process_vm(child, addr, &word, sizeof(word), 0);
                if (ret != sizeof(word))
                        ret = -EIO;
                else
                        ret = put_user(word, datap);
                break;

        case PTRACE_POKETEXT:
        case PTRACE_POKEDATA:
                ret = access_process_vm(child, addr, &data, sizeof(data), 1);
                ret = (ret != sizeof(data) ? -EIO : 0);
                break;

        case PTRACE_GETEVENTMSG:
                ret = put_user((compat_ulong_t) child->ptrace_message, datap);
                break;

        case PTRACE_GETSIGINFO:
                ret = ptrace_getsiginfo(child, &siginfo);
                if (!ret)
                        ret = copy_siginfo_to_user32(
                                (struct compat_siginfo __user *) datap,
                                &siginfo);
                break;

        case PTRACE_SETSIGINFO:
                memset(&siginfo, 0, sizeof siginfo);
                if (copy_siginfo_from_user32(
                            &siginfo, (struct compat_siginfo __user *) datap))
                        ret = -EFAULT;
                else
                        ret = ptrace_setsiginfo(child, &siginfo);
                break;
#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
        case PTRACE_GETREGSET:
        case PTRACE_SETREGSET:
        {
                struct iovec kiov;
                struct compat_iovec __user *uiov =
                        (struct compat_iovec __user *) datap;
                compat_uptr_t ptr;
                compat_size_t len;

                if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov)))
                        return -EFAULT;

                if (__get_user(ptr, &uiov->iov_base) ||
                    __get_user(len, &uiov->iov_len))
                        return -EFAULT;

                kiov.iov_base = compat_ptr(ptr);
                kiov.iov_len = len;

                ret = ptrace_regset(child, request, addr, &kiov);
                if (!ret)
                        ret = __put_user(kiov.iov_len, &uiov->iov_len);
                break;
        }
#endif

        default:
                ret = ptrace_request(child, request, addr, data);
        }

        return ret;
}

asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
                                  compat_long_t addr, compat_long_t data)
{
        struct task_struct *child;
        long ret;

        if (request == PTRACE_TRACEME) {
                ret = ptrace_traceme();
                goto out;
        }

        child = ptrace_get_task_struct(pid);
        if (IS_ERR(child)) {
                ret = PTR_ERR(child);
                goto out;
        }

        if (request == PTRACE_ATTACH) {
                ret = ptrace_attach(child);
                /*
                 * Some architectures need to do book-keeping after
                 * a ptrace attach.
                 */
                if (!ret)
                        arch_ptrace_attach(child);
                goto out_put_task_struct;
        }

        ret = ptrace_check_attach(child, request == PTRACE_KILL);
        if (!ret)
                ret = compat_arch_ptrace(child, request, addr, data);

 out_put_task_struct:
        put_task_struct(child);
 out:
        return ret;
}
#endif  /* CONFIG_COMPAT */

#ifdef CONFIG_HAVE_HW_BREAKPOINT
int ptrace_get_breakpoints(struct task_struct *tsk)
{
        if (atomic_inc_not_zero(&tsk->ptrace_bp_refcnt))
                return 0;

        return -1;
}

void ptrace_put_breakpoints(struct task_struct *tsk)
{
        if (atomic_dec_and_test(&tsk->ptrace_bp_refcnt))
                flush_ptrace_hw_breakpoint(tsk);
}
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
898 #endif /* CONFIG_HAVE_HW_BREAKPOINT */