/*
 * linux/kernel/ptrace.c
 *
 * (C) Copyright 1999 Linus Torvalds
 *
 * Common interfaces for "ptrace()" which we do not want
 * to continually duplicate across every architecture.
 */
#include <linux/capability.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/audit.h>
#include <linux/pid_namespace.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/regset.h>
#include <linux/hw_breakpoint.h>
#include <linux/cn_proc.h>
static int ptrace_trapping_sleep_fn(void *flags)
{
        schedule();
        return 0;
}
/*
 * ptrace a task: make the debugger its new parent and
 * move it to the ptrace list.
 *
 * Must be called with the tasklist lock write-held.
 */
void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
{
        BUG_ON(!list_empty(&child->ptrace_entry));
        list_add(&child->ptrace_entry, &new_parent->ptraced);
        child->parent = new_parent;
}
/**
 * __ptrace_unlink - unlink ptracee and restore its execution state
 * @child: ptracee to be unlinked
 *
 * Remove @child from the ptrace list, move it back to the original parent,
 * and restore the execution state so that it conforms to the group stop
 * state.
 *
 * Unlinking can happen via two paths - explicit PTRACE_DETACH or ptracer
 * exiting.  For PTRACE_DETACH, unless the ptracee has been killed between
 * ptrace_check_attach() and here, it's guaranteed to be in TASK_TRACED.
 * If the ptracer is exiting, the ptracee can be in any state.
 *
 * After detach, the ptracee should be in a state which conforms to the
 * group stop.  If the group is stopped or in the process of stopping, the
 * ptracee should be put into TASK_STOPPED; otherwise, it should be woken
 * up from TASK_TRACED.
 *
 * If the ptracee is in TASK_TRACED and needs to be moved to TASK_STOPPED,
 * it goes through TRACED -> RUNNING -> STOPPED transition which is similar
 * to but in the opposite direction of what happens while attaching to a
 * stopped task.  However, in this direction, the intermediate RUNNING
 * state is not hidden even from the current ptracer and if it immediately
 * re-attaches and performs a WNOHANG wait(2), it may fail.
 *
 * CONTEXT:
 * write_lock_irq(tasklist_lock)
 */
void __ptrace_unlink(struct task_struct *child)
{
        BUG_ON(!child->ptrace);

        child->ptrace = 0;
        child->parent = child->real_parent;
        list_del_init(&child->ptrace_entry);

        spin_lock(&child->sighand->siglock);

        /*
         * Clear all pending traps and TRAPPING.  TRAPPING should be
         * cleared regardless of JOBCTL_STOP_PENDING.  Do it explicitly.
         */
        task_clear_jobctl_pending(child, JOBCTL_TRAP_MASK);
        task_clear_jobctl_trapping(child);

        /*
         * Reinstate JOBCTL_STOP_PENDING if group stop is in effect and
         * @child isn't dead.
         */
        if (!(child->flags & PF_EXITING) &&
            (child->signal->flags & SIGNAL_STOP_STOPPED ||
             child->signal->group_stop_count)) {
                child->jobctl |= JOBCTL_STOP_PENDING;

                /*
                 * This is only possible if this thread was cloned by the
                 * traced task running in the stopped group, set the signal
                 * for the future reports.
                 * FIXME: we should change ptrace_init_task() to handle this
                 * case.
                 */
                if (!(child->jobctl & JOBCTL_STOP_SIGMASK))
                        child->jobctl |= SIGSTOP;
        }

        /*
         * If transition to TASK_STOPPED is pending or in TASK_TRACED, kick
         * @child in the butt.  Note that @resume should be used iff @child
         * is in TASK_TRACED; otherwise, we might unduly disrupt
         * TASK_KILLABLE sleeps.
         */
        if (child->jobctl & JOBCTL_STOP_PENDING || task_is_traced(child))
                ptrace_signal_wake_up(child, true);

        spin_unlock(&child->sighand->siglock);
}
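
/*
 * Illustrative userspace sketch (compiled out; not part of this file's
 * logic): the TRACED -> RUNNING -> STOPPED transition described above is
 * visible to a ptracer that detaches and immediately re-attaches, so a
 * WNOHANG wait(2) right after re-attach may transiently report nothing.
 * The helper name is hypothetical and error handling is omitted.
 */
#if 0
#include <sys/ptrace.h>
#include <sys/wait.h>

static void detach_and_reattach(pid_t pid)
{
        ptrace(PTRACE_DETACH, pid, 0, 0);  /* tracee transits toward STOPPED */
        ptrace(PTRACE_ATTACH, pid, 0, 0);
        /* May return 0 while the tracee is still in the transient RUNNING. */
        waitpid(pid, NULL, WUNTRACED | WNOHANG);
}
#endif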
/* Ensure that nothing can wake it up, even SIGKILL */
static bool ptrace_freeze_traced(struct task_struct *task)
{
        bool ret = false;

        /* Lockless, nobody but us can set this flag */
        if (task->jobctl & JOBCTL_LISTENING)
                return ret;

        spin_lock_irq(&task->sighand->siglock);
        if (task_is_traced(task) && !__fatal_signal_pending(task)) {
                task->state = __TASK_TRACED;
                ret = true;
        }
        spin_unlock_irq(&task->sighand->siglock);

        return ret;
}
static void ptrace_unfreeze_traced(struct task_struct *task)
{
        if (task->state != __TASK_TRACED)
                return;

        WARN_ON(!task->ptrace || task->parent != current);

        /*
         * PTRACE_LISTEN can allow ptrace_trap_notify to wake us up remotely.
         * Recheck state under the lock to close this race.
         */
        spin_lock_irq(&task->sighand->siglock);
        if (task->state == __TASK_TRACED) {
                if (__fatal_signal_pending(task))
                        wake_up_state(task, __TASK_TRACED);
                else
                        task->state = TASK_TRACED;
        }
        spin_unlock_irq(&task->sighand->siglock);
}
/**
 * ptrace_check_attach - check whether ptracee is ready for ptrace operation
 * @child: ptracee to check for
 * @ignore_state: don't check whether @child is currently %TASK_TRACED
 *
 * Check whether @child is being ptraced by %current and ready for further
 * ptrace operations.  If @ignore_state is %false, @child also should be in
 * %TASK_TRACED state and on return the child is guaranteed to be traced
 * and not executing.  If @ignore_state is %true, @child can be in any
 * state.
 *
 * CONTEXT:
 * Grabs and releases tasklist_lock and @child->sighand->siglock.
 *
 * RETURNS:
 * 0 on success, -ESRCH if %child is not ready.
 */
int ptrace_check_attach(struct task_struct *child, bool ignore_state)
{
        int ret = -ESRCH;

        /*
         * We take the read lock around doing both checks to close a
         * possible race where someone else was tracing our child and
         * detached between these two checks.  After this locked check,
         * we are sure that this is our traced child and that can only
         * be changed by us so it's not changing right after this.
         */
        read_lock(&tasklist_lock);
        if (child->ptrace && child->parent == current) {
                WARN_ON(child->state == __TASK_TRACED);
                /*
                 * child->sighand can't be NULL, release_task()
                 * does ptrace_unlink() before __exit_signal().
                 */
                if (ignore_state || ptrace_freeze_traced(child))
                        ret = 0;
        }
        read_unlock(&tasklist_lock);

        if (!ret && !ignore_state) {
                if (!wait_task_inactive(child, __TASK_TRACED)) {
                        /*
                         * This can only happen if may_ptrace_stop() fails and
                         * ptrace_stop() changes ->state back to TASK_RUNNING,
                         * so we should not worry about leaking __TASK_TRACED.
                         */
                        WARN_ON(child->state == __TASK_TRACED);
                        ret = -ESRCH;
                }
        }

        return ret;
}
int __ptrace_may_access(struct task_struct *task, unsigned int mode)
{
        const struct cred *cred = current_cred(), *tcred;
        int dumpable = 0;

        /* May we inspect the given task?
         * This check is used both for attaching with ptrace
         * and for allowing access to sensitive information in /proc.
         *
         * ptrace_attach denies several cases that /proc allows
         * because setting up the necessary parent/child relationship
         * or halting the specified task is impossible.
         */
        /* Don't let security modules deny introspection */
        if (same_thread_group(task, current))
                return 0;
        rcu_read_lock();
        tcred = __task_cred(task);
        if (cred->user->user_ns == tcred->user->user_ns &&
            (cred->uid == tcred->euid &&
             cred->uid == tcred->suid &&
             cred->uid == tcred->uid  &&
             cred->gid == tcred->egid &&
             cred->gid == tcred->sgid &&
             cred->gid == tcred->gid))
                goto ok;
        if (ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE))
                goto ok;
        rcu_read_unlock();
        return -EPERM;
ok:
        rcu_read_unlock();
        smp_rmb();
        if (task->mm)
                dumpable = get_dumpable(task->mm);
        if (dumpable != SUID_DUMP_USER &&
            !task_ns_capable(task, CAP_SYS_PTRACE))
                return -EPERM;

        return security_ptrace_access_check(task, mode);
}
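
/*
 * Illustrative userspace sketch (compiled out): as the comment above
 * notes, this check also gates /proc access to sensitive information,
 * e.g. /proc/<pid>/mem, so an open() there fails with EACCES/EPERM for
 * callers lacking matching credentials or CAP_SYS_PTRACE.  The helper
 * name is hypothetical.
 */
#if 0
#include <stdio.h>
#include <fcntl.h>
#include <sys/types.h>

static int open_target_mem(pid_t pid)
{
        char path[64];

        snprintf(path, sizeof(path), "/proc/%d/mem", (int)pid);
        return open(path, O_RDONLY);    /* subject to the access check above */
}
#endif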
bool ptrace_may_access(struct task_struct *task, unsigned int mode)
{
        int err;

        task_lock(task);
        err = __ptrace_may_access(task, mode);
        task_unlock(task);
        return !err;
}
static int ptrace_attach(struct task_struct *task, long request,
                         unsigned long flags)
{
        bool seize = (request == PTRACE_SEIZE);
        int retval;

        /*
         * SEIZE will enable new ptrace behaviors which will be implemented
         * gradually.  SEIZE_DEVEL is used to prevent applications
         * expecting full SEIZE behaviors trapping on kernel commits which
         * are still in the process of implementing them.
         *
         * Only test programs for new ptrace behaviors being implemented
         * should set SEIZE_DEVEL.  If unset, SEIZE will fail with -EIO.
         *
         * Once SEIZE behaviors are completely implemented, this flag and
         * the following test will be removed.
         */
        retval = -EIO;
        if (seize && !(flags & PTRACE_SEIZE_DEVEL))
                goto out;

        audit_ptrace(task);

        retval = -EPERM;
        if (unlikely(task->flags & PF_KTHREAD))
                goto out;
        if (same_thread_group(task, current))
                goto out;

        /*
         * Protect exec's credential calculations against our interference;
         * SUID, SGID and LSM creds get determined differently under ptrace.
         */
        retval = -ERESTARTNOINTR;
        if (mutex_lock_interruptible(&task->signal->cred_guard_mutex))
                goto out;

        task_lock(task);
        retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
        task_unlock(task);
        if (retval)
                goto unlock_creds;

        write_lock_irq(&tasklist_lock);
        retval = -EPERM;
        if (unlikely(task->exit_state))
                goto unlock_tasklist;
        if (task->ptrace)
                goto unlock_tasklist;

        task->ptrace = PT_PTRACED;
        if (seize)
                task->ptrace |= PT_SEIZED;
        if (task_ns_capable(task, CAP_SYS_PTRACE))
                task->ptrace |= PT_PTRACE_CAP;

        __ptrace_link(task, current);

        /* SEIZE doesn't trap tracee on attach */
        if (!seize)
                send_sig_info(SIGSTOP, SEND_SIG_FORCED, task);

        spin_lock(&task->sighand->siglock);

        /*
         * If the task is already STOPPED, set JOBCTL_TRAP_STOP and
         * TRAPPING, and kick it so that it transits to TRACED.  TRAPPING
         * will be cleared if the child completes the transition or any
         * event which clears the group stop states happens.  We'll wait
         * for the transition to complete before returning from this
         * function.
         *
         * This hides STOPPED -> RUNNING -> TRACED transition from the
         * attaching thread but a different thread in the same group can
         * still observe the transient RUNNING state.  IOW, if another
         * thread's WNOHANG wait(2) on the stopped tracee races against
         * ATTACH, the wait(2) may fail due to the transient RUNNING.
         *
         * The following task_is_stopped() test is safe as both transitions
         * in and out of STOPPED are protected by siglock.
         */
        if (task_is_stopped(task) &&
            task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING))
                signal_wake_up_state(task, __TASK_STOPPED);

        spin_unlock(&task->sighand->siglock);

        retval = 0;
unlock_tasklist:
        write_unlock_irq(&tasklist_lock);
unlock_creds:
        mutex_unlock(&task->signal->cred_guard_mutex);
out:
        if (!retval) {
                wait_on_bit(&task->jobctl, JOBCTL_TRAPPING_BIT,
                            ptrace_trapping_sleep_fn, TASK_UNINTERRUPTIBLE);
                proc_ptrace_connector(task, PTRACE_ATTACH);
        }

        return retval;
}
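
/*
 * Illustrative userspace sketch (compiled out): PTRACE_ATTACH forces a
 * SIGSTOP trap, while PTRACE_SEIZE attaches without trapping the tracee
 * and (while under development) must pass PTRACE_SEIZE_DEVEL in data, as
 * checked above.  Helper names are hypothetical; error handling omitted.
 */
#if 0
#include <sys/ptrace.h>
#include <sys/wait.h>

static void attach_classic(pid_t pid)
{
        ptrace(PTRACE_ATTACH, pid, 0, 0);
        waitpid(pid, NULL, 0);                  /* wait for the SIGSTOP trap */
}

static void attach_seize(pid_t pid)
{
        /* Tracee keeps running; trap it later with PTRACE_INTERRUPT. */
        ptrace(PTRACE_SEIZE, pid, 0, (void *)PTRACE_SEIZE_DEVEL);
}
#endif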
/**
 * ptrace_traceme  --  helper for PTRACE_TRACEME
 *
 * Performs checks and sets PT_PTRACED.
 * Should be used by all ptrace implementations for PTRACE_TRACEME.
 */
static int ptrace_traceme(void)
{
        int ret = -EPERM;

        write_lock_irq(&tasklist_lock);
        /* Are we already being traced? */
        if (!current->ptrace) {
                ret = security_ptrace_traceme(current->parent);
                /*
                 * Check PF_EXITING to ensure ->real_parent has not passed
                 * exit_ptrace().  Otherwise we don't report the error but
                 * pretend ->real_parent untraces us right after return.
                 */
                if (!ret && !(current->real_parent->flags & PF_EXITING)) {
                        current->ptrace = PT_PTRACED;
                        __ptrace_link(current, current->real_parent);
                }
        }
        write_unlock_irq(&tasklist_lock);

        return ret;
}
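
/*
 * Illustrative userspace sketch (compiled out): the canonical
 * PTRACE_TRACEME pattern - the child requests tracing by its parent and
 * execs; the successful exec traps with SIGTRAP, handing the parent
 * control at the first instruction.  Helper name is hypothetical; error
 * handling omitted.
 */
#if 0
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <unistd.h>

static void spawn_traced(const char *path, char *const argv[])
{
        pid_t pid = fork();

        if (pid == 0) {
                ptrace(PTRACE_TRACEME, 0, 0, 0);
                execv(path, argv);      /* traps with SIGTRAP on success */
        } else {
                waitpid(pid, NULL, 0);  /* child is now stopped at exec */
        }
}
#endif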
/*
 * Called with irqs disabled, returns true if children should reap themselves.
 */
static int ignoring_children(struct sighand_struct *sigh)
{
        int ret;

        spin_lock(&sigh->siglock);
        ret = (sigh->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) ||
              (sigh->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT);
        spin_unlock(&sigh->siglock);
        return ret;
}
/*
 * Called with tasklist_lock held for writing.
 * Unlink a traced task, and clean it up if it was a traced zombie.
 * Return true if it needs to be reaped with release_task().
 * (We can't call release_task() here because we already hold tasklist_lock.)
 *
 * If it's a zombie, our attachedness prevented normal parent notification
 * or self-reaping.  Do notification now if it would have happened earlier.
 * If it should reap itself, return true.
 *
 * If it's our own child, there is no notification to do. But if our normal
 * children self-reap, then this child was prevented by ptrace and we must
 * reap it now, in that case we must also wake up sub-threads sleeping in
 * do_wait().
 */
static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
{
        bool dead;

        __ptrace_unlink(p);

        if (p->exit_state != EXIT_ZOMBIE)
                return false;

        dead = !thread_group_leader(p);

        if (!dead && thread_group_empty(p)) {
                if (!same_thread_group(p->real_parent, tracer))
                        dead = do_notify_parent(p, p->exit_signal);
                else if (ignoring_children(tracer->sighand)) {
                        __wake_up_parent(p, tracer);
                        dead = true;
                }
        }
        /* Mark it as in the process of being reaped. */
        if (dead)
                p->exit_state = EXIT_DEAD;
        return dead;
}
static int ptrace_detach(struct task_struct *child, unsigned int data)
{
        bool dead = false;

        if (!valid_signal(data))
                return -EIO;

        /* Architecture-specific hardware disable .. */
        ptrace_disable(child);
        clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

        write_lock_irq(&tasklist_lock);
        /*
         * This child can be already killed. Make sure de_thread() or
         * our sub-thread doing do_wait() didn't do release_task() yet.
         */
        if (child->ptrace) {
                child->exit_code = data;
                dead = __ptrace_detach(current, child);
        }
        write_unlock_irq(&tasklist_lock);

        proc_ptrace_connector(child, PTRACE_DETACH);
        if (unlikely(dead))
                release_task(child);

        return 0;
}
/*
 * Detach all tasks we were using ptrace on. Called with tasklist held
 * for writing, and returns with it held too. But note it can release
 * and reacquire the lock.
 */
void exit_ptrace(struct task_struct *tracer)
        __releases(&tasklist_lock)
        __acquires(&tasklist_lock)
{
        struct task_struct *p, *n;
        LIST_HEAD(ptrace_dead);

        if (likely(list_empty(&tracer->ptraced)))
                return;

        list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) {
                if (__ptrace_detach(tracer, p))
                        list_add(&p->ptrace_entry, &ptrace_dead);
        }

        write_unlock_irq(&tasklist_lock);
        BUG_ON(!list_empty(&tracer->ptraced));

        list_for_each_entry_safe(p, n, &ptrace_dead, ptrace_entry) {
                list_del_init(&p->ptrace_entry);
                release_task(p);
        }

        write_lock_irq(&tasklist_lock);
}
int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
{
        int copied = 0;

        while (len > 0) {
                char buf[128];
                int this_len, retval;

                this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
                retval = access_process_vm(tsk, src, buf, this_len, 0);
                if (!retval) {
                        if (copied)
                                break;
                        return -EIO;
                }
                if (copy_to_user(dst, buf, retval))
                        return -EFAULT;
                copied += retval;
                src += retval;
                dst += retval;
                len -= retval;
        }
        return copied;
}
int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len)
{
        int copied = 0;

        while (len > 0) {
                char buf[128];
                int this_len, retval;

                this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
                if (copy_from_user(buf, src, this_len))
                        return -EFAULT;
                retval = access_process_vm(tsk, dst, buf, this_len, 1);
                if (!retval) {
                        if (copied)
                                break;
                        return -EIO;
                }
                copied += retval;
                src += retval;
                dst += retval;
                len -= retval;
        }
        return copied;
}
static int ptrace_setoptions(struct task_struct *child, unsigned long data)
{
        child->ptrace &= ~PT_TRACE_MASK;

        if (data & PTRACE_O_TRACESYSGOOD)
                child->ptrace |= PT_TRACESYSGOOD;

        if (data & PTRACE_O_TRACEFORK)
                child->ptrace |= PT_TRACE_FORK;

        if (data & PTRACE_O_TRACEVFORK)
                child->ptrace |= PT_TRACE_VFORK;

        if (data & PTRACE_O_TRACECLONE)
                child->ptrace |= PT_TRACE_CLONE;

        if (data & PTRACE_O_TRACEEXEC)
                child->ptrace |= PT_TRACE_EXEC;

        if (data & PTRACE_O_TRACEVFORKDONE)
                child->ptrace |= PT_TRACE_VFORK_DONE;

        if (data & PTRACE_O_TRACEEXIT)
                child->ptrace |= PT_TRACE_EXIT;

        return (data & ~PTRACE_O_MASK) ? -EINVAL : 0;
}
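
/*
 * Illustrative userspace sketch (compiled out): a tracer usually sets
 * options at the first stop; PTRACE_O_TRACESYSGOOD marks syscall stops
 * with (SIGTRAP | 0x80) and PTRACE_O_TRACEFORK auto-attaches to forked
 * children.  Helper name is hypothetical.
 */
#if 0
#include <sys/ptrace.h>

static void set_tracer_options(pid_t pid)
{
        ptrace(PTRACE_SETOPTIONS, pid, 0,
               (void *)(PTRACE_O_TRACESYSGOOD | PTRACE_O_TRACEFORK));
}
#endif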
static int ptrace_getsiginfo(struct task_struct *child, siginfo_t *info)
{
        unsigned long flags;
        int error = -ESRCH;

        if (lock_task_sighand(child, &flags)) {
                error = -EINVAL;
                if (likely(child->last_siginfo != NULL)) {
                        *info = *child->last_siginfo;
                        error = 0;
                }
                unlock_task_sighand(child, &flags);
        }
        return error;
}
static int ptrace_setsiginfo(struct task_struct *child, const siginfo_t *info)
{
        unsigned long flags;
        int error = -ESRCH;

        if (lock_task_sighand(child, &flags)) {
                error = -EINVAL;
                if (likely(child->last_siginfo != NULL)) {
                        *child->last_siginfo = *info;
                        error = 0;
                }
                unlock_task_sighand(child, &flags);
        }
        return error;
}
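
/*
 * Illustrative userspace sketch (compiled out): the two helpers above let
 * a tracer read and rewrite ->last_siginfo at a signal-delivery stop,
 * deciding which signal (if any) the tracee receives on resume.  Helper
 * name is hypothetical; error handling omitted.
 */
#if 0
#include <sys/ptrace.h>
#include <signal.h>

static void filter_sigint(pid_t pid)
{
        siginfo_t si;

        ptrace(PTRACE_GETSIGINFO, pid, 0, &si);
        if (si.si_signo == SIGINT)
                ptrace(PTRACE_CONT, pid, 0, 0); /* suppress the signal */
        else
                ptrace(PTRACE_CONT, pid, 0, (void *)(long)si.si_signo);
}
#endif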
#ifdef PTRACE_SINGLESTEP
#define is_singlestep(request)          ((request) == PTRACE_SINGLESTEP)
#else
#define is_singlestep(request)          0
#endif

#ifdef PTRACE_SINGLEBLOCK
#define is_singleblock(request)         ((request) == PTRACE_SINGLEBLOCK)
#else
#define is_singleblock(request)         0
#endif

#ifdef PTRACE_SYSEMU
#define is_sysemu_singlestep(request)   ((request) == PTRACE_SYSEMU_SINGLESTEP)
#else
#define is_sysemu_singlestep(request)   0
#endif
static int ptrace_resume(struct task_struct *child, long request,
                         unsigned long data)
{
        bool need_siglock;

        if (!valid_signal(data))
                return -EIO;

        if (request == PTRACE_SYSCALL)
                set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
        else
                clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

#ifdef TIF_SYSCALL_EMU
        if (request == PTRACE_SYSEMU || request == PTRACE_SYSEMU_SINGLESTEP)
                set_tsk_thread_flag(child, TIF_SYSCALL_EMU);
        else
                clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
#endif

        if (is_singleblock(request)) {
                if (unlikely(!arch_has_block_step()))
                        return -EIO;
                user_enable_block_step(child);
        } else if (is_singlestep(request) || is_sysemu_singlestep(request)) {
                if (unlikely(!arch_has_single_step()))
                        return -EIO;
                user_enable_single_step(child);
        } else {
                user_disable_single_step(child);
        }

        /*
         * Change ->exit_code and ->state under siglock to avoid the race
         * with wait_task_stopped() in between; a non-zero ->exit_code will
         * wrongly look like another report from tracee.
         *
         * Note that we need siglock even if ->exit_code == data and/or this
         * status was not reported yet, the new status must not be cleared by
         * wait_task_stopped() after resume.
         *
         * If data == 0 we do not care if wait_task_stopped() reports the old
         * status and clears the code too; this can't race with the tracee, it
         * takes siglock after resume.
         */
        need_siglock = data && !thread_group_empty(current);
        if (need_siglock)
                spin_lock_irq(&child->sighand->siglock);
        child->exit_code = data;
        wake_up_state(child, __TASK_TRACED);
        if (need_siglock)
                spin_unlock_irq(&child->sighand->siglock);

        return 0;
}
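
/*
 * Illustrative userspace sketch (compiled out): ptrace_resume() backs
 * requests like PTRACE_SYSCALL, which resumes the tracee until the next
 * syscall entry or exit - the classic strace-style loop.  Helper name is
 * hypothetical; error handling omitted.
 */
#if 0
#include <sys/ptrace.h>
#include <sys/wait.h>

static void trace_syscalls(pid_t pid)
{
        int status;

        for (;;) {
                ptrace(PTRACE_SYSCALL, pid, 0, 0);
                waitpid(pid, &status, 0);
                if (WIFEXITED(status))
                        break;
                /* inspect syscall state here, e.g. via PTRACE_GETREGSET */
        }
}
#endif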
#ifdef CONFIG_HAVE_ARCH_TRACEHOOK

static const struct user_regset *
find_regset(const struct user_regset_view *view, unsigned int type)
{
        const struct user_regset *regset;
        int n;

        for (n = 0; n < view->n; ++n) {
                regset = view->regsets + n;
                if (regset->core_note_type == type)
                        return regset;
        }

        return NULL;
}

static int ptrace_regset(struct task_struct *task, int req, unsigned int type,
                         struct iovec *kiov)
{
        const struct user_regset_view *view = task_user_regset_view(task);
        const struct user_regset *regset = find_regset(view, type);
        int regset_no;

        if (!regset || (kiov->iov_len % regset->size) != 0)
                return -EINVAL;

        regset_no = regset - view->regsets;
        kiov->iov_len = min(kiov->iov_len,
                            (__kernel_size_t) (regset->n * regset->size));

        if (req == PTRACE_GETREGSET)
                return copy_regset_to_user(task, view, regset_no, 0,
                                           kiov->iov_len, kiov->iov_base);
        else
                return copy_regset_from_user(task, view, regset_no, 0,
                                             kiov->iov_len, kiov->iov_base);
}

#endif
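
/*
 * Illustrative userspace sketch (compiled out): PTRACE_GETREGSET selects
 * a regset by ELF note type and exchanges data through an iovec; as
 * ptrace_regset() above shows, the kernel truncates iov_len to the
 * regset's size.  Helper name is hypothetical; error handling omitted.
 */
#if 0
#include <sys/ptrace.h>
#include <sys/uio.h>
#include <sys/user.h>
#include <elf.h>

static void read_gp_regs(pid_t pid)
{
        struct user_regs_struct regs;
        struct iovec iov = { .iov_base = &regs, .iov_len = sizeof(regs) };

        ptrace(PTRACE_GETREGSET, pid, (void *)NT_PRSTATUS, &iov);
        /* iov.iov_len now holds the number of bytes actually filled in */
}
#endif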
int ptrace_request(struct task_struct *child, long request,
                   unsigned long addr, unsigned long data)
{
        bool seized = child->ptrace & PT_SEIZED;
        int ret = -EIO;
        siginfo_t siginfo, *si;
        void __user *datavp = (void __user *) data;
        unsigned long __user *datalp = datavp;
        unsigned long flags;

        switch (request) {
        case PTRACE_PEEKTEXT:
        case PTRACE_PEEKDATA:
                return generic_ptrace_peekdata(child, addr, data);
        case PTRACE_POKETEXT:
        case PTRACE_POKEDATA:
                return generic_ptrace_pokedata(child, addr, data);

#ifdef PTRACE_OLDSETOPTIONS
        case PTRACE_OLDSETOPTIONS:
#endif
        case PTRACE_SETOPTIONS:
                ret = ptrace_setoptions(child, data);
                break;
        case PTRACE_GETEVENTMSG:
                ret = put_user(child->ptrace_message, datalp);
                break;

        case PTRACE_GETSIGINFO:
                ret = ptrace_getsiginfo(child, &siginfo);
                if (!ret)
                        ret = copy_siginfo_to_user(datavp, &siginfo);
                break;

        case PTRACE_SETSIGINFO:
                if (copy_from_user(&siginfo, datavp, sizeof siginfo))
                        ret = -EFAULT;
                else
                        ret = ptrace_setsiginfo(child, &siginfo);
                break;

        case PTRACE_INTERRUPT:
                /*
                 * Stop tracee without any side-effect on signal or job
                 * control.  At least one trap is guaranteed to happen
                 * after this request.  If @child is already trapped, the
                 * current trap is not disturbed and another trap will
                 * happen after the current trap is ended with PTRACE_CONT.
                 *
                 * The actual trap might not be PTRACE_EVENT_STOP trap but
                 * the pending condition is cleared regardless.
                 */
                if (unlikely(!seized || !lock_task_sighand(child, &flags)))
                        break;

                /*
                 * INTERRUPT doesn't disturb existing trap sans one
                 * exception.  If ptracer issued LISTEN for the current
                 * STOP, this INTERRUPT should clear LISTEN and re-trap
                 * tracee into STOP.
                 */
                if (likely(task_set_jobctl_pending(child, JOBCTL_TRAP_STOP)))
                        ptrace_signal_wake_up(child, child->jobctl & JOBCTL_LISTENING);

                unlock_task_sighand(child, &flags);
                ret = 0;
                break;

        case PTRACE_LISTEN:
                /*
                 * Listen for events.  Tracee must be in STOP.  It's not
                 * resumed per-se but is not considered to be in TRACED by
                 * wait(2) or ptrace(2).  If an async event (e.g. group
                 * stop state change) happens, tracee will enter STOP trap
                 * again.  Alternatively, ptracer can issue INTERRUPT to
                 * finish listening and re-trap tracee into STOP.
                 */
                if (unlikely(!seized || !lock_task_sighand(child, &flags)))
                        break;

                si = child->last_siginfo;
                if (likely(si && (si->si_code >> 8) == PTRACE_EVENT_STOP)) {
                        child->jobctl |= JOBCTL_LISTENING;
                        /*
                         * If NOTIFY is set, it means event happened between
                         * start of this trap and now.  Trigger re-trap.
                         */
                        if (child->jobctl & JOBCTL_TRAP_NOTIFY)
                                ptrace_signal_wake_up(child, true);
                        ret = 0;
                }
                unlock_task_sighand(child, &flags);
                break;

        case PTRACE_DETACH:      /* detach a process that was attached. */
                ret = ptrace_detach(child, data);
                break;

#ifdef CONFIG_BINFMT_ELF_FDPIC
        case PTRACE_GETFDPIC: {
                struct mm_struct *mm = get_task_mm(child);
                unsigned long tmp = 0;

                ret = -ESRCH;
                if (!mm)
                        break;

                switch (addr) {
                case PTRACE_GETFDPIC_EXEC:
                        tmp = mm->context.exec_fdpic_loadmap;
                        break;
                case PTRACE_GETFDPIC_INTERP:
                        tmp = mm->context.interp_fdpic_loadmap;
                        break;
                default:
                        break;
                }
                mmput(mm);

                ret = put_user(tmp, datalp);
                break;
        }
#endif

#ifdef PTRACE_SINGLESTEP
        case PTRACE_SINGLESTEP:
#endif
#ifdef PTRACE_SINGLEBLOCK
        case PTRACE_SINGLEBLOCK:
#endif
#ifdef PTRACE_SYSEMU
        case PTRACE_SYSEMU:
        case PTRACE_SYSEMU_SINGLESTEP:
#endif
        case PTRACE_SYSCALL:
        case PTRACE_CONT:
                return ptrace_resume(child, request, data);

        case PTRACE_KILL:
                if (child->exit_state) /* already dead */
                        return 0;
                return ptrace_resume(child, request, SIGKILL);

#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
        case PTRACE_GETREGSET:
        case PTRACE_SETREGSET:
        {
                struct iovec kiov;
                struct iovec __user *uiov = datavp;

                if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov)))
                        return -EFAULT;

                if (__get_user(kiov.iov_base, &uiov->iov_base) ||
                    __get_user(kiov.iov_len, &uiov->iov_len))
                        return -EFAULT;

                ret = ptrace_regset(child, request, addr, &kiov);
                if (!ret)
                        ret = __put_user(kiov.iov_len, &uiov->iov_len);
                break;
        }
#endif
        default:
                break;
        }

        return ret;
}
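
/*
 * Illustrative userspace sketch (compiled out): with a SEIZE'd tracee,
 * PTRACE_INTERRUPT guarantees a trap without signal side effects and
 * PTRACE_LISTEN lets a stopped tracee re-trap on async events (e.g.
 * group stop state changes), matching the comments in ptrace_request()
 * above.  Helper name is hypothetical; error handling omitted.
 */
#if 0
#include <sys/ptrace.h>
#include <sys/wait.h>

static void observe_stops(pid_t pid)
{
        ptrace(PTRACE_INTERRUPT, pid, 0, 0);    /* at least one trap follows */
        waitpid(pid, NULL, 0);
        ptrace(PTRACE_LISTEN, pid, 0, 0);       /* not resumed, but not TRACED */
        waitpid(pid, NULL, 0);                  /* re-trapped on next event */
}
#endif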
static struct task_struct *ptrace_get_task_struct(pid_t pid)
{
        struct task_struct *child;

        rcu_read_lock();
        child = find_task_by_vpid(pid);
        if (child)
                get_task_struct(child);
        rcu_read_unlock();

        if (!child)
                return ERR_PTR(-ESRCH);
        return child;
}
#ifndef arch_ptrace_attach
#define arch_ptrace_attach(child)       do { } while (0)
#endif
SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
                unsigned long, data)
{
        struct task_struct *child;
        long ret;

        if (request == PTRACE_TRACEME) {
                ret = ptrace_traceme();
                if (!ret)
                        arch_ptrace_attach(current);
                goto out;
        }

        child = ptrace_get_task_struct(pid);
        if (IS_ERR(child)) {
                ret = PTR_ERR(child);
                goto out;
        }

        if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
                ret = ptrace_attach(child, request, data);
                /*
                 * Some architectures need to do book-keeping after
                 * a ptrace attach.
                 */
                if (!ret)
                        arch_ptrace_attach(child);
                goto out_put_task_struct;
        }

        ret = ptrace_check_attach(child, request == PTRACE_KILL ||
                                  request == PTRACE_INTERRUPT);
        if (ret < 0)
                goto out_put_task_struct;

        ret = arch_ptrace(child, request, addr, data);
        if (ret || request != PTRACE_DETACH)
                ptrace_unfreeze_traced(child);

 out_put_task_struct:
        put_task_struct(child);
 out:
        return ret;
}
int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
                            unsigned long data)
{
        unsigned long tmp;
        int copied;

        copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
        if (copied != sizeof(tmp))
                return -EIO;
        return put_user(tmp, (unsigned long __user *)data);
}

int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
                            unsigned long data)
{
        int copied;

        copied = access_process_vm(tsk, addr, &data, sizeof(data), 1);
        return (copied == sizeof(data)) ? 0 : -EIO;
}
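
/*
 * Illustrative userspace sketch (compiled out): PEEKDATA returns the word
 * as ptrace()'s return value, so errno must disambiguate a legitimate -1,
 * while POKEDATA passes the word in the data argument - mirroring the two
 * helpers above.  Helper name is hypothetical.
 */
#if 0
#include <sys/ptrace.h>
#include <errno.h>

static long peek_word(pid_t pid, unsigned long addr, int *err)
{
        long word;

        errno = 0;
        word = ptrace(PTRACE_PEEKDATA, pid, (void *)addr, 0);
        *err = (word == -1 && errno != 0);
        return word;
}
#endif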
#if defined CONFIG_COMPAT
#include <linux/compat.h>

int compat_ptrace_request(struct task_struct *child, compat_long_t request,
                          compat_ulong_t addr, compat_ulong_t data)
{
        compat_ulong_t __user *datap = compat_ptr(data);
        compat_ulong_t word;
        siginfo_t siginfo;
        int ret;

        switch (request) {
        case PTRACE_PEEKTEXT:
        case PTRACE_PEEKDATA:
                ret = access_process_vm(child, addr, &word, sizeof(word), 0);
                if (ret != sizeof(word))
                        ret = -EIO;
                else
                        ret = put_user(word, datap);
                break;

        case PTRACE_POKETEXT:
        case PTRACE_POKEDATA:
                ret = access_process_vm(child, addr, &data, sizeof(data), 1);
                ret = (ret != sizeof(data) ? -EIO : 0);
                break;

        case PTRACE_GETEVENTMSG:
                ret = put_user((compat_ulong_t) child->ptrace_message, datap);
                break;

        case PTRACE_GETSIGINFO:
                ret = ptrace_getsiginfo(child, &siginfo);
                if (!ret)
                        ret = copy_siginfo_to_user32(
                                (struct compat_siginfo __user *) datap,
                                &siginfo);
                break;

        case PTRACE_SETSIGINFO:
                memset(&siginfo, 0, sizeof siginfo);
                if (copy_siginfo_from_user32(
                            &siginfo, (struct compat_siginfo __user *) datap))
                        ret = -EFAULT;
                else
                        ret = ptrace_setsiginfo(child, &siginfo);
                break;
#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
        case PTRACE_GETREGSET:
        case PTRACE_SETREGSET:
        {
                struct iovec kiov;
                struct compat_iovec __user *uiov =
                        (struct compat_iovec __user *) datap;
                compat_uptr_t ptr;
                compat_size_t len;

                if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov)))
                        return -EFAULT;

                if (__get_user(ptr, &uiov->iov_base) ||
                    __get_user(len, &uiov->iov_len))
                        return -EFAULT;

                kiov.iov_base = compat_ptr(ptr);
                kiov.iov_len = len;

                ret = ptrace_regset(child, request, addr, &kiov);
                if (!ret)
                        ret = __put_user(kiov.iov_len, &uiov->iov_len);
                break;
        }
#endif

        default:
                ret = ptrace_request(child, request, addr, data);
        }

        return ret;
}
asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
                                  compat_long_t addr, compat_long_t data)
{
        struct task_struct *child;
        long ret;

        if (request == PTRACE_TRACEME) {
                ret = ptrace_traceme();
                goto out;
        }

        child = ptrace_get_task_struct(pid);
        if (IS_ERR(child)) {
                ret = PTR_ERR(child);
                goto out;
        }

        if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
                ret = ptrace_attach(child, request, data);
                /*
                 * Some architectures need to do book-keeping after
                 * a ptrace attach.
                 */
                if (!ret)
                        arch_ptrace_attach(child);
                goto out_put_task_struct;
        }

        ret = ptrace_check_attach(child, request == PTRACE_KILL ||
                                  request == PTRACE_INTERRUPT);
        if (!ret) {
                ret = compat_arch_ptrace(child, request, addr, data);
                if (ret || request != PTRACE_DETACH)
                        ptrace_unfreeze_traced(child);
        }

 out_put_task_struct:
        put_task_struct(child);
 out:
        return ret;
}
#endif  /* CONFIG_COMPAT */
#ifdef CONFIG_HAVE_HW_BREAKPOINT
int ptrace_get_breakpoints(struct task_struct *tsk)
{
        if (atomic_inc_not_zero(&tsk->ptrace_bp_refcnt))
                return 0;

        return -1;
}

void ptrace_put_breakpoints(struct task_struct *tsk)
{
        if (atomic_dec_and_test(&tsk->ptrace_bp_refcnt))
                flush_ptrace_hw_breakpoint(tsk);
}
#endif  /* CONFIG_HAVE_HW_BREAKPOINT */