/* By Ross Biro 1/23/92 */
/*
 * Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 *
 * BTS tracing
 *	Markus Metzger <markus.t.metzger@intel.com>, Dec 2007
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/regset.h>
#include <linux/tracehook.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/signal.h>
#include <linux/workqueue.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/debugreg.h>
#include <asm/ldt.h>
#include <asm/desc.h>
#include <asm/prctl.h>
#include <asm/proto.h>
#include <asm/ds.h>

#include <trace/syscall.h>

DEFINE_TRACE_FN(syscall_enter, syscall_regfunc, syscall_unregfunc);
DEFINE_TRACE_FN(syscall_exit, syscall_regfunc, syscall_unregfunc);

#include "tls.h"

enum x86_regset {
	REGSET_GENERAL,
	REGSET_FP,
	REGSET_XFP,
	REGSET_IOPERM64 = REGSET_XFP,
	REGSET_TLS,
	REGSET_IOPERM32,
};

/*
 * Does not yet catch signals sent when the child dies;
 * see exit.c and signal.c.
 */

/*
 * Determines which flags the user has access to [1 = access, 0 = no access].
 */
#define FLAG_MASK_32		((unsigned long)			\
				 (X86_EFLAGS_CF | X86_EFLAGS_PF |	\
				  X86_EFLAGS_AF | X86_EFLAGS_ZF |	\
				  X86_EFLAGS_SF | X86_EFLAGS_TF |	\
				  X86_EFLAGS_DF | X86_EFLAGS_OF |	\
				  X86_EFLAGS_RF | X86_EFLAGS_AC))

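/*
 * Worked example: X86_EFLAGS_IF (0x200) is not part of the mask, so a
 * debugger writing flags == 0x200 cannot toggle interrupt delivery;
 * set_flags() below computes
 *	(regs->flags & ~FLAG_MASK) | (0x200 & FLAG_MASK)
 * and 0x200 & FLAG_MASK == 0, i.e. the IF bit is always taken from the
 * task's existing flags, never from the debugger-supplied value.
 */
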
/*
 * Determines whether a value may be installed in a segment register.
 */
static inline bool invalid_selector(u16 value)
{
	return unlikely(value != 0 && (value & SEGMENT_RPL_MASK) != USER_RPL);
}

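/*
 * For example: 0x0007 (descriptor index 0, RPL 3) passes because its
 * RPL equals USER_RPL, and 0 (the null selector) is always allowed,
 * while 0x0010 (a selector with RPL 0) is rejected.
 */
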
#ifdef CONFIG_X86_32

#define FLAG_MASK		FLAG_MASK_32

static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long regno)
{
	BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0);
	return &regs->bx + (regno >> 2);
}

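/*
 * regno is a byte offset into struct user_regs_struct; regno >> 2
 * turns it into an index into the array of 4-byte registers starting
 * at regs->bx.  E.g. offsetof(struct user_regs_struct, cx) == 4 maps
 * to &regs->bx + 1, which is &regs->cx.
 */
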
static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
{
	/*
	 * Returning the value truncates it to 16 bits.
	 */
	unsigned int retval;

	if (offset != offsetof(struct user_regs_struct, gs))
		retval = *pt_regs_access(task_pt_regs(task), offset);
	else {
		if (task == current)
			retval = get_user_gs(task_pt_regs(task));
		else
			retval = task_user_gs(task);
	}
	return retval;
}

static int set_segment_reg(struct task_struct *task,
			   unsigned long offset, u16 value)
{
	/*
	 * The value argument was already truncated to 16 bits.
	 */
	if (invalid_selector(value))
		return -EIO;

	/*
	 * For %cs and %ss we cannot permit a null selector.
	 * We can permit a bogus selector as long as it has USER_RPL.
	 * Null selectors are fine for other segment registers, but
	 * we will never get back to user mode with invalid %cs or %ss
	 * and will take the trap in iret instead.  Much code relies
	 * on user_mode() to distinguish a user trap frame (which can
	 * safely use invalid selectors) from a kernel trap frame.
	 */
	switch (offset) {
	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ss):
		if (unlikely(value == 0))
			return -EIO;

	default:
		*pt_regs_access(task_pt_regs(task), offset) = value;
		break;

	case offsetof(struct user_regs_struct, gs):
		if (task == current)
			set_user_gs(task_pt_regs(task), value);
		else
			task_user_gs(task) = value;
	}

	return 0;
}

static unsigned long debugreg_addr_limit(struct task_struct *task)
{
	return TASK_SIZE - 3;
}

#else /* CONFIG_X86_64 */

#define FLAG_MASK		(FLAG_MASK_32 | X86_EFLAGS_NT)

static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long offset)
{
	BUILD_BUG_ON(offsetof(struct pt_regs, r15) != 0);
	return &regs->r15 + (offset / sizeof(regs->r15));
}

static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
{
	/*
	 * Returning the value truncates it to 16 bits.
	 */
	unsigned int seg;

	switch (offset) {
	case offsetof(struct user_regs_struct, fs):
		if (task == current) {
			/* Older gas can't assemble movq %?s,%r?? */
			asm("movl %%fs,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.fsindex;
	case offsetof(struct user_regs_struct, gs):
		if (task == current) {
			asm("movl %%gs,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.gsindex;
	case offsetof(struct user_regs_struct, ds):
		if (task == current) {
			asm("movl %%ds,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.ds;
	case offsetof(struct user_regs_struct, es):
		if (task == current) {
			asm("movl %%es,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.es;

	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ss):
		break;
	}
	return *pt_regs_access(task_pt_regs(task), offset);
}

static int set_segment_reg(struct task_struct *task,
			   unsigned long offset, u16 value)
{
	/*
	 * The value argument was already truncated to 16 bits.
	 */
	if (invalid_selector(value))
		return -EIO;

	switch (offset) {
	case offsetof(struct user_regs_struct,fs):
		/*
		 * If this is setting fs as for normal 64-bit use but
		 * setting fs_base has implicitly changed it, leave it.
		 */
		if ((value == FS_TLS_SEL && task->thread.fsindex == 0 &&
		     task->thread.fs != 0) ||
		    (value == 0 && task->thread.fsindex == FS_TLS_SEL &&
		     task->thread.fs == 0))
			break;
		task->thread.fsindex = value;
		if (task == current)
			loadsegment(fs, task->thread.fsindex);
		break;
	case offsetof(struct user_regs_struct,gs):
		/*
		 * If this is setting gs as for normal 64-bit use but
		 * setting gs_base has implicitly changed it, leave it.
		 */
		if ((value == GS_TLS_SEL && task->thread.gsindex == 0 &&
		     task->thread.gs != 0) ||
		    (value == 0 && task->thread.gsindex == GS_TLS_SEL &&
		     task->thread.gs == 0))
			break;
		task->thread.gsindex = value;
		if (task == current)
			load_gs_index(task->thread.gsindex);
		break;
	case offsetof(struct user_regs_struct,ds):
		task->thread.ds = value;
		if (task == current)
			loadsegment(ds, task->thread.ds);
		break;
	case offsetof(struct user_regs_struct,es):
		task->thread.es = value;
		if (task == current)
			loadsegment(es, task->thread.es);
		break;

		/*
		 * Can't actually change these in 64-bit mode.
		 */
	case offsetof(struct user_regs_struct,cs):
		if (unlikely(value == 0))
			return -EIO;
#ifdef CONFIG_IA32_EMULATION
		if (test_tsk_thread_flag(task, TIF_IA32))
			task_pt_regs(task)->cs = value;
#endif
		break;
	case offsetof(struct user_regs_struct,ss):
		if (unlikely(value == 0))
			return -EIO;
#ifdef CONFIG_IA32_EMULATION
		if (test_tsk_thread_flag(task, TIF_IA32))
			task_pt_regs(task)->ss = value;
#endif
		break;
	}

	return 0;
}

static unsigned long debugreg_addr_limit(struct task_struct *task)
{
#ifdef CONFIG_IA32_EMULATION
	if (test_tsk_thread_flag(task, TIF_IA32))
		return IA32_PAGE_OFFSET - 3;
#endif
	return TASK_SIZE_MAX - 7;
}

#endif	/* CONFIG_X86_32 */

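/*
 * The -3/-7 margins above keep a maximal-length watchpoint inside the
 * user address space: a debug register holds the *start* address of
 * the watched data, so with up to 4-byte (32-bit task) or 8-byte
 * (64-bit task) watchpoints, addr + len - 1 must still be a valid
 * user address.
 */
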
static unsigned long get_flags(struct task_struct *task)
{
	unsigned long retval = task_pt_regs(task)->flags;

	/*
	 * If the debugger set TF, hide it from the readout.
	 */
	if (test_tsk_thread_flag(task, TIF_FORCED_TF))
		retval &= ~X86_EFLAGS_TF;

	return retval;
}

static int set_flags(struct task_struct *task, unsigned long value)
{
	struct pt_regs *regs = task_pt_regs(task);

	/*
	 * If the user value contains TF, mark that
	 * it was not "us" (the debugger) that set it.
	 * If not, make sure it stays set if we set it ourselves.
	 */
	if (value & X86_EFLAGS_TF)
		clear_tsk_thread_flag(task, TIF_FORCED_TF);
	else if (test_tsk_thread_flag(task, TIF_FORCED_TF))
		value |= X86_EFLAGS_TF;

	regs->flags = (regs->flags & ~FLAG_MASK) | (value & FLAG_MASK);

	return 0;
}

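/*
 * Example of the TIF_FORCED_TF handshake: PTRACE_SINGLESTEP sets TF
 * and marks TIF_FORCED_TF.  get_flags() then hides TF from readouts,
 * so if the debugger writes those flags straight back, the else-if
 * above re-adds TF and single-stepping keeps working.  Only an
 * explicit TF write by the debugger clears TIF_FORCED_TF and makes
 * TF visible again.
 */
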
static int putreg(struct task_struct *child,
		  unsigned long offset, unsigned long value)
{
	switch (offset) {
	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ds):
	case offsetof(struct user_regs_struct, es):
	case offsetof(struct user_regs_struct, fs):
	case offsetof(struct user_regs_struct, gs):
	case offsetof(struct user_regs_struct, ss):
		return set_segment_reg(child, offset, value);

	case offsetof(struct user_regs_struct, flags):
		return set_flags(child, value);

#ifdef CONFIG_X86_64
	/*
	 * Orig_ax is really just a flag with small positive and
	 * negative values, so make sure to always sign-extend it
	 * from 32 bits so that it works correctly regardless of
	 * whether we come from a 32-bit environment or not.
	 */
	case offsetof(struct user_regs_struct, orig_ax):
		value = (long) (s32) value;
		break;

	case offsetof(struct user_regs_struct,fs_base):
		if (value >= TASK_SIZE_OF(child))
			return -EIO;
		/*
		 * When changing the segment base, use do_arch_prctl
		 * to set either thread.fs or thread.fsindex and the
		 * corresponding GDT slot.
		 */
		if (child->thread.fs != value)
			return do_arch_prctl(child, ARCH_SET_FS, value);
		return 0;
	case offsetof(struct user_regs_struct,gs_base):
		/*
		 * Exactly the same here as the %fs handling above.
		 */
		if (value >= TASK_SIZE_OF(child))
			return -EIO;
		if (child->thread.gs != value)
			return do_arch_prctl(child, ARCH_SET_GS, value);
		return 0;
#endif
	}

	*pt_regs_access(task_pt_regs(child), offset) = value;
	return 0;
}

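/*
 * Sign-extension example: a 32-bit debugger storing 0xffffffff into
 * orig_eax must read back as orig_ax == -1L on a 64-bit kernel so that
 * "(long)orig_ax < 0" syscall-restart checks still fire; putreg32()
 * below applies the same (long)(s32) cast.
 */
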
static unsigned long getreg(struct task_struct *task, unsigned long offset)
{
	switch (offset) {
	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ds):
	case offsetof(struct user_regs_struct, es):
	case offsetof(struct user_regs_struct, fs):
	case offsetof(struct user_regs_struct, gs):
	case offsetof(struct user_regs_struct, ss):
		return get_segment_reg(task, offset);

	case offsetof(struct user_regs_struct, flags):
		return get_flags(task);

#ifdef CONFIG_X86_64
	case offsetof(struct user_regs_struct, fs_base): {
		/*
		 * do_arch_prctl may have used a GDT slot instead of
		 * the MSR.  To userland, it appears the same either
		 * way, except the %fs segment selector might not be 0.
		 */
		unsigned int seg = task->thread.fsindex;
		if (task->thread.fs != 0)
			return task->thread.fs;
		if (task == current)
			asm("movl %%fs,%0" : "=r" (seg));
		if (seg != FS_TLS_SEL)
			return 0;
		return get_desc_base(&task->thread.tls_array[FS_TLS]);
	}
	case offsetof(struct user_regs_struct, gs_base): {
		/*
		 * Exactly the same here as the %fs handling above.
		 */
		unsigned int seg = task->thread.gsindex;
		if (task->thread.gs != 0)
			return task->thread.gs;
		if (task == current)
			asm("movl %%gs,%0" : "=r" (seg));
		if (seg != GS_TLS_SEL)
			return 0;
		return get_desc_base(&task->thread.tls_array[GS_TLS]);
	}
#endif
	}

	return *pt_regs_access(task_pt_regs(task), offset);
}

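/*
 * fs_base readout example: after arch_prctl(ARCH_SET_FS, base) the
 * base lives in thread.fs (with fsindex == 0) and is returned
 * directly; after set_thread_area() installed a TLS segment,
 * thread.fs is 0, %fs reads as FS_TLS_SEL, and the base is recovered
 * from the GDT entry via get_desc_base().
 */
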
static int genregs_get(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       void *kbuf, void __user *ubuf)
{
	if (kbuf) {
		unsigned long *k = kbuf;
		while (count > 0) {
			*k++ = getreg(target, pos);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		unsigned long __user *u = ubuf;
		while (count > 0) {
			if (__put_user(getreg(target, pos), u++))
				return -EFAULT;
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}

	return 0;
}

static int genregs_set(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       const void *kbuf, const void __user *ubuf)
{
	int ret = 0;

	if (kbuf) {
		const unsigned long *k = kbuf;
		while (count > 0 && !ret) {
			ret = putreg(target, pos, *k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		const unsigned long __user *u = ubuf;
		while (count > 0 && !ret) {
			unsigned long word;
			ret = __get_user(word, u++);
			if (ret)
				break;
			ret = putreg(target, pos, word);
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}

	return ret;
}

/*
 * This function is trivial and will be inlined by the compiler.
 * Having it separates the implementation details of debug
 * registers from the interface details of ptrace.
 */
static unsigned long ptrace_get_debugreg(struct task_struct *child, int n)
{
	switch (n) {
	case 0:		return child->thread.debugreg0;
	case 1:		return child->thread.debugreg1;
	case 2:		return child->thread.debugreg2;
	case 3:		return child->thread.debugreg3;
	case 6:		return child->thread.debugreg6;
	case 7:		return child->thread.debugreg7;
	}
	return 0;
}

static int ptrace_set_debugreg(struct task_struct *child,
			       int n, unsigned long data)
{
	int i;

	if (unlikely(n == 4 || n == 5))
		return -EIO;

	if (n < 4 && unlikely(data >= debugreg_addr_limit(child)))
		return -EIO;

	switch (n) {
	case 0:		child->thread.debugreg0 = data; break;
	case 1:		child->thread.debugreg1 = data; break;
	case 2:		child->thread.debugreg2 = data; break;
	case 3:		child->thread.debugreg3 = data; break;

	case 6:
		if ((data & ~0xffffffffUL) != 0)
			return -EIO;
		child->thread.debugreg6 = data;
		break;

	case 7:
		/*
		 * Sanity-check data. Take one half-byte at once with
		 * check = (val >> (16 + 4*i)) & 0xf. It contains the
		 * R/Wi and LENi bits; bits 0 and 1 are R/Wi, and bits
		 * 2 and 3 are LENi. Given a list of invalid values,
		 * we do mask |= 1 << invalid_value, so that
		 * (mask >> check) & 1 is a correct test for invalid
		 * values.
		 *
		 * R/Wi contains the type of the breakpoint /
		 * watchpoint, LENi contains the length of the watched
		 * data in the watchpoint case.
		 *
		 * The invalid values are:
		 * - LENi == 0x10 (undefined), so mask |= 0x0f00.	[32-bit]
		 * - R/Wi == 0x10 (break on I/O reads or writes), so
		 *   mask |= 0x4444.
		 * - R/Wi == 0x00 && LENi != 0x00, so we have mask |=
		 *   0x1110.
		 *
		 * Finally, mask = 0x0f00 | 0x4444 | 0x1110 == 0x5f54.
		 *
		 * See the Intel Manual "System Programming Guide",
		 * 15.2.4.
		 *
		 * Note that LENi == 0x10 is defined on x86_64 in long
		 * mode (i.e. even for 32-bit userspace software, but
		 * 64-bit kernel), so the x86_64 mask value is 0x5554.
		 * See the AMD manual no. 24593 (AMD64 System Programming).
		 */
#ifdef CONFIG_X86_32
#define	DR7_MASK	0x5f54
#else
#define	DR7_MASK	0x5554
#endif
		data &= ~DR_CONTROL_RESERVED;
		for (i = 0; i < 4; i++)
			if ((DR7_MASK >> ((data >> (16 + 4*i)) & 0xf)) & 1)
				return -EIO;
		child->thread.debugreg7 = data;
		if (data)
			set_tsk_thread_flag(child, TIF_DEBUG);
		else
			clear_tsk_thread_flag(child, TIF_DEBUG);
		break;
	}

	return 0;
}

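/*
 * Worked DR7 check: for data == 0x000d0002 (G0 set, R/W0 == 01 (data
 * write), LEN0 == 11 (4 bytes)) the i == 0 half-byte is
 * (0x000d0002 >> 16) & 0xf == 0xd, and (DR7_MASK >> 0xd) & 1 == 0 with
 * either mask, so the value is accepted.  Requesting an I/O watchpoint
 * (R/W0 == 10, check value 0x2) hits bit 2 of the 0x4444 term and
 * fails with -EIO.
 */
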
/*
 * These access the current or another (stopped) task's io permission
 * bitmap for debugging or core dump.
 */
static int ioperm_active(struct task_struct *target,
			 const struct user_regset *regset)
{
	return target->thread.io_bitmap_max / regset->size;
}

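/*
 * io_bitmap_max is a byte count, so this reports how many regset
 * members (sizeof(long) bytes each) of the bitmap are worth dumping:
 * e.g. a 64-bit task whose ioperm() usage covers 128 bitmap bytes
 * yields 128 / 8 == 16 members.
 */
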
static int ioperm_get(struct task_struct *target,
		      const struct user_regset *regset,
		      unsigned int pos, unsigned int count,
		      void *kbuf, void __user *ubuf)
{
	if (!target->thread.io_bitmap_ptr)
		return -ENXIO;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   target->thread.io_bitmap_ptr,
				   0, IO_BITMAP_BYTES);
}

#ifdef CONFIG_X86_PTRACE_BTS
/*
 * A branch trace store context.
 *
 * Contexts may only be installed by ptrace_bts_config() and only for
 * ptraced tasks.
 *
 * Contexts are destroyed when the tracee is detached from the tracer.
 * The actual destruction work requires interrupts enabled, so the
 * work is deferred and will be scheduled during __ptrace_unlink().
 *
 * Contexts hold an additional task_struct reference on the traced
 * task, as well as a reference on the tracer's mm.
 *
 * Ptrace already holds a task_struct for the duration of ptrace operations,
 * but since destruction is deferred, it may be executed after both
 * tracer and tracee exited.
 */
struct bts_context {
	/* The branch trace handle. */
	struct bts_tracer	*tracer;

	/* The buffer used to store the branch trace and its size. */
	size_t			size;
	void			*buffer;

	/* The mm that paid for the above buffer. */
	struct mm_struct	*mm;

	/* The task this context belongs to. */
	struct task_struct	*task;

	/* The signal to send on a bts buffer overflow. */
	unsigned int		bts_ovfl_signal;

	/* The work struct to destroy a context. */
	struct work_struct	work;
};

static int alloc_bts_buffer(struct bts_context *context, unsigned int size)
{
	void *buffer = NULL;
	int err = -ENOMEM;

	err = account_locked_memory(current->mm, current->signal->rlim, size);
	if (err < 0)
		return err;

	buffer = kzalloc(size, GFP_KERNEL);
	if (!buffer)
		goto out_refund;

	context->buffer = buffer;
	context->size = size;
	context->mm = get_task_mm(current);

	return 0;

 out_refund:
	refund_locked_memory(current->mm, size);

	return err;
}

static inline void free_bts_buffer(struct bts_context *context)
{
	if (!context->buffer)
		return;

	kfree(context->buffer);
	context->buffer = NULL;

	refund_locked_memory(context->mm, context->size);
	context->size = 0;

	mmput(context->mm);
	context->mm = NULL;
}

static void free_bts_context_work(struct work_struct *w)
{
	struct bts_context *context;

	context = container_of(w, struct bts_context, work);

	ds_release_bts(context->tracer);
	put_task_struct(context->task);
	free_bts_buffer(context);
	kfree(context);
}

static inline void free_bts_context(struct bts_context *context)
{
	INIT_WORK(&context->work, free_bts_context_work);
	schedule_work(&context->work);
}

static inline struct bts_context *alloc_bts_context(struct task_struct *task)
{
	struct bts_context *context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (context) {
		context->task = task;
		task->bts = context;

		get_task_struct(task);
	}

	return context;
}

static int ptrace_bts_read_record(struct task_struct *child, size_t index,
				  struct bts_struct __user *out)
{
	struct bts_context *context;
	const struct bts_trace *trace;
	struct bts_struct bts;
	const unsigned char *at;
	int error;

	context = child->bts;
	if (!context)
		return -ESRCH;

	trace = ds_read_bts(context->tracer);
	if (!trace)
		return -ESRCH;

	at = trace->ds.top - ((index + 1) * trace->ds.size);
	if ((void *)at < trace->ds.begin)
		at += (trace->ds.n * trace->ds.size);

	if (!trace->read)
		return -EOPNOTSUPP;

	error = trace->read(context->tracer, at, &bts);
	if (error < 0)
		return error;

	if (copy_to_user(out, &bts, sizeof(bts)))
		return -EFAULT;

	return sizeof(bts);
}

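/*
 * Ring indexing example: index 0 is the most recently written record,
 * located at ds.top - ds.size.  When top is near the start of the
 * buffer the subtraction can fall below ds.begin; adding n * size then
 * wraps the pointer to the matching slot at the end of the ring.
 */
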
static int ptrace_bts_drain(struct task_struct *child,
			    long size,
			    struct bts_struct __user *out)
{
	struct bts_context *context;
	const struct bts_trace *trace;
	const unsigned char *at;
	int error, drained = 0;

	context = child->bts;
	if (!context)
		return -ESRCH;

	trace = ds_read_bts(context->tracer);
	if (!trace)
		return -ESRCH;

	if (!trace->read)
		return -EOPNOTSUPP;

	if (size < (trace->ds.top - trace->ds.begin))
		return -EIO;

	for (at = trace->ds.begin; (void *)at < trace->ds.top;
	     out++, drained++, at += trace->ds.size) {
		struct bts_struct bts;

		error = trace->read(context->tracer, at, &bts);
		if (error < 0)
			return error;

		if (copy_to_user(out, &bts, sizeof(bts)))
			return -EFAULT;
	}

	memset(trace->ds.begin, 0, trace->ds.n * trace->ds.size);

	error = ds_reset_bts(context->tracer);
	if (error < 0)
		return error;

	return drained;
}

static int ptrace_bts_config(struct task_struct *child,
			     long cfg_size,
			     const struct ptrace_bts_config __user *ucfg)
{
	struct bts_context *context;
	struct ptrace_bts_config cfg;
	unsigned int flags = 0;

	if (cfg_size < sizeof(cfg))
		return -EIO;

	if (copy_from_user(&cfg, ucfg, sizeof(cfg)))
		return -EFAULT;

	context = child->bts;
	if (!context)
		context = alloc_bts_context(child);
	if (!context)
		return -ENOMEM;

	if (cfg.flags & PTRACE_BTS_O_SIGNAL) {
		if (!cfg.signal)
			return -EINVAL;

		return -EOPNOTSUPP;
		context->bts_ovfl_signal = cfg.signal;
	}

	ds_release_bts(context->tracer);
	context->tracer = NULL;

	if ((cfg.flags & PTRACE_BTS_O_ALLOC) && (cfg.size != context->size)) {
		int err;

		free_bts_buffer(context);
		if (!cfg.size)
			return 0;

		err = alloc_bts_buffer(context, cfg.size);
		if (err < 0)
			return err;
	}

	if (cfg.flags & PTRACE_BTS_O_TRACE)
		flags |= BTS_USER;

	if (cfg.flags & PTRACE_BTS_O_SCHED)
		flags |= BTS_TIMESTAMPS;

	context->tracer =
		ds_request_bts_task(child, context->buffer, context->size,
				    NULL, (size_t)-1, flags);
	if (unlikely(IS_ERR(context->tracer))) {
		int error = PTR_ERR(context->tracer);

		free_bts_buffer(context);
		context->tracer = NULL;
		return error;
	}

	return sizeof(cfg);
}

static int ptrace_bts_status(struct task_struct *child,
			     long cfg_size,
			     struct ptrace_bts_config __user *ucfg)
{
	struct bts_context *context;
	const struct bts_trace *trace;
	struct ptrace_bts_config cfg;

	context = child->bts;
	if (!context)
		return -ESRCH;

	if (cfg_size < sizeof(cfg))
		return -EIO;

	trace = ds_read_bts(context->tracer);
	if (!trace)
		return -ESRCH;

	memset(&cfg, 0, sizeof(cfg));
	cfg.size	= trace->ds.end - trace->ds.begin;
	cfg.signal	= context->bts_ovfl_signal;
	cfg.bts_size	= sizeof(struct bts_struct);

	if (cfg.signal)
		cfg.flags |= PTRACE_BTS_O_SIGNAL;

	if (trace->ds.flags & BTS_USER)
		cfg.flags |= PTRACE_BTS_O_TRACE;

	if (trace->ds.flags & BTS_TIMESTAMPS)
		cfg.flags |= PTRACE_BTS_O_SCHED;

	if (copy_to_user(ucfg, &cfg, sizeof(cfg)))
		return -EFAULT;

	return sizeof(cfg);
}

static int ptrace_bts_clear(struct task_struct *child)
{
	struct bts_context *context;
	const struct bts_trace *trace;

	context = child->bts;
	if (!context)
		return -ESRCH;

	trace = ds_read_bts(context->tracer);
	if (!trace)
		return -ESRCH;

	memset(trace->ds.begin, 0, trace->ds.n * trace->ds.size);

	return ds_reset_bts(context->tracer);
}

static int ptrace_bts_size(struct task_struct *child)
{
	struct bts_context *context;
	const struct bts_trace *trace;

	context = child->bts;
	if (!context)
		return -ESRCH;

	trace = ds_read_bts(context->tracer);
	if (!trace)
		return -ESRCH;

	return (trace->ds.top - trace->ds.begin) / trace->ds.size;
}

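/*
 * E.g. with ds.begin == buf, ds.top == buf + 768 and trace->ds.size ==
 * 24 (sizes illustrative), this yields 768 / 24 == 32 records
 * currently buffered.
 */
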
/*
 * Called from __ptrace_unlink() after the child has been moved back
 * to its original parent.
 */
void ptrace_bts_untrace(struct task_struct *child)
{
	if (unlikely(child->bts)) {
		free_bts_context(child->bts);
		child->bts = NULL;
	}
}
#endif /* CONFIG_X86_PTRACE_BTS */

/*
 * Called by kernel/ptrace.c when detaching.
 *
 * Make sure the single step bit is not set.
 */
void ptrace_disable(struct task_struct *child)
{
	user_disable_single_step(child);
#ifdef TIF_SYSCALL_EMU
	clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
#endif
}

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
static const struct user_regset_view user_x86_32_view; /* Initialized below. */
#endif

long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
	int ret;
	unsigned long __user *datap = (unsigned long __user *)data;

	switch (request) {
	/* read the word at location addr in the USER area. */
	case PTRACE_PEEKUSR: {
		unsigned long tmp;

		ret = -EIO;
		if ((addr & (sizeof(data) - 1)) || addr < 0 ||
		    addr >= sizeof(struct user))
			break;

		tmp = 0;  /* Default return condition */
		if (addr < sizeof(struct user_regs_struct))
			tmp = getreg(child, addr);
		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
			 addr <= offsetof(struct user, u_debugreg[7])) {
			addr -= offsetof(struct user, u_debugreg[0]);
			tmp = ptrace_get_debugreg(child, addr / sizeof(data));
		}
		ret = put_user(tmp, datap);
		break;
	}

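	/*
	 * USER-area layout, by example: struct user begins with
	 * user_regs_struct, so addr 0 reads the first GP register,
	 * while addr offsetof(struct user, u_debugreg[7]) reads DR7.
	 * Word offsets matching neither range read back as 0.
	 */
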
	case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
		ret = -EIO;
		if ((addr & (sizeof(data) - 1)) || addr < 0 ||
		    addr >= sizeof(struct user))
			break;

		if (addr < sizeof(struct user_regs_struct))
			ret = putreg(child, addr, data);
		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
			 addr <= offsetof(struct user, u_debugreg[7])) {
			addr -= offsetof(struct user, u_debugreg[0]);
			ret = ptrace_set_debugreg(child,
						  addr / sizeof(data), data);
		}
		break;

	case PTRACE_GETREGS:	/* Get all gp regs from the child. */
		return copy_regset_to_user(child,
					   task_user_regset_view(current),
					   REGSET_GENERAL,
					   0, sizeof(struct user_regs_struct),
					   datap);

	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
		return copy_regset_from_user(child,
					     task_user_regset_view(current),
					     REGSET_GENERAL,
					     0, sizeof(struct user_regs_struct),
					     datap);

	case PTRACE_GETFPREGS:	/* Get the child FPU state. */
		return copy_regset_to_user(child,
					   task_user_regset_view(current),
					   REGSET_FP,
					   0, sizeof(struct user_i387_struct),
					   datap);

	case PTRACE_SETFPREGS:	/* Set the child FPU state. */
		return copy_regset_from_user(child,
					     task_user_regset_view(current),
					     REGSET_FP,
					     0, sizeof(struct user_i387_struct),
					     datap);

#ifdef CONFIG_X86_32
	case PTRACE_GETFPXREGS:	/* Get the child extended FPU state. */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_XFP,
					   0, sizeof(struct user_fxsr_struct),
					   datap);

	case PTRACE_SETFPXREGS:	/* Set the child extended FPU state. */
		return copy_regset_from_user(child, &user_x86_32_view,
					     REGSET_XFP,
					     0, sizeof(struct user_fxsr_struct),
					     datap);
#endif

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
	case PTRACE_GET_THREAD_AREA:
		if (addr < 0)
			return -EIO;
		ret = do_get_thread_area(child, addr,
					 (struct user_desc __user *) data);
		break;

	case PTRACE_SET_THREAD_AREA:
		if (addr < 0)
			return -EIO;
		ret = do_set_thread_area(child, addr,
					 (struct user_desc __user *) data, 0);
		break;
#endif

#ifdef CONFIG_X86_64
		/* normal 64bit interface to access TLS data.
		   Works just like arch_prctl, except that the arguments
		   are reversed. */
	case PTRACE_ARCH_PRCTL:
		ret = do_arch_prctl(child, data, addr);
		break;
#endif

	/*
	 * These bits need more cooking - not enabled yet:
	 */
#ifdef CONFIG_X86_PTRACE_BTS
	case PTRACE_BTS_CONFIG:
		ret = ptrace_bts_config
			(child, data, (struct ptrace_bts_config __user *)addr);
		break;

	case PTRACE_BTS_STATUS:
		ret = ptrace_bts_status
			(child, data, (struct ptrace_bts_config __user *)addr);
		break;

	case PTRACE_BTS_SIZE:
		ret = ptrace_bts_size(child);
		break;

	case PTRACE_BTS_GET:
		ret = ptrace_bts_read_record
			(child, data, (struct bts_struct __user *) addr);
		break;

	case PTRACE_BTS_CLEAR:
		ret = ptrace_bts_clear(child);
		break;

	case PTRACE_BTS_DRAIN:
		ret = ptrace_bts_drain
			(child, data, (struct bts_struct __user *) addr);
		break;
#endif /* CONFIG_X86_PTRACE_BTS */

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}

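/*
 * Illustrative user-space counterpart (a sketch, not part of this
 * file): reading a stopped child's instruction pointer through the
 * PTRACE_PEEKUSR path handled above.  Assumes the child is already
 * attached and stopped.
 *
 *	#include <stddef.h>
 *	#include <errno.h>
 *	#include <sys/ptrace.h>
 *	#include <sys/user.h>
 *
 *	errno = 0;
 *	long ip = ptrace(PTRACE_PEEKUSER, pid,
 *			 offsetof(struct user_regs_struct, eip), 0);
 *
 * On x86_64 the field is named rip.  Since -1 is a valid returned
 * word, tracers must clear errno first and check it afterwards.
 */
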
#ifdef CONFIG_IA32_EMULATION

#include <linux/compat.h>
#include <linux/syscalls.h>
#include <asm/ia32.h>
#include <asm/user32.h>

#define R32(l,q)							\
	case offsetof(struct user32, regs.l):				\
		regs->q = value; break

#define SEG32(rs)							\
	case offsetof(struct user32, regs.rs):				\
		return set_segment_reg(child,				\
				       offsetof(struct user_regs_struct, rs), \
				       value)

static int putreg32(struct task_struct *child, unsigned regno, u32 value)
{
	struct pt_regs *regs = task_pt_regs(child);

	switch (regno) {

	SEG32(cs);
	SEG32(ds);
	SEG32(es);
	SEG32(fs);
	SEG32(gs);
	SEG32(ss);

	R32(ebx, bx);
	R32(ecx, cx);
	R32(edx, dx);
	R32(edi, di);
	R32(esi, si);
	R32(ebp, bp);
	R32(eax, ax);
	R32(eip, ip);
	R32(esp, sp);

	case offsetof(struct user32, regs.orig_eax):
		/*
		 * Sign-extend the value so that orig_eax = -1
		 * causes (long)orig_ax < 0 tests to fire correctly.
		 */
		regs->orig_ax = (long) (s32) value;
		break;

	case offsetof(struct user32, regs.eflags):
		return set_flags(child, value);

	case offsetof(struct user32, u_debugreg[0]) ...
	     offsetof(struct user32, u_debugreg[7]):
		regno -= offsetof(struct user32, u_debugreg[0]);
		return ptrace_set_debugreg(child, regno / 4, value);

	default:
		if (regno > sizeof(struct user32) || (regno & 3))
			return -EIO;

		/*
		 * Other dummy fields in the virtual user structure
		 * are ignored.
		 */
		break;
	}
	return 0;
}

#undef R32
#undef SEG32

#define R32(l,q)							\
	case offsetof(struct user32, regs.l):				\
		*val = regs->q; break

#define SEG32(rs)							\
	case offsetof(struct user32, regs.rs):				\
		*val = get_segment_reg(child,				\
				       offsetof(struct user_regs_struct, rs)); \
		break

static int getreg32(struct task_struct *child, unsigned regno, u32 *val)
{
	struct pt_regs *regs = task_pt_regs(child);

	switch (regno) {

	SEG32(ds);
	SEG32(es);
	SEG32(fs);
	SEG32(gs);

	R32(cs, cs);
	R32(ss, ss);
	R32(ebx, bx);
	R32(ecx, cx);
	R32(edx, dx);
	R32(edi, di);
	R32(esi, si);
	R32(ebp, bp);
	R32(eax, ax);
	R32(orig_eax, orig_ax);
	R32(eip, ip);
	R32(esp, sp);

	case offsetof(struct user32, regs.eflags):
		*val = get_flags(child);
		break;

	case offsetof(struct user32, u_debugreg[0]) ...
	     offsetof(struct user32, u_debugreg[7]):
		regno -= offsetof(struct user32, u_debugreg[0]);
		*val = ptrace_get_debugreg(child, regno / 4);
		break;

	default:
		if (regno > sizeof(struct user32) || (regno & 3))
			return -EIO;

		/*
		 * Other dummy fields in the virtual user structure
		 * are ignored.
		 */
		*val = 0;
		break;
	}
	return 0;
}

static int genregs32_get(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 void *kbuf, void __user *ubuf)
{
	if (kbuf) {
		compat_ulong_t *k = kbuf;
		while (count > 0) {
			getreg32(target, pos, k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		compat_ulong_t __user *u = ubuf;
		while (count > 0) {
			compat_ulong_t word;
			getreg32(target, pos, &word);
			if (__put_user(word, u++))
				return -EFAULT;
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}

	return 0;
}

static int genregs32_set(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 const void *kbuf, const void __user *ubuf)
{
	int ret = 0;

	if (kbuf) {
		const compat_ulong_t *k = kbuf;
		while (count > 0 && !ret) {
			ret = putreg32(target, pos, *k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		const compat_ulong_t __user *u = ubuf;
		while (count > 0 && !ret) {
			compat_ulong_t word;
			ret = __get_user(word, u++);
			if (ret)
				break;
			ret = putreg32(target, pos, word);
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}

	return ret;
}

long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t caddr, compat_ulong_t cdata)
{
	unsigned long addr = caddr;
	unsigned long data = cdata;
	void __user *datap = compat_ptr(data);
	int ret;
	__u32 val;

	switch (request) {
	case PTRACE_PEEKUSR:
		ret = getreg32(child, addr, &val);
		if (ret == 0)
			ret = put_user(val, (__u32 __user *)datap);
		break;

	case PTRACE_POKEUSR:
		ret = putreg32(child, addr, data);
		break;

	case PTRACE_GETREGS:	/* Get all gp regs from the child. */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_GENERAL,
					   0, sizeof(struct user_regs_struct32),
					   datap);

	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
		return copy_regset_from_user(child, &user_x86_32_view,
					     REGSET_GENERAL, 0,
					     sizeof(struct user_regs_struct32),
					     datap);

	case PTRACE_GETFPREGS:	/* Get the child FPU state. */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_FP, 0,
					   sizeof(struct user_i387_ia32_struct),
					   datap);

	case PTRACE_SETFPREGS:	/* Set the child FPU state. */
		return copy_regset_from_user(
			child, &user_x86_32_view, REGSET_FP,
			0, sizeof(struct user_i387_ia32_struct), datap);

	case PTRACE_GETFPXREGS:	/* Get the child extended FPU state. */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_XFP, 0,
					   sizeof(struct user32_fxsr_struct),
					   datap);

	case PTRACE_SETFPXREGS:	/* Set the child extended FPU state. */
		return copy_regset_from_user(child, &user_x86_32_view,
					     REGSET_XFP, 0,
					     sizeof(struct user32_fxsr_struct),
					     datap);

	case PTRACE_GET_THREAD_AREA:
	case PTRACE_SET_THREAD_AREA:
#ifdef CONFIG_X86_PTRACE_BTS
	case PTRACE_BTS_CONFIG:
	case PTRACE_BTS_STATUS:
	case PTRACE_BTS_SIZE:
	case PTRACE_BTS_GET:
	case PTRACE_BTS_CLEAR:
	case PTRACE_BTS_DRAIN:
#endif /* CONFIG_X86_PTRACE_BTS */
		return arch_ptrace(child, request, addr, data);

	default:
		return compat_ptrace_request(child, request, addr, data);
	}

	return ret;
}

#endif	/* CONFIG_IA32_EMULATION */

#ifdef CONFIG_X86_64

static const struct user_regset x86_64_regsets[] = {
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_regs_struct) / sizeof(long),
		.size = sizeof(long), .align = sizeof(long),
		.get = genregs_get, .set = genregs_set
	},
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_i387_struct) / sizeof(long),
		.size = sizeof(long), .align = sizeof(long),
		.active = xfpregs_active, .get = xfpregs_get, .set = xfpregs_set
	},
	[REGSET_IOPERM64] = {
		.core_note_type = NT_386_IOPERM,
		.n = IO_BITMAP_LONGS,
		.size = sizeof(long), .align = sizeof(long),
		.active = ioperm_active, .get = ioperm_get
	},
};

static const struct user_regset_view user_x86_64_view = {
	.name = "x86_64", .e_machine = EM_X86_64,
	.regsets = x86_64_regsets, .n = ARRAY_SIZE(x86_64_regsets)
};

#else  /* CONFIG_X86_32 */

#define user_regs_struct32	user_regs_struct
#define genregs32_get		genregs_get
#define genregs32_set		genregs_set

#define user_i387_ia32_struct	user_i387_struct
#define user32_fxsr_struct	user_fxsr_struct

#endif	/* CONFIG_X86_64 */

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
static const struct user_regset x86_32_regsets[] = {
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_regs_struct32) / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.get = genregs32_get, .set = genregs32_set
	},
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_i387_ia32_struct) / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.active = fpregs_active, .get = fpregs_get, .set = fpregs_set
	},
	[REGSET_XFP] = {
		.core_note_type = NT_PRXFPREG,
		.n = sizeof(struct user32_fxsr_struct) / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.active = xfpregs_active, .get = xfpregs_get, .set = xfpregs_set
	},
	[REGSET_TLS] = {
		.core_note_type = NT_386_TLS,
		.n = GDT_ENTRY_TLS_ENTRIES, .bias = GDT_ENTRY_TLS_MIN,
		.size = sizeof(struct user_desc),
		.align = sizeof(struct user_desc),
		.active = regset_tls_active,
		.get = regset_tls_get, .set = regset_tls_set
	},
	[REGSET_IOPERM32] = {
		.core_note_type = NT_386_IOPERM,
		.n = IO_BITMAP_BYTES / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.active = ioperm_active, .get = ioperm_get
	},
};

static const struct user_regset_view user_x86_32_view = {
	.name = "i386", .e_machine = EM_386,
	.regsets = x86_32_regsets, .n = ARRAY_SIZE(x86_32_regsets)
};
#endif

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_IA32_EMULATION
	if (test_tsk_thread_flag(task, TIF_IA32))
#endif
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
		return &user_x86_32_view;
#endif
#ifdef CONFIG_X86_64
	return &user_x86_64_view;
#endif
}

void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
		  int error_code, int si_code)
{
	struct siginfo info;

	tsk->thread.trap_no = 1;
	tsk->thread.error_code = error_code;

	memset(&info, 0, sizeof(info));
	info.si_signo = SIGTRAP;
	info.si_code = si_code;

	/* User-mode ip? */
	info.si_addr = user_mode_vm(regs) ? (void __user *) regs->ip : NULL;

	/* Send us the fake SIGTRAP */
	force_sig_info(SIGTRAP, &info, tsk);
}

#ifdef CONFIG_X86_32
# define IS_IA32	1
#elif defined CONFIG_IA32_EMULATION
# define IS_IA32	is_compat_task()
#else
# define IS_IA32	0
#endif

/*
 * We must return the syscall number to actually look up in the table.
 * This can be -1L to skip running any syscall at all.
 */
asmregparm long syscall_trace_enter(struct pt_regs *regs)
{
	long ret = 0;

	/*
	 * If we stepped into a sysenter/syscall insn, it trapped in
	 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
	 * If user-mode had set TF itself, then it's still clear from
	 * do_debug() and we need to set it again to restore the user
	 * state.  If we entered on the slow path, TF was already set.
	 */
	if (test_thread_flag(TIF_SINGLESTEP))
		regs->flags |= X86_EFLAGS_TF;

	/* do the secure computing check first */
	secure_computing(regs->orig_ax);

	if (unlikely(test_thread_flag(TIF_SYSCALL_EMU)))
		ret = -1L;

	if ((ret || test_thread_flag(TIF_SYSCALL_TRACE)) &&
	    tracehook_report_syscall_entry(regs))
		ret = -1L;

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_syscall_enter(regs, regs->orig_ax);

	if (unlikely(current->audit_context)) {
		if (IS_IA32)
			audit_syscall_entry(AUDIT_ARCH_I386,
					    regs->orig_ax,
					    regs->bx, regs->cx,
					    regs->dx, regs->si);
#ifdef CONFIG_X86_64
		else
			audit_syscall_entry(AUDIT_ARCH_X86_64,
					    regs->orig_ax,
					    regs->di, regs->si,
					    regs->dx, regs->r10);
#endif
	}

	return ret ?: regs->orig_ax;
}

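/*
 * Return-value contract, by example: under PTRACE_SYSEMU the
 * TIF_SYSCALL_EMU test above forces ret to -1L, so the caller looks up
 * syscall number -1, executes nothing, and the tracer emulates the
 * call.  Otherwise the (possibly tracer-modified) regs->orig_ax is
 * returned and dispatched normally.
 */
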
asmregparm void syscall_trace_leave(struct pt_regs *regs)
{
	if (unlikely(current->audit_context))
		audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_syscall_exit(regs, regs->ax);

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(regs, 0);

	/*
	 * If TIF_SYSCALL_EMU is set, we only get here because of
	 * TIF_SINGLESTEP (i.e. this is PTRACE_SYSEMU_SINGLESTEP).
	 * We already reported this syscall instruction in
	 * syscall_trace_enter(), so don't do any more now.
	 */
	if (unlikely(test_thread_flag(TIF_SYSCALL_EMU)))
		return;

	/*
	 * If we are single-stepping, synthesize a trap to follow the
	 * system call instruction.
	 */
	if (test_thread_flag(TIF_SINGLESTEP) &&
	    tracehook_consider_fatal_signal(current, SIGTRAP))
		send_sigtrap(current, regs, 0, TRAP_BRKPT);
}