/*
 *  linux/arch/x86-64/kernel/process.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 *
 *  X86-64 port
 *	Andi Kleen.
 *
 *	CPU hotplug support - ashok.raj@intel.com
 *  $Id: process.c,v 1.38 2002/01/15 10:08:03 ak Exp $
 */

/*
 * This file handles the architecture-dependent parts of process handling..
 */
#include <stdarg.h>

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/module.h>
#include <linux/a.out.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/ptrace.h>
#include <linux/utsname.h>
#include <linux/random.h>
#include <linux/kprobes.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/mmu_context.h>
#include <asm/pda.h>
#include <asm/prctl.h>
#include <asm/kdebug.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/ia32.h>
asmlinkage extern void ret_from_fork(void);

unsigned long kernel_thread_flags = CLONE_VM | CLONE_UNTRACED;

static atomic_t hlt_counter = ATOMIC_INIT(0);

unsigned long boot_option_idle_override = 0;
EXPORT_SYMBOL(boot_option_idle_override);

/*
 * Power management idle function, if any..
 */
void (*pm_idle)(void);
static DEFINE_PER_CPU(unsigned int, cpu_idle_state);

void disable_hlt(void)
{
	atomic_inc(&hlt_counter);
}
EXPORT_SYMBOL(disable_hlt);

void enable_hlt(void)
{
	atomic_dec(&hlt_counter);
}
EXPORT_SYMBOL(enable_hlt);
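/*
 * Drivers that cannot tolerate the wakeup latency of HLT (tight DMA
 * timing windows, e.g. in the floppy driver) may bracket the critical
 * region with disable_hlt()/enable_hlt().  While hlt_counter is nonzero,
 * default_idle() below simply returns, so the idle loop spins instead of
 * halting the CPU.
 */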
/*
 * We use this if we don't have any better
 * idle routine..
 */
void default_idle(void)
{
	if (!atomic_read(&hlt_counter)) {
		local_irq_disable();
		if (!need_resched())
			safe_halt();
		else
			local_irq_enable();
	}
}
/*
 * On SMP it's slightly faster (but much more power-consuming!)
 * to poll the ->need_resched flag instead of waiting for the
 * cross-CPU IPI to arrive. Use this option with caution.
 */
static void poll_idle (void)
{
	int oldval;

	local_irq_enable();

	/*
	 * Deal with another CPU just having chosen a thread to
	 * run here:
	 */
	oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);

	if (!oldval) {
		set_thread_flag(TIF_POLLING_NRFLAG);
		asm volatile(
			"2:"
			"testl %0,%1;"
			"rep; nop;"
			"je 2b;"
			: :
			"i" (_TIF_NEED_RESCHED),
			"m" (current_thread_info()->flags));
		clear_thread_flag(TIF_POLLING_NRFLAG);
	} else {
		set_need_resched();
	}
}
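/*
 * While TIF_POLLING_NRFLAG is set, the scheduler knows that merely
 * setting TIF_NEED_RESCHED kicks this CPU out of the polling loop
 * above, so resched_task() can skip the cross-CPU reschedule IPI;
 * "rep; nop" is the PAUSE hint that keeps the spin loop friendly to
 * the sibling hyperthread and the memory bus.
 */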
void cpu_idle_wait(void)
{
	unsigned int cpu, this_cpu = get_cpu();
	cpumask_t map;

	set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
	put_cpu();

	cpus_clear(map);
	for_each_online_cpu(cpu) {
		per_cpu(cpu_idle_state, cpu) = 1;
		cpu_set(cpu, map);
	}

	__get_cpu_var(cpu_idle_state) = 0;

	wmb();
	do {
		ssleep(1);
		for_each_online_cpu(cpu) {
			if (cpu_isset(cpu, map) && !per_cpu(cpu_idle_state, cpu))
				cpu_clear(cpu, map);
		}
		cpus_and(map, map, cpu_online_map);
	} while (!cpus_empty(map));
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);
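/*
 * cpu_idle_wait() is the handshake used when pm_idle is changed at
 * runtime: every online CPU gets its cpu_idle_state flag raised, and
 * the caller sleeps until each of them has passed through the top of
 * the idle loop (which clears the flag), guaranteeing that no CPU is
 * still executing a stale idle routine.
 */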
#ifdef CONFIG_HOTPLUG_CPU
DECLARE_PER_CPU(int, cpu_state);

#include <asm/nmi.h>
/* We don't actually take CPU down, just spin without interrupts. */
static inline void play_dead(void)
{
	idle_task_exit();
	wbinvd();
	mb();
	/* Ack it */
	__get_cpu_var(cpu_state) = CPU_DEAD;

	while (1)
		safe_halt();
}
#else
static inline void play_dead(void)
{
	BUG();
}
#endif /* CONFIG_HOTPLUG_CPU */
/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle (void)
{
	/* endless idle loop with no priority at all */
	while (1) {
		while (!need_resched()) {
			void (*idle)(void);

			if (__get_cpu_var(cpu_idle_state))
				__get_cpu_var(cpu_idle_state) = 0;

			rmb();
			idle = pm_idle;
			if (!idle)
				idle = default_idle;
			if (cpu_is_offline(smp_processor_id()))
				play_dead();
			idle();
		}

		schedule();
	}
}
/*
 * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
 * which can obviate IPI to trigger checking of need_resched.
 * We execute MONITOR against need_resched and enter optimized wait state
 * through MWAIT. Whenever someone changes need_resched, we would be woken
 * up from MWAIT (without an IPI).
 */
static void mwait_idle(void)
{
	local_irq_enable();

	if (!need_resched()) {
		set_thread_flag(TIF_POLLING_NRFLAG);
		do {
			__monitor((void *)&current_thread_info()->flags, 0, 0);
			if (need_resched())
				break;
			__mwait(0, 0);
		} while (!need_resched());
		clear_thread_flag(TIF_POLLING_NRFLAG);
	}
}
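/*
 * MONITOR arms a watch on the cache line holding the thread flags;
 * MWAIT then parks the CPU in a low-power state until something writes
 * that line (or an interrupt arrives).  The scheduler's store of
 * TIF_NEED_RESCHED into current_thread_info()->flags is exactly such a
 * write, which is why no reschedule IPI is needed here either.
 */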
void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
{
	static int printed;
	if (cpu_has(c, X86_FEATURE_MWAIT)) {
		/*
		 * Skip, if setup has overridden idle.
		 * One CPU supports mwait => all CPUs support mwait
		 */
		if (!pm_idle) {
			if (!printed) {
				printk("using mwait in idle threads.\n");
				printed = 1;
			}
			pm_idle = mwait_idle;
		}
	}
}
static int __init idle_setup (char *str)
{
	if (!strncmp(str, "poll", 4)) {
		printk("using polling idle threads.\n");
		pm_idle = poll_idle;
	}

	boot_option_idle_override = 1;
	return 1;
}

__setup("idle=", idle_setup);
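/*
 * Booting with "idle=poll" on the kernel command line therefore forces
 * poll_idle() and raises boot_option_idle_override, so that later code
 * (e.g. ACPI idle setup) knows not to install its own routine.
 */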
/* Prints also some state that isn't saved in the pt_regs */
void __show_regs(struct pt_regs * regs)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
	unsigned int fsindex,gsindex;
	unsigned int ds,cs,es;

	printk("\n");
	print_modules();
	printk("Pid: %d, comm: %.20s %s %s %.*s\n",
		current->pid, current->comm, print_tainted(),
		system_utsname.release,
		(int)strcspn(system_utsname.version, " "),
		system_utsname.version);
	printk("RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->rip);
	printk_address(regs->rip);
	printk("\nRSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss, regs->rsp, regs->eflags);
	printk("RAX: %016lx RBX: %016lx RCX: %016lx\n",
	       regs->rax, regs->rbx, regs->rcx);
	printk("RDX: %016lx RSI: %016lx RDI: %016lx\n",
	       regs->rdx, regs->rsi, regs->rdi);
	printk("RBP: %016lx R08: %016lx R09: %016lx\n",
	       regs->rbp, regs->r8, regs->r9);
	printk("R10: %016lx R11: %016lx R12: %016lx\n",
	       regs->r10, regs->r11, regs->r12);
	printk("R13: %016lx R14: %016lx R15: %016lx\n",
	       regs->r13, regs->r14, regs->r15);

	asm("movl %%ds,%0" : "=r" (ds));
	asm("movl %%cs,%0" : "=r" (cs));
	asm("movl %%es,%0" : "=r" (es));
	asm("movl %%fs,%0" : "=r" (fsindex));
	asm("movl %%gs,%0" : "=r" (gsindex));

	rdmsrl(MSR_FS_BASE, fs);
	rdmsrl(MSR_GS_BASE, gs);
	rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);

	asm("movq %%cr0, %0": "=r" (cr0));
	asm("movq %%cr2, %0": "=r" (cr2));
	asm("movq %%cr3, %0": "=r" (cr3));
	asm("movq %%cr4, %0": "=r" (cr4));

	printk("FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
	       fs,fsindex,gs,gsindex,shadowgs);
	printk("CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds, es, cr0);
	printk("CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3, cr4);
}
void show_regs(struct pt_regs *regs)
{
	printk("CPU %d:", smp_processor_id());
	__show_regs(regs);
	show_trace(&regs->rsp);
}
/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
	struct task_struct *me = current;
	struct thread_struct *t = &me->thread;

	/*
	 * Remove function-return probe instances associated with this task
	 * and put them back on the free list. Do not insert an exit probe for
	 * this function, it will be disabled by kprobe_flush_task if you do.
	 */
	kprobe_flush_task(me);

	if (me->thread.io_bitmap_ptr) {
		struct tss_struct *tss = &per_cpu(init_tss, get_cpu());

		kfree(t->io_bitmap_ptr);
		t->io_bitmap_ptr = NULL;
		/*
		 * Careful, clear this in the TSS too:
		 */
		memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
		t->io_bitmap_max = 0;
		put_cpu();
	}
}
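/*
 * Setting the TSS bitmap bytes to 0xff is not just scrubbing: in the
 * x86 I/O permission bitmap a 1 bit means "access denied", so an
 * all-ones bitmap revokes every port this thread had been granted.
 */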
void flush_thread(void)
{
	struct task_struct *tsk = current;
	struct thread_info *t = current_thread_info();

	/*
	 * Remove function-return probe instances associated with this task
	 * and put them back on the free list. Do not insert an exit probe for
	 * this function, it will be disabled by kprobe_flush_task if you do.
	 */
	kprobe_flush_task(tsk);

	if (t->flags & _TIF_ABI_PENDING)
		t->flags ^= (_TIF_ABI_PENDING | _TIF_IA32);

	tsk->thread.debugreg0 = 0;
	tsk->thread.debugreg1 = 0;
	tsk->thread.debugreg2 = 0;
	tsk->thread.debugreg3 = 0;
	tsk->thread.debugreg6 = 0;
	tsk->thread.debugreg7 = 0;
	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
	/*
	 * Forget coprocessor state..
	 */
	clear_fpu(tsk);
	clear_used_math();
}
void release_thread(struct task_struct *dead_task)
{
	if (dead_task->mm) {
		if (dead_task->mm->context.size) {
			printk("WARNING: dead process %8s still has LDT? <%p/%d>\n",
					dead_task->comm,
					dead_task->mm->context.ldt,
					dead_task->mm->context.size);
			BUG();
		}
	}
}
static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr)
{
	struct user_desc ud = {
		.base_addr = addr,
		.limit = 0xfffff,
		.seg_32bit = 1,
		.limit_in_pages = 1,
		.useable = 1,
	};
	struct n_desc_struct *desc = (void *)t->thread.tls_array;
	desc += tls;
	desc->a = LDT_entry_a(&ud);
	desc->b = LDT_entry_b(&ud);
}

static inline u32 read_32bit_tls(struct task_struct *t, int tls)
{
	struct desc_struct *desc = (void *)t->thread.tls_array;
	desc += tls;
	return desc->base0 |
		(((u32)desc->base1) << 16) |
		(((u32)desc->base2) << 24);
}
/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
	unlazy_fpu(tsk);
}
int copy_thread(int nr, unsigned long clone_flags, unsigned long rsp,
		unsigned long unused,
	struct task_struct * p, struct pt_regs * regs)
{
	int err;
	struct pt_regs * childregs;
	struct task_struct *me = current;

	childregs = ((struct pt_regs *) (THREAD_SIZE + (unsigned long) p->thread_info)) - 1;
	*childregs = *regs;

	childregs->rax = 0;
	childregs->rsp = rsp;
	if (rsp == ~0UL)
		childregs->rsp = (unsigned long)childregs;

	p->thread.rsp = (unsigned long) childregs;
	p->thread.rsp0 = (unsigned long) (childregs+1);
	p->thread.userrsp = me->thread.userrsp;

	set_ti_thread_flag(p->thread_info, TIF_FORK);

	p->thread.fs = me->thread.fs;
	p->thread.gs = me->thread.gs;

	asm("mov %%gs,%0" : "=m" (p->thread.gsindex));
	asm("mov %%fs,%0" : "=m" (p->thread.fsindex));
	asm("mov %%es,%0" : "=m" (p->thread.es));
	asm("mov %%ds,%0" : "=m" (p->thread.ds));

	if (unlikely(me->thread.io_bitmap_ptr != NULL)) {
		p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
		if (!p->thread.io_bitmap_ptr) {
			p->thread.io_bitmap_max = 0;
			return -ENOMEM;
		}
		memcpy(p->thread.io_bitmap_ptr, me->thread.io_bitmap_ptr, IO_BITMAP_BYTES);
	}

	/*
	 * Set a new TLS for the child thread?
	 */
	if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_IA32_EMULATION
		if (test_thread_flag(TIF_IA32))
			err = ia32_child_tls(p, childregs);
		else
#endif
			err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
		if (err)
			goto out;
	}
	err = 0;
out:
	if (err && p->thread.io_bitmap_ptr) {
		kfree(p->thread.io_bitmap_ptr);
		p->thread.io_bitmap_max = 0;
	}
	return err;
}
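/*
 * Note on the CLONE_SETTLS path above: in the x86-64 syscall ABI the
 * fifth clone() argument arrives in %r8, so childregs->r8 holds the new
 * TLS base the caller asked for, and installing it for a 64-bit child
 * is just ARCH_SET_FS on the child's thread.
 */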
/*
 * This function selects if the context switch from prev to next
 * has to tweak the TSC disable bit in the cr4.
 */
static inline void disable_tsc(struct task_struct *prev_p,
			       struct task_struct *next_p)
{
	struct thread_info *prev, *next;

	/*
	 * gcc should eliminate the ->thread_info dereference if
	 * has_secure_computing returns 0 at compile time (SECCOMP=n).
	 */
	prev = prev_p->thread_info;
	next = next_p->thread_info;

	if (has_secure_computing(prev) || has_secure_computing(next)) {
		/* slow path here */
		if (has_secure_computing(prev) &&
		    !has_secure_computing(next)) {
			write_cr4(read_cr4() & ~X86_CR4_TSD);
		} else if (!has_secure_computing(prev) &&
			   has_secure_computing(next))
			write_cr4(read_cr4() | X86_CR4_TSD);
	}
}
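/*
 * CR4.TSD makes the RDTSC instruction privileged, so a seccomp task
 * that tries to read the time stamp counter takes a #GP instead; that
 * closes off a high-resolution timing source that strict seccomp does
 * not want to expose.
 */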
/*
 * This special macro can be used to load a debugging register
 */
#define loaddebug(thread,r) set_debug(thread->debugreg ## r, r)
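/*
 * Token pasting picks the field, e.g.
 *	loaddebug(next, 7)
 * expands to
 *	set_debug(next->debugreg7, 7);
 */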
/*
 *	switch_to(x,y) should switch tasks from x to y.
 *
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 */
struct task_struct *__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread,
				 *next = &next_p->thread;
	int cpu = smp_processor_id();
	struct tss_struct *tss = &per_cpu(init_tss, cpu);

	unlazy_fpu(prev_p);

	/*
	 * Reload esp0, LDT and the page table pointer:
	 */
	tss->rsp0 = next->rsp0;

	/*
	 * Switch DS and ES.
	 * This won't pick up thread selector changes, but I guess that is ok.
	 */
	asm volatile("mov %%es,%0" : "=m" (prev->es));
	if (unlikely(next->es | prev->es))
		loadsegment(es, next->es);

	asm volatile ("mov %%ds,%0" : "=m" (prev->ds));
	if (unlikely(next->ds | prev->ds))
		loadsegment(ds, next->ds);

	load_TLS(next, cpu);

	/*
	 * Switch FS and GS.
	 */
	{
		unsigned fsindex;
		asm volatile("movl %%fs,%0" : "=r" (fsindex));
		/* segment register != 0 always requires a reload.
		   also reload when it has changed.
		   when prev process used 64bit base always reload
		   to avoid an information leak. */
		if (unlikely(fsindex | next->fsindex | prev->fs)) {
			loadsegment(fs, next->fsindex);
			/* check if the user used a selector != 0
			 * if yes clear 64bit base, since overloaded base
			 * is always mapped to the Null selector
			 */
			if (fsindex)
				prev->fs = 0;
		}
		/* when next process has a 64bit base use it */
		if (next->fs)
			wrmsrl(MSR_FS_BASE, next->fs);
		prev->fsindex = fsindex;
	}
	{
		unsigned gsindex;
		asm volatile("movl %%gs,%0" : "=r" (gsindex));
		if (unlikely(gsindex | next->gsindex | prev->gs)) {
			load_gs_index(next->gsindex);
			if (gsindex)
				prev->gs = 0;
		}
		if (next->gs)
			wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
		prev->gsindex = gsindex;
	}

	/*
	 * Switch the PDA context.
	 */
	prev->userrsp = read_pda(oldrsp);
	write_pda(oldrsp, next->userrsp);
	write_pda(pcurrent, next_p);
	write_pda(kernelstack, (unsigned long)next_p->thread_info + THREAD_SIZE - PDA_STACKOFFSET);

	/*
	 * Now maybe reload the debug registers
	 */
	if (unlikely(next->debugreg7)) {
		loaddebug(next, 0);
		loaddebug(next, 1);
		loaddebug(next, 2);
		loaddebug(next, 3);
		/* no 4 and 5 */
		loaddebug(next, 6);
		loaddebug(next, 7);
	}

	/*
	 * Handle the IO bitmap
	 */
	if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr)) {
		if (next->io_bitmap_ptr)
			/*
			 * Copy the relevant range of the IO bitmap.
			 * Normally this is 128 bytes or less:
			 */
			memcpy(tss->io_bitmap, next->io_bitmap_ptr,
				 max(prev->io_bitmap_max, next->io_bitmap_max));
		else {
			/*
			 * Clear any possible leftover bits:
			 */
			memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
		}
	}

	disable_tsc(prev_p, next_p);

	return prev_p;
}
/*
 * sys_execve() executes a new program.
 */
asmlinkage
long sys_execve(char __user *name, char __user * __user *argv,
		char __user * __user *envp, struct pt_regs regs)
{
	long error;
	char * filename;

	filename = getname(name);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		return error;
	error = do_execve(filename, argv, envp, &regs);
	if (error == 0) {
		task_lock(current);
		current->ptrace &= ~PT_DTRACE;
		task_unlock(current);
	}
	putname(filename);
	return error;
}
void set_personality_64bit(void)
{
	/* inherit personality from parent */

	/* Make sure to be in 64bit mode */
	clear_thread_flag(TIF_IA32);

	/* TBD: overwrites user setup. Should have two bits.
	   But 64bit processes have always behaved this way,
	   so it's not too bad. The main problem is just that
	   32bit children are affected again. */
	current->personality &= ~READ_IMPLIES_EXEC;
}
asmlinkage long sys_fork(struct pt_regs *regs)
{
	return do_fork(SIGCHLD, regs->rsp, regs, 0, NULL, NULL);
}

asmlinkage long sys_clone(unsigned long clone_flags, unsigned long newsp, void __user *parent_tid, void __user *child_tid, struct pt_regs *regs)
{
	if (!newsp)
		newsp = regs->rsp;
	return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
}
/*
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 *
 * Not so, for quite unobvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
 */
asmlinkage long sys_vfork(struct pt_regs *regs)
{
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->rsp, regs, 0,
		    NULL, NULL);
}
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long stack;
	u64 fp,rip;
	int count = 0;

	if (!p || p == current || p->state==TASK_RUNNING)
		return 0;
	stack = (unsigned long)p->thread_info;
	if (p->thread.rsp < stack || p->thread.rsp > stack+THREAD_SIZE)
		return 0;
	fp = *(u64 *)(p->thread.rsp);
	do {
		if (fp < (unsigned long)stack || fp > (unsigned long)stack+THREAD_SIZE)
			return 0;
		rip = *(u64 *)(fp+8);
		if (!in_sched_functions(rip))
			return rip;
		fp = *(u64 *)fp;
	} while (count++ < 16);
	return 0;
}
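/*
 * The walk above relies on the frame-pointer chain the compiler builds
 * when frame pointers are enabled: *fp is the saved caller %rbp and
 * *(fp + 8) is the return address.  The 16-frame limit and the
 * stack-bounds checks keep a corrupted chain from sending the walk
 * into the weeds.
 */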
long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
{
	int ret = 0;
	int doit = task == current;
	int cpu;

	switch (code) {
	case ARCH_SET_GS:
		if (addr >= TASK_SIZE_OF(task))
			return -EPERM;
		cpu = get_cpu();
		/* handle small bases via the GDT because that's faster to
		   switch. */
		if (addr <= 0xffffffff) {
			set_32bit_tls(task, GS_TLS, addr);
			if (doit) {
				load_TLS(&task->thread, cpu);
				load_gs_index(GS_TLS_SEL);
			}
			task->thread.gsindex = GS_TLS_SEL;
			task->thread.gs = 0;
		} else {
			task->thread.gsindex = 0;
			task->thread.gs = addr;
			if (doit) {
				load_gs_index(0);
				ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr);
			}
		}
		put_cpu();
		break;
	case ARCH_SET_FS:
		/* Not strictly needed for fs, but do it for symmetry
		   with gs */
		if (addr >= TASK_SIZE_OF(task))
			return -EPERM;
		cpu = get_cpu();
		/* handle small bases via the GDT because that's faster to
		   switch. */
		if (addr <= 0xffffffff) {
			set_32bit_tls(task, FS_TLS, addr);
			if (doit) {
				load_TLS(&task->thread, cpu);
				asm volatile("movl %0,%%fs" :: "r" (FS_TLS_SEL));
			}
			task->thread.fsindex = FS_TLS_SEL;
			task->thread.fs = 0;
		} else {
			task->thread.fsindex = 0;
			task->thread.fs = addr;
			if (doit) {
				/* set the selector to 0 to not confuse
				   __switch_to */
				asm volatile("movl %0,%%fs" :: "r" (0));
				ret = checking_wrmsrl(MSR_FS_BASE, addr);
			}
		}
		put_cpu();
		break;
	case ARCH_GET_FS: {
		unsigned long base;
		if (task->thread.fsindex == FS_TLS_SEL)
			base = read_32bit_tls(task, FS_TLS);
		else if (doit)
			rdmsrl(MSR_FS_BASE, base);
		else
			base = task->thread.fs;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}
	case ARCH_GET_GS: {
		unsigned long base;
		if (task->thread.gsindex == GS_TLS_SEL)
			base = read_32bit_tls(task, GS_TLS);
		else if (doit)
			rdmsrl(MSR_KERNEL_GS_BASE, base);
		else
			base = task->thread.gs;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

long sys_arch_prctl(int code, unsigned long addr)
{
	return do_arch_prctl(current, code, addr);
}
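/*
 * Illustrative userspace view (not part of this file): a 64-bit
 * threading library typically sets its TLS base with
 *
 *	#include <asm/prctl.h>
 *	arch_prctl(ARCH_SET_FS, (unsigned long)tls_block);
 *
 * after which %fs-relative accesses hit tls_block; ARCH_GET_FS with a
 * pointer argument reads the current base back.
 */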
/*
 * Capture the user space registers if the task is not running (in user space)
 */
int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
{
	struct pt_regs *pp, ptregs;

	pp = (struct pt_regs *)(tsk->thread.rsp0);
	--pp;

	ptregs = *pp;
	ptregs.cs &= 0xffff;
	ptregs.ss &= 0xffff;

	elf_core_copy_regs(regs, &ptregs);

	return 1;
}
unsigned long arch_align_stack(unsigned long sp)
{
	if (randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}
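/*
 * The stack start is thus randomized within an 8KB window and then
 * rounded down to a 16-byte boundary, keeping %rsp aligned as the
 * x86-64 ABI expects.
 */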