/*
 * Copyright (C) 1995  Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 *
 * CPU hotplug support - ashok.raj@intel.com
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */

#include <linux/stackprotector.h>
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/tick.h>
#include <linux/prctl.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/ftrace.h>

#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/mmu_context.h>
#include <asm/prctl.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/ia32.h>
#include <asm/idle.h>
#include <asm/syscalls.h>
#include <asm/debugreg.h>

asmlinkage extern void ret_from_fork(void);

DEFINE_PER_CPU(unsigned long, old_rsp);
static DEFINE_PER_CPU(unsigned char, is_idle);

static ATOMIC_NOTIFIER_HEAD(idle_notifier);

void idle_notifier_register(struct notifier_block *n)
{
	atomic_notifier_chain_register(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_register);

void idle_notifier_unregister(struct notifier_block *n)
{
	atomic_notifier_chain_unregister(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_unregister);
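
/*
 * Example (illustrative only; my_idle_cb/my_idle_nb are not part of
 * this file): a subsystem that wants idle-state callbacks registers a
 * notifier whose callback receives IDLE_START/IDLE_END as the action:
 *
 *	static int my_idle_cb(struct notifier_block *nb,
 *			      unsigned long action, void *unused)
 *	{
 *		return NOTIFY_OK;
 *	}
 *	static struct notifier_block my_idle_nb = {
 *		.notifier_call = my_idle_cb,
 *	};
 *
 *	idle_notifier_register(&my_idle_nb);
 */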

void enter_idle(void)
{
	percpu_write(is_idle, 1);
	atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
}

static void __exit_idle(void)
{
	if (x86_test_and_clear_bit_percpu(0, is_idle) == 0)
		return;
	atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
}

/* Called from interrupts to signify idle end */
void exit_idle(void)
{
	/* idle loop has pid 0 */
	if (current->pid)
		return;
	__exit_idle();
}
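
/*
 * enter_idle()/__exit_idle() bracket each idle period; because
 * __exit_idle() uses an atomic test-and-clear on is_idle, IDLE_END is
 * delivered at most once per period even if both the waking interrupt
 * (via exit_idle()) and the idle loop itself call the exit path.
 */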

#ifndef CONFIG_SMP
static inline void play_dead(void)
{
	BUG();
}
#endif

/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
	current_thread_info()->status |= TS_POLLING;

	/*
	 * If we're the non-boot CPU, nothing set the stack canary up
	 * for us.  CPU0 already has it initialized but no harm in
	 * doing it again.  This is a good place for updating it, as
	 * we won't ever return from this function (so the invalid
	 * canaries already on the stack won't ever trigger).
	 */
	boot_init_stack_canary();

	/* endless idle loop with no priority at all */
	while (1) {
		tick_nohz_stop_sched_tick(1);
		while (!need_resched()) {

			rmb();

			if (cpu_is_offline(smp_processor_id()))
				play_dead();
			/*
			 * Idle routines should keep interrupts disabled
			 * from here on, until they go to idle.
			 * Otherwise, idle callbacks can misfire.
			 */
			local_irq_disable();
			enter_idle();
			/* Don't trace irqs off for idle */
			stop_critical_timings();
			pm_idle();
			start_critical_timings();
			/* In many cases the interrupt that ended idle
			   has already called exit_idle. But some idle
			   loops can be woken up without interrupt. */
			__exit_idle();
		}

		tick_nohz_restart_sched_tick();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
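
/*
 * pm_idle, called from the loop above, is a function pointer selected
 * at boot (e.g. default_idle or mwait_idle); the chosen routine is
 * entered with interrupts disabled and is expected to re-enable them
 * before returning.
 */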

/* Also prints some state that isn't saved in the pt_regs */
void __show_regs(struct pt_regs *regs, int all)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
	unsigned long d0, d1, d2, d3, d6, d7;
	unsigned int fsindex, gsindex;
	unsigned int ds, cs, es;

	show_regs_common();
	printk(KERN_DEFAULT "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
	printk_address(regs->ip, 1);
	printk(KERN_DEFAULT "RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss,
			regs->sp, regs->flags);
	printk(KERN_DEFAULT "RAX: %016lx RBX: %016lx RCX: %016lx\n",
	       regs->ax, regs->bx, regs->cx);
	printk(KERN_DEFAULT "RDX: %016lx RSI: %016lx RDI: %016lx\n",
	       regs->dx, regs->si, regs->di);
	printk(KERN_DEFAULT "RBP: %016lx R08: %016lx R09: %016lx\n",
	       regs->bp, regs->r8, regs->r9);
	printk(KERN_DEFAULT "R10: %016lx R11: %016lx R12: %016lx\n",
	       regs->r10, regs->r11, regs->r12);
	printk(KERN_DEFAULT "R13: %016lx R14: %016lx R15: %016lx\n",
	       regs->r13, regs->r14, regs->r15);
178 asm("movl %%ds,%0" : "=r" (ds));
179 asm("movl %%cs,%0" : "=r" (cs));
180 asm("movl %%es,%0" : "=r" (es));
181 asm("movl %%fs,%0" : "=r" (fsindex));
182 asm("movl %%gs,%0" : "=r" (gsindex));
184 rdmsrl(MSR_FS_BASE, fs);
185 rdmsrl(MSR_GS_BASE, gs);
186 rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);
196 printk(KERN_DEFAULT "FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
197 fs, fsindex, gs, gsindex, shadowgs);
198 printk(KERN_DEFAULT "CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds,
200 printk(KERN_DEFAULT "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3,
206 printk(KERN_DEFAULT "DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2);
210 printk(KERN_DEFAULT "DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7);

void release_thread(struct task_struct *dead_task)
{
	if (dead_task->mm) {
		if (dead_task->mm->context.size) {
			printk("WARNING: dead process %8s still has LDT? <%p/%d>\n",
					dead_task->comm,
					dead_task->mm->context.ldt,
					dead_task->mm->context.size);
			BUG();
		}
	}
}

static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr)
{
	struct user_desc ud = {
		.base_addr	= addr,
		.limit		= 0xfffff,
		.seg_32bit	= 1,
		.limit_in_pages	= 1,
		.useable	= 1,
	};
	struct desc_struct *desc = t->thread.tls_array;
	desc += tls;
	fill_ldt(desc, &ud);
}

static inline u32 read_32bit_tls(struct task_struct *t, int tls)
{
	return get_desc_base(&t->thread.tls_array[tls]);
}
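
/*
 * These two helpers operate on GDT TLS slots; do_arch_prctl() below
 * uses the FS_TLS/GS_TLS entries to hold small (<= 4GB) fs/gs bases,
 * which are cheaper to reload on context switch than the 64-bit base
 * MSRs.
 */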

/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
	unlazy_fpu(tsk);
}
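
/*
 * copy_thread() is called from copy_process() on fork/clone.  It sets
 * up the child's kernel stack so that, together with TIF_FORK, the
 * first switch to the child resumes execution in ret_from_fork
 * (declared at the top of this file).
 */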

int copy_thread(unsigned long clone_flags, unsigned long sp,
		unsigned long unused,
	struct task_struct *p, struct pt_regs *regs)
{
	int err;
	struct pt_regs *childregs;
	struct task_struct *me = current;

	childregs = ((struct pt_regs *)
			(THREAD_SIZE + task_stack_page(p))) - 1;
	*childregs = *regs;

	childregs->ax = 0;
	if (user_mode(regs))
		childregs->sp = sp;
	else
		childregs->sp = (unsigned long)childregs;

	p->thread.sp = (unsigned long) childregs;
	p->thread.sp0 = (unsigned long) (childregs+1);
	p->thread.usersp = me->thread.usersp;

	set_tsk_thread_flag(p, TIF_FORK);

	p->thread.io_bitmap_ptr = NULL;

	savesegment(gs, p->thread.gsindex);
	p->thread.gs = p->thread.gsindex ? 0 : me->thread.gs;
	savesegment(fs, p->thread.fsindex);
	p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs;
	savesegment(es, p->thread.es);
	savesegment(ds, p->thread.ds);
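
	/*
	 * Note on the assignments above: a nonzero fs/gs selector means
	 * the base comes from a descriptor table, so the cached 64-bit
	 * base in thread.fs/thread.gs is kept at zero in that case (the
	 * same convention __switch_to() relies on).
	 */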

	err = -ENOMEM;
	memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));

	if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
		p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
		if (!p->thread.io_bitmap_ptr) {
			p->thread.io_bitmap_max = 0;
			return -ENOMEM;
		}
		memcpy(p->thread.io_bitmap_ptr, me->thread.io_bitmap_ptr,
				IO_BITMAP_BYTES);
		set_tsk_thread_flag(p, TIF_IO_BITMAP);
	}

	/*
	 * Set a new TLS for the child thread?
	 */
	if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_IA32_EMULATION
		if (test_thread_flag(TIF_IA32))
			err = do_set_thread_area(p, -1,
				(struct user_desc __user *)childregs->si, 0);
		else
#endif
			err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
		if (err)
			goto out;
	}
	err = 0;
out:
	if (err && p->thread.io_bitmap_ptr) {
		kfree(p->thread.io_bitmap_ptr);
		p->thread.io_bitmap_max = 0;
	}

	return err;
}

static void
start_thread_common(struct pt_regs *regs, unsigned long new_ip,
		    unsigned long new_sp,
		    unsigned int _cs, unsigned int _ss, unsigned int _ds)
{
	loadsegment(fs, 0);
	loadsegment(es, _ds);
	loadsegment(ds, _ds);
	load_gs_index(0);
	regs->ip	= new_ip;
	regs->sp	= new_sp;
	percpu_write(old_rsp, new_sp);
	regs->cs	= _cs;
	regs->ss	= _ss;
	regs->flags	= X86_EFLAGS_IF;
	set_fs(USER_DS);
	/*
	 * Free the old FP and other extended state
	 */
	free_thread_xstate(current);
}

void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
	start_thread_common(regs, new_ip, new_sp,
			    __USER_CS, __USER_DS, 0);
}
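
/*
 * start_thread() is invoked by the binfmt loaders (e.g. the ELF
 * loader) at exec time to point the registers at the new program's
 * entry point and initial user stack.
 */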

#ifdef CONFIG_IA32_EMULATION
void start_thread_ia32(struct pt_regs *regs, u32 new_ip, u32 new_sp)
{
	start_thread_common(regs, new_ip, new_sp,
			    __USER32_CS, __USER32_DS, __USER32_DS);
}
#endif

/*
 *	switch_to(x,y) should switch tasks from x to y.
 *
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 *
 * Kprobes not supported here. Set the probe on schedule instead.
 * The function graph tracer is not supported either.
 */
__notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread;
	struct thread_struct *next = &next_p->thread;
	int cpu = smp_processor_id();
	struct tss_struct *tss = &per_cpu(init_tss, cpu);
	unsigned fsindex, gsindex;
	bool preload_fpu;

	/*
	 * If the task has used fpu the last 5 timeslices, just do a full
	 * restore of the math state immediately to avoid the trap; the
	 * chances of needing FPU soon are obviously high now
	 */
	preload_fpu = tsk_used_math(next_p) && next_p->fpu_counter > 5;

	/* we're going to use this soon, after a few expensive things */
	if (preload_fpu)
		prefetch(next->fpu.state);

	/*
	 * Reload esp0, LDT and the page table pointer:
	 */
	load_sp0(tss, next);

	/*
	 * Switch DS and ES.
	 * This won't pick up thread selector changes, but I guess that is ok.
	 */
	savesegment(es, prev->es);
	if (unlikely(next->es | prev->es))
		loadsegment(es, next->es);

	savesegment(ds, prev->ds);
	if (unlikely(next->ds | prev->ds))
		loadsegment(ds, next->ds);

	/*
	 * We must save %fs and %gs before load_TLS() because
	 * %fs and %gs may be cleared by load_TLS().
	 *
	 * (e.g. xen_load_tls())
	 */
	savesegment(fs, fsindex);
	savesegment(gs, gsindex);

	load_TLS(next, cpu);

	/* Must be after DS reload */
	unlazy_fpu(prev_p);

	/* Make sure cpu is ready for new context */
	if (preload_fpu)
		clts();

	/*
	 * Leave lazy mode, flushing any hypercalls made here.
	 * This must be done before restoring TLS segments so
	 * the GDT and LDT are properly updated, and must be
	 * done before math_state_restore, so the TS bit is up
	 * to date.
	 */
	arch_end_context_switch(next_p);

	/*
	 * Switch FS and GS.
	 *
	 * Segment register != 0 always requires a reload.  Also
	 * reload when it has changed.  When prev process used 64bit
	 * base always reload to avoid an information leak.
	 */
	if (unlikely(fsindex | next->fsindex | prev->fs)) {
		loadsegment(fs, next->fsindex);
		/*
		 * Check if the user used a selector != 0; if yes
		 * clear 64bit base, since overloaded base is always
		 * mapped to the Null selector
		 */
		if (fsindex)
			prev->fs = 0;
	}
	/* when next process has a 64bit base use it */
	if (next->fs)
		wrmsrl(MSR_FS_BASE, next->fs);
	prev->fsindex = fsindex;

	if (unlikely(gsindex | next->gsindex | prev->gs)) {
		load_gs_index(next->gsindex);
		if (gsindex)
			prev->gs = 0;
	}
	if (next->gs)
		wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
	prev->gsindex = gsindex;

	/*
	 * Switch the PDA and FPU contexts.
	 */
	prev->usersp = percpu_read(old_rsp);
	percpu_write(old_rsp, next->usersp);
	percpu_write(current_task, next_p);

	percpu_write(kernel_stack,
		  (unsigned long)task_stack_page(next_p) +
		  THREAD_SIZE - KERNEL_STACK_OFFSET);
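
	/*
	 * kernel_stack, updated above, is the per-cpu variable the
	 * syscall entry code uses to locate the incoming task's kernel
	 * stack.
	 */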

	/*
	 * Now maybe reload the debug registers and handle I/O bitmaps
	 */
	if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT ||
		     task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
		__switch_to_xtra(prev_p, next_p, tss);

	/*
	 * Preload the FPU context, now that we've determined that the
	 * task is likely to be using it.
	 */
	if (preload_fpu)
		__math_state_restore();

	return prev_p;
}

void set_personality_64bit(void)
{
	/* inherit personality from parent */

	/* Make sure to be in 64bit mode */
	clear_thread_flag(TIF_IA32);

	/* TBD: overwrites user setup. Should have two bits.
	   But 64bit processes have always behaved this way,
	   so it's not too bad. The main problem is just that
	   32bit children are affected again. */
	current->personality &= ~READ_IMPLIES_EXEC;
}

void set_personality_ia32(void)
{
	/* inherit personality from parent */

	/* Make sure to be in 32bit mode */
	set_thread_flag(TIF_IA32);
	current->personality |= force_personality32;

	/* Prepare the first "return" to user space */
	current_thread_info()->status |= TS_COMPAT;
}
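
/*
 * get_wchan() below walks the sleeping task's frame-pointer chain: the
 * saved frame pointer lives at *fp and the return address at fp + 8,
 * with the walk bounded to 16 frames and to the task's stack page as a
 * defence against corrupt stacks.
 */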

unsigned long get_wchan(struct task_struct *p)
{
	unsigned long stack;
	u64 fp, ip;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;
	stack = (unsigned long)task_stack_page(p);
	if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
		return 0;
	fp = *(u64 *)(p->thread.sp);
	do {
		if (fp < (unsigned long)stack ||
		    fp >= (unsigned long)stack+THREAD_SIZE)
			return 0;
		ip = *(u64 *)(fp+8);
		if (!in_sched_functions(ip))
			return ip;
		fp = *(u64 *)fp;
	} while (count++ < 16);
	return 0;
}

long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
{
	int ret = 0;
	int doit = task == current;
	int cpu;

	switch (code) {
	case ARCH_SET_GS:
		if (addr >= TASK_SIZE_OF(task))
			return -EPERM;
		cpu = get_cpu();
		/* handle small bases via the GDT because that's faster to
		   switch. */
		if (addr <= 0xffffffff) {
			set_32bit_tls(task, GS_TLS, addr);
			if (doit) {
				load_TLS(&task->thread, cpu);
				load_gs_index(GS_TLS_SEL);
			}
			task->thread.gsindex = GS_TLS_SEL;
			task->thread.gs = 0;
		} else {
			task->thread.gsindex = 0;
			task->thread.gs = addr;
			if (doit) {
				load_gs_index(0);
				ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr);
			}
		}
		put_cpu();
		break;
	case ARCH_SET_FS:
		/* Not strictly needed for fs, but do it for symmetry
		   with gs */
		if (addr >= TASK_SIZE_OF(task))
			return -EPERM;
		cpu = get_cpu();
		/* handle small bases via the GDT because that's faster to
		   switch. */
		if (addr <= 0xffffffff) {
			set_32bit_tls(task, FS_TLS, addr);
			if (doit) {
				load_TLS(&task->thread, cpu);
				loadsegment(fs, FS_TLS_SEL);
			}
			task->thread.fsindex = FS_TLS_SEL;
			task->thread.fs = 0;
		} else {
			task->thread.fsindex = 0;
			task->thread.fs = addr;
			if (doit) {
				/* set the selector to 0 to not confuse
				   __switch_to */
				loadsegment(fs, 0);
				ret = checking_wrmsrl(MSR_FS_BASE, addr);
			}
		}
		put_cpu();
		break;
	case ARCH_GET_FS: {
		unsigned long base;
		if (task->thread.fsindex == FS_TLS_SEL)
			base = read_32bit_tls(task, FS_TLS);
		else if (doit)
			rdmsrl(MSR_FS_BASE, base);
		else
			base = task->thread.fs;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}
	case ARCH_GET_GS: {
		unsigned long base;
		unsigned gsindex;
		if (task->thread.gsindex == GS_TLS_SEL)
			base = read_32bit_tls(task, GS_TLS);
		else if (doit) {
			savesegment(gs, gsindex);
			if (gsindex)
				rdmsrl(MSR_KERNEL_GS_BASE, base);
			else
				base = task->thread.gs;
		} else
			base = task->thread.gs;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

long sys_arch_prctl(int code, unsigned long addr)
{
	return do_arch_prctl(current, code, addr);
}
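
/*
 * Userspace reaches this via the arch_prctl(2) syscall; for example,
 * a threading library installs its TLS block with the equivalent of
 *
 *	arch_prctl(ARCH_SET_FS, (unsigned long)tls_block);
 *
 * (tls_block is an illustrative name, not something defined here.)
 */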

unsigned long KSTK_ESP(struct task_struct *task)
{
	return (test_tsk_thread_flag(task, TIF_IA32)) ?
			(task_pt_regs(task)->sp) : ((task)->thread.usersp);
}
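
/*
 * The TIF_IA32 distinction exists because for 64-bit tasks the user
 * stack pointer is tracked in thread.usersp (via the old_rsp per-cpu
 * variable on syscall entry) rather than being reliably present in
 * pt_regs.
 */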