/*
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 *
 *  X86-64 port
 *	Andi Kleen.
 *
 *	CPU hotplug support - ashok.raj@intel.com
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */
#include <linux/stackprotector.h>
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/tick.h>
#include <linux/prctl.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/ftrace.h>
#include <linux/cpuidle.h>

#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/mmu_context.h>
#include <asm/prctl.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/ia32.h>
#include <asm/idle.h>
#include <asm/syscalls.h>
#include <asm/debugreg.h>
asmlinkage extern void ret_from_fork(void);

DEFINE_PER_CPU(unsigned long, old_rsp);
static DEFINE_PER_CPU(unsigned char, is_idle);

static ATOMIC_NOTIFIER_HEAD(idle_notifier);
void idle_notifier_register(struct notifier_block *n)
{
	atomic_notifier_chain_register(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_register);

void idle_notifier_unregister(struct notifier_block *n)
{
	atomic_notifier_chain_unregister(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_unregister);
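
/*
 * Illustrative sketch, not part of the original file: a module that
 * wants idle-state callbacks could hook this chain roughly as below,
 * using the standard atomic-notifier API. "my_idle_notify" and
 * "my_idle_nb" are made-up names.
 *
 *	static int my_idle_notify(struct notifier_block *nb,
 *				  unsigned long action, void *unused)
 *	{
 *		if (action == IDLE_START)
 *			;		(this CPU is entering idle)
 *		else if (action == IDLE_END)
 *			;		(this CPU has left idle)
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_idle_nb = {
 *		.notifier_call = my_idle_notify,
 *	};
 *
 *	idle_notifier_register(&my_idle_nb);
 */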
void enter_idle(void)
{
	percpu_write(is_idle, 1);
	atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
}

static void __exit_idle(void)
{
	if (x86_test_and_clear_bit_percpu(0, is_idle) == 0)
		return;
	atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
}

/* Called from interrupts to signify idle end */
void exit_idle(void)
{
	/* idle loop has pid 0 */
	if (current->pid)
		return;
	__exit_idle();
}
#ifndef CONFIG_SMP
static inline void play_dead(void)
{
	BUG();
}
#endif
/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (i.e. sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
	current_thread_info()->status |= TS_POLLING;

	/*
	 * If we're the non-boot CPU, nothing set the stack canary up
	 * for us. CPU0 already has it initialized but no harm in
	 * doing it again. This is a good place for updating it, as
	 * we won't ever return from this function (so the invalid
	 * canaries already on the stack won't ever trigger).
	 */
	boot_init_stack_canary();

	/* endless idle loop with no priority at all */
	while (1) {
		tick_nohz_stop_sched_tick(1);
		while (!need_resched()) {

			rmb();

			if (cpu_is_offline(smp_processor_id()))
				play_dead();
			/*
			 * Idle routines should keep interrupts disabled
			 * from here on, until they go to idle.
			 * Otherwise, idle callbacks can misfire.
			 */
			local_irq_disable();
			enter_idle();
			/* Don't trace irqs off for idle */
			stop_critical_timings();
			if (cpuidle_idle_call())
				pm_idle();
			start_critical_timings();

			/* In many cases the interrupt that ended idle
			   has already called exit_idle. But some idle
			   loops can be woken up without interrupt. */
			__exit_idle();
		}

		tick_nohz_restart_sched_tick();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
/* Also prints some state that isn't saved in the pt_regs */
void __show_regs(struct pt_regs *regs, int all)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
	unsigned long d0, d1, d2, d3, d6, d7;
	unsigned int fsindex, gsindex;
	unsigned int ds, cs, es;

	show_regs_common();
	printk(KERN_DEFAULT "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
	printk_address(regs->ip, 1);
	printk(KERN_DEFAULT "RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss,
			regs->sp, regs->flags);
	printk(KERN_DEFAULT "RAX: %016lx RBX: %016lx RCX: %016lx\n",
	       regs->ax, regs->bx, regs->cx);
	printk(KERN_DEFAULT "RDX: %016lx RSI: %016lx RDI: %016lx\n",
	       regs->dx, regs->si, regs->di);
	printk(KERN_DEFAULT "RBP: %016lx R08: %016lx R09: %016lx\n",
	       regs->bp, regs->r8, regs->r9);
	printk(KERN_DEFAULT "R10: %016lx R11: %016lx R12: %016lx\n",
	       regs->r10, regs->r11, regs->r12);
	printk(KERN_DEFAULT "R13: %016lx R14: %016lx R15: %016lx\n",
	       regs->r13, regs->r14, regs->r15);

	asm("movl %%ds,%0" : "=r" (ds));
	asm("movl %%cs,%0" : "=r" (cs));
	asm("movl %%es,%0" : "=r" (es));
	asm("movl %%fs,%0" : "=r" (fsindex));
	asm("movl %%gs,%0" : "=r" (gsindex));

	rdmsrl(MSR_FS_BASE, fs);
	rdmsrl(MSR_GS_BASE, gs);
	rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);

	if (!all)
		return;

	cr0 = read_cr0();
	cr2 = read_cr2();
	cr3 = read_cr3();
	cr4 = read_cr4();

	printk(KERN_DEFAULT "FS:  %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
	       fs, fsindex, gs, gsindex, shadowgs);
	printk(KERN_DEFAULT "CS:  %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds,
			es, cr0);
	printk(KERN_DEFAULT "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3,
			cr4);

	get_debugreg(d0, 0);
	get_debugreg(d1, 1);
	get_debugreg(d2, 2);
	printk(KERN_DEFAULT "DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2);
	get_debugreg(d3, 3);
	get_debugreg(d6, 6);
	get_debugreg(d7, 7);
	printk(KERN_DEFAULT "DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7);
}
void release_thread(struct task_struct *dead_task)
{
	if (dead_task->mm) {
		if (dead_task->mm->context.size) {
			printk("WARNING: dead process %8s still has LDT? <%p/%d>\n",
					dead_task->comm,
					dead_task->mm->context.ldt,
					dead_task->mm->context.size);
			BUG();
		}
	}
}
static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr)
{
	struct user_desc ud = {
		.base_addr = addr,
		.limit = 0xfffff,
		.seg_32bit = 1,
		.limit_in_pages = 1,
		.useable = 1,
	};
	struct desc_struct *desc = t->thread.tls_array;
	desc += tls;
	fill_ldt(desc, &ud);
}

static inline u32 read_32bit_tls(struct task_struct *t, int tls)
{
	return get_desc_base(&t->thread.tls_array[tls]);
}
/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
	unlazy_fpu(tsk);
}
int copy_thread(unsigned long clone_flags, unsigned long sp,
		unsigned long unused,
	struct task_struct *p, struct pt_regs *regs)
{
	int err;
	struct pt_regs *childregs;
	struct task_struct *me = current;

	childregs = ((struct pt_regs *)
			(THREAD_SIZE + task_stack_page(p))) - 1;
	*childregs = *regs;

	childregs->ax = 0;
	if (user_mode(regs))
		childregs->sp = sp;
	else
		childregs->sp = (unsigned long)childregs;

	p->thread.sp = (unsigned long) childregs;
	p->thread.sp0 = (unsigned long) (childregs+1);
	p->thread.usersp = me->thread.usersp;

	set_tsk_thread_flag(p, TIF_FORK);

	p->thread.io_bitmap_ptr = NULL;

	savesegment(gs, p->thread.gsindex);
	p->thread.gs = p->thread.gsindex ? 0 : me->thread.gs;
	savesegment(fs, p->thread.fsindex);
	p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs;
	savesegment(es, p->thread.es);
	savesegment(ds, p->thread.ds);

	err = -ENOMEM;
	memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));

	if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
		p->thread.io_bitmap_ptr = kmemdup(me->thread.io_bitmap_ptr,
						  IO_BITMAP_BYTES, GFP_KERNEL);
		if (!p->thread.io_bitmap_ptr) {
			p->thread.io_bitmap_max = 0;
			return -ENOMEM;
		}
		set_tsk_thread_flag(p, TIF_IO_BITMAP);
	}

	/*
	 * Set a new TLS for the child thread?
	 */
	if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_IA32_EMULATION
		if (test_thread_flag(TIF_IA32))
			err = do_set_thread_area(p, -1,
				(struct user_desc __user *)childregs->si, 0);
		else
#endif
			err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
		if (err)
			goto out;
	}
	err = 0;
out:
	if (err && p->thread.io_bitmap_ptr) {
		kfree(p->thread.io_bitmap_ptr);
		p->thread.io_bitmap_max = 0;
	}

	return err;
}
static void
start_thread_common(struct pt_regs *regs, unsigned long new_ip,
		    unsigned long new_sp,
		    unsigned int _cs, unsigned int _ss, unsigned int _ds)
{
	loadsegment(fs, 0);
	loadsegment(es, _ds);
	loadsegment(ds, _ds);
	load_gs_index(0);
	regs->ip = new_ip;
	regs->sp = new_sp;
	percpu_write(old_rsp, new_sp);
	regs->cs = _cs;
	regs->ss = _ss;
	regs->flags = X86_EFLAGS_IF;
	/*
	 * Free the old FP and other extended state
	 */
	free_thread_xstate(current);
}

void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
	start_thread_common(regs, new_ip, new_sp,
			    __USER_CS, __USER_DS, 0);
}

#ifdef CONFIG_IA32_EMULATION
void start_thread_ia32(struct pt_regs *regs, u32 new_ip, u32 new_sp)
{
	start_thread_common(regs, new_ip, new_sp,
			    __USER32_CS, __USER32_DS, __USER32_DS);
}
#endif
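
/*
 * Illustrative note, not part of the original file: the main caller of
 * start_thread() is the ELF loader. load_elf_binary() in fs/binfmt_elf.c
 * finishes with roughly:
 *
 *	start_thread(regs, elf_entry, bprm->p);
 *
 * which rewrites the saved register image so that the "return" from
 * execve() lands at the new program's entry point on a fresh user stack,
 * with segments and flags reset by start_thread_common() above.
 */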
/*
 *	switch_to(x,y) should switch tasks from x to y.
 *
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 *
 * Kprobes not supported here. Set the probe on schedule instead.
 * Function graph tracer is not supported either.
 */
__notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread;
	struct thread_struct *next = &next_p->thread;
	int cpu = smp_processor_id();
	struct tss_struct *tss = &per_cpu(init_tss, cpu);
	unsigned fsindex, gsindex;
	bool preload_fpu;

	/*
	 * If the task has used fpu the last 5 timeslices, just do a full
	 * restore of the math state immediately to avoid the trap; the
	 * chances of needing FPU soon are obviously high now
	 */
	preload_fpu = tsk_used_math(next_p) && next_p->fpu_counter > 5;

	/* we're going to use this soon, after a few expensive things */
	if (preload_fpu)
		prefetch(next->fpu.state);
	/*
	 * Reload esp0, LDT and the page table pointer:
	 */
	load_sp0(tss, next);

	/*
	 * Switch DS and ES.
	 * This won't pick up thread selector changes, but I guess that is ok.
	 */
	savesegment(es, prev->es);
	if (unlikely(next->es | prev->es))
		loadsegment(es, next->es);

	savesegment(ds, prev->ds);
	if (unlikely(next->ds | prev->ds))
		loadsegment(ds, next->ds);


	/* We must save %fs and %gs before load_TLS() because
	 * %fs and %gs may be cleared by load_TLS().
	 *
	 * (e.g. xen_load_tls())
	 */
	savesegment(fs, fsindex);
	savesegment(gs, gsindex);

	load_TLS(next, cpu);

	/* Must be after DS reload */
	__unlazy_fpu(prev_p);

	/* Make sure cpu is ready for new context */
	if (preload_fpu)
		clts();

	/*
	 * Leave lazy mode, flushing any hypercalls made here.
	 * This must be done before restoring TLS segments so
	 * the GDT and LDT are properly updated, and must be
	 * done before math_state_restore, so the TS bit is up
	 * to date.
	 */
	arch_end_context_switch(next_p);

	/*
	 * Switch FS and GS.
	 *
	 * Segment register != 0 always requires a reload. Also
	 * reload when it has changed. When prev process used 64bit
	 * base always reload to avoid an information leak.
	 */
	if (unlikely(fsindex | next->fsindex | prev->fs)) {
		loadsegment(fs, next->fsindex);
		/*
		 * Check if the user used a selector != 0; if yes
		 * clear 64bit base, since overloaded base is always
		 * mapped to the Null selector
		 */
		if (fsindex)
			prev->fs = 0;
	}
	/* when next process has a 64bit base use it */
	if (next->fs)
		wrmsrl(MSR_FS_BASE, next->fs);
	prev->fsindex = fsindex;

	if (unlikely(gsindex | next->gsindex | prev->gs)) {
		load_gs_index(next->gsindex);
		if (gsindex)
			prev->gs = 0;
	}
	if (next->gs)
		wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
	prev->gsindex = gsindex;

	/*
	 * Switch the PDA and FPU contexts.
	 */
	prev->usersp = percpu_read(old_rsp);
	percpu_write(old_rsp, next->usersp);
	percpu_write(current_task, next_p);

	percpu_write(kernel_stack,
		     (unsigned long)task_stack_page(next_p) +
		     THREAD_SIZE - KERNEL_STACK_OFFSET);

	/*
	 * Now maybe reload the debug registers and handle I/O bitmaps
	 */
	if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT ||
		     task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
		__switch_to_xtra(prev_p, next_p, tss);

	/*
	 * Preload the FPU context, now that we've determined that the
	 * task is likely to be using it.
	 */
	if (preload_fpu)
		__math_state_restore();

	return prev_p;
}
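
/*
 * Illustrative note, not part of the original file: __switch_to() is not
 * called directly. The scheduler's context_switch() in kernel/sched.c
 * invokes it through the switch_to() macro, roughly:
 *
 *	switch_to(prev, next, prev);
 *
 * whose assembly half swaps kernel stacks and saved instruction pointers
 * before jumping here to migrate the remaining per-task CPU state
 * (segments, TLS, FPU, debug registers, I/O bitmap).
 */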
void set_personality_64bit(void)
{
	/* inherit personality from parent */

	/* Make sure to be in 64bit mode */
	clear_thread_flag(TIF_IA32);

	/* Ensure the corresponding mm is not marked. */
	if (current->mm)
		current->mm->context.ia32_compat = 0;

	/* TBD: overwrites user setup. Should have two bits.
	   But 64bit processes have always behaved this way,
	   so it's not too bad. The main problem is just that
	   32bit children are affected again. */
	current->personality &= ~READ_IMPLIES_EXEC;
}
void set_personality_ia32(void)
{
	/* inherit personality from parent */

	/* Make sure to be in 32bit mode */
	set_thread_flag(TIF_IA32);
	current->personality |= force_personality32;

	/* Mark the associated mm as containing 32-bit tasks. */
	if (current->mm)
		current->mm->context.ia32_compat = 1;

	/* Prepare the first "return" to user space */
	current_thread_info()->status |= TS_COMPAT;
}
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long stack;
	u64 fp, ip;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;
	stack = (unsigned long)task_stack_page(p);
	if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
		return 0;
	fp = *(u64 *)(p->thread.sp);
	do {
		if (fp < (unsigned long)stack ||
		    fp >= (unsigned long)stack+THREAD_SIZE)
			return 0;
		ip = *(u64 *)(fp+8);
		if (!in_sched_functions(ip))
			return ip;
		fp = *(u64 *)fp;
	} while (count++ < 16);
	return 0;
}
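
/*
 * Illustrative note, not part of the original file: get_wchan() walks
 * saved frame pointers and is what backs /proc/<pid>/wchan. Per the
 * checks above, a running task (or a task asking about itself) always
 * reports 0; a sleeping task reports the first return address found
 * outside the scheduler, bounded to 16 frames on its own stack.
 */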
long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
{
	int ret = 0;
	int doit = task == current;
	int cpu;

	switch (code) {
	case ARCH_SET_GS:
		if (addr >= TASK_SIZE_OF(task))
			return -EPERM;
		cpu = get_cpu();
		/* handle small bases via the GDT because that's faster to
		   switch. */
		if (addr <= 0xffffffff) {
			set_32bit_tls(task, GS_TLS, addr);
			if (doit) {
				load_TLS(&task->thread, cpu);
				load_gs_index(GS_TLS_SEL);
			}
			task->thread.gsindex = GS_TLS_SEL;
			task->thread.gs = 0;
		} else {
			task->thread.gsindex = 0;
			task->thread.gs = addr;
			if (doit) {
				load_gs_index(0);
				ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr);
			}
		}
		put_cpu();
		break;
	case ARCH_SET_FS:
		/* Not strictly needed for fs, but do it for symmetry
		   with gs */
		if (addr >= TASK_SIZE_OF(task))
			return -EPERM;
		cpu = get_cpu();
		/* handle small bases via the GDT because that's faster to
		   switch. */
		if (addr <= 0xffffffff) {
			set_32bit_tls(task, FS_TLS, addr);
			if (doit) {
				load_TLS(&task->thread, cpu);
				loadsegment(fs, FS_TLS_SEL);
			}
			task->thread.fsindex = FS_TLS_SEL;
			task->thread.fs = 0;
		} else {
			task->thread.fsindex = 0;
			task->thread.fs = addr;
			if (doit) {
				/* set the selector to 0 to not confuse
				   __switch_to */
				loadsegment(fs, 0);
				ret = checking_wrmsrl(MSR_FS_BASE, addr);
			}
		}
		put_cpu();
		break;
	case ARCH_GET_FS: {
		unsigned long base;
		if (task->thread.fsindex == FS_TLS_SEL)
			base = read_32bit_tls(task, FS_TLS);
		else if (doit)
			rdmsrl(MSR_FS_BASE, base);
		else
			base = task->thread.fs;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}
	case ARCH_GET_GS: {
		unsigned long base;
		unsigned gsindex;
		if (task->thread.gsindex == GS_TLS_SEL)
			base = read_32bit_tls(task, GS_TLS);
		else if (doit) {
			savesegment(gs, gsindex);
			if (gsindex)
				rdmsrl(MSR_KERNEL_GS_BASE, base);
			else
				base = task->thread.gs;
		} else
			base = task->thread.gs;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
long sys_arch_prctl(int code, unsigned long addr)
{
	return do_arch_prctl(current, code, addr);
}
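
/*
 * Illustrative sketch, not part of the original file: glibc provides no
 * wrapper for this syscall, so user space reaches it via syscall(2).
 * "tls_block" is a made-up name.
 *
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <asm/prctl.h>
 *
 *	static unsigned long tls_block[64];
 *	unsigned long base;
 *
 *	syscall(SYS_arch_prctl, ARCH_SET_GS, (unsigned long)tls_block);
 *	syscall(SYS_arch_prctl, ARCH_GET_GS, (unsigned long)&base);
 *
 * After the first call, base reads back as the address of tls_block;
 * a base below 4GB takes the fast GDT path in do_arch_prctl() above.
 */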
unsigned long KSTK_ESP(struct task_struct *task)
{
	return (test_tsk_thread_flag(task, TIF_IA32)) ?
			(task_pt_regs(task)->sp) : ((task)->thread.usersp);
}