/*
 * Copyright (C) 1995 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 *
 * CPU hotplug support - ashok.raj@intel.com
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */

#include <linux/stackprotector.h>
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/tick.h>
#include <linux/prctl.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/cpuidle.h>

#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/prctl.h>
#include <asm/proto.h>
#include <asm/syscalls.h>
#include <asm/debugreg.h>

asmlinkage extern void ret_from_fork(void);

DEFINE_PER_CPU(unsigned long, old_rsp);
static DEFINE_PER_CPU(unsigned char, is_idle);

static ATOMIC_NOTIFIER_HEAD(idle_notifier);

void idle_notifier_register(struct notifier_block *n)
{
	atomic_notifier_chain_register(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_register);

void idle_notifier_unregister(struct notifier_block *n)
{
	atomic_notifier_chain_unregister(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_unregister);

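/*
 * Illustrative only (not part of this file): a driver wanting idle
 * notifications could use these hooks roughly as below; my_idle_notify
 * and my_idle_nb are hypothetical names.
 *
 *	static int my_idle_notify(struct notifier_block *nb,
 *				  unsigned long action, void *unused)
 *	{
 *		if (action == IDLE_START)
 *			pr_debug("cpu entering idle\n");
 *		else if (action == IDLE_END)
 *			pr_debug("cpu leaving idle\n");
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_idle_nb = {
 *		.notifier_call = my_idle_notify,
 *	};
 *
 *	idle_notifier_register(&my_idle_nb);
 */
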
void enter_idle(void)
{
	percpu_write(is_idle, 1);
	atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
}

static void __exit_idle(void)
{
	if (x86_test_and_clear_bit_percpu(0, is_idle) == 0)
		return;
	atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
}

/* Called from interrupts to signify idle end */
void exit_idle(void)
{
	/* idle loop has pid 0 */
	if (current->pid)
		return;
	__exit_idle();
}

static inline void play_dead(void)
{
	BUG();
}

/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (i.e. sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
	current_thread_info()->status |= TS_POLLING;

	/*
	 * If we're the non-boot CPU, nothing set the stack canary up
	 * for us. CPU0 already has it initialized but no harm in
	 * doing it again. This is a good place for updating it, as
	 * we won't ever return from this function (so the invalid
	 * canaries already on the stack won't ever trigger).
	 */
	boot_init_stack_canary();

	/* endless idle loop with no priority at all */
	while (1) {
		tick_nohz_stop_sched_tick(1);
		while (!need_resched()) {

			rmb();

			if (cpu_is_offline(smp_processor_id()))
				play_dead();
			/*
			 * Idle routines should keep interrupts disabled
			 * from here on, until they go to idle.
			 * Otherwise, idle callbacks can misfire.
			 */
			local_irq_disable();
			enter_idle();
			/* Don't trace irqs off for idle */
			stop_critical_timings();
			if (cpuidle_idle_call())
				pm_idle();
			start_critical_timings();

			/* In many cases the interrupt that ended idle
			   has already called exit_idle. But some idle
			   loops can be woken up without interrupt. */
			__exit_idle();
		}

		tick_nohz_restart_sched_tick();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}

/* Also prints some state that isn't saved in the pt_regs */
void __show_regs(struct pt_regs *regs, int all)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
	unsigned long d0, d1, d2, d3, d6, d7;
	unsigned int fsindex, gsindex;
	unsigned int ds, cs, es;

	printk(KERN_DEFAULT "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
	printk_address(regs->ip, 1);
	printk(KERN_DEFAULT "RSP: %04lx:%016lx  EFLAGS: %08lx\n", regs->ss,
			regs->sp, regs->flags);
	printk(KERN_DEFAULT "RAX: %016lx RBX: %016lx RCX: %016lx\n",
	       regs->ax, regs->bx, regs->cx);
	printk(KERN_DEFAULT "RDX: %016lx RSI: %016lx RDI: %016lx\n",
	       regs->dx, regs->si, regs->di);
	printk(KERN_DEFAULT "RBP: %016lx R08: %016lx R09: %016lx\n",
	       regs->bp, regs->r8, regs->r9);
	printk(KERN_DEFAULT "R10: %016lx R11: %016lx R12: %016lx\n",
	       regs->r10, regs->r11, regs->r12);
	printk(KERN_DEFAULT "R13: %016lx R14: %016lx R15: %016lx\n",
	       regs->r13, regs->r14, regs->r15);

	asm("movl %%ds,%0" : "=r" (ds));
	asm("movl %%cs,%0" : "=r" (cs));
	asm("movl %%es,%0" : "=r" (es));
	asm("movl %%fs,%0" : "=r" (fsindex));
	asm("movl %%gs,%0" : "=r" (gsindex));

	rdmsrl(MSR_FS_BASE, fs);
	rdmsrl(MSR_GS_BASE, gs);
	rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);

	if (!all)
		return;

	cr0 = read_cr0();
	cr2 = read_cr2();
	cr3 = read_cr3();
	cr4 = read_cr4();

	printk(KERN_DEFAULT "FS:  %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
	       fs, fsindex, gs, gsindex, shadowgs);
	printk(KERN_DEFAULT "CS:  %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds,
			es, cr0);
	printk(KERN_DEFAULT "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3,
			cr4);

	get_debugreg(d0, 0);
	get_debugreg(d1, 1);
	get_debugreg(d2, 2);
	printk(KERN_DEFAULT "DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2);
	get_debugreg(d3, 3);
	get_debugreg(d6, 6);
	get_debugreg(d7, 7);
	printk(KERN_DEFAULT "DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7);
}

void release_thread(struct task_struct *dead_task)
{
	if (dead_task->mm && dead_task->mm->context.size) {
		printk("WARNING: dead process %8s still has LDT? <%p/%d>\n",
				dead_task->comm,
				dead_task->mm->context.ldt,
				dead_task->mm->context.size);
		BUG();
	}
}

static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr)
{
	struct user_desc ud = {
		.base_addr = addr,
		.limit = 0xfffff,
		.seg_32bit = 1,
		.limit_in_pages = 1,
		.useable = 1,
	};
	struct desc_struct *desc = t->thread.tls_array;
	desc += tls;
	fill_ldt(desc, &ud);
}

static inline u32 read_32bit_tls(struct task_struct *t, int tls)
{
	return get_desc_base(&t->thread.tls_array[tls]);
}

/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
	unlazy_fpu(tsk);
}

int copy_thread(unsigned long clone_flags, unsigned long sp,
		unsigned long unused,
		struct task_struct *p, struct pt_regs *regs)
{
	int err;
	struct pt_regs *childregs;
	struct task_struct *me = current;

	childregs = ((struct pt_regs *)
			(THREAD_SIZE + task_stack_page(p))) - 1;
	*childregs = *regs;

	childregs->ax = 0;
	if (user_mode(regs))
		childregs->sp = sp;
	else
		childregs->sp = (unsigned long)childregs;

	p->thread.sp = (unsigned long) childregs;
	p->thread.sp0 = (unsigned long) (childregs+1);
	p->thread.usersp = me->thread.usersp;

	set_tsk_thread_flag(p, TIF_FORK);

	p->thread.io_bitmap_ptr = NULL;

	savesegment(gs, p->thread.gsindex);
	p->thread.gs = p->thread.gsindex ? 0 : me->thread.gs;
	savesegment(fs, p->thread.fsindex);
	p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs;
	savesegment(es, p->thread.es);
	savesegment(ds, p->thread.ds);

	err = -ENOMEM;
	memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));

	if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
		p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
		if (!p->thread.io_bitmap_ptr) {
			p->thread.io_bitmap_max = 0;
			return -ENOMEM;
		}
		memcpy(p->thread.io_bitmap_ptr, me->thread.io_bitmap_ptr,
				IO_BITMAP_BYTES);
		set_tsk_thread_flag(p, TIF_IO_BITMAP);
	}

	/*
	 * Set a new TLS for the child thread?
	 */
	if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_IA32_EMULATION
		if (test_thread_flag(TIF_IA32))
			err = do_set_thread_area(p, -1,
				(struct user_desc __user *)childregs->si, 0);
		else
#endif
			err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
		if (err)
			goto out;
	}
	err = 0;
out:
	if (err && p->thread.io_bitmap_ptr) {
		kfree(p->thread.io_bitmap_ptr);
		p->thread.io_bitmap_max = 0;
	}

	return err;
}

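/*
 * Sketch of the child kernel-stack layout copy_thread() sets up above
 * (illustration only, derived from the sp/sp0/childregs assignments):
 *
 *	task_stack_page(p) + THREAD_SIZE --> +------------------+ <- thread.sp0
 *	                                     |  struct pt_regs  |
 *	childregs -------------------------> +------------------+ <- thread.sp
 *	                                     |  (kernel stack   |
 *	                                     |   grows down)    |
 */
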
static void
start_thread_common(struct pt_regs *regs, unsigned long new_ip,
		    unsigned long new_sp,
		    unsigned int _cs, unsigned int _ss, unsigned int _ds)
{
	loadsegment(fs, 0);
	loadsegment(es, _ds);
	loadsegment(ds, _ds);
	load_gs_index(0);
	regs->ip = new_ip;
	regs->sp = new_sp;
	percpu_write(old_rsp, new_sp);
	regs->cs = _cs;
	regs->ss = _ss;
	regs->flags = X86_EFLAGS_IF;
	/*
	 * Free the old FP and other extended state
	 */
	free_thread_xstate(current);
}

void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
	start_thread_common(regs, new_ip, new_sp,
			    __USER_CS, __USER_DS, 0);
}

#ifdef CONFIG_IA32_EMULATION
void start_thread_ia32(struct pt_regs *regs, u32 new_ip, u32 new_sp)
{
	start_thread_common(regs, new_ip, new_sp,
			    __USER32_CS, __USER32_DS, __USER32_DS);
}
#endif

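/*
 * Usage note (illustrative, not part of this file): the binfmt loaders
 * call start_thread() once the new image is set up; fs/binfmt_elf.c,
 * for example, does roughly:
 *
 *	start_thread(regs, elf_entry, bprm->p);
 *
 * which points the saved user context at the ELF entry point and the
 * freshly built user stack before the "return" to user space.
 */
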
/*
 * switch_to(x,y) should switch tasks from x to y.
 *
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 *
 * Kprobes are not supported here. Set the probe on schedule instead.
 * The function graph tracer is not supported either.
 */
__notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread;
	struct thread_struct *next = &next_p->thread;
	int cpu = smp_processor_id();
	struct tss_struct *tss = &per_cpu(init_tss, cpu);
	unsigned fsindex, gsindex;
	fpu_switch_t fpu;

	fpu = switch_fpu_prepare(prev_p, next_p);

	/* Reload esp0 and ss1. */
	load_sp0(tss, next);

	/* We must save %fs and %gs before load_TLS() because
	 * %fs and %gs may be cleared by load_TLS().
	 *
	 * (e.g. xen_load_tls())
	 */
	savesegment(fs, fsindex);
	savesegment(gs, gsindex);

	/*
	 * Load TLS before restoring any segments so that segment loads
	 * reference the correct GDT entries.
	 */
	load_TLS(next, cpu);

	/*
	 * Leave lazy mode, flushing any hypercalls made here. This
	 * must be done after loading TLS entries in the GDT but before
	 * loading segments that might reference them, and it must
	 * be done before math_state_restore, so the TS bit is up to
	 * date.
	 */
	arch_end_context_switch(next_p);

	/*
	 * Switch DS and ES.
	 *
	 * Reading them only returns the selectors, but writing them (if
	 * nonzero) loads the full descriptor from the GDT or LDT. The
	 * LDT for next is loaded in switch_mm, and the GDT is loaded
	 * for the whole process.
	 *
	 * We therefore need to write new values to the segment
	 * registers on every context switch unless both the new and old
	 * values are zero.
	 *
	 * Note that we don't need to do anything for CS and SS, as
	 * those are saved and restored as part of pt_regs.
	 */
	savesegment(es, prev->es);
	if (unlikely(next->es | prev->es))
		loadsegment(es, next->es);

	savesegment(ds, prev->ds);
	if (unlikely(next->ds | prev->ds))
		loadsegment(ds, next->ds);

	/*
	 * Switch FS and GS.
	 *
	 * These are even more complicated than DS and ES: they have
	 * 64-bit bases that are controlled by arch_prctl. Those bases
	 * only differ from the values in the GDT or LDT if the selector
	 * is 0.
	 *
	 * Loading the segment register resets the hidden base part of
	 * the register to 0 or the value from the GDT / LDT. If the
	 * next base address is zero, writing 0 to the segment register
	 * is much faster than using wrmsr to explicitly zero the base.
	 *
	 * The thread_struct.fs and thread_struct.gs values are 0
	 * if the fs and gs bases respectively are not overridden
	 * from the values implied by fsindex and gsindex. They
	 * are nonzero, and store the nonzero base addresses, if
	 * the bases are overridden.
	 *
	 * (fs != 0 && fsindex != 0) || (gs != 0 && gsindex != 0) should
	 * be impossible.
	 *
	 * Therefore we need to reload the segment registers if either
	 * the old or new selector is nonzero, and we need to override
	 * the base address if the next thread expects it to be
	 * overridden.
	 *
	 * This code is unnecessarily slow in the case where the old and
	 * new indexes are zero and the new base is nonzero -- it will
	 * unnecessarily write 0 to the selector before writing the new
	 * base address.
	 *
	 * Note: This all depends on arch_prctl being the only way that
	 * user code can override the segment base. Once wrfsbase and
	 * wrgsbase are enabled, most of this code will need to change.
	 */
	if (unlikely(fsindex | next->fsindex | prev->fs)) {
		loadsegment(fs, next->fsindex);

		/*
		 * If user code wrote a nonzero value to FS, then it also
		 * cleared the overridden base address.
		 *
		 * XXX: if user code wrote 0 to FS and cleared the base
		 * address itself, we won't notice and we'll incorrectly
		 * restore the prior base address the next time we
		 * reschedule the process.
		 */
		if (fsindex)
			prev->fs = 0;
	}
	if (next->fs)
		wrmsrl(MSR_FS_BASE, next->fs);
	prev->fsindex = fsindex;

	if (unlikely(gsindex | next->gsindex | prev->gs)) {
		load_gs_index(next->gsindex);

		/* This works (and fails) the same way as fsindex above. */
		if (gsindex)
			prev->gs = 0;
	}
	if (next->gs)
		wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
	prev->gsindex = gsindex;

	switch_fpu_finish(next_p, fpu);

	/*
	 * Switch the PDA and FPU contexts.
	 */
	prev->usersp = percpu_read(old_rsp);
	percpu_write(old_rsp, next->usersp);
	percpu_write(current_task, next_p);

	percpu_write(kernel_stack,
		     (unsigned long)task_stack_page(next_p) +
		     THREAD_SIZE - KERNEL_STACK_OFFSET);

	/*
	 * Now maybe reload the debug registers and handle I/O bitmaps
	 */
	if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT ||
		     task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
		__switch_to_xtra(prev_p, next_p, tss);

	return prev_p;
}

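/*
 * Worked example of the FS/GS convention above (illustrative only):
 *
 * - A thread that did arch_prctl(ARCH_SET_FS, base) with a base above
 *   4GB has fsindex == 0 and thread.fs == base; the switch path must
 *   wrmsrl(MSR_FS_BASE, base) to restore it.
 *
 * - A thread whose FS base fits in 32 bits is given a GDT TLS slot
 *   instead, so fsindex == FS_TLS_SEL and thread.fs == 0; reloading
 *   the selector alone restores the base from the GDT.
 *
 * That is why (fs != 0 && fsindex != 0) should never happen: the two
 * mechanisms are mutually exclusive.
 */
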
void set_personality_64bit(void)
{
	/* inherit personality from parent */

	/* Make sure to be in 64bit mode */
	clear_thread_flag(TIF_IA32);

	/* Ensure the corresponding mm is not marked. */
	if (current->mm)
		current->mm->context.ia32_compat = 0;

	/* TBD: overwrites user setup. Should have two bits.
	   But 64bit processes have always behaved this way,
	   so it's not too bad. The main problem is just that
	   32-bit children are affected again. */
	current->personality &= ~READ_IMPLIES_EXEC;
}

void set_personality_ia32(void)
{
	/* inherit personality from parent */

	/* Make sure to be in 32bit mode */
	set_thread_flag(TIF_IA32);
	current->personality |= force_personality32;

	/* Mark the associated mm as containing 32-bit tasks. */
	if (current->mm)
		current->mm->context.ia32_compat = 1;

	/* Prepare the first "return" to user space */
	current_thread_info()->status |= TS_COMPAT;
}

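/*
 * Usage note (illustrative, era-dependent): these helpers are normally
 * reached through the SET_PERSONALITY() / COMPAT_SET_PERSONALITY()
 * macros in asm/elf.h, which the ELF loaders invoke while setting up a
 * new image; the native loader ends up in set_personality_64bit() and
 * the compat loader in set_personality_ia32().
 */
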
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long stack;
	u64 fp, ip;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;
	stack = (unsigned long)task_stack_page(p);
	if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
		return 0;
	fp = *(u64 *)(p->thread.sp);
	do {
		if (fp < (unsigned long)stack ||
		    fp >= (unsigned long)stack+THREAD_SIZE)
			return 0;
		ip = *(u64 *)(fp+8);
		if (!in_sched_functions(ip))
			return ip;
		fp = *(u64 *)fp;
	} while (count++ < 16);
	return 0;
}

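/*
 * The walk above leans on the x86-64 frame-pointer layout (sketch,
 * assuming CONFIG_FRAME_POINTER so %rbp chains are maintained):
 *
 *	fp     -> saved caller %rbp   (next frame in the chain)
 *	fp + 8 -> return address      (candidate wchan ip)
 *
 * get_wchan() follows at most 16 frames and reports the first return
 * address that is not inside the scheduler itself.
 */
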
long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
{
	int ret = 0;
	int doit = task == current;
	int cpu;

	switch (code) {
	case ARCH_SET_GS:
		if (addr >= TASK_SIZE_OF(task))
			return -EPERM;
		cpu = get_cpu();
		/* handle small bases via the GDT because that's faster to
		   switch. */
		if (addr <= 0xffffffff) {
			set_32bit_tls(task, GS_TLS, addr);
			if (doit) {
				load_TLS(&task->thread, cpu);
				load_gs_index(GS_TLS_SEL);
			}
			task->thread.gsindex = GS_TLS_SEL;
			task->thread.gs = 0;
		} else {
			task->thread.gsindex = 0;
			task->thread.gs = addr;
			if (doit) {
				load_gs_index(0);
				ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr);
			}
		}
		put_cpu();
		break;
	case ARCH_SET_FS:
		/* Not strictly needed for fs, but do it for symmetry
		   with gs */
		if (addr >= TASK_SIZE_OF(task))
			return -EPERM;
		cpu = get_cpu();
		/* handle small bases via the GDT because that's faster to
		   switch. */
		if (addr <= 0xffffffff) {
			set_32bit_tls(task, FS_TLS, addr);
			if (doit) {
				load_TLS(&task->thread, cpu);
				loadsegment(fs, FS_TLS_SEL);
			}
			task->thread.fsindex = FS_TLS_SEL;
			task->thread.fs = 0;
		} else {
			task->thread.fsindex = 0;
			task->thread.fs = addr;
			if (doit) {
				/* set the selector to 0 to not confuse
				   __switch_to */
				loadsegment(fs, 0);
				ret = checking_wrmsrl(MSR_FS_BASE, addr);
			}
		}
		put_cpu();
		break;
	case ARCH_GET_FS: {
		unsigned long base;

		if (task->thread.fsindex == FS_TLS_SEL)
			base = read_32bit_tls(task, FS_TLS);
		else if (doit)
			rdmsrl(MSR_FS_BASE, base);
		else
			base = task->thread.fs;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}
	case ARCH_GET_GS: {
		unsigned long base;
		unsigned gsindex;

		if (task->thread.gsindex == GS_TLS_SEL)
			base = read_32bit_tls(task, GS_TLS);
		else if (doit) {
			savesegment(gs, gsindex);
			if (gsindex)
				rdmsrl(MSR_KERNEL_GS_BASE, base);
			else
				base = task->thread.gs;
		} else
			base = task->thread.gs;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

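/*
 * Illustrative userspace view (assumption: a raw syscall, since libc
 * may not wrap arch_prctl; "new_base" is a hypothetical value):
 *
 *	#include <asm/prctl.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	unsigned long base;
 *	syscall(SYS_arch_prctl, ARCH_GET_FS, (unsigned long)&base);
 *	syscall(SYS_arch_prctl, ARCH_SET_GS, new_base);
 *
 * For ARCH_GET_*, "addr" is a pointer that receives the base; for
 * ARCH_SET_*, it is the base itself, which explains the asymmetric use
 * of "addr" in do_arch_prctl() above.
 */
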
long sys_arch_prctl(int code, unsigned long addr)
{
	return do_arch_prctl(current, code, addr);
}

unsigned long KSTK_ESP(struct task_struct *task)
{
	return (test_tsk_thread_flag(task, TIF_IA32)) ?
			(task_pt_regs(task)->sp) : ((task)->thread.usersp);
}