/*
 * Copyright (C) 1995  Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 *
 * CPU hotplug support - ashok.raj@intel.com
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */

#include <linux/stackprotector.h>
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/tick.h>
#include <linux/prctl.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/cpuidle.h>

#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/prctl.h>
#include <asm/proto.h>
#include <asm/syscalls.h>
#include <asm/debugreg.h>
#include <asm/xen/hypervisor.h>

asmlinkage extern void ret_from_fork(void);

DEFINE_PER_CPU_USER_MAPPED(unsigned long, old_rsp);
static DEFINE_PER_CPU(unsigned char, is_idle);

static ATOMIC_NOTIFIER_HEAD(idle_notifier);

void idle_notifier_register(struct notifier_block *n)
{
	atomic_notifier_chain_register(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_register);

void idle_notifier_unregister(struct notifier_block *n)
{
	atomic_notifier_chain_unregister(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_unregister);
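
/*
 * Illustrative only (not part of the original file): a client of this
 * API registers a notifier_block whose callback distinguishes
 * IDLE_START from IDLE_END, along the lines of the hypothetical:
 *
 *	static int my_idle_cb(struct notifier_block *nb,
 *			      unsigned long action, void *unused)
 *	{
 *		if (action == IDLE_START)
 *			;	// CPU is entering idle
 *		else		// IDLE_END
 *			;	// CPU is leaving idle
 *		return NOTIFY_OK;
 *	}
 *	static struct notifier_block my_idle_nb = {
 *		.notifier_call = my_idle_cb,
 *	};
 *	// ... idle_notifier_register(&my_idle_nb);
 */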

void enter_idle(void)
{
	percpu_write(is_idle, 1);
	atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
}

static void __exit_idle(void)
{
	if (x86_test_and_clear_bit_percpu(0, is_idle) == 0)
		return;
	atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
}

/* Called from interrupts to signify idle end */
void exit_idle(void)
{
	/* idle loop has pid 0 */
	if (current->pid)
		return;
	__exit_idle();
}
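
/*
 * Note: exit_idle() is called from the interrupt entry paths (e.g.
 * do_IRQ() and the APIC interrupt handlers), which is why it checks
 * for pid 0 above: only the idle task itself should clear is_idle.
 */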

#ifndef CONFIG_SMP
static inline void play_dead(void)
{
	BUG();
}
#endif

/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (i.e. sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
	current_thread_info()->status |= TS_POLLING;

	/*
	 * If we're the non-boot CPU, nothing set the stack canary up
	 * for us. CPU0 already has it initialized but no harm in
	 * doing it again. This is a good place for updating it, as
	 * we won't ever return from this function (so the invalid
	 * canaries already on the stack won't ever trigger).
	 */
	boot_init_stack_canary();

	/* endless idle loop with no priority at all */
	while (1) {
		tick_nohz_stop_sched_tick(1);
		while (!need_resched()) {

			rmb();

			if (cpu_is_offline(smp_processor_id()))
				play_dead();
			/*
			 * Idle routines should keep interrupts disabled
			 * from here on, until they go to idle.
			 * Otherwise, idle callbacks can misfire.
			 */
			local_irq_disable();
			enter_idle();
			/* Don't trace irqs off for idle */
			stop_critical_timings();
			if (cpuidle_idle_call())
				pm_idle();
			start_critical_timings();

			/*
			 * In many cases the interrupt that ended idle
			 * has already called exit_idle. But some idle
			 * loops can be woken up without interrupt.
			 */
			__exit_idle();
		}

		tick_nohz_restart_sched_tick();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}

/* Also prints some state that isn't saved in the pt_regs */
void __show_regs(struct pt_regs *regs, int all)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
	unsigned long d0, d1, d2, d3, d6, d7;
	unsigned int fsindex, gsindex;
	unsigned int ds, cs, es;

	show_regs_common();
	printk(KERN_DEFAULT "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
	printk_address(regs->ip, 1);
	printk(KERN_DEFAULT "RSP: %04lx:%016lx  EFLAGS: %08lx\n", regs->ss,
			regs->sp, regs->flags);
	printk(KERN_DEFAULT "RAX: %016lx RBX: %016lx RCX: %016lx\n",
	       regs->ax, regs->bx, regs->cx);
	printk(KERN_DEFAULT "RDX: %016lx RSI: %016lx RDI: %016lx\n",
	       regs->dx, regs->si, regs->di);
	printk(KERN_DEFAULT "RBP: %016lx R08: %016lx R09: %016lx\n",
	       regs->bp, regs->r8, regs->r9);
	printk(KERN_DEFAULT "R10: %016lx R11: %016lx R12: %016lx\n",
	       regs->r10, regs->r11, regs->r12);
	printk(KERN_DEFAULT "R13: %016lx R14: %016lx R15: %016lx\n",
	       regs->r13, regs->r14, regs->r15);

	asm("movl %%ds,%0" : "=r" (ds));
	asm("movl %%cs,%0" : "=r" (cs));
	asm("movl %%es,%0" : "=r" (es));
	asm("movl %%fs,%0" : "=r" (fsindex));
	asm("movl %%gs,%0" : "=r" (gsindex));

	rdmsrl(MSR_FS_BASE, fs);
	rdmsrl(MSR_GS_BASE, gs);
	rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);

	if (!all)
		return;

	cr0 = read_cr0();
	cr2 = read_cr2();
	cr3 = read_cr3();
	cr4 = read_cr4();

	printk(KERN_DEFAULT "FS:  %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
	       fs, fsindex, gs, gsindex, shadowgs);
	printk(KERN_DEFAULT "CS:  %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds,
			es, cr0);
	printk(KERN_DEFAULT "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3,
			cr4);

	get_debugreg(d0, 0);
	get_debugreg(d1, 1);
	get_debugreg(d2, 2);
	printk(KERN_DEFAULT "DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2);
	get_debugreg(d3, 3);
	get_debugreg(d6, 6);
	get_debugreg(d7, 7);
	printk(KERN_DEFAULT "DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7);
}

void release_thread(struct task_struct *dead_task)
{
	if (dead_task->mm) {
		if (dead_task->mm->context.ldt) {
			printk("WARNING: dead process %8s still has LDT? <%p/%d>\n",
				dead_task->comm,
				dead_task->mm->context.ldt->entries,
				dead_task->mm->context.ldt->size);
			BUG();
		}
	}
}

static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr)
{
	struct user_desc ud = {
		.base_addr = addr,
		.limit = 0xfffff,
		.seg_32bit = 1,
		.limit_in_pages = 1,
		.useable = 1,
	};
	struct desc_struct *desc = t->thread.tls_array;
	desc += tls;
	fill_ldt(desc, &ud);
}

static inline u32 read_32bit_tls(struct task_struct *t, int tls)
{
	return get_desc_base(&t->thread.tls_array[tls]);
}
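
/*
 * These two helpers back the "small base" fast path in do_arch_prctl()
 * below: a base that fits in 32 bits is installed as a GDT TLS entry,
 * so switching it needs only a cheap segment reload instead of a wrmsr.
 */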

/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
	unlazy_fpu(tsk);
}

int copy_thread(unsigned long clone_flags, unsigned long sp,
		unsigned long unused,
		struct task_struct *p, struct pt_regs *regs)
{
	int err;
	struct pt_regs *childregs;
	struct task_struct *me = current;

	childregs = ((struct pt_regs *)
			(THREAD_SIZE + task_stack_page(p))) - 1;
	*childregs = *regs;

	childregs->ax = 0;
	if (user_mode(regs))
		childregs->sp = sp;
	else
		childregs->sp = (unsigned long)childregs;

	p->thread.sp = (unsigned long) childregs;
	p->thread.sp0 = (unsigned long) (childregs+1);
	p->thread.usersp = me->thread.usersp;

	set_tsk_thread_flag(p, TIF_FORK);

	p->thread.io_bitmap_ptr = NULL;

	savesegment(gs, p->thread.gsindex);
	p->thread.gs = p->thread.gsindex ? 0 : me->thread.gs;
	savesegment(fs, p->thread.fsindex);
	p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs;
	savesegment(es, p->thread.es);
	savesegment(ds, p->thread.ds);

	err = -ENOMEM;
	memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));

	if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
		p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
		if (!p->thread.io_bitmap_ptr) {
			p->thread.io_bitmap_max = 0;
			return -ENOMEM;
		}
		memcpy(p->thread.io_bitmap_ptr, me->thread.io_bitmap_ptr,
			IO_BITMAP_BYTES);
		set_tsk_thread_flag(p, TIF_IO_BITMAP);
	}

	/*
	 * Set a new TLS for the child thread?
	 */
	if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_IA32_EMULATION
		if (test_thread_flag(TIF_IA32))
			err = do_set_thread_area(p, -1,
				(struct user_desc __user *)childregs->si, 0);
		else
#endif
			err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
		if (err)
			goto out;
	}
	err = 0;
out:
	if (err && p->thread.io_bitmap_ptr) {
		kfree(p->thread.io_bitmap_ptr);
		p->thread.io_bitmap_max = 0;
	}

	return err;
}

static void
start_thread_common(struct pt_regs *regs, unsigned long new_ip,
		    unsigned long new_sp,
		    unsigned int _cs, unsigned int _ss, unsigned int _ds)
{
	loadsegment(fs, 0);
	loadsegment(es, _ds);
	loadsegment(ds, _ds);
	load_gs_index(0);
	regs->ip	= new_ip;
	regs->sp	= new_sp;
	percpu_write(old_rsp, new_sp);
	regs->cs	= _cs;
	regs->ss	= _ss;
	regs->flags	= X86_EFLAGS_IF;
	set_fs(USER_DS);
	/*
	 * Free the old FP and other extended state
	 */
	free_thread_xstate(current);
}

void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
	start_thread_common(regs, new_ip, new_sp,
			    __USER_CS, __USER_DS, 0);
}

#ifdef CONFIG_IA32_EMULATION
void start_thread_ia32(struct pt_regs *regs, u32 new_ip, u32 new_sp)
{
	start_thread_common(regs, new_ip, new_sp,
			    __USER32_CS, __USER32_DS, __USER32_DS);
}
#endif
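
/*
 * Both start_thread() variants are called by the binfmt loaders (e.g.
 * load_elf_binary()) once a new executable image has been mapped, to
 * aim the saved registers at the image's entry point and user stack.
 */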

/*
 *	switch_to(x,y) should switch tasks from x to y.
 *
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 *
 * Kprobes are not supported here. Set the probe on schedule instead.
 * The function graph tracer is not supported either.
 */
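
/*
 * Context for the function below: __switch_to() is reached from the
 * switch_to() assembly macro, which has already swapped %rsp onto the
 * next task's kernel stack; the returned prev_p lets the resumed
 * context identify the task it switched away from.
 */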

__notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread;
	struct thread_struct *next = &next_p->thread;
	int cpu = smp_processor_id();
	struct tss_struct *tss = &per_cpu(init_tss, cpu);
	unsigned fsindex, gsindex;
	fpu_switch_t fpu;

	fpu = switch_fpu_prepare(prev_p, next_p);

	/* Reload esp0 and ss1. */
	load_sp0(tss, next);

	/*
	 * We must save %fs and %gs before load_TLS() because
	 * %fs and %gs may be cleared by load_TLS().
	 *
	 * (e.g. xen_load_tls())
	 */
	savesegment(fs, fsindex);
	savesegment(gs, gsindex);

	/*
	 * Load TLS before restoring any segments so that segment loads
	 * reference the correct GDT entries.
	 */
	load_TLS(next, cpu);

	/*
	 * Leave lazy mode, flushing any hypercalls made here. This
	 * must be done after loading TLS entries in the GDT but before
	 * loading segments that might reference them, and it must
	 * be done before math_state_restore, so the TS bit is up to
	 * date.
	 */
	arch_end_context_switch(next_p);

	/*
	 * Switch DS and ES.
	 *
	 * Reading them only returns the selectors, but writing them (if
	 * nonzero) loads the full descriptor from the GDT or LDT. The
	 * LDT for next is loaded in switch_mm, and the GDT is loaded
	 * above.
	 *
	 * We therefore need to write new values to the segment
	 * registers on every context switch unless both the new and old
	 * values are zero.
	 *
	 * Note that we don't need to do anything for CS and SS, as
	 * those are saved and restored as part of pt_regs.
	 */
	savesegment(es, prev->es);
	if (unlikely(next->es | prev->es))
		loadsegment(es, next->es);

	savesegment(ds, prev->ds);
	if (unlikely(next->ds | prev->ds))
		loadsegment(ds, next->ds);

	/*
	 * Switch FS and GS.
	 *
	 * These are even more complicated than DS and ES: they have
	 * 64-bit bases that are controlled by arch_prctl. Those bases
	 * only differ from the values in the GDT or LDT if the selector
	 * is 0.
	 *
	 * Loading the segment register resets the hidden base part of
	 * the register to 0 or the value from the GDT / LDT. If the
	 * next base address is zero, writing 0 to the segment register
	 * is much faster than using wrmsr to explicitly zero the base.
	 *
	 * The thread_struct.fs and thread_struct.gs values are 0
	 * if the fs and gs bases respectively are not overridden
	 * from the values implied by fsindex and gsindex. They
	 * are nonzero, and store the nonzero base addresses, if
	 * the bases are overridden.
	 *
	 * (fs != 0 && fsindex != 0) || (gs != 0 && gsindex != 0) should
	 * be impossible.
	 *
	 * Therefore we need to reload the segment registers if either
	 * the old or new selector is nonzero, and we need to override
	 * the base address if the next thread expects it to be overridden.
	 *
	 * This code is unnecessarily slow in the case where the old and
	 * new indexes are zero and the new base is nonzero -- it will
	 * unnecessarily write 0 to the selector before writing the new
	 * base address.
	 *
	 * Note: This all depends on arch_prctl being the only way that
	 * user code can override the segment base. Once wrfsbase and
	 * wrgsbase are enabled, most of this code will need to change.
	 */
	if (unlikely(fsindex | next->fsindex | prev->fs)) {
		loadsegment(fs, next->fsindex);

		/*
		 * If user code wrote a nonzero value to FS, then it also
		 * cleared the overridden base address.
		 *
		 * XXX: if user code wrote 0 to FS and cleared the base
		 * address itself, we won't notice and we'll incorrectly
		 * restore the prior base address next time we reschedule
		 * the process.
		 */
		if (fsindex)
			prev->fs = 0;
	}
	if (next->fs)
		wrmsrl(MSR_FS_BASE, next->fs);
	prev->fsindex = fsindex;

	if (unlikely(gsindex | next->gsindex | prev->gs)) {
		load_gs_index(next->gsindex);

		/* This works (and fails) the same way as fsindex above. */
		if (gsindex)
			prev->gs = 0;
	}
	if (next->gs)
		wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
	prev->gsindex = gsindex;

	switch_fpu_finish(next_p, fpu);

	/*
	 * Switch the PDA and FPU contexts.
	 */
	prev->usersp = percpu_read(old_rsp);
	percpu_write(old_rsp, next->usersp);
	percpu_write(current_task, next_p);

	percpu_write(kernel_stack,
		     (unsigned long)task_stack_page(next_p) +
		     THREAD_SIZE - KERNEL_STACK_OFFSET);

	/*
	 * Now maybe reload the debug registers and handle I/O bitmaps
	 */
	if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT ||
		     task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
		__switch_to_xtra(prev_p, next_p, tss);

#ifdef CONFIG_XEN
	/*
	 * On Xen PV, IOPL bits in pt_regs->flags have no effect, and
	 * current_pt_regs()->flags may not match the current task's
	 * intended IOPL. We need to switch it manually.
	 */
	if (unlikely(xen_pv_domain() && prev->iopl != next->iopl))
		xen_set_iopl_mask(next->iopl);
#endif

	return prev_p;
}

void set_personality_64bit(void)
{
	/* inherit personality from parent */

	/* Make sure to be in 64bit mode */
	clear_thread_flag(TIF_IA32);

	/* Ensure the corresponding mm is not marked. */
	if (current->mm)
		current->mm->context.ia32_compat = 0;

	/*
	 * TBD: overwrites user setup. Should have two bits.
	 * But 64bit processes have always behaved this way,
	 * so it's not too bad. The main problem is just that
	 * 32bit children are affected again.
	 */
	current->personality &= ~READ_IMPLIES_EXEC;
}

void set_personality_ia32(void)
{
	/* inherit personality from parent */

	/* Make sure to be in 32bit mode */
	set_thread_flag(TIF_IA32);
	current->personality |= force_personality32;

	/* Mark the associated mm as containing 32-bit tasks. */
	if (current->mm)
		current->mm->context.ia32_compat = 1;

	/* Prepare the first "return" to user space */
	current_thread_info()->status |= TS_COMPAT;
}

/*
 * Called from fs/proc with a reference on @p to find the function
 * which called into schedule(). This needs to be done carefully
 * because the task might wake up and we might look at a stack
 * changing under us.
 */
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long start, bottom, top, sp, fp, ip;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	start = (unsigned long)task_stack_page(p);
	if (!start)
		return 0;

	/*
	 * Layout of the stack page:
	 *
	 * ----------- topmax = start + THREAD_SIZE - sizeof(unsigned long)
	 * PADDING
	 * ----------- top = topmax - TOP_OF_KERNEL_STACK_PADDING
	 * stack
	 * ----------- bottom = start + sizeof(thread_info)
	 * thread_info
	 * ----------- start
	 *
	 * The task's stack pointer points at the location where the
	 * framepointer is stored. The data on the stack is:
	 * ... IP FP ... IP FP
	 *
	 * We need to read FP and IP, so we need to adjust the upper
	 * bound by another unsigned long.
	 */
	top = start + THREAD_SIZE;
	top -= 2 * sizeof(unsigned long);
	bottom = start + sizeof(struct thread_info);

	sp = ACCESS_ONCE(p->thread.sp);
	if (sp < bottom || sp > top)
		return 0;

	fp = ACCESS_ONCE(*(unsigned long *)sp);
	do {
		if (fp < bottom || fp > top)
			return 0;
		ip = ACCESS_ONCE(*(unsigned long *)(fp + sizeof(unsigned long)));
		if (!in_sched_functions(ip))
			return ip;
		fp = ACCESS_ONCE(*(unsigned long *)fp);
	} while (count++ < 16 && p->state != TASK_RUNNING);

	return 0;
}

long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
{
	int ret = 0;
	int doit = task == current;
	int cpu;

	switch (code) {
	case ARCH_SET_GS:
		if (addr >= TASK_SIZE_OF(task))
			return -EPERM;
		cpu = get_cpu();
		/* handle small bases via the GDT because that's faster to
		   switch. */
		if (addr <= 0xffffffff) {
			set_32bit_tls(task, GS_TLS, addr);
			if (doit) {
				load_TLS(&task->thread, cpu);
				load_gs_index(GS_TLS_SEL);
			}
			task->thread.gsindex = GS_TLS_SEL;
			task->thread.gs = 0;
		} else {
			task->thread.gsindex = 0;
			task->thread.gs = addr;
			if (doit) {
				load_gs_index(0);
				ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr);
			}
		}
		put_cpu();
		break;
	case ARCH_SET_FS:
		/* Not strictly needed for fs, but do it for symmetry
		   with gs */
		if (addr >= TASK_SIZE_OF(task))
			return -EPERM;
		cpu = get_cpu();
		/* handle small bases via the GDT because that's faster to
		   switch. */
		if (addr <= 0xffffffff) {
			set_32bit_tls(task, FS_TLS, addr);
			if (doit) {
				load_TLS(&task->thread, cpu);
				loadsegment(fs, FS_TLS_SEL);
			}
			task->thread.fsindex = FS_TLS_SEL;
			task->thread.fs = 0;
		} else {
			task->thread.fsindex = 0;
			task->thread.fs = addr;
			if (doit) {
				/* set the selector to 0 to not confuse
				   __switch_to */
				loadsegment(fs, 0);
				ret = checking_wrmsrl(MSR_FS_BASE, addr);
			}
		}
		put_cpu();
		break;
	case ARCH_GET_FS: {
		unsigned long base;
		if (task->thread.fsindex == FS_TLS_SEL)
			base = read_32bit_tls(task, FS_TLS);
		else if (doit)
			rdmsrl(MSR_FS_BASE, base);
		else
			base = task->thread.fs;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}
	case ARCH_GET_GS: {
		unsigned long base;
		unsigned gsindex;
		if (task->thread.gsindex == GS_TLS_SEL)
			base = read_32bit_tls(task, GS_TLS);
		else if (doit) {
			savesegment(gs, gsindex);
			if (gsindex)
				rdmsrl(MSR_KERNEL_GS_BASE, base);
			else
				base = task->thread.gs;
		} else
			base = task->thread.gs;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
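
/*
 * Illustrative only (user-space view, not part of the original file):
 * the entry point below is what the arch_prctl(2) wrapper reaches.
 * Note the asymmetry: ARCH_SET_* treat @addr as the new base value,
 * while ARCH_GET_* treat it as a user pointer to store the base into.
 * A hypothetical caller, with tls_block being some user buffer:
 *
 *	unsigned long base = 0;
 *	syscall(SYS_arch_prctl, ARCH_SET_FS, (unsigned long)tls_block);
 *	syscall(SYS_arch_prctl, ARCH_GET_FS, (unsigned long)&base);
 */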

long sys_arch_prctl(int code, unsigned long addr)
{
	return do_arch_prctl(current, code, addr);
}

unsigned long KSTK_ESP(struct task_struct *task)
{
	return (test_tsk_thread_flag(task, TIF_IA32)) ?
		(task_pt_regs(task)->sp) : ((task)->thread.usersp);
}