#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/prctl.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/pm.h>
#include <linux/clockchips.h>
#include <linux/random.h>
#include <linux/user-return-notifier.h>
#include <linux/dmi.h>
#include <linux/utsname.h>
#include <trace/events/power.h>
#include <linux/hw_breakpoint.h>
#include <asm/system.h>
#include <asm/apic.h>
#include <asm/syscalls.h>
#include <asm/idle.h>
#include <asm/uaccess.h>
#include <asm/i387.h>
#include <asm/debugreg.h>
struct kmem_cache *task_xstate_cachep;
EXPORT_SYMBOL_GPL(task_xstate_cachep);
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
        int ret;

        *dst = *src;
        if (fpu_allocated(&src->thread.fpu)) {
                memset(&dst->thread.fpu, 0, sizeof(dst->thread.fpu));
                ret = fpu_alloc(&dst->thread.fpu);
                if (ret)
                        return ret;
                fpu_copy(&dst->thread.fpu, &src->thread.fpu);
        }
        return 0;
}
void free_thread_xstate(struct task_struct *tsk)
{
        fpu_free(&tsk->thread.fpu);
}

void free_thread_info(struct thread_info *ti)
{
        free_thread_xstate(ti->task);
        free_pages((unsigned long)ti, get_order(THREAD_SIZE));
}
void arch_task_cache_init(void)
{
        task_xstate_cachep =
                kmem_cache_create("task_xstate", xstate_size,
                                  __alignof__(union thread_xstate),
                                  SLAB_PANIC | SLAB_NOTRACK, NULL);
}
/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
        struct task_struct *me = current;
        struct thread_struct *t = &me->thread;
        unsigned long *bp = t->io_bitmap_ptr;

        if (bp) {
                struct tss_struct *tss = &per_cpu(init_tss, get_cpu());

                t->io_bitmap_ptr = NULL;
                clear_thread_flag(TIF_IO_BITMAP);
                /* Careful, clear this in the TSS too: */
                memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
                t->io_bitmap_max = 0;
                put_cpu();
                kfree(bp);
        }
}
void show_regs(struct pt_regs *regs)
{
        show_registers(regs);
        show_trace(NULL, regs, (unsigned long *)kernel_stack_pointer(regs));
}
void show_regs_common(void)
{
        const char *board, *product;

        board = dmi_get_system_info(DMI_BOARD_NAME);
        if (!board)
                board = "";
        product = dmi_get_system_info(DMI_PRODUCT_NAME);
        if (!product)
                product = "";

        printk(KERN_CONT "\n");
        printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s %s/%s\n",
                current->pid, current->comm, print_tainted(),
                init_utsname()->release,
                (int)strcspn(init_utsname()->version, " "),
                init_utsname()->version, board, product);
}
void flush_thread(void)
{
        struct task_struct *tsk = current;

        flush_ptrace_hw_breakpoint(tsk);
        memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
        /*
         * Forget coprocessor state..
         */
        tsk->fpu_counter = 0;
        clear_fpu(tsk);
        clear_used_math();
}
static void hard_disable_TSC(void)
{
        write_cr4(read_cr4() | X86_CR4_TSD);
}

void disable_TSC(void)
{
        preempt_disable();
        if (!test_and_set_thread_flag(TIF_NOTSC))
                /*
                 * Must flip the CPU state synchronously with
                 * TIF_NOTSC in the current running context.
                 */
                hard_disable_TSC();
        preempt_enable();
}

static void hard_enable_TSC(void)
{
        write_cr4(read_cr4() & ~X86_CR4_TSD);
}

static void enable_TSC(void)
{
        preempt_disable();
        if (test_and_clear_thread_flag(TIF_NOTSC))
                /*
                 * Must flip the CPU state synchronously with
                 * TIF_NOTSC in the current running context.
                 */
                hard_enable_TSC();
        preempt_enable();
}
int get_tsc_mode(unsigned long adr)
{
        unsigned int val = PR_TSC_ENABLE;

        if (test_thread_flag(TIF_NOTSC))
                val = PR_TSC_SIGSEGV;

        return put_user(val, (unsigned int __user *)adr);
}

int set_tsc_mode(unsigned int val)
{
        if (val == PR_TSC_SIGSEGV)
                disable_TSC();
        else if (val == PR_TSC_ENABLE)
                enable_TSC();
        else
                return -EINVAL;

        return 0;
}
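/*
 * Illustrative sketch (not part of the original file): the two helpers
 * above back the PR_GET_TSC/PR_SET_TSC prctl(2) commands, so a user-space
 * program can opt into faulting on RDTSC roughly like this:
 *
 *	#include <sys/prctl.h>
 *	#include <linux/prctl.h>
 *
 *	int tsc_mode;
 *	prctl(PR_SET_TSC, PR_TSC_SIGSEGV);	// RDTSC now raises SIGSEGV
 *	prctl(PR_GET_TSC, &tsc_mode);		// reads back PR_TSC_SIGSEGV
 */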
void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
                      struct tss_struct *tss)
{
        struct thread_struct *prev, *next;

        prev = &prev_p->thread;
        next = &next_p->thread;

        if (test_tsk_thread_flag(prev_p, TIF_BLOCKSTEP) ^
            test_tsk_thread_flag(next_p, TIF_BLOCKSTEP)) {
                unsigned long debugctl = get_debugctlmsr();

                debugctl &= ~DEBUGCTLMSR_BTF;
                if (test_tsk_thread_flag(next_p, TIF_BLOCKSTEP))
                        debugctl |= DEBUGCTLMSR_BTF;

                update_debugctlmsr(debugctl);
        }

        if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
            test_tsk_thread_flag(next_p, TIF_NOTSC)) {
                /* prev and next are different */
                if (test_tsk_thread_flag(next_p, TIF_NOTSC))
                        hard_disable_TSC();
                else
                        hard_enable_TSC();
        }

        if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
                /*
                 * Copy the relevant range of the IO bitmap.
                 * Normally this is 128 bytes or less:
                 */
                memcpy(tss->io_bitmap, next->io_bitmap_ptr,
                       max(prev->io_bitmap_max, next->io_bitmap_max));
        } else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
                /*
                 * Clear any possible leftover bits:
                 */
                memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
        }
        propagate_user_return_notify(prev_p, next_p);
}
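/*
 * Illustrative sketch (not in the original file): the TIF_IO_BITMAP
 * handling above exists because a task may hold per-task I/O port
 * permissions obtained via ioperm(2); on a context switch the relevant
 * slice of that bitmap must follow the task into the shared TSS.
 * A user-space task acquires such a bitmap roughly like this:
 *
 *	#include <sys/io.h>
 *
 *	if (ioperm(0x378, 3, 1) == 0)	// needs CAP_SYS_RAWIO
 *		outb(0xff, 0x378);	// port I/O now allowed from ring 3
 */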
int sys_fork(struct pt_regs *regs)
{
        return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL);
}
/*
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 *
 * Not so, for quite unobvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
 */
int sys_vfork(struct pt_regs *regs)
{
        return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp, regs, 0,
                       NULL, NULL);
}
long
sys_clone(unsigned long clone_flags, unsigned long newsp,
          void __user *parent_tid, void __user *child_tid, struct pt_regs *regs)
{
        if (!newsp)
                newsp = regs->sp;
        return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
}
/*
 * This gets run with %si containing the
 * function to call, and %di containing
 * the argument.
 */
extern void kernel_thread_helper(void);
/*
 * Create a kernel thread
 */
int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
        struct pt_regs regs;

        memset(&regs, 0, sizeof(regs));

        regs.si = (unsigned long) fn;
        regs.di = (unsigned long) arg;

#ifdef CONFIG_X86_32
        regs.ds = __USER_DS;
        regs.es = __USER_DS;
        regs.fs = __KERNEL_PERCPU;
        regs.gs = __KERNEL_STACK_CANARY;
#else
        regs.ss = __KERNEL_DS;
#endif

        regs.orig_ax = -1;
        regs.ip = (unsigned long) kernel_thread_helper;
        regs.cs = __KERNEL_CS | get_kernel_rpl();
        regs.flags = X86_EFLAGS_IF | 0x2;

        /* Ok, create the new process.. */
        return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
}
EXPORT_SYMBOL(kernel_thread);
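/*
 * Illustrative sketch (not in the original file): a caller spawns a kernel
 * thread by handing kernel_thread() a function and an argument, e.g.:
 *
 *	static int my_worker(void *unused)	// hypothetical payload
 *	{
 *		return 0;
 *	}
 *
 *	kernel_thread(my_worker, NULL, CLONE_FS | CLONE_FILES | SIGCHLD);
 *
 * Most code of this era should prefer the kthread_run() wrapper, which
 * also names the thread and returns its task_struct.
 */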
/*
 * sys_execve() executes a new program.
 */
long sys_execve(const char __user *name,
                const char __user *const __user *argv,
                const char __user *const __user *envp, struct pt_regs *regs)
{
        long error;
        char *filename;

        filename = getname(name);
        error = PTR_ERR(filename);
        if (IS_ERR(filename))
                return error;
        error = do_execve(filename, argv, envp, regs);

#ifdef CONFIG_X86_32
        if (error == 0) {
                /* Make sure we don't return using sysenter.. */
                set_thread_flag(TIF_IRET);
        }
#endif

        putname(filename);
        return error;
}
/*
 * Idle related variables and functions
 */
unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
EXPORT_SYMBOL(boot_option_idle_override);

/*
 * Power management idle function, if any..
 */
void (*pm_idle)(void);
EXPORT_SYMBOL(pm_idle);
#ifdef CONFIG_X86_32
/*
 * This halt magic was a workaround for ancient floppy DMA
 * wreckage. It should be safe to remove.
 */
static int hlt_counter;
void disable_hlt(void)
{
        hlt_counter++;
}
EXPORT_SYMBOL(disable_hlt);

void enable_hlt(void)
{
        hlt_counter--;
}
EXPORT_SYMBOL(enable_hlt);

static inline int hlt_use_halt(void)
{
        return (!hlt_counter && boot_cpu_data.hlt_works_ok);
}
#else
static inline int hlt_use_halt(void)
{
        return 1;
}
#endif
/*
 * We use this if we don't have any better
 * idle routine..
 */
void default_idle(void)
{
        if (hlt_use_halt()) {
                trace_power_start(POWER_CSTATE, 1, smp_processor_id());
                trace_cpu_idle(1, smp_processor_id());
                current_thread_info()->status &= ~TS_POLLING;
                /*
                 * TS_POLLING-cleared state must be visible before we
                 * test NEED_RESCHED:
                 */
                smp_mb();

                if (!need_resched())
                        safe_halt();    /* enables interrupts racelessly */
                else
                        local_irq_enable();
                current_thread_info()->status |= TS_POLLING;
                trace_power_end(smp_processor_id());
                trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
        } else {
                local_irq_enable();
                /* loop is done by the caller */
                cpu_relax();
        }
}
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(default_idle);
#endif
void stop_this_cpu(void *dummy)
{
        local_irq_disable();
        /*
         * Remove this CPU:
         */
        set_cpu_online(smp_processor_id(), false);
        disable_local_APIC();

        for (;;) {
                if (hlt_works(smp_processor_id()))
                        halt();
        }
}

static void do_nothing(void *unused)
{
}
/*
 * cpu_idle_wait - Used to ensure that all the CPUs discard old value of
 * pm_idle and update to new pm_idle value. Required while changing pm_idle
 * handler on SMP systems.
 *
 * Caller must have changed pm_idle to the new value before the call. Old
 * pm_idle value will not be used by any CPU after the return of this function.
 */
void cpu_idle_wait(void)
{
        smp_mb();
        /* kick all the CPUs so that they exit out of pm_idle */
        smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);
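/*
 * Illustrative sketch (not in the original file): a caller switching the
 * idle handler follows the protocol documented above, e.g.:
 *
 *	pm_idle = my_idle;	// hypothetical replacement handler
 *	cpu_idle_wait();	// afterwards no CPU still runs the old one
 */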
/*
 * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
 * which can obviate IPI to trigger checking of need_resched.
 * We execute MONITOR against need_resched and enter optimized wait state
 * through MWAIT. Whenever someone changes need_resched, we would be woken
 * up from MWAIT (without an IPI).
 *
 * New with Core Duo processors, MWAIT can take some hints based on CPU
 * capability.
 */
void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
{
        if (!need_resched()) {
                if (cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_CLFLUSH_MONITOR))
                        clflush((void *)&current_thread_info()->flags);

                __monitor((void *)&current_thread_info()->flags, 0, 0);
                smp_mb();
                if (!need_resched())
                        __mwait(ax, cx);
        }
}
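/*
 * Illustrative sketch (not in the original file): callers such as the
 * ACPI idle driver pass a platform-specific C-state hint in EAX and a
 * break-on-interrupt request in ECX, roughly:
 *
 *	mwait_idle_with_hints(0x10, 1);	// example values only: assumed
 *					// deeper-C-state hint, ECX bit 0 =
 *					// wake on interrupt
 */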
/* Default MONITOR/MWAIT with no hints, used for default C1 state */
static void mwait_idle(void)
{
        if (!need_resched()) {
                trace_power_start(POWER_CSTATE, 1, smp_processor_id());
                trace_cpu_idle(1, smp_processor_id());
                if (cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_CLFLUSH_MONITOR))
                        clflush((void *)&current_thread_info()->flags);

                __monitor((void *)&current_thread_info()->flags, 0, 0);
                smp_mb();
                if (!need_resched())
                        __sti_mwait(0, 0);
                else
                        local_irq_enable();
                trace_power_end(smp_processor_id());
                trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
        } else
                local_irq_enable();
}
/*
 * On SMP it's slightly faster (but much more power-consuming!)
 * to poll the ->work.need_resched flag instead of waiting for the
 * cross-CPU IPI to arrive. Use this option with caution.
 */
static void poll_idle(void)
{
        trace_power_start(POWER_CSTATE, 0, smp_processor_id());
        trace_cpu_idle(0, smp_processor_id());
        local_irq_enable();
        while (!need_resched())
                cpu_relax();
        trace_power_end(smp_processor_id());
        trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
}
/*
 * mwait selection logic:
 *
 * It depends on the CPU. For AMD CPUs that support MWAIT this is
 * wrong. Family 0x10 and 0x11 CPUs will enter C1 on HLT. Powersavings
 * then depend on a clock divisor and current Pstate of the core. If
 * all cores of a processor are in halt state (C1) the processor can
 * enter the C1E (C1 enhanced) state. If mwait is used this will never
 * happen.
 *
 * idle=mwait overrides this decision and forces the usage of mwait.
 */
#define MWAIT_INFO                      0x05
#define MWAIT_ECX_EXTENDED_INFO         0x01
#define MWAIT_EDX_C1                    0xf0
static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
{
        u32 eax, ebx, ecx, edx;

        if (boot_option_idle_override == IDLE_FORCE_MWAIT)
                return 1;

        if (c->cpuid_level < MWAIT_INFO)
                return 0;

        cpuid(MWAIT_INFO, &eax, &ebx, &ecx, &edx);
        /* Check whether EDX has extended info about MWAIT (ECX bit 0) */
        if (!(ecx & MWAIT_ECX_EXTENDED_INFO))
                return 1;

        /*
         * EDX enumerates MONITOR/MWAIT extensions. Check whether
         * C1 supports MWAIT.
         */
        return (edx & MWAIT_EDX_C1);
}
bool c1e_detected;
EXPORT_SYMBOL(c1e_detected);

static cpumask_var_t c1e_mask;

void c1e_remove_cpu(int cpu)
{
        if (c1e_mask != NULL)
                cpumask_clear_cpu(cpu, c1e_mask);
}
/*
 * C1E aware idle routine. We check for C1E active in the interrupt
 * pending message MSR. If we detect C1E, then we handle it the same
 * way as C3 power states (local apic timer and TSC stop)
 */
static void c1e_idle(void)
{
        if (need_resched())
                return;

        if (!c1e_detected) {
                u32 lo, hi;

                rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);

                if (lo & K8_INTP_C1E_ACTIVE_MASK) {
                        c1e_detected = true;
                        if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
                                mark_tsc_unstable("TSC halt in AMD C1E");
                        printk(KERN_INFO "System has AMD C1E enabled\n");
                }
        }

        if (c1e_detected) {
                int cpu = smp_processor_id();

                if (!cpumask_test_cpu(cpu, c1e_mask)) {
                        cpumask_set_cpu(cpu, c1e_mask);
                        /*
                         * Force broadcast so ACPI can not interfere.
                         */
                        clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_FORCE,
                                           &cpu);
                        printk(KERN_INFO "Switch to broadcast mode on CPU%d\n",
                               cpu);
                }
                clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);

                default_idle();

                /*
                 * The switch back from broadcast mode needs to be
                 * called with interrupts disabled.
                 */
                local_irq_disable();
                clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
                local_irq_enable();
        } else
                default_idle();
}
void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
        if (pm_idle == poll_idle && smp_num_siblings > 1) {
                printk_once(KERN_WARNING "WARNING: polling idle and HT enabled,"
                        " performance may degrade.\n");
        }
#endif
        if (pm_idle)
                return;

        if (cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)) {
                /*
                 * One CPU supports mwait => All CPUs support mwait
                 */
                printk(KERN_INFO "using mwait in idle threads.\n");
                pm_idle = mwait_idle;
        } else if (cpu_has_amd_erratum(amd_erratum_400)) {
                /* E400: APIC timer interrupt does not wake up CPU from C1e */
                printk(KERN_INFO "using C1E aware idle routine\n");
                pm_idle = c1e_idle;
        } else
                pm_idle = default_idle;
}
void __init init_c1e_mask(void)
{
        /* If we're using c1e_idle, we need to allocate c1e_mask. */
        if (pm_idle == c1e_idle)
                zalloc_cpumask_var(&c1e_mask, GFP_KERNEL);
}
static int __init idle_setup(char *str)
{
        if (!str)
                return -EINVAL;

        if (!strcmp(str, "poll")) {
                printk("using polling idle threads.\n");
                pm_idle = poll_idle;
                boot_option_idle_override = IDLE_POLL;
        } else if (!strcmp(str, "mwait")) {
                boot_option_idle_override = IDLE_FORCE_MWAIT;
        } else if (!strcmp(str, "halt")) {
                /*
                 * When the boot option of idle=halt is added, halt is
                 * forced to be used for CPU idle. In such case CPU C2/C3
                 * won't be used again.
                 * To continue to load the CPU idle driver, don't touch
                 * the boot_option_idle_override.
                 */
                pm_idle = default_idle;
                boot_option_idle_override = IDLE_HALT;
        } else if (!strcmp(str, "nomwait")) {
                /*
                 * If the boot option of "idle=nomwait" is added,
                 * it means that mwait will be disabled for CPU C2/C3
                 * states. In such case it won't touch the variable
                 * of boot_option_idle_override.
                 */
                boot_option_idle_override = IDLE_NOMWAIT;
        } else
                return -1;

        return 0;
}
early_param("idle", idle_setup);
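/*
 * Illustrative usage (not in the original file): idle_setup() above parses
 * the "idle=" kernel command-line parameter, matching the branches above:
 *
 *	idle=poll	busy-poll instead of halting (burns power)
 *	idle=mwait	force MWAIT even where it is normally avoided
 *	idle=halt	always use HLT, skip deeper C2/C3 states
 *	idle=nomwait	keep HLT/C-states but never use MWAIT
 */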
unsigned long arch_align_stack(unsigned long sp)
{
        /* Randomize the stack top by up to 8 kB, keeping 16-byte alignment. */
        if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
                sp -= get_random_int() % 8192;
        return sp & ~0xf;
}

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
        /* Place the heap start randomly within 32 MB above the unrandomized brk. */
        unsigned long range_end = mm->brk + 0x02000000;
        return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
}