tracing, perf: Convert the power tracer into an event tracer
[pandora-kernel.git] arch/x86/kernel/process.c
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/prctl.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/pm.h>
#include <linux/clockchips.h>
#include <linux/random.h>
#include <trace/events/power.h>
#include <asm/system.h>
#include <asm/apic.h>
#include <asm/syscalls.h>
#include <asm/idle.h>
#include <asm/uaccess.h>
#include <asm/i387.h>
#include <asm/ds.h>

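/*
 * Set by the "idle=halt" and "idle=nomwait" boot options respectively;
 * see idle_setup() below.
 */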
unsigned long idle_halt;
EXPORT_SYMBOL(idle_halt);
unsigned long idle_nomwait;
EXPORT_SYMBOL(idle_nomwait);

struct kmem_cache *task_xstate_cachep;

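/*
 * Duplicate the arch-specific part of a task_struct on fork: copy the
 * structure itself and, if the parent has an extended FPU state buffer,
 * allocate a fresh one from task_xstate_cachep and copy xstate_size
 * bytes into it.
 */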
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
        *dst = *src;
        if (src->thread.xstate) {
                dst->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
                                                      GFP_KERNEL);
                if (!dst->thread.xstate)
                        return -ENOMEM;
                WARN_ON((unsigned long)dst->thread.xstate & 15);
                memcpy(dst->thread.xstate, src->thread.xstate, xstate_size);
        }
        return 0;
}

void free_thread_xstate(struct task_struct *tsk)
{
        if (tsk->thread.xstate) {
                kmem_cache_free(task_xstate_cachep, tsk->thread.xstate);
                tsk->thread.xstate = NULL;
        }

        WARN(tsk->thread.ds_ctx, "leaking DS context\n");
}

void free_thread_info(struct thread_info *ti)
{
        free_thread_xstate(ti->task);
        free_pages((unsigned long)ti, get_order(THREAD_SIZE));
}

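/* Create the slab cache used for the per-task extended FPU state above. */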
void arch_task_cache_init(void)
{
        task_xstate_cachep =
                kmem_cache_create("task_xstate", xstate_size,
                                  __alignof__(union thread_xstate),
                                  SLAB_PANIC | SLAB_NOTRACK, NULL);
}

/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
        struct task_struct *me = current;
        struct thread_struct *t = &me->thread;
        unsigned long *bp = t->io_bitmap_ptr;

        if (bp) {
                struct tss_struct *tss = &per_cpu(init_tss, get_cpu());

                t->io_bitmap_ptr = NULL;
                clear_thread_flag(TIF_IO_BITMAP);
                /*
                 * Careful, clear this in the TSS too:
                 */
                memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
                t->io_bitmap_max = 0;
                put_cpu();
                kfree(bp);
        }
}

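/*
 * Called on exec: complete a pending 32/64-bit ABI switch, clear the
 * debug registers and TLS entries, and forget the FPU state so nothing
 * leaks into the new executable image.
 */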
void flush_thread(void)
{
        struct task_struct *tsk = current;

#ifdef CONFIG_X86_64
        if (test_tsk_thread_flag(tsk, TIF_ABI_PENDING)) {
                clear_tsk_thread_flag(tsk, TIF_ABI_PENDING);
                if (test_tsk_thread_flag(tsk, TIF_IA32)) {
                        clear_tsk_thread_flag(tsk, TIF_IA32);
                } else {
                        set_tsk_thread_flag(tsk, TIF_IA32);
                        current_thread_info()->status |= TS_COMPAT;
                }
        }
#endif

        clear_tsk_thread_flag(tsk, TIF_DEBUG);

        tsk->thread.debugreg0 = 0;
        tsk->thread.debugreg1 = 0;
        tsk->thread.debugreg2 = 0;
        tsk->thread.debugreg3 = 0;
        tsk->thread.debugreg6 = 0;
        tsk->thread.debugreg7 = 0;
        memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
        /*
         * Forget coprocessor state..
         */
        tsk->fpu_counter = 0;
        clear_fpu(tsk);
        clear_used_math();
}

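/*
 * PR_SET_TSC/PR_GET_TSC support: TIF_NOTSC mirrors the CR4.TSD bit, so a
 * task in PR_TSC_SIGSEGV mode faults on a user-space RDTSC. The hard_*
 * helpers flip CR4 on the current CPU; __switch_to_xtra() keeps the bit
 * in sync across context switches.
 */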
static void hard_disable_TSC(void)
{
        write_cr4(read_cr4() | X86_CR4_TSD);
}

void disable_TSC(void)
{
        preempt_disable();
        if (!test_and_set_thread_flag(TIF_NOTSC))
                /*
                 * Must flip the CPU state synchronously with
                 * TIF_NOTSC in the current running context.
                 */
                hard_disable_TSC();
        preempt_enable();
}

static void hard_enable_TSC(void)
{
        write_cr4(read_cr4() & ~X86_CR4_TSD);
}

static void enable_TSC(void)
{
        preempt_disable();
        if (test_and_clear_thread_flag(TIF_NOTSC))
                /*
                 * Must flip the CPU state synchronously with
                 * TIF_NOTSC in the current running context.
                 */
                hard_enable_TSC();
        preempt_enable();
}

int get_tsc_mode(unsigned long adr)
{
        unsigned int val;

        if (test_thread_flag(TIF_NOTSC))
                val = PR_TSC_SIGSEGV;
        else
                val = PR_TSC_ENABLE;

        return put_user(val, (unsigned int __user *)adr);
}

int set_tsc_mode(unsigned int val)
{
        if (val == PR_TSC_SIGSEGV)
                disable_TSC();
        else if (val == PR_TSC_ENABLE)
                enable_TSC();
        else
                return -EINVAL;

        return 0;
}

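/*
 * Slow-path context-switch work, called from __switch_to() when either
 * task needs it: switch the DS/debugctl MSR state, reload the hardware
 * breakpoint registers, toggle CR4.TSD, and update the I/O permission
 * bitmap in the TSS.
 */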
void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
                      struct tss_struct *tss)
{
        struct thread_struct *prev, *next;

        prev = &prev_p->thread;
        next = &next_p->thread;

        if (test_tsk_thread_flag(next_p, TIF_DS_AREA_MSR) ||
            test_tsk_thread_flag(prev_p, TIF_DS_AREA_MSR))
                ds_switch_to(prev_p, next_p);
        else if (next->debugctlmsr != prev->debugctlmsr)
                update_debugctlmsr(next->debugctlmsr);

        if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
                set_debugreg(next->debugreg0, 0);
                set_debugreg(next->debugreg1, 1);
                set_debugreg(next->debugreg2, 2);
                set_debugreg(next->debugreg3, 3);
                /* no 4 and 5 */
                set_debugreg(next->debugreg6, 6);
                set_debugreg(next->debugreg7, 7);
        }

        if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
            test_tsk_thread_flag(next_p, TIF_NOTSC)) {
                /* prev and next are different */
                if (test_tsk_thread_flag(next_p, TIF_NOTSC))
                        hard_disable_TSC();
                else
                        hard_enable_TSC();
        }

        if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
                /*
                 * Copy the relevant range of the IO bitmap.
                 * Normally this is 128 bytes or less:
                 */
                memcpy(tss->io_bitmap, next->io_bitmap_ptr,
                       max(prev->io_bitmap_max, next->io_bitmap_max));
        } else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
                /*
                 * Clear any possible leftover bits:
                 */
                memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
        }
}

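/* fork() entry point: duplicate the current task, with SIGCHLD sent on exit. */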
int sys_fork(struct pt_regs *regs)
{
        return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL);
}

/*
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 *
 * Not so, for quite unobvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
 */
int sys_vfork(struct pt_regs *regs)
{
        return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp, regs, 0,
                       NULL, NULL);
}


/*
 * Idle related variables and functions
 */
unsigned long boot_option_idle_override = 0;
EXPORT_SYMBOL(boot_option_idle_override);

/*
 * Power management idle function, if any..
 */
void (*pm_idle)(void);
EXPORT_SYMBOL(pm_idle);

#ifdef CONFIG_X86_32
/*
 * This halt magic was a workaround for ancient floppy DMA
 * wreckage. It should be safe to remove.
 */
static int hlt_counter;
void disable_hlt(void)
{
        hlt_counter++;
}
EXPORT_SYMBOL(disable_hlt);

void enable_hlt(void)
{
        hlt_counter--;
}
EXPORT_SYMBOL(enable_hlt);

static inline int hlt_use_halt(void)
{
        return (!hlt_counter && boot_cpu_data.hlt_works_ok);
}
#else
static inline int hlt_use_halt(void)
{
        return 1;
}
#endif

/*
 * We use this if we don't have any better
 * idle routine..
 */
void default_idle(void)
{
        if (hlt_use_halt()) {
                trace_power_start(POWER_CSTATE, 1);
                current_thread_info()->status &= ~TS_POLLING;
                /*
                 * TS_POLLING-cleared state must be visible before we
                 * test NEED_RESCHED:
                 */
                smp_mb();

                if (!need_resched())
                        safe_halt();    /* enables interrupts racelessly */
                else
                        local_irq_enable();
                current_thread_info()->status |= TS_POLLING;
                trace_power_end(0);
        } else {
                local_irq_enable();
                /* loop is done by the caller */
                cpu_relax();
        }
}
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(default_idle);
#endif

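/*
 * Park this CPU: mark it offline, disable its local APIC and spin in
 * halt with interrupts off. Typically invoked via IPI on the shutdown
 * and reboot paths.
 */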
void stop_this_cpu(void *dummy)
{
        local_irq_disable();
        /*
         * Remove this CPU:
         */
        set_cpu_online(smp_processor_id(), false);
        disable_local_APIC();

        for (;;) {
                if (hlt_works(smp_processor_id()))
                        halt();
        }
}

static void do_nothing(void *unused)
{
}

/*
 * cpu_idle_wait - Used to ensure that all the CPUs discard the old value of
 * pm_idle and pick up the new one. Required when changing the pm_idle
 * handler on SMP systems.
 *
 * The caller must have changed pm_idle to the new value before the call. The
 * old pm_idle value will not be used by any CPU after this function returns.
 */
void cpu_idle_wait(void)
{
        smp_mb();
        /* kick all the CPUs so that they exit out of pm_idle */
        smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);

/*
 * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
 * which can obviate IPI to trigger checking of need_resched.
 * We execute MONITOR against need_resched and enter optimized wait state
 * through MWAIT. Whenever someone changes need_resched, we would be woken
 * up from MWAIT (without an IPI).
 *
 * New with Core Duo processors, MWAIT can take some hints based on CPU
 * capability.
 */
void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
{
        trace_power_start(POWER_CSTATE, (ax>>4)+1);
        if (!need_resched()) {
                if (cpu_has(&current_cpu_data, X86_FEATURE_CLFLUSH_MONITOR))
                        clflush((void *)&current_thread_info()->flags);

                __monitor((void *)&current_thread_info()->flags, 0, 0);
                smp_mb();
                if (!need_resched())
                        __mwait(ax, cx);
        }
        trace_power_end(0);
}

/* Default MONITOR/MWAIT with no hints, used for default C1 state */
static void mwait_idle(void)
{
        if (!need_resched()) {
                trace_power_start(POWER_CSTATE, 1);
                if (cpu_has(&current_cpu_data, X86_FEATURE_CLFLUSH_MONITOR))
                        clflush((void *)&current_thread_info()->flags);

                __monitor((void *)&current_thread_info()->flags, 0, 0);
                smp_mb();
                if (!need_resched())
                        __sti_mwait(0, 0);
                else
                        local_irq_enable();
                trace_power_end(0);
        } else
                local_irq_enable();
}

/*
 * On SMP it's slightly faster (but much more power-consuming!)
 * to poll the ->work.need_resched flag instead of waiting for the
 * cross-CPU IPI to arrive. Use this option with caution.
 */
static void poll_idle(void)
{
        trace_power_start(POWER_CSTATE, 0);
        local_irq_enable();
        while (!need_resched())
                cpu_relax();
        trace_power_end(0);
}

/*
 * mwait selection logic:
 *
 * It depends on the CPU. For AMD CPUs that support MWAIT this is
 * wrong. Family 0x10 and 0x11 CPUs will enter C1 on HLT. Powersavings
 * then depend on a clock divisor and current Pstate of the core. If
 * all cores of a processor are in halt state (C1) the processor can
 * enter the C1E (C1 enhanced) state. If mwait is used this will never
 * happen.
 *
 * idle=mwait overrides this decision and forces the usage of mwait.
 */
static int __cpuinitdata force_mwait;

#define MWAIT_INFO                      0x05
#define MWAIT_ECX_EXTENDED_INFO         0x01
#define MWAIT_EDX_C1                    0xf0

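/*
 * Query CPUID leaf 5 (MONITOR/MWAIT): if the extended-info bit in ECX is
 * not set, assume MWAIT is fine; otherwise require EDX to advertise C1
 * sub-states before using MWAIT for idle.
 */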
static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
{
        u32 eax, ebx, ecx, edx;

        if (force_mwait)
                return 1;

        if (c->cpuid_level < MWAIT_INFO)
                return 0;

        cpuid(MWAIT_INFO, &eax, &ebx, &ecx, &edx);
        /* Check whether EDX has extended info about MWAIT */
        if (!(ecx & MWAIT_ECX_EXTENDED_INFO))
                return 1;

        /*
         * edx enumerates MONITOR/MWAIT extensions. Check whether
         * C1 supports MWAIT
         */
        return (edx & MWAIT_EDX_C1);
}

/*
 * Check for AMD CPUs, which potentially have C1E support
 */
static int __cpuinit check_c1e_idle(const struct cpuinfo_x86 *c)
{
        if (c->x86_vendor != X86_VENDOR_AMD)
                return 0;

        if (c->x86 < 0x0F)
                return 0;

        /* Family 0x0f models < rev F do not have C1E */
        if (c->x86 == 0x0f && c->x86_model < 0x40)
                return 0;

        return 1;
}

static cpumask_var_t c1e_mask;
static int c1e_detected;

void c1e_remove_cpu(int cpu)
{
        if (c1e_mask != NULL)
                cpumask_clear_cpu(cpu, c1e_mask);
}

/*
 * C1E aware idle routine. We check for C1E active in the interrupt
 * pending message MSR. If we detect C1E, then we handle it the same
 * way as C3 power states (local apic timer and TSC stop)
 */
static void c1e_idle(void)
{
        if (need_resched())
                return;

        if (!c1e_detected) {
                u32 lo, hi;

                rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);
                if (lo & K8_INTP_C1E_ACTIVE_MASK) {
                        c1e_detected = 1;
                        if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
                                mark_tsc_unstable("TSC halt in AMD C1E");
                        printk(KERN_INFO "System has AMD C1E enabled\n");
                        set_cpu_cap(&boot_cpu_data, X86_FEATURE_AMDC1E);
                }
        }

        if (c1e_detected) {
                int cpu = smp_processor_id();

                if (!cpumask_test_cpu(cpu, c1e_mask)) {
                        cpumask_set_cpu(cpu, c1e_mask);
                        /*
                         * Force broadcast so ACPI can not interfere.
                         */
                        clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_FORCE,
                                           &cpu);
                        printk(KERN_INFO "Switch to broadcast mode on CPU%d\n",
                               cpu);
                }
                clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);

                default_idle();

                /*
                 * The switch back from broadcast mode needs to be
                 * called with interrupts disabled.
                 */
                local_irq_disable();
                clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
                local_irq_enable();
        } else
                default_idle();
}

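/*
 * Pick the idle routine once at boot: prefer mwait_idle when MWAIT is
 * usable, fall back to the C1E-aware routine on affected AMD CPUs, and
 * use default_idle (HLT) otherwise.
 */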
void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
        if (pm_idle == poll_idle && smp_num_siblings > 1) {
                printk(KERN_WARNING "WARNING: polling idle and HT enabled,"
                        " performance may degrade.\n");
        }
#endif
        if (pm_idle)
                return;

        if (cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)) {
                /*
                 * One CPU supports mwait => All CPUs support mwait
                 */
                printk(KERN_INFO "using mwait in idle threads.\n");
                pm_idle = mwait_idle;
        } else if (check_c1e_idle(c)) {
                printk(KERN_INFO "using C1E aware idle routine\n");
                pm_idle = c1e_idle;
        } else
                pm_idle = default_idle;
}

void __init init_c1e_mask(void)
{
        /* If we're using c1e_idle, we need to allocate c1e_mask. */
        if (pm_idle == c1e_idle) {
                alloc_cpumask_var(&c1e_mask, GFP_KERNEL);
                cpumask_clear(c1e_mask);
        }
}

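/*
 * Parse the "idle=" boot parameter: "poll", "mwait", "halt" and
 * "nomwait" are recognized; anything else is rejected.
 */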
static int __init idle_setup(char *str)
{
        if (!str)
                return -EINVAL;

        if (!strcmp(str, "poll")) {
                printk("using polling idle threads.\n");
                pm_idle = poll_idle;
        } else if (!strcmp(str, "mwait"))
                force_mwait = 1;
        else if (!strcmp(str, "halt")) {
                /*
                 * With the boot option idle=halt, HLT is forced as the
                 * CPU idle method and the C2/C3 states won't be entered.
                 * boot_option_idle_override is deliberately left alone so
                 * that the CPU idle driver can still be loaded.
                 */
                pm_idle = default_idle;
                idle_halt = 1;
                return 0;
        } else if (!strcmp(str, "nomwait")) {
                /*
                 * With the boot option idle=nomwait, mwait is disabled
                 * for the CPU C2/C3 states. boot_option_idle_override is
                 * deliberately left alone here as well.
                 */
                idle_nomwait = 1;
                return 0;
        } else
                return -1;

        boot_option_idle_override = 1;
        return 0;
}
early_param("idle", idle_setup);

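/*
 * Stack and brk randomization: subtract a random offset of up to 8 KB
 * from the initial stack pointer (then 16-byte align it), and place the
 * heap break at a random offset within 32 MB above mm->brk.
 */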
unsigned long arch_align_stack(unsigned long sp)
{
        if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
                sp -= get_random_int() % 8192;
        return sp & ~0xf;
}

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
        unsigned long range_end = mm->brk + 0x02000000;
        return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
}