/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 * x86-64 work by Andi Kleen 2002
 */

#ifndef _FPU_INTERNAL_H
#define _FPU_INTERNAL_H

#include <linux/kernel_stat.h>
#include <linux/regset.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include <asm/asm.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <asm/sigcontext.h>
#include <asm/user.h>
#include <asm/uaccess.h>
#include <asm/xsave.h>
#include <asm/smap.h>

#ifdef CONFIG_X86_64
# include <asm/sigcontext32.h>
# include <asm/user32.h>
struct ksignal;
int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
			compat_sigset_t *set, struct pt_regs *regs);
int ia32_setup_frame(int sig, struct ksignal *ksig,
		     compat_sigset_t *set, struct pt_regs *regs);
#else
# define user_i387_ia32_struct	user_i387_struct
# define user32_fxsr_struct	user_fxsr_struct
# define ia32_setup_frame	__setup_frame
# define ia32_setup_rt_frame	__setup_rt_frame
#endif

extern unsigned int mxcsr_feature_mask;
extern void fpu_init(void);
extern void eager_fpu_init(void);

DECLARE_PER_CPU(struct task_struct *, fpu_owner_task);

extern void convert_from_fxsr(struct user_i387_ia32_struct *env,
			      struct task_struct *tsk);
extern void convert_to_fxsr(struct task_struct *tsk,
			    const struct user_i387_ia32_struct *env);

extern user_regset_active_fn fpregs_active, xfpregs_active;
extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get,
				xstateregs_get;
extern user_regset_set_fn fpregs_set, xfpregs_set, fpregs_soft_set,
				 xstateregs_set;

/*
 * xstateregs_active == fpregs_active. Please refer to the comment
 * at the definition of fpregs_active.
 */
#define xstateregs_active	fpregs_active

#ifdef CONFIG_MATH_EMULATION
extern void finit_soft_fpu(struct i387_soft_struct *soft);
#else
static inline void finit_soft_fpu(struct i387_soft_struct *soft) {}
#endif

/*
 * Must be run with preemption disabled: this clears the fpu_owner_task
 * variable on this CPU.
 *
 * This disables any lazy restore of the FPU state currently loaded on
 * this CPU; if the current thread owns the FPU, its state will still be
 * saved normally when it is switched out.
 */
static inline void __cpu_disable_lazy_restore(unsigned int cpu)
{
	per_cpu(fpu_owner_task, cpu) = NULL;
}

/*
 * Used to indicate that the FPU state in memory is newer than the FPU
 * state in registers, and the FPU state should be reloaded next time the
 * task is run. Only safe on the current task, or non-running tasks.
 */
static inline void task_disable_lazy_fpu_restore(struct task_struct *tsk)
{
	tsk->thread.fpu.last_cpu = ~0;
}

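/*
 * Returns nonzero if 'new' can reuse the FPU register contents left on
 * this CPU: it must both be the last task whose state was loaded here
 * (fpu_owner_task) and not have run on another CPU since (last_cpu).
 */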
static inline int fpu_lazy_restore(struct task_struct *new, unsigned int cpu)
{
	return new == this_cpu_read_stable(fpu_owner_task) &&
		cpu == new->thread.fpu.last_cpu;
}

static inline int is_ia32_compat_frame(void)
{
	return config_enabled(CONFIG_IA32_EMULATION) &&
	       test_thread_flag(TIF_IA32);
}

static inline int is_ia32_frame(void)
{
	return config_enabled(CONFIG_X86_32) || is_ia32_compat_frame();
}

static inline int is_x32_frame(void)
{
	return config_enabled(CONFIG_X86_X32_ABI) && test_thread_flag(TIF_X32);
}

#define X87_FSW_ES (1 << 7)	/* Exception Summary */

static __always_inline __pure bool use_eager_fpu(void)
{
	return static_cpu_has_safe(X86_FEATURE_EAGER_FPU);
}

static __always_inline __pure bool use_xsaveopt(void)
{
	return static_cpu_has_safe(X86_FEATURE_XSAVEOPT);
}

static __always_inline __pure bool use_xsave(void)
{
	return static_cpu_has_safe(X86_FEATURE_XSAVE);
}

static __always_inline __pure bool use_fxsr(void)
{
	return static_cpu_has_safe(X86_FEATURE_FXSR);
}

static inline void fx_finit(struct i387_fxsave_struct *fx)
{
	memset(fx, 0, xstate_size);
	fx->cwd = 0x37f;
	fx->mxcsr = MXCSR_DEFAULT;
}

extern void __sanitize_i387_state(struct task_struct *);

static inline void sanitize_i387_state(struct task_struct *tsk)
{
	if (!use_xsaveopt())
		return;
	__sanitize_i387_state(tsk);
}

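/*
 * user_insn() and check_insn() run a single FPU instruction and report a
 * fault through the exception-table fixup: a fault at label 1 jumps to
 * label 3, which sets err to -1 and resumes at label 2. err is preset to
 * 0 via the "0"(0) input, so the macros evaluate to 0 on success and -1
 * on a fault. user_insn() additionally brackets the access with
 * STAC/CLAC so that SMAP permits touching user-space memory.
 */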
#define user_insn(insn, output, input...)				\
({									\
	int err;							\
	asm volatile(ASM_STAC "\n"					\
		     "1:" #insn "\n\t"					\
		     "2: " ASM_CLAC "\n"				\
		     ".section .fixup,\"ax\"\n"				\
		     "3:  movl $-1,%[err]\n"				\
		     "    jmp  2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : [err] "=r" (err), output				\
		     : "0"(0), input);					\
	err;								\
})

#define check_insn(insn, output, input...)				\
({									\
	int err;							\
	asm volatile("1:" #insn "\n\t"					\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:  movl $-1,%[err]\n"				\
		     "    jmp  2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : [err] "=r" (err), output				\
		     : "0"(0), input);					\
	err;								\
})

static inline int fsave_user(struct i387_fsave_struct __user *fx)
{
	return user_insn(fnsave %[fx]; fwait,  [fx] "=m" (*fx), "m" (*fx));
}

static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
{
	if (config_enabled(CONFIG_X86_32))
		return user_insn(fxsave %[fx], [fx] "=m" (*fx), "m" (*fx));
	else if (config_enabled(CONFIG_AS_FXSAVEQ))
		return user_insn(fxsaveq %[fx], [fx] "=m" (*fx), "m" (*fx));

	/* See comment in fpu_fxsave() below. */
	return user_insn(rex64/fxsave (%[fx]), "=m" (*fx), [fx] "R" (fx));
}

static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
{
	if (config_enabled(CONFIG_X86_32))
		return check_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
	else if (config_enabled(CONFIG_AS_FXSAVEQ))
		return check_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));

	/* See comment in fpu_fxsave() below. */
	return check_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx),
			  "m" (*fx));
}

static inline int fxrstor_user(struct i387_fxsave_struct __user *fx)
{
	if (config_enabled(CONFIG_X86_32))
		return user_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
	else if (config_enabled(CONFIG_AS_FXSAVEQ))
		return user_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));

	/* See comment in fpu_fxsave() below. */
	return user_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx),
			  "m" (*fx));
}

static inline int frstor_checking(struct i387_fsave_struct *fx)
{
	return check_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
}

static inline int frstor_user(struct i387_fsave_struct __user *fx)
{
	return user_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
}

static inline void fpu_fxsave(struct fpu *fpu)
{
	if (config_enabled(CONFIG_X86_32))
		asm volatile( "fxsave %[fx]" : [fx] "=m" (fpu->state->fxsave));
	else if (config_enabled(CONFIG_AS_FXSAVEQ))
		asm volatile("fxsaveq %[fx]" : [fx] "=m" (fpu->state->fxsave));
	else {
		/* Using "rex64; fxsave %0" is broken because, if the memory
		 * operand uses any extended registers for addressing, a second
		 * REX prefix will be generated (to the assembler, rex64
		 * followed by semicolon is a separate instruction), and hence
		 * the 64-bitness is lost.
		 *
		 * Using "fxsaveq %0" would be the ideal choice, but is only
		 * supported starting with gas 2.16.
		 *
		 * Using, as a workaround, the properly prefixed form below
		 * isn't accepted by any binutils version so far released,
		 * complaining that the same type of prefix is used twice if
		 * an extended register is needed for addressing (fix submitted
		 * to mainline 2005-11-21).
		 *
		 *  asm volatile("rex64/fxsave %0" : "=m" (fpu->state->fxsave));
		 *
		 * This, however, we can work around by forcing the compiler to
		 * select an addressing mode that doesn't require extended
		 * registers.
		 */
		asm volatile( "rex64/fxsave (%[fx])"
			     : "=m" (fpu->state->fxsave)
			     : [fx] "R" (&fpu->state->fxsave));
	}
}

/*
 * Must be called with preemption disabled. Returns 'true' if the FPU
 * register state is still intact after the save, i.e. the task can keep
 * using the registers without a restore.
 */
static inline int fpu_save_init(struct fpu *fpu)
{
	if (use_xsave()) {
		fpu_xsave(fpu);

		/*
		 * The xsave header may indicate that the FP state is
		 * still in its init state.
		 */
		if (!(fpu->state->xsave.xsave_hdr.xstate_bv & XSTATE_FP))
			return 1;
	} else if (use_fxsr()) {
		fpu_fxsave(fpu);
	} else {
		asm volatile("fnsave %[fx]; fwait"
			     : [fx] "=m" (fpu->state->fsave));
		return 0;
	}

	/*
	 * If exceptions are pending, we need to clear them so
	 * that we don't randomly get exceptions later.
	 *
	 * FIXME! Is this perhaps only true for the old-style
	 * irq13 case? Maybe we could leave the x87 state
	 * intact otherwise?
	 */
	if (unlikely(fpu->state->fxsave.swd & X87_FSW_ES)) {
		asm volatile("fnclex");
		return 0;
	}
	return 1;
}

static inline int __save_init_fpu(struct task_struct *tsk)
{
	return fpu_save_init(&tsk->thread.fpu);
}

static inline int fpu_restore_checking(struct fpu *fpu)
{
	if (use_xsave())
		return fpu_xrstor_checking(&fpu->state->xsave);
	else if (use_fxsr())
		return fxrstor_checking(&fpu->state->fxsave);
	else
		return frstor_checking(&fpu->state->fsave);
}

static inline int restore_fpu_checking(struct task_struct *tsk)
{
	/*
	 * AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception is
	 * pending. Clear the x87 state here by setting it to fixed values.
	 * The "m" operand points at an arbitrary variable that is likely to
	 * be hot in the L1 cache.
	 */
	if (unlikely(static_cpu_has_bug_safe(X86_BUG_FXSAVE_LEAK))) {
		asm volatile(
			"fnclex\n\t"
			"emms\n\t"
			"fildl %P[addr]"	/* set FDP/FIP to a defined value */
			: : [addr] "m" (tsk->thread.fpu.has_fpu));
	}

	return fpu_restore_checking(&tsk->thread.fpu);
}

/*
 * Software FPU state helpers. Careful: these need preemption
 * protection *and* they need to be properly paired with the
 * CR0.TS changes!
 */
static inline int __thread_has_fpu(struct task_struct *tsk)
{
	return tsk->thread.fpu.has_fpu;
}

/* Must be paired with an 'stts' after! */
static inline void __thread_clear_has_fpu(struct task_struct *tsk)
{
	tsk->thread.fpu.has_fpu = 0;
	this_cpu_write(fpu_owner_task, NULL);
}

/* Must be paired with a 'clts' before! */
static inline void __thread_set_has_fpu(struct task_struct *tsk)
{
	tsk->thread.fpu.has_fpu = 1;
	this_cpu_write(fpu_owner_task, tsk);
}

/*
 * Encapsulate the CR0.TS handling together with the
 * software flag.
 *
 * These generally need preemption protection to work,
 * so try to avoid using them on their own.
 */
static inline void __thread_fpu_end(struct task_struct *tsk)
{
	__thread_clear_has_fpu(tsk);
	if (!use_eager_fpu())
		stts();
}

static inline void __thread_fpu_begin(struct task_struct *tsk)
{
	if (!use_eager_fpu())
		clts();
	__thread_set_has_fpu(tsk);
}

static inline void __drop_fpu(struct task_struct *tsk)
{
	if (__thread_has_fpu(tsk)) {
		/* Ignore delayed exceptions from user space */
		asm volatile("1: fwait\n"
			     "2:\n"
			     _ASM_EXTABLE(1b, 2b));
		__thread_fpu_end(tsk);
	}
}

static inline void drop_fpu(struct task_struct *tsk)
{
	/*
	 * Forget coprocessor state.
	 */
	preempt_disable();
	tsk->thread.fpu_counter = 0;
	__drop_fpu(tsk);
	clear_used_math();
	preempt_enable();
}

static inline void drop_init_fpu(struct task_struct *tsk)
{
	if (!use_eager_fpu())
		drop_fpu(tsk);
	else {
		if (use_xsave())
			xrstor_state(init_xstate_buf, -1);
		else
			fxrstor_checking(&init_xstate_buf->i387);
	}
}

/*
 * FPU state switching for scheduling.
 *
 * This is a two-stage process:
 *
 *  - switch_fpu_prepare() saves the old state and
 *    sets the new state of the CR0.TS bit. This is
 *    done within the context of the old process.
 *
 *  - switch_fpu_finish() restores the new state as
 *    necessary.
 */
typedef struct { int preload; } fpu_switch_t;

static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct task_struct *new, int cpu)
{
	fpu_switch_t fpu;

	/*
	 * If the incoming task has used the FPU, preload its state:
	 * always on eager-FPU (xsave) processors, or when the past 5
	 * consecutive context switches all used math.
	 */
	fpu.preload = tsk_used_math(new) &&
		      (use_eager_fpu() || new->thread.fpu_counter > 5);

	if (__thread_has_fpu(old)) {
		if (!__save_init_fpu(old))
			task_disable_lazy_fpu_restore(old);
		else
			old->thread.fpu.last_cpu = cpu;

		/* But leave fpu_owner_task! */
		old->thread.fpu.has_fpu = 0;

		/* Don't change CR0.TS if we just switch! */
		if (fpu.preload) {
			new->thread.fpu_counter++;
			__thread_set_has_fpu(new);
			prefetch(new->thread.fpu.state);
		} else if (!use_eager_fpu())
			stts();
	} else {
		old->thread.fpu_counter = 0;
		task_disable_lazy_fpu_restore(old);
		if (fpu.preload) {
			new->thread.fpu_counter++;
			if (fpu_lazy_restore(new, cpu))
				fpu.preload = 0;
			else
				prefetch(new->thread.fpu.state);
			__thread_fpu_begin(new);
		}
	}
	return fpu;
}

/*
 * By the time this gets called, we've already cleared CR0.TS and
 * given the process the FPU if we are going to preload the FPU
 * state - all we need to do is to conditionally restore the register
 * state itself.
 */
static inline void switch_fpu_finish(struct task_struct *new, fpu_switch_t fpu)
{
	if (fpu.preload) {
		if (unlikely(restore_fpu_checking(new)))
			drop_init_fpu(new);
	}
}
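
/*
 * Illustrative sketch (not part of this header) of how a context-switch
 * path would use the pair above; the surrounding code is a simplified
 * assumption of what __switch_to() does:
 *
 *	fpu_switch_t fpu;
 *
 *	fpu = switch_fpu_prepare(prev_p, next_p, cpu);
 *	...switch stacks, segments, TLS, etc...
 *	switch_fpu_finish(next_p, fpu);
 */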

/*
 * Signal frame handlers...
 */
extern int save_xstate_sig(void __user *buf, void __user *fx, int size);
extern int __restore_xstate_sig(void __user *buf, void __user *fx, int size);

static inline int xstate_sigframe_size(void)
{
	return use_xsave() ? xstate_size + FP_XSTATE_MAGIC2_SIZE : xstate_size;
}

static inline int restore_xstate_sig(void __user *buf, int ia32_frame)
{
	void __user *buf_fx = buf;
	int size = xstate_sigframe_size();

	if (ia32_frame && use_fxsr()) {
		buf_fx = buf + sizeof(struct i387_fsave_struct);
		size += sizeof(struct i387_fsave_struct);
	}

	return __restore_xstate_sig(buf, buf_fx, size);
}
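
/*
 * For reference, the resulting user-space layout on an ia32 frame with
 * FXSR (a sketch derived from the code above, not an additional ABI
 * guarantee):
 *
 *	buf    -> struct i387_fsave_struct       (legacy fsave area)
 *	buf_fx -> fxsave/xsave area, xstate_sigframe_size() bytes
 *
 * On native frames, buf_fx == buf and only the second area exists.
 */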

/*
 * Needs to be preemption-safe.
 *
 * NOTE! user_fpu_begin() must be used only immediately before restoring
 * the FPU state. This function does not do any save/restore on its own.
 */
static inline void user_fpu_begin(void)
{
	preempt_disable();
	if (!user_has_fpu())
		__thread_fpu_begin(current);
	preempt_enable();
}
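
/*
 * Illustrative use (a hedged sketch, not code from this header): a
 * signal-return path about to load user FPU state directly into the
 * registers would do roughly:
 *
 *	user_fpu_begin();
 *	if (fxrstor_user(buf_fx))
 *		drop_init_fpu(tsk);
 *
 * i.e. grab the FPU first, then perform the actual register load.
 */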

static inline void __save_fpu(struct task_struct *tsk)
{
	if (use_xsave()) {
		if (unlikely(system_state == SYSTEM_BOOTING))
			xsave_state_booting(&tsk->thread.fpu.state->xsave, -1);
		else
			xsave_state(&tsk->thread.fpu.state->xsave, -1);
	} else
		fpu_fxsave(&tsk->thread.fpu);
}

/*
 * i387 state interaction
 */
static inline unsigned short get_fpu_cwd(struct task_struct *tsk)
{
	if (cpu_has_fxsr) {
		return tsk->thread.fpu.state->fxsave.cwd;
	} else {
		return (unsigned short)tsk->thread.fpu.state->fsave.cwd;
	}
}

static inline unsigned short get_fpu_swd(struct task_struct *tsk)
{
	if (cpu_has_fxsr) {
		return tsk->thread.fpu.state->fxsave.swd;
	} else {
		return (unsigned short)tsk->thread.fpu.state->fsave.swd;
	}
}

static inline unsigned short get_fpu_mxcsr(struct task_struct *tsk)
{
	if (cpu_has_xmm) {
		return tsk->thread.fpu.state->fxsave.mxcsr;
	} else {
		return MXCSR_DEFAULT;
	}
}

static inline bool fpu_allocated(struct fpu *fpu)
{
	return fpu->state != NULL;
}

static inline int fpu_alloc(struct fpu *fpu)
{
	if (fpu_allocated(fpu))
		return 0;
	fpu->state = kmem_cache_alloc(task_xstate_cachep, GFP_KERNEL);
	if (!fpu->state)
		return -ENOMEM;
	WARN_ON((unsigned long)fpu->state & 15);
	return 0;
}

static inline void fpu_free(struct fpu *fpu)
{
	if (fpu->state) {
		kmem_cache_free(task_xstate_cachep, fpu->state);
		fpu->state = NULL;
	}
}

static inline void fpu_copy(struct task_struct *dst, struct task_struct *src)
{
	if (use_eager_fpu()) {
		memset(&dst->thread.fpu.state->xsave, 0, xstate_size);
		__save_fpu(dst);
	} else {
		struct fpu *dfpu = &dst->thread.fpu;
		struct fpu *sfpu = &src->thread.fpu;

		unlazy_fpu(src);
		memcpy(dfpu->state, sfpu->state, xstate_size);
	}
}
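
/*
 * Illustrative use of the allocation helpers (a hedged sketch of the
 * arch fork path; names and error handling are simplified assumptions):
 *
 *	dst->thread.fpu.state = NULL;
 *	if (tsk_used_math(src)) {
 *		int err = fpu_alloc(&dst->thread.fpu);
 *		if (err)
 *			return err;
 *		fpu_copy(dst, src);
 *	}
 */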

static inline unsigned long
alloc_mathframe(unsigned long sp, int ia32_frame, unsigned long *buf_fx,
		unsigned long *size)
{
	unsigned long frame_size = xstate_sigframe_size();

	*buf_fx = sp = round_down(sp - frame_size, 64);
	if (ia32_frame && use_fxsr()) {
		frame_size += sizeof(struct i387_fsave_struct);
		sp -= sizeof(struct i387_fsave_struct);
	}

	*size = frame_size;
	return sp;
}
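
/*
 * Illustrative use (a hedged sketch of a get_sigframe()-style caller;
 * the variable names are assumptions):
 *
 *	if (used_math()) {
 *		sp = alloc_mathframe(sp, is_ia32_frame(),
 *				     &buf_fx, &math_size);
 *		*fpstate = (void __user *)sp;
 *	}
 *
 * i.e. carve the math frame out of the stack first, then place the rest
 * of the signal frame below the returned sp.
 */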

#endif /* _FPU_INTERNAL_H */