/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 * x86-64 work by Andi Kleen 2002
 */

#ifndef _FPU_INTERNAL_H
#define _FPU_INTERNAL_H

#include <linux/kernel_stat.h>
#include <linux/regset.h>
#include <linux/compat.h>
#include <linux/slab.h>

#include <asm/asm.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <asm/sigcontext.h>
#include <asm/user.h>
#include <asm/uaccess.h>
#include <asm/xsave.h>
#include <asm/smap.h>

#ifdef CONFIG_X86_64
# include <asm/sigcontext32.h>
# include <asm/user32.h>
int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
			compat_sigset_t *set, struct pt_regs *regs);
int ia32_setup_frame(int sig, struct ksignal *ksig,
		     compat_sigset_t *set, struct pt_regs *regs);
#else
# define user_i387_ia32_struct	user_i387_struct
# define user32_fxsr_struct	user_fxsr_struct
# define ia32_setup_frame	__setup_frame
# define ia32_setup_rt_frame	__setup_rt_frame
#endif

extern unsigned int mxcsr_feature_mask;
extern void fpu_init(void);
extern void eager_fpu_init(void);

DECLARE_PER_CPU(struct task_struct *, fpu_owner_task);

extern void convert_from_fxsr(struct user_i387_ia32_struct *env,
			      struct task_struct *tsk);
extern void convert_to_fxsr(struct task_struct *tsk,
			    const struct user_i387_ia32_struct *env);

extern user_regset_active_fn fpregs_active, xfpregs_active;
extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get,
				xstateregs_get;
extern user_regset_set_fn fpregs_set, xfpregs_set, fpregs_soft_set,
				xstateregs_set;

/*
 * xstateregs_active == fpregs_active. Please refer to the comment
 * at the definition of fpregs_active.
 */
#define xstateregs_active	fpregs_active

#ifdef CONFIG_MATH_EMULATION
extern void finit_soft_fpu(struct i387_soft_struct *soft);
#else
static inline void finit_soft_fpu(struct i387_soft_struct *soft) {}
#endif

/*
 * Must be run with preemption disabled: this clears the fpu_owner_task
 * on this CPU.
 *
 * This will disable any lazy FPU state restore of the current FPU state,
 * but if the current thread owns the FPU, it will still be saved.
 */
static inline void __cpu_disable_lazy_restore(unsigned int cpu)
{
	per_cpu(fpu_owner_task, cpu) = NULL;
}

/*
 * Used to indicate that the FPU state in memory is newer than the FPU
 * state in registers, and the FPU state should be reloaded next time the
 * task is run. Only safe on the current task, or non-running tasks.
 */
static inline void task_disable_lazy_fpu_restore(struct task_struct *tsk)
{
	tsk->thread.fpu.last_cpu = ~0;
}
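
/*
 * Returns true when the FPU registers of this CPU still hold 'new's state,
 * i.e. 'new' was the last FPU owner on this CPU and its lazy restore has
 * not been invalidated since, so the register restore can be skipped.
 */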
static inline int fpu_lazy_restore(struct task_struct *new, unsigned int cpu)
{
	return new == this_cpu_read_stable(fpu_owner_task) &&
		cpu == new->thread.fpu.last_cpu;
}

static inline int is_ia32_compat_frame(void)
{
	return config_enabled(CONFIG_IA32_EMULATION) &&
	       test_thread_flag(TIF_IA32);
}

static inline int is_ia32_frame(void)
{
	return config_enabled(CONFIG_X86_32) || is_ia32_compat_frame();
}

static inline int is_x32_frame(void)
{
	return config_enabled(CONFIG_X86_X32_ABI) && test_thread_flag(TIF_X32);
}

#define X87_FSW_ES (1 << 7)	/* Exception Summary */
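
/*
 * Feature tests used to pick the FPU save/restore mechanism. They are
 * backed by static_cpu_has_safe(), so the checks are patched in at boot
 * and are essentially free at runtime.
 */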
static __always_inline __pure bool use_eager_fpu(void)
{
	return static_cpu_has_safe(X86_FEATURE_EAGER_FPU);
}

static __always_inline __pure bool use_xsaveopt(void)
{
	return static_cpu_has_safe(X86_FEATURE_XSAVEOPT);
}

static __always_inline __pure bool use_xsave(void)
{
	return static_cpu_has_safe(X86_FEATURE_XSAVE);
}

static __always_inline __pure bool use_fxsr(void)
{
	return static_cpu_has_safe(X86_FEATURE_FXSR);
}
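
/* Initialize an fxsave area to its default (finit-like) reset state. */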
static inline void fx_finit(struct i387_fxsave_struct *fx)
{
	memset(fx, 0, xstate_size);
	fx->cwd = 0x37f;
	fx->mxcsr = MXCSR_DEFAULT;
}

extern void __sanitize_i387_state(struct task_struct *);

static inline void sanitize_i387_state(struct task_struct *tsk)
{
	if (!use_xsaveopt())
		return;
	__sanitize_i387_state(tsk);
}
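
/*
 * user_insn() and check_insn() wrap a single FPU instruction with an
 * exception-table fixup: the expression evaluates to 0 on success and -1
 * if the instruction faulted. user_insn() additionally brackets the
 * instruction with STAC/CLAC so it may touch user memory under SMAP.
 */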
#define user_insn(insn, output, input...)				\
({									\
	int err;							\
	asm volatile(ASM_STAC "\n"					\
		     "1:" #insn "\n\t"					\
		     "2: " ASM_CLAC "\n"				\
		     ".section .fixup,\"ax\"\n"				\
		     "3:  movl $-1,%[err]\n"				\
		     "    jmp  2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : [err] "=r" (err), output				\
		     : "0"(0), input);					\
	err;								\
})

#define check_insn(insn, output, input...)				\
({									\
	int err;							\
	asm volatile("1:" #insn "\n\t"					\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:  movl $-1,%[err]\n"				\
		     "    jmp  2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : [err] "=r" (err), output				\
		     : "0"(0), input);					\
	err;								\
})

static inline int fsave_user(struct i387_fsave_struct __user *fx)
{
	return user_insn(fnsave %[fx]; fwait,  [fx] "=m" (*fx), "m" (*fx));
}

static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
{
	if (config_enabled(CONFIG_X86_32))
		return user_insn(fxsave %[fx], [fx] "=m" (*fx), "m" (*fx));
	else if (config_enabled(CONFIG_AS_FXSAVEQ))
		return user_insn(fxsaveq %[fx], [fx] "=m" (*fx), "m" (*fx));

	/* See comment in fpu_fxsave() below. */
	return user_insn(rex64/fxsave (%[fx]), "=m" (*fx), [fx] "R" (fx));
}

static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
{
	if (config_enabled(CONFIG_X86_32))
		return check_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
	else if (config_enabled(CONFIG_AS_FXSAVEQ))
		return check_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));

	/* See comment in fpu_fxsave() below. */
	return check_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx),
			  "m" (*fx));
}

static inline int fxrstor_user(struct i387_fxsave_struct __user *fx)
{
	if (config_enabled(CONFIG_X86_32))
		return user_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
	else if (config_enabled(CONFIG_AS_FXSAVEQ))
		return user_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));

	/* See comment in fpu_fxsave() below. */
	return user_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx),
			  "m" (*fx));
}

static inline int frstor_checking(struct i387_fsave_struct *fx)
{
	return check_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
}

static inline int frstor_user(struct i387_fsave_struct __user *fx)
{
	return user_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
}

static inline void fpu_fxsave(struct fpu *fpu)
{
	if (config_enabled(CONFIG_X86_32))
		asm volatile( "fxsave %[fx]" : [fx] "=m" (fpu->state->fxsave));
	else if (config_enabled(CONFIG_AS_FXSAVEQ))
		asm volatile("fxsaveq %[fx]" : [fx] "=m" (fpu->state->fxsave));
	else {
		/* Using "rex64; fxsave %0" is broken because, if the memory
		 * operand uses any extended registers for addressing, a second
		 * REX prefix will be generated (to the assembler, rex64
		 * followed by semicolon is a separate instruction), and hence
		 * the 64-bitness is lost.
		 *
		 * Using "fxsaveq %0" would be the ideal choice, but is only
		 * supported starting with gas 2.16.
		 *
		 * Using, as a workaround, the properly prefixed form below
		 * isn't accepted by any binutils version so far released,
		 * complaining that the same type of prefix is used twice if
		 * an extended register is needed for addressing (fix submitted
		 * to mainline 2005-11-21).
		 *
		 *  asm volatile("rex64/fxsave %0" : "=m" (fpu->state->fxsave));
		 *
		 * This, however, we can work around by forcing the compiler to
		 * select an addressing mode that doesn't require extended
		 * registers.
		 */
		asm volatile( "rex64/fxsave (%[fx])"
			     : "=m" (fpu->state->fxsave)
			     : [fx] "R" (&fpu->state->fxsave));
	}
}

/*
 * These must be called with preempt disabled. Returns
 * 'true' if the FPU state is still intact.
 */
static inline int fpu_save_init(struct fpu *fpu)
{
	if (use_xsave()) {
		fpu_xsave(fpu);

		/*
		 * xsave header may indicate the init state of the FP.
		 */
		if (!(fpu->state->xsave.xsave_hdr.xstate_bv & XSTATE_FP))
			return 1;
	} else if (use_fxsr()) {
		fpu_fxsave(fpu);
	} else {
		asm volatile("fnsave %[fx]; fwait"
			     : [fx] "=m" (fpu->state->fsave));
		return 0;
	}

	/*
	 * If exceptions are pending, we need to clear them so
	 * that we don't randomly get exceptions later.
	 *
	 * FIXME! Is this perhaps only true for the old-style
	 * irq13 case? Maybe we could leave the x87 state
	 * intact otherwise?
	 */
	if (unlikely(fpu->state->fxsave.swd & X87_FSW_ES)) {
		asm volatile("fnclex");
		return 0;
	}
	return 1;
}

static inline int __save_init_fpu(struct task_struct *tsk)
{
	return fpu_save_init(&tsk->thread.fpu);
}

static inline int fpu_restore_checking(struct fpu *fpu)
{
	if (use_xsave())
		return fpu_xrstor_checking(&fpu->state->xsave);
	else if (use_fxsr())
		return fxrstor_checking(&fpu->state->fxsave);
	else
		return frstor_checking(&fpu->state->fsave);
}

static inline int restore_fpu_checking(struct task_struct *tsk)
{
	/*
	 * AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception is
	 * pending. Clear the x87 state here by setting it to fixed values.
	 * "m" is an arbitrary variable that should be in L1.
	 */
	if (unlikely(static_cpu_has_bug_safe(X86_BUG_FXSAVE_LEAK))) {
		asm volatile(
			"fnclex\n\t"
			"emms\n\t"
			"fildl %P[addr]"	/* set F?P to defined value */
			: : [addr] "m" (tsk->thread.fpu.has_fpu));
	}

	return fpu_restore_checking(&tsk->thread.fpu);
}

/*
 * Software FPU state helpers. Careful: these need
 * preemption protection *and* they need to be
 * properly paired with the CR0.TS changes!
 */
static inline int __thread_has_fpu(struct task_struct *tsk)
{
	return tsk->thread.fpu.has_fpu;
}

/* Must be paired with an 'stts' after! */
static inline void __thread_clear_has_fpu(struct task_struct *tsk)
{
	tsk->thread.fpu.has_fpu = 0;
	this_cpu_write(fpu_owner_task, NULL);
}

/* Must be paired with a 'clts' before! */
static inline void __thread_set_has_fpu(struct task_struct *tsk)
{
	tsk->thread.fpu.has_fpu = 1;
	this_cpu_write(fpu_owner_task, tsk);
}

/*
 * Encapsulate the CR0.TS handling together with the
 * software flag.
 *
 * These generally need preemption protection to work,
 * so try to avoid using them on their own.
 */
static inline void __thread_fpu_end(struct task_struct *tsk)
{
	__thread_clear_has_fpu(tsk);
	if (!use_eager_fpu())
		stts();
}

static inline void __thread_fpu_begin(struct task_struct *tsk)
{
	if (!use_eager_fpu())
		clts();
	__thread_set_has_fpu(tsk);
}

static inline void __drop_fpu(struct task_struct *tsk)
{
	if (__thread_has_fpu(tsk)) {
		/* Ignore delayed exceptions from user space */
		asm volatile("1: fwait\n"
			     "2:\n"
			     _ASM_EXTABLE(1b, 2b));
		__thread_fpu_end(tsk);
	}
}

static inline void drop_fpu(struct task_struct *tsk)
{
	/*
	 * Forget coprocessor state..
	 */
	preempt_disable();
	tsk->thread.fpu_counter = 0;
	__drop_fpu(tsk);
	clear_stopped_child_used_math(tsk);
	preempt_enable();
}
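
/*
 * Reset the FPU registers to the init state: in lazy mode simply drop the
 * task's state, in eager mode load the init state into the registers
 * directly from init_xstate_buf.
 */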
static inline void drop_init_fpu(struct task_struct *tsk)
{
	if (!use_eager_fpu())
		drop_fpu(tsk);
	else {
		if (use_xsave())
			xrstor_state(init_xstate_buf, -1);
		else
			fxrstor_checking(&init_xstate_buf->i387);
	}
}

/*
 * FPU state switching for scheduling.
 *
 * This is a two-stage process:
 *
 *  - switch_fpu_prepare() saves the old state and
 *    sets the new state of the CR0.TS bit. This is
 *    done within the context of the old process.
 *
 *  - switch_fpu_finish() restores the new state as
 *    necessary.
 */
typedef struct { int preload; } fpu_switch_t;

static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct task_struct *new, int cpu)
{
	fpu_switch_t fpu;

	/*
	 * If the task has used the math, pre-load the FPU on xsave processors
	 * or if the past 5 consecutive context-switches used math.
	 */
	fpu.preload = tsk_used_math(new) &&
		      (use_eager_fpu() || new->thread.fpu_counter > 5);

	if (__thread_has_fpu(old)) {
		if (!__save_init_fpu(old))
			task_disable_lazy_fpu_restore(old);
		else
			old->thread.fpu.last_cpu = cpu;

		/* But leave fpu_owner_task! */
		old->thread.fpu.has_fpu = 0;

		/* Don't change CR0.TS if we just switch! */
		if (fpu.preload) {
			new->thread.fpu_counter++;
			__thread_set_has_fpu(new);
			prefetch(new->thread.fpu.state);
		} else if (!use_eager_fpu())
			stts();
	} else {
		old->thread.fpu_counter = 0;
		task_disable_lazy_fpu_restore(old);
		if (fpu.preload) {
			new->thread.fpu_counter++;
			if (fpu_lazy_restore(new, cpu))
				fpu.preload = 0;
			else
				prefetch(new->thread.fpu.state);
			__thread_fpu_begin(new);
		}
	}
	return fpu;
}

/*
 * By the time this gets called, we've already cleared CR0.TS and
 * given the process the FPU if we are going to preload the FPU
 * state - all we need to do is to conditionally restore the register
 * state itself.
 */
static inline void switch_fpu_finish(struct task_struct *new, fpu_switch_t fpu)
{
	if (fpu.preload) {
		if (unlikely(restore_fpu_checking(new)))
			drop_init_fpu(new);
	}
}
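
/*
 * A minimal usage sketch (illustrative only; the real caller is the
 * context-switch path, and the 'prev_p'/'next_p' names are made up):
 *
 *	fpu_switch_t fpu_switch;
 *
 *	fpu_switch = switch_fpu_prepare(prev_p, next_p, cpu);
 *	... switch stacks and per-thread state over to next_p ...
 *	switch_fpu_finish(next_p, fpu_switch);
 */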

/*
 * Signal frame handlers...
 */
extern int save_xstate_sig(void __user *buf, void __user *fx, int size);
extern int __restore_xstate_sig(void __user *buf, void __user *fx, int size);

static inline int xstate_sigframe_size(void)
{
	return use_xsave() ? xstate_size + FP_XSTATE_MAGIC2_SIZE : xstate_size;
}
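
/*
 * Restore the extended state from a signal frame. For a 32-bit frame the
 * legacy fsave area precedes the fxsave/xsave area, so the fx pointer and
 * the total size are adjusted before calling __restore_xstate_sig().
 */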
static inline int restore_xstate_sig(void __user *buf, int ia32_frame)
{
	void __user *buf_fx = buf;
	int size = xstate_sigframe_size();

	if (ia32_frame && use_fxsr()) {
		buf_fx = buf + sizeof(struct i387_fsave_struct);
		size += sizeof(struct i387_fsave_struct);
	}

	return __restore_xstate_sig(buf, buf_fx, size);
}

/*
 * Needs to be preemption-safe.
 *
 * NOTE! user_fpu_begin() must be used only immediately before restoring
 * the FPU state. It does not do any saving/restoring on its own.
 */
static inline void user_fpu_begin(void)
{
	preempt_disable();
	if (!user_has_fpu())
		__thread_fpu_begin(current);
	preempt_enable();
}
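
/* Save the current FPU register state into the task's state buffer. */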
static inline void __save_fpu(struct task_struct *tsk)
{
	if (use_xsave()) {
		if (unlikely(system_state == SYSTEM_BOOTING))
			xsave_state_booting(&tsk->thread.fpu.state->xsave, -1);
		else
			xsave_state(&tsk->thread.fpu.state->xsave, -1);
	} else
		fpu_fxsave(&tsk->thread.fpu);
}

/*
 * i387 state interaction
 */
static inline unsigned short get_fpu_cwd(struct task_struct *tsk)
{
	if (cpu_has_fxsr)
		return tsk->thread.fpu.state->fxsave.cwd;
	else
		return (unsigned short)tsk->thread.fpu.state->fsave.cwd;
}

static inline unsigned short get_fpu_swd(struct task_struct *tsk)
{
	if (cpu_has_fxsr)
		return tsk->thread.fpu.state->fxsave.swd;
	else
		return (unsigned short)tsk->thread.fpu.state->fsave.swd;
}

static inline unsigned short get_fpu_mxcsr(struct task_struct *tsk)
{
	if (cpu_has_xmm)
		return tsk->thread.fpu.state->fxsave.mxcsr;
	else
		return MXCSR_DEFAULT;
}
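
/*
 * Allocation helpers for the per-task extended state buffer. The buffer is
 * carved from a dedicated slab cache (task_xstate_cachep) and must be
 * 16-byte aligned for fxsave/fxrstor.
 */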
static bool fpu_allocated(struct fpu *fpu)
{
	return fpu->state != NULL;
}

static inline int fpu_alloc(struct fpu *fpu)
{
	if (fpu_allocated(fpu))
		return 0;
	fpu->state = kmem_cache_alloc(task_xstate_cachep, GFP_KERNEL);
	if (!fpu->state)
		return -ENOMEM;
	WARN_ON((unsigned long)fpu->state & 15);
	return 0;
}

static inline void fpu_free(struct fpu *fpu)
{
	if (fpu->state) {
		kmem_cache_free(task_xstate_cachep, fpu->state);
		fpu->state = NULL;
	}
}

static inline void fpu_copy(struct task_struct *dst, struct task_struct *src)
{
	if (use_eager_fpu()) {
		memset(&dst->thread.fpu.state->xsave, 0, xstate_size);
		__save_fpu(dst);
	} else {
		struct fpu *dfpu = &dst->thread.fpu;
		struct fpu *sfpu = &src->thread.fpu;

		unlazy_fpu(src);
		memcpy(dfpu->state, sfpu->state, xstate_size);
	}
}
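
/*
 * alloc_mathframe() reserves room for the FPU state below 'sp' while a
 * signal frame is being laid out. A rough usage sketch (illustrative only;
 * the real caller is the signal-frame setup code):
 *
 *	unsigned long buf_fx = 0, math_size = 0;
 *
 *	sp = alloc_mathframe(sp, is_ia32_frame(), &buf_fx, &math_size);
 *	if (save_xstate_sig((void __user *)sp, (void __user *)buf_fx,
 *			    math_size) < 0)
 *		... fail the signal delivery ...
 */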
static inline unsigned long
alloc_mathframe(unsigned long sp, int ia32_frame, unsigned long *buf_fx,
		unsigned long *size)
{
	unsigned long frame_size = xstate_sigframe_size();

	*buf_fx = sp = round_down(sp - frame_size, 64);
	if (ia32_frame && use_fxsr()) {
		frame_size += sizeof(struct i387_fsave_struct);
		sp -= sizeof(struct i387_fsave_struct);
	}

	*size = frame_size;

	return sp;
}

#endif