static inline void fpu_fxsave(struct fpu *fpu)
{
if (config_enabled(CONFIG_X86_32))
- asm volatile( "fxsave %[fx]" : [fx] "=m" (fpu->state->fxsave));
+ asm volatile( "fxsave %[fx]" : [fx] "=m" (fpu->state.fxsave));
else if (config_enabled(CONFIG_AS_FXSAVEQ))
- asm volatile("fxsaveq %[fx]" : [fx] "=m" (fpu->state->fxsave));
+ asm volatile("fxsaveq %[fx]" : [fx] "=m" (fpu->state.fxsave));
else {
/* Using "rex64; fxsave %0" is broken because, if the memory
* operand uses any extended registers for addressing, a second
* an extended register is needed for addressing (fix submitted
* to mainline 2005-11-21).
*
- * asm volatile("rex64/fxsave %0" : "=m" (fpu->state->fxsave));
+ * asm volatile("rex64/fxsave %0" : "=m" (fpu->state.fxsave));
*
* This, however, we can work around by forcing the compiler to
* select an addressing mode that doesn't require extended
* registers.
*/
asm volatile( "rex64/fxsave (%[fx])"
- : "=m" (fpu->state->fxsave)
- : [fx] "R" (&fpu->state->fxsave));
+ : "=m" (fpu->state.fxsave)
+ : [fx] "R" (&fpu->state.fxsave));
}
}
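/*
 * Editorial illustration, not part of the patch: the workaround above
 * relies on the "R" constraint, which limits the operand to a legacy
 * (non-REX) integer register, so the hand-written rex64 prefix stays
 * the only REX prefix in the instruction. A minimal standalone sketch,
 * with a hypothetical 512-byte buffer type standing in for
 * struct i387_fxsave_struct:
 */
struct fxsave_buf { unsigned char data[512] __attribute__((aligned(16))); };

static inline void fxsave_legacy_addr(struct fxsave_buf *buf)
{
	/* "R" keeps the address in a legacy register such as %rax: */
	asm volatile("rex64/fxsave (%[fx])"
		     : "=m" (*buf)
		     : [fx] "R" (buf));
}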
static inline int copy_fpregs_to_fpstate(struct fpu *fpu)
{
if (likely(use_xsave())) {
- xsave_state(&fpu->state->xsave);
+ xsave_state(&fpu->state.xsave);
return 1;
}
/*
* Legacy FPU register saving, FNSAVE always clears FPU registers,
* so we have to mark them inactive:
*/
- asm volatile("fnsave %[fx]; fwait" : [fx] "=m" (fpu->state->fsave));
+ asm volatile("fnsave %[fx]; fwait" : [fx] "=m" (fpu->state.fsave));
return 0;
}
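/*
 * Editorial sketch, not from the patch: how a caller might consume the
 * return value. 1 means the registers are still live in the CPU after
 * the save; 0 means FNSAVE was used, which clears the register state
 * as a side effect, so the registers must be treated as inactive.
 * (Hypothetical helper name.)
 */
static inline void save_fpregs_and_mark(struct fpu *fpu)
{
	if (!copy_fpregs_to_fpstate(fpu))
		fpu->fpregs_active = 0;	/* FNSAVE clobbered the registers */
}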
static inline int fpu_restore_checking(struct fpu *fpu)
{
if (use_xsave())
- return fpu_xrstor_checking(&fpu->state->xsave);
+ return fpu_xrstor_checking(&fpu->state.xsave);
else if (use_fxsr())
- return fxrstor_checking(&fpu->state->fxsave);
+ return fxrstor_checking(&fpu->state.fxsave);
else
- return frstor_checking(&fpu->state->fsave);
+ return frstor_checking(&fpu->state.fsave);
}
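/*
 * Editorial note: the three restore branches mirror the save side
 * one-for-one: XSAVE state is reloaded with XRSTOR, FXSAVE state with
 * FXRSTOR, and legacy FNSAVE state with FRSTOR. Restoring with the
 * wrong variant would misinterpret the saved memory image.
 */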
static inline fpu_switch_t switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
{
fpu_switch_t fpu;
if (fpu.preload) {
new_fpu->counter++;
__fpregs_activate(new_fpu);
- prefetch(new_fpu->state);
+ prefetch(&new_fpu->state);
} else if (!use_eager_fpu())
stts();
} else {
if (fpu_want_lazy_restore(new_fpu, cpu))
fpu.preload = 0;
else
- prefetch(new_fpu->state);
+ prefetch(&new_fpu->state);
fpregs_activate(new_fpu);
}
}
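/*
 * Editorial sketch of the data-structure change this patch implements:
 * fpu->state was previously a pointer to a separately allocated union,
 * and is now embedded directly in struct fpu. Hence every "fpu->state->"
 * becomes "fpu->state." and an explicit address-of is needed wherever a
 * pointer was passed before, as in the prefetch() calls above. Roughly
 * (field layout abbreviated):
 *
 *	struct fpu {
 *		...
 *		union thread_xstate state;	// was: union thread_xstate *state;
 *	};
 */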
static inline unsigned short get_fpu_cwd(struct task_struct *tsk)
{
if (cpu_has_fxsr) {
- return tsk->thread.fpu.state->fxsave.cwd;
+ return tsk->thread.fpu.state.fxsave.cwd;
} else {
- return (unsigned short)tsk->thread.fpu.state->fsave.cwd;
+ return (unsigned short)tsk->thread.fpu.state.fsave.cwd;
}
}
static inline unsigned short get_fpu_swd(struct task_struct *tsk)
{
if (cpu_has_fxsr) {
- return tsk->thread.fpu.state->fxsave.swd;
+ return tsk->thread.fpu.state.fxsave.swd;
} else {
- return (unsigned short)tsk->thread.fpu.state->fsave.swd;
+ return (unsigned short)tsk->thread.fpu.state.fsave.swd;
}
}
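/*
 * Editorial illustration, not from the patch: the classic consumer of
 * these accessors is the x87 exception handler, which derives the set
 * of pending, unmasked exceptions by masking the status word with the
 * inverted control word. (Hypothetical helper name.)
 */
static inline unsigned short fpu_pending_exceptions(struct task_struct *tsk)
{
	unsigned short cwd = get_fpu_cwd(tsk);
	unsigned short swd = get_fpu_swd(tsk);

	/* set cwd bits mask exceptions, so ~cwd selects the unmasked ones: */
	return swd & ~cwd;
}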
static inline unsigned short get_fpu_mxcsr(struct task_struct *tsk)
{
if (cpu_has_xmm) {
- return tsk->thread.fpu.state->fxsave.mxcsr;
+ return tsk->thread.fpu.state.fxsave.mxcsr;
} else {
return MXCSR_DEFAULT;
}
}
static void __save_fpu(struct fpu *fpu)
{
if (use_xsave()) {
if (unlikely(system_state == SYSTEM_BOOTING))
- xsave_state_booting(&fpu->state->xsave);
+ xsave_state_booting(&fpu->state.xsave);
else
- xsave_state(&fpu->state->xsave);
+ xsave_state(&fpu->state.xsave);
} else {
fpu_fxsave(fpu);
}
}
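/*
 * Editorial note: the SYSTEM_BOOTING special case exists because the
 * optimized xsave_state() variant is patched in via the instruction
 * alternatives mechanism, which has not yet run during early boot, so
 * a fixed instruction encoding has to be used there.
 */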
void fpstate_init(struct fpu *fpu)
{
if (!cpu_has_fpu) {
- finit_soft_fpu(&fpu->state->soft);
+ finit_soft_fpu(&fpu->state.soft);
return;
}
- memset(fpu->state, 0, xstate_size);
+ memset(&fpu->state, 0, xstate_size);
if (cpu_has_fxsr) {
- fx_finit(&fpu->state->fxsave);
+ fx_finit(&fpu->state.fxsave);
} else {
- struct i387_fsave_struct *fp = &fpu->state->fsave;
+ struct i387_fsave_struct *fp = &fpu->state.fsave;
fp->cwd = 0xffff037fu;
fp->swd = 0xffff0000u;
fp->twd = 0xffffffffu;
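/*
 * Editorial note on the magic values: cwd 0x037f is the x87 power-on
 * control word (all exceptions masked, extended precision, round to
 * nearest), swd 0x0000 in the low word means no exceptions are pending,
 * and twd with all bits set marks every register tag as "empty".
 */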
int fpstate_alloc(struct fpu *fpu)
{
- if (fpu->state)
- return 0;
-
- fpu->state = kmem_cache_alloc(task_xstate_cachep, GFP_KERNEL);
- if (!fpu->state)
- return -ENOMEM;
-
/* The CPU requires the FPU state to be aligned to 16-byte boundaries: */
- WARN_ON((unsigned long)fpu->state & 15);
+ WARN_ON((unsigned long)&fpu->state & 15);
return 0;
}
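/*
 * Editorial sketch, not from the patch: with the state embedded, its
 * offset within struct fpu is fixed at compile time, so the runtime
 * WARN_ON() above could in principle become a build-time assertion on
 * the member offset (the containing allocation must still be suitably
 * aligned). Hypothetical helper:
 */
static inline void fpstate_check_alignment(void)
{
	BUILD_BUG_ON(offsetof(struct fpu, state) & 15);
}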
void fpstate_free(struct fpu *fpu)
{
- if (fpu->state) {
- kmem_cache_free(task_xstate_cachep, fpu->state);
- fpu->state = NULL;
- }
}
EXPORT_SYMBOL_GPL(fpstate_free);
WARN_ON(src_fpu != &current->thread.fpu);
if (use_eager_fpu()) {
- memset(&dst_fpu->state->xsave, 0, xstate_size);
+ memset(&dst_fpu->state.xsave, 0, xstate_size);
__save_fpu(dst_fpu);
} else {
fpu__save(src_fpu);
- memcpy(dst_fpu->state, src_fpu->state, xstate_size);
+ memcpy(&dst_fpu->state, &src_fpu->state, xstate_size);
}
}
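/*
 * Editorial note: in the eager-FPU case the child's state is produced
 * directly from the live registers, which current (src_fpu's task) is
 * guaranteed to own, whereas in the lazy case the parent's register
 * state must first be flushed to memory by fpu__save() before it can
 * be copied wholesale with memcpy().
 */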
int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
{
dst_fpu->counter = 0;
dst_fpu->fpregs_active = 0;
- dst_fpu->state = NULL;
dst_fpu->last_cpu = -1;
if (src_fpu->fpstate_active) {
sanitize_i387_state(target);
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &fpu->state->fxsave, 0, -1);
+ &fpu->state.fxsave, 0, -1);
}
int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
sanitize_i387_state(target);
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &fpu->state->fxsave, 0, -1);
+ &fpu->state.fxsave, 0, -1);
/*
* mxcsr reserved bits must be masked to zero for security reasons.
*/
- fpu->state->fxsave.mxcsr &= mxcsr_feature_mask;
+ fpu->state.fxsave.mxcsr &= mxcsr_feature_mask;
/*
* update the header bits in the xsave header, indicating the
* presence of FP and SSE state.
*/
if (cpu_has_xsave)
- fpu->state->xsave.header.xfeatures |= XSTATE_FPSSE;
+ fpu->state.xsave.header.xfeatures |= XSTATE_FPSSE;
return ret;
}
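/*
 * Editorial illustration, not from the patch: why the mxcsr masking
 * above matters. If user space could set reserved MXCSR bits, the next
 * FXRSTOR/XRSTOR of this buffer would fault with #GP inside the
 * kernel. (Hypothetical helper name.)
 */
static inline u32 sanitize_mxcsr(u32 mxcsr)
{
	return mxcsr & mxcsr_feature_mask;	/* clear reserved bits */
}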
if (ret)
return ret;
- xsave = &fpu->state->xsave;
+ xsave = &fpu->state.xsave;
/*
* Copy the 48 bytes defined by the software first into the xstate
if (ret)
return ret;
- xsave = &fpu->state->xsave;
+ xsave = &fpu->state.xsave;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, xsave, 0, -1);
/*
void
convert_from_fxsr(struct user_i387_ia32_struct *env, struct task_struct *tsk)
{
- struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state->fxsave;
+ struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state.fxsave;
struct _fpreg *to = (struct _fpreg *) &env->st_space[0];
struct _fpxreg *from = (struct _fpxreg *) &fxsave->st_space[0];
int i;
const struct user_i387_ia32_struct *env)
{
- struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state->fxsave;
+ struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state.fxsave;
struct _fpreg *from = (struct _fpreg *) &env->st_space[0];
struct _fpxreg *to = (struct _fpxreg *) &fxsave->st_space[0];
int i;
if (!cpu_has_fxsr)
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &fpu->state->fsave, 0,
+ &fpu->state.fsave, 0,
-1);
sanitize_i387_state(target);
if (!cpu_has_fxsr)
return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &fpu->state->fsave, 0,
+ &fpu->state.fsave, 0,
-1);
if (pos > 0 || count < sizeof(env))
/*
* update the header bits in the xsave header, indicating the
* presence of FP.
*/
if (cpu_has_xsave)
- fpu->state->xsave.header.xfeatures |= XSTATE_FP;
+ fpu->state.xsave.header.xfeatures |= XSTATE_FP;
return ret;
}
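/*
 * Editorial note: setting the feature bit in the xsave header is what
 * makes a later XRSTOR actually load the FP area just written; any
 * state component whose bit is clear in xfeatures is reset to its
 * init configuration instead of being restored from the buffer.
 */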