return 0;
}
+/*
+ * Restore an in-memory FPU state image to the hardware FPU registers,
+ * selecting the restore instruction by CPU capability: XRSTOR when
+ * xsave is in use, else FXRSTOR, else legacy FRSTOR.
+ *
+ * Signature change: take the fpregs_state union directly instead of a
+ * struct fpu, so callers that only hold a state image (e.g. the KVM
+ * guest-FPU path below) can use this helper without a struct fpu.
+ * NOTE(review): comment lines added here are not reflected in any
+ * enclosing @@ hunk line counts — reconcile before applying the patch.
+ */
-static inline void __copy_kernel_to_fpregs(struct fpu *fpu)
+static inline void __copy_kernel_to_fpregs(union fpregs_state *fpstate)
{
if (use_xsave()) {
- copy_kernel_to_xregs(&fpu->state.xsave, -1);
+ copy_kernel_to_xregs(&fpstate->xsave, -1);
} else {
if (use_fxsr())
- copy_kernel_to_fxregs(&fpstate->fxsave);
+ copy_kernel_to_fxregs(&fpstate->fxsave);
else
- copy_kernel_to_fregs(&fpu->state.fsave);
+ copy_kernel_to_fregs(&fpstate->fsave);
}
}
-static inline void copy_kernel_to_fpregs(struct fpu *fpu)
+static inline void copy_kernel_to_fpregs(union fpregs_state *fpstate)
{
/*
* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception is
"fnclex\n\t"
"emms\n\t"
"fildl %P[addr]" /* set F?P to defined value */
- : : [addr] "m" (fpu->fpregs_active));
+ : : [addr] "m" (fpstate));
}
- __copy_kernel_to_fpregs(fpu);
+ __copy_kernel_to_fpregs(fpstate);
}
extern int copy_fpstate_to_sigframe(void __user *buf, void __user *fp, int size);
+/*
+ * Complete an FPU context switch: if the earlier prepare step set the
+ * preload flag (presumably in switch_fpu_prepare() — not visible in
+ * this hunk), restore the incoming task's saved FPU state into the
+ * registers.  Updated for the new copy_kernel_to_fpregs() signature,
+ * which now takes &fpu->state rather than the struct fpu itself.
+ */
static inline void switch_fpu_finish(struct fpu *new_fpu, fpu_switch_t fpu_switch)
{
if (fpu_switch.preload)
- copy_kernel_to_fpregs(new_fpu);
+ copy_kernel_to_fpregs(&new_fpu->state);
}
/*
struct fpu *fpu = ¤t->thread.fpu;
if (fpu->fpregs_active)
- copy_kernel_to_fpregs(fpu);
+ copy_kernel_to_fpregs(&fpu->state);
else
__fpregs_deactivate_hw();
/* Avoid __kernel_fpu_begin() right after fpregs_activate() */
kernel_fpu_disable();
fpregs_activate(fpu);
- copy_kernel_to_fpregs(fpu);
+ copy_kernel_to_fpregs(&fpu->state);
fpu->counter++;
kernel_fpu_enable();
}
kvm_put_guest_xcr0(vcpu);
vcpu->guest_fpu_loaded = 1;
__kernel_fpu_begin();
- __copy_kernel_to_fpregs(&vcpu->arch.guest_fpu);
+ __copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state);
trace_kvm_fpu(1);
}