--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ ... @@
	return 0;
}

-static inline int __copy_fpstate_to_fpregs(struct fpu *fpu)
+static inline int __copy_kernel_to_fpregs(struct fpu *fpu)
{
	if (use_xsave()) {
		copy_kernel_to_xregs(&fpu->state.xsave, -1);
		return 0;
	} else {
		if (use_fxsr())
			return copy_kernel_to_fxregs(&fpu->state.fxsave);
		else
			return copy_kernel_to_fregs(&fpu->state.fsave);
	}
}
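
/*
 * The __-prefixed variant above is the raw restore: it picks XSAVE, FXRSTOR
 * or legacy FRSTOR based on CPU features. The wrapper below adds the AMD
 * K7/K8 FXSAVE-leak erratum workaround before doing the actual restore:
 */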
-static inline int copy_fpstate_to_fpregs(struct fpu *fpu)
+static inline int copy_kernel_to_fpregs(struct fpu *fpu)
{
	/*
	 * AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception is
	 * pending. Clear the x87 state here by setting it to fixed values.
	 * "m" is a random variable that should be in L1.
	 */
	if (unlikely(static_cpu_has_bug_safe(X86_BUG_FXSAVE_LEAK))) {
		asm volatile(
			"fnclex\n\t"
			"emms\n\t"
			"fildl %P[addr]"	/* set F?P to defined value */
			: : [addr] "m" (fpu->fpregs_active));
	}

-	return __copy_fpstate_to_fpregs(fpu);
+	return __copy_kernel_to_fpregs(fpu);
}

extern int copy_fpstate_to_sigframe(void __user *buf, void __user *fp, int size);

@@ ... @@
static inline void switch_fpu_finish(struct fpu *new_fpu, fpu_switch_t fpu_switch)
{
	if (fpu_switch.preload) {
-		if (unlikely(copy_fpstate_to_fpregs(new_fpu))) {
+		if (unlikely(copy_kernel_to_fpregs(new_fpu))) {
			WARN_ON_FPU(1);
			fpu__clear(new_fpu);
		}
	}
}
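
/*
 * Note the recurring error pattern at the call sites: a failed restore of
 * kernel-side FPU state is treated as corruption, so the code warns once
 * via WARN_ON_FPU() and resets the state with fpu__clear() rather than
 * running with stale registers.
 */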

--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ ... @@ void __kernel_fpu_end(void)
	struct fpu *fpu = &current->thread.fpu;

	if (fpu->fpregs_active) {
-		if (WARN_ON_FPU(copy_fpstate_to_fpregs(fpu)))
+		if (WARN_ON_FPU(copy_kernel_to_fpregs(fpu)))
			fpu__clear(fpu);
	} else {
		__fpregs_deactivate_hw();
	}

@@ ... @@ fpu__restore()
	/* Avoid __kernel_fpu_begin() right after fpregs_activate() */
	kernel_fpu_disable();
	fpregs_activate(fpu);
-	if (unlikely(copy_fpstate_to_fpregs(fpu))) {
+	if (unlikely(copy_kernel_to_fpregs(fpu))) {
		/* Copying the kernel state to FPU registers should never fail: */
		WARN_ON_FPU(1);
		fpu__clear(fpu);
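
/*
 * fpu__restore() performs the restore with kernel FPU use disabled, per the
 * comment above: nothing may clobber the registers between
 * fpregs_activate() and the copy_kernel_to_fpregs() call that fills them.
 */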

--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ ... @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
	kvm_put_guest_xcr0(vcpu);
	vcpu->guest_fpu_loaded = 1;
	__kernel_fpu_begin();
-	__copy_fpstate_to_fpregs(&vcpu->arch.guest_fpu);
+	__copy_kernel_to_fpregs(&vcpu->arch.guest_fpu);
	trace_kvm_fpu(1);
}
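
/*
 * The KVM path calls the bare __copy_kernel_to_fpregs() directly: the guest
 * fpstate is loaded inside a __kernel_fpu_begin() section, and the
 * FXSAVE-leak fixup done by the copy_kernel_to_fpregs() wrapper is skipped
 * here.
 */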