Migrate this function to pure 'struct fpu' usage.
Reviewed-by: Borislav Petkov <bp@alien8.de>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
stts();
}
-static inline void __thread_fpu_begin(struct task_struct *tsk)
+static inline void __thread_fpu_begin(struct fpu *fpu)
{
if (!use_eager_fpu())
clts();
- __thread_set_has_fpu(&tsk->thread.fpu);
+ __thread_set_has_fpu(fpu);
}
static inline void drop_fpu(struct task_struct *tsk)
fpu.preload = 0;
else
prefetch(new->thread.fpu.state);
- __thread_fpu_begin(new);
+ __thread_fpu_begin(new_fpu);
}
}
return fpu;
*/
static inline void user_fpu_begin(void)
{
+ struct fpu *fpu = &current->thread.fpu;
+
preempt_disable();
if (!user_has_fpu())
- __thread_fpu_begin(current);
+ __thread_fpu_begin(fpu);
preempt_enable();
}
void fpu__restore(void)
{
struct task_struct *tsk = current;
+ struct fpu *fpu = &tsk->thread.fpu;
if (!tsk_used_math(tsk)) {
local_irq_enable();
/* Avoid __kernel_fpu_begin() right after __thread_fpu_begin() */
kernel_fpu_disable();
- __thread_fpu_begin(tsk);
+ __thread_fpu_begin(fpu);
if (unlikely(restore_fpu_checking(tsk))) {
fpu_reset_state(tsk);
force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk);