From c4d72e2db3a36bf560b506df8a3490f140aeae26 Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Mon, 27 Apr 2015 07:18:17 +0200
Subject: x86/fpu: Simplify fpstate_init_curr() usage

Now that fpstate_init_curr() is not doing implicit allocations anymore,
almost all uses of it involve a very simple pattern:

	if (!fpu->fpstate_active)
		fpstate_init_curr(fpu);

which is basically activating the FPU fpstate if it was not active
before.

So propagate the check into the function itself, and rename the
function according to its new purpose:

	fpu__activate_curr(fpu);

Reviewed-by: Borislav Petkov
Cc: Andy Lutomirski
Cc: Dave Hansen
Cc: Fenghua Yu
Cc: H. Peter Anvin
Cc: Linus Torvalds
Cc: Oleg Nesterov
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Signed-off-by: Ingo Molnar

diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index 1345ab3..de19fc5 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -44,7 +44,7 @@ extern void fpu__init_system_xstate(void);
 extern void fpu__init_cpu_xstate(void);
 extern void fpu__init_system(struct cpuinfo_x86 *c);
 
-extern void fpstate_init_curr(struct fpu *fpu);
+extern void fpu__activate_curr(struct fpu *fpu);
 extern void fpstate_init(struct fpu *fpu);
 extern void fpu__clear(struct task_struct *tsk);
 
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index 97b4f9e..74cc322 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -259,19 +259,21 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
 }
 
 /*
- * Initialize the current task's in-memory FPU context:
+ * Activate the current task's in-memory FPU context,
+ * if it has not been used before:
  */
-void fpstate_init_curr(struct fpu *fpu)
+void fpu__activate_curr(struct fpu *fpu)
 {
 	WARN_ON_ONCE(fpu != &current->thread.fpu);
-	WARN_ON_ONCE(fpu->fpstate_active);
 
-	fpstate_init(fpu);
+	if (!fpu->fpstate_active) {
+		fpstate_init(fpu);
 
-	/* Safe to do for the current task: */
-	fpu->fpstate_active = 1;
+		/* Safe to do for the current task: */
+		fpu->fpstate_active = 1;
+	}
 }
-EXPORT_SYMBOL_GPL(fpstate_init_curr);
+EXPORT_SYMBOL_GPL(fpu__activate_curr);
 
 /*
  * This function is called before we modify a stopped child's
@@ -325,8 +327,7 @@ void fpu__restore(void)
 	struct task_struct *tsk = current;
 	struct fpu *fpu = &tsk->thread.fpu;
 
-	if (!fpu->fpstate_active)
-		fpstate_init_curr(fpu);
+	fpu__activate_curr(fpu);
 
 	/* Avoid __kernel_fpu_begin() right after fpregs_activate() */
 	kernel_fpu_disable();
@@ -352,7 +353,7 @@ void fpu__clear(struct task_struct *tsk)
 		drop_fpu(fpu);
 	} else {
 		if (!fpu->fpstate_active) {
-			fpstate_init_curr(fpu);
+			fpu__activate_curr(fpu);
 			user_fpu_begin();
 		}
 		restore_init_xstate();
diff --git a/arch/x86/kernel/fpu/xsave.c b/arch/x86/kernel/fpu/xsave.c
index 49d9f3d..f549e2a 100644
--- a/arch/x86/kernel/fpu/xsave.c
+++ b/arch/x86/kernel/fpu/xsave.c
@@ -358,8 +358,7 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
 	if (!access_ok(VERIFY_READ, buf, size))
 		return -EACCES;
 
-	if (!fpu->fpstate_active)
-		fpstate_init_curr(fpu);
+	fpu__activate_curr(fpu);
 
 	if (!static_cpu_has(X86_FEATURE_FPU))
 		return fpregs_soft_set(current, NULL,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 92a8490..9ff4df7 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6601,8 +6601,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	int r;
 	sigset_t sigsaved;
 
-	if (!fpu->fpstate_active)
-		fpstate_init_curr(fpu);
+	fpu__activate_curr(fpu);
 
 	if (vcpu->sigset_active)
 		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
diff --git a/arch/x86/math-emu/fpu_entry.c b/arch/x86/math-emu/fpu_entry.c
index 4c6ab79..5b85051 100644
--- a/arch/x86/math-emu/fpu_entry.c
+++ b/arch/x86/math-emu/fpu_entry.c
@@ -149,8 +149,7 @@ void math_emulate(struct math_emu_info *info)
	struct desc_struct code_descriptor;
 	struct fpu *fpu = &current->thread.fpu;
 
-	if (!fpu->fpstate_active)
-		fpstate_init_curr(fpu);
+	fpu__activate_curr(fpu);
 
 #ifdef RE_ENTRANT_CHECKING
 	if (emulating) {
--
cgit v0.10.2