diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index eb8fa0f9d27989c5ada048747a421d9e7d28e674..6193b7a9cf00bd68fba9527c00e991d9828fcc4e 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -450,7 +450,7 @@ static inline int copy_fpregs_to_fpstate(struct fpu *fpu)
 	return 0;
 }
 
-static inline int __copy_fpstate_to_fpregs(struct fpu *fpu)
+static inline int __copy_kernel_to_fpregs(struct fpu *fpu)
 {
 	if (use_xsave()) {
 		copy_kernel_to_xregs(&fpu->state.xsave, -1);
@@ -463,7 +463,7 @@ static inline int __copy_fpstate_to_fpregs(struct fpu *fpu)
 	}
 }
 
-static inline int copy_fpstate_to_fpregs(struct fpu *fpu)
+static inline int copy_kernel_to_fpregs(struct fpu *fpu)
 {
 	/*
 	 * AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception is
@@ -478,7 +478,7 @@ static inline int copy_fpstate_to_fpregs(struct fpu *fpu)
 			: : [addr] "m" (fpu->fpregs_active));
 	}
 
-	return __copy_fpstate_to_fpregs(fpu);
+	return __copy_kernel_to_fpregs(fpu);
 }
 
 extern int copy_fpstate_to_sigframe(void __user *buf, void __user *fp, int size);
@@ -647,7 +647,7 @@ switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
 static inline void switch_fpu_finish(struct fpu *new_fpu, fpu_switch_t fpu_switch)
 {
 	if (fpu_switch.preload) {
-		if (unlikely(copy_fpstate_to_fpregs(new_fpu))) {
+		if (unlikely(copy_kernel_to_fpregs(new_fpu))) {
 			WARN_ON_FPU(1);
 			fpu__clear(new_fpu);
 		}
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index 874ef17017507884cd5cba4ccca83269f5776f33..e0e0ee565dc30fb533102300f5c2a8d20204e959 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -127,7 +127,7 @@ void __kernel_fpu_end(void)
 	struct fpu *fpu = &current->thread.fpu;
 
 	if (fpu->fpregs_active) {
-		if (WARN_ON_FPU(copy_fpstate_to_fpregs(fpu)))
+		if (WARN_ON_FPU(copy_kernel_to_fpregs(fpu)))
 			fpu__clear(fpu);
 	} else {
 		__fpregs_deactivate_hw();
@@ -370,7 +370,7 @@ void fpu__restore(struct fpu *fpu)
 	/* Avoid __kernel_fpu_begin() right after fpregs_activate() */
 	kernel_fpu_disable();
 	fpregs_activate(fpu);
-	if (unlikely(copy_fpstate_to_fpregs(fpu))) {
+	if (unlikely(copy_kernel_to_fpregs(fpu))) {
 		/* Copying the kernel state to FPU registers should never fail: */
 		WARN_ON_FPU(1);
 		fpu__clear(fpu);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 989cfc01e2a5e298f16c7ab2d7fe8a7186910fe7..66871f4937fa3f624f7aff2944ac8c10fdf5baf5 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -7030,7 +7030,7 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
 	kvm_put_guest_xcr0(vcpu);
 	vcpu->guest_fpu_loaded = 1;
 	__kernel_fpu_begin();
-	__copy_fpstate_to_fpregs(&vcpu->arch.guest_fpu);
+	__copy_kernel_to_fpregs(&vcpu->arch.guest_fpu);
 	trace_kvm_fpu(1);
 }
 
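Note (not part of the patch): the rename follows the copy_<source>_to_<destination> convention already visible above in copy_fpregs_to_fpstate(), copy_kernel_to_xregs() and copy_fpstate_to_sigframe(), so the direction of every FPU state copy is explicit in the name. Below is a minimal userspace sketch of that convention only; every identifier in it (regs_image, live_regs, copy_regs_to_image, copy_image_to_regs) is a hypothetical stand-in for illustration, not a kernel API.

/*
 * Toy illustration of the copy_<src>_to_<dst> naming direction.
 * All names here are hypothetical stand-ins, not kernel code.
 */
#include <stdio.h>
#include <string.h>

struct regs_image {			/* stand-in for a saved register state image */
	unsigned long st[8];
};

static struct regs_image live_regs;	/* stand-in for the "live" registers */

/* Save direction: live registers -> in-memory state image. */
static void copy_regs_to_image(struct regs_image *img)
{
	memcpy(img, &live_regs, sizeof(*img));
}

/* Restore direction: in-memory state image -> live registers. */
static void copy_image_to_regs(const struct regs_image *img)
{
	memcpy(&live_regs, img, sizeof(*img));
}

int main(void)
{
	struct regs_image saved;

	live_regs.st[0] = 42;
	copy_regs_to_image(&saved);	/* analogous to copy_fpregs_to_fpstate() */

	live_regs.st[0] = 0;		/* registers get clobbered ... */
	copy_image_to_regs(&saved);	/* ... analogous to copy_kernel_to_fpregs() */

	printf("restored st[0] = %lu\n", live_regs.st[0]);
	return 0;
}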