diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index 10663b02ee22f2c539d191b6a30b6b5288b1ff1a..4ce830fb3f313ed2fdf525a238f83b0dd868fcfb 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -232,9 +232,9 @@ static inline int frstor_user(struct i387_fsave_struct __user *fx)
 static inline void fpu_fxsave(struct fpu *fpu)
 {
 	if (config_enabled(CONFIG_X86_32))
-		asm volatile( "fxsave %[fx]" : [fx] "=m" (fpu->state->fxsave));
+		asm volatile( "fxsave %[fx]" : [fx] "=m" (fpu->state.fxsave));
 	else if (config_enabled(CONFIG_AS_FXSAVEQ))
-		asm volatile("fxsaveq %[fx]" : [fx] "=m" (fpu->state->fxsave));
+		asm volatile("fxsaveq %[fx]" : [fx] "=m" (fpu->state.fxsave));
 	else {
 		/* Using "rex64; fxsave %0" is broken because, if the memory
 		 * operand uses any extended registers for addressing, a second
@@ -251,15 +251,15 @@ static inline void fpu_fxsave(struct fpu *fpu)
 		 * an extended register is needed for addressing (fix submitted
 		 * to mainline 2005-11-21).
 		 *
-		 *  asm volatile("rex64/fxsave %0" : "=m" (fpu->state->fxsave));
+		 *  asm volatile("rex64/fxsave %0" : "=m" (fpu->state.fxsave));
 		 *
 		 * This, however, we can work around by forcing the compiler to
 		 * select an addressing mode that doesn't require extended
 		 * registers.
 		 */
 		asm volatile( "rex64/fxsave (%[fx])"
-			     : "=m" (fpu->state->fxsave)
-			     : [fx] "R" (&fpu->state->fxsave));
+			     : "=m" (fpu->state.fxsave)
+			     : [fx] "R" (&fpu->state.fxsave));
 	}
 }
 
@@ -276,7 +276,7 @@ static inline void fpu_fxsave(struct fpu *fpu)
 static inline int copy_fpregs_to_fpstate(struct fpu *fpu)
 {
 	if (likely(use_xsave())) {
-		xsave_state(&fpu->state->xsave);
+		xsave_state(&fpu->state.xsave);
 		return 1;
 	}
 
@@ -289,7 +289,7 @@ static inline int copy_fpregs_to_fpstate(struct fpu *fpu)
 	 * Legacy FPU register saving, FNSAVE always clears FPU registers,
 	 * so we have to mark them inactive:
 	 */
-	asm volatile("fnsave %[fx]; fwait" : [fx] "=m" (fpu->state->fsave));
+	asm volatile("fnsave %[fx]; fwait" : [fx] "=m" (fpu->state.fsave));
 
 	return 0;
 }
@@ -299,11 +299,11 @@ extern void fpu__save(struct fpu *fpu);
 static inline int fpu_restore_checking(struct fpu *fpu)
 {
 	if (use_xsave())
-		return fpu_xrstor_checking(&fpu->state->xsave);
+		return fpu_xrstor_checking(&fpu->state.xsave);
 	else if (use_fxsr())
-		return fxrstor_checking(&fpu->state->fxsave);
+		return fxrstor_checking(&fpu->state.fxsave);
 	else
-		return frstor_checking(&fpu->state->fsave);
+		return frstor_checking(&fpu->state.fsave);
 }
 
 static inline int restore_fpu_checking(struct fpu *fpu)
@@ -454,7 +454,7 @@ switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
 		if (fpu.preload) {
 			new_fpu->counter++;
 			__fpregs_activate(new_fpu);
-			prefetch(new_fpu->state);
+			prefetch(&new_fpu->state);
 		} else if (!use_eager_fpu())
 			stts();
 	} else {
@@ -465,7 +465,7 @@ switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
 			if (fpu_want_lazy_restore(new_fpu, cpu))
 				fpu.preload = 0;
 			else
-				prefetch(new_fpu->state);
+				prefetch(&new_fpu->state);
 			fpregs_activate(new_fpu);
 		}
 	}
@@ -534,25 +534,25 @@ static inline void user_fpu_begin(void)
 static inline unsigned short get_fpu_cwd(struct task_struct *tsk)
 {
 	if (cpu_has_fxsr) {
-		return tsk->thread.fpu.state->fxsave.cwd;
+		return tsk->thread.fpu.state.fxsave.cwd;
 	} else {
-		return (unsigned short)tsk->thread.fpu.state->fsave.cwd;
+		return (unsigned short)tsk->thread.fpu.state.fsave.cwd;
 	}
 }
 
 static inline unsigned short get_fpu_swd(struct task_struct *tsk)
 {
 	if (cpu_has_fxsr) {
-		return tsk->thread.fpu.state->fxsave.swd;
+		return tsk->thread.fpu.state.fxsave.swd;
 	} else {
-		return (unsigned short)tsk->thread.fpu.state->fsave.swd;
+		return (unsigned short)tsk->thread.fpu.state.fsave.swd;
 	}
 }
 
 static inline unsigned short get_fpu_mxcsr(struct task_struct *tsk)
 {
 	if (cpu_has_xmm) {
-		return tsk->thread.fpu.state->fxsave.mxcsr;
+		return tsk->thread.fpu.state.fxsave.mxcsr;
 	} else {
 		return MXCSR_DEFAULT;
 	}
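
The rex64/fxsave workaround quoted in the comment above hinges on GCC's "R"
machine constraint, which restricts the operand to one of the eight legacy
integer registers, so the memory operand can never be addressed through
r8..r15 and trip over the explicit rex64 prefix. A minimal user-space sketch
of the same trick, assuming a 64-bit build; the type and function names here
are illustrative, not kernel API:

	#include <stdint.h>

	/* Stand-in for the 512-byte FXSAVE area; the instruction
	 * requires a 16-byte-aligned buffer, hence the attribute. */
	struct fxregs_demo {
		uint8_t data[512];
	} __attribute__((aligned(16)));

	static inline void fxsave64_no_extended_regs(struct fxregs_demo *buf)
	{
		/* "R" keeps the address in a legacy register, so the
		 * assembler never needs a second REX prefix for the
		 * addressing mode. */
		asm volatile("rex64/fxsave (%[fx])"
			     : "=m" (*buf)
			     : [fx] "R" (buf));
	}
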
diff --git a/arch/x86/include/asm/fpu/types.h b/arch/x86/include/asm/fpu/types.h
index 231a8f53b2f89e873f10f423dfee3752d1869e1f..3a15ac6032eb37f03ec662a873b1f4111f9b73ec 100644
--- a/arch/x86/include/asm/fpu/types.h
+++ b/arch/x86/include/asm/fpu/types.h
@@ -143,7 +143,7 @@ struct fpu {
 	unsigned int			last_cpu;
 
 	unsigned int			fpregs_active;
-	union thread_xstate		*state;
+	union thread_xstate		state;
 	/*
 	 * This counter contains the number of consecutive context switches
 	 * during which the FPU stays used. If this is over a threshold, the
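
This one-line type change drives every other hunk in the patch: with the
state embedded instead of allocated from task_xstate_cachep, every
"fpu->state->x" access becomes "fpu->state.x", and code that passed the
pointer itself now takes the member's address. A reduced illustration of the
before/after shape; the union below is a stand-in, not the kernel definition:

	/* Stand-in for union thread_xstate; member sizes illustrative. */
	union xstate_demo {
		unsigned char fsave[112];
		unsigned char fxsave[512];
	};

	struct fpu_before {
		union xstate_demo *state;	/* separate allocation */
	};

	struct fpu_after {
		union xstate_demo state;	/* lives inside struct fpu */
	};

	/* before:  prefetch(fpu->state);   fpu->state->fxsave
	 * after:   prefetch(&fpu->state);  fpu->state.fxsave  */
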
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index ca88608a62a59a8c0f0f05dca74b20399e2af7ad..1697a9a34ff0d1eaaea3d444d0e0f577bcb42c3a 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -174,9 +174,9 @@ static void __save_fpu(struct fpu *fpu)
 {
 	if (use_xsave()) {
 		if (unlikely(system_state == SYSTEM_BOOTING))
-			xsave_state_booting(&fpu->state->xsave);
+			xsave_state_booting(&fpu->state.xsave);
 		else
-			xsave_state(&fpu->state->xsave);
+			xsave_state(&fpu->state.xsave);
 	} else {
 		fpu_fxsave(fpu);
 	}
@@ -207,16 +207,16 @@ EXPORT_SYMBOL_GPL(fpu__save);
 void fpstate_init(struct fpu *fpu)
 {
 	if (!cpu_has_fpu) {
-		finit_soft_fpu(&fpu->state->soft);
+		finit_soft_fpu(&fpu->state.soft);
 		return;
 	}
 
-	memset(fpu->state, 0, xstate_size);
+	memset(&fpu->state, 0, xstate_size);
 
 	if (cpu_has_fxsr) {
-		fx_finit(&fpu->state->fxsave);
+		fx_finit(&fpu->state.fxsave);
 	} else {
-		struct i387_fsave_struct *fp = &fpu->state->fsave;
+		struct i387_fsave_struct *fp = &fpu->state.fsave;
 		fp->cwd = 0xffff037fu;
 		fp->swd = 0xffff0000u;
 		fp->twd = 0xffffffffu;
@@ -241,15 +241,8 @@ void fpstate_cache_init(void)
 
 int fpstate_alloc(struct fpu *fpu)
 {
-	if (fpu->state)
-		return 0;
-
-	fpu->state = kmem_cache_alloc(task_xstate_cachep, GFP_KERNEL);
-	if (!fpu->state)
-		return -ENOMEM;
-
 	/* The CPU requires the FPU state to be aligned to 16 byte boundaries: */
-	WARN_ON((unsigned long)fpu->state & 15);
+	WARN_ON((unsigned long)&fpu->state & 15);
 
 	return 0;
 }
@@ -257,10 +250,6 @@ EXPORT_SYMBOL_GPL(fpstate_alloc);
 
 void fpstate_free(struct fpu *fpu)
 {
-	if (fpu->state) {
-		kmem_cache_free(task_xstate_cachep, fpu->state);
-		fpu->state = NULL;
-	}
 }
 EXPORT_SYMBOL_GPL(fpstate_free);
 
@@ -277,11 +266,11 @@ static void fpu_copy(struct fpu *dst_fpu, struct fpu *src_fpu)
 	WARN_ON(src_fpu != &current->thread.fpu);
 
 	if (use_eager_fpu()) {
-		memset(&dst_fpu->state->xsave, 0, xstate_size);
+		memset(&dst_fpu->state.xsave, 0, xstate_size);
 		__save_fpu(dst_fpu);
 	} else {
 		fpu__save(src_fpu);
-		memcpy(dst_fpu->state, src_fpu->state, xstate_size);
+		memcpy(&dst_fpu->state, &src_fpu->state, xstate_size);
 	}
 }
 
@@ -289,7 +278,6 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
 {
 	dst_fpu->counter = 0;
 	dst_fpu->fpregs_active = 0;
-	dst_fpu->state = NULL;
 	dst_fpu->last_cpu = -1;
 
 	if (src_fpu->fpstate_active) {
@@ -483,7 +471,7 @@ int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
 	sanitize_i387_state(target);
 
 	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
-				   &fpu->state->fxsave, 0, -1);
+				   &fpu->state.fxsave, 0, -1);
 }
 
 int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
@@ -503,19 +491,19 @@ int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
 	sanitize_i387_state(target);
 
 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
-				 &fpu->state->fxsave, 0, -1);
+				 &fpu->state.fxsave, 0, -1);
 
 	/*
 	 * mxcsr reserved bits must be masked to zero for security reasons.
 	 */
-	fpu->state->fxsave.mxcsr &= mxcsr_feature_mask;
+	fpu->state.fxsave.mxcsr &= mxcsr_feature_mask;
 
 	/*
 	 * update the header bits in the xsave header, indicating the
 	 * presence of FP and SSE state.
 	 */
 	if (cpu_has_xsave)
-		fpu->state->xsave.header.xfeatures |= XSTATE_FPSSE;
+		fpu->state.xsave.header.xfeatures |= XSTATE_FPSSE;
 
 	return ret;
 }
@@ -535,7 +523,7 @@ int xstateregs_get(struct task_struct *target, const struct user_regset *regset,
 	if (ret)
 		return ret;
 
-	xsave = &fpu->state->xsave;
+	xsave = &fpu->state.xsave;
 
 	/*
 	 * Copy the 48 bytes defined by the software first into the xstate
@@ -566,7 +554,7 @@ int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
 	if (ret)
 		return ret;
 
-	xsave = &fpu->state->xsave;
+	xsave = &fpu->state.xsave;
 
 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, xsave, 0, -1);
 	/*
@@ -657,7 +645,7 @@ static inline u32 twd_fxsr_to_i387(struct i387_fxsave_struct *fxsave)
 void
 convert_from_fxsr(struct user_i387_ia32_struct *env, struct task_struct *tsk)
 {
-	struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state->fxsave;
+	struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state.fxsave;
 	struct _fpreg *to = (struct _fpreg *) &env->st_space[0];
 	struct _fpxreg *from = (struct _fpxreg *) &fxsave->st_space[0];
 	int i;
@@ -695,7 +683,7 @@ void convert_to_fxsr(struct task_struct *tsk,
 		     const struct user_i387_ia32_struct *env)
 
 {
-	struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state->fxsave;
+	struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state.fxsave;
 	struct _fpreg *from = (struct _fpreg *) &env->st_space[0];
 	struct _fpxreg *to = (struct _fpxreg *) &fxsave->st_space[0];
 	int i;
@@ -736,7 +724,7 @@ int fpregs_get(struct task_struct *target, const struct user_regset *regset,
 
 	if (!cpu_has_fxsr)
 		return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
-					   &fpu->state->fsave, 0,
+					   &fpu->state.fsave, 0,
 					   -1);
 
 	sanitize_i387_state(target);
@@ -770,7 +758,7 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset,
 
 	if (!cpu_has_fxsr)
 		return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
-					  &fpu->state->fsave, 0,
+					  &fpu->state.fsave, 0,
 					  -1);
 
 	if (pos > 0 || count < sizeof(env))
@@ -785,7 +773,7 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset,
 	 * presence of FP.
 	 */
 	if (cpu_has_xsave)
-		fpu->state->xsave.header.xfeatures |= XSTATE_FP;
+		fpu->state.xsave.header.xfeatures |= XSTATE_FP;
 	return ret;
 }
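
The MXCSR masking in xfpregs_set() above is load-bearing: FXRSTOR raises #GP
if any reserved MXCSR bit is set, so a value written through ptrace must be
filtered against the mask the CPU itself reports (the kernel derives
mxcsr_feature_mask at boot from the mxcsr_mask field of a probe FXSAVE). The
core of that filter as a self-contained sketch; the macro value is the
kernel's historical fallback for CPUs that report no mask, used here purely
for illustration:

	#include <stdint.h>

	#define MXCSR_FEATURE_MASK_DEMO	0x0000ffbfu

	static uint32_t sanitize_mxcsr(uint32_t user_mxcsr)
	{
		/* Drop reserved bits; FXRSTOR/LDMXCSR #GP on them. */
		return user_mxcsr & MXCSR_FEATURE_MASK_DEMO;
	}
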
 
diff --git a/arch/x86/kernel/fpu/xsave.c b/arch/x86/kernel/fpu/xsave.c
index a23236358fb03092be6bc1eb6220ddcc9fb8c51a..c7d48eb0a19480207136acddf1212ccdb70ea101 100644
--- a/arch/x86/kernel/fpu/xsave.c
+++ b/arch/x86/kernel/fpu/xsave.c
@@ -44,14 +44,14 @@ static unsigned int xfeatures_nr;
  */
 void __sanitize_i387_state(struct task_struct *tsk)
 {
-	struct i387_fxsave_struct *fx = &tsk->thread.fpu.state->fxsave;
+	struct i387_fxsave_struct *fx = &tsk->thread.fpu.state.fxsave;
 	int feature_bit;
 	u64 xfeatures;
 
 	if (!fx)
 		return;
 
-	xfeatures = tsk->thread.fpu.state->xsave.header.xfeatures;
+	xfeatures = tsk->thread.fpu.state.xsave.header.xfeatures;
 
 	/*
 	 * None of the feature bits are in init state. So nothing else
@@ -147,7 +147,7 @@ static inline int check_for_xstate(struct i387_fxsave_struct __user *buf,
 static inline int save_fsave_header(struct task_struct *tsk, void __user *buf)
 {
 	if (use_fxsr()) {
-		struct xsave_struct *xsave = &tsk->thread.fpu.state->xsave;
+		struct xsave_struct *xsave = &tsk->thread.fpu.state.xsave;
 		struct user_i387_ia32_struct env;
 		struct _fpstate_ia32 __user *fp = buf;
 
@@ -245,7 +245,7 @@ static inline int save_user_xstate(struct xsave_struct __user *buf)
  */
 int save_xstate_sig(void __user *buf, void __user *buf_fx, int size)
 {
-	struct xsave_struct *xsave = &current->thread.fpu.state->xsave;
+	struct xsave_struct *xsave = &current->thread.fpu.state.xsave;
 	struct task_struct *tsk = current;
 	int ia32_fxstate = (buf != buf_fx);
 
@@ -288,7 +288,7 @@ sanitize_restored_xstate(struct task_struct *tsk,
 			 struct user_i387_ia32_struct *ia32_env,
 			 u64 xfeatures, int fx_only)
 {
-	struct xsave_struct *xsave = &tsk->thread.fpu.state->xsave;
+	struct xsave_struct *xsave = &tsk->thread.fpu.state.xsave;
 	struct xstate_header *header = &xsave->header;
 
 	if (use_xsave()) {
@@ -402,7 +402,7 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
 		 */
 		drop_fpu(fpu);
 
-		if (__copy_from_user(&fpu->state->xsave, buf_fx, state_size) ||
+		if (__copy_from_user(&fpu->state.xsave, buf_fx, state_size) ||
 		    __copy_from_user(&env, buf, sizeof(env))) {
 			fpstate_init(fpu);
 			err = -1;
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index f028f1da348025c0e9128535a467afc87a72fffe..48dfcd9ed3517608433ca549b4d7bfcba808fd67 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -396,7 +396,7 @@ dotraplinkage void do_bounds(struct pt_regs *regs, long error_code)
 	 * do an xsave and then pull it out of the xsave buffer.
 	 */
 	copy_fpregs_to_fpstate(&tsk->thread.fpu);
-	xsave_buf = &(tsk->thread.fpu.state->xsave);
+	xsave_buf = &(tsk->thread.fpu.state.xsave);
 	bndcsr = get_xsave_addr(xsave_buf, XSTATE_BNDCSR);
 	if (!bndcsr)
 		goto exit_trap;
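
There is no instruction to read BNDCSR directly from this context, which is
why do_bounds() saves the whole FPU state first and then indexes into the
buffer. The presence check matters: a component whose feature bit is clear in
the saved header was never written. A hypothetical reduction of that lookup
rule, not the kernel's get_xsave_addr():

	#include <stdint.h>

	struct xsave_demo {
		uint8_t legacy[512];	/* legacy FXSAVE region */
		uint64_t xfeatures;	/* first word of the header */
		/* component data follows at feature-specific offsets */
	};

	static void *component_addr(struct xsave_demo *buf,
				    unsigned int feature_bit,
				    unsigned int offset)
	{
		/* Absent from the saved image: caller must bail out,
		 * as do_bounds() does via exit_trap. */
		if (!(buf->xfeatures & (1ULL << feature_bit)))
			return (void *)0;
		return (uint8_t *)buf + offset;
	}
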
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index d90bf4afa2b06a30049f11c92750c14b1afa5851..8bb0de5bf9c0579ae6eb6e129b244184e9c86938 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3196,7 +3196,7 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
 
 static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
 {
-	struct xsave_struct *xsave = &vcpu->arch.guest_fpu.state->xsave;
+	struct xsave_struct *xsave = &vcpu->arch.guest_fpu.state.xsave;
 	u64 xstate_bv = xsave->header.xfeatures;
 	u64 valid;
 
@@ -3232,7 +3232,7 @@ static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
 
 static void load_xsave(struct kvm_vcpu *vcpu, u8 *src)
 {
-	struct xsave_struct *xsave = &vcpu->arch.guest_fpu.state->xsave;
+	struct xsave_struct *xsave = &vcpu->arch.guest_fpu.state.xsave;
 	u64 xstate_bv = *(u64 *)(src + XSAVE_HDR_OFFSET);
 	u64 valid;
 
@@ -3277,7 +3277,7 @@ static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
 		fill_xsave((u8 *) guest_xsave->region, vcpu);
 	} else {
 		memcpy(guest_xsave->region,
-			&vcpu->arch.guest_fpu.state->fxsave,
+			&vcpu->arch.guest_fpu.state.fxsave,
 			sizeof(struct i387_fxsave_struct));
 		*(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)] =
 			XSTATE_FPSSE;
@@ -3302,7 +3302,7 @@ static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
 	} else {
 		if (xstate_bv & ~XSTATE_FPSSE)
 			return -EINVAL;
-		memcpy(&vcpu->arch.guest_fpu.state->fxsave,
+		memcpy(&vcpu->arch.guest_fpu.state.fxsave,
 			guest_xsave->region, sizeof(struct i387_fxsave_struct));
 	}
 	return 0;
@@ -6973,7 +6973,7 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 {
 	struct i387_fxsave_struct *fxsave =
-			&vcpu->arch.guest_fpu.state->fxsave;
+			&vcpu->arch.guest_fpu.state.fxsave;
 
 	memcpy(fpu->fpr, fxsave->st_space, 128);
 	fpu->fcw = fxsave->cwd;
@@ -6990,7 +6990,7 @@ int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 {
 	struct i387_fxsave_struct *fxsave =
-			&vcpu->arch.guest_fpu.state->fxsave;
+			&vcpu->arch.guest_fpu.state.fxsave;
 
 	memcpy(fxsave->st_space, fpu->fpr, 128);
 	fxsave->cwd = fpu->fcw;
@@ -7014,7 +7014,7 @@ int fx_init(struct kvm_vcpu *vcpu)
 
 	fpstate_init(&vcpu->arch.guest_fpu);
 	if (cpu_has_xsaves)
-		vcpu->arch.guest_fpu.state->xsave.header.xcomp_bv =
+		vcpu->arch.guest_fpu.state.xsave.header.xcomp_bv =
 			host_xcr0 | XSTATE_COMPACTION_ENABLED;
 
 	/*
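
The xcomp_bv initialization in fx_init() above follows the XSAVES
compacted-format rule: bit 63 of XCOMP_BV (XSTATE_COMPACTION_ENABLED) marks
the buffer as compacted, and the low bits enumerate which components the
compacted layout contains, here everything in host_xcr0. XRSTORS faults if
the compaction bit is clear. A sketch, with the macro suffixed to mark it as
illustrative rather than the kernel's definition:

	#include <stdint.h>

	#define XSTATE_COMPACTION_ENABLED_DEMO	(1ULL << 63)

	static uint64_t make_xcomp_bv(uint64_t features)
	{
		/* Compacted buffer holding exactly 'features'. */
		return features | XSTATE_COMPACTION_ENABLED_DEMO;
	}
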
diff --git a/arch/x86/math-emu/fpu_aux.c b/arch/x86/math-emu/fpu_aux.c
index dc8adad10a2f3881cdbca82b9a624dc17b88c4f8..7562341ce2997138a892d23c020e2c61c593c5c7 100644
--- a/arch/x86/math-emu/fpu_aux.c
+++ b/arch/x86/math-emu/fpu_aux.c
@@ -52,7 +52,7 @@ void finit_soft_fpu(struct i387_soft_struct *soft)
 
 void finit(void)
 {
-	finit_soft_fpu(&current->thread.fpu.state->soft);
+	finit_soft_fpu(&current->thread.fpu.state.soft);
 }
 
 /*
diff --git a/arch/x86/math-emu/fpu_entry.c b/arch/x86/math-emu/fpu_entry.c
index cf843855e4f6c73b2df0f2c91a2930fe26918bda..5e003704ebfaa0985684ed83befb816abc258f99 100644
--- a/arch/x86/math-emu/fpu_entry.c
+++ b/arch/x86/math-emu/fpu_entry.c
@@ -683,7 +683,7 @@ int fpregs_soft_set(struct task_struct *target,
 		    unsigned int pos, unsigned int count,
 		    const void *kbuf, const void __user *ubuf)
 {
-	struct i387_soft_struct *s387 = &target->thread.fpu.state->soft;
+	struct i387_soft_struct *s387 = &target->thread.fpu.state.soft;
 	void *space = s387->st_space;
 	int ret;
 	int offset, other, i, tags, regnr, tag, newtop;
@@ -735,7 +735,7 @@ int fpregs_soft_get(struct task_struct *target,
 		    unsigned int pos, unsigned int count,
 		    void *kbuf, void __user *ubuf)
 {
-	struct i387_soft_struct *s387 = &target->thread.fpu.state->soft;
+	struct i387_soft_struct *s387 = &target->thread.fpu.state.soft;
 	const void *space = s387->st_space;
 	int ret;
 	int offset = (S387->ftop & 7) * 10, other = 80 - offset;
diff --git a/arch/x86/math-emu/fpu_system.h b/arch/x86/math-emu/fpu_system.h
index 2c614410a5f3978d646f87d2814edaf2ec383396..9ccecb61a4fa129a82028b27edc18b91a2f99042 100644
--- a/arch/x86/math-emu/fpu_system.h
+++ b/arch/x86/math-emu/fpu_system.h
@@ -31,7 +31,7 @@
 #define SEG_EXPAND_DOWN(s)	(((s).b & ((1 << 11) | (1 << 10))) \
 				 == (1 << 10))
 
-#define I387			(current->thread.fpu.state)
+#define I387			(&current->thread.fpu.state)
 #define FPU_info		(I387->soft.info)
 
 #define FPU_CS			(*(unsigned short *) &(FPU_info->regs->cs))
diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
index 3287215be60a5d59951a6d2f3b4ed6b7acedfb7d..ea5b367b63a928121223741fb7cba2a46e8b16b1 100644
--- a/arch/x86/mm/mpx.c
+++ b/arch/x86/mm/mpx.c
@@ -358,7 +358,7 @@ static __user void *task_get_bounds_dir(struct task_struct *tsk)
 	 * only accessible if we first do an xsave.
 	 */
 	copy_fpregs_to_fpstate(&tsk->thread.fpu);
-	bndcsr = get_xsave_addr(&tsk->thread.fpu.state->xsave, XSTATE_BNDCSR);
+	bndcsr = get_xsave_addr(&tsk->thread.fpu.state.xsave, XSTATE_BNDCSR);
 	if (!bndcsr)
 		return MPX_INVALID_BOUNDS_DIR;