diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 9bb0b52cbd32ed4f8274864efea3f4f61a1e9424..bcc9460c2d653302c25926dcae7089a450233832 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -667,7 +667,7 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 
 	WARN_ON_ONCE(!irqs_disabled());
 
-	if (!tr->current_trace->allocated_snapshot) {
+	if (!tr->allocated_snapshot) {
 		/* Only the nop tracer should hit this when disabling */
 		WARN_ON_ONCE(tr->current_trace != &nop_trace);
 		return;
@@ -700,7 +700,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 		return;
 
 	WARN_ON_ONCE(!irqs_disabled());
-	if (WARN_ON_ONCE(!tr->current_trace->allocated_snapshot))
+	if (WARN_ON_ONCE(!tr->allocated_snapshot))
 		return;
 
 	arch_spin_lock(&ftrace_max_lock);
@@ -802,7 +802,7 @@ int register_tracer(struct tracer *type)
 			if (ring_buffer_expanded)
 				ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
 						   RING_BUFFER_ALL_CPUS);
-			type->allocated_snapshot = true;
+			tr->allocated_snapshot = true;
 		}
 #endif
 
@@ -822,7 +822,7 @@ int register_tracer(struct tracer *type)
 
 #ifdef CONFIG_TRACER_MAX_TRACE
 		if (type->use_max_tr) {
-			type->allocated_snapshot = false;
+			tr->allocated_snapshot = false;
 
 			/* Shrink the max buffer again */
 			if (ring_buffer_expanded)
@@ -2463,7 +2463,7 @@ static void show_snapshot_percpu_help(struct seq_file *m)
 
 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
 {
-	if (iter->trace->allocated_snapshot)
+	if (iter->tr->allocated_snapshot)
 		seq_printf(m, "#\n# * Snapshot is allocated *\n#\n");
 	else
 		seq_printf(m, "#\n# * Snapshot is freed *\n#\n");
@@ -3364,12 +3364,12 @@ static int tracing_set_tracer(const char *buf)
 	if (tr->current_trace->reset)
 		tr->current_trace->reset(tr);
 
-#ifdef CONFIG_TRACER_MAX_TRACE
-	had_max_tr = tr->current_trace->allocated_snapshot;
-
 	/* Current trace needs to be nop_trace before synchronize_sched */
 	tr->current_trace = &nop_trace;
 
+#ifdef CONFIG_TRACER_MAX_TRACE
+	had_max_tr = tr->allocated_snapshot;
+
 	if (had_max_tr && !t->use_max_tr) {
 		/*
 		 * We need to make sure that the update_max_tr sees that
@@ -3387,10 +3387,8 @@ static int tracing_set_tracer(const char *buf)
 		ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
 		set_buffer_entries(&tr->max_buffer, 1);
 		tracing_reset_online_cpus(&tr->max_buffer);
-		tr->current_trace->allocated_snapshot = false;
+		tr->allocated_snapshot = false;
 	}
-#else
-	tr->current_trace = &nop_trace;
 #endif
 	destroy_trace_option_files(topts);
 
@@ -3403,7 +3401,7 @@ static int tracing_set_tracer(const char *buf)
 						   RING_BUFFER_ALL_CPUS);
 		if (ret < 0)
 			goto out;
-		t->allocated_snapshot = true;
+		tr->allocated_snapshot = true;
 	}
 #endif
 
@@ -4275,13 +4273,13 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
 			ret = -EINVAL;
 			break;
 		}
-		if (tr->current_trace->allocated_snapshot) {
+		if (tr->allocated_snapshot) {
 			/* free spare buffer */
 			ring_buffer_resize(tr->max_buffer.buffer, 1,
 					   RING_BUFFER_ALL_CPUS);
 			set_buffer_entries(&tr->max_buffer, 1);
 			tracing_reset_online_cpus(&tr->max_buffer);
-			tr->current_trace->allocated_snapshot = false;
+			tr->allocated_snapshot = false;
 		}
 		break;
 	case 1:
@@ -4292,13 +4290,13 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
 			break;
 		}
 #endif
-		if (!tr->current_trace->allocated_snapshot) {
+		if (!tr->allocated_snapshot) {
 			/* allocate spare buffer */
 			ret = resize_buffer_duplicate_size(&tr->max_buffer,
 					&tr->trace_buffer, RING_BUFFER_ALL_CPUS);
 			if (ret < 0)
 				break;
-			tr->current_trace->allocated_snapshot = true;
+			tr->allocated_snapshot = true;
 		}
 		local_irq_disable();
 		/* Now, we're going to swap */
@@ -4309,7 +4307,7 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
 		local_irq_enable();
 		break;
 	default:
-		if (tr->current_trace->allocated_snapshot) {
+		if (tr->allocated_snapshot) {
 			if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
 				tracing_reset_online_cpus(&tr->max_buffer);
 			else
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 986834f1f4dde6385e1080f57dde6f73a9916122..1a456c291a07413a81df9796f4b0fcbbd76d42fe 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -197,6 +197,7 @@ struct trace_array {
 	 * the trace_buffer so the tracing can continue.
 	 */
 	struct trace_buffer	max_buffer;
+	bool			allocated_snapshot;
 #endif
 	int			buffer_disabled;
 	struct trace_cpu	trace_cpu;	/* place holder */
@@ -367,7 +368,6 @@ struct tracer {
 	bool			enabled;
 #ifdef CONFIG_TRACER_MAX_TRACE
 	bool			use_max_tr;
-	bool			allocated_snapshot;
 #endif
 };
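
Below the patch: a minimal, self-contained C sketch (not kernel code) of the ownership change the trace.h hunks make, i.e. moving allocated_snapshot from the shared struct tracer into the per-instance struct trace_array. All names other than allocated_snapshot and use_max_tr, and the ensure_snapshot()/free_snapshot() helpers, are hypothetical simplifications for illustration only.

	/* Sketch only: models per-instance snapshot state vs. shared tracer state. */
	#include <stdbool.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct tracer {			/* one object per tracer type, shared by every instance */
		const char *name;
		bool use_max_tr;	/* a capability of the tracer, so it stays here */
	};

	struct trace_array {		/* one object per trace instance */
		const char *instance;
		struct tracer *current_trace;
		void *max_buffer;	/* stand-in for the real snapshot ring buffer */
		bool allocated_snapshot; /* per-instance state, as in the patch */
	};

	/* Lazily allocate the spare buffer for this instance only. */
	static int ensure_snapshot(struct trace_array *tr)
	{
		if (tr->allocated_snapshot)
			return 0;
		tr->max_buffer = malloc(4096);
		if (!tr->max_buffer)
			return -1;
		tr->allocated_snapshot = true;
		return 0;
	}

	static void free_snapshot(struct trace_array *tr)
	{
		free(tr->max_buffer);
		tr->max_buffer = NULL;
		tr->allocated_snapshot = false;
	}

	int main(void)
	{
		struct tracer irqsoff = { .name = "irqsoff", .use_max_tr = true };
		struct trace_array global = { .instance = "global", .current_trace = &irqsoff };
		struct trace_array inst1  = { .instance = "inst1",  .current_trace = &irqsoff };

		/* Both instances use the same tracer, yet only one holds a snapshot
		 * buffer; a flag inside struct tracer could not express this. */
		ensure_snapshot(&global);
		printf("%s: %d, %s: %d\n", global.instance, global.allocated_snapshot,
		       inst1.instance, inst1.allocated_snapshot);

		free_snapshot(&global);
		return 0;
	}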