From 208da1d5fc3c67d8ae5d34e844fd67cc47a136f0 Mon Sep 17 00:00:00 2001
From: Sven Schnelle <svens@linux.ibm.com>
Date: Mon, 10 Jun 2024 13:45:25 +0200
Subject: [PATCH] s390: Replace S390_lowcore by get_lowcore()

Replace all S390_lowcore usages in arch/s390/ with get_lowcore().

Acked-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Sven Schnelle <svens@linux.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
---
 arch/s390/include/asm/current.h       |  2 +-
 arch/s390/include/asm/facility.h      |  4 +-
 arch/s390/include/asm/hardirq.h       |  6 +--
 arch/s390/include/asm/mmu_context.h   |  8 +--
 arch/s390/include/asm/pai.h           |  8 +--
 arch/s390/include/asm/percpu.h        |  2 +-
 arch/s390/include/asm/preempt.h       | 30 +++++------
 arch/s390/include/asm/processor.h     |  8 +--
 arch/s390/include/asm/setup.h         | 36 ++++++-------
 arch/s390/include/asm/smp.h           |  2 +-
 arch/s390/include/asm/softirq_stack.h |  2 +-
 arch/s390/include/asm/spinlock.h      |  2 +-
 arch/s390/include/asm/timex.h         | 10 ++--
 arch/s390/include/asm/vtime.h         | 12 ++---
 arch/s390/kernel/dumpstack.c          |  8 +--
 arch/s390/kernel/early.c              | 36 ++++++-------
 arch/s390/kernel/idle.c               | 10 ++--
 arch/s390/kernel/irq.c                | 18 +++----
 arch/s390/kernel/machine_kexec.c      |  4 +-
 arch/s390/kernel/nmi.c                | 29 +++++------
 arch/s390/kernel/perf_cpum_sf.c       |  2 +-
 arch/s390/kernel/perf_pai_crypto.c    |  4 +-
 arch/s390/kernel/perf_pai_ext.c       |  4 +-
 arch/s390/kernel/process.c            |  6 +--
 arch/s390/kernel/setup.c              | 24 ++++-----
 arch/s390/kernel/smp.c                | 30 +++++------
 arch/s390/kernel/syscall.c            |  4 +-
 arch/s390/kernel/time.c               | 22 ++++----
 arch/s390/kernel/traps.c              | 24 ++++-----
 arch/s390/kernel/vtime.c              | 74 +++++++++++++--------------
 arch/s390/kvm/kvm-s390.c              |  2 +-
 arch/s390/lib/spinlock.c              |  4 +-
 arch/s390/lib/test_unwind.c           |  2 +-
 arch/s390/lib/uaccess.c               |  4 +-
 arch/s390/mm/dump_pagetables.c        |  2 +-
 arch/s390/mm/fault.c                  | 16 +++---
 arch/s390/mm/gmap.c                   |  6 +--
 arch/s390/mm/pageattr.c               |  2 +-
 arch/s390/mm/pgalloc.c                |  4 +-
 arch/s390/pci/pci.c                   |  2 +-
 40 files changed, 237 insertions(+), 238 deletions(-)

diff --git a/arch/s390/include/asm/current.h b/arch/s390/include/asm/current.h
index 68f84315277c6..d03a922c641e2 100644
--- a/arch/s390/include/asm/current.h
+++ b/arch/s390/include/asm/current.h
@@ -14,6 +14,6 @@
 
 struct task_struct;
 
-#define current ((struct task_struct *const)S390_lowcore.current_task)
+#define current ((struct task_struct *const)get_lowcore()->current_task)
 
 #endif /* !(_S390_CURRENT_H) */
diff --git a/arch/s390/include/asm/facility.h b/arch/s390/include/asm/facility.h
index 796007125dff2..d46cc725f0241 100644
--- a/arch/s390/include/asm/facility.h
+++ b/arch/s390/include/asm/facility.h
@@ -92,8 +92,8 @@ static inline void __stfle(u64 *stfle_fac_list, int size)
 
 	asm volatile(
 		"	stfl	0(0)\n"
-		: "=m" (S390_lowcore.stfl_fac_list));
-	stfl_fac_list = S390_lowcore.stfl_fac_list;
+		: "=m" (get_lowcore()->stfl_fac_list));
+	stfl_fac_list = get_lowcore()->stfl_fac_list;
 	memcpy(stfle_fac_list, &stfl_fac_list, 4);
 	nr = 4; /* bytes stored by stfl */
 	if (stfl_fac_list & 0x01000000) {
diff --git a/arch/s390/include/asm/hardirq.h b/arch/s390/include/asm/hardirq.h
index 58668ffb54882..a5b45388c91f9 100644
--- a/arch/s390/include/asm/hardirq.h
+++ b/arch/s390/include/asm/hardirq.h
@@ -13,9 +13,9 @@
 
 #include <asm/lowcore.h>
 
-#define local_softirq_pending() (S390_lowcore.softirq_pending)
-#define set_softirq_pending(x) (S390_lowcore.softirq_pending = (x))
-#define or_softirq_pending(x)  (S390_lowcore.softirq_pending |= (x))
+#define local_softirq_pending() (get_lowcore()->softirq_pending)
+#define set_softirq_pending(x) (get_lowcore()->softirq_pending = (x))
+#define or_softirq_pending(x)  (get_lowcore()->softirq_pending |= (x))
 
 #define __ARCH_IRQ_STAT
 #define __ARCH_IRQ_EXIT_IRQS_DISABLED
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index a7789a9f62186..d56eb0a1f37bb 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -76,9 +76,9 @@ static inline void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *
 	int cpu = smp_processor_id();
 
 	if (next == &init_mm)
-		S390_lowcore.user_asce = s390_invalid_asce;
+		get_lowcore()->user_asce = s390_invalid_asce;
 	else
-		S390_lowcore.user_asce.val = next->context.asce;
+		get_lowcore()->user_asce.val = next->context.asce;
 	cpumask_set_cpu(cpu, &next->context.cpu_attach_mask);
 	/* Clear previous user-ASCE from CR7 */
 	local_ctl_load(7, &s390_invalid_asce);
@@ -111,7 +111,7 @@ static inline void finish_arch_post_lock_switch(void)
 		__tlb_flush_mm_lazy(mm);
 		preempt_enable();
 	}
-	local_ctl_load(7, &S390_lowcore.user_asce);
+	local_ctl_load(7, &get_lowcore()->user_asce);
 }
 
 #define activate_mm activate_mm
@@ -120,7 +120,7 @@ static inline void activate_mm(struct mm_struct *prev,
 {
 	switch_mm(prev, next, current);
 	cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
-	local_ctl_load(7, &S390_lowcore.user_asce);
+	local_ctl_load(7, &get_lowcore()->user_asce);
 }
 
 #include <asm-generic/mmu_context.h>
diff --git a/arch/s390/include/asm/pai.h b/arch/s390/include/asm/pai.h
index 8e1dd389ae88f..25f2077ba3c97 100644
--- a/arch/s390/include/asm/pai.h
+++ b/arch/s390/include/asm/pai.h
@@ -55,11 +55,11 @@ static __always_inline void pai_kernel_enter(struct pt_regs *regs)
 		return;
 	if (!static_branch_unlikely(&pai_key))
 		return;
-	if (!S390_lowcore.ccd)
+	if (!get_lowcore()->ccd)
 		return;
 	if (!user_mode(regs))
 		return;
-	WRITE_ONCE(S390_lowcore.ccd, S390_lowcore.ccd | PAI_CRYPTO_KERNEL_OFFSET);
+	WRITE_ONCE(get_lowcore()->ccd, get_lowcore()->ccd | PAI_CRYPTO_KERNEL_OFFSET);
 }
 
 static __always_inline void pai_kernel_exit(struct pt_regs *regs)
@@ -68,11 +68,11 @@ static __always_inline void pai_kernel_exit(struct pt_regs *regs)
 		return;
 	if (!static_branch_unlikely(&pai_key))
 		return;
-	if (!S390_lowcore.ccd)
+	if (!get_lowcore()->ccd)
 		return;
 	if (!user_mode(regs))
 		return;
-	WRITE_ONCE(S390_lowcore.ccd, S390_lowcore.ccd & ~PAI_CRYPTO_KERNEL_OFFSET);
+	WRITE_ONCE(get_lowcore()->ccd, get_lowcore()->ccd & ~PAI_CRYPTO_KERNEL_OFFSET);
 }
 
 #define PAI_SAVE_AREA(x)	((x)->hw.event_base)
diff --git a/arch/s390/include/asm/percpu.h b/arch/s390/include/asm/percpu.h
index 264095dd84bc7..89a28740b6ab8 100644
--- a/arch/s390/include/asm/percpu.h
+++ b/arch/s390/include/asm/percpu.h
@@ -9,7 +9,7 @@
  * s390 uses its own implementation for per cpu data, the offset of
  * the cpu local data area is cached in the cpu's lowcore memory.
  */
-#define __my_cpu_offset S390_lowcore.percpu_offset
+#define __my_cpu_offset get_lowcore()->percpu_offset
 
 /*
  * For 64 bit module code, the module may be more than 4G above the
diff --git a/arch/s390/include/asm/preempt.h b/arch/s390/include/asm/preempt.h
index 0e3da500e98c1..3ae5f31c665d1 100644
--- a/arch/s390/include/asm/preempt.h
+++ b/arch/s390/include/asm/preempt.h
@@ -14,7 +14,7 @@
 
 static __always_inline int preempt_count(void)
 {
-	return READ_ONCE(S390_lowcore.preempt_count) & ~PREEMPT_NEED_RESCHED;
+	return READ_ONCE(get_lowcore()->preempt_count) & ~PREEMPT_NEED_RESCHED;
 }
 
 static __always_inline void preempt_count_set(int pc)
@@ -22,26 +22,26 @@ static __always_inline void preempt_count_set(int pc)
 	int old, new;
 
 	do {
-		old = READ_ONCE(S390_lowcore.preempt_count);
+		old = READ_ONCE(get_lowcore()->preempt_count);
 		new = (old & PREEMPT_NEED_RESCHED) |
 			(pc & ~PREEMPT_NEED_RESCHED);
-	} while (__atomic_cmpxchg(&S390_lowcore.preempt_count,
+	} while (__atomic_cmpxchg(&get_lowcore()->preempt_count,
 				  old, new) != old);
 }
 
 static __always_inline void set_preempt_need_resched(void)
 {
-	__atomic_and(~PREEMPT_NEED_RESCHED, &S390_lowcore.preempt_count);
+	__atomic_and(~PREEMPT_NEED_RESCHED, &get_lowcore()->preempt_count);
 }
 
 static __always_inline void clear_preempt_need_resched(void)
 {
-	__atomic_or(PREEMPT_NEED_RESCHED, &S390_lowcore.preempt_count);
+	__atomic_or(PREEMPT_NEED_RESCHED, &get_lowcore()->preempt_count);
 }
 
 static __always_inline bool test_preempt_need_resched(void)
 {
-	return !(READ_ONCE(S390_lowcore.preempt_count) & PREEMPT_NEED_RESCHED);
+	return !(READ_ONCE(get_lowcore()->preempt_count) & PREEMPT_NEED_RESCHED);
 }
 
 static __always_inline void __preempt_count_add(int val)
@@ -52,11 +52,11 @@ static __always_inline void __preempt_count_add(int val)
 	 */
 	if (!IS_ENABLED(CONFIG_PROFILE_ALL_BRANCHES)) {
 		if (__builtin_constant_p(val) && (val >= -128) && (val <= 127)) {
-			__atomic_add_const(val, &S390_lowcore.preempt_count);
+			__atomic_add_const(val, &get_lowcore()->preempt_count);
 			return;
 		}
 	}
-	__atomic_add(val, &S390_lowcore.preempt_count);
+	__atomic_add(val, &get_lowcore()->preempt_count);
 }
 
 static __always_inline void __preempt_count_sub(int val)
@@ -66,12 +66,12 @@ static __always_inline void __preempt_count_sub(int val)
 
 static __always_inline bool __preempt_count_dec_and_test(void)
 {
-	return __atomic_add(-1, &S390_lowcore.preempt_count) == 1;
+	return __atomic_add(-1, &get_lowcore()->preempt_count) == 1;
 }
 
 static __always_inline bool should_resched(int preempt_offset)
 {
-	return unlikely(READ_ONCE(S390_lowcore.preempt_count) ==
+	return unlikely(READ_ONCE(get_lowcore()->preempt_count) ==
 			preempt_offset);
 }
 
@@ -81,12 +81,12 @@ static __always_inline bool should_resched(int preempt_offset)
 
 static __always_inline int preempt_count(void)
 {
-	return READ_ONCE(S390_lowcore.preempt_count);
+	return READ_ONCE(get_lowcore()->preempt_count);
 }
 
 static __always_inline void preempt_count_set(int pc)
 {
-	S390_lowcore.preempt_count = pc;
+	get_lowcore()->preempt_count = pc;
 }
 
 static __always_inline void set_preempt_need_resched(void)
@@ -104,17 +104,17 @@ static __always_inline bool test_preempt_need_resched(void)
 
 static __always_inline void __preempt_count_add(int val)
 {
-	S390_lowcore.preempt_count += val;
+	get_lowcore()->preempt_count += val;
 }
 
 static __always_inline void __preempt_count_sub(int val)
 {
-	S390_lowcore.preempt_count -= val;
+	get_lowcore()->preempt_count -= val;
 }
 
 static __always_inline bool __preempt_count_dec_and_test(void)
 {
-	return !--S390_lowcore.preempt_count && tif_need_resched();
+	return !--get_lowcore()->preempt_count && tif_need_resched();
 }
 
 static __always_inline bool should_resched(int preempt_offset)
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 07ad5a1df878a..c87cf2b8e81af 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -46,17 +46,17 @@ typedef long (*sys_call_ptr_t)(struct pt_regs *regs);
 
 static __always_inline void set_cpu_flag(int flag)
 {
-	S390_lowcore.cpu_flags |= (1UL << flag);
+	get_lowcore()->cpu_flags |= (1UL << flag);
 }
 
 static __always_inline void clear_cpu_flag(int flag)
 {
-	S390_lowcore.cpu_flags &= ~(1UL << flag);
+	get_lowcore()->cpu_flags &= ~(1UL << flag);
 }
 
 static __always_inline bool test_cpu_flag(int flag)
 {
-	return S390_lowcore.cpu_flags & (1UL << flag);
+	return get_lowcore()->cpu_flags & (1UL << flag);
 }
 
 static __always_inline bool test_and_set_cpu_flag(int flag)
@@ -269,7 +269,7 @@ static __always_inline unsigned long __current_stack_pointer(void)
 
 static __always_inline bool on_thread_stack(void)
 {
-	unsigned long ksp = S390_lowcore.kernel_stack;
+	unsigned long ksp = get_lowcore()->kernel_stack;
 
 	return !((ksp ^ current_stack_pointer) & ~(THREAD_SIZE - 1));
 }
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index 32f70873e2b7d..8505737712ee8 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -77,24 +77,24 @@ extern unsigned long max_mappable;
 /* The Write Back bit position in the physaddr is given by the SLPC PCI */
 extern unsigned long mio_wb_bit_mask;
 
-#define MACHINE_IS_VM		(S390_lowcore.machine_flags & MACHINE_FLAG_VM)
-#define MACHINE_IS_KVM		(S390_lowcore.machine_flags & MACHINE_FLAG_KVM)
-#define MACHINE_IS_LPAR		(S390_lowcore.machine_flags & MACHINE_FLAG_LPAR)
-
-#define MACHINE_HAS_DIAG9C	(S390_lowcore.machine_flags & MACHINE_FLAG_DIAG9C)
-#define MACHINE_HAS_ESOP	(S390_lowcore.machine_flags & MACHINE_FLAG_ESOP)
-#define MACHINE_HAS_IDTE	(S390_lowcore.machine_flags & MACHINE_FLAG_IDTE)
-#define MACHINE_HAS_EDAT1	(S390_lowcore.machine_flags & MACHINE_FLAG_EDAT1)
-#define MACHINE_HAS_EDAT2	(S390_lowcore.machine_flags & MACHINE_FLAG_EDAT2)
-#define MACHINE_HAS_TOPOLOGY	(S390_lowcore.machine_flags & MACHINE_FLAG_TOPOLOGY)
-#define MACHINE_HAS_TE		(S390_lowcore.machine_flags & MACHINE_FLAG_TE)
-#define MACHINE_HAS_TLB_LC	(S390_lowcore.machine_flags & MACHINE_FLAG_TLB_LC)
-#define MACHINE_HAS_TLB_GUEST	(S390_lowcore.machine_flags & MACHINE_FLAG_TLB_GUEST)
-#define MACHINE_HAS_NX		(S390_lowcore.machine_flags & MACHINE_FLAG_NX)
-#define MACHINE_HAS_GS		(S390_lowcore.machine_flags & MACHINE_FLAG_GS)
-#define MACHINE_HAS_SCC		(S390_lowcore.machine_flags & MACHINE_FLAG_SCC)
-#define MACHINE_HAS_PCI_MIO	(S390_lowcore.machine_flags & MACHINE_FLAG_PCI_MIO)
-#define MACHINE_HAS_RDP		(S390_lowcore.machine_flags & MACHINE_FLAG_RDP)
+#define MACHINE_IS_VM		(get_lowcore()->machine_flags & MACHINE_FLAG_VM)
+#define MACHINE_IS_KVM		(get_lowcore()->machine_flags & MACHINE_FLAG_KVM)
+#define MACHINE_IS_LPAR		(get_lowcore()->machine_flags & MACHINE_FLAG_LPAR)
+
+#define MACHINE_HAS_DIAG9C	(get_lowcore()->machine_flags & MACHINE_FLAG_DIAG9C)
+#define MACHINE_HAS_ESOP	(get_lowcore()->machine_flags & MACHINE_FLAG_ESOP)
+#define MACHINE_HAS_IDTE	(get_lowcore()->machine_flags & MACHINE_FLAG_IDTE)
+#define MACHINE_HAS_EDAT1	(get_lowcore()->machine_flags & MACHINE_FLAG_EDAT1)
+#define MACHINE_HAS_EDAT2	(get_lowcore()->machine_flags & MACHINE_FLAG_EDAT2)
+#define MACHINE_HAS_TOPOLOGY	(get_lowcore()->machine_flags & MACHINE_FLAG_TOPOLOGY)
+#define MACHINE_HAS_TE		(get_lowcore()->machine_flags & MACHINE_FLAG_TE)
+#define MACHINE_HAS_TLB_LC	(get_lowcore()->machine_flags & MACHINE_FLAG_TLB_LC)
+#define MACHINE_HAS_TLB_GUEST	(get_lowcore()->machine_flags & MACHINE_FLAG_TLB_GUEST)
+#define MACHINE_HAS_NX		(get_lowcore()->machine_flags & MACHINE_FLAG_NX)
+#define MACHINE_HAS_GS		(get_lowcore()->machine_flags & MACHINE_FLAG_GS)
+#define MACHINE_HAS_SCC		(get_lowcore()->machine_flags & MACHINE_FLAG_SCC)
+#define MACHINE_HAS_PCI_MIO	(get_lowcore()->machine_flags & MACHINE_FLAG_PCI_MIO)
+#define MACHINE_HAS_RDP		(get_lowcore()->machine_flags & MACHINE_FLAG_RDP)
 
 /*
  * Console mode. Override with conmode=
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index 6e5b1b4b19a91..0b1ed637bfd62 100644
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -11,7 +11,7 @@
 #include <asm/lowcore.h>
 #include <asm/processor.h>
 
-#define raw_smp_processor_id()	(S390_lowcore.cpu_nr)
+#define raw_smp_processor_id()	(get_lowcore()->cpu_nr)
 
 extern struct mutex smp_cpu_state_mutex;
 extern unsigned int smp_cpu_mt_shift;
diff --git a/arch/s390/include/asm/softirq_stack.h b/arch/s390/include/asm/softirq_stack.h
index 1ac5115d3115e..42d61296bbad3 100644
--- a/arch/s390/include/asm/softirq_stack.h
+++ b/arch/s390/include/asm/softirq_stack.h
@@ -8,7 +8,7 @@
 #ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
 static inline void do_softirq_own_stack(void)
 {
-	call_on_stack(0, S390_lowcore.async_stack, void, __do_softirq);
+	call_on_stack(0, get_lowcore()->async_stack, void, __do_softirq);
 }
 #endif
 #endif /* __ASM_S390_SOFTIRQ_STACK_H */
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index 37127cd7749ea..3e43c90ff1354 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -16,7 +16,7 @@
 #include <asm/processor.h>
 #include <asm/alternative.h>
 
-#define SPINLOCK_LOCKVAL (S390_lowcore.spinlock_lockval)
+#define SPINLOCK_LOCKVAL (get_lowcore()->spinlock_lockval)
 
 extern int spin_retry;
 
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
index 4d646659a5f58..640901f2fbc3c 100644
--- a/arch/s390/include/asm/timex.h
+++ b/arch/s390/include/asm/timex.h
@@ -161,16 +161,16 @@ static inline unsigned long local_tick_disable(void)
 {
 	unsigned long old;
 
-	old = S390_lowcore.clock_comparator;
-	S390_lowcore.clock_comparator = clock_comparator_max;
-	set_clock_comparator(S390_lowcore.clock_comparator);
+	old = get_lowcore()->clock_comparator;
+	get_lowcore()->clock_comparator = clock_comparator_max;
+	set_clock_comparator(get_lowcore()->clock_comparator);
 	return old;
 }
 
 static inline void local_tick_enable(unsigned long comp)
 {
-	S390_lowcore.clock_comparator = comp;
-	set_clock_comparator(S390_lowcore.clock_comparator);
+	get_lowcore()->clock_comparator = comp;
+	set_clock_comparator(get_lowcore()->clock_comparator);
 }
 
 #define CLOCK_TICK_RATE		1193180 /* Underlying HZ */
diff --git a/arch/s390/include/asm/vtime.h b/arch/s390/include/asm/vtime.h
index 561c91c1a87c3..ef4dd7d057a29 100644
--- a/arch/s390/include/asm/vtime.h
+++ b/arch/s390/include/asm/vtime.h
@@ -4,16 +4,16 @@
 
 static inline void update_timer_sys(void)
 {
-	S390_lowcore.system_timer += S390_lowcore.last_update_timer - S390_lowcore.exit_timer;
-	S390_lowcore.user_timer += S390_lowcore.exit_timer - S390_lowcore.sys_enter_timer;
-	S390_lowcore.last_update_timer = S390_lowcore.sys_enter_timer;
+	get_lowcore()->system_timer += get_lowcore()->last_update_timer - get_lowcore()->exit_timer;
+	get_lowcore()->user_timer += get_lowcore()->exit_timer - get_lowcore()->sys_enter_timer;
+	get_lowcore()->last_update_timer = get_lowcore()->sys_enter_timer;
 }
 
 static inline void update_timer_mcck(void)
 {
-	S390_lowcore.system_timer += S390_lowcore.last_update_timer - S390_lowcore.exit_timer;
-	S390_lowcore.user_timer += S390_lowcore.exit_timer - S390_lowcore.mcck_enter_timer;
-	S390_lowcore.last_update_timer = S390_lowcore.mcck_enter_timer;
+	get_lowcore()->system_timer += get_lowcore()->last_update_timer - get_lowcore()->exit_timer;
+	get_lowcore()->user_timer += get_lowcore()->exit_timer - get_lowcore()->mcck_enter_timer;
+	get_lowcore()->last_update_timer = get_lowcore()->mcck_enter_timer;
 }
 
 #endif /* _S390_VTIME_H */
diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c
index d2012635b0939..1ecd0580561f6 100644
--- a/arch/s390/kernel/dumpstack.c
+++ b/arch/s390/kernel/dumpstack.c
@@ -61,28 +61,28 @@ static bool in_task_stack(unsigned long sp, struct task_struct *task,
 
 static bool in_irq_stack(unsigned long sp, struct stack_info *info)
 {
-	unsigned long stack = S390_lowcore.async_stack - STACK_INIT_OFFSET;
+	unsigned long stack = get_lowcore()->async_stack - STACK_INIT_OFFSET;
 
 	return in_stack(sp, info, STACK_TYPE_IRQ, stack);
 }
 
 static bool in_nodat_stack(unsigned long sp, struct stack_info *info)
 {
-	unsigned long stack = S390_lowcore.nodat_stack - STACK_INIT_OFFSET;
+	unsigned long stack = get_lowcore()->nodat_stack - STACK_INIT_OFFSET;
 
 	return in_stack(sp, info, STACK_TYPE_NODAT, stack);
 }
 
 static bool in_mcck_stack(unsigned long sp, struct stack_info *info)
 {
-	unsigned long stack = S390_lowcore.mcck_stack - STACK_INIT_OFFSET;
+	unsigned long stack = get_lowcore()->mcck_stack - STACK_INIT_OFFSET;
 
 	return in_stack(sp, info, STACK_TYPE_MCCK, stack);
 }
 
 static bool in_restart_stack(unsigned long sp, struct stack_info *info)
 {
-	unsigned long stack = S390_lowcore.restart_stack - STACK_INIT_OFFSET;
+	unsigned long stack = get_lowcore()->restart_stack - STACK_INIT_OFFSET;
 
 	return in_stack(sp, info, STACK_TYPE_RESTART, stack);
 }
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index c666271433fb0..467ed4dba817b 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -72,7 +72,7 @@ static void __init reset_tod_clock(void)
 
 	memset(&tod_clock_base, 0, sizeof(tod_clock_base));
 	tod_clock_base.tod = TOD_UNIX_EPOCH;
-	S390_lowcore.last_update_clock = TOD_UNIX_EPOCH;
+	get_lowcore()->last_update_clock = TOD_UNIX_EPOCH;
 }
 
 /*
@@ -99,7 +99,7 @@ static noinline __init void detect_machine_type(void)
 
 	/* Check current-configuration-level */
 	if (stsi(NULL, 0, 0, 0) <= 2) {
-		S390_lowcore.machine_flags |= MACHINE_FLAG_LPAR;
+		get_lowcore()->machine_flags |= MACHINE_FLAG_LPAR;
 		return;
 	}
 	/* Get virtual-machine cpu information. */
@@ -108,9 +108,9 @@ static noinline __init void detect_machine_type(void)
 
 	/* Detect known hypervisors */
 	if (!memcmp(vmms->vm[0].cpi, "\xd2\xe5\xd4", 3))
-		S390_lowcore.machine_flags |= MACHINE_FLAG_KVM;
+		get_lowcore()->machine_flags |= MACHINE_FLAG_KVM;
 	else if (!memcmp(vmms->vm[0].cpi, "\xa9\x61\xe5\xd4", 4))
-		S390_lowcore.machine_flags |= MACHINE_FLAG_VM;
+		get_lowcore()->machine_flags |= MACHINE_FLAG_VM;
 }
 
 /* Remove leading, trailing and double whitespace. */
@@ -166,7 +166,7 @@ static __init void setup_topology(void)
 
 	if (!test_facility(11))
 		return;
-	S390_lowcore.machine_flags |= MACHINE_FLAG_TOPOLOGY;
+	get_lowcore()->machine_flags |= MACHINE_FLAG_TOPOLOGY;
 	for (max_mnest = 6; max_mnest > 1; max_mnest--) {
 		if (stsi(&sysinfo_page, 15, 1, max_mnest) == 0)
 			break;
@@ -186,8 +186,8 @@ static noinline __init void setup_lowcore_early(void)
 
 	psw.addr = (unsigned long)early_pgm_check_handler;
 	psw.mask = PSW_KERNEL_BITS;
-	S390_lowcore.program_new_psw = psw;
-	S390_lowcore.preempt_count = INIT_PREEMPT_COUNT;
+	get_lowcore()->program_new_psw = psw;
+	get_lowcore()->preempt_count = INIT_PREEMPT_COUNT;
 }
 
 static noinline __init void setup_facility_list(void)
@@ -211,43 +211,43 @@ static __init void detect_diag9c(void)
 		EX_TABLE(0b,1b)
 		: "=d" (rc) : "0" (-EOPNOTSUPP), "d" (cpu_address) : "cc");
 	if (!rc)
-		S390_lowcore.machine_flags |= MACHINE_FLAG_DIAG9C;
+		get_lowcore()->machine_flags |= MACHINE_FLAG_DIAG9C;
 }
 
 static __init void detect_machine_facilities(void)
 {
 	if (test_facility(8)) {
-		S390_lowcore.machine_flags |= MACHINE_FLAG_EDAT1;
+		get_lowcore()->machine_flags |= MACHINE_FLAG_EDAT1;
 		system_ctl_set_bit(0, CR0_EDAT_BIT);
 	}
 	if (test_facility(78))
-		S390_lowcore.machine_flags |= MACHINE_FLAG_EDAT2;
+		get_lowcore()->machine_flags |= MACHINE_FLAG_EDAT2;
 	if (test_facility(3))
-		S390_lowcore.machine_flags |= MACHINE_FLAG_IDTE;
+		get_lowcore()->machine_flags |= MACHINE_FLAG_IDTE;
 	if (test_facility(50) && test_facility(73)) {
-		S390_lowcore.machine_flags |= MACHINE_FLAG_TE;
+		get_lowcore()->machine_flags |= MACHINE_FLAG_TE;
 		system_ctl_set_bit(0, CR0_TRANSACTIONAL_EXECUTION_BIT);
 	}
 	if (test_facility(51))
-		S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC;
+		get_lowcore()->machine_flags |= MACHINE_FLAG_TLB_LC;
 	if (test_facility(129))
 		system_ctl_set_bit(0, CR0_VECTOR_BIT);
 	if (test_facility(130))
-		S390_lowcore.machine_flags |= MACHINE_FLAG_NX;
+		get_lowcore()->machine_flags |= MACHINE_FLAG_NX;
 	if (test_facility(133))
-		S390_lowcore.machine_flags |= MACHINE_FLAG_GS;
+		get_lowcore()->machine_flags |= MACHINE_FLAG_GS;
 	if (test_facility(139) && (tod_clock_base.tod >> 63)) {
 		/* Enabled signed clock comparator comparisons */
-		S390_lowcore.machine_flags |= MACHINE_FLAG_SCC;
+		get_lowcore()->machine_flags |= MACHINE_FLAG_SCC;
 		clock_comparator_max = -1ULL >> 1;
 		system_ctl_set_bit(0, CR0_CLOCK_COMPARATOR_SIGN_BIT);
 	}
 	if (IS_ENABLED(CONFIG_PCI) && test_facility(153)) {
-		S390_lowcore.machine_flags |= MACHINE_FLAG_PCI_MIO;
+		get_lowcore()->machine_flags |= MACHINE_FLAG_PCI_MIO;
 		/* the control bit is set during PCI initialization */
 	}
 	if (test_facility(194))
-		S390_lowcore.machine_flags |= MACHINE_FLAG_RDP;
+		get_lowcore()->machine_flags |= MACHINE_FLAG_RDP;
 }
 
 static inline void save_vector_registers(void)
diff --git a/arch/s390/kernel/idle.c b/arch/s390/kernel/idle.c
index af9c97c0ad730..2c34e02ae64ba 100644
--- a/arch/s390/kernel/idle.c
+++ b/arch/s390/kernel/idle.c
@@ -34,13 +34,13 @@ void account_idle_time_irq(void)
 			this_cpu_add(mt_cycles[i], cycles_new[i] - idle->mt_cycles_enter[i]);
 	}
 
-	idle_time = S390_lowcore.int_clock - idle->clock_idle_enter;
+	idle_time = get_lowcore()->int_clock - idle->clock_idle_enter;
 
-	S390_lowcore.steal_timer += idle->clock_idle_enter - S390_lowcore.last_update_clock;
-	S390_lowcore.last_update_clock = S390_lowcore.int_clock;
+	get_lowcore()->steal_timer += idle->clock_idle_enter - get_lowcore()->last_update_clock;
+	get_lowcore()->last_update_clock = get_lowcore()->int_clock;
 
-	S390_lowcore.system_timer += S390_lowcore.last_update_timer - idle->timer_idle_enter;
-	S390_lowcore.last_update_timer = S390_lowcore.sys_enter_timer;
+	get_lowcore()->system_timer += get_lowcore()->last_update_timer - idle->timer_idle_enter;
+	get_lowcore()->last_update_timer = get_lowcore()->sys_enter_timer;
 
 	/* Account time spent with enabled wait psw loaded as idle time. */
 	WRITE_ONCE(idle->idle_time, READ_ONCE(idle->idle_time) + idle_time);
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index 9acc6630abd31..1af5a08d72abe 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -100,8 +100,8 @@ static const struct irq_class irqclass_sub_desc[] = {
 
 static void do_IRQ(struct pt_regs *regs, int irq)
 {
-	if (tod_after_eq(S390_lowcore.int_clock,
-			 S390_lowcore.clock_comparator))
+	if (tod_after_eq(get_lowcore()->int_clock,
+			 get_lowcore()->clock_comparator))
 		/* Serve timer interrupts first. */
 		clock_comparator_work();
 	generic_handle_irq(irq);
@@ -111,7 +111,7 @@ static int on_async_stack(void)
 {
 	unsigned long frame = current_frame_address();
 
-	return ((S390_lowcore.async_stack ^ frame) & ~(THREAD_SIZE - 1)) == 0;
+	return ((get_lowcore()->async_stack ^ frame) & ~(THREAD_SIZE - 1)) == 0;
 }
 
 static void do_irq_async(struct pt_regs *regs, int irq)
@@ -119,7 +119,7 @@ static void do_irq_async(struct pt_regs *regs, int irq)
 	if (on_async_stack()) {
 		do_IRQ(regs, irq);
 	} else {
-		call_on_stack(2, S390_lowcore.async_stack, void, do_IRQ,
+		call_on_stack(2, get_lowcore()->async_stack, void, do_IRQ,
 			      struct pt_regs *, regs, int, irq);
 	}
 }
@@ -153,8 +153,8 @@ void noinstr do_io_irq(struct pt_regs *regs)
 
 	set_cpu_flag(CIF_NOHZ_DELAY);
 	do {
-		regs->tpi_info = S390_lowcore.tpi_info;
-		if (S390_lowcore.tpi_info.adapter_IO)
+		regs->tpi_info = get_lowcore()->tpi_info;
+		if (get_lowcore()->tpi_info.adapter_IO)
 			do_irq_async(regs, THIN_INTERRUPT);
 		else
 			do_irq_async(regs, IO_INTERRUPT);
@@ -183,9 +183,9 @@ void noinstr do_ext_irq(struct pt_regs *regs)
 			current->thread.last_break = regs->last_break;
 	}
 
-	regs->int_code = S390_lowcore.ext_int_code_addr;
-	regs->int_parm = S390_lowcore.ext_params;
-	regs->int_parm_long = S390_lowcore.ext_params2;
+	regs->int_code = get_lowcore()->ext_int_code_addr;
+	regs->int_parm = get_lowcore()->ext_params;
+	regs->int_parm_long = get_lowcore()->ext_params2;
 
 	from_idle = test_and_clear_cpu_flag(CIF_ENABLED_WAIT);
 	if (from_idle)
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index 3aee98efc3742..f4cf65da6d499 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -52,7 +52,7 @@ static void __do_machine_kdump(void *data)
 	purgatory = (purgatory_t)image->start;
 
 	/* store_status() saved the prefix register to lowcore */
-	prefix = (unsigned long) S390_lowcore.prefixreg_save_area;
+	prefix = (unsigned long)get_lowcore()->prefixreg_save_area;
 
 	/* Now do the reset  */
 	s390_reset_system();
@@ -91,7 +91,7 @@ static noinline void __machine_kdump(void *image)
 			continue;
 	}
 	/* Store status of the boot CPU */
-	mcesa = __va(S390_lowcore.mcesad & MCESA_ORIGIN_MASK);
+	mcesa = __va(get_lowcore()->mcesad & MCESA_ORIGIN_MASK);
 	if (cpu_has_vx())
 		save_vx_regs((__vector128 *) mcesa->vector_save_area);
 	if (MACHINE_HAS_GS) {
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index 230d010bac9b3..db562416d7286 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -125,7 +125,7 @@ static notrace void s390_handle_damage(void)
 	smp_emergency_stop();
 	diag_amode31_ops.diag308_reset();
 	ptr = nmi_puts(message, "System stopped due to unrecoverable machine check, code: 0x");
-	u64_to_hex(ptr, S390_lowcore.mcck_interruption_code);
+	u64_to_hex(ptr, get_lowcore()->mcck_interruption_code);
 
 	/*
 	 * Disable low address protection and make machine check new PSW a
@@ -135,17 +135,17 @@ static notrace void s390_handle_damage(void)
 	cr0_new = cr0;
 	cr0_new.lap = 0;
 	local_ctl_load(0, &cr0_new.reg);
-	psw_save = S390_lowcore.mcck_new_psw;
-	psw_bits(S390_lowcore.mcck_new_psw).io = 0;
-	psw_bits(S390_lowcore.mcck_new_psw).ext = 0;
-	psw_bits(S390_lowcore.mcck_new_psw).wait = 1;
+	psw_save = get_lowcore()->mcck_new_psw;
+	psw_bits(get_lowcore()->mcck_new_psw).io = 0;
+	psw_bits(get_lowcore()->mcck_new_psw).ext = 0;
+	psw_bits(get_lowcore()->mcck_new_psw).wait = 1;
 	sclp_emergency_printk(message);
 
 	/*
 	 * Restore machine check new PSW and control register 0 to original
 	 * values. This makes possible system dump analysis easier.
 	 */
-	S390_lowcore.mcck_new_psw = psw_save;
+	get_lowcore()->mcck_new_psw = psw_save;
 	local_ctl_load(0, &cr0.reg);
 	disabled_wait();
 	while (1);
@@ -226,7 +226,7 @@ static bool notrace nmi_registers_valid(union mci mci)
 	/*
 	 * Set the clock comparator register to the next expected value.
 	 */
-	set_clock_comparator(S390_lowcore.clock_comparator);
+	set_clock_comparator(get_lowcore()->clock_comparator);
 	if (!mci.gr || !mci.fp || !mci.fc)
 		return false;
 	/*
@@ -252,7 +252,7 @@ static bool notrace nmi_registers_valid(union mci mci)
 	 * check handling must take care of this. The host values are saved by
 	 * KVM and are not affected.
 	 */
-	cr2.reg = S390_lowcore.cregs_save_area[2];
+	cr2.reg = get_lowcore()->cregs_save_area[2];
 	if (cr2.gse && !mci.gs && !test_cpu_flag(CIF_MCCK_GUEST))
 		return false;
 	if (!mci.ms || !mci.pm || !mci.ia)
@@ -278,11 +278,10 @@ static void notrace s390_backup_mcck_info(struct pt_regs *regs)
 
 	sie_page = container_of(sie_block, struct sie_page, sie_block);
 	mcck_backup = &sie_page->mcck_info;
-	mcck_backup->mcic = S390_lowcore.mcck_interruption_code &
+	mcck_backup->mcic = get_lowcore()->mcck_interruption_code &
 				~(MCCK_CODE_CP | MCCK_CODE_EXT_DAMAGE);
-	mcck_backup->ext_damage_code = S390_lowcore.external_damage_code;
-	mcck_backup->failing_storage_address
-			= S390_lowcore.failing_storage_address;
+	mcck_backup->ext_damage_code = get_lowcore()->external_damage_code;
+	mcck_backup->failing_storage_address = get_lowcore()->failing_storage_address;
 }
 NOKPROBE_SYMBOL(s390_backup_mcck_info);
 
@@ -314,7 +313,7 @@ void notrace s390_do_machine_check(struct pt_regs *regs)
 	if (user_mode(regs))
 		update_timer_mcck();
 	inc_irq_stat(NMI_NMI);
-	mci.val = S390_lowcore.mcck_interruption_code;
+	mci.val = get_lowcore()->mcck_interruption_code;
 	mcck = this_cpu_ptr(&cpu_mcck);
 
 	/*
@@ -382,9 +381,9 @@ void notrace s390_do_machine_check(struct pt_regs *regs)
 	}
 	if (mci.ed && mci.ec) {
 		/* External damage */
-		if (S390_lowcore.external_damage_code & (1U << ED_STP_SYNC))
+		if (get_lowcore()->external_damage_code & (1U << ED_STP_SYNC))
 			mcck->stp_queue |= stp_sync_check();
-		if (S390_lowcore.external_damage_code & (1U << ED_STP_ISLAND))
+		if (get_lowcore()->external_damage_code & (1U << ED_STP_ISLAND))
 			mcck->stp_queue |= stp_island_check();
 		mcck_pending = 1;
 	}
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index 06efad5b4f931..736c1d9632dd5 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -1022,7 +1022,7 @@ static void cpumsf_pmu_enable(struct pmu *pmu)
 	}
 
 	/* Load current program parameter */
-	lpp(&S390_lowcore.lpp);
+	lpp(&get_lowcore()->lpp);
 
 	debug_sprintf_event(sfdbg, 6, "%s: es %i cs %i ed %i cd %i "
 			    "interval %#lx tear %#lx dear %#lx\n", __func__,
diff --git a/arch/s390/kernel/perf_pai_crypto.c b/arch/s390/kernel/perf_pai_crypto.c
index 95079a2891098..2f5a20e300f6f 100644
--- a/arch/s390/kernel/perf_pai_crypto.c
+++ b/arch/s390/kernel/perf_pai_crypto.c
@@ -372,7 +372,7 @@ static int paicrypt_add(struct perf_event *event, int flags)
 
 	if (++cpump->active_events == 1) {
 		ccd = virt_to_phys(cpump->page) | PAI_CRYPTO_KERNEL_OFFSET;
-		WRITE_ONCE(S390_lowcore.ccd, ccd);
+		WRITE_ONCE(get_lowcore()->ccd, ccd);
 		local_ctl_set_bit(0, CR0_CRYPTOGRAPHY_COUNTER_BIT);
 	}
 	if (flags & PERF_EF_START)
@@ -409,7 +409,7 @@ static void paicrypt_del(struct perf_event *event, int flags)
 	paicrypt_stop(event, PERF_EF_UPDATE);
 	if (--cpump->active_events == 0) {
 		local_ctl_clear_bit(0, CR0_CRYPTOGRAPHY_COUNTER_BIT);
-		WRITE_ONCE(S390_lowcore.ccd, 0);
+		WRITE_ONCE(get_lowcore()->ccd, 0);
 	}
 }
 
diff --git a/arch/s390/kernel/perf_pai_ext.c b/arch/s390/kernel/perf_pai_ext.c
index 6040f3c2b9424..6295531b39a20 100644
--- a/arch/s390/kernel/perf_pai_ext.c
+++ b/arch/s390/kernel/perf_pai_ext.c
@@ -389,7 +389,7 @@ static int paiext_add(struct perf_event *event, int flags)
 	struct paiext_cb *pcb = cpump->paiext_cb;
 
 	if (++cpump->active_events == 1) {
-		S390_lowcore.aicd = virt_to_phys(cpump->paiext_cb);
+		get_lowcore()->aicd = virt_to_phys(cpump->paiext_cb);
 		pcb->acc = virt_to_phys(cpump->area) | 0x1;
 		/* Enable CPU instruction lookup for PAIE1 control block */
 		local_ctl_set_bit(0, CR0_PAI_EXTENSION_BIT);
@@ -431,7 +431,7 @@ static void paiext_del(struct perf_event *event, int flags)
 		/* Disable CPU instruction lookup for PAIE1 control block */
 		local_ctl_clear_bit(0, CR0_PAI_EXTENSION_BIT);
 		pcb->acc = 0;
-		S390_lowcore.aicd = 0;
+		get_lowcore()->aicd = 0;
 	}
 }
 
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index d8740631df4bc..9637aee43c401 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -71,10 +71,10 @@ void flush_thread(void)
 
 void arch_setup_new_exec(void)
 {
-	if (S390_lowcore.current_pid != current->pid) {
-		S390_lowcore.current_pid = current->pid;
+	if (get_lowcore()->current_pid != current->pid) {
+		get_lowcore()->current_pid = current->pid;
 		if (test_facility(40))
-			lpp(&S390_lowcore.lpp);
+			lpp(&get_lowcore()->lpp);
 	}
 }
 
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 90c2c786bb355..3993f4caf224a 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -421,16 +421,16 @@ static void __init setup_lowcore(void)
 	lc->clock_comparator = clock_comparator_max;
 	lc->current_task = (unsigned long)&init_task;
 	lc->lpp = LPP_MAGIC;
-	lc->machine_flags = S390_lowcore.machine_flags;
-	lc->preempt_count = S390_lowcore.preempt_count;
+	lc->machine_flags = get_lowcore()->machine_flags;
+	lc->preempt_count = get_lowcore()->preempt_count;
 	nmi_alloc_mcesa_early(&lc->mcesad);
-	lc->sys_enter_timer = S390_lowcore.sys_enter_timer;
-	lc->exit_timer = S390_lowcore.exit_timer;
-	lc->user_timer = S390_lowcore.user_timer;
-	lc->system_timer = S390_lowcore.system_timer;
-	lc->steal_timer = S390_lowcore.steal_timer;
-	lc->last_update_timer = S390_lowcore.last_update_timer;
-	lc->last_update_clock = S390_lowcore.last_update_clock;
+	lc->sys_enter_timer = get_lowcore()->sys_enter_timer;
+	lc->exit_timer = get_lowcore()->exit_timer;
+	lc->user_timer = get_lowcore()->user_timer;
+	lc->system_timer = get_lowcore()->system_timer;
+	lc->steal_timer = get_lowcore()->steal_timer;
+	lc->last_update_timer = get_lowcore()->last_update_timer;
+	lc->last_update_clock = get_lowcore()->last_update_clock;
 	/*
 	 * Allocate the global restart stack which is the same for
 	 * all CPUs in case *one* of them does a PSW restart.
@@ -439,7 +439,7 @@ static void __init setup_lowcore(void)
 	lc->mcck_stack = stack_alloc_early() + STACK_INIT_OFFSET;
 	lc->async_stack = stack_alloc_early() + STACK_INIT_OFFSET;
 	lc->nodat_stack = stack_alloc_early() + STACK_INIT_OFFSET;
-	lc->kernel_stack = S390_lowcore.kernel_stack;
+	lc->kernel_stack = get_lowcore()->kernel_stack;
 	/*
 	 * Set up PSW restart to call ipl.c:do_restart(). Copy the relevant
 	 * restart data to the absolute zero lowcore. This is necessary if
@@ -455,8 +455,8 @@ static void __init setup_lowcore(void)
 	lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
 	lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
 	lc->preempt_count = PREEMPT_DISABLED;
-	lc->kernel_asce = S390_lowcore.kernel_asce;
-	lc->user_asce = S390_lowcore.user_asce;
+	lc->kernel_asce = get_lowcore()->kernel_asce;
+	lc->user_asce = get_lowcore()->user_asce;
 
 	system_ctlreg_init_save_area(lc);
 	abs_lc = get_abs_lowcore();
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 0324649aae0a3..ebe4bc326a6b0 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -203,7 +203,7 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
 	mcck_stack = stack_alloc();
 	if (!lc || !nodat_stack || !async_stack || !mcck_stack)
 		goto out;
-	memcpy(lc, &S390_lowcore, 512);
+	memcpy(lc, get_lowcore(), 512);
 	memset((char *) lc + 512, 0, sizeof(*lc) - 512);
 	lc->async_stack = async_stack + STACK_INIT_OFFSET;
 	lc->nodat_stack = nodat_stack + STACK_INIT_OFFSET;
@@ -265,9 +265,9 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
 	lc->spinlock_lockval = arch_spin_lockval(cpu);
 	lc->spinlock_index = 0;
 	lc->percpu_offset = __per_cpu_offset[cpu];
-	lc->kernel_asce = S390_lowcore.kernel_asce;
+	lc->kernel_asce = get_lowcore()->kernel_asce;
 	lc->user_asce = s390_invalid_asce;
-	lc->machine_flags = S390_lowcore.machine_flags;
+	lc->machine_flags = get_lowcore()->machine_flags;
 	lc->user_timer = lc->system_timer =
 		lc->steal_timer = lc->avg_steal_timer = 0;
 	abs_lc = get_abs_lowcore();
@@ -407,7 +407,7 @@ void smp_call_ipl_cpu(void (*func)(void *), void *data)
 	struct lowcore *lc = lowcore_ptr[0];
 
 	if (pcpu_devices[0].address == stap())
-		lc = &S390_lowcore;
+		lc = get_lowcore();
 
 	pcpu_delegate(&pcpu_devices[0], func, data,
 		      lc->nodat_stack);
@@ -844,13 +844,13 @@ static void smp_start_secondary(void *cpuvoid)
 {
 	int cpu = raw_smp_processor_id();
 
-	S390_lowcore.last_update_clock = get_tod_clock();
-	S390_lowcore.restart_stack = (unsigned long)restart_stack;
-	S390_lowcore.restart_fn = (unsigned long)do_restart;
-	S390_lowcore.restart_data = 0;
-	S390_lowcore.restart_source = -1U;
-	S390_lowcore.restart_flags = 0;
-	restore_access_regs(S390_lowcore.access_regs_save_area);
+	get_lowcore()->last_update_clock = get_tod_clock();
+	get_lowcore()->restart_stack = (unsigned long)restart_stack;
+	get_lowcore()->restart_fn = (unsigned long)do_restart;
+	get_lowcore()->restart_data = 0;
+	get_lowcore()->restart_source = -1U;
+	get_lowcore()->restart_flags = 0;
+	restore_access_regs(get_lowcore()->access_regs_save_area);
 	cpu_init();
 	rcutree_report_cpu_starting(cpu);
 	init_cpu_timer();
@@ -981,16 +981,16 @@ void __init smp_prepare_boot_cpu(void)
 
 	WARN_ON(!cpu_present(0) || !cpu_online(0));
 	pcpu->state = CPU_STATE_CONFIGURED;
-	S390_lowcore.percpu_offset = __per_cpu_offset[0];
+	get_lowcore()->percpu_offset = __per_cpu_offset[0];
 	smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN);
 }
 
 void __init smp_setup_processor_id(void)
 {
 	pcpu_devices[0].address = stap();
-	S390_lowcore.cpu_nr = 0;
-	S390_lowcore.spinlock_lockval = arch_spin_lockval(0);
-	S390_lowcore.spinlock_index = 0;
+	get_lowcore()->cpu_nr = 0;
+	get_lowcore()->spinlock_lockval = arch_spin_lockval(0);
+	get_lowcore()->spinlock_index = 0;
 }
 
 /*
diff --git a/arch/s390/kernel/syscall.c b/arch/s390/kernel/syscall.c
index dc2355c623d6e..7b9bf0ad364d3 100644
--- a/arch/s390/kernel/syscall.c
+++ b/arch/s390/kernel/syscall.c
@@ -151,8 +151,8 @@ void noinstr __do_syscall(struct pt_regs *regs, int per_trap)
 {
 	add_random_kstack_offset();
 	enter_from_user_mode(regs);
-	regs->psw = S390_lowcore.svc_old_psw;
-	regs->int_code = S390_lowcore.svc_int_code;
+	regs->psw = get_lowcore()->svc_old_psw;
+	regs->int_code = get_lowcore()->svc_int_code;
 	update_timer_sys();
 	if (static_branch_likely(&cpu_has_bear))
 		current->thread.last_break = regs->last_break;
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index fb9f31f366281..b713effe05796 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -131,7 +131,7 @@ void clock_comparator_work(void)
 {
 	struct clock_event_device *cd;
 
-	S390_lowcore.clock_comparator = clock_comparator_max;
+	get_lowcore()->clock_comparator = clock_comparator_max;
 	cd = this_cpu_ptr(&comparators);
 	cd->event_handler(cd);
 }
@@ -139,8 +139,8 @@ void clock_comparator_work(void)
 static int s390_next_event(unsigned long delta,
 			   struct clock_event_device *evt)
 {
-	S390_lowcore.clock_comparator = get_tod_clock() + delta;
-	set_clock_comparator(S390_lowcore.clock_comparator);
+	get_lowcore()->clock_comparator = get_tod_clock() + delta;
+	set_clock_comparator(get_lowcore()->clock_comparator);
 	return 0;
 }
 
@@ -153,8 +153,8 @@ void init_cpu_timer(void)
 	struct clock_event_device *cd;
 	int cpu;
 
-	S390_lowcore.clock_comparator = clock_comparator_max;
-	set_clock_comparator(S390_lowcore.clock_comparator);
+	get_lowcore()->clock_comparator = clock_comparator_max;
+	set_clock_comparator(get_lowcore()->clock_comparator);
 
 	cpu = smp_processor_id();
 	cd = &per_cpu(comparators, cpu);
@@ -184,8 +184,8 @@ static void clock_comparator_interrupt(struct ext_code ext_code,
 				       unsigned long param64)
 {
 	inc_irq_stat(IRQEXT_CLK);
-	if (S390_lowcore.clock_comparator == clock_comparator_max)
-		set_clock_comparator(S390_lowcore.clock_comparator);
+	if (get_lowcore()->clock_comparator == clock_comparator_max)
+		set_clock_comparator(get_lowcore()->clock_comparator);
 }
 
 static void stp_timing_alert(struct stp_irq_parm *);
@@ -408,12 +408,12 @@ static void clock_sync_global(long delta)
 static void clock_sync_local(long delta)
 {
 	/* Add the delta to the clock comparator. */
-	if (S390_lowcore.clock_comparator != clock_comparator_max) {
-		S390_lowcore.clock_comparator += delta;
-		set_clock_comparator(S390_lowcore.clock_comparator);
+	if (get_lowcore()->clock_comparator != clock_comparator_max) {
+		get_lowcore()->clock_comparator += delta;
+		set_clock_comparator(get_lowcore()->clock_comparator);
 	}
 	/* Adjust the last_update_clock time-stamp. */
-	S390_lowcore.last_update_clock += delta;
+	get_lowcore()->last_update_clock += delta;
 }
 
 /* Single threaded workqueue used for stp sync events */
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 52578b5cecbd9..8b904f7efb0e3 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -293,10 +293,10 @@ void __init trap_init(void)
 
 	local_irq_save(flags);
 	cr0 = local_ctl_clear_bit(0, CR0_LOW_ADDRESS_PROTECTION_BIT);
-	psw_bits(S390_lowcore.external_new_psw).mcheck = 1;
-	psw_bits(S390_lowcore.program_new_psw).mcheck = 1;
-	psw_bits(S390_lowcore.svc_new_psw).mcheck = 1;
-	psw_bits(S390_lowcore.io_new_psw).mcheck = 1;
+	psw_bits(get_lowcore()->external_new_psw).mcheck = 1;
+	psw_bits(get_lowcore()->program_new_psw).mcheck = 1;
+	psw_bits(get_lowcore()->svc_new_psw).mcheck = 1;
+	psw_bits(get_lowcore()->io_new_psw).mcheck = 1;
 	local_ctl_load(0, &cr0);
 	local_irq_restore(flags);
 	local_mcck_enable();
@@ -310,8 +310,8 @@ void noinstr __do_pgm_check(struct pt_regs *regs)
 	unsigned int trapnr;
 	irqentry_state_t state;
 
-	regs->int_code = S390_lowcore.pgm_int_code;
-	regs->int_parm_long = S390_lowcore.trans_exc_code;
+	regs->int_code = get_lowcore()->pgm_int_code;
+	regs->int_parm_long = get_lowcore()->trans_exc_code;
 
 	state = irqentry_enter(regs);
 
@@ -324,19 +324,19 @@ void noinstr __do_pgm_check(struct pt_regs *regs)
 		current->thread.last_break = regs->last_break;
 	}
 
-	if (S390_lowcore.pgm_code & 0x0200) {
+	if (get_lowcore()->pgm_code & 0x0200) {
 		/* transaction abort */
-		current->thread.trap_tdb = S390_lowcore.pgm_tdb;
+		current->thread.trap_tdb = get_lowcore()->pgm_tdb;
 	}
 
-	if (S390_lowcore.pgm_code & PGM_INT_CODE_PER) {
+	if (get_lowcore()->pgm_code & PGM_INT_CODE_PER) {
 		if (user_mode(regs)) {
 			struct per_event *ev = &current->thread.per_event;
 
 			set_thread_flag(TIF_PER_TRAP);
-			ev->address = S390_lowcore.per_address;
-			ev->cause = S390_lowcore.per_code_combined;
-			ev->paid = S390_lowcore.per_access_id;
+			ev->address = get_lowcore()->per_address;
+			ev->cause = get_lowcore()->per_code_combined;
+			ev->paid = get_lowcore()->per_access_id;
 		} else {
 			/* PER event in kernel is kprobes */
 			__arch_local_irq_ssm(regs->psw.mask & ~PSW_MASK_PER);
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index ffc1db0cbf9c2..7d8991c3cd3a0 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -41,8 +41,8 @@ static inline void set_vtimer(u64 expires)
 		"	stpt	%0\n"	/* Store current cpu timer value */
 		"	spt	%1"	/* Set new value imm. afterwards */
 		: "=Q" (timer) : "Q" (expires));
-	S390_lowcore.system_timer += S390_lowcore.last_update_timer - timer;
-	S390_lowcore.last_update_timer = expires;
+	get_lowcore()->system_timer += get_lowcore()->last_update_timer - timer;
+	get_lowcore()->last_update_timer = expires;
 }
 
 static inline int virt_timer_forward(u64 elapsed)
@@ -118,21 +118,21 @@ static int do_account_vtime(struct task_struct *tsk)
 {
 	u64 timer, clock, user, guest, system, hardirq, softirq;
 
-	timer = S390_lowcore.last_update_timer;
-	clock = S390_lowcore.last_update_clock;
+	timer = get_lowcore()->last_update_timer;
+	clock = get_lowcore()->last_update_clock;
 	asm volatile(
 		"	stpt	%0\n"	/* Store current cpu timer value */
 		"	stckf	%1"	/* Store current tod clock value */
-		: "=Q" (S390_lowcore.last_update_timer),
-		  "=Q" (S390_lowcore.last_update_clock)
+		: "=Q" (get_lowcore()->last_update_timer),
+		  "=Q" (get_lowcore()->last_update_clock)
 		: : "cc");
-	clock = S390_lowcore.last_update_clock - clock;
-	timer -= S390_lowcore.last_update_timer;
+	clock = get_lowcore()->last_update_clock - clock;
+	timer -= get_lowcore()->last_update_timer;
 
 	if (hardirq_count())
-		S390_lowcore.hardirq_timer += timer;
+		get_lowcore()->hardirq_timer += timer;
 	else
-		S390_lowcore.system_timer += timer;
+		get_lowcore()->system_timer += timer;
 
 	/* Update MT utilization calculation */
 	if (smp_cpu_mtid &&
@@ -141,16 +141,16 @@ static int do_account_vtime(struct task_struct *tsk)
 
 	/* Calculate cputime delta */
 	user = update_tsk_timer(&tsk->thread.user_timer,
-				READ_ONCE(S390_lowcore.user_timer));
+				READ_ONCE(get_lowcore()->user_timer));
 	guest = update_tsk_timer(&tsk->thread.guest_timer,
-				 READ_ONCE(S390_lowcore.guest_timer));
+				 READ_ONCE(get_lowcore()->guest_timer));
 	system = update_tsk_timer(&tsk->thread.system_timer,
-				  READ_ONCE(S390_lowcore.system_timer));
+				  READ_ONCE(get_lowcore()->system_timer));
 	hardirq = update_tsk_timer(&tsk->thread.hardirq_timer,
-				   READ_ONCE(S390_lowcore.hardirq_timer));
+				   READ_ONCE(get_lowcore()->hardirq_timer));
 	softirq = update_tsk_timer(&tsk->thread.softirq_timer,
-				   READ_ONCE(S390_lowcore.softirq_timer));
-	S390_lowcore.steal_timer +=
+				   READ_ONCE(get_lowcore()->softirq_timer));
+	get_lowcore()->steal_timer +=
 		clock - user - guest - system - hardirq - softirq;
 
 	/* Push account value */
@@ -177,16 +177,16 @@ static int do_account_vtime(struct task_struct *tsk)
 void vtime_task_switch(struct task_struct *prev)
 {
 	do_account_vtime(prev);
-	prev->thread.user_timer = S390_lowcore.user_timer;
-	prev->thread.guest_timer = S390_lowcore.guest_timer;
-	prev->thread.system_timer = S390_lowcore.system_timer;
-	prev->thread.hardirq_timer = S390_lowcore.hardirq_timer;
-	prev->thread.softirq_timer = S390_lowcore.softirq_timer;
-	S390_lowcore.user_timer = current->thread.user_timer;
-	S390_lowcore.guest_timer = current->thread.guest_timer;
-	S390_lowcore.system_timer = current->thread.system_timer;
-	S390_lowcore.hardirq_timer = current->thread.hardirq_timer;
-	S390_lowcore.softirq_timer = current->thread.softirq_timer;
+	prev->thread.user_timer = get_lowcore()->user_timer;
+	prev->thread.guest_timer = get_lowcore()->guest_timer;
+	prev->thread.system_timer = get_lowcore()->system_timer;
+	prev->thread.hardirq_timer = get_lowcore()->hardirq_timer;
+	prev->thread.softirq_timer = get_lowcore()->softirq_timer;
+	get_lowcore()->user_timer = current->thread.user_timer;
+	get_lowcore()->guest_timer = current->thread.guest_timer;
+	get_lowcore()->system_timer = current->thread.system_timer;
+	get_lowcore()->hardirq_timer = current->thread.hardirq_timer;
+	get_lowcore()->softirq_timer = current->thread.softirq_timer;
 }
 
 /*
@@ -201,23 +201,23 @@ void vtime_flush(struct task_struct *tsk)
 	if (do_account_vtime(tsk))
 		virt_timer_expire();
 
-	steal = S390_lowcore.steal_timer;
-	avg_steal = S390_lowcore.avg_steal_timer;
+	steal = get_lowcore()->steal_timer;
+	avg_steal = get_lowcore()->avg_steal_timer;
 	if ((s64) steal > 0) {
-		S390_lowcore.steal_timer = 0;
+		get_lowcore()->steal_timer = 0;
 		account_steal_time(cputime_to_nsecs(steal));
 		avg_steal += steal;
 	}
-	S390_lowcore.avg_steal_timer = avg_steal / 2;
+	get_lowcore()->avg_steal_timer = avg_steal / 2;
 }
 
 static u64 vtime_delta(void)
 {
-	u64 timer = S390_lowcore.last_update_timer;
+	u64 timer = get_lowcore()->last_update_timer;
 
-	S390_lowcore.last_update_timer = get_cpu_timer();
+	get_lowcore()->last_update_timer = get_cpu_timer();
 
-	return timer - S390_lowcore.last_update_timer;
+	return timer - get_lowcore()->last_update_timer;
 }
 
 /*
@@ -229,9 +229,9 @@ void vtime_account_kernel(struct task_struct *tsk)
 	u64 delta = vtime_delta();
 
 	if (tsk->flags & PF_VCPU)
-		S390_lowcore.guest_timer += delta;
+		get_lowcore()->guest_timer += delta;
 	else
-		S390_lowcore.system_timer += delta;
+		get_lowcore()->system_timer += delta;
 
 	virt_timer_forward(delta);
 }
@@ -241,7 +241,7 @@ void vtime_account_softirq(struct task_struct *tsk)
 {
 	u64 delta = vtime_delta();
 
-	S390_lowcore.softirq_timer += delta;
+	get_lowcore()->softirq_timer += delta;
 
 	virt_timer_forward(delta);
 }
@@ -250,7 +250,7 @@ void vtime_account_hardirq(struct task_struct *tsk)
 {
 	u64 delta = vtime_delta();
 
-	S390_lowcore.hardirq_timer += delta;
+	get_lowcore()->hardirq_timer += delta;
 
 	virt_timer_forward(delta);
 }
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 82e9631cd9efb..50b77b7590425 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -4079,7 +4079,7 @@ static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
 bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
 {
 	/* do not poll with more than halt_poll_max_steal percent of steal time */
-	if (S390_lowcore.avg_steal_timer * 100 / (TICK_USEC << 12) >=
+	if (get_lowcore()->avg_steal_timer * 100 / (TICK_USEC << 12) >=
 	    READ_ONCE(halt_poll_max_steal)) {
 		vcpu->stat.halt_no_poll_steal++;
 		return true;
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index 81c53440b3e66..0c9a73a18826c 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -119,7 +119,7 @@ static inline void arch_spin_lock_queued(arch_spinlock_t *lp)
 	struct spin_wait *node, *next;
 	int lockval, ix, node_id, tail_id, old, new, owner, count;
 
-	ix = S390_lowcore.spinlock_index++;
+	ix = get_lowcore()->spinlock_index++;
 	barrier();
 	lockval = SPINLOCK_LOCKVAL;	/* cpu + 1 */
 	node = this_cpu_ptr(&spin_wait[ix]);
@@ -205,7 +205,7 @@ static inline void arch_spin_lock_queued(arch_spinlock_t *lp)
 	}
 
  out:
-	S390_lowcore.spinlock_index--;
+	get_lowcore()->spinlock_index--;
 }
 
 static inline void arch_spin_lock_classic(arch_spinlock_t *lp)
diff --git a/arch/s390/lib/test_unwind.c b/arch/s390/lib/test_unwind.c
index 2848e3fb2ff5e..768898dacb929 100644
--- a/arch/s390/lib/test_unwind.c
+++ b/arch/s390/lib/test_unwind.c
@@ -356,7 +356,7 @@ static noinline int unwindme_func2(struct unwindme *u)
 	if (u->flags & UWM_SWITCH_STACK) {
 		local_irq_save(flags);
 		local_mcck_save(mflags);
-		rc = call_on_stack(1, S390_lowcore.nodat_stack,
+		rc = call_on_stack(1, get_lowcore()->nodat_stack,
 				   int, unwindme_func3, struct unwindme *, u);
 		local_mcck_restore(mflags);
 		local_irq_restore(flags);
diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c
index 61d8dcd95bbcf..c7c269d5c491d 100644
--- a/arch/s390/lib/uaccess.c
+++ b/arch/s390/lib/uaccess.c
@@ -21,13 +21,13 @@ void debug_user_asce(int exit)
 
 	local_ctl_store(1, &cr1);
 	local_ctl_store(7, &cr7);
-	if (cr1.val == S390_lowcore.kernel_asce.val && cr7.val == S390_lowcore.user_asce.val)
+	if (cr1.val == get_lowcore()->kernel_asce.val && cr7.val == get_lowcore()->user_asce.val)
 		return;
 	panic("incorrect ASCE on kernel %s\n"
 	      "cr1:    %016lx cr7:  %016lx\n"
 	      "kernel: %016lx user: %016lx\n",
 	      exit ? "exit" : "entry", cr1.val, cr7.val,
-	      S390_lowcore.kernel_asce.val, S390_lowcore.user_asce.val);
+	      get_lowcore()->kernel_asce.val, get_lowcore()->user_asce.val);
 }
 #endif /*CONFIG_DEBUG_ENTRY */
 
diff --git a/arch/s390/mm/dump_pagetables.c b/arch/s390/mm/dump_pagetables.c
index ffd07ed7b4af8..45db5f47b22dd 100644
--- a/arch/s390/mm/dump_pagetables.c
+++ b/arch/s390/mm/dump_pagetables.c
@@ -288,7 +288,7 @@ static int pt_dump_init(void)
 	 * kernel ASCE. We need this to keep the page table walker functions
 	 * from accessing non-existent entries.
 	 */
-	max_addr = (S390_lowcore.kernel_asce.val & _REGION_ENTRY_TYPE_MASK) >> 2;
+	max_addr = (get_lowcore()->kernel_asce.val & _REGION_ENTRY_TYPE_MASK) >> 2;
 	max_addr = 1UL << (max_addr * 11 + 31);
 	address_markers[IDENTITY_AFTER_END_NR].start_address = ident_map_size;
 	address_markers[AMODE31_START_NR].start_address = (unsigned long)__samode31;
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 7cd50ad3b4ade..6b19a33c49c29 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -74,7 +74,7 @@ static enum fault_type get_fault_type(struct pt_regs *regs)
 			return USER_FAULT;
 		if (!IS_ENABLED(CONFIG_PGSTE))
 			return KERNEL_FAULT;
-		gmap = (struct gmap *)S390_lowcore.gmap;
+		gmap = (struct gmap *)get_lowcore()->gmap;
 		if (gmap && gmap->asce == regs->cr1)
 			return GMAP_FAULT;
 		return KERNEL_FAULT;
@@ -182,15 +182,15 @@ static void dump_fault_info(struct pt_regs *regs)
 	pr_cont("mode while using ");
 	switch (get_fault_type(regs)) {
 	case USER_FAULT:
-		asce = S390_lowcore.user_asce.val;
+		asce = get_lowcore()->user_asce.val;
 		pr_cont("user ");
 		break;
 	case GMAP_FAULT:
-		asce = ((struct gmap *)S390_lowcore.gmap)->asce;
+		asce = ((struct gmap *)get_lowcore()->gmap)->asce;
 		pr_cont("gmap ");
 		break;
 	case KERNEL_FAULT:
-		asce = S390_lowcore.kernel_asce.val;
+		asce = get_lowcore()->kernel_asce.val;
 		pr_cont("kernel ");
 		break;
 	default:
@@ -351,7 +351,7 @@ static void do_exception(struct pt_regs *regs, int access)
 	mmap_read_lock(mm);
 	gmap = NULL;
 	if (IS_ENABLED(CONFIG_PGSTE) && type == GMAP_FAULT) {
-		gmap = (struct gmap *)S390_lowcore.gmap;
+		gmap = (struct gmap *)get_lowcore()->gmap;
 		current->thread.gmap_addr = address;
 		current->thread.gmap_write_flag = !!(flags & FAULT_FLAG_WRITE);
 		current->thread.gmap_int_code = regs->int_code & 0xffff;
@@ -522,7 +522,7 @@ void do_secure_storage_access(struct pt_regs *regs)
 	switch (get_fault_type(regs)) {
 	case GMAP_FAULT:
 		mm = current->mm;
-		gmap = (struct gmap *)S390_lowcore.gmap;
+		gmap = (struct gmap *)get_lowcore()->gmap;
 		mmap_read_lock(mm);
 		addr = __gmap_translate(gmap, addr);
 		mmap_read_unlock(mm);
@@ -563,7 +563,7 @@ NOKPROBE_SYMBOL(do_secure_storage_access);
 
 void do_non_secure_storage_access(struct pt_regs *regs)
 {
-	struct gmap *gmap = (struct gmap *)S390_lowcore.gmap;
+	struct gmap *gmap = (struct gmap *)get_lowcore()->gmap;
 	unsigned long gaddr = get_fault_address(regs);
 
 	if (WARN_ON_ONCE(get_fault_type(regs) != GMAP_FAULT))
@@ -575,7 +575,7 @@ NOKPROBE_SYMBOL(do_non_secure_storage_access);
 
 void do_secure_storage_violation(struct pt_regs *regs)
 {
-	struct gmap *gmap = (struct gmap *)S390_lowcore.gmap;
+	struct gmap *gmap = (struct gmap *)get_lowcore()->gmap;
 	unsigned long gaddr = get_fault_address(regs);
 
 	/*
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index d5a5756dd69f2..eb0b51a36be01 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -287,7 +287,7 @@ EXPORT_SYMBOL_GPL(gmap_remove);
  */
 void gmap_enable(struct gmap *gmap)
 {
-	S390_lowcore.gmap = (unsigned long) gmap;
+	get_lowcore()->gmap = (unsigned long)gmap;
 }
 EXPORT_SYMBOL_GPL(gmap_enable);
 
@@ -297,7 +297,7 @@ EXPORT_SYMBOL_GPL(gmap_enable);
  */
 void gmap_disable(struct gmap *gmap)
 {
-	S390_lowcore.gmap = 0UL;
+	get_lowcore()->gmap = 0UL;
 }
 EXPORT_SYMBOL_GPL(gmap_disable);
 
@@ -308,7 +308,7 @@ EXPORT_SYMBOL_GPL(gmap_disable);
  */
 struct gmap *gmap_get_enabled(void)
 {
-	return (struct gmap *) S390_lowcore.gmap;
+	return (struct gmap *)get_lowcore()->gmap;
 }
 EXPORT_SYMBOL_GPL(gmap_get_enabled);
 
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index 01bc8fad64d6c..5f805ad42d4c3 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -75,7 +75,7 @@ static void pgt_set(unsigned long *old, unsigned long new, unsigned long addr,
 			break;
 		}
 		table = (unsigned long *)((unsigned long)old & mask);
-		crdte(*old, new, table, dtt, addr, S390_lowcore.kernel_asce.val);
+		crdte(*old, new, table, dtt, addr, get_lowcore()->kernel_asce.val);
 	} else if (MACHINE_HAS_IDTE) {
 		cspg(old, *old, new);
 	} else {
diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
index abb629d7e1319..07d0fe197dad7 100644
--- a/arch/s390/mm/pgalloc.c
+++ b/arch/s390/mm/pgalloc.c
@@ -64,8 +64,8 @@ static void __crst_table_upgrade(void *arg)
 
 	/* change all active ASCEs to avoid the creation of new TLBs */
 	if (current->active_mm == mm) {
-		S390_lowcore.user_asce.val = mm->context.asce;
-		local_ctl_load(7, &S390_lowcore.user_asce);
+		get_lowcore()->user_asce.val = mm->context.asce;
+		local_ctl_load(7, &get_lowcore()->user_asce);
 	}
 	__tlb_flush_local();
 }
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index 0de0f6e405b51..cff4838fad216 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -1064,7 +1064,7 @@ char * __init pcibios_setup(char *str)
 		return NULL;
 	}
 	if (!strcmp(str, "nomio")) {
-		S390_lowcore.machine_flags &= ~MACHINE_FLAG_PCI_MIO;
+		get_lowcore()->machine_flags &= ~MACHINE_FLAG_PCI_MIO;
 		return NULL;
 	}
 	if (!strcmp(str, "force_floating")) {
-- 
GitLab