diff --git a/arch/s390/boot/boot.h b/arch/s390/boot/boot.h
index 2b404810641896b3c5b3aad582d7e859254d7709..872963c8a0abe27ec5ae0eb21160529b1cf45642 100644
--- a/arch/s390/boot/boot.h
+++ b/arch/s390/boot/boot.h
@@ -32,6 +32,14 @@ struct vmlinux_info {
 	unsigned long init_mm_off;
 	unsigned long swapper_pg_dir_off;
 	unsigned long invalid_pg_dir_off;
+#ifdef CONFIG_KASAN
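+	/* kasan_early_shadow_* locations, relocated by offset_vmlinux_info() */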
+	unsigned long kasan_early_shadow_page_off;
+	unsigned long kasan_early_shadow_pte_off;
+	unsigned long kasan_early_shadow_pmd_off;
+	unsigned long kasan_early_shadow_pud_off;
+	unsigned long kasan_early_shadow_p4d_off;
+#endif
 };
 
 void startup_kernel(void);
diff --git a/arch/s390/boot/startup.c b/arch/s390/boot/startup.c
index bc07e24329b9517fdb58d81ab6b7b2affb4d174f..bdf305a939879cfe6ea38dcec99ef5e3416dd213 100644
--- a/arch/s390/boot/startup.c
+++ b/arch/s390/boot/startup.c
@@ -266,6 +266,13 @@ static void offset_vmlinux_info(unsigned long offset)
 	vmlinux.init_mm_off += offset;
 	vmlinux.swapper_pg_dir_off += offset;
 	vmlinux.invalid_pg_dir_off += offset;
+#ifdef CONFIG_KASAN
+	vmlinux.kasan_early_shadow_page_off += offset;
+	vmlinux.kasan_early_shadow_pte_off += offset;
+	vmlinux.kasan_early_shadow_pmd_off += offset;
+	vmlinux.kasan_early_shadow_pud_off += offset;
+	vmlinux.kasan_early_shadow_p4d_off += offset;
+#endif
 }
 
 void startup_kernel(void)
@@ -307,10 +314,6 @@ void startup_kernel(void)
 	detect_physmem_online_ranges(max_physmem_end);
 	save_ipl_cert_comp_list();
 	rescue_initrd(safe_addr, ident_map_size);
-#ifdef CONFIG_KASAN
-	physmem_alloc_top_down(RR_KASAN, kasan_estimate_memory_needs(get_physmem_usable_total()),
-			       _SEGMENT_SIZE);
-#endif
 
 	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_enabled) {
 		random_lma = get_random_base();
diff --git a/arch/s390/boot/vmem.c b/arch/s390/boot/vmem.c
index 8f16e6f9fb20fb0cdde418a109c1d45bc672ee9e..b01ea2abda03467c3bf99f4e652147adca72e5c6 100644
--- a/arch/s390/boot/vmem.c
+++ b/arch/s390/boot/vmem.c
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <linux/sched/task.h>
 #include <linux/pgtable.h>
+#include <linux/kasan.h>
 #include <asm/pgalloc.h>
 #include <asm/facility.h>
 #include <asm/sections.h>
@@ -16,6 +17,185 @@ unsigned long __bootdata_preserved(s390_invalid_asce);
 #define swapper_pg_dir		vmlinux.swapper_pg_dir_off
 #define invalid_pg_dir		vmlinux.invalid_pg_dir_off
 
+enum populate_mode {
+	POPULATE_NONE,
+	POPULATE_ONE2ONE,
+	POPULATE_ABS_LOWCORE,
+#ifdef CONFIG_KASAN
+	POPULATE_KASAN_MAP_SHADOW,	/* allocate and map backing shadow memory */
+	POPULATE_KASAN_ZERO_SHADOW,	/* map the shared zero shadow page read-only */
+	POPULATE_KASAN_SHALLOW		/* create top-level page tables only (KASAN_VMALLOC) */
+#endif
+};
+
+static void pgtable_populate(unsigned long addr, unsigned long end, enum populate_mode mode);
+
+#ifdef CONFIG_KASAN
+
+#define kasan_early_shadow_page	vmlinux.kasan_early_shadow_page_off
+#define kasan_early_shadow_pte	((pte_t *)vmlinux.kasan_early_shadow_pte_off)
+#define kasan_early_shadow_pmd	((pmd_t *)vmlinux.kasan_early_shadow_pmd_off)
+#define kasan_early_shadow_pud	((pud_t *)vmlinux.kasan_early_shadow_pud_off)
+#define kasan_early_shadow_p4d	((p4d_t *)vmlinux.kasan_early_shadow_p4d_off)
+#define __sha(x)		((unsigned long)kasan_mem_to_shadow((void *)x))
+
+static pte_t pte_z;
+
+static void kasan_populate_shadow(void)
+{
+	pmd_t pmd_z = __pmd(__pa(kasan_early_shadow_pte) | _SEGMENT_ENTRY);
+	pud_t pud_z = __pud(__pa(kasan_early_shadow_pmd) | _REGION3_ENTRY);
+	p4d_t p4d_z = __p4d(__pa(kasan_early_shadow_pud) | _REGION2_ENTRY);
+	unsigned long untracked_end;
+	unsigned long start, end;
+	int i;
+
+	pte_z = __pte(__pa(kasan_early_shadow_page) | pgprot_val(PAGE_KERNEL_RO));
+	if (!machine.has_nx)
+		pte_z = clear_pte_bit(pte_z, __pgprot(_PAGE_NOEXEC));
+	crst_table_init((unsigned long *)kasan_early_shadow_p4d, p4d_val(p4d_z));
+	crst_table_init((unsigned long *)kasan_early_shadow_pud, pud_val(pud_z));
+	crst_table_init((unsigned long *)kasan_early_shadow_pmd, pmd_val(pmd_z));
+	memset64((u64 *)kasan_early_shadow_pte, pte_val(pte_z), PTRS_PER_PTE);
+
+	/*
+	 * Current memory layout:
+	 * +- 0 -------------+	       +- shadow start -+
+	 * |1:1 ident mapping|	      /|1/8 of ident map|
+	 * |		     |	     / |		|
+	 * +-end of ident map+	    /  +----------------+
+	 * | ... gap ...     |	   /   |    kasan	|
+	 * |		     |	  /    |  zero page	|
+	 * +- vmalloc area  -+	 /     |   mapping	|
+	 * | vmalloc_size    |	/      | (untracked)	|
+	 * +- modules vaddr -+ /       +----------------+
+	 * | 2Gb	     |/        |    unmapped	| allocated per module
+	 * +- shadow start  -+	       +----------------+
+	 * | 1/8 addr space  |	       | zero pg mapping| (untracked)
+	 * +- shadow end ----+---------+- shadow end ---+
+	 *
+	 * Current memory layout (KASAN_VMALLOC):
+	 * +- 0 -------------+	       +- shadow start -+
+	 * |1:1 ident mapping|	      /|1/8 of ident map|
+	 * |		     |	     / |		|
+	 * +-end of ident map+	    /  +----------------+
+	 * | ... gap ...     |	   /   | kasan zero page| (untracked)
+	 * |		     |	  /    | mapping	|
+	 * +- vmalloc area  -+	 /     +----------------+
+	 * | vmalloc_size    |	/      |shallow populate|
+	 * +- modules vaddr -+ /       +----------------+
+	 * | 2Gb	     |/        |shallow populate|
+	 * +- shadow start  -+	       +----------------+
+	 * | 1/8 addr space  |	       | zero pg mapping| (untracked)
+	 * +- shadow end ----+---------+- shadow end ---+
+	 */
+
+	for_each_physmem_usable_range(i, &start, &end)
+		pgtable_populate(__sha(start), __sha(end), POPULATE_KASAN_MAP_SHADOW);
+	if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
+		untracked_end = VMALLOC_START;
+		/* shallowly populate kasan shadow for vmalloc and modules */
+		pgtable_populate(__sha(VMALLOC_START), __sha(MODULES_END), POPULATE_KASAN_SHALLOW);
+	} else {
+		untracked_end = MODULES_VADDR;
+	}
+	/* populate kasan shadow for untracked memory */
+	pgtable_populate(__sha(ident_map_size), __sha(untracked_end), POPULATE_KASAN_ZERO_SHADOW);
+	pgtable_populate(__sha(MODULES_END), __sha(_REGION1_SIZE), POPULATE_KASAN_ZERO_SHADOW);
+}
+
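+/*
+ * When an entire pgd/p4d/pud/pmd range maps zero shadow, hook the shared,
+ * preinitialized early shadow table of the next lower level into the
+ * entry instead of allocating page tables for the range.
+ */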
+static bool kasan_pgd_populate_zero_shadow(pgd_t *pgd, unsigned long addr,
+					   unsigned long end, enum populate_mode mode)
+{
+	if (mode == POPULATE_KASAN_ZERO_SHADOW &&
+	    IS_ALIGNED(addr, PGDIR_SIZE) && end - addr >= PGDIR_SIZE) {
+		pgd_populate(&init_mm, pgd, kasan_early_shadow_p4d);
+		return true;
+	}
+	return false;
+}
+
+static bool kasan_p4d_populate_zero_shadow(p4d_t *p4d, unsigned long addr,
+					   unsigned long end, enum populate_mode mode)
+{
+	if (mode == POPULATE_KASAN_ZERO_SHADOW &&
+	    IS_ALIGNED(addr, P4D_SIZE) && end - addr >= P4D_SIZE) {
+		p4d_populate(&init_mm, p4d, kasan_early_shadow_pud);
+		return true;
+	}
+	return false;
+}
+
+static bool kasan_pud_populate_zero_shadow(pud_t *pud, unsigned long addr,
+					   unsigned long end, enum populate_mode mode)
+{
+	if (mode == POPULATE_KASAN_ZERO_SHADOW &&
+	    IS_ALIGNED(addr, PUD_SIZE) && end - addr >= PUD_SIZE) {
+		pud_populate(&init_mm, pud, kasan_early_shadow_pmd);
+		return true;
+	}
+	return false;
+}
+
+static bool kasan_pmd_populate_zero_shadow(pmd_t *pmd, unsigned long addr,
+					   unsigned long end, enum populate_mode mode)
+{
+	if (mode == POPULATE_KASAN_ZERO_SHADOW &&
+	    IS_ALIGNED(addr, PMD_SIZE) && end - addr >= PMD_SIZE) {
+		pmd_populate(&init_mm, pmd, kasan_early_shadow_pte);
+		return true;
+	}
+	return false;
+}
+
+static bool kasan_pte_populate_zero_shadow(pte_t *pte, enum populate_mode mode)
+{
+	if (mode == POPULATE_KASAN_ZERO_SHADOW) {
+		set_pte(pte, pte_z);
+		return true;
+	}
+	return false;
+}
+#else
+
+static inline void kasan_populate_shadow(void) {}
+
+static inline bool kasan_pgd_populate_zero_shadow(pgd_t *pgd, unsigned long addr,
+						  unsigned long end, enum populate_mode mode)
+{
+	return false;
+}
+
+static inline bool kasan_p4d_populate_zero_shadow(p4d_t *p4d, unsigned long addr,
+						  unsigned long end, enum populate_mode mode)
+{
+	return false;
+}
+
+static inline bool kasan_pud_populate_zero_shadow(pud_t *pud, unsigned long addr,
+						  unsigned long end, enum populate_mode mode)
+{
+	return false;
+}
+
+static inline bool kasan_pmd_populate_zero_shadow(pmd_t *pmd, unsigned long addr,
+						  unsigned long end, enum populate_mode mode)
+{
+	return false;
+}
+
+static inline bool kasan_pte_populate_zero_shadow(pte_t *pte, enum populate_mode mode)
+{
+	return false;
+}
+
+#endif
+
 /*
  * Mimic virt_to_kpte() in lack of init_mm symbol. Skip pmd NULL check though.
  */
@@ -24,12 +204,6 @@ static inline pte_t *__virt_to_kpte(unsigned long va)
 	return pte_offset_kernel(pmd_offset(pud_offset(p4d_offset(pgd_offset_k(va), va), va), va), va);
 }
 
-enum populate_mode {
-	POPULATE_NONE,
-	POPULATE_ONE2ONE,
-	POPULATE_ABS_LOWCORE,
-};
-
 static void *boot_crst_alloc(unsigned long val)
 {
 	unsigned long size = PAGE_SIZE << CRST_ALLOC_ORDER;
@@ -42,14 +216,26 @@ static void *boot_crst_alloc(unsigned long val)
 
 static pte_t *boot_pte_alloc(void)
 {
+	static void *pte_leftover;
 	pte_t *pte;
 
-	pte = (pte_t *)physmem_alloc_top_down(RR_VMEM, _PAGE_TABLE_SIZE, _PAGE_TABLE_SIZE);
+	/*
+	 * Page tables are 2KB, half a page: handing out the spare half on the
+	 * next call avoids POPULATE_KASAN_MAP_SHADOW fragmentation when EDAT is off.
+	 */
+	if (!pte_leftover) {
+		pte_leftover = (void *)physmem_alloc_top_down(RR_VMEM, PAGE_SIZE, PAGE_SIZE);
+		pte = pte_leftover + _PAGE_TABLE_SIZE;
+	} else {
+		pte = pte_leftover;
+		pte_leftover = NULL;
+	}
+
 	memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
 	return pte;
 }
 
-static unsigned long _pa(unsigned long addr, enum populate_mode mode)
+static unsigned long _pa(unsigned long addr, unsigned long size, enum populate_mode mode)
 {
 	switch (mode) {
 	case POPULATE_NONE:
@@ -58,6 +244,12 @@ static unsigned long _pa(unsigned long addr, enum populate_mode mode)
 		return addr;
 	case POPULATE_ABS_LOWCORE:
 		return __abs_lowcore_pa(addr);
+#ifdef CONFIG_KASAN
+	case POPULATE_KASAN_MAP_SHADOW:
+		addr = physmem_alloc_top_down(RR_VMEM, size, size);
+		memset((void *)addr, 0, size);
+		return addr;
+#endif
 	default:
 		return -1;
 	}
@@ -83,7 +275,9 @@ static void pgtable_pte_populate(pmd_t *pmd, unsigned long addr, unsigned long e
 	pte = pte_offset_kernel(pmd, addr);
 	for (; addr < end; addr += PAGE_SIZE, pte++) {
 		if (pte_none(*pte)) {
-			entry = __pte(_pa(addr, mode));
+			if (kasan_pte_populate_zero_shadow(pte, mode))
+				continue;
+			entry = __pte(_pa(addr, PAGE_SIZE, mode));
 			entry = set_pte_bit(entry, PAGE_KERNEL_EXEC);
 			set_pte(pte, entry);
 		}
@@ -101,8 +295,10 @@ static void pgtable_pmd_populate(pud_t *pud, unsigned long addr, unsigned long e
 	for (; addr < end; addr = next, pmd++) {
 		next = pmd_addr_end(addr, end);
 		if (pmd_none(*pmd)) {
+			if (kasan_pmd_populate_zero_shadow(pmd, addr, next, mode))
+				continue;
 			if (can_large_pmd(pmd, addr, next)) {
-				entry = __pmd(_pa(addr, mode));
+				entry = __pmd(_pa(addr, _SEGMENT_SIZE, mode));
 				entry = set_pmd_bit(entry, SEGMENT_KERNEL_EXEC);
 				set_pmd(pmd, entry);
 				continue;
@@ -127,8 +323,10 @@ static void pgtable_pud_populate(p4d_t *p4d, unsigned long addr, unsigned long e
 	for (; addr < end; addr = next, pud++) {
 		next = pud_addr_end(addr, end);
 		if (pud_none(*pud)) {
+			if (kasan_pud_populate_zero_shadow(pud, addr, next, mode))
+				continue;
 			if (can_large_pud(pud, addr, next)) {
-				entry = __pud(_pa(addr, mode));
+				entry = __pud(_pa(addr, _REGION3_SIZE, mode));
 				entry = set_pud_bit(entry, REGION3_KERNEL_EXEC);
 				set_pud(pud, entry);
 				continue;
@@ -153,6 +351,8 @@ static void pgtable_p4d_populate(pgd_t *pgd, unsigned long addr, unsigned long e
 	for (; addr < end; addr = next, p4d++) {
 		next = p4d_addr_end(addr, end);
 		if (p4d_none(*p4d)) {
+			if (kasan_p4d_populate_zero_shadow(p4d, addr, next, mode))
+				continue;
 			pud = boot_crst_alloc(_REGION3_ENTRY_EMPTY);
 			p4d_populate(&init_mm, p4d, pud);
 		}
@@ -170,9 +370,15 @@ static void pgtable_populate(unsigned long addr, unsigned long end, enum populat
 	for (; addr < end; addr = next, pgd++) {
 		next = pgd_addr_end(addr, end);
 		if (pgd_none(*pgd)) {
+			if (kasan_pgd_populate_zero_shadow(pgd, addr, next, mode))
+				continue;
 			p4d = boot_crst_alloc(_REGION2_ENTRY_EMPTY);
 			pgd_populate(&init_mm, pgd, p4d);
 		}
+#ifdef CONFIG_KASAN
+		if (mode == POPULATE_KASAN_SHALLOW)
+			continue;
+#endif
 		pgtable_p4d_populate(pgd, addr, next, mode);
 	}
 }
@@ -210,6 +416,12 @@ void setup_vmem(unsigned long asce_limit)
 			 POPULATE_NONE);
 	memcpy_real_ptep = __virt_to_kpte(__memcpy_real_area);
 
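+	/*
+	 * Populate the KASAN shadow last: it mirrors the layout established
+	 * above and allocates its backing memory from RR_VMEM on demand.
+	 */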
+	kasan_populate_shadow();
+
 	S390_lowcore.kernel_asce = swapper_pg_dir | asce_bits;
 	S390_lowcore.user_asce = s390_invalid_asce;
 
diff --git a/arch/s390/include/asm/kasan.h b/arch/s390/include/asm/kasan.h
index e5cfc81d5b6128a87c36341e7127bd598b26990d..0cffead0f2f273d4eea28d2f328d0c0de8134fe9 100644
--- a/arch/s390/include/asm/kasan.h
+++ b/arch/s390/include/asm/kasan.h
@@ -2,7 +2,7 @@
 #ifndef __ASM_KASAN_H
 #define __ASM_KASAN_H
 
-#include <asm/pgtable.h>
+#include <linux/const.h>
 
 #ifdef CONFIG_KASAN
 
@@ -13,35 +13,6 @@
 #define KASAN_SHADOW_START	KASAN_SHADOW_OFFSET
 #define KASAN_SHADOW_END	(KASAN_SHADOW_START + KASAN_SHADOW_SIZE)
 
-extern void kasan_early_init(void);
-
-/*
- * Estimate kasan memory requirements, which it will reserve
- * at the very end of available physical memory. To estimate
- * that, we take into account that kasan would require
- * 1/8 of available physical memory (for shadow memory) +
- * creating page tables for the shadow memory region.
- * To keep page tables estimates simple take the double of
- * combined ptes size.
- *
- * physmem parameter has to be already adjusted if not entire physical memory
- * would be used (e.g. due to effect of "mem=" option).
- */
-static inline unsigned long kasan_estimate_memory_needs(unsigned long physmem)
-{
-	unsigned long kasan_needs;
-	unsigned long pages;
-	/* for shadow memory */
-	kasan_needs = round_up(physmem / 8, PAGE_SIZE);
-	/* for paging structures */
-	pages = DIV_ROUND_UP(kasan_needs, PAGE_SIZE);
-	kasan_needs += DIV_ROUND_UP(pages, _PAGE_ENTRIES) * _PAGE_TABLE_SIZE * 2;
-
-	return kasan_needs;
-}
-#else
-static inline void kasan_early_init(void) { }
-static inline unsigned long kasan_estimate_memory_needs(unsigned long physmem) { return 0; }
 #endif
 
 #endif
diff --git a/arch/s390/include/asm/physmem_info.h b/arch/s390/include/asm/physmem_info.h
index 27234fa1da8ea0e5fd2792906bc4c86437b0f95b..8e9c582592b3f18db90864e09d74c0357224772e 100644
--- a/arch/s390/include/asm/physmem_info.h
+++ b/arch/s390/include/asm/physmem_info.h
@@ -26,9 +26,6 @@ enum reserved_range_type {
 	RR_CERT_COMP_LIST,
 	RR_MEM_DETECT_EXTENDED,
 	RR_VMEM,
-#ifdef CONFIG_KASAN
-	RR_KASAN,
-#endif
 	RR_MAX
 };
 
@@ -129,9 +126,6 @@ static inline const char *get_rr_type_name(enum reserved_range_type t)
 	RR_TYPE_NAME(CERT_COMP_LIST);
 	RR_TYPE_NAME(MEM_DETECT_EXTENDED);
 	RR_TYPE_NAME(VMEM);
-#ifdef CONFIG_KASAN
-	RR_TYPE_NAME(KASAN);
-#endif
 	default:
 		return "UNKNOWN";
 	}
@@ -166,17 +160,6 @@ static inline struct reserved_range *__physmem_reserved_next(enum reserved_range
 	     range; range = __physmem_reserved_next(&t, range),			\
 	    *p_start = range ? range->start : 0, *p_end = range ? range->end : 0)
 
-static inline unsigned long get_physmem_usable_total(void)
-{
-	unsigned long start, end, total = 0;
-	int i;
-
-	for_each_physmem_usable_range(i, &start, &end)
-		total += end - start;
-
-	return total;
-}
-
 static inline unsigned long get_physmem_reserved(enum reserved_range_type type,
 						 unsigned long *addr, unsigned long *size)
 {
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 8225a4c1f2e288c47ac6bae0dee89c523b811ed6..2dd5976a55ac221ec2aafd38c05587984112a6ac 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -51,6 +51,18 @@ decompressor_handled_param(nokaslr);
 decompressor_handled_param(prot_virt);
 #endif
 
+static void __init kasan_early_init(void)
+{
+#ifdef CONFIG_KASAN
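+	/*
+	 * The decompressor already populated the shadow memory, so reporting
+	 * just needs to be enabled (init_task starts with kasan_depth == 1).
+	 */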
+	init_task.kasan_depth = 0;
+	sclp_early_printk("KernelAddressSanitizer initialized\n");
+#endif
+}
+
 static void __init reset_tod_clock(void)
 {
 	union tod_clock clk;
@@ -293,6 +305,7 @@ static void __init sort_amode31_extable(void)
 
 void __init startup_init(void)
 {
+	kasan_early_init();
 	reset_tod_clock();
 	time_early_init();
 	init_kernel_storage_key();
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index 3b3bf8329e6c1c03f84c35d9fe46a13da04b8e8f..f68be3951103662fdb3a48fdbf6afb1e454a6ab0 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -26,9 +26,6 @@ ENTRY(startup_continue)
 	stg	%r14,__LC_CURRENT
 	larl	%r15,init_thread_union+THREAD_SIZE-STACK_FRAME_OVERHEAD-__PT_SIZE
 	brasl	%r14,sclp_early_adjust_va	# allow sclp_early_printk
-#ifdef CONFIG_KASAN
-	brasl	%r14,kasan_early_init
-#endif
 	brasl	%r14,startup_init		# s390 specific early init
 	brasl	%r14,start_kernel		# common init code
 #
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index b653ba8d51e6bbec485778ab0d1f4e7d9579527b..8d2288a5ba2503f4a2a1585441697ce6fdd1f3d0 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -219,6 +219,14 @@ SECTIONS
 		QUAD(init_mm)
 		QUAD(swapper_pg_dir)
 		QUAD(invalid_pg_dir)
+#ifdef CONFIG_KASAN
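+		/* consumed by the decompressor through struct vmlinux_info (boot/boot.h) */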
+		QUAD(kasan_early_shadow_page)
+		QUAD(kasan_early_shadow_pte)
+		QUAD(kasan_early_shadow_pmd)
+		QUAD(kasan_early_shadow_pud)
+		QUAD(kasan_early_shadow_p4d)
+#endif
 	} :NONE
 
 	/* Debugging sections.	*/
diff --git a/arch/s390/mm/Makefile b/arch/s390/mm/Makefile
index 57e4f3a248299cd0f15b7ce44d894dabfe7fde3b..d90db06a8af5776937060c94d17baa9b6392f05d 100644
--- a/arch/s390/mm/Makefile
+++ b/arch/s390/mm/Makefile
@@ -10,6 +10,3 @@ obj-$(CONFIG_CMM)		+= cmm.o
 obj-$(CONFIG_HUGETLB_PAGE)	+= hugetlbpage.o
 obj-$(CONFIG_PTDUMP_CORE)	+= dump_pagetables.o
 obj-$(CONFIG_PGSTE)		+= gmap.o
-
-KASAN_SANITIZE_kasan_init.o	:= n
-obj-$(CONFIG_KASAN)		+= kasan_init.o
diff --git a/arch/s390/mm/kasan_init.c b/arch/s390/mm/kasan_init.c
deleted file mode 100644
index 2b20382f1bd8450b46fd60834795b26fd9d6e65d..0000000000000000000000000000000000000000
--- a/arch/s390/mm/kasan_init.c
+++ /dev/null
@@ -1,298 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/memblock.h>
-#include <linux/pgtable.h>
-#include <linux/kasan.h>
-#include <asm/physmem_info.h>
-#include <asm/processor.h>
-#include <asm/facility.h>
-#include <asm/pgalloc.h>
-#include <asm/sclp.h>
-
-static unsigned long pgalloc_pos __initdata;
-static unsigned long segment_pos __initdata;
-static bool has_edat __initdata;
-static bool has_nx __initdata;
-
-#define __sha(x) ((unsigned long)kasan_mem_to_shadow((void *)x))
-
-static void __init kasan_early_panic(const char *reason)
-{
-	sclp_early_printk("The Linux kernel failed to boot with the KernelAddressSanitizer:\n");
-	sclp_early_printk(reason);
-	disabled_wait();
-}
-
-static void * __init kasan_early_alloc_segment(void)
-{
-	unsigned long addr = segment_pos;
-
-	segment_pos += _SEGMENT_SIZE;
-	if (segment_pos > pgalloc_pos)
-		kasan_early_panic("out of memory during initialisation\n");
-
-	return __va(addr);
-}
-
-static void * __init kasan_early_alloc_pages(unsigned int order)
-{
-	pgalloc_pos -= (PAGE_SIZE << order);
-
-	if (segment_pos > pgalloc_pos)
-		kasan_early_panic("out of memory during initialisation\n");
-
-	return __va(pgalloc_pos);
-}
-
-static void * __init kasan_early_crst_alloc(unsigned long val)
-{
-	unsigned long *table;
-
-	table = kasan_early_alloc_pages(CRST_ALLOC_ORDER);
-	if (table)
-		crst_table_init(table, val);
-	return table;
-}
-
-static pte_t * __init kasan_early_pte_alloc(void)
-{
-	static void *pte_leftover;
-	pte_t *pte;
-
-	BUILD_BUG_ON(_PAGE_TABLE_SIZE * 2 != PAGE_SIZE);
-
-	if (!pte_leftover) {
-		pte_leftover = kasan_early_alloc_pages(0);
-		pte = pte_leftover + _PAGE_TABLE_SIZE;
-	} else {
-		pte = pte_leftover;
-		pte_leftover = NULL;
-	}
-	memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
-	return pte;
-}
-
-enum populate_mode {
-	POPULATE_MAP,
-	POPULATE_ZERO_SHADOW,
-	POPULATE_SHALLOW
-};
-
-static inline pgprot_t pgprot_clear_bit(pgprot_t pgprot, unsigned long bit)
-{
-	return __pgprot(pgprot_val(pgprot) & ~bit);
-}
-
-static void __init kasan_early_pgtable_populate(unsigned long address,
-						unsigned long end,
-						enum populate_mode mode)
-{
-	pgprot_t pgt_prot_zero = PAGE_KERNEL_RO;
-	pgprot_t pgt_prot = PAGE_KERNEL;
-	pgprot_t sgt_prot = SEGMENT_KERNEL;
-	pgd_t *pg_dir;
-	p4d_t *p4_dir;
-	pud_t *pu_dir;
-	pmd_t *pm_dir;
-	pte_t *pt_dir;
-	pmd_t pmd;
-	pte_t pte;
-
-	if (!has_nx) {
-		pgt_prot_zero = pgprot_clear_bit(pgt_prot_zero, _PAGE_NOEXEC);
-		pgt_prot = pgprot_clear_bit(pgt_prot, _PAGE_NOEXEC);
-		sgt_prot = pgprot_clear_bit(sgt_prot, _SEGMENT_ENTRY_NOEXEC);
-	}
-
-	while (address < end) {
-		pg_dir = pgd_offset_k(address);
-		if (pgd_none(*pg_dir)) {
-			if (mode == POPULATE_ZERO_SHADOW &&
-			    IS_ALIGNED(address, PGDIR_SIZE) &&
-			    end - address >= PGDIR_SIZE) {
-				pgd_populate(&init_mm, pg_dir,
-						kasan_early_shadow_p4d);
-				address = (address + PGDIR_SIZE) & PGDIR_MASK;
-				continue;
-			}
-			p4_dir = kasan_early_crst_alloc(_REGION2_ENTRY_EMPTY);
-			pgd_populate(&init_mm, pg_dir, p4_dir);
-		}
-
-		if (mode == POPULATE_SHALLOW) {
-			address = (address + P4D_SIZE) & P4D_MASK;
-			continue;
-		}
-
-		p4_dir = p4d_offset(pg_dir, address);
-		if (p4d_none(*p4_dir)) {
-			if (mode == POPULATE_ZERO_SHADOW &&
-			    IS_ALIGNED(address, P4D_SIZE) &&
-			    end - address >= P4D_SIZE) {
-				p4d_populate(&init_mm, p4_dir,
-						kasan_early_shadow_pud);
-				address = (address + P4D_SIZE) & P4D_MASK;
-				continue;
-			}
-			pu_dir = kasan_early_crst_alloc(_REGION3_ENTRY_EMPTY);
-			p4d_populate(&init_mm, p4_dir, pu_dir);
-		}
-
-		pu_dir = pud_offset(p4_dir, address);
-		if (pud_none(*pu_dir)) {
-			if (mode == POPULATE_ZERO_SHADOW &&
-			    IS_ALIGNED(address, PUD_SIZE) &&
-			    end - address >= PUD_SIZE) {
-				pud_populate(&init_mm, pu_dir,
-						kasan_early_shadow_pmd);
-				address = (address + PUD_SIZE) & PUD_MASK;
-				continue;
-			}
-			pm_dir = kasan_early_crst_alloc(_SEGMENT_ENTRY_EMPTY);
-			pud_populate(&init_mm, pu_dir, pm_dir);
-		}
-
-		pm_dir = pmd_offset(pu_dir, address);
-		if (pmd_none(*pm_dir)) {
-			if (IS_ALIGNED(address, PMD_SIZE) &&
-			    end - address >= PMD_SIZE) {
-				if (mode == POPULATE_ZERO_SHADOW) {
-					pmd_populate(&init_mm, pm_dir, kasan_early_shadow_pte);
-					address = (address + PMD_SIZE) & PMD_MASK;
-					continue;
-				} else if (has_edat) {
-					void *page = kasan_early_alloc_segment();
-
-					memset(page, 0, _SEGMENT_SIZE);
-					pmd = __pmd(__pa(page));
-					pmd = set_pmd_bit(pmd, sgt_prot);
-					set_pmd(pm_dir, pmd);
-					address = (address + PMD_SIZE) & PMD_MASK;
-					continue;
-				}
-			}
-			pt_dir = kasan_early_pte_alloc();
-			pmd_populate(&init_mm, pm_dir, pt_dir);
-		} else if (pmd_large(*pm_dir)) {
-			address = (address + PMD_SIZE) & PMD_MASK;
-			continue;
-		}
-
-		pt_dir = pte_offset_kernel(pm_dir, address);
-		if (pte_none(*pt_dir)) {
-			void *page;
-
-			switch (mode) {
-			case POPULATE_MAP:
-				page = kasan_early_alloc_pages(0);
-				memset(page, 0, PAGE_SIZE);
-				pte = __pte(__pa(page));
-				pte = set_pte_bit(pte, pgt_prot);
-				set_pte(pt_dir, pte);
-				break;
-			case POPULATE_ZERO_SHADOW:
-				page = kasan_early_shadow_page;
-				pte = __pte(__pa(page));
-				pte = set_pte_bit(pte, pgt_prot_zero);
-				set_pte(pt_dir, pte);
-				break;
-			case POPULATE_SHALLOW:
-				/* should never happen */
-				break;
-			}
-		}
-		address += PAGE_SIZE;
-	}
-}
-
-static void __init kasan_early_detect_facilities(void)
-{
-	if (test_facility(8)) {
-		has_edat = true;
-		__ctl_set_bit(0, 23);
-	}
-	if (!noexec_disabled && test_facility(130)) {
-		has_nx = true;
-		__ctl_set_bit(0, 20);
-	}
-}
-
-void __init kasan_early_init(void)
-{
-	pte_t pte_z = __pte(__pa(kasan_early_shadow_page) | pgprot_val(PAGE_KERNEL_RO));
-	pmd_t pmd_z = __pmd(__pa(kasan_early_shadow_pte) | _SEGMENT_ENTRY);
-	pud_t pud_z = __pud(__pa(kasan_early_shadow_pmd) | _REGION3_ENTRY);
-	p4d_t p4d_z = __p4d(__pa(kasan_early_shadow_pud) | _REGION2_ENTRY);
-	unsigned long pgalloc_pos_initial, segment_pos_initial;
-	unsigned long untracked_end = MODULES_VADDR;
-	unsigned long start, end;
-	int i;
-
-	kasan_early_detect_facilities();
-	if (!has_nx)
-		pte_z = clear_pte_bit(pte_z, __pgprot(_PAGE_NOEXEC));
-
-	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, P4D_SIZE));
-	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, P4D_SIZE));
-
-	/* init kasan zero shadow */
-	crst_table_init((unsigned long *)kasan_early_shadow_p4d, p4d_val(p4d_z));
-	crst_table_init((unsigned long *)kasan_early_shadow_pud, pud_val(pud_z));
-	crst_table_init((unsigned long *)kasan_early_shadow_pmd, pmd_val(pmd_z));
-	memset64((u64 *)kasan_early_shadow_pte, pte_val(pte_z), PTRS_PER_PTE);
-
-	/* segment allocations go bottom up -> <- pgalloc go top down */
-	segment_pos_initial = physmem_info.reserved[RR_KASAN].start;
-	segment_pos = segment_pos_initial;
-	pgalloc_pos_initial = physmem_info.reserved[RR_KASAN].end;
-	pgalloc_pos = pgalloc_pos_initial;
-	/*
-	 * Current memory layout:
-	 * +- 0 -------------+	       +- shadow start -+
-	 * |1:1 ident mapping|	      /|1/8 of ident map|
-	 * |		     |	     / |		|
-	 * +-end of ident map+	    /  +----------------+
-	 * | ... gap ...     |	   /   |    kasan	|
-	 * |		     |	  /    |  zero page	|
-	 * +- vmalloc area  -+	 /     |   mapping	|
-	 * | vmalloc_size    |	/      | (untracked)	|
-	 * +- modules vaddr -+ /       +----------------+
-	 * | 2Gb	     |/        |    unmapped	| allocated per module
-	 * +- shadow start  -+	       +----------------+
-	 * | 1/8 addr space  |	       | zero pg mapping| (untracked)
-	 * +- shadow end ----+---------+- shadow end ---+
-	 *
-	 * Current memory layout (KASAN_VMALLOC):
-	 * +- 0 -------------+	       +- shadow start -+
-	 * |1:1 ident mapping|	      /|1/8 of ident map|
-	 * |		     |	     / |		|
-	 * +-end of ident map+	    /  +----------------+
-	 * | ... gap ...     |	   /   | kasan zero page| (untracked)
-	 * |		     |	  /    | mapping	|
-	 * +- vmalloc area  -+	 /     +----------------+
-	 * | vmalloc_size    |	/      |shallow populate|
-	 * +- modules vaddr -+ /       +----------------+
-	 * | 2Gb	     |/        |shallow populate|
-	 * +- shadow start  -+	       +----------------+
-	 * | 1/8 addr space  |	       | zero pg mapping| (untracked)
-	 * +- shadow end ----+---------+- shadow end ---+
-	 */
-	/* populate kasan shadow (for identity mapping and zero page mapping) */
-	for_each_physmem_usable_range(i, &start, &end)
-		kasan_early_pgtable_populate(__sha(start), __sha(end), POPULATE_MAP);
-	if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
-		untracked_end = VMALLOC_START;
-		/* shallowly populate kasan shadow for vmalloc and modules */
-		kasan_early_pgtable_populate(__sha(VMALLOC_START), __sha(MODULES_END),
-					     POPULATE_SHALLOW);
-	}
-	/* populate kasan shadow for untracked memory */
-	kasan_early_pgtable_populate(__sha(ident_map_size), __sha(untracked_end),
-				     POPULATE_ZERO_SHADOW);
-	kasan_early_pgtable_populate(__sha(MODULES_END), __sha(_REGION1_SIZE),
-				     POPULATE_ZERO_SHADOW);
-	/* enable kasan */
-	init_task.kasan_depth = 0;
-	sclp_early_printk("KernelAddressSanitizer initialized\n");
-	memblock_reserve(segment_pos_initial, segment_pos - segment_pos_initial);
-	memblock_reserve(pgalloc_pos, pgalloc_pos_initial - pgalloc_pos);
-}
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 4113a7ffa149b19b7a61594c98034b83fc9238d7..242f95aa9801cf8329ba050bf42bed9c2d371d07 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -5,6 +5,7 @@
 
 #include <linux/memory_hotplug.h>
 #include <linux/memblock.h>
+#include <linux/kasan.h>
 #include <linux/pfn.h>
 #include <linux/mm.h>
 #include <linux/init.h>
@@ -664,6 +665,9 @@ static void __init memblock_region_swap(void *a, void *b, int size)
 	swap(*(struct memblock_region *)a, *(struct memblock_region *)b);
 }
 
+#ifdef CONFIG_KASAN
+#define __sha(x)	((unsigned long)kasan_mem_to_shadow((void *)x))
+#endif
 /*
  * map whole physical memory to virtual memory (identity mapping)
  * we reserve enough space in the vmalloc area for vmemmap to hotplug
@@ -733,6 +737,14 @@ void __init vmem_map_init(void)
 			     SET_MEMORY_RW | SET_MEMORY_NX);
 	}
 
+#ifdef CONFIG_KASAN
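+	/* the KASAN shadow, like the 1:1 mapping above, stays RW and NX */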
+	for_each_mem_range(i, &base, &end)
+		__set_memory(__sha(base),
+			     (__sha(end) - __sha(base)) >> PAGE_SHIFT,
+			     SET_MEMORY_RW | SET_MEMORY_NX);
+#endif
+
 	__set_memory((unsigned long)_stext,
 		     (unsigned long)(_etext - _stext) >> PAGE_SHIFT,
 		     SET_MEMORY_RO | SET_MEMORY_X);