diff --git a/arch/s390/include/asm/kasan.h b/arch/s390/include/asm/kasan.h
index 89d6886040c896213ad46a4932b130abb126aad8..e9bf486de136b8809c192c344972c1d7712f6138 100644
--- a/arch/s390/include/asm/kasan.h
+++ b/arch/s390/include/asm/kasan.h
@@ -19,6 +19,7 @@
 extern void kasan_early_init(void);
 extern void kasan_copy_shadow(pgd_t *dst);
 extern void kasan_free_early_identity(void);
+extern unsigned long kasan_vmax;
 #else
 static inline void kasan_early_init(void) { }
 static inline void kasan_copy_shadow(pgd_t *dst) { }
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index dd3fa7039cb0e23f9187462991d515aea494a765..ae2f4d9460486a0c1e290d1a137d9556f00f9009 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -552,22 +552,17 @@ static void __init setup_memory_end(void)
 	unsigned long vmax, tmp;
 
 	/* Choose kernel address space layout: 3 or 4 levels. */
-	if (IS_ENABLED(CONFIG_KASAN)) {
-		vmax = IS_ENABLED(CONFIG_KASAN_S390_4_LEVEL_PAGING)
-			   ? _REGION1_SIZE
-			   : _REGION2_SIZE;
-	} else {
-		tmp = (memory_end ?: max_physmem_end) / PAGE_SIZE;
-		tmp = tmp * (sizeof(struct page) + PAGE_SIZE);
-		if (tmp + vmalloc_size + MODULES_LEN <= _REGION2_SIZE)
-			vmax = _REGION2_SIZE; /* 3-level kernel page table */
-		else
-			vmax = _REGION1_SIZE; /* 4-level kernel page table */
-	}
-
+	tmp = (memory_end ?: max_physmem_end) / PAGE_SIZE;
+	tmp = tmp * (sizeof(struct page) + PAGE_SIZE);
+	if (tmp + vmalloc_size + MODULES_LEN <= _REGION2_SIZE)
+		vmax = _REGION2_SIZE; /* 3-level kernel page table */
+	else
+		vmax = _REGION1_SIZE; /* 4-level kernel page table */
 	if (is_prot_virt_host())
 		adjust_to_uv_max(&vmax);
-
+#ifdef CONFIG_KASAN
+	vmax = kasan_vmax;
+#endif
 	/* module area is at the end of the kernel address space. */
 	MODULES_END = vmax;
 	MODULES_VADDR = MODULES_END - MODULES_LEN;
diff --git a/arch/s390/kernel/uv.c b/arch/s390/kernel/uv.c
index 1a166a1119c0eebfc59ea7280c5c68f2b7f30170..14bd9d58edc902822a8070e0a59fee58f8abdad0 100644
--- a/arch/s390/kernel/uv.c
+++ b/arch/s390/kernel/uv.c
@@ -51,6 +51,9 @@ void __init setup_uv(void)
 {
 	unsigned long uv_stor_base;
 
+	/*
+	 * keep these conditions in sync with has_uv_sec_stor_limit() in the kasan init code
+	 */
 	if (!is_prot_virt_host())
 		return;
 
diff --git a/arch/s390/mm/kasan_init.c b/arch/s390/mm/kasan_init.c
index 1a27a71433495c53c1915647b8e994942f25327b..5646b39c728a92947d8ac02eed5275a93ed3d255 100644
--- a/arch/s390/mm/kasan_init.c
+++ b/arch/s390/mm/kasan_init.c
@@ -11,7 +11,9 @@
 #include <asm/facility.h>
 #include <asm/sections.h>
 #include <asm/setup.h>
+#include <asm/uv.h>
 
+unsigned long kasan_vmax;
 static unsigned long segment_pos __initdata;
 static unsigned long segment_low __initdata;
 static unsigned long pgalloc_pos __initdata;
@@ -256,14 +258,31 @@ static void __init kasan_early_detect_facilities(void)
 	}
 }
 
+static bool __init has_uv_sec_stor_limit(void)
+{
+	/*
+	 * keep these conditions in sync with setup_uv()
+	 */
+	if (!is_prot_virt_host())
+		return false;
+
+	if (is_prot_virt_guest())
+		return false;
+
+	if (!test_facility(158))
+		return false;
+
+	return !!uv_info.max_sec_stor_addr;
+}
+
 void __init kasan_early_init(void)
 {
 	unsigned long untracked_mem_end;
 	unsigned long shadow_alloc_size;
+	unsigned long vmax_unlimited;
 	unsigned long initrd_end;
 	unsigned long asce_type;
 	unsigned long memsize;
-	unsigned long vmax;
 	unsigned long pgt_prot = pgprot_val(PAGE_KERNEL_RO);
 	pte_t pte_z;
 	pmd_t pmd_z = __pmd(__pa(kasan_early_shadow_pte) | _SEGMENT_ENTRY);
@@ -291,7 +310,9 @@ void __init kasan_early_init(void)
 		BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, P4D_SIZE));
 		crst_table_init((unsigned long *)early_pg_dir,
 				_REGION2_ENTRY_EMPTY);
-		untracked_mem_end = vmax = _REGION1_SIZE;
+		untracked_mem_end = kasan_vmax = vmax_unlimited = _REGION1_SIZE;
+		if (has_uv_sec_stor_limit())
+			kasan_vmax = min(vmax_unlimited, uv_info.max_sec_stor_addr);
 		asce_type = _ASCE_TYPE_REGION2;
 	} else {
 		/* 3 level paging */
@@ -299,7 +320,7 @@ void __init kasan_early_init(void)
 		BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PUD_SIZE));
 		crst_table_init((unsigned long *)early_pg_dir,
 				_REGION3_ENTRY_EMPTY);
-		untracked_mem_end = vmax = _REGION2_SIZE;
+		untracked_mem_end = kasan_vmax = vmax_unlimited = _REGION2_SIZE;
 		asce_type = _ASCE_TYPE_REGION3;
 	}
 
@@ -369,17 +390,20 @@ void __init kasan_early_init(void)
 	/* populate kasan shadow (for identity mapping and zero page mapping) */
 	kasan_early_vmemmap_populate(__sha(0), __sha(memsize), POPULATE_MAP);
 	if (IS_ENABLED(CONFIG_MODULES))
-		untracked_mem_end = vmax - MODULES_LEN;
+		untracked_mem_end = kasan_vmax - MODULES_LEN;
 	if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
-		untracked_mem_end = vmax - vmalloc_size - MODULES_LEN;
+		untracked_mem_end = kasan_vmax - vmalloc_size - MODULES_LEN;
 		/* shallowly populate kasan shadow for vmalloc and modules */
 		kasan_early_vmemmap_populate(__sha(untracked_mem_end),
-					     __sha(vmax), POPULATE_SHALLOW);
+					     __sha(kasan_vmax), POPULATE_SHALLOW);
 	}
 	/* populate kasan shadow for untracked memory */
 	kasan_early_vmemmap_populate(__sha(max_physmem_end),
 				     __sha(untracked_mem_end),
 				     POPULATE_ZERO_SHADOW);
+	kasan_early_vmemmap_populate(__sha(kasan_vmax),
+				     __sha(vmax_unlimited),
+				     POPULATE_ZERO_SHADOW);
 	/* memory allocated for identity mapping structs will be freed later */
 	pgalloc_freeable = pgalloc_pos;
 	/* populate identity mapping */