diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 2ea58513da972a1e76647bf16731e98d73c15f0e..a3f26e3aa8b50ff6f1f2ea4494ad721de66a9959 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -834,15 +834,13 @@ static inline int pte_soft_dirty(pte_t pte)
 
 static inline pte_t pte_mksoft_dirty(pte_t pte)
 {
-	pte_val(pte) |= _PAGE_SOFT_DIRTY;
-	return pte;
+	return set_pte_bit(pte, __pgprot(_PAGE_SOFT_DIRTY));
 }
 #define pte_swp_mksoft_dirty pte_mksoft_dirty
 
 static inline pte_t pte_clear_soft_dirty(pte_t pte)
 {
-	pte_val(pte) &= ~_PAGE_SOFT_DIRTY;
-	return pte;
+	return clear_pte_bit(pte, __pgprot(_PAGE_SOFT_DIRTY));
 }
 #define pte_swp_clear_soft_dirty pte_clear_soft_dirty
 
@@ -853,14 +851,12 @@ static inline int pmd_soft_dirty(pmd_t pmd)
 
 static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
 {
-	pmd_val(pmd) |= _SEGMENT_ENTRY_SOFT_DIRTY;
-	return pmd;
+	return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_SOFT_DIRTY));
 }
 
 static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
 {
-	pmd_val(pmd) &= ~_SEGMENT_ENTRY_SOFT_DIRTY;
-	return pmd;
+	return clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_SOFT_DIRTY));
 }
 
 /*
@@ -970,79 +966,74 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *pt
  */
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
-	pte_val(pte) &= _PAGE_CHG_MASK;
-	pte_val(pte) |= pgprot_val(newprot);
+	pte = clear_pte_bit(pte, __pgprot(~_PAGE_CHG_MASK));
+	pte = set_pte_bit(pte, newprot);
 	/*
 	 * newprot for PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX
 	 * has the invalid bit set, clear it again for readable, young pages
 	 */
 	if ((pte_val(pte) & _PAGE_YOUNG) && (pte_val(pte) & _PAGE_READ))
-		pte_val(pte) &= ~_PAGE_INVALID;
+		pte = clear_pte_bit(pte, __pgprot(_PAGE_INVALID));
 	/*
 	 * newprot for PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX has the page
 	 * protection bit set, clear it again for writable, dirty pages
 	 */
 	if ((pte_val(pte) & _PAGE_DIRTY) && (pte_val(pte) & _PAGE_WRITE))
-		pte_val(pte) &= ~_PAGE_PROTECT;
+		pte = clear_pte_bit(pte, __pgprot(_PAGE_PROTECT));
 	return pte;
 }
 
 static inline pte_t pte_wrprotect(pte_t pte)
 {
-	pte_val(pte) &= ~_PAGE_WRITE;
-	pte_val(pte) |= _PAGE_PROTECT;
-	return pte;
+	pte = clear_pte_bit(pte, __pgprot(_PAGE_WRITE));
+	return set_pte_bit(pte, __pgprot(_PAGE_PROTECT));
 }
 
 static inline pte_t pte_mkwrite(pte_t pte)
 {
-	pte_val(pte) |= _PAGE_WRITE;
+	pte = set_pte_bit(pte, __pgprot(_PAGE_WRITE));
 	if (pte_val(pte) & _PAGE_DIRTY)
-		pte_val(pte) &= ~_PAGE_PROTECT;
+		pte = clear_pte_bit(pte, __pgprot(_PAGE_PROTECT));
 	return pte;
 }
 
 static inline pte_t pte_mkclean(pte_t pte)
 {
-	pte_val(pte) &= ~_PAGE_DIRTY;
-	pte_val(pte) |= _PAGE_PROTECT;
-	return pte;
+	pte = clear_pte_bit(pte, __pgprot(_PAGE_DIRTY));
+	return set_pte_bit(pte, __pgprot(_PAGE_PROTECT));
 }
 
 static inline pte_t pte_mkdirty(pte_t pte)
 {
-	pte_val(pte) |= _PAGE_DIRTY | _PAGE_SOFT_DIRTY;
+	pte = set_pte_bit(pte, __pgprot(_PAGE_DIRTY | _PAGE_SOFT_DIRTY));
 	if (pte_val(pte) & _PAGE_WRITE)
-		pte_val(pte) &= ~_PAGE_PROTECT;
+		pte = clear_pte_bit(pte, __pgprot(_PAGE_PROTECT));
 	return pte;
 }
 
 static inline pte_t pte_mkold(pte_t pte)
 {
-	pte_val(pte) &= ~_PAGE_YOUNG;
-	pte_val(pte) |= _PAGE_INVALID;
-	return pte;
+	pte = clear_pte_bit(pte, __pgprot(_PAGE_YOUNG));
+	return set_pte_bit(pte, __pgprot(_PAGE_INVALID));
 }
 
 static inline pte_t pte_mkyoung(pte_t pte)
 {
-	pte_val(pte) |= _PAGE_YOUNG;
+	pte = set_pte_bit(pte, __pgprot(_PAGE_YOUNG));
 	if (pte_val(pte) & _PAGE_READ)
-		pte_val(pte) &= ~_PAGE_INVALID;
+		pte = clear_pte_bit(pte, __pgprot(_PAGE_INVALID));
 	return pte;
 }
 
 static inline pte_t pte_mkspecial(pte_t pte)
 {
-	pte_val(pte) |= _PAGE_SPECIAL;
-	return pte;
+	return set_pte_bit(pte, __pgprot(_PAGE_SPECIAL));
 }
 
 #ifdef CONFIG_HUGETLB_PAGE
 static inline pte_t pte_mkhuge(pte_t pte)
 {
-	pte_val(pte) |= _PAGE_LARGE;
-	return pte;
+	return set_pte_bit(pte, __pgprot(_PAGE_LARGE));
 }
 #endif
 
@@ -1253,7 +1244,7 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 			      pte_t *ptep, pte_t entry)
 {
 	if (pte_present(entry))
-		pte_val(entry) &= ~_PAGE_UNUSED;
+		entry = clear_pte_bit(entry, __pgprot(_PAGE_UNUSED));
 	if (mm_has_pgste(mm))
 		ptep_set_pte_at(mm, addr, ptep, entry);
 	else
@@ -1268,9 +1259,9 @@ static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
 {
 	pte_t __pte;
 
-	pte_val(__pte) = physpage | pgprot_val(pgprot);
+	__pte = __pte(physpage | pgprot_val(pgprot));
 	if (!MACHINE_HAS_NX)
-		pte_val(__pte) &= ~_PAGE_NOEXEC;
+		__pte = clear_pte_bit(__pte, __pgprot(_PAGE_NOEXEC));
 	return pte_mkyoung(__pte);
 }
 
@@ -1410,61 +1401,57 @@ static inline bool gup_fast_permitted(unsigned long start, unsigned long end)
 
 static inline pmd_t pmd_wrprotect(pmd_t pmd)
 {
-	pmd_val(pmd) &= ~_SEGMENT_ENTRY_WRITE;
-	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
-	return pmd;
+	pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_WRITE));
+	return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
 }
 
 static inline pmd_t pmd_mkwrite(pmd_t pmd)
 {
-	pmd_val(pmd) |= _SEGMENT_ENTRY_WRITE;
+	pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_WRITE));
 	if (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY)
-		pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
+		pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
 	return pmd;
 }
 
 static inline pmd_t pmd_mkclean(pmd_t pmd)
 {
-	pmd_val(pmd) &= ~_SEGMENT_ENTRY_DIRTY;
-	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
-	return pmd;
+	pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_DIRTY));
+	return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
 }
 
 static inline pmd_t pmd_mkdirty(pmd_t pmd)
 {
-	pmd_val(pmd) |= _SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_SOFT_DIRTY;
+	pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_SOFT_DIRTY));
 	if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE)
-		pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
+		pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
 	return pmd;
 }
 
 static inline pud_t pud_wrprotect(pud_t pud)
 {
-	pud_val(pud) &= ~_REGION3_ENTRY_WRITE;
-	pud_val(pud) |= _REGION_ENTRY_PROTECT;
-	return pud;
+	pud = clear_pud_bit(pud, __pgprot(_REGION3_ENTRY_WRITE));
+	return set_pud_bit(pud, __pgprot(_REGION_ENTRY_PROTECT));
 }
 
 static inline pud_t pud_mkwrite(pud_t pud)
 {
-	pud_val(pud) |= _REGION3_ENTRY_WRITE;
+	pud = set_pud_bit(pud, __pgprot(_REGION3_ENTRY_WRITE));
 	if (pud_val(pud) & _REGION3_ENTRY_DIRTY)
-		pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
+		pud = clear_pud_bit(pud, __pgprot(_REGION_ENTRY_PROTECT));
 	return pud;
 }
 
 static inline pud_t pud_mkclean(pud_t pud)
 {
-	pud_val(pud) &= ~_REGION3_ENTRY_DIRTY;
-	pud_val(pud) |= _REGION_ENTRY_PROTECT;
-	return pud;
+	pud = clear_pud_bit(pud, __pgprot(_REGION3_ENTRY_DIRTY));
+	return set_pud_bit(pud, __pgprot(_REGION_ENTRY_PROTECT));
 }
 
 static inline pud_t pud_mkdirty(pud_t pud)
 {
-	pud_val(pud) |= _REGION3_ENTRY_DIRTY | _REGION3_ENTRY_SOFT_DIRTY;
+	pud = set_pud_bit(pud, __pgprot(_REGION3_ENTRY_DIRTY | _REGION3_ENTRY_SOFT_DIRTY));
 	if (pud_val(pud) & _REGION3_ENTRY_WRITE)
-		pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
+		pud = clear_pud_bit(pud, __pgprot(_REGION_ENTRY_PROTECT));
 	return pud;
 }
 
@@ -1488,37 +1475,39 @@ static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
 
 static inline pmd_t pmd_mkyoung(pmd_t pmd)
 {
-	pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
+	pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_YOUNG));
 	if (pmd_val(pmd) & _SEGMENT_ENTRY_READ)
-		pmd_val(pmd) &= ~_SEGMENT_ENTRY_INVALID;
+		pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_INVALID));
 	return pmd;
 }
 
 static inline pmd_t pmd_mkold(pmd_t pmd)
 {
-	pmd_val(pmd) &= ~_SEGMENT_ENTRY_YOUNG;
-	pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
-	return pmd;
+	pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_YOUNG));
+	return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_INVALID));
 }
 
 static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
 {
-	pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN_LARGE |
-		_SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG |
-		_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_SOFT_DIRTY;
-	pmd_val(pmd) |= massage_pgprot_pmd(newprot);
+	unsigned long mask;
+
+	mask  = _SEGMENT_ENTRY_ORIGIN_LARGE;
+	mask |= _SEGMENT_ENTRY_DIRTY;
+	mask |= _SEGMENT_ENTRY_YOUNG;
+	mask |= _SEGMENT_ENTRY_LARGE;
+	mask |= _SEGMENT_ENTRY_SOFT_DIRTY;
+	pmd = __pmd(pmd_val(pmd) & mask);
+	pmd = set_pmd_bit(pmd, __pgprot(massage_pgprot_pmd(newprot)));
 	if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
-		pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
+		pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
 	if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG))
-		pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
+		pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_INVALID));
 	return pmd;
 }
 
 static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
 {
-	pmd_t __pmd;
-	pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot);
-	return __pmd;
+	return __pmd(physpage + massage_pgprot_pmd(pgprot));
 }
 
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */
@@ -1640,16 +1629,15 @@ static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 			      pmd_t *pmdp, pmd_t entry)
 {
 	if (!MACHINE_HAS_NX)
-		pmd_val(entry) &= ~_SEGMENT_ENTRY_NOEXEC;
+		entry = clear_pmd_bit(entry, __pgprot(_SEGMENT_ENTRY_NOEXEC));
 	set_pmd(pmdp, entry);
 }
 
 static inline pmd_t pmd_mkhuge(pmd_t pmd)
 {
-	pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
-	pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
-	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
-	return pmd;
+	pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_LARGE));
+	pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_YOUNG));
+	return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
 }
 
 #define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
@@ -1745,12 +1733,12 @@ static inline int has_transparent_hugepage(void)
 
 static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
 {
-	pte_t pte;
+	unsigned long pteval;
 
-	pte_val(pte) = _PAGE_INVALID | _PAGE_PROTECT;
-	pte_val(pte) |= (offset & __SWP_OFFSET_MASK) << __SWP_OFFSET_SHIFT;
-	pte_val(pte) |= (type & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT;
-	return pte;
+	pteval = _PAGE_INVALID | _PAGE_PROTECT;
+	pteval |= (offset & __SWP_OFFSET_MASK) << __SWP_OFFSET_SHIFT;
+	pteval |= (type & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT;
+	return __pte(pteval);
 }
 
 static inline unsigned long __swp_type(swp_entry_t entry)
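
[Editor's note, not part of the patch: the set_pte_bit()/clear_pte_bit() helpers this file converts to are never shown in the hunks above. They presumably follow the same shape as the arm64 helpers of the same name: do the bitwise arithmetic on the raw entry value, then build the typed entry in one step via __pte(), rather than mutating it through pte_val() used as an lvalue. A minimal sketch of the pte variants; the pmd/pud variants are assumed to be analogous:

static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
{
	/* Rebuild the pte with the given protection bits cleared. */
	return __pte(pte_val(pte) & ~pgprot_val(prot));
}

static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
{
	/* Rebuild the pte with the given protection bits set. */
	return __pte(pte_val(pte) | pgprot_val(prot));
}

Under that definition the pte_modify() rewrite above is behavior-preserving: clear_pte_bit(pte, __pgprot(~_PAGE_CHG_MASK)) computes pte_val(pte) & ~~_PAGE_CHG_MASK, which is exactly the original pte_val(pte) &= _PAGE_CHG_MASK.]
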
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 26f8a53f0ae18eccb1479bf322a2e6bfb83a62e2..697df02362af179c209a170e7c07f997d3cb86e4 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -224,8 +224,8 @@ static inline pgste_t pgste_set_pte(pte_t *ptep, pgste_t pgste, pte_t entry)
 			 * Without enhanced suppression-on-protection force
 			 * the dirty bit on for all writable ptes.
 			 */
-			pte_val(entry) |= _PAGE_DIRTY;
-			pte_val(entry) &= ~_PAGE_PROTECT;
+			entry = set_pte_bit(entry, __pgprot(_PAGE_DIRTY));
+			entry = clear_pte_bit(entry, __pgprot(_PAGE_PROTECT));
 		}
 		if (!(pte_val(entry) & _PAGE_PROTECT))
 			/* This pte allows write access, set user-dirty */
@@ -275,7 +275,7 @@ static inline pte_t ptep_xchg_commit(struct mm_struct *mm,
 			pgste = pgste_update_all(old, pgste, mm);
 			if ((pgste_val(pgste) & _PGSTE_GPS_USAGE_MASK) ==
 			    _PGSTE_GPS_USAGE_UNUSED)
-				pte_val(old) |= _PAGE_UNUSED;
+				old = set_pte_bit(old, __pgprot(_PAGE_UNUSED));
 		}
 		pgste = pgste_set_pte(ptep, pgste, new);
 		pgste_set_unlock(ptep, pgste);
@@ -345,7 +345,7 @@ void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
 	struct mm_struct *mm = vma->vm_mm;
 
 	if (!MACHINE_HAS_NX)
-		pte_val(pte) &= ~_PAGE_NOEXEC;
+		pte = clear_pte_bit(pte, __pgprot(_PAGE_NOEXEC));
 	if (mm_has_pgste(mm)) {
 		pgste = pgste_get(ptep);
 		pgste_set_key(ptep, pgste, pte, mm);
@@ -646,12 +646,12 @@ int ptep_force_prot(struct mm_struct *mm, unsigned long addr,
 	if (prot == PROT_NONE && !pte_i) {
 		ptep_flush_direct(mm, addr, ptep, nodat);
 		pgste = pgste_update_all(entry, pgste, mm);
-		pte_val(entry) |= _PAGE_INVALID;
+		entry = set_pte_bit(entry, __pgprot(_PAGE_INVALID));
 	}
 	if (prot == PROT_READ && !pte_p) {
 		ptep_flush_direct(mm, addr, ptep, nodat);
-		pte_val(entry) &= ~_PAGE_INVALID;
-		pte_val(entry) |= _PAGE_PROTECT;
+		entry = clear_pte_bit(entry, __pgprot(_PAGE_INVALID));
+		entry = set_pte_bit(entry, __pgprot(_PAGE_PROTECT));
 	}
 	pgste_val(pgste) |= bit;
 	pgste = pgste_set_pte(ptep, pgste, entry);
@@ -675,8 +675,8 @@ int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
 	      !(pte_val(pte) & _PAGE_PROTECT))) {
 		pgste_val(spgste) |= PGSTE_VSIE_BIT;
 		tpgste = pgste_get_lock(tptep);
-		pte_val(tpte) = (pte_val(spte) & PAGE_MASK) |
-				(pte_val(pte) & _PAGE_PROTECT);
+		tpte = __pte((pte_val(spte) & PAGE_MASK) |
+			     (pte_val(pte) & _PAGE_PROTECT));
 		/* don't touch the storage key - it belongs to parent pgste */
 		tpgste = pgste_set_pte(tptep, tpgste, tpte);
 		pgste_set_unlock(tptep, tpgste);
@@ -773,9 +773,9 @@ bool ptep_test_and_clear_uc(struct mm_struct *mm, unsigned long addr,
 		nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
 		ptep_ipte_global(mm, addr, ptep, nodat);
 		if (MACHINE_HAS_ESOP || !(pte_val(pte) & _PAGE_WRITE))
-			pte_val(pte) |= _PAGE_PROTECT;
+			pte = set_pte_bit(pte, __pgprot(_PAGE_PROTECT));
 		else
-			pte_val(pte) |= _PAGE_INVALID;
+			pte = set_pte_bit(pte, __pgprot(_PAGE_INVALID));
 		set_pte(ptep, pte);
 	}
 	pgste_set_unlock(ptep, pgste);
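
[Editor's note, not part of the patch: the converted call sites hand the finished entry to set_pte()/set_pmd() (see set_pte_at(), set_pmd_at() and ptep_test_and_clear_uc() above). Those setters are likewise outside this patch; they are presumably single-store wrappers along these lines, sketched here under that assumption:

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	/*
	 * Publish the entry with a single store: the hardware walker and
	 * lockless software walkers must never see a half-written entry.
	 */
	WRITE_ONCE(*ptep, pte);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	WRITE_ONCE(*pmdp, pmd);
}

A likely motivation for the series: once nothing writes through pte_val() as an lvalue, the setter becomes the single place where a page-table entry is made visible, which keeps the update ordering auditable.]
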