diff --git a/arch/csky/mm/fault.c b/arch/csky/mm/fault.c
index e888acf1c4039234a2109d343f6ca9bdb56fccee..cecca6729b0407d7552fd558fdd33112c6dffb70 100644
--- a/arch/csky/mm/fault.c
+++ b/arch/csky/mm/fault.c
@@ -80,6 +80,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
 	struct vm_area_struct *vma = NULL;
 	struct task_struct *tsk = current;
 	struct mm_struct *mm = tsk->mm;
+	unsigned int flags = FAULT_FLAG_DEFAULT;
 	int si_code;
 	int fault;
 	unsigned long address = read_mmu_entryhi() & PAGE_MASK;
@@ -150,6 +151,13 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
 	if (in_atomic() || !mm)
 		goto bad_area_nosemaphore;
 
+	if (user_mode(regs))
+		flags |= FAULT_FLAG_USER;
+
+	if (is_write(regs))
+		flags |= FAULT_FLAG_WRITE;
+
+retry:
 	mmap_read_lock(mm);
 	vma = find_vma(mm, address);
 	if (!vma)
@@ -180,8 +188,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(vma, address, is_write(regs) ? FAULT_FLAG_WRITE : 0,
-				regs);
+	fault = handle_mm_fault(vma, address, flags, regs);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
@@ -191,6 +198,18 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
 			goto bad_area;
 		BUG();
 	}
+
+	if (unlikely((fault & VM_FAULT_RETRY) && (flags & FAULT_FLAG_ALLOW_RETRY))) {
+		flags |= FAULT_FLAG_TRIED;
+
+		/*
+		 * No need to mmap_read_unlock(mm) as we would
+		 * have already released it in __lock_page_or_retry
+		 * in mm/filemap.c.
+		 */
+		goto retry;
+	}
+
 	mmap_read_unlock(mm);
 	return;
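
Read together, the four hunks replace csky's one-shot handle_mm_fault() call with the standard page-fault retry loop: flags starts as FAULT_FLAG_DEFAULT (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE | FAULT_FLAG_INTERRUPTIBLE), is widened with FAULT_FLAG_USER and FAULT_FLAG_WRITE as appropriate, and a VM_FAULT_RETRY result loops back to the retry label to retake mmap_lock with FAULT_FLAG_TRIED set. The sketch below condenses the resulting control flow; it is illustrative rather than the full csky handler (the vma bounds/permission checks and the VM_FAULT_ERROR handling are elided), and the fault_signal_pending() guard is not part of the diff above: it is the check sibling ports such as riscv pair with this loop, so that a fatal signal arriving while the fault sleeps gets delivered instead of the handler retrying indefinitely.

static void retry_flow_sketch(struct pt_regs *regs, struct mm_struct *mm,
			      unsigned long address)
{
	struct vm_area_struct *vma;
	unsigned int flags = FAULT_FLAG_DEFAULT;
	vm_fault_t fault;

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (is_write(regs))
		flags |= FAULT_FLAG_WRITE;
retry:
	mmap_read_lock(mm);
	vma = find_vma(mm, address);
	/* ... vma bounds and access-permission checks elided ... */

	fault = handle_mm_fault(vma, address, flags, regs);

	/*
	 * Not in the diff above: if a fatal signal arrived while the
	 * fault slept, mmap_lock has already been dropped, so return
	 * and let the signal be handled (a kernel-mode fault would
	 * take the no_context path here instead).
	 */
	if (fault_signal_pending(fault, regs))
		return;

	/* ... VM_FAULT_ERROR handling elided ... */

	if (unlikely((fault & VM_FAULT_RETRY) &&
		     (flags & FAULT_FLAG_ALLOW_RETRY))) {
		/* mmap_lock was dropped before VM_FAULT_RETRY was returned */
		flags |= FAULT_FLAG_TRIED;
		goto retry;
	}

	mmap_read_unlock(mm);
}

Because FAULT_FLAG_DEFAULT leaves FAULT_FLAG_ALLOW_RETRY set on every pass, the fault may legitimately retry more than once; FAULT_FLAG_TRIED only tells __lock_page_or_retry() that later attempts should block on the page lock rather than return VM_FAULT_RETRY again, which is what bounds the loop once the page becomes available. (The sketch declares fault as vm_fault_t, which is what handle_mm_fault() actually returns; the csky file itself still declares it int.)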