diff --git a/arch/arm64/kernel/probes/decode-insn.c b/arch/arm64/kernel/probes/decode-insn.c
index 37e47a9d617e1b4da19265c6f4b7494ae756b4cd..d1731bf977ef0f5325be4ddd4f8c6809667d2329 100644
--- a/arch/arm64/kernel/probes/decode-insn.c
+++ b/arch/arm64/kernel/probes/decode-insn.c
@@ -16,6 +16,7 @@
 #include <linux/kernel.h>
 #include <linux/kprobes.h>
 #include <linux/module.h>
+#include <linux/kallsyms.h>
 #include <asm/kprobes.h>
 #include <asm/insn.h>
 #include <asm/sections.h>
@@ -122,7 +123,7 @@ arm_probe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi)
 static bool __kprobes
 is_probed_address_atomic(kprobe_opcode_t *scan_start, kprobe_opcode_t *scan_end)
 {
-	while (scan_start > scan_end) {
+	while (scan_start >= scan_end) {
 		/*
 		 * atomic region starts from exclusive load and ends with
 		 * exclusive store.
@@ -142,33 +143,30 @@ arm_kprobe_decode_insn(kprobe_opcode_t *addr, struct arch_specific_insn *asi)
 {
 	enum kprobe_insn decoded;
 	kprobe_opcode_t insn = le32_to_cpu(*addr);
-	kprobe_opcode_t *scan_start = addr - 1;
-	kprobe_opcode_t *scan_end = addr - MAX_ATOMIC_CONTEXT_SIZE;
-#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
-	struct module *mod;
-#endif
-
-	if (addr >= (kprobe_opcode_t *)_text &&
-	    scan_end < (kprobe_opcode_t *)_text)
-		scan_end = (kprobe_opcode_t *)_text;
-#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
-	else {
-		preempt_disable();
-		mod = __module_address((unsigned long)addr);
-		if (mod && within_module_init((unsigned long)addr, mod) &&
-			!within_module_init((unsigned long)scan_end, mod))
-			scan_end = (kprobe_opcode_t *)mod->init_layout.base;
-		else if (mod && within_module_core((unsigned long)addr, mod) &&
-			!within_module_core((unsigned long)scan_end, mod))
-			scan_end = (kprobe_opcode_t *)mod->core_layout.base;
-		preempt_enable();
+	kprobe_opcode_t *scan_end = NULL;
+	unsigned long size = 0, offset = 0;
+
+	/*
+	 * If there's a symbol defined in front of and near enough to
+	 * the probe address, assume it is the entry point to this
+	 * code and use it to further limit how far back we search
+	 * when determining if we're in an atomic sequence. If we could
+	 * not find any symbol, skip the atomic test altogether, as we
+	 * could otherwise end up searching irrelevant text/literals.
+	 * KPROBES depends on KALLSYMS, so this last case should never
+	 * happen.
+	 */
+	if (kallsyms_lookup_size_offset((unsigned long)addr, &size, &offset)) {
+		if (offset < (MAX_ATOMIC_CONTEXT_SIZE * sizeof(kprobe_opcode_t)))
+			scan_end = addr - (offset / sizeof(kprobe_opcode_t));
+		else
+			scan_end = addr - MAX_ATOMIC_CONTEXT_SIZE;
 	}
-#endif
 	decoded = arm_probe_decode_insn(insn, asi);
 
-	if (decoded == INSN_REJECTED ||
-			is_probed_address_atomic(scan_start, scan_end))
-		return INSN_REJECTED;
+	if (decoded != INSN_REJECTED && scan_end)
+		if (is_probed_address_atomic(addr - 1, scan_end))
+			return INSN_REJECTED;
 
 	return decoded;
 }
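
Note: below is a minimal userspace sketch of the bounding arithmetic the hunk
above introduces, for readers unfamiliar with it. The fake_lookup_size_offset()
helper and the example symbol (64 bytes at 0x1000) are hypothetical stand-ins
for kallsyms_lookup_size_offset() and a real kernel symbol, and the 128-byte
window is assumed to match MAX_ATOMIC_CONTEXT_SIZE in decode-insn.h; only the
scan_end computation mirrors the patch.

/*
 * Sketch only: shows how the probe's offset into its symbol caps the
 * backward scan at either the symbol entry point or MAX_ATOMIC_CONTEXT_SIZE
 * instructions, whichever is nearer.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t kprobe_opcode_t;	/* AArch64 instructions are 32 bits */
#define MAX_ATOMIC_CONTEXT_SIZE	(128 / sizeof(kprobe_opcode_t))	/* assumed */

/* Hypothetical stand-in for kallsyms_lookup_size_offset(). */
static bool fake_lookup_size_offset(unsigned long addr, unsigned long sym_start,
				    unsigned long sym_size,
				    unsigned long *size, unsigned long *offset)
{
	if (addr < sym_start || addr >= sym_start + sym_size)
		return false;
	*size = sym_size;
	*offset = addr - sym_start;
	return true;
}

int main(void)
{
	/* Pretend a function symbol starts at 0x1000 and is 64 bytes long. */
	unsigned long sym_start = 0x1000, sym_size = 64;
	/* Probe three instructions (12 bytes) into that symbol. */
	kprobe_opcode_t *addr = (kprobe_opcode_t *)(sym_start + 12);
	kprobe_opcode_t *scan_end = NULL;
	unsigned long size = 0, offset = 0;

	if (fake_lookup_size_offset((unsigned long)addr, sym_start, sym_size,
				    &size, &offset)) {
		if (offset < (MAX_ATOMIC_CONTEXT_SIZE * sizeof(kprobe_opcode_t)))
			scan_end = addr - (offset / sizeof(kprobe_opcode_t));
		else
			scan_end = addr - MAX_ATOMIC_CONTEXT_SIZE;
	}

	/*
	 * scan_end now equals the symbol entry point (0x1000), so the
	 * backward walk in is_probed_address_atomic() stops there instead
	 * of wandering into the previous function's text or literal pool.
	 */
	printf("probe at %#lx, scan_end at %#lx\n",
	       (unsigned long)addr, (unsigned long)scan_end);
	return 0;
}

With a probe 12 bytes into the symbol, scan_end lands exactly on the entry
point; a probe deeper than 128 bytes in would instead be bounded by
MAX_ATOMIC_CONTEXT_SIZE, matching the two branches in the hunk above.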