diff --git a/MAINTAINERS b/MAINTAINERS
index ecc43c255eb8d73722f885c4487b253ce575747d..e9caa4b288284b92a34681366a29223f691338b9 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -10300,7 +10300,7 @@ F:	include/net/switchdev.h
 
 SYNOPSYS ARC ARCHITECTURE
 M:	Vineet Gupta <vgupta@synopsys.com>
-L:	linux-snps-arc@lists.infraded.org
+L:	linux-snps-arc@lists.infradead.org
 S:	Supported
 F:	arch/arc/
 F:	Documentation/devicetree/bindings/arc/*
diff --git a/arch/arc/include/asm/processor.h b/arch/arc/include/asm/processor.h
index 44545354e9e85616b703f531787ed74def265bf5..1d694c1ef6d6bf33ccb17745db6e58e816915875 100644
--- a/arch/arc/include/asm/processor.h
+++ b/arch/arc/include/asm/processor.h
@@ -57,11 +57,7 @@ struct task_struct;
  * A lot of busy-wait loops in SMP are based on non-volatile data which would
  * otherwise get optimised away by gcc
  */
-#ifdef CONFIG_SMP
 #define cpu_relax()	__asm__ __volatile__ ("" : : : "memory")
-#else
-#define cpu_relax()	do { } while (0)
-#endif
 
 #define cpu_relax_lowlatency() cpu_relax()
 
diff --git a/arch/arc/kernel/entry-arcv2.S b/arch/arc/kernel/entry-arcv2.S
index 445e63a10754fbdb0be82663af03f489282cfce5..cbfec79137bf77735fa675d0eb2be57da217ba63 100644
--- a/arch/arc/kernel/entry-arcv2.S
+++ b/arch/arc/kernel/entry-arcv2.S
@@ -91,6 +91,28 @@ ENTRY(EV_DCError)
 	flag 1
 END(EV_DCError)
 
+; ---------------------------------------------
+; Memory Error Exception Handler
+;   - Unlike ARCompact, handles Bus errors for both User/Kernel mode,
+;     Instruction fetch or Data access, under a single Exception Vector
+; ---------------------------------------------
+
+ENTRY(mem_service)
+
+	EXCEPTION_PROLOGUE
+
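+	; r0 <- faulting address (EFA aux reg), r1 <- pt_regs for do_memory_error()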
+	lr  r0, [efa]
+	mov r1, sp
+
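+	; FAKE_RET_FROM_EXCPN drops the exception-active state so that
+	; the C handler below runs as ordinary kernel code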
+	FAKE_RET_FROM_EXCPN
+
+	bl  do_memory_error
+	b   ret_from_exception
+END(mem_service)
+
 ENTRY(EV_Misaligned)
 
 	EXCEPTION_PROLOGUE
diff --git a/arch/arc/kernel/entry-compact.S b/arch/arc/kernel/entry-compact.S
index 59f52035b4ea34a582b50b80e5db48893c1dc7bf..431433929189c8b63e8b3efe41ef03030fa8b86f 100644
--- a/arch/arc/kernel/entry-compact.S
+++ b/arch/arc/kernel/entry-compact.S
@@ -142,16 +142,12 @@ int1_saved_reg:
 	.zero 4
 
 /* Each Interrupt level needs its own scratch */
-#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
-
 ARCFP_DATA int2_saved_reg
 	.type   int2_saved_reg, @object
 	.size   int2_saved_reg, 4
 int2_saved_reg:
 	.zero 4
 
-#endif
-
 ; ---------------------------------------------
 	.section .text, "ax",@progbits
 
@@ -215,6 +211,33 @@ END(handle_interrupt_level2)
 
 #endif
 
+; ---------------------------------------------
+; User Mode Memory Bus Error Interrupt Handler
+; (Kernel mode memory errors handled via separate exception vectors)
+; ---------------------------------------------
+ENTRY(mem_service)
+
+	INTERRUPT_PROLOGUE 2
+
+	mov r0, ilink2
+	mov r1, sp
+
+	; User process needs to be killed with SIGBUS, but we first need to get
+	; out of the L2 interrupt context (drop to pure kernel mode) and jump
+	; off to "C" code where SIGBUS is enqueued
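+	; (clearing A2 takes us out of the active L2 interrupt; setting
+	;  E1/E2 re-enables both interrupt priority levels after rtie)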
+	lr  r3, [status32]
+	bclr r3, r3, STATUS_A2_BIT
+	or  r3, r3, (STATUS_E1_MASK|STATUS_E2_MASK)
+	sr  r3, [status32_l2]
+	mov ilink2, 1f
+	rtie
+1:
+	bl  do_memory_error
+	b   ret_from_exception
+END(mem_service)
+
 ; ---------------------------------------------
 ;  Level 1 ISR
 ; ---------------------------------------------
diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S
index 589abf5172d6a19047337e315fb2ff5ad1fab8a4..2efb0625331d6d5a6b6f8b1558a05e6e3cc5679e 100644
--- a/arch/arc/kernel/entry.S
+++ b/arch/arc/kernel/entry.S
@@ -92,23 +92,6 @@ ENTRY(instr_service)
 	b   ret_from_exception
 END(instr_service)
 
-; ---------------------------------------------
-; Memory Error Exception Handler
-; ---------------------------------------------
-
-ENTRY(mem_service)
-
-	EXCEPTION_PROLOGUE
-
-	lr  r0, [efa]
-	mov r1, sp
-
-	FAKE_RET_FROM_EXCPN
-
-	bl  do_memory_error
-	b   ret_from_exception
-END(mem_service)
-
 ; ---------------------------------------------
 ; Machine Check Exception Handler
 ; ---------------------------------------------
diff --git a/arch/arc/lib/memcpy-archs.S b/arch/arc/lib/memcpy-archs.S
index 0cab0b8a57c5665e6686e9bef843fbfa51f141fd..f96c75edf30af5b38ecb7aa73710bc523dbfd366 100644
--- a/arch/arc/lib/memcpy-archs.S
+++ b/arch/arc/lib/memcpy-archs.S
@@ -50,26 +50,27 @@ ENTRY(memcpy)
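+;;; The .L prefix below makes these labels assembler-local, keeping them out of the symbol table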
 
 ;;; if size <= 8
 	cmp	r2, 8
-	bls.d	@smallchunk
+	bls.d	@.Lsmallchunk
 	mov.f	lp_count, r2
 
 	and.f	r4, r0, 0x03
 	rsub	lp_count, r4, 4
-	lpnz	@aligndestination
+	lpnz	@.Laligndestination
 	;; LOOP BEGIN
 	ldb.ab	r5, [r1,1]
 	sub	r2, r2, 1
 	stb.ab	r5, [r3,1]
-aligndestination:
+.Laligndestination:
 
 ;;; Check the alignment of the source
 	and.f	r4, r1, 0x03
-	bnz.d	@sourceunaligned
+	bnz.d	@.Lsourceunaligned
 
 ;;; CASE 0: Both source and destination are 32bit aligned
 ;;; Convert len to Dwords, unfold x4
 	lsr.f	lp_count, r2, ZOLSHFT
-	lpnz	@copy32_64bytes
+	lpnz	@.Lcopy32_64bytes
 	;; LOOP START
 	LOADX (r6, r1)
 	PREFETCH_READ (r1)
@@ -81,25 +81,25 @@ aligndestination:
 	STOREX (r8, r3)
 	STOREX (r10, r3)
 	STOREX (r4, r3)
-copy32_64bytes:
+.Lcopy32_64bytes:
 
 	and.f	lp_count, r2, ZOLAND ;Last remaining 31 bytes
-smallchunk:
-	lpnz	@copyremainingbytes
+.Lsmallchunk:
+	lpnz	@.Lcopyremainingbytes
 	;; LOOP START
 	ldb.ab	r5, [r1,1]
 	stb.ab	r5, [r3,1]
-copyremainingbytes:
+.Lcopyremainingbytes:
 
 	j	[blink]
 ;;; END CASE 0
 
-sourceunaligned:
+.Lsourceunaligned:
 	cmp	r4, 2
-	beq.d	@unalignedOffby2
+	beq.d	@.LunalignedOffby2
 	sub	r2, r2, 1
 
-	bhi.d	@unalignedOffby3
+	bhi.d	@.LunalignedOffby3
 	ldb.ab	r5, [r1, 1]
 
 ;;; CASE 1: The source is unaligned, off by 1
@@ -114,7 +114,7 @@ sourceunaligned:
 	or	r5, r5, r6
 
 	;; Both src and dst are aligned
-	lpnz	@copy8bytes_1
+	lpnz	@.Lcopy8bytes_1
 	;; LOOP START
 	ld.ab	r6, [r1, 4]
 	prefetch [r1, 28]	;Prefetch the next read location
@@ -131,7 +131,7 @@ sourceunaligned:
 
 	st.ab	r7, [r3, 4]
 	st.ab	r9, [r3, 4]
-copy8bytes_1:
+.Lcopy8bytes_1:
 
 	;; Write back the remaining 16bits
 	EXTRACT_1 (r6, r5, 16)
@@ -141,14 +141,14 @@ copy8bytes_1:
 	stb.ab	r5, [r3, 1]
 
 	and.f	lp_count, r2, 0x07 ;Last 8bytes
-	lpnz	@copybytewise_1
+	lpnz	@.Lcopybytewise_1
 	;; LOOP START
 	ldb.ab	r6, [r1,1]
 	stb.ab	r6, [r3,1]
-copybytewise_1:
+.Lcopybytewise_1:
 	j	[blink]
 
-unalignedOffby2:
+.LunalignedOffby2:
 ;;; CASE 2: The source is unaligned, off by 2
 	ldh.ab	r5, [r1, 2]
 	sub	r2, r2, 1
@@ -159,7 +159,7 @@ unalignedOffby2:
 #ifdef __BIG_ENDIAN__
 	asl.nz	r5, r5, 16
 #endif
-	lpnz	@copy8bytes_2
+	lpnz	@.Lcopy8bytes_2
 	;; LOOP START
 	ld.ab	r6, [r1, 4]
 	prefetch [r1, 28]	;Prefetch the next read location
@@ -176,7 +176,7 @@ unalignedOffby2:
 
 	st.ab	r7, [r3, 4]
 	st.ab	r9, [r3, 4]
-copy8bytes_2:
+.Lcopy8bytes_2:
 
 #ifdef __BIG_ENDIAN__
 	lsr.nz	r5, r5, 16
@@ -184,14 +184,14 @@ copy8bytes_2:
 	sth.ab	r5, [r3, 2]
 
 	and.f	lp_count, r2, 0x07 ;Last 8bytes
-	lpnz	@copybytewise_2
+	lpnz	@.Lcopybytewise_2
 	;; LOOP START
 	ldb.ab	r6, [r1,1]
 	stb.ab	r6, [r3,1]
-copybytewise_2:
+.Lcopybytewise_2:
 	j	[blink]
 
-unalignedOffby3:
+.LunalignedOffby3:
 ;;; CASE 3: The source is unaligned, off by 3
 ;;; Hence, we need to read 1 byte first to achieve 32bit alignment
 
@@ -201,7 +201,7 @@ unalignedOffby3:
 #ifdef __BIG_ENDIAN__
 	asl.ne	r5, r5, 24
 #endif
-	lpnz	@copy8bytes_3
+	lpnz	@.Lcopy8bytes_3
 	;; LOOP START
 	ld.ab	r6, [r1, 4]
 	prefetch [r1, 28]	;Prefetch the next read location
@@ -218,7 +218,7 @@ unalignedOffby3:
 
 	st.ab	r7, [r3, 4]
 	st.ab	r9, [r3, 4]
-copy8bytes_3:
+.Lcopy8bytes_3:
 
 #ifdef __BIG_ENDIAN__
 	lsr.nz	r5, r5, 24
@@ -226,11 +226,11 @@ copy8bytes_3:
 	stb.ab	r5, [r3, 1]
 
 	and.f	lp_count, r2, 0x07 ;Last 8bytes
-	lpnz	@copybytewise_3
+	lpnz	@.Lcopybytewise_3
 	;; LOOP START
 	ldb.ab	r6, [r1,1]
 	stb.ab	r6, [r3,1]
-copybytewise_3:
+.Lcopybytewise_3:
 	j	[blink]
 
 END(memcpy)
diff --git a/arch/arc/mm/tlbex.S b/arch/arc/mm/tlbex.S
index 63860adc4814083dd5365d83ffd790fa6d390ab4..f1967eeb32e757bb906580fecfce84a309df9983 100644
--- a/arch/arc/mm/tlbex.S
+++ b/arch/arc/mm/tlbex.S
@@ -88,7 +88,8 @@ ex_saved_reg1:
 #ifdef CONFIG_SMP
 	sr  r0, [ARC_REG_SCRATCH_DATA0]	; freeup r0 to code with
 	GET_CPU_ID  r0			; get to per cpu scratch mem,
-	lsl r0, r0, L1_CACHE_SHIFT	; cache line wide per cpu
+	asl r0, r0, L1_CACHE_SHIFT	; cache line wide per cpu
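+	; (asl == lsl for a left shift; asl is presumably the preferred mnemonic here)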
 	add r0, @ex_saved_reg1, r0
 #else
 	st    r0, [@ex_saved_reg1]
@@ -107,7 +107,7 @@ ex_saved_reg1:
 .macro TLBMISS_RESTORE_REGS
 #ifdef CONFIG_SMP
 	GET_CPU_ID  r0			; get to per cpu scratch mem
-	lsl r0, r0, L1_CACHE_SHIFT	; each is cache line wide
+	asl r0, r0, L1_CACHE_SHIFT	; each is cache line wide
 	add r0, @ex_saved_reg1, r0
 	ld_s  r3, [r0,12]
 	ld_s  r2, [r0, 8]
@@ -256,7 +256,7 @@ ex_saved_reg1:
 
 .macro CONV_PTE_TO_TLB
 	and    r3, r0, PTE_BITS_RWX	;          r  w  x
-	lsl    r2, r3, 3		; Kr Kw Kx 0  0  0 (GLOBAL, kernel only)
+	asl    r2, r3, 3		; Kr Kw Kx 0  0  0 (GLOBAL, kernel only)
 	and.f  0,  r0, _PAGE_GLOBAL
 	or.z   r2, r2, r3		; Kr Kw Kx Ur Uw Ux (!GLOBAL, user page)
 
diff --git a/arch/arc/plat-sim/platform.c b/arch/arc/plat-sim/platform.c
index dde692812bc16ac70bc3bfba24fc14ba71d158a2..e4fe5145680889edd270edbc4d23655afdedf660 100644
--- a/arch/arc/plat-sim/platform.c
+++ b/arch/arc/plat-sim/platform.c
@@ -10,7 +10,6 @@
 
 #include <linux/init.h>
 #include <asm/mach_desc.h>
-#include <asm/mcip.h>
 
 /*----------------------- Machine Descriptions ------------------------------
  *