diff options
-rw-r--r-- | MAINTAINERS | 2 | ||||
-rw-r--r-- | arch/arc/include/asm/processor.h | 4 | ||||
-rw-r--r-- | arch/arc/kernel/entry-arcv2.S | 19 | ||||
-rw-r--r-- | arch/arc/kernel/entry-compact.S | 29 | ||||
-rw-r--r-- | arch/arc/kernel/entry.S | 17 | ||||
-rw-r--r-- | arch/arc/lib/memcpy-archs.S | 52 | ||||
-rw-r--r-- | arch/arc/mm/tlbex.S | 6 | ||||
-rw-r--r-- | arch/arc/plat-sim/platform.c | 1 |
8 files changed, 74 insertions, 56 deletions
diff --git a/MAINTAINERS b/MAINTAINERS index ecc43c255eb8..e9caa4b28828 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -10300,7 +10300,7 @@ F: include/net/switchdev.h | |||
10300 | 10300 | ||
10301 | SYNOPSYS ARC ARCHITECTURE | 10301 | SYNOPSYS ARC ARCHITECTURE |
10302 | M: Vineet Gupta <vgupta@synopsys.com> | 10302 | M: Vineet Gupta <vgupta@synopsys.com> |
10303 | L: linux-snps-arc@lists.infraded.org | 10303 | L: linux-snps-arc@lists.infradead.org |
10304 | S: Supported | 10304 | S: Supported |
10305 | F: arch/arc/ | 10305 | F: arch/arc/ |
10306 | F: Documentation/devicetree/bindings/arc/* | 10306 | F: Documentation/devicetree/bindings/arc/* |
diff --git a/arch/arc/include/asm/processor.h b/arch/arc/include/asm/processor.h index 44545354e9e8..1d694c1ef6d6 100644 --- a/arch/arc/include/asm/processor.h +++ b/arch/arc/include/asm/processor.h | |||
@@ -57,11 +57,7 @@ struct task_struct; | |||
57 | * A lot of busy-wait loops in SMP are based off of non-volatile data otherwise | 57 | * A lot of busy-wait loops in SMP are based off of non-volatile data otherwise |
58 | * get optimised away by gcc | 58 | * get optimised away by gcc |
59 | */ | 59 | */ |
60 | #ifdef CONFIG_SMP | ||
61 | #define cpu_relax() __asm__ __volatile__ ("" : : : "memory") | 60 | #define cpu_relax() __asm__ __volatile__ ("" : : : "memory") |
62 | #else | ||
63 | #define cpu_relax() do { } while (0) | ||
64 | #endif | ||
65 | 61 | ||
66 | #define cpu_relax_lowlatency() cpu_relax() | 62 | #define cpu_relax_lowlatency() cpu_relax() |
67 | 63 | ||
diff --git a/arch/arc/kernel/entry-arcv2.S b/arch/arc/kernel/entry-arcv2.S index 445e63a10754..cbfec79137bf 100644 --- a/arch/arc/kernel/entry-arcv2.S +++ b/arch/arc/kernel/entry-arcv2.S | |||
@@ -91,6 +91,25 @@ ENTRY(EV_DCError) | |||
91 | flag 1 | 91 | flag 1 |
92 | END(EV_DCError) | 92 | END(EV_DCError) |
93 | 93 | ||
94 | ; --------------------------------------------- | ||
95 | ; Memory Error Exception Handler | ||
96 | ; - Unlike ARCompact, handles Bus errors for both User/Kernel mode, | ||
97 | ; Instruction fetch or Data access, under a single Exception Vector | ||
98 | ; --------------------------------------------- | ||
99 | |||
100 | ENTRY(mem_service) | ||
101 | |||
102 | EXCEPTION_PROLOGUE | ||
103 | |||
104 | lr r0, [efa] | ||
105 | mov r1, sp | ||
106 | |||
107 | FAKE_RET_FROM_EXCPN | ||
108 | |||
109 | bl do_memory_error | ||
110 | b ret_from_exception | ||
111 | END(mem_service) | ||
112 | |||
94 | ENTRY(EV_Misaligned) | 113 | ENTRY(EV_Misaligned) |
95 | 114 | ||
96 | EXCEPTION_PROLOGUE | 115 | EXCEPTION_PROLOGUE |
diff --git a/arch/arc/kernel/entry-compact.S b/arch/arc/kernel/entry-compact.S index 59f52035b4ea..431433929189 100644 --- a/arch/arc/kernel/entry-compact.S +++ b/arch/arc/kernel/entry-compact.S | |||
@@ -142,16 +142,12 @@ int1_saved_reg: | |||
142 | .zero 4 | 142 | .zero 4 |
143 | 143 | ||
144 | /* Each Interrupt level needs its own scratch */ | 144 | /* Each Interrupt level needs its own scratch */ |
145 | #ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS | ||
146 | |||
147 | ARCFP_DATA int2_saved_reg | 145 | ARCFP_DATA int2_saved_reg |
148 | .type int2_saved_reg, @object | 146 | .type int2_saved_reg, @object |
149 | .size int2_saved_reg, 4 | 147 | .size int2_saved_reg, 4 |
150 | int2_saved_reg: | 148 | int2_saved_reg: |
151 | .zero 4 | 149 | .zero 4 |
152 | 150 | ||
153 | #endif | ||
154 | |||
155 | ; --------------------------------------------- | 151 | ; --------------------------------------------- |
156 | .section .text, "ax",@progbits | 152 | .section .text, "ax",@progbits |
157 | 153 | ||
@@ -216,6 +212,31 @@ END(handle_interrupt_level2) | |||
216 | #endif | 212 | #endif |
217 | 213 | ||
218 | ; --------------------------------------------- | 214 | ; --------------------------------------------- |
215 | ; User Mode Memory Bus Error Interrupt Handler | ||
216 | ; (Kernel mode memory errors handled via separate exception vectors) | ||
217 | ; --------------------------------------------- | ||
218 | ENTRY(mem_service) | ||
219 | |||
220 | INTERRUPT_PROLOGUE 2 | ||
221 | |||
222 | mov r0, ilink2 | ||
223 | mov r1, sp | ||
224 | |||
225 | ; User process needs to be killed with SIGBUS, but first need to get | ||
226 | ; out of the L2 interrupt context (drop to pure kernel mode) and jump | ||
227 | ; off to "C" code where SIGBUS is enqueued | ||
228 | lr r3, [status32] | ||
229 | bclr r3, r3, STATUS_A2_BIT | ||
230 | or r3, r3, (STATUS_E1_MASK|STATUS_E2_MASK) | ||
231 | sr r3, [status32_l2] | ||
232 | mov ilink2, 1f | ||
233 | rtie | ||
234 | 1: | ||
235 | bl do_memory_error | ||
236 | b ret_from_exception | ||
237 | END(mem_service) | ||
238 | |||
239 | ; --------------------------------------------- | ||
219 | ; Level 1 ISR | 240 | ; Level 1 ISR |
220 | ; --------------------------------------------- | 241 | ; --------------------------------------------- |
221 | ENTRY(handle_interrupt_level1) | 242 | ENTRY(handle_interrupt_level1) |
diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S index 589abf5172d6..2efb0625331d 100644 --- a/arch/arc/kernel/entry.S +++ b/arch/arc/kernel/entry.S | |||
@@ -93,23 +93,6 @@ ENTRY(instr_service) | |||
93 | END(instr_service) | 93 | END(instr_service) |
94 | 94 | ||
95 | ; --------------------------------------------- | 95 | ; --------------------------------------------- |
96 | ; Memory Error Exception Handler | ||
97 | ; --------------------------------------------- | ||
98 | |||
99 | ENTRY(mem_service) | ||
100 | |||
101 | EXCEPTION_PROLOGUE | ||
102 | |||
103 | lr r0, [efa] | ||
104 | mov r1, sp | ||
105 | |||
106 | FAKE_RET_FROM_EXCPN | ||
107 | |||
108 | bl do_memory_error | ||
109 | b ret_from_exception | ||
110 | END(mem_service) | ||
111 | |||
112 | ; --------------------------------------------- | ||
113 | ; Machine Check Exception Handler | 96 | ; Machine Check Exception Handler |
114 | ; --------------------------------------------- | 97 | ; --------------------------------------------- |
115 | 98 | ||
diff --git a/arch/arc/lib/memcpy-archs.S b/arch/arc/lib/memcpy-archs.S index 0cab0b8a57c5..f96c75edf30a 100644 --- a/arch/arc/lib/memcpy-archs.S +++ b/arch/arc/lib/memcpy-archs.S | |||
@@ -50,26 +50,26 @@ ENTRY(memcpy) | |||
50 | 50 | ||
51 | ;;; if size <= 8 | 51 | ;;; if size <= 8 |
52 | cmp r2, 8 | 52 | cmp r2, 8 |
53 | bls.d @smallchunk | 53 | bls.d @.Lsmallchunk |
54 | mov.f lp_count, r2 | 54 | mov.f lp_count, r2 |
55 | 55 | ||
56 | and.f r4, r0, 0x03 | 56 | and.f r4, r0, 0x03 |
57 | rsub lp_count, r4, 4 | 57 | rsub lp_count, r4, 4 |
58 | lpnz @aligndestination | 58 | lpnz @.Laligndestination |
59 | ;; LOOP BEGIN | 59 | ;; LOOP BEGIN |
60 | ldb.ab r5, [r1,1] | 60 | ldb.ab r5, [r1,1] |
61 | sub r2, r2, 1 | 61 | sub r2, r2, 1 |
62 | stb.ab r5, [r3,1] | 62 | stb.ab r5, [r3,1] |
63 | aligndestination: | 63 | .Laligndestination: |
64 | 64 | ||
65 | ;;; Check the alignment of the source | 65 | ;;; Check the alignment of the source |
66 | and.f r4, r1, 0x03 | 66 | and.f r4, r1, 0x03 |
67 | bnz.d @sourceunaligned | 67 | bnz.d @.Lsourceunaligned |
68 | 68 | ||
69 | ;;; CASE 0: Both source and destination are 32bit aligned | 69 | ;;; CASE 0: Both source and destination are 32bit aligned |
70 | ;;; Convert len to Dwords, unfold x4 | 70 | ;;; Convert len to Dwords, unfold x4 |
71 | lsr.f lp_count, r2, ZOLSHFT | 71 | lsr.f lp_count, r2, ZOLSHFT |
72 | lpnz @copy32_64bytes | 72 | lpnz @.Lcopy32_64bytes |
73 | ;; LOOP START | 73 | ;; LOOP START |
74 | LOADX (r6, r1) | 74 | LOADX (r6, r1) |
75 | PREFETCH_READ (r1) | 75 | PREFETCH_READ (r1) |
@@ -81,25 +81,25 @@ aligndestination: | |||
81 | STOREX (r8, r3) | 81 | STOREX (r8, r3) |
82 | STOREX (r10, r3) | 82 | STOREX (r10, r3) |
83 | STOREX (r4, r3) | 83 | STOREX (r4, r3) |
84 | copy32_64bytes: | 84 | .Lcopy32_64bytes: |
85 | 85 | ||
86 | and.f lp_count, r2, ZOLAND ;Last remaining 31 bytes | 86 | and.f lp_count, r2, ZOLAND ;Last remaining 31 bytes |
87 | smallchunk: | 87 | .Lsmallchunk: |
88 | lpnz @copyremainingbytes | 88 | lpnz @.Lcopyremainingbytes |
89 | ;; LOOP START | 89 | ;; LOOP START |
90 | ldb.ab r5, [r1,1] | 90 | ldb.ab r5, [r1,1] |
91 | stb.ab r5, [r3,1] | 91 | stb.ab r5, [r3,1] |
92 | copyremainingbytes: | 92 | .Lcopyremainingbytes: |
93 | 93 | ||
94 | j [blink] | 94 | j [blink] |
95 | ;;; END CASE 0 | 95 | ;;; END CASE 0 |
96 | 96 | ||
97 | sourceunaligned: | 97 | .Lsourceunaligned: |
98 | cmp r4, 2 | 98 | cmp r4, 2 |
99 | beq.d @unalignedOffby2 | 99 | beq.d @.LunalignedOffby2 |
100 | sub r2, r2, 1 | 100 | sub r2, r2, 1 |
101 | 101 | ||
102 | bhi.d @unalignedOffby3 | 102 | bhi.d @.LunalignedOffby3 |
103 | ldb.ab r5, [r1, 1] | 103 | ldb.ab r5, [r1, 1] |
104 | 104 | ||
105 | ;;; CASE 1: The source is unaligned, off by 1 | 105 | ;;; CASE 1: The source is unaligned, off by 1 |
@@ -114,7 +114,7 @@ sourceunaligned: | |||
114 | or r5, r5, r6 | 114 | or r5, r5, r6 |
115 | 115 | ||
116 | ;; Both src and dst are aligned | 116 | ;; Both src and dst are aligned |
117 | lpnz @copy8bytes_1 | 117 | lpnz @.Lcopy8bytes_1 |
118 | ;; LOOP START | 118 | ;; LOOP START |
119 | ld.ab r6, [r1, 4] | 119 | ld.ab r6, [r1, 4] |
120 | prefetch [r1, 28] ;Prefetch the next read location | 120 | prefetch [r1, 28] ;Prefetch the next read location |
@@ -131,7 +131,7 @@ sourceunaligned: | |||
131 | 131 | ||
132 | st.ab r7, [r3, 4] | 132 | st.ab r7, [r3, 4] |
133 | st.ab r9, [r3, 4] | 133 | st.ab r9, [r3, 4] |
134 | copy8bytes_1: | 134 | .Lcopy8bytes_1: |
135 | 135 | ||
136 | ;; Write back the remaining 16bits | 136 | ;; Write back the remaining 16bits |
137 | EXTRACT_1 (r6, r5, 16) | 137 | EXTRACT_1 (r6, r5, 16) |
@@ -141,14 +141,14 @@ copy8bytes_1: | |||
141 | stb.ab r5, [r3, 1] | 141 | stb.ab r5, [r3, 1] |
142 | 142 | ||
143 | and.f lp_count, r2, 0x07 ;Last 8bytes | 143 | and.f lp_count, r2, 0x07 ;Last 8bytes |
144 | lpnz @copybytewise_1 | 144 | lpnz @.Lcopybytewise_1 |
145 | ;; LOOP START | 145 | ;; LOOP START |
146 | ldb.ab r6, [r1,1] | 146 | ldb.ab r6, [r1,1] |
147 | stb.ab r6, [r3,1] | 147 | stb.ab r6, [r3,1] |
148 | copybytewise_1: | 148 | .Lcopybytewise_1: |
149 | j [blink] | 149 | j [blink] |
150 | 150 | ||
151 | unalignedOffby2: | 151 | .LunalignedOffby2: |
152 | ;;; CASE 2: The source is unaligned, off by 2 | 152 | ;;; CASE 2: The source is unaligned, off by 2 |
153 | ldh.ab r5, [r1, 2] | 153 | ldh.ab r5, [r1, 2] |
154 | sub r2, r2, 1 | 154 | sub r2, r2, 1 |
@@ -159,7 +159,7 @@ unalignedOffby2: | |||
159 | #ifdef __BIG_ENDIAN__ | 159 | #ifdef __BIG_ENDIAN__ |
160 | asl.nz r5, r5, 16 | 160 | asl.nz r5, r5, 16 |
161 | #endif | 161 | #endif |
162 | lpnz @copy8bytes_2 | 162 | lpnz @.Lcopy8bytes_2 |
163 | ;; LOOP START | 163 | ;; LOOP START |
164 | ld.ab r6, [r1, 4] | 164 | ld.ab r6, [r1, 4] |
165 | prefetch [r1, 28] ;Prefetch the next read location | 165 | prefetch [r1, 28] ;Prefetch the next read location |
@@ -176,7 +176,7 @@ unalignedOffby2: | |||
176 | 176 | ||
177 | st.ab r7, [r3, 4] | 177 | st.ab r7, [r3, 4] |
178 | st.ab r9, [r3, 4] | 178 | st.ab r9, [r3, 4] |
179 | copy8bytes_2: | 179 | .Lcopy8bytes_2: |
180 | 180 | ||
181 | #ifdef __BIG_ENDIAN__ | 181 | #ifdef __BIG_ENDIAN__ |
182 | lsr.nz r5, r5, 16 | 182 | lsr.nz r5, r5, 16 |
@@ -184,14 +184,14 @@ copy8bytes_2: | |||
184 | sth.ab r5, [r3, 2] | 184 | sth.ab r5, [r3, 2] |
185 | 185 | ||
186 | and.f lp_count, r2, 0x07 ;Last 8bytes | 186 | and.f lp_count, r2, 0x07 ;Last 8bytes |
187 | lpnz @copybytewise_2 | 187 | lpnz @.Lcopybytewise_2 |
188 | ;; LOOP START | 188 | ;; LOOP START |
189 | ldb.ab r6, [r1,1] | 189 | ldb.ab r6, [r1,1] |
190 | stb.ab r6, [r3,1] | 190 | stb.ab r6, [r3,1] |
191 | copybytewise_2: | 191 | .Lcopybytewise_2: |
192 | j [blink] | 192 | j [blink] |
193 | 193 | ||
194 | unalignedOffby3: | 194 | .LunalignedOffby3: |
195 | ;;; CASE 3: The source is unaligned, off by 3 | 195 | ;;; CASE 3: The source is unaligned, off by 3 |
196 | ;;; Hence, I need to read 1byte for achieve the 32bit alignment | 196 | ;;; Hence, I need to read 1byte for achieve the 32bit alignment |
197 | 197 | ||
@@ -201,7 +201,7 @@ unalignedOffby3: | |||
201 | #ifdef __BIG_ENDIAN__ | 201 | #ifdef __BIG_ENDIAN__ |
202 | asl.ne r5, r5, 24 | 202 | asl.ne r5, r5, 24 |
203 | #endif | 203 | #endif |
204 | lpnz @copy8bytes_3 | 204 | lpnz @.Lcopy8bytes_3 |
205 | ;; LOOP START | 205 | ;; LOOP START |
206 | ld.ab r6, [r1, 4] | 206 | ld.ab r6, [r1, 4] |
207 | prefetch [r1, 28] ;Prefetch the next read location | 207 | prefetch [r1, 28] ;Prefetch the next read location |
@@ -218,7 +218,7 @@ unalignedOffby3: | |||
218 | 218 | ||
219 | st.ab r7, [r3, 4] | 219 | st.ab r7, [r3, 4] |
220 | st.ab r9, [r3, 4] | 220 | st.ab r9, [r3, 4] |
221 | copy8bytes_3: | 221 | .Lcopy8bytes_3: |
222 | 222 | ||
223 | #ifdef __BIG_ENDIAN__ | 223 | #ifdef __BIG_ENDIAN__ |
224 | lsr.nz r5, r5, 24 | 224 | lsr.nz r5, r5, 24 |
@@ -226,11 +226,11 @@ copy8bytes_3: | |||
226 | stb.ab r5, [r3, 1] | 226 | stb.ab r5, [r3, 1] |
227 | 227 | ||
228 | and.f lp_count, r2, 0x07 ;Last 8bytes | 228 | and.f lp_count, r2, 0x07 ;Last 8bytes |
229 | lpnz @copybytewise_3 | 229 | lpnz @.Lcopybytewise_3 |
230 | ;; LOOP START | 230 | ;; LOOP START |
231 | ldb.ab r6, [r1,1] | 231 | ldb.ab r6, [r1,1] |
232 | stb.ab r6, [r3,1] | 232 | stb.ab r6, [r3,1] |
233 | copybytewise_3: | 233 | .Lcopybytewise_3: |
234 | j [blink] | 234 | j [blink] |
235 | 235 | ||
236 | END(memcpy) | 236 | END(memcpy) |
diff --git a/arch/arc/mm/tlbex.S b/arch/arc/mm/tlbex.S index 63860adc4814..f1967eeb32e7 100644 --- a/arch/arc/mm/tlbex.S +++ b/arch/arc/mm/tlbex.S | |||
@@ -88,7 +88,7 @@ ex_saved_reg1: | |||
88 | #ifdef CONFIG_SMP | 88 | #ifdef CONFIG_SMP |
89 | sr r0, [ARC_REG_SCRATCH_DATA0] ; freeup r0 to code with | 89 | sr r0, [ARC_REG_SCRATCH_DATA0] ; freeup r0 to code with |
90 | GET_CPU_ID r0 ; get to per cpu scratch mem, | 90 | GET_CPU_ID r0 ; get to per cpu scratch mem, |
91 | lsl r0, r0, L1_CACHE_SHIFT ; cache line wide per cpu | 91 | asl r0, r0, L1_CACHE_SHIFT ; cache line wide per cpu |
92 | add r0, @ex_saved_reg1, r0 | 92 | add r0, @ex_saved_reg1, r0 |
93 | #else | 93 | #else |
94 | st r0, [@ex_saved_reg1] | 94 | st r0, [@ex_saved_reg1] |
@@ -107,7 +107,7 @@ ex_saved_reg1: | |||
107 | .macro TLBMISS_RESTORE_REGS | 107 | .macro TLBMISS_RESTORE_REGS |
108 | #ifdef CONFIG_SMP | 108 | #ifdef CONFIG_SMP |
109 | GET_CPU_ID r0 ; get to per cpu scratch mem | 109 | GET_CPU_ID r0 ; get to per cpu scratch mem |
110 | lsl r0, r0, L1_CACHE_SHIFT ; each is cache line wide | 110 | asl r0, r0, L1_CACHE_SHIFT ; each is cache line wide |
111 | add r0, @ex_saved_reg1, r0 | 111 | add r0, @ex_saved_reg1, r0 |
112 | ld_s r3, [r0,12] | 112 | ld_s r3, [r0,12] |
113 | ld_s r2, [r0, 8] | 113 | ld_s r2, [r0, 8] |
@@ -256,7 +256,7 @@ ex_saved_reg1: | |||
256 | 256 | ||
257 | .macro CONV_PTE_TO_TLB | 257 | .macro CONV_PTE_TO_TLB |
258 | and r3, r0, PTE_BITS_RWX ; r w x | 258 | and r3, r0, PTE_BITS_RWX ; r w x |
259 | lsl r2, r3, 3 ; Kr Kw Kx 0 0 0 (GLOBAL, kernel only) | 259 | asl r2, r3, 3 ; Kr Kw Kx 0 0 0 (GLOBAL, kernel only) |
260 | and.f 0, r0, _PAGE_GLOBAL | 260 | and.f 0, r0, _PAGE_GLOBAL |
261 | or.z r2, r2, r3 ; Kr Kw Kx Ur Uw Ux (!GLOBAL, user page) | 261 | or.z r2, r2, r3 ; Kr Kw Kx Ur Uw Ux (!GLOBAL, user page) |
262 | 262 | ||
diff --git a/arch/arc/plat-sim/platform.c b/arch/arc/plat-sim/platform.c index dde692812bc1..e4fe51456808 100644 --- a/arch/arc/plat-sim/platform.c +++ b/arch/arc/plat-sim/platform.c | |||
@@ -10,7 +10,6 @@ | |||
10 | 10 | ||
11 | #include <linux/init.h> | 11 | #include <linux/init.h> |
12 | #include <asm/mach_desc.h> | 12 | #include <asm/mach_desc.h> |
13 | #include <asm/mcip.h> | ||
14 | 13 | ||
15 | /*----------------------- Machine Descriptions ------------------------------ | 14 | /*----------------------- Machine Descriptions ------------------------------ |
16 | * | 15 | * |