Diffstat (limited to 'arch/arm/kernel/entry-armv.S')
 arch/arm/kernel/entry-armv.S | 523 ++++++++++++++++++-------------------------
 1 file changed, 204 insertions(+), 319 deletions(-)
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 90c62cd51ca9..a87cbf889ff4 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -29,21 +29,53 @@
 #include <asm/entry-macro-multi.S>
 
 /*
- * Interrupt handling.  Preserves r7, r8, r9
+ * Interrupt handling.
  */
 	.macro	irq_handler
 #ifdef CONFIG_MULTI_IRQ_HANDLER
-	ldr	r5, =handle_arch_irq
+	ldr	r1, =handle_arch_irq
 	mov	r0, sp
-	ldr	r5, [r5]
+	ldr	r1, [r1]
 	adr	lr, BSYM(9997f)
-	teq	r5, #0
-	movne	pc, r5
+	teq	r1, #0
+	movne	pc, r1
 #endif
 	arch_irq_handler_default
 9997:
 	.endm
 
+	.macro	pabt_helper
+	@ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
+#ifdef MULTI_PABORT
+	ldr	ip, .LCprocfns
+	mov	lr, pc
+	ldr	pc, [ip, #PROCESSOR_PABT_FUNC]
+#else
+	bl	CPU_PABORT_HANDLER
+#endif
+	.endm
+
+	.macro	dabt_helper
+
+	@
+	@ Call the processor-specific abort handler:
+	@
+	@  r2 - pt_regs
+	@  r4 - aborted context pc
+	@  r5 - aborted context psr
+	@
+	@ The abort handler must return the aborted address in r0, and
+	@ the fault status register in r1.  r9 must be preserved.
+	@
+#ifdef MULTI_DABORT
+	ldr	ip, .LCprocfns
+	mov	lr, pc
+	ldr	pc, [ip, #PROCESSOR_DABT_FUNC]
+#else
+	bl	CPU_DABORT_HANDLER
+#endif
+	.endm
+
 #ifdef CONFIG_KPROBES
 	.section	.kprobes.text,"ax",%progbits
 #else
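On MULTI_PABORT/MULTI_DABORT kernels, the two helpers above dispatch through a per-CPU function-pointer table (.LCprocfns holds the address of the processor struct), while single-CPU builds resolve CPU_PABORT_HANDLER/CPU_DABORT_HANDLER to a direct bl at build time. A rough C analogue of that dispatch, with illustrative names rather than the exact kernel definitions:

	/* Sketch only: MULTI_DABORT-style dispatch through a function-pointer
	 * table selected at boot.  Struct and field names are made up. */
	struct proc_fns {
		void (*dabt)(void);	/* CPU-specific data abort handler */
		void (*pabt)(void);	/* CPU-specific prefetch abort handler */
	};

	extern struct proc_fns processor;	/* what .LCprocfns points at */

	static inline void dabt_dispatch(void)
	{
		processor.dabt();	/* ~ ldr pc, [ip, #PROCESSOR_DABT_FUNC] */
	}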
@@ -126,106 +158,74 @@ ENDPROC(__und_invalid)
 SPFIX(	subeq	sp, sp, #4	)
 	stmia	sp, {r1 - r12}
 
-	ldmia	r0, {r1 - r3}
-	add	r5, sp, #S_SP - 4	@ here for interlock avoidance
-	mov	r4, #-1			@  ""  ""      ""       ""
-	add	r0, sp, #(S_FRAME_SIZE + \stack_hole - 4)
-SPFIX(	addeq	r0, r0, #4	)
-	str	r1, [sp, #-4]!		@ save the "real" r0 copied
+	ldmia	r0, {r3 - r5}
+	add	r7, sp, #S_SP - 4	@ here for interlock avoidance
+	mov	r6, #-1			@  ""  ""      ""       ""
+	add	r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
+SPFIX(	addeq	r2, r2, #4	)
+	str	r3, [sp, #-4]!		@ save the "real" r0 copied
 					@ from the exception stack
 
-	mov	r1, lr
+	mov	r3, lr
 
 	@
 	@ We are now ready to fill in the remaining blanks on the stack:
 	@
-	@  r0 - sp_svc
-	@  r1 - lr_svc
-	@  r2 - lr_<exception>, already fixed up for correct return/restart
-	@  r3 - spsr_<exception>
-	@  r4 - orig_r0 (see pt_regs definition in ptrace.h)
+	@  r2 - sp_svc
+	@  r3 - lr_svc
+	@  r4 - lr_<exception>, already fixed up for correct return/restart
+	@  r5 - spsr_<exception>
+	@  r6 - orig_r0 (see pt_regs definition in ptrace.h)
 	@
-	stmia	r5, {r0 - r4}
+	stmia	r7, {r2 - r6}
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+	bl	trace_hardirqs_off
+#endif
 	.endm
 
 	.align	5
 __dabt_svc:
 	svc_entry
-
-	@
-	@ get ready to re-enable interrupts if appropriate
-	@
-	mrs	r9, cpsr
-	tst	r3, #PSR_I_BIT
-	biceq	r9, r9, #PSR_I_BIT
-
-	@
-	@ Call the processor-specific abort handler:
-	@
-	@  r2 - aborted context pc
-	@  r3 - aborted context cpsr
-	@
-	@ The abort handler must return the aborted address in r0, and
-	@ the fault status register in r1.  r9 must be preserved.
-	@
-#ifdef MULTI_DABORT
-	ldr	r4, .LCprocfns
-	mov	lr, pc
-	ldr	pc, [r4, #PROCESSOR_DABT_FUNC]
-#else
-	bl	CPU_DABORT_HANDLER
-#endif
-
-	@
-	@ set desired IRQ state, then call main handler
-	@
-	debug_entry r1
-	msr	cpsr_c, r9
 	mov	r2, sp
-	bl	do_DataAbort
+	dabt_helper
 
 	@
 	@ IRQs off again before pulling preserved data off the stack
 	@
 	disable_irq_notrace
 
-	@
-	@ restore SPSR and restart the instruction
-	@
-	ldr	r2, [sp, #S_PSR]
-	svc_exit r2				@ return from exception
+#ifdef CONFIG_TRACE_IRQFLAGS
+	tst	r5, #PSR_I_BIT
+	bleq	trace_hardirqs_on
+	tst	r5, #PSR_I_BIT
+	blne	trace_hardirqs_off
+#endif
+	svc_exit r5				@ return from exception
 	UNWIND(.fnend		)
ENDPROC(__dabt_svc)
 
 	.align	5
 __irq_svc:
 	svc_entry
+	irq_handler
 
-#ifdef CONFIG_TRACE_IRQFLAGS
-	bl	trace_hardirqs_off
-#endif
 #ifdef CONFIG_PREEMPT
 	get_thread_info tsk
 	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
-	add	r7, r8, #1			@ increment it
-	str	r7, [tsk, #TI_PREEMPT]
-#endif
-
-	irq_handler
-#ifdef CONFIG_PREEMPT
-	str	r8, [tsk, #TI_PREEMPT]		@ restore preempt count
 	ldr	r0, [tsk, #TI_FLAGS]		@ get flags
 	teq	r8, #0				@ if preempt count != 0
 	movne	r0, #0				@ force flags to 0
 	tst	r0, #_TIF_NEED_RESCHED
 	blne	svc_preempt
 #endif
-	ldr	r4, [sp, #S_PSR]		@ irqs are already disabled
+
 #ifdef CONFIG_TRACE_IRQFLAGS
-	tst	r4, #PSR_I_BIT
-	bleq	trace_hardirqs_on
+	@ The parent context IRQs must have been enabled to get here in
+	@ the first place, so there's no point checking the PSR I bit.
+	bl	trace_hardirqs_on
 #endif
-	svc_exit r4				@ return from exception
+	svc_exit r5				@ return from exception
 	UNWIND(.fnend		)
ENDPROC(__irq_svc)
 
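The CONFIG_TRACE_IRQFLAGS sequence that now ends __dabt_svc (and, below, __und_svc and __pabt_svc) keys off the I bit of the saved PSR that svc_exit r5 is about to restore. In C terms, roughly:

	/* Sketch of the exit tracing above: report the IRQ state that is
	 * about to take effect.  PSR_I_BIT set means IRQs were masked in
	 * the interrupted context.  Constant as in asm/ptrace.h. */
	#define PSR_I_BIT	0x00000080

	extern void trace_hardirqs_on(void);
	extern void trace_hardirqs_off(void);

	static void trace_restored_irq_state(unsigned long spsr)
	{
		if (!(spsr & PSR_I_BIT))	/* tst r5, #PSR_I_BIT; bleq */
			trace_hardirqs_on();
		else				/* tst r5, #PSR_I_BIT; blne */
			trace_hardirqs_off();
	}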
@@ -251,7 +251,6 @@ __und_svc:
 #else
 	svc_entry
 #endif
-
 	@
 	@ call emulation code, which returns using r9 if it has emulated
 	@ the instruction, or the more conventional lr if we are to treat
@@ -260,15 +259,16 @@ __und_svc:
 	@  r0 - instruction
 	@
 #ifndef CONFIG_THUMB2_KERNEL
-	ldr	r0, [r2, #-4]
+	ldr	r0, [r4, #-4]
 #else
-	ldrh	r0, [r2, #-2]			@ Thumb instruction at LR - 2
+	ldrh	r0, [r4, #-2]			@ Thumb instruction at LR - 2
 	and	r9, r0, #0xf800
 	cmp	r9, #0xe800			@ 32-bit instruction if xx >= 0
-	ldrhhs	r9, [r2]			@ bottom 16 bits
+	ldrhhs	r9, [r4]			@ bottom 16 bits
 	orrhs	r0, r9, r0, lsl #16
 #endif
 	adr	r9, BSYM(1f)
+	mov	r2, r4
 	bl	call_fpe
 
 	mov	r0, sp				@ struct pt_regs *regs
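The Thumb path above recognises a 32-bit Thumb-2 encoding by its first halfword: values of 0xe800 and up introduce a two-halfword instruction, so the second halfword is fetched and combined. The same fetch in C, assuming pc holds the fixed-up address from r4:

	/* Sketch of the undef-instruction fetch above, Thumb-2 kernel case. */
	#include <stdint.h>

	static uint32_t fetch_und_insn(const uint16_t *pc)
	{
		uint32_t insn = pc[-1];			/* ldrh r0, [r4, #-2] */

		if ((insn & 0xf800) >= 0xe800)		/* 32-bit instruction? */
			insn = (insn << 16) | pc[0];	/* orrhs r0, r9, r0, lsl #16 */
		return insn;
	}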
@@ -282,45 +282,35 @@ __und_svc:
 	@
 	@ restore SPSR and restart the instruction
 	@
-	ldr	r2, [sp, #S_PSR]		@ Get SVC cpsr
-	svc_exit r2				@ return from exception
+	ldr	r5, [sp, #S_PSR]		@ Get SVC cpsr
+#ifdef CONFIG_TRACE_IRQFLAGS
+	tst	r5, #PSR_I_BIT
+	bleq	trace_hardirqs_on
+	tst	r5, #PSR_I_BIT
+	blne	trace_hardirqs_off
+#endif
+	svc_exit r5				@ return from exception
 	UNWIND(.fnend		)
ENDPROC(__und_svc)
 
 	.align	5
 __pabt_svc:
 	svc_entry
-
-	@
-	@ re-enable interrupts if appropriate
-	@
-	mrs	r9, cpsr
-	tst	r3, #PSR_I_BIT
-	biceq	r9, r9, #PSR_I_BIT
-
-	mov	r0, r2			@ pass address of aborted instruction.
-#ifdef MULTI_PABORT
-	ldr	r4, .LCprocfns
-	mov	lr, pc
-	ldr	pc, [r4, #PROCESSOR_PABT_FUNC]
-#else
-	bl	CPU_PABORT_HANDLER
-#endif
-	debug_entry r1
-	msr	cpsr_c, r9			@ Maybe enable interrupts
 	mov	r2, sp				@ regs
-	bl	do_PrefetchAbort		@ call abort handler
+	pabt_helper
 
 	@
 	@ IRQs off again before pulling preserved data off the stack
 	@
 	disable_irq_notrace
 
-	@
-	@ restore SPSR and restart the instruction
-	@
-	ldr	r2, [sp, #S_PSR]
-	svc_exit r2				@ return from exception
+#ifdef CONFIG_TRACE_IRQFLAGS
+	tst	r5, #PSR_I_BIT
+	bleq	trace_hardirqs_on
+	tst	r5, #PSR_I_BIT
+	blne	trace_hardirqs_off
+#endif
+	svc_exit r5				@ return from exception
 	UNWIND(.fnend		)
ENDPROC(__pabt_svc)
 
@@ -351,23 +341,23 @@ ENDPROC(__pabt_svc)
 ARM(	stmib	sp, {r1 - r12}	)
 THUMB(	stmia	sp, {r0 - r12}	)
 
-	ldmia	r0, {r1 - r3}
+	ldmia	r0, {r3 - r5}
 	add	r0, sp, #S_PC		@ here for interlock avoidance
-	mov	r4, #-1			@  ""  ""     ""        ""
+	mov	r6, #-1			@  ""  ""     ""        ""
 
-	str	r1, [sp]		@ save the "real" r0 copied
+	str	r3, [sp]		@ save the "real" r0 copied
 					@ from the exception stack
 
 	@
 	@ We are now ready to fill in the remaining blanks on the stack:
 	@
-	@  r2 - lr_<exception>, already fixed up for correct return/restart
-	@  r3 - spsr_<exception>
-	@  r4 - orig_r0 (see pt_regs definition in ptrace.h)
+	@  r4 - lr_<exception>, already fixed up for correct return/restart
+	@  r5 - spsr_<exception>
+	@  r6 - orig_r0 (see pt_regs definition in ptrace.h)
 	@
 	@ Also, separately save sp_usr and lr_usr
 	@
-	stmia	r0, {r2 - r4}
+	stmia	r0, {r4 - r6}
 ARM(	stmdb	r0, {sp, lr}^			)
 THUMB(	store_user_sp_lr r0, r1, S_SP - S_PC	)
 
@@ -380,10 +370,14 @@ ENDPROC(__pabt_svc)
 	@ Clear FP to mark the first stack frame
 	@
 	zero_fp
+
+#ifdef CONFIG_IRQSOFF_TRACER
+	bl	trace_hardirqs_off
+#endif
 	.endm
 
 	.macro	kuser_cmpxchg_check
-#if __LINUX_ARM_ARCH__ < 6 && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
+#if !defined(CONFIG_CPU_32v6K) && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
 #ifndef CONFIG_MMU
 #warning "NPTL on non MMU needs fixing"
 #else
@@ -391,8 +385,8 @@ ENDPROC(__pabt_svc)
 	@ if it was interrupted in a critical region.  Here we
 	@ perform a quick test inline since it should be false
 	@ 99.9999% of the time.  The rest is done out of line.
-	cmp	r2, #TASK_SIZE
-	blhs	kuser_cmpxchg_fixup
+	cmp	r4, #TASK_SIZE
+	blhs	kuser_cmpxchg64_fixup
 #endif
 #endif
 	.endm
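The inline test is cheap because one comparison suffices: the kuser helper page sits at the top of the address space, above TASK_SIZE, so an interrupted user pc below TASK_SIZE cannot be inside a cmpxchg critical section. A C sketch, with an illustrative TASK_SIZE for the common 3G/1G split:

	/* Sketch of kuser_cmpxchg_check; the TASK_SIZE value is illustrative
	 * and varies with the configured memory split. */
	#define TASK_SIZE	0xbf000000UL

	extern void kuser_cmpxchg64_fixup(void);

	static void kuser_cmpxchg_check(unsigned long pc)
	{
		if (pc >= TASK_SIZE)	/* cmp r4, #TASK_SIZE; blhs ... */
			kuser_cmpxchg64_fixup();
	}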
@@ -401,32 +395,9 @@ ENDPROC(__pabt_svc)
 __dabt_usr:
 	usr_entry
 	kuser_cmpxchg_check
-
-	@
-	@ Call the processor-specific abort handler:
-	@
-	@  r2 - aborted context pc
-	@  r3 - aborted context cpsr
-	@
-	@ The abort handler must return the aborted address in r0, and
-	@ the fault status register in r1.
-	@
-#ifdef MULTI_DABORT
-	ldr	r4, .LCprocfns
-	mov	lr, pc
-	ldr	pc, [r4, #PROCESSOR_DABT_FUNC]
-#else
-	bl	CPU_DABORT_HANDLER
-#endif
-
-	@
-	@ IRQs on, then call the main handler
-	@
-	debug_entry r1
-	enable_irq
 	mov	r2, sp
-	adr	lr, BSYM(ret_from_exception)
-	b	do_DataAbort
+	dabt_helper
+	b	ret_from_exception
 	UNWIND(.fnend		)
ENDPROC(__dabt_usr)
 
@@ -434,28 +405,8 @@ ENDPROC(__dabt_usr)
 __irq_usr:
 	usr_entry
 	kuser_cmpxchg_check
-
-#ifdef CONFIG_IRQSOFF_TRACER
-	bl	trace_hardirqs_off
-#endif
-
-	get_thread_info tsk
-#ifdef CONFIG_PREEMPT
-	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
-	add	r7, r8, #1			@ increment it
-	str	r7, [tsk, #TI_PREEMPT]
-#endif
-
 	irq_handler
-#ifdef CONFIG_PREEMPT
-	ldr	r0, [tsk, #TI_PREEMPT]
-	str	r8, [tsk, #TI_PREEMPT]
-	teq	r0, r7
- ARM(	strne	r0, [r0, -r0]	)
- THUMB(	movne	r0, #0	)
- THUMB(	strne	r0, [r0]	)
-#endif
-
+	get_thread_info tsk
 	mov	why, #0
 	b	ret_to_user_from_irq
 	UNWIND(.fnend		)
@@ -467,6 +418,9 @@ ENDPROC(__irq_usr)
 __und_usr:
 	usr_entry
 
+	mov	r2, r4
+	mov	r3, r5
+
 	@
 	@ fall through to the emulation code, which returns using r9 if
 	@ it has emulated the instruction, or the more conventional lr
@@ -682,19 +636,8 @@ ENDPROC(__und_usr_unknown)
 	.align	5
 __pabt_usr:
 	usr_entry
-
-	mov	r0, r2			@ pass address of aborted instruction.
-#ifdef MULTI_PABORT
-	ldr	r4, .LCprocfns
-	mov	lr, pc
-	ldr	pc, [r4, #PROCESSOR_PABT_FUNC]
-#else
-	bl	CPU_PABORT_HANDLER
-#endif
-	debug_entry r1
-	enable_irq				@ Enable interrupts
 	mov	r2, sp				@ regs
-	bl	do_PrefetchAbort		@ call abort handler
+	pabt_helper
 	UNWIND(.fnend		)
 	/* fall through */
 /*
@@ -758,31 +701,12 @@ ENDPROC(__switch_to)
 /*
  * User helpers.
  *
- * These are segment of kernel provided user code reachable from user space
- * at a fixed address in kernel memory.  This is used to provide user space
- * with some operations which require kernel help because of unimplemented
- * native feature and/or instructions in many ARM CPUs. The idea is for
- * this code to be executed directly in user mode for best efficiency but
- * which is too intimate with the kernel counter part to be left to user
- * libraries.  In fact this code might even differ from one CPU to another
- * depending on the available instruction set and restrictions like on
- * SMP systems.  In other words, the kernel reserves the right to change
- * this code as needed without warning. Only the entry points and their
- * results are guaranteed to be stable.
- *
  * Each segment is 32-byte aligned and will be moved to the top of the high
  * vector page.  New segments (if ever needed) must be added in front of
  * existing ones.  This mechanism should be used only for things that are
  * really small and justified, and not be abused freely.
  *
- * User space is expected to implement those things inline when optimizing
- * for a processor that has the necessary native support, but only if such
- * resulting binaries are already to be incompatible with earlier ARM
- * processors due to the use of unsupported instructions other than what
- * is provided here. In other words don't make binaries unable to run on
- * earlier processors just for the sake of not using these kernel helpers
- * if your compiled code is not going to use the new instructions for other
- * purpose.
+ * See Documentation/arm/kernel_user_helpers.txt for formal definitions.
  */
 THUMB(	.arm	)
 
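The calling convention itself is unchanged: user space invokes a helper by calling its fixed address in the high vector page. For example, for the memory barrier helper at 0xffff0fa0 (prototype as given in kernel_user_helpers.txt; the wrapper function is our own illustration):

	typedef void (__kuser_dmb_t)(void);
	#define __kuser_dmb (*(__kuser_dmb_t *)0xffff0fa0)

	/* Order a store after all preceding memory accesses. */
	static void ordered_store(volatile int *p, int v)
	{
		__kuser_dmb();
		*p = v;
	}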
@@ -799,96 +723,103 @@ ENDPROC(__switch_to)
 __kuser_helper_start:
 
 /*
- * Reference prototype:
- *
- * void __kernel_memory_barrier(void)
- *
- * Input:
- *
- * lr = return address
- *
- * Output:
- *
- * none
- *
- * Clobbered:
- *
- * none
- *
- * Definition and user space usage example:
- *
- * typedef void (__kernel_dmb_t)(void);
- * #define __kernel_dmb (*(__kernel_dmb_t *)0xffff0fa0)
- *
- * Apply any needed memory barrier to preserve consistency with data modified
- * manually and __kuser_cmpxchg usage.
- *
- * This could be used as follows:
- *
- * #define __kernel_dmb() \
- *         asm volatile ( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #95" \
- *	        : : : "r0", "lr","cc" )
+ * Due to the length of some sequences, __kuser_cmpxchg64 spans 2 regular
+ * kuser "slots", therefore 0xffff0f80 is not used as a valid entry point.
  */
 
-__kuser_memory_barrier:				@ 0xffff0fa0
+__kuser_cmpxchg64:				@ 0xffff0f60
+
+#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
+
+	/*
+	 * Poor you.  No fast solution possible...
+	 * The kernel itself must perform the operation.
+	 * A special ghost syscall is used for that (see traps.c).
+	 */
+	stmfd	sp!, {r7, lr}
+	ldr	r7, 1f			@ it's 20 bits
+	swi	__ARM_NR_cmpxchg64
+	ldmfd	sp!, {r7, pc}
+1:	.word	__ARM_NR_cmpxchg64
+
+#elif defined(CONFIG_CPU_32v6K)
+
+	stmfd	sp!, {r4, r5, r6, r7}
+	ldrd	r4, r5, [r0]			@ load old val
+	ldrd	r6, r7, [r1]			@ load new val
+	smp_dmb	arm
+1:	ldrexd	r0, r1, [r2]			@ load current val
+	eors	r3, r0, r4			@ compare with oldval (1)
+	eoreqs	r3, r1, r5			@ compare with oldval (2)
+	strexdeq r3, r6, r7, [r2]		@ store newval if eq
+	teqeq	r3, #1				@ success?
+	beq	1b				@ if no then retry
 	smp_dmb	arm
+	rsbs	r0, r3, #0			@ set returned val and C flag
+	ldmfd	sp!, {r4, r5, r6, r7}
+	bx	lr
+
+#elif !defined(CONFIG_SMP)
+
+#ifdef CONFIG_MMU
+
+	/*
+	 * The only thing that can break atomicity in this cmpxchg64
+	 * implementation is either an IRQ or a data abort exception
+	 * causing another process/thread to be scheduled in the middle of
+	 * the critical sequence.  The same strategy as for cmpxchg is used.
+	 */
+	stmfd	sp!, {r4, r5, r6, lr}
+	ldmia	r0, {r4, r5}			@ load old val
+	ldmia	r1, {r6, lr}			@ load new val
+1:	ldmia	r2, {r0, r1}			@ load current val
+	eors	r3, r0, r4			@ compare with oldval (1)
+	eoreqs	r3, r1, r5			@ compare with oldval (2)
+2:	stmeqia	r2, {r6, lr}			@ store newval if eq
+	rsbs	r0, r3, #0			@ set return val and C flag
+	ldmfd	sp!, {r4, r5, r6, pc}
+
+	.text
+kuser_cmpxchg64_fixup:
+	@ Called from kuser_cmpxchg_fixup.
+	@ r4 = address of interrupted insn (must be preserved).
+	@ sp = saved regs. r7 and r8 are clobbered.
+	@ 1b = first critical insn, 2b = last critical insn.
+	@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
+	mov	r7, #0xffff0fff
+	sub	r7, r7, #(0xffff0fff - (0xffff0f60 + (1b - __kuser_cmpxchg64)))
+	subs	r8, r4, r7
+	rsbcss	r8, r8, #(2b - 1b)
+	strcs	r7, [sp, #S_PC]
+#if __LINUX_ARM_ARCH__ < 6
+	bcc	kuser_cmpxchg32_fixup
+#endif
+	mov	pc, lr
+	.previous
+
+#else
+#warning "NPTL on non MMU needs fixing"
+	mov	r0, #-1
+	adds	r0, r0, #0
 	usr_ret	lr
+#endif
+
+#else
+#error "incoherent kernel configuration"
+#endif
+
+	/* pad to next slot */
+	.rept	(16 - (. - __kuser_cmpxchg64)/4)
+	.word	0
+	.endr
 
 	.align	5
 
-/*
- * Reference prototype:
- *
- * int __kernel_cmpxchg(int oldval, int newval, int *ptr)
- *
- * Input:
- *
- * r0 = oldval
- * r1 = newval
- * r2 = ptr
- * lr = return address
- *
- * Output:
- *
- * r0 = returned value (zero or non-zero)
- * C flag = set if r0 == 0, clear if r0 != 0
- *
- * Clobbered:
- *
- * r3, ip, flags
- *
- * Definition and user space usage example:
- *
- * typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
- * #define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
- *
- * Atomically store newval in *ptr if *ptr is equal to oldval for user space.
- * Return zero if *ptr was changed or non-zero if no exchange happened.
- * The C flag is also set if *ptr was changed to allow for assembly
- * optimization in the calling code.
- *
- * Notes:
- *
- *    - This routine already includes memory barriers as needed.
- *
- * For example, a user space atomic_add implementation could look like this:
- *
- * #define atomic_add(ptr, val) \
- * ({ register unsigned int *__ptr asm("r2") = (ptr); \
- *    register unsigned int __result asm("r1"); \
- *    asm volatile ( \
- *        "1: @ atomic_add\n\t" \
- *        "ldr	r0, [r2]\n\t" \
- *        "mov	r3, #0xffff0fff\n\t" \
- *        "add	lr, pc, #4\n\t" \
- *        "add	r1, r0, %2\n\t" \
- *        "add	pc, r3, #(0xffff0fc0 - 0xffff0fff)\n\t" \
- *        "bcc	1b" \
- *        : "=&r" (__result) \
- *        : "r" (__ptr), "rIL" (val) \
- *        : "r0","r3","ip","lr","cc","memory" ); \
- *    __result; })
- */
+__kuser_memory_barrier:				@ 0xffff0fa0
+	smp_dmb	arm
+	usr_ret	lr
+
+	.align	5
 
 __kuser_cmpxchg:				@ 0xffff0fc0
 
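From user space, the new helper behaves like a 64-bit compare-and-swap that returns zero (with the C flag set) on success. Its prototype as documented in kernel_user_helpers.txt, plus a small wrapper of our own:

	#include <stdint.h>

	typedef int (__kuser_cmpxchg64_t)(const int64_t *oldval,
					  const int64_t *newval,
					  volatile int64_t *ptr);
	#define __kuser_cmpxchg64 (*(__kuser_cmpxchg64_t *)0xffff0f60)

	/* Returns 1 if *p held old and has been replaced by new. */
	static int cas64(volatile int64_t *p, int64_t old, int64_t new)
	{
		return __kuser_cmpxchg64(&old, &new, p) == 0;
	}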
@@ -925,15 +856,15 @@ __kuser_cmpxchg: @ 0xffff0fc0
 	usr_ret	lr
 
 	.text
-kuser_cmpxchg_fixup:
+kuser_cmpxchg32_fixup:
 	@ Called from kuser_cmpxchg_check macro.
-	@ r2 = address of interrupted insn (must be preserved).
+	@ r4 = address of interrupted insn (must be preserved).
 	@ sp = saved regs. r7 and r8 are clobbered.
 	@ 1b = first critical insn, 2b = last critical insn.
-	@ If r2 >= 1b and r2 <= 2b then saved pc_usr is set to 1b.
+	@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
 	mov	r7, #0xffff0fff
 	sub	r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg)))
-	subs	r8, r2, r7
+	subs	r8, r4, r7
 	rsbcss	r8, r8, #(2b - 1b)
 	strcs	r7, [sp, #S_PC]
 	mov	pc, lr
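The two-instruction range check used by both fixup routines folds "1b <= pc <= 2b" into a single unsigned comparison: subs sets carry only if pc >= 1b, and the conditional rsbcss keeps it set only if pc - 1b <= 2b - 1b. Equivalent C:

	/* Sketch of the fixup: restart the critical section from 1b if the
	 * interrupted pc fell anywhere inside [1b, 2b]. */
	static void cmpxchg_fixup(unsigned long *pc_usr,
				  unsigned long start,	/* 1b */
				  unsigned long end)	/* 2b */
	{
		if (*pc_usr - start <= end - start)	/* subs; rsbcss */
			*pc_usr = start;		/* strcs r7, [sp, #S_PC] */
	}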
@@ -963,39 +894,6 @@ kuser_cmpxchg_fixup:
 
 	.align	5
 
-/*
- * Reference prototype:
- *
- * int __kernel_get_tls(void)
- *
- * Input:
- *
- * lr = return address
- *
- * Output:
- *
- * r0 = TLS value
- *
- * Clobbered:
- *
- * none
- *
- * Definition and user space usage example:
- *
- * typedef int (__kernel_get_tls_t)(void);
- * #define __kernel_get_tls (*(__kernel_get_tls_t *)0xffff0fe0)
- *
- * Get the TLS value as previously set via the __ARM_NR_set_tls syscall.
- *
- * This could be used as follows:
- *
- * #define __kernel_get_tls() \
- * ({ register unsigned int __val asm("r0"); \
- *    asm( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #31" \
- *        : "=r" (__val) : : "lr","cc" ); \
- *    __val; })
- */
-
 __kuser_get_tls:				@ 0xffff0fe0
 	ldr	r0, [pc, #(16 - 8)]	@ read TLS, set in kuser_get_tls_init
 	usr_ret	lr
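The usage example deleted above lives on, in updated form, in kernel_user_helpers.txt; a minimal equivalent is:

	typedef unsigned int (__kuser_get_tls_t)(void);
	#define __kuser_get_tls (*(__kuser_get_tls_t *)0xffff0fe0)

	/* Read the TLS value set via the __ARM_NR_set_tls syscall. */
	static unsigned int read_tls(void)
	{
		return __kuser_get_tls();
	}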
@@ -1004,19 +902,6 @@ __kuser_get_tls: @ 0xffff0fe0
 	.word	0			@ 0xffff0ff0 software TLS value, then
 	.endr				@ pad up to __kuser_helper_version
 
-/*
- * Reference declaration:
- *
- *	extern unsigned int __kernel_helper_version;
- *
- * Definition and user space usage example:
- *
- * #define __kernel_helper_version (*(unsigned int *)0xffff0ffc)
- *
- * User space may read this to determine the curent number of helpers
- * available.
- */
-
 __kuser_helper_version:				@ 0xffff0ffc
 	.word	((__kuser_helper_end - __kuser_helper_start) >> 5)
 