Diffstat (limited to 'arch/arm/kernel/entry-armv.S')
-rw-r--r--  arch/arm/kernel/entry-armv.S | 169
1 file changed, 102 insertions(+), 67 deletions(-)
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 792abd0dfae1..3d727a8a23bc 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -34,7 +34,7 @@
 	@
 	@ routine called with r0 = irq number, r1 = struct pt_regs *
 	@
-	adrne	lr, 1b
+	adrne	lr, BSYM(1b)
 	bne	asm_do_IRQ
 
 #ifdef CONFIG_SMP
@@ -46,13 +46,13 @@
 	 */
 	test_for_ipi r0, r6, r5, lr
 	movne	r0, sp
-	adrne	lr, 1b
+	adrne	lr, BSYM(1b)
 	bne	do_IPI
 
 #ifdef CONFIG_LOCAL_TIMERS
 	test_for_ltirq r0, r6, r5, lr
 	movne	r0, sp
-	adrne	lr, 1b
+	adrne	lr, BSYM(1b)
 	bne	do_local_timer
 #endif
 #endif
@@ -70,7 +70,10 @@
  */
 	.macro	inv_entry, reason
 	sub	sp, sp, #S_FRAME_SIZE
-	stmib	sp, {r1 - lr}
+ ARM(	stmib	sp, {r1 - lr}		)
+ THUMB(	stmia	sp, {r0 - r12}		)
+ THUMB(	str	sp, [sp, #S_SP]		)
+ THUMB(	str	lr, [sp, #S_LR]		)
 	mov	r1, #\reason
 	.endm
 
@@ -126,17 +129,24 @@ ENDPROC(__und_invalid)
 	.macro	svc_entry, stack_hole=0
  UNWIND(.fnstart	)
  UNWIND(.save {r0 - pc}	)
-	sub	sp, sp, #(S_FRAME_SIZE + \stack_hole)
+	sub	sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
+#ifdef CONFIG_THUMB2_KERNEL
+ SPFIX(	str	r0, [sp]	)	@ temporarily saved
+ SPFIX(	mov	r0, sp		)
+ SPFIX(	tst	r0, #4		)	@ test original stack alignment
+ SPFIX(	ldr	r0, [sp]	)	@ restored
+#else
 SPFIX(	tst	sp, #4		)
- SPFIX(	bicne	sp, sp, #4	)
-	stmib	sp, {r1 - r12}
+#endif
+ SPFIX(	subeq	sp, sp, #4	)
+	stmia	sp, {r1 - r12}
 
 	ldmia	r0, {r1 - r3}
-	add	r5, sp, #S_SP		@ here for interlock avoidance
+	add	r5, sp, #S_SP - 4	@ here for interlock avoidance
 	mov	r4, #-1			@  ""  ""      ""       ""
-	add	r0, sp, #(S_FRAME_SIZE + \stack_hole)
- SPFIX(	addne	r0, r0, #4	)
-	str	r1, [sp]		@ save the "real" r0 copied
+	add	r0, sp, #(S_FRAME_SIZE + \stack_hole - 4)
+ SPFIX(	addeq	r0, r0, #4	)
+	str	r1, [sp, #-4]!		@ save the "real" r0 copied
 					@ from the exception stack
 
 	mov	r1, lr
@@ -198,9 +208,8 @@ __dabt_svc:
 	@
 	@ restore SPSR and restart the instruction
 	@
-	ldr	r0, [sp, #S_PSR]
-	msr	spsr_cxsf, r0
-	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
+	ldr	r2, [sp, #S_PSR]
+	svc_exit r2				@ return from exception
  UNWIND(.fnend		)
 ENDPROC(__dabt_svc)
 
@@ -224,13 +233,12 @@ __irq_svc:
 	tst	r0, #_TIF_NEED_RESCHED
 	blne	svc_preempt
 #endif
-	ldr	r0, [sp, #S_PSR]		@ irqs are already disabled
-	msr	spsr_cxsf, r0
+	ldr	r4, [sp, #S_PSR]		@ irqs are already disabled
 #ifdef CONFIG_TRACE_IRQFLAGS
-	tst	r0, #PSR_I_BIT
+	tst	r4, #PSR_I_BIT
 	bleq	trace_hardirqs_on
 #endif
-	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
+	svc_exit r4				@ return from exception
  UNWIND(.fnend		)
 ENDPROC(__irq_svc)
 
@@ -265,7 +273,7 @@ __und_svc:
 	@  r0 - instruction
 	@
 	ldr	r0, [r2, #-4]
-	adr	r9, 1f
+	adr	r9, BSYM(1f)
 	bl	call_fpe
 
 	mov	r0, sp				@ struct pt_regs *regs
@@ -279,9 +287,8 @@ __und_svc:
 	@
 	@ restore SPSR and restart the instruction
 	@
-	ldr	lr, [sp, #S_PSR]		@ Get SVC cpsr
-	msr	spsr_cxsf, lr
-	ldmia	sp, {r0 - pc}^			@ Restore SVC registers
+	ldr	r2, [sp, #S_PSR]		@ Get SVC cpsr
+	svc_exit r2				@ return from exception
  UNWIND(.fnend		)
 ENDPROC(__und_svc)
 
@@ -322,9 +329,8 @@ __pabt_svc:
 	@
 	@ restore SPSR and restart the instruction
 	@
-	ldr	r0, [sp, #S_PSR]
-	msr	spsr_cxsf, r0
-	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
+	ldr	r2, [sp, #S_PSR]
+	svc_exit r2				@ return from exception
  UNWIND(.fnend		)
 ENDPROC(__pabt_svc)
 
@@ -352,7 +358,8 @@ ENDPROC(__pabt_svc)
  UNWIND(.fnstart	)
  UNWIND(.cantunwind	)	@ don't unwind the user space
 	sub	sp, sp, #S_FRAME_SIZE
-	stmib	sp, {r1 - r12}
+ ARM(	stmib	sp, {r1 - r12}	)
+ THUMB(	stmia	sp, {r0 - r12}	)
 
 	ldmia	r0, {r1 - r3}
 	add	r0, sp, #S_PC		@ here for interlock avoidance
@@ -371,7 +378,8 @@ ENDPROC(__pabt_svc)
 	@ Also, separately save sp_usr and lr_usr
 	@
 	stmia	r0, {r2 - r4}
-	stmdb	r0, {sp, lr}^
+ ARM(	stmdb	r0, {sp, lr}^			)
+ THUMB(	store_user_sp_lr r0, r1, S_SP - S_PC	)
 
 	@
 	@ Enable the alignment trap while in kernel mode
@@ -428,7 +436,7 @@ __dabt_usr:
 	@
 	enable_irq
 	mov	r2, sp
-	adr	lr, ret_from_exception
+	adr	lr, BSYM(ret_from_exception)
 	b	do_DataAbort
  UNWIND(.fnend		)
 ENDPROC(__dabt_usr)
@@ -450,7 +458,9 @@ __irq_usr:
 	ldr	r0, [tsk, #TI_PREEMPT]
 	str	r8, [tsk, #TI_PREEMPT]
 	teq	r0, r7
-	strne	r0, [r0, -r0]
+ ARM(	strne	r0, [r0, -r0]	)
+ THUMB(	movne	r0, #0		)
+ THUMB(	strne	r0, [r0]	)
 #endif
 #ifdef CONFIG_TRACE_IRQFLAGS
 	bl	trace_hardirqs_on
@@ -474,9 +484,10 @@ __und_usr:
 	@
 	@  r0 - instruction
 	@
-	adr	r9, ret_from_exception
-	adr	lr, __und_usr_unknown
+	adr	r9, BSYM(ret_from_exception)
+	adr	lr, BSYM(__und_usr_unknown)
 	tst	r3, #PSR_T_BIT			@ Thumb mode?
+	itet	eq				@ explicit IT needed for the 1f label
 	subeq	r4, r2, #4			@ ARM instr at LR - 4
 	subne	r4, r2, #2			@ Thumb instr at LR - 2
 1:	ldreqt	r0, [r4]
@@ -486,7 +497,10 @@ __und_usr:
 	beq	call_fpe
 	@ Thumb instruction
 #if __LINUX_ARM_ARCH__ >= 7
-2:	ldrht	r5, [r4], #2
+2:
+ ARM(	ldrht	r5, [r4], #2	)
+ THUMB(	ldrht	r5, [r4]	)
+ THUMB(	add	r4, r4, #2	)
 	and	r0, r5, #0xf800			@ mask bits 111x x... .... ....
 	cmp	r0, #0xe800			@ 32bit instruction if xx != 0
 	blo	__und_usr_unknown
@@ -575,9 +589,11 @@ call_fpe:
 	moveq	pc, lr
 	get_thread_info r10			@ get current thread
 	and	r8, r0, #0x00000f00		@ mask out CP number
+ THUMB(	lsr	r8, r8, #8		)
 	mov	r7, #1
 	add	r6, r10, #TI_USED_CP
-	strb	r7, [r6, r8, lsr #8]		@ set appropriate used_cp[]
+ ARM(	strb	r7, [r6, r8, lsr #8]	)	@ set appropriate used_cp[]
+ THUMB(	strb	r7, [r6, r8]		)	@ set appropriate used_cp[]
 #ifdef CONFIG_IWMMXT
 	@ Test if we need to give access to iWMMXt coprocessors
 	ldr	r5, [r10, #TI_FLAGS]
@@ -585,36 +601,38 @@ call_fpe:
 	movcss	r7, r5, lsr #(TIF_USING_IWMMXT + 1)
 	bcs	iwmmxt_task_enable
 #endif
-	add	pc, pc, r8, lsr #6
-	mov	r0, r0
-
-	mov	pc, lr				@ CP#0
-	b	do_fpe				@ CP#1 (FPE)
-	b	do_fpe				@ CP#2 (FPE)
-	mov	pc, lr				@ CP#3
+ ARM(	add	pc, pc, r8, lsr #6	)
+ THUMB(	lsl	r8, r8, #2		)
+ THUMB(	add	pc, r8			)
+	nop
+
+	W(mov)	pc, lr				@ CP#0
+	W(b)	do_fpe				@ CP#1 (FPE)
+	W(b)	do_fpe				@ CP#2 (FPE)
+	W(mov)	pc, lr				@ CP#3
 #ifdef CONFIG_CRUNCH
 	b	crunch_task_enable		@ CP#4 (MaverickCrunch)
 	b	crunch_task_enable		@ CP#5 (MaverickCrunch)
 	b	crunch_task_enable		@ CP#6 (MaverickCrunch)
 #else
-	mov	pc, lr				@ CP#4
-	mov	pc, lr				@ CP#5
-	mov	pc, lr				@ CP#6
+	W(mov)	pc, lr				@ CP#4
+	W(mov)	pc, lr				@ CP#5
+	W(mov)	pc, lr				@ CP#6
#endif
-	mov	pc, lr				@ CP#7
-	mov	pc, lr				@ CP#8
-	mov	pc, lr				@ CP#9
+	W(mov)	pc, lr				@ CP#7
+	W(mov)	pc, lr				@ CP#8
+	W(mov)	pc, lr				@ CP#9
 #ifdef CONFIG_VFP
-	b	do_vfp				@ CP#10 (VFP)
-	b	do_vfp				@ CP#11 (VFP)
+	W(b)	do_vfp				@ CP#10 (VFP)
+	W(b)	do_vfp				@ CP#11 (VFP)
 #else
-	mov	pc, lr				@ CP#10 (VFP)
-	mov	pc, lr				@ CP#11 (VFP)
+	W(mov)	pc, lr				@ CP#10 (VFP)
+	W(mov)	pc, lr				@ CP#11 (VFP)
 #endif
-	mov	pc, lr				@ CP#12
-	mov	pc, lr				@ CP#13
-	mov	pc, lr				@ CP#14 (Debug)
-	mov	pc, lr				@ CP#15 (Control)
+	W(mov)	pc, lr				@ CP#12
+	W(mov)	pc, lr				@ CP#13
+	W(mov)	pc, lr				@ CP#14 (Debug)
+	W(mov)	pc, lr				@ CP#15 (Control)
 
 #ifdef CONFIG_NEON
 	.align	6
@@ -665,7 +683,7 @@ no_fp:	mov	pc, lr
 __und_usr_unknown:
 	enable_irq
 	mov	r0, sp
-	adr	lr, ret_from_exception
+	adr	lr, BSYM(ret_from_exception)
 	b	do_undefinstr
 ENDPROC(__und_usr_unknown)
 
@@ -709,7 +727,10 @@ ENTRY(__switch_to)
  UNWIND(.cantunwind	)
 	add	ip, r1, #TI_CPU_SAVE
 	ldr	r3, [r2, #TI_TP_VALUE]
-	stmia	ip!, {r4 - sl, fp, sp, lr}	@ Store most regs on stack
+ ARM(	stmia	ip!, {r4 - sl, fp, sp, lr} )	@ Store most regs on stack
+ THUMB(	stmia	ip!, {r4 - sl, fp}	   )	@ Store most regs on stack
+ THUMB(	str	sp, [ip], #4		   )
+ THUMB(	str	lr, [ip], #4		   )
 #ifdef CONFIG_MMU
 	ldr	r6, [r2, #TI_CPU_DOMAIN]
 #endif
@@ -734,8 +755,12 @@ ENTRY(__switch_to)
 	ldr	r0, =thread_notify_head
 	mov	r1, #THREAD_NOTIFY_SWITCH
 	bl	atomic_notifier_call_chain
+ THUMB(	mov	ip, r4			   )
 	mov	r0, r5
-	ldmia	r4, {r4 - sl, fp, sp, pc}	@ Load all regs saved previously
+ ARM(	ldmia	r4, {r4 - sl, fp, sp, pc}  )	@ Load all regs saved previously
+ THUMB(	ldmia	ip!, {r4 - sl, fp}	   )	@ Load all regs saved previously
+ THUMB(	ldr	sp, [ip], #4		   )
+ THUMB(	ldr	pc, [ip]		   )
  UNWIND(.fnend		)
 ENDPROC(__switch_to)
 
@@ -770,6 +795,7 @@ ENDPROC(__switch_to)
  * if your compiled code is not going to use the new instructions for other
  * purpose.
  */
+ THUMB(	.arm	)
 
 	.macro	usr_ret, reg
 #ifdef CONFIG_ARM_THUMB
@@ -1018,6 +1044,7 @@ __kuser_helper_version:			@ 0xffff0ffc
 	.globl	__kuser_helper_end
 __kuser_helper_end:
 
+ THUMB(	.thumb	)
 
 /*
  * Vector stubs.
@@ -1052,17 +1079,23 @@ vector_\name:
 	@ Prepare for SVC32 mode.  IRQs remain disabled.
 	@
 	mrs	r0, cpsr
-	eor	r0, r0, #(\mode ^ SVC_MODE)
+	eor	r0, r0, #(\mode ^ SVC_MODE | PSR_ISETSTATE)
 	msr	spsr_cxsf, r0
 
 	@
 	@ the branch table must immediately follow this code
 	@
 	and	lr, lr, #0x0f
+ THUMB(	adr	r0, 1f			)
+ THUMB(	ldr	lr, [r0, lr, lsl #2]	)
 	mov	r0, sp
-	ldr	lr, [pc, lr, lsl #2]
+ ARM(	ldr	lr, [pc, lr, lsl #2]	)
 	movs	pc, lr			@ branch to handler in SVC mode
 ENDPROC(vector_\name)
+
+	.align	2
+	@ handler addresses follow this label
+1:
 	.endm
 
 	.globl	__stubs_start
@@ -1200,14 +1233,16 @@ __stubs_end:
 
 	.globl	__vectors_start
 __vectors_start:
-	swi	SYS_ERROR0
-	b	vector_und + stubs_offset
-	ldr	pc, .LCvswi + stubs_offset
-	b	vector_pabt + stubs_offset
-	b	vector_dabt + stubs_offset
-	b	vector_addrexcptn + stubs_offset
-	b	vector_irq + stubs_offset
-	b	vector_fiq + stubs_offset
+ ARM(	swi	SYS_ERROR0	)
+ THUMB(	svc	#0		)
+ THUMB(	nop			)
+	W(b)	vector_und + stubs_offset
+	W(ldr)	pc, .LCvswi + stubs_offset
+	W(b)	vector_pabt + stubs_offset
+	W(b)	vector_dabt + stubs_offset
+	W(b)	vector_addrexcptn + stubs_offset
+	W(b)	vector_irq + stubs_offset
+	W(b)	vector_fiq + stubs_offset
 
 	.globl	__vectors_end
 __vectors_end:
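
Note on the helpers this patch leans on: the ARM()/THUMB()/W()/BSYM() macros come
from asm/unified.h and let the same entry code assemble for a classic ARM kernel
and a CONFIG_THUMB2_KERNEL build. The snippet below is a hedged sketch of their
intended behaviour, not a verbatim copy of the header:

	/*
	 * Sketch of the asm/unified.h assembly helpers (reconstruction):
	 *  ARM(x)    - line emitted only for an ARM-state kernel
	 *  THUMB(x)  - line emitted only when CONFIG_THUMB2_KERNEL is set
	 *  W(instr)  - force the 32-bit "wide" Thumb-2 encoding so branch
	 *              tables (call_fpe, the vector page) keep one word
	 *              per entry in both instruction sets
	 *  BSYM(sym) - set bit 0 of a code address so values loaded into
	 *              lr/pc keep the CPU in Thumb state on exception return
	 */
	#ifdef CONFIG_THUMB2_KERNEL
	#define ARM(x...)
	#define THUMB(x...)	x
	#define W(instr)	instr.w
	#define BSYM(sym)	sym + 1
	#else
	#define ARM(x...)	x
	#define THUMB(x...)
	#define W(instr)	instr
	#define BSYM(sym)	sym
	#endif

With these definitions, "adrne lr, BSYM(1b)" still assembles to the old
"adrne lr, 1b" on an ARM kernel, while on a Thumb-2 kernel it marks the return
address as Thumb code; likewise the W() forms keep the coprocessor dispatch
table and the vector stubs at a fixed entry size regardless of instruction set.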