path: root/arch/arm/kernel/entry-armv.S
author		Catalin Marinas <catalin.marinas@arm.com>	2009-07-24 07:32:54 -0400
committer	Catalin Marinas <catalin.marinas@arm.com>	2009-07-24 07:32:54 -0400
commit		b86040a59feb255a8193173caa4d5199464433d5
tree		89c07450eabc2abb88bb4d6e32d61fd3855f000e /arch/arm/kernel/entry-armv.S
parent		0becb088501886f37ade38762c8eaaf4263572cc

Thumb-2: Implementation of the unified start-up and exceptions code

This patch implements the ARM/Thumb-2 unified kernel start-up and exception
handling code.

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Diffstat (limited to 'arch/arm/kernel/entry-armv.S')
-rw-r--r--	arch/arm/kernel/entry-armv.S	165
1 file changed, 98 insertions(+), 67 deletions(-)
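
Note: the hunks below lean on the unified assembler macros from arch/arm/include/asm/unified.h. ARM() emits its argument only in ARM builds, THUMB() only when CONFIG_THUMB2_KERNEL is set, W() forces the wide 32-bit Thumb-2 encoding (so branch-table entries keep a fixed size), and BSYM() marks a symbol whose address is loaded into a register (e.g. a return address set up with adr) so that bit 0 is set and execution stays in Thumb state. A rough sketch of their behaviour, not the verbatim header:

    /* approximate behaviour of the asm/unified.h helpers (assembly side) */
    #ifdef CONFIG_THUMB2_KERNEL
    #define ARM(x...)			/* dropped from Thumb-2 builds */
    #define THUMB(x...)	x		/* emitted only in Thumb-2 builds */
    #define W(instr)	instr.w		/* force the 32-bit (wide) encoding */
    #define BSYM(sym)	sym + 1		/* set bit 0: loaded addresses stay in Thumb state */
    #else
    #define ARM(x...)	x
    #define THUMB(x...)
    #define W(instr)	instr
    #define BSYM(sym)	sym
    #endif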
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 0befd1cabf45..468425f937dd 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -34,7 +34,7 @@
 	@
 	@ routine called with r0 = irq number, r1 = struct pt_regs *
 	@
-	adrne	lr, 1b
+	adrne	lr, BSYM(1b)
 	bne	asm_do_IRQ
 
 #ifdef CONFIG_SMP
@@ -46,13 +46,13 @@
  */
 	test_for_ipi r0, r6, r5, lr
 	movne	r0, sp
-	adrne	lr, 1b
+	adrne	lr, BSYM(1b)
 	bne	do_IPI
 
 #ifdef CONFIG_LOCAL_TIMERS
 	test_for_ltirq r0, r6, r5, lr
 	movne	r0, sp
-	adrne	lr, 1b
+	adrne	lr, BSYM(1b)
 	bne	do_local_timer
 #endif
 #endif
@@ -70,7 +70,10 @@
  */
 	.macro	inv_entry, reason
 	sub	sp, sp, #S_FRAME_SIZE
-	stmib	sp, {r1 - lr}
+ ARM(	stmib	sp, {r1 - lr}		)
+ THUMB(	stmia	sp, {r0 - r12}		)
+ THUMB(	str	sp, [sp, #S_SP]		)
+ THUMB(	str	lr, [sp, #S_LR]		)
 	mov	r1, #\reason
 	.endm
 
@@ -126,17 +129,24 @@ ENDPROC(__und_invalid)
 	.macro	svc_entry, stack_hole=0
  UNWIND(.fnstart		)
  UNWIND(.save {r0 - pc}		)
-	sub	sp, sp, #(S_FRAME_SIZE + \stack_hole)
+	sub	sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
+#ifdef CONFIG_THUMB2_KERNEL
+ SPFIX(	str	r0, [sp]	)	@ temporarily saved
+ SPFIX(	mov	r0, sp		)
+ SPFIX(	tst	r0, #4		)	@ test original stack alignment
+ SPFIX(	ldr	r0, [sp]	)	@ restored
+#else
  SPFIX(	tst	sp, #4		)
- SPFIX(	bicne	sp, sp, #4	)
-	stmib	sp, {r1 - r12}
+#endif
+ SPFIX(	subeq	sp, sp, #4	)
+	stmia	sp, {r1 - r12}
 
 	ldmia	r0, {r1 - r3}
-	add	r5, sp, #S_SP		@ here for interlock avoidance
+	add	r5, sp, #S_SP - 4	@ here for interlock avoidance
 	mov	r4, #-1			@  ""  ""      ""       ""
-	add	r0, sp, #(S_FRAME_SIZE + \stack_hole)
- SPFIX(	addne	r0, r0, #4	)
-	str	r1, [sp]		@ save the "real" r0 copied
+	add	r0, sp, #(S_FRAME_SIZE + \stack_hole - 4)
+ SPFIX(	addeq	r0, r0, #4	)
+	str	r1, [sp, #-4]!		@ save the "real" r0 copied
 	@ from the exception stack
 
 	mov	r1, lr
@@ -196,9 +206,8 @@ __dabt_svc:
 	@
 	@ restore SPSR and restart the instruction
 	@
-	ldr	r0, [sp, #S_PSR]
-	msr	spsr_cxsf, r0
-	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
+	ldr	r2, [sp, #S_PSR]
+	svc_exit r2				@ return from exception
  UNWIND(.fnend		)
 ENDPROC(__dabt_svc)
 
@@ -225,13 +234,12 @@ __irq_svc:
 	tst	r0, #_TIF_NEED_RESCHED
 	blne	svc_preempt
 #endif
-	ldr	r0, [sp, #S_PSR]		@ irqs are already disabled
-	msr	spsr_cxsf, r0
+	ldr	r4, [sp, #S_PSR]		@ irqs are already disabled
 #ifdef CONFIG_TRACE_IRQFLAGS
-	tst	r0, #PSR_I_BIT
+	tst	r4, #PSR_I_BIT
 	bleq	trace_hardirqs_on
 #endif
-	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
+	svc_exit r4				@ return from exception
  UNWIND(.fnend		)
 ENDPROC(__irq_svc)
 
@@ -266,7 +274,7 @@ __und_svc:
 	@  r0 - instruction
 	@
 	ldr	r0, [r2, #-4]
-	adr	r9, 1f
+	adr	r9, BSYM(1f)
 	bl	call_fpe
 
 	mov	r0, sp				@ struct pt_regs *regs
@@ -280,9 +288,8 @@ __und_svc:
 	@
 	@ restore SPSR and restart the instruction
 	@
-	ldr	lr, [sp, #S_PSR]		@ Get SVC cpsr
-	msr	spsr_cxsf, lr
-	ldmia	sp, {r0 - pc}^			@ Restore SVC registers
+	ldr	r2, [sp, #S_PSR]		@ Get SVC cpsr
+	svc_exit r2				@ return from exception
  UNWIND(.fnend		)
 ENDPROC(__und_svc)
 
@@ -323,9 +330,8 @@ __pabt_svc:
 	@
 	@ restore SPSR and restart the instruction
 	@
-	ldr	r0, [sp, #S_PSR]
-	msr	spsr_cxsf, r0
-	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
+	ldr	r2, [sp, #S_PSR]
+	svc_exit r2				@ return from exception
  UNWIND(.fnend		)
 ENDPROC(__pabt_svc)
 
@@ -353,7 +359,8 @@ ENDPROC(__pabt_svc)
  UNWIND(.fnstart	)
  UNWIND(.cantunwind	)	@ don't unwind the user space
 	sub	sp, sp, #S_FRAME_SIZE
-	stmib	sp, {r1 - r12}
+ ARM(	stmib	sp, {r1 - r12}	)
+ THUMB(	stmia	sp, {r0 - r12}	)
 
 	ldmia	r0, {r1 - r3}
 	add	r0, sp, #S_PC		@ here for interlock avoidance
@@ -372,7 +379,8 @@ ENDPROC(__pabt_svc)
 	@ Also, separately save sp_usr and lr_usr
 	@
 	stmia	r0, {r2 - r4}
-	stmdb	r0, {sp, lr}^
+ ARM(	stmdb	r0, {sp, lr}^			)
+ THUMB(	store_user_sp_lr r0, r1, S_SP - S_PC	)
 
 	@
 	@ Enable the alignment trap while in kernel mode
@@ -427,7 +435,7 @@ __dabt_usr:
 	@
 	enable_irq
 	mov	r2, sp
-	adr	lr, ret_from_exception
+	adr	lr, BSYM(ret_from_exception)
 	b	do_DataAbort
  UNWIND(.fnend		)
 ENDPROC(__dabt_usr)
@@ -452,7 +460,9 @@ __irq_usr:
 	ldr	r0, [tsk, #TI_PREEMPT]
 	str	r8, [tsk, #TI_PREEMPT]
 	teq	r0, r7
-	strne	r0, [r0, -r0]
+ ARM(	strne	r0, [r0, -r0]	)
+ THUMB(	movne	r0, #0		)
+ THUMB(	strne	r0, [r0]	)
 #endif
 #ifdef CONFIG_TRACE_IRQFLAGS
 	bl	trace_hardirqs_on
@@ -476,9 +486,10 @@ __und_usr:
 	@
 	@  r0 - instruction
 	@
-	adr	r9, ret_from_exception
-	adr	lr, __und_usr_unknown
+	adr	r9, BSYM(ret_from_exception)
+	adr	lr, BSYM(__und_usr_unknown)
 	tst	r3, #PSR_T_BIT			@ Thumb mode?
+	itet	eq				@ explicit IT needed for the 1f label
 	subeq	r4, r2, #4			@ ARM instr at LR - 4
 	subne	r4, r2, #2			@ Thumb instr at LR - 2
 1:	ldreqt	r0, [r4]
@@ -488,7 +499,10 @@ __und_usr:
 	beq	call_fpe
 	@ Thumb instruction
 #if __LINUX_ARM_ARCH__ >= 7
-2:	ldrht	r5, [r4], #2
+2:
+ ARM(	ldrht	r5, [r4], #2	)
+ THUMB(	ldrht	r5, [r4]	)
+ THUMB(	add	r4, r4, #2	)
 	and	r0, r5, #0xf800			@ mask bits 111x x... .... ....
 	cmp	r0, #0xe800			@ 32bit instruction if xx != 0
 	blo	__und_usr_unknown
@@ -577,9 +591,11 @@ call_fpe:
 	moveq	pc, lr
 	get_thread_info r10			@ get current thread
 	and	r8, r0, #0x00000f00		@ mask out CP number
+ THUMB(	lsr	r8, r8, #8		)
 	mov	r7, #1
 	add	r6, r10, #TI_USED_CP
-	strb	r7, [r6, r8, lsr #8]		@ set appropriate used_cp[]
+ ARM(	strb	r7, [r6, r8, lsr #8]	)	@ set appropriate used_cp[]
+ THUMB(	strb	r7, [r6, r8]		)	@ set appropriate used_cp[]
 #ifdef CONFIG_IWMMXT
 	@ Test if we need to give access to iWMMXt coprocessors
 	ldr	r5, [r10, #TI_FLAGS]
@@ -587,36 +603,38 @@ call_fpe:
 	movcss	r7, r5, lsr #(TIF_USING_IWMMXT + 1)
 	bcs	iwmmxt_task_enable
 #endif
-	add	pc, pc, r8, lsr #6
-	mov	r0, r0
-
-	mov	pc, lr				@ CP#0
-	b	do_fpe				@ CP#1 (FPE)
-	b	do_fpe				@ CP#2 (FPE)
-	mov	pc, lr				@ CP#3
+ ARM(	add	pc, pc, r8, lsr #6	)
+ THUMB(	lsl	r8, r8, #2		)
+ THUMB(	add	pc, r8			)
+	nop
+
+	W(mov)	pc, lr				@ CP#0
+	W(b)	do_fpe				@ CP#1 (FPE)
+	W(b)	do_fpe				@ CP#2 (FPE)
+	W(mov)	pc, lr				@ CP#3
 #ifdef CONFIG_CRUNCH
 	b	crunch_task_enable		@ CP#4 (MaverickCrunch)
 	b	crunch_task_enable		@ CP#5 (MaverickCrunch)
 	b	crunch_task_enable		@ CP#6 (MaverickCrunch)
 #else
-	mov	pc, lr				@ CP#4
-	mov	pc, lr				@ CP#5
-	mov	pc, lr				@ CP#6
+	W(mov)	pc, lr				@ CP#4
+	W(mov)	pc, lr				@ CP#5
+	W(mov)	pc, lr				@ CP#6
 #endif
-	mov	pc, lr				@ CP#7
-	mov	pc, lr				@ CP#8
-	mov	pc, lr				@ CP#9
+	W(mov)	pc, lr				@ CP#7
+	W(mov)	pc, lr				@ CP#8
+	W(mov)	pc, lr				@ CP#9
 #ifdef CONFIG_VFP
-	b	do_vfp				@ CP#10 (VFP)
-	b	do_vfp				@ CP#11 (VFP)
+	W(b)	do_vfp				@ CP#10 (VFP)
+	W(b)	do_vfp				@ CP#11 (VFP)
 #else
-	mov	pc, lr				@ CP#10 (VFP)
-	mov	pc, lr				@ CP#11 (VFP)
+	W(mov)	pc, lr				@ CP#10 (VFP)
+	W(mov)	pc, lr				@ CP#11 (VFP)
 #endif
-	mov	pc, lr				@ CP#12
-	mov	pc, lr				@ CP#13
-	mov	pc, lr				@ CP#14 (Debug)
-	mov	pc, lr				@ CP#15 (Control)
+	W(mov)	pc, lr				@ CP#12
+	W(mov)	pc, lr				@ CP#13
+	W(mov)	pc, lr				@ CP#14 (Debug)
+	W(mov)	pc, lr				@ CP#15 (Control)
 
 #ifdef CONFIG_NEON
 	.align	6
@@ -667,7 +685,7 @@ no_fp: mov pc, lr
 __und_usr_unknown:
 	enable_irq
 	mov	r0, sp
-	adr	lr, ret_from_exception
+	adr	lr, BSYM(ret_from_exception)
 	b	do_undefinstr
 ENDPROC(__und_usr_unknown)
 
@@ -711,7 +729,10 @@ ENTRY(__switch_to)
  UNWIND(.cantunwind	)
 	add	ip, r1, #TI_CPU_SAVE
 	ldr	r3, [r2, #TI_TP_VALUE]
-	stmia	ip!, {r4 - sl, fp, sp, lr}	@ Store most regs on stack
+ ARM(	stmia	ip!, {r4 - sl, fp, sp, lr} )	@ Store most regs on stack
+ THUMB(	stmia	ip!, {r4 - sl, fp}	   )	@ Store most regs on stack
+ THUMB(	str	sp, [ip], #4		   )
+ THUMB(	str	lr, [ip], #4		   )
 #ifdef CONFIG_MMU
 	ldr	r6, [r2, #TI_CPU_DOMAIN]
 #endif
@@ -736,8 +757,12 @@ ENTRY(__switch_to)
 	ldr	r0, =thread_notify_head
 	mov	r1, #THREAD_NOTIFY_SWITCH
 	bl	atomic_notifier_call_chain
+ THUMB(	mov	ip, r4			   )
 	mov	r0, r5
-	ldmia	r4, {r4 - sl, fp, sp, pc}	@ Load all regs saved previously
+ ARM(	ldmia	r4, {r4 - sl, fp, sp, pc}  )	@ Load all regs saved previously
+ THUMB(	ldmia	ip!, {r4 - sl, fp}	   )	@ Load all regs saved previously
+ THUMB(	ldr	sp, [ip], #4		   )
+ THUMB(	ldr	pc, [ip]		   )
  UNWIND(.fnend		)
 ENDPROC(__switch_to)
 
@@ -772,6 +797,7 @@ ENDPROC(__switch_to)
  * if your compiled code is not going to use the new instructions for other
  * purpose.
  */
+	THUMB(	.arm	)
 
 	.macro	usr_ret, reg
 #ifdef CONFIG_ARM_THUMB
@@ -1020,6 +1046,7 @@ __kuser_helper_version: @ 0xffff0ffc
 	.globl	__kuser_helper_end
 __kuser_helper_end:
 
+	THUMB(	.thumb	)
 
 /*
  * Vector stubs.
@@ -1054,15 +1081,17 @@ vector_\name:
 	@ Prepare for SVC32 mode.  IRQs remain disabled.
 	@
 	mrs	r0, cpsr
-	eor	r0, r0, #(\mode ^ SVC_MODE)
+	eor	r0, r0, #(\mode ^ SVC_MODE | PSR_ISETSTATE)
 	msr	spsr_cxsf, r0
 
 	@
 	@ the branch table must immediately follow this code
 	@
 	and	lr, lr, #0x0f
+ THUMB(	adr	r0, 1f			)
+ THUMB(	ldr	lr, [r0, lr, lsl #2]	)
 	mov	r0, sp
-	ldr	lr, [pc, lr, lsl #2]
+ ARM(	ldr	lr, [pc, lr, lsl #2]	)
 	movs	pc, lr			@ branch to handler in SVC mode
 ENDPROC(vector_\name)
 
@@ -1206,14 +1235,16 @@ __stubs_end:
 
 	.globl	__vectors_start
 __vectors_start:
-	swi	SYS_ERROR0
-	b	vector_und + stubs_offset
-	ldr	pc, .LCvswi + stubs_offset
-	b	vector_pabt + stubs_offset
-	b	vector_dabt + stubs_offset
-	b	vector_addrexcptn + stubs_offset
-	b	vector_irq + stubs_offset
-	b	vector_fiq + stubs_offset
+ ARM(	swi	SYS_ERROR0	)
+ THUMB(	svc	#0		)
+ THUMB(	nop			)
+	W(b)	vector_und + stubs_offset
+	W(ldr)	pc, .LCvswi + stubs_offset
+	W(b)	vector_pabt + stubs_offset
+	W(b)	vector_dabt + stubs_offset
+	W(b)	vector_addrexcptn + stubs_offset
+	W(b)	vector_irq + stubs_offset
+	W(b)	vector_fiq + stubs_offset
 
 	.globl	__vectors_end
 __vectors_end:
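
The svc_exit macro used in the __dabt_svc, __irq_svc, __und_svc and __pabt_svc return paths above is not defined in this file; it presumably lands in entry-header.S as part of the same series. Judging from the open-coded sequences it replaces, its ARM flavour is roughly equivalent to the following sketch (the Thumb-2 flavour has to restore sp and lr separately, since ldm cannot write them together with pc there):

    	.macro	svc_exit, rpsr			@ sketch, not the verbatim macro
    	msr	spsr_cxsf, \rpsr		@ put the saved PSR back into SPSR
    	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
    	.endm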