author	Uwe Kleine-König <u.kleine-koenig@pengutronix.de>	2010-05-21 13:06:42 -0400
committer	Uwe Kleine-König <u.kleine-koenig@pengutronix.de>	2013-04-17 15:44:46 -0400
commit	19c4d593f0b4bd46f6d923a3e514719982a22058 (patch)
tree	d49347eeaf897139242020418fd104f1c2daed91 /arch/arm
parent	55bdd694116597d2f16510b121463cd579ba78da (diff)
ARM: ARMv7-M: Add support for exception handling
This patch implements the exception handling for the ARMv7-M architecture, which is quite different from the A and R profiles. It is based on work done earlier by Catalin for 2.6.33, but was nearly completely rewritten to use a pt_regs layout compatible with the A profile.

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Reviewed-by: Jonathan Austin <jonathan.austin@arm.com>
Tested-by: Jonathan Austin <jonathan.austin@arm.com>
Signed-off-by: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
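For orientation: the "pt_regs layout compatible with the A profile" mentioned above is the classic arch/arm frame of 18 words, r0-r15 followed by the saved PSR and the original r0 for syscall restarting. A minimal C sketch of that layout (struct name hypothetical; field order and the 72-byte S_FRAME_SIZE are as in arch/arm of this era):

	/* sketch: the A-profile pt_regs layout that the V7-M entry code reproduces */
	struct pt_regs_sketch {
		unsigned long r[13];	/* S_R0..S_R12			*/
		unsigned long sp;	/* S_SP				*/
		unsigned long lr;	/* S_LR				*/
		unsigned long pc;	/* S_PC				*/
		unsigned long psr;	/* S_PSR: xPSR on V7-M		*/
		unsigned long old_r0;	/* S_OLD_R0: syscall restart	*/
	};				/* 18 words = 72 bytes		*/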
Diffstat (limited to 'arch/arm')
-rw-r--r--	arch/arm/kernel/entry-common.S	|   4
-rw-r--r--	arch/arm/kernel/entry-header.S	| 124
-rw-r--r--	arch/arm/kernel/entry-v7m.S	| 143
3 files changed, 271 insertions(+), 0 deletions(-)
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 3248cde504ed..c45e0027613b 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -339,6 +339,9 @@ ENDPROC(ftrace_stub)
 
 	.align	5
 ENTRY(vector_swi)
+#ifdef CONFIG_CPU_V7M
+	v7m_exception_entry
+#else
 	sub	sp, sp, #S_FRAME_SIZE
 	stmia	sp, {r0 - r12}			@ Calling r0 - r12
  ARM(	add	r8, sp, #S_PC		)
@@ -349,6 +352,7 @@ ENTRY(vector_swi)
 	str	lr, [sp, #S_PC]			@ Save calling PC
 	str	r8, [sp, #S_PSR]		@ Save CPSR
 	str	r0, [sp, #S_OLD_R0]		@ Save OLD_R0
+#endif
 	zero_fp
 
 	/*
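On V7-M, SVCall enters vector_swi (via the vector table added below) with part of the context already pushed by hardware, which is why the #ifdef above replaces the manual stores with v7m_exception_entry. The hardware-stacked frame is architecturally fixed at eight words; a C sketch (struct name hypothetical):

	/* sketch: the 8-word frame ARMv7-M hardware pushes on exception entry */
	struct v7m_hw_frame {
		unsigned long r0, r1, r2, r3, r12;
		unsigned long lr;	/* r14 at exception time	*/
		unsigned long ret_addr;	/* ReturnAddress()		*/
		unsigned long xpsr;	/* bit 9: stack was realigned	*/
	};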
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index 9a8531eadd3d..b02e1394dc54 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -5,6 +5,7 @@
 #include <asm/asm-offsets.h>
 #include <asm/errno.h>
 #include <asm/thread_info.h>
+#include <asm/v7m.h>
 
 @ Bad Abort numbers
 @ -----------------
@@ -44,6 +45,116 @@
 #endif
 	.endm
 
+#ifdef CONFIG_CPU_V7M
+/*
+ * ARMv7-M exception entry/exit macros.
+ *
+ * xPSR, ReturnAddress(), LR (R14), R12, R3, R2, R1, and R0 are
+ * automatically saved on the current stack (32 bytes) before
+ * switching to the exception stack (SP_main).
+ *
+ * If the exception is taken while in user mode, SP_main is
+ * empty. Otherwise, SP_main is aligned to 64 bit automatically
+ * (CCR.STKALIGN set).
+ *
+ * Linux assumes that the interrupts are disabled when entering an
+ * exception handler and it may BUG if this is not the case. Interrupts
+ * are disabled during entry and reenabled in the exit macro.
+ *
+ * v7m_exception_slow_exit is used when returning from SVC or PendSV.
+ * When returning to kernel mode, we don't return from exception.
+ */
+	.macro	v7m_exception_entry
+	@ determine the location of the registers saved by the core during
+	@ exception entry. Depending on the mode the CPU was in when the
+	@ exception happened, that is either on the main or the process stack.
+	@ Bit 2 of EXC_RETURN stored in the lr register specifies which stack
+	@ was used.
+	tst	lr, #EXC_RET_STACK_MASK
+	mrsne	r12, psp
+	moveq	r12, sp
+
+	@ we cannot rely on r0-r3 and r12 matching the value saved in the
+	@ exception frame because of tail-chaining. So these have to be
+	@ reloaded.
+	ldmia	r12!, {r0-r3}
+
+	@ Linux expects to have irqs off. Do it here before taking stack space
+	cpsid	i
+
+	sub	sp, #S_FRAME_SIZE-S_IP
+	stmdb	sp!, {r0-r11}
+
+	@ load saved r12, lr, return address and xPSR.
+	@ r0-r7 are used for signals and never touched from now on. Clobbering
+	@ r8-r12 is OK.
+	mov	r9, r12
+	ldmia	r9!, {r8, r10-r12}
+
+	@ calculate the original stack pointer value.
+	@ r9 currently points to the memory location just above the auto saved
+	@ xPSR.
+	@ The cpu might automatically 8-byte align the stack. Bit 9
+	@ of the saved xPSR specifies if stack aligning took place. In this case
+	@ another 32-bit value is included in the stack.
+
+	tst	r12, V7M_xPSR_FRAMEPTRALIGN
+	addne	r9, r9, #4
+
+	@ store saved r12 using str to have a register to hold the base for stm
+	str	r8, [sp, #S_IP]
+	add	r8, sp, #S_SP
+	@ store r13-r15, xPSR
+	stmia	r8!, {r9-r12}
+	@ store old_r0
+	str	r0, [r8]
+	.endm
+
+	/*
+	 * PENDSV and SVCALL are configured to have the same exception
+	 * priorities. As a kernel thread runs at SVCALL execution priority it
+	 * can never be preempted and so we will never have to return to a
+	 * kernel thread here.
+	 */
+	.macro	v7m_exception_slow_exit ret_r0
+	cpsid	i
+	ldr	lr, =EXC_RET_THREADMODE_PROCESSSTACK
+
+	@ read original r12, sp, lr, pc and xPSR
+	add	r12, sp, #S_IP
+	ldmia	r12, {r1-r5}
+
+	@ an exception frame is always 8-byte aligned. To tell the hardware if
+	@ the sp to be restored is aligned or not set bit 9 of the saved xPSR
+	@ accordingly.
+	tst	r2, #4
+	subne	r2, r2, #4
+	orrne	r5, V7M_xPSR_FRAMEPTRALIGN
+	biceq	r5, V7M_xPSR_FRAMEPTRALIGN
+
+	@ write basic exception frame
+	stmdb	r2!, {r1, r3-r5}
+	ldmia	sp, {r1, r3-r5}
+	.if	\ret_r0
+	stmdb	r2!, {r0, r3-r5}
+	.else
+	stmdb	r2!, {r1, r3-r5}
+	.endif
+
+	@ restore process sp
+	msr	psp, r2
+
+	@ restore original r4-r11
+	ldmia	sp!, {r0-r11}
+
+	@ restore main sp
+	add	sp, sp, #S_FRAME_SIZE-S_IP
+
+	cpsie	i
+	bx	lr
+	.endm
+#endif	/* CONFIG_CPU_V7M */
+
 	@
 	@ Store/load the USER SP and LR registers by switching to the SYS
 	@ mode. Useful in Thumb-2 mode where "stm/ldm rd, {sp, lr}^" is not
@@ -131,6 +242,18 @@
 	rfeia	sp!
 	.endm
 
+#ifdef CONFIG_CPU_V7M
+	/*
+	 * Note we don't need to do clrex here as clearing the local monitor is
+	 * part of each exception entry and exit sequence.
+	 */
+	.macro	restore_user_regs, fast = 0, offset = 0
+	.if	\offset
+	add	sp, #\offset
+	.endif
+	v7m_exception_slow_exit ret_r0 = \fast
+	.endm
+#else	/* ifdef CONFIG_CPU_V7M */
 	.macro	restore_user_regs, fast = 0, offset = 0
 	clrex					@ clear the exclusive monitor
 	mov	r2, sp
@@ -147,6 +270,7 @@
 	add	sp, sp, #S_FRAME_SIZE - S_SP
 	movs	pc, lr				@ return & move spsr_svc into cpsr
 	.endm
+#endif	/* ifdef CONFIG_CPU_V7M / else */
 
 	.macro	get_thread_info, rd
 	mov	\rd, sp
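The bit-9 handling in v7m_exception_slow_exit deserves a worked example: a hardware exception frame must be 8-byte aligned, while the sp being restored may be only 4-byte aligned, so the macro aligns the frame downwards and flags the padding in the saved xPSR for the core to undo on return. A C sketch of the same arithmetic (names hypothetical; assumes V7M_xPSR_FRAMEPTRALIGN is bit 9 as in asm/v7m.h):

	/* sketch: realigning the synthesized exception frame before 'bx lr' */
	#define FRAMEPTRALIGN	(1UL << 9)

	if (sp & 4) {			/* sp only 4-byte aligned	*/
		sp -= 4;		/* 8-byte align the frame	*/
		xpsr |= FRAMEPTRALIGN;	/* core re-adds the pad word	*/
	} else {
		xpsr &= ~FRAMEPTRALIGN;
	}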
diff --git a/arch/arm/kernel/entry-v7m.S b/arch/arm/kernel/entry-v7m.S
new file mode 100644
index 000000000000..e00621f1403f
--- /dev/null
+++ b/arch/arm/kernel/entry-v7m.S
@@ -0,0 +1,143 @@
+/*
+ * linux/arch/arm/kernel/entry-v7m.S
+ *
+ * Copyright (C) 2008 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Low-level vector interface routines for the ARMv7-M architecture
+ */
+#include <asm/memory.h>
+#include <asm/glue.h>
+#include <asm/thread_notify.h>
+#include <asm/v7m.h>
+
+#include <mach/entry-macro.S>
+
+#include "entry-header.S"
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+#error "CONFIG_TRACE_IRQFLAGS not supported on the current ARMv7M implementation"
+#endif
+
+__invalid_entry:
+	v7m_exception_entry
+	adr	r0, strerr
+	mrs	r1, ipsr
+	mov	r2, lr
+	bl	printk
+	mov	r0, sp
+	bl	show_regs
+1:	b	1b
+ENDPROC(__invalid_entry)
+
+strerr:	.asciz	"\nUnhandled exception: IPSR = %08lx LR = %08lx\n"
+
+	.align	2
+__irq_entry:
+	v7m_exception_entry
+
+	@
+	@ Invoke the IRQ handler
+	@
+	mrs	r0, ipsr
+	ldr	r1, =V7M_xPSR_EXCEPTIONNO
+	and	r0, r1
+	sub	r0, #16
+	mov	r1, sp
+	stmdb	sp!, {lr}
+	@ routine called with r0 = irq number, r1 = struct pt_regs *
+	bl	nvic_do_IRQ
+
+	pop	{lr}
+	@
+	@ Check for any pending work if returning to user
+	@
+	ldr	r1, =BASEADDR_V7M_SCB
+	ldr	r0, [r1, V7M_SCB_ICSR]
+	tst	r0, V7M_SCB_ICSR_RETTOBASE
+	beq	2f
+
+	get_thread_info tsk
+	ldr	r2, [tsk, #TI_FLAGS]
+	tst	r2, #_TIF_WORK_MASK
+	beq	2f			@ no work pending
+	mov	r0, #V7M_SCB_ICSR_PENDSVSET
+	str	r0, [r1, V7M_SCB_ICSR]	@ raise PendSV
+
+2:
+	@ registers r0-r3 and r12 are automatically restored on exception
+	@ return. r4-r7 were not clobbered in v7m_exception_entry so for
+	@ correctness they don't need to be restored. So only r8-r11 must be
+	@ restored here. The easiest way to do so is to restore r0-r7, too.
+	ldmia	sp!, {r0-r11}
+	add	sp, #S_FRAME_SIZE-S_IP
+	cpsie	i
+	bx	lr
+ENDPROC(__irq_entry)
+
+__pendsv_entry:
+	v7m_exception_entry
+
+	ldr	r1, =BASEADDR_V7M_SCB
+	mov	r0, #V7M_SCB_ICSR_PENDSVCLR
+	str	r0, [r1, V7M_SCB_ICSR]	@ clear PendSV
+
+	@ execute the pending work, including reschedule
+	get_thread_info tsk
+	mov	why, #0
+	b	ret_to_user
+ENDPROC(__pendsv_entry)
+
+/*
+ * Register switch for ARMv7-M processors.
+ * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
+ * previous and next are guaranteed not to be the same.
+ */
+ENTRY(__switch_to)
+	.fnstart
+	.cantunwind
+	add	ip, r1, #TI_CPU_SAVE
+	stmia	ip!, {r4 - r11}		@ Store most regs on stack
+	str	sp, [ip], #4
+	str	lr, [ip], #4
+	mov	r5, r0
+	add	r4, r2, #TI_CPU_SAVE
+	ldr	r0, =thread_notify_head
+	mov	r1, #THREAD_NOTIFY_SWITCH
+	bl	atomic_notifier_call_chain
+	mov	ip, r4
+	mov	r0, r5
+	ldmia	ip!, {r4 - r11}		@ Load all regs saved previously
+	ldr	sp, [ip]
+	ldr	pc, [ip, #4]!
+	.fnend
+ENDPROC(__switch_to)
+
+	.data
+	.align	8
+/*
+ * Vector table (64 words => 256 bytes natural alignment)
+ */
+ENTRY(vector_table)
+	.long	0			@ 0 - Reset stack pointer
+	.long	__invalid_entry		@ 1 - Reset
+	.long	__invalid_entry		@ 2 - NMI
+	.long	__invalid_entry		@ 3 - HardFault
+	.long	__invalid_entry		@ 4 - MemManage
+	.long	__invalid_entry		@ 5 - BusFault
+	.long	__invalid_entry		@ 6 - UsageFault
+	.long	__invalid_entry		@ 7 - Reserved
+	.long	__invalid_entry		@ 8 - Reserved
+	.long	__invalid_entry		@ 9 - Reserved
+	.long	__invalid_entry		@ 10 - Reserved
+	.long	vector_swi		@ 11 - SVCall
+	.long	__invalid_entry		@ 12 - Debug Monitor
+	.long	__invalid_entry		@ 13 - Reserved
+	.long	__pendsv_entry		@ 14 - PendSV
+	.long	__invalid_entry		@ 15 - SysTick
+	.rept	64 - 16
+	.long	__irq_entry		@ 16..63 - External Interrupts
+	.endr
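A note on __irq_entry above: the IRQ number passed to nvic_do_IRQ is not taken from the vector slot but recomputed from IPSR, whose low bits hold the active exception number; external interrupts start at exception 16. A C equivalent of the mrs/and/sub sequence (function name hypothetical; the 0x1ff mask mirrors V7M_xPSR_EXCEPTIONNO):

	/* sketch: what 'mrs r0, ipsr; and r0, r1; sub r0, #16' computes */
	static unsigned int irq_from_ipsr(unsigned long ipsr)
	{
		return (ipsr & 0x1ff) - 16;	/* exception 16 == IRQ 0 */
	}

This matches the .rept block of the vector table, which routes exceptions 16..63 to __irq_entry.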