about summary refs log tree commit diff stats
diff options
context:
space:
mode:
-rw-r--r--arch/arm/kernel/entry-armv.S98
-rw-r--r--arch/arm/kernel/entry-header.S47
-rw-r--r--arch/arm/kernel/fiq.c17
-rw-r--r--arch/arm/kernel/setup.c8
-rw-r--r--arch/arm/kernel/traps.c26
5 files changed, 180 insertions, 16 deletions
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 36276cdccfbc..859f56cb122b 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -146,7 +146,7 @@ ENDPROC(__und_invalid)
146#define SPFIX(code...) 146#define SPFIX(code...)
147#endif 147#endif
148 148
149 .macro svc_entry, stack_hole=0 149 .macro svc_entry, stack_hole=0, trace=1
150 UNWIND(.fnstart ) 150 UNWIND(.fnstart )
151 UNWIND(.save {r0 - pc} ) 151 UNWIND(.save {r0 - pc} )
152 sub sp, sp, #(S_FRAME_SIZE + \stack_hole - 4) 152 sub sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
@@ -182,9 +182,11 @@ ENDPROC(__und_invalid)
182 @ 182 @
183 stmia r7, {r2 - r6} 183 stmia r7, {r2 - r6}
184 184
185 .if \trace
185#ifdef CONFIG_TRACE_IRQFLAGS 186#ifdef CONFIG_TRACE_IRQFLAGS
186 bl trace_hardirqs_off 187 bl trace_hardirqs_off
187#endif 188#endif
189 .endif
188 .endm 190 .endm
189 191
190 .align 5 192 .align 5
@@ -295,6 +297,15 @@ __pabt_svc:
295ENDPROC(__pabt_svc) 297ENDPROC(__pabt_svc)
296 298
297 .align 5 299 .align 5
300__fiq_svc:
301 svc_entry trace=0
302 mov r0, sp @ struct pt_regs *regs
303 bl handle_fiq_as_nmi
304 svc_exit_via_fiq
305 UNWIND(.fnend )
306ENDPROC(__fiq_svc)
307
308 .align 5
298.LCcralign: 309.LCcralign:
299 .word cr_alignment 310 .word cr_alignment
300#ifdef MULTI_DABORT 311#ifdef MULTI_DABORT
@@ -305,6 +316,46 @@ ENDPROC(__pabt_svc)
305 .word fp_enter 316 .word fp_enter
306 317
307/* 318/*
319 * Abort mode handlers
320 */
321
322@
323@ Taking a FIQ in abort mode is similar to taking a FIQ in SVC mode
324@ and reuses the same macros. However in abort mode we must also
325@ save/restore lr_abt and spsr_abt to make nested aborts safe.
326@
327 .align 5
328__fiq_abt:
329 svc_entry trace=0
330
331 ARM( msr cpsr_c, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
332 THUMB( mov r0, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
333 THUMB( msr cpsr_c, r0 )
334 mov r1, lr @ Save lr_abt
335 mrs r2, spsr @ Save spsr_abt, abort is now safe
336 ARM( msr cpsr_c, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
337 THUMB( mov r0, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
338 THUMB( msr cpsr_c, r0 )
339 stmfd sp!, {r1 - r2}
340
341 add r0, sp, #8 @ struct pt_regs *regs
342 bl handle_fiq_as_nmi
343
344 ldmfd sp!, {r1 - r2}
345 ARM( msr cpsr_c, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
346 THUMB( mov r0, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
347 THUMB( msr cpsr_c, r0 )
348 mov lr, r1 @ Restore lr_abt, abort is unsafe
349 msr spsr_cxsf, r2 @ Restore spsr_abt
350 ARM( msr cpsr_c, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
351 THUMB( mov r0, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
352 THUMB( msr cpsr_c, r0 )
353
354 svc_exit_via_fiq
355 UNWIND(.fnend )
356ENDPROC(__fiq_abt)
357
358/*
308 * User mode handlers 359 * User mode handlers
309 * 360 *
310 * EABI note: sp_svc is always 64-bit aligned here, so should S_FRAME_SIZE 361 * EABI note: sp_svc is always 64-bit aligned here, so should S_FRAME_SIZE
@@ -314,7 +365,7 @@ ENDPROC(__pabt_svc)
314#error "sizeof(struct pt_regs) must be a multiple of 8" 365#error "sizeof(struct pt_regs) must be a multiple of 8"
315#endif 366#endif
316 367
317 .macro usr_entry 368 .macro usr_entry, trace=1
318 UNWIND(.fnstart ) 369 UNWIND(.fnstart )
319 UNWIND(.cantunwind ) @ don't unwind the user space 370 UNWIND(.cantunwind ) @ don't unwind the user space
320 sub sp, sp, #S_FRAME_SIZE 371 sub sp, sp, #S_FRAME_SIZE
@@ -351,10 +402,12 @@ ENDPROC(__pabt_svc)
351 @ 402 @
352 zero_fp 403 zero_fp
353 404
405 .if \trace
354#ifdef CONFIG_IRQSOFF_TRACER 406#ifdef CONFIG_IRQSOFF_TRACER
355 bl trace_hardirqs_off 407 bl trace_hardirqs_off
356#endif 408#endif
357 ct_user_exit save = 0 409 ct_user_exit save = 0
410 .endif
358 .endm 411 .endm
359 412
360 .macro kuser_cmpxchg_check 413 .macro kuser_cmpxchg_check
@@ -683,6 +736,17 @@ ENTRY(ret_from_exception)
683ENDPROC(__pabt_usr) 736ENDPROC(__pabt_usr)
684ENDPROC(ret_from_exception) 737ENDPROC(ret_from_exception)
685 738
739 .align 5
740__fiq_usr:
741 usr_entry trace=0
742 kuser_cmpxchg_check
743 mov r0, sp @ struct pt_regs *regs
744 bl handle_fiq_as_nmi
745 get_thread_info tsk
746 restore_user_regs fast = 0, offset = 0
747 UNWIND(.fnend )
748ENDPROC(__fiq_usr)
749
686/* 750/*
687 * Register switch for ARMv3 and ARMv4 processors 751 * Register switch for ARMv3 and ARMv4 processors
688 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info 752 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
@@ -1118,17 +1182,29 @@ vector_addrexcptn:
1118 b vector_addrexcptn 1182 b vector_addrexcptn
1119 1183
1120/*============================================================================= 1184/*=============================================================================
1121 * Undefined FIQs 1185 * FIQ "NMI" handler
1122 *----------------------------------------------------------------------------- 1186 *-----------------------------------------------------------------------------
1123 * Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC 1187 * Handle a FIQ using the SVC stack, allowing FIQ to act like NMI on x86
1124 * MUST PRESERVE SVC SPSR, but need to switch to SVC mode to show our msg. 1188 * systems.
1125 * Basically to switch modes, we *HAVE* to clobber one register... brain
1126 * damage alert! I don't think that we can execute any code in here in any
1127 * other mode than FIQ... Ok you can switch to another mode, but you can't
1128 * get out of that mode without clobbering one register.
1129 */ 1189 */
1130vector_fiq: 1190 vector_stub fiq, FIQ_MODE, 4
1131 subs pc, lr, #4 1191
1192 .long __fiq_usr @ 0 (USR_26 / USR_32)
1193 .long __fiq_svc @ 1 (FIQ_26 / FIQ_32)
1194 .long __fiq_svc @ 2 (IRQ_26 / IRQ_32)
1195 .long __fiq_svc @ 3 (SVC_26 / SVC_32)
1196 .long __fiq_svc @ 4
1197 .long __fiq_svc @ 5
1198 .long __fiq_svc @ 6
1199 .long __fiq_abt @ 7
1200 .long __fiq_svc @ 8
1201 .long __fiq_svc @ 9
1202 .long __fiq_svc @ a
1203 .long __fiq_svc @ b
1204 .long __fiq_svc @ c
1205 .long __fiq_svc @ d
1206 .long __fiq_svc @ e
1207 .long __fiq_svc @ f
1132 1208
1133 .globl vector_fiq_offset 1209 .globl vector_fiq_offset
1134 .equ vector_fiq_offset, vector_fiq 1210 .equ vector_fiq_offset, vector_fiq
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index 2fdf8679b46e..0d91ca05d55b 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -216,6 +216,34 @@
216 ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr 216 ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
217 .endm 217 .endm
218 218
219 @
220 @ svc_exit_via_fiq - like svc_exit but switches to FIQ mode before exit
221 @
222 @ This macro acts in a similar manner to svc_exit but switches to FIQ
223 @ mode to restore the final part of the register state.
224 @
225 @ We cannot use the normal svc_exit procedure because that would
226 @ clobber spsr_svc (FIQ could be delivered during the first few
227 @ instructions of vector_swi meaning its contents have not been
228 @ saved anywhere).
229 @
230 @ Note that, unlike svc_exit, this macro also does not allow a caller
231 @ supplied rpsr. This is because the FIQ exceptions are not re-entrant
232 @ and the handlers cannot call into the scheduler (meaning the value
233 @ on the stack remains correct).
234 @
235 .macro svc_exit_via_fiq
236 mov r0, sp
237 ldmib r0, {r1 - r14} @ abort is deadly from here onward (it will
238 @ clobber state restored below)
239 msr cpsr_c, #FIQ_MODE | PSR_I_BIT | PSR_F_BIT
240 add r8, r0, #S_PC
241 ldr r9, [r0, #S_PSR]
242 msr spsr_cxsf, r9
243 ldr r0, [r0, #S_R0]
244 ldmia r8, {pc}^
245 .endm
246
219 .macro restore_user_regs, fast = 0, offset = 0 247 .macro restore_user_regs, fast = 0, offset = 0
220 ldr r1, [sp, #\offset + S_PSR] @ get calling cpsr 248 ldr r1, [sp, #\offset + S_PSR] @ get calling cpsr
221 ldr lr, [sp, #\offset + S_PC]! @ get pc 249 ldr lr, [sp, #\offset + S_PC]! @ get pc
@@ -267,6 +295,25 @@
267 rfeia sp! 295 rfeia sp!
268 .endm 296 .endm
269 297
298 @
299 @ svc_exit_via_fiq - like svc_exit but switches to FIQ mode before exit
300 @
301 @ For full details see non-Thumb implementation above.
302 @
303 .macro svc_exit_via_fiq
304 add r0, sp, #S_R2
305 ldr lr, [sp, #S_LR]
306 ldr sp, [sp, #S_SP] @ abort is deadly from here onward (it will
307 @ clobber state restored below)
308 ldmia r0, {r2 - r12}
309 mov r1, #FIQ_MODE | PSR_I_BIT | PSR_F_BIT
310 msr cpsr_c, r1
311 sub r0, #S_R2
312 add r8, r0, #S_PC
313 ldmia r0, {r0 - r1}
314 rfeia r8
315 .endm
316
270#ifdef CONFIG_CPU_V7M 317#ifdef CONFIG_CPU_V7M
271 /* 318 /*
272 * Note we don't need to do clrex here as clearing the local monitor is 319 * Note we don't need to do clrex here as clearing the local monitor is
diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
index 918875d96d5d..b37752a96652 100644
--- a/arch/arm/kernel/fiq.c
+++ b/arch/arm/kernel/fiq.c
@@ -52,7 +52,8 @@
52 (unsigned)&vector_fiq_offset; \ 52 (unsigned)&vector_fiq_offset; \
53 }) 53 })
54 54
55static unsigned long no_fiq_insn; 55static unsigned long dfl_fiq_insn;
56static struct pt_regs dfl_fiq_regs;
56 57
57/* Default reacquire function 58/* Default reacquire function
58 * - we always relinquish FIQ control 59 * - we always relinquish FIQ control
@@ -60,8 +61,15 @@ static unsigned long no_fiq_insn;
60 */ 61 */
61static int fiq_def_op(void *ref, int relinquish) 62static int fiq_def_op(void *ref, int relinquish)
62{ 63{
63 if (!relinquish) 64 if (!relinquish) {
64 set_fiq_handler(&no_fiq_insn, sizeof(no_fiq_insn)); 65 /* Restore default handler and registers */
66 local_fiq_disable();
67 set_fiq_regs(&dfl_fiq_regs);
68 set_fiq_handler(&dfl_fiq_insn, sizeof(dfl_fiq_insn));
69 local_fiq_enable();
70
71 /* FIXME: notify irq controller to standard enable FIQs */
72 }
65 73
66 return 0; 74 return 0;
67} 75}
@@ -150,6 +158,7 @@ EXPORT_SYMBOL(disable_fiq);
150void __init init_FIQ(int start) 158void __init init_FIQ(int start)
151{ 159{
152 unsigned offset = FIQ_OFFSET; 160 unsigned offset = FIQ_OFFSET;
153 no_fiq_insn = *(unsigned long *)(0xffff0000 + offset); 161 dfl_fiq_insn = *(unsigned long *)(0xffff0000 + offset);
162 get_fiq_regs(&dfl_fiq_regs);
154 fiq_start = start; 163 fiq_start = start;
155} 164}
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 84db893dedc2..c03106378b49 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -133,6 +133,7 @@ struct stack {
133 u32 irq[3]; 133 u32 irq[3];
134 u32 abt[3]; 134 u32 abt[3];
135 u32 und[3]; 135 u32 und[3];
136 u32 fiq[3];
136} ____cacheline_aligned; 137} ____cacheline_aligned;
137 138
138#ifndef CONFIG_CPU_V7M 139#ifndef CONFIG_CPU_V7M
@@ -470,7 +471,10 @@ void notrace cpu_init(void)
470 "msr cpsr_c, %5\n\t" 471 "msr cpsr_c, %5\n\t"
471 "add r14, %0, %6\n\t" 472 "add r14, %0, %6\n\t"
472 "mov sp, r14\n\t" 473 "mov sp, r14\n\t"
473 "msr cpsr_c, %7" 474 "msr cpsr_c, %7\n\t"
475 "add r14, %0, %8\n\t"
476 "mov sp, r14\n\t"
477 "msr cpsr_c, %9"
474 : 478 :
475 : "r" (stk), 479 : "r" (stk),
476 PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE), 480 PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
@@ -479,6 +483,8 @@ void notrace cpu_init(void)
479 "I" (offsetof(struct stack, abt[0])), 483 "I" (offsetof(struct stack, abt[0])),
480 PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE), 484 PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
481 "I" (offsetof(struct stack, und[0])), 485 "I" (offsetof(struct stack, und[0])),
486 PLC (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
487 "I" (offsetof(struct stack, fiq[0])),
482 PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE) 488 PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
483 : "r14"); 489 : "r14");
484#endif 490#endif
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index c8e4bb714944..58348631671d 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -25,6 +25,7 @@
25#include <linux/delay.h> 25#include <linux/delay.h>
26#include <linux/init.h> 26#include <linux/init.h>
27#include <linux/sched.h> 27#include <linux/sched.h>
28#include <linux/irq.h>
28 29
29#include <linux/atomic.h> 30#include <linux/atomic.h>
30#include <asm/cacheflush.h> 31#include <asm/cacheflush.h>
@@ -467,6 +468,31 @@ asmlinkage void do_unexp_fiq (struct pt_regs *regs)
467} 468}
468 469
469/* 470/*
471 * Handle FIQ similarly to NMI on x86 systems.
472 *
473 * The runtime environment for NMIs is extremely restrictive
474 * (NMIs can pre-empt critical sections meaning almost all locking is
475 * forbidden) meaning this default FIQ handling must only be used in
476 * circumstances where non-maskability improves robustness, such as
477 * watchdog or debug logic.
478 *
479 * This handler is not appropriate for general purpose use in driver or
480 * platform code and can be overridden using set_fiq_handler.
481 */
482asmlinkage void __exception_irq_entry handle_fiq_as_nmi(struct pt_regs *regs)
483{
484 struct pt_regs *old_regs = set_irq_regs(regs);
485
486 nmi_enter();
487
488 /* nop. FIQ handlers for special arch/arm features can be added here. */
489
490 nmi_exit();
491
492 set_irq_regs(old_regs);
493}
494
495/*
470 * bad_mode handles the impossible case in the vectors. If you see one of 496 * bad_mode handles the impossible case in the vectors. If you see one of
471 * these, then it's extremely serious, and could mean you have buggy hardware. 497 * these, then it's extremely serious, and could mean you have buggy hardware.
472 * It never returns, and never tries to sync. We hope that we can at least 498 * It never returns, and never tries to sync. We hope that we can at least