author		Russell King <rmk+kernel@arm.linux.org.uk>	2012-07-30 14:42:10 -0400
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2012-07-31 07:04:30 -0400
commit		15ac49b65024f55c4371a53214879a9c77c4fbf9 (patch)
tree		bcff50c21d64e51e672697771fc6a0cc29af1afd /arch
parent		c5dff4ffd327088d85035bec535b7d0c9ea03151 (diff)
ARM: Fix undefined instruction exception handling
While trying to get a v3.5 kernel booted on the cubox, I noticed that VFP does not work correctly with VFP bounce handling. This is because of the confusion over 16-bit vs 32-bit instructions, and where PC is supposed to point to.

The rule is that FP handlers are entered with regs->ARM_pc pointing at the _next_ instruction to be executed. However, if the exception is not handled, regs->ARM_pc points at the faulting instruction.

This is easy for ARM mode, because we know that the faulting and next instructions are always separated by four bytes. That is not true of Thumb2, where instructions can be either 16 or 32 bits long; however, since all FP instructions in Thumb2 are 32-bit, things stay simple: we just need to select the appropriate adjustment.

Do this by moving the adjustment out of do_undefinstr() into the assembly code, as only the assembly code knows whether it's dealing with a 32-bit or 16-bit instruction.

Cc: <stable@vger.kernel.org>
Acked-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
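To make the rule concrete, here is a minimal C sketch of what the new assembly-level __und_fault helper does (purely illustrative, not part of the patch; the name und_fault_c is made up). The trap entry code works out the correction itself (4 for ARM or for a 32-bit Thumb-2 instruction once regs->ARM_pc has been advanced past both halfwords, 2 for a 16-bit Thumb instruction), and regs->ARM_pc is wound back onto the faulting instruction before do_undefinstr() runs:

	/*
	 * Illustrative C rendering of __und_fault (the real helper is the
	 * assembly added to arch/arm/kernel/entry-armv.S by this patch).
	 * "correction" is chosen by the caller: 4 in ARM mode or for a
	 * 32-bit Thumb-2 instruction whose second halfword has already been
	 * consumed, 2 for a 16-bit Thumb instruction.
	 */
	static void und_fault_c(struct pt_regs *regs, unsigned int correction)
	{
		/* Entered with regs->ARM_pc pointing at the next instruction. */
		regs->ARM_pc -= correction;
		/* Now regs->ARM_pc points at the faulting instruction itself. */
		do_undefinstr(regs);
	}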
Diffstat (limited to 'arch')
-rw-r--r--	arch/arm/kernel/entry-armv.S	111
-rw-r--r--	arch/arm/kernel/traps.c		8
-rw-r--r--	arch/arm/vfp/entry.S		16
-rw-r--r--	arch/arm/vfp/vfphw.S		19
4 files changed, 92 insertions, 62 deletions
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 0d1851ca6eb9..0f82098c9bfe 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -244,6 +244,19 @@ svc_preempt:
 	b	1b
 #endif
 
+__und_fault:
+	@ Correct the PC such that it is pointing at the instruction
+	@ which caused the fault.  If the faulting instruction was ARM
+	@ the PC will be pointing at the next instruction, and have to
+	@ subtract 4.  Otherwise, it is Thumb, and the PC will be
+	@ pointing at the second half of the Thumb instruction.  We
+	@ have to subtract 2.
+	ldr	r2, [r0, #S_PC]
+	sub	r2, r2, r1
+	str	r2, [r0, #S_PC]
+	b	do_undefinstr
+ENDPROC(__und_fault)
+
 	.align	5
 __und_svc:
 #ifdef CONFIG_KPROBES
@@ -261,25 +274,32 @@ __und_svc:
 	@
 	@ r0 - instruction
 	@
 #ifndef CONFIG_THUMB2_KERNEL
 	ldr	r0, [r4, #-4]
 #else
+	mov	r1, #2
 	ldrh	r0, [r4, #-2]			@ Thumb instruction at LR - 2
 	cmp	r0, #0xe800			@ 32-bit instruction if xx >= 0
-	ldrhhs	r9, [r4]			@ bottom 16 bits
-	orrhs	r0, r9, r0, lsl #16
+	blo	__und_svc_fault
+	ldrh	r9, [r4]			@ bottom 16 bits
+	add	r4, r4, #2
+	str	r4, [sp, #S_PC]
+	orr	r0, r9, r0, lsl #16
 #endif
-	adr	r9, BSYM(1f)
+	adr	r9, BSYM(__und_svc_finish)
 	mov	r2, r4
 	bl	call_fpe
 
+	mov	r1, #4				@ PC correction to apply
+__und_svc_fault:
 	mov	r0, sp				@ struct pt_regs *regs
-	bl	do_undefinstr
+	bl	__und_fault
 
 	@
 	@ IRQs off again before pulling preserved data off the stack
 	@
-1:	disable_irq_notrace
+__und_svc_finish:
+	disable_irq_notrace
 
 	@
 	@ restore SPSR and restart the instruction
@@ -423,25 +443,33 @@ __und_usr:
 	mov	r2, r4
 	mov	r3, r5
 
+	@ r2 = regs->ARM_pc, which is either 2 or 4 bytes ahead of the
+	@      faulting instruction depending on Thumb mode.
+	@ r3 = regs->ARM_cpsr
 	@
-	@ fall through to the emulation code, which returns using r9 if
-	@ it has emulated the instruction, or the more conventional lr
-	@ if we are to treat this as a real undefined instruction
-	@
-	@ r0 - instruction
+	@ The emulation code returns using r9 if it has emulated the
+	@ instruction, or the more conventional lr if we are to treat
+	@ this as a real undefined instruction
 	@
 	adr	r9, BSYM(ret_from_exception)
-	adr	lr, BSYM(__und_usr_unknown)
+
 	tst	r3, #PSR_T_BIT			@ Thumb mode?
-	itet	eq				@ explicit IT needed for the 1f label
-	subeq	r4, r2, #4			@ ARM instr at LR - 4
-	subne	r4, r2, #2			@ Thumb instr at LR - 2
-1:	ldreqt	r0, [r4]
+	bne	__und_usr_thumb
+	sub	r4, r2, #4			@ ARM instr at LR - 4
+1:	ldrt	r0, [r4]
 #ifdef CONFIG_CPU_ENDIAN_BE8
-	reveq	r0, r0				@ little endian instruction
+	rev	r0, r0				@ little endian instruction
 #endif
-	beq	call_fpe
+	@ r0 = 32-bit ARM instruction which caused the exception
+	@ r2 = PC value for the following instruction (:= regs->ARM_pc)
+	@ r4 = PC value for the faulting instruction
+	@ lr = 32-bit undefined instruction function
+	adr	lr, BSYM(__und_usr_fault_32)
+	b	call_fpe
+
+__und_usr_thumb:
 	@ Thumb instruction
+	sub	r4, r2, #2			@ First half of thumb instr at LR - 2
 #if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
 /*
  * Thumb-2 instruction handling.  Note that because pre-v6 and >= v6 platforms
@@ -455,7 +483,7 @@ __und_usr:
 	ldr	r5, .LCcpu_architecture
 	ldr	r5, [r5]
 	cmp	r5, #CPU_ARCH_ARMv7
-	blo	__und_usr_unknown
+	blo	__und_usr_fault_16		@ 16bit undefined instruction
 /*
  * The following code won't get run unless the running CPU really is v7, so
  * coding round the lack of ldrht on older arches is pointless.  Temporarily
@@ -463,15 +491,18 @@ __und_usr:
  */
 	.arch	armv6t2
 #endif
-2:
- ARM(	ldrht	r5, [r4], #2	)
- THUMB(	ldrht	r5, [r4]	)
- THUMB(	add	r4, r4, #2	)
+2:	ldrht	r5, [r4]
 	cmp	r5, #0xe800			@ 32bit instruction if xx != 0
-	blo	__und_usr_unknown
-3:	ldrht	r0, [r4]
+	blo	__und_usr_fault_16		@ 16bit undefined instruction
+3:	ldrht	r0, [r2]
 	add	r2, r2, #2			@ r2 is PC + 2, make it PC + 4
+	str	r2, [sp, #S_PC]			@ it's a 2x16bit instr, update
 	orr	r0, r0, r5, lsl #16
+	adr	lr, BSYM(__und_usr_fault_32)
+	@ r0 = the two 16-bit Thumb instructions which caused the exception
+	@ r2 = PC value for the following Thumb instruction (:= regs->ARM_pc)
+	@ r4 = PC value for the first 16-bit Thumb instruction
+	@ lr = 32bit undefined instruction function
 
 #if __LINUX_ARM_ARCH__ < 7
 /* If the target arch was overridden, change it back: */
@@ -482,17 +513,13 @@ __und_usr:
 #endif
 #endif /* __LINUX_ARM_ARCH__ < 7 */
 #else /* !(CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7) */
-	b	__und_usr_unknown
+	b	__und_usr_fault_16
 #endif
- UNWIND(.fnend )
+ UNWIND(.fnend)
 ENDPROC(__und_usr)
 
-	@
-	@ fallthrough to call_fpe
-	@
-
 /*
- * The out of line fixup for the ldrt above.
+ * The out of line fixup for the ldrt instructions above.
  */
 	.pushsection .fixup, "ax"
 	.align	2
@@ -524,11 +551,12 @@ ENDPROC(__und_usr)
  * NEON handler code.
  *
  * Emulators may wish to make use of the following registers:
- *  r0  = instruction opcode.
- *  r2  = PC+4
+ *  r0  = instruction opcode (32-bit ARM or two 16-bit Thumb)
+ *  r2  = PC value to resume execution after successful emulation
  *  r9  = normal "successful" return address
- *  r10 = this threads thread_info structure.
+ *  r10 = this threads thread_info structure
  *  lr  = unrecognised instruction return address
+ *  IRQs disabled, FIQs enabled.
  */
 	@
 	@ Fall-through from Thumb-2 __und_usr
@@ -659,12 +687,17 @@ ENTRY(no_fp)
 	mov	pc, lr
 ENDPROC(no_fp)
 
-__und_usr_unknown:
-	enable_irq
+__und_usr_fault_32:
+	mov	r1, #4
+	b	1f
+__und_usr_fault_16:
+	mov	r1, #2
+1:	enable_irq
 	mov	r0, sp
 	adr	lr, BSYM(ret_from_exception)
-	b	do_undefinstr
-ENDPROC(__und_usr_unknown)
+	b	__und_fault
+ENDPROC(__und_usr_fault_32)
+ENDPROC(__und_usr_fault_16)
 
 	.align	5
 __pabt_usr:
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 3647170e9a16..c7cae6b9a4d9 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -370,18 +370,10 @@ static int call_undef_hook(struct pt_regs *regs, unsigned int instr)
 
 asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
 {
-	unsigned int correction = thumb_mode(regs) ? 2 : 4;
 	unsigned int instr;
 	siginfo_t info;
 	void __user *pc;
 
-	/*
-	 * According to the ARM ARM, PC is 2 or 4 bytes ahead,
-	 * depending whether we're in Thumb mode or not.
-	 * Correct this offset.
-	 */
-	regs->ARM_pc -= correction;
-
 	pc = (void __user *)instruction_pointer(regs);
 
 	if (processor_mode(regs) == SVC_MODE) {
diff --git a/arch/arm/vfp/entry.S b/arch/arm/vfp/entry.S
index 4fa9903b83cf..cc926c985981 100644
--- a/arch/arm/vfp/entry.S
+++ b/arch/arm/vfp/entry.S
@@ -7,18 +7,20 @@
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
- *
- * Basic entry code, called from the kernel's undefined instruction trap.
- *  r0  = faulted instruction
- *  r5  = faulted PC+4
- *  r9  = successful return
- *  r10 = thread_info structure
- *  lr  = failure return
  */
 #include <asm/thread_info.h>
 #include <asm/vfpmacros.h>
 #include "../kernel/entry-header.S"
 
+@ VFP entry point.
+@
+@  r0  = instruction opcode (32-bit ARM or two 16-bit Thumb)
+@  r2  = PC value to resume execution after successful emulation
+@  r9  = normal "successful" return address
+@  r10 = this threads thread_info structure
+@  lr  = unrecognised instruction return address
+@  IRQs disabled.
+@
 ENTRY(do_vfp)
 #ifdef CONFIG_PREEMPT
 	ldr	r4, [r10, #TI_PREEMPT]	@ get preempt count
diff --git a/arch/arm/vfp/vfphw.S b/arch/arm/vfp/vfphw.S
index 2d30c7f6edd3..3a0efaad6090 100644
--- a/arch/arm/vfp/vfphw.S
+++ b/arch/arm/vfp/vfphw.S
@@ -61,13 +61,13 @@
 
 @ VFP hardware support entry point.
 @
-@  r0  = faulted instruction
-@  r2  = faulted PC+4
-@  r9  = successful return
+@  r0  = instruction opcode (32-bit ARM or two 16-bit Thumb)
+@  r2  = PC value to resume execution after successful emulation
+@  r9  = normal "successful" return address
 @  r10 = vfp_state union
 @  r11 = CPU number
-@  lr  = failure return
-
+@  lr  = unrecognised instruction return address
+@  IRQs enabled.
 ENTRY(vfp_support_entry)
 	DBGSTR3	"instr %08x pc %08x state %p", r0, r2, r10
 
@@ -161,9 +161,12 @@ vfp_hw_state_valid:
 					@ exception before retrying branch
 					@ out before setting an FPEXC that
 					@ stops us reading stuff
-	VFPFMXR	FPEXC, r1		@ restore FPEXC last
-	sub	r2, r2, #4
-	str	r2, [sp, #S_PC]		@ retry the instruction
+	VFPFMXR	FPEXC, r1		@ Restore FPEXC last
+	sub	r2, r2, #4		@ Retry current instruction - if Thumb
+	str	r2, [sp, #S_PC]		@ mode it's two 16-bit instructions,
+					@ else it's one 32-bit instruction, so
+					@ always subtract 4 from the following
+					@ instruction address.
 #ifdef CONFIG_PREEMPT
 	get_thread_info	r10
 	ldr	r4, [r10, #TI_PREEMPT]	@ get preempt count