author     George G. Davis <gdavis@mvista.com>          2009-04-01 15:27:18 -0400
committer  Russell King <rmk+kernel@arm.linux.org.uk>   2009-04-01 17:17:50 -0400
commit     f2255be8126e86142901c61dd482c1c2a5ffdda7 (patch)
tree       a59ff8fecbbf30a7a1e33f97b59e1ed3cd38c018 /arch/arm/vfp/entry.S
parent     fe68e68f6a379d317a87ae24de050a65b11ea1fb (diff)
[ARM] 5440/1: Fix VFP state corruption due to preemption during VFP exceptions
We've observed that ARM VFP state can be corrupted during VFP exception
handling when PREEMPT is enabled. The exact conditions are difficult to
reproduce, but the corruption appears to occur when a task takes a VFP
exception, which is handled via VFP_bounce, and is then preempted by
another task which in turn takes its own VFP exception. Since the
VFP_bounce code is not preempt-safe, the VFP state then becomes corrupt.
To prevent preemption from occurring while a VFP exception is being
handled, this patch disables preemption across VFP exception handling.
Signed-off-by: George G. Davis <gdavis@mvista.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
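Editorial note: for readers more used to the C side of the kernel, the assembly change below is essentially an open-coded preempt_disable() at the start of do_vfp, with raw TI_PREEMPT decrements on the exit stubs that return without reaching the normal VFP support code. The following is a minimal, hypothetical C sketch of that idea only; the names vfp_bounce_preempt_safe and the VFP_bounce prototype shown here are assumptions for illustration, and the real fix manipulates thread_info->preempt_count directly in assembly because do_vfp runs in the undefined-instruction exception path before any C code is reached.

    /*
     * Hypothetical sketch, not the actual patch: the real fix open-codes
     * the preempt count increment/decrement in arch/arm/vfp/entry.S.
     */
    #include <linux/preempt.h>
    #include <linux/types.h>
    #include <asm/ptrace.h>

    /* VFP exception handler, defined in arch/arm/vfp/vfpmodule.c (assumed prototype). */
    extern void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs);

    static void vfp_bounce_preempt_safe(u32 trigger, u32 fpexc,
                                        struct pt_regs *regs)
    {
            preempt_disable();      /* the TI_PREEMPT increment in do_vfp */
            VFP_bounce(trigger, fpexc, regs);
            /*
             * The assembly simply decrements TI_PREEMPT on each return
             * path without a reschedule check, so
             * preempt_enable_no_resched() is the closest C equivalent.
             */
            preempt_enable_no_resched();
    }

Doing the increment in do_vfp itself, before enable_irq, means the count is already raised by the time the handler can be preempted; vfp_null_entry and vfp_testing_entry drop it again because they return to the caller directly, without passing through the normal VFP handling path.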
Diffstat (limited to 'arch/arm/vfp/entry.S')
-rw-r--r--  arch/arm/vfp/entry.S  23
1 file changed, 19 insertions(+), 4 deletions(-)
diff --git a/arch/arm/vfp/entry.S b/arch/arm/vfp/entry.S
index ba592a9e6fb3..a2bed62aec21 100644
--- a/arch/arm/vfp/entry.S
+++ b/arch/arm/vfp/entry.S
@@ -15,13 +15,16 @@
  *  r10 = thread_info structure
  *  lr  = failure return
  */
-#include <linux/linkage.h>
-#include <linux/init.h>
-#include <asm/asm-offsets.h>
-#include <asm/assembler.h>
+#include <asm/thread_info.h>
 #include <asm/vfpmacros.h>
+#include "../kernel/entry-header.S"
 
 ENTRY(do_vfp)
+#ifdef CONFIG_PREEMPT
+	ldr	r4, [r10, #TI_PREEMPT]	@ get preempt count
+	add	r11, r4, #1		@ increment it
+	str	r11, [r10, #TI_PREEMPT]
+#endif
 	enable_irq
 	ldr	r4, .LCvfp
 	ldr	r11, [r10, #TI_CPU]	@ CPU number
@@ -30,6 +33,12 @@ ENTRY(do_vfp)
 ENDPROC(do_vfp)
 
 ENTRY(vfp_null_entry)
+#ifdef CONFIG_PREEMPT
+	get_thread_info	r10
+	ldr	r4, [r10, #TI_PREEMPT]	@ get preempt count
+	sub	r11, r4, #1		@ decrement it
+	str	r11, [r10, #TI_PREEMPT]
+#endif
 	mov	pc, lr
 ENDPROC(vfp_null_entry)
 
@@ -41,6 +50,12 @@ ENDPROC(vfp_null_entry)
 
 	__INIT
 ENTRY(vfp_testing_entry)
+#ifdef CONFIG_PREEMPT
+	get_thread_info	r10
+	ldr	r4, [r10, #TI_PREEMPT]	@ get preempt count
+	sub	r11, r4, #1		@ decrement it
+	str	r11, [r10, #TI_PREEMPT]
+#endif
 	ldr	r0, VFP_arch_address
 	str	r5, [r0]		@ known non-zero value
 	mov	pc, r9			@ we have handled the fault