Diffstat (limited to 'arch/arm/vfp/vfpmodule.c')
-rw-r--r--  arch/arm/vfp/vfpmodule.c | 30 ++++++++++++++++++++++++++----
1 file changed, 26 insertions(+), 4 deletions(-)
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index 490d9d18a7d1..f1e5951dc721 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -28,7 +28,7 @@ void vfp_testing_entry(void);
 void vfp_support_entry(void);
 
 void (*vfp_vector)(void) = vfp_testing_entry;
-union vfp_state *last_VFP_context;
+union vfp_state *last_VFP_context[NR_CPUS];
 
 /*
  * Dual-use variable.
@@ -41,13 +41,35 @@ static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
 {
 	struct thread_info *thread = v;
 	union vfp_state *vfp;
+	__u32 cpu = thread->cpu;
 
 	if (likely(cmd == THREAD_NOTIFY_SWITCH)) {
+		u32 fpexc = fmrx(FPEXC);
+
+#ifdef CONFIG_SMP
+		/*
+		 * On SMP, if VFP is enabled, save the old state in
+		 * case the thread migrates to a different CPU. The
+		 * restoring is done lazily.
+		 */
+		if ((fpexc & FPEXC_ENABLE) && last_VFP_context[cpu]) {
+			vfp_save_state(last_VFP_context[cpu], fpexc);
+			last_VFP_context[cpu]->hard.cpu = cpu;
+		}
+		/*
+		 * Thread migration, just force the reloading of the
+		 * state on the new CPU in case the VFP registers
+		 * contain stale data.
+		 */
+		if (thread->vfpstate.hard.cpu != cpu)
+			last_VFP_context[cpu] = NULL;
+#endif
+
 		/*
 		 * Always disable VFP so we can lazily save/restore the
 		 * old state.
 		 */
-		fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_ENABLE);
+		fmxr(FPEXC, fpexc & ~FPEXC_ENABLE);
 		return NOTIFY_DONE;
 	}
 
@@ -68,8 +90,8 @@ static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
 	}
 
 	/* flush and release case: Per-thread VFP cleanup. */
-	if (last_VFP_context == vfp)
-		last_VFP_context = NULL;
+	if (last_VFP_context[cpu] == vfp)
+		last_VFP_context[cpu] = NULL;
 
 	return NOTIFY_DONE;
 }
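
For context, here is a minimal, self-contained sketch (plain C, compilable in userspace) of the lazy per-CPU context tracking this patch introduces. Apart from last_VFP_context, NR_CPUS and FPEXC_ENABLE, all names (fake_vfp_state, fake_thread, notify_switch, ...) are hypothetical stand-ins for illustration; the real vfp_notifier() operates on the hardware FPEXC register and the VFP register bank rather than plain memory.

/*
 * Sketch of the SMP path in vfp_notifier() for THREAD_NOTIFY_SWITCH,
 * under the assumptions stated above.
 */
#include <stdio.h>

#define NR_CPUS		4
#define FPEXC_ENABLE	(1u << 30)	/* "VFP enabled" bit */

struct fake_vfp_state {
	unsigned int cpu;		/* mirrors vfp->hard.cpu */
	int saved;			/* set once the state is flushed to memory */
};

struct fake_thread {
	struct fake_vfp_state vfpstate;
};

/* Simulated per-CPU FPEXC register and "last loaded context" slot. */
static unsigned int fpexc[NR_CPUS];
static struct fake_vfp_state *last_VFP_context[NR_CPUS];

/* Called when @thread is about to run on @cpu. */
static void notify_switch(struct fake_thread *thread, unsigned int cpu)
{
	/*
	 * VFP is enabled and some thread's state is still live in this
	 * CPU's registers: flush it so it survives if its owner migrates.
	 */
	if ((fpexc[cpu] & FPEXC_ENABLE) && last_VFP_context[cpu]) {
		last_VFP_context[cpu]->saved = 1;	/* stands in for vfp_save_state() */
		last_VFP_context[cpu]->cpu = cpu;
	}

	/*
	 * The incoming thread last ran on a different CPU, so whatever is
	 * cached here is stale for it: drop the pointer to force a reload.
	 */
	if (thread->vfpstate.cpu != cpu)
		last_VFP_context[cpu] = NULL;

	/* The real handler then clears FPEXC_ENABLE so the reload is lazy. */
	fpexc[cpu] &= ~FPEXC_ENABLE;
}

int main(void)
{
	struct fake_thread a = { .vfpstate = { .cpu = 0 } };

	fpexc[0] = FPEXC_ENABLE;
	last_VFP_context[0] = &a.vfpstate;	/* a's state is live on CPU 0 */

	notify_switch(&a, 1);			/* a migrates to CPU 1 */
	printf("a saved=%d, CPU1 cache=%p\n",
	       a.vfpstate.saved, (void *)last_VFP_context[1]);
	return 0;
}

The design point the per-CPU array captures: a thread's VFP state may still be live in the registers of the CPU it last ran on, so the switch notifier saves that state and invalidates the cached pointer on migration, letting the undefined-instruction trap reload the context lazily only when the thread actually touches VFP again.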