author     Catalin Marinas <catalin.marinas@arm.com>      2007-01-24 12:47:08 -0500
committer  Russell King <rmk+kernel@arm.linux.org.uk>     2007-01-25 11:35:29 -0500
commit     c6428464894889e110418928e6b37dc2eb4cee56 (patch)
tree       292410b297ef2332715aabd7a87ef9fe0c03de4a /arch/arm
parent     412489af76b5c0e4029d4406d93554c22a88fc73 (diff)
[ARM] 4111/1: Allow VFP to work with thread migration on SMP
The current lazy saving of the VFP registers is no longer possible
with thread migration on SMP. This patch implements a per-CPU
vfp-state pointer and saves the VFP registers at every context
switch. Restoring the registers is still performed lazily.
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
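For orientation before the diffs: the core of the change is the THREAD_NOTIFY_SWITCH path in vfp_notifier() (see the vfpmodule.c hunk below). The condensed C sketch that follows only rewraps that logic for readability; the wrapper name vfp_switch_sketch is illustrative, and fmrx()/fmxr(), vfp_save_state() and the thread_info/vfp_state types are the kernel's own, so the snippet is not compilable stand-alone.

	/* One "owner" slot per CPU instead of a single global pointer. */
	union vfp_state *last_VFP_context[NR_CPUS];

	/* Condensed THREAD_NOTIFY_SWITCH handling, as added by this patch. */
	static void vfp_switch_sketch(struct thread_info *thread)
	{
		__u32 cpu = thread->cpu;
		u32 fpexc = fmrx(FPEXC);

	#ifdef CONFIG_SMP
		/* Eagerly save the outgoing state: the thread may migrate. */
		if ((fpexc & FPEXC_ENABLE) && last_VFP_context[cpu]) {
			vfp_save_state(last_VFP_context[cpu], fpexc);
			last_VFP_context[cpu]->hard.cpu = cpu;
		}
		/*
		 * Incoming thread last ran on another CPU: the registers held
		 * here are stale, so force a reload on the next VFP use.
		 */
		if (thread->vfpstate.hard.cpu != cpu)
			last_VFP_context[cpu] = NULL;
	#endif
		/* Disable VFP so the first use traps and restores lazily. */
		fmxr(FPEXC, fpexc & ~FPEXC_ENABLE);
	}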
Diffstat (limited to 'arch/arm')
-rw-r--r--   arch/arm/vfp/entry.S      1
-rw-r--r--   arch/arm/vfp/vfp.h        4
-rw-r--r--   arch/arm/vfp/vfphw.S     26
-rw-r--r--   arch/arm/vfp/vfpmodule.c 30
4 files changed, 55 insertions, 6 deletions
diff --git a/arch/arm/vfp/entry.S b/arch/arm/vfp/entry.S
index 7b595547c1c8..ca2a5ad19ea6 100644
--- a/arch/arm/vfp/entry.S
+++ b/arch/arm/vfp/entry.S
@@ -25,6 +25,7 @@
 do_vfp:
 	enable_irq
 	ldr	r4, .LCvfp
+	ldr	r11, [r10, #TI_CPU]	@ CPU number
 	add	r10, r10, #TI_VFPSTATE	@ r10 = workspace
 	ldr	pc, [r4]		@ call VFP entry point
 
diff --git a/arch/arm/vfp/vfp.h b/arch/arm/vfp/vfp.h
index f2797896e6d5..54a2ad6d9ca2 100644
--- a/arch/arm/vfp/vfp.h
+++ b/arch/arm/vfp/vfp.h
@@ -370,3 +370,7 @@ struct op {
 	u32 (* const fn)(int dd, int dn, int dm, u32 fpscr);
 	u32 flags;
 };
+
+#ifdef CONFIG_SMP
+extern void vfp_save_state(void *location, u32 fpexc);
+#endif
diff --git a/arch/arm/vfp/vfphw.S b/arch/arm/vfp/vfphw.S
index e51e6679c402..d4b7b229631d 100644
--- a/arch/arm/vfp/vfphw.S
+++ b/arch/arm/vfp/vfphw.S
@@ -65,6 +65,7 @@
 @ r2  = faulted PC+4
 @ r9  = successful return
 @ r10 = vfp_state union
+@ r11 = CPU number
 @ lr  = failure return
 
 	.globl	vfp_support_entry
@@ -79,7 +80,7 @@ vfp_support_entry:
 	DBGSTR1	"enable %x", r10
 	ldr	r3, last_VFP_context_address
 	orr	r1, r1, #FPEXC_ENABLE	@ user FPEXC has the enable bit set
-	ldr	r4, [r3]		@ last_VFP_context pointer
+	ldr	r4, [r3, r11, lsl #2]	@ last_VFP_context pointer
 	bic	r5, r1, #FPEXC_EXCEPTION @ make sure exceptions are disabled
 	cmp	r4, r10
 	beq	check_for_exception	@ we are returning to the same
@@ -91,7 +92,9 @@ vfp_support_entry:
 					@ exceptions, so we can get at the
 					@ rest of it
 
+#ifndef CONFIG_SMP
 	@ Save out the current registers to the old thread state
+	@ No need for SMP since this is not done lazily
 
 	DBGSTR1	"save old state %p", r4
 	cmp	r4, #0
@@ -105,10 +108,11 @@ vfp_support_entry:
 	stmia	r4, {r1, r5, r6, r8}	@ save FPEXC, FPSCR, FPINST, FPINST2
 					@ and point r4 at the word at the
 					@ start of the register dump
+#endif
 
 no_old_VFP_process:
 	DBGSTR1	"load state %p", r10
-	str	r10, [r3]		@ update the last_VFP_context pointer
+	str	r10, [r3, r11, lsl #2]	@ update the last_VFP_context pointer
 					@ Load the saved state back into the VFP
 	VFPFLDMIA r10			@ reload the working registers while
 					@ FPEXC is in a safe state
@@ -162,6 +166,24 @@ process_exception:
 					@ required. If not, the user code will
 					@ retry the faulted instruction
 
+#ifdef CONFIG_SMP
+	.globl	vfp_save_state
+	.type	vfp_save_state, %function
+vfp_save_state:
+	@ Save the current VFP state
+	@ r0 - save location
+	@ r1 - FPEXC
+	DBGSTR1	"save VFP state %p", r0
+	VFPFMRX	r2, FPSCR		@ current status
+	VFPFMRX	r3, FPINST		@ FPINST (always there, rev0 onwards)
+	tst	r1, #FPEXC_FPV2		@ is there an FPINST2 to read?
+	VFPFMRX	r12, FPINST2, NE	@ FPINST2 if needed - avoids reading
+					@ nonexistant reg on rev0
+	VFPFSTMIA r0			@ save the working registers
+	stmia	r0, {r1, r2, r3, r12}	@ save FPEXC, FPSCR, FPINST, FPINST2
+	mov	pc, lr
+#endif
+
 last_VFP_context_address:
 	.word	last_VFP_context
 
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index 490d9d18a7d1..f1e5951dc721 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -28,7 +28,7 @@ void vfp_testing_entry(void);
 void vfp_support_entry(void);
 
 void (*vfp_vector)(void) = vfp_testing_entry;
-union vfp_state *last_VFP_context;
+union vfp_state *last_VFP_context[NR_CPUS];
 
 /*
  * Dual-use variable.
@@ -41,13 +41,35 @@ static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
 {
 	struct thread_info *thread = v;
 	union vfp_state *vfp;
+	__u32 cpu = thread->cpu;
 
 	if (likely(cmd == THREAD_NOTIFY_SWITCH)) {
+		u32 fpexc = fmrx(FPEXC);
+
+#ifdef CONFIG_SMP
+		/*
+		 * On SMP, if VFP is enabled, save the old state in
+		 * case the thread migrates to a different CPU. The
+		 * restoring is done lazily.
+		 */
+		if ((fpexc & FPEXC_ENABLE) && last_VFP_context[cpu]) {
+			vfp_save_state(last_VFP_context[cpu], fpexc);
+			last_VFP_context[cpu]->hard.cpu = cpu;
+		}
+		/*
+		 * Thread migration, just force the reloading of the
+		 * state on the new CPU in case the VFP registers
+		 * contain stale data.
+		 */
+		if (thread->vfpstate.hard.cpu != cpu)
+			last_VFP_context[cpu] = NULL;
+#endif
+
 		/*
 		 * Always disable VFP so we can lazily save/restore the
 		 * old state.
 		 */
-		fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_ENABLE);
+		fmxr(FPEXC, fpexc & ~FPEXC_ENABLE);
 		return NOTIFY_DONE;
 	}
 
@@ -68,8 +90,8 @@ static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
 	}
 
 	/* flush and release case: Per-thread VFP cleanup. */
-	if (last_VFP_context == vfp)
-		last_VFP_context = NULL;
+	if (last_VFP_context[cpu] == vfp)
+		last_VFP_context[cpu] = NULL;
 
 	return NOTIFY_DONE;
 }