Diffstat (limited to 'arch/arm/vfp')

 -rw-r--r--  arch/arm/vfp/entry.S     |  3
 -rw-r--r--  arch/arm/vfp/vfphw.S     | 20
 -rw-r--r--  arch/arm/vfp/vfpmodule.c | 83

 3 files changed, 77 insertions, 29 deletions
diff --git a/arch/arm/vfp/entry.S b/arch/arm/vfp/entry.S
index 4fa9903b83c..c1a97840258 100644
--- a/arch/arm/vfp/entry.S
+++ b/arch/arm/vfp/entry.S
@@ -10,7 +10,7 @@
  *
  * Basic entry code, called from the kernel's undefined instruction trap.
  *  r0  = faulted instruction
- *  r5  = faulted PC+4
+ *  r2  = faulted PC+4
  *  r9  = successful return
  *  r10 = thread_info structure
  *  lr  = failure return
@@ -26,6 +26,7 @@ ENTRY(do_vfp)
 	str	r11, [r10, #TI_PREEMPT]
 #endif
 	enable_irq
+	str	r2, [sp, #S_PC]		@ update regs->ARM_pc for Thumb 2 case
 	ldr	r4, .LCvfp
 	ldr	r11, [r10, #TI_CPU]	@ CPU number
 	add	r10, r10, #TI_VFPSTATE	@ r10 = workspace
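
Note: the two entry.S changes belong together. The undefined-instruction trap hands do_vfp the address of the instruction after the trapped one in r2 (the old comment naming r5 was stale), and do_vfp now writes that value back into the saved registers. On Thumb-2 a trapped VFP instruction may be 16 or 32 bits wide, so the resume address cannot be reconstructed from a fixed offset later. In C terms, with sp pointing at the saved struct pt_regs (a sketch of intent, not kernel code; next_pc stands for the value in r2):

	regs->ARM_pc = next_pc;	/* resume after the trapped/emulated VFP insn */
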
diff --git a/arch/arm/vfp/vfphw.S b/arch/arm/vfp/vfphw.S
index 9897dcfc16d..404538ae591 100644
--- a/arch/arm/vfp/vfphw.S
+++ b/arch/arm/vfp/vfphw.S
@@ -77,15 +77,12 @@ ENTRY(vfp_support_entry)
 	bne	look_for_VFP_exceptions	@ VFP is already enabled

 	DBGSTR1	"enable %x", r10
-	ldr	r3, last_VFP_context_address
+	ldr	r3, vfp_current_hw_state_address
 	orr	r1, r1, #FPEXC_EN	@ user FPEXC has the enable bit set
-	ldr	r4, [r3, r11, lsl #2]	@ last_VFP_context pointer
+	ldr	r4, [r3, r11, lsl #2]	@ vfp_current_hw_state pointer
 	bic	r5, r1, #FPEXC_EX	@ make sure exceptions are disabled
-	cmp	r4, r10
-	beq	check_for_exception	@ we are returning to the same
-					@ process, so the registers are
-					@ still there. In this case, we do
-					@ not want to drop a pending exception.
+	cmp	r4, r10			@ this thread owns the hw context?
+	beq	vfp_hw_state_valid

 	VFPFMXR	FPEXC, r5		@ enable VFP, disable any pending
 					@ exceptions, so we can get at the
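
Note: the new names make the fast path self-describing: r10 is this thread's vfpstate workspace and r4 the per-CPU owner slot, so the cmp/beq pair is equivalent to the following C (cpu and thread are illustrative locals, not variables in this file):

	/* lazy-switch fast path: does the hardware already hold our context? */
	if (vfp_current_hw_state[cpu] == &thread->vfpstate)
		goto vfp_hw_state_valid;	/* skip the reload entirely */
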
@@ -116,7 +113,7 @@ ENTRY(vfp_support_entry)

 no_old_VFP_process:
 	DBGSTR1	"load state %p", r10
-	str	r10, [r3, r11, lsl #2]	@ update the last_VFP_context pointer
+	str	r10, [r3, r11, lsl #2]	@ update the vfp_current_hw_state pointer
 					@ Load the saved state back into the VFP
 	VFPFLDMIA	r10, r5		@ reload the working registers while
 					@ FPEXC is in a safe state
@@ -132,7 +129,8 @@ no_old_VFP_process:
 #endif
 	VFPFMXR	FPSCR, r5		@ restore status

-check_for_exception:
+@ The context stored in the VFP hardware is up to date with this thread
+vfp_hw_state_valid:
 	tst	r1, #FPEXC_EX
 	bne	process_exception	@ might as well handle the pending
 					@ exception before retrying branch
@@ -207,8 +205,8 @@ ENTRY(vfp_save_state)
ENDPROC(vfp_save_state)

 	.align
-last_VFP_context_address:
-	.word	last_VFP_context
+vfp_current_hw_state_address:
+	.word	vfp_current_hw_state

 	.macro	tbl_branch, base, tmp, shift
 #ifdef CONFIG_THUMB2_KERNEL
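
Note: vfp_current_hw_state_address is a literal-pool word holding the C array's address, loaded pc-relative because a symbol address cannot be encoded as an immediate. The indexed load it feeds scales the CPU number in r11 by 4 (lsl #2), the size of a 32-bit pointer, so the assembly is plain array indexing (sketch only):

	union vfp_state *owner = vfp_current_hw_state[cpu];	/* what lands in r4 */
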
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index f25e7ec8941..e381dc68505 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -21,6 +21,7 @@
 #include <asm/cputype.h>
 #include <asm/thread_notify.h>
 #include <asm/vfp.h>
+#include <asm/cpu_pm.h>

 #include "vfpinstr.h"
 #include "vfp.h"
@@ -33,7 +34,13 @@ void vfp_support_entry(void);
 void vfp_null_entry(void);

 void (*vfp_vector)(void) = vfp_null_entry;
-union vfp_state *last_VFP_context[NR_CPUS];
+
+/*
+ * The pointer to the vfpstate structure of the thread which currently
+ * owns the context held in the VFP hardware, or NULL if the hardware
+ * context is invalid.
+ */
+union vfp_state *vfp_current_hw_state[NR_CPUS];

 /*
  * Dual-use variable.
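
Note: the added comment is the substance of the rename: last_VFP_context read like a historical record, while the slot actually encodes an ownership invariant. Restated compactly (commentary only, no new code path):

	/*
	 * vfp_current_hw_state[cpu] == NULL           => hw context invalid,
	 *                                                reload before use
	 * vfp_current_hw_state[cpu] == &ti->vfpstate  => hw registers hold that
	 *                                                thread's live context
	 * Anything that clobbers the VFP register file must NULL the slot.
	 */
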
@@ -57,12 +64,12 @@ static void vfp_thread_flush(struct thread_info *thread)

 	/*
 	 * Disable VFP to ensure we initialize it first.  We must ensure
-	 * that the modification of last_VFP_context[] and hardware disable
+	 * that the modification of vfp_current_hw_state[] and hardware disable
 	 * are done for the same CPU and without preemption.
 	 */
 	cpu = get_cpu();
-	if (last_VFP_context[cpu] == vfp)
-		last_VFP_context[cpu] = NULL;
+	if (vfp_current_hw_state[cpu] == vfp)
+		vfp_current_hw_state[cpu] = NULL;
 	fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
 	put_cpu();
 }
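
Note: get_cpu()/put_cpu() bracket the update because the compare, the NULL store and the hardware disable must all execute on the same CPU. A deliberately wrong sketch of the race they prevent (do not write this):

	int cpu = smp_processor_id();	/* preemptible: the task can migrate here... */
	vfp_current_hw_state[cpu] = NULL;	/* ...and now clears another CPU's slot */
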
@@ -73,8 +80,8 @@ static void vfp_thread_exit(struct thread_info *thread)
 	union vfp_state *vfp = &thread->vfpstate;
 	unsigned int cpu = get_cpu();

-	if (last_VFP_context[cpu] == vfp)
-		last_VFP_context[cpu] = NULL;
+	if (vfp_current_hw_state[cpu] == vfp)
+		vfp_current_hw_state[cpu] = NULL;
 	put_cpu();
 }

@@ -129,9 +136,9 @@ static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
 		 * case the thread migrates to a different CPU. The
 		 * restoring is done lazily.
 		 */
-		if ((fpexc & FPEXC_EN) && last_VFP_context[cpu]) {
-			vfp_save_state(last_VFP_context[cpu], fpexc);
-			last_VFP_context[cpu]->hard.cpu = cpu;
+		if ((fpexc & FPEXC_EN) && vfp_current_hw_state[cpu]) {
+			vfp_save_state(vfp_current_hw_state[cpu], fpexc);
+			vfp_current_hw_state[cpu]->hard.cpu = cpu;
 		}
 		/*
 		 * Thread migration, just force the reloading of the
@@ -139,7 +146,7 @@ static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
 		 * contain stale data.
 		 */
 		if (thread->vfpstate.hard.cpu != cpu)
-			last_VFP_context[cpu] = NULL;
+			vfp_current_hw_state[cpu] = NULL;
 #endif

 		/*
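
Note: hard.cpu, stamped in the previous hunk's save path, records where the context was last saved, so this hunk closes the SMP loop: if the stamp disagrees with the CPU the thread is switching in on, this CPU's register file cannot hold the thread's live context and the slot is invalidated to force a reload. Annotated restatement of the check:

	if (thread->vfpstate.hard.cpu != cpu)	/* last saved on a different CPU? */
		vfp_current_hw_state[cpu] = NULL;	/* our hw registers are stale */
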
@@ -169,6 +176,35 @@ static struct notifier_block vfp_notifier_block = {
 	.notifier_call	= vfp_notifier,
 };

+static int vfp_cpu_pm_notifier(struct notifier_block *self, unsigned long cmd,
+	void *v)
+{
+	u32 fpexc = fmrx(FPEXC);
+	unsigned int cpu = smp_processor_id();
+
+	switch (cmd) {
+	case CPU_PM_ENTER:
+		if (vfp_current_hw_state[cpu]) {
+			fmxr(FPEXC, fpexc | FPEXC_EN);
+			vfp_save_state(vfp_current_hw_state[cpu], fpexc);
+			/* force a reload when coming back from idle */
+			vfp_current_hw_state[cpu] = NULL;
+			fmxr(FPEXC, fpexc & ~FPEXC_EN);
+		}
+		break;
+	case CPU_PM_ENTER_FAILED:
+	case CPU_PM_EXIT:
+		/* make sure VFP is disabled when leaving idle */
+		fmxr(FPEXC, fpexc & ~FPEXC_EN);
+		break;
+	}
+	return NOTIFY_OK;
+}
+
+static struct notifier_block vfp_cpu_pm_notifier_block = {
+	.notifier_call = vfp_cpu_pm_notifier,
+};
+
 /*
  * Raise a SIGFPE for the current process.
  * sicode describes the signal being raised.
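
Note: the notifier does nothing by itself; it fires when platform idle or suspend code brackets its low-power entry with the CPU PM framework calls. A hedged caller sketch (cpu_pm_enter()/cpu_pm_exit() are the framework API; enter_lowpower() is a made-up platform hook):

	static void platform_cpu_idle(void)
	{
		if (cpu_pm_enter())	/* runs the chain with CPU_PM_ENTER */
			return;		/* a callback refused: CPU_PM_ENTER_FAILED sent */
		enter_lowpower();	/* core may power down; VFP registers lost */
		cpu_pm_exit();		/* runs the chain with CPU_PM_EXIT */
	}

Saving eagerly on CPU_PM_ENTER while only NULLing the owner slot keeps the restore lazy: after wakeup the first VFP instruction traps and vfp_support_entry reloads the context.
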
@@ -405,6 +441,12 @@ static int vfp_pm_suspend(void)
 	struct thread_info *ti = current_thread_info();
 	u32 fpexc = fmrx(FPEXC);

+	/* If lazy disable, re-enable the VFP ready for it to be saved */
+	if (vfp_current_hw_state[ti->cpu] != &ti->vfpstate) {
+		fpexc |= FPEXC_EN;
+		fmxr(FPEXC, fpexc);
+	}
+
 	/* if vfp is on, then save state for resumption */
 	if (fpexc & FPEXC_EN) {
 		printk(KERN_DEBUG "%s: saving vfp state\n", __func__);
@@ -412,10 +454,14 @@ static int vfp_pm_suspend(void)

 		/* disable, just in case */
 		fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
+	} else if (vfp_current_hw_state[ti->cpu]) {
+		fmxr(FPEXC, fpexc | FPEXC_EN);
+		vfp_save_state(vfp_current_hw_state[ti->cpu], fpexc);
+		fmxr(FPEXC, fpexc);
 	}

 	/* clear any information we had about last context state */
-	memset(last_VFP_context, 0, sizeof(last_VFP_context));
+	memset(vfp_current_hw_state, 0, sizeof(vfp_current_hw_state));

 	return 0;
 }
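
Note: both suspend additions work around the same hardware restriction: the VFP register file can only be streamed out while FPEXC.EN is set, so a lazily-disabled context must be re-enabled just long enough to save it. The essential sequence, as in the added else-branch:

	fmxr(FPEXC, fpexc | FPEXC_EN);	/* hw must be enabled to read its registers */
	vfp_save_state(vfp_current_hw_state[ti->cpu], fpexc);
	fmxr(FPEXC, fpexc);		/* restore the original FPEXC */
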
@@ -451,7 +497,7 @@ void vfp_sync_hwstate(struct thread_info *thread)
 	 * If the thread we're interested in is the current owner of the
 	 * hardware VFP state, then we need to save its state.
 	 */
-	if (last_VFP_context[cpu] == &thread->vfpstate) {
+	if (vfp_current_hw_state[cpu] == &thread->vfpstate) {
 		u32 fpexc = fmrx(FPEXC);

 		/*
@@ -473,7 +519,7 @@ void vfp_flush_hwstate(struct thread_info *thread)
 	 * If the thread we're interested in is the current owner of the
 	 * hardware VFP state, then we need to save its state.
 	 */
-	if (last_VFP_context[cpu] == &thread->vfpstate) {
+	if (vfp_current_hw_state[cpu] == &thread->vfpstate) {
 		u32 fpexc = fmrx(FPEXC);

 		fmxr(FPEXC, fpexc & ~FPEXC_EN);
@@ -482,7 +528,7 @@ void vfp_flush_hwstate(struct thread_info *thread)
 		 * Set the context to NULL to force a reload the next time
 		 * the thread uses the VFP.
 		 */
-		last_VFP_context[cpu] = NULL;
+		vfp_current_hw_state[cpu] = NULL;
 	}

 #ifdef CONFIG_SMP
@@ -514,7 +560,7 @@ static int vfp_hotplug(struct notifier_block *b, unsigned long action,
 {
 	if (action == CPU_DYING || action == CPU_DYING_FROZEN) {
 		unsigned int cpu = (long)hcpu;
-		last_VFP_context[cpu] = NULL;
+		vfp_current_hw_state[cpu] = NULL;
 	} else if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
 		vfp_enable(NULL);
 	return NOTIFY_OK;
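
Note: the hotplug hook maintains the ownership invariant across CPU lifecycle events; restated as a switch for emphasis (the constants are the standard hotplug notifier actions):

	case CPU_DYING:		/* this CPU's register file is about to vanish */
		vfp_current_hw_state[cpu] = NULL;	/* nothing owns it any more */
		break;
	case CPU_STARTING:	/* fresh CPU: re-grant coprocessor access */
		vfp_enable(NULL);
		break;
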
@@ -563,6 +609,7 @@ static int __init vfp_init(void)
 		vfp_vector = vfp_support_entry;

 		thread_register_notifier(&vfp_notifier_block);
+		cpu_pm_register_notifier(&vfp_cpu_pm_notifier_block);
 		vfp_pm_init();

 		/*
@@ -582,7 +629,6 @@ static int __init vfp_init(void)
 				elf_hwcap |= HWCAP_VFPv3D16;
 		}
 #endif
-#ifdef CONFIG_NEON
 		/*
 		 * Check for the presence of the Advanced SIMD
 		 * load/store instructions, integer and single
@@ -590,10 +636,13 @@ static int __init vfp_init(void)
 		 * for NEON if the hardware has the MVFR registers.
 		 */
 		if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
+#ifdef CONFIG_NEON
 			if ((fmrx(MVFR1) & 0x000fff00) == 0x00011100)
 				elf_hwcap |= HWCAP_NEON;
-		}
 #endif
+			if ((fmrx(MVFR1) & 0xf0000000) == 0x10000000)
+				elf_hwcap |= HWCAP_VFPv4;
+		}
 	}
 	return 0;
 }
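
Note: the reshuffled #ifdef means the MVFR probe now runs even without CONFIG_NEON, and the new HWCAP_VFPv4 bit reaches userspace through the ELF auxiliary vector alongside HWCAP_NEON. A minimal consumer, compilable on ARM with glibc 2.16+ (older libcs must parse /proc/self/auxv instead of calling getauxval()):

	#include <sys/auxv.h>	/* getauxval(), AT_HWCAP */
	#include <asm/hwcap.h>	/* HWCAP_VFPv4 */

	/* nonzero if the kernel advertised VFPv4 (fused multiply-accumulate) */
	static int have_vfpv4(void)
	{
		return !!(getauxval(AT_HWCAP) & HWCAP_VFPv4);
	}

The kernel-side gate is also worth noting: the read_cpuid_id() mask restricts the probe to cores using the CPUID scheme (architecture field 0xf), the only ones guaranteed to implement the MVFR registers that fmrx() reads here.
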