Diffstat (limited to 'arch/arm/vfp/vfpmodule.c')
 arch/arm/vfp/vfpmodule.c | 127 ++++++++++++++++++++++++-----------------
 1 file changed, 71 insertions(+), 56 deletions(-)
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index f25e7ec89416..0a96f71f0abd 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -33,7 +33,6 @@ void vfp_support_entry(void);
 void vfp_null_entry(void);
 
 void (*vfp_vector)(void) = vfp_null_entry;
-union vfp_state *last_VFP_context[NR_CPUS];
 
 /*
  * Dual-use variable.
@@ -43,6 +42,46 @@ union vfp_state *last_VFP_context[NR_CPUS];
 unsigned int VFP_arch;
 
 /*
+ * The pointer to the vfpstate structure of the thread which currently
+ * owns the context held in the VFP hardware, or NULL if the hardware
+ * context is invalid.
+ *
+ * For UP, this is sufficient to tell which thread owns the VFP context.
+ * However, for SMP, we also need to check the CPU number stored in the
+ * saved state to catch migrations.
+ */
+union vfp_state *vfp_current_hw_state[NR_CPUS];
+
+/*
+ * Is 'thread's most up-to-date state stored in this CPU's hardware?
+ * Must be called from non-preemptible context.
+ */
+static bool vfp_state_in_hw(unsigned int cpu, struct thread_info *thread)
+{
+#ifdef CONFIG_SMP
+        if (thread->vfpstate.hard.cpu != cpu)
+                return false;
+#endif
+        return vfp_current_hw_state[cpu] == &thread->vfpstate;
+}
+
+/*
+ * Force a reload of the VFP context from the thread structure.  We do
+ * this by ensuring that access to the VFP hardware is disabled and
+ * vfp_current_hw_state cleared.  Must be called from non-preemptible context.
+ */
+static void vfp_force_reload(unsigned int cpu, struct thread_info *thread)
+{
+        if (vfp_state_in_hw(cpu, thread)) {
+                fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
+                vfp_current_hw_state[cpu] = NULL;
+        }
+#ifdef CONFIG_SMP
+        thread->vfpstate.hard.cpu = NR_CPUS;
+#endif
+}
+
+/*
  * Per-thread VFP initialization.
  */
 static void vfp_thread_flush(struct thread_info *thread)
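The two-part test in vfp_state_in_hw() is the heart of this change: on SMP,
the per-CPU pointer vfp_current_hw_state[cpu] alone cannot distinguish "this
thread still owns this CPU's registers" from "this thread owned them, ran the
VFP on another CPU, and migrated back". Below is a minimal user-space model of
the check; the names (hw_owner, state_in_hw) and the migration walkthrough are
illustrative only, assuming the saved state's cpu field is stamped at each
hardware save:

    #include <stdbool.h>
    #include <stdio.h>

    #define NR_CPUS 4                        /* sentinel: one past the last CPU */

    struct state { unsigned int cpu; };      /* models vfp_state.hard.cpu */
    static struct state *hw_owner[NR_CPUS];  /* models vfp_current_hw_state */

    static bool state_in_hw(unsigned int cpu, struct state *st)
    {
        /* Both checks are needed: the pointer alone cannot see that the
         * state was last saved on a different CPU. */
        return st->cpu == cpu && hw_owner[cpu] == st;
    }

    int main(void)
    {
        struct state t = { .cpu = NR_CPUS };

        hw_owner[0] = &t; t.cpu = 0;    /* thread's state saved on CPU0 */
        hw_owner[1] = &t; t.cpu = 1;    /* migrates; state saved on CPU1 */

        /* Back on CPU0: hw_owner[0] still points at t, but CPU0's
         * registers are stale -- the cpu field catches the migration. */
        printf("live on cpu0? %d\n", state_in_hw(0, &t));   /* prints 0 */
        printf("live on cpu1? %d\n", state_in_hw(1, &t));   /* prints 1 */
        return 0;
    }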
@@ -50,21 +89,27 @@ static void vfp_thread_flush(struct thread_info *thread)
         union vfp_state *vfp = &thread->vfpstate;
         unsigned int cpu;
 
-        memset(vfp, 0, sizeof(union vfp_state));
-
-        vfp->hard.fpexc = FPEXC_EN;
-        vfp->hard.fpscr = FPSCR_ROUND_NEAREST;
-
         /*
          * Disable VFP to ensure we initialize it first.  We must ensure
-         * that the modification of last_VFP_context[] and hardware disable
-         * are done for the same CPU and without preemption.
+         * that the modification of vfp_current_hw_state[] and hardware
+         * disable are done for the same CPU and without preemption.
+         *
+         * Do this first to ensure that preemption won't overwrite our
+         * state saving should access to the VFP be enabled at this point.
          */
         cpu = get_cpu();
-        if (last_VFP_context[cpu] == vfp)
-                last_VFP_context[cpu] = NULL;
+        if (vfp_current_hw_state[cpu] == vfp)
+                vfp_current_hw_state[cpu] = NULL;
         fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
         put_cpu();
+
+        memset(vfp, 0, sizeof(union vfp_state));
+
+        vfp->hard.fpexc = FPEXC_EN;
+        vfp->hard.fpscr = FPSCR_ROUND_NEAREST;
+#ifdef CONFIG_SMP
+        vfp->hard.cpu = NR_CPUS;
+#endif
 }
 
 static void vfp_thread_exit(struct thread_info *thread)
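The reordering in vfp_thread_flush() above is a bug fix, not just tidying: the
function runs preemptible until get_cpu(), so initializing the state before
deregistering from the hardware leaves a window. An illustrative interleaving
under the old ordering (hypothetical timeline, not kernel code):

    /*
     *   vfp_thread_flush()              preempting switch (vfp_notifier)
     *   ------------------              --------------------------------
     *   memset(vfp, 0, ...);
     *   vfp->hard.fpexc = FPEXC_EN;
     *                                   vfp_save_state(vfp_current_hw_state[cpu], ...);
     *                                     // this thread is still registered as
     *                                     // owner, so stale hardware registers
     *                                     // overwrite the freshly reset state
     *   cpu = get_cpu();
     *   ...
     */

Disabling VFP and clearing vfp_current_hw_state[cpu] before the memset closes
the window: once deregistered, no switch-out will save into this vfpstate.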
@@ -73,8 +118,8 @@ static void vfp_thread_exit(struct thread_info *thread)
         union vfp_state *vfp = &thread->vfpstate;
         unsigned int cpu = get_cpu();
 
-        if (last_VFP_context[cpu] == vfp)
-                last_VFP_context[cpu] = NULL;
+        if (vfp_current_hw_state[cpu] == vfp)
+                vfp_current_hw_state[cpu] = NULL;
         put_cpu();
 }
 
@@ -84,6 +129,9 @@ static void vfp_thread_copy(struct thread_info *thread)
 
         vfp_sync_hwstate(parent);
         thread->vfpstate = parent->vfpstate;
+#ifdef CONFIG_SMP
+        thread->vfpstate.hard.cpu = NR_CPUS;
+#endif
 }
 
 /*
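vfp_thread_copy() stamps the child's copied state with NR_CPUS, one past the
largest valid CPU index, so the copy is treated as never having been live in
any CPU's registers. A tiny hedged sketch of the sentinel's effect (the helper
name is hypothetical):

    /* A state stamped with the NR_CPUS sentinel fails the SMP half of
     * vfp_state_in_hw() on every CPU, whatever the per-CPU pointers say. */
    static bool vfp_state_never_live(unsigned int saved_cpu)
    {
        return saved_cpu >= NR_CPUS;    /* NR_CPUS == "no CPU" */
    }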
@@ -129,17 +177,8 @@ static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
          * case the thread migrates to a different CPU. The
          * restoring is done lazily.
          */
-        if ((fpexc & FPEXC_EN) && last_VFP_context[cpu]) {
-                vfp_save_state(last_VFP_context[cpu], fpexc);
-                last_VFP_context[cpu]->hard.cpu = cpu;
-        }
-        /*
-         * Thread migration, just force the reloading of the
-         * state on the new CPU in case the VFP registers
-         * contain stale data.
-         */
-        if (thread->vfpstate.hard.cpu != cpu)
-                last_VFP_context[cpu] = NULL;
+        if ((fpexc & FPEXC_EN) && vfp_current_hw_state[cpu])
+                vfp_save_state(vfp_current_hw_state[cpu], fpexc);
 #endif
 
         /*
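On the switch-out side the migration handling disappears entirely: it is now
performed lazily, at reload time, by vfp_state_in_hw(). The removed hard.cpu
bookkeeping is presumably taken over by the low-level save path elsewhere in
the same commit; this view is limited to vfpmodule.c. A compact model of the
remaining switch-out rule (FPEXC_EN matches the real enable bit; the types and
helpers are illustrative):

    #define FPEXC_EN (1u << 30)              /* FPEXC enable bit */
    #define NR_CPUS  4

    struct state { unsigned int cpu; };
    static struct state *hw_owner[NR_CPUS];  /* models vfp_current_hw_state */

    static void save_state(struct state *st) { /* dump hw regs into *st */ }

    /* Lazy switch-out: save the live owner's registers; never restore
     * here. Restore is deferred to the first VFP trap after switch-in. */
    static void model_switch_out(unsigned int cpu, unsigned int fpexc)
    {
        if ((fpexc & FPEXC_EN) && hw_owner[cpu])
            save_state(hw_owner[cpu]);
    }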
@@ -415,7 +454,7 @@ static int vfp_pm_suspend(void)
         }
 
         /* clear any information we had about last context state */
-        memset(last_VFP_context, 0, sizeof(last_VFP_context));
+        memset(vfp_current_hw_state, 0, sizeof(vfp_current_hw_state));
 
         return 0;
 }
@@ -443,15 +482,15 @@ static void vfp_pm_init(void)
 static inline void vfp_pm_init(void) { }
 #endif /* CONFIG_PM */
 
+/*
+ * Ensure that the VFP state stored in 'thread->vfpstate' is up to date
+ * with the hardware state.
+ */
 void vfp_sync_hwstate(struct thread_info *thread)
 {
         unsigned int cpu = get_cpu();
 
-        /*
-         * If the thread we're interested in is the current owner of the
-         * hardware VFP state, then we need to save its state.
-         */
-        if (last_VFP_context[cpu] == &thread->vfpstate) {
+        if (vfp_state_in_hw(cpu, thread)) {
                 u32 fpexc = fmrx(FPEXC);
 
                 /*
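vfp_sync_hwstate() now shares the ownership test with the rest of the file,
and on SMP it correctly declines to save when the hardware copy is stale after
a migration. Any path that reads thread->vfpstate from memory must call it
first; a hedged sketch of a reader, modeled loosely on the ptrace path:

    /* Hypothetical reader: flush the live hardware copy into memory
     * before copying the saved state out. */
    static void read_vfp_state(struct thread_info *thread,
                               union vfp_state *out)
    {
        vfp_sync_hwstate(thread);    /* hardware -> thread->vfpstate */
        *out = thread->vfpstate;     /* the memory copy is now current */
    }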
@@ -465,36 +504,13 @@ void vfp_sync_hwstate(struct thread_info *thread)
         put_cpu();
 }
 
+/* Ensure that the thread reloads the hardware VFP state on the next use. */
 void vfp_flush_hwstate(struct thread_info *thread)
 {
         unsigned int cpu = get_cpu();
 
-        /*
-         * If the thread we're interested in is the current owner of the
-         * hardware VFP state, then we need to save its state.
-         */
-        if (last_VFP_context[cpu] == &thread->vfpstate) {
-                u32 fpexc = fmrx(FPEXC);
-
-                fmxr(FPEXC, fpexc & ~FPEXC_EN);
-
-                /*
-                 * Set the context to NULL to force a reload the next time
-                 * the thread uses the VFP.
-                 */
-                last_VFP_context[cpu] = NULL;
-        }
+        vfp_force_reload(cpu, thread);
 
-#ifdef CONFIG_SMP
-        /*
-         * For SMP we still have to take care of the case where the thread
-         * migrates to another CPU and then back to the original CPU on which
-         * the last VFP user is still the same thread. Mark the thread VFP
-         * state as belonging to a non-existent CPU so that the saved one will
-         * be reloaded in the above case.
-         */
-        thread->vfpstate.hard.cpu = NR_CPUS;
-#endif
         put_cpu();
 }
 
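vfp_flush_hwstate() collapses to vfp_force_reload(), which combines the two
halves of the old body: disabling the hardware and dropping the ownership
pointer, plus the SMP sentinel that was open-coded here. The writer-side
counterpart to the reader sketch above (again hypothetical):

    /* Hypothetical writer: install new state in memory, then invalidate
     * any live hardware copy so the next VFP use reloads from
     * thread->vfpstate. */
    static void write_vfp_state(struct thread_info *thread,
                                const union vfp_state *in)
    {
        thread->vfpstate = *in;
        vfp_flush_hwstate(thread);
    }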
@@ -513,8 +529,7 @@ static int vfp_hotplug(struct notifier_block *b, unsigned long action,
                        void *hcpu)
 {
         if (action == CPU_DYING || action == CPU_DYING_FROZEN) {
-                unsigned int cpu = (long)hcpu;
-                last_VFP_context[cpu] = NULL;
+                vfp_force_reload((long)hcpu, current_thread_info());
         } else if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
                 vfp_enable(NULL);
         return NOTIFY_OK;