 arch/arm/vfp/vfpmodule.c | 83 ++++++++++++++++++++++++++++++++++++++++-------------
 1 file changed, 62 insertions(+), 21 deletions(-)
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index 2d7423af1197..aed05bc3c2ea 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -38,16 +38,72 @@ union vfp_state *last_VFP_context[NR_CPUS];
  */
 unsigned int VFP_arch;
 
+/*
+ * Per-thread VFP initialization.
+ */
+static void vfp_thread_flush(struct thread_info *thread)
+{
+	union vfp_state *vfp = &thread->vfpstate;
+	unsigned int cpu;
+
+	memset(vfp, 0, sizeof(union vfp_state));
+
+	vfp->hard.fpexc = FPEXC_EN;
+	vfp->hard.fpscr = FPSCR_ROUND_NEAREST;
+
+	/*
+	 * Disable VFP to ensure we initialize it first. We must ensure
+	 * that the modification of last_VFP_context[] and hardware disable
+	 * are done for the same CPU and without preemption.
+	 */
+	cpu = get_cpu();
+	if (last_VFP_context[cpu] == vfp)
+		last_VFP_context[cpu] = NULL;
+	fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
+	put_cpu();
+}
+
+static void vfp_thread_release(struct thread_info *thread)
+{
+	/* release case: Per-thread VFP cleanup. */
+	union vfp_state *vfp = &thread->vfpstate;
+	unsigned int cpu = thread->cpu;
+
+	if (last_VFP_context[cpu] == vfp)
+		last_VFP_context[cpu] = NULL;
+}
+
+/*
+ * When this function is called with the following 'cmd's, the following
+ * is true while this function is being run:
+ * THREAD_NOTIFY_SWITCH:
+ *  - the previously running thread will not be scheduled onto another CPU.
+ *  - the next thread to be run (v) will not be running on another CPU.
+ *  - thread->cpu is the local CPU number
+ *  - not preemptible as we're called in the middle of a thread switch
+ * THREAD_NOTIFY_FLUSH:
+ *  - the thread (v) will be running on the local CPU, so
+ *	v === current_thread_info()
+ *  - thread->cpu is the local CPU number at the time it is accessed,
+ *	but may change at any time.
+ *  - we could be preempted if tree preempt rcu is enabled, so
+ *	it is unsafe to use thread->cpu.
+ * THREAD_NOTIFY_RELEASE:
+ *  - the thread (v) will not be running on any CPU; it is a dead thread.
+ *  - thread->cpu will be the last CPU the thread ran on, which may not
+ *	be the current CPU.
+ *  - we could be preempted if tree preempt rcu is enabled.
+ */
 static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
 {
 	struct thread_info *thread = v;
-	union vfp_state *vfp;
-	__u32 cpu = thread->cpu;
 
 	if (likely(cmd == THREAD_NOTIFY_SWITCH)) {
 		u32 fpexc = fmrx(FPEXC);
 
 #ifdef CONFIG_SMP
+		unsigned int cpu = thread->cpu;
+
 		/*
 		 * On SMP, if VFP is enabled, save the old state in
 		 * case the thread migrates to a different CPU. The
@@ -74,25 +130,10 @@ static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
 		return NOTIFY_DONE;
 	}
 
-	vfp = &thread->vfpstate;
-	if (cmd == THREAD_NOTIFY_FLUSH) {
-		/*
-		 * Per-thread VFP initialisation.
-		 */
-		memset(vfp, 0, sizeof(union vfp_state));
-
-		vfp->hard.fpexc = FPEXC_EN;
-		vfp->hard.fpscr = FPSCR_ROUND_NEAREST;
-
-		/*
-		 * Disable VFP to ensure we initialise it first.
-		 */
-		fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
-	}
-
-	/* flush and release case: Per-thread VFP cleanup. */
-	if (last_VFP_context[cpu] == vfp)
-		last_VFP_context[cpu] = NULL;
-
+	if (cmd == THREAD_NOTIFY_FLUSH)
+		vfp_thread_flush(thread);
+	else
+		vfp_thread_release(thread);
 
 	return NOTIFY_DONE;
 }
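
Why the new get_cpu()/put_cpu() pairing in vfp_thread_flush() matters: with tree preempt RCU enabled, the FLUSH notification can be preempted between reading the CPU number and touching last_VFP_context[], so the table entry being cleared and the VFP hardware being disabled could belong to two different CPUs. A minimal before/after sketch of the pattern, distilled from the hunks above (it assumes this file's last_VFP_context[], fmrx()/fmxr() and FPEXC_EN definitions, so it is not buildable on its own):

	/* Racy (old code): thread->cpu may be stale by the time it is
	 * used, because nothing here prevents preemption/migration. */
	unsigned int cpu = thread->cpu;
	if (last_VFP_context[cpu] == vfp)
		last_VFP_context[cpu] = NULL;
	fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);

	/* Fixed (new code): get_cpu() disables preemption, so the
	 * last_VFP_context[] update and the hardware disable happen on
	 * the same CPU; put_cpu() re-enables preemption afterwards. */
	cpu = get_cpu();
	if (last_VFP_context[cpu] == vfp)
		last_VFP_context[cpu] = NULL;
	fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
	put_cpu();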
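
For orientation, vfp_notifier() receives these cmd values through the ARM thread_notify chain; the registration lives elsewhere in vfpmodule.c and is untouched by this patch. The hookup looks roughly like this (paraphrased from the rest of the file, not part of this diff):

	static struct notifier_block vfp_notifier_block = {
		.notifier_call	= vfp_notifier,
	};

	/* in vfp_init(), at arch init time: */
	thread_register_notifier(&vfp_notifier_block);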
