author		Russell King <rmk+kernel@arm.linux.org.uk>	2011-07-09 08:44:04 -0400
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2011-07-09 12:11:22 -0400
commit		af61bdf035e2e4dd646b37b270bd558188a127c0
tree		81eef62331770c2dce693223b458e24c570711de	/arch/arm/vfp
parent		fe0d42203cb5616eeff68b14576a0f7e2dd56625
ARM: vfp: rename last_VFP_context to vfp_current_hw_state
Rename the slightly confusing 'last_VFP_context' variable to be more
descriptive of what it actually is. This variable stores a pointer
to the current owner's vfpstate structure for the context held in the
VFP hardware.
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
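For orientation, a minimal sketch of the ownership scheme behind vfp_current_hw_state[] follows. This is not the kernel code: NR_CPUS is given a toy value, vfp_save_state()/vfp_load_state() are stand-in helpers (in the real kernel the reload happens lazily in vfphw.S via the undefined-instruction trap), and vfp_switch_to() is a hypothetical driver for the check/save/load pattern the diff below touches.

	#include <stdio.h>

	#define NR_CPUS 2			/* toy value; set by kernel config in reality */

	union vfp_state { unsigned long regs[64]; };	/* stand-in save area */

	/*
	 * Per-CPU pointer to the vfpstate of whichever thread currently owns
	 * the context held in the VFP hardware; NULL means the hardware
	 * context is invalid and must be reloaded before use.
	 */
	static union vfp_state *vfp_current_hw_state[NR_CPUS];

	/* Hypothetical helpers standing in for the real save/restore paths. */
	static void vfp_save_state(union vfp_state *s) { (void)s; /* HW regs -> *s */ }
	static void vfp_load_state(union vfp_state *s) { (void)s; /* *s -> HW regs */ }

	/* Hand the VFP hardware on 'cpu' over to the thread state 'vfp'. */
	static void vfp_switch_to(union vfp_state *vfp, unsigned int cpu)
	{
		if (vfp_current_hw_state[cpu] == vfp)
			return;				/* hardware already owns it */

		if (vfp_current_hw_state[cpu])		/* save the previous owner */
			vfp_save_state(vfp_current_hw_state[cpu]);

		vfp_load_state(vfp);
		vfp_current_hw_state[cpu] = vfp;	/* record the new owner */
	}

	int main(void)
	{
		union vfp_state a, b;

		vfp_switch_to(&a, 0);			/* loads a's context */
		vfp_switch_to(&a, 0);			/* no-op: a already owns CPU 0 */
		vfp_switch_to(&b, 0);			/* saves a, then loads b */
		printf("owner is b: %d\n", vfp_current_hw_state[0] == &b);
		return 0;
	}

The invariant the rename makes explicit is exactly this: the array names the current owner of the hardware state, and storing NULL forces a reload the next time a thread uses the VFP.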
Diffstat (limited to 'arch/arm/vfp')
-rw-r--r--	arch/arm/vfp/vfphw.S		| 10
-rw-r--r--	arch/arm/vfp/vfpmodule.c	| 36
2 files changed, 26 insertions(+), 20 deletions(-)
diff --git a/arch/arm/vfp/vfphw.S b/arch/arm/vfp/vfphw.S
index 9897dcfc16d6..c75443e204b2 100644
--- a/arch/arm/vfp/vfphw.S
+++ b/arch/arm/vfp/vfphw.S
@@ -77,9 +77,9 @@ ENTRY(vfp_support_entry)
 	bne	look_for_VFP_exceptions	@ VFP is already enabled
 
 	DBGSTR1	"enable %x", r10
-	ldr	r3, last_VFP_context_address
+	ldr	r3, vfp_current_hw_state_address
 	orr	r1, r1, #FPEXC_EN	@ user FPEXC has the enable bit set
-	ldr	r4, [r3, r11, lsl #2]	@ last_VFP_context pointer
+	ldr	r4, [r3, r11, lsl #2]	@ vfp_current_hw_state pointer
 	bic	r5, r1, #FPEXC_EX	@ make sure exceptions are disabled
 	cmp	r4, r10
 	beq	check_for_exception	@ we are returning to the same
@@ -116,7 +116,7 @@ ENTRY(vfp_support_entry)
 
 no_old_VFP_process:
 	DBGSTR1	"load state %p", r10
-	str	r10, [r3, r11, lsl #2]	@ update the last_VFP_context pointer
+	str	r10, [r3, r11, lsl #2]	@ update the vfp_current_hw_state pointer
 					@ Load the saved state back into the VFP
 	VFPFLDMIA r10, r5		@ reload the working registers while
 					@ FPEXC is in a safe state
@@ -207,8 +207,8 @@ ENTRY(vfp_save_state)
 ENDPROC(vfp_save_state)
 
 	.align
-last_VFP_context_address:
-	.word	last_VFP_context
+vfp_current_hw_state_address:
+	.word	vfp_current_hw_state
 
 	.macro	tbl_branch, base, tmp, shift
 #ifdef CONFIG_THUMB2_KERNEL
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index f25e7ec89416..3640351171b8 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -33,7 +33,13 @@ void vfp_support_entry(void);
 void vfp_null_entry(void);
 
 void (*vfp_vector)(void) = vfp_null_entry;
-union vfp_state *last_VFP_context[NR_CPUS];
+
+/*
+ * The pointer to the vfpstate structure of the thread which currently
+ * owns the context held in the VFP hardware, or NULL if the hardware
+ * context is invalid.
+ */
+union vfp_state *vfp_current_hw_state[NR_CPUS];
 
 /*
  * Dual-use variable.
@@ -57,12 +63,12 @@ static void vfp_thread_flush(struct thread_info *thread)
 
 	/*
 	 * Disable VFP to ensure we initialize it first.  We must ensure
-	 * that the modification of last_VFP_context[] and hardware disable
+	 * that the modification of vfp_current_hw_state[] and hardware disable
 	 * are done for the same CPU and without preemption.
 	 */
 	cpu = get_cpu();
-	if (last_VFP_context[cpu] == vfp)
-		last_VFP_context[cpu] = NULL;
+	if (vfp_current_hw_state[cpu] == vfp)
+		vfp_current_hw_state[cpu] = NULL;
 	fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
 	put_cpu();
 }
@@ -73,8 +79,8 @@ static void vfp_thread_exit(struct thread_info *thread)
 	union vfp_state *vfp = &thread->vfpstate;
 	unsigned int cpu = get_cpu();
 
-	if (last_VFP_context[cpu] == vfp)
-		last_VFP_context[cpu] = NULL;
+	if (vfp_current_hw_state[cpu] == vfp)
+		vfp_current_hw_state[cpu] = NULL;
 	put_cpu();
 }
 
@@ -129,9 +135,9 @@ static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
 		 * case the thread migrates to a different CPU. The
 		 * restoring is done lazily.
 		 */
-		if ((fpexc & FPEXC_EN) && last_VFP_context[cpu]) {
-			vfp_save_state(last_VFP_context[cpu], fpexc);
-			last_VFP_context[cpu]->hard.cpu = cpu;
+		if ((fpexc & FPEXC_EN) && vfp_current_hw_state[cpu]) {
+			vfp_save_state(vfp_current_hw_state[cpu], fpexc);
+			vfp_current_hw_state[cpu]->hard.cpu = cpu;
 		}
 		/*
 		 * Thread migration, just force the reloading of the
@@ -139,7 +145,7 @@ static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
 		 * contain stale data.
 		 */
 		if (thread->vfpstate.hard.cpu != cpu)
-			last_VFP_context[cpu] = NULL;
+			vfp_current_hw_state[cpu] = NULL;
 #endif
 
 	/*
@@ -415,7 +421,7 @@ static int vfp_pm_suspend(void)
 	}
 
 	/* clear any information we had about last context state */
-	memset(last_VFP_context, 0, sizeof(last_VFP_context));
+	memset(vfp_current_hw_state, 0, sizeof(vfp_current_hw_state));
 
 	return 0;
 }
@@ -451,7 +457,7 @@ void vfp_sync_hwstate(struct thread_info *thread)
 	 * If the thread we're interested in is the current owner of the
 	 * hardware VFP state, then we need to save its state.
 	 */
-	if (last_VFP_context[cpu] == &thread->vfpstate) {
+	if (vfp_current_hw_state[cpu] == &thread->vfpstate) {
 		u32 fpexc = fmrx(FPEXC);
 
 		/*
@@ -473,7 +479,7 @@ void vfp_flush_hwstate(struct thread_info *thread)
 	 * If the thread we're interested in is the current owner of the
 	 * hardware VFP state, then we need to save its state.
 	 */
-	if (last_VFP_context[cpu] == &thread->vfpstate) {
+	if (vfp_current_hw_state[cpu] == &thread->vfpstate) {
 		u32 fpexc = fmrx(FPEXC);
 
 		fmxr(FPEXC, fpexc & ~FPEXC_EN);
@@ -482,7 +488,7 @@ void vfp_flush_hwstate(struct thread_info *thread)
 		 * Set the context to NULL to force a reload the next time
 		 * the thread uses the VFP.
 		 */
-		last_VFP_context[cpu] = NULL;
+		vfp_current_hw_state[cpu] = NULL;
 	}
 
 #ifdef CONFIG_SMP
@@ -514,7 +520,7 @@ static int vfp_hotplug(struct notifier_block *b, unsigned long action,
 {
 	if (action == CPU_DYING || action == CPU_DYING_FROZEN) {
 		unsigned int cpu = (long)hcpu;
-		last_VFP_context[cpu] = NULL;
+		vfp_current_hw_state[cpu] = NULL;
 	} else if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
 		vfp_enable(NULL);
 	return NOTIFY_OK;