author	Russell King <rmk@dyn-67.arm.linux.org.uk>	2007-07-18 04:37:10 -0400
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2007-07-20 04:39:57 -0400
commit	228adef16d6e7b7725ef6b9ba760810d5966afa5 (patch)
tree	f7473090e2284a7f3b2933d97e684e4b2445d79c /arch/arm/vfp
parent	21d1ca04532005c50ed57c2b2948e465b2e90720 (diff)
[ARM] vfp: make fpexc bit names less verbose
Use the fpexc abbreviated names instead of long verbose names for fpexc bits.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
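For reference, a minimal sketch of what the shortened names stand for, assuming the FPEXC bit layout of the ARM VFP architecture (EX in bit 31, EN in bit 30); the real macro definitions live in the kernel's asm/vfp.h header, which is outside the arch/arm/vfp diffstat shown here:

	/* Illustrative only -- assumed values, not part of this diff. */
	#define FPEXC_EX	(1 << 31)	/* was FPEXC_EXCEPTION: exception pending */
	#define FPEXC_EN	(1 << 30)	/* was FPEXC_ENABLE: VFP enabled */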
Diffstat (limited to 'arch/arm/vfp')
-rw-r--r--	arch/arm/vfp/vfphw.S	| 12
-rw-r--r--	arch/arm/vfp/vfpmodule.c	| 12
2 files changed, 12 insertions, 12 deletions
diff --git a/arch/arm/vfp/vfphw.S b/arch/arm/vfp/vfphw.S
index d4b7b229631d..0ac022f800a1 100644
--- a/arch/arm/vfp/vfphw.S
+++ b/arch/arm/vfp/vfphw.S
@@ -74,14 +74,14 @@ vfp_support_entry:
 
 	VFPFMRX	r1, FPEXC		@ Is the VFP enabled?
 	DBGSTR1	"fpexc %08x", r1
-	tst	r1, #FPEXC_ENABLE
+	tst	r1, #FPEXC_EN
 	bne	look_for_VFP_exceptions	@ VFP is already enabled
 
 	DBGSTR1	"enable %x", r10
 	ldr	r3, last_VFP_context_address
-	orr	r1, r1, #FPEXC_ENABLE	@ user FPEXC has the enable bit set
+	orr	r1, r1, #FPEXC_EN	@ user FPEXC has the enable bit set
 	ldr	r4, [r3, r11, lsl #2]	@ last_VFP_context pointer
-	bic	r5, r1, #FPEXC_EXCEPTION @ make sure exceptions are disabled
+	bic	r5, r1, #FPEXC_EX	@ make sure exceptions are disabled
 	cmp	r4, r10
 	beq	check_for_exception	@ we are returning to the same
 					@ process, so the registers are
@@ -124,7 +124,7 @@ no_old_VFP_process:
 	VFPFMXR	FPSCR, r5		@ restore status
 
 check_for_exception:
-	tst	r1, #FPEXC_EXCEPTION
+	tst	r1, #FPEXC_EX
 	bne	process_exception	@ might as well handle the pending
 					@ exception before retrying branch
 					@ out before setting an FPEXC that
@@ -136,10 +136,10 @@ check_for_exception:
 
 
 look_for_VFP_exceptions:
-	tst	r1, #FPEXC_EXCEPTION
+	tst	r1, #FPEXC_EX
 	bne	process_exception
 	VFPFMRX	r5, FPSCR
-	tst	r5, #FPSCR_IXE		@ IXE doesn't set FPEXC_EXCEPTION !
+	tst	r5, #FPSCR_IXE		@ IXE doesn't set FPEXC_EX !
 	bne	process_exception
 
 	@ Fall into hand on to next handler - appropriate coproc instr
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index 1106b5f9cf19..04ddab2bd876 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -53,7 +53,7 @@ static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
 	 * case the thread migrates to a different CPU. The
 	 * restoring is done lazily.
 	 */
-	if ((fpexc & FPEXC_ENABLE) && last_VFP_context[cpu]) {
+	if ((fpexc & FPEXC_EN) && last_VFP_context[cpu]) {
 		vfp_save_state(last_VFP_context[cpu], fpexc);
 		last_VFP_context[cpu]->hard.cpu = cpu;
 	}
@@ -70,7 +70,7 @@ static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
 	 * Always disable VFP so we can lazily save/restore the
 	 * old state.
 	 */
-	fmxr(FPEXC, fpexc & ~FPEXC_ENABLE);
+	fmxr(FPEXC, fpexc & ~FPEXC_EN);
 	return NOTIFY_DONE;
 }
 
@@ -81,13 +81,13 @@ static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
 	 */
 	memset(vfp, 0, sizeof(union vfp_state));
 
-	vfp->hard.fpexc = FPEXC_ENABLE;
+	vfp->hard.fpexc = FPEXC_EN;
 	vfp->hard.fpscr = FPSCR_ROUND_NEAREST;
 
 	/*
 	 * Disable VFP to ensure we initialise it first.
 	 */
-	fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_ENABLE);
+	fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
 	}
 
 	/* flush and release case: Per-thread VFP cleanup. */
@@ -229,7 +229,7 @@ void VFP9_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
 	/*
 	 * Enable access to the VFP so we can handle the bounce.
 	 */
-	fmxr(FPEXC, fpexc & ~(FPEXC_EXCEPTION|FPEXC_INV|FPEXC_UFC|FPEXC_IOC));
+	fmxr(FPEXC, fpexc & ~(FPEXC_EX|FPEXC_INV|FPEXC_UFC|FPEXC_IOC));
 
 	orig_fpscr = fpscr = fmrx(FPSCR);
 
@@ -248,7 +248,7 @@ void VFP9_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
 	/*
 	 * Modify fpscr to indicate the number of iterations remaining
 	 */
-	if (fpexc & FPEXC_EXCEPTION) {
+	if (fpexc & FPEXC_EX) {
 		u32 len;
 
 		len = fpexc + (1 << FPEXC_LENGTH_BIT);