path: root/arch/arm/vfp/vfphw.S
author    Jonathan Herman <hermanjl@cs.unc.edu>    2013-01-17 16:15:55 -0500
committer Jonathan Herman <hermanjl@cs.unc.edu>    2013-01-17 16:15:55 -0500
commit    8dea78da5cee153b8af9c07a2745f6c55057fe12 (patch)
tree      a8f4d49d63b1ecc92f2fddceba0655b2472c5bd9 /arch/arm/vfp/vfphw.S
parent    406089d01562f1e2bf9f089fd7637009ebaad589 (diff)
Patched in Tegra support.
Diffstat (limited to 'arch/arm/vfp/vfphw.S')
-rw-r--r--  arch/arm/vfp/vfphw.S  69
1 file changed, 20 insertions(+), 49 deletions(-)
diff --git a/arch/arm/vfp/vfphw.S b/arch/arm/vfp/vfphw.S
index ea0349f6358..404538ae591 100644
--- a/arch/arm/vfp/vfphw.S
+++ b/arch/arm/vfp/vfphw.S
@@ -16,7 +16,6 @@
  */
 #include <asm/thread_info.h>
 #include <asm/vfpmacros.h>
-#include <linux/kern_levels.h>
 #include "../kernel/entry-header.S"
 
 	.macro	DBGSTR, str
@@ -25,7 +24,7 @@
 	add	r0, pc, #4
 	bl	printk
 	b	1f
-	.asciz	KERN_DEBUG "VFP: \str\n"
+	.asciz	"<7>VFP: \str\n"
 	.balign 4
 1:	ldmfd	sp!, {r0-r3, ip, lr}
 #endif
@@ -38,7 +37,7 @@
 	add	r0, pc, #4
 	bl	printk
 	b	1f
-	.asciz	KERN_DEBUG "VFP: \str\n"
+	.asciz	"<7>VFP: \str\n"
 	.balign 4
 1:	ldmfd	sp!, {r0-r3, ip, lr}
 #endif
@@ -53,7 +52,7 @@
 	add	r0, pc, #4
 	bl	printk
 	b	1f
-	.asciz	KERN_DEBUG "VFP: \str\n"
+	.asciz	"<7>VFP: \str\n"
 	.balign 4
 1:	ldmfd	sp!, {r0-r3, ip, lr}
 #endif
@@ -62,13 +61,13 @@
 
 @ VFP hardware support entry point.
 @
-@  r0  = instruction opcode (32-bit ARM or two 16-bit Thumb)
-@  r2  = PC value to resume execution after successful emulation
-@  r9  = normal "successful" return address
+@  r0  = faulted instruction
+@  r2  = faulted PC+4
+@  r9  = successful return
 @  r10 = vfp_state union
 @  r11 = CPU number
-@  lr  = unrecognised instruction return address
-@  IRQs enabled.
+@  lr  = failure return
+
 ENTRY(vfp_support_entry)
 	DBGSTR3	"instr %08x pc %08x state %p", r0, r2, r10
 
@@ -83,22 +82,19 @@ ENTRY(vfp_support_entry)
 	ldr	r4, [r3, r11, lsl #2]	@ vfp_current_hw_state pointer
 	bic	r5, r1, #FPEXC_EX	@ make sure exceptions are disabled
 	cmp	r4, r10			@ this thread owns the hw context?
-#ifndef CONFIG_SMP
-	@ For UP, checking that this thread owns the hw context is
-	@ sufficient to determine that the hardware state is valid.
 	beq	vfp_hw_state_valid
 
-	@ On UP, we lazily save the VFP context. As a different
-	@ thread wants ownership of the VFP hardware, save the old
-	@ state if there was a previous (valid) owner.
-
 	VFPFMXR	FPEXC, r5		@ enable VFP, disable any pending
 					@ exceptions, so we can get at the
 					@ rest of it
 
+#ifndef CONFIG_SMP
+	@ Save out the current registers to the old thread state
+	@ No need for SMP since this is not done lazily
+
 	DBGSTR1	"save old state %p", r4
-	cmp	r4, #0			@ if the vfp_current_hw_state is NULL
-	beq	vfp_reload_hw		@ then the hw state needs reloading
+	cmp	r4, #0
+	beq	no_old_VFP_process
 	VFPFSTMIA r4, r5		@ save the working registers
 	VFPFMRX	r5, FPSCR		@ current status
 #ifndef CONFIG_CPU_FEROCEON
@@ -111,33 +107,11 @@ ENTRY(vfp_support_entry)
 1:
 #endif
 	stmia	r4, {r1, r5, r6, r8}	@ save FPEXC, FPSCR, FPINST, FPINST2
-vfp_reload_hw:
-
-#else
-	@ For SMP, if this thread does not own the hw context, then we
-	@ need to reload it. No need to save the old state as on SMP,
-	@ we always save the state when we switch away from a thread.
-	bne	vfp_reload_hw
-
-	@ This thread has ownership of the current hardware context.
-	@ However, it may have been migrated to another CPU, in which
-	@ case the saved state is newer than the hardware context.
-	@ Check this by looking at the CPU number which the state was
-	@ last loaded onto.
-	ldr	ip, [r10, #VFP_CPU]
-	teq	ip, r11
-	beq	vfp_hw_state_valid
-
-vfp_reload_hw:
-	@ We're loading this threads state into the VFP hardware. Update
-	@ the CPU number which contains the most up to date VFP context.
-	str	r11, [r10, #VFP_CPU]
-
-	VFPFMXR	FPEXC, r5		@ enable VFP, disable any pending
-					@ exceptions, so we can get at the
-					@ rest of it
+					@ and point r4 at the word at the
+					@ start of the register dump
 #endif
 
+no_old_VFP_process:
 	DBGSTR1	"load state %p", r10
 	str	r10, [r3, r11, lsl #2]	@ update the vfp_current_hw_state pointer
 	@ Load the saved state back into the VFP
@@ -162,12 +136,9 @@ vfp_hw_state_valid:
 					@ exception before retrying branch
 					@ out before setting an FPEXC that
 					@ stops us reading stuff
-	VFPFMXR	FPEXC, r1		@ Restore FPEXC last
-	sub	r2, r2, #4		@ Retry current instruction - if Thumb
-	str	r2, [sp, #S_PC]		@ mode it's two 16-bit instructions,
-					@ else it's one 32-bit instruction, so
-					@ always subtract 4 from the following
-					@ instruction address.
+	VFPFMXR	FPEXC, r1		@ restore FPEXC last
+	sub	r2, r2, #4
+	str	r2, [sp, #S_PC]		@ retry the instruction
#ifdef CONFIG_PREEMPT
 	get_thread_info	r10
 	ldr	r4, [r10, #TI_PREEMPT]	@ get preempt count
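
The comment blocks removed above describe when the VFP hardware can be trusted to still hold this thread's registers: on UP, ownership of the hw context (the cmp r4, r10 test) is enough, while on SMP the state is additionally stamped with the CPU it was last loaded on (the word at #VFP_CPU), so a migrated thread falls back to reloading from memory. The following is a minimal C sketch of that decision, not kernel code: the helper name vfp_hw_state_is_valid(), the last_cpu parameter, and NR_CPUS value are hypothetical; only vfp_current_hw_state and the vfp_state union appear in the diff itself.

	/* Sketch only -- names beyond vfp_current_hw_state and vfp_state
	 * are hypothetical, chosen to mirror the assembly above. */
	#define NR_CPUS 4				/* illustrative */

	union vfp_state;				/* per-thread VFP register dump */

	static union vfp_state *vfp_current_hw_state[NR_CPUS];	/* per-CPU owner */

	static int vfp_hw_state_is_valid(union vfp_state *thread_state,
					 unsigned int cpu, unsigned int last_cpu)
	{
		/* Hardware contents are only trustworthy if this thread is the
		 * recorded owner of this CPU's VFP unit (cmp r4, r10 above)... */
		if (vfp_current_hw_state[cpu] != thread_state)
			return 0;
	#ifdef CONFIG_SMP
		/* ...and, on SMP, if the state was last loaded on this same CPU;
		 * after a migration the saved copy is newer than the hardware. */
		if (last_cpu != cpu)
			return 0;
	#endif
		return 1;
	}

The side being patched in here predates the VFP_CPU tracking, so it relies on the ownership test alone.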