commit   07f33a035ddda78095bed64f39db54334776841d
tree     279fca43986fe10f7dc4c7b05c851f79ebd7cb0d
parent   8b592783a2e8b7721a99730bd549aab5208f36af
author   Catalin Marinas <catalin.marinas@arm.com>  2009-07-24 07:32:57 -0400
committer Catalin Marinas <catalin.marinas@arm.com>  2009-07-24 07:32:57 -0400
Thumb-2: Implement the unified VFP support

    This patch modifies the VFP files for the ARM/Thumb-2 unified assembly
    syntax.

    Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Diffstat (limited to 'arch/arm/vfp')
-rw-r--r--  arch/arm/vfp/vfphw.S  48
 1 file changed, 32 insertions(+), 16 deletions(-)
diff --git a/arch/arm/vfp/vfphw.S b/arch/arm/vfp/vfphw.S
index 1aeae38725dd..66dc2d03b7fc 100644
--- a/arch/arm/vfp/vfphw.S
+++ b/arch/arm/vfp/vfphw.S
@@ -209,40 +209,55 @@ ENDPROC(vfp_save_state)
 last_VFP_context_address:
 	.word	last_VFP_context
 
-ENTRY(vfp_get_float)
-	add	pc, pc, r0, lsl #3
+	.macro	tbl_branch, base, tmp, shift
+#ifdef CONFIG_THUMB2_KERNEL
+	adr	\tmp, 1f
+	add	\tmp, \tmp, \base, lsl \shift
+	mov	pc, \tmp
+#else
+	add	pc, pc, \base, lsl \shift
 	mov	r0, r0
+#endif
+1:
+	.endm
+
+ENTRY(vfp_get_float)
+	tbl_branch r0, r3, #3
 	.irp	dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
-	mrc	p10, 0, r0, c\dr, c0, 0	@ fmrs	r0, s0
+1:	mrc	p10, 0, r0, c\dr, c0, 0	@ fmrs	r0, s0
 	mov	pc, lr
-	mrc	p10, 0, r0, c\dr, c0, 4	@ fmrs	r0, s1
+	.org	1b + 8
+1:	mrc	p10, 0, r0, c\dr, c0, 4	@ fmrs	r0, s1
 	mov	pc, lr
+	.org	1b + 8
 	.endr
 ENDPROC(vfp_get_float)
 
 ENTRY(vfp_put_float)
-	add	pc, pc, r1, lsl #3
-	mov	r0, r0
+	tbl_branch r1, r3, #3
 	.irp	dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
-	mcr	p10, 0, r0, c\dr, c0, 0	@ fmsr	r0, s0
+1:	mcr	p10, 0, r0, c\dr, c0, 0	@ fmsr	r0, s0
 	mov	pc, lr
-	mcr	p10, 0, r0, c\dr, c0, 4	@ fmsr	r0, s1
+	.org	1b + 8
+1:	mcr	p10, 0, r0, c\dr, c0, 4	@ fmsr	r0, s1
 	mov	pc, lr
+	.org	1b + 8
 	.endr
 ENDPROC(vfp_put_float)
 
 ENTRY(vfp_get_double)
-	add	pc, pc, r0, lsl #3
-	mov	r0, r0
+	tbl_branch r0, r3, #3
 	.irp	dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
-	fmrrd	r0, r1, d\dr
+1:	fmrrd	r0, r1, d\dr
 	mov	pc, lr
+	.org	1b + 8
 	.endr
 #ifdef CONFIG_VFPv3
 	@ d16 - d31 registers
 	.irp	dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
-	mrrc	p11, 3, r0, r1, c\dr	@ fmrrd	r0, r1, d\dr
+1:	mrrc	p11, 3, r0, r1, c\dr	@ fmrrd	r0, r1, d\dr
 	mov	pc, lr
+	.org	1b + 8
 	.endr
 #endif
 
@@ -253,17 +268,18 @@ ENTRY(vfp_get_double)
 ENDPROC(vfp_get_double)
 
 ENTRY(vfp_put_double)
-	add	pc, pc, r2, lsl #3
-	mov	r0, r0
+	tbl_branch r2, r3, #3
 	.irp	dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
-	fmdrr	d\dr, r0, r1
+1:	fmdrr	d\dr, r0, r1
 	mov	pc, lr
+	.org	1b + 8
 	.endr
 #ifdef CONFIG_VFPv3
 	@ d16 - d31 registers
 	.irp	dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
-	mcrr	p11, 3, r1, r2, c\dr	@ fmdrr	r1, r2, d\dr
+1:	mcrr	p11, 3, r1, r2, c\dr	@ fmdrr	r1, r2, d\dr
 	mov	pc, lr
+	.org	1b + 8
 	.endr
 #endif
 ENDPROC(vfp_put_double)
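
Note on the dispatch: these accessors branch into a jump table in which every
.irp iteration must occupy exactly 8 bytes, so that the register index shifted
left by 3 selects the matching entry. A rough sketch of what the two
tbl_branch variants amount to for, e.g., vfp_get_float (index in r0, r3 as
scratch, as in the patch); the comments on pc-relative behaviour and encoding
widths are explanatory assumptions, not text from the commit:

	@ ARM build (#else branch): in ARM state, reading pc yields the
	@ address of the current instruction + 8, i.e. the first table
	@ entry, so adding the index << 3 branches to entry N.
	add	pc, pc, r0, lsl #3	@ branch to table base + r0 * 8
	mov	r0, r0			@ filler so the table starts at pc + 8

	@ Thumb-2 build (#ifdef CONFIG_THUMB2_KERNEL): the shifted-register
	@ add into pc is not available, so the target is computed in a
	@ scratch register instead.
	adr	r3, 1f			@ r3 = table start (label 1: below)
	add	r3, r3, r0, lsl #3	@ r3 = address of entry N
	mov	pc, r3
1:
	@ Each entry is then forced to 8 bytes, since Thumb-2 may use
	@ narrower encodings than the fixed 4-byte ARM ones, e.g. for dr=0:
1:	mrc	p10, 0, r0, c0, c0, 0	@ fmrs	r0, s0
	mov	pc, lr
	.org	1b + 8			@ pad the entry to 8 bytes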