author	David S. Miller <davem@davemloft.net>	2014-10-14 22:37:58 -0400
committer	David S. Miller <davem@davemloft.net>	2014-10-14 22:37:58 -0400
commit	f4da3628dc7c32a59d1fb7116bb042e6f436d611 (patch)
tree	23b22ac260cd63de28d8500c8d58d0565c9ec830
parent	2d65a9f48fcdf7866aab6457bc707ca233e0c791 (diff)
sparc64: Fix FPU register corruption with AES crypto offload.
The AES loops in arch/sparc/crypto/aes_glue.c use a scheme where the key material is preloaded into the FPU registers, and then we loop over and over doing the crypt operation, reusing those pre-cooked key registers.

There are intervening blkcipher*() calls between the crypt operation calls. And those might perform memcpy() and thus also try to use the FPU.

The sparc64 kernel FPU usage mechanism is designed to allow such recursive uses, but with a catch: there has to be a trap between the two FPU-using threads of control.

The mechanism works by, when the FPU is already in use by the kernel, allocating a slot for FPU saving at trap time. Then if, within the trap handler, we try to use the FPU registers, the pre-trap FPU register state is saved into the slot. Then at trap return time we notice this and restore the pre-trap FPU state.

Over the long term there are various more involved ways we can make this work, but for a quick fix let's take advantage of the fact that the situation where this happens is very limited. All sparc64 chips that support the crypto instructions are also using the Niagara4 memcpy routine, and that routine only uses the FPU for large copies where we can't get the source aligned properly to a multiple of 8 bytes.

We look to see if the FPU is already in use in this context, and if so we use the non-large copy path, which only uses integer registers. Furthermore, we limit this special logic to kernel copies, rather than user copies.

Signed-off-by: David S. Miller <davem@davemloft.net>
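For context, the hazardous pattern looks roughly like this. This is a simplified sketch of the aes_glue.c loop described above; the identifiers (ctx->ops, load_encrypt_keys, AES_BLOCK_MASK) are approximations of that file's code, not an exact quote:

	/* Key schedule is parked in the FPU registers once... */
	ctx->ops->load_encrypt_keys(&ctx->key[0]);
	while ((nbytes = walk.nbytes) != 0) {
		/* ...and reused here on every iteration. */
		ctx->ops->ecb_encrypt(&ctx->key[0],
				      (const u64 *)walk.src.virt.addr,
				      (u64 *)walk.dst.virt.addr,
				      nbytes & AES_BLOCK_MASK);
		/* blkcipher_walk_done() may memcpy().  Before this fix, a
		 * large unaligned copy would grab the FPU with no trap in
		 * between, silently clobbering the preloaded key registers. */
		err = blkcipher_walk_done(desc, &walk,
					  nbytes & (AES_BLOCK_SIZE - 1));
	}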
-rw-r--r--	arch/sparc/include/asm/visasm.h	8
-rw-r--r--	arch/sparc/lib/NG4memcpy.S	14
2 files changed, 21 insertions(+), 1 deletion(-)
diff --git a/arch/sparc/include/asm/visasm.h b/arch/sparc/include/asm/visasm.h
index b26673759283..1f0aa2024e94 100644
--- a/arch/sparc/include/asm/visasm.h
+++ b/arch/sparc/include/asm/visasm.h
@@ -39,6 +39,14 @@
 297:	wr		%o5, FPRS_FEF, %fprs; \
 298:
 
+#define VISEntryHalfFast(fail_label) \
+	rd		%fprs, %o5; \
+	andcc		%o5, FPRS_FEF, %g0; \
+	be,pt		%icc, 297f; \
+	 nop; \
+	ba,a,pt		%xcc, fail_label; \
+297:	wr		%o5, FPRS_FEF, %fprs;
+
 #define VISExitHalf \
 	wr		%o5, 0, %fprs;
 
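In short, the new VISEntryHalfFast(fail_label) takes the FPU only when it is not already live; if FPRS_FEF is already set, it branches to the caller-supplied fail label rather than entering the FPU at all. An annotated reading of the macro body (comments added here for exposition, not part of the patch):

	rd	%fprs, %o5		/* current FPU state */
	andcc	%o5, FPRS_FEF, %g0	/* is the FPU already enabled? */
	be,pt	%icc, 297f		/* no  -> safe to take the FPU path */
	 nop
	ba,a,pt	%xcc, fail_label	/* yes -> bail out to integer-only code */
297:	wr	%o5, FPRS_FEF, %fprs	/* enable the FPU, as VISEntryHalf does */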
diff --git a/arch/sparc/lib/NG4memcpy.S b/arch/sparc/lib/NG4memcpy.S
index 9cf2ee01cee3..140527a20e7d 100644
--- a/arch/sparc/lib/NG4memcpy.S
+++ b/arch/sparc/lib/NG4memcpy.S
@@ -41,6 +41,10 @@
 #endif
 #endif
 
+#if !defined(EX_LD) && !defined(EX_ST)
+#define NON_USER_COPY
+#endif
+
 #ifndef EX_LD
 #define EX_LD(x)	x
 #endif
@@ -197,9 +201,13 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
 	mov		EX_RETVAL(%o3), %o0
 
 .Llarge_src_unaligned:
+#ifdef NON_USER_COPY
+	VISEntryHalfFast(.Lmedium_vis_entry_fail)
+#else
+	VISEntryHalf
+#endif
 	andn		%o2, 0x3f, %o4
 	sub		%o2, %o4, %o2
-	VISEntryHalf
 	alignaddr	%o1, %g0, %g1
 	add		%o1, %o4, %o1
 	EX_LD(LOAD(ldd, %g1 + 0x00, %f0))
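The effect at the call site: in the plain in-kernel memcpy build (NON_USER_COPY), the large-unaligned path commits to the FPU only when VISEntryHalfFast can take it; the user-copy builds, which define EX_LD/EX_ST, keep the unconditional VISEntryHalf. An annotated excerpt of the patched code above (comments added here, not in the file):

.Llarge_src_unaligned:
#ifdef NON_USER_COPY
	/* In-kernel memcpy: if the FPU is busy, branch to the
	 * integer-only medium-copy fallback below. */
	VISEntryHalfFast(.Lmedium_vis_entry_fail)
#else
	/* User-copy builds: old unconditional FPU entry. */
	VISEntryHalf
#endif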
@@ -240,6 +248,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
 	 nop
 	ba,a,pt		%icc, .Lmedium_unaligned
 
+#ifdef NON_USER_COPY
+.Lmedium_vis_entry_fail:
+	 or		%o0, %o1, %g2
+#endif
 .Lmedium:
 	LOAD(prefetch, %o1 + 0x40, #n_reads_strong)
 	andcc		%g2, 0x7, %g0
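The fail label lands immediately above .Lmedium and recomputes %g2, which the medium-copy code tests right away for combined alignment, so the FPU-less fallback proceeds as an ordinary medium copy using only integer registers. An annotated excerpt (comments added here for exposition):

.Lmedium_vis_entry_fail:
	 or	%o0, %o1, %g2	/* %g2 = dst | src, as .Lmedium expects */
.Lmedium:
	LOAD(prefetch, %o1 + 0x40, #n_reads_strong)
	andcc	%g2, 0x7, %g0	/* zero iff dst and src are both 8-byte aligned */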