author		David S. Miller <davem@davemloft.net>	2012-12-19 18:19:11 -0500
committer	David S. Miller <davem@davemloft.net>	2012-12-19 18:19:11 -0500
commit		9f28ffc03e93343ac04874fda9edb7affea45165 (patch)
tree		5d65df3dacd78570469a5c1183359595015fdd9b /arch/sparc
parent		4a9d1946b0135b15d901d7e7c9796d36f352aaea (diff)
sparc64: Fix unrolled AES 256-bit key loops.
The basic scheme of the block mode assembler is that we start by enabling the FPU, loading the key into the floating point registers, then iterate calling the encrypt/decrypt routine for each block.

For the 256-bit key cases, we run short on registers in the unrolled loops.

So the two-block {ENCRYPT,DECRYPT}_256_2() macros reload the key registers that get clobbered.

The non-unrolled, single-block macros, {ENCRYPT,DECRYPT}_256(), are not mindful of this. So if we have a mix of multi-block and single-block calls, the single-block 256-bit encrypt/decrypt can run with some of the key registers clobbered.

Handle this by always explicitly loading those registers before using the non-unrolled 256-bit macro.

This was discovered thanks to all of the new test cases added by Jussi Kivilinna.

Signed-off-by: David S. Miller <davem@davemloft.net>
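To make the clobbering scenario concrete, here is a minimal C sketch of the hazard described above (purely illustrative, not kernel code; the array and function names are stand-ins of my own, not identifiers from aes_asm.S). The fp_key array models the floating point registers holding the tail of the expanded 256-bit key, the two-block routine models the unrolled path that reuses those registers, and the single-block routine models the non-unrolled macro that assumes they still contain key material.

/* Illustrative model only -- not kernel code. */
#include <stdio.h>
#include <string.h>

static unsigned long fp_key[4];		/* stands in for %f56/%f58/%f60/%f62 */

static void load_key_tail(const unsigned long *key)
{
	memcpy(fp_key, key, sizeof(fp_key));	/* models the ldd reloads */
}

/* Two-block (unrolled) path: reuses the key registers as scratch. */
static void crypt_two_blocks(void)
{
	memset(fp_key, 0, sizeof(fp_key));	/* key tail is now clobbered */
}

/* Single-block (non-unrolled) path: assumes fp_key still holds the key. */
static void crypt_one_block(void)
{
	printf("single-block path sees: %lx %lx %lx %lx\n",
	       fp_key[0], fp_key[1], fp_key[2], fp_key[3]);
}

int main(void)
{
	const unsigned long key_tail[4] = { 0x11, 0x22, 0x33, 0x44 };

	load_key_tail(key_tail);
	crypt_two_blocks();	/* multi-block iterations run first            */
	crypt_one_block();	/* bug: prints zeroes, the key tail is gone    */

	load_key_tail(key_tail);/* the fix: reload before the single-block path */
	crypt_one_block();	/* now prints 11 22 33 44 as expected          */
	return 0;
}

The patch below applies exactly this reload in the assembler: the ECB encrypt and decrypt paths gain ldd instructions that refresh %f56-%f62 at the single-block label 10, and the CTR path moves the label so its existing reloads cover that entry point as well.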
Diffstat (limited to 'arch/sparc')
-rw-r--r--	arch/sparc/crypto/aes_asm.S	20
1 file changed, 14 insertions(+), 6 deletions(-)
diff --git a/arch/sparc/crypto/aes_asm.S b/arch/sparc/crypto/aes_asm.S
index 23f6cbb910d3..1cda8aa7cb85 100644
--- a/arch/sparc/crypto/aes_asm.S
+++ b/arch/sparc/crypto/aes_asm.S
@@ -1024,7 +1024,11 @@ ENTRY(aes_sparc64_ecb_encrypt_256)
 	add		%o2, 0x20, %o2
 	brlz,pt		%o3, 11f
 	 nop
-10:	ldx		[%o1 + 0x00], %g3
+10:	ldd		[%o0 + 0xd0], %f56
+	ldd		[%o0 + 0xd8], %f58
+	ldd		[%o0 + 0xe0], %f60
+	ldd		[%o0 + 0xe8], %f62
+	ldx		[%o1 + 0x00], %g3
 	ldx		[%o1 + 0x08], %g7
 	xor		%g1, %g3, %g3
 	xor		%g2, %g7, %g7
@@ -1128,9 +1132,9 @@ ENTRY(aes_sparc64_ecb_decrypt_256)
 	/* %o0=&key[key_len], %o1=input, %o2=output, %o3=len */
 	ldx		[%o0 - 0x10], %g1
 	subcc		%o3, 0x10, %o3
+	ldx		[%o0 - 0x08], %g2
 	be		10f
-	 ldx		[%o0 - 0x08], %g2
-	sub		%o0, 0xf0, %o0
+	 sub		%o0, 0xf0, %o0
 1:	ldx		[%o1 + 0x00], %g3
 	ldx		[%o1 + 0x08], %g7
 	ldx		[%o1 + 0x10], %o4
@@ -1154,7 +1158,11 @@ ENTRY(aes_sparc64_ecb_decrypt_256)
 	add		%o2, 0x20, %o2
 	brlz,pt		%o3, 11f
 	 nop
-10:	ldx		[%o1 + 0x00], %g3
+10:	ldd		[%o0 + 0x18], %f56
+	ldd		[%o0 + 0x10], %f58
+	ldd		[%o0 + 0x08], %f60
+	ldd		[%o0 + 0x00], %f62
+	ldx		[%o1 + 0x00], %g3
 	ldx		[%o1 + 0x08], %g7
 	xor		%g1, %g3, %g3
 	xor		%g2, %g7, %g7
@@ -1511,11 +1519,11 @@ ENTRY(aes_sparc64_ctr_crypt_256)
 	add		%o2, 0x20, %o2
 	brlz,pt		%o3, 11f
 	 nop
-	ldd		[%o0 + 0xd0], %f56
+10:	ldd		[%o0 + 0xd0], %f56
 	ldd		[%o0 + 0xd8], %f58
 	ldd		[%o0 + 0xe0], %f60
 	ldd		[%o0 + 0xe8], %f62
-10:	xor		%g1, %g3, %o5
+	xor		%g1, %g3, %o5
 	MOVXTOD_O5_F0
 	xor		%g2, %g7, %o5
 	MOVXTOD_O5_F2