author		Eli Cooper <elicooper@gmx.com>			2016-01-21 11:24:08 -0500
committer	Herbert Xu <herbert@gondor.apana.org.au>	2016-01-25 08:47:45 -0500
commit		cbe09bd51bf23b42c3a94c5fb6815e1397c5fc3f (patch)
tree		f1dc325e655350589e60bfe8217495f4dc371475
parent		7ee7014d0eb6bcac679c0bd5fe9ce65bc4325648 (diff)
crypto: chacha20-ssse3 - Align stack pointer to 64 bytes
This aligns the stack pointer in chacha20_4block_xor_ssse3 to 64 bytes.
Fixes general protection faults and potential kernel panics.
Cc: stable@vger.kernel.org
Signed-off-by: Eli Cooper <elicooper@gmx.com>
Acked-by: Martin Willi <martin@strongswan.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
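
For background: movdqa and other aligned SSE instructions raise a general protection fault when given a memory operand that is not 16-byte aligned, and the x86-64 kernel builds with only 8-byte stack alignment, so scratch space carved directly out of %rsp is not safe for aligned vector stores. A minimal standalone sketch of the save/over-allocate/mask idiom the patch adopts (GNU assembler, SysV x86-64; the function name is hypothetical and this is not the kernel code itself):

	.text
	.globl	aligned_scratch_demo
	.type	aligned_scratch_demo, @function
aligned_scratch_demo:
	mov	%rsp,%r11		# save the caller's stack pointer
	sub	$0x80,%rsp		# reserve more scratch than we need
	and	$~63,%rsp		# round %rsp down to a 64-byte boundary
					# (moves it down by up to 63 more bytes)
	movdqa	%xmm0,0x00(%rsp)	# aligned stores are now always safe
	movdqa	%xmm1,0x10(%rsp)
	movdqa	0x00(%rsp),%xmm2	# ... use the scratch area ...
	movdqa	0x10(%rsp),%xmm3
	mov	%r11,%rsp		# restore the original stack pointer
	ret
	.size	aligned_scratch_demo, .-aligned_scratch_demo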
-rw-r--r--	arch/x86/crypto/chacha20-ssse3-x86_64.S	6
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/arch/x86/crypto/chacha20-ssse3-x86_64.S b/arch/x86/crypto/chacha20-ssse3-x86_64.S
index 712b13047b41..3a33124e9112 100644
--- a/arch/x86/crypto/chacha20-ssse3-x86_64.S
+++ b/arch/x86/crypto/chacha20-ssse3-x86_64.S
@@ -157,7 +157,9 @@ ENTRY(chacha20_4block_xor_ssse3)
 	# done with the slightly better performing SSSE3 byte shuffling,
 	# 7/12-bit word rotation uses traditional shift+OR.
 
-	sub		$0x40,%rsp
+	mov		%rsp,%r11
+	sub		$0x80,%rsp
+	and		$~63,%rsp
 
 	# x0..15[0-3] = s0..3[0..3]
 	movq		0x00(%rdi),%xmm1
@@ -620,6 +622,6 @@ ENTRY(chacha20_4block_xor_ssse3)
 	pxor		%xmm1,%xmm15
 	movdqu		%xmm15,0xf0(%rsi)
 
-	add		$0x40,%rsp
+	mov		%r11,%rsp
 	ret
 ENDPROC(chacha20_4block_xor_ssse3)
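
Two details of the new sequence are worth noting. Once the low bits of %rsp have been masked off, the original value cannot be recovered by adding a constant back, which is why the patch saves it in %r11 (a caller-clobbered register, presumably otherwise unused in this routine) and restores it with a plain mov before ret. And reserving 0x80 bytes rather than the 0x40 actually used leaves comfortable headroom, so the aligned scratch area always sits safely below the caller's frame even after the round-down consumes up to 63 additional bytes.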