about summary refs log tree commit diff stats
diff options
context:
space:
mode:
author    Linus Torvalds <torvalds@linux-foundation.org>  2017-10-30 12:31:15 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2017-10-30 12:31:15 -0400
commit 1960e8eabcba2749db9443adb2a5d93e4dabf590 (patch)
tree   17a757b4d80872d99be54dfd2caa0d83d3c00125
parent 0b07194bb55ed836c2cc7c22e866b87a14681984 (diff)
parent 4635742d1cef5ee5f217f89310a8782ebb4e25dd (diff)
Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto fix from Herbert Xu:
 "This fixes an objtool regression"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6:
  crypto: x86/chacha20 - satisfy stack validation 2.0
-rw-r--r-- arch/x86/crypto/chacha20-avx2-x86_64.S  | 4
-rw-r--r-- arch/x86/crypto/chacha20-ssse3-x86_64.S | 4
2 files changed, 4 insertions, 4 deletions
diff --git a/arch/x86/crypto/chacha20-avx2-x86_64.S b/arch/x86/crypto/chacha20-avx2-x86_64.S
index 3a2dc3dc6cac..f3cd26f48332 100644
--- a/arch/x86/crypto/chacha20-avx2-x86_64.S
+++ b/arch/x86/crypto/chacha20-avx2-x86_64.S
@@ -45,7 +45,7 @@ ENTRY(chacha20_8block_xor_avx2)
45 45
46 vzeroupper 46 vzeroupper
47 # 4 * 32 byte stack, 32-byte aligned 47 # 4 * 32 byte stack, 32-byte aligned
48 mov %rsp, %r8 48 lea 8(%rsp),%r10
49 and $~31, %rsp 49 and $~31, %rsp
50 sub $0x80, %rsp 50 sub $0x80, %rsp
51 51
@@ -443,6 +443,6 @@ ENTRY(chacha20_8block_xor_avx2)
443 vmovdqu %ymm15,0x01e0(%rsi) 443 vmovdqu %ymm15,0x01e0(%rsi)
444 444
445 vzeroupper 445 vzeroupper
446 mov %r8,%rsp 446 lea -8(%r10),%rsp
447 ret 447 ret
448 ENDPROC(chacha20_8block_xor_avx2) 448 ENDPROC(chacha20_8block_xor_avx2)
diff --git a/arch/x86/crypto/chacha20-ssse3-x86_64.S b/arch/x86/crypto/chacha20-ssse3-x86_64.S
index 3f511a7d73b8..512a2b500fd1 100644
--- a/arch/x86/crypto/chacha20-ssse3-x86_64.S
+++ b/arch/x86/crypto/chacha20-ssse3-x86_64.S
@@ -160,7 +160,7 @@ ENTRY(chacha20_4block_xor_ssse3)
160 # done with the slightly better performing SSSE3 byte shuffling, 160 # done with the slightly better performing SSSE3 byte shuffling,
161 # 7/12-bit word rotation uses traditional shift+OR. 161 # 7/12-bit word rotation uses traditional shift+OR.
162 162
163 mov %rsp,%r11 163 lea 8(%rsp),%r10
164 sub $0x80,%rsp 164 sub $0x80,%rsp
165 and $~63,%rsp 165 and $~63,%rsp
166 166
@@ -625,6 +625,6 @@ ENTRY(chacha20_4block_xor_ssse3)
625 pxor %xmm1,%xmm15 625 pxor %xmm1,%xmm15
626 movdqu %xmm15,0xf0(%rsi) 626 movdqu %xmm15,0xf0(%rsi)
627 627
628 mov %r11,%rsp 628 lea -8(%r10),%rsp
629 ret 629 ret
630 ENDPROC(chacha20_4block_xor_ssse3) 630 ENDPROC(chacha20_4block_xor_ssse3)