author    Eric Biggers <ebiggers@google.com>    2017-12-29 11:10:24 -0500
committer Herbert Xu <herbert@gondor.apana.org.au>    2018-01-05 02:43:10 -0500
commit    fcfbeedf79adc7abaea35b0f88ec23cf546d3b77 (patch)
tree      f63e386916ea32ed7b6e5f9f543fabf73baa51c6 /crypto/poly1305_generic.c
parent    8b55107c57f763c7ca393e72f7ce6f89ea1ba49a (diff)
crypto: poly1305 - use unaligned access macros to output digest
Currently the only part of poly1305-generic that assumes special alignment is the code that writes the final digest. Switch this over to the unaligned access macros so that the cra_alignmask can be removed.

Signed-off-by: Eric Biggers <ebiggers@google.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
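For readers outside the kernel tree: put_unaligned_le32() (from the kernel's <asm/unaligned.h>) stores a 32-bit value in little-endian byte order at an address that need not be 4-byte aligned. Below is a minimal portable approximation for a userspace build; the memcpy-free, byte-store implementation shown here is illustrative only, not the kernel's actual arch-specific code:

#include <stdint.h>

/* Illustrative stand-in for the kernel's put_unaligned_le32():
 * write val at p, least-significant byte first, with no alignment
 * requirement on p.  Byte stores are always legal, so this works on
 * strict-alignment architectures too. */
static inline void put_unaligned_le32(uint32_t val, uint8_t *p)
{
	p[0] = (uint8_t)(val >> 0);
	p[1] = (uint8_t)(val >> 8);
	p[2] = (uint8_t)(val >> 16);
	p[3] = (uint8_t)(val >> 24);
}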
Diffstat (limited to 'crypto/poly1305_generic.c')
-rw-r--r--  crypto/poly1305_generic.c | 9 ++++-----
1 file changed, 4 insertions(+), 5 deletions(-)
diff --git a/crypto/poly1305_generic.c b/crypto/poly1305_generic.c
index b1c2d57dc734..d752901ba0bc 100644
--- a/crypto/poly1305_generic.c
+++ b/crypto/poly1305_generic.c
@@ -210,7 +210,6 @@ EXPORT_SYMBOL_GPL(crypto_poly1305_update);
 int crypto_poly1305_final(struct shash_desc *desc, u8 *dst)
 {
 	struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
-	__le32 *mac = (__le32 *)dst;
 	u32 h0, h1, h2, h3, h4;
 	u32 g0, g1, g2, g3, g4;
 	u32 mask;
@@ -267,10 +266,10 @@ int crypto_poly1305_final(struct shash_desc *desc, u8 *dst)
 	h3 = (h3 >> 18) | (h4 << 8);
 
 	/* mac = (h + s) % (2^128) */
-	f = (f >> 32) + h0 + dctx->s[0]; mac[0] = cpu_to_le32(f);
-	f = (f >> 32) + h1 + dctx->s[1]; mac[1] = cpu_to_le32(f);
-	f = (f >> 32) + h2 + dctx->s[2]; mac[2] = cpu_to_le32(f);
-	f = (f >> 32) + h3 + dctx->s[3]; mac[3] = cpu_to_le32(f);
+	f = (f >> 32) + h0 + dctx->s[0]; put_unaligned_le32(f, dst + 0);
+	f = (f >> 32) + h1 + dctx->s[1]; put_unaligned_le32(f, dst + 4);
+	f = (f >> 32) + h2 + dctx->s[2]; put_unaligned_le32(f, dst + 8);
+	f = (f >> 32) + h3 + dctx->s[3]; put_unaligned_le32(f, dst + 12);
 
 	return 0;
 }
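Why the change matters, in a nutshell: the removed line cast the caller-supplied u8 *dst to __le32 * and stored through it, which is only safe if dst happens to be 4-byte aligned; on strict-alignment architectures a misaligned word store can fault. A small userspace sketch of the two patterns (function names here are made up for the example):

#include <stdint.h>

void store_le32_aligned_only(uint8_t *dst, uint32_t v)
{
	/* Pattern the patch removes: undefined behavior in C if dst is
	 * not 4-byte aligned, and may trap on strict-alignment CPUs
	 * (it also silently assumes a little-endian host). */
	*(uint32_t *)dst = v;
}

void store_le32_anywhere(uint8_t *dst, uint32_t v)
{
	/* Pattern the patch adopts: byte-wise store, valid at any
	 * address and explicitly little-endian. */
	dst[0] = (uint8_t)(v >> 0);
	dst[1] = (uint8_t)(v >> 8);
	dst[2] = (uint8_t)(v >> 16);
	dst[3] = (uint8_t)(v >> 24);
}

Once the digest write no longer depends on dst's alignment, the algorithm no longer needs a nonzero cra_alignmask, which otherwise obliges the crypto API to guarantee aligned buffers for it, per the commit's stated goal.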