author	Herbert Xu <herbert@gondor.apana.org.au>	2006-05-16 08:09:29 -0400
committer	Herbert Xu <herbert@gondor.apana.org.au>	2006-06-26 03:34:39 -0400
commit	6c2bb98bc33ae33c7a33a133a4cd5a06395fece5
tree	96684cd2c473cd05d651ce1fa3dd72b1b4b19b09	/drivers/crypto/padlock-aes.c
parent	43600106e32809a4dead79fec67a63e9860e3d5d
[CRYPTO] all: Pass tfm instead of ctx to algorithms
Up until now algorithms have been happy to get a context pointer since they know everything that's in the tfm already (e.g., alignment, block size).

However, once we have parameterised algorithms, such information will be specific to each tfm. So the algorithm API needs to be changed to pass the tfm structure instead of the context pointer.

This patch is basically a text substitution. The only tricky bit is the assembly routines that need to get the context pointer offset through asm-offsets.h.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
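For illustration, a minimal before/after sketch of what this substitution means for a cipher callback. The aes_example names and the context layout are hypothetical, not taken from this patch; crypto_tfm_ctx() is the existing helper that the new-style code uses to recover its private context from the tfm.

/* Sketch only: hypothetical cipher context, not part of this patch. */
#include <linux/types.h>
#include <linux/crypto.h>

struct aes_example_ctx {
	u32 key_enc[60];	/* illustrative expanded-key storage */
};

/* Old-style callback: the crypto core handed the raw context pointer in. */
static void aes_example_encrypt_old(void *ctx_arg, u8 *out, const u8 *in)
{
	struct aes_example_ctx *ctx = ctx_arg;
	/* ... encrypt one block using ctx ... */
}

/* New-style callback: the core passes the tfm, and the algorithm derives
 * its context with crypto_tfm_ctx(), so per-tfm parameters stay reachable. */
static void aes_example_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct aes_example_ctx *ctx = crypto_tfm_ctx(tfm);
	/* ... encrypt one block using ctx ... */
}

A driver that needs stricter alignment can still realign the pointer returned by crypto_tfm_ctx(); that is what the reworked aes_ctx() helper in the hunk below does with ALIGN() and PADLOCK_ALIGNMENT.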
Diffstat (limited to 'drivers/crypto/padlock-aes.c')
-rw-r--r--	drivers/crypto/padlock-aes.c	29
1 file changed, 14 insertions(+), 15 deletions(-)
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index 5158a9db4bc5..b98ad203d6cb 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -282,19 +282,20 @@ aes_hw_extkey_available(uint8_t key_len)
 	return 0;
 }
 
-static inline struct aes_ctx *aes_ctx(void *ctx)
+static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm)
 {
+	unsigned long addr = (unsigned long)crypto_tfm_ctx(tfm);
 	unsigned long align = PADLOCK_ALIGNMENT;
 
 	if (align <= crypto_tfm_ctx_alignment())
 		align = 1;
-	return (struct aes_ctx *)ALIGN((unsigned long)ctx, align);
+	return (struct aes_ctx *)ALIGN(addr, align);
 }
 
-static int
-aes_set_key(void *ctx_arg, const uint8_t *in_key, unsigned int key_len, uint32_t *flags)
+static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+		       unsigned int key_len, u32 *flags)
 {
-	struct aes_ctx *ctx = aes_ctx(ctx_arg);
+	struct aes_ctx *ctx = aes_ctx(tfm);
 	const __le32 *key = (const __le32 *)in_key;
 	uint32_t i, t, u, v, w;
 	uint32_t P[AES_EXTENDED_KEY_SIZE];
@@ -414,24 +415,22 @@ static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
 	return iv;
 }
 
-static void
-aes_encrypt(void *ctx_arg, uint8_t *out, const uint8_t *in)
+static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 {
-	struct aes_ctx *ctx = aes_ctx(ctx_arg);
+	struct aes_ctx *ctx = aes_ctx(tfm);
 	padlock_xcrypt_ecb(in, out, ctx->E, &ctx->cword.encrypt, 1);
 }
 
-static void
-aes_decrypt(void *ctx_arg, uint8_t *out, const uint8_t *in)
+static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 {
-	struct aes_ctx *ctx = aes_ctx(ctx_arg);
+	struct aes_ctx *ctx = aes_ctx(tfm);
 	padlock_xcrypt_ecb(in, out, ctx->D, &ctx->cword.decrypt, 1);
 }
 
 static unsigned int aes_encrypt_ecb(const struct cipher_desc *desc, u8 *out,
 				    const u8 *in, unsigned int nbytes)
 {
-	struct aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(desc->tfm));
+	struct aes_ctx *ctx = aes_ctx(desc->tfm);
 	padlock_xcrypt_ecb(in, out, ctx->E, &ctx->cword.encrypt,
 			   nbytes / AES_BLOCK_SIZE);
 	return nbytes & ~(AES_BLOCK_SIZE - 1);
@@ -440,7 +439,7 @@ static unsigned int aes_encrypt_ecb(const struct cipher_desc *desc, u8 *out,
 static unsigned int aes_decrypt_ecb(const struct cipher_desc *desc, u8 *out,
 				    const u8 *in, unsigned int nbytes)
 {
-	struct aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(desc->tfm));
+	struct aes_ctx *ctx = aes_ctx(desc->tfm);
 	padlock_xcrypt_ecb(in, out, ctx->D, &ctx->cword.decrypt,
 			   nbytes / AES_BLOCK_SIZE);
 	return nbytes & ~(AES_BLOCK_SIZE - 1);
@@ -449,7 +448,7 @@ static unsigned int aes_decrypt_ecb(const struct cipher_desc *desc, u8 *out,
 static unsigned int aes_encrypt_cbc(const struct cipher_desc *desc, u8 *out,
 				    const u8 *in, unsigned int nbytes)
 {
-	struct aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(desc->tfm));
+	struct aes_ctx *ctx = aes_ctx(desc->tfm);
 	u8 *iv;
 
 	iv = padlock_xcrypt_cbc(in, out, ctx->E, desc->info,
@@ -462,7 +461,7 @@ static unsigned int aes_encrypt_cbc(const struct cipher_desc *desc, u8 *out,
 static unsigned int aes_decrypt_cbc(const struct cipher_desc *desc, u8 *out,
 				    const u8 *in, unsigned int nbytes)
 {
-	struct aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(desc->tfm));
+	struct aes_ctx *ctx = aes_ctx(desc->tfm);
 	padlock_xcrypt_cbc(in, out, ctx->D, desc->info, &ctx->cword.decrypt,
 			   nbytes / AES_BLOCK_SIZE);
 	return nbytes & ~(AES_BLOCK_SIZE - 1);