Diffstat (limited to 'drivers/crypto/padlock-aes.c')
-rw-r--r--  drivers/crypto/padlock-aes.c | 52
1 file changed, 42 insertions(+), 10 deletions(-)
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index bf2917d197a0..856b3cc25583 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -15,6 +15,8 @@
 #include <linux/errno.h>
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
+#include <linux/percpu.h>
+#include <linux/smp.h>
 #include <asm/byteorder.h>
 #include <asm/i387.h>
 #include "padlock.h"
@@ -49,6 +51,8 @@ struct aes_ctx {
 	u32 *D;
 };
 
+static DEFINE_PER_CPU(struct cword *, last_cword);
+
 /* Tells whether the ACE is capable to generate
    the extended key for a given key_len. */
 static inline int
@@ -89,6 +93,7 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
 	const __le32 *key = (const __le32 *)in_key;
 	u32 *flags = &tfm->crt_flags;
 	struct crypto_aes_ctx gen_aes;
+	int cpu;
 
 	if (key_len % 8) {
 		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
@@ -118,7 +123,7 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
 
 	/* Don't generate extended keys if the hardware can do it. */
 	if (aes_hw_extkey_available(key_len))
-		return 0;
+		goto ok;
 
 	ctx->D = ctx->d_data;
 	ctx->cword.encrypt.keygen = 1;
@@ -131,15 +136,30 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
 
 	memcpy(ctx->E, gen_aes.key_enc, AES_MAX_KEYLENGTH);
 	memcpy(ctx->D, gen_aes.key_dec, AES_MAX_KEYLENGTH);
+
+ok:
+	for_each_online_cpu(cpu)
+		if (&ctx->cword.encrypt == per_cpu(last_cword, cpu) ||
+		    &ctx->cword.decrypt == per_cpu(last_cword, cpu))
+			per_cpu(last_cword, cpu) = NULL;
+
 	return 0;
 }
 
 /* ====== Encryption/decryption routines ====== */
 
 /* These are the real call to PadLock. */
-static inline void padlock_reset_key(void)
+static inline void padlock_reset_key(struct cword *cword)
+{
+	int cpu = raw_smp_processor_id();
+
+	if (cword != per_cpu(last_cword, cpu))
+		asm volatile ("pushfl; popfl");
+}
+
+static inline void padlock_store_cword(struct cword *cword)
 {
-	asm volatile ("pushfl; popfl");
+	per_cpu(last_cword, raw_smp_processor_id()) = cword;
 }
 
 /*
@@ -149,7 +169,7 @@ static inline void padlock_reset_key(void)
  */
 
 static inline void padlock_xcrypt(const u8 *input, u8 *output, void *key,
-				  void *control_word)
+				  struct cword *control_word)
 {
 	asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
 		      : "+S"(input), "+D"(output)
@@ -213,22 +233,24 @@ static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 {
 	struct aes_ctx *ctx = aes_ctx(tfm);
 	int ts_state;
-	padlock_reset_key();
 
+	padlock_reset_key(&ctx->cword.encrypt);
 	ts_state = irq_ts_save();
 	aes_crypt(in, out, ctx->E, &ctx->cword.encrypt);
 	irq_ts_restore(ts_state);
+	padlock_store_cword(&ctx->cword.encrypt);
 }
 
 static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 {
 	struct aes_ctx *ctx = aes_ctx(tfm);
 	int ts_state;
-	padlock_reset_key();
 
+	padlock_reset_key(&ctx->cword.encrypt);
 	ts_state = irq_ts_save();
 	aes_crypt(in, out, ctx->D, &ctx->cword.decrypt);
 	irq_ts_restore(ts_state);
+	padlock_store_cword(&ctx->cword.encrypt);
 }
 
 static struct crypto_alg aes_alg = {
@@ -261,7 +283,7 @@ static int ecb_aes_encrypt(struct blkcipher_desc *desc,
 	int err;
 	int ts_state;
 
-	padlock_reset_key();
+	padlock_reset_key(&ctx->cword.encrypt);
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
@@ -276,6 +298,8 @@ static int ecb_aes_encrypt(struct blkcipher_desc *desc,
 	}
 	irq_ts_restore(ts_state);
 
+	padlock_store_cword(&ctx->cword.encrypt);
+
 	return err;
 }
 
@@ -288,7 +312,7 @@ static int ecb_aes_decrypt(struct blkcipher_desc *desc,
 	int err;
 	int ts_state;
 
-	padlock_reset_key();
+	padlock_reset_key(&ctx->cword.decrypt);
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
@@ -302,6 +326,9 @@ static int ecb_aes_decrypt(struct blkcipher_desc *desc,
 		err = blkcipher_walk_done(desc, &walk, nbytes);
 	}
 	irq_ts_restore(ts_state);
+
+	padlock_store_cword(&ctx->cword.encrypt);
+
 	return err;
 }
 
@@ -336,7 +363,7 @@ static int cbc_aes_encrypt(struct blkcipher_desc *desc,
 	int err;
 	int ts_state;
 
-	padlock_reset_key();
+	padlock_reset_key(&ctx->cword.encrypt);
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
@@ -353,6 +380,8 @@ static int cbc_aes_encrypt(struct blkcipher_desc *desc,
 	}
 	irq_ts_restore(ts_state);
 
+	padlock_store_cword(&ctx->cword.decrypt);
+
 	return err;
 }
 
@@ -365,7 +394,7 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc,
 	int err;
 	int ts_state;
 
-	padlock_reset_key();
+	padlock_reset_key(&ctx->cword.encrypt);
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
@@ -380,6 +409,9 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc,
 	}
 
 	irq_ts_restore(ts_state);
+
+	padlock_store_cword(&ctx->cword.encrypt);
+
 	return err;
 }
 
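For context: the patch caches, per CPU, the control word that was last loaded (last_cword), so the key reset ("pushfl; popfl", which makes the PadLock engine reload its key) is only issued when the previous operation on that CPU used a different cword, and aes_set_key clears the cache on every online CPU when a key changes. Below is a minimal, stand-alone user-space sketch of that caching pattern; the per-CPU machinery is replaced by a plain array indexed by a hypothetical cpu argument, and expensive_key_reload() stands in for the real reset. It illustrates the idea only and is not driver code.

	#include <stdio.h>

	/* Simplified stand-in for the driver's struct cword. */
	struct cword { unsigned int rounds : 4, algo : 3, keygen : 1; };

	/* Stand-in for DEFINE_PER_CPU(struct cword *, last_cword): one slot per CPU. */
	#define NR_CPUS 4
	static struct cword *last_cword[NR_CPUS];

	/* Stand-in for the real reset ("pushfl; popfl" on the PadLock engine). */
	static void expensive_key_reload(void)
	{
		puts("key reloaded");
	}

	static void padlock_reset_key(struct cword *cword, int cpu)
	{
		/* Only force a reload if this CPU last ran with a different cword. */
		if (cword != last_cword[cpu])
			expensive_key_reload();
	}

	static void padlock_store_cword(struct cword *cword, int cpu)
	{
		last_cword[cpu] = cword;	/* remember what this CPU used last */
	}

	int main(void)
	{
		struct cword enc = { .rounds = 10 };

		padlock_reset_key(&enc, 0);	/* first use on CPU 0: reloads */
		padlock_store_cword(&enc, 0);
		padlock_reset_key(&enc, 0);	/* same cword again: no reload */
		padlock_store_cword(&enc, 0);
		return 0;
	}

The design mirrors the diff: reset before the operation if the cached pointer differs, store the pointer afterwards, and (in the real driver) invalidate the per-CPU cache in aes_set_key so a rekeyed tfm cannot be mistaken for the previously loaded one.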