author		Chuck Ebbert <cebbert@redhat.com>	2009-06-18 07:24:10 -0400
committer	Herbert Xu <herbert@gondor.apana.org.au>	2009-06-18 07:24:10 -0400
commit		a76c1c23d0c33d98f2d9b36e76e7f71289fc8391 (patch)
tree		c04959c24c8db5e8353e2cbab50fedc6cb4f110b /drivers/crypto
parent		1d89b30cc9be41af87881682ec82e2c107849dbe (diff)
crypto: padlock-aes - work around Nano CPU errata in ECB mode
The VIA Nano processor has a bug that makes it prefetch extra data during encryption operations, causing spurious page faults. Extend the existing workarounds for ECB mode to copy the data to a temporary buffer, avoiding the problem.

Signed-off-by: Chuck Ebbert <cebbert@redhat.com>
Acked-by: Harald Welte <HaraldWelte@viatech.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Diffstat (limited to 'drivers/crypto')
-rw-r--r--	drivers/crypto/padlock-aes.c	81
1 file changed, 46 insertions, 35 deletions
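For readers unfamiliar with the errata, the decision the patch has to make is purely an address calculation: if the xcrypt engine may fetch ecb_fetch_bytes starting at the input pointer, and that fetch would run past the end of the 4 KiB page holding the data, the blocks are first bounced through an aligned temporary buffer. The standalone C sketch below illustrates that test, assuming a 4096-byte page and the 8-block fetch size the patch enables for Nano stepping 2; the helper name needs_bounce_copy and the constants are illustrative only, not identifiers from the driver.

/*
 * Standalone sketch (not driver code) of the bounce-copy decision:
 * would a FETCH_BYTES read starting at 'addr' cross the end of the
 * 4 KiB page that holds the input data?
 */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE   4096UL
#define AES_BLOCK   16UL
#define FETCH_BYTES (8 * AES_BLOCK)	/* Nano stepping 2: 8-block fetch */

static bool needs_bounce_copy(unsigned long addr)
{
	unsigned long offset = addr & (PAGE_SIZE - 1);	/* offset within the page */

	/* true if a FETCH_BYTES read at 'addr' would run past the page end */
	return offset + FETCH_BYTES > PAGE_SIZE;
}

int main(void)
{
	/* a block starting 16 bytes before a page boundary must be copied */
	printf("%d\n", needs_bounce_copy(0x1000UL - AES_BLOCK));	/* prints 1 */
	/* a block well inside the page can be fed to xcrypt directly */
	printf("%d\n", needs_bounce_copy(0x1200UL));			/* prints 0 */
	return 0;
}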
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index 87f92c39b5f0..e1d8776c6972 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -18,9 +18,17 @@
 #include <linux/percpu.h>
 #include <linux/smp.h>
 #include <asm/byteorder.h>
+#include <asm/processor.h>
 #include <asm/i387.h>
 #include "padlock.h"
 
+/* number of data blocks actually fetched for each xcrypt insn */
+static unsigned int ecb_fetch_blocks = 2;
+static unsigned int cbc_fetch_blocks = 1;
+
+#define ecb_fetch_bytes (ecb_fetch_blocks * AES_BLOCK_SIZE)
+#define cbc_fetch_bytes (cbc_fetch_blocks * AES_BLOCK_SIZE)
+
 /* Control word. */
 struct cword {
 	unsigned int __attribute__ ((__packed__))
@@ -173,63 +181,59 @@ static inline void padlock_store_cword(struct cword *cword)
  */
 
 static inline void padlock_xcrypt(const u8 *input, u8 *output, void *key,
-				  struct cword *control_word)
+				  struct cword *control_word, int count)
 {
 	asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
 		      : "+S"(input), "+D"(output)
-		      : "d"(control_word), "b"(key), "c"(1));
+		      : "d"(control_word), "b"(key), "c"(count));
 }
 
-static void aes_crypt_copy(const u8 *in, u8 *out, u32 *key, struct cword *cword)
+static void aes_crypt_copy(const u8 *in, u8 *out, u32 *key,
+			   struct cword *cword, int count)
 {
-	u8 buf[AES_BLOCK_SIZE * 2 + PADLOCK_ALIGNMENT - 1];
+	/*
+	 * Padlock prefetches extra data so we must provide mapped input buffers.
+	 * Assume there are at least 16 bytes of stack already in use.
+	 */
+	u8 buf[AES_BLOCK_SIZE * 7 + PADLOCK_ALIGNMENT - 1];
 	u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
 
-	memcpy(tmp, in, AES_BLOCK_SIZE);
-	padlock_xcrypt(tmp, out, key, cword);
+	memcpy(tmp, in, count * AES_BLOCK_SIZE);
+	padlock_xcrypt(tmp, out, key, cword, count);
 }
 
 static inline void aes_crypt(const u8 *in, u8 *out, u32 *key,
-			     struct cword *cword)
+			     struct cword *cword, int count)
 {
-	/* padlock_xcrypt requires at least two blocks of data. */
-	if (unlikely(!(((unsigned long)in ^ (PAGE_SIZE - AES_BLOCK_SIZE)) &
-		       (PAGE_SIZE - 1)))) {
-		aes_crypt_copy(in, out, key, cword);
+	/* Padlock in ECB mode fetches at least ecb_fetch_bytes of data.
+	 * We could avoid some copying here but it's probably not worth it.
+	 */
+	if (unlikely(((unsigned long)in & PAGE_SIZE) + ecb_fetch_bytes > PAGE_SIZE)) {
+		aes_crypt_copy(in, out, key, cword, count);
 		return;
 	}
 
-	padlock_xcrypt(in, out, key, cword);
+	padlock_xcrypt(in, out, key, cword, count);
 }
 
 static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
 				      void *control_word, u32 count)
 {
-	if (count == 1) {
-		aes_crypt(input, output, key, control_word);
+	u32 initial = count & (ecb_fetch_blocks - 1);
+
+	if (count < ecb_fetch_blocks) {
+		aes_crypt(input, output, key, control_word, count);
 		return;
 	}
 
-	asm volatile ("test $1, %%cl;"
-		      "je 1f;"
-#ifndef CONFIG_X86_64
-		      "lea -1(%%ecx), %%eax;"
-		      "mov $1, %%ecx;"
-#else
-		      "lea -1(%%rcx), %%rax;"
-		      "mov $1, %%rcx;"
-#endif
-		      ".byte 0xf3,0x0f,0xa7,0xc8;"	/* rep xcryptecb */
-#ifndef CONFIG_X86_64
-		      "mov %%eax, %%ecx;"
-#else
-		      "mov %%rax, %%rcx;"
-#endif
-		      "1:"
-		      ".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
+	if (initial)
+		asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
+			      : "+S"(input), "+D"(output)
+			      : "d"(control_word), "b"(key), "c"(initial));
+
+	asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
 		      : "+S"(input), "+D"(output)
-		      : "d"(control_word), "b"(key), "c"(count)
-		      : "ax");
+		      : "d"(control_word), "b"(key), "c"(count - initial));
 }
 
 static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
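After this hunk, padlock_xcrypt_ecb no longer needs the old count-splitting assembly: since ecb_fetch_blocks is a power of two, count & (ecb_fetch_blocks - 1) gives the remainder blocks, which are issued as a short first rep xcrypt, and the rest is handled in one run that is an exact multiple of the fetch size. A minimal C sketch of that arithmetic follows, assuming the 8-block Nano fetch size; split_ecb_request is a hypothetical name used only for illustration.

/*
 * Standalone sketch (not driver code) of how the rewritten padlock_xcrypt_ecb
 * splits a request into a remainder chunk plus a multiple of the fetch size.
 */
#include <stdio.h>

static void split_ecb_request(unsigned int count, unsigned int fetch_blocks)
{
	unsigned int initial = count & (fetch_blocks - 1);	/* count % fetch_blocks */

	if (count < fetch_blocks) {
		printf("small request: %u block(s) handled by aes_crypt()\n", count);
		return;
	}
	if (initial)
		printf("first rep xcrypt: %u block(s)\n", initial);
	printf("second rep xcrypt: %u block(s)\n", count - initial);
}

int main(void)
{
	split_ecb_request(3, 8);	/* 3 blocks, all through aes_crypt()        */
	split_ecb_request(21, 8);	/* 5 blocks first, then 16 blocks           */
	split_ecb_request(16, 8);	/* no remainder, one 16-block rep xcrypt    */
	return 0;
}

Requests shorter than one whole fetch go through aes_crypt(), which applies the page-end check shown earlier and bounces the data through the temporary buffer when needed.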
@@ -249,7 +253,7 @@ static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 
 	padlock_reset_key(&ctx->cword.encrypt);
 	ts_state = irq_ts_save();
-	aes_crypt(in, out, ctx->E, &ctx->cword.encrypt);
+	aes_crypt(in, out, ctx->E, &ctx->cword.encrypt, 1);
 	irq_ts_restore(ts_state);
 	padlock_store_cword(&ctx->cword.encrypt);
 }
@@ -261,7 +265,7 @@ static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 
 	padlock_reset_key(&ctx->cword.encrypt);
 	ts_state = irq_ts_save();
-	aes_crypt(in, out, ctx->D, &ctx->cword.decrypt);
+	aes_crypt(in, out, ctx->D, &ctx->cword.decrypt, 1);
 	irq_ts_restore(ts_state);
 	padlock_store_cword(&ctx->cword.encrypt);
 }
@@ -454,6 +458,7 @@ static struct crypto_alg cbc_aes_alg = {
 static int __init padlock_init(void)
 {
 	int ret;
+	struct cpuinfo_x86 *c = &cpu_data(0);
 
 	if (!cpu_has_xcrypt) {
 		printk(KERN_NOTICE PFX "VIA PadLock not detected.\n");
@@ -476,6 +481,12 @@ static int __init padlock_init(void)
 
 	printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n");
 
+	if (c->x86 == 6 && c->x86_model == 15 && c->x86_mask == 2) {
+		ecb_fetch_blocks = 8;
+		cbc_fetch_blocks = 4; /* NOTE: notused */
+		printk(KERN_NOTICE PFX "VIA Nano stepping 2 detected: enabling workaround.\n");
+	}
+
 out:
 	return ret;
 
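As a sanity check on the sizes involved (a sketch under the patch's own assumptions, not driver code): with ecb_fetch_blocks raised to 8, one xcrypt run may touch 128 bytes, while aes_crypt_copy() only ever bounces at most 7 blocks, because larger counts take the direct path in padlock_xcrypt_ecb; the remaining 16 bytes are covered by the "at least 16 bytes of stack already in use" assumption noted in the new comment.

/* Standalone arithmetic check (not driver code) of the sizes the patch sets up. */
#include <stdio.h>

#define AES_BLOCK_SIZE 16

int main(void)
{
	unsigned int ecb_fetch_blocks = 2;		/* default */
	unsigned int nano_stepping2 = 1;		/* pretend the detection fired */

	if (nano_stepping2)
		ecb_fetch_blocks = 8;			/* quirk applied in padlock_init() */

	unsigned int ecb_fetch_bytes = ecb_fetch_blocks * AES_BLOCK_SIZE;
	unsigned int copy_buf = AES_BLOCK_SIZE * 7;	/* buf[] in aes_crypt_copy() */
	unsigned int stack_slack = 16;			/* "at least 16 bytes of stack" */

	printf("xcrypt may fetch %u bytes per run\n", ecb_fetch_bytes);	/* 128 */
	printf("bounce buffer %u + slack %u = %u bytes covered\n",
	       copy_buf, stack_slack, copy_buf + stack_slack);		/* 128 */
	return 0;
}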