author     Linus Torvalds <torvalds@linux-foundation.org>  2008-01-25 11:38:25 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2008-01-25 11:38:25 -0500
commit     eba0e319c12fb098d66316a8eafbaaa9174a07c3 (patch)
tree       b2703117db9e36bb3510654efd55361f61c54742 /drivers/crypto/geode-aes.c
parent     df8dc74e8a383eaf2d9b44b80a71ec6f0e52b42e (diff)
parent     15e7b4452b72ae890f2fcb027b4c4fa63a1c9a7a (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (125 commits)
  [CRYPTO] twofish: Merge common glue code
  [CRYPTO] hifn_795x: Fixup container_of() usage
  [CRYPTO] cast6: inline bloat--
  [CRYPTO] api: Set default CRYPTO_MINALIGN to unsigned long long
  [CRYPTO] tcrypt: Make xcbc available as a standalone test
  [CRYPTO] xcbc: Remove bogus hash/cipher test
  [CRYPTO] xcbc: Fix algorithm leak when block size check fails
  [CRYPTO] tcrypt: Zero axbuf in the right function
  [CRYPTO] padlock: Only reset the key once for each CBC and ECB operation
  [CRYPTO] api: Include sched.h for cond_resched in scatterwalk.h
  [CRYPTO] salsa20-asm: Remove unnecessary dependency on CRYPTO_SALSA20
  [CRYPTO] tcrypt: Add select of AEAD
  [CRYPTO] salsa20: Add x86-64 assembly version
  [CRYPTO] salsa20_i586: Salsa20 stream cipher algorithm (i586 version)
  [CRYPTO] gcm: Introduce rfc4106
  [CRYPTO] api: Show async type
  [CRYPTO] chainiv: Avoid lock spinning where possible
  [CRYPTO] seqiv: Add select AEAD in Kconfig
  [CRYPTO] scatterwalk: Handle zero nbytes in scatterwalk_map_and_copy
  [CRYPTO] null: Allow setkey on digest_null
  ...
Diffstat (limited to 'drivers/crypto/geode-aes.c')
-rw-r--r--  drivers/crypto/geode-aes.c  298
1 file changed, 211 insertions(+), 87 deletions(-)
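
This merge brings in the Geode AES fallback work: the LX security block only
implements AES with 128-bit keys, so setkey now records the key length, lets
128-bit keys run on the hardware, and routes 192- and 256-bit keys to a
dynamically allocated software implementation. The diff below uses op->keylen
and the op->fallback union, which are declared in the companion geode-aes.h
change (not covered by this diffstat). A sketch of what that header presumably
gains, reconstructed from the way the fields are used in geode-aes.c — treat
the exact layout as an assumption:

	/* Inferred geode-aes.h context (not shown in this diff) */
	struct geode_aes_op {
		void *src;
		void *dst;

		u32 mode;
		u32 dir;
		u32 flags;
		int len;

		u8 key[AES_KEY_LENGTH];
		u8 *iv;		/* now a pointer; see the CBC hunks below */

		union {
			struct crypto_blkcipher *blk;
			struct crypto_cipher *cip;
		} fallback;
		u32 keylen;
	};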
diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c
index 711e246e1ef0..4801162919d9 100644
--- a/drivers/crypto/geode-aes.c
+++ b/drivers/crypto/geode-aes.c
@@ -13,44 +13,13 @@
 #include <linux/crypto.h>
 #include <linux/spinlock.h>
 #include <crypto/algapi.h>
+#include <crypto/aes.h>
 
 #include <asm/io.h>
 #include <asm/delay.h>
 
 #include "geode-aes.h"
 
-/* Register definitions */
-
-#define AES_CTRLA_REG		0x0000
-
-#define AES_CTRL_START		0x01
-#define AES_CTRL_DECRYPT	0x00
-#define AES_CTRL_ENCRYPT	0x02
-#define AES_CTRL_WRKEY		0x04
-#define AES_CTRL_DCA		0x08
-#define AES_CTRL_SCA		0x10
-#define AES_CTRL_CBC		0x20
-
-#define AES_INTR_REG		0x0008
-
-#define AES_INTRA_PENDING	(1 << 16)
-#define AES_INTRB_PENDING	(1 << 17)
-
-#define AES_INTR_PENDING	(AES_INTRA_PENDING | AES_INTRB_PENDING)
-#define AES_INTR_MASK		0x07
-
-#define AES_SOURCEA_REG		0x0010
-#define AES_DSTA_REG		0x0014
-#define AES_LENA_REG		0x0018
-#define AES_WRITEKEY0_REG	0x0030
-#define AES_WRITEIV0_REG	0x0040
-
-/* A very large counter that is used to gracefully bail out of an
- * operation in case of trouble
- */
-
-#define AES_OP_TIMEOUT		0x50000
-
 /* Static structures */
 
 static void __iomem * _iobase;
@@ -87,9 +56,10 @@ do_crypt(void *src, void *dst, int len, u32 flags)
 	/* Start the operation */
 	iowrite32(AES_CTRL_START | flags, _iobase + AES_CTRLA_REG);
 
-	do
+	do {
 		status = ioread32(_iobase + AES_INTR_REG);
-	while(!(status & AES_INTRA_PENDING) && --counter);
+		cpu_relax();
+	} while(!(status & AES_INTRA_PENDING) && --counter);
 
 	/* Clear the event */
 	iowrite32((status & 0xFF) | AES_INTRA_PENDING, _iobase + AES_INTR_REG);
@@ -101,6 +71,7 @@ geode_aes_crypt(struct geode_aes_op *op)
 {
 	u32 flags = 0;
 	unsigned long iflags;
+	int ret;
 
 	if (op->len == 0)
 		return 0;
@@ -129,7 +100,8 @@ geode_aes_crypt(struct geode_aes_op *op)
 		_writefield(AES_WRITEKEY0_REG, op->key);
 	}
 
-	do_crypt(op->src, op->dst, op->len, flags);
+	ret = do_crypt(op->src, op->dst, op->len, flags);
+	BUG_ON(ret);
 
 	if (op->mode == AES_MODE_CBC)
 		_readfield(AES_WRITEIV0_REG, op->iv);
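
For context on the new BUG_ON(): do_crypt() reports a poll timeout through its
return value, which was previously ignored. The function as it stands after
this patch — the lines outside the hunks above are a reconstruction of the
unchanged file and should be read as illustrative:

	static int
	do_crypt(void *src, void *dst, int len, u32 flags)
	{
		u32 status;
		u32 counter = AES_OP_TIMEOUT;

		iowrite32(virt_to_phys(src), _iobase + AES_SOURCEA_REG);
		iowrite32(virt_to_phys(dst), _iobase + AES_DSTA_REG);
		iowrite32(len, _iobase + AES_LENA_REG);

		/* Start the operation */
		iowrite32(AES_CTRL_START | flags, _iobase + AES_CTRLA_REG);

		do {
			status = ioread32(_iobase + AES_INTR_REG);
			cpu_relax();
		} while (!(status & AES_INTRA_PENDING) && --counter);

		/* Clear the event */
		iowrite32((status & 0xFF) | AES_INTRA_PENDING, _iobase + AES_INTR_REG);

		/* 0 on completion, 1 if the AES_OP_TIMEOUT budget ran out */
		return counter ? 0 : 1;
	}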
@@ -141,18 +113,103 @@ geode_aes_crypt(struct geode_aes_op *op)
 
 /* CRYPTO-API Functions */
 
-static int
-geode_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int len)
+static int geode_setkey_cip(struct crypto_tfm *tfm, const u8 *key,
+		unsigned int len)
 {
 	struct geode_aes_op *op = crypto_tfm_ctx(tfm);
+	unsigned int ret;
 
-	if (len != AES_KEY_LENGTH) {
+	op->keylen = len;
+
+	if (len == AES_KEYSIZE_128) {
+		memcpy(op->key, key, len);
+		return 0;
+	}
+
+	if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) {
+		/* not supported at all */
 		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
 		return -EINVAL;
 	}
 
-	memcpy(op->key, key, len);
-	return 0;
+	/*
+	 * The requested key size is not supported by HW, do a fallback
+	 */
+	op->fallback.blk->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
+	op->fallback.blk->base.crt_flags |= (tfm->crt_flags & CRYPTO_TFM_REQ_MASK);
+
+	ret = crypto_cipher_setkey(op->fallback.cip, key, len);
+	if (ret) {
+		tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
+		tfm->crt_flags |= (op->fallback.blk->base.crt_flags & CRYPTO_TFM_RES_MASK);
+	}
+	return ret;
+}
+
+static int geode_setkey_blk(struct crypto_tfm *tfm, const u8 *key,
+		unsigned int len)
+{
+	struct geode_aes_op *op = crypto_tfm_ctx(tfm);
+	unsigned int ret;
+
+	op->keylen = len;
+
+	if (len == AES_KEYSIZE_128) {
+		memcpy(op->key, key, len);
+		return 0;
+	}
+
+	if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) {
+		/* not supported at all */
+		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+		return -EINVAL;
+	}
+
+	/*
+	 * The requested key size is not supported by HW, do a fallback
+	 */
+	op->fallback.blk->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
+	op->fallback.blk->base.crt_flags |= (tfm->crt_flags & CRYPTO_TFM_REQ_MASK);
+
+	ret = crypto_blkcipher_setkey(op->fallback.blk, key, len);
+	if (ret) {
+		tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
+		tfm->crt_flags |= (op->fallback.blk->base.crt_flags & CRYPTO_TFM_RES_MASK);
+	}
+	return ret;
+}
+
+static int fallback_blk_dec(struct blkcipher_desc *desc,
+		struct scatterlist *dst, struct scatterlist *src,
+		unsigned int nbytes)
+{
+	unsigned int ret;
+	struct crypto_blkcipher *tfm;
+	struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
+
+	tfm = desc->tfm;
+	desc->tfm = op->fallback.blk;
+
+	ret = crypto_blkcipher_decrypt_iv(desc, dst, src, nbytes);
+
+	desc->tfm = tfm;
+	return ret;
+}
+static int fallback_blk_enc(struct blkcipher_desc *desc,
+		struct scatterlist *dst, struct scatterlist *src,
+		unsigned int nbytes)
+{
+	unsigned int ret;
+	struct crypto_blkcipher *tfm;
+	struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
+
+	tfm = desc->tfm;
+	desc->tfm = op->fallback.blk;
+
+	ret = crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes);
+
+	desc->tfm = tfm;
+	return ret;
 }
 
 static void
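
Two details in the new code above are easy to miss. First,
fallback_blk_dec()/fallback_blk_enc() temporarily point desc->tfm at the
software blkcipher so that crypto_blkcipher_decrypt_iv() and
crypto_blkcipher_encrypt_iv() run against the fallback, then restore the
original tfm before returning. Second, geode_setkey_cip() touches
op->fallback.blk->base.crt_flags even though it is on the cipher path;
fallback is a union and both member types begin with a struct crypto_tfm
base, so either spelling reaches the same flags word. A minimal caller
sketch (hypothetical demo function, 2.6.24-era API assumed) showing how an
unsupported key size ends up in the fallback:

	#include <linux/err.h>
	#include <linux/crypto.h>
	#include <crypto/aes.h>

	static int demo_aes256_setkey(const u8 *key)
	{
		struct crypto_cipher *tfm;
		int ret;

		tfm = crypto_alloc_cipher("aes", 0, 0); /* may pick geode-aes */
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		/* 32-byte key: geode_setkey_cip() stores op->keylen and
		 * forwards the key to the software fallback instead of the
		 * hardware key registers. */
		ret = crypto_cipher_setkey(tfm, key, AES_KEYSIZE_256);

		crypto_free_cipher(tfm);
		return ret;
	}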
@@ -160,8 +217,10 @@ geode_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 {
 	struct geode_aes_op *op = crypto_tfm_ctx(tfm);
 
-	if ((out == NULL) || (in == NULL))
+	if (unlikely(op->keylen != AES_KEYSIZE_128)) {
+		crypto_cipher_encrypt_one(op->fallback.cip, out, in);
 		return;
+	}
 
 	op->src = (void *) in;
 	op->dst = (void *) out;
@@ -179,8 +238,10 @@ geode_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 {
 	struct geode_aes_op *op = crypto_tfm_ctx(tfm);
 
-	if ((out == NULL) || (in == NULL))
+	if (unlikely(op->keylen != AES_KEYSIZE_128)) {
+		crypto_cipher_decrypt_one(op->fallback.cip, out, in);
 		return;
+	}
 
 	op->src = (void *) in;
 	op->dst = (void *) out;
@@ -192,24 +253,50 @@ geode_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 	geode_aes_crypt(op);
 }
 
+static int fallback_init_cip(struct crypto_tfm *tfm)
+{
+	const char *name = tfm->__crt_alg->cra_name;
+	struct geode_aes_op *op = crypto_tfm_ctx(tfm);
+
+	op->fallback.cip = crypto_alloc_cipher(name, 0,
+			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+
+	if (IS_ERR(op->fallback.cip)) {
+		printk(KERN_ERR "Error allocating fallback algo %s\n", name);
+		return PTR_ERR(op->fallback.cip);
+	}
+
+	return 0;
+}
+
+static void fallback_exit_cip(struct crypto_tfm *tfm)
+{
+	struct geode_aes_op *op = crypto_tfm_ctx(tfm);
+
+	crypto_free_cipher(op->fallback.cip);
+	op->fallback.cip = NULL;
+}
 
 static struct crypto_alg geode_alg = {
 	.cra_name		=	"aes",
-	.cra_driver_name	=	"geode-aes-128",
+	.cra_driver_name	=	"geode-aes",
 	.cra_priority		=	300,
 	.cra_alignmask		=	15,
-	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER,
+	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER |
+						CRYPTO_ALG_NEED_FALLBACK,
+	.cra_init		=	fallback_init_cip,
+	.cra_exit		=	fallback_exit_cip,
 	.cra_blocksize		=	AES_MIN_BLOCK_SIZE,
 	.cra_ctxsize		=	sizeof(struct geode_aes_op),
 	.cra_module		=	THIS_MODULE,
 	.cra_list		=	LIST_HEAD_INIT(geode_alg.cra_list),
 	.cra_u			=	{
 		.cipher	=	{
-			.cia_min_keysize	=	AES_KEY_LENGTH,
-			.cia_max_keysize	=	AES_KEY_LENGTH,
-			.cia_setkey		=	geode_setkey,
+			.cia_min_keysize	=	AES_MIN_KEY_SIZE,
+			.cia_max_keysize	=	AES_MAX_KEY_SIZE,
+			.cia_setkey		=	geode_setkey_cip,
 			.cia_encrypt		=	geode_encrypt,
 			.cia_decrypt		=	geode_decrypt
 		}
 	}
 };
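
A note on the crypto_alloc_cipher() mask above: the core only returns
algorithms whose cra_flags agree with the requested type on every bit set in
the mask. Asking with CRYPTO_ALG_NEED_FALLBACK set in the mask (and clear in
the type) therefore excludes any implementation that itself advertises
CRYPTO_ALG_NEED_FALLBACK — including geode_alg after this patch — so the
driver can never recursively select itself as its own fallback. A simplified
illustration of that matching rule (hypothetical helper, condensed from the
behaviour of crypto/api.c):

	#include <linux/types.h>

	/* A candidate algorithm matches only if every bit selected by
	 * 'mask' has the same value in 'type' and in the candidate's
	 * cra_flags. */
	static inline bool alg_flags_match(u32 cra_flags, u32 type, u32 mask)
	{
		return ((cra_flags ^ type) & mask) == 0;
	}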
@@ -223,8 +310,12 @@ geode_cbc_decrypt(struct blkcipher_desc *desc,
 	struct blkcipher_walk walk;
 	int err, ret;
 
+	if (unlikely(op->keylen != AES_KEYSIZE_128))
+		return fallback_blk_dec(desc, dst, src, nbytes);
+
 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
+	op->iv = walk.iv;
 
 	while((nbytes = walk.nbytes)) {
 		op->src = walk.src.virt.addr,
@@ -233,13 +324,9 @@ geode_cbc_decrypt(struct blkcipher_desc *desc,
 		op->len = nbytes - (nbytes % AES_MIN_BLOCK_SIZE);
 		op->dir = AES_DIR_DECRYPT;
 
-		memcpy(op->iv, walk.iv, AES_IV_LENGTH);
-
 		ret = geode_aes_crypt(op);
 
-		memcpy(walk.iv, op->iv, AES_IV_LENGTH);
 		nbytes -= ret;
-
 		err = blkcipher_walk_done(desc, &walk, nbytes);
 	}
 
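
Why the memcpy() pair can go away: op->iv is now a pointer aimed at walk.iv
before the loop (see the previous hunk) instead of a private 16-byte buffer,
and geode_aes_crypt() already loads and stores the chaining value through it.
Abridged from the unchanged parts of geode_aes_crypt() elsewhere in this file:

	if (op->mode == AES_MODE_CBC) {
		flags |= AES_CTRL_CBC;
		_writefield(AES_WRITEIV0_REG, op->iv);	/* load IV */
	}
	...
	if (op->mode == AES_MODE_CBC)
		_readfield(AES_WRITEIV0_REG, op->iv);	/* store updated IV */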
@@ -255,8 +342,12 @@ geode_cbc_encrypt(struct blkcipher_desc *desc,
 	struct blkcipher_walk walk;
 	int err, ret;
 
+	if (unlikely(op->keylen != AES_KEYSIZE_128))
+		return fallback_blk_enc(desc, dst, src, nbytes);
+
 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
+	op->iv = walk.iv;
 
 	while((nbytes = walk.nbytes)) {
 		op->src = walk.src.virt.addr,
@@ -265,8 +356,6 @@ geode_cbc_encrypt(struct blkcipher_desc *desc,
 		op->len = nbytes - (nbytes % AES_MIN_BLOCK_SIZE);
 		op->dir = AES_DIR_ENCRYPT;
 
-		memcpy(op->iv, walk.iv, AES_IV_LENGTH);
-
 		ret = geode_aes_crypt(op);
 		nbytes -= ret;
 		err = blkcipher_walk_done(desc, &walk, nbytes);
@@ -275,22 +364,49 @@ geode_cbc_encrypt(struct blkcipher_desc *desc,
 	return err;
 }
 
+static int fallback_init_blk(struct crypto_tfm *tfm)
+{
+	const char *name = tfm->__crt_alg->cra_name;
+	struct geode_aes_op *op = crypto_tfm_ctx(tfm);
+
+	op->fallback.blk = crypto_alloc_blkcipher(name, 0,
+			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+
+	if (IS_ERR(op->fallback.blk)) {
+		printk(KERN_ERR "Error allocating fallback algo %s\n", name);
+		return PTR_ERR(op->fallback.blk);
+	}
+
+	return 0;
+}
+
+static void fallback_exit_blk(struct crypto_tfm *tfm)
+{
+	struct geode_aes_op *op = crypto_tfm_ctx(tfm);
+
+	crypto_free_blkcipher(op->fallback.blk);
+	op->fallback.blk = NULL;
+}
+
 static struct crypto_alg geode_cbc_alg = {
 	.cra_name		=	"cbc(aes)",
-	.cra_driver_name	=	"cbc-aes-geode-128",
+	.cra_driver_name	=	"cbc-aes-geode",
 	.cra_priority		=	400,
-	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER,
+	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER |
+						CRYPTO_ALG_NEED_FALLBACK,
+	.cra_init		=	fallback_init_blk,
+	.cra_exit		=	fallback_exit_blk,
 	.cra_blocksize		=	AES_MIN_BLOCK_SIZE,
 	.cra_ctxsize		=	sizeof(struct geode_aes_op),
 	.cra_alignmask		=	15,
 	.cra_type		=	&crypto_blkcipher_type,
 	.cra_module		=	THIS_MODULE,
 	.cra_list		=	LIST_HEAD_INIT(geode_cbc_alg.cra_list),
 	.cra_u			=	{
 		.blkcipher	=	{
-			.min_keysize	=	AES_KEY_LENGTH,
-			.max_keysize	=	AES_KEY_LENGTH,
-			.setkey		=	geode_setkey,
+			.min_keysize	=	AES_MIN_KEY_SIZE,
+			.max_keysize	=	AES_MAX_KEY_SIZE,
+			.setkey		=	geode_setkey_blk,
 			.encrypt	=	geode_cbc_encrypt,
 			.decrypt	=	geode_cbc_decrypt,
 			.ivsize		=	AES_IV_LENGTH,
@@ -307,6 +423,9 @@ geode_ecb_decrypt(struct blkcipher_desc *desc,
 	struct blkcipher_walk walk;
 	int err, ret;
 
+	if (unlikely(op->keylen != AES_KEYSIZE_128))
+		return fallback_blk_dec(desc, dst, src, nbytes);
+
 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
 
@@ -334,6 +453,9 @@ geode_ecb_encrypt(struct blkcipher_desc *desc,
 	struct blkcipher_walk walk;
 	int err, ret;
 
+	if (unlikely(op->keylen != AES_KEYSIZE_128))
+		return fallback_blk_enc(desc, dst, src, nbytes);
+
 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
 
@@ -353,28 +475,31 @@ geode_ecb_encrypt(struct blkcipher_desc *desc,
 }
 
 static struct crypto_alg geode_ecb_alg = {
 	.cra_name		=	"ecb(aes)",
-	.cra_driver_name	=	"ecb-aes-geode-128",
+	.cra_driver_name	=	"ecb-aes-geode",
 	.cra_priority		=	400,
-	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER,
+	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER |
+						CRYPTO_ALG_NEED_FALLBACK,
+	.cra_init		=	fallback_init_blk,
+	.cra_exit		=	fallback_exit_blk,
 	.cra_blocksize		=	AES_MIN_BLOCK_SIZE,
 	.cra_ctxsize		=	sizeof(struct geode_aes_op),
 	.cra_alignmask		=	15,
 	.cra_type		=	&crypto_blkcipher_type,
 	.cra_module		=	THIS_MODULE,
 	.cra_list		=	LIST_HEAD_INIT(geode_ecb_alg.cra_list),
 	.cra_u			=	{
 		.blkcipher	=	{
-			.min_keysize	=	AES_KEY_LENGTH,
-			.max_keysize	=	AES_KEY_LENGTH,
-			.setkey		=	geode_setkey,
+			.min_keysize	=	AES_MIN_KEY_SIZE,
+			.max_keysize	=	AES_MAX_KEY_SIZE,
+			.setkey		=	geode_setkey_blk,
 			.encrypt	=	geode_ecb_encrypt,
 			.decrypt	=	geode_ecb_decrypt,
 		}
 	}
 };
 
-static void
+static void __devexit
 geode_aes_remove(struct pci_dev *dev)
 {
 	crypto_unregister_alg(&geode_alg);
@@ -389,7 +514,7 @@ geode_aes_remove(struct pci_dev *dev)
 }
 
 
-static int
+static int __devinit
 geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id)
 {
 	int ret;
@@ -397,7 +522,7 @@ geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id)
 	if ((ret = pci_enable_device(dev)))
 		return ret;
 
-	if ((ret = pci_request_regions(dev, "geode-aes-128")))
+	if ((ret = pci_request_regions(dev, "geode-aes")))
 		goto eenable;
 
 	_iobase = pci_iomap(dev, 0, 0);
@@ -472,7 +597,6 @@ geode_aes_exit(void)
 MODULE_AUTHOR("Advanced Micro Devices, Inc.");
 MODULE_DESCRIPTION("Geode LX Hardware AES driver");
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("aes");
 
 module_init(geode_aes_init);
 module_exit(geode_aes_exit);
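
Seen end to end, the fallback is exercised like this — a sketch under the
2.6.24-era synchronous blkcipher API, where demo_cbc_aes256() and its
parameters are hypothetical and not part of the patch. With a 32-byte key,
geode_cbc_encrypt() branches to fallback_blk_enc(); with a 16-byte key the
same call runs on the AES engine:

	#include <linux/err.h>
	#include <linux/crypto.h>
	#include <linux/scatterlist.h>
	#include <crypto/aes.h>

	static int demo_cbc_aes256(u8 *buf, unsigned int len,
				   const u8 *key, const u8 *iv)
	{
		struct crypto_blkcipher *tfm;
		struct blkcipher_desc desc;
		struct scatterlist sg;
		int ret;

		tfm = crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		/* 32-byte key: geode_setkey_blk() keys the software fallback */
		ret = crypto_blkcipher_setkey(tfm, key, AES_KEYSIZE_256);
		if (ret)
			goto out;

		crypto_blkcipher_set_iv(tfm, iv, AES_BLOCK_SIZE);

		desc.tfm = tfm;
		desc.flags = 0;
		sg_init_one(&sg, buf, len);

		/* len must be a multiple of AES_BLOCK_SIZE for CBC */
		ret = crypto_blkcipher_encrypt(&desc, &sg, &sg, len);
	out:
		crypto_free_blkcipher(tfm);
		return ret;
	}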