author	Linus Torvalds <torvalds@linux-foundation.org>	2008-01-25 11:38:25 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-01-25 11:38:25 -0500
commit	eba0e319c12fb098d66316a8eafbaaa9174a07c3 (patch)
tree	b2703117db9e36bb3510654efd55361f61c54742 /drivers/crypto
parent	df8dc74e8a383eaf2d9b44b80a71ec6f0e52b42e (diff)
parent	15e7b4452b72ae890f2fcb027b4c4fa63a1c9a7a (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (125 commits)
  [CRYPTO] twofish: Merge common glue code
  [CRYPTO] hifn_795x: Fixup container_of() usage
  [CRYPTO] cast6: inline bloat--
  [CRYPTO] api: Set default CRYPTO_MINALIGN to unsigned long long
  [CRYPTO] tcrypt: Make xcbc available as a standalone test
  [CRYPTO] xcbc: Remove bogus hash/cipher test
  [CRYPTO] xcbc: Fix algorithm leak when block size check fails
  [CRYPTO] tcrypt: Zero axbuf in the right function
  [CRYPTO] padlock: Only reset the key once for each CBC and ECB operation
  [CRYPTO] api: Include sched.h for cond_resched in scatterwalk.h
  [CRYPTO] salsa20-asm: Remove unnecessary dependency on CRYPTO_SALSA20
  [CRYPTO] tcrypt: Add select of AEAD
  [CRYPTO] salsa20: Add x86-64 assembly version
  [CRYPTO] salsa20_i586: Salsa20 stream cipher algorithm (i586 version)
  [CRYPTO] gcm: Introduce rfc4106
  [CRYPTO] api: Show async type
  [CRYPTO] chainiv: Avoid lock spinning where possible
  [CRYPTO] seqiv: Add select AEAD in Kconfig
  [CRYPTO] scatterwalk: Handle zero nbytes in scatterwalk_map_and_copy
  [CRYPTO] null: Allow setkey on digest_null
  ...
Diffstat (limited to 'drivers/crypto')
-rw-r--r--	drivers/crypto/Kconfig		11
-rw-r--r--	drivers/crypto/Makefile		1
-rw-r--r--	drivers/crypto/geode-aes.c	298
-rw-r--r--	drivers/crypto/geode-aes.h	44
-rw-r--r--	drivers/crypto/hifn_795x.c	2838
-rw-r--r--	drivers/crypto/padlock-aes.c	24
6 files changed, 3118 insertions, 98 deletions
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index ddd3a259cea1..74bd599dfb0c 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -83,4 +83,15 @@ config ZCRYPT_MONOLITHIC
 	  that contains all parts of the crypto device driver (ap bus,
 	  request router and all the card drivers).
 
+config CRYPTO_DEV_HIFN_795X
+	tristate "Driver HIFN 795x crypto accelerator chips"
+	select CRYPTO_DES
+	select CRYPTO_ALGAPI
+	select CRYPTO_BLKCIPHER
+	depends on PCI
+	help
+	  This option allows you to have support for HIFN 795x crypto adapters.
+
+
+
 endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index d070030f7d7e..c0327f0dadc5 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -1,3 +1,4 @@
 obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o
 obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o
 obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
+obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
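
Once CRYPTO_DEV_HIFN_795X is enabled, the driver registers its algorithms with the kernel crypto API, so callers are unchanged: they ask for a transform by name and the core picks the highest-priority implementation matching the requested type/mask. A minimal sketch of the synchronous caller side, using the blkcipher interface of this kernel generation (key/iv/buf/len are illustrative, error checks trimmed):

	struct crypto_blkcipher *tfm;
	struct blkcipher_desc desc;
	struct scatterlist sg;

	/* Returns the highest-priority synchronous "cbc(aes)" provider. */
	tfm = crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	crypto_blkcipher_setkey(tfm, key, 16);	/* AES-128 example key */
	crypto_blkcipher_set_iv(tfm, iv, 16);

	sg_init_one(&sg, buf, len);		/* len: multiple of 16 */
	desc.tfm = tfm;
	desc.flags = 0;
	crypto_blkcipher_encrypt(&desc, &sg, &sg, len);

	crypto_free_blkcipher(tfm);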
diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c
index 711e246e1ef0..4801162919d9 100644
--- a/drivers/crypto/geode-aes.c
+++ b/drivers/crypto/geode-aes.c
@@ -13,44 +13,13 @@
 #include <linux/crypto.h>
 #include <linux/spinlock.h>
 #include <crypto/algapi.h>
+#include <crypto/aes.h>
 
 #include <asm/io.h>
 #include <asm/delay.h>
 
 #include "geode-aes.h"
 
-/* Register definitions */
-
-#define AES_CTRLA_REG	0x0000
-
-#define AES_CTRL_START		0x01
-#define AES_CTRL_DECRYPT	0x00
-#define AES_CTRL_ENCRYPT	0x02
-#define AES_CTRL_WRKEY		0x04
-#define AES_CTRL_DCA		0x08
-#define AES_CTRL_SCA		0x10
-#define AES_CTRL_CBC		0x20
-
-#define AES_INTR_REG		0x0008
-
-#define AES_INTRA_PENDING	(1 << 16)
-#define AES_INTRB_PENDING	(1 << 17)
-
-#define AES_INTR_PENDING	(AES_INTRA_PENDING | AES_INTRB_PENDING)
-#define AES_INTR_MASK		0x07
-
-#define AES_SOURCEA_REG		0x0010
-#define AES_DSTA_REG		0x0014
-#define AES_LENA_REG		0x0018
-#define AES_WRITEKEY0_REG	0x0030
-#define AES_WRITEIV0_REG	0x0040
-
-/* A very large counter that is used to gracefully bail out of an
- * operation in case of trouble
- */
-
-#define AES_OP_TIMEOUT		0x50000
-
 /* Static structures */
 
 static void __iomem * _iobase;
@@ -87,9 +56,10 @@ do_crypt(void *src, void *dst, int len, u32 flags)
 	/* Start the operation */
 	iowrite32(AES_CTRL_START | flags, _iobase + AES_CTRLA_REG);
 
-	do
+	do {
 		status = ioread32(_iobase + AES_INTR_REG);
-	while(!(status & AES_INTRA_PENDING) && --counter);
+		cpu_relax();
+	} while(!(status & AES_INTRA_PENDING) && --counter);
 
 	/* Clear the event */
 	iowrite32((status & 0xFF) | AES_INTRA_PENDING, _iobase + AES_INTR_REG);
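
The loop body above gains cpu_relax(): on x86 it emits the PAUSE hint, which saves power and yields pipeline resources to an SMT sibling while spinning, and it also acts as a compiler barrier so the register read is not hoisted out of the loop. The bounded-poll pattern in isolation (a sketch; STATUS_REG and DONE_BIT are illustrative names, not driver registers):

	u32 status;
	int counter = AES_OP_TIMEOUT;	/* bail out instead of hanging */

	do {
		status = ioread32(iobase + STATUS_REG);
		cpu_relax();		/* PAUSE hint + compiler barrier */
	} while (!(status & DONE_BIT) && --counter);

	if (!counter)
		printk(KERN_ERR "device never signalled completion\n");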
@@ -101,6 +71,7 @@ geode_aes_crypt(struct geode_aes_op *op)
 {
 	u32 flags = 0;
 	unsigned long iflags;
+	int ret;
 
 	if (op->len == 0)
 		return 0;
@@ -129,7 +100,8 @@ geode_aes_crypt(struct geode_aes_op *op)
 		_writefield(AES_WRITEKEY0_REG, op->key);
 	}
 
-	do_crypt(op->src, op->dst, op->len, flags);
+	ret = do_crypt(op->src, op->dst, op->len, flags);
+	BUG_ON(ret);
 
 	if (op->mode == AES_MODE_CBC)
 		_readfield(AES_WRITEIV0_REG, op->iv);
@@ -141,18 +113,103 @@ geode_aes_crypt(struct geode_aes_op *op)
 
 /* CRYPTO-API Functions */
 
-static int
-geode_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int len)
+static int geode_setkey_cip(struct crypto_tfm *tfm, const u8 *key,
+		unsigned int len)
 {
 	struct geode_aes_op *op = crypto_tfm_ctx(tfm);
+	unsigned int ret;
 
-	if (len != AES_KEY_LENGTH) {
+	op->keylen = len;
+
+	if (len == AES_KEYSIZE_128) {
+		memcpy(op->key, key, len);
+		return 0;
+	}
+
+	if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) {
+		/* not supported at all */
 		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
 		return -EINVAL;
 	}
 
-	memcpy(op->key, key, len);
-	return 0;
+	/*
+	 * The requested key size is not supported by HW, do a fallback
+	 */
+	op->fallback.blk->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
+	op->fallback.blk->base.crt_flags |= (tfm->crt_flags & CRYPTO_TFM_REQ_MASK);
+
+	ret = crypto_cipher_setkey(op->fallback.cip, key, len);
+	if (ret) {
+		tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
+		tfm->crt_flags |= (op->fallback.blk->base.crt_flags & CRYPTO_TFM_RES_MASK);
+	}
+	return ret;
+}
+
+static int geode_setkey_blk(struct crypto_tfm *tfm, const u8 *key,
+		unsigned int len)
+{
+	struct geode_aes_op *op = crypto_tfm_ctx(tfm);
+	unsigned int ret;
+
+	op->keylen = len;
+
+	if (len == AES_KEYSIZE_128) {
+		memcpy(op->key, key, len);
+		return 0;
+	}
+
+	if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) {
+		/* not supported at all */
+		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+		return -EINVAL;
+	}
+
+	/*
+	 * The requested key size is not supported by HW, do a fallback
+	 */
+	op->fallback.blk->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
+	op->fallback.blk->base.crt_flags |= (tfm->crt_flags & CRYPTO_TFM_REQ_MASK);
+
+	ret = crypto_blkcipher_setkey(op->fallback.blk, key, len);
+	if (ret) {
+		tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
+		tfm->crt_flags |= (op->fallback.blk->base.crt_flags & CRYPTO_TFM_RES_MASK);
+	}
+	return ret;
+}
+
+static int fallback_blk_dec(struct blkcipher_desc *desc,
+		struct scatterlist *dst, struct scatterlist *src,
+		unsigned int nbytes)
+{
+	unsigned int ret;
+	struct crypto_blkcipher *tfm;
+	struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
+
+	tfm = desc->tfm;
+	desc->tfm = op->fallback.blk;
+
+	ret = crypto_blkcipher_decrypt_iv(desc, dst, src, nbytes);
+
+	desc->tfm = tfm;
+	return ret;
+}
+static int fallback_blk_enc(struct blkcipher_desc *desc,
+		struct scatterlist *dst, struct scatterlist *src,
+		unsigned int nbytes)
+{
+	unsigned int ret;
+	struct crypto_blkcipher *tfm;
+	struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
+
+	tfm = desc->tfm;
+	desc->tfm = op->fallback.blk;
+
+	ret = crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes);
+
+	desc->tfm = tfm;
+	return ret;
 }
 
 static void
@@ -160,8 +217,10 @@ geode_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 {
 	struct geode_aes_op *op = crypto_tfm_ctx(tfm);
 
-	if ((out == NULL) || (in == NULL))
+	if (unlikely(op->keylen != AES_KEYSIZE_128)) {
+		crypto_cipher_encrypt_one(op->fallback.cip, out, in);
 		return;
+	}
 
 	op->src = (void *) in;
 	op->dst = (void *) out;
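
The new fallback helpers use a tfm-swap: the descriptor is briefly pointed at the software blkcipher so requests with key sizes the Geode engine cannot handle (anything but 128-bit) are processed by the generic implementation, then the hardware tfm is restored; single blocks go through crypto_cipher_encrypt_one()/decrypt_one() on the fallback cipher instead. The blkcipher pattern, condensed into one hypothetical helper (the fallback_run name is not in the patch):

	static int fallback_run(struct blkcipher_desc *desc,
				struct scatterlist *dst, struct scatterlist *src,
				unsigned int nbytes, int enc)
	{
		struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
		struct crypto_blkcipher *saved = desc->tfm;
		int ret;

		desc->tfm = op->fallback.blk;	/* hand off to software */
		ret = enc ? crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes)
			  : crypto_blkcipher_decrypt_iv(desc, dst, src, nbytes);
		desc->tfm = saved;		/* restore the hardware tfm */
		return ret;
	}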
@@ -179,8 +238,10 @@ geode_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 {
 	struct geode_aes_op *op = crypto_tfm_ctx(tfm);
 
-	if ((out == NULL) || (in == NULL))
+	if (unlikely(op->keylen != AES_KEYSIZE_128)) {
+		crypto_cipher_decrypt_one(op->fallback.cip, out, in);
 		return;
+	}
 
 	op->src = (void *) in;
 	op->dst = (void *) out;
@@ -192,24 +253,50 @@ geode_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 	geode_aes_crypt(op);
 }
 
+static int fallback_init_cip(struct crypto_tfm *tfm)
+{
+	const char *name = tfm->__crt_alg->cra_name;
+	struct geode_aes_op *op = crypto_tfm_ctx(tfm);
+
+	op->fallback.cip = crypto_alloc_cipher(name, 0,
+			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+
+	if (IS_ERR(op->fallback.cip)) {
+		printk(KERN_ERR "Error allocating fallback algo %s\n", name);
+		return PTR_ERR(op->fallback.blk);
+	}
+
+	return 0;
+}
+
+static void fallback_exit_cip(struct crypto_tfm *tfm)
+{
+	struct geode_aes_op *op = crypto_tfm_ctx(tfm);
+
+	crypto_free_cipher(op->fallback.cip);
+	op->fallback.cip = NULL;
+}
 
 static struct crypto_alg geode_alg = {
 	.cra_name		=	"aes",
-	.cra_driver_name	=	"geode-aes-128",
+	.cra_driver_name	=	"geode-aes",
 	.cra_priority		=	300,
 	.cra_alignmask		=	15,
-	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER,
+	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER |
+					CRYPTO_ALG_NEED_FALLBACK,
+	.cra_init		=	fallback_init_cip,
+	.cra_exit		=	fallback_exit_cip,
 	.cra_blocksize		=	AES_MIN_BLOCK_SIZE,
 	.cra_ctxsize		=	sizeof(struct geode_aes_op),
 	.cra_module		=	THIS_MODULE,
 	.cra_list		=	LIST_HEAD_INIT(geode_alg.cra_list),
 	.cra_u			=	{
 		.cipher	=	{
-			.cia_min_keysize	=	AES_KEY_LENGTH,
-			.cia_max_keysize	=	AES_KEY_LENGTH,
-			.cia_setkey		=	geode_setkey,
+			.cia_min_keysize	=	AES_MIN_KEY_SIZE,
+			.cia_max_keysize	=	AES_MAX_KEY_SIZE,
+			.cia_setkey		=	geode_setkey_cip,
 			.cia_encrypt		=	geode_encrypt,
 			.cia_decrypt		=	geode_decrypt
 		}
 	}
 };
@@ -223,8 +310,12 @@ geode_cbc_decrypt(struct blkcipher_desc *desc,
 	struct blkcipher_walk walk;
 	int err, ret;
 
+	if (unlikely(op->keylen != AES_KEYSIZE_128))
+		return fallback_blk_dec(desc, dst, src, nbytes);
+
 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
+	op->iv = walk.iv;
 
 	while((nbytes = walk.nbytes)) {
 		op->src = walk.src.virt.addr,
@@ -233,13 +324,9 @@ geode_cbc_decrypt(struct blkcipher_desc *desc,
 		op->len = nbytes - (nbytes % AES_MIN_BLOCK_SIZE);
 		op->dir = AES_DIR_DECRYPT;
 
-		memcpy(op->iv, walk.iv, AES_IV_LENGTH);
-
 		ret = geode_aes_crypt(op);
 
-		memcpy(walk.iv, op->iv, AES_IV_LENGTH);
 		nbytes -= ret;
-
 		err = blkcipher_walk_done(desc, &walk, nbytes);
 	}
 
@@ -255,8 +342,12 @@ geode_cbc_encrypt(struct blkcipher_desc *desc,
 	struct blkcipher_walk walk;
 	int err, ret;
 
+	if (unlikely(op->keylen != AES_KEYSIZE_128))
+		return fallback_blk_enc(desc, dst, src, nbytes);
+
 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
+	op->iv = walk.iv;
 
 	while((nbytes = walk.nbytes)) {
 		op->src = walk.src.virt.addr,
@@ -265,8 +356,6 @@ geode_cbc_encrypt(struct blkcipher_desc *desc,
 		op->len = nbytes - (nbytes % AES_MIN_BLOCK_SIZE);
 		op->dir = AES_DIR_ENCRYPT;
 
-		memcpy(op->iv, walk.iv, AES_IV_LENGTH);
-
 		ret = geode_aes_crypt(op);
 		nbytes -= ret;
 		err = blkcipher_walk_done(desc, &walk, nbytes);
@@ -275,22 +364,49 @@ geode_cbc_encrypt(struct blkcipher_desc *desc,
 	return err;
 }
 
+static int fallback_init_blk(struct crypto_tfm *tfm)
+{
+	const char *name = tfm->__crt_alg->cra_name;
+	struct geode_aes_op *op = crypto_tfm_ctx(tfm);
+
+	op->fallback.blk = crypto_alloc_blkcipher(name, 0,
+			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+
+	if (IS_ERR(op->fallback.blk)) {
+		printk(KERN_ERR "Error allocating fallback algo %s\n", name);
+		return PTR_ERR(op->fallback.blk);
+	}
+
+	return 0;
+}
+
+static void fallback_exit_blk(struct crypto_tfm *tfm)
+{
+	struct geode_aes_op *op = crypto_tfm_ctx(tfm);
+
+	crypto_free_blkcipher(op->fallback.blk);
+	op->fallback.blk = NULL;
+}
+
 static struct crypto_alg geode_cbc_alg = {
 	.cra_name		=	"cbc(aes)",
-	.cra_driver_name	=	"cbc-aes-geode-128",
+	.cra_driver_name	=	"cbc-aes-geode",
 	.cra_priority		=	400,
-	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER,
+	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER |
+					CRYPTO_ALG_NEED_FALLBACK,
+	.cra_init		=	fallback_init_blk,
+	.cra_exit		=	fallback_exit_blk,
 	.cra_blocksize		=	AES_MIN_BLOCK_SIZE,
 	.cra_ctxsize		=	sizeof(struct geode_aes_op),
 	.cra_alignmask		=	15,
 	.cra_type		=	&crypto_blkcipher_type,
 	.cra_module		=	THIS_MODULE,
 	.cra_list		=	LIST_HEAD_INIT(geode_cbc_alg.cra_list),
 	.cra_u			=	{
 		.blkcipher	=	{
-			.min_keysize	=	AES_KEY_LENGTH,
-			.max_keysize	=	AES_KEY_LENGTH,
-			.setkey		=	geode_setkey,
+			.min_keysize	=	AES_MIN_KEY_SIZE,
+			.max_keysize	=	AES_MAX_KEY_SIZE,
+			.setkey		=	geode_setkey_blk,
 			.encrypt	=	geode_cbc_encrypt,
 			.decrypt	=	geode_cbc_decrypt,
 			.ivsize		=	AES_IV_LENGTH,
@@ -307,6 +423,9 @@ geode_ecb_decrypt(struct blkcipher_desc *desc,
 	struct blkcipher_walk walk;
 	int err, ret;
 
+	if (unlikely(op->keylen != AES_KEYSIZE_128))
+		return fallback_blk_dec(desc, dst, src, nbytes);
+
 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
 
@@ -334,6 +453,9 @@ geode_ecb_encrypt(struct blkcipher_desc *desc,
 	struct blkcipher_walk walk;
 	int err, ret;
 
+	if (unlikely(op->keylen != AES_KEYSIZE_128))
+		return fallback_blk_enc(desc, dst, src, nbytes);
+
 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
 
@@ -353,28 +475,31 @@ geode_ecb_encrypt(struct blkcipher_desc *desc,
 }
 
 static struct crypto_alg geode_ecb_alg = {
 	.cra_name		=	"ecb(aes)",
-	.cra_driver_name	=	"ecb-aes-geode-128",
+	.cra_driver_name	=	"ecb-aes-geode",
 	.cra_priority		=	400,
-	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER,
+	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER |
+					CRYPTO_ALG_NEED_FALLBACK,
+	.cra_init		=	fallback_init_blk,
+	.cra_exit		=	fallback_exit_blk,
 	.cra_blocksize		=	AES_MIN_BLOCK_SIZE,
 	.cra_ctxsize		=	sizeof(struct geode_aes_op),
 	.cra_alignmask		=	15,
 	.cra_type		=	&crypto_blkcipher_type,
 	.cra_module		=	THIS_MODULE,
 	.cra_list		=	LIST_HEAD_INIT(geode_ecb_alg.cra_list),
 	.cra_u			=	{
 		.blkcipher	=	{
-			.min_keysize	=	AES_KEY_LENGTH,
-			.max_keysize	=	AES_KEY_LENGTH,
-			.setkey		=	geode_setkey,
+			.min_keysize	=	AES_MIN_KEY_SIZE,
+			.max_keysize	=	AES_MAX_KEY_SIZE,
+			.setkey		=	geode_setkey_blk,
 			.encrypt	=	geode_ecb_encrypt,
 			.decrypt	=	geode_ecb_decrypt,
 		}
 	}
 };
 
-static void
+static void __devexit
 geode_aes_remove(struct pci_dev *dev)
 {
 	crypto_unregister_alg(&geode_alg);
@@ -389,7 +514,7 @@ geode_aes_remove(struct pci_dev *dev)
 }
 
 
-static int
+static int __devinit
 geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id)
 {
 	int ret;
@@ -397,7 +522,7 @@ geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id)
 	if ((ret = pci_enable_device(dev)))
 		return ret;
 
-	if ((ret = pci_request_regions(dev, "geode-aes-128")))
+	if ((ret = pci_request_regions(dev, "geode-aes")))
 		goto eenable;
 
 	_iobase = pci_iomap(dev, 0, 0);
@@ -472,7 +597,6 @@ geode_aes_exit(void)
 MODULE_AUTHOR("Advanced Micro Devices, Inc.");
 MODULE_DESCRIPTION("Geode LX Hardware AES driver");
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("aes");
 
 module_init(geode_aes_init);
 module_exit(geode_aes_exit);
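
A note on the allocation mask used by the new cra_init hooks above: passing CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK as the mask with a zero type asks the crypto core for a synchronous algorithm that does not itself set CRYPTO_ALG_NEED_FALLBACK, so a hardware driver can never end up selecting itself (or another fallback-needing driver) as its own fallback. Sketch of the idiom, error handling trimmed:

	struct crypto_cipher *fb;

	/* Typically resolves to aes-generic when the hardware alg is "aes". */
	fb = crypto_alloc_cipher("aes", 0,
				 CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(fb))
		return PTR_ERR(fb);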
diff --git a/drivers/crypto/geode-aes.h b/drivers/crypto/geode-aes.h
index f47968671ae7..f1855b50da48 100644
--- a/drivers/crypto/geode-aes.h
+++ b/drivers/crypto/geode-aes.h
@@ -9,9 +9,9 @@
 #ifndef _GEODE_AES_H_
 #define _GEODE_AES_H_
 
-#define AES_KEY_LENGTH 16
+/* driver logic flags */
 #define AES_IV_LENGTH  16
-
+#define AES_KEY_LENGTH 16
 #define AES_MIN_BLOCK_SIZE 16
 
 #define AES_MODE_ECB 0
@@ -22,6 +22,38 @@
 
 #define AES_FLAGS_HIDDENKEY	(1 << 0)
 
+/* Register definitions */
+
+#define AES_CTRLA_REG	0x0000
+
+#define AES_CTRL_START		0x01
+#define AES_CTRL_DECRYPT	0x00
+#define AES_CTRL_ENCRYPT	0x02
+#define AES_CTRL_WRKEY		0x04
+#define AES_CTRL_DCA		0x08
+#define AES_CTRL_SCA		0x10
+#define AES_CTRL_CBC		0x20
+
+#define AES_INTR_REG		0x0008
+
+#define AES_INTRA_PENDING	(1 << 16)
+#define AES_INTRB_PENDING	(1 << 17)
+
+#define AES_INTR_PENDING	(AES_INTRA_PENDING | AES_INTRB_PENDING)
+#define AES_INTR_MASK		0x07
+
+#define AES_SOURCEA_REG		0x0010
+#define AES_DSTA_REG		0x0014
+#define AES_LENA_REG		0x0018
+#define AES_WRITEKEY0_REG	0x0030
+#define AES_WRITEIV0_REG	0x0040
+
+/* A very large counter that is used to gracefully bail out of an
+ * operation in case of trouble
+ */
+
+#define AES_OP_TIMEOUT		0x50000
+
 struct geode_aes_op {
 
 	void *src;
@@ -33,7 +65,13 @@ struct geode_aes_op {
 	int len;
 
 	u8 key[AES_KEY_LENGTH];
-	u8 iv[AES_IV_LENGTH];
+	u8 *iv;
+
+	union {
+		struct crypto_blkcipher *blk;
+		struct crypto_cipher *cip;
+	} fallback;
+	u32 keylen;
 };
 
 #endif
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
new file mode 100644
index 000000000000..16413e57597c
--- /dev/null
+++ b/drivers/crypto/hifn_795x.c
@@ -0,0 +1,2838 @@
+/*
+ * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/mod_devicetable.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
+#include <linux/scatterlist.h>
+#include <linux/highmem.h>
+#include <linux/interrupt.h>
+#include <linux/crypto.h>
+#include <linux/hw_random.h>
+#include <linux/ktime.h>
+
+#include <crypto/algapi.h>
+#include <crypto/des.h>
+
+#include <asm/kmap_types.h>
+
+#undef dprintk
+
+#define HIFN_TEST
+//#define HIFN_DEBUG
+
+#ifdef HIFN_DEBUG
+#define dprintk(f, a...)	printk(f, ##a)
+#else
+#define dprintk(f, a...)	do {} while (0)
+#endif
+
+static char hifn_pll_ref[sizeof("extNNN")] = "ext";
+module_param_string(hifn_pll_ref, hifn_pll_ref, sizeof(hifn_pll_ref), 0444);
+MODULE_PARM_DESC(hifn_pll_ref,
+		 "PLL reference clock (pci[freq] or ext[freq], default ext)");
+
+static atomic_t hifn_dev_number;
+
+#define ACRYPTO_OP_DECRYPT	0
+#define ACRYPTO_OP_ENCRYPT	1
+#define ACRYPTO_OP_HMAC		2
+#define ACRYPTO_OP_RNG		3
+
+#define ACRYPTO_MODE_ECB	0
+#define ACRYPTO_MODE_CBC	1
+#define ACRYPTO_MODE_CFB	2
+#define ACRYPTO_MODE_OFB	3
+
+#define ACRYPTO_TYPE_AES_128	0
+#define ACRYPTO_TYPE_AES_192	1
+#define ACRYPTO_TYPE_AES_256	2
+#define ACRYPTO_TYPE_3DES	3
+#define ACRYPTO_TYPE_DES	4
+
+#define PCI_VENDOR_ID_HIFN		0x13A3
+#define PCI_DEVICE_ID_HIFN_7955		0x0020
+#define PCI_DEVICE_ID_HIFN_7956		0x001d
+
+/* I/O region sizes */
+
+#define HIFN_BAR0_SIZE			0x1000
+#define HIFN_BAR1_SIZE			0x2000
+#define HIFN_BAR2_SIZE			0x8000
+
+/* DMA registers */
+
+#define HIFN_DMA_CRA			0x0C	/* DMA Command Ring Address */
+#define HIFN_DMA_SDRA			0x1C	/* DMA Source Data Ring Address */
+#define HIFN_DMA_RRA			0x2C	/* DMA Result Ring Address */
+#define HIFN_DMA_DDRA			0x3C	/* DMA Destination Data Ring Address */
+#define HIFN_DMA_STCTL			0x40	/* DMA Status and Control */
+#define HIFN_DMA_INTREN			0x44	/* DMA Interrupt Enable */
+#define HIFN_DMA_CFG1			0x48	/* DMA Configuration #1 */
+#define HIFN_DMA_CFG2			0x6C	/* DMA Configuration #2 */
+#define HIFN_CHIP_ID			0x98	/* Chip ID */
+
+/*
+ * Processing Unit Registers (offset from BASEREG0)
+ */
+#define HIFN_0_PUDATA		0x00	/* Processing Unit Data */
+#define HIFN_0_PUCTRL		0x04	/* Processing Unit Control */
+#define HIFN_0_PUISR		0x08	/* Processing Unit Interrupt Status */
+#define HIFN_0_PUCNFG		0x0c	/* Processing Unit Configuration */
+#define HIFN_0_PUIER		0x10	/* Processing Unit Interrupt Enable */
+#define HIFN_0_PUSTAT		0x14	/* Processing Unit Status/Chip ID */
+#define HIFN_0_FIFOSTAT		0x18	/* FIFO Status */
+#define HIFN_0_FIFOCNFG		0x1c	/* FIFO Configuration */
+#define HIFN_0_SPACESIZE	0x20	/* Register space size */
+
+/* Processing Unit Control Register (HIFN_0_PUCTRL) */
+#define HIFN_PUCTRL_CLRSRCFIFO	0x0010	/* clear source fifo */
+#define HIFN_PUCTRL_STOP	0x0008	/* stop pu */
+#define HIFN_PUCTRL_LOCKRAM	0x0004	/* lock ram */
+#define HIFN_PUCTRL_DMAENA	0x0002	/* enable dma */
+#define HIFN_PUCTRL_RESET	0x0001	/* Reset processing unit */
+
+/* Processing Unit Interrupt Status Register (HIFN_0_PUISR) */
+#define HIFN_PUISR_CMDINVAL	0x8000	/* Invalid command interrupt */
+#define HIFN_PUISR_DATAERR	0x4000	/* Data error interrupt */
+#define HIFN_PUISR_SRCFIFO	0x2000	/* Source FIFO ready interrupt */
+#define HIFN_PUISR_DSTFIFO	0x1000	/* Destination FIFO ready interrupt */
+#define HIFN_PUISR_DSTOVER	0x0200	/* Destination overrun interrupt */
+#define HIFN_PUISR_SRCCMD	0x0080	/* Source command interrupt */
+#define HIFN_PUISR_SRCCTX	0x0040	/* Source context interrupt */
+#define HIFN_PUISR_SRCDATA	0x0020	/* Source data interrupt */
+#define HIFN_PUISR_DSTDATA	0x0010	/* Destination data interrupt */
+#define HIFN_PUISR_DSTRESULT	0x0004	/* Destination result interrupt */
+
+/* Processing Unit Configuration Register (HIFN_0_PUCNFG) */
+#define HIFN_PUCNFG_DRAMMASK	0xe000	/* DRAM size mask */
+#define HIFN_PUCNFG_DSZ_256K	0x0000	/* 256k dram */
+#define HIFN_PUCNFG_DSZ_512K	0x2000	/* 512k dram */
+#define HIFN_PUCNFG_DSZ_1M	0x4000	/* 1m dram */
+#define HIFN_PUCNFG_DSZ_2M	0x6000	/* 2m dram */
+#define HIFN_PUCNFG_DSZ_4M	0x8000	/* 4m dram */
+#define HIFN_PUCNFG_DSZ_8M	0xa000	/* 8m dram */
+#define HIFN_PUNCFG_DSZ_16M	0xc000	/* 16m dram */
+#define HIFN_PUCNFG_DSZ_32M	0xe000	/* 32m dram */
+#define HIFN_PUCNFG_DRAMREFRESH	0x1800	/* DRAM refresh rate mask */
+#define HIFN_PUCNFG_DRFR_512	0x0000	/* 512 divisor of ECLK */
+#define HIFN_PUCNFG_DRFR_256	0x0800	/* 256 divisor of ECLK */
+#define HIFN_PUCNFG_DRFR_128	0x1000	/* 128 divisor of ECLK */
+#define HIFN_PUCNFG_TCALLPHASES	0x0200	/* your guess is as good as mine... */
+#define HIFN_PUCNFG_TCDRVTOTEM	0x0100	/* your guess is as good as mine... */
+#define HIFN_PUCNFG_BIGENDIAN	0x0080	/* DMA big endian mode */
+#define HIFN_PUCNFG_BUS32	0x0040	/* Bus width 32bits */
+#define HIFN_PUCNFG_BUS16	0x0000	/* Bus width 16 bits */
+#define HIFN_PUCNFG_CHIPID	0x0020	/* Allow chipid from PUSTAT */
+#define HIFN_PUCNFG_DRAM	0x0010	/* Context RAM is DRAM */
+#define HIFN_PUCNFG_SRAM	0x0000	/* Context RAM is SRAM */
+#define HIFN_PUCNFG_COMPSING	0x0004	/* Enable single compression context */
+#define HIFN_PUCNFG_ENCCNFG	0x0002	/* Encryption configuration */
+
+/* Processing Unit Interrupt Enable Register (HIFN_0_PUIER) */
+#define HIFN_PUIER_CMDINVAL	0x8000	/* Invalid command interrupt */
+#define HIFN_PUIER_DATAERR	0x4000	/* Data error interrupt */
+#define HIFN_PUIER_SRCFIFO	0x2000	/* Source FIFO ready interrupt */
+#define HIFN_PUIER_DSTFIFO	0x1000	/* Destination FIFO ready interrupt */
+#define HIFN_PUIER_DSTOVER	0x0200	/* Destination overrun interrupt */
+#define HIFN_PUIER_SRCCMD	0x0080	/* Source command interrupt */
+#define HIFN_PUIER_SRCCTX	0x0040	/* Source context interrupt */
+#define HIFN_PUIER_SRCDATA	0x0020	/* Source data interrupt */
+#define HIFN_PUIER_DSTDATA	0x0010	/* Destination data interrupt */
+#define HIFN_PUIER_DSTRESULT	0x0004	/* Destination result interrupt */
+
+/* Processing Unit Status Register/Chip ID (HIFN_0_PUSTAT) */
+#define HIFN_PUSTAT_CMDINVAL	0x8000	/* Invalid command interrupt */
+#define HIFN_PUSTAT_DATAERR	0x4000	/* Data error interrupt */
+#define HIFN_PUSTAT_SRCFIFO	0x2000	/* Source FIFO ready interrupt */
+#define HIFN_PUSTAT_DSTFIFO	0x1000	/* Destination FIFO ready interrupt */
+#define HIFN_PUSTAT_DSTOVER	0x0200	/* Destination overrun interrupt */
+#define HIFN_PUSTAT_SRCCMD	0x0080	/* Source command interrupt */
+#define HIFN_PUSTAT_SRCCTX	0x0040	/* Source context interrupt */
+#define HIFN_PUSTAT_SRCDATA	0x0020	/* Source data interrupt */
+#define HIFN_PUSTAT_DSTDATA	0x0010	/* Destination data interrupt */
+#define HIFN_PUSTAT_DSTRESULT	0x0004	/* Destination result interrupt */
+#define HIFN_PUSTAT_CHIPREV	0x00ff	/* Chip revision mask */
+#define HIFN_PUSTAT_CHIPENA	0xff00	/* Chip enabled mask */
+#define HIFN_PUSTAT_ENA_2	0x1100	/* Level 2 enabled */
+#define HIFN_PUSTAT_ENA_1	0x1000	/* Level 1 enabled */
+#define HIFN_PUSTAT_ENA_0	0x3000	/* Level 0 enabled */
+#define HIFN_PUSTAT_REV_2	0x0020	/* 7751 PT6/2 */
+#define HIFN_PUSTAT_REV_3	0x0030	/* 7751 PT6/3 */
+
+/* FIFO Status Register (HIFN_0_FIFOSTAT) */
+#define HIFN_FIFOSTAT_SRC	0x7f00	/* Source FIFO available */
+#define HIFN_FIFOSTAT_DST	0x007f	/* Destination FIFO available */
+
+/* FIFO Configuration Register (HIFN_0_FIFOCNFG) */
+#define HIFN_FIFOCNFG_THRESHOLD	0x0400	/* must be written as 1 */
+
+/*
+ * DMA Interface Registers (offset from BASEREG1)
+ */
+#define HIFN_1_DMA_CRAR		0x0c	/* DMA Command Ring Address */
+#define HIFN_1_DMA_SRAR		0x1c	/* DMA Source Ring Address */
+#define HIFN_1_DMA_RRAR		0x2c	/* DMA Result Ring Address */
+#define HIFN_1_DMA_DRAR		0x3c	/* DMA Destination Ring Address */
+#define HIFN_1_DMA_CSR		0x40	/* DMA Status and Control */
+#define HIFN_1_DMA_IER		0x44	/* DMA Interrupt Enable */
+#define HIFN_1_DMA_CNFG		0x48	/* DMA Configuration */
+#define HIFN_1_PLL		0x4c	/* 795x: PLL config */
+#define HIFN_1_7811_RNGENA	0x60	/* 7811: rng enable */
+#define HIFN_1_7811_RNGCFG	0x64	/* 7811: rng config */
+#define HIFN_1_7811_RNGDAT	0x68	/* 7811: rng data */
+#define HIFN_1_7811_RNGSTS	0x6c	/* 7811: rng status */
+#define HIFN_1_7811_MIPSRST	0x94	/* 7811: MIPS reset */
+#define HIFN_1_REVID		0x98	/* Revision ID */
+#define HIFN_1_UNLOCK_SECRET1	0xf4
+#define HIFN_1_UNLOCK_SECRET2	0xfc
+#define HIFN_1_PUB_RESET	0x204	/* Public/RNG Reset */
+#define HIFN_1_PUB_BASE		0x300	/* Public Base Address */
+#define HIFN_1_PUB_OPLEN	0x304	/* Public Operand Length */
+#define HIFN_1_PUB_OP		0x308	/* Public Operand */
+#define HIFN_1_PUB_STATUS	0x30c	/* Public Status */
+#define HIFN_1_PUB_IEN		0x310	/* Public Interrupt enable */
+#define HIFN_1_RNG_CONFIG	0x314	/* RNG config */
+#define HIFN_1_RNG_DATA		0x318	/* RNG data */
+#define HIFN_1_PUB_MEM		0x400	/* start of Public key memory */
+#define HIFN_1_PUB_MEMEND	0xbff	/* end of Public key memory */
+
+/* DMA Status and Control Register (HIFN_1_DMA_CSR) */
+#define HIFN_DMACSR_D_CTRLMASK	0xc0000000	/* Destination Ring Control */
+#define HIFN_DMACSR_D_CTRL_NOP	0x00000000	/* Dest. Control: no-op */
+#define HIFN_DMACSR_D_CTRL_DIS	0x40000000	/* Dest. Control: disable */
+#define HIFN_DMACSR_D_CTRL_ENA	0x80000000	/* Dest. Control: enable */
+#define HIFN_DMACSR_D_ABORT	0x20000000	/* Destination Ring PCI Abort */
+#define HIFN_DMACSR_D_DONE	0x10000000	/* Destination Ring Done */
+#define HIFN_DMACSR_D_LAST	0x08000000	/* Destination Ring Last */
+#define HIFN_DMACSR_D_WAIT	0x04000000	/* Destination Ring Waiting */
+#define HIFN_DMACSR_D_OVER	0x02000000	/* Destination Ring Overflow */
+#define HIFN_DMACSR_R_CTRL	0x00c00000	/* Result Ring Control */
+#define HIFN_DMACSR_R_CTRL_NOP	0x00000000	/* Result Control: no-op */
+#define HIFN_DMACSR_R_CTRL_DIS	0x00400000	/* Result Control: disable */
+#define HIFN_DMACSR_R_CTRL_ENA	0x00800000	/* Result Control: enable */
+#define HIFN_DMACSR_R_ABORT	0x00200000	/* Result Ring PCI Abort */
+#define HIFN_DMACSR_R_DONE	0x00100000	/* Result Ring Done */
+#define HIFN_DMACSR_R_LAST	0x00080000	/* Result Ring Last */
+#define HIFN_DMACSR_R_WAIT	0x00040000	/* Result Ring Waiting */
+#define HIFN_DMACSR_R_OVER	0x00020000	/* Result Ring Overflow */
+#define HIFN_DMACSR_S_CTRL	0x0000c000	/* Source Ring Control */
+#define HIFN_DMACSR_S_CTRL_NOP	0x00000000	/* Source Control: no-op */
+#define HIFN_DMACSR_S_CTRL_DIS	0x00004000	/* Source Control: disable */
+#define HIFN_DMACSR_S_CTRL_ENA	0x00008000	/* Source Control: enable */
+#define HIFN_DMACSR_S_ABORT	0x00002000	/* Source Ring PCI Abort */
+#define HIFN_DMACSR_S_DONE	0x00001000	/* Source Ring Done */
+#define HIFN_DMACSR_S_LAST	0x00000800	/* Source Ring Last */
+#define HIFN_DMACSR_S_WAIT	0x00000400	/* Source Ring Waiting */
+#define HIFN_DMACSR_ILLW	0x00000200	/* Illegal write (7811 only) */
+#define HIFN_DMACSR_ILLR	0x00000100	/* Illegal read (7811 only) */
+#define HIFN_DMACSR_C_CTRL	0x000000c0	/* Command Ring Control */
+#define HIFN_DMACSR_C_CTRL_NOP	0x00000000	/* Command Control: no-op */
+#define HIFN_DMACSR_C_CTRL_DIS	0x00000040	/* Command Control: disable */
+#define HIFN_DMACSR_C_CTRL_ENA	0x00000080	/* Command Control: enable */
+#define HIFN_DMACSR_C_ABORT	0x00000020	/* Command Ring PCI Abort */
+#define HIFN_DMACSR_C_DONE	0x00000010	/* Command Ring Done */
+#define HIFN_DMACSR_C_LAST	0x00000008	/* Command Ring Last */
+#define HIFN_DMACSR_C_WAIT	0x00000004	/* Command Ring Waiting */
+#define HIFN_DMACSR_PUBDONE	0x00000002	/* Public op done (7951 only) */
+#define HIFN_DMACSR_ENGINE	0x00000001	/* Command Ring Engine IRQ */
+
+/* DMA Interrupt Enable Register (HIFN_1_DMA_IER) */
+#define HIFN_DMAIER_D_ABORT	0x20000000	/* Destination Ring PCIAbort */
+#define HIFN_DMAIER_D_DONE	0x10000000	/* Destination Ring Done */
+#define HIFN_DMAIER_D_LAST	0x08000000	/* Destination Ring Last */
+#define HIFN_DMAIER_D_WAIT	0x04000000	/* Destination Ring Waiting */
+#define HIFN_DMAIER_D_OVER	0x02000000	/* Destination Ring Overflow */
+#define HIFN_DMAIER_R_ABORT	0x00200000	/* Result Ring PCI Abort */
+#define HIFN_DMAIER_R_DONE	0x00100000	/* Result Ring Done */
+#define HIFN_DMAIER_R_LAST	0x00080000	/* Result Ring Last */
+#define HIFN_DMAIER_R_WAIT	0x00040000	/* Result Ring Waiting */
+#define HIFN_DMAIER_R_OVER	0x00020000	/* Result Ring Overflow */
+#define HIFN_DMAIER_S_ABORT	0x00002000	/* Source Ring PCI Abort */
+#define HIFN_DMAIER_S_DONE	0x00001000	/* Source Ring Done */
+#define HIFN_DMAIER_S_LAST	0x00000800	/* Source Ring Last */
+#define HIFN_DMAIER_S_WAIT	0x00000400	/* Source Ring Waiting */
+#define HIFN_DMAIER_ILLW	0x00000200	/* Illegal write (7811 only) */
+#define HIFN_DMAIER_ILLR	0x00000100	/* Illegal read (7811 only) */
+#define HIFN_DMAIER_C_ABORT	0x00000020	/* Command Ring PCI Abort */
+#define HIFN_DMAIER_C_DONE	0x00000010	/* Command Ring Done */
+#define HIFN_DMAIER_C_LAST	0x00000008	/* Command Ring Last */
+#define HIFN_DMAIER_C_WAIT	0x00000004	/* Command Ring Waiting */
+#define HIFN_DMAIER_PUBDONE	0x00000002	/* public op done (7951 only) */
+#define HIFN_DMAIER_ENGINE	0x00000001	/* Engine IRQ */
+
+/* DMA Configuration Register (HIFN_1_DMA_CNFG) */
+#define HIFN_DMACNFG_BIGENDIAN	0x10000000	/* big endian mode */
+#define HIFN_DMACNFG_POLLFREQ	0x00ff0000	/* Poll frequency mask */
+#define HIFN_DMACNFG_UNLOCK	0x00000800
+#define HIFN_DMACNFG_POLLINVAL	0x00000700	/* Invalid Poll Scalar */
+#define HIFN_DMACNFG_LAST	0x00000010	/* Host control LAST bit */
+#define HIFN_DMACNFG_MODE	0x00000004	/* DMA mode */
+#define HIFN_DMACNFG_DMARESET	0x00000002	/* DMA Reset # */
+#define HIFN_DMACNFG_MSTRESET	0x00000001	/* Master Reset # */
+
+/* PLL configuration register */
+#define HIFN_PLL_REF_CLK_HBI	0x00000000	/* HBI reference clock */
+#define HIFN_PLL_REF_CLK_PLL	0x00000001	/* PLL reference clock */
+#define HIFN_PLL_BP		0x00000002	/* Reference clock bypass */
+#define HIFN_PLL_PK_CLK_HBI	0x00000000	/* PK engine HBI clock */
+#define HIFN_PLL_PK_CLK_PLL	0x00000008	/* PK engine PLL clock */
+#define HIFN_PLL_PE_CLK_HBI	0x00000000	/* PE engine HBI clock */
+#define HIFN_PLL_PE_CLK_PLL	0x00000010	/* PE engine PLL clock */
+#define HIFN_PLL_RESERVED_1	0x00000400	/* Reserved bit, must be 1 */
+#define HIFN_PLL_ND_SHIFT	11		/* Clock multiplier shift */
+#define HIFN_PLL_ND_MULT_2	0x00000000	/* PLL clock multiplier 2 */
+#define HIFN_PLL_ND_MULT_4	0x00000800	/* PLL clock multiplier 4 */
+#define HIFN_PLL_ND_MULT_6	0x00001000	/* PLL clock multiplier 6 */
+#define HIFN_PLL_ND_MULT_8	0x00001800	/* PLL clock multiplier 8 */
+#define HIFN_PLL_ND_MULT_10	0x00002000	/* PLL clock multiplier 10 */
+#define HIFN_PLL_ND_MULT_12	0x00002800	/* PLL clock multiplier 12 */
+#define HIFN_PLL_IS_1_8		0x00000000	/* charge pump (mult. 1-8) */
+#define HIFN_PLL_IS_9_12	0x00010000	/* charge pump (mult. 9-12) */
+
+#define HIFN_PLL_FCK_MAX	266		/* Maximum PLL frequency */
+
+/* Public key reset register (HIFN_1_PUB_RESET) */
+#define HIFN_PUBRST_RESET	0x00000001	/* reset public/rng unit */
+
+/* Public base address register (HIFN_1_PUB_BASE) */
+#define HIFN_PUBBASE_ADDR	0x00003fff	/* base address */
+
+/* Public operand length register (HIFN_1_PUB_OPLEN) */
+#define HIFN_PUBOPLEN_MOD_M	0x0000007f	/* modulus length mask */
+#define HIFN_PUBOPLEN_MOD_S	0		/* modulus length shift */
+#define HIFN_PUBOPLEN_EXP_M	0x0003ff80	/* exponent length mask */
+#define HIFN_PUBOPLEN_EXP_S	7		/* exponent length shift */
+#define HIFN_PUBOPLEN_RED_M	0x003c0000	/* reducend length mask */
+#define HIFN_PUBOPLEN_RED_S	18		/* reducend length shift */
+
+/* Public operation register (HIFN_1_PUB_OP) */
+#define HIFN_PUBOP_AOFFSET_M	0x0000007f	/* A offset mask */
+#define HIFN_PUBOP_AOFFSET_S	0		/* A offset shift */
+#define HIFN_PUBOP_BOFFSET_M	0x00000f80	/* B offset mask */
+#define HIFN_PUBOP_BOFFSET_S	7		/* B offset shift */
+#define HIFN_PUBOP_MOFFSET_M	0x0003f000	/* M offset mask */
+#define HIFN_PUBOP_MOFFSET_S	12		/* M offset shift */
+#define HIFN_PUBOP_OP_MASK	0x003c0000	/* Opcode: */
+#define HIFN_PUBOP_OP_NOP	0x00000000	/* NOP */
+#define HIFN_PUBOP_OP_ADD	0x00040000	/* ADD */
+#define HIFN_PUBOP_OP_ADDC	0x00080000	/* ADD w/carry */
+#define HIFN_PUBOP_OP_SUB	0x000c0000	/* SUB */
+#define HIFN_PUBOP_OP_SUBC	0x00100000	/* SUB w/carry */
+#define HIFN_PUBOP_OP_MODADD	0x00140000	/* Modular ADD */
+#define HIFN_PUBOP_OP_MODSUB	0x00180000	/* Modular SUB */
+#define HIFN_PUBOP_OP_INCA	0x001c0000	/* INC A */
+#define HIFN_PUBOP_OP_DECA	0x00200000	/* DEC A */
+#define HIFN_PUBOP_OP_MULT	0x00240000	/* MULT */
+#define HIFN_PUBOP_OP_MODMULT	0x00280000	/* Modular MULT */
+#define HIFN_PUBOP_OP_MODRED	0x002c0000	/* Modular RED */
+#define HIFN_PUBOP_OP_MODEXP	0x00300000	/* Modular EXP */
+
+/* Public status register (HIFN_1_PUB_STATUS) */
+#define HIFN_PUBSTS_DONE	0x00000001	/* operation done */
+#define HIFN_PUBSTS_CARRY	0x00000002	/* carry */
+
+/* Public interrupt enable register (HIFN_1_PUB_IEN) */
+#define HIFN_PUBIEN_DONE	0x00000001	/* operation done interrupt */
+
+/* Random number generator config register (HIFN_1_RNG_CONFIG) */
+#define HIFN_RNGCFG_ENA		0x00000001	/* enable rng */
+
+#define HIFN_NAMESIZE			32
+#define HIFN_MAX_RESULT_ORDER		5
+
+#define	HIFN_D_CMD_RSIZE		24*4
+#define	HIFN_D_SRC_RSIZE		80*4
+#define	HIFN_D_DST_RSIZE		80*4
+#define	HIFN_D_RES_RSIZE		24*4
+
+#define HIFN_QUEUE_LENGTH		HIFN_D_CMD_RSIZE-5
+
+#define AES_MIN_KEY_SIZE		16
+#define AES_MAX_KEY_SIZE		32
+
+#define HIFN_DES_KEY_LENGTH		8
+#define HIFN_3DES_KEY_LENGTH		24
+#define HIFN_MAX_CRYPT_KEY_LENGTH	AES_MAX_KEY_SIZE
+#define HIFN_IV_LENGTH			8
+#define HIFN_AES_IV_LENGTH		16
+#define	HIFN_MAX_IV_LENGTH		HIFN_AES_IV_LENGTH
+
+#define HIFN_MAC_KEY_LENGTH		64
+#define HIFN_MD5_LENGTH			16
+#define HIFN_SHA1_LENGTH		20
+#define HIFN_MAC_TRUNC_LENGTH		12
+
+#define	HIFN_MAX_COMMAND		(8 + 8 + 8 + 64 + 260)
+#define	HIFN_MAX_RESULT			(8 + 4 + 4 + 20 + 4)
+#define HIFN_USED_RESULT		12
+
+struct hifn_desc
+{
+	volatile u32		l;
+	volatile u32		p;
+};
+
+struct hifn_dma {
+	struct hifn_desc	cmdr[HIFN_D_CMD_RSIZE+1];
+	struct hifn_desc	srcr[HIFN_D_SRC_RSIZE+1];
+	struct hifn_desc	dstr[HIFN_D_DST_RSIZE+1];
+	struct hifn_desc	resr[HIFN_D_RES_RSIZE+1];
+
+	u8			command_bufs[HIFN_D_CMD_RSIZE][HIFN_MAX_COMMAND];
+	u8			result_bufs[HIFN_D_CMD_RSIZE][HIFN_MAX_RESULT];
+
+	u64			test_src, test_dst;
+
+	/*
+	 * Our current positions for insertion and removal from the descriptor
+	 * rings.
+	 */
+	volatile int		cmdi, srci, dsti, resi;
+	volatile int		cmdu, srcu, dstu, resu;
+	int			cmdk, srck, dstk, resk;
+};
+
+#define HIFN_FLAG_CMD_BUSY	(1<<0)
+#define HIFN_FLAG_SRC_BUSY	(1<<1)
+#define HIFN_FLAG_DST_BUSY	(1<<2)
+#define HIFN_FLAG_RES_BUSY	(1<<3)
+#define HIFN_FLAG_OLD_KEY	(1<<4)
+
+#define HIFN_DEFAULT_ACTIVE_NUM	5
+
+struct hifn_device
+{
+	char			name[HIFN_NAMESIZE];
+
+	int			irq;
+
+	struct pci_dev		*pdev;
+	void __iomem		*bar[3];
+
+	unsigned long		result_mem;
+	dma_addr_t		dst;
+
+	void			*desc_virt;
+	dma_addr_t		desc_dma;
+
+	u32			dmareg;
+
+	void 			*sa[HIFN_D_RES_RSIZE];
+
+	spinlock_t		lock;
+
+	void 			*priv;
+
+	u32			flags;
+	int			active, started;
+	struct delayed_work	work;
+	unsigned long		reset;
+	unsigned long		success;
+	unsigned long		prev_success;
+
+	u8			snum;
+
+	struct tasklet_struct	tasklet;
+
+	struct crypto_queue 	queue;
+	struct list_head	alg_list;
+
+	unsigned int		pk_clk_freq;
+
+#if defined(CONFIG_HW_RANDOM) || defined(CONFIG_HW_RANDOM_MODULE)
+	unsigned int		rng_wait_time;
+	ktime_t			rngtime;
+	struct hwrng		rng;
+#endif
+};
+
+#define	HIFN_D_LENGTH			0x0000ffff
+#define	HIFN_D_NOINVALID		0x01000000
+#define	HIFN_D_MASKDONEIRQ		0x02000000
+#define	HIFN_D_DESTOVER			0x04000000
+#define	HIFN_D_OVER			0x08000000
+#define	HIFN_D_LAST			0x20000000
+#define	HIFN_D_JUMP			0x40000000
+#define	HIFN_D_VALID			0x80000000
+
+struct hifn_base_command
+{
+	volatile u16		masks;
+	volatile u16		session_num;
+	volatile u16		total_source_count;
+	volatile u16		total_dest_count;
+};
+
+#define	HIFN_BASE_CMD_COMP		0x0100	/* enable compression engine */
+#define	HIFN_BASE_CMD_PAD		0x0200	/* enable padding engine */
+#define	HIFN_BASE_CMD_MAC		0x0400	/* enable MAC engine */
+#define	HIFN_BASE_CMD_CRYPT		0x0800	/* enable crypt engine */
+#define	HIFN_BASE_CMD_DECODE		0x2000
+#define	HIFN_BASE_CMD_SRCLEN_M		0xc000
+#define	HIFN_BASE_CMD_SRCLEN_S		14
+#define	HIFN_BASE_CMD_DSTLEN_M		0x3000
+#define	HIFN_BASE_CMD_DSTLEN_S		12
+#define	HIFN_BASE_CMD_LENMASK_HI	0x30000
+#define	HIFN_BASE_CMD_LENMASK_LO	0x0ffff
+
+/*
+ * Structure to help build up the command data structure.
+ */
+struct hifn_crypt_command
+{
+	volatile u16		masks;
+	volatile u16		header_skip;
+	volatile u16		source_count;
+	volatile u16		reserved;
+};
+
+#define	HIFN_CRYPT_CMD_ALG_MASK		0x0003	/* algorithm: */
+#define	HIFN_CRYPT_CMD_ALG_DES		0x0000	/* DES */
+#define	HIFN_CRYPT_CMD_ALG_3DES		0x0001	/* 3DES */
+#define	HIFN_CRYPT_CMD_ALG_RC4		0x0002	/* RC4 */
+#define	HIFN_CRYPT_CMD_ALG_AES		0x0003	/* AES */
+#define	HIFN_CRYPT_CMD_MODE_MASK	0x0018	/* Encrypt mode: */
+#define	HIFN_CRYPT_CMD_MODE_ECB		0x0000	/* ECB */
+#define	HIFN_CRYPT_CMD_MODE_CBC		0x0008	/* CBC */
+#define	HIFN_CRYPT_CMD_MODE_CFB		0x0010	/* CFB */
+#define	HIFN_CRYPT_CMD_MODE_OFB		0x0018	/* OFB */
+#define	HIFN_CRYPT_CMD_CLR_CTX		0x0040	/* clear context */
+#define	HIFN_CRYPT_CMD_KSZ_MASK		0x0600	/* AES key size: */
+#define	HIFN_CRYPT_CMD_KSZ_128		0x0000	/* 128 bit */
+#define	HIFN_CRYPT_CMD_KSZ_192		0x0200	/* 192 bit */
+#define	HIFN_CRYPT_CMD_KSZ_256		0x0400	/* 256 bit */
+#define	HIFN_CRYPT_CMD_NEW_KEY		0x0800	/* expect new key */
+#define	HIFN_CRYPT_CMD_NEW_IV		0x1000	/* expect new iv */
+#define	HIFN_CRYPT_CMD_SRCLEN_M		0xc000
+#define	HIFN_CRYPT_CMD_SRCLEN_S		14
+
+/*
+ * Structure to help build up the command data structure.
+ */
+struct hifn_mac_command
+{
+	volatile u16 		masks;
+	volatile u16 		header_skip;
+	volatile u16 		source_count;
+	volatile u16 		reserved;
+};
+
+#define	HIFN_MAC_CMD_ALG_MASK		0x0001
+#define	HIFN_MAC_CMD_ALG_SHA1		0x0000
+#define	HIFN_MAC_CMD_ALG_MD5		0x0001
+#define	HIFN_MAC_CMD_MODE_MASK		0x000c
+#define	HIFN_MAC_CMD_MODE_HMAC		0x0000
+#define	HIFN_MAC_CMD_MODE_SSL_MAC	0x0004
+#define	HIFN_MAC_CMD_MODE_HASH		0x0008
+#define	HIFN_MAC_CMD_MODE_FULL		0x0004
+#define	HIFN_MAC_CMD_TRUNC		0x0010
+#define	HIFN_MAC_CMD_RESULT		0x0020
+#define	HIFN_MAC_CMD_APPEND		0x0040
+#define	HIFN_MAC_CMD_SRCLEN_M		0xc000
+#define	HIFN_MAC_CMD_SRCLEN_S		14
+
+/*
+ * MAC POS IPsec initiates authentication after encryption on encodes
+ * and before decryption on decodes.
+ */
+#define	HIFN_MAC_CMD_POS_IPSEC		0x0200
+#define	HIFN_MAC_CMD_NEW_KEY		0x0800
+
+struct hifn_comp_command
+{
+	volatile u16 		masks;
+	volatile u16 		header_skip;
+	volatile u16 		source_count;
+	volatile u16 		reserved;
+};
+
+#define	HIFN_COMP_CMD_SRCLEN_M		0xc000
+#define	HIFN_COMP_CMD_SRCLEN_S		14
+#define	HIFN_COMP_CMD_ONE		0x0100	/* must be one */
+#define	HIFN_COMP_CMD_CLEARHIST		0x0010	/* clear history */
+#define	HIFN_COMP_CMD_UPDATEHIST	0x0008	/* update history */
+#define	HIFN_COMP_CMD_LZS_STRIP0	0x0004	/* LZS: strip zero */
+#define	HIFN_COMP_CMD_MPPC_RESTART	0x0004	/* MPPC: restart */
+#define	HIFN_COMP_CMD_ALG_MASK		0x0001	/* compression mode: */
+#define	HIFN_COMP_CMD_ALG_MPPC		0x0001	/* MPPC */
+#define	HIFN_COMP_CMD_ALG_LZS		0x0000	/* LZS */
+
+struct hifn_base_result
+{
+	volatile u16 		flags;
+	volatile u16 		session;
+	volatile u16 		src_cnt;	/* 15:0 of source count */
+	volatile u16 		dst_cnt;	/* 15:0 of dest count */
+};
+
+#define	HIFN_BASE_RES_DSTOVERRUN	0x0200	/* destination overrun */
+#define	HIFN_BASE_RES_SRCLEN_M		0xc000	/* 17:16 of source count */
+#define	HIFN_BASE_RES_SRCLEN_S		14
+#define	HIFN_BASE_RES_DSTLEN_M		0x3000	/* 17:16 of dest count */
+#define	HIFN_BASE_RES_DSTLEN_S		12
+
+struct hifn_comp_result
+{
+	volatile u16		flags;
+	volatile u16		crc;
+};
+
+#define	HIFN_COMP_RES_LCB_M		0xff00	/* longitudinal check byte */
+#define	HIFN_COMP_RES_LCB_S		8
+#define	HIFN_COMP_RES_RESTART		0x0004	/* MPPC: restart */
+#define	HIFN_COMP_RES_ENDMARKER		0x0002	/* LZS: end marker seen */
+#define	HIFN_COMP_RES_SRC_NOTZERO	0x0001	/* source expired */
+
+struct hifn_mac_result
+{
+	volatile u16 		flags;
+	volatile u16 		reserved;
+	/* followed by 0, 6, 8, or 10 u16's of the MAC, then crypt */
+};
+
+#define	HIFN_MAC_RES_MISCOMPARE		0x0002	/* compare failed */
+#define	HIFN_MAC_RES_SRC_NOTZERO	0x0001	/* source expired */
+
+struct hifn_crypt_result
+{
+	volatile u16 		flags;
+	volatile u16 		reserved;
+};
+
+#define	HIFN_CRYPT_RES_SRC_NOTZERO	0x0001	/* source expired */
+
+#ifndef HIFN_POLL_FREQUENCY
+#define	HIFN_POLL_FREQUENCY	0x1
+#endif
+
+#ifndef HIFN_POLL_SCALAR
+#define	HIFN_POLL_SCALAR	0x0
+#endif
+
+#define	HIFN_MAX_SEGLEN 	0xffff		/* maximum dma segment len */
+#define	HIFN_MAX_DMALEN		0x3ffff		/* maximum dma length */
+
+struct hifn_crypto_alg
+{
+	struct list_head	entry;
+	struct crypto_alg	alg;
+	struct hifn_device	*dev;
+};
+
+#define ASYNC_SCATTERLIST_CACHE	16
+
+#define ASYNC_FLAGS_MISALIGNED	(1<<0)
+
+struct ablkcipher_walk
+{
+	struct scatterlist	cache[ASYNC_SCATTERLIST_CACHE];
+	u32			flags;
+	int			num;
+};
+
+struct hifn_context
+{
+	u8			key[HIFN_MAX_CRYPT_KEY_LENGTH], *iv;
+	struct hifn_device	*dev;
+	unsigned int		keysize, ivsize;
+	u8			op, type, mode, unused;
+	struct ablkcipher_walk	walk;
+	atomic_t		sg_num;
+};
+
+#define crypto_alg_to_hifn(a)	container_of(a, struct hifn_crypto_alg, alg)
+
+static inline u32 hifn_read_0(struct hifn_device *dev, u32 reg)
+{
+	u32 ret;
+
+	ret = readl((char *)(dev->bar[0]) + reg);
+
+	return ret;
+}
+
+static inline u32 hifn_read_1(struct hifn_device *dev, u32 reg)
+{
+	u32 ret;
+
+	ret = readl((char *)(dev->bar[1]) + reg);
+
+	return ret;
+}
+
+static inline void hifn_write_0(struct hifn_device *dev, u32 reg, u32 val)
+{
+	writel(val, (char *)(dev->bar[0]) + reg);
+}
+
+static inline void hifn_write_1(struct hifn_device *dev, u32 reg, u32 val)
+{
+	writel(val, (char *)(dev->bar[1]) + reg);
+}
+
+static void hifn_wait_puc(struct hifn_device *dev)
+{
+	int i;
+	u32 ret;
+
+	for (i=10000; i > 0; --i) {
+		ret = hifn_read_0(dev, HIFN_0_PUCTRL);
+		if (!(ret & HIFN_PUCTRL_RESET))
+			break;
+
+		udelay(1);
+	}
+
+	if (!i)
+		dprintk("%s: Failed to reset PUC unit.\n", dev->name);
+}
+
+static void hifn_reset_puc(struct hifn_device *dev)
+{
+	hifn_write_0(dev, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
+	hifn_wait_puc(dev);
+}
+
+static void hifn_stop_device(struct hifn_device *dev)
+{
+	hifn_write_1(dev, HIFN_1_DMA_CSR,
+		HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS |
+		HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS);
+	hifn_write_0(dev, HIFN_0_PUIER, 0);
+	hifn_write_1(dev, HIFN_1_DMA_IER, 0);
+}
+
+static void hifn_reset_dma(struct hifn_device *dev, int full)
+{
+	hifn_stop_device(dev);
+
+	/*
+	 * Setting poll frequency and others to 0.
+	 */
+	hifn_write_1(dev, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
+			HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
+	mdelay(1);
+
+	/*
+	 * Reset DMA.
+	 */
+	if (full) {
+		hifn_write_1(dev, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE);
+		mdelay(1);
+	} else {
+		hifn_write_1(dev, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE |
+				HIFN_DMACNFG_MSTRESET);
+		hifn_reset_puc(dev);
+	}
+
+	hifn_write_1(dev, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
+			HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
+
+	hifn_reset_puc(dev);
+}
+
+static u32 hifn_next_signature(u_int32_t a, u_int cnt)
+{
+	int i;
+	u32 v;
+
+	for (i = 0; i < cnt; i++) {
+
+		/* get the parity */
+		v = a & 0x80080125;
+		v ^= v >> 16;
+		v ^= v >> 8;
+		v ^= v >> 4;
+		v ^= v >> 2;
+		v ^= v >> 1;
+
+		a = (v & 1) ^ (a << 1);
+	}
+
+	return a;
+}
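
The shift-and-xor cascade in hifn_next_signature() folds a 32-bit word down to its parity in five steps; each step halves the width that still matters. A worked pass, as annotation (not part of the patch):

	/* Taps 0x80080125 select bits 31, 19, 8, 5, 2 and 0 of 'a'.  For
	 * a = 0x80080125 all six taps are set, so v starts with six one
	 * bits; after v ^= v>>16, v ^= v>>8, v ^= v>>4, v ^= v>>2 and
	 * v ^= v>>1, bit 0 of v holds their parity (even -> 0 here).
	 * a = (v & 1) ^ (a << 1) then shifts that parity in from the
	 * right, i.e. the function effectively steps an LFSR with taps
	 * 0x80080125; hifn_enable_crypto() below uses it to derive the
	 * write sequence that unlocks the crypto engine.
	 */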
+
+static struct pci2id {
+	u_short		pci_vendor;
+	u_short		pci_prod;
+	char		card_id[13];
+} pci2id[] = {
+	{
+		PCI_VENDOR_ID_HIFN,
+		PCI_DEVICE_ID_HIFN_7955,
+		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		  0x00, 0x00, 0x00, 0x00, 0x00 }
+	},
+	{
+		PCI_VENDOR_ID_HIFN,
+		PCI_DEVICE_ID_HIFN_7956,
+		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		  0x00, 0x00, 0x00, 0x00, 0x00 }
+	}
+};
+
+#if defined(CONFIG_HW_RANDOM) || defined(CONFIG_HW_RANDOM_MODULE)
+static int hifn_rng_data_present(struct hwrng *rng, int wait)
+{
+	struct hifn_device *dev = (struct hifn_device *)rng->priv;
+	s64 nsec;
+
+	nsec = ktime_to_ns(ktime_sub(ktime_get(), dev->rngtime));
+	nsec -= dev->rng_wait_time;
+	if (nsec <= 0)
+		return 1;
+	if (!wait)
+		return 0;
+	ndelay(nsec);
+	return 1;
+}
+
+static int hifn_rng_data_read(struct hwrng *rng, u32 *data)
+{
+	struct hifn_device *dev = (struct hifn_device *)rng->priv;
+
+	*data = hifn_read_1(dev, HIFN_1_RNG_DATA);
+	dev->rngtime = ktime_get();
+	return 4;
+}
+
+static int hifn_register_rng(struct hifn_device *dev)
+{
+	/*
+	 * We must wait at least 256 Pk_clk cycles between two reads of the rng.
+	 */
+	dev->rng_wait_time	= DIV_ROUND_UP(NSEC_PER_SEC, dev->pk_clk_freq) *
+				  256;
+
+	dev->rng.name		= dev->name;
+	dev->rng.data_present	= hifn_rng_data_present,
+	dev->rng.data_read	= hifn_rng_data_read,
+	dev->rng.priv		= (unsigned long)dev;
+
+	return hwrng_register(&dev->rng);
+}
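
Worked numbers for the wait-time computation above, assuming the default 66 MHz external reference (see hifn_init_pll() below):

	/* m = 266/66 = 4, so pk_clk_freq = 1000000 * (66 + 1) * 4 / 2
	 * = 134 MHz (deliberately overestimated, see the PLL comment).
	 * One Pk_clk cycle is then DIV_ROUND_UP(1e9, 134e6) = 8 ns, and
	 * 256 cycles give rng_wait_time = 8 * 256 = 2048 ns between reads.
	 */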
+
+static void hifn_unregister_rng(struct hifn_device *dev)
+{
+	hwrng_unregister(&dev->rng);
+}
+#else
+#define hifn_register_rng(dev)		0
+#define hifn_unregister_rng(dev)
+#endif
+
+static int hifn_init_pubrng(struct hifn_device *dev)
+{
+	int i;
+
+	hifn_write_1(dev, HIFN_1_PUB_RESET, hifn_read_1(dev, HIFN_1_PUB_RESET) |
+			HIFN_PUBRST_RESET);
+
+	for (i=100; i > 0; --i) {
+		mdelay(1);
+
+		if ((hifn_read_1(dev, HIFN_1_PUB_RESET) & HIFN_PUBRST_RESET) == 0)
+			break;
+	}
+
+	if (!i)
+		dprintk("Chip %s: Failed to initialise public key engine.\n",
+				dev->name);
+	else {
+		hifn_write_1(dev, HIFN_1_PUB_IEN, HIFN_PUBIEN_DONE);
+		dev->dmareg |= HIFN_DMAIER_PUBDONE;
+		hifn_write_1(dev, HIFN_1_DMA_IER, dev->dmareg);
+
870 dprintk("Chip %s: Public key engine has been sucessfully "
871 "initialised.\n", dev->name);
872 }
873
874 /*
875 * Enable RNG engine.
876 */
877
878 hifn_write_1(dev, HIFN_1_RNG_CONFIG,
879 hifn_read_1(dev, HIFN_1_RNG_CONFIG) | HIFN_RNGCFG_ENA);
880 dprintk("Chip %s: RNG engine has been successfully initialised.\n",
881 dev->name);
882
883#if defined(CONFIG_HW_RANDOM) || defined(CONFIG_HW_RANDOM_MODULE)
884 /* First value must be discarded */
885 hifn_read_1(dev, HIFN_1_RNG_DATA);
886 dev->rngtime = ktime_get();
887#endif
888 return 0;
889}
890
891static int hifn_enable_crypto(struct hifn_device *dev)
892{
893 u32 dmacfg, addr;
894 char *offtbl = NULL;
895 int i;
896
897 for (i = 0; i < sizeof(pci2id)/sizeof(pci2id[0]); i++) {
898 if (pci2id[i].pci_vendor == dev->pdev->vendor &&
899 pci2id[i].pci_prod == dev->pdev->device) {
900 offtbl = pci2id[i].card_id;
901 break;
902 }
903 }
904
905 if (offtbl == NULL) {
906 dprintk("Chip %s: Unknown card!\n", dev->name);
907 return -ENODEV;
908 }
909
910 dmacfg = hifn_read_1(dev, HIFN_1_DMA_CNFG);
911
912 hifn_write_1(dev, HIFN_1_DMA_CNFG,
913 HIFN_DMACNFG_UNLOCK | HIFN_DMACNFG_MSTRESET |
914 HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
915 mdelay(1);
916 addr = hifn_read_1(dev, HIFN_1_UNLOCK_SECRET1);
917 mdelay(1);
918 hifn_write_1(dev, HIFN_1_UNLOCK_SECRET2, 0);
919 mdelay(1);
920
921 for (i=0; i<12; ++i) {
922 addr = hifn_next_signature(addr, offtbl[i] + 0x101);
923 hifn_write_1(dev, HIFN_1_UNLOCK_SECRET2, addr);
924
925 mdelay(1);
926 }
927 hifn_write_1(dev, HIFN_1_DMA_CNFG, dmacfg);
928
929 dprintk("Chip %s: %s.\n", dev->name, pci_name(dev->pdev));
930
931 return 0;
932}
933
934static void hifn_init_dma(struct hifn_device *dev)
935{
936 struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
937 u32 dptr = dev->desc_dma;
938 int i;
939
940 for (i=0; i<HIFN_D_CMD_RSIZE; ++i)
941 dma->cmdr[i].p = __cpu_to_le32(dptr +
942 offsetof(struct hifn_dma, command_bufs[i][0]));
943 for (i=0; i<HIFN_D_RES_RSIZE; ++i)
944 dma->resr[i].p = __cpu_to_le32(dptr +
945 offsetof(struct hifn_dma, result_bufs[i][0]));
946
947 /*
948 * Setup LAST descriptors.
949 */
950 dma->cmdr[HIFN_D_CMD_RSIZE].p = __cpu_to_le32(dptr +
951 offsetof(struct hifn_dma, cmdr[0]));
952 dma->srcr[HIFN_D_SRC_RSIZE].p = __cpu_to_le32(dptr +
953 offsetof(struct hifn_dma, srcr[0]));
954 dma->dstr[HIFN_D_DST_RSIZE].p = __cpu_to_le32(dptr +
955 offsetof(struct hifn_dma, dstr[0]));
956 dma->resr[HIFN_D_RES_RSIZE].p = __cpu_to_le32(dptr +
957 offsetof(struct hifn_dma, resr[0]));
958
959 dma->cmdu = dma->srcu = dma->dstu = dma->resu = 0;
960 dma->cmdi = dma->srci = dma->dsti = dma->resi = 0;
961 dma->cmdk = dma->srck = dma->dstk = dma->resk = 0;
962}
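
/*
 * Note on the ring layout above (a reading of the code, not vendor
 * documentation): the extra descriptor written at index HIFN_D_*_RSIZE
 * points back to offset 0 of its own ring, so the engine wraps around
 * automatically; the i/u/k counters then track the producer index, the
 * number of in-flight descriptors and the cleanup index respectively.
 */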
963
964/*
965 * Initialize the PLL. We need to know the frequency of the reference clock
966 * to calculate the optimal multiplier. For PCI we assume 66MHz, since that
967 * allows us to operate without the risk of overclocking the chip. If it
968 * actually uses 33MHz, the chip will operate at half speed; this can be
969 * overridden by specifying the frequency as a module parameter (pci33).
970 *
971 * Unfortunately the PCI clock is not very suitable since the HIFN needs a
972 * stable clock and the PCI clock frequency may vary, so the default is the
973 * external clock. There is no way to find out its frequency, so we default to
974 * 66MHz since according to Mike Ham of HiFn, almost every board in existence
975 * has an external crystal populated at 66MHz.
976 */
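
/*
 * Worked example for the code below (assuming HIFN_PLL_FCK_MAX is
 * 266 (MHz); the value is defined in the header, this figure is an
 * assumption consistent with the 20-100MHz reference range checked in
 * hifn_init()): with the default 66MHz reference, m = 266 / 66 = 4,
 * the ND field is programmed with m / 2 - 1 = 1, HIFN_PLL_IS_1_8 is
 * selected since m <= 8, and the core runs at 66 * 4 = 264MHz, just
 * below the maximum.
 */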
977static void hifn_init_pll(struct hifn_device *dev)
978{
979 unsigned int freq, m;
980 u32 pllcfg;
981
982 pllcfg = HIFN_1_PLL | HIFN_PLL_RESERVED_1;
983
984 if (strncmp(hifn_pll_ref, "ext", 3) == 0)
985 pllcfg |= HIFN_PLL_REF_CLK_PLL;
986 else
987 pllcfg |= HIFN_PLL_REF_CLK_HBI;
988
989 if (hifn_pll_ref[3] != '\0')
990 freq = simple_strtoul(hifn_pll_ref + 3, NULL, 10);
991 else {
992 freq = 66;
993 printk(KERN_INFO "hifn795x: assuming %uMHz clock speed, "
994 "override with hifn_pll_ref=%.3s<frequency>\n",
995 freq, hifn_pll_ref);
996 }
997
998 m = HIFN_PLL_FCK_MAX / freq;
999
1000 pllcfg |= (m / 2 - 1) << HIFN_PLL_ND_SHIFT;
1001 if (m <= 8)
1002 pllcfg |= HIFN_PLL_IS_1_8;
1003 else
1004 pllcfg |= HIFN_PLL_IS_9_12;
1005
1006 /* Select clock source and enable clock bypass */
1007 hifn_write_1(dev, HIFN_1_PLL, pllcfg |
1008 HIFN_PLL_PK_CLK_HBI | HIFN_PLL_PE_CLK_HBI | HIFN_PLL_BP);
1009
1010 /* Let the chip lock to the input clock */
1011 mdelay(10);
1012
1013 /* Disable clock bypass */
1014 hifn_write_1(dev, HIFN_1_PLL, pllcfg |
1015 HIFN_PLL_PK_CLK_HBI | HIFN_PLL_PE_CLK_HBI);
1016
1017 /* Switch the engines to the PLL */
1018 hifn_write_1(dev, HIFN_1_PLL, pllcfg |
1019 HIFN_PLL_PK_CLK_PLL | HIFN_PLL_PE_CLK_PLL);
1020
1021 /*
1022 * The Fpk_clk runs at half the total speed. Its frequency is needed to
1023 * calculate the minimum time between two reads of the rng. Since 33MHz
1024 * is actually 33.333... we overestimate the frequency here, resulting
1025 * in slightly larger intervals.
1026 */
1027 dev->pk_clk_freq = 1000000 * (freq + 1) * m / 2;
1028}
1029
1030static void hifn_init_registers(struct hifn_device *dev)
1031{
1032 u32 dptr = dev->desc_dma;
1033
1034 /* Initialization magic... */
1035 hifn_write_0(dev, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
1036 hifn_write_0(dev, HIFN_0_FIFOCNFG, HIFN_FIFOCNFG_THRESHOLD);
1037 hifn_write_0(dev, HIFN_0_PUIER, HIFN_PUIER_DSTOVER);
1038
1039 /* write all 4 ring address registers */
1040 hifn_write_1(dev, HIFN_1_DMA_CRAR, __cpu_to_le32(dptr +
1041 offsetof(struct hifn_dma, cmdr[0])));
1042 hifn_write_1(dev, HIFN_1_DMA_SRAR, __cpu_to_le32(dptr +
1043 offsetof(struct hifn_dma, srcr[0])));
1044 hifn_write_1(dev, HIFN_1_DMA_DRAR, __cpu_to_le32(dptr +
1045 offsetof(struct hifn_dma, dstr[0])));
1046 hifn_write_1(dev, HIFN_1_DMA_RRAR, __cpu_to_le32(dptr +
1047 offsetof(struct hifn_dma, resr[0])));
1048
1049 mdelay(2);
1050#if 0
1051 hifn_write_1(dev, HIFN_1_DMA_CSR,
1052 HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS |
1053 HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS |
1054 HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST |
1055 HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER |
1056 HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE | HIFN_DMACSR_R_LAST |
1057 HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER |
1058 HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST |
1059 HIFN_DMACSR_S_WAIT |
1060 HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST |
1061 HIFN_DMACSR_C_WAIT |
1062 HIFN_DMACSR_ENGINE |
1063 HIFN_DMACSR_PUBDONE);
1064#else
1065 hifn_write_1(dev, HIFN_1_DMA_CSR,
1066 HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
1067 HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA |
1068 HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST |
1069 HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER |
1070 HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE | HIFN_DMACSR_R_LAST |
1071 HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER |
1072 HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST |
1073 HIFN_DMACSR_S_WAIT |
1074 HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST |
1075 HIFN_DMACSR_C_WAIT |
1076 HIFN_DMACSR_ENGINE |
1077 HIFN_DMACSR_PUBDONE);
1078#endif
1079 hifn_read_1(dev, HIFN_1_DMA_CSR);
1080
1081 dev->dmareg |= HIFN_DMAIER_R_DONE | HIFN_DMAIER_C_ABORT |
1082 HIFN_DMAIER_D_OVER | HIFN_DMAIER_R_OVER |
1083 HIFN_DMAIER_S_ABORT | HIFN_DMAIER_D_ABORT | HIFN_DMAIER_R_ABORT |
1084 HIFN_DMAIER_ENGINE;
1085 dev->dmareg &= ~HIFN_DMAIER_C_WAIT;
1086
1087 hifn_write_1(dev, HIFN_1_DMA_IER, dev->dmareg);
1088 hifn_read_1(dev, HIFN_1_DMA_IER);
1089#if 0
1090 hifn_write_0(dev, HIFN_0_PUCNFG, HIFN_PUCNFG_ENCCNFG |
1091 HIFN_PUCNFG_DRFR_128 | HIFN_PUCNFG_TCALLPHASES |
1092 HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32 |
1093 HIFN_PUCNFG_DRAM);
1094#else
1095 hifn_write_0(dev, HIFN_0_PUCNFG, 0x10342);
1096#endif
1097 hifn_init_pll(dev);
1098
1099 hifn_write_0(dev, HIFN_0_PUISR, HIFN_PUISR_DSTOVER);
1100 hifn_write_1(dev, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
1101 HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE | HIFN_DMACNFG_LAST |
1102			((HIFN_POLL_FREQUENCY << 16) & HIFN_DMACNFG_POLLFREQ) |
1103 ((HIFN_POLL_SCALAR << 8) & HIFN_DMACNFG_POLLINVAL));
1104}
1105
1106static int hifn_setup_base_command(struct hifn_device *dev, u8 *buf,
1107 unsigned dlen, unsigned slen, u16 mask, u8 snum)
1108{
1109 struct hifn_base_command *base_cmd;
1110 u8 *buf_pos = buf;
1111
1112 base_cmd = (struct hifn_base_command *)buf_pos;
1113 base_cmd->masks = __cpu_to_le16(mask);
1114 base_cmd->total_source_count =
1115 __cpu_to_le16(slen & HIFN_BASE_CMD_LENMASK_LO);
1116 base_cmd->total_dest_count =
1117 __cpu_to_le16(dlen & HIFN_BASE_CMD_LENMASK_LO);
1118
1119 dlen >>= 16;
1120 slen >>= 16;
1121 base_cmd->session_num = __cpu_to_le16(snum |
1122 ((slen << HIFN_BASE_CMD_SRCLEN_S) & HIFN_BASE_CMD_SRCLEN_M) |
1123 ((dlen << HIFN_BASE_CMD_DSTLEN_S) & HIFN_BASE_CMD_DSTLEN_M));
1124
1125 return sizeof(struct hifn_base_command);
1126}
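
/*
 * Illustrative sketch of the split above: for a hypothetical 0x12345
 * byte request, the low 16 bits (0x2345) land in total_source_count and
 * total_dest_count, while the remaining high bits (0x1) are packed into
 * session_num through the HIFN_BASE_CMD_{SRC,DST}LEN fields, so
 * requests larger than 64KB still fit into the 16-bit command words.
 */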
1127
1128static int hifn_setup_crypto_command(struct hifn_device *dev,
1129 u8 *buf, unsigned dlen, unsigned slen,
1130 u8 *key, int keylen, u8 *iv, int ivsize, u16 mode)
1131{
1132 struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
1133 struct hifn_crypt_command *cry_cmd;
1134 u8 *buf_pos = buf;
1135 u16 cmd_len;
1136
1137 cry_cmd = (struct hifn_crypt_command *)buf_pos;
1138
1139 cry_cmd->source_count = __cpu_to_le16(dlen & 0xffff);
1140 dlen >>= 16;
1141 cry_cmd->masks = __cpu_to_le16(mode |
1142 ((dlen << HIFN_CRYPT_CMD_SRCLEN_S) &
1143 HIFN_CRYPT_CMD_SRCLEN_M));
1144 cry_cmd->header_skip = 0;
1145 cry_cmd->reserved = 0;
1146
1147 buf_pos += sizeof(struct hifn_crypt_command);
1148
1149 dma->cmdu++;
1150 if (dma->cmdu > 1) {
1151 dev->dmareg |= HIFN_DMAIER_C_WAIT;
1152 hifn_write_1(dev, HIFN_1_DMA_IER, dev->dmareg);
1153 }
1154
1155 if (keylen) {
1156 memcpy(buf_pos, key, keylen);
1157 buf_pos += keylen;
1158 }
1159 if (ivsize) {
1160 memcpy(buf_pos, iv, ivsize);
1161 buf_pos += ivsize;
1162 }
1163
1164 cmd_len = buf_pos - buf;
1165
1166 return cmd_len;
1167}
1168
1169static int hifn_setup_src_desc(struct hifn_device *dev, struct page *page,
1170 unsigned int offset, unsigned int size)
1171{
1172 struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
1173 int idx;
1174 dma_addr_t addr;
1175
1176 addr = pci_map_page(dev->pdev, page, offset, size, PCI_DMA_TODEVICE);
1177
1178 idx = dma->srci;
1179
1180 dma->srcr[idx].p = __cpu_to_le32(addr);
1181 dma->srcr[idx].l = __cpu_to_le32(size) | HIFN_D_VALID |
1182 HIFN_D_MASKDONEIRQ | HIFN_D_NOINVALID | HIFN_D_LAST;
1183
1184 if (++idx == HIFN_D_SRC_RSIZE) {
1185 dma->srcr[idx].l = __cpu_to_le32(HIFN_D_VALID |
1186 HIFN_D_JUMP |
1187 HIFN_D_MASKDONEIRQ | HIFN_D_LAST);
1188 idx = 0;
1189 }
1190
1191 dma->srci = idx;
1192 dma->srcu++;
1193
1194 if (!(dev->flags & HIFN_FLAG_SRC_BUSY)) {
1195 hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_S_CTRL_ENA);
1196 dev->flags |= HIFN_FLAG_SRC_BUSY;
1197 }
1198
1199 return size;
1200}
1201
1202static void hifn_setup_res_desc(struct hifn_device *dev)
1203{
1204 struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
1205
1206 dma->resr[dma->resi].l = __cpu_to_le32(HIFN_USED_RESULT |
1207 HIFN_D_VALID | HIFN_D_LAST);
1208 /*
1209 * dma->resr[dma->resi].l = __cpu_to_le32(HIFN_MAX_RESULT | HIFN_D_VALID |
1210 * HIFN_D_LAST | HIFN_D_NOINVALID);
1211 */
1212
1213 if (++dma->resi == HIFN_D_RES_RSIZE) {
1214 dma->resr[HIFN_D_RES_RSIZE].l = __cpu_to_le32(HIFN_D_VALID |
1215 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ | HIFN_D_LAST);
1216 dma->resi = 0;
1217 }
1218
1219 dma->resu++;
1220
1221 if (!(dev->flags & HIFN_FLAG_RES_BUSY)) {
1222 hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_R_CTRL_ENA);
1223 dev->flags |= HIFN_FLAG_RES_BUSY;
1224 }
1225}
1226
1227static void hifn_setup_dst_desc(struct hifn_device *dev, struct page *page,
1228 unsigned offset, unsigned size)
1229{
1230 struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
1231 int idx;
1232 dma_addr_t addr;
1233
1234 addr = pci_map_page(dev->pdev, page, offset, size, PCI_DMA_FROMDEVICE);
1235
1236 idx = dma->dsti;
1237 dma->dstr[idx].p = __cpu_to_le32(addr);
1238 dma->dstr[idx].l = __cpu_to_le32(size | HIFN_D_VALID |
1239 HIFN_D_MASKDONEIRQ | HIFN_D_NOINVALID | HIFN_D_LAST);
1240
1241 if (++idx == HIFN_D_DST_RSIZE) {
1242 dma->dstr[idx].l = __cpu_to_le32(HIFN_D_VALID |
1243 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ |
1244 HIFN_D_LAST | HIFN_D_NOINVALID);
1245 idx = 0;
1246 }
1247 dma->dsti = idx;
1248 dma->dstu++;
1249
1250 if (!(dev->flags & HIFN_FLAG_DST_BUSY)) {
1251 hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_D_CTRL_ENA);
1252 dev->flags |= HIFN_FLAG_DST_BUSY;
1253 }
1254}
1255
1256static int hifn_setup_dma(struct hifn_device *dev, struct page *spage, unsigned int soff,
1257 struct page *dpage, unsigned int doff, unsigned int nbytes, void *priv,
1258 struct hifn_context *ctx)
1259{
1260 struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
1261 int cmd_len, sa_idx;
1262 u8 *buf, *buf_pos;
1263 u16 mask;
1264
1265 dprintk("%s: spage: %p, soffset: %u, dpage: %p, doffset: %u, nbytes: %u, priv: %p, ctx: %p.\n",
1266 dev->name, spage, soff, dpage, doff, nbytes, priv, ctx);
1267
1268 sa_idx = dma->resi;
1269
1270 hifn_setup_src_desc(dev, spage, soff, nbytes);
1271
1272 buf_pos = buf = dma->command_bufs[dma->cmdi];
1273
1274 mask = 0;
1275 switch (ctx->op) {
1276 case ACRYPTO_OP_DECRYPT:
1277 mask = HIFN_BASE_CMD_CRYPT | HIFN_BASE_CMD_DECODE;
1278 break;
1279 case ACRYPTO_OP_ENCRYPT:
1280 mask = HIFN_BASE_CMD_CRYPT;
1281 break;
1282 case ACRYPTO_OP_HMAC:
1283 mask = HIFN_BASE_CMD_MAC;
1284 break;
1285 default:
1286 goto err_out;
1287 }
1288
1289 buf_pos += hifn_setup_base_command(dev, buf_pos, nbytes,
1290 nbytes, mask, dev->snum);
1291
1292 if (ctx->op == ACRYPTO_OP_ENCRYPT || ctx->op == ACRYPTO_OP_DECRYPT) {
1293 u16 md = 0;
1294
1295 if (ctx->keysize)
1296 md |= HIFN_CRYPT_CMD_NEW_KEY;
1297 if (ctx->iv && ctx->mode != ACRYPTO_MODE_ECB)
1298 md |= HIFN_CRYPT_CMD_NEW_IV;
1299
1300 switch (ctx->mode) {
1301 case ACRYPTO_MODE_ECB:
1302 md |= HIFN_CRYPT_CMD_MODE_ECB;
1303 break;
1304 case ACRYPTO_MODE_CBC:
1305 md |= HIFN_CRYPT_CMD_MODE_CBC;
1306 break;
1307 case ACRYPTO_MODE_CFB:
1308 md |= HIFN_CRYPT_CMD_MODE_CFB;
1309 break;
1310 case ACRYPTO_MODE_OFB:
1311 md |= HIFN_CRYPT_CMD_MODE_OFB;
1312 break;
1313 default:
1314 goto err_out;
1315 }
1316
1317 switch (ctx->type) {
1318 case ACRYPTO_TYPE_AES_128:
1319 if (ctx->keysize != 16)
1320 goto err_out;
1321 md |= HIFN_CRYPT_CMD_KSZ_128 |
1322 HIFN_CRYPT_CMD_ALG_AES;
1323 break;
1324 case ACRYPTO_TYPE_AES_192:
1325 if (ctx->keysize != 24)
1326 goto err_out;
1327 md |= HIFN_CRYPT_CMD_KSZ_192 |
1328 HIFN_CRYPT_CMD_ALG_AES;
1329 break;
1330 case ACRYPTO_TYPE_AES_256:
1331 if (ctx->keysize != 32)
1332 goto err_out;
1333 md |= HIFN_CRYPT_CMD_KSZ_256 |
1334 HIFN_CRYPT_CMD_ALG_AES;
1335 break;
1336 case ACRYPTO_TYPE_3DES:
1337 if (ctx->keysize != 24)
1338 goto err_out;
1339 md |= HIFN_CRYPT_CMD_ALG_3DES;
1340 break;
1341 case ACRYPTO_TYPE_DES:
1342 if (ctx->keysize != 8)
1343 goto err_out;
1344 md |= HIFN_CRYPT_CMD_ALG_DES;
1345 break;
1346 default:
1347 goto err_out;
1348 }
1349
1350 buf_pos += hifn_setup_crypto_command(dev, buf_pos,
1351 nbytes, nbytes, ctx->key, ctx->keysize,
1352 ctx->iv, ctx->ivsize, md);
1353 }
1354
1355 dev->sa[sa_idx] = priv;
1356
1357 cmd_len = buf_pos - buf;
1358 dma->cmdr[dma->cmdi].l = __cpu_to_le32(cmd_len | HIFN_D_VALID |
1359 HIFN_D_LAST | HIFN_D_MASKDONEIRQ);
1360
1361 if (++dma->cmdi == HIFN_D_CMD_RSIZE) {
1362 dma->cmdr[dma->cmdi].l = __cpu_to_le32(HIFN_MAX_COMMAND |
1363 HIFN_D_VALID | HIFN_D_LAST |
1364 HIFN_D_MASKDONEIRQ | HIFN_D_JUMP);
1365 dma->cmdi = 0;
1366 } else
1367 dma->cmdr[dma->cmdi-1].l |= __cpu_to_le32(HIFN_D_VALID);
1368
1369 if (!(dev->flags & HIFN_FLAG_CMD_BUSY)) {
1370 hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_ENA);
1371 dev->flags |= HIFN_FLAG_CMD_BUSY;
1372 }
1373
1374 hifn_setup_dst_desc(dev, dpage, doff, nbytes);
1375 hifn_setup_res_desc(dev);
1376
1377 return 0;
1378
1379err_out:
1380 return -EINVAL;
1381}
1382
1383static int ablkcipher_walk_init(struct ablkcipher_walk *w,
1384 int num, gfp_t gfp_flags)
1385{
1386 int i;
1387
1388 num = min(ASYNC_SCATTERLIST_CACHE, num);
1389 sg_init_table(w->cache, num);
1390
1391 w->num = 0;
1392 for (i=0; i<num; ++i) {
1393 struct page *page = alloc_page(gfp_flags);
1394 struct scatterlist *s;
1395
1396 if (!page)
1397 break;
1398
1399 s = &w->cache[i];
1400
1401 sg_set_page(s, page, PAGE_SIZE, 0);
1402 w->num++;
1403 }
1404
1405 return i;
1406}
1407
1408static void ablkcipher_walk_exit(struct ablkcipher_walk *w)
1409{
1410 int i;
1411
1412 for (i=0; i<w->num; ++i) {
1413 struct scatterlist *s = &w->cache[i];
1414
1415 __free_page(sg_page(s));
1416
1417 s->length = 0;
1418 }
1419
1420 w->num = 0;
1421}
1422
1423static int ablkcipher_add(void *daddr, unsigned int *drestp, struct scatterlist *src,
1424 unsigned int size, unsigned int *nbytesp)
1425{
1426 unsigned int copy, drest = *drestp, nbytes = *nbytesp;
1427 int idx = 0;
1428 void *saddr;
1429
1430 if (drest < size || size > nbytes)
1431 return -EINVAL;
1432
1433 while (size) {
1434 copy = min(drest, src->length);
1435
1436 saddr = kmap_atomic(sg_page(src), KM_SOFTIRQ1);
1437 memcpy(daddr, saddr + src->offset, copy);
1438 kunmap_atomic(saddr, KM_SOFTIRQ1);
1439
1440 size -= copy;
1441 drest -= copy;
1442 nbytes -= copy;
1443 daddr += copy;
1444
1445 dprintk("%s: copy: %u, size: %u, drest: %u, nbytes: %u.\n",
1446 __func__, copy, size, drest, nbytes);
1447
1448 src++;
1449 idx++;
1450 }
1451
1452 *nbytesp = nbytes;
1453 *drestp = drest;
1454
1455 return idx;
1456}
1457
1458static int ablkcipher_walk(struct ablkcipher_request *req,
1459 struct ablkcipher_walk *w)
1460{
1461 unsigned blocksize =
1462 crypto_ablkcipher_blocksize(crypto_ablkcipher_reqtfm(req));
1463 unsigned alignmask =
1464 crypto_ablkcipher_alignmask(crypto_ablkcipher_reqtfm(req));
1465 struct scatterlist *src, *dst, *t;
1466 void *daddr;
1467 unsigned int nbytes = req->nbytes, offset, copy, diff;
1468 int idx, tidx, err;
1469
1470 tidx = idx = 0;
1471 offset = 0;
1472 while (nbytes) {
1473 if (idx >= w->num && (w->flags & ASYNC_FLAGS_MISALIGNED))
1474 return -EINVAL;
1475
1476 src = &req->src[idx];
1477 dst = &req->dst[idx];
1478
1479 dprintk("\n%s: slen: %u, dlen: %u, soff: %u, doff: %u, offset: %u, "
1480 "blocksize: %u, nbytes: %u.\n",
1481 __func__, src->length, dst->length, src->offset,
1482 dst->offset, offset, blocksize, nbytes);
1483
1484 if (src->length & (blocksize - 1) ||
1485 src->offset & (alignmask - 1) ||
1486 dst->length & (blocksize - 1) ||
1487 dst->offset & (alignmask - 1) ||
1488 offset) {
1489 unsigned slen = src->length - offset;
1490 unsigned dlen = PAGE_SIZE;
1491
1492 t = &w->cache[idx];
1493
1494 daddr = kmap_atomic(sg_page(t), KM_SOFTIRQ0);
1495 err = ablkcipher_add(daddr, &dlen, src, slen, &nbytes);
1496 if (err < 0)
1497 goto err_out_unmap;
1498
1499 idx += err;
1500
1501 copy = slen & ~(blocksize - 1);
1502 diff = slen & (blocksize - 1);
1503
1504 if (dlen < nbytes) {
1505				/*
1506				 * The destination page does not have enough
1507				 * space for an additional block-sized chunk,
1508				 * so we mark that page as containing only
1509				 * blocksize-aligned chunks:
1510				 * t->length = (slen & ~(blocksize - 1));
1511				 * and increase the number of bytes to be
1512				 * processed in the next chunk:
1513				 * nbytes += diff;
1514				 */
1515 nbytes += diff;
1516
1517				/*
1518				 * Temporary of course...
1519				 * Kick the author if you catch this one.
1520				 */
1521				printk(KERN_ERR "%s: dlen: %u, nbytes: %u, "
1522						"slen: %u, offset: %u.\n",
1523						__func__, dlen, nbytes, slen, offset);
1524				printk(KERN_ERR "%s: please contact the author to fix "
1525						"this issue; you should never hit "
1526						"this path under any condition, but "
1527						"who knows how you used the crypto code.\n"
1528						"Thank you.\n", __func__);
1529 BUG();
1530 } else {
1531 copy += diff + nbytes;
1532
1533 src = &req->src[idx];
1534
1535 err = ablkcipher_add(daddr + slen, &dlen, src, nbytes, &nbytes);
1536 if (err < 0)
1537 goto err_out_unmap;
1538
1539 idx += err;
1540 }
1541
1542 t->length = copy;
1543 t->offset = offset;
1544
1545 kunmap_atomic(daddr, KM_SOFTIRQ0);
1546 } else {
1547 nbytes -= src->length;
1548 idx++;
1549 }
1550
1551 tidx++;
1552 }
1553
1554 return tidx;
1555
1556err_out_unmap:
1557 kunmap_atomic(daddr, KM_SOFTIRQ0);
1558 return err;
1559}
1560
1561static int hifn_setup_session(struct ablkcipher_request *req)
1562{
1563 struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm);
1564 struct hifn_device *dev = ctx->dev;
1565 struct page *spage, *dpage;
1566 unsigned long soff, doff, flags;
1567 unsigned int nbytes = req->nbytes, idx = 0, len;
1568 int err = -EINVAL, sg_num;
1569 struct scatterlist *src, *dst, *t;
1570 unsigned blocksize =
1571 crypto_ablkcipher_blocksize(crypto_ablkcipher_reqtfm(req));
1572 unsigned alignmask =
1573 crypto_ablkcipher_alignmask(crypto_ablkcipher_reqtfm(req));
1574
1575 if (ctx->iv && !ctx->ivsize && ctx->mode != ACRYPTO_MODE_ECB)
1576 goto err_out_exit;
1577
1578 ctx->walk.flags = 0;
1579
1580 while (nbytes) {
1581 src = &req->src[idx];
1582 dst = &req->dst[idx];
1583
1584 if (src->length & (blocksize - 1) ||
1585 src->offset & (alignmask - 1) ||
1586 dst->length & (blocksize - 1) ||
1587 dst->offset & (alignmask - 1)) {
1588 ctx->walk.flags |= ASYNC_FLAGS_MISALIGNED;
1589 }
1590
1591 nbytes -= src->length;
1592 idx++;
1593 }
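
	/*
	 * If any entry failed the block/alignment checks above, the data
	 * is staged through the bounce pages allocated below and copied
	 * by ablkcipher_walk(); fully aligned requests go to the
	 * descriptor rings directly.
	 */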
1594
1595 if (ctx->walk.flags & ASYNC_FLAGS_MISALIGNED) {
1596 err = ablkcipher_walk_init(&ctx->walk, idx, GFP_ATOMIC);
1597 if (err < 0)
1598 return err;
1599 }
1600
1601 nbytes = req->nbytes;
1602 idx = 0;
1603
1604 sg_num = ablkcipher_walk(req, &ctx->walk);
1605
1606 atomic_set(&ctx->sg_num, sg_num);
1607
1608 spin_lock_irqsave(&dev->lock, flags);
1609 if (dev->started + sg_num > HIFN_QUEUE_LENGTH) {
1610 err = -EAGAIN;
1611 goto err_out;
1612 }
1613
1614 dev->snum++;
1615 dev->started += sg_num;
1616
1617 while (nbytes) {
1618 src = &req->src[idx];
1619 dst = &req->dst[idx];
1620 t = &ctx->walk.cache[idx];
1621
1622 if (t->length) {
1623 spage = dpage = sg_page(t);
1624 soff = doff = 0;
1625 len = t->length;
1626 } else {
1627 spage = sg_page(src);
1628 soff = src->offset;
1629
1630 dpage = sg_page(dst);
1631 doff = dst->offset;
1632
1633 len = dst->length;
1634 }
1635
1636 idx++;
1637
1638 err = hifn_setup_dma(dev, spage, soff, dpage, doff, nbytes,
1639 req, ctx);
1640 if (err)
1641 goto err_out;
1642
1643 nbytes -= len;
1644 }
1645
1646 dev->active = HIFN_DEFAULT_ACTIVE_NUM;
1647 spin_unlock_irqrestore(&dev->lock, flags);
1648
1649 return 0;
1650
1651err_out:
1652 spin_unlock_irqrestore(&dev->lock, flags);
1653err_out_exit:
1654 if (err && printk_ratelimit())
1655 dprintk("%s: iv: %p [%d], key: %p [%d], mode: %u, op: %u, "
1656 "type: %u, err: %d.\n",
1657 dev->name, ctx->iv, ctx->ivsize,
1658 ctx->key, ctx->keysize,
1659 ctx->mode, ctx->op, ctx->type, err);
1660
1661 return err;
1662}
1663
1664static int hifn_test(struct hifn_device *dev, int encdec, u8 snum)
1665{
1666 int n, err;
1667 u8 src[16];
1668 struct hifn_context ctx;
1669 u8 fips_aes_ecb_from_zero[16] = {
1670 0x66, 0xE9, 0x4B, 0xD4,
1671 0xEF, 0x8A, 0x2C, 0x3B,
1672 0x88, 0x4C, 0xFA, 0x59,
1673 0xCA, 0x34, 0x2B, 0x2E};
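	/*
	 * The vector above is the well-known AES-128 encryption of an
	 * all-zero block under an all-zero key, so encrypting the zeroed
	 * src buffer in ECB mode must reproduce it exactly.
	 */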
1674
1675 memset(src, 0, sizeof(src));
1676 memset(ctx.key, 0, sizeof(ctx.key));
1677
1678 ctx.dev = dev;
1679 ctx.keysize = 16;
1680 ctx.ivsize = 0;
1681 ctx.iv = NULL;
1682 ctx.op = (encdec)?ACRYPTO_OP_ENCRYPT:ACRYPTO_OP_DECRYPT;
1683 ctx.mode = ACRYPTO_MODE_ECB;
1684 ctx.type = ACRYPTO_TYPE_AES_128;
1685 atomic_set(&ctx.sg_num, 1);
1686
1687 err = hifn_setup_dma(dev,
1688 virt_to_page(src), offset_in_page(src),
1689 virt_to_page(src), offset_in_page(src),
1690 sizeof(src), NULL, &ctx);
1691 if (err)
1692 goto err_out;
1693
1694 msleep(200);
1695
1696 dprintk("%s: decoded: ", dev->name);
1697 for (n=0; n<sizeof(src); ++n)
1698 dprintk("%02x ", src[n]);
1699 dprintk("\n");
1700 dprintk("%s: FIPS : ", dev->name);
1701 for (n=0; n<sizeof(fips_aes_ecb_from_zero); ++n)
1702 dprintk("%02x ", fips_aes_ecb_from_zero[n]);
1703 dprintk("\n");
1704
1705 if (!memcmp(src, fips_aes_ecb_from_zero, sizeof(fips_aes_ecb_from_zero))) {
1706		printk(KERN_INFO "%s: AES 128 ECB test passed successfully.\n",
1707				dev->name);
1708 return 0;
1709 }
1710
1711err_out:
1712	printk(KERN_INFO "%s: AES 128 ECB test failed.\n", dev->name);
1713 return -1;
1714}
1715
1716static int hifn_start_device(struct hifn_device *dev)
1717{
1718 int err;
1719
1720 hifn_reset_dma(dev, 1);
1721
1722 err = hifn_enable_crypto(dev);
1723 if (err)
1724 return err;
1725
1726 hifn_reset_puc(dev);
1727
1728 hifn_init_dma(dev);
1729
1730 hifn_init_registers(dev);
1731
1732 hifn_init_pubrng(dev);
1733
1734 return 0;
1735}
1736
1737static int ablkcipher_get(void *saddr, unsigned int *srestp, unsigned int offset,
1738 struct scatterlist *dst, unsigned int size, unsigned int *nbytesp)
1739{
1740 unsigned int srest = *srestp, nbytes = *nbytesp, copy;
1741 void *daddr;
1742 int idx = 0;
1743
1744 if (srest < size || size > nbytes)
1745 return -EINVAL;
1746
1747 while (size) {
1748
1749 copy = min(dst->length, srest);
1750
1751 daddr = kmap_atomic(sg_page(dst), KM_IRQ0);
1752 memcpy(daddr + dst->offset + offset, saddr, copy);
1753 kunmap_atomic(daddr, KM_IRQ0);
1754
1755 nbytes -= copy;
1756 size -= copy;
1757 srest -= copy;
1758 saddr += copy;
1759 offset = 0;
1760
1761 dprintk("%s: copy: %u, size: %u, srest: %u, nbytes: %u.\n",
1762 __func__, copy, size, srest, nbytes);
1763
1764 dst++;
1765 idx++;
1766 }
1767
1768 *nbytesp = nbytes;
1769 *srestp = srest;
1770
1771 return idx;
1772}
1773
1774static void hifn_process_ready(struct ablkcipher_request *req, int error)
1775{
1776 struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm);
1777 struct hifn_device *dev;
1778
1779 dprintk("%s: req: %p, ctx: %p.\n", __func__, req, ctx);
1780
1781 dev = ctx->dev;
1782 dprintk("%s: req: %p, started: %d, sg_num: %d.\n",
1783 __func__, req, dev->started, atomic_read(&ctx->sg_num));
1784
1785 if (--dev->started < 0)
1786 BUG();
1787
1788 if (atomic_dec_and_test(&ctx->sg_num)) {
1789 unsigned int nbytes = req->nbytes;
1790 int idx = 0, err;
1791 struct scatterlist *dst, *t;
1792 void *saddr;
1793
1794 if (ctx->walk.flags & ASYNC_FLAGS_MISALIGNED) {
1795 while (nbytes) {
1796 t = &ctx->walk.cache[idx];
1797 dst = &req->dst[idx];
1798
1799 dprintk("\n%s: sg_page(t): %p, t->length: %u, "
1800 "sg_page(dst): %p, dst->length: %u, "
1801 "nbytes: %u.\n",
1802 __func__, sg_page(t), t->length,
1803 sg_page(dst), dst->length, nbytes);
1804
1805 if (!t->length) {
1806 nbytes -= dst->length;
1807 idx++;
1808 continue;
1809 }
1810
1811 saddr = kmap_atomic(sg_page(t), KM_IRQ1);
1812
1813 err = ablkcipher_get(saddr, &t->length, t->offset,
1814 dst, nbytes, &nbytes);
1815 if (err < 0) {
1816 kunmap_atomic(saddr, KM_IRQ1);
1817 break;
1818 }
1819
1820 idx += err;
1821 kunmap_atomic(saddr, KM_IRQ1);
1822 }
1823
1824 ablkcipher_walk_exit(&ctx->walk);
1825 }
1826
1827 req->base.complete(&req->base, error);
1828 }
1829}
1830
1831static void hifn_check_for_completion(struct hifn_device *dev, int error)
1832{
1833 int i;
1834 struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
1835
1836 for (i=0; i<HIFN_D_RES_RSIZE; ++i) {
1837 struct hifn_desc *d = &dma->resr[i];
1838
1839 if (!(d->l & __cpu_to_le32(HIFN_D_VALID)) && dev->sa[i]) {
1840 dev->success++;
1841 dev->reset = 0;
1842 hifn_process_ready(dev->sa[i], error);
1843 dev->sa[i] = NULL;
1844 }
1845
1846 if (d->l & __cpu_to_le32(HIFN_D_DESTOVER | HIFN_D_OVER))
1847 if (printk_ratelimit())
1848				printk(KERN_WARNING "%s: overflow detected [d: %u, o: %u] "
1849 "at %d resr: l: %08x, p: %08x.\n",
1850 dev->name,
1851 !!(d->l & __cpu_to_le32(HIFN_D_DESTOVER)),
1852 !!(d->l & __cpu_to_le32(HIFN_D_OVER)),
1853 i, d->l, d->p);
1854 }
1855}
1856
1857static void hifn_clear_rings(struct hifn_device *dev)
1858{
1859 struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
1860 int i, u;
1861
1862 dprintk("%s: ring cleanup 1: i: %d.%d.%d.%d, u: %d.%d.%d.%d, "
1863 "k: %d.%d.%d.%d.\n",
1864 dev->name,
1865 dma->cmdi, dma->srci, dma->dsti, dma->resi,
1866 dma->cmdu, dma->srcu, dma->dstu, dma->resu,
1867 dma->cmdk, dma->srck, dma->dstk, dma->resk);
1868
1869 i = dma->resk; u = dma->resu;
1870 while (u != 0) {
1871 if (dma->resr[i].l & __cpu_to_le32(HIFN_D_VALID))
1872 break;
1873
1874 if (i != HIFN_D_RES_RSIZE)
1875 u--;
1876
1877 if (++i == (HIFN_D_RES_RSIZE + 1))
1878 i = 0;
1879 }
1880 dma->resk = i; dma->resu = u;
1881
1882 i = dma->srck; u = dma->srcu;
1883 while (u != 0) {
1884 if (i == HIFN_D_SRC_RSIZE)
1885 i = 0;
1886 if (dma->srcr[i].l & __cpu_to_le32(HIFN_D_VALID))
1887 break;
1888 i++, u--;
1889 }
1890 dma->srck = i; dma->srcu = u;
1891
1892 i = dma->cmdk; u = dma->cmdu;
1893 while (u != 0) {
1894 if (dma->cmdr[i].l & __cpu_to_le32(HIFN_D_VALID))
1895 break;
1896 if (i != HIFN_D_CMD_RSIZE)
1897 u--;
1898 if (++i == (HIFN_D_CMD_RSIZE + 1))
1899 i = 0;
1900 }
1901 dma->cmdk = i; dma->cmdu = u;
1902
1903 i = dma->dstk; u = dma->dstu;
1904 while (u != 0) {
1905 if (i == HIFN_D_DST_RSIZE)
1906 i = 0;
1907 if (dma->dstr[i].l & __cpu_to_le32(HIFN_D_VALID))
1908 break;
1909 i++, u--;
1910 }
1911 dma->dstk = i; dma->dstu = u;
1912
1913 dprintk("%s: ring cleanup 2: i: %d.%d.%d.%d, u: %d.%d.%d.%d, "
1914 "k: %d.%d.%d.%d.\n",
1915 dev->name,
1916 dma->cmdi, dma->srci, dma->dsti, dma->resi,
1917 dma->cmdu, dma->srcu, dma->dstu, dma->resu,
1918 dma->cmdk, dma->srck, dma->dstk, dma->resk);
1919}
1920
1921static void hifn_work(struct work_struct *work)
1922{
1923 struct delayed_work *dw = container_of(work, struct delayed_work, work);
1924 struct hifn_device *dev = container_of(dw, struct hifn_device, work);
1925 unsigned long flags;
1926 int reset = 0;
1927 u32 r = 0;
1928
1929 spin_lock_irqsave(&dev->lock, flags);
1930 if (dev->active == 0) {
1931 struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
1932
1933 if (dma->cmdu == 0 && (dev->flags & HIFN_FLAG_CMD_BUSY)) {
1934 dev->flags &= ~HIFN_FLAG_CMD_BUSY;
1935 r |= HIFN_DMACSR_C_CTRL_DIS;
1936 }
1937 if (dma->srcu == 0 && (dev->flags & HIFN_FLAG_SRC_BUSY)) {
1938 dev->flags &= ~HIFN_FLAG_SRC_BUSY;
1939 r |= HIFN_DMACSR_S_CTRL_DIS;
1940 }
1941 if (dma->dstu == 0 && (dev->flags & HIFN_FLAG_DST_BUSY)) {
1942 dev->flags &= ~HIFN_FLAG_DST_BUSY;
1943 r |= HIFN_DMACSR_D_CTRL_DIS;
1944 }
1945 if (dma->resu == 0 && (dev->flags & HIFN_FLAG_RES_BUSY)) {
1946 dev->flags &= ~HIFN_FLAG_RES_BUSY;
1947 r |= HIFN_DMACSR_R_CTRL_DIS;
1948 }
1949 if (r)
1950 hifn_write_1(dev, HIFN_1_DMA_CSR, r);
1951 } else
1952 dev->active--;
1953
1954 if (dev->prev_success == dev->success && dev->started)
1955 reset = 1;
1956 dev->prev_success = dev->success;
1957 spin_unlock_irqrestore(&dev->lock, flags);
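
	/*
	 * dev->success not advancing while requests are still outstanding
	 * is treated as a stall: after five consecutive stalled ticks the
	 * device is hard-reset below and everything still pending is
	 * completed with -EBUSY.
	 */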
1958
1959 if (reset) {
1960 dprintk("%s: r: %08x, active: %d, started: %d, "
1961 "success: %lu: reset: %d.\n",
1962 dev->name, r, dev->active, dev->started,
1963 dev->success, reset);
1964
1965 if (++dev->reset >= 5) {
1966 dprintk("%s: really hard reset.\n", dev->name);
1967 hifn_reset_dma(dev, 1);
1968 hifn_stop_device(dev);
1969 hifn_start_device(dev);
1970 dev->reset = 0;
1971 }
1972
1973 spin_lock_irqsave(&dev->lock, flags);
1974 hifn_check_for_completion(dev, -EBUSY);
1975 hifn_clear_rings(dev);
1976 dev->started = 0;
1977 spin_unlock_irqrestore(&dev->lock, flags);
1978 }
1979
1980 schedule_delayed_work(&dev->work, HZ);
1981}
1982
1983static irqreturn_t hifn_interrupt(int irq, void *data)
1984{
1985 struct hifn_device *dev = (struct hifn_device *)data;
1986 struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
1987 u32 dmacsr, restart;
1988
1989 dmacsr = hifn_read_1(dev, HIFN_1_DMA_CSR);
1990
1991 dprintk("%s: 1 dmacsr: %08x, dmareg: %08x, res: %08x [%d], "
1992 "i: %d.%d.%d.%d, u: %d.%d.%d.%d.\n",
1993 dev->name, dmacsr, dev->dmareg, dmacsr & dev->dmareg, dma->cmdi,
1994		dma->cmdi, dma->srci, dma->dsti, dma->resi,
1995		dma->cmdu, dma->srcu, dma->dstu, dma->resu);
1996
1997 if ((dmacsr & dev->dmareg) == 0)
1998 return IRQ_NONE;
1999
2000 hifn_write_1(dev, HIFN_1_DMA_CSR, dmacsr & dev->dmareg);
2001
2002 if (dmacsr & HIFN_DMACSR_ENGINE)
2003 hifn_write_0(dev, HIFN_0_PUISR, hifn_read_0(dev, HIFN_0_PUISR));
2004 if (dmacsr & HIFN_DMACSR_PUBDONE)
2005 hifn_write_1(dev, HIFN_1_PUB_STATUS,
2006 hifn_read_1(dev, HIFN_1_PUB_STATUS) | HIFN_PUBSTS_DONE);
2007
2008 restart = dmacsr & (HIFN_DMACSR_R_OVER | HIFN_DMACSR_D_OVER);
2009 if (restart) {
2010 u32 puisr = hifn_read_0(dev, HIFN_0_PUISR);
2011
2012 if (printk_ratelimit())
2013			printk(KERN_WARNING "%s: overflow: r: %d, d: %d, puisr: %08x, d: %u.\n",
2014 dev->name, !!(dmacsr & HIFN_DMACSR_R_OVER),
2015 !!(dmacsr & HIFN_DMACSR_D_OVER),
2016 puisr, !!(puisr & HIFN_PUISR_DSTOVER));
2017 if (!!(puisr & HIFN_PUISR_DSTOVER))
2018 hifn_write_0(dev, HIFN_0_PUISR, HIFN_PUISR_DSTOVER);
2019 hifn_write_1(dev, HIFN_1_DMA_CSR, dmacsr & (HIFN_DMACSR_R_OVER |
2020 HIFN_DMACSR_D_OVER));
2021 }
2022
2023 restart = dmacsr & (HIFN_DMACSR_C_ABORT | HIFN_DMACSR_S_ABORT |
2024 HIFN_DMACSR_D_ABORT | HIFN_DMACSR_R_ABORT);
2025 if (restart) {
2026 if (printk_ratelimit())
2027			printk(KERN_WARNING "%s: abort: c: %d, s: %d, d: %d, r: %d.\n",
2028 dev->name, !!(dmacsr & HIFN_DMACSR_C_ABORT),
2029 !!(dmacsr & HIFN_DMACSR_S_ABORT),
2030 !!(dmacsr & HIFN_DMACSR_D_ABORT),
2031 !!(dmacsr & HIFN_DMACSR_R_ABORT));
2032 hifn_reset_dma(dev, 1);
2033 hifn_init_dma(dev);
2034 hifn_init_registers(dev);
2035 }
2036
2037 if ((dmacsr & HIFN_DMACSR_C_WAIT) && (dma->cmdu == 0)) {
2038 dprintk("%s: wait on command.\n", dev->name);
2039 dev->dmareg &= ~(HIFN_DMAIER_C_WAIT);
2040 hifn_write_1(dev, HIFN_1_DMA_IER, dev->dmareg);
2041 }
2042
2043 tasklet_schedule(&dev->tasklet);
2044 hifn_clear_rings(dev);
2045
2046 return IRQ_HANDLED;
2047}
2048
2049static void hifn_flush(struct hifn_device *dev)
2050{
2051 unsigned long flags;
2052 struct crypto_async_request *async_req;
2053 struct hifn_context *ctx;
2054 struct ablkcipher_request *req;
2055 struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
2056 int i;
2057
2058 spin_lock_irqsave(&dev->lock, flags);
2059 for (i=0; i<HIFN_D_RES_RSIZE; ++i) {
2060 struct hifn_desc *d = &dma->resr[i];
2061
2062 if (dev->sa[i]) {
2063 hifn_process_ready(dev->sa[i],
2064 (d->l & __cpu_to_le32(HIFN_D_VALID))?-ENODEV:0);
2065 }
2066 }
2067
2068 while ((async_req = crypto_dequeue_request(&dev->queue))) {
2069 ctx = crypto_tfm_ctx(async_req->tfm);
2070 req = container_of(async_req, struct ablkcipher_request, base);
2071
2072 hifn_process_ready(req, -ENODEV);
2073 }
2074 spin_unlock_irqrestore(&dev->lock, flags);
2075}
2076
2077static int hifn_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
2078 unsigned int len)
2079{
2080 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
2081 struct hifn_context *ctx = crypto_tfm_ctx(tfm);
2082 struct hifn_device *dev = ctx->dev;
2083
2084 if (len > HIFN_MAX_CRYPT_KEY_LENGTH) {
2085 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
2086		return -EINVAL;
2087 }
2088
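	/*
	 * For single DES, des_ekey() returns 0 for a weak key; honour
	 * CRYPTO_TFM_REQ_WEAK_KEY by rejecting such keys below.
	 */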
2089 if (len == HIFN_DES_KEY_LENGTH) {
2090 u32 tmp[DES_EXPKEY_WORDS];
2091 int ret = des_ekey(tmp, key);
2092
2093 if (unlikely(ret == 0) && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
2094 tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
2095 return -EINVAL;
2096 }
2097 }
2098
2099 dev->flags &= ~HIFN_FLAG_OLD_KEY;
2100
2101 memcpy(ctx->key, key, len);
2102 ctx->keysize = len;
2103
2104 return 0;
2105}
2106
2107static int hifn_handle_req(struct ablkcipher_request *req)
2108{
2109 struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm);
2110 struct hifn_device *dev = ctx->dev;
2111 int err = -EAGAIN;
2112
2113 if (dev->started + DIV_ROUND_UP(req->nbytes, PAGE_SIZE) <= HIFN_QUEUE_LENGTH)
2114 err = hifn_setup_session(req);
2115
2116 if (err == -EAGAIN) {
2117 unsigned long flags;
2118
2119 spin_lock_irqsave(&dev->lock, flags);
2120 err = ablkcipher_enqueue_request(&dev->queue, req);
2121 spin_unlock_irqrestore(&dev->lock, flags);
2122 }
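
	/*
	 * -EAGAIN means the descriptor rings were full: the request has
	 * been backlogged on dev->queue and will be resubmitted from
	 * hifn_process_queue() once completions free up ring space.
	 */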
2123
2124 return err;
2125}
2126
2127static int hifn_setup_crypto_req(struct ablkcipher_request *req, u8 op,
2128 u8 type, u8 mode)
2129{
2130 struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm);
2131 unsigned ivsize;
2132
2133 ivsize = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req));
2134
2135 if (req->info && mode != ACRYPTO_MODE_ECB) {
2136 if (type == ACRYPTO_TYPE_AES_128)
2137 ivsize = HIFN_AES_IV_LENGTH;
2138 else if (type == ACRYPTO_TYPE_DES)
2139 ivsize = HIFN_DES_KEY_LENGTH;
2140 else if (type == ACRYPTO_TYPE_3DES)
2141 ivsize = HIFN_3DES_KEY_LENGTH;
2142 }
2143
2144 if (ctx->keysize != 16 && type == ACRYPTO_TYPE_AES_128) {
2145 if (ctx->keysize == 24)
2146 type = ACRYPTO_TYPE_AES_192;
2147 else if (ctx->keysize == 32)
2148 type = ACRYPTO_TYPE_AES_256;
2149 }
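
	/*
	 * Example: all AES templates are registered with the AES-128
	 * type, so a 24- or 32-byte key installed via hifn_setkey()
	 * promotes the type to AES-192 or AES-256 here.
	 */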
2150
2151 ctx->op = op;
2152 ctx->mode = mode;
2153 ctx->type = type;
2154 ctx->iv = req->info;
2155 ctx->ivsize = ivsize;
2156
2157 /*
2158 * HEAVY TODO: needs to kick Herbert XU to write documentation.
2159 * HEAVY TODO: needs to kick Herbert XU to write documentation.
2160 * HEAVY TODO: needs to kick Herbert XU to write documentation.
2161 */
2162
2163 return hifn_handle_req(req);
2164}
2165
2166static int hifn_process_queue(struct hifn_device *dev)
2167{
2168 struct crypto_async_request *async_req;
2169 struct hifn_context *ctx;
2170 struct ablkcipher_request *req;
2171 unsigned long flags;
2172 int err = 0;
2173
2174 while (dev->started < HIFN_QUEUE_LENGTH) {
2175 spin_lock_irqsave(&dev->lock, flags);
2176 async_req = crypto_dequeue_request(&dev->queue);
2177 spin_unlock_irqrestore(&dev->lock, flags);
2178
2179 if (!async_req)
2180 break;
2181
2182 ctx = crypto_tfm_ctx(async_req->tfm);
2183 req = container_of(async_req, struct ablkcipher_request, base);
2184
2185 err = hifn_handle_req(req);
2186 if (err)
2187 break;
2188 }
2189
2190 return err;
2191}
2192
2193static int hifn_setup_crypto(struct ablkcipher_request *req, u8 op,
2194 u8 type, u8 mode)
2195{
2196 int err;
2197 struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm);
2198 struct hifn_device *dev = ctx->dev;
2199
2200 err = hifn_setup_crypto_req(req, op, type, mode);
2201 if (err)
2202 return err;
2203
2204 if (dev->started < HIFN_QUEUE_LENGTH && dev->queue.qlen)
2205 err = hifn_process_queue(dev);
2206
2207 return err;
2208}
2209
2210/*
2211 * AES encryption functions.
2212 */
2213static inline int hifn_encrypt_aes_ecb(struct ablkcipher_request *req)
2214{
2215 return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
2216 ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_ECB);
2217}
2218static inline int hifn_encrypt_aes_cbc(struct ablkcipher_request *req)
2219{
2220 return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
2221 ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_CBC);
2222}
2223static inline int hifn_encrypt_aes_cfb(struct ablkcipher_request *req)
2224{
2225 return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
2226 ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_CFB);
2227}
2228static inline int hifn_encrypt_aes_ofb(struct ablkcipher_request *req)
2229{
2230 return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
2231 ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_OFB);
2232}
2233
2234/*
2235 * AES decryption functions.
2236 */
2237static inline int hifn_decrypt_aes_ecb(struct ablkcipher_request *req)
2238{
2239 return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
2240 ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_ECB);
2241}
2242static inline int hifn_decrypt_aes_cbc(struct ablkcipher_request *req)
2243{
2244 return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
2245 ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_CBC);
2246}
2247static inline int hifn_decrypt_aes_cfb(struct ablkcipher_request *req)
2248{
2249 return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
2250 ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_CFB);
2251}
2252static inline int hifn_decrypt_aes_ofb(struct ablkcipher_request *req)
2253{
2254 return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
2255 ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_OFB);
2256}
2257
2258/*
2259 * DES encryption functions.
2260 */
2261static inline int hifn_encrypt_des_ecb(struct ablkcipher_request *req)
2262{
2263 return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
2264 ACRYPTO_TYPE_DES, ACRYPTO_MODE_ECB);
2265}
2266static inline int hifn_encrypt_des_cbc(struct ablkcipher_request *req)
2267{
2268 return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
2269 ACRYPTO_TYPE_DES, ACRYPTO_MODE_CBC);
2270}
2271static inline int hifn_encrypt_des_cfb(struct ablkcipher_request *req)
2272{
2273 return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
2274 ACRYPTO_TYPE_DES, ACRYPTO_MODE_CFB);
2275}
2276static inline int hifn_encrypt_des_ofb(struct ablkcipher_request *req)
2277{
2278 return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
2279 ACRYPTO_TYPE_DES, ACRYPTO_MODE_OFB);
2280}
2281
2282/*
2283 * DES decryption functions.
2284 */
2285static inline int hifn_decrypt_des_ecb(struct ablkcipher_request *req)
2286{
2287 return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
2288 ACRYPTO_TYPE_DES, ACRYPTO_MODE_ECB);
2289}
2290static inline int hifn_decrypt_des_cbc(struct ablkcipher_request *req)
2291{
2292 return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
2293 ACRYPTO_TYPE_DES, ACRYPTO_MODE_CBC);
2294}
2295static inline int hifn_decrypt_des_cfb(struct ablkcipher_request *req)
2296{
2297 return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
2298 ACRYPTO_TYPE_DES, ACRYPTO_MODE_CFB);
2299}
2300static inline int hifn_decrypt_des_ofb(struct ablkcipher_request *req)
2301{
2302 return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
2303 ACRYPTO_TYPE_DES, ACRYPTO_MODE_OFB);
2304}
2305
2306/*
2307 * 3DES encryption functions.
2308 */
2309static inline int hifn_encrypt_3des_ecb(struct ablkcipher_request *req)
2310{
2311 return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
2312 ACRYPTO_TYPE_3DES, ACRYPTO_MODE_ECB);
2313}
2314static inline int hifn_encrypt_3des_cbc(struct ablkcipher_request *req)
2315{
2316 return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
2317 ACRYPTO_TYPE_3DES, ACRYPTO_MODE_CBC);
2318}
2319static inline int hifn_encrypt_3des_cfb(struct ablkcipher_request *req)
2320{
2321 return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
2322 ACRYPTO_TYPE_3DES, ACRYPTO_MODE_CFB);
2323}
2324static inline int hifn_encrypt_3des_ofb(struct ablkcipher_request *req)
2325{
2326 return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
2327 ACRYPTO_TYPE_3DES, ACRYPTO_MODE_OFB);
2328}
2329
2330/*
2331 * 3DES decryption functions.
2332 */
2333static inline int hifn_decrypt_3des_ecb(struct ablkcipher_request *req)
2334{
2335 return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
2336 ACRYPTO_TYPE_3DES, ACRYPTO_MODE_ECB);
2337}
2338static inline int hifn_decrypt_3des_cbc(struct ablkcipher_request *req)
2339{
2340 return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
2341 ACRYPTO_TYPE_3DES, ACRYPTO_MODE_CBC);
2342}
2343static inline int hifn_decrypt_3des_cfb(struct ablkcipher_request *req)
2344{
2345 return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
2346 ACRYPTO_TYPE_3DES, ACRYPTO_MODE_CFB);
2347}
2348static inline int hifn_decrypt_3des_ofb(struct ablkcipher_request *req)
2349{
2350 return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
2351 ACRYPTO_TYPE_3DES, ACRYPTO_MODE_OFB);
2352}
2353
2354struct hifn_alg_template
2355{
2356 char name[CRYPTO_MAX_ALG_NAME];
2357 char drv_name[CRYPTO_MAX_ALG_NAME];
2358 unsigned int bsize;
2359 struct ablkcipher_alg ablkcipher;
2360};
2361
2362static struct hifn_alg_template hifn_alg_templates[] = {
2363 /*
2364 * 3DES ECB, CBC, CFB and OFB modes.
2365 */
2366 {
2367 .name = "cfb(des3_ede)", .drv_name = "hifn-3des", .bsize = 8,
2368 .ablkcipher = {
2369 .min_keysize = HIFN_3DES_KEY_LENGTH,
2370 .max_keysize = HIFN_3DES_KEY_LENGTH,
2371 .setkey = hifn_setkey,
2372 .encrypt = hifn_encrypt_3des_cfb,
2373 .decrypt = hifn_decrypt_3des_cfb,
2374 },
2375 },
2376 {
2377 .name = "ofb(des3_ede)", .drv_name = "hifn-3des", .bsize = 8,
2378 .ablkcipher = {
2379 .min_keysize = HIFN_3DES_KEY_LENGTH,
2380 .max_keysize = HIFN_3DES_KEY_LENGTH,
2381 .setkey = hifn_setkey,
2382 .encrypt = hifn_encrypt_3des_ofb,
2383 .decrypt = hifn_decrypt_3des_ofb,
2384 },
2385 },
2386 {
2387 .name = "cbc(des3_ede)", .drv_name = "hifn-3des", .bsize = 8,
2388 .ablkcipher = {
2389 .min_keysize = HIFN_3DES_KEY_LENGTH,
2390 .max_keysize = HIFN_3DES_KEY_LENGTH,
2391 .setkey = hifn_setkey,
2392 .encrypt = hifn_encrypt_3des_cbc,
2393 .decrypt = hifn_decrypt_3des_cbc,
2394 },
2395 },
2396 {
2397 .name = "ecb(des3_ede)", .drv_name = "hifn-3des", .bsize = 8,
2398 .ablkcipher = {
2399 .min_keysize = HIFN_3DES_KEY_LENGTH,
2400 .max_keysize = HIFN_3DES_KEY_LENGTH,
2401 .setkey = hifn_setkey,
2402 .encrypt = hifn_encrypt_3des_ecb,
2403 .decrypt = hifn_decrypt_3des_ecb,
2404 },
2405 },
2406
2407 /*
2408 * DES ECB, CBC, CFB and OFB modes.
2409 */
2410 {
2411 .name = "cfb(des)", .drv_name = "hifn-des", .bsize = 8,
2412 .ablkcipher = {
2413 .min_keysize = HIFN_DES_KEY_LENGTH,
2414 .max_keysize = HIFN_DES_KEY_LENGTH,
2415 .setkey = hifn_setkey,
2416 .encrypt = hifn_encrypt_des_cfb,
2417 .decrypt = hifn_decrypt_des_cfb,
2418 },
2419 },
2420 {
2421 .name = "ofb(des)", .drv_name = "hifn-des", .bsize = 8,
2422 .ablkcipher = {
2423 .min_keysize = HIFN_DES_KEY_LENGTH,
2424 .max_keysize = HIFN_DES_KEY_LENGTH,
2425 .setkey = hifn_setkey,
2426 .encrypt = hifn_encrypt_des_ofb,
2427 .decrypt = hifn_decrypt_des_ofb,
2428 },
2429 },
2430 {
2431 .name = "cbc(des)", .drv_name = "hifn-des", .bsize = 8,
2432 .ablkcipher = {
2433 .min_keysize = HIFN_DES_KEY_LENGTH,
2434 .max_keysize = HIFN_DES_KEY_LENGTH,
2435 .setkey = hifn_setkey,
2436 .encrypt = hifn_encrypt_des_cbc,
2437 .decrypt = hifn_decrypt_des_cbc,
2438 },
2439 },
2440 {
2441 .name = "ecb(des)", .drv_name = "hifn-des", .bsize = 8,
2442 .ablkcipher = {
2443 .min_keysize = HIFN_DES_KEY_LENGTH,
2444 .max_keysize = HIFN_DES_KEY_LENGTH,
2445 .setkey = hifn_setkey,
2446 .encrypt = hifn_encrypt_des_ecb,
2447 .decrypt = hifn_decrypt_des_ecb,
2448 },
2449 },
2450
2451 /*
2452 * AES ECB, CBC, CFB and OFB modes.
2453 */
2454 {
2455 .name = "ecb(aes)", .drv_name = "hifn-aes", .bsize = 16,
2456 .ablkcipher = {
2457 .min_keysize = AES_MIN_KEY_SIZE,
2458 .max_keysize = AES_MAX_KEY_SIZE,
2459 .setkey = hifn_setkey,
2460 .encrypt = hifn_encrypt_aes_ecb,
2461 .decrypt = hifn_decrypt_aes_ecb,
2462 },
2463 },
2464 {
2465 .name = "cbc(aes)", .drv_name = "hifn-aes", .bsize = 16,
2466 .ablkcipher = {
2467 .min_keysize = AES_MIN_KEY_SIZE,
2468 .max_keysize = AES_MAX_KEY_SIZE,
2469 .setkey = hifn_setkey,
2470 .encrypt = hifn_encrypt_aes_cbc,
2471 .decrypt = hifn_decrypt_aes_cbc,
2472 },
2473 },
2474 {
2475 .name = "cfb(aes)", .drv_name = "hifn-aes", .bsize = 16,
2476 .ablkcipher = {
2477 .min_keysize = AES_MIN_KEY_SIZE,
2478 .max_keysize = AES_MAX_KEY_SIZE,
2479 .setkey = hifn_setkey,
2480 .encrypt = hifn_encrypt_aes_cfb,
2481 .decrypt = hifn_decrypt_aes_cfb,
2482 },
2483 },
2484 {
2485 .name = "ofb(aes)", .drv_name = "hifn-aes", .bsize = 16,
2486 .ablkcipher = {
2487 .min_keysize = AES_MIN_KEY_SIZE,
2488 .max_keysize = AES_MAX_KEY_SIZE,
2489 .setkey = hifn_setkey,
2490 .encrypt = hifn_encrypt_aes_ofb,
2491 .decrypt = hifn_decrypt_aes_ofb,
2492 },
2493 },
2494};
2495
2496static int hifn_cra_init(struct crypto_tfm *tfm)
2497{
2498 struct crypto_alg *alg = tfm->__crt_alg;
2499 struct hifn_crypto_alg *ha = crypto_alg_to_hifn(alg);
2500 struct hifn_context *ctx = crypto_tfm_ctx(tfm);
2501
2502 ctx->dev = ha->dev;
2503
2504 return 0;
2505}
2506
2507static int hifn_alg_alloc(struct hifn_device *dev, struct hifn_alg_template *t)
2508{
2509 struct hifn_crypto_alg *alg;
2510 int err;
2511
2512 alg = kzalloc(sizeof(struct hifn_crypto_alg), GFP_KERNEL);
2513 if (!alg)
2514 return -ENOMEM;
2515
2516 snprintf(alg->alg.cra_name, CRYPTO_MAX_ALG_NAME, "%s", t->name);
2517 snprintf(alg->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", t->drv_name);
2518
2519 alg->alg.cra_priority = 300;
2520 alg->alg.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
2521 alg->alg.cra_blocksize = t->bsize;
2522 alg->alg.cra_ctxsize = sizeof(struct hifn_context);
2523 alg->alg.cra_alignmask = 15;
2524 if (t->bsize == 8)
2525 alg->alg.cra_alignmask = 3;
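	/* I.e. 16-byte alignment for the AES templates and 4-byte
	 * alignment for the 8-byte-block DES/3DES templates. */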
2526 alg->alg.cra_type = &crypto_ablkcipher_type;
2527 alg->alg.cra_module = THIS_MODULE;
2528 alg->alg.cra_u.ablkcipher = t->ablkcipher;
2529 alg->alg.cra_init = hifn_cra_init;
2530
2531 alg->dev = dev;
2532
2533 list_add_tail(&alg->entry, &dev->alg_list);
2534
2535 err = crypto_register_alg(&alg->alg);
2536 if (err) {
2537 list_del(&alg->entry);
2538 kfree(alg);
2539 }
2540
2541 return err;
2542}
2543
2544static void hifn_unregister_alg(struct hifn_device *dev)
2545{
2546 struct hifn_crypto_alg *a, *n;
2547
2548 list_for_each_entry_safe(a, n, &dev->alg_list, entry) {
2549 list_del(&a->entry);
2550 crypto_unregister_alg(&a->alg);
2551 kfree(a);
2552 }
2553}
2554
2555static int hifn_register_alg(struct hifn_device *dev)
2556{
2557 int i, err;
2558
2559 for (i=0; i<ARRAY_SIZE(hifn_alg_templates); ++i) {
2560 err = hifn_alg_alloc(dev, &hifn_alg_templates[i]);
2561 if (err)
2562 goto err_out_exit;
2563 }
2564
2565 return 0;
2566
2567err_out_exit:
2568 hifn_unregister_alg(dev);
2569 return err;
2570}
2571
2572static void hifn_tasklet_callback(unsigned long data)
2573{
2574 struct hifn_device *dev = (struct hifn_device *)data;
2575
2576 /*
2577	 * It is ok to call this without the lock held, although it
2578	 * modifies some parameters used in parallel (like dev->success);
2579	 * they are either used in process context or updated atomically
2580	 * (like setting dev->sa[i] to NULL).
2581 */
2582 hifn_check_for_completion(dev, 0);
2583}
2584
2585static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2586{
2587 int err, i;
2588 struct hifn_device *dev;
2589 char name[8];
2590
2591 err = pci_enable_device(pdev);
2592 if (err)
2593 return err;
2594 pci_set_master(pdev);
2595
2596 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
2597 if (err)
2598 goto err_out_disable_pci_device;
2599
2600 snprintf(name, sizeof(name), "hifn%d",
2601 atomic_inc_return(&hifn_dev_number)-1);
2602
2603 err = pci_request_regions(pdev, name);
2604 if (err)
2605 goto err_out_disable_pci_device;
2606
2607 if (pci_resource_len(pdev, 0) < HIFN_BAR0_SIZE ||
2608 pci_resource_len(pdev, 1) < HIFN_BAR1_SIZE ||
2609 pci_resource_len(pdev, 2) < HIFN_BAR2_SIZE) {
2610 dprintk("%s: Broken hardware - I/O regions are too small.\n",
2611 pci_name(pdev));
2612 err = -ENODEV;
2613 goto err_out_free_regions;
2614 }
2615
2616 dev = kzalloc(sizeof(struct hifn_device) + sizeof(struct crypto_alg),
2617 GFP_KERNEL);
2618 if (!dev) {
2619 err = -ENOMEM;
2620 goto err_out_free_regions;
2621 }
2622
2623 INIT_LIST_HEAD(&dev->alg_list);
2624
2625 snprintf(dev->name, sizeof(dev->name), "%s", name);
2626 spin_lock_init(&dev->lock);
2627
	/* Any mapping/allocation failure below must not return success. */
	err = -ENOMEM;
2628	for (i = 0; i < 3; ++i) {
2629 unsigned long addr, size;
2630
2631 addr = pci_resource_start(pdev, i);
2632 size = pci_resource_len(pdev, i);
2633
2634 dev->bar[i] = ioremap_nocache(addr, size);
2635 if (!dev->bar[i])
2636 goto err_out_unmap_bars;
2637 }
2638
2639 dev->result_mem = __get_free_pages(GFP_KERNEL, HIFN_MAX_RESULT_ORDER);
2640 if (!dev->result_mem) {
2641 dprintk("Failed to allocate %d pages for result_mem.\n",
2642 HIFN_MAX_RESULT_ORDER);
2643 goto err_out_unmap_bars;
2644 }
2645 memset((void *)dev->result_mem, 0, PAGE_SIZE*(1<<HIFN_MAX_RESULT_ORDER));
2646
2647 dev->dst = pci_map_single(pdev, (void *)dev->result_mem,
2648 PAGE_SIZE << HIFN_MAX_RESULT_ORDER, PCI_DMA_FROMDEVICE);
2649
2650 dev->desc_virt = pci_alloc_consistent(pdev, sizeof(struct hifn_dma),
2651 &dev->desc_dma);
2652 if (!dev->desc_virt) {
2653 dprintk("Failed to allocate descriptor rings.\n");
2654 goto err_out_free_result_pages;
2655 }
2656 memset(dev->desc_virt, 0, sizeof(struct hifn_dma));
2657
2658 dev->pdev = pdev;
2659 dev->irq = pdev->irq;
2660
2661 for (i=0; i<HIFN_D_RES_RSIZE; ++i)
2662 dev->sa[i] = NULL;
2663
2664 pci_set_drvdata(pdev, dev);
2665
2666 tasklet_init(&dev->tasklet, hifn_tasklet_callback, (unsigned long)dev);
2667
2668 crypto_init_queue(&dev->queue, 1);
2669
2670 err = request_irq(dev->irq, hifn_interrupt, IRQF_SHARED, dev->name, dev);
2671 if (err) {
2672 dprintk("Failed to request IRQ%d: err: %d.\n", dev->irq, err);
2673 dev->irq = 0;
2674 goto err_out_free_desc;
2675 }
2676
2677 err = hifn_start_device(dev);
2678 if (err)
2679 goto err_out_free_irq;
2680
2681 err = hifn_test(dev, 1, 0);
2682 if (err)
2683 goto err_out_stop_device;
2684
2685 err = hifn_register_rng(dev);
2686 if (err)
2687 goto err_out_stop_device;
2688
2689 err = hifn_register_alg(dev);
2690 if (err)
2691 goto err_out_unregister_rng;
2692
2693 INIT_DELAYED_WORK(&dev->work, hifn_work);
2694 schedule_delayed_work(&dev->work, HZ);
2695
2696 dprintk("HIFN crypto accelerator card at %s has been "
2697 "successfully registered as %s.\n",
2698 pci_name(pdev), dev->name);
2699
2700 return 0;
2701
2702err_out_unregister_rng:
2703 hifn_unregister_rng(dev);
2704err_out_stop_device:
2705 hifn_reset_dma(dev, 1);
2706 hifn_stop_device(dev);
2707err_out_free_irq:
2708	free_irq(dev->irq, dev);
2709 tasklet_kill(&dev->tasklet);
2710err_out_free_desc:
2711 pci_free_consistent(pdev, sizeof(struct hifn_dma),
2712 dev->desc_virt, dev->desc_dma);
2713
2714err_out_free_result_pages:
2715 pci_unmap_single(pdev, dev->dst, PAGE_SIZE << HIFN_MAX_RESULT_ORDER,
2716 PCI_DMA_FROMDEVICE);
2717 free_pages(dev->result_mem, HIFN_MAX_RESULT_ORDER);
2718
2719err_out_unmap_bars:
2720 for (i=0; i<3; ++i)
2721 if (dev->bar[i])
2722 iounmap(dev->bar[i]);
2723
2724err_out_free_regions:
2725 pci_release_regions(pdev);
2726
2727err_out_disable_pci_device:
2728 pci_disable_device(pdev);
2729
2730 return err;
2731}
2732
2733static void hifn_remove(struct pci_dev *pdev)
2734{
2735 int i;
2736 struct hifn_device *dev;
2737
2738 dev = pci_get_drvdata(pdev);
2739
2740 if (dev) {
2741 cancel_delayed_work(&dev->work);
2742 flush_scheduled_work();
2743
2744 hifn_unregister_rng(dev);
2745 hifn_unregister_alg(dev);
2746 hifn_reset_dma(dev, 1);
2747 hifn_stop_device(dev);
2748
2749		free_irq(dev->irq, dev);
2750 tasklet_kill(&dev->tasklet);
2751
2752 hifn_flush(dev);
2753
2754 pci_free_consistent(pdev, sizeof(struct hifn_dma),
2755 dev->desc_virt, dev->desc_dma);
2756 pci_unmap_single(pdev, dev->dst,
2757 PAGE_SIZE << HIFN_MAX_RESULT_ORDER,
2758 PCI_DMA_FROMDEVICE);
2759 free_pages(dev->result_mem, HIFN_MAX_RESULT_ORDER);
2760 for (i=0; i<3; ++i)
2761 if (dev->bar[i])
2762 iounmap(dev->bar[i]);
2763
2764 kfree(dev);
2765 }
2766
2767 pci_release_regions(pdev);
2768 pci_disable_device(pdev);
2769}
2770
2771static struct pci_device_id hifn_pci_tbl[] = {
2772 { PCI_DEVICE(PCI_VENDOR_ID_HIFN, PCI_DEVICE_ID_HIFN_7955) },
2773 { PCI_DEVICE(PCI_VENDOR_ID_HIFN, PCI_DEVICE_ID_HIFN_7956) },
2774 { 0 }
2775};
2776MODULE_DEVICE_TABLE(pci, hifn_pci_tbl);
2777
2778static struct pci_driver hifn_pci_driver = {
2779 .name = "hifn795x",
2780 .id_table = hifn_pci_tbl,
2781 .probe = hifn_probe,
2782 .remove = __devexit_p(hifn_remove),
2783};
2784
2785static int __devinit hifn_init(void)
2786{
2787 unsigned int freq;
2788 int err;
2789
2790 if (strncmp(hifn_pll_ref, "ext", 3) &&
2791 strncmp(hifn_pll_ref, "pci", 3)) {
2792		printk(KERN_ERR "hifn795x: invalid hifn_pll_ref clock, "
2793				"must be pci or ext.\n");
2794 return -EINVAL;
2795 }
2796
2797 /*
2798 * For the 7955/7956 the reference clock frequency must be in the
2799 * range of 20MHz-100MHz. For the 7954 the upper bound is 66.67MHz,
2800 * but this chip is currently not supported.
2801 */
2802 if (hifn_pll_ref[3] != '\0') {
2803 freq = simple_strtoul(hifn_pll_ref + 3, NULL, 10);
2804 if (freq < 20 || freq > 100) {
2805			printk(KERN_ERR "hifn795x: invalid hifn_pll_ref "
2806					"frequency, must be in the range "
2807					"of 20-100.\n");
2808 return -EINVAL;
2809 }
2810 }
2811
2812 err = pci_register_driver(&hifn_pci_driver);
2813 if (err < 0) {
2814 dprintk("Failed to register PCI driver for %s device.\n",
2815 hifn_pci_driver.name);
2816 return -ENODEV;
2817 }
2818
2819 printk(KERN_INFO "Driver for HIFN 795x crypto accelerator chip "
2820 "has been successfully registered.\n");
2821
2822 return 0;
2823}
2824
2825static void __devexit hifn_fini(void)
2826{
2827 pci_unregister_driver(&hifn_pci_driver);
2828
2829 printk(KERN_INFO "Driver for HIFN 795x crypto accelerator chip "
2830 "has been successfully unregistered.\n");
2831}
2832
2833module_init(hifn_init);
2834module_exit(hifn_fini);
2835
2836MODULE_LICENSE("GPL");
2837MODULE_AUTHOR("Evgeniy Polyakov <johnpol@2ka.mipt.ru>");
2838MODULE_DESCRIPTION("Driver for HIFN 795x crypto accelerator chip.");
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index 5f7e71810489..2f3ad3f7dfea 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -44,6 +44,7 @@
44 */ 44 */
45 45
46#include <crypto/algapi.h> 46#include <crypto/algapi.h>
47#include <crypto/aes.h>
47#include <linux/module.h> 48#include <linux/module.h>
48#include <linux/init.h> 49#include <linux/init.h>
49#include <linux/types.h> 50#include <linux/types.h>
@@ -53,9 +54,6 @@
53#include <asm/byteorder.h> 54#include <asm/byteorder.h>
54#include "padlock.h" 55#include "padlock.h"
55 56
56#define AES_MIN_KEY_SIZE 16 /* in uint8_t units */
57#define AES_MAX_KEY_SIZE 32 /* ditto */
58#define AES_BLOCK_SIZE 16 /* ditto */
59#define AES_EXTENDED_KEY_SIZE 64 /* in uint32_t units */ 57#define AES_EXTENDED_KEY_SIZE 64 /* in uint32_t units */
60#define AES_EXTENDED_KEY_SIZE_B (AES_EXTENDED_KEY_SIZE * sizeof(uint32_t)) 58#define AES_EXTENDED_KEY_SIZE_B (AES_EXTENDED_KEY_SIZE * sizeof(uint32_t))
61 59
@@ -419,6 +417,11 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
419/* ====== Encryption/decryption routines ====== */ 417/* ====== Encryption/decryption routines ====== */
420 418
421/* These are the real call to PadLock. */ 419/* These are the real call to PadLock. */
420static inline void padlock_reset_key(void)
421{
422 asm volatile ("pushfl; popfl");
423}
424
422static inline void padlock_xcrypt(const u8 *input, u8 *output, void *key, 425static inline void padlock_xcrypt(const u8 *input, u8 *output, void *key,
423 void *control_word) 426 void *control_word)
424{ 427{
@@ -439,8 +442,6 @@ static void aes_crypt_copy(const u8 *in, u8 *out, u32 *key, struct cword *cword)
439static inline void aes_crypt(const u8 *in, u8 *out, u32 *key, 442static inline void aes_crypt(const u8 *in, u8 *out, u32 *key,
440 struct cword *cword) 443 struct cword *cword)
441{ 444{
442 asm volatile ("pushfl; popfl");
443
444 /* padlock_xcrypt requires at least two blocks of data. */ 445 /* padlock_xcrypt requires at least two blocks of data. */
445 if (unlikely(!(((unsigned long)in ^ (PAGE_SIZE - AES_BLOCK_SIZE)) & 446 if (unlikely(!(((unsigned long)in ^ (PAGE_SIZE - AES_BLOCK_SIZE)) &
446 (PAGE_SIZE - 1)))) { 447 (PAGE_SIZE - 1)))) {
@@ -459,7 +460,6 @@ static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
459 return; 460 return;
460 } 461 }
461 462
462 asm volatile ("pushfl; popfl"); /* enforce key reload. */
463 asm volatile ("test $1, %%cl;" 463 asm volatile ("test $1, %%cl;"
464 "je 1f;" 464 "je 1f;"
465 "lea -1(%%ecx), %%eax;" 465 "lea -1(%%ecx), %%eax;"
@@ -476,8 +476,6 @@ static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
476static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key, 476static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
477 u8 *iv, void *control_word, u32 count) 477 u8 *iv, void *control_word, u32 count)
478{ 478{
479 /* Enforce key reload. */
480 asm volatile ("pushfl; popfl");
481 /* rep xcryptcbc */ 479 /* rep xcryptcbc */
482 asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" 480 asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"
483 : "+S" (input), "+D" (output), "+a" (iv) 481 : "+S" (input), "+D" (output), "+a" (iv)
@@ -488,12 +486,14 @@ static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
488static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) 486static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
489{ 487{
490 struct aes_ctx *ctx = aes_ctx(tfm); 488 struct aes_ctx *ctx = aes_ctx(tfm);
489 padlock_reset_key();
491 aes_crypt(in, out, ctx->E, &ctx->cword.encrypt); 490 aes_crypt(in, out, ctx->E, &ctx->cword.encrypt);
492} 491}
493 492
494static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) 493static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
495{ 494{
496 struct aes_ctx *ctx = aes_ctx(tfm); 495 struct aes_ctx *ctx = aes_ctx(tfm);
496 padlock_reset_key();
497 aes_crypt(in, out, ctx->D, &ctx->cword.decrypt); 497 aes_crypt(in, out, ctx->D, &ctx->cword.decrypt);
498} 498}
499 499
@@ -526,6 +526,8 @@ static int ecb_aes_encrypt(struct blkcipher_desc *desc,
526 struct blkcipher_walk walk; 526 struct blkcipher_walk walk;
527 int err; 527 int err;
528 528
529 padlock_reset_key();
530
529 blkcipher_walk_init(&walk, dst, src, nbytes); 531 blkcipher_walk_init(&walk, dst, src, nbytes);
530 err = blkcipher_walk_virt(desc, &walk); 532 err = blkcipher_walk_virt(desc, &walk);
531 533
@@ -548,6 +550,8 @@ static int ecb_aes_decrypt(struct blkcipher_desc *desc,
548 struct blkcipher_walk walk; 550 struct blkcipher_walk walk;
549 int err; 551 int err;
550 552
553 padlock_reset_key();
554
551 blkcipher_walk_init(&walk, dst, src, nbytes); 555 blkcipher_walk_init(&walk, dst, src, nbytes);
552 err = blkcipher_walk_virt(desc, &walk); 556 err = blkcipher_walk_virt(desc, &walk);
553 557
@@ -592,6 +596,8 @@ static int cbc_aes_encrypt(struct blkcipher_desc *desc,
592 struct blkcipher_walk walk; 596 struct blkcipher_walk walk;
593 int err; 597 int err;
594 598
599 padlock_reset_key();
600
595 blkcipher_walk_init(&walk, dst, src, nbytes); 601 blkcipher_walk_init(&walk, dst, src, nbytes);
596 err = blkcipher_walk_virt(desc, &walk); 602 err = blkcipher_walk_virt(desc, &walk);
597 603
@@ -616,6 +622,8 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc,
616 struct blkcipher_walk walk; 622 struct blkcipher_walk walk;
617 int err; 623 int err;
618 624
625 padlock_reset_key();
626
619 blkcipher_walk_init(&walk, dst, src, nbytes); 627 blkcipher_walk_init(&walk, dst, src, nbytes);
620 err = blkcipher_walk_virt(desc, &walk); 628 err = blkcipher_walk_virt(desc, &walk);
621 629