Diffstat (limited to 'drivers')
-rw-r--r--	drivers/block/cryptoloop.c        |  160
-rw-r--r--	drivers/crypto/Kconfig            |   45
-rw-r--r--	drivers/crypto/Makefile           |    8
-rw-r--r--	drivers/crypto/padlock-aes.c      |  258
-rw-r--r--	drivers/crypto/padlock-generic.c  |   63
-rw-r--r--	drivers/crypto/padlock-sha.c      |  318
-rw-r--r--	drivers/crypto/padlock.c          |   58
-rw-r--r--	drivers/crypto/padlock.h          |   17
-rw-r--r--	drivers/md/dm-crypt.c             |  146
-rw-r--r--	drivers/net/ppp_mppe.c            |   68
-rw-r--r--	drivers/net/wireless/airo.c       |   22
-rw-r--r--	drivers/scsi/iscsi_tcp.c          |  134
-rw-r--r--	drivers/scsi/iscsi_tcp.h          |    9
13 files changed, 877 insertions(+), 429 deletions(-)
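
Note: every hunk below is part of one migration. The old monolithic crypto_tfm interface, where the chaining mode was selected with CRYPTO_TFM_MODE_* flags at allocation time, is replaced by the blkcipher/hash APIs, where the mode is spelled inside the algorithm name itself (e.g. "cbc(aes)") and per-request state travels in a descriptor. A minimal sketch of the new calling convention as the converted drivers use it — this is the 2.6.19-era blkcipher API shown in the hunks themselves (since superseded by skcipher); the function name example_cbc_aes is hypothetical and key/scatterlist setup is assumed done by the caller:

	static int example_cbc_aes(struct scatterlist *dst, struct scatterlist *src,
				   unsigned int nbytes, const u8 *key,
				   unsigned int keylen, u8 *iv)
	{
		struct crypto_blkcipher *tfm;
		struct blkcipher_desc desc;
		int err;

		/* mode is part of the name; errors come back as ERR_PTR now */
		tfm = crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		err = crypto_blkcipher_setkey(tfm, key, keylen);
		if (!err) {
			desc.tfm = tfm;
			desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
			desc.info = iv;		/* the IV travels in the descriptor */
			err = crypto_blkcipher_encrypt_iv(&desc, dst, src, nbytes);
		}

		crypto_free_blkcipher(tfm);
		return err;
	}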
diff --git a/drivers/block/cryptoloop.c b/drivers/block/cryptoloop.c
index 3d4261c39f16..40535036e893 100644
--- a/drivers/block/cryptoloop.c
+++ b/drivers/block/cryptoloop.c
@@ -40,11 +40,13 @@ static int
 cryptoloop_init(struct loop_device *lo, const struct loop_info64 *info)
 {
 	int err = -EINVAL;
+	int cipher_len;
+	int mode_len;
 	char cms[LO_NAME_SIZE];		/* cipher-mode string */
 	char *cipher;
 	char *mode;
 	char *cmsp = cms;		/* c-m string pointer */
-	struct crypto_tfm *tfm = NULL;
+	struct crypto_blkcipher *tfm;
 
 	/* encryption breaks for non sector aligned offsets */
 
@@ -53,20 +55,39 @@ cryptoloop_init(struct loop_device *lo, const struct loop_info64 *info)
 
 	strncpy(cms, info->lo_crypt_name, LO_NAME_SIZE);
 	cms[LO_NAME_SIZE - 1] = 0;
-	cipher = strsep(&cmsp, "-");
-	mode = strsep(&cmsp, "-");
-
-	if (mode == NULL || strcmp(mode, "cbc") == 0)
-		tfm = crypto_alloc_tfm(cipher, CRYPTO_TFM_MODE_CBC |
-					       CRYPTO_TFM_REQ_MAY_SLEEP);
-	else if (strcmp(mode, "ecb") == 0)
-		tfm = crypto_alloc_tfm(cipher, CRYPTO_TFM_MODE_ECB |
-					       CRYPTO_TFM_REQ_MAY_SLEEP);
-	if (tfm == NULL)
+
+	cipher = cmsp;
+	cipher_len = strcspn(cmsp, "-");
+
+	mode = cmsp + cipher_len;
+	mode_len = 0;
+	if (*mode) {
+		mode++;
+		mode_len = strcspn(mode, "-");
+	}
+
+	if (!mode_len) {
+		mode = "cbc";
+		mode_len = 3;
+	}
+
+	if (cipher_len + mode_len + 3 > LO_NAME_SIZE)
 		return -EINVAL;
 
-	err = tfm->crt_u.cipher.cit_setkey(tfm, info->lo_encrypt_key,
-					   info->lo_encrypt_key_size);
+	memmove(cms, mode, mode_len);
+	cmsp = cms + mode_len;
+	*cmsp++ = '(';
+	memcpy(cmsp, info->lo_crypt_name, cipher_len);
+	cmsp += cipher_len;
+	*cmsp++ = ')';
+	*cmsp = 0;
+
+	tfm = crypto_alloc_blkcipher(cms, 0, CRYPTO_ALG_ASYNC);
+	if (IS_ERR(tfm))
+		return PTR_ERR(tfm);
+
+	err = crypto_blkcipher_setkey(tfm, info->lo_encrypt_key,
+				      info->lo_encrypt_key_size);
 
 	if (err != 0)
 		goto out_free_tfm;
@@ -75,99 +96,49 @@ cryptoloop_init(struct loop_device *lo, const struct loop_info64 *info)
 	return 0;
 
 out_free_tfm:
-	crypto_free_tfm(tfm);
+	crypto_free_blkcipher(tfm);
 
 out:
 	return err;
 }
 
 
-typedef int (*encdec_ecb_t)(struct crypto_tfm *tfm,
+typedef int (*encdec_cbc_t)(struct blkcipher_desc *desc,
 			struct scatterlist *sg_out,
 			struct scatterlist *sg_in,
 			unsigned int nsg);
 
-
-static int
-cryptoloop_transfer_ecb(struct loop_device *lo, int cmd,
-			struct page *raw_page, unsigned raw_off,
-			struct page *loop_page, unsigned loop_off,
-			int size, sector_t IV)
-{
-	struct crypto_tfm *tfm = (struct crypto_tfm *) lo->key_data;
-	struct scatterlist sg_out = { NULL, };
-	struct scatterlist sg_in = { NULL, };
-
-	encdec_ecb_t encdecfunc;
-	struct page *in_page, *out_page;
-	unsigned in_offs, out_offs;
-
-	if (cmd == READ) {
-		in_page = raw_page;
-		in_offs = raw_off;
-		out_page = loop_page;
-		out_offs = loop_off;
-		encdecfunc = tfm->crt_u.cipher.cit_decrypt;
-	} else {
-		in_page = loop_page;
-		in_offs = loop_off;
-		out_page = raw_page;
-		out_offs = raw_off;
-		encdecfunc = tfm->crt_u.cipher.cit_encrypt;
-	}
-
-	while (size > 0) {
-		const int sz = min(size, LOOP_IV_SECTOR_SIZE);
-
-		sg_in.page = in_page;
-		sg_in.offset = in_offs;
-		sg_in.length = sz;
-
-		sg_out.page = out_page;
-		sg_out.offset = out_offs;
-		sg_out.length = sz;
-
-		encdecfunc(tfm, &sg_out, &sg_in, sz);
-
-		size -= sz;
-		in_offs += sz;
-		out_offs += sz;
-	}
-
-	return 0;
-}
-
-typedef int (*encdec_cbc_t)(struct crypto_tfm *tfm,
-			struct scatterlist *sg_out,
-			struct scatterlist *sg_in,
-			unsigned int nsg, u8 *iv);
-
 static int
-cryptoloop_transfer_cbc(struct loop_device *lo, int cmd,
-			struct page *raw_page, unsigned raw_off,
-			struct page *loop_page, unsigned loop_off,
-			int size, sector_t IV)
+cryptoloop_transfer(struct loop_device *lo, int cmd,
+		    struct page *raw_page, unsigned raw_off,
+		    struct page *loop_page, unsigned loop_off,
+		    int size, sector_t IV)
 {
-	struct crypto_tfm *tfm = (struct crypto_tfm *) lo->key_data;
+	struct crypto_blkcipher *tfm = lo->key_data;
+	struct blkcipher_desc desc = {
+		.tfm = tfm,
+		.flags = CRYPTO_TFM_REQ_MAY_SLEEP,
+	};
 	struct scatterlist sg_out = { NULL, };
 	struct scatterlist sg_in = { NULL, };
 
 	encdec_cbc_t encdecfunc;
 	struct page *in_page, *out_page;
 	unsigned in_offs, out_offs;
+	int err;
 
 	if (cmd == READ) {
 		in_page = raw_page;
 		in_offs = raw_off;
 		out_page = loop_page;
 		out_offs = loop_off;
-		encdecfunc = tfm->crt_u.cipher.cit_decrypt_iv;
+		encdecfunc = crypto_blkcipher_crt(tfm)->decrypt;
 	} else {
 		in_page = loop_page;
 		in_offs = loop_off;
 		out_page = raw_page;
 		out_offs = raw_off;
-		encdecfunc = tfm->crt_u.cipher.cit_encrypt_iv;
+		encdecfunc = crypto_blkcipher_crt(tfm)->encrypt;
 	}
 
 	while (size > 0) {
@@ -183,7 +154,10 @@ cryptoloop_transfer_cbc(struct loop_device *lo, int cmd,
 		sg_out.offset = out_offs;
 		sg_out.length = sz;
 
-		encdecfunc(tfm, &sg_out, &sg_in, sz, (u8 *)iv);
+		desc.info = iv;
+		err = encdecfunc(&desc, &sg_out, &sg_in, sz);
+		if (err)
+			return err;
 
 		IV++;
 		size -= sz;
@@ -195,32 +169,6 @@ cryptoloop_transfer_cbc(struct loop_device *lo, int cmd,
 }
 
 static int
-cryptoloop_transfer(struct loop_device *lo, int cmd,
-		    struct page *raw_page, unsigned raw_off,
-		    struct page *loop_page, unsigned loop_off,
-		    int size, sector_t IV)
-{
-	struct crypto_tfm *tfm = (struct crypto_tfm *) lo->key_data;
-	if(tfm->crt_cipher.cit_mode == CRYPTO_TFM_MODE_ECB)
-	{
-		lo->transfer = cryptoloop_transfer_ecb;
-		return cryptoloop_transfer_ecb(lo, cmd, raw_page, raw_off,
-					       loop_page, loop_off, size, IV);
-	}
-	if(tfm->crt_cipher.cit_mode == CRYPTO_TFM_MODE_CBC)
-	{
-		lo->transfer = cryptoloop_transfer_cbc;
-		return cryptoloop_transfer_cbc(lo, cmd, raw_page, raw_off,
-					       loop_page, loop_off, size, IV);
-	}
-
-	/* This is not supposed to happen */
-
-	printk( KERN_ERR "cryptoloop: unsupported cipher mode in cryptoloop_transfer!\n");
-	return -EINVAL;
-}
-
-static int
 cryptoloop_ioctl(struct loop_device *lo, int cmd, unsigned long arg)
 {
 	return -EINVAL;
@@ -229,9 +177,9 @@ cryptoloop_ioctl(struct loop_device *lo, int cmd, unsigned long arg)
 static int
 cryptoloop_release(struct loop_device *lo)
 {
-	struct crypto_tfm *tfm = (struct crypto_tfm *) lo->key_data;
+	struct crypto_blkcipher *tfm = lo->key_data;
 	if (tfm != NULL) {
-		crypto_free_tfm(tfm);
+		crypto_free_blkcipher(tfm);
 		lo->key_data = NULL;
 		return 0;
 	}
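
Note: the reworked cryptoloop_init() above no longer splits "cipher-mode" with strsep(); it measures the two components with strcspn() so it can rewrite the loop option into the crypto API's template syntax in place, then allocates by that name. Illustrative mappings (the first shows the documented default when no mode is given):

	"aes"          ->  "cbc(aes)"
	"aes-cbc"      ->  "cbc(aes)"
	"twofish-ecb"  ->  "ecb(twofish)"

The +3 in the length check reserves room for the two parentheses and the trailing NUL. With the mode folded into the algorithm name, the separate ECB and CBC transfer paths collapse into the single cryptoloop_transfer() above.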
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 4263935443cc..adb554153f67 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -2,22 +2,53 @@ menu "Hardware crypto devices"
 
 config CRYPTO_DEV_PADLOCK
 	tristate "Support for VIA PadLock ACE"
-	depends on CRYPTO && X86_32
+	depends on X86_32
+	select CRYPTO_ALGAPI
+	default m
 	help
 	  Some VIA processors come with an integrated crypto engine
 	  (so called VIA PadLock ACE, Advanced Cryptography Engine)
-	  that provides instructions for very fast {en,de}cryption
-	  with some algorithms.
+	  that provides instructions for very fast cryptographic
+	  operations with supported algorithms.
 
 	  The instructions are used only when the CPU supports them.
-	  Otherwise software encryption is used. If you are unsure,
-	  say Y.
+	  Otherwise software encryption is used.
+
+	  Selecting M for this option will compile a helper module
+	  padlock.ko that should autoload all below configured
+	  algorithms. Don't worry if your hardware does not support
+	  some or all of them. In such case padlock.ko will
+	  simply write a single line into the kernel log informing
+	  about its failure but everything will keep working fine.
+
+	  If you are unsure, say M. The compiled module will be
+	  called padlock.ko
 
 config CRYPTO_DEV_PADLOCK_AES
-	bool "Support for AES in VIA PadLock"
+	tristate "PadLock driver for AES algorithm"
 	depends on CRYPTO_DEV_PADLOCK
-	default y
+	select CRYPTO_BLKCIPHER
+	default m
 	help
 	  Use VIA PadLock for AES algorithm.
 
+	  Available in VIA C3 and newer CPUs.
+
+	  If unsure say M. The compiled module will be
+	  called padlock-aes.ko
+
+config CRYPTO_DEV_PADLOCK_SHA
+	tristate "PadLock driver for SHA1 and SHA256 algorithms"
+	depends on CRYPTO_DEV_PADLOCK
+	select CRYPTO_SHA1
+	select CRYPTO_SHA256
+	default m
+	help
+	  Use VIA PadLock for SHA1/SHA256 algorithms.
+
+	  Available in VIA C7 and newer processors.
+
+	  If unsure say M. The compiled module will be
+	  called padlock-sha.ko
+
 endmenu
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 45426ca19a23..4c3d0ec1cf80 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -1,7 +1,3 @@
-
 obj-$(CONFIG_CRYPTO_DEV_PADLOCK) += padlock.o
-
-padlock-objs-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o
-
-padlock-objs := padlock-generic.o $(padlock-objs-y)
-
+obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o
+obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index b643d71298a9..d4501dc7e650 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -43,11 +43,11 @@
  * ---------------------------------------------------------------------------
  */
 
+#include <crypto/algapi.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/types.h>
 #include <linux/errno.h>
-#include <linux/crypto.h>
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <asm/byteorder.h>
@@ -59,6 +59,17 @@
 #define AES_EXTENDED_KEY_SIZE	64	/* in uint32_t units */
 #define AES_EXTENDED_KEY_SIZE_B	(AES_EXTENDED_KEY_SIZE * sizeof(uint32_t))
 
+/* Control word. */
+struct cword {
+	unsigned int __attribute__ ((__packed__))
+		rounds:4,
+		algo:3,
+		keygen:1,
+		interm:1,
+		encdec:1,
+		ksize:2;
+} __attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
+
 /* Whenever making any changes to the following
  * structure *make sure* you keep E, d_data
  * and cword aligned on 16 Bytes boundaries!!! */
@@ -286,9 +297,9 @@ aes_hw_extkey_available(uint8_t key_len)
 	return 0;
 }
 
-static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm)
+static inline struct aes_ctx *aes_ctx_common(void *ctx)
 {
-	unsigned long addr = (unsigned long)crypto_tfm_ctx(tfm);
+	unsigned long addr = (unsigned long)ctx;
 	unsigned long align = PADLOCK_ALIGNMENT;
 
 	if (align <= crypto_tfm_ctx_alignment())
@@ -296,16 +307,27 @@ static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm)
 	return (struct aes_ctx *)ALIGN(addr, align);
 }
 
+static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm)
+{
+	return aes_ctx_common(crypto_tfm_ctx(tfm));
+}
+
+static inline struct aes_ctx *blk_aes_ctx(struct crypto_blkcipher *tfm)
+{
+	return aes_ctx_common(crypto_blkcipher_ctx(tfm));
+}
+
 static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
-		       unsigned int key_len, u32 *flags)
+		       unsigned int key_len)
 {
 	struct aes_ctx *ctx = aes_ctx(tfm);
 	const __le32 *key = (const __le32 *)in_key;
+	u32 *flags = &tfm->crt_flags;
 	uint32_t i, t, u, v, w;
 	uint32_t P[AES_EXTENDED_KEY_SIZE];
 	uint32_t rounds;
 
-	if (key_len != 16 && key_len != 24 && key_len != 32) {
+	if (key_len % 8) {
 		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
 		return -EINVAL;
 	}
@@ -430,80 +452,212 @@ static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 	padlock_xcrypt_ecb(in, out, ctx->D, &ctx->cword.decrypt, 1);
 }
 
-static unsigned int aes_encrypt_ecb(const struct cipher_desc *desc, u8 *out,
-				    const u8 *in, unsigned int nbytes)
+static struct crypto_alg aes_alg = {
+	.cra_name		=	"aes",
+	.cra_driver_name	=	"aes-padlock",
+	.cra_priority		=	PADLOCK_CRA_PRIORITY,
+	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER,
+	.cra_blocksize		=	AES_BLOCK_SIZE,
+	.cra_ctxsize		=	sizeof(struct aes_ctx),
+	.cra_alignmask		=	PADLOCK_ALIGNMENT - 1,
+	.cra_module		=	THIS_MODULE,
+	.cra_list		=	LIST_HEAD_INIT(aes_alg.cra_list),
+	.cra_u			=	{
+		.cipher = {
+			.cia_min_keysize	=	AES_MIN_KEY_SIZE,
+			.cia_max_keysize	=	AES_MAX_KEY_SIZE,
+			.cia_setkey		=	aes_set_key,
+			.cia_encrypt		=	aes_encrypt,
+			.cia_decrypt		=	aes_decrypt,
+		}
+	}
+};
+
+static int ecb_aes_encrypt(struct blkcipher_desc *desc,
+			   struct scatterlist *dst, struct scatterlist *src,
+			   unsigned int nbytes)
 {
-	struct aes_ctx *ctx = aes_ctx(desc->tfm);
-	padlock_xcrypt_ecb(in, out, ctx->E, &ctx->cword.encrypt,
-			   nbytes / AES_BLOCK_SIZE);
-	return nbytes & ~(AES_BLOCK_SIZE - 1);
+	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
+	struct blkcipher_walk walk;
+	int err;
+
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	err = blkcipher_walk_virt(desc, &walk);
+
+	while ((nbytes = walk.nbytes)) {
+		padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
+				   ctx->E, &ctx->cword.encrypt,
+				   nbytes / AES_BLOCK_SIZE);
+		nbytes &= AES_BLOCK_SIZE - 1;
+		err = blkcipher_walk_done(desc, &walk, nbytes);
+	}
+
+	return err;
 }
 
-static unsigned int aes_decrypt_ecb(const struct cipher_desc *desc, u8 *out,
-				    const u8 *in, unsigned int nbytes)
+static int ecb_aes_decrypt(struct blkcipher_desc *desc,
+			   struct scatterlist *dst, struct scatterlist *src,
+			   unsigned int nbytes)
 {
-	struct aes_ctx *ctx = aes_ctx(desc->tfm);
-	padlock_xcrypt_ecb(in, out, ctx->D, &ctx->cword.decrypt,
-			   nbytes / AES_BLOCK_SIZE);
-	return nbytes & ~(AES_BLOCK_SIZE - 1);
+	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
+	struct blkcipher_walk walk;
+	int err;
+
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	err = blkcipher_walk_virt(desc, &walk);
+
+	while ((nbytes = walk.nbytes)) {
+		padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
+				   ctx->D, &ctx->cword.decrypt,
+				   nbytes / AES_BLOCK_SIZE);
+		nbytes &= AES_BLOCK_SIZE - 1;
+		err = blkcipher_walk_done(desc, &walk, nbytes);
+	}
+
+	return err;
 }
 
-static unsigned int aes_encrypt_cbc(const struct cipher_desc *desc, u8 *out,
-				    const u8 *in, unsigned int nbytes)
-{
-	struct aes_ctx *ctx = aes_ctx(desc->tfm);
-	u8 *iv;
+static struct crypto_alg ecb_aes_alg = {
+	.cra_name		=	"ecb(aes)",
+	.cra_driver_name	=	"ecb-aes-padlock",
+	.cra_priority		=	PADLOCK_COMPOSITE_PRIORITY,
+	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER,
+	.cra_blocksize		=	AES_BLOCK_SIZE,
+	.cra_ctxsize		=	sizeof(struct aes_ctx),
+	.cra_alignmask		=	PADLOCK_ALIGNMENT - 1,
+	.cra_type		=	&crypto_blkcipher_type,
+	.cra_module		=	THIS_MODULE,
+	.cra_list		=	LIST_HEAD_INIT(ecb_aes_alg.cra_list),
+	.cra_u			=	{
+		.blkcipher = {
+			.min_keysize	=	AES_MIN_KEY_SIZE,
+			.max_keysize	=	AES_MAX_KEY_SIZE,
+			.setkey		=	aes_set_key,
+			.encrypt	=	ecb_aes_encrypt,
+			.decrypt	=	ecb_aes_decrypt,
+		}
+	}
+};
 
-	iv = padlock_xcrypt_cbc(in, out, ctx->E, desc->info,
-				&ctx->cword.encrypt, nbytes / AES_BLOCK_SIZE);
-	memcpy(desc->info, iv, AES_BLOCK_SIZE);
+static int cbc_aes_encrypt(struct blkcipher_desc *desc,
+			   struct scatterlist *dst, struct scatterlist *src,
+			   unsigned int nbytes)
+{
+	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
+	struct blkcipher_walk walk;
+	int err;
+
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	err = blkcipher_walk_virt(desc, &walk);
+
+	while ((nbytes = walk.nbytes)) {
+		u8 *iv = padlock_xcrypt_cbc(walk.src.virt.addr,
+					    walk.dst.virt.addr, ctx->E,
+					    walk.iv, &ctx->cword.encrypt,
+					    nbytes / AES_BLOCK_SIZE);
+		memcpy(walk.iv, iv, AES_BLOCK_SIZE);
+		nbytes &= AES_BLOCK_SIZE - 1;
+		err = blkcipher_walk_done(desc, &walk, nbytes);
+	}
 
-	return nbytes & ~(AES_BLOCK_SIZE - 1);
+	return err;
 }
 
-static unsigned int aes_decrypt_cbc(const struct cipher_desc *desc, u8 *out,
-				    const u8 *in, unsigned int nbytes)
+static int cbc_aes_decrypt(struct blkcipher_desc *desc,
+			   struct scatterlist *dst, struct scatterlist *src,
+			   unsigned int nbytes)
 {
-	struct aes_ctx *ctx = aes_ctx(desc->tfm);
-	padlock_xcrypt_cbc(in, out, ctx->D, desc->info, &ctx->cword.decrypt,
-			   nbytes / AES_BLOCK_SIZE);
-	return nbytes & ~(AES_BLOCK_SIZE - 1);
+	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
+	struct blkcipher_walk walk;
+	int err;
+
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	err = blkcipher_walk_virt(desc, &walk);
+
+	while ((nbytes = walk.nbytes)) {
+		padlock_xcrypt_cbc(walk.src.virt.addr, walk.dst.virt.addr,
+				   ctx->D, walk.iv, &ctx->cword.decrypt,
+				   nbytes / AES_BLOCK_SIZE);
+		nbytes &= AES_BLOCK_SIZE - 1;
+		err = blkcipher_walk_done(desc, &walk, nbytes);
+	}
+
+	return err;
 }
 
-static struct crypto_alg aes_alg = {
-	.cra_name		=	"aes",
-	.cra_driver_name	=	"aes-padlock",
-	.cra_priority		=	300,
-	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER,
+static struct crypto_alg cbc_aes_alg = {
+	.cra_name		=	"cbc(aes)",
+	.cra_driver_name	=	"cbc-aes-padlock",
+	.cra_priority		=	PADLOCK_COMPOSITE_PRIORITY,
+	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER,
 	.cra_blocksize		=	AES_BLOCK_SIZE,
 	.cra_ctxsize		=	sizeof(struct aes_ctx),
 	.cra_alignmask		=	PADLOCK_ALIGNMENT - 1,
+	.cra_type		=	&crypto_blkcipher_type,
 	.cra_module		=	THIS_MODULE,
-	.cra_list		=	LIST_HEAD_INIT(aes_alg.cra_list),
+	.cra_list		=	LIST_HEAD_INIT(cbc_aes_alg.cra_list),
 	.cra_u			=	{
-		.cipher = {
-			.cia_min_keysize	=	AES_MIN_KEY_SIZE,
-			.cia_max_keysize	=	AES_MAX_KEY_SIZE,
-			.cia_setkey		=	aes_set_key,
-			.cia_encrypt		=	aes_encrypt,
-			.cia_decrypt		=	aes_decrypt,
-			.cia_encrypt_ecb	=	aes_encrypt_ecb,
-			.cia_decrypt_ecb	=	aes_decrypt_ecb,
-			.cia_encrypt_cbc	=	aes_encrypt_cbc,
-			.cia_decrypt_cbc	=	aes_decrypt_cbc,
+		.blkcipher = {
+			.min_keysize	=	AES_MIN_KEY_SIZE,
+			.max_keysize	=	AES_MAX_KEY_SIZE,
+			.ivsize		=	AES_BLOCK_SIZE,
+			.setkey		=	aes_set_key,
+			.encrypt	=	cbc_aes_encrypt,
+			.decrypt	=	cbc_aes_decrypt,
 		}
 	}
 };
 
-int __init padlock_init_aes(void)
+static int __init padlock_init(void)
 {
-	printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n");
+	int ret;
+
+	if (!cpu_has_xcrypt) {
+		printk(KERN_ERR PFX "VIA PadLock not detected.\n");
+		return -ENODEV;
+	}
+
+	if (!cpu_has_xcrypt_enabled) {
+		printk(KERN_ERR PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
+		return -ENODEV;
+	}
 
 	gen_tabs();
-	return crypto_register_alg(&aes_alg);
+	if ((ret = crypto_register_alg(&aes_alg)))
+		goto aes_err;
+
+	if ((ret = crypto_register_alg(&ecb_aes_alg)))
+		goto ecb_aes_err;
+
+	if ((ret = crypto_register_alg(&cbc_aes_alg)))
+		goto cbc_aes_err;
+
+	printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n");
+
+out:
+	return ret;
+
+cbc_aes_err:
+	crypto_unregister_alg(&ecb_aes_alg);
+ecb_aes_err:
+	crypto_unregister_alg(&aes_alg);
+aes_err:
+	printk(KERN_ERR PFX "VIA PadLock AES initialization failed.\n");
+	goto out;
 }
 
-void __exit padlock_fini_aes(void)
+static void __exit padlock_fini(void)
 {
+	crypto_unregister_alg(&cbc_aes_alg);
+	crypto_unregister_alg(&ecb_aes_alg);
 	crypto_unregister_alg(&aes_alg);
 }
+
+module_init(padlock_init);
+module_exit(padlock_fini);
+
+MODULE_DESCRIPTION("VIA PadLock AES algorithm support");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Michal Ludvig");
+
+MODULE_ALIAS("aes-padlock");
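
Note: all four ecb/cbc handlers added above share one scatterlist-walking skeleton; only the xcrypt invocation in the middle differs. The walk maps each segment to a virtual address, whole AES blocks are processed in place, and the sub-block remainder is handed back to blkcipher_walk_done() so it carries over to the next iteration (names exactly as in the hunk above):

	/* Skeleton only; see ecb_aes_encrypt() above for a full instance. */
	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while ((nbytes = walk.nbytes)) {	/* bytes mapped in this step */
		/* process nbytes / AES_BLOCK_SIZE whole blocks here */
		nbytes &= AES_BLOCK_SIZE - 1;	/* partial block left over */
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}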
diff --git a/drivers/crypto/padlock-generic.c b/drivers/crypto/padlock-generic.c
deleted file mode 100644
index 18cf0e8274a7..000000000000
--- a/drivers/crypto/padlock-generic.c
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Cryptographic API.
- *
- * Support for VIA PadLock hardware crypto engine.
- *
- * Copyright (c) 2004  Michal Ludvig <michal@logix.cz>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/crypto.h>
-#include <asm/byteorder.h>
-#include "padlock.h"
-
-static int __init
-padlock_init(void)
-{
-	int ret = -ENOSYS;
-
-	if (!cpu_has_xcrypt) {
-		printk(KERN_ERR PFX "VIA PadLock not detected.\n");
-		return -ENODEV;
-	}
-
-	if (!cpu_has_xcrypt_enabled) {
-		printk(KERN_ERR PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
-		return -ENODEV;
-	}
-
-#ifdef CONFIG_CRYPTO_DEV_PADLOCK_AES
-	if ((ret = padlock_init_aes())) {
-		printk(KERN_ERR PFX "VIA PadLock AES initialization failed.\n");
-		return ret;
-	}
-#endif
-
-	if (ret == -ENOSYS)
-		printk(KERN_ERR PFX "Hmm, VIA PadLock was compiled without any algorithm.\n");
-
-	return ret;
-}
-
-static void __exit
-padlock_fini(void)
-{
-#ifdef CONFIG_CRYPTO_DEV_PADLOCK_AES
-	padlock_fini_aes();
-#endif
-}
-
-module_init(padlock_init);
-module_exit(padlock_fini);
-
-MODULE_DESCRIPTION("VIA PadLock crypto engine support.");
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_AUTHOR("Michal Ludvig");
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
new file mode 100644
index 000000000000..a781fd23b607
--- /dev/null
+++ b/drivers/crypto/padlock-sha.c
@@ -0,0 +1,318 @@
+/*
+ * Cryptographic API.
+ *
+ * Support for VIA PadLock hardware crypto engine.
+ *
+ * Copyright (c) 2006  Michal Ludvig <michal@logix.cz>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <crypto/algapi.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/cryptohash.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/scatterlist.h>
+#include "padlock.h"
+
+#define SHA1_DEFAULT_FALLBACK	"sha1-generic"
+#define SHA1_DIGEST_SIZE	20
+#define SHA1_HMAC_BLOCK_SIZE	64
+
+#define SHA256_DEFAULT_FALLBACK	"sha256-generic"
+#define SHA256_DIGEST_SIZE	32
+#define SHA256_HMAC_BLOCK_SIZE	64
+
+struct padlock_sha_ctx {
+	char		*data;
+	size_t		used;
+	int		bypass;
+	void (*f_sha_padlock)(const char *in, char *out, int count);
+	struct hash_desc fallback;
+};
+
+static inline struct padlock_sha_ctx *ctx(struct crypto_tfm *tfm)
+{
+	return crypto_tfm_ctx(tfm);
+}
+
+/* We'll need aligned address on the stack */
+#define NEAREST_ALIGNED(ptr) \
+	((void *)ALIGN((size_t)(ptr), PADLOCK_ALIGNMENT))
+
+static struct crypto_alg sha1_alg, sha256_alg;
+
+static void padlock_sha_bypass(struct crypto_tfm *tfm)
+{
+	if (ctx(tfm)->bypass)
+		return;
+
+	crypto_hash_init(&ctx(tfm)->fallback);
+	if (ctx(tfm)->data && ctx(tfm)->used) {
+		struct scatterlist sg;
+
+		sg_set_buf(&sg, ctx(tfm)->data, ctx(tfm)->used);
+		crypto_hash_update(&ctx(tfm)->fallback, &sg, sg.length);
+	}
+
+	ctx(tfm)->used = 0;
+	ctx(tfm)->bypass = 1;
+}
+
+static void padlock_sha_init(struct crypto_tfm *tfm)
+{
+	ctx(tfm)->used = 0;
+	ctx(tfm)->bypass = 0;
+}
+
+static void padlock_sha_update(struct crypto_tfm *tfm,
+			const uint8_t *data, unsigned int length)
+{
+	/* Our buffer is always one page. */
+	if (unlikely(!ctx(tfm)->bypass &&
+			(ctx(tfm)->used + length > PAGE_SIZE)))
+		padlock_sha_bypass(tfm);
+
+	if (unlikely(ctx(tfm)->bypass)) {
+		struct scatterlist sg;
+		sg_set_buf(&sg, (uint8_t *)data, length);
+		crypto_hash_update(&ctx(tfm)->fallback, &sg, length);
+		return;
+	}
+
+	memcpy(ctx(tfm)->data + ctx(tfm)->used, data, length);
+	ctx(tfm)->used += length;
+}
+
+static inline void padlock_output_block(uint32_t *src,
+			uint32_t *dst, size_t count)
+{
+	while (count--)
+		*dst++ = swab32(*src++);
+}
+
+static void padlock_do_sha1(const char *in, char *out, int count)
+{
+	/* We can't store directly to *out as it may be unaligned. */
+	/* BTW Don't reduce the buffer size below 128 Bytes!
+	 *     PadLock microcode needs it that big. */
+	char buf[128+16];
+	char *result = NEAREST_ALIGNED(buf);
+
+	((uint32_t *)result)[0] = 0x67452301;
+	((uint32_t *)result)[1] = 0xEFCDAB89;
+	((uint32_t *)result)[2] = 0x98BADCFE;
+	((uint32_t *)result)[3] = 0x10325476;
+	((uint32_t *)result)[4] = 0xC3D2E1F0;
+
+	asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" /* rep xsha1 */
+		      : "+S"(in), "+D"(result)
+		      : "c"(count), "a"(0));
+
+	padlock_output_block((uint32_t *)result, (uint32_t *)out, 5);
+}
+
+static void padlock_do_sha256(const char *in, char *out, int count)
+{
+	/* We can't store directly to *out as it may be unaligned. */
+	/* BTW Don't reduce the buffer size below 128 Bytes!
+	 *     PadLock microcode needs it that big. */
+	char buf[128+16];
+	char *result = NEAREST_ALIGNED(buf);
+
+	((uint32_t *)result)[0] = 0x6A09E667;
+	((uint32_t *)result)[1] = 0xBB67AE85;
+	((uint32_t *)result)[2] = 0x3C6EF372;
+	((uint32_t *)result)[3] = 0xA54FF53A;
+	((uint32_t *)result)[4] = 0x510E527F;
+	((uint32_t *)result)[5] = 0x9B05688C;
+	((uint32_t *)result)[6] = 0x1F83D9AB;
+	((uint32_t *)result)[7] = 0x5BE0CD19;
+
+	asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" /* rep xsha256 */
+		      : "+S"(in), "+D"(result)
+		      : "c"(count), "a"(0));
+
+	padlock_output_block((uint32_t *)result, (uint32_t *)out, 8);
+}
+
+static void padlock_sha_final(struct crypto_tfm *tfm, uint8_t *out)
+{
+	if (unlikely(ctx(tfm)->bypass)) {
+		crypto_hash_final(&ctx(tfm)->fallback, out);
+		ctx(tfm)->bypass = 0;
+		return;
+	}
+
+	/* Pass the input buffer to PadLock microcode... */
+	ctx(tfm)->f_sha_padlock(ctx(tfm)->data, out, ctx(tfm)->used);
+
+	ctx(tfm)->used = 0;
+}
+
+static int padlock_cra_init(struct crypto_tfm *tfm)
+{
+	const char *fallback_driver_name = tfm->__crt_alg->cra_name;
+	struct crypto_hash *fallback_tfm;
+
+	/* For now we'll allocate one page. This
+	 * could eventually be configurable one day. */
+	ctx(tfm)->data = (char *)__get_free_page(GFP_KERNEL);
+	if (!ctx(tfm)->data)
+		return -ENOMEM;
+
+	/* Allocate a fallback and abort if it failed. */
+	fallback_tfm = crypto_alloc_hash(fallback_driver_name, 0,
+					 CRYPTO_ALG_ASYNC |
+					 CRYPTO_ALG_NEED_FALLBACK);
+	if (IS_ERR(fallback_tfm)) {
+		printk(KERN_WARNING PFX "Fallback driver '%s' could not be loaded!\n",
+		       fallback_driver_name);
+		free_page((unsigned long)(ctx(tfm)->data));
+		return PTR_ERR(fallback_tfm);
+	}
+
+	ctx(tfm)->fallback.tfm = fallback_tfm;
+	return 0;
+}
+
+static int padlock_sha1_cra_init(struct crypto_tfm *tfm)
+{
+	ctx(tfm)->f_sha_padlock = padlock_do_sha1;
+
+	return padlock_cra_init(tfm);
+}
+
+static int padlock_sha256_cra_init(struct crypto_tfm *tfm)
+{
+	ctx(tfm)->f_sha_padlock = padlock_do_sha256;
+
+	return padlock_cra_init(tfm);
+}
+
+static void padlock_cra_exit(struct crypto_tfm *tfm)
+{
+	if (ctx(tfm)->data) {
+		free_page((unsigned long)(ctx(tfm)->data));
+		ctx(tfm)->data = NULL;
+	}
+
+	crypto_free_hash(ctx(tfm)->fallback.tfm);
+	ctx(tfm)->fallback.tfm = NULL;
+}
+
+static struct crypto_alg sha1_alg = {
+	.cra_name		=	"sha1",
+	.cra_driver_name	=	"sha1-padlock",
+	.cra_priority		=	PADLOCK_CRA_PRIORITY,
+	.cra_flags		=	CRYPTO_ALG_TYPE_DIGEST |
+					CRYPTO_ALG_NEED_FALLBACK,
+	.cra_blocksize		=	SHA1_HMAC_BLOCK_SIZE,
+	.cra_ctxsize		=	sizeof(struct padlock_sha_ctx),
+	.cra_module		=	THIS_MODULE,
+	.cra_list		=	LIST_HEAD_INIT(sha1_alg.cra_list),
+	.cra_init		=	padlock_sha1_cra_init,
+	.cra_exit		=	padlock_cra_exit,
+	.cra_u			=	{
+		.digest = {
+			.dia_digestsize	=	SHA1_DIGEST_SIZE,
+			.dia_init	=	padlock_sha_init,
+			.dia_update	=	padlock_sha_update,
+			.dia_final	=	padlock_sha_final,
+		}
+	}
+};
+
+static struct crypto_alg sha256_alg = {
+	.cra_name		=	"sha256",
+	.cra_driver_name	=	"sha256-padlock",
+	.cra_priority		=	PADLOCK_CRA_PRIORITY,
+	.cra_flags		=	CRYPTO_ALG_TYPE_DIGEST |
+					CRYPTO_ALG_NEED_FALLBACK,
+	.cra_blocksize		=	SHA256_HMAC_BLOCK_SIZE,
+	.cra_ctxsize		=	sizeof(struct padlock_sha_ctx),
+	.cra_module		=	THIS_MODULE,
+	.cra_list		=	LIST_HEAD_INIT(sha256_alg.cra_list),
+	.cra_init		=	padlock_sha256_cra_init,
+	.cra_exit		=	padlock_cra_exit,
+	.cra_u			=	{
+		.digest = {
+			.dia_digestsize	=	SHA256_DIGEST_SIZE,
+			.dia_init	=	padlock_sha_init,
+			.dia_update	=	padlock_sha_update,
+			.dia_final	=	padlock_sha_final,
+		}
+	}
+};
+
+static void __init padlock_sha_check_fallbacks(void)
+{
+	if (!crypto_has_hash("sha1", 0, CRYPTO_ALG_ASYNC |
+					CRYPTO_ALG_NEED_FALLBACK))
+		printk(KERN_WARNING PFX
+		       "Couldn't load fallback module for sha1.\n");
+
+	if (!crypto_has_hash("sha256", 0, CRYPTO_ALG_ASYNC |
+					CRYPTO_ALG_NEED_FALLBACK))
+		printk(KERN_WARNING PFX
+		       "Couldn't load fallback module for sha256.\n");
+}
+
+static int __init padlock_init(void)
+{
+	int rc = -ENODEV;
+
+	if (!cpu_has_phe) {
+		printk(KERN_ERR PFX "VIA PadLock Hash Engine not detected.\n");
+		return -ENODEV;
+	}
+
+	if (!cpu_has_phe_enabled) {
+		printk(KERN_ERR PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
+		return -ENODEV;
+	}
+
+	padlock_sha_check_fallbacks();
+
+	rc = crypto_register_alg(&sha1_alg);
+	if (rc)
+		goto out;
+
+	rc = crypto_register_alg(&sha256_alg);
+	if (rc)
+		goto out_unreg1;
+
+	printk(KERN_NOTICE PFX "Using VIA PadLock ACE for SHA1/SHA256 algorithms.\n");
+
+	return 0;
+
+out_unreg1:
+	crypto_unregister_alg(&sha1_alg);
+out:
+	printk(KERN_ERR PFX "VIA PadLock SHA1/SHA256 initialization failed.\n");
+	return rc;
+}
+
+static void __exit padlock_fini(void)
+{
+	crypto_unregister_alg(&sha1_alg);
+	crypto_unregister_alg(&sha256_alg);
+}
+
+module_init(padlock_init);
+module_exit(padlock_fini);
+
+MODULE_DESCRIPTION("VIA PadLock SHA1/SHA256 algorithms support.");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Michal Ludvig");
+
+MODULE_ALIAS("sha1-padlock");
+MODULE_ALIAS("sha256-padlock");
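
Note: the PadLock SHA instructions ("rep xsha1"/"rep xsha256") finalize the digest in a single pass, which is why the new driver above accumulates all input in one page and only runs the hardware at dia_final time; any stream longer than PAGE_SIZE is transparently re-fed to the software hash allocated at cra_init via the bypass path, so correctness never depends on the hardware limit.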
diff --git a/drivers/crypto/padlock.c b/drivers/crypto/padlock.c
new file mode 100644
index 000000000000..d6d7dd5bb98c
--- /dev/null
+++ b/drivers/crypto/padlock.c
@@ -0,0 +1,58 @@
+/*
+ * Cryptographic API.
+ *
+ * Support for VIA PadLock hardware crypto engine.
+ *
+ * Copyright (c) 2006  Michal Ludvig <michal@logix.cz>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/crypto.h>
+#include <linux/cryptohash.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/scatterlist.h>
+#include "padlock.h"
+
+static int __init padlock_init(void)
+{
+	int success = 0;
+
+	if (crypto_has_cipher("aes-padlock", 0, 0))
+		success++;
+
+	if (crypto_has_hash("sha1-padlock", 0, 0))
+		success++;
+
+	if (crypto_has_hash("sha256-padlock", 0, 0))
+		success++;
+
+	if (!success) {
+		printk(KERN_WARNING PFX "No VIA PadLock drivers have been loaded.\n");
+		return -ENODEV;
+	}
+
+	printk(KERN_NOTICE PFX "%d drivers are available.\n", success);
+
+	return 0;
+}
+
+static void __exit padlock_fini(void)
+{
+}
+
+module_init(padlock_init);
+module_exit(padlock_fini);
+
+MODULE_DESCRIPTION("Load all configured PadLock algorithms.");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Michal Ludvig");
+
diff --git a/drivers/crypto/padlock.h b/drivers/crypto/padlock.h
index b78489bc298a..b728e4518bd1 100644
--- a/drivers/crypto/padlock.h
+++ b/drivers/crypto/padlock.h
@@ -15,22 +15,9 @@
 
 #define PADLOCK_ALIGNMENT 16
 
-/* Control word. */
-struct cword {
-	unsigned int __attribute__ ((__packed__))
-		rounds:4,
-		algo:3,
-		keygen:1,
-		interm:1,
-		encdec:1,
-		ksize:2;
-} __attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
-
 #define PFX	"padlock: "
 
-#ifdef CONFIG_CRYPTO_DEV_PADLOCK_AES
-int padlock_init_aes(void);
-void padlock_fini_aes(void);
-#endif
+#define PADLOCK_CRA_PRIORITY	300
+#define PADLOCK_COMPOSITE_PRIORITY 400
 
 #endif	/* _CRYPTO_PADLOCK_H */
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 6022ed12a795..bdbd34993a80 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c | |||
@@ -5,6 +5,7 @@ | |||
5 | * This file is released under the GPL. | 5 | * This file is released under the GPL. |
6 | */ | 6 | */ |
7 | 7 | ||
8 | #include <linux/err.h> | ||
8 | #include <linux/module.h> | 9 | #include <linux/module.h> |
9 | #include <linux/init.h> | 10 | #include <linux/init.h> |
10 | #include <linux/kernel.h> | 11 | #include <linux/kernel.h> |
@@ -78,11 +79,13 @@ struct crypt_config { | |||
78 | */ | 79 | */ |
79 | struct crypt_iv_operations *iv_gen_ops; | 80 | struct crypt_iv_operations *iv_gen_ops; |
80 | char *iv_mode; | 81 | char *iv_mode; |
81 | void *iv_gen_private; | 82 | struct crypto_cipher *iv_gen_private; |
82 | sector_t iv_offset; | 83 | sector_t iv_offset; |
83 | unsigned int iv_size; | 84 | unsigned int iv_size; |
84 | 85 | ||
85 | struct crypto_tfm *tfm; | 86 | char cipher[CRYPTO_MAX_ALG_NAME]; |
87 | char chainmode[CRYPTO_MAX_ALG_NAME]; | ||
88 | struct crypto_blkcipher *tfm; | ||
86 | unsigned int key_size; | 89 | unsigned int key_size; |
87 | u8 key[0]; | 90 | u8 key[0]; |
88 | }; | 91 | }; |
@@ -96,12 +99,12 @@ static kmem_cache_t *_crypt_io_pool; | |||
96 | /* | 99 | /* |
97 | * Different IV generation algorithms: | 100 | * Different IV generation algorithms: |
98 | * | 101 | * |
99 | * plain: the initial vector is the 32-bit low-endian version of the sector | 102 | * plain: the initial vector is the 32-bit little-endian version of the sector |
100 | * number, padded with zeros if neccessary. | 103 | * number, padded with zeros if neccessary. |
101 | * | 104 | * |
102 | * ess_iv: "encrypted sector|salt initial vector", the sector number is | 105 | * essiv: "encrypted sector|salt initial vector", the sector number is |
103 | * encrypted with the bulk cipher using a salt as key. The salt | 106 | * encrypted with the bulk cipher using a salt as key. The salt |
104 | * should be derived from the bulk cipher's key via hashing. | 107 | * should be derived from the bulk cipher's key via hashing. |
105 | * | 108 | * |
106 | * plumb: unimplemented, see: | 109 | * plumb: unimplemented, see: |
107 | * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454 | 110 | * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454 |
@@ -118,11 +121,13 @@ static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv, sector_t sector) | |||
118 | static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti, | 121 | static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti, |
119 | const char *opts) | 122 | const char *opts) |
120 | { | 123 | { |
121 | struct crypto_tfm *essiv_tfm; | 124 | struct crypto_cipher *essiv_tfm; |
122 | struct crypto_tfm *hash_tfm; | 125 | struct crypto_hash *hash_tfm; |
126 | struct hash_desc desc; | ||
123 | struct scatterlist sg; | 127 | struct scatterlist sg; |
124 | unsigned int saltsize; | 128 | unsigned int saltsize; |
125 | u8 *salt; | 129 | u8 *salt; |
130 | int err; | ||
126 | 131 | ||
127 | if (opts == NULL) { | 132 | if (opts == NULL) { |
128 | ti->error = "Digest algorithm missing for ESSIV mode"; | 133 | ti->error = "Digest algorithm missing for ESSIV mode"; |
@@ -130,76 +135,70 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti, | |||
130 | } | 135 | } |
131 | 136 | ||
132 | /* Hash the cipher key with the given hash algorithm */ | 137 | /* Hash the cipher key with the given hash algorithm */ |
133 | hash_tfm = crypto_alloc_tfm(opts, CRYPTO_TFM_REQ_MAY_SLEEP); | 138 | hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC); |
134 | if (hash_tfm == NULL) { | 139 | if (IS_ERR(hash_tfm)) { |
135 | ti->error = "Error initializing ESSIV hash"; | 140 | ti->error = "Error initializing ESSIV hash"; |
136 | return -EINVAL; | 141 | return PTR_ERR(hash_tfm); |
137 | } | 142 | } |
138 | 143 | ||
139 | if (crypto_tfm_alg_type(hash_tfm) != CRYPTO_ALG_TYPE_DIGEST) { | 144 | saltsize = crypto_hash_digestsize(hash_tfm); |
140 | ti->error = "Expected digest algorithm for ESSIV hash"; | ||
141 | crypto_free_tfm(hash_tfm); | ||
142 | return -EINVAL; | ||
143 | } | ||
144 | |||
145 | saltsize = crypto_tfm_alg_digestsize(hash_tfm); | ||
146 | salt = kmalloc(saltsize, GFP_KERNEL); | 145 | salt = kmalloc(saltsize, GFP_KERNEL); |
147 | if (salt == NULL) { | 146 | if (salt == NULL) { |
148 | ti->error = "Error kmallocing salt storage in ESSIV"; | 147 | ti->error = "Error kmallocing salt storage in ESSIV"; |
149 | crypto_free_tfm(hash_tfm); | 148 | crypto_free_hash(hash_tfm); |
150 | return -ENOMEM; | 149 | return -ENOMEM; |
151 | } | 150 | } |
152 | 151 | ||
153 | sg_set_buf(&sg, cc->key, cc->key_size); | 152 | sg_set_buf(&sg, cc->key, cc->key_size); |
154 | crypto_digest_digest(hash_tfm, &sg, 1, salt); | 153 | desc.tfm = hash_tfm; |
155 | crypto_free_tfm(hash_tfm); | 154 | desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; |
155 | err = crypto_hash_digest(&desc, &sg, cc->key_size, salt); | ||
156 | crypto_free_hash(hash_tfm); | ||
157 | |||
158 | if (err) { | ||
159 | ti->error = "Error calculating hash in ESSIV"; | ||
160 | return err; | ||
161 | } | ||
156 | 162 | ||
157 | /* Setup the essiv_tfm with the given salt */ | 163 | /* Setup the essiv_tfm with the given salt */ |
158 | essiv_tfm = crypto_alloc_tfm(crypto_tfm_alg_name(cc->tfm), | 164 | essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC); |
159 | CRYPTO_TFM_MODE_ECB | | 165 | if (IS_ERR(essiv_tfm)) { |
160 | CRYPTO_TFM_REQ_MAY_SLEEP); | ||
161 | if (essiv_tfm == NULL) { | ||
162 | ti->error = "Error allocating crypto tfm for ESSIV"; | 166 | ti->error = "Error allocating crypto tfm for ESSIV"; |
163 | kfree(salt); | 167 | kfree(salt); |
164 | return -EINVAL; | 168 | return PTR_ERR(essiv_tfm); |
165 | } | 169 | } |
166 | if (crypto_tfm_alg_blocksize(essiv_tfm) | 170 | if (crypto_cipher_blocksize(essiv_tfm) != |
167 | != crypto_tfm_alg_ivsize(cc->tfm)) { | 171 | crypto_blkcipher_ivsize(cc->tfm)) { |
168 | ti->error = "Block size of ESSIV cipher does " | 172 | ti->error = "Block size of ESSIV cipher does " |
169 | "not match IV size of block cipher"; | 173 | "not match IV size of block cipher"; |
170 | crypto_free_tfm(essiv_tfm); | 174 | crypto_free_cipher(essiv_tfm); |
171 | kfree(salt); | 175 | kfree(salt); |
172 | return -EINVAL; | 176 | return -EINVAL; |
173 | } | 177 | } |
174 | if (crypto_cipher_setkey(essiv_tfm, salt, saltsize) < 0) { | 178 | err = crypto_cipher_setkey(essiv_tfm, salt, saltsize); |
179 | if (err) { | ||
175 | ti->error = "Failed to set key for ESSIV cipher"; | 180 | ti->error = "Failed to set key for ESSIV cipher"; |
176 | crypto_free_tfm(essiv_tfm); | 181 | crypto_free_cipher(essiv_tfm); |
177 | kfree(salt); | 182 | kfree(salt); |
178 | return -EINVAL; | 183 | return err; |
179 | } | 184 | } |
180 | kfree(salt); | 185 | kfree(salt); |
181 | 186 | ||
182 | cc->iv_gen_private = (void *)essiv_tfm; | 187 | cc->iv_gen_private = essiv_tfm; |
183 | return 0; | 188 | return 0; |
184 | } | 189 | } |
185 | 190 | ||
186 | static void crypt_iv_essiv_dtr(struct crypt_config *cc) | 191 | static void crypt_iv_essiv_dtr(struct crypt_config *cc) |
187 | { | 192 | { |
188 | crypto_free_tfm((struct crypto_tfm *)cc->iv_gen_private); | 193 | crypto_free_cipher(cc->iv_gen_private); |
189 | cc->iv_gen_private = NULL; | 194 | cc->iv_gen_private = NULL; |
190 | } | 195 | } |
191 | 196 | ||
192 | static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, sector_t sector) | 197 | static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, sector_t sector) |
193 | { | 198 | { |
194 | struct scatterlist sg; | ||
195 | |||
196 | memset(iv, 0, cc->iv_size); | 199 | memset(iv, 0, cc->iv_size); |
197 | *(u64 *)iv = cpu_to_le64(sector); | 200 | *(u64 *)iv = cpu_to_le64(sector); |
198 | 201 | crypto_cipher_encrypt_one(cc->iv_gen_private, iv, iv); | |
199 | sg_set_buf(&sg, iv, cc->iv_size); | ||
200 | crypto_cipher_encrypt((struct crypto_tfm *)cc->iv_gen_private, | ||
201 | &sg, &sg, cc->iv_size); | ||
202 | |||
203 | return 0; | 202 | return 0; |
204 | } | 203 | } |
205 | 204 | ||
@@ -220,6 +219,11 @@ crypt_convert_scatterlist(struct crypt_config *cc, struct scatterlist *out, | |||
220 | int write, sector_t sector) | 219 | int write, sector_t sector) |
221 | { | 220 | { |
222 | u8 iv[cc->iv_size]; | 221 | u8 iv[cc->iv_size]; |
222 | struct blkcipher_desc desc = { | ||
223 | .tfm = cc->tfm, | ||
224 | .info = iv, | ||
225 | .flags = CRYPTO_TFM_REQ_MAY_SLEEP, | ||
226 | }; | ||
223 | int r; | 227 | int r; |
224 | 228 | ||
225 | if (cc->iv_gen_ops) { | 229 | if (cc->iv_gen_ops) { |
@@ -228,14 +232,14 @@ crypt_convert_scatterlist(struct crypt_config *cc, struct scatterlist *out, | |||
228 | return r; | 232 | return r; |
229 | 233 | ||
230 | if (write) | 234 | if (write) |
231 | r = crypto_cipher_encrypt_iv(cc->tfm, out, in, length, iv); | 235 | r = crypto_blkcipher_encrypt_iv(&desc, out, in, length); |
232 | else | 236 | else |
233 | r = crypto_cipher_decrypt_iv(cc->tfm, out, in, length, iv); | 237 | r = crypto_blkcipher_decrypt_iv(&desc, out, in, length); |
234 | } else { | 238 | } else { |
235 | if (write) | 239 | if (write) |
236 | r = crypto_cipher_encrypt(cc->tfm, out, in, length); | 240 | r = crypto_blkcipher_encrypt(&desc, out, in, length); |
237 | else | 241 | else |
238 | r = crypto_cipher_decrypt(cc->tfm, out, in, length); | 242 | r = crypto_blkcipher_decrypt(&desc, out, in, length); |
239 | } | 243 | } |
240 | 244 | ||
241 | return r; | 245 | return r; |
@@ -510,13 +514,12 @@ static void crypt_encode_key(char *hex, u8 *key, unsigned int size) | |||
510 | static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) | 514 | static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) |
511 | { | 515 | { |
512 | struct crypt_config *cc; | 516 | struct crypt_config *cc; |
513 | struct crypto_tfm *tfm; | 517 | struct crypto_blkcipher *tfm; |
514 | char *tmp; | 518 | char *tmp; |
515 | char *cipher; | 519 | char *cipher; |
516 | char *chainmode; | 520 | char *chainmode; |
517 | char *ivmode; | 521 | char *ivmode; |
518 | char *ivopts; | 522 | char *ivopts; |
519 | unsigned int crypto_flags; | ||
520 | unsigned int key_size; | 523 | unsigned int key_size; |
521 | unsigned long long tmpll; | 524 | unsigned long long tmpll; |
522 | 525 | ||
@@ -556,31 +559,25 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) | |||
556 | ivmode = "plain"; | 559 | ivmode = "plain"; |
557 | } | 560 | } |
558 | 561 | ||
559 | /* Choose crypto_flags according to chainmode */ | 562 | if (strcmp(chainmode, "ecb") && !ivmode) { |
560 | if (strcmp(chainmode, "cbc") == 0) | 563 | ti->error = "This chaining mode requires an IV mechanism"; |
561 | crypto_flags = CRYPTO_TFM_MODE_CBC; | ||
562 | else if (strcmp(chainmode, "ecb") == 0) | ||
563 | crypto_flags = CRYPTO_TFM_MODE_ECB; | ||
564 | else { | ||
565 | ti->error = "Unknown chaining mode"; | ||
566 | goto bad1; | 564 | goto bad1; |
567 | } | 565 | } |
568 | 566 | ||
569 | if (crypto_flags != CRYPTO_TFM_MODE_ECB && !ivmode) { | 567 | if (snprintf(cc->cipher, CRYPTO_MAX_ALG_NAME, "%s(%s)", chainmode, |
570 | ti->error = "This chaining mode requires an IV mechanism"; | 568 | cipher) >= CRYPTO_MAX_ALG_NAME) { |
569 | ti->error = "Chain mode + cipher name is too long"; | ||
571 | goto bad1; | 570 | goto bad1; |
572 | } | 571 | } |
573 | 572 | ||
574 | tfm = crypto_alloc_tfm(cipher, crypto_flags | CRYPTO_TFM_REQ_MAY_SLEEP); | 573 | tfm = crypto_alloc_blkcipher(cc->cipher, 0, CRYPTO_ALG_ASYNC); |
575 | if (!tfm) { | 574 | if (IS_ERR(tfm)) { |
576 | ti->error = "Error allocating crypto tfm"; | 575 | ti->error = "Error allocating crypto tfm"; |
577 | goto bad1; | 576 | goto bad1; |
578 | } | 577 | } |
579 | if (crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER) { | ||
580 | ti->error = "Expected cipher algorithm"; | ||
581 | goto bad2; | ||
582 | } | ||
583 | 578 | ||
579 | strcpy(cc->cipher, cipher); | ||
580 | strcpy(cc->chainmode, chainmode); | ||
584 | cc->tfm = tfm; | 581 | cc->tfm = tfm; |
585 | 582 | ||
586 | /* | 583 | /* |
@@ -603,12 +600,12 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) | |||
603 | cc->iv_gen_ops->ctr(cc, ti, ivopts) < 0) | 600 | cc->iv_gen_ops->ctr(cc, ti, ivopts) < 0) |
604 | goto bad2; | 601 | goto bad2; |
605 | 602 | ||
606 | if (tfm->crt_cipher.cit_decrypt_iv && tfm->crt_cipher.cit_encrypt_iv) | 603 | cc->iv_size = crypto_blkcipher_ivsize(tfm); |
604 | if (cc->iv_size) | ||
607 | /* at least a 64 bit sector number should fit in our buffer */ | 605 | /* at least a 64 bit sector number should fit in our buffer */ |
608 | cc->iv_size = max(crypto_tfm_alg_ivsize(tfm), | 606 | cc->iv_size = max(cc->iv_size, |
609 | (unsigned int)(sizeof(u64) / sizeof(u8))); | 607 | (unsigned int)(sizeof(u64) / sizeof(u8))); |
610 | else { | 608 | else { |
611 | cc->iv_size = 0; | ||
612 | if (cc->iv_gen_ops) { | 609 | if (cc->iv_gen_ops) { |
613 | DMWARN("Selected cipher does not support IVs"); | 610 | DMWARN("Selected cipher does not support IVs"); |
614 | if (cc->iv_gen_ops->dtr) | 611 | if (cc->iv_gen_ops->dtr) |
@@ -629,7 +626,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) | |||
629 | goto bad4; | 626 | goto bad4; |
630 | } | 627 | } |
631 | 628 | ||
632 | if (tfm->crt_cipher.cit_setkey(tfm, cc->key, key_size) < 0) { | 629 | if (crypto_blkcipher_setkey(tfm, cc->key, key_size) < 0) { |
633 | ti->error = "Error setting key"; | 630 | ti->error = "Error setting key"; |
634 | goto bad5; | 631 | goto bad5; |
635 | } | 632 | } |
@@ -675,7 +672,7 @@ bad3: | |||
675 | if (cc->iv_gen_ops && cc->iv_gen_ops->dtr) | 672 | if (cc->iv_gen_ops && cc->iv_gen_ops->dtr) |
676 | cc->iv_gen_ops->dtr(cc); | 673 | cc->iv_gen_ops->dtr(cc); |
677 | bad2: | 674 | bad2: |
678 | crypto_free_tfm(tfm); | 675 | crypto_free_blkcipher(tfm); |
679 | bad1: | 676 | bad1: |
680 | /* Must zero key material before freeing */ | 677 | /* Must zero key material before freeing */ |
681 | memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8)); | 678 | memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8)); |
@@ -693,7 +690,7 @@ static void crypt_dtr(struct dm_target *ti) | |||
693 | kfree(cc->iv_mode); | 690 | kfree(cc->iv_mode); |
694 | if (cc->iv_gen_ops && cc->iv_gen_ops->dtr) | 691 | if (cc->iv_gen_ops && cc->iv_gen_ops->dtr) |
695 | cc->iv_gen_ops->dtr(cc); | 692 | cc->iv_gen_ops->dtr(cc); |
696 | crypto_free_tfm(cc->tfm); | 693 | crypto_free_blkcipher(cc->tfm); |
697 | dm_put_device(ti, cc->dev); | 694 | dm_put_device(ti, cc->dev); |
698 | 695 | ||
699 | /* Must zero key material before freeing */ | 696 | /* Must zero key material before freeing */ |
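One detail both the error path and the destructor preserve: struct crypt_config carries key material, so it is wiped before the memory is freed, with the tfm released first through the new type-specific helper. A condensed sketch of the teardown ordering:

    /* Free the transform, then scrub key bytes before kfree(). */
    crypto_free_blkcipher(cc->tfm);
    dm_put_device(ti, cc->dev);

    /* Must zero key material before freeing */
    memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8));
    kfree(cc);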
@@ -858,18 +855,9 @@ static int crypt_status(struct dm_target *ti, status_type_t type, | |||
858 | break; | 855 | break; |
859 | 856 | ||
860 | case STATUSTYPE_TABLE: | 857 | case STATUSTYPE_TABLE: |
861 | cipher = crypto_tfm_alg_name(cc->tfm); | 858 | cipher = crypto_blkcipher_name(cc->tfm); |
862 | 859 | ||
863 | switch(cc->tfm->crt_cipher.cit_mode) { | 860 | chainmode = cc->chainmode; |
864 | case CRYPTO_TFM_MODE_CBC: | ||
865 | chainmode = "cbc"; | ||
866 | break; | ||
867 | case CRYPTO_TFM_MODE_ECB: | ||
868 | chainmode = "ecb"; | ||
869 | break; | ||
870 | default: | ||
871 | BUG(); | ||
872 | } | ||
873 | 861 | ||
874 | if (cc->iv_mode) | 862 | if (cc->iv_mode) |
875 | DMEMIT("%s-%s-%s ", cipher, chainmode, cc->iv_mode); | 863 | DMEMIT("%s-%s-%s ", cipher, chainmode, cc->iv_mode); |
diff --git a/drivers/net/ppp_mppe.c b/drivers/net/ppp_mppe.c
index 51ff9a9d1bb5..f3655fd772f5 100644
--- a/drivers/net/ppp_mppe.c
+++ b/drivers/net/ppp_mppe.c
@@ -43,6 +43,7 @@ | |||
43 | * deprecated in 2.6 | 43 | * deprecated in 2.6 |
44 | */ | 44 | */ |
45 | 45 | ||
46 | #include <linux/err.h> | ||
46 | #include <linux/module.h> | 47 | #include <linux/module.h> |
47 | #include <linux/kernel.h> | 48 | #include <linux/kernel.h> |
48 | #include <linux/version.h> | 49 | #include <linux/version.h> |
@@ -64,12 +65,13 @@ MODULE_LICENSE("Dual BSD/GPL"); | |||
64 | MODULE_ALIAS("ppp-compress-" __stringify(CI_MPPE)); | 65 | MODULE_ALIAS("ppp-compress-" __stringify(CI_MPPE)); |
65 | MODULE_VERSION("1.0.2"); | 66 | MODULE_VERSION("1.0.2"); |
66 | 67 | ||
67 | static void | 68 | static unsigned int |
68 | setup_sg(struct scatterlist *sg, const void *address, unsigned int length) | 69 | setup_sg(struct scatterlist *sg, const void *address, unsigned int length) |
69 | { | 70 | { |
70 | sg[0].page = virt_to_page(address); | 71 | sg[0].page = virt_to_page(address); |
71 | sg[0].offset = offset_in_page(address); | 72 | sg[0].offset = offset_in_page(address); |
72 | sg[0].length = length; | 73 | sg[0].length = length; |
74 | return length; | ||
73 | } | 75 | } |
74 | 76 | ||
75 | #define SHA1_PAD_SIZE 40 | 77 | #define SHA1_PAD_SIZE 40 |
@@ -95,8 +97,8 @@ static inline void sha_pad_init(struct sha_pad *shapad) | |||
95 | * State for an MPPE (de)compressor. | 97 | * State for an MPPE (de)compressor. |
96 | */ | 98 | */ |
97 | struct ppp_mppe_state { | 99 | struct ppp_mppe_state { |
98 | struct crypto_tfm *arc4; | 100 | struct crypto_blkcipher *arc4; |
99 | struct crypto_tfm *sha1; | 101 | struct crypto_hash *sha1; |
100 | unsigned char *sha1_digest; | 102 | unsigned char *sha1_digest; |
101 | unsigned char master_key[MPPE_MAX_KEY_LEN]; | 103 | unsigned char master_key[MPPE_MAX_KEY_LEN]; |
102 | unsigned char session_key[MPPE_MAX_KEY_LEN]; | 104 | unsigned char session_key[MPPE_MAX_KEY_LEN]; |
@@ -136,14 +138,21 @@ struct ppp_mppe_state { | |||
136 | */ | 138 | */ |
137 | static void get_new_key_from_sha(struct ppp_mppe_state * state, unsigned char *InterimKey) | 139 | static void get_new_key_from_sha(struct ppp_mppe_state * state, unsigned char *InterimKey) |
138 | { | 140 | { |
141 | struct hash_desc desc; | ||
139 | struct scatterlist sg[4]; | 142 | struct scatterlist sg[4]; |
143 | unsigned int nbytes; | ||
140 | 144 | ||
141 | setup_sg(&sg[0], state->master_key, state->keylen); | 145 | nbytes = setup_sg(&sg[0], state->master_key, state->keylen); |
142 | setup_sg(&sg[1], sha_pad->sha_pad1, sizeof(sha_pad->sha_pad1)); | 146 | nbytes += setup_sg(&sg[1], sha_pad->sha_pad1, |
143 | setup_sg(&sg[2], state->session_key, state->keylen); | 147 | sizeof(sha_pad->sha_pad1)); |
144 | setup_sg(&sg[3], sha_pad->sha_pad2, sizeof(sha_pad->sha_pad2)); | 148 | nbytes += setup_sg(&sg[2], state->session_key, state->keylen); |
149 | nbytes += setup_sg(&sg[3], sha_pad->sha_pad2, | ||
150 | sizeof(sha_pad->sha_pad2)); | ||
145 | 151 | ||
146 | crypto_digest_digest (state->sha1, sg, 4, state->sha1_digest); | 152 | desc.tfm = state->sha1; |
153 | desc.flags = 0; | ||
154 | |||
155 | crypto_hash_digest(&desc, sg, nbytes, state->sha1_digest); | ||
147 | 156 | ||
148 | memcpy(InterimKey, state->sha1_digest, state->keylen); | 157 | memcpy(InterimKey, state->sha1_digest, state->keylen); |
149 | } | 158 | } |
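The digest side mirrors the cipher side, with one extra twist: crypto_digest_digest() counted scatterlist entries, while crypto_hash_digest() wants a byte count, which is why setup_sg() now returns the length it mapped so callers can accumulate nbytes. A condensed sketch of the one-shot pattern used above:

    /* One-shot digest over a scatterlist; nbytes is total bytes,
     * not the number of sg entries. */
    struct hash_desc desc = {
            .tfm   = state->sha1,
            .flags = 0,
    };

    crypto_hash_digest(&desc, sg, nbytes, state->sha1_digest);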
@@ -156,14 +165,15 @@ static void mppe_rekey(struct ppp_mppe_state * state, int initial_key) | |||
156 | { | 165 | { |
157 | unsigned char InterimKey[MPPE_MAX_KEY_LEN]; | 166 | unsigned char InterimKey[MPPE_MAX_KEY_LEN]; |
158 | struct scatterlist sg_in[1], sg_out[1]; | 167 | struct scatterlist sg_in[1], sg_out[1]; |
168 | struct blkcipher_desc desc = { .tfm = state->arc4 }; | ||
159 | 169 | ||
160 | get_new_key_from_sha(state, InterimKey); | 170 | get_new_key_from_sha(state, InterimKey); |
161 | if (!initial_key) { | 171 | if (!initial_key) { |
162 | crypto_cipher_setkey(state->arc4, InterimKey, state->keylen); | 172 | crypto_blkcipher_setkey(state->arc4, InterimKey, state->keylen); |
163 | setup_sg(sg_in, InterimKey, state->keylen); | 173 | setup_sg(sg_in, InterimKey, state->keylen); |
164 | setup_sg(sg_out, state->session_key, state->keylen); | 174 | setup_sg(sg_out, state->session_key, state->keylen); |
165 | if (crypto_cipher_encrypt(state->arc4, sg_out, sg_in, | 175 | if (crypto_blkcipher_encrypt(&desc, sg_out, sg_in, |
166 | state->keylen) != 0) { | 176 | state->keylen) != 0) { |
167 | printk(KERN_WARNING "mppe_rekey: cipher_encrypt failed\n"); | 177 | printk(KERN_WARNING "mppe_rekey: cipher_encrypt failed\n"); |
168 | } | 178 | } |
169 | } else { | 179 | } else { |
@@ -175,7 +185,7 @@ static void mppe_rekey(struct ppp_mppe_state * state, int initial_key) | |||
175 | state->session_key[1] = 0x26; | 185 | state->session_key[1] = 0x26; |
176 | state->session_key[2] = 0x9e; | 186 | state->session_key[2] = 0x9e; |
177 | } | 187 | } |
178 | crypto_cipher_setkey(state->arc4, state->session_key, state->keylen); | 188 | crypto_blkcipher_setkey(state->arc4, state->session_key, state->keylen); |
179 | } | 189 | } |
180 | 190 | ||
181 | /* | 191 | /* |
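mppe_rekey drives the same tfm through a setkey and an encrypt pass, so one desc initialized at the top of the function serves every call; only .tfm needs filling here, since arc4 behind the ecb() template takes no IV. A sketch of the rekey step, using the names from the hunk above:

    /* One desc per invocation; ecb(arc4) carries no IV. */
    struct blkcipher_desc desc = { .tfm = state->arc4 };

    crypto_blkcipher_setkey(state->arc4, InterimKey, state->keylen);
    if (crypto_blkcipher_encrypt(&desc, sg_out, sg_in, state->keylen) != 0)
            printk(KERN_WARNING "mppe_rekey: cipher_encrypt failed\n");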
@@ -196,15 +206,19 @@ static void *mppe_alloc(unsigned char *options, int optlen) | |||
196 | 206 | ||
197 | memset(state, 0, sizeof(*state)); | 207 | memset(state, 0, sizeof(*state)); |
198 | 208 | ||
199 | state->arc4 = crypto_alloc_tfm("arc4", 0); | 209 | state->arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC); |
200 | if (!state->arc4) | 210 | if (IS_ERR(state->arc4)) { |
211 | state->arc4 = NULL; | ||
201 | goto out_free; | 212 | goto out_free; |
213 | } | ||
202 | 214 | ||
203 | state->sha1 = crypto_alloc_tfm("sha1", 0); | 215 | state->sha1 = crypto_alloc_hash("sha1", 0, CRYPTO_ALG_ASYNC); |
204 | if (!state->sha1) | 216 | if (IS_ERR(state->sha1)) { |
217 | state->sha1 = NULL; | ||
205 | goto out_free; | 218 | goto out_free; |
219 | } | ||
206 | 220 | ||
207 | digestsize = crypto_tfm_alg_digestsize(state->sha1); | 221 | digestsize = crypto_hash_digestsize(state->sha1); |
208 | if (digestsize < MPPE_MAX_KEY_LEN) | 222 | if (digestsize < MPPE_MAX_KEY_LEN) |
209 | goto out_free; | 223 | goto out_free; |
210 | 224 | ||
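Allocation failures change shape as well: crypto_alloc_tfm() returned NULL, while the new allocators return an ERR_PTR, so error paths must test with IS_ERR() and reset the pointer before jumping to a cleanup label that still performs plain NULL checks. Note also that bare "arc4" becomes "ecb(arc4)": the stream cipher is driven through the trivial ECB template so it fits the blkcipher interface. The pattern, as used above:

    /* ERR_PTR-aware allocation feeding a NULL-checking cleanup path. */
    state->arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
    if (IS_ERR(state->arc4)) {
            state->arc4 = NULL;    /* keep the out_free NULL checks valid */
            goto out_free;
    }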
@@ -229,9 +243,9 @@ static void *mppe_alloc(unsigned char *options, int optlen) | |||
229 | if (state->sha1_digest) | 243 | if (state->sha1_digest) |
230 | kfree(state->sha1_digest); | 244 | kfree(state->sha1_digest); |
231 | if (state->sha1) | 245 | if (state->sha1) |
232 | crypto_free_tfm(state->sha1); | 246 | crypto_free_hash(state->sha1); |
233 | if (state->arc4) | 247 | if (state->arc4) |
234 | crypto_free_tfm(state->arc4); | 248 | crypto_free_blkcipher(state->arc4); |
235 | kfree(state); | 249 | kfree(state); |
236 | out: | 250 | out: |
237 | return NULL; | 251 | return NULL; |
@@ -247,9 +261,9 @@ static void mppe_free(void *arg) | |||
247 | if (state->sha1_digest) | 261 | if (state->sha1_digest) |
248 | kfree(state->sha1_digest); | 262 | kfree(state->sha1_digest); |
249 | if (state->sha1) | 263 | if (state->sha1) |
250 | crypto_free_tfm(state->sha1); | 264 | crypto_free_hash(state->sha1); |
251 | if (state->arc4) | 265 | if (state->arc4) |
252 | crypto_free_tfm(state->arc4); | 266 | crypto_free_blkcipher(state->arc4); |
253 | kfree(state); | 267 | kfree(state); |
254 | } | 268 | } |
255 | } | 269 | } |
@@ -356,6 +370,7 @@ mppe_compress(void *arg, unsigned char *ibuf, unsigned char *obuf, | |||
356 | int isize, int osize) | 370 | int isize, int osize) |
357 | { | 371 | { |
358 | struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg; | 372 | struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg; |
373 | struct blkcipher_desc desc = { .tfm = state->arc4 }; | ||
359 | int proto; | 374 | int proto; |
360 | struct scatterlist sg_in[1], sg_out[1]; | 375 | struct scatterlist sg_in[1], sg_out[1]; |
361 | 376 | ||
@@ -413,7 +428,7 @@ mppe_compress(void *arg, unsigned char *ibuf, unsigned char *obuf, | |||
413 | /* Encrypt packet */ | 428 | /* Encrypt packet */ |
414 | setup_sg(sg_in, ibuf, isize); | 429 | setup_sg(sg_in, ibuf, isize); |
415 | setup_sg(sg_out, obuf, osize); | 430 | setup_sg(sg_out, obuf, osize); |
416 | if (crypto_cipher_encrypt(state->arc4, sg_out, sg_in, isize) != 0) { | 431 | if (crypto_blkcipher_encrypt(&desc, sg_out, sg_in, isize) != 0) { |
417 | printk(KERN_DEBUG "crypto_cipher_encrypt failed\n"); | 432 | printk(KERN_DEBUG "crypto_cipher_encrypt failed\n"); |
418 | return -1; | 433 | return -1; |
419 | } | 434 | } |
@@ -462,6 +477,7 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf, | |||
462 | int osize) | 477 | int osize) |
463 | { | 478 | { |
464 | struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg; | 479 | struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg; |
480 | struct blkcipher_desc desc = { .tfm = state->arc4 }; | ||
465 | unsigned ccount; | 481 | unsigned ccount; |
466 | int flushed = MPPE_BITS(ibuf) & MPPE_BIT_FLUSHED; | 482 | int flushed = MPPE_BITS(ibuf) & MPPE_BIT_FLUSHED; |
467 | int sanity = 0; | 483 | int sanity = 0; |
@@ -599,7 +615,7 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf, | |||
599 | */ | 615 | */ |
600 | setup_sg(sg_in, ibuf, 1); | 616 | setup_sg(sg_in, ibuf, 1); |
601 | setup_sg(sg_out, obuf, 1); | 617 | setup_sg(sg_out, obuf, 1); |
602 | if (crypto_cipher_decrypt(state->arc4, sg_out, sg_in, 1) != 0) { | 618 | if (crypto_blkcipher_decrypt(&desc, sg_out, sg_in, 1) != 0) { |
603 | printk(KERN_DEBUG "crypto_cypher_decrypt failed\n"); | 619 | printk(KERN_DEBUG "crypto_cypher_decrypt failed\n"); |
604 | return DECOMP_ERROR; | 620 | return DECOMP_ERROR; |
605 | } | 621 | } |
@@ -619,7 +635,7 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf, | |||
619 | /* And finally, decrypt the rest of the packet. */ | 635 | /* And finally, decrypt the rest of the packet. */ |
620 | setup_sg(sg_in, ibuf + 1, isize - 1); | 636 | setup_sg(sg_in, ibuf + 1, isize - 1); |
621 | setup_sg(sg_out, obuf + 1, osize - 1); | 637 | setup_sg(sg_out, obuf + 1, osize - 1); |
622 | if (crypto_cipher_decrypt(state->arc4, sg_out, sg_in, isize - 1) != 0) { | 638 | if (crypto_blkcipher_decrypt(&desc, sg_out, sg_in, isize - 1)) { |
623 | printk(KERN_DEBUG "crypto_cipher_decrypt failed\n"); | 639 | printk(KERN_DEBUG "crypto_cipher_decrypt failed\n"); |
624 | return DECOMP_ERROR; | 640 | return DECOMP_ERROR; |
625 | } | 641 | } |
@@ -694,8 +710,8 @@ static struct compressor ppp_mppe = { | |||
694 | static int __init ppp_mppe_init(void) | 710 | static int __init ppp_mppe_init(void) |
695 | { | 711 | { |
696 | int answer; | 712 | int answer; |
697 | if (!(crypto_alg_available("arc4", 0) && | 713 | if (!(crypto_has_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC) && |
698 | crypto_alg_available("sha1", 0))) | 714 | crypto_has_hash("sha1", 0, CRYPTO_ALG_ASYNC))) |
699 | return -ENODEV; | 715 | return -ENODEV; |
700 | 716 | ||
701 | sha_pad = kmalloc(sizeof(struct sha_pad), GFP_KERNEL); | 717 | sha_pad = kmalloc(sizeof(struct sha_pad), GFP_KERNEL); |
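Module init probes for availability with type-specific predicates that take the same (name, type, mask) triple as the allocators, so the probe and the later allocation are guaranteed to agree on what they are asking for:

    /* Probe with exactly the triple the allocators will use. */
    if (!(crypto_has_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC) &&
          crypto_has_hash("sha1", 0, CRYPTO_ALG_ASYNC)))
            return -ENODEV;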
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index a4dd13942714..170c500169da 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -19,6 +19,7 @@ | |||
19 | 19 | ||
20 | ======================================================================*/ | 20 | ======================================================================*/ |
21 | 21 | ||
22 | #include <linux/err.h> | ||
22 | #include <linux/init.h> | 23 | #include <linux/init.h> |
23 | 24 | ||
24 | #include <linux/kernel.h> | 25 | #include <linux/kernel.h> |
@@ -1203,7 +1204,7 @@ struct airo_info { | |||
1203 | struct iw_spy_data spy_data; | 1204 | struct iw_spy_data spy_data; |
1204 | struct iw_public_data wireless_data; | 1205 | struct iw_public_data wireless_data; |
1205 | /* MIC stuff */ | 1206 | /* MIC stuff */ |
1206 | struct crypto_tfm *tfm; | 1207 | struct crypto_cipher *tfm; |
1207 | mic_module mod[2]; | 1208 | mic_module mod[2]; |
1208 | mic_statistics micstats; | 1209 | mic_statistics micstats; |
1209 | HostRxDesc rxfids[MPI_MAX_FIDS]; // rx/tx/config MPI350 descriptors | 1210 | HostRxDesc rxfids[MPI_MAX_FIDS]; // rx/tx/config MPI350 descriptors |
@@ -1271,7 +1272,8 @@ static int flashrestart(struct airo_info *ai,struct net_device *dev); | |||
1271 | 1272 | ||
1272 | static int RxSeqValid (struct airo_info *ai,miccntx *context,int mcast,u32 micSeq); | 1273 | static int RxSeqValid (struct airo_info *ai,miccntx *context,int mcast,u32 micSeq); |
1273 | static void MoveWindow(miccntx *context, u32 micSeq); | 1274 | static void MoveWindow(miccntx *context, u32 micSeq); |
1274 | static void emmh32_setseed(emmh32_context *context, u8 *pkey, int keylen, struct crypto_tfm *); | 1275 | static void emmh32_setseed(emmh32_context *context, u8 *pkey, int keylen, |
1276 | struct crypto_cipher *tfm); | ||
1275 | static void emmh32_init(emmh32_context *context); | 1277 | static void emmh32_init(emmh32_context *context); |
1276 | static void emmh32_update(emmh32_context *context, u8 *pOctets, int len); | 1278 | static void emmh32_update(emmh32_context *context, u8 *pOctets, int len); |
1277 | static void emmh32_final(emmh32_context *context, u8 digest[4]); | 1279 | static void emmh32_final(emmh32_context *context, u8 digest[4]); |
@@ -1339,10 +1341,11 @@ static int micsetup(struct airo_info *ai) { | |||
1339 | int i; | 1341 | int i; |
1340 | 1342 | ||
1341 | if (ai->tfm == NULL) | 1343 | if (ai->tfm == NULL) |
1342 | ai->tfm = crypto_alloc_tfm("aes", CRYPTO_TFM_REQ_MAY_SLEEP); | 1344 | ai->tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC); |
1343 | 1345 | ||
1344 | if (ai->tfm == NULL) { | 1346 | if (IS_ERR(ai->tfm)) { |
1345 | airo_print_err(ai->dev->name, "failed to load transform for AES"); | 1347 | airo_print_err(ai->dev->name, "failed to load transform for AES"); |
1348 | ai->tfm = NULL; | ||
1346 | return ERROR; | 1349 | return ERROR; |
1347 | } | 1350 | } |
1348 | 1351 | ||
@@ -1608,7 +1611,8 @@ static void MoveWindow(miccntx *context, u32 micSeq) | |||
1608 | static unsigned char aes_counter[16]; | 1611 | static unsigned char aes_counter[16]; |
1609 | 1612 | ||
1610 | /* expand the key to fill the MMH coefficient array */ | 1613 | /* expand the key to fill the MMH coefficient array */ |
1611 | static void emmh32_setseed(emmh32_context *context, u8 *pkey, int keylen, struct crypto_tfm *tfm) | 1614 | static void emmh32_setseed(emmh32_context *context, u8 *pkey, int keylen, |
1615 | struct crypto_cipher *tfm) | ||
1612 | { | 1616 | { |
1613 | /* take the keying material, expand if necessary, truncate at 16-bytes */ | 1617 | /* take the keying material, expand if necessary, truncate at 16-bytes */ |
1614 | /* run through AES counter mode to generate context->coeff[] */ | 1618 | /* run through AES counter mode to generate context->coeff[] */ |
@@ -1616,7 +1620,6 @@ static void emmh32_setseed(emmh32_context *context, u8 *pkey, int keylen, struct | |||
1616 | int i,j; | 1620 | int i,j; |
1617 | u32 counter; | 1621 | u32 counter; |
1618 | u8 *cipher, plain[16]; | 1622 | u8 *cipher, plain[16]; |
1619 | struct scatterlist sg[1]; | ||
1620 | 1623 | ||
1621 | crypto_cipher_setkey(tfm, pkey, 16); | 1624 | crypto_cipher_setkey(tfm, pkey, 16); |
1622 | counter = 0; | 1625 | counter = 0; |
@@ -1627,9 +1630,8 @@ static void emmh32_setseed(emmh32_context *context, u8 *pkey, int keylen, struct | |||
1627 | aes_counter[12] = (u8)(counter >> 24); | 1630 | aes_counter[12] = (u8)(counter >> 24); |
1628 | counter++; | 1631 | counter++; |
1629 | memcpy (plain, aes_counter, 16); | 1632 | memcpy (plain, aes_counter, 16); |
1630 | sg_set_buf(sg, plain, 16); | 1633 | crypto_cipher_encrypt_one(tfm, plain, plain); |
1631 | crypto_cipher_encrypt(tfm, sg, sg, 16); | 1634 | cipher = plain; |
1632 | cipher = kmap(sg->page) + sg->offset; | ||
1633 | for (j=0; (j<16) && (i< (sizeof(context->coeff)/sizeof(context->coeff[0]))); ) { | 1635 | for (j=0; (j<16) && (i< (sizeof(context->coeff)/sizeof(context->coeff[0]))); ) { |
1634 | context->coeff[i++] = ntohl(*(u32 *)&cipher[j]); | 1636 | context->coeff[i++] = ntohl(*(u32 *)&cipher[j]); |
1635 | j += 4; | 1637 | j += 4; |
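airo only ever encrypts single 16-byte AES blocks to expand its MIC seed, so it moves to the lighter crypto_cipher interface: no scatterlists and no kmap(), just an in-place single-block primitive on plain virtual addresses. The counter-mode expansion step, assuming a tfm already keyed with crypto_cipher_setkey() as above:

    /* Single-block AES: dst and src are ordinary pointers, and
     * in-place operation makes the old kmap() dance unnecessary. */
    memcpy(plain, aes_counter, 16);
    crypto_cipher_encrypt_one(tfm, plain, plain);
    cipher = plain;                /* ciphertext is in the stack buffer */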
@@ -2432,7 +2434,7 @@ void stop_airo_card( struct net_device *dev, int freeres ) | |||
2432 | ai->shared, ai->shared_dma); | 2434 | ai->shared, ai->shared_dma); |
2433 | } | 2435 | } |
2434 | } | 2436 | } |
2435 | crypto_free_tfm(ai->tfm); | 2437 | crypto_free_cipher(ai->tfm); |
2436 | del_airo_dev( dev ); | 2438 | del_airo_dev( dev ); |
2437 | free_netdev( dev ); | 2439 | free_netdev( dev ); |
2438 | } | 2440 | } |
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 058f094f945a..66a1ae1d6982 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -26,6 +26,7 @@ | |||
26 | * Zhenyu Wang | 26 | * Zhenyu Wang |
27 | */ | 27 | */ |
28 | 28 | ||
29 | #include <linux/err.h> | ||
29 | #include <linux/types.h> | 30 | #include <linux/types.h> |
30 | #include <linux/list.h> | 31 | #include <linux/list.h> |
31 | #include <linux/inet.h> | 32 | #include <linux/inet.h> |
@@ -107,8 +108,11 @@ iscsi_hdr_digest(struct iscsi_conn *conn, struct iscsi_buf *buf, | |||
107 | u8* crc) | 108 | u8* crc) |
108 | { | 109 | { |
109 | struct iscsi_tcp_conn *tcp_conn = conn->dd_data; | 110 | struct iscsi_tcp_conn *tcp_conn = conn->dd_data; |
111 | struct hash_desc desc; | ||
110 | 112 | ||
111 | crypto_digest_digest(tcp_conn->tx_tfm, &buf->sg, 1, crc); | 113 | desc.tfm = tcp_conn->tx_tfm; |
114 | desc.flags = 0; | ||
115 | crypto_hash_digest(&desc, &buf->sg, buf->sg.length, crc); | ||
112 | buf->sg.length += sizeof(uint32_t); | 116 | buf->sg.length += sizeof(uint32_t); |
113 | } | 117 | } |
114 | 118 | ||
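Header digests stay one-shot, so the desc can live on the stack for the duration of a single crypto_hash_digest() call and only the tfm pointer persists in the connection:

    /* One-shot header CRC: a stack desc wrapping the persistent tfm. */
    struct hash_desc desc = {
            .tfm   = tcp_conn->tx_tfm,
            .flags = 0,
    };

    crypto_hash_digest(&desc, &buf->sg, buf->sg.length, crc);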
@@ -452,11 +456,14 @@ iscsi_tcp_hdr_recv(struct iscsi_conn *conn) | |||
452 | } | 456 | } |
453 | 457 | ||
454 | if (conn->hdrdgst_en) { | 458 | if (conn->hdrdgst_en) { |
459 | struct hash_desc desc; | ||
455 | struct scatterlist sg; | 460 | struct scatterlist sg; |
456 | 461 | ||
457 | sg_init_one(&sg, (u8 *)hdr, | 462 | sg_init_one(&sg, (u8 *)hdr, |
458 | sizeof(struct iscsi_hdr) + ahslen); | 463 | sizeof(struct iscsi_hdr) + ahslen); |
459 | crypto_digest_digest(tcp_conn->rx_tfm, &sg, 1, (u8 *)&cdgst); | 464 | desc.tfm = tcp_conn->rx_tfm; |
465 | desc.flags = 0; | ||
466 | crypto_hash_digest(&desc, &sg, sg.length, (u8 *)&cdgst); | ||
460 | rdgst = *(uint32_t*)((char*)hdr + sizeof(struct iscsi_hdr) + | 467 | rdgst = *(uint32_t*)((char*)hdr + sizeof(struct iscsi_hdr) + |
461 | ahslen); | 468 | ahslen); |
462 | if (cdgst != rdgst) { | 469 | if (cdgst != rdgst) { |
@@ -673,7 +680,7 @@ partial_sg_digest_update(struct iscsi_tcp_conn *tcp_conn, | |||
673 | memcpy(&temp, sg, sizeof(struct scatterlist)); | 680 | memcpy(&temp, sg, sizeof(struct scatterlist)); |
674 | temp.offset = offset; | 681 | temp.offset = offset; |
675 | temp.length = length; | 682 | temp.length = length; |
676 | crypto_digest_update(tcp_conn->data_rx_tfm, &temp, 1); | 683 | crypto_hash_update(&tcp_conn->data_rx_hash, &temp, length); |
677 | } | 684 | } |
678 | 685 | ||
679 | static void | 686 | static void |
@@ -682,7 +689,7 @@ iscsi_recv_digest_update(struct iscsi_tcp_conn *tcp_conn, char* buf, int len) | |||
682 | struct scatterlist tmp; | 689 | struct scatterlist tmp; |
683 | 690 | ||
684 | sg_init_one(&tmp, buf, len); | 691 | sg_init_one(&tmp, buf, len); |
685 | crypto_digest_update(tcp_conn->data_rx_tfm, &tmp, 1); | 692 | crypto_hash_update(&tcp_conn->data_rx_hash, &tmp, len); |
686 | } | 693 | } |
687 | 694 | ||
688 | static int iscsi_scsi_data_in(struct iscsi_conn *conn) | 695 | static int iscsi_scsi_data_in(struct iscsi_conn *conn) |
@@ -736,9 +743,9 @@ static int iscsi_scsi_data_in(struct iscsi_conn *conn) | |||
736 | if (!rc) { | 743 | if (!rc) { |
737 | if (conn->datadgst_en) { | 744 | if (conn->datadgst_en) { |
738 | if (!offset) | 745 | if (!offset) |
739 | crypto_digest_update( | 746 | crypto_hash_update( |
740 | tcp_conn->data_rx_tfm, | 747 | &tcp_conn->data_rx_hash, |
741 | &sg[i], 1); | 748 | &sg[i], sg[i].length); |
742 | else | 749 | else |
743 | partial_sg_digest_update(tcp_conn, | 750 | partial_sg_digest_update(tcp_conn, |
744 | &sg[i], | 751 | &sg[i], |
@@ -877,8 +884,7 @@ more: | |||
877 | rc = iscsi_tcp_hdr_recv(conn); | 884 | rc = iscsi_tcp_hdr_recv(conn); |
878 | if (!rc && tcp_conn->in.datalen) { | 885 | if (!rc && tcp_conn->in.datalen) { |
879 | if (conn->datadgst_en) { | 886 | if (conn->datadgst_en) { |
880 | BUG_ON(!tcp_conn->data_rx_tfm); | 887 | crypto_hash_init(&tcp_conn->data_rx_hash); |
881 | crypto_digest_init(tcp_conn->data_rx_tfm); | ||
882 | } | 888 | } |
883 | tcp_conn->in_progress = IN_PROGRESS_DATA_RECV; | 889 | tcp_conn->in_progress = IN_PROGRESS_DATA_RECV; |
884 | } else if (rc) { | 890 | } else if (rc) { |
@@ -931,11 +937,11 @@ more: | |||
931 | tcp_conn->in.padding); | 937 | tcp_conn->in.padding); |
932 | memset(pad, 0, tcp_conn->in.padding); | 938 | memset(pad, 0, tcp_conn->in.padding); |
933 | sg_init_one(&sg, pad, tcp_conn->in.padding); | 939 | sg_init_one(&sg, pad, tcp_conn->in.padding); |
934 | crypto_digest_update(tcp_conn->data_rx_tfm, | 940 | crypto_hash_update(&tcp_conn->data_rx_hash, |
935 | &sg, 1); | 941 | &sg, sg.length); |
936 | } | 942 | } |
937 | crypto_digest_final(tcp_conn->data_rx_tfm, | 943 | crypto_hash_final(&tcp_conn->data_rx_hash, |
938 | (u8 *) & tcp_conn->in.datadgst); | 944 | (u8 *)&tcp_conn->in.datadgst); |
939 | debug_tcp("rx digest 0x%x\n", tcp_conn->in.datadgst); | 945 | debug_tcp("rx digest 0x%x\n", tcp_conn->in.datadgst); |
940 | tcp_conn->in_progress = IN_PROGRESS_DDIGEST_RECV; | 946 | tcp_conn->in_progress = IN_PROGRESS_DDIGEST_RECV; |
941 | } else | 947 | } else |
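Data digests need the incremental interface, so the connection embeds a whole struct hash_desc (see the iscsi_tcp.h hunk at the end) rather than a bare tfm pointer; init, update, and final all take the desc, and update sizes are byte counts rather than sg entry counts. The receive-side CRC32C flow in compressed form:

    /* Incremental CRC32C with the desc kept in the connection. */
    crypto_hash_init(&tcp_conn->data_rx_hash);
    crypto_hash_update(&tcp_conn->data_rx_hash, &sg, sg.length);
    crypto_hash_final(&tcp_conn->data_rx_hash,
                      (u8 *)&tcp_conn->in.datadgst);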
@@ -1181,8 +1187,7 @@ iscsi_data_digest_init(struct iscsi_tcp_conn *tcp_conn, | |||
1181 | { | 1187 | { |
1182 | struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; | 1188 | struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; |
1183 | 1189 | ||
1184 | BUG_ON(!tcp_conn->data_tx_tfm); | 1190 | crypto_hash_init(&tcp_conn->data_tx_hash); |
1185 | crypto_digest_init(tcp_conn->data_tx_tfm); | ||
1186 | tcp_ctask->digest_count = 4; | 1191 | tcp_ctask->digest_count = 4; |
1187 | } | 1192 | } |
1188 | 1193 | ||
@@ -1196,7 +1201,7 @@ iscsi_digest_final_send(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask, | |||
1196 | int sent = 0; | 1201 | int sent = 0; |
1197 | 1202 | ||
1198 | if (final) | 1203 | if (final) |
1199 | crypto_digest_final(tcp_conn->data_tx_tfm, (u8*)digest); | 1204 | crypto_hash_final(&tcp_conn->data_tx_hash, (u8 *)digest); |
1200 | 1205 | ||
1201 | iscsi_buf_init_iov(buf, (char*)digest, 4); | 1206 | iscsi_buf_init_iov(buf, (char*)digest, 4); |
1202 | rc = iscsi_sendpage(conn, buf, &tcp_ctask->digest_count, &sent); | 1207 | rc = iscsi_sendpage(conn, buf, &tcp_ctask->digest_count, &sent); |
@@ -1491,16 +1496,17 @@ handle_xmstate_imm_data(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) | |||
1491 | if (rc) { | 1496 | if (rc) { |
1492 | tcp_ctask->xmstate |= XMSTATE_IMM_DATA; | 1497 | tcp_ctask->xmstate |= XMSTATE_IMM_DATA; |
1493 | if (conn->datadgst_en) { | 1498 | if (conn->datadgst_en) { |
1494 | crypto_digest_final(tcp_conn->data_tx_tfm, | 1499 | crypto_hash_final(&tcp_conn->data_tx_hash, |
1495 | (u8*)&tcp_ctask->immdigest); | 1500 | (u8 *)&tcp_ctask->immdigest); |
1496 | debug_tcp("tx imm sendpage fail 0x%x\n", | 1501 | debug_tcp("tx imm sendpage fail 0x%x\n", |
1497 | tcp_ctask->datadigest); | 1502 | tcp_ctask->datadigest); |
1498 | } | 1503 | } |
1499 | return rc; | 1504 | return rc; |
1500 | } | 1505 | } |
1501 | if (conn->datadgst_en) | 1506 | if (conn->datadgst_en) |
1502 | crypto_digest_update(tcp_conn->data_tx_tfm, | 1507 | crypto_hash_update(&tcp_conn->data_tx_hash, |
1503 | &tcp_ctask->sendbuf.sg, 1); | 1508 | &tcp_ctask->sendbuf.sg, |
1509 | tcp_ctask->sendbuf.sg.length); | ||
1504 | 1510 | ||
1505 | if (!ctask->imm_count) | 1511 | if (!ctask->imm_count) |
1506 | break; | 1512 | break; |
@@ -1577,8 +1583,8 @@ handle_xmstate_uns_data(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) | |||
1577 | tcp_ctask->xmstate |= XMSTATE_UNS_DATA; | 1583 | tcp_ctask->xmstate |= XMSTATE_UNS_DATA; |
1578 | /* will continue with this ctask later.. */ | 1584 | /* will continue with this ctask later.. */ |
1579 | if (conn->datadgst_en) { | 1585 | if (conn->datadgst_en) { |
1580 | crypto_digest_final(tcp_conn->data_tx_tfm, | 1586 | crypto_hash_final(&tcp_conn->data_tx_hash, |
1581 | (u8 *)&dtask->digest); | 1587 | (u8 *)&dtask->digest); |
1582 | debug_tcp("tx uns data fail 0x%x\n", | 1588 | debug_tcp("tx uns data fail 0x%x\n", |
1583 | dtask->digest); | 1589 | dtask->digest); |
1584 | } | 1590 | } |
@@ -1593,8 +1599,9 @@ handle_xmstate_uns_data(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) | |||
1593 | * so pass it | 1599 | * so pass it |
1594 | */ | 1600 | */ |
1595 | if (conn->datadgst_en && tcp_ctask->sent - start > 0) | 1601 | if (conn->datadgst_en && tcp_ctask->sent - start > 0) |
1596 | crypto_digest_update(tcp_conn->data_tx_tfm, | 1602 | crypto_hash_update(&tcp_conn->data_tx_hash, |
1597 | &tcp_ctask->sendbuf.sg, 1); | 1603 | &tcp_ctask->sendbuf.sg, |
1604 | tcp_ctask->sendbuf.sg.length); | ||
1598 | 1605 | ||
1599 | if (!ctask->data_count) | 1606 | if (!ctask->data_count) |
1600 | break; | 1607 | break; |
@@ -1668,7 +1675,7 @@ solicit_again: | |||
1668 | tcp_ctask->xmstate |= XMSTATE_SOL_DATA; | 1675 | tcp_ctask->xmstate |= XMSTATE_SOL_DATA; |
1669 | /* will continue with this ctask later.. */ | 1676 | /* will continue with this ctask later.. */ |
1670 | if (conn->datadgst_en) { | 1677 | if (conn->datadgst_en) { |
1671 | crypto_digest_final(tcp_conn->data_tx_tfm, | 1678 | crypto_hash_final(&tcp_conn->data_tx_hash, |
1672 | (u8 *)&dtask->digest); | 1679 | (u8 *)&dtask->digest); |
1673 | debug_tcp("r2t data send fail 0x%x\n", dtask->digest); | 1680 | debug_tcp("r2t data send fail 0x%x\n", dtask->digest); |
1674 | } | 1681 | } |
@@ -1677,8 +1684,8 @@ solicit_again: | |||
1677 | 1684 | ||
1678 | BUG_ON(r2t->data_count < 0); | 1685 | BUG_ON(r2t->data_count < 0); |
1679 | if (conn->datadgst_en) | 1686 | if (conn->datadgst_en) |
1680 | crypto_digest_update(tcp_conn->data_tx_tfm, &r2t->sendbuf.sg, | 1687 | crypto_hash_update(&tcp_conn->data_tx_hash, &r2t->sendbuf.sg, |
1681 | 1); | 1688 | r2t->sendbuf.sg.length); |
1682 | 1689 | ||
1683 | if (r2t->data_count) { | 1690 | if (r2t->data_count) { |
1684 | BUG_ON(ctask->sc->use_sg == 0); | 1691 | BUG_ON(ctask->sc->use_sg == 0); |
@@ -1766,8 +1773,9 @@ handle_xmstate_w_pad(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) | |||
1766 | } | 1773 | } |
1767 | 1774 | ||
1768 | if (conn->datadgst_en) { | 1775 | if (conn->datadgst_en) { |
1769 | crypto_digest_update(tcp_conn->data_tx_tfm, | 1776 | crypto_hash_update(&tcp_conn->data_tx_hash, |
1770 | &tcp_ctask->sendbuf.sg, 1); | 1777 | &tcp_ctask->sendbuf.sg, |
1778 | tcp_ctask->sendbuf.sg.length); | ||
1771 | /* imm data? */ | 1779 | /* imm data? */ |
1772 | if (!dtask) { | 1780 | if (!dtask) { |
1773 | rc = iscsi_digest_final_send(conn, ctask, | 1781 | rc = iscsi_digest_final_send(conn, ctask, |
@@ -1963,13 +1971,13 @@ iscsi_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn) | |||
1963 | /* now free tcp_conn */ | 1971 | /* now free tcp_conn */ |
1964 | if (digest) { | 1972 | if (digest) { |
1965 | if (tcp_conn->tx_tfm) | 1973 | if (tcp_conn->tx_tfm) |
1966 | crypto_free_tfm(tcp_conn->tx_tfm); | 1974 | crypto_free_hash(tcp_conn->tx_tfm); |
1967 | if (tcp_conn->rx_tfm) | 1975 | if (tcp_conn->rx_tfm) |
1968 | crypto_free_tfm(tcp_conn->rx_tfm); | 1976 | crypto_free_hash(tcp_conn->rx_tfm); |
1969 | if (tcp_conn->data_tx_tfm) | 1977 | if (tcp_conn->data_tx_hash.tfm) |
1970 | crypto_free_tfm(tcp_conn->data_tx_tfm); | 1978 | crypto_free_hash(tcp_conn->data_tx_hash.tfm); |
1971 | if (tcp_conn->data_rx_tfm) | 1979 | if (tcp_conn->data_rx_hash.tfm) |
1972 | crypto_free_tfm(tcp_conn->data_rx_tfm); | 1980 | crypto_free_hash(tcp_conn->data_rx_hash.tfm); |
1973 | } | 1981 | } |
1974 | 1982 | ||
1975 | kfree(tcp_conn); | 1983 | kfree(tcp_conn); |
@@ -2130,44 +2138,48 @@ iscsi_conn_set_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param, | |||
2130 | if (conn->hdrdgst_en) { | 2138 | if (conn->hdrdgst_en) { |
2131 | tcp_conn->hdr_size += sizeof(__u32); | 2139 | tcp_conn->hdr_size += sizeof(__u32); |
2132 | if (!tcp_conn->tx_tfm) | 2140 | if (!tcp_conn->tx_tfm) |
2133 | tcp_conn->tx_tfm = crypto_alloc_tfm("crc32c", | 2141 | tcp_conn->tx_tfm = |
2134 | 0); | 2142 | crypto_alloc_hash("crc32c", 0, |
2135 | if (!tcp_conn->tx_tfm) | 2143 | CRYPTO_ALG_ASYNC); |
2136 | return -ENOMEM; | 2144 | if (IS_ERR(tcp_conn->tx_tfm)) |
2145 | return PTR_ERR(tcp_conn->tx_tfm); | ||
2137 | if (!tcp_conn->rx_tfm) | 2146 | if (!tcp_conn->rx_tfm) |
2138 | tcp_conn->rx_tfm = crypto_alloc_tfm("crc32c", | 2147 | tcp_conn->rx_tfm = |
2139 | 0); | 2148 | crypto_alloc_hash("crc32c", 0, |
2140 | if (!tcp_conn->rx_tfm) { | 2149 | CRYPTO_ALG_ASYNC); |
2141 | crypto_free_tfm(tcp_conn->tx_tfm); | 2150 | if (IS_ERR(tcp_conn->rx_tfm)) { |
2142 | return -ENOMEM; | 2151 | crypto_free_hash(tcp_conn->tx_tfm); |
2152 | return PTR_ERR(tcp_conn->rx_tfm); | ||
2143 | } | 2153 | } |
2144 | } else { | 2154 | } else { |
2145 | if (tcp_conn->tx_tfm) | 2155 | if (tcp_conn->tx_tfm) |
2146 | crypto_free_tfm(tcp_conn->tx_tfm); | 2156 | crypto_free_hash(tcp_conn->tx_tfm); |
2147 | if (tcp_conn->rx_tfm) | 2157 | if (tcp_conn->rx_tfm) |
2148 | crypto_free_tfm(tcp_conn->rx_tfm); | 2158 | crypto_free_hash(tcp_conn->rx_tfm); |
2149 | } | 2159 | } |
2150 | break; | 2160 | break; |
2151 | case ISCSI_PARAM_DATADGST_EN: | 2161 | case ISCSI_PARAM_DATADGST_EN: |
2152 | iscsi_set_param(cls_conn, param, buf, buflen); | 2162 | iscsi_set_param(cls_conn, param, buf, buflen); |
2153 | if (conn->datadgst_en) { | 2163 | if (conn->datadgst_en) { |
2154 | if (!tcp_conn->data_tx_tfm) | 2164 | if (!tcp_conn->data_tx_hash.tfm) |
2155 | tcp_conn->data_tx_tfm = | 2165 | tcp_conn->data_tx_hash.tfm = |
2156 | crypto_alloc_tfm("crc32c", 0); | 2166 | crypto_alloc_hash("crc32c", 0, |
2157 | if (!tcp_conn->data_tx_tfm) | 2167 | CRYPTO_ALG_ASYNC); |
2158 | return -ENOMEM; | 2168 | if (IS_ERR(tcp_conn->data_tx_hash.tfm)) |
2159 | if (!tcp_conn->data_rx_tfm) | 2169 | return PTR_ERR(tcp_conn->data_tx_hash.tfm); |
2160 | tcp_conn->data_rx_tfm = | 2170 | if (!tcp_conn->data_rx_hash.tfm) |
2161 | crypto_alloc_tfm("crc32c", 0); | 2171 | tcp_conn->data_rx_hash.tfm = |
2162 | if (!tcp_conn->data_rx_tfm) { | 2172 | crypto_alloc_hash("crc32c", 0, |
2163 | crypto_free_tfm(tcp_conn->data_tx_tfm); | 2173 | CRYPTO_ALG_ASYNC); |
2164 | return -ENOMEM; | 2174 | if (IS_ERR(tcp_conn->data_rx_hash.tfm)) { |
2175 | crypto_free_hash(tcp_conn->data_tx_hash.tfm); | ||
2176 | return PTR_ERR(tcp_conn->data_rx_hash.tfm); | ||
2165 | } | 2177 | } |
2166 | } else { | 2178 | } else { |
2167 | if (tcp_conn->data_tx_tfm) | 2179 | if (tcp_conn->data_tx_hash.tfm) |
2168 | crypto_free_tfm(tcp_conn->data_tx_tfm); | 2180 | crypto_free_hash(tcp_conn->data_tx_hash.tfm); |
2169 | if (tcp_conn->data_rx_tfm) | 2181 | if (tcp_conn->data_rx_hash.tfm) |
2170 | crypto_free_tfm(tcp_conn->data_rx_tfm); | 2182 | crypto_free_hash(tcp_conn->data_rx_hash.tfm); |
2171 | } | 2183 | } |
2172 | tcp_conn->sendpage = conn->datadgst_en ? | 2184 | tcp_conn->sendpage = conn->datadgst_en ? |
2173 | sock_no_sendpage : tcp_conn->sock->ops->sendpage; | 2185 | sock_no_sendpage : tcp_conn->sock->ops->sendpage; |
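Enabling a digest allocates both directions and unwinds the first allocation if the second fails; disabling frees whatever is present, with the hash_desc variants keying off .tfm as the allocated/not-allocated flag. A hedged sketch of the enable path for data digests; the trailing NULL reset is an addition here to keep the destroy path from double-freeing:

    /* Paired allocation with unwind on partial failure. */
    tcp_conn->data_tx_hash.tfm = crypto_alloc_hash("crc32c", 0,
                                                   CRYPTO_ALG_ASYNC);
    if (IS_ERR(tcp_conn->data_tx_hash.tfm))
            return PTR_ERR(tcp_conn->data_tx_hash.tfm);

    tcp_conn->data_rx_hash.tfm = crypto_alloc_hash("crc32c", 0,
                                                   CRYPTO_ALG_ASYNC);
    if (IS_ERR(tcp_conn->data_rx_hash.tfm)) {
            crypto_free_hash(tcp_conn->data_tx_hash.tfm);
            tcp_conn->data_tx_hash.tfm = NULL; /* guard later teardown */
            return PTR_ERR(tcp_conn->data_rx_hash.tfm);
    }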
diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h
index 6a4ee704e46e..e35701305fc9 100644
--- a/drivers/scsi/iscsi_tcp.h
+++ b/drivers/scsi/iscsi_tcp.h
@@ -51,6 +51,7 @@ | |||
51 | #define ISCSI_SG_TABLESIZE SG_ALL | 51 | #define ISCSI_SG_TABLESIZE SG_ALL |
52 | #define ISCSI_TCP_MAX_CMD_LEN 16 | 52 | #define ISCSI_TCP_MAX_CMD_LEN 16 |
53 | 53 | ||
54 | struct crypto_hash; | ||
54 | struct socket; | 55 | struct socket; |
55 | 56 | ||
56 | /* Socket connection receive helper */ | 57 |
@@ -84,8 +85,8 @@ struct iscsi_tcp_conn { | |||
84 | /* iSCSI connection-wide sequencing */ | 85 | /* iSCSI connection-wide sequencing */ |
85 | int hdr_size; /* PDU header size */ | 86 | int hdr_size; /* PDU header size */ |
86 | 87 | ||
87 | struct crypto_tfm *rx_tfm; /* CRC32C (Rx) */ | 88 | struct crypto_hash *rx_tfm; /* CRC32C (Rx) */ |
88 | struct crypto_tfm *data_rx_tfm; /* CRC32C (Rx) for data */ | 89 | struct hash_desc data_rx_hash; /* CRC32C (Rx) for data */ |
89 | 90 | ||
90 | /* control data */ | 91 | /* control data */ |
91 | struct iscsi_tcp_recv in; /* TCP receive context */ | 92 | struct iscsi_tcp_recv in; /* TCP receive context */ |
@@ -97,8 +98,8 @@ struct iscsi_tcp_conn { | |||
97 | void (*old_write_space)(struct sock *); | 98 | void (*old_write_space)(struct sock *); |
98 | 99 | ||
99 | /* xmit */ | 100 | /* xmit */ |
100 | struct crypto_tfm *tx_tfm; /* CRC32C (Tx) */ | 101 | struct crypto_hash *tx_tfm; /* CRC32C (Tx) */ |
101 | struct crypto_tfm *data_tx_tfm; /* CRC32C (Tx) for data */ | 102 | struct hash_desc data_tx_hash; /* CRC32C (Tx) for data */ |
102 | 103 | ||
103 | /* MIB custom statistics */ | 104 | /* MIB custom statistics */ |
104 | uint32_t sendpage_failures_cnt; | 105 | uint32_t sendpage_failures_cnt; |
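The header captures the resulting split: one-shot header digests keep bare crypto_hash pointers, while the streaming data digests store a full hash_desc whose .tfm doubles as the allocated flag tested during teardown. A hypothetical extract of just the digest-related fields, regrouped for illustration only:

    /* Hypothetical regrouping of the struct iscsi_tcp_conn digest fields. */
    struct iscsi_tcp_digests {
            struct crypto_hash *rx_tfm;       /* CRC32C (Rx), one-shot  */
            struct hash_desc    data_rx_hash; /* CRC32C (Rx), streaming */
            struct crypto_hash *tx_tfm;       /* CRC32C (Tx), one-shot  */
            struct hash_desc    data_tx_hash; /* CRC32C (Tx), streaming */
    };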