author		Linus Torvalds <torvalds@linux-foundation.org>	2012-05-23 12:02:42 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-05-23 12:02:42 -0400
commit		6f73b3629f774c6cba589b15fd095112b25ca923 (patch)
tree		50a60feae71cb5f40078f552b9b08468bc7b29c9 /drivers/crypto
parent		3a8580f82024e30b31c662aa49346adf7a3bcdb5 (diff)
parent		2074b1d9d53ae696dd3f49482bad43254f40f01d (diff)
Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc
Pull powerpc updates from Benjamin Herrenschmidt:
 "Here are the powerpc goodies for 3.5.  Main highlights are:

   - Support for the NX crypto engine in Power7+

   - A bunch of Anton goodness, including some micro optimization of
     our syscall entry on Power7

   - I converted a pile of our thermal control drivers to the new i2c
     APIs (essentially turning the old therm_pm72 into a proper set of
     windfarm drivers).  That's one more step toward removing the
     deprecated i2c APIs, there's still a few drivers to fix, but we
     are getting close

   - kexec/kdump support for 47x embedded cores

  The big missing thing here is no updates from Freescale.  Not sure
  what's up here, but with Kumar not working for them anymore things
  are a bit in a state of flux in that area."

* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc: (71 commits)
  powerpc: Fix irq distribution
  Revert "powerpc/hw-breakpoint: Use generic hw-breakpoint interfaces for new PPC ptrace flags"
  powerpc: Fixing a cputhread code documentation
  powerpc/crypto: Enable the PFO-based encryption device
  powerpc/crypto: Build files for the nx device driver
  powerpc/crypto: debugfs routines and docs for the nx device driver
  powerpc/crypto: SHA512 hash routines for nx encryption
  powerpc/crypto: SHA256 hash routines for nx encryption
  powerpc/crypto: AES-XCBC mode routines for nx encryption
  powerpc/crypto: AES-GCM mode routines for nx encryption
  powerpc/crypto: AES-ECB mode routines for nx encryption
  powerpc/crypto: AES-CTR mode routines for nx encryption
  powerpc/crypto: AES-CCM mode routines for nx encryption
  powerpc/crypto: AES-CBC mode routines for nx encryption
  powerpc/crypto: nx driver code supporting nx encryption
  powerpc/pseries: Enable the PFO-based RNG accelerator
  powerpc/pseries/hwrng: PFO-based hwrng driver
  powerpc/pseries: Add PFO support to the VIO bus
  powerpc/pseries: Add pseries update notifier for OFDT prop changes
  powerpc/pseries: Add new hvcall constants to support PFO
  ...
Diffstat (limited to 'drivers/crypto')
-rw-r--r--  drivers/crypto/Kconfig           |  17
-rw-r--r--  drivers/crypto/nx/Makefile       |  11
-rw-r--r--  drivers/crypto/nx/nx-aes-cbc.c   | 141
-rw-r--r--  drivers/crypto/nx/nx-aes-ccm.c   | 468
-rw-r--r--  drivers/crypto/nx/nx-aes-ctr.c   | 178
-rw-r--r--  drivers/crypto/nx/nx-aes-ecb.c   | 139
-rw-r--r--  drivers/crypto/nx/nx-aes-gcm.c   | 353
-rw-r--r--  drivers/crypto/nx/nx-aes-xcbc.c  | 236
-rw-r--r--  drivers/crypto/nx/nx-sha256.c    | 246
-rw-r--r--  drivers/crypto/nx/nx-sha512.c    | 265
-rw-r--r--  drivers/crypto/nx/nx.c           | 716
-rw-r--r--  drivers/crypto/nx/nx.h           | 193
-rw-r--r--  drivers/crypto/nx/nx_csbcpb.h    | 205
-rw-r--r--  drivers/crypto/nx/nx_debugfs.c   | 103
14 files changed, 3271 insertions, 0 deletions
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 371f13cc38eb..6373fa0ddb65 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -297,4 +297,21 @@ config CRYPTO_DEV_TEGRA_AES
297	  To compile this driver as a module, choose M here: the module
298	  will be called tegra-aes.
299
300config CRYPTO_DEV_NX
301	tristate "Support for Power7+ in-Nest cryptographic acceleration"
302 depends on PPC64 && IBMVIO
303 select CRYPTO_AES
304 select CRYPTO_CBC
305 select CRYPTO_ECB
306 select CRYPTO_CCM
307 select CRYPTO_GCM
308 select CRYPTO_AUTHENC
309 select CRYPTO_XCBC
310 select CRYPTO_SHA256
311 select CRYPTO_SHA512
312 help
313 Support for Power7+ in-Nest cryptographic acceleration. This
314 module supports acceleration for AES and SHA2 algorithms. If you
315 choose 'M' here, this module will be called nx_crypto.
316
317endif # CRYPTO_HW
diff --git a/drivers/crypto/nx/Makefile b/drivers/crypto/nx/Makefile
new file mode 100644
index 000000000000..411ce59c80d1
--- /dev/null
+++ b/drivers/crypto/nx/Makefile
@@ -0,0 +1,11 @@
1obj-$(CONFIG_CRYPTO_DEV_NX) += nx-crypto.o
2nx-crypto-objs := nx.o \
3 nx_debugfs.o \
4 nx-aes-cbc.o \
5 nx-aes-ecb.o \
6 nx-aes-gcm.o \
7 nx-aes-ccm.o \
8 nx-aes-ctr.o \
9 nx-aes-xcbc.o \
10 nx-sha256.o \
11 nx-sha512.o
diff --git a/drivers/crypto/nx/nx-aes-cbc.c b/drivers/crypto/nx/nx-aes-cbc.c
new file mode 100644
index 000000000000..69ed796ee327
--- /dev/null
+++ b/drivers/crypto/nx/nx-aes-cbc.c
@@ -0,0 +1,141 @@
1/**
2 * AES CBC routines supporting the Power 7+ Nest Accelerators driver
3 *
4 * Copyright (C) 2011-2012 International Business Machines Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 only.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 *
19 * Author: Kent Yoder <yoder1@us.ibm.com>
20 */
21
22#include <crypto/aes.h>
23#include <crypto/algapi.h>
24#include <linux/module.h>
25#include <linux/types.h>
26#include <linux/crypto.h>
27#include <asm/vio.h>
28
29#include "nx_csbcpb.h"
30#include "nx.h"
31
32
33static int cbc_aes_nx_set_key(struct crypto_tfm *tfm,
34 const u8 *in_key,
35 unsigned int key_len)
36{
37 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
38 struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
39
40 nx_ctx_init(nx_ctx, HCOP_FC_AES);
41
42 switch (key_len) {
43 case AES_KEYSIZE_128:
44 NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
45 nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128];
46 break;
47 case AES_KEYSIZE_192:
48 NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_192);
49 nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_192];
50 break;
51 case AES_KEYSIZE_256:
52 NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_256);
53 nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_256];
54 break;
55 default:
56 return -EINVAL;
57 }
58
59 csbcpb->cpb.hdr.mode = NX_MODE_AES_CBC;
60 memcpy(csbcpb->cpb.aes_cbc.key, in_key, key_len);
61
62 return 0;
63}
64
65static int cbc_aes_nx_crypt(struct blkcipher_desc *desc,
66 struct scatterlist *dst,
67 struct scatterlist *src,
68 unsigned int nbytes,
69 int enc)
70{
71 struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm);
72 struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
73 int rc;
74
75 if (nbytes > nx_ctx->ap->databytelen)
76 return -EINVAL;
77
78 if (enc)
79 NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
80 else
81 NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
82
83 rc = nx_build_sg_lists(nx_ctx, desc, dst, src, nbytes,
84 csbcpb->cpb.aes_cbc.iv);
85 if (rc)
86 goto out;
87
88 if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
89 rc = -EINVAL;
90 goto out;
91 }
92
93 rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
94 desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
95 if (rc)
96 goto out;
97
98 atomic_inc(&(nx_ctx->stats->aes_ops));
99 atomic64_add(csbcpb->csb.processed_byte_count,
100 &(nx_ctx->stats->aes_bytes));
101out:
102 return rc;
103}
104
105static int cbc_aes_nx_encrypt(struct blkcipher_desc *desc,
106 struct scatterlist *dst,
107 struct scatterlist *src,
108 unsigned int nbytes)
109{
110 return cbc_aes_nx_crypt(desc, dst, src, nbytes, 1);
111}
112
113static int cbc_aes_nx_decrypt(struct blkcipher_desc *desc,
114 struct scatterlist *dst,
115 struct scatterlist *src,
116 unsigned int nbytes)
117{
118 return cbc_aes_nx_crypt(desc, dst, src, nbytes, 0);
119}
120
121struct crypto_alg nx_cbc_aes_alg = {
122 .cra_name = "cbc(aes)",
123 .cra_driver_name = "cbc-aes-nx",
124 .cra_priority = 300,
125 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
126 .cra_blocksize = AES_BLOCK_SIZE,
127 .cra_ctxsize = sizeof(struct nx_crypto_ctx),
128 .cra_type = &crypto_blkcipher_type,
129 .cra_module = THIS_MODULE,
130 .cra_list = LIST_HEAD_INIT(nx_cbc_aes_alg.cra_list),
131 .cra_init = nx_crypto_ctx_aes_cbc_init,
132 .cra_exit = nx_crypto_ctx_exit,
133 .cra_blkcipher = {
134 .min_keysize = AES_MIN_KEY_SIZE,
135 .max_keysize = AES_MAX_KEY_SIZE,
136 .ivsize = AES_BLOCK_SIZE,
137 .setkey = cbc_aes_nx_set_key,
138 .encrypt = cbc_aes_nx_encrypt,
139 .decrypt = cbc_aes_nx_decrypt,
140 }
141};
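
For reference, a minimal sketch (not part of the patch) of how a caller of this era could exercise the "cbc(aes)" implementation registered above through the synchronous blkcipher API. The helper name, key, and plaintext are hypothetical, and error handling is abbreviated:

#include <crypto/aes.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int example_cbc_roundtrip(void)
{
	u8 key[AES_KEYSIZE_128] = { 0 };	/* hypothetical all-zero key */
	u8 iv[AES_BLOCK_SIZE] = { 0 };
	u8 buf[AES_BLOCK_SIZE] = "0123456789abcde";	/* one 16-byte block */
	struct crypto_blkcipher *tfm;
	struct blkcipher_desc desc;
	struct scatterlist sg;
	int rc;

	/* "cbc(aes)" resolves to cbc-aes-nx when its priority (300) wins */
	tfm = crypto_alloc_blkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	rc = crypto_blkcipher_setkey(tfm, key, sizeof(key));
	if (rc)
		goto out;

	crypto_blkcipher_set_iv(tfm, iv, sizeof(iv));

	sg_init_one(&sg, buf, sizeof(buf));
	desc.tfm = tfm;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	/* encrypt one block in place */
	rc = crypto_blkcipher_encrypt(&desc, &sg, &sg, sizeof(buf));
out:
	crypto_free_blkcipher(tfm);
	return rc;
}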
diff --git a/drivers/crypto/nx/nx-aes-ccm.c b/drivers/crypto/nx/nx-aes-ccm.c
new file mode 100644
index 000000000000..7aeac678b9c0
--- /dev/null
+++ b/drivers/crypto/nx/nx-aes-ccm.c
@@ -0,0 +1,468 @@
1/**
2 * AES CCM routines supporting the Power 7+ Nest Accelerators driver
3 *
4 * Copyright (C) 2012 International Business Machines Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 only.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 *
19 * Author: Kent Yoder <yoder1@us.ibm.com>
20 */
21
22#include <crypto/internal/aead.h>
23#include <crypto/aes.h>
24#include <crypto/algapi.h>
25#include <crypto/scatterwalk.h>
26#include <linux/module.h>
27#include <linux/types.h>
28#include <linux/crypto.h>
29#include <asm/vio.h>
30
31#include "nx_csbcpb.h"
32#include "nx.h"
33
34
35static int ccm_aes_nx_set_key(struct crypto_aead *tfm,
36 const u8 *in_key,
37 unsigned int key_len)
38{
39 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);
40 struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
41 struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;
42
43 nx_ctx_init(nx_ctx, HCOP_FC_AES);
44
45 switch (key_len) {
46 case AES_KEYSIZE_128:
47 NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
48 NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_128);
49 nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128];
50 break;
51 default:
52 return -EINVAL;
53 }
54
55 csbcpb->cpb.hdr.mode = NX_MODE_AES_CCM;
56 memcpy(csbcpb->cpb.aes_ccm.key, in_key, key_len);
57
58 csbcpb_aead->cpb.hdr.mode = NX_MODE_AES_CCA;
59 memcpy(csbcpb_aead->cpb.aes_cca.key, in_key, key_len);
60
61 return 0;
62
63}
64
65static int ccm4309_aes_nx_set_key(struct crypto_aead *tfm,
66 const u8 *in_key,
67 unsigned int key_len)
68{
69 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);
70
71 if (key_len < 3)
72 return -EINVAL;
73
74 key_len -= 3;
75
76 memcpy(nx_ctx->priv.ccm.nonce, in_key + key_len, 3);
77
78 return ccm_aes_nx_set_key(tfm, in_key, key_len);
79}
80
81static int ccm_aes_nx_setauthsize(struct crypto_aead *tfm,
82 unsigned int authsize)
83{
84 switch (authsize) {
85 case 4:
86 case 6:
87 case 8:
88 case 10:
89 case 12:
90 case 14:
91 case 16:
92 break;
93 default:
94 return -EINVAL;
95 }
96
97 crypto_aead_crt(tfm)->authsize = authsize;
98
99 return 0;
100}
101
102static int ccm4309_aes_nx_setauthsize(struct crypto_aead *tfm,
103 unsigned int authsize)
104{
105 switch (authsize) {
106 case 8:
107 case 12:
108 case 16:
109 break;
110 default:
111 return -EINVAL;
112 }
113
114 crypto_aead_crt(tfm)->authsize = authsize;
115
116 return 0;
117}
118
119/* taken from crypto/ccm.c */
120static int set_msg_len(u8 *block, unsigned int msglen, int csize)
121{
122 __be32 data;
123
124 memset(block, 0, csize);
125 block += csize;
126
127 if (csize >= 4)
128 csize = 4;
129 else if (msglen > (unsigned int)(1 << (8 * csize)))
130 return -EOVERFLOW;
131
132 data = cpu_to_be32(msglen);
133 memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
134
135 return 0;
136}
137
138/* taken from crypto/ccm.c */
139static inline int crypto_ccm_check_iv(const u8 *iv)
140{
141 /* 2 <= L <= 8, so 1 <= L' <= 7. */
142 if (1 > iv[0] || iv[0] > 7)
143 return -EINVAL;
144
145 return 0;
146}
147
148/* based on code from crypto/ccm.c */
149static int generate_b0(u8 *iv, unsigned int assoclen, unsigned int authsize,
150 unsigned int cryptlen, u8 *b0)
151{
152 unsigned int l, lp, m = authsize;
153 int rc;
154
155 memcpy(b0, iv, 16);
156
157 lp = b0[0];
158 l = lp + 1;
159
160 /* set m, bits 3-5 */
161 *b0 |= (8 * ((m - 2) / 2));
162
163 /* set adata, bit 6, if associated data is used */
164 if (assoclen)
165 *b0 |= 64;
166
167 rc = set_msg_len(b0 + 16 - l, cryptlen, l);
168
169 return rc;
170}
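
/* Worked example (not part of the patch): with L' = iv[0] = 3 (so L = 4),
 * authsize m = 16, and associated data present, the flags byte assembled
 * above is 0x03 | 8 * ((16 - 2) / 2) | 64 = 0x03 | 0x38 | 0x40 = 0x7b,
 * and set_msg_len() stores cryptlen big-endian in the final L = 4 bytes
 * of B0. */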
171
172static int generate_pat(u8 *iv,
173 struct aead_request *req,
174 struct nx_crypto_ctx *nx_ctx,
175 unsigned int authsize,
176 unsigned int nbytes,
177 u8 *out)
178{
179 struct nx_sg *nx_insg = nx_ctx->in_sg;
180 struct nx_sg *nx_outsg = nx_ctx->out_sg;
181 unsigned int iauth_len = 0;
182 struct vio_pfo_op *op = NULL;
183 u8 tmp[16], *b1 = NULL, *b0 = NULL, *result = NULL;
184 int rc;
185
186 /* zero the ctr value */
187 memset(iv + 15 - iv[0], 0, iv[0] + 1);
188
189 if (!req->assoclen) {
190 b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0;
191 } else if (req->assoclen <= 14) {
192		/* if associated data is 14 bytes or less, we do one CCM
193 * operation on 2 AES blocks, B0 (stored in the csbcpb) and B1,
194 * which is fed in through the source buffers here */
195 b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0;
196 b1 = nx_ctx->priv.ccm.iauth_tag;
197 iauth_len = req->assoclen;
198
199 nx_insg = nx_build_sg_list(nx_insg, b1, 16, nx_ctx->ap->sglen);
200 nx_outsg = nx_build_sg_list(nx_outsg, tmp, 16,
201 nx_ctx->ap->sglen);
202
203		/* inlen should be negative, indicating to phyp that it's a
204 * pointer to an sg list */
205 nx_ctx->op.inlen = (nx_ctx->in_sg - nx_insg) *
206 sizeof(struct nx_sg);
207 nx_ctx->op.outlen = (nx_ctx->out_sg - nx_outsg) *
208 sizeof(struct nx_sg);
209
210 NX_CPB_FDM(nx_ctx->csbcpb) |= NX_FDM_ENDE_ENCRYPT;
211 NX_CPB_FDM(nx_ctx->csbcpb) |= NX_FDM_INTERMEDIATE;
212
213 op = &nx_ctx->op;
214 result = nx_ctx->csbcpb->cpb.aes_ccm.out_pat_or_mac;
215 } else if (req->assoclen <= 65280) {
216 /* if associated data is less than (2^16 - 2^8), we construct
217 * B1 differently and feed in the associated data to a CCA
218 * operation */
219 b0 = nx_ctx->csbcpb_aead->cpb.aes_cca.b0;
220 b1 = nx_ctx->csbcpb_aead->cpb.aes_cca.b1;
221 iauth_len = 14;
222
223 /* remaining assoc data must have scatterlist built for it */
224 nx_insg = nx_walk_and_build(nx_insg, nx_ctx->ap->sglen,
225 req->assoc, iauth_len,
226 req->assoclen - iauth_len);
227 nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_insg) *
228 sizeof(struct nx_sg);
229
230 op = &nx_ctx->op_aead;
231 result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0;
232	} else {
233		/* more than 65280 bytes of associated data is unsupported,
234		 * so bail out here rather than dereferencing a NULL b0 below */
235		pr_err("associated data len is %u bytes (returning -EINVAL)\n",
236			req->assoclen);
237		rc = -EINVAL;
238		goto done;
239	}
240
241 rc = generate_b0(iv, req->assoclen, authsize, nbytes, b0);
242 if (rc)
243 goto done;
244
245 if (b1) {
246 memset(b1, 0, 16);
247 *(u16 *)b1 = (u16)req->assoclen;
248
249 scatterwalk_map_and_copy(b1 + 2, req->assoc, 0,
250 iauth_len, SCATTERWALK_FROM_SG);
251
252 rc = nx_hcall_sync(nx_ctx, op,
253 req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
254 if (rc)
255 goto done;
256
257 atomic_inc(&(nx_ctx->stats->aes_ops));
258 atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));
259
260 memcpy(out, result, AES_BLOCK_SIZE);
261 }
262done:
263 return rc;
264}
265
266static int ccm_nx_decrypt(struct aead_request *req,
267 struct blkcipher_desc *desc)
268{
269 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
270 struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
271 unsigned int nbytes = req->cryptlen;
272 unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
273 struct nx_ccm_priv *priv = &nx_ctx->priv.ccm;
274 int rc = -1;
275
276 if (nbytes > nx_ctx->ap->databytelen)
277 return -EINVAL;
278
279 nbytes -= authsize;
280
281 /* copy out the auth tag to compare with later */
282 scatterwalk_map_and_copy(priv->oauth_tag,
283 req->src, nbytes, authsize,
284 SCATTERWALK_FROM_SG);
285
286 rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes,
287 csbcpb->cpb.aes_ccm.in_pat_or_b0);
288 if (rc)
289 goto out;
290
291 rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src, nbytes,
292 csbcpb->cpb.aes_ccm.iv_or_ctr);
293 if (rc)
294 goto out;
295
296 NX_CPB_FDM(nx_ctx->csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
297 NX_CPB_FDM(nx_ctx->csbcpb) &= ~NX_FDM_INTERMEDIATE;
298
299 rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
300 req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
301 if (rc)
302 goto out;
303
304 atomic_inc(&(nx_ctx->stats->aes_ops));
305 atomic64_add(csbcpb->csb.processed_byte_count,
306 &(nx_ctx->stats->aes_bytes));
307
308 rc = memcmp(csbcpb->cpb.aes_ccm.out_pat_or_mac, priv->oauth_tag,
309 authsize) ? -EBADMSG : 0;
310out:
311 return rc;
312}
313
314static int ccm_nx_encrypt(struct aead_request *req,
315 struct blkcipher_desc *desc)
316{
317 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
318 struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
319 unsigned int nbytes = req->cryptlen;
320 unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
321 int rc = -1;
322
323 if (nbytes > nx_ctx->ap->databytelen)
324 return -EINVAL;
325
326 rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes,
327 csbcpb->cpb.aes_ccm.in_pat_or_b0);
328 if (rc)
329 goto out;
330
331 rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src, nbytes,
332 csbcpb->cpb.aes_ccm.iv_or_ctr);
333 if (rc)
334 goto out;
335
336 NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
337 NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
338
339 rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
340 req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
341 if (rc)
342 goto out;
343
344 atomic_inc(&(nx_ctx->stats->aes_ops));
345 atomic64_add(csbcpb->csb.processed_byte_count,
346 &(nx_ctx->stats->aes_bytes));
347
348 /* copy out the auth tag */
349 scatterwalk_map_and_copy(csbcpb->cpb.aes_ccm.out_pat_or_mac,
350 req->dst, nbytes, authsize,
351 SCATTERWALK_TO_SG);
352out:
353 return rc;
354}
355
356static int ccm4309_aes_nx_encrypt(struct aead_request *req)
357{
358 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
359 struct blkcipher_desc desc;
360 u8 *iv = nx_ctx->priv.ccm.iv;
361
362 iv[0] = 3;
363 memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
364 memcpy(iv + 4, req->iv, 8);
365
366 desc.info = iv;
367 desc.tfm = (struct crypto_blkcipher *)req->base.tfm;
368
369 return ccm_nx_encrypt(req, &desc);
370}
371
372static int ccm_aes_nx_encrypt(struct aead_request *req)
373{
374 struct blkcipher_desc desc;
375 int rc;
376
377 desc.info = req->iv;
378 desc.tfm = (struct crypto_blkcipher *)req->base.tfm;
379
380 rc = crypto_ccm_check_iv(desc.info);
381 if (rc)
382 return rc;
383
384 return ccm_nx_encrypt(req, &desc);
385}
386
387static int ccm4309_aes_nx_decrypt(struct aead_request *req)
388{
389 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
390 struct blkcipher_desc desc;
391 u8 *iv = nx_ctx->priv.ccm.iv;
392
393 iv[0] = 3;
394 memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
395 memcpy(iv + 4, req->iv, 8);
396
397 desc.info = iv;
398 desc.tfm = (struct crypto_blkcipher *)req->base.tfm;
399
400 return ccm_nx_decrypt(req, &desc);
401}
402
403static int ccm_aes_nx_decrypt(struct aead_request *req)
404{
405 struct blkcipher_desc desc;
406 int rc;
407
408 desc.info = req->iv;
409 desc.tfm = (struct crypto_blkcipher *)req->base.tfm;
410
411 rc = crypto_ccm_check_iv(desc.info);
412 if (rc)
413 return rc;
414
415 return ccm_nx_decrypt(req, &desc);
416}
417
418/* tell the block cipher walk routines that this is a stream cipher by
419 * setting cra_blocksize to 1. Even using blkcipher_walk_virt_block
420 * during encrypt/decrypt doesn't solve this problem, because it calls
421 * blkcipher_walk_done under the covers, which doesn't use walk->blocksize,
422 * but instead uses this tfm->blocksize. */
423struct crypto_alg nx_ccm_aes_alg = {
424 .cra_name = "ccm(aes)",
425 .cra_driver_name = "ccm-aes-nx",
426 .cra_priority = 300,
427 .cra_flags = CRYPTO_ALG_TYPE_AEAD |
428 CRYPTO_ALG_NEED_FALLBACK,
429 .cra_blocksize = 1,
430 .cra_ctxsize = sizeof(struct nx_crypto_ctx),
431 .cra_type = &crypto_aead_type,
432 .cra_module = THIS_MODULE,
433 .cra_list = LIST_HEAD_INIT(nx_ccm_aes_alg.cra_list),
434 .cra_init = nx_crypto_ctx_aes_ccm_init,
435 .cra_exit = nx_crypto_ctx_exit,
436 .cra_aead = {
437 .ivsize = AES_BLOCK_SIZE,
438 .maxauthsize = AES_BLOCK_SIZE,
439 .setkey = ccm_aes_nx_set_key,
440 .setauthsize = ccm_aes_nx_setauthsize,
441 .encrypt = ccm_aes_nx_encrypt,
442 .decrypt = ccm_aes_nx_decrypt,
443 }
444};
445
446struct crypto_alg nx_ccm4309_aes_alg = {
447 .cra_name = "rfc4309(ccm(aes))",
448 .cra_driver_name = "rfc4309-ccm-aes-nx",
449 .cra_priority = 300,
450 .cra_flags = CRYPTO_ALG_TYPE_AEAD |
451 CRYPTO_ALG_NEED_FALLBACK,
452 .cra_blocksize = 1,
453 .cra_ctxsize = sizeof(struct nx_crypto_ctx),
454 .cra_type = &crypto_nivaead_type,
455 .cra_module = THIS_MODULE,
456 .cra_list = LIST_HEAD_INIT(nx_ccm4309_aes_alg.cra_list),
457 .cra_init = nx_crypto_ctx_aes_ccm_init,
458 .cra_exit = nx_crypto_ctx_exit,
459 .cra_aead = {
460 .ivsize = 8,
461 .maxauthsize = AES_BLOCK_SIZE,
462 .setkey = ccm4309_aes_nx_set_key,
463 .setauthsize = ccm4309_aes_nx_setauthsize,
464 .encrypt = ccm4309_aes_nx_encrypt,
465 .decrypt = ccm4309_aes_nx_decrypt,
466 .geniv = "seqiv",
467 }
468};
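
Likewise, a sketch (not part of the patch) of driving "rfc4309(ccm(aes))" through the AEAD API of this era; the helper name, key, IV, and buffer contents are hypothetical:

#include <crypto/aes.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int example_ccm4309_encrypt(void)
{
	u8 key[AES_KEYSIZE_128 + 3] = { 0 };	/* 16-byte key + 3-byte salt */
	u8 iv[8] = { 0 };			/* 8-byte per-request IV */
	u8 buf[32 + 16];			/* payload plus room for the tag */
	struct crypto_aead *tfm;
	struct aead_request *req;
	struct scatterlist sg;
	int rc;

	tfm = crypto_alloc_aead("rfc4309(ccm(aes))", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* the trailing 3 key bytes become the nonce, as in
	 * ccm4309_aes_nx_set_key() above */
	rc = crypto_aead_setkey(tfm, key, sizeof(key));
	if (!rc)
		rc = crypto_aead_setauthsize(tfm, 16);
	if (rc)
		goto free_tfm;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		rc = -ENOMEM;
		goto free_tfm;
	}

	memset(buf, 0, sizeof(buf));
	sg_init_one(&sg, buf, sizeof(buf));
	aead_request_set_assoc(req, NULL, 0);	/* no associated data */
	aead_request_set_crypt(req, &sg, &sg, 32, iv);

	rc = crypto_aead_encrypt(req);	/* the tag lands after the payload */

	aead_request_free(req);
free_tfm:
	crypto_free_aead(tfm);
	return rc;
}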
diff --git a/drivers/crypto/nx/nx-aes-ctr.c b/drivers/crypto/nx/nx-aes-ctr.c
new file mode 100644
index 000000000000..52d4eb05e8f7
--- /dev/null
+++ b/drivers/crypto/nx/nx-aes-ctr.c
@@ -0,0 +1,178 @@
1/**
2 * AES CTR routines supporting the Power 7+ Nest Accelerators driver
3 *
4 * Copyright (C) 2011-2012 International Business Machines Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 only.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 *
19 * Author: Kent Yoder <yoder1@us.ibm.com>
20 */
21
22#include <crypto/aes.h>
23#include <crypto/ctr.h>
24#include <crypto/algapi.h>
25#include <linux/module.h>
26#include <linux/types.h>
27#include <linux/crypto.h>
28#include <asm/vio.h>
29
30#include "nx_csbcpb.h"
31#include "nx.h"
32
33
34static int ctr_aes_nx_set_key(struct crypto_tfm *tfm,
35 const u8 *in_key,
36 unsigned int key_len)
37{
38 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
39 struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
40
41 nx_ctx_init(nx_ctx, HCOP_FC_AES);
42
43 switch (key_len) {
44 case AES_KEYSIZE_128:
45 NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
46 nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128];
47 break;
48 case AES_KEYSIZE_192:
49 NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_192);
50 nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_192];
51 break;
52 case AES_KEYSIZE_256:
53 NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_256);
54 nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_256];
55 break;
56 default:
57 return -EINVAL;
58 }
59
60 csbcpb->cpb.hdr.mode = NX_MODE_AES_CTR;
61 memcpy(csbcpb->cpb.aes_ctr.key, in_key, key_len);
62
63 return 0;
64}
65
66static int ctr3686_aes_nx_set_key(struct crypto_tfm *tfm,
67 const u8 *in_key,
68 unsigned int key_len)
69{
70 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
71
72 if (key_len < CTR_RFC3686_NONCE_SIZE)
73 return -EINVAL;
74
75 memcpy(nx_ctx->priv.ctr.iv,
76 in_key + key_len - CTR_RFC3686_NONCE_SIZE,
77 CTR_RFC3686_NONCE_SIZE);
78
79 key_len -= CTR_RFC3686_NONCE_SIZE;
80
81 return ctr_aes_nx_set_key(tfm, in_key, key_len);
82}
83
84static int ctr_aes_nx_crypt(struct blkcipher_desc *desc,
85 struct scatterlist *dst,
86 struct scatterlist *src,
87 unsigned int nbytes)
88{
89 struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm);
90 struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
91 int rc;
92
93 if (nbytes > nx_ctx->ap->databytelen)
94 return -EINVAL;
95
96 rc = nx_build_sg_lists(nx_ctx, desc, dst, src, nbytes,
97 csbcpb->cpb.aes_ctr.iv);
98 if (rc)
99 goto out;
100
101 if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
102 rc = -EINVAL;
103 goto out;
104 }
105
106 rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
107 desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
108 if (rc)
109 goto out;
110
111 atomic_inc(&(nx_ctx->stats->aes_ops));
112 atomic64_add(csbcpb->csb.processed_byte_count,
113 &(nx_ctx->stats->aes_bytes));
114out:
115 return rc;
116}
117
118static int ctr3686_aes_nx_crypt(struct blkcipher_desc *desc,
119 struct scatterlist *dst,
120 struct scatterlist *src,
121 unsigned int nbytes)
122{
123 struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm);
124 u8 *iv = nx_ctx->priv.ctr.iv;
125
126 memcpy(iv + CTR_RFC3686_NONCE_SIZE,
127 desc->info, CTR_RFC3686_IV_SIZE);
128 iv[15] = 1;
129
130 desc->info = nx_ctx->priv.ctr.iv;
131
132 return ctr_aes_nx_crypt(desc, dst, src, nbytes);
133}
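
/* Worked example (not part of the patch) of the 16-byte counter block
 * assembled above, per RFC 3686: bytes 0-3 hold the nonce saved from the
 * tail of the key at setkey time, bytes 4-11 hold the per-request IV from
 * desc->info, and bytes 12-15 are the big-endian block counter, which must
 * start at 1 (hence iv[15] = 1, with bytes 12-14 assumed zero from the
 * context allocation). */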
134
135struct crypto_alg nx_ctr_aes_alg = {
136 .cra_name = "ctr(aes)",
137 .cra_driver_name = "ctr-aes-nx",
138 .cra_priority = 300,
139 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
140 .cra_blocksize = 1,
141 .cra_ctxsize = sizeof(struct nx_crypto_ctx),
142 .cra_type = &crypto_blkcipher_type,
143 .cra_module = THIS_MODULE,
144 .cra_list = LIST_HEAD_INIT(nx_ctr_aes_alg.cra_list),
145 .cra_init = nx_crypto_ctx_aes_ctr_init,
146 .cra_exit = nx_crypto_ctx_exit,
147 .cra_blkcipher = {
148 .min_keysize = AES_MIN_KEY_SIZE,
149 .max_keysize = AES_MAX_KEY_SIZE,
150 .ivsize = AES_BLOCK_SIZE,
151 .setkey = ctr_aes_nx_set_key,
152 .encrypt = ctr_aes_nx_crypt,
153 .decrypt = ctr_aes_nx_crypt,
154 }
155};
156
157struct crypto_alg nx_ctr3686_aes_alg = {
158 .cra_name = "rfc3686(ctr(aes))",
159 .cra_driver_name = "rfc3686-ctr-aes-nx",
160 .cra_priority = 300,
161 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
162 .cra_blocksize = 1,
163 .cra_ctxsize = sizeof(struct nx_crypto_ctx),
164 .cra_type = &crypto_blkcipher_type,
165 .cra_module = THIS_MODULE,
166 .cra_list = LIST_HEAD_INIT(nx_ctr3686_aes_alg.cra_list),
167 .cra_init = nx_crypto_ctx_aes_ctr_init,
168 .cra_exit = nx_crypto_ctx_exit,
169 .cra_blkcipher = {
170 .min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
171 .max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
172 .ivsize = CTR_RFC3686_IV_SIZE,
173 .geniv = "seqiv",
174 .setkey = ctr3686_aes_nx_set_key,
175 .encrypt = ctr3686_aes_nx_crypt,
176 .decrypt = ctr3686_aes_nx_crypt,
177 }
178};
diff --git a/drivers/crypto/nx/nx-aes-ecb.c b/drivers/crypto/nx/nx-aes-ecb.c
new file mode 100644
index 000000000000..7b77bc2d1df4
--- /dev/null
+++ b/drivers/crypto/nx/nx-aes-ecb.c
@@ -0,0 +1,139 @@
1/**
2 * AES ECB routines supporting the Power 7+ Nest Accelerators driver
3 *
4 * Copyright (C) 2011-2012 International Business Machines Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 only.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 *
19 * Author: Kent Yoder <yoder1@us.ibm.com>
20 */
21
22#include <crypto/aes.h>
23#include <crypto/algapi.h>
24#include <linux/module.h>
25#include <linux/types.h>
26#include <linux/crypto.h>
27#include <asm/vio.h>
28
29#include "nx_csbcpb.h"
30#include "nx.h"
31
32
33static int ecb_aes_nx_set_key(struct crypto_tfm *tfm,
34 const u8 *in_key,
35 unsigned int key_len)
36{
37 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
38 struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
39
40 nx_ctx_init(nx_ctx, HCOP_FC_AES);
41
42 switch (key_len) {
43 case AES_KEYSIZE_128:
44 NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
45 nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128];
46 break;
47 case AES_KEYSIZE_192:
48 NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_192);
49 nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_192];
50 break;
51 case AES_KEYSIZE_256:
52 NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_256);
53 nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_256];
54 break;
55 default:
56 return -EINVAL;
57 }
58
59 csbcpb->cpb.hdr.mode = NX_MODE_AES_ECB;
60 memcpy(csbcpb->cpb.aes_ecb.key, in_key, key_len);
61
62 return 0;
63}
64
65static int ecb_aes_nx_crypt(struct blkcipher_desc *desc,
66 struct scatterlist *dst,
67 struct scatterlist *src,
68 unsigned int nbytes,
69 int enc)
70{
71 struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm);
72 struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
73 int rc;
74
75 if (nbytes > nx_ctx->ap->databytelen)
76 return -EINVAL;
77
78 if (enc)
79 NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
80 else
81 NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
82
83 rc = nx_build_sg_lists(nx_ctx, desc, dst, src, nbytes, NULL);
84 if (rc)
85 goto out;
86
87 if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
88 rc = -EINVAL;
89 goto out;
90 }
91
92 rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
93 desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
94 if (rc)
95 goto out;
96
97 atomic_inc(&(nx_ctx->stats->aes_ops));
98 atomic64_add(csbcpb->csb.processed_byte_count,
99 &(nx_ctx->stats->aes_bytes));
100out:
101 return rc;
102}
103
104static int ecb_aes_nx_encrypt(struct blkcipher_desc *desc,
105 struct scatterlist *dst,
106 struct scatterlist *src,
107 unsigned int nbytes)
108{
109 return ecb_aes_nx_crypt(desc, dst, src, nbytes, 1);
110}
111
112static int ecb_aes_nx_decrypt(struct blkcipher_desc *desc,
113 struct scatterlist *dst,
114 struct scatterlist *src,
115 unsigned int nbytes)
116{
117 return ecb_aes_nx_crypt(desc, dst, src, nbytes, 0);
118}
119
120struct crypto_alg nx_ecb_aes_alg = {
121 .cra_name = "ecb(aes)",
122 .cra_driver_name = "ecb-aes-nx",
123 .cra_priority = 300,
124 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
125 .cra_blocksize = AES_BLOCK_SIZE,
126 .cra_ctxsize = sizeof(struct nx_crypto_ctx),
127 .cra_type = &crypto_blkcipher_type,
128 .cra_module = THIS_MODULE,
129 .cra_list = LIST_HEAD_INIT(nx_ecb_aes_alg.cra_list),
130 .cra_init = nx_crypto_ctx_aes_ecb_init,
131 .cra_exit = nx_crypto_ctx_exit,
132 .cra_blkcipher = {
133 .min_keysize = AES_MIN_KEY_SIZE,
134 .max_keysize = AES_MAX_KEY_SIZE,
135 .setkey = ecb_aes_nx_set_key,
136 .encrypt = ecb_aes_nx_encrypt,
137 .decrypt = ecb_aes_nx_decrypt,
138 }
139};
diff --git a/drivers/crypto/nx/nx-aes-gcm.c b/drivers/crypto/nx/nx-aes-gcm.c
new file mode 100644
index 000000000000..9ab1c7341dac
--- /dev/null
+++ b/drivers/crypto/nx/nx-aes-gcm.c
@@ -0,0 +1,353 @@
1/**
2 * AES GCM routines supporting the Power 7+ Nest Accelerators driver
3 *
4 * Copyright (C) 2012 International Business Machines Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 only.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 *
19 * Author: Kent Yoder <yoder1@us.ibm.com>
20 */
21
22#include <crypto/internal/aead.h>
23#include <crypto/aes.h>
24#include <crypto/algapi.h>
25#include <crypto/scatterwalk.h>
26#include <linux/module.h>
27#include <linux/types.h>
28#include <linux/crypto.h>
29#include <asm/vio.h>
30
31#include "nx_csbcpb.h"
32#include "nx.h"
33
34
35static int gcm_aes_nx_set_key(struct crypto_aead *tfm,
36 const u8 *in_key,
37 unsigned int key_len)
38{
39 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);
40 struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
41 struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;
42
43 nx_ctx_init(nx_ctx, HCOP_FC_AES);
44
45 switch (key_len) {
46 case AES_KEYSIZE_128:
47 NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
48 NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_128);
49 nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128];
50 break;
51 case AES_KEYSIZE_192:
52 NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_192);
53 NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_192);
54 nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_192];
55 break;
56 case AES_KEYSIZE_256:
57 NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_256);
58 NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_256);
59 nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_256];
60 break;
61 default:
62 return -EINVAL;
63 }
64
65 csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;
66 memcpy(csbcpb->cpb.aes_gcm.key, in_key, key_len);
67
68 csbcpb_aead->cpb.hdr.mode = NX_MODE_AES_GCA;
69 memcpy(csbcpb_aead->cpb.aes_gca.key, in_key, key_len);
70
71 return 0;
72}
73
74static int gcm4106_aes_nx_set_key(struct crypto_aead *tfm,
75 const u8 *in_key,
76 unsigned int key_len)
77{
78 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);
79 char *nonce = nx_ctx->priv.gcm.nonce;
80 int rc;
81
82 if (key_len < 4)
83 return -EINVAL;
84
85 key_len -= 4;
86
87 rc = gcm_aes_nx_set_key(tfm, in_key, key_len);
88 if (rc)
89 goto out;
90
91 memcpy(nonce, in_key + key_len, 4);
92out:
93 return rc;
94}
95
96static int gcm_aes_nx_setauthsize(struct crypto_aead *tfm,
97 unsigned int authsize)
98{
99 if (authsize > crypto_aead_alg(tfm)->maxauthsize)
100 return -EINVAL;
101
102 crypto_aead_crt(tfm)->authsize = authsize;
103
104 return 0;
105}
106
107static int gcm4106_aes_nx_setauthsize(struct crypto_aead *tfm,
108 unsigned int authsize)
109{
110 switch (authsize) {
111 case 8:
112 case 12:
113 case 16:
114 break;
115 default:
116 return -EINVAL;
117 }
118
119 crypto_aead_crt(tfm)->authsize = authsize;
120
121 return 0;
122}
123
124static int nx_gca(struct nx_crypto_ctx *nx_ctx,
125 struct aead_request *req,
126 u8 *out)
127{
128 struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;
129 int rc = -EINVAL;
130 struct scatter_walk walk;
131 struct nx_sg *nx_sg = nx_ctx->in_sg;
132
133 if (req->assoclen > nx_ctx->ap->databytelen)
134 goto out;
135
136 if (req->assoclen <= AES_BLOCK_SIZE) {
137 scatterwalk_start(&walk, req->assoc);
138 scatterwalk_copychunks(out, &walk, req->assoclen,
139 SCATTERWALK_FROM_SG);
140 scatterwalk_done(&walk, SCATTERWALK_FROM_SG, 0);
141
142 rc = 0;
143 goto out;
144 }
145
146 nx_sg = nx_walk_and_build(nx_sg, nx_ctx->ap->sglen, req->assoc, 0,
147 req->assoclen);
148 nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_sg) * sizeof(struct nx_sg);
149
150 rc = nx_hcall_sync(nx_ctx, &nx_ctx->op_aead,
151 req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
152 if (rc)
153 goto out;
154
155 atomic_inc(&(nx_ctx->stats->aes_ops));
156 atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));
157
158 memcpy(out, csbcpb_aead->cpb.aes_gca.out_pat, AES_BLOCK_SIZE);
159out:
160 return rc;
161}
162
163static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
164{
165 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
166 struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
167 struct blkcipher_desc desc;
168 unsigned int nbytes = req->cryptlen;
169 int rc = -EINVAL;
170
171 if (nbytes > nx_ctx->ap->databytelen)
172 goto out;
173
174 desc.info = nx_ctx->priv.gcm.iv;
175 /* initialize the counter */
176 *(u32 *)(desc.info + NX_GCM_CTR_OFFSET) = 1;
177
178 /* For scenarios where the input message is zero length, AES CTR mode
179 * may be used. Set the source data to be a single block (16B) of all
180 * zeros, and set the input IV value to be the same as the GMAC IV
181 * value. - nx_wb 4.8.1.3 */
182 if (nbytes == 0) {
183 char src[AES_BLOCK_SIZE] = {};
184 struct scatterlist sg;
185
186 desc.tfm = crypto_alloc_blkcipher("ctr(aes)", 0, 0);
187 if (IS_ERR(desc.tfm)) {
188 rc = -ENOMEM;
189 goto out;
190 }
191
192 crypto_blkcipher_setkey(desc.tfm, csbcpb->cpb.aes_gcm.key,
193 NX_CPB_KEY_SIZE(csbcpb) == NX_KS_AES_128 ? 16 :
194 NX_CPB_KEY_SIZE(csbcpb) == NX_KS_AES_192 ? 24 : 32);
195
196 sg_init_one(&sg, src, AES_BLOCK_SIZE);
197 if (enc)
198 crypto_blkcipher_encrypt_iv(&desc, req->dst, &sg,
199 AES_BLOCK_SIZE);
200 else
201 crypto_blkcipher_decrypt_iv(&desc, req->dst, &sg,
202 AES_BLOCK_SIZE);
203 crypto_free_blkcipher(desc.tfm);
204
205 rc = 0;
206 goto out;
207 }
208
209 desc.tfm = (struct crypto_blkcipher *)req->base.tfm;
210
211 csbcpb->cpb.aes_gcm.bit_length_aad = req->assoclen * 8;
212
213 if (req->assoclen) {
214 rc = nx_gca(nx_ctx, req, csbcpb->cpb.aes_gcm.in_pat_or_aad);
215 if (rc)
216 goto out;
217 }
218
219 if (enc)
220 NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
221 else
222		nbytes -= crypto_aead_authsize(crypto_aead_reqtfm(req));
223
224 csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8;
225
226 rc = nx_build_sg_lists(nx_ctx, &desc, req->dst, req->src, nbytes,
227 csbcpb->cpb.aes_gcm.iv_or_cnt);
228 if (rc)
229 goto out;
230
231 rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
232 req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
233 if (rc)
234 goto out;
235
236 atomic_inc(&(nx_ctx->stats->aes_ops));
237 atomic64_add(csbcpb->csb.processed_byte_count,
238 &(nx_ctx->stats->aes_bytes));
239
240 if (enc) {
241 /* copy out the auth tag */
242 scatterwalk_map_and_copy(csbcpb->cpb.aes_gcm.out_pat_or_mac,
243 req->dst, nbytes,
244 crypto_aead_authsize(crypto_aead_reqtfm(req)),
245 SCATTERWALK_TO_SG);
246	} else {
247 u8 *itag = nx_ctx->priv.gcm.iauth_tag;
248 u8 *otag = csbcpb->cpb.aes_gcm.out_pat_or_mac;
249
250 scatterwalk_map_and_copy(itag, req->dst, nbytes,
251 crypto_aead_authsize(crypto_aead_reqtfm(req)),
252 SCATTERWALK_FROM_SG);
253 rc = memcmp(itag, otag,
254 crypto_aead_authsize(crypto_aead_reqtfm(req))) ?
255 -EBADMSG : 0;
256 }
257out:
258 return rc;
259}
260
261static int gcm_aes_nx_encrypt(struct aead_request *req)
262{
263 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
264 char *iv = nx_ctx->priv.gcm.iv;
265
266 memcpy(iv, req->iv, 12);
267
268 return gcm_aes_nx_crypt(req, 1);
269}
270
271static int gcm_aes_nx_decrypt(struct aead_request *req)
272{
273 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
274 char *iv = nx_ctx->priv.gcm.iv;
275
276 memcpy(iv, req->iv, 12);
277
278 return gcm_aes_nx_crypt(req, 0);
279}
280
281static int gcm4106_aes_nx_encrypt(struct aead_request *req)
282{
283 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
284 char *iv = nx_ctx->priv.gcm.iv;
285 char *nonce = nx_ctx->priv.gcm.nonce;
286
287 memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
288 memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8);
289
290 return gcm_aes_nx_crypt(req, 1);
291}
292
293static int gcm4106_aes_nx_decrypt(struct aead_request *req)
294{
295 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
296 char *iv = nx_ctx->priv.gcm.iv;
297 char *nonce = nx_ctx->priv.gcm.nonce;
298
299 memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
300 memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8);
301
302 return gcm_aes_nx_crypt(req, 0);
303}
304
305/* tell the block cipher walk routines that this is a stream cipher by
306 * setting cra_blocksize to 1. Even using blkcipher_walk_virt_block
307 * during encrypt/decrypt doesn't solve this problem, because it calls
308 * blkcipher_walk_done under the covers, which doesn't use walk->blocksize,
309 * but instead uses this tfm->blocksize. */
310struct crypto_alg nx_gcm_aes_alg = {
311 .cra_name = "gcm(aes)",
312 .cra_driver_name = "gcm-aes-nx",
313 .cra_priority = 300,
314 .cra_flags = CRYPTO_ALG_TYPE_AEAD,
315 .cra_blocksize = 1,
316 .cra_ctxsize = sizeof(struct nx_crypto_ctx),
317 .cra_type = &crypto_aead_type,
318 .cra_module = THIS_MODULE,
319 .cra_list = LIST_HEAD_INIT(nx_gcm_aes_alg.cra_list),
320 .cra_init = nx_crypto_ctx_aes_gcm_init,
321 .cra_exit = nx_crypto_ctx_exit,
322 .cra_aead = {
323 .ivsize = AES_BLOCK_SIZE,
324 .maxauthsize = AES_BLOCK_SIZE,
325 .setkey = gcm_aes_nx_set_key,
326 .setauthsize = gcm_aes_nx_setauthsize,
327 .encrypt = gcm_aes_nx_encrypt,
328 .decrypt = gcm_aes_nx_decrypt,
329 }
330};
331
332struct crypto_alg nx_gcm4106_aes_alg = {
333 .cra_name = "rfc4106(gcm(aes))",
334 .cra_driver_name = "rfc4106-gcm-aes-nx",
335 .cra_priority = 300,
336 .cra_flags = CRYPTO_ALG_TYPE_AEAD,
337 .cra_blocksize = 1,
338 .cra_ctxsize = sizeof(struct nx_crypto_ctx),
339 .cra_type = &crypto_nivaead_type,
340 .cra_module = THIS_MODULE,
341 .cra_list = LIST_HEAD_INIT(nx_gcm4106_aes_alg.cra_list),
342 .cra_init = nx_crypto_ctx_aes_gcm_init,
343 .cra_exit = nx_crypto_ctx_exit,
344 .cra_aead = {
345 .ivsize = 8,
346 .maxauthsize = AES_BLOCK_SIZE,
347 .geniv = "seqiv",
348 .setkey = gcm4106_aes_nx_set_key,
349 .setauthsize = gcm4106_aes_nx_setauthsize,
350 .encrypt = gcm4106_aes_nx_encrypt,
351 .decrypt = gcm4106_aes_nx_decrypt,
352 }
353};
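
A note on the IV handling above, for reference (not part of the patch; NX_GCM_CTR_OFFSET is assumed here to be 12, matching the standard GCM J0 layout), the 16-byte desc.info buffer is laid out as:

/* gcm(aes):      iv[0..11]  = caller-supplied 96-bit IV,
 *                iv[12..15] = 32-bit counter, set to 1 by gcm_aes_nx_crypt()
 * rfc4106(gcm):  iv[0..3]   = nonce saved from the key at setkey time,
 *                iv[4..11]  = 8-byte per-request IV,
 *                iv[12..15] = counter, again initialized to 1
 */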
diff --git a/drivers/crypto/nx/nx-aes-xcbc.c b/drivers/crypto/nx/nx-aes-xcbc.c
new file mode 100644
index 000000000000..93923e4628c0
--- /dev/null
+++ b/drivers/crypto/nx/nx-aes-xcbc.c
@@ -0,0 +1,236 @@
1/**
2 * AES XCBC routines supporting the Power 7+ Nest Accelerators driver
3 *
4 * Copyright (C) 2011-2012 International Business Machines Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 only.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 *
19 * Author: Kent Yoder <yoder1@us.ibm.com>
20 */
21
22#include <crypto/internal/hash.h>
23#include <crypto/aes.h>
24#include <crypto/algapi.h>
25#include <linux/module.h>
26#include <linux/types.h>
27#include <linux/crypto.h>
28#include <asm/vio.h>
29
30#include "nx_csbcpb.h"
31#include "nx.h"
32
33
34struct xcbc_state {
35 u8 state[AES_BLOCK_SIZE];
36 unsigned int count;
37 u8 buffer[AES_BLOCK_SIZE];
38};
39
40static int nx_xcbc_set_key(struct crypto_shash *desc,
41 const u8 *in_key,
42 unsigned int key_len)
43{
44 struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc);
45
46 switch (key_len) {
47 case AES_KEYSIZE_128:
48 nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128];
49 break;
50 default:
51 return -EINVAL;
52 }
53
54 memcpy(nx_ctx->priv.xcbc.key, in_key, key_len);
55
56 return 0;
57}
58
59static int nx_xcbc_init(struct shash_desc *desc)
60{
61 struct xcbc_state *sctx = shash_desc_ctx(desc);
62 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
63 struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
64 struct nx_sg *out_sg;
65
66 nx_ctx_init(nx_ctx, HCOP_FC_AES);
67
68 memset(sctx, 0, sizeof *sctx);
69
70 NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
71 csbcpb->cpb.hdr.mode = NX_MODE_AES_XCBC_MAC;
72
73 memcpy(csbcpb->cpb.aes_xcbc.key, nx_ctx->priv.xcbc.key, AES_BLOCK_SIZE);
 74	memset(nx_ctx->priv.xcbc.key, 0, sizeof(nx_ctx->priv.xcbc.key));
75
76 out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
77 AES_BLOCK_SIZE, nx_ctx->ap->sglen);
78 nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
79
80 return 0;
81}
82
83static int nx_xcbc_update(struct shash_desc *desc,
84 const u8 *data,
85 unsigned int len)
86{
87 struct xcbc_state *sctx = shash_desc_ctx(desc);
88 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
89 struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
90 struct nx_sg *in_sg;
91 u32 to_process, leftover;
92 int rc = 0;
93
94 if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
95 /* we've hit the nx chip previously and we're updating again,
96 * so copy over the partial digest */
97 memcpy(csbcpb->cpb.aes_xcbc.cv,
98 csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE);
99 }
100
101 /* 2 cases for total data len:
102 * 1: <= AES_BLOCK_SIZE: copy into state, return 0
103 * 2: > AES_BLOCK_SIZE: process X blocks, copy in leftover
104 */
105 if (len + sctx->count <= AES_BLOCK_SIZE) {
106 memcpy(sctx->buffer + sctx->count, data, len);
107 sctx->count += len;
108 goto out;
109 }
110
111 /* to_process: the AES_BLOCK_SIZE data chunk to process in this
112 * update */
113 to_process = (sctx->count + len) & ~(AES_BLOCK_SIZE - 1);
114 leftover = (sctx->count + len) & (AES_BLOCK_SIZE - 1);
115
116 /* the hardware will not accept a 0 byte operation for this algorithm
117 * and the operation MUST be finalized to be correct. So if we happen
118 * to get an update that falls on a block sized boundary, we must
119 * save off the last block to finalize with later. */
120 if (!leftover) {
121 to_process -= AES_BLOCK_SIZE;
122 leftover = AES_BLOCK_SIZE;
123 }
124
125 if (sctx->count) {
126 in_sg = nx_build_sg_list(nx_ctx->in_sg, sctx->buffer,
127 sctx->count, nx_ctx->ap->sglen);
128 in_sg = nx_build_sg_list(in_sg, (u8 *)data,
129 to_process - sctx->count,
130 nx_ctx->ap->sglen);
131 nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
132 sizeof(struct nx_sg);
133 } else {
134 in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)data, to_process,
135 nx_ctx->ap->sglen);
136 nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
137 sizeof(struct nx_sg);
138 }
139
140 NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
141
142 if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
143 rc = -EINVAL;
144 goto out;
145 }
146
147 rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
148 desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
149 if (rc)
150 goto out;
151
152 atomic_inc(&(nx_ctx->stats->aes_ops));
153
154 /* copy the leftover back into the state struct */
155 memcpy(sctx->buffer, data + len - leftover, leftover);
156 sctx->count = leftover;
157
158 /* everything after the first update is continuation */
159 NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
160out:
161 return rc;
162}
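
/* Worked example (not part of the patch): with sctx->count = 10 buffered
 * bytes and a 54-byte update, the 64-byte total gives to_process = 64 and
 * leftover = 0; because XCBC must be finalized on real data, the code above
 * backs off one block to to_process = 48 and carries leftover = 16 bytes in
 * sctx->buffer for the final operation. */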
163
164static int nx_xcbc_final(struct shash_desc *desc, u8 *out)
165{
166 struct xcbc_state *sctx = shash_desc_ctx(desc);
167 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
168 struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
169 struct nx_sg *in_sg, *out_sg;
170 int rc = 0;
171
172 if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
173 /* we've hit the nx chip previously, now we're finalizing,
174 * so copy over the partial digest */
175 memcpy(csbcpb->cpb.aes_xcbc.cv,
176 csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE);
177 } else if (sctx->count == 0) {
178 /* we've never seen an update, so this is a 0 byte op. The
179 * hardware cannot handle a 0 byte op, so just copy out the
180 * known 0 byte result. This is cheaper than allocating a
181 * software context to do a 0 byte op */
182 u8 data[] = { 0x75, 0xf0, 0x25, 0x1d, 0x52, 0x8a, 0xc0, 0x1c,
183 0x45, 0x73, 0xdf, 0xd5, 0x84, 0xd7, 0x9f, 0x29 };
184 memcpy(out, data, sizeof(data));
185 goto out;
186 }
187
188 /* final is represented by continuing the operation and indicating that
189 * this is not an intermediate operation */
190 NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
191
192 in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buffer,
193 sctx->count, nx_ctx->ap->sglen);
194 out_sg = nx_build_sg_list(nx_ctx->out_sg, out, AES_BLOCK_SIZE,
195 nx_ctx->ap->sglen);
196
197 nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
198 nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
199
200 if (!nx_ctx->op.outlen) {
201 rc = -EINVAL;
202 goto out;
203 }
204
205 rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
206 desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
207 if (rc)
208 goto out;
209
210 atomic_inc(&(nx_ctx->stats->aes_ops));
211
212 memcpy(out, csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE);
213out:
214 return rc;
215}
216
217struct shash_alg nx_shash_aes_xcbc_alg = {
218 .digestsize = AES_BLOCK_SIZE,
219 .init = nx_xcbc_init,
220 .update = nx_xcbc_update,
221 .final = nx_xcbc_final,
222 .setkey = nx_xcbc_set_key,
223 .descsize = sizeof(struct xcbc_state),
224 .statesize = sizeof(struct xcbc_state),
225 .base = {
226 .cra_name = "xcbc(aes)",
227 .cra_driver_name = "xcbc-aes-nx",
228 .cra_priority = 300,
229 .cra_flags = CRYPTO_ALG_TYPE_SHASH,
230 .cra_blocksize = AES_BLOCK_SIZE,
231 .cra_module = THIS_MODULE,
232 .cra_ctxsize = sizeof(struct nx_crypto_ctx),
233 .cra_init = nx_crypto_ctx_aes_xcbc_init,
234 .cra_exit = nx_crypto_ctx_exit,
235 }
236};
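
Finally for the AES family, a sketch (not part of the patch) of computing a MAC with "xcbc(aes)" through the shash API; the helper name and key are hypothetical:

#include <crypto/hash.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/slab.h>

static int example_xcbc_mac(const u8 *data, unsigned int len, u8 *mac)
{
	u8 key[16] = { 0 };		/* hypothetical all-zero AES-128 key */
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	int rc;

	tfm = crypto_alloc_shash("xcbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	rc = crypto_shash_setkey(tfm, key, sizeof(key));
	if (rc)
		goto free_tfm;

	/* shash descriptors carry per-tfm state after the struct itself */
	desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
	if (!desc) {
		rc = -ENOMEM;
		goto free_tfm;
	}

	desc->tfm = tfm;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	/* init + update + final in one call; mac must hold 16 bytes */
	rc = crypto_shash_digest(desc, data, len, mac);

	kfree(desc);
free_tfm:
	crypto_free_shash(tfm);
	return rc;
}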
diff --git a/drivers/crypto/nx/nx-sha256.c b/drivers/crypto/nx/nx-sha256.c
new file mode 100644
index 000000000000..9767315f8c0b
--- /dev/null
+++ b/drivers/crypto/nx/nx-sha256.c
@@ -0,0 +1,246 @@
1/**
2 * SHA-256 routines supporting the Power 7+ Nest Accelerators driver
3 *
4 * Copyright (C) 2011-2012 International Business Machines Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 only.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 *
19 * Author: Kent Yoder <yoder1@us.ibm.com>
20 */
21
22#include <crypto/internal/hash.h>
23#include <crypto/sha.h>
24#include <linux/module.h>
25#include <asm/vio.h>
26
27#include "nx_csbcpb.h"
28#include "nx.h"
29
30
31static int nx_sha256_init(struct shash_desc *desc)
32{
33 struct sha256_state *sctx = shash_desc_ctx(desc);
34 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
35 struct nx_sg *out_sg;
36
37 nx_ctx_init(nx_ctx, HCOP_FC_SHA);
38
39 memset(sctx, 0, sizeof *sctx);
40
41 nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA256];
42
43 NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA256);
44 out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
45 SHA256_DIGEST_SIZE, nx_ctx->ap->sglen);
46 nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
47
48 return 0;
49}
50
51static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
52 unsigned int len)
53{
54 struct sha256_state *sctx = shash_desc_ctx(desc);
55 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
56 struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
57 struct nx_sg *in_sg;
58 u64 to_process, leftover;
59 int rc = 0;
60
61 if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
62 /* we've hit the nx chip previously and we're updating again,
63 * so copy over the partial digest */
64 memcpy(csbcpb->cpb.sha256.input_partial_digest,
65 csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE);
66 }
67
68 /* 2 cases for total data len:
69 * 1: <= SHA256_BLOCK_SIZE: copy into state, return 0
70 * 2: > SHA256_BLOCK_SIZE: process X blocks, copy in leftover
71 */
72 if (len + sctx->count <= SHA256_BLOCK_SIZE) {
73 memcpy(sctx->buf + sctx->count, data, len);
74 sctx->count += len;
75 goto out;
76 }
77
78 /* to_process: the SHA256_BLOCK_SIZE data chunk to process in this
79 * update */
80 to_process = (sctx->count + len) & ~(SHA256_BLOCK_SIZE - 1);
81 leftover = (sctx->count + len) & (SHA256_BLOCK_SIZE - 1);
82
83 if (sctx->count) {
84 in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buf,
85 sctx->count, nx_ctx->ap->sglen);
86 in_sg = nx_build_sg_list(in_sg, (u8 *)data,
87 to_process - sctx->count,
88 nx_ctx->ap->sglen);
89 nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
90 sizeof(struct nx_sg);
91 } else {
92 in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)data,
93 to_process, nx_ctx->ap->sglen);
94 nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
95 sizeof(struct nx_sg);
96 }
97
98 NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
99
100 if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
101 rc = -EINVAL;
102 goto out;
103 }
104
105 rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
106 desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
107 if (rc)
108 goto out;
109
110 atomic_inc(&(nx_ctx->stats->sha256_ops));
111
112 /* copy the leftover back into the state struct */
113 memcpy(sctx->buf, data + len - leftover, leftover);
114 sctx->count = leftover;
115
116 csbcpb->cpb.sha256.message_bit_length += (u64)
117 (csbcpb->cpb.sha256.spbc * 8);
118
119 /* everything after the first update is continuation */
120 NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
121out:
122 return rc;
123}
124
125static int nx_sha256_final(struct shash_desc *desc, u8 *out)
126{
127 struct sha256_state *sctx = shash_desc_ctx(desc);
128 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
129 struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
130 struct nx_sg *in_sg, *out_sg;
131 int rc;
132
133 if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
134 /* we've hit the nx chip previously, now we're finalizing,
135 * so copy over the partial digest */
136 memcpy(csbcpb->cpb.sha256.input_partial_digest,
137 csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE);
138 }
139
140 /* final is represented by continuing the operation and indicating that
141 * this is not an intermediate operation */
142 NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
143
144 csbcpb->cpb.sha256.message_bit_length += (u64)(sctx->count * 8);
145
146 in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buf,
147 sctx->count, nx_ctx->ap->sglen);
148 out_sg = nx_build_sg_list(nx_ctx->out_sg, out, SHA256_DIGEST_SIZE,
149 nx_ctx->ap->sglen);
150 nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
151 nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
152
153 if (!nx_ctx->op.outlen) {
154 rc = -EINVAL;
155 goto out;
156 }
157
158 rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
159 desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
160 if (rc)
161 goto out;
162
163 atomic_inc(&(nx_ctx->stats->sha256_ops));
164
165 atomic64_add(csbcpb->cpb.sha256.message_bit_length,
166 &(nx_ctx->stats->sha256_bytes));
167 memcpy(out, csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE);
168out:
169 return rc;
170}
171
172static int nx_sha256_export(struct shash_desc *desc, void *out)
173{
174 struct sha256_state *sctx = shash_desc_ctx(desc);
175 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
176 struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
177 struct sha256_state *octx = out;
178
179 octx->count = sctx->count +
180 (csbcpb->cpb.sha256.message_bit_length / 8);
181 memcpy(octx->buf, sctx->buf, sizeof(octx->buf));
182
183 /* if no data has been processed yet, we need to export SHA256's
184 * initial data, in case this context gets imported into a software
185 * context */
186 if (csbcpb->cpb.sha256.message_bit_length)
187 memcpy(octx->state, csbcpb->cpb.sha256.message_digest,
188 SHA256_DIGEST_SIZE);
189 else {
190 octx->state[0] = SHA256_H0;
191 octx->state[1] = SHA256_H1;
192 octx->state[2] = SHA256_H2;
193 octx->state[3] = SHA256_H3;
194 octx->state[4] = SHA256_H4;
195 octx->state[5] = SHA256_H5;
196 octx->state[6] = SHA256_H6;
197 octx->state[7] = SHA256_H7;
198 }
199
200 return 0;
201}
202
203static int nx_sha256_import(struct shash_desc *desc, const void *in)
204{
205 struct sha256_state *sctx = shash_desc_ctx(desc);
206 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
207 struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
208 const struct sha256_state *ictx = in;
209
210 memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));
211
212 sctx->count = ictx->count & 0x3f;
213 csbcpb->cpb.sha256.message_bit_length = (ictx->count & ~0x3f) * 8;
214
215 if (csbcpb->cpb.sha256.message_bit_length) {
216 memcpy(csbcpb->cpb.sha256.message_digest, ictx->state,
217 SHA256_DIGEST_SIZE);
218
219 NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
220 NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
221 }
222
223 return 0;
224}
225
226struct shash_alg nx_shash_sha256_alg = {
227 .digestsize = SHA256_DIGEST_SIZE,
228 .init = nx_sha256_init,
229 .update = nx_sha256_update,
230 .final = nx_sha256_final,
231 .export = nx_sha256_export,
232 .import = nx_sha256_import,
233 .descsize = sizeof(struct sha256_state),
234 .statesize = sizeof(struct sha256_state),
235 .base = {
236 .cra_name = "sha256",
237 .cra_driver_name = "sha256-nx",
238 .cra_priority = 300,
239 .cra_flags = CRYPTO_ALG_TYPE_SHASH,
240 .cra_blocksize = SHA256_BLOCK_SIZE,
241 .cra_module = THIS_MODULE,
242 .cra_ctxsize = sizeof(struct nx_crypto_ctx),
243 .cra_init = nx_crypto_ctx_sha_init,
244 .cra_exit = nx_crypto_ctx_exit,
245 }
246};
diff --git a/drivers/crypto/nx/nx-sha512.c b/drivers/crypto/nx/nx-sha512.c
new file mode 100644
index 000000000000..3177b8c3d5f1
--- /dev/null
+++ b/drivers/crypto/nx/nx-sha512.c
@@ -0,0 +1,265 @@
1/**
2 * SHA-512 routines supporting the Power7+ Nest Accelerators driver
3 *
4 * Copyright (C) 2011-2012 International Business Machines Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 only.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 *
19 * Author: Kent Yoder <yoder1@us.ibm.com>
20 */
21
22#include <crypto/internal/hash.h>
23#include <crypto/sha.h>
24#include <linux/module.h>
25#include <asm/vio.h>
26
27#include "nx_csbcpb.h"
28#include "nx.h"
29
30
31static int nx_sha512_init(struct shash_desc *desc)
32{
33 struct sha512_state *sctx = shash_desc_ctx(desc);
34 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
35 struct nx_sg *out_sg;
36
37 nx_ctx_init(nx_ctx, HCOP_FC_SHA);
38
39 memset(sctx, 0, sizeof *sctx);
40
41 nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA512];
42
43 NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA512);
44 out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
45 SHA512_DIGEST_SIZE, nx_ctx->ap->sglen);
46 nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
47
48 return 0;
49}
50
51static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
52 unsigned int len)
53{
54 struct sha512_state *sctx = shash_desc_ctx(desc);
55 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
56 struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
57 struct nx_sg *in_sg;
58 u64 to_process, leftover, spbc_bits;
59 int rc = 0;
60
61 if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
62 /* we've hit the nx chip previously and we're updating again,
63 * so copy over the partial digest */
64 memcpy(csbcpb->cpb.sha512.input_partial_digest,
65 csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE);
66 }
67
68 /* 2 cases for total data len:
69 * 1: <= SHA512_BLOCK_SIZE: copy into state, return 0
70 * 2: > SHA512_BLOCK_SIZE: process X blocks, copy in leftover
71 */
72 if ((u64)len + sctx->count[0] <= SHA512_BLOCK_SIZE) {
73 memcpy(sctx->buf + sctx->count[0], data, len);
74 sctx->count[0] += len;
75 goto out;
76 }
77
78 /* to_process: the SHA512_BLOCK_SIZE data chunk to process in this
79 * update */
80 to_process = (sctx->count[0] + len) & ~(SHA512_BLOCK_SIZE - 1);
81 leftover = (sctx->count[0] + len) & (SHA512_BLOCK_SIZE - 1);
82
83 if (sctx->count[0]) {
84 in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buf,
85 sctx->count[0], nx_ctx->ap->sglen);
86 in_sg = nx_build_sg_list(in_sg, (u8 *)data,
87 to_process - sctx->count[0],
88 nx_ctx->ap->sglen);
89 nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
90 sizeof(struct nx_sg);
91 } else {
92 in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)data,
93 to_process, nx_ctx->ap->sglen);
94 nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
95 sizeof(struct nx_sg);
96 }
97
98 NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
99
100 if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
101 rc = -EINVAL;
102 goto out;
103 }
104
105 rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
106 desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
107 if (rc)
108 goto out;
109
110 atomic_inc(&(nx_ctx->stats->sha512_ops));
111
112 /* copy the leftover back into the state struct */
113 memcpy(sctx->buf, data + len - leftover, leftover);
114 sctx->count[0] = leftover;
115
116 spbc_bits = csbcpb->cpb.sha512.spbc * 8;
117 csbcpb->cpb.sha512.message_bit_length_lo += spbc_bits;
118 if (csbcpb->cpb.sha512.message_bit_length_lo < spbc_bits)
119 csbcpb->cpb.sha512.message_bit_length_hi++;
120
121 /* everything after the first update is continuation */
122 NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
123out:
124 return rc;
125}
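/*
 * Editor's sketch (assumed helper, not part of the original patch): the
 * SHA-512 CPB keeps the message bit length as a 128-bit value split
 * across two u64 fields, and the code above uses the standard unsigned
 * overflow idiom to propagate the carry: after "lo += n", "lo < n" is
 * true iff the addition wrapped past 2^64. As a standalone helper it
 * would look like:
 *
 *	static inline void sha512_len_add(u64 *hi, u64 *lo, u64 nbits)
 *	{
 *		*lo += nbits;
 *		if (*lo < nbits)	(*hi)++;  ... wrapped, carry up
 *	}
 */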
126
127static int nx_sha512_final(struct shash_desc *desc, u8 *out)
128{
129 struct sha512_state *sctx = shash_desc_ctx(desc);
130 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
131 struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
132 struct nx_sg *in_sg, *out_sg;
133 u64 count0;
134 int rc;
135
136 if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
137 /* we've hit the nx chip previously, now we're finalizing,
138 * so copy over the partial digest */
139 memcpy(csbcpb->cpb.sha512.input_partial_digest,
140 csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE);
141 }
142
143 /* final is represented by continuing the operation and indicating that
144 * this is not an intermediate operation */
145 NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
146
147 count0 = sctx->count[0] * 8;
148
149 csbcpb->cpb.sha512.message_bit_length_lo += count0;
150 if (csbcpb->cpb.sha512.message_bit_length_lo < count0)
151 csbcpb->cpb.sha512.message_bit_length_hi++;
152
153 in_sg = nx_build_sg_list(nx_ctx->in_sg, sctx->buf, sctx->count[0],
154 nx_ctx->ap->sglen);
155 out_sg = nx_build_sg_list(nx_ctx->out_sg, out, SHA512_DIGEST_SIZE,
156 nx_ctx->ap->sglen);
157 nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
158 nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
159
160 if (!nx_ctx->op.outlen) {
161 rc = -EINVAL;
162 goto out;
163 }
164
165 rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
166 desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
167 if (rc)
168 goto out;
169
170 atomic_inc(&(nx_ctx->stats->sha512_ops));
171	atomic64_add(csbcpb->cpb.sha512.message_bit_length_lo / 8,
172		     &(nx_ctx->stats->sha512_bytes));
173
174 memcpy(out, csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE);
175out:
176 return rc;
177}
178
179static int nx_sha512_export(struct shash_desc *desc, void *out)
180{
181 struct sha512_state *sctx = shash_desc_ctx(desc);
182 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
183 struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
184 struct sha512_state *octx = out;
185
186	/* convert the 128-bit message_bit_length from bits to bytes and
187	 * store the result in count[] */
188 octx->count[0] = csbcpb->cpb.sha512.message_bit_length_lo >> 3 |
189 ((csbcpb->cpb.sha512.message_bit_length_hi & 7) << 61);
190 octx->count[1] = csbcpb->cpb.sha512.message_bit_length_hi >> 3;
191
192 octx->count[0] += sctx->count[0];
193 if (octx->count[0] < sctx->count[0])
194 octx->count[1]++;
195
196 memcpy(octx->buf, sctx->buf, sizeof(octx->buf));
197
198	/* if the nx chip has processed any data, export its partial digest;
199	 * otherwise export SHA512's initial values, in case this context
200	 * gets imported into a software context */
201 if (csbcpb->cpb.sha512.message_bit_length_hi ||
202 csbcpb->cpb.sha512.message_bit_length_lo)
203 memcpy(octx->state, csbcpb->cpb.sha512.message_digest,
204 SHA512_DIGEST_SIZE);
205 else {
206 octx->state[0] = SHA512_H0;
207 octx->state[1] = SHA512_H1;
208 octx->state[2] = SHA512_H2;
209 octx->state[3] = SHA512_H3;
210 octx->state[4] = SHA512_H4;
211 octx->state[5] = SHA512_H5;
212 octx->state[6] = SHA512_H6;
213 octx->state[7] = SHA512_H7;
214 }
215
216 return 0;
217}
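/*
 * Editor's note (hypothetical values, not from the original patch): the
 * conversion above divides the 128-bit bit count by 8 by shifting the
 * whole quantity right by 3, so the low three bits of the hi word become
 * the top three bits of count[0]. E.g. hi = 0x5, lo = 0x10 encodes
 * (5 << 64) + 16 bits, i.e. (5 << 61) + 2 bytes, and indeed
 * count[0] = (0x10 >> 3) | ((0x5 & 7) << 61) and count[1] = 0x5 >> 3 = 0.
 */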
218
219static int nx_sha512_import(struct shash_desc *desc, const void *in)
220{
221 struct sha512_state *sctx = shash_desc_ctx(desc);
222 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
223 struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
224 const struct sha512_state *ictx = in;
225
226 memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));
227 sctx->count[0] = ictx->count[0] & 0x3f;
228 csbcpb->cpb.sha512.message_bit_length_lo = (ictx->count[0] & ~0x3f)
229 << 3;
230 csbcpb->cpb.sha512.message_bit_length_hi = ictx->count[1] << 3 |
231 ictx->count[0] >> 61;
232
233 if (csbcpb->cpb.sha512.message_bit_length_hi ||
234 csbcpb->cpb.sha512.message_bit_length_lo) {
235 memcpy(csbcpb->cpb.sha512.message_digest, ictx->state,
236 SHA512_DIGEST_SIZE);
237
238 NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
239 NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
240 }
241
242 return 0;
243}
244
245struct shash_alg nx_shash_sha512_alg = {
246 .digestsize = SHA512_DIGEST_SIZE,
247 .init = nx_sha512_init,
248 .update = nx_sha512_update,
249 .final = nx_sha512_final,
250 .export = nx_sha512_export,
251 .import = nx_sha512_import,
252 .descsize = sizeof(struct sha512_state),
253 .statesize = sizeof(struct sha512_state),
254 .base = {
255 .cra_name = "sha512",
256 .cra_driver_name = "sha512-nx",
257 .cra_priority = 300,
258 .cra_flags = CRYPTO_ALG_TYPE_SHASH,
259 .cra_blocksize = SHA512_BLOCK_SIZE,
260 .cra_module = THIS_MODULE,
261 .cra_ctxsize = sizeof(struct nx_crypto_ctx),
262 .cra_init = nx_crypto_ctx_sha_init,
263 .cra_exit = nx_crypto_ctx_exit,
264 }
265};
diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c
new file mode 100644
index 000000000000..d7f179cc2e98
--- /dev/null
+++ b/drivers/crypto/nx/nx.c
@@ -0,0 +1,716 @@
1/**
2 * Routines supporting the Power7+ Nest Accelerators driver
3 *
4 * Copyright (C) 2011-2012 International Business Machines Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 only.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 *
19 * Author: Kent Yoder <yoder1@us.ibm.com>
20 */
21
22#include <crypto/internal/hash.h>
23#include <crypto/hash.h>
24#include <crypto/aes.h>
25#include <crypto/sha.h>
26#include <crypto/algapi.h>
27#include <crypto/scatterwalk.h>
28#include <linux/module.h>
29#include <linux/moduleparam.h>
30#include <linux/types.h>
31#include <linux/mm.h>
32#include <linux/crypto.h>
33#include <linux/scatterlist.h>
34#include <linux/device.h>
35#include <linux/of.h>
36#include <asm/pSeries_reconfig.h>
37#include <asm/abs_addr.h>
38#include <asm/hvcall.h>
39#include <asm/vio.h>
40
41#include "nx_csbcpb.h"
42#include "nx.h"
43
44
45/**
46 * nx_hcall_sync - make an H_COP_OP hcall for the passed in op structure
47 *
48 * @nx_ctx: the crypto context handle
49 * @op: PFO operation struct to pass in
50 * @may_sleep: flag indicating the request can sleep
51 *
52 * Make the hcall, retrying while the hardware is busy. If we cannot yield
53 * the thread, limit the number of retries to 10 here.
54 */
55int nx_hcall_sync(struct nx_crypto_ctx *nx_ctx,
56 struct vio_pfo_op *op,
57 u32 may_sleep)
58{
59 int rc, retries = 10;
60 struct vio_dev *viodev = nx_driver.viodev;
61
62 atomic_inc(&(nx_ctx->stats->sync_ops));
63
64 do {
65 rc = vio_h_cop_sync(viodev, op);
66 } while ((rc == -EBUSY && !may_sleep && retries--) ||
67 (rc == -EBUSY && may_sleep && cond_resched()));
68
69 if (rc) {
70 dev_dbg(&viodev->dev, "vio_h_cop_sync failed: rc: %d "
71 "hcall rc: %ld\n", rc, op->hcall_err);
72 atomic_inc(&(nx_ctx->stats->errors));
73 atomic_set(&(nx_ctx->stats->last_error), op->hcall_err);
74 atomic_set(&(nx_ctx->stats->last_error_pid), current->pid);
75 }
76
77 return rc;
78}
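/*
 * Editor's note (describing the loop above, not new behavior): retries
 * continue while phyp reports -EBUSY. In atomic context at most 10
 * retries are made after the first attempt; in sleepable context a retry
 * happens only when cond_resched() actually yielded the cpu, otherwise
 * the -EBUSY is returned to the caller to handle.
 */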
79
80/**
81 * nx_build_sg_list - build an NX scatter list describing a single buffer
82 *
83 * @sg_head: pointer to the first scatter list element to build
84 * @start_addr: pointer to the linear buffer
85 * @len: length of the data at @start_addr
86 * @sgmax: the largest number of scatter list elements we're allowed to create
87 *
88 * This function will start writing nx_sg elements at @sg_head and keep
89 * writing them until all of the data from @start_addr is described or
90 * until sgmax elements have been written. Scatter list elements will be
91 * created such that none of the elements describes a buffer that crosses a 4K
92 * boundary.
93 */
94struct nx_sg *nx_build_sg_list(struct nx_sg *sg_head,
95 u8 *start_addr,
96 unsigned int len,
97 u32 sgmax)
98{
99 unsigned int sg_len = 0;
100 struct nx_sg *sg;
101 u64 sg_addr = (u64)start_addr;
102 u64 end_addr;
103
104 /* determine the start and end for this address range - slightly
105 * different if this is in VMALLOC_REGION */
106 if (is_vmalloc_addr(start_addr))
107 sg_addr = phys_to_abs(page_to_phys(vmalloc_to_page(start_addr)))
108 + offset_in_page(sg_addr);
109 else
110 sg_addr = virt_to_abs(sg_addr);
111
112 end_addr = sg_addr + len;
113
114 /* each iteration will write one struct nx_sg element and add the
115 * length of data described by that element to sg_len. Once @len bytes
116 * have been described (or @sgmax elements have been written), the
117 * loop ends. min_t clamps each element to the end of the 4K page that
118 * sg_addr falls in; any data past that page boundary gets its own
119 * nx_sg element on the next iteration */
120 for (sg = sg_head; sg_len < len; sg++) {
121 sg->addr = sg_addr;
122 sg_addr = min_t(u64, NX_PAGE_NUM(sg_addr + NX_PAGE_SIZE), end_addr);
123 sg->len = sg_addr - sg->addr;
124 sg_len += sg->len;
125
126 if ((sg - sg_head) == sgmax) {
127 pr_err("nx: scatter/gather list overflow, pid: %d\n",
128 current->pid);
129 return NULL;
130 }
131 }
132
133 /* return the moved sg_head pointer */
134 return sg;
135}
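/*
 * Editor's sketch (hypothetical numbers, not from the original patch):
 * for a 6000-byte buffer at absolute address 0x1000f00, the loop above
 * emits three page-clipped nx_sg entries:
 *
 *	{ .addr = 0x1000f00, .len = 0x100  }	up to the 0x1001000 boundary
 *	{ .addr = 0x1001000, .len = 0x1000 }	one full 4K page
 *	{ .addr = 0x1002000, .len = 0x670  }	the remaining 1648 bytes
 */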
136
137/**
138 * nx_walk_and_build - walk a linux scatterlist and build an nx scatterlist
139 *
140 * @nx_dst: pointer to the first nx_sg element to write
141 * @sglen: max number of nx_sg entries we're allowed to write
142 * @sg_src: pointer to the source linux scatterlist to walk
143 * @start: number of bytes to fast-forward past at the beginning of @sg_src
144 * @src_len: number of bytes to walk in @sg_src
145 */
146struct nx_sg *nx_walk_and_build(struct nx_sg *nx_dst,
147 unsigned int sglen,
148 struct scatterlist *sg_src,
149 unsigned int start,
150 unsigned int src_len)
151{
152 struct scatter_walk walk;
153 struct nx_sg *nx_sg = nx_dst;
154 unsigned int n, offset = 0, len = src_len;
155 char *dst;
156
157 /* we need to fast forward through @start bytes first */
158 for (;;) {
159 scatterwalk_start(&walk, sg_src);
160
161 if (start < offset + sg_src->length)
162 break;
163
164 offset += sg_src->length;
165 sg_src = scatterwalk_sg_next(sg_src);
166 }
167
168 /* start - offset is the number of bytes to advance in the scatterlist
169 * element we're currently looking at */
170 scatterwalk_advance(&walk, start - offset);
171
172 while (len && nx_sg) {
173 n = scatterwalk_clamp(&walk, len);
174 if (!n) {
175 scatterwalk_start(&walk, sg_next(walk.sg));
176 n = scatterwalk_clamp(&walk, len);
177 }
178 dst = scatterwalk_map(&walk);
179
180 nx_sg = nx_build_sg_list(nx_sg, dst, n, sglen);
181 len -= n;
182
183 scatterwalk_unmap(dst);
184 scatterwalk_advance(&walk, n);
185 scatterwalk_done(&walk, SCATTERWALK_FROM_SG, len);
186 }
187
188 /* return the moved destination pointer */
189 return nx_sg;
190}
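/*
 * Editor's sketch (hypothetical call site, not from the original patch):
 * to describe bytes 16..79 of a request's source scatterlist, a caller
 * could do:
 *
 *	struct nx_sg *end = nx_walk_and_build(nx_ctx->in_sg,
 *					      nx_ctx->ap->sglen,
 *					      req->src, 16, 64);
 *	nx_ctx->op.inlen = (nx_ctx->in_sg - end) * sizeof(struct nx_sg);
 */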
191
192/**
193 * nx_build_sg_lists - walk the input scatterlists and build arrays of NX
194 * scatterlists based on them.
195 *
196 * @nx_ctx: NX crypto context for the lists we're building
197 * @desc: the block cipher descriptor for the operation
198 * @dst: destination scatterlist
199 * @src: source scatterlist
200 * @nbytes: length of data described in the scatterlists
201 * @iv: destination for the iv data, if the algorithm requires it
202 *
203 * This is common code shared by all the AES algorithms. It uses the block
204 * cipher walk routines to traverse input and output scatterlists, building
205 * corresponding NX scatterlists
206 */
207int nx_build_sg_lists(struct nx_crypto_ctx *nx_ctx,
208 struct blkcipher_desc *desc,
209 struct scatterlist *dst,
210 struct scatterlist *src,
211 unsigned int nbytes,
212 u8 *iv)
213{
214 struct nx_sg *nx_insg = nx_ctx->in_sg;
215 struct nx_sg *nx_outsg = nx_ctx->out_sg;
216 struct blkcipher_walk walk;
217 int rc;
218
219 blkcipher_walk_init(&walk, dst, src, nbytes);
220 rc = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
221 if (rc)
222 goto out;
223
224 if (iv)
225 memcpy(iv, walk.iv, AES_BLOCK_SIZE);
226
227 while (walk.nbytes) {
228 nx_insg = nx_build_sg_list(nx_insg, walk.src.virt.addr,
229 walk.nbytes, nx_ctx->ap->sglen);
230 nx_outsg = nx_build_sg_list(nx_outsg, walk.dst.virt.addr,
231 walk.nbytes, nx_ctx->ap->sglen);
232
233 rc = blkcipher_walk_done(desc, &walk, 0);
234 if (rc)
235 break;
236 }
237
238 if (walk.nbytes) {
239 nx_insg = nx_build_sg_list(nx_insg, walk.src.virt.addr,
240 walk.nbytes, nx_ctx->ap->sglen);
241 nx_outsg = nx_build_sg_list(nx_outsg, walk.dst.virt.addr,
242 walk.nbytes, nx_ctx->ap->sglen);
243
244 rc = 0;
245 }
246
247 /* these lengths should be negative, which will indicate to phyp that
248 * the input and output parameters are scatterlists, not linear
249 * buffers */
250 nx_ctx->op.inlen = (nx_ctx->in_sg - nx_insg) * sizeof(struct nx_sg);
251 nx_ctx->op.outlen = (nx_ctx->out_sg - nx_outsg) * sizeof(struct nx_sg);
252out:
253 return rc;
254}
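/*
 * Editor's note (spelling out the sign convention above): in_sg points at
 * the head of the array and nx_insg at one past the last element written,
 * so (in_sg - nx_insg) is the negated element count and inlen ends up as
 * -(nentries * sizeof(struct nx_sg)). The negative value is what tells
 * phyp the parameter is an NX scatterlist rather than a linear buffer.
 */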
255
256/**
257 * nx_ctx_init - initialize an nx_ctx's vio_pfo_op struct
258 *
259 * @nx_ctx: the nx context to initialize
260 * @function: the function code for the op
261 */
262void nx_ctx_init(struct nx_crypto_ctx *nx_ctx, unsigned int function)
263{
264 memset(nx_ctx->kmem, 0, nx_ctx->kmem_len);
265 nx_ctx->csbcpb->csb.valid |= NX_CSB_VALID_BIT;
266
267 nx_ctx->op.flags = function;
268 nx_ctx->op.csbcpb = virt_to_abs(nx_ctx->csbcpb);
269 nx_ctx->op.in = virt_to_abs(nx_ctx->in_sg);
270 nx_ctx->op.out = virt_to_abs(nx_ctx->out_sg);
271
272 if (nx_ctx->csbcpb_aead) {
273 nx_ctx->csbcpb_aead->csb.valid |= NX_CSB_VALID_BIT;
274
275 nx_ctx->op_aead.flags = function;
276 nx_ctx->op_aead.csbcpb = virt_to_abs(nx_ctx->csbcpb_aead);
277 nx_ctx->op_aead.in = virt_to_abs(nx_ctx->in_sg);
278 nx_ctx->op_aead.out = virt_to_abs(nx_ctx->out_sg);
279 }
280}
281
282static void nx_of_update_status(struct device *dev,
283 struct property *p,
284 struct nx_of *props)
285{
286 if (!strncmp(p->value, "okay", p->length)) {
287 props->status = NX_WAITING;
288 props->flags |= NX_OF_FLAG_STATUS_SET;
289 } else {
290 dev_info(dev, "%s: status '%s' is not 'okay'\n", __func__,
291 (char *)p->value);
292 }
293}
294
295static void nx_of_update_sglen(struct device *dev,
296 struct property *p,
297 struct nx_of *props)
298{
299 if (p->length != sizeof(props->max_sg_len)) {
300 dev_err(dev, "%s: unexpected format for "
301 "ibm,max-sg-len property\n", __func__);
302 dev_dbg(dev, "%s: ibm,max-sg-len is %d bytes "
303 "long, expected %zd bytes\n", __func__,
304 p->length, sizeof(props->max_sg_len));
305 return;
306 }
307
308 props->max_sg_len = *(u32 *)p->value;
309 props->flags |= NX_OF_FLAG_MAXSGLEN_SET;
310}
311
312static void nx_of_update_msc(struct device *dev,
313 struct property *p,
314 struct nx_of *props)
315{
316 struct msc_triplet *trip;
317 struct max_sync_cop *msc;
318 unsigned int bytes_so_far, i, lenp;
319
320 msc = (struct max_sync_cop *)p->value;
321 lenp = p->length;
322
323	/* The property's total size alone can't tell us whether the data is
324	 * sane, because lengths are embedded in the data structure itself.
325	 * The best we can do is validate those lengths as we parse and bail
326	 * as soon as an inconsistency is detected. */
327 bytes_so_far = 0;
328
329 while ((bytes_so_far + sizeof(struct max_sync_cop)) <= lenp) {
330 bytes_so_far += sizeof(struct max_sync_cop);
331
332 trip = msc->trip;
333
334 for (i = 0;
335 ((bytes_so_far + sizeof(struct msc_triplet)) <= lenp) &&
336 i < msc->triplets;
337 i++) {
338 if (msc->fc > NX_MAX_FC || msc->mode > NX_MAX_MODE) {
339 dev_err(dev, "unknown function code/mode "
340 "combo: %d/%d (ignored)\n", msc->fc,
341 msc->mode);
342 goto next_loop;
343 }
344
345 switch (trip->keybitlen) {
346 case 128:
347 case 160:
348 props->ap[msc->fc][msc->mode][0].databytelen =
349 trip->databytelen;
350 props->ap[msc->fc][msc->mode][0].sglen =
351 trip->sglen;
352 break;
353 case 192:
354 props->ap[msc->fc][msc->mode][1].databytelen =
355 trip->databytelen;
356 props->ap[msc->fc][msc->mode][1].sglen =
357 trip->sglen;
358 break;
359 case 256:
360 if (msc->fc == NX_FC_AES) {
361 props->ap[msc->fc][msc->mode][2].
362 databytelen = trip->databytelen;
363 props->ap[msc->fc][msc->mode][2].sglen =
364 trip->sglen;
365 } else if (msc->fc == NX_FC_AES_HMAC ||
366 msc->fc == NX_FC_SHA) {
367 props->ap[msc->fc][msc->mode][1].
368 databytelen = trip->databytelen;
369 props->ap[msc->fc][msc->mode][1].sglen =
370 trip->sglen;
371 } else {
372 dev_warn(dev, "unknown function "
373 "code/key bit len combo"
374 ": (%u/256)\n", msc->fc);
375 }
376 break;
377 case 512:
378 props->ap[msc->fc][msc->mode][2].databytelen =
379 trip->databytelen;
380 props->ap[msc->fc][msc->mode][2].sglen =
381 trip->sglen;
382 break;
383 default:
384 dev_warn(dev, "unknown function code/key bit "
385 "len combo: (%u/%u)\n", msc->fc,
386 trip->keybitlen);
387 break;
388 }
389next_loop:
390 bytes_so_far += sizeof(struct msc_triplet);
391 trip++;
392 }
393
394 msc = (struct max_sync_cop *)trip;
395 }
396
397 props->flags |= NX_OF_FLAG_MAXSYNCCOP_SET;
398}
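/*
 * Editor's sketch (layout inferred from the parser above): the
 * ibm,max-sync-cop property is a packed sequence of max_sync_cop headers,
 * each immediately followed by its variable-length triplet array:
 *
 *	{ fc, mode, triplets = N }
 *	{ keybitlen, databytelen, sglen }	x N
 *	{ fc, mode, triplets = M }
 *	{ keybitlen, databytelen, sglen }	x M
 *	...
 */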
399
400/**
401 * nx_of_init - read OpenFirmware values from the device tree
402 *
403 * @dev: device handle
404 * @props: pointer to struct to hold the properties values
405 *
406 * Called once at driver probe time, this function reads out the
407 * OpenFirmware properties we use at runtime. If all of the OF properties
408 * are acceptable, props->flags will indicate on exit that we're ready to
409 * register our crypto algorithms.
410 */
411static void nx_of_init(struct device *dev, struct nx_of *props)
412{
413 struct device_node *base_node = dev->of_node;
414 struct property *p;
415
416 p = of_find_property(base_node, "status", NULL);
417 if (!p)
418 dev_info(dev, "%s: property 'status' not found\n", __func__);
419 else
420 nx_of_update_status(dev, p, props);
421
422 p = of_find_property(base_node, "ibm,max-sg-len", NULL);
423 if (!p)
424 dev_info(dev, "%s: property 'ibm,max-sg-len' not found\n",
425 __func__);
426 else
427 nx_of_update_sglen(dev, p, props);
428
429 p = of_find_property(base_node, "ibm,max-sync-cop", NULL);
430 if (!p)
431 dev_info(dev, "%s: property 'ibm,max-sync-cop' not found\n",
432 __func__);
433 else
434 nx_of_update_msc(dev, p, props);
435}
436
437/**
438 * nx_register_algs - register algorithms with the crypto API
439 *
440 * Called from nx_probe()
441 *
442 * If all OF properties are in an acceptable state, the driver flags will
443 * indicate that we're ready and we'll create our debugfs files and register
444 * our crypto algorithms.
445 */
446static int nx_register_algs(void)
447{
448	int rc = -ENODEV;
449
450 if (nx_driver.of.flags != NX_OF_FLAG_MASK_READY)
451 goto out;
452
453 memset(&nx_driver.stats, 0, sizeof(struct nx_stats));
454
455 rc = NX_DEBUGFS_INIT(&nx_driver);
456 if (rc)
457 goto out;
458
459 rc = crypto_register_alg(&nx_ecb_aes_alg);
460 if (rc)
461 goto out;
462
463 rc = crypto_register_alg(&nx_cbc_aes_alg);
464 if (rc)
465 goto out_unreg_ecb;
466
467 rc = crypto_register_alg(&nx_ctr_aes_alg);
468 if (rc)
469 goto out_unreg_cbc;
470
471 rc = crypto_register_alg(&nx_ctr3686_aes_alg);
472 if (rc)
473 goto out_unreg_ctr;
474
475 rc = crypto_register_alg(&nx_gcm_aes_alg);
476 if (rc)
477 goto out_unreg_ctr3686;
478
479 rc = crypto_register_alg(&nx_gcm4106_aes_alg);
480 if (rc)
481 goto out_unreg_gcm;
482
483 rc = crypto_register_alg(&nx_ccm_aes_alg);
484 if (rc)
485 goto out_unreg_gcm4106;
486
487 rc = crypto_register_alg(&nx_ccm4309_aes_alg);
488 if (rc)
489 goto out_unreg_ccm;
490
491 rc = crypto_register_shash(&nx_shash_sha256_alg);
492 if (rc)
493 goto out_unreg_ccm4309;
494
495 rc = crypto_register_shash(&nx_shash_sha512_alg);
496 if (rc)
497 goto out_unreg_s256;
498
499 rc = crypto_register_shash(&nx_shash_aes_xcbc_alg);
500 if (rc)
501 goto out_unreg_s512;
502
503 nx_driver.of.status = NX_OKAY;
504
505 goto out;
506
507out_unreg_s512:
508 crypto_unregister_shash(&nx_shash_sha512_alg);
509out_unreg_s256:
510 crypto_unregister_shash(&nx_shash_sha256_alg);
511out_unreg_ccm4309:
512 crypto_unregister_alg(&nx_ccm4309_aes_alg);
513out_unreg_ccm:
514 crypto_unregister_alg(&nx_ccm_aes_alg);
515out_unreg_gcm4106:
516 crypto_unregister_alg(&nx_gcm4106_aes_alg);
517out_unreg_gcm:
518 crypto_unregister_alg(&nx_gcm_aes_alg);
519out_unreg_ctr3686:
520 crypto_unregister_alg(&nx_ctr3686_aes_alg);
521out_unreg_ctr:
522 crypto_unregister_alg(&nx_ctr_aes_alg);
523out_unreg_cbc:
524 crypto_unregister_alg(&nx_cbc_aes_alg);
525out_unreg_ecb:
526 crypto_unregister_alg(&nx_ecb_aes_alg);
527out:
528 return rc;
529}
530
531/**
532 * nx_crypto_ctx_init - create and initialize a crypto api context
533 *
534 * @nx_ctx: the crypto api context
535 * @fc: function code for the context
536 * @mode: the function code specific mode for this context
537 */
538static int nx_crypto_ctx_init(struct nx_crypto_ctx *nx_ctx, u32 fc, u32 mode)
539{
540 if (nx_driver.of.status != NX_OKAY) {
541 pr_err("Attempt to initialize NX crypto context while device "
542 "is not available!\n");
543 return -ENODEV;
544 }
545
546 /* we need an extra page for csbcpb_aead for these modes */
547 if (mode == NX_MODE_AES_GCM || mode == NX_MODE_AES_CCM)
548 nx_ctx->kmem_len = (4 * NX_PAGE_SIZE) +
549 sizeof(struct nx_csbcpb);
550 else
551 nx_ctx->kmem_len = (3 * NX_PAGE_SIZE) +
552 sizeof(struct nx_csbcpb);
553
554 nx_ctx->kmem = kmalloc(nx_ctx->kmem_len, GFP_KERNEL);
555 if (!nx_ctx->kmem)
556 return -ENOMEM;
557
558 /* the csbcpb and scatterlists must be 4K aligned pages */
559 nx_ctx->csbcpb = (struct nx_csbcpb *)(round_up((u64)nx_ctx->kmem,
560 (u64)NX_PAGE_SIZE));
561 nx_ctx->in_sg = (struct nx_sg *)((u8 *)nx_ctx->csbcpb + NX_PAGE_SIZE);
562 nx_ctx->out_sg = (struct nx_sg *)((u8 *)nx_ctx->in_sg + NX_PAGE_SIZE);
563
564 if (mode == NX_MODE_AES_GCM || mode == NX_MODE_AES_CCM)
565 nx_ctx->csbcpb_aead =
566 (struct nx_csbcpb *)((u8 *)nx_ctx->out_sg +
567 NX_PAGE_SIZE);
568
569 /* give each context a pointer to global stats and their OF
570 * properties */
571 nx_ctx->stats = &nx_driver.stats;
572 memcpy(nx_ctx->props, nx_driver.of.ap[fc][mode],
573 sizeof(struct alg_props) * 3);
574
575 return 0;
576}
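/*
 * Editor's sketch of the kmem layout set up above (GCM/CCM case): the
 * kmalloc'd region is over-sized so that each structure can be floated
 * up to the next 4K boundary:
 *
 *	kmem		unaligned start, kmem_len bytes total
 *	csbcpb		round_up(kmem, 4K)
 *	in_sg		csbcpb + 4K
 *	out_sg		in_sg  + 4K
 *	csbcpb_aead	out_sg + 4K	(GCM/CCM only)
 */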
577
578/* entry points from the crypto tfm initializers */
579int nx_crypto_ctx_aes_ccm_init(struct crypto_tfm *tfm)
580{
581 return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
582 NX_MODE_AES_CCM);
583}
584
585int nx_crypto_ctx_aes_gcm_init(struct crypto_tfm *tfm)
586{
587 return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
588 NX_MODE_AES_GCM);
589}
590
591int nx_crypto_ctx_aes_ctr_init(struct crypto_tfm *tfm)
592{
593 return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
594 NX_MODE_AES_CTR);
595}
596
597int nx_crypto_ctx_aes_cbc_init(struct crypto_tfm *tfm)
598{
599 return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
600 NX_MODE_AES_CBC);
601}
602
603int nx_crypto_ctx_aes_ecb_init(struct crypto_tfm *tfm)
604{
605 return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
606 NX_MODE_AES_ECB);
607}
608
609int nx_crypto_ctx_sha_init(struct crypto_tfm *tfm)
610{
611 return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_SHA, NX_MODE_SHA);
612}
613
614int nx_crypto_ctx_aes_xcbc_init(struct crypto_tfm *tfm)
615{
616 return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
617 NX_MODE_AES_XCBC_MAC);
618}
619
620/**
621 * nx_crypto_ctx_exit - destroy a crypto api context
622 *
623 * @tfm: the crypto transform pointer for the context
624 *
625 * As crypto API contexts are destroyed, this exit hook is called to free the
626 * memory associated with it.
627 */
628void nx_crypto_ctx_exit(struct crypto_tfm *tfm)
629{
630 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
631
632 kzfree(nx_ctx->kmem);
633 nx_ctx->csbcpb = NULL;
634 nx_ctx->csbcpb_aead = NULL;
635 nx_ctx->in_sg = NULL;
636 nx_ctx->out_sg = NULL;
637}
638
639static int __devinit nx_probe(struct vio_dev *viodev,
640 const struct vio_device_id *id)
641{
642 dev_dbg(&viodev->dev, "driver probed: %s resource id: 0x%x\n",
643 viodev->name, viodev->resource_id);
644
645 if (nx_driver.viodev) {
646 dev_err(&viodev->dev, "%s: Attempt to register more than one "
647 "instance of the hardware\n", __func__);
648 return -EINVAL;
649 }
650
651 nx_driver.viodev = viodev;
652
653 nx_of_init(&viodev->dev, &nx_driver.of);
654
655 return nx_register_algs();
656}
657
658static int __devexit nx_remove(struct vio_dev *viodev)
659{
660 dev_dbg(&viodev->dev, "entering nx_remove for UA 0x%x\n",
661 viodev->unit_address);
662
663 if (nx_driver.of.status == NX_OKAY) {
664 NX_DEBUGFS_FINI(&nx_driver);
665
666 crypto_unregister_alg(&nx_ccm_aes_alg);
667 crypto_unregister_alg(&nx_ccm4309_aes_alg);
668 crypto_unregister_alg(&nx_gcm_aes_alg);
669 crypto_unregister_alg(&nx_gcm4106_aes_alg);
670 crypto_unregister_alg(&nx_ctr_aes_alg);
671 crypto_unregister_alg(&nx_ctr3686_aes_alg);
672 crypto_unregister_alg(&nx_cbc_aes_alg);
673 crypto_unregister_alg(&nx_ecb_aes_alg);
674 crypto_unregister_shash(&nx_shash_sha256_alg);
675 crypto_unregister_shash(&nx_shash_sha512_alg);
676 crypto_unregister_shash(&nx_shash_aes_xcbc_alg);
677 }
678
679 return 0;
680}
681
682
683/* module wide initialization/cleanup */
684static int __init nx_init(void)
685{
686 return vio_register_driver(&nx_driver.viodriver);
687}
688
689static void __exit nx_fini(void)
690{
691 vio_unregister_driver(&nx_driver.viodriver);
692}
693
694static struct vio_device_id nx_crypto_driver_ids[] __devinitdata = {
695 { "ibm,sym-encryption-v1", "ibm,sym-encryption" },
696 { "", "" }
697};
698MODULE_DEVICE_TABLE(vio, nx_crypto_driver_ids);
699
700/* driver state structure */
701struct nx_crypto_driver nx_driver = {
702 .viodriver = {
703 .id_table = nx_crypto_driver_ids,
704 .probe = nx_probe,
705 .remove = nx_remove,
706 .name = NX_NAME,
707 },
708};
709
710module_init(nx_init);
711module_exit(nx_fini);
712
713MODULE_AUTHOR("Kent Yoder <yoder1@us.ibm.com>");
714MODULE_DESCRIPTION(NX_STRING);
715MODULE_LICENSE("GPL");
716MODULE_VERSION(NX_VERSION);
diff --git a/drivers/crypto/nx/nx.h b/drivers/crypto/nx/nx.h
new file mode 100644
index 000000000000..3232b182dd28
--- /dev/null
+++ b/drivers/crypto/nx/nx.h
@@ -0,0 +1,193 @@
1
2#ifndef __NX_H__
3#define __NX_H__
4
5#define NX_NAME "nx-crypto"
6#define NX_STRING "IBM Power7+ Nest Accelerator Crypto Driver"
7#define NX_VERSION "1.0"
8
9static const char nx_driver_string[] = NX_STRING;
10static const char nx_driver_version[] = NX_VERSION;
11
12/* a scatterlist in the format PHYP is expecting */
13struct nx_sg {
14 u64 addr;
15 u32 rsvd;
16 u32 len;
17} __packed;
18
19#define NX_PAGE_SIZE (4096)
20#define NX_MAX_SG_ENTRIES (NX_PAGE_SIZE/(sizeof(struct nx_sg)))
21
22enum nx_status {
23 NX_DISABLED,
24 NX_WAITING,
25 NX_OKAY
26};
27
28/* msc_triplet and max_sync_cop are used only to assist in parsing the
29 * ibm,max-sync-cop OpenFirmware property */
30struct msc_triplet {
31 u32 keybitlen;
32 u32 databytelen;
33 u32 sglen;
34} __packed;
35
36struct max_sync_cop {
37 u32 fc;
38 u32 mode;
39 u32 triplets;
40 struct msc_triplet trip[0];
41} __packed;
42
43struct alg_props {
44 u32 databytelen;
45 u32 sglen;
46};
47
48#define NX_OF_FLAG_MAXSGLEN_SET (1)
49#define NX_OF_FLAG_STATUS_SET (2)
50#define NX_OF_FLAG_MAXSYNCCOP_SET (4)
51#define NX_OF_FLAG_MASK_READY (NX_OF_FLAG_MAXSGLEN_SET | \
52 NX_OF_FLAG_STATUS_SET | \
53 NX_OF_FLAG_MAXSYNCCOP_SET)
54struct nx_of {
55 u32 flags;
56 u32 max_sg_len;
57 enum nx_status status;
58 struct alg_props ap[NX_MAX_FC][NX_MAX_MODE][3];
59};
60
61struct nx_stats {
62 atomic_t aes_ops;
63 atomic64_t aes_bytes;
64 atomic_t sha256_ops;
65 atomic64_t sha256_bytes;
66 atomic_t sha512_ops;
67 atomic64_t sha512_bytes;
68
69 atomic_t sync_ops;
70
71 atomic_t errors;
72 atomic_t last_error;
73 atomic_t last_error_pid;
74};
75
76struct nx_debugfs {
77 struct dentry *dfs_root;
78 struct dentry *dfs_aes_ops, *dfs_aes_bytes;
79 struct dentry *dfs_sha256_ops, *dfs_sha256_bytes;
80 struct dentry *dfs_sha512_ops, *dfs_sha512_bytes;
81 struct dentry *dfs_errors, *dfs_last_error, *dfs_last_error_pid;
82};
83
84struct nx_crypto_driver {
85 struct nx_stats stats;
86 struct nx_of of;
87 struct vio_dev *viodev;
88 struct vio_driver viodriver;
89 struct nx_debugfs dfs;
90};
91
92#define NX_GCM4106_NONCE_LEN (4)
93#define NX_GCM_CTR_OFFSET (12)
94struct nx_gcm_priv {
95 u8 iv[16];
96 u8 iauth_tag[16];
97 u8 nonce[NX_GCM4106_NONCE_LEN];
98};
99
100#define NX_CCM_AES_KEY_LEN (16)
101#define NX_CCM4309_AES_KEY_LEN (19)
102#define NX_CCM4309_NONCE_LEN (3)
103struct nx_ccm_priv {
104 u8 iv[16];
105 u8 b0[16];
106 u8 iauth_tag[16];
107 u8 oauth_tag[16];
108 u8 nonce[NX_CCM4309_NONCE_LEN];
109};
110
111struct nx_xcbc_priv {
112 u8 key[16];
113};
114
115struct nx_ctr_priv {
116 u8 iv[16];
117};
118
119struct nx_crypto_ctx {
120 void *kmem; /* unaligned, kmalloc'd buffer */
121 size_t kmem_len; /* length of kmem */
122 struct nx_csbcpb *csbcpb; /* aligned page given to phyp @ hcall time */
123 struct vio_pfo_op op; /* operation struct with hcall parameters */
124 struct nx_csbcpb *csbcpb_aead; /* secondary csbcpb used by AEAD algs */
125 struct vio_pfo_op op_aead;/* operation struct for csbcpb_aead */
126
127 struct nx_sg *in_sg; /* aligned pointer into kmem to an sg list */
128 struct nx_sg *out_sg; /* aligned pointer into kmem to an sg list */
129
130 struct alg_props *ap; /* pointer into props based on our key size */
131	struct alg_props props[3]; /* OpenFirmware properties for requests */
132 struct nx_stats *stats; /* pointer into an nx_crypto_driver for stats
133 reporting */
134
135 union {
136 struct nx_gcm_priv gcm;
137 struct nx_ccm_priv ccm;
138 struct nx_xcbc_priv xcbc;
139 struct nx_ctr_priv ctr;
140 } priv;
141};
142
143/* prototypes */
144int nx_crypto_ctx_aes_ccm_init(struct crypto_tfm *tfm);
145int nx_crypto_ctx_aes_gcm_init(struct crypto_tfm *tfm);
146int nx_crypto_ctx_aes_xcbc_init(struct crypto_tfm *tfm);
147int nx_crypto_ctx_aes_ctr_init(struct crypto_tfm *tfm);
148int nx_crypto_ctx_aes_cbc_init(struct crypto_tfm *tfm);
149int nx_crypto_ctx_aes_ecb_init(struct crypto_tfm *tfm);
150int nx_crypto_ctx_sha_init(struct crypto_tfm *tfm);
151void nx_crypto_ctx_exit(struct crypto_tfm *tfm);
152void nx_ctx_init(struct nx_crypto_ctx *nx_ctx, unsigned int function);
153int nx_hcall_sync(struct nx_crypto_ctx *ctx, struct vio_pfo_op *op,
154 u32 may_sleep);
155struct nx_sg *nx_build_sg_list(struct nx_sg *, u8 *, unsigned int, u32);
156int nx_build_sg_lists(struct nx_crypto_ctx *, struct blkcipher_desc *,
157 struct scatterlist *, struct scatterlist *, unsigned int,
158 u8 *);
159struct nx_sg *nx_walk_and_build(struct nx_sg *, unsigned int,
160 struct scatterlist *, unsigned int,
161 unsigned int);
162
163#ifdef CONFIG_DEBUG_FS
164#define NX_DEBUGFS_INIT(drv) nx_debugfs_init(drv)
165#define NX_DEBUGFS_FINI(drv) nx_debugfs_fini(drv)
166
167int nx_debugfs_init(struct nx_crypto_driver *);
168void nx_debugfs_fini(struct nx_crypto_driver *);
169#else
170#define NX_DEBUGFS_INIT(drv) (0)
171#define NX_DEBUGFS_FINI(drv) (0)
172#endif
173
174#define NX_PAGE_NUM(x) ((u64)(x) & 0xfffffffffffff000ULL)
175
176extern struct crypto_alg nx_cbc_aes_alg;
177extern struct crypto_alg nx_ecb_aes_alg;
178extern struct crypto_alg nx_gcm_aes_alg;
179extern struct crypto_alg nx_gcm4106_aes_alg;
180extern struct crypto_alg nx_ctr_aes_alg;
181extern struct crypto_alg nx_ctr3686_aes_alg;
182extern struct crypto_alg nx_ccm_aes_alg;
183extern struct crypto_alg nx_ccm4309_aes_alg;
184extern struct shash_alg nx_shash_aes_xcbc_alg;
185extern struct shash_alg nx_shash_sha512_alg;
186extern struct shash_alg nx_shash_sha256_alg;
187
188extern struct nx_crypto_driver nx_driver;
189
190#define SCATTERWALK_TO_SG 1
191#define SCATTERWALK_FROM_SG 0
192
193#endif
diff --git a/drivers/crypto/nx/nx_csbcpb.h b/drivers/crypto/nx/nx_csbcpb.h
new file mode 100644
index 000000000000..a304f956d6f8
--- /dev/null
+++ b/drivers/crypto/nx/nx_csbcpb.h
@@ -0,0 +1,205 @@
1
2#ifndef __NX_CSBCPB_H__
3#define __NX_CSBCPB_H__
4
5struct cop_symcpb_aes_ecb {
6 u8 key[32];
7 u8 __rsvd[80];
8} __packed;
9
10struct cop_symcpb_aes_cbc {
11 u8 iv[16];
12 u8 key[32];
13 u8 cv[16];
14 u32 spbc;
15 u8 __rsvd[44];
16} __packed;
17
18struct cop_symcpb_aes_gca {
19 u8 in_pat[16];
20 u8 key[32];
21 u8 out_pat[16];
22 u32 spbc;
23 u8 __rsvd[44];
24} __packed;
25
26struct cop_symcpb_aes_gcm {
27 u8 in_pat_or_aad[16];
28 u8 iv_or_cnt[16];
29 u64 bit_length_aad;
30 u64 bit_length_data;
31 u8 in_s0[16];
32 u8 key[32];
33 u8 __rsvd1[16];
34 u8 out_pat_or_mac[16];
35 u8 out_s0[16];
36 u8 out_cnt[16];
37 u32 spbc;
38 u8 __rsvd2[12];
39} __packed;
40
41struct cop_symcpb_aes_ctr {
42 u8 iv[16];
43 u8 key[32];
44 u8 cv[16];
45 u32 spbc;
46 u8 __rsvd2[44];
47} __packed;
48
49struct cop_symcpb_aes_cca {
50 u8 b0[16];
51 u8 b1[16];
52 u8 key[16];
53 u8 out_pat_or_b0[16];
54 u32 spbc;
55 u8 __rsvd[44];
56} __packed;
57
58struct cop_symcpb_aes_ccm {
59 u8 in_pat_or_b0[16];
60 u8 iv_or_ctr[16];
61 u8 in_s0[16];
62 u8 key[16];
63 u8 __rsvd1[48];
64 u8 out_pat_or_mac[16];
65 u8 out_s0[16];
66 u8 out_ctr[16];
67 u32 spbc;
68 u8 __rsvd2[12];
69} __packed;
70
71struct cop_symcpb_aes_xcbc {
72 u8 cv[16];
73 u8 key[16];
74 u8 __rsvd1[16];
75 u8 out_cv_mac[16];
76 u32 spbc;
77 u8 __rsvd2[44];
78} __packed;
79
80struct cop_symcpb_sha256 {
81 u64 message_bit_length;
82 u64 __rsvd1;
83 u8 input_partial_digest[32];
84 u8 message_digest[32];
85 u32 spbc;
86 u8 __rsvd2[44];
87} __packed;
88
89struct cop_symcpb_sha512 {
90 u64 message_bit_length_hi;
91 u64 message_bit_length_lo;
92 u8 input_partial_digest[64];
93 u8 __rsvd1[32];
94 u8 message_digest[64];
95 u32 spbc;
96 u8 __rsvd2[76];
97} __packed;
98
99#define NX_FDM_INTERMEDIATE 0x01
100#define NX_FDM_CONTINUATION 0x02
101#define NX_FDM_ENDE_ENCRYPT 0x80
102
103#define NX_CPB_FDM(c) ((c)->cpb.hdr.fdm)
104#define NX_CPB_KS_DS(c) ((c)->cpb.hdr.ks_ds)
105
106#define NX_CPB_KEY_SIZE(c) (NX_CPB_KS_DS(c) >> 4)
107#define NX_CPB_SET_KEY_SIZE(c, x) NX_CPB_KS_DS(c) |= ((x) << 4)
108#define NX_CPB_SET_DIGEST_SIZE(c, x) NX_CPB_KS_DS(c) |= (x)
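/* Editor's example (not from the original patch): for an AES-256 context,
 * NX_CPB_SET_KEY_SIZE(c, NX_KS_AES_256) sets ks_ds to 0x30 (3 << 4); for
 * a SHA-512 context, NX_CPB_SET_DIGEST_SIZE(c, NX_DS_SHA512) ORs 0x3 into
 * the low nibble. NX_CPB_KEY_SIZE(c) reads the key size back out of the
 * high nibble. */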
109
110struct cop_symcpb_header {
111 u8 mode;
112 u8 fdm;
113 u8 ks_ds;
114 u8 pad_byte;
115 u8 __rsvd[12];
116} __packed;
117
118struct cop_parameter_block {
119 struct cop_symcpb_header hdr;
120 union {
121 struct cop_symcpb_aes_ecb aes_ecb;
122 struct cop_symcpb_aes_cbc aes_cbc;
123 struct cop_symcpb_aes_gca aes_gca;
124 struct cop_symcpb_aes_gcm aes_gcm;
125 struct cop_symcpb_aes_cca aes_cca;
126 struct cop_symcpb_aes_ccm aes_ccm;
127 struct cop_symcpb_aes_ctr aes_ctr;
128 struct cop_symcpb_aes_xcbc aes_xcbc;
129 struct cop_symcpb_sha256 sha256;
130 struct cop_symcpb_sha512 sha512;
131 };
132} __packed;
133
134#define NX_CSB_VALID_BIT 0x80
135
136/* co-processor status block */
137struct cop_status_block {
138 u8 valid;
139 u8 crb_seq_number;
140 u8 completion_code;
141 u8 completion_extension;
142 u32 processed_byte_count;
143 u64 address;
144} __packed;
145
146/* Nest accelerator workbook section 4.4 */
147struct nx_csbcpb {
148 unsigned char __rsvd[112];
149 struct cop_status_block csb;
150 struct cop_parameter_block cpb;
151} __packed;
152
153/* nx_csbcpb related definitions */
154#define NX_MODE_AES_ECB 0
155#define NX_MODE_AES_CBC 1
156#define NX_MODE_AES_GMAC 2
157#define NX_MODE_AES_GCA 3
158#define NX_MODE_AES_GCM 4
159#define NX_MODE_AES_CCA 5
160#define NX_MODE_AES_CCM 6
161#define NX_MODE_AES_CTR 7
162#define NX_MODE_AES_XCBC_MAC 20
163#define NX_MODE_SHA 0
164#define NX_MODE_SHA_HMAC 1
165#define NX_MODE_AES_CBC_HMAC_ETA 8
166#define NX_MODE_AES_CBC_HMAC_ATE 9
167#define NX_MODE_AES_CBC_HMAC_EAA 10
168#define NX_MODE_AES_CTR_HMAC_ETA 12
169#define NX_MODE_AES_CTR_HMAC_ATE 13
170#define NX_MODE_AES_CTR_HMAC_EAA 14
171
172#define NX_FDM_CI_FULL 0
173#define NX_FDM_CI_FIRST 1
174#define NX_FDM_CI_LAST 2
175#define NX_FDM_CI_MIDDLE 3
176
177#define NX_FDM_PR_NONE 0
178#define NX_FDM_PR_PAD 1
179
180#define NX_KS_AES_128 1
181#define NX_KS_AES_192 2
182#define NX_KS_AES_256 3
183
184#define NX_DS_SHA256 2
185#define NX_DS_SHA512 3
186
187#define NX_FC_AES 0
188#define NX_FC_SHA 2
189#define NX_FC_AES_HMAC 6
190
191#define NX_MAX_FC (NX_FC_AES_HMAC + 1)
192#define NX_MAX_MODE (NX_MODE_AES_XCBC_MAC + 1)
193
194#define HCOP_FC_AES NX_FC_AES
195#define HCOP_FC_SHA NX_FC_SHA
196#define HCOP_FC_AES_HMAC NX_FC_AES_HMAC
197
198/* indices into the array of algorithm properties */
199#define NX_PROPS_AES_128 0
200#define NX_PROPS_AES_192 1
201#define NX_PROPS_AES_256 2
202#define NX_PROPS_SHA256 1
203#define NX_PROPS_SHA512 2
204
205#endif
diff --git a/drivers/crypto/nx/nx_debugfs.c b/drivers/crypto/nx/nx_debugfs.c
new file mode 100644
index 000000000000..7ab2e8dcd9b4
--- /dev/null
+++ b/drivers/crypto/nx/nx_debugfs.c
@@ -0,0 +1,103 @@
1/**
2 * debugfs routines supporting the Power 7+ Nest Accelerators driver
3 *
4 * Copyright (C) 2011-2012 International Business Machines Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 only.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 *
19 * Author: Kent Yoder <yoder1@us.ibm.com>
20 */
21
22#include <linux/device.h>
23#include <linux/kobject.h>
24#include <linux/string.h>
25#include <linux/debugfs.h>
26#include <linux/module.h>
27#include <linux/init.h>
28#include <linux/crypto.h>
29#include <crypto/hash.h>
30#include <asm/vio.h>
31
32#include "nx_csbcpb.h"
33#include "nx.h"
34
35#ifdef CONFIG_DEBUG_FS
36
37/*
38 * debugfs
39 *
40 * For documentation on these attributes, please see:
41 *
42 * Documentation/ABI/testing/debugfs-pfo-nx-crypto
43 */
44
45int nx_debugfs_init(struct nx_crypto_driver *drv)
46{
47 struct nx_debugfs *dfs = &drv->dfs;
48
49 dfs->dfs_root = debugfs_create_dir(NX_NAME, NULL);
50
51 dfs->dfs_aes_ops =
52 debugfs_create_u32("aes_ops",
53 S_IRUSR | S_IRGRP | S_IROTH,
54 dfs->dfs_root, (u32 *)&drv->stats.aes_ops);
55 dfs->dfs_sha256_ops =
56 debugfs_create_u32("sha256_ops",
57 S_IRUSR | S_IRGRP | S_IROTH,
58 dfs->dfs_root,
59 (u32 *)&drv->stats.sha256_ops);
60 dfs->dfs_sha512_ops =
61 debugfs_create_u32("sha512_ops",
62 S_IRUSR | S_IRGRP | S_IROTH,
63 dfs->dfs_root,
64 (u32 *)&drv->stats.sha512_ops);
65 dfs->dfs_aes_bytes =
66 debugfs_create_u64("aes_bytes",
67 S_IRUSR | S_IRGRP | S_IROTH,
68 dfs->dfs_root,
69 (u64 *)&drv->stats.aes_bytes);
70 dfs->dfs_sha256_bytes =
71 debugfs_create_u64("sha256_bytes",
72 S_IRUSR | S_IRGRP | S_IROTH,
73 dfs->dfs_root,
74 (u64 *)&drv->stats.sha256_bytes);
75 dfs->dfs_sha512_bytes =
76 debugfs_create_u64("sha512_bytes",
77 S_IRUSR | S_IRGRP | S_IROTH,
78 dfs->dfs_root,
79 (u64 *)&drv->stats.sha512_bytes);
80 dfs->dfs_errors =
81 debugfs_create_u32("errors",
82 S_IRUSR | S_IRGRP | S_IROTH,
83 dfs->dfs_root, (u32 *)&drv->stats.errors);
84 dfs->dfs_last_error =
85 debugfs_create_u32("last_error",
86 S_IRUSR | S_IRGRP | S_IROTH,
87 dfs->dfs_root,
88 (u32 *)&drv->stats.last_error);
89 dfs->dfs_last_error_pid =
90 debugfs_create_u32("last_error_pid",
91 S_IRUSR | S_IRGRP | S_IROTH,
92 dfs->dfs_root,
93 (u32 *)&drv->stats.last_error_pid);
94 return 0;
95}
96
97void
98nx_debugfs_fini(struct nx_crypto_driver *drv)
99{
100 debugfs_remove_recursive(drv->dfs.dfs_root);
101}
102
103#endif