author    | Kent Yoder <key@linux.vnet.ibm.com>               | 2012-05-14 07:06:09 -0400
committer | Benjamin Herrenschmidt <benh@kernel.crashing.org> | 2012-05-16 01:05:43 -0400
commit    | 6148c1ad6dc1ef1ff83fba9779c968d4a25645f0 (patch)
tree      | c544424751f3d1306bf383a718d3ecab6d781092 /drivers/crypto
parent    | f2a15f1d5d36004236dab8184593aa8eef3949ae (diff)
powerpc/crypto: AES-XCBC mode routines for nx encryption
These routines add support for AES in XCBC mode on the Power7+ CPU's
in-Nest accelerator driver.
Signed-off-by: Kent Yoder <key@linux.vnet.ibm.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Diffstat (limited to 'drivers/crypto')
-rw-r--r-- | drivers/crypto/nx/nx-aes-xcbc.c | 236
1 file changed, 236 insertions, 0 deletions
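
For orientation before the diff: nothing calls these routines directly. A
kernel client reaches them through the generic crypto API, which selects the
"xcbc-aes-nx" implementation by priority once it is registered. The sketch
below is a hypothetical client of the era's shash interface (the helper name
and error handling are illustrative, not part of this patch):

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/slab.h>

/* compute a one-shot AES-XCBC MAC of msg into mac[16] */
static int xcbc_mac_example(const u8 *key, unsigned int keylen,
			    const u8 *msg, unsigned int len, u8 *mac)
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	int rc;

	tfm = crypto_alloc_shash("xcbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* the nx driver only accepts AES-128 keys (keylen == 16) */
	rc = crypto_shash_setkey(tfm, key, keylen);
	if (rc)
		goto out_free_tfm;

	desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
	if (!desc) {
		rc = -ENOMEM;
		goto out_free_tfm;
	}
	desc->tfm = tfm;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	/* init + update + final in one call */
	rc = crypto_shash_digest(desc, msg, len, mac);

	kfree(desc);
out_free_tfm:
	crypto_free_shash(tfm);
	return rc;
}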
diff --git a/drivers/crypto/nx/nx-aes-xcbc.c b/drivers/crypto/nx/nx-aes-xcbc.c
new file mode 100644
index 000000000000..93923e4628c0
--- /dev/null
+++ b/drivers/crypto/nx/nx-aes-xcbc.c
@@ -0,0 +1,236 @@
/**
 * AES XCBC routines supporting the Power 7+ Nest Accelerators driver
 *
 * Copyright (C) 2011-2012 International Business Machines Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 only.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Author: Kent Yoder <yoder1@us.ibm.com>
 */

#include <crypto/internal/hash.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <asm/vio.h>

#include "nx_csbcpb.h"
#include "nx.h"

/* per-request state: 'buffer' holds a partial (or deliberately deferred
 * final) block between updates, 'count' is how many bytes of it are valid */
struct xcbc_state {
	u8 state[AES_BLOCK_SIZE];
	unsigned int count;
	u8 buffer[AES_BLOCK_SIZE];
};

/* only AES-128 keys are accepted; the key is stashed in the private
 * context here and copied into the coprocessor block at init time */
static int nx_xcbc_set_key(struct crypto_shash *desc,
			   const u8            *in_key,
			   unsigned int         key_len)
{
	struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc);

	switch (key_len) {
	case AES_KEYSIZE_128:
		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128];
		break;
	default:
		return -EINVAL;
	}

	memcpy(nx_ctx->priv.xcbc.key, in_key, key_len);

	return 0;
}

static int nx_xcbc_init(struct shash_desc *desc)
{
	struct xcbc_state *sctx = shash_desc_ctx(desc);
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_sg *out_sg;

	nx_ctx_init(nx_ctx, HCOP_FC_AES);

	memset(sctx, 0, sizeof *sctx);

	NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
	csbcpb->cpb.hdr.mode = NX_MODE_AES_XCBC_MAC;

	memcpy(csbcpb->cpb.aes_xcbc.key, nx_ctx->priv.xcbc.key, AES_BLOCK_SIZE);
74 | memset(nx_ctx->priv.xcbc.key, 0, sizeof *nx_ctx->priv.xcbc.key); | ||

	out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
				  AES_BLOCK_SIZE, nx_ctx->ap->sglen);
	/* op.outlen is deliberately negative: for the PFO hcall a negative
	 * length means the address refers to a scatter/gather list */
	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

	return 0;
}

static int nx_xcbc_update(struct shash_desc *desc,
			  const u8           *data,
			  unsigned int        len)
{
	struct xcbc_state *sctx = shash_desc_ctx(desc);
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_sg *in_sg;
	u32 to_process, leftover;
	int rc = 0;

	if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
		/* we've hit the nx chip previously and we're updating again,
		 * so copy over the partial digest */
		memcpy(csbcpb->cpb.aes_xcbc.cv,
		       csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE);
	}

	/* 2 cases for total data len:
	 * 1: <= AES_BLOCK_SIZE: copy into state, return 0
	 * 2: > AES_BLOCK_SIZE: process X blocks, copy in leftover
	 */
	if (len + sctx->count <= AES_BLOCK_SIZE) {
		memcpy(sctx->buffer + sctx->count, data, len);
		sctx->count += len;
		goto out;
	}

	/* to_process: the number of bytes (a multiple of AES_BLOCK_SIZE)
	 * to hand to the hardware in this update */
	to_process = (sctx->count + len) & ~(AES_BLOCK_SIZE - 1);
	leftover = (sctx->count + len) & (AES_BLOCK_SIZE - 1);

	/* the hardware will not accept a 0 byte operation for this algorithm
	 * and the operation MUST be finalized to be correct. So if we happen
	 * to get an update that falls on a block sized boundary, we must
	 * save off the last block to finalize with later. */
	if (!leftover) {
		to_process -= AES_BLOCK_SIZE;
		leftover = AES_BLOCK_SIZE;
	}

	if (sctx->count) {
		in_sg = nx_build_sg_list(nx_ctx->in_sg, sctx->buffer,
					 sctx->count, nx_ctx->ap->sglen);
		in_sg = nx_build_sg_list(in_sg, (u8 *)data,
					 to_process - sctx->count,
					 nx_ctx->ap->sglen);
		nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
					sizeof(struct nx_sg);
	} else {
		in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)data, to_process,
					 nx_ctx->ap->sglen);
		nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
					sizeof(struct nx_sg);
	}

	NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;

	if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
		rc = -EINVAL;
		goto out;
	}

	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
			   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
	if (rc)
		goto out;

	atomic_inc(&(nx_ctx->stats->aes_ops));

	/* copy the leftover back into the state struct */
	memcpy(sctx->buffer, data + len - leftover, leftover);
	sctx->count = leftover;

	/* everything after the first update is continuation */
	NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
out:
	return rc;
}

static int nx_xcbc_final(struct shash_desc *desc, u8 *out)
{
	struct xcbc_state *sctx = shash_desc_ctx(desc);
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_sg *in_sg, *out_sg;
	int rc = 0;

	if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
		/* we've hit the nx chip previously, now we're finalizing,
		 * so copy over the partial digest */
		memcpy(csbcpb->cpb.aes_xcbc.cv,
		       csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE);
	} else if (sctx->count == 0) {
		/* we've never seen an update, so this is a 0 byte op. The
		 * hardware cannot handle a 0 byte op, so just copy out the
		 * known 0 byte result. This is cheaper than allocating a
		 * software context to do a 0 byte op */
		u8 data[] = { 0x75, 0xf0, 0x25, 0x1d, 0x52, 0x8a, 0xc0, 0x1c,
			      0x45, 0x73, 0xdf, 0xd5, 0x84, 0xd7, 0x9f, 0x29 };

		memcpy(out, data, sizeof(data));
		goto out;
	}

	/* final is represented by continuing the operation and indicating that
	 * this is not an intermediate operation */
	NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buffer,
				 sctx->count, nx_ctx->ap->sglen);
	out_sg = nx_build_sg_list(nx_ctx->out_sg, out, AES_BLOCK_SIZE,
				  nx_ctx->ap->sglen);

	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

	if (!nx_ctx->op.outlen) {
		rc = -EINVAL;
		goto out;
	}

	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
			   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
	if (rc)
		goto out;

	atomic_inc(&(nx_ctx->stats->aes_ops));

	memcpy(out, csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE);
out:
	return rc;
}

/* registered with the crypto API by the nx driver core at probe time */
struct shash_alg nx_shash_aes_xcbc_alg = {
	.digestsize = AES_BLOCK_SIZE,
	.init       = nx_xcbc_init,
	.update     = nx_xcbc_update,
	.final      = nx_xcbc_final,
	.setkey     = nx_xcbc_set_key,
	.descsize   = sizeof(struct xcbc_state),
	.statesize  = sizeof(struct xcbc_state),
	.base       = {
		.cra_name        = "xcbc(aes)",
		.cra_driver_name = "xcbc-aes-nx",
		.cra_priority    = 300,
		.cra_flags       = CRYPTO_ALG_TYPE_SHASH,
		.cra_blocksize   = AES_BLOCK_SIZE,
		.cra_module      = THIS_MODULE,
		.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
		.cra_init        = nx_crypto_ctx_aes_xcbc_init,
		.cra_exit        = nx_crypto_ctx_exit,
	}
};
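
For reference, what the Nest unit computes here is AES-XCBC-MAC as defined in
RFC 3566: three subkeys K1, K2 and K3 are derived from the AES key, every
block but the last is folded through AES under K1, and the final block is
masked with K2 (full block) or 10*-padded and masked with K3 (short block)
before the last encryption. The hard-coded 0-byte result in nx_xcbc_final()
matches the RFC 3566 test case #1 value. The sketch below is a hypothetical
software reference built on the kernel's "aes" cipher, useful for
cross-checking the hardware path; it is not part of this patch:

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/string.h>

/* software AES-XCBC-MAC per RFC 3566, AES-128 keys only */
static int xcbc_mac_soft(const u8 *key, const u8 *msg, unsigned int len,
			 u8 *mac)
{
	struct crypto_cipher *aes;
	u8 k1[AES_BLOCK_SIZE], k2[AES_BLOCK_SIZE], k3[AES_BLOCK_SIZE];
	u8 e[AES_BLOCK_SIZE] = { 0 };
	int rc;

	aes = crypto_alloc_cipher("aes", 0, 0);
	if (IS_ERR(aes))
		return PTR_ERR(aes);

	/* K1 = AES-K(0x01^16), K2 = AES-K(0x02^16), K3 = AES-K(0x03^16) */
	rc = crypto_cipher_setkey(aes, key, AES_KEYSIZE_128);
	if (rc)
		goto out;
	memset(k1, 0x01, sizeof(k1));
	crypto_cipher_encrypt_one(aes, k1, k1);
	memset(k2, 0x02, sizeof(k2));
	crypto_cipher_encrypt_one(aes, k2, k2);
	memset(k3, 0x03, sizeof(k3));
	crypto_cipher_encrypt_one(aes, k3, k3);

	rc = crypto_cipher_setkey(aes, k1, sizeof(k1));
	if (rc)
		goto out;

	/* every block but the last: E = AES-K1(E xor M[i]) */
	while (len > AES_BLOCK_SIZE) {
		crypto_xor(e, msg, AES_BLOCK_SIZE);
		crypto_cipher_encrypt_one(aes, e, e);
		msg += AES_BLOCK_SIZE;
		len -= AES_BLOCK_SIZE;
	}

	if (len == AES_BLOCK_SIZE) {
		/* full final block: mask with K2 */
		crypto_xor(e, msg, AES_BLOCK_SIZE);
		crypto_xor(e, k2, AES_BLOCK_SIZE);
	} else {
		/* short (or empty) final block: 10* pad, mask with K3 */
		u8 pad[AES_BLOCK_SIZE] = { 0 };

		if (len)
			memcpy(pad, msg, len);
		pad[len] = 0x80;
		crypto_xor(e, pad, AES_BLOCK_SIZE);
		crypto_xor(e, k3, AES_BLOCK_SIZE);
	}
	crypto_cipher_encrypt_one(aes, e, e);
	memcpy(mac, e, AES_BLOCK_SIZE);
out:
	crypto_free_cipher(aes);
	return rc;
}

Feeding xcbc_mac_soft() a zero length message and the RFC 3566 test key
reproduces the 75 f0 25 1d ... constant copied out above.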