author		Marcelo Cerri <mhcerri@linux.vnet.ibm.com>	2013-08-29 10:36:35 -0400
committer	Herbert Xu <herbert@gondor.apana.org.au>	2013-09-02 06:32:54 -0400
commit		799804348d11763b84213156318bb92cb955bfb5 (patch)
tree		3e73f3e3f4eaef22648b36fa02c7136191db14c1 /drivers/crypto
parent		884d981b04f3c00f61f4efaf9a93103e01260685 (diff)
crypto: nx - fix limits to sg lists for AES-GCM
This patch updates the nx-aes-gcm implementation to perform several
hyper calls if needed in order to always respect the length limits for
scatter/gather lists.

Two different limits are considered:

 - "ibm,max-sg-len": maximum number of bytes of each scatter/gather
   list.

 - "ibm,max-sync-cop":
   - the total number of bytes that a scatter/gather list can hold;
   - the maximum number of elements that a scatter/gather list can have.

Reviewed-by: Joy Latten <jmlatten@linux.vnet.ibm.com>
Signed-off-by: Marcelo Cerri <mhcerri@linux.vnet.ibm.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
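For reference, the bound the patch applies to each chunk before a hyper
call can be summarized as: take the bytes still pending, clamp them to
the "ibm,max-sync-cop" byte limit (the driver's ap->databytelen), then
clamp again to what one scatter/gather list can address, which is
NX_PAGE_SIZE * (max_sg_len - 1) bytes in the diff below. A minimal
stand-alone C sketch of that computation follows; the 4096-byte page
size and the free-standing helper are assumptions for illustration, not
driver code:

#include <stdint.h>

#define NX_PAGE_SIZE 4096u	/* assumed NX page size, for illustration only */

/*
 * Hypothetical helper mirroring the patch's to_process computation:
 * clamp the bytes still pending by the "ibm,max-sync-cop" byte limit
 * (databytelen), then by what the sg list can address, i.e.
 * NX_PAGE_SIZE * (max_sg_len - 1) bytes as in the patch.
 */
static uint64_t nx_bound_chunk(uint64_t nbytes, uint64_t processed,
			       uint64_t databytelen, uint32_t max_sg_len)
{
	uint64_t to_process = nbytes - processed;

	if (to_process > databytelen)
		to_process = databytelen;
	if (to_process > (uint64_t)NX_PAGE_SIZE * (max_sg_len - 1))
		to_process = (uint64_t)NX_PAGE_SIZE * (max_sg_len - 1);

	return to_process;
}

Looping with processed += to_process until processed reaches nbytes
yields the several hyper calls the commit message describes, with the
NX_FDM_INTERMEDIATE and NX_FDM_CONTINUATION flags marking every chunk
but the last, as the diff shows.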
Diffstat (limited to 'drivers/crypto')
-rw-r--r--	drivers/crypto/nx/nx-aes-gcm.c	202
1 file changed, 136 insertions(+), 66 deletions(-)
diff --git a/drivers/crypto/nx/nx-aes-gcm.c b/drivers/crypto/nx/nx-aes-gcm.c
index c2d6f76e3677..9e89bdf34487 100644
--- a/drivers/crypto/nx/nx-aes-gcm.c
+++ b/drivers/crypto/nx/nx-aes-gcm.c
@@ -125,37 +125,101 @@ static int nx_gca(struct nx_crypto_ctx *nx_ctx,
 		  struct aead_request   *req,
 		  u8                    *out)
 {
+	int rc;
 	struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;
-	int rc = -EINVAL;
 	struct scatter_walk walk;
 	struct nx_sg *nx_sg = nx_ctx->in_sg;
+	unsigned int nbytes = req->assoclen;
+	unsigned int processed = 0, to_process;
+	u32 max_sg_len;
 
-	if (req->assoclen > nx_ctx->ap->databytelen)
-		goto out;
-
-	if (req->assoclen <= AES_BLOCK_SIZE) {
+	if (nbytes <= AES_BLOCK_SIZE) {
 		scatterwalk_start(&walk, req->assoc);
-		scatterwalk_copychunks(out, &walk, req->assoclen,
-				       SCATTERWALK_FROM_SG);
+		scatterwalk_copychunks(out, &walk, nbytes, SCATTERWALK_FROM_SG);
 		scatterwalk_done(&walk, SCATTERWALK_FROM_SG, 0);
-
-		rc = 0;
-		goto out;
+		return 0;
 	}
 
-	nx_sg = nx_walk_and_build(nx_sg, nx_ctx->ap->sglen, req->assoc, 0,
-				  req->assoclen);
-	nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_sg) * sizeof(struct nx_sg);
+	NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_CONTINUATION;
 
-	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op_aead,
-			   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
-	if (rc)
-		goto out;
+	/* page_limit: number of sg entries that fit on one page */
+	max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
+			   nx_ctx->ap->sglen);
 
-	atomic_inc(&(nx_ctx->stats->aes_ops));
-	atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));
+	do {
+		/*
+		 * to_process: the data chunk to process in this update.
+		 * This value is bound by sg list limits.
+		 */
+		to_process = min_t(u64, nbytes - processed,
+				   nx_ctx->ap->databytelen);
+		to_process = min_t(u64, to_process,
+				   NX_PAGE_SIZE * (max_sg_len - 1));
+
+		if ((to_process + processed) < nbytes)
+			NX_CPB_FDM(csbcpb_aead) |= NX_FDM_INTERMEDIATE;
+		else
+			NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_INTERMEDIATE;
+
+		nx_sg = nx_walk_and_build(nx_ctx->in_sg, nx_ctx->ap->sglen,
+					  req->assoc, processed, to_process);
+		nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_sg)
+					* sizeof(struct nx_sg);
+
+		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op_aead,
+				req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+		if (rc)
+			return rc;
+
+		memcpy(csbcpb_aead->cpb.aes_gca.in_pat,
+				csbcpb_aead->cpb.aes_gca.out_pat,
+				AES_BLOCK_SIZE);
+		NX_CPB_FDM(csbcpb_aead) |= NX_FDM_CONTINUATION;
+
+		atomic_inc(&(nx_ctx->stats->aes_ops));
+		atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));
+
+		processed += to_process;
+	} while (processed < nbytes);
 
 	memcpy(out, csbcpb_aead->cpb.aes_gca.out_pat, AES_BLOCK_SIZE);
+
+	return rc;
+}
+
+static int gcm_empty(struct aead_request *req, struct blkcipher_desc *desc,
+		     int enc)
+{
+	int rc;
+	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
+
+	/* For scenarios where the input message is zero length, AES CTR mode
+	 * may be used. Set the source data to be a single block (16B) of all
+	 * zeros, and set the input IV value to be the same as the GMAC IV
+	 * value. - nx_wb 4.8.1.3 */
+	char src[AES_BLOCK_SIZE] = {};
+	struct scatterlist sg;
+
+	desc->tfm = crypto_alloc_blkcipher("ctr(aes)", 0, 0);
+	if (IS_ERR(desc->tfm)) {
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	crypto_blkcipher_setkey(desc->tfm, csbcpb->cpb.aes_gcm.key,
+			NX_CPB_KEY_SIZE(csbcpb) == NX_KS_AES_128 ? 16 :
+			NX_CPB_KEY_SIZE(csbcpb) == NX_KS_AES_192 ? 24 : 32);
+
+	sg_init_one(&sg, src, AES_BLOCK_SIZE);
+	if (enc)
+		rc = crypto_blkcipher_encrypt_iv(desc, req->dst, &sg,
+						 AES_BLOCK_SIZE);
+	else
+		rc = crypto_blkcipher_decrypt_iv(desc, req->dst, &sg,
+						 AES_BLOCK_SIZE);
+	crypto_free_blkcipher(desc->tfm);
+
 out:
 	return rc;
 }
@@ -166,79 +230,85 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
 	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
 	struct blkcipher_desc desc;
 	unsigned int nbytes = req->cryptlen;
+	unsigned int processed = 0, to_process;
 	unsigned long irq_flags;
+	u32 max_sg_len;
 	int rc = -EINVAL;
 
 	spin_lock_irqsave(&nx_ctx->lock, irq_flags);
 
-	if (nbytes > nx_ctx->ap->databytelen)
-		goto out;
-
 	desc.info = nx_ctx->priv.gcm.iv;
 	/* initialize the counter */
 	*(u32 *)(desc.info + NX_GCM_CTR_OFFSET) = 1;
 
-	/* For scenarios where the input message is zero length, AES CTR mode
-	 * may be used. Set the source data to be a single block (16B) of all
-	 * zeros, and set the input IV value to be the same as the GMAC IV
-	 * value. - nx_wb 4.8.1.3 */
 	if (nbytes == 0) {
-		char src[AES_BLOCK_SIZE] = {};
-		struct scatterlist sg;
-
-		desc.tfm = crypto_alloc_blkcipher("ctr(aes)", 0, 0);
-		if (IS_ERR(desc.tfm)) {
-			rc = -ENOMEM;
-			goto out;
-		}
-
-		crypto_blkcipher_setkey(desc.tfm, csbcpb->cpb.aes_gcm.key,
-				NX_CPB_KEY_SIZE(csbcpb) == NX_KS_AES_128 ? 16 :
-				NX_CPB_KEY_SIZE(csbcpb) == NX_KS_AES_192 ? 24 : 32);
-
-		sg_init_one(&sg, src, AES_BLOCK_SIZE);
-		if (enc)
-			crypto_blkcipher_encrypt_iv(&desc, req->dst, &sg,
-						    AES_BLOCK_SIZE);
-		else
-			crypto_blkcipher_decrypt_iv(&desc, req->dst, &sg,
-						    AES_BLOCK_SIZE);
-		crypto_free_blkcipher(desc.tfm);
-
-		rc = 0;
+		rc = gcm_empty(req, &desc, enc);
 		goto out;
 	}
 
-	desc.tfm = (struct crypto_blkcipher *)req->base.tfm;
-
+	/* Process associated data */
 	csbcpb->cpb.aes_gcm.bit_length_aad = req->assoclen * 8;
-
 	if (req->assoclen) {
 		rc = nx_gca(nx_ctx, req, csbcpb->cpb.aes_gcm.in_pat_or_aad);
 		if (rc)
 			goto out;
 	}
 
-	if (enc)
+	/* Set flags for encryption */
+	NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
+	if (enc) {
 		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
-	else
+	} else {
+		NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
 		nbytes -= crypto_aead_authsize(crypto_aead_reqtfm(req));
+	}
 
-	csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8;
+	/* page_limit: number of sg entries that fit on one page */
+	max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
+			   nx_ctx->ap->sglen);
+
+	do {
+		/*
+		 * to_process: the data chunk to process in this update.
+		 * This value is bound by sg list limits.
+		 */
+		to_process = min_t(u64, nbytes - processed,
+				   nx_ctx->ap->databytelen);
+		to_process = min_t(u64, to_process,
+				   NX_PAGE_SIZE * (max_sg_len - 1));
+
+		if ((to_process + processed) < nbytes)
+			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
+		else
+			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
 
-	rc = nx_build_sg_lists(nx_ctx, &desc, req->dst, req->src, nbytes, 0,
-			       csbcpb->cpb.aes_gcm.iv_or_cnt);
-	if (rc)
-		goto out;
+		csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8;
+		desc.tfm = (struct crypto_blkcipher *) req->base.tfm;
+		rc = nx_build_sg_lists(nx_ctx, &desc, req->dst,
+				       req->src, to_process, processed,
+				       csbcpb->cpb.aes_gcm.iv_or_cnt);
+		if (rc)
+			goto out;
 
 	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
 			   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
 	if (rc)
 		goto out;
+
+		memcpy(desc.info, csbcpb->cpb.aes_gcm.out_cnt, AES_BLOCK_SIZE);
+		memcpy(csbcpb->cpb.aes_gcm.in_pat_or_aad,
+			csbcpb->cpb.aes_gcm.out_pat_or_mac, AES_BLOCK_SIZE);
+		memcpy(csbcpb->cpb.aes_gcm.in_s0,
+			csbcpb->cpb.aes_gcm.out_s0, AES_BLOCK_SIZE);
+
+		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
+
+		atomic_inc(&(nx_ctx->stats->aes_ops));
+		atomic64_add(csbcpb->csb.processed_byte_count,
+				&(nx_ctx->stats->aes_bytes));
 
-	atomic_inc(&(nx_ctx->stats->aes_ops));
-	atomic64_add(csbcpb->csb.processed_byte_count,
-		     &(nx_ctx->stats->aes_bytes));
+		processed += to_process;
+	} while (processed < nbytes);
 
 	if (enc) {
 		/* copy out the auth tag */