Diffstat (limited to 'drivers/crypto/nx/nx-aes-gcm.c')
-rw-r--r--	drivers/crypto/nx/nx-aes-gcm.c	66
1 file changed, 37 insertions(+), 29 deletions(-)
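The hunks below widen the min_t() bounds on max_sg_len in nx_gca() and gmac() so the scatter/gather length is also capped by nx_ctx->ap->databytelen/NX_PAGE_SIZE, move nx_walk_and_build() ahead of the NX_FDM_INTERMEDIATE decision (passing &to_process so the builder can shrink the chunk), and make gcm_empty() check how many bytes nx_build_sg_list() actually mapped. A rough, standalone sketch of the clamping arithmetic follows; the values standing in for the driver's ap fields are invented for illustration and this is not the driver code itself.

/*
 * Illustrative only: a self-contained sketch of the sg-length clamping
 * the patch applies in nx_gca() and gmac().  The names mirror the
 * driver's fields, but every value below is made up.
 */
#include <stdio.h>

#define NX_PAGE_SIZE 4096UL          /* assumed page size for the sketch */

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned long max_sg_len_bytes = 32768;   /* nx_driver.of.max_sg_len (hypothetical) */
	unsigned long sg_entry_size    = 16;      /* sizeof(struct nx_sg) (hypothetical)    */
	unsigned long sglen            = 512;     /* nx_ctx->ap->sglen (hypothetical)       */
	unsigned long databytelen      = 1 << 20; /* nx_ctx->ap->databytelen (hypothetical) */

	/* old bound: sg entries per page vs. the firmware sglen limit */
	unsigned long max_sg_len = min_ul(max_sg_len_bytes / sg_entry_size, sglen);

	/* new, additional bound: never describe more data than databytelen allows */
	max_sg_len = min_ul(max_sg_len, databytelen / NX_PAGE_SIZE);

	printf("max_sg_len = %lu entries, largest chunk = %lu bytes\n",
	       max_sg_len, NX_PAGE_SIZE * (max_sg_len - 1));
	return 0;
}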
diff --git a/drivers/crypto/nx/nx-aes-gcm.c b/drivers/crypto/nx/nx-aes-gcm.c
index 025d9a8d5b19..88c562434bc0 100644
--- a/drivers/crypto/nx/nx-aes-gcm.c
+++ b/drivers/crypto/nx/nx-aes-gcm.c
@@ -131,7 +131,7 @@ static int nx_gca(struct nx_crypto_ctx *nx_ctx,
 	struct nx_sg *nx_sg = nx_ctx->in_sg;
 	unsigned int nbytes = req->assoclen;
 	unsigned int processed = 0, to_process;
-	u32 max_sg_len;
+	unsigned int max_sg_len;
 
 	if (nbytes <= AES_BLOCK_SIZE) {
 		scatterwalk_start(&walk, req->assoc);
@@ -143,8 +143,10 @@ static int nx_gca(struct nx_crypto_ctx *nx_ctx,
 	NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_CONTINUATION;
 
 	/* page_limit: number of sg entries that fit on one page */
-	max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
+	max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
 			   nx_ctx->ap->sglen);
+	max_sg_len = min_t(u64, max_sg_len,
+			   nx_ctx->ap->databytelen/NX_PAGE_SIZE);
 
 	do {
 		/*
@@ -156,13 +158,14 @@ static int nx_gca(struct nx_crypto_ctx *nx_ctx,
 		to_process = min_t(u64, to_process,
 				   NX_PAGE_SIZE * (max_sg_len - 1));
 
+		nx_sg = nx_walk_and_build(nx_ctx->in_sg, max_sg_len,
+					  req->assoc, processed, &to_process);
+
 		if ((to_process + processed) < nbytes)
 			NX_CPB_FDM(csbcpb_aead) |= NX_FDM_INTERMEDIATE;
 		else
 			NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_INTERMEDIATE;
 
-		nx_sg = nx_walk_and_build(nx_ctx->in_sg, nx_ctx->ap->sglen,
-					  req->assoc, processed, to_process);
 		nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_sg)
 					* sizeof(struct nx_sg);
 
@@ -195,7 +198,7 @@ static int gmac(struct aead_request *req, struct blkcipher_desc *desc)
 	struct nx_sg *nx_sg;
 	unsigned int nbytes = req->assoclen;
 	unsigned int processed = 0, to_process;
-	u32 max_sg_len;
+	unsigned int max_sg_len;
 
 	/* Set GMAC mode */
 	csbcpb->cpb.hdr.mode = NX_MODE_AES_GMAC;
@@ -203,8 +206,10 @@ static int gmac(struct aead_request *req, struct blkcipher_desc *desc)
 	NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
 
 	/* page_limit: number of sg entries that fit on one page */
-	max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
+	max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
 			   nx_ctx->ap->sglen);
+	max_sg_len = min_t(u64, max_sg_len,
+			   nx_ctx->ap->databytelen/NX_PAGE_SIZE);
 
 	/* Copy IV */
 	memcpy(csbcpb->cpb.aes_gcm.iv_or_cnt, desc->info, AES_BLOCK_SIZE);
@@ -219,13 +224,14 @@ static int gmac(struct aead_request *req, struct blkcipher_desc *desc)
 		to_process = min_t(u64, to_process,
 				   NX_PAGE_SIZE * (max_sg_len - 1));
 
+		nx_sg = nx_walk_and_build(nx_ctx->in_sg, max_sg_len,
+					  req->assoc, processed, &to_process);
+
 		if ((to_process + processed) < nbytes)
 			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
 		else
 			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
 
-		nx_sg = nx_walk_and_build(nx_ctx->in_sg, nx_ctx->ap->sglen,
-					  req->assoc, processed, to_process);
 		nx_ctx->op.inlen = (nx_ctx->in_sg - nx_sg)
 				   * sizeof(struct nx_sg);
 
@@ -264,6 +270,7 @@ static int gcm_empty(struct aead_request *req, struct blkcipher_desc *desc,
 	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
 	char out[AES_BLOCK_SIZE];
 	struct nx_sg *in_sg, *out_sg;
+	int len;
 
 	/* For scenarios where the input message is zero length, AES CTR mode
 	 * may be used. Set the source data to be a single block (16B) of all
@@ -279,11 +286,22 @@ static int gcm_empty(struct aead_request *req, struct blkcipher_desc *desc,
 	else
 		NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
 
+	len = AES_BLOCK_SIZE;
+
 	/* Encrypt the counter/IV */
 	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) desc->info,
-				 AES_BLOCK_SIZE, nx_ctx->ap->sglen);
-	out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *) out, sizeof(out),
+				 &len, nx_ctx->ap->sglen);
+
+	if (len != AES_BLOCK_SIZE)
+		return -EINVAL;
+
+	len = sizeof(out);
+	out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *) out, &len,
 				  nx_ctx->ap->sglen);
+
+	if (len != sizeof(out))
+		return -EINVAL;
+
 	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
 	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
 
@@ -317,7 +335,6 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
 	unsigned int nbytes = req->cryptlen;
 	unsigned int processed = 0, to_process;
 	unsigned long irq_flags;
-	u32 max_sg_len;
 	int rc = -EINVAL;
 
 	spin_lock_irqsave(&nx_ctx->lock, irq_flags);
@@ -354,33 +371,24 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
 		nbytes -= crypto_aead_authsize(crypto_aead_reqtfm(req));
 	}
 
-	/* page_limit: number of sg entries that fit on one page */
-	max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
-			   nx_ctx->ap->sglen);
-
 	do {
-		/*
-		 * to_process: the data chunk to process in this update.
-		 * This value is bound by sg list limits.
-		 */
-		to_process = min_t(u64, nbytes - processed,
-				   nx_ctx->ap->databytelen);
-		to_process = min_t(u64, to_process,
-				   NX_PAGE_SIZE * (max_sg_len - 1));
-
-		if ((to_process + processed) < nbytes)
-			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
-		else
-			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
+		to_process = nbytes - processed;
 
 		csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8;
 		desc.tfm = (struct crypto_blkcipher *) req->base.tfm;
 		rc = nx_build_sg_lists(nx_ctx, &desc, req->dst,
-				       req->src, to_process, processed,
+				       req->src, &to_process, processed,
 				       csbcpb->cpb.aes_gcm.iv_or_cnt);
+
 		if (rc)
 			goto out;
 
+		if ((to_process + processed) < nbytes)
+			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
+		else
+			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
+
+
 		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
 				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
 		if (rc)
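After this change nx_build_sg_list() and nx_build_sg_lists() report back, through a length pointer, how much data was actually described, and the callers react: gcm_empty() returns -EINVAL on a short mapping, and gcm_aes_nx_crypt() decides NX_FDM_INTERMEDIATE only after to_process has been updated. A minimal sketch of that caller-side pattern, with build_list() as an invented stand-in for nx_build_sg_list() (not the real API), might look like:

/*
 * Illustrative only: the caller checks the length actually mapped,
 * as gcm_empty() does after this patch.  build_list() is hypothetical;
 * it may map fewer bytes than requested and reports the real count
 * back through *len.
 */
#include <stdio.h>
#include <errno.h>

#define AES_BLOCK_SIZE 16

/* Hypothetical stand-in: pretend only 'limit' bytes can be described. */
static void build_list(const unsigned char *buf, unsigned int *len,
		       unsigned int limit)
{
	(void)buf;
	if (*len > limit)
		*len = limit;
}

static int encrypt_counter(const unsigned char *iv, unsigned int limit)
{
	unsigned int len = AES_BLOCK_SIZE;

	build_list(iv, &len, limit);
	if (len != AES_BLOCK_SIZE)	/* could not map a full block */
		return -EINVAL;

	/* ... fire the operation ... */
	return 0;
}

int main(void)
{
	unsigned char iv[AES_BLOCK_SIZE] = { 0 };

	printf("limit 16: %d\n", encrypt_counter(iv, 16)); /* succeeds: 0        */
	printf("limit  8: %d\n", encrypt_counter(iv, 8));  /* fails:   -EINVAL   */
	return 0;
}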