author		Uri Simchoni <uri@jdland.co.il>		2010-04-08 12:27:33 -0400
committer	Herbert Xu <herbert@gondor.apana.org.au>	2010-04-13 04:50:47 -0400
commit		3b61a90502481045f56c1c41a2af35ee48ca8b80 (patch)
tree		beb3cdd6aff9ffcfd8c6336bb94da55d5c05aade /drivers/crypto/mv_cesa.c
parent		15d4dd3594221f11a7730fcf2d5f9942b96cdd7e (diff)
crypto: mv_cesa - Enqueue generic async requests
Enqueue generic async requests rather than ablkcipher requests in the
driver's queue.

Signed-off-by: Uri Simchoni <uri@jdland.co.il>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
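Note: the gist of the change is that the driver's internal queue now carries
generic struct crypto_async_request objects and only casts back to the
cipher-specific request where it is actually needed. A minimal sketch of that
pattern follows (illustrative only, not the driver's code; example_queue,
example_enqueue and example_complete are made-up names):

  #include <linux/crypto.h>
  #include <crypto/algapi.h>

  /* Hypothetical driver-local queue, handled generically. */
  static struct crypto_queue example_queue;

  static int example_enqueue(struct crypto_async_request *req)
  {
  	/* Works for any async request type, not just ablkcipher. */
  	return crypto_enqueue_request(&example_queue, req);
  }

  static void example_complete(struct crypto_async_request *req, int err)
  {
  	if (!err) {
  		/* Recover the cipher-specific request only where needed. */
  		struct ablkcipher_request *areq = ablkcipher_request_cast(req);
  		/* ... cipher-specific post-processing on areq ... */
  		(void)areq;
  	}
  	req->complete(req, err);	/* generic completion callback */
  }

Keeping the queueing and completion paths request-type agnostic in this way
would let request types other than ablkcipher share the same queue.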
Diffstat (limited to 'drivers/crypto/mv_cesa.c')
-rw-r--r--	drivers/crypto/mv_cesa.c | 43
1 file changed, 24 insertions(+), 19 deletions(-)
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
index 096f9ff6578a..8891e2e703e3 100644
--- a/drivers/crypto/mv_cesa.c
+++ b/drivers/crypto/mv_cesa.c
@@ -39,6 +39,7 @@ enum engine_status {
  * @sg_src_left:	bytes left in src to process (scatter list)
  * @src_start:		offset to add to src start position (scatter list)
  * @crypt_len:		length of current crypt process
+ * @hw_nbytes:		total bytes to process in hw for this request
  * @sg_dst_left:	bytes left dst to process in this scatter list
  * @dst_start:		offset to add to dst start position (scatter list)
  * @total_req_bytes:	total number of bytes processed (request).
@@ -55,6 +56,7 @@ struct req_progress {
 	int sg_src_left;
 	int src_start;
 	int crypt_len;
+	int hw_nbytes;
 	/* dst mostly */
 	int sg_dst_left;
 	int dst_start;
@@ -71,7 +73,7 @@ struct crypto_priv {
 	spinlock_t lock;
 	struct crypto_queue queue;
 	enum engine_status eng_st;
-	struct ablkcipher_request *cur_req;
+	struct crypto_async_request *cur_req;
 	struct req_progress p;
 	int max_req_size;
 	int sram_size;
@@ -175,18 +177,18 @@ static void copy_src_to_buf(struct req_progress *p, char *dbuf, int len)
 	}
 }
 
-static void setup_data_in(struct ablkcipher_request *req)
+static void setup_data_in(void)
 {
 	struct req_progress *p = &cpg->p;
 	p->crypt_len =
-	    min((int)req->nbytes - p->total_req_bytes, cpg->max_req_size);
+	    min(p->hw_nbytes - p->total_req_bytes, cpg->max_req_size);
 	copy_src_to_buf(p, cpg->sram + SRAM_DATA_IN_START,
 			p->crypt_len);
 }
 
 static void mv_process_current_q(int first_block)
 {
-	struct ablkcipher_request *req = cpg->cur_req;
+	struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req);
 	struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
 	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
 	struct sec_accel_config op;
@@ -229,7 +231,7 @@ static void mv_process_current_q(int first_block)
 	ENC_P_DST(SRAM_DATA_OUT_START);
 	op.enc_key_p = SRAM_DATA_KEY_P;
 
-	setup_data_in(req);
+	setup_data_in();
 	op.enc_len = cpg->p.crypt_len;
 	memcpy(cpg->sram + SRAM_CONFIG, &op,
 			sizeof(struct sec_accel_config));
@@ -246,7 +248,7 @@ static void mv_process_current_q(int first_block)
 
 static void mv_crypto_algo_completion(void)
 {
-	struct ablkcipher_request *req = cpg->cur_req;
+	struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req);
 	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
 
 	if (req_ctx->op != COP_AES_CBC)
@@ -257,7 +259,7 @@ static void mv_crypto_algo_completion(void)
 
 static void dequeue_complete_req(void)
 {
-	struct ablkcipher_request *req = cpg->cur_req;
+	struct crypto_async_request *req = cpg->cur_req;
 	void *buf;
 	int ret;
 	int need_copy_len = cpg->p.crypt_len;
@@ -289,7 +291,7 @@ static void dequeue_complete_req(void)
 	} while (need_copy_len > 0);
 
 	BUG_ON(cpg->eng_st != ENGINE_W_DEQUEUE);
-	if (cpg->p.total_req_bytes < req->nbytes) {
+	if (cpg->p.total_req_bytes < cpg->p.hw_nbytes) {
 		/* process next scatter list entry */
 		cpg->eng_st = ENGINE_BUSY;
 		mv_process_current_q(0);
@@ -299,7 +301,7 @@ static void dequeue_complete_req(void)
 		mv_crypto_algo_completion();
 		cpg->eng_st = ENGINE_IDLE;
 		local_bh_disable();
-		req->base.complete(&req->base, 0);
+		req->complete(req, 0);
 		local_bh_enable();
 	}
 }
@@ -323,16 +325,19 @@ static int count_sgs(struct scatterlist *sl, unsigned int total_bytes)
 
 static void mv_enqueue_new_req(struct ablkcipher_request *req)
 {
+	struct req_progress *p = &cpg->p;
 	int num_sgs;
 
-	cpg->cur_req = req;
-	memset(&cpg->p, 0, sizeof(struct req_progress));
+	cpg->cur_req = &req->base;
+	memset(p, 0, sizeof(struct req_progress));
+	p->hw_nbytes = req->nbytes;
 
 	num_sgs = count_sgs(req->src, req->nbytes);
-	sg_miter_start(&cpg->p.src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);
+	sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);
 
 	num_sgs = count_sgs(req->dst, req->nbytes);
-	sg_miter_start(&cpg->p.dst_sg_it, req->dst, num_sgs, SG_MITER_TO_SG);
+	sg_miter_start(&p->dst_sg_it, req->dst, num_sgs, SG_MITER_TO_SG);
+
 	mv_process_current_q(1);
 }
 
@@ -378,13 +383,13 @@ static int queue_manag(void *data)
 	return 0;
 }
 
-static int mv_handle_req(struct ablkcipher_request *req)
+static int mv_handle_req(struct crypto_async_request *req)
 {
 	unsigned long flags;
 	int ret;
 
 	spin_lock_irqsave(&cpg->lock, flags);
-	ret = ablkcipher_enqueue_request(&cpg->queue, req);
+	ret = crypto_enqueue_request(&cpg->queue, req);
 	spin_unlock_irqrestore(&cpg->lock, flags);
 	wake_up_process(cpg->queue_th);
 	return ret;
@@ -397,7 +402,7 @@ static int mv_enc_aes_ecb(struct ablkcipher_request *req)
 	req_ctx->op = COP_AES_ECB;
 	req_ctx->decrypt = 0;
 
-	return mv_handle_req(req);
+	return mv_handle_req(&req->base);
 }
 
 static int mv_dec_aes_ecb(struct ablkcipher_request *req)
@@ -409,7 +414,7 @@ static int mv_dec_aes_ecb(struct ablkcipher_request *req)
 	req_ctx->decrypt = 1;
 
 	compute_aes_dec_key(ctx);
-	return mv_handle_req(req);
+	return mv_handle_req(&req->base);
 }
 
 static int mv_enc_aes_cbc(struct ablkcipher_request *req)
@@ -419,7 +424,7 @@ static int mv_enc_aes_cbc(struct ablkcipher_request *req)
 	req_ctx->op = COP_AES_CBC;
 	req_ctx->decrypt = 0;
 
-	return mv_handle_req(req);
+	return mv_handle_req(&req->base);
 }
 
 static int mv_dec_aes_cbc(struct ablkcipher_request *req)
@@ -431,7 +436,7 @@ static int mv_dec_aes_cbc(struct ablkcipher_request *req)
 	req_ctx->decrypt = 1;
 
 	compute_aes_dec_key(ctx);
-	return mv_handle_req(req);
+	return mv_handle_req(&req->base);
 }
 
 static int mv_cra_init(struct crypto_tfm *tfm)