about summary refs log tree commit diff stats
diff options
context:
space:
mode:
-rw-r--r--crypto/af_alg.c6
-rw-r--r--crypto/algif_aead.c16
-rw-r--r--crypto/algif_skcipher.c16
-rw-r--r--crypto/mcryptd.c23
-rw-r--r--crypto/skcipher.c10
-rw-r--r--include/crypto/mcryptd.h1
6 files changed, 37 insertions, 35 deletions
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index 415a54ced4d6..444a387df219 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -1138,12 +1138,6 @@ int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags,
1138 if (!af_alg_readable(sk)) 1138 if (!af_alg_readable(sk))
1139 break; 1139 break;
1140 1140
1141 if (!ctx->used) {
1142 err = af_alg_wait_for_data(sk, flags);
1143 if (err)
1144 return err;
1145 }
1146
1147 seglen = min_t(size_t, (maxsize - len), 1141 seglen = min_t(size_t, (maxsize - len),
1148 msg_data_left(msg)); 1142 msg_data_left(msg));
1149 1143
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
index 48b34e9c6834..ddcc45f77edd 100644
--- a/crypto/algif_aead.c
+++ b/crypto/algif_aead.c
@@ -111,6 +111,12 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
111 size_t usedpages = 0; /* [in] RX bufs to be used from user */ 111 size_t usedpages = 0; /* [in] RX bufs to be used from user */
112 size_t processed = 0; /* [in] TX bufs to be consumed */ 112 size_t processed = 0; /* [in] TX bufs to be consumed */
113 113
114 if (!ctx->used) {
115 err = af_alg_wait_for_data(sk, flags);
116 if (err)
117 return err;
118 }
119
114 /* 120 /*
115 * Data length provided by caller via sendmsg/sendpage that has not 121 * Data length provided by caller via sendmsg/sendpage that has not
116 * yet been processed. 122 * yet been processed.
@@ -285,6 +291,10 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
285 /* AIO operation */ 291 /* AIO operation */
286 sock_hold(sk); 292 sock_hold(sk);
287 areq->iocb = msg->msg_iocb; 293 areq->iocb = msg->msg_iocb;
294
295 /* Remember output size that will be generated. */
296 areq->outlen = outlen;
297
288 aead_request_set_callback(&areq->cra_u.aead_req, 298 aead_request_set_callback(&areq->cra_u.aead_req,
289 CRYPTO_TFM_REQ_MAY_BACKLOG, 299 CRYPTO_TFM_REQ_MAY_BACKLOG,
290 af_alg_async_cb, areq); 300 af_alg_async_cb, areq);
@@ -292,12 +302,8 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
292 crypto_aead_decrypt(&areq->cra_u.aead_req); 302 crypto_aead_decrypt(&areq->cra_u.aead_req);
293 303
294 /* AIO operation in progress */ 304 /* AIO operation in progress */
295 if (err == -EINPROGRESS || err == -EBUSY) { 305 if (err == -EINPROGRESS || err == -EBUSY)
296 /* Remember output size that will be generated. */
297 areq->outlen = outlen;
298
299 return -EIOCBQUEUED; 306 return -EIOCBQUEUED;
300 }
301 307
302 sock_put(sk); 308 sock_put(sk);
303 } else { 309 } else {
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index 30cff827dd8f..baef9bfccdda 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -72,6 +72,12 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
72 int err = 0; 72 int err = 0;
73 size_t len = 0; 73 size_t len = 0;
74 74
75 if (!ctx->used) {
76 err = af_alg_wait_for_data(sk, flags);
77 if (err)
78 return err;
79 }
80
75 /* Allocate cipher request for current operation. */ 81 /* Allocate cipher request for current operation. */
76 areq = af_alg_alloc_areq(sk, sizeof(struct af_alg_async_req) + 82 areq = af_alg_alloc_areq(sk, sizeof(struct af_alg_async_req) +
77 crypto_skcipher_reqsize(tfm)); 83 crypto_skcipher_reqsize(tfm));
@@ -119,6 +125,10 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
119 /* AIO operation */ 125 /* AIO operation */
120 sock_hold(sk); 126 sock_hold(sk);
121 areq->iocb = msg->msg_iocb; 127 areq->iocb = msg->msg_iocb;
128
129 /* Remember output size that will be generated. */
130 areq->outlen = len;
131
122 skcipher_request_set_callback(&areq->cra_u.skcipher_req, 132 skcipher_request_set_callback(&areq->cra_u.skcipher_req,
123 CRYPTO_TFM_REQ_MAY_SLEEP, 133 CRYPTO_TFM_REQ_MAY_SLEEP,
124 af_alg_async_cb, areq); 134 af_alg_async_cb, areq);
@@ -127,12 +137,8 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
127 crypto_skcipher_decrypt(&areq->cra_u.skcipher_req); 137 crypto_skcipher_decrypt(&areq->cra_u.skcipher_req);
128 138
129 /* AIO operation in progress */ 139 /* AIO operation in progress */
130 if (err == -EINPROGRESS || err == -EBUSY) { 140 if (err == -EINPROGRESS || err == -EBUSY)
131 /* Remember output size that will be generated. */
132 areq->outlen = len;
133
134 return -EIOCBQUEUED; 141 return -EIOCBQUEUED;
135 }
136 142
137 sock_put(sk); 143 sock_put(sk);
138 } else { 144 } else {
diff --git a/crypto/mcryptd.c b/crypto/mcryptd.c
index 4e6472658852..eca04d3729b3 100644
--- a/crypto/mcryptd.c
+++ b/crypto/mcryptd.c
@@ -81,6 +81,7 @@ static int mcryptd_init_queue(struct mcryptd_queue *queue,
81 pr_debug("cpu_queue #%d %p\n", cpu, queue->cpu_queue); 81 pr_debug("cpu_queue #%d %p\n", cpu, queue->cpu_queue);
82 crypto_init_queue(&cpu_queue->queue, max_cpu_qlen); 82 crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
83 INIT_WORK(&cpu_queue->work, mcryptd_queue_worker); 83 INIT_WORK(&cpu_queue->work, mcryptd_queue_worker);
84 spin_lock_init(&cpu_queue->q_lock);
84 } 85 }
85 return 0; 86 return 0;
86} 87}
@@ -104,15 +105,16 @@ static int mcryptd_enqueue_request(struct mcryptd_queue *queue,
104 int cpu, err; 105 int cpu, err;
105 struct mcryptd_cpu_queue *cpu_queue; 106 struct mcryptd_cpu_queue *cpu_queue;
106 107
107 cpu = get_cpu(); 108 cpu_queue = raw_cpu_ptr(queue->cpu_queue);
108 cpu_queue = this_cpu_ptr(queue->cpu_queue); 109 spin_lock(&cpu_queue->q_lock);
109 rctx->tag.cpu = cpu; 110 cpu = smp_processor_id();
111 rctx->tag.cpu = smp_processor_id();
110 112
111 err = crypto_enqueue_request(&cpu_queue->queue, request); 113 err = crypto_enqueue_request(&cpu_queue->queue, request);
112 pr_debug("enqueue request: cpu %d cpu_queue %p request %p\n", 114 pr_debug("enqueue request: cpu %d cpu_queue %p request %p\n",
113 cpu, cpu_queue, request); 115 cpu, cpu_queue, request);
116 spin_unlock(&cpu_queue->q_lock);
114 queue_work_on(cpu, kcrypto_wq, &cpu_queue->work); 117 queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
115 put_cpu();
116 118
117 return err; 119 return err;
118} 120}
@@ -161,16 +163,11 @@ static void mcryptd_queue_worker(struct work_struct *work)
161 cpu_queue = container_of(work, struct mcryptd_cpu_queue, work); 163 cpu_queue = container_of(work, struct mcryptd_cpu_queue, work);
162 i = 0; 164 i = 0;
163 while (i < MCRYPTD_BATCH || single_task_running()) { 165 while (i < MCRYPTD_BATCH || single_task_running()) {
164 /* 166
165 * preempt_disable/enable is used to prevent 167 spin_lock_bh(&cpu_queue->q_lock);
166 * being preempted by mcryptd_enqueue_request()
167 */
168 local_bh_disable();
169 preempt_disable();
170 backlog = crypto_get_backlog(&cpu_queue->queue); 168 backlog = crypto_get_backlog(&cpu_queue->queue);
171 req = crypto_dequeue_request(&cpu_queue->queue); 169 req = crypto_dequeue_request(&cpu_queue->queue);
172 preempt_enable(); 170 spin_unlock_bh(&cpu_queue->q_lock);
173 local_bh_enable();
174 171
175 if (!req) { 172 if (!req) {
176 mcryptd_opportunistic_flush(); 173 mcryptd_opportunistic_flush();
@@ -185,7 +182,7 @@ static void mcryptd_queue_worker(struct work_struct *work)
185 ++i; 182 ++i;
186 } 183 }
187 if (cpu_queue->queue.qlen) 184 if (cpu_queue->queue.qlen)
188 queue_work(kcrypto_wq, &cpu_queue->work); 185 queue_work_on(smp_processor_id(), kcrypto_wq, &cpu_queue->work);
189} 186}
190 187
191void mcryptd_flusher(struct work_struct *__work) 188void mcryptd_flusher(struct work_struct *__work)
diff --git a/crypto/skcipher.c b/crypto/skcipher.c
index 778e0ff42bfa..11af5fd6a443 100644
--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -449,6 +449,8 @@ static int skcipher_walk_skcipher(struct skcipher_walk *walk,
449 449
450 walk->total = req->cryptlen; 450 walk->total = req->cryptlen;
451 walk->nbytes = 0; 451 walk->nbytes = 0;
452 walk->iv = req->iv;
453 walk->oiv = req->iv;
452 454
453 if (unlikely(!walk->total)) 455 if (unlikely(!walk->total))
454 return 0; 456 return 0;
@@ -456,9 +458,6 @@ static int skcipher_walk_skcipher(struct skcipher_walk *walk,
456 scatterwalk_start(&walk->in, req->src); 458 scatterwalk_start(&walk->in, req->src);
457 scatterwalk_start(&walk->out, req->dst); 459 scatterwalk_start(&walk->out, req->dst);
458 460
459 walk->iv = req->iv;
460 walk->oiv = req->iv;
461
462 walk->flags &= ~SKCIPHER_WALK_SLEEP; 461 walk->flags &= ~SKCIPHER_WALK_SLEEP;
463 walk->flags |= req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? 462 walk->flags |= req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
464 SKCIPHER_WALK_SLEEP : 0; 463 SKCIPHER_WALK_SLEEP : 0;
@@ -510,6 +509,8 @@ static int skcipher_walk_aead_common(struct skcipher_walk *walk,
510 int err; 509 int err;
511 510
512 walk->nbytes = 0; 511 walk->nbytes = 0;
512 walk->iv = req->iv;
513 walk->oiv = req->iv;
513 514
514 if (unlikely(!walk->total)) 515 if (unlikely(!walk->total))
515 return 0; 516 return 0;
@@ -525,9 +526,6 @@ static int skcipher_walk_aead_common(struct skcipher_walk *walk,
525 scatterwalk_done(&walk->in, 0, walk->total); 526 scatterwalk_done(&walk->in, 0, walk->total);
526 scatterwalk_done(&walk->out, 0, walk->total); 527 scatterwalk_done(&walk->out, 0, walk->total);
527 528
528 walk->iv = req->iv;
529 walk->oiv = req->iv;
530
531 if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) 529 if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP)
532 walk->flags |= SKCIPHER_WALK_SLEEP; 530 walk->flags |= SKCIPHER_WALK_SLEEP;
533 else 531 else
diff --git a/include/crypto/mcryptd.h b/include/crypto/mcryptd.h
index cceafa01f907..b67404fc4b34 100644
--- a/include/crypto/mcryptd.h
+++ b/include/crypto/mcryptd.h
@@ -27,6 +27,7 @@ static inline struct mcryptd_ahash *__mcryptd_ahash_cast(
27 27
28struct mcryptd_cpu_queue { 28struct mcryptd_cpu_queue {
29 struct crypto_queue queue; 29 struct crypto_queue queue;
30 spinlock_t q_lock;
30 struct work_struct work; 31 struct work_struct work;
31}; 32};
32 33