author	Herbert Xu <herbert@gondor.apana.org.au>	2007-12-14 09:28:14 -0500
committer	Herbert Xu <herbert@gondor.apana.org.au>	2008-01-10 16:16:55 -0500
commit	e7cd2514ea506f06bd4f7b13a9b62afd60f9c73b (patch)
tree	66c46a54a2841091165a0b8083329c41f3504847 /crypto/chainiv.c
parent	4726204200327c04a77b819e2c653c063f1bc6ab (diff)
[CRYPTO] chainiv: Avoid lock spinning where possible
This patch makes chainiv avoid spinning by postponing requests on lock
contention if the user allows the use of asynchronous algorithms.  If
a synchronous algorithm is requested then we behave as before.

This should improve IPsec performance on SMP when two CPUs attempt to
transmit over the same SA.  Currently one of them will spin doing
nothing waiting for the other CPU to finish its encryption.  This
patch makes it postpone the request and get on with other work.

If only one CPU is transmitting for a given SA, then we will process
the request synchronously as before.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
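To illustrate the idea, here is a minimal userspace sketch of the
claim-or-postpone pattern the patch applies, modeled with C11 atomics
rather than the kernel's bitops and crypto queue; sa_ctx, submit() and
the backlog counter are illustrative stand-ins, not names from the
kernel source.

/*
 * Hedged sketch: claim the SA with an atomic test-and-set; on
 * contention, postpone instead of spinning on a lock.  atomic_flag
 * stands in for the CHAINIV_STATE_INUSE bit, the counter for the
 * postponed-request queue.
 */
#include <stdatomic.h>
#include <stdio.h>

struct sa_ctx {
	atomic_flag inuse;	/* is some CPU already encrypting on this SA? */
	atomic_int backlog;	/* stand-in for the postponed-request queue */
};

static void encrypt_now(struct sa_ctx *ctx)
{
	puts("encrypting inline");	/* chain the IV, run the cipher */
}

static void submit(struct sa_ctx *ctx)
{
	/* test_and_set returns true if the flag was already held: another
	 * CPU owns the SA, so queue the request and return immediately. */
	if (atomic_flag_test_and_set(&ctx->inuse)) {
		atomic_fetch_add(&ctx->backlog, 1);
		puts("contended: postponed for async completion");
		return;
	}

	encrypt_now(ctx);		/* uncontended: synchronous path */
	atomic_flag_clear(&ctx->inuse);
}

int main(void)
{
	struct sa_ctx ctx = { .inuse = ATOMIC_FLAG_INIT };
	submit(&ctx);	/* single submitter: processed inline, as before */
	submit(&ctx);	/* still uncontended here, so also inline */
	return 0;
}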
Diffstat (limited to 'crypto/chainiv.c')
 crypto/chainiv.c | 208 ++++++++++++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 200 insertions(+), 8 deletions(-)
diff --git a/crypto/chainiv.c b/crypto/chainiv.c
index 38868d160b47..d17fa0454dc3 100644
--- a/crypto/chainiv.c
+++ b/crypto/chainiv.c
@@ -16,16 +16,34 @@
 #include <crypto/internal/skcipher.h>
 #include <linux/err.h>
 #include <linux/init.h>
+#include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/random.h>
 #include <linux/spinlock.h>
 #include <linux/string.h>
+#include <linux/workqueue.h>
+
+enum {
+	CHAINIV_STATE_INUSE = 0,
+};
 
 struct chainiv_ctx {
 	spinlock_t lock;
 	char iv[];
 };
 
+struct async_chainiv_ctx {
+	unsigned long state;
+
+	spinlock_t lock;
+	int err;
+
+	struct crypto_queue queue;
+	struct work_struct postponed;
+
+	char iv[];
+};
+
 static int chainiv_givencrypt(struct skcipher_givcrypt_request *req)
 {
 	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
@@ -80,26 +98,187 @@ unlock:
 	return chainiv_givencrypt(req);
 }
 
+static int chainiv_init_common(struct crypto_tfm *tfm)
+{
+	tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request);
+
+	return skcipher_geniv_init(tfm);
+}
+
 static int chainiv_init(struct crypto_tfm *tfm)
 {
-	struct crypto_ablkcipher *geniv = __crypto_ablkcipher_cast(tfm);
-	struct chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
+	struct chainiv_ctx *ctx = crypto_tfm_ctx(tfm);
 
 	spin_lock_init(&ctx->lock);
 
-	tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request);
+	return chainiv_init_common(tfm);
+}
 
-	return skcipher_geniv_init(tfm);
+static int async_chainiv_schedule_work(struct async_chainiv_ctx *ctx)
+{
+	int queued;
+
+	if (!ctx->queue.qlen) {
+		smp_mb__before_clear_bit();
+		clear_bit(CHAINIV_STATE_INUSE, &ctx->state);
+
+		if (!ctx->queue.qlen ||
+		    test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state))
+			goto out;
+	}
+
+	queued = schedule_work(&ctx->postponed);
+	BUG_ON(!queued);
+
+out:
+	return ctx->err;
+}
+
+static int async_chainiv_postpone_request(struct skcipher_givcrypt_request *req)
+{
+	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
+	struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
+	int err;
+
+	spin_lock_bh(&ctx->lock);
+	err = skcipher_enqueue_givcrypt(&ctx->queue, req);
+	spin_unlock_bh(&ctx->lock);
+
+	if (test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state))
+		return err;
+
+	ctx->err = err;
+	return async_chainiv_schedule_work(ctx);
+}
+
+static int async_chainiv_givencrypt_tail(struct skcipher_givcrypt_request *req)
+{
+	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
+	struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
+	struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req);
+	unsigned int ivsize = crypto_ablkcipher_ivsize(geniv);
+
+	memcpy(req->giv, ctx->iv, ivsize);
+	memcpy(subreq->info, ctx->iv, ivsize);
+
+	ctx->err = crypto_ablkcipher_encrypt(subreq);
+	if (ctx->err)
+		goto out;
+
+	memcpy(ctx->iv, subreq->info, ivsize);
+
+out:
+	return async_chainiv_schedule_work(ctx);
+}
+
+static int async_chainiv_givencrypt(struct skcipher_givcrypt_request *req)
+{
+	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
+	struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
+	struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req);
+
+	ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv));
+	ablkcipher_request_set_callback(subreq, req->creq.base.flags,
+					req->creq.base.complete,
+					req->creq.base.data);
+	ablkcipher_request_set_crypt(subreq, req->creq.src, req->creq.dst,
+				     req->creq.nbytes, req->creq.info);
+
+	if (test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state))
+		goto postpone;
+
+	if (ctx->queue.qlen) {
+		clear_bit(CHAINIV_STATE_INUSE, &ctx->state);
+		goto postpone;
+	}
+
+	return async_chainiv_givencrypt_tail(req);
+
+postpone:
+	return async_chainiv_postpone_request(req);
+}
+
+static int async_chainiv_givencrypt_first(struct skcipher_givcrypt_request *req)
+{
+	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
+	struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
+
+	if (test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state))
+		goto out;
+
+	if (crypto_ablkcipher_crt(geniv)->givencrypt !=
+	    async_chainiv_givencrypt_first)
+		goto unlock;
+
+	crypto_ablkcipher_crt(geniv)->givencrypt = async_chainiv_givencrypt;
+	get_random_bytes(ctx->iv, crypto_ablkcipher_ivsize(geniv));
+
+unlock:
+	clear_bit(CHAINIV_STATE_INUSE, &ctx->state);
+
+out:
+	return async_chainiv_givencrypt(req);
+}
+
+static void async_chainiv_do_postponed(struct work_struct *work)
+{
+	struct async_chainiv_ctx *ctx = container_of(work,
+						     struct async_chainiv_ctx,
+						     postponed);
+	struct skcipher_givcrypt_request *req;
+	struct ablkcipher_request *subreq;
+
+	/* Only handle one request at a time to avoid hogging keventd. */
+	spin_lock_bh(&ctx->lock);
+	req = skcipher_dequeue_givcrypt(&ctx->queue);
+	spin_unlock_bh(&ctx->lock);
+
+	if (!req) {
+		async_chainiv_schedule_work(ctx);
+		return;
+	}
+
+	subreq = skcipher_givcrypt_reqctx(req);
+	subreq->base.flags |= CRYPTO_TFM_REQ_MAY_SLEEP;
+
+	async_chainiv_givencrypt_tail(req);
+}
+
+static int async_chainiv_init(struct crypto_tfm *tfm)
+{
+	struct async_chainiv_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	spin_lock_init(&ctx->lock);
+
+	crypto_init_queue(&ctx->queue, 100);
+	INIT_WORK(&ctx->postponed, async_chainiv_do_postponed);
+
+	return chainiv_init_common(tfm);
+}
+
+static void async_chainiv_exit(struct crypto_tfm *tfm)
+{
+	struct async_chainiv_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	BUG_ON(test_bit(CHAINIV_STATE_INUSE, &ctx->state) || ctx->queue.qlen);
+
+	skcipher_geniv_exit(tfm);
 }
 
 static struct crypto_template chainiv_tmpl;
 
 static struct crypto_instance *chainiv_alloc(struct rtattr **tb)
 {
+	struct crypto_attr_type *algt;
 	struct crypto_instance *inst;
+	int err;
 
-	inst = skcipher_geniv_alloc(&chainiv_tmpl, tb, 0,
-				    CRYPTO_ALG_ASYNC);
+	algt = crypto_get_attr_type(tb);
+	err = PTR_ERR(algt);
+	if (IS_ERR(algt))
+		return ERR_PTR(err);
+
+	inst = skcipher_geniv_alloc(&chainiv_tmpl, tb, 0, 0);
 	if (IS_ERR(inst))
 		goto out;
 
@@ -108,8 +287,21 @@ static struct crypto_instance *chainiv_alloc(struct rtattr **tb)
 	inst->alg.cra_init = chainiv_init;
 	inst->alg.cra_exit = skcipher_geniv_exit;
 
-	inst->alg.cra_ctxsize = sizeof(struct chainiv_ctx) +
-				inst->alg.cra_ablkcipher.ivsize;
+	inst->alg.cra_ctxsize = sizeof(struct chainiv_ctx);
+
+	if (!crypto_requires_sync(algt->type, algt->mask)) {
+		inst->alg.cra_flags |= CRYPTO_ALG_ASYNC;
+
+		inst->alg.cra_ablkcipher.givencrypt =
+			async_chainiv_givencrypt_first;
+
+		inst->alg.cra_init = async_chainiv_init;
+		inst->alg.cra_exit = async_chainiv_exit;
+
+		inst->alg.cra_ctxsize = sizeof(struct async_chainiv_ctx);
+	}
+
+	inst->alg.cra_ctxsize += inst->alg.cra_ablkcipher.ivsize;
 
 out:
 	return inst;
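The subtle part of the patch is async_chainiv_schedule_work(): after
the queue looks empty it drops the INUSE bit, then re-checks the queue
and tries to retake the bit, so a request enqueued in that window
cannot be stranded without a worker.  Below is a hedged userspace
model of that handshake, using C11 seq_cst atomics in place of
smp_mb__before_clear_bit() plus the kernel bitops; kick_worker() is a
hypothetical stand-in for schedule_work(), and all names are
illustrative.

/*
 * Hedged model of the lost-wakeup avoidance in
 * async_chainiv_schedule_work, in userspace C11.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct actx {
	atomic_bool inuse;	/* CHAINIV_STATE_INUSE analogue */
	atomic_int qlen;	/* ctx->queue.qlen analogue */
};

static void kick_worker(struct actx *ctx)
{
	puts("worker scheduled to drain the backlog");
}

/* Called when the current request finishes or has just been queued. */
static void schedule_more_work(struct actx *ctx)
{
	if (atomic_load(&ctx->qlen) == 0) {
		/* Release ownership.  The seq_cst store is ordered after
		 * the qlen read above, mirroring the kernel's
		 * smp_mb__before_clear_bit() + clear_bit(). */
		atomic_store(&ctx->inuse, false);

		/* Re-check: a request may have been enqueued by a CPU that
		 * saw INUSE still set and therefore did not kick the worker
		 * itself.  If so, try to retake ownership. */
		if (atomic_load(&ctx->qlen) == 0 ||
		    atomic_exchange(&ctx->inuse, true))
			return;	/* truly idle, or another CPU took over */
	}

	kick_worker(ctx);	/* backlog exists and we own it: drain it */
}

int main(void)
{
	struct actx ctx = { false, 0 };
	atomic_store(&ctx.inuse, true);	/* pretend we just ran a request */
	atomic_fetch_add(&ctx.qlen, 1);	/* and one arrived meanwhile */
	schedule_more_work(&ctx);	/* -> worker scheduled, not lost */
	return 0;
}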