aboutsummaryrefslogtreecommitdiffstats
path: root/crypto/tcrypt.c
diff options
context:
space:
mode:
authorGilad Ben-Yossef <gilad@benyossef.com>2017-12-17 03:29:05 -0500
committerHerbert Xu <herbert@gondor.apana.org.au>2017-12-28 01:56:42 -0500
commit427988d981c4c20a7f0421873351ccd14a88e2b2 (patch)
tree40345c1003120f7acc4a6e667dca412c577df0f7 /crypto/tcrypt.c
parente161c5930c150abab95d2ccad428d68ce1780ea1 (diff)
crypto: tcrypt - add multibuf aead speed test
The performance of some aead tfm providers is affected by the amount of parallelism possible with the processing. Introduce an async aead concurrent multiple buffer processing speed test to be able to test performance of such tfm providers. Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com> Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Diffstat (limited to 'crypto/tcrypt.c')
-rw-r--r--crypto/tcrypt.c437
1 files changed, 378 insertions, 59 deletions
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index d617c1956533..58e3344d7169 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -80,6 +80,66 @@ static char *check[] = {
80 NULL 80 NULL
81}; 81};
82 82
/* Buffer lengths (bytes) swept by the cipher speed tests; 0 terminates. */
static u32 block_sizes[] = { 16, 64, 256, 1024, 8192, 0 };
/* Buffer lengths (bytes) swept by the AEAD speed tests; 0 terminates. */
static u32 aead_sizes[] = { 16, 64, 256, 512, 1024, 2048, 4096, 8192, 0 };

#define XBUFSIZE	8	/* pages per test buffer */
#define MAX_IVLEN	32	/* largest IV any tested algorithm needs */
88
89static int testmgr_alloc_buf(char *buf[XBUFSIZE])
90{
91 int i;
92
93 for (i = 0; i < XBUFSIZE; i++) {
94 buf[i] = (void *)__get_free_page(GFP_KERNEL);
95 if (!buf[i])
96 goto err_free_buf;
97 }
98
99 return 0;
100
101err_free_buf:
102 while (i-- > 0)
103 free_page((unsigned long)buf[i]);
104
105 return -ENOMEM;
106}
107
108static void testmgr_free_buf(char *buf[XBUFSIZE])
109{
110 int i;
111
112 for (i = 0; i < XBUFSIZE; i++)
113 free_page((unsigned long)buf[i]);
114}
115
116static void sg_init_aead(struct scatterlist *sg, char *xbuf[XBUFSIZE],
117 unsigned int buflen, const void *assoc,
118 unsigned int aad_size)
119{
120 int np = (buflen + PAGE_SIZE - 1)/PAGE_SIZE;
121 int k, rem;
122
123 if (np > XBUFSIZE) {
124 rem = PAGE_SIZE;
125 np = XBUFSIZE;
126 } else {
127 rem = buflen % PAGE_SIZE;
128 }
129
130 sg_init_table(sg, np + 1);
131
132 sg_set_buf(&sg[0], assoc, aad_size);
133
134 if (rem)
135 np--;
136 for (k = 0; k < np; k++)
137 sg_set_buf(&sg[k + 1], xbuf[k], PAGE_SIZE);
138
139 if (rem)
140 sg_set_buf(&sg[k + 1], xbuf[k], rem);
141}
142
83static inline int do_one_aead_op(struct aead_request *req, int ret) 143static inline int do_one_aead_op(struct aead_request *req, int ret)
84{ 144{
85 struct crypto_wait *wait = req->base.data; 145 struct crypto_wait *wait = req->base.data;
@@ -87,8 +147,44 @@ static inline int do_one_aead_op(struct aead_request *req, int ret)
87 return crypto_wait_req(ret, wait); 147 return crypto_wait_req(ret, wait);
88} 148}
89 149
90static int test_aead_jiffies(struct aead_request *req, int enc, 150struct test_mb_aead_data {
91 int blen, int secs) 151 struct scatterlist sg[XBUFSIZE];
152 struct scatterlist sgout[XBUFSIZE];
153 struct aead_request *req;
154 struct crypto_wait wait;
155 char *xbuf[XBUFSIZE];
156 char *xoutbuf[XBUFSIZE];
157 char *axbuf[XBUFSIZE];
158};
159
160static int do_mult_aead_op(struct test_mb_aead_data *data, int enc,
161 u32 num_mb)
162{
163 int i, rc[num_mb], err = 0;
164
165 /* Fire up a bunch of concurrent requests */
166 for (i = 0; i < num_mb; i++) {
167 if (enc == ENCRYPT)
168 rc[i] = crypto_aead_encrypt(data[i].req);
169 else
170 rc[i] = crypto_aead_decrypt(data[i].req);
171 }
172
173 /* Wait for all requests to finish */
174 for (i = 0; i < num_mb; i++) {
175 rc[i] = crypto_wait_req(rc[i], &data[i].wait);
176
177 if (rc[i]) {
178 pr_info("concurrent request %d error %d\n", i, rc[i]);
179 err = rc[i];
180 }
181 }
182
183 return err;
184}
185
186static int test_mb_aead_jiffies(struct test_mb_aead_data *data, int enc,
187 int blen, int secs, u32 num_mb)
92{ 188{
93 unsigned long start, end; 189 unsigned long start, end;
94 int bcount; 190 int bcount;
@@ -96,21 +192,18 @@ static int test_aead_jiffies(struct aead_request *req, int enc,
96 192
97 for (start = jiffies, end = start + secs * HZ, bcount = 0; 193 for (start = jiffies, end = start + secs * HZ, bcount = 0;
98 time_before(jiffies, end); bcount++) { 194 time_before(jiffies, end); bcount++) {
99 if (enc) 195 ret = do_mult_aead_op(data, enc, num_mb);
100 ret = do_one_aead_op(req, crypto_aead_encrypt(req));
101 else
102 ret = do_one_aead_op(req, crypto_aead_decrypt(req));
103
104 if (ret) 196 if (ret)
105 return ret; 197 return ret;
106 } 198 }
107 199
108 printk("%d operations in %d seconds (%ld bytes)\n", 200 pr_cont("%d operations in %d seconds (%ld bytes)\n",
109 bcount, secs, (long)bcount * blen); 201 bcount * num_mb, secs, (long)bcount * blen * num_mb);
110 return 0; 202 return 0;
111} 203}
112 204
113static int test_aead_cycles(struct aead_request *req, int enc, int blen) 205static int test_mb_aead_cycles(struct test_mb_aead_data *data, int enc,
206 int blen, u32 num_mb)
114{ 207{
115 unsigned long cycles = 0; 208 unsigned long cycles = 0;
116 int ret = 0; 209 int ret = 0;
@@ -118,11 +211,7 @@ static int test_aead_cycles(struct aead_request *req, int enc, int blen)
118 211
119 /* Warm-up run. */ 212 /* Warm-up run. */
120 for (i = 0; i < 4; i++) { 213 for (i = 0; i < 4; i++) {
121 if (enc) 214 ret = do_mult_aead_op(data, enc, num_mb);
122 ret = do_one_aead_op(req, crypto_aead_encrypt(req));
123 else
124 ret = do_one_aead_op(req, crypto_aead_decrypt(req));
125
126 if (ret) 215 if (ret)
127 goto out; 216 goto out;
128 } 217 }
@@ -132,10 +221,7 @@ static int test_aead_cycles(struct aead_request *req, int enc, int blen)
132 cycles_t start, end; 221 cycles_t start, end;
133 222
134 start = get_cycles(); 223 start = get_cycles();
135 if (enc) 224 ret = do_mult_aead_op(data, enc, num_mb);
136 ret = do_one_aead_op(req, crypto_aead_encrypt(req));
137 else
138 ret = do_one_aead_op(req, crypto_aead_decrypt(req));
139 end = get_cycles(); 225 end = get_cycles();
140 226
141 if (ret) 227 if (ret)
@@ -146,70 +232,276 @@ static int test_aead_cycles(struct aead_request *req, int enc, int blen)
146 232
147out: 233out:
148 if (ret == 0) 234 if (ret == 0)
149 printk("1 operation in %lu cycles (%d bytes)\n", 235 pr_cont("1 operation in %lu cycles (%d bytes)\n",
150 (cycles + 4) / 8, blen); 236 (cycles + 4) / (8 * num_mb), blen);
151 237
152 return ret; 238 return ret;
153} 239}
154 240
155static u32 block_sizes[] = { 16, 64, 256, 1024, 8192, 0 }; 241static void test_mb_aead_speed(const char *algo, int enc, int secs,
156static u32 aead_sizes[] = { 16, 64, 256, 512, 1024, 2048, 4096, 8192, 0 }; 242 struct aead_speed_template *template,
243 unsigned int tcount, u8 authsize,
244 unsigned int aad_size, u8 *keysize, u32 num_mb)
245{
246 struct test_mb_aead_data *data;
247 struct crypto_aead *tfm;
248 unsigned int i, j, iv_len;
249 const char *key;
250 const char *e;
251 void *assoc;
252 u32 *b_size;
253 char *iv;
254 int ret;
157 255
158#define XBUFSIZE 8
159#define MAX_IVLEN 32
160 256
161static int testmgr_alloc_buf(char *buf[XBUFSIZE]) 257 if (aad_size >= PAGE_SIZE) {
162{ 258 pr_err("associate data length (%u) too big\n", aad_size);
163 int i; 259 return;
260 }
164 261
165 for (i = 0; i < XBUFSIZE; i++) { 262 iv = kzalloc(MAX_IVLEN, GFP_KERNEL);
166 buf[i] = (void *)__get_free_page(GFP_KERNEL); 263 if (!iv)
167 if (!buf[i]) 264 return;
168 goto err_free_buf; 265
266 if (enc == ENCRYPT)
267 e = "encryption";
268 else
269 e = "decryption";
270
271 data = kcalloc(num_mb, sizeof(*data), GFP_KERNEL);
272 if (!data)
273 goto out_free_iv;
274
275 tfm = crypto_alloc_aead(algo, 0, 0);
276 if (IS_ERR(tfm)) {
277 pr_err("failed to load transform for %s: %ld\n",
278 algo, PTR_ERR(tfm));
279 goto out_free_data;
169 } 280 }
170 281
171 return 0; 282 ret = crypto_aead_setauthsize(tfm, authsize);
172 283
173err_free_buf: 284 for (i = 0; i < num_mb; ++i)
174 while (i-- > 0) 285 if (testmgr_alloc_buf(data[i].xbuf)) {
175 free_page((unsigned long)buf[i]); 286 while (i--)
287 testmgr_free_buf(data[i].xbuf);
288 goto out_free_tfm;
289 }
176 290
177 return -ENOMEM; 291 for (i = 0; i < num_mb; ++i)
292 if (testmgr_alloc_buf(data[i].axbuf)) {
293 while (i--)
294 testmgr_free_buf(data[i].axbuf);
295 goto out_free_xbuf;
296 }
297
298 for (i = 0; i < num_mb; ++i)
299 if (testmgr_alloc_buf(data[i].xoutbuf)) {
300 while (i--)
301 testmgr_free_buf(data[i].axbuf);
302 goto out_free_axbuf;
303 }
304
305 for (i = 0; i < num_mb; ++i) {
306 data[i].req = aead_request_alloc(tfm, GFP_KERNEL);
307 if (!data[i].req) {
308 pr_err("alg: skcipher: Failed to allocate request for %s\n",
309 algo);
310 while (i--)
311 aead_request_free(data[i].req);
312 goto out_free_xoutbuf;
313 }
314 }
315
316 for (i = 0; i < num_mb; ++i) {
317 crypto_init_wait(&data[i].wait);
318 aead_request_set_callback(data[i].req,
319 CRYPTO_TFM_REQ_MAY_BACKLOG,
320 crypto_req_done, &data[i].wait);
321 }
322
323 pr_info("\ntesting speed of multibuffer %s (%s) %s\n", algo,
324 get_driver_name(crypto_aead, tfm), e);
325
326 i = 0;
327 do {
328 b_size = aead_sizes;
329 do {
330 if (*b_size + authsize > XBUFSIZE * PAGE_SIZE) {
331 pr_err("template (%u) too big for bufufer (%lu)\n",
332 authsize + *b_size,
333 XBUFSIZE * PAGE_SIZE);
334 goto out;
335 }
336
337 pr_info("test %u (%d bit key, %d byte blocks): ", i,
338 *keysize * 8, *b_size);
339
340 /* Set up tfm global state, i.e. the key */
341
342 memset(tvmem[0], 0xff, PAGE_SIZE);
343 key = tvmem[0];
344 for (j = 0; j < tcount; j++) {
345 if (template[j].klen == *keysize) {
346 key = template[j].key;
347 break;
348 }
349 }
350
351 crypto_aead_clear_flags(tfm, ~0);
352
353 ret = crypto_aead_setkey(tfm, key, *keysize);
354 if (ret) {
355 pr_err("setkey() failed flags=%x\n",
356 crypto_aead_get_flags(tfm));
357 goto out;
358 }
359
360 iv_len = crypto_aead_ivsize(tfm);
361 if (iv_len)
362 memset(iv, 0xff, iv_len);
363
364 /* Now setup per request stuff, i.e. buffers */
365
366 for (j = 0; j < num_mb; ++j) {
367 struct test_mb_aead_data *cur = &data[j];
368
369 assoc = cur->axbuf[0];
370 memset(assoc, 0xff, aad_size);
371
372 sg_init_aead(cur->sg, cur->xbuf,
373 *b_size + (enc ? 0 : authsize),
374 assoc, aad_size);
375
376 sg_init_aead(cur->sgout, cur->xoutbuf,
377 *b_size + (enc ? authsize : 0),
378 assoc, aad_size);
379
380 aead_request_set_ad(cur->req, aad_size);
381
382 if (!enc) {
383
384 aead_request_set_crypt(cur->req,
385 cur->sgout,
386 cur->sg,
387 *b_size, iv);
388 ret = crypto_aead_encrypt(cur->req);
389 ret = do_one_aead_op(cur->req, ret);
390
391 if (ret) {
392 pr_err("calculating auth failed failed (%d)\n",
393 ret);
394 break;
395 }
396 }
397
398 aead_request_set_crypt(cur->req, cur->sg,
399 cur->sgout, *b_size +
400 (enc ? 0 : authsize),
401 iv);
402
403 }
404
405 if (secs)
406 ret = test_mb_aead_jiffies(data, enc, *b_size,
407 secs, num_mb);
408 else
409 ret = test_mb_aead_cycles(data, enc, *b_size,
410 num_mb);
411
412 if (ret) {
413 pr_err("%s() failed return code=%d\n", e, ret);
414 break;
415 }
416 b_size++;
417 i++;
418 } while (*b_size);
419 keysize++;
420 } while (*keysize);
421
422out:
423 for (i = 0; i < num_mb; ++i)
424 aead_request_free(data[i].req);
425out_free_xoutbuf:
426 for (i = 0; i < num_mb; ++i)
427 testmgr_free_buf(data[i].xoutbuf);
428out_free_axbuf:
429 for (i = 0; i < num_mb; ++i)
430 testmgr_free_buf(data[i].axbuf);
431out_free_xbuf:
432 for (i = 0; i < num_mb; ++i)
433 testmgr_free_buf(data[i].xbuf);
434out_free_tfm:
435 crypto_free_aead(tfm);
436out_free_data:
437 kfree(data);
438out_free_iv:
439 kfree(iv);
178} 440}
179 441
180static void testmgr_free_buf(char *buf[XBUFSIZE]) 442static int test_aead_jiffies(struct aead_request *req, int enc,
443 int blen, int secs)
181{ 444{
182 int i; 445 unsigned long start, end;
446 int bcount;
447 int ret;
183 448
184 for (i = 0; i < XBUFSIZE; i++) 449 for (start = jiffies, end = start + secs * HZ, bcount = 0;
185 free_page((unsigned long)buf[i]); 450 time_before(jiffies, end); bcount++) {
451 if (enc)
452 ret = do_one_aead_op(req, crypto_aead_encrypt(req));
453 else
454 ret = do_one_aead_op(req, crypto_aead_decrypt(req));
455
456 if (ret)
457 return ret;
458 }
459
460 printk("%d operations in %d seconds (%ld bytes)\n",
461 bcount, secs, (long)bcount * blen);
462 return 0;
186} 463}
187 464
188static void sg_init_aead(struct scatterlist *sg, char *xbuf[XBUFSIZE], 465static int test_aead_cycles(struct aead_request *req, int enc, int blen)
189 unsigned int buflen, const void *assoc,
190 unsigned int aad_size)
191{ 466{
192 int np = (buflen + PAGE_SIZE - 1)/PAGE_SIZE; 467 unsigned long cycles = 0;
193 int k, rem; 468 int ret = 0;
469 int i;
194 470
195 if (np > XBUFSIZE) { 471 /* Warm-up run. */
196 rem = PAGE_SIZE; 472 for (i = 0; i < 4; i++) {
197 np = XBUFSIZE; 473 if (enc)
198 } else { 474 ret = do_one_aead_op(req, crypto_aead_encrypt(req));
199 rem = buflen % PAGE_SIZE; 475 else
476 ret = do_one_aead_op(req, crypto_aead_decrypt(req));
477
478 if (ret)
479 goto out;
200 } 480 }
201 481
202 sg_init_table(sg, np + 1); 482 /* The real thing. */
483 for (i = 0; i < 8; i++) {
484 cycles_t start, end;
203 485
204 sg_set_buf(&sg[0], assoc, aad_size); 486 start = get_cycles();
487 if (enc)
488 ret = do_one_aead_op(req, crypto_aead_encrypt(req));
489 else
490 ret = do_one_aead_op(req, crypto_aead_decrypt(req));
491 end = get_cycles();
205 492
206 if (rem) 493 if (ret)
207 np--; 494 goto out;
208 for (k = 0; k < np; k++)
209 sg_set_buf(&sg[k + 1], xbuf[k], PAGE_SIZE);
210 495
211 if (rem) 496 cycles += end - start;
212 sg_set_buf(&sg[k + 1], xbuf[k], rem); 497 }
498
499out:
500 if (ret == 0)
501 printk("1 operation in %lu cycles (%d bytes)\n",
502 (cycles + 4) / 8, blen);
503
504 return ret;
213} 505}
214 506
215static void test_aead_speed(const char *algo, int enc, unsigned int secs, 507static void test_aead_speed(const char *algo, int enc, unsigned int secs,
@@ -1912,6 +2204,33 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
1912 speed_template_32); 2204 speed_template_32);
1913 break; 2205 break;
1914 2206
2207 case 215:
2208 test_mb_aead_speed("rfc4106(gcm(aes))", ENCRYPT, sec, NULL,
2209 0, 16, 16, aead_speed_template_20, num_mb);
2210 test_mb_aead_speed("gcm(aes)", ENCRYPT, sec, NULL, 0, 16, 8,
2211 speed_template_16_24_32, num_mb);
2212 test_mb_aead_speed("rfc4106(gcm(aes))", DECRYPT, sec, NULL,
2213 0, 16, 16, aead_speed_template_20, num_mb);
2214 test_mb_aead_speed("gcm(aes)", DECRYPT, sec, NULL, 0, 16, 8,
2215 speed_template_16_24_32, num_mb);
2216 break;
2217
2218 case 216:
2219 test_mb_aead_speed("rfc4309(ccm(aes))", ENCRYPT, sec, NULL, 0,
2220 16, 16, aead_speed_template_19, num_mb);
2221 test_mb_aead_speed("rfc4309(ccm(aes))", DECRYPT, sec, NULL, 0,
2222 16, 16, aead_speed_template_19, num_mb);
2223 break;
2224
2225 case 217:
2226 test_mb_aead_speed("rfc7539esp(chacha20,poly1305)", ENCRYPT,
2227 sec, NULL, 0, 16, 8, aead_speed_template_36,
2228 num_mb);
2229 test_mb_aead_speed("rfc7539esp(chacha20,poly1305)", DECRYPT,
2230 sec, NULL, 0, 16, 8, aead_speed_template_36,
2231 num_mb);
2232 break;
2233
1915 case 300: 2234 case 300:
1916 if (alg) { 2235 if (alg) {
1917 test_hash_speed(alg, sec, generic_hash_speed_template); 2236 test_hash_speed(alg, sec, generic_hash_speed_template);