path: root/net/ipv4/inet_hashtables.c
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic INET transport hashtables
 *
 * Authors:	Lotsa people, from code originally in tcp
 *
 *	This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/wait.h>

#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/ip.h>

/*
 * Allocate and initialize a new local port bind bucket.
 * The caller must hold the hash bucket lock (head->lock) for snum's chain.
 */
struct inet_bind_bucket *inet_bind_bucket_create(struct kmem_cache *cachep,
						 struct net *net,
						 struct inet_bind_hashbucket *head,
						 const unsigned short snum)
{
	struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC);

	if (tb != NULL) {
		tb->ib_net    = hold_net(net);
		tb->port      = snum;
		tb->fastreuse = 0;
		INIT_HLIST_HEAD(&tb->owners);
		hlist_add_head(&tb->node, &head->chain);
	}
	return tb;
}
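
/*
 * The usual pattern around this helper is: take head->lock, walk
 * head->chain looking for an existing bucket for the port, and only
 * create a new one when none is found (see the ephemeral-port loop in
 * __inet_hash_connect() below for an in-file example).
 */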

/*
 * Caller must hold hashbucket lock for this tb with local BH disabled
 */
void inet_bind_bucket_destroy(struct kmem_cache *cachep, struct inet_bind_bucket *tb)
{
	if (hlist_empty(&tb->owners)) {
		__hlist_del(&tb->node);
		release_net(tb->ib_net);
		kmem_cache_free(cachep, tb);
	}
}

void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
		    const unsigned short snum)
{
	inet_sk(sk)->num = snum;
	sk_add_bind_node(sk, &tb->owners);
	inet_csk(sk)->icsk_bind_hash = tb;
}

/*
 * Get rid of any references to a local port held by the given sock.
 */
static void __inet_put_port(struct sock *sk)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	const int bhash = inet_bhashfn(inet_sk(sk)->num, hashinfo->bhash_size);
	struct inet_bind_hashbucket *head = &hashinfo->bhash[bhash];
	struct inet_bind_bucket *tb;

	spin_lock(&head->lock);
	tb = inet_csk(sk)->icsk_bind_hash;
	__sk_del_bind_node(sk);
	inet_csk(sk)->icsk_bind_hash = NULL;
	inet_sk(sk)->num = 0;
	inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
	spin_unlock(&head->lock);
}

void inet_put_port(struct sock *sk)
{
	local_bh_disable();
	__inet_put_port(sk);
	local_bh_enable();
}

EXPORT_SYMBOL(inet_put_port);
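
/*
 * Give an accepted child socket a reference to the listener's bind bucket,
 * so the child is recorded as an owner of the same local port.
 */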

void __inet_inherit_port(struct sock *sk, struct sock *child)
{
	struct inet_hashinfo *table = sk->sk_prot->h.hashinfo;
	const int bhash = inet_bhashfn(inet_sk(child)->num, table->bhash_size);
	struct inet_bind_hashbucket *head = &table->bhash[bhash];
	struct inet_bind_bucket *tb;

	spin_lock(&head->lock);
	tb = inet_csk(sk)->icsk_bind_hash;
	sk_add_bind_node(child, &tb->owners);
	inet_csk(child)->icsk_bind_hash = tb;
	spin_unlock(&head->lock);
}

EXPORT_SYMBOL_GPL(__inet_inherit_port);

/*
 * This lock without WQ_FLAG_EXCLUSIVE is good on UP and it can be very bad on SMP.
 * Look, when several writers sleep and reader wakes them up, all but one
 * immediately hit write lock and grab all the cpus. Exclusive sleep solves
 * this, _but_ remember, it adds useless work on UP machines (wake up each
 * exclusive lock release). It should be ifdefed really.
 */
void inet_listen_wlock(struct inet_hashinfo *hashinfo)
	__acquires(hashinfo->lhash_lock)
{
	write_lock(&hashinfo->lhash_lock);

	if (atomic_read(&hashinfo->lhash_users)) {
		DEFINE_WAIT(wait);

		for (;;) {
			prepare_to_wait_exclusive(&hashinfo->lhash_wait,
						  &wait, TASK_UNINTERRUPTIBLE);
			if (!atomic_read(&hashinfo->lhash_users))
				break;
			write_unlock_bh(&hashinfo->lhash_lock);
			schedule();
			write_lock_bh(&hashinfo->lhash_lock);
		}

		finish_wait(&hashinfo->lhash_wait, &wait);
	}
}

/*
 * Don't inline this cruft. There are some nice properties to exploit here. The
 * BSD API does not allow a listening sock to specify the remote port nor the
 * remote address for the connection. So always assume those are both
 * wildcarded during the search since they can never be otherwise.
 */
static struct sock *inet_lookup_listener_slow(struct net *net,
					      const struct hlist_head *head,
					      const __be32 daddr,
					      const unsigned short hnum,
					      const int dif)
{
	struct sock *result = NULL, *sk;
	const struct hlist_node *node;
	int hiscore = -1;
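	/*
	 * Score each candidate: an AF_INET socket starts at 1, an exact
	 * rcv_saddr match adds 2 and a matching bound device adds 2.  A
	 * score of 5 cannot be beaten, so the walk returns right away;
	 * otherwise the highest-scoring socket seen wins.
	 */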

	sk_for_each(sk, node, head) {
		const struct inet_sock *inet = inet_sk(sk);

		if (net_eq(sock_net(sk), net) && inet->num == hnum &&
				!ipv6_only_sock(sk)) {
			const __be32 rcv_saddr = inet->rcv_saddr;
			int score = sk->sk_family == PF_INET ? 1 : 0;

			if (rcv_saddr) {
				if (rcv_saddr != daddr)
					continue;
				score += 2;
			}
			if (sk->sk_bound_dev_if) {
				if (sk->sk_bound_dev_if != dif)
					continue;
				score += 2;
			}
			if (score == 5)
				return sk;
			if (score > hiscore) {
				hiscore	= score;
				result	= sk;
			}
		}
	}
	return result;
}

/* Optimize the common listener case. */
struct sock *__inet_lookup_listener(struct net *net,
				    struct inet_hashinfo *hashinfo,
				    const __be32 daddr, const unsigned short hnum,
				    const int dif)
{
	struct sock *sk = NULL;
	const struct hlist_head *head;

	read_lock(&hashinfo->lhash_lock);
	head = &hashinfo->listening_hash[inet_lhashfn(hnum)];
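	/*
	 * Fast path: if the chain holds a single socket that is bound to
	 * this port, has a compatible address (wildcard or exact match),
	 * is not IPv6-only, is not tied to a device and lives in the right
	 * namespace, take it directly and skip the scored walk below.
	 */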
	if (!hlist_empty(head)) {
		const struct inet_sock *inet = inet_sk((sk = __sk_head(head)));

		if (inet->num == hnum && !sk->sk_node.next &&
		    (!inet->rcv_saddr || inet->rcv_saddr == daddr) &&
		    (sk->sk_family == PF_INET || !ipv6_only_sock(sk)) &&
		    !sk->sk_bound_dev_if && net_eq(sock_net(sk), net))
			goto sherry_cache;
		sk = inet_lookup_listener_slow(net, head, daddr, hnum, dif);
	}
	if (sk) {
sherry_cache:
		sock_hold(sk);
	}
	read_unlock(&hashinfo->lhash_lock);
	return sk;
}
EXPORT_SYMBOL_GPL(__inet_lookup_listener);

struct sock * __inet_lookup_established(struct net *net,
				  struct inet_hashinfo *hashinfo,
				  const __be32 saddr, const __be16 sport,
				  const __be32 daddr, const u16 hnum,
				  const int dif)
{
	INET_ADDR_COOKIE(acookie, saddr, daddr)
	const __portpair ports = INET_COMBINED_PORTS(sport, hnum);
	struct sock *sk;
	const struct hlist_node *node;
	/* Optimize here for direct hit, only listening connections can
	 * have wildcards anyways.
	 */
	unsigned int hash = inet_ehashfn(daddr, hnum, saddr, sport);
	struct inet_ehash_bucket *head = inet_ehash_bucket(hashinfo, hash);
	rwlock_t *lock = inet_ehash_lockp(hashinfo, hash);

	prefetch(head->chain.first);
	read_lock(lock);
	sk_for_each(sk, node, &head->chain) {
		if (INET_MATCH(sk, net, hash, acookie,
					saddr, daddr, ports, dif))
			goto hit; /* You sunk my battleship! */
	}

	/* Must check for a TIME_WAIT'er before going to listener hash. */
	sk_for_each(sk, node, &head->twchain) {
		if (INET_TW_MATCH(sk, net, hash, acookie,
					saddr, daddr, ports, dif))
			goto hit;
	}
	sk = NULL;
out:
	read_unlock(lock);
	return sk;
hit:
	sock_hold(sk);
	goto out;
}
EXPORT_SYMBOL_GPL(__inet_lookup_established);

/* called with local bh disabled */
static int __inet_check_established(struct inet_timewait_death_row *death_row,
				    struct sock *sk, __u16 lport,
				    struct inet_timewait_sock **twp)
{
	struct inet_hashinfo *hinfo = death_row->hashinfo;
	struct inet_sock *inet = inet_sk(sk);
	__be32 daddr = inet->rcv_saddr;
	__be32 saddr = inet->daddr;
	int dif = sk->sk_bound_dev_if;
	INET_ADDR_COOKIE(acookie, saddr, daddr)
	const __portpair ports = INET_COMBINED_PORTS(inet->dport, lport);
	unsigned int hash = inet_ehashfn(daddr, lport, saddr, inet->dport);
	struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash);
	rwlock_t *lock = inet_ehash_lockp(hinfo, hash);
	struct sock *sk2;
	const struct hlist_node *node;
	struct inet_timewait_sock *tw;
	struct net *net = sock_net(sk);

	prefetch(head->chain.first);
	write_lock(lock);

	/* Check TIME-WAIT sockets first. */
	sk_for_each(sk2, node, &head->twchain) {
		tw = inet_twsk(sk2);

		if (INET_TW_MATCH(sk2, net, hash, acookie,
					saddr, daddr, ports, dif)) {
			if (twsk_unique(sk, sk2, twp))
				goto unique;
			else
				goto not_unique;
		}
	}
	tw = NULL;

	/* And established part... */
	sk_for_each(sk2, node, &head->chain) {
		if (INET_MATCH(sk2, net, hash, acookie,
					saddr, daddr, ports, dif))
			goto not_unique;
	}

unique:
	/* Must record num and sport now. Otherwise the socket would show
	 * up in the hash table with a funny identity. */
	inet->num = lport;
	inet->sport = htons(lport);
	sk->sk_hash = hash;
	BUG_TRAP(sk_unhashed(sk));
	__sk_add_node(sk, &head->chain);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
	write_unlock(lock);

	if (twp) {
		*twp = tw;
		NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
	} else if (tw) {
		/* Silly. Should hash-dance instead... */
		inet_twsk_deschedule(tw, death_row);
		NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);

		inet_twsk_put(tw);
	}

	return 0;

not_unique:
	write_unlock(lock);
	return -EADDRNOTAVAIL;
}

static inline u32 inet_sk_port_offset(const struct sock *sk)
{
	const struct inet_sock *inet = inet_sk(sk);
	return secure_ipv4_port_ephemeral(inet->rcv_saddr, inet->daddr,
					  inet->dport);
}

void __inet_hash_nolisten(struct sock *sk)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	struct hlist_head *list;
	rwlock_t *lock;
	struct inet_ehash_bucket *head;

	BUG_TRAP(sk_unhashed(sk));

	sk->sk_hash = inet_sk_ehashfn(sk);
	head = inet_ehash_bucket(hashinfo, sk->sk_hash);
	list = &head->chain;
	lock = inet_ehash_lockp(hashinfo, sk->sk_hash);

	write_lock(lock);
	__sk_add_node(sk, list);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
	write_unlock(lock);
}
EXPORT_SYMBOL_GPL(__inet_hash_nolisten);

static void __inet_hash(struct sock *sk)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	struct hlist_head *list;
	rwlock_t *lock;

	if (sk->sk_state != TCP_LISTEN) {
		__inet_hash_nolisten(sk);
		return;
	}

	BUG_TRAP(sk_unhashed(sk));
	list = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];
	lock = &hashinfo->lhash_lock;

	inet_listen_wlock(hashinfo);
	__sk_add_node(sk, list);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
	write_unlock(lock);
	wake_up(&hashinfo->lhash_wait);
}

void inet_hash(struct sock *sk)
{
	if (sk->sk_state != TCP_CLOSE) {
		local_bh_disable();
		__inet_hash(sk);
		local_bh_enable();
	}
}
EXPORT_SYMBOL_GPL(inet_hash);

void inet_unhash(struct sock *sk)
{
	rwlock_t *lock;
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;

	if (sk_unhashed(sk))
		goto out;

	if (sk->sk_state == TCP_LISTEN) {
		local_bh_disable();
		inet_listen_wlock(hashinfo);
		lock = &hashinfo->lhash_lock;
	} else {
		lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
		write_lock_bh(lock);
	}
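	/*
	 * Both branches above leave BHs disabled with the chosen lock held,
	 * so the single write_unlock_bh() below pairs with either the
	 * explicit local_bh_disable() (listen case) or write_lock_bh()
	 * (established case).
	 */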

	if (__sk_del_node_init(sk))
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
	write_unlock_bh(lock);
out:
	if (sk->sk_state == TCP_LISTEN)
		wake_up(&hashinfo->lhash_wait);
}
EXPORT_SYMBOL_GPL(inet_unhash);

int __inet_hash_connect(struct inet_timewait_death_row *death_row,
		struct sock *sk, u32 port_offset,
		int (*check_established)(struct inet_timewait_death_row *,
			struct sock *, __u16, struct inet_timewait_sock **),
		void (*hash)(struct sock *sk))
{
	struct inet_hashinfo *hinfo = death_row->hashinfo;
	const unsigned short snum = inet_sk(sk)->num;
	struct inet_bind_hashbucket *head;
	struct inet_bind_bucket *tb;
	int ret;
	struct net *net = sock_net(sk);

	if (!snum) {
		int i, remaining, low, high, port;
		static u32 hint;
		u32 offset = hint + port_offset;
		struct hlist_node *node;
		struct inet_timewait_sock *tw = NULL;

		inet_get_local_port_range(&low, &high);
		remaining = (high - low) + 1;

		local_bh_disable();
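		/*
		 * Try each port in [low, high] exactly once, starting at a
		 * position derived from the connection's addresses and
		 * destination port (port_offset) plus a global hint, so that
		 * successive connects spread out across the range.
		 */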
		for (i = 1; i <= remaining; i++) {
			port = low + (i + offset) % remaining;
			head = &hinfo->bhash[inet_bhashfn(port, hinfo->bhash_size)];
			spin_lock(&head->lock);

			/* Does not bother with rcv_saddr checks,
			 * because the established check is already
			 * unique enough.
			 */
			inet_bind_bucket_for_each(tb, node, &head->chain) {
				if (tb->ib_net == net && tb->port == port) {
					BUG_TRAP(!hlist_empty(&tb->owners));
					if (tb->fastreuse >= 0)
						goto next_port;
					if (!check_established(death_row, sk,
								port, &tw))
						goto ok;
					goto next_port;
				}
			}

			tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep,
					net, head, port);
			if (!tb) {
				spin_unlock(&head->lock);
				break;
			}
			tb->fastreuse = -1;
			goto ok;

		next_port:
			spin_unlock(&head->lock);
		}
		local_bh_enable();

		return -EADDRNOTAVAIL;

ok:
		hint += i;

		/* Head lock still held and bh's disabled */
		inet_bind_hash(sk, tb, port);
		if (sk_unhashed(sk)) {
			inet_sk(sk)->sport = htons(port);
			hash(sk);
		}
		spin_unlock(&head->lock);

		if (tw) {
			inet_twsk_deschedule(tw, death_row);
			inet_twsk_put(tw);
		}

		ret = 0;
		goto out;
	}

	head = &hinfo->bhash[inet_bhashfn(snum, hinfo->bhash_size)];
	tb  = inet_csk(sk)->icsk_bind_hash;
	spin_lock_bh(&head->lock);
	if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
		hash(sk);
		spin_unlock_bh(&head->lock);
		return 0;
	} else {
		spin_unlock(&head->lock);
		/* No definite answer... Walk to established hash table */
		ret = check_established(death_row, sk, snum, NULL);
out:
		local_bh_enable();
		return ret;
	}
}

/*
 * Bind a port for a connect operation and hash it.
 */
int inet_hash_connect(struct inet_timewait_death_row *death_row,
		      struct sock *sk)
{
	return __inet_hash_connect(death_row, sk, inet_sk_port_offset(sk),
			__inet_check_established, __inet_hash_nolisten);
}

EXPORT_SYMBOL_GPL(inet_hash_connect);
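
/*
 * Typical use (a sketch, not part of this file): an IPv4 connect path picks
 * an ephemeral port and hashes the socket in one step, roughly
 *
 *	err = inet_hash_connect(&tcp_death_row, sk);
 *	if (err)
 *		goto failure;
 *
 * with tcp_death_row being TCP's inet_timewait_death_row instance.
 */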
hl opt">{ unsigned int i, j, k, temp; struct scatterlist sg[8]; char result[64]; struct crypto_hash *tfm; struct hash_desc desc; int ret; void *hash_buff; printk("\ntesting %s\n", algo); tfm = crypto_alloc_hash(algo, 0, CRYPTO_ALG_ASYNC); if (IS_ERR(tfm)) { printk("failed to load transform for %s: %ld\n", algo, PTR_ERR(tfm)); return; } desc.tfm = tfm; desc.flags = 0; for (i = 0; i < tcount; i++) { printk("test %u:\n", i + 1); memset(result, 0, 64); hash_buff = kzalloc(template[i].psize, GFP_KERNEL); if (!hash_buff) continue; memcpy(hash_buff, template[i].plaintext, template[i].psize); sg_init_one(&sg[0], hash_buff, template[i].psize); if (template[i].ksize) { ret = crypto_hash_setkey(tfm, template[i].key, template[i].ksize); if (ret) { printk("setkey() failed ret=%d\n", ret); kfree(hash_buff); goto out; } } ret = crypto_hash_digest(&desc, sg, template[i].psize, result); if (ret) { printk("digest () failed ret=%d\n", ret); kfree(hash_buff); goto out; } hexdump(result, crypto_hash_digestsize(tfm)); printk("%s\n", memcmp(result, template[i].digest, crypto_hash_digestsize(tfm)) ? "fail" : "pass"); kfree(hash_buff); } printk("testing %s across pages\n", algo); /* setup the dummy buffer first */ memset(xbuf, 0, XBUFSIZE); j = 0; for (i = 0; i < tcount; i++) { if (template[i].np) { j++; printk("test %u:\n", j); memset(result, 0, 64); temp = 0; sg_init_table(sg, template[i].np); for (k = 0; k < template[i].np; k++) { memcpy(&xbuf[IDX[k]], template[i].plaintext + temp, template[i].tap[k]); temp += template[i].tap[k]; sg_set_buf(&sg[k], &xbuf[IDX[k]], template[i].tap[k]); } if (template[i].ksize) { ret = crypto_hash_setkey(tfm, template[i].key, template[i].ksize); if (ret) { printk("setkey() failed ret=%d\n", ret); goto out; } } ret = crypto_hash_digest(&desc, sg, template[i].psize, result); if (ret) { printk("digest () failed ret=%d\n", ret); goto out; } hexdump(result, crypto_hash_digestsize(tfm)); printk("%s\n", memcmp(result, template[i].digest, crypto_hash_digestsize(tfm)) ? 
"fail" : "pass"); } } out: crypto_free_hash(tfm); } static void test_aead(char *algo, int enc, struct aead_testvec *template, unsigned int tcount) { unsigned int ret, i, j, k, temp; char *q; struct crypto_aead *tfm; char *key; struct aead_request *req; struct scatterlist sg[8]; struct scatterlist asg[8]; const char *e; struct tcrypt_result result; unsigned int authsize; void *input; void *assoc; char iv[MAX_IVLEN]; if (enc == ENCRYPT) e = "encryption"; else e = "decryption"; printk(KERN_INFO "\ntesting %s %s\n", algo, e); init_completion(&result.completion); tfm = crypto_alloc_aead(algo, 0, 0); if (IS_ERR(tfm)) { printk(KERN_INFO "failed to load transform for %s: %ld\n", algo, PTR_ERR(tfm)); return; } req = aead_request_alloc(tfm, GFP_KERNEL); if (!req) { printk(KERN_INFO "failed to allocate request for %s\n", algo); goto out; } aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, tcrypt_complete, &result); for (i = 0, j = 0; i < tcount; i++) { if (!template[i].np) { printk(KERN_INFO "test %u (%d bit key):\n", ++j, template[i].klen * 8); /* some tepmplates have no input data but they will * touch input */ input = kzalloc(template[i].ilen + template[i].rlen, GFP_KERNEL); if (!input) continue; assoc = kzalloc(template[i].alen, GFP_KERNEL); if (!assoc) { kfree(input); continue; } memcpy(input, template[i].input, template[i].ilen); memcpy(assoc, template[i].assoc, template[i].alen); if (template[i].iv) memcpy(iv, template[i].iv, MAX_IVLEN); else memset(iv, 0, MAX_IVLEN); crypto_aead_clear_flags(tfm, ~0); if (template[i].wk) crypto_aead_set_flags( tfm, CRYPTO_TFM_REQ_WEAK_KEY); if (template[i].key) key = template[i].key; else key = kzalloc(template[i].klen, GFP_KERNEL); ret = crypto_aead_setkey(tfm, key, template[i].klen); if (ret) { printk(KERN_INFO "setkey() failed flags=%x\n", crypto_aead_get_flags(tfm)); if (!template[i].fail) goto next_one; } authsize = abs(template[i].rlen - template[i].ilen); ret = crypto_aead_setauthsize(tfm, authsize); if (ret) { printk(KERN_INFO "failed to set authsize = %u\n", authsize); goto next_one; } sg_init_one(&sg[0], input, template[i].ilen + (enc ? authsize : 0)); sg_init_one(&asg[0], assoc, template[i].alen); aead_request_set_crypt(req, sg, sg, template[i].ilen, iv); aead_request_set_assoc(req, asg, template[i].alen); ret = enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req); switch (ret) { case 0: break; case -EINPROGRESS: case -EBUSY: ret = wait_for_completion_interruptible( &result.completion); if (!ret && !(ret = result.err)) { INIT_COMPLETION(result.completion); break; } /* fall through */ default: printk(KERN_INFO "%s () failed err=%d\n", e, -ret); goto next_one; } q = kmap(sg_page(&sg[0])) + sg[0].offset; hexdump(q, template[i].rlen); printk(KERN_INFO "enc/dec: %s\n", memcmp(q, template[i].result, template[i].rlen) ? 
"fail" : "pass"); kunmap(sg_page(&sg[0])); next_one: if (!template[i].key) kfree(key); kfree(assoc); kfree(input); } } printk(KERN_INFO "\ntesting %s %s across pages (chunking)\n", algo, e); memset(xbuf, 0, XBUFSIZE); memset(axbuf, 0, XBUFSIZE); for (i = 0, j = 0; i < tcount; i++) { if (template[i].np) { printk(KERN_INFO "test %u (%d bit key):\n", ++j, template[i].klen * 8); if (template[i].iv) memcpy(iv, template[i].iv, MAX_IVLEN); else memset(iv, 0, MAX_IVLEN); crypto_aead_clear_flags(tfm, ~0); if (template[i].wk) crypto_aead_set_flags( tfm, CRYPTO_TFM_REQ_WEAK_KEY); key = template[i].key; ret = crypto_aead_setkey(tfm, key, template[i].klen); if (ret) { printk(KERN_INFO "setkey() failed flags=%x\n", crypto_aead_get_flags(tfm)); if (!template[i].fail) goto out; } sg_init_table(sg, template[i].np); for (k = 0, temp = 0; k < template[i].np; k++) { memcpy(&xbuf[IDX[k]], template[i].input + temp, template[i].tap[k]); temp += template[i].tap[k]; sg_set_buf(&sg[k], &xbuf[IDX[k]], template[i].tap[k]); } authsize = abs(template[i].rlen - template[i].ilen); ret = crypto_aead_setauthsize(tfm, authsize); if (ret) { printk(KERN_INFO "failed to set authsize = %u\n", authsize); goto out; } if (enc) sg[k - 1].length += authsize; sg_init_table(asg, template[i].anp); for (k = 0, temp = 0; k < template[i].anp; k++) { memcpy(&axbuf[IDX[k]], template[i].assoc + temp, template[i].atap[k]); temp += template[i].atap[k]; sg_set_buf(&asg[k], &axbuf[IDX[k]], template[i].atap[k]); } aead_request_set_crypt(req, sg, sg, template[i].ilen, iv); aead_request_set_assoc(req, asg, template[i].alen); ret = enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req); switch (ret) { case 0: break; case -EINPROGRESS: case -EBUSY: ret = wait_for_completion_interruptible( &result.completion); if (!ret && !(ret = result.err)) { INIT_COMPLETION(result.completion); break; } /* fall through */ default: printk(KERN_INFO "%s () failed err=%d\n", e, -ret); goto out; } for (k = 0, temp = 0; k < template[i].np; k++) { printk(KERN_INFO "page %u\n", k); q = kmap(sg_page(&sg[k])) + sg[k].offset; hexdump(q, template[i].tap[k]); printk(KERN_INFO "%s\n", memcmp(q, template[i].result + temp, template[i].tap[k] - (k < template[i].np - 1 || enc ? 0 : authsize)) ? 
"fail" : "pass"); temp += template[i].tap[k]; kunmap(sg_page(&sg[k])); } } } out: crypto_free_aead(tfm); aead_request_free(req); } static void test_cipher(char *algo, int enc, struct cipher_testvec *template, unsigned int tcount) { unsigned int ret, i, j, k, temp; char *q; struct crypto_ablkcipher *tfm; struct ablkcipher_request *req; struct scatterlist sg[8]; const char *e; struct tcrypt_result result; void *data; char iv[MAX_IVLEN]; if (enc == ENCRYPT) e = "encryption"; else e = "decryption"; printk("\ntesting %s %s\n", algo, e); init_completion(&result.completion); tfm = crypto_alloc_ablkcipher(algo, 0, 0); if (IS_ERR(tfm)) { printk("failed to load transform for %s: %ld\n", algo, PTR_ERR(tfm)); return; } req = ablkcipher_request_alloc(tfm, GFP_KERNEL); if (!req) { printk("failed to allocate request for %s\n", algo); goto out; } ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, tcrypt_complete, &result); j = 0; for (i = 0; i < tcount; i++) { data = kzalloc(template[i].ilen, GFP_KERNEL); if (!data) continue; memcpy(data, template[i].input, template[i].ilen); if (template[i].iv) memcpy(iv, template[i].iv, MAX_IVLEN); else memset(iv, 0, MAX_IVLEN); if (!(template[i].np)) { j++; printk("test %u (%d bit key):\n", j, template[i].klen * 8); crypto_ablkcipher_clear_flags(tfm, ~0); if (template[i].wk) crypto_ablkcipher_set_flags( tfm, CRYPTO_TFM_REQ_WEAK_KEY); ret = crypto_ablkcipher_setkey(tfm, template[i].key, template[i].klen); if (ret) { printk("setkey() failed flags=%x\n", crypto_ablkcipher_get_flags(tfm)); if (!template[i].fail) { kfree(data); goto out; } } sg_init_one(&sg[0], data, template[i].ilen); ablkcipher_request_set_crypt(req, sg, sg, template[i].ilen, iv); ret = enc ? crypto_ablkcipher_encrypt(req) : crypto_ablkcipher_decrypt(req); switch (ret) { case 0: break; case -EINPROGRESS: case -EBUSY: ret = wait_for_completion_interruptible( &result.completion); if (!ret && !((ret = result.err))) { INIT_COMPLETION(result.completion); break; } /* fall through */ default: printk("%s () failed err=%d\n", e, -ret); kfree(data); goto out; } q = kmap(sg_page(&sg[0])) + sg[0].offset; hexdump(q, template[i].rlen); printk("%s\n", memcmp(q, template[i].result, template[i].rlen) ? "fail" : "pass"); kunmap(sg_page(&sg[0])); } kfree(data); } printk("\ntesting %s %s across pages (chunking)\n", algo, e); memset(xbuf, 0, XBUFSIZE); j = 0; for (i = 0; i < tcount; i++) { data = kzalloc(template[i].ilen, GFP_KERNEL); if (!data) continue; memcpy(data, template[i].input, template[i].ilen); if (template[i].iv) memcpy(iv, template[i].iv, MAX_IVLEN); else memset(iv, 0, MAX_IVLEN); if (template[i].np) { j++; printk("test %u (%d bit key):\n", j, template[i].klen * 8); crypto_ablkcipher_clear_flags(tfm, ~0); if (template[i].wk) crypto_ablkcipher_set_flags( tfm, CRYPTO_TFM_REQ_WEAK_KEY); ret = crypto_ablkcipher_setkey(tfm, template[i].key, template[i].klen); if (ret) { printk("setkey() failed flags=%x\n", crypto_ablkcipher_get_flags(tfm)); if (!template[i].fail) { kfree(data); goto out; } } temp = 0; sg_init_table(sg, template[i].np); for (k = 0; k < template[i].np; k++) { memcpy(&xbuf[IDX[k]], template[i].input + temp, template[i].tap[k]); temp += template[i].tap[k]; sg_set_buf(&sg[k], &xbuf[IDX[k]], template[i].tap[k]); } ablkcipher_request_set_crypt(req, sg, sg, template[i].ilen, iv); ret = enc ? 
crypto_ablkcipher_encrypt(req) : crypto_ablkcipher_decrypt(req); switch (ret) { case 0: break; case -EINPROGRESS: case -EBUSY: ret = wait_for_completion_interruptible( &result.completion); if (!ret && !((ret = result.err))) { INIT_COMPLETION(result.completion); break; } /* fall through */ default: printk("%s () failed err=%d\n", e, -ret); goto out; } temp = 0; for (k = 0; k < template[i].np; k++) { printk("page %u\n", k); q = kmap(sg_page(&sg[k])) + sg[k].offset; hexdump(q, template[i].tap[k]); printk("%s\n", memcmp(q, template[i].result + temp, template[i].tap[k]) ? "fail" : "pass"); temp += template[i].tap[k]; kunmap(sg_page(&sg[k])); } } } out: crypto_free_ablkcipher(tfm); ablkcipher_request_free(req); } static int test_cipher_jiffies(struct blkcipher_desc *desc, int enc, char *p, int blen, int sec) { struct scatterlist sg[1]; unsigned long start, end; int bcount; int ret; sg_init_one(sg, p, blen); for (start = jiffies, end = start + sec * HZ, bcount = 0; time_before(jiffies, end); bcount++) { if (enc) ret = crypto_blkcipher_encrypt(desc, sg, sg, blen); else ret = crypto_blkcipher_decrypt(desc, sg, sg, blen); if (ret) return ret; } printk("%d operations in %d seconds (%ld bytes)\n", bcount, sec, (long)bcount * blen); return 0; } static int test_cipher_cycles(struct blkcipher_desc *desc, int enc, char *p, int blen) { struct scatterlist sg[1]; unsigned long cycles = 0; int ret = 0; int i; sg_init_one(sg, p, blen); local_bh_disable(); local_irq_disable(); /* Warm-up run. */ for (i = 0; i < 4; i++) { if (enc) ret = crypto_blkcipher_encrypt(desc, sg, sg, blen); else ret = crypto_blkcipher_decrypt(desc, sg, sg, blen); if (ret) goto out; } /* The real thing. */ for (i = 0; i < 8; i++) { cycles_t start, end; start = get_cycles(); if (enc) ret = crypto_blkcipher_encrypt(desc, sg, sg, blen); else ret = crypto_blkcipher_decrypt(desc, sg, sg, blen); end = get_cycles(); if (ret) goto out; cycles += end - start; } out: local_irq_enable(); local_bh_enable(); if (ret == 0) printk("1 operation in %lu cycles (%d bytes)\n", (cycles + 4) / 8, blen); return ret; } static u32 block_sizes[] = { 16, 64, 256, 1024, 8192, 0 }; static void test_cipher_speed(char *algo, int enc, unsigned int sec, struct cipher_testvec *template, unsigned int tcount, u8 *keysize) { unsigned int ret, i, j, iv_len; unsigned char *key, *p, iv[128]; struct crypto_blkcipher *tfm; struct blkcipher_desc desc; const char *e; u32 *b_size; if (enc == ENCRYPT) e = "encryption"; else e = "decryption"; printk("\ntesting speed of %s %s\n", algo, e); tfm = crypto_alloc_blkcipher(algo, 0, CRYPTO_ALG_ASYNC); if (IS_ERR(tfm)) { printk("failed to load transform for %s: %ld\n", algo, PTR_ERR(tfm)); return; } desc.tfm = tfm; desc.flags = 0; i = 0; do { b_size = block_sizes; do { if ((*keysize + *b_size) > TVMEMSIZE) { printk("template (%u) too big for tvmem (%u)\n", *keysize + *b_size, TVMEMSIZE); goto out; } printk("test %u (%d bit key, %d byte blocks): ", i, *keysize * 8, *b_size); memset(tvmem, 0xff, *keysize + *b_size); /* set key, plain text and IV */ key = (unsigned char *)tvmem; for (j = 0; j < tcount; j++) { if (template[j].klen == *keysize) { key = template[j].key; break; } } p = (unsigned char *)tvmem + *keysize; ret = crypto_blkcipher_setkey(tfm, key, *keysize); if (ret) { printk("setkey() failed flags=%x\n", crypto_blkcipher_get_flags(tfm)); goto out; } iv_len = crypto_blkcipher_ivsize(tfm); if (iv_len) { memset(&iv, 0xff, iv_len); crypto_blkcipher_set_iv(tfm, iv, iv_len); } if (sec) ret = test_cipher_jiffies(&desc, enc, p, *b_size, sec); 
else ret = test_cipher_cycles(&desc, enc, p, *b_size); if (ret) { printk("%s() failed flags=%x\n", e, desc.flags); break; } b_size++; i++; } while (*b_size); keysize++; } while (*keysize); out: crypto_free_blkcipher(tfm); } static int test_hash_jiffies_digest(struct hash_desc *desc, char *p, int blen, char *out, int sec) { struct scatterlist sg[1]; unsigned long start, end; int bcount; int ret; sg_init_table(sg, 1); for (start = jiffies, end = start + sec * HZ, bcount = 0; time_before(jiffies, end); bcount++) { sg_set_buf(sg, p, blen); ret = crypto_hash_digest(desc, sg, blen, out); if (ret) return ret; } printk("%6u opers/sec, %9lu bytes/sec\n", bcount / sec, ((long)bcount * blen) / sec); return 0; } static int test_hash_jiffies(struct hash_desc *desc, char *p, int blen, int plen, char *out, int sec) { struct scatterlist sg[1]; unsigned long start, end; int bcount, pcount; int ret; if (plen == blen) return test_hash_jiffies_digest(desc, p, blen, out, sec); sg_init_table(sg, 1); for (start = jiffies, end = start + sec * HZ, bcount = 0; time_before(jiffies, end); bcount++) { ret = crypto_hash_init(desc); if (ret) return ret; for (pcount = 0; pcount < blen; pcount += plen) { sg_set_buf(sg, p + pcount, plen); ret = crypto_hash_update(desc, sg, plen); if (ret) return ret; } /* we assume there is enough space in 'out' for the result */ ret = crypto_hash_final(desc, out); if (ret) return ret; } printk("%6u opers/sec, %9lu bytes/sec\n", bcount / sec, ((long)bcount * blen) / sec); return 0; } static int test_hash_cycles_digest(struct hash_desc *desc, char *p, int blen, char *out) { struct scatterlist sg[1]; unsigned long cycles = 0; int i; int ret; sg_init_table(sg, 1); local_bh_disable(); local_irq_disable(); /* Warm-up run. */ for (i = 0; i < 4; i++) { sg_set_buf(sg, p, blen); ret = crypto_hash_digest(desc, sg, blen, out); if (ret) goto out; } /* The real thing. */ for (i = 0; i < 8; i++) { cycles_t start, end; start = get_cycles(); sg_set_buf(sg, p, blen); ret = crypto_hash_digest(desc, sg, blen, out); if (ret) goto out; end = get_cycles(); cycles += end - start; } out: local_irq_enable(); local_bh_enable(); if (ret) return ret; printk("%6lu cycles/operation, %4lu cycles/byte\n", cycles / 8, cycles / (8 * blen)); return 0; } static int test_hash_cycles(struct hash_desc *desc, char *p, int blen, int plen, char *out) { struct scatterlist sg[1]; unsigned long cycles = 0; int i, pcount; int ret; if (plen == blen) return test_hash_cycles_digest(desc, p, blen, out); sg_init_table(sg, 1); local_bh_disable(); local_irq_disable(); /* Warm-up run. */ for (i = 0; i < 4; i++) { ret = crypto_hash_init(desc); if (ret) goto out; for (pcount = 0; pcount < blen; pcount += plen) { sg_set_buf(sg, p + pcount, plen); ret = crypto_hash_update(desc, sg, plen); if (ret) goto out; } ret = crypto_hash_final(desc, out); if (ret) goto out; } /* The real thing. 
*/ for (i = 0; i < 8; i++) { cycles_t start, end; start = get_cycles(); ret = crypto_hash_init(desc); if (ret) goto out; for (pcount = 0; pcount < blen; pcount += plen) { sg_set_buf(sg, p + pcount, plen); ret = crypto_hash_update(desc, sg, plen); if (ret) goto out; } ret = crypto_hash_final(desc, out); if (ret) goto out; end = get_cycles(); cycles += end - start; } out: local_irq_enable(); local_bh_enable(); if (ret) return ret; printk("%6lu cycles/operation, %4lu cycles/byte\n", cycles / 8, cycles / (8 * blen)); return 0; } static void test_hash_speed(char *algo, unsigned int sec, struct hash_speed *speed) { struct crypto_hash *tfm; struct hash_desc desc; char output[1024]; int i; int ret; printk("\ntesting speed of %s\n", algo); tfm = crypto_alloc_hash(algo, 0, CRYPTO_ALG_ASYNC); if (IS_ERR(tfm)) { printk("failed to load transform for %s: %ld\n", algo, PTR_ERR(tfm)); return; } desc.tfm = tfm; desc.flags = 0; if (crypto_hash_digestsize(tfm) > sizeof(output)) { printk("digestsize(%u) > outputbuffer(%zu)\n", crypto_hash_digestsize(tfm), sizeof(output)); goto out; } for (i = 0; speed[i].blen != 0; i++) { if (speed[i].blen > TVMEMSIZE) { printk("template (%u) too big for tvmem (%u)\n", speed[i].blen, TVMEMSIZE); goto out; } printk("test%3u (%5u byte blocks,%5u bytes per update,%4u updates): ", i, speed[i].blen, speed[i].plen, speed[i].blen / speed[i].plen); memset(tvmem, 0xff, speed[i].blen); if (sec) ret = test_hash_jiffies(&desc, tvmem, speed[i].blen, speed[i].plen, output, sec); else ret = test_hash_cycles(&desc, tvmem, speed[i].blen, speed[i].plen, output); if (ret) { printk("hashing failed ret=%d\n", ret); break; } } out: crypto_free_hash(tfm); } static void test_comp(char *algo, struct comp_testvec *ctemplate, struct comp_testvec *dtemplate, int ctcount, int dtcount) { unsigned int i; char result[COMP_BUF_SIZE]; struct crypto_comp *tfm; unsigned int tsize; printk("\ntesting %s compression\n", algo); tfm = crypto_alloc_comp(algo, 0, CRYPTO_ALG_ASYNC); if (IS_ERR(tfm)) { printk("failed to load transform for %s\n", algo); return; } for (i = 0; i < ctcount; i++) { int ilen, ret, dlen = COMP_BUF_SIZE; printk("test %u:\n", i + 1); memset(result, 0, sizeof (result)); ilen = ctemplate[i].inlen; ret = crypto_comp_compress(tfm, ctemplate[i].input, ilen, result, &dlen); if (ret) { printk("fail: ret=%d\n", ret); continue; } hexdump(result, dlen); printk("%s (ratio %d:%d)\n", memcmp(result, ctemplate[i].output, dlen) ? "fail" : "pass", ilen, dlen); } printk("\ntesting %s decompression\n", algo); tsize = sizeof(struct comp_testvec); tsize *= dtcount; if (tsize > TVMEMSIZE) { printk("template (%u) too big for tvmem (%u)\n", tsize, TVMEMSIZE); goto out; } for (i = 0; i < dtcount; i++) { int ilen, ret, dlen = COMP_BUF_SIZE; printk("test %u:\n", i + 1); memset(result, 0, sizeof (result)); ilen = dtemplate[i].inlen; ret = crypto_comp_decompress(tfm, dtemplate[i].input, ilen, result, &dlen); if (ret) { printk("fail: ret=%d\n", ret); continue; } hexdump(result, dlen); printk("%s (ratio %d:%d)\n", memcmp(result, dtemplate[i].output, dlen) ? "fail" : "pass", ilen, dlen); } out: crypto_free_comp(tfm); } static void test_available(void) { char **name = check; while (*name) { printk("alg %s ", *name); printk(crypto_has_alg(*name, 0, 0) ? 
"found\n" : "not found\n"); name++; } } static void do_test(void) { switch (mode) { case 0: test_hash("md5", md5_tv_template, MD5_TEST_VECTORS); test_hash("sha1", sha1_tv_template, SHA1_TEST_VECTORS); //DES test_cipher("ecb(des)", ENCRYPT, des_enc_tv_template, DES_ENC_TEST_VECTORS); test_cipher("ecb(des)", DECRYPT, des_dec_tv_template, DES_DEC_TEST_VECTORS); test_cipher("cbc(des)", ENCRYPT, des_cbc_enc_tv_template, DES_CBC_ENC_TEST_VECTORS); test_cipher("cbc(des)", DECRYPT, des_cbc_dec_tv_template, DES_CBC_DEC_TEST_VECTORS); //DES3_EDE test_cipher("ecb(des3_ede)", ENCRYPT, des3_ede_enc_tv_template, DES3_EDE_ENC_TEST_VECTORS); test_cipher("ecb(des3_ede)", DECRYPT, des3_ede_dec_tv_template, DES3_EDE_DEC_TEST_VECTORS); test_hash("md4", md4_tv_template, MD4_TEST_VECTORS); test_hash("sha224", sha224_tv_template, SHA224_TEST_VECTORS); test_hash("sha256", sha256_tv_template, SHA256_TEST_VECTORS); //BLOWFISH test_cipher("ecb(blowfish)", ENCRYPT, bf_enc_tv_template, BF_ENC_TEST_VECTORS); test_cipher("ecb(blowfish)", DECRYPT, bf_dec_tv_template, BF_DEC_TEST_VECTORS); test_cipher("cbc(blowfish)", ENCRYPT, bf_cbc_enc_tv_template, BF_CBC_ENC_TEST_VECTORS); test_cipher("cbc(blowfish)", DECRYPT, bf_cbc_dec_tv_template, BF_CBC_DEC_TEST_VECTORS); //TWOFISH test_cipher("ecb(twofish)", ENCRYPT, tf_enc_tv_template, TF_ENC_TEST_VECTORS); test_cipher("ecb(twofish)", DECRYPT, tf_dec_tv_template, TF_DEC_TEST_VECTORS); test_cipher("cbc(twofish)", ENCRYPT, tf_cbc_enc_tv_template, TF_CBC_ENC_TEST_VECTORS); test_cipher("cbc(twofish)", DECRYPT, tf_cbc_dec_tv_template, TF_CBC_DEC_TEST_VECTORS); //SERPENT test_cipher("ecb(serpent)", ENCRYPT, serpent_enc_tv_template, SERPENT_ENC_TEST_VECTORS); test_cipher("ecb(serpent)", DECRYPT, serpent_dec_tv_template, SERPENT_DEC_TEST_VECTORS); //TNEPRES test_cipher("ecb(tnepres)", ENCRYPT, tnepres_enc_tv_template, TNEPRES_ENC_TEST_VECTORS); test_cipher("ecb(tnepres)", DECRYPT, tnepres_dec_tv_template, TNEPRES_DEC_TEST_VECTORS); //AES test_cipher("ecb(aes)", ENCRYPT, aes_enc_tv_template, AES_ENC_TEST_VECTORS); test_cipher("ecb(aes)", DECRYPT, aes_dec_tv_template, AES_DEC_TEST_VECTORS); test_cipher("cbc(aes)", ENCRYPT, aes_cbc_enc_tv_template, AES_CBC_ENC_TEST_VECTORS); test_cipher("cbc(aes)", DECRYPT, aes_cbc_dec_tv_template, AES_CBC_DEC_TEST_VECTORS); test_cipher("lrw(aes)", ENCRYPT, aes_lrw_enc_tv_template, AES_LRW_ENC_TEST_VECTORS); test_cipher("lrw(aes)", DECRYPT, aes_lrw_dec_tv_template, AES_LRW_DEC_TEST_VECTORS); test_cipher("xts(aes)", ENCRYPT, aes_xts_enc_tv_template, AES_XTS_ENC_TEST_VECTORS); test_cipher("xts(aes)", DECRYPT, aes_xts_dec_tv_template, AES_XTS_DEC_TEST_VECTORS); test_cipher("rfc3686(ctr(aes))", ENCRYPT, aes_ctr_enc_tv_template, AES_CTR_ENC_TEST_VECTORS); test_cipher("rfc3686(ctr(aes))", DECRYPT, aes_ctr_dec_tv_template, AES_CTR_DEC_TEST_VECTORS); test_aead("gcm(aes)", ENCRYPT, aes_gcm_enc_tv_template, AES_GCM_ENC_TEST_VECTORS); test_aead("gcm(aes)", DECRYPT, aes_gcm_dec_tv_template, AES_GCM_DEC_TEST_VECTORS); test_aead("ccm(aes)", ENCRYPT, aes_ccm_enc_tv_template, AES_CCM_ENC_TEST_VECTORS); test_aead("ccm(aes)", DECRYPT, aes_ccm_dec_tv_template, AES_CCM_DEC_TEST_VECTORS); //CAST5 test_cipher("ecb(cast5)", ENCRYPT, cast5_enc_tv_template, CAST5_ENC_TEST_VECTORS); test_cipher("ecb(cast5)", DECRYPT, cast5_dec_tv_template, CAST5_DEC_TEST_VECTORS); //CAST6 test_cipher("ecb(cast6)", ENCRYPT, cast6_enc_tv_template, CAST6_ENC_TEST_VECTORS); test_cipher("ecb(cast6)", DECRYPT, cast6_dec_tv_template, CAST6_DEC_TEST_VECTORS); //ARC4 test_cipher("ecb(arc4)", ENCRYPT, 
arc4_enc_tv_template, ARC4_ENC_TEST_VECTORS); test_cipher("ecb(arc4)", DECRYPT, arc4_dec_tv_template, ARC4_DEC_TEST_VECTORS); //TEA test_cipher("ecb(tea)", ENCRYPT, tea_enc_tv_template, TEA_ENC_TEST_VECTORS); test_cipher("ecb(tea)", DECRYPT, tea_dec_tv_template, TEA_DEC_TEST_VECTORS); //XTEA test_cipher("ecb(xtea)", ENCRYPT, xtea_enc_tv_template, XTEA_ENC_TEST_VECTORS); test_cipher("ecb(xtea)", DECRYPT, xtea_dec_tv_template, XTEA_DEC_TEST_VECTORS); //KHAZAD test_cipher("ecb(khazad)", ENCRYPT, khazad_enc_tv_template, KHAZAD_ENC_TEST_VECTORS); test_cipher("ecb(khazad)", DECRYPT, khazad_dec_tv_template, KHAZAD_DEC_TEST_VECTORS); //ANUBIS test_cipher("ecb(anubis)", ENCRYPT, anubis_enc_tv_template, ANUBIS_ENC_TEST_VECTORS); test_cipher("ecb(anubis)", DECRYPT, anubis_dec_tv_template, ANUBIS_DEC_TEST_VECTORS); test_cipher("cbc(anubis)", ENCRYPT, anubis_cbc_enc_tv_template, ANUBIS_CBC_ENC_TEST_VECTORS); test_cipher("cbc(anubis)", DECRYPT, anubis_cbc_dec_tv_template, ANUBIS_CBC_ENC_TEST_VECTORS); //XETA test_cipher("ecb(xeta)", ENCRYPT, xeta_enc_tv_template, XETA_ENC_TEST_VECTORS); test_cipher("ecb(xeta)", DECRYPT, xeta_dec_tv_template, XETA_DEC_TEST_VECTORS); //FCrypt test_cipher("pcbc(fcrypt)", ENCRYPT, fcrypt_pcbc_enc_tv_template, FCRYPT_ENC_TEST_VECTORS); test_cipher("pcbc(fcrypt)", DECRYPT, fcrypt_pcbc_dec_tv_template, FCRYPT_DEC_TEST_VECTORS); //CAMELLIA test_cipher("ecb(camellia)", ENCRYPT, camellia_enc_tv_template, CAMELLIA_ENC_TEST_VECTORS); test_cipher("ecb(camellia)", DECRYPT, camellia_dec_tv_template, CAMELLIA_DEC_TEST_VECTORS); test_cipher("cbc(camellia)", ENCRYPT, camellia_cbc_enc_tv_template, CAMELLIA_CBC_ENC_TEST_VECTORS); test_cipher("cbc(camellia)", DECRYPT, camellia_cbc_dec_tv_template, CAMELLIA_CBC_DEC_TEST_VECTORS); //SEED test_cipher("ecb(seed)", ENCRYPT, seed_enc_tv_template, SEED_ENC_TEST_VECTORS); test_cipher("ecb(seed)", DECRYPT, seed_dec_tv_template, SEED_DEC_TEST_VECTORS); //CTS test_cipher("cts(cbc(aes))", ENCRYPT, cts_mode_enc_tv_template, CTS_MODE_ENC_TEST_VECTORS); test_cipher("cts(cbc(aes))", DECRYPT, cts_mode_dec_tv_template, CTS_MODE_DEC_TEST_VECTORS); test_hash("sha384", sha384_tv_template, SHA384_TEST_VECTORS); test_hash("sha512", sha512_tv_template, SHA512_TEST_VECTORS); test_hash("wp512", wp512_tv_template, WP512_TEST_VECTORS); test_hash("wp384", wp384_tv_template, WP384_TEST_VECTORS); test_hash("wp256", wp256_tv_template, WP256_TEST_VECTORS); test_hash("tgr192", tgr192_tv_template, TGR192_TEST_VECTORS); test_hash("tgr160", tgr160_tv_template, TGR160_TEST_VECTORS); test_hash("tgr128", tgr128_tv_template, TGR128_TEST_VECTORS); test_comp("deflate", deflate_comp_tv_template, deflate_decomp_tv_template, DEFLATE_COMP_TEST_VECTORS, DEFLATE_DECOMP_TEST_VECTORS); test_comp("lzo", lzo_comp_tv_template, lzo_decomp_tv_template, LZO_COMP_TEST_VECTORS, LZO_DECOMP_TEST_VECTORS); test_hash("crc32c", crc32c_tv_template, CRC32C_TEST_VECTORS); test_hash("hmac(md5)", hmac_md5_tv_template, HMAC_MD5_TEST_VECTORS); test_hash("hmac(sha1)", hmac_sha1_tv_template, HMAC_SHA1_TEST_VECTORS); test_hash("hmac(sha224)", hmac_sha224_tv_template, HMAC_SHA224_TEST_VECTORS); test_hash("hmac(sha256)", hmac_sha256_tv_template, HMAC_SHA256_TEST_VECTORS); test_hash("hmac(sha384)", hmac_sha384_tv_template, HMAC_SHA384_TEST_VECTORS); test_hash("hmac(sha512)", hmac_sha512_tv_template, HMAC_SHA512_TEST_VECTORS); test_hash("xcbc(aes)", aes_xcbc128_tv_template, XCBC_AES_TEST_VECTORS); test_hash("michael_mic", michael_mic_tv_template, MICHAEL_MIC_TEST_VECTORS); break; case 1: test_hash("md5", 
md5_tv_template, MD5_TEST_VECTORS); break; case 2: test_hash("sha1", sha1_tv_template, SHA1_TEST_VECTORS); break; case 3: test_cipher("ecb(des)", ENCRYPT, des_enc_tv_template, DES_ENC_TEST_VECTORS); test_cipher("ecb(des)", DECRYPT, des_dec_tv_template, DES_DEC_TEST_VECTORS); test_cipher("cbc(des)", ENCRYPT, des_cbc_enc_tv_template, DES_CBC_ENC_TEST_VECTORS); test_cipher("cbc(des)", DECRYPT, des_cbc_dec_tv_template, DES_CBC_DEC_TEST_VECTORS); break; case 4: test_cipher("ecb(des3_ede)", ENCRYPT, des3_ede_enc_tv_template, DES3_EDE_ENC_TEST_VECTORS); test_cipher("ecb(des3_ede)", DECRYPT, des3_ede_dec_tv_template, DES3_EDE_DEC_TEST_VECTORS); break; case 5: test_hash("md4", md4_tv_template, MD4_TEST_VECTORS); break; case 6: test_hash("sha256", sha256_tv_template, SHA256_TEST_VECTORS); break; case 7: test_cipher("ecb(blowfish)", ENCRYPT, bf_enc_tv_template, BF_ENC_TEST_VECTORS); test_cipher("ecb(blowfish)", DECRYPT, bf_dec_tv_template, BF_DEC_TEST_VECTORS); test_cipher("cbc(blowfish)", ENCRYPT, bf_cbc_enc_tv_template, BF_CBC_ENC_TEST_VECTORS); test_cipher("cbc(blowfish)", DECRYPT, bf_cbc_dec_tv_template, BF_CBC_DEC_TEST_VECTORS); break; case 8: test_cipher("ecb(twofish)", ENCRYPT, tf_enc_tv_template, TF_ENC_TEST_VECTORS); test_cipher("ecb(twofish)", DECRYPT, tf_dec_tv_template, TF_DEC_TEST_VECTORS); test_cipher("cbc(twofish)", ENCRYPT, tf_cbc_enc_tv_template, TF_CBC_ENC_TEST_VECTORS); test_cipher("cbc(twofish)", DECRYPT, tf_cbc_dec_tv_template, TF_CBC_DEC_TEST_VECTORS); break; case 9: test_cipher("ecb(serpent)", ENCRYPT, serpent_enc_tv_template, SERPENT_ENC_TEST_VECTORS); test_cipher("ecb(serpent)", DECRYPT, serpent_dec_tv_template, SERPENT_DEC_TEST_VECTORS); break; case 10: test_cipher("ecb(aes)", ENCRYPT, aes_enc_tv_template, AES_ENC_TEST_VECTORS); test_cipher("ecb(aes)", DECRYPT, aes_dec_tv_template, AES_DEC_TEST_VECTORS); test_cipher("cbc(aes)", ENCRYPT, aes_cbc_enc_tv_template, AES_CBC_ENC_TEST_VECTORS); test_cipher("cbc(aes)", DECRYPT, aes_cbc_dec_tv_template, AES_CBC_DEC_TEST_VECTORS); test_cipher("lrw(aes)", ENCRYPT, aes_lrw_enc_tv_template, AES_LRW_ENC_TEST_VECTORS); test_cipher("lrw(aes)", DECRYPT, aes_lrw_dec_tv_template, AES_LRW_DEC_TEST_VECTORS); test_cipher("xts(aes)", ENCRYPT, aes_xts_enc_tv_template, AES_XTS_ENC_TEST_VECTORS); test_cipher("xts(aes)", DECRYPT, aes_xts_dec_tv_template, AES_XTS_DEC_TEST_VECTORS); test_cipher("rfc3686(ctr(aes))", ENCRYPT, aes_ctr_enc_tv_template, AES_CTR_ENC_TEST_VECTORS); test_cipher("rfc3686(ctr(aes))", DECRYPT, aes_ctr_dec_tv_template, AES_CTR_DEC_TEST_VECTORS); break; case 11: test_hash("sha384", sha384_tv_template, SHA384_TEST_VECTORS); break; case 12: test_hash("sha512", sha512_tv_template, SHA512_TEST_VECTORS); break; case 13: test_comp("deflate", deflate_comp_tv_template, deflate_decomp_tv_template, DEFLATE_COMP_TEST_VECTORS, DEFLATE_DECOMP_TEST_VECTORS); break; case 14: test_cipher("ecb(cast5)", ENCRYPT, cast5_enc_tv_template, CAST5_ENC_TEST_VECTORS); test_cipher("ecb(cast5)", DECRYPT, cast5_dec_tv_template, CAST5_DEC_TEST_VECTORS); break; case 15: test_cipher("ecb(cast6)", ENCRYPT, cast6_enc_tv_template, CAST6_ENC_TEST_VECTORS); test_cipher("ecb(cast6)", DECRYPT, cast6_dec_tv_template, CAST6_DEC_TEST_VECTORS); break; case 16: test_cipher("ecb(arc4)", ENCRYPT, arc4_enc_tv_template, ARC4_ENC_TEST_VECTORS); test_cipher("ecb(arc4)", DECRYPT, arc4_dec_tv_template, ARC4_DEC_TEST_VECTORS); break; case 17: test_hash("michael_mic", michael_mic_tv_template, MICHAEL_MIC_TEST_VECTORS); break; case 18: test_hash("crc32c", crc32c_tv_template, 
CRC32C_TEST_VECTORS); break; case 19: test_cipher("ecb(tea)", ENCRYPT, tea_enc_tv_template, TEA_ENC_TEST_VECTORS); test_cipher("ecb(tea)", DECRYPT, tea_dec_tv_template, TEA_DEC_TEST_VECTORS); break; case 20: test_cipher("ecb(xtea)", ENCRYPT, xtea_enc_tv_template, XTEA_ENC_TEST_VECTORS); test_cipher("ecb(xtea)", DECRYPT, xtea_dec_tv_template, XTEA_DEC_TEST_VECTORS); break; case 21: test_cipher("ecb(khazad)", ENCRYPT, khazad_enc_tv_template, KHAZAD_ENC_TEST_VECTORS); test_cipher("ecb(khazad)", DECRYPT, khazad_dec_tv_template, KHAZAD_DEC_TEST_VECTORS); break; case 22: test_hash("wp512", wp512_tv_template, WP512_TEST_VECTORS); break; case 23: test_hash("wp384", wp384_tv_template, WP384_TEST_VECTORS); break; case 24: test_hash("wp256", wp256_tv_template, WP256_TEST_VECTORS); break; case 25: test_cipher("ecb(tnepres)", ENCRYPT, tnepres_enc_tv_template, TNEPRES_ENC_TEST_VECTORS); test_cipher("ecb(tnepres)", DECRYPT, tnepres_dec_tv_template, TNEPRES_DEC_TEST_VECTORS); break; case 26: test_cipher("ecb(anubis)", ENCRYPT, anubis_enc_tv_template, ANUBIS_ENC_TEST_VECTORS); test_cipher("ecb(anubis)", DECRYPT, anubis_dec_tv_template, ANUBIS_DEC_TEST_VECTORS);