summaryrefslogtreecommitdiffstats
path: root/crypto
diff options
context:
space:
mode:
authorHerbert Xu <herbert@gondor.apana.org.au>2016-09-07 06:42:08 -0400
committerHerbert Xu <herbert@gondor.apana.org.au>2016-09-13 06:44:57 -0400
commit53a5d5ddccf849dbc27a8c1bba0b43c3a45fb792 (patch)
tree95635f9a1b03ca963f3758a4ffbcf5f4f94cc515 /crypto
parent0bd2223594a4dcddc1e34b15774a3a4776f7749e (diff)
crypto: echainiv - Replace chaining with multiplication
The current implementation uses a global per-cpu array to store data which are used to derive the next IV. This is insecure as the attacker may change the stored data.

This patch removes all traces of chaining and replaces it with multiplication of the salt and the sequence number.

Fixes: a10f554fa7e0 ("crypto: echainiv - Add encrypted chain IV...")
Cc: stable@vger.kernel.org
Reported-by: Mathias Krause <minipli@googlemail.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Diffstat (limited to 'crypto')
-rw-r--r--crypto/echainiv.c115
1 file changed, 24 insertions, 91 deletions
diff --git a/crypto/echainiv.c b/crypto/echainiv.c
index 1b01fe98e91f..e3d889b122e0 100644
--- a/crypto/echainiv.c
+++ b/crypto/echainiv.c
@@ -1,8 +1,8 @@
1/* 1/*
2 * echainiv: Encrypted Chain IV Generator 2 * echainiv: Encrypted Chain IV Generator
3 * 3 *
4 * This generator generates an IV based on a sequence number by xoring it 4 * This generator generates an IV based on a sequence number by multiplying
5 * with a salt and then encrypting it with the same key as used to encrypt 5 * it with a salt and then encrypting it with the same key as used to encrypt
6 * the plain text. This algorithm requires that the block size be equal 6 * the plain text. This algorithm requires that the block size be equal
7 * to the IV size. It is mainly useful for CBC. 7 * to the IV size. It is mainly useful for CBC.
8 * 8 *
@@ -24,81 +24,17 @@
24#include <linux/err.h> 24#include <linux/err.h>
25#include <linux/init.h> 25#include <linux/init.h>
26#include <linux/kernel.h> 26#include <linux/kernel.h>
27#include <linux/mm.h>
28#include <linux/module.h> 27#include <linux/module.h>
29#include <linux/percpu.h> 28#include <linux/slab.h>
30#include <linux/spinlock.h>
31#include <linux/string.h> 29#include <linux/string.h>
32 30
33#define MAX_IV_SIZE 16
34
35static DEFINE_PER_CPU(u32 [MAX_IV_SIZE / sizeof(u32)], echainiv_iv);
36
37/* We don't care if we get preempted and read/write IVs from the next CPU. */
38static void echainiv_read_iv(u8 *dst, unsigned size)
39{
40 u32 *a = (u32 *)dst;
41 u32 __percpu *b = echainiv_iv;
42
43 for (; size >= 4; size -= 4) {
44 *a++ = this_cpu_read(*b);
45 b++;
46 }
47}
48
49static void echainiv_write_iv(const u8 *src, unsigned size)
50{
51 const u32 *a = (const u32 *)src;
52 u32 __percpu *b = echainiv_iv;
53
54 for (; size >= 4; size -= 4) {
55 this_cpu_write(*b, *a);
56 a++;
57 b++;
58 }
59}
60
61static void echainiv_encrypt_complete2(struct aead_request *req, int err)
62{
63 struct aead_request *subreq = aead_request_ctx(req);
64 struct crypto_aead *geniv;
65 unsigned int ivsize;
66
67 if (err == -EINPROGRESS)
68 return;
69
70 if (err)
71 goto out;
72
73 geniv = crypto_aead_reqtfm(req);
74 ivsize = crypto_aead_ivsize(geniv);
75
76 echainiv_write_iv(subreq->iv, ivsize);
77
78 if (req->iv != subreq->iv)
79 memcpy(req->iv, subreq->iv, ivsize);
80
81out:
82 if (req->iv != subreq->iv)
83 kzfree(subreq->iv);
84}
85
86static void echainiv_encrypt_complete(struct crypto_async_request *base,
87 int err)
88{
89 struct aead_request *req = base->data;
90
91 echainiv_encrypt_complete2(req, err);
92 aead_request_complete(req, err);
93}
94
95static int echainiv_encrypt(struct aead_request *req) 31static int echainiv_encrypt(struct aead_request *req)
96{ 32{
97 struct crypto_aead *geniv = crypto_aead_reqtfm(req); 33 struct crypto_aead *geniv = crypto_aead_reqtfm(req);
98 struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv); 34 struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv);
99 struct aead_request *subreq = aead_request_ctx(req); 35 struct aead_request *subreq = aead_request_ctx(req);
100 crypto_completion_t compl; 36 __be64 nseqno;
101 void *data; 37 u64 seqno;
102 u8 *info; 38 u8 *info;
103 unsigned int ivsize = crypto_aead_ivsize(geniv); 39 unsigned int ivsize = crypto_aead_ivsize(geniv);
104 int err; 40 int err;
@@ -108,8 +44,6 @@ static int echainiv_encrypt(struct aead_request *req)
108 44
109 aead_request_set_tfm(subreq, ctx->child); 45 aead_request_set_tfm(subreq, ctx->child);
110 46
111 compl = echainiv_encrypt_complete;
112 data = req;
113 info = req->iv; 47 info = req->iv;
114 48
115 if (req->src != req->dst) { 49 if (req->src != req->dst) {
@@ -127,29 +61,30 @@ static int echainiv_encrypt(struct aead_request *req)
127 return err; 61 return err;
128 } 62 }
129 63
130 if (unlikely(!IS_ALIGNED((unsigned long)info, 64 aead_request_set_callback(subreq, req->base.flags,
131 crypto_aead_alignmask(geniv) + 1))) { 65 req->base.complete, req->base.data);
132 info = kmalloc(ivsize, req->base.flags &
133 CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL:
134 GFP_ATOMIC);
135 if (!info)
136 return -ENOMEM;
137
138 memcpy(info, req->iv, ivsize);
139 }
140
141 aead_request_set_callback(subreq, req->base.flags, compl, data);
142 aead_request_set_crypt(subreq, req->dst, req->dst, 66 aead_request_set_crypt(subreq, req->dst, req->dst,
143 req->cryptlen, info); 67 req->cryptlen, info);
144 aead_request_set_ad(subreq, req->assoclen); 68 aead_request_set_ad(subreq, req->assoclen);
145 69
146 crypto_xor(info, ctx->salt, ivsize); 70 memcpy(&nseqno, info + ivsize - 8, 8);
71 seqno = be64_to_cpu(nseqno);
72 memset(info, 0, ivsize);
73
147 scatterwalk_map_and_copy(info, req->dst, req->assoclen, ivsize, 1); 74 scatterwalk_map_and_copy(info, req->dst, req->assoclen, ivsize, 1);
148 echainiv_read_iv(info, ivsize);
149 75
150 err = crypto_aead_encrypt(subreq); 76 do {
151 echainiv_encrypt_complete2(req, err); 77 u64 a;
152 return err; 78
79 memcpy(&a, ctx->salt + ivsize - 8, 8);
80
81 a |= 1;
82 a *= seqno;
83
84 memcpy(info + ivsize - 8, &a, 8);
85 } while ((ivsize -= 8));
86
87 return crypto_aead_encrypt(subreq);
153} 88}
154 89
155static int echainiv_decrypt(struct aead_request *req) 90static int echainiv_decrypt(struct aead_request *req)
@@ -196,8 +131,7 @@ static int echainiv_aead_create(struct crypto_template *tmpl,
196 alg = crypto_spawn_aead_alg(spawn); 131 alg = crypto_spawn_aead_alg(spawn);
197 132
198 err = -EINVAL; 133 err = -EINVAL;
199 if (inst->alg.ivsize & (sizeof(u32) - 1) || 134 if (inst->alg.ivsize & (sizeof(u64) - 1) || !inst->alg.ivsize)
200 inst->alg.ivsize > MAX_IV_SIZE)
201 goto free_inst; 135 goto free_inst;
202 136
203 inst->alg.encrypt = echainiv_encrypt; 137 inst->alg.encrypt = echainiv_encrypt;
@@ -206,7 +140,6 @@ static int echainiv_aead_create(struct crypto_template *tmpl,
206 inst->alg.init = aead_init_geniv; 140 inst->alg.init = aead_init_geniv;
207 inst->alg.exit = aead_exit_geniv; 141 inst->alg.exit = aead_exit_geniv;
208 142
209 inst->alg.base.cra_alignmask |= __alignof__(u32) - 1;
210 inst->alg.base.cra_ctxsize = sizeof(struct aead_geniv_ctx); 143 inst->alg.base.cra_ctxsize = sizeof(struct aead_geniv_ctx);
211 inst->alg.base.cra_ctxsize += inst->alg.ivsize; 144 inst->alg.base.cra_ctxsize += inst->alg.ivsize;
212 145