 include/net/esp.h |  54
 net/ipv4/Kconfig  |   1
 net/ipv4/esp4.c   | 497
 net/ipv6/Kconfig  |   1
 net/ipv6/esp6.c   | 460
 5 files changed, 591 insertions(+), 422 deletions(-)
diff --git a/include/net/esp.h b/include/net/esp.h
index c05f529bff28..d58451331dbd 100644
--- a/include/net/esp.h
+++ b/include/net/esp.h
@@ -1,58 +1,20 @@
 #ifndef _NET_ESP_H
 #define _NET_ESP_H
 
-#include <linux/crypto.h>
-#include <net/xfrm.h>
-#include <linux/scatterlist.h>
+#include <linux/skbuff.h>
 
-#define ESP_NUM_FAST_SG		4
+struct crypto_aead;
 
-struct esp_data
-{
-	struct scatterlist		sgbuf[ESP_NUM_FAST_SG];
-
-	/* Confidentiality */
-	struct {
-		int			padlen;		/* 0..255 */
-		/* ivlen is offset from enc_data, where encrypted data start.
-		 * It is logically different of crypto_tfm_alg_ivsize(tfm).
-		 * We assume that it is either zero (no ivec), or
-		 * >= crypto_tfm_alg_ivsize(tfm). */
-		int			ivlen;
-		int			ivinitted;
-		u8			*ivec;		/* ivec buffer */
-		struct crypto_blkcipher	*tfm;		/* crypto handle */
-	} conf;
-
-	/* Integrity. It is active when icv_full_len != 0 */
-	struct {
-		u8			*work_icv;
-		int			icv_full_len;
-		int			icv_trunc_len;
-		struct crypto_hash	*tfm;
-	} auth;
+struct esp_data {
+	/* 0..255 */
+	int padlen;
+
+	/* Confidentiality & Integrity */
+	struct crypto_aead *aead;
 };
 
 extern void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len);
 
-static inline int esp_mac_digest(struct esp_data *esp, struct sk_buff *skb,
-				 int offset, int len)
-{
-	struct hash_desc desc;
-	int err;
-
-	desc.tfm = esp->auth.tfm;
-	desc.flags = 0;
-
-	err = crypto_hash_init(&desc);
-	if (unlikely(err))
-		return err;
-	err = skb_icv_walk(skb, &desc, offset, len, crypto_hash_update);
-	if (unlikely(err))
-		return err;
-	return crypto_hash_final(&desc, esp->auth.work_icv);
-}
-
 struct ip_esp_hdr;
 
 static inline struct ip_esp_hdr *ip_esp_hdr(const struct sk_buff *skb)
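[The header shrinks because one AEAD transform now covers both confidentiality
and integrity. As a minimal sketch of the allocation that esp_init_state()
performs further down, assuming a kernel-module context; demo_esp_alloc_aead()
and the fixed algorithm string are illustrative, not part of this patch:

#include <crypto/aead.h>
#include <linux/err.h>

/* Sketch: one crypto_aead handle replaces the old conf.tfm (blkcipher)
 * and auth.tfm (hash) pair. */
static struct crypto_aead *demo_esp_alloc_aead(void)
{
	struct crypto_aead *aead;

	/* The authenc() template glues a cipher and a MAC into an AEAD. */
	aead = crypto_alloc_aead("authenc(hmac(sha1),cbc(aes))", 0, 0);
	if (IS_ERR(aead))
		return aead;

	/* ESP transmits a truncated ICV; HMAC-SHA1-96 uses 12 bytes. */
	if (crypto_aead_setauthsize(aead, 12)) {
		crypto_free_aead(aead);
		return ERR_PTR(-EINVAL);
	}

	return aead;
}
]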
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 24e2b7294bf8..19880b086e71 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -343,6 +343,7 @@ config INET_ESP
 	tristate "IP: ESP transformation"
 	select XFRM
 	select CRYPTO
+	select CRYPTO_AEAD
 	select CRYPTO_HMAC
 	select CRYPTO_MD5
 	select CRYPTO_CBC
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 28ea5c77ca23..c4047223bfbe 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -1,27 +1,118 @@
+#include <crypto/aead.h>
+#include <crypto/authenc.h>
 #include <linux/err.h>
 #include <linux/module.h>
 #include <net/ip.h>
 #include <net/xfrm.h>
 #include <net/esp.h>
 #include <linux/scatterlist.h>
-#include <linux/crypto.h>
 #include <linux/kernel.h>
 #include <linux/pfkeyv2.h>
-#include <linux/random.h>
+#include <linux/rtnetlink.h>
+#include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/in6.h>
 #include <net/icmp.h>
 #include <net/protocol.h>
 #include <net/udp.h>
 
+struct esp_skb_cb {
+	struct xfrm_skb_cb xfrm;
+	void *tmp;
+};
+
+#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))
+
+/*
+ * Allocate an AEAD request structure with extra space for SG and IV.
+ *
+ * For alignment considerations the IV is placed at the front, followed
+ * by the request and finally the SG list.
+ *
+ * TODO: Use spare space in skb for this where possible.
+ */
+static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags)
+{
+	unsigned int len;
+
+	len = crypto_aead_ivsize(aead);
+	if (len) {
+		len += crypto_aead_alignmask(aead) &
+		       ~(crypto_tfm_ctx_alignment() - 1);
+		len = ALIGN(len, crypto_tfm_ctx_alignment());
+	}
+
+	len += sizeof(struct aead_givcrypt_request) + crypto_aead_reqsize(aead);
+	len = ALIGN(len, __alignof__(struct scatterlist));
+
+	len += sizeof(struct scatterlist) * nfrags;
+
+	return kmalloc(len, GFP_ATOMIC);
+}
+
+static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp)
+{
+	return crypto_aead_ivsize(aead) ?
+	       PTR_ALIGN((u8 *)tmp, crypto_aead_alignmask(aead) + 1) : tmp;
+}
+
+static inline struct aead_givcrypt_request *esp_tmp_givreq(
+	struct crypto_aead *aead, u8 *iv)
+{
+	struct aead_givcrypt_request *req;
+
+	req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
+				crypto_tfm_ctx_alignment());
+	aead_givcrypt_set_tfm(req, aead);
+	return req;
+}
+
+static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
+{
+	struct aead_request *req;
+
+	req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
+				crypto_tfm_ctx_alignment());
+	aead_request_set_tfm(req, aead);
+	return req;
+}
+
+static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
+					     struct aead_request *req)
+{
+	return (void *)ALIGN((unsigned long)(req + 1) +
+			     crypto_aead_reqsize(aead),
+			     __alignof__(struct scatterlist));
+}
+
+static inline struct scatterlist *esp_givreq_sg(
+	struct crypto_aead *aead, struct aead_givcrypt_request *req)
+{
+	return (void *)ALIGN((unsigned long)(req + 1) +
+			     crypto_aead_reqsize(aead),
+			     __alignof__(struct scatterlist));
+}
+
+static void esp_output_done(struct crypto_async_request *base, int err)
+{
+	struct sk_buff *skb = base->data;
+
+	kfree(ESP_SKB_CB(skb)->tmp);
+	xfrm_output_resume(skb, err);
+}
+
 static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
 {
 	int err;
 	struct ip_esp_hdr *esph;
-	struct crypto_blkcipher *tfm;
-	struct blkcipher_desc desc;
+	struct crypto_aead *aead;
+	struct aead_givcrypt_request *req;
+	struct scatterlist *sg;
+	struct scatterlist *asg;
 	struct esp_data *esp;
 	struct sk_buff *trailer;
+	void *tmp;
+	u8 *iv;
 	u8 *tail;
 	int blksize;
 	int clen;
@@ -36,18 +127,27 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
 	clen = skb->len;
 
 	esp = x->data;
-	alen = esp->auth.icv_trunc_len;
-	tfm = esp->conf.tfm;
-	desc.tfm = tfm;
-	desc.flags = 0;
-	blksize = ALIGN(crypto_blkcipher_blocksize(tfm), 4);
+	aead = esp->aead;
+	alen = crypto_aead_authsize(aead);
+
+	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
 	clen = ALIGN(clen + 2, blksize);
-	if (esp->conf.padlen)
-		clen = ALIGN(clen, esp->conf.padlen);
+	if (esp->padlen)
+		clen = ALIGN(clen, esp->padlen);
+
+	if ((err = skb_cow_data(skb, clen - skb->len + alen, &trailer)) < 0)
+		goto error;
+	nfrags = err;
 
-	if ((nfrags = skb_cow_data(skb, clen-skb->len+alen, &trailer)) < 0)
+	tmp = esp_alloc_tmp(aead, nfrags + 1);
+	if (!tmp)
 		goto error;
 
+	iv = esp_tmp_iv(aead, tmp);
+	req = esp_tmp_givreq(aead, iv);
+	asg = esp_givreq_sg(aead, req);
+	sg = asg + 1;
+
 	/* Fill padding... */
 	tail = skb_tail_pointer(trailer);
 	do {
@@ -56,28 +156,34 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
 			tail[i] = i + 1;
 	} while (0);
 	tail[clen - skb->len - 2] = (clen - skb->len) - 2;
-	pskb_put(skb, trailer, clen - skb->len);
+	tail[clen - skb->len - 1] = *skb_mac_header(skb);
+	pskb_put(skb, trailer, clen - skb->len + alen);
 
 	skb_push(skb, -skb_network_offset(skb));
 	esph = ip_esp_hdr(skb);
-	*(skb_tail_pointer(trailer) - 1) = *skb_mac_header(skb);
 	*skb_mac_header(skb) = IPPROTO_ESP;
 
-	spin_lock_bh(&x->lock);
-
 	/* this is non-NULL only with UDP Encapsulation */
 	if (x->encap) {
 		struct xfrm_encap_tmpl *encap = x->encap;
 		struct udphdr *uh;
 		__be32 *udpdata32;
+		unsigned int sport, dport;
+		int encap_type;
+
+		spin_lock_bh(&x->lock);
+		sport = encap->encap_sport;
+		dport = encap->encap_dport;
+		encap_type = encap->encap_type;
+		spin_unlock_bh(&x->lock);
 
 		uh = (struct udphdr *)esph;
-		uh->source = encap->encap_sport;
-		uh->dest = encap->encap_dport;
-		uh->len = htons(skb->len + alen - skb_transport_offset(skb));
+		uh->source = sport;
+		uh->dest = dport;
+		uh->len = htons(skb->len - skb_transport_offset(skb));
 		uh->check = 0;
 
-		switch (encap->encap_type) {
+		switch (encap_type) {
 		default:
 		case UDP_ENCAP_ESPINUDP:
 			esph = (struct ip_esp_hdr *)(uh + 1);
@@ -95,131 +201,45 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
 	esph->spi = x->id.spi;
 	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq);
 
-	if (esp->conf.ivlen) {
-		if (unlikely(!esp->conf.ivinitted)) {
-			get_random_bytes(esp->conf.ivec, esp->conf.ivlen);
-			esp->conf.ivinitted = 1;
-		}
-		crypto_blkcipher_set_iv(tfm, esp->conf.ivec, esp->conf.ivlen);
-	}
-
-	do {
-		struct scatterlist *sg = &esp->sgbuf[0];
-
-		if (unlikely(nfrags > ESP_NUM_FAST_SG)) {
-			sg = kmalloc(sizeof(struct scatterlist)*nfrags, GFP_ATOMIC);
-			if (!sg)
-				goto unlock;
-		}
-		sg_init_table(sg, nfrags);
-		skb_to_sgvec(skb, sg,
-			     esph->enc_data +
-			     esp->conf.ivlen -
-			     skb->data, clen);
-		err = crypto_blkcipher_encrypt(&desc, sg, sg, clen);
-		if (unlikely(sg != &esp->sgbuf[0]))
-			kfree(sg);
-	} while (0);
-
-	if (unlikely(err))
-		goto unlock;
-
-	if (esp->conf.ivlen) {
-		memcpy(esph->enc_data, esp->conf.ivec, esp->conf.ivlen);
-		crypto_blkcipher_get_iv(tfm, esp->conf.ivec, esp->conf.ivlen);
-	}
+	sg_init_table(sg, nfrags);
+	skb_to_sgvec(skb, sg,
+		     esph->enc_data + crypto_aead_ivsize(aead) - skb->data,
+		     clen + alen);
+	sg_init_one(asg, esph, sizeof(*esph));
+
+	aead_givcrypt_set_callback(req, 0, esp_output_done, skb);
+	aead_givcrypt_set_crypt(req, sg, sg, clen, iv);
+	aead_givcrypt_set_assoc(req, asg, sizeof(*esph));
+	aead_givcrypt_set_giv(req, esph->enc_data, XFRM_SKB_CB(skb)->seq);
+
+	ESP_SKB_CB(skb)->tmp = tmp;
+	err = crypto_aead_givencrypt(req);
+	if (err == -EINPROGRESS)
+		goto error;
 
-	if (esp->auth.icv_full_len) {
-		err = esp_mac_digest(esp, skb, (u8 *)esph - skb->data,
-				     sizeof(*esph) + esp->conf.ivlen + clen);
-		memcpy(pskb_put(skb, trailer, alen), esp->auth.work_icv, alen);
-	}
+	if (err == -EBUSY)
+		err = NET_XMIT_DROP;
 
-unlock:
-	spin_unlock_bh(&x->lock);
+	kfree(tmp);
 
 error:
 	return err;
 }
 
-/*
- * Note: detecting truncated vs. non-truncated authentication data is very
- * expensive, so we only support truncated data, which is the recommended
- * and common case.
- */
-static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
+static int esp_input_done2(struct sk_buff *skb, int err)
 {
 	struct iphdr *iph;
-	struct ip_esp_hdr *esph;
+	struct xfrm_state *x = xfrm_input_state(skb);
 	struct esp_data *esp = x->data;
-	struct crypto_blkcipher *tfm = esp->conf.tfm;
-	struct blkcipher_desc desc = { .tfm = tfm };
-	struct sk_buff *trailer;
-	int blksize = ALIGN(crypto_blkcipher_blocksize(tfm), 4);
-	int alen = esp->auth.icv_trunc_len;
-	int elen = skb->len - sizeof(*esph) - esp->conf.ivlen - alen;
-	int nfrags;
+	struct crypto_aead *aead = esp->aead;
+	int alen = crypto_aead_authsize(aead);
+	int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
+	int elen = skb->len - hlen;
 	int ihl;
 	u8 nexthdr[2];
-	struct scatterlist *sg;
 	int padlen;
-	int err = -EINVAL;
 
-	if (!pskb_may_pull(skb, sizeof(*esph)))
-		goto out;
-
-	if (elen <= 0 || (elen & (blksize-1)))
-		goto out;
-
-	if ((err = skb_cow_data(skb, 0, &trailer)) < 0)
-		goto out;
-	nfrags = err;
-
-	skb->ip_summed = CHECKSUM_NONE;
-
-	spin_lock(&x->lock);
-
-	/* If integrity check is required, do this. */
-	if (esp->auth.icv_full_len) {
-		u8 sum[alen];
-
-		err = esp_mac_digest(esp, skb, 0, skb->len - alen);
-		if (err)
-			goto unlock;
-
-		if (skb_copy_bits(skb, skb->len - alen, sum, alen))
-			BUG();
-
-		if (unlikely(memcmp(esp->auth.work_icv, sum, alen))) {
-			err = -EBADMSG;
-			goto unlock;
-		}
-	}
-
-	esph = (struct ip_esp_hdr *)skb->data;
-
-	/* Get ivec. This can be wrong, check against another impls. */
-	if (esp->conf.ivlen)
-		crypto_blkcipher_set_iv(tfm, esph->enc_data, esp->conf.ivlen);
-
-	sg = &esp->sgbuf[0];
-
-	if (unlikely(nfrags > ESP_NUM_FAST_SG)) {
-		err = -ENOMEM;
-		sg = kmalloc(sizeof(struct scatterlist)*nfrags, GFP_ATOMIC);
-		if (!sg)
-			goto unlock;
-	}
-	sg_init_table(sg, nfrags);
-	skb_to_sgvec(skb, sg,
-		     sizeof(*esph) + esp->conf.ivlen,
-		     elen);
-	err = crypto_blkcipher_decrypt(&desc, sg, sg, elen);
-	if (unlikely(sg != &esp->sgbuf[0]))
-		kfree(sg);
-
-unlock:
-	spin_unlock(&x->lock);
+	kfree(ESP_SKB_CB(skb)->tmp);
 
 	if (unlikely(err))
 		goto out;
@@ -229,15 +249,11 @@ unlock:
 
 	err = -EINVAL;
 	padlen = nexthdr[0];
-	if (padlen+2 >= elen)
+	if (padlen + 2 + alen >= elen)
 		goto out;
 
 	/* ... check padding bits here. Silly. :-) */
 
-	/* RFC4303: Drop dummy packets without any error */
-	if (nexthdr[1] == IPPROTO_NONE)
-		goto out;
-
 	iph = ip_hdr(skb);
 	ihl = iph->ihl * 4;
 
@@ -279,10 +295,87 @@ unlock:
 	}
 
 	pskb_trim(skb, skb->len - alen - padlen - 2);
-	__skb_pull(skb, sizeof(*esph) + esp->conf.ivlen);
+	__skb_pull(skb, hlen);
 	skb_set_transport_header(skb, -ihl);
 
-	return nexthdr[1];
+	err = nexthdr[1];
+
+	/* RFC4303: Drop dummy packets without any error */
+	if (err == IPPROTO_NONE)
+		err = -EINVAL;
+
+out:
+	return err;
+}
+
+static void esp_input_done(struct crypto_async_request *base, int err)
+{
+	struct sk_buff *skb = base->data;
+
+	xfrm_input_resume(skb, esp_input_done2(skb, err));
+}
+
+/*
+ * Note: detecting truncated vs. non-truncated authentication data is very
+ * expensive, so we only support truncated data, which is the recommended
+ * and common case.
+ */
+static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
+{
+	struct ip_esp_hdr *esph;
+	struct esp_data *esp = x->data;
+	struct crypto_aead *aead = esp->aead;
+	struct aead_request *req;
+	struct sk_buff *trailer;
+	int elen = skb->len - sizeof(*esph) - crypto_aead_ivsize(aead);
+	int nfrags;
+	void *tmp;
+	u8 *iv;
+	struct scatterlist *sg;
+	struct scatterlist *asg;
+	int err = -EINVAL;
+
+	if (!pskb_may_pull(skb, sizeof(*esph)))
+		goto out;
+
+	if (elen <= 0)
+		goto out;
+
+	if ((err = skb_cow_data(skb, 0, &trailer)) < 0)
+		goto out;
+	nfrags = err;
+
+	err = -ENOMEM;
+	tmp = esp_alloc_tmp(aead, nfrags + 1);
+	if (!tmp)
+		goto out;
+
+	ESP_SKB_CB(skb)->tmp = tmp;
+	iv = esp_tmp_iv(aead, tmp);
+	req = esp_tmp_req(aead, iv);
+	asg = esp_req_sg(aead, req);
+	sg = asg + 1;
+
+	skb->ip_summed = CHECKSUM_NONE;
+
+	esph = (struct ip_esp_hdr *)skb->data;
+
+	/* Get ivec. This can be wrong, check against another impls. */
+	iv = esph->enc_data;
+
+	sg_init_table(sg, nfrags);
+	skb_to_sgvec(skb, sg, sizeof(*esph) + crypto_aead_ivsize(aead), elen);
+	sg_init_one(asg, esph, sizeof(*esph));
+
+	aead_request_set_callback(req, 0, esp_input_done, skb);
+	aead_request_set_crypt(req, sg, sg, elen, iv);
+	aead_request_set_assoc(req, asg, sizeof(*esph));
+
+	err = crypto_aead_decrypt(req);
+	if (err == -EINPROGRESS)
+		goto out;
+
+	err = esp_input_done2(skb, err);
 
 out:
 	return err;
@@ -291,11 +384,11 @@ out:
 static u32 esp4_get_mtu(struct xfrm_state *x, int mtu)
 {
 	struct esp_data *esp = x->data;
-	u32 blksize = ALIGN(crypto_blkcipher_blocksize(esp->conf.tfm), 4);
-	u32 align = max_t(u32, blksize, esp->conf.padlen);
+	u32 blksize = ALIGN(crypto_aead_blocksize(esp->aead), 4);
+	u32 align = max_t(u32, blksize, esp->padlen);
 	u32 rem;
 
-	mtu -= x->props.header_len + esp->auth.icv_trunc_len;
+	mtu -= x->props.header_len + crypto_aead_authsize(esp->aead);
 	rem = mtu & (align - 1);
 	mtu &= ~(align - 1);
 
@@ -342,80 +435,98 @@ static void esp_destroy(struct xfrm_state *x)
 	if (!esp)
 		return;
 
-	crypto_free_blkcipher(esp->conf.tfm);
-	esp->conf.tfm = NULL;
-	kfree(esp->conf.ivec);
-	esp->conf.ivec = NULL;
-	crypto_free_hash(esp->auth.tfm);
-	esp->auth.tfm = NULL;
-	kfree(esp->auth.work_icv);
-	esp->auth.work_icv = NULL;
+	crypto_free_aead(esp->aead);
 	kfree(esp);
 }
 
 static int esp_init_state(struct xfrm_state *x)
 {
 	struct esp_data *esp = NULL;
-	struct crypto_blkcipher *tfm;
+	struct crypto_aead *aead;
+	struct crypto_authenc_key_param *param;
+	struct rtattr *rta;
+	char *key;
+	char *p;
+	char authenc_name[CRYPTO_MAX_ALG_NAME];
 	u32 align;
+	unsigned int keylen;
+	int err;
 
 	if (x->ealg == NULL)
-		goto error;
+		return -EINVAL;
+
+	if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME, "authenc(%s,%s)",
+		     x->aalg ? x->aalg->alg_name : "digest_null",
+		     x->ealg->alg_name) >= CRYPTO_MAX_ALG_NAME)
+		return -ENAMETOOLONG;
 
 	esp = kzalloc(sizeof(*esp), GFP_KERNEL);
 	if (esp == NULL)
 		return -ENOMEM;
 
+	x->data = esp;
+
+	aead = crypto_alloc_aead(authenc_name, 0, 0);
+	err = PTR_ERR(aead);
+	if (IS_ERR(aead))
+		goto error;
+
+	esp->aead = aead;
+
+	keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
+		 (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
+	err = -ENOMEM;
+	key = kmalloc(keylen, GFP_KERNEL);
+	if (!key)
+		goto error;
+
+	p = key;
+	rta = (void *)p;
+	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
+	rta->rta_len = RTA_LENGTH(sizeof(*param));
+	param = RTA_DATA(rta);
+	p += RTA_SPACE(sizeof(*param));
+
 	if (x->aalg) {
 		struct xfrm_algo_desc *aalg_desc;
-		struct crypto_hash *hash;
-
-		hash = crypto_alloc_hash(x->aalg->alg_name, 0,
-					 CRYPTO_ALG_ASYNC);
-		if (IS_ERR(hash))
-			goto error;
 
-		esp->auth.tfm = hash;
-		if (crypto_hash_setkey(hash, x->aalg->alg_key,
-				       (x->aalg->alg_key_len + 7) / 8))
-			goto error;
+		memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8);
+		p += (x->aalg->alg_key_len + 7) / 8;
 
 		aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
 		BUG_ON(!aalg_desc);
 
+		err = -EINVAL;
 		if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
-		    crypto_hash_digestsize(hash)) {
+		    crypto_aead_authsize(aead)) {
 			NETDEBUG(KERN_INFO "ESP: %s digestsize %u != %hu\n",
 				 x->aalg->alg_name,
-				 crypto_hash_digestsize(hash),
+				 crypto_aead_authsize(aead),
 				 aalg_desc->uinfo.auth.icv_fullbits/8);
-			goto error;
+			goto free_key;
 		}
 
-		esp->auth.icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8;
-		esp->auth.icv_trunc_len = aalg_desc->uinfo.auth.icv_truncbits/8;
-
-		esp->auth.work_icv = kmalloc(esp->auth.icv_full_len, GFP_KERNEL);
-		if (!esp->auth.work_icv)
-			goto error;
+		err = crypto_aead_setauthsize(
+			aead, aalg_desc->uinfo.auth.icv_truncbits / 8);
+		if (err)
+			goto free_key;
 	}
 
-	tfm = crypto_alloc_blkcipher(x->ealg->alg_name, 0, CRYPTO_ALG_ASYNC);
-	if (IS_ERR(tfm))
-		goto error;
-	esp->conf.tfm = tfm;
-	esp->conf.ivlen = crypto_blkcipher_ivsize(tfm);
-	esp->conf.padlen = 0;
-	if (esp->conf.ivlen) {
-		esp->conf.ivec = kmalloc(esp->conf.ivlen, GFP_KERNEL);
-		if (unlikely(esp->conf.ivec == NULL))
-			goto error;
-		esp->conf.ivinitted = 0;
-	}
-	if (crypto_blkcipher_setkey(tfm, x->ealg->alg_key,
-				    (x->ealg->alg_key_len + 7) / 8))
+	esp->padlen = 0;
+
+	param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
+	memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8);
+
+	err = crypto_aead_setkey(aead, key, keylen);
+
+free_key:
+	kfree(key);
+
+	if (err)
 		goto error;
-	x->props.header_len = sizeof(struct ip_esp_hdr) + esp->conf.ivlen;
+
+	x->props.header_len = sizeof(struct ip_esp_hdr) +
+			      crypto_aead_ivsize(aead);
 	if (x->props.mode == XFRM_MODE_TUNNEL)
 		x->props.header_len += sizeof(struct iphdr);
 	else if (x->props.mode == XFRM_MODE_BEET)
@@ -434,18 +545,14 @@ static int esp_init_state(struct xfrm_state *x)
 			break;
 		}
 	}
-	x->data = esp;
-	align = ALIGN(crypto_blkcipher_blocksize(esp->conf.tfm), 4);
-	if (esp->conf.padlen)
-		align = max_t(u32, align, esp->conf.padlen);
-	x->props.trailer_len = align + 1 + esp->auth.icv_trunc_len;
-	return 0;
+
+	align = ALIGN(crypto_aead_blocksize(aead), 4);
+	if (esp->padlen)
+		align = max_t(u32, align, esp->padlen);
+	x->props.trailer_len = align + 1 + crypto_aead_authsize(esp->aead);
 
 error:
-	x->data = esp;
-	esp_destroy(x);
-	x->data = NULL;
-	return -EINVAL;
+	return err;
 }
 
 static struct xfrm_type esp_type =
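[One detail of esp_init_state() above worth spelling out: crypto_aead_setkey()
on an authenc() transform takes a single blob in which an rtattr-framed
crypto_authenc_key_param (carrying the big-endian cipher-key length) precedes
the raw authentication key and then the raw encryption key. A hedged sketch of
that layout; demo_build_authenc_key() and its parameters are hypothetical:

#include <crypto/authenc.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Sketch: assemble [rtattr|param][auth key][enc key], the format
 * crypto_aead_setkey() expects for authenc() transforms. */
static u8 *demo_build_authenc_key(const u8 *akey, unsigned int alen,
				  const u8 *ekey, unsigned int elen,
				  unsigned int *keylen)
{
	struct crypto_authenc_key_param *param;
	struct rtattr *rta;
	u8 *key, *p;

	*keylen = RTA_SPACE(sizeof(*param)) + alen + elen;
	key = kmalloc(*keylen, GFP_KERNEL);
	if (!key)
		return NULL;

	/* rtattr header plus the parameter block holding enckeylen */
	rta = (struct rtattr *)key;
	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	param->enckeylen = cpu_to_be32(elen);

	/* authentication key first, encryption key second */
	p = key + RTA_SPACE(sizeof(*param));
	memcpy(p, akey, alen);
	memcpy(p + alen, ekey, elen);

	return key;
}
]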
diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig
index eb0b8085949b..3ffb0323668c 100644
--- a/net/ipv6/Kconfig
+++ b/net/ipv6/Kconfig
@@ -85,6 +85,7 @@ config INET6_ESP
 	depends on IPV6
 	select XFRM
 	select CRYPTO
+	select CRYPTO_AEAD
 	select CRYPTO_HMAC
 	select CRYPTO_MD5
 	select CRYPTO_CBC
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 5bd5292ad9fa..dc821acf3d33 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -24,33 +24,124 @@
  * This file is derived from net/ipv4/esp.c
  */
 
+#include <crypto/aead.h>
+#include <crypto/authenc.h>
 #include <linux/err.h>
 #include <linux/module.h>
 #include <net/ip.h>
 #include <net/xfrm.h>
 #include <net/esp.h>
 #include <linux/scatterlist.h>
-#include <linux/crypto.h>
 #include <linux/kernel.h>
 #include <linux/pfkeyv2.h>
 #include <linux/random.h>
+#include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <net/icmp.h>
 #include <net/ipv6.h>
 #include <net/protocol.h>
 #include <linux/icmpv6.h>
 
+struct esp_skb_cb {
+	struct xfrm_skb_cb xfrm;
+	void *tmp;
+};
+
+#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))
+
+/*
+ * Allocate an AEAD request structure with extra space for SG and IV.
+ *
+ * For alignment considerations the IV is placed at the front, followed
+ * by the request and finally the SG list.
+ *
+ * TODO: Use spare space in skb for this where possible.
+ */
+static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags)
+{
+	unsigned int len;
+
+	len = crypto_aead_ivsize(aead);
+	if (len) {
+		len += crypto_aead_alignmask(aead) &
+		       ~(crypto_tfm_ctx_alignment() - 1);
+		len = ALIGN(len, crypto_tfm_ctx_alignment());
+	}
+
+	len += sizeof(struct aead_givcrypt_request) + crypto_aead_reqsize(aead);
+	len = ALIGN(len, __alignof__(struct scatterlist));
+
+	len += sizeof(struct scatterlist) * nfrags;
+
+	return kmalloc(len, GFP_ATOMIC);
+}
+
+static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp)
+{
+	return crypto_aead_ivsize(aead) ?
+	       PTR_ALIGN((u8 *)tmp, crypto_aead_alignmask(aead) + 1) : tmp;
+}
+
+static inline struct aead_givcrypt_request *esp_tmp_givreq(
+	struct crypto_aead *aead, u8 *iv)
+{
+	struct aead_givcrypt_request *req;
+
+	req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
+				crypto_tfm_ctx_alignment());
+	aead_givcrypt_set_tfm(req, aead);
+	return req;
+}
+
+static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
+{
+	struct aead_request *req;
+
+	req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
+				crypto_tfm_ctx_alignment());
+	aead_request_set_tfm(req, aead);
+	return req;
+}
+
+static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
+					     struct aead_request *req)
+{
+	return (void *)ALIGN((unsigned long)(req + 1) +
+			     crypto_aead_reqsize(aead),
+			     __alignof__(struct scatterlist));
+}
+
+static inline struct scatterlist *esp_givreq_sg(
+	struct crypto_aead *aead, struct aead_givcrypt_request *req)
+{
+	return (void *)ALIGN((unsigned long)(req + 1) +
+			     crypto_aead_reqsize(aead),
+			     __alignof__(struct scatterlist));
+}
+
+static void esp_output_done(struct crypto_async_request *base, int err)
+{
+	struct sk_buff *skb = base->data;
+
+	kfree(ESP_SKB_CB(skb)->tmp);
+	xfrm_output_resume(skb, err);
+}
+
 static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
 {
 	int err;
 	struct ip_esp_hdr *esph;
-	struct crypto_blkcipher *tfm;
-	struct blkcipher_desc desc;
+	struct crypto_aead *aead;
+	struct aead_givcrypt_request *req;
+	struct scatterlist *sg;
+	struct scatterlist *asg;
 	struct sk_buff *trailer;
+	void *tmp;
 	int blksize;
 	int clen;
 	int alen;
 	int nfrags;
+	u8 *iv;
 	u8 *tail;
 	struct esp_data *esp = x->data;
 
@@ -60,18 +151,26 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
 	/* Round to block size */
 	clen = skb->len;
 
-	alen = esp->auth.icv_trunc_len;
-	tfm = esp->conf.tfm;
-	desc.tfm = tfm;
-	desc.flags = 0;
-	blksize = ALIGN(crypto_blkcipher_blocksize(tfm), 4);
+	aead = esp->aead;
+	alen = crypto_aead_authsize(aead);
+
+	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
 	clen = ALIGN(clen + 2, blksize);
-	if (esp->conf.padlen)
-		clen = ALIGN(clen, esp->conf.padlen);
+	if (esp->padlen)
+		clen = ALIGN(clen, esp->padlen);
 
-	if ((nfrags = skb_cow_data(skb, clen-skb->len+alen, &trailer)) < 0) {
+	if ((err = skb_cow_data(skb, clen - skb->len + alen, &trailer)) < 0)
 		goto error;
-	}
+	nfrags = err;
+
+	tmp = esp_alloc_tmp(aead, nfrags + 1);
+	if (!tmp)
+		goto error;
+
+	iv = esp_tmp_iv(aead, tmp);
+	req = esp_tmp_givreq(aead, iv);
+	asg = esp_givreq_sg(aead, req);
+	sg = asg + 1;
 
 	/* Fill padding... */
 	tail = skb_tail_pointer(trailer);
@@ -81,86 +180,113 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
 			tail[i] = i + 1;
 	} while (0);
 	tail[clen-skb->len - 2] = (clen - skb->len) - 2;
-	pskb_put(skb, trailer, clen - skb->len);
+	tail[clen - skb->len - 1] = *skb_mac_header(skb);
+	pskb_put(skb, trailer, clen - skb->len + alen);
 
 	skb_push(skb, -skb_network_offset(skb));
 	esph = ip_esp_hdr(skb);
-	*(skb_tail_pointer(trailer) - 1) = *skb_mac_header(skb);
 	*skb_mac_header(skb) = IPPROTO_ESP;
 
 	esph->spi = x->id.spi;
 	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq);
 
-	spin_lock_bh(&x->lock);
+	sg_init_table(sg, nfrags);
+	skb_to_sgvec(skb, sg,
+		     esph->enc_data + crypto_aead_ivsize(aead) - skb->data,
+		     clen + alen);
+	sg_init_one(asg, esph, sizeof(*esph));
 
-	if (esp->conf.ivlen) {
-		if (unlikely(!esp->conf.ivinitted)) {
-			get_random_bytes(esp->conf.ivec, esp->conf.ivlen);
-			esp->conf.ivinitted = 1;
-		}
-		crypto_blkcipher_set_iv(tfm, esp->conf.ivec, esp->conf.ivlen);
-	}
+	aead_givcrypt_set_callback(req, 0, esp_output_done, skb);
+	aead_givcrypt_set_crypt(req, sg, sg, clen, iv);
+	aead_givcrypt_set_assoc(req, asg, sizeof(*esph));
+	aead_givcrypt_set_giv(req, esph->enc_data, XFRM_SKB_CB(skb)->seq);
 
-	do {
-		struct scatterlist *sg = &esp->sgbuf[0];
+	ESP_SKB_CB(skb)->tmp = tmp;
+	err = crypto_aead_givencrypt(req);
+	if (err == -EINPROGRESS)
+		goto error;
 
-		if (unlikely(nfrags > ESP_NUM_FAST_SG)) {
-			sg = kmalloc(sizeof(struct scatterlist)*nfrags, GFP_ATOMIC);
-			if (!sg)
-				goto unlock;
-		}
-		sg_init_table(sg, nfrags);
-		skb_to_sgvec(skb, sg,
-			     esph->enc_data +
-			     esp->conf.ivlen -
-			     skb->data, clen);
-		err = crypto_blkcipher_encrypt(&desc, sg, sg, clen);
-		if (unlikely(sg != &esp->sgbuf[0]))
-			kfree(sg);
-	} while (0);
+	if (err == -EBUSY)
+		err = NET_XMIT_DROP;
+
+	kfree(tmp);
+
+error:
+	return err;
+}
+
+static int esp_input_done2(struct sk_buff *skb, int err)
+{
+	struct xfrm_state *x = xfrm_input_state(skb);
+	struct esp_data *esp = x->data;
+	struct crypto_aead *aead = esp->aead;
+	int alen = crypto_aead_authsize(aead);
+	int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
+	int elen = skb->len - hlen;
+	int hdr_len = skb_network_header_len(skb);
+	int padlen;
+	u8 nexthdr[2];
+
+	kfree(ESP_SKB_CB(skb)->tmp);
 
 	if (unlikely(err))
-		goto unlock;
+		goto out;
 
-	if (esp->conf.ivlen) {
-		memcpy(esph->enc_data, esp->conf.ivec, esp->conf.ivlen);
-		crypto_blkcipher_get_iv(tfm, esp->conf.ivec, esp->conf.ivlen);
-	}
+	if (skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2))
+		BUG();
 
-	if (esp->auth.icv_full_len) {
-		err = esp_mac_digest(esp, skb, (u8 *)esph - skb->data,
-				     sizeof(*esph) + esp->conf.ivlen + clen);
-		memcpy(pskb_put(skb, trailer, alen), esp->auth.work_icv, alen);
+	err = -EINVAL;
+	padlen = nexthdr[0];
+	if (padlen + 2 + alen >= elen) {
+		LIMIT_NETDEBUG(KERN_WARNING "ipsec esp packet is garbage "
+			       "padlen=%d, elen=%d\n", padlen + 2, elen - alen);
+		goto out;
 	}
 
-unlock:
-	spin_unlock_bh(&x->lock);
+	/* ... check padding bits here. Silly. :-) */
 
-error:
+	pskb_trim(skb, skb->len - alen - padlen - 2);
+	__skb_pull(skb, hlen);
+	skb_set_transport_header(skb, -hdr_len);
+
+	err = nexthdr[1];
+
+	/* RFC4303: Drop dummy packets without any error */
+	if (err == IPPROTO_NONE)
+		err = -EINVAL;
+
+out:
 	return err;
 }
 
+static void esp_input_done(struct crypto_async_request *base, int err)
+{
+	struct sk_buff *skb = base->data;
+
+	xfrm_input_resume(skb, esp_input_done2(skb, err));
+}
+
 static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
 {
-	struct ipv6hdr *iph;
 	struct ip_esp_hdr *esph;
 	struct esp_data *esp = x->data;
-	struct crypto_blkcipher *tfm = esp->conf.tfm;
-	struct blkcipher_desc desc = { .tfm = tfm };
+	struct crypto_aead *aead = esp->aead;
+	struct aead_request *req;
 	struct sk_buff *trailer;
-	int blksize = ALIGN(crypto_blkcipher_blocksize(tfm), 4);
-	int alen = esp->auth.icv_trunc_len;
-	int elen = skb->len - sizeof(*esph) - esp->conf.ivlen - alen;
-	int hdr_len = skb_network_header_len(skb);
+	int elen = skb->len - sizeof(*esph) - crypto_aead_ivsize(aead);
 	int nfrags;
 	int ret = 0;
+	void *tmp;
+	u8 *iv;
+	struct scatterlist *sg;
+	struct scatterlist *asg;
 
 	if (!pskb_may_pull(skb, sizeof(*esph))) {
 		ret = -EINVAL;
 		goto out;
 	}
 
-	if (elen <= 0 || (elen & (blksize-1))) {
+	if (elen <= 0) {
 		ret = -EINVAL;
 		goto out;
 	}
@@ -170,86 +296,38 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
 		goto out;
 	}
 
-	skb->ip_summed = CHECKSUM_NONE;
-
-	spin_lock(&x->lock);
-
-	/* If integrity check is required, do this. */
-	if (esp->auth.icv_full_len) {
-		u8 sum[alen];
-
-		ret = esp_mac_digest(esp, skb, 0, skb->len - alen);
-		if (ret)
-			goto unlock;
+	ret = -ENOMEM;
+	tmp = esp_alloc_tmp(aead, nfrags + 1);
+	if (!tmp)
+		goto out;
 
-		if (skb_copy_bits(skb, skb->len - alen, sum, alen))
-			BUG();
+	ESP_SKB_CB(skb)->tmp = tmp;
+	iv = esp_tmp_iv(aead, tmp);
+	req = esp_tmp_req(aead, iv);
+	asg = esp_req_sg(aead, req);
+	sg = asg + 1;
 
-		if (unlikely(memcmp(esp->auth.work_icv, sum, alen))) {
-			ret = -EBADMSG;
-			goto unlock;
-		}
-	}
+	skb->ip_summed = CHECKSUM_NONE;
 
 	esph = (struct ip_esp_hdr *)skb->data;
-	iph = ipv6_hdr(skb);
 
 	/* Get ivec. This can be wrong, check against another impls. */
-	if (esp->conf.ivlen)
-		crypto_blkcipher_set_iv(tfm, esph->enc_data, esp->conf.ivlen);
-
-	{
-		struct scatterlist *sg = &esp->sgbuf[0];
-
-		if (unlikely(nfrags > ESP_NUM_FAST_SG)) {
-			sg = kmalloc(sizeof(struct scatterlist)*nfrags, GFP_ATOMIC);
-			if (!sg) {
-				ret = -ENOMEM;
-				goto unlock;
-			}
-		}
-		sg_init_table(sg, nfrags);
-		skb_to_sgvec(skb, sg,
-			     sizeof(*esph) + esp->conf.ivlen,
-			     elen);
-		ret = crypto_blkcipher_decrypt(&desc, sg, sg, elen);
-		if (unlikely(sg != &esp->sgbuf[0]))
-			kfree(sg);
-	}
+	iv = esph->enc_data;
 
-unlock:
-	spin_unlock(&x->lock);
+	sg_init_table(sg, nfrags);
+	skb_to_sgvec(skb, sg, sizeof(*esph) + crypto_aead_ivsize(aead), elen);
+	sg_init_one(asg, esph, sizeof(*esph));
 
-	if (unlikely(ret))
-		goto out;
+	aead_request_set_callback(req, 0, esp_input_done, skb);
+	aead_request_set_crypt(req, sg, sg, elen, iv);
+	aead_request_set_assoc(req, asg, sizeof(*esph));
 
-	{
-		u8 nexthdr[2];
-		u8 padlen;
-
-		if (skb_copy_bits(skb, skb->len-alen-2, nexthdr, 2))
-			BUG();
-
-		padlen = nexthdr[0];
-		if (padlen+2 >= elen) {
-			LIMIT_NETDEBUG(KERN_WARNING "ipsec esp packet is garbage padlen=%d, elen=%d\n", padlen+2, elen);
-			ret = -EINVAL;
-			goto out;
-		}
-		/* ... check padding bits here. Silly. :-) */
-
-		/* RFC4303: Drop dummy packets without any error */
-		if (nexthdr[1] == IPPROTO_NONE) {
-			ret = -EINVAL;
-			goto out;
-		}
+	ret = crypto_aead_decrypt(req);
+	if (ret == -EINPROGRESS)
+		goto out;
 
-		pskb_trim(skb, skb->len - alen - padlen - 2);
-		ret = nexthdr[1];
-	}
+	ret = esp_input_done2(skb, ret);
 
-	__skb_pull(skb, sizeof(*esph) + esp->conf.ivlen);
-	skb_set_transport_header(skb, -hdr_len);
 out:
 	return ret;
 }
@@ -257,11 +335,11 @@ out:
 static u32 esp6_get_mtu(struct xfrm_state *x, int mtu)
 {
 	struct esp_data *esp = x->data;
-	u32 blksize = ALIGN(crypto_blkcipher_blocksize(esp->conf.tfm), 4);
-	u32 align = max_t(u32, blksize, esp->conf.padlen);
+	u32 blksize = ALIGN(crypto_aead_blocksize(esp->aead), 4);
+	u32 align = max_t(u32, blksize, esp->padlen);
 	u32 rem;
 
-	mtu -= x->props.header_len + esp->auth.icv_trunc_len;
+	mtu -= x->props.header_len + crypto_aead_authsize(esp->aead);
 	rem = mtu & (align - 1);
 	mtu &= ~(align - 1);
 
@@ -300,81 +378,101 @@ static void esp6_destroy(struct xfrm_state *x)
 	if (!esp)
 		return;
 
-	crypto_free_blkcipher(esp->conf.tfm);
-	esp->conf.tfm = NULL;
-	kfree(esp->conf.ivec);
-	esp->conf.ivec = NULL;
-	crypto_free_hash(esp->auth.tfm);
-	esp->auth.tfm = NULL;
-	kfree(esp->auth.work_icv);
-	esp->auth.work_icv = NULL;
+	crypto_free_aead(esp->aead);
 	kfree(esp);
 }
 
 static int esp6_init_state(struct xfrm_state *x)
 {
 	struct esp_data *esp = NULL;
-	struct crypto_blkcipher *tfm;
+	struct crypto_aead *aead;
+	struct crypto_authenc_key_param *param;
+	struct rtattr *rta;
+	char *key;
+	char *p;
+	char authenc_name[CRYPTO_MAX_ALG_NAME];
+	u32 align;
+	unsigned int keylen;
+	int err;
 
 	if (x->ealg == NULL)
-		goto error;
+		return -EINVAL;
 
 	if (x->encap)
-		goto error;
+		return -EINVAL;
+
+	if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME, "authenc(%s,%s)",
+		     x->aalg ? x->aalg->alg_name : "digest_null",
+		     x->ealg->alg_name) >= CRYPTO_MAX_ALG_NAME)
+		return -ENAMETOOLONG;
 
 	esp = kzalloc(sizeof(*esp), GFP_KERNEL);
 	if (esp == NULL)
 		return -ENOMEM;
 
+	x->data = esp;
+
+	aead = crypto_alloc_aead(authenc_name, 0, 0);
+	err = PTR_ERR(aead);
+	if (IS_ERR(aead))
+		goto error;
+
+	esp->aead = aead;
+
+	keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
+		 (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
+	err = -ENOMEM;
+	key = kmalloc(keylen, GFP_KERNEL);
+	if (!key)
+		goto error;
+
+	p = key;
+	rta = (void *)p;
+	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
+	rta->rta_len = RTA_LENGTH(sizeof(*param));
+	param = RTA_DATA(rta);
+	p += RTA_SPACE(sizeof(*param));
+
 	if (x->aalg) {
 		struct xfrm_algo_desc *aalg_desc;
-		struct crypto_hash *hash;
-
-		hash = crypto_alloc_hash(x->aalg->alg_name, 0,
-					 CRYPTO_ALG_ASYNC);
-		if (IS_ERR(hash))
-			goto error;
 
-		esp->auth.tfm = hash;
-		if (crypto_hash_setkey(hash, x->aalg->alg_key,
-				       (x->aalg->alg_key_len + 7) / 8))
-			goto error;
+		memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8);
+		p += (x->aalg->alg_key_len + 7) / 8;
 
 		aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
 		BUG_ON(!aalg_desc);
 
+		err = -EINVAL;
 		if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
-		    crypto_hash_digestsize(hash)) {
+		    crypto_aead_authsize(aead)) {
 			NETDEBUG(KERN_INFO "ESP: %s digestsize %u != %hu\n",
 				 x->aalg->alg_name,
-				 crypto_hash_digestsize(hash),
+				 crypto_aead_authsize(aead),
 				 aalg_desc->uinfo.auth.icv_fullbits/8);
-			goto error;
+			goto free_key;
 		}
 
-		esp->auth.icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8;
-		esp->auth.icv_trunc_len = aalg_desc->uinfo.auth.icv_truncbits/8;
-
-		esp->auth.work_icv = kmalloc(esp->auth.icv_full_len, GFP_KERNEL);
-		if (!esp->auth.work_icv)
-			goto error;
-	}
-	tfm = crypto_alloc_blkcipher(x->ealg->alg_name, 0, CRYPTO_ALG_ASYNC);
-	if (IS_ERR(tfm))
-		goto error;
-	esp->conf.tfm = tfm;
-	esp->conf.ivlen = crypto_blkcipher_ivsize(tfm);
-	esp->conf.padlen = 0;
-	if (esp->conf.ivlen) {
-		esp->conf.ivec = kmalloc(esp->conf.ivlen, GFP_KERNEL);
-		if (unlikely(esp->conf.ivec == NULL))
-			goto error;
-		esp->conf.ivinitted = 0;
+		err = crypto_aead_setauthsize(
+			aead, aalg_desc->uinfo.auth.icv_truncbits / 8);
+		if (err)
+			goto free_key;
 	}
-	if (crypto_blkcipher_setkey(tfm, x->ealg->alg_key,
-				    (x->ealg->alg_key_len + 7) / 8))
+
+	esp->padlen = 0;
+
+	param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
+	memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8);
+
+	err = crypto_aead_setkey(aead, key, keylen);
+
+free_key:
+	kfree(key);
+
+	if (err)
 		goto error;
-	x->props.header_len = sizeof(struct ip_esp_hdr) + esp->conf.ivlen;
+
+	x->props.header_len = sizeof(struct ip_esp_hdr) +
+			      crypto_aead_ivsize(aead);
 	switch (x->props.mode) {
 	case XFRM_MODE_BEET:
 	case XFRM_MODE_TRANSPORT:
@@ -385,14 +483,14 @@ static int esp6_init_state(struct xfrm_state *x)
 	default:
 		goto error;
 	}
-	x->data = esp;
-	return 0;
+
+	align = ALIGN(crypto_aead_blocksize(aead), 4);
+	if (esp->padlen)
+		align = max_t(u32, align, esp->padlen);
+	x->props.trailer_len = align + 1 + crypto_aead_authsize(esp->aead);
 
 error:
-	x->data = esp;
-	esp6_destroy(x);
-	x->data = NULL;
-	return -EINVAL;
+	return err;
 }
 
 static struct xfrm_type esp6_type =
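[Both files end up with the same asynchronous contract: fire the AEAD request,
treat -EINPROGRESS as "the completion callback will finish later", and handle
anything else inline (with -EBUSY on the output path downgraded to
NET_XMIT_DROP). A condensed sketch of that control flow; the demo_* names are
illustrative only:

#include <crypto/aead.h>
#include <linux/crypto.h>
#include <linux/printk.h>

/* Illustrative post-processing; ESP's real equivalents are
 * esp_input_done2() and xfrm_input_resume(). */
static int demo_finish(void *ctx, int err)
{
	pr_debug("aead request %p completed: %d\n", ctx, err);
	return err;
}

/* Completion callback: runs only for requests the driver deferred
 * with -EINPROGRESS (e.g. async hardware offload). */
static void demo_done(struct crypto_async_request *base, int err)
{
	demo_finish(base->data, err);
}

static int demo_decrypt(struct aead_request *req, void *ctx)
{
	int err;

	aead_request_set_callback(req, 0, demo_done, ctx);

	err = crypto_aead_decrypt(req);
	if (err == -EINPROGRESS)
		return err;	/* demo_done() will finish the job */

	return demo_finish(ctx, err);	/* synchronous completion */
}
]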