author		Herbert Xu <herbert@gondor.apana.org.au>	2008-07-25 05:54:40 -0400
committer	David S. Miller <davem@davemloft.net>	2008-07-25 05:54:40 -0400
commit		6fccab671f2f0a24b799f29a4ec878f62d34656c (patch)
tree		e90a1ac0770f8fe59bd7c8768663052a7756b950 /net/ipv6/ipcomp6.c
parent		cffe1c5d7a5a1e54f7c2c6d0510f651a965bccc3 (diff)
ipsec: ipcomp - Merge IPComp implementations
This patch merges the IPv4/IPv6 IPComp implementations since most of the code is identical. As a result, future enhancements will no longer need to be duplicated.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
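The heavy lifting removed below (per-CPU scratch buffers, the per-algorithm crypto_comp tfm lists, and the compress/decompress paths) now lives in the shared IPComp code; the IPv6 side keeps only its protocol-specific glue and points its xfrm_type callbacks at the common helpers. As a rough sketch, the shared interface implied by the + side of this diff looks like the following (the prototypes are an assumption, inferred from the callback slots they fill and from the err = ipcomp_init_state(x) call, not quoted from the common header):

    /* Sketch only: ipcomp_input/ipcomp_output/ipcomp_destroy/ipcomp_init_state
     * are the names that appear on the + side of this diff; their signatures
     * are inferred from the xfrm_type slots they are assigned to.
     */
    struct xfrm_state;
    struct sk_buff;

    int  ipcomp_init_state(struct xfrm_state *x);                  /* allocate shared tfms/scratch */
    void ipcomp_destroy(struct xfrm_state *x);                     /* release per-state resources */
    int  ipcomp_input(struct xfrm_state *x, struct sk_buff *skb);  /* decompress received payload */
    int  ipcomp_output(struct xfrm_state *x, struct sk_buff *skb); /* compress outgoing payload */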
Diffstat (limited to 'net/ipv6/ipcomp6.c')
-rw-r--r--	net/ipv6/ipcomp6.c	298
1 file changed, 6 insertions(+), 292 deletions(-)
diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c
index ee6de425ce6b..0cfcea42153a 100644
--- a/net/ipv6/ipcomp6.c
+++ b/net/ipv6/ipcomp6.c
@@ -50,125 +50,6 @@
 #include <linux/icmpv6.h>
 #include <linux/mutex.h>
 
-struct ipcomp6_tfms {
-	struct list_head list;
-	struct crypto_comp **tfms;
-	int users;
-};
-
-static DEFINE_MUTEX(ipcomp6_resource_mutex);
-static void **ipcomp6_scratches;
-static int ipcomp6_scratch_users;
-static LIST_HEAD(ipcomp6_tfms_list);
-
-static int ipcomp6_input(struct xfrm_state *x, struct sk_buff *skb)
-{
-	int nexthdr;
-	int err = -ENOMEM;
-	struct ip_comp_hdr *ipch;
-	int plen, dlen;
-	struct ipcomp_data *ipcd = x->data;
-	u8 *start, *scratch;
-	struct crypto_comp *tfm;
-	int cpu;
-
-	if (skb_linearize_cow(skb))
-		goto out;
-
-	skb->ip_summed = CHECKSUM_NONE;
-
-	/* Remove ipcomp header and decompress original payload */
-	ipch = (void *)skb->data;
-	nexthdr = ipch->nexthdr;
-
-	skb->transport_header = skb->network_header + sizeof(*ipch);
-	__skb_pull(skb, sizeof(*ipch));
-
-	/* decompression */
-	plen = skb->len;
-	dlen = IPCOMP_SCRATCH_SIZE;
-	start = skb->data;
-
-	cpu = get_cpu();
-	scratch = *per_cpu_ptr(ipcomp6_scratches, cpu);
-	tfm = *per_cpu_ptr(ipcd->tfms, cpu);
-
-	err = crypto_comp_decompress(tfm, start, plen, scratch, &dlen);
-	if (err)
-		goto out_put_cpu;
-
-	if (dlen < (plen + sizeof(*ipch))) {
-		err = -EINVAL;
-		goto out_put_cpu;
-	}
-
-	err = pskb_expand_head(skb, 0, dlen - plen, GFP_ATOMIC);
-	if (err) {
-		goto out_put_cpu;
-	}
-
-	skb->truesize += dlen - plen;
-	__skb_put(skb, dlen - plen);
-	skb_copy_to_linear_data(skb, scratch, dlen);
-	err = nexthdr;
-
-out_put_cpu:
-	put_cpu();
-out:
-	return err;
-}
-
-static int ipcomp6_output(struct xfrm_state *x, struct sk_buff *skb)
-{
-	int err;
-	struct ip_comp_hdr *ipch;
-	struct ipcomp_data *ipcd = x->data;
-	int plen, dlen;
-	u8 *start, *scratch;
-	struct crypto_comp *tfm;
-	int cpu;
-
-	/* check whether datagram len is larger than threshold */
-	if (skb->len < ipcd->threshold) {
-		goto out_ok;
-	}
-
-	if (skb_linearize_cow(skb))
-		goto out_ok;
-
-	/* compression */
-	plen = skb->len;
-	dlen = IPCOMP_SCRATCH_SIZE;
-	start = skb->data;
-
-	cpu = get_cpu();
-	scratch = *per_cpu_ptr(ipcomp6_scratches, cpu);
-	tfm = *per_cpu_ptr(ipcd->tfms, cpu);
-
-	local_bh_disable();
-	err = crypto_comp_compress(tfm, start, plen, scratch, &dlen);
-	local_bh_enable();
-	if (err || (dlen + sizeof(*ipch)) >= plen) {
-		put_cpu();
-		goto out_ok;
-	}
-	memcpy(start + sizeof(struct ip_comp_hdr), scratch, dlen);
-	put_cpu();
-	pskb_trim(skb, dlen + sizeof(struct ip_comp_hdr));
-
-	/* insert ipcomp header and replace datagram */
-	ipch = ip_comp_hdr(skb);
-	ipch->nexthdr = *skb_mac_header(skb);
-	ipch->flags = 0;
-	ipch->cpi = htons((u16 )ntohl(x->id.spi));
-	*skb_mac_header(skb) = IPPROTO_COMP;
-
-out_ok:
-	skb_push(skb, -skb_network_offset(skb));
-
-	return 0;
-}
-
 static void ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 			int type, int code, int offset, __be32 info)
 {
@@ -251,161 +132,12 @@ out:
 	return err;
 }
 
-static void ipcomp6_free_scratches(void)
-{
-	int i;
-	void **scratches;
-
-	if (--ipcomp6_scratch_users)
-		return;
-
-	scratches = ipcomp6_scratches;
-	if (!scratches)
-		return;
-
-	for_each_possible_cpu(i) {
-		void *scratch = *per_cpu_ptr(scratches, i);
-
-		vfree(scratch);
-	}
-
-	free_percpu(scratches);
-}
-
-static void **ipcomp6_alloc_scratches(void)
-{
-	int i;
-	void **scratches;
-
-	if (ipcomp6_scratch_users++)
-		return ipcomp6_scratches;
-
-	scratches = alloc_percpu(void *);
-	if (!scratches)
-		return NULL;
-
-	ipcomp6_scratches = scratches;
-
-	for_each_possible_cpu(i) {
-		void *scratch = vmalloc(IPCOMP_SCRATCH_SIZE);
-		if (!scratch)
-			return NULL;
-		*per_cpu_ptr(scratches, i) = scratch;
-	}
-
-	return scratches;
-}
-
-static void ipcomp6_free_tfms(struct crypto_comp **tfms)
-{
-	struct ipcomp6_tfms *pos;
-	int cpu;
-
-	list_for_each_entry(pos, &ipcomp6_tfms_list, list) {
-		if (pos->tfms == tfms)
-			break;
-	}
-
-	BUG_TRAP(pos);
-
-	if (--pos->users)
-		return;
-
-	list_del(&pos->list);
-	kfree(pos);
-
-	if (!tfms)
-		return;
-
-	for_each_possible_cpu(cpu) {
-		struct crypto_comp *tfm = *per_cpu_ptr(tfms, cpu);
-		crypto_free_comp(tfm);
-	}
-	free_percpu(tfms);
-}
-
-static struct crypto_comp **ipcomp6_alloc_tfms(const char *alg_name)
-{
-	struct ipcomp6_tfms *pos;
-	struct crypto_comp **tfms;
-	int cpu;
-
-	/* This can be any valid CPU ID so we don't need locking. */
-	cpu = raw_smp_processor_id();
-
-	list_for_each_entry(pos, &ipcomp6_tfms_list, list) {
-		struct crypto_comp *tfm;
-
-		tfms = pos->tfms;
-		tfm = *per_cpu_ptr(tfms, cpu);
-
-		if (!strcmp(crypto_comp_name(tfm), alg_name)) {
-			pos->users++;
-			return tfms;
-		}
-	}
-
-	pos = kmalloc(sizeof(*pos), GFP_KERNEL);
-	if (!pos)
-		return NULL;
-
-	pos->users = 1;
-	INIT_LIST_HEAD(&pos->list);
-	list_add(&pos->list, &ipcomp6_tfms_list);
-
-	pos->tfms = tfms = alloc_percpu(struct crypto_comp *);
-	if (!tfms)
-		goto error;
-
-	for_each_possible_cpu(cpu) {
-		struct crypto_comp *tfm = crypto_alloc_comp(alg_name, 0,
-							    CRYPTO_ALG_ASYNC);
-		if (IS_ERR(tfm))
-			goto error;
-		*per_cpu_ptr(tfms, cpu) = tfm;
-	}
-
-	return tfms;
-
-error:
-	ipcomp6_free_tfms(tfms);
-	return NULL;
-}
-
-static void ipcomp6_free_data(struct ipcomp_data *ipcd)
-{
-	if (ipcd->tfms)
-		ipcomp6_free_tfms(ipcd->tfms);
-	ipcomp6_free_scratches();
-}
-
-static void ipcomp6_destroy(struct xfrm_state *x)
-{
-	struct ipcomp_data *ipcd = x->data;
-	if (!ipcd)
-		return;
-	xfrm_state_delete_tunnel(x);
-	mutex_lock(&ipcomp6_resource_mutex);
-	ipcomp6_free_data(ipcd);
-	mutex_unlock(&ipcomp6_resource_mutex);
-	kfree(ipcd);
-
-	xfrm6_tunnel_free_spi((xfrm_address_t *)&x->props.saddr);
-}
-
 static int ipcomp6_init_state(struct xfrm_state *x)
 {
 	int err;
 	struct ipcomp_data *ipcd;
 	struct xfrm_algo_desc *calg_desc;
 
-	err = -EINVAL;
-	if (!x->calg)
-		goto out;
-
-	if (x->encap)
-		goto out;
-
 	x->props.header_len = 0;
 	switch (x->props.mode) {
 	case XFRM_MODE_TRANSPORT:
@@ -417,39 +149,21 @@ static int ipcomp6_init_state(struct xfrm_state *x)
 		goto out;
 	}
 
-	err = -ENOMEM;
-	ipcd = kzalloc(sizeof(*ipcd), GFP_KERNEL);
-	if (!ipcd)
+	err = ipcomp_init_state(x);
+	if (err)
 		goto out;
 
-	mutex_lock(&ipcomp6_resource_mutex);
-	if (!ipcomp6_alloc_scratches())
-		goto error;
-
-	ipcd->tfms = ipcomp6_alloc_tfms(x->calg->alg_name);
-	if (!ipcd->tfms)
-		goto error;
-	mutex_unlock(&ipcomp6_resource_mutex);
-
 	if (x->props.mode == XFRM_MODE_TUNNEL) {
 		err = ipcomp6_tunnel_attach(x);
 		if (err)
 			goto error_tunnel;
 	}
 
-	calg_desc = xfrm_calg_get_byname(x->calg->alg_name, 0);
-	BUG_ON(!calg_desc);
-	ipcd->threshold = calg_desc->uinfo.comp.threshold;
-	x->data = ipcd;
 	err = 0;
 out:
 	return err;
 error_tunnel:
-	mutex_lock(&ipcomp6_resource_mutex);
-error:
-	ipcomp6_free_data(ipcd);
-	mutex_unlock(&ipcomp6_resource_mutex);
-	kfree(ipcd);
+	ipcomp_destroy(x);
 
 	goto out;
 }
@@ -460,9 +174,9 @@ static const struct xfrm_type ipcomp6_type =
 	.owner		= THIS_MODULE,
 	.proto		= IPPROTO_COMP,
 	.init_state	= ipcomp6_init_state,
-	.destructor	= ipcomp6_destroy,
-	.input		= ipcomp6_input,
-	.output		= ipcomp6_output,
+	.destructor	= ipcomp_destroy,
+	.input		= ipcomp_input,
+	.output		= ipcomp_output,
 	.hdr_offset	= xfrm6_find_1stfragopt,
 };
 