Diffstat (limited to 'net/ipv6')
-rw-r--r--	net/ipv6/addrconf.c	10
-rw-r--r--	net/ipv6/af_inet6.c	1
-rw-r--r--	net/ipv6/ah6.c	11
-rw-r--r--	net/ipv6/esp6.c	9
-rw-r--r--	net/ipv6/ip6_flowlabel.c	57
-rw-r--r--	net/ipv6/ipcomp6.c	9
-rw-r--r--	net/ipv6/netfilter/nf_conntrack_reasm.c	112
-rw-r--r--	net/ipv6/reassembly.c	131
-rw-r--r--	net/ipv6/xfrm6_input.c	14
-rw-r--r--	net/ipv6/xfrm6_mode_beet.c	1
-rw-r--r--	net/ipv6/xfrm6_mode_ro.c	9
-rw-r--r--	net/ipv6/xfrm6_mode_tunnel.c	1
-rw-r--r--	net/ipv6/xfrm6_output.c	2
-rw-r--r--	net/ipv6/xfrm6_policy.c	17
-rw-r--r--	net/ipv6/xfrm6_state.c	7
-rw-r--r--	net/ipv6/xfrm6_tunnel.c	4
16 files changed, 146 insertions, 249 deletions
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 52d10d213217..348bd8d06112 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -255,11 +255,6 @@ static void addrconf_mod_timer(struct inet6_ifaddr *ifp,
 
 static int snmp6_alloc_dev(struct inet6_dev *idev)
 {
-	int err = -ENOMEM;
-
-	if (!idev || !idev->dev)
-		return -EINVAL;
-
 	if (snmp_mib_init((void **)idev->stats.ipv6,
 			  sizeof(struct ipstats_mib),
 			  __alignof__(struct ipstats_mib)) < 0)
@@ -280,15 +275,14 @@ err_icmpmsg:
 err_icmp:
 	snmp_mib_free((void **)idev->stats.ipv6);
 err_ip:
-	return err;
+	return -ENOMEM;
 }
 
-static int snmp6_free_dev(struct inet6_dev *idev)
+static void snmp6_free_dev(struct inet6_dev *idev)
 {
 	snmp_mib_free((void **)idev->stats.icmpv6msg);
 	snmp_mib_free((void **)idev->stats.icmpv6);
 	snmp_mib_free((void **)idev->stats.ipv6);
-	return 0;
 }
 
 /* Nobody refers to this device, we may destroy it. */
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index bc929381fa46..1b1caf3aa1c1 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -747,6 +747,7 @@ static void cleanup_ipv6_mibs(void)
 {
 	snmp_mib_free((void **)ipv6_statistics);
 	snmp_mib_free((void **)icmpv6_statistics);
+	snmp_mib_free((void **)icmpv6msg_statistics);
 	snmp_mib_free((void **)udp_stats_in6);
 	snmp_mib_free((void **)udplite_stats_in6);
 }
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index f9f689162692..67cd06613a25 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -344,6 +344,8 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
 	    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
 		goto out;
 
+	skb->ip_summed = CHECKSUM_NONE;
+
 	hdr_len = skb->data - skb_network_header(skb);
 	ah = (struct ip_auth_hdr *)skb->data;
 	ahp = x->data;
@@ -475,8 +477,15 @@ static int ah6_init_state(struct xfrm_state *x)
 
 	x->props.header_len = XFRM_ALIGN8(sizeof(struct ip_auth_hdr) +
 					  ahp->icv_trunc_len);
-	if (x->props.mode == XFRM_MODE_TUNNEL)
+	switch (x->props.mode) {
+	case XFRM_MODE_BEET:
+	case XFRM_MODE_TRANSPORT:
+		break;
+	case XFRM_MODE_TUNNEL:
 		x->props.header_len += sizeof(struct ipv6hdr);
+	default:
+		goto error;
+	}
 	x->data = ahp;
 
 	return 0;
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 9eb928598351..b0715432e454 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -354,8 +354,15 @@ static int esp6_init_state(struct xfrm_state *x)
 				     (x->ealg->alg_key_len + 7) / 8))
 		goto error;
 	x->props.header_len = sizeof(struct ip_esp_hdr) + esp->conf.ivlen;
-	if (x->props.mode == XFRM_MODE_TUNNEL)
+	switch (x->props.mode) {
+	case XFRM_MODE_BEET:
+	case XFRM_MODE_TRANSPORT:
+		break;
+	case XFRM_MODE_TUNNEL:
 		x->props.header_len += sizeof(struct ipv6hdr);
+	default:
+		goto error;
+	}
 	x->data = esp;
 	return 0;
 
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index 217d60f9fc80..b12cc22e7745 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -154,8 +154,10 @@ static void ip6_fl_gc(unsigned long dummy)
 	write_unlock(&ip6_fl_lock);
 }
 
-static int fl_intern(struct ip6_flowlabel *fl, __be32 label)
+static struct ip6_flowlabel *fl_intern(struct ip6_flowlabel *fl, __be32 label)
 {
+	struct ip6_flowlabel *lfl;
+
 	fl->label = label & IPV6_FLOWLABEL_MASK;
 
 	write_lock_bh(&ip6_fl_lock);
@@ -163,12 +165,26 @@ static int fl_intern(struct ip6_flowlabel *fl, __be32 label)
 		for (;;) {
 			fl->label = htonl(net_random())&IPV6_FLOWLABEL_MASK;
 			if (fl->label) {
-				struct ip6_flowlabel *lfl;
 				lfl = __fl_lookup(fl->label);
 				if (lfl == NULL)
 					break;
 			}
 		}
+	} else {
+		/*
+		 * we dropper the ip6_fl_lock, so this entry could reappear
+		 * and we need to recheck with it.
+		 *
+		 * OTOH no need to search the active socket first, like it is
+		 * done in ipv6_flowlabel_opt - sock is locked, so new entry
+		 * with the same label can only appear on another sock
+		 */
+		lfl = __fl_lookup(fl->label);
+		if (lfl != NULL) {
+			atomic_inc(&lfl->users);
+			write_unlock_bh(&ip6_fl_lock);
+			return lfl;
+		}
 	}
 
 	fl->lastuse = jiffies;
@@ -176,7 +192,7 @@ static int fl_intern(struct ip6_flowlabel *fl, __be32 label)
 	fl_ht[FL_HASH(fl->label)] = fl;
 	atomic_inc(&fl_size);
 	write_unlock_bh(&ip6_fl_lock);
-	return 0;
+	return NULL;
 }
 
 
@@ -190,14 +206,17 @@ struct ip6_flowlabel * fl6_sock_lookup(struct sock *sk, __be32 label)
 
 	label &= IPV6_FLOWLABEL_MASK;
 
+	read_lock_bh(&ip6_sk_fl_lock);
 	for (sfl=np->ipv6_fl_list; sfl; sfl = sfl->next) {
 		struct ip6_flowlabel *fl = sfl->fl;
 		if (fl->label == label) {
 			fl->lastuse = jiffies;
 			atomic_inc(&fl->users);
+			read_unlock_bh(&ip6_sk_fl_lock);
 			return fl;
 		}
 	}
+	read_unlock_bh(&ip6_sk_fl_lock);
 	return NULL;
 }
 
@@ -409,6 +428,16 @@ static int ipv6_opt_cmp(struct ipv6_txoptions *o1, struct ipv6_txoptions *o2)
 	return 0;
 }
 
+static inline void fl_link(struct ipv6_pinfo *np, struct ipv6_fl_socklist *sfl,
+		struct ip6_flowlabel *fl)
+{
+	write_lock_bh(&ip6_sk_fl_lock);
+	sfl->fl = fl;
+	sfl->next = np->ipv6_fl_list;
+	np->ipv6_fl_list = sfl;
+	write_unlock_bh(&ip6_sk_fl_lock);
+}
+
 int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
 {
 	int err;
@@ -416,7 +445,8 @@ int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
 	struct in6_flowlabel_req freq;
 	struct ipv6_fl_socklist *sfl1=NULL;
 	struct ipv6_fl_socklist *sfl, **sflp;
-	struct ip6_flowlabel *fl;
+	struct ip6_flowlabel *fl, *fl1 = NULL;
+
 
 	if (optlen < sizeof(freq))
 		return -EINVAL;
@@ -472,8 +502,6 @@ int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
 	sfl1 = kmalloc(sizeof(*sfl1), GFP_KERNEL);
 
 	if (freq.flr_label) {
-		struct ip6_flowlabel *fl1 = NULL;
-
 		err = -EEXIST;
 		read_lock_bh(&ip6_sk_fl_lock);
 		for (sfl = np->ipv6_fl_list; sfl; sfl = sfl->next) {
@@ -492,6 +520,7 @@ int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
 		if (fl1 == NULL)
 			fl1 = fl_lookup(freq.flr_label);
 		if (fl1) {
+recheck:
 			err = -EEXIST;
 			if (freq.flr_flags&IPV6_FL_F_EXCL)
 				goto release;
@@ -513,11 +542,7 @@ int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
 			fl1->linger = fl->linger;
 			if ((long)(fl->expires - fl1->expires) > 0)
 				fl1->expires = fl->expires;
-			write_lock_bh(&ip6_sk_fl_lock);
-			sfl1->fl = fl1;
-			sfl1->next = np->ipv6_fl_list;
-			np->ipv6_fl_list = sfl1;
-			write_unlock_bh(&ip6_sk_fl_lock);
+			fl_link(np, sfl1, fl1);
 			fl_free(fl);
 			return 0;
 
@@ -534,9 +559,9 @@ release:
 		if (sfl1 == NULL || (err = mem_check(sk)) != 0)
 			goto done;
 
-		err = fl_intern(fl, freq.flr_label);
-		if (err)
-			goto done;
+		fl1 = fl_intern(fl, freq.flr_label);
+		if (fl1 != NULL)
+			goto recheck;
 
 		if (!freq.flr_label) {
 			if (copy_to_user(&((struct in6_flowlabel_req __user *) optval)->flr_label,
@@ -545,9 +570,7 @@ release:
 			}
 		}
 
-		sfl1->fl = fl;
-		sfl1->next = np->ipv6_fl_list;
-		np->ipv6_fl_list = sfl1;
+		fl_link(np, sfl1, fl);
 		return 0;
 
 	default:
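
The ip6_flowlabel.c hunks above change fl_intern() to return an already-interned label when one raced in while ip6_fl_lock was dropped, and ipv6_flowlabel_opt() jumps back to the new "recheck" label to treat it like any other pre-existing label. Below is a minimal userspace sketch of that intern-with-recheck pattern; it is not part of the patch, and every name in it is invented for illustration.

/* Userspace analogue of lookup-or-intern under a lock (illustrative only). */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct label {
	unsigned int id;
	int users;
	struct label *next;
};

static struct label *table;
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

static struct label *lookup_locked(unsigned int id)
{
	struct label *l;

	for (l = table; l; l = l->next)
		if (l->id == id)
			return l;
	return NULL;
}

/* Insert 'fresh'; if an entry with the same id raced in first, return it instead. */
static struct label *intern(struct label *fresh)
{
	struct label *old;

	pthread_mutex_lock(&table_lock);
	old = lookup_locked(fresh->id);
	if (old) {
		old->users++;			/* caller rechecks and drops 'fresh' */
		pthread_mutex_unlock(&table_lock);
		return old;
	}
	fresh->next = table;
	table = fresh;
	pthread_mutex_unlock(&table_lock);
	return NULL;				/* NULL means 'fresh' was installed */
}

int main(void)
{
	struct label *a = calloc(1, sizeof(*a)), *b = calloc(1, sizeof(*b));

	a->id = b->id = 42;
	printf("first intern: %p\n", (void *)intern(a));		/* NULL: installed */
	printf("second intern: %p\n", (void *)intern(b));	/* returns 'a' */
	free(b);
	return 0;
}
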
diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c
index 28fc8edfdc3a..80ef2a1d39fd 100644
--- a/net/ipv6/ipcomp6.c
+++ b/net/ipv6/ipcomp6.c
@@ -411,8 +411,15 @@ static int ipcomp6_init_state(struct xfrm_state *x)
 		goto out;
 
 	x->props.header_len = 0;
-	if (x->props.mode == XFRM_MODE_TUNNEL)
+	switch (x->props.mode) {
+	case XFRM_MODE_BEET:
+	case XFRM_MODE_TRANSPORT:
+		break;
+	case XFRM_MODE_TUNNEL:
 		x->props.header_len += sizeof(struct ipv6hdr);
+	default:
+		goto error;
+	}
 
 	mutex_lock(&ipcomp6_resource_mutex);
 	if (!ipcomp6_alloc_scratches())
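
The ah6, esp6 and ipcomp6 init_state hunks replace the single XFRM_MODE_TUNNEL test with a switch that sizes the header for tunnel mode, accepts transport and BEET, and rejects any other mode at state-setup time. Below is a minimal standalone sketch of that mode check; the names and constants are invented for the example and are not the kernel's.

/* Illustrative userspace sketch of the mode check (not kernel code). */
#include <stdio.h>

enum mode { MODE_TRANSPORT, MODE_TUNNEL, MODE_BEET, MODE_ROUTEOPTIMIZATION };

#define IPV6_HDR_LEN 40	/* size of a fixed IPv6 header */

/* Returns extra header bytes to reserve, or -1 for an unsupported mode. */
static int extra_header_len(enum mode m)
{
	switch (m) {
	case MODE_BEET:
	case MODE_TRANSPORT:
		return 0;
	case MODE_TUNNEL:
		return IPV6_HDR_LEN;	/* tunnel mode prepends an outer IPv6 header */
	default:
		return -1;		/* e.g. route optimization: rejected at init time */
	}
}

int main(void)
{
	printf("tunnel: %d, route optimization: %d\n",
	       extra_header_len(MODE_TUNNEL),
	       extra_header_len(MODE_ROUTEOPTIMIZATION));
	return 0;
}
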
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 726fafd41961..e170c67c47a5 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -130,22 +130,6 @@ static inline void frag_kfree_skb(struct sk_buff *skb, unsigned int *work)
 	kfree_skb(skb);
 }
 
-static void nf_frag_free(struct inet_frag_queue *q)
-{
-	kfree(container_of(q, struct nf_ct_frag6_queue, q));
-}
-
-static inline struct nf_ct_frag6_queue *frag_alloc_queue(void)
-{
-	struct nf_ct_frag6_queue *fq;
-
-	fq = kzalloc(sizeof(struct nf_ct_frag6_queue), GFP_ATOMIC);
-	if (fq == NULL)
-		return NULL;
-	atomic_add(sizeof(struct nf_ct_frag6_queue), &nf_frags.mem);
-	return fq;
-}
-
 /* Destruction primitives. */
 
 static __inline__ void fq_put(struct nf_ct_frag6_queue *fq)
@@ -168,7 +152,10 @@ static void nf_ct_frag6_evictor(void)
 
 static void nf_ct_frag6_expire(unsigned long data)
 {
-	struct nf_ct_frag6_queue *fq = (struct nf_ct_frag6_queue *) data;
+	struct nf_ct_frag6_queue *fq;
+
+	fq = container_of((struct inet_frag_queue *)data,
+			struct nf_ct_frag6_queue, q);
 
 	spin_lock(&fq->q.lock);
 
@@ -184,89 +171,29 @@ out:
 
 /* Creation primitives. */
 
-static struct nf_ct_frag6_queue *nf_ct_frag6_intern(unsigned int hash,
-					  struct nf_ct_frag6_queue *fq_in)
+static __inline__ struct nf_ct_frag6_queue *
+fq_find(__be32 id, struct in6_addr *src, struct in6_addr *dst)
 {
-	struct nf_ct_frag6_queue *fq;
-#ifdef CONFIG_SMP
-	struct hlist_node *n;
-#endif
-
-	write_lock(&nf_frags.lock);
-#ifdef CONFIG_SMP
-	hlist_for_each_entry(fq, n, &nf_frags.hash[hash], q.list) {
-		if (fq->id == fq_in->id &&
-		    ipv6_addr_equal(&fq_in->saddr, &fq->saddr) &&
-		    ipv6_addr_equal(&fq_in->daddr, &fq->daddr)) {
-			atomic_inc(&fq->q.refcnt);
-			write_unlock(&nf_frags.lock);
-			fq_in->q.last_in |= COMPLETE;
-			fq_put(fq_in);
-			return fq;
-		}
-	}
-#endif
-	fq = fq_in;
-
-	if (!mod_timer(&fq->q.timer, jiffies + nf_frags_ctl.timeout))
-		atomic_inc(&fq->q.refcnt);
-
-	atomic_inc(&fq->q.refcnt);
-	hlist_add_head(&fq->q.list, &nf_frags.hash[hash]);
-	INIT_LIST_HEAD(&fq->q.lru_list);
-	list_add_tail(&fq->q.lru_list, &nf_frags.lru_list);
-	nf_frags.nqueues++;
-	write_unlock(&nf_frags.lock);
-	return fq;
-}
+	struct inet_frag_queue *q;
+	struct ip6_create_arg arg;
+	unsigned int hash;
 
+	arg.id = id;
+	arg.src = src;
+	arg.dst = dst;
+	hash = ip6qhashfn(id, src, dst);
 
-static struct nf_ct_frag6_queue *
-nf_ct_frag6_create(unsigned int hash, __be32 id, struct in6_addr *src, struct in6_addr *dst)
-{
-	struct nf_ct_frag6_queue *fq;
-
-	if ((fq = frag_alloc_queue()) == NULL) {
-		pr_debug("Can't alloc new queue\n");
+	q = inet_frag_find(&nf_frags, &arg, hash);
+	if (q == NULL)
 		goto oom;
-	}
-
-	fq->id = id;
-	ipv6_addr_copy(&fq->saddr, src);
-	ipv6_addr_copy(&fq->daddr, dst);
-
-	setup_timer(&fq->q.timer, nf_ct_frag6_expire, (unsigned long)fq);
-	spin_lock_init(&fq->q.lock);
-	atomic_set(&fq->q.refcnt, 1);
 
-	return nf_ct_frag6_intern(hash, fq);
+	return container_of(q, struct nf_ct_frag6_queue, q);
 
 oom:
+	pr_debug("Can't alloc new queue\n");
 	return NULL;
 }
 
-static __inline__ struct nf_ct_frag6_queue *
-fq_find(__be32 id, struct in6_addr *src, struct in6_addr *dst)
-{
-	struct nf_ct_frag6_queue *fq;
-	struct hlist_node *n;
-	unsigned int hash = ip6qhashfn(id, src, dst);
-
-	read_lock(&nf_frags.lock);
-	hlist_for_each_entry(fq, n, &nf_frags.hash[hash], q.list) {
-		if (fq->id == id &&
-		    ipv6_addr_equal(src, &fq->saddr) &&
-		    ipv6_addr_equal(dst, &fq->daddr)) {
-			atomic_inc(&fq->q.refcnt);
-			read_unlock(&nf_frags.lock);
-			return fq;
-		}
-	}
-	read_unlock(&nf_frags.lock);
-
-	return nf_ct_frag6_create(hash, id, src, dst);
-}
-
 
 static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb,
 			     struct frag_hdr *fhdr, int nhoff)
@@ -749,9 +676,12 @@ int nf_ct_frag6_init(void)
 {
 	nf_frags.ctl = &nf_frags_ctl;
 	nf_frags.hashfn = nf_hashfn;
-	nf_frags.destructor = nf_frag_free;
+	nf_frags.constructor = ip6_frag_init;
+	nf_frags.destructor = NULL;
 	nf_frags.skb_free = nf_skb_free;
 	nf_frags.qsize = sizeof(struct nf_ct_frag6_queue);
+	nf_frags.match = ip6_frag_match;
+	nf_frags.frag_expire = nf_ct_frag6_expire;
 	inet_frags_init(&nf_frags);
 
 	return 0;
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 6ad19cfc2025..76c88a93b9b5 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -143,6 +143,18 @@ static unsigned int ip6_hashfn(struct inet_frag_queue *q)
 	return ip6qhashfn(fq->id, &fq->saddr, &fq->daddr);
 }
 
+int ip6_frag_match(struct inet_frag_queue *q, void *a)
+{
+	struct frag_queue *fq;
+	struct ip6_create_arg *arg = a;
+
+	fq = container_of(q, struct frag_queue, q);
+	return (fq->id == arg->id &&
+			ipv6_addr_equal(&fq->saddr, arg->src) &&
+			ipv6_addr_equal(&fq->daddr, arg->dst));
+}
+EXPORT_SYMBOL(ip6_frag_match);
+
 /* Memory Tracking Functions. */
 static inline void frag_kfree_skb(struct sk_buff *skb, int *work)
 {
@@ -152,20 +164,16 @@ static inline void frag_kfree_skb(struct sk_buff *skb, int *work)
 	kfree_skb(skb);
 }
 
-static void ip6_frag_free(struct inet_frag_queue *fq)
+void ip6_frag_init(struct inet_frag_queue *q, void *a)
 {
-	kfree(container_of(fq, struct frag_queue, q));
-}
-
-static inline struct frag_queue *frag_alloc_queue(void)
-{
-	struct frag_queue *fq = kzalloc(sizeof(struct frag_queue), GFP_ATOMIC);
+	struct frag_queue *fq = container_of(q, struct frag_queue, q);
+	struct ip6_create_arg *arg = a;
 
-	if(!fq)
-		return NULL;
-	atomic_add(sizeof(struct frag_queue), &ip6_frags.mem);
-	return fq;
+	fq->id = arg->id;
+	ipv6_addr_copy(&fq->saddr, arg->src);
+	ipv6_addr_copy(&fq->daddr, arg->dst);
 }
+EXPORT_SYMBOL(ip6_frag_init);
 
 /* Destruction primitives. */
 
@@ -193,9 +201,11 @@ static void ip6_evictor(struct inet6_dev *idev)
 
 static void ip6_frag_expire(unsigned long data)
 {
-	struct frag_queue *fq = (struct frag_queue *) data;
+	struct frag_queue *fq;
 	struct net_device *dev = NULL;
 
+	fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);
+
 	spin_lock(&fq->q.lock);
 
 	if (fq->q.last_in & COMPLETE)
@@ -230,98 +240,30 @@ out:
 	fq_put(fq);
 }
 
-/* Creation primitives. */
-
-
-static struct frag_queue *ip6_frag_intern(struct frag_queue *fq_in)
+static __inline__ struct frag_queue *
+fq_find(__be32 id, struct in6_addr *src, struct in6_addr *dst,
+	struct inet6_dev *idev)
 {
-	struct frag_queue *fq;
+	struct inet_frag_queue *q;
+	struct ip6_create_arg arg;
 	unsigned int hash;
-#ifdef CONFIG_SMP
-	struct hlist_node *n;
-#endif
 
-	write_lock(&ip6_frags.lock);
-	hash = ip6qhashfn(fq_in->id, &fq_in->saddr, &fq_in->daddr);
-#ifdef CONFIG_SMP
-	hlist_for_each_entry(fq, n, &ip6_frags.hash[hash], q.list) {
-		if (fq->id == fq_in->id &&
-		    ipv6_addr_equal(&fq_in->saddr, &fq->saddr) &&
-		    ipv6_addr_equal(&fq_in->daddr, &fq->daddr)) {
-			atomic_inc(&fq->q.refcnt);
-			write_unlock(&ip6_frags.lock);
-			fq_in->q.last_in |= COMPLETE;
-			fq_put(fq_in);
-			return fq;
-		}
-	}
-#endif
-	fq = fq_in;
-
-	if (!mod_timer(&fq->q.timer, jiffies + ip6_frags_ctl.timeout))
-		atomic_inc(&fq->q.refcnt);
-
-	atomic_inc(&fq->q.refcnt);
-	hlist_add_head(&fq->q.list, &ip6_frags.hash[hash]);
-	INIT_LIST_HEAD(&fq->q.lru_list);
-	list_add_tail(&fq->q.lru_list, &ip6_frags.lru_list);
-	ip6_frags.nqueues++;
-	write_unlock(&ip6_frags.lock);
-	return fq;
-}
-
-
-static struct frag_queue *
-ip6_frag_create(__be32 id, struct in6_addr *src, struct in6_addr *dst,
-		struct inet6_dev *idev)
-{
-	struct frag_queue *fq;
+	arg.id = id;
+	arg.src = src;
+	arg.dst = dst;
+	hash = ip6qhashfn(id, src, dst);
 
-	if ((fq = frag_alloc_queue()) == NULL)
+	q = inet_frag_find(&ip6_frags, &arg, hash);
+	if (q == NULL)
 		goto oom;
 
-	fq->id = id;
-	ipv6_addr_copy(&fq->saddr, src);
-	ipv6_addr_copy(&fq->daddr, dst);
-
-	init_timer(&fq->q.timer);
-	fq->q.timer.function = ip6_frag_expire;
-	fq->q.timer.data = (long) fq;
-	spin_lock_init(&fq->q.lock);
-	atomic_set(&fq->q.refcnt, 1);
-
-	return ip6_frag_intern(fq);
+	return container_of(q, struct frag_queue, q);
 
 oom:
 	IP6_INC_STATS_BH(idev, IPSTATS_MIB_REASMFAILS);
 	return NULL;
 }
 
-static __inline__ struct frag_queue *
-fq_find(__be32 id, struct in6_addr *src, struct in6_addr *dst,
-	struct inet6_dev *idev)
-{
-	struct frag_queue *fq;
-	struct hlist_node *n;
-	unsigned int hash;
-
-	read_lock(&ip6_frags.lock);
-	hash = ip6qhashfn(id, src, dst);
-	hlist_for_each_entry(fq, n, &ip6_frags.hash[hash], q.list) {
-		if (fq->id == id &&
-		    ipv6_addr_equal(src, &fq->saddr) &&
-		    ipv6_addr_equal(dst, &fq->daddr)) {
-			atomic_inc(&fq->q.refcnt);
-			read_unlock(&ip6_frags.lock);
-			return fq;
-		}
-	}
-	read_unlock(&ip6_frags.lock);
-
-	return ip6_frag_create(id, src, dst, idev);
-}
-
-
 static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
 			  struct frag_hdr *fhdr, int nhoff)
 {
@@ -697,8 +639,11 @@ void __init ipv6_frag_init(void)
 
 	ip6_frags.ctl = &ip6_frags_ctl;
 	ip6_frags.hashfn = ip6_hashfn;
-	ip6_frags.destructor = ip6_frag_free;
+	ip6_frags.constructor = ip6_frag_init;
+	ip6_frags.destructor = NULL;
 	ip6_frags.skb_free = NULL;
 	ip6_frags.qsize = sizeof(struct frag_queue);
+	ip6_frags.match = ip6_frag_match;
+	ip6_frags.frag_expire = ip6_frag_expire;
 	inet_frags_init(&ip6_frags);
 }
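
Both reassembly.c and netfilter/nf_conntrack_reasm.c drop their private intern/create/lookup code above and instead register constructor, match and frag_expire callbacks with the shared inet_frag framework, letting inet_frag_find() do the hash lookup and queue creation. The sketch below is a userspace analogue of that callback-driven find-or-create shape, under invented names; it is not the kernel interface.

/* Illustrative userspace analogue of a generic find-or-create hash (not kernel code). */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct queue {
	unsigned int id;
	struct queue *next;
};

struct frags_ops {
	/* fill a freshly allocated queue from the lookup key */
	void (*constructor)(struct queue *q, const void *key);
	/* return nonzero if an existing queue matches the key */
	int (*match)(const struct queue *q, const void *key);
};

static struct queue *frag_find(struct queue **head, const struct frags_ops *ops,
			       const void *key)
{
	struct queue *q;

	for (q = *head; q; q = q->next)
		if (ops->match(q, key))
			return q;		/* existing queue reused */

	q = calloc(1, sizeof(*q));
	if (!q)
		return NULL;
	ops->constructor(q, key);
	q->next = *head;
	*head = q;
	return q;				/* newly created queue */
}

/* Per-protocol callbacks, analogous in spirit to ip6_frag_init()/ip6_frag_match(). */
static void ctor(struct queue *q, const void *key)
{
	memcpy(&q->id, key, sizeof(q->id));
}

static int match(const struct queue *q, const void *key)
{
	return q->id == *(const unsigned int *)key;
}

int main(void)
{
	static const struct frags_ops ops = { .constructor = ctor, .match = match };
	struct queue *head = NULL;
	unsigned int key = 7;

	printf("%p\n", (void *)frag_find(&head, &ops, &key));
	printf("%p\n", (void *)frag_find(&head, &ops, &key));	/* same pointer */
	return 0;
}
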
diff --git a/net/ipv6/xfrm6_input.c b/net/ipv6/xfrm6_input.c
index 02f69e544f6f..515783707e86 100644
--- a/net/ipv6/xfrm6_input.c
+++ b/net/ipv6/xfrm6_input.c
@@ -16,7 +16,7 @@
 #include <net/ipv6.h>
 #include <net/xfrm.h>
 
-int xfrm6_rcv_spi(struct sk_buff *skb, __be32 spi)
+int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi)
 {
 	int err;
 	__be32 seq;
@@ -24,11 +24,9 @@ int xfrm6_rcv_spi(struct sk_buff *skb, __be32 spi)
 	struct xfrm_state *x;
 	int xfrm_nr = 0;
 	int decaps = 0;
-	int nexthdr;
 	unsigned int nhoff;
 
 	nhoff = IP6CB(skb)->nhoff;
-	nexthdr = skb_network_header(skb)[nhoff];
 
 	seq = 0;
 	if (!spi && (err = xfrm_parse_spi(skb, nexthdr, &spi, &seq)) != 0)
@@ -41,7 +39,7 @@ int xfrm6_rcv_spi(struct sk_buff *skb, __be32 spi)
 		goto drop;
 
 	x = xfrm_state_lookup((xfrm_address_t *)&iph->daddr, spi,
-			      nexthdr != IPPROTO_IPIP ? nexthdr : IPPROTO_IPV6, AF_INET6);
+			      nexthdr, AF_INET6);
 	if (x == NULL)
 		goto drop;
 	spin_lock(&x->lock);
@@ -70,10 +68,10 @@ int xfrm6_rcv_spi(struct sk_buff *skb, __be32 spi)
 
 		xfrm_vec[xfrm_nr++] = x;
 
-		if (x->mode->input(x, skb))
+		if (x->outer_mode->input(x, skb))
 			goto drop;
 
-		if (x->props.mode == XFRM_MODE_TUNNEL) { /* XXX */
+		if (x->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL) {
 			decaps = 1;
 			break;
 		}
@@ -99,7 +97,6 @@ int xfrm6_rcv_spi(struct sk_buff *skb, __be32 spi)
 	memcpy(skb->sp->xvec + skb->sp->len, xfrm_vec,
 	       xfrm_nr * sizeof(xfrm_vec[0]));
 	skb->sp->len += xfrm_nr;
-	skb->ip_summed = CHECKSUM_NONE;
 
 	nf_reset(skb);
 
@@ -135,7 +132,8 @@ EXPORT_SYMBOL(xfrm6_rcv_spi);
 
 int xfrm6_rcv(struct sk_buff *skb)
 {
-	return xfrm6_rcv_spi(skb, 0);
+	return xfrm6_rcv_spi(skb, skb_network_header(skb)[IP6CB(skb)->nhoff],
+			     0);
 }
 
 EXPORT_SYMBOL(xfrm6_rcv);
diff --git a/net/ipv6/xfrm6_mode_beet.c b/net/ipv6/xfrm6_mode_beet.c
index 13bb1e856764..2bfb4f05c14c 100644
--- a/net/ipv6/xfrm6_mode_beet.c
+++ b/net/ipv6/xfrm6_mode_beet.c
@@ -79,6 +79,7 @@ static struct xfrm_mode xfrm6_beet_mode = {
 	.output = xfrm6_beet_output,
 	.owner = THIS_MODULE,
 	.encap = XFRM_MODE_BEET,
+	.flags = XFRM_MODE_FLAG_TUNNEL,
 };
 
 static int __init xfrm6_beet_init(void)
diff --git a/net/ipv6/xfrm6_mode_ro.c b/net/ipv6/xfrm6_mode_ro.c
index 957ae36b6695..a7bc8c62317a 100644
--- a/net/ipv6/xfrm6_mode_ro.c
+++ b/net/ipv6/xfrm6_mode_ro.c
@@ -58,16 +58,7 @@ static int xfrm6_ro_output(struct xfrm_state *x, struct sk_buff *skb)
 	return 0;
 }
 
-/*
- * Do nothing about routing optimization header unlike IPsec.
- */
-static int xfrm6_ro_input(struct xfrm_state *x, struct sk_buff *skb)
-{
-	return 0;
-}
-
 static struct xfrm_mode xfrm6_ro_mode = {
-	.input = xfrm6_ro_input,
 	.output = xfrm6_ro_output,
 	.owner = THIS_MODULE,
 	.encap = XFRM_MODE_ROUTEOPTIMIZATION,
diff --git a/net/ipv6/xfrm6_mode_tunnel.c b/net/ipv6/xfrm6_mode_tunnel.c
index ea2283879112..fd84e2217274 100644
--- a/net/ipv6/xfrm6_mode_tunnel.c
+++ b/net/ipv6/xfrm6_mode_tunnel.c
@@ -118,6 +118,7 @@ static struct xfrm_mode xfrm6_tunnel_mode = {
 	.output = xfrm6_tunnel_output,
 	.owner = THIS_MODULE,
 	.encap = XFRM_MODE_TUNNEL,
+	.flags = XFRM_MODE_FLAG_TUNNEL,
 };
 
 static int __init xfrm6_tunnel_init(void)
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
index a5a32c17249d..656976760ad4 100644
--- a/net/ipv6/xfrm6_output.c
+++ b/net/ipv6/xfrm6_output.c
@@ -50,7 +50,7 @@ static inline int xfrm6_output_one(struct sk_buff *skb)
 	struct ipv6hdr *iph;
 	int err;
 
-	if (x->props.mode == XFRM_MODE_TUNNEL) {
+	if (x->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL) {
 		err = xfrm6_tunnel_check_size(skb);
 		if (err)
 			goto error_nolock;
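
In xfrm6_input.c and xfrm6_output.c the tunnel-mode tests now check XFRM_MODE_FLAG_TUNNEL on x->outer_mode instead of comparing x->props.mode, and the BEET and tunnel mode objects set that flag. Below is a small sketch of that flag-based capability check; the struct and names are invented for illustration, not the kernel's.

/* Illustrative sketch of a per-mode capability flag (not kernel code). */
#include <stdio.h>

#define MODE_FLAG_TUNNEL 0x1	/* mode wraps the packet in an outer header */

struct mode_desc {
	const char *name;
	unsigned int flags;
};

static const struct mode_desc transport = { "transport", 0 };
static const struct mode_desc tunnel    = { "tunnel",    MODE_FLAG_TUNNEL };
static const struct mode_desc beet      = { "beet",      MODE_FLAG_TUNNEL };

/* Callers test the capability flag instead of enumerating mode identities. */
static int needs_outer_header(const struct mode_desc *m)
{
	return (m->flags & MODE_FLAG_TUNNEL) != 0;
}

int main(void)
{
	const struct mode_desc *modes[] = { &transport, &tunnel, &beet };
	unsigned int i;

	for (i = 0; i < sizeof(modes) / sizeof(modes[0]); i++)
		printf("%s: %d\n", modes[i]->name, needs_outer_header(modes[i]));
	return 0;
}
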
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 15aa4c58c315..82e27b80d07d 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -178,8 +178,7 @@ __xfrm6_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int
 		__xfrm6_bundle_len_inc(&header_len, &nfheader_len, xfrm[i]);
 		trailer_len += xfrm[i]->props.trailer_len;
 
-		if (xfrm[i]->props.mode == XFRM_MODE_TUNNEL ||
-		    xfrm[i]->props.mode == XFRM_MODE_ROUTEOPTIMIZATION) {
+		if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
 			unsigned short encap_family = xfrm[i]->props.family;
 			switch(encap_family) {
 			case AF_INET:
@@ -215,7 +214,6 @@ __xfrm6_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int
 	i = 0;
 	for (; dst_prev != &rt->u.dst; dst_prev = dst_prev->child) {
 		struct xfrm_dst *x = (struct xfrm_dst*)dst_prev;
-		struct xfrm_state_afinfo *afinfo;
 
 		dst_prev->xfrm = xfrm[i++];
 		dst_prev->dev = rt->u.dst.dev;
@@ -232,18 +230,7 @@ __xfrm6_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int
 		/* Copy neighbour for reachability confirmation */
 		dst_prev->neighbour = neigh_clone(rt->u.dst.neighbour);
 		dst_prev->input = rt->u.dst.input;
-		/* XXX: When IPv4 is implemented as module and can be unloaded,
-		 * we should manage reference to xfrm4_output in afinfo->output.
-		 * Miyazawa
-		 */
-		afinfo = xfrm_state_get_afinfo(dst_prev->xfrm->props.family);
-		if (!afinfo) {
-			dst = *dst_p;
-			goto error;
-		}
-
-		dst_prev->output = afinfo->output;
-		xfrm_state_put_afinfo(afinfo);
+		dst_prev->output = dst_prev->xfrm->outer_mode->afinfo->output;
 		/* Sheit... I remember I did this right. Apparently,
 		 * it was magically lost, so this code needs audit */
 		x->u.rt6.rt6i_flags = rt0->rt6i_flags&(RTCF_BROADCAST|RTCF_MULTICAST|RTCF_LOCAL);
diff --git a/net/ipv6/xfrm6_state.c b/net/ipv6/xfrm6_state.c
index cdadb4847469..b392bee396f1 100644
--- a/net/ipv6/xfrm6_state.c
+++ b/net/ipv6/xfrm6_state.c
@@ -93,7 +93,8 @@ __xfrm6_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n)
 	/* Rule 4: select IPsec tunnel */
 	for (i = 0; i < n; i++) {
 		if (src[i] &&
-		    src[i]->props.mode == XFRM_MODE_TUNNEL) {
+		    (src[i]->props.mode == XFRM_MODE_TUNNEL ||
+		     src[i]->props.mode == XFRM_MODE_BEET)) {
 			dst[j++] = src[i];
 			src[i] = NULL;
 		}
@@ -146,7 +147,8 @@ __xfrm6_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n)
 	/* Rule 3: select IPsec tunnel */
 	for (i = 0; i < n; i++) {
 		if (src[i] &&
-		    src[i]->mode == XFRM_MODE_TUNNEL) {
+		    (src[i]->mode == XFRM_MODE_TUNNEL ||
+		     src[i]->mode == XFRM_MODE_BEET)) {
 			dst[j++] = src[i];
 			src[i] = NULL;
 		}
@@ -168,6 +170,7 @@ __xfrm6_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n)
 
 static struct xfrm_state_afinfo xfrm6_state_afinfo = {
 	.family = AF_INET6,
+	.owner = THIS_MODULE,
 	.init_tempsel = __xfrm6_init_tempsel,
 	.tmpl_sort = __xfrm6_tmpl_sort,
 	.state_sort = __xfrm6_state_sort,
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
index 3f8a3abde67e..fae90ff31087 100644
--- a/net/ipv6/xfrm6_tunnel.c
+++ b/net/ipv6/xfrm6_tunnel.c
@@ -248,7 +248,7 @@ static int xfrm6_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
 
 static int xfrm6_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
 {
-	return 0;
+	return skb_network_header(skb)[IP6CB(skb)->nhoff];
 }
 
 static int xfrm6_tunnel_rcv(struct sk_buff *skb)
@@ -257,7 +257,7 @@ static int xfrm6_tunnel_rcv(struct sk_buff *skb)
 	__be32 spi;
 
 	spi = xfrm6_tunnel_spi_lookup((xfrm_address_t *)&iph->saddr);
-	return xfrm6_rcv_spi(skb, spi) > 0 ? : 0;
+	return xfrm6_rcv_spi(skb, IPPROTO_IPV6, spi) > 0 ? : 0;
 }
 
 static int xfrm6_tunnel_err(struct sk_buff *skb, struct inet6_skb_parm *opt,