-rw-r--r--   include/net/ipv6.h                        19
-rw-r--r--   net/ipv6/netfilter/nf_conntrack_reasm.c   74
-rw-r--r--   net/ipv6/reassembly.c                     63
3 files changed, 57 insertions, 99 deletions
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 9bed5d483405..81d4455f6e14 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -411,6 +411,25 @@ struct ip6_create_arg {
 void ip6_frag_init(struct inet_frag_queue *q, void *a);
 bool ip6_frag_match(struct inet_frag_queue *q, void *a);
 
+/*
+ *	Equivalent of ipv4 struct ip
+ */
+struct frag_queue {
+	struct inet_frag_queue	q;
+
+	__be32			id;		/* fragment id		*/
+	u32			user;
+	struct in6_addr		saddr;
+	struct in6_addr		daddr;
+
+	int			iif;
+	unsigned int		csum;
+	__u16			nhoffset;
+};
+
+void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq,
+			   struct inet_frags *frags);
+
 static inline bool ipv6_addr_any(const struct in6_addr *a)
 {
 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
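
The header hunk above is the whole interface added by this patch: one shared struct frag_queue for both IPv6 reassembly paths, plus a single exported expiry helper. A minimal sketch of the calling convention, assuming a hypothetical owner named my_frags (the real callers, nf_frags and ip6_frags, are converted in the two files below):

	/* Sketch only: how an inet_frags owner is expected to use the shared
	 * type and helper declared above. "my_frags" and "my_frag_expire"
	 * are illustrative names, not part of the patch.
	 */
	static struct inet_frags my_frags;

	static void my_frag_expire(unsigned long data)
	{
		struct frag_queue *fq;
		struct net *net;

		/* The expire timer carries a pointer to the embedded
		 * inet_frag_queue; recover the enclosing frag_queue and its
		 * network namespace first.
		 */
		fq = container_of((struct inet_frag_queue *)data,
				  struct frag_queue, q);
		net = container_of(fq->q.net, struct net, ipv6.frags);

		/* Shared helper: takes the queue lock, kills the queue and
		 * drops the reference held for the timer.
		 */
		ip6_expire_frag_queue(net, fq, &my_frags);
	}
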
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index f40f327ccc0c..54274c33a0b1 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -57,19 +57,6 @@ struct nf_ct_frag6_skb_cb
 
 #define NFCT_FRAG6_CB(skb)	((struct nf_ct_frag6_skb_cb*)((skb)->cb))
 
-struct nf_ct_frag6_queue
-{
-	struct inet_frag_queue	q;
-
-	__be32			id;		/* fragment id		*/
-	u32			user;
-	struct in6_addr		saddr;
-	struct in6_addr		daddr;
-
-	unsigned int		csum;
-	__u16			nhoffset;
-};
-
 static struct inet_frags nf_frags;
 
 #ifdef CONFIG_SYSCTL
@@ -151,9 +138,9 @@ static void __net_exit nf_ct_frags6_sysctl_unregister(struct net *net)
 
 static unsigned int nf_hashfn(struct inet_frag_queue *q)
 {
-	const struct nf_ct_frag6_queue *nq;
+	const struct frag_queue *nq;
 
-	nq = container_of(q, struct nf_ct_frag6_queue, q);
+	nq = container_of(q, struct frag_queue, q);
 	return inet6_hash_frag(nq->id, &nq->saddr, &nq->daddr, nf_frags.rnd);
 }
 
@@ -163,44 +150,21 @@ static void nf_skb_free(struct sk_buff *skb)
 		kfree_skb(NFCT_FRAG6_CB(skb)->orig);
 }
 
-/* Destruction primitives. */
-
-static __inline__ void fq_put(struct nf_ct_frag6_queue *fq)
-{
-	inet_frag_put(&fq->q, &nf_frags);
-}
-
-/* Kill fq entry. It is not destroyed immediately,
- * because caller (and someone more) holds reference count.
- */
-static __inline__ void fq_kill(struct nf_ct_frag6_queue *fq)
-{
-	inet_frag_kill(&fq->q, &nf_frags);
-}
-
 static void nf_ct_frag6_expire(unsigned long data)
 {
-	struct nf_ct_frag6_queue *fq;
-
-	fq = container_of((struct inet_frag_queue *)data,
-			  struct nf_ct_frag6_queue, q);
+	struct frag_queue *fq;
+	struct net *net;
 
-	spin_lock(&fq->q.lock);
+	fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);
+	net = container_of(fq->q.net, struct net, nf_frag.frags);
 
-	if (fq->q.last_in & INET_FRAG_COMPLETE)
-		goto out;
-
-	fq_kill(fq);
-
-out:
-	spin_unlock(&fq->q.lock);
-	fq_put(fq);
+	ip6_expire_frag_queue(net, fq, &nf_frags);
 }
 
 /* Creation primitives. */
-static inline struct nf_ct_frag6_queue *fq_find(struct net *net, __be32 id,
+static inline struct frag_queue *fq_find(struct net *net, __be32 id,
 						u32 user, struct in6_addr *src,
 						struct in6_addr *dst)
 {
 	struct inet_frag_queue *q;
 	struct ip6_create_arg arg;
@@ -219,14 +183,14 @@ static inline struct nf_ct_frag6_queue *fq_find(struct net *net, __be32 id,
 	if (q == NULL)
 		goto oom;
 
-	return container_of(q, struct nf_ct_frag6_queue, q);
+	return container_of(q, struct frag_queue, q);
 
 oom:
 	return NULL;
 }
 
 
-static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb,
+static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb,
 			     const struct frag_hdr *fhdr, int nhoff)
 {
 	struct sk_buff *prev, *next;
@@ -367,7 +331,7 @@ found:
 	return 0;
 
 discard_fq:
-	fq_kill(fq);
+	inet_frag_kill(&fq->q, &nf_frags);
 err:
 	return -1;
 }
@@ -382,12 +346,12 @@ err:
  *	the last and the first frames arrived and all the bits are here.
  */
 static struct sk_buff *
-nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev)
+nf_ct_frag6_reasm(struct frag_queue *fq, struct net_device *dev)
 {
 	struct sk_buff *fp, *op, *head = fq->q.fragments;
 	int    payload_len;
 
-	fq_kill(fq);
+	inet_frag_kill(&fq->q, &nf_frags);
 
 	WARN_ON(head == NULL);
 	WARN_ON(NFCT_FRAG6_CB(head)->offset != 0);
@@ -570,7 +534,7 @@ struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user)
 	struct net *net = skb_dst(skb) ? dev_net(skb_dst(skb)->dev)
 				       : dev_net(skb->dev);
 	struct frag_hdr *fhdr;
-	struct nf_ct_frag6_queue *fq;
+	struct frag_queue *fq;
 	struct ipv6hdr *hdr;
 	int fhoff, nhoff;
 	u8 prevhdr;
@@ -619,7 +583,7 @@ struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user)
 	if (nf_ct_frag6_queue(fq, clone, fhdr, nhoff) < 0) {
 		spin_unlock_bh(&fq->q.lock);
 		pr_debug("Can't insert skb to queue\n");
-		fq_put(fq);
+		inet_frag_put(&fq->q, &nf_frags);
 		goto ret_orig;
 	}
 
@@ -631,7 +595,7 @@ struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user)
 	}
 	spin_unlock_bh(&fq->q.lock);
 
-	fq_put(fq);
+	inet_frag_put(&fq->q, &nf_frags);
 	return ret_skb;
 
 ret_orig:
@@ -695,7 +659,7 @@ int nf_ct_frag6_init(void)
 	nf_frags.constructor = ip6_frag_init;
 	nf_frags.destructor = NULL;
 	nf_frags.skb_free = nf_skb_free;
-	nf_frags.qsize = sizeof(struct nf_ct_frag6_queue);
+	nf_frags.qsize = sizeof(struct frag_queue);
 	nf_frags.match = ip6_frag_match;
 	nf_frags.frag_expire = nf_ct_frag6_expire;
 	nf_frags.secret_interval = 10 * 60 * HZ;
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 4ff9af628e72..0ee553354ed5 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -65,24 +65,6 @@ struct ip6frag_skb_cb
 #define FRAG6_CB(skb)	((struct ip6frag_skb_cb*)((skb)->cb))
 
 
-/*
- *	Equivalent of ipv4 struct ipq
- */
-
-struct frag_queue
-{
-	struct inet_frag_queue	q;
-
-	__be32			id;		/* fragment id		*/
-	u32			user;
-	struct in6_addr		saddr;
-	struct in6_addr		daddr;
-
-	int			iif;
-	unsigned int		csum;
-	__u16			nhoffset;
-};
-
 static struct inet_frags ip6_frags;
 
 int ip6_frag_nqueues(struct net *net)
@@ -159,21 +141,6 @@ void ip6_frag_init(struct inet_frag_queue *q, void *a)
 }
 EXPORT_SYMBOL(ip6_frag_init);
 
-/* Destruction primitives. */
-
-static __inline__ void fq_put(struct frag_queue *fq)
-{
-	inet_frag_put(&fq->q, &ip6_frags);
-}
-
-/* Kill fq entry. It is not destroyed immediately,
- * because caller (and someone more) holds reference count.
- */
-static __inline__ void fq_kill(struct frag_queue *fq)
-{
-	inet_frag_kill(&fq->q, &ip6_frags);
-}
-
 static void ip6_evictor(struct net *net, struct inet6_dev *idev)
 {
 	int evicted;
@@ -183,22 +150,18 @@ static void ip6_evictor(struct net *net, struct inet6_dev *idev)
 		IP6_ADD_STATS_BH(net, idev, IPSTATS_MIB_REASMFAILS, evicted);
 }
 
-static void ip6_frag_expire(unsigned long data)
+void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq,
+			   struct inet_frags *frags)
 {
-	struct frag_queue *fq;
 	struct net_device *dev = NULL;
-	struct net *net;
-
-	fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);
 
 	spin_lock(&fq->q.lock);
 
 	if (fq->q.last_in & INET_FRAG_COMPLETE)
 		goto out;
 
-	fq_kill(fq);
+	inet_frag_kill(&fq->q, frags);
 
-	net = container_of(fq->q.net, struct net, ipv6.frags);
 	rcu_read_lock();
 	dev = dev_get_by_index_rcu(net, fq->iif);
 	if (!dev)
@@ -222,7 +185,19 @@ out_rcu_unlock:
 	rcu_read_unlock();
 out:
 	spin_unlock(&fq->q.lock);
-	fq_put(fq);
+	inet_frag_put(&fq->q, frags);
+}
+EXPORT_SYMBOL(ip6_expire_frag_queue);
+
+static void ip6_frag_expire(unsigned long data)
+{
+	struct frag_queue *fq;
+	struct net *net;
+
+	fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);
+	net = container_of(fq->q.net, struct net, ipv6.frags);
+
+	ip6_expire_frag_queue(net, fq, &ip6_frags);
 }
 
 static __inline__ struct frag_queue *
227 202
228static __inline__ struct frag_queue * 203static __inline__ struct frag_queue *
@@ -391,7 +366,7 @@ found:
 	return -1;
 
 discard_fq:
-	fq_kill(fq);
+	inet_frag_kill(&fq->q, &ip6_frags);
 err:
 	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
 		      IPSTATS_MIB_REASMFAILS);
@@ -417,7 +392,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
 	unsigned int nhoff;
 	int sum_truesize;
 
-	fq_kill(fq);
+	inet_frag_kill(&fq->q, &ip6_frags);
 
 	/* Make the one we just received the head. */
 	if (prev) {
@@ -586,7 +561,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
 		ret = ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff);
 
 		spin_unlock(&fq->q.lock);
-		fq_put(fq);
+		inet_frag_put(&fq->q, &ip6_frags);
 		return ret;
 	}
 