author     Amerigo Wang <amwang@redhat.com>       2012-09-18 12:50:09 -0400
committer  David S. Miller <davem@davemloft.net>  2012-09-19 17:23:28 -0400
commit     b836c99fd6c9dfe52a69fa0ba36ec918f80ce02a
tree       cb3459103371712b73c8b7695cf05d8f936480a2  /net/ipv6
parent     c038a767cd697238b09f7a4ea5a504b4891774e9
ipv6: unify conntrack reassembly expire code with standard one
Two years ago, Shan Wei tried to fix this:
http://patchwork.ozlabs.org/patch/43905/
The problem is that RFC2460 requires that an ICMP Time
Exceeded -- Fragment Reassembly Time Exceeded message be
sent to the source of that fragment if the defragmentation
times out:
"
If insufficient fragments are received to complete reassembly of a
packet within 60 seconds of the reception of the first-arriving
fragment of that packet, reassembly of that packet must be
abandoned and all the fragments that have been received for that
packet must be discarded. If the first fragment (i.e., the one
with a Fragment Offset of zero) has been received, an ICMP Time
Exceeded -- Fragment Reassembly Time Exceeded message should be
sent to the source of that fragment.
"
As Herbert suggested, we could actually use the standard IPv6
reassembly code which follows RFC2460.
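The standard path already implements the required behaviour in its
timeout handler: once a queue expires it is killed and, if the
offset-zero fragment has arrived, an ICMPv6 Time Exceeded (code 1,
fragment reassembly time exceeded) is sent back to the source. Roughly,
and with the device lookup under RCU and the SNMP counters elided, the
shared helper introduced below (ip6_expire_frag_queue) does the
following; this is a condensed sketch of the existing ip6_frag_expire()
logic, not the exact function body:

	void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq,
				   struct inet_frags *frags)
	{
		spin_lock(&fq->q.lock);

		if (fq->q.last_in & INET_FRAG_COMPLETE)
			goto out;

		inet_frag_kill(&fq->q, frags);

		/* RFC 2460: only reply if the first (offset 0) fragment arrived. */
		if ((fq->q.last_in & INET_FRAG_FIRST_IN) && fq->q.fragments)
			icmpv6_send(fq->q.fragments, ICMPV6_TIME_EXCEED,
				    ICMPV6_EXC_FRAGTIME, 0);
	out:
		spin_unlock(&fq->q.lock);
		inet_frag_put(&fq->q, frags);
	}

(Sketch only: as the diff below shows around the out_rcu_unlock label,
the real function also resolves the incoming device under RCU and
accounts the reassembly failure before sending the error.)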
With this patch applied, I can see the ICMP Time Exceeded message
sent from the receiver when the sender sends out only 3 of the 4
fragments of a fragmented IPv6 UDP packet.
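Concretely, the netfilter side no longer needs its own queue structure
or expire logic; its timeout handler becomes a thin wrapper, condensed
here from the diff below:

	static void nf_ct_frag6_expire(unsigned long data)
	{
		struct frag_queue *fq;
		struct net *net;

		fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);
		net = container_of(fq->q.net, struct net, nf_frag.frags);

		ip6_expire_frag_queue(net, fq, &nf_frags);
	}

The only difference from the plain IPv6 handler is which struct net
member (nf_frag.frags vs. ipv6.frags) and which inet_frags instance
(nf_frags vs. ip6_frags) are passed in.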
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: Michal Kubeček <mkubecek@suse.cz>
Cc: David Miller <davem@davemloft.net>
Cc: Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org>
Cc: Patrick McHardy <kaber@trash.net>
Cc: Pablo Neira Ayuso <pablo@netfilter.org>
Cc: netfilter-devel@vger.kernel.org
Signed-off-by: Cong Wang <amwang@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv6')
-rw-r--r--  net/ipv6/netfilter/nf_conntrack_reasm.c  74
-rw-r--r--  net/ipv6/reassembly.c                     63
2 files changed, 38 insertions, 99 deletions
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index f40f327ccc0c..54274c33a0b1 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -57,19 +57,6 @@ struct nf_ct_frag6_skb_cb
 
 #define NFCT_FRAG6_CB(skb) ((struct nf_ct_frag6_skb_cb*)((skb)->cb))
 
-struct nf_ct_frag6_queue
-{
-	struct inet_frag_queue q;
-
-	__be32 id;		/* fragment id */
-	u32 user;
-	struct in6_addr saddr;
-	struct in6_addr daddr;
-
-	unsigned int csum;
-	__u16 nhoffset;
-};
-
 static struct inet_frags nf_frags;
 
 #ifdef CONFIG_SYSCTL
@@ -151,9 +138,9 @@ static void __net_exit nf_ct_frags6_sysctl_unregister(struct net *net)
 
 static unsigned int nf_hashfn(struct inet_frag_queue *q)
 {
-	const struct nf_ct_frag6_queue *nq;
+	const struct frag_queue *nq;
 
-	nq = container_of(q, struct nf_ct_frag6_queue, q);
+	nq = container_of(q, struct frag_queue, q);
 	return inet6_hash_frag(nq->id, &nq->saddr, &nq->daddr, nf_frags.rnd);
 }
 
@@ -163,44 +150,21 @@ static void nf_skb_free(struct sk_buff *skb)
 		kfree_skb(NFCT_FRAG6_CB(skb)->orig);
 }
 
-/* Destruction primitives. */
-
-static __inline__ void fq_put(struct nf_ct_frag6_queue *fq)
-{
-	inet_frag_put(&fq->q, &nf_frags);
-}
-
-/* Kill fq entry. It is not destroyed immediately,
- * because caller (and someone more) holds reference count.
- */
-static __inline__ void fq_kill(struct nf_ct_frag6_queue *fq)
-{
-	inet_frag_kill(&fq->q, &nf_frags);
-}
-
 static void nf_ct_frag6_expire(unsigned long data)
 {
-	struct nf_ct_frag6_queue *fq;
-
-	fq = container_of((struct inet_frag_queue *)data,
-			struct nf_ct_frag6_queue, q);
+	struct frag_queue *fq;
+	struct net *net;
 
-	spin_lock(&fq->q.lock);
+	fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);
+	net = container_of(fq->q.net, struct net, nf_frag.frags);
 
-	if (fq->q.last_in & INET_FRAG_COMPLETE)
-		goto out;
-
-	fq_kill(fq);
-
-out:
-	spin_unlock(&fq->q.lock);
-	fq_put(fq);
+	ip6_expire_frag_queue(net, fq, &nf_frags);
 }
 
 /* Creation primitives. */
-static inline struct nf_ct_frag6_queue *fq_find(struct net *net, __be32 id,
+static inline struct frag_queue *fq_find(struct net *net, __be32 id,
 						u32 user, struct in6_addr *src,
 						struct in6_addr *dst)
 {
 	struct inet_frag_queue *q;
 	struct ip6_create_arg arg;
@@ -219,14 +183,14 @@ static inline struct nf_ct_frag6_queue *fq_find(struct net *net, __be32 id,
 	if (q == NULL)
 		goto oom;
 
-	return container_of(q, struct nf_ct_frag6_queue, q);
+	return container_of(q, struct frag_queue, q);
 
 oom:
 	return NULL;
 }
 
 
-static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb,
+static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb,
 			     const struct frag_hdr *fhdr, int nhoff)
 {
 	struct sk_buff *prev, *next;
@@ -367,7 +331,7 @@ found:
 	return 0;
 
 discard_fq:
-	fq_kill(fq);
+	inet_frag_kill(&fq->q, &nf_frags);
 err:
 	return -1;
 }
@@ -382,12 +346,12 @@ err:
  * the last and the first frames arrived and all the bits are here.
  */
 static struct sk_buff *
-nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev)
+nf_ct_frag6_reasm(struct frag_queue *fq, struct net_device *dev)
 {
 	struct sk_buff *fp, *op, *head = fq->q.fragments;
 	int payload_len;
 
-	fq_kill(fq);
+	inet_frag_kill(&fq->q, &nf_frags);
 
 	WARN_ON(head == NULL);
 	WARN_ON(NFCT_FRAG6_CB(head)->offset != 0);
@@ -570,7 +534,7 @@ struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user)
 	struct net *net = skb_dst(skb) ? dev_net(skb_dst(skb)->dev)
 				       : dev_net(skb->dev);
 	struct frag_hdr *fhdr;
-	struct nf_ct_frag6_queue *fq;
+	struct frag_queue *fq;
 	struct ipv6hdr *hdr;
 	int fhoff, nhoff;
 	u8 prevhdr;
@@ -619,7 +583,7 @@ struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user)
 	if (nf_ct_frag6_queue(fq, clone, fhdr, nhoff) < 0) {
 		spin_unlock_bh(&fq->q.lock);
 		pr_debug("Can't insert skb to queue\n");
-		fq_put(fq);
+		inet_frag_put(&fq->q, &nf_frags);
 		goto ret_orig;
 	}
 
@@ -631,7 +595,7 @@ struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user)
 	}
 	spin_unlock_bh(&fq->q.lock);
 
-	fq_put(fq);
+	inet_frag_put(&fq->q, &nf_frags);
 	return ret_skb;
 
 ret_orig:
@@ -695,7 +659,7 @@ int nf_ct_frag6_init(void)
 	nf_frags.constructor = ip6_frag_init;
 	nf_frags.destructor = NULL;
 	nf_frags.skb_free = nf_skb_free;
-	nf_frags.qsize = sizeof(struct nf_ct_frag6_queue);
+	nf_frags.qsize = sizeof(struct frag_queue);
 	nf_frags.match = ip6_frag_match;
 	nf_frags.frag_expire = nf_ct_frag6_expire;
 	nf_frags.secret_interval = 10 * 60 * HZ;
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 4ff9af628e72..0ee553354ed5 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -65,24 +65,6 @@ struct ip6frag_skb_cb
 #define FRAG6_CB(skb) ((struct ip6frag_skb_cb*)((skb)->cb))
 
 
-/*
- * Equivalent of ipv4 struct ipq
- */
-
-struct frag_queue
-{
-	struct inet_frag_queue q;
-
-	__be32 id;		/* fragment id */
-	u32 user;
-	struct in6_addr saddr;
-	struct in6_addr daddr;
-
-	int iif;
-	unsigned int csum;
-	__u16 nhoffset;
-};
-
 static struct inet_frags ip6_frags;
 
 int ip6_frag_nqueues(struct net *net)
@@ -159,21 +141,6 @@ void ip6_frag_init(struct inet_frag_queue *q, void *a)
 }
 EXPORT_SYMBOL(ip6_frag_init);
 
-/* Destruction primitives. */
-
-static __inline__ void fq_put(struct frag_queue *fq)
-{
-	inet_frag_put(&fq->q, &ip6_frags);
-}
-
-/* Kill fq entry. It is not destroyed immediately,
- * because caller (and someone more) holds reference count.
- */
-static __inline__ void fq_kill(struct frag_queue *fq)
-{
-	inet_frag_kill(&fq->q, &ip6_frags);
-}
-
 static void ip6_evictor(struct net *net, struct inet6_dev *idev)
 {
 	int evicted;
@@ -183,22 +150,18 @@ static void ip6_evictor(struct net *net, struct inet6_dev *idev)
 	IP6_ADD_STATS_BH(net, idev, IPSTATS_MIB_REASMFAILS, evicted);
 }
 
-static void ip6_frag_expire(unsigned long data)
+void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq,
+			   struct inet_frags *frags)
 {
-	struct frag_queue *fq;
 	struct net_device *dev = NULL;
-	struct net *net;
-
-	fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);
 
 	spin_lock(&fq->q.lock);
 
 	if (fq->q.last_in & INET_FRAG_COMPLETE)
 		goto out;
 
-	fq_kill(fq);
+	inet_frag_kill(&fq->q, frags);
 
-	net = container_of(fq->q.net, struct net, ipv6.frags);
 	rcu_read_lock();
 	dev = dev_get_by_index_rcu(net, fq->iif);
 	if (!dev)
@@ -222,7 +185,19 @@ out_rcu_unlock:
 	rcu_read_unlock();
 out:
 	spin_unlock(&fq->q.lock);
-	fq_put(fq);
+	inet_frag_put(&fq->q, frags);
+}
+EXPORT_SYMBOL(ip6_expire_frag_queue);
+
+static void ip6_frag_expire(unsigned long data)
+{
+	struct frag_queue *fq;
+	struct net *net;
+
+	fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);
+	net = container_of(fq->q.net, struct net, ipv6.frags);
+
+	ip6_expire_frag_queue(net, fq, &ip6_frags);
 }
 
 static __inline__ struct frag_queue *
@@ -391,7 +366,7 @@ found:
 	return -1;
 
 discard_fq:
-	fq_kill(fq);
+	inet_frag_kill(&fq->q, &ip6_frags);
 err:
 	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
 		      IPSTATS_MIB_REASMFAILS);
@@ -417,7 +392,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
 	unsigned int nhoff;
 	int sum_truesize;
 
-	fq_kill(fq);
+	inet_frag_kill(&fq->q, &ip6_frags);
 
 	/* Make the one we just received the head. */
 	if (prev) {
@@ -586,7 +561,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
 	ret = ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff);
 
 	spin_unlock(&fq->q.lock);
-	fq_put(fq);
+	inet_frag_put(&fq->q, &ip6_frags);
 	return ret;
 }
 