Diffstat (limited to 'net/ipv4/ip_fragment.c')
 net/ipv4/ip_fragment.c | 26 +++++++++++++++-----------
 1 file changed, 15 insertions(+), 11 deletions(-)
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 3b2e5adca838..cd6ce6ac6358 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -194,7 +194,7 @@ static void ip_expire(unsigned long arg)
 
 	spin_lock(&qp->q.lock);
 
-	if (qp->q.last_in & COMPLETE)
+	if (qp->q.last_in & INET_FRAG_COMPLETE)
 		goto out;
 
 	ipq_kill(qp);
@@ -202,10 +202,13 @@ static void ip_expire(unsigned long arg)
 	IP_INC_STATS_BH(IPSTATS_MIB_REASMTIMEOUT);
 	IP_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);
 
-	if ((qp->q.last_in&FIRST_IN) && qp->q.fragments != NULL) {
+	if ((qp->q.last_in & INET_FRAG_FIRST_IN) && qp->q.fragments != NULL) {
 		struct sk_buff *head = qp->q.fragments;
+		struct net *net;
+
+		net = container_of(qp->q.net, struct net, ipv4.frags);
 		/* Send an ICMP "Fragment Reassembly Timeout" message. */
-		if ((head->dev = dev_get_by_index(&init_net, qp->iif)) != NULL) {
+		if ((head->dev = dev_get_by_index(net, qp->iif)) != NULL) {
 			icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
 			dev_put(head->dev);
 		}
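
The hunk above stops pinning ip_expire() to the global init_net and instead recovers the owning namespace from the fragment queue itself: qp->q.net points at a struct netns_frags that is embedded in struct net at ipv4.frags, so container_of() walks back from the member to the enclosing structure. A minimal, self-contained sketch of that pattern (toy structure layouts and names, not the kernel's) is:

#include <stddef.h>
#include <stdio.h>

/* Same idea as the kernel macro: subtract the member's offset
 * from the member's address to get the enclosing structure. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct netns_frags { int mem; };                        /* toy stand-in */
struct net         { int id; struct netns_frags frags; };

int main(void)
{
	struct net ns = { .id = 42 };
	struct netns_frags *nf = &ns.frags;  /* only the member pointer is at hand */

	/* Recover the enclosing struct net, as ip_expire() now does. */
	struct net *owner = container_of(nf, struct net, frags);
	printf("netns id = %d\n", owner->id);               /* prints 42 */
	return 0;
}
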
@@ -298,7 +301,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
 	int ihl, end;
 	int err = -ENOENT;
 
-	if (qp->q.last_in & COMPLETE)
+	if (qp->q.last_in & INET_FRAG_COMPLETE)
 		goto err;
 
 	if (!(IPCB(skb)->flags & IPSKB_FRAG_COMPLETE) &&
@@ -324,9 +327,9 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
 		 * or have different end, the segment is corrrupted.
 		 */
 		if (end < qp->q.len ||
-		    ((qp->q.last_in & LAST_IN) && end != qp->q.len))
+		    ((qp->q.last_in & INET_FRAG_LAST_IN) && end != qp->q.len))
 			goto err;
-		qp->q.last_in |= LAST_IN;
+		qp->q.last_in |= INET_FRAG_LAST_IN;
 		qp->q.len = end;
 	} else {
 		if (end&7) {
@@ -336,7 +339,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
 		}
 		if (end > qp->q.len) {
 			/* Some bits beyond end -> corruption. */
-			if (qp->q.last_in & LAST_IN)
+			if (qp->q.last_in & INET_FRAG_LAST_IN)
 				goto err;
 			qp->q.len = end;
 		}
@@ -435,9 +438,10 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
 	qp->q.meat += skb->len;
 	atomic_add(skb->truesize, &qp->q.net->mem);
 	if (offset == 0)
-		qp->q.last_in |= FIRST_IN;
+		qp->q.last_in |= INET_FRAG_FIRST_IN;
 
-	if (qp->q.last_in == (FIRST_IN | LAST_IN) && qp->q.meat == qp->q.len)
+	if (qp->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
+	    qp->q.meat == qp->q.len)
 		return ip_frag_reasm(qp, prev, dev);
 
 	write_lock(&ip4_frags.lock);
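
The reassembly-complete test above leans on two pieces of bookkeeping: the INET_FRAG_FIRST_IN and INET_FRAG_LAST_IN bits record that the first and last fragments have been seen, and q.meat counts the payload bytes queued so far, which must equal the total length q.len learned from the last fragment. A rough userspace sketch of that bookkeeping (simplified names and flag values, no sk_buffs) is:

#include <stdbool.h>
#include <stdio.h>

#define FRAG_FIRST_IN 0x1   /* fragment with offset 0 has arrived */
#define FRAG_LAST_IN  0x2   /* fragment without the MF bit has arrived */

struct frag_queue {
	unsigned int flags;  /* FRAG_FIRST_IN | FRAG_LAST_IN */
	unsigned int meat;   /* payload bytes queued so far */
	unsigned int len;    /* total length, known once the last fragment is in */
};

/* Mirror of the check at the end of ip_frag_queue(): reassemble only when
 * both ends have been seen and no bytes are missing in between. */
static bool frag_complete(const struct frag_queue *q)
{
	return q->flags == (FRAG_FIRST_IN | FRAG_LAST_IN) && q->meat == q->len;
}

int main(void)
{
	struct frag_queue q = { 0 };

	q.flags |= FRAG_FIRST_IN; q.meat += 1480;              /* first fragment */
	q.flags |= FRAG_LAST_IN;  q.meat += 520; q.len = 2000; /* last fragment  */

	printf("complete: %s\n", frag_complete(&q) ? "yes" : "no"); /* yes */
	return 0;
}
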
@@ -553,7 +557,7 @@ out_nomem:
 out_oversize:
 	if (net_ratelimit())
 		printk(KERN_INFO
-			"Oversized IP packet from %d.%d.%d.%d.\n",
+			"Oversized IP packet from " NIPQUAD_FMT ".\n",
 			NIPQUAD(qp->saddr));
 out_fail:
 	IP_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);
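
The NIPQUAD_FMT change simply replaces the open-coded "%d.%d.%d.%d" with the format macro that the NIPQUAD() argument expander was designed to pair with, so the format string and its arguments cannot drift apart. A rough userspace approximation of the pair (macro names borrowed from the old kernel helpers, definitions here are illustrative) looks like:

#include <stdint.h>
#include <stdio.h>

/* Expand a 32-bit IPv4 address (network byte order) into four
 * printf arguments, one per octet, matching NIPQUAD_FMT. */
#define NIPQUAD_FMT "%u.%u.%u.%u"
#define NIPQUAD(addr)				\
	((unsigned char *)&(addr))[0],		\
	((unsigned char *)&(addr))[1],		\
	((unsigned char *)&(addr))[2],		\
	((unsigned char *)&(addr))[3]

int main(void)
{
	uint32_t saddr = 0x0100007f;	/* 127.0.0.1 on a little-endian host */

	printf("Oversized IP packet from " NIPQUAD_FMT ".\n", NIPQUAD(saddr));
	return 0;
}
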
@@ -568,7 +572,7 @@ int ip_defrag(struct sk_buff *skb, u32 user)
 
 	IP_INC_STATS_BH(IPSTATS_MIB_REASMREQDS);
 
-	net = skb->dev ? skb->dev->nd_net : skb->dst->dev->nd_net;
+	net = skb->dev ? dev_net(skb->dev) : dev_net(skb->dst->dev);
 	/* Start by cleaning up the memory. */
 	if (atomic_read(&net->ipv4.frags.mem) > net->ipv4.frags.high_thresh)
 		ip_evictor(net);
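
The last hunk swaps direct nd_net dereferences for the dev_net() accessor: ip_defrag() asks the helper for the namespace owning the incoming device (or the route's output device) instead of reaching into the structure, so the field's representation stays private to one place. A minimal sketch of that accessor pattern, with invented structure layouts, is:

#include <stdio.h>

struct net        { int id; };
struct net_device { const char *name; struct net *nd_net; };

/* Accessor in the spirit of dev_net(): callers never touch nd_net directly,
 * so the field can later move or be wrapped without touching every caller. */
static inline struct net *dev_net(const struct net_device *dev)
{
	return dev->nd_net;
}

int main(void)
{
	struct net init_ns = { .id = 1 };
	struct net_device eth0 = { .name = "eth0", .nd_net = &init_ns };

	printf("%s lives in netns %d\n", eth0.name, dev_net(&eth0)->id);
	return 0;
}
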