Diffstat (limited to 'net/ipv4/ip_fragment.c')
-rw-r--r--	net/ipv4/ip_fragment.c	29
1 file changed, 15 insertions(+), 14 deletions(-)
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index cd8c83025b48..4f013343cef7 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -100,9 +100,9 @@ int ip_frag_nqueues(struct net *net)
 	return net->ipv4.frags.nqueues;
 }
 
-int ip_frag_mem(void)
+int ip_frag_mem(struct net *net)
 {
-	return atomic_read(&ip4_frags.mem);
+	return atomic_read(&net->ipv4.frags.mem);
 }
 
 static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
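
With this hunk, ip_frag_mem() reports the memory charged to a particular namespace instead of the single global ip4_frags counter. A minimal sketch of a consumer, assuming a hypothetical frag_report() helper in the style of the FRAG line of /proc/net/sockstat (the seq_file plumbing is elided):

/* Sketch only: both accessors now take the namespace they should report on. */
static void frag_report(struct seq_file *seq, struct net *net)
{
	seq_printf(seq, "FRAG: inuse %d memory %d\n",
		   ip_frag_nqueues(net), ip_frag_mem(net));
}
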
@@ -142,11 +142,12 @@ static int ip4_frag_match(struct inet_frag_queue *q, void *a)
 }
 
 /* Memory Tracking Functions. */
-static __inline__ void frag_kfree_skb(struct sk_buff *skb, int *work)
+static __inline__ void frag_kfree_skb(struct netns_frags *nf,
+		struct sk_buff *skb, int *work)
 {
 	if (work)
 		*work -= skb->truesize;
-	atomic_sub(skb->truesize, &ip4_frags.mem);
+	atomic_sub(skb->truesize, &nf->mem);
 	kfree_skb(skb);
 }
 
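
frag_kfree_skb() now uncharges the skb from the netns_frags it was queued under rather than from the global ip4_frags.mem. For orientation, this is the assumed shape of struct netns_frags at this point in the series (a sketch in the spirit of include/net/inet_frag.h, not part of this patch):

struct netns_frags {
	int			nqueues;	/* queues owned by this namespace */
	atomic_t		mem;		/* counter frag_kfree_skb() subtracts from */
	struct list_head	lru_list;	/* per-namespace eviction order */
};
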
@@ -192,11 +193,11 @@ static void ipq_kill(struct ipq *ipq)
 /* Memory limiting on fragments. Evictor trashes the oldest
  * fragment queue until we are back under the threshold.
  */
-static void ip_evictor(void)
+static void ip_evictor(struct net *net)
 {
 	int evicted;
 
-	evicted = inet_frag_evictor(&ip4_frags);
+	evicted = inet_frag_evictor(&net->ipv4.frags, &ip4_frags);
 	if (evicted)
 		IP_ADD_STATS_BH(IPSTATS_MIB_REASMFAILS, evicted);
 }
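
ip_evictor() is handed the namespace that went over the limit and passes its netns_frags down to the generic inet_frag_evictor(), so only that namespace's queues get trashed. A simplified, hedged sketch of what the generic side is expected to do with that pair of arguments (locking and refcounting omitted; not the literal net/ipv4/inet_fragment.c code):

int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f)
{
	int evicted = 0;
	int work = atomic_read(&nf->mem) - f->ctl->low_thresh;

	while (work > 0 && !list_empty(&nf->lru_list)) {
		struct inet_frag_queue *q;

		q = list_first_entry(&nf->lru_list,
				     struct inet_frag_queue, lru_list);
		inet_frag_kill(q, f);		/* unhash, stop the timer */
		inet_frag_destroy(q, f, &work);	/* frees skbs, lowers nf->mem */
		evicted++;
	}
	return evicted;
}
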
@@ -294,7 +295,7 @@ static int ip_frag_reinit(struct ipq *qp)
 	fp = qp->q.fragments;
 	do {
 		struct sk_buff *xp = fp->next;
-		frag_kfree_skb(fp, NULL);
+		frag_kfree_skb(qp->q.net, fp, NULL);
 		fp = xp;
 	} while (fp);
 
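
The qp->q.net pointer used here is the back-reference from the generic fragment queue to the netns_frags it was allocated under, which is what lets helpers such as frag_kfree_skb() hit the right namespace without an explicit struct net argument. Assumed layout, trimmed to the fields relevant to this patch (a sketch, not the full definitions):

struct inet_frag_queue {
	struct netns_frags	*net;		/* owning namespace, set at creation */
	struct sk_buff		*fragments;	/* received fragments */
	int			meat;		/* bytes received so far */
	/* hash linkage, lock, timer and refcount omitted */
};

struct ipq {
	struct inet_frag_queue	q;		/* so qp->q.net names the owner */
	/* IPv4 key fields (saddr, daddr, id, protocol, user) omitted */
};
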
@@ -431,7 +432,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
 			qp->q.fragments = next;
 
 			qp->q.meat -= free_it->len;
-			frag_kfree_skb(free_it, NULL);
+			frag_kfree_skb(qp->q.net, free_it, NULL);
 		}
 	}
 
@@ -451,7 +452,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
 	}
 	qp->q.stamp = skb->tstamp;
 	qp->q.meat += skb->len;
-	atomic_add(skb->truesize, &ip4_frags.mem);
+	atomic_add(skb->truesize, &qp->q.net->mem);
 	if (offset == 0)
 		qp->q.last_in |= FIRST_IN;
 
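
Together with the frag_kfree_skb() change above, this keeps the per-namespace counter symmetric: every truesize added when a fragment is queued is subtracted again when that fragment is dropped or consumed. The two halves could be named explicitly with helpers like the following (hypothetical, not introduced by this patch):

/* Hypothetical naming of the accounting pair kept in balance by
 * ip_frag_queue() and frag_kfree_skb(); shown only to make the
 * invariant explicit.
 */
static inline void frag_charge(struct netns_frags *nf, struct sk_buff *skb)
{
	atomic_add(skb->truesize, &nf->mem);
}

static inline void frag_uncharge(struct netns_frags *nf, struct sk_buff *skb)
{
	atomic_sub(skb->truesize, &nf->mem);
}
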
@@ -534,12 +535,12 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
 		head->len -= clone->len;
 		clone->csum = 0;
 		clone->ip_summed = head->ip_summed;
-		atomic_add(clone->truesize, &ip4_frags.mem);
+		atomic_add(clone->truesize, &qp->q.net->mem);
 	}
 
 	skb_shinfo(head)->frag_list = head->next;
 	skb_push(head, head->data - skb_network_header(head));
-	atomic_sub(head->truesize, &ip4_frags.mem);
+	atomic_sub(head->truesize, &qp->q.net->mem);
 
 	for (fp=head->next; fp; fp = fp->next) {
 		head->data_len += fp->len;
@@ -549,7 +550,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
 		else if (head->ip_summed == CHECKSUM_COMPLETE)
 			head->csum = csum_add(head->csum, fp->csum);
 		head->truesize += fp->truesize;
-		atomic_sub(fp->truesize, &ip4_frags.mem);
+		atomic_sub(fp->truesize, &qp->q.net->mem);
 	}
 
 	head->next = NULL;
@@ -588,8 +589,8 @@ int ip_defrag(struct sk_buff *skb, u32 user)
 
 	net = skb->dev->nd_net;
 	/* Start by cleaning up the memory. */
-	if (atomic_read(&ip4_frags.mem) > ip4_frags_ctl.high_thresh)
-		ip_evictor();
+	if (atomic_read(&net->ipv4.frags.mem) > ip4_frags_ctl.high_thresh)
+		ip_evictor(net);
 
 	/* Lookup (or create) queue header */
 	if ((qp = ip_find(net, ip_hdr(skb), user)) != NULL) {
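
ip_defrag() is where the namespace is resolved (from the receiving device) and where eviction is triggered. Note that the memory counter is now per namespace while the threshold it is compared against, ip4_frags_ctl.high_thresh, is still the global sysctl at this point in the series. A condensed sketch of the resulting entry path, with statistics and error handling trimmed (not the verbatim function):

int ip_defrag(struct sk_buff *skb, u32 user)
{
	struct ipq *qp;
	struct net *net = skb->dev->nd_net;

	/* Per-namespace usage checked against the still-global limit. */
	if (atomic_read(&net->ipv4.frags.mem) > ip4_frags_ctl.high_thresh)
		ip_evictor(net);		/* trims this namespace only */

	qp = ip_find(net, ip_hdr(skb), user);	/* lookup or create the queue */
	if (qp != NULL) {
		int ret;

		spin_lock(&qp->q.lock);
		ret = ip_frag_queue(qp, skb);	/* enqueue, maybe reassemble */
		spin_unlock(&qp->q.lock);
		ipq_put(qp);
		return ret;
	}

	kfree_skb(skb);
	return -ENOMEM;
}
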