author	Pavel Emelyanov <xemul@openvz.org>	2008-01-22 09:10:13 -0500
committer	David S. Miller <davem@davemloft.net>	2008-01-28 18:10:38 -0500
commit	e31e0bdc7e7fb9a4b09d2f3266c035a18fdcee9d (patch)
tree	30e25f733781cf80aa9fef0d58ff3476424cb9b3 /net/ipv4
parent	b2fd5321dd160ef309dfb6cfc78ed8de4a830659 (diff)
[NETNS][FRAGS]: Make thresholds work in namespaces.
This is the same as with the timeout variable. Currently, after exceeding
the high threshold _all_ the fragments are evicted, but this will be fixed
in a later patch.

Signed-off-by: Pavel Emelyanov <xemul@openvz.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
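[Editor's note: for orientation, this is roughly the per-namespace fragment state the series builds up; the field layout is assumed from include/net/inet_frag.h of this period (the timeout field was moved there by the parent commit, the two thresholds by this one). Kernel types, not a stand-alone program:

struct netns_frags {
	int			nqueues;
	atomic_t		mem;
	struct list_head	lru_list;

	/* sysctls */
	int			timeout;
	int			high_thresh;
	int			low_thresh;
};
]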
Diffstat (limited to 'net/ipv4')
-rw-r--r--	net/ipv4/inet_fragment.c	 2
-rw-r--r--	net/ipv4/ip_fragment.c	26
2 files changed, 14 insertions, 14 deletions
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index 9da96792fffb..5ab399c15282 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -153,7 +153,7 @@ int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f)
 	struct inet_frag_queue *q;
 	int work, evicted = 0;
 
-	work = atomic_read(&nf->mem) - f->ctl->low_thresh;
+	work = atomic_read(&nf->mem) - nf->low_thresh;
 	while (work > 0) {
 		read_lock(&f->lock);
 		if (list_empty(&f->lru_list)) {
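[Editor's note: the change above makes the evictor read the low watermark from the namespace being shrunk rather than from the global ctl. A stand-alone user-space model of the loop's semantics (toy types and a fixed per-queue size, purely illustrative):

#include <stdio.h>

struct frag_ns {
	int mem;		/* models atomic_read(&nf->mem) */
	int low_thresh;		/* models nf->low_thresh */
};

/* Pretend evicting one whole queue always frees 4 KiB. */
static int evict_one_queue(struct frag_ns *ns)
{
	ns->mem -= 4096;
	return 4096;
}

static int evict(struct frag_ns *ns)
{
	int work = ns->mem - ns->low_thresh;
	int evicted = 0;

	/* Keep evicting until memory drops to the low watermark. */
	while (work > 0) {
		work -= evict_one_queue(ns);
		evicted++;
	}
	return evicted;
}

int main(void)
{
	struct frag_ns ns = { .mem = 256 * 1024, .low_thresh = 192 * 1024 };

	printf("evicted %d queues, mem now %d\n", evict(&ns), ns.mem);
	return 0;
}
]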
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 70d241c8d2a8..80c2c19196cd 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -75,14 +75,6 @@ struct ipq {
 };
 
 static struct inet_frags_ctl ip4_frags_ctl __read_mostly = {
-	/*
-	 * Fragment cache limits. We will commit 256K at one time. Should we
-	 * cross that limit we will prune down to 192K. This should cope with
-	 * even the most extreme cases without allowing an attacker to
-	 * measurably harm machine performance.
-	 */
-	.high_thresh	 = 256 * 1024,
-	.low_thresh	 = 192 * 1024,
 	.secret_interval = 10 * 60 * HZ,
 };
 
@@ -582,7 +574,7 @@ int ip_defrag(struct sk_buff *skb, u32 user)
 
 	net = skb->dev->nd_net;
 	/* Start by cleaning up the memory. */
-	if (atomic_read(&net->ipv4.frags.mem) > ip4_frags_ctl.high_thresh)
+	if (atomic_read(&net->ipv4.frags.mem) > net->ipv4.frags.high_thresh)
 		ip_evictor(net);
 
 	/* Lookup (or create) queue header */
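[Editor's note: the hunk above compares a namespace's fragment memory against its own high threshold. A toy model showing the isolation this buys, i.e. only the namespace over its own limit gets pruned (illustrative names and values):

#include <stdio.h>

struct frag_ns {
	const char *name;
	int mem, high_thresh, low_thresh;
};

static void maybe_evict(struct frag_ns *ns)
{
	if (ns->mem > ns->high_thresh) {
		ns->mem = ns->low_thresh;	/* prune down to the low mark */
		printf("%s: evicted down to %d\n", ns->name, ns->mem);
	}
}

int main(void)
{
	struct frag_ns a = { "ns-a", 300 * 1024, 256 * 1024, 192 * 1024 };
	struct frag_ns b = { "ns-b",  10 * 1024, 256 * 1024, 192 * 1024 };

	maybe_evict(&a);	/* over its own threshold: pruned */
	maybe_evict(&b);	/* a different namespace stays untouched */
	return 0;
}
]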
@@ -610,7 +602,7 @@ static struct ctl_table ip4_frags_ctl_table[] = {
 	{
 		.ctl_name	= NET_IPV4_IPFRAG_HIGH_THRESH,
 		.procname	= "ipfrag_high_thresh",
-		.data		= &ip4_frags_ctl.high_thresh,
+		.data		= &init_net.ipv4.frags.high_thresh,
 		.maxlen		= sizeof(int),
 		.mode		= 0644,
 		.proc_handler	= &proc_dointvec
@@ -618,7 +610,7 @@ static struct ctl_table ip4_frags_ctl_table[] = {
 	{
 		.ctl_name	= NET_IPV4_IPFRAG_LOW_THRESH,
 		.procname	= "ipfrag_low_thresh",
-		.data		= &ip4_frags_ctl.low_thresh,
+		.data		= &init_net.ipv4.frags.low_thresh,
 		.maxlen		= sizeof(int),
 		.mode		= 0644,
 		.proc_handler	= &proc_dointvec
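[Editor's note: the two sysctl entries above are exposed as /proc/sys/net/ipv4/ipfrag_high_thresh and ipfrag_low_thresh (paths follow from the procnames in the table); with this patch each network namespace sees its own copy. A small user-space reader, for illustration:

#include <stdio.h>

static long read_sysctl(const char *path)
{
	long val = -1;
	FILE *f = fopen(path, "r");

	if (f) {
		if (fscanf(f, "%ld", &val) != 1)
			val = -1;
		fclose(f);
	}
	return val;
}

int main(void)
{
	printf("ipfrag_high_thresh: %ld\n",
	       read_sysctl("/proc/sys/net/ipv4/ipfrag_high_thresh"));
	printf("ipfrag_low_thresh:  %ld\n",
	       read_sysctl("/proc/sys/net/ipv4/ipfrag_low_thresh"));
	return 0;
}
]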
@@ -663,8 +655,8 @@ static int ip4_frags_ctl_register(struct net *net)
 	if (table == NULL)
 		goto err_alloc;
 
-	table[0].mode &= ~0222;
-	table[1].mode &= ~0222;
+	table[0].data = &net->ipv4.frags.high_thresh;
+	table[1].data = &net->ipv4.frags.low_thresh;
 	table[2].data = &net->ipv4.frags.timeout;
 	table[3].mode &= ~0222;
 	table[4].mode &= ~0222;
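[Editor's note: instead of marking the threshold entries read-only in non-init namespaces, the hunk above repoints their .data at the namespace's own fields. A minimal user-space model of that clone-and-repoint pattern, with simplified stand-in types:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct ctl_entry {
	const char *procname;
	void *data;
};

struct ns_frags {
	int high_thresh, low_thresh, timeout;
};

/* Template table, like ip4_frags_ctl_table: .data is filled in per netns. */
static const struct ctl_entry frags_template[] = {
	{ "ipfrag_high_thresh", NULL },
	{ "ipfrag_low_thresh",  NULL },
	{ "ipfrag_time",        NULL },
};

static struct ctl_entry *register_for_ns(struct ns_frags *ns)
{
	struct ctl_entry *table = malloc(sizeof(frags_template));

	if (!table)
		return NULL;
	memcpy(table, frags_template, sizeof(frags_template));
	table[0].data = &ns->high_thresh;	/* mirrors table[0].data = ... above */
	table[1].data = &ns->low_thresh;
	table[2].data = &ns->timeout;
	return table;
}

int main(void)
{
	struct ns_frags ns = { 256 * 1024, 192 * 1024, 30 };
	struct ctl_entry *t = register_for_ns(&ns);

	if (t)
		printf("%s -> %d\n", t[0].procname, *(int *)t[0].data);
	free(t);
	return 0;
}
]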
@@ -706,6 +698,14 @@ static inline void ip4_frags_ctl_unregister(struct net *net)
 static int ipv4_frags_init_net(struct net *net)
 {
 	/*
+	 * Fragment cache limits. We will commit 256K at one time. Should we
+	 * cross that limit we will prune down to 192K. This should cope with
+	 * even the most extreme cases without allowing an attacker to
+	 * measurably harm machine performance.
+	 */
+	net->ipv4.frags.high_thresh = 256 * 1024;
+	net->ipv4.frags.low_thresh = 192 * 1024;
+	/*
 	 * Important NOTE! Fragment queue must be destroyed before MSL expires.
 	 * RFC791 is wrong proposing to prolongate timer each fragment arrival
 	 * by TTL.
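[Editor's note: the defaults removed from the global initializer are re-applied above each time a namespace is created, so every namespace starts at 256K/192K but can be tuned independently. A stand-alone sketch of that property, with toy types:

#include <assert.h>

struct ns_frags { int high_thresh; int low_thresh; };

static void frags_init_ns(struct ns_frags *ns)
{
	/* Same 256K/192K defaults the removed global initializer carried. */
	ns->high_thresh = 256 * 1024;
	ns->low_thresh  = 192 * 1024;
}

int main(void)
{
	struct ns_frags a, b;

	frags_init_ns(&a);
	frags_init_ns(&b);
	b.high_thresh = 512 * 1024;	/* namespaces may now diverge */
	assert(a.high_thresh == 256 * 1024 && b.high_thresh == 512 * 1024);
	return 0;
}
]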