author		Pavel Emelyanov <xemul@openvz.org>	2007-10-15 05:33:45 -0400
committer	David S. Miller <davem@sunset.davemloft.net>	2007-10-15 15:26:40 -0400
commit		04128f233f2b344f3438cde09723e9946463a573 (patch)
tree		04f4518ef51c74de4d318d7ea908b3215a6aa9c8 /net/ipv4/ip_fragment.c
parent		7eb95156d9dce2f59794264db336ce007d71638b (diff)
[INET]: Collect common frag sysctl variables together
Some sysctl variables are used to tune the frag queue management, and
it will be useful to work with them in a common way in the future, so
move them into one structure. Moreover, they are the same for all the
frag management code.

I don't place them in the existing inet_frags object, introduced in
the previous patch, for two reasons:

1. to keep them in the __read_mostly section;
2. not to export the whole inet_frags object outside (see the sketch
   after the sign-offs).
Signed-off-by: Pavel Emelyanov <xemul@openvz.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
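
The header side of this change is outside the diffstat shown below. A
minimal sketch of the shared declarations it implies, reconstructed from
the fields initialized in the ip_fragment.c hunk (the actual layout in
include/net/inet_frag.h is not shown here, so the surrounding members and
field order are assumptions):

/*
 * Sketch only: reconstructed from the fields this patch initializes in
 * ip4_frags_ctl.  The real definitions live in include/net/inet_frag.h,
 * which is outside this diffstat, so neighbouring members are assumptions.
 */
struct inet_frags_ctl {
	int	high_thresh;		/* start evicting above this many bytes */
	int	low_thresh;		/* ...and prune back down to this       */
	int	timeout;		/* per-queue reassembly timeout, jiffies */
	int	secret_interval;	/* hash secret rebuild period, jiffies   */
};

struct inet_frags {
	/* ... hash table, lock, LRU list, timers, mem counter ... */
	struct inet_frags_ctl	*ctl;	/* per-protocol tuning knobs */
};

Keeping the tunables in a separate object lets each protocol place its
instance in __read_mostly while the core only sees a pointer to it.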
Diffstat (limited to 'net/ipv4/ip_fragment.c')
-rw-r--r--	net/ipv4/ip_fragment.c	47
1 file changed, 26 insertions, 21 deletions
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 321e694b72e..0dd9a31df21 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -50,21 +50,8 @@
  * as well. Or notify me, at least. --ANK
  */
 
-/* Fragment cache limits. We will commit 256K at one time. Should we
- * cross that limit we will prune down to 192K. This should cope with
- * even the most extreme cases without allowing an attacker to measurably
- * harm machine performance.
- */
-int sysctl_ipfrag_high_thresh __read_mostly = 256*1024;
-int sysctl_ipfrag_low_thresh __read_mostly = 192*1024;
-
 int sysctl_ipfrag_max_dist __read_mostly = 64;
 
-/* Important NOTE! Fragment queue must be destroyed before MSL expires.
- * RFC791 is wrong proposing to prolongate timer each fragment arrival by TTL.
- */
-int sysctl_ipfrag_time __read_mostly = IP_FRAG_TIME;
-
 struct ipfrag_skb_cb
 {
 	struct inet_skb_parm	h;
@@ -87,6 +74,25 @@ struct ipq {
 	struct inet_peer *peer;
 };
 
+struct inet_frags_ctl ip4_frags_ctl __read_mostly = {
+	/*
+	 * Fragment cache limits. We will commit 256K at one time. Should we
+	 * cross that limit we will prune down to 192K. This should cope with
+	 * even the most extreme cases without allowing an attacker to
+	 * measurably harm machine performance.
+	 */
+	.high_thresh	 = 256 * 1024,
+	.low_thresh	 = 192 * 1024,
+
+	/*
+	 * Important NOTE! Fragment queue must be destroyed before MSL expires.
+	 * RFC791 is wrong proposing to prolongate timer each fragment arrival
+	 * by TTL.
+	 */
+	.timeout	 = IP_FRAG_TIME,
+	.secret_interval = 10 * 60 * HZ,
+};
+
 static struct inet_frags ip4_frags;
 
 int ip_frag_nqueues(void)
@@ -123,8 +129,6 @@ static unsigned int ipqhashfn(__be16 id, __be32 saddr, __be32 daddr, u8 prot)
 		 ip4_frags.rnd) & (INETFRAGS_HASHSZ - 1);
 }
 
-int sysctl_ipfrag_secret_interval __read_mostly = 10 * 60 * HZ;
-
 static void ipfrag_secret_rebuild(unsigned long dummy)
 {
 	unsigned long now = jiffies;
@@ -150,7 +154,7 @@ static void ipfrag_secret_rebuild(unsigned long dummy)
 	}
 	write_unlock(&ip4_frags.lock);
 
-	mod_timer(&ip4_frags.secret_timer, now + sysctl_ipfrag_secret_interval);
+	mod_timer(&ip4_frags.secret_timer, now + ip4_frags_ctl.secret_interval);
 }
 
 /* Memory Tracking Functions. */
@@ -237,7 +241,7 @@ static void ip_evictor(void)
 	struct list_head *tmp;
 	int work;
 
-	work = atomic_read(&ip4_frags.mem) - sysctl_ipfrag_low_thresh;
+	work = atomic_read(&ip4_frags.mem) - ip4_frags_ctl.low_thresh;
 	if (work <= 0)
 		return;
 
@@ -326,7 +330,7 @@ static struct ipq *ip_frag_intern(struct ipq *qp_in)
 #endif
 	qp = qp_in;
 
-	if (!mod_timer(&qp->q.timer, jiffies + sysctl_ipfrag_time))
+	if (!mod_timer(&qp->q.timer, jiffies + ip4_frags_ctl.timeout))
 		atomic_inc(&qp->q.refcnt);
 
 	atomic_inc(&qp->q.refcnt);
@@ -432,7 +436,7 @@ static int ip_frag_reinit(struct ipq *qp)
 {
 	struct sk_buff *fp;
 
-	if (!mod_timer(&qp->q.timer, jiffies + sysctl_ipfrag_time)) {
+	if (!mod_timer(&qp->q.timer, jiffies + ip4_frags_ctl.timeout)) {
 		atomic_inc(&qp->q.refcnt);
 		return -ETIMEDOUT;
 	}
@@ -733,7 +737,7 @@ int ip_defrag(struct sk_buff *skb, u32 user)
 	IP_INC_STATS_BH(IPSTATS_MIB_REASMREQDS);
 
 	/* Start by cleaning up the memory. */
-	if (atomic_read(&ip4_frags.mem) > sysctl_ipfrag_high_thresh)
+	if (atomic_read(&ip4_frags.mem) > ip4_frags_ctl.high_thresh)
 		ip_evictor();
 
 	/* Lookup (or create) queue header */
@@ -758,9 +762,10 @@ void __init ipfrag_init(void)
 {
 	init_timer(&ip4_frags.secret_timer);
 	ip4_frags.secret_timer.function = ipfrag_secret_rebuild;
-	ip4_frags.secret_timer.expires = jiffies + sysctl_ipfrag_secret_interval;
+	ip4_frags.secret_timer.expires = jiffies + ip4_frags_ctl.secret_interval;
 	add_timer(&ip4_frags.secret_timer);
 
+	ip4_frags.ctl = &ip4_frags_ctl;
 	inet_frags_init(&ip4_frags);
 }
 
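
For context, a hypothetical example of how another reassembly unit could
plug into the same mechanism after this patch; the ip6_* identifiers below
are illustrative and not part of this diff:

/*
 * Hypothetical follow-up user (names are illustrative, not from this
 * patch): another reassembler declares its own tuning block and hands
 * it to the shared core, exactly as ipfrag_init() does above.
 */
static struct inet_frags_ctl ip6_frags_ctl __read_mostly = {
	.high_thresh	 = 256 * 1024,
	.low_thresh	 = 192 * 1024,
	.timeout	 = 60 * HZ,		/* assumed reassembly timeout */
	.secret_interval = 10 * 60 * HZ,
};

static struct inet_frags ip6_frags;

static void __init ip6_frag_example_init(void)
{
	ip6_frags.ctl = &ip6_frags_ctl;		/* hand the knobs to the core */
	inet_frags_init(&ip6_frags);
}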