Diffstat (limited to 'net/ipv4')
-rw-r--r--  net/ipv4/inet_fragment.c  43
-rw-r--r--  net/ipv4/ip_fragment.c     5
2 files changed, 32 insertions, 16 deletions
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index 198a5ed7a815..58d4c38534f6 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -28,6 +28,9 @@
 #define INETFRAGS_EVICT_BUCKETS		128
 #define INETFRAGS_EVICT_MAX		512
 
+/* don't rebuild inetfrag table with new secret more often than this */
+#define INETFRAGS_MIN_REBUILD_INTERVAL (5 * HZ)
+
 /* Given the OR values of all fragments, apply RFC 3168 5.3 requirements
  * Value : 0xff if frame should be dropped.
  *         0 or INET_ECN_CE value, to be ORed in to final iph->tos field
@@ -55,16 +58,24 @@ inet_frag_hashfn(const struct inet_frags *f, const struct inet_frag_queue *q)
 	return f->hashfn(q) & (INETFRAGS_HASHSZ - 1);
 }
 
-static void inet_frag_secret_rebuild(unsigned long dummy)
+static bool inet_frag_may_rebuild(struct inet_frags *f)
+{
+	return time_after(jiffies,
+	       f->last_rebuild_jiffies + INETFRAGS_MIN_REBUILD_INTERVAL);
+}
+
+static void inet_frag_secret_rebuild(struct inet_frags *f)
 {
-	struct inet_frags *f = (struct inet_frags *)dummy;
-	unsigned long now = jiffies;
 	int i;
 
 	/* Per bucket lock NOT needed here, due to write lock protection */
-	write_lock(&f->lock);
+	write_lock_bh(&f->lock);
+
+	if (!inet_frag_may_rebuild(f))
+		goto out;
 
 	get_random_bytes(&f->rnd, sizeof(u32));
+
 	for (i = 0; i < INETFRAGS_HASHSZ; i++) {
 		struct inet_frag_bucket *hb;
 		struct inet_frag_queue *q;
@@ -85,9 +96,11 @@ static void inet_frag_secret_rebuild(unsigned long dummy)
 			}
 		}
 	}
-	write_unlock(&f->lock);
 
-	mod_timer(&f->secret_timer, now + f->secret_interval);
+	f->rebuild = false;
+	f->last_rebuild_jiffies = jiffies;
+out:
+	write_unlock_bh(&f->lock);
 }
 
 static bool inet_fragq_should_evict(const struct inet_frag_queue *q)
@@ -162,6 +175,8 @@ static void inet_frag_worker(struct work_struct *work)
 	f->next_bucket = i;
 
 	read_unlock_bh(&f->lock);
+	if (f->rebuild && inet_frag_may_rebuild(f))
+		inet_frag_secret_rebuild(f);
 }
 
 static void inet_frag_schedule_worker(struct inet_frags *f)
@@ -183,11 +198,7 @@ void inet_frags_init(struct inet_frags *f)
 		INIT_HLIST_HEAD(&hb->chain);
 	}
 	rwlock_init(&f->lock);
-
-	setup_timer(&f->secret_timer, inet_frag_secret_rebuild,
-		    (unsigned long)f);
-	f->secret_timer.expires = jiffies + f->secret_interval;
-	add_timer(&f->secret_timer);
+	f->last_rebuild_jiffies = 0;
 }
 EXPORT_SYMBOL(inet_frags_init);
 
@@ -199,7 +210,6 @@ EXPORT_SYMBOL(inet_frags_init_net);
 
 void inet_frags_fini(struct inet_frags *f)
 {
-	del_timer(&f->secret_timer);
 	cancel_work_sync(&f->frags_work);
 }
 EXPORT_SYMBOL(inet_frags_fini);
@@ -399,8 +409,13 @@ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
 
 	if (depth <= INETFRAGS_MAXDEPTH)
 		return inet_frag_create(nf, f, key);
-	else
-		return ERR_PTR(-ENOBUFS);
+
+	if (inet_frag_may_rebuild(f)) {
+		f->rebuild = true;
+		inet_frag_schedule_worker(f);
+	}
+
+	return ERR_PTR(-ENOBUFS);
 }
 EXPORT_SYMBOL(inet_frag_find);
 
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 8fbeee495037..44e591a7e03f 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -720,10 +720,12 @@ static struct ctl_table ip4_frags_ns_ctl_table[] = {
 	{ }
 };
 
+/* secret interval has been deprecated */
+static int ip4_frags_secret_interval_unused;
 static struct ctl_table ip4_frags_ctl_table[] = {
 	{
 		.procname	= "ipfrag_secret_interval",
-		.data		= &ip4_frags.secret_interval,
+		.data		= &ip4_frags_secret_interval_unused,
 		.maxlen		= sizeof(int),
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec_jiffies,
@@ -853,6 +855,5 @@ void __init ipfrag_init(void)
 	ip4_frags.qsize = sizeof(struct ipq);
 	ip4_frags.match = ip4_frag_match;
 	ip4_frags.frag_expire = ip_expire;
-	ip4_frags.secret_interval = 10 * 60 * HZ;
 	inet_frags_init(&ip4_frags);
 }