diff options
author    Florian Westphal <fw@strlen.de>        2014-07-24 10:50:31 -0400
committer David S. Miller <davem@davemloft.net>  2014-07-28 01:34:35 -0400
commit    86e93e470cadedda9181a2bd9aee1d9d2e5e9c0f (patch)
tree      adf79021d31261d4ad11c0cbc72ff33cf1e83902 /net/ipv4/inet_fragment.c
parent    fb3cfe6e75b9d05c87265e85e67d7caf6e5b44a7 (diff)
inet: frag: move evictor calls into frag_find function
First step to move eviction handling into a work queue.
We lose two spots that accounted evicted fragments in MIB counters.
Accounting will be restored since the upcoming work-queue evictor
invokes the frag queue timer callbacks instead.
Signed-off-by: Florian Westphal <fw@strlen.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/inet_fragment.c')
-rw-r--r--  net/ipv4/inet_fragment.c | 16 +++++++++-------
1 file changed, 9 insertions(+), 7 deletions(-)
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index 930d23870811..535636017534 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -46,6 +46,8 @@ const u8 ip_frag_ecn_table[16] = {
 };
 EXPORT_SYMBOL(ip_frag_ecn_table);
 
+static int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f, bool force);
+
 static unsigned int
 inet_frag_hashfn(const struct inet_frags *f, const struct inet_frag_queue *q)
 {
@@ -203,16 +205,11 @@ void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f,
 }
 EXPORT_SYMBOL(inet_frag_destroy);
 
-int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f, bool force)
+static int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f, bool force)
 {
 	struct inet_frag_queue *q;
 	int work, evicted = 0;
 
-	if (!force) {
-		if (frag_mem_limit(nf) <= nf->high_thresh)
-			return 0;
-	}
-
 	work = frag_mem_limit(nf) - nf->low_thresh;
 	while (work > 0 || force) {
 		spin_lock(&nf->lru_lock);
@@ -242,7 +239,6 @@ int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f, bool force)
 
 	return evicted;
 }
-EXPORT_SYMBOL(inet_frag_evictor);
 
 static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
 		struct inet_frag_queue *qp_in, struct inet_frags *f,
@@ -296,6 +292,9 @@ static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
 {
 	struct inet_frag_queue *q;
 
+	if (frag_mem_limit(nf) > nf->high_thresh)
+		return NULL;
+
 	q = kzalloc(f->qsize, GFP_ATOMIC);
 	if (q == NULL)
 		return NULL;
@@ -332,6 +331,9 @@ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
 	struct inet_frag_queue *q;
 	int depth = 0;
 
+	if (frag_mem_limit(nf) > nf->high_thresh)
+		inet_frag_evictor(nf, f, false);
+
 	hash &= (INETFRAGS_HASHSZ - 1);
 	hb = &f->hash[hash];
 