diff options
author | Pavel Emelyanov <xemul@openvz.org> | 2008-01-22 09:11:48 -0500 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2008-01-28 18:10:39 -0500 |
commit | 3140c25c82106645a6b1fc469dab7006a1d09fd0 (patch) | |
tree | cbf97e7138610c1f1f0ad4528d59b4bbd14039c7 /net/ipv4/inet_fragment.c | |
parent | 3b4bc4a2bfe80d01ebd4f2b6dcc58986c970ed16 (diff) |
[NETNS][FRAGS]: Make the LRU list per namespace.
The inet_frags.lru_list is used for evicting only, so we have
to make it per-namespace, to evict only those fragments whose
namespace exceeded its high threshold, but not the whole hash.
Besides, this helps to avoid long loops in evictor.
The spinlock is not per-namespace because it protects the
hash table as well, which is global.
Signed-off-by: Pavel Emelyanov <xemul@openvz.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/inet_fragment.c')
-rw-r--r-- | net/ipv4/inet_fragment.c | 8 |
1 file changed, 4 insertions, 4 deletions
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c index fcf5252166fa..f1b95e128772 100644 --- a/net/ipv4/inet_fragment.c +++ b/net/ipv4/inet_fragment.c | |||
@@ -57,7 +57,6 @@ void inet_frags_init(struct inet_frags *f) | |||
57 | for (i = 0; i < INETFRAGS_HASHSZ; i++) | 57 | for (i = 0; i < INETFRAGS_HASHSZ; i++) |
58 | INIT_HLIST_HEAD(&f->hash[i]); | 58 | INIT_HLIST_HEAD(&f->hash[i]); |
59 | 59 | ||
60 | INIT_LIST_HEAD(&f->lru_list); | ||
61 | rwlock_init(&f->lock); | 60 | rwlock_init(&f->lock); |
62 | 61 | ||
63 | f->rnd = (u32) ((num_physpages ^ (num_physpages>>7)) ^ | 62 | f->rnd = (u32) ((num_physpages ^ (num_physpages>>7)) ^ |
@@ -74,6 +73,7 @@ void inet_frags_init_net(struct netns_frags *nf) | |||
74 | { | 73 | { |
75 | nf->nqueues = 0; | 74 | nf->nqueues = 0; |
76 | atomic_set(&nf->mem, 0); | 75 | atomic_set(&nf->mem, 0); |
76 | INIT_LIST_HEAD(&nf->lru_list); | ||
77 | } | 77 | } |
78 | EXPORT_SYMBOL(inet_frags_init_net); | 78 | EXPORT_SYMBOL(inet_frags_init_net); |
79 | 79 | ||
@@ -156,12 +156,12 @@ int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f) | |||
156 | work = atomic_read(&nf->mem) - nf->low_thresh; | 156 | work = atomic_read(&nf->mem) - nf->low_thresh; |
157 | while (work > 0) { | 157 | while (work > 0) { |
158 | read_lock(&f->lock); | 158 | read_lock(&f->lock); |
159 | if (list_empty(&f->lru_list)) { | 159 | if (list_empty(&nf->lru_list)) { |
160 | read_unlock(&f->lock); | 160 | read_unlock(&f->lock); |
161 | break; | 161 | break; |
162 | } | 162 | } |
163 | 163 | ||
164 | q = list_first_entry(&f->lru_list, | 164 | q = list_first_entry(&nf->lru_list, |
165 | struct inet_frag_queue, lru_list); | 165 | struct inet_frag_queue, lru_list); |
166 | atomic_inc(&q->refcnt); | 166 | atomic_inc(&q->refcnt); |
167 | read_unlock(&f->lock); | 167 | read_unlock(&f->lock); |
@@ -211,7 +211,7 @@ static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf, | |||
211 | 211 | ||
212 | atomic_inc(&qp->refcnt); | 212 | atomic_inc(&qp->refcnt); |
213 | hlist_add_head(&qp->list, &f->hash[hash]); | 213 | hlist_add_head(&qp->list, &f->hash[hash]); |
214 | list_add_tail(&qp->lru_list, &f->lru_list); | 214 | list_add_tail(&qp->lru_list, &nf->lru_list); |
215 | nf->nqueues++; | 215 | nf->nqueues++; |
216 | write_unlock(&f->lock); | 216 | write_unlock(&f->lock); |
217 | return qp; | 217 | return qp; |