aboutsummaryrefslogtreecommitdiffstats
path: root/include/net/inet_frag.h
diff options
context:
space:
mode:
authorFlorian Westphal <fw@strlen.de>2014-07-24 10:50:33 -0400
committerDavid S. Miller <davem@davemloft.net>2014-07-28 01:34:36 -0400
commit434d305405ab86414f6ea3f261307d443a2c3506 (patch)
tree4637301f47599b6a4bac0c63d3d5df8cebc57f89 /include/net/inet_frag.h
parentb13d3cbfb8e8a8f53930af67d1ebf05149f32c24 (diff)
inet: frag: don't account number of fragment queues
The 'nqueues' counter is protected by the lru list lock; once that's removed this needs to be converted to an atomic counter. Given this isn't used for anything except for reporting it to userspace via /proc, just remove it. We still report the memory currently used by fragment reassembly queues. Signed-off-by: Florian Westphal <fw@strlen.de> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'include/net/inet_frag.h')
-rw-r--r--include/net/inet_frag.h3
1 files changed, 0 insertions, 3 deletions
diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
index e975032ea11b..68de33765705 100644
--- a/include/net/inet_frag.h
+++ b/include/net/inet_frag.h
@@ -4,7 +4,6 @@
 #include <linux/percpu_counter.h>
 
 struct netns_frags {
-	int			nqueues;
 	struct list_head	lru_list;
 	spinlock_t		lru_lock;
 
@@ -158,7 +157,6 @@ static inline void inet_frag_lru_del(struct inet_frag_queue *q)
 {
 	spin_lock(&q->net->lru_lock);
 	list_del_init(&q->lru_list);
-	q->net->nqueues--;
 	spin_unlock(&q->net->lru_lock);
 }
 
@@ -167,7 +165,6 @@ static inline void inet_frag_lru_add(struct netns_frags *nf,
 {
 	spin_lock(&nf->lru_lock);
 	list_add_tail(&q->lru_list, &nf->lru_list);
-	q->net->nqueues++;
 	spin_unlock(&nf->lru_lock);
 }
 