diff options
author | Jesper Dangaard Brouer <brouer@redhat.com> | 2013-01-28 18:45:12 -0500 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2013-01-29 13:36:24 -0500 |
commit | d433673e5f9180e05a770c4b2ab18c08ad51cc21 (patch) | |
tree | 7d71bb8483724418e6a4f48cc4099a8722c704c8 /net/ipv4/inet_fragment.c | |
parent | 6e34a8b37aca63f109bf990d46131ee07206f5f1 (diff) |
net: frag helper functions for mem limit tracking
This change is primarily a preparation to ease the extension of memory
limit tracking.
The change does reduce the number of atomic operations during freeing of
a frag queue. This does introduce some performance improvement, as
these atomic operations are at the core of the performance problems
seen on NUMA systems.
Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/inet_fragment.c')
-rw-r--r-- | net/ipv4/inet_fragment.c | 25 |
1 files changed, 12 insertions, 13 deletions
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c index 4750d2b74d79..e348c849c5a3 100644 --- a/net/ipv4/inet_fragment.c +++ b/net/ipv4/inet_fragment.c | |||
@@ -73,7 +73,7 @@ EXPORT_SYMBOL(inet_frags_init); | |||
73 | void inet_frags_init_net(struct netns_frags *nf) | 73 | void inet_frags_init_net(struct netns_frags *nf) |
74 | { | 74 | { |
75 | nf->nqueues = 0; | 75 | nf->nqueues = 0; |
76 | atomic_set(&nf->mem, 0); | 76 | init_frag_mem_limit(nf); |
77 | INIT_LIST_HEAD(&nf->lru_list); | 77 | INIT_LIST_HEAD(&nf->lru_list); |
78 | } | 78 | } |
79 | EXPORT_SYMBOL(inet_frags_init_net); | 79 | EXPORT_SYMBOL(inet_frags_init_net); |
@@ -117,12 +117,8 @@ void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f) | |||
117 | EXPORT_SYMBOL(inet_frag_kill); | 117 | EXPORT_SYMBOL(inet_frag_kill); |
118 | 118 | ||
119 | static inline void frag_kfree_skb(struct netns_frags *nf, struct inet_frags *f, | 119 | static inline void frag_kfree_skb(struct netns_frags *nf, struct inet_frags *f, |
120 | struct sk_buff *skb, int *work) | 120 | struct sk_buff *skb) |
121 | { | 121 | { |
122 | if (work) | ||
123 | *work -= skb->truesize; | ||
124 | |||
125 | atomic_sub(skb->truesize, &nf->mem); | ||
126 | if (f->skb_free) | 122 | if (f->skb_free) |
127 | f->skb_free(skb); | 123 | f->skb_free(skb); |
128 | kfree_skb(skb); | 124 | kfree_skb(skb); |
@@ -133,6 +129,7 @@ void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f, | |||
133 | { | 129 | { |
134 | struct sk_buff *fp; | 130 | struct sk_buff *fp; |
135 | struct netns_frags *nf; | 131 | struct netns_frags *nf; |
132 | unsigned int sum, sum_truesize = 0; | ||
136 | 133 | ||
137 | WARN_ON(!(q->last_in & INET_FRAG_COMPLETE)); | 134 | WARN_ON(!(q->last_in & INET_FRAG_COMPLETE)); |
138 | WARN_ON(del_timer(&q->timer) != 0); | 135 | WARN_ON(del_timer(&q->timer) != 0); |
@@ -143,13 +140,14 @@ void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f, | |||
143 | while (fp) { | 140 | while (fp) { |
144 | struct sk_buff *xp = fp->next; | 141 | struct sk_buff *xp = fp->next; |
145 | 142 | ||
146 | frag_kfree_skb(nf, f, fp, work); | 143 | sum_truesize += fp->truesize; |
144 | frag_kfree_skb(nf, f, fp); | ||
147 | fp = xp; | 145 | fp = xp; |
148 | } | 146 | } |
149 | 147 | sum = sum_truesize + f->qsize; | |
150 | if (work) | 148 | if (work) |
151 | *work -= f->qsize; | 149 | *work -= sum; |
152 | atomic_sub(f->qsize, &nf->mem); | 150 | sub_frag_mem_limit(q, sum); |
153 | 151 | ||
154 | if (f->destructor) | 152 | if (f->destructor) |
155 | f->destructor(q); | 153 | f->destructor(q); |
@@ -164,11 +162,11 @@ int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f, bool force) | |||
164 | int work, evicted = 0; | 162 | int work, evicted = 0; |
165 | 163 | ||
166 | if (!force) { | 164 | if (!force) { |
167 | if (atomic_read(&nf->mem) <= nf->high_thresh) | 165 | if (frag_mem_limit(nf) <= nf->high_thresh) |
168 | return 0; | 166 | return 0; |
169 | } | 167 | } |
170 | 168 | ||
171 | work = atomic_read(&nf->mem) - nf->low_thresh; | 169 | work = frag_mem_limit(nf) - nf->low_thresh; |
172 | while (work > 0) { | 170 | while (work > 0) { |
173 | read_lock(&f->lock); | 171 | read_lock(&f->lock); |
174 | if (list_empty(&nf->lru_list)) { | 172 | if (list_empty(&nf->lru_list)) { |
@@ -250,7 +248,8 @@ static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf, | |||
250 | 248 | ||
251 | q->net = nf; | 249 | q->net = nf; |
252 | f->constructor(q, arg); | 250 | f->constructor(q, arg); |
253 | atomic_add(f->qsize, &nf->mem); | 251 | add_frag_mem_limit(q, f->qsize); |
252 | |||
254 | setup_timer(&q->timer, f->frag_expire, (unsigned long)q); | 253 | setup_timer(&q->timer, f->frag_expire, (unsigned long)q); |
255 | spin_lock_init(&q->lock); | 254 | spin_lock_init(&q->lock); |
256 | atomic_set(&q->refcnt, 1); | 255 | atomic_set(&q->refcnt, 1); |