aboutsummaryrefslogtreecommitdiffstats
path: root/include/net/inet_frag.h
diff options
context:
space:
mode:
authorJesper Dangaard Brouer <brouer@redhat.com>2013-01-28 18:45:33 -0500
committerDavid S. Miller <davem@davemloft.net>2013-01-29 13:36:24 -0500
commit6d7b857d541ecd1d9bd997c97242d4ef94b19de2 (patch)
treedf7813da4f92cad8413f71c325969b89416d8a24 /include/net/inet_frag.h
parentd433673e5f9180e05a770c4b2ab18c08ad51cc21 (diff)
net: use lib/percpu_counter API for fragmentation mem accounting
Replace the per network namespace shared atomic "mem" accounting variable, in the fragmentation code, with a lib/percpu_counter. Getting percpu_counter to scale to the fragmentation code usage requires some tweaks. At first view, percpu_counter looks superfast, but it does not scale on multi-CPU/NUMA machines, because the default batch size is too small, for frag code usage. Thus, I have adjusted the batch size by using __percpu_counter_add() directly, instead of percpu_counter_sub() and percpu_counter_add(). The batch size is increased to 130,000, based on the largest 64K fragment memory usage. This does introduce some imprecise memory accounting, but it does not need to be strict for this use-case. It is also essential, that the percpu_counter, does not share cacheline with other writers, to make this scale. Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'include/net/inet_frag.h')
-rw-r--r--include/net/inet_frag.h26
1 file changed, 18 insertions, 8 deletions
diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
index f2fabc2a79de..e0eec7450f15 100644
--- a/include/net/inet_frag.h
+++ b/include/net/inet_frag.h
@@ -1,14 +1,17 @@
1#ifndef __NET_FRAG_H__ 1#ifndef __NET_FRAG_H__
2#define __NET_FRAG_H__ 2#define __NET_FRAG_H__
3 3
4#include <linux/percpu_counter.h>
5
4struct netns_frags { 6struct netns_frags {
5 int nqueues; 7 int nqueues;
6 struct list_head lru_list; 8 struct list_head lru_list;
7 9
8 /* Its important for performance to keep lru_list and mem on 10 /* The percpu_counter "mem" need to be cacheline aligned.
9 * separate cachelines 11 * mem.count must not share cacheline with other writers
10 */ 12 */
11 atomic_t mem ____cacheline_aligned_in_smp; 13 struct percpu_counter mem ____cacheline_aligned_in_smp;
14
12 /* sysctls */ 15 /* sysctls */
13 int timeout; 16 int timeout;
14 int high_thresh; 17 int high_thresh;
@@ -81,29 +84,36 @@ static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f
81 84
82/* Memory Tracking Functions. */ 85/* Memory Tracking Functions. */
83 86
87/* The default percpu_counter batch size is not big enough to scale to
88 * fragmentation mem acct sizes.
89 * The mem size of a 64K fragment is approx:
90 * (44 fragments * 2944 truesize) + frag_queue struct(200) = 129736 bytes
91 */
92static unsigned int frag_percpu_counter_batch = 130000;
93
84static inline int frag_mem_limit(struct netns_frags *nf) 94static inline int frag_mem_limit(struct netns_frags *nf)
85{ 95{
86 return atomic_read(&nf->mem); 96 return percpu_counter_read(&nf->mem);
87} 97}
88 98
89static inline void sub_frag_mem_limit(struct inet_frag_queue *q, int i) 99static inline void sub_frag_mem_limit(struct inet_frag_queue *q, int i)
90{ 100{
91 atomic_sub(i, &q->net->mem); 101 __percpu_counter_add(&q->net->mem, -i, frag_percpu_counter_batch);
92} 102}
93 103
94static inline void add_frag_mem_limit(struct inet_frag_queue *q, int i) 104static inline void add_frag_mem_limit(struct inet_frag_queue *q, int i)
95{ 105{
96 atomic_add(i, &q->net->mem); 106 __percpu_counter_add(&q->net->mem, i, frag_percpu_counter_batch);
97} 107}
98 108
99static inline void init_frag_mem_limit(struct netns_frags *nf) 109static inline void init_frag_mem_limit(struct netns_frags *nf)
100{ 110{
101 atomic_set(&nf->mem, 0); 111 percpu_counter_init(&nf->mem, 0);
102} 112}
103 113
104static inline int sum_frag_mem_limit(struct netns_frags *nf) 114static inline int sum_frag_mem_limit(struct netns_frags *nf)
105{ 115{
106 return atomic_read(&nf->mem); 116 return percpu_counter_sum_positive(&nf->mem);
107} 117}
108 118
109#endif 119#endif