Diffstat (limited to 'include/net/inet_frag.h')
 include/net/inet_frag.h | 26 ++++++++++++++++++--------
 1 file changed, 18 insertions(+), 8 deletions(-)
diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
index f2fabc2a79de..e0eec7450f15 100644
--- a/include/net/inet_frag.h
+++ b/include/net/inet_frag.h
@@ -1,14 +1,17 @@
 #ifndef __NET_FRAG_H__
 #define __NET_FRAG_H__
 
+#include <linux/percpu_counter.h>
+
 struct netns_frags {
 	int			nqueues;
 	struct list_head	lru_list;
 
-	/* Its important for performance to keep lru_list and mem on
-	 * separate cachelines
+	/* The percpu_counter "mem" need to be cacheline aligned.
+	 *  mem.count must not share cacheline with other writers
 	 */
-	atomic_t		mem ____cacheline_aligned_in_smp;
+	struct percpu_counter	mem ____cacheline_aligned_in_smp;
+
 	/* sysctls */
 	int			timeout;
 	int			high_thresh;
@@ -81,29 +84,36 @@ static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f
 
 /* Memory Tracking Functions. */
 
+/* The default percpu_counter batch size is not big enough to scale to
+ * fragmentation mem acct sizes.
+ * The mem size of a 64K fragment is approx:
+ *  (44 fragments * 2944 truesize) + frag_queue struct(200) = 129736 bytes
+ */
+static unsigned int frag_percpu_counter_batch = 130000;
+
 static inline int frag_mem_limit(struct netns_frags *nf)
 {
-	return atomic_read(&nf->mem);
+	return percpu_counter_read(&nf->mem);
 }
 
 static inline void sub_frag_mem_limit(struct inet_frag_queue *q, int i)
 {
-	atomic_sub(i, &q->net->mem);
+	__percpu_counter_add(&q->net->mem, -i, frag_percpu_counter_batch);
 }
 
 static inline void add_frag_mem_limit(struct inet_frag_queue *q, int i)
 {
-	atomic_add(i, &q->net->mem);
+	__percpu_counter_add(&q->net->mem, i, frag_percpu_counter_batch);
 }
 
 static inline void init_frag_mem_limit(struct netns_frags *nf)
 {
-	atomic_set(&nf->mem, 0);
+	percpu_counter_init(&nf->mem, 0);
 }
 
 static inline int sum_frag_mem_limit(struct netns_frags *nf)
 {
-	return atomic_read(&nf->mem);
+	return percpu_counter_sum_positive(&nf->mem);
 }
 
 #endif
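
Note on the batching scheme the second hunk relies on: the sketch below is a simplified, single-threaded userspace model of a batched per-CPU counter, written only to illustrate why frag_percpu_counter_batch is sized just above the ~129736 bytes that one full 64K fragment queue accounts for. It is not kernel code; the names model_counter, model_add, model_read and model_sum are invented for illustration, and the real <linux/percpu_counter.h> implementation uses per-CPU variables and locking that this model omits.

#include <stdio.h>

#define NR_CPUS 4

struct model_counter {
	long count;              /* global, approximate count */
	long cpu_delta[NR_CPUS]; /* per-CPU pending deltas */
};

/* Loosely mirrors __percpu_counter_add(): accumulate locally and fold the
 * delta into the shared count only once it reaches the batch size. */
static void model_add(struct model_counter *c, int cpu, long amount, long batch)
{
	long d = c->cpu_delta[cpu] + amount;

	if (d >= batch || d <= -batch) {
		c->count += d;         /* flush: the only write to the shared count */
		c->cpu_delta[cpu] = 0;
	} else {
		c->cpu_delta[cpu] = d; /* cheap, CPU-local update */
	}
}

/* Loosely mirrors percpu_counter_read(): fast, ignores pending deltas. */
static long model_read(const struct model_counter *c)
{
	return c->count;
}

/* Loosely mirrors percpu_counter_sum_positive(): slow but precise. */
static long model_sum(const struct model_counter *c)
{
	long sum = c->count;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		sum += c->cpu_delta[cpu];
	return sum > 0 ? sum : 0;  /* clamp at zero, as "sum_positive" suggests */
}

int main(void)
{
	struct model_counter mem = { 0 };
	const long batch = 130000;
	int i;

	/* Account one 64K fragment queue on CPU 0:
	 * 44 fragments * 2944 bytes truesize, plus ~200 bytes of frag_queue. */
	for (i = 0; i < 44; i++)
		model_add(&mem, 0, 2944, batch);
	model_add(&mem, 0, 200, batch);

	printf("approximate read: %ld\n", model_read(&mem)); /* 0: still below the batch */
	printf("exact sum       : %ld\n", model_sum(&mem));  /* 129736 */
	return 0;
}

With the batch set above the cost of a whole 64K fragment queue, the add_frag_mem_limit()/sub_frag_mem_limit() traffic for one queue stays in the CPU-local delta instead of bouncing a shared cacheline. The trade-off is that the cheap read used by frag_mem_limit() can lag behind the true total by roughly the batch size per CPU, which is why sum_frag_mem_limit() uses the precise (and more expensive) percpu_counter_sum_positive().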