author    Daniel Vetter <daniel.vetter@ffwll.ch>  2013-03-19 04:47:30 -0400
committer Daniel Vetter <daniel.vetter@ffwll.ch>  2013-03-19 04:47:30 -0400
commit    0d4a42f6bd298e826620585e766a154ab460617a (patch)
tree      406d8f7778691d858dbe3e48e4bbb10e99c0a58a /include/net/inet_frag.h
parent    d62b4892f3d9f7dd2002e5309be10719d6805b0f (diff)
parent    a937536b868b8369b98967929045f1df54234323 (diff)
Merge tag 'v3.9-rc3' into drm-intel-next-queued
Backmerge so that I can merge Imre Deak's coalesced sg entries fixes, which
depend upon the new for_each_sg_page introduced in

    commit a321e91b6d73ed011ffceed384c40d2785cf723b
    Author: Imre Deak <imre.deak@intel.com>
    Date:   Wed Feb 27 17:02:56 2013 -0800

        lib/scatterlist: add simple page iterator

The merge itself is just two trivial conflicts:

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Diffstat (limited to 'include/net/inet_frag.h')
-rw-r--r--    include/net/inet_frag.h    90
1 file changed, 81 insertions(+), 9 deletions(-)
diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
index 32786a044718..76c3fe5ecc2e 100644
--- a/include/net/inet_frag.h
+++ b/include/net/inet_frag.h
@@ -1,10 +1,17 @@
 #ifndef __NET_FRAG_H__
 #define __NET_FRAG_H__
 
+#include <linux/percpu_counter.h>
+
 struct netns_frags {
 	int nqueues;
-	atomic_t mem;
 	struct list_head lru_list;
+	spinlock_t lru_lock;
+
+	/* The percpu_counter "mem" need to be cacheline aligned.
+	 * mem.count must not share cacheline with other writers
+	 */
+	struct percpu_counter mem ____cacheline_aligned_in_smp;
 
 	/* sysctls */
 	int timeout;
@@ -13,12 +20,11 @@ struct netns_frags {
 };
 
 struct inet_frag_queue {
-	struct hlist_node list;
-	struct netns_frags *net;
-	struct list_head lru_list;	/* lru list member */
 	spinlock_t lock;
-	atomic_t refcnt;
 	struct timer_list timer;	/* when will this queue expire? */
+	struct list_head lru_list;	/* lru list member */
+	struct hlist_node list;
+	atomic_t refcnt;
 	struct sk_buff *fragments;	/* list of received fragments */
 	struct sk_buff *fragments_tail;
 	ktime_t stamp;
@@ -31,24 +37,29 @@ struct inet_frag_queue {
 #define INET_FRAG_LAST_IN 1
 
 	u16 max_size;
+
+	struct netns_frags *net;
 };
 
 #define INETFRAGS_HASHSZ 64
 
 struct inet_frags {
 	struct hlist_head hash[INETFRAGS_HASHSZ];
-	rwlock_t lock;
-	u32 rnd;
-	int qsize;
+	/* This rwlock is a global lock (seperate per IPv4, IPv6 and
+	 * netfilter). Important to keep this on a seperate cacheline.
+	 */
+	rwlock_t lock ____cacheline_aligned_in_smp;
 	int secret_interval;
 	struct timer_list secret_timer;
+	u32 rnd;
+	int qsize;
 
 	unsigned int (*hashfn)(struct inet_frag_queue *);
+	bool (*match)(struct inet_frag_queue *q, void *arg);
 	void (*constructor)(struct inet_frag_queue *q,
 			    void *arg);
 	void (*destructor)(struct inet_frag_queue *);
 	void (*skb_free)(struct sk_buff *);
-	bool (*match)(struct inet_frag_queue *q, void *arg);
 	void (*frag_expire)(unsigned long data);
 };
 
@@ -72,4 +83,65 @@ static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f
 		inet_frag_destroy(q, f, NULL);
 }
 
+/* Memory Tracking Functions. */
+
+/* The default percpu_counter batch size is not big enough to scale to
+ * fragmentation mem acct sizes.
+ * The mem size of a 64K fragment is approx:
+ * (44 fragments * 2944 truesize) + frag_queue struct(200) = 129736 bytes
+ */
+static unsigned int frag_percpu_counter_batch = 130000;
+
+static inline int frag_mem_limit(struct netns_frags *nf)
+{
+	return percpu_counter_read(&nf->mem);
+}
+
+static inline void sub_frag_mem_limit(struct inet_frag_queue *q, int i)
+{
+	__percpu_counter_add(&q->net->mem, -i, frag_percpu_counter_batch);
+}
+
+static inline void add_frag_mem_limit(struct inet_frag_queue *q, int i)
+{
+	__percpu_counter_add(&q->net->mem, i, frag_percpu_counter_batch);
+}
+
+static inline void init_frag_mem_limit(struct netns_frags *nf)
+{
+	percpu_counter_init(&nf->mem, 0);
+}
+
+static inline int sum_frag_mem_limit(struct netns_frags *nf)
+{
+	int res;
+
+	local_bh_disable();
+	res = percpu_counter_sum_positive(&nf->mem);
+	local_bh_enable();
+
+	return res;
+}
+
+static inline void inet_frag_lru_move(struct inet_frag_queue *q)
+{
+	spin_lock(&q->net->lru_lock);
+	list_move_tail(&q->lru_list, &q->net->lru_list);
+	spin_unlock(&q->net->lru_lock);
+}
+
+static inline void inet_frag_lru_del(struct inet_frag_queue *q)
+{
+	spin_lock(&q->net->lru_lock);
+	list_del(&q->lru_list);
+	spin_unlock(&q->net->lru_lock);
+}
+
+static inline void inet_frag_lru_add(struct netns_frags *nf,
+				     struct inet_frag_queue *q)
+{
+	spin_lock(&nf->lru_lock);
+	list_add_tail(&q->lru_list, &nf->lru_list);
+	spin_unlock(&nf->lru_lock);
+}
 #endif
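
Note: both structs above push a hot field onto its own cacheline with
____cacheline_aligned_in_smp, so that frequent writes to it do not keep
invalidating the line holding the read-mostly fields next to it. Below is a
minimal standalone sketch of the layout effect only, using C11 alignas as a
stand-in for the kernel macro; the struct and field names are hypothetical
and the 64-byte line size is an assumption (typical for x86_64):

	/* layout_sketch.c - standalone model, NOT kernel code. */
	#include <stdalign.h>
	#include <stddef.h>
	#include <stdio.h>

	#define CACHELINE 64 /* assumed L1 cacheline size */

	/* Hot counter shares a line with a read-mostly field: every
	 * write invalidates that line for readers of nqueues. */
	struct frags_shared {
		int nqueues;    /* read-mostly */
		long mem_count; /* written constantly */
	};

	/* Rough analogue of
	 * "struct percpu_counter mem ____cacheline_aligned_in_smp;":
	 * the hot counter starts on its own cacheline. */
	struct frags_aligned {
		int nqueues;
		alignas(CACHELINE) long mem_count;
	};

	int main(void)
	{
		printf("shared:  mem_count at offset %zu (same line)\n",
		       offsetof(struct frags_shared, mem_count));
		printf("aligned: mem_count at offset %zu (own line)\n",
		       offsetof(struct frags_aligned, mem_count));
		return 0;
	}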
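The batch argument that sub_frag_mem_limit()/add_frag_mem_limit() pass to
__percpu_counter_add() controls how large a delta each CPU may accumulate
locally before folding it into the shared count. The following standalone
model (the model_* names are made up for illustration; it only mimics the
fold-at-batch rule, not the real per-cpu machinery) shows why the batch is
set to 130000, just above the ~129736 bytes one full 64K fragment queue
accounts:

	/* model_pcpu.c - standalone model, NOT kernel code. */
	#include <stdio.h>

	#define NR_CPUS 4

	struct model_counter {
		long count;         /* shared approximate total */
		long pcpu[NR_CPUS]; /* per-cpu deltas not yet folded in */
	};

	/* Mimics the fold-at-batch rule of __percpu_counter_add(). */
	static void model_add(struct model_counter *c, int cpu,
			      long amount, long batch)
	{
		long v = c->pcpu[cpu] + amount;

		if (v >= batch || v <= -batch) {
			c->count += v;    /* touch the shared cacheline */
			c->pcpu[cpu] = 0;
		} else {
			c->pcpu[cpu] = v; /* stay CPU-local */
		}
	}

	/* Fast, possibly stale read, like percpu_counter_read(). */
	static long model_read(const struct model_counter *c)
	{
		return c->count;
	}

	int main(void)
	{
		struct model_counter c = { 0 };

		/* Account one full 64K fragment queue on CPU 0:
		 * 44 fragments of truesize 2944 = 129536 bytes. */
		for (int i = 0; i < 44; i++)
			model_add(&c, 0, 2944, 130000);

		/* 129536 < 130000, so nothing was folded: the shared
		 * total still reads 0 and was never written. */
		printf("approx=%ld exact=%ld\n", model_read(&c),
		       c.count + c.pcpu[0]);
		return 0;
	}

With the kernel's default percpu_counter batch (32, far below a fragment's
truesize), the same 44 additions would each have folded into and written
the shared count, which is exactly the cacheline traffic the larger batch
avoids; the price is that frag_mem_limit() may under-read by up to almost
one batch per CPU, which sum_frag_mem_limit() compensates for by summing
the per-cpu deltas exactly.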