Diffstat (limited to 'net/core/flow.c')

 net/core/flow.c | 128 ++++++++++++++++++++++++++++++++++------------------------------
 1 file changed, 68 insertions(+), 60 deletions(-)
diff --git a/net/core/flow.c b/net/core/flow.c
index 1d27ca6b421d..521df52a77d2 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -26,17 +26,16 @@
 #include <linux/security.h>
 
 struct flow_cache_entry {
 	struct flow_cache_entry	*next;
 	u16			family;
 	u8			dir;
 	u32			genid;
 	struct flowi		key;
-	void			*object;
-	atomic_t		*object_ref;
+	struct flow_cache_object *object;
 };
 
 struct flow_cache_percpu {
-	struct flow_cache_entry **	hash_table;
+	struct flow_cache_entry	**hash_table;
 	int			hash_count;
 	u32			hash_rnd;
 	int			hash_rnd_recalc;
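The cached value is no longer an opaque void * paired with an external refcount; it is a typed struct flow_cache_object carrying a small ops vtable. The definitions live in the include/net/flow.h half of this commit, which the diffstat filter above hides. Inferred from the ops->get/check/delete calls in the hunks below, they look roughly like this (a sketch, not the verbatim header):

	struct flow_cache_ops;

	struct flow_cache_object {
		const struct flow_cache_ops *ops;
	};

	struct flow_cache_ops {
		/* Take a reference for a cache hit; NULL forces re-resolution. */
		struct flow_cache_object *(*get)(struct flow_cache_object *flo);
		/* Nonzero iff the cached object is still usable. */
		int (*check)(struct flow_cache_object *flo);
		/* Drop the cache's reference (entry evicted or replaced). */
		void (*delete)(struct flow_cache_object *flo);
	};

Owners embed struct flow_cache_object in their own structure and recover it with container_of(), so per-object state such as refcounts and validity stays with the owner rather than in the generic cache.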
@@ -44,7 +43,7 @@ struct flow_cache_percpu {
 };
 
 struct flow_flush_info {
-	struct flow_cache *	cache;
+	struct flow_cache	*cache;
 	atomic_t		cpuleft;
 	struct completion	completion;
 };
@@ -52,7 +51,7 @@ struct flow_flush_info {
 struct flow_cache {
 	u32			hash_shift;
 	unsigned long		order;
-	struct flow_cache_percpu *	percpu;
+	struct flow_cache_percpu *percpu;
 	struct notifier_block	hotcpu_notifier;
 	int			low_watermark;
 	int			high_watermark;
@@ -78,12 +77,21 @@ static void flow_cache_new_hashrnd(unsigned long arg)
 	add_timer(&fc->rnd_timer);
 }
 
+static int flow_entry_valid(struct flow_cache_entry *fle)
+{
+	if (atomic_read(&flow_cache_genid) != fle->genid)
+		return 0;
+	if (fle->object && !fle->object->ops->check(fle->object))
+		return 0;
+	return 1;
+}
+
 static void flow_entry_kill(struct flow_cache *fc,
 			    struct flow_cache_percpu *fcp,
 			    struct flow_cache_entry *fle)
 {
 	if (fle->object)
-		atomic_dec(fle->object_ref);
+		fle->object->ops->delete(fle->object);
 	kmem_cache_free(flow_cachep, fle);
 	fcp->hash_count--;
 }
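flow_entry_valid() folds the two staleness tests into one predicate: the global generation count must match, and the object must pass its own check(). flow_entry_kill() likewise delegates teardown to delete() instead of decrementing a bare atomic_t. A minimal owner honoring this contract might look like the following sketch (hypothetical my_* names, assuming kernel context and the ops layout sketched above):

	struct my_flow_obj {
		struct flow_cache_object flo;	/* embedded; ops recover the owner */
		atomic_t		 refcnt;
		int			 dead;
	};

	static struct flow_cache_object *my_get(struct flow_cache_object *flo)
	{
		struct my_flow_obj *obj = container_of(flo, struct my_flow_obj, flo);

		if (obj->dead)
			return NULL;		/* miss: send the caller to the resolver */
		atomic_inc(&obj->refcnt);
		return flo;
	}

	static int my_check(struct flow_cache_object *flo)
	{
		return !container_of(flo, struct my_flow_obj, flo)->dead;
	}

	static void my_delete(struct flow_cache_object *flo)
	{
		struct my_flow_obj *obj = container_of(flo, struct my_flow_obj, flo);

		if (atomic_dec_and_test(&obj->refcnt))
			kfree(obj);
	}

	static const struct flow_cache_ops my_flow_ops = {
		.get	= my_get,
		.check	= my_check,
		.delete	= my_delete,
	};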
@@ -96,16 +104,18 @@ static void __flow_cache_shrink(struct flow_cache *fc,
 	int i;
 
 	for (i = 0; i < flow_cache_hash_size(fc); i++) {
-		int k = 0;
+		int saved = 0;
 
 		flp = &fcp->hash_table[i];
-		while ((fle = *flp) != NULL && k < shrink_to) {
-			k++;
-			flp = &fle->next;
-		}
 		while ((fle = *flp) != NULL) {
-			*flp = fle->next;
-			flow_entry_kill(fc, fcp, fle);
+			if (saved < shrink_to &&
+			    flow_entry_valid(fle)) {
+				saved++;
+				flp = &fle->next;
+			} else {
+				*flp = fle->next;
+				flow_entry_kill(fc, fcp, fle);
+			}
 		}
 	}
 }
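The shrink path previously kept the first shrink_to entries of each chain unconditionally and killed the tail. It now makes a single pass with a pointer-to-pointer cursor, keeping up to shrink_to entries that are still valid and unlinking everything else, stale entries included, in place. The unlink idiom in isolation, as a self-contained userspace sketch:

	#include <stdio.h>
	#include <stdlib.h>

	struct node { int val; struct node *next; };

	/* Keep at most 'keep' nodes accepted by valid(); splice out and free
	 * the rest in one pass, mirroring the **flp walk in __flow_cache_shrink(). */
	static void prune(struct node **flp, int keep, int (*valid)(struct node *))
	{
		struct node *n;
		int saved = 0;

		while ((n = *flp) != NULL) {
			if (saved < keep && valid(n)) {
				saved++;
				flp = &n->next;	/* keep: advance past this node */
			} else {
				*flp = n->next;	/* drop: cursor stays put */
				free(n);
			}
		}
	}

	static int is_even(struct node *n) { return n->val % 2 == 0; }

	int main(void)
	{
		struct node *head = NULL, **tail = &head;
		for (int i = 0; i < 8; i++) {
			*tail = malloc(sizeof(**tail));
			(*tail)->val = i;
			(*tail)->next = NULL;
			tail = &(*tail)->next;
		}
		prune(&head, 2, is_even);	/* keeps 0 and 2; frees the rest */
		for (struct node *n = head; n; n = n->next)
			printf("%d\n", n->val);
		return 0;
	}

Because the cursor is a pointer to the previous link rather than to the node, unlinking needs no "prev" bookkeeping and works uniformly at the head of the chain.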
@@ -166,18 +176,21 @@ static int flow_key_compare(struct flowi *key1, struct flowi *key2)
 	return 0;
 }
 
-void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family, u8 dir,
-			flow_resolve_t resolver)
+struct flow_cache_object *
+flow_cache_lookup(struct net *net, struct flowi *key, u16 family, u8 dir,
+		  flow_resolve_t resolver, void *ctx)
 {
 	struct flow_cache *fc = &flow_cache_global;
 	struct flow_cache_percpu *fcp;
 	struct flow_cache_entry *fle, **head;
+	struct flow_cache_object *flo;
 	unsigned int hash;
 
 	local_bh_disable();
 	fcp = per_cpu_ptr(fc->percpu, smp_processor_id());
 
 	fle = NULL;
+	flo = NULL;
 	/* Packet really early in init? Making flow_cache_init a
 	 * pre-smp initcall would solve this. --RR */
 	if (!fcp->hash_table)
@@ -185,27 +198,17 @@ void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family, u8 dir,
 
 	if (fcp->hash_rnd_recalc)
 		flow_new_hash_rnd(fc, fcp);
-	hash = flow_hash_code(fc, fcp, key);
 
+	hash = flow_hash_code(fc, fcp, key);
 	head = &fcp->hash_table[hash];
 	for (fle = *head; fle; fle = fle->next) {
 		if (fle->family == family &&
 		    fle->dir == dir &&
-		    flow_key_compare(key, &fle->key) == 0) {
-			if (fle->genid == atomic_read(&flow_cache_genid)) {
-				void *ret = fle->object;
-
-				if (ret)
-					atomic_inc(fle->object_ref);
-				local_bh_enable();
-
-				return ret;
-			}
+		    flow_key_compare(key, &fle->key) == 0)
 			break;
-		}
 	}
 
-	if (!fle) {
+	if (unlikely(!fle)) {
 		if (fcp->hash_count > fc->high_watermark)
 			flow_cache_shrink(fc, fcp);
 
@@ -219,33 +222,39 @@ void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family, u8 dir,
 			fle->object = NULL;
 			fcp->hash_count++;
 		}
+	} else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
+		flo = fle->object;
+		if (!flo)
+			goto ret_object;
+		flo = flo->ops->get(flo);
+		if (flo)
+			goto ret_object;
+	} else if (fle->object) {
+		flo = fle->object;
+		flo->ops->delete(flo);
+		fle->object = NULL;
 	}
 
 nocache:
-	{
-		int err;
-		void *obj;
-		atomic_t *obj_ref;
-
-		err = resolver(net, key, family, dir, &obj, &obj_ref);
-
-		if (fle && !err) {
-			fle->genid = atomic_read(&flow_cache_genid);
-
-			if (fle->object)
-				atomic_dec(fle->object_ref);
-
-			fle->object = obj;
-			fle->object_ref = obj_ref;
-			if (obj)
-				atomic_inc(fle->object_ref);
-		}
-		local_bh_enable();
-
-		if (err)
-			obj = ERR_PTR(err);
-		return obj;
+	flo = NULL;
+	if (fle) {
+		flo = fle->object;
+		fle->object = NULL;
+	}
+	flo = resolver(net, key, family, dir, flo, ctx);
+	if (fle) {
+		fle->genid = atomic_read(&flow_cache_genid);
+		if (!IS_ERR(flo))
+			fle->object = flo;
+		else
+			fle->genid--;
+	} else {
+		if (flo && !IS_ERR(flo))
+			flo->ops->delete(flo);
 	}
+ret_object:
+	local_bh_enable();
+	return flo;
 }
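The resolver contract changes with the signature: instead of filling (obj, obj_ref) out-parameters and returning an errno, a flow_resolve_t now receives the previously cached object (ownership transferred, possibly NULL) plus an opaque ctx, and returns either a live object or an ERR_PTR(). On error the entry's genid is decremented so the slot fails the next genid check and is resolved again. A hypothetical resolver following the new contract, reusing the my_* sketch above; the refcounting discipline (one reference retained by the cache slot, one returned to the caller) is an assumption inferred from get() taking a reference on hits:

	static struct flow_cache_object *
	my_resolver(struct net *net, struct flowi *key, u16 family, u8 dir,
		    struct flow_cache_object *old_obj, void *ctx)
	{
		struct my_flow_obj *obj;

		if (old_obj)
			my_delete(old_obj);	/* the stale cache reference is now ours */

		obj = kmalloc(sizeof(*obj), GFP_ATOMIC);	/* BHs are disabled: no sleeping */
		if (!obj)
			return ERR_PTR(-ENOBUFS);
		obj->flo.ops = &my_flow_ops;
		obj->dead = 0;
		atomic_set(&obj->refcnt, 2);	/* assumed: cache slot ref + caller ref */
		return &obj->flo;
	}

A caller would then invoke flow_cache_lookup(net, key, family, dir, my_resolver, my_ctx) and recover its own type from the returned pointer with container_of().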
 
 static void flow_cache_flush_tasklet(unsigned long data)
@@ -261,13 +270,12 @@ static void flow_cache_flush_tasklet(unsigned long data)
 
 		fle = fcp->hash_table[i];
 		for (; fle; fle = fle->next) {
-			unsigned genid = atomic_read(&flow_cache_genid);
-
-			if (!fle->object || fle->genid == genid)
+			if (flow_entry_valid(fle))
 				continue;
 
+			if (fle->object)
+				fle->object->ops->delete(fle->object);
 			fle->object = NULL;
-			atomic_dec(fle->object_ref);
 		}
 	}
 
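The flush tasklet now reaps per-object state through delete() rather than just dropping a refcount, reusing the shared flow_entry_valid() predicate. Consumers invalidate the cache by bumping the global genid and then flushing; a sketch of that pattern (flow_cache_flush() is defined later in this file, outside these hunks):

	static void invalidate_flow_cache(void)
	{
		/* Entries with a stale genid now fail flow_entry_valid(). */
		atomic_inc(&flow_cache_genid);
		/* Run the flush tasklet on every CPU and wait for completion,
		 * so every dead object has its delete() called. */
		flow_cache_flush();
	}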